licenses | version | tree_hash | path | type | size | text | package_name | repo
---|---|---|---|---|---|---|---|---|
[
"BSD-3-Clause"
] | 0.2.0 | 4914a192b0c8da30d7416803c2d1659cdc7da78c | code | 132 | include("measurement.jl")
include("comparison.jl")
include("projection.jl")
include("intersection.jl")
include("transformation.jl")
| ScikitSpatial | https://github.com/ajhynes7/ScikitSpatial.jl.git |
|
[
"BSD-3-Clause"
] | 0.2.0 | 4914a192b0c8da30d7416803c2d1659cdc7da78c | code | 1256 | import Base: isapprox
function is_zero(v; kwargs...)
isapprox(v ⋅ v, 0; kwargs...)
end
function are_parallel(u, v; kwargs...)
if is_zero(u; kwargs...) || is_zero(v; kwargs...)
return true
end
similarity = cosine_similarity(u, v)
return isapprox(abs(similarity), 1; kwargs...)
end
function are_perpendicular(u, v; kwargs...)
return isapprox(u ⋅ v, 0; kwargs...)
end
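# Points are stored as matrix columns; they are coplanar iff the vectors
# from the first point to the others span at most a plane (rank <= 2).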
function are_coplanar(points::AbstractMatrix)
point_1 = points[:, 1]
vectors = points .- point_1
return rank(vectors) <= 2
end
function are_coplanar(line_a::AbstractLine, line_b::AbstractLine)
point_a1 = line_a.point
point_a2 = to_point(line_a)
point_b1 = line_b.point
point_b2 = to_point(line_b)
points = hcat(point_a1, point_a2, point_b1, point_b2)
return are_coplanar(points)
end
function on_surface(point::AbstractVector, line::AbstractLine; kwargs...)
return isapprox(distance(point, line), 0; kwargs...)
end
function isapprox(line_a::AbstractLine, line_b::AbstractLine; kwargs...)
point_on_surface = on_surface(line_a.point, line_b; kwargs...)
directions_parallel = are_parallel(line_a.direction, line_b.direction; kwargs...)
return point_on_surface && directions_parallel
end
| ScikitSpatial | https://github.com/ajhynes7/ScikitSpatial.jl.git |
|
[
"BSD-3-Clause"
] | 0.2.0 | 4914a192b0c8da30d7416803c2d1659cdc7da78c | code | 3716 | import ScikitSpatial: intersect
function intersect(line_a::AbstractLine, line_b::AbstractLine)
if !are_coplanar(line_a, line_b)
throw(ArgumentError("The lines are not coplanar."))
end
if are_parallel(line_a.direction, line_b.direction)
throw(ArgumentError("The lines are parallel."))
end
# Vector from line A to line B.
vector_ab = Vector(line_a.point, line_b.point)
# Vector perpendicular to both lines.
vector_perpendicular = line_a.direction × line_b.direction
num = (vector_ab × line_b.direction) ⋅ vector_perpendicular
denom = norm(vector_perpendicular)^2
# Vector along line A to the intersection point.
vector_a_scaled = num / denom * line_a.direction
return line_a.point + vector_a_scaled
end
function intersect(plane::AbstractPlane, line::AbstractLine; kwargs...)
if are_perpendicular(line.direction, plane.normal; kwargs...)
throw(ArgumentError("The line and plane are parallel."))
end
vector_plane_line = Vector(plane.point, line.point)
num = -plane.normal ⋅ vector_plane_line
denom = plane.normal ⋅ line.direction
# Vector along the line to the intersection point.
vector_line_scaled = num / denom * line.direction
return line.point + vector_line_scaled
end
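# A point on the intersection line of two planes is found by minimizing ‖x‖²
# subject to x lying on both planes; the block matrix below is the KKT system
# of that quadratic program.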
function intersect(plane_a::AbstractPlane, plane_b::AbstractPlane)
array_normals_stacked = vcat(plane_a.normal', plane_b.normal')
array_11 = 2 * Matrix(1I, 3, 3)
array_12 = array_normals_stacked'
array_21 = array_normals_stacked
array_22 = zeros(2, 2)
matrix = vcat(hcat(array_11, array_12), hcat(array_21, array_22))
dot_a = plane_a.point ⋅ plane_a.normal
dot_b = plane_b.point ⋅ plane_b.normal
y = [0, 0, 0, dot_a, dot_b]
solution = matrix \ y
point = solution[1:3]
direction = plane_a.normal × plane_b.normal
return Line(point, direction)
end
function intersect(circle::Circle, line::AbstractLine)
# Two points on the line.
point_1 = line.point
point_2 = point_1 + unit(line.direction)
# Translate the points on the line to mimic the circle being centered on the origin.
point_translated_1 = point_1 - circle.point
point_translated_2 = point_2 - circle.point
x_1, y_1 = point_translated_1
x_2, y_2 = point_translated_2
d_x = x_2 - x_1
d_y = y_2 - y_1
# Pre-compute variables common to x and y equations.
d_r_squared = d_x^2 + d_y^2
determinant = x_1 * y_2 - x_2 * y_1
discriminant = circle.radius^2 * d_r_squared - determinant^2
if discriminant < 0
throw(ArgumentError("The line does not intersect the circle."))
end
root = √discriminant
mp = [-1, 1] # Array to compute minus/plus.
sign = d_y < 0 ? -1 : 1
coords_x = (determinant * d_y .+ mp * sign * d_x * root) / d_r_squared
coords_y = (-determinant * d_x .+ mp * abs(d_y) * root) / d_r_squared
point_translated_a = [coords_x[1], coords_y[1]]
point_translated_b = [coords_x[2], coords_y[2]]
# Translate the intersection points back from origin circle to real circle.
point_a = point_translated_a + circle.point
point_b = point_translated_b + circle.point
return point_a, point_b
end
function intersect(sphere::Sphere, line::AbstractLine)
vector_to_line = Vector(sphere.point, line.point)
direction_unit = unit(line.direction)
dot = direction_unit ⋅ vector_to_line
discriminant = dot^2 - (norm(vector_to_line)^2 - sphere.radius^2)
if discriminant < 0
throw(ArgumentError("The line does not intersect the sphere."))
end
pm = [-1, 1] # Array to compute minus/plus.
distances = -dot .+ √discriminant * pm
points = line.point .+ distances' .* direction_unit
point_a = points[:, 1]
point_b = points[:, 2]
return point_a, point_b
end
| ScikitSpatial | https://github.com/ajhynes7/ScikitSpatial.jl.git |
|
[
"BSD-3-Clause"
] | 0.2.0 | 4914a192b0c8da30d7416803c2d1659cdc7da78c | code | 2133 | """
cosine_similarity(u::AbstractVector, v::AbstractVector) -> Float
Compute the cosine similarity of two vectors.
# Examples
```jldoctest
julia> cosine_similarity([1, 0], [1, 0])
1.0
julia> round(cosine_similarity([1,1], [1,0]), digits=3)
0.707
julia> cosine_similarity([1, 0], [0, 1])
0.0
julia> cosine_similarity([-1, 0], [1, 0])
-1.0
julia> round(cosine_similarity([1,0,0], [1,1,1]), digits=3)
0.577
```
"""
function cosine_similarity(u::AbstractVector, v::AbstractVector)
u ⋅ v / (norm(u) * norm(v))
end
"""
angle_between(u::AbstractVector, v::AbstractVector) -> Float
Compute the angle between two vectors. The angle is returned in radians.
# Examples
```jldoctest
julia> angle_between([1, 0], [1, 0])
0.0
julia> round(angle_between([1,1], [1,0]), digits=3)
0.785
julia> round(angle_between([1, 0], [0, 1]), digits=3)
1.571
julia> round(angle_between([-1, 0], [1, 0]), digits=3)
3.142
julia> round(angle_between([1,0,0], [1,1,1]), digits=3)
0.955
```
"""
function angle_between(u::AbstractVector, v::AbstractVector)
acos(cosine_similarity(u, v))
end
"""
distance(point_a::AbstractVector, point_b::AbstractVector) -> Float
Compute the distance between two points.
# Examples
```jldoctest
julia> distance([0, 0], [0, 0])
0.0
julia> distance([1, 0], [0, 0])
1.0
julia> round(distance([1, 1], [2, 2]), digits=3)
1.414
julia> round(distance([0, 0, 0], [-1, -1, -1]), digits=3)
1.732
```
"""
function distance(point_a::AbstractVector, point_b::AbstractVector)
return norm(Vector(point_a, point_b))
end
"""
distance(point::AbstractVector, line::AbstractLine) -> Float
Compute the distance from a point to a line.
This is the distance from the point to its projection on the line.
# Examples
```jldoctest
julia> distance([0, 0], Line([0, 0], [1, 0]))
0.0
julia> round(distance([1, 0], Line([0, 0], [1, 1])), digits=3)
0.707
julia> round(distance([1, 2, 3], Line([-1, 3, 2], [7, 4, 2])), digits=3)
1.978
```
"""
function distance(point::AbstractVector, line::AbstractLine)
point_projected = project(point, line)
return distance(point, point_projected)
end
| ScikitSpatial | https://github.com/ajhynes7/ScikitSpatial.jl.git |
|
[
"BSD-3-Clause"
] | 0.2.0 | 4914a192b0c8da30d7416803c2d1659cdc7da78c | code | 2357 | """
project(u::AbstractVector, v::AbstractVector) -> AbstractVector
Project vector `u` onto vector `v`.
# Examples
```jldoctest
julia> project([1, 1], [1, 0])
2-element Base.Vector{Float64}:
1.0
0.0
julia> project([5, 5], [1, 0])
2-element Base.Vector{Float64}:
5.0
0.0
julia> project([5, -5], [0, 1])
2-element Base.Vector{Float64}:
-0.0
-5.0
```
"""
function project(u::AbstractVector, v::AbstractVector)
return (u ⋅ v) / (v ⋅ v) * v
end
"""
project(point::AbstractVector, line::AbstractLine) -> StaticArrays.SVector
Project a point onto a line.
# Examples
```jldoctest
julia> project([1, 1], Line([0, 0], [1, 0]))
2-element StaticArrays.SVector{2, Float64} with indices SOneTo(2):
1.0
0.0
julia> project([5, -1], Line([0, 0], [1, 0]))
2-element StaticArrays.SVector{2, Float64} with indices SOneTo(2):
5.0
0.0
julia> project([1, 0], Line([0, 0], [1, 1]))
2-element StaticArrays.SVector{2, Float64} with indices SOneTo(2):
0.5
0.5
julia> point = project([1, 0, 0], Line([0, 0, 0], [1, 1, 1]));
julia> round.(point, digits=3)
3-element StaticArrays.SVector{3, Float64} with indices SOneTo(3):
0.333
0.333
0.333
```
"""
function project(point::AbstractVector, line::AbstractLine)
# Vector from the point on the line to the point in space.
vector_to_point = Vector(line.point, point)
# Vector projected onto the line.
vector_projected = project(vector_to_point, line.direction)
return line.point + vector_projected
end
"""
project(point::AbstractVector, plane::AbstractPlane) -> StaticArrays.SVector
Project a point onto a plane.
# Examples
```jldoctest
julia> project([0, 0, 5], Plane([0, 0, 0], [0, 0, 1]))
3-element StaticArrays.SVector{3, Float64} with indices SOneTo(3):
0.0
0.0
0.0
julia> plane = Plane([1,2,3], [1, 3, -2]);
julia> point_projected = project([5, 1, 3], plane);
julia> round.(point_projected, digits=3)
3-element StaticArrays.SVector{3, Float64} with indices SOneTo(3):
4.929
0.786
3.143
```
"""
function project(point::AbstractVector, plane::AbstractPlane)
# Vector from the point in space to the point on the plane.
vector_to_plane_point = Vector(point, plane.point)
# Perpendicular vector from the point in space to the plane.
vector_to_plane = project(vector_to_plane_point, plane.normal)
return point + vector_to_plane
end
| ScikitSpatial | https://github.com/ajhynes7/ScikitSpatial.jl.git |
|
[
"BSD-3-Clause"
] | 0.2.0 | 4914a192b0c8da30d7416803c2d1659cdc7da78c | code | 451 | using Statistics
function Vector(point_a::AbstractVector, point_b::AbstractVector)
return point_b - point_a
end
function unit(v::AbstractVector)
return v / norm(v)
end
function to_point(line::AbstractLine; t::Int=1)
return line.point + t * line.direction
end
function centroid(points::AbstractMatrix)
return vec(mean(points, dims=2))
end
function mean_center(points::AbstractMatrix)
return points .- centroid(points)
end
| ScikitSpatial | https://github.com/ajhynes7/ScikitSpatial.jl.git |
|
[
"BSD-3-Clause"
] | 0.2.0 | 4914a192b0c8da30d7416803c2d1659cdc7da78c | code | 96 | abstract type AbstractSpatial end
include("line_and_plane.jl")
include("circle_and_sphere.jl")
| ScikitSpatial | https://github.com/ajhynes7/ScikitSpatial.jl.git |
|
[
"BSD-3-Clause"
] | 0.2.0 | 4914a192b0c8da30d7416803c2d1659cdc7da78c | code | 937 | abstract type AbstractHypersphere <: AbstractSpatial end
struct Hypersphere{N, T} <: AbstractHypersphere where {N, T}
point::SVector{N, T}
radius::Real
function Hypersphere(point::SVector{N, T}, radius::Real) where {N, T}
if radius <= 0
throw(ArgumentError("The radius must be positive."))
end
return new{N, T}(point, radius)
end
end
function Hypersphere(point::AbstractVector, radius::Real)
point_static = _convert_to_svector(point)
return Hypersphere(point_static, radius)
end
const Circle = Hypersphere{2, T} where {T}
const Sphere = Hypersphere{3, T} where {T}
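# The @eval loop below generates dimension-checked convenience constructors:
# `Circle(point, radius)` requires a 2D point, `Sphere(point, radius)` a 3D one.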
for (type, dim) in zip([:Circle, :Sphere], [2, 3])
@eval function $type(point::AbstractVector, radius::Real)
if length(point) != $dim
throw(ArgumentError("The dimension of the point must be " * string($dim)))
end
return Hypersphere(point, radius)
end
end
| ScikitSpatial | https://github.com/ajhynes7/ScikitSpatial.jl.git |
|
[
"BSD-3-Clause"
] | 0.2.0 | 4914a192b0c8da30d7416803c2d1659cdc7da78c | code | 1162 | abstract type AbstractHyperplane <: AbstractSpatial end
abstract type AbstractLine <: AbstractHyperplane end
abstract type AbstractPlane <: AbstractHyperplane end
for (type, supertype) in zip([:Line, :Plane], [:AbstractLine, :AbstractPlane])
@eval struct $type{N, T} <: $supertype
point::SVector{N, T}
vector::SVector{N, T}
function $type(point::SVector{N, T}, vector::SVector{N, T}; kwargs...) where {N, T}
if is_zero(vector; kwargs...)
throw(ArgumentError("The vector must have a non-zero magnitude."))
end
new{N, T}(point, vector)
end
end
@eval function $type(point::AbstractVector, vector::AbstractVector; kwargs...)
point_static, vector_static = _convert_to_svector([point, vector])
return $type(point_static, vector_static; kwargs...)
end
end
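# Alias the stored `vector` field so that `line.direction` and `plane.normal`
# both forward to it via `getproperty`.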
for (type, alias) in zip(
(:AbstractLine, :AbstractPlane),
(:(:direction), :(:normal)),
)
@eval function Base.getproperty(obj::$type, symbol::Symbol)
if symbol === $alias
return obj.vector
end
return getfield(obj, symbol)
end
end
| ScikitSpatial | https://github.com/ajhynes7/ScikitSpatial.jl.git |
|
[
"BSD-3-Clause"
] | 0.2.0 | 4914a192b0c8da30d7416803c2d1659cdc7da78c | code | 109 | using Test
using ScikitSpatial
using StaticArrays
include("types/base.jl")
include("computations/base.jl")
| ScikitSpatial | https://github.com/ajhynes7/ScikitSpatial.jl.git |
|
[
"BSD-3-Clause"
] | 0.2.0 | 4914a192b0c8da30d7416803c2d1659cdc7da78c | code | 132 | include("measurement.jl")
include("comparison.jl")
include("projection.jl")
include("intersection.jl")
include("transformation.jl")
| ScikitSpatial | https://github.com/ajhynes7/ScikitSpatial.jl.git |
|
[
"BSD-3-Clause"
] | 0.2.0 | 4914a192b0c8da30d7416803c2d1659cdc7da78c | code | 4370 | @testset "Is zero" begin
@test is_zero([0])
@test is_zero([0, 0])
@test is_zero([0, 0, 0])
@test !is_zero([1])
@test !is_zero([1, 0])
@test !is_zero([1, 0, 0])
@test !is_zero([1e-2, 0, 0])
@test is_zero([1e-2, 0, 0]; atol=1e-4)
@test !is_zero([1e-1, 0, 0]; atol=1e-4)
end
@testset "Is Parallel" begin
@test are_parallel([1, 0], [1, 0])
@test are_parallel([1, 0], [-1, 0])
@test are_parallel([1, 0], [2, 0])
@test are_parallel([1, 5], [1, 5])
@test are_parallel([1, 5], [3, 15])
@test are_parallel([1, 5], [-5, -25])
@test are_parallel([1, 2, 3], [3, 6, 9])
@test !are_parallel([1, 0], [1, 1])
@test !are_parallel([1, 0], [1, 1e-2])
@test are_parallel([1, 0], [1, 1e-2]; atol=1e-2)
# The zero vector is parallel to all vectors.
@test are_parallel([0, 0], [1, 0])
@test are_parallel([0, 0], [5, 3])
@test are_parallel([3, 4], [0, 0])
@test are_parallel([3, 4, -21], [0, 0, 0])
@test !are_parallel([0, 1e-2], [1, 0])
@test are_parallel([0, 1e-2], [1, 0]; atol=1e-2)
@test !are_parallel([1, 0], [0, 1e-2])
@test are_parallel([1, 0], [0, 1e-2]; atol=1e-2)
end
@testset "Is Perpendicular" begin
@test are_perpendicular([1, 0], [0, 1])
@test are_perpendicular([0, 1], [-1, 0])
@test !are_perpendicular([1, 0], [1, 0])
@test are_perpendicular([50, 0], [0, 2])
@test are_perpendicular([5, 2], [2, -5])
@test are_perpendicular([1, 1, 0], [0, 0, 1])
@test !are_perpendicular([1, 0], [1e-2, 1])
@test are_perpendicular([1, 0], [1e-2, 1]; atol=1e-2)
end
@testset "Are coplanar" begin
# Any three or fewer points are coplanar.
points = hcat([0; 0; 1])
@test are_coplanar(points)
points = [
0 1;
0 1;
1 2;
]
@test are_coplanar(points)
points = [
0 1 5;
0 1 2;
1 2 7;
]
@test are_coplanar(points)
# points = [
# 0 0 1;
# 1 1 0;
# 5 2 0;
# 6 -2 0;
# ]
points = [
0 1 5 6;
0 1 2 -2;
1 0 0 0;
]
@test !are_coplanar(points)
points = [
0 1 2 3;
0 1 2 3;
0 1 2 3;
]
@test are_coplanar(points)
points = [
5 1 5 6;
7 1 2 -2;
4 4 4 4;
]
@test are_coplanar(points)
# Duplicates do not matter.
# points = [
# 0 0 0;
# 1 1 1;
# 2 2 2;
# 3 3 3;
# 3 3 3;
# ]
points = [
0 1 2 3 3;
0 1 2 3 3;
0 1 2 3 3;
]
@test are_coplanar(points)
end
@testset "Point on surface of line" begin
for (point, line, bool_expected) in [
([0, 0], Line([0, 0], [1, 0]), true)
([2, 0], Line([0, 0], [5, 0]), true)
([2, 0], Line([0, 0], [-5, 0]), true)
([0, 0], Line([0, 0], [1, 1]), true)
([1, 0], Line([0, 0], [1, 1]), false)
([1, 0], Line([0, 0], [1, 1]), false)
]
@test on_surface(point, line) == bool_expected
end
for (point, line, atol, bool_expected) in [
([0, 1e-2], Line([0, 0], [1, 0]), 1e-2, true)
([0, 1e-2], Line([0, 0], [1, 0]), 1e-3, false)
]
@test on_surface(point, line; atol=atol) == bool_expected
end
end
@testset "Lines are approximately equal" begin
for (line_a, line_b, bool_expected) in [
(Line([0, 0], [1, 0]), Line([0, 0], [1, 0]), true),
(Line([0, 0], [1, 0]), Line([0, 0], [5, 0]), true),
(Line([0, 0], [1, 0]), Line([0, 0], [-1, 0]), true),
(Line([0, 0], [1, 0]), Line([0, 0], [-5, 0]), true),
(Line([3, 0], [1, 0]), Line([0, 0], [-5, 0]), true),
(Line([-3, 0], [1, 0]), Line([0, 0], [-5, 0]), true),
(Line([0, 1], [1, 0]), Line([0, 0], [1, 0]), false),
(Line([0, -1], [1, 0]), Line([0, 0], [1, 0]), false),
(Line([0, 0], [1, 0]), Line([0, 0], [1, 1]), false),
]
@test isapprox(line_a, line_b) == bool_expected
end
for (line_a, line_b, atol, bool_expected) in [
(Line([0, 1e-2], [1, 0]), Line([0, 0], [1, 0]), 1e-2, true),
(Line([0, 1e-2], [1, 0]), Line([0, 0], [1, 0]), 1e-3, false),
(Line([0, 0], [1, 1]), Line([0, 0], [1, 1.01]), 1e-2, true),
(Line([0, 0], [1, 1]), Line([0, 0], [1, 1.01]), 1e-5, false),
]
@test isapprox(line_a, line_b; atol=atol) == bool_expected
end
end
| ScikitSpatial | https://github.com/ajhynes7/ScikitSpatial.jl.git |
|
[
"BSD-3-Clause"
] | 0.2.0 | 4914a192b0c8da30d7416803c2d1659cdc7da78c | code | 3015 | @testset "Intersection of two lines" begin
line_a = Line([0, 0], [1, 0])
line_b = Line([0, 0], [0, 1])
@test intersect(line_a, line_b) == [0, 0]
line_b = Line([5, -3], [0, 1])
@test intersect(line_a, line_b) == [5, 0]
@test intersect(Line([0, 0], [1, 1]), Line([1, 0], [1, -1])) == [0.5, 0.5]
@test intersect(Line([0, 0, 0], [1, 1, 1]), Line([1, 1, 0], [0, 0, 1])) ≈ ones(3)
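# The lines below are almost parallel; without a tolerance, `intersect` should still succeed.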
line = Line([0, 0], [1, 1])
line_almost_parallel = Line([1, 0], [1, 1.01])
intersect(line, line_almost_parallel)
message = "The lines are parallel."
@test_throws ArgumentError(message) intersect(Line([0, 0], [1, 1]), Line([1, 0], [1, 1]))
message = "The lines are not coplanar."
line_a = Line([0, 0, 0], [1, 0, 0])
line_b = Line([0, 1, 0], [0, 0, 1])
@test_throws ArgumentError(message) intersect(line_a, line_b)
end
@testset "Intersection of plane and line" begin
origin = [0, 0, 0]
line = Line(origin, [0, 0, 1])
plane = Plane(origin, [0, 0, 1])
@test intersect(plane, line) == [0, 0, 0]
@test intersect(plane, Line([3, 5, 2], [0, 0, 1])) == [3, 5, 0]
message = "The line and plane are parallel."
@test_throws ArgumentError(message) intersect(plane, Line(origin, [1, 0, 0]))
# The line is almost parallel to the plane.
# This throws an error if the tolerance is large enough.
line_almost_parallel = Line([0, 0, 1], [1, 0, 1e-2])
intersect(plane, line_almost_parallel)
@test_throws ArgumentError(message) intersect(plane, line_almost_parallel; atol=1e-2)
end
@testset "Intersection of circle/sphere and line" begin
for (circle_or_sphere, line, point_expected_a, point_expected_b) in [
(Circle([0, 0], 1), Line([0, 0], [1, 0]), [-1, 0], [1, 0]),
(Circle([0, 0], 1), Line([0, 0], [0, 1]), [0, -1], [0, 1]),
(Circle([0, 0], 1), Line([0, 1], [1, 0]), [0, 1], [0, 1]),
(
Circle([0, 0], 1),
Line([0, 0.5], [1, 0]),
[-√3 / 2, 0.5],
[√3 / 2, 0.5],
),
(Circle([1, 0], 1), Line([0, 0], [1, 0]), [0, 0], [2, 0]),
(Circle([1.5, 0], 1), Line([0, 0], [1, 0]), [0.5, 0], [2.5, 0]),
(Sphere([0, 0, 0], 1), Line([0, 0, 0], [1, 0, 0]), [-1, 0, 0], [1, 0, 0]),
]
point_a, point_b = intersect(circle_or_sphere, line)
@test point_a == point_expected_a && point_b == point_expected_b
end
end
@testset "Intersection of two planes" begin
for (plane_a, plane_b, line_expected) in [
(Plane(zeros(3), [0, 0, 1]), Plane(zeros(3), [0, 1, 0]), Line(zeros(3), [1, 0, 0])),
(Plane(zeros(3), [0, 0, 1]), Plane([0, 0, 1], [1, 0, 1]), Line([1, 0, 0], [0, 1, 0])),
(Plane(zeros(3), [-1, 1, 0]), Plane([8, 0, 0], [1, 1, 0]), Line([4, 4, 0], [0, 0, -1])),
(Plane([-1, 0, 0], [-1, 0, 1]), Plane([1, 0, 0], [1, 0, 1]), Line([0, 0, 1], [0, 1, 0])),
]
line_intersection = intersect(plane_a, plane_b)
@test line_intersection ≈ line_expected
end
end
| ScikitSpatial | https://github.com/ajhynes7/ScikitSpatial.jl.git |
|
[
"BSD-3-Clause"
] | 0.2.0 | 4914a192b0c8da30d7416803c2d1659cdc7da78c | code | 1518 | @testset "Cosine Similarity" begin
for (point_a, point_b, similarity_expected) in [
([1, 0], [0, 1], 0),
([1, 0], [1, 1], √2 / 2),
([1, 0, 0], [1, 0, 0], 1),
([1, 0, 0], [0, 0, 1], 0),
([1, 0, 0], [1, 1, 1], √3 / 3),
]
@test cosine_similarity(point_a, point_b) ≈ similarity_expected
end
end
@testset "Angle between vectors" begin
for (point_a, point_b, angle_expected) in [
([1, 0], [1, 0], 0),
([1, 0], [0, 1], π / 2),
([1, 0], [1, 1], π / 4),
([1, 0, 0], [1, 0, 1], π / 4),
]
@test angle_between(point_a, point_b) ≈ angle_expected
end
end
@testset "Distance between points" begin
for (point_a, point_b, distance_expected) in [
([0], [0], 0),
([0], [1], 1),
([0], [-1], 1),
([0, 0], [0, 1], 1),
([0, 0], [1, 1], √2),
([0, 0, 0], [1, 1, 1], √3),
]
@test distance(point_a, point_b) ≈ distance_expected
end
end
@testset "Distance from point to line" begin
for (point, line, distance_expected) in [
([0, 0], Line([0, 0], [1, 0]), 0),
([1, 0], Line([0, 0], [1, 0]), 0),
([1, 1], Line([0, 0], [1, 0]), 1),
([1, 1], Line([0, 0], [1, 1]), 0),
([1, 1], Line([0, 0], [-1, -1]), 0),
([8, 7], Line([0, 0], [1, 0]), 7),
([20, -3], Line([0, 0], [1, 0]), 3),
([20, -3, 1], Line([0, 0, 0], [1, 0, 0]), √10),
]
@test distance(point, line) ≈ distance_expected
end
end
| ScikitSpatial | https://github.com/ajhynes7/ScikitSpatial.jl.git |
|
[
"BSD-3-Clause"
] | 0.2.0 | 4914a192b0c8da30d7416803c2d1659cdc7da78c | code | 2425 | @testset "Vector-vector projection" begin
x_unit = [1, 0]
y_unit = [0, 1]
@test project(x_unit, x_unit) == x_unit
@test project(y_unit, y_unit) == y_unit
# The projection is the zero vector if u and v are perpendicular.
@test project(x_unit, y_unit) == [0, 0]
@test project(y_unit, x_unit) == [0, 0]
@test project([5, 3], [-3, 5]) == [0, 0]
@test project([1, 0], x_unit) == [1, 0]
@test project([1, 1], x_unit) == [1, 0]
@test project([5, 5], x_unit) == [5, 0]
@test project([5, -5], x_unit) == [5, 0]
@test project([-5, 5], x_unit) == [-5, 0]
# The magnitude of the second vector shouldn't matter.
@test project([1, 0], 2 * x_unit) == [1, 0]
@test project([1, 0], [1, 1]) == [0.5, 0.5]
@test project([1, 2, 4], [5, 1, -1]) == [5, 1, -1] / 9
end
@testset "Point-line projection" begin
line_x = Line([0, 0], [1, 0])
@test project([0, 0], line_x) == [0, 0]
@test project([10, 0], line_x) == [10, 0]
@test project([0, 10], line_x) == [0, 0]
@test project([10, 10], line_x) == [10, 0]
line_diag = Line([0, 0], [1, 1])
@test project([0, 0], line_diag) == [0, 0]
@test project([1, 0], line_diag) == 0.5 * ones(2)
line_diag_3d = Line([0, 0, 0], [1, 1, 1])
@test project([1, 0, 0], line_diag_3d) ≈ 1/3 * ones(3)
# The magnitude of the direction vector should not matter.
@test project([1, 0, 0], Line([0, 0, 0], -2 * ones(Int, 3))) ≈ 1/3 * ones(3)
@test project([50, 10], Line([1, -5], [0, 3])) == [1, 10]
end
@testset "Point-plane projection" begin
plane_xy = Plane([0, 0, 0], [0, 0, 1])
@test project([0, 0, 0], plane_xy) == [0, 0, 0]
@test project([1, 0, 0], plane_xy) == [1, 0, 0]
@test project([50, -34, 0], plane_xy) == [50, -34, 0]
@test project([50, -34, 50], plane_xy) == [50, -34, 0]
@test project([50, -34, -150], plane_xy) == [50, -34, 0]
# The magnitude of the plane normal should not matter.
@test project([50, -34, -150], Plane([0, 0, 0], [0, 0, 5])) == [50, -34, 0]
@test project([50, -34, -150], Plane([0, 0, 0], [0, 0, -5])) == [50, -34, 0]
@test project([50, -34, -150], Plane([0, 0, -5], [0, 0, 1])) == [50, -34, -5]
@test project([50, -34, -150], Plane([18, 23, -5], [0, 0, 1])) == [50, -34, -5]
@test project([7, -3, 5], Plane([1, 2, 3], [1, 1, 1])) == [6, -4, 4]
end
| ScikitSpatial | https://github.com/ajhynes7/ScikitSpatial.jl.git |
|
[
"BSD-3-Clause"
] | 0.2.0 | 4914a192b0c8da30d7416803c2d1659cdc7da78c | code | 1880 | import ScikitSpatial: Vector
@testset "Vector from two points" begin
@test Vector([0, 0], [0, 0]) == [0, 0]
@test Vector([0, 0], [1, 0]) == [1, 0]
@test Vector([1, 0, 1], [0, 0, 5]) == [-1, 0, 4]
end
@testset "Unit" begin
for (vector, vector_unit_expected) in [
([1, 0], [1, 0])
([2, 0], [1, 0])
([5, 0], [1, 0])
([-5, 0], [-1, 0])
([1, 1], √2 / 2 * ones(2))
([1, 1, 1], √3 / 3 * ones(3))
]
@test unit(vector) ≈ vector_unit_expected
end
end
@testset "Centroid of points" begin
points = [
0 1;
0 0;
]
centroid_ = centroid(points)
# Use type assertion to ensure centroid is a vector (not matrix).
centroid_::AbstractVector
@test centroid_ == [0.5, 0]
points = [
0 1 2;
-1 3 5;
]
@test centroid(points) ≈ [1, 7/3]
points = [
0 1 2;
-1 3 5;
2 5 9;
]
@test centroid(points) ≈ [1, 7/3, 16/3]
end
@testset "Mean center points" begin
points = [
1 2 3;
0 1 2;
]
points_centered_expected = [
-1 0 1;
-1 0 1;
]
@test mean_center(points) == points_centered_expected
points = [
-2 4;
0 1;
2 -3;
5 2;
]
points_centered_expected = [
-3 3;
-0.5 0.5;
2.5 -2.5;
1.5 -1.5;
]
@test mean_center(points) == points_centered_expected
end
@testset "To point" begin
line = Line([0, 0], [1, 0])
@test to_point(line) == [1, 0]
@test to_point(line; t=1) == [1, 0]
@test to_point(line; t=2) == [2, 0]
@test to_point(line; t=-1) == [-1, 0]
line = Line([1, 2, 3], [1, 2, 3])
@test to_point(line) == [2, 4, 6]
@test to_point(line; t=1) == [2, 4, 6]
@test to_point(line; t=2) == [3, 6, 9]
@test to_point(line; t=-1) == [0, 0, 0]
end
| ScikitSpatial | https://github.com/ajhynes7/ScikitSpatial.jl.git |
|
[
"BSD-3-Clause"
] | 0.2.0 | 4914a192b0c8da30d7416803c2d1659cdc7da78c | code | 61 | include("line_and_plane.jl")
include("circle_and_sphere.jl")
| ScikitSpatial | https://github.com/ajhynes7/ScikitSpatial.jl.git |
|
[
"BSD-3-Clause"
] | 0.2.0 | 4914a192b0c8da30d7416803c2d1659cdc7da78c | code | 497 | @testset "Circle/Sphere construction" begin
for (type, dim_allowed) in zip([Circle, Sphere], [2, 3])
for dim in 1:4
point = zeros(Int64, dim)
if dim == dim_allowed
obj = type(point, 1)
@test isa(obj.point, SVector{dim, Int64})
else
message = "The dimension of the point must be $dim_allowed"
@test_throws ArgumentError(message) type(point, 1)
end
end
end
end
| ScikitSpatial | https://github.com/ajhynes7/ScikitSpatial.jl.git |
|
[
"BSD-3-Clause"
] | 0.2.0 | 4914a192b0c8da30d7416803c2d1659cdc7da78c | code | 1570 | using Formatting
@testset "$type" for type in [Line, Plane]
# The object can be constructed from static arrays.
type(SA[0, 0, 0], SA[1, 0, 0])
# It can also be constructed from normal arrays.
obj = type([0, 0], [1, 0])
@test obj.point isa SVector{2, Int}
@test obj.point == [0, 0]
@test obj.vector isa SVector{2, Int}
@test obj.vector == [1, 0]
# The point and vector must be Julia vectors (one-dimensional arrays).
@test_throws MethodError type(0, [1, 0])
@test_throws MethodError type([0, 0], 1)
@test_throws MethodError type([0 0; 0 0], [1, 0])
@test_throws MethodError type([0, 0], [1 0; 0 0])
template = "expected input array of length {}, got length {}"
@test_throws DimensionMismatch(format(template, 1, 2)) type([0], [1, 0])
@test_throws DimensionMismatch(format(template, 2, 3)) type([0, 0], [1, 0, 0])
@test_throws DimensionMismatch(format(template, 3, 2)) type([0, 0, 0], [1, 0])
message = "The vector must have a non-zero magnitude."
@test_throws ArgumentError(message) type([0, 0], [0, 0])
# The vector magnitude can be checked with a tolerance.
type([0, 0], [1e-2, 0])
@test_throws ArgumentError(message) type([0, 0], [1e-2, 0]; atol=1e-4)
end
@testset "Vector aliases" begin
for (type, alias) in zip((:Line, :Plane), (:direction, :normal))
eval(
quote
obj = $type([0, 0], [1, 0])
@test obj.$alias isa SVector{2, Int}
@test obj.$alias == [1, 0]
end
)
end
end
| ScikitSpatial | https://github.com/ajhynes7/ScikitSpatial.jl.git |
|
[
"BSD-3-Clause"
] | 0.2.0 | 4914a192b0c8da30d7416803c2d1659cdc7da78c | docs | 1493 | # ScikitSpatial.jl
A Julia implementation of the Python package [scikit-spatial](https://github.com/ajhynes7/scikit-spatial).
This package provides spatial types and computations between them.
The following types are provided:
- Line
- Plane
- Circle
- Sphere
Most of the computations fall into the following categories:
- Measurement
- Comparison
- Projection
- Intersection
- Transformation
## Usage
### Composite types
A line has a point and direction vector. These are stored as static arrays from [StaticArrays.jl](https://github.com/JuliaArrays/StaticArrays.jl).
```jldoctest readme
julia> using ScikitSpatial
julia> line = Line([2, 3], [1, 2])
Line{2, Int64}([2, 3], [1, 2])
julia> line.point
2-element StaticArrays.SVector{2, Int64} with indices SOneTo(2):
2
3
julia> line.vector
2-element StaticArrays.SVector{2, Int64} with indices SOneTo(2):
1
2
```
The property `direction` can also be used to access the direction vector.
```
julia> line.direction
2-element StaticArrays.SVector{2, Int64} with indices SOneTo(2):
1
2
```
### Measurement
Measure the cosine similarity between two vectors.
```jldoctest
julia> round(cosine_similarity([1, 0], [1, 1]), digits=3)
0.707
```
### Projection
Project a vector onto a vector.
```jldoctest
julia> project([5, 3], [1, 1])
2-element Base.Vector{Float64}:
4.0
4.0
```
Project a point onto a line.
```jldoctest readme
julia> project([8, -2], line)
2-element StaticArrays.SVector{2, Float64} with indices SOneTo(2):
1.2
1.4
```
| ScikitSpatial | https://github.com/ajhynes7/ScikitSpatial.jl.git |
|
[
"BSD-3-Clause"
] | 0.2.0 | 4914a192b0c8da30d7416803c2d1659cdc7da78c | docs | 389 | # ScikitSpatial.jl
Welcome to the `ScikitSpatial.jl` documentation!
`ScikitSpatial.jl` provides types representing spatial entities, as well as functions for computations between them.
The following types are provided:
- Line
- Plane
- Circle
- Sphere
Most of the computations fall into the following categories:
- Measurement
- Comparison
- Projection
- Intersection
- Transformation
| ScikitSpatial | https://github.com/ajhynes7/ScikitSpatial.jl.git |
|
[
"BSD-3-Clause"
] | 0.2.0 | 4914a192b0c8da30d7416803c2d1659cdc7da78c | docs | 69 | # Measurement
```@docs
cosine_similarity
angle_between
distance
```
| ScikitSpatial | https://github.com/ajhynes7/ScikitSpatial.jl.git |
|
[
"BSD-3-Clause"
] | 0.2.0 | 4914a192b0c8da30d7416803c2d1659cdc7da78c | docs | 35 | # Projection
```@docs
project
```
| ScikitSpatial | https://github.com/ajhynes7/ScikitSpatial.jl.git |
|
[
"BSD-3-Clause"
] | 0.2.0 | 4914a192b0c8da30d7416803c2d1659cdc7da78c | docs | 546 | # Line
A line is defined by a point and a direction vector.
```jldoctest line
julia> using ScikitSpatial
julia> line = Line([0, 0], [1, 0])
Line{2, Int64}([0, 0], [1, 0])
julia> line.point
2-element StaticArrays.SVector{2, Int64} with indices SOneTo(2):
0
0
julia> line.vector
2-element StaticArrays.SVector{2, Int64} with indices SOneTo(2):
1
0
```
The direction vector can also be accessed with the `direction` field.
```jldoctest line
julia> line.direction
2-element StaticArrays.SVector{2, Int64} with indices SOneTo(2):
1
0
```
| ScikitSpatial | https://github.com/ajhynes7/ScikitSpatial.jl.git |
|
[
"BSD-3-Clause"
] | 0.2.0 | 4914a192b0c8da30d7416803c2d1659cdc7da78c | docs | 565 | # Plane
A plane is defined by a point and a normal vector.
```jldoctest plane
julia> using ScikitSpatial
julia> plane = Plane([0, 0, 0], [0, 0, 1])
Plane{3, Int64}([0, 0, 0], [0, 0, 1])
julia> plane.point
3-element StaticArrays.SVector{3, Int64} with indices SOneTo(3):
0
0
0
julia> plane.vector
3-element StaticArrays.SVector{3, Int64} with indices SOneTo(3):
0
0
1
```
The normal vector can also be accessed with the `normal` field.
```jldoctest plane
julia> plane.normal
3-element StaticArrays.SVector{3, Int64} with indices SOneTo(3):
0
0
1
```
| ScikitSpatial | https://github.com/ajhynes7/ScikitSpatial.jl.git |
|
[
"MIT"
] | 0.0.1 | 21bd0cf73aa41655139f1ab288dfe8fdcf78b5a5 | code | 22704 | module MultiobjectiveProximalBundle
using Parameters: @with_kw
using LinearAlgebra: norm
using JuMP
using COSMO
using DocStringExtensions
export mpb_optimize
export MPBOptions
"""
MPBOptions(; kwargs...)
Options to configure the algorithm. An instance of type
`MPBOptions` can be passed to optimization methods
with the keyword argument `options`.
Possible `kwargs` for `MPBOptions` are:
$(FIELDS)
"""
@with_kw struct MPBOptions
"Maximum number of iterations (default: 1000)."
max_iter :: Int = 1000 # max_iter=1000 was used in [^2]
"Maximum number of function evaluations (default: 1000)."
max_fun_calls :: Int = 1000 # max_fun_calls=100 was used in [^2]
"Line-search parameter for determination of ``tL β [0,1]`` (default: 1e-2)."
mL :: Float64 = 1e-2 # mL=1e-2 was used in [^1], sec. 5.1
"Line-search parameter for determination of ``tR`` (default: 0.5)."
mR :: Float64 = 0.5 # mR=0.5 was used in [^1]
"Line-search parameter for classification of long or short steps (default: 1e-2)."
t_bar :: Float64 = 1e-2 # t_bar=1e-2 was used in [^1]
"Minimum stepsize tested in line-search (default: `eps(Float64)*1e2`)."
t_min :: Float64 = eps(Float64) * 1e2
"Maximum number of function calls in line-search (default: 100)."
max_fun_calls_ls :: Int = 100 # 100 was used in [^2]
"Fallback non-convexity constant if none are provided (default: 0.5)."
gamma_default :: Float64 = 0.5 # γ_default=0.5 was used in [^2]
#"Feasibility tolerance."
#tol_feas :: Float64 = 1e-9 # tol_feas=1e-9 was used in [^2]
"Final accuracy tolerance (default: 1e-5)."
tol_acc :: Float64 = 1e-5 # εₛ=1e-5 was used in [^2]
"Initial weight ``u^1`` (default: NaN)."
initial_weight :: Float64 = NaN # NaN leads to computation according to [^2]
"Maximum number of stored subgradients (default: 10)."
max_subgrads :: Int = 10 # max_subgrads=10 used in [^2]
@assert 0 < mL < 0.5 "`mL` must be in (0, 0.5)."
@assert mL < mR < 1 "`mR` must be in `(mL, 1)`."
@assert 0 < t_bar <= 1 "`t_bar` must be in (0,1]."
@assert gamma_default >= 0
@assert max_fun_calls > 1
@assert max_fun_calls_ls > 1
@assert max_iter > 0
@assert max_subgrads >= 2
#@assert tol_feas * tol_objf > 0
@assert tol_acc > 0
@assert t_min > 0
end
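# Illustrative usage (not part of the original source):
# opts = MPBOptions(max_iter=500, tol_acc=1e-6)
# x, fx = mpb_optimize(x0, objectives, constraints; options=opts)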
"""
check_functions(_objectives, _constraints)
Return the number of scalar objective functions,
the number of constraint functions and the vectors
of objective functions and constraint functions.
`_objectives` and `_constraints` are provided by
the user. In the case of a single function, a `Function`
may be passed instead of a `Vector{<:Function}`.
"""
function check_functions(
_objective_functions, _constraint_functions;
)
## ensure vector-of-functions:
objective_functions = if isa(_objective_functions, Function)
[_objective_functions,]
elseif isa(_objective_functions, AbstractVector{<:Function})
_objective_functions
else
error("Provide a vector of objective functions.")
end
num_objectives = length(objective_functions)
@assert num_objectives > 0 "Provide at least one objective function."
constraint_functions = if isa(_constraint_functions, Function)
[_constraint_functions,]
elseif isa(_constraint_functions, AbstractVector{<:Function})
_constraint_functions
else
error("Provide a vector of constraint functions.")
end
num_constraints = length(constraint_functions)
return num_objectives, num_constraints, objective_functions, constraint_functions
end
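# Illustrative usage (not part of the original source): a single objective and
# a single constraint may be passed bare instead of as vectors, e.g.
#   K, M, fs, gs = check_functions(x -> (sum(x), ones(length(x))), x -> (x[1], [1.0, 0.0]))
# returns K == 1, M == 1 and wraps both callables in vectors.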
"""
gamma_vectors(
gammas_objectives, gammas_constraints,
num_objectives, num_constraints;
options
)
Return vectors of nonlocality constants for objectives
and constraints. `gammas_objectives` and `gammas_constraints`
are provided by the user.
"""
function gamma_vectors(
gammas_objectives, gammas_constraints,
num_objectives, num_constraints;
options
)
gamma_f = if gammas_objectives isa Real
fill( gammas_objectives, num_objectives )
elseif gammas_objectives isa AbstractVector{<:Real} && length(gammas_objectives) == num_objectives
gammas_objectives
else
fill( options.gamma_default, num_objectives )
end
gamma_g = if gammas_constraints isa Real
fill( gammas_constraints, num_constraints )
elseif gammas_constraints isa AbstractVector{<:Real} && length(gammas_constraints) == num_constraints
gammas_constraints
else
fill( options.gamma_default, num_constraints )
end
return gamma_f, gamma_g
end
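# Illustrative usage (not part of the original source): scalars are broadcast
# and anything else falls back to `options.gamma_default`; e.g. with two
# objectives and one constraint, `gamma_vectors(0.5, nothing, 2, 1; options)`
# returns `([0.5, 0.5], [options.gamma_default])`.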
function prealloc_iter_arrays(x0,n,K,M,j_max; options)
x = copy(x0)
# evaluation value vectors at current iterate
fx = Vector{Float64}(undef, K)
gx = Vector{Float64}(undef, M)
return x, fx, gx
end
function prealloc_diff_arrays(x0,n,K,M,j_max; options)
# generalized Jacobian arrays
# sg_f[:,:,j] is the jacobian of f at yΚ² etc.
# sg_f[i,:,j] is the gradient of fα΅’ at yΚ²
sg_f = Array{Float64}(undef, K, n, j_max)
sg_g = Array{Float64}(undef, M, n, j_max)
# matrices to store the linearization values:
# lin_f[i,j] = linearization of objective i at *current* x around yʲ, i.e.,
# lin_f[:,j] ≙ lin_f(x) = f(yʲ) + sg_fʲ(x - yʲ)
lin_f = Matrix{Float64}(undef, K, j_max)
lin_g = Matrix{Float64}(undef, M, j_max)
# linearization errors
# err_f[i,j] = linearization error of objective i at *current* x around yʲ, i.e.,
# err_f[:, j] ≙ max.( abs.( fx .- lin_f[:,j] ), gamma_f .* (sⱼᵏ)² )
# err_g[:, j] ≙ max.( abs.( lin_g[:,j] ), gamma_g .* (sⱼᵏ)² )
err_f = Matrix{Float64}(undef, K, j_max)
err_g = Matrix{Float64}(undef, M, j_max)
# array of aggregate distance measures of x to yʲ
dists = Vector{Float64}(undef, j_max)
return sg_f, sg_g, lin_f, lin_g, err_f, err_g, dists
end
function prealloc_ls_res_arrays(x0,n,K,M,j_max; options)
# LINE SEARCH ARRAYS
# we denote the *n*ext iterate as `x_n`
# instead of only calculating a step-size `t_x`,
# we directly set and store `x_n` and the values
# `fx_n` and `gx_n`.
x_n = Vector{Float64}(undef, n)
fx_n = Vector{Float64}(undef, K)
gx_n = Vector{Float64}(undef, M)
# likewise, we do not only calculate `t_y`
# but directly store the derivative values at the new bundle point:
sg_f_n = Matrix{Float64}(undef, K, n)
sg_g_n = Matrix{Float64}(undef, M, n)
# additionally, we can already calculate the
# linearization around `yn` evaluated at `x_n`
# we store these values as `lin_f_n` and `lin_g_n`.
# Lastly, the linearization errors are `err_fx_n` and `err_gx_n`.
lin_f_n = Vector{Float64}(undef, K)
lin_g_n = Vector{Float64}(undef, M)
err_f_n = Vector{Float64}(undef, K)
err_g_n = Vector{Float64}(undef, M)
return x_n, fx_n, gx_n, sg_f_n, sg_g_n, lin_f_n, lin_g_n, err_f_n, err_g_n
end
function prealloc_ls_w_arrays(x0,n,K,M,j_max; options)
# working arrays during line-search, at x_t = x + t*d
x_t = Vector{Float64}(undef, n)
f_x_t = Vector{Float64}(undef, K)
g_x_t = Vector{Float64}(undef, M)
sg_f_t = Matrix{Float64}(undef, K, n)
sg_g_t = Matrix{Float64}(undef, M, n)
lin_f_t = similar(f_x_t)
err_f_t = similar(f_x_t)
diff_t = similar(f_x_t) # stores jacobian-vector product
return x_t, f_x_t, g_x_t, sg_f_t, sg_g_t, lin_f_t, err_f_t, diff_t
end
function prealloc_step_arrays(x0,n,K,M,j_max; options)
# descent step array(s)
p = similar(x0)
d = similar(x0)
return p, d
end
function prealloc_aggregation_arrays(x0,n,K,M,j_max; options)
# aggregate subgradients:
sg_f_p = Matrix{Float64}(undef,K,n)
sg_g_p = Matrix{Float64}(undef,M,n)
# aggregate linearizations
lin_f_p = Vector{Float64}(undef,K)
lin_g_p = Vector{Float64}(undef,M)
# aggregate errors
err_f_p = similar(lin_f_p)
err_g_p = similar(lin_g_p)
# aggregate distance
dist_f_p = Ref(0.0)
dist_g_p = Ref(0.0)
# errors computed from scaled lagrange coefficients:
_err_f_p = similar(err_f_p)
_err_g_p = similar(err_g_p)
return sg_f_p, sg_g_p, lin_f_p, lin_g_p, err_f_p, err_g_p, dist_f_p, dist_g_p,_err_f_p, _err_g_p
end
function prealloc_lagrange_arrays(x0,n,K,M,j_max; options)
lambda_p=Vector{Float64}(undef, K) # Lagrange multipliers for aggregated jacobian of f
mu_p=Vector{Float64}(undef, M) # Lagrange multipliers for aggregated jacobian of g
lambda=Matrix{Float64}(undef,K,j_max) # multipliers for stored jacobians of f
mu=Matrix{Float64}(undef,M,j_max) # multipliers for stored jacobians of g
sum_lambda=similar(lambda_p) # column sum of λ multipliers for f
sum_mu=similar(mu_p) # column sum of μ multipliers for g
z_f=Vector{Bool}(undef, K) # zero rows of sum_lambda
z_g=Vector{Bool}(undef, M) # zero rows of sum_mu
nz_f=similar(z_f) # elementwise ¬ of `z_f`
nz_g=similar(z_g) # elementwise ¬ of `z_g`
return lambda_p, lambda, mu_p, mu, sum_lambda, sum_mu, z_f, z_g, nz_f, nz_g
end
"""
eval_and_jac!(y, J, funcs, x)
Evaluate all scalar functions in `funcs`.
Mutate `y` to contain the primals and
`J` to have the gradients as rows.
"""
function eval_and_jac!(y, J, funcs, x)
for (i,f)=enumerate(funcs)
fi, dfi = f(x)
y[i] = fi
J[i,:] .= dfi
end
return nothing
end
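# Illustrative usage (not part of the original source):
#   y = zeros(2); J = zeros(2, 3)
#   eval_and_jac!(y, J, [x -> (sum(x), ones(3)), x -> (x[1]^2, [2x[1], 0.0, 0.0])], ones(3))
# afterwards y == [3.0, 1.0] and J holds the two gradients as rows.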
function mpb_optimize(
x0, objectives, constraints,
gammas_f = nothing, gammas_g = nothing;
options=MPBOptions()
)
# Check dimensions
n = length(x0)
@assert n > 0 "`x0` must not be empty!."
K, M, f_funcs, g_funcs = check_functions(objectives, constraints)
# Pre-allocation of working arrays:
j_max = options.max_subgrads
x, fx, gx = prealloc_iter_arrays(x0, n, K, M, j_max; options)
(
sg_f, sg_g, lin_f, lin_g, err_f, err_g, dists
) = prealloc_diff_arrays(x0, n, K, M, j_max; options)
(
x_n, fx_n, gx_n, sg_f_n, sg_g_n, lin_f_n, lin_g_n, err_f_n, err_g_n
) = prealloc_ls_res_arrays(x0, n, K, M, j_max; options)
(
x_t, f_x_t, g_x_t, sg_f_t, sg_g_t, lin_f_t, err_f_t, diff_t
) = prealloc_ls_w_arrays(x0, n, K, M, j_max; options)
p, d = prealloc_step_arrays(x0, n, K, M, j_max; options)
(
sg_f_p, sg_g_p, lin_f_p, lin_g_p, err_f_p, err_g_p,
dist_f_p, dist_g_p,_err_f_p, _err_g_p
) = prealloc_aggregation_arrays(x0, n, K, M, j_max; options)
(
lambda_p, lambda, mu_p, mu, sum_lambda, sum_mu, z_f, z_g, nz_f, nz_g
) = prealloc_lagrange_arrays(x0, n, K, M, j_max; options)
# vectors of nonlocality constants
gamma_f, gamma_g = gamma_vectors(gammas_f,gammas_g, K, M; options)
# Initialization of first iterate
x .= x0
j_x = 1
dists[j_x] = 0.0 # y₁ == x
# evaluate and set subgradients/jacobians
_sg_f = view(sg_f, :, :, j_x)
_sg_g = view(sg_g, :, :, j_x)
eval_and_jac!(fx, _sg_f, f_funcs, x)
eval_and_jac!(gx, _sg_g, g_funcs, x)
num_fun_calls = Ref(1)
# initial aggregrate directions
sg_f_p .= _sg_f
sg_g_p .= _sg_g
# initial linearizations `lin_f(x) = f(x) + (x - y)^T ξ` = f(x):
lin_f[:,j_x] .= fx
lin_g[:,j_x] .= gx
lin_f_p .= fx
lin_g_p .= gx
# initial locality measures (y == x):
err_f[:,j_x] .= 0.0
err_g[:,j_x] .= 0.0
err_f_p .= 0.0
err_g_p .= 0.0
dist_f_p[] = 0.0
dist_g_p[] = 0.0
# subgradient indices
J = zeros(Bool, j_max) # index of linearizations used in step calculation
J[j_x] = true
# inital weight for descent step calculation
weight = if isnan(options.initial_weight)
sum(norm.(eachrow(_sg_f)))/K
else
options.initial_weight
end
var_est = Inf # variation estimate εᵥᵏ in [^3], EPSV in [^2]
weight_counter = 0 # iteration counter for changes of `u`, iᵤᵏ in [^3], IUK in [^2]
weight_min = 1e-10 * weight
zeta = 1 - 0.5 * 1/(1-options.mL)
for iter=1:options.max_iter
model, _J, nJ = get_jump_model(n,K,M,j_max,J,weight,
sg_f, sg_g, sg_f_p, sg_g_p,
err_f, err_g, err_f_p, err_g_p
)
crit_val1, var_est_p, crit_val2 = solve_dual!(
# modified:
lambda_p, lambda, mu_p, mu, # lagrangian multipliers
sum_lambda, sum_mu, # sum of multipliers
z_f, z_g, nz_f, nz_g, # zero index vectors
d, p, # descent steps arrays
sg_f_p, sg_g_p, lin_f_p, lin_g_p, dist_f_p, dist_g_p, # aggregation arrays
_err_f_p, _err_g_p, # linearization error with scaled multipliers
model,
# not modified:
_J, nJ,
weight, fx, gamma_f, gamma_g,
sg_f, sg_g, lin_f, lin_g, dists,
)
if crit_val1 <= options.tol_acc
@info "Criticality is $(crit_val1) <= $(options.tol_acc)."
break
end
LS_success, weight_n, weight_counter_n, var_est_n, tL, tR, dist_n = linesearch!(
# modified:
x_n, fx_n, gx_n, sg_f_n, sg_g_n,
lin_f_n, lin_g_n, err_f_n, err_g_n,
x_t, f_x_t, g_x_t, sg_f_t, sg_g_t, lin_f_t, err_f_t, diff_t,
num_fun_calls,
# not_modified
j_x, zeta, gamma_f, gamma_g,
weight, weight_counter, var_est, var_est_p, weight_min,
crit_val2, d, x, fx, gx, sg_f, sg_g,
f_funcs, g_funcs
;
options
)
@info """
k = $(iter)
x = $(x)
f(x) = $(fx)
g(x) = $(gx)
weight = $(weight)
crit_val1 = $(crit_val1)
d = $(d)
tL = $(tL)
tR = $(tR)
"""
if !LS_success
break
end
# updates:
weight, weight_counter, var_est = weight_n, weight_counter_n, var_est_n
# set next iterate and values
x .= x_n
fx .= fx_n
gx .= gx_n
# calculate index for derivative storage
j_x_n = mod( j_x + 1, j_max )
if j_x_n == 0
j_x_n = j_max
end
# update previous linearizations to hold at x_n
if tL != 0
d .*= tL # so now: `x_n - x == d` and for updating we only have to add ΞΎ*d
for j=_J
if j != j_x_n
lin_f[:,j] .+= sg_f[:,:,j]*d
lin_g[:,j] .+= sg_g[:,:,j]*d
dists[j] += dist_n
err_f[:,j] .= max.( abs.(fx_n .- lin_f[:,j]), dists[j]^2 .* gamma_f )
err_g[:,j] .= max.( abs.(lin_g[:,j]), dists[j]^2 .* gamma_g )
end
end
end
dist_f_p[] += dist_n
dist_g_p[] += dist_n
lin_f_p .+= tL .* sg_f_p*d
lin_g_p .+= tL .* sg_g_p*d
err_f_p .= max.( abs.(fx_n .- lin_f_p), dist_n^2 .* gamma_f )
err_g_p .= max.( abs.(lin_g_p), dist_n^2 .* gamma_g )
dists[j_x_n] = dist_n
# store new derivatives
sg_f[:,:,j_x_n] .= sg_f_n
sg_g[:,:,j_x_n] .= sg_g_n
# store new linearizations and errors
lin_f[:, j_x_n] .= lin_f_n
lin_g[:, j_x_n] .= lin_g_n
err_f[:, j_x_n] .= err_f_n
err_g[:, j_x_n] .= err_g_n
J[j_x_n] = true
j_x = j_x_n
end
return x, fx
end
function get_jump_model(
n,K,M,j_max,J,weight,
sg_f, sg_g, sg_f_p, sg_g_p,
err_f, err_g, err_f_p, err_g_p
)
# NOTE: It would be great if we could pre-allocate a parameterized
# version of the model and re-use it in every iteration instead of
# re-building it each time from scratch.
# I investigated `ParameterJuMP` to make the subgradients (which appear
# as coefficients) model parameters, but due to bugs such as
# https://github.com/JuliaStochOpt/ParameterJuMP.jl/issues/35
# it did not work out
# The comments there recommend ParametricOptInterface.
# But I do not want to build the objective function by hand :(
_J = findall(J)
nJ = length(_J)
model = Model(COSMO.Optimizer)
JuMP.set_silent(model)
#JuMP.set_optimizer_attribute(model, "polish", true)
# denote with an underscore the JuMP variable names
@variable(model, _lambda[1:K,j=1:nJ] .>= 0)
@variable(model, _lambda_p[1:K] .>= 0)
@variable(model, _mu[1:M,j=1:nJ] .>= 0)
@variable(model, _mu_p[1:M] >= 0)
#=@expression(model, p_unscaled,
sum( sg_f[:,:,j]'_lambda[:,j] .+ sg_g[:,:,j]'_mu[:,j] for (i,j)=enumerate(_J) ) .+
sg_f_p'_lambda_p .+ sg_g_p'_mu_p
) #value.(p_unscaled) can be used to test against the primal solution
=#
@objective(model, Min, 1/(2*weight) *
sum((
sum( sg_f[:,:,j]'_lambda[:,i] .+ sg_g[:,:,j]'_mu[:,i] for (i,j)=enumerate(_J) ) .+
sg_f_p'_lambda_p .+ sg_g_p'_mu_p
).^2) +
sum(_lambda[:,i]'err_f[:,j] .+ _mu[:,i]'err_g[:,j] for (i,j)=enumerate(_J)) +
_lambda_p'err_f_p + _mu_p'err_g_p
)
@constraint(model, sum(_lambda) + sum(_mu) + sum(_lambda_p) + sum(_mu_p) == 1)
return model, _J, nJ
end
"""
Solve the dual direction finding problem, scale the Lagrange multipliers and set the direction and return criticality values.
"""
function solve_dual!(
# modified:
lambda_p, lambda, mu_p, mu, # lagrangian multipliers
sum_lambda, sum_mu, # sum of multipliers
z_f, z_g, nz_f, nz_g, # zero index vectors
d, p, # descent steps arrays
sg_f_p, sg_g_p, lin_f_p, lin_g_p, dist_f_p, dist_g_p, # aggregation arrays
_err_f_p, _err_g_p, # linearization error with scaled multipliers
model,
# not modified:
_J, nJ,
weight, fx, gamma_f, gamma_g,
sg_f, sg_g, lin_f, lin_g, dists,
)
optimize!(model)
# extract solutions
lambda_p = value.(model[:_lambda_p]) # K-Vector
lambda = value.(model[:_lambda]) # K x nJ Matrix
mu_p = value.(model[:_mu_p]) # M-Vector
mu = value.(model[:_mu]) # M x nJ Matrix
# scale multipliers (per objective/constraint):
sum_lambda = lambda_p .+ sum(lambda[:, i] for i=eachindex(_J)) # K-vector
sum_mu = mu_p .+ sum(mu[:, i] for i=eachindex(_J)) # M-vector
z_f = iszero.(sum_lambda)
z_g = iszero.(sum_mu)
nz_f = .!z_f
nz_g = .!z_g
mult_fallback = 1/(nJ + 1)
## row-wise scaling:
## In the book [^3], these parameters are denoted with a tilde.
## We save the memory and instead mutate the original arrays.
lambda[ nz_f, : ] ./= sum_lambda[ nz_f ]
lambda[ z_f, : ] .= mult_fallback
mu[ nz_g, : ] ./= sum_mu[ nz_g ]
mu[ z_g, : ] .= mult_fallback
lambda_p[nz_f] ./= sum_lambda[ nz_f ]
lambda_p[z_f] .= mult_fallback
mu_p[ nz_g ] ./= sum_mu[ nz_g ]
mu_p[ z_g ] .= mult_fallback
# aggregate and begin update of the subgradients
# in accordance with the book, we would have to
# denote the vectors by `pₚᶠ, f̃ₚᵏ, s̃ₚᵏ` etc.
# As we don't need `sg_f_p, ...` anymore, we
# instead modify them in place:
sg_f_p .*= lambda_p # pₚᶠ = λ̃ₚᵏ .* sg_f_p
sg_g_p .*= mu_p # pₚᵍ = μ̃ₚᵏ .* sg_g_p
lin_f_p .*= lambda_p # f̃ₚᵏ = λ̃ₚᵏ .* lin_f_p
lin_g_p .*= mu_p # g̃ₚᵏ = μ̃ₚᵏ .* lin_g_p
dist_f_p[] *= sum(lambda_p) # s̃ₚᶠ = λ̃ₚᵏ * dist_f_p # TODO does `sum(lambda_p)` make sense??
dist_g_p[] *= sum(mu_p) # s̃ₚᵍ = μ̃ₚᵏ * dist_g_p
for (i,j) = enumerate(_J)
sg_f_p .+= lambda[:,i] .* sg_f[:,:,j]
sg_g_p .+= mu[:,i] .* sg_g[:,:,j]
lin_f_p .+= lambda[:,i] .* lin_f[:,j]
lin_g_p .+= mu[:,i] .* lin_g[:,j]
dist_f_p[] += sum(lambda[:,i]) * dists[j] # TODO as above: `sum()` sensible?
dist_g_p[] += sum(mu[:,i]) * dists[j]
end
p .= sg_f_p'sum_lambda .+ sg_g_p'sum_mu
_err_f_p .= max.( abs.( fx .- lin_f_p ), dist_f_p[] .* gamma_f ) # we do not really need extra arrays here, just use err_f_p...
_err_g_p .= max.( abs.(lin_g_p), dist_g_p[] .* gamma_g )
_err_p = sum_lambda'_err_f_p + sum_mu'_err_g_p
d .= -1/weight .* p
pp = p'p
p_norm = sqrt(pp)
crit_val1 = 0.5 * pp + _err_p
var_est_p = p_norm + _err_p
crit_val2 = -( pp/weight + _err_p ) # TODO this is a guess...
#=========================================
# Below is the primal for testing purposes.
# Without the scaling of multipliers it should hold
# that `-1/weight * p ≈ value.(d)`, where `p` is the solution
# from above and `d` is the solution from below:
model = Model(OSQP.Optimizer)
JuMP.set_optimizer_attribute(model, "polish", true)
JuMP.set_silent(model)
@variable(model, ν)
@variable(model, d[1:n])
@objective(model, Min, ν + 0.5 * weight * sum(d.^2))
@constraint(model, -err_f_p .+ sg_f_p*d .<= ν)
@constraint(model, -err_g_p .+ sg_g_p*d .<= ν)
for j in _J
@constraint(model, -err_f[:,j] .+ sg_f[:,:,j]*d .<= ν)
@constraint(model, -err_g[:,j] .+ sg_g[:,:,j]*d .<= ν)
end
optimize!(model)
@show objective_value(model)
@show value(ν)
@show value.(d)
=#
return crit_val1, var_est_p, crit_val2
end
"Perform two point linesearch and also calculate the next weight."
function linesearch!(
# modified:
x_n, fx_n, gx_n, sg_f_n, sg_g_n,
lin_f_n, lin_g_n, err_f_n, err_g_n,
x_t, f_x_t, g_x_t, sg_f_t, sg_g_t, lin_f_t, err_f_t, diff_t,
num_fun_calls,
# not_modified
j_x, zeta, gamma_f, gamma_g,
weight, weight_counter, var_est, var_est_p, weight_min,
crit_val2, d, x, fx, gx, sg_f, sg_g,
f_funcs, g_funcs
;
options
)
# line search like in Algorithm 3.2.2 in [^3], implemented in [^2]
mL = options.mL
mR = options.mR
tL = 0.0
t = tU = 1.0
t_bar = options.t_bar
tR = NaN
# set values for tL = 0
x_n .= x
fx_n .= fx
gx_n .= gx
sg_f_n .= sg_f[:,:,j_x]
sg_g_n .= sg_g[:,:,j_x]
Fx_t = 0.0
FxU = NaN
dist_n = d_norm = norm(d)
sigma = 1.0
rhsL = mL * crit_val2
rhsR = mR * crit_val2
max_fc = options.max_fun_calls
max_fc_ls = options.max_fun_calls_ls
t_min = options.t_min
fc_ls = 0
while t >= t_min && fc_ls <= max_fc_ls && num_fun_calls[] <= max_fc
x_t .= x .+ t .* d
# evaluate:
eval_and_jac!(f_x_t, sg_f_t, f_funcs, x_t)
eval_and_jac!(g_x_t, sg_g_t, g_funcs, x_t)
fc_ls += 1
num_fun_calls[] += 1
# Step (ii) in [^3]
Fx_t, lm = findmax(f_x_t .- fx)
if Fx_t <= t * rhsL && maximum(g_x_t) <= 0
# accept stepsize for x and make it next lower bound
tL = t
if t == tU FxU = Fx_t end
x_n .= x_t
fx_n .= f_x_t
gx_n .= gx
sg_f_n .= sg_f_t
sg_g_n .= sg_g_t
else
# make stepsize next upper bound
tU = t
FxU = Fx_t
end
sigma = t-tL
# Step (iii) in [^3]
if tL >= t_bar
tR = tL
lin_f_t .= f_x_t
err_f_t .= 0.0
dist_n = 0.0
break
end
# calculate linearization and error assuming yₖ₊₁ = x + t*d
dist_n = sigma * d_norm
diff_t .= sg_f_t * d
lin_f_t .= f_x_t .- sigma .* diff_t
err_f_t .= max.( abs.(fx_n .- lin_f_t ), dist_n^2 .* gamma_f )
if -err_f_t[lm] + weight * diff_t[lm] >= rhsR
tR = t
break
end
# Step (iv) in [^3]
if tL <= 0
t = zeta * tU
if tU*crit_val2 > FxU
t = max( t, 0.5 * tU^2 * crit_val2 /(tU*crit_val2 - FxU) )
end
else
t = 0.5 * (tL + tU)
end
end
weight_n = weight
weight_counter_n= weight_counter
var_est_n = var_est
if !isnan(tR)
# line search successful, set remaining values for next iteration
# @info "Successful Line Search with tL=$(tL) and tR=$(tR)."
lin_f_n .= lin_f_t
err_f_n .= err_f_t
if tL == tR
lin_g_n .= gx_n
err_g_n .= abs.(lin_g_n)
else
lin_g_n .= g_x_t .- sigma * sg_g_t * d
err_g_n .= max.( abs.(lin_g_n), dist_n^2 .* gamma_g )
end
LS_success = true
# determine next weight:
u = weight
weight_interp = 2 * weight * (1 - Fx_t/crit_val2)
if tL == 0
var_est_n = min( var_est, var_est_p )
if weight_counter < - 3 && all(err_f_n .> max(var_est_n, -10*crit_val2))
u = weight_interp
end
weight_n = min(u, 10*weight)
weight_counter_n = min( weight_counter-1, -1)
if weight_n != weight
weight_counter_n = - 1
end
elseif tL == 1
var_est_n = min( var_est, -2*crit_val2)
if weight_counter > 0
if Fx_t <= rhsR
u = weight_interp
end
if weight_counter > 3
u = weight/2
end
end
weight_n = max( u, weight/10, weight_min )
weight_counter_n = max( weight_counter + 1, 1 )
if weight_n != weight
weight_counter_n = 1
end
end
else
@warn "Line Search was not successful!"
LS_success = false
end
return LS_success, weight_n, weight_counter_n, var_est_n, tL, tR, dist_n
end
end
| MultiobjectiveProximalBundle | https://github.com/manuelbb-upb/MultiobjectiveProximalBundle.jl.git |
|
[
"MIT"
] | 0.0.1 | 21bd0cf73aa41655139f1ab288dfe8fdcf78b5a5 | docs | 2973 | # MultiobjectiveProximalBundle
This an implementation of the Multiobjective Proximal Bundle Method presented in [^1].
Many important details can be found in [^2].
I used this for personal testing purposes only and cannot promise to keep this repository up to date.
Features:
* Subgradient aggregation to save memory.
* Support for multiple non-smooth objective and constraint functions.
* Pure Julia implementation, no wrapper of Fortran code.
## Installation
The package is not yet registered.
You have to do
```julia
using Pkg
Pkg.add(;url="https://github.com/manuelbb-upb/MultiobjectiveProximalBundle.jl.git")
```
## Usage
Importing the module via
```julia
using MultiobjectiveProximalBundle
```
provides `MPBOptions` (see docstring) and `mpb_optimize`.
The latter has the following signature:
```
x, fx = mpb_optimize(
x0, objective_funcs, constraint_funcs,
gammas_objectives=nothing, gammas_constraints=nothing;
options = MPBOptions()
)
```
Here, `x0` is the initial **feasible** start vector and
`objective_funcs` is a vector of functions.
Each function must return a tuple of its scalar primal value
and its gradient. Likewise for the constraints.
`gammas_objectives` are constants to compensate for non-convexity.
The default value of `nothing` results in `0.5` being used.
### Example
```julia
using MultiobjectiveProximalBundle
using LinearAlgebra: norm
function f1(x)
__x = norm(x)
_x = __x + 2
y = sqrt(_x)
dy = if __x == 0
zeros(length(x))
else
1/(2 * __x * y) .* x
end
return y, dy
end
function f2(x)
_f21 = -sum(x)
_f22 = _f21 + sum(x.^2) - 1
_df21 = -ones(length(x))
y, dy = if _f22 >= _f21
_df22 = _df21 .+ 2 .* x
if _f22==_f21
_f22, 0.5 .* (_df21 .+ _df22)
else
_f22, _df22
end
else
_f21, _df21
end
return y, dy
end
function g(x)
_g1 = sum(x.^2) - 10.0
_g2 = 3 * x[1] + sum(x[2:end]) + 1.5
_dg1 = 2 .* x
y, dy = if _g2 >= _g1
_dg2 = ones(length(x))
_dg2[1] = 3
if _g2 == _g1
_g2, 0.5 .* (_dg1 .+ _dg2)
else
_g2, _dg2
end
else
_g1, _dg1
end
return y, dy
end
x0 = fill(-0.5, 2)
x, fx = mpb_optimize(x0, [f1, f2], [g])
```
## Notes
ToDo's:
* Support linear constraints.
* Remove unnecessary arrays.
* ~“Pre-allocate” model of sub-problem via parametrization.~
(I have tested this on another branch and it was slower.)
* Enable choice of sub-problem solver.
* More verbosity, informative return codes.
[^1]: M. M. Mäkelä, N. Karmitsa, and O. Wilppu, “Proximal Bundle Method for Nonsmooth and Nonconvex Multiobjective Optimization,” in Mathematical Modeling and Optimization of Complex Structures, vol. 40, P. Neittaanmäki, S. Repin, and T. Tuovinen, Eds. Cham: Springer International Publishing, 2016, pp. 191–204. doi: 10.1007/978-3-319-23564-6_12.
[^2]: M. M. Mäkelä, “Nonsmooth Optimization”
| MultiobjectiveProximalBundle | https://github.com/manuelbb-upb/MultiobjectiveProximalBundle.jl.git |
|
[
"MIT"
] | 0.1.0 | 5527f9c4069f083e5ed62cc75b4bd59ae11703ff | code | 146 | using SurvivalDistributions
using BenchmarkTools
SUITE = BenchmarkGroup()
SUITE["rand"] = @benchmarkable rand(10)
# Write your benchmarks here.
| SurvivalDistributions | https://github.com/JuliaSurv/SurvivalDistributions.jl.git |
|
[
"MIT"
] | 0.1.0 | 5527f9c4069f083e5ed62cc75b4bd59ae11703ff | code | 1045 | using SurvivalDistributions
using Documenter
using DocumenterCitations
DocMeta.setdocmeta!(SurvivalDistributions, :DocTestSetup, :(using SurvivalDistributions); recursive=true)
bib = CitationBibliography(
joinpath(@__DIR__,"src","assets","references.bib"),
style=:numeric
)
makedocs(;
plugins=[bib],
modules=[SurvivalDistributions],
authors="Oskar Laverny <[email protected]> and contributors",
sitename="SurvivalDistributions.jl",
format=Documenter.HTML(;
canonical="https://JuliaSurv.github.io/SurvivalDistributions.jl",
edit_link="main",
assets=String["assets/citations.css"],
),
pages=[
"index.md",
"utils.md",
"Implemented Distributions" => [
"distros/PCHD.md",
"distros/GenGamma.md",
"distros/PGW.md",
"distros/EW.md",
"distros/LogLogistic.md",
],
"references.md"
],
)
deploydocs(;
repo="github.com/JuliaSurv/SurvivalDistributions.jl",
devbranch="main",
) | SurvivalDistributions | https://github.com/JuliaSurv/SurvivalDistributions.jl.git |
|
[
"MIT"
] | 0.1.0 | 5527f9c4069f083e5ed62cc75b4bd59ae11703ff | code | 1057 | module SurvivalDistributions
# Import everything we need from Distributions.jl:
using SpecialFunctions: loggamma
using Distributions: logcdf, logccdf, ccdf, cdf, pdf, logpdf, quantile, rand, ContinuousUnivariateDistribution, UnivariateDistribution, @distr_support, AbstractRNG, Weibull, Gamma, Logistic, expectation
import Distributions: logcdf, logccdf, ccdf, cdf, pdf, logpdf, quantile, rand, expectation
# Needed by AbstractHazardDistribution.jl (numerical quadrature, root finding,
# and a numerically stable log(1 - exp(x))); assumed to be declared as package
# dependencies.
using QuadGK: quadgk
using Roots: find_zero
using LogExpFunctions: log1mexp
# Export a few utilities :
include("utilities.jl")
export censored_loglikelihood, hazard, loghazard, cumhazard
# Export other distributions:
include("distros/AbstractHazardDistribution.jl")
include("distros/PiecewiseConstantHazardDistribution.jl")
include("distros/ExpoDist.jl")
include("distros/ExponentiatedWeibull.jl")
include("distros/GeneralizedGamma.jl")
include("distros/PowerGeneralizedWeibull.jl")
include("distros/LogLogistic.jl")
export ExpoDist, ExponentiatedWeibull, GeneralizedGamma, PowerGeneralizedWeibull, LogLogistic, PiecewiseConstantHazardDistribution
end
| SurvivalDistributions | https://github.com/JuliaSurv/SurvivalDistributions.jl.git |
|
[
"MIT"
] | 0.1.0 | 5527f9c4069f083e5ed62cc75b4bd59ae11703ff | code | 1196 | """
hazard(X::UnivariateDistribution, t)
Provide the hazard function of the random variable X (assumed to be a `Distributions.ContinuousUnivariateDistribution`) at point t. The default implementation is simply ``h(t) = \\frac{f(t)}{S(t)}`` where ``f`` and ``S`` are the density and survival function of X.
"""
hazard(X::UnivariateDistribution, t) = pdf(X,t)/ccdf(X,t)
"""
loghazard(X, t)
Provide the log of the hazard function of the random variable X. See [`hazard`](@ref) for the formal definition.
"""
loghazard(X::UnivariateDistribution,t) = log(hazard(X,t))
"""
    cumhazard(X::UnivariateDistribution, t)

Provide the cumulative hazard function of the random variable X. It is defined as ``H(t) = - \\ln S(t)`` where ``S`` is the survival function of the random variable X.
"""
cumhazard(X::UnivariateDistribution,t) = -log(ccdf(X,t))
"""
censored_loglikelihood(X::UnivariateDistribution, t, Ξ΄)
Provide the censored log-likelihood of the distribution X at point t, with status indicator δ. It is defined as
```math
Ξ΄ * loghazard(X,t) - cumhazard(X,t)
```
"""
censored_loglikelihood(X::UnivariateDistribution, t, Ξ΄) = Ξ΄*loghazard(X,t) - cumhazard(X,t) | SurvivalDistributions | https://github.com/JuliaSurv/SurvivalDistributions.jl.git |
|
[
"MIT"
] | 0.1.0 | 5527f9c4069f083e5ed62cc75b4bd59ae11703ff | code | 896 | abstract type AbstractHazardDistribution <: ContinuousUnivariateDistribution end
@distr_support AbstractHazardDistribution 0.0 Inf
loghazard(X::AbstractHazardDistribution, t::Real) = log(hazard(X,t))
cumhazard(X::AbstractHazardDistribution, t::Real) = quadgk(u -> hazard(X,u), 0, t)[1]
logccdf( X::AbstractHazardDistribution, t::Real) = -cumhazard(X,t)
ccdf( X::AbstractHazardDistribution, t::Real) = exp(-cumhazard(X,t))
cdf( X::AbstractHazardDistribution, t::Real) = -expm1(-cumhazard(X,t))
logcdf( X::AbstractHazardDistribution, t::Real) = log1mexp(-cumhazard(X,t))
pdf( X::AbstractHazardDistribution, t::Real) = hazard(X,t)*ccdf(X,t)
logpdf( X::AbstractHazardDistribution, t::Real) = loghazard(X,t) - cumhazard(X,t)
function quantile( X::AbstractHazardDistribution, t::Real)
u = log(1-t)
return find_zero(x -> u + cumhazard(X,x), (0.0, Inf))
end
| SurvivalDistributions | https://github.com/JuliaSurv/SurvivalDistributions.jl.git |
|
[
"MIT"
] | 0.1.0 | 5527f9c4069f083e5ed62cc75b4bd59ae11703ff | code | 794 | """
ExpoDist(Ξ³, X)
A power distribution with power γ and base distribution X<:ContinuousUnivariateDistribution is defined as the distribution that has cumulative distribution function ``F^γ`` where F is the distribution function of X.
"""
struct ExpoDist{D} <: ContinuousUnivariateDistribution
Ξ³::Float64
X::D
function ExpoDist(Ξ³, X)
return new{typeof(X)}(Ξ³,X)
end
end
params(d::ExpoDist) = (d.Ξ³, d.X)
@distr_support ExpoDist 0.0 Inf
cdf(d::ExpoDist, t::Real) = cdf(d.X,t)^d.Ξ³
logcdf(d::ExpoDist, t::Real) = d.Ξ³ * logcdf(d.X, t)
logpdf(d::ExpoDist, t::Real) = log(d.Ξ³) + (d.Ξ³ - 1) * logcdf(d.X, t) + logpdf(d.X, t)
quantile(d::ExpoDist, p::Real) = quantile(d.X, p .^ (1 / d.Ξ³))
rand(rng::AbstractRNG, d::ExpoDist) = quantile(d, Base.rand(rng)) | SurvivalDistributions | https://github.com/JuliaSurv/SurvivalDistributions.jl.git |
|
[
"MIT"
] | 0.1.0 | 5527f9c4069f083e5ed62cc75b4bd59ae11703ff | code | 1118 | """
ExponentiatedWeibull(Ξ±,ΞΈ,Ξ³)
The [Exponentiated Weibull distribution](https://en.wikipedia.org/wiki/Exponentiated_Weibull_distribution) is obtained by exponentiating the cdf of the [Weibull distribution](https://en.wikipedia.org/wiki/Weibull_distribution). This simple transformation adds a second shape parameter that, interestingly, induces a lot of flexibility on the hazard function. The hazard function of the Exponentiated Weibull distribution can capture the basic shapes: constant, increasing, decreasing, bathtub, and unimodal, making it appealing for survival models.
A random variable X follows an `ExponentiatedWeibull(Ξ±,ΞΈ,Ξ³)` distribution when it has cumulative distribution function ``F_X = F_W^{Ξ³}`` where ``F_W`` is the cumulative distribution function of a `Weibull(Ξ±,ΞΈ)`.
References:
* [Exponentiated Weibull distribution](https://en.wikipedia.org/wiki/Exponentiated_Weibull_distribution)
* [Weibull distribution](https://en.wikipedia.org/wiki/Weibull_distribution)
"""
const ExponentiatedWeibull{T} = ExpoDist{Weibull{T}}
ExponentiatedWeibull(Ξ±,ΞΈ,Ξ³) = ExpoDist(Ξ³, Weibull(Ξ±,ΞΈ)) | SurvivalDistributions | https://github.com/JuliaSurv/SurvivalDistributions.jl.git |
|
[
"MIT"
] | 0.1.0 | 5527f9c4069f083e5ed62cc75b4bd59ae11703ff | code | 1822 | """
GeneralizedGamma(Ο,nu,gamma)
The [Generalised Gamma](https://en.wikipedia.org/wiki/Generalized_gamma_distribution) (GG) distribution is a three-parameter distribution with support on ``{\\mathbb R}_+``. The corresponding hazard function can accommodate bathtub, unimodal and monotone (increasing and decreasing) hazard shapes. The GG distribution has become popular in survival analysis due to its flexibility.
References:
* [Generalised Gamma](https://en.wikipedia.org/wiki/Generalized_gamma_distribution)
* [stacy:1962](@cite) Stacy, E.W. A generalization of the gamma distribution, *The Annals of Mathematical Statistics*, 1962
"""
struct GeneralizedGamma{T<:Real} <: ContinuousUnivariateDistribution
sigma::T
nu::T
gamma::T
G::Gamma{T} # underlying Gamma distribution.
function GeneralizedGamma(sigma,nu,gamma)
T = promote_type(Float64, eltype.((sigma,nu,gamma))...)
return new{T}(T(sigma), T(nu), T(gamma), Gamma(nu / gamma, sigma^gamma))
end
end
GeneralizedGamma() = GeneralizedGamma(1,1,1)
params(d::GeneralizedGamma) = (d.sigma,d.nu,d.gamma)
@distr_support GeneralizedGamma 0.0 Inf
function rand(rng::AbstractRNG, d::GeneralizedGamma)
    # Sample the underlying Gamma and transform back; a smarter dedicated sampler could probably be used here.
return rand(rng, d.G) .^ (1 / d.gamma)
end
function logpdf(d::GeneralizedGamma, t::Real)
return log(d.gamma) - d.nu * log(d.sigma) - loggamma(d.nu / d.gamma) + (d.nu - 1) * log(t) - (t / d.sigma) ^ d.gamma
end
function logccdf(d::GeneralizedGamma, t::Real)
return logccdf(d.G, t ^ d.gamma)
end
ccdf(d::GeneralizedGamma, t::Real) = exp(logccdf(d,t))
cdf(d::GeneralizedGamma, t::Real) = 1 - ccdf(d,t)
function quantile(d::GeneralizedGamma, p::Real)
return quantile(d.G, p) .^ (1 / d.gamma)
end | SurvivalDistributions | https://github.com/JuliaSurv/SurvivalDistributions.jl.git |
|
[
"MIT"
] | 0.1.0 | 5527f9c4069f083e5ed62cc75b4bd59ae11703ff | code | 2002 | """
LogLogistic(ΞΌ,Ο)
According to [its Wikipedia page](https://en.wikipedia.org/wiki/Log-logistic_distribution), the log-logistic distribution (known as the Fisk distribution in economics) is a continuous probability distribution for a non-negative random variable. It is used in survival analysis as a parametric model for events whose rate increases initially and decreases later, as, for example, mortality rate from cancer following diagnosis or treatment. It has also been used in hydrology to model stream flow and precipitation, in economics as a simple model of the distribution of wealth or income, and in networking to model the transmission times of data considering both the network and the software.
The log-logistic distribution is the probability distribution of a random variable whose logarithm has a logistic distribution. It is similar in shape to the log-normal distribution but has heavier tails. Unlike the log-normal, its cumulative distribution function can be written in closed form.
It is characterized by its density function as
```math
f(x) = \\frac{(\\frac{Ξ²}{Ξ±})(\\frac{x}{Ξ±})^{Ξ²-1} }{(1 + (\\frac{x}{Ξ±})^{Ξ²})^2},
```
where Ξ± = e^ΞΌ and Ξ² = 1/Ο.
"""
struct LogLogistic{T<:Real} <: ContinuousUnivariateDistribution
X::Logistic{T}
function LogLogistic(ΞΌ,Ο)
X = Logistic(ΞΌ, Ο)
return new{eltype(X)}(X)
end
end
LogLogistic() = LogLogistic(1,1)
params(d::LogLogistic) = (d.X.ΞΌ,d.X.ΞΈ)
@distr_support LogLogistic 0.0 Inf
function loghazard(d::LogLogistic, t::Real)
lt = log(t)
lpdf0 = logpdf(Logistic(d.X.ΞΌ, d.X.ΞΈ), lt)
ls0 = logccdf(Logistic(d.X.ΞΌ, d.X.ΞΈ), lt)
return lpdf0 - ls0 - lt
end
function cumhazard(d::LogLogistic,t::Real)
lt = log.(t)
return -logccdf.(Logistic(d.X.ΞΌ, d.X.ΞΈ), lt)
end
logpdf(d::LogLogistic, t::Real) = loghazard(d,t) - cumhazard(d,t)
cdf(d::LogLogistic, t::Real) = -expm1(-cumhazard(d,t))
rand(rng::AbstractRNG, d::LogLogistic) = exp(rand(rng,d.X))
| SurvivalDistributions | https://github.com/JuliaSurv/SurvivalDistributions.jl.git |
|
[
"MIT"
] | 0.1.0 | 5527f9c4069f083e5ed62cc75b4bd59ae11703ff | code | 1705 | struct PiecewiseConstantHazardDistribution <: AbstractHazardDistribution
βt::Vector{Float64}
Ξ»::Vector{Float64}
end
# The following three functions suffice to sample efficiently from piecewise constant hazard distributions.
function hazard(D::PiecewiseConstantHazardDistribution, t::Real)
u = 0.0
for i in 1:length(D.βt)
u += D.βt[i]
if t < u
return D.Ξ»[i]
end
end
return D.Ξ»[end]
end
function cumhazard(D::PiecewiseConstantHazardDistribution, t::Real)
Ξ = 0.0
u = 0.0
for j in eachindex(D.βt)
u += D.βt[j]
if t > u
Ξ += D.Ξ»[j]*D.βt[j]
else
Ξ += D.Ξ»[j]*(t-(u-D.βt[j]))
return Ξ
end
end
# We consider that the last box is in fact infinitely wide (exponential tail)
return Ξ + (t-u)*L.Ξ»[end]
end
function quantile(D::PiecewiseConstantHazardDistribution, p::Real)
Ξ_target = -log(1-p)
Ξ = 0.0
u = 0.0
for j in eachindex(D.βt)
Ξ += D.Ξ»[j]*D.βt[j]
u += D.βt[j]
if Ξ_target < Ξ
u -= (Ξ - Ξ_target) / D.Ξ»[j]
return u
end
end
    # Beyond the last interval the hazard stays at D.λ[end] (exponential tail),
    # consistent with `cumhazard` above.
    return u + (Λ_target - Λ) / D.λ[end]
end
function expectation(D::PiecewiseConstantHazardDistribution)
S = 1.0
E = 0.0
for j in eachindex(D.βt)
if D.Ξ»[j] > 0
S_inc = exp(-D.Ξ»[j]*D.βt[j])
E += S * (1 - S_inc) / D.Ξ»[j]
S *= S_inc
else
E += S * D.βt[j]
end
end
    # The remainder assumes an exponential lifetime after the maximum age.
R = ifelse(D.Ξ»[end] == 0.0, 0.0, S / D.Ξ»[end])
return E + R
end
| SurvivalDistributions | https://github.com/JuliaSurv/SurvivalDistributions.jl.git |
|
[
"MIT"
] | 0.1.0 | 5527f9c4069f083e5ed62cc75b4bd59ae11703ff | code | 2224 | """
PowerGeneralizedWeibull(Ο,Ξ½,Ξ³)
The Power Generalised Weibull (PGW) distribution is a three-parameter distribution with support on ``{\\mathbb R}_+``. The corresponding hazard function can accommodate bathtub, unimodal and monotone (increasing and decreasing) hazard shapes. The PGW distribution has become popular in survival analysis given the tractability of its hazard and survival functions.
The `PowerGeneralizedWeibull(Ο,Ξ½,Ξ³)` distribution, with scale `Ο`, shape `Ξ½` (nu) and second shape `Ξ³` has probability density function
```math
f(t;Ο,Ξ½,Ξ³) = \\dfrac{Ξ½}{Ξ³ Ο^Ξ½}t^{Ξ½-1} \\left[ 1 + \\left(\\dfrac{t}{Ο}\\right)^Ξ½\\right]^{\\left(\\frac{1}{Ξ³}-1\\right)} \\exp\\left\\{ 1- \\left[ 1 + \\left(\\dfrac{t}{Ο}\\right)^Ξ½\\right]^{\\frac{1}{Ξ³}}
\\right\\}.
```
References:
* [nikulin:2009](@cite) Nikulin, M. and Haghighi, F. On the power generalized Weibull family: model for cancer censored data. Metron -- International Journal of Statistics, 2009
"""
struct PowerGeneralizedWeibull{T<:Real} <: ContinuousUnivariateDistribution
sigma::T
nu::T
gamma::T
function PowerGeneralizedWeibull(sigma,nu,gamma)
T = promote_type(Float64, eltype.((sigma,nu,gamma))...)
return new{T}(T(sigma), T(nu), T(gamma))
end
end
PowerGeneralizedWeibull() = PowerGeneralizedWeibull(1,1,1)
params(d::PowerGeneralizedWeibull) = (d.sigma,d.nu,d.gamma)
@distr_support PowerGeneralizedWeibull 0.0 Inf
function rand(rng::AbstractRNG, d::PowerGeneralizedWeibull)
return d.sigma * ((1 - log(1 - Base.rand(rng))) ^ d.gamma - 1) ^ (1 / d.nu)
end
function logpdf(d::PowerGeneralizedWeibull, t::Real)
return log(d.nu) - log(d.gamma) - d.nu * log(d.sigma) + (d.nu - 1) * log(t) +
(1 / d.gamma - 1) * log(1 + (t / d.sigma) ^ d.nu) +
(1 - (1 + (t / d.sigma) ^ d.nu) ^ (1 / d.gamma))
end
function logccdf(d::PowerGeneralizedWeibull, t::Real)
return 1 - (1 + (t / d.sigma) ^ d.nu) ^ (1 / d.gamma)
end
ccdf(d::PowerGeneralizedWeibull, t::Real) = exp(logccdf(d,t))
cdf(d::PowerGeneralizedWeibull, t::Real) = 1 - ccdf(d,t)
function quantile(d::PowerGeneralizedWeibull, p::Real)
return d.sigma * ((1 - log(1 - p)) ^ d.gamma - 1) ^ (1 / d.nu)
end | SurvivalDistributions | https://github.com/JuliaSurv/SurvivalDistributions.jl.git |
|
[
"MIT"
] | 0.1.0 | 5527f9c4069f083e5ed62cc75b4bd59ae11703ff | code | 1128 | using TestItemRunner
@run_package_tests
@testitem "trivial test" begin
@test true
end
@testitem "Code quality (Aqua.jl)" begin
using Aqua
Aqua.test_all(
SurvivalDistributions;
ambiguities=false,
)
end
@testitem "Check Hazard Values for Exponential distribution" begin
using Distributions
Ξ», t = 10rand(), 3rand()
X = Exponential(1 / Ξ»)
@test hazard(X, t) β Ξ»
@test cumhazard(X, t) β t * Ξ»
end
@testitem "Check Hazard Values for LogNormal distribution" begin
using Distributions
function hLogNormal(t, mu, sigma, logh::Bool=false)
lpdf0 = logpdf.(LogNormal(mu, sigma), t)
ls0 = logccdf.(LogNormal(mu, sigma), t)
val = lpdf0 .- ls0
if logh
return val
else
return exp.(val)
end
end
ΞΌ, Ο, t = rand(), rand(), rand()
X = LogNormal(ΞΌ,Ο)
@test hLogNormal(t,ΞΌ,Ο) β hazard(X,t)
end
@testitem "Check pwchd" begin
using Distributions, HypothesisTests
X = PiecewiseConstantHazardDistribution(rand(10),rand(10))
x = rand(X,100000)
ApproximateOneSampleKSTest(x, X)
end | SurvivalDistributions | https://github.com/JuliaSurv/SurvivalDistributions.jl.git |
|
[
"MIT"
] | 0.1.0 | 5527f9c4069f083e5ed62cc75b4bd59ae11703ff | docs | 2075 | # SurvivalDistributions
[](https://JuliaSurv.github.io/SurvivalDistributions.jl/stable/)
[](https://JuliaSurv.github.io/SurvivalDistributions.jl/dev/)
[](https://github.com/JuliaSurv/SurvivalDistributions.jl/actions/workflows/CI.yml?query=branch%3Amain)
[](https://codecov.io/gh/JuliaSurv/SurvivalDistributions.jl)
[](https://github.com/invenia/BlueStyle)
[](https://github.com/SciML/ColPrac)
[](https://JuliaCI.github.io/NanosoldierReports/pkgeval_badges/S/SurvivalDistributions.html)
[](https://github.com/JuliaTesting/Aqua.jl)
The [`SurvivalDistributions.jl`](https://github.com/JuliaSurv/SurvivalDistributions.jl) package is part of the JuliaSurv organisation. It provides a few utility functions (related to hazard and censoring) to manipulate distributions from [`Distributions.jl`](https://github.com/JuliaStats/Distributions.jl), but most importantly a few distribution families that are sometimes useful when doing survival analysis.
The implementation is very light and keeps its dependencies to a minimum (namely, `Distributions.jl`), so that depending on `SurvivalDistributions.jl` should not incur extra startup time.
Do not hesitate to open an issue if you want to add another distribution that you might find useful for survival analysis, or if you want to discuss the content of the package itself!
| SurvivalDistributions | https://github.com/JuliaSurv/SurvivalDistributions.jl.git |
|
[
"MIT"
] | 0.1.0 | 5527f9c4069f083e5ed62cc75b4bd59ae11703ff | docs | 884 | ```@meta
CurrentModule = SurvivalDistributions
```
# SurvivalDistributions
The [`SurvivalDistributions.jl`](https://github.com/JuliaSurv/SurvivalDistributions.jl) package is part of the JuliaSurv organisation. It provides a few utility functions (related to hazard and censoring) to manipulate distributions from [`Distributions.jl`](https://github.com/JuliaStats/Distributions.jl), but most importantly a few distribution families that are sometimes useful when doing survival analysis.
The implementation is very light and keeps its dependencies to a minimum (namely, `Distributions.jl`), so that depending on `SurvivalDistributions.jl` should not incur extra startup time.
Do not hesitate to open an issue if you want to add another distribution that you might find useful for survival analysis, or if you want to discuss the content of the package itself!
| SurvivalDistributions | https://github.com/JuliaSurv/SurvivalDistributions.jl.git |
|
[
"MIT"
] | 0.1.0 | 5527f9c4069f083e5ed62cc75b4bd59ae11703ff | docs | 197 | ```@meta
CurrentModule = SurvivalDistributions
```
# Reference
## Index
```@index
```
```@autodocs
Modules = [SurvivalDistributions]
```
## Bibliography
```@bibliography
``` | SurvivalDistributions | https://github.com/JuliaSurv/SurvivalDistributions.jl.git |
|
[
"MIT"
] | 0.1.0 | 5527f9c4069f083e5ed62cc75b4bd59ae11703ff | docs | 1297 | ```@meta
CurrentModule = SurvivalDistributions
```
# Utility functions
Since survival analysis regularly defines things in terms of hazards of (positive) random variables, we define the following functions here purely for convenience:
## Hazard function
```@docs
hazard
```
## Log Hazard Function
```@docs
loghazard
```
## Cumulative Hazard Function
```@docs
cumhazard
```
## Censored Log-likelihood
```@docs
censored_loglikelihood
```
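
For example, comparing an observed event with a right-censored observation at the same time (a small sketch):

```julia
using SurvivalDistributions, Distributions
X = Weibull(2, 1)
censored_loglikelihood(X, 1.5, 1) # event observed at t = 1.5
censored_loglikelihood(X, 1.5, 0) # right-censored at t = 1.5
```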
## Examples of hazard and cumulative hazard functions
The hazard and the cumulative hazard functions play a crucial role in survival analysis. These functions define the likelihood function in the presence of censored observations. Thus, they are important in many contexts.
### LogNormal
```@example 1
using SurvivalDistributions, Distributions, Plots, StatsBase
function mkplot(d)
h = plot(t -> hazard(d, t), xlims = (0,10), ylabel = "Hazard")
H = plot(t -> cumhazard(d,t), xlims = (0,10), ylabel = "Cumulative Hazard",)
return plot(h,H,plot_title = "$d")
end
mkplot(LogNormal(0.5, 1))
```
### LogLogistic
```@example 1
mkplot(LogLogistic(1, 0.5))
```
### Weibull
```@example 1
mkplot(Weibull(3, 0.5))
```
### Gamma
```@example 1
mkplot(Gamma(3, 0.5))
``` | SurvivalDistributions | https://github.com/JuliaSurv/SurvivalDistributions.jl.git |
|
[
"MIT"
] | 0.1.0 | 5527f9c4069f083e5ed62cc75b4bd59ae11703ff | docs | 2181 | # Exponentiated Weibull Distribution
## Definition
The [Exponentiated Weibull distribution](https://en.wikipedia.org/wiki/Exponentiated_Weibull_distribution) is obtained by exponentiating the cdf of the [Weibull distribution](https://en.wikipedia.org/wiki/Weibull_distribution). This simple transformation adds a second shape parameter that, interestingly, induces a lot of flexibility on the hazard function. The hazard function of the Exponentiated Weibull distribution can capture the basic shapes: constant, increasing, decreasing, bathtub, and unimodal, making it appealing for survival models.
The probability density function and cumulative distribution function of the Exponentiated Weibull distribution are respectively given by:
$$\begin{split}
f(t) &= \alpha \dfrac{\kappa}{\lambda} \left(\dfrac{t}{\lambda}\right)^{\kappa-1} \left[1-\exp\left\{-\left(\dfrac{t}{\lambda}\right)^{\kappa}\right\}\right]^{\alpha-1} \exp\left\{-\left(\dfrac{t}{\lambda}\right)^{\kappa}\right\}, \\
F(t) &= \left[1-\exp\left\{-\left(\dfrac{t}{\lambda}\right)^{\kappa}\right\}\right]^{\alpha},
\end{split}$$
where $t>0$, $\alpha>0$, $\lambda>0$, and $\kappa>0$. The parameter $\lambda$ is a scale parameter, $\kappa$ is a shape parameter, and $\alpha$ is the power (shape) parameter.
## Examples
Let us sample a dataset from an Exponentiated Weibull distribution:
```@example 1
using SurvivalDistributions, Distributions, Random, Plots, StatsBase
Random.seed!(123)
D = ExponentiatedWeibull(0.25, 0.5, 5)
sim = rand(D,1000);
```
First, let's have a look at the hazard function:
```@example 1
plot(t -> hazard(D,t), ylabel = "Hazard", xlims = (0,10))
```
Then, we can verify the coherence of our code by comparing the obtained sample and the true pdf:
```@example 1
histogram(sim, normalize=:pdf, bins = range(0, 5, length=30))
plot!(t -> pdf(D,t), ylabel = "Density", xlims = (0,5))
```
We could also compare the empirical and theoretical cdfs:
```@example 1
ecdfsim = ecdf(sim)
plot(x -> ecdfsim(x), 0, 5, label = "ECDF", linecolor = "gray", linewidth=3)
plot!(t -> cdf(D,t), xlabel = "x", ylabel = "CDF vs. ECDF", xlims = (0,5))
``` | SurvivalDistributions | https://github.com/JuliaSurv/SurvivalDistributions.jl.git |
|
[
"MIT"
] | 0.1.0 | 5527f9c4069f083e5ed62cc75b4bd59ae11703ff | docs | 3178 | # Generalised Gamma Distribution
## Definition
The [Generalised Gamma](https://en.wikipedia.org/wiki/Generalized_gamma_distribution) (GG) distribution [stacy:1962](@cite) is a three-parameter distribution with support on ${\mathbb R}_+$. The corresponding hazard function can accommodate bathtub, unimodal and monotone (increasing and decreasing) hazard shapes. The GG distribution has become popular in survival analysis due to its flexibility. Other flexible distributions that can account for these hazard shapes are discussed in [rubio:2021](@cite) and [jones:2015](@cite).
## Probability Density Function
The pdf of the GG distribution is
$$f(t;\theta,\kappa,\delta) = \dfrac{\delta}{\Gamma\left(\frac{\kappa}{\delta}\right)\theta^\kappa} t^{{\kappa-1}}e^{{-\left(\frac{t}{\theta}\right)^{\delta}}},$$
where $\theta>0$ is a scale parameter, and $\kappa,\delta >0$ are shape parameters.
## Cumulative Distribution Function
The CDF of the GG distribution is
$$F(t;\theta,\kappa,\delta) = {\frac {\gamma \left( \frac{\kappa}{\delta},\left(\frac{t}{\theta}\right)^{\delta}\right)}{\Gamma\left(\frac{\kappa}{\delta}\right)}},$$
where $\gamma(\cdot)$ denotes the lower incomplete gamma function. The survival function can be obtained using the relationship $S(t;\theta,\kappa,\delta)=1-F(t;\theta,\kappa,\delta)$. An interesting relationship between the [Gamma CDF](https://en.wikipedia.org/wiki/Gamma_distribution) ($G(t;\theta,\kappa)$, scale $\theta$ and shape $\kappa$) and the GG CDF is
$$F(t;\theta,\kappa,\delta) = G\left(t^\delta; \theta^\delta, \frac{\kappa}{\delta}\right).$$
This allows the implementation of the GG CDF using the Julia command `Gamma`.
## Hazard Function
The hazard function of the GG distribution is
$$h(t;\theta,\kappa,\delta) = \dfrac{f(t;\theta,\kappa,\delta)}{1-F(t;\theta,\kappa,\delta)}.$$
The survival function can be obtained as $S(t;\theta,\kappa,\delta)=1-F(t;\theta,\kappa,\delta)$, and the cumulative hazard function as $H(t;\theta,\kappa,\delta) = -\log S(t;\theta,\kappa,\delta)$, as usual. The connection of the GG CDF with the Gamma distribution allows for writing these functions in terms of the Julia command `Gamma` as shown in the following code.
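
A sketch of these identities using `Distributions.Gamma` (this matches how `GeneralizedGamma` is implemented in this package; note that `Gamma(shape, scale)` takes shape $\kappa/\delta$ and scale $\theta^\delta$):

```julia
using Distributions
F(t, θ, κ, δ) = cdf(Gamma(κ / δ, θ^δ), t^δ)  # CDF
S(t, θ, κ, δ) = ccdf(Gamma(κ / δ, θ^δ), t^δ) # survival function
H(t, θ, κ, δ) = -log(S(t, θ, κ, δ))          # cumulative hazard
```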
## Examples
Let us sample a dataset from a Generalized Gamma distribution:
```@example 1
using SurvivalDistributions, Distributions, Random, Plots, StatsBase
Random.seed!(123)
D = GeneralizedGamma(0.5, 1.5, 0.75)
sim = rand(D,1000);
```
First, let's have a look at the hazard function:
```@example 1
plot(t -> hazard(D,t), ylabel = "Hazard", xlims = (0,10))
```
Then, we can verify the coherence of our code by comparing the obtained sample and the true pdf:
```@example 1
histogram(sim, normalize=:pdf, bins = range(0, 5, length=30))
plot!(t -> pdf(D,t), ylabel = "Density", xlims = (0,5))
```
We could also compare the empirical and theoretical cdfs:
```@example 1
ecdfsim = ecdf(sim)
plot(x -> ecdfsim(x), 0, 5, label = "ECDF", linecolor = "gray", linewidth=3)
plot!(t -> cdf(D,t), xlabel = "x", ylabel = "CDF vs. ECDF", xlims = (0,5))
```
```@bibliography
Pages = ["GenGamma.md"]
Canonical = false
``` | SurvivalDistributions | https://github.com/JuliaSurv/SurvivalDistributions.jl.git |
|
[
"MIT"
] | 0.1.0 | 5527f9c4069f083e5ed62cc75b4bd59ae11703ff | docs | 1617 | # LogLogistic
## Definition
The [LogLogistic](https://en.wikipedia.org/wiki/Log-logistic_distribution) distribution is the probability distribution of a random variable whose logarithm has a logistic distribution. It is similar in shape to the log-normal distribution but has heavier tails.
It is used in survival analysis as a parametric model for events whose rate increases initially and decreases later, as, for example, mortality rate from cancer following diagnosis or treatment. It has also been used in hydrology to model stream flow and precipitation, in economics as a simple model of the distribution of wealth or income, and in networking to model the transmission times of data considering both the network and the software.
## Examples
Let us sample a dataset from a LogLogistic distribution:
```@example 1
using SurvivalDistributions, Distributions, Random, Plots, StatsBase
Random.seed!(123)
D = LogLogistic(1,2)
sim = rand(D,1000);
```
First, let's have a look at the hazard function:
```@example 1
plot(t -> hazard(D,t), ylabel = "Hazard", xlims = (0,10))
```
Then, we can verify the coherence of our code by comparing the obtained sample and the true pdf:
```@example 1
histogram(sim, normalize=:pdf, bins = range(0, 5, length=30))
plot!(t -> pdf(D,t), ylabel = "Density", xlims = (0,5))
```
We could also compare the empirical and theoretical cdfs:
```@example 1
ecdfsim = ecdf(sim)
plot(x -> ecdfsim(x), 0, 5, label = "ECDF", linecolor = "gray", linewidth=3)
plot!(t -> cdf(D,t), xlabel = "x", ylabel = "CDF vs. ECDF", xlims = (0,5))
``` | SurvivalDistributions | https://github.com/JuliaSurv/SurvivalDistributions.jl.git |
|
[
"MIT"
] | 0.1.0 | 5527f9c4069f083e5ed62cc75b4bd59ae11703ff | docs | 1513 | # Piecewise constant hazard distributions
## Definition
The `PiecewiseConstantHazardDistribution` is one of the simplest and yet most useful distributions provided in this package. These distributions are defined by their hazard functions, which are assumed to be piecewise constant (hence the name).
While dealing with census data and rate tables, having a survival model defined by a piecewise constant hazard is very common. In particular, random lifetimes extracted from `RateTable`s from [`RateTables.jl`](https://github.com/JuliaSurv/RateTables.jl) follow this pattern.
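
Here `∆t` holds the interval widths and `λ` the hazard value on each interval; beyond the last interval, the final hazard is kept (an exponential tail). For instance, a hazard of 0.1 on $[0,1)$ and 0.5 afterwards (a small sketch):

```julia
using SurvivalDistributions
D = PiecewiseConstantHazardDistribution([1.0, 1.0], [0.1, 0.5])
hazard(D, 0.5), hazard(D, 1.5) # (0.1, 0.5)
```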
## Examples
```@example 1
using SurvivalDistributions, Distributions, Random, Plots, StatsBase
Random.seed!(123)
βt = rand(20)
Ξ» = rand(20)
D = PiecewiseConstantHazardDistribution(βt,Ξ»)
sim = rand(D,1000);
```
First, let's have a look at the hazard function:
```@example 1
plot(t -> hazard(D,t), ylabel = "Hazard", xlims = (0,10))
```
As expected, it is quite random.
Then, we can verify the coherence of our code by comparing the obtained sample and the true pdf:
```@example 1
histogram(sim, normalize=:pdf, bins = range(0, 5, length=30))
plot!(t -> pdf(D,t), ylabel = "Density", xlims = (0,5))
```
The comparison is not too bad ! We could also compare the empirical and theroetical cdfs:
```@example 1
ecdfsim = ecdf(sim)
plot(x -> ecdfsim(x), 0, 5, label = "ECDF", linecolor = "gray", linewidth=3)
plot!(t -> cdf(D,t), xlabel = "x", ylabel = "CDF vs. ECDF", xlims = (0,5))
``` | SurvivalDistributions | https://github.com/JuliaSurv/SurvivalDistributions.jl.git |
|
[
"MIT"
] | 0.1.0 | 5527f9c4069f083e5ed62cc75b4bd59ae11703ff | docs | 2733 | # Power Generalised Weibull Distribution
## Definition
The Power Generalised Weibull (PGW) distribution [nikulin:2009](@cite) is a three-parameter distribution with support on ${\mathbb R}_+$. The corresponding hazard function can accommodate bathtub, unimodal and monotone (increasing and decreasing) hazard shapes. The PGW distribution has become popular in survival analysis given the tractability of its hazard and survival functions. Other flexible distributions that can account for these hazard shapes are discussed in [rubio:2021](@cite) and [jones:2015](@cite).
## Probability Density Function
The pdf of the PGW distribution is
$$f(t;\sigma,\nu,\gamma) = \dfrac{\nu}{\gamma \sigma^\nu}t^{\nu-1} \left[ 1 + \left(\dfrac{t}{\sigma}\right)^\nu\right]^{\left(\frac{1}{\gamma}-1\right)} \exp\left\{ 1- \left[ 1 + \left(\dfrac{t}{\sigma}\right)^\nu\right]^{\frac{1}{\gamma}}
\right\},$$
where $\sigma>0$ is a scale parameter, and $\nu,\gamma >0$ are shape parameters.
## Survival Function
The survival function of the PGW distribution is
$$S(t;\sigma,\nu,\gamma) = \exp\left\{ 1- \left[ 1 + \left(\dfrac{t}{\sigma}\right)^\nu\right]^{\frac{1}{\gamma}}
\right\}.$$
## Hazard Function
The hazard function of the PGW distribution is
$$h(t;\sigma,\nu,\gamma) = \dfrac{\nu}{\gamma \sigma^\nu}t^{\nu-1} \left[ 1 + \left(\dfrac{t}{\sigma}\right)^\nu\right]^{\left(\frac{1}{\gamma}-1\right)}.$$
The cdf can be obtained as $F(t;\sigma,\nu,\gamma)=1-S(t;\sigma,\nu,\gamma)$, and the cumulative hazard function as $H(t;\sigma,\nu,\gamma) = -\log S(t;\sigma,\nu,\gamma)$, as usual.
## Quantile Function
The quantile function of the PGW distribution is
$$Q(p;\sigma,\nu,\gamma) = \sigma \left[ \left( 1 - \log(1-p) \right)^{\gamma} - 1 \right]^{\frac{1}{\nu}},$$
where $p\in(0,1)$.
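
This gives a direct inverse-transform sampler; a sketch:

```julia
# Draw one PGW(σ, ν, γ) sample by pushing a uniform draw through Q
Q(p, σ, ν, γ) = σ * ((1 - log(1 - p))^γ - 1)^(1 / ν)
x = Q(rand(), 0.5, 2, 5)
```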
## Examples
Let us sample a dataset from a PGW :
```@example 1
using SurvivalDistributions, Distributions, Random, Plots, StatsBase
Random.seed!(123)
D = PowerGeneralizedWeibull(0.5, 2, 5)
sim = rand(D,1000);
```
First, let's have a look at the hazard function:
```@example 1
plot(t -> hazard(D,t), ylabel = "Hazard", xlims = (0,10))
```
Then, we can verify the coherence of our code by comparing the obtained sample and the true pdf:
```@example 1
histogram(sim, normalize=:pdf, bins = range(0, 5, length=30))
plot!(t -> pdf(D,t), ylabel = "Density", xlims = (0,5))
```
We could also compare the empirical and theoretical cdfs:
```@example 1
ecdfsim = ecdf(sim)
plot(x -> ecdfsim(x), 0, 5, label = "ECDF", linecolor = "gray", linewidth=3)
plot!(t -> cdf(D,t), xlabel = "x", ylabel = "CDF vs. ECDF", xlims = (0,5))
```
```@bibliography
Pages = ["PGW.md"]
Canonical = false
``` | SurvivalDistributions | https://github.com/JuliaSurv/SurvivalDistributions.jl.git |
|
[
"MIT"
] | 1.0.0 | b101c921fe636be236ba9ab5e5ea348e67f1f72e | code | 717 | using ProductArrays
using Documenter
DocMeta.setdocmeta!(ProductArrays, :DocTestSetup, :(using ProductArrays); recursive=true)
makedocs(;
modules=[ProductArrays],
authors="Felix Benning <[email protected]> and contributors",
repo="https://github.com/lazyLibraries/ProductArrays.jl/blob/{commit}{path}#{line}",
sitename="ProductArrays.jl",
format=Documenter.HTML(;
prettyurls=get(ENV, "CI", "false") == "true",
canonical="https://lazyLibraries.github.io/ProductArrays.jl",
edit_link="main",
assets=String[],
),
pages=[
"Home" => "index.md",
],
)
deploydocs(;
repo="github.com/lazyLibraries/ProductArrays.jl",
devbranch="main",
)
| ProductArrays | https://github.com/lazyLibraries/ProductArrays.jl.git |
|
[
"MIT"
] | 1.0.0 | b101c921fe636be236ba9ab5e5ea348e67f1f72e | code | 3627 | module ProductArrays
export productArray
function _ensure_all_linear_indexed(vecs::T) where {T<:Tuple}
linear_indexed = ntuple(
n -> hasmethod(Base.getindex, (fieldtype(T, n), Int)),
Base._counttuple(T)
)
all(linear_indexed) || throw(ArgumentError(
"$(vecs[findfirst(x->!x, linear_indexed)]) cannot be linearly accessed. All inputs need to implement `Base.getindex(::T, ::Int)`"
))
end
struct ProductArray{T<:Tuple,Eltype,N} <: AbstractArray{Eltype,N}
prodIt::Iterators.ProductIterator{T}
ProductArray(t::T) where {T} = begin
_ensure_all_linear_indexed(t)
prodIt = Iterators.ProductIterator(t)
new{T,eltype(Iterators.ProductIterator{T}),ndims(prodIt)}(prodIt)
end
end
# wrap ProductIterator
function Base.IteratorSize(::Type{ProductArray{T,Eltype,N}}) where {T,Eltype,N}
Base.IteratorSize(Iterators.ProductIterator{T})
end
Base.size(p::ProductArray) = size(p.prodIt)
Base.axes(p::ProductArray) = axes(p.prodIt)
Base.ndims(::ProductArray{T,Eltype,N}) where {T,Eltype,N} = N
Base.length(p::ProductArray) = length(p.prodIt)
function Base.IteratorEltype(::Type{<:ProductArray{T}}) where {T}
Base.IteratorEltype(Iterators.ProductIterator{T})
end
Base.eltype(::Type{ProductArray{T,Eltype,N}}) where {T,Eltype,N} = Eltype
Base.iterate(p::ProductArray) = iterate(p.prodIt)
Base.iterate(p::ProductArray, state) = iterate(p.prodIt, state)
# implement private _getindex for ProductIterator
index_dim(v) = index_dim(Base.IteratorSize(typeof(v)), v)
index_dim(::Base.HasShape{N}, v) where {N} = N
index_dim(::Base.HasLength, v) = 1
index_dim(::ST, v::AbstractArray{T,N}) where {ST<:Union{Base.IsInfinite,Base.SizeUnknown},T,N} = N
function index_dim(::T, v) where {T<:Union{Base.IsInfinite,Base.SizeUnknown}}
    try
        return ndims(v)
    catch
        throw(ArgumentError("ProductArray cannot deal with $(typeof(v)) as its IteratorSize is of type $T and it does not implement `ndims`."))
    end
end
function _getindex(prod::Iterators.ProductIterator, indices::Int...)
return _prod_getindex(prod.iterators, indices...)
end
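# Recursively consume the index tuple: each iterator takes as many indices as
# its dimensionality (see `index_dim`); the rest is passed on to the remaining
# iterators.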
_prod_getindex(::Tuple{}) = ()
function _prod_getindex(p_vecs::Tuple, indices::Int...)
v = first(p_vecs)
n = index_dim(v)
return (
v[indices[1:n]...],
_prod_getindex(Base.tail(p_vecs), indices[n+1:end]...)...
)
end
# apply this to ProductArray
Base.getindex(p::ProductArray{T,Eltype,N}, indices::Vararg{Int,N}) where {T,Eltype,N} = _getindex(p.prodIt, indices...)
"""
productArray(vectors...)
The output is a lazy form of
```julia
collect(Iterators.product(vectors...))
```
i.e. it is an AbstractArray in contrast to `Iterators.product(vectors...)`. So
it is accessible with `getindex` and gets default Array implementations for free.
In particular it can be passed to `Base.PermutedDimsArray` for lazy permutation
and `vec()` to obtain a lazy `Base.ReshapedArray`.
Examples:
```jldoctest
julia> A = productArray(1:3, (:a,:b))
3Γ2 ProductArrays.ProductArray{Tuple{UnitRange{Int64}, Tuple{Symbol, Symbol}}, Tuple{Int64, Symbol}, 2}:
(1, :a) (1, :b)
(2, :a) (2, :b)
(3, :a) (3, :b)
julia> vec(A)
6-element reshape(::ProductArrays.ProductArray{Tuple{UnitRange{Int64}, Tuple{Symbol, Symbol}}, Tuple{Int64, Symbol}, 2}, 6) with eltype Tuple{Int64, Symbol}:
(1, :a)
(2, :a)
(3, :a)
(1, :b)
(2, :b)
(3, :b)
julia> sizeof(A) == sizeof(1:3) + sizeof((:a,:b))
true
julia> A == collect(Iterators.product(1:3, (:a,:b)))
true
```
"""
productArray(vectors...) = ProductArray(vectors)
if VERSION >= v"1.8" # compatibility
Base.last(p::ProductArray) = last(p.prodIt)
end
end
| ProductArrays | https://github.com/lazyLibraries/ProductArrays.jl.git |
|
[
"MIT"
] | 1.0.0 | b101c921fe636be236ba9ab5e5ea348e67f1f72e | code | 1416 | using ProductArrays
using Test
random_tuple(n) = map(x->abs(x)%floor(Int,10^(6/n))+1, map(rand, ntuple(x->Int, n)))
@testset "ProductArrays.jl" begin
@testset "Ensure identical behavior to collected Base.product with v=$v" for v in [
(1:3, 4:10),
(rand(3,2),(:a,:b)),
map(x->rand(x), random_tuple(1)),
map(x->rand(x), random_tuple(2)),
map(x->rand(x), random_tuple(3)),
map(x->rand(x), random_tuple(4)),
]
p = productArray(v...)
@testset "Test Type Functions" begin
@test Base.IteratorSize(typeof(p)) == Base.IteratorSize(typeof(p.prodIt))
@test Base.IteratorEltype(typeof(p)) == Base.IteratorEltype(typeof(p.prodIt))
@test Base.eltype(typeof(p)) == Base.eltype(typeof(p.prodIt))
end
c = collect(Base.product(v...))
@testset "Test Entries and Access" begin
@test p == c # same shape and entries
cart_idx = map(first, axes(p))
@test p[cart_idx...] == c[cart_idx...] # cartesian access
@test p[1] == c[1] # linear access
@test p[:] == c[:] # colon access
@test p[1:lastindex(p)] == vec(c) # range access
@test reverse(p) == reverse(c)
@test last(p) == p[map(last, axes(p))...]
end
@testset "Sanity Checks" begin
@test axes(p) == axes(c)
@test size(p) == map(length, axes(p))
@test length(p) == reduce(*, size(p))
@test axes(p, 1) == axes(p)[1]
@test ndims(p) == ndims(c)
end
end
end
| ProductArrays | https://github.com/lazyLibraries/ProductArrays.jl.git |
|
[
"MIT"
] | 1.0.0 | b101c921fe636be236ba9ab5e5ea348e67f1f72e | docs | 1555 | # ProductArrays
[](https://lazyLibraries.github.io/ProductArrays.jl/stable/)
[](https://lazyLibraries.github.io/ProductArrays.jl/dev/)
[](https://github.com/lazyLibraries/ProductArrays.jl/actions/workflows/CI.yml?query=branch%3Amain)
[](https://codecov.io/gh/lazyLibraries/ProductArrays.jl)
`productArray` provides a lazy form of
```julia
collect(Iterators.product(vectors...))
```
i.e. it is an `AbstractArray` in contrast to `Iterators.product(vectors...)`. So
it is accessible with `getindex` and gets default Array implementations for free.
In particular it can be passed to `Base.PermutedDimsArray` for lazy permutation
and `vec()` to obtain a lazy `Base.ReshapedArray`.
## Examples:
```julia
julia> A = productArray(1:3, (:a,:b))
3Γ2 ProductArrays.ProductArray{Tuple{UnitRange{Int64}, Tuple{Symbol, Symbol}}, Tuple{Int64, Symbol}, 2}:
(1, :a) (1, :b)
(2, :a) (2, :b)
(3, :a) (3, :b)
julia> vec(A)
6-element reshape(::ProductArrays.ProductArray{Tuple{UnitRange{Int64}, Tuple{Symbol, Symbol}}, Tuple{Int64, Symbol}, 2}, 6) with eltype Tuple{Int64, Symbol}:
(1, :a)
(2, :a)
(3, :a)
(1, :b)
(2, :b)
(3, :b)
julia> sizeof(A) == sizeof(1:3) + sizeof((:a,:b))
true
julia> A == collect(Iterators.product(1:3, (:a,:b)))
```
| ProductArrays | https://github.com/lazyLibraries/ProductArrays.jl.git |
|
[
"MIT"
] | 1.0.0 | b101c921fe636be236ba9ab5e5ea348e67f1f72e | docs | 206 | ```@meta
CurrentModule = ProductArrays
```
# ProductArrays
Documentation for [ProductArrays](https://github.com/lazyLibraries/ProductArrays.jl).
```@index
```
```@autodocs
Modules = [ProductArrays]
```
| ProductArrays | https://github.com/lazyLibraries/ProductArrays.jl.git |
|
[
"MIT"
] | 0.1.0 | f803ed73d3bf218397e1d4134f83a4255c00aee1 | code | 2882 | ### A Pluto.jl notebook ###
# v0.19.46
using Markdown
using InteractiveUtils
# βββ‘ d1d9ae1e-22bb-4aae-af97-4b4c07abc0f0
begin
import Pkg
Pkg.activate(Base.current_project())
end
# βββ‘ 15413012-9e17-45be-aaae-cc867ede63cb
begin
using RDatasets
using StatsModels
using Plots
using Statistics
end
# βββ‘ a7b141eb-2085-4efd-9b9e-2e7851e4d8d7
include("../src/RelevanceVectorMachine.jl")
# βββ‘ 42bdf6a4-73c0-11ef-129e-158f23f29580
md"""
# Boston Dataset Regression Example
"""
# βββ‘ 5d96b1e1-6c81-4ea5-92a0-e9afe93b1bf3
md"""
## Introduction
The following illustrates how to do univariate regression with the relevance vector machine.
"""
# βββ‘ 75230d1e-e217-4e55-8678-4cd930d8fdf1
md"""
## Processing Data
"""
# βββ‘ a92e2baa-4f90-4418-9da6-4d16b773aba9
boston_data = RDatasets.dataset("MASS", "Boston")
# βββ‘ f793afcf-0873-4bf8-9428-bebee2093fd1
md"""
## Training the Model
"""
# βββ‘ 56a1ed96-456a-4163-927a-df4262396929
scatter(boston_data[:, :Rm], boston_data[:, :MedV])
# βββ‘ 2740ddcc-ab93-4eca-856f-044c4ef6fb94
rvm = RelevanceVectorMachine.rvm(@formula(MedV ~ Rm), boston_data)
# βββ‘ 17fd3b57-5cf0-4fb0-89b5-dc8f719ec906
md"""
## Plotting Predictions
"""
# βββ‘ 0a0018b3-c361-4eb0-9eb1-71a5e5078e91
begin
	rooms = boston_data[:, :Rm][:, :] # reshape the predictor column into a matrix
	preds = RelevanceVectorMachine.predict(rvm, rooms)
p = plot(boston_data[:, :Rm], preds)
scatter!(p, boston_data[:, :Rm], boston_data[:, :MedV])
end
# βββ‘ 7b585bc7-2410-401d-8068-9e650a684ecf
md"""
## Examining the Posterior
"""
# βββ‘ 40aa5ac2-179c-4775-8d77-820fc82e9ed7
post = RelevanceVectorMachine.posterior(rvm)
# βββ‘ de8a74ab-f3f0-447f-a296-a24ce4562bdf
ΞΌ_post = rand(post, 10000)
# βββ‘ f090679c-6f38-4f88-addf-6e42bb2e0606
pred_samples = rooms * μ_post
# βββ‘ d13e59f2-49ce-4235-a0e6-3bed30930b21
# This calculates the means from the samples
mean_pred_samples = mean(pred_samples, dims = 2)
# βββ‘ adc6a865-e753-40a9-8b7a-de39b16f5195
begin
# This calculates a 95% credible interval
function get_endpoints(sample_row)
sort(sample_row)[[26, 975]]
end
cred_pred = stack(map(get_endpoints, eachrow(pred_samples)), dims = 1)
end
# βββ‘ Cell order:
# β β42bdf6a4-73c0-11ef-129e-158f23f29580
# β β5d96b1e1-6c81-4ea5-92a0-e9afe93b1bf3
# β β75230d1e-e217-4e55-8678-4cd930d8fdf1
# β βd1d9ae1e-22bb-4aae-af97-4b4c07abc0f0
# β βa7b141eb-2085-4efd-9b9e-2e7851e4d8d7
# β β15413012-9e17-45be-aaae-cc867ede63cb
# β βa92e2baa-4f90-4418-9da6-4d16b773aba9
# β βf793afcf-0873-4bf8-9428-bebee2093fd1
# β β56a1ed96-456a-4163-927a-df4262396929
# β β2740ddcc-ab93-4eca-856f-044c4ef6fb94
# β β17fd3b57-5cf0-4fb0-89b5-dc8f719ec906
# β β0a0018b3-c361-4eb0-9eb1-71a5e5078e91
# β β7b585bc7-2410-401d-8068-9e650a684ecf
# β β40aa5ac2-179c-4775-8d77-820fc82e9ed7
# β βde8a74ab-f3f0-447f-a296-a24ce4562bdf
# β βf090679c-6f38-4f88-addf-6e42bb2e0606
# β βd13e59f2-49ce-4235-a0e6-3bed30930b21
# β βadc6a865-e753-40a9-8b7a-de39b16f5195
| RelevanceVectorMachine | https://github.com/svaniksharma/RelevanceVectorMachine.jl.git |
|
[
"MIT"
] | 0.1.0 | f803ed73d3bf218397e1d4134f83a4255c00aee1 | code | 4380 | ### A Pluto.jl notebook ###
# v0.19.46
using Markdown
using InteractiveUtils
# βββ‘ 925038a5-8a4d-41bf-982c-e266f3c57378
begin
import Pkg
Pkg.activate(Base.current_project())
end
# βββ‘ c83ce400-dc44-4fe5-8ee5-3941940711f1
begin
using RDatasets
using StatsModels
using Statistics
end
# βββ‘ b3644946-92cb-4054-942d-b8caf1f08358
include("../src/RelevanceVectorMachine.jl")
# βββ‘ e2d43a3c-72f5-11ef-3fc0-991cbff513be
md"""
## Iris Dataset
"""
# βββ‘ 87d0e2e9-b989-4e97-b86b-ab8c90fbc37c
md"""
## Introduction
In this example, we train an RVM on the Iris dataset to classify flower species. Since the implemented relevance vector machine handles binary classification, we use only the versicolor and virginica species.
"""
# βββ‘ b25e56ba-8215-4ae1-b5c1-ad5d984a4674
md"""
## Processing Data
"""
# βββ‘ ae02f097-de63-40e4-8083-89b65ccb606b
begin
function load_dataset_with_two_categories()
iris_dataset = RDatasets.dataset("datasets", "iris")
iris_dataset = filter(row -> row.Species != "setosa", iris_dataset)
iris_dataset
end
iris_dataset = load_dataset_with_two_categories()
end
# βββ‘ 029dd796-0495-4193-91fc-5cbd26f26a99
md"""
## Training RVM
"""
# βββ‘ e173bef2-b417-44d6-85b5-bd491a6fc78e
begin
rvm_formula = @formula(Species ~ SepalLength + SepalWidth + PetalLength + PetalWidth)
rvm = RelevanceVectorMachine.rvm(rvm_formula, iris_dataset, "classification")
end
# βββ‘ 2bd11b12-c00b-4d6f-8c43-b4befb910294
md"""
## Making Predictions
"""
# βββ‘ 8807f224-5413-4850-b499-aeb56d5dfa0a
predictions = RelevanceVectorMachine.predict(rvm, Matrix(select(iris_dataset, Not(:Species))))
# βββ‘ 374ff4bc-7c6e-4e22-b334-546543ea7776
md"""
`StatsModels` [uses the first level](https://juliastats.org/StatsModels.jl/stable/contrasts/#StatsModels.DummyCoding) in the Iris dataframe ("versicolor") as the "base" level, meaning that 0 corresponds to "versicolor" and 1 corresponds to "virginica".
"""
# βββ‘ fb1318ed-3684-46b5-99b5-28987f678c67
md"""
## Examining the Posterior Distribution
"""
# βββ‘ 1057c497-191b-4eda-9510-2b17eb4a3663
md"""
We can examine the posterior distribution of the weight vector $w$ produced after training. This distribution will be multivariate normal. Below, we sample $1000$ values of the weight vector from the posterior distribution, then use each of these weight vectors to compute the probability that the given datapoint is "versicolor" or "virginica". Then, we use these predictions to construct a 95% credible interval.
"""
# βββ‘ d9fb2c2d-8c28-40d1-a556-6e95d0db420e
post = RelevanceVectorMachine.posterior(rvm)
# βββ‘ cb08b0ba-be3d-4c42-9e7c-2771b7ade377
post_samples = rand(post, 1000)
# βββ‘ 836cfee8-6bfd-4d68-9f0d-3415726f4d5a
Ο(y) = 1 / (1 + exp(-y)) # sigmoid function
# βββ‘ 91832a51-1040-4340-86a9-140eb076a318
begin
X = Matrix(select(iris_dataset, Not(:Species)))
pred_samples = Ο.(X * post_samples)
end
# βββ‘ 0f779131-9ae6-4e8f-8bb4-162276bbeb22
mean_pred_prob = mean(pred_samples, dims = 2)
# βββ‘ 223fef89-9735-4123-ad7d-f147875fe481
function get_endpoints(sample_row)
sort(sample_row)[[26, 975]]
end
# βββ‘ 9f53e207-bec2-4eba-bd6c-fe08c43b6a8d
cred_pred_prob = stack(map(get_endpoints, eachrow(pred_samples)), dims = 1)
# βββ‘ 22360b5e-a65c-43a9-9717-7cdc61a46e00
md"""
If this were in two or three dimensions, we could plot it, but in this case we are dealing with 4D data points (`SepalLength`, `SepalWidth`, `PetalLength`, `PetalWidth`).
"""
# βββ‘ Cell order:
# β βe2d43a3c-72f5-11ef-3fc0-991cbff513be
# β β87d0e2e9-b989-4e97-b86b-ab8c90fbc37c
# β βb25e56ba-8215-4ae1-b5c1-ad5d984a4674
# β β925038a5-8a4d-41bf-982c-e266f3c57378
# β βb3644946-92cb-4054-942d-b8caf1f08358
# β βc83ce400-dc44-4fe5-8ee5-3941940711f1
# β βae02f097-de63-40e4-8083-89b65ccb606b
# β β029dd796-0495-4193-91fc-5cbd26f26a99
# β βe173bef2-b417-44d6-85b5-bd491a6fc78e
# β β2bd11b12-c00b-4d6f-8c43-b4befb910294
# β β8807f224-5413-4850-b499-aeb56d5dfa0a
# β β374ff4bc-7c6e-4e22-b334-546543ea7776
# β βfb1318ed-3684-46b5-99b5-28987f678c67
# β β1057c497-191b-4eda-9510-2b17eb4a3663
# β βd9fb2c2d-8c28-40d1-a556-6e95d0db420e
# β βcb08b0ba-be3d-4c42-9e7c-2771b7ade377
# β β836cfee8-6bfd-4d68-9f0d-3415726f4d5a
# β β91832a51-1040-4340-86a9-140eb076a318
# β β0f779131-9ae6-4e8f-8bb4-162276bbeb22
# β β223fef89-9735-4123-ad7d-f147875fe481
# β β9f53e207-bec2-4eba-bd6c-fe08c43b6a8d
# β β22360b5e-a65c-43a9-9717-7cdc61a46e00
| RelevanceVectorMachine | https://github.com/svaniksharma/RelevanceVectorMachine.jl.git |
|
[
"MIT"
] | 0.1.0 | f803ed73d3bf218397e1d4134f83a4255c00aee1 | code | 5792 | module RelevanceVectorMachine
export rvm, predict, posterior, RVM
using StatsModels
using Tables
using LinearAlgebra
using Statistics
using Distributions
"""
RVM
A trained relevance vector machine, holding the posterior mean `μ` and covariance `Σ` of the weights, the hyperparameters `α`, the precision matrix `B`, and a flag indicating whether the model is a regression.
"""
struct RVM
ΞΌ::Vector{Float64}
Ξ£::Matrix{Float64}
Ξ±::Vector{Float64}
B::Matrix{Float64}
is_regression::Bool
end
"""
rvm(formula::FormulaTerm, data, mode = "regression")
Initialize and train a relevance vector machine, using the variables specified in
`formula` with the data provided in `data`. `mode` can either be "regression" or
"classification".
"""
function rvm(formula::FormulaTerm, data, mode = "regression")
Ξ¦ = get_Ξ¦(formula, data)
t = get_t(formula, data)
if mode != "regression" && mode != "classification"
error("Specify mode as regression or classification")
end
sparse_seq_bayes(Ξ¦, t, mode == "regression")
end
"""
predict(rvm::RVM, X)
Given a matrix `X` and relevance vector machine `rvm`, compute predictions for `X`.
"""
function predict(rvm::RVM, X)
if rvm.is_regression
X * rvm.ΞΌ
else
Ο.(X * rvm.ΞΌ)
end
end
"""
posterior(rvm::RVM)
Returns a normal distribution with mean ΞΌ and covariance Ξ£ corresponding to
the parameters of the relevance vector machine.
"""
posterior(rvm::RVM) = MvNormal(rvm.ΞΌ, Hermitian(rvm.Ξ£))
get_Ξ¦(formula, data) = float.(modelmatrix(formula.rhs, data))
get_t(formula, data) = float.(vec(modelmatrix(formula.lhs, data)))
get_N(Ξ¦) = size(Ξ¦, 1)
get_M(Ξ¦) = size(Ξ¦, 2)
Ο(y) = 1 / (1 + exp(-y))
function sparse_seq_bayes(Ξ¦::Matrix{Float64}, t::Vector{Float64}, is_regression::Bool)
N = get_N(Ξ¦)
B = randn(N, N)
if is_regression
Ξ² = var(t) * 0.1
B = Ξ² * diagm(ones(N))
end
M = get_M(Ξ¦)
Ξ± = fill(Inf, M)
ΞΌ = zeros(M,)
Ξ£ = zeros(M, M)
B = compute_B(B, Ξ¦, ΞΌ, is_regression)
t_hat = compute_t_hat(B, Ξ¦, ΞΌ, t, is_regression)
mask = BitArray(fill(false, M))
mask[1] = true
S = compute_S(Ξ¦, B, Ξ£, mask)
Q = compute_Q(Ξ¦, B, Ξ£, t_hat, mask)
q = compute_q(Q, S, Ξ±)
s = compute_s(S, Ξ±)
Ξ±[1] = update_Ξ±(1, q, s)
Ξ£[mask, mask] = compute_Ξ£(Ξ¦, B, Ξ±, mask)
ΞΌ[mask] = compute_ΞΌ(Ξ¦, B, Ξ£, t_hat, mask)
    while !converged(α, q, s)
        for i ∈ 1:M
            if q[i]^2 > s[i] && α[i] < Inf
                # basis function already in the model: re-estimate its α
                α[i] = update_α(i, q, s)
            elseif q[i]^2 > s[i] && α[i] == Inf
                # basis function would improve the marginal likelihood: add it
                mask[i] = true
                α[i] = update_α(i, q, s)
            elseif q[i]^2 ≤ s[i] && α[i] < Inf
                # basis function no longer contributes: prune it
                mask[i] = false
                α[i] = Inf
            end
        end
if is_regression
B = update_Ξ²(Ξ¦, ΞΌ, Ξ£, Ξ±, t, mask)
end
B = compute_B(B, Ξ¦, ΞΌ, is_regression)
t_hat = compute_t_hat(B, Ξ¦, ΞΌ, t, is_regression)
S = compute_S(Ξ¦, B, Ξ£, mask)
Q = compute_Q(Ξ¦, B, Ξ£, t_hat, mask)
q = compute_q(Q, S, Ξ±)
s = compute_s(S, Ξ±)
Ξ£[mask, mask] = compute_Ξ£(Ξ¦, B, Ξ±, mask)
ΞΌ[mask] = compute_ΞΌ(Ξ¦, B, Ξ£, t_hat, mask)
end
RVM(ΞΌ, Ξ£, Ξ±, B, is_regression)
end
function compute_B(B::Matrix{Float64}, Ξ¦::Matrix{Float64}, ΞΌ::Vector{Float64}, is_regression::Bool)
if is_regression
return B
else
return diagm(Ο.(Ξ¦ * ΞΌ))
end
end
function compute_t_hat(B::Matrix{Float64}, Ξ¦::Matrix{Float64}, ΞΌ::Vector{Float64}, t::Vector{Float64}, is_regression::Bool)
if is_regression
return t
else
return Ξ¦ * ΞΌ + B^(-1) * (t .- Ο.(Ξ¦ * ΞΌ))
end
end
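# S and Q are the "sparsity" and "quality" factors of the fast marginal
# likelihood maximisation algorithm (Tipping & Faul, 2003); the subtracted
# term removes the influence of the basis functions currently in the model
# (the masked columns).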
function compute_S(Ξ¦::Matrix{Float64}, B::Matrix{Float64}, Ξ£::Matrix{Float64}, mask::BitVector)
M = get_M(Ξ¦)
S = zeros(M,)
Ξ¦_vw = Ξ¦[:, mask]
Ξ£_vw = Ξ£[mask, mask]
for i β 1:M
S[i] = transpose(Ξ¦[:, i]) * B * Ξ¦[:, i] - transpose(Ξ¦[:, i]) * B * Ξ¦_vw * Ξ£_vw * transpose(Ξ¦_vw) * B * Ξ¦[:, i]
end
S
end
function compute_Q(Ξ¦::Matrix{Float64}, B::Matrix{Float64}, Ξ£::Matrix{Float64}, t_hat::Vector{Float64}, mask::BitVector)
M = get_M(Ξ¦)
Q = zeros(M,)
Ξ¦_vw = Ξ¦[:, mask]
Ξ£_vw = Ξ£[mask, mask]
for i β 1:M
Q[i] = transpose(Ξ¦[:, i]) * B * t_hat - transpose(Ξ¦[:, i]) * B * Ξ¦_vw * Ξ£_vw * transpose(Ξ¦_vw) * B * t_hat
end
Q
end
function compute_s(S::Vector{Float64}, Ξ±::Vector{Float64})
s = zeros(size(S, 1),)
for i β eachindex(S)
if Ξ±[i] == Inf
s[i] = S[i]
else
s[i] = Ξ±[i] * S[i] / (Ξ±[i] - S[i])
end
end
s
end
function compute_q(Q::Vector{Float64}, S::Vector{Float64}, Ξ±::Vector{Float64})
q = zeros(size(Q, 1),)
for i β eachindex(Q)
if Ξ±[i] == Inf
q[i] = Q[i]
else
q[i] = Ξ±[i] * Q[i] / (Ξ±[i] - S[i])
end
end
q
end
function update_Ξ±(i, q::Vector{Float64}, s::Vector{Float64})
q[i]^2 / (q[i]^2 - s[i])
end
function compute_Ξ£(Ξ¦::Matrix{Float64}, B::Matrix{Float64}, Ξ±::Vector{Float64}, mask::BitVector)
Ξ¦_vw = @view Ξ¦[:, mask]
Ξ±_vw = @view Ξ±[mask]
A = diagm(Ξ±_vw)
(transpose(Ξ¦_vw) * B * Ξ¦_vw + A)^(-1)
end
function compute_ΞΌ(Ξ¦::Matrix{Float64}, B::Matrix{Float64}, Ξ£::Matrix{Float64}, t_hat::Vector{Float64}, mask::BitVector)
Ξ¦_vw = @view Ξ¦[:, mask]
Ξ£_vw = @view Ξ£[mask, mask]
Ξ£_vw * transpose(Ξ¦_vw) * B * t_hat
end
function update_Ξ²(Ξ¦::Matrix{Float64}, ΞΌ::Vector{Float64}, Ξ£::Matrix{Float64}, Ξ±::Vector{Float64}, t_hat::Vector{Float64}, mask::BitVector)
N = get_N(Ξ¦)
M = get_M(Ξ¦)
Ξ£_vw = @view Ξ£[mask, mask]
Ξ¦_vw = @view Ξ¦[:, mask]
ΞΌ_vw = @view ΞΌ[mask]
Ξ±_vw = @view Ξ±[mask]
(N - M + sum(Ξ±_vw .* diag(Ξ£_vw))) / (norm(t_hat - Ξ¦_vw * ΞΌ_vw)^2) * diagm(ones(N))
end
function converged(Ξ±::Vector{Float64}, q::Vector{Float64}, s::Vector{Float64})
all(log.(abs.(Ξ±)) .< 1e-6) && all(q.^2 .> s)
end
end # module RelevanceVectorMachine
| RelevanceVectorMachine | https://github.com/svaniksharma/RelevanceVectorMachine.jl.git |
|
[
"MIT"
] | 0.1.0 | f803ed73d3bf218397e1d4134f83a4255c00aee1 | code | 1998 | include("../src/RelevanceVectorMachine.jl")
using DataFrames
using StatsModels
using Distributions
using RDatasets
using Test
# Check that we get can get target and predictor variables from formula
function test_formula_parsing()
df = RDatasets.dataset("datasets", "women")
nrows = nrow(df)
ncols = ncol(df)
Ξ¦ = RelevanceVectorMachine.get_Ξ¦(@formula(Height ~ Weight), df)
t = RelevanceVectorMachine.get_t(@formula(Height ~ Weight), df)
@test size(Ξ¦, 1) == nrows
@test size(Ξ¦, 2) == ncols - 1
@test size(t, 1) == nrows
@test size(t, 2) == 1
end
# check that output is between 0 and 1 for classification tasks
function test_classification_output()
df = RDatasets.dataset("datasets", "women")
rvm = RelevanceVectorMachine.rvm(@formula(Height ~ Weight), df, "classification")
X = Matrix(select(df, :Weight))
predictions = RelevanceVectorMachine.predict(rvm, X)
@test all(0 .β€ predictions .β€ 1)
end
# create a perfectly linear dataset and check that the data is overfit
function test_rvm_regression()
X = randn(100, 4)
w = randn(4,)
y = X * w
model_matrix = hcat(X, y)
df = DataFrame(model_matrix, :auto)
rvm = RelevanceVectorMachine.rvm(@formula(x5 ~ x1 + x2 + x3 + x4), df)
predictions = RelevanceVectorMachine.predict(rvm, X)
@test isapprox(y, predictions, rtol=1e-3)
end
# create a perfectly separable dataset and check that the data is overfit
function test_rvm_classification()
X = randn(100, 1)
w = rand()
y = X * w
y[y .< 0] .= -1
y[y .β₯ 0] .= 1
model_matrix = hcat(X, y)
df = DataFrame(model_matrix, :auto)
rvm = RelevanceVectorMachine.rvm(@formula(x2 ~ x1), df, "classification")
predictions = RelevanceVectorMachine.predict(rvm, X)
predictions[predictions .< 0.5] .= -1
predictions[predictions .β₯ 0.5] .= 1
@test mean(y .== predictions) == 1.0
end
test_formula_parsing()
test_classification_output()
test_rvm_regression()
test_rvm_classification() | RelevanceVectorMachine | https://github.com/svaniksharma/RelevanceVectorMachine.jl.git |
|
[
"MIT"
] | 0.1.0 | f803ed73d3bf218397e1d4134f83a4255c00aee1 | docs | 88 | # RelevanceVectorMachine.jl
A relevance vector machine implementation written in Julia. | RelevanceVectorMachine | https://github.com/svaniksharma/RelevanceVectorMachine.jl.git |
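
A minimal usage sketch (the module is not registered, so `include` it directly; see the Pluto notebooks in this repository for full regression and classification walkthroughs):

```julia
using DataFrames, StatsModels
include("src/RelevanceVectorMachine.jl")

df = DataFrame(x = randn(100), y = randn(100))
model = RelevanceVectorMachine.rvm(@formula(y ~ x), df) # mode defaults to "regression"
preds = RelevanceVectorMachine.predict(model, Matrix(select(df, :x)))
post = RelevanceVectorMachine.posterior(model)          # MvNormal posterior over the weights
```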
|
[
"MIT"
] | 0.7.1 | 0b1c87463f24bd18a9685601e6bf6207cd051cdd | code | 1195 | VERSION < v"0.7.0-beta2.199" && __precompile__()
module MechanismGeometries
using LinearAlgebra
using GeometryBasics
using StaticArrays
using RigidBodyDynamics
using ColorTypes: RGBA
using Rotations: rotation_between, RotMatrix, AngleAxis
using CoordinateTransformations: AffineMap, Transformation,
IdentityTransformation,
LinearMap, Translation
export AbstractGeometrySource,
VisualElement,
visual_elements,
Skeleton,
HyperPlane,
URDFVisuals,
MeshFile
# Re-export from ColorTypes
export RGBA
abstract type AbstractGeometrySource end
function visual_elements(mechanism, source::AbstractGeometrySource) end
struct MeshFile
filename::String
end
const GeometryLike = Union{AbstractGeometry, AbstractMesh, MeshFile}
const DEFAULT_COLOR = RGBA{Float32}(0.7, 0.7, 0.7, 1.0)
struct HyperPlane{N, T} <: GeometryPrimitive{N, T}
normal::Vec{N, T}
end
mutable struct VisualElement{G <: GeometryLike, T <: Transformation}
frame::CartesianFrame3D
geometry::G
color::RGBA{Float32}
transform::T
end
include("skeleton.jl")
include("urdf.jl")
using .URDF
end # module
| MechanismGeometries | https://github.com/JuliaRobotics/MechanismGeometries.jl.git |
|
[
"MIT"
] | 0.7.1 | 0b1c87463f24bd18a9685601e6bf6207cd051cdd | code | 5290 | to_affine_map(tform::Transform3D) = AffineMap(rotation(tform), translation(tform))
rotation_from_x_axis(translation::AbstractVector{T}) where {T} = rotation_between(SVector{3, T}(1,0,0), translation)
function inertial_ellipsoid_dimensions(mass, axis_inertias)
# Ix = m/5 (dy^2 + dz^2)
# Iy = m/5 (dx^2 + dz^2)
# Iz = m/5 (dx^2 + dy^2)
#
# let A = [0 1 1
# 1 0 1
# 1 1 0]
# b = 5 / m * [Ix; Iy; Iz]
# Then A \ b = [dx^2; dy^2; dz^2]
#
# This is only valid if the axis inertias obey the triangle inequalities:
# Ix + Iy >= Iz
# Ix + Iz >= Iy
# Iy + Iz >= Ix
# Ix - Iy = m/5 (dy^2 - dx^2)
# Ix - Iy + Iz = m/5 (2*dy^2)
# dy^2 = 0.5 (Ix - Iy + Iz) * 5/m
squared_lengths = 0.5 * 5.0 / mass *
[-axis_inertias[1] + axis_inertias[2] + axis_inertias[3];
axis_inertias[1] - axis_inertias[2] + axis_inertias[3];
axis_inertias[1] + axis_inertias[2] - axis_inertias[3]]
for i = 1:3
total_inertia_of_other_axes = zero(axis_inertias[1])
for j = 1:3
if i == j
continue
end
total_inertia_of_other_axes += axis_inertias[j]
end
if axis_inertias[i] > total_inertia_of_other_axes
error("Principal inertias $(axis_inertias) do not satisfy the triangle inequalities, so the equivalent inertial ellipsoid is not well-defined.")
end
end
return sqrt.(squared_lengths)
end
inertial_ellipsoid(body::RigidBody) = inertial_ellipsoid(spatial_inertia(body))
if VERSION < v"0.7.0-DEV.5211"
const _eigen = eig
else
const _eigen = eigen
end
function inertial_ellipsoid(inertia::SpatialInertia)
com_frame = CartesianFrame3D("CoM")
com_frame_to_inertia_frame = Transform3D(com_frame, inertia.frame, center_of_mass(inertia).v)
com_inertia = transform(inertia, inv(com_frame_to_inertia_frame))
principal_inertias, axes = _eigen(Array(com_inertia.moment)) # StaticArrays.eig checks that the matrix is Hermitian with zero tolerance...
axes[:,3] *= sign(dot(cross(axes[:,1], axes[:,2]), axes[:,3])) # Ensure the axes form a right-handed coordinate system
radii = inertial_ellipsoid_dimensions(com_inertia.mass, principal_inertias)
# We create an ellipsoid by generating a sphere and then scaling it
# along each axis
geometry = HyperSphere(zero(Point{3, Float64}), 1.0)
scaling = LinearMap(SDiagonal(radii[1], radii[2], radii[3]))
    tform = AffineMap(RotMatrix{3}(axes), center_of_mass(inertia).v) ∘ scaling
return VisualElement(inertia.frame, geometry, DEFAULT_COLOR, tform)
end
function create_frame_to_frame_geometry(joint_to_joint, radius)
trans = translation(joint_to_joint)
geom_length = norm(trans)
radius = min(radius, geom_length)
tform = if norm(trans) > 1e-10
LinearMap(rotation_from_x_axis(trans))
else
IdentityTransformation()
end
geometry = Rect(Vec(0, -radius, -radius), Vec(geom_length, 2*radius, 2*radius))
return VisualElement(joint_to_joint.to, geometry, DEFAULT_COLOR, tform)
end
function maximum_link_length(body_fixed_joint_frames::Dict{RigidBody{T}, Vector{CartesianFrame3D}}) where T
result = zero(T)
for (body, joint_frames) in body_fixed_joint_frames
for framei in joint_frames, framej in joint_frames
transform = fixed_transform(body, framei, framej)
result = max(result, norm(translation(transform)))
end
end
result
end
struct Skeleton <: AbstractGeometrySource
inertias::Bool
randomize_colors::Bool
end
Skeleton(; inertias=true, randomize_colors=false) = Skeleton(inertias, randomize_colors)
function visual_elements(mechanism::Mechanism, source::Skeleton)
body_fixed_joint_frames = Dict(body => begin
[map(frame_before, out_joints(body, mechanism)); map(frame_after, in_joints(body, mechanism))]
end for body in bodies(mechanism))
box_width = 0.05 * maximum_link_length(body_fixed_joint_frames)
elements = Vector{VisualElement}()
for body in bodies(mechanism)
if source.inertias && has_defined_inertia(body) && spatial_inertia(body).mass >= 1e-3
push!(elements, inertial_ellipsoid(body))
else
for joint in out_joints(body, mechanism)
if !iszero(box_width)
push!(elements, VisualElement(
frame_before(joint),
HyperSphere{3, Float64}(zero(Point{3, Float64}), box_width),
DEFAULT_COLOR,
IdentityTransformation()
))
end
end
end
frames = body_fixed_joint_frames[body]
for (i, framei) in enumerate(frames)
for j = i + 1 : length(frames)
framej = frames[j]
joint_to_joint = fixed_transform(mechanism, framei, framej)
push!(elements, create_frame_to_frame_geometry(
joint_to_joint,
box_width / 2
))
end
end
end
if source.randomize_colors
for element in elements
element.color = RGBA{Float32}(rand(), rand(), rand(), 0.5)
end
end
elements
end | MechanismGeometries | https://github.com/JuliaRobotics/MechanismGeometries.jl.git |
|
[
"MIT"
] | 0.7.1 | 0b1c87463f24bd18a9685601e6bf6207cd051cdd | code | 7837 | module URDF
using LightXML
using RigidBodyDynamics
using RigidBodyDynamics.Graphs
const rbd = RigidBodyDynamics
using ColorTypes: RGBA
using GeometryBasics
using MechanismGeometries: GeometryLike, VisualElement, DEFAULT_COLOR, AbstractGeometrySource, HyperPlane, MeshFile
import MechanismGeometries: visual_elements
using CoordinateTransformations: AffineMap, LinearMap, Transformation
using StaticArrays: SDiagonal
export URDFVisuals
function parse_geometries(xml_geometry::XMLElement, package_path, frame::CartesianFrame3D, color::RGBA, tform::Transformation, file_path="")
elements = Vector{VisualElement}()
for xml_cylinder in get_elements_by_tagname(xml_geometry, "cylinder")
length = rbd.parse_scalar(Float32, xml_cylinder, "length")
radius = rbd.parse_scalar(Float32, xml_cylinder, "radius")
        push!(elements, VisualElement(frame, Cylinder{3, Float32}(Point(0, 0, -length/2), Point(0, 0, length/2), radius), color, tform))
end
for xml_box in get_elements_by_tagname(xml_geometry, "box")
size = Vec{3, Float32}(rbd.parse_vector(Float32, xml_box, "size", "0 0 0"))
push!(elements, VisualElement(frame, Rect(-size / 2, size), color, tform))
end
for xml_sphere in get_elements_by_tagname(xml_geometry, "sphere")
radius = rbd.parse_scalar(Float32, xml_sphere, "radius")
push!(elements, VisualElement(frame, HyperSphere(zero(Point{3, Float32}), radius), color, tform))
end
for xml_plane in get_elements_by_tagname(xml_geometry, "plane")
normal = Vec{3, Float32}(rbd.parse_vector(Float32, xml_plane, "normal", "0 0 1"))
push!(elements, VisualElement(frame, HyperPlane(normal), color, tform))
end
for xml_mesh in get_elements_by_tagname(xml_geometry, "mesh")
filename = attribute(xml_mesh, "filename")
scale = Vec{3, Float32}(rbd.parse_vector(Float32, xml_mesh, "scale", "1 1 1"))
package_pattern = r"^package://"
if occursin(package_pattern, filename)
found_mesh = false
for package_directory in package_path
basename, ext = splitext(joinpath(package_directory, replace(filename, package_pattern => "")))
for ext_to_try in [ext, ".obj"] # TODO: remove this once other packages are updated
filename_in_package = basename * ext_to_try
if isfile(filename_in_package)
                        push!(elements, VisualElement(frame, MeshFile(filename_in_package), color, tform ∘ LinearMap(SDiagonal(scale))))
found_mesh = true
break
end
end
end
if !found_mesh
warning_message = """
Could not find the mesh file: $(filename). Tried substituting the following folders for the 'package://' prefix: $(package_path).
Also tried changing the extension to .obj.
"""
@warn(warning_message)
end
else
basename, ext = splitext(joinpath(file_path, filename))
found_mesh = false
for ext_to_try in [ext, ".obj"] # TODO: remove this once other packages are updated
filename = basename * ext_to_try
if isfile(filename)
                    push!(elements, VisualElement(frame, MeshFile(filename), color, tform ∘ LinearMap(SDiagonal(scale))))
found_mesh = true
break
end
end
if !found_mesh
@warn "Could not find the mesh file: $(filename). Also tried changing the extension to .obj."
end
end
end
elements
end
function parse_material!(material_colors::Dict{String, RGBA{Float32}}, xml_material)
if xml_material === nothing
return DEFAULT_COLOR
end
name = attribute(xml_material, "name")
xml_color = find_element(xml_material, "color")
if xml_color !== nothing
default = "0.7 0.7 0.7 1."
material_colors[name] = RGBA{Float32}(rbd.parse_vector(Float32, xml_color, "rgba", default)...)
end
get(material_colors, name, DEFAULT_COLOR)
end
function parse_link!(material_colors::Dict, xml_link, frame::CartesianFrame3D,
package_path=ros_package_path(), file_path="", tag="visual", link_colors=Dict{String, RGBA{Float32}}())
elements = Vector{VisualElement}()
xml_visuals = get_elements_by_tagname(xml_link, tag)
for xml_visual in xml_visuals
        xml_material = find_element(xml_visual, "material")
        linkname = attribute(xml_link, "name")
        color = get(link_colors, linkname, parse_material!(material_colors, xml_material))
rot, trans = rbd.parse_pose(Float64, find_element(xml_visual, "origin"))
tform = AffineMap(rot, trans)
append!(elements, parse_geometries(find_element(xml_visual, "geometry"), package_path, frame, color, tform, file_path))
end
elements
end
function create_graph(xml_links, xml_joints)
# create graph structure of XML elements
graph = DirectedGraph{Vertex{XMLElement}, Edge{XMLElement}}()
vertices = Vertex.(xml_links)
for vertex in vertices
add_vertex!(graph, vertex)
end
name_to_vertex = Dict(attribute(v.data, "name") => v for v in vertices)
for xml_joint in xml_joints
parent = name_to_vertex[attribute(find_element(xml_joint, "parent"), "link")]
child = name_to_vertex[attribute(find_element(xml_joint, "child"), "link")]
add_edge!(graph, parent, child, Edge(xml_joint))
end
graph
end
ros_package_path() = split(get(ENV, "ROS_PACKAGE_PATH", ""), ':')
struct URDFVisuals <: AbstractGeometrySource
xdoc::XMLDocument
package_path::Vector{String}
file_path::String
tag::String
link_colors::Dict{String, RGBA{Float32}}
end
function URDFVisuals(xdoc::XMLDocument; package_path=ros_package_path(), file_path="", tag="visual", link_colors=Dict{String, RGBA{Float32}}())
URDFVisuals(xdoc, package_path, file_path, tag, link_colors)
end
URDFVisuals(filename::AbstractString; kw...) = URDFVisuals(parse_file(filename); kw...)
function visual_elements(mechanism::Mechanism, source::URDFVisuals)
xroot = LightXML.root(source.xdoc)
@assert LightXML.name(xroot) == "robot"
xml_links = get_elements_by_tagname(xroot, "link")
xml_joints = get_elements_by_tagname(xroot, "joint")
xml_materials = get_elements_by_tagname(xroot, "material")
graph = create_graph(xml_links, xml_joints)
roots = collect(filter(v -> isempty(in_edges(v, graph)), rbd.Graphs.vertices(graph)))
length(roots) != 1 && error("Can only handle a single root")
tree = SpanningTree(graph, first(roots))
material_colors = Dict{String, RGBA{Float32}}()
for xml_material in xml_materials
parse_material!(material_colors, xml_material)
end
name_to_frame = Dict(string(tf.from) => tf.from for body in bodies(mechanism) for tf in rbd.frame_definitions(body))
elements = Vector{VisualElement}()
for vertex in rbd.Graphs.vertices(tree)
xml_link = vertex.data
linkname = attribute(xml_link, "name")
framename = if vertex == rbd.Graphs.root(tree)
linkname
else
xml_joint = edge_to_parent(vertex, tree).data
jointname = attribute(xml_joint, "name")
string("after_", jointname) # TODO: create function in RBD, call it here
end
if haskey(name_to_frame, framename)
body_frame = name_to_frame[framename]
append!(elements, parse_link!(material_colors, xml_link, body_frame, source.package_path, source.file_path, source.tag, source.link_colors))
end
end
elements
end
end
| MechanismGeometries | https://github.com/JuliaRobotics/MechanismGeometries.jl.git |
|
[
"MIT"
] | 0.7.1 | 0b1c87463f24bd18a9685601e6bf6207cd051cdd | code | 9552 | using MechanismGeometries
using RigidBodyDynamics
const rbd = RigidBodyDynamics
using GeometryBasics
using StaticArrays: SVector, SDiagonal
using CoordinateTransformations: AffineMap, IdentityTransformation, LinearMap, Translation, transform_deriv
using ColorTypes: RGBA, BGR
using ValkyrieRobot
using Test
using LinearAlgebra
function homog(::IdentityTransformation)
Matrix(1.0I, 4, 4)
end
function homog(m::AffineMap)
H = Matrix(1.0I, 4, 4)
H[1:3, 1:3] .= transform_deriv(m, SVector(0., 0., 0.))
H[1:3, 4] .= m(SVector(0., 0., 0.))
H
end
homog(m::LinearMap) = homog(m ∘ Translation(0, 0, 0))
homog(t::Translation) = homog(AffineMap(Matrix(1.0I, 3, 3), t(SVector(0., 0., 0.))))
@testset "MechanismGeometries" begin
@testset "skeletons" begin
@testset "unit sphere" begin
frame = CartesianFrame3D("com")
inertia = SpatialInertia(frame, SDiagonal(1., 1, 1), SVector(0., 0, 0), 1.0)
# I = diag([1, 1, 1]), M = 1
# so the equivalent ellipsoid must have the same moment of inertia
# Ixx = 1/5 * M * (b^2 + c^2)
# so 1 = 1/5 * (b^2 + c^2) and b = c
# so 1 = 2/5 b^2
# and b^2 = 5/2 -> b = sqrt(5/2)
element = MechanismGeometries.inertial_ellipsoid(inertia)
@test element.frame === frame
            @test homog(element.transform) ≈ homog(AffineMap(SDiagonal(sqrt(5/2), sqrt(5/2), sqrt(5/2)), SVector(0., 0, 0)))
            @test element.geometry isa HyperSphere
            @test radius(element.geometry) ≈ 1
end
@testset "heavy sphere" begin
frame = CartesianFrame3D("com")
inertia = SpatialInertia(frame, SDiagonal(1., 1, 1), SVector(0., 0, 0), 5.0)
# I = diag([1, 1, 1]), M = 5
# so the equivalent ellipsoid must have the same moment of inertia
# Ixx = 1/5 * M * (b^2 + c^2)
# so 1 = 1/5 * 5 * (b^2 + c^2) and b = c
# so 1 = 2 b^2
# and b^2 = 1/2 -> b = sqrt(1/2)
element = MechanismGeometries.inertial_ellipsoid(inertia)
@test element.frame === frame
            @test homog(element.transform) ≈ homog(LinearMap(SDiagonal(sqrt(1/2), sqrt(1/2), sqrt(1/2))))
            @test element.geometry isa HyperSphere
            @test radius(element.geometry) ≈ 1
end
@testset "translated sphere" begin
frame = CartesianFrame3D("com")
inertia = SpatialInertia(frame, SDiagonal(1., 1, 1), SVector(1., 0, 0), 5.0)
            @test center_of_mass(inertia).v ≈ [0.2, 0, 0]
            element = MechanismGeometries.inertial_ellipsoid(inertia)
            @test element.frame === frame
            @test element.transform(SVector(0, 0, 0)) ≈ center_of_mass(inertia).v
            @test element.geometry isa HyperSphere
            @test radius(element.geometry) ≈ 1
end
end
@testset "urdf" begin
@testset "acrobot" begin
urdf = "urdf/Acrobot.urdf"
robot = parse_urdf(urdf, remove_fixed_tree_joints=false)
elements = visual_elements(robot, URDFVisuals(urdf))
@test length(elements) == 3
element = elements[1]
@test string(rbd.body_fixed_frame_to_body(robot, element.frame)) == "base_link"
@test element.geometry isa Rect
            @test element.geometry.origin ≈ [-0.1, -0.1, -0.1]
            @test element.geometry.widths ≈ [0.2, 0.2, 0.2]
            @test element.color == RGBA(0, 1, 0, 1)
            @test homog(element.transform) ≈ homog(IdentityTransformation())
element = elements[2]
@test string(rbd.body_fixed_frame_to_body(robot, element.frame)) == "upper_link"
@test element.geometry isa Cylinder
            @test element.geometry.origin ≈ [0, 0, -1.1 / 2]
            @test element.geometry.extremity ≈ [0, 0, 1.1 / 2]
            @test radius(element.geometry) ≈ 0.05
            @test element.color == RGBA(1, 0, 0, 1)
            @test homog(element.transform) ≈ homog(Translation(0, 0, -0.5))
element = elements[3]
@test string(rbd.body_fixed_frame_to_body(robot, element.frame)) == "lower_link"
@test element.geometry isa Cylinder
            @test element.geometry.origin ≈ [0, 0, -2.1 / 2]
            @test element.geometry.extremity ≈ [0, 0, 2.1 / 2]
            @test radius(element.geometry) ≈ 0.05
            @test element.color == RGBA(0, 0, 1, 1)
            @test homog(element.transform) ≈ homog(Translation(0, 0, -1))
end
@testset "acrobot with fixed elbow" begin
urdf = "urdf/Acrobot_fixed.urdf"
robot = parse_urdf(urdf, remove_fixed_tree_joints=false)
rbd.remove_fixed_tree_joints!(robot)
elements = visual_elements(robot, URDFVisuals(urdf))
@test length(elements) == 3
element = elements[1]
@test string(rbd.body_fixed_frame_to_body(robot, element.frame)) == "world"
@test element.geometry isa Rect
            @test element.geometry.origin ≈ [-0.1, -0.1, -0.1]
            @test element.geometry.widths ≈ [0.2, 0.2, 0.2]
            @test element.color == RGBA(0, 1, 0, 1)
            @test homog(element.transform) ≈ homog(IdentityTransformation())
element = elements[2]
@test string(rbd.body_fixed_frame_to_body(robot, element.frame)) == "upper_link"
@test element.geometry isa Cylinder
            @test element.geometry.origin ≈ [0, 0, -1.1 / 2]
            @test element.geometry.extremity ≈ [0, 0, 1.1 / 2]
            @test radius(element.geometry) ≈ 0.05
            @test element.color == RGBA(1, 0, 0, 1)
            @test homog(element.transform) ≈ homog(Translation(0, 0, -0.5))
element = elements[3]
@test string(rbd.body_fixed_frame_to_body(robot, element.frame)) == "upper_link"
@test element.geometry isa Cylinder
            @test element.geometry.origin ≈ [0, 0, -2.1 / 2]
            @test element.geometry.extremity ≈ [0, 0, 2.1 / 2]
            @test radius(element.geometry) ≈ 0.05
            @test element.color == RGBA(0, 0, 1, 1)
            @test homog(element.transform) ≈ homog(Translation(0, 0, -1))
end
@testset "acrobot submechanism" begin
urdf = "urdf/Acrobot.urdf"
robot = parse_urdf(urdf, remove_fixed_tree_joints=false)
s = submechanism(robot, bodies(robot)[3])
elements = visual_elements(s, URDFVisuals(urdf))
@test length(elements) == 2
@test string(rbd.body_fixed_frame_to_body(robot, elements[1].frame)) == "upper_link"
@test string(rbd.body_fixed_frame_to_body(robot, elements[2].frame)) == "lower_link"
end
@testset "ground plane" begin
urdf = "urdf/ground_plane.urdf"
robot = parse_urdf(urdf, remove_fixed_tree_joints=false)
elements = visual_elements(robot, URDFVisuals(urdf; tag="collision"))
@test length(elements) == 2
@test elements[1].frame === elements[2].frame
@test elements[1].geometry isa Rect
@test elements[2].geometry isa HyperPlane
@test elements[2].geometry.normal == Vec(0., 0, 1)
            @test homog(elements[2].transform) ≈ homog(Translation(0, 0, 0.025))
end
@testset "file with missing mesh" begin
urdf = "urdf/missing_meshfile.urdf"
robot = parse_urdf(urdf, remove_fixed_tree_joints=false)
elements = visual_elements(robot, URDFVisuals(urdf; tag="collision"))
end
@testset "link_colors keyword argument" begin
urdf = "urdf/Acrobot.urdf"
robot = parse_urdf(urdf, remove_fixed_tree_joints=false)
link = last(bodies(robot))
for override_color in [RGBA(0.1f0, 0.2f0, 0.3f0, 0.4f0), BGR(0.1, 0.2, 0.3)]
link_colors = Dict(string(link) => override_color)
elements_override = visual_elements(robot, URDFVisuals(urdf; link_colors=link_colors))
elements_base = visual_elements(robot, URDFVisuals(urdf))
for (element_override, element_base) in zip(elements_override, elements_base)
if element_base == last(elements_base)
@test RGBA{Float32}(element_override.color) == RGBA{Float32}(override_color)
else
@test element_override.color == element_base.color
end
end
end
end
@testset "anymal" begin
# The main reason for testing this URDF is that it has `scale` tags for the meshes.
urdf = "urdf/anymal_dummy_meshes.urdf"
robot = parse_urdf(urdf, remove_fixed_tree_joints=false)
elements = visual_elements(robot, URDFVisuals(urdf; package_path=["urdf"]))
@test length(elements) == 17
for element in elements
_, s, _ = svd(element.transform.linear)
expected_scale = 1e-3
@test all(x -> isapprox(x, expected_scale; atol=1e-10), s)
end
end
end
@testset "valkyrie" begin
robot = ValkyrieRobot.Valkyrie().mechanism
visual_elements(robot, Skeleton())
visual_elements(robot, Skeleton(inertias=false, randomize_colors=true))
visual_elements(robot, URDFVisuals(ValkyrieRobot.urdfpath(), package_path=[ValkyrieRobot.packagepath()]))
end
end
| MechanismGeometries | https://github.com/JuliaRobotics/MechanismGeometries.jl.git |
|
[
"MIT"
] | 0.7.1 | 0b1c87463f24bd18a9685601e6bf6207cd051cdd | docs | 3408 | # MechanismGeometries
[](https://github.com/JuliaRobotics/MechanismGeometries.jl/actions?query=workflow%3ACI)
[](http://codecov.io/github/JuliaRobotics/MechanismGeometries.jl?branch=master)
This package implements several methods of generating or loading geometries associated with a [RigidBodyDynamics.jl](https://github.com/tkoolen/RigidBodyDynamics.jl) `Mechanism` in Julia. It is currently used by [MeshCatMechanisms.jl](https://github.com/JuliaRobotics/MeshCatMechanisms.jl) but can also be used independently.
# Interface
This package exports one primary method:
```julia
visual_elements(mechanism::Mechanism, source::AbstractGeometrySource)
```
`visual_elements` returns a vector of `VisualElement` structs, each of which contains:
* `frame`: A `CartesianFrame3D` indicating where the geometry is attached in the mechanism
* `geometry`: One of the [GeometryBasics.jl](https://github.com/JuliaGeometry/GeometryBasics.jl) types
* `color`: an RGBA color from [ColorTypes.jl](https://github.com/JuliaGraphics/ColorTypes.jl)
* `transform`: a `Transformation` from [CoordinateTransformations.jl](https://github.com/FugroRoames/CoordinateTransformations.jl/) indicating the pose of the geometry w.r.t its attached frame.
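For example, a minimal sketch (assuming `mechanism` is any RigidBodyDynamics `Mechanism`, and using the `Skeleton` source described below):

```julia
using MechanismGeometries

# Inspect where each geometry attaches and what kind of primitive it is
for element in visual_elements(mechanism, Skeleton())
    println(element.frame, " => ", typeof(element.geometry))
end
```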
# Currently implemented sources
These demonstrations use the Boston Dynamics Atlas robot from [AtlasRobot.jl](https://github.com/tkoolen/AtlasRobot.jl).
```julia
using AtlasRobot
using MechanismGeometries
mechanism = AtlasRobot.mechanism()
```
## Skeleton
```julia
Skeleton <: AbstractGeometrySource
```
The `Skeleton` type uses only the joints and bodies in the mechanism itself to construct a visual representation of the robot's links. The sticks connect joints in the mechanism and the ellipsoids represent the mass and moment of inertia of each body:
```julia
visual_elements(mechanism, Skeleton())
```

The moment of inertia ellipsoids can also be turned off, leaving just the joint connections:
```julia
visual_elements(mechanism, Skeleton(inertias=false))
```

## URDF Visuals
```julia
URDFVisuals <: AbstractGeometrySource
```
The `URDFVisuals` type loads the visual elements from a given URDF file (passed as either a filename or a parsed `XMLDocument` from LightXML.jl). One particularly useful argument is `package_path`, which accepts a list of strings to use as potential directories to search when encountering mesh files using the ROS `package://` syntax.
```julia
visual_elements(mechanism,
URDFVisuals(AtlasRobot.urdfpath(),
package_path=[AtlasRobot.packagepath()]))
```

### URDF Extensions
The following extensions to the URDF spec are parsed by MechanismGeometries.jl:
* `<plane normal="0 0 1"/>`: Represents an infinite plane perpendicular to the `normal` given as an x y z unit vector. Returns a MechanismGeometries.HyperPlane
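For instance, a hedged sketch (assuming `ground.urdf` is a hypothetical URDF file whose collision geometry contains such a `<plane>` tag, as in this package's test suite):

```julia
using MechanismGeometries, RigidBodyDynamics

robot = parse_urdf("ground.urdf", remove_fixed_tree_joints=false)
elements = visual_elements(robot, URDFVisuals("ground.urdf"; tag="collision"))
# Planes are returned as MechanismGeometries.HyperPlane elements
planes = filter(el -> el.geometry isa HyperPlane, elements)
```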
| MechanismGeometries | https://github.com/JuliaRobotics/MechanismGeometries.jl.git |
|
[
"MIT"
] | 0.7.1 | 68888f0d012aae809dbbe90f5ea6e1c8c5c5f737 | code | 1702 | using Documenter, DocumenterCitations, ExaModels, Literate
if !(@isdefined _LATEX)
const _LATEX = true
end
if !(@isdefined _PAGES)
const _PAGES = [
"Introduction" => "index.md",
"Mathematical Abstraction" => "simd.md",
"Tutorial" => [
"guide.md",
"performance.md",
"gpu.md",
"develop.md",
"quad.md",
"distillation.md",
"opf.md",
],
"JuMP Interface (experimental)" => "jump.md",
"API Manual" => "core.md",
"References" => "ref.md",
]
end
if !(@isdefined _JL_FILENAMES)
const _JL_FILENAMES = [
"guide.jl",
"jump.jl",
"quad.jl",
"distillation.jl",
"opf.jl",
"gpu.jl",
"performance.jl",
]
end
for jl_filename in _JL_FILENAMES
Literate.markdown(
joinpath(@__DIR__, "src", jl_filename),
joinpath(@__DIR__, "src");
documenter = true,
execute = true,
)
end
bib = CitationBibliography(joinpath(@__DIR__, "src", "refs.bib"))
# if _LATEX
# makedocs(
# bib,
# sitename = "ExaModels",
# authors = "Sungho Shin",
# format = Documenter.LaTeX(),
# pages = _PAGES,
# )
# end
makedocs(
plugins = [bib],
sitename = "ExaModels.jl",
modules = [ExaModels],
authors = "Sungho Shin",
format = Documenter.HTML(
assets = ["assets/favicon.ico", "assets/citations.css"],
prettyurls = true,
sidebar_sitename = true,
collapselevel = 1,
),
pages = _PAGES,
clean = false,
)
deploydocs(repo = "github.com/exanauts/ExaModels.jl.git"; push_preview = true)
| ExaModels | https://github.com/exanauts/ExaModels.jl.git |
|
[
"MIT"
] | 0.7.1 | 68888f0d012aae809dbbe90f5ea6e1c8c5c5f737 | code | 2235 | # # [Example: Distillation Column](@id distillation)
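# This example demonstrates the use of two-dimensional index sets for variables (e.g., `xA[t, i]`, indexed over time steps and column stages), along with multiple objective and constraint blocks within a single model.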
function distillation_column_model(T = 3; backend = nothing)
NT = 30
FT = 17
Ac = 0.5
At = 0.25
Ar = 1.0
D = 0.2
F = 0.4
ybar = 0.8958
ubar = 2.0
alpha = 1.6
dt = 10 / T
xAf = 0.5
xA0s = ExaModels.convert_array([(i, 0.5) for i = 0:NT+1], backend)
itr0 = ExaModels.convert_array(collect(Iterators.product(1:T, 1:FT-1)), backend)
itr1 = ExaModels.convert_array(collect(Iterators.product(1:T, FT+1:NT)), backend)
itr2 = ExaModels.convert_array(collect(Iterators.product(0:T, 0:NT+1)), backend)
c = ExaCore(backend)
xA = variable(c, 0:T, 0:NT+1; start = 0.5)
yA = variable(c, 0:T, 0:NT+1; start = 0.5)
u = variable(c, 0:T; start = 1.0)
V = variable(c, 0:T; start = 1.0)
L2 = variable(c, 0:T; start = 1.0)
objective(c, (yA[t, 1] - ybar)^2 for t = 0:T)
objective(c, (u[t] - ubar)^2 for t = 0:T)
constraint(c, xA[0, i] - xA0 for (i, xA0) in xA0s)
constraint(
c,
(xA[t, 0] - xA[t-1, 0]) / dt - (1 / Ac) * (yA[t, 1] - xA[t, 0]) for t = 1:T
)
constraint(
c,
(xA[t, i] - xA[t-1, i]) / dt -
(1 / At) * (u[t] * D * (yA[t, i-1] - xA[t, i]) - V[t] * (yA[t, i] - yA[t, i+1])) for
(t, i) in itr0
)
constraint(
c,
(xA[t, FT] - xA[t-1, FT]) / dt -
(1 / At) * (
F * xAf + u[t] * D * xA[t, FT-1] - L2[t] * xA[t, FT] -
V[t] * (yA[t, FT] - yA[t, FT+1])
) for t = 1:T
)
constraint(
c,
(xA[t, i] - xA[t-1, i]) / dt -
(1 / At) * (L2[t] * (yA[t, i-1] - xA[t, i]) - V[t] * (yA[t, i] - yA[t, i+1])) for
(t, i) in itr1
)
constraint(
c,
(xA[t, NT+1] - xA[t-1, NT+1]) / dt -
(1 / Ar) * (L2[t] * xA[t, NT] - (F - D) * xA[t, NT+1] - V[t] * yA[t, NT+1]) for
t = 1:T
)
constraint(c, V[t] - u[t] * D - D for t = 0:T)
constraint(c, L2[t] - u[t] * D - F for t = 0:T)
constraint(
c,
yA[t, i] * (1 - xA[t, i]) - alpha * xA[t, i] * (1 - yA[t, i]) for (t, i) in itr2
)
return ExaModel(c)
end
#-
using ExaModels, NLPModelsIpopt
m = distillation_column_model(10)
ipopt(m)
| ExaModels | https://github.com/exanauts/ExaModels.jl.git |
|
[
"MIT"
] | 0.7.1 | 68888f0d012aae809dbbe90f5ea6e1c8c5c5f737 | code | 2591 | # # Accelerations
# One of the key features of ExaModels.jl is the ability to evaluate derivatives either on multi-threaded CPUs or on GPU accelerators. Currently, GPU acceleration is only tested for NVIDIA GPUs. If you'd like to use multi-threaded CPU acceleration, start Julia with
# ```
# $ julia -t 4 # using 4 threads
# ```
# Also, if you're using NVIDIA GPUs, make sure that the appropriate drivers are installed.
# Let's say that our CPU code is as follows.
function luksan_vlcek_obj(x, i)
return 100 * (x[i-1]^2 - x[i])^2 + (x[i-1] - 1)^2
end
function luksan_vlcek_con(x, i)
return 3x[i+1]^3 + 2 * x[i+2] - 5 + sin(x[i+1] - x[i+2])sin(x[i+1] + x[i+2]) + 4x[i+1] -
x[i]exp(x[i] - x[i+1]) - 3
end
function luksan_vlcek_x0(i)
return mod(i, 2) == 1 ? -1.2 : 1.0
end
function luksan_vlcek_model(N)
c = ExaCore()
x = variable(c, N; start = (luksan_vlcek_x0(i) for i = 1:N))
constraint(c, luksan_vlcek_con(x, i) for i = 1:N-2)
objective(c, luksan_vlcek_obj(x, i) for i = 2:N)
return ExaModel(c)
end
# Now we simply modify this by
function luksan_vlcek_model(N, backend = nothing)
c = ExaCore(; backend = backend) # specify the backend
x = variable(c, N; start = (luksan_vlcek_x0(i) for i = 1:N))
constraint(c, luksan_vlcek_con(x, i) for i = 1:N-2)
objective(c, luksan_vlcek_obj(x, i) for i = 2:N)
return ExaModel(c)
end
# The acceleration can be done simply by specifying the backend. In particular, for multi-threaded CPUs,
using ExaModels, NLPModelsIpopt, KernelAbstractions
m = luksan_vlcek_model(10, CPU())
ipopt(m)
# For NVIDIA GPUs, we can use `CUDABackend`. However, currently, there are not many optimization solvers that are capable of solving problems on GPUs. The only option right now is using [MadNLP.jl](https://github.com/MadNLP/MadNLP.jl). To use this, first install
# ```julia
# import Pkg; Pkg.add("MadNLPGPU")
# ```
# Then, we can run:
# ```julia
# using CUDA, MadNLPGPU
#
# m = luksan_vlcek_model(10, CUDABackend())
# madnlp(m)
# ```
# In case we have arrays for the data, we simply need to convert the array types to the corresponding device array types. In particular,
function cuda_luksan_vlcek_model(N)
c = ExaCore(; backend = CUDABackend())
d1 = CuArray(1:N-2)
d2 = CuArray(2:N)
d3 = CuArray([luksan_vlcek_x0(i) for i = 1:N])
x = variable(c, N; start = d3)
constraint(c, luksan_vlcek_con(x, i) for i in d1)
objective(c, luksan_vlcek_obj(x, i) for i in d2)
return ExaModel(c)
end
# ```julia
# m = cuda_luksan_vlcek_model(10)
# madnlp(m)
# ```
| ExaModels | https://github.com/exanauts/ExaModels.jl.git |
|
[
"MIT"
] | 0.7.1 | 68888f0d012aae809dbbe90f5ea6e1c8c5c5f737 | code | 4616 | # # [Getting Started](@id guide)
# ExaModels can create nonlinear programming models and allows solving the created models using NLP solvers (in particular, those that are interfaced with `NLPModels`, such as [NLPModelsIpopt](https://github.com/JuliaSmoothOptimizers/NLPModelsIpopt.jl) and [MadNLP](https://github.com/MadNLP/MadNLP.jl)). This documentation page describes how to use `ExaModels` to model and solve nonlinear optimization problems.
# We will first consider the following simple nonlinear program [lukvsan1998indefinitely](@cite):
# ```math
# \begin{aligned}
# \min_{\{x_i\}_{i=1}^N} &\sum_{i=2}^N 100(x_{i-1}^2-x_i)^2+(x_{i-1}-1)^2\\
# \text{s.t.} & 3x_{i+1}^3+2x_{i+2}-5+\sin(x_{i+1}-x_{i+2})\sin(x_{i+1}+x_{i+2})+4x_{i+1}-x_i e^{x_i-x_{i+1}}-3 = 0, \quad i=1,\ldots,N-2
# \end{aligned}
# ```
# We will take the following steps to create and solve this optimization problem:
# - Step 0: import ExaModels.jl.
# - Step 1: create an [`ExaCore`](@ref) object, wherein we can progressively build an optimization model.
# - Step 2: create optimization variables with [`variable`](@ref), attaching them to the previously created `ExaCore`.
# - Step 3 (interchangeable with Step 4): create the objective function with [`objective`](@ref), attaching it to the previously created `ExaCore`.
# - Step 4 (interchangeable with Step 3): create constraints with [`constraint`](@ref), attaching them to the previously created `ExaCore`.
# - Step 5: create an [`ExaModel`](@ref) based on the `ExaCore`.
# Now, let's jump right in. We import ExaModels via (Step 0):
using ExaModels
# Now, all the functions that are necessary for creating models are imported into `Main`.
# An `ExaCore` object can be created simply by (Step 1):
c = ExaCore()
# This is where our optimization model information will be progressively stored. This object is not yet an `NLPModel`, but it will essentially store all the necessary information.
# Now, let's create the optimization variables. From the problem definition, we can see that we will need $N$ scalar variables. We will choose $N=10$ and create the variable $x\in\mathbb{R}^{N}$ with the following command:
N = 10
x = variable(c, N; start = (mod(i, 2) == 1 ? -1.2 : 1.0 for i = 1:N))
# This creates the variable `x`, which we can refer to later when creating the objective and constraints. This also updates the information in the `ExaCore` object so that an optimization model can later be created with the necessary information. Observe that we have used the keyword argument `start` to specify the initial guess for the solution. The variable upper and lower bounds can be specified in a similar manner.
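# For example, a sketch (not executed here; the bounds of ±10 are hypothetical) mirroring the `lvar`/`uvar` keyword usage from the [OPF example](@ref opf):
# ```julia
# x = variable(c, N; start = (mod(i, 2) == 1 ? -1.2 : 1.0 for i = 1:N), lvar = fill(-10.0, N), uvar = fill(10.0, N))
# ```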
# The objective can be set as follows:
objective(c, 100 * (x[i-1]^2 - x[i])^2 + (x[i-1] - 1)^2 for i = 2:N)
# !!! note
# Note that the terms here are summed, without explicitly using `sum( ... )` syntax.
# The constraints can be set as follows:
constraint(
c,
3x[i+1]^3 + 2 * x[i+2] - 5 + sin(x[i+1] - x[i+2])sin(x[i+1] + x[i+2]) + 4x[i+1] -
x[i]exp(x[i] - x[i+1]) - 3 for i = 1:N-2
)
# Finally, we are ready to create an `ExaModel` from the data we have collected in `ExaCore`. Since `ExaCore` includes all the necessary information, we can do this simply by:
m = ExaModel(c)
# Now we have an optimization model ready to be solved. This problem can be solved, for example, with the Ipopt solver, as follows.
using NLPModelsIpopt
result = ipopt(m)
# Here, `result` is an `AbstractExecutionStats`, which typically contains the solution information. We can inspect several pieces of information as follows.
println("Status: $(result.status)")
println("Number of iterations: $(result.iter)")
# The solution values for variable `x` can be inquired by:
sol = solution(result, x)
# ExaModels provides several APIs similar to this:
# - [`solution`](@ref) inquires the primal solution.
# - [`multipliers`](@ref) inquires the dual solution.
# - [`multipliers_L`](@ref) inquires the lower bound dual solution.
# - [`multipliers_U`](@ref) inquires the upper bound dual solution.
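# For example, a sketch (not executed here, since the constraint call above was not bound to a name): if we had written `con = constraint(c, ...)`, the associated dual solution could be inquired with
# ```julia
# y = multipliers(result, con)
# ```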
# This concludes a short tutorial on how to use ExaModels to model and solve optimization problems. Want to learn more? Take a look at the following examples, which provide further guidance on how to use ExaModels.jl. Each example is designed to illustrate a few additional techniques.
# - [Example: Quadrotor](@ref quad): modeling multiple types of objective values and constraints.
# - [Example: Distillation Column](@ref distillation): using two-dimensional index sets for variables.
# - [Example: Optimal Power Flow](@ref opf): handling complex data and using constraint augmentation.
| ExaModels | https://github.com/exanauts/ExaModels.jl.git |
|
[
"MIT"
] | 0.7.1 | 68888f0d012aae809dbbe90f5ea6e1c8c5c5f737 | code | 1365 | # # JuMP Interface (Experimental)
# ## JuMP to an ExaModel
# We have an experimental interface to JuMP models. A JuMP model can be directly converted to an `ExaModel`. It is as simple as this:
using ExaModels, JuMP, CUDA
N = 10
jm = Model()
@variable(jm, x[i = 1:N], start = mod(i, 2) == 1 ? -1.2 : 1.0)
@constraint(
jm,
s[i = 1:N-2],
3x[i+1]^3 + 2x[i+2] - 5 + sin(x[i+1] - x[i+2])sin(x[i+1] + x[i+2]) + 4x[i+1] -
x[i]exp(x[i] - x[i+1]) - 3 == 0.0
)
@objective(jm, Min, sum(100(x[i-1]^2 - x[i])^2 + (x[i-1] - 1)^2 for i = 2:N))
em = ExaModel(jm; backend = CUDABackend())
# Here, note that only scalar objectives/constraints created via the `@constraint` and `@objective` APIs are supported. Older syntax like `@NLconstraint` and `@NLobjective` is not supported.
# We can solve the model using any of the solvers supported by ExaModels. For example, we can use MadNLP:
using MadNLPGPU
result = madnlp(em)
# ## JuMP Optimizer
# Alternatively, one can use the `Optimizer` interface provided by `ExaModels`. This feature can be used as follows.
using ExaModels, JuMP, CUDA
using MadNLPGPU
set_optimizer(jm, () -> ExaModels.MadNLPOptimizer(CUDABackend()))
optimize!(jm)
# Again, only scalar objectives/constraints created via the `@constraint` and `@objective` APIs are supported. Older syntax like `@NLconstraint` and `@NLobjective` is not supported.
| ExaModels | https://github.com/exanauts/ExaModels.jl.git |
|
[
"MIT"
] | 0.7.1 | 68888f0d012aae809dbbe90f5ea6e1c8c5c5f737 | code | 7072 | # # [Example: Optimal Power Flow](@id opf)
function parse_ac_power_data(filename)
data = PowerModels.parse_file(filename)
PowerModels.standardize_cost_terms!(data, order = 2)
PowerModels.calc_thermal_limits!(data)
ref = PowerModels.build_ref(data)[:it][:pm][:nw][0]
arcdict = Dict(a => k for (k, a) in enumerate(ref[:arcs]))
busdict = Dict(k => i for (i, (k, v)) in enumerate(ref[:bus]))
gendict = Dict(k => i for (i, (k, v)) in enumerate(ref[:gen]))
branchdict = Dict(k => i for (i, (k, v)) in enumerate(ref[:branch]))
return (
bus = [
begin
bus_loads = [ref[:load][l] for l in ref[:bus_loads][k]]
bus_shunts = [ref[:shunt][s] for s in ref[:bus_shunts][k]]
pd = sum(load["pd"] for load in bus_loads; init = 0.0)
gs = sum(shunt["gs"] for shunt in bus_shunts; init = 0.0)
qd = sum(load["qd"] for load in bus_loads; init = 0.0)
bs = sum(shunt["bs"] for shunt in bus_shunts; init = 0.0)
(i = busdict[k], pd = pd, gs = gs, qd = qd, bs = bs)
end for (k, v) in ref[:bus]
],
gen = [
(
i = gendict[k],
cost1 = v["cost"][1],
cost2 = v["cost"][2],
cost3 = v["cost"][3],
bus = busdict[v["gen_bus"]],
) for (k, v) in ref[:gen]
],
arc = [
(i = k, rate_a = ref[:branch][l]["rate_a"], bus = busdict[i]) for
(k, (l, i, j)) in enumerate(ref[:arcs])
],
branch = [
begin
f_idx = arcdict[i, branch["f_bus"], branch["t_bus"]]
t_idx = arcdict[i, branch["t_bus"], branch["f_bus"]]
g, b = PowerModels.calc_branch_y(branch)
tr, ti = PowerModels.calc_branch_t(branch)
ttm = tr^2 + ti^2
g_fr = branch["g_fr"]
b_fr = branch["b_fr"]
g_to = branch["g_to"]
b_to = branch["b_to"]
c1 = (-g * tr - b * ti) / ttm
c2 = (-b * tr + g * ti) / ttm
c3 = (-g * tr + b * ti) / ttm
c4 = (-b * tr - g * ti) / ttm
c5 = (g + g_fr) / ttm
c6 = (b + b_fr) / ttm
c7 = (g + g_to)
c8 = (b + b_to)
(
i = branchdict[i],
j = 1,
f_idx = f_idx,
t_idx = t_idx,
f_bus = busdict[branch["f_bus"]],
t_bus = busdict[branch["t_bus"]],
c1 = c1,
c2 = c2,
c3 = c3,
c4 = c4,
c5 = c5,
c6 = c6,
c7 = c7,
c8 = c8,
rate_a_sq = branch["rate_a"]^2,
)
end for (i, branch) in ref[:branch]
],
ref_buses = [busdict[i] for (i, k) in ref[:ref_buses]],
vmax = [v["vmax"] for (k, v) in ref[:bus]],
vmin = [v["vmin"] for (k, v) in ref[:bus]],
pmax = [v["pmax"] for (k, v) in ref[:gen]],
pmin = [v["pmin"] for (k, v) in ref[:gen]],
qmax = [v["qmax"] for (k, v) in ref[:gen]],
qmin = [v["qmin"] for (k, v) in ref[:gen]],
rate_a = [ref[:branch][l]["rate_a"] for (k, (l, i, j)) in enumerate(ref[:arcs])],
angmax = [b["angmax"] for (i, b) in ref[:branch]],
angmin = [b["angmin"] for (i, b) in ref[:branch]],
)
end
convert_data(data::N, backend) where {names,N<:NamedTuple{names}} =
NamedTuple{names}(ExaModels.convert_array(d, backend) for d in data)
parse_ac_power_data(filename, backend) =
convert_data(parse_ac_power_data(filename), backend)
function ac_power_model(filename; backend = nothing, T = Float64)
data = parse_ac_power_data(filename, backend)
w = ExaCore(T; backend = backend)
va = variable(w, length(data.bus);)
vm = variable(
w,
length(data.bus);
start = fill!(similar(data.bus, Float64), 1.0),
lvar = data.vmin,
uvar = data.vmax,
)
pg = variable(w, length(data.gen); lvar = data.pmin, uvar = data.pmax)
qg = variable(w, length(data.gen); lvar = data.qmin, uvar = data.qmax)
p = variable(w, length(data.arc); lvar = -data.rate_a, uvar = data.rate_a)
q = variable(w, length(data.arc); lvar = -data.rate_a, uvar = data.rate_a)
o = objective(w, g.cost1 * pg[g.i]^2 + g.cost2 * pg[g.i] + g.cost3 for g in data.gen)
c1 = constraint(w, va[i] for i in data.ref_buses)
c2 = constraint(
w,
p[b.f_idx] - b.c5 * vm[b.f_bus]^2 -
b.c3 * (vm[b.f_bus] * vm[b.t_bus] * cos(va[b.f_bus] - va[b.t_bus])) -
b.c4 * (vm[b.f_bus] * vm[b.t_bus] * sin(va[b.f_bus] - va[b.t_bus])) for
b in data.branch
)
c3 = constraint(
w,
q[b.f_idx] +
b.c6 * vm[b.f_bus]^2 +
b.c4 * (vm[b.f_bus] * vm[b.t_bus] * cos(va[b.f_bus] - va[b.t_bus])) -
b.c3 * (vm[b.f_bus] * vm[b.t_bus] * sin(va[b.f_bus] - va[b.t_bus])) for
b in data.branch
)
c4 = constraint(
w,
p[b.t_idx] - b.c7 * vm[b.t_bus]^2 -
b.c1 * (vm[b.t_bus] * vm[b.f_bus] * cos(va[b.t_bus] - va[b.f_bus])) -
b.c2 * (vm[b.t_bus] * vm[b.f_bus] * sin(va[b.t_bus] - va[b.f_bus])) for
b in data.branch
)
c5 = constraint(
w,
q[b.t_idx] +
b.c8 * vm[b.t_bus]^2 +
b.c2 * (vm[b.t_bus] * vm[b.f_bus] * cos(va[b.t_bus] - va[b.f_bus])) -
b.c1 * (vm[b.t_bus] * vm[b.f_bus] * sin(va[b.t_bus] - va[b.f_bus])) for
b in data.branch
)
c6 = constraint(
w,
va[b.f_bus] - va[b.t_bus] for b in data.branch;
lcon = data.angmin,
ucon = data.angmax,
)
c7 = constraint(
w,
p[b.f_idx]^2 + q[b.f_idx]^2 - b.rate_a_sq for b in data.branch;
lcon = fill!(similar(data.branch, Float64, length(data.branch)), -Inf),
)
c8 = constraint(
w,
p[b.t_idx]^2 + q[b.t_idx]^2 - b.rate_a_sq for b in data.branch;
lcon = fill!(similar(data.branch, Float64, length(data.branch)), -Inf),
)
c9 = constraint(w, b.pd + b.gs * vm[b.i]^2 for b in data.bus)
c10 = constraint(w, b.qd - b.bs * vm[b.i]^2 for b in data.bus)
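    # Constraint augmentation: each `constraint!(w, c, k => expr for ...)` call below adds
    # `expr` to the k-th equation of the existing constraint block `c`. Here, the arc flows
    # and generator injections are accumulated into the per-bus power balances c9 and c10.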
c11 = constraint!(w, c9, a.bus => p[a.i] for a in data.arc)
c12 = constraint!(w, c10, a.bus => q[a.i] for a in data.arc)
c13 = constraint!(w, c9, g.bus => -pg[g.i] for g in data.gen)
c14 = constraint!(w, c10, g.bus => -qg[g.i] for g in data.gen)
return ExaModel(w)
end
# We first download the case file.
using Downloads
case = tempname() * ".m"
Downloads.download(
"https://raw.githubusercontent.com/power-grid-lib/pglib-opf/dc6be4b2f85ca0e776952ec22cbd4c22396ea5a3/pglib_opf_case3_lmbd.m",
case,
)
# Then, we can model/solve the problem.
using PowerModels, ExaModels, NLPModelsIpopt
m = ac_power_model(case)
ipopt(m)
| ExaModels | https://github.com/exanauts/ExaModels.jl.git |
|
[
"MIT"
] | 0.7.1 | 68888f0d012aae809dbbe90f5ea6e1c8c5c5f737 | code | 6336 | # # Performance Tips
# ## Use a function to create a model
# It is always better to use functions to create ExaModels. This way, the functions used for specifying objective/constraint expressions are not recreated over and over, and thus we can take advantage of the previously compiled model creation code. Let's consider the following example.
using ExaModels
t = @elapsed begin
c = ExaCore()
N = 10
x = variable(c, N; start = (mod(i, 2) == 1 ? -1.2 : 1.0 for i = 1:N))
objective(c, 100 * (x[i-1]^2 - x[i])^2 + (x[i-1] - 1)^2 for i = 2:N)
constraint(
c,
3x[i+1]^3 + 2 * x[i+2] - 5 + sin(x[i+1] - x[i+2])sin(x[i+1] + x[i+2]) + 4x[i+1] - x[i]exp(x[i] - x[i+1]) - 3 for i = 1:N-2
)
m = ExaModel(c)
end
println("$t seconds elapsed")
# Even at the second call,
t = @elapsed begin
c = ExaCore()
N = 10
x = variable(c, N; start = (mod(i, 2) == 1 ? -1.2 : 1.0 for i = 1:N))
objective(c, 100 * (x[i-1]^2 - x[i])^2 + (x[i-1] - 1)^2 for i = 2:N)
constraint(
c,
3x[i+1]^3 + 2 * x[i+2] - 5 + sin(x[i+1] - x[i+2])sin(x[i+1] + x[i+2]) + 4x[i+1] - x[i]exp(x[i] - x[i+1]) - 3 for i = 1:N-2
)
m = ExaModel(c)
end
println("$t seconds elapsed")
# the model creation time can be slightly reduced but the compilation time is still quite significant.
# But if you instead create a function, the model creation time can be reduced significantly.
function luksan_vlcek_model(N)
c = ExaCore()
x = variable(c, N; start = (mod(i, 2) == 1 ? -1.2 : 1.0 for i = 1:N))
objective(c, 100 * (x[i-1]^2 - x[i])^2 + (x[i-1] - 1)^2 for i = 2:N)
constraint(
c,
3x[i+1]^3 + 2 * x[i+2] - 5 + sin(x[i+1] - x[i+2])sin(x[i+1] + x[i+2]) + 4x[i+1] -
x[i]exp(x[i] - x[i+1]) - 3 for i = 1:N-2
)
m = ExaModel(c)
end
t = @elapsed luksan_vlcek_model(N)
println("$t seconds elapsed")
#-
t = @elapsed luksan_vlcek_model(N)
println("$t seconds elapsed")
# So, the model creation time becomes essentially negligible. Thus, if you care about the model creation time, always make sure to write a function for creating the model, and do not create the model directly from the REPL.
# ## Make sure your array's eltype is concrete
# In order for ExaModels to run for loops over the array you provided without any overhead caused by type inference, the eltype of the data array should always be a concrete type. Furthermore, this is **required** if you want to run ExaModels on GPU accelerators.
# Let's take an example.
using ExaModels
N = 1000
function luksan_vlcek_model_concrete(N)
c = ExaCore()
arr1 = Array(2:N)
arr2 = Array(1:N-2)
x = variable(c, N; start = (mod(i, 2) == 1 ? -1.2 : 1.0 for i = 1:N))
objective(c, 100 * (x[i-1]^2 - x[i])^2 + (x[i-1] - 1)^2 for i in arr1)
constraint(
c,
3x[i+1]^3 + 2 * x[i+2] - 5 + sin(x[i+1] - x[i+2])sin(x[i+1] + x[i+2]) + 4x[i+1] -
x[i]exp(x[i] - x[i+1]) - 3 for i in arr2
)
m = ExaModel(c)
end
function luksan_vlcek_model_non_concrete(N)
c = ExaCore()
arr1 = Array{Any}(2:N)
arr2 = Array{Any}(1:N-2)
x = variable(c, N; start = (mod(i, 2) == 1 ? -1.2 : 1.0 for i = 1:N))
objective(c, 100 * (x[i-1]^2 - x[i])^2 + (x[i-1] - 1)^2 for i in arr1)
constraint(
c,
3x[i+1]^3 + 2 * x[i+2] - 5 + sin(x[i+1] - x[i+2])sin(x[i+1] + x[i+2]) + 4x[i+1] -
x[i]exp(x[i] - x[i+1]) - 3 for i in arr2
)
m = ExaModel(c)
end
# Here, observe that
isconcretetype(eltype(Array(2:N)))
#-
isconcretetype(eltype(Array{Any}(2:N)))
# As you can see, the first array type has a concrete eltype, whereas the second array type has a non-concrete eltype. Due to this, the arrays stored in the model created by `luksan_vlcek_model_non_concrete` will have non-concrete eltypes.
# Now let's compare the performance. We will use the following benchmark function here.
using NLPModels
function benchmark_callbacks(m; N = 100)
nvar = m.meta.nvar
ncon = m.meta.ncon
nnzj = m.meta.nnzj
nnzh = m.meta.nnzh
x = copy(m.meta.x0)
y = similar(m.meta.x0, ncon)
c = similar(m.meta.x0, ncon)
g = similar(m.meta.x0, nvar)
jac = similar(m.meta.x0, nnzj)
hess = similar(m.meta.x0, nnzh)
jrows = similar(m.meta.x0, Int, nnzj)
jcols = similar(m.meta.x0, Int, nnzj)
hrows = similar(m.meta.x0, Int, nnzh)
hcols = similar(m.meta.x0, Int, nnzh)
GC.enable(false)
NLPModels.obj(m, x) # to compile
tobj = (1 / N) * @elapsed for t = 1:N
NLPModels.obj(m, x)
end
NLPModels.cons!(m, x, c) # to compile
tcon = (1 / N) * @elapsed for t = 1:N
NLPModels.cons!(m, x, c)
end
NLPModels.grad!(m, x, g) # to compile
tgrad = (1 / N) * @elapsed for t = 1:N
NLPModels.grad!(m, x, g)
end
NLPModels.jac_coord!(m, x, jac) # to compile
tjac = (1 / N) * @elapsed for t = 1:N
NLPModels.jac_coord!(m, x, jac)
end
NLPModels.hess_coord!(m, x, y, hess) # to compile
thess = (1 / N) * @elapsed for t = 1:N
NLPModels.hess_coord!(m, x, y, hess)
end
NLPModels.jac_structure!(m, jrows, jcols) # to compile
tjacs = (1 / N) * @elapsed for t = 1:N
NLPModels.jac_structure!(m, jrows, jcols)
end
NLPModels.hess_structure!(m, hrows, hcols) # to compile
thesss = (1 / N) * @elapsed for t = 1:N
NLPModels.hess_structure!(m, hrows, hcols)
end
GC.enable(true)
return (
tobj = tobj,
tcon = tcon,
tgrad = tgrad,
tjac = tjac,
thess = thess,
tjacs = tjacs,
thesss = thesss,
)
end
# The performance comparison is here:
m1 = luksan_vlcek_model_concrete(N)
m2 = luksan_vlcek_model_non_concrete(N)
benchmark_callbacks(m1)
#-
benchmark_callbacks(m2)
# As can be seen here, having a concrete eltype dramatically improves the performance. This is because when all the data arrays' eltypes are concrete, the AD evaluations can be performed without any type inference, and this should be as fast as highly optimized C/C++/Fortran code.
# When you're using GPU accelerators, the eltype of the array should always be concrete. In fact, a non-concrete eltype will already cause an error when creating the array. For example,
using CUDA
try
arr1 = CuArray(Array{Any}(2:N))
catch e
showerror(stdout, e)
end
| ExaModels | https://github.com/exanauts/ExaModels.jl.git |
|
[
"MIT"
] | 0.7.1 | 68888f0d012aae809dbbe90f5ea6e1c8c5c5f737 | code | 2427 | # # [Example: Quadrotor](@id quad)
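# This example demonstrates modeling multiple kinds of objective terms and constraints in a single model; the quadrotor dynamics are discretized with an explicit (forward) Euler scheme over `N` steps of length `dt`.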
function quadrotor_model(N = 3; backend = nothing)
n = 9
p = 4
nd = 9
d(i, j, N) =
(j == 1 ? 1 * sin(2 * pi / N * i) : 0.0) +
(j == 3 ? 2 * sin(4 * pi / N * i) : 0.0) +
(j == 5 ? 2 * i / N : 0.0)
dt = 0.01
R = fill(1 / 10, 4)
Q = [1, 0, 1, 0, 1, 0, 1, 1, 1]
Qf = [1, 0, 1, 0, 1, 0, 1, 1, 1] / dt
x0s = [(i, 0.0) for i = 1:n]
itr0 = [(i, j, R[j]) for (i, j) in Base.product(1:N, 1:p)]
itr1 = [(i, j, Q[j], d(i, j, N)) for (i, j) in Base.product(1:N, 1:n)]
itr2 = [(j, Qf[j], d(N + 1, j, N)) for j = 1:n]
c = ExaCore(; backend = backend)
x = variable(c, 1:N+1, 1:n)
u = variable(c, 1:N, 1:p)
constraint(c, x[1, i] - x0 for (i, x0) in x0s)
constraint(c, -x[i+1, 1] + x[i, 1] + (x[i, 2]) * dt for i = 1:N)
constraint(
c,
-x[i+1, 2] +
x[i, 2] +
(
u[i, 1] * cos(x[i, 7]) * sin(x[i, 8]) * cos(x[i, 9]) +
u[i, 1] * sin(x[i, 7]) * sin(x[i, 9])
) * dt for i = 1:N
)
constraint(c, -x[i+1, 3] + x[i, 3] + (x[i, 4]) * dt for i = 1:N)
constraint(
c,
-x[i+1, 4] +
x[i, 4] +
(
u[i, 1] * cos(x[i, 7]) * sin(x[i, 8]) * sin(x[i, 9]) -
u[i, 1] * sin(x[i, 7]) * cos(x[i, 9])
) * dt for i = 1:N
)
constraint(c, -x[i+1, 5] + x[i, 5] + (x[i, 6]) * dt for i = 1:N)
constraint(
c,
-x[i+1, 6] + x[i, 6] + (u[i, 1] * cos(x[i, 7]) * cos(x[i, 8]) - 9.8) * dt for
i = 1:N
)
constraint(
c,
-x[i+1, 7] +
x[i, 7] +
(u[i, 2] * cos(x[i, 7]) / cos(x[i, 8]) + u[i, 3] * sin(x[i, 7]) / cos(x[i, 8])) * dt
for i = 1:N
)
constraint(
c,
-x[i+1, 8] + x[i, 8] + (-u[i, 2] * sin(x[i, 7]) + u[i, 3] * cos(x[i, 7])) * dt for
i = 1:N
)
constraint(
c,
-x[i+1, 9] +
x[i, 9] +
(
u[i, 2] * cos(x[i, 7]) * tan(x[i, 8]) +
u[i, 3] * sin(x[i, 7]) * tan(x[i, 8]) +
u[i, 4]
) * dt for i = 1:N
)
objective(c, 0.5 * R * (u[i, j]^2) for (i, j, R) in itr0)
objective(c, 0.5 * Q * (x[i, j] - d)^2 for (i, j, Q, d) in itr1)
objective(c, 0.5 * Qf * (x[N+1, j] - d)^2 for (j, Qf, d) in itr2)
m = ExaModel(c)
end
#-
using ExaModels, NLPModelsIpopt
m = quadrotor_model(100)
result = ipopt(m)
| ExaModels | https://github.com/exanauts/ExaModels.jl.git |
|
[
"MIT"
] | 0.7.1 | 68888f0d012aae809dbbe90f5ea6e1c8c5c5f737 | code | 568 | module ExaModelsAMDGPU
import ExaModels, AMDGPU
ExaModels.convert_array(v, backend::AMDGPU.ROCBackend) = AMDGPU.ROCArray(v)
ExaModels.sort!(array::A; lt = isless) where {A<:AMDGPU.ROCVector} =
copyto!(array, sort!(Array(array); lt = lt))
# The methods below commit type piracy (extending `Base.findall` for `ROCVector`)
function Base.findall(f::F, bitarray::A) where {F<:Function,A<:AMDGPU.ROCVector}
a = Array(bitarray)
b = findall(f, a)
c = similar(bitarray, eltype(b), length(b))
return copyto!(c, b)
end
Base.findall(bitarray::A) where {A<:AMDGPU.ROCVector} = Base.findall(identity, bitarray)
end
| ExaModels | https://github.com/exanauts/ExaModels.jl.git |
|
[
"MIT"
] | 0.7.1 | 68888f0d012aae809dbbe90f5ea6e1c8c5c5f737 | code | 169 | module ExaModelsCUDA
import ExaModels: ExaModels, NLPModels
import CUDA: CUDA, CUDABackend, CuArray
ExaModels.convert_array(v, backend::CUDABackend) = CuArray(v)
end
| ExaModels | https://github.com/exanauts/ExaModels.jl.git |
|
[
"MIT"
] | 0.7.1 | 68888f0d012aae809dbbe90f5ea6e1c8c5c5f737 | code | 1137 | module ExaModelsIpopt
import ExaModels
import NLPModelsIpopt
import MathOptInterface
const MOI = MathOptInterface
function ExaModels.result_status_translator(::typeof(NLPModelsIpopt.ipopt), status)
Base.get(_RESULT_STATUS_CODES, status, MOI.UNKNOWN_RESULT_STATUS)
end
function ExaModels.termination_status_translator(::typeof(NLPModelsIpopt.ipopt), status)
Base.get(_TERMINATION_STATUS_CODES, status, MOI.OTHER_ERROR)
end
ExaModels.IpoptOptimizer(; kwargs...) =
ExaModels.Optimizer(NLPModelsIpopt.ipopt, nothing; kwargs...)
const _RESULT_STATUS_CODES = Dict{Symbol,MathOptInterface.ResultStatusCode}(
:first_order => MOI.FEASIBLE_POINT,
:acceptable => MOI.NEARLY_FEASIBLE_POINT,
:infeasible => MOI.INFEASIBLE_POINT,
)
const _TERMINATION_STATUS_CODES = Dict{Symbol,MOI.TerminationStatusCode}(
:first_order => MOI.LOCALLY_SOLVED,
:acceptable => MOI.ALMOST_LOCALLY_SOLVED,
:small_step => MOI.SLOW_PROGRESS,
:infeasible => MOI.INFEASIBLE_OR_UNBOUNDED,
:max_iter => MOI.ITERATION_LIMIT,
:max_time => MOI.TIME_LIMIT,
:user => MOI.INTERRUPTED,
:exception => MOI.OTHER_ERROR,
)
end
| ExaModels | https://github.com/exanauts/ExaModels.jl.git |
|
[
"MIT"
] | 0.7.1 | 68888f0d012aae809dbbe90f5ea6e1c8c5c5f737 | code | 244 | module ExaModelsJuMP
import ExaModels
import JuMP
function ExaModels.ExaModel(jm::JuMP.GenericModel{T}; backend = nothing) where {T}
return ExaModels.ExaModel(jm.moi_backend.model_cache; backend = backend)
end
end # module ExaModelsJuMP
| ExaModels | https://github.com/exanauts/ExaModels.jl.git |
|
[
"MIT"
] | 0.7.1 | 68888f0d012aae809dbbe90f5ea6e1c8c5c5f737 | code | 19671 | module ExaModelsKernelAbstractions
import ExaModels: ExaModels, NLPModels
import KernelAbstractions: KernelAbstractions, @kernel, @index, @Const, synchronize, CPU
ExaModels.convert_array(v, backend::CPU) = v
function getitr(gen::UnitRange{Int64})
return gen
end
function getitr(gen::Base.Iterators.ProductIterator{NTuple{N,UnitRange{Int64}}}) where {N} end
ExaModels.ExaCore(T, backend::KernelAbstractions.CPU) =
ExaModels.ExaCore(x0 = zeros(T, 0), backend = backend)
ExaModels.ExaCore(backend::KernelAbstractions.CPU) = ExaModels.ExaCore(backend = backend)
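# `getptr` computes group-boundary pointers over a (sorted) array: `kergetptr` fills a
# Boolean mask marking the positions where `cmp` detects a change between consecutive
# entries, and `findall` then collects the marked positions into a pointer vector.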
function ExaModels.getptr(backend, array; cmp = (x, y) -> x != y)
bitarray = similar(array, Bool, length(array) + 1)
kergetptr(backend)(cmp, bitarray, array; ndrange = length(array) + 1)
synchronize(backend)
return ExaModels.findall(identity, bitarray)
end
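# Extension data carried by a KernelAbstractions-backed ExaModel: scratch buffers for the
# objective, gradient, and augmented-constraint evaluations, the corresponding sparsity
# patterns with group pointers, and (when constructed with `prod = true`) the helper data
# used to evaluate Jacobian/Hessian products as sparse matrix-vector products.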
struct KAExtension{T,VT<:AbstractVector{T},H,VI1,VI2,B}
backend::B
objbuffer::VT
gradbuffer::VT
gsparsity::VI1
gptr::VI2
conbuffer::VT
conaugsparsity::VI1
conaugptr::VI2
prodhelper::H
end
function ExaModels.ExaModel(
c::C;
prod = false,
kwargs...,
) where {T,VT<:AbstractVector{T},B<:KernelAbstractions.Backend,C<:ExaModels.ExaCore{T,VT,B}}
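    # Build sorted sparsity patterns and group pointers for the gradient and the augmented
    # constraints; with `prod = true`, additionally precompute row- and column-sorted
    # Jacobian/Hessian entries so that jprod/jtprod/hprod reduce to sparse mat-vecs.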
gsparsity = similar(c.x0, Tuple{Int,Int}, c.nnzg)
_grad_structure!(c.backend, c.obj, gsparsity)
if !isempty(gsparsity)
ExaModels.sort!(gsparsity; lt = ((i, j), (k, l)) -> i < k)
end
gptr = ExaModels.getptr(c.backend, gsparsity; cmp = (x, y) -> x[1] != y[1])
conaugsparsity = similar(c.x0, Tuple{Int,Int}, c.nconaug)
_conaug_structure!(c.backend, c.con, conaugsparsity)
if !isempty(conaugsparsity)
ExaModels.sort!(conaugsparsity; lt = ((i, j), (k, l)) -> i < k)
end
conaugptr = ExaModels.getptr(c.backend, conaugsparsity; cmp = (x, y) -> x[1] != y[1])
if prod
jacbuffer = similar(c.x0, c.nnzj)
hessbuffer = similar(c.x0, c.nnzh)
jacsparsityi = similar(c.x0, Tuple{Tuple{Int,Int},Int}, c.nnzj)
hesssparsityi = similar(c.x0, Tuple{Tuple{Int,Int},Int}, c.nnzh)
_jac_structure!(c.backend, c.con, jacsparsityi, nothing)
jacsparsityj = copy(jacsparsityi)
_obj_hess_structure!(c.backend, c.obj, hesssparsityi, nothing)
_con_hess_structure!(c.backend, c.con, hesssparsityi, nothing)
hesssparsityj = copy(hesssparsityi)
if !isempty(jacsparsityi)
ExaModels.sort!(jacsparsityi; lt = (((i, j), k), ((n, m), l)) -> i < n)
end
jacptri =
ExaModels.getptr(c.backend, jacsparsityi; cmp = (x, y) -> x[1][1] != y[1][1])
if !isempty(jacsparsityj)
ExaModels.sort!(jacsparsityj; lt = (((i, j), k), ((n, m), l)) -> j < m)
end
jacptrj =
ExaModels.getptr(c.backend, jacsparsityj; cmp = (x, y) -> x[1][2] != y[1][2])
if !isempty(hesssparsityi)
ExaModels.sort!(hesssparsityi; lt = (((i, j), k), ((n, m), l)) -> i < n)
end
hessptri =
ExaModels.getptr(c.backend, hesssparsityi; cmp = (x, y) -> x[1][1] != y[1][1])
if !isempty(hesssparsityj)
ExaModels.sort!(hesssparsityj; lt = (((i, j), k), ((n, m), l)) -> j < m)
end
hessptrj =
ExaModels.getptr(c.backend, hesssparsityj; cmp = (x, y) -> x[1][2] != y[1][2])
prodhelper = (
jacbuffer = jacbuffer,
jacsparsityi = jacsparsityi,
jacsparsityj = jacsparsityj,
jacptri = jacptri,
jacptrj = jacptrj,
hessbuffer = hessbuffer,
hesssparsityi = hesssparsityi,
hesssparsityj = hesssparsityj,
hessptri = hessptri,
hessptrj = hessptrj,
)
else
prodhelper = nothing
end
return ExaModels.ExaModel(
c.obj,
c.con,
NLPModels.NLPModelMeta(
c.nvar,
ncon = c.ncon,
nnzj = c.nnzj,
nnzh = c.nnzh,
x0 = c.x0,
lvar = c.lvar,
uvar = c.uvar,
y0 = c.y0,
lcon = c.lcon,
ucon = c.ucon,
),
NLPModels.Counters(),
KAExtension(
c.backend,
similar(c.x0, c.nobj),
similar(c.x0, c.nnzg),
gsparsity,
gptr,
similar(c.x0, c.nconaug),
conaugsparsity,
conaugptr,
prodhelper,
),
)
end
function _conaug_structure!(backend, cons, sparsity)
if !isempty(cons.itr)
kers(backend)(sparsity, cons.f, cons.itr, cons.oa; ndrange = length(cons.itr))
end
_conaug_structure!(backend, cons.inner, sparsity)
synchronize(backend)
end
function _conaug_structure!(backend, cons::ExaModels.Constraint, sparsity)
_conaug_structure!(backend, cons.inner, sparsity)
end
function _conaug_structure!(backend, cons::ExaModels.ConstraintNull, sparsity) end
@kernel function kers(sparsity, @Const(f), @Const(itr), @Const(oa))
I = @index(Global)
@inbounds sparsity[oa+I] = (ExaModels.offset0(f, itr, I), oa + I)
end
function _grad_structure!(backend, objs, gsparsity)
ExaModels.sgradient!(backend, gsparsity, objs, nothing, NaN)
_grad_structure!(backend, objs.inner, gsparsity)
synchronize(backend)
end
function _grad_structure!(backend, objs::ExaModels.ObjectiveNull, gsparsity) end
function ExaModels.jac_structure!(
m::ExaModels.ExaModel{T,VT,E} where {T,VT,E<:KAExtension},
rows::V,
cols::V,
) where {V<:AbstractVector}
if !isempty(rows)
_jac_structure!(m.ext.backend, m.cons, rows, cols)
end
end
function _jac_structure!(backend, cons, rows, cols)
ExaModels.sjacobian!(backend, rows, cols, cons, nothing, NaN)
_jac_structure!(backend, cons.inner, rows, cols)
synchronize(backend)
end
function _jac_structure!(backend, cons::ExaModels.ConstraintNull, rows, cols) end
function ExaModels.hess_structure!(
m::ExaModels.ExaModel{T,VT,E} where {T,VT,E<:KAExtension},
rows::V,
cols::V,
) where {V<:AbstractVector}
if !isempty(rows)
_obj_hess_structure!(m.ext.backend, m.objs, rows, cols)
_con_hess_structure!(m.ext.backend, m.cons, rows, cols)
end
end
function _obj_hess_structure!(backend, objs, rows, cols)
ExaModels.shessian!(backend, rows, cols, objs, nothing, NaN, NaN)
_obj_hess_structure!(backend, objs.inner, rows, cols)
synchronize(backend)
end
function _obj_hess_structure!(backend, objs::ExaModels.ObjectiveNull, rows, cols) end
function _con_hess_structure!(backend, cons, rows, cols)
ExaModels.shessian!(backend, rows, cols, cons, nothing, NaN, NaN)
_con_hess_structure!(backend, cons.inner, rows, cols)
synchronize(backend)
end
function _con_hess_structure!(backend, cons::ExaModels.ConstraintNull, rows, cols) end
function ExaModels.obj(
m::ExaModels.ExaModel{T,VT,E},
x::AbstractVector,
) where {T,VT,E<:KAExtension}
if !isempty(m.ext.objbuffer)
_obj(m.ext.backend, m.ext.objbuffer, m.objs, x)
result = ExaModels.sum(m.ext.objbuffer)
return result
else
return zero(T)
end
end
function _obj(backend, objbuffer, obj, x)
if !isempty(obj.itr)
kerf(backend)(objbuffer, obj.f, obj.itr, x; ndrange = length(obj.itr))
end
_obj(backend, objbuffer, obj.inner, x)
synchronize(backend)
end
function _obj(backend, objbuffer, f::ExaModels.ObjectiveNull, x) end
function ExaModels.cons_nln!(
m::ExaModels.ExaModel{T,VT,E},
x::AbstractVector,
y::AbstractVector,
) where {T,VT,E<:KAExtension}
_cons_nln!(m.ext.backend, y, m.cons, x)
_conaugs!(m.ext.backend, m.ext.conbuffer, m.cons, x)
if length(m.ext.conaugptr) > 1
compress_to_dense(m.ext.backend)(
y,
m.ext.conbuffer,
m.ext.conaugptr,
m.ext.conaugsparsity;
ndrange = length(m.ext.conaugptr) - 1,
)
synchronize(m.ext.backend)
end
end
function _cons_nln!(backend, y, con::ExaModels.Constraint, x)
if !isempty(con.itr)
kerf(backend)(y, con.f, con.itr, x; ndrange = length(con.itr))
end
_cons_nln!(backend, y, con.inner, x)
synchronize(backend)
end
function _cons_nln!(backend, y, con::ExaModels.ConstraintNull, x) end
function _cons_nln!(backend, y, con::ExaModels.ConstraintAug, x)
_cons_nln!(backend, y, con.inner, x)
end
function _conaugs!(backend, y, con::ExaModels.ConstraintAug, x)
if !isempty(con.itr)
kerf2(backend)(y, con.f, con.itr, x, con.oa; ndrange = length(con.itr))
end
_conaugs!(backend, y, con.inner, x)
synchronize(backend)
end
function _conaugs!(backend, y, con::ExaModels.Constraint, x)
_conaugs!(backend, y, con.inner, x)
end
function _conaugs!(backend, y, con::ExaModels.ConstraintNull, x) end
function ExaModels.grad!(
m::ExaModels.ExaModel{T,VT,E} where {T,VT,E<:KAExtension},
x::V,
y::V,
) where {V<:AbstractVector}
gradbuffer = m.ext.gradbuffer
if !isempty(gradbuffer)
fill!(gradbuffer, zero(eltype(gradbuffer)))
_grad!(m.ext.backend, m.ext.gradbuffer, m.objs, x)
fill!(y, zero(eltype(y)))
compress_to_dense(m.ext.backend)(
y,
gradbuffer,
m.ext.gptr,
m.ext.gsparsity;
ndrange = length(m.ext.gptr) - 1,
)
synchronize(m.ext.backend)
end
return y
end
function _grad!(backend, y, objs, x)
ExaModels.sgradient!(backend, y, objs, x, one(eltype(y)))
_grad!(backend, y, objs.inner, x)
synchronize(backend)
end
function _grad!(backend, y, objs::ExaModels.ObjectiveNull, x) end
function ExaModels.jac_coord!(
m::ExaModels.ExaModel{T,VT,E} where {T,VT,E<:KAExtension},
x::V,
y::V,
) where {V<:AbstractVector}
fill!(y, zero(eltype(y)))
_jac_coord!(m.ext.backend, y, m.cons, x)
end
function _jac_coord!(backend, y, cons, x)
ExaModels.sjacobian!(backend, y, nothing, cons, x, one(eltype(y)))
_jac_coord!(backend, y, cons.inner, x)
synchronize(backend)
end
function _jac_coord!(backend, y, cons::ExaModels.ConstraintNull, x) end
function ExaModels.jprod_nln!(
m::ExaModels.ExaModel{T,VT,E},
x::AbstractVector,
v::AbstractVector,
Jv::AbstractVector,
) where {T,VT,E<:KAExtension{T,VT,Nothing}}
error("Prodhelper is not defined. Use ExaModels(c; prod=true) to use jprod_nln!")
end
function ExaModels.jtprod_nln!(
m::ExaModels.ExaModel{T,VT,E},
x::AbstractVector,
v::AbstractVector,
Jtv::AbstractVector,
) where {T,VT,E<:KAExtension{T,VT,Nothing}}
error("Prodhelper is not defined. Use ExaModels(c; prod=true) to use jtprod_nln!")
end
function ExaModels.hprod!(
m::ExaModels.ExaModel{T,VT,E},
x::AbstractVector,
y::AbstractVector,
v::AbstractVector,
Hv::AbstractVector;
obj_weight = one(eltype(x)),
) where {T,VT,E<:KAExtension{T,VT,Nothing}}
error("Prodhelper is not defined. Use ExaModels(c; prod=true) to use hprod!")
end
function ExaModels.jprod_nln!(
m::ExaModels.ExaModel{T,VT,E},
x::AbstractVector,
v::AbstractVector,
Jv::AbstractVector,
) where {T,VT,E<:KAExtension}
fill!(Jv, zero(eltype(Jv)))
fill!(m.ext.prodhelper.jacbuffer, zero(eltype(Jv)))
_jac_coord!(m.ext.backend, m.ext.prodhelper.jacbuffer, m.cons, x)
synchronize(m.ext.backend)
kerspmv(m.ext.backend)(
Jv,
v,
m.ext.prodhelper.jacsparsityi,
m.ext.prodhelper.jacbuffer,
m.ext.prodhelper.jacptri,
ndrange = length(m.ext.prodhelper.jacptri) - 1,
)
synchronize(m.ext.backend)
end
function ExaModels.jtprod_nln!(
m::ExaModels.ExaModel{T,VT,E},
x::AbstractVector,
v::AbstractVector,
Jtv::AbstractVector,
) where {T,VT,E<:KAExtension}
fill!(Jtv, zero(eltype(Jtv)))
fill!(m.ext.prodhelper.jacbuffer, zero(eltype(Jtv)))
_jac_coord!(m.ext.backend, m.ext.prodhelper.jacbuffer, m.cons, x)
synchronize(m.ext.backend)
kerspmv2(m.ext.backend)(
Jtv,
v,
m.ext.prodhelper.jacsparsityj,
m.ext.prodhelper.jacbuffer,
m.ext.prodhelper.jacptrj,
ndrange = length(m.ext.prodhelper.jacptrj) - 1,
)
synchronize(m.ext.backend)
end
function ExaModels.hprod!(
m::ExaModels.ExaModel{T,VT,E},
x::AbstractVector,
y::AbstractVector,
v::AbstractVector,
Hv::AbstractVector;
obj_weight = one(eltype(x)),
) where {T,VT,E<:KAExtension}
fill!(Hv, zero(eltype(Hv)))
fill!(m.ext.prodhelper.hessbuffer, zero(eltype(Hv)))
_obj_hess_coord!(m.ext.backend, m.ext.prodhelper.hessbuffer, m.objs, x, obj_weight)
_con_hess_coord!(m.ext.backend, m.ext.prodhelper.hessbuffer, m.cons, x, y)
synchronize(m.ext.backend)
kersyspmv(m.ext.backend)(
Hv,
v,
m.ext.prodhelper.hesssparsityi,
m.ext.prodhelper.hessbuffer,
m.ext.prodhelper.hessptri,
ndrange = length(m.ext.prodhelper.hessptri) - 1,
)
synchronize(m.ext.backend)
kersyspmv2(m.ext.backend)(
Hv,
v,
m.ext.prodhelper.hesssparsityj,
m.ext.prodhelper.hessbuffer,
m.ext.prodhelper.hessptrj,
ndrange = length(m.ext.prodhelper.hessptrj) - 1,
)
synchronize(m.ext.backend)
return Hv
end
@kernel function kerspmv(y, @Const(x), @Const(coord), @Const(V), @Const(ptr))
idx = @index(Global)
@inbounds for l = ptr[idx]:ptr[idx+1]-1
((i, j), ind) = coord[l]
y[i] += V[ind] * x[j]
end
end
@kernel function kerspmv2(y, @Const(x), @Const(coord), @Const(V), @Const(ptr))
idx = @index(Global)
@inbounds for l = ptr[idx]:ptr[idx+1]-1
((i, j), ind) = coord[l]
y[j] += V[ind] * x[i]
end
end
@kernel function kersyspmv(y, @Const(x), @Const(coord), @Const(V), @Const(ptr))
idx = @index(Global)
@inbounds for l = ptr[idx]:ptr[idx+1]-1
((i, j), ind) = coord[l]
y[i] += V[ind] * x[j]
end
end
@kernel function kersyspmv2(y, @Const(x), @Const(coord), @Const(V), @Const(ptr))
idx = @index(Global)
@inbounds for l = ptr[idx]:ptr[idx+1]-1
((i, j), ind) = coord[l]
if i != j
y[j] += V[ind] * x[i]
end
end
end
function ExaModels.hess_coord!(
m::ExaModels.ExaModel{T,VT,E} where {T,VT,E<:KAExtension},
x::V,
y::V,
hess::V;
obj_weight = one(eltype(y)),
) where {V<:AbstractVector}
fill!(hess, zero(eltype(hess)))
_obj_hess_coord!(m.ext.backend, hess, m.objs, x, obj_weight)
_con_hess_coord!(m.ext.backend, hess, m.cons, x, y)
end
function _obj_hess_coord!(backend, hess, objs, x, obj_weight)
ExaModels.shessian!(backend, hess, nothing, objs, x, obj_weight, zero(eltype(hess)))
_obj_hess_coord!(backend, hess, objs.inner, x, obj_weight)
synchronize(backend)
end
function _obj_hess_coord!(backend, hess, objs::ExaModels.ObjectiveNull, x, obj_weight) end
function _con_hess_coord!(backend, hess, cons, x, y)
ExaModels.shessian!(backend, hess, nothing, cons, x, y, zero(eltype(hess)))
_con_hess_coord!(backend, hess, cons.inner, x, y)
synchronize(backend)
end
function _con_hess_coord!(backend, hess, cons::ExaModels.ConstraintNull, x, y) end
function ExaModels.sgradient!(
backend::B,
y,
f,
x,
adj,
) where {B<:KernelAbstractions.Backend}
if !isempty(f.itr)
kerg(backend)(y, f.f, f.itr, x, adj; ndrange = length(f.itr))
end
end
function ExaModels.sjacobian!(
backend::B,
y1,
y2,
f,
x,
adj,
) where {B<:KernelAbstractions.Backend}
if !isempty(f.itr)
kerj(backend)(y1, y2, f.f, f.itr, x, adj; ndrange = length(f.itr))
end
end
function ExaModels.shessian!(
backend::B,
y1,
y2,
f,
x,
adj,
adj2,
) where {B<:KernelAbstractions.Backend}
if !isempty(f.itr)
kerh(backend)(y1, y2, f.f, f.itr, x, adj, adj2; ndrange = length(f.itr))
end
end
function ExaModels.shessian!(
backend::B,
y1,
y2,
f,
x,
adj::V,
adj2,
) where {B<:KernelAbstractions.Backend,V<:AbstractVector}
if !isempty(f.itr)
kerh2(backend)(y1, y2, f.f, f.itr, x, adj, adj2; ndrange = length(f.itr))
end
end
@kernel function kerh(y1, y2, @Const(f), @Const(itr), @Const(x), @Const(adj1), @Const(adj2))
I = @index(Global)
@inbounds ExaModels.hrpass0(
f.f(itr[I], ExaModels.SecondAdjointNodeSource(x)),
f.comp2,
y1,
y2,
ExaModels.offset2(f, I),
0,
adj1,
adj2,
)
end
@kernel function kerh2(
y1,
y2,
@Const(f),
@Const(itr),
@Const(x),
@Const(adjs1),
@Const(adj2)
)
I = @index(Global)
@inbounds ExaModels.hrpass0(
f.f(itr[I], ExaModels.SecondAdjointNodeSource(x)),
f.comp2,
y1,
y2,
ExaModels.offset2(f, I),
0,
adjs1[ExaModels.offset0(f, itr, I)],
adj2,
)
end
@kernel function kerj(y1, y2, @Const(f), @Const(itr), @Const(x), @Const(adj))
I = @index(Global)
@inbounds ExaModels.jrpass(
f.f(itr[I], ExaModels.AdjointNodeSource(x)),
f.comp1,
ExaModels.offset0(f, itr, I),
y1,
y2,
ExaModels.offset1(f, I),
0,
adj,
)
end
@kernel function kerg(y, @Const(f), @Const(itr), @Const(x), @Const(adj))
I = @index(Global)
@inbounds ExaModels.grpass(
f.f(itr[I], ExaModels.AdjointNodeSource(x)),
f.comp1,
y,
ExaModels.offset1(f, I),
0,
adj,
)
end
@kernel function kerf(y, @Const(f), @Const(itr), @Const(x))
I = @index(Global)
@inbounds y[ExaModels.offset0(f, itr, I)] = f.f(itr[I], x)
end
@kernel function kerf2(y, @Const(f), @Const(itr), @Const(x), @Const(oa))
I = @index(Global)
@inbounds y[oa+I] = f.f(itr[I], x)
end
@kernel function compress_to_dense(y, @Const(y0), @Const(ptr), @Const(sparsity))
I = @index(Global)
@inbounds for j = ptr[I]:ptr[I+1]-1
(k, l) = sparsity[j]
y[k] += y0[l]
end
end
@kernel function kergetptr(cmp, bitarray, @Const(array))
I = @index(Global)
@inbounds if I == 1
bitarray[I] = true
elseif I == length(array) + 1
bitarray[I] = true
else
i0 = array[I-1]
i1 = array[I]
if cmp(i0, i1)
bitarray[I] = true
else
bitarray[I] = false
end
end
end
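# kergetptr flags group boundaries: bitarray[I] is set at both ends of the
# range and wherever `cmp` reports a transition between consecutive entries
# of the (sorted) `array`. The `ptr` offsets consumed by the grouped kernels
# above are presumably recovered with `findall` on this bitarray (an
# assumption; the call site is not in this file).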
ExaModels.getbackend(m::ExaModels.ExaModel{T,VT,E}) where {T,VT,E<:KAExtension} =
m.ext.backend
function ExaModels._compress!(V, buffer, ptr, sparsity, backend)
fill!(V, zero(eltype(V)))
ker_compress!(backend)(V, buffer, ptr, sparsity; ndrange = length(ptr) - 1)
synchronize(backend)
end
@kernel function ker_compress!(V, @Const(buffer), @Const(ptr), @Const(sparsity))
i = @index(Global)
@inbounds for j = ptr[i]:ptr[i+1]-1
V[i] += buffer[sparsity[j][2]]
end
end
function ExaModels._structure!(I, J, ptr, sparsity, backend)
ker_structure!(backend)(I, J, ptr, sparsity, ndrange = length(ptr) - 1)
synchronize(backend)
end
@kernel function ker_structure!(I, J, @Const(ptr), @Const(sparsity))
i = @index(Global)
@inbounds J[i], I[i] = sparsity[ptr[i]][1]
end
function ExaModels.get_compressed_sparsity(nnz, Ibuffer, Jbuffer, backend)
sparsity = similar(Ibuffer, Tuple{Tuple{Int,Int},Int}, nnz)
ker_get_compressed_sparsity(backend)(sparsity, Ibuffer, Jbuffer; ndrange = nnz)
synchronize(backend)
return sparsity
end
@kernel function ker_get_compressed_sparsity(sparsity, @Const(I), @Const(J))
i = @index(Global)
@inbounds sparsity[i] = ((J[i], I[i]), i)
end
end # module ExaModelsKernelAbstractions
| ExaModels | https://github.com/exanauts/ExaModels.jl.git |
|
[
"MIT"
] | 0.7.1 | 68888f0d012aae809dbbe90f5ea6e1c8c5c5f737 | code | 16201 | module ExaModelsMOI
import ExaModels: ExaModels, NLPModels, SolverCore
import MathOptInterface
const MOI = MathOptInterface
const MOIU = MathOptInterface.Utilities
const MOIB = MathOptInterface.Bridges
const SUPPORTED_OBJ_TYPE =
[:scalar_nonlinear, :scalar_affine, :scalar_quadratic, :single_variable]
const UNSUPPORTED_OBJ_TYPE =
[:vector_nonlinear, :vector_affine, :vector_quadratic, :vector_variables]
const SUPPORTED_CONS_TYPE =
[:moi_scalarnonlinearfunction, :moi_scalaraffinefunction, :moi_scalarquadraticfunction]
const UNSUPPORTED_CONS_TYPE = [
:moi_vectoraffinefunction,
:moi_vectornonlinearfunction,
:moi_vectorquadraticfunction,
:moi_vectorofvariables,
]
"""
Abstract data structure for storing expression tree and data arrays
"""
abstract type AbstractBin end
struct Bin{E,P,I} <: AbstractBin
head::E
data::P
inner::I
end
struct BinNull <: AbstractBin end
function update_bin!(bin, e, p)
if _update_bin!(bin, e, p) # if update succeeded, return the original bin
return bin
else # if update has failed, return a new bin
return Bin(e, [p], bin)
end
end
function _update_bin!(bin::Bin{E,P,I}, e, p) where {E,P,I}
if e == bin.head && p isa eltype(bin.data)
push!(bin.data, p)
return true
else
return _update_bin!(bin.inner, e, p)
end
end
function _update_bin!(::BinNull, e, p)
return false
end
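# A minimal sketch of how bins accumulate (hypothetical heads `e1`, `e2`;
# not part of the module):
#
#   bin = BinNull()
#   bin = update_bin!(bin, e1, (1, 2.0))  # new head => fresh Bin
#   bin = update_bin!(bin, e1, (3, 4.0))  # same head and data type => push!
#   bin = update_bin!(bin, e2, (5,))      # different head => wrap in new Bin
#
# Structurally identical expressions thus share a single data array, which
# is what the SIMD-style batched evaluation downstream relies on.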
float_type(::MOIU.Model{T}) where {T} = T
function ExaModels.ExaModel(jm_cache::MOI.ModelLike; backend = nothing)
T = float_type(jm_cache.model)
minimize = jm_cache.model.objective.sense == MOI.MIN_SENSE
# create exacore
c = ExaModels.ExaCore(T; backend = backend, minimize = minimize)
# variables
jvars = jm_cache.model.variables
lvar = jvars.lower
uvar = jvars.upper
x0 = fill!(similar(lvar), 0.0)
nvar = length(lvar)
if haskey(jm_cache.varattr, MOI.VariablePrimalStart())
list = jm_cache.varattr[MOI.VariablePrimalStart()]
for (k, v) in list
x0[k.value] = v
end
end
v = ExaModels.variable(c, nvar; start = x0, lvar = lvar, uvar = uvar)
# objective
jobjs = jm_cache.model.objective
bin = BinNull()
for field in SUPPORTED_OBJ_TYPE
bin = exafy_obj(getfield(jobjs, field), bin)
end
for field in UNSUPPORTED_OBJ_TYPE
        if getfield(jobjs, field) !== nothing
error("$field type objective is not supported")
end
end
build_objective(c, bin)
# constraint
jcons = jm_cache.model.constraints
bin = BinNull()
offset = 0
lcon = similar(x0, 0)
ucon = similar(x0, 0)
for field in SUPPORTED_CONS_TYPE
bin, offset = exafy_con(getfield(jcons, field), bin, offset, lcon, ucon)
end
for field in UNSUPPORTED_CONS_TYPE
        if getfield(jcons, field) !== nothing
error("$field type constraint is not supported")
end
end
y0 = fill!(similar(lcon), zero(T))
cons = ExaModels.constraint(c, offset; start = y0, lcon = lcon, ucon = ucon)
build_constraint!(c, cons, bin)
return ExaModels.ExaModel(c)
end
function exafy_con(cons::Nothing, bin, offset, lcon, ucon)
return bin, offset
end
function exafy_con(cons, bin, offset, lcon, ucon)
bin, offset = _exafy_con(cons.moi_equalto, bin, offset, lcon, ucon)
bin, offset = _exafy_con(cons.moi_greaterthan, bin, offset, lcon, ucon)
bin, offset = _exafy_con(cons.moi_lessthan, bin, offset, lcon, ucon)
bin, offset = _exafy_con(cons.moi_interval, bin, offset, lcon, ucon)
return bin, offset
end
function _exafy_con(i, c::C, bin, offset; pos = true) where {C<:MOI.ScalarAffineFunction}
for mm in c.terms
e, p = _exafy(mm)
e = pos ? e : -e
bin = update_bin!(
bin,
ExaModels.ParIndexed(ExaModels.ParSource(), length(p) + 1) => e,
(p..., offset + i.value),
) # augment data with constraint index
end
bin = update_bin!(bin, ExaModels.Null(c.constant), (1,))
return bin, offset
end
function _exafy_con(i, c::C, bin, offset; pos = true) where {C<:MOI.ScalarQuadraticFunction}
for mm in c.affine_terms
e, p = _exafy(mm)
e = pos ? e : -e
bin = update_bin!(
bin,
ExaModels.ParIndexed(ExaModels.ParSource(), length(p) + 1) => e,
(p..., offset + i.value),
) # augment data with constraint index
end
for mm in c.quadratic_terms
e, p = _exafy(mm)
e = pos ? e : -e
bin = update_bin!(
bin,
ExaModels.ParIndexed(ExaModels.ParSource(), length(p) + 1) => e,
(p..., offset + i.value),
) # augment data with constraint index
end
bin = update_bin!(bin, ExaModels.Null(c.constant), (1,))
return bin, offset
end
function _exafy_con(i, c::C, bin, offset; pos = true) where {C<:MOI.ScalarNonlinearFunction}
if c.head == :+
for mm in c.args
bin, offset = _exafy_con(i, mm, bin, offset)
end
# elseif c.head == :-
# bin, offset = _exafy_con(i, c.args[1], bin, offset)
# bin, offset = _exafy_con(i, c.args[2], bin, offset; pos = false)
else
e, p = _exafy(c)
e = pos ? e : -e
bin = update_bin!(
bin,
ExaModels.ParIndexed(ExaModels.ParSource(), length(p) + 1) => e,
(p..., offset + i.value),
) # augment data with constraint index
end
return bin, offset
end
function _exafy_con(i, c::C, bin, offset; pos = true) where {C<:Real}
e =
pos ? ExaModels.ParIndexed(ExaModels.ParSource(), 1) :
-ExaModels.ParIndexed(ExaModels.ParSource(), 1)
bin = update_bin!(
bin,
ExaModels.ParIndexed(ExaModels.ParSource(), 2) => 0 * ExaModels.Var(1) + e,
(c, offset + i.value),
)
return bin, offset
end
function _exafy_con(cons::V, bin, offset, lcon, ucon) where {V<:MOIU.VectorOfConstraints}
l = length(cons.constraints)
resize!(lcon, offset + l)
resize!(ucon, offset + l)
for (i, (c, e)) in cons.constraints
_exafy_con_update_vector(i, e, lcon, ucon, offset)
bin, offset = _exafy_con(i, c, bin, offset)
end
return bin, (offset += l)
end
function _exafy_con(::Nothing, bin, offset, lcon, ucon)
return bin, offset
end
function _exafy_con_update_vector(i, e::MOI.Interval{T}, lcon, ucon, offset) where {T}
lcon[offset+i.value] = e.lower
ucon[offset+i.value] = e.upper
end
function _exafy_con_update_vector(i, e::MOI.LessThan{T}, lcon, ucon, offset) where {T}
lcon[offset+i.value] = -Inf
ucon[offset+i.value] = e.upper
end
function _exafy_con_update_vector(i, e::MOI.GreaterThan{T}, lcon, ucon, offset) where {T}
ucon[offset+i.value] = Inf
lcon[offset+i.value] = e.lower
end
function _exafy_con_update_vector(i, e::MOI.EqualTo{T}, lcon, ucon, offset) where {T}
lcon[offset+i.value] = e.value
ucon[offset+i.value] = e.value
end
function build_constraint!(c, cons, bin)
build_constraint!(c, cons, bin.inner)
ExaModels.constraint!(c, cons, bin.head, bin.data)
end
function build_constraint!(c, cons, ::BinNull) end
function build_objective(c, bin)
build_objective(c, bin.inner)
ExaModels.objective(c, bin.head, bin.data)
end
function build_objective(c, ::BinNull) end
function exafy_obj(o::Nothing, bin)
return bin
end
function exafy_obj(o::MOI.VariableIndex, bin)
e, p = _exafy(o)
return update_bin!(bin, e, p)
end
function exafy_obj(o::MOI.ScalarQuadraticFunction{T}, bin) where {T}
for m in o.affine_terms
e, p = _exafy(m)
bin = update_bin!(bin, e, p)
end
for m in o.quadratic_terms
e, p = _exafy(m)
bin = update_bin!(bin, e, p)
end
return update_bin!(bin, ExaModels.Null(o.constant), (1,))
end
function exafy_obj(o::MOI.ScalarAffineFunction{T}, bin) where {T}
for m in o.terms
e, p = _exafy(m)
bin = update_bin!(bin, e, p)
end
return update_bin!(bin, ExaModels.Null(o.constant), (1,))
end
function exafy_obj(o::MOI.ScalarNonlinearFunction, bin)
constant = 0.0
if o.head == :+
for m in o.args
            if m isa MOI.ScalarAffineFunction
                for mm in m.terms
                    e, p = _exafy(mm)
                    bin = update_bin!(bin, e, p)
                end
                constant += m.constant
elseif m isa MOI.ScalarQuadraticFunction
for mm in m.affine_terms
e, p = _exafy(mm)
bin = update_bin!(bin, e, p)
end
for mm in m.quadratic_terms
e, p = _exafy(mm)
bin = update_bin!(bin, e, p)
end
constant += m.constant
else
e, p = _exafy(m)
bin = update_bin!(bin, e, p)
end
end
else
e, p = _exafy(o)
bin = update_bin!(bin, e, p)
end
return update_bin!(bin, ExaModels.Null(constant), (1,)) # TODO see if this can be empty tuple
end
function _exafy(v::MOI.VariableIndex, p = ())
i = ExaModels.ParIndexed(ExaModels.ParSource(), length(p) + 1)
return ExaModels.Var(i), (p..., v.value)
end
function _exafy(i::R, p) where {R<:Real}
return ExaModels.ParIndexed(ExaModels.ParSource(), length(p) + 1), (p..., i)
end
function _exafy(e::MOI.ScalarNonlinearFunction, p = ())
return op(e.head)((
begin
c, p = _exafy(e, p)
c
end for e in e.args
)...), p
end
function _exafy(e::MOI.ScalarAffineFunction{T}, p = ()) where {T}
ec = if !isempty(e.terms)
sum(begin
c1, p = _exafy(term, p)
c1
end for term in e.terms) +
ExaModels.ParIndexed(ExaModels.ParSource(), length(p) + 1)
else
ExaModels.ParIndexed(ExaModels.ParSource(), length(p) + 1)
end
return ec, (p..., e.constant)
end
function _exafy(e::MOI.ScalarAffineTerm{T}, p = ()) where {T}
c1, p = _exafy(e.variable, p)
return *(c1, ExaModels.ParIndexed(ExaModels.ParSource(), length(p) + 1)),
(p..., e.coefficient)
end
function _exafy(e::MOI.ScalarQuadraticFunction{T}, p = ()) where {T}
t = ExaModels.ParIndexed(ExaModels.ParSource(), length(p) + 1)
p = (p..., e.constant)
if !isempty(e.affine_terms)
t += sum(begin
c1, p = _exafy(term, p)
c1
end for term in e.affine_terms)
end
if !isempty(e.quadratic_terms)
t += sum(begin
c1, p = _exafy(term, p)
c1
end for term in e.quadratic_terms)
end
return t, p
end
function _exafy(e::MOI.ScalarQuadraticTerm{T}, p = ()) where {T}
if e.variable_1 == e.variable_2
v, p = _exafy(e.variable_1, p)
return ExaModels.ParIndexed(ExaModels.ParSource(), length(p) + 1) * abs2(v),
(p..., e.coefficient / 2) # it seems that MOI assumes this by default
else
v1, p = _exafy(e.variable_1, p)
v2, p = _exafy(e.variable_2, p)
return ExaModels.ParIndexed(ExaModels.ParSource(), length(p) + 1) * v1 * v2,
(p..., e.coefficient)
end
end
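# A hedged sketch of what `_exafy` returns for the simplest input: the
# expression is index-free, and concrete indices/coefficients travel in the
# parameter tuple, e.g.
#
#   e, p = _exafy(MOI.VariableIndex(7))
#   # e wraps ParIndexed(ParSource(), 1) in an ExaModels.Var
#   # p == (7,)
#
# so many structurally identical terms can reuse one expression tree.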
# eval can be a performance killer -- we branch explicitly on the frequently
# used operation symbols so that only uncommon operators fall back to eval.
function op(s::Symbol)
if s == :+
return +
elseif s == :-
return -
elseif s == :*
return *
elseif s == :/
return /
elseif s == :^
return ^
elseif s == :sin
return sin
elseif s == :cos
return cos
elseif s == :exp
return exp
else
return eval(s)
end
end
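# For the symbols listed above this is a plain branch with no `eval`, e.g.
#
#   op(:sin) === sin    # true, resolved without eval
#   op(:tanh) === tanh  # true, but resolved through `eval(:tanh)`
#
# Only operators outside the explicit list pay the `eval` cost.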
# struct EmptyOptimizer{B}
# backend::B
# end
mutable struct Optimizer{B,S} <: MOI.AbstractOptimizer
solver::S
backend::B
model::Union{Nothing,ExaModels.ExaModel}
result::Any
solve_time::Float64
options::Dict{Symbol,Any}
end
MOI.is_empty(model::Optimizer) = model.model === nothing
const _FUNCTIONS = Union{
MOI.ScalarAffineFunction{Float64},
MOI.ScalarQuadraticFunction{Float64},
MOI.ScalarNonlinearFunction,
}
const _SETS = Union{MOI.GreaterThan{Float64},MOI.LessThan{Float64},MOI.EqualTo{Float64}}
function MOI.supports_constraint(
::Optimizer,
::Type{<:Union{MOI.VariableIndex,_FUNCTIONS}},
::Type{<:_SETS},
)
return true
end
function MOI.supports(
::Optimizer,
::MOI.ObjectiveFunction{<:Union{MOI.VariableIndex,<:_FUNCTIONS}},
)
return true
end
function MOI.supports(::Optimizer, ::MOI.VariablePrimalStart, ::Type{MOI.VariableIndex})
return true
end
function ExaModels.Optimizer(solver, backend = nothing; kwargs...)
return Optimizer(solver, backend, nothing, nothing, 0.0, Dict{Symbol,Any}(kwargs...))
end
function MOI.empty!(model::ExaModelsMOI.Optimizer)
model.model = nothing
end
function MOI.copy_to(dest::Optimizer, src::MOI.ModelLike)
dest.model = ExaModels.ExaModel(src; backend = dest.backend)
return MOIU.identity_index_map(src)
end
function MOI.optimize!(optimizer::Optimizer)
optimizer.solve_time = @elapsed begin
result = optimizer.solver(optimizer.model; optimizer.options...)
optimizer.result = (
objective = result.objective,
solution = Array(result.solution),
multipliers = Array(result.multipliers),
multipliers_L = Array(result.multipliers_L),
multipliers_U = Array(result.multipliers_U),
status = result.status,
)
end
return optimizer
end
MOI.get(optimizer::Optimizer, ::MOI.TerminationStatus) =
ExaModels.termination_status_translator(optimizer.solver, optimizer.result.status)
MOI.get(model::Optimizer, attr::Union{MOI.PrimalStatus,MOI.DualStatus}) =
ExaModels.result_status_translator(model.solver, model.result.status)
function MOI.get(model::Optimizer, attr::MOI.VariablePrimal, vi::MOI.VariableIndex)
MOI.check_result_index_bounds(model, attr)
# MOI.throw_if_not_valid(model, vi)
# if _is_parameter(vi)
# p = model.parameters[vi]
# return model.nlp_model[p]
# end
return model.result.solution[vi.value]
end
function MOI.get(
model::Optimizer,
attr::MOI.ConstraintDual,
ci::MOI.ConstraintIndex{<:_FUNCTIONS,<:_SETS},
)
MOI.check_result_index_bounds(model, attr)
# MOI.throw_if_not_valid(model, ci)
s = -1.0
return s * model.result.multipliers[ci.value]
end
function MOI.get(
model::Optimizer,
attr::MOI.ConstraintDual,
ci::MOI.ConstraintIndex{MOI.VariableIndex,MOI.LessThan{Float64}},
)
MOI.check_result_index_bounds(model, attr)
# MOI.throw_if_not_valid(model, ci)
rc = model.result.multipliers_L[ci.value] - model.result.multipliers_U[ci.value]
return min(0.0, rc)
end
function MOI.get(
model::Optimizer,
attr::MOI.ConstraintDual,
ci::MOI.ConstraintIndex{MOI.VariableIndex,MOI.GreaterThan{Float64}},
)
MOI.check_result_index_bounds(model, attr)
# MOI.throw_if_not_valid(model, ci)
rc = model.result.multipliers_L[ci.value] - model.result.multipliers_U[ci.value]
return max(0.0, rc)
end
function MOI.get(
model::Optimizer,
attr::MOI.ConstraintDual,
ci::MOI.ConstraintIndex{MOI.VariableIndex,MOI.EqualTo{Float64}},
)
MOI.check_result_index_bounds(model, attr)
# MOI.throw_if_not_valid(model, ci)
rc = model.result.multipliers_L[ci.value] - model.result.multipliers_U[ci.value]
return rc
end
function MOI.get(model::Optimizer, ::MOI.ResultCount)
return (model.result !== nothing) ? 1 : 0
end
function MOI.get(model::Optimizer, attr::MOI.ObjectiveValue)
MOI.check_result_index_bounds(model, attr)
# scale = (model.sense == MOI.MAX_SENSE) ? -1 : 1
# return scale * model.result.objective
return model.result.objective
end
MOI.get(model::Optimizer, ::MOI.SolveTimeSec) = model.solve_time
MOI.get(model::Optimizer, ::MOI.SolverName) =
"$(string(model.solver)) running with ExaModels"
function MOI.set(model::Optimizer, p::MOI.RawOptimizerAttribute, value)
model.options[Symbol(p.name)] = value
# No need to reset model.solver because this gets handled in optimize!.
return
end
end # module
| ExaModels | https://github.com/exanauts/ExaModels.jl.git |
|
[
"MIT"
] | 0.7.1 | 68888f0d012aae809dbbe90f5ea6e1c8c5c5f737 | code | 1127 | module ExaModelsMadNLP
import ExaModels
import MadNLP
import MathOptInterface
const MOI = MathOptInterface
ExaModels.MadNLPOptimizer(backend = nothing; kwargs...) =
ExaModels.Optimizer(MadNLP.madnlp, backend; kwargs...)
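# A usage sketch (hedged; solver options are forwarded as keyword arguments
# and depend on MadNLP):
#
#   using JuMP, ExaModels, MadNLP
#   model = Model(() -> ExaModels.MadNLPOptimizer())
#   @variable(model, x >= 0)
#   @objective(model, Min, (x - 1)^2)
#   optimize!(model)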
function ExaModels.result_status_translator(::typeof(MadNLP.madnlp), status)
Base.get(_RESULT_STATUS_CODES, status, MOI.UNKNOWN_RESULT_STATUS)
end
function ExaModels.termination_status_translator(::typeof(MadNLP.madnlp), status)
Base.get(_TERMINATION_STATUS_CODES, status, MOI.OTHER_ERROR)
end
const _RESULT_STATUS_CODES = Dict{Symbol,MathOptInterface.ResultStatusCode}(
:first_order => MOI.FEASIBLE_POINT,
:acceptable => MOI.NEARLY_FEASIBLE_POINT,
:infeasible => MOI.INFEASIBLE_POINT,
)
const _TERMINATION_STATUS_CODES = Dict{Symbol,MOI.TerminationStatusCode}(
:first_order => MOI.LOCALLY_SOLVED,
:acceptable => MOI.ALMOST_LOCALLY_SOLVED,
:small_step => MOI.SLOW_PROGRESS,
:infeasible => MOI.INFEASIBLE_OR_UNBOUNDED,
:max_iter => MOI.ITERATION_LIMIT,
:max_time => MOI.TIME_LIMIT,
:user => MOI.INTERRUPTED,
:exception => MOI.OTHER_ERROR,
)
end
| ExaModels | https://github.com/exanauts/ExaModels.jl.git |
|
[
"MIT"
] | 0.7.1 | 68888f0d012aae809dbbe90f5ea6e1c8c5c5f737 | code | 1698 | module ExaModelsOneAPI
import ExaModels, oneAPI
ExaModels.sum(a::A) where {A<:oneAPI.oneArray} = Base.sum(Array(a))
function ExaModels.append!(
backend,
a::A,
b::Base.Generator{UnitRange{I}},
lb,
) where {I,A<:oneAPI.oneArray}
la = length(a)
aa = similar(a, la + lb)
copyto!(view(aa, 1:la), a)
map!(b.f, view(aa, (la+1):(la+lb)), b.iter)
return aa
end
function ExaModels.append!(backend, a::A, b::Base.Generator, lb) where {A<:oneAPI.oneArray}
b = ExaModels._adapt_gen(b)
la = length(a)
aa = similar(a, la + lb)
copyto!(view(aa, 1:la), a)
map!(b.f, view(aa, (la+1):(la+lb)), ExaModels.convert_array(b.iter, backend))
return aa
end
function ExaModels.append!(
backend,
a::A,
b::V,
lb,
) where {A<:oneAPI.oneArray,V<:AbstractArray}
la = length(a)
aa = similar(a, la + lb)
copyto!(view(aa, 1:la), a)
copyto!(view(aa, (la+1):(la+lb)), b)
return aa
end
function ExaModels.append!(backend, a::A, b::Number, lb) where {A<:oneAPI.oneArray}
la = length(a)
aa = similar(a, la + lb)
copyto!(view(aa, 1:la), a)
fill!(view(aa, (la+1):(la+lb)), b)
return aa
end
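# The four `append!` methods above share one recipe: oneAPI arrays cannot be
# grown in place, so a fresh array of length la + lb is allocated, the old
# contents are copied into the front, and the new elements (generator,
# array, or scalar fill) are written into the tail.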
ExaModels.convert_array(v, backend::oneAPI.oneAPIBackend) = oneAPI.oneArray(v)
ExaModels.sort!(array::A; lt = isless) where {A<:oneAPI.oneArray} =
copyto!(array, sort!(Array(array); lt = lt))
# NOTE: the `Base.findall` methods below are type piracy on `oneAPI` array types.
function Base.findall(f::F, bitarray::A) where {F<:Function,A<:oneAPI.oneArray}
a = Array(bitarray)
b = findall(f, a)
c = similar(bitarray, eltype(b), length(b))
return copyto!(c, b)
end
Base.findall(bitarray::A) where {A<:oneAPI.oneArray} = Base.findall(identity, bitarray)
end # module
| ExaModels | https://github.com/exanauts/ExaModels.jl.git |
|
[
"MIT"
] | 0.7.1 | 68888f0d012aae809dbbe90f5ea6e1c8c5c5f737 | code | 4594 | module ExaModelsSpecialFunctions
using ExaModels, SpecialFunctions
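# The long decimal literals in the derivatives below are exact Float64
# spellings of familiar constants: 1.12837916709551255... is 2/sqrt(pi),
# the scale in d/dx erf(x) = (2/sqrt(pi)) * exp(-x^2), and
# 0.88622692545275794... is sqrt(pi)/2, its reciprocal, which appears in
# the erfinv/erfcinv derivatives.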
ExaModels.@register_univariate(
SpecialFunctions.erfi,
x -> 1.1283791670955125585606992899556644260883331298828125 * exp(x^2),
x -> 1.1283791670955125585606992899556644260883331298828125 * exp(x^2) * 2x
)
ExaModels.@register_univariate(
SpecialFunctions.erfcinv,
x -> -0.8862269254527579409597137782839126884937286376953125 * exp(erfcinv(x)^2),
x ->
-0.8862269254527579409597137782839126884937286376953125 *
exp(erfcinv(x)^2) *
2 *
erfcinv(x) *
-0.8862269254527579409597137782839126884937286376953125 *
exp(erfcinv(x)^2)
)
ExaModels.@register_univariate(
SpecialFunctions.erfcx,
x -> 2 * x * erfcx(x) - 1.1283791670955125585606992899556644260883331298828125,
x ->
2 * erfcx(x) +
(2 * x * erfcx(x) - 1.1283791670955125585606992899556644260883331298828125) * 2 * x
)
ExaModels.@register_univariate(
SpecialFunctions.invdigamma,
x -> inv(trigamma(invdigamma(x))),
x ->
(-abs2(inv(trigamma(invdigamma(x))))) *
polygamma(2, invdigamma(x)) *
inv(trigamma(invdigamma(x)))
)
ExaModels.@register_univariate(
SpecialFunctions.bessely1,
x -> (bessely0(x) - bessely(2, x)) / 2,
x -> (-bessely1(x) + -(bessely(1, x) - bessely(3, x)) / 2) / 2
)
ExaModels.@register_univariate(
SpecialFunctions.besselj1,
x -> (besselj0(x) - besselj(2, x)) / 2,
x -> (-besselj1(x) + -(besselj(1, x) - besselj(3, x)) / 2) / 2
)
ExaModels.@register_univariate(
SpecialFunctions.dawson,
x -> 1 - 2 * x * dawson(x),
x -> -(2 * dawson(x) + (1 - 2 * x * dawson(x)) * 2 * x)
)
ExaModels.@register_univariate(
SpecialFunctions.airyaiprime,
x -> x * airyai(x),
x -> airyai(x) + airyaiprime(x) * x
)
ExaModels.@register_univariate(
SpecialFunctions.erf,
x -> 1.1283791670955125585606992899556644260883331298828125 * exp(-x * x),
x -> 1.1283791670955125585606992899556644260883331298828125 * exp(-x * x) * (-2x)
)
ExaModels.@register_univariate(SpecialFunctions.digamma, trigamma, x -> polygamma(2, x))
ExaModels.@register_univariate(
SpecialFunctions.gamma,
x -> digamma(x) * gamma(x),
x -> trigamma(x) * gamma(x) + digamma(x) * gamma(x) * digamma(x)
)
ExaModels.@register_univariate(SpecialFunctions.airyai, airyaiprime, x -> x * airyai(x))
ExaModels.@register_univariate(SpecialFunctions.airybi, airybiprime, x -> x * airybi(x))
ExaModels.@register_univariate(
SpecialFunctions.erfinv,
x -> 0.8862269254527579409597137782839126884937286376953125 * exp(erfinv(x)^2),
x ->
0.8862269254527579409597137782839126884937286376953125 *
exp(erfinv(x)^2) *
2 *
erfinv(x) *
0.8862269254527579409597137782839126884937286376953125 *
exp(erfinv(x)^2)
)
ExaModels.@register_univariate(
SpecialFunctions.bessely0,
x -> -bessely1(x),
x -> -(bessely0(x) - bessely(2, x)) / 2
)
ExaModels.@register_univariate(
SpecialFunctions.erfc,
x -> -1.1283791670955125585606992899556644260883331298828125 * exp(-x * x),
x -> -1.1283791670955125585606992899556644260883331298828125 * exp(-x * x) * (-2x)
)
ExaModels.@register_univariate(
SpecialFunctions.trigamma,
x -> polygamma(2, x),
x -> polygamma(3, x)
)
ExaModels.@register_univariate(
SpecialFunctions.airybiprime,
x -> x * airybi(x),
x -> airybi(x) + airybiprime(x) * x
)
ExaModels.@register_univariate(
SpecialFunctions.besselj0,
x -> -besselj1(x),
x -> -(besselj0(x) - besselj(2, x)) / 2
)
ExaModels.@register_bivariate(
SpecialFunctions.beta,
(x1, x2) -> beta(x1, x2) * (digamma(x1) - digamma(x1 + x2)),
(x1, x2) -> beta(x1, x2) * (digamma(x2) - digamma(x1 + x2)),
(x1, x2) ->
beta(x1, x2) * (digamma(x1) - digamma(x1 + x2)) * (digamma(x1) - digamma(x1 + x2)) +
(trigamma(x1) + -trigamma(x1 + x2)) * beta(x1, x2),
(x1, x2) ->
beta(x1, x2) * (digamma(x2) - digamma(x1 + x2)) * (digamma(x1) - digamma(x1 + x2)) +
(-trigamma(x1 + x2)) * beta(x1, x2),
(x1, x2) ->
beta(x1, x2) * (digamma(x2) - digamma(x1 + x2)) * (digamma(x2) - digamma(x1 + x2)) +
(trigamma(x2) + -trigamma(x1 + x2)) * beta(x1, x2)
)
ExaModels.@register_bivariate(
SpecialFunctions.logbeta,
(x1, x2) -> digamma(x1) - digamma(x1 + x2),
(x1, x2) -> digamma(x2) - digamma(x1 + x2),
(x1, x2) -> trigamma(x1) + -trigamma(x1 + x2),
(x1, x2) -> -trigamma(x1 + x2),
(x1, x2) -> trigamma(x2) + -trigamma(x1 + x2)
)
end # module ExaModelsSpecialFunctions
| ExaModels | https://github.com/exanauts/ExaModels.jl.git |
|
[
"MIT"
] | 0.7.1 | 68888f0d012aae809dbbe90f5ea6e1c8c5c5f737 | code | 1029 | """
ExaModels
An algebraic modeling and automatic differentiation tool written in the Julia language, specialized for the SIMD abstraction of nonlinear programs.
For more information, please visit https://github.com/exanauts/ExaModels.jl
"""
module ExaModels
import NLPModels:
NLPModels,
obj,
cons!,
grad!,
jac_coord!,
hess_coord!,
jprod!,
jtprod!,
hprod!,
jac_structure!,
hess_structure!,
cons_nln!,
jtprod_nln!,
jprod_nln!
import SolverCore
import Printf
include("templates.jl")
include("graph.jl")
include("register.jl")
include("specialization.jl")
include("functionlist.jl")
include("simdfunction.jl")
include("gradient.jl")
include("jacobian.jl")
include("hessian.jl")
include("nlp.jl")
include("utils.jl")
export ExaModel,
ExaCore,
ExaModelsBackend,
data,
variable,
objective,
constraint,
constraint!,
solution,
multipliers,
multipliers_L,
multipliers_U,
@register_univariate,
@register_bivariate
end # module ExaModels
| ExaModels | https://github.com/exanauts/ExaModels.jl.git |
|
[
"MIT"
] | 0.7.1 | 68888f0d012aae809dbbe90f5ea6e1c8c5c5f737 | code | 7751 | @inline _mone(x) = -one(x)
@inline _one(x1, x2) = one(x1)
@inline _zero(x1, x2) = zero(x1)
@inline _mone(x1, x2) = -one(x1)
@inline _x1(x1, x2) = x1
@inline _x2(x1, x2) = x2
@inline _and(x::Bool, y::Bool) = x && y
@inline _or(x::Bool, y::Bool) = x || y
@inline _and(x, y::Bool) = x == 1 && y
@inline _or(x, y::Bool) = x == 1 || y
@inline _and(x::Bool, y) = x && y == 1
@inline _or(x::Bool, y) = x || y == 1
@inline _and(x, y) = x == 1 && y == 1
@inline _or(x, y) = x == 1 || y == 1
@register_univariate(Base.:+, one, zero)
@register_univariate(Base.:-, _mone, zero)
@register_univariate(Base.inv, x -> -abs2(inv(x)), x -> -(2 * inv(x)) * (-abs2(inv(x))))
@register_univariate(Base.abs, x -> (ifelse(x >= 0, one(x), -one(x))), zero)
@register_univariate(
Base.sqrt,
x -> (0.5 / sqrt(x)),
x -> ((0.5 * -(0.5 / sqrt(x))) / sqrt(x)^2)
)
@register_univariate(
Base.cbrt,
x -> (0.3333333333333333 / cbrt(x)^2),
x -> (
(0.3333333333333333 * -(2 * (0.3333333333333333 / cbrt(x)^2) * cbrt(x))) /
(cbrt(x)^2)^2
)
)
@register_univariate(Base.abs2, x -> 2x, x -> 2)
@register_univariate(Base.exp, exp, exp)
@register_univariate(
Base.exp2,
x -> exp2(x) * 0.69314718055994528622676398299518041312694549560546875,
x ->
exp2(x) *
0.69314718055994528622676398299518041312694549560546875 *
0.69314718055994528622676398299518041312694549560546875
)
@register_univariate(
Base.exp10,
x -> exp10(x) * 2.30258509299404590109361379290930926799774169921875,
x ->
exp10(x) *
2.30258509299404590109361379290930926799774169921875 *
2.30258509299404590109361379290930926799774169921875
)
@register_univariate(Base.log, inv, x -> -abs2(inv(x)))
@register_univariate(
Base.log2,
x -> inv(x) / 0.69314718055994528622676398299518041312694549560546875,
x -> (-abs2(inv(x))) / 0.69314718055994528622676398299518041312694549560546875
)
@register_univariate(Base.log1p, x -> (1 / (1 + x)), x -> (-1 / (1 + x)^2))
@register_univariate(
Base.log10,
x -> inv(x) / 2.30258509299404590109361379290930926799774169921875,
x -> (-abs2(inv(x))) / 2.30258509299404590109361379290930926799774169921875
)
@register_univariate(Base.sin, cos, x -> -sin(x))
@register_univariate(Base.cos, x -> -sin(x), x -> -cos(x))
@register_univariate(Base.tan, x -> 1 + tan(x)^2, x -> 2 * tan(x) * (1 + tan(x)^2))
@register_univariate(
Base.asin,
x -> (1 / sqrt(1 - x^2)),
x -> (-(-(2x) * (0.5 / sqrt(1 - x^2))) / sqrt(1 - x^2)^2)
)
@register_univariate(
Base.acos,
x -> (-1 / sqrt(1 - x^2)),
x -> (-(-(-(2x) * (0.5 / sqrt(1 - x^2)))) / sqrt(1 - x^2)^2)
)
@register_univariate(
Base.csc,
x -> (-csc(x)) * cot(x),
x -> (-(-csc(x)) * cot(x)) * cot(x) + (-(1 + cot(x)^2)) * (-csc(x))
)
@register_univariate(
Base.sec,
x -> sec(x) * tan(x),
x -> sec(x) * tan(x) * tan(x) + (1 + tan(x)^2) * sec(x)
)
@register_univariate(Base.cot, x -> -(1 + cot(x)^2), x -> -2 * cot(x) * (-(1 + cot(x)^2)))
@register_univariate(Base.atan, x -> inv(1 + x^2), x -> (-abs2(inv(1 + x^2))) * 2x)
@register_univariate(Base.acot, x -> -inv(1 + x^2), x -> -(-abs2(inv(1 + x^2))) * 2x)
@register_univariate(
Base.sind,
x -> 0.0174532925199432954743716805978692718781530857086181640625 * cosd(x),
x ->
0.0174532925199432954743716805978692718781530857086181640625 *
-0.0174532925199432954743716805978692718781530857086181640625 *
sind(x)
)
@register_univariate(
Base.cosd,
x -> -0.0174532925199432954743716805978692718781530857086181640625 * sind(x),
x ->
-0.0174532925199432954743716805978692718781530857086181640625 *
0.0174532925199432954743716805978692718781530857086181640625 *
cosd(x)
)
@register_univariate(
Base.tand,
x -> 0.0174532925199432954743716805978692718781530857086181640625 * (1 + tand(x)^2),
x ->
0.0174532925199432954743716805978692718781530857086181640625 *
2 *
tand(x) *
0.0174532925199432954743716805978692718781530857086181640625 *
(1 + tand(x)^2)
)
@register_univariate(
Base.cscd,
x -> -0.0174532925199432954743716805978692718781530857086181640625 * cscd(x) * cotd(x),
x ->
-0.0174532925199432954743716805978692718781530857086181640625 *
-0.0174532925199432954743716805978692718781530857086181640625 *
cscd(x) *
cotd(x) *
cotd(x) +
-0.0174532925199432954743716805978692718781530857086181640625 *
(1 + cotd(x)^2) *
-0.0174532925199432954743716805978692718781530857086181640625 *
cscd(x)
)
@register_univariate(
Base.secd,
x -> 0.0174532925199432954743716805978692718781530857086181640625 * secd(x) * tand(x),
x ->
0.0174532925199432954743716805978692718781530857086181640625 *
0.0174532925199432954743716805978692718781530857086181640625 *
secd(x) *
tand(x) *
tand(x) +
0.0174532925199432954743716805978692718781530857086181640625 *
(1 + tand(x)^2) *
0.0174532925199432954743716805978692718781530857086181640625 *
secd(x)
)
@register_univariate(
Base.cotd,
x -> -0.0174532925199432954743716805978692718781530857086181640625 * (1 + cotd(x)^2),
x ->
-0.0174532925199432954743716805978692718781530857086181640625 *
2 *
cotd(x) *
-0.0174532925199432954743716805978692718781530857086181640625 *
(1 + cotd(x)^2)
)
@register_univariate(
Base.atand,
x -> 57.29577951308232286464772187173366546630859375 / (1 + x^2),
x -> -57.29577951308232286464772187173366546630859375 * 2 * x / (1 + x^2)^2
)
@register_univariate(
Base.acotd,
x -> -57.29577951308232286464772187173366546630859375 / (1 + x^2),
x -> 57.29577951308232286464772187173366546630859375 * 2 * x / (1 + x^2)^2
)
@register_univariate(Base.sinh, cosh, sinh)
@register_univariate(Base.cosh, sinh, cosh)
@register_univariate(Base.tanh, x -> 1 - tanh(x)^2, x -> -2 * tanh(x) * (1 - tanh(x)^2))
@register_univariate(
Base.csch,
x -> (-coth(x)) * csch(x),
x -> csch(x)^2 * csch(x) + (-coth(x)) * csch(x) * (-coth(x))
)
@register_univariate(
Base.sech,
x -> (-tanh(x)) * sech(x),
x -> (-(1 - tanh(x)^2)) * sech(x) + (-tanh(x)) * sech(x) * (-tanh(x))
)
@register_univariate(Base.coth, x -> -csch(x)^2, x -> -2 * csch(x) * (-coth(x)) * csch(x))
@register_univariate(
Base.atanh,
x -> abs(x) > 1.0 ? NaN : inv(1 - x^2),
x -> abs(x) > 1.0 ? NaN : (-abs2(inv(1 - x^2))) * (-2x)
)
@register_univariate(
Base.acoth,
x -> abs(x) < 1.0 ? NaN : inv(1 - x^2),
x -> abs(x) < 1.0 ? NaN : (-abs2(inv(1 - x^2))) * (-2x)
)
@register_bivariate(Base.:+, _one, _one, _zero, _zero, _zero)
@register_bivariate(Base.:-, _one, _mone, _zero, _zero, _zero)
@register_bivariate(Base.:*, _x2, _x1, _zero, _one, _zero)
@register_bivariate(
Base.:^,
((x1, x2) -> x2 * x1^(x2 - 1)),
((x1, x2) -> x1^x2 * log(x1)),
((x1, x2) -> x2 * (x2 - 1) * x1^(x2 - 2)),
((x1, x2) -> x2 * x1^(x2 - 1) * log(x1) + x1^(x2 - 1)),
((x1, x2) -> x1^x2 * log(x1) * log(x1))
)
@register_bivariate(
Base.:/,
((x1, x2) -> 1 / x2),
((x1, x2) -> -x1 / x2^2),
_zero,
((x1, x2) -> -1 / x2^2),
((x1, x2) -> 2x1 / x2^3),
)
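# A minimal finite-difference sanity check for one of the registrations
# above (a sketch, not part of the module):
#
#   f(x1, x2)  = x1 / x2
#   d2(x1, x2) = -x1 / x2^2                  # registered partial w.r.t. x2
#   h  = 1e-6
#   fd = (f(3.0, 2.0 + h) - f(3.0, 2.0 - h)) / (2h)
#   abs(fd - d2(3.0, 2.0)) < 1e-8            # true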
# @register_bivariate(Base.:<=, _zero, _zero, _zero, _zero, _zero)
# @register_bivariate(Base.:>=, _zero, _zero, _zero, _zero, _zero)
# @register_bivariate(Base.:(==), _zero, _zero, _zero, _zero, _zero)
# @register_bivariate(Base.:<, _zero, _zero, _zero, _zero, _zero)
# @register_bivariate(Base.:>, _zero, _zero, _zero, _zero, _zero)
# @register_bivariate(_and, _zero, _zero, _zero, _zero, _zero)
# @register_bivariate(_or, _zero, _zero, _zero, _zero, _zero)
| ExaModels | https://github.com/exanauts/ExaModels.jl.git |
|
[
"MIT"
] | 0.7.1 | 68888f0d012aae809dbbe90f5ea6e1c8c5c5f737 | code | 3185 | """
drpass(d::D, y, adj)
Performs dense gradient evaluation via the reverse pass on the computation (sub)graph formed by forward pass
# Arguments:
- `d`: first-order computation (sub)graph
- `y`: result vector
- `adj`: adjoint propagated up to the current node
"""
@inline function drpass(d::D, y, adj) where {D<:AdjointNull}
nothing
end
@inline function drpass(d::D, y, adj) where {D<:AdjointNode1}
    drpass(d.inner, y, adj * d.y)
nothing
end
@inline function drpass(d::D, y, adj) where {D<:AdjointNode2}
    drpass(d.inner1, y, adj * d.y1)
    drpass(d.inner2, y, adj * d.y2)
nothing
end
@inline function drpass(d::D, y, adj) where {D<:AdjointNodeVar}
@inbounds y[d.i] += adj
nothing
end
"""
gradient!(y, f, x, adj)
Performs dense gradient evaluation
# Arguments:
- `y`: result vector
- `f`: the function to be differentiated in `SIMDFunction` format
- `x`: variable vector
- `adj`: initial adjoint
"""
function gradient!(y, f, x, adj)
@simd for k in eachindex(f.itr)
@inbounds gradient!(y, f.f.f, x, f.itr[k], adj)
end
return y
end
function gradient!(y, f, x, p, adj)
graph = f(p, AdjointNodeSource(x))
drpass(graph, y, adj)
return y
end
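# A worked sketch of the dense reverse pass (kept in a comment so nothing
# runs at load time; relies on the operator overloads for nodes generated
# by the register macros):
#
#   ex = Var(1) * Var(2)               # symbolic x[1] * x[2]
#   x  = [2.0, 3.0]; y = zeros(2)
#   gradient!(y, ex, x, nothing, 1.0)
#   # y == [3.0, 2.0]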
"""
grpass(d::D, comp, y, o1, cnt, adj)
Performs sparse gradient evaluation via the reverse pass on the computation (sub)graph formed by forward pass
# Arguments:
- `d`: first-order computation (sub)graph
- `comp`: a `Compressor`, which helps map counter to sparse vector index
- `y`: result vector
- `o1`: index offset
- `cnt`: counter
- `adj`: adjoint propagated up to the current node
"""
@inline function grpass(
d::D,
comp,
y,
o1,
cnt,
adj,
) where {D<:Union{AdjointNull,ParIndexed}}
return cnt
end
@inline function grpass(d::D, comp, y, o1, cnt, adj) where {D<:AdjointNode1}
cnt = grpass(d.inner, comp, y, o1, cnt, adj * d.y)
return cnt
end
@inline function grpass(d::D, comp, y, o1, cnt, adj) where {D<:AdjointNode2}
cnt = grpass(d.inner1, comp, y, o1, cnt, adj * d.y1)
cnt = grpass(d.inner2, comp, y, o1, cnt, adj * d.y2)
return cnt
end
@inline function grpass(d::D, comp, y, o1, cnt, adj) where {D<:AdjointNodeVar}
@inbounds y[o1+comp(cnt += 1)] += adj
return cnt
end
@inline function grpass(d::AdjointNodeVar, comp::Nothing, y, o1, cnt, adj) # despecialization
push!(y, d.i)
return (cnt += 1)
end
@inline function grpass(
d::D,
comp,
y::V,
o1,
cnt,
adj,
) where {D<:AdjointNodeVar,V<:AbstractVector{Tuple{Int,Int}}}
ind = o1 + comp(cnt += 1)
@inbounds y[ind] = (d.i, ind)
return cnt
end
""" sgradient!(y, f, x, adj)
Performs sparse gradient evalution
# Arguments:
- `y`: result vector
- `f`: the function to be differentiated in `SIMDFunction` format
- `x`: variable vector
- `adj`: initial adjoint
"""
function sgradient!(y, f, x, adj)
@simd for k in eachindex(f.itr)
        @inbounds sgradient!(y, f.f.f, f.itr[k], x, f.f.comp1, offset1(f, k), adj)
end
return y
end
function sgradient!(y, f, p, x, comp, o1, adj)
graph = f(p, AdjointNodeSource(x))
grpass(graph, comp, y, o1, 0, adj)
return y
end
| ExaModels | https://github.com/exanauts/ExaModels.jl.git |
|
[
"MIT"
] | 0.7.1 | 68888f0d012aae809dbbe90f5ea6e1c8c5c5f737 | code | 6589 | # Abstract node type for the computation graph for symbolic expression
abstract type AbstractNode end
# Abstract node type for first-order forward pass tree
abstract type AbstractAdjointNode end
# Abstract node type for the computation graph for second-order forward pass
abstract type AbstractSecondAdjointNode end
"""
Null
A null node
"""
struct Null{T} <: AbstractNode
value::T
end
Null() = Null(nothing)
"""
    AdjointNull
A null node for the first-order forward pass tree
"""
struct AdjointNull <: AbstractAdjointNode end
"""
    SecondAdjointNull
A null node for the second-order forward pass tree
"""
struct SecondAdjointNull <: AbstractSecondAdjointNode end
"""
VarSource
A source of variable nodes
"""
struct VarSource <: AbstractNode end
"""
Var{I}
A variable node for symbolic expression tree
# Fields:
- `i::I`: (parameterized) index
"""
struct Var{I} <: AbstractNode
i::I
end
"""
ParSource
A source of parameterized data
"""
struct ParSource <: AbstractNode end
"""
ParIndexed{I, J}
A parameterized data node
# Fields:
- `inner::I`: parameter for the data
"""
struct ParIndexed{I,J} <: AbstractNode
inner::I
end
@inline ParIndexed(inner::I, n) where {I} = ParIndexed{I,n}(inner)
"""
Node1{F, I}
A node with one child for symbolic expression tree
# Fields:
- `inner::I`: children
"""
struct Node1{F,I} <: AbstractNode
inner::I
end
"""
Node2{F, I1, I2}
A node with two children for symbolic expression tree
# Fields:
- `inner1::I1`: children #1
- `inner2::I2`: children #2
"""
struct Node2{F,I1,I2} <: AbstractNode
inner1::I1
inner2::I2
end
struct FirstFixed{F}
inner::F
end
struct SecondFixed{F}
inner::F
end
@inline Base.getindex(n::ParSource, i) = ParIndexed(n, i)
@inline Base.getindex(n::VarSource, i) = Var(i)
Par(iter::DataType) = ParSource()
Par(iter, idx...) = ParIndexed(Par(iter, idx[2:end]...), idx[1])
Par(iter::Type{T}, idx...) where {T<:Tuple} =
Tuple(Par(p, i, idx...) for (i, p) in enumerate(T.parameters))
Par(iter::Type{T}, idx...) where {T<:NamedTuple} = NamedTuple{T.parameters[1]}(
Par(p, i, idx...) for (i, p) in enumerate(T.parameters[2].parameters)
)
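# `Par` unrolls an element type into parameter accessors, e.g. (a sketch)
#
#   Par(Tuple{Int,Float64})
#   # == (ParIndexed(ParSource(), 1), ParIndexed(ParSource(), 2))
#
# so each component indexes into the corresponding slot of a data tuple at
# evaluation time.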
@inline Node1(f::F, inner::I) where {F,I} = Node1{F,I}(inner)
@inline Node2(f::F, inner1::I1, inner2::I2) where {F,I1,I2} = Node2{F,I1,I2}(inner1, inner2)
struct Identity end
@inline (v::Var{I})(i, x) where {I<:AbstractNode} = @inbounds x[v.i(i, x)]
@inline (v::Var{I})(i, x) where {I} = @inbounds x[v.i]
@inline (v::Var{I})(i::Identity, x) where {I<:AbstractNode} = @inbounds x[v.i]
@inline (v::ParSource)(i, x) = i
@inline (v::ParIndexed{I,n})(i, x) where {I,n} = @inbounds v.inner(i, x)[n]
(v::ParIndexed)(i::Identity, x) = NaN # despecialized
(v::ParSource)(i::Identity, x) = NaN # despecialized
"""
AdjointNode1{F, T, I}
A node with one child for first-order forward pass tree
# Fields:
- `x::T`: function value
- `y::T`: first-order sensitivity
- `inner::I`: children
"""
struct AdjointNode1{F,T,I} <: AbstractAdjointNode
x::T
y::T
inner::I
end
"""
AdjointNode2{F, T, I1, I2}
A node with two children for first-order forward pass tree
# Fields:
- `x::T`: function value
- `y1::T`: first-order sensitivity w.r.t. first argument
- `y2::T`: first-order sensitivity w.r.t. second argument
- `inner1::I1`: children #1
- `inner2::I2`: children #2
"""
struct AdjointNode2{F,T,I1,I2} <: AbstractAdjointNode
x::T
y1::T
y2::T
inner1::I1
inner2::I2
end
"""
AdjointNodeVar{I, T}
A variable node for first-order forward pass tree
# Fields:
- `i::I`: index
- `x::T`: value
"""
struct AdjointNodeVar{I,T} <: AbstractAdjointNode
i::I
x::T
end
"""
AdjointNodeSource{VT}
A source of `AdjointNode`. `adjoint_node_source[i]` returns an `AdjointNodeVar` at index `i`.
# Fields:
- `inner::VT`: variable vector
"""
struct AdjointNodeSource{VT}
inner::VT
end
@inline AdjointNode1(f::F, x::T, y, inner::I) where {F,T,I} =
AdjointNode1{F,T,I}(x, y, inner)
@inline AdjointNode2(f::F, x::T, y1, y2, inner1::I1, inner2::I2) where {F,T,I1,I2} =
AdjointNode2{F,T,I1,I2}(x, y1, y2, inner1, inner2)
@inline Base.getindex(x::I, i) where {I<:AdjointNodeSource{Nothing}} =
AdjointNodeVar(i, NaN)
@inline Base.getindex(x::I, i) where {I<:AdjointNodeSource} =
@inbounds AdjointNodeVar(i, x.inner[i])
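# E.g. `AdjointNodeSource(x)[i]` pairs the value `x[i]` with its index to
# seed the forward pass; with `nothing` as storage it yields
# `AdjointNodeVar(i, NaN)`, which the sparsity-detection passes rely on.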
"""
SecondAdjointNode1{F, T, I}
A node with one child for second-order forward pass tree
# Fields:
- `x::T`: function value
- `y::T`: first-order sensitivity
- `h::T`: second-order sensitivity
- `inner::I`: children
"""
struct SecondAdjointNode1{F,T,I} <: AbstractSecondAdjointNode
x::T
y::T
h::T
inner::I
end
"""
SecondAdjointNode2{F, T, I1, I2}
A node with two children for second-order forward pass tree
# Fields:
- `x::T`: function value
- `y1::T`: first-order sensitivity w.r.t. first argument
- `y2::T`: first-order sensitivity w.r.t. second argument
- `h11::T`: second-order sensitivity w.r.t. first argument
- `h12::T`: second-order sensitivity w.r.t. first and second argument
- `h22::T`: second-order sensitivity w.r.t. second argument
- `inner1::I1`: children #1
- `inner2::I2`: children #2
"""
struct SecondAdjointNode2{F,T,I1,I2} <: AbstractSecondAdjointNode
x::T
y1::T
y2::T
h11::T
h12::T
h22::T
inner1::I1
inner2::I2
end
"""
SecondAdjointNodeVar{I, T}
A variable node for second-order forward pass tree
# Fields:
- `i::I`: index
- `x::T`: value
"""
struct SecondAdjointNodeVar{I,T} <: AbstractSecondAdjointNode
i::I
x::T
end
"""
SecondAdjointNodeSource{VT}
A source of `SecondAdjointNode`. `second_adjoint_node_source[i]` returns a `SecondAdjointNodeVar` at index `i`.
# Fields:
- `inner::VT`: variable vector
"""
struct SecondAdjointNodeSource{VT}
inner::VT
end
@inline SecondAdjointNode1(f::F, x::T, y, h, inner::I) where {F,T,I} =
SecondAdjointNode1{F,T,I}(x, y, h, inner)
@inline SecondAdjointNode2(
f::F,
x::T,
y1,
y2,
h11,
h12,
h22,
inner1::I1,
inner2::I2,
) where {F,T,I1,I2} =
SecondAdjointNode2{F,T,I1,I2}(x, y1, y2, h11, h12, h22, inner1, inner2)
@inline Base.getindex(x::I, i) where {I<:SecondAdjointNodeSource{Nothing}} =
SecondAdjointNodeVar(i, NaN)
@inline Base.getindex(x::I, i) where {I<:SecondAdjointNodeSource} =
@inbounds SecondAdjointNodeVar(i, x.inner[i])
@inline (v::Null{Nothing})(i, x::V) where {T,V<:AbstractVector{T}} = zero(T)
@inline (v::Null{N})(i, x::V) where {N,T,V<:AbstractVector{T}} = T(v.value)
@inline (v::Null{N})(i, x::AdjointNodeSource{T}) where {N,T} = AdjointNull()
@inline (v::Null{N})(i, x::SecondAdjointNodeSource{T}) where {N,T} = SecondAdjointNull()
| ExaModels | https://github.com/exanauts/ExaModels.jl.git |
|
[
"MIT"
] | 0.7.1 | 68888f0d012aae809dbbe90f5ea6e1c8c5c5f737 | code | 13268 | """
hdrpass(t1::T1, t2::T2, comp, y1, y2, o2, cnt, adj)
Performs sparse hessian evaluation (`(df1/dx)(df2/dx)'` portion) via the reverse pass on the computation (sub)graph formed by second-order forward pass
# Arguments:
- `t1`: second-order computation (sub)graph regarding f1
- `t2`: second-order computation (sub)graph regarding f2
- `comp`: a `Compressor`, which helps map counter to sparse vector index
- `y1`: result vector #1
- `y2`: result vector #2 (only used when evaluating sparsity)
- `o2`: index offset
- `cnt`: counter
- `adj`: second adjoint propagated up to the current node
"""
@inline function hdrpass(
t1::T1,
t2::T2,
comp,
y1,
y2,
o2,
cnt,
adj,
) where {T1<:SecondAdjointNode1,T2<:SecondAdjointNode1}
cnt = hdrpass(t1.inner, t2.inner, comp, y1, y2, o2, cnt, adj * t1.y * t2.y)
cnt
end
function hdrpass(
t1::SecondAdjointNode1,
t2::SecondAdjointNode1,
comp::Nothing,
y1,
y2,
o2,
cnt,
adj,
) # despecialized
cnt = hdrpass(t1.inner, t2.inner, comp, y1, y2, o2, cnt, adj * t1.y * t2.y)
cnt
end
@inline function hdrpass(
t1::T1,
t2::T2,
comp,
y1,
y2,
o2,
cnt,
adj,
) where {T1<:SecondAdjointNodeVar,T2<:SecondAdjointNode1}
cnt = hdrpass(t1, t2.inner, comp, y1, y2, o2, cnt, adj * t2.y)
cnt
end
function hdrpass(
t1::SecondAdjointNodeVar,
t2::SecondAdjointNode1,
comp::Nothing,
y1,
y2,
o2,
cnt,
adj,
) # despecialized
cnt = hdrpass(t1, t2.inner, comp, y1, y2, o2, cnt, adj * t2.y)
cnt
end
@inline function hdrpass(
t1::T1,
t2::T2,
comp,
y1,
y2,
o2,
cnt,
adj,
) where {T1<:SecondAdjointNode1,T2<:SecondAdjointNodeVar}
cnt = hdrpass(t1.inner, t2, comp, y1, y2, o2, cnt, adj * t1.y)
cnt
end
function hdrpass(
t1::SecondAdjointNode1,
t2::SecondAdjointNodeVar,
comp::Nothing,
y1,
y2,
o2,
cnt,
adj,
) # despecialized
cnt = hdrpass(t1.inner, t2, comp, y1, y2, o2, cnt, adj * t1.y)
cnt
end
@inline function hdrpass(
t1::T1,
t2::T2,
comp,
y1,
y2,
o2,
cnt,
adj,
) where {T1<:SecondAdjointNode2,T2<:SecondAdjointNode2}
cnt = hdrpass(t1.inner1, t2.inner1, comp, y1, y2, o2, cnt, adj * t1.y1 * t2.y1)
cnt = hdrpass(t1.inner1, t2.inner2, comp, y1, y2, o2, cnt, adj * t1.y1 * t2.y2)
cnt = hdrpass(t1.inner2, t2.inner1, comp, y1, y2, o2, cnt, adj * t1.y2 * t2.y1)
cnt = hdrpass(t1.inner2, t2.inner2, comp, y1, y2, o2, cnt, adj * t1.y2 * t2.y2)
cnt
end
function hdrpass(
t1::SecondAdjointNode2,
t2::SecondAdjointNode2,
comp::Nothing,
y1,
y2,
o2,
cnt,
adj,
) # despecialized
cnt = hdrpass(t1.inner1, t2.inner1, comp, y1, y2, o2, cnt, adj * t1.y1 * t2.y1)
cnt = hdrpass(t1.inner1, t2.inner2, comp, y1, y2, o2, cnt, adj * t1.y1 * t2.y2)
cnt = hdrpass(t1.inner2, t2.inner1, comp, y1, y2, o2, cnt, adj * t1.y2 * t2.y1)
cnt = hdrpass(t1.inner2, t2.inner2, comp, y1, y2, o2, cnt, adj * t1.y2 * t2.y2)
cnt
end
@inline function hdrpass(
t1::T1,
t2::T2,
comp,
y1,
y2,
o2,
cnt,
adj,
) where {T1<:SecondAdjointNode1,T2<:SecondAdjointNode2}
cnt = hdrpass(t1.inner, t2.inner1, comp, y1, y2, o2, cnt, adj * t1.y * t2.y1)
cnt = hdrpass(t1.inner, t2.inner2, comp, y1, y2, o2, cnt, adj * t1.y * t2.y2)
cnt
end
function hdrpass(
t1::SecondAdjointNode1,
t2::SecondAdjointNode2,
comp::Nothing,
y1,
y2,
o2,
cnt,
adj,
) # despecialized
cnt = hdrpass(t1.inner, t2.inner1, comp, y1, y2, o2, cnt, adj * t1.y * t2.y1)
cnt = hdrpass(t1.inner, t2.inner2, comp, y1, y2, o2, cnt, adj * t1.y * t2.y2)
cnt
end
@inline function hdrpass(
t1::T1,
t2::T2,
comp,
y1,
y2,
o2,
cnt,
adj,
) where {T1<:SecondAdjointNode2,T2<:SecondAdjointNode1}
cnt = hdrpass(t1.inner1, t2.inner, comp, y1, y2, o2, cnt, adj * t1.y1 * t2.y)
cnt = hdrpass(t1.inner2, t2.inner, comp, y1, y2, o2, cnt, adj * t1.y2 * t2.y)
cnt
end
function hdrpass(
t1::SecondAdjointNode2,
t2::SecondAdjointNode1,
comp::Nothing,
y1,
y2,
o2,
cnt,
adj,
) # despecialized
cnt = hdrpass(t1.inner1, t2.inner, comp, y1, y2, o2, cnt, adj * t1.y1 * t2.y)
cnt = hdrpass(t1.inner2, t2.inner, comp, y1, y2, o2, cnt, adj * t1.y2 * t2.y)
cnt
end
@inline function hdrpass(
t1::T1,
t2::T2,
comp,
y1,
y2,
o2,
cnt,
adj,
) where {T1<:SecondAdjointNodeVar,T2<:SecondAdjointNode2}
cnt = hdrpass(t1, t2.inner1, comp, y1, y2, o2, cnt, adj * t2.y1)
cnt = hdrpass(t1, t2.inner2, comp, y1, y2, o2, cnt, adj * t2.y2)
cnt
end
function hdrpass(
t1::SecondAdjointNodeVar,
t2::SecondAdjointNode2,
comp::Nothing,
y1,
y2,
o2,
cnt,
adj,
) # despecialized
cnt = hdrpass(t1, t2.inner1, comp, y1, y2, o2, cnt, adj * t2.y1)
cnt = hdrpass(t1, t2.inner2, comp, y1, y2, o2, cnt, adj * t2.y2)
cnt
end
@inline function hdrpass(
t1::T1,
t2::T2,
comp,
y1,
y2,
o2,
cnt,
adj,
) where {T1<:SecondAdjointNode2,T2<:SecondAdjointNodeVar}
cnt = hdrpass(t1.inner1, t2, comp, y1, y2, o2, cnt, adj * t1.y1)
cnt = hdrpass(t1.inner2, t2, comp, y1, y2, o2, cnt, adj * t1.y2)
cnt
end
function hdrpass(
t1::SecondAdjointNode2,
t2::SecondAdjointNodeVar,
comp::Nothing,
y1,
y2,
o2,
cnt,
adj,
) # despecialized
cnt = hdrpass(t1.inner1, t2, comp, y1, y2, o2, cnt, adj * t1.y1)
cnt = hdrpass(t1.inner2, t2, comp, y1, y2, o2, cnt, adj * t1.y2)
cnt
end
@inline function hdrpass(
t1::T1,
t2::T2,
comp,
y1,
y2,
o2,
cnt,
adj,
) where {T1<:SecondAdjointNodeVar,T2<:SecondAdjointNodeVar}
i, j = t1.i, t2.i
@inbounds if i == j
y1[o2+comp(cnt += 1)] += 2 * adj
else
y1[o2+comp(cnt += 1)] += adj
end
cnt
end
@inline function hdrpass(
t1::T1,
t2::T2,
comp,
y1::Tuple{V1,V2},
y2,
o2,
cnt,
adj,
) where {
T1<:SecondAdjointNodeVar,
T2<:SecondAdjointNodeVar,
V1<:AbstractVector,
V2<:AbstractVector,
}
i, j = t1.i, t2.i
y, v = y1
@inbounds if i == j
y[i] += 2 * adj * v[i]
else
y[i] += adj * v[j]
y[j] += adj * v[i]
end
return (cnt += 1)
end
"""
hrpass(t::D, comp, y1, y2, o2, cnt, adj, adj2)
Performs sparse hessian evaluation (`d²f/dx²` portion) via the reverse pass on the computation (sub)graph formed by second-order forward pass
# Arguments:
- `comp`: a `Compressor`, which helps map counter to sparse vector index
- `y1`: result vector #1
- `y2`: result vector #2 (only used when evaluating sparsity)
- `o2`: index offset
- `cnt`: counter
- `adj`: first adjoint propagated up to the current node
- `adj2`: second adjoint propagated up to the current node
"""
@inline function hrpass(t::SecondAdjointNull, comp, y1, y2, o2, cnt, adj, adj2)
cnt
end
@inline function hrpass(
t::D,
comp,
y1,
y2,
o2,
cnt,
adj,
adj2,
) where {D<:SecondAdjointNode1}
cnt = hrpass(t.inner, comp, y1, y2, o2, cnt, adj * t.y, adj2 * (t.y)^2 + adj * t.h)
cnt
end
@inline function hrpass(
t::D,
comp,
y1,
y2,
o2,
cnt,
adj,
adj2,
) where {D<:SecondAdjointNode2}
adj2y1y2 = adj2 * t.y1 * t.y2
adjh12 = adj * t.h12
cnt = hrpass(t.inner1, comp, y1, y2, o2, cnt, adj * t.y1, adj2 * (t.y1)^2 + adj * t.h11)
cnt = hrpass(t.inner2, comp, y1, y2, o2, cnt, adj * t.y2, adj2 * (t.y2)^2 + adj * t.h22)
cnt = hdrpass(t.inner1, t.inner2, comp, y1, y2, o2, cnt, adj2y1y2 + adjh12)
cnt
end
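# The propagation rule above is the scalar second-order chain rule: for
# y = f(u), a first adjoint `adj` and second adjoint `adj2` at y become
# adj * f'(u) and adj2 * f'(u)^2 + adj * f''(u) at u; for y = f(u, v) the
# cross term adj2 * f_u * f_v + adj * f_uv is dispatched to `hdrpass` over
# the pair of subtrees.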
@inline hrpass0(args...) = hrpass(args...)
@inline function hrpass0(
t::D,
comp,
y1,
y2,
o2,
cnt,
adj,
adj2,
) where {N<:Union{FirstFixed{typeof(*)},SecondFixed{typeof(*)}},D<:SecondAdjointNode1{N}}
cnt = hrpass0(t.inner, comp, y1, y2, o2, cnt, adj * t.y, adj2 * (t.y)^2)
cnt
end
@inline function hrpass0(
t::D,
comp,
y1,
y2,
o2,
cnt,
adj,
adj2,
) where {N<:Union{FirstFixed{typeof(+)},SecondFixed{typeof(+)}},D<:SecondAdjointNode1{N}}
cnt = hrpass0(t.inner, comp, y1, y2, o2, cnt, adj, adj2)
cnt
end
@inline function hrpass0(
t::D,
comp,
y1,
y2,
o2,
cnt,
adj,
adj2,
) where {D<:SecondAdjointNode1{FirstFixed{typeof(-)}}}
cnt = hrpass0(t.inner, comp, y1, y2, o2, cnt, -adj, adj2)
cnt
end
@inline function hrpass0(
t::D,
comp,
y1,
y2,
o2,
cnt,
adj,
adj2,
) where {D<:SecondAdjointNode1{SecondFixed{typeof(-)}}}
cnt = hrpass0(t.inner, comp, y1, y2, o2, cnt, adj, adj2)
cnt
end
@inline function hrpass0(
t::D,
comp,
y1,
y2,
o2,
cnt,
adj,
adj2,
) where {D<:SecondAdjointNode1{typeof(+)}}
cnt = hrpass0(t.inner, comp, y1, y2, o2, cnt, adj, adj2)
cnt
end
@inline function hrpass0(
t::D,
comp,
y1,
y2,
o2,
cnt,
adj,
adj2,
) where {D<:SecondAdjointNode1{typeof(-)}}
cnt = hrpass0(t.inner, comp, y1, y2, o2, cnt, -adj, adj2)
cnt
end
@inline function hrpass0(
t::D,
comp,
y1,
y2,
o2,
cnt,
adj,
adj2,
) where {D<:SecondAdjointNode2{typeof(+)}}
cnt = hrpass0(t.inner1, comp, y1, y2, o2, cnt, adj, adj2)
cnt = hrpass0(t.inner2, comp, y1, y2, o2, cnt, adj, adj2)
cnt
end
@inline function hrpass0(
t::D,
comp,
y1,
y2,
o2,
cnt,
adj,
adj2,
) where {D<:SecondAdjointNode2{typeof(-)}}
cnt = hrpass0(t.inner1, comp, y1, y2, o2, cnt, adj, adj2)
cnt = hrpass0(t.inner2, comp, y1, y2, o2, cnt, -adj, adj2)
cnt
end
@inline function hrpass0(
t::T,
comp,
y1,
y2,
o2,
cnt,
adj,
adj2,
) where {T<:SecondAdjointNodeVar}
cnt
end
@inline function hrpass0(
t::T,
comp::Nothing,
y1,
y2,
o2,
cnt,
adj,
adj2,
) where {T<:SecondAdjointNodeVar}
cnt
end
function hdrpass(
t1::SecondAdjointNodeVar,
t2::SecondAdjointNodeVar,
comp::Nothing,
y1,
y2,
o2,
cnt,
adj,
)
cnt += 1
push!(y1, (t1.i, t2.i))
cnt
end
function hrpass(t::SecondAdjointNodeVar, comp::Nothing, y1, y2, o2, cnt, adj, adj2)
cnt += 1
push!(y1, (t.i, t.i))
cnt
end
@inline function hrpass(
t::T,
comp,
y1::Tuple{V1,V2},
y2,
o2,
cnt,
adj,
adj2,
) where {T<:SecondAdjointNodeVar,V1<:AbstractVector,V2<:AbstractVector}
y, v = y1
@inbounds y[t.i] += adj2 * v[t.i]
return (cnt += 1)
end
@inline function hrpass(
t::T,
comp,
y1,
y2,
o2,
cnt,
adj,
adj2,
) where {T<:SecondAdjointNodeVar}
@inbounds y1[o2+comp(cnt += 1)] += adj2
cnt
end
@inline function hrpass(
t::T,
comp,
y1::V,
y2::V,
o2,
cnt,
adj,
adj2,
) where {T<:SecondAdjointNodeVar,I<:Integer,V<:AbstractVector{I}}
ind = o2 + comp(cnt += 1)
@inbounds y1[ind] = t.i
@inbounds y2[ind] = t.i
cnt
end
@inline function hrpass(
t::T,
comp,
y1::V,
y2,
o2,
cnt,
adj,
adj2,
) where {T<:SecondAdjointNodeVar,I<:Tuple{Tuple{Int,Int},Int},V<:AbstractVector{I}}
ind = o2 + comp(cnt += 1)
@inbounds y1[ind] = ((t.i, t.i), ind)
cnt
end
@inline function hdrpass(
t1::T1,
t2::T2,
comp,
y1::V,
y2::V,
o2,
cnt,
adj,
) where {T1<:SecondAdjointNodeVar,T2<:SecondAdjointNodeVar,I<:Integer,V<:AbstractVector{I}}
i, j = t1.i, t2.i
ind = o2 + comp(cnt += 1)
@inbounds if i >= j
y1[ind] = i
y2[ind] = j
else
y1[ind] = j
y2[ind] = i
end
cnt
end
@inline function hdrpass(
t1::T1,
t2::T2,
comp,
y1::V,
y2,
o2,
cnt,
adj,
) where {
T1<:SecondAdjointNodeVar,
T2<:SecondAdjointNodeVar,
I<:Tuple{Tuple{Int,Int},Int},
V<:AbstractVector{I},
}
i, j = t1.i, t2.i
ind = o2 + comp(cnt += 1)
@inbounds if i >= j
y1[ind] = ((i, j), ind)
else
y1[ind] = ((j, i), ind)
end
cnt
end
"""
shessian!(y1, y2, f, x, adj1, adj2)
Performs sparse hessian evaluation
# Arguments:
- `y1`: result vector #1
- `y2`: result vector #2 (only used when evaluating sparsity)
- `f`: the function to be differentiated in `SIMDFunction` format
- `x`: variable vector
- `adj1`: initial first adjoint
- `adj2`: initial second adjoint
"""
function shessian!(y1, y2, f, x, adj1, adj2)
@simd for k in eachindex(f.itr)
@inbounds shessian!(
y1,
y2,
f.f.f,
f.itr[k],
x,
f.f.comp2,
offset2(f, k),
adj1,
adj2,
)
end
end
function shessian!(y1, y2, f, x, adj1s::V, adj2) where {V<:AbstractVector}
@simd for k in eachindex(f.itr)
@inbounds shessian!(
y1,
y2,
f.f.f,
f.itr[k],
x,
f.f.comp2,
offset2(f, k),
adj1s[offset0(f, k)],
adj2,
)
end
end
function shessian!(y1, y2, f, p, x, comp, o2, adj1, adj2)
graph = f(p, SecondAdjointNodeSource(x))
hrpass0(graph, comp, y1, y2, o2, 0, adj1, adj2)
end
| ExaModels | https://github.com/exanauts/ExaModels.jl.git |
|
[
"MIT"
] | 0.7.1 | 68888f0d012aae809dbbe90f5ea6e1c8c5c5f737 | code | 2943 | """
jrpass(d::D, comp, i, y1, y2, o1, cnt, adj)
Performs sparse jacobian evaluation via the reverse pass on the computation (sub)graph formed by the forward pass
# Arguments:
- `d`: first-order computation (sub)graph
- `comp`: a `Compressor`, which helps map counter to sparse vector index
- `i`: constraint index (this is `i`-th constraint)
- `y1`: result vector #1
- `y2`: result vector #2 (only used when evaluating sparsity)
- `o1`: index offset
- `cnt`: counter
- `adj`: adjoint propagated up to the current node
"""
@inline function jrpass(d::AdjointNull, comp, i, y1, y2, o1, cnt, adj)
return cnt
end
@inline function jrpass(d::D, comp, i, y1, y2, o1, cnt, adj) where {D<:AdjointNode1}
cnt = jrpass(d.inner, comp, i, y1, y2, o1, cnt, adj * d.y)
return cnt
end
@inline function jrpass(d::D, comp, i, y1, y2, o1, cnt, adj) where {D<:AdjointNode2}
cnt = jrpass(d.inner1, comp, i, y1, y2, o1, cnt, adj * d.y1)
cnt = jrpass(d.inner2, comp, i, y1, y2, o1, cnt, adj * d.y2)
return cnt
end
@inline function jrpass(d::D, comp, i, y1, y2, o1, cnt, adj) where {D<:AdjointNodeVar}
@inbounds y1[o1+comp(cnt += 1)] += adj
return cnt
end
@inline function jrpass(
d::D,
comp,
i,
y1::Tuple{V1,V2},
y2,
o1,
cnt,
adj,
) where {D<:AdjointNodeVar,V1<:AbstractVector,V2<:AbstractVector}
(y, v) = y1
@inbounds y[i] += adj * v[d.i]
return (cnt += 1)
end
@inline function jrpass(
d::D,
comp,
i,
y1,
y2::Tuple{V1,V2},
o1,
cnt,
adj,
) where {D<:AdjointNodeVar,V1<:AbstractVector,V2<:AbstractVector}
y, v = y2
@inbounds y[d.i] += adj * v[i]
return (cnt += 1)
end
@inline function jrpass(
d::D,
comp,
i,
y1::V,
y2::V,
o1,
cnt,
adj,
) where {D<:AdjointNodeVar,I<:Integer,V<:AbstractVector{I}}
ind = o1 + comp(cnt += 1)
@inbounds y1[ind] = i
@inbounds y2[ind] = d.i
return cnt
end
@inline function jrpass(
d::D,
comp,
i,
y1::V,
y2,
o1,
cnt,
adj,
) where {D<:AdjointNodeVar,I<:Tuple{Tuple{Int,Int},Int},V<:AbstractVector{I}}
ind = o1 + comp(cnt += 1)
@inbounds y1[ind] = ((i, d.i), ind)
return cnt
end
"""
sjacobian!(y1, y2, f, x, adj)
Performs sparse jacobian evaluation
# Arguments:
- `y1`: result vector #1
- `y2`: result vector #2 (only used when evaluating sparsity)
- `f`: the function to be differentiated in `SIMDFunction` format
- `x`: variable vector
- `adj`: initial adjoint
"""
function sjacobian!(y1, y2, f, x, adj)
@simd for i in eachindex(f.itr)
@inbounds sjacobian!(
y1,
y2,
f.f.f,
f.itr[i],
x,
f.f.comp1,
offset0(f, i),
offset1(f, i),
adj,
)
end
end
function sjacobian!(y1, y2, f, p, x, comp, o0, o1, adj)
graph = f(p, AdjointNodeSource(x))
jrpass(graph, comp, o0, y1, y2, o1, 0, adj)
end
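# A minimal usage sketch (mirroring the package tests; `x`, `rows`, `cols`,
# `vals`, and `comp` are placeholders): trace an expression once with
# `comp = nothing`-style structure calls to record (row, col) pairs, then call
# again with a `Compressor` and a value buffer to accumulate the nonzeros.
#
#     ff = (x -> x[1] * x[2])(ExaModels.VarSource())
#     sjacobian!(rows, cols, ff, nothing, nothing, comp, 0, 0, NaN)  # structure
#     sjacobian!(vals, nothing, ff, nothing, x, comp, 0, 0, 1.0)     # values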
| ExaModels | https://github.com/exanauts/ExaModels.jl.git |
|
[
"MIT"
] | 0.7.1 | 68888f0d012aae809dbbe90f5ea6e1c8c5c5f737 | code | 21264 | abstract type AbstractVariable end
abstract type AbstractConstraint end
abstract type AbstractObjective end
struct VariableNull <: AbstractVariable end
struct ObjectiveNull <: AbstractObjective end
struct ConstraintNull <: AbstractConstraint end
struct Variable{S,O} <: AbstractVariable
size::S
length::O
offset::O
end
Base.show(io::IO, v::Variable) = print(
io,
"""
Variable
x ∈ R^{$(join(size(v.size)," × "))}
""",
)
struct Objective{R,F,I} <: AbstractObjective
inner::R
f::F
itr::I
end
Base.show(io::IO, v::Objective) = print(
io,
"""
Objective
min (...) + ∑_{p ∈ P} f(x,p)
where |P| = $(length(v.itr))
""",
)
struct Constraint{R,F,I,O} <: AbstractConstraint
inner::R
f::F
itr::I
offset::O
end
Base.show(io::IO, v::Constraint) = print(
io,
"""
Constraint
s.t. (...)
g♭ ≤ [g(x,p)]_{p ∈ P} ≤ g♯
where |P| = $(length(v.itr))
""",
)
struct ConstraintAug{R,F,I} <: AbstractConstraint
inner::R
f::F
itr::I
oa::Int
end
Base.show(io::IO, v::ConstraintAug) = print(
io,
"""
Constraint Augmentation
s.t. (...)
g♭ ≤ (...) + ∑_{p ∈ P} h(x,p) ≤ g♯
where |P| = $(length(v.itr))
""",
)
"""
ExaCore([array_eltype::Type; backend = backend, minimize = true])
Returns an intermediate data object `ExaCore`, which later can be used for creating `ExaModel`
## Example
```jldoctest
julia> using ExaModels
julia> c = ExaCore()
An ExaCore
Float type: ...................... Float64
Array type: ...................... Vector{Float64}
Backend: ......................... Nothing
number of objective patterns: .... 0
number of constraint patterns: ... 0
julia> c = ExaCore(Float32)
An ExaCore
Float type: ...................... Float32
Array type: ...................... Vector{Float32}
Backend: ......................... Nothing
number of objective patterns: .... 0
number of constraint patterns: ... 0
julia> using CUDA
julia> c = ExaCore(Float32; backend = CUDABackend())
An ExaCore
Float type: ...................... Float32
Array type: ...................... CUDA.CuArray{Float32, 1, CUDA.Mem.DeviceBuffer}
Backend: ......................... CUDA.CUDAKernels.CUDABackend
number of objective patterns: .... 0
number of constraint patterns: ... 0
```
"""
Base.@kwdef mutable struct ExaCore{T,VT<:AbstractVector{T},B}
backend::B = nothing
obj::AbstractObjective = ObjectiveNull()
con::AbstractConstraint = ConstraintNull()
nvar::Int = 0
ncon::Int = 0
nconaug::Int = 0
nobj::Int = 0
nnzc::Int = 0
nnzg::Int = 0
nnzj::Int = 0
nnzh::Int = 0
x0::VT = convert_array(zeros(0), backend)
lvar::VT = similar(x0)
uvar::VT = similar(x0)
y0::VT = similar(x0)
lcon::VT = similar(x0)
ucon::VT = similar(x0)
minimize::Bool = true
end
# Deprecated as of v0.7
function ExaCore(::Type{T}, backend) where {T<:AbstractFloat}
@warn "ExaCore(T, backend) is deprecated. Use ExaCore(T; backend = backend) instead"
return ExaCore(T; backend = backend)
end
function ExaCore(backend)
@warn "ExaCore(backend) is deprecated. Use ExaCore(T; backend = backend) instead"
return ExaCore(; backend = backend)
end
ExaCore(::Type{T}; backend = nothing, kwargs...) where {T<:AbstractFloat} =
ExaCore(x0 = convert_array(zeros(T, 0), backend); backend = backend, kwargs...)
depth(a) = depth(a.inner) + 1
depth(a::ObjectiveNull) = 0
depth(a::ConstraintNull) = 0
Base.show(io::IO, c::ExaCore{T,VT,B}) where {T,VT,B} = print(
io,
"""
An ExaCore
Float type: ...................... $T
Array type: ...................... $VT
Backend: ......................... $B
number of objective patterns: .... $(depth(c.obj))
number of constraint patterns: ... $(depth(c.con))
""",
)
struct ExaModel{T,VT,E,O,C} <: NLPModels.AbstractNLPModel{T,VT}
objs::O
cons::C
meta::NLPModels.NLPModelMeta{T,VT}
counters::NLPModels.Counters
ext::E
end
function Base.show(io::IO, c::ExaModel{T,VT}) where {T,VT}
println(io, "An ExaModel{$T, $VT, ...}\n")
Base.show(io, c.meta)
end
"""
ExaModel(core)
Returns an `ExaModel` object, which can be solved by nonlinear
optimization solvers within `JuliaSmoothOptimizer` ecosystem, such as
`NLPModelsIpopt` or `MadNLP`.
## Example
```jldoctest
julia> using ExaModels
julia> c = ExaCore(); # create an ExaCore object
julia> x = variable(c, 1:10); # create variables
julia> objective(c, x[i]^2 for i in 1:10); # set objective function
julia> m = ExaModel(c) # create an ExaModel object
An ExaModel{Float64, Vector{Float64}, ...}
Problem name: Generic
All variables: ████████████████████ 10     All constraints: ⋅⋅⋅⋅⋅⋅⋅⋅⋅⋅⋅⋅⋅⋅⋅⋅⋅⋅⋅⋅ 0
         free: ████████████████████ 10                free: ⋅⋅⋅⋅⋅⋅⋅⋅⋅⋅⋅⋅⋅⋅⋅⋅⋅⋅⋅⋅ 0
        lower: ⋅⋅⋅⋅⋅⋅⋅⋅⋅⋅⋅⋅⋅⋅⋅⋅⋅⋅⋅⋅ 0                lower: ⋅⋅⋅⋅⋅⋅⋅⋅⋅⋅⋅⋅⋅⋅⋅⋅⋅⋅⋅⋅ 0
        upper: ⋅⋅⋅⋅⋅⋅⋅⋅⋅⋅⋅⋅⋅⋅⋅⋅⋅⋅⋅⋅ 0                upper: ⋅⋅⋅⋅⋅⋅⋅⋅⋅⋅⋅⋅⋅⋅⋅⋅⋅⋅⋅⋅ 0
      low/upp: ⋅⋅⋅⋅⋅⋅⋅⋅⋅⋅⋅⋅⋅⋅⋅⋅⋅⋅⋅⋅ 0              low/upp: ⋅⋅⋅⋅⋅⋅⋅⋅⋅⋅⋅⋅⋅⋅⋅⋅⋅⋅⋅⋅ 0
        fixed: ⋅⋅⋅⋅⋅⋅⋅⋅⋅⋅⋅⋅⋅⋅⋅⋅⋅⋅⋅⋅ 0                fixed: ⋅⋅⋅⋅⋅⋅⋅⋅⋅⋅⋅⋅⋅⋅⋅⋅⋅⋅⋅⋅ 0
       infeas: ⋅⋅⋅⋅⋅⋅⋅⋅⋅⋅⋅⋅⋅⋅⋅⋅⋅⋅⋅⋅ 0               infeas: ⋅⋅⋅⋅⋅⋅⋅⋅⋅⋅⋅⋅⋅⋅⋅⋅⋅⋅⋅⋅ 0
         nnzh: ( 81.82% sparsity)   10               linear: ⋅⋅⋅⋅⋅⋅⋅⋅⋅⋅⋅⋅⋅⋅⋅⋅⋅⋅⋅⋅ 0
                                                  nonlinear: ⋅⋅⋅⋅⋅⋅⋅⋅⋅⋅⋅⋅⋅⋅⋅⋅⋅⋅⋅⋅ 0
         nnzj: (------% sparsity)
julia> using NLPModelsIpopt
julia> result = ipopt(m; print_level=0) # solve the problem
"Execution stats: first-order stationary"
```
"""
function ExaModel(c::C; prod = nothing) where {C<:ExaCore}
return ExaModel(
c.obj,
c.con,
NLPModels.NLPModelMeta(
c.nvar,
ncon = c.ncon,
nnzj = c.nnzj,
nnzh = c.nnzh,
x0 = c.x0,
lvar = c.lvar,
uvar = c.uvar,
y0 = c.y0,
lcon = c.lcon,
ucon = c.ucon,
minimize = c.minimize,
),
NLPModels.Counters(),
nothing,
)
end
@inline function Base.getindex(v::V, i) where {V<:Variable}
_bound_check(v.size, i)
Var(i + (v.offset - _start(v.size[1]) + 1))
end
@inline function Base.getindex(v::V, is...) where {V<:Variable}
@assert(length(is) == length(v.size), "Variable index dimension error")
_bound_check(v.size, is)
Var(v.offset + idxx(is .- (_start.(v.size) .- 1), _length.(v.size)))
end
function _bound_check(sizes, i::I) where {I<:Integer}
__bound_check(sizes[1], i)
end
function _bound_check(sizes, is::NTuple{N,I}) where {I<:Integer,N}
__bound_check(sizes[1], is[1])
_bound_check(sizes[2:end], is[2:end])
end
_bound_check(sizes, is) = nothing
_bound_check(sizes, is::Tuple{}) = nothing
function __bound_check(a::I, b::I) where {I<:Integer}
@assert(1 <= b <= a, "Variable index bound error")
end
function __bound_check(a::UnitRange{Int}, b::I) where {I<:Integer}
@assert(b in a, "Variable index bound error")
end
function append!(backend, a, b::Base.Generator, lb)
b = _adapt_gen(b)
la = length(a)
resize!(a, la + lb)
map!(b.f, view(a, (la+1):(la+lb)), convert_array(b.iter, backend))
return a
end
function append!(backend, a, b::Base.Generator{UnitRange{I}}, lb) where {I}
la = length(a)
resize!(a, la + lb)
map!(b.f, view(a, (la+1):(la+lb)), b.iter)
return a
end
function append!(backend, a, b::AbstractArray, lb)
la = length(a)
resize!(a, la + lb)
map!(identity, view(a, (la+1):(la+lb)), convert_array(b, backend))
return a
end
function append!(backend, a, b::Number, lb)
la = length(a)
resize!(a, la + lb)
fill!(view(a, (la+1):(la+lb)), b)
return a
end
total(ns) = prod(_length(n) for n in ns)
_length(n::Int) = n
_length(n::UnitRange) = length(n)
size(ns) = Tuple(_length(n) for n in ns)
_start(n::Int) = 1
_start(n::UnitRange) = n.start
"""
variable(core, dims...; start = 0, lvar = -Inf, uvar = Inf)
Adds variables with dimensions specified by `dims` to `core`, and returns a `Variable` object. `dims` can be either an `Integer` or a `UnitRange`.
## Keyword Arguments
- `start`: The initial guess of the solution. Can either be `Number`, `AbstractArray`, or `Generator`.
- `lvar` : The variable lower bound. Can either be `Number`, `AbstractArray`, or `Generator`.
- `uvar` : The variable upper bound. Can either be `Number`, `AbstractArray`, or `Generator`.
## Example
```jldoctest
julia> using ExaModels
julia> c = ExaCore();
julia> x = variable(c, 10; start = (sin(i) for i=1:10))
Variable
x ∈ R^{10}
julia> y = variable(c, 2:10, 3:5; lvar = zeros(9,3), uvar = ones(9,3))
Variable
x ∈ R^{9 × 3}
```
"""
function variable(
c::C,
ns...;
start = zero(T),
lvar = T(-Inf),
uvar = T(Inf),
) where {T,C<:ExaCore{T}}
o = c.nvar
len = total(ns)
c.nvar += len
c.x0 = append!(c.backend, c.x0, start, total(ns))
c.lvar = append!(c.backend, c.lvar, lvar, total(ns))
c.uvar = append!(c.backend, c.uvar, uvar, total(ns))
return Variable(ns, len, o)
end
function variable(c::C; kwargs...) where {T,C<:ExaCore{T}}
return variable(c, 1; kwargs...)[1]
end
"""
objective(core::ExaCore, generator)
Adds objective terms specified by a `generator` to `core`, and returns an `Objective` object. Note: it is assumed that the terms are summed.
## Example
```jldoctest
julia> using ExaModels
julia> c = ExaCore();
julia> x = variable(c, 10);
julia> objective(c, x[i]^2 for i=1:10)
Objective
min (...) + ∑_{p ∈ P} f(x,p)
where |P| = 10
```
"""
function objective(c::C, gen) where {C<:ExaCore}
gen = _adapt_gen(gen)
f = SIMDFunction(gen, c.nobj, c.nnzg, c.nnzh)
pars = gen.iter
_objective(c, f, pars)
end
"""
objective(core::ExaCore, expr [, pars])
Adds objective terms specified by an `expr` and `pars` to `core`, and returns an `Objective` object.
"""
function objective(c::C, expr::N, pars = 1:1) where {C<:ExaCore,N<:AbstractNode}
f = _simdfunction(expr, c.nobj, c.nnzg, c.nnzh)
_objective(c, f, pars)
end
function _objective(c, f, pars)
nitr = length(pars)
c.nobj += nitr
c.nnzg += nitr * f.o1step
c.nnzh += nitr * f.o2step
c.obj = Objective(c.obj, f, convert_array(pars, c.backend))
end
"""
constraint(core, generator; start = 0, lcon = 0, ucon = 0)
Adds constraints specified by a `generator` to `core`, and returns a `Constraint` object.
## Keyword Arguments
- `start`: The initial guess of the solution. Can either be `Number`, `AbstractArray`, or `Generator`.
- `lcon` : The constraint lower bound. Can either be `Number`, `AbstractArray`, or `Generator`.
- `ucon` : The constraint upper bound. Can either be `Number`, `AbstractArray`, or `Generator`.
## Example
```jldoctest
julia> using ExaModels
julia> c = ExaCore();
julia> x = variable(c, 10);
julia> constraint(c, x[i] + x[i+1] for i=1:9; lcon = -1, ucon = (1+i for i=1:9))
Constraint
s.t. (...)
g♭ ≤ [g(x,p)]_{p ∈ P} ≤ g♯
where |P| = 9
```
"""
function constraint(
c::C,
gen::Base.Generator;
start = zero(T),
lcon = zero(T),
ucon = zero(T),
) where {T,C<:ExaCore{T}}
gen = _adapt_gen(gen)
f = SIMDFunction(gen, c.ncon, c.nnzj, c.nnzh)
pars = gen.iter
_constraint(c, f, pars, start, lcon, ucon)
end
"""
constraint(core, expr [, pars]; start = 0, lcon = 0, ucon = 0)
Adds constraints specified by an `expr` and `pars` to `core`, and returns a `Constraint` object.
"""
function constraint(
c::C,
expr::N,
pars = 1:1;
start = zero(T),
lcon = zero(T),
ucon = zero(T),
) where {T,C<:ExaCore{T},N<:AbstractNode}
f = _simdfunction(expr, c.ncon, c.nnzj, c.nnzh)
_constraint(c, f, pars, start, lcon, ucon)
end
"""
constraint(core, n; start = 0, lcon = 0, ucon = 0)
Adds empty constraints of dimension n, so that later the terms can be added with `constraint!`.
"""
function constraint(
c::C,
n;
start = zero(T),
lcon = zero(T),
ucon = zero(T),
) where {T,C<:ExaCore{T}}
f = _simdfunction(Null(), c.ncon, c.nnzj, c.nnzh)
_constraint(c, f, 1:n, start, lcon, ucon)
end
function _constraint(c, f, pars, start, lcon, ucon)
nitr = length(pars)
o = c.ncon
c.ncon += nitr
c.nnzj += nitr * f.o1step
c.nnzh += nitr * f.o2step
c.y0 = append!(c.backend, c.y0, start, nitr)
c.lcon = append!(c.backend, c.lcon, lcon, nitr)
c.ucon = append!(c.backend, c.ucon, ucon, nitr)
c.con = Constraint(c.con, f, convert_array(pars, c.backend), o)
end
function constraint!(c::C, c1, gen::Base.Generator) where {C<:ExaCore}
gen = _adapt_gen(gen)
f = SIMDFunction(gen, offset0(c1, 0), c.nnzj, c.nnzh)
pars = gen.iter
_constraint!(c, f, pars)
end
function constraint!(c::C, c1, expr, pars) where {C<:ExaCore}
f = _simdfunction(expr, offset0(c1, 0), c.nnzj, c.nnzh)
_constraint!(c, f, pars)
end
function _constraint!(c, f, pars)
oa = c.nconaug
nitr = length(pars)
c.nconaug += nitr
c.nnzj += nitr * f.o1step
c.nnzh += nitr * f.o2step
c.con = ConstraintAug(c.con, f, convert_array(pars, c.backend), oa)
end
function jac_structure!(m::ExaModel, rows::AbstractVector, cols::AbstractVector)
_jac_structure!(m.cons, rows, cols)
end
_jac_structure!(cons::ConstraintNull, rows, cols) = nothing
function _jac_structure!(cons, rows, cols)
_jac_structure!(cons.inner, rows, cols)
sjacobian!(rows, cols, cons, nothing, NaN)
end
function hess_structure!(m::ExaModel, rows::AbstractVector, cols::AbstractVector)
_obj_hess_structure!(m.objs, rows, cols)
_con_hess_structure!(m.cons, rows, cols)
end
_obj_hess_structure!(objs::ObjectiveNull, rows, cols) = nothing
function _obj_hess_structure!(objs, rows, cols)
_obj_hess_structure!(objs.inner, rows, cols)
shessian!(rows, cols, objs, nothing, NaN, NaN)
end
_con_hess_structure!(cons::ConstraintNull, rows, cols) = nothing
function _con_hess_structure!(cons, rows, cols)
_con_hess_structure!(cons.inner, rows, cols)
shessian!(rows, cols, cons, nothing, NaN, NaN)
end
function obj(m::ExaModel, x::AbstractVector)
_obj(m.objs, x)
end
_obj(objs, x) = _obj(objs.inner, x) + sum(objs.f.f(k, x) for k in objs.itr)
_obj(objs::ObjectiveNull, x) = zero(eltype(x))
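# The objective patterns form a linked list through the `inner` field; `_obj`
# recurses down to `ObjectiveNull` and adds each pattern's summed terms on the
# way back, so every generator keeps its own concrete, type-stable kernel.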
function cons_nln!(m::ExaModel, x::AbstractVector, g::AbstractVector)
fill!(g, zero(eltype(g)))
_cons_nln!(m.cons, x, g)
end
function _cons_nln!(cons, x, g)
_cons_nln!(cons.inner, x, g)
@simd for i in eachindex(cons.itr)
g[offset0(cons, i)] += cons.f.f(cons.itr[i], x)
end
end
_cons_nln!(cons::ConstraintNull, x, g) = nothing
function grad!(m::ExaModel, x::AbstractVector, f::AbstractVector)
fill!(f, zero(eltype(f)))
_grad!(m.objs, x, f)
return f
end
function _grad!(objs, x, f)
_grad!(objs.inner, x, f)
gradient!(f, objs, x, one(eltype(f)))
end
_grad!(objs::ObjectiveNull, x, f) = nothing
function jac_coord!(m::ExaModel, x::AbstractVector, jac::AbstractVector)
fill!(jac, zero(eltype(jac)))
_jac_coord!(m.cons, x, jac)
end
_jac_coord!(cons::ConstraintNull, x, jac) = nothing
function _jac_coord!(cons, x, jac)
_jac_coord!(cons.inner, x, jac)
sjacobian!(jac, nothing, cons, x, one(eltype(jac)))
end
function jprod_nln!(m::ExaModel, x::AbstractVector, v::AbstractVector, Jv::AbstractVector)
fill!(Jv, zero(eltype(Jv)))
_jprod_nln!(m.cons, x, v, Jv)
end
_jprod_nln!(cons::ConstraintNull, x, v, Jv) = nothing
function _jprod_nln!(cons, x, v, Jv)
_jprod_nln!(cons.inner, x, v, Jv)
sjacobian!((Jv, v), nothing, cons, x, one(eltype(Jv)))
end
function jtprod_nln!(m::ExaModel, x::AbstractVector, v::AbstractVector, Jtv::AbstractVector)
fill!(Jtv, zero(eltype(Jtv)))
_jtprod_nln!(m.cons, x, v, Jtv)
end
_jtprod_nln!(cons::ConstraintNull, x, v, Jtv) = nothing
function _jtprod_nln!(cons, x, v, Jtv)
_jtprod_nln!(cons.inner, x, v, Jtv)
sjacobian!(nothing, (Jtv, v), cons, x, one(eltype(Jtv)))
end
function hess_coord!(
m::ExaModel,
x::AbstractVector,
y::AbstractVector,
hess::AbstractVector;
obj_weight = one(eltype(x)),
)
fill!(hess, zero(eltype(hess)))
_obj_hess_coord!(m.objs, x, y, hess, obj_weight)
_con_hess_coord!(m.cons, x, y, hess, obj_weight)
return hess
end
_obj_hess_coord!(objs::ObjectiveNull, x, y, hess, obj_weight) = nothing
function _obj_hess_coord!(objs, x, y, hess, obj_weight)
_obj_hess_coord!(objs.inner, x, y, hess, obj_weight)
shessian!(hess, nothing, objs, x, obj_weight, zero(eltype(hess)))
end
_con_hess_coord!(cons::ConstraintNull, x, y, hess, obj_weight) = nothing
function _con_hess_coord!(cons, x, y, hess, obj_weight)
_con_hess_coord!(cons.inner, x, y, hess, obj_weight)
shessian!(hess, nothing, cons, x, y, zero(eltype(hess)))
end
function hprod!(
m::ExaModel,
x::AbstractVector,
y::AbstractVector,
v::AbstractVector,
Hv::AbstractVector;
obj_weight = one(eltype(x)),
)
fill!(Hv, zero(eltype(Hv)))
_obj_hprod!(m.objs, x, y, v, Hv, obj_weight)
_con_hprod!(m.cons, x, y, v, Hv, obj_weight)
return Hv
end
_obj_hprod!(objs::ObjectiveNull, x, y, v, Hv, obj_weight) = nothing
function _obj_hprod!(objs, x, y, v, Hv, obj_weight)
_obj_hprod!(objs.inner, x, y, v, Hv, obj_weight)
shessian!((Hv, v), nothing, objs, x, obj_weight, zero(eltype(Hv)))
end
_con_hprod!(cons::ConstraintNull, x, y, v, Hv, obj_weight) = nothing
function _con_hprod!(cons, x, y, v, Hv, obj_weight)
_con_hprod!(cons.inner, x, y, v, Hv, obj_weight)
shessian!((Hv, v), nothing, cons, x, y, zero(eltype(Hv)))
end
@inbounds @inline offset0(a, i) = offset0(a.f, i)
@inbounds @inline offset1(a, i) = offset1(a.f, i)
@inbounds @inline offset2(a, i) = offset2(a.f, i)
@inbounds @inline offset0(f, itr, i) = offset0(f, i)
@inbounds @inline offset0(f::F, i) where {F<:SIMDFunction} = f.o0 + i
@inbounds @inline offset1(f::F, i) where {F<:SIMDFunction} = f.o1 + f.o1step * (i - 1)
@inbounds @inline offset2(f::F, i) where {F<:SIMDFunction} = f.o2 + f.o2step * (i - 1)
@inbounds @inline offset0(a::C, i) where {C<:ConstraintAug} = offset0(a.f, a.itr, i)
@inbounds @inline offset0(f::F, itr, i) where {P<:Pair,F<:SIMDFunction{P}} =
f.o0 + f.f.first(itr[i], nothing)
@inbounds @inline offset0(f::F, itr, i) where {T<:Tuple,P<:Pair{T},F<:SIMDFunction{P}} =
f.o0 + idxx(coord(itr, i, f.f.first), Base.size(itr))
@inline idx(itr, I) = @inbounds itr[I]
@inline idx(itr::Base.Iterators.ProductIterator{V}, I) where {V} =
_idx(I - 1, itr.iterators, Base.size(itr))
@inline function _idx(n, (vec1, vec...), (si1, si...))
d, r = divrem(n, si1)
return (vec1[r+1], _idx(d, vec, si)...)
end
@inline _idx(n, (vec,), ::Tuple{Int}) = @inbounds vec[n+1]
@inline idxx(coord, si) = _idxx(coord, si, 1) + 1
@inline _idxx(coord, si, a) = a * (coord[1] - 1) + _idxx(coord[2:end], si[2:end], a * si[1])
@inline _idxx(::Tuple{}, ::Tuple{}, a) = 0
@inline coord(itr, i, (f, fs...)) = (f(idx(itr, i), nothing), coord(itr, i, fs)...)
@inline coord(itr, i, ::Tuple{}) = ()
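# Example (hypothetical sizes): for a 3 x 4 variable, idxx((2, 3), (3, 4)) returns
# 1*(2-1) + 3*(3-1) + 1 == 8, i.e. the column-major linear index of coordinate (2, 3).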
for (thing, val) in [(:solution, 1), (:multipliers_L, 0), (:multipliers_U, 2)]
@eval begin
"""
$(string($thing))(result, x)
Returns the $(string($thing)) for variable `x` associated with `result`, obtained by solving the model.
## Example
```jldoctest
julia> using ExaModels, NLPModelsIpopt
julia> c = ExaCore();
julia> x = variable(c, 1:10, lvar = -1, uvar = 1);
julia> objective(c, (x[i]-2)^2 for i in 1:10);
julia> m = ExaModel(c);
julia> result = ipopt(m; print_level=0);
julia> val = $(string($thing))(result, x);
julia> isapprox(val, fill($(string($val)), 10), atol=sqrt(eps(Float64)), rtol=Inf)
true
```
"""
function $thing(result::SolverCore.AbstractExecutionStats, x)
o = x.offset
len = total(x.size)
s = size(x.size)
return reshape(view(result.$thing, o+1:o+len), s...)
end
end
end
solution(result::SolverCore.AbstractExecutionStats, x::Var{I}) where {I} =
return result.solution[x.i]
"""
multipliers(result, y)
Returns the multipliers for constraints `y` associated with `result`, obtained by solving the model.
## Example
```jldoctest
julia> using ExaModels, NLPModelsIpopt
julia> c = ExaCore();
julia> x = variable(c, 1:10, lvar = -1, uvar = 1);
julia> objective(c, (x[i]-2)^2 for i in 1:10);
julia> y = constraint(c, x[i] + x[i+1] for i=1:9; lcon = -1, ucon = (1+i for i=1:9));
julia> m = ExaModel(c);
julia> result = ipopt(m; print_level=0);
julia> val = multipliers(result, y);
julia> val[1] ≈ 0.81933930
true
```
"""
function multipliers(result::SolverCore.AbstractExecutionStats, y::Constraint)
o = y.offset
len = length(y.itr)
return view(result.multipliers, o+1:o+len)
end
_adapt_gen(gen) = Base.Generator(gen.f, collect(gen.iter))
_adapt_gen(gen::Base.Generator{P}) where {P<:Union{AbstractArray,AbstractRange}} = gen
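# `_adapt_gen` materializes generators over non-array iterators (e.g. dictionary
# key iterators) via `collect`, so the iteration set becomes an array that
# `convert_array` can transfer to a device backend; generators over arrays and
# ranges are already in a device-friendly form and pass through unchanged.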
| ExaModels | https://github.com/exanauts/ExaModels.jl.git |
|
[
"MIT"
] | 0.7.1 | 68888f0d012aae809dbbe90f5ea6e1c8c5c5f737 | code | 6512 | """
@register_univariate(f, df, ddf)
Register a univariate function `f` to `ExaModels`, so that it can be used within objective and constraint expressions
# Arguments:
- `f`: function
- `df`: derivative function
- `ddf`: second-order derivative function
## Example
```jldoctest
julia> using ExaModels
julia> relu3(x) = x > 0 ? x^3 : zero(x)
relu3 (generic function with 1 method)
julia> drelu3(x) = x > 0 ? 3*x^2 : zero(x)
drelu3 (generic function with 1 method)
julia> ddrelu3(x) = x > 0 ? 6*x : zero(x)
ddrelu3 (generic function with 1 method)
julia> @register_univariate(relu3, drelu3, ddrelu3)
```
"""
macro register_univariate(f, df, ddf)
return esc(
quote
if !hasmethod($f, Tuple{ExaModels.AbstractNode})
@inline $f(n::N) where {N<:ExaModels.AbstractNode} = ExaModels.Node1($f, n)
end
@inline $f(d::D) where {D<:ExaModels.AbstractAdjointNode} =
ExaModels.AdjointNode1($f, $f(d.x), $df(d.x), d)
@inline $f(t::T) where {T<:ExaModels.AbstractSecondAdjointNode} =
ExaModels.SecondAdjointNode1($f, $f(t.x), $df(t.x), $ddf(t.x), t)
@inline (n::ExaModels.Node1{typeof($f),I})(i, x) where {I} = $f(n.inner(i, x))
end,
)
end
"""
@register_bivariate(f, df1, df2, ddf11, ddf12, ddf22)
Register a bivariate function `f` to `ExaModels`, so that it can be used within objective and constraint expressions
# Arguments:
- `f`: function
- `df1`: derivative function (w.r.t. first argument)
- `df2`: derivative function (w.r.t. second argument)
- `ddf11`: second-order derivative function (w.r.t. first argument)
- `ddf12`: second-order derivative function (w.r.t. first and second argument)
- `ddf22`: second-order derivative function (w.r.t. second argument)
## Example
```jldoctest
julia> using ExaModels
julia> relu23(x, y) = (x > 0 || y > 0) ? (x + y)^3 : zero(x)
relu23 (generic function with 1 method)
julia> drelu231(x, y) = (x > 0 || y > 0) ? 3 * (x + y)^2 : zero(x)
drelu231 (generic function with 1 method)
julia> drelu232(x, y) = (x > 0 || y > 0) ? 3 * (x + y)^2 : zero(x)
drelu232 (generic function with 1 method)
julia> ddrelu2311(x, y) = (x > 0 || y > 0) ? 6 * (x + y) : zero(x)
ddrelu2311 (generic function with 1 method)
julia> ddrelu2312(x, y) = (x > 0 || y > 0) ? 6 * (x + y) : zero(x)
ddrelu2312 (generic function with 1 method)
julia> ddrelu2322(x, y) = (x > 0 || y > 0) ? 6 * (x + y) : zero(x)
ddrelu2322 (generic function with 1 method)
julia> @register_bivariate(relu23, drelu231, drelu232, ddrelu2311, ddrelu2312, ddrelu2322)
```
"""
macro register_bivariate(f, df1, df2, ddf11, ddf12, ddf22)
return esc(
quote
if !hasmethod($f, Tuple{ExaModels.AbstractNode,ExaModels.AbstractNode})
@inline function $f(
d1::D1,
d2::D2,
) where {D1<:ExaModels.AbstractNode,D2<:ExaModels.AbstractNode}
ExaModels.Node2($f, d1, d2)
end
end
if !hasmethod($f, Tuple{ExaModels.AbstractNode,Real})
@inline function $f(
d1::D1,
d2::D2,
) where {D1<:ExaModels.AbstractNode,D2<:Real}
ExaModels.Node2($f, d1, d2)
end
end
if !hasmethod($f, Tuple{Real,ExaModels.AbstractNode})
@inline function $f(
d1::D1,
d2::D2,
) where {D1<:Real,D2<:ExaModels.AbstractNode}
ExaModels.Node2($f, d1, d2)
end
end
@inline function $f(
d1::D1,
d2::D2,
) where {D1<:ExaModels.AbstractAdjointNode,D2<:ExaModels.AbstractAdjointNode}
x1 = d1.x
x2 = d2.x
ExaModels.AdjointNode2($f, $f(x1, x2), $df1(x1, x2), $df2(x1, x2), d1, d2)
end
@inline function $f(
d1::D1,
d2::D2,
) where {D1<:ExaModels.AbstractAdjointNode,D2<:Real}
x1 = d1.x
x2 = d2
ExaModels.AdjointNode1($f, $f(x1, x2), $df1(x1, x2), d1)
end
@inline function $f(
d1::D1,
d2::D2,
) where {D1<:Real,D2<:ExaModels.AbstractAdjointNode}
x1 = d1
x2 = d2.x
ExaModels.AdjointNode1($f, $f(x1, x2), $df2(x1, x2), d2)
end
@inline function $f(
t1::T1,
t2::T2,
) where {
T1<:ExaModels.AbstractSecondAdjointNode,
T2<:ExaModels.AbstractSecondAdjointNode,
}
x1 = t1.x
x2 = t2.x
ExaModels.SecondAdjointNode2(
$f,
$f(x1, x2),
$df1(x1, x2),
$df2(x1, x2),
$ddf11(x1, x2),
$ddf12(x1, x2),
$ddf22(x1, x2),
t1,
t2,
)
end
@inline function $f(
t1::T1,
t2::T2,
) where {T1<:ExaModels.AbstractSecondAdjointNode,T2<:Real}
x1 = t1.x
x2 = t2
ExaModels.SecondAdjointNode1(
ExaModels.SecondFixed($f),
$f(x1, x2),
$df1(x1, x2),
$ddf11(x1, x2),
t1,
)
end
@inline function $f(
t1::T1,
t2::T2,
) where {T1<:Real,T2<:ExaModels.AbstractSecondAdjointNode}
x1 = t1
x2 = t2.x
ExaModels.SecondAdjointNode1(
ExaModels.FirstFixed($f),
$f(x1, x2),
$df2(x1, x2),
$ddf22(x1, x2),
t2,
)
end
@inline (n::ExaModels.Node2{typeof($f),I1,I2})(i, x) where {I1,I2} =
$f(n.inner1(i, x), n.inner2(i, x))
@inline (n::ExaModels.Node2{typeof($f),I1,I2})(i, x) where {I1<:Real,I2} =
$f(n.inner1, n.inner2(i, x))
@inline (n::ExaModels.Node2{typeof($f),I1,I2})(i, x) where {I1,I2<:Real} =
$f(n.inner1(i, x), n.inner2)
end,
)
end
| ExaModels | https://github.com/exanauts/ExaModels.jl.git |
|
[
"MIT"
] | 0.7.1 | 68888f0d012aae809dbbe90f5ea6e1c8c5c5f737 | code | 1499 | @inline (a::Pair{P,S} where {P,S<:AbstractNode})(i, x) = a.second(i, x)
"""
Compressor{I}
Data structure for the sparse index
# Fields:
- `inner::I`: stores the sparse index as a tuple form
"""
struct Compressor{I}
inner::I
end
@inline (i::Compressor{I})(n) where {I} = @inbounds i.inner[n]
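# Example (a small sketch): with comp = Compressor((1, 2, 1)), the counter values
# 1, 2, 3 map to sparse slots 1, 2, 1, so the first and third derivative terms of
# a pattern accumulate into the same nonzero entry.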
struct SIMDFunction{F,C1,C2}
f::F
comp1::C1
comp2::C2
o0::Int
o1::Int
o2::Int
o1step::Int
o2step::Int
end
"""
SIMDFunction(gen::Base.Generator, o0 = 0, o1 = 0, o2 = 0)
Returns a `SIMDFunction` using the `gen`.
# Arguments:
- `gen`: an iterable function specified in `Base.Generator` format
- `o0`: offset for the function evaluation
- `o1`: offset for the derivative evaluation
- `o2`: offset for the second-order derivative evaluation
"""
function SIMDFunction(gen::Base.Generator, o0 = 0, o1 = 0, o2 = 0)
f = gen.f(Par(eltype(gen.iter)))
_simdfunction(f, o0, o1, o2)
end
function _simdfunction(f, o0, o1, o2)
d = f(Identity(), AdjointNodeSource(nothing))
y1 = []
ExaModels.grpass(d, nothing, y1, nothing, 0, NaN)
t = f(Identity(), SecondAdjointNodeSource(nothing))
y2 = []
ExaModels.hrpass0(t, nothing, y2, nothing, nothing, 0, NaN, NaN)
a1 = unique(y1)
o1step = length(a1)
c1 = Compressor(Tuple(findfirst(isequal(i), a1) for i in y1))
a2 = unique(y2)
o2step = length(a2)
c2 = Compressor(Tuple(findfirst(isequal(i), a2) for i in y2))
SIMDFunction(f, c1, c2, o0, o1, o2, o1step, o2step)
end
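# Sketch of the mechanism above: running the reverse passes on sources backed by
# `nothing` hits the `comp::Nothing` methods, which push index pairs into y1/y2
# instead of accumulating numbers. `unique` collapses repeated pairs to one slot
# per distinct nonzero, and each `Compressor` stores, for every counter value,
# the slot it should be folded into.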
| ExaModels | https://github.com/exanauts/ExaModels.jl.git |
|
[
"MIT"
] | 0.7.1 | 68888f0d012aae809dbbe90f5ea6e1c8c5c5f737 | code | 935 | @inline function Base.:^(d1::D1, d2::D2) where {D1<:AbstractNode,D2<:Integer}
if d2 == 1
return d1
elseif d2 == 2
return Node1(abs2, d1)
else
return Node2(^, d1, d2)
end
end
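# For example, x[1]^2 lowers to Node1(abs2, x[1]) rather than Node2(^, x[1], 2),
# which avoids the generic power rule for the common quadratic case.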
# identity operators
for (op, id, typ) in [
(:(Base.:+), 0, :Real),
(:(Base.:-), 0, :Real),
(:(Base.:*), 1, :Real),
(:(Base.:/), 1, :Real),
]
@eval begin
@inline function $op(d1::D1, d2::D2) where {D1<:AbstractNode,D2<:$typ}
if d2 == $id
return d1
else
return Node2($op, d1, d2)
end
end
end
end
for (op, id, typ) in [(:(Base.:+), 0, :Real), (:(Base.:*), 1, :Real)]
@eval begin
@inline function $op(d1::D1, d2::D2) where {D1<:$typ,D2<:AbstractNode}
if d1 == $id
return d2
else
return Node2($op, d1, d2)
end
end
end
end
| ExaModels | https://github.com/exanauts/ExaModels.jl.git |
|
[
"MIT"
] | 0.7.1 | 68888f0d012aae809dbbe90f5ea6e1c8c5c5f737 | code | 440 | # A template for convert_array. This is extended in extension packages for each device architecture.
convert_array(v, ::Nothing) = v
# template to avoid oneAPI sum issue
sum(a) = Base.sum(a)
# to avoid type privacy
sort!(array; kwargs...) = Base.sort!(array; kwargs...)
# MOI
function Optimizer end
function IpoptOptimizer end
function MadNLPOptimizer end
function result_status_translator end
function termination_status_translator end
| ExaModels | https://github.com/exanauts/ExaModels.jl.git |
|
[
"MIT"
] | 0.7.1 | 68888f0d012aae809dbbe90f5ea6e1c8c5c5f737 | code | 13131 | # WrapperNLPModel serves as a wrapper for ExaNLPModel, or even any NLPModels.
# This is useful when you want to use a solver that does not support non-stardard array data types.
# TODO: make this as an independent package
struct WrapperNLPModel{
T,
VT,
T2,
VT2<:AbstractVector{T2},
VI,
I<:NLPModels.AbstractNLPModel{T2,VT2},
} <: NLPModels.AbstractNLPModel{T,VT}
inner::I
x_result::VT
x_result2::VT
y_result::VT
x_buffer::VT2
y_buffer::VT2
v_buffer::VT2
cons_buffer::VT2
grad_buffer::VT2
jac_buffer::VT2
jac_I_buffer::VI
jac_J_buffer::VI
hess_buffer::VT2
hess_I_buffer::VI
hess_J_buffer::VI
meta::NLPModels.AbstractNLPModelMeta{T,VT}
counters::NLPModels.Counters
end
"""
WrapperNLPModel(m)
Returns a `WrapperModel{Float64,Vector{64}}` wrapping `m`
"""
WrapperNLPModel(m) = WrapperNLPModel(Vector{Float64}, m)
"""
WrapperNLPModel(VT, m)
Returns a `WrapperNLPModel{T,VT}` wrapping `m <: AbstractNLPModel{T}`
"""
function WrapperNLPModel(VT, m)
nvar = NLPModels.get_nvar(m)
ncon = NLPModels.get_ncon(m)
nnzj = NLPModels.get_nnzj(m)
nnzh = NLPModels.get_nnzh(m)
x_result = VT(undef, nvar)
x_result2 = VT(undef, nvar)
y_result = VT(undef, ncon)
x0 = VT(undef, nvar)
lvar = VT(undef, nvar)
uvar = VT(undef, nvar)
y0 = VT(undef, ncon)
lcon = VT(undef, ncon)
ucon = VT(undef, ncon)
copyto!(x0, m.meta.x0)
copyto!(lvar, m.meta.lvar)
copyto!(uvar, m.meta.uvar)
copyto!(y0, m.meta.y0)
copyto!(lcon, m.meta.lcon)
copyto!(ucon, m.meta.ucon)
x_buffer = similar(m.meta.x0, nvar)
y_buffer = similar(m.meta.x0, ncon)
v_buffer = similar(m.meta.x0, nvar)
cons_buffer = similar(m.meta.x0, ncon)
grad_buffer = similar(m.meta.x0, nvar)
jac_buffer = similar(m.meta.x0, nnzj)
jac_I_buffer = similar(m.meta.x0, Int, nnzj)
jac_J_buffer = similar(m.meta.x0, Int, nnzj)
hess_buffer = similar(m.meta.x0, nnzh)
hess_I_buffer = similar(m.meta.x0, Int, nnzh)
hess_J_buffer = similar(m.meta.x0, Int, nnzh)
return WrapperNLPModel(
m,
x_result,
x_result2,
y_result,
x_buffer,
y_buffer,
v_buffer,
cons_buffer,
grad_buffer,
jac_buffer,
jac_I_buffer,
jac_J_buffer,
hess_buffer,
hess_I_buffer,
hess_J_buffer,
NLPModels.NLPModelMeta(
nvar,
x0 = x0,
lvar = lvar,
uvar = uvar,
ncon = ncon,
y0 = y0,
lcon = lcon,
ucon = ucon,
nnzj = nnzj,
nnzh = nnzh,
minimize = m.meta.minimize,
),
NLPModels.Counters(),
)
end
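# Usage sketch (names are illustrative): wrap a device-resident model so that a
# CPU-only solver can drive it through host buffers.
#
#     m_gpu = ExaModel(core)                          # e.g. built with a GPU backend
#     m_cpu = WrapperNLPModel(Vector{Float64}, m_gpu)
#     ipopt(m_cpu)                                    # solver only sees Vector{Float64}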
function NLPModels.jac_structure!(
m::WrapperNLPModel,
rows::AbstractVector,
cols::AbstractVector,
)
NLPModels.jac_structure!(m.inner, m.jac_I_buffer, m.jac_J_buffer)
copyto!(rows, m.jac_I_buffer)
copyto!(cols, m.jac_J_buffer)
end
function NLPModels.hess_structure!(
m::WrapperNLPModel,
rows::AbstractVector,
cols::AbstractVector,
)
NLPModels.hess_structure!(m.inner, m.hess_I_buffer, m.hess_J_buffer)
copyto!(rows, m.hess_I_buffer)
copyto!(cols, m.hess_J_buffer)
end
function NLPModels.obj(m::WrapperNLPModel, x::AbstractVector)
copyto!(m.x_result, x)
copyto!(m.x_buffer, m.x_result)
o = NLPModels.obj(m.inner, m.x_buffer)
return o
end
function NLPModels.cons_nln!(m::WrapperNLPModel, x::AbstractVector, g::AbstractVector)
copyto!(m.x_result, x)
copyto!(m.x_buffer, m.x_result)
NLPModels.cons_nln!(m.inner, m.x_buffer, m.cons_buffer)
copyto!(m.y_result, m.cons_buffer)
copyto!(g, m.y_result)
return
end
function NLPModels.grad!(m::WrapperNLPModel, x::AbstractVector, f::AbstractVector)
copyto!(m.x_result, x)
copyto!(m.x_buffer, m.x_result)
NLPModels.grad!(m.inner, m.x_buffer, m.grad_buffer)
copyto!(m.x_result, m.grad_buffer)
copyto!(f, m.x_result)
return f
end
function NLPModels.jac_coord!(m::WrapperNLPModel, x::AbstractVector, jac::AbstractVector)
copyto!(m.x_result, x)
copyto!(m.x_buffer, m.x_result)
NLPModels.jac_coord!(m.inner, m.x_buffer, m.jac_buffer)
copyto!(jac, m.jac_buffer)
return
end
function NLPModels.hess_coord!(
m::WrapperNLPModel,
x::AbstractVector,
y::AbstractVector,
hess::AbstractVector;
obj_weight = one(eltype(x)),
)
copyto!(m.x_buffer, x)
copyto!(m.y_buffer, y)
NLPModels.hess_coord!(
m.inner,
m.x_buffer,
m.y_buffer,
m.hess_buffer;
obj_weight = obj_weight,
)
copyto!(unsafe_wrap(Array, pointer(hess), length(hess)), m.hess_buffer)
return
end
function buffered_copyto!(a, b, c)
copyto!(b, c)
copyto!(a, b)
end
function NLPModels.jprod_nln!(
m::WrapperNLPModel,
x::AbstractVector,
v::AbstractVector,
Jv::AbstractVector,
)
buffered_copyto!(m.x_buffer, m.x_result, x)
buffered_copyto!(m.grad_buffer, m.x_result2, v)
NLPModels.jprod_nln!(m.inner, m.x_buffer, m.grad_buffer, m.cons_buffer)
buffered_copyto!(Jv, m.y_result, m.cons_buffer)
return
end
function NLPModels.jtprod_nln!(
m::WrapperNLPModel,
x::AbstractVector,
v::AbstractVector,
Jtv::AbstractVector,
)
buffered_copyto!(m.x_buffer, m.x_result, x)
buffered_copyto!(m.cons_buffer, m.y_result, v)
NLPModels.jtprod_nln!(m.inner, m.x_buffer, m.cons_buffer, m.grad_buffer)
buffered_copyto!(Jtv, m.x_result, m.grad_buffer)
return
end
function NLPModels.hprod!(
m::WrapperNLPModel,
x::AbstractVector,
y::AbstractVector,
v::AbstractVector,
Hv::AbstractVector;
obj_weight = one(eltype(x)),
)
buffered_copyto!(m.x_buffer, m.x_result, x)
buffered_copyto!(m.y_buffer, m.y_result, y)
buffered_copyto!(m.grad_buffer, m.x_result2, v)
NLPModels.hprod!(
m.inner,
m.x_buffer,
m.y_buffer,
m.grad_buffer,
m.v_buffer;
obj_weight = obj_weight,
)
buffered_copyto!(Hv, m.x_result, m.v_buffer)
return Hv
end
# TimedNLPModels
Base.@kwdef mutable struct CallbackStats
obj_cnt::Int = 0
cons_cnt::Int = 0
grad_cnt::Int = 0
jac_coord_cnt::Int = 0
hess_coord_cnt::Int = 0
jac_structure_cnt::Int = 0
hess_structure_cnt::Int = 0
obj_time::Float64 = 0.0
cons_time::Float64 = 0.0
grad_time::Float64 = 0.0
jac_coord_time::Float64 = 0.0
hess_coord_time::Float64 = 0.0
jac_structure_time::Float64 = 0.0
hess_structure_time::Float64 = 0.0
end
struct TimedNLPModel{T,VT,I<:NLPModels.AbstractNLPModel{T,VT}} <:
NLPModels.AbstractNLPModel{T,VT}
inner::I
meta::NLPModels.AbstractNLPModelMeta{T,VT}
stats::CallbackStats
counters::NLPModels.Counters
end
function TimedNLPModel(m)
return TimedNLPModel(m, m.meta, CallbackStats(), NLPModels.Counters())
end
function TimedNLPModel(c::ExaModels.ExaCore; kwargs...)
m = ExaModels.ExaModel(c; kwargs...)
return TimedNLPModel(m)
end
function NLPModels.jac_structure!(
m::M,
rows::V,
cols::V,
) where {M<:TimedNLPModel,V<:AbstractVector}
m.stats.jac_structure_cnt += 1
t = time()
NLPModels.jac_structure!(m.inner, rows, cols)
m.stats.jac_structure_time += time() - t
end
function NLPModels.hess_structure!(
m::M,
rows::V,
cols::V,
) where {M<:TimedNLPModel,V<:AbstractVector}
m.stats.hess_structure_cnt += 1
t = time()
NLPModels.hess_structure!(m.inner, rows, cols)
m.stats.hess_structure_time += time() - t
end
function NLPModels.obj(m::TimedNLPModel, x::AbstractVector)
m.stats.obj_cnt += 1
t = time()
o = NLPModels.obj(m.inner, x)
m.stats.obj_time += time() - t
return o
end
function NLPModels.cons!(m::TimedNLPModel, x::AbstractVector, g::AbstractVector)
m.stats.cons_cnt += 1
t = time()
NLPModels.cons!(m.inner, x, g)
m.stats.cons_time += time() - t
return
end
function NLPModels.grad!(m::TimedNLPModel, x::AbstractVector, f::AbstractVector)
m.stats.grad_cnt += 1
t = time()
NLPModels.grad!(m.inner, x, f)
m.stats.grad_time += time() - t
return
end
function NLPModels.jac_coord!(m::TimedNLPModel, x::AbstractVector, jac::AbstractVector)
m.stats.jac_coord_cnt += 1
t = time()
NLPModels.jac_coord!(m.inner, x, jac)
m.stats.jac_coord_time += time() - t
return
end
function NLPModels.hess_coord!(
m::TimedNLPModel,
x::AbstractVector,
y::AbstractVector,
hess::AbstractVector;
obj_weight = one(eltype(x)),
)
m.stats.hess_coord_cnt += 1
t = time()
NLPModels.hess_coord!(m.inner, x, y, hess; obj_weight = obj_weight)
m.stats.hess_coord_time += time() - t
return
end
function Base.print(io::IO, e::TimedNLPModel)
tot = 0.0
for f in fieldnames(CallbackStats)
if endswith(string(f), "cnt")
Printf.@printf "%20s: %13i times\n" f getfield(e.stats, f)
else
t = getfield(e.stats, f)
Printf.@printf "%20s: %13.6f secs\n" f t
tot += t
end
end
println("------------------------------------------")
Printf.@printf " total AD time: %13.6f secs\n" tot
end
Base.show(io::IO, ::MIME"text/plain", e::TimedNLPModel) = Base.print(io, e);
# CompressedNLPModels
struct CompressedNLPModel{
T,
VT<:AbstractVector{T},
B,
VI<:AbstractVector{Int},
VI2<:AbstractVector{Tuple{Tuple{Int,Int},Int}},
M<:NLPModels.AbstractNLPModel{T,VT},
} <: NLPModels.AbstractNLPModel{T,VT}
inner::M
jptr::VI
jsparsity::VI2
hptr::VI
hsparsity::VI2
buffer::VT
backend::B
meta::NLPModels.NLPModelMeta{T,VT}
counters::NLPModels.Counters
end
function getptr(backend::Nothing, array; cmp = (x, y) -> x != y)
return push!(
pushfirst!(findall(cmp.(@view(array[1:end-1]), @view(array[2:end]))) .+= 1, 1),
length(array) + 1,
)
end
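# Example: getptr(nothing, [1, 1, 2, 2, 2, 5]) == [1, 3, 6, 7] -- CSR-style
# pointers delimiting the runs of consecutive elements that are equal under `cmp`.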
function CompressedNLPModel(m)
nnzj = NLPModels.get_nnzj(m)
nnzh = NLPModels.get_nnzh(m)
Ibuffer = similar(m.meta.x0, Int, max(nnzj, nnzh))
Jbuffer = similar(m.meta.x0, Int, max(nnzj, nnzh))
buffer = similar(m.meta.x0, max(nnzj, nnzh))
NLPModels.jac_structure!(m, Ibuffer, Jbuffer)
backend = getbackend(m)
jsparsity = get_compressed_sparsity(nnzj, Ibuffer, Jbuffer, backend)
sort!(jsparsity; lt = (a, b) -> a[1] < b[1])
jptr = getptr(backend, jsparsity; cmp = (a, b) -> first(a) != first(b))
NLPModels.hess_structure!(m, Ibuffer, Jbuffer)
hsparsity = get_compressed_sparsity(nnzh, Ibuffer, Jbuffer, backend)
sort!(hsparsity; lt = (a, b) -> a[1] < b[1])
hptr = getptr(backend, hsparsity; cmp = (a, b) -> first(a) != first(b))
meta = NLPModels.NLPModelMeta(
m.meta.nvar,
ncon = m.meta.ncon,
nnzj = length(jptr) - 1,
nnzh = length(hptr) - 1,
x0 = m.meta.x0,
lvar = m.meta.lvar,
uvar = m.meta.uvar,
y0 = m.meta.y0,
lcon = m.meta.lcon,
ucon = m.meta.ucon,
)
counters = NLPModels.Counters()
return CompressedNLPModel(
m,
jptr,
jsparsity,
hptr,
hsparsity,
buffer,
backend,
meta,
counters,
)
end
getbackend(m) = nothing
get_compressed_sparsity(nnz, Ibuffer, Jbuffer, backend::Nothing) =
map((k, i, j) -> ((j, i), k), 1:nnz, Ibuffer, Jbuffer)
function NLPModels.obj(m::CompressedNLPModel, x::AbstractVector)
NLPModels.obj(m.inner, x)
end
function NLPModels.grad!(m::CompressedNLPModel, x::AbstractVector, y::AbstractVector)
NLPModels.grad!(m.inner, x, y)
end
function NLPModels.cons!(m::CompressedNLPModel, x::AbstractVector, g::AbstractVector)
NLPModels.cons!(m.inner, x, g)
end
function NLPModels.jac_coord!(m::CompressedNLPModel, x::AbstractVector, j::AbstractVector)
NLPModels.jac_coord!(m.inner, x, m.buffer)
_compress!(j, m.buffer, m.jptr, m.jsparsity, m.backend)
end
function NLPModels.hess_coord!(
m::CompressedNLPModel,
x::AbstractVector,
y::AbstractVector,
h::AbstractVector;
obj_weight = 1.0,
)
NLPModels.hess_coord!(m.inner, x, y, m.buffer; obj_weight = obj_weight)
_compress!(h, m.buffer, m.hptr, m.hsparsity, m.backend)
end
function NLPModels.jac_structure!(
m::CompressedNLPModel,
I::AbstractVector,
J::AbstractVector,
)
_structure!(I, J, m.jptr, m.jsparsity, m.backend)
end
function NLPModels.hess_structure!(
m::CompressedNLPModel,
I::AbstractVector,
J::AbstractVector,
)
_structure!(I, J, m.hptr, m.hsparsity, m.backend)
end
function _compress!(V, buffer, ptr, sparsity, backend::Nothing)
fill!(V, zero(eltype(V)))
@simd for i = 1:length(ptr)-1
for j = ptr[i]:ptr[i+1]-1
V[i] += buffer[sparsity[j][2]]
end
end
end
function _structure!(I, J, ptr, sparsity, backend::Nothing)
@simd for i = 1:length(ptr)-1
J[i], I[i] = sparsity[ptr[i]][1]
end
end
export WrapperNLPModel, TimedNLPModel, CompressedNLPModel
| ExaModels | https://github.com/exanauts/ExaModels.jl.git |
|
[
"MIT"
] | 0.7.1 | 68888f0d012aae809dbbe90f5ea6e1c8c5c5f737 | code | 510 | const BACKENDS = Any[nothing, CPU()]
if haskey(ENV, "EXAMODELS_TEST_CUDA")
using CUDA
push!(BACKENDS, CUDABackend())
@info "including CUDA"
else
@info "excluding CUDA"
end
if haskey(ENV, "EXAMODELS_TEST_AMDGPU")
using AMDGPU
push!(BACKENDS, ROCBackend())
@info "including AMDGPU"
else
@info "excluding AMDGPU"
end
if haskey(ENV, "EXAMODELS_TEST_ONEAPI")
using oneAPI
push!(BACKENDS, oneAPIBackend())
@info "including oneAPI"
else
@info "excluding oneAPI"
end
| ExaModels | https://github.com/exanauts/ExaModels.jl.git |
|
[
"MIT"
] | 0.7.1 | 68888f0d012aae809dbbe90f5ea6e1c8c5c5f737 | code | 477 | using Test, ExaModels
using Random
using KernelAbstractions
Random.seed!(0)
include("backends.jl")
include("NLPTest/NLPTest.jl")
include("ADTest/ADTest.jl")
include("JuMPTest/JuMPTest.jl")
include("UtilsTest/UtilsTest.jl")
@testset "ExaModels test" begin
@info "Running AD Test"
ADTest.runtests()
@info "Running NLP Test"
NLPTest.runtests()
@info "Running JuMP Test"
JuMPTest.runtests()
@info "Running Utils Test"
UtilsTest.runtests()
end
| ExaModels | https://github.com/exanauts/ExaModels.jl.git |
|
[
"MIT"
] | 0.7.1 | 68888f0d012aae809dbbe90f5ea6e1c8c5c5f737 | code | 7960 | module ADTest
using ExaModels
using Test, ForwardDiff, SpecialFunctions
const FUNCTIONS = [
("basic-functions-:+", x -> +(x[1])),
("basic-functions-:-", x -> -(x[1])),
("basic-functions-inv", x -> inv(x[1])),
("basic-functions-abs", x -> abs(x[1])),
("basic-functions-sqrt", x -> sqrt(x[1])),
("basic-functions-cbrt", x -> cbrt(x[1])),
("basic-functions-abs2", x -> abs2(x[1])),
("basic-functions-exp", x -> exp(x[1])),
("basic-functions-exp2", x -> exp2(x[1])),
("basic-functions-exp10", x -> exp10(x[1])),
("basic-functions-log", x -> log(x[1])),
("basic-functions-log2", x -> log2(x[1])),
("basic-functions-log1p", x -> log1p(x[1])),
("basic-functions-log10", x -> log10(x[1])),
("basic-functions-sin", x -> sin(x[1])),
("basic-functions-cos", x -> cos(x[1])),
("basic-functions-tan", x -> tan(x[1])),
("basic-functions-asin", x -> asin(x[1])),
("basic-functions-acos", x -> acos(x[1])),
("basic-functions-csc", x -> csc(x[1])),
("basic-functions-sec", x -> sec(x[1])),
("basic-functions-cot", x -> cot(x[1])),
("basic-functions-atan", x -> atan(x[1])),
("basic-functions-acot", x -> acot(x[1])),
# ("basic-functions-sind", x-> sind(x[1])), # cannot extend function
# ("basic-functions-cosd", x-> cosd(x[1])), # cannot extend function
# ("basic-functions-tand", x-> tand(x[1])), # cannot extend function
("basic-functions-cscd", x -> cscd(x[1])),
("basic-functions-secd", x -> secd(x[1])),
("basic-functions-cotd", x -> cotd(x[1])),
# ("basic-functions-atand", x-> atand(x[1])), # cannot extend function
# ("basic-functions-acotd", x-> acotd(x[1])), # cannot extend function
("basic-functions-sinh", x -> sinh(x[1])),
("basic-functions-cosh", x -> cosh(x[1])),
("basic-functions-tanh", x -> tanh(x[1])),
("basic-functions-csch", x -> csch(x[1])),
("basic-functions-sech", x -> sech(x[1])),
("basic-functions-coth", x -> coth(x[1])),
("basic-functions-atanh", x -> atanh(x[1])),
# ("basic-functions-acoth", x-> acoth(x[1])), # range issue
("basic-functions-:+", x -> +(x[1], x[2])),
("basic-functions-:-", x -> -(x[1], x[2])),
("basic-functions-:*", x -> *(x[1], x[2])),
("basic-functions-:^", x -> ^(x[1], x[2])),
("basic-functions-:/", x -> /(x[1], x[2])),
# ("basic-functions-:<=", x-> <=(x[1], x[2])), # not implemented
# ("basic-functions-:>=", x-> >=(x[1], x[2])), # not implemented
# ("basic-functions-:(==),", x-> (==)(x[1], x[2])), # not implemented
# ("basic-functions-:<", x-> <(x[1], x[2])), # not implemented
# ("basic-functions-:>", x-> >(x[1], x[2])), # not implemented
("special-functions-erfi", x -> erfi(x[1])),
("special-functions-erfcinv", x -> erfcinv(x[1])),
("special-functions-erfcx", x -> erfcx(x[1])),
("special-functions-invdigamma", x -> invdigamma(x[1])),
("special-functions-bessely1", x -> bessely1(x[1])),
("special-functions-besselj1", x -> besselj1(x[1])),
("special-functions-dawson", x -> dawson(x[1])),
("special-functions-airyaiprime", x -> airyaiprime(x[1])),
("special-functions-erf", x -> erf(x[1])),
("special-functions-trigamma", x -> trigamma(x[1])),
("special-functions-gamma", x -> gamma(x[1])),
("special-functions-airyaiprime", x -> airyaiprime(x[1])),
("special-functions-airybiprime", x -> airybiprime(x[1])),
("special-functions-erfinv", x -> erfinv(x[1])),
("special-functions-bessely0", x -> bessely0(x[1])),
("special-functions-erfc", x -> erfc(x[1])),
("special-functions-trigamma", x -> trigamma(x[1])),
("special-functions-airybiprime", x -> airybiprime(x[1])),
("special-functions-besselj0", x -> besselj0(x[1])),
("special-functions-beta", x -> beta(x[1], x[2])),
("special-functions-logbeta", x -> logbeta(x[1], x[2])),
(
"composite-functions-1-1",
x -> beta(erf(x[1] / x[2] / 3.0) + 3.0 * x[2], erf(x[9])^2),
),
("composite-functions-1-2", x -> 0 * x[1]),
(
"composite-functions-1-3",
x -> beta(cos(log(abs2(inv(inv(x[1]))) + 1.0)), erfc(tanh(0 * x[1]))),
),
("composite-functions-1-4", x -> (0 * x[1]^x[3]^1.0 + x[1]) / x[9] / x[10]),
(
"composite-functions-1-5",
x -> exp(x[1] + 1.0)^x[2] * log(abs2(x[3]) + 3) / tanh(x[2]),
),
("composite-functions-1-6", x -> beta(2 * logbeta(x[1], x[5]), beta(x[2], x[3]))),
("composite-functions-1-7", x -> besselj0(exp(erf(-x[1])))),
("composite-functions-1-8", x -> erfc(abs2(x[1]^2 / x[2])^x[9] / x[10])),
("composite-functions-1-9", x -> erfc(x[1])^erf(2.5x[2])),
("composite-functions-1-10", x -> sin(1 / x[1])),
("composite-functions-1-11", x -> exp(x[2]) / cos(x[1])^2 + sin(x[1]^2)),
("composite-functions-1-12", x -> sin(x[9]inv(x[1]) - x[8]inv(x[2]))),
("composite-functions-1-13", x -> x[1] / log(x[2]^2 + 9.0)),
(
"composite-functions-1-14",
x -> beta(beta(tan(beta(x[1], 1) + 2.0), cos(sin(x[2]))), x[3]),
),
]
function gradient(f, x)
T = eltype(x)
y = fill!(similar(x), zero(T))
ExaModels.gradient!(y, (p, x) -> f(x), x, nothing, one(T))
return y
end
function sgradient(f, x)
T = eltype(x)
ff = f(ExaModels.VarSource())
d = ff(ExaModels.Identity(), ExaModels.AdjointNodeSource(nothing))
y1 = []
ExaModels.grpass(d, nothing, y1, nothing, 0, NaN)
a1 = unique(y1)
comp = ExaModels.Compressor(Tuple(findfirst(isequal(i), a1) for i in y1))
n = length(a1)
buffer = fill!(similar(x, n), zero(T))
buffer_I = similar(x, Tuple{Int,Int}, n)
ExaModels.sgradient!(buffer_I, ff, nothing, nothing, comp, 0, NaN)
ExaModels.sgradient!(buffer, ff, nothing, x, comp, 0, one(T))
y = zeros(length(x))
y[collect(i for (i, j) in buffer_I)] += buffer
return y
end
function sjacobian(f, x)
T = eltype(x)
ff = f(ExaModels.VarSource())
d = ff(ExaModels.Identity(), ExaModels.AdjointNodeSource(nothing))
y1 = []
ExaModels.grpass(d, nothing, y1, nothing, 0, NaN)
a1 = unique(y1)
comp = ExaModels.Compressor(Tuple(findfirst(isequal(i), a1) for i in y1))
n = length(a1)
buffer = fill!(similar(x, n), zero(T))
buffer_I = similar(x, Int, n)
buffer_J = similar(x, Int, n)
ExaModels.sjacobian!(buffer_I, buffer_J, ff, nothing, nothing, comp, 0, 0, NaN)
ExaModels.sjacobian!(buffer, nothing, ff, nothing, x, comp, 0, 0, one(T))
y = zeros(length(x))
y[buffer_J] += buffer
return y
end
function shessian(f, x)
T = eltype(x)
ff = f(ExaModels.VarSource())
t = ff(ExaModels.Identity(), ExaModels.SecondAdjointNodeSource(nothing))
y2 = []
ExaModels.hrpass0(t, nothing, y2, nothing, nothing, 0, NaN, NaN)
a2 = unique(y2)
comp = ExaModels.Compressor(Tuple(findfirst(isequal(i), a2) for i in y2))
n = length(a2)
buffer = fill!(similar(x, n), zero(T))
buffer_I = similar(x, Int, n)
buffer_J = similar(x, Int, n)
ExaModels.shessian!(buffer_I, buffer_J, ff, nothing, nothing, comp, 0, NaN, NaN)
ExaModels.shessian!(buffer, nothing, ff, nothing, x, comp, 0, one(T), zero(T))
y = zeros(length(x), length(x))
for (k, (i, j)) in enumerate(zip(buffer_I, buffer_J))
if i == j
y[i, j] += buffer[k]
else
y[i, j] += buffer[k]
y[j, i] += buffer[k]
end
end
return y
end
function runtests()
@testset "AD test" begin
for (name, f) in FUNCTIONS
x0 = rand(10)
@testset "$name" begin
g = ForwardDiff.gradient(f, x0)
h = ForwardDiff.hessian(f, x0)
@test gradient(f, x0) ≈ g atol = 1e-6
@test sgradient(f, x0) ≈ g atol = 1e-6
@test sjacobian(f, x0) ≈ g atol = 1e-6
@test shessian(f, x0) ≈ h atol = 1e-6
end
end
end
end
end #module
| ExaModels | https://github.com/exanauts/ExaModels.jl.git |
|
[
"MIT"
] | 0.7.1 | 68888f0d012aae809dbbe90f5ea6e1c8c5c5f737 | code | 5813 | module JuMPTest
using Test, JuMP, ExaModels, PowerModels, NLPModelsIpopt, ..NLPTest
import ..BACKENDS
const JUMP_INTERFACE_INSTANCES = [
(:jump_luksan_vlcek_model, [3, 10]),
(:jump_ac_power_model, ["pglib_opf_case3_lmbd.m", "pglib_opf_case14_ieee.m"]),
]
function jump_luksan_vlcek_model(N)
jm = JuMP.Model()
JuMP.@variable(jm, x[i = 1:N], start = mod(i, 2) == 1 ? -1.2 : 1.0)
JuMP.@constraint(
jm,
s[i = 1:N-2],
3x[i+1]^3 + 2x[i+2] - 5 + sin(x[i+1] - x[i+2])sin(x[i+1] + x[i+2]) + 4x[i+1] -
x[i]exp(x[i] - x[i+1]) - 3 == 0.0
)
JuMP.@objective(jm, Min, sum(100(x[i-1]^2 - x[i])^2 + (x[i-1] - 1)^2 for i = 2:N))
return jm
end
function jump_ac_power_model(filename = "pglib_opf_case3_lmbd.m")
ref = NLPTest.get_power_data_ref(filename)
model = JuMP.Model()
#JuMP.set_optimizer_attribute(model, "print_level", 0)
JuMP.@variable(model, va[i in keys(ref[:bus])])
JuMP.@variable(
model,
ref[:bus][i]["vmin"] <= vm[i in keys(ref[:bus])] <= ref[:bus][i]["vmax"],
start = 1.0
)
JuMP.@variable(
model,
ref[:gen][i]["pmin"] <= pg[i in keys(ref[:gen])] <= ref[:gen][i]["pmax"]
)
JuMP.@variable(
model,
ref[:gen][i]["qmin"] <= qg[i in keys(ref[:gen])] <= ref[:gen][i]["qmax"]
)
JuMP.@variable(
model,
-ref[:branch][l]["rate_a"] <=
p[(l, i, j) in ref[:arcs]] <=
ref[:branch][l]["rate_a"]
)
JuMP.@variable(
model,
-ref[:branch][l]["rate_a"] <=
q[(l, i, j) in ref[:arcs]] <=
ref[:branch][l]["rate_a"]
)
JuMP.@objective(
model,
Min,
sum(
gen["cost"][1] * pg[i]^2 + gen["cost"][2] * pg[i] + gen["cost"][3] for
(i, gen) in ref[:gen]
)
)
for (i, bus) in ref[:ref_buses]
JuMP.@constraint(model, va[i] == 0)
end
for (i, bus) in ref[:bus]
bus_loads = [ref[:load][l] for l in ref[:bus_loads][i]]
bus_shunts = [ref[:shunt][s] for s in ref[:bus_shunts][i]]
JuMP.@constraint(
model,
sum(p[a] for a in ref[:bus_arcs][i]) ==
sum(pg[g] for g in ref[:bus_gens][i]) - sum(load["pd"] for load in bus_loads) -
sum(shunt["gs"] for shunt in bus_shunts) * vm[i]^2
)
JuMP.@constraint(
model,
sum(q[a] for a in ref[:bus_arcs][i]) ==
sum(qg[g] for g in ref[:bus_gens][i]) - sum(load["qd"] for load in bus_loads) +
sum(shunt["bs"] for shunt in bus_shunts) * vm[i]^2
)
end
# Branch power flow physics and limit constraints
for (i, branch) in ref[:branch]
f_idx = (i, branch["f_bus"], branch["t_bus"])
t_idx = (i, branch["t_bus"], branch["f_bus"])
p_fr = p[f_idx]
q_fr = q[f_idx]
p_to = p[t_idx]
q_to = q[t_idx]
vm_fr = vm[branch["f_bus"]]
vm_to = vm[branch["t_bus"]]
va_fr = va[branch["f_bus"]]
va_to = va[branch["t_bus"]]
g, b = PowerModels.calc_branch_y(branch)
tr, ti = PowerModels.calc_branch_t(branch)
ttm = tr^2 + ti^2
g_fr = branch["g_fr"]
b_fr = branch["b_fr"]
g_to = branch["g_to"]
b_to = branch["b_to"]
# From side of the branch flow
JuMP.@constraint(
model,
p_fr ==
(g + g_fr) / ttm * vm_fr^2 +
(-g * tr + b * ti) / ttm * (vm_fr * vm_to * cos(va_fr - va_to)) +
(-b * tr - g * ti) / ttm * (vm_fr * vm_to * sin(va_fr - va_to))
)
JuMP.@constraint(
model,
q_fr ==
-(b + b_fr) / ttm * vm_fr^2 -
(-b * tr - g * ti) / ttm * (vm_fr * vm_to * cos(va_fr - va_to)) +
(-g * tr + b * ti) / ttm * (vm_fr * vm_to * sin(va_fr - va_to))
)
# To side of the branch flow
JuMP.@constraint(
model,
p_to ==
(g + g_to) * vm_to^2 +
(-g * tr - b * ti) / ttm * (vm_to * vm_fr * cos(va_to - va_fr)) +
(-b * tr + g * ti) / ttm * (vm_to * vm_fr * sin(va_to - va_fr))
)
JuMP.@constraint(
model,
q_to ==
-(b + b_to) * vm_to^2 -
(-b * tr + g * ti) / ttm * (vm_to * vm_fr * cos(va_to - va_fr)) +
(-g * tr - b * ti) / ttm * (vm_to * vm_fr * sin(va_to - va_fr))
)
# Voltage angle difference limit
JuMP.@constraint(model, branch["angmin"] <= va_fr - va_to <= branch["angmax"])
# Apparent power limit, from side and to side
JuMP.@constraint(model, p_fr^2 + q_fr^2 <= branch["rate_a"]^2)
JuMP.@constraint(model, p_to^2 + q_to^2 <= branch["rate_a"]^2)
end
return model
end
function runtests()
@testset "JuMP Interface test" begin
for (model, cases) in JUMP_INTERFACE_INSTANCES
for case in cases
@testset "$model $case" begin
modelfunction = getfield(@__MODULE__, model)
# solve JuMP problem
jm = modelfunction(case)
set_optimizer(jm, NLPModelsIpopt.Ipopt.Optimizer)
set_optimizer_attribute(jm, "print_level", 0)
optimize!(jm)
sol = value.(all_variables(jm))
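                    # Reference solution from stock JuMP + Ipopt; each backend's
                    # ExaModel transcription of the same JuMP model must match it.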
for backend in BACKENDS
@testset "$backend" begin
m = WrapperNLPModel(ExaModel(jm; backend = backend))
result = ipopt(m; print_level = 0)
@test sol β result.solution atol = 1e-6
end
end
end
end
end
end
end
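# A minimal usage sketch (assuming the parent test runner defines BACKENDS and
# JUMP_INTERFACE_INSTANCES before including this module):
#
#   runtests()   # solves each JuMP instance and cross-checks every backend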
end # module
| ExaModels | https://github.com/exanauts/ExaModels.jl.git |
| ["MIT"] | 0.7.1 | 68888f0d012aae809dbbe90f5ea6e1c8c5c5f737 | code | 6883 |
module NLPTest
using ExaModels
using Downloads, Test
using NLPModels, JuMP, NLPModelsJuMP, PowerModels, NLPModelsIpopt, MadNLP, Percival
import ..BACKENDS
const NLP_TEST_ARGUMENTS = [
("luksan_vlcek", 3),
("luksan_vlcek", 20),
("ac_power", "pglib_opf_case3_lmbd.m"),
("ac_power", "pglib_opf_case14_ieee.m"),
]
const SOLVERS = [
("ipopt", nlp -> ipopt(nlp; print_level = 0)),
("madnlp", nlp -> madnlp(nlp; print_level = MadNLP.ERROR)),
("percival", nlp -> percival(nlp)),
]
const EXCLUDE1 = [("ac_power", "percival")]
const EXCLUDE2 = []
for backend in BACKENDS
if "oneAPIBackend()" == string(backend)
push!(EXCLUDE2, ("percival", backend))
end
end
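# EXCLUDE1 skips (problem, solver) pairs; EXCLUDE2 skips (solver, backend)
# pairs. Percival is excluded on the AC power cases and on the oneAPI backend.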
include("luksan.jl")
include("power.jl")
function test_nlp(m1, m2; full = false)
@testset "NLP meta tests" begin
list = [:nvar, :ncon, :x0, :lvar, :uvar, :y0, :lcon, :ucon]
if full
append!(list, [:nnzj, :nnzh])
end
for field in list
@testset "$field" begin
@test getfield(m1.meta, field) == getfield(m2.meta, field)
end
end
end
@testset "NLP callback tests" begin
x0 = copy(m2.meta.x0)
y0 = randn(eltype(m2.meta.x0), m2.meta.ncon)
u = randn(eltype(m2.meta.x0), m2.meta.nvar)
v = randn(eltype(m2.meta.x0), m2.meta.ncon)
@test NLPModels.obj(m1, x0) β NLPModels.obj(m2, x0) atol = 1e-6
@test NLPModels.cons(m1, x0) β NLPModels.cons(m2, x0) atol = 1e-6
@test NLPModels.grad(m1, x0) β NLPModels.grad(m2, x0) atol = 1e-6
@test NLPModels.jprod(m1, x0, u) β NLPModels.jprod(m2, x0, u) atol = 1e-6
@test NLPModels.jtprod(m1, x0, v) β NLPModels.jtprod(m2, x0, v) atol = 1e-6
@test NLPModels.hprod(m1, x0, y0, u) β NLPModels.hprod(m2, x0, y0, u) atol = 1e-6
if full
jac_buffer1 = zeros(m1.meta.nnzj)
jac_buffer2 = zeros(m2.meta.nnzj)
jac_I_buffer1 = zeros(Int, m1.meta.nnzj)
jac_I_buffer2 = zeros(Int, m2.meta.nnzj)
jac_J_buffer1 = zeros(Int, m1.meta.nnzj)
jac_J_buffer2 = zeros(Int, m2.meta.nnzj)
hess_buffer1 = zeros(m1.meta.nnzh)
hess_buffer2 = zeros(m2.meta.nnzh)
hess_I_buffer1 = zeros(Int, m1.meta.nnzh)
hess_I_buffer2 = zeros(Int, m2.meta.nnzh)
hess_J_buffer1 = zeros(Int, m1.meta.nnzh)
hess_J_buffer2 = zeros(Int, m2.meta.nnzh)
NLPModels.jac_coord!(m1, x0, jac_buffer1)
NLPModels.jac_coord!(m2, x0, jac_buffer2)
NLPModels.hess_coord!(m1, x0, y0, hess_buffer1)
NLPModels.hess_coord!(m2, x0, y0, hess_buffer2)
NLPModels.jac_structure!(m1, jac_I_buffer1, jac_J_buffer1)
NLPModels.jac_structure!(m2, jac_I_buffer2, jac_J_buffer2)
NLPModels.hess_structure!(m1, hess_I_buffer1, hess_J_buffer1)
NLPModels.hess_structure!(m2, hess_I_buffer2, hess_J_buffer2)
@test jac_buffer1 β jac_buffer2 atol = 1e-6
@test hess_buffer1 β hess_buffer2 atol = 1e-6
@test jac_I_buffer1 == jac_I_buffer2
@test jac_J_buffer1 == jac_J_buffer2
@test hess_I_buffer1 == hess_I_buffer2
@test hess_J_buffer1 == hess_J_buffer2
end
end
end
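# With `full = true`, the sparse Jacobian/Hessian values and coordinate
# structures are also compared entry-by-entry; this is only done between two
# ExaModels-backed models, since other NLPModels backends may order the
# nonzeros differently.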
function test_nlp_solution(result1, result2)
@testset "solution test" begin
@test result1.status == result2.status
for field in [:solution, :multipliers, :multipliers_L, :multipliers_U]
@testset "$field" begin
@test getfield(result1, field) β getfield(result2, field) atol = 1e-6
end
end
end
end
dual_lb(x) = has_lower_bound(x) ? dual(LowerBoundRef(x)) : 0.0
dual_ub(x) = has_upper_bound(x) ? dual(UpperBoundRef(x)) : 0.0
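# dual_lb/dual_ub read the bound duals from a solved JuMP model (zero when the
# bound is absent). The comparisons in test_api below flip signs on upper-bound
# and constraint duals, since JuMP and NLPModels report those multipliers with
# opposite sign conventions.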
function test_api(result1, vars1, cons1, vars2, cons2)
@testset "API test" begin
for (var1, var2) in zip(vars1, vars2)
@test solution(result1, var1) β [value(var) for var in var2] atol = 1e-6
@test multipliers_L(result1, var1) β [dual_lb(var) for var in var2] atol = 1e-6
@test multipliers_U(result1, var1) β [-dual_ub(var) for var in var2] atol = 1e-6
end
for (con1, con2) in zip(cons1, cons2)
@test multipliers(result1, con1) β [-dual.(con) for con in con2] atol = 1e-6
end
end
end
function runtests()
@testset "NLP test" begin
for backend in BACKENDS
@testset "$backend" begin
for (name, args) in NLP_TEST_ARGUMENTS
@testset "$name $args" begin
exa_model = getfield(@__MODULE__, Symbol("_exa_$(name)_model"))
jump_model = getfield(@__MODULE__, Symbol("_jump_$(name)_model"))
m, vars0, cons0 = exa_model(nothing, args)
m0 = WrapperNLPModel(m)
m, vars2, cons2 = jump_model(nothing, args)
m2 = MathOptNLPModel(m)
set_optimizer(m, MadNLP.Optimizer)
set_optimizer_attribute(m, "print_level", MadNLP.ERROR)
optimize!(m)
m, vars1, cons1 = exa_model(backend, args)
m1 = WrapperNLPModel(m)
@testset "Backend test" begin
test_nlp(m0, m1; full = true)
end
@testset "Comparison to JuMP" begin
test_nlp(m1, m2; full = false)
for (sname, solver) in SOLVERS
if (name, sname) in EXCLUDE1 || (sname, backend) in EXCLUDE2
continue
end
result1 = solver(m1)
result2 = solver(m2)
@testset "$sname" begin
test_nlp_solution(result1, result2)
end
end
end
result1 = madnlp(m1; print_level = MadNLP.ERROR)
test_api(result1, vars1, cons1, vars2, cons2)
end
end
m3 = WrapperNLPModel(exa_luksan_vlcek_model(nothing, 3; M = 2))
m4 = jump_luksan_vlcek_model(nothing, 3; M = 2)
@testset "Multi-column constraints" begin
test_nlp(m3, m4; full = false)
end
end
end
end
end
function __init__()
if haskey(ENV, "EXA_MODELS_DEPOT")
global TMPDIR = ENV["EXA_MODELS_DEPOT"]
else
global TMPDIR = tempname()
mkdir(TMPDIR)
end
PowerModels.silence()
end
end # NLPTest
| ExaModels | https://github.com/exanauts/ExaModels.jl.git |
| ["MIT"] | 0.7.1 | 68888f0d012aae809dbbe90f5ea6e1c8c5c5f737 | code | 1716 |
function luksan_vlcek_obj(x, i, j)
return 100 * (x[i-1, j]^2 - x[i, j])^2 + (x[i-1, j] - 1)^2
end
function luksan_vlcek_con1(x, i, j)
return 3x[i+1, j]^3 + 2 * x[i+2, j] - 5
end
function luksan_vlcek_con2(x, i, j)
return sin(x[i+1, j] - x[i+2, j])sin(x[i+1, j] + x[i+2, j]) + 4x[i+1, j] -
x[i, j]exp(x[i, j] - x[i+1, j]) - 3
end
function luksan_vlcek_x0(i)
return mod(i, 2) == 1 ? -1.2 : 1.0
end
function _exa_luksan_vlcek_model(backend, N; M = 1)
c = ExaCore(backend = backend)
x = variable(c, N, M; start = [luksan_vlcek_x0(i) for i = 1:N, j = 1:M])
s = constraint(c, luksan_vlcek_con1(x, i, j) for i = 1:N-2, j = 1:M)
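    # constraint! augments the existing constraint `s` in place: each (i, j)
    # key adds luksan_vlcek_con2 to the corresponding row, so row (i, j) of `s`
    # encodes con1 + con2 == 0, matching the JuMP model below.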
constraint!(c, s, (i, j) => luksan_vlcek_con2(x, i, j) for i = 1:N-2, j = 1:M)
objective(c, luksan_vlcek_obj(x, i, j) for i = 2:N, j = 1:M)
return ExaModel(c; prod = true), (x,), (s,)
end
function exa_luksan_vlcek_model(backend, N; M = 1)
m, vars, cons = _exa_luksan_vlcek_model(backend, N; M = M)
return m
end
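# For example (hypothetical driver, default CPU backend):
#
#   m = exa_luksan_vlcek_model(nothing, 3)
#   result = ipopt(m; print_level = 0)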
function _jump_luksan_vlcek_model(backend, N; M = 1)
jm = JuMP.Model()
JuMP.@variable(jm, x[i = 1:N, j = 1:M], start = mod(i, 2) == 1 ? -1.2 : 1.0)
JuMP.@NLconstraint(
jm,
s[i = 1:N-2, j = 1:M],
3x[i+1, j]^3 + 2x[i+2, j] - 5 +
sin(x[i+1, j] - x[i+2, j])sin(x[i+1, j] + x[i+2, j]) +
4x[i+1, j] - x[i, j]exp(x[i, j] - x[i+1, j]) - 3 == 0.0
)
JuMP.@NLobjective(
jm,
Min,
sum(100(x[i-1, j]^2 - x[i, j])^2 + (x[i-1, j] - 1)^2 for i = 2:N, j = 1:M)
)
return jm, (x,), (s,)
end
function jump_luksan_vlcek_model(backend, N; M = 1)
jm, vars, cons = _jump_luksan_vlcek_model(backend, N; M = M)
return MathOptNLPModel(jm)
end
| ExaModels | https://github.com/exanauts/ExaModels.jl.git |
| ["MIT"] | 0.7.1 | 68888f0d012aae809dbbe90f5ea6e1c8c5c5f737 | code | 15245 |
function get_power_case(filename)
if !isfile(filename)
ff = joinpath(TMPDIR, filename)
if !isfile(ff)
@info "Downloading $filename"
Downloads.download(
"https://raw.githubusercontent.com/power-grid-lib/pglib-opf/dc6be4b2f85ca0e776952ec22cbd4c22396ea5a3/$filename",
joinpath(TMPDIR, filename),
)
return joinpath(TMPDIR, filename)
else
return ff
end
else
return filename
end
end
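# get_power_data_ref parses the (possibly downloaded) pglib-opf case with
# PowerModels, standardizes the cost terms to quadratic, computes thermal
# limits, and returns the network reference dictionary.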
function get_power_data_ref(filename)
case = get_power_case(filename)
data = PowerModels.parse_file(case)
PowerModels.standardize_cost_terms!(data, order = 2)
PowerModels.calc_thermal_limits!(data)
return PowerModels.build_ref(data)[:it][:pm][:nw][0]
end
convert_data(data::N, backend) where {names,N<:NamedTuple{names}} =
NamedTuple{names}(ExaModels.convert_array(d, backend) for d in data)
parse_ac_power_data(filename, backend) =
convert_data(parse_ac_power_data(filename), backend)
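# convert_data moves every array field of the parsed data NamedTuple onto the
# requested backend (e.g. a GPU array type), so the model evaluations can run
# where the data lives.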
function parse_ac_power_data(filename)
ref = get_power_data_ref(filename)
arcdict = Dict(a => k for (k, a) in enumerate(ref[:arcs]))
busdict = Dict(k => i for (i, (k, v)) in enumerate(ref[:bus]))
gendict = Dict(k => i for (i, (k, v)) in enumerate(ref[:gen]))
branchdict = Dict(k => i for (i, (k, v)) in enumerate(ref[:branch]))
return (
bus = [
begin
bus_loads = [ref[:load][l] for l in ref[:bus_loads][k]]
bus_shunts = [ref[:shunt][s] for s in ref[:bus_shunts][k]]
pd = sum(load["pd"] for load in bus_loads; init = 0.0)
gs = sum(shunt["gs"] for shunt in bus_shunts; init = 0.0)
qd = sum(load["qd"] for load in bus_loads; init = 0.0)
bs = sum(shunt["bs"] for shunt in bus_shunts; init = 0.0)
(i = busdict[k], pd = pd, gs = gs, qd = qd, bs = bs)
end for (k, v) in ref[:bus]
],
gen = [
(
i = gendict[k],
cost1 = v["cost"][1],
cost2 = v["cost"][2],
cost3 = v["cost"][3],
bus = busdict[v["gen_bus"]],
) for (k, v) in ref[:gen]
],
arc = [
(i = k, rate_a = ref[:branch][l]["rate_a"], bus = busdict[i]) for
(k, (l, i, j)) in enumerate(ref[:arcs])
],
branch = [
begin
f_idx = arcdict[i, branch["f_bus"], branch["t_bus"]]
t_idx = arcdict[i, branch["t_bus"], branch["f_bus"]]
g, b = PowerModels.calc_branch_y(branch)
tr, ti = PowerModels.calc_branch_t(branch)
ttm = tr^2 + ti^2
g_fr = branch["g_fr"]
b_fr = branch["b_fr"]
g_to = branch["g_to"]
b_to = branch["b_to"]
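                # Precompute the admittance-based branch coefficients once per
                # branch so the flow constraints below reduce to c-weighted
                # trigonometric terms.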
c1 = (-g * tr - b * ti) / ttm
c2 = (-b * tr + g * ti) / ttm
c3 = (-g * tr + b * ti) / ttm
c4 = (-b * tr - g * ti) / ttm
c5 = (g + g_fr) / ttm
c6 = (b + b_fr) / ttm
c7 = (g + g_to)
c8 = (b + b_to)
(
i = branchdict[i],
j = 1,
f_idx = f_idx,
t_idx = t_idx,
f_bus = busdict[branch["f_bus"]],
t_bus = busdict[branch["t_bus"]],
c1 = c1,
c2 = c2,
c3 = c3,
c4 = c4,
c5 = c5,
c6 = c6,
c7 = c7,
c8 = c8,
rate_a_sq = branch["rate_a"]^2,
)
end for (i, branch) in ref[:branch]
],
ref_buses = [busdict[i] for (i, k) in ref[:ref_buses]],
vmax = [v["vmax"] for (k, v) in ref[:bus]],
vmin = [v["vmin"] for (k, v) in ref[:bus]],
pmax = [v["pmax"] for (k, v) in ref[:gen]],
pmin = [v["pmin"] for (k, v) in ref[:gen]],
qmax = [v["qmax"] for (k, v) in ref[:gen]],
qmin = [v["qmin"] for (k, v) in ref[:gen]],
rate_a = [ref[:branch][l]["rate_a"] for (k, (l, i, j)) in enumerate(ref[:arcs])],
angmax = [b["angmax"] for (i, b) in ref[:branch]],
angmin = [b["angmin"] for (i, b) in ref[:branch]],
)
end
function _exa_ac_power_model(backend, filename)
data = parse_ac_power_data(filename, backend)
w = ExaModels.ExaCore(backend = backend)
    va = ExaModels.variable(w, length(data.bus))
vm = ExaModels.variable(
w,
length(data.bus);
start = fill!(similar(data.bus, Float64), 1.0),
lvar = data.vmin,
uvar = data.vmax,
)
pg = ExaModels.variable(w, length(data.gen); lvar = data.pmin, uvar = data.pmax)
qg = ExaModels.variable(w, length(data.gen); lvar = data.qmin, uvar = data.qmax)
p = ExaModels.variable(w, length(data.arc); lvar = -data.rate_a, uvar = data.rate_a)
q = ExaModels.variable(w, length(data.arc); lvar = -data.rate_a, uvar = data.rate_a)
o = ExaModels.objective(
w,
g.cost1 * pg[g.i]^2 + g.cost2 * pg[g.i] + g.cost3 for g in data.gen
)
c1 = ExaModels.constraint(w, va[i] for i in data.ref_buses)
c2 = ExaModels.constraint(
w,
p[b.f_idx] - b.c5 * vm[b.f_bus]^2 -
b.c3 * (vm[b.f_bus] * vm[b.t_bus] * cos(va[b.f_bus] - va[b.t_bus])) -
b.c4 * (vm[b.f_bus] * vm[b.t_bus] * sin(va[b.f_bus] - va[b.t_bus])) for
b in data.branch
)
c3 = ExaModels.constraint(
w,
q[b.f_idx] +
b.c6 * vm[b.f_bus]^2 +
b.c4 * (vm[b.f_bus] * vm[b.t_bus] * cos(va[b.f_bus] - va[b.t_bus])) -
b.c3 * (vm[b.f_bus] * vm[b.t_bus] * sin(va[b.f_bus] - va[b.t_bus])) for
b in data.branch
)
c4 = ExaModels.constraint(
w,
p[b.t_idx] - b.c7 * vm[b.t_bus]^2 -
b.c1 * (vm[b.t_bus] * vm[b.f_bus] * cos(va[b.t_bus] - va[b.f_bus])) -
b.c2 * (vm[b.t_bus] * vm[b.f_bus] * sin(va[b.t_bus] - va[b.f_bus])) for
b in data.branch
)
c5 = ExaModels.constraint(
w,
q[b.t_idx] +
b.c8 * vm[b.t_bus]^2 +
b.c2 * (vm[b.t_bus] * vm[b.f_bus] * cos(va[b.t_bus] - va[b.f_bus])) -
b.c1 * (vm[b.t_bus] * vm[b.f_bus] * sin(va[b.t_bus] - va[b.f_bus])) for
b in data.branch
)
c6 = ExaModels.constraint(
w,
va[b.f_bus] - va[b.t_bus] for b in data.branch;
lcon = data.angmin,
ucon = data.angmax,
)
c7 = ExaModels.constraint(
w,
p[b.f_idx]^2 + q[b.f_idx]^2 - b.rate_a_sq for b in data.branch;
lcon = fill!(similar(data.branch, Float64, length(data.branch)), -Inf),
)
c8 = ExaModels.constraint(
w,
p[b.t_idx]^2 + q[b.t_idx]^2 - b.rate_a_sq for b in data.branch;
lcon = fill!(similar(data.branch, Float64, length(data.branch)), -Inf),
)
c9 = ExaModels.constraint(w, b.pd + b.gs * vm[b.i]^2 for b in data.bus)
c10 = ExaModels.constraint(w, b.qd - b.bs * vm[b.i]^2 for b in data.bus)
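    # The bus balance constraints c9/c10 start from the load and shunt terms;
    # constraint! then accumulates arc flows and generator injections into the
    # matching bus rows to complete the power balance.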
c11 = ExaModels.constraint!(w, c9, a.bus => p[a.i] for a in data.arc)
c12 = ExaModels.constraint!(w, c10, a.bus => q[a.i] for a in data.arc)
c13 = ExaModels.constraint!(w, c9, g.bus => -pg[g.i] for g in data.gen)
c14 = ExaModels.constraint!(w, c10, g.bus => -qg[g.i] for g in data.gen)
return ExaModels.ExaModel(w; prod = true),
(va, vm, pg, qg, p, q),
(c1, c2, c3, c4, c5, c6, c7, c8, c9, c10)
end
function exa_ac_power_model(backend, filename)
m, vars, cons = _exa_ac_power_model(backend, filename)
return m
end
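# A minimal usage sketch (hypothetical driver, default CPU backend):
#
#   m = exa_ac_power_model(nothing, "pglib_opf_case3_lmbd.m")
#   result = ipopt(m; print_level = 0)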
function _jump_ac_power_model(backend, filename)
ref = get_power_data_ref(filename)
model = JuMP.Model()
JuMP.@variable(model, va[i in keys(ref[:bus])])
JuMP.@variable(
model,
ref[:bus][i]["vmin"] <= vm[i in keys(ref[:bus])] <= ref[:bus][i]["vmax"],
start = 1.0
)
JuMP.@variable(
model,
ref[:gen][i]["pmin"] <= pg[i in keys(ref[:gen])] <= ref[:gen][i]["pmax"]
)
JuMP.@variable(
model,
ref[:gen][i]["qmin"] <= qg[i in keys(ref[:gen])] <= ref[:gen][i]["qmax"]
)
JuMP.@variable(
model,
-ref[:branch][l]["rate_a"] <=
p[(l, i, j) in ref[:arcs]] <=
ref[:branch][l]["rate_a"]
)
JuMP.@variable(
model,
-ref[:branch][l]["rate_a"] <=
q[(l, i, j) in ref[:arcs]] <=
ref[:branch][l]["rate_a"]
)
JuMP.@NLobjective(
model,
Min,
sum(
gen["cost"][1] * pg[i]^2 + gen["cost"][2] * pg[i] + gen["cost"][3] for
(i, gen) in ref[:gen]
)
)
c1 = []
c2 = []
c3 = []
c4 = []
c5 = []
c6 = []
c7 = []
c8 = []
c9 = []
c10 = []
for (i, bus) in ref[:ref_buses]
push!(c1, JuMP.@NLconstraint(model, va[i] == 0))
end
# Branch power flow physics and limit constraints
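    # Each constraint family (c2..c8) gets its own loop over the branch data so
    # that the constraint references are collected into separate vectors,
    # mirroring the grouped ExaModels constraints above.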
for (i, branch) in ref[:branch]
f_idx = (i, branch["f_bus"], branch["t_bus"])
t_idx = (i, branch["t_bus"], branch["f_bus"])
p_fr = p[f_idx]
q_fr = q[f_idx]
p_to = p[t_idx]
q_to = q[t_idx]
vm_fr = vm[branch["f_bus"]]
vm_to = vm[branch["t_bus"]]
va_fr = va[branch["f_bus"]]
va_to = va[branch["t_bus"]]
g, b = PowerModels.calc_branch_y(branch)
tr, ti = PowerModels.calc_branch_t(branch)
ttm = tr^2 + ti^2
g_fr = branch["g_fr"]
b_fr = branch["b_fr"]
g_to = branch["g_to"]
b_to = branch["b_to"]
# From side of the branch flow
push!(
c2,
JuMP.@NLconstraint(
model,
p_fr ==
(g + g_fr) / ttm * vm_fr^2 +
(-g * tr + b * ti) / ttm * (vm_fr * vm_to * cos(va_fr - va_to)) +
(-b * tr - g * ti) / ttm * (vm_fr * vm_to * sin(va_fr - va_to))
)
)
end
for (i, branch) in ref[:branch]
f_idx = (i, branch["f_bus"], branch["t_bus"])
t_idx = (i, branch["t_bus"], branch["f_bus"])
p_fr = p[f_idx]
q_fr = q[f_idx]
p_to = p[t_idx]
q_to = q[t_idx]
vm_fr = vm[branch["f_bus"]]
vm_to = vm[branch["t_bus"]]
va_fr = va[branch["f_bus"]]
va_to = va[branch["t_bus"]]
g, b = PowerModels.calc_branch_y(branch)
tr, ti = PowerModels.calc_branch_t(branch)
ttm = tr^2 + ti^2
g_fr = branch["g_fr"]
b_fr = branch["b_fr"]
g_to = branch["g_to"]
b_to = branch["b_to"]
push!(
c3,
JuMP.@NLconstraint(
model,
q_fr ==
-(b + b_fr) / ttm * vm_fr^2 -
(-b * tr - g * ti) / ttm * (vm_fr * vm_to * cos(va_fr - va_to)) +
(-g * tr + b * ti) / ttm * (vm_fr * vm_to * sin(va_fr - va_to))
)
)
end
# To side of the branch flow
for (i, branch) in ref[:branch]
f_idx = (i, branch["f_bus"], branch["t_bus"])
t_idx = (i, branch["t_bus"], branch["f_bus"])
p_fr = p[f_idx]
q_fr = q[f_idx]
p_to = p[t_idx]
q_to = q[t_idx]
vm_fr = vm[branch["f_bus"]]
vm_to = vm[branch["t_bus"]]
va_fr = va[branch["f_bus"]]
va_to = va[branch["t_bus"]]
g, b = PowerModels.calc_branch_y(branch)
tr, ti = PowerModels.calc_branch_t(branch)
ttm = tr^2 + ti^2
g_fr = branch["g_fr"]
b_fr = branch["b_fr"]
g_to = branch["g_to"]
b_to = branch["b_to"]
push!(
c4,
JuMP.@NLconstraint(
model,
p_to ==
(g + g_to) * vm_to^2 +
(-g * tr - b * ti) / ttm * (vm_to * vm_fr * cos(va_to - va_fr)) +
(-b * tr + g * ti) / ttm * (vm_to * vm_fr * sin(va_to - va_fr))
)
)
end
for (i, branch) in ref[:branch]
f_idx = (i, branch["f_bus"], branch["t_bus"])
t_idx = (i, branch["t_bus"], branch["f_bus"])
p_fr = p[f_idx]
q_fr = q[f_idx]
p_to = p[t_idx]
q_to = q[t_idx]
vm_fr = vm[branch["f_bus"]]
vm_to = vm[branch["t_bus"]]
va_fr = va[branch["f_bus"]]
va_to = va[branch["t_bus"]]
g, b = PowerModels.calc_branch_y(branch)
tr, ti = PowerModels.calc_branch_t(branch)
ttm = tr^2 + ti^2
g_fr = branch["g_fr"]
b_fr = branch["b_fr"]
g_to = branch["g_to"]
b_to = branch["b_to"]
push!(
c5,
JuMP.@NLconstraint(
model,
q_to ==
-(b + b_to) * vm_to^2 -
(-b * tr + g * ti) / ttm * (vm_to * vm_fr * cos(va_to - va_fr)) +
(-g * tr - b * ti) / ttm * (vm_to * vm_fr * sin(va_to - va_fr))
)
)
end
for (i, branch) in ref[:branch]
va_fr = va[branch["f_bus"]]
va_to = va[branch["t_bus"]]
push!(
c6,
JuMP.@NLconstraint(
model,
branch["angmin"] <= va_fr - va_to <= branch["angmax"]
)
)
end
# Apparent power limit, from side and to side
for (i, branch) in ref[:branch]
f_idx = (i, branch["f_bus"], branch["t_bus"])
p_fr = p[f_idx]
q_fr = q[f_idx]
push!(c7, JuMP.@NLconstraint(model, p_fr^2 + q_fr^2 <= branch["rate_a"]^2))
end
for (i, branch) in ref[:branch]
t_idx = (i, branch["t_bus"], branch["f_bus"])
p_to = p[t_idx]
q_to = q[t_idx]
push!(c8, JuMP.@NLconstraint(model, p_to^2 + q_to^2 <= branch["rate_a"]^2))
end
for (i, bus) in ref[:bus]
bus_loads = [ref[:load][l] for l in ref[:bus_loads][i]]
bus_shunts = [ref[:shunt][s] for s in ref[:bus_shunts][i]]
push!(
c9,
JuMP.@NLconstraint(
model,
sum(p[a] for a in ref[:bus_arcs][i]) ==
sum(pg[g] for g in ref[:bus_gens][i]) -
sum(load["pd"] for load in bus_loads) -
sum(shunt["gs"] for shunt in bus_shunts) * vm[i]^2
)
)
end
for (i, bus) in ref[:bus]
bus_loads = [ref[:load][l] for l in ref[:bus_loads][i]]
bus_shunts = [ref[:shunt][s] for s in ref[:bus_shunts][i]]
push!(
c10,
JuMP.@NLconstraint(
model,
sum(q[a] for a in ref[:bus_arcs][i]) ==
sum(qg[g] for g in ref[:bus_gens][i]) -
sum(load["qd"] for load in bus_loads) +
sum(shunt["bs"] for shunt in bus_shunts) * vm[i]^2
)
)
end
return model,
(va.data, vm.data, pg.data, qg.data, p.data, q.data),
(c1, c2, c3, c4, c5, c6, c7, c8, c9, c10)
end
function jump_ac_power_model(backend, filename)
    jm, _, _ = _jump_ac_power_model(backend, filename)
return MathOptNLPModel(jm)
end
| ExaModels | https://github.com/exanauts/ExaModels.jl.git |
| ["MIT"] | 0.7.1 | 68888f0d012aae809dbbe90f5ea6e1c8c5c5f737 | code | 873 |
module UtilsTest
using Test
import ExaModels, NLPModelsIpopt
import ..NLPTest: _exa_luksan_vlcek_model
const UTIL_MODELS = [ExaModels.TimedNLPModel, ExaModels.CompressedNLPModel]
const FIELDS = [:solution, :multipliers, :multipliers_L, :multipliers_U, :objective]
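# TimedNLPModel and CompressedNLPModel are meant to be transparent wrappers
# (per-callback timing and duplicate-nonzero compression, respectively), so
# solving the wrapped model must reproduce the unwrapped solution.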
function runtests()
@testset "Utils tests" begin
        m, _ = _exa_luksan_vlcek_model(nothing, 3)
result = NLPModelsIpopt.ipopt(m; print_level = 0)
for util_model in UTIL_MODELS
util_result = NLPModelsIpopt.ipopt(util_model(m); print_level = 0)
@testset "$util_model" begin
for field in FIELDS
@testset "$field" begin
@test getfield(util_result, field) β getfield(result, field) atol =
1e-6
end
end
end
end
end
end
end
| ExaModels | https://github.com/exanauts/ExaModels.jl.git |