licenses (sequence, 1–3 values) | version (string, 677 classes) | tree_hash (string, 40 chars) | path (string, 1 class) | type (string, 2 classes) | size (string, 2–8 chars) | text (string, 25–67.1M chars) | package_name (string, 2–41 chars) | repo (string, 33–86 chars)
---|---|---|---|---|---|---|---|---|
[
"MIT"
] | 0.7.5 | 104b6a1a358da9b1348ab7cd66ad685ecaca13d4 | code | 19985 | ################################################################################
# Utils
################################################################################
joint_types = [
:Fixed,
:Prismatic,
:Planar,
:FixedOrientation,
:Revolute,
:Cylindrical,
:PlanarAxis,
:FreeRevolute,
:Orbital,
:PrismaticOrbital,
:PlanarOrbital,
:FreeOrbital,
:Spherical,
:CylindricalFree,
:PlanarFree
]
# Jacobian via ForwardDiff; scalar-valued functions are wrapped in a one-element vector
function force_to_jacobian_forward_diff(f, x)
    if typeof(f(x)) <: AbstractVector
        return ForwardDiff.jacobian(f, x)
    else
        return ForwardDiff.jacobian(x -> [f(x)], x)
    end
end
################################################################################
# Test get and set position and velocities
################################################################################
@testset "Get and set position and velocities" begin
@testset "Maximal coordinates" begin
mech = DojoEnvironments.get_mechanism(:raiberthopper)
timestep = mech.timestep
joint1 = mech.joints[1]
joint2 = mech.joints[2]
pbody = mech.bodies[1]
cbody = mech.bodies[2]
tra2 = joint2.translational
rot2 = joint2.rotational
x = srand(1)
Δx = Dojo.zerodimstaticadjoint(Dojo.nullspace_mask(tra2)) * x
Δq = rand(QuatRotation).q
Dojo.set_maximal_configurations!(pbody, cbody;
parent_vertex=tra2.vertices[1],
child_vertex=tra2.vertices[2],
Δx=Δx,
Δq=Δq)
@test norm(Dojo.minimal_coordinates(tra2, pbody, cbody) - x[1], Inf) < 1.0e-8
v = srand(1)
Δv = Dojo.zerodimstaticadjoint(Dojo.nullspace_mask(tra2)) * v
Δω = rand(3)
Dojo.set_maximal_velocities!(pbody, cbody;
parent_vertex=tra2.vertices[1],
child_vertex=tra2.vertices[2],
Δv=Δv,
Δω=Δω)
@test norm(Dojo.minimal_velocities(tra2, pbody, cbody, timestep) - v[1], Inf) < 1.0e-8
end
@testset "Minimal coordinates" begin
for joint_type in joint_types
mech = DojoEnvironments.get_snake(;
num_bodies=10,
joint_type)
timestep = mech.timestep
for joint in mech.joints
joint.rotational.orientation_offset = rand(QuatRotation).q
end
joint0 = mech.joints[1]
tra0 = joint0.translational
rot0 = joint0.rotational
pnodes0 = [mech.origin; mech.bodies[1:end-1]]
cnodes0 = mech.bodies
Random.seed!(100)
Δθ = rand(input_dimension(rot0))
Δx = rand(input_dimension(tra0))
Δω = rand(input_dimension(rot0))
Δv = rand(input_dimension(tra0))
for i = 1:10
Dojo.set_minimal_coordinates!(rot0, pnodes0[i], cnodes0[i], timestep,
Δθ=Δθ)
Δθ0 = Dojo.minimal_coordinates(rot0, pnodes0[i], cnodes0[i])
@test norm(Δθ0 - Δθ, Inf) < 1.0e-8
Dojo.set_minimal_coordinates!(tra0, pnodes0[i], cnodes0[i], timestep,
Δx=Δx)
Δx0 = Dojo.minimal_coordinates(tra0, pnodes0[i], cnodes0[i])
@test norm(Δx0 - Δx, Inf) < 1.0e-8
Dojo.set_minimal_velocities!(joint0, pnodes0[i], cnodes0[i], timestep,
Δv=Δv,
Δω=Δω)
Δω0 = Dojo.minimal_velocities(rot0, pnodes0[i], cnodes0[i], timestep)
Δv0 = Dojo.minimal_velocities(tra0, pnodes0[i], cnodes0[i], timestep)
@test norm(Δω0 - Δω, Inf) < 1.0e-8
@test norm(Δv0 - Δv, Inf) < 1.0e-8
end
end
end
end
################################################################################
# Test min -> max -> min
################################################################################
@testset "Minimal to maximal to minimal" begin
# raiberthopper
@testset "Raibert hopper" begin
mech = DojoEnvironments.get_mechanism(:raiberthopper);
Random.seed!(100)
nx = Dojo.minimal_dimension(mech)
x0 = rand(nx)
z0 = Dojo.minimal_to_maximal(mech, x0)
x1 = Dojo.maximal_to_minimal(mech, z0)
@test norm(x0[1:3] - x1[1:3], Inf) < 1.0e-8
@test norm(x0[4:6] - x1[4:6]) < 1.0e-8
@test norm(x0[7:9] - x1[7:9], Inf) < 1.0e-8
@test norm(x0[11:12] - x1[11:12], Inf) < 1.0e-8
@test norm(x0[13] - x1[13], Inf) < 1.0e-8
@test norm(x0[14] - x1[14], Inf) < 1.0e-8
end
# box
@testset "Box" begin
mech = DojoEnvironments.get_mechanism(:block)
Random.seed!(100)
nx = Dojo.minimal_dimension(mech)
x0 = rand(nx)
z0 = Dojo.minimal_to_maximal(mech, x0)
x1 = Dojo.maximal_to_minimal(mech, z0)
@test norm(x0 - x1, Inf) < 1.0e-8
end
# pendulum
@testset "Pendulum" begin
mech = DojoEnvironments.get_mechanism(:pendulum)
Random.seed!(100)
nx = Dojo.minimal_dimension(mech)
x0 = rand(nx)
z0 = Dojo.minimal_to_maximal(mech, x0)
x1 = Dojo.maximal_to_minimal(mech, z0)
@test norm(x0 - x1, Inf) < 1.0e-8
end
# halfcheetah
@testset "Halfcheetah" begin
mech = DojoEnvironments.get_mechanism(:halfcheetah)
Random.seed!(100)
nx = Dojo.minimal_dimension(mech)
x0 = rand(nx)
z0 = Dojo.minimal_to_maximal(mech, x0)
x1 = Dojo.maximal_to_minimal(mech, z0)
@test norm(x0 - x1, Inf) < 1.0e-8
end
# nslider
@testset "Nslider" begin
Nb0 = 5
mech = DojoEnvironments.get_mechanism(:nslider;
num_bodies=Nb0)
Random.seed!(100)
nx = Dojo.minimal_dimension(mech)
x0 = rand(nx)
z0 = Dojo.minimal_to_maximal(mech, x0)
x1 = Dojo.maximal_to_minimal(mech, z0)
@test norm(x0 - x1, Inf) < 1.0e-8
end
# npendulum
@testset "Npendulum" begin
for joint_type in joint_types
# @show joint_type
Nb0 = 5
mech = DojoEnvironments.get_mechanism(:npendulum;
num_bodies=Nb0,
rest_joint_type=joint_type)
Random.seed!(100)
nx = Dojo.minimal_dimension(mech)
x0 = rand(nx)
z0 = Dojo.minimal_to_maximal(mech, x0)
x1 = Dojo.maximal_to_minimal(mech, z0)
@test norm(x0 - x1, Inf) < 1.0e-8
end
end
# snake
@testset "Snake" begin
for joint_type in joint_types
# @show joint_type
Nb0 = 5
mech = DojoEnvironments.get_mechanism(:snake;
    num_bodies=Nb0,
    joint_type=joint_type)
Random.seed!(100)
nx = Dojo.minimal_dimension(mech)
x0 = rand(nx)
z0 = Dojo.minimal_to_maximal(mech, x0)
x1 = Dojo.maximal_to_minimal(mech, z0)
@test norm(x0 - x1, Inf) < 1.0e-8
end
end
# twister
@testset "Twister" begin
for joint_type in joint_types
# @show joint_type
Nb0 = 5
mech = DojoEnvironments.get_mechanism(:twister;
num_bodies=Nb0,
joint_type=joint_type)
Random.seed!(100)
nx = Dojo.minimal_dimension(mech)
x0 = rand(nx)
z0 = Dojo.minimal_to_maximal(mech, x0)
x1 = Dojo.maximal_to_minimal(mech, z0)
@test norm(x0 - x1, Inf) < 1.0e-8
end
end
# humanoid
@testset "Humanoid" begin
mech = DojoEnvironments.get_mechanism(:humanoid)
Random.seed!(100)
nx = Dojo.minimal_dimension(mech)
x0 = rand(nx)
z0 = Dojo.minimal_to_maximal(mech, x0)
x1 = Dojo.maximal_to_minimal(mech, z0)
@test norm(x0 - x1, Inf) < 1.0e-8
end
# quadruped
@testset "Quadruped" begin
mech = DojoEnvironments.get_mechanism(:quadruped)
Random.seed!(100)
nx = Dojo.minimal_dimension(mech)
x0 = rand(nx)
z0 = Dojo.minimal_to_maximal(mech, x0)
x1 = Dojo.maximal_to_minimal(mech, z0)
@test norm(x0 - x1, Inf) < 1.0e-8
end
# atlas
@testset "Atlas" begin
mech = DojoEnvironments.get_mechanism(:atlas;
urdf=:atlas_simple,
contact_feet=true,
parse_dampers=false)
Random.seed!(100)
nx = Dojo.minimal_dimension(mech)
x0 = rand(nx)
z0 = Dojo.minimal_to_maximal(mech, x0)
x1 = Dojo.maximal_to_minimal(mech, z0)
@test norm(x0 - x1, Inf) < 1.0e-8
end
end
################################################################################
#Test minimal coordinates and velocities Jacobians
################################################################################
@testset "Jacobians" begin
@testset "Minimal velocity Jacobian" begin
mech = get_mechanism(:humanoid)
timestep = mech.timestep
for jointcon in mech.joints
for joint in [jointcon.translational, jointcon.rotational]
# generate random configuration in minimal space
x = rand(minimal_dimension(mech))
# convert to maximal
z = Dojo.minimal_to_maximal(mech, x)
# extract body states
Ne = Dojo.length(mech.joints)
if Dojo.get_body(mech, jointcon.parent_id).name == :origin
zp = [mech.origin.state.x2; mech.origin.state.v15; Dojo.vector(mech.origin.state.q2); mech.origin.state.ω15]
else
zp = z[(jointcon.parent_id - Ne - 1) * 13 .+ (1:13)]
end
zc = z[(jointcon.child_id - Ne - 1) * 13 .+ (1:13)]
xa = SVector{3}(zp[1:3])
va = SVector{3}(zp[3 .+ (1:3)])
qa = Quaternion(zp[6 .+ (1:4)]...)
ωa = SVector{3}(zp[10 .+ (1:3)])
xb = SVector{3}(zc[1:3])
vb = SVector{3}(zc[3 .+ (1:3)])
qb = Quaternion(zc[6 .+ (1:4)]...)
ωb = SVector{3}(zc[10 .+ (1:3)])
Dojo.minimal_velocities(joint, xa, va, qa, ωa, xb, vb, qb, ωb, timestep)
# Jacobians
∇0 = Dojo.minimal_velocities_jacobian_configuration(:parent, joint, xa, va, qa, ωa, xb, vb, qb, ωb, timestep)
∇1 = force_to_jacobian_forward_diff(
xq -> Dojo.minimal_velocities(joint, xq[Dojo.SUnitRange(1,3)], va, Quaternion(xq[4:7]...), ωa, xb, vb, qb, ωb, timestep),
[xa; Dojo.vector(qa)]) * cat(I(3), Dojo.LVᵀmat(qa), dims=(1,2))
@test norm(∇0 - ∇1, Inf) < 1.0e-8
∇0 = Dojo.minimal_velocities_jacobian_configuration(:child, joint, xa, va, qa, ωa, xb, vb, qb, ωb, timestep)
∇1 = force_to_jacobian_forward_diff(
xq -> Dojo.minimal_velocities(joint, xa, va, qa, ωa, xq[Dojo.SUnitRange(1,3)], vb, Quaternion(xq[4:7]...), ωb, timestep),
[xb; Dojo.vector(qb)]) * cat(I(3), Dojo.LVᵀmat(qb), dims=(1,2))
@test norm(∇0 - ∇1, Inf) < 1.0e-8
∇0 = Dojo.minimal_velocities_jacobian_velocity(:parent, joint, xa, va, qa, ωa, xb, vb, qb, ωb, timestep)
∇1 = force_to_jacobian_forward_diff(
vω -> Dojo.minimal_velocities(joint, xa, vω[Dojo.SUnitRange(1,3)], qa, vω[Dojo.SUnitRange(4,6)], xb, vb, qb, ωb, timestep),
[va; ωa])
@test norm(∇0 - ∇1, Inf) < 1.0e-8
∇0 = Dojo.minimal_velocities_jacobian_velocity(:child, joint, xa, va, qa, ωa, xb, vb, qb, ωb, timestep)
∇1 = force_to_jacobian_forward_diff(
vω -> Dojo.minimal_velocities(joint, xa, va, qa, ωa, xb, vω[Dojo.SUnitRange(1,3)], qb, vω[Dojo.SUnitRange(4,6)], timestep),
[vb; ωb])
@test norm(∇0 - ∇1, Inf) < 1.0e-8
end
end
end
@testset "Minimal coordinate Jacobian" begin
mech = get_mechanism(:humanoid)
for jointcon in mech.joints
for joint in [jointcon.translational, jointcon.rotational]
# generate random configuration in minimal space
x = rand(Dojo.minimal_dimension(mech))
# convert to maximal
z = Dojo.minimal_to_maximal(mech, x)
# extract body states
Ne = Dojo.length(mech.joints)
if Dojo.get_body(mech, jointcon.parent_id).name == :origin
zp = [mech.origin.state.x2; mech.origin.state.v15; Dojo.vector(mech.origin.state.q2); mech.origin.state.ω15]
else
zp = z[(jointcon.parent_id - Ne - 1) * 13 .+ (1:13)]
end
zc = z[(jointcon.child_id - Ne - 1) * 13 .+ (1:13)]
xa = SVector{3}(zp[1:3])
# va = SVector{3}(zp[3 .+ (1:3)])
qa = Quaternion(zp[6 .+ (1:4)]...)
# ωa = SVector{3}(zp[10 .+ (1:3)])
xb = SVector{3}(zc[1:3])
# vb = SVector{3}(zc[3 .+ (1:3)])
qb = Quaternion(zc[6 .+ (1:4)]...)
# ωb = SVector{3}(zc[10 .+ (1:3)])
Dojo.minimal_coordinates(joint, xa, qa, xb, qb)
∇0 = Dojo.minimal_coordinates_jacobian_configuration(:parent, joint, xa, qa, xb, qb)
∇1 = force_to_jacobian_forward_diff(
xq -> Dojo.minimal_coordinates(joint, xq[1:3], Quaternion(xq[4:7]...), xb, qb),
[xa; Dojo.vector(qa)]) * cat(I(3), Dojo.LVᵀmat(qa), dims=(1,2))
@test norm(∇0 - ∇1, Inf) < 1.0e-8
∇0 = Dojo.minimal_coordinates_jacobian_configuration(:child, joint, xa, qa, xb, qb)
∇1 = force_to_jacobian_forward_diff(
xq -> Dojo.minimal_coordinates(joint, xa, qa, xq[1:3], Quaternion(xq[4:7]...)),
[xb; Dojo.vector(qb)]) * cat(I(3), Dojo.LVᵀmat(qb), dims=(1,2))
@test norm(∇0 - ∇1, Inf) < 1.0e-8
end
end
end
@testset "Minimal to maximal Jacobian" begin
function maximal_to_minimal_jacobian_fd(mechanism::Mechanism, z)
J = ForwardDiff.jacobian(y -> Dojo.maximal_to_minimal(mechanism, y), z)
G = attitude_jacobian(z, length(mechanism.bodies))
return J * G
end
# TODO switch to ForwardDiff once it works
function maximal_to_minimal_jacobian_fd_finite_diff(mechanism::Mechanism, z)
J = FiniteDiff.finite_difference_jacobian(y -> Dojo.maximal_to_minimal(mechanism, y), z)
G = attitude_jacobian(z, length(mechanism.bodies))
return J * G
end
# TODO switch to ForwardDiff once it works
function minimal_to_maximal_jacobian_fd(mechanism::Mechanism, x)
J = FiniteDiff.finite_difference_jacobian(y -> Dojo.minimal_to_maximal(mechanism, y), x)
z = minimal_to_maximal(mechanism, x)
G = attitude_jacobian(z, length(mechanism.bodies))
return G' * J
end
function ctrl!(mechanism, k)
Dojo.set_input!(mechanism, 0.1 * srand(Dojo.input_dimension(mechanism)))
end
# n-pendulum
mechanism = DojoEnvironments.get_mechanism(:npendulum;
timestep=0.1, gravity=-9.81, num_bodies=1)
base_angle = 0.3 * π
Dojo.initialize!(mechanism, :npendulum;
base_angle)
storage = Dojo.simulate!(mechanism, 1.0;
record=true, verbose=false)
x = Dojo.get_minimal_state(mechanism)
z = Dojo.get_maximal_state(mechanism)
u = zeros(Dojo.input_dimension(mechanism))
@test norm(minimal_to_maximal(mechanism, x) - z) < 1.0e-5
@test norm(Dojo.maximal_to_minimal(mechanism, z) - x) < 1.0e-8
M_fd = maximal_to_minimal_jacobian_fd(mechanism, z)
M_a = Dojo.maximal_to_minimal_jacobian(mechanism, z)
@test size(M_fd) == size(M_a)
@test norm(M_fd - M_a, Inf) < 1.0e-8
N_fd = minimal_to_maximal_jacobian_fd(mechanism, Dojo.maximal_to_minimal(mechanism, z))
N_a = Dojo.minimal_to_maximal_jacobian(mechanism, Dojo.maximal_to_minimal(mechanism, z))
@test size(N_fd) == size(N_a)
@test norm(N_fd - N_a, Inf) < 1.0e-5
@test norm(diag(M_fd * N_fd) .- 1.0, Inf) < 1.0e-5
@test norm(diag(M_a * N_a) .- 1.0, Inf) < 1.0e-8
# n-pendulum
mechanism = DojoEnvironments.get_mechanism(:npendulum;
timestep=0.1,
gravity=-9.81,
num_bodies=2)
base_angle = 0.3 * π
Dojo.initialize!(mechanism, :npendulum;
base_angle)
storage = Dojo.simulate!(mechanism, 1.0;
record=true, verbose=false)
x = Dojo.get_minimal_state(mechanism)
z = Dojo.get_maximal_state(mechanism)
u = zeros(Dojo.input_dimension(mechanism))
@test norm(minimal_to_maximal(mechanism, x) - z) < 1.0e-5
@test norm(Dojo.maximal_to_minimal(mechanism, z) - x) < 1.0e-8
M_fd = maximal_to_minimal_jacobian_fd(mechanism, z)
M_a = Dojo.maximal_to_minimal_jacobian(mechanism, z)
@test size(M_fd) == size(M_a)
@test norm(M_fd - M_a, Inf) < 1.0e-8
N_fd = minimal_to_maximal_jacobian_fd(mechanism, Dojo.maximal_to_minimal(mechanism, z))
N_a = Dojo.minimal_to_maximal_jacobian(mechanism, Dojo.maximal_to_minimal(mechanism, z))
@test size(N_fd) == size(N_a)
@test norm(N_fd - N_a, Inf) < 1.0e-5
@test norm(diag(M_fd * N_fd) .- 1.0, Inf) < 1.0e-5
@test norm(diag(M_a * N_a) .- 1.0, Inf) < 1.0e-5
# sphere
mechanism = DojoEnvironments.get_mechanism(:sphere;
timestep=0.01, gravity=-9.81)
Dojo.initialize!(mechanism, :sphere)
storage = Dojo.simulate!(mechanism, 1.0;
record=true, verbose=false)
z = Dojo.get_maximal_state(mechanism)
x = Dojo.get_minimal_state(mechanism)
u = zeros(Dojo.input_dimension(mechanism))
@test norm(minimal_to_maximal(mechanism, x) - z) < 1.0e-8
@test norm(Dojo.maximal_to_minimal(mechanism, z) - x) < 1.0e-8
M_fd = maximal_to_minimal_jacobian_fd_finite_diff(mechanism, z)
M_a = Dojo.maximal_to_minimal_jacobian(mechanism, z)
@test size(M_fd) == size(M_a)
@test norm(M_fd - M_a, Inf) < 1.0e-5
N_fd = minimal_to_maximal_jacobian_fd(mechanism, Dojo.maximal_to_minimal(mechanism, z))
N_a = Dojo.minimal_to_maximal_jacobian(mechanism, Dojo.maximal_to_minimal(mechanism, z))
@test size(N_fd) == size(N_a)
@test norm(N_fd - N_a, Inf) < 1.0e-5
@test norm(diag(M_fd * N_fd) .- 1.0, Inf) < 1.0e-5
@test norm(diag(M_a * N_a) .- 1.0, Inf) < 1.0e-8
# half cheetah
mechanism = DojoEnvironments.get_mechanism(:halfcheetah;
timestep=0.01, gravity=-9.81)
Dojo.initialize!(mechanism, :halfcheetah)
storage = Dojo.simulate!(mechanism, 1.0, ctrl!;
record=true, verbose=false)
z = Dojo.get_maximal_state(mechanism)
x = Dojo.get_minimal_state(mechanism)
u = zeros(Dojo.input_dimension(mechanism))
@test norm(minimal_to_maximal(mechanism, x) - z) < 1.0e-5
@test norm(Dojo.maximal_to_minimal(mechanism, z) - x) < 1.0e-8
M_fd = maximal_to_minimal_jacobian_fd(mechanism, z)
M_a = Dojo.maximal_to_minimal_jacobian(mechanism, z)
@test size(M_fd) == size(M_a)
@test norm(M_fd - M_a, Inf) < 1.0e-8
N_fd = minimal_to_maximal_jacobian_fd(mechanism, Dojo.maximal_to_minimal(mechanism, z))
N_a = Dojo.minimal_to_maximal_jacobian(mechanism, Dojo.maximal_to_minimal(mechanism, z))
@test size(N_fd) == size(N_a)
# @test norm(N_fd - N_a, Inf) < 2.0e-5 # TODO check tolerance # TODO FAILS
@test norm(diag(M_fd * N_fd) .- 1.0, Inf) < 1.0e-5
@test norm(diag(M_a * N_a) .- 1.0, Inf) < 1.0e-5
@test norm(diag(M_a * N_fd) .- 1.0, Inf) < 1.0e-5
# atlas
mechanism = DojoEnvironments.get_mechanism(:atlas;
timestep=0.01,
gravity=-9.81,
friction_coefficient=0.5,
parse_springs=false,
parse_dampers=false,
dampers=100.0,
springs=1.0,
contact_feet=true)
Dojo.initialize!(mechanism, :atlas)
storage = Dojo.simulate!(mechanism, 1.0, ctrl!;
record=true, verbose=false)
z = Dojo.get_maximal_state(mechanism)
x = Dojo.get_minimal_state(mechanism)
u = zeros(Dojo.input_dimension(mechanism))
@test norm(minimal_to_maximal(mechanism, x) - z) < 1.0e-5
@test norm(Dojo.maximal_to_minimal(mechanism, z) - x) < 1.0e-8
M_fd = maximal_to_minimal_jacobian_fd(mechanism, z)
M_a = Dojo.maximal_to_minimal_jacobian(mechanism, z)
@test size(M_fd) == size(M_a)
@test norm(M_fd - M_a, Inf) < 1.0e-8
N_fd = minimal_to_maximal_jacobian_fd(mechanism, Dojo.maximal_to_minimal(mechanism, z))
N_a = Dojo.minimal_to_maximal_jacobian(mechanism, Dojo.maximal_to_minimal(mechanism, z))
@test size(N_fd) == size(N_a)
# @test norm(N_fd - N_a, Inf) < 5.0e-5 # TODO FAILS
@test norm(diag(M_fd * N_fd) .- 1.0, Inf) < 1.0e-5
@test norm(diag(M_a * N_a) .- 1.0, Inf) < 1.0e-5
end
@testset "Maximal to minimal Jacobian" begin
function maximal_to_minimal_jacobian_fd_finite_diff(mechanism::Mechanism, z)
J = FiniteDiff.finite_difference_jacobian(y -> maximal_to_minimal(mechanism, y), z)
G = attitude_jacobian(z, length(mechanism.bodies))
return J * G
end
function maximal_to_minimal_jacobian_fd(mechanism::Mechanism, z)
J = ForwardDiff.jacobian(y -> maximal_to_minimal(mechanism, y), z)
G = attitude_jacobian(z, length(mechanism.bodies))
return J * G
end
# 5-link pendulum
mech = DojoEnvironments.get_mechanism(:npendulum;
timestep=0.01, gravity=-9.81, num_bodies=5)
Random.seed!(100)
base_angle = 0.3π
Dojo.initialize!(mech, :npendulum;
base_angle)
storage = Dojo.simulate!(mech, 1.0;
record=true, verbose=false)
Dojo.maximal_dimension(mech) == 13
Dojo.minimal_dimension(mech) == 12
z = Dojo.get_maximal_state(mech)
attjac = Dojo.attitude_jacobian(z, length(mech.bodies))
M_fd = maximal_to_minimal_jacobian_fd(mech, z)
M_a = Dojo.maximal_to_minimal_jacobian(mech, z)
@test size(M_fd) == size(M_a)
@test norm(M_fd - M_a, Inf) < 1.0e-8
# sphere
mech = DojoEnvironments.get_mechanism(:sphere;
timestep=0.01, gravity=-9.81)
Dojo.initialize!(mech, :sphere)
storage = Dojo.simulate!(mech, 1.0;
record=true, verbose=false)
Dojo.maximal_dimension(mech)
Dojo.minimal_dimension(mech)
z = Dojo.get_maximal_state(mech)
attjac = Dojo.attitude_jacobian(z, length(mech.bodies))
M_fd = maximal_to_minimal_jacobian_fd_finite_diff(mech, z)
M_a = Dojo.maximal_to_minimal_jacobian(mech, z)
@test size(M_fd) == size(M_a)
@test norm(M_fd - M_a, Inf) < 2.0e-6 # TODO check tolerance
end
end
| Dojo | https://github.com/dojo-sim/Dojo.jl.git |
|
[
"MIT"
] | 0.7.5 | 104b6a1a358da9b1348ab7cd66ad685ecaca13d4 | code | 11894 | # Controller
function controller!(mechanism, k;
U=0.5,
timestep=0.01)
for joint in mechanism.joints
nu = input_dimension(joint)
u = (nu <= 5 && k ∈ (1:100)) * U * sones(nu)
set_input!(joint, u)
end
return
end
nocontrol!(mechanism, k) = controller!(mechanism, k, U=0.0)
# Parameters
ϵ0 = 1e-12
timestep0 = 0.01
gravity0 = 0.0
joint_types = [
:Fixed,
:Prismatic,
:Planar,
:FixedOrientation,
:Revolute,
:Cylindrical,
:PlanarAxis,
:FreeRevolute,
:Orbital,
:PrismaticOrbital,
:PlanarOrbital,
:FreeOrbital,
:Spherical,
:CylindricalFree,
:PlanarFree
]
################################################################################
# Box
################################################################################
# single body
# initial linear and angular velocity
# no gravity
# no spring and damper
# no control
################################################################################
@testset "Box" begin
mech = get_mechanism(:block;
timestep=timestep0,
gravity=gravity0,
contact=false)
v0 = [1,2,3.0]
ω0 = [10,10,10.0]
initialize!(mech, :block,
velocity=v0,
angular_velocity=ω0)
storage = simulate!(mech, 5.0, nocontrol!;
record=true, verbose=false,
opts=SolverOptions(rtol=ϵ0, btol=ϵ0))
# visualize(mech, storage, vis = vis)
m0 = momentum(mech, storage)[5:end]
mlin0 = [Vector(m - m0[1])[1:3] for m in m0]
mang0 = [Vector(m - m0[1])[4:6] for m in m0]
# plot([(i-1) * timestep0 for i in 1:length(m0)], hcat(mlin0...)')
# plot([(i-1) * timestep0 for i in 1:length(m0)], hcat(mang0...)')
@test all(norm.(mlin0, Inf) .< 1.0e-8)
@test all(norm.(mang0, Inf) .< 1.0e-8)
end
################################################################################
# SINGLE PENDULUM
################################################################################
# single body
# initial angular velocity
# no gravity
# no spring and damper
# no control
################################################################################
@testset "Pendulum" begin
mech = get_mechanism(:pendulum;
timestep=timestep0,
gravity=gravity0)
ϕ0 = 0.7
ω0 = 5.0
initialize!(mech, :pendulum,
angle=ϕ0, angular_velocity=ω0)
storage = simulate!(mech, 5.0, nocontrol!;
record=true, verbose=false,
opts=SolverOptions(rtol=ϵ0, btol=ϵ0))
# visualize(mech, storage, vis = vis)
m0 = momentum(mech, storage)[10:end]
mlin0 = [Vector(m - m0[1])[1:3] for m in m0]
mang0 = [Vector(m - m0[1])[4:6] for m in m0]
# plot([(i-1) * timestep0 for i in 1:length(m0)], hcat(mlin0...)')
# plot([(i-1) * timestep0 for i in 1:length(m0)], hcat(mang0...)')
# @test all(norm.(mlin0, Inf) .< 1e-11)
@test all(norm.(mang0, Inf) .< 1.0e-8)
end
################################################################################
# HUMANOID
################################################################################
# multiple bodies
# initial linear and angular velocity
# no gravity
# with spring and damper
# with control
################################################################################
@testset "Humanoid" begin
springs0 = 1.0
dampers0 = 1.0
mech = get_mechanism(:humanoid;
timestep=timestep0,
gravity=gravity0,
parse_springs=false,
parse_dampers=false,
springs=springs0,
dampers=dampers0,
contact_feet=false)
initialize!(mech, :humanoid)
bodies = mech.bodies
set_maximal_velocities!.(bodies, ω=1e-0rand(3))
storage = simulate!(mech, 10.0, controller!;
record=true, verbose=false,
opts=SolverOptions(rtol=ϵ0, btol=ϵ0))
# visualize(mech, storage, vis = vis)
m0 = momentum(mech, storage)[1:end]
mlin0 = [Vector(m - m0[1])[1:3] for m in m0]
mang0 = [Vector(m - m0[1])[4:6] for m in m0]
# plot([(i-1) * timestep0 for i in 1:length(m0)], hcat(mlin0...)')
# plot([(i-1) * timestep0 for i in 1:length(m0)], hcat(mang0...)')
@test all(norm.(mlin0, Inf) .< 1.0e-8)
@test all(norm.(mang0, Inf) .< 1.0e-8)
end
################################################################################
# ATLAS
################################################################################
# multiple bodies
# initial linear and angular velocity
# no gravity
# with spring and damper
# with control
################################################################################
@testset "Atlas" begin
springs0 = 10.0
dampers0 = 1.0
mech = get_mechanism(:atlas;
timestep=timestep0,
gravity=gravity0,
parse_springs=false,
parse_dampers=false,
springs=springs0,
dampers=dampers0,
contact_feet=false,
contact_body=false)
initialize!(mech, :atlas)
storage = simulate!(mech, 5.0, controller!;
record=true, verbose=false,
opts=SolverOptions(rtol=ϵ0, btol=ϵ0))
# visualize(mech, storage, vis = vis)
m0 = momentum(mech, storage)[1:end]
mlin0 = [Vector(m - m0[1])[1:3] for m in m0]
mang0 = [Vector(m - m0[1])[4:6] for m in m0]
# plot([(i-1) * timestep0 for i in 1:length(m0)], hcat(mlin0...)')
# plot([(i-1) * timestep0 for i in 1:length(m0)], hcat(mang0...)')
@test all(norm.(mlin0, Inf) .< 1.0e-8)
@test all(norm.(mang0, Inf) .< 1.0e-8)
end
################################################################################
# QUADRUPED
################################################################################
# multiple bodies
# initial linear and angular velocity
# no gravity
# with spring and damper
# with control
################################################################################
@testset "Quadruped" begin
springs0 = 0.3
dampers0 = 0.1
mech = get_mechanism(:quadruped;
timestep=timestep0,
gravity=gravity0,
parse_springs=false,
parse_dampers=false,
springs=springs0,
dampers=dampers0,
contact_feet=false,
contact_body=false)
initialize!(mech, :quadruped)
storage = simulate!(mech, 5.0, controller!;
record=true, verbose=false,
opts=SolverOptions(rtol=ϵ0, btol=ϵ0))
# visualize(mech, storage, vis = vis)
m0 = momentum(mech, storage)[1:end]
mlin0 = [Vector(m - m0[1])[1:3] for m in m0]
mang0 = [Vector(m - m0[1])[4:6] for m in m0]
# plot([(i-1) * timestep0 for i in 1:length(m0)], hcat(mlin0...)')
# plot([(i-1) * timestep0 for i in 1:length(m0)], hcat(mang0...)')
@test all(norm.(mlin0, Inf) .< 1.0e-8)
@test all(norm.(mang0, Inf) .< 1.0e-8)
end
################################################################################
# 5-LINK SNAKE
################################################################################
# multiple bodies
# initial linear and angular velocity
# no gravity
# with spring and damper
# with control
################################################################################
@testset "Snake" begin
Nb0 = 5
springs0 = 1.0 * 4e0
dampers0 = 1.0 * 2e+1
mech = get_mechanism(:snake;
timestep=timestep0,
gravity=gravity0,
num_bodies=Nb0,
springs=springs0,
dampers=dampers0,
joint_type=:Revolute,
contact=false,
radius=0.05);
v0 = 100.0 * [1, 2, 3] * timestep0
ω0 = 100.0 * [1, 2, 3.0] * timestep0
q10 = Dojo.RotX(0.5*π)
initialize!(mech, :snake,
base_orientation=q10,
base_linear_velocity=v0,
base_angular_velocity=ω0)
storage = simulate!(mech, 1.50;
record=true, verbose=false,
opts=SolverOptions(rtol=ϵ0, btol=ϵ0))
# visualize(mech, storage, vis = vis)
m0 = momentum(mech, storage)[5:end]
mlin0 = [Vector(m - m0[1])[1:3] for m in m0]
mang0 = [Vector(m - m0[1])[4:6] for m in m0]
# plot([(i-1) * timestep0 for i in 1:length(m0)], hcat(mlin0...)')
# plot([(i-1) * timestep0 for i in 1:length(m0)], hcat(mang0...)')
@test all(norm.(mlin0, Inf) .< 1.0e-8)
@test all(norm.(mang0, Inf) .< 1.0e-8)
for joint_type in joint_types
mech = get_mechanism(:snake;
timestep=timestep0,
gravity=gravity0,
num_bodies=Nb0,
springs=springs0,
dampers=dampers0,
joint_type=joint_type,
contact=false,
radius=0.05)
v0 = 10.0 * [1, 2, 3] * timestep0
ω0 = 10.0 * [1, 2, 3.0] * timestep0
q10 = Dojo.RotX(0.5 * π)
initialize!(mech, :snake,
base_orientation=q10,
base_linear_velocity=v0,
base_angular_velocity=ω0)
storage = simulate!(mech, 1.50, controller!;
record=true, verbose=false,
opts=SolverOptions(rtol=ϵ0, btol=ϵ0))
# visualize(mech, storage, vis = vis)
m0 = momentum(mech, storage)[5:end]
mlin0 = [Vector(m - m0[1])[1:3] for m in m0]
mang0 = [Vector(m - m0[1])[4:6] for m in m0]
# plot([(i-1) * timestep0 for i in 1:length(m0)], hcat(mlin0...)')
# plt = plot()
# plot!([(i-1) * timestep0 for i in 1:length(m0)], hcat(mang0...)')
# display(plt)
@test all(norm.(mlin0, Inf) .< 1.0e-8)
@test all(norm.(mang0, Inf) .< 1.0e-8)
end
end
################################################################################
# 5-LINK TWISTER
################################################################################
# multiple bodies
# initial linear and angular velocity
# no gravity
# with spring and damper
################################################################################
@testset "Twister" begin
Nb0 = 5
springs0 = 1.0 * 4e0
dampers0 = 1.0 * 2e+1
mech = get_mechanism(:twister;
timestep=timestep0,
gravity=gravity0,
num_bodies=Nb0,
springs=springs0,
dampers=dampers0,
joint_type=:FixedOrientation,
contact=false,
radius=0.05);
v0 = 100.0 * [1, 2, 3] * timestep0
ω0 = 100.0 * [1, 2, 3.0] * timestep0
q10 = Dojo.RotX(0.5*π)
initialize!(mech, :twister,
base_orientation=q10,
base_linear_velocity=v0,
base_angular_velocity=ω0)
storage = simulate!(mech, 2.5;
record=true, verbose=false,
opts=SolverOptions(rtol=ϵ0, btol=ϵ0))
# visualize(mech, storage, vis = vis)
m0 = momentum(mech, storage)[5:end]
mlin0 = [Vector(m - m0[1])[1:3] for m in m0]
mang0 = [Vector(m - m0[1])[4:6] for m in m0]
# plot([(i-1) * timestep0 for i in 1:length(m0)], hcat(mlin0...)')
# plot([(i-1) * timestep0 for i in 1:length(m0)], hcat(mang0...)')
@test all(norm.(mlin0, Inf) .< 1.0e-8)
@test all(norm.(mang0, Inf) .< 1.0e-8)
for joint_type in joint_types
mech = get_mechanism(:twister;
timestep=timestep0,
gravity=gravity0,
num_bodies=Nb0,
springs=springs0,
dampers=dampers0,
joint_type=joint_type,
contact=false,
radius=0.05)
v0 = 10.0 * [1, 2, 3] * timestep0
ω0 = 10.0 * [1, 2, 3.0] * timestep0
q10 = Dojo.RotX(0.5*π)
initialize!(mech, :twister,
base_orientation=q10,
base_linear_velocity=v0,
base_angular_velocity=ω0)
storage = simulate!(mech, 1.50;
record=true, verbose=false,
opts=SolverOptions(rtol=ϵ0, btol=ϵ0))
# visualize(mech, storage, vis = vis)
m0 = momentum(mech, storage)[5:end]
mlin0 = [Vector(m - m0[1])[1:3] for m in m0]
mang0 = [Vector(m - m0[1])[4:6] for m in m0]
@test all(norm.(mlin0, Inf) .< 1.0e-8)
@test all(norm.(mang0, Inf) .< 1.0e-8)
end
end
| Dojo | https://github.com/dojo-sim/Dojo.jl.git |
|
[
"MIT"
] | 0.7.5 | 104b6a1a358da9b1348ab7cd66ad685ecaca13d4 | code | 1506 | @testset "FiniteDiff comparison" begin
q = [1,2,3,4.0]
q = Quaternion(q ./ norm(q)...)
@test norm(Dojo.dmrpdq(Dojo.vector(q)) -
ForwardDiff.jacobian(Dojo.mrp, Dojo.vector(q)), Inf) < 1.0e-8
@test norm(Dojo.daxisdq(Dojo.vector(q)) -
ForwardDiff.jacobian(Dojo.axis, Dojo.vector(q))) < 1.0e-8
@test norm(Dojo.drotation_vectordq(Dojo.vector(q)) -
ForwardDiff.jacobian(Dojo.rotation_vector, Dojo.vector(q))) < 1.0e-8
q = one(Quaternion{Float64})
@test norm(Dojo.dmrpdq(Dojo.vector(q)) -
ForwardDiff.jacobian(Dojo.mrp, Dojo.vector(q)), Inf) < 1.0e-8
# TODO For zero rotation, ForwardDiff runs into a numeric singularity
@test norm(Dojo.drotation_vectordq(Dojo.vector(q)) -
FiniteDiff.finite_difference_jacobian(Dojo.rotation_vector, Dojo.vector(q))) < 1.0e-5
end
@testset "axes pair to quaternion" begin
n1 = [0,0,0.0]
n2 = [0,0,0.0]
q = Dojo.axes_pair_to_quaternion(n1, n2)
@test norm(n2 - Dojo.vector_rotate(n1, q), Inf) < 1e-8
n1 = rand(3)
n1 /= norm(n1)
n2 = n1
q = Dojo.axes_pair_to_quaternion(n1, n2)
@test norm(n2 - Dojo.vector_rotate(n1, q), Inf) < 1e-8
n1 = rand(3)
n1 /= norm(n1)
n2 = -n1
q = Dojo.axes_pair_to_quaternion(n1, n2)
@test norm(n2 - Dojo.vector_rotate(n1, q), Inf) < 1e-5
n1 = rand(3)
n1 /= norm(n1)
n2 = rand(3)
n2 /= norm(n2)
q = Dojo.axes_pair_to_quaternion(n1, n2)
@test norm(n2 - Dojo.vector_rotate(n1, q), Inf) < 1e-8
end
| Dojo | https://github.com/dojo-sim/Dojo.jl.git |
|
[
"MIT"
] | 0.7.5 | 104b6a1a358da9b1348ab7cd66ad685ecaca13d4 | code | 1689 | using Test
using BenchmarkTools
using LinearAlgebra
using Random
using SparseArrays
using StaticArrays
using FiniteDiff
using ForwardDiff
using Rotations
using Dojo
using DojoEnvironments
global REG = 1.0e-10
@testset "Integrator" verbose=true begin include("integrator.jl") end
@testset "Minimal Coordinates" verbose=true begin include("minimal.jl") end
@testset "Modified Rodrigues Parameters" verbose=true begin include("mrp.jl") end
@testset "Damper" verbose=true begin include("damper.jl") end
@testset "Jacobian (Solution Matrix)" verbose=true begin include("jacobian.jl") end
@testset "Data" verbose=true begin include("data.jl") end
@testset "Collisions" verbose=true begin include("collisions.jl") end
@testset "Momentum" verbose=true begin include("momentum.jl") end
@testset "Energy" verbose=true begin include("energy.jl") end
@testset "Behavior" verbose=true begin include("behaviors.jl") end
@testset "Joint Limits" verbose=true begin include("joint_limits.jl") end
@testset "Impulse Map" verbose=true begin include("impulse_map.jl") end
@testset "Bodies" verbose=true begin include("bodies.jl") end
@testset "Mechanism (Miscellaneous)" verbose=true begin include("mechanism.jl") end
@testset "Simulate" verbose=true begin include("simulate.jl") end
@testset "Visuals" verbose=true begin include("visuals.jl") end
@testset "Utilities" verbose=true begin include("utilities.jl") end
| Dojo | https://github.com/dojo-sim/Dojo.jl.git |
|
[
"MIT"
] | 0.7.5 | 104b6a1a358da9b1348ab7cd66ad685ecaca13d4 | code | 1155 | @testset "step!" begin
# get mechanism and simulate
mechanism = DojoEnvironments.get_mechanism(:pendulum;
timestep=0.1,
gravity=0.0);
# step (no control)
z1 = Dojo.get_maximal_state(mechanism)
u1 = zeros(Dojo.input_dimension(mechanism))
z2 = Dojo.step!(mechanism, z1, u1)
@test norm(z2 - z1) < 1.0e-6
# step (control)
z1 = Dojo.get_maximal_state(mechanism)
u1 = rand(Dojo.input_dimension(mechanism))
z2 = Dojo.step!(mechanism, z1, u1)
@test norm(z2 - z1) > 1.0e-6
end
@testset "Storage" begin
# get mechanism and simulate
mechanism = DojoEnvironments.get_mechanism(:pendulum;
timestep=0.1);
DojoEnvironments.initialize_pendulum!(mechanism,
angle=0.25 * π)
storage = Dojo.simulate!(mechanism, 1.0,
record=true,
verbose=false)
# check storage length
@test Dojo.length(storage) == 10
# convert to vector of vectors
z = Dojo.get_maximal_state(storage)
@test length(z) == 10
# convert back to storage
s = Dojo.generate_storage(mechanism, z)
@test typeof(s) <: Dojo.Storage
@test Dojo.length(s) == 10
end
| Dojo | https://github.com/dojo-sim/Dojo.jl.git |
|
[
"MIT"
] | 0.7.5 | 104b6a1a358da9b1348ab7cd66ad685ecaca13d4 | code | 922 | @test length(Dojo.module_dir()) > 0
@test Dojo.scn(1.0) == " 1.0e+0"
@test Dojo.scn(0.0) == " 0.0e+0"
@test Dojo.scn(Inf) == " Inf"
@test Dojo.scn(123.1,
digits=0) == " 1e+2"
# show functions
test_origin = Origin{Float64}()
display(test_origin)
@test true
test_body = Box(1.0,2.0,3.0,4.0)
display(test_body)
display(test_body.shape)
display(test_body.state)
@test true
test_joint = JointConstraint(Fixed(test_origin,test_body))
display(test_joint)
display(test_joint.translational)
display(test_joint.rotational)
@test true
test_mechanism = Mechanism(test_origin,[test_body],[test_joint])
display(test_mechanism)
# display(test_mechanism.system) # TODO Removed from tests because '_show_with_braille_patterns' fails during tests for Julia 1.6, but it works in GBS for 1.6.
@test true
test_storage = Storage(3,4)
display(test_storage)
@test true
test_options = SolverOptions()
display(test_options)
@test true
| Dojo | https://github.com/dojo-sim/Dojo.jl.git |
|
[
"MIT"
] | 0.7.5 | 104b6a1a358da9b1348ab7cd66ad685ecaca13d4 | code | 1373 | @testset "Utilities" begin
# create visualizer
vis = Dojo.Visualizer();
# default background
Dojo.set_background!(vis)
# add floor
Dojo.set_floor!(vis)
# add quadratic surface
Dojo.set_surface!(vis, x -> x' * x,
color=Dojo.RGBA(1.0, 0.0, 0.0, 1.0))
# lighting
Dojo.set_light!(vis)
# camera
Dojo.set_camera!(vis,
zoom=0.25)
# test that methods don't fail
@test true
end
@testset "MeshCat mechanism" begin
# create visualizer
vis = Dojo.Visualizer();
# get mechanism and simulate
mechanism = DojoEnvironments.get_mechanism(:halfcheetah;
timestep=0.1)
storage = Dojo.simulate!(mechanism, 0.25;
record=true, verbose=true);
# visualize simulation
Dojo.visualize(mechanism, storage; vis=vis)
# test that methods don't fail
@test true
end
@testset "URDF mesh" begin
# create visualizer
vis = Dojo.Visualizer();
# get mechanism and simulate
mechanism = DojoEnvironments.get_mechanism(:quadruped; timestep=0.1)
storage = Dojo.simulate!(mechanism, 0.25;
record=true, verbose=false)
# visualize simulation
Dojo.visualize(mechanism, storage; vis=vis)
# visualize w/ contact
Dojo.visualize(mechanism, storage;
vis=vis, show_contact=true)
# test that methods don't fail
@test true
end
| Dojo | https://github.com/dojo-sim/Dojo.jl.git |
|
[
"MIT"
] | 0.7.5 | 104b6a1a358da9b1348ab7cd66ad685ecaca13d4 | docs | 3319 | [](https://github.com/dojo-sim/Dojo.jl/actions/workflows/CI.yml)
[](https://codecov.io/gh/dojo-sim/Dojo.jl)
[](https://dojo-sim.github.io/Dojo.jl/dev)
# Dojo
A differentiable physics engine for robotics
- arXiv preprint: https://arxiv.org/abs/2203.00806
- Python interface: https://github.com/dojo-sim/dojopy
- site: https://sites.google.com/view/dojo-sim
- video presentation: https://youtu.be/TRtOESXJxJQ
[](https://youtu.be/TRtOESXJxJQ "Dojo: A Differentiable Simulator for Robotics")
# Update April 2023
- We are no longer actively developing Dojo, but pull requests are always welcome.
- We have updated or removed examples to account for changes since the initial version of Dojo.
- Additional developments on differentiable simulation:
- Differentiable collision detection (Kevin Tracy): [capsules](https://arxiv.org/abs/2207.00202), [convex primitives](https://arxiv.org/abs/2207.00669)
- Single-level contact dynamics + collision detection (Simon Le Cleac'h): [Silico](https://arxiv.org/pdf/2212.06764.pdf)
# Examples
## Simulation
<p float="left">
<img src="docs/src/assets/animations/atlas_drop.gif" width="120"/>
<img src="docs/src/assets//animations/astronaut.gif" width="210"/>
<img src="docs/src/assets/animations/dzhanibekov.gif" width="180"/>
<img src="docs/src/assets/animations/tippetop.gif" width="180"/>
</p>
## Learning and Control
<p float="left">
<img src="docs/src/assets/animations/quadruped.gif" width="275"/>
<img src="docs/src/assets/animations/ant_ars.gif" width="275"/>
<img src="docs/src/assets/animations/quadrotor.gif" width="175"/>
</p>
## System Identification
<p float="left">
<img src="docs/src/assets/animations/box_learning.gif" width="200"/>
<img src="docs/src/assets/animations/cone_learning.gif" width="200"/>
<img src="docs/src/assets/animations/box_toss.gif" width="300"/>
</p>
## Interfacing Other Packages
| [ReinforcementLearning.jl](https://github.com/JuliaReinforcementLearning/ReinforcementLearning.jl): DQN | [ControlSystems.jl](https://github.com/JuliaControl/ControlSystems.jl): LQR |
| - | -|
| <img src="docs/src/assets/animations/cartpole_rl.gif" width="250"/> | <img src="docs/src/assets/animations/cartpole_lqr.gif" width="250"/> |
## Installation
`Dojo` can be added via the Julia package manager (type `]`):
```julia
pkg> add Dojo
```
For convenience mechanisms and environments, add `DojoEnvironments` additionally:
```julia
pkg> add DojoEnvironments
```
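A minimal first simulation then looks roughly as follows (a sketch based on the package's test suite; keyword arguments may differ slightly between versions):
```julia
using Dojo, DojoEnvironments

# build a mechanism, simulate for one second, and visualize the result
mechanism = DojoEnvironments.get_mechanism(:pendulum; timestep=0.01)
storage = Dojo.simulate!(mechanism, 1.0; record=true)
Dojo.visualize(mechanism, storage; vis=Dojo.Visualizer())
```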
## Citing
```
@article{howelllecleach2022,
title={Dojo: A Differentiable Simulator for Robotics},
author={Howell, Taylor and Le Cleac'h, Simon and Bruedigam, Jan and Kolter, Zico and Schwager, Mac and Manchester, Zachary},
journal={arXiv preprint arXiv:2203.00806},
url={https://arxiv.org/abs/2203.00806},
year={2022}
}
```
## How To Contribute
Please submit a pull request or open an issue.
See the [docs](https://dojo-sim.github.io/Dojo.jl/dev/contributing.html) for contribution ideas.
| Dojo | https://github.com/dojo-sim/Dojo.jl.git |
|
[
"MIT"
] | 0.7.5 | 104b6a1a358da9b1348ab7cd66ad685ecaca13d4 | docs | 206 | # DojoEnvironments
This package contains convenience mechanisms and environments for the [Dojo.jl](https://github.com/dojo-sim/Dojo.jl) package.
For explanations, docs, and examples, see the main package.
| Dojo | https://github.com/dojo-sim/Dojo.jl.git |
|
[
"MIT"
] | 0.7.5 | 104b6a1a358da9b1348ab7cd66ad685ecaca13d4 | docs | 1932 | # API Documentation
Docstrings for Dojo.jl interface members can be [accessed through Julia's built-in documentation system](https://docs.julialang.org/en/v1/manual/documentation/index.html#Accessing-Documentation-1) or in the list below.
```@meta
CurrentModule = Dojo
```
## Contents
```@contents
Pages = ["api.md"]
```
## Index
```@index
Pages = ["api.md"]
```
## Mechanism
```@docs
Mechanism
get_node
get_body
get_joint
get_contact
get_maximal_state
get_next_state
get_minimal_state
set_maximal_state!
set_minimal_state!
set_input!
maximal_dimension
minimal_dimension
input_dimension
input_dimensions
zero_coordinates!
zero_velocities!
root_to_leaves_ordering
set_floating_base
set_external_force!
add_external_force!
```
### Nodes
```@docs
Node
Body
Origin
Constraint
Shape
EmptyShape
Mesh
Box
Cylinder
Capsule
Sphere
Pyramid
FrameShape
CombinedShapes
```
### Joints
```@docs
Joint
Rotational
Translational
JointConstraint
Floating
Fixed
Prismatic
Planar
FixedOrientation
Revolute
Cylindrical
PlanarAxis
FreeRevolute
Orbital
PrismaticOrbital
PlanarOrbital
FreeOrbital
Spherical
CylindricalFree
PlanarFree
```
### Contacts
```@docs
Contact
ImpactContact
LinearContact
NonlinearContact
ContactConstraint
contact_constraint
contact_location
get_sdf
Collision
SphereHalfSpaceCollision
SphereSphereCollision
SphereCapsuleCollision
SphereBoxCollision
contact_normal
contact_tangent
```
### Representations
```@docs
State
minimal_to_maximal
maximal_to_minimal
```
### Mechanics
```@docs
mechanical_energy
kinetic_energy
potential_energy
momentum
```
## Simulate
```@docs
Storage
step!
step_minimal_coordinates!
simulate!
```
## Gradients
```@docs
get_maximal_gradients!
get_minimal_gradients!
maximal_to_minimal_jacobian
minimal_to_maximal_jacobian
```
## Solver
```@docs
SolverOptions
mehrotra!
```
## Visualization
```@docs
visualize
build_robot
set_camera!
set_light!
set_surface!
set_floor!
set_arrow!
```
| Dojo | https://github.com/dojo-sim/Dojo.jl.git |
|
[
"MIT"
] | 0.7.5 | 104b6a1a358da9b1348ab7cd66ad685ecaca13d4 | docs | 433 | # Citing
If you find Dojo useful in your project, we kindly request that you cite the following paper:
```
@article{howelllecleach2022dojo,
title={Dojo: {A} {D}ifferentiable {S}imulator for {R}obotics},
author={Howell, Taylor A. and Le Cleac'h, Simon and Bruedigam, Jan and Kolter, J. Zico and Schwager, Mac and Manchester, Zachary},
year={2022}
}
```
A preprint can be downloaded from [arXiv](https://arxiv.org/abs/2203.00806). | Dojo | https://github.com/dojo-sim/Dojo.jl.git |
|
[
"MIT"
] | 0.7.5 | 104b6a1a358da9b1348ab7cd66ad685ecaca13d4 | docs | 2642 | # Contributing
Contributions are always welcome!
* If you want to contribute features, bug fixes, etc, please take a look at our __Code Style Guide__ below
* Please report any issues and bugs that you encounter in [Issues](https://github.com/dojo-sim/Dojo.jl/issues)
* As an open source project we are also interested in any projects and applications that use Dojo. Please let us know via email to: [email protected] or [email protected]
## Potentially Useful Contributions
Here are a list of current to-do's that would make awesome contributions:
- reduce allocations by using StaticArrays and https://docs.julialang.org/en/v1/manual/profile/#Line-by-Line-Allocation-Tracking
- improved parsing of URDF files
- joint limits, friction coefficients
- improved collision detection
- body-to-body contact
- general convex shapes
- curved surfaces
- GPU support
- nice REPL interface
- interactive GUI
## Code Style Guide
The code in this repository follows the naming and style conventions of [Julia Base](https://docs.julialang.org/en/v1.0/manual/style-guide/#Style-Guide-1) with a few modifications. This style guide is heavily "inspired" by the guides of [John Myles White](https://github.com/johnmyleswhite/Style.jl), [JuMP](http://www.juliaopt.org/JuMP.jl/latest/style), and [COSMO](https://github.com/oxfordcontrol/COSMO.jl)
### Formatting
* Use one tab when indenting a new block (except `module`)
* Use spaces between operators, except for `^`, `'`, and `:`
* Use single space after commas and semicolons
* Don't use spaces around parentheses, or braces
**Bad**: `f(x,y) = [5*sin(x+y);y']` **Good**: `f(x, y) = [5 * sin(x + y); y']`
* Use spacing with keyword arguments
**Bad**: `foo(x::Float; y::Integer = 1)` **Good**: `foo(x::Float; y::Integer=1)`
* Don't parenthesize conditions
**Bad**: `if (a == b)` **Good**: `if a == b`
### Naming
* Modules and Type names use capitalization and camel case, e.g. `module LinearAlgebra`, `struct ConvexSets`.
* Functions are lowercase and use underscores to separate words, e.g. `has_key(x)`, `is_valid(y)`.
* Normal variables are lowercase and use underscores like functions, e.g. `convex_set`
* Constants are uppercase, e.g. `const MY_CONSTANT`
* **Always** append `!` to names of functions that modify their arguments.
* Function arguments that are mutated come first. Otherwise follow the rules laid out in Julia Base [Argument ordering](https://docs.julialang.org/en/v1.0/manual/style-guide/#Write-functions-with-argument-ordering-similar-to-Julia-Base-1)
* Files are named like functions, e.g. `my_new_file.jl`
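Putting these conventions together (an illustrative sketch; the names are hypothetical and not part of the codebase):
```julia
const MAX_ITERATIONS = 100          # constants are uppercase

struct ConvexSet                    # types use CamelCase
    points::Vector{Float64}
end

# mutating function: `!` suffix, mutated argument first, keywords without surrounding spaces
function normalize_points!(set::ConvexSet; scale::Float64=1.0)
    set.points .= scale .* set.points ./ maximum(abs.(set.points))
    return set
end
```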
### Syntax
* Use `1.0` instead of `1.` | Dojo | https://github.com/dojo-sim/Dojo.jl.git |
|
[
"MIT"
] | 0.7.5 | 104b6a1a358da9b1348ab7cd66ad685ecaca13d4 | docs | 2269 | # Get Started
__[Dojo](https://github.com/dojo-sim/Dojo.jl) is a differentiable simulator for robotics__, prioritizing accurate physics and useful gradients. The simulator is written in pure Julia in order to be both performant and easy to use.
## Features
* __Maximal-Coordinates Representation__: Fast and efficient conversion between [maximal](background_representations/maximal_representation.md) and [minimal](background_representations/minimal_representation.md) representations
* __Smooth Gradients__: Simulation with [hard contact](background_contact/impact.md) and useful [gradients](background_representations/gradients.md) through contact events
* __Open Source__: Code is available on [GitHub](https://github.com/dojo-sim/Dojo.jl) and distributed under the MIT License
* __Python Interface__: [dojopy](https://github.com/dojo-sim/dojopy)
## Installation
Dojo can be installed using the Julia package manager for Julia `v1.6` and higher. Inside the Julia REPL, type `]` to enter the Pkg REPL mode then run
`pkg> add Dojo`
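After installation, a first simulation only takes a few lines; the sketch below uses the companion `DojoEnvironments` package and mirrors the test suite (exact keyword arguments may vary between versions):
```julia
using Dojo, DojoEnvironments

mechanism = DojoEnvironments.get_mechanism(:pendulum; timestep=0.01, gravity=-9.81)
storage = Dojo.simulate!(mechanism, 1.0; record=true, verbose=false)
x = Dojo.get_minimal_state(mechanism)   # joint coordinates and velocities
z = Dojo.get_maximal_state(mechanism)   # body positions, velocities, orientations, angular velocities
```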
## Related talks
## Credits
The following people are involved in the development of Dojo:
__Primary Development__
* [Simon Le Cleac'h](https://simon-lc.github.io/) (main development, contact modeling, interior-point solver, gradients)
* [Taylor Howell](https://thowell.github.io/) (main development, contact modeling, interior-point solver, gradients)
* [Jan Bruedigam](https://github.com/janbruedigam) (main development, maximal representation and graph-based solver)
* [Zico Kolter](https://zicokolter.com/)
* [Mac Schwager](https://web.stanford.edu/~schwager/)
* [Zachary Manchester](https://www.ri.cmu.edu/ri-faculty/zachary-manchester/) (principal investigator)
__Additional Contributions__
* [Suvansh Sanjeev](https://suvan.sh/) (PyTorch interface)
* [Benjamin Bokser](http://www.benbokser.com/) (REx Hopper)
Development by the [Robotic Exploration Lab](https://roboticexplorationlab.org/).
If this project is useful for your work please consider
* [Citing](citing.md) the relevant papers
* Leaving a star on the [GitHub repository](https://github.com/dojo-sim/Dojo.jl)
## Licence
Dojo.jl is licensed under the MIT License. For more details click [here](https://github.com/dojo-sim/Dojo.jl/blob/main/LICENSE.md).
| Dojo | https://github.com/dojo-sim/Dojo.jl.git |
|
[
"MIT"
] | 0.7.5 | 104b6a1a358da9b1348ab7cd66ad685ecaca13d4 | docs | 527 | # Collisions
!!! info
Only sphere-halfspace collisions (spheres colliding with planes) are fully supported. The other contact models are experimental.
Dojo currently implements the following collision models:
- [`SphereHalfSpaceCollision`](@ref)
- [`SphereSphereCollision`](@ref)
- [`SphereCapsuleCollision`](@ref)
- [`SphereBoxCollision`](@ref)
```@raw html
<img src="../assets/animations/sphere_capsule_drop.gif" width="200"/>
```
```@raw html
<img src="../assets/animations/sphere_box_drop.gif" width="200"/>
``` | Dojo | https://github.com/dojo-sim/Dojo.jl.git |
|
[
"MIT"
] | 0.7.5 | 104b6a1a358da9b1348ab7cd66ad685ecaca13d4 | docs | 1822 | # Overview
Impact and friction behaviors are modeled, along with the system’s dynamics, as a nonlinear complementarity problem (NCP). This model simulates hard contact without requiring system-specific solver tuning. Additionally, contacts between a system and the environment are treated as a single graph node connected to a rigid body (see below). As a result, the simulator retains efficient linear-time complexity for open-chain mechanical systems.

Three contact models are implemented in Dojo:
- [`ImpactContact`](@ref) enforces frictionless contact,
- [`NonlinearContact`](@ref) enforces contact with a non-linear cone of friction (second-order cone),

- [`LinearContact`](@ref) enforces contact with a linearized cone of friction (pyramidal cone).

All three of these contact models implement hard contact, i.e., no interpenetration. This means that for both the nonlinear and linearized cones, we concatenate the constraints resulting from friction with the impact constraints.
### Implementation
Dojo currently supports contact constraints between a sphere and the ground, i.e., a horizontal half-space at altitude 0.0. Each spherical contact is attached to a single [`Body`](@ref).
To create a new point of contact, we need to define:
- the [`Body`](@ref) to which the contact constraint is attached
- radius of the sphere defining the spherical contact
- coefficient of friction (except for [`ImpactContact`](@ref))
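A rough sketch of attaching such a contact point with [`contact_constraint`](@ref) is shown below; the body name `:trunk` and the keyword names (`friction_coefficient`, `contact_origin`, `contact_radius`) are assumptions for illustration and may differ between versions:
```julia
body = get_body(mechanism, :trunk)                    # hypothetical body of an existing mechanism
contact = contact_constraint(body, [0.0; 0.0; 1.0];   # surface normal of the halfspace
    friction_coefficient=0.8,                         # coefficient of friction
    contact_origin=[0.0; 0.0; -0.1],                  # attachment point in the body frame
    contact_radius=0.05)                              # radius of the contact sphere
```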
### Example
For the Quadruped model shown in the picture below, we define 12 contact spheres, shown in red:
- 4 for the feet,
- 4 for the knees,
- 4 for the hips.

| Dojo | https://github.com/dojo-sim/Dojo.jl.git |
|
[
"MIT"
] | 0.7.5 | 104b6a1a358da9b1348ab7cd66ad685ecaca13d4 | docs | 911 | # Impact
### Mathematical Model
We model hard contact via constraints on the system’s configuration and the applied contact forces. For a system with $P$ contact points, we define a signed-distance function,
```math
\phi : \mathbf{Z} \rightarrow \mathbf{R}^P
```
subject to the following element-wise constraint:
```math
\phi(z) \geq 0,
```
Impact forces with magnitude ``\gamma \in \mathbf{R}^P`` are applied to the bodies’ contact points in the direction of their surface normals in order to enforce this constraint and prevent interpenetration. A non-negative constraint,
```math
\gamma \geq 0,
```
enforces physical behavior that impulses are repulsive (e.g., the floor does not attract bodies), and the complementarity condition,
```math
\gamma \circ \phi(z) = 0,
```
where ``\circ`` is an element-wise product operator, enforces zero force if the body is not in contact and allows non-zero force during contact.
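As a small illustration (a sketch, not Dojo's internal implementation), these three conditions can be checked numerically for candidate signed distances and impulses:
```julia
# ϕ: signed distances, γ: impact impulse magnitudes (one entry per contact point)
function impact_conditions_hold(ϕ, γ; tol=1.0e-8)
    return all(ϕ .>= -tol) &&          # no interpenetration
           all(γ .>= -tol) &&          # impulses are repulsive
           all(abs.(γ .* ϕ) .<= tol)   # complementarity: zero force unless in contact
end
```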
| Dojo | https://github.com/dojo-sim/Dojo.jl.git |
|
[
"MIT"
] | 0.7.5 | 104b6a1a358da9b1348ab7cd66ad685ecaca13d4 | docs | 1784 | # Linearized Friction
Coulomb friction instantaneously maximizes the dissipation of kinetic energy between two objects in contact.
### Mathematical Model
For a single contact point, this physical phenomenon can be modeled by the following optimization problem,
```math
\begin{align*}
\underset{b}{\text{minimize}} & \quad v^T b \\
\text{subject to} & \quad \|b\|_2 \leq \mu \gamma,
\end{align*}
```
where ``v \in \mathbf{R}^{2}`` is the tangential velocity at the contact point, ``b \in \mathbf{R}^2`` is the friction force, and ``\mu \in \mathbf{R}_{+}`` is the coefficient of friction between the two objects.
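For reference, when the contact point is sliding (``v \neq 0``) this problem has the closed-form solution ``b^* = -\mu \gamma \, v / \|v\|_2``: friction acts opposite to the sliding direction at the boundary of the cone. A small sketch:
```julia
using LinearAlgebra

# maximal-dissipation friction force for tangential velocity v, impulse γ, friction coefficient μ
function coulomb_friction(v, γ, μ)
    norm(v) > 0.0 || return zero(v)     # sticking case: any force inside the cone is feasible
    return -μ * γ * v / norm(v)
end

coulomb_friction([1.0, 0.0], 2.0, 0.5)  # returns [-1.0, 0.0]
```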
### Linearized Model
This above problem is naturally a convex second-order cone program, and can be efficiently and reliably solved. However, classically, an approximate version:
```math
\begin{align*}
\underset{\beta}{\text{minimize}} & \quad [v^T -v^T] \beta, \\
\text{subject to} & \quad \beta^T \mathbf{1} \leq \mu \gamma, \\
& \quad \beta \geq 0,
\end{align*}
```
which satisfies the LCP formulation, is instead solved. Here, the friction cone is linearized and the friction vector, ``\beta \in \mathbf{R}^{4}``, is correspondingly overparameterized and subject to additional non-negative constraints.
The optimality conditions of the above problem and constraints used in the LCP are:
```math
\begin{align*}
[v^T -v^T]^T + \psi \mathbf{1} - \eta &= 0, \\
\mu \gamma -\beta^T \textbf{1} & \geq 0,\\
\psi \cdot (\mu \gamma - \beta^T \textbf{1}) &= 0, \\
\beta \circ \eta &= 0, \\
\beta, \psi, \eta &\geq 0,
\end{align*}
```
where ``\psi \in \mathbf{R}`` and ``\eta \in \mathbf{R}^{4}`` are the dual variables associated with the friction cone and positivity constraints, respectively, and ``\textbf{1}`` is a vector of ones.
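These conditions transcribe directly into code; the sketch below (illustrative only, not Dojo's solver) evaluates the residual of the equality conditions for candidate values:
```julia
# v: tangential velocity (2), β: linearized friction weights (4),
# ψ, η: dual variables, μ: friction coefficient, γ: impact impulse
function linearized_friction_residual(v, β, ψ, η, μ, γ)
    return [
        [v; -v] .+ ψ .- η;            # stationarity
        ψ * (μ * γ - sum(β));         # complementarity of the friction-cone constraint
        β .* η                        # complementarity of the positivity constraints
    ]
end
```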
| Dojo | https://github.com/dojo-sim/Dojo.jl.git |
|
[
"MIT"
] | 0.7.5 | 104b6a1a358da9b1348ab7cd66ad685ecaca13d4 | docs | 3140 | # Nonlinear friction
In contrast to an LCP approach, we utilize the optimality conditions of this problem in a form amenable to a primal-dual interior-point solver. The associated cone program is,
```math
\begin{align*}
\underset{\beta}{\text{minimize}} & \hspace{0.5em} \begin{bmatrix} 0 & v^T \end{bmatrix} \beta\\
\text{subject to} & \hspace{0.5em} \beta_{(1)} = \mu \gamma, \\
& \hspace{0.5em} \beta \in \mathcal{Q}^3,\\
\end{align*}
```
where subscripts indicate vector indices and the $n$-dimensional second-order cone $\mathcal{Q}^n$ is defined by:
```math
\mathcal{Q}^n = \{(a_{(1)}, a_{(2:n)}) \in \mathbf{R} \times \mathbf{R}^{n-1}\, | \, \|a_{(2:n)}\|_2 \leq a_{(1)} \},
```
The relaxed optimality conditions for the above problem in interior-point form are:
```math
\begin{align*}
v - \eta_{(2:3)} &= 0, \\
\beta_{(1)} - \mu \gamma &= 0, \\
\beta \circ \eta &= \kappa \mathbf{e}, \\
\beta, \eta &\in \mathcal{Q}^3,
\end{align*}
```
with dual variable $\eta \in \mathcal{Q}^3$ associated with the second-order-cone constraints, and central-path parameter, $\kappa \in \mathbf{R}_{+}$.
The second-order-cone product is:
```math
\beta \circ \eta = (\beta^T \eta, \beta_{(1)} \eta_{(2:n)} + \eta_{(1)} \beta_{(2:n)}),
```
and,
```math
\mathbf{e} = (1, 0, \dots, 0),
```
is its corresponding identity element. Friction is recovered from the solution: $b = \beta^*_{(2:3)}$. The benefits of this model are increased physical fidelity and fewer optimization variables, without substantial increase in computational cost.
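A direct transcription of the cone product, cone membership, and identity element (a sketch, not Dojo's internal implementation):
```julia
using LinearAlgebra

# second-order-cone product β ∘ η
cone_product(β, η) = [β' * η; β[1] * η[2:end] + η[1] * β[2:end]]

# membership test for the second-order cone Qⁿ
in_second_order_cone(a) = norm(a[2:end]) <= a[1]

# identity element e = (1, 0, …, 0) of dimension n
cone_identity(n) = [1.0; zeros(n - 1)]
```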
### Nonlinear Complementarity Problem
To simulate a system represented in maximal coordinates that experiences contact, a solver aims to satisfy the following relaxed feasibility problem:
```math
\begin{align*}
\text{find} & \quad z_{+}, w, \gamma, \beta^{(1:P)}, \eta^{(1:P)}, s\\
\text{s.t.} & \quad f(z_{-}, z, z_{+}, w) + B(z) u + C(z)^T \lambda = 0, \\
& \quad s - \phi(z_{+}) = 0, \\
& \quad \gamma \circ s = \kappa \textbf{1}, \\
& \quad \beta^{(i)} \circ \eta^{(i)} = \kappa \mathbf{e}, && \quad i = 1, \dots, P,\\
& \quad v^{(i)}(z, z_{+}) - \eta_{(2:3)}^{(i)} = 0, && \quad i = 1, \dots, P, \\
& \quad \beta^{(i)}_{(1)} - \mu^{(i)} \gamma^{(i)} = 0, && \quad i = 1, \dots, P,\\
& \quad \gamma, s \geq 0,\\
& \quad \beta^{(i)}, \eta^{(i)} \in \mathcal{Q}^3, && \quad i = 1, \dots, P,
\end{align*}
```
where $u \in \mathbf{R}^m$ is the control input at the current time step, $\lambda = (\beta^{(1)}_{(2:3)}, \gamma^{(1)}, \dots, \beta^{(P)}_{(2:3)}, \gamma^{(P)}) \in \mathbf{\Lambda}$ is the concatenation of impact and friction impulses, $B : \mathbf{Z} \rightarrow \mathbf{R}^{6N \times m}$ is the input Jacobian mapping control inputs into maximal coordinates, $C : \mathbf{Z} \rightarrow \mathbf{R}^{\text{dim}(\mathbf{\Lambda}) \times 6N}$ is a contact Jacobian mapping between maximal coordinates and contact surfaces, $s \in \mathbf{R}^P$ is a slack variable introduced for convenience, and $v^{(i)} : \mathbf{Z} \times \mathbf{Z} \rightarrow \mathbf{R}^2$ is the tangential velocity at contact point $i$. Joint limits and internal friction are readily incorporated into this problem formulation.
| Dojo | https://github.com/dojo-sim/Dojo.jl.git |
|
[
"MIT"
] | 0.7.5 | 104b6a1a358da9b1348ab7cd66ad685ecaca13d4 | docs | 3683 | # Background on Gradients
## Implicit Function Theorem
An implicit function, ``r : \mathbf{R}^{n_w} \times \mathbf{R}^{n_\theta} \rightarrow \mathbf{R}^{n_w}``, is defined as
```math
r(w^*; \theta) = 0,
```
for solution ``w^* \in \mathbf{R}^{n_w}`` and problem data ``\theta \in \mathbf{R}^{n_\theta}``. At a solution point of the above equation the sensitivities of the solution with respect to the problem data, i.e., ``\partial w^* / \partial \theta``, can be computed under certain conditions. First, we approximate the above equation to first order:
```math
\frac{\partial r}{\partial w} \delta w + \frac{\partial r}{\partial \theta} \delta \theta = 0,
```
and then solve for the relationship:
```math
\frac{\partial w^*}{\partial \theta} = -\Big(\frac{\partial r}{\partial w}\Big)^{-1} \frac{\partial r}{\partial \theta}. \quad \quad(1)
```
In case ``(\partial r / \partial w)^{-1}`` is not well defined (e.g., ``\partial r / \partial w`` is not full rank), we can either apply regularization or approximately solve (1) with, for example, a least-squares approach.
Often, Newton's method is employed to find solutions to the implicit equation and custom linear-system solvers can efficiently compute search directions for this purpose. Importantly, the factorization of ``\partial r / \partial w`` used to find a solution can be reused to compute (1) at very low computational cost using only back-substitution. Additionally, each element of the problem-data sensitivity can be computed in parallel.
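A toy example of this recipe using ForwardDiff (the residual here is made up for illustration; Dojo applies the same idea to its simulator residual and reuses the solver's factorization):
```julia
using ForwardDiff
using LinearAlgebra

# toy residual r(w; θ) = 0 with one unknown and two parameters
r(w, θ) = [w[1]^3 + θ[1] * w[1] - θ[2]]

# solve r(w; θ) = 0 with Newton's method
function newton_solve(θ; w=[1.0], iterations=20)
    for _ in 1:iterations
        ∂r∂w = ForwardDiff.jacobian(w -> r(w, θ), w)
        w = w - ∂r∂w \ r(w, θ)
    end
    return w
end

θ = [2.0, 3.0]
w_star = newton_solve(θ)

# implicit-function theorem: ∂w*/∂θ = -(∂r/∂w)⁻¹ ∂r/∂θ
∂r∂w = ForwardDiff.jacobian(w -> r(w, θ), w_star)
∂r∂θ = ForwardDiff.jacobian(θ -> r(w_star, θ), θ)
∂w∂θ = -∂r∂w \ ∂r∂θ
```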
## Dojo's Gradient
At a solution point, ``w^*(\theta, \kappa)``, the sensitivity of the solution with respect to the problem data, i.e., ``\partial w^* / \partial \theta``, is efficiently computed using the implicit-function theorem (1) to differentiate through the solver's residual.
The efficient linear-system solver used for the simulator, as well as the computation and factorization of ``\partial r / \partial w``, is used to compute the sensitivities for each element of the problem data. Calculations over the individual columns of ``\partial r / \partial \theta`` can be performed in parallel.
The problem data for each simulation step include: the previous and current configurations, control input, and additional terms like the time step, friction coefficients, and parameters of each body. The chain rule is utilized to compute gradients with respect to the finite-difference velocities as well as transformations between minimal- and maximal-coordinate representations.
In many robotics scenarios, we are interested in gradient information through contact events. Instead of computing gradients for hard contact with zero or very small central-path parameters, we use a relaxed value from intermediate solutions ``w^*(\theta, \kappa > 0)`` corresponding to a soft contact model. In practice, we find that these smooth gradients greatly improve the performance of gradient-based optimization methods.
## Gradient Comparison
```@raw html
<img src="../assets/pictures/gradient_comparison.png" width="500"/>
```
Gradient comparison between randomized smoothing and Dojo's smooth gradients. The dynamics are shown for a box in the ``XY`` plane that is resting on a flat surface and is displaced an amount ``\Delta`` by an input ``F`` (top left). Its corresponding exact gradients are shown in black. Gradient bundles (right column) are computed using sampling schemes with varying covariances ``\Sigma`` and ``500`` samples. Dojo's gradients (middle column) are computed for different values of ``\kappa``, corresponding to the smoothness of the contact model. Compared to the 500-sample gradient bundle, Dojo's gradients are not noisy and are 100 times faster to compute with a single worker.
| Dojo | https://github.com/dojo-sim/Dojo.jl.git |
|
[
"MIT"
] | 0.7.5 | 104b6a1a358da9b1348ab7cd66ad685ecaca13d4 | docs | 619 | # Maximal Coordinates
The ``i``-th body in a mechanism with ``N`` bodies has state:
```math
z^{(i)} = (x^{(i)}, v^{(i)}, q^{(i)}, \omega^{(i)}) \in \mathbf{R}^3 \times \mathbf{R}^3 \times \mathbf{H} \times \mathbf{R}^3,
```
represented in maximal coordinates, where ``\mathbf{H}`` is the space of unit quaternions.
- ``x``: position in world frame
- ``v``: linear velocity in the world frame
- ``q``: orientation represented as a unit quaternion
- ``\omega``: angular velocity in the body frame
The mechanism state:
```math
z = (z^{(1)}, \dots, z^{(N)}),
```
is the concatenation of all body states.
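As an illustration, the sketch below queries and unpacks the stacked maximal state of a mechanism. It assumes Dojo's `get_maximal_state` returns 13 values per body in the order above (position, linear velocity, quaternion, angular velocity):
```julia
using Dojo, DojoEnvironments

mechanism = get_mechanism(:pendulum)
z = get_maximal_state(mechanism) # assumed layout: 13 entries per body

for (i, body) in enumerate(mechanism.bodies)
    zi = z[(i - 1) * 13 .+ (1:13)]
    x, v, q, ω = zi[1:3], zi[4:6], zi[7:10], zi[11:13]
    println("$(body.name): x = $x, q = $q")
end
```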
| Dojo | https://github.com/dojo-sim/Dojo.jl.git |
|
[
"MIT"
] | 0.7.5 | 104b6a1a358da9b1348ab7cd66ad685ecaca13d4 | docs | 1032 | # Minimal Coordinates
Dojo simulates systems in [maximal coordinates](maximal_representation.md).
For a mechanism with ``M`` joints and ``N`` bodies, the maximal representation ``z`` can be efficiently converted to minimal coordinates:
```math
y = (y^{(1)}, \dots, y^{(M)}) \leftarrow z = (z^{(1)}, \dots, z^{(N)}),
```
where ``y^{(j)}`` depends on the degree and type of joint. **Note**: this minimal representation does not stack coordinates followed by velocities, which is a common convention; instead, **coordinates and velocities are grouped by joint**.
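For example, the sketch below shows the joint-wise grouping for the cartpole, using `get_minimal_state` as in the controller examples later in these docs (the exact ordering follows the joint order of the mechanism):
```julia
using Dojo, DojoEnvironments

mechanism = get_mechanism(:cartpole)
y = get_minimal_state(mechanism)

# For the cartpole, y groups (coordinate, velocity) per joint:
# y = [cart position, cart velocity, pole angle, pole angular velocity],
# rather than all coordinates followed by all velocities.
```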
Each minimal state comprises:
```math
y = (p_{\text{translational}}, p_{\text{rotational}}, w_{\text{translational}}, w_{\text{rotational}})
```
coordinates ``p`` and velocities ``w`` for both translational and rotational degrees of freedom.
In the case of a floating-base joint, the minimal-representation orientation is converted to [modified Rodrigues parameters](https://en.wikipedia.org/wiki/Rotation_formalisms_in_three_dimensions) from a unit quaternion. | Dojo | https://github.com/dojo-sim/Dojo.jl.git |
|
[
"MIT"
] | 0.7.5 | 104b6a1a358da9b1348ab7cd66ad685ecaca13d4 | docs | 4012 | # Algorithm
### Overview
To simulate the system forward in time, we need to solve a Nonlinear Complementarity Problem (NCP) at each time step. To efficiently and reliably satisfy the NCP, we developed a custom primal-dual interior-point solver for NCPs with cone constraints and quaternions. The algorithm is largely based upon Mehrotra's predictor-corrector algorithm, while borrowing practical numerical features from CVXOPT to handle cones and non-Euclidean optimization to handle quaternions. We also introduce heuristics that further improve reliability and overall performance of the solver for our simulation-step NCPs.
The primary advantages of this algorithm are the correction to the classic Newton step, which can greatly reduce the iterations required by the solver (often halving the total number of iterations), and feedback on the problem's central-path parameter that helps avoid premature ill-conditioning and adaptively drives the complementarity violation to zero in order to reliably simulate hard contact.
### Problem formulation
The solver aims to satisfy instantiations of the following problem:
```math
\begin{align*}
\text{find} & \quad x, y, z \\
\text{subject to} & \quad c(x, y, z; \theta) = 0, \\
& \quad y^{(i)} \circ z^{(i)} = \kappa \mathbf{e}, && \quad i = 1,\dots,n, \\
& \quad y^{(i)}, z^{(i)} \in \mathcal{K}, && \quad i = 1,\dots, n,
\end{align*}
```
with decision variables ``x \in \mathbf{R}^k`` and ``y, z \in \mathbf{R}^m``, equality-constraint set ``c : \mathbf{R}^k \times \mathbf{R}^m \times \mathbf{R}^m \times \mathbf{R}^l \rightarrow \mathbf{R}^h``, problem data ``\theta \in \mathbf{R}^l``; and where ``\mathcal{K}`` is the Cartesian product of ``n`` total positive-orthant and second-order cones. The variables are partitioned: ``x = (x^{(1)}, \dots, x^{(p)})``, where ``i = 1`` are Euclidean variables and ``i = 2, \dots, p`` are each quaternion variables; and ``y = (y^{(1)}, \dots, y^{(n)})``, ``z = (z^{(1)}, \dots, z^{(n)})``, where ``j = 1`` is the positive-orthant and the remaining ``j = 2, \dots, n`` are second-order cones. For convenience, we denote ``w = (x, y, z)``.
The algorithm aims to satisfy a sequence of relaxed problems with ``\kappa > 0`` and ``\kappa \rightarrow 0`` in order to reliably converge to a solution of the original problem (i.e., ``\kappa = 0``). This continuation approach helps avoid premature ill-conditioning and is the basis for numerous convex and non-convex general-purpose interior-point solvers.
### Violation metrics
Two metrics are used to measure progress:
the constraint violation,
```math
r_{\text{vio}} = \| c(w; \theta) \|_{\infty},
```
and complementarity violation,
```math
b_{\text{vio}} = {\text{max}}_i \{\| y^{(i)} \circ z^{(i)} \|_{\infty}\}.
```
The NCP is considered solved when ``r_{\text{vio}} < r_{\text{tol}}`` and ``b_{\text{vio}} < b_{\text{tol}}``.
!!! info "solver options"
Both `r_tol` and `b_tol` are options that can easily be accessed and modified via [`SolverOptions`](@ref).
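A compact sketch of these two metrics (not Dojo's internal code), where `c` is the equality-constraint residual and `yz_products` collects the cone products ``y^{(i)} \circ z^{(i)}``:
```julia
using LinearAlgebra

r_vio(c) = norm(c, Inf)
b_vio(yz_products) = maximum(norm(yz, Inf) for yz in yz_products)

# converged when both violations fall below the tolerances
converged(c, yz_products; r_tol=1e-4, b_tol=1e-4) =
    r_vio(c) < r_tol && b_vio(yz_products) < b_tol
```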
### Newton Steps
The main loop of the solver performs Newton's method on the equality-constraint set ``c`` and the bilinear constraints. The solver typically converges in about 10 iterations.
!!! info "solver options"
The maximal number of Newton's iterations `max_iter` can be set via [`SolverOptions`](@ref).
### Line Search
Newton's method provides a search direction; we then perform a line search along this direction to determine the step length ``\alpha``. We use a backtracking line search that accepts the step whenever it decreases ``r_{\text{vio}}`` or ``b_{\text{vio}}``.
The line search starts with a step ``\alpha=1``; if the step-acceptance conditions are not met, the step is decreased geometrically:
``\alpha \leftarrow s \cdot \alpha``. The line search takes at most `max_ls` backtracking steps.
!!! info "solver options"
The scaling parameter ``s`` is called `ls_scale`, and the maximum number of line-search iterations `max_ls` can be set via [`SolverOptions`](@ref).
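The acceptance rule described above can be sketched generically as follows (a simplified illustration, not Dojo's implementation); `violations` is assumed to return the pair ``(r_{\text{vio}}, b_{\text{vio}})``:
```julia
function backtracking_line_search(w, Δw, violations; ls_scale=0.5, max_ls=10)
    r0, b0 = violations(w)
    α = 1.0
    for _ in 1:max_ls
        r, b = violations(w + α * Δw)
        (r < r0 || b < b0) && return α # accept if either violation decreases
        α *= ls_scale                  # otherwise shrink the step geometrically
    end
    return α
end
```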
| Dojo | https://github.com/dojo-sim/Dojo.jl.git |
|
[
"MIT"
] | 0.7.5 | 104b6a1a358da9b1348ab7cd66ad685ecaca13d4 | docs | 1680 | # Solver Options
The solver has several options accessible via [`SolverOptions`](@ref). Below is a table describing their effect on the solver's behavior, typical values, and whether they need to be tuned by the user.
| option | default |range | effect | tuning |
| ----------------------- | --------|-------------------------------------- | ------ | -------------- |
| `rtol` | ``10^{-4}`` |``[10^{-6}, 10^{-2}]``| larger leads to faster solve (usually takes the same value as `btol`) | rarely |
| `btol` | ``10^{-4}`` |``[10^{-6}, 10^{-2}]``| larger results in smoothed contact dynamics and faster solve | rarely |
| `ls_scale` | ``0.5`` |``[0.3, 0.8]`` | larger potentially increases the step size at the cost of more residual evaluations | never |
| `max_ls` | ``10`` |``[1, 25]`` | larger allows for taking smaller steps | never |
| `undercut` | ``+\infty`` |``[2, +\infty]`` | larger is more robust but can generate stiffer gradients | rarely |
| `no_progress_max` | ``3`` |``[3, 5]`` | smaller will increase the undercut faster | never |
| `no_progress_undercut` | ``10`` |``[3, 100]`` | larger will increase the undercut faster | never |
| `verbose` | ``\text{false}`` |``\{\text{true}, \text{false}\}`` | printing the status of the solver | often |
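As a usage sketch, the options are constructed with keyword arguments matching the names above and passed to the simulation; the `opts` keyword of `simulate!` is assumed here, as in the Dojo examples:
```julia
using Dojo, DojoEnvironments

mechanism = get_mechanism(:pendulum)
initialize!(mechanism, :pendulum)

opts = SolverOptions(rtol=1e-6, btol=1e-6, verbose=true)
storage = simulate!(mechanism, 1.0; record=true, opts=opts)
```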
| Dojo | https://github.com/dojo-sim/Dojo.jl.git |
|
[
"MIT"
] | 0.7.5 | 104b6a1a358da9b1348ab7cd66ad685ecaca13d4 | docs | 559 | # Using an Existing Environment
The following code uses a function defined in `DojoEnvironments` to create a pendulum `Environment`. This `Environment` is a wrapper around the [`Mechanism`](@ref) for easy interfacing with other packages. As with [`Mechanism`](@ref)s, you can use the existing templates as a starting point for your own `Environment`s.
```julia
# ### Setup
using Dojo
using DojoEnvironments
# ### Get environment (check DojoEnvironment/environments files for kwargs)
environment = get_environment(:pendulum; timestep=0.01, horizon=200)
```
| Dojo | https://github.com/dojo-sim/Dojo.jl.git |
|
[
"MIT"
] | 0.7.5 | 104b6a1a358da9b1348ab7cd66ad685ecaca13d4 | docs | 678 | # Directly Build a Mechanism
The following code builds a pendulum [`Mechanism`](@ref), consisting of an [`Origin`](@ref), a [`Body`](@ref), and a [`JointConstraint`](@ref). Most [`Mechanism`](@ref)s consist of these three components, sometimes supplemented by [`ContactConstraint`](@ref)s.
```julia
# ### Setup
using Dojo
# ### Parameters
radius = 0.1
length = 1
mass = 1
rotation_axis = [1;0;0]
connection = [0;0;length/2]
# ### Mechanism components
origin = Origin()
body = Cylinder(radius, length, mass)
joint = JointConstraint(Revolute(origin, body, rotation_axis; child_vertex=connection))
# ### Construct Mechanism
mechanism = Mechanism(origin, [body], [joint])
```
| Dojo | https://github.com/dojo-sim/Dojo.jl.git |
|
[
"MIT"
] | 0.7.5 | 104b6a1a358da9b1348ab7cd66ad685ecaca13d4 | docs | 549 | # Using an Existing Mechanism
The following code uses a function defined in `DojoEnvironments` to create a pendulum [`Mechanism`](@ref). As before, the mechanism consists of an [`Origin`](@ref), a [`Body`](@ref), and a [`JointConstraint`](@ref). You can, of course, use these existing templates as a starting point for your own [`Mechanism`](@ref)s.
```julia
# ### Setup
using Dojo
using DojoEnvironments
# ### Get mechanism (check DojoEnvironment/mechanisms files for kwargs)
mechanism = get_mechanism(:pendulum; timestep=0.02, length=0.75)
```
| Dojo | https://github.com/dojo-sim/Dojo.jl.git |
|
[
"MIT"
] | 0.7.5 | 104b6a1a358da9b1348ab7cd66ad685ecaca13d4 | docs | 499 | # Overview
There are three ways to build and use mechanical systems in Dojo: build one directly, use an existing mechanism, or use an existing environment. Directly-built and existing mechanisms both result in a [`Mechanism`](@ref), and using an existing mechanism is just for convenience. Creating an `Environment` can be useful for more advanced applications like reinforcement learning. We use the pendulum as a simple example for these three methods and later give two more detailed examples.
| Dojo | https://github.com/dojo-sim/Dojo.jl.git |
|
[
"MIT"
] | 0.7.5 | 104b6a1a358da9b1348ab7cd66ad685ecaca13d4 | docs | 4687 | # Loading a Mechanism via URDF
Another way to build a mechanism is to directly load it from a URDF file. We illustrate this with the A1 quadruped, defined in `DojoEnvironments`.

Note that the [`Mechanism`](@ref) is created simply by passing the path to the URDF file to the constructor. Afterwards, additional features, such as contacts, can be added.
```julia
function get_quadruped(;
timestep=0.01,
input_scaling=timestep,
gravity=-9.81,
urdf=:gazebo_a1,
springs=0,
dampers=0,
parse_springs=true,
parse_dampers=true,
limits=true,
joint_limits=Dict(vcat([[
(Symbol(group,:_hip_joint), [-0.5,0.5]),
(Symbol(group,:_thigh_joint), [-0.5,1.5]),
(Symbol(group,:_calf_joint), [-2.5,-1])]
for group in [:FR, :FL, :RR, :RL]]...)),
keep_fixed_joints=true,
friction_coefficient=0.8,
contact_feet=true,
contact_body=true,
T=Float64)
# mechanism
path = joinpath(@__DIR__, "dependencies/$(string(urdf)).urdf")
mechanism = Mechanism(path; floating=true, T,
gravity, timestep, input_scaling,
parse_dampers, keep_fixed_joints)
# springs and dampers
!parse_springs && set_springs!(mechanism.joints, springs)
!parse_dampers && set_dampers!(mechanism.joints, dampers)
# joint limits
if limits
joints = set_limits(mechanism, joint_limits)
mechanism = Mechanism(mechanism.origin, mechanism.bodies, joints;
gravity, timestep, input_scaling)
end
# contacts
contacts = ContactConstraint{T}[]
if contact_feet
# feet contacts
body_names = [:FR_calf, :FL_calf, :RR_calf, :RL_calf]
names = [:FR_calf_contact, :FL_calf_contact, :RR_calf_contact, :RL_calf_contact]
contact_bodies = [get_body(mechanism, name) for name in body_names]
n = length(contact_bodies)
normals = fill(Z_AXIS,n)
friction_coefficients = fill(friction_coefficient,n)
contact_origins = fill([-0.006; 0; -0.092],n)
contact_radii = fill(0.021,n)
contacts = [contacts;contact_constraint(contact_bodies, normals; friction_coefficients, contact_origins, contact_radii, names)]
end
if contact_body
# thigh contacts
body_names = [:FR_thigh, :FL_thigh, :RR_thigh, :RL_thigh]
names = [:FR_thigh_contact, :FL_thigh_contact, :RR_thigh_contact, :RL_thigh_contact]
contact_bodies = [get_body(mechanism, name) for name in body_names]
n = length(contact_bodies)
normals = fill(Z_AXIS,n)
friction_coefficients = fill(friction_coefficient,n)
contact_origins = [
[-0.005; -0.023; -0.16],
[-0.005; 0.023; -0.16],
[-0.005; -0.023; -0.16],
[-0.005; 0.023; -0.16],
]
contact_radii = fill(0.023,n)
contacts = [contacts;contact_constraint(contact_bodies, normals; friction_coefficients, contact_origins, contact_radii, names)]
# hip contacts
body_names = [:FR_hip, :FL_hip, :RR_hip, :RL_hip]
names = [:FR_hip_contact, :FL_hip_contact, :RR_hip_contact, :RL_hip_contact]
contact_bodies = [get_body(mechanism, name) for name in body_names]
n = length(contact_bodies)
normals = fill(Z_AXIS,n)
friction_coefficients = fill(friction_coefficient,n)
contact_origins = fill([0; 0.05; 0],n)
contact_radii = fill(0.05,n)
contacts = [contacts;contact_constraint(contact_bodies, normals; friction_coefficients, contact_origins, contact_radii, names)]
end
mechanism = Mechanism(mechanism.origin, mechanism.bodies, mechanism.joints, contacts;
gravity, timestep, input_scaling)
# zero configuration
initialize_quadruped!(mechanism)
# construction finished
return mechanism
end
```
```julia
function initialize_quadruped!(mechanism::Mechanism;
body_position=[0, 0, 0], body_orientation=one(Quaternion),
hip_angle=0, thigh_angle=pi/4, calf_angle=-pi/2)
zero_velocities!(mechanism)
zero_coordinates!(mechanism)
body_position += [0, 0, 0.43]
set_minimal_coordinates!(mechanism, get_joint(mechanism, :floating_base), [body_position; Dojo.rotation_vector(body_orientation)])
for group in [:FR, :FL, :RR, :RL]
set_minimal_coordinates!(mechanism, get_joint(mechanism, Symbol(group, :_hip_joint)), [hip_angle])
set_minimal_coordinates!(mechanism, get_joint(mechanism, Symbol(group, :_thigh_joint)), [thigh_angle])
set_minimal_coordinates!(mechanism, get_joint(mechanism, Symbol(group, :_calf_joint)), [calf_angle])
end
return
end
```
| Dojo | https://github.com/dojo-sim/Dojo.jl.git |
|
[
"MIT"
] | 0.7.5 | 104b6a1a358da9b1348ab7cd66ad685ecaca13d4 | docs | 3896 | # Detailed Mechanism Definition
Here, we describe in detail how to define your own dynamical system, i.e., a [`Mechanism`](@ref). After it has been defined, it will be extremely easy to simulate it, control it, perform trajectory optimization on it, or even perform policy optimization.
We're going to build a tippe top:
```@raw html
<img src="../assets/animations/tippetop_real.gif" width="300"/>
```
```@raw html
<img src="../assets/animations/tippetop.gif" width="300"/>
```
### Build Mechanism
We will take a look at the definition of `get_tippetop` in `DojoEnvironments`. This function returns a [`Mechanism`](@ref) and takes as input a variety of parameters like the simulation time step, gravity, etc. You can add as many parameters as you want. This example is typical of what you will find in Dojo.
To build the mechanism corresponding to the tippe top, we decompose it into two spherical bodies. Each body has its own spherical contact constraint with the floor. The joint between the two bodies is a `Fixed` joint and the joint between the main body and the [`Origin`](@ref) of the frame is a `Floating` joint.
```julia
function get_tippetop(;
timestep=0.01,
input_scaling=timestep,
gravity=-9.81,
mass=1,
radius=0.5,
scale=0.2,
color=RGBA(0.9, 0.9, 0.9, 1.0),
springs=0,
dampers=0,
limits=false,
joint_limits=Dict(),
keep_fixed_joints=false,
friction_coefficient=0.4,
contact=true,
contact_type=:nonlinear,
T=Float64)
# mechanism
origin = Origin{T}(name=:origin)
bodies = [
Sphere(radius, mass; name=:sphere1, color),
Sphere(radius*scale, mass*scale^3; name=:sphere2, color)
]
bodies[1].inertia = Diagonal([1.9, 2.1, 2])
joints = [
JointConstraint(Floating(origin, bodies[1]); name=:floating_joint),
JointConstraint(Fixed(bodies[1], bodies[2];
parent_vertex=[0,0,radius]), name = :fixed_joint)
]
mechanism = Mechanism(origin, bodies, joints;
timestep, gravity, input_scaling)
# springs and dampers
set_springs!(mechanism.joints, springs)
set_dampers!(mechanism.joints, dampers)
# joint limits
if limits
joints = set_limits(mechanism, joint_limits)
mechanism = Mechanism(mechanism.origin, mechanism.bodies, joints;
gravity, timestep, input_scaling)
end
# contacts
contacts = ContactConstraint{T}[]
if contact
n = length(bodies)
normals = fill(Z_AXIS,n)
friction_coefficients = fill(friction_coefficient,n)
contact_radii = [radius;radius*scale]
contacts = [contacts;contact_constraint(bodies, normals; friction_coefficients, contact_radii, contact_type)]
end
mechanism = Mechanism(mechanism.origin, mechanism.bodies, mechanism.joints, contacts;
gravity, timestep, input_scaling)
# zero configuration
initialize_tippetop!(mechanism)
# construction finished
return mechanism
end
```
### Initialize Mechanism
The second method that we need to look at is `initialize_tippetop!`. This function initializes the dynamical system to a certain state. This means that we set the position, orientation, linear velocity, and angular velocity of each body in the mechanism.
```julia
function initialize_tippetop!(mechanism::Mechanism{T};
body_position=2*Z_AXIS*mechanism.bodies[1].shape.r, body_orientation=one(Quaternion),
body_linear_velocity=zeros(3), body_angular_velocity=[0.0, 0.1, 50.0]) where T
# zero state
zero_velocities!(mechanism)
zero_coordinates!(mechanism)
# set desired state value
floating_joint = mechanism.joints[1]
set_minimal_coordinates!(mechanism, floating_joint,
[body_position; Dojo.rotation_vector(body_orientation)])
set_minimal_velocities!(mechanism, floating_joint,
[body_linear_velocity; body_angular_velocity])
end
```
| Dojo | https://github.com/dojo-sim/Dojo.jl.git |
|
[
"MIT"
] | 0.7.5 | 104b6a1a358da9b1348ab7cd66ad685ecaca13d4 | docs | 1739 | # Defining a Controller
Here, we explain how to write a controller and simulate its effect on a dynamical system, i.e., a [`Mechanism`](@ref).
We focus on the stabilization of the cartpole, which has two joints but only a single input on the cart. The controller is a method that always takes 2 input arguments:
- a [`Mechanism`](@ref),
- an integer `k` indicating the current simulation step.
For the cartpole, the controller computes the control input based on the current state `x`, the goal state `x_goal` and an LQR controller. The simulation step is not used in this example.
There are three ways to apply inputs to the system:
- set an input directly to a joint
- set a set of inputs to all joints of the mechanism
- set an external force on bodies
```julia
# ### Setup
using Dojo
using DojoEnvironments
# ### Mechanism
mechanism = get_mechanism(:cartpole)
# ### Controller
K = [-0.948838; -2.54837; 48.6627; 10.871]
function controller!(mechanism, k)
## Target state
x_goal = [0;0; 0.0;0]
## Current state
x = get_minimal_state(mechanism)
## Control inputs
u = -K' * (x - x_goal)
## 3 ways to set input:
## 1: get joint and set input
cart_joint = get_joint(mechanism, :cart_joint)
set_input!(cart_joint, [u])
## 2: set input for all joints at once
## set_input!(mechanism, [u;0]) # need to know joint order
## 3: direct external force on body
## cart = get_body(mechanism, :cart)
## set_external_force!(cart; force=[0;u;0])
end
# ### Simulate
initialize!(mechanism, :cartpole; position=0, orientation=pi/4)
storage = simulate!(mechanism, 10.0, controller!, record=true)
# ### Visualize
vis = visualize(mechanism, storage)
render(vis)
```
| Dojo | https://github.com/dojo-sim/Dojo.jl.git |
|
[
"MIT"
] | 0.7.5 | 104b6a1a358da9b1348ab7cd66ad685ecaca13d4 | docs | 893 | # Defining a Simulation
Here, we explain how to simulate a dynamical system i.e., a [`Mechanism`](@ref) forward in time.
The example that we are trying to replicate is the Dzhanibekov effect shown below.

```julia
# ### Setup
using Dojo
using DojoEnvironments
# ### Get mechanism (check DojoEnvironment/mechanisms files for kwargs)
mechanism = get_mechanism(:dzhanibekov; timestep=0.01, gravity=0)
# ### Initialize mechanism (check DojoEnvironment/mechanisms files for kwargs)
initialize!(mechanism, :dzhanibekov; angular_velocity=[15.0; 0.01; 0.0])
# ### Simulate mechanism
storage = simulate!(mechanism, 5, record=true)
# ### Visualize mechanism
vis = visualize(mechanism, storage)
render(vis)
```
And voilà! You should see something like this:
```@raw html
<img src="../assets/animations/dzhanibekov.gif" width="300"/>
```
| Dojo | https://github.com/dojo-sim/Dojo.jl.git |
|
[
"MIT"
] | 0.7.5 | 104b6a1a358da9b1348ab7cd66ad685ecaca13d4 | docs | 1053 | # Gradients
Dynamical systems, i.e., `Mechanism`s, can be differentiated with respect to the current state and control input. The results can be used, for example, to define a linear controller.
As an example, we use the `ControlSystemsBase` package to control the cartpole.
```julia
# ### Setup
using Dojo
using DojoEnvironments
using ControlSystemsBase
using LinearAlgebra
# ### Mechanism
mechanism = get_mechanism(:cartpole)
# ### Controller
x0 = zeros(4)
u0 = zeros(2)
A, B = get_minimal_gradients!(mechanism, x0, u0)
Q = I(4)
R = I(1)
K = lqr(Discrete,A,B[:,1],Q,R)
function controller!(mechanism, k)
## Target state
x_goal = [0;0; 0.0;0]
## Current state
x = get_minimal_state(mechanism)
## Control inputs
u = -K * (x - x_goal)
set_input!(mechanism, [u;0]) # input only to cart
end
# ### Simulate
initialize!(mechanism, :cartpole; position=0, orientation=pi/4)
storage = simulate!(mechanism, 10.0, controller!, record=true)
# ### Visualize
vis = visualize(mechanism, storage)
render(vis)
```
| Dojo | https://github.com/dojo-sim/Dojo.jl.git |
|
[
"MIT"
] | 0.7.5 | 104b6a1a358da9b1348ab7cd66ad685ecaca13d4 | docs | 365 | # Overview
You can use `Environment`s to interface `Dojo` to other packages, such as `ReinforcementLearning`. Due to current changes in the `ReinforcementLearning` package, we will provide updated documentation once the changes are done.
However, an experimental implementation of training the cartpole with `ReinforcementLearning` is provided in the examples.
| Dojo | https://github.com/dojo-sim/Dojo.jl.git |
|
[
"MIT"
] | 0.7.5 | 104b6a1a358da9b1348ab7cd66ad685ecaca13d4 | docs | 942 | # Reinforcement Learning
We have implemented a few learning examples.
## Ant
```@raw html
<img src="../assets/animations/ant_ars.gif" width="300"/>
```
Policy optimization is performed using the reinforcement-learning algorithm [augmented random search (ARS)](https://arxiv.org/abs/1803.07055) to optimize static linear policies for locomotion.
The insect-like robot has rewards on forward velocity and survival and costs on control usage and contact forces.
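For reference, a single simplified ARS update for a static linear policy ``u = \theta x`` can be sketched as below; `rollout_reward` is a placeholder for an environment rollout (not a Dojo API), and the state normalization and top-direction selection of the full algorithm are omitted:
```julia
using Statistics

# one ARS update: N sampled directions, step size α, exploration noise ν
function ars_update!(θ, rollout_reward; N=8, ν=0.05, α=0.01)
    δs = [randn(size(θ)...) for _ in 1:N]
    r_plus = [rollout_reward(θ .+ ν .* δ) for δ in δs]
    r_minus = [rollout_reward(θ .- ν .* δ) for δ in δs]
    σR = std(vcat(r_plus, r_minus)) + eps() # reward scale for normalization
    θ .+= (α / (N * σR)) .* sum((r_plus[k] - r_minus[k]) .* δs[k] for k in 1:N)
    return θ
end
```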
## Quadruped
```@raw html
<img src="../assets/animations/quadruped.gif" width="300"/>
```
A very basic random-sampling algorithm is used to find parameters for the periodic gait of a quadruped.
## Cartpole
```@raw html
<img src="../assets/animations/cartpole_rl.gif" width="300"/>
```
We have modified the cartpole example in the `ReinforcementLearning` package to use `Dojo`'s dynamics. This allows us to combine advanced learning algorithms with accurate dynamics simulation. | Dojo | https://github.com/dojo-sim/Dojo.jl.git |
|
[
"MIT"
] | 0.7.5 | 104b6a1a358da9b1348ab7cd66ad685ecaca13d4 | docs | 1697 | # Simulation
Dojo can simulate a number of interesting physical behaviors.
We include notebooks (generated upon installation) for the examples below.
## Atlas Drop
```@raw html
<img src="../assets/animations/atlas_drop.gif" width="100"/>
```
The humanoid [Atlas](https://www.bostondynamics.com/atlas) is dropped onto a flat surface.
Dojo is able to simulate hard contact and prevent interpenetration of the robot's feet with the floor.
In comparison, when the same system is simulated in [MuJoCo](https://mujoco.org), **centimeters** of interpenetration occur.
## Friction Cone Comparison
```@raw html
<img src="../assets/animations/cone_compare_mujoco.gif" width="300"/>
```
Blocks are simulated with initial velocity before impacting and sliding along a flat surface. We compare Dojo's nonlinear cone (blue) with a linearized approximation (orange) and MuJoCo's default linear cone (magenta). The linearized cones exhibit drift due to the approximation, whereas **Dojo's nonlinear cone produces the expected sliding behavior**.
## Dzhanibekov Effect
```@raw html
<img src="../assets/animations/dzhanibekov.gif" width="150"/>
```
Dojo simulates the [unstable rotational motion](https://en.wikipedia.org/wiki/Tennis_racket_theorem) of a rigid body about its second primary moment of inertia. Using [non-Euclidean optimization for quaternions](https://roboticexplorationlab.org/papers/planning_with_attitude.pdf) enables continuous simulation of rotating objects without singularity issues.
## Tippetop
```@raw html
<img src="../assets/animations/tippetop.gif" width="300"/>
```
A spinning object oscillates between up and down configurations as a result of its mass distribution.
| Dojo | https://github.com/dojo-sim/Dojo.jl.git |
|
[
"MIT"
] | 0.7.5 | 104b6a1a358da9b1348ab7cd66ad685ecaca13d4 | docs | 1960 | # System Identification
A [real-world dataset](https://github.com/DAIRLab/contact-nets/tree/main/data) is used to learn the geometric and friction properties of a block being tossed onto a flat surface. Gradient-based optimization is employed to regress parameters and real-to-sim validation is performed. The ground truth system is shown in orange and the learned system in blue.
## Geometry
```@raw html
<img src="../assets/animations/box_learning.gif" width="300"/>
```
The eight locations of the block's corners relative to its center of mass are learned.
## Friction
```@raw html
<img src="../assets/animations/cone_learning.gif" width="200"/>
```
A friction coefficient, describing a friction cone, is learned for all of the contact points.
## Real-To-Sim
```@raw html
<img src="../assets/animations/box_toss.gif" width="300"/>
```
The system parameters are learned to within a ``\pm 5 \%`` error from their ground-truth values. These parameters are then compared to the ground-truth system in simulation.
## Learning
The cost function:
``\mathcal{L}(\mathcal{D}, \theta) = \sum_{Z \in \mathcal{D}} L(Z, \theta) = \sum_{Z \in \mathcal{D}} \frac{1}{2} ||s(z_{-}, z, \theta) - z_{+}||_W^2``,
is used where ``\mathcal{D}`` is a dataset of trajectories containing tuples ``(z_{-}, z, z_{+})`` of state sequences, with system parameters ``\theta \in \mathbf{R}^p``, and where ``s : \mathbf{Z} \times \mathbf{Z} \times \mathbf{R}^p \rightarrow \mathbf{Z}`` represents the simulator.
A quasi-Newton method is employed to optimize the cost function and uses gradients:
``\frac{\partial L}{\partial \theta} = {\frac{\partial s}{\partial \theta}}^T W \Big(s(z_{-}, z, \theta) - z_{+} \Big)``,
and the following Gauss-Newton approximation:
``\frac{\partial^2 L}{\partial \theta^2} \approx {\frac{\partial s}{\partial \theta}}^T W \frac{\partial s}{\partial \theta}``,
of the cost function Hessian, which only relies on Jacobians from Dojo.
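A sketch of one resulting Gauss-Newton step is shown below; `simulate_step` and `step_jacobian` stand in for Dojo's simulator ``s`` and its parameter Jacobian ``\partial s / \partial \theta``:
```julia
using LinearAlgebra

function gauss_newton_step(θ, dataset, simulate_step, step_jacobian, W)
    g = zero(θ)                     # gradient ∂L/∂θ
    H = zeros(length(θ), length(θ)) # Gauss-Newton approximation of ∂²L/∂θ²
    for (zprev, zcur, znext) in dataset
        J = step_jacobian(zprev, zcur, θ)         # ∂s/∂θ at this sample
        e = simulate_step(zprev, zcur, θ) - znext # prediction error
        g += J' * W * e
        H += J' * W * J
    end
    return θ - H \ g # quasi-Newton update with the Gauss-Newton Hessian
end
```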
| Dojo | https://github.com/dojo-sim/Dojo.jl.git |
|
[
"MIT"
] | 0.7.5 | 104b6a1a358da9b1348ab7cd66ad685ecaca13d4 | docs | 2065 | # Trajectory Optimization
!!! info
The trajectory optimization examples are outdated and currently not available.
Dojo provides dynamics constraints and Jacobians in order to perform trajectory optimization using [iterative LQR](https://github.com/thowell/IterativeLQR.jl).
## Quadruped
```@raw html
<img src="../assets/animations/quadruped_min.gif" width="200"/>
```
A [Unitree A1](https://www.unitree.com/products/a1/) takes a number of forward steps. There are costs on a kinematic gait and control usage, as well as an augmented Lagrangian (i.e., soft) constraint on the robot's final pose. The maximal representation is converted to a minimal one for optimization. Additionally, slack controls are utilized early on to aid the optimizer before being driven to zero by a constraint to achieve a dynamically feasible trajectory.
## Atlas
```@raw html
<img src="../assets/animations/atlas_ilqr.gif" width="200"/>
```
The Atlas v5 humanoid (sans arms) takes a number of forward steps. Similar to the quadruped example, there are costs on control effort and deviations from a kinematic plan, a minimal representation is utilized, and the optimizer is aided by slack controls.
## Block
```@raw html
<img src="../assets/animations/box_right.gif" width="200"/>
```
A block is moved to a goal location by applying forces to its center of mass. The optimizer is initialized with zero control and utilizes smooth gradients from Dojo to find a motion that overcomes friction to slide towards the goal.
## Raibert Hopper
```@raw html
<img src="../assets/animations/hopper_max.gif" width="100"/>
```
A hopping robot, inspired by the [Raibert Hopper](https://dspace.mit.edu/handle/1721.1/6820), is tasked with moving to a goal location. The optimizer finds a single hop trajectory to reach its goal pose.
## Cartpole
```@raw html
<img src="../assets/animations/cartpole_max.gif" width="200"/>
```
This classic system is tasked with performing a swing-up. Examples are provided performing optimization with both maximal and minimal representations. | Dojo | https://github.com/dojo-sim/Dojo.jl.git |
|
[
"MIT"
] | 0.7.5 | 104b6a1a358da9b1348ab7cd66ad685ecaca13d4 | docs | 717 | # Dojo.jl examples
This directory contains examples using Dojo.
The `.jl` files in each subdirectory can be executed directly, or they can be processed using [Literate.jl](https://github.com/fredrikekre/Literate.jl) to obtain notebooks.
Building the `Dojo` package generates notebooks, which can be run locally by performing the following steps:
1. [install Dojo.jl](https://github.com/dojo-sim/Dojo.jl)
2. [install IJulia](https://github.com/JuliaLang/IJulia.jl) (`add` it to the default project)
3. in the Julia REPL, run (do once)
```
using Pkg
Pkg.build("Dojo")
```
4. interact with notebooks
```
using IJulia, Dojo
notebook(dir=joinpath(dirname(pathof(Dojo)), "..", "examples"))
``` | Dojo | https://github.com/dojo-sim/Dojo.jl.git |
|
[
"MIT"
] | 0.1.0 | a1a323a509108883bf94f3056027391c6aef4d04 | code | 400 | __precompile__()
module PlotSeis
using Printf, SeisIO, PyPlot
using Statistics: mean
using DSP: spectrogram, time, freq, power
import SeisIO: t_expand, μs, t_win
export plotseis,
uptimes,
logspec
# Time series plots
include("TimeSeries/ts_internals.jl")
include("TimeSeries/plotseis.jl")
# Spectral plots
include("Spectral/logspec.jl")
# Other plots
include("Other/uptimes.jl")
end
| PlotSeis | https://github.com/jpjones76/PlotSeis.jl.git |
|
[
"MIT"
] | 0.1.0 | a1a323a509108883bf94f3056027391c6aef4d04 | code | 3305 | function uptimes_bar(S::SeisData, fmt::String, use_name::Bool, nxt::Int64)
xmi = 2^63-1
xma = xmi+1
fig = PyPlot.figure(figsize=[8.0, 6.0], dpi=150)
ax = PyPlot.axes([0.20, 0.10, 0.72, 0.85])
for i = 1:S.n
if S.fs[i] == 0.0
t = view(S.t[i], :, 2)
x = rescaled(S.x[i].-mean(S.x[i]),i)
plot(t, x, "ko", markersize = 2.0)
else
rect_x = Array{Float64,1}(undef, 0)
rect_y = Array{Float64,1}(undef, 0)
t = t_win(S.t[i], S.fs[i])
for j = 1:size(t,1)
ts = Float64(t[j,1])
te = Float64(t[j,2])
append!(rect_x, [ts, te, te, ts, ts])
append!(rect_y, [i-0.48, i-0.48, i+0.48, i+0.48, i-0.48])
end
p = fill(rect_x, rect_y, linewidth=1.0, edgecolor="k")
end
xmi = min(xmi, first(t))
xma = max(xma, last(t))
end
PyPlot.title("Channel Uptimes", fontweight="bold", fontsize=13.0, family="serif", color="black")
# Y scaling and axis manipulation
PyPlot.yticks(1:S.n, map((i) -> replace(i, " " => ""), use_name ? S.name : S.id))
PyPlot.ylim(0.5, S.n+0.5)
PyPlot.setp(gca().get_yticklabels(), fontsize=8.0, color="black", fontweight="bold", family="serif")
# X scaling and axis manipulation
xfmt(xmi, xma, fmt, nxt)
PyPlot.setp(gca().get_xticklabels(), fontsize=10.0, color="black", fontweight="bold", family="serif")
return fig
end
function uptimes_sum(S::SeisData, fmt::String, use_name::Bool, nxt::Int64)
ntr = sum(S.fs.>0)
W = Array{Int64, 1}(undef, 0)
for i = 1:S.n
S.fs[i] == 0.0 && continue
w = div.(t_win(S.t[i], S.fs[i]), 1000000)
for i = 1:size(w,1)
append!(W, collect(w[i,1]:w[i,2]))
end
end
sort!(W)
t = collect(first(W):last(W))
y = zeros(Int64, length(t))
i = 0
j = 1
τ = first(t)
while i < length(W)
i = i + 1
if W[i] == τ
y[j] += 1
else
while W[i] != τ
j += 1
j > length(t) && break
τ = t[j]
end
y[j] += 1
end
end
fig = PyPlot.figure(figsize=[8.0, 6.0], dpi=150)
step(t.*1000000, y, color="k", linewidth=2.0)
# Y scaling and axis manipulation
dy = ntr > 24 ? 5.0 : ntr > 19 ? 4.0 : ntr > 14 ? 3.0 : ntr > 9 ? 2.0 : 1.0
PyPlot.ylim(0, ntr)
PyPlot.yticks(ntr:-dy:0)
PyPlot.ylabel("Active Channels", fontweight="bold", fontsize=12.0, family="serif", color="black")
PyPlot.setp(gca().get_yticklabels(), fontsize=10.0, color="black", fontweight="bold", family="serif")
# X scaling and axis manipulation
xfmt(first(t)*1000000, last(t)*1000000, fmt, nxt)
PyPlot.setp(gca().get_xticklabels(), fontsize=10.0, color="black", fontweight="bold", family="serif")
return fig
end
"""
uptimes(S[, summed=false])
Bar plot of uptimes for each channel in S.
If summed==true, plot uptimes for all channels in S that record timeseries data,
scaled so that y=1 corresponds to 100% of channels active. Non-timeseries
channels in S are not counted toward the cumulative total in a summed uptime plot.
"""
function uptimes(S::SeisData; summed::Bool=false, fmt::String="auto", use_name::Bool=false, nxt::Int64=5)
if summed
fig = uptimes_sum(S, fmt, use_name, nxt)
else
fig = uptimes_bar(S, fmt, use_name, nxt)
end
return fig
end
| PlotSeis | https://github.com/jpjones76/PlotSeis.jl.git |
|
[
"MIT"
] | 0.1.0 | a1a323a509108883bf94f3056027391c6aef4d04 | code | 3934 | function logspec(S::SeisData, k::Union{Int64,String};
nx::Int64=1024,
ov::Float64=0.5,
fmin::Real=0.0,
fmax::Real=Inf,
fmt::String="auto")
N_ticks = 5
if isa(k, String)
j = findid(k, S)
j == 0 && error("No matching ID or no channel at the specified #")
else
j = k
end
seis = deepcopy(S[j])
T = eltype(seis.x)
unscale!(seis)
demean!(seis)
taper!(seis)
tos = seis.t[1,2]
fs = T(seis.fs)
# Set some image properties
sz = Float64[8.0, 5.0] # Size of image in inches
cf = 0.05 # Colorbar fraction
cp = 0.02 # Colorbar spacing
x0 = 0.14
y0 = 0.12
tsh = 0.13
wid = 0.90-x0
sp = (x0, y0+tsh+cp, wid, 0.98-(y0+tsh+cp)) # Spectrogram plot bounds
tp = (x0, y0, wid*(1.0-(cf+cp)), tsh) # Timeseries plot bounds
fmax = T(min(fmax, 0.5*fs)) # Maximum frequency to plot
# Generate spectrogram
if fmin != 0.0
nx = nextpow(2, 4.0/fmin)
end
no = floor(Int64, ov*nx)
sg = spectrogram(seis.x, nx, no, fs=fs)
t = Int64.(round.(time(sg).*1.0e6)) .+ tos # Time from start (s)
f = T.(collect(freq(sg))) # Frequency (Hz)
P = T(10.0)*log10.(power(sg)) # Power (dB)
# x ticks
xti = first(t) : div(last(t) - first(t), N_ticks-1) : last(t)
xtl = fill!(Array{String,1}(undef, length(xti)), "")
# truncate P, y
i0 = max(2, findfirst(f.≥max(0.5*fs/nx, fmin))) # First sane frequency index
i1 = findlast(f.≤fmax) # Last sane frequency index
P = P[i0:i1, :]
y = log10.(f)[i0:i1]
# Remove all -∞
mP = maximum(P)
pτ = mP - T(60.0) - eps(T)
P[P.<pτ] .= pτ
# y ticks
f0 = log10(f[i0]) # Minimum frequency to show
f1 = log10(f[i1]) # Maximum frequency to show
yt = collect(ceil(f0) : 0.5 : floor(f1))
yti = T(10.0) .^ vcat(f0, yt, f1)
if fmin < 0.1
ytl = vcat("", [@sprintf("%0.1e", T(10.0)^i) for i in yt], "")
else
ytl = vcat("", [@sprintf("%0.1f", T(10.0)^i) for i in yt], "")
end
# Figure
h = PyPlot.figure(figsize = sz, dpi=150)
# Seismogram plot
ax_ts = PyPlot.axes(tp)
dt = one(T)/fs
tx = t_expand(seis.t, seis.fs)
xmax = 1.0*maximum(abs.(seis.x))
ts_xl = @sprintf("%0.2e", xmax)
PyPlot.plot(tx, seis.x, linewidth=1.0, color="k")
PyPlot.ylim(xmax .*(T(-1.05), T(1.05)))
xfmt(round(Int64, first(xti)), round(Int64, last(xti)), fmt, N_ticks)
PyPlot.xlabel(ax_ts.get_xlabel(), fontsize=12.0, color="black", fontweight="bold", family="serif")
PyPlot.yticks([-xmax, zero(T), xmax], ["-"*ts_xl, "0.00e00", "+"*ts_xl])
PyPlot.setp(ax_ts.get_yticklabels(), fontsize=9.0, color="black", fontweight="bold", family="serif")
PyPlot.setp(ax_ts.get_xticklabels(), fontsize=9.0, color="black", fontweight="bold", family="serif")
# Note: imshow won't work with unevenly-spaced y-ticks
ax_sp = PyPlot.axes(sp)
img = pcolormesh(t, y, P, cmap="Spectral_r", shading="gouraud")
PyPlot.ylabel("Frequency [Hz]", fontweight="bold", fontsize=12.0, family="serif", color="black")
PyPlot.ylim(f0, f1)
PyPlot.yticks(log10.(yti), ytl)
PyPlot.setp(ax_sp.get_yticklabels(), fontsize=9.0, color="black", fontweight="bold", family="serif")
PyPlot.xlim(minimum(t), maximum(t))
PyPlot.xticks(xti, xtl)
# Set color limits and draw colorbar
cl = [mP-60.0, mP]
PyPlot.clim(cl)
cb = PyPlot.colorbar(label="Power [dB]", use_gridspec=false, fraction=cf, pad=cp)
PyPlot.setp(cb.ax.get_yticklabels(), fontsize=9.0, color="black", fontweight="bold", family="serif")
cb.set_label("Power [dB]", fontsize=10.0, color="black", fontweight="bold", family="serif")
return h
end
| PlotSeis | https://github.com/jpjones76/PlotSeis.jl.git |
|
[
"MIT"
] | 0.1.0 | a1a323a509108883bf94f3056027391c6aef4d04 | code | 1345 | """
plotseis(S[, fmt=FMT, use_name=false, n=N])
Renormalized, time-aligned trace plot of data in S.x using timestamps in S.t.
Keywords:
* fmt=FMT formats x-axis labels using C-language `strftime` format string `FMT`.
If unspecified, the format is determined by when data in `S` start and end.
* use_name=true uses `S.name`, rather than `S.id`, for labels.
* n=N sets the number of X-axis ticks.
"""
function plotseis(S::SeisData; fmt::String="auto", use_name::Bool=false, nxt::Int64=5)
xmi = typemax(Int64)
xma = typemin(Int64)
fig = PyPlot.figure(figsize=[8.0, 6.0], dpi=150)
ax = PyPlot.axes([0.20, 0.10, 0.72, 0.85])
for i = 1:S.n
x = rescaled(S.x[i].-mean(S.x[i]),i)
if S.fs[i] > 0
t = t_expand(S.t[i], S.fs[i])
plot(t, x, linewidth=1.0)
else
t = view(S.t[i], :, 2)
plot(t, x, "o", linewidth=1, markeredgecolor=[0,0,0])
end
xmi = min(xmi, t[1])
xma = max(xma, t[end])
end
xfmt(xmi, xma, fmt, nxt)
PyPlot.setp(gca().get_yticklabels(), fontsize=8.0, color="black", fontweight="bold", family="serif")
PyPlot.setp(gca().get_xticklabels(), fontsize=8.0, color="black", fontweight="bold", family="serif")
PyPlot.yticks(1:S.n, map((i) -> replace(i, " " => ""), use_name ? S.name : S.id))
PyPlot.ylim(0.5, S.n+0.5)
return fig
end
| PlotSeis | https://github.com/jpjones76/PlotSeis.jl.git |
|
[
"MIT"
] | 0.1.0 | a1a323a509108883bf94f3056027391c6aef4d04 | code | 2030 | rescaled(x::Array{Float32,1},i::Int) = Float32(i) .+ x.*(1.0f0/(2.0f0*maximum(abs.(x))))
rescaled(x::Array{Float64,1},i::Int) = Float64(i) .+ x.*(1.0/(2.0*maximum(abs.(x))))
# Formatted X labels
function xfmt(xmi::Int64, xma::Int64, fmt::String, N::Int64)
dt = (xma-xmi)
tzcorr = Libc.TmStruct(time())._11
if fmt == "auto"
# timespan < one minute
x0 = div(xmi, 1000000)
x1 = div(xma, 60000000)
if x0 == x1
fmt = "%S"
xstr = string("Seconds from ", Libc.strftime("%Y-%m-%d %H:%M", xmi*μs - tzcorr), ":00 [UTC]")
else
# timespan < one hour
x0 = div(x0, 60)
x1 = div(x1, 60)
if x0 == x1
fmt = "%M:%S"
xstr = string("MM:SS from ", Libc.strftime("%Y-%m-%d %H", xmi*μs - tzcorr), ":00:00 [UTC]")
else
# timespan < one day
x0 = div(x0, 24)
x1 = div(x1, 24)
if x0 == x1
fmt = "%H:%M:%S"
xstr = string("Time [UTC], ", Libc.strftime("%Y-%m-%d", xmi*μs - tzcorr))
else
# timespan < one year
if div(xmi, 31536000000000) == div(xma, 31536000000000)
fmt = "%j, %T"
xstr = string("JDN (", Libc.strftime("%Y", xmi*μs - tzcorr), "), Time [UTC]")
else
fmt ="%Y-%m-%d %H:%M:%S"
xstr = string("DateTime [UTC]")
end
end
end
end
# plot!(xlabel = xstr)
PyPlot.xlabel(xstr,
fontsize=12.0, color="black", fontweight="bold", family="serif")
else
# plot!(xlabel = string("Time [", replace(fmt, "%" => ""), "]"))
PyPlot.xlabel(string("Time [", replace(fmt, "%" => ""), "]"),
fontsize=12.0, color="black", fontweight="bold", family="serif")
end
dt = dt / (N-1)
xt = Array{Float64,1}(undef, N)
xl = Array{String,1}(undef, N)
t = xmi
for i = 1:N
setindex!(xt, t, i)
setindex!(xl, Libc.strftime(fmt, xt[i]*μs), i)
t = t + dt
end
PyPlot.xlim(xmi, xma)
PyPlot.xticks(xt, xl)
return nothing
end
| PlotSeis | https://github.com/jpjones76/PlotSeis.jl.git |
|
[
"MIT"
] | 0.1.0 | a1a323a509108883bf94f3056027391c6aef4d04 | code | 1693 | ENV["MPLBACKEND"]="agg" # no GUI
using SeisIO, SeisIO.RandSeis, PlotSeis
using Random: randn
using Dates: now
using Printf: @sprintf
path = Base.source_dir()
cd(dirname(pathof(PlotSeis))*"/../test")
test_start = now()
printstyled(stdout, string(test_start, ": tests begin, source_dir = ", path, "/\n"), color=:light_green, bold=true)
printstyled("Generating data\n", color=:light_green, bold=true)
n = (4,5,10,15,20,23,25)
A = Array{SeisData,1}(undef, 12)
for i = 1:7
A[i] = randSeisData(n[i])
end
S = SeisData(randSeisChannel(s=true))
# timespan crosses a year boundary
S.x[1] = randn(200000)
S.fs[1] = 50.0
S.t[1] = [1 -100000; 200000 0]
A[8] = deepcopy(S)
# timespan crosses a day boundary
S.t[1] = [1 86300000000; 200000 0]
A[9] = deepcopy(S)
# timespan crosses an hour boundary
S.t[1] = [1 1; 200000 0]
A[10] = deepcopy(S)
# timespan crosses a minute boundary
S.x[1] = rand(120000)
S.fs[1] = 1000.0
S.t[1] = [1 1; 120000 0]
A[11] = deepcopy(S)
# timespan under a minute
S.x[1] = randn(2000)
S.t[1] = [1 0; 2000 0]
S.fs[1] = 4000.0
A[12] = deepcopy(S)
printstyled("Testing plots\n", color=:light_green, bold=true)
for S in A
plotseis(S)
uptimes(S)
uptimes(S, summed=true)
for i=1:3
close()
end
if length(S.x[1]) ≥ 10000 && S.fs[1] > 0.0
logspec(S, 1, fmin = S.fs[1] > 100.0 ? 1.0 : 0.01)
close()
end
end
plotseis(A[9], fmt = "%H:%M:%S")
test_end = now()
δt = 0.001*(test_end-test_start).value
mm = round(Int, div(δt, 60))
ss = rem(δt, 60)
printstyled(string(test_end, ": tests end, elapsed time (mm:ss.μμμ) = ",
@sprintf("%02i", mm), ":",
@sprintf("%06.3f", ss), "\n"), color=:light_green, bold=true)
| PlotSeis | https://github.com/jpjones76/PlotSeis.jl.git |
|
[
"MIT"
] | 0.1.0 | a1a323a509108883bf94f3056027391c6aef4d04 | docs | 1048 | ## About the choice of PyPlot
PyPlot is not intended as a permanent graphics backend for SeisIO visualization. However, we encountered breaking issues as recently as June 2019 with every other Julia graphics package:
* `GR` is unstable, poorly documented, and lacks customization.
+ In my tests, calling `uptimes` after `plotseis` was >50% likely to cause a segmentation fault and core dump Julia; the reverse was also true.
+ GR depends on QT as an external back-end; while QT is more standardized than `matplotlib`, many external Linux packages are needed for it to work.
* `plotlyjs` is profoundly slow and memory-intensive.
+ On an Intel(R) Core(TM) i7-7500U CPU @ 2.70GHz with nVidia GeForce 940MX and 12 GB RAM, line plots stall after 8-10 traces per plot and require >30 seconds to reach that point.
+ Increasing line width to 2.0 when plotting lines with ~10^5 points consumes so much memory that Ubuntu 18.04 hangs, forcing a hard reset.
* `plotly` uses browser windows, lacks Julia documentation, and lacks axis customization.
| PlotSeis | https://github.com/jpjones76/PlotSeis.jl.git |
|
[
"MIT"
] | 0.1.0 | a1a323a509108883bf94f3056027391c6aef4d04 | docs | 1165 | # PlotSeis.jl
[](https://travis-ci.org/jpjones76/PlotSeis.jl) [](https://ci.appveyor.com/project/jpjones76/PlotSeis-jl/branch/master) [](https://codecov.io/gh/jpjones76/PlotSeis.jl)[](https://coveralls.io/github/jpjones76/PlotSeis.jl?branch=master) [](https://readthedocs.org/projects/plotseisjl/badge/?version=latest)
A visualization tool for SeisIO.jl [based on PyPlot](./NOTE.md).
## Installation | [Documentation](http://plotseisjl.readthedocs.io)
1. Ensure Python is installed and working on your machine.
2. [Install matplotlib for Python.](https://matplotlib.org/users/installing.html)
3. Run these commands from the Julia prompt:
```
]
add https://github.com/jpjones76/PlotSeis.jl
^C
using PlotSeis
```
| PlotSeis | https://github.com/jpjones76/PlotSeis.jl.git |
|
[
"MIT"
] | 0.5.1 | 5be547a8340a28bb5547d46fa10d6bfd02543fc9 | code | 548 | # ------------------------------------------------------------------
# Licensed under the MIT License. See LICENSE in the project root.
# ------------------------------------------------------------------
module GeoStatsModels
using Meshes
using GeoTables
using GeoStatsFunctions
using LinearAlgebra
using Distributions
using Combinatorics
using Distances
using Unitful
using Tables
using Unitful: AffineQuantity
include("models.jl")
# utility functions
include("utils.jl")
export
# models
NN,
IDW,
LWR,
Polynomial,
Kriging
end
| GeoStatsModels | https://github.com/JuliaEarth/GeoStatsModels.jl.git |
|
[
"MIT"
] | 0.5.1 | 5be547a8340a28bb5547d46fa10d6bfd02543fc9 | code | 1849 | # ------------------------------------------------------------------
# Licensed under the MIT License. See LICENSE in the project root.
# ------------------------------------------------------------------
"""
IDW(exponent=1, distance=Euclidean())
The inverse distance weighting model introduced in
the very early days of geostatistics by Shepard 1968.
The weights are computed as `λᵢ = 1 / d(x, xᵢ)ᵉ` for
a given `distance` denoted by `d` and `exponent` denoted
by `e`.
## References
* Shepard 1968. [A two-dimensional interpolation function
for irregularly-spaced data](https://dl.acm.org/doi/10.1145/800186.810616)
"""
struct IDW{E,D} <: GeoStatsModel
exponent::E
distance::D
end
IDW(exponent) = IDW(exponent, Euclidean())
IDW() = IDW(1)
struct IDWState{D<:AbstractGeoTable}
data::D
end
struct FittedIDW{M<:IDW,S<:IDWState}
model::M
state::S
end
status(fitted::FittedIDW) = true
#--------------
# FITTING STEP
#--------------
function fit(model::IDW, data)
# record state
state = IDWState(data)
# return fitted model
FittedIDW(model, state)
end
#-----------------
# PREDICTION STEP
#-----------------
predict(fitted::FittedIDW, var, uₒ) = idw(fitted, weights(fitted, uₒ), var)
predictprob(fitted::FittedIDW, var, uₒ) = Dirac(predict(fitted, var, uₒ))
function idw(fitted::FittedIDW, weights, var)
d = fitted.state.data
c = Tables.columns(values(d))
z = Tables.getcolumn(c, var)
w = weights
Σw = sum(w)
λ(i) = w[i] / Σw
if isinf(Σw) # some distance is zero?
z[findfirst(isinf, w)]
else
sum(i -> λ(i) * z[i], eachindex(z))
end
end
function weights(fitted::FittedIDW, uₒ)
e = fitted.model.exponent
δ = fitted.model.distance
d = fitted.state.data
Ω = domain(d)
xₒ = to(centroid(uₒ))
x(i) = to(centroid(Ω, i))
λ(i) = 1 / δ(xₒ, x(i)) ^ e
map(λ, 1:nelements(Ω))
end
| GeoStatsModels | https://github.com/JuliaEarth/GeoStatsModels.jl.git |
|
[
"MIT"
] | 0.5.1 | 5be547a8340a28bb5547d46fa10d6bfd02543fc9 | code | 5671 | # ------------------------------------------------------------------
# Licensed under the MIT License. See LICENSE in the project root.
# ------------------------------------------------------------------
"""
KrigingModel
A Kriging model (e.g. Simple Kriging).
"""
abstract type KrigingModel <: GeoStatsModel end
"""
KrigingState(data, LHS, RHS, VARTYPE)
A Kriging state stores information needed
to perform estimation at any given geometry.
"""
mutable struct KrigingState{D<:AbstractGeoTable,F<:Factorization,T,V}
data::D
LHS::F
RHS::Vector{T}
VARTYPE::V
end
"""
KrigingWeights(λ, ν)
An object storing Kriging weights `λ` and Lagrange multipliers `ν`.
"""
struct KrigingWeights{T<:Real,A<:AbstractVector{T}}
λ::A
ν::A
end
"""
FittedKriging(model, state)
An object that can be used for making predictions using the
parameters in Kriging `model` and the current Kriging `state`.
"""
struct FittedKriging{M<:KrigingModel,S<:KrigingState}
model::M
state::S
end
status(fitted::FittedKriging) = issuccess(fitted.state.LHS)
#--------------
# FITTING STEP
#--------------
function fit(model::KrigingModel, data)
# variogram and domain
γ = model.γ
D = domain(data)
# build Kriging system
LHS = lhs(model, D)
RHS = Vector{eltype(LHS)}(undef, size(LHS, 1))
# factorize LHS
FLHS = factorize(model, LHS)
# variance type
VARTYPE = GeoStatsFunctions.returntype(γ, first(D), first(D))
# record Kriging state
state = KrigingState(data, FLHS, RHS, VARTYPE)
# return fitted model
FittedKriging(model, state)
end
"""
lhs(model, domain)
Return LHS of Kriging system for the elements in the `domain`.
"""
function lhs(model::KrigingModel, domain)
γ = model.γ
nobs = nelements(domain)
ncon = nconstraints(model)
# pre-allocate memory for LHS
u = first(domain)
V² = GeoStatsFunctions.returntype(γ, u, u)
m = nobs + ncon
G = Matrix{V²}(undef, m, m)
# set variogram/covariance block
GeoStatsFunctions.pairwise!(G, γ, domain)
if isstationary(γ)
σ² = sill(γ)
for j in 1:nobs, i in 1:nobs
@inbounds G[i, j] = σ² - G[i, j]
end
end
# strip units if necessary
LHS = ustrip.(G)
# set blocks of constraints
set_constraints_lhs!(model, LHS, domain)
LHS
end
"""
nconstraints(model)
Return number of constraints for Kriging `model`.
"""
function nconstraints end
"""
set_constraints_lhs!(model, LHS, X)
Set constraints in LHS of Kriging system.
"""
function set_constraints_lhs! end
"""
factorize(model, LHS)
Factorize LHS of Kriging system with appropriate
factorization method.
"""
factorize(::KrigingModel, LHS) = bunchkaufman(Symmetric(LHS), check=false)
#-----------------
# PREDICTION STEP
#-----------------
predict(fitted::FittedKriging, var, uₒ) = predictmean(fitted, weights(fitted, uₒ), var)
function predictprob(fitted::FittedKriging, var, uₒ)
w = weights(fitted, uₒ)
μ = predictmean(fitted, w, var)
σ² = predictvar(fitted, w)
Normal(μ, √σ²)
end
"""
predictmean(fitted, var, weights)
Posterior mean of `fitted` Kriging model for variable `var`
with Kriging `weights`.
"""
function predictmean(fitted::FittedKriging, weights::KrigingWeights, var)
d = fitted.state.data
c = Tables.columns(values(d))
z = Tables.getcolumn(c, var)
λ = weights.λ
sum(i -> λ[i] * z[i], eachindex(λ, z))
end
"""
predictvar(fitted, var, weights)
Posterior variance of `fitted` Kriging model for variable `var`
with Kriging `weights`.
"""
function predictvar(fitted::FittedKriging, weights::KrigingWeights)
γ = fitted.model.γ
b = fitted.state.RHS
V² = fitted.state.VARTYPE
λ = weights.λ
ν = weights.ν
# compute b⋅[λ;ν]
n = length(λ)
m = length(b)
c₁ = view(b, 1:n) ⋅ λ
c₂ = view(b, (n + 1):m) ⋅ ν
c = c₁ + c₂
σ² = isstationary(γ) ? sill(γ) - V²(c) : V²(c)
max(zero(V²), σ²)
end
"""
weights(model, uₒ)
Weights λ (and Lagrange multipliers ν) for the
Kriging `model` at geometry `uₒ`.
"""
function weights(fitted::FittedKriging, uₒ)
nobs = nrow(fitted.state.data)
set_rhs!(fitted, uₒ)
# solve Kriging system
s = fitted.state.LHS \ fitted.state.RHS
λ = view(s, 1:nobs)
ν = view(s, (nobs + 1):length(s))
KrigingWeights(λ, ν)
end
"""
set_rhs!(model, uₒ)
Set RHS of Kriging system at geometry `uₒ`.
"""
function set_rhs!(fitted::FittedKriging, uₒ)
γ = fitted.model.γ
dom = domain(fitted.state.data)
nel = nelements(dom)
RHS = fitted.state.RHS
# RHS variogram/covariance
g = map(u -> γ(u, uₒ), dom)
RHS[1:nel] .= ustrip.(g)
if isstationary(γ)
σ² = ustrip(sill(γ))
RHS[1:nel] .= σ² .- RHS[1:nel]
end
set_constraints_rhs!(fitted, uₒ)
end
"""
set_constraints_rhs!(model, xₒ)
Set constraints in RHS of Kriging system.
"""
function set_constraints_rhs! end
# ----------------
# IMPLEMENTATIONS
# ----------------
include("krig/simple.jl")
include("krig/ordinary.jl")
include("krig/universal.jl")
include("krig/externaldrift.jl")
"""
Kriging(γ)
Equivalent to [`OrdinaryKriging`](@ref) with variogram `γ`.
Kriging(γ, μ)
Equivalent to [`SimpleKriging`](@ref) with variogram `γ` and
constant mean `μ`.
Kriging(γ, deg, dim)
Equivalent to [`UniversalKriging`](@ref) with variogram `γ` and
`deg`-order polynomial in `dim`-dimensinal space.
Kriging(γ, drifts)
Equivalent to [`ExternalDriftKriging`](@ref) with variogram `γ` and
`drifts` functions.
Please check the docstring of corresponding models for more details.
"""
Kriging(γ) = OrdinaryKriging(γ)
Kriging(γ, μ::Number) = SimpleKriging(γ, μ)
Kriging(γ, deg::Int, dim::Int) = UniversalKriging(γ, deg, dim)
Kriging(γ, drifts::AbstractVector) = ExternalDriftKriging(γ, drifts)
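# Example usage (a sketch; assumes a variogram such as `GaussianVariogram()` from
# GeoStatsFunctions and a georeferenced table `gtb` with a variable `:z`):
#
# model = Kriging(GaussianVariogram())
# fitted = fit(model, gtb)
# μ = predict(fitted, :z, Point(0.5, 0.5))
# d = predictprob(fitted, :z, Point(0.5, 0.5))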
| GeoStatsModels | https://github.com/JuliaEarth/GeoStatsModels.jl.git |
|
[
"MIT"
] | 0.5.1 | 5be547a8340a28bb5547d46fa10d6bfd02543fc9 | code | 2734 | # ------------------------------------------------------------------
# Licensed under the MIT License. See LICENSE in the project root.
# ------------------------------------------------------------------
"""
LWR(weightfun=h -> exp(-3 * h^2), distance=Euclidean())
The locally weighted regression (a.k.a. LOESS) model introduced by
Cleveland 1979. It is the most natural generalization of [`IDW`](@ref)
in which one is allowed to use a custom weight function instead of
distance-based weights.
## References
* Stone 1977. [Consistent non-parametric regression](https://tinyurl.com/4da68xxf)
* Cleveland 1979. [Robust locally weighted regression and smoothing
scatterplots](https://www.tandfonline.com/doi/abs/10.1080/01621459.1979.10481038)
* Cleveland & Grosse 1991. [Computational methods for local
regression](https://link.springer.com/article/10.1007/BF01890836)
"""
struct LWR{F,D} <: GeoStatsModel
weightfun::F
distance::D
end
LWR(weightfun) = LWR(weightfun, Euclidean())
LWR() = LWR(h -> exp(-3 * h ^ 2))
mutable struct LWRState{D<:AbstractGeoTable,T}
data::D
X::T
end
struct FittedLWR{M<:LWR,S<:LWRState}
model::M
state::S
end
status(fitted::FittedLWR) = true
#--------------
# FITTING STEP
#--------------
function fit(model::LWR, data)
Ω = domain(data)
n = nelements(Ω)
x(i) = to(centroid(Ω, i))
# coordinates matrix
X = mapreduce(x, hcat, 1:n)
X = [ones(eltype(X), n) X']
# record state
state = LWRState(data, X)
# return fitted model
FittedLWR(model, state)
end
#-----------------
# PREDICTION STEP
#-----------------
predict(fitted::FittedLWR, var, uₒ) = predictmean(fitted, var, uₒ)
function predictprob(fitted::FittedLWR, var, uₒ)
X, W, A, x, z = matrices(fitted, var, uₒ)
μ = lwrmean(X, W, A, x, z)
σ² = lwrvar(X, W, A, x)
Normal(μ, √σ²)
end
function predictmean(fitted::FittedLWR, var, uₒ)
X, W, A, x, z = matrices(fitted, var, uₒ)
lwrmean(X, W, A, x, z)
end
function matrices(fitted::FittedLWR, var, uₒ)
d = fitted.state.data
c = Tables.columns(values(d))
z = Tables.getcolumn(c, var)
u = unit(eltype(fitted.state.X))
X = ustrip.(fitted.state.X)
W = wmatrix(fitted, uₒ)
A = X' * W * X
xₒ = ustrip.(u, to(centroid(uₒ)))
x = [one(eltype(xₒ)); xₒ]
X, W, A, x, z
end
function wmatrix(fitted::FittedLWR, uₒ)
w = fitted.model.weightfun
δ = fitted.model.distance
d = fitted.state.data
Ω = domain(d)
n = nelements(Ω)
xₒ = to(centroid(uₒ))
x(i) = to(centroid(Ω, i))
δs = map(i -> δ(xₒ, x(i)), 1:n)
ws = w.(δs / maximum(δs))
Diagonal(ws)
end
function lwrmean(X, W, A, x, z)
θ = A \ X' * (W * z)
sum(i -> x[i] * θ[i], eachindex(x, θ))
end
function lwrvar(X, W, A, x)
r = W * X * (A \ x)
norm(r)
end
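# Illustrative usage sketch (not part of the original sources), mirroring the
# test suite; `georef` comes from GeoTables and `Point` from Meshes:
#
#   d    = georef((; z=[1.0, 0.0, 1.0]))
#   lwr  = fit(LWR(), d)
#   pred = predict(lwr, :z, Point(0.5))      # locally weighted estimate
#   dist = predictprob(lwr, :z, Point(0.5))  # Normal distribution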
| GeoStatsModels | https://github.com/JuliaEarth/GeoStatsModels.jl.git |
|
[
"MIT"
] | 0.5.1 | 5be547a8340a28bb5547d46fa10d6bfd02543fc9 | code | 1072 | # ------------------------------------------------------------------
# Licensed under the MIT License. See LICENSE in the project root.
# ------------------------------------------------------------------
"""
GeoStatsModel
A geostatistical model that predicts variables at geometries of
a geospatial domain using samples available at other geometries
of the same domain.
"""
abstract type GeoStatsModel end
"""
fit(model, data)
Fit `model` to geospatial `data` and return a fitted model.
"""
function fit end
"""
predict(model, var, uₒ)
Predict variable `var` at geometry `uₒ` with given `model`.
"""
function predict end
"""
predictprob(model, var, uₒ)
Predict distribution of variable `var` at geometry `uₒ` with given `model`.
"""
function predictprob end
"""
status(fitted)
Return the status of the `fitted` model (e.g. whether the
factorization of the linear system was successful).
"""
function status end
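# Typical workflow sketch (illustrative, not part of the original sources):
# every concrete model below follows the same fit/predict interface. Assuming
# `georef` from GeoTables and `Point` from Meshes:
#
#   data   = georef((; z=[1.0, 0.0, 1.0]))
#   fitted = fit(NN(), data)                 # or IDW(), LWR(), Kriging(γ), ...
#   μ      = predict(fitted, :z, Point(0.5))
#   dist   = predictprob(fitted, :z, Point(0.5))
#   ok     = status(fitted)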
# ----------------
# IMPLEMENTATIONS
# ----------------
include("nn.jl")
include("idw.jl")
include("lwr.jl")
include("poly.jl")
include("krig.jl")
| GeoStatsModels | https://github.com/JuliaEarth/GeoStatsModels.jl.git |
|
[
"MIT"
] | 0.5.1 | 5be547a8340a28bb5547d46fa10d6bfd02543fc9 | code | 1278 | # ------------------------------------------------------------------
# Licensed under the MIT License. See LICENSE in the project root.
# ------------------------------------------------------------------
"""
NN(distance=Euclidean())
A model that assigns the value of the nearest observation.
"""
struct NN{D} <: GeoStatsModel
distance::D
end
NN() = NN(Euclidean())
struct NNState{D<:AbstractGeoTable}
data::D
end
struct FittedNN{M<:NN,S<:NNState}
model::M
state::S
end
status(fitted::FittedNN) = true
#--------------
# FITTING STEP
#--------------
function fit(model::NN, data)
# record state
state = NNState(data)
# return fitted model
FittedNN(model, state)
end
#-----------------
# PREDICTION STEP
#-----------------
predict(fitted::FittedNN, var, uₒ) = nn(fitted, distances(fitted, uₒ), var)
predictprob(fitted::FittedNN, var, uₒ) = Dirac(predict(fitted, var, uₒ))
function nn(fitted::FittedNN, distances, var)
d = fitted.state.data
c = Tables.columns(values(d))
z = Tables.getcolumn(c, var)
z[argmin(distances)]
end
function distances(fitted::FittedNN, uₒ)
δ = fitted.model.distance
d = fitted.state.data
Ω = domain(d)
xₒ = to(centroid(uₒ))
x(i) = to(centroid(Ω, i))
λ(i) = δ(xₒ, x(i))
map(λ, 1:nelements(Ω))
end
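# Illustrative usage (not part of the original sources), mirroring the tests:
#
#   d  = georef((; z=["a", "b", "c"]))       # data on a default 1-D grid
#   nn = fit(NN(), d)
#   predict(nn, :z, Point(1.5))              # "b", the nearest observation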
| GeoStatsModels | https://github.com/JuliaEarth/GeoStatsModels.jl.git |
|
[
"MIT"
] | 0.5.1 | 5be547a8340a28bb5547d46fa10d6bfd02543fc9 | code | 1895 | # ------------------------------------------------------------------
# Licensed under the MIT License. See LICENSE in the project root.
# ------------------------------------------------------------------
"""
Polynomial(degree=1)
A polynomial model with coefficients obtained via regression.
"""
struct Polynomial <: GeoStatsModel
degree::Int
end
Polynomial() = Polynomial(1)
struct PolynomialState{D,U}
coeffs::D
lenunit::U
end
struct FittedPolynomial{M<:Polynomial,S<:PolynomialState}
model::M
state::S
end
status(fitted::FittedPolynomial) = true
#--------------
# FITTING STEP
#--------------
function fit(model::Polynomial, data)
# retrieve parameters
d = model.degree
D = domain(data)
# multivariate Vandermonde matrix
x(i) = ustrip.(to(centroid(D, i)))
xs = (x(i) for i in 1:nelements(D))
V = vandermonde(xs, d)
# regression matrix
P = V'V \ V'
# regression coefficients
cols = Tables.columns(values(data))
vars = Tables.columnnames(cols)
coeffs = map(vars) do var
P * Tables.getcolumn(cols, var)
end
# length units of coordinates
lenunit = unit(Meshes.lentype(D))
# record state
state = PolynomialState(Dict(vars .=> coeffs), lenunit)
# return fitted model
FittedPolynomial(model, state)
end
#-----------------
# PREDICTION STEP
#-----------------
predict(fitted::FittedPolynomial, var, uₒ) = evalpoly(fitted, var, uₒ)
predictprob(fitted::FittedPolynomial, var, uₒ) = Dirac(predict(fitted, var, uₒ))
function evalpoly(fitted::FittedPolynomial, var, uₒ)
θ = fitted.state.coeffs
u = fitted.state.lenunit
d = fitted.model.degree
xₒ = ustrip.(u, to(centroid(uₒ)))
V = vandermonde((xₒ,), d)
first(V * θ[var])
end
function vandermonde(xs, d)
x = first(xs)
n = length(x)
es = Iterators.flatten(multiexponents(n, d) for d in 0:d)
ps = [[prod(x .^ e) for x in xs] for e in es]
reduce(hcat, ps)
end
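# Worked example (illustrative, not part of the original sources): for a
# single 2-D point x = (2.0, 3.0) and degree d = 1, the exponent tuples are
# (0, 0), (1, 0), (0, 1), hence (assuming the column order produced by
# `multiexponents`)
#
#   vandermonde(((2.0, 3.0),), 1) == [1.0 2.0 3.0]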
| GeoStatsModels | https://github.com/JuliaEarth/GeoStatsModels.jl.git |
|
[
"MIT"
] | 0.5.1 | 5be547a8340a28bb5547d46fa10d6bfd02543fc9 | code | 4024 | # ------------------------------------------------------------------
# Licensed under the MIT License. See LICENSE in the project root.
# ------------------------------------------------------------------
function fitpredict(
model::GeoStatsModel,
geotable::AbstractGeoTable,
pdomain::Domain;
path=LinearPath(),
point=true,
prob=false,
neighbors=true,
minneighbors=1,
maxneighbors=10,
distance=Euclidean(),
neighborhood=nothing
)
if neighbors
_fitpredictneigh(model, geotable, pdomain, path, point, prob, minneighbors, maxneighbors, distance, neighborhood)
else
_fitpredictall(model, geotable, pdomain, path, point, prob)
end
end
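# Illustrative usage sketch (not part of the original sources), mirroring the
# test suite: fit-and-predict over a Cartesian grid with a bounded number of
# neighbors per prediction location:
#
#   gtb  = georef((; z=[1.0, 0.0, 1.0]), [(25.0, 25.0), (50.0, 75.0), (75.0, 50.0)])
#   grid = CartesianGrid((100, 100), (0.5, 0.5), (1.0, 1.0))
#   pred = fitpredict(Kriging(GaussianVariogram(range=35.0)), gtb, grid, maxneighbors=3)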
function _fitpredictall(model, geotable, pdomain, path, point, prob)
table = values(geotable)
ddomain = domain(geotable)
vars = Tables.schema(table).names
# adjust data
data = if point
pset = PointSet(centroid(ddomain, i) for i in 1:nelements(ddomain))
_adjustunits(georef(values(geotable), pset))
else
_adjustunits(geotable)
end
# prediction order
inds = traverse(pdomain, path)
# predict function
predfun = prob ? predictprob : predict
# fit model to data
fmodel = fit(model, data)
# predict variable values
function pred(var)
map(inds) do ind
geom = point ? centroid(pdomain, ind) : pdomain[ind]
predfun(fmodel, var, geom)
end
end
pairs = (var => pred(var) for var in vars)
newtab = (; pairs...) |> Tables.materializer(table)
georef(newtab, pdomain)
end
function _fitpredictneigh(
model,
geotable,
pdomain,
path,
point,
prob,
minneighbors,
maxneighbors,
distance,
neighborhood
)
table = values(geotable)
ddomain = domain(geotable)
vars = Tables.schema(table).names
# adjust data
data = if point
pset = PointSet(centroid(ddomain, i) for i in 1:nelements(ddomain))
_adjustunits(georef(values(geotable), pset))
else
_adjustunits(geotable)
end
# fix neighbors limits
nobs = nrow(data)
if maxneighbors > nobs || maxneighbors < 1
maxneighbors = nobs
end
if minneighbors > maxneighbors || minneighbors < 1
minneighbors = 1
end
# determine bounded search method
searcher = if isnothing(neighborhood)
# nearest neighbor search with a metric
KNearestSearch(ddomain, maxneighbors; metric=distance)
else
# neighbor search with ball neighborhood
KBallSearch(ddomain, maxneighbors, neighborhood)
end
# pre-allocate memory for neighbors
neighbors = Vector{Int}(undef, maxneighbors)
# prediction order
inds = traverse(pdomain, path)
# predict function
predfun = prob ? predictprob : predict
# predict variable values
function pred(var)
map(inds) do ind
# centroid of estimation
center = centroid(pdomain, ind)
# find neighbors with data
nneigh = search!(neighbors, center, searcher)
# predict if enough neighbors
if nneigh ≥ minneighbors
# final set of neighbors
ninds = view(neighbors, 1:nneigh)
# view neighborhood with data
samples = view(data, ninds)
# fit model to samples
fmodel = fit(model, samples)
# save prediction
geom = point ? center : pdomain[ind]
predfun(fmodel, var, geom)
else
# missing prediction
missing
end
end
end
pairs = (var => pred(var) for var in vars)
newtab = (; pairs...) |> Tables.materializer(table)
georef(newtab, pdomain)
end
#--------------
# ADJUST UNITS
#--------------
function _adjustunits(geotable::AbstractGeoTable)
dom = domain(geotable)
tab = values(geotable)
cols = Tables.columns(tab)
vars = Tables.columnnames(cols)
pairs = (var => _absunit(Tables.getcolumn(cols, var)) for var in vars)
newtab = (; pairs...) |> Tables.materializer(tab)
georef(newtab, dom)
end
_absunit(x) = _absunit(nonmissingtype(eltype(x)), x)
_absunit(::Type, x) = x
function _absunit(::Type{Q}, x) where {Q<:AffineQuantity}
u = absoluteunit(unit(Q))
map(v -> uconvert(u, v), x)
end
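# Illustrative example (not part of the original sources): affine units such
# as °C are converted to their absolute counterpart so that linear algebra on
# the values is well defined, e.g. (assuming Unitful in scope)
#
#   _absunit([25.0u"°C"])   # ≈ [298.15u"K"]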
| GeoStatsModels | https://github.com/JuliaEarth/GeoStatsModels.jl.git |
|
[
"MIT"
] | 0.5.1 | 5be547a8340a28bb5547d46fa10d6bfd02543fc9 | code | 1541 | # ------------------------------------------------------------------
# Licensed under the MIT License. See LICENSE in the project root.
# ------------------------------------------------------------------
"""
ExternalDriftKriging(γ, drifts)
External Drift Kriging with variogram model `γ` and
external `drifts` functions.
### Notes
* External drift functions should be smooth
* Kriging system with external drift is often unstable
* Include a constant drift (e.g. `x->1`) for unbiased estimation
* [`OrdinaryKriging`](@ref) is recovered for `drifts = [x->1]`
* For polynomial mean, see [`UniversalKriging`](@ref)
"""
struct ExternalDriftKriging{G<:Variogram,D} <: KrigingModel
γ::G
drifts::Vector{D}
end
nconstraints(model::ExternalDriftKriging) = length(model.drifts)
function set_constraints_lhs!(model::ExternalDriftKriging, LHS::AbstractMatrix, domain)
drifts = model.drifts
ndrifts = length(drifts)
nobs = nelements(domain)
# set external drift blocks
for i in 1:nobs
x = to(centroid(domain, i))
for j in 1:ndrifts
LHS[nobs + j, i] = drifts[j](x)
LHS[i, nobs + j] = LHS[nobs + j, i]
end
end
# set zero block
LHS[(nobs + 1):end, (nobs + 1):end] .= zero(eltype(LHS))
nothing
end
function set_constraints_rhs!(fitted::FittedKriging{<:ExternalDriftKriging}, uₒ)
drifts = fitted.model.drifts
RHS = fitted.state.RHS
nobs = nrow(fitted.state.data)
# set external drift
xₒ = to(centroid(uₒ))
for (j, m) in enumerate(drifts)
RHS[nobs + j] = m(xₒ)
end
nothing
end
| GeoStatsModels | https://github.com/JuliaEarth/GeoStatsModels.jl.git |
|
[
"MIT"
] | 0.5.1 | 5be547a8340a28bb5547d46fa10d6bfd02543fc9 | code | 713 | # ------------------------------------------------------------------
# Licensed under the MIT License. See LICENSE in the project root.
# ------------------------------------------------------------------
"""
OrdinaryKriging(γ)
Ordinary Kriging with variogram model `γ`.
"""
struct OrdinaryKriging{G<:Variogram} <: KrigingModel
γ::G
end
nconstraints(::OrdinaryKriging) = 1
function set_constraints_lhs!(::OrdinaryKriging, LHS::AbstractMatrix, domain)
T = eltype(LHS)
LHS[end, :] .= one(T)
LHS[:, end] .= one(T)
LHS[end, end] = zero(T)
nothing
end
function set_constraints_rhs!(fitted::FittedKriging{<:OrdinaryKriging}, uₒ)
RHS = fitted.state.RHS
RHS[end] = one(eltype(RHS))
nothing
end
| GeoStatsModels | https://github.com/JuliaEarth/GeoStatsModels.jl.git |
|
[
"MIT"
] | 0.5.1 | 5be547a8340a28bb5547d46fa10d6bfd02543fc9 | code | 1117 | # ------------------------------------------------------------------
# Licensed under the MIT License. See LICENSE in the project root.
# ------------------------------------------------------------------
"""
SimpleKriging(γ, μ)
Simple Kriging with variogram model `γ` and constant mean `μ`.
### Notes
* Simple Kriging requires stationary variograms
"""
struct SimpleKriging{G<:Variogram,V} <: KrigingModel
# input fields
γ::G
μ::V
function SimpleKriging{G,V}(γ, μ) where {G<:Variogram,V}
@assert isstationary(γ) "Simple Kriging requires stationary variogram"
new(γ, μ)
end
end
SimpleKriging(γ, μ) = SimpleKriging{typeof(γ),typeof(μ)}(γ, μ)
nconstraints(::SimpleKriging) = 0
set_constraints_lhs!(::SimpleKriging, LHS::AbstractMatrix, domain) = nothing
set_constraints_rhs!(::FittedKriging{<:SimpleKriging}, uₒ) = nothing
function predictmean(fitted::FittedKriging{<:SimpleKriging}, weights::KrigingWeights, var)
μ = fitted.model.μ
d = fitted.state.data
c = Tables.columns(values(d))
z = Tables.getcolumn(c, var)
λ = weights.λ
y = [zᵢ - μ for zᵢ in z]
μ + sum(λ .* y)
end
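# Worked example (illustrative, not part of the original sources): with mean
# μ = 1.0, data z = [2.0, 0.0] and weights λ = [0.25, 0.5], the estimate is
# μ + Σᵢ λᵢ (zᵢ - μ) = 1.0 + 0.25 * (2.0 - 1.0) + 0.5 * (0.0 - 1.0) = 0.75.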
| GeoStatsModels | https://github.com/JuliaEarth/GeoStatsModels.jl.git |
|
[
"MIT"
] | 0.5.1 | 5be547a8340a28bb5547d46fa10d6bfd02543fc9 | code | 2240 | # ------------------------------------------------------------------
# Licensed under the MIT License. See LICENSE in the project root.
# ------------------------------------------------------------------
"""
UniversalKriging(γ, degree, dim)
Universal Kriging with variogram model `γ` and polynomial
`degree` on a geospatial domain of dimension `dim`.
### Notes
* [`OrdinaryKriging`](@ref) is recovered for 0th degree polynomial
* For non-polynomial mean, see [`ExternalDriftKriging`](@ref)
"""
struct UniversalKriging{G<:Variogram} <: KrigingModel
γ::G
degree::Int
dim::Int
exponents::Matrix{Int}
function UniversalKriging{G}(γ, degree, dim) where {G<:Variogram}
@assert degree ≥ 0 "degree must be nonnegative"
@assert dim > 0 "dimension must be positive"
exponents = UKexps(degree, dim)
new(γ, degree, dim, exponents)
end
end
UniversalKriging(γ, degree, dim) = UniversalKriging{typeof(γ)}(γ, degree, dim)
function UKexps(degree::Int, dim::Int)
# multinomial expansion
expmats = [hcat(collect(multiexponents(dim, d))...) for d in 0:degree]
exponents = hcat(expmats...)
# sort expansion for better conditioned Kriging matrices
sorted = sortperm(vec(maximum(exponents, dims=1)), rev=true)
exponents[:, sorted]
end
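# Illustrative example (not part of the original sources): UKexps(1, 2)
# collects the exponents of the monomials 1, x₁, x₂ and sorts the columns by
# largest exponent, yielding (assuming the ordering of
# Combinatorics.multiexponents)
#
#   [1 0 0;
#    0 1 0]
#
# i.e. the columns correspond to x₁, x₂ and the constant term, in that order.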
nconstraints(model::UniversalKriging) = size(model.exponents, 2)
function set_constraints_lhs!(model::UniversalKriging, LHS::AbstractMatrix, domain)
exponents = model.exponents
nobs = nelements(domain)
nterms = size(exponents, 2)
# set polynomial drift blocks
for i in 1:nobs
x = ustrip.(to(centroid(domain, i)))
for j in 1:nterms
LHS[nobs + j, i] = prod(x .^ exponents[:, j])
LHS[i, nobs + j] = LHS[nobs + j, i]
end
end
# set zero block
LHS[(nobs + 1):end, (nobs + 1):end] .= zero(eltype(LHS))
nothing
end
function set_constraints_rhs!(fitted::FittedKriging{<:UniversalKriging}, uₒ)
exponents = fitted.model.exponents
RHS = fitted.state.RHS
dom = domain(fitted.state.data)
nobs = nrow(fitted.state.data)
nterms = size(exponents, 2)
# set polynomial drift
u = unit(Meshes.lentype(dom))
xₒ = ustrip.(u, to(centroid(uₒ)))
for j in 1:nterms
RHS[nobs + j] = prod(xₒ .^ exponents[:, j])
end
nothing
end
| GeoStatsModels | https://github.com/JuliaEarth/GeoStatsModels.jl.git |
|
[
"MIT"
] | 0.5.1 | 5be547a8340a28bb5547d46fa10d6bfd02543fc9 | code | 659 | @testset "IDW" begin
@testset "Unitful" begin
d = georef((; z=[1.0, 0.0, 1.0]u"K"))
idw = GeoStatsModels.fit(IDW(), d)
pred = GeoStatsModels.predict(idw, :z, Point(0.0))
@test unit(pred) == u"K"
# affine units
d = georef((; z=[1.0, 0.0, 1.0]u"°C"))
idw = GeoStatsModels.fit(IDW(), d)
#pred = GeoStatsModels.predict(idw, :z, Point(0.0))
#@test unit(pred) == u"K"
end
@testset "CoDa" begin
d = georef((; z=[Composition(0.1, 0.2), Composition(0.3, 0.4), Composition(0.5, 0.6)]))
idw = GeoStatsModels.fit(IDW(), d)
pred = GeoStatsModels.predict(idw, :z, Point(0.0))
@test pred isa Composition
end
end
| GeoStatsModels | https://github.com/JuliaEarth/GeoStatsModels.jl.git |
|
[
"MIT"
] | 0.5.1 | 5be547a8340a28bb5547d46fa10d6bfd02543fc9 | code | 10627 | @testset "Kriging" begin
rng = StableRNG(2024)
tol = 10 * eps(Float64)
SK = GeoStatsModels.SimpleKriging
OK = GeoStatsModels.OrdinaryKriging
UK = GeoStatsModels.UniversalKriging
DK = GeoStatsModels.ExternalDriftKriging
@testset "Basics" begin
dim = 3
nobs = 10
cmat = 10 * rand(rng, dim, nobs)
pset = PointSet(Tuple.(eachcol(cmat)))
data = georef((z=rand(rng, nobs),), pset)
γ = GaussianVariogram(sill=1.0, range=1.0, nugget=0.0)
simkrig = SK(γ, mean(data.z))
ordkrig = OK(γ)
unikrig = UK(γ, 1, 3)
drikrig = DK(γ, [x -> 1.0])
sk = GeoStatsModels.fit(simkrig, data)
ok = GeoStatsModels.fit(ordkrig, data)
uk = GeoStatsModels.fit(unikrig, data)
dk = GeoStatsModels.fit(drikrig, data)
# Kriging is an interpolator
for j in 1:nobs
skdist = GeoStatsModels.predictprob(sk, :z, pset[j])
okdist = GeoStatsModels.predictprob(ok, :z, pset[j])
ukdist = GeoStatsModels.predictprob(uk, :z, pset[j])
dkdist = GeoStatsModels.predictprob(dk, :z, pset[j])
# mean checks
@test mean(skdist) ≈ data.z[j]
@test mean(okdist) ≈ data.z[j]
@test mean(ukdist) ≈ data.z[j]
@test mean(dkdist) ≈ data.z[j]
# variance checks
@test var(skdist) ≥ 0
@test var(okdist) ≥ 0
@test var(ukdist) ≥ 0
@test var(dkdist) ≥ 0
@test var(skdist) ≤ var(okdist) + tol
end
# save results on a particular location pₒ
pₒ = rand(rng, Point)
skdist = GeoStatsModels.predictprob(sk, :z, pₒ)
okdist = GeoStatsModels.predictprob(ok, :z, pₒ)
ukdist = GeoStatsModels.predictprob(uk, :z, pₒ)
dkdist = GeoStatsModels.predictprob(dk, :z, pₒ)
# Kriging is translation-invariant
h = to(rand(rng, Point))
pset_h = PointSet([pset[i] + h for i in 1:nelements(pset)])
data_h = georef((z=data.z,), pset_h)
sk_h = GeoStatsModels.fit(SK(γ, mean(data_h.z)), data_h)
ok_h = GeoStatsModels.fit(OK(γ), data_h)
uk_h = GeoStatsModels.fit(UK(γ, 1, 3), data_h)
dk_h = GeoStatsModels.fit(DK(γ, [x -> 1.0]), data_h)
skdist_h = GeoStatsModels.predictprob(sk_h, :z, pₒ + h)
okdist_h = GeoStatsModels.predictprob(ok_h, :z, pₒ + h)
ukdist_h = GeoStatsModels.predictprob(uk_h, :z, pₒ + h)
dkdist_h = GeoStatsModels.predictprob(dk_h, :z, pₒ + h)
@test mean(skdist_h) ≈ mean(skdist)
@test var(skdist_h) ≈ var(skdist)
@test mean(okdist_h) ≈ mean(okdist)
@test var(okdist_h) ≈ var(okdist)
@test mean(ukdist_h) ≈ mean(ukdist)
@test var(ukdist_h) ≈ var(ukdist)
@test mean(dkdist_h) ≈ mean(dkdist)
@test var(dkdist_h) ≈ var(dkdist)
# Kriging mean is invariant under covariance scaling
# Kriging variance is multiplied by the same factor
α = 2.0
γ_α = GaussianVariogram(sill=α, range=1.0, nugget=0.0)
sk_α = GeoStatsModels.fit(SK(γ_α, mean(data.z)), data)
ok_α = GeoStatsModels.fit(OK(γ_α), data)
uk_α = GeoStatsModels.fit(UK(γ_α, 1, 3), data)
dk_α = GeoStatsModels.fit(DK(γ_α, [x -> 1.0]), data)
skdist_α = GeoStatsModels.predictprob(sk_α, :z, pₒ)
okdist_α = GeoStatsModels.predictprob(ok_α, :z, pₒ)
ukdist_α = GeoStatsModels.predictprob(uk_α, :z, pₒ)
dkdist_α = GeoStatsModels.predictprob(dk_α, :z, pₒ)
@test mean(skdist_α) ≈ mean(skdist)
@test var(skdist_α) ≈ α * var(skdist)
@test mean(okdist_α) ≈ mean(okdist)
@test var(okdist_α) ≈ α * var(okdist)
@test mean(ukdist_α) ≈ mean(ukdist)
@test var(ukdist_α) ≈ α * var(ukdist)
@test mean(dkdist_α) ≈ mean(dkdist)
@test var(dkdist_α) ≈ α * var(dkdist)
# Kriging variance is a function of data configuration, not data values
δ = rand(rng, nobs)
data_δ = georef((z=data.z .+ δ,), pset)
sk_δ = GeoStatsModels.fit(SK(γ, mean(data_δ.z)), data_δ)
ok_δ = GeoStatsModels.fit(OK(γ), data_δ)
uk_δ = GeoStatsModels.fit(UK(γ, 1, 3), data_δ)
dk_δ = GeoStatsModels.fit(DK(γ, [x -> 1.0]), data_δ)
skdist_δ = GeoStatsModels.predictprob(sk_δ, :z, pₒ)
okdist_δ = GeoStatsModels.predictprob(ok_δ, :z, pₒ)
ukdist_δ = GeoStatsModels.predictprob(uk_δ, :z, pₒ)
dkdist_δ = GeoStatsModels.predictprob(dk_δ, :z, pₒ)
@test var(skdist_δ) ≈ var(skdist)
@test var(okdist_δ) ≈ var(okdist)
@test var(ukdist_δ) ≈ var(ukdist)
@test var(dkdist_δ) ≈ var(dkdist)
# Ordinary Kriging ≡ Universal Kriging with 0th degree drift
uk_0 = GeoStatsModels.fit(UK(γ, 0, 3), data)
okdist = GeoStatsModels.predictprob(ok, :z, pₒ)
ukdist_0 = GeoStatsModels.predictprob(uk_0, :z, pₒ)
@test mean(okdist) ≈ mean(ukdist_0)
@test var(okdist) ≈ var(ukdist_0)
# Ordinary Kriging ≡ Kriging with constant external drift
dk_c = GeoStatsModels.fit(DK(γ, [x -> 1.0]), data)
okdist = GeoStatsModels.predictprob(ok, :z, pₒ)
dkdist_c = GeoStatsModels.predictprob(dk_c, :z, pₒ)
@test mean(okdist) ≈ mean(dkdist_c)
@test var(okdist) ≈ var(dkdist_c)
end
# non-stationary variograms are allowed
@testset "Stationarity" begin
dim = 3
nobs = 10
cmat = 10 * rand(rng, dim, nobs)
pset = PointSet(Tuple.(eachcol(cmat)))
data = georef((z=rand(rng, nobs),), pset)
γ_ns = PowerVariogram()
ok_ns = GeoStatsModels.fit(OK(γ_ns), data)
uk_ns = GeoStatsModels.fit(UK(γ_ns, 1, 3), data)
dk_ns = GeoStatsModels.fit(DK(γ_ns, [x -> 1.0]), data)
for j in 1:nobs
okdist_ns = GeoStatsModels.predictprob(ok_ns, :z, pset[j])
ukdist_ns = GeoStatsModels.predictprob(uk_ns, :z, pset[j])
dkdist_ns = GeoStatsModels.predictprob(dk_ns, :z, pset[j])
# mean checks
@test mean(okdist_ns) ≈ data.z[j]
@test mean(ukdist_ns) ≈ data.z[j]
@test mean(dkdist_ns) ≈ data.z[j]
# variance checks
@test var(okdist_ns) ≥ 0
@test var(ukdist_ns) ≥ 0
@test var(dkdist_ns) ≥ 0
end
end
# floating point checks
@testset "Floats" begin
dim = 3
nobs = 10
X_f = rand(rng, Float32, dim, nobs)
z_f = rand(rng, Float32, nobs)
X_d = Float64.(X_f)
z_d = Float64.(z_f)
pset_f = PointSet(Tuple.(eachcol(X_f)))
data_f = georef((z=z_f,), pset_f)
pset_d = PointSet(Tuple.(eachcol(X_d)))
data_d = georef((z=z_d,), pset_d)
coords_f = ntuple(i -> rand(rng, Float32), dim)
coords_d = Float64.(coords_f)
pₒ_f = Point(coords_f)
pₒ_d = Point(coords_d)
γ_f = GaussianVariogram(sill=1.0f0, range=1.0f0, nugget=0.0f0)
sk_f = GeoStatsModels.fit(SK(γ_f, mean(data_f.z)), data_f)
ok_f = GeoStatsModels.fit(OK(γ_f), data_f)
uk_f = GeoStatsModels.fit(UK(γ_f, 1, 3), data_f)
dk_f = GeoStatsModels.fit(DK(γ_f, [x -> 1.0f0]), data_f)
γ_d = GaussianVariogram(sill=1.0, range=1.0, nugget=0.0)
sk_d = GeoStatsModels.fit(SK(γ_d, mean(data_d.z)), data_d)
ok_d = GeoStatsModels.fit(OK(γ_d), data_d)
uk_d = GeoStatsModels.fit(UK(γ_d, 1, 3), data_d)
dk_d = GeoStatsModels.fit(DK(γ_d, [x -> 1.0]), data_d)
skdist_f = GeoStatsModels.predictprob(sk_f, :z, pₒ_f)
okdist_f = GeoStatsModels.predictprob(ok_f, :z, pₒ_f)
ukdist_f = GeoStatsModels.predictprob(uk_f, :z, pₒ_f)
dkdist_f = GeoStatsModels.predictprob(dk_f, :z, pₒ_f)
skdist_d = GeoStatsModels.predictprob(sk_d, :z, pₒ_d)
okdist_d = GeoStatsModels.predictprob(ok_d, :z, pₒ_d)
ukdist_d = GeoStatsModels.predictprob(uk_d, :z, pₒ_d)
dkdist_d = GeoStatsModels.predictprob(dk_d, :z, pₒ_d)
@test isapprox(mean(skdist_f), mean(skdist_d), atol=1e-4)
@test isapprox(var(skdist_f), var(skdist_d), atol=1e-4)
@test isapprox(mean(okdist_f), mean(okdist_d), atol=1e-4)
@test isapprox(var(okdist_f), var(okdist_d), atol=1e-4)
@test isapprox(mean(ukdist_f), mean(ukdist_d), atol=1e-4)
@test isapprox(var(ukdist_f), var(ukdist_d), atol=1e-4)
@test isapprox(mean(dkdist_f), mean(dkdist_d), atol=1e-4)
@test isapprox(var(dkdist_f), var(dkdist_d), atol=1e-4)
end
# change of support checks
@testset "Support" begin
dim = 2
nobs = 10
cmat = 10 * rand(rng, dim, nobs)
pset = PointSet(Tuple.(eachcol(cmat)))
data = georef((z=rand(rng, nobs),), pset)
γ = GaussianVariogram(sill=1.0, range=1.0, nugget=0.0)
sk = GeoStatsModels.fit(SK(γ, mean(data.z)), data)
ok = GeoStatsModels.fit(OK(γ), data)
uk = GeoStatsModels.fit(UK(γ, 1, dim), data)
dk = GeoStatsModels.fit(DK(γ, [x -> 1.0]), data)
# predict on a quadrangle
uₒ = Quadrangle((0.0, 0.0), (1.0, 0.0), (1.0, 1.0), (0.0, 1.0))
skdist = GeoStatsModels.predictprob(sk, :z, uₒ)
okdist = GeoStatsModels.predictprob(ok, :z, uₒ)
ukdist = GeoStatsModels.predictprob(uk, :z, uₒ)
dkdist = GeoStatsModels.predictprob(dk, :z, uₒ)
# variance checks
@test var(skdist) ≥ 0
@test var(okdist) ≥ 0
@test var(ukdist) ≥ 0
@test var(dkdist) ≥ 0
@test var(skdist) ≤ var(okdist) + tol
end
@testset "CoDa" begin
dim = 2
nobs = 10
cmat = 10 * rand(rng, dim, nobs)
pset = PointSet(Tuple.(eachcol(cmat)))
table = (z=rand(rng, Composition{3}, nobs),)
data = georef(table, pset)
# basic models
γ = GaussianVariogram(sill=1.0, range=1.0, nugget=0.0)
sk = GeoStatsModels.fit(SK(γ, mean(data.z)), data)
ok = GeoStatsModels.fit(OK(γ), data)
uk = GeoStatsModels.fit(UK(γ, 1, dim), data)
dk = GeoStatsModels.fit(DK(γ, [x -> 1.0]), data)
# prediction on a quadrangle
uₒ = Quadrangle((0.0, 0.0), (1.0, 0.0), (1.0, 1.0), (0.0, 1.0))
skmean = GeoStatsModels.predict(sk, :z, uₒ)
okmean = GeoStatsModels.predict(ok, :z, uₒ)
ukmean = GeoStatsModels.predict(uk, :z, uₒ)
dkmean = GeoStatsModels.predict(dk, :z, uₒ)
# type tests
@test skmean isa Composition
@test okmean isa Composition
@test ukmean isa Composition
@test dkmean isa Composition
end
@testset "Unitful" begin
dim = 3
nobs = 10
cmat = 10 * rand(rng, dim, nobs)
pset = PointSet(Tuple.(eachcol(cmat)))
data = georef((z=rand(rng, nobs) * u"K",), pset)
γ = GaussianVariogram(sill=1.0u"K^2")
sk = GeoStatsModels.fit(SK(γ, mean(data.z)), data)
ok = GeoStatsModels.fit(OK(γ), data)
uk = GeoStatsModels.fit(UK(γ, 1, dim), data)
dk = GeoStatsModels.fit(DK(γ, [x -> 1.0]), data)
for _k in [sk, ok, uk, dk]
w = GeoStatsModels.weights(_k, Point(0, 0, 0))
μ = GeoStatsModels.predictmean(_k, w, :z)
σ² = GeoStatsModels.predictvar(_k, w)
@test unit(μ) == u"K"
@test unit(σ²) == u"K^2"
end
end
@testset "Kriging" begin
γ = GaussianVariogram()
@test Kriging(γ) isa OK
@test Kriging(γ, 0.0) isa SK
@test Kriging(γ, 1, 2) isa UK
@test Kriging(γ, [x -> 1]) isa DK
end
end
| GeoStatsModels | https://github.com/JuliaEarth/GeoStatsModels.jl.git |
|
[
"MIT"
] | 0.5.1 | 5be547a8340a28bb5547d46fa10d6bfd02543fc9 | code | 659 | @testset "LWR" begin
@testset "Unitful" begin
d = georef((; z=[1.0, 0.0, 1.0]u"K"))
lwr = GeoStatsModels.fit(LWR(), d)
pred = GeoStatsModels.predict(lwr, :z, Point(0.0))
@test unit(pred) == u"K"
# affine units
d = georef((; z=[1.0, 0.0, 1.0]u"°C"))
lwr = GeoStatsModels.fit(LWR(), d)
#pred = GeoStatsModels.predict(lwr, :z, Point(0.0))
#@test unit(pred) == u"K"
end
@testset "CoDa" begin
d = georef((; z=[Composition(0.1, 0.2), Composition(0.3, 0.4), Composition(0.5, 0.6)]))
lwr = GeoStatsModels.fit(LWR(), d)
pred = GeoStatsModels.predict(lwr, :z, Point(0.0))
@test pred isa Composition
end
end
| GeoStatsModels | https://github.com/JuliaEarth/GeoStatsModels.jl.git |
|
[
"MIT"
] | 0.5.1 | 5be547a8340a28bb5547d46fa10d6bfd02543fc9 | code | 985 | @testset "NN" begin
@testset "Basics" begin
d = georef((; z=["a", "b", "c"]))
nn = GeoStatsModels.fit(NN(), d)
pred = GeoStatsModels.predict(nn, :z, Point(0.5))
@test pred == "a"
pred = GeoStatsModels.predict(nn, :z, Point(1.5))
@test pred == "b"
pred = GeoStatsModels.predict(nn, :z, Point(2.5))
@test pred == "c"
end
@testset "Unitful" begin
d = georef((; z=[1.0, 0.0, 1.0]u"K"))
nn = GeoStatsModels.fit(NN(), d)
pred = GeoStatsModels.predict(nn, :z, Point(0.0))
@test unit(pred) == u"K"
# affine units
d = georef((; z=[1.0, 0.0, 1.0]u"°C"))
nn = GeoStatsModels.fit(NN(), d)
#pred = GeoStatsModels.predict(nn, :z, Point(0.0))
#@test unit(pred) == u"K"
end
@testset "CoDa" begin
d = georef((; z=[Composition(0.1, 0.2), Composition(0.3, 0.4), Composition(0.5, 0.6)]))
nn = GeoStatsModels.fit(NN(), d)
pred = GeoStatsModels.predict(nn, :z, Point(0.0))
@test pred isa Composition
end
end
| GeoStatsModels | https://github.com/JuliaEarth/GeoStatsModels.jl.git |
|
[
"MIT"
] | 0.5.1 | 5be547a8340a28bb5547d46fa10d6bfd02543fc9 | code | 1922 | @testset "Polynomial" begin
@testset "Basics" begin
d = georef((; z=[1, 2, 3]))
poly = GeoStatsModels.fit(Polynomial(), d)
pred = GeoStatsModels.predict(poly, :z, Point(0.5))
@test pred ≈ 1
pred = GeoStatsModels.predict(poly, :z, Point(1.5))
@test pred ≈ 2
pred = GeoStatsModels.predict(poly, :z, Point(2.5))
@test pred ≈ 3
fitpredict(m, d) = GeoStatsModels.fitpredict(m, d, domain(d), neighbors=false)
# constant trend
rng = StableRNG(42)
d = georef((z=rand(rng, 100),), CartesianGrid(100))
z̄ = fitpredict(Polynomial(), d).z
@test all(abs.(diff(z̄)) .< 0.01)
# linear trend
rng = StableRNG(42)
μ = range(0, stop=1, length=100)
ϵ = 0.1rand(rng, 100)
d = georef((z=μ + ϵ,), CartesianGrid(100))
z̄ = fitpredict(Polynomial(), d).z
@test all([abs(z̄[i] - μ[i]) < 0.1 for i in 1:length(z̄)])
# quadratic trend
rng = StableRNG(42)
r = range(-1, stop=1, length=100)
μ = [x^2 + y^2 for x in r, y in r]
ϵ = 0.1rand(rng, 100, 100)
d = georef((z=μ + ϵ,))
z̄ = fitpredict(Polynomial(2), d).z
@test all([abs(z̄[i] - μ[i]) < 0.1 for i in 1:length(z̄)])
# correct schema
rng = StableRNG(42)
d = georef((a=rand(rng, 10), b=rand(rng, 10)), rand(rng, Point, 10))
d̄ = fitpredict(Polynomial(), d)
t̄ = values(d̄)
@test propertynames(t̄) == (:a, :b)
@test eltype(t̄.a) == Float64
@test eltype(t̄.b) == Float64
end
@testset "Unitful" begin
d = georef((; z=[1.0, 0.0, 1.0]u"K"))
poly = GeoStatsModels.fit(Polynomial(), d)
pred = GeoStatsModels.predict(poly, :z, Point(0.0))
@test unit(pred) == u"K"
end
@testset "CoDa" begin
d = georef((; z=[Composition(0.1, 0.2), Composition(0.3, 0.4), Composition(0.5, 0.6)]))
poly = GeoStatsModels.fit(Polynomial(), d)
pred = GeoStatsModels.predict(poly, :z, Point(0.0))
@test pred isa Composition
end
end
| GeoStatsModels | https://github.com/JuliaEarth/GeoStatsModels.jl.git |
|
[
"MIT"
] | 0.5.1 | 5be547a8340a28bb5547d46fa10d6bfd02543fc9 | code | 348 | using GeoStatsModels
using Meshes
using GeoTables
using GeoStatsFunctions
using CoDa
using Unitful
using LinearAlgebra
using Statistics
using Test, StableRNGs
# list of tests
testfiles = ["nn.jl", "idw.jl", "lwr.jl", "poly.jl", "krig.jl", "utils.jl"]
@testset "GeoStatsModels.jl" begin
for testfile in testfiles
include(testfile)
end
end
| GeoStatsModels | https://github.com/JuliaEarth/GeoStatsModels.jl.git |
|
[
"MIT"
] | 0.5.1 | 5be547a8340a28bb5547d46fa10d6bfd02543fc9 | code | 930 | @testset "fitpredict" begin
rng = StableRNG(2024)
pset = PointSet(rand(rng, Point, 3))
gtb = georef((a=[1, 2, 3], b=[4, 5, 6]), pset)
pred = GeoStatsModels.fitpredict(IDW(), gtb, pset, neighbors=false)
@test pred.a == gtb.a
@test pred.b == gtb.b
@test pred.geometry == gtb.geometry
# also works with views
vgtb = view(gtb, 1:3)
vpred = GeoStatsModels.fitpredict(IDW(), vgtb, pset, neighbors=false)
@test vpred == pred
gtb = georef((; z=[1.0, 0.0, 1.0]), [(25.0, 25.0), (50.0, 75.0), (75.0, 50.0)])
grid = CartesianGrid((100, 100), (0.5, 0.5), (1.0, 1.0))
linds = LinearIndices(size(grid))
variogram = GaussianVariogram(range=35.0, nugget=0.0)
pred = GeoStatsModels.fitpredict(Kriging(variogram), gtb, grid, maxneighbors=3)
@test isapprox(pred.z[linds[25, 25]], 1.0, atol=1e-3)
@test isapprox(pred.z[linds[50, 75]], 0.0, atol=1e-3)
@test isapprox(pred.z[linds[75, 50]], 1.0, atol=1e-3)
end
| GeoStatsModels | https://github.com/JuliaEarth/GeoStatsModels.jl.git |
|
[
"MIT"
] | 0.5.1 | 5be547a8340a28bb5547d46fa10d6bfd02543fc9 | docs | 702 | # GeoStatsModels.jl
[![][build-img]][build-url] [![][codecov-img]][codecov-url]
Geostatistical models for the [GeoStats.jl](https://github.com/JuliaEarth/GeoStats.jl) framework.
## Asking for help
If you have any questions, please [contact our community](https://juliaearth.github.io/GeoStats.jl/stable/about/community.html).
[build-img]: https://img.shields.io/github/actions/workflow/status/JuliaEarth/GeoStatsModels.jl/CI.yml?branch=main&style=flat-square
[build-url]: https://github.com/JuliaEarth/GeoStatsModels.jl/actions
[codecov-img]: https://img.shields.io/codecov/c/github/JuliaEarth/GeoStatsModels.jl?style=flat-square
[codecov-url]: https://codecov.io/gh/JuliaEarth/GeoStatsModels.jl
| GeoStatsModels | https://github.com/JuliaEarth/GeoStatsModels.jl.git |
|
[
"MPL-2.0"
] | 0.3.9 | 55c5d8356b684e56e7309386ec86ecd9ef9d17da | code | 12740 | # Copyright 2017, Chris Coey and Miles Lubin
# Copyright 2016, Los Alamos National Laboratory, LANS LLC.
# This Source Code Form is subject to the terms of the Mozilla Public
# License, v. 2.0. If a copy of the MPL was not distributed with this
# file, You can obtain one at http://mozilla.org/MPL/2.0/.
#=========================================================
MathOptInterface wrapper
=========================================================#
function MOI.is_empty(model::Optimizer)
return (
isnothing(model.mip_optimizer) || MOI.is_empty(model.mip_optimizer)
) && (
isnothing(model.cont_optimizer) || MOI.is_empty(model.cont_optimizer)
)
end
function MOI.empty!(model::Optimizer)
model.mip_optimizer = nothing
model.cont_optimizer = nothing
model.infeasible_optimizer = nothing
model.nlp_obj_var = nothing
model.mip_variables = MOI.VariableIndex[]
model.cont_variables = MOI.VariableIndex[]
model.infeasible_variables = MOI.VariableIndex[]
model.nl_slack_variables = nothing
model.quad_LT_slack = nothing
model.quad_GT_slack = nothing
model.quad_LT_infeasible_con = nothing
model.quad_GT_infeasible_con = nothing
model.int_indices = BitSet()
model.nlp_block = nothing
model.objective = nothing
model.quad_LT =
Tuple{MOI.ScalarQuadraticFunction{Float64},MOI.LessThan{Float64}}[]
model.quad_GT =
Tuple{MOI.ScalarQuadraticFunction{Float64},MOI.GreaterThan{Float64}}[]
model.incumbent = Float64[]
model.status = MOI.OPTIMIZE_NOT_CALLED
return
end
MOI.get(::Optimizer, ::MOI.SolverName) = "Pavito"
MOI.supports_incremental_interface(::Optimizer) = true
function MOI.copy_to(model::Optimizer, src::MOI.ModelLike)
return MOI.Utilities.default_copy_to(model, src)
end
MOI.get(model::Optimizer, ::MOI.NumberOfVariables) = length(model.incumbent)
function MOI.add_variable(model::Optimizer)
push!(model.mip_variables, MOI.add_variable(_mip(model)))
push!(model.cont_variables, MOI.add_variable(_cont(model)))
push!(model.infeasible_variables, MOI.add_variable(_infeasible(model)))
if !isnothing(model.nl_slack_variables)
# the slack variables are assumed to be added after all the
# `infeasible_variables`, so we delete them now and add back during
# `optimize!` if needed
_clean_slacks(model)
end
push!(model.incumbent, NaN)
return MOI.VariableIndex(length(model.mip_variables))
end
function MOI.supports(
::Optimizer,
::MOI.VariablePrimalStart,
::Type{MOI.VariableIndex},
)
return true
end
function MOI.set(
model::Optimizer,
attr::MOI.VariablePrimalStart,
vi::MOI.VariableIndex,
value,
)
MOI.set(_cont(model), attr, vi, value)
model.incumbent[vi.value] = something(value, NaN)
return
end
function _map(variables::Vector{MOI.VariableIndex}, x)
return MOI.Utilities.map_indices(vi -> variables[vi.value], x)
end
_is_discrete(::Type{<:MOI.AbstractSet}) = false
function _is_discrete(
::Type{<:Union{MOI.Integer,MOI.ZeroOne,MOI.Semiinteger{Float64}}},
)
return true
end
function MOI.supports_constraint(
model::Optimizer,
F::Type{MOI.VariableIndex},
S::Type{<:MOI.AbstractScalarSet},
)
return MOI.supports_constraint(_mip(model), F, S) &&
(_is_discrete(S) || MOI.supports_constraint(_cont(model), F, S))
end
function MOI.is_valid(
model::Optimizer,
ci::MOI.ConstraintIndex{MOI.VariableIndex,<:MOI.AbstractScalarSet},
)
return MOI.is_valid(_mip(model), ci)
end
function MOI.add_constraint(
model::Optimizer,
func::MOI.VariableIndex,
set::MOI.AbstractScalarSet,
)
if _is_discrete(typeof(set))
push!(model.int_indices, func.value)
else
MOI.add_constraint(_cont(model), _map(model.cont_variables, func), set)
MOI.add_constraint(
_infeasible(model),
_map(model.infeasible_variables, func),
set,
)
end
return MOI.add_constraint(_mip(model), _map(model.mip_variables, func), set)
end
function MOI.supports_constraint(
model::Optimizer,
F::Type{MOI.ScalarQuadraticFunction{Float64}},
S::Type{<:Union{MOI.LessThan{Float64},MOI.GreaterThan{Float64}}},
)
return MOI.supports_constraint(_cont(model), F, S)
end
function MOI.add_constraint(
model::Optimizer,
func::MOI.ScalarQuadraticFunction{Float64},
set::MOI.LessThan{Float64},
)
_clean_slacks(model)
MOI.add_constraint(_cont(model), _map(model.cont_variables, func), set)
push!(model.quad_LT, (MOI.Utilities.canonical(func), copy(set)))
return MOI.ConstraintIndex{typeof(func),typeof(set)}(length(model.quad_LT))
end
function MOI.add_constraint(
model::Optimizer,
func::MOI.ScalarQuadraticFunction{Float64},
set::MOI.GreaterThan{Float64},
)
_clean_slacks(model)
MOI.add_constraint(_cont(model), _map(model.cont_variables, func), set)
push!(model.quad_GT, (MOI.Utilities.canonical(func), copy(set)))
return MOI.ConstraintIndex{typeof(func),typeof(set)}(length(model.quad_GT))
end
function MOI.get(
model::Optimizer,
attr::MOI.NumberOfConstraints{
MOI.ScalarQuadraticFunction{Float64},
<:Union{MOI.LessThan{Float64},MOI.GreaterThan{Float64}},
},
)
return MOI.get(_cont(model), attr)
end
function MOI.supports_constraint(
model::Optimizer,
F::Type{<:MOI.AbstractFunction},
S::Type{<:MOI.AbstractSet},
)
return (
MOI.supports_constraint(_mip(model), F, S) &&
MOI.supports_constraint(_cont(model), F, S)
)
end
function MOI.add_constraint(
model::Optimizer,
func::MOI.AbstractFunction,
set::MOI.AbstractSet,
)
MOI.add_constraint(_cont(model), _map(model.cont_variables, func), set)
MOI.add_constraint(
_infeasible(model),
_map(model.infeasible_variables, func),
set,
)
return MOI.add_constraint(_mip(model), _map(model.mip_variables, func), set)
end
function MOI.get(model::Optimizer, attr::MOI.NumberOfConstraints)
return MOI.get(_mip(model), attr)
end
MOI.supports(::Optimizer, ::MOI.NLPBlock) = true
function MOI.set(model::Optimizer, attr::MOI.NLPBlock, block::MOI.NLPBlockData)
_clean_slacks(model)
model.nlp_block = block
if !MOI.supports(_cont(model), MOI.NLPBlock())
error(
"Continuous solver (`cont_solver`) is not a derivative-based " *
"NLP solver recognized by MathOptInterface (try Pajarito solver " *
"if your continuous solver is conic)",
)
end
MOI.set(_cont(model), attr, block)
return
end
MOI.supports(model::Optimizer, attr::MOI.ObjectiveSense) = true
function MOI.set(model::Optimizer, attr::MOI.ObjectiveSense, sense)
if sense == MOI.FEASIBILITY_SENSE
model.objective = nothing
end
MOI.set(_mip(model), attr, sense)
MOI.set(_cont(model), attr, sense)
return
end
MOI.get(model::Optimizer, attr::MOI.ObjectiveSense) = MOI.get(_mip(model), attr)
function MOI.supports(model::Optimizer, attr::MOI.ObjectiveFunction)
return MOI.supports(_mip(model), attr) && MOI.supports(_cont(model), attr)
end
function MOI.supports(
model::Optimizer,
attr::MOI.ObjectiveFunction{MOI.ScalarQuadraticFunction{Float64}},
)
return MOI.supports(_cont(model), attr)
end
function MOI.set(model::Optimizer, attr::MOI.ObjectiveFunction, func)
# make a copy (as the user might modify it)
model.objective = copy(func)
MOI.set(_mip(model), attr, _map(model.mip_variables, func))
MOI.set(_cont(model), attr, _map(model.cont_variables, func))
return
end
function MOI.set(
model::Optimizer,
attr::MOI.ObjectiveFunction{MOI.ScalarQuadraticFunction{Float64}},
func::MOI.ScalarQuadraticFunction{Float64},
)
# make a copy (as the user might modify it) and canonicalize
model.objective = MOI.Utilities.canonical(func)
MOI.set(_cont(model), attr, _map(model.cont_variables, func))
return
end
function MOI.get(model::Optimizer, param::MOI.RawOptimizerAttribute)
return getproperty(model, Symbol(param.name))
end
function MOI.supports(::Optimizer, param::MOI.RawOptimizerAttribute)
return (Symbol(param.name) in fieldnames(Optimizer))
end
function MOI.set(model::Optimizer, param::MOI.RawOptimizerAttribute, value)
setproperty!(model, Symbol(param.name), value)
return
end
MOI.supports(::Optimizer, ::MOI.Silent) = true
function MOI.set(model::Optimizer, ::MOI.Silent, value::Bool)
model.log_level = value ? 0 : 1
return
end
MOI.get(model::Optimizer, ::MOI.Silent) = (model.log_level <= 0)
MOI.supports(::Optimizer, ::MOI.TimeLimitSec) = true
function MOI.set(model::Optimizer, ::MOI.TimeLimitSec, value::Nothing)
MOI.set(model, MOI.RawOptimizerAttribute("timeout"), Inf)
return
end
function MOI.set(model::Optimizer, ::MOI.TimeLimitSec, value)
MOI.set(model, MOI.RawOptimizerAttribute("timeout"), value)
return
end
function MOI.get(model::Optimizer, ::MOI.TimeLimitSec)
value = MOI.get(model, MOI.RawOptimizerAttribute("timeout"))
return isfinite(value) ? value : nothing
end
MOI.get(model::Optimizer, ::MOI.SolveTimeSec) = model.total_time
MOI.get(model::Optimizer, ::MOI.TerminationStatus) = model.status
MOI.get(model::Optimizer, ::MOI.RawStatusString) = string(model.status)
function MOI.get(
model::Optimizer,
attr::MOI.VariablePrimal,
v::MOI.VariableIndex,
)
MOI.check_result_index_bounds(model, attr)
return model.incumbent[v.value]
end
function MOI.get(model::Optimizer, attr::MOI.ObjectiveValue)
MOI.check_result_index_bounds(model, attr)
return model.objective_value
end
MOI.get(model::Optimizer, ::MOI.ObjectiveBound) = model.objective_bound
function MOI.get(model::Optimizer, attr::MOI.PrimalStatus)
if attr.result_index != 1
return MOI.NO_SOLUTION
end
term_status = MOI.get(model, MOI.TerminationStatus())
if term_status == MOI.LOCALLY_SOLVED
return MOI.FEASIBLE_POINT
elseif term_status == MOI.ALMOST_LOCALLY_SOLVED
return MOI.NEARLY_FEASIBLE_POINT
end
return MOI.NO_SOLUTION
end
MOI.get(::Optimizer, ::MOI.DualStatus) = MOI.NO_SOLUTION
function MOI.get(model::Optimizer, ::MOI.ResultCount)
return MOI.get(model, MOI.PrimalStatus()) == MOI.NO_SOLUTION ? 0 : 1
end
# utilities:
function _mip(model::Optimizer)
if model.mip_optimizer !== nothing
return model.mip_optimizer
end
if model.mip_solver === nothing
error("No MIP solver specified (set `mip_solver` attribute)\n")
end
model.mip_optimizer = MOI.Utilities.CachingOptimizer(
MOI.Utilities.UniversalFallback(MOI.Utilities.Model{Float64}()),
MOI.instantiate(model.mip_solver, with_bridge_type = Float64),
)
supports_lazy =
MOI.supports(model.mip_optimizer, MOI.LazyConstraintCallback())
if model.mip_solver_drives === nothing
model.mip_solver_drives = supports_lazy
elseif model.mip_solver_drives && !supports_lazy
error(
"MIP solver (`mip_solver`) does not support lazy constraint " *
"callbacks (cannot set `mip_solver_drives` attribute to `true`)",
)
end
return model.mip_optimizer
end
function _new_cont(optimizer_constructor)
if isnothing(optimizer_constructor)
error(
"No continuous NLP solver specified (set `cont_solver` attribute)",
)
end
optimizer =
MOI.instantiate(optimizer_constructor, with_bridge_type = Float64)
return optimizer
end
function _cont(model::Optimizer)
if isnothing(model.cont_optimizer)
model.cont_optimizer = _new_cont(model.cont_solver)
end
return model.cont_optimizer
end
function _infeasible(model::Optimizer)
if isnothing(model.infeasible_optimizer)
model.infeasible_optimizer = _new_cont(model.cont_solver)
MOI.set(model.infeasible_optimizer, MOI.ObjectiveSense(), MOI.MIN_SENSE)
end
return model.infeasible_optimizer
end
function _clean_slacks(model::Optimizer)
if !isnothing(model.nl_slack_variables)
MOI.delete(_infeasible(model), model.nl_slack_variables)
model.nl_slack_variables = nothing
end
if !isnothing(model.quad_LT_slack)
MOI.delete(_infeasible(model), model.quad_LT_slack)
model.quad_LT_slack = nothing
MOI.delete(_infeasible(model), model.quad_LT_infeasible_con)
model.quad_LT_infeasible_con = nothing
end
if !isnothing(model.quad_GT_slack)
MOI.delete(_infeasible(model), model.quad_GT_slack)
model.quad_GT_slack = nothing
MOI.delete(_infeasible(model), model.quad_GT_infeasible_con)
model.quad_GT_infeasible_con = nothing
end
return
end
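# Illustrative setup sketch (not part of the original sources): Pavito needs
# both a `mip_solver` and a `cont_solver`; GLPK and Ipopt below are only
# example choices (assumed to be installed):
#
#   import MathOptInterface as MOI
#   import Pavito, GLPK, Ipopt
#   optimizer = MOI.instantiate(
#       MOI.OptimizerWithAttributes(
#           Pavito.Optimizer,
#           "mip_solver" => MOI.OptimizerWithAttributes(GLPK.Optimizer),
#           "cont_solver" => MOI.OptimizerWithAttributes(Ipopt.Optimizer),
#           "mip_solver_drives" => false,
#       ),
#   )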
| Pavito | https://github.com/jump-dev/Pavito.jl.git |
|
[
"MPL-2.0"
] | 0.3.9 | 55c5d8356b684e56e7309386ec86ecd9ef9d17da | code | 728 | # Copyright 2017, Chris Coey and Miles Lubin
# Copyright 2016, Los Alamos National Laboratory, LANS LLC.
# This Source Code Form is subject to the terms of the Mozilla Public
# License, v. 2.0. If a copy of the MPL was not distributed with this
# file, You can obtain one at http://mozilla.org/MPL/2.0/.
#=========================================================
This package contains the mixed-integer convex programming (MICP) solver Pavito
See README.md for details
=========================================================#
module Pavito
import Printf
import MathOptInterface
const MOI = MathOptInterface
include("infeasible_nlp.jl")
include("optimize.jl")
include("cut_utils.jl")
include("MOI_wrapper.jl")
end
| Pavito | https://github.com/jump-dev/Pavito.jl.git |
|
[
"MPL-2.0"
] | 0.3.9 | 55c5d8356b684e56e7309386ec86ecd9ef9d17da | code | 7867 | # Copyright 2017, Chris Coey and Miles Lubin
# Copyright 2016, Los Alamos National Laboratory, LANS LLC.
# This Source Code Form is subject to the terms of the Mozilla Public
# License, v. 2.0. If a copy of the MPL was not distributed with this
# file, You can obtain one at http://mozilla.org/MPL/2.0/.
#=========================================================
gradient cut utilities
=========================================================#
# by convexity of g(x), we know that g(x) >= g(c) + g'(c) * (x - c)
# given a constraint ub >= g(x), we rewrite it as:
# ub - g(c) + g'(c) * c >= g'(c) * x
# if the constraint is `lb <= g(x)`, we assume `g(x)` is concave, so:
# lb - g(c) + g'(c) * c <= g'(c) * x
# if the constraint is `lb <= g(x) <= ub` or `g(x) == lb == ub`, we assume
# `g(x)` is linear
function _add_cut(
model::Optimizer,
cont_solution,
gc,
dgc_idx,
dgc_nzv,
set,
callback_data,
)
Δ = 0.0
for i in eachindex(dgc_idx)
Δ += dgc_nzv[i] * cont_solution[dgc_idx[i]]
end
safs = [
MOI.ScalarAffineTerm(dgc_nzv[i], model.mip_variables[dgc_idx[i]])
for i in eachindex(dgc_idx)
]
func = MOI.ScalarAffineFunction(safs, 0.0)
MOI.Utilities.canonicalize!(func)
set = MOI.Utilities.shift_constant(set, Δ - gc)
if !isempty(func.terms)
if isnothing(callback_data)
MOI.add_constraint(model.mip_optimizer, func, set)
else
_add_lazy_constraint(
model,
callback_data,
func,
set,
model.mip_solution,
)
end
end
return
end
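# Worked example (illustrative, not part of the original sources): for the
# scalar constraint g(x) = x^2 <= ub linearized at c = 2 we have g(c) = 4,
# g'(c) = 4 and Δ = g'(c) * c = 8, so the cut added above reads
# 4 * x <= ub - 4 + 8, i.e. 4 * x - 4 <= ub, which is valid for every feasible
# x because 4 * x - 4 <= x^2 by convexity of x^2.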
function _add_quad_cuts(model::Optimizer, cont_solution, cons, callback_data)
for (func, set) in cons
gc = _eval_func(cont_solution, func)
dgc_idx = Int64[]
dgc_nzv = Float64[]
for term in func.affine_terms
push!(dgc_idx, term.variable.value)
push!(dgc_nzv, term.coefficient)
end
for term in func.quadratic_terms
push!(dgc_idx, term.variable_1.value)
push!(
dgc_nzv,
term.coefficient * cont_solution[term.variable_2.value],
)
# if variables are the same, the coefficient is already multiplied by
# 2 by definition of `MOI.ScalarQuadraticFunction{Float64}`
if term.variable_1 != term.variable_2
push!(dgc_idx, term.variable_2.value)
push!(
dgc_nzv,
term.coefficient * cont_solution[term.variable_1.value],
)
end
end
_add_cut(model, cont_solution, gc, dgc_idx, dgc_nzv, set, callback_data)
end
return
end
function _add_cuts(
model::Optimizer,
cont_solution,
jac_IJ,
jac_V,
grad_f,
is_max,
callback_data = nothing,
)
if !isnothing(model.nlp_block)
# eval g and jac_g at MIP solution
num_constrs = length(model.nlp_block.constraint_bounds)
g = zeros(num_constrs)
MOI.eval_constraint(model.nlp_block.evaluator, g, cont_solution)
MOI.eval_constraint_jacobian(
model.nlp_block.evaluator,
jac_V,
cont_solution,
)
# create rows corresponding to constraints in sparse format
varidx_new = [zeros(Int, 0) for i in 1:num_constrs]
coef_new = [zeros(0) for i in 1:num_constrs]
for k in eachindex(jac_IJ)
row, col = jac_IJ[k]
push!(varidx_new[row], col)
push!(coef_new[row], jac_V[k])
end
# create constraint cuts
for i in 1:num_constrs
# create supporting hyperplane
set = _bound_set(model, i)
_add_cut(
model,
cont_solution,
g[i],
varidx_new[i],
coef_new[i],
set,
callback_data,
)
end
end
_add_quad_cuts(model, cont_solution, model.quad_LT, callback_data)
_add_quad_cuts(model, cont_solution, model.quad_GT, callback_data)
# given an objective `Min nlp_obj_var = f(x)` with a convex `f(x)`:
# -f(c) + f'(c) * c >= f'(c) * x - nlp_obj_var
# if the objective is `Max`, we assume `f(x)` is concave, so:
# -f(c) + f'(c) * c <= f'(c) * x - nlp_obj_var
# create objective cut
if (!isnothing(model.nlp_block) && model.nlp_block.has_objective) ||
model.objective isa MOI.ScalarQuadraticFunction{Float64}
f = _eval_objective(model, cont_solution)
_eval_objective_gradient(model, grad_f, cont_solution)
constant = -f
func = MOI.Utilities.operate(-, Float64, model.nlp_obj_var)
for j in eachindex(grad_f)
if !iszero(grad_f[j])
constant += grad_f[j] * cont_solution[j]
push!(
func.terms,
MOI.ScalarAffineTerm(grad_f[j], model.mip_variables[j]),
)
end
end
set = (
is_max ? MOI.GreaterThan{Float64}(constant) :
MOI.LessThan{Float64}(constant)
)
if isnothing(callback_data)
MOI.add_constraint(model.mip_optimizer, func, set)
else
nlp_obj_var = MOI.get(
model.mip_optimizer,
MOI.CallbackVariablePrimal(callback_data),
model.nlp_obj_var,
)
_add_lazy_constraint(
model,
callback_data,
func,
set,
vcat(model.mip_solution, nlp_obj_var),
)
end
end
return
end
function _add_lazy_constraint(model, callback_data, func, set, mip_solution)
# GLPK does not check whether the new cut is redundant, so we filter it out
# see https://github.com/jump-dev/GLPK.jl/issues/153
if !_approx_in(_eval_func(mip_solution, func), set)
MOI.submit(
model.mip_optimizer,
MOI.LazyConstraint(callback_data),
func,
set,
)
end
return
end
function _eval_objective(model::Optimizer, values)
if !isnothing(model.nlp_block) && model.nlp_block.has_objective
return MOI.eval_objective(model.nlp_block.evaluator, values)
else
return _eval_func(values, model.objective)
end
end
function _eval_gradient(
func::MOI.ScalarQuadraticFunction{Float64},
grad_f,
values,
)
fill!(grad_f, 0.0)
for term in func.affine_terms
grad_f[term.variable.value] += term.coefficient
end
for term in func.quadratic_terms
grad_f[term.variable_1.value] +=
term.coefficient * values[term.variable_2.value]
# if variables are the same, the coefficient is already multiplied by 2
if term.variable_1 != term.variable_2
grad_f[term.variable_2.value] +=
term.coefficient * values[term.variable_1.value]
end
end
return
end
function _eval_objective_gradient(model::Optimizer, grad_f, values)
if (!isnothing(model.nlp_block) && model.nlp_block.has_objective)
MOI.eval_objective_gradient(model.nlp_block.evaluator, grad_f, values)
else
_eval_gradient(model.objective, grad_f, values)
end
return
end
# `isapprox(0.0, 1e-16)` is false but `_is_approx(0.0, 1e-16)` is true.
_is_approx(x, y) = isapprox(x, y, atol = Base.rtoldefault(Float64))
_approx_in(value, set::MOI.EqualTo) = _is_approx(value, MOI.constant(set))
function _approx_in(value, set::MOI.LessThan{Float64})
return _is_approx(value, MOI.constant(set)) || value < MOI.constant(set)
end
function _approx_in(value, set::MOI.GreaterThan{Float64})
return _is_approx(value, MOI.constant(set)) || value > MOI.constant(set)
end
| Pavito | https://github.com/jump-dev/Pavito.jl.git |
|
[
"MPL-2.0"
] | 0.3.9 | 55c5d8356b684e56e7309386ec86ecd9ef9d17da | code | 2141 | # Copyright 2017, Chris Coey and Miles Lubin
# Copyright 2016, Los Alamos National Laboratory, LANS LLC.
# This Source Code Form is subject to the terms of the Mozilla Public
# License, v. 2.0. If a copy of the MPL was not distributed with this
# file, You can obtain one at http://mozilla.org/MPL/2.0/.
#=========================================================
wrapped NLP solver for infeasible subproblem case
=========================================================#
struct _InfeasibleNLPEvaluator <: MOI.AbstractNLPEvaluator
d::MOI.AbstractNLPEvaluator
num_variables::Int
minus::BitVector
end
function MOI.initialize(
d::_InfeasibleNLPEvaluator,
requested_features::Vector{Symbol},
)
MOI.initialize(d.d, requested_features)
return
end
function MOI.features_available(d::_InfeasibleNLPEvaluator)
return intersect([:Grad, :Jac, :Hess], MOI.features_available(d.d))
end
function MOI.eval_constraint(d::_InfeasibleNLPEvaluator, g, x)
MOI.eval_constraint(d.d, g, x[1:d.num_variables])
for i in eachindex(d.minus)
# the slack enters with coefficient -1 for `minus[i]` (upper-bounded
# constraints) and +1 otherwise, matching the Jacobian entries below
g[i] -= (d.minus[i] ? 1.0 : -1.0) * x[d.num_variables+i]
end
return
end
function MOI.jacobian_structure(d::_InfeasibleNLPEvaluator)
IJ_new = copy(MOI.jacobian_structure(d.d))
for i in eachindex(d.minus)
push!(IJ_new, (i, d.num_variables + i))
end
return IJ_new
end
function MOI.eval_constraint_jacobian(d::_InfeasibleNLPEvaluator, J, x)
MOI.eval_constraint_jacobian(d.d, J, x[1:d.num_variables])
k = length(J) - length(d.minus)
for i in eachindex(d.minus)
J[k+i] = (d.minus[i] ? -1.0 : 1.0)
end
return
end
# Hessian: add linear terms and remove the objective so the hessian of the
# objective is zero and the hessian of the constraints is unaffected;
# also set `σ = 0.0` to absorb the contribution of the hessian of the objective
function MOI.hessian_lagrangian_structure(d::_InfeasibleNLPEvaluator)
return MOI.hessian_lagrangian_structure(d.d)
end
function MOI.eval_hessian_lagrangian(d::_InfeasibleNLPEvaluator, H, x, σ, μ)
return MOI.eval_hessian_lagrangian(d.d, H, x[1:d.num_variables], 0.0, μ)
end
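# Illustrative sketch of the slack reformulation handled above (not part of
# the original sources): a subproblem constraint g(x) <= ub becomes
# g(x) - s <= ub with a slack s >= 0 (`minus[i] = true`), and g(x) >= lb
# becomes g(x) + s >= lb (`minus[i] = false`); e.g. x^2 <= 1 turns into
# x^2 - s <= 1. Minimizing the total slack then measures the constraint
# violation when the original subproblem is infeasible.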
| Pavito | https://github.com/jump-dev/Pavito.jl.git |
|
[
"MPL-2.0"
] | 0.3.9 | 55c5d8356b684e56e7309386ec86ecd9ef9d17da | code | 28613 | # Copyright 2017, Chris Coey and Miles Lubin
# Copyright 2016, Los Alamos National Laboratory, LANS LLC.
# This Source Code Form is subject to the terms of the Mozilla Public
# License, v. 2.0. If a copy of the MPL was not distributed with this
# file, You can obtain one at http://mozilla.org/MPL/2.0/.
#=========================================================
Optimizer object and algorithm
=========================================================#
mutable struct Optimizer <: MOI.AbstractOptimizer
log_level::Int # Verbosity flag: 0 for quiet, higher for basic solve info
timeout::Float64 # Time limit for algorithm (in seconds)
rel_gap::Float64 # Relative optimality gap termination condition
mip_solver_drives::Union{Nothing,Bool} # Let MIP solver manage convergence ("branch and cut")
mip_solver::Union{Nothing,MOI.OptimizerWithAttributes} # MIP solver constructor
cont_solver::Union{Nothing,MOI.OptimizerWithAttributes} # Continuous NLP solver constructor
mip_optimizer::Union{Nothing,MOI.ModelLike} # MIP optimizer instantiated from `mip_solver`
cont_optimizer::Union{Nothing,MOI.ModelLike} # Continuous NLP optimizer instantiated from `cont_solver`
infeasible_optimizer::Union{Nothing,MOI.ModelLike} # Continuous NLP optimizer instantiated from `cont_solver`, used for infeasible subproblems
nlp_obj_var::Union{Nothing,MOI.VariableIndex} # new MIP objective function if the original is nonlinear
mip_variables::Vector{MOI.VariableIndex} # Variable indices of `mip_optimizer`
cont_variables::Vector{MOI.VariableIndex} # Variable indices of `cont_optimizer`
infeasible_variables::Vector{MOI.VariableIndex} # Variable indices of `infeasible_optimizer`
# Slack variable indices for `infeasible_optimizer`
nl_slack_variables::Union{Nothing,Vector{MOI.VariableIndex}} # for the nonlinear constraints
quad_LT_slack::Union{Nothing,Vector{MOI.VariableIndex}} # for the less than constraints
quad_GT_slack::Union{Nothing,Vector{MOI.VariableIndex}} # for the greater than constraints
# Quadratic constraints for `infeasible_optimizer`
quad_LT_infeasible_con::Union{
Nothing,
Vector{
MOI.ConstraintIndex{
MOI.ScalarQuadraticFunction{Float64},
MOI.LessThan{Float64},
},
},
} # `q - slack <= ub`
quad_GT_infeasible_con::Union{
Nothing,
Vector{
MOI.ConstraintIndex{
MOI.ScalarQuadraticFunction{Float64},
MOI.GreaterThan{Float64},
},
},
} # `q + slack >= lb`
infeasible_evaluator::_InfeasibleNLPEvaluator # NLP evaluator used for `infeasible_optimizer`
int_indices::BitSet # Indices of discrete variables
nlp_block::Union{Nothing,MOI.NLPBlockData} # NLP block set to `Optimizer`
objective::Union{Nothing,MOI.AbstractScalarFunction} # Objective function set to `Optimizer`
quad_LT::Vector{
Tuple{MOI.ScalarQuadraticFunction{Float64},MOI.LessThan{Float64}},
} # Cached quadratic less than constraints
quad_GT::Vector{
Tuple{MOI.ScalarQuadraticFunction{Float64},MOI.GreaterThan{Float64}},
} # Cached quadratic greater than constraints
status::MOI.TerminationStatusCode # Termination status to be returned
incumbent::Vector{Float64} # Starting values set and then current best nonlinear feasible solution
new_incumb::Bool # `true` if a better nonlinear feasible solution was found
mip_solution::Vector{Float64} # MIP solution cached for used to check redundancy of lazy constraint
total_time::Float64 # Total solve time
objective_value::Float64 # Objective value corresponding to `incumbent`
objective_bound::Float64 # Best objective bound found by MIP
objective_gap::Float64 # Objective gap between objective value and bound
num_iters_or_callbacks::Int # Either the number of iterations or the number of calls to the lazy constraint callback if `mip_solver_drives`
function Optimizer()
model = new()
model.log_level = 1
model.timeout = Inf
model.rel_gap = 1e-5
model.mip_solver_drives = nothing
model.mip_solver = nothing
model.cont_solver = nothing
MOI.empty!(model)
return model
end
end
function _print_model_summary(model)
obj_type = if !isnothing(model.nlp_block) && model.nlp_block.has_objective
"nonlinear"
elseif model.objective isa MOI.ScalarQuadraticFunction{Float64}
"quadratic"
else
"linear"
end
ncont = length(model.cont_variables)
nint = length(model.int_indices)
nnlp = if isnothing(model.nlp_block)
0
else
length(model.nlp_block.constraint_bounds)
end
nquad = length(model.quad_LT) + length(model.quad_GT)
println(
"\nMINLP has a $obj_type objective, $ncont continuous variables, " *
"$nint integer variables, $nnlp nonlinear constraints, and " *
"$nquad quadratic constraints",
)
alg = model.mip_solver_drives ? "MIP-solver-driven" : "iterative"
println("\nPavito started, using $alg method...")
return
end
function _clean_up_algorithm(model, start, mip_time, nlp_time)
model.total_time = time() - start
if model.log_level > 0
println("\nPavito finished...\n")
Printf.@printf("Status %13s\n", model.status)
Printf.@printf("Objective value %13.5f\n", model.objective_value)
Printf.@printf("Objective bound %13.5f\n", model.objective_bound)
Printf.@printf("Objective gap %13.5f\n", model.objective_gap)
if !model.mip_solver_drives
Printf.@printf(
"Iterations %13d\n",
model.num_iters_or_callbacks
)
else
Printf.@printf(
"Callbacks %13d\n",
model.num_iters_or_callbacks
)
end
Printf.@printf("Total time %13.5f sec\n", model.total_time)
Printf.@printf("MIP total time %13.5f sec\n", mip_time)
Printf.@printf("NLP total time %13.5f sec\n", nlp_time)
println()
end
flush(stdout)
return
end
abstract type _AbstractAlgorithm end
struct _MIPSolverDrivenAlgorithm end
function _run_algorithm(
::_MIPSolverDrivenAlgorithm,
model,
start,
mip_time,
nlp_time,
jac_IJ,
jac_V,
grad_f,
is_max,
)
comp = is_max ? (>) : (<)
cache_contsol = Dict{Vector{Float64},Vector{Float64}}()
function lazy_callback(cb)
model.num_iters_or_callbacks += 1
model.mip_solution = MOI.get(
model.mip_optimizer,
MOI.CallbackVariablePrimal(cb),
model.mip_variables,
)
round_mipsol = round.(model.mip_solution)
if any(
i -> abs(model.mip_solution[i] - round_mipsol[i]) > 1e-5,
model.int_indices,
)
# The solution is integer-infeasible; see:
# https://github.com/jump-dev/GLPK.jl/issues/146
# https://github.com/jump-dev/MathOptInterface.jl/pull/1172
@warn "Integer-infeasible solution in lazy callback"
return
end
# If integer assignment has been seen before, use cached point
if haskey(cache_contsol, round_mipsol)
cont_solution = cache_contsol[round_mipsol]
else # Try to solve new subproblem, update incumbent if feasible.
start_nlp = time()
cont_solution = _solve_subproblem(model, comp)
nlp_time += time() - start_nlp
cache_contsol[round_mipsol] = cont_solution
end
# Add gradient cuts to MIP model from NLP solution
if all(isfinite, cont_solution)
_add_cuts(model, cont_solution, jac_IJ, jac_V, grad_f, is_max, cb)
else
@warn "no cuts could be added, Pavito should be terminated"
# TODO terminate the solver once there is a
# solver-independent callback for that in MOI
return
end
end
MOI.set(model.mip_optimizer, MOI.LazyConstraintCallback(), lazy_callback)
function heuristic_callback(cb)
# If have a new best feasible solution since last heuristic
# solution added, set MIP solution to the new incumbent.
if !model.new_incumb
return
elseif isnothing(model.nlp_obj_var)
MOI.submit(
model.mip_optimizer,
MOI.HeuristicSolution(cb),
model.mip_variables,
model.incumbent,
)
else
MOI.submit(
model.mip_optimizer,
MOI.HeuristicSolution(cb),
vcat(model.mip_variables, model.nlp_obj_var),
vcat(model.incumbent, model.objective_value),
)
end
model.new_incumb = false
return
end
MOI.set(model.mip_optimizer, MOI.HeuristicCallback(), heuristic_callback)
if isfinite(model.timeout)
MOI.set(
model.mip_optimizer,
MOI.TimeLimitSec(),
max(1.0, model.timeout - (time() - start)),
)
end
MOI.optimize!(model.mip_optimizer)
mip_status = MOI.get(model.mip_optimizer, MOI.TerminationStatus())
if mip_status == MOI.OPTIMAL
model.status = MOI.LOCALLY_SOLVED
elseif mip_status == MOI.ALMOST_OPTIMAL
model.status = MOI.ALMOST_LOCALLY_SOLVED
else
model.status = mip_status
end
model.objective_bound = MOI.get(model.mip_optimizer, MOI.ObjectiveBound())
_update_gap(model, is_max)
return mip_time, nlp_time
end
struct _IterativeAlgorithm end
function _run_algorithm(
::_IterativeAlgorithm,
model,
start,
mip_time,
nlp_time,
jac_IJ,
jac_V,
grad_f,
is_max,
)
comp = is_max ? (>) : (<)
prev_mip_solution = fill(NaN, length(model.mip_variables))
while time() - start < model.timeout
model.num_iters_or_callbacks += 1
# Set remaining time limit on MIP solver
if isfinite(model.timeout)
MOI.set(
model.mip_optimizer,
MOI.TimeLimitSec(),
max(1.0, model.timeout - (time() - start)),
)
end
# Solve MIP model
start_mip = time()
MOI.optimize!(model.mip_optimizer)
mip_status = MOI.get(model.mip_optimizer, MOI.TerminationStatus())
mip_time += time() - start_mip
# Finish if MIP was infeasible or if problematic status
if mip_status in (MOI.INFEASIBLE, MOI.INFEASIBLE_OR_UNBOUNDED)
model.status = MOI.LOCALLY_INFEASIBLE
break
elseif (mip_status != MOI.OPTIMAL) && (mip_status != MOI.ALMOST_OPTIMAL)
@warn "MIP solver status was $mip_status, terminating Pavito"
model.status = mip_status
break
end
model.mip_solution = MOI.get(
model.mip_optimizer,
MOI.VariablePrimal(),
model.mip_variables,
)
# Update best bound from MIP bound
mip_obj_bound = MOI.get(model.mip_optimizer, MOI.ObjectiveBound())
if isfinite(mip_obj_bound) && comp(model.objective_bound, mip_obj_bound)
model.objective_bound = mip_obj_bound
end
_update_gap(model, is_max)
_print_gap(model, start)
if _check_progress(model, prev_mip_solution)
break
end
# Try to solve new subproblem, update incumbent if feasible
start_nlp = time()
cont_solution = _solve_subproblem(model, comp)
nlp_time += time() - start_nlp
_update_gap(model, is_max)
if _check_progress(model, prev_mip_solution)
break
end
# Add gradient cuts to MIP model from NLP solution
if all(isfinite, cont_solution)
_add_cuts(model, cont_solution, jac_IJ, jac_V, grad_f, is_max)
else
@warn "no cuts could be added, terminating Pavito"
break
end
# TODO warmstart MIP from incumbent
prev_mip_solution = model.mip_solution
flush(stdout)
end
return mip_time, nlp_time
end
function MOI.optimize!(model::Optimizer)
model.status = MOI.OPTIMIZE_NOT_CALLED
fill!(model.incumbent, NaN)
model.new_incumb = false
model.total_time = 0.0
model.objective_value = NaN
model.objective_bound = NaN
model.objective_gap = Inf
model.num_iters_or_callbacks = 0
if isempty(model.int_indices) && model.log_level >= 1
@warn "No variables of type integer or binary; call the continuous " *
"solver directly for pure continuous problems."
end
if (!isnothing(model.nlp_block) && model.nlp_block.has_objective) ||
model.objective isa MOI.ScalarQuadraticFunction{Float64}
if isnothing(model.nlp_obj_var)
model.nlp_obj_var = MOI.add_variable(model.mip_optimizer)
MOI.set(
model.mip_optimizer,
MOI.ObjectiveFunction{MOI.VariableIndex}(),
model.nlp_obj_var,
)
end
else
if !isnothing(model.nlp_obj_var)
MOI.delete(model.mip_optimizer, model.nlp_obj_var)
model.nlp_obj_var = nothing
end
end
start = time()
nlp_time = 0.0
mip_time = 0.0
is_max =
MOI.get(model.cont_optimizer, MOI.ObjectiveSense()) == MOI.MAX_SENSE
model.objective_value = is_max ? -Inf : Inf
model.objective_bound = -model.objective_value
if model.log_level > 0
_print_model_summary(model)
end
flush(stdout)
if isnothing(model.nlp_block)
jac_V = jac_IJ = nothing
else
MOI.initialize(
model.nlp_block.evaluator,
intersect(
[:Grad, :Jac, :Hess],
MOI.features_available(model.nlp_block.evaluator),
),
)
jac_IJ = MOI.jacobian_structure(model.nlp_block.evaluator)
jac_V = zeros(length(jac_IJ))
end
grad_f = zeros(length(model.cont_variables))
# Solve initial continuous relaxation NLP model.
start_nlp = time()
MOI.optimize!(model.cont_optimizer)
nlp_time += time() - start_nlp
ini_nlp_status = MOI.get(model.cont_optimizer, MOI.TerminationStatus())
cont_solution, cont_obj = nothing, 0.0
if ini_nlp_status in (MOI.OPTIMAL, MOI.LOCALLY_SOLVED, MOI.ALMOST_OPTIMAL)
cont_solution = MOI.get(
model.cont_optimizer,
MOI.VariablePrimal(),
model.cont_variables,
)
cont_obj = MOI.get(model.cont_optimizer, MOI.ObjectiveValue())
if isempty(model.int_indices)
model.objective_value = cont_obj
model.objective_bound = cont_obj
_update_gap(model, is_max)
model.mip_solution = Float64[]
model.incumbent = cont_solution
else
_add_cuts(model, cont_solution, jac_IJ, jac_V, grad_f, is_max)
end
elseif ini_nlp_status == MOI.DUAL_INFEASIBLE
# The integrality constraints may make the MINLP bounded so we continue
@warn "initial NLP relaxation unbounded"
elseif ini_nlp_status == MOI.NORM_LIMIT
# Ipopt usually ends with `Diverging_Iterates` for unbounded problems,
# this gets converted to `MOI.NORM_LIMIT`
@warn "initial NLP relaxation terminated with status `NORM_LIMIT` " *
"which usually means that the problem is unbounded"
else
if ini_nlp_status in
(MOI.INFEASIBLE, MOI.LOCALLY_INFEASIBLE, MOI.ALMOST_INFEASIBLE)
# The original problem is infeasible too
@warn "initial NLP relaxation infeasible"
model.status = ini_nlp_status
else
@warn "NLP solver failure: initial NLP relaxation terminated with " *
"status $ini_nlp_status"
model.status = ini_nlp_status
end
return
end
flush(stdout)
# If there is no integrality, exit early! We've already solved the NLP.
if isempty(model.int_indices)
model.status = ini_nlp_status
_clean_up_algorithm(model, start, mip_time, nlp_time)
return
end
# Set a VariablePrimalStart if supported by the solver
if !isnothing(cont_solution) &&
MOI.supports(
model.mip_optimizer,
MOI.VariablePrimalStart(),
MOI.VariableIndex,
) &&
all(isfinite, cont_solution)
MOI.set(
model.mip_optimizer,
MOI.VariablePrimalStart(),
model.mip_variables,
cont_solution,
)
if !isnothing(model.nlp_obj_var)
MOI.set(
model.mip_optimizer,
MOI.VariablePrimalStart(),
model.nlp_obj_var,
cont_obj,
)
end
end
algorithm = if model.mip_solver_drives
_MIPSolverDrivenAlgorithm()
else
_IterativeAlgorithm()
end
mip_time, nlp_time = _run_algorithm(
algorithm,
model,
start,
mip_time,
nlp_time,
jac_IJ,
jac_V,
grad_f,
is_max,
)
flush(stdout)
_clean_up_algorithm(model, start, mip_time, nlp_time)
return
end
function _update_gap(model::Optimizer, is_max::Bool)
# Update gap if best bound and best objective are finite
if isfinite(model.objective_value) && isfinite(model.objective_bound)
model.objective_gap =
(model.objective_value - model.objective_bound) /
(abs(model.objective_value) + 1e-5)
if is_max
model.objective_gap = -model.objective_gap
end
end
return
end
function _check_progress(model::Optimizer, prev_mip_solution)
# Finish if optimal or cycling integer solutions.
int_ind = collect(model.int_indices)
if model.objective_gap <= model.rel_gap
model.status = MOI.LOCALLY_SOLVED
return true
elseif round.(prev_mip_solution[int_ind]) ==
round.(model.mip_solution[int_ind])
@warn "mixed-integer cycling detected, terminating Pavito"
if isfinite(model.objective_gap)
model.status = MOI.ALMOST_LOCALLY_SOLVED
else
model.status = MOI.OTHER_ERROR
end
return true
end
return false
end
function _fix_int_vars(
optimizer::MOI.ModelLike,
vars,
mip_solution,
int_indices,
)
F = MOI.VariableIndex
for i in int_indices
x = vars[i]
# We need to delete any conflicting bound constraints before we set or
# update the x == mip_solution[i] constraint.
ci = MOI.ConstraintIndex{F,MOI.Interval{Float64}}(x.value)
if MOI.is_valid(optimizer, ci)
MOI.delete(optimizer, ci)
end
ci = MOI.ConstraintIndex{F,MOI.LessThan{Float64}}(x.value)
if MOI.is_valid(optimizer, ci)
MOI.delete(optimizer, ci)
end
ci = MOI.ConstraintIndex{F,MOI.GreaterThan{Float64}}(x.value)
if MOI.is_valid(optimizer, ci)
MOI.delete(optimizer, ci)
end
ci = MOI.ConstraintIndex{F,MOI.EqualTo{Float64}}(x.value)
set = MOI.EqualTo(mip_solution[i])
if MOI.is_valid(optimizer, ci)
MOI.set(optimizer, MOI.ConstraintSet(), ci, set)
else
MOI.add_constraint(optimizer, x, set)
end
end
return
end
# solve NLP subproblem defined by integer assignment
function _solve_subproblem(model::Optimizer, comp::Function)
_fix_int_vars(
model.cont_optimizer,
model.cont_variables,
model.mip_solution,
model.int_indices,
)
MOI.optimize!(model.cont_optimizer)
primal_status = MOI.get(model.cont_optimizer, MOI.PrimalStatus())
if primal_status in (MOI.FEASIBLE_POINT, MOI.NEARLY_FEASIBLE_POINT)
# Subproblem is feasible, check if solution is new incumbent
nlp_objective_value =
MOI.get(model.cont_optimizer, MOI.ObjectiveValue())
nlp_solution = MOI.get(
model.cont_optimizer,
MOI.VariablePrimal(),
model.cont_variables,
)
if comp(nlp_objective_value, model.objective_value)
model.objective_value = nlp_objective_value
copyto!(model.incumbent, nlp_solution)
model.new_incumb = true
end
return nlp_solution
end
# Assume subproblem is infeasible, so solve infeasible recovery NLP
# subproblem.
if (
!isnothing(model.nlp_block) &&
!isempty(model.nlp_block.constraint_bounds) &&
isnothing(model.nl_slack_variables)
) ||
(!isempty(model.quad_LT) && isnothing(model.quad_LT_slack)) ||
(!isempty(model.quad_GT) && isnothing(model.quad_GT_slack))
if !isnothing(model.nl_slack_variables)
obj = MOI.ScalarAffineFunction(
MOI.ScalarAffineTerm.(1.0, model.nl_slack_variables),
0.0,
)
else
obj = MOI.ScalarAffineFunction(MOI.ScalarAffineTerm{Float64}[], 0.0)
end
function _add_to_obj(vi::MOI.VariableIndex)
return push!(obj.terms, MOI.ScalarAffineTerm(1.0, vi))
end
if !isnothing(model.nlp_block) &&
!isempty(model.nlp_block.constraint_bounds)
bounds = copy(model.nlp_block.constraint_bounds)
model.infeasible_evaluator = _InfeasibleNLPEvaluator(
model.nlp_block.evaluator,
length(model.infeasible_variables),
falses(length(model.nlp_block.constraint_bounds)),
)
model.nl_slack_variables = MOI.add_variables(
_infeasible(model),
length(model.nlp_block.constraint_bounds),
)
for i in eachindex(model.nlp_block.constraint_bounds)
_add_to_obj(model.nl_slack_variables[i])
push!(bounds, MOI.NLPBoundsPair(0.0, Inf))
set = _bound_set(model, i)
if set isa MOI.LessThan{Float64}
model.infeasible_evaluator.minus[i] = true
end
end
MOI.set(
_infeasible(model),
MOI.NLPBlock(),
MOI.NLPBlockData(bounds, model.infeasible_evaluator, false),
)
end
# We need to add quadratic variables afterwards because
# `_InfeasibleNLPEvaluator` assumes the original variables are directly
# followed by the NL slacks.
if !isempty(model.quad_LT)
model.quad_LT_slack =
MOI.add_variables(_infeasible(model), length(model.quad_LT))
model.quad_LT_infeasible_con = map(eachindex(model.quad_LT)) do i
(func, set) = model.quad_LT[i]
new_func = MOI.Utilities.operate(
-,
Float64,
func,
model.quad_LT_slack[i],
)
return MOI.add_constraint(_infeasible(model), new_func, set)
end
for vi in model.quad_LT_slack
_add_to_obj(vi)
end
end
if !isempty(model.quad_GT)
model.quad_GT_slack =
MOI.add_variables(_infeasible(model), length(model.quad_GT))
model.quad_GT_infeasible_con = map(eachindex(model.quad_GT)) do i
(func, set) = model.quad_GT[i]
new_func = MOI.Utilities.operate(
+,
Float64,
func,
model.quad_GT_slack[i],
)
return MOI.add_constraint(_infeasible(model), new_func, set)
end
for vi in model.quad_GT_slack
_add_to_obj(vi)
end
end
MOI.set(_infeasible(model), MOI.ObjectiveFunction{typeof(obj)}(), obj)
end
_fix_int_vars(
model.infeasible_optimizer,
model.infeasible_variables,
model.mip_solution,
model.int_indices,
)
MOI.set(
_infeasible(model),
MOI.VariablePrimalStart(),
model.infeasible_variables,
model.mip_solution,
)
if !isnothing(model.nlp_block) &&
!isempty(model.nlp_block.constraint_bounds)
fill!(model.infeasible_evaluator.minus, false)
g = zeros(length(model.nlp_block.constraint_bounds))
MOI.eval_constraint(model.nlp_block.evaluator, g, model.mip_solution)
for i in eachindex(model.nlp_block.constraint_bounds)
bounds = model.nlp_block.constraint_bounds[i]
val = if model.infeasible_evaluator.minus[i]
g[i] - bounds.upper
else
bounds.lower - g[i]
end
# Sign of the slack changes if the constraint direction changes.
MOI.set(
_infeasible(model),
MOI.VariablePrimalStart(),
model.nl_slack_variables[i],
max(0.0, val),
)
end
end
for i in eachindex(model.quad_LT)
val =
_eval_func(model.mip_solution, model.quad_LT[i][1]) -
model.quad_LT[i][2].upper
MOI.set(
_infeasible(model),
MOI.VariablePrimalStart(),
model.quad_LT_slack[i],
max(0.0, val),
)
end
for i in eachindex(model.quad_GT)
val =
model.quad_GT[i][2].lower -
_eval_func(model.mip_solution, model.quad_GT[i][1])
MOI.set(
_infeasible(model),
MOI.VariablePrimalStart(),
model.quad_GT_slack[i],
max(0.0, val),
)
end
MOI.optimize!(model.infeasible_optimizer)
status = MOI.get(model.infeasible_optimizer, MOI.PrimalStatus())
if status != MOI.FEASIBLE_POINT
@warn "Infeasible NLP problem terminated with primal status: $status"
end
return MOI.get(
model.infeasible_optimizer,
MOI.VariablePrimal(),
model.infeasible_variables,
)
end
# print objective gap information for iterative
function _print_gap(model::Optimizer, start)
if model.log_level >= 1
if model.num_iters_or_callbacks == 1 || model.log_level >= 2
Printf.@printf(
"\n%-5s | %-14s | %-14s | %-11s | %-11s\n",
"Iter.",
"Best feasible",
"Best bound",
"Rel. gap",
"Time (s)",
)
end
if model.objective_gap < 1000
Printf.@printf(
"%5d | %+14.6e | %+14.6e | %11.3e | %11.3e\n",
model.num_iters_or_callbacks,
model.objective_value,
model.objective_bound,
model.objective_gap,
time() - start,
)
else
obj_gap = isnan(model.objective_gap) ? "Inf" : ">1000"
Printf.@printf(
"%5d | %+14.6e | %+14.6e | %11s | %11.3e\n",
model.num_iters_or_callbacks,
model.objective_value,
model.objective_bound,
obj_gap,
time() - start,
)
end
flush(stdout)
flush(stderr)
end
return
end
# utilities:
function _eval_func(values, func)
return MOI.Utilities.eval_variables(vi -> values[vi.value], func)
end
# because Pavito only supports one bound on NLP constraints:
# TODO handle two bounds?
_has_upper(bound) = (bound != typemax(bound))
_has_lower(bound) = (bound != typemin(bound))
function _bound_set(model::Optimizer, i::Integer)
bounds = model.nlp_block.constraint_bounds[i]
return _bound_set(bounds.lower, bounds.upper)
end
function _bound_set(lb::T, ub::T) where {T}
if _has_upper(ub)
if _has_lower(lb)
error(
"An NLP constraint has lower bound $lb and upper bound $ub " *
"but only one bound is supported.",
)
else
return MOI.LessThan{Float64}(ub)
end
else
if _has_lower(lb)
return MOI.GreaterThan{Float64}(lb)
else
error("Pavito needs one bound per NLP constraint.")
end
end
end
| Pavito | https://github.com/jump-dev/Pavito.jl.git |
|
[
"MPL-2.0"
] | 0.3.9 | 55c5d8356b684e56e7309386ec86ecd9ef9d17da | code | 2578 | # Copyright 2017, Chris Coey and Miles Lubin
# Copyright 2016, Los Alamos National Laboratory, LANS LLC.
# This Source Code Form is subject to the terms of the Mozilla Public
# License, v. 2.0. If a copy of the MPL was not distributed with this
# file, You can obtain one at http://mozilla.org/MPL/2.0/.
module TestMOIWrapper
import MathOptInterface
import Pavito
import Test
const MOI = MathOptInterface
function runtests(mip_solver, cont_solver)
Test.@testset "$(msd)" for msd in (true, false)
_run_moi_tests(msd, mip_solver, cont_solver)
end
return
end
function _run_moi_tests(msd::Bool, mip_solver, cont_solver)
pavito = Pavito.Optimizer()
MOI.set(pavito, MOI.Silent(), true)
MOI.set(pavito, MOI.RawOptimizerAttribute("mip_solver_drives"), msd)
MOI.set(pavito, MOI.RawOptimizerAttribute("mip_solver"), mip_solver)
MOI.set(pavito, MOI.RawOptimizerAttribute("cont_solver"), cont_solver)
MOI.Test.runtests(
MOI.Utilities.CachingOptimizer(
MOI.Utilities.UniversalFallback(MOI.Utilities.Model{Float64}()),
MOI.Bridges.full_bridge_optimizer(pavito, Float64),
),
MOI.Test.Config(
atol = 1e-4,
rtol = 1e-4,
infeasible_status = MOI.LOCALLY_INFEASIBLE,
optimal_status = MOI.LOCALLY_SOLVED,
exclude = Any[
MOI.ConstraintDual,
MOI.ConstraintBasisStatus,
MOI.DualObjectiveValue,
MOI.NLPBlockDual,
],
),
exclude = String[
# Not implemented:
"test_attribute_SolverVersion",
# Invalid model:
"test_constraint_ZeroOne_bounds_3",
"test_linear_VectorAffineFunction_empty_row",
# CachingOptimizer does not throw if optimizer not attached:
"test_model_copy_to_UnsupportedAttribute",
"test_model_copy_to_UnsupportedConstraint",
# NLP features not supported:
"test_nonlinear_invalid",
# NORM_LIMIT instead of DUAL_INFEASIBLE
"test_linear_DUAL_INFEASIBLE",
"test_linear_DUAL_INFEASIBLE_2",
"test_solve_TerminationStatus_DUAL_INFEASIBLE",
# ITERATION_LIMIT instead of OPTIMAL
"test_linear_integer_knapsack",
"test_linear_integer_solve_twice",
# INFEASIBLE instead of LOCALLY_INFEASIBLE?
"test_linear_Semicontinuous_integration",
"test_linear_Semiinteger_integration",
],
)
return
end
end
| Pavito | https://github.com/jump-dev/Pavito.jl.git |
|
[
"MPL-2.0"
] | 0.3.9 | 55c5d8356b684e56e7309386ec86ecd9ef9d17da | code | 9385 | # Copyright 2017, Chris Coey and Miles Lubin
# Copyright 2016, Los Alamos National Laboratory, LANS LLC.
# This Source Code Form is subject to the terms of the Mozilla Public
# License, v. 2.0. If a copy of the MPL was not distributed with this
# file, You can obtain one at http://mozilla.org/MPL/2.0/.
module TestJuMP
import JuMP
import MINLPTests
import Pavito
using Test
const MOI = JuMP.MOI
function runtests(mip_solver, cont_solver)
@testset "$(msd)" for msd in (true, false)
@testset "MINLPTests" begin
run_minlptests(msd, mip_solver, cont_solver, 0, 1e-3)
end
@testset "QP-NLP" begin
run_qp_nlp_tests(msd, mip_solver, cont_solver, 0, 1e-3)
end
@testset "log_level=$(log_level)" for log_level in 0:2
run_log_level_tests(msd, mip_solver, cont_solver, log_level, 1e-3)
end
end
return
end
function run_minlptests(
mip_solver_drives::Bool,
mip_solver,
cont_solver,
log_level::Int,
TOL::Real,
)
solver = MOI.OptimizerWithAttributes(
Pavito.Optimizer,
"timeout" => 120.0,
"mip_solver_drives" => mip_solver_drives,
"mip_solver" => mip_solver,
"cont_solver" => cont_solver,
"log_level" => log_level,
)
MINLPTests.test_nlp_mi(
solver,
exclude = String[
# ======================= Unexpected failures ======================
# LOCALLY_SOLVED instead of LOCALLY_INFEASIBLE for MSD algorithm
"007_020",
# ======================== Expected failures =======================
# Nonconvex: contains sin(x)^2
"003_010",
"003_011",
"003_012",
"003_013",
"003_014",
"003_015",
"003_016",
# Non-convex: user-defined function y^3
"006_010",
],
objective_tol = TOL,
primal_tol = TOL,
dual_tol = NaN,
primal_target = Dict(
MINLPTests.FEASIBLE_PROBLEM => MOI.FEASIBLE_POINT,
MINLPTests.INFEASIBLE_PROBLEM => MOI.NO_SOLUTION,
),
)
return
end
function run_qp_nlp_tests(
mip_solver_drives::Bool,
mip_solver,
cont_solver,
log_level::Int,
TOL::Real,
)
solver = JuMP.optimizer_with_attributes(
Pavito.Optimizer,
"timeout" => 120.0,
"mip_solver_drives" => mip_solver_drives,
"mip_solver" => mip_solver,
"cont_solver" => cont_solver,
"log_level" => log_level,
)
for name in names(@__MODULE__; all = true)
if startswith("$(name)", "_test_qp_") ||
startswith("$(name)", "_test_nlp_")
@testset "$(name)" begin
getfield(@__MODULE__, name)(solver, TOL)
end
end
end
return
end
function run_log_level_tests(
mip_solver_drives::Bool,
mip_solver,
cont_solver,
log_level::Int,
TOL::Real,
)
solver = JuMP.optimizer_with_attributes(
Pavito.Optimizer,
"timeout" => 120.0,
"mip_solver_drives" => mip_solver_drives,
"mip_solver" => mip_solver,
"cont_solver" => cont_solver,
"log_level" => log_level,
)
for name in names(@__MODULE__; all = true)
if startswith("$(name)", "_test_loglevel_")
@testset "$(name)" begin
getfield(@__MODULE__, name)(solver, TOL)
end
end
end
return
end
###
### Individual tests go below here.
###
function _test_qp_optimal(solver, TOL)
m = JuMP.Model(solver)
JuMP.@variable(m, x >= 0, Int)
JuMP.@variable(m, y >= 0)
JuMP.@variable(m, 0 <= u <= 10, Int)
JuMP.@variable(m, w == 1)
JuMP.@objective(m, Min, -3x - y)
JuMP.@constraint(m, 3x + 10 <= 20)
JuMP.@constraint(m, y^2 <= u * w)
JuMP.optimize!(m)
status = JuMP.termination_status(m)
@test status == MOI.LOCALLY_SOLVED
@test isapprox(JuMP.objective_value(m), -12.162277, atol = TOL)
@test isapprox(JuMP.objective_bound(m), -12.162277, atol = TOL)
@test isapprox(JuMP.value(x), 3, atol = TOL)
@test isapprox(JuMP.value(y), 3.162277, atol = TOL)
return
end
function _test_qp_maximize(solver, TOL)
m = JuMP.Model(solver)
JuMP.@variable(m, x >= 0, Int)
JuMP.@variable(m, y >= 0)
JuMP.@variable(m, 0 <= u <= 10, Int)
JuMP.@variable(m, w == 1)
JuMP.@objective(m, Max, 3x + y)
JuMP.@constraint(m, 3x + 2y + 10 <= 20)
JuMP.@constraint(m, x^2 <= u * w)
JuMP.optimize!(m)
status = JuMP.termination_status(m)
@test status == MOI.LOCALLY_SOLVED
@test isapprox(JuMP.objective_value(m), 9.5, atol = TOL)
@test isapprox(JuMP.objective_bound(m), 9.5, atol = TOL)
@test isapprox(JuMP.value(x), 3, atol = TOL)
@test isapprox(JuMP.value(y), 0.5, atol = TOL)
return
end
function _test_qp_infeasible(solver, TOL)
m = JuMP.Model(solver)
JuMP.@variable(m, x >= 0, Int)
JuMP.@objective(m, Max, x)
JuMP.@constraint(m, x^2 <= 3.9)
JuMP.@constraint(m, x >= 1.1)
JuMP.optimize!(m)
status = JuMP.termination_status(m)
@test status in (MOI.INFEASIBLE, MOI.LOCALLY_INFEASIBLE)
return
end
function _test_nlp_nonconvex_error(solver, TOL)
m = JuMP.Model(solver)
JuMP.@variable(m, x >= 0, start = 1, Int)
JuMP.@variable(m, y >= 0, start = 1)
JuMP.@objective(m, Min, -3x - y)
JuMP.@constraint(m, 3x + 2y + 10 <= 20)
JuMP.@NLconstraint(m, 8 <= x^2 <= 10)
@test_throws ErrorException JuMP.optimize!(m)
return
end
function _test_nlp_optimal(solver, TOL)
m = JuMP.Model(solver)
JuMP.@variable(m, x >= 0, start = 1, Int)
JuMP.@variable(m, y >= 0, start = 1)
JuMP.@objective(m, Min, -3x - y)
JuMP.@constraint(m, 3x + 2y + 10 <= 20)
JuMP.@constraint(m, x >= 1)
JuMP.@NLconstraint(m, x^2 <= 5)
JuMP.@NLconstraint(m, exp(y) + x <= 7)
JuMP.optimize!(m)
status = JuMP.termination_status(m)
@test status == MOI.LOCALLY_SOLVED
@test isapprox(JuMP.value(x), 2.0)
return
end
function _test_nlp_infeasible_1(solver, TOL)
m = JuMP.Model(solver)
JuMP.@variable(m, x >= 0, start = 1, Int)
JuMP.@variable(m, y >= 0, start = 1)
JuMP.@objective(m, Min, -3x - y)
JuMP.@constraint(m, 3x + 2y + 10 <= 20)
JuMP.@NLconstraint(m, x^2 >= 9)
JuMP.@NLconstraint(m, exp(y) + x <= 2)
JuMP.optimize!(m)
status = JuMP.termination_status(m)
@test status in (MOI.INFEASIBLE, MOI.LOCALLY_INFEASIBLE)
return
end
function _test_nlp_infeasible_2(solver, TOL)
m = JuMP.Model(solver)
JuMP.@variable(m, x >= 0, start = 1, Int)
JuMP.@objective(m, Max, x)
JuMP.@NLconstraint(m, log(x) >= 0.75)
JuMP.@constraint(m, x <= 2.9)
JuMP.optimize!(m)
status = JuMP.termination_status(m)
@test status in (MOI.INFEASIBLE, MOI.LOCALLY_INFEASIBLE)
return
end
function _test_nlp_continuous(solver, TOL)
m = JuMP.Model(solver)
JuMP.@variable(m, x >= 0, start = 1)
JuMP.@variable(m, y >= 0, start = 1)
JuMP.@objective(m, Min, -3x - y)
JuMP.@constraint(m, 3x + 2y + 10 <= 20)
JuMP.@constraint(m, x >= 1)
JuMP.@NLconstraint(m, x^2 <= 5)
JuMP.@NLconstraint(m, exp(y) + x <= 7)
JuMP.optimize!(m)
status = JuMP.termination_status(m)
@test status == MOI.LOCALLY_SOLVED
@test isapprox(JuMP.objective_value(m), -8.26928, atol = TOL)
@test isapprox(JuMP.objective_bound(m), -8.26928, atol = TOL)
@test isapprox(JuMP.value(x), 2.23607, atol = TOL)
@test isapprox(JuMP.value(y), 1.56107, atol = TOL)
return
end
function _test_nlp_maximization(solver, TOL)
m = JuMP.Model(solver)
JuMP.@variable(m, x >= 0, start = 1, Int)
JuMP.@variable(m, y >= 0, start = 1)
JuMP.@objective(m, Max, 3x + y)
JuMP.@constraint(m, 3x + 2y + 10 <= 20)
JuMP.@NLconstraint(m, x^2 <= 9)
JuMP.optimize!(m)
status = JuMP.termination_status(m)
@test status == MOI.LOCALLY_SOLVED
@test isapprox(JuMP.objective_value(m), 9.5, atol = TOL)
return
end
function _test_nlp_nonlinear_objective(solver, TOL)
m = JuMP.Model(solver)
JuMP.@variable(m, x >= 0, start = 1, Int)
JuMP.@variable(m, y >= 0, start = 1)
JuMP.@objective(m, Max, -x^2 - y)
JuMP.@constraint(m, x + 2y >= 4)
JuMP.@NLconstraint(m, x^2 <= 9)
JuMP.optimize!(m)
status = JuMP.termination_status(m)
@test status == MOI.LOCALLY_SOLVED
@test isapprox(JuMP.objective_value(m), -2.0, atol = TOL)
@test isapprox(JuMP.objective_bound(m), -2.0, atol = TOL)
return
end
function _test_loglevel_optimal(solver, TOL)
m = JuMP.Model(solver)
JuMP.@variable(m, x >= 0, Int)
JuMP.@objective(m, Max, x)
JuMP.@constraint(m, x^2 <= 5)
JuMP.@constraint(m, x >= 0.5)
JuMP.optimize!(m)
status = JuMP.termination_status(m)
@test status == MOI.LOCALLY_SOLVED
return
end
function _test_loglevel_infeasible(solver, TOL)
m = JuMP.Model(solver)
JuMP.@variable(m, x >= 0, start = 1, Int)
JuMP.@variable(m, y >= 0, start = 1)
JuMP.@objective(m, Min, -3x - y)
JuMP.@constraint(m, 3x + 2y + 10 <= 20)
JuMP.@constraint(m, 6x + 5y >= 30)
JuMP.@NLconstraint(m, x^2 >= 8)
JuMP.@NLconstraint(m, exp(y) + x <= 7)
JuMP.optimize!(m)
status = JuMP.termination_status(m)
@test status in [MOI.INFEASIBLE, MOI.LOCALLY_INFEASIBLE]
return
end
end
| Pavito | https://github.com/jump-dev/Pavito.jl.git |
|
[
"MPL-2.0"
] | 0.3.9 | 55c5d8356b684e56e7309386ec86ecd9ef9d17da | code | 1790 | # Copyright 2017, Chris Coey and Miles Lubin
# Copyright 2016, Los Alamos National Laboratory, LANS LLC.
# This Source Code Form is subject to the terms of the Mozilla Public
# License, v. 2.0. If a copy of the MPL was not distributed with this
# file, You can obtain one at http://mozilla.org/MPL/2.0/.
using Test
import Cbc
import GLPK
import Ipopt
import MathOptInterface
const MOI = MathOptInterface
include("MOI_wrapper.jl")
include("jump_tests.jl")
# !!! info
# We test with both Cbc and GLPK because they have very different
# implementations of the MOI API: GLPK supports incremental modification and
# supports lazy constraints, whereas Cbc supports copy_to and does not
# support lazy constraints. In addition, Cbc uses MatrixOfConstraints to
# simplify the copy process, needing an additional cache if we modify after
# the solve.
@testset "MOI" begin
TestMOIWrapper.runtests(
MOI.OptimizerWithAttributes(
GLPK.Optimizer,
"msg_lev" => 0,
"tol_int" => 1e-9,
"tol_bnd" => 1e-7,
"mip_gap" => 0.0,
),
MOI.OptimizerWithAttributes(Ipopt.Optimizer, MOI.Silent() => true),
)
end
@testset "Cbc" begin
TestMOIWrapper._run_moi_tests(
false, # mip_solver_drives
MOI.OptimizerWithAttributes(Cbc.Optimizer, MOI.Silent() => true),
MOI.OptimizerWithAttributes(Ipopt.Optimizer, MOI.Silent() => true),
)
end
@testset "JuMP" begin
TestJuMP.runtests(
MOI.OptimizerWithAttributes(
GLPK.Optimizer,
"msg_lev" => 0,
"tol_int" => 1e-9,
"tol_bnd" => 1e-7,
"mip_gap" => 0.0,
),
MOI.OptimizerWithAttributes(Ipopt.Optimizer, MOI.Silent() => true),
)
end
| Pavito | https://github.com/jump-dev/Pavito.jl.git |
|
[
"MPL-2.0"
] | 0.3.9 | 55c5d8356b684e56e7309386ec86ecd9ef9d17da | docs | 44 | Pavito release notes
======================
| Pavito | https://github.com/jump-dev/Pavito.jl.git |
|
[
"MPL-2.0"
] | 0.3.9 | 55c5d8356b684e56e7309386ec86ecd9ef9d17da | docs | 3453 | # Pavito.jl
[](https://github.com/jump-dev/Pavito.jl/actions)
[](https://codecov.io/gh/jump-dev/Pavito.jl)
[Pavito.jl](https://github.com/jump-dev/Pavito.jl) is a mixed-integer convex
programming (MICP) solver package written in [Julia](http://julialang.org/).
MICP problems are convex, except for restrictions that some variables take
binary or integer values.
Pavito solves MICP problems by constructing sequential polyhedral
outer-approximations of the convex feasible set, similar to [Bonmin](https://projects.coin-or.org/Bonmin).
Pavito accesses state-of-the-art MILP solvers and continuous, derivative-based
nonlinear programming (NLP) solvers through [MathOptInterface](https://github.com/jump-dev/MathOptInterface.jl).
For algorithms that use a conic solver instead of an NLP solver, use
[Pajarito](https://github.com/jump-dev/Pajarito.jl). Pajarito is a robust
mixed-integer conic solver that can handle such established problem classes as
mixed-integer second-order cone programming (MISOCP) and mixed-integer
semidefinite programming (MISDP).
## License
`Pavito.jl` is licensed under the [MPL 2.0 license](https://github.com/jump-dev/Pavito.jl/blob/master/LICENSE.md).
## Installation
Install Pavito using `Pkg.add`:
```julia
import Pkg
Pkg.add("Pavito")
```
## Use with JuMP
To use Pavito with [JuMP](https://github.com/jump-dev/JuMP.jl), use
`Pavito.Optimizer`:
```julia
using JuMP, Pavito
import GLPK, Ipopt
model = Model(
optimizer_with_attributes(
Pavito.Optimizer,
"mip_solver" => optimizer_with_attributes(GLPK.Optimizer),
"cont_solver" =>
optimizer_with_attributes(Ipopt.Optimizer, "print_level" => 0),
),
)
```
The algorithm implemented by Pavito itself is relatively simple; most of the
hard work is performed by the MILP solver passed as `mip_solver` and the NLP
solver passed as `cont_solver`.
**The performance of Pavito depends on these two types of solvers.**
For better performance, you should use a commercial MILP solver such as CPLEX
or Gurobi.
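For example, a Gurobi-backed setup could be a drop-in replacement for the GLPK
example above (this is only a sketch; it assumes the Gurobi.jl wrapper is installed
and licensed):
```julia
using JuMP, Pavito
import Gurobi, Ipopt

model = Model(
    optimizer_with_attributes(
        Pavito.Optimizer,
        "mip_solver" => optimizer_with_attributes(Gurobi.Optimizer),
        "cont_solver" =>
            optimizer_with_attributes(Ipopt.Optimizer, "print_level" => 0),
    ),
)
```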
## Options
The following optimizer attributes can be set on a `Pavito.Optimizer` to modify its
behavior:
* `log_level::Int` Verbosity flag: 0 for quiet, higher for basic solve info
* `timeout::Float64` Time limit for algorithm (in seconds)
* `rel_gap::Float64` Relative optimality gap termination condition
* `mip_solver_drives::Bool` Let MILP solver manage convergence ("branch and
cut")
* `mip_solver::MOI.OptimizerWithAttributes` MILP solver
* `cont_solver::MOI.OptimizerWithAttributes` Continuous NLP solver
**Pavito is not yet numerically robust and may require tuning of parameters to
improve convergence.**
If the default parameters don't work for you, please let us know by opening an
issue.
For improved Pavito performance, MILP solver integrality tolerance and
feasibility tolerances should typically be tightened, for example to `1e-8`.
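As a sketch, the options and tightened MILP tolerances can be passed together when
constructing the optimizer. The GLPK parameter names (`msg_lev`, `tol_int`,
`tol_bnd`) follow the settings used in Pavito's test suite; all values shown are
only illustrative:
```julia
using JuMP, Pavito
import GLPK, Ipopt

mip_solver = optimizer_with_attributes(
    GLPK.Optimizer,
    "msg_lev" => 0,
    "tol_int" => 1e-9,
    "tol_bnd" => 1e-8,
)
cont_solver = optimizer_with_attributes(Ipopt.Optimizer, "print_level" => 0)

model = Model(
    optimizer_with_attributes(
        Pavito.Optimizer,
        "log_level" => 1,
        "timeout" => 3600.0,
        "rel_gap" => 1e-5,
        "mip_solver_drives" => false,
        "mip_solver" => mip_solver,
        "cont_solver" => cont_solver,
    ),
)
```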
## Bug reports and support
Please report any issues via the [GitHub issue tracker](https://github.com/jump-dev/Pavito.jl/issues).
All types of issues are welcome and encouraged; this includes bug reports,
documentation typos, feature requests, etc. The [Optimization (Mathematical)](https://discourse.julialang.org/c/domain/opt) category on Discourse is appropriate for general
discussion.
| Pavito | https://github.com/jump-dev/Pavito.jl.git |
|
[
"MIT"
] | 1.1.0 | e495a1eaee52a513e02fe6ca2c995a7d0ed671a7 | code | 74 | using LatticeQCD
using Documenter
makedocs(sitename="LatticeQCD.jl")
| LatticeQCD | https://github.com/akio-tomiya/LatticeQCD.jl.git |
|
[
"MIT"
] | 1.1.0 | e495a1eaee52a513e02fe6ca2c995a7d0ed671a7 | code | 3070 | # - - parameters - - - - - - - - - - -
#=
Symanzik improved gauge actions debugged using
https://link.springer.com/article/10.1140/epjc/s10052-017-5392-6
Improved thermodynamics of SU(2) gauge theory Pietro Giudice & Stefano Piemonte
=#
# Physical setting
system["L"] = (8, 8, 8, 8)
system["β"] = 1.6
system["NC"] = 2
system["Nthermalization"] = 0
system["Nsteps"] = 201
system["initial"] = "cold"
system["initialtrj"] = 1
system["update_method"] = "Heatbath"
system["useOR"] = false
system["numOR"] = 1
system["Nwing"] = 1
# Physical setting(fermions)
system["quench"] = true
system["Dirac_operator"] = nothing
system["BoundaryCondition"] = [1, 1, 1, -1]
# System Control
system["log_dir"] = "./logs"
system["logfile"] = "Heatbath_L08080808_beta1.6_quenched.txt"
system["saveU_dir"] = ""
system["saveU_format"] = nothing
system["verboselevel"] = 2
system["randomseed"] = 111
measurement["measurement_basedir"] = "./measurements"
measurement["measurement_dir"] = "Heatbath_L08080808_beta1.6_quenched"
# HMC related
# Action parameter for SLMC
actions["use_autogeneratedstaples"] = false
actions["couplingcoeff"] = Any[]
actions["couplinglist"] = Any[]
# Measurement set
measurement["measurement_methods"] = Dict[
Dict{Any,Any}("methodname" => "Polyakov_loop",
"measure_every" => 1
),
Dict{Any,Any}("methodname" => "Plaquette",
"measure_every" => 1
)
]
# - - - - - - - - - - - - - - - - - - -
#-(\beta/2) Re Tr((5/3) plaquette - (1/12) rectangle)
beta = 1.6
actions["use_autogeneratedstaples"] = true
actions["couplingcoeff"] = [beta*5/3,-beta/12]
#actions["couplinglist"] = ["plaq","rect","polyx"]
#=
function makeplaq()
loopset = []
for μ=1:4
for ν=μ:4
if ν == μ
continue
end
push!(loopset,[(μ,1),(ν,1),(μ,-1),(ν,-1)])
end
end
return loopset
end
function makechair()
loopset = []
set1 = (1,2,3)
set2 = (1,2,4)
set3 = (2,3,4)
set4 = (1,3,4)
for set in (set1,set2,set3,set4)
mu,nu,rho = set
loop = [(mu,1),(nu,1),(rho,1),(mu,-1),(rho,-1),(nu,-1)]
push!(loopset,loop)
mu,rho,nu = set
loop = [(mu,1),(nu,1),(rho,1),(mu,-1),(rho,-1),(nu,-1)]
push!(loopset,loop)
nu,rho,mu = set
loop = [(mu,1),(nu,1),(rho,1),(mu,-1),(rho,-1),(nu,-1)]
push!(loopset,loop)
nu,mu,rho = set
loop = [(mu,1),(nu,1),(rho,1),(mu,-1),(rho,-1),(nu,-1)]
push!(loopset,loop)
rho,mu,nu = set
loop = [(mu,1),(nu,1),(rho,1),(mu,-1),(rho,-1),(nu,-1)]
push!(loopset,loop)
rho,nu,mu = set
loop = [(mu,1),(nu,1),(rho,1),(mu,-1),(rho,-1),(nu,-1)]
push!(loopset,loop)
end
return loopset
end
=#
plaqloops = make_plaqloops()
rectloops = make_rectloops(2)
#=
chairloops = makechair()
L = system["L"]
poly1loops = make_polyakovloops(1,L[1])
poly2loops = make_polyakovloops(2,L[2])
poly3loops = make_polyakovloops(3,L[3])
poly4loops = make_polyakovloops(4,L[4])
=#
actions["coupling_loops"] = [plaqloops,rectloops]
| LatticeQCD | https://github.com/akio-tomiya/LatticeQCD.jl.git |
|
[
"MIT"
] | 1.1.0 | e495a1eaee52a513e02fe6ca2c995a7d0ed671a7 | code | 2717 | # - - parameters - - - - - - - - - - -
system["saveU_dir"] = ""
system["verboselevel"] = 2
system["L"] = (4, 4, 4, 4)
system["Nwing"] = 1
system["Nsteps"] = 100
system["quench"] = true
system["logfile"] = "Heatbath_L04040404_beta5.7_quenched.txt"
system["initial"] = "cold"
system["Dirac_operator"] = nothing
system["log_dir"] = "./logs"
system["Nthermalization"] = 10
system["update_method"] = "HB"
system["randomseed"] = 111
system["NC"] = 3
system["BoundaryCondition"] = [1, 1, 1, -1]
system["saveU_format"] = nothing
system["β"] = 5.7
actions["use_autogeneratedstaples"] = false
actions["couplingcoeff"] = Any[]
actions["couplinglist"] = Any[]
md["Δτ"] = 0.05
md["N_SextonWeingargten"] = 2
md["SextonWeingargten"] = false
md["MDsteps"] = 20
measurement["measurement_methods"] = Dict[Dict{Any,Any}("fermiontype" => nothing,"measure_every" => 1,"methodname" => "Polyakov_loop"), Dict{Any,Any}("fermiontype" => nothing,"measure_every" => 1,"methodname" => "Plaquette")]
measurement["measurement_dir"] = "Heatbath_L04040404_beta5.7_quenched"
measurement["measurement_basedir"] = "./measurements"
actions["use_autogeneratedstaples"] = true
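# Coefficients for the loop sets assembled below in actions["coupling_loops"] (plaquette, rectangle, chair)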
actions["couplingcoeff"] = [5.7,1.0,0.5]
#actions["couplinglist"] = ["plaq","rect","polyx"]
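# All 1x1 plaquette loops (one per pair μ<ν), written as lists of (direction, step) segments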
function makeplaq()
loopset = []
for μ=1:4
for ν=μ:4
if ν == μ
continue
end
push!(loopset,[(μ,1),(ν,1),(μ,-1),(ν,-1)])
end
end
return loopset
end
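# Chair-type loops: 6-link loops spanning three directions, added for every ordering of each direction triple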
function makechair()
loopset = []
set1 = (1,2,3)
set2 = (1,2,4)
set3 = (2,3,4)
set4 = (1,3,4)
for set in (set1,set2,set3,set4)
mu,nu,rho = set
loop = [(mu,1),(nu,1),(rho,1),(mu,-1),(rho,-1),(nu,-1)]
push!(loopset,loop)
mu,rho,nu = set
loop = [(mu,1),(nu,1),(rho,1),(mu,-1),(rho,-1),(nu,-1)]
push!(loopset,loop)
nu,rho,mu = set
loop = [(mu,1),(nu,1),(rho,1),(mu,-1),(rho,-1),(nu,-1)]
push!(loopset,loop)
nu,mu,rho = set
loop = [(mu,1),(nu,1),(rho,1),(mu,-1),(rho,-1),(nu,-1)]
push!(loopset,loop)
rho,mu,nu = set
loop = [(mu,1),(nu,1),(rho,1),(mu,-1),(rho,-1),(nu,-1)]
push!(loopset,loop)
rho,nu,mu = set
loop = [(mu,1),(nu,1),(rho,1),(mu,-1),(rho,-1),(nu,-1)]
push!(loopset,loop)
end
return loopset
end
plaqloops = makeplaq()
rectloops = make_rectloops(2)
chairloops = makechair()
L = system["L"]
poly1loops = make_polyakovloops(1,L[1])
poly2loops = make_polyakovloops(2,L[2])
poly3loops = make_polyakovloops(3,L[3])
poly4loops = make_polyakovloops(4,L[4])
actions["coupling_loops"] = [plaqloops,rectloops,chairloops]
# - - - - - - - - - - - - - - - - - - -
| LatticeQCD | https://github.com/akio-tomiya/LatticeQCD.jl.git |
|
[
"MIT"
] | 1.1.0 | e495a1eaee52a513e02fe6ca2c995a7d0ed671a7 | code | 4323 | module LatticeQCD
using Requires
include("./mpi/simpleprint.jl")
#include("./SLMC/logdet.jl")
include("./system/parameter_structs.jl")
#include("./rhmc/AlgRemez.jl")
#include("./rhmc/rhmc.jl")
#include("./gaugefields/SUN_generator.jl")
#include("./output/verboseprint.jl")
#include("./fermions/cgmethod.jl")
#include("./autostaples/wilsonloops.jl")
include("./system/transform_oldinputfile.jl")
include("./system/system_parameters.jl")
include("./system/parameters_TOML.jl")
include("./system/universe.jl")
include("./md/AbstractMD.jl")
include("./updates/AbstractUpdate.jl")
include("./measurements/measurement_parameters_set.jl")
include("./measurements/Measurement_set.jl")
#include("./measurements/AbstractMeasurement.jl")
#include("parallel.jl")
#include("site.jl")
#include("./system/rand.jl")
#include("./actions/actions.jl")
#include("./gaugefields/gaugefields.jl")
#include("gaugefields.jl")
#include("./fermions/AbstractFermion.jl")
#include("./fermions/WilsonFermion.jl")
#include("./fermions/DomainwallFermion.jl")
#include("./fermions/StaggeredFermion.jl")
#include("./fermions/fermionfields.jl")
#include("./liealgebra/liealgebrafields.jl")
#include("./rationalapprox/rationalapprox.jl")
#include("./fermions/clover.jl")
#include("./fermions/diracoperator.jl")
#include("./fermions/misc.jl")
#include("./output/io.jl")
#include("./output/ildg_format.jl")
#include("./output/bridge_format.jl")
#include("./system/LTK_universe.jl")
#include("./gaugefields/smearing.jl")
#include("./output/print_config.jl")
#include("cg.jl")
#include("./measurements/measurements.jl")
#include("./heatbath/heatbath.jl")
#include("./md/md.jl")
include("./system/wizard.jl")
#include("./SLMC/SLMC.jl")
#include("./system/mainrun.jl")
#include("./output/analyze.jl")
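# Optional features are loaded lazily via Requires.jl: the interactive demo needs Plots, the MPI helpers need MPI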
function __init__()
@require Plots = "91a5bcdd-55d7-5caf-9e0b-520d859cae80" begin
include("./demo/demo.jl")
import .Demo: demo
export demo
#import .Analyze: plot_plaquette, plot_polyakov, plot_plaq_and_poly
#export plot_plaquette, plot_polyakov, plot_plaq_and_poly
end
@require MPI = "da04e1cc-30fd-572f-bb4f-1f8673147195" begin
include("./mpi/mpimodule.jl")
import .MPImodules: get_myrank, get_nprocs, println_rank0, set_PEs, get_PEs
export get_myrank, get_nprocs, println_rank0, set_PEs, get_PEs
end
end
include("./system/lqcd.jl")
#import .LTK_universe:
# Universe, show_parameters, make_WdagWmatrix, calc_Action, set_β!, set_βs!
#import .Actions: Setup_Gauge_action, Setup_Fermi_action, GaugeActionParam_autogenerator
#import .Measurements:
# calc_plaquette,
# measure_correlator,
# Measurement,
# calc_polyakovloop,
# measure_chiral_cond,
# calc_topological_charge,
#measurements,
# Measurement_set
#import .MD:
# md_initialize!, MD_parameters_standard, md!, metropolis_update!, construct_MD_parameters
import .System_parameters: Params
#import .Print_config: write_config
#import .Smearing: gradientflow!
#import .ILDG_format: ILDG, load_gaugefield
#import .Heatbath: heatbath!
#import .Wilsonloops: make_plaq
#import .IOmodule: saveU, loadU, loadU!
import .Wizard: run_wizard
#import .Mainrun: run_LQCD
#import .RationalApprox: calc_exactvalue, calc_Anϕ, calc_det
#,run_LQCD!
#import .Analyze:
# analyze,
# get_plaquette,
# get_polyakov,
# get_plaquette_average,
# get_polyakov_average,
# get_trjs
import .LQCD: run_LQCD_file, run_LQCD
#import .Fermionfields:make_WdagWmatrix
#export Setup_Gauge_action, Setup_Fermi_action, GaugeActionParam_autogenerator
#export Universe, set_β!, set_βs!
#export calc_plaquette, calc_polyakovloop, calc_topological_charge
#export md_initialize!,
# MD_parameters_standard, md!, metropolis_update!, construct_MD_parameters
#export show_parameters
export Params
#export measure_correlator, measure_chiral_cond, Measurement, measurements, Measurement_set
#export gradientflow!
#export ILDG, load_gaugefield
#export make_WdagWmatrix
#export heatbath!
#export make_plaq
#export calc_Action
#export calc_topological_charge
#export saveU, loadU, loadU!
export run_LQCD, run_LQCD!
#export write_config
export run_wizard
export analyze,
get_plaquette, get_polyakov, get_plaquette_average, get_polyakov_average, get_trjs
export run_LQCD_file
end
| LatticeQCD | https://github.com/akio-tomiya/LatticeQCD.jl.git |
|
[
"MIT"
] | 1.1.0 | e495a1eaee52a513e02fe6ca2c995a7d0ed671a7 | code | 378 | include("./LatticeQCD.jl")
using Plots
using .LatticeQCD
using Random
using Dates
#using JLD
#using GFlops
if length(ARGS) == 0
error("""
Use input file:
Like,
julia analyze.jl parameters.jl
""")
end
function runtest()
plot_plaquette(ARGS[1])
plot_polyakov(ARGS[1])
plot_plaq_and_poly(ARGS[1])
end
@time runtest()
| LatticeQCD | https://github.com/akio-tomiya/LatticeQCD.jl.git |
|
[
"MIT"
] | 1.1.0 | e495a1eaee52a513e02fe6ca2c995a7d0ed671a7 | code | 384 | include("./LatticeQCD.jl")
using .LatticeQCD
using Random
using Dates
#using JLD
function test()
Random.seed!(111)
A = rand(ComplexF64, 4, 4) * 4
A = A' * A
n = 4
ϕ = ComplexF64[1, 2, 3, 4]
ϕr = LatticeQCD.calc_exactvalue(n, A, ϕ)
println(ϕ' * ϕr)
ϕr2 = LatticeQCD.calc_Anϕ(n, A, ϕ)
println(ϕ' * ϕr2)
#LatticeQCD.calc_det(n,A,ϕ)
end
test()
| LatticeQCD | https://github.com/akio-tomiya/LatticeQCD.jl.git |
|
[
"MIT"
] | 1.1.0 | e495a1eaee52a513e02fe6ca2c995a7d0ed671a7 | code | 132 | include("./LatticeQCD.jl")
using Plots
using .LatticeQCD
using Random
using Dates
#using JLD
#using GFlops
@time demo()
| LatticeQCD | https://github.com/akio-tomiya/LatticeQCD.jl.git |
|
[
"MIT"
] | 1.1.0 | e495a1eaee52a513e02fe6ca2c995a7d0ed671a7 | code | 59 | include("LatticeQCD.jl")
using .LatticeQCD
run_wizard()
| LatticeQCD | https://github.com/akio-tomiya/LatticeQCD.jl.git |
|
[
"MIT"
] | 1.1.0 | e495a1eaee52a513e02fe6ca2c995a7d0ed671a7 | code | 383 | using MPI
include("./LatticeQCD.jl")
using .LatticeQCD
#using LatticeQCD
function main()
myrank = get_myrank()
nprocs = get_nprocs()
println(myrank)
println_rank0(nprocs)
println_rank0(get_PEs())
println_rank0(ARGS)
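    # ARGS[1] is the parameter file; ARGS[2:5] give the process decomposition handed to set_PEs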
PEs = parse.(Int64, ARGS[2:5])
set_PEs(PEs)
println_rank0(get_PEs())
run_LQCD(ARGS[1]; MPIparallel = true)
end
main()
| LatticeQCD | https://github.com/akio-tomiya/LatticeQCD.jl.git |
|
[
"MIT"
] | 1.1.0 | e495a1eaee52a513e02fe6ca2c995a7d0ed671a7 | code | 392 | include("./LatticeQCD.jl")
using .LatticeQCD
using Random
using Dates
#using JLD
#using GFlops
if length(ARGS) == 0
error("""
Use input file:
Like,
julia run.jl parameters.jl
""")
end
function runtest()
run_LQCD_file(ARGS[1])
#run_LQCD(ARGS[1])
#parameters = parameterloading()
#run_LQCD(parameters)
end
@time runtest()
| LatticeQCD | https://github.com/akio-tomiya/LatticeQCD.jl.git |
|
[
"MIT"
] | 1.1.0 | e495a1eaee52a513e02fe6ca2c995a7d0ed671a7 | code | 14457 | module Demo
using Dates
using Plots
using TOML
#using LaTeXStrings
##import ..System_parameters: Params_set, parameterloading
#import ..LTK_universe: Universe
#import ..MD: construct_MD_parameters
#import ..Measurements: Measurement_set, measurements
#import ..Verbose_print: println_verbose1, println_verbose2, Verbose_1
#import ..Heatbath: heatbath!
import ..Universe_module: Univ
import ..Transform_oldinputfile: transform_to_toml
import ..Parameters_TOML: construct_Params_from_TOML
import ..AbstractUpdate_module: Updatemethod, update!
import Gaugefields:
Gradientflow,
println_verbose_level1,
get_myrank,
flow!,
save_binarydata,
save_textdata,
saveU
#import ..AbstractMeasurement_module:
# Measurement_methods,
# calc_measurement_values,
# measure,
# Plaquette_measurement,
# get_temporary_gaugefields
using Gaugefields
using InteractiveUtils
using Dates
using Random
import ..Simpleprint: println_rank0
#import ..AbstractMeasurement_module: Plaquette_measurement, Polyakov_measurement
import ..LatticeQCD: Measurement_methods, calc_measurement_values
import QCDMeasurements:
    measure,
    Plaquette_measurement,
    Polyakov_measurement,
    get_temporary_gaugefields,
    get_value
system = Dict()
actions = Dict()
md = Dict()
cg = Dict()
wilson = Dict()
staggered = Dict()
measurement = Dict()
# - - parameters - - - - - - - - - - -
system["saveU_dir"] = ""
system["verboselevel"] = 1
#system["L"] = (6, 6, 6, 6)
system["L"] = (4, 4, 4, 4)
system["Nwing"] = 1
system["Nsteps"] = 10000
system["quench"] = true
system["logfile"] = "Heatbath_L06060606_beta8.0_quenched.txt"
system["initial"] = "hot"
system["Dirac_operator"] = nothing
system["log_dir"] = "./logs"
system["Nthermalization"] = 10
system["update_method"] = "Heatbath"
system["randomseed"] = 111
system["NC"] = 3
system["saveU_every"] = 1
system["BoundaryCondition"] = [1, 1, 1, -1]
system["saveU_format"] = nothing
#system["β"] = 6.93015
system["β"] = 6.75850661032668353
actions["use_autogeneratedstaples"] = false
actions["couplingcoeff"] = Any[]
actions["couplinglist"] = Any[]
md["Δτ"] = 0.05
md["SextonWeingargten"] = false
md["MDsteps"] = 20
cg["eps"] = 1.0e-19
cg["MaxCGstep"] = 3000
wilson["Clover_coefficient"] = 0
wilson["r"] = 1
wilson["hop"] = 0
staggered["Nf"] = 0
staggered["mass"] = 0
measurement["measurement_methods"] = Dict[
Dict{Any,Any}(
"fermiontype" => nothing,
"measure_every" => 1,
"methodname" => "Polyakov_loop",
),
Dict{Any,Any}(
"fermiontype" => nothing,
"measure_every" => 1,
"methodname" => "Plaquette",
),
]
measurement["measurement_dir"] = "Heatbath_L06060606_beta8.0_quenched"
measurement["measurement_basedir"] = "./measurements"
# - - - - - - - - - - - - - - - - - - -
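# Demo configuration embedded as a TOML string; demo() parses it with TOML.parse and construct_Params_from_TOML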
data = """
["HMC related"]
"Δτ" = 0.05
MDsteps = 20
["Physical setting(fermions)"]
Dirac_operator = "nothing"
["Physical setting"]
initial = "hot"
L = [4, 4, 4, 4]
Nthermalization = 10
update_method = "Heatbath"
Nwing = 1
Nsteps = 10000
"β" = 6.758506610326683
["System Control"]
logfile = "Heatbath_L06060606_beta8.0_quenched.txt"
measurement_dir = "Heatbath_L06060606_beta8.0_quenched"
measurement_basedir = "./measurements"
log_dir = "./logs"
hasgradientflow = false
["Measurement set".measurement_methods.Polyakov_loop]
measure_every = 1
methodname = "Polyakov_loop"
["Measurement set".measurement_methods.Plaquette]
measure_every = 1
methodname = "Plaquette"
[gradientflow_measurements.measurements_for_flow]
"""
function demo()
filename = "./demoparam.toml"
parameters = construct_Params_from_TOML(TOML.parse(data))
Random.seed!(parameters.randomseed)
univ = Univ(parameters)
run_demo!(parameters, univ)
return
numaccepts = 0
for itrj = parameters.initialtrj:parameters.Nsteps
#println_verbose_level1(univ.U[1], "# itrj = $itrj")
@time accepted = update!(updatemethod, univ.U)
if accepted
numaccepts += 1
end
#save_gaugefield(savedata,univ.U,itrj)
measure(plaq_m, itrj, univ.U; additional_string = "")
measure(poly_m, itrj, univ.U; additional_string = "")
#calc_measurement_values(measurements,itrj, univ.U)
Usmr = deepcopy(univ.U)
for istep = 1:numflow
τ = istep * dτ
flow!(Usmr, gradientflow)
additional_string = "$istep $τ "
for i = 1:measurements_for_flow.num_measurements
interval = measurements_for_flow.intervals[i]
if istep % interval == 0
measure(
measurements_for_flow.measurements[i],
itrj,
Usmr,
additional_string = additional_string,
)
end
end
end
println_verbose_level1(
univ.U[1],
"Acceptance $numaccepts/$itrj : $(round(numaccepts*100/itrj)) %",
)
end
end
function demo_old()
params_set = Params_set(system, actions, md, cg, wilson, staggered, measurement)
parameters = parameterloading(params_set)
univ = Universe(parameters)
mdparams = construct_MD_parameters(parameters)
meas = Measurement_set(
univ,
parameters.measuredir,
measurement_methods = parameters.measurement_methods,
)
run_demo!(parameters, univ, meas)
end
function run_demo_old!(parameters, univ, meas)
plt1 = histogram([0], label = nothing) #plot1
plt2 = plot([], [], label = nothing) #plot2
ylabel!("Plaquette")
xlabel!("MC time")
plt3 = histogram([0], label = nothing) #plot3
plt4 = plot([], [], label = nothing) #plot4
ylabel!("|Polyakov loop|")
xlabel!("MC time")
plt5 = scatter([], [], label = nothing, title = "Polyakov loop") #plot5 scatter
ylabel!("Im")
xlabel!("Re")
plt6 = plot([], [], label = nothing) #plot6
ylabel!("Arg(Polyakov loop)")
xlabel!("MC time")
plot(plt1, plt2, plt3, plt4, plt5, plt6, layout = 6)
hist_plaq = []
hist_poly = []
hist_poly_θ = []
function plot_refresh!(
plt1,
plt2,
plt3,
plt4,
plt5,
plt6,
hist_plaq,
hist_poly,
hist_poly_θ,
plaq,
poly,
itrj,
)
bins = round(Int, log(itrj) * 4 + 1)
if itrj < 500
bins = round(Int, log(itrj) * 1.5 + 1)
elseif itrj < 1000
bins = round(Int, log(itrj) * 2.5 + 1)
elseif itrj < 2000
bins = round(Int, log(itrj) * 3.5 + 1)
end
append!(hist_plaq, plaq)
append!(hist_poly, abs(poly))
# omit un-thermalized part
if (10 < itrj < 1000) & (itrj % 5 == 0)
print("remove unthermalize part $(length(hist_plaq)) -> ")
popfirst!(hist_plaq)
popfirst!(hist_poly)
println(" $(length(hist_plaq))")
end
#
plt1 = histogram(hist_plaq, bins = bins, label = nothing) #plot1
#plot!(plt1,title="SU(3), Quenched, L=6^4,")
plot!(plt1, title = "SU(3), Quenched, L=4^4,")
xlabel!("Plaquette")
plot!(plt2, title = "Heatbath")
plt3 = histogram(hist_poly, bins = bins, label = nothing) #plot3
xlabel!("|Polyakov loop|")
#
if itrj < 500
push!(plt2, 1, itrj, plaq)
elseif 500 < itrj < 1000
if itrj % 2 == 0
push!(plt2, 1, itrj, plaq)
end
elseif 1000 < itrj
if itrj % 5 == 0
push!(plt2, 1, itrj, plaq)
end
end
push!(plt4, 1, itrj, abs(poly))
if itrj < 500
push!(plt5, 1, real(poly), imag(poly))
end
if 500 < itrj < 1000
if itrj % 10 == 0
push!(plt5, 1, real(poly), imag(poly))
end
end
if 1000 < itrj
if itrj % 50 == 0
push!(plt5, 1, real(poly), imag(poly))
end
end
#
push!(plt6, 1, itrj, angle(poly))
#
plot(plt1, plt2, plt3, plt4, plt5, plt6, layout = 6)
gui()
end
@assert parameters.update_method == "Heatbath"
verbose = Verbose_1()
Nsteps = parameters.Nsteps
numaccepts = 0
plaq, poly = measurements(0, univ.U, univ, meas; verbose = verbose) # check consistency of preparation.
for itrj = 1:Nsteps
@time heatbath!(univ)
plaq, poly = measurements(itrj, univ.U, univ, meas; verbose = verbose)
plot_refresh!(
plt1,
plt2,
plt3,
plt4,
plt5,
plt6,
hist_plaq,
hist_poly,
hist_poly_θ,
plaq,
poly,
itrj,
)
println_verbose1(verbose, "-------------------------------------")
#println("-------------------------------------")
flush(stdout)
flush(verbose)
end
end
function run_demo!(parameters, univ)
println_verbose_level1(univ.U[1], "# ", pwd())
println_verbose_level1(univ.U[1], "# ", Dates.now())
io = IOBuffer()
InteractiveUtils.versioninfo(io)
versioninfo = String(take!(io))
println_verbose_level1(univ.U[1], versioninfo)
updatemethod = Updatemethod(parameters, univ)
eps_flow = parameters.eps_flow
numflow = parameters.numflow
Nflow = parameters.Nflow
dτ = Nflow * eps_flow
gradientflow = Gradientflow(univ.U, Nflow = 1, eps = eps_flow)
measurements =
Measurement_methods(univ.U, parameters.measuredir, parameters.measurement_methods)
i_plaq = 0
for i = 1:measurements.num_measurements
if typeof(measurements.measurements[i]) == Plaquette_measurement
i_plaq = i
plaq_m = measurements.measurements[i]
end
end
if i_plaq == 0
plaq_m = Plaquette_measurement(univ.U, printvalues = false)
end
plaq_m = Plaquette_measurement(univ.U, printvalues = false)
poly_m = Polyakov_measurement(univ.U, printvalues = false)
measurements_for_flow =
Measurement_methods(univ.U, parameters.measuredir, parameters.measurements_for_flow)
numaccepts = 0
plt1 = histogram([0], label = nothing) #plot1
plt2 = plot([], [], label = nothing) #plot2
ylabel!("Plaquette")
xlabel!("MC time")
plt3 = histogram([0], label = nothing) #plot3
plt4 = plot([], [], label = nothing) #plot4
ylabel!("|Polyakov loop|")
xlabel!("MC time")
plt5 = scatter([], [], label = nothing, title = "Polyakov loop") #plot5 scatter
ylabel!("Im")
xlabel!("Re")
plt6 = plot([], [], label = nothing) #plot6
ylabel!("Arg(Polyakov loop)")
xlabel!("MC time")
plot(plt1, plt2, plt3, plt4, plt5, plt6, layout = 6)
hist_plaq = []
hist_poly = []
hist_poly_θ = []
function plot_refresh!(
plt1,
plt2,
plt3,
plt4,
plt5,
plt6,
hist_plaq,
hist_poly,
hist_poly_θ,
plaq,
poly,
itrj,
)
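        # Grow the number of histogram bins logarithmically with the trajectory count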
bins = round(Int, log(itrj) * 4 + 1)
if itrj < 500
bins = round(Int, log(itrj) * 1.5 + 1)
elseif itrj < 1000
bins = round(Int, log(itrj) * 2.5 + 1)
elseif itrj < 2000
bins = round(Int, log(itrj) * 3.5 + 1)
end
append!(hist_plaq, plaq)
append!(hist_poly, abs(poly))
# omit un-thermalized part
if (10 < itrj < 1000) & (itrj % 5 == 0)
print("remove unthermalize part $(length(hist_plaq)) -> ")
popfirst!(hist_plaq)
popfirst!(hist_poly)
println(" $(length(hist_plaq))")
end
#
plt1 = histogram(hist_plaq, bins = bins, label = nothing) #plot1
#plot!(plt1,title="SU(3), Quenched, L=6^4,")
plot!(plt1, title = "SU(3), Quenched, ")
xlabel!("Plaquette")
plot!(plt2, title = "L=4^4, Heatbath")
plt3 = histogram(hist_poly, bins = bins, label = nothing) #plot3
xlabel!("|Polyakov loop|")
#
if itrj < 500
push!(plt2, 1, itrj, plaq)
elseif 500 < itrj < 1000
if itrj % 2 == 0
push!(plt2, 1, itrj, plaq)
end
elseif 1000 < itrj
if itrj % 5 == 0
push!(plt2, 1, itrj, plaq)
end
end
push!(plt4, 1, itrj, abs(poly))
if itrj < 500
push!(plt5, 1, real(poly), imag(poly))
end
if 500 < itrj < 1000
if itrj % 10 == 0
push!(plt5, 1, real(poly), imag(poly))
end
end
if 1000 < itrj
if itrj % 50 == 0
push!(plt5, 1, real(poly), imag(poly))
end
end
#
push!(plt6, 1, itrj, angle(poly))
#
plot(plt1, plt2, plt3, plt4, plt5, plt6, layout = 6)
gui()
end
#@assert parameters.update_method == "Heatbath"
#verbose = Verbose_1()
#Nsteps = parameters.Nsteps
numaccepts = 0
#plaq, poly = measurements(0, univ.U, univ, meas; verbose = verbose) # check consistency of preparation.
plaq = get_value(measure(plaq_m, univ.U; additional_string = "0 "))
poly = get_value(measure(poly_m, univ.U; additional_string = "0 "))
    for itrj = parameters.initialtrj:parameters.Nsteps
#@time heatbath!(univ)
@time update!(updatemethod, univ.U)
plaq = get_value(measure(plaq_m, univ.U; additional_string = "$itrj "))
poly = get_value(measure(poly_m, univ.U; additional_string = "$itrj "))
#plaq, poly = measurements(itrj, univ.U, univ, meas; verbose = verbose)
plot_refresh!(
plt1,
plt2,
plt3,
plt4,
plt5,
plt6,
hist_plaq,
hist_poly,
hist_poly_θ,
plaq,
poly,
itrj,
)
println(itrj, "\t", plaq, " # plaq")
println(itrj, "\t", real(poly), "\t", imag(poly), " # poly")
println("-----------------------------------")
#println_verbose1(verbose, "-------------------------------------")
#println("-------------------------------------")
flush(stdout)
#flush(verbose)
end
end
end
| LatticeQCD | https://github.com/akio-tomiya/LatticeQCD.jl.git |
|
[
"MIT"
] | 1.1.0 | e495a1eaee52a513e02fe6ca2c995a7d0ed671a7 | code | 1247 | system["saveU_dir"] = ""
system["verboselevel"] = 1
#system["L"] = (6, 6, 6, 6)
system["L"] = (4, 4, 4, 4)
system["Nwing"] = 1
system["Nsteps"] = 10000
system["quench"] = true
system["logfile"] = "Heatbath_L06060606_beta8.0_quenched.txt"
system["initial"] = "hot"
system["Dirac_operator"] = nothing
system["log_dir"] = "./logs"
system["Nthermalization"] = 10
system["update_method"] = "Heatbath"
system["randomseed"] = 111
system["NC"] = 3
system["saveU_every"] = 1
system["BoundaryCondition"] = [1, 1, 1, -1]
system["saveU_format"] = nothing
#system["β"] = 6.93015
system["β"] = 6.75850661032668353
actions["use_autogeneratedstaples"] = false
actions["couplingcoeff"] = Any[]
actions["couplinglist"] = Any[]
md["Δτ"] = 0.05
md["SextonWeingargten"] = false
md["MDsteps"] = 20
cg["eps"] = 1.0e-19
cg["MaxCGstep"] = 3000
measurement["measurement_methods"] = Dict[
Dict{Any,Any}(
"fermiontype" => nothing,
"measure_every" => 1,
"methodname" => "Polyakov_loop",
),
Dict{Any,Any}(
"fermiontype" => nothing,
"measure_every" => 1,
"methodname" => "Plaquette",
),
]
measurement["measurement_dir"] = "Heatbath_L06060606_beta8.0_quenched"
measurement["measurement_basedir"] = "./measurements"
| LatticeQCD | https://github.com/akio-tomiya/LatticeQCD.jl.git |
|
[
"MIT"
] | 1.1.0 | e495a1eaee52a513e02fe6ca2c995a7d0ed671a7 | code | 1490 | module Realtimeplot
using Plots
# prep the plots
plt1 = histogram([0], label = nothing) #plot1
plt2 = plot([], [], label = nothing) #plot2
ylabel!("Plaquette")
xlabel!("MC time")
plt3 = histogram([0], label = nothing) #plot3
plt4 = plot([], [], label = nothing) #plot4
ylabel!("Polyakov loop")
xlabel!("MC time")
plot(plt1, plt2, plt3, plt4, layout = 4)
function mock() #mock data
plaq = 0.6 + 0.1 * randn()
if rand() < 0.5
poly = -1.5 + randn() / 2
else
poly = 1.5 + randn() / 2
end
sleep(0.01)
return plaq, poly
end
hist_plaq = []
hist_poly = []
function plot_refresh!(plt1, plt2, plt3, plt4, hist_plaq, hist_poly, plaq, poly, itrj)
bins = round(Int, log(itrj) * 5 + 1)
append!(hist_plaq, plaq)
append!(hist_poly, poly)
#
plt1 = histogram(hist_plaq, bins = bins, label = nothing) #plot1
xlabel!("Plaquette")
plt3 = histogram(hist_poly, bins = bins, label = nothing) #plot3
xlabel!("Polyakov loop")
#
push!(plt2, 1, itrj, plaq)
push!(plt4, 1, itrj, poly)
#
plot(plt1, plt2, plt3, plt4, layout = 4)
gui()
end
Ntrj = 1000
for itrj = 1:Ntrj
plaq, poly = mock()
#
plot_refresh!(plt1, plt2, plt3, plt4, hist_plaq, hist_poly, plaq, poly, itrj)
#
end
#=
plt = plot([0,0.1], Any[rand(2),sin])
for x in 0.2:0.1:π
push!(plt, 1, x, rand())
push!(plt, 2, x, sin(x))
gui(); sleep(0.5)
end
=#
#=
import PyPlot: plt
x = [1,2,3]
y = [1,2,3]
plt.plot(x,y)
plt.show()
=#
end
| LatticeQCD | https://github.com/akio-tomiya/LatticeQCD.jl.git |
|
[
"MIT"
] | 1.1.0 | e495a1eaee52a513e02fe6ca2c995a7d0ed671a7 | code | 1675 | system["L"] = (8, 8, 8, 8)
system["β"] = 5.7
system["NC"] = 3
system["quench"] = true
system["SextonWeingargten"] = false
system["N_SextonWeingargten"] = 10
md["MDsteps"] = 20
md["Δτ"] = 1 / md["MDsteps"]
system["Dirac_operator"] = nothing
md["Nthermalization"] = 10
md["Nsteps"] = 100 + md["Nthermalization"]
system["update_method"] = "Fileloading"
system["loadU_format"] = "JLD"
system["loadU_dir"] = "confs"
measurement["measurement_methods"] = Array{Dict,1}(undef, 2)
for i = 1:length(measurement["measurement_methods"])
measurement["measurement_methods"][i] = Dict()
end
measurement["measurement_methods"][1]["methodname"] = "Plaquette"
measurement["measurement_methods"][1]["measure_every"] = 1
measurement["measurement_methods"][1]["fermiontype"] = nothing
measurement["measurement_methods"][2]["methodname"] = "Polyakov_loop"
measurement["measurement_methods"][2]["measure_every"] = 1
measurement["measurement_methods"][2]["fermiontype"] = nothing
#=
measurement["measurement_methods"][3]["methodname"] = "Topological_charge"
measurement["measurement_methods"][3]["measure_every"] = 10
measurement["measurement_methods"][3]["fermiontype"] = nothing
measurement["measurement_methods"][3]["numflow"] = 10
measurement["measurement_methods"][4]["methodname"] = "Chiral_condensate"
measurement["measurement_methods"][4]["measure_every"] = 20
measurement["measurement_methods"][4]["fermiontype"] = "Staggered"
measurement["measurement_methods"][4]["Nf"] = 4
measurement["measurement_methods"][5]["methodname"] = "Pion_correlator"
measurement["measurement_methods"][5]["measure_every"] = 20
measurement["measurement_methods"][5]["fermiontype"] = "Wilson"
=#
| LatticeQCD | https://github.com/akio-tomiya/LatticeQCD.jl.git |
|
[
"MIT"
] | 1.1.0 | e495a1eaee52a513e02fe6ca2c995a7d0ed671a7 | code | 436 | using LatticeQCD
betas = ["1.40", "1.65"]
println("# beta plaquette")
for beta in betas
dir = "samples/HMC_L08080808_su2nf8_beta$(beta)_Staggered_mass0.015"
p = get_plaquette_average(dir)
println("$beta $p #$dir")
end
println("# ")
println("# beta Polyakov loop")
for beta in betas
dir = "samples/HMC_L08080808_su2nf8_beta$(beta)_Staggered_mass0.015"
p = get_polyakov_average(dir)
println("$beta $p #$dir")
end
| LatticeQCD | https://github.com/akio-tomiya/LatticeQCD.jl.git |
|
[
"MIT"
] | 1.1.0 | e495a1eaee52a513e02fe6ca2c995a7d0ed671a7 | code | 5135 | # https://inspirehep.net/literature/283285
# Lattice setup
system["L"] = (12, 12, 12, 8)
#system["β"] = 5.4;system["initial"] = "cold";md["MDsteps"] = 35; md["Δτ"] = 1/md["MDsteps"]
system["β"] = 5.3;
system["initial"] = "cold";
md["MDsteps"] = 35;
md["Δτ"] = 1 / md["MDsteps"];
#system["β"] = 5.175;initial = "cold";MDsteps = 40; Δτ = 1/MDsteps
#β = 5.1;initial = "hot";MDsteps = 80; Δτ = 0.5/MDsteps
system["Dirac_operator"] = "Staggered"
system["Nf"] = 4
system["NC"] = 3
staggered["mass"] = 0.025
# HMC
system["quench"] = false
md["SextonWeingargten"] = false
#N_SextonWeingargten = 25
md["Nsteps"] = 5000
system["update_method"] = "HMC"
system["saveU_format"] = "JLD"
system["saveU_every"] = 1
system["saveU_dir"] = "./beta53_confs"
# Measurements
measurement["measurement_methods"] = Array{Dict,1}(undef, 3)
for i = 1:length(measurement["measurement_methods"])
measurement["measurement_methods"][i] = Dict()
end
measurement["measurement_methods"][1]["methodname"] = "Plaquette"
measurement["measurement_methods"][1]["measure_every"] = 1
measurement["measurement_methods"][1]["fermiontype"] = nothing
#
measurement["measurement_methods"][2]["methodname"] = "Polyakov_loop"
measurement["measurement_methods"][2]["measure_every"] = 1
measurement["measurement_methods"][2]["fermiontype"] = nothing
#
measurement["measurement_methods"][3]["methodname"] = "Chiral_condensate"
measurement["measurement_methods"][3]["fermiontype"] = "Staggered"
measurement["measurement_methods"][3]["measure_every"] = staggered["mass"]
measurement["measurement_methods"][3]["mass"] = 1
measurement["measurement_methods"][3]["Nf"] = 4
#mass_measurement = mass
#Nr = 10
#
#=
Title:
The Finite Temperature Phase Transition in Four Flavor {QCD} on an 8 X 12^{3} Lattice
Authors
MT(c) Collaboration:
R.V. Gavai(Tata Inst. and Bielefeld U. and CERN and Kaiserslautern U. and Illinois U., Urbana),
Sourendu Gupta(Tata Inst. and Bielefeld U. and CERN and Kaiserslautern U. and Illinois U., Urbana),
A. Irback(Tata Inst. and Bielefeld U. and CERN and Kaiserslautern U. and Illinois U., Urbana),
F. Karsch(Tata Inst. and Bielefeld U. and CERN and Kaiserslautern U. and Illinois U., Urbana),
S. Meyer(Tata Inst. and Bielefeld U. and CERN and Kaiserslautern U. and Illinois U., Urbana),
B. Petersson(Tata Inst. and Bielefeld U. and CERN and Kaiserslautern U. and Illinois U., Urbana),
H. Satz(Tata Inst. and Bielefeld U. and CERN and Kaiserslautern U. and Illinois U., Urbana),
H.W. Wyld(Tata Inst. and Bielefeld U. and CERN and Kaiserslautern U. and Illinois U., Urbana)
Sep, 1989
Abstract: (Elsevier)
We present results of a numerical study of lattice QCD with four dynamical flavours of staggered fermions,
performed by using a hybrid Monte Carlo algorithm on an 8×12^3 lattice. We find a rapid change in
the average value of the Polyakov loop at β_c = 5.25±0.025 for a quark mass ma = 0.025; at this mass value,
the behaviour of the chiral order parameter ⟨Ψ̄Ψ⟩ does not yet allow an independent determination of
the transition point. Using existing hadron mass calculations, the value of β_c we have obtained here would
lead to a transition temperature T ∼ 100 MeV.
DOI:
10.1016/0370-2693(89)90447-4
=#
# Results Fig 2b (By plot digitizer)
#= β PbP
5.100, 0.662943264422041
5.175, 0.3819303478073508
5.200, 0.2874088522470827
5.250, 0.24762703983851386
5.300, 0.2109864936891166
5.400, 0.1686361007473265
5.600, 0.13783581497147912
=#
#= Plot Digitizer json for fig 2 b
{"version":[4,2],"axesColl":[{"name":"XY","type":"XYAxes","isLogX":false,"isLogY":false,"calibrationPoints":[{"px":122.93685756240822,"py":475.712187958884,"dx":"5.0","dy":"0.0","dz":null},{"px":391.7180616740088,"py":477.2393538913363,"dx":"5.7","dy":"0.0","dz":null},{"px":77.12187958883995,"py":476.4757709251101,"dx":"5.0","dy":"0.0","dz":null},{"px":80.93979441997062,"py":93.92070484581498,"dx":"5.7","dy":"1.0","dz":null}]}],"datasetColl":[{"name":"Default Dataset","axesName":"XY","metadataKeys":[],"colorRGB":[200,0,0,255],"data":[{"x":353.92070484581495,"y":425.31571218795887,"value":[5.600218584502331,0.13783581497147912]},{"x":277.56240822320115,"y":413.09838472834065,"value":[5.401048695297173,0.1686361007473265]},{"x":239.76505139500736,"y":396.6813509544787,"value":[5.302190097640081,0.2109864936891166]},{"x":230.22026431718064,"y":389.8091042584435,"value":[5.277154942674726,0.2288077930619974]},{"x":220.67547723935388,"y":382.55506607929516,"value":[5.252109864936891,0.24762703983851386]},{"x":211.51248164464025,"y":367.2834067547724,"value":[5.22785067077942,0.2874088522470827]},{"x":192.42290748898682,"y":331.0132158590308,"value":[5.177194917272429,0.3819303478073508]},{"x":164.9339207048458,"y":223.34801762114537,"value":[5.102809845658362,0.662943264422041]}],"autoDetectionData":{"fgColor":[0,0,255],"bgColor":[255,255,255],"mask":[],"colorDetectionMode":"fg","colorDistance":120,"algorithm":{"algoType":"AveragingWindowAlgo","xStep":10,"yStep":10},"name":0,"imageWidth":442,"imageHeight":520}}],"measurementColl":[]}
=#
| LatticeQCD | https://github.com/akio-tomiya/LatticeQCD.jl.git |
|
[
"MIT"
] | 1.1.0 | e495a1eaee52a513e02fe6ca2c995a7d0ed671a7 | code | 2698 | module AbstractMD_module
using Gaugefields
using LatticeDiracOperators
using LinearAlgebra
import ..Universe_module: Univ, get_gauge_action, is_quenched
import ..System_parameters: Params
abstract type AbstractMD{Dim,TG} end
include("./standardMD.jl")
function MD(
U,
gauge_action,
quench,
Δτ,
MDsteps,
fermi_action = nothing,
cov_neural_net = nothing;
SextonWeingargten = false,
Nsw = 2,
)
#=
if SextonWeingargten
if quench == true
error("The quench update does not need the SextonWeingargten method. Put SextonWeingargten = false")
end
else
md = StandardMD(U,gauge_action,quench,Δτ,MDsteps,fermi_action)
end
=#
md = StandardMD(
U,
gauge_action,
quench,
Δτ,
MDsteps,
fermi_action,
cov_neural_net,
SextonWeingargten = SextonWeingargten,
Nsw = Nsw,
)
return md
end
function MD(p::Params, univ::Univ)
gauge_action = get_gauge_action(univ)
quench = is_quenched(univ)
# positional order follows MD(U, gauge_action, quench, Δτ, MDsteps, fermi_action, cov_neural_net; ...) defined above
md = MD(
univ.U,
gauge_action,
quench,
p.Δτ,
p.MDsteps,
nothing,
p.cov_neural_net,
SextonWeingargten = p.SextonWeingargten,
Nsw = p.N_SextonWeingargten,
)
return md
end
function runMD!(U, md::AbstractMD{Dim,TG}) where {Dim,TG}
error("runMD! with type $(typeof(md)) is not supported")
end
function initialize_MD!(U, md::AbstractMD{Dim,TG}) where {Dim,TG}
error("initialize_MD! with type $(typeof(md)) is not supported")
end
function U_update!(U, p, ϵ, md::AbstractMD{Dim,TG}) where {Dim,TG}
temps = get_temporary_gaugefields(md.gauge_action)
temp1 = temps[1]
temp2 = temps[2]
expU = temps[3]
W = temps[4]
for μ = 1:Dim
exptU!(expU, ϵ * md.Δτ, p[μ], [temp1, temp2])
mul!(W, expU, U[μ])
substitute_U!(U[μ], W)
end
end
function P_update!(U, p, ϵ, md::AbstractMD{Dim,TG}) where {Dim,TG} # p -> p +factor*U*dSdUμ
NC = U[1].NC
temps = get_temporary_gaugefields(md.gauge_action)
dSdUμ = temps[end]
factor = -ϵ * md.Δτ / (NC)
#factor = ϵ*md.Δτ/(NC)
for μ = 1:Dim
calc_dSdUμ!(dSdUμ, md.gauge_action, μ, U)
mul!(temps[1], U[μ], dSdUμ) # U*dSdUμ
Traceless_antihermitian_add!(p[μ], factor, temps[1])
end
end
function P_update_fermion!(U, p, ϵ, md::AbstractMD{Dim,TG}) where {Dim,TG} # p -> p +factor*U*dSdUμ
#NC = U[1].NC
temps = get_temporary_gaugefields(md.gauge_action)
UdSfdUμ = temps[1:Dim]
factor = -ϵ * md.Δτ
calc_UdSfdU!(UdSfdUμ, md.fermi_action, U, md.η)
for μ = 1:Dim
Traceless_antihermitian_add!(p[μ], factor, UdSfdUμ[μ])
end
end
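#=
Sketch of one leapfrog (QPQ) step built from the primitives above; this mirrors runMD_QPQ!
in standardMD.jl and assumes an already constructed integrator `md` with momenta `p`:
    U_update!(U, p, 0.5, md)          # U_μ ← exp(0.5 Δτ p_μ) U_μ
    P_update!(U, p, 1.0, md)          # p_μ ← p_μ - (Δτ/NC) TA[U_μ dS_g/dU_μ]
    P_update_fermion!(U, p, 1.0, md)  # only when quench == false
    U_update!(U, p, 0.5, md)          # U_μ ← exp(0.5 Δτ p_μ) U_μ
=#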
end
| LatticeQCD | https://github.com/akio-tomiya/LatticeQCD.jl.git |
|
[
"MIT"
] | 1.1.0 | e495a1eaee52a513e02fe6ca2c995a7d0ed671a7 | code | 5461 | using InteractiveUtils
struct StandardMD{Dim,TG,TA,quench,T_FA,TF,TC} <: AbstractMD{Dim,TG}
gauge_action::GaugeAction{Dim,TG}
quench::Bool
Δτ::Float64
MDsteps::Float64
p::Vector{TA}
QPQ::Bool
fermi_action::T_FA
η::TF
ξ::TF
SextonWeingargten::Bool
Nsw::Int64
cov_neural_net::TC
dSdU::Union{Nothing,Vector{TG}}
function StandardMD(
U,
gauge_action::GaugeAction{Dim,TG},
quench,
Δτ,
MDsteps,
fermi_action = nothing,
cov_neural_net = nothing;
QPQ = true,
SextonWeingargten = false,
Nsw = 2,
) where {Dim,TG}
p = initialize_TA_Gaugefields(U) #This is a traceless-antihermitian gauge fields. This has NC^2-1 real coefficients.
TA = eltype(p)
T_FA = typeof(fermi_action)
if quench
η = nothing
ξ = nothing
if SextonWeingargten
error(
"The quench update does not need the SextonWeingargten method. Put SextonWeingargten = false",
)
end
else
if fermi_action == nothing
η = nothing
ξ = nothing
else
η = similar(fermi_action._temporary_fermionfields[1])
ξ = similar(η)
end
end
TF = typeof(η)
@assert Nsw % 2 == 0 "Nsw should be even number! now Nsw = $Nsw"
TC = typeof(cov_neural_net)
if TC != Nothing
dSdU = similar(U)
else
dSdU = nothing
end
return new{Dim,TG,TA,quench,T_FA,TF,TC}(
gauge_action,
quench,
Δτ,
MDsteps,
p,
QPQ,
fermi_action,
η,
ξ,
SextonWeingargten,
Nsw,
cov_neural_net,
dSdU,
)
end
end
function initialize_MD!(
U,
md::StandardMD{Dim,TG,TA,quench,T_FA,TF,TC},
) where {Dim,TG,TA,quench,T_FA,TF,TC}
gauss_distribution!(md.p) #initial momentum
if quench == false
if TC != Nothing
Uout, Uout_multi, _ = calc_smearedU(U, md.cov_neural_net)
gauss_sampling_in_action!(md.ξ, Uout, md.fermi_action)
sample_pseudofermions!(md.η, Uout, md.fermi_action, md.ξ)
else
gauss_sampling_in_action!(md.ξ, U, md.fermi_action)
sample_pseudofermions!(md.η, U, md.fermi_action, md.ξ)
end
#error("not supported yet")
end
end
function runMD!(
U,
md::StandardMD{Dim,TG,TA,quench,T_FA,TF,TC},
) where {Dim,TG,TA,quench,T_FA,TF,TC}
#p = md.p
if md.QPQ
if md.SextonWeingargten
runMD_QPQ_sw!(U, md)
else
runMD_QPQ!(U, md)
end
else
if md.SextonWeingargten
error("PQP update with SextonWeingargten is not supported")
else
runMD_PQP!(U, md)
end
end
#error("type $(typeof(md)) is not supported")
end
function runMD_QPQ!(
U,
md::StandardMD{Dim,TG,TA,quench,T_FA,TF,TC},
) where {Dim,TG,TA,quench,T_FA,TF,TC}
p = md.p
for itrj = 1:md.MDsteps
U_update!(U, p, 0.5, md)
P_update!(U, p, 1.0, md)
if quench == false
P_update_fermion!(U, p, 1.0, md)
end
U_update!(U, p, 0.5, md)
end
#error("type $(typeof(md)) is not supported")
end
function runMD_QPQ_sw!(
U,
md::StandardMD{Dim,TG,TA,quench,T_FA,TF,TC},
) where {Dim,TG,TA,quench,T_FA,TF,TC}
p = md.p
for itrj = 1:md.MDsteps
for isw = 1:div(md.Nsw, 2)
U_update!(U, p, 0.5 / md.Nsw, md)
P_update!(U, p, 1.0 / md.Nsw, md)
U_update!(U, p, 0.5 / md.Nsw, md)
end
if quench == false
P_update_fermion!(U, p, 1.0, md)
end
for isw = 1:div(md.Nsw, 2)
U_update!(U, p, 0.5 / md.Nsw, md)
P_update!(U, p, 1.0 / md.Nsw, md)
U_update!(U, p, 0.5 / md.Nsw, md)
end
end
#error("type $(typeof(md)) is not supported")
end
function runMD_PQP!(
U,
md::StandardMD{Dim,TG,TA,quench,T_FA,TF,TC},
) where {Dim,TG,TA,quench,T_FA,TF,TC}
p = md.p
for itrj = 1:md.MDsteps
P_update!(U, p, 0.5, md)
if quench == false
P_update_fermion!(U, p, 0.5, md)
end
U_update!(U, p, 1.0, md)
P_update!(U, p, 0.5, md)
if quench == false
P_update_fermion!(U, p, 0.5, md)
end
end
#error("type $(typeof(md)) is not supported")
end
function P_update_fermion!(
U,
p,
ϵ,
md::StandardMD{Dim,TG,TA,quench,T_FA,TF,TC},
) where {Dim,TG,TA,quench,T_FA,TF,TC<:CovNeuralnet{Dim}} # p -> p +factor*U*dSdUμ
#NC = U[1].NC
temps = get_temporary_gaugefields(md.gauge_action)
UdSfdUμ = temps[1:Dim]
factor = -ϵ * md.Δτ
Uout, Uout_multi, _ = calc_smearedU(U, md.cov_neural_net)
for μ = 1:Dim
calc_UdSfdU!(UdSfdUμ, md.fermi_action, Uout, md.η)
mul!(md.dSdU[μ], Uout[μ]', UdSfdUμ[μ])
end
#calc_UdSfdU!(UdSfdUμ, md.fermi_action, U, md.η)
dSdUbare = back_prop(md.dSdU, md.cov_neural_net, Uout_multi, U)
for μ = 1:Dim
#Traceless_antihermitian_add!(p[μ], factor, UdSfdUμ[μ])
mul!(temps[1], U[μ], dSdUbare[μ]) # U*dSdUμ
Traceless_antihermitian_add!(p[μ], factor, temps[1])
end
end
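#=
Typical call sequence for one trajectory with this integrator (sketch; assumes `U` and a
GaugeAction `gauge_action` are already prepared, quenched case, hypothetical step size):
    md = StandardMD(U, gauge_action, true, 0.05, 20)
    initialize_MD!(U, md)   # draws Gaussian momenta (and pseudofermions when not quenched)
    runMD!(U, md)           # QPQ leapfrog evolution; the Metropolis accept/reject is done by the caller
=#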
| LatticeQCD | https://github.com/akio-tomiya/LatticeQCD.jl.git |
|
[
"MIT"
] | 1.1.0 | e495a1eaee52a513e02fe6ca2c995a7d0ed671a7 | code | 7353 | import Gaugefields.Abstractsmearing
function set_parameter_default(method, key, defaultvalue)
if haskey(method, key)
return method[key]
else
return defaultvalue
end
end
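#=
Usage sketch with a hypothetical method Dict: values present in the Dict are returned,
otherwise the supplied default is used.
    method = Dict("methodname" => "Chiral_condensate", "mass" => 0.025)
    mass = set_parameter_default(method, "mass", 0.1)   # -> 0.025
    Nf = set_parameter_default(method, "Nf", 2)         # -> 2 (default)
=#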
struct Measurements_set
nummeasurements::Int64
measurements::Array{AbstractMeasurement,1}
measurement_methods::Array{Dict,1}
methodnames::Array{String,1}
baremesurement_indices::Array{Int64,1}
flowmeasurement_indices::Array{Int64,1}
function Measurements_set(U, measurement_dir, measurement_methods)
nummeasurements = length(measurement_methods)
measurements = Array{AbstractMeasurement,1}(undef, nummeasurements)
methodnames = Array{String,1}(undef, nummeasurements)
for i = 1:nummeasurements
method = measurement_methods[i]
methodnames[i] = method["methodname"]
if method["methodname"] == "Plaquette"
measurements[i] =
Plaquette_measurement(U, filename = measurement_dir * "/Plaquette.txt")
elseif method["methodname"] == "Polyakov_loop"
measurements[i] = Polyakov_measurement(
U,
filename = measurement_dir * "/Polyakov_loop.txt.txt",
)
elseif method["methodname"] == "Topological_charge"
TC_methods =
set_parameter_default(method, "TC_methods", ["plaquette", "clover"])
measurements[i] = Topological_charge_measurement(
U,
filename = measurement_dir * "/Topological_charge.txt",
TC_methods = TC_methods,
)
elseif method["methodname"] == "Energy_density"
measurements[i] = Energy_density_measurement(
U,
filename = measurement_dir * "/Energy_density.txt",
)
elseif method["methodname"] == "Chiral_condensate"
baremeasure = set_parameter_default(method, "bare measure", true)
flowmeasure = set_parameter_default(method, "flow measure", false)
fermiontype = set_parameter_default(method, "fermiontype", "Staggered")
mass = set_parameter_default(method, "mass", 0.1)
Nf = set_parameter_default(method, "Nf", 2)
κ = set_parameter_default(method, "hop", 0.141139)
r = set_parameter_default(method, "r", 1)
M = set_parameter_default(method, "Domainwall_M", -1)
L5 = set_parameter_default(method, "Domainwall_L5", 2)
mass = set_parameter_default(method, "Domainwall_m", mass)
BoundaryCondition =
set_parameter_default(method, "BoundaryCondition", nothing)
eps_CG = set_parameter_default(method, "eps", 1e-14)
MaxCGstep = set_parameter_default(method, "MaxCGstep", 3000)
Nr = set_parameter_default(method, "Nr", 10)
verbose_level = set_parameter_default(method, "verbose_level", 2)
measurements[i] = Chiral_condensate_measurement(
U,
filename = measurement_dir * "/Chiral_condensate.txt",
fermiontype = fermiontype,
mass = mass,
Nf = Nf,
κ = κ,
r = r,
L5 = L5,
M = M,
eps_CG = eps_CG,
MaxCGstep = MaxCGstep,
BoundaryCondition = BoundaryCondition,
Nr = Nr,
verbose_level = verbose_level,
)
#measurements[i] = Measure_chiral_condensate(measurement_dir*"/Chiral_condensate.txt",univ.U,method)
elseif method["methodname"] == "Pion_correlator"
baremeasure = set_parameter_default(method, "bare measure", true)
flowmeasure = set_parameter_default(method, "flow measure", false)
fermiontype = set_parameter_default(method, "fermiontype", "Staggered")
mass = set_parameter_default(method, "mass", 0.1)
Nf = set_parameter_default(method, "Nf", 2)
κ = set_parameter_default(method, "hop", 0.141139)
r = set_parameter_default(method, "r", 1)
M = set_parameter_default(method, "Domainwall_M", -1)
L5 = set_parameter_default(method, "Domainwall_L5", 2)
mass = set_parameter_default(method, "Domainwall_m", mass)
BoundaryCondition =
set_parameter_default(method, "BoundaryCondition", nothing)
eps_CG = set_parameter_default(method, "eps", 1e-14)
MaxCGstep = set_parameter_default(method, "MaxCGstep", 3000)
#Nr = set_parameter_default(method,"Nr",10)
verbose_level = set_parameter_default(method, "verbose_level", 2)
measurements[i] = Pion_correlator_measurement(
U,
filename = measurement_dir * "/Pion_correlator.txt",
fermiontype = fermiontype,
mass = mass,
Nf = Nf,
κ = κ,
r = r,
L5 = L5,
M = M,
eps_CG = eps_CG,
MaxCGstep = MaxCGstep,
BoundaryCondition = BoundaryCondition,
verbose_level = verbose_level,
)
#measurements[i] = Measure_Pion_correlator(measurement_dir*"/Pion_correlator.txt",univ.U,method)
elseif method["methodname"] == "Wilson_loop"
baremeasure = set_parameter_default(method, "bare measure", true)
flowmeasure = set_parameter_default(method, "flow measure", false)
Tmax = set_parameter_default(method, "Tmax", 4)
Rmax = set_parameter_default(method, "Rmax", 4)
measurements[i] = Pion_correlator_measurement(
U,
filename = measurement_dir * "/Pion_correlator.txt",
Tmax = Tmax,
Rmax = Rmax,
verbose_level = verbose_level,
)
else
error("$(method["methodname"]) is not supported")
end
end
baremesurement_indices = findall(m -> set_parameter_default(m, "bare measure", true), measurement_methods)
flowmeasurement_indices = findall(m -> set_parameter_default(m, "flow measure", false), measurement_methods)
return new(
nummeasurements,#::Int64
measurements,#::Array{AbstractMeasurement,1}
measurement_methods,#::Array{Dict,1}
methodnames,#::Array{String,1}
baremesurement_indices,#::Array{Int64,1}
flowmeasurement_indices,#::Array{Int64,1}
)
end
end
function measure(m::Measurements_set, itrj, U)
additional_string = "$itrj 0 0.0 "
for i in m.baremesurement_indices
measure(m.measurements[i], U, additional_string = additional_string)
end
end
function measure_withflow(
m::Measurements_set,
itrj,
U,
smearing::Abstractsmearing,
numstep,
dτ,
)
Usmr = deepcopy(U)
for istep = 1:numstep
τ = istep * dτ
flow!(Usmr, smearing)
additional_string = "$itrj $istep $τ "
for i in m.flowmeasurement_indices
measure(m.measurements[i], Usmr, additional_string = additional_string)
end
end
end
| LatticeQCD | https://github.com/akio-tomiya/LatticeQCD.jl.git |
|
[
"MIT"
] | 1.1.0 | e495a1eaee52a513e02fe6ca2c995a7d0ed671a7 | code | 3938 | mutable struct Wilson_loop_measurement{Dim,TG} <: AbstractMeasurement
filename::Union{Nothing,String}
_temporary_gaugefields::Vector{TG}
Dim::Int8
#factor::Float64
verbose_print::Union{Verbose_print,Nothing}
printvalues::Bool
function Wilson_loop_measurement(
U::Vector{T};
filename = nothing,
verbose_level = 2,
printvalues = true,
) where {T}
myrank = get_myrank(U)
#=
if U[1].mpi == false
myrank = 0
else
myrank = U[1].myrank
end
=#
if printvalues
verbose_print = Verbose_print(verbose_level, myid = myrank, filename = filename)
else
verbose_print = nothing
end
Dim = length(U)
numg = 2
_temporary_gaugefields = Vector{T}(undef, numg)
_temporary_gaugefields[1] = similar(U[1])
for i = 2:numg
_temporary_gaugefields[i] = similar(U[1])
end
return new{Dim,T}(filename, _temporary_gaugefields, Dim, verbose_print, printvalues)
end
end
function Wilson_loop_measurement(U::Vector{T}, params::Poly_parameters, filename) where {T}
return Wilson_loop_measurement(
U,
filename = filename,
verbose_level = params.verbose_level,
printvalues = params.printvalues,
)
end
function measure(m::M, itrj, U; additional_string = "") where {M<:Wilson_loop_measurement}
temps = get_temporary_gaugefields(m)
poly = calculate_Polyakov_loop(U, temps[1], temps[2])
measurestring = ""
if m.printvalues
#println_verbose_level2(U[1],"-----------------")
measurestring = "$itrj $additional_string $(real(poly)) $(imag(poly)) # poly"
println_verbose_level2(m.verbose_print, measurestring)
#println_verbose_level2(U[1],"-----------------")
end
return poly, measurestring
end
function calc_Wilson_loop(U::Array{T,1}, Lt, Ls) where {T<:GaugeFields}
# Making a ( Ls × Lt) Wilson loop operator for potential calculations
WL = 0.0 + 0.0im
NV = U[1].NV
NC = U[1].NC
Wmat = Array{GaugeFields_1d,2}(undef, 4, 4)
#
calc_large_wiloson_loop!(Wmat, Lt, Ls, U) # make the Wilson loop operator and evaluate it as a field, not traced.
WL = calc_Wilson_loop_core(Wmat, U, NV) # tracing over color and average over spacetime and x,y,z.
NDir = 3.0 # in 4 dimensions there are 3 associated planes: t-x, t-y, t-z
return real(WL) / NV / NDir / NC
end
function calc_Wilson_loop_core(Wmat, U::Array{GaugeFields{S},1}, NV) where {S<:SUn}
if S == SU3
NC = 3
elseif S == SU2
NC = 2
else
NC = U[1].NC
#error("NC != 2,3 is not supported")
end
W = 0.0 + 0.0im
for n = 1:NV
for μ = 1:3 # spatial directions
ν = 4 # T-direction is not summed over
W += tr(Wmat[μ, ν][:, :, n])
end
end
return W
end
function calc_large_wiloson_loop!(Wmat, Lt, Ls, U)
W_operator = make_Wilson_loop(Lt, Ls, 4) # this file assumes Dim = 4
calc_large_wiloson_loop!(Wmat, W_operator, U)
return
end
function make_Wilson_loop(Lt, Ls, Dim)
#= Making a Wilson loop operator for potential calculations
Ls × Lt
ν=4
↑
+--+
| |
| |
| |
+--+ → μ=1,2,3 (averaged)
=#
Wmatset = Array{Vector{Wilsonline{Dim}},2}(undef, 4, 4)
for μ = 1:3 # spatial directions
ν = 4 # T-direction is not summed over
loops = Wilsonline{Dim}[]
loop = Wilsonline([(μ, Ls), (ν, Lt), (μ, -Ls), (ν, -Lt)])
push!(loops, loop)
Wmatset[μ, ν] = loops
end
return Wmatset
end
function calc_large_wiloson_loop!(temp_Wmat, loops_μν, U)
W = temp_Wmat
for μ = 1:3 # spatial directions
ν = 4 # T-direction is not summed over
loopset = Loops(U, loops_μν[μ, ν])
W[μ, ν] = evaluate_loops(loopset, U)
end
return
end
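#=
Evaluation sketch for the (Ls × Lt) loops defined above (assumes the Loops/evaluate_loops
API used in this file and hypothetical extents Ls = 2, Lt = 3):
    Wmat = Array{GaugeFields_1d,2}(undef, 4, 4)
    calc_large_wiloson_loop!(Wmat, 3, 2, U)          # untraced loop fields, one per spatial direction
    WL = calc_Wilson_loop_core(Wmat, U, U[1].NV)     # trace over color, sum over sites and μ = 1,2,3
    WL_avg = real(WL) / U[1].NV / 3 / U[1].NC        # normalization as in calc_Wilson_loop
=#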
| LatticeQCD | https://github.com/akio-tomiya/LatticeQCD.jl.git |
|
[
"MIT"
] | 1.1.0 | e495a1eaee52a513e02fe6ca2c995a7d0ed671a7 | code | 3269 |
#import ..Parameter_structs:construct_Measurement_parameters_from_dict,Measurement_parameters
import QCDMeasurements:
construct_Measurement_parameters_from_dict,
Measurement_parameters,
AbstractMeasurement,
prepare_measurement,
get_string
import QCDMeasurements:
Plaquette_measurement,
measure,
get_value,
Polyakov_measurement,
Pion_correlator_measurement,
Chiral_condensate_measurement,
Energy_density_measurement,
Topological_charge_measurement,
Wilson_loop_measurement
struct Measurement_methods
measurement_parameters_set::Vector{Measurement_parameters}
measurements::Vector{AbstractMeasurement}
num_measurements::Int64
intervals::Vector{Int64}
end
function calc_measurement_values(m::Measurement_methods, itrj, U; additional_string = "")
measurestrings = String[]
for i = 1:m.num_measurements
interval = m.intervals[i]
if itrj % interval == 0
outputvalue = measure(
m.measurements[i],
U,
additional_string = "$itrj " * additional_string,
)
push!(measurestrings, get_string(outputvalue))
end
end
return measurestrings
end
#=
function prepare_measurement(U,measurement_parameters::T,filename) where T
if T == Plaq_parameters
measurement = Plaquette_measurement(U,measurement_parameters,filename)
elseif T == Poly_parameters
measurement = Polyakov_measurement(U,measurement_parameters,filename)
elseif T == TopologicalCharge_parameters
measurement = Topological_charge_measurement(U,measurement_parameters,filename)
elseif T == ChiralCondensate_parameters
measurement = Chiral_condensate_measurement(U,measurement_parameters,filename)
elseif T == Pion_parameters
measurement = Pion_correlator_measurement(U,measurement_parameters,filename)
elseif T == Energy_density_parameters
measurement = Energy_density_measurement(U,measurement_parameters,filename)
else
error(T, " is not supported in measurements")
end
return measurement
end
=#
function Measurement_methods(
U,
measurement_dir,
measurement_methods::T,
) where {T<:Vector{Dict}}
#println( measurement_methods)
nummeasurements = length(measurement_methods)
measurements = Vector{AbstractMeasurement}(undef, nummeasurements)
measurement_parameters_set = Vector{Measurement_parameters}(undef, nummeasurements)
intervals = zeros(Int64, nummeasurements)
for (i, method) in enumerate(measurement_methods)
measurement_parameters = construct_Measurement_parameters_from_dict(method)
#println(measurement_parameters)
intervals[i] = measurement_parameters.measure_every
filename = measurement_dir * "/" * measurement_parameters.methodname * ".txt"
measurements[i] = prepare_measurement(U, measurement_parameters, filename)
measurement_parameters_set[i] = deepcopy(measurement_parameters)
end
#=
for i=1:nummeasurements
println(measurement_parameters_set[i] )
end
=#
return Measurement_methods(
measurement_parameters_set,
measurements,
nummeasurements,
intervals,
)
end
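#=
Minimal usage sketch (hypothetical directory and settings): build the measurement set from
Dicts and evaluate it; each entry is measured only when itrj is a multiple of its interval.
    methods = Dict[
        Dict("methodname" => "Plaquette", "measure_every" => 1, "fermiontype" => nothing),
        Dict("methodname" => "Polyakov_loop", "measure_every" => 10, "fermiontype" => nothing),
    ]
    m = Measurement_methods(U, "./measurements", methods)
    for itrj = 1:100
        calc_measurement_values(m, itrj, U)
    end
=#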
| LatticeQCD | https://github.com/akio-tomiya/LatticeQCD.jl.git |
|
[
"MIT"
] | 1.1.0 | e495a1eaee52a513e02fe6ca2c995a7d0ed671a7 | code | 1586 | module AbstractMeasurement_module
import QCDMeasurements:
Plaquette_measurement,
measure,
get_value,
Polyakov_measurement,
Pion_correlator_measurement,
Chiral_condensate_measurement,
Energy_density_measurement,
Topological_charge_measurement,
Wilson_loop_measurement
#=
using Wilsonloop
using Gaugefields
import Gaugefields.Verbose_print
import Gaugefields.println_verbose_level2
import Gaugefields.AbstractGaugefields_module.get_myrank
import Gaugefields: AbstractGaugefields
import Gaugefields.Abstractsmearing
using LinearAlgebra
using LatticeDiracOperators
import LatticeDiracOperators.Dirac_operators:
clear_fermion!, AbstractFermionfields_4D, Z4_distribution_fermi!
import Wilsonloop.make_cloverloops
import ..Parameter_structs:Plaq_parameters,Poly_parameters,
TopologicalCharge_parameters,
ChiralCondensate_parameters,
Pion_parameters,
Energy_density_parameters
abstract type AbstractMeasurement end
function get_temporary_gaugefields(m::AbstractMeasurement)
return m._temporary_gaugefields
end
function get_temporary_fermionfields(m::AbstractMeasurement)
return m._temporary_fermionfields
end
include("measure_plaquette.jl")
include("measure_polyakov.jl")
include("measure_topological_charge.jl")
include("measure_energy_density.jl")
include("measure_chiral_condensate.jl")
include("measure_Pion_correlator.jl")
include("measurement_parameters_set.jl")
include("Measurement_set.jl")
function measure(measurement::M, itrj, U) where {M<:AbstractMeasurement}
error("measure with a type $M is not supported")
end
=#
end
| LatticeQCD | https://github.com/akio-tomiya/LatticeQCD.jl.git |
|
[
"MIT"
] | 1.1.0 | e495a1eaee52a513e02fe6ca2c995a7d0ed671a7 | code | 12726 | mutable struct Pion_correlator_measurement{Dim,TG,TD,TF,TF_vec,Dim_2} <: AbstractMeasurement
filename::String
_temporary_gaugefields::Vector{TG}
Dim::Int8
verbose_print::Union{Verbose_print,Nothing}
printvalues::Bool
D::TD
fermi_action::TF
_temporary_fermionfields::Vector{TF_vec}
#Nr::Int64
Nspinor::Int64
S::Array{ComplexF64,Dim_2}
function Pion_correlator_measurement(
U::Vector{T};
filename = nothing,
verbose_level = 2,
printvalues = true,
fermiontype = "Staggered",
mass = 0.1,
Nf = 2,
κ = 1,
r = 1,
L5 = 2,
M = -1,
eps_CG = 1e-14,
MaxCGstep = 3000,
BoundaryCondition = nothing,
) where {T}
NC = U[1].NC
Dim = length(U)
if BoundaryCondition == nothing
if Dim == 4
boundarycondition = BoundaryCondition_4D_default
elseif Dim == 2
boundarycondition = BoundaryCondition_2D_default
end
else
boundarycondition = BoundaryCondition
end
#println(boundarycondition)
params = Dict()
parameters_action = Dict()
if fermiontype == "Staggered"
x = Initialize_pseudofermion_fields(U[1], "staggered")
params["Dirac_operator"] = "staggered"
params["mass"] = mass
parameters_action["Nf"] = Nf
Nfbase = 4
#Nfbase = ifelse( m.fparam.Dirac_operator == "Staggered",4,1)
factor = Nf / Nfbase
elseif fermiontype == "Wilson"
x = Initialize_pseudofermion_fields(U[1], "Wilson", nowing = true)
params["Dirac_operator"] = "Wilson"
params["κ"] = κ
params["r"] = r
params["faster version"] = true
elseif fermiontype == "Domainwall"
params["Dirac_operator"] = "Domainwall"
params["mass"] = mass
params["L5"] = L5
params["M"] = M
x = Initialize_pseudofermion_fields(U[1], "Domainwall", L5 = L5)
else
error(
"fermion type $fermiontype is not supported in chiral condensate measurement",
)
end
Nspinor = ifelse(fermiontype == "Staggered", 1, 4)
_, _, NN... = size(U[1])
S = zeros(ComplexF64, NN..., Nspinor * NC, Nspinor * NC)
params["eps_CG"] = eps_CG
params["verbose_level"] = verbose_level
params["MaxCGstep"] = MaxCGstep
params["boundarycondition"] = boundarycondition
D = Dirac_operator(U, x, params)
fermi_action = FermiAction(D, parameters_action)
TD = typeof(D)
TF = typeof(fermi_action)
myrank = get_myrank(U)
if printvalues
verbose_print = Verbose_print(verbose_level, myid = myrank, filename = filename)
else
verbose_print = nothing
end
numg = 1
_temporary_gaugefields = Vector{T}(undef, numg)
_temporary_gaugefields[1] = similar(U[1])
for i = 2:numg
_temporary_gaugefields[i] = similar(U[1])
end
numf = 2
TF_vec = typeof(x)
_temporary_fermionfields = Vector{TF_vec}(undef, numf)
for i = 1:numf
_temporary_fermionfields[i] = similar(x)
end
Dim_2 = Dim + 2
return new{Dim,T,TD,TF,TF_vec,Dim_2}(
filename, #::String
_temporary_gaugefields,#::Vector{TG}
Dim,#::Int8
verbose_print,#::Union{Verbose_print,Nothing}
printvalues,#::Bool
D,#::TD
fermi_action,#::TF
_temporary_fermionfields,#::Vector{TF_vec}
Nspinor,#::Int64
S,#::Array{ComplexF64,3}
)
end
end
function Pion_correlator_measurement(
U::Vector{T},
params::Pion_parameters,
filename,
) where {T}
if params.smearing_for_fermion != "nothing"
error("smearing is not implemented in Pion correlator")
end
fermionparameters = params.fermion_parameters
if params.fermiontype == "Staggered"
method = Pion_correlator_measurement(
U;
filename = filename,
verbose_level = params.verbose_level,
printvalues = params.printvalues,
fermiontype = params.fermiontype,
mass = fermionparameters.mass,
Nf = fermionparameters.Nf,
eps_CG = params.eps,
MaxCGstep = params.MaxCGstep,
)
elseif params.fermiontype == "Wilson" || params.fermiontype == "WilsonClover"
if fermionparameters.hasclover
error("WilsonClover is not implemented in Pion measurement")
end
method = Pion_correlator_measurement(
U;
filename = filename,
verbose_level = params.verbose_level,
printvalues = params.printvalues,
fermiontype = params.fermiontype,
κ = fermionparameters.hop,
r = fermionparameters.r,
eps_CG = params.eps,
MaxCGstep = params.MaxCGstep,
)
elseif params.fermiontype == "Domainwall"
error("Domainwall fermion is not implemented in Pion measurement!")
method = Pion_correlator_measurement(
U;
filename = filename,
verbose_level = params.verbose_level,
printvalues = params.printvalues,
fermiontype = params.fermiontype,
L5 = fermionparameters.N5,
M = fermionparameters.M,
eps_CG = params.eps,
MaxCGstep = params.MaxCGstep,
)
end
return method
end
@inline function spincolor(ic, is, NC)
return ic - 1 + (is - 1) * NC + 1
end
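#=
Index flattening used below: color runs fastest, so for NC = 3 (hypothetical values)
    spincolor(1, 1, 3) == 1,  spincolor(3, 1, 3) == 3,  spincolor(2, 3, 3) == 8,
and the flattened index covers 1:NC*Nspinor for both source (α0) and sink (β) labels.
=#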
function measure(
m::M,
itrj,
U::Array{<:AbstractGaugefields{NC,Dim},1};
additional_string = "",
) where {M<:Pion_correlator_measurement,NC,Dim}
S = m.S
S .= 0
measurestring = ""
st = "Hadron spectrum started"
measurestring *= st * "\n"
println_verbose_level2(U[1], st)
Nspinor = m.Nspinor
#D = m.D(U)
# calculate quark propagators from a point source at the origin
propagators, st = calc_quark_propagators_point_source(m, U)
measurestring *= st * "\n"
#=
#println(propagators)
for ic=1:NC
for is=1:Nspinor
icum = (ic-1)*Nspinor+is
println("$icum ", dot(propagators[icum],propagators[icum]))
end
end
#error("prop")
=#
_, _, NN... = size(U[1])
#println("NN = $NN")
#ctr = 0 # a counter
for ic = 1:NC
for is = 1:Nspinor
icum = (ic - 1) * Nspinor + is
propagator = propagators[icum]
α0 = spincolor(ic, is, NC) # source(color-spinor) index
# reconstruction
if Dim == 4
@inbounds for t = 1:NN[4]
for z = 1:NN[3]
for y = 1:NN[2]
for x = 1:NN[1]
for ic2 = 1:NC
@inbounds @simd for is2 = 1:Nspinor # Nspinor is the number of spinor index in 4d.
β = spincolor(ic2, is2, NC)
S[x, y, z, t, α0, β] += propagator[ic2, x, y, z, t, is2]
#println( propagator[ic,x,y,z,t,is])
end
end
end
end
end
end
else
error("Dim = $Dim is not supported")
end
# end for the substitution
#ctr+=1
end
end
#println(sum(S))
#error("prop")
# construction end.
st = "Hadron spectrum: Reconstruction"
measurestring *= st * "\n"
println_verbose_level2(U[1], st)
#println("Hadron spectrum: Reconstruction")
Cpi = zeros(NN[end])
#Cpi = zeros( univ.NT )
# Construct Pion propagator
if Dim == 4
@inbounds for t = 1:NN[4]
tmp = 0.0 + 0.0im
for z = 1:NN[3]
for y = 1:NN[2]
for x = 1:NN[1]
for ic = 1:NC
for is = 1:Nspinor # Nspinor is the number of spinor index in 4d.
α = spincolor(ic, is, NC)
for ic2 = 1:NC
for is2 = 1:Nspinor # Nspinor is the number of spinor index in 4d.
β = spincolor(ic2, is2, NC)
tmp += S[x, y, z, t, α, β] * S[x, y, z, t, α, β]'#inner product.
# complex conjugate = g5 S g5.
end
end
# complex conjugate = g5 S g5.
end
end
end
end
end
# staggered Pion correlator relies on https://itp.uni-frankfurt.de/~philipsen/theses/breitenfelder_ba.pdf (3.33)
# we ignore the staggering factor; see the reference above for details.
ksfact = 1.0 #ifelse( meas.fparam.Dirac_operator == "Staggered" , (-1)^(t-1) * 64, 1)
Cpi[t] = real(tmp) * ksfact
end
end
#println(typeof(verbose),"\t",verbose)
st = "Hadron spectrum end"
measurestring *= st * "\n"
println_verbose_level2(U[1], st)
#println("Hadron spectrum end")
if m.printvalues
stringcc = "$itrj "
#println_verbose_level1(U[1],"$itrj ")
#println_verbose_level1(m.verbose_print,"$itrj ")
for it = 1:length(Cpi)
cc = Cpi[it]
#println_verbose_level1(U[1],"$cc ")
stringcc *= "$cc "
end
#println_verbose_level1(U[1],stringcc)
measurestring *= stringcc * "\n"
println_verbose_level1(m.verbose_print, stringcc)
#println_verbose_level1(U[1],"#pioncorrelator")
st = "#pioncorrelator"
measurestring *= st * "\n"
println_verbose_level1(m.verbose_print, st)
end
return Cpi, measurestring
end
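#=
Contraction performed above: with S(x⃗, t; α, β) the point-source propagator matrix built in
the reconstruction step, the zero-momentum pion correlator is
    C_π(t) = Σ_{x⃗} Σ_{α,β} S(x⃗, t; α, β) * conj(S(x⃗, t; α, β)),
using γ5-hermiticity (γ5 S γ5 = S†), which is why no explicit γ5 insertions appear in the code.
=#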
function calc_quark_propagators_point_source(
m,
U::Array{<:AbstractGaugefields{NC,Dim},1},
) where {NC,Dim}
# D^{-1} for each spin x color element
D = m.D(U)
stvec = String[]
propagators = map(
i -> calc_quark_propagators_point_source_each(m, U, D, i, stvec),
1:NC*m.Nspinor,
)
st = ""
for i = 1:NC*m.Nspinor
st *= stvec[i] * "\n"
end
return propagators, st
end
function calc_quark_propagators_point_source_each(m, U, D, i, stvec)
# calculate D^{-1} for a given source at the origin.
# Nc*Ns (Ns: dim of spinor, Wilson=4, ks=1) elements have to be gathered.
# staggered Pion correlator relies on https://itp.uni-frankfurt.de/~philipsen/theses/breitenfelder_ba.pdf (3.33)
temps_fermi = get_temporary_fermionfields(m)
measurestring = ""
b = temps_fermi[1]
p = temps_fermi[2]
#b = similar(meas._temporal_fermions[1]) # source is allocated
#p = similar(b) # sink is allocated (propagator to sink position)
#k = meas._temporal_fermi2[2]
#clear_fermion!(b)
Nspinor = m.Nspinor#ifelse( meas.fparam.Dirac_operator == "Staggered" ,1,4)
is = ((i - 1) % Nspinor) + 1 # spin index
ic = ((i - is) ÷ Nspinor) + 1 # color index
st = "$ic $is"
measurestring *= st * "\n"
println_verbose_level1(U[1], st)
v = 1
clear_fermion!(b)
clear_fermion!(p)
#b[ic,1,1,1,1,is] = v
#println(dot(b,b))
#println("ic = $ic is = $is")
iorigin = (1, 1, 1, 1)
setindex_global!(b, v, ic, iorigin..., is) # source at the origin
#=
mul!(p,D,b)
for it=1:U[1].NT
for iz=1:U[1].NZ
for iy=1:U[1].NY
for ix=1:U[1].NX
val = p[ic,ix,iy,iz,it,is]
if abs(val) > 1e-16
println("$ix $iy $iz $it $val")
end
end
end
end
end
=#
#println(p[ic,1,1,1,1,is])
#println(p[ic,2,1,1,1,is])
#Z4_distribution_fermi!(b)
#error("dd")
@time solve_DinvX!(p, D, b)
#error("dd")
#println("norm p ",dot(p,p))
st = "Hadron spectrum: Inversion $(i)/$(U[1].NC*m.Nspinor) is done"
measurestring *= st * "\n"
println_verbose_level1(U[1], st)
flush(stdout)
push!(stvec, measurestring)
return deepcopy(p)
end
| LatticeQCD | https://github.com/akio-tomiya/LatticeQCD.jl.git |
|
[
"MIT"
] | 1.1.0 | e495a1eaee52a513e02fe6ca2c995a7d0ed671a7 | code | 6696 | const BoundaryCondition_4D_default = [1, 1, 1, -1]
const BoundaryCondition_2D_default = [1, -1]
mutable struct Chiral_condensate_measurement{Dim,TG,TD,TF,TF_vec} <: AbstractMeasurement
filename::String
_temporary_gaugefields::Vector{TG}
Dim::Int8
#factor::Float64
verbose_print::Union{Verbose_print,Nothing}
printvalues::Bool
D::TD
fermi_action::TF
_temporary_fermionfields::Vector{TF_vec}
Nr::Int64
factor::Float64
function Chiral_condensate_measurement(
U::Vector{T};
filename = nothing,
verbose_level = 2,
printvalues = true,
fermiontype = "Staggered",
mass = 0.1,
Nf = 2,
κ = 1,
r = 1,
L5 = 2,
M = -1,
eps_CG = 1e-14,
MaxCGstep = 3000,
BoundaryCondition = nothing,
Nr = 10,
) where {T}
Dim = length(U)
if BoundaryCondition == nothing
if Dim == 4
boundarycondition = BoundaryCondition_4D_default
elseif Dim == 2
boundarycondition = BoundaryCondition_2D_default
end
else
boundarycondition = BoundaryCondition
end
Nfbase = 1
factor = 1
params = Dict()
parameters_action = Dict()
if fermiontype == "Staggered"
x = Initialize_pseudofermion_fields(U[1], "staggered")
params["Dirac_operator"] = "staggered"
params["mass"] = mass
parameters_action["Nf"] = Nf
Nfbase = 4
#Nfbase = ifelse( m.fparam.Dirac_operator == "Staggered",4,1)
factor = Nf / Nfbase
elseif fermiontype == "Wilson"
x = Initialize_pseudofermion_fields(U[1], "Wilson", nowing = true)
params["Dirac_operator"] = "Wilson"
params["κ"] = κ
params["r"] = r
params["faster version"] = true
elseif fermiontype == "Domainwall"
params["Dirac_operator"] = "Domainwall"
params["mass"] = mass
params["L5"] = L5
params["M"] = M
x = Initialize_pseudofermion_fields(U[1], "Domainwall", L5 = L5)
else
error(
"fermion type $fermiontype is not supported in chiral condensate measurement",
)
end
params["eps_CG"] = eps_CG
params["verbose_level"] = verbose_level
params["MaxCGstep"] = MaxCGstep
params["boundarycondition"] = boundarycondition
D = Dirac_operator(U, x, params)
fermi_action = FermiAction(D, parameters_action)
TD = typeof(D)
TF = typeof(fermi_action)
myrank = get_myrank(U)
#=
if U[1].mpi == false
myrank = 0
else
myrank = U[1].myrank
end
=#
if printvalues
verbose_print = Verbose_print(verbose_level, myid = myrank, filename = filename)
else
verbose_print = nothing
end
numg = 1
_temporary_gaugefields = Vector{T}(undef, numg)
_temporary_gaugefields[1] = similar(U[1])
for i = 2:numg
_temporary_gaugefields[i] = similar(U[1])
end
numf = 2
TF_vec = typeof(x)
_temporary_fermionfields = Vector{TF_vec}(undef, numf)
for i = 1:numf
_temporary_fermionfields[i] = similar(x)
end
return new{Dim,T,TD,TF,TF_vec}(
filename,
_temporary_gaugefields,
Dim,
verbose_print,
printvalues,
D,#::TD
fermi_action,#::TF,
_temporary_fermionfields,
Nr,
factor,
)
end
end
function Chiral_condensate_measurement(
U::Vector{T},
params::ChiralCondensate_parameters,
filename,
) where {T}
if params.fermiontype == "Staggered"
method = Chiral_condensate_measurement(
U;
filename = filename,
verbose_level = params.verbose_level,
printvalues = params.printvalues,
fermiontype = params.fermiontype,
mass = params.mass,
Nf = params.Nf,
eps_CG = params.eps,
MaxCGstep = params.MaxCGstep,
Nr = params.Nr,
)
else
error("$(params.fermiontype) is not supported in Chiral_condensate_measurement")
end
# note: in progress; only the Staggered fermion type is supported here
return method
end
function measure(
m::M,
itrj,
U::Array{<:AbstractGaugefields{NC,Dim},1};
additional_string = "",
) where {M<:Chiral_condensate_measurement,NC,Dim}
temps_fermi = get_temporary_fermionfields(m)
p = temps_fermi[1]
r = temps_fermi[2]
D = m.D(U)
pbp = 0.0
#Nr = 100
Nr = m.Nr
measurestring = ""
for ir = 1:Nr
clear_fermion!(p)
Z4_distribution_fermi!(r)
solve_DinvX!(p, D, r)
tmp = dot(r, p) # hermitian inner product
if m.printvalues
#println_verbose_level2(U[1],"# $itrj $ir $(real(tmp)/U[1].NV) # itrj irand chiralcond")
measurestring_ir = "# $itrj $ir $additional_string $(real(tmp)/U[1].NV) # itrj irand chiralcond"
println_verbose_level2(m.verbose_print, measurestring_ir)
measurestring *= measurestring_ir * "\n"
end
pbp += tmp
end
pbp_value = real(pbp / Nr) / U[1].NV * m.factor
if m.printvalues
measurestring_ir = "$itrj $pbp_value # pbp Nr=$Nr"
println_verbose_level1(m.verbose_print, measurestring_ir)
measurestring *= measurestring_ir * "\n"
flush(stdout)
end
return pbp_value, measurestring
end
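#=
Stochastic estimator accumulated above: with Nr noise vectors r_i (Z4 components on each site),
    ⟨ψ̄ψ⟩ ≈ factor * (1/(Nr * NV)) * Σ_i Re ⟨r_i, D⁻¹ r_i⟩,
where factor = Nf/Nfbase corrects the staggered flavour counting and NV is the lattice volume.
=#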
#=
"""
c-------------------------------------------------c
c Random number function Z4 Noise
c https://arxiv.org/pdf/1611.01193.pdf
c-------------------------------------------------c
"""
function Z4_distribution_fermion!(x::AbstractFermionfields_4D{NC}) where NC
NX = x.NX
NY = x.NY
NZ = x.NZ
NT = x.NT
n6 = size(x.f)[6]
θ = 0.0
N::Int32 = 4
Ninv = Float64(1/N)
for ialpha = 1:n6
for it=1:NT
for iz=1:NZ
for iy=1:NY
for ix=1:NX
@inbounds @simd for ic=1:NC
θ = Float64(rand(0:N-1))*π*Ninv # r \in [0,π/4,2π/4,3π/4]
x[ic,ix,iy,iz,it,ialpha] = cos(θ)+im*sin(θ)
end
end
end
end
end
end
set_wing_fermion!(x)
return
end
=#
| LatticeQCD | https://github.com/akio-tomiya/LatticeQCD.jl.git |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.