licenses (sequence, lengths 1–3) | version (string, 677 distinct values) | tree_hash (string, length 40) | path (string, 1 distinct value) | type (string, 2 distinct values) | size (string, lengths 2–8) | text (string, lengths 25–67.1M) | package_name (string, lengths 2–41) | repo (string, lengths 33–86) |
---|---|---|---|---|---|---|---|---|
[
"MIT"
] | 0.1.0 | 74840bce8734609e4b921aefdceed22e18460b52 | code | 2127 | const PlainDS = Array{Face{3,Int},1}
struct FaceRing{T}
v::Int
t::T # topology
end
"""
EdgeRing(v,t)
Construct an edge ring iterator at vertex `v` from a given topology `t`.
"""
struct EdgeRing{T}
v::Int
t::T # topology
end
"""
VertexRing(v,t)
Construct a vertex ring iterator at vertex `v` from a given topology `t`.
"""
struct VertexRing{T}
v::Int
start::Union{Int,Nothing}
t::T # topology
end
VertexRing(v::Int,t) = VertexRing(v,nothing,t)
### Perhaps one may also consider making this a trait.
### With this PairIterator, an EdgeRing can be initialized as an iterator from a VertexRing.
struct PairIterator
iter
end
function Base.iterate(iter::PairIterator)
    i1, state1 = iterate(iter.iter)
    i2, state2 = iterate(iter.iter, state1)
    return Face(i1, i2), (state2, i2)
end
function Base.iterate(iter::PairIterator, state)
    if state === nothing
        return nothing
    end
    state1, i1 = state
    step = iterate(iter.iter, state1)
    if step === nothing
        # Wrap around: pair the last element with the first, closing the cycle.
        i2, state2 = iterate(iter.iter)
        return Face(i1, i2), nothing
    else
        i2, state2 = step
        return Face(i1, i2), (state2, i2)
    end
end
function Base.collect(iter::Union{FaceRing,VertexRing})
collection = Int[]
for i in iter
push!(collection,i)
end
return collection
end
function Base.collect(iter::EdgeRing)
collection = Face{2,Int}[]
for i in iter
push!(collection,i)
end
return collection
end
function Base.collect(iter::PairIterator)
collection = Face{2,Int}[]
for i in iter
push!(collection,i)
end
return collection
end
### There is room for unstructured circulator iterators for performance reasons.
function find_other_triangle_edge(v1::Integer,v2::Integer,skip::Integer,t::PlainDS)
for i in 1:length(t)
if in(v1, t[i]) && in(v2, t[i]) && i != skip
return i
end
end
return -1
end
function find_triangle_vertex(v::Integer,t::PlainDS)
for i in 1:length(t)
if in(v,t[i])
return i
end
end
return length(t) + 1 # vertex not found: out-of-range sentinel (instead of -1)
end
| SurfaceTopology | https://github.com/akels/SurfaceTopology.jl.git |
|
[
"MIT"
] | 0.1.0 | 74840bce8734609e4b921aefdceed22e18460b52 | code | 1979 | using Test
using GeometryTypes
using SurfaceTopology
@info "Topology function tests"
faces = Face{3,Int}[
[6, 8, 11],
[8, 6, 7],
[5, 1, 4],
[1, 5, 7],
[5, 8, 7],
[5, 10, 11],
[8, 5, 11],
[1, 3, 2],
[3, 1, 7],
[3, 6, 2],
[6, 3, 7],
[9, 5, 4],
[5, 12, 10],
[9, 12, 5],
[10, 12, 4],
[12, 9, 4]
]
triangles = []
for i in FaceRing(5,faces)
push!(triangles,i)
end
@test sort(triangles)==[3,4,5,6,7,12,13,14]
triverticies = []
for i in EdgeRing(5,faces)
#println("i")
push!(triverticies,i)
end
@test (1,4) in triverticies
verticies = []
for i in VertexRing(5,faces)
push!(verticies,i)
end
@test sort(verticies)==[1,4,7,8,9,10,11,12]
@info "Testing FaceDS"
# points = zero(faces)
fb = FaceDS(faces)
triangles = []
for i in FaceRing(5,fb)
push!(triangles,i)
end
@test sort(triangles)==[3,4,5,6,7,12,13,14]
triverticies = []
for i in EdgeRing(5,fb)
push!(triverticies,i)
end
@test Face(1,4) in triverticies
verticies = []
for i in VertexRing(5,fb)
push!(verticies,i)
end
@test sort(verticies)==[1,4,7,8,9,10,11,12]
@info "Testing EdgeDS"
eb = EdgeDS(faces)
verticies = []
for i in VertexRing(5,eb)
push!(verticies,i)
end
@test sort(verticies)==[1,4,7,8,9,10,11,12]
triverticies = []
for i in EdgeRing(5,eb)
push!(triverticies,i)
end
@test Face(1,4) in triverticies
@info "Topology tests for Cached DS"
# At the moment limited to closed surfaces
faces = Face{3,Int64}[
[1, 12, 6], [1, 6, 2], [1, 2, 8], [1, 8, 11], [1, 11, 12], [2, 6, 10], [6, 12, 5],
[12, 11, 3], [11, 8, 7], [8, 2, 9], [4, 10, 5], [4, 5, 3], [4, 3, 7], [4, 7, 9],
[4, 9, 10], [5, 10, 6], [3, 5, 12], [7, 3, 11], [9, 7, 8], [10, 9, 2]
]
cds = CachedDS(faces)
@test sort(collect(VertexRing(3,faces)))==sort(collect(VertexRing(3,cds)))
@test sort(collect(EdgeRing(3,faces)))==sort(collect(EdgeRing(3,cds)))
| SurfaceTopology | https://github.com/akels/SurfaceTopology.jl.git |
|
[
"MIT"
] | 0.1.0 | 74840bce8734609e4b921aefdceed22e18460b52 | docs | 746 | # SurfaceTopology.jl
[](https://akels.github.io/SurfaceTopology.jl/stable)
[](https://akels.github.io/SurfaceTopology.jl/dev)
[](https://travis-ci.org/akels/SurfaceTopology.jl)
Triangular meshes can be stored in a computer in multiple ways, each with strengths and weaknesses depending on the case at hand, and it is not always clear which data structure is most suitable for a specific task. It is therefore wise to write code that is generic over the data structure, which is precisely the purpose of this package for closed, oriented surfaces. | SurfaceTopology | https://github.com/akels/SurfaceTopology.jl.git |
|
[
"Apache-2.0"
] | 0.1.0 | cdcbbd821f44f0e6bdd0b0c6d33bb54f3033a887 | code | 347 | using Documenter
using CloudQSim
makedocs(
sitename = "CloudQSim",
format = Documenter.HTML(),
modules = [CloudQSim]
)
# Documenter can also automatically deploy documentation to gh-pages.
# See "Hosting Documentation" and deploydocs() in the Documenter manual
# for more information.
#=deploydocs(
repo = "<repository url>"
)=#
| CloudQSim | https://github.com/CloudQuantumSim/CloudQSim.jl.git |
|
[
"Apache-2.0"
] | 0.1.0 | cdcbbd821f44f0e6bdd0b0c6d33bb54f3033a887 | code | 633 | using BloqadeExpr, BloqadeLattices, BloqadeWaveforms
import CloudQSim
nsites = 10
atoms = generate_sites(ChainLattice(), nsites, scale = 5.74)
T_end = 1.
Δ = ϕ = piecewise_linear(; clocks = [0, T_end], values = [0., 0.])
Ω = piecewise_linear(; clocks = [0, T_end], values = [2π, 2π])
h = rydberg_h(atoms; Ω = Ω, Δ = Δ, ϕ=ϕ)
clconf = CloudQSim.CloudConfig()
CloudQSim.add_server!(clconf, "127.0.0.1", 8000)
qstates = 0:2^nsites-1
isodd = [x%2 for x in qstates]
rydberg = [Base.count_ones(x) for x in qstates]
observables = [isodd, rydberg]
time_points = 10
data, meta = CloudQSim.cloud_simulate(h, time_points, observables, clconf)
| CloudQSim | https://github.com/CloudQuantumSim/CloudQSim.jl.git |
|
[
"Apache-2.0"
] | 0.1.0 | cdcbbd821f44f0e6bdd0b0c6d33bb54f3033a887 | code | 228 | module CloudQSim
include("compress.jl")
include("auth.jl")
include("api.jl")
include("client.jl")
export CloudConfig,
cloud_simulate,
add_server!,
del_server!
end # module
| CloudQSim | https://github.com/CloudQuantumSim/CloudQSim.jl.git |
|
[
"Apache-2.0"
] | 0.1.0 | cdcbbd821f44f0e6bdd0b0c6d33bb54f3033a887 | code | 1614 | import Configurations
import JSON
import BloqadeSchema
@Base.kwdef struct CloudQSimTask
bloqade_tasks :: Vector{BloqadeSchema.TaskSpecification}
time_points :: Int64
subspace_radius :: Float64
observs :: Vector{Vector{Float64}}
end
function reduce_meta(old, update)
# for each key of `update` that is not in `old`, add it to `old`
for (k, v) in update
if !haskey(old, k)
old[k] = v
else
old[k] = old[k] + v
end
end
payload_label = "payload"
overhead_label = "overhead"
ignore_labels = [payload_label, overhead_label, "pmap"]
# sum all values except for the payload and overhead
overhead = sum([v for (k, v) in old if k ∉ ignore_labels])
# add the overhead to meta
old[overhead_label] = overhead
return old
end
## - Serialize task
function serialize_task(task:: CloudQSimTask)
data = Dict(
"version" => API_VERSION,
"bloqade_tasks" =>
[ Configurations.to_dict(t) for t in task.bloqade_tasks],
"time_points" => task.time_points,
"subspace_radius" => task.subspace_radius,
"observables" => task.observs
)
return data |> JSON.json
end
function serialize_hamiltonian(ham)
return BloqadeSchema.to_json(ham, n_shots=1)
end
## - Parse results
flatten(x) = x
flatten(x::AbstractArray) = vcat(map(flatten, x)...)
function parse_results(data)
sar = data |> JSON.parse
if length(sar) == 0
println("No results")
return []
end
mat = sar["results"]
return Dict("results" => mat, "meta" => sar["meta"])
end
| CloudQSim | https://github.com/CloudQuantumSim/CloudQSim.jl.git |
|
[
"Apache-2.0"
] | 0.1.0 | cdcbbd821f44f0e6bdd0b0c6d33bb54f3033a887 | code | 629 | using JSONWebTokens
import JSON
VERSION = "0.1.1"
TOKEN_ENV_VAR = "CLOUDQS_TOKEN"
function _get_token(token::Union{String, Nothing})::Union{String, Nothing}
if token === nothing
token = get(ENV, TOKEN_ENV_VAR, nothing)
end
return token
end
function auth_encode(
out_data::String
; token::Union{String, Nothing}=nothing
)::String
token = _get_token(token)
if token === nothing
return out_data
end
return JSON.json(Dict(
"auth_data" => out_data,
"auth_version" => VERSION,
"auth_token"=>token
))
end
| CloudQSim | https://github.com/CloudQuantumSim/CloudQSim.jl.git |
|
[
"Apache-2.0"
] | 0.1.0 | cdcbbd821f44f0e6bdd0b0c6d33bb54f3033a887 | code | 7883 | import Sockets
import Logging
import JSON
import BloqadeSchema
import TOML
API_VERSION = 1
# -- Cloud Management
# Given many servers, it is possible to distribute the tasks
# between them. This is done by the following functions.
#
# CloudConfig is a configuration structure that holds the information about the
# cloud servers. It can be read from a TOML file.
struct CloudConfig
addrs :: Vector{String}
ports :: Vector{Int}
worker_counts :: Vector{Int}
function CloudConfig(addrs, ports, worker_counts)
if length(addrs) != length(ports)
error("addrs and ports must have the same length")
end
return new(addrs, ports, worker_counts)
end
function CloudConfig(addrs, ports)
return CloudConfig(addrs, ports, fill(1, length(addrs)))
end
function CloudConfig()
return new([], [], [])
end
end
get_empty_task() = CloudQSimTask(bloqade_tasks=[], time_points=1, subspace_radius=0, observs=[[]])
function read_toml_clconf(path::String="CloudConfig.toml")
config = TOML.parsefile(path)
cloud_server_info = config["server_info"]
hosts = cloud_server_info["hosts"]
ports = cloud_server_info["ports"]
worker_counts = get(cloud_server_info, "worker_counts", fill(1, length(hosts)))
return CloudConfig(hosts, ports, worker_counts)
end
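# Example CloudConfig.toml this reader expects (host names and ports are
# illustrative; `worker_counts` is optional and defaults to one per host):
#
#   [server_info]
#   hosts = ["127.0.0.1", "worker.example.org"]
#   ports = [8000, 7700]
#   worker_counts = [1, 4]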
Base.getindex(config::CloudConfig, i::Int) = (config.addrs[i], config.ports[i], config.worker_counts[i])
Base.getindex(config::CloudConfig, i::Symbol) = getfield(config, i)
Base.getindex(config::CloudConfig, sl::AbstractVector) = CloudConfig(config.addrs[sl], config.ports[sl], config.worker_counts[sl])
Base.length(config::CloudConfig) = length(config.addrs)
function add_server!(cloud_config::CloudConfig, addr, port, workers=1)
push!(cloud_config.addrs, addr)
push!(cloud_config.ports, port)
push!(cloud_config.worker_counts, workers)
end
function del_server!(cloud_config::CloudConfig, addr, port)
idx = findfirst(x -> x[1] == addr && x[2] == port, collect(zip(cloud_config.addrs, cloud_config.ports)))
if idx === nothing
error("Server not found")
end
deleteat!(cloud_config.addrs, idx)
deleteat!(cloud_config.ports, idx)
deleteat!(cloud_config.worker_counts, idx)
end
# --
# -- Cloud Simulation utils
function send_task_cloud(sock, task)
jsn = task |> serialize_task |> auth_encode
while isopen(sock)
addr, port = Sockets.getpeername(sock)
t_net_wait = @elapsed begin
println(sock, jsn)
println("$addr:$port 🠔── $(length(task.bloqade_tasks)) hamiltonians")
result = readline(sock)
end
t_parse_results = @elapsed begin
outs = parse_results(result)
end
results, meta = outs["results"], outs["meta"]
# merge meta with client meta
client_meta = Dict("net_wait" => t_net_wait, "parse_results" => t_parse_results)
meta = merge(meta, client_meta)
return results, meta
end
end
function get_working_servers(clconf, show_errors=false)
println("[CQS] #> Testing servers...")
empty_task = get_empty_task()
working_server_ids = []
metas = []
for i in 1:length(clconf.addrs)
try
sock = Sockets.connect(clconf.addrs[i], clconf.ports[i])
data, meta = fetch(send_task_cloud(sock, empty_task))
Sockets.close(sock)
push!(working_server_ids, i)
push!(metas, meta)
catch e
if show_errors
Logging.@error e
end
continue
end
end
return working_server_ids, metas
end
"""
Load balancing of jobs between worker servers.
"""
function split_task(task::CloudQSimTask, portions::Vector{Int})
total_workers = sum(portions)
# -- Divide into sub-tasks: create a task for each server in config,
# with the number of task.bloqade_tasks distributed according to worker_counts.
task_cnt = length(task.bloqade_tasks)
task_counts = []
for wc in portions
push!(task_counts, Int(floor(task_cnt * wc / total_workers)))
end
# Distribute the remainder (task_cnt - sum(task_counts), which is < length(portions)) to the first servers.
for i in 1:(task_cnt - sum(task_counts))
task_counts[i] += 1
end
tasks::Vector{CloudQSimTask} = []
start = 1
for i in 1:length(portions)
push!(tasks, CloudQSimTask(
task.bloqade_tasks[start:start+task_counts[i]-1],
task.time_points,
task.subspace_radius,
task.observs
))
start += task_counts[i]
end
return tasks
end
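# Minimal sketch (illustrative helper, not part of the original API): the
# apportioning rule used by `split_task`, applied to plain counts. For example,
# ten tasks over worker capacities [3, 1] yield [8, 2] after the remainder top-up.
function _example_task_counts(task_cnt::Int, portions::Vector{Int})
    total_workers = sum(portions)
    task_counts = [Int(floor(task_cnt * wc / total_workers)) for wc in portions]
    for i in 1:(task_cnt - sum(task_counts)) # remainder < length(portions)
        task_counts[i] += 1
    end
    return task_counts # _example_task_counts(10, [3, 1]) == [8, 2]
end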
unzip(a) = map(x->getfield.(a, x), fieldnames(eltype(a)))
function distribute_task(task::CloudQSimTask, clconf::CloudConfig)
tasks = split_task(task, clconf.worker_counts)
function map_fn(task, addr, port)
sock = Sockets.connect(addr, port)
ret, meta = send_task_cloud(sock, task)
println("$(length(ret)) results 🠔── $(addr):$(port)")
return ret, meta
end
results = asyncmap(map_fn, tasks, clconf.addrs, clconf.ports)
# merge results
res, meta = unzip(results)
return reduce(vcat, res), reduce(reduce_meta, meta)
end
function convert_final_result(ret_list)
# -- Convert output from list of lists to an array
l1 = length(ret_list)
l2 = length(ret_list[1])
l3 = length(ret_list[1][1])
dims = (l1, l2, l3)
mat = flatten(ret_list)
mat = reshape(mat, reverse(dims))
ret = permutedims(mat, (3, 2, 1))
return ret
end
# -- Manage meta about the simulation
global last_meta = Dict()
function get_last_meta()
return last_meta
end
function set_last_meta(meta)
global last_meta = meta
end
# -- Public API
function cloud_simulate(
hamiltonian::AbstractVector,
time_points :: Int64,
observables::Vector{<:Vector},
cloud_config::CloudConfig
; subspace_radius=0.
)
t_to_schema = @elapsed begin
bloqade_tasks = [BloqadeSchema.to_schema(h, n_shots=1)
for h in hamiltonian]
task = CloudQSimTask(bloqade_tasks, time_points, subspace_radius, observables)
end
t_submit = @elapsed begin
# Don't check for working servers if there is no choice of servers
if length(cloud_config) ≤ 1
working_server_ids = fill(1, length(cloud_config))
else
working_server_ids, _ = get_working_servers(cloud_config)
println("[CQS] <# Working servers Ids: ", working_server_ids)
end
if length(working_server_ids) == 0
error("No working servers found")
end
working_clconf = cloud_config[working_server_ids]
results, meta = distribute_task(task, working_clconf)
end
meta = reduce_meta(meta, Dict("to_schema" => t_to_schema, "submit" => t_submit))
set_last_meta(meta)
ret = convert_final_result(results)
return ret
end
function cloud_simulate(
hamiltonian::AbstractVector,
time_points :: Int64,
observables::Vector{<:AbstractArray}
; subspace_radius=0.,
)
clconf_default = CloudConfig(
["localhost"],
[8000],
)
return cloud_simulate(hamiltonian, time_points, observables, clconf_default; subspace_radius=subspace_radius)
end
# -- Single-hamiltonian versions
function cloud_simulate(hamiltonian, rest...; kwargs...)
cloud_simulate([hamiltonian], rest...; kwargs...)[1, :, :]
end
# --
function cloud_simulate(
hamiltonian::AbstractVector,
time_points :: Int64,
subspace_radius,
observables::Vector{<:AbstractArray},
clconf:: CloudConfig
)
cloud_simulate(hamiltonian, time_points, observables, clconf; subspace_radius=subspace_radius)
end
function test_client()
include("../tests/run_sim.jl")
ham, time_points, observables = get_test_task()
_ = cloud_simulate(ham, time_points, observables)
end
#main()
| CloudQSim | https://github.com/CloudQuantumSim/CloudQSim.jl.git |
|
[
"Apache-2.0"
] | 0.1.0 | cdcbbd821f44f0e6bdd0b0c6d33bb54f3033a887 | code | 990 | import LibDeflate
COMPRESS_DELIMITER = ':'
function compress(data::String)::String
compressor = LibDeflate.Compressor()
outvec = zeros(UInt8, length(data))
nbytes = LibDeflate.compress!(compressor, outvec, data)
if typeof(nbytes) == LibDeflate.LibDeflateError
throw(nbytes)
end
println("Compressed $(length(data)) -> $nbytes bytes")
# Prefix the payload with the uncompressed length so decompress can size its buffer.
nbytes_str = string(length(data))
prefix = nbytes_str * COMPRESS_DELIMITER
return prefix * String(outvec[1:nbytes])
end
function decompress(data::String)::String
decompressor = LibDeflate.Decompressor()
prefix, data = split(data, COMPRESS_DELIMITER, limit=2)
nbytes = parse(Int, prefix)
outvec = zeros(UInt8, nbytes)
nbytes = LibDeflate.decompress!(decompressor, outvec, data)
if typeof(nbytes) == LibDeflate.LibDeflateError
throw(nbytes)
end
println("Decompressed $(length(data)) -> $nbytes bytes")
return String(outvec[1:nbytes])
end
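# Round-trip sketch (mirrors the unit test): the format produced by `compress`
# is the uncompressed length, the ':' delimiter, then the raw DEFLATE payload,
# so `decompress` knows how large a buffer to allocate.
function _example_roundtrip()
    original = repeat("mesh topology ", 20) # repetitive, so it compresses well
    return decompress(compress(original)) == original # true
end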
| CloudQSim | https://github.com/CloudQuantumSim/CloudQSim.jl.git |
|
[
"Apache-2.0"
] | 0.1.0 | cdcbbd821f44f0e6bdd0b0c6d33bb54f3033a887 | code | 1048 | using BloqadeExpr, BloqadeLattices, BloqadeWaveforms
import CloudQSim
HOSTNAME = "cloudqs.lykov.tech"
PORT = 7700
remote_config = CloudQSim.CloudConfig()
CloudQSim.add_server!(remote_config, HOSTNAME, PORT)
@testset "Send to server" begin
nsites = 10
atoms = generate_sites(ChainLattice(), nsites, scale = 5.74)
T_end = 1.
Δ = ϕ = piecewise_linear(; clocks = [0, T_end], values = [0., 0.])
Ω = piecewise_linear(; clocks = [0, T_end], values = [2π, 2π])
h = rydberg_h(atoms; Ω = Ω, Δ = Δ, ϕ=ϕ)
qstates = 0:2^nsites-1
isodd = [x%2 for x in qstates]
rydberg = [Base.count_ones(x) for x in qstates]
observables = [isodd, rydberg]
time_points = 10
data = CloudQSim.cloud_simulate([h], time_points, observables, remote_config)
@test size(data) == (1, 10, 2)
data = CloudQSim.cloud_simulate(h, time_points, observables, remote_config)
@test size(data) == (10, 2)
data = CloudQSim.cloud_simulate(fill(h, 3), time_points, observables, remote_config)
@test size(data) == (3, 10, 2)
end
| CloudQSim | https://github.com/CloudQuantumSim/CloudQSim.jl.git |
|
[
"Apache-2.0"
] | 0.1.0 | cdcbbd821f44f0e6bdd0b0c6d33bb54f3033a887 | code | 142 | using Test
import CloudQSim
@testset "unit" begin
include("unit.jl")
end
@testset "integration" begin
include("integration.jl")
end
| CloudQSim | https://github.com/CloudQuantumSim/CloudQSim.jl.git |
|
[
"Apache-2.0"
] | 0.1.0 | cdcbbd821f44f0e6bdd0b0c6d33bb54f3033a887 | code | 648 |
@testset "CloudConfig" begin
clconf = CloudQSim.CloudConfig()
CloudQSim.add_server!(clconf, "localhost", 8000)
@test length(clconf) == 1
CloudQSim.add_server!(clconf, "localhost2", 8001, 2)
@test length(clconf) == 2
@test clconf.worker_counts == [1, 2]
@test clconf.addrs == ["localhost", "localhost2"]
@test clconf.ports == [8000, 8001]
end
@testset "Compression" begin
data = "Mary had a little lamb, little lamb, little lamb. Mary had a little lamb, its fleece was white as snow."
compressed = CloudQSim.compress(data)
@test compressed != data
@test CloudQSim.decompress(compressed) == data
end
| CloudQSim | https://github.com/CloudQuantumSim/CloudQSim.jl.git |
|
[
"Apache-2.0"
] | 0.1.0 | cdcbbd821f44f0e6bdd0b0c6d33bb54f3033a887 | docs | 1494 | # CloudQSim
[](https://github.com/CloudQuantumSim/CloudQSim.jl/actions)
## Installation
```
pkg> add CloudQSim
```
## Usage
CloudQSim calculates the time evolution of observables for quantum Hamiltonians.
Any diagonal observable is supported. An observable is defined by a vector that
maps each quantum state to its observable value.
Parameters:
* `hamiltonians` - a vector of Bloqade.jl Hamiltonians (a single Hamiltonian is also accepted)
* `time_points` - number of points in time at which observables are evaluated
* `observables` - Vector of observables to evaluate
* `clconf` - `CloudQSim.CloudConfig` specifies the servers to use
* `subspace_radius` (optional keyword) - used to generate a subspace for faster
evolution
### Minimal example
```julia
using BloqadeExpr, BloqadeLattices, BloqadeWaveforms
import CloudQSim
nsites = 10
atoms = generate_sites(ChainLattice(), nsites, scale = 5.74)
T_end = 1.
Δ = ϕ = piecewise_linear(; clocks = [0, T_end], values = [0., 0.])
Ω = piecewise_linear(; clocks = [0, T_end], values = [2π, 2π])
h = rydberg_h(atoms; Ω = Ω, Δ = Δ, ϕ = ϕ)
clconf = CloudQSim.CloudConfig()
CloudQSim.add_server!(clconf, "127.0.0.1", 8000)
qstates = 0:2^nsites-1
isodd = [x%2 for x in qstates]
rydberg = [Base.count_ones(x) for x in qstates]
observables = [isodd, rydberg]
time_points = 10
data, meta = CloudQSim.cloud_simulate(h, time_points, observables, clconf)
```
See the `examples/` folder for more usage.
| CloudQSim | https://github.com/CloudQuantumSim/CloudQSim.jl.git |
|
[
"Apache-2.0"
] | 0.1.0 | cdcbbd821f44f0e6bdd0b0c6d33bb54f3033a887 | docs | 98 | # CloudQSim.jl
Documentation for CloudQSim.jl will be here soon. Meanwhile, check out the GitHub repo.
| CloudQSim | https://github.com/CloudQuantumSim/CloudQSim.jl.git |
|
[
"MIT"
] | 0.1.5 | f09ca454bd05ecdf946ce4ebeaf8a7d687356d44 | code | 228 | module AlgorithmicCompetition
import Base: @invokelatest
include("common.jl")
include("competitive_equilibrium_solver.jl")
include("AIAPC2020/AIAPC2020.jl")
include("DDDC2023/DDDC2023.jl")
include("price_diagnostics.jl")
end
| AlgorithmicCompetition | https://github.com/jeremiahpslewis/AlgorithmicCompetition.jl.git |
|
[
"MIT"
] | 0.1.5 | f09ca454bd05ecdf946ce4ebeaf8a7d687356d44 | code | 412 | using AlgorithmicCompetition
using Dates
AlgorithmicCompetition.run_aiapc(
version = ENV["VERSION"],
start_timestamp = now(),
n_parameter_iterations = parse(Int, ENV["N_ITERATIONS"]),
slurm_metadata = (
SLURM_ARRAY_JOB_ID = parse(Int, ENV["SLURM_ARRAY_JOB_ID"]),
SLURM_ARRAY_TASK_ID = parse(Int, ENV["SLURM_ARRAY_TASK_ID"]),
),
debug = (parse(Int, ENV["DEBUG"]) == 1),
)
| AlgorithmicCompetition | https://github.com/jeremiahpslewis/AlgorithmicCompetition.jl.git |
|
[
"MIT"
] | 0.1.5 | f09ca454bd05ecdf946ce4ebeaf8a7d687356d44 | code | 437 | import Base.push!
using ReinforcementLearning
using DrWatson
const player_to_index = Dict(Player(1) => 1, Player(2) => 2)
const demand_to_index = (; :high => 1, :low => 2)
# Handle CartesianIndex actions
function Base.push!(
multiagent::MultiAgentPolicy,
::PostActStage,
env::E,
actions::CartesianIndex,
) where {E<:AbstractEnv}
actions = Tuple(actions)
Base.push!(multiagent, PostActStage(), env, actions)
end
| AlgorithmicCompetition | https://github.com/jeremiahpslewis/AlgorithmicCompetition.jl.git |
|
[
"MIT"
] | 0.1.5 | f09ca454bd05ecdf946ce4ebeaf8a7d687356d44 | code | 2698 | # Parameters from pg 3374 AIAPC 2020
using JuMP
using Chain
using Ipopt
using Flux: softmax
using Statistics
struct CompetitionParameters
μ::Float64
a_0::Float64
a::Tuple{Float64,Float64}
c::Tuple{Float64,Float64}
n_firms::Int64
function CompetitionParameters(μ, a_0, a, c)
length(a) != length(c) &&
throw(DimensionMismatch("a and c must be the same length."))
n_firms = length(a)
new(μ, a_0, a, c, n_firms)
end
end
function Q(p1, p2, params::CompetitionParameters)
# Logit demand function from pg 3372 AIAPC 2020
a_ = (params.a_0, params.a...)
p_ = (0, p1, p2)
mu_vect = ((a_ .- p_) ./ params.μ)
q_out = softmax([mu_vect...])
return q_out[2:3]
end
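# Worked sketch (hypothetical parameter values): with symmetric firms and equal
# prices, the logit demand shares are equal; the outside good absorbs the rest.
function _example_demand_shares()
    params = CompetitionParameters(0.25, 0.0, (2.0, 2.0), (1.0, 1.0))
    q = Q(1.5, 1.5, params)
    return q[1] ≈ q[2] # equal prices imply equal shares
end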
function π(p1::T, p2::T, params::CompetitionParameters) where {T<:Real}
# Returns the per-firm profits given prices (p1, p2)
q_ = Q(p1, p2, params)
π_ = ((p1, p2) .- params.c) .* q_
return π_
end
function p_BR(p_minus_i_::T, params::CompetitionParameters) where {T<:Real}
# Best response Bertrand price
π_i_(p_i_, p_minus_i_) = π(p_i_, p_minus_i_, params)[1]
model = Model(Ipopt.Optimizer)
set_silent(model)
register(model, :π_i_, 2, π_i_; autodiff = true)
@variable(model, p_minus_i)
@variable(model, p_i)
@constraint(model, p_minus_i == p_minus_i_)
@NLobjective(model, Max, π_i_(p_i, p_minus_i))
optimize!(model)
return value(p_i)
end
π_i(p_i::T, p_minus_i::T, params::CompetitionParameters) where {T<:Real} =
π(p_i, p_minus_i, params)[1]
π_bertrand(p_1::T, params::CompetitionParameters) where {T<:Real} =
π(p_1, p_BR(p_1, params), params)[1]
π_monop(p_1::T, p_2::T, params::CompetitionParameters) where {T<:Real} =
sum(π(p_1, p_2, params)) / 2 # per-firm
function solve_monopolist(params::CompetitionParameters)
model = Model(Ipopt.Optimizer)
π_monop_(p_1, p_2) = π_monop(p_1, p_2, params)
set_silent(model)
register(model, :π_monop_, 2, π_monop_; autodiff = true)
@variable(model, p_1)
@variable(model, p_2)
@NLobjective(model, Max, π_monop_(p_1, p_2))
optimize!(model)
return model, (value(p_1), value(p_2))
end
function solve_bertrand(params::CompetitionParameters)
π_i_(p_1, p_2) = π_i(p_1, p_2, params)
model = Model(Ipopt.Optimizer)
set_silent(model)
register(model, :π_i_, 2, π_i_, autodiff = true)
@variable(model, p_i)
@NLparameter(model, p_min_i == 1)
@NLobjective(model, Max, π_i_(p_i, p_min_i))
optimize!(model)
i = 0
while !isapprox(value(p_i), value(p_min_i))
i += 1
set_value(p_min_i, value(p_i))
optimize!(model)
end
return model, (value(p_i), value(p_min_i)), i
end
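# Usage sketch (parameter values as in the AIAPC baseline used elsewhere in this
# repository): solve for the symmetric Bertrand-Nash and monopoly prices.
function _example_equilibria()
    params = CompetitionParameters(0.25, 0.0, (2.0, 2.0), (1.0, 1.0))
    _, (p_nash, _), _ = solve_bertrand(params)
    _, (p_monop, _) = solve_monopolist(params)
    return (p_nash, p_monop) # the Bertrand price lies below the monopoly price
end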
| AlgorithmicCompetition | https://github.com/jeremiahpslewis/AlgorithmicCompetition.jl.git |
|
[
"MIT"
] | 0.1.5 | f09ca454bd05ecdf946ce4ebeaf8a7d687356d44 | code | 1003 | using AlgorithmicCompetition
using Dates
if Sys.isapple()
# For debugging on MacOS
ENV["DEBUG"] = 1
ENV["SLURM_ARRAY_TASK_ID"] = 1
ENV["SLURM_ARRAY_JOB_ID"] = 1
ENV["N_ITERATIONS"] = 1
ENV["VERSION"] = "v1"
end
debug = parse(Int, ENV["DEBUG"]) == 1
SLURM_ARRAY_TASK_ID = parse(Int, ENV["SLURM_ARRAY_TASK_ID"])
SLURM_ARRAY_JOB_ID = parse(Int, ENV["SLURM_ARRAY_JOB_ID"])
n_parameter_iterations = parse(Int, ENV["N_ITERATIONS"])
if debug && Sys.isapple()
n_grid_increments = 1
elseif debug
n_grid_increments = 10
else
n_grid_increments = 100
end
if debug && SLURM_ARRAY_TASK_ID > 10
return
else
AlgorithmicCompetition.run_dddc(
version = ENV["VERSION"],
start_timestamp = now(),
n_parameter_iterations = 1,
n_grid_increments = n_grid_increments,
batch_metadata = (
SLURM_ARRAY_JOB_ID = SLURM_ARRAY_JOB_ID,
SLURM_ARRAY_TASK_ID = SLURM_ARRAY_TASK_ID,
),
debug = debug,
)
end
| AlgorithmicCompetition | https://github.com/jeremiahpslewis/AlgorithmicCompetition.jl.git |
|
[
"MIT"
] | 0.1.5 | f09ca454bd05ecdf946ce4ebeaf8a7d687356d44 | code | 1308 | using Distributed
using ClusterManagers
using AlgorithmicCompetition
using Dates
using CSV
n_parameter_iterations = 1000
n_parameter_combinations = 10000
batch_size = 500
duration = 0.5 # run duration in hours
duration_minutes = Int(floor(duration * 60))
n_sims_per_hour = 5 * 60 # 5 simulations per minute
speed_discount = 0.9 # 10% buffer for speed discount
n_cores =
n_parameter_iterations * n_parameter_combinations / duration / n_sims_per_hour *
speed_discount |>
ceil |>
Int
version = "v0.0.2"
start_timestamp = now()
start_timestamp_str = Dates.format(start_timestamp, "yyyy-mm-dd__HH_MM_SS")
addprocs(
SlurmManager(n_cores),
partition = "normal",
t = "00:$duration_minutes:00",
cpus_per_task = "1",
mem_per_cpu = "1G",
# q="express",
)
@everywhere begin
using Pkg
Pkg.instantiate()
using AlgorithmicCompetition: run_and_extract
end
@time exp_df = AlgorithmicCompetition.run_aiapc(
batch_size = batch_size,
version = version,
start_timestamp = start_timestamp,
# max_iter=Int(1e3),
# convergence_threshold=Int(1e2),
n_parameter_iterations = n_parameter_iterations,
)
file_name = "$(ENV["HOME"])/simulation_results_aiapc_$(version)_$(start_timestamp_str).csv"
CSV.write(file_name, exp_df)
rmprocs(workers())
| AlgorithmicCompetition | https://github.com/jeremiahpslewis/AlgorithmicCompetition.jl.git |
|
[
"MIT"
] | 0.1.5 | f09ca454bd05ecdf946ce4ebeaf8a7d687356d44 | code | 961 | using AlgorithmicCompetition
using Statistics
using DataFrames
using CSV
using Distributed
using Dates
version = "v0.0.2"
start_timestamp = now()
start_timestamp = Dates.format(start_timestamp, "yyyy-mm-dd__HH_MM_SS")
if Sys.isapple()
n_procs_ = 7 # up to 8 performance cores on m1 (7 workers + 1 main)
n_parameter_iterations = 2
else
n_procs_ = 60
n_parameter_iterations = 40
end
_procs = addprocs(
n_procs_,
topology = :master_worker,
exeflags = ["--threads=1", "--project=$(Base.active_project())"],
)
@everywhere begin
using Pkg
Pkg.instantiate()
using AlgorithmicCompetition: run_and_extract
end
@time exp_df = AlgorithmicCompetition.run_aiapc(;
n_parameter_iterations = n_parameter_iterations,
max_iter = Int(1e9),
version = version,
start_timestamp = start_timestamp,
)
rmprocs(_procs)
file_name = "simulation_results_aiapc_$(version)_$(start_timestamp).csv"
CSV.write(file_name, exp_df)
| AlgorithmicCompetition | https://github.com/jeremiahpslewis/AlgorithmicCompetition.jl.git |
|
[
"MIT"
] | 0.1.5 | f09ca454bd05ecdf946ce4ebeaf8a7d687356d44 | code | 1148 | using AlgorithmicCompetition
using Statistics
using DataFrames
using CSV
using Distributed
using Dates
version = 0.6
start_timestamp = now()
start_timestamp = Dates.format(start_timestamp, "yyyy-mm-dd__HH_MM_SS")
if Sys.isapple()
n_procs_ = 7 # up to 8 performance cores on m1 (7 workers + 1 main)
n_parameter_iterations = 1
n_grid_increments = 10
else
n_procs_ = 63
n_parameter_iterations = 40 * 14 # 40 takes about an hour on 63 cores
n_grid_increments = 10
end
_procs = addprocs(
n_procs_,
topology = :master_worker,
exeflags = ["--threads=1", "--project=$(Base.active_project())"],
)
@everywhere begin
using Pkg
Pkg.instantiate()
using AlgorithmicCompetition: run_and_extract
end
@time exp_list = AlgorithmicCompetition.run_dddc(;
n_parameter_iterations = n_parameter_iterations,
max_iter = Int(1e9),
n_grid_increments = n_grid_increments,
)
rmprocs(_procs)
file_name = "simulation_results_v$(version)_dddc_$(start_timestamp).csv"
exp_list_ = AlgorithmicCompetition.DDDCSummary[exp_list...]
df = AlgorithmicCompetition.extract_sim_results(exp_list_)
CSV.write(file_name, df)
| AlgorithmicCompetition | https://github.com/jeremiahpslewis/AlgorithmicCompetition.jl.git |
|
[
"MIT"
] | 0.1.5 | f09ca454bd05ecdf946ce4ebeaf8a7d687356d44 | code | 4946 | using AlgebraOfGraphics
using CairoMakie
using DataFrames
using Chain
using DataFrameMacros
max_profit_for_price(
price::Float64,
price_options::Vector{Float64},
competition_params::CompetitionParameters,
) = maximum(first.(π.(price_options, (price,), (competition_params,))))
max_profit_for_price(
price_options::Vector{Float64},
competition_params::CompetitionParameters,
) = max_profit_for_price.(price_options, (price_options,), (competition_params,))
min_profit_for_price(
price::Float64,
price_options::Vector{Float64},
competition_params::CompetitionParameters,
) = minimum(first.(π.(price_options, (price,), (competition_params,))))
min_profit_for_price(
price_options::Vector{Float64},
competition_params::CompetitionParameters,
) = min_profit_for_price.(price_options, (price_options,), (competition_params,))
symmetric_profit(price::Float64, competition_params::CompetitionParameters) =
first(π(price, price, competition_params))
symmetric_profit(
price_options::Vector{Float64},
competition_params::CompetitionParameters,
) = symmetric_profit.(price_options, (competition_params,))
function extract_profit_results(profit_results, price_options)
profit_results[:price_options] = price_options
profit_df = @chain profit_results begin
DataFrame
stack(Not(:price_options), variable_name = :demand, value_name = :profit)
end
return profit_df
end
function generate_profit_df(
hyperparams::HyperParameters,
profit_for_price_function,
label,
) where {HyperParameters<:Union{AIAPCHyperParameters,DDDCHyperParameters}}
profit_df = Dict(
demand => profit_for_price_function(
hyperparams.price_options,
hyperparams.competition_params_dict[demand],
) for demand in [:low, :high]
)
profit_df = extract_profit_results(profit_df, hyperparams.price_options)
profit_df[!, :label] .= label
return profit_df
end
function generate_profit_df(
hyperparams::HyperParameters,
) where {HyperParameters<:Union{AIAPCHyperParameters,DDDCHyperParameters}}
profit_df_ = [
generate_profit_df(hyperparams, max_profit_for_price, "max_profit"),
generate_profit_df(hyperparams, min_profit_for_price, "min_profit"),
generate_profit_df(hyperparams, symmetric_profit, "symmetric_profit"),
]
profit_df = vcat(profit_df_...)
return profit_df
end
function draw_price_diagnostic(hyperparams::AIAPCHyperParameters)
profit_df = generate_profit_df(hyperparams)
profit_df = unstack(profit_df, :label, :profit)
critical_prices = [hyperparams.p_Bert_nash_equilibrium, hyperparams.p_monop_opt]
plt_1 =
data((
price = critical_prices,
profit = symmetric_profit(
critical_prices,
hyperparams.competition_params_dict[:high],
),
label = ["Bertrand Nash", "Monopoly"],
)) *
mapping(:price, :profit, color = :label => "Equilibria") *
visual(Scatter)
plt = @chain profit_df begin
@subset(:demand == "high")
data(_) *
mapping(
:price_options => "Price",
:symmetric_profit => "Profit",
lower = :min_profit,
upper = :max_profit,
) *
(visual(Scatter) + visual(LinesFill))
end
return plt + plt_1
end
function draw_price_diagnostic(hyperparams::DDDCHyperParameters)
profit_df = generate_profit_df(hyperparams)
profit_df = unstack(profit_df, :label, :profit)
critical_prices = vcat(
[
[hyperparams.p_Bert_nash_equilibrium[demand], hyperparams.p_monop_opt[demand]] for demand in [:high, :low]
]...,
)
critical_profits = vcat(
[
symmetric_profit(
[
hyperparams.p_Bert_nash_equilibrium[demand],
hyperparams.p_monop_opt[demand],
],
hyperparams.competition_params_dict[demand],
) for demand in [:high, :low]
]...,
)
plt_1 =
data((
price = critical_prices,
profit = critical_profits,
label = repeat(["Bertrand Nash", "Monopoly"], outer = 2),
demand = repeat(["High Demand", "Low Demand"], inner = 2),
)) *
mapping(
:price,
:profit => "Profit",
color = :label => "Equilibria",
row = :demand,
) *
visual(Scatter)
plt = @chain profit_df begin
@transform(:demand = :demand == "high" ? "High Demand" : "Low Demand")
data(_) *
mapping(
:price_options => "Price",
:symmetric_profit => "Profit",
lower = :min_profit,
upper = :max_profit,
row = :demand => "Demand Level",
) *
(visual(Scatter) + visual(LinesFill))
end
return plt + plt_1
end
| AlgorithmicCompetition | https://github.com/jeremiahpslewis/AlgorithmicCompetition.jl.git |
|
[
"MIT"
] | 0.1.5 | f09ca454bd05ecdf946ce4ebeaf8a7d687356d44 | code | 199 | include("params.jl")
include("env_helpers.jl")
include("env.jl")
include("hooks.jl")
include("policy.jl")
include("stop_condition.jl")
include("run.jl")
include("summary.jl")
include("run_aiapc.jl")
| AlgorithmicCompetition | https://github.com/jeremiahpslewis/AlgorithmicCompetition.jl.git |
|
[
"MIT"
] | 0.1.5 | f09ca454bd05ecdf946ce4ebeaf8a7d687356d44 | code | 6395 | using ReinforcementLearning
"""
AIAPCEnv(p::AIAPCHyperParameters)
Build an environment to reproduce the results of the 2020 Calvano, Calzolari, Denicolò & Pastorello AER Paper
Calvano, E., Calzolari, G., Denicolò, V., & Pastorello, S. (2020). Artificial Intelligence, Algorithmic Pricing, and Collusion. American Economic Review, 110(10), 3267–3297. https://doi.org/10.1257/aer.20190623
"""
struct AIAPCEnv <: AbstractEnv
α::Float64 # Learning parameter
β::Float64 # Exploration parameter
δ::Float64 # Discount factor
max_iter::Int # Maximum number of iterations
convergence_threshold::Int # Convergence threshold
n_players::Int # Number of players
price_options::Vector{Float64} # Price options
price_index::Vector{Int64} # Price indices
competition_params_dict::Dict{Symbol,CompetitionParameters} # Competition parameters, keyed by demand state (:high / :low)
demand_mode::Symbol # Demand mode, :high or :low
memory::Vector{CartesianIndex{2}} # Memory vector (previous prices)
state_space::Base.OneTo{Int64} # State space
state_space_lookup::Array{Int64,2} # State space lookup table
n_prices::Int # Number of price options
n_state_space::Int64 # Number of states
convergence_vect::Vector{Bool} # Convergence status for each player
is_done::Vector{Bool} # Episode is complete
p_Bert_nash_equilibrium::Float64 # Nash equilibrium price (Bertrand price)
p_monop_opt::Float64 # Monopoly optimal price
action_space::Tuple # Action space
profit_array::Array{Float64,3} # Profit given price pair as coordinates
reward::Vector{Float64} # Reward vector
function AIAPCEnv(p::AIAPCHyperParameters)
price_options = Vector{Float64}(p.price_options)
n_prices = length(p.price_options)
price_index = Vector{Int64}(Int64.(1:n_prices))
n_players = p.n_players
n_state_space = n_prices^(p.memory_length * n_players)
state_space = Base.OneTo(Int64(n_state_space))
action_space = construct_AIAPC_action_space(price_index)
profit_array = construct_AIAPC_profit_array(
price_options,
p.competition_params_dict,
n_players;
p.demand_mode,
)
state_space_lookup = construct_AIAPC_state_space_lookup(action_space, n_prices)
@assert p.demand_mode ∈ (:high, :low)
new(
p.α,
p.β,
p.δ,
p.max_iter,
p.convergence_threshold,
n_players,
p.price_options,
price_index,
p.competition_params_dict,
p.demand_mode,
CartesianIndex{2}[initialize_price_memory(price_index, p.n_players)], # Memory, randomly initialized
state_space,
state_space_lookup,
n_prices,
n_state_space,
Bool[false, false], # Convergence vector
Vector{Bool}([false]), # Episode is done indicator
p.p_Bert_nash_equilibrium,
p.p_monop_opt,
action_space,
profit_array,
Float64[0.0, 0.0], # Reward vector
)
end
end
"""
    RLBase.act!(env::AIAPCEnv, price_tuple::CartesianIndex{2})
Act in the environment by setting the memory to the given price tuple and setting `is_done` to `true`.
"""
function RLBase.act!(env::AIAPCEnv, price_tuple::CartesianIndex{2})
# TODO: Fix support for longer memories
memory_index = env.memory[1]
env.reward .= env.profit_array[memory_index, :]
env.memory[1] = price_tuple
env.is_done[1] = true
end
RLBase.action_space(env::AIAPCEnv, ::Player) = env.price_index # Choice of price
RLBase.action_space(env::AIAPCEnv, ::SimultaneousPlayer) = env.action_space
RLBase.legal_action_space(env::AIAPCEnv, p) = is_terminated(env) ? () : action_space(env, p)
const legal_action_space_mask_object_AIAPC = fill(true, 15) # n_prices is fixed at 15 in AIAPCHyperParameters
RLBase.legal_action_space_mask(env::AIAPCEnv, player::Player) =
legal_action_space_mask_object_AIAPC
RLBase.action_space(env::AIAPCEnv) = action_space(env, SIMULTANEOUS_PLAYER)
"""
RLBase.reward(env::AIAPCEnv)
Return the reward for the current state. If the episode is done, return the profit, else return `0, 0`.
"""
function RLBase.reward(env::AIAPCEnv)
env.is_done[1] ? env.reward : (zero(Float64), zero(Float64))
end
"""
RLBase.reward(env::AIAPCEnv, p::Int)
Return the reward for the current state for the player with index `p`.
"""
function RLBase.reward(env::AIAPCEnv, p::Int)
return env.reward[p]
end
"""
RLBase.reward(env::AIAPCEnv, player::Player)
Return the reward for the current state for `player`. If the episode is done, return the profit, else return `0`.
"""
RLBase.reward(env::AIAPCEnv, p::Player) = reward(env, player_to_index[p])
RLBase.state_space(env::AIAPCEnv, ::Observation, p) = env.state_space
# State without player spec is a noop
RLBase.state(env::AIAPCEnv) = nothing
"""
RLBase.state(env::AIAPCEnv, player::Player)
Return the current state as an integer, mapped from the environment memory.
"""
function RLBase.state(env::AIAPCEnv, player::Player)
memory_index = env.memory[1]
env.state_space_lookup[memory_index]
end
"""
RLBase.is_terminated(env::AIAPCEnv)
Return whether the episode is done.
"""
RLBase.is_terminated(env::AIAPCEnv) = env.is_done[1]
function RLBase.reset!(env::AIAPCEnv)
env.is_done[1] = false
end
const players_ = (Player(1), Player(2))
RLBase.players(::AIAPCEnv) = players_
RLBase.current_player(::AIAPCEnv) = SIMULTANEOUS_PLAYER
RLBase.NumAgentStyle(::AIAPCEnv) = MultiAgent(2)
RLBase.DynamicStyle(::AIAPCEnv) = SIMULTANEOUS
RLBase.ActionStyle(::AIAPCEnv) = MINIMAL_ACTION_SET
RLBase.InformationStyle(::AIAPCEnv) = IMPERFECT_INFORMATION
RLBase.StateStyle(::AIAPCEnv) = Observation{Int64}()
RLBase.RewardStyle(::AIAPCEnv) = STEP_REWARD
RLBase.UtilityStyle(::AIAPCEnv) = GENERAL_SUM
RLBase.ChanceStyle(::AIAPCEnv) = DETERMINISTIC
| AlgorithmicCompetition | https://github.com/jeremiahpslewis/AlgorithmicCompetition.jl.git |
|
[
"MIT"
] | 0.1.5 | f09ca454bd05ecdf946ce4ebeaf8a7d687356d44 | code | 1253 | """
construct_AIAPC_state_space_lookup(action_space, n_prices)
Construct a lookup table from action space to the state space.
"""
function construct_AIAPC_state_space_lookup(action_space, n_prices)
@assert length(action_space) == n_prices^2
state_space_lookup = reshape(1:length(action_space), n_prices, n_prices)
return state_space_lookup
end
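# Shape sketch (illustrative): with 3 prices, the 9 joint actions map onto
# states 1..9, indexed by the two players' price indices (column-major).
function _example_state_lookup()
    action_space = Tuple(CartesianIndex(i, j) for i in 1:3, j in 1:3)
    lookup = construct_AIAPC_state_space_lookup(action_space, 3)
    return lookup[CartesianIndex(2, 3)] # price pair (2, 3) maps to state 8
end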
"""
construct_AIAPC_profit_array(price_options, params, n_players)
Construct a 3-dimensional array which holds the profit for each player given a price pair.
The first dimension is player 1's action, the second dimension is player 2's action, and
the third dimension is the player index for their profit.
"""
function construct_AIAPC_profit_array(
price_options::Vector{Float64},
competition_params_dict::Dict{Symbol,CompetitionParameters},
n_players::Int;
demand_mode = :high,
)
n_prices = length(price_options)
params_ = competition_params_dict[demand_mode]
profit_array = zeros(Float64, n_prices, n_prices, n_players)
for k = 1:n_players
for i = 1:n_prices
for j = 1:n_prices
profit_array[i, j, k] = π(price_options[i], price_options[j], params_)[k]
end
end
end
return profit_array
end
| AlgorithmicCompetition | https://github.com/jeremiahpslewis/AlgorithmicCompetition.jl.git |
|
[
"MIT"
] | 0.1.5 | f09ca454bd05ecdf946ce4ebeaf8a7d687356d44 | code | 4599 | using ReinforcementLearning
using ReinforcementLearningFarm: TotalRewardPerLastNEpisodes
import Base.push!
"""
ConvergenceCheck(convergence_threshold::Int64)
Hook to check convergence, as defined by the best response for each state being stable for a given number of iterations.
"""
mutable struct ConvergenceCheck <: AbstractHook
convergence_duration::Int64
iterations_until_convergence::Int64
best_response_vector::Vector{Int64}
is_converged::Bool
convergence_threshold::Int64
function ConvergenceCheck(n_states::Int64, convergence_threshold::Int64)
new(0, 0, Vector{Int64}(zeros(Int64, n_states)), false, convergence_threshold)
end
end
function Base.push!(
h::ConvergenceCheck,
state_::S,
best_action::Int64,
iter_converged::Bool,
) where {S<:Integer}
# Increment duration whenever argmax action is stable (convergence criteria)
# Increment convergence metric (e.g. convergence not reached)
# Keep track of number of iterations it takes until convergence
h.iterations_until_convergence += 1
if iter_converged
h.convergence_duration += 1
if h.convergence_duration >= h.convergence_threshold
h.is_converged = true
end
else
h.convergence_duration = 0
h.best_response_vector[state_] = best_action
h.is_converged = false
end
return
end
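# Minimal illustration (hypothetical numbers): convergence is declared once the
# greedy action for a state stays unchanged for `convergence_threshold`
# consecutive iterations.
function _example_convergence_check()
    h = ConvergenceCheck(10, 3) # 10 states, threshold of 3 stable iterations
    push!(h, 1, 2, false)       # record action 2 as the best response in state 1
    for _ in 1:3
        push!(h, 1, 2, true)    # stable best response; duration accumulates
    end
    return h.is_converged # true
end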
"""
_best_action_lookup(state_, table)
Look up the best action for a given state in the q-value matrix
"""
function _best_action_lookup(state_, table)
best_action = 1
max_value = table[1, state_]
for i = 2:size(table, 1)
value = table[i, state_]
if value > max_value
max_value = value
best_action = i
end
end
return Int64(best_action)
end
function Base.push!(
h::ConvergenceCheck,
table::Matrix{F},
state_::S,
) where {S<:Integer,F<:AbstractFloat}
# Convergence is defined over argmax action for each state
# E.g. best / greedy action
best_action = _best_action_lookup(state_, table)
iter_converged = (@views h.best_response_vector[state_] == best_action)
Base.push!(h, state_, best_action, iter_converged)
return h.is_converged
end
function Base.push!(
h::ConvergenceCheck,
::PostActStage,
agent::Agent{P,T},
env::E,
player::Player,
) where {P<:AbstractPolicy,T<:Trajectory,E<:AbstractEnv}
Base.push!(h, PostActStage(), agent.policy, env, player)
end
function Base.push!(
h::ConvergenceCheck,
::PostActStage,
policy::QBasedPolicy{L,Exp},
env::E,
player::Player,
) where {L<:AbstractLearner,Exp<:AbstractExplorer,E<:AbstractEnv}
Base.push!(h, PostActStage(), policy.learner, env, player)
end
function Base.push!(
h::ConvergenceCheck,
::PostActStage,
learner::L,
env::E,
player::Player,
) where {L<:AbstractLearner,E<:AbstractEnv}
Base.push!(h, PostActStage(), learner.approximator, env, player)
end
function Base.push!(
h::ConvergenceCheck,
::PostActStage,
approximator::TabularApproximator{A},
env::E,
player::Player,
) where {A,E<:AbstractEnv}
Base.push!(h, PostActStage(), approximator.model, env, player)
end
function Base.push!(
h::ConvergenceCheck,
::PostActStage,
table::Matrix{F},
env::E,
player::Player,
) where {F<:AbstractFloat,E<:AbstractEnv}
state_ = RLBase.state(env, player)
player_index = player_to_index[player]
env.convergence_vect[player_index] = Base.push!(h, table, state_)
return
end
function AIAPCPerformanceHook(env::AbstractEnv)
MultiAgentHook(
PlayerTuple(
p => ComposedHook(
ConvergenceCheck(env.n_state_space, env.convergence_threshold),
) for p in players(env)
),
)
end
function AIAPCDebugHook(env::AbstractEnv)
MultiAgentHook(
PlayerTuple(
p => ComposedHook(
# TotalRewardPerEpisode(; is_display_on_exit = false),
ConvergenceCheck(env.n_state_space, env.convergence_threshold),
TotalRewardPerLastNEpisodes(;
max_episodes = env.convergence_threshold + 100,
),
# TODO: MultiAgent version of TotalRewardPerEpisode / better player handling for hooks
) for p in players(env)
),
)
end
function Base.push!(
hook::MultiAgentHook,
stage::AbstractStage,
policy::MultiAgentPolicy,
env::AIAPCEnv,
)
@simd for p in (Player(1), Player(2))
Base.push!(hook[p], stage, policy[p], env, p)
end
end
| AlgorithmicCompetition | https://github.com/jeremiahpslewis/AlgorithmicCompetition.jl.git |
|
[
"MIT"
] | 0.1.5 | f09ca454bd05ecdf946ce4ebeaf8a7d687356d44 | code | 2974 | """
CompetitionSolution(params::CompetitionParameters)
Solve the monopolist and Bertrand competition models for the given parameters and return the solution.
"""
struct CompetitionSolution
p_Bert_nash_equilibrium::Float64
p_monop_opt::Float64
params::CompetitionParameters
function CompetitionSolution(params::CompetitionParameters)
model_monop, p_monop = solve_monopolist(params)
p_Bert_nash_equilibrium = solve_bertrand(params)[2][1]
p_monop_opt = solve_monopolist(params)[2][1]
new(p_Bert_nash_equilibrium, p_monop_opt, params)
end
end
"""
AIAPCHyperParameters(
α::Float64,
β::Float64,
δ::Float64,
max_iter::Int,
competition_solution_dict::Dict{Symbol,CompetitionSolution};
convergence_threshold::Int = Int(1e5),
)
Hyperparameters which define a specific AIAPC environment.
"""
struct AIAPCHyperParameters
α::Float64
β::Float64
δ::Float64
max_iter::Int
convergence_threshold::Int
price_options::Vector{Float64}
memory_length::Int
n_players::Int
competition_params_dict::Dict{Symbol,CompetitionParameters}
p_Bert_nash_equilibrium::Float64
p_monop_opt::Float64
demand_mode::Symbol
function AIAPCHyperParameters(
α::Float64,
β::Float64,
δ::Float64,
max_iter::Int,
competition_solution_dict::Dict{Symbol,CompetitionSolution};
convergence_threshold::Int = Int(1e5),
demand_mode::Symbol = :high,
)
@assert max_iter > convergence_threshold
@assert demand_mode ∈ [:high, :low]
ξ = 0.1
δ = 0.95 # NB: fixed here, overriding the `δ` constructor argument
n_prices = 15
n_players = 2
memory_length = 1
# p_monop defined above
p_range_pad =
ξ * (
competition_solution_dict[demand_mode].p_monop_opt -
competition_solution_dict[demand_mode].p_Bert_nash_equilibrium
)
price_options = [
range(
competition_solution_dict[demand_mode].p_Bert_nash_equilibrium -
p_range_pad,
competition_solution_dict[demand_mode].p_monop_opt + p_range_pad,
n_prices,
)...,
]
new(
α,
β,
δ,
max_iter,
convergence_threshold,
price_options,
memory_length,
n_players,
Dict(d_ => competition_solution_dict[d_].params for d_ in [:high, :low]),
competition_solution_dict[demand_mode].p_Bert_nash_equilibrium,
competition_solution_dict[demand_mode].p_monop_opt,
demand_mode,
)
end
end
function construct_AIAPC_action_space(price_index)
Tuple(CartesianIndex{2}(i, j) for i in price_index for j in price_index)
end
function initialize_price_memory(price_index, n_players::Int)
CartesianIndex{2}(rand(price_index, n_players)...)
end
| AlgorithmicCompetition | https://github.com/jeremiahpslewis/AlgorithmicCompetition.jl.git |
|
[
"MIT"
] | 0.1.5 | f09ca454bd05ecdf946ce4ebeaf8a7d687356d44 | code | 2459 | using ReinforcementLearning
using ReinforcementLearningFarm: EpsilonSpeedyExplorer
"""
Q_i_0(env::AIAPCEnv)
Calculate the Q-value for player i at time t=0, given the price chosen by player i and assuming random play over the price options of player -i.
"""
function Q_i_0(env::AIAPCEnv)
Float64[mean(env.profit_array[:, :, 1], dims = 2) ./ (1 - env.δ)...]
end
"""
InitMatrix(env::AIAPCEnv, mode = "zero")
Initialize the Q-matrix for the AIAPC environment.
"""
function InitMatrix(env::AIAPCEnv; mode = "zero")
if mode == "zero"
return zeros(env.n_prices, env.n_state_space)
elseif mode == "baseline"
opponent_randomizes_expected_profit = Q_i_0(env)
return repeat(opponent_randomizes_expected_profit, 1, env.n_state_space)
elseif mode == "constant"
return fill(5.0, env.n_prices, env.n_state_space) # Float64, matching the other modes
else
@assert false "Unknown mode"
end
end
"""
AIAPCPolicy(env::AIAPCEnv; mode = "baseline")
Create a policy for the AIAPC environment, with symmetric agents, using a tabular Q-learner. Mode determines the initialization of the Q-matrix.
"""
function AIAPCPolicy(env::AIAPCEnv; mode = "baseline")
aiapc_policy = MultiAgentPolicy(
PlayerTuple(
p => Agent(
QBasedPolicy(;
learner = TDLearner(
# TabularQApproximator with specified init matrix
TabularApproximator(InitMatrix(env, mode = mode)),
# For param info: https://github.com/JuliaReinforcementLearning/ReinforcementLearning.jl/blob/f97747923c6d7bbc5576f81664ed7b05a2ab8f1e/src/ReinforcementLearningZoo/src/algorithms/tabular/td_learner.jl#L15
:SARS;
γ = env.δ,
α = env.α,
n = 0,
),
explorer = EpsilonSpeedyExplorer(env.β * 1e-5),
),
Trajectory(
CircularArraySARTSTraces(;
capacity = 1,
state = Int64 => (),
action = Int64 => (),
reward = Float64 => (),
terminal = Bool => (),
),
DummySampler(),
InsertSampleRatioController(),
),
) for p in players(env)
),
)
return aiapc_policy
end
| AlgorithmicCompetition | https://github.com/jeremiahpslewis/AlgorithmicCompetition.jl.git |
|
[
"MIT"
] | 0.1.5 | f09ca454bd05ecdf946ce4ebeaf8a7d687356d44 | code | 1945 | using ReinforcementLearning
using Distributed
import Base
import ReinforcementLearning: RLCore, RLBase
# Patch to improve type stability and try to speed things up (avoid generator)
function RLBase.plan!(multiagent::MultiAgentPolicy, env::AIAPCEnv)
return CartesianIndex{2}(
Tuple{Int64,Int64}(
RLBase.plan!(multiagent[player_], env, player_) for player_ ∈ players(env)
),
)
end
function Experiment(env::AIAPCEnv; stop_on_convergence = true, debug = false)
RLCore.Experiment(
AIAPCPolicy(env),
env,
AIAPCStop(env; stop_on_convergence = stop_on_convergence),
debug ? AIAPCDebugHook(env) : AIAPCPerformanceHook(env),
)
end
function Base.run(experiments::Vector{RLCore.Experiment})
sendto(workers(), experiments = experiments)
status = pmap(1:length(experiments)) do i
        experiment = experiments[i]
RLCore._run(
experiment.policy,
experiment.env,
experiment.stop_condition,
experiment.hook,
ResetIfEnvTerminated(),
)
experiments[i]
end
end
function Base.run(
hyperparameters::AIAPCHyperParameters;
stop_on_convergence = true,
debug = false,
)
env = AIAPCEnv(hyperparameters)
experiment = Experiment(env; stop_on_convergence = stop_on_convergence, debug = debug)
RLCore._run(
experiment.policy,
experiment.env,
experiment.stop_condition,
experiment.hook,
ResetIfEnvTerminated(),
)
return experiment
end
"""
run_and_extract(hyperparameters::AIAPCHyperParameters; stop_on_convergence = true)
Runs the experiment and returns the economic summary.
"""
function run_and_extract(
hyperparameters::AIAPCHyperParameters;
stop_on_convergence = true,
debug = false,
)
economic_summary(
run(hyperparameters; stop_on_convergence = stop_on_convergence, debug = debug),
)
end
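# Usage sketch (hypothetical α and β values; the competition parameters mirror
# those used by `run_aiapc` elsewhere in this package):
function _example_run_and_extract()
    params = CompetitionParameters(0.25, 0, (2, 2), (1, 1))
    solution_dict = Dict(d => CompetitionSolution(params) for d in [:high, :low])
    hyperparams = AIAPCHyperParameters(0.1, 1.0, 0.95, Int(1e7), solution_dict)
    return run_and_extract(hyperparams; stop_on_convergence = true)
end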
| AlgorithmicCompetition | https://github.com/jeremiahpslewis/AlgorithmicCompetition.jl.git |
|
[
"MIT"
] | 0.1.5 | f09ca454bd05ecdf946ce4ebeaf8a7d687356d44 | code | 3520 | import ProgressMeter: @showprogress
using Distributed
using Random
using StatsBase
using DataFrames
using CSV
using Dates
function build_hyperparameter_set(
α_vect,
β_vect,
δ,
max_iter,
competition_solution_dict,
convergence_threshold,
n_parameter_iterations,
)
hyperparameter_vect = [
AIAPCHyperParameters(
α,
β,
δ,
max_iter,
competition_solution_dict;
convergence_threshold = convergence_threshold,
) for α in α_vect for β in β_vect
]
# Shuffle hyperparameter_vect, extend according to number of repetitions
hyperparameter_vect = shuffle(repeat(hyperparameter_vect, n_parameter_iterations))
return hyperparameter_vect
end
"""
run_aiapc(
n_parameter_iterations=1,
max_iter=Int(1e9),
convergence_threshold=Int(1e5),
α_range=Float64.(range(0.0025, 0.25, 100)),
β_range=Float64.(range(0.02, 2, 100)),
version="v0.0.0",
start_timestamp=now(),
batch_size=1,
)
Run AIAPC, given a configuration for a set of experiments.
"""
function run_aiapc(;
n_parameter_iterations = 1,
max_iter = Int(1e9),
convergence_threshold = Int(1e5),
α_range = Float64.(range(0.0025, 0.25, 100)),
β_range = Float64.(range(0.02, 2, 100)),
version = "v0.0.0",
start_timestamp = now(),
batch_size = 1,
slurm_metadata = (SLURM_ARRAY_JOB_ID = 0, SLURM_ARRAY_TASK_ID = 0),
debug = false,
)
if debug
α_range = α_range[1:10:end]
β_range = β_range[1:10:end]
        if slurm_metadata.SLURM_ARRAY_TASK_ID > 10
return
end
end
competition_params_dict = Dict(
:high => CompetitionParameters(0.25, 0, (2, 2), (1, 1)),
:low => CompetitionParameters(0.25, 0, (2, 2), (1, 1)),
)
competition_solution_dict =
Dict(d_ => CompetitionSolution(competition_params_dict[d_]) for d_ in [:high, :low])
δ = 0.95
hyperparameter_vect = build_hyperparameter_set(
α_range,
β_range,
δ,
max_iter,
competition_solution_dict,
convergence_threshold,
1,
)
println(
"About to run $(length(hyperparameter_vect)) parameter settings, each $n_parameter_iterations times",
)
start_timestamp = Dates.format(start_timestamp, "yyyy-mm-dd__HH_MM_SS")
folder_name = joinpath(
"data",
savename((
model = "aiapc",
version = version,
start_timestamp = start_timestamp,
SLURM_ARRAY_JOB_ID = slurm_metadata.SLURM_ARRAY_JOB_ID,
SLURM_ARRAY_TASK_ID = slurm_metadata.SLURM_ARRAY_TASK_ID,
debug = debug,
)),
)
mkpath(folder_name)
for i = 1:n_parameter_iterations
println("Parameter iteration $i of $n_parameter_iterations")
file_name = joinpath(folder_name, savename((parameter_iteration = i, suffix = "csv")))
exp_list_ = AIAPCSummary[]
exp_list = @showprogress pmap(
run_and_extract,
hyperparameter_vect;
on_error = identity,
batch_size = batch_size,
)
df = extract_sim_results(exp_list)
CSV.write(file_name, df)
end
exp_df = DataFrame.(CSV.File.(readdir(folder_name, join = true)))
exp_df = vcat(exp_df...)
CSV.write(folder_name * ".csv", exp_df)
rm(folder_name, recursive=true) # Remove folder after merging and writing to CSV
return exp_df
end
| AlgorithmicCompetition | https://github.com/jeremiahpslewis/AlgorithmicCompetition.jl.git |
|
[
"MIT"
] | 0.1.5 | f09ca454bd05ecdf946ce4ebeaf8a7d687356d44 | code | 909 | using ReinforcementLearning
import ReinforcementLearning: RLCore
struct StopWhenConverged <: AbstractStopCondition end
"""
    RLCore.check!(s::StopWhenConverged, agent, env)
Returns true if the environment has converged for all players.
"""
function RLCore.check!(s::StopWhenConverged, agent, env)
# false until converged, then true
return all(env.convergence_vect)
end
"""
AIAPCStop(env::AIAPCEnv; stop_on_convergence = true)
Returns a stop condition that stops when the environment has converged for all players.
"""
function AIAPCStop(env::E; stop_on_convergence::Bool = true) where {E<:AbstractEnv}
stop_conditions = []
push!(stop_conditions, StopAfterNEpisodes(env.max_iter, is_show_progress = false))
if stop_on_convergence
stop_converged = StopWhenConverged()
push!(stop_conditions, stop_converged)
end
return StopIfAny(stop_conditions...)
end
| AlgorithmicCompetition | https://github.com/jeremiahpslewis/AlgorithmicCompetition.jl.git |
|
[
"MIT"
] | 0.1.5 | f09ca454bd05ecdf946ce4ebeaf8a7d687356d44 | code | 5379 | using Chain
using ReinforcementLearning
using DataFrames
"""
profit_gain(π_hat, env::AIAPCEnv)
Returns the profit gain of the agent based on the current policy.
"""
function profit_gain(π_hat, env)
π_N, π_M = extract_profit_vars(env)
(mean(π_hat) - π_N) / (π_M - π_N)
end
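# Worked sketch (hypothetical profit levels): the gain normalizes average profit
# between the Nash benchmark (gain 0) and the monopoly benchmark (gain 1).
_example_profit_gain(pi_bar, pi_N, pi_M) = (pi_bar - pi_N) / (pi_M - pi_N)
# _example_profit_gain(0.25, 0.2, 0.3) == 0.5, i.e. halfway to full collusion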
"""
AIAPCSummary(α, β, is_converged, convergence_profit, iterations_until_convergence)
A struct to store the summary of an AIAPC experiment.
"""
struct AIAPCSummary
α::Float64
β::Float64
is_converged::Vector{Bool}
convergence_profit::Vector{Float64}
iterations_until_convergence::Vector{Int64}
end
"""
extract_profit_vars(env::AIAPCEnv)
Returns the Nash equilibrium and monopoly optimal profits, based on prices stored in env.
"""
function extract_profit_vars(env::AIAPCEnv)
p_Bert_nash_equilibrium = env.p_Bert_nash_equilibrium
p_monop_opt = env.p_monop_opt
competition_params = env.competition_params_dict[:high]
π_N = π(p_Bert_nash_equilibrium, p_Bert_nash_equilibrium, competition_params)[1]
π_M = π(p_monop_opt, p_monop_opt, competition_params)[1]
return (π_N, π_M)
end
economic_summary(e::RLCore.Experiment) = economic_summary(e.env, e.policy, e.hook)
"""
get_state_from_memory(env::AIAPCEnv)
Helper function. Returns the state corresponding to the current memory of the environment.
"""
function get_state_from_memory(env::AIAPCEnv)
return get_state_from_prices(env, env.memory[1])
end
"""
get_state_from_prices(env::AIAPCEnv, memory_index)
Helper function. Returns the state corresponding to the memory vector passed.
"""
function get_state_from_prices(env::AIAPCEnv, memory_index)
return env.state_space_lookup[memory_index]
end
"""
get_prices_from_state(env::AIAPCEnv, state)
Helper function. Returns the prices corresponding to the state passed.
"""
function get_prices_from_state(env::AIAPCEnv, state)
prices = findall(x -> x == state, env.state_space_lookup)[1]
return [env.price_options[prices[1]], env.price_options[prices[2]]]
end
"""
get_profit_from_state(env::AIAPCEnv, state)
Helper function. Returns the profit corresponding to the state passed.
"""
function get_profit_from_state(env::AIAPCEnv, state)
prices = get_prices_from_state(env, state)
return AlgorithmicCompetition.π(
prices[1],
prices[2],
env.competition_params_dict[:high],
)
end
"""
get_optimal_action(env::AIAPCEnv, policy::MultiAgentPolicy, last_observed_state)
Get the optimal action (best response) for each player, given the current policy and the last observed state.
"""
function get_optimal_action(env::AIAPCEnv, policy::MultiAgentPolicy, last_observed_state)
optimal_action_set = Int64[]
for player_ in [Player(1), Player(2)]
opt_act = argmax(
policy[player_].policy.learner.approximator.model[:, last_observed_state],
)
push!(optimal_action_set, Int64(opt_act))
end
return CartesianIndex(optimal_action_set...)
end
function economic_summary(env::AIAPCEnv, policy::MultiAgentPolicy, hook::AbstractHook)
convergence_threshold = env.convergence_threshold
iterations_until_convergence = Int64[
hook[player][1].iterations_until_convergence for player in [Player(1), Player(2)]
]
is_converged = Bool[]
convergence_profit = [get_convergence_profit_from_env(env, policy)...]
for i in (Player(1), Player(2))
push!(is_converged, hook[i][1].is_converged)
end
return AIAPCSummary(
env.α,
env.β,
is_converged,
convergence_profit,
iterations_until_convergence,
)
end
"""
get_convergence_profit_from_env(env::AIAPCEnv, policy::MultiAgentPolicy)
Returns the average profit of the agent, after convergence, over the convergence state or states (in the case of a cycle).
"""
function get_convergence_profit_from_env(
env::E,
policy::MultiAgentPolicy,
) where {E<:AbstractEnv}
last_observed_state = get_state_from_memory(env)
visited_states = [last_observed_state]
for i = 1:100
next_price_set = get_optimal_action(env, policy, last_observed_state)
next_state = get_state_from_prices(env, next_price_set)
if next_state ∈ visited_states
break
else
push!(visited_states, next_state)
last_observed_state = next_state # advance along the policy to trace out the full cycle
end
end
profit_vects = get_profit_from_state.((env,), visited_states)
profit_table = hcat(profit_vects...)'
mean(profit_table, dims = 1)
end
"""
extract_sim_results(exp_list::Vector{AIAPCSummary})
Extracts the results of a simulation experiment, given a list of AIAPCSummary objects, returns a `DataFrame`.
"""
function extract_sim_results(exp_list::Vector{AIAPCSummary})
α_result = [ex.α for ex in exp_list if !(ex isa Exception)]
β_result = [ex.β for ex in exp_list if !(ex isa Exception)]
iterations_until_convergence =
[ex.iterations_until_convergence[1] for ex in exp_list if !(ex isa Exception)]
avg_profit_result =
[mean(ex.convergence_profit) for ex in exp_list if !(ex isa Exception)]
is_converged = [ex.is_converged for ex in exp_list if !(ex isa Exception)]
df = DataFrame(
α = α_result,
β = β_result,
π_bar = avg_profit_result,
iterations_until_convergence = iterations_until_convergence,
is_converged = is_converged,
)
return df
end
| AlgorithmicCompetition | https://github.com/jeremiahpslewis/AlgorithmicCompetition.jl.git |
|
[
"MIT"
] | 0.1.5 | f09ca454bd05ecdf946ce4ebeaf8a7d687356d44 | code | 224 | include("stochastic_demand_stochastic_information.jl")
include("params.jl")
include("env_helpers.jl")
include("env.jl")
include("policy.jl")
include("hooks.jl")
include("run.jl")
include("run_dddc.jl")
include("summary.jl")
| AlgorithmicCompetition | https://github.com/jeremiahpslewis/AlgorithmicCompetition.jl.git |
|
[
"MIT"
] | 0.1.5 | f09ca454bd05ecdf946ce4ebeaf8a7d687356d44 | code | 7750 | using ReinforcementLearning
mutable struct DDDCMemory
prices::CartesianIndex{2}
signals::Vector{Bool}
demand_state::Symbol
reward::Vector{Float64} # NOTE: Not strictly part of the state-defining memory, but used to store reward for each player
end
"""
DDDCEnv(p::DDDCHyperParameters)
Build an environment to reproduce the results of the Lewis 2023 extensions to AIAPC.
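# Examples
An illustrative sketch, assuming a `DDDCHyperParameters` value `hyperparams` (see its docstring):
```julia
env = DDDCEnv(hyperparams)
state(env, Player(1)) # integer state id observed by player 1
```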
"""
struct DDDCEnv <: AbstractEnv
α::Float64 # Learning parameter
β::Float64 # Exploration parameter
δ::Float64 # Discount factor
max_iter::Int # Maximum number of iterations
convergence_threshold::Int # Convergence threshold
n_players::Int # Number of players
price_options::Vector{Float64} # Price options
price_index::Vector{Int64} # Price indices
competition_params_dict::Dict{Symbol,CompetitionParameters} # Competition parameters, true = high, false = low
memory::DDDCMemory # Memory struct (previous prices, signals, demand state)
is_high_demand_signals::Vector{Bool} # [true, false] if demand signal is high for player one and low for player two for a given episode
is_high_demand_episode::Vector{Bool} # [true] if demand is high for a given episode
state_space::Base.OneTo{Int64} # State space
state_space_lookup::Array{Int64,4} # State space lookup table
n_prices::Int # Number of price options
n_state_space::Int64 # Number of states
convergence_vect::Vector{Bool} # Convergence status for each player
is_done::Vector{Bool} # Episode is complete
p_Bert_nash_equilibrium::Dict{Symbol,Float64} # Nash equilibrium prices for low and high demand (Bertrand price)
p_monop_opt::Dict{Symbol,Float64} # Monopoly optimal prices for low and high demand
action_space::Tuple # Action space
profit_array::Array{Float64,4} # Profit given price pair as coordinates
data_demand_digital_params::DataDemandDigitalParams # Parameters for Data/Demand/Digital AIAPC extension
reward::Vector{Float64}
function DDDCEnv(p::DDDCHyperParameters)
price_options = Vector{Float64}(p.price_options)
n_prices = length(p.price_options)
price_index = Vector{Int64}(Int64.(1:n_prices))
n_players = p.n_players
n_state_space = 4 * n_prices^(p.memory_length * n_players) # 2^2 = 4 signal states (current and previous demand signal)
state_space = Base.OneTo(Int64(n_state_space))
action_space = construct_DDDC_action_space(price_index)
profit_array =
construct_DDDC_profit_array(price_options, p.competition_params_dict, n_players)
state_space_lookup = construct_DDDC_state_space_lookup(action_space, n_prices)
is_high_demand_prev_episode = rand(Bool)
is_high_demand_episode = rand(Bool)
new(
p.α,
p.β,
p.δ,
p.max_iter,
p.convergence_threshold,
n_players,
p.price_options,
price_index,
p.competition_params_dict,
DDDCMemory( # Memory, randomly initialized
initialize_price_memory(price_index, p.n_players),
get_demand_signals(
p.data_demand_digital_params,
is_high_demand_prev_episode,
),
is_high_demand_prev_episode ? :high : :low,
[0.0, 0.0],
),
get_demand_signals(p.data_demand_digital_params, is_high_demand_episode), # Current demand, randomly initialized
Bool[is_high_demand_episode],
state_space,
state_space_lookup,
n_prices,
n_state_space,
Bool[false, false], # Convergence vector
Bool[false], # Episode is done indicator
p.p_Bert_nash_equilibrium,
p.p_monop_opt,
action_space,
profit_array,
p.data_demand_digital_params,
Float64[0.0, 0.0],
)
end
end
"""
RLBase.act!(env::DDDCEnv, price_tuple::CartesianIndex{2})
Act in the environment by setting the memory to the given price tuple and setting `is_done` to `true`.
"""
function RLBase.act!(env::DDDCEnv, price_tuple::CartesianIndex{2})
# TODO: Fix support for longer memories
demand_state = env.is_high_demand_episode[1] ? :high : :low
# Reward is based on prices chosen & demand state
env.memory.reward .= env.profit_array[price_tuple, :, demand_to_index[demand_state]]
# Update 'memory' data for next episode
env.memory.prices = price_tuple
env.memory.signals = copy(env.is_high_demand_signals)
env.memory.demand_state = demand_state
# Determine whether next episode is a high demand episode and update
env.is_high_demand_episode[1] = get_demand_level(env.data_demand_digital_params)
# Update demand signals
env.is_high_demand_signals .=
get_demand_signals(env.data_demand_digital_params, env.is_high_demand_episode[1])
env.is_done[1] = true
end
RLBase.action_space(env::DDDCEnv, ::Player) = env.price_index # Choice of price
RLBase.action_space(env::DDDCEnv, ::SimultaneousPlayer) = env.action_space
RLBase.legal_action_space(env::DDDCEnv, p) = is_terminated(env) ? () : action_space(env, p)
const legal_action_space_mask_object_DDDC = fill(true, 15) # 15 = n_prices; every price is always legal
RLBase.legal_action_space_mask(env::DDDCEnv, player::Player) =
legal_action_space_mask_object_DDDC
RLBase.action_space(env::DDDCEnv) = action_space(env, SIMULTANEOUS_PLAYER)
RLBase.reward(env::DDDCEnv, player::Player) =
env.is_done[1] ? env.memory.reward[player_to_index[player]] : zero(Float64)
RLBase.state_space(env::DDDCEnv, ::Observation, p) = env.state_space
# State without player spec is a noop
RLBase.state(env::DDDCEnv) = nothing
"""
RLBase.state(env::DDDCEnv, player::Player)
Return the current state as an integer, mapped from the environment memory.
"""
function RLBase.state(env::DDDCEnv, player::Player)
memory_index = env.memory.prices
# State is defined by memory, as in AIAPC, plus demand signal given to a player
index_ = player_to_index[player]
_is_high_demand_signal = env.is_high_demand_signals[index_]
_demand_signal = _is_high_demand_signal ? :high : :low
demand_signal_index = demand_to_index[_demand_signal]
_prev_is_high_demand_signal = env.memory.signals[index_]
_prev_demand_signal = _prev_is_high_demand_signal ? :high : :low
prev_demand_signal_index = demand_to_index[_prev_demand_signal]
# State space is indexed by: memory (price x price, length 2), current demand signal, previous demand signal
env.state_space_lookup[memory_index, demand_signal_index, prev_demand_signal_index]
end
"""
RLBase.is_terminated(env::DDDCEnv)
Return whether the episode is done.
"""
RLBase.is_terminated(env::DDDCEnv) = env.is_done[1]
function RLBase.reset!(env::DDDCEnv)
env.is_done[1] = false
end
RLBase.players(::DDDCEnv) = (Player(1), Player(2))
RLBase.current_player(::DDDCEnv) = SIMULTANEOUS_PLAYER
RLBase.NumAgentStyle(::DDDCEnv) = MultiAgent(2)
RLBase.DynamicStyle(::DDDCEnv) = SIMULTANEOUS
RLBase.ActionStyle(::DDDCEnv) = MINIMAL_ACTION_SET
RLBase.InformationStyle(::DDDCEnv) = IMPERFECT_INFORMATION
RLBase.StateStyle(::DDDCEnv) = Observation{Int64}()
RLBase.RewardStyle(::DDDCEnv) = STEP_REWARD
RLBase.UtilityStyle(::DDDCEnv) = GENERAL_SUM
RLBase.ChanceStyle(::DDDCEnv) = DETERMINISTIC
| AlgorithmicCompetition | https://github.com/jeremiahpslewis/AlgorithmicCompetition.jl.git |
|
[
"MIT"
] | 0.1.5 | f09ca454bd05ecdf946ce4ebeaf8a7d687356d44 | code | 1322 | """
construct_DDDC_state_space_lookup(action_space, n_prices)
Construct a lookup table from action space to the state space.
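For the default `n_prices = 15`, the lookup has size `(15, 15, 2, 2)`, indexed by (player 1 price, player 2 price, current demand signal, previous demand signal), and maps each memory configuration to a state id in `1:900`.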
"""
function construct_DDDC_state_space_lookup(action_space, n_prices)
@assert length(action_space) == n_prices^2 * 4
state_space_lookup = reshape(Int64.(1:length(action_space)), n_prices, n_prices, 2, 2)
return state_space_lookup
end
"""
construct_DDDC_profit_array(price_options, competition_params_dict, n_players)
Construct a 4-dimensional array which holds the profit for each player given a price pair
and demand state. The first dimension is player 1's action, the second dimension is player 2's
action, the third dimension is the player index for their profit, and the fourth dimension
indexes the demand state (high or low).
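# Examples
A minimal indexing sketch (`demand_to_index` maps `:high`/`:low` onto the fourth axis):
```julia
profit_array[i, j, k, demand_to_index[:high]] # player k's profit at prices (price_options[i], price_options[j]) under high demand
```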
"""
function construct_DDDC_profit_array(
price_options::Vector{Float64},
competition_params_dict::Dict{Symbol,CompetitionParameters},
n_players::Int,
)
n_prices = length(price_options)
profit_array = zeros(Float64, n_prices, n_prices, n_players, 2)
for l in [:high, :low]
for k = 1:n_players
for i = 1:n_prices
for j = 1:n_prices
profit_array[i, j, k, demand_to_index[l]] =
π(price_options[i], price_options[j], competition_params_dict[l])[k]
end
end
end
end
return profit_array
end
| AlgorithmicCompetition | https://github.com/jeremiahpslewis/AlgorithmicCompetition.jl.git |
|
[
"MIT"
] | 0.1.5 | f09ca454bd05ecdf946ce4ebeaf8a7d687356d44 | code | 1652 | using CircularArrayBuffers
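"""
DDDCTotalRewardPerLastNEpisodes(; max_steps = 100)
Hook storing, in circular buffers, the per-episode reward and whether the demand state was high, for the most recent `max_steps` episodes.
"""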
struct DDDCTotalRewardPerLastNEpisodes <: AbstractHook
rewards::CircularVectorBuffer{Float64}
demand_state_high_vect::CircularVectorBuffer{Bool}
function DDDCTotalRewardPerLastNEpisodes(; max_steps = 100)
new(CircularVectorBuffer{Float64}(max_steps), CircularVectorBuffer{Bool}(max_steps))
end
end
function Base.push!(h::DDDCTotalRewardPerLastNEpisodes, reward::Float64, memory::DDDCMemory)
push!(h.rewards, reward)
push!(h.demand_state_high_vect, memory.demand_state == :high)
return
end
function Base.push!(
h::DDDCTotalRewardPerLastNEpisodes,
::PostActStage,
agent::P,
env::DDDCEnv,
player::Player,
) where {P<:AbstractPolicy}
push!(h, reward(env, player), env.memory)
return
end
function Base.push!(
hook::DDDCTotalRewardPerLastNEpisodes,
stage::Union{PreEpisodeStage,PostEpisodeStage,PostExperimentStage},
agent::P,
env::DDDCEnv,
player::Player,
) where {P<:AbstractPolicy}
push!(hook, stage, agent, env)
return
end
function Base.push!(
hook::MultiAgentHook,
stage::AbstractStage,
policy::MultiAgentPolicy,
env::DDDCEnv,
)
@simd for p in (Player(1), Player(2))
push!(hook[p], stage, policy[p], env, p)
end
end
function DDDCHook(env::AbstractEnv)
MultiAgentHook(
PlayerTuple(
p => ComposedHook(
ConvergenceCheck(env.n_state_space, env.convergence_threshold),
DDDCTotalRewardPerLastNEpisodes(;
max_steps = env.convergence_threshold + 100,
),
) for p in players(env)
),
)
end
| AlgorithmicCompetition | https://github.com/jeremiahpslewis/AlgorithmicCompetition.jl.git |
|
[
"MIT"
] | 0.1.5 | f09ca454bd05ecdf946ce4ebeaf8a7d687356d44 | code | 2964 | function construct_DDDC_action_space(price_index)
Tuple(
CartesianIndex{4}(i, j, k, l) for i in price_index for j in price_index for k = 1:2
for l = 1:2
)
end
"""
DDDCHyperParameters(
α::Float64,
β::Float64,
δ::Float64,
max_iter::Int,
competition_solution_dict::Dict{Symbol,CompetitionSolution},
data_demand_digital_params::DataDemandDigitalParams;
convergence_threshold::Int = Int(1e5),
)
Hyperparameters which define a specific DDDC environment.
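# Examples
A construction sketch mirroring the package tests:
```julia
competition_params_dict = Dict(
    :low => CompetitionParameters(0.25, 0.25, (2, 2), (1, 1)),
    :high => CompetitionParameters(0.25, -0.25, (2, 2), (1, 1)),
)
competition_solution_dict =
    Dict(d_ => CompetitionSolution(competition_params_dict[d_]) for d_ in [:high, :low])
hyperparams = DDDCHyperParameters(
    0.125, 0.4, 0.95, Int(1e6),
    competition_solution_dict,
    DataDemandDigitalParams(frequency_high_demand = 0.9);
    convergence_threshold = Int(1e5),
)
```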
"""
struct DDDCHyperParameters
α::Float64
β::Float64
δ::Float64
max_iter::Int
convergence_threshold::Int
price_options::Vector{Float64}
memory_length::Int
n_players::Int
competition_params_dict::Dict{Symbol,CompetitionParameters}
p_Bert_nash_equilibrium::Dict{Symbol,Float64}
p_monop_opt::Dict{Symbol,Float64}
data_demand_digital_params::DataDemandDigitalParams
function DDDCHyperParameters(
α::Float64,
β::Float64,
δ::Float64,
max_iter::Int,
competition_solution_dict::Dict{Symbol,CompetitionSolution},
data_demand_digital_params::DataDemandDigitalParams;
convergence_threshold::Int = Int(1e5),
)
@assert max_iter > convergence_threshold
ξ = 0.1
n_prices = 15
n_players = 2
memory_length = 1
p_monop_opt_min = minimum(
competition_solution_dict[demand_mode].p_monop_opt for
demand_mode in [:high, :low]
)
p_monop_opt_max = maximum(
competition_solution_dict[demand_mode].p_monop_opt for
demand_mode in [:high, :low]
)
p_Bert_nash_equilibrium_min = minimum(
competition_solution_dict[demand_mode].p_Bert_nash_equilibrium for
demand_mode in [:high, :low]
)
p_Bert_nash_equilibrium_max = maximum(
competition_solution_dict[demand_mode].p_Bert_nash_equilibrium for
demand_mode in [:high, :low]
)
p_range_pad = ξ * (p_monop_opt_max - p_Bert_nash_equilibrium_min)
price_options = [
range(
p_Bert_nash_equilibrium_min - p_range_pad,
p_monop_opt_max + p_range_pad,
n_prices,
)...,
]
new(
α,
β,
δ,
max_iter,
convergence_threshold,
price_options,
memory_length,
n_players,
Dict(d_ => competition_solution_dict[d_].params for d_ in [:high, :low]),
Dict(
d_ => competition_solution_dict[d_].p_Bert_nash_equilibrium for
d_ in [:high, :low]
),
Dict(d_ => competition_solution_dict[d_].p_monop_opt for d_ in [:high, :low]),
data_demand_digital_params,
)
end
end
| AlgorithmicCompetition | https://github.com/jeremiahpslewis/AlgorithmicCompetition.jl.git |
|
[
"MIT"
] | 0.1.5 | f09ca454bd05ecdf946ce4ebeaf8a7d687356d44 | code | 2761 | using ReinforcementLearning
using ReinforcementLearningFarm: EpsilonSpeedyExplorer
"""
Q_i_0(env::DDDCEnv)
Calculate the Q-value for player i at time t=0, given the price chosen by player i and assuming random play over the price options of player -i, weighted by the demand state frequency.
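In the `"baseline"` mode of `InitMatrix`, this vector is repeated across all state columns to initialize the Q-matrix.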
"""
function Q_i_0(env::DDDCEnv)
freq_high_demand = env.data_demand_digital_params.frequency_high_demand
avg_profit_by_demand_state = mean(env.profit_array[:, :, 1, :], dims = 2)
avg_profit =
(avg_profit_by_demand_state[:, :, 1] .* freq_high_demand) .+
((1 - freq_high_demand) .* avg_profit_by_demand_state[:, :, 2]) # low-demand slice, weighted by 1 - freq_high_demand
return Float64[avg_profit...]
end
"""
InitMatrix(env::DDDCEnv, mode = "zero")
Initialize the Q-matrix for the DDDC environment.
"""
function InitMatrix(env::DDDCEnv; mode = "zero")
if mode == "zero"
return zeros(env.n_prices, env.n_state_space)
elseif mode == "baseline"
opponent_randomizes_expected_profit = Q_i_0(env)
return repeat(opponent_randomizes_expected_profit, 1, env.n_state_space)
elseif mode == "constant"
return fill(5.0, env.n_prices, env.n_state_space) # Float64 matrix, matching the other initialization modes
else
@assert false "Unknown mode"
end
end
"""
DDDCPolicy(env::DDDCEnv; mode = "baseline")
Create a policy for the DDDC environment, with symmetric agents, using a tabular Q-learner. `mode` determines the initialization of the Q-matrix.
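# Examples
```julia
policy = DDDCPolicy(env)                # "baseline" Q-matrix initialization
policy = DDDCPolicy(env; mode = "zero") # all-zeros initialization
```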
"""
function DDDCPolicy(env::DDDCEnv; mode = "baseline")
dddc_policy = MultiAgentPolicy(
PlayerTuple(
p => Agent(
QBasedPolicy(;
learner = TDLearner(
# TabularQApproximator with specified init matrix
TabularApproximator(InitMatrix(env, mode = mode)),
# For param info: https://github.com/JuliaReinforcementLearning/ReinforcementLearning.jl/blob/f97747923c6d7bbc5576f81664ed7b05a2ab8f1e/src/ReinforcementLearningZoo/src/algorithms/tabular/td_learner.jl#L15
:SARS;
γ = env.δ,
α = env.α,
n = 0,
),
explorer = EpsilonSpeedyExplorer(env.β * 1e-5),
),
Trajectory(
CircularArraySARTSTraces(;
capacity = 1,
state = Int64 => (),
action = Int64 => (),
reward = Float64 => (),
terminal = Bool => (),
),
DummySampler(),
InsertSampleRatioController(),
),
) for p in players(env)
),
)
return dddc_policy
end
| AlgorithmicCompetition | https://github.com/jeremiahpslewis/AlgorithmicCompetition.jl.git |
|
[
"MIT"
] | 0.1.5 | f09ca454bd05ecdf946ce4ebeaf8a7d687356d44 | code | 1278 | # Patch to improve type stability and try to speed things up (avoid generator)
function RLBase.plan!(multiagent::MultiAgentPolicy, env::DDDCEnv)
action_set = CartesianIndex{2}(
RLBase.plan!(multiagent[Player(1)], env, Player(1)),
RLBase.plan!(multiagent[Player(2)], env, Player(2)),
)
return action_set
end
function Experiment(env::DDDCEnv; stop_on_convergence = true)
RLCore.Experiment(
DDDCPolicy(env),
env,
AIAPCStop(env; stop_on_convergence = stop_on_convergence),
DDDCHook(env),
)
end
function Base.run(hyperparameters::DDDCHyperParameters; stop_on_convergence = true)
env = DDDCEnv(hyperparameters)
experiment = Experiment(env; stop_on_convergence = stop_on_convergence)
RLCore._run(
experiment.policy,
experiment.env,
experiment.stop_condition,
experiment.hook,
ResetIfEnvTerminated(),
)
return experiment
end
"""
run_and_extract(hyperparameters::DDDCHyperParameters; stop_on_convergence = true)
Runs the experiment and returns the economic summary.
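# Examples
```julia
summary = run_and_extract(hyperparams) # one DDDCSummary for a single run of `hyperparams`
```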
"""
function run_and_extract(hyperparameters::DDDCHyperParameters; stop_on_convergence = true)
economic_summary(run(hyperparameters; stop_on_convergence = stop_on_convergence))
end
| AlgorithmicCompetition | https://github.com/jeremiahpslewis/AlgorithmicCompetition.jl.git |
|
[
"MIT"
] | 0.1.5 | f09ca454bd05ecdf946ce4ebeaf8a7d687356d44 | code | 3348 | import ProgressMeter: @showprogress
using Distributed
using Random
using StatsBase
using Dates: now
using CSV
using DrWatson: savename # builds the results-folder name from the metadata tuple
"""
run_dddc(;
n_parameter_iterations = 1,
max_iter = Int(1e9),
convergence_threshold = Int(1e5),
n_grid_increments = 100,
version = "v0.0.0",
start_timestamp = now(),
batch_size = 1,
batch_metadata = (SLURM_ARRAY_JOB_ID = 0, SLURM_ARRAY_TASK_ID = 0),
debug = false,
)
Run DDDC, given a configuration for a set of experiments.
"""
function run_dddc(;
n_parameter_iterations = 1,
max_iter = Int(1e9),
convergence_threshold = Int(1e5),
n_grid_increments = 100,
version = "v0.0.0",
start_timestamp = now(),
batch_size = 1,
batch_metadata = (SLURM_ARRAY_JOB_ID = 0, SLURM_ARRAY_TASK_ID = 0),
debug = false,
)
signal_quality_vect = [[true, false], [false, false]]
frequency_high_demand_range = Float64.(range(0.5, 1, n_grid_increments + 1))
weak_signal_quality_level_range = Float64.(range(0.5, 1.0, n_grid_increments + 1))
if debug
frequency_high_demand_range = frequency_high_demand_range[1:10:end]
weak_signal_quality_level_range = weak_signal_quality_level_range[1:10:end]
end
competition_params_dict = Dict(
:low => CompetitionParameters(0.25, 0.25, (2, 2), (1, 1)),
:high => CompetitionParameters(0.25, -0.25, (2, 2), (1, 1)), # Parameter values aligned with Calvano 2020 Stochastic Demand case
)
competition_solution_dict =
Dict(d_ => CompetitionSolution(competition_params_dict[d_]) for d_ in [:high, :low])
α = Float64(0.15)
β = Float64(4e-1)
δ = 0.95
data_demand_digital_param_set = [
DataDemandDigitalParams(
weak_signal_quality_level = weak_signal_quality_level,
strong_signal_quality_level = 1.0,
signal_is_strong = shuffle(signal_quality_players),
frequency_high_demand = frequency_high_demand,
) for frequency_high_demand in frequency_high_demand_range for
signal_quality_players in signal_quality_vect for
weak_signal_quality_level in weak_signal_quality_level_range
]
hyperparameter_vect = [
DDDCHyperParameters(
α,
β,
δ,
max_iter,
competition_solution_dict,
data_demand_digital_params;
convergence_threshold = convergence_threshold,
) for data_demand_digital_params in data_demand_digital_param_set
]
# Shuffle hyperparameter_vect, extend according to number of repetitions
hyperparameter_vect = shuffle(repeat(hyperparameter_vect, n_parameter_iterations))
exp_list = DDDCSummary[]
println(
"About to run $(length(hyperparameter_vect) ÷ n_parameter_iterations) parameter settings, each $n_parameter_iterations times",
)
exp_list_ = @showprogress pmap(
run_and_extract,
hyperparameter_vect;
on_error = identity,
batch_size = batch_size,
)
append!(exp_list, exp_list_)
folder_name = joinpath(
"data",
savename((
model = "dddc",
version = version,
start_timestamp = start_timestamp,
SLURM_ARRAY_JOB_ID = batch_metadata.SLURM_ARRAY_JOB_ID,
SLURM_ARRAY_TASK_ID = batch_metadata.SLURM_ARRAY_TASK_ID,
debug = debug,
)),
)
mkpath(folder_name)
df = extract_sim_results(exp_list)
CSV.write(folder_name * ".csv", df)
return exp_list
end
| AlgorithmicCompetition | https://github.com/jeremiahpslewis/AlgorithmicCompetition.jl.git |
|
[
"MIT"
] | 0.1.5 | f09ca454bd05ecdf946ce4ebeaf8a7d687356d44 | code | 3194 | # demand is either high or low
# state is determined by prices (known to agents) and demand state (known to agents, but unreliable signal)
# state * 2 for high / low (signal given to agents)
# env.frequency_high_demand = 0.5
# env.demand_level_is_high = true / false
# env.high_demand_signal = [true, false]
# env.strong_signal_quality_level = 0.3 # high signal quality -> additive deviation from coin flip zero information signal
# env.weak_signal_quality_level = 0.1 # low signal quality -> base deviation from coin flip zero information signal
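"""
DataDemandDigitalParams(; weak_signal_quality_level = 0.5, strong_signal_quality_level = 1.0, signal_is_strong = [false, false], frequency_high_demand = 0.5)
Parameters governing the stochastic demand process and the quality of each player's demand signal in the DDDC extension.
"""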
@kwdef struct DataDemandDigitalParams
weak_signal_quality_level::Float64 = 0.5 # probability of true signal (0.5 is the lowest possible value)
strong_signal_quality_level::Float64 = 1.0 # probability of true signal (0.5 is the lowest possible value)
signal_is_strong::Vector{Bool} = [false, false] # true if signal quality is high
frequency_high_demand::Float64 = 0.5 # probability of high demand for a given episode
end
function get_demand_level(frequency_high_demand::Float64)
rand() < frequency_high_demand
end
get_demand_level(d::DataDemandDigitalParams) = get_demand_level(d.frequency_high_demand)
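"""
get_demand_signals(demand_level_is_high, signal_is_strong, weak_signal_quality_level, strong_signal_quality_level)
Draw one boolean demand signal per player. Each player's signal reports the true demand level with their signal-quality probability, and the opposite level otherwise.
"""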
function get_demand_signals(
demand_level_is_high::Bool,
signal_is_strong::Vector{Bool},
weak_signal_quality_level::Float64,
strong_signal_quality_level::Float64,
)
true_signal_probability =
(weak_signal_quality_level .* .!signal_is_strong) .+
(strong_signal_quality_level .* signal_is_strong)
# Each player's draw reveals the true demand state with their signal-quality probability
reveal_true_signal = rand(2) .< true_signal_probability
# The observed signal equals the true demand level when the draw is truthful, and its negation otherwise
observed_signal_demand_level_is_high = reveal_true_signal .== demand_level_is_high
return observed_signal_demand_level_is_high
end
function get_demand_signals(d::DataDemandDigitalParams, is_high_demand_episode::Bool)
get_demand_signals(
is_high_demand_episode,
d.signal_is_strong,
d.weak_signal_quality_level,
d.strong_signal_quality_level,
)
end
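"""
post_prob_high_low_given_signal(pr_high_demand, pr_signal_true)
Posterior probabilities, by Bayes' rule, that demand is high given a high signal and that demand is low given a low signal. For example, with `pr_high_demand = 0.5` and `pr_signal_true = 0.9`, both posteriors equal `0.9`.
"""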
function post_prob_high_low_given_signal(pr_high_demand, pr_signal_true)
denom_high =
pr_high_demand * pr_signal_true + (1 - pr_high_demand) * (1 - pr_signal_true)
denom_low =
(1 - pr_high_demand) * pr_signal_true + pr_high_demand * (1 - pr_signal_true)
num_high = pr_high_demand * pr_signal_true
num_low = (1 - pr_high_demand) * pr_signal_true
return [num_high / denom_high, num_low / denom_low]
end
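"""
post_prob_high_low_given_both_signals(pr_high_demand, pr_signal_true)
Posterior probabilities that demand is high (low) given the players' pair of demand signals; the cross terms account for episodes in which the two signals disagree.
"""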
function post_prob_high_low_given_both_signals(pr_high_demand, pr_signal_true)
denom_high =
pr_high_demand * pr_signal_true^2 +
(1 - pr_high_demand) * (1 - pr_signal_true)^2 +
2 * pr_high_demand * pr_signal_true * (1 - pr_signal_true)
denom_low =
(1 - pr_high_demand) * pr_signal_true^2 +
pr_high_demand * (1 - pr_signal_true)^2 +
2 * (1 - pr_high_demand) * pr_signal_true * (1 - pr_signal_true)
num_high = pr_high_demand * pr_signal_true^2
num_low = (1 - pr_high_demand) * pr_signal_true^2
return [num_high / denom_high, num_low / denom_low]
end
| AlgorithmicCompetition | https://github.com/jeremiahpslewis/AlgorithmicCompetition.jl.git |
|
[
"MIT"
] | 0.1.5 | f09ca454bd05ecdf946ce4ebeaf8a7d687356d44 | code | 10950 | using Chain
using ReinforcementLearning
using DataFrames
using Flux: mse
using DataFrameMacros
"""
DDDCSummary(
α, β, is_converged, data_demand_digital_params,
convergence_profit, convergence_profit_demand_high, convergence_profit_demand_low,
profit_gain, profit_gain_demand_high, profit_gain_demand_low,
iterations_until_convergence, price_response_to_demand_signal_mse,
percent_demand_high, percent_unexplored_states,
)
A struct to store the summary of a DDDC experiment.
"""
struct DDDCSummary
α::Float64
β::Float64
is_converged::Vector{Bool}
data_demand_digital_params::DataDemandDigitalParams
convergence_profit::Vector{Float64}
convergence_profit_demand_high::Vector{Float64}
convergence_profit_demand_low::Vector{Float64}
profit_gain::Vector{Float64}
profit_gain_demand_high::Vector{Float64}
profit_gain_demand_low::Vector{Float64}
iterations_until_convergence::Vector{Int64}
price_response_to_demand_signal_mse::Vector{Float64}
percent_demand_high::Float64
percent_unexplored_states::Vector{Float64}
end
"""
extract_profit_vars(env::DDDCEnv)
Returns the Nash equilibrium and monopoly optimal profits, based on prices stored in `env`.
"""
function extract_profit_vars(env::DDDCEnv)
p_Bert_nash_equilibrium = env.p_Bert_nash_equilibrium
p_monop_opt = env.p_monop_opt
competition_params = env.competition_params_dict
π_N = Dict(
i => π(
p_Bert_nash_equilibrium[i],
p_Bert_nash_equilibrium[i],
competition_params[i],
)[1] for i in [:high, :low]
)
π_M = Dict(
i => π(p_monop_opt[i], p_monop_opt[i], competition_params[i])[1] for
i in [:high, :low]
)
return (π_N, π_M)
end
"""
extract_quantity_vars(env::DDDCEnv)
Returns the Nash equilibrium and monopoly optimal quantities, based on prices stored in `env`.
"""
function extract_quantity_vars(env::DDDCEnv)
p_Bert_nash_equilibrium = env.p_Bert_nash_equilibrium
p_monop_opt = env.p_monop_opt
competition_params = env.competition_params_dict
π_N = Dict(
i => Q(
p_Bert_nash_equilibrium[i],
p_Bert_nash_equilibrium[i],
competition_params[i],
)[1] for i in [:high, :low]
)
π_M = Dict(
i => Q(p_monop_opt[i], p_monop_opt[i], competition_params[i])[1] for
i in [:high, :low]
)
return (π_N, π_M)
end
function economic_summary(env::DDDCEnv, policy::MultiAgentPolicy, hook::AbstractHook)
convergence_threshold = env.convergence_threshold
iterations_until_convergence = Int64[
hook[player][1].iterations_until_convergence for player in [Player(1), Player(2)]
]
percent_demand_high = mean(hook[Player(1)][2].demand_state_high_vect)
is_converged = Bool[]
percent_unexplored_states = Float64[]
convergence_profit = get_convergence_profit_from_hook(hook)
for player_ in (Player(1), Player(2))
push!(is_converged, hook[player_][1].is_converged)
push!(percent_unexplored_states, mean(hook[player_][1].best_response_vector .== 0))
end
price_vs_demand_signal_counterfactuals =
extract_price_vs_demand_signal_counterfactuals(env, hook)
return DDDCSummary(
env.α,
env.β,
is_converged,
env.data_demand_digital_params,
convergence_profit[:all],
convergence_profit[:high],
convergence_profit[:low],
get.(profit_gain.(convergence_profit[:all], (env,)), :weighted, ""),
get.(profit_gain.(convergence_profit[:high], (env,)), :high, ""),
get.(profit_gain.(convergence_profit[:low], (env,)), :low, ""),
iterations_until_convergence,
[e_[1] for e_ in price_vs_demand_signal_counterfactuals],
percent_demand_high,
percent_unexplored_states,
)
end
"""
get_convergence_profit_from_hook(hook::AbstractHook)
Returns the average profit of the agent, after convergence, over the convergence state or states (in the case of a cycle). Also returns the average profit for the high and low demand states.
"""
function get_convergence_profit_from_hook(hook::AbstractHook)
demand_high = hook[Player(1)][2].demand_state_high_vect
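# The reward buffer holds convergence_threshold + 100 episodes (see DDDCHook); dropping the first 100 entries averages over the post-convergence window only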
return Dict(
:all => [mean(hook[p][2].rewards[101:end]) for p in [Player(1), Player(2)]],
:high => [
mean(hook[p][2].rewards[101:end][demand_high[101:end]]) for
p in [Player(1), Player(2)]
],
:low => [
mean(hook[p][2].rewards[101:end][.!demand_high[101:end]]) for
p in [Player(1), Player(2)]
],
)
end
"""
extract_sim_results(exp_list::Vector{DDDCSummary})
Extracts the results of a simulation experiment, given a list of DDDCSummary objects, returns a `DataFrame`.
"""
function extract_sim_results(exp_list::Vector{DDDCSummary})
α_result = [ex.α for ex in exp_list if !(ex isa Exception)]
β_result = [ex.β for ex in exp_list if !(ex isa Exception)]
percent_demand_high = [ex.percent_demand_high for ex in exp_list if !(ex isa Exception)]
iterations_until_convergence =
[ex.iterations_until_convergence[1] for ex in exp_list if !(ex isa Exception)]
convergence_profit =
[mean(ex.convergence_profit) for ex in exp_list if !(ex isa Exception)]
convergence_profit_demand_high =
[ex.convergence_profit_demand_high for ex in exp_list if !(ex isa Exception)]
convergence_profit_demand_low =
[ex.convergence_profit_demand_low for ex in exp_list if !(ex isa Exception)]
profit_vect = [ex.convergence_profit for ex in exp_list if !(ex isa Exception)]
profit_max = [maximum(ex.convergence_profit) for ex in exp_list if !(ex isa Exception)]
profit_min = [minimum(ex.convergence_profit) for ex in exp_list if !(ex isa Exception)]
profit_gain = [ex.profit_gain for ex in exp_list if !(ex isa Exception)]
profit_gain_demand_high =
[ex.profit_gain_demand_high for ex in exp_list if !(ex isa Exception)]
profit_gain_demand_low =
[ex.profit_gain_demand_low for ex in exp_list if !(ex isa Exception)]
is_converged = [ex.is_converged for ex in exp_list if !(ex isa Exception)]
weak_signal_quality_level = [
ex.data_demand_digital_params.weak_signal_quality_level for
ex in exp_list if !(ex isa Exception)
]
strong_signal_quality_level = [
ex.data_demand_digital_params.strong_signal_quality_level for
ex in exp_list if !(ex isa Exception)
]
signal_is_strong = [
ex.data_demand_digital_params.signal_is_strong for
ex in exp_list if !(ex isa Exception)
]
frequency_high_demand = [
ex.data_demand_digital_params.frequency_high_demand for
ex in exp_list if !(ex isa Exception)
]
price_response_to_demand_signal_mse =
[ex.price_response_to_demand_signal_mse for ex in exp_list if !(ex isa Exception)]
percent_unexplored_states =
[ex.percent_unexplored_states for ex in exp_list if !(ex isa Exception)]
df = DataFrame(
α = α_result,
β = β_result,
profit_vect = profit_vect,
profit_min = profit_min,
profit_max = profit_max,
profit_gain = profit_gain,
profit_gain_demand_high = profit_gain_demand_high,
profit_gain_demand_low = profit_gain_demand_low,
convergence_profit = convergence_profit,
convergence_profit_demand_high = convergence_profit_demand_high,
convergence_profit_demand_low = convergence_profit_demand_low,
iterations_until_convergence = iterations_until_convergence,
is_converged = is_converged,
weak_signal_quality_level = weak_signal_quality_level,
strong_signal_quality_level = strong_signal_quality_level,
signal_is_strong = signal_is_strong,
frequency_high_demand = frequency_high_demand,
price_response_to_demand_signal_mse = price_response_to_demand_signal_mse,
percent_demand_high = percent_demand_high,
percent_unexplored_states = percent_unexplored_states,
)
return df
end
function extract_price_vs_demand_signal_counterfactuals(env::DDDCEnv, hook::AbstractHook)
price_vs_demand_signal_counterfactuals = [
extract_price_vs_demand_signal_counterfactuals(
hook[player_][1].best_response_vector,
env.state_space_lookup,
env.price_options,
env.n_prices,
) for player_ in [Player(1), Player(2)]
]
return price_vs_demand_signal_counterfactuals
end
function extract_price_vs_demand_signal_counterfactuals(
best_response_vector,
state_space_lookup,
price_options,
n_prices,
)
price_counterfactual_vect = []
for i = 1:n_prices
for j = 1:n_prices
for k = 1:2
# The price that a player would choose if given signal 1 and signal 2 (e.g. high=1 or low=2), conditional on memory (prices and previous signals)
best_response_price_indices =
best_response_vector[state_space_lookup[i, j, :, k]]
if all(best_response_price_indices .> 0)
price_counterfactuals = price_options[best_response_price_indices]
else
price_counterfactuals = [0, 0]
end
push!(price_counterfactual_vect, ((i, j, k), price_counterfactuals...))
end
end
end
price_counterfactual_df = DataFrame(
price_counterfactual_vect,
[:memory_index, :price_given_high_demand_signal, :price_given_low_demand_signal],
)
# NOTE: mse is calculated only over the states explored by the agent for both signal levels
price_mse = @chain price_counterfactual_df begin
@subset((:price_given_high_demand_signal != 0) & (:price_given_low_demand_signal != 0))
@combine(
:price_mse =
mse(:price_given_high_demand_signal, :price_given_low_demand_signal)
)
_[1, :price_mse]
end
return price_mse, price_counterfactual_df
end
"""
profit_gain(π_hat, env::DDDCEnv)
Returns the profit gain of the agent relative to the Nash and monopoly benchmarks, per demand state and as a weighted average over the high-demand frequency.
"""
function profit_gain(π_hat, env::DDDCEnv)
π_N, π_M = extract_profit_vars(env)
profit_gain_ =
Dict(i => (mean(π_hat) - π_N[i]) / (π_M[i] - π_N[i]) for i in [:high, :low])
π_N_weighted =
π_N[:high] * env.data_demand_digital_params.frequency_high_demand +
π_N[:low] * (1 - env.data_demand_digital_params.frequency_high_demand)
π_M_weighted =
π_M[:high] * env.data_demand_digital_params.frequency_high_demand +
π_M[:low] * (1 - env.data_demand_digital_params.frequency_high_demand)
profit_gain_weighted = (mean(π_hat) - π_N_weighted) / (π_M_weighted - π_N_weighted)
return Dict(
:high => profit_gain_[:high],
:low => profit_gain_[:low],
:weighted => profit_gain_weighted,
)
end
| AlgorithmicCompetition | https://github.com/jeremiahpslewis/AlgorithmicCompetition.jl.git |
|
[
"MIT"
] | 0.1.5 | f09ca454bd05ecdf946ce4ebeaf8a7d687356d44 | code | 3718 | using DataFrames
using AlgorithmicCompetition:
AIAPCHyperParameters,
AIAPCSummary,
CompetitionParameters,
CompetitionSolution,
run_and_extract,
extract_sim_results,
profit_gain,
AIAPCEnv
using Distributed
using ProgressMeter
using Random
using Chain
using DataFrameMacros
using Statistics
function test_key_AIAPC_points(; n_parameter_iterations = 1000)
competition_params_dict = Dict(
:high => CompetitionParameters(0.25, 0, (2, 2), (1, 1)),
:low => CompetitionParameters(0.25, 0, (2, 2), (1, 1)),
)
competition_solution_dict =
Dict(d_ => CompetitionSolution(competition_params_dict[d_]) for d_ in [:high, :low])
test_params = DataFrame(
:α => [0.15, 0.08, 0.2, 0.15, 0.01, 0.04, 0.1, 0.25, 0.2],
:β => [0.4, 2, 0.25, 1, 0.1, 0.2, 1.75, 0.1, 1],
:iter_min => [0, 0, 1.5e6, 0.5e6, 0, 1.5e6, 0.2e6, 1e6, 0.5e6],
:iter_max => [1e7, 0.7e6, 1e7, 1.1e6, 1e7, 1e7, 1e6, 1e7, 1.5e6],
:Δ_π_bar_min => [0.75, 0.7, 0.7, 0.75, 0.6, 0.8, 0.5, 0.5, 0.55],
:Δ_π_bar_max => [0.9, 0.8, 0.85, 0.9, 1, 0.95, 0.8, 0.75, 0.9],
)
hyperparameter_vect =
AIAPCHyperParameters.(
test_params[!, :α],
test_params[!, :β],
(0.95,),
(Int(1e9),),
(competition_solution_dict,),
)
exp_list_ = AIAPCSummary[]
exp_list = @showprogress pmap(
run_and_extract,
shuffle(repeat(hyperparameter_vect, n_parameter_iterations));
on_error = identity,
)
append!(exp_list_, exp_list)
df = extract_sim_results(exp_list_)
df_summary = @chain df begin
@subset(all(:is_converged))
@groupby(:α, :β)
@combine(
:Δ_π_bar = profit_gain(:π_bar, AIAPCEnv(hyperparameter_vect[1])),
:iterations_until_convergence = mean(:iterations_until_convergence)
)
leftjoin(test_params, on = [:α, :β])
end
return df_summary, exp_list_
end
@testset "AIAPC Conversion Check" begin
_procs = addprocs(
Sys.CPU_THREADS,
topology = :master_worker,
exeflags = ["--threads=1", "--project=$(Base.active_project())"],
)
@everywhere begin
using Pkg
Pkg.instantiate()
using AlgorithmicCompetition: run_and_extract
end
n_parameter_iterations = 10
exp_df, exp_list =
test_key_AIAPC_points(; n_parameter_iterations = n_parameter_iterations)
rmprocs(_procs)
exp_diagnostic = @chain exp_df begin
@transform(
:profit_match = :Δ_π_bar_max > :Δ_π_bar > :Δ_π_bar_min,
:convergence_match = :iter_max > :iterations_until_convergence > :iter_min,
:convergence_status =
:iterations_until_convergence > :iter_max ? "too slow" :
:iterations_until_convergence < :iter_min ? "too fast" : "ok",
:profit_status =
:Δ_π_bar > :Δ_π_bar_max ? "high" : :Δ_π_bar < :Δ_π_bar_min ? "low" : "ok"
)
@select(
:α,
:β,
:iterations_until_convergence =
round(:iterations_until_convergence; digits = 0),
:convergence_status,
:Δ_π_bar,
:profit_status
)
end
exp_diagnostic[1:8, :] # preview the first rows of the diagnostic table
if n_parameter_iterations < 100
@test mean(exp_diagnostic[!, :profit_status] .== "ok") > 0.7 # Tests are too slow to run more than 10 iterations, but this is noisy, so not all pass
@test mean(exp_diagnostic[!, :convergence_status] .== "ok") > 0.7
else
@test all(exp_diagnostic[!, :profit_status] .== "ok")
@test all(exp_diagnostic[!, :convergence_status] .== "ok")
end
end
| AlgorithmicCompetition | https://github.com/jeremiahpslewis/AlgorithmicCompetition.jl.git |
|
[
"MIT"
] | 0.1.5 | f09ca454bd05ecdf946ce4ebeaf8a7d687356d44 | code | 3240 | @testset "Test alpha and beta ranges" begin
alpha_range = [
0.0025,
0.005,
0.0075,
0.01,
0.0125,
0.015,
0.0175,
0.02,
0.0225,
0.025,
0.0275,
0.03,
0.0325,
0.035,
0.0375,
0.04,
0.0425,
0.045,
0.0475,
0.05,
0.0525,
0.055,
0.0575,
0.06,
0.0625,
0.065,
0.0675,
0.07,
0.0725,
0.075,
0.0775,
0.08,
0.0825,
0.085,
0.0875,
0.09,
0.0925,
0.095,
0.0975,
0.1,
0.1025,
0.105,
0.1075,
0.11,
0.1125,
0.115,
0.1175,
0.12,
0.1225,
0.125,
0.1275,
0.13,
0.1325,
0.135,
0.1375,
0.14,
0.1425,
0.145,
0.1475,
0.15,
0.1525,
0.155,
0.1575,
0.16,
0.1625,
0.165,
0.1675,
0.17,
0.1725,
0.175,
0.1775,
0.18,
0.1825,
0.185,
0.1875,
0.19,
0.1925,
0.195,
0.1975,
0.2,
0.2025,
0.205,
0.2075,
0.21,
0.2125,
0.215,
0.2175,
0.22,
0.2225,
0.225,
0.2275,
0.23,
0.2325,
0.235,
0.2375,
0.24,
0.2425,
0.245,
0.2475,
0.25,
]
@test Float64.(range(0.0025, 0.25, 100)) == alpha_range
beta_range = [
0.005,
0.01,
0.015,
0.02,
0.025,
0.03,
0.035,
0.04,
0.045,
0.05,
0.055,
0.06,
0.065,
0.07,
0.075,
0.08,
0.085,
0.09,
0.095,
0.1,
0.105,
0.11,
0.115,
0.12,
0.125,
0.13,
0.135,
0.14,
0.145,
0.15,
0.155,
0.16,
0.165,
0.17,
0.175,
0.18,
0.185,
0.19,
0.195,
0.2,
0.205,
0.21,
0.215,
0.22,
0.225,
0.23,
0.235,
0.24,
0.245,
0.25,
0.255,
0.26,
0.265,
0.27,
0.275,
0.28,
0.285,
0.29,
0.295,
0.3,
0.305,
0.31,
0.315,
0.32,
0.325,
0.33,
0.335,
0.34,
0.345,
0.35,
0.355,
0.36,
0.365,
0.37,
0.375,
0.38,
0.385,
0.39,
0.395,
0.4,
0.405,
0.41,
0.415,
0.42,
0.425,
0.43,
0.435,
0.44,
0.445,
0.45,
0.455,
0.46,
0.465,
0.47,
0.475,
0.48,
0.485,
0.49,
0.495,
0.5,
]
@test Float64.(range(0.02, 2, 100)) == beta_range * 4 # weird quadruple counting issue with original paper
end
| AlgorithmicCompetition | https://github.com/jeremiahpslewis/AlgorithmicCompetition.jl.git |
|
[
"MIT"
] | 0.1.5 | f09ca454bd05ecdf946ce4ebeaf8a7d687356d44 | code | 964 | @testset "Competitive Equilibrium: Monopoly" begin
params = CompetitionParameters(0.25, 0, (2, 2), (1, 1))
model_monop, p_monop = solve_monopolist(params)
# symmetric solution found
@test value(p_monop[1]) ≈ value(p_monop[2])
# Match AIAPC 2020 parameterization
@test value(p_monop[1]) ≈ 1.92498 atol = 0.0001
p_monop_opt = value(p_monop[2])
end
@testset "Competitive Equilibrium: Bertrand" begin
params = CompetitionParameters(0.25, 0, (2, 2), (1, 1))
p_Bertrand_ = value.(solve_bertrand(params)[2])
p_Bertrand = p_Bertrand_[1]
# Parameter recovery
@test p_Bertrand_[2] ≈ 1.47293 atol = 1e-3
# Best response function matches AIAPC 2020
@test p_BR(1.47293, params) ≈ 1.47293 atol = 1e-6
end
@testset "CompetitionParameters" begin
@test CompetitionParameters(1, 1, (1.0, 1.0), (1.0, 1.0)) isa CompetitionParameters
@test_throws DimensionMismatch CompetitionParameters(1, 1, (1.0, 1), (1.0))
end
| AlgorithmicCompetition | https://github.com/jeremiahpslewis/AlgorithmicCompetition.jl.git |
|
[
"MIT"
] | 0.1.5 | f09ca454bd05ecdf946ce4ebeaf8a7d687356d44 | code | 611 | using ReinforcementLearningFarm: EpsilonSpeedyExplorer
using ReinforcementLearningFarm
@testset "EpsilonGreedy" begin
# This yields a different result (same result, but at 2x step count) than in the paper for 100k steps, but the same convergence duration at the α and β midpoints, 850k (pg. 13)
explorer = EpsilonSpeedyExplorer(Float64(1e-5))
explorer.step[] = Int(1e5)
@test RLFarm.get_ϵ(explorer) ≈ 0.36787944117144233 # Percentage according to formula and paper convergence results
@test_broken RLFarm.get_ϵ(explorer) ≈ 0.1353352832366127 # Percentage cited in AIAPC paper (2x step count)
end
| AlgorithmicCompetition | https://github.com/jeremiahpslewis/AlgorithmicCompetition.jl.git |
|
[
"MIT"
] | 0.1.5 | f09ca454bd05ecdf946ce4ebeaf8a7d687356d44 | code | 4767 | using ReinforcementLearningFarm: TotalRewardPerLastNEpisodes
using ReinforcementLearning
@testset "TotalRewardPerLastNEpisodes" begin
@testset "Single Agent" begin
hook = TotalRewardPerLastNEpisodes(max_episodes = 10)
env = TicTacToeEnv()
agent = RandomPolicy()
for i = 1:15
push!(hook, PreEpisodeStage(), agent, env)
push!(hook, PostActStage(), agent, env)
@test length(hook.rewards) == min(i, 10)
@test hook.rewards[min(i, 10)] == reward(env)
end
end
@testset "MultiAgent" begin
hook = TotalRewardPerLastNEpisodes(max_episodes = 10)
env = TicTacToeEnv()
agent = RandomPolicy()
for i = 1:15
push!(hook, PreEpisodeStage(), agent, env, Player(:Cross))
push!(hook, PostActStage(), agent, env, Player(:Cross))
@test length(hook.rewards) == min(i, 10)
@test hook.rewards[min(i, 10)] == reward(env, Player(:Cross))
end
end
end
@testset "Convergence Check Hook" begin
competition_params_dict = Dict(
:high => CompetitionParameters(0.25, 0, (2, 2), (1, 1)),
:low => CompetitionParameters(0.25, 0, (2, 2), (1, 1)),
)
competition_solution_dict =
Dict(d_ => CompetitionSolution(competition_params_dict[d_]) for d_ in [:high, :low])
env =
AIAPCHyperParameters(
Float64(0.1),
Float64(1e-4),
0.95,
Int(1e7),
competition_solution_dict,
) |> AIAPCEnv
exper = Experiment(env; debug = true)
state(env, Player(1))
policies = AIAPCPolicy(env, mode = "zero")
push!(exper.hook[Player(1)][1], Int64(2), Int64(3), false)
@test exper.hook[Player(1)][1].best_response_vector[2] == 3
policies[Player(1)].policy.learner.approximator.model[11, :] .= 10
push!(
exper.hook[Player(1)][1],
PostActStage(),
policies[Player(1)],
exper.env,
Player(1),
)
@test exper.hook[Player(1)][1].best_response_vector[state(env, Player(1))] == 11
end
@testset "ConvergenceCheck" begin
competition_params_dict = Dict(
:high => CompetitionParameters(0.25, 0, (2, 2), (1, 1)),
:low => CompetitionParameters(0.25, 0, (2, 2), (1, 1)),
)
competition_solution_dict =
Dict(d_ => CompetitionSolution(competition_params_dict[d_]) for d_ in [:high, :low])
env =
AIAPCHyperParameters(
Float64(0.1),
Float64(2e-5),
0.95,
Int(1e7),
competition_solution_dict,
) |> AIAPCEnv
policies = env |> AIAPCPolicy
convergence_hook = ConvergenceCheck(env.n_state_space, 1)
push!(convergence_hook, PostActStage(), policies[Player(1)], env, Player(1))
@test convergence_hook.convergence_duration == 0
@test convergence_hook.iterations_until_convergence == 1
@test convergence_hook.best_response_vector[state(env, Player(1))] != 0
@test convergence_hook.is_converged != true
convergence_hook_1 = ConvergenceCheck(env.n_state_space, 1)
convergence_hook_1.best_response_vector = Vector{Int}(fill(8, 225))
push!(convergence_hook_1, PostActStage(), policies[Player(1)], env, Player(1))
@test convergence_hook.iterations_until_convergence == 1
@test convergence_hook.convergence_duration ∈ [0, 1]
# @test convergence_hook_1.is_converged == true
end
@testset "DDDCTotalRewardPerLastNEpisodes" begin
α = Float64(0.125)
β = Float64(4e-1)
δ = 0.95
ξ = 0.1
δ = 0.95
n_prices = 15
max_iter = Int(1e6)
price_index = 1:n_prices
competition_params_dict = Dict(
:low => CompetitionParameters(0.25, 0.25, (2, 2), (1, 1)),
:high => CompetitionParameters(0.25, -0.25, (2, 2), (1, 1)),
)
competition_solution_dict =
Dict(d_ => CompetitionSolution(competition_params_dict[d_]) for d_ in [:high, :low])
data_demand_digital_params = DataDemandDigitalParams(
weak_signal_quality_level = 1,
strong_signal_quality_level = 1,
signal_is_strong = [false, false],
frequency_high_demand = 0.9,
)
env =
DDDCHyperParameters(
Float64(0.1),
Float64(2e-5),
0.95,
Int(1e7),
competition_solution_dict,
data_demand_digital_params;
convergence_threshold = Int(1e5),
) |> DDDCEnv
policies = env |> DDDCPolicy
reward_hook = DDDCTotalRewardPerLastNEpisodes(; max_steps = 100)
for i = 1:2
push!(reward_hook, PostActStage(), policies[Player(1)], env, Player(1))
end
@test reward_hook.rewards[2] isa Float64
@test reward_hook.demand_state_high_vect[2] ∈ [true, false]
end
| AlgorithmicCompetition | https://github.com/jeremiahpslewis/AlgorithmicCompetition.jl.git |
|
[
"MIT"
] | 0.1.5 | f09ca454bd05ecdf946ce4ebeaf8a7d687356d44 | code | 23764 | using CircularArrayBuffers: capacity
@testset "Prepackaged Environment Tests" begin
α = Float64(0.125)
β = Float64(1)
δ = 0.95
ξ = 0.1
δ = 0.95
n_prices = 15
max_iter = 10000
competition_params_dict = Dict(
:high => CompetitionParameters(0.25, 0, (2, 2), (1, 1)),
:low => CompetitionParameters(0.25, 0, (2, 2), (1, 1)),
)
competition_solution_dict =
Dict(d_ => CompetitionSolution(competition_params_dict[d_]) for d_ in [:high, :low])
hyperparameters = AIAPCHyperParameters(
α,
β,
δ,
max_iter,
competition_solution_dict;
convergence_threshold = 1,
)
RLBase.test_interfaces!(AIAPCEnv(hyperparameters))
# Until state handling is fixed for multi-agent simultaneous environments, we can't test this
# RLBase.test_runnable!(AIAPCEnv(hyperparameters))
end
@testset "Profit gain DDDC" begin
α = Float64(0.125)
β = Float64(4e-1)
δ = 0.95
ξ = 0.1
δ = 0.95
n_prices = 15
max_iter = Int(1e6) # 1e8
price_index = 1:n_prices
competition_params_dict = Dict(
:high => CompetitionParameters(0.25, -0.25, (2, 2), (1, 1)),
:low => CompetitionParameters(0.25, 0.25, (2, 2), (1, 1)),
)
competition_solution_dict =
Dict(d_ => CompetitionSolution(competition_params_dict[d_]) for d_ in [:high, :low])
data_demand_digital_params = DataDemandDigitalParams(
weak_signal_quality_level = 0.99,
strong_signal_quality_level = 0.995,
signal_is_strong = [true, false],
frequency_high_demand = 0.9,
)
hyperparams = DDDCHyperParameters(
α,
β,
δ,
max_iter,
competition_solution_dict,
data_demand_digital_params;
convergence_threshold = Int(1e5),
)
env = DDDCEnv(hyperparams)
# TODO: Until state handling is fixed for multi-agent simultaneous environments, we can't test this
# RLBase.test_interfaces!(env)
# RLBase.test_runnable!(AIAPCEnv(hyperparameters))
for demand in [:high, :low]
@test profit_gain(
π(
env.p_monop_opt[demand],
env.p_monop_opt[demand],
env.competition_params_dict[demand],
)[1],
env,
)[demand] == 1
@test profit_gain(
π(
env.p_Bert_nash_equilibrium[demand],
env.p_Bert_nash_equilibrium[demand],
env.competition_params_dict[demand],
)[1],
env,
)[demand] == 0
end
end
@testset "Profit gain check AIAPC" begin
competition_params_dict = Dict(
:high => CompetitionParameters(0.25, 0, (2, 2), (1, 1)),
:low => CompetitionParameters(0.25, 0, (2, 2), (1, 1)),
)
competition_solution_dict =
Dict(d_ => CompetitionSolution(competition_params_dict[d_]) for d_ in [:high, :low])
env =
AIAPCHyperParameters(
Float64(0.1),
Float64(1e-4),
0.95,
Int(1e7),
competition_solution_dict,
) |> AIAPCEnv
env.memory[1] = CartesianIndex(Int64(1), Int64(1))
exper = Experiment(env; debug = true)
# Find the Nash equilibrium profit
params = env.competition_params_dict[:high]
p_Bert_nash_equilibrium = exper.env.p_Bert_nash_equilibrium
π_min_price =
π(minimum(exper.env.price_options), minimum(exper.env.price_options), params)[1]
π_nash = π(p_Bert_nash_equilibrium, p_Bert_nash_equilibrium, params)[1]
@test π_nash > π_min_price
for i = 1:capacity(exper.hook[Player(1)].hooks[2].rewards)
push!(exper.hook[Player(1)].hooks[2].rewards, π_nash)
push!(exper.hook[Player(2)].hooks[2].rewards, 0)
end
ec_summary_ = economic_summary(exper)
# TODO: Convergence profit needs to be tested with a properly configured tabular approximator...
# @test round(profit_gain(ec_summary_.convergence_profit[1], env); digits = 2) == 0
# @test round(profit_gain(ec_summary_.convergence_profit[2], env); digits = 2) == 1.07
p_monop_opt = exper.env.p_monop_opt
π_monop = π(p_monop_opt, p_monop_opt, params)[1]
π_max_price =
π(maximum(exper.env.price_options), maximum(exper.env.price_options), params)[1]
@test π_max_price < π_monop
for i = 1:capacity(exper.hook[Player(1)].hooks[2].rewards)
push!(exper.hook[Player(1)].hooks[2].rewards, π_monop)
push!(exper.hook[Player(2)].hooks[2].rewards, 0)
end
ec_summary_ = economic_summary(exper)
@test 1 > round(profit_gain(ec_summary_.convergence_profit[1], env); digits = 2) > 0
end
@testset "Sequential environment" begin
α = Float64(0.125)
β = Float64(1)
δ = 0.95
ξ = 0.1
δ = 0.95
n_prices = 15
max_iter = 1000
price_index = 1:n_prices
competition_params_dict = Dict(
:high => CompetitionParameters(0.25, 0, (2, 2), (1, 1)),
:low => CompetitionParameters(0.25, 0, (2, 2), (1, 1)),
)
competition_solution_dict =
Dict(d_ => CompetitionSolution(competition_params_dict[d_]) for d_ in [:high, :low])
hyperparameters = AIAPCHyperParameters(
α,
β,
δ,
max_iter,
competition_solution_dict;
convergence_threshold = 1,
)
env = AIAPCEnv(hyperparameters)
@test current_player(env) == RLBase.SimultaneousPlayer()
@test action_space(env, Player(1)) == Int64.(1:15)
@test reward(env) != 0 # reward reflects the outcome of the previous play, so it is already populated before any new actions are chosen
act!(env, CartesianIndex(Int64(5), Int64(5)))
@test reward(env) != [0, 0] # rewards are populated once both players have acted (plays are technically sequential)
end
@testset "run AIAPC multiprocessing code" begin
_procs = addprocs(
Sys.CPU_THREADS,
topology = :master_worker,
exeflags = ["--threads=1", "--project=$(Base.active_project())"],
)
@everywhere begin
using Pkg
Pkg.instantiate()
using AlgorithmicCompetition
end
AlgorithmicCompetition.run_aiapc(;
n_parameter_iterations = 1,
max_iter = Int(100),
convergence_threshold = Int(10),
)
rmprocs(_procs)
end
@testset "run full AIAPC simulation (with full convergence threshold)" begin
α = Float64(0.075)
β = Float64(0.25)
δ = 0.95
ξ = 0.1
δ = 0.95
n_prices = 15
max_iter = Int(1e9)
price_index = 1:n_prices
competition_params_dict = Dict(
:high => CompetitionParameters(0.25, 0, (2, 2), (1, 1)),
:low => CompetitionParameters(0.25, 0, (2, 2), (1, 1)),
)
competition_solution_dict =
Dict(d_ => CompetitionSolution(competition_params_dict[d_]) for d_ in [:high, :low])
hyperparameters = AIAPCHyperParameters(α, β, δ, max_iter, competition_solution_dict)
profit_gain_max = 0
i = 0
while (profit_gain_max <= 0.82) && (i < 10)
i += 1
c_out = run(hyperparameters; stop_on_convergence = true)
profit_gain_max =
maximum(profit_gain(economic_summary(c_out).convergence_profit, c_out.env))
end
@test profit_gain_max > 0.82
end
@testset "run full DDDC simulation low-low" begin
α = Float64(0.125)
β = Float64(4e-1)
δ = 0.95
ξ = 0.1
δ = 0.95
n_prices = 15
max_iter = Int(1e6)
price_index = 1:n_prices
competition_params_dict = Dict(
:low => CompetitionParameters(0.25, 0.25, (2, 2), (1, 1)),
:high => CompetitionParameters(0.25, -0.25, (2, 2), (1, 1)),
)
competition_solution_dict =
Dict(d_ => CompetitionSolution(competition_params_dict[d_]) for d_ in [:high, :low])
data_demand_digital_params = DataDemandDigitalParams(
weak_signal_quality_level = 1,
strong_signal_quality_level = 1,
signal_is_strong = [false, false],
frequency_high_demand = 0.9,
)
hyperparams = DDDCHyperParameters(
α,
β,
δ,
max_iter,
competition_solution_dict,
data_demand_digital_params;
convergence_threshold = Int(1e5),
)
e_out = run(hyperparams; stop_on_convergence = true)
e_sum = economic_summary(e_out)
@test e_out.hook[Player(1)][2].demand_state_high_vect[end] ==
(e_out.env.memory.demand_state == :high)
player_ = Player(1)
demand_state_high_vect = [e_out.hook[player_][2].demand_state_high_vect...]
rewards = [e_out.hook[player_][2].rewards...]
@test mean(rewards[demand_state_high_vect]) ≈ e_sum.convergence_profit_demand_high[1] atol =
1e-2
@test mean(rewards[.!demand_state_high_vect]) ≈ e_sum.convergence_profit_demand_low[1] atol =
1e-2
@test mean(e_out.env.profit_array[:, :, :, 1]) >
mean(e_out.env.profit_array[:, :, :, 2])
@test 0.85 < e_sum.percent_demand_high < 0.95
@test all(e_sum.convergence_profit_demand_high > e_sum.convergence_profit_demand_low)
@test all(1 .> e_sum.profit_gain .> 0)
@test all(1 .> e_sum.profit_gain_demand_low .> 0)
@test all(1 .> e_sum.profit_gain_demand_high .> 0)
@test extract_profit_vars(e_out.env) == (
Dict(:high => 0.2386460385715974, :low => 0.19331233681405383),
Dict(:high => 0.4317126027908472, :low => 0.25),
)
@test extract_profit_vars(e_out.env) == (
Dict(:high => 0.2386460385715974, :low => 0.19331233681405383),
Dict(:high => 0.4317126027908472, :low => 0.25),
)
@test extract_quantity_vars(e_out.env)[1][:high] >
extract_quantity_vars(e_out.env)[1][:low]
@test extract_quantity_vars(e_out.env)[2][:high] >
extract_quantity_vars(e_out.env)[2][:low]
@test all(e_sum.price_response_to_demand_signal_mse .> 0)
end
@testset "run full DDDC simulation high-low" begin
α = Float64(0.125)
β = Float64(4e-1)
δ = 0.95
ξ = 0.1
δ = 0.95
n_prices = 15
max_iter = Int(1e6)
price_index = 1:n_prices
competition_params_dict = Dict(
:low => CompetitionParameters(0.25, 0.25, (2, 2), (1, 1)),
:high => CompetitionParameters(0.25, -0.25, (2, 2), (1, 1)),
)
competition_solution_dict =
Dict(d_ => CompetitionSolution(competition_params_dict[d_]) for d_ in [:high, :low])
data_demand_digital_params = DataDemandDigitalParams(
weak_signal_quality_level = 1,
strong_signal_quality_level = 1,
signal_is_strong = [true, false],
frequency_high_demand = 0.5,
)
hyperparams = DDDCHyperParameters(
α,
β,
δ,
max_iter,
competition_solution_dict,
data_demand_digital_params;
convergence_threshold = Int(1e5),
)
e_out = run(hyperparams; stop_on_convergence = true)
e_sum = economic_summary(e_out)
for player_ in [1, 2]
@test e_out.hook[Player(player_)][2].demand_state_high_vect[end] ==
(e_out.env.memory.demand_state == :high)
demand_state_high_vect = [e_out.hook[Player(player_)][2].demand_state_high_vect...]
rewards = [e_out.hook[Player(player_)][2].rewards...]
@test mean(rewards[demand_state_high_vect]) ≈
e_sum.convergence_profit_demand_high[player_] atol = 1e-2
@test mean(rewards[.!demand_state_high_vect]) ≈
e_sum.convergence_profit_demand_low[player_] atol = 1e-2
@test mean(e_out.hook[Player(player_)][1].best_response_vector .== 0) < 0.05
end
@test mean(e_out.env.profit_array[:, :, :, 1]) >
mean(e_out.env.profit_array[:, :, :, 2])
@test 0.45 < e_sum.percent_demand_high < 0.55
@test all(e_sum.convergence_profit_demand_high > e_sum.convergence_profit_demand_low)
@test any(1 .> e_sum.profit_gain .> 0)
@test any(1 .> e_sum.profit_gain_demand_low .> 0)
@test extract_profit_vars(e_out.env) == (
Dict(:high => 0.2386460385715974, :low => 0.19331233681405383),
Dict(:high => 0.4317126027908472, :low => 0.25),
)
@test extract_profit_vars(e_out.env) == (
Dict(:high => 0.2386460385715974, :low => 0.19331233681405383),
Dict(:high => 0.4317126027908472, :low => 0.25),
)
@test extract_quantity_vars(e_out.env)[1][:high] >
extract_quantity_vars(e_out.env)[1][:low]
@test extract_quantity_vars(e_out.env)[2][:high] >
extract_quantity_vars(e_out.env)[2][:low]
@test all(e_sum.price_response_to_demand_signal_mse .> 0)
end
@testset "run full DDDC simulation high-high" begin
α = Float64(0.125)
β = Float64(4e-1)
δ = 0.95
ξ = 0.1
n_prices = 15
max_iter = Int(1e6)
price_index = 1:n_prices
competition_params_dict = Dict(
:low => CompetitionParameters(0.25, 0.25, (2, 2), (1, 1)),
:high => CompetitionParameters(0.25, -0.25, (2, 2), (1, 1)),
)
competition_solution_dict =
Dict(d_ => CompetitionSolution(competition_params_dict[d_]) for d_ in [:high, :low])
data_demand_digital_params = DataDemandDigitalParams(
weak_signal_quality_level = 1,
strong_signal_quality_level = 1,
signal_is_strong = [true, true],
frequency_high_demand = 0.5,
)
hyperparams = DDDCHyperParameters(
α,
β,
δ,
max_iter,
competition_solution_dict,
data_demand_digital_params;
convergence_threshold = Int(1e5),
)
e_out = run(hyperparams; stop_on_convergence = true)
e_sum = economic_summary(e_out)
@test e_out.hook[Player(1)][2].demand_state_high_vect[end] ==
(e_out.env.memory.demand_state == :high)
demand_state_high_vect = [e_out.hook[Player(1)][2].demand_state_high_vect...]
rewards = [e_out.hook[Player(1)][2].rewards...]
@test mean(rewards[demand_state_high_vect]) ≈ e_sum.convergence_profit_demand_high[1] atol =
1e-2
@test mean(rewards[.!demand_state_high_vect]) ≈ e_sum.convergence_profit_demand_low[1] atol =
1e-2
@test mean(e_out.env.profit_array[:, :, :, 1]) >
mean(e_out.env.profit_array[:, :, :, 2])
@test 0.45 < e_sum.percent_demand_high < 0.55
@test all(e_sum.convergence_profit_demand_high .> e_sum.convergence_profit_demand_low)
@test all(1 .> e_sum.profit_gain .> 0)
@test all(1 .> e_sum.profit_gain_demand_high .> 0)
@test extract_profit_vars(e_out.env) == (
Dict(:high => 0.2386460385715974, :low => 0.19331233681405383),
Dict(:high => 0.4317126027908472, :low => 0.25),
)
@test extract_quantity_vars(e_out.env)[1][:high] >
extract_quantity_vars(e_out.env)[1][:low]
@test extract_quantity_vars(e_out.env)[2][:high] >
extract_quantity_vars(e_out.env)[2][:low]
end
@testset "run full AIAPC simulation" begin
α = Float64(0.125)
β = Float64(4e-1)
δ = 0.95
ξ = 0.1
n_prices = 15
max_iter = Int(1e6)
price_index = 1:n_prices
competition_params_dict = Dict(
:high => CompetitionParameters(0.25, 0, (2, 2), (1, 1)),
:low => CompetitionParameters(0.25, 0, (2, 2), (1, 1)),
)
competition_solution_dict =
Dict(d_ => CompetitionSolution(competition_params_dict[d_]) for d_ in [:high, :low])
hyperparameters = AIAPCHyperParameters(
α,
β,
δ,
max_iter,
competition_solution_dict;
convergence_threshold = 10000,
)
c_out = run(hyperparameters; stop_on_convergence = true)
@test minimum(c_out.policy[Player(1)].policy.learner.approximator.model) < 6
@test maximum(c_out.policy[Player(1)].policy.learner.approximator.model) > 5.5
# ensure that the policy is updated by the learner
@test sum(c_out.policy[Player(1)].policy.learner.approximator.model .!= 0) != 0
state_sum = sum(c_out.policy[Player(1)].policy.learner.approximator.model; dims = 1)
@test !all(y -> y == state_sum[1], state_sum)
@test length(reward(c_out.env)) == 2
@test length(reward(c_out.env, 1)) == 1
c_out.env.is_done[1] = false
@test reward(c_out.env) == (0, 0)
@test reward(c_out.env, 1) != 0
@test sum(c_out.hook[Player(1)][1].best_response_vector .== 0) == 0
@test c_out.hook[Player(1)][1].best_response_vector !=
c_out.hook[Player(2)][1].best_response_vector
end
@testset "Run a set of AIAPC experiments." begin
competition_params_dict = Dict(
:high => CompetitionParameters(0.25, 0, (2, 2), (1, 1)),
:low => CompetitionParameters(0.25, 0, (2, 2), (1, 1)),
)
competition_solution_dict =
Dict(d_ => CompetitionSolution(competition_params_dict[d_]) for d_ in [:high, :low])
n_parameter_increments = 3
α_ = Float64.(range(0.0025, 0.25, n_parameter_increments))
β_ = Float64.(range(0.025, 2, n_parameter_increments))
δ = 0.95
max_iter = Int(1e8)
hyperparameter_vect = [
AIAPCHyperParameters(
α,
β,
δ,
max_iter,
competition_solution_dict;
convergence_threshold = 10,
) for α in α_ for β in β_
]
experiments = @chain hyperparameter_vect run_and_extract.(stop_on_convergence = true)
@test experiments[1] isa AIAPCSummary
@test all(10 < experiments[1].iterations_until_convergence[i] < max_iter for i = 1:2)
@test (
sum(experiments[1].convergence_profit .> 1) +
sum(experiments[1].convergence_profit .< 0)
) == 0
@test experiments[1].convergence_profit[1] != experiments[1].convergence_profit[2]
@test all(experiments[1].is_converged)
end
@testset "AIAPC Hyperparameter Set" begin
α_vect = Float64.(range(0.0025, 0.25, 3))
β_vect = Float64.(range(0.025, 2, 3))
max_iter = Int(1e8)
convergence_threshold = 10
δ = 0.95
n_parameter_iterations = 2
competition_params_dict = Dict(
:high => CompetitionParameters(0.25, 0, (2, 2), (1, 1)),
:low => CompetitionParameters(0.25, 0, (2, 2), (1, 1)),
)
competition_solution_dict =
Dict(d_ => CompetitionSolution(competition_params_dict[d_]) for d_ in [:high, :low])
hyperparameter_vect = build_hyperparameter_set(
α_vect,
β_vect,
δ,
max_iter,
competition_solution_dict,
convergence_threshold,
n_parameter_iterations,
)
df_1 = DataFrame([
(hyperparameter.α, hyperparameter.β, 1) for hyperparameter in hyperparameter_vect
])
rename!(df_1, [:α, :β, :count])
df_ = @chain df_1 @groupby(:α, :β) @combine(length(:count))
@test all(df_[!, :count_length] .== 2)
end
@testset "Parameter / learning checks" begin
α = Float64(0.125)
β = Float64(1)
δ = 0.95
ξ = 0.1
n_prices = 15
max_iter = 10000
price_index = 1:n_prices
competition_params_dict = Dict(
:high => CompetitionParameters(0.25, 0, (2, 2), (1, 1)),
:low => CompetitionParameters(0.25, 0, (2, 2), (1, 1)),
)
competition_solution_dict =
Dict(d_ => CompetitionSolution(competition_params_dict[d_]) for d_ in [:high, :low])
hyperparameters = AIAPCHyperParameters(
α,
β,
δ,
max_iter,
competition_solution_dict;
convergence_threshold = 1,
)
c_out = run(hyperparameters; stop_on_convergence = false, debug = true)
# ensure that the policy is updated by the learner
@test sum(c_out.policy[Player(1)].policy.learner.approximator.model .!= 0) != 0
@test sum(c_out.policy[Player(2)].policy.learner.approximator.model .!= 0) != 0
@test c_out.env.is_done[1]
@test c_out.hook[Player(1)][1].iterations_until_convergence == max_iter
@test c_out.hook[Player(2)][1].iterations_until_convergence == max_iter
@test c_out.policy[Player(1)].trajectory.container[:reward][1] .!= 0
@test c_out.policy[Player(2)].trajectory.container[:reward][1] .!= 0
@test c_out.policy[Player(1)].policy.learner.approximator.model !=
c_out.policy[Player(2)].policy.learner.approximator.model
@test c_out.hook[Player(1)][1].best_response_vector !=
c_out.hook[Player(2)][1].best_response_vector
@test mean(
c_out.hook[Player(1)][2].rewards[(end-2):end] .!=
c_out.hook[Player(2)][2].rewards[(end-2):end],
) >= 0.3
for i in [Player(1), Player(2)]
@test c_out.hook[i][1].convergence_duration >= 0
@test c_out.hook[i][1].is_converged
@test c_out.hook[i][1].convergence_threshold == 1
@test sum(c_out.hook[i][2].rewards .== 0) == 0
end
@test reward(c_out.env, 1) != 0
@test reward(c_out.env, 2) != 0
@test length(reward(c_out.env)) == 2
@test length(c_out.env.action_space) == 225
end
@testset "No stop on Convergence stop works" begin
α = Float64(0.125)
β = Float64(1)
δ = 0.95
ξ = 0.1
n_prices = 15
max_iter = Int(1e6)
price_index = 1:n_prices
competition_params_dict = Dict(
:high => CompetitionParameters(0.25, 0, (2, 2), (1, 1)),
:low => CompetitionParameters(0.25, 0, (2, 2), (1, 1)),
)
competition_solution_dict =
Dict(d_ => CompetitionSolution(competition_params_dict[d_]) for d_ in [:high, :low])
hyperparameters = AIAPCHyperParameters(
α,
β,
δ,
max_iter,
competition_solution_dict;
convergence_threshold = 10,
)
c_out = run(hyperparameters; stop_on_convergence = false)
@test RLFarm.get_ϵ(c_out.policy[Player(1)].policy.explorer) < 1e-4
@test RLFarm.get_ϵ(c_out.policy[Player(2)].policy.explorer) < 1e-4
end
@testset "Convergence stop works" begin
α = Float64(0.125)
β = Float64(1)
δ = 0.95
ξ = 0.1
n_prices = 15
max_iter = Int(1e7)
price_index = 1:n_prices
policy = RandomPolicy()
competition_params_dict = Dict(
:high => CompetitionParameters(0.25, 0, (2, 2), (1, 1)),
:low => CompetitionParameters(0.25, 0, (2, 2), (1, 1)),
)
competition_solution_dict =
Dict(d_ => CompetitionSolution(competition_params_dict[d_]) for d_ in [:high, :low])
hyperparameters = AIAPCHyperParameters(
α,
β,
δ,
max_iter,
competition_solution_dict;
convergence_threshold = 5,
)
c_out = run(hyperparameters; stop_on_convergence = true)
@test 0.98 < RLFarm.get_ϵ(c_out.policy[Player(1)].policy.explorer) < 1
@test 0.98 < RLFarm.get_ϵ(c_out.policy[Player(2)].policy.explorer) < 1
@test RLCore.check!(c_out.stop_condition, policy, c_out.env) == true
@test RLCore.check!(c_out.stop_condition.stop_conditions[1], policy, c_out.env) == false
@test RLCore.check!(c_out.stop_condition.stop_conditions[2], policy, c_out.env) == true
@test c_out.hook[Player(1)][1].convergence_duration >= 5
@test c_out.hook[Player(2)][1].convergence_duration >= 5
@test (c_out.hook[Player(2)][1].convergence_duration == 5) ||
(c_out.hook[Player(1)][1].convergence_duration == 5)
end
@testset "run DDDC multiprocessing code" begin
_procs = addprocs(
Sys.CPU_THREADS,
topology = :master_worker,
exeflags = ["--threads=1", "--project=$(Base.active_project())"],
)
@everywhere begin
using Pkg
Pkg.instantiate()
using AlgorithmicCompetition
end
AlgorithmicCompetition.run_dddc(
n_parameter_iterations = 1,
max_iter = Int(2e5),
convergence_threshold = Int(1e5),
n_grid_increments = 3,
)
rmprocs(_procs)
end
| AlgorithmicCompetition | https://github.com/jeremiahpslewis/AlgorithmicCompetition.jl.git |
|
[
"MIT"
] | 0.1.5 | f09ca454bd05ecdf946ce4ebeaf8a7d687356d44 | code | 3756 | @testset "Policy operation test" begin
α = Float64(0.125)
β = Float64(1)
δ = 0.95
ξ = 0.1
n_prices = 15
max_iter = 1000
price_index = 1:n_prices
competition_params_dict = Dict(
:high => CompetitionParameters(0.25, 0, (2, 2), (1, 1)),
:low => CompetitionParameters(0.25, 0, (2, 2), (1, 1)),
)
competition_solution_dict =
Dict(d_ => CompetitionSolution(competition_params_dict[d_]) for d_ in [:high, :low])
hyperparameters = AIAPCHyperParameters(
α,
β,
δ,
max_iter,
competition_solution_dict;
convergence_threshold = 1,
)
env = AIAPCEnv(hyperparameters)
policy = AIAPCPolicy(env)
# Test full policy exploration of states
push!(policy, PreActStage(), env)
n_ = Int(1e5)
policy_runs = [[Tuple(plan!(policy, env))...] for i = 1:n_]
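# each player's set of unique sampled actions should cover every price index, so each checksum equals sum(1:n_prices)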
checksum_ = [sum(unique(policy_runs[j][i] for j = 1:n_)) for i = 1:2]
@test all(checksum_ .== sum(1:env.n_prices))
end
@testset "policy push! and optimise! test" begin
competition_params_dict = Dict(
:high => CompetitionParameters(0.25, 0, (2, 2), (1, 1)),
:low => CompetitionParameters(0.25, 0, (2, 2), (1, 1)),
)
competition_solution_dict =
Dict(d_ => CompetitionSolution(competition_params_dict[d_]) for d_ in [:high, :low])
env =
AIAPCHyperParameters(
Float64(0.1),
Float64(1e-4),
0.95,
Int(1e7),
competition_solution_dict,
) |> AIAPCEnv
policy = AIAPCPolicy(env)
@test maximum(policy[Player(1)].policy.learner.approximator.model) ≈ 6.278004857861001
@test minimum(policy[Player(1)].policy.learner.approximator.model) ≈ 4.111178690372623
approx_table = copy(policy.agents[Player(1)].policy.learner.approximator.model)
# First three rounds
# t=1
push!(policy, PreEpisodeStage(), env)
push!(policy, PreActStage(), env)
@test length(policy.agents[Player(1)].trajectory.container) == 0
optimise!(policy, PreActStage())
approx_table_t_1 = copy(policy.agents[Player(1)].policy.learner.approximator.model)
@test approx_table_t_1 == approx_table # test that optimise! in t=1 is a noop
actions = RLBase.plan!(policy, env)
act!(env, actions)
@test length(policy.agents[Player(1)].trajectory.container) == 0 # test that trajectory has not been filled
push!(policy, PostActStage(), env, actions)
@test length(policy.agents[Player(1)].trajectory.container) == 1
optimise!(policy, PostActStage())
push!(policy, PostEpisodeStage(), env)
# t=2
push!(policy, PreEpisodeStage(), env)
push!(policy, PreActStage(), env)
@test length(policy.agents[Player(1)].trajectory.container) == 1
optimise!(policy, PreActStage())
approx_table_t_2 = copy(policy.agents[Player(1)].policy.learner.approximator.model)
@test approx_table_t_2 != approx_table_t_1 # test that optimise! in t=2 is not a noop
actions = RLBase.plan!(policy, env)
act!(env, actions)
push!(policy, PostActStage(), env, actions)
optimise!(policy, PostActStage())
push!(policy, PostEpisodeStage(), env)
# t=3
push!(policy, PreEpisodeStage(), env)
push!(policy, PreActStage(), env)
@test length(policy.agents[Player(1)].trajectory.container) == 1
optimise!(policy, PreActStage())
approx_table_t_3 = copy(policy.agents[Player(1)].policy.learner.approximator.model)
@test approx_table_t_2 != approx_table_t_3 # test that optimise! in t=3 is not a noop
actions = RLBase.plan!(policy, env)
act!(env, actions)
push!(policy, PostActStage(), env, actions)
optimise!(policy, PostActStage())
push!(policy, PostEpisodeStage(), env)
end
| AlgorithmicCompetition | https://github.com/jeremiahpslewis/AlgorithmicCompetition.jl.git |
|
[
"MIT"
] | 0.1.5 | f09ca454bd05ecdf946ce4ebeaf8a7d687356d44 | code | 895 | @testset "Profit array test" begin
competition_params_dict = Dict(
:high => CompetitionParameters(0.25, 0, (2, 2), (1, 1)),
:low => CompetitionParameters(0.25, 0, (2, 2), (1, 1)),
)
competition_solution_dict =
Dict(d_ => CompetitionSolution(competition_params_dict[d_]) for d_ in [:high, :low])
params = AIAPCHyperParameters(
Float64(0.1),
Float64(1e-4),
0.95,
Int(1e7),
competition_solution_dict,
)
env = params |> AIAPCEnv
exper = Experiment(env)
price_options = env.price_options
action_space_ = env.action_space
# assumes the AIAPC profit array constructor signature: price grid, demand
# parameters, and number of players
profit_array = construct_AIAPC_profit_array(
    price_options,
    competition_solution_dict[:high].params,
    2,
)
@test profit_array[5, 3, :] ≈
    π(price_options[5], price_options[3], competition_solution_dict[:high].params)
end
| AlgorithmicCompetition | https://github.com/jeremiahpslewis/AlgorithmicCompetition.jl.git |
|
[
"MIT"
] | 0.1.5 | f09ca454bd05ecdf946ce4ebeaf8a7d687356d44 | code | 4319 | @testset "Q-Learning" begin
n_prices = 15
n_state_space = 15^2
α = Float64(0.125)
β = Float64(1)
δ = 0.95
ξ = 0.1
max_iter = 1000
price_index = 1:n_prices
competition_params_dict = Dict(
:high => CompetitionParameters(0.25, 0, (2, 2), (1, 1)),
:low => CompetitionParameters(0.25, 0, (2, 2), (1, 1)),
)
competition_solution_dict =
Dict(d_ => CompetitionSolution(competition_params_dict[d_]) for d_ in [:high, :low])
hyperparameters = AIAPCHyperParameters(
α,
β,
δ,
max_iter,
competition_solution_dict;
convergence_threshold = 1,
)
env = AIAPCEnv(hyperparameters)
app = TabularApproximator(InitMatrix(env; mode = "zero"))
@test RLCore.Q(app, 1, 1) == 0
@test RLCore.Q(app, 1) == zeros(n_prices)
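# starting from a zero-initialized table, the TD update presumably collapses to α * r = 0.125 * 0.5 = 0.0625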
@test 0.0625 == RLCore.bellman_update!(app, 1, 1, 1, 0.5, δ, α)
end
@testset "Q_i_0 AIAPC" begin
α = Float64(0.125)
β = Float64(1)
δ = 0.95
ξ = 0.1
n_prices = 15
max_iter = 1000
price_index = 1:n_prices
competition_params_dict = Dict(
:high => CompetitionParameters(0.25, 0, (2, 2), (1, 1)),
:low => CompetitionParameters(0.25, 0, (2, 2), (1, 1)),
)
competition_solution_dict =
Dict(d_ => CompetitionSolution(competition_params_dict[d_]) for d_ in [:high, :low])
hyperparameters = AIAPCHyperParameters(
α,
β,
δ,
max_iter,
competition_solution_dict;
convergence_threshold = 1,
)
env = AIAPCEnv(hyperparameters)
test_prices = Q_i_0(env)
@test minimum(test_prices) ≈ 4.111178690372623 atol = 0.001
@test maximum(test_prices) ≈ 6.278004857861001 atol = 0.001
end
@testset "Q_i_0 DDDC" begin
α = Float64(0.125)
β = Float64(4e-1)
δ = 0.95
ξ = 0.1
n_prices = 15
max_iter = Int(1e6) # 1e8
price_index = 1:n_prices
competition_params_dict = Dict(
:high => CompetitionParameters(0.25, -0.25, (2, 2), (1, 1)),
:low => CompetitionParameters(0.25, 0.25, (2, 2), (1, 1)),
)
competition_solution_dict =
Dict(d_ => CompetitionSolution(competition_params_dict[d_]) for d_ in [:high, :low])
data_demand_digital_params = DataDemandDigitalParams(
weak_signal_quality_level = 0.5,
strong_signal_quality_level = 0.5,
signal_is_strong = [true, false],
frequency_high_demand = 0.5,
)
hyperparams = DDDCHyperParameters(
α,
β,
δ,
max_iter,
competition_solution_dict,
data_demand_digital_params;
convergence_threshold = Int(1e5),
)
env = DDDCEnv(hyperparams)
@test minimum(Q_i_0(env)) == 0.2003206598478015
@test maximum(Q_i_0(env)) == 0.3694013307458184
end
@testset "Q" begin
params = CompetitionParameters(0.25, 0, (2, 2), (1, 1))
p_ = [1, 1]
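# logit demand shares: q_i = exp((a_i - p_i) / μ) / (sum_j exp((a_j - p_j) / μ) + exp(a_0 / μ))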
logit_demand = exp.((params.a .- p_) ./ params.μ)
q_logit_demand =
logit_demand /
(sum(exp.((params.a .- p_) ./ params.μ)) + exp(params.a_0 / params.μ))
@test Q(p_[1], p_[2], params) == q_logit_demand
@test Q(1.47293, 1.47293, CompetitionParameters(0.25, 0, (2, 2), (1, 1))) ≈
fill(0.47138, 2) atol = 0.01
@test Q(1.92498, 1.92498, CompetitionParameters(0.25, 0, (2, 2), (1, 1))) ≈
fill(0.36486, 2) atol = 0.01
end
@testset "simple InitMatrix test" begin
α = Float64(0.125)
β = Float64(1)
δ = 0.95
ξ = 0.1
n_prices = 15
max_iter = 1000
price_index = 1:n_prices
competition_params_dict = Dict(
:high => CompetitionParameters(0.25, 0, (2, 2), (1, 1)),
:low => CompetitionParameters(0.25, 0, (2, 2), (1, 1)),
)
competition_solution_dict =
Dict(d_ => CompetitionSolution(competition_params_dict[d_]) for d_ in [:high, :low])
hyperparameters = AIAPCHyperParameters(
α,
β,
δ,
max_iter,
competition_solution_dict;
convergence_threshold = 1,
)
env = AIAPCEnv(hyperparameters)
a = InitMatrix(env; mode = "baseline")
@test mean(a) ≈ 5.598115514452509
@test a[1, 1] ≈ 5.7897603960172805
@test a[1, 10] ≈ 5.7897603960172805
@test a[5, 10] ≈ 6.278004857861001
end
| AlgorithmicCompetition | https://github.com/jeremiahpslewis/AlgorithmicCompetition.jl.git |
|
[
"MIT"
] | 0.1.5 | f09ca454bd05ecdf946ce4ebeaf8a7d687356d44 | code | 2048 | using JuMP
using Chain
using DataFrames
using DataFrameMacros
using Test
using ReinforcementLearning:
RLCore,
PostActStage,
PreActStage,
PostEpisodeStage,
PreEpisodeStage,
state,
reward,
current_player,
action_space,
EpsilonGreedyExplorer,
RandomPolicy,
MultiAgentPolicy,
optimise!,
RLBase,
AbstractPolicy,
act!,
plan!
import ReinforcementLearning: RLCore
using Statistics
using AlgorithmicCompetition:
AIAPCEnv,
AIAPCHyperParameters,
AIAPCPolicy,
AIAPCSummary,
AlgorithmicCompetition,
build_hyperparameter_set,
CompetitionParameters,
CompetitionSolution,
construct_AIAPC_action_space,
construct_AIAPC_profit_array,
construct_AIAPC_state_space_lookup,
construct_DDDC_action_space,
construct_DDDC_profit_array,
construct_DDDC_state_space_lookup,
ConvergenceCheck,
DataDemandDigitalParams,
DDDCEnv,
DDDCHyperParameters,
DDDCTotalRewardPerLastNEpisodes,
DDDCPolicy,
economic_summary,
Experiment,
extract_profit_vars,
extract_quantity_vars,
get_demand_level,
get_demand_signals,
initialize_price_memory,
InitMatrix,
p_BR,
post_prob_high_low_given_signal,
profit_gain,
Q_i_0,
Q,
reward,
run_and_extract,
run,
solve_bertrand,
solve_monopolist,
TDLearner,
π
using Distributed
@testset "AlgorithmicCompetition.jl" begin
@testset "Paramter tests" begin
include("alpha_beta.jl")
include("stochastic_demand_stochastic_information.jl")
include("competitive_equilibrium.jl")
end
@testset "RL.jl structs" begin
include("hooks.jl")
include("explorer.jl")
include("tabular_approximator.jl")
include("q_learning.jl")
include("policy.jl")
end
@testset verbose = true "Integration tests" begin
include("integration.jl")
end
@testset "Output tests" begin
include("aiapc_conversion_check.jl")
end
end
| AlgorithmicCompetition | https://github.com/jeremiahpslewis/AlgorithmicCompetition.jl.git |
|
[
"MIT"
] | 0.1.5 | f09ca454bd05ecdf946ce4ebeaf8a7d687356d44 | code | 1287 | @testset "get_demand_signals" begin
@test get_demand_signals(true, [true, false], 0.0, 1.0) == [1, 0]
@test get_demand_signals(true, [true, false], 1.0, 0.0) == [0, 1]
@test get_demand_signals(false, [true, false], 0.0, 1.0) == [0, 1]
@test get_demand_signals(false, [true, false], 1.0, 0.0) == [1, 0]
@test all(
4500 .<
sum(get_demand_signals(false, [true, false], 0.5, 0.5) for i = 1:10000) .<
5500,
)
@test get_demand_signals(false, [true, false], 1.0, 1.0) == [0, 0]
@test get_demand_signals(false, [true, true], 0.5, 1.0) == [0, 0]
@test get_demand_signals(true, [true, true], 0.5, 1.0) == [1, 1]
end
@testset "get_demand_level" begin
@test get_demand_level(1.0) == true
@test get_demand_level(0.0) == false
end
@testset "construct_action_space" begin
@test length(construct_AIAPC_action_space(1:15)) == 225
@test length(construct_DDDC_action_space(1:15)) == 900
end
@testset "initialize_price_memory" begin
@test length(initialize_price_memory(1:15, 2)) == 2
end
@testset "post_prob_high_low_given_signal" begin
@test post_prob_high_low_given_signal(0, 1)[2] == 1
@test post_prob_high_low_given_signal(1, 0)[2] == 0.0
@test post_prob_high_low_given_signal(0.5, 0.5) == [0.5, 0.5]
end
| AlgorithmicCompetition | https://github.com/jeremiahpslewis/AlgorithmicCompetition.jl.git |
|
[
"MIT"
] | 0.1.5 | f09ca454bd05ecdf946ce4ebeaf8a7d687356d44 | code | 808 | using Test
using AlgorithmicCompetition:
TabularApproximator, TabularVApproximator, TabularQApproximator, TDLearner, QBasedPolicy
import ReinforcementLearning: RLBase
using ReinforcementLearning
@testset "Constructors" begin
@test TabularApproximator(fill(1, 10, 10)) isa TabularApproximator
@test TabularVApproximator(n_state = 10) isa TabularApproximator{Vector{Float64}}
@test TabularQApproximator(n_state = 10, n_action = 10) isa
TabularApproximator{Matrix{Float64}}
end
@testset "RLCore.forward" begin
v_approx = TabularVApproximator(n_state = 10)
@test RLCore.forward(v_approx, 1) == 0.0
q_approx = TabularQApproximator(n_state = 5, n_action = 10)
@test RLCore.forward(q_approx, 1) == zeros(Float64, 10)
@test RLCore.forward(q_approx, 1, 5) == 0.0
end
| AlgorithmicCompetition | https://github.com/jeremiahpslewis/AlgorithmicCompetition.jl.git |
|
[
"MIT"
] | 0.1.5 | f09ca454bd05ecdf946ce4ebeaf8a7d687356d44 | code | 2236 | using DrWatson
using CairoMakie
using Chain
using DataFrameMacros
using AlgebraOfGraphics
using AlgorithmicCompetition:
AIAPCHyperParameters,
AIAPCEnv,
CompetitionParameters,
CompetitionSolution,
profit_gain,
draw_price_diagnostic
using CSV
using DataFrames
using Statistics
job_id = "7799305"
csv_files = filter!(
    x -> occursin(Regex("data/SLURM_ARRAY_JOB_ID=$(job_id).*.csv"), x),
    readdir("data", join = true),
)
df_ = DataFrame.(CSV.File.(csv_files))
for i in eachindex(df_)
df_[i][!, "metadata"] .= csv_files[i]
end
df = vcat(df_...)
# df[!, "metadata_dict"] = parse_savename.(df[!, "metadata"])
n_simulations_aiapc =
@chain df @groupby(:α, :β) @combine(:n_simulations = length(:π_bar)) _[
1,
:n_simulations,
]
mkpath("plots/aiapc")
competition_params_dict = Dict(
:high => CompetitionParameters(0.25, 0, (2, 2), (1, 1)),
:low => CompetitionParameters(0.25, 0, (2, 2), (1, 1)),
)
competition_solution_dict =
Dict(d_ => CompetitionSolution(competition_params_dict[d_]) for d_ in [:high, :low])
hyperparams = AIAPCHyperParameters(
Float64(0.1),
Float64(1e-4),
0.95,
Int(1e7),
competition_solution_dict,
)
env = AIAPCEnv(hyperparams)
df_summary = @chain df begin
@groupby(:α, :β)
@combine(
:Δ_π_bar = profit_gain(mean(:π_bar), env),
:iterations_until_convergence = mean(:iterations_until_convergence)
)
end
plt0 = draw_price_diagnostic(hyperparams)
fig_0 = draw(
plt0,
axis = (
title = "Profit Levels across Price Options",
subtitle = "(Solid line is profit for symmetric prices, shaded region shows range based on price options)",
xlabel = "Competitor's Price Choice",
),
)
save("plots/aiapc/fig_0.svg", fig_0)
plt1 = @chain df_summary begin
@transform(:Δ_π_bar = round(:Δ_π_bar * 60; digits = 0) / 60) # Create bins
data(_) * mapping(:β, :α, :Δ_π_bar => "Average Profit Gain") * visual(Heatmap)
end
fig_1 = draw(plt1)
save("plots/aiapc/fig_1.svg", fig_1)
plt2 = @chain df_summary begin
data(_) *
mapping(:β, :α, :iterations_until_convergence => "Iterations until convergence") *
visual(Heatmap)
end
fig_2 = draw(plt2)
save("plots/aiapc/fig_2.svg", fig_2)
| AlgorithmicCompetition | https://github.com/jeremiahpslewis/AlgorithmicCompetition.jl.git |
|
[
"MIT"
] | 0.1.5 | f09ca454bd05ecdf946ce4ebeaf8a7d687356d44 | code | 28946 | using CairoMakie
using Chain
using DataFrameMacros
using AlgebraOfGraphics
using CSV
using DataFrames
using Statistics
using Test
using AlgorithmicCompetition:
post_prob_high_low_given_signal,
post_prob_high_low_given_both_signals,
draw_price_diagnostic,
CompetitionParameters,
CompetitionSolution,
DataDemandDigitalParams,
DDDCHyperParameters,
draw_price_diagnostic
folder_name = "data/dddc_v0.0.6_data"
mkpath("plots/dddc")
df_ = DataFrame.(CSV.File.(readdir(folder_name, join = true)))
df_ = vcat(df_...)
n_simulations_dddc = @chain df_ @subset(
(:weak_signal_quality_level == 1) &
(:frequency_high_demand == 1) &
(:signal_is_strong == "Bool[0, 0]")
) nrow()
@test (132 * n_simulations_dddc) == nrow(df_)
df__ = @chain df_ begin
@transform(
:price_response_to_demand_signal_mse =
eval(Meta.parse(:price_response_to_demand_signal_mse)),
:convergence_profit_demand_high_vect =
eval(Meta.parse(:convergence_profit_demand_high)),
:convergence_profit_demand_low_vect =
eval(Meta.parse(:convergence_profit_demand_low)),
:profit_vect = eval(Meta.parse(:profit_vect)),
:profit_gain = eval(Meta.parse(:profit_gain)),
:profit_gain_demand_high = eval(Meta.parse(:profit_gain_demand_high)),
:profit_gain_demand_low = eval(Meta.parse(:profit_gain_demand_low)),
:signal_is_strong_vect = eval(Meta.parse(:signal_is_strong)),
:percent_unexplored_states_vect = eval(Meta.parse(:percent_unexplored_states)),
)
@transform!(
@subset((:frequency_high_demand == 1) & (:weak_signal_quality_level == 1)),
:price_response_to_demand_signal_mse = missing
)
end
df___ = @chain df__ begin
@transform(:signal_is_weak_vect = :signal_is_strong_vect .!= 1)
@transform(:profit_mean = mean(:profit_vect))
@transform(:percent_unexplored_states = mean(:percent_unexplored_states_vect))
@transform(
:percent_unexplored_states_weak_signal_player =
(:signal_is_strong ∈ ("Bool[0, 0]", "Bool[1, 1]")) ? missing :
:percent_unexplored_states_vect[:signal_is_weak_vect][1],
)
@transform(
:percent_unexplored_states_strong_signal_player =
(:signal_is_strong ∈ ("Bool[0, 0]", "Bool[1, 1]")) ? missing :
:percent_unexplored_states_vect[:signal_is_strong_vect][1],
)
@transform(
:profit_gain_demand_low_weak_signal_player =
(:signal_is_strong ∈ ("Bool[0, 0]", "Bool[1, 1]")) |
(:frequency_high_demand == 1) ? missing :
:profit_gain_demand_low[:signal_is_weak_vect][1],
)
@transform(
:profit_gain_demand_low_strong_signal_player =
(:signal_is_strong ∈ ("Bool[0, 0]", "Bool[1, 1]")) |
(:frequency_high_demand == 1) ? missing :
:profit_gain_demand_low[:signal_is_strong_vect][1],
)
@transform(
:profit_gain_demand_high_weak_signal_player =
(:signal_is_strong ∈ ("Bool[0, 0]", "Bool[1, 1]")) ? missing :
:profit_gain_demand_high[:signal_is_weak_vect][1],
)
@transform(
:profit_gain_demand_high_strong_signal_player =
(:signal_is_strong ∈ ("Bool[0, 0]", "Bool[1, 1]")) ? missing :
:profit_gain_demand_high[:signal_is_strong_vect][1],
)
@transform(
:profit_gain_weak_signal_player =
(:signal_is_strong ∈ ("Bool[0, 0]", "Bool[1, 1]")) ? missing :
:profit_gain[:signal_is_weak_vect][1],
)
@transform(
:profit_gain_strong_signal_player =
(:signal_is_strong ∈ ("Bool[0, 0]", "Bool[1, 1]")) ? missing :
:profit_gain[:signal_is_strong_vect][1],
)
end
df = @chain df___ begin
@transform(
:convergence_profit_demand_low_weak_signal_player =
(:signal_is_strong ∈ ("Bool[0, 0]", "Bool[1, 1]")) |
(:frequency_high_demand == 1) ? missing :
:convergence_profit_demand_low_vect[:signal_is_weak_vect][1],
)
@transform(
:convergence_profit_demand_low_strong_signal_player =
(:signal_is_strong ∈ ("Bool[0, 0]", "Bool[1, 1]")) |
(:frequency_high_demand == 1) ? missing :
:convergence_profit_demand_low_vect[:signal_is_strong_vect][1],
)
@transform(
:convergence_profit_demand_high_weak_signal_player =
(:signal_is_strong ∈ ("Bool[0, 0]", "Bool[1, 1]")) ? missing :
:convergence_profit_demand_high_vect[:signal_is_weak_vect][1],
)
@transform(
:convergence_profit_demand_high_strong_signal_player =
(:signal_is_strong ∈ ("Bool[0, 0]", "Bool[1, 1]")) ? missing :
:convergence_profit_demand_high_vect[:signal_is_strong_vect][1],
)
@transform(
:convergence_profit_weak_signal_player =
(:signal_is_strong ∈ ("Bool[0, 0]", "Bool[1, 1]")) ? missing :
:profit_vect[:signal_is_weak_vect][1],
)
@transform(
:convergence_profit_strong_signal_player =
(:signal_is_strong ∈ ("Bool[0, 0]", "Bool[1, 1]")) ? missing :
:profit_vect[:signal_is_strong_vect][1],
)
end
# Basic correctness assurance tests...
@test mean(mean.(df[!, :signal_is_weak_vect] .+ df[!, :signal_is_strong_vect])) == 1
@chain df begin
@subset(:signal_is_strong ∉ ("Bool[0, 0]", "Bool[1, 1]"))
@transform(
:profit_gain_sum_1 =
(:profit_gain_weak_signal_player + :profit_gain_strong_signal_player),
:profit_gain_sum_2 = sum(:profit_gain),
)
@transform(:profit_gain_check = :profit_gain_sum_1 != :profit_gain_sum_2)
@combine(sum(:profit_gain_check))
@test _[1, :profit_gain_check_sum] == 0
end
@chain df begin
@subset(:signal_is_strong ∉ ("Bool[0, 0]", "Bool[1, 1]"))
@transform(
:profit_gain_sum_1 = (
:profit_gain_demand_high_weak_signal_player +
:profit_gain_demand_high_strong_signal_player
),
:profit_gain_sum_2 = sum(:profit_gain_demand_high),
)
@transform(:profit_gain_check = :profit_gain_sum_1 != :profit_gain_sum_2)
@combine(sum(:profit_gain_check))
@test _[1, :profit_gain_check_sum] == 0
end
@chain df begin
@subset(
(:signal_is_strong ∉ ("Bool[0, 0]", "Bool[1, 1]")) & (:frequency_high_demand != 1)
)
@transform(
:profit_gain_sum_1 = (
:profit_gain_demand_low_weak_signal_player +
:profit_gain_demand_low_strong_signal_player
),
:profit_gain_sum_2 = sum(:profit_gain_demand_low),
)
@transform(:profit_gain_check = :profit_gain_sum_1 != :profit_gain_sum_2)
@combine(sum(:profit_gain_check))
@test _[1, :profit_gain_check_sum] == 0
end
plt1 = @chain df begin
@transform(:signal_is_strong = string(:signal_is_strong))
data(_) *
mapping(
:frequency_high_demand => "High Demand Frequency",
:profit_mean => "Average Profit",
color = :signal_is_strong => nonnumeric,
row = :signal_is_strong,
) *
visual(Scatter)
end
f1 = draw(plt1)
# save("plots/dddc/plot_1.svg", f1)
df_summary = @chain df begin
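# relabel Bool[0, 1] as Bool[1, 0]: player ordering is irrelevant for the mixed strong/weak configuration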
@transform!(@subset(:signal_is_strong == "Bool[0, 1]"), :signal_is_strong = "Bool[1, 0]",)
@transform(
:price_response_to_demand_signal_mse_mean =
@passmissing minimum(:price_response_to_demand_signal_mse)
)
@transform(
:weak_signal_quality_level_str =
string("Weak Signal Strength: ", :weak_signal_quality_level)
)
@transform(
:profit_gain_max = maximum(:profit_gain),
:profit_gain_demand_high_max = maximum(:profit_gain_demand_high),
:profit_gain_demand_low_max = maximum(:profit_gain_demand_low),
:profit_gain_min = minimum(:profit_gain),
:profit_gain_demand_high_min = minimum(:profit_gain_demand_high),
:profit_gain_demand_low_min = minimum(:profit_gain_demand_low),
:convergence_profit_demand_high = mean(:convergence_profit_demand_high_vect),
:convergence_profit_demand_low = mean(:convergence_profit_demand_low_vect),
)
@groupby(
:signal_is_strong,
:weak_signal_quality_level,
:weak_signal_quality_level_str,
:frequency_high_demand,
)
@combine(
:profit_mean = mean(:profit_mean),
mean(:iterations_until_convergence),
mean(:profit_min),
mean(:profit_max),
:profit_gain_min = mean(:profit_gain_min),
:profit_gain_max = mean(:profit_gain_max),
:profit_gain_demand_high_weak_signal_player =
mean(:profit_gain_demand_high_weak_signal_player),
:profit_gain_demand_low_weak_signal_player =
mean(:profit_gain_demand_low_weak_signal_player),
:profit_gain_demand_high_strong_signal_player =
mean(:profit_gain_demand_high_strong_signal_player),
:profit_gain_demand_low_strong_signal_player =
mean(:profit_gain_demand_low_strong_signal_player),
:percent_unexplored_states = mean(:percent_unexplored_states),
:percent_unexplored_states_weak_signal_player =
mean(:percent_unexplored_states_weak_signal_player),
:percent_unexplored_states_strong_signal_player =
mean(:percent_unexplored_states_strong_signal_player),
:profit_gain_weak_signal_player = mean(:profit_gain_weak_signal_player),
:profit_gain_strong_signal_player = mean(:profit_gain_strong_signal_player),
:profit_gain_demand_high_min = mean(:profit_gain_demand_high_min),
:profit_gain_demand_low_min = mean(:profit_gain_demand_low_min),
:profit_gain_demand_high_max = mean(:profit_gain_demand_high_max),
:profit_gain_demand_low_max = mean(:profit_gain_demand_low_max),
:convergence_profit_demand_high_weak_signal_player =
mean(:convergence_profit_demand_high_weak_signal_player),
:convergence_profit_demand_low_weak_signal_player =
mean(:convergence_profit_demand_low_weak_signal_player),
:convergence_profit_demand_high_strong_signal_player =
mean(:convergence_profit_demand_high_strong_signal_player),
:convergence_profit_demand_low_strong_signal_player =
mean(:convergence_profit_demand_low_strong_signal_player),
:convergence_profit_weak_signal_player =
mean(:convergence_profit_weak_signal_player),
:convergence_profit_strong_signal_player =
mean(:convergence_profit_strong_signal_player),
:price_response_to_demand_signal_mse =
(@passmissing mean(:price_response_to_demand_signal_mse_mean)),
:convergence_profit_demand_high = mean(:convergence_profit_demand_high),
:convergence_profit_demand_low = mean(:convergence_profit_demand_low),
)
end
@assert nrow(df_summary) == 132
# Question: how does the existence of the low demand state destabilize the high demand state / overall collusion, and to what extent?
# The question becomes: given my signal and my estimated demand state probability, which opponent do I believe I am competing against — the one who believes demand is low, or the one who believes it is high?
# in the case where own and opponents' signals are public, the high-high signal state yields the following probability curve over high state base frequency:
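# A sketch of the Bayes rule assumed behind these posterior helpers, with
# f = pr_high_demand and signal accuracy q = pr_signal_true:
#   P(high | own signal high)   = q * f / (q * f + (1 - q) * (1 - f))
#   P(high | both signals high) = q^2 * f / (q^2 * f + (1 - q)^2 * (1 - f))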
df_post_prob = DataFrame(
vcat([
(
pr_high_demand,
pr_signal_true,
post_prob_high_low_given_signal(pr_high_demand, pr_signal_true)[1],
post_prob_high_low_given_both_signals(pr_high_demand, pr_signal_true)[1],
pr_high_demand^2 * pr_signal_true,
) for pr_high_demand = 0.5:0.01:1 for pr_signal_true = 0.5:0.1:1
]),
) # squared to reflect high-high signals, for each opponent, independently
rename!(
df_post_prob,
[
:pr_high_demand,
:pr_signal_true,
:post_prob_high_given_signal_high,
:post_prob_high_given_both_signals_high,
:state_and_signals_agree_prob,
],
)
f11 = @chain df_post_prob begin
data(_) *
mapping(
:pr_high_demand,
:state_and_signals_agree_prob,
color = :pr_signal_true => nonnumeric => "Signal Strength",
) *
visual(Scatter)
draw(
axis = (
xticks = 0.5:0.1:1,
yticks = 0:0.1:1,
xlabel = "Probability High Demand",
ylabel = "Probability High Demand and Opponent Signal High Given Own Signal High",
),
)
end
save("plots/dddc/plot_11.svg", f11)
plt2 = @chain df_summary begin
stack(
[:profit_min_mean, :profit_max_mean],
variable_name = :profit_variable_name,
value_name = :profit_value,
)
@subset(:signal_is_strong == "Bool[0, 0]")
@sort(:frequency_high_demand)
data(_) *
mapping(
:frequency_high_demand => "High Demand Frequency",
:profit_value => "Average Profit",
color = :profit_variable_name => nonnumeric,
layout = :weak_signal_quality_level_str => nonnumeric,
) *
(visual(Scatter) + visual(Lines))
end
f2 = draw(
plt2,
legend = (position = :top, titleposition = :left, framevisible = true, padding = 5),
)
save("plots/dddc/plot_2.svg", f2)
plt20 = @chain df_summary begin
@subset(:signal_is_strong == "Bool[0, 0]")
@sort(:frequency_high_demand)
data(_) *
mapping(
:frequency_high_demand => "High Demand Frequency",
:profit_mean => "Average Profit",
color = :weak_signal_quality_level => nonnumeric => "Weak Signal Strength",
) *
(visual(Scatter) + visual(Lines))
end
f20 = draw(
plt20,
legend = (position = :top, titleposition = :left, framevisible = true, padding = 5),
)
save("plots/dddc/plot_20.svg", f20)
plt21 = @chain df_summary begin
@subset(
(:signal_is_strong == "Bool[0, 0]") &
!ismissing(:price_response_to_demand_signal_mse)
)
@sort(:frequency_high_demand)
data(_) *
mapping(
:frequency_high_demand => "High Demand Frequency",
:price_response_to_demand_signal_mse => "Mean Squared Error Price Difference by Demand Signal",
color = :weak_signal_quality_level => nonnumeric => "Weak Signal Strength",
) *
(visual(Scatter) + visual(Lines))
end
# NOTE: freq_high_demand == 1 intersect weak_signal_quality_level == 1 is excluded, as the low demand states are never explored, so the price response to demand signal is not defined
f21 = draw(
plt21,
legend = (position = :top, titleposition = :left, framevisible = true, padding = 5),
)
save("plots/dddc/plot_21.svg", f21)
plt22 = @chain df_summary begin
stack(
[:convergence_profit_demand_high, :convergence_profit_demand_low],
variable_name = :demand_level,
value_name = :profit,
)
@subset(:signal_is_strong == "Bool[0, 0]")
@transform(:demand_level = replace(:demand_level, "convergence_profit_demand_" => ""))
@sort(:frequency_high_demand)
data(_) *
mapping(
:frequency_high_demand => "High Demand Frequency",
:profit => "Average Profit",
color = :demand_level => nonnumeric => "Demand Level",
layout = :weak_signal_quality_level_str => nonnumeric,
) *
(visual(Scatter) + visual(Lines))
end
# NOTE: freq_high_demand == 1 intersect weak_signal_quality_level == 1 is excluded, as the low demand states are never explored, so the price response to demand signal is not defined
f22 = draw(
plt22,
legend = (position = :top, titleposition = :left, framevisible = true, padding = 5),
)
save("plots/dddc/plot_22.svg", f22)
plt221 = @chain df_summary begin
@subset(:signal_is_strong == "Bool[0, 0]")
stack(
[:profit_gain_min, :profit_gain_max],
variable_name = :min_max,
value_name = :profit_gain,
)
@transform(
:min_max = (:min_max == "profit_gain_min" ? "Per-Trial Min" : "Per-Trial Max")
)
@sort(:frequency_high_demand)
data(_) *
mapping(
:frequency_high_demand => "High Demand Frequency",
:profit_gain,
color = :min_max => nonnumeric => "",
# columns = :weak_signal_quality_level => nonnumeric,
layout = :weak_signal_quality_level_str => nonnumeric,
) *
(visual(Scatter) + visual(Lines))
end
# NOTE: freq_high_demand == 1 intersect weak_signal_quality_level == 1 is excluded, as the low demand states are never explored, so the price response to demand signal is not defined
f221 = draw(
plt221,
legend = (position = :top, titleposition = :left, framevisible = true, padding = 5),
axis = (xlabel = "High Demand Frequency", ylabel = "Profit Gain"),
)
save("plots/dddc/plot_221.svg", f221)
plt23 = @chain df_summary begin
@subset(:signal_is_strong == "Bool[0, 0]")
@transform(
:profit_gain_demand_all_min = :profit_gain_min,
:profit_gain_demand_all_max = :profit_gain_max,
)
stack(
[
:profit_gain_demand_high_min,
:profit_gain_demand_high_max,
:profit_gain_demand_low_min,
:profit_gain_demand_low_max,
:profit_gain_demand_all_min,
:profit_gain_demand_all_max,
],
variable_name = :profit_gain_type,
value_name = :profit_gain,
)
@transform(
:demand_level =
replace(:profit_gain_type, r"profit_gain_demand_([a-z]+)_.*" => s"\1")
)
@transform(
:statistic =
replace(:profit_gain_type, r"profit_gain_demand_[a-z]+_([a-z_]+)" => s"\1")
)
@select(
:statistic,
:profit_gain,
:demand_level,
:weak_signal_quality_level_str,
:frequency_high_demand
)
@sort(:frequency_high_demand)
data(_) *
mapping(
:frequency_high_demand => "High Demand Frequency",
:profit_gain => "Profit Gain",
marker = :statistic => nonnumeric => "Metric",
color = :demand_level => nonnumeric => "Demand Level",
layout = :weak_signal_quality_level_str => nonnumeric,
) *
(visual(Lines) + visual(Scatter))
end
# NOTE: freq_high_demand == 1 intersect weak_signal_quality_level == 1 is excluded, as the low demand states are never explored, so the price response to demand signal is not defined
f23 = draw(
plt23,
legend = (position = :top, titleposition = :left, framevisible = true, padding = 5),
axis = (
xticks = 0.5:0.1:1,
yticks = 0:0.1:1.2,
aspect = 1,
limits = (0.5, 1.05, 0.2, 1.2),
),
)
save("plots/dddc/plot_23.svg", f23)
plt24 = @chain df_summary begin
stack(
[
:profit_gain_demand_high_weak_signal_player,
:profit_gain_demand_low_weak_signal_player,
:profit_gain_demand_high_strong_signal_player,
:profit_gain_demand_low_strong_signal_player,
],
variable_name = :profit_gain_type,
value_name = :profit_gain,
)
@subset((:signal_is_strong == "Bool[1, 0]"))
@sort(:frequency_high_demand)
@transform(
:demand_level =
replace(:profit_gain_type, r"profit_gain_demand_([a-z]+)_.*" => s"\1")
)
@transform(
:signal_type = replace(
:profit_gain_type,
r"profit_gain_demand_[a-z]+_([a-z_]+)_signal_player" => s"\1",
)
)
@subset((:frequency_high_demand < 1) | (:demand_level == "high"))
data(_) *
mapping(
:frequency_high_demand => "High Demand Frequency",
:profit_gain,
marker = :demand_level => nonnumeric => "Demand Level",
layout = :weak_signal_quality_level_str => nonnumeric,
color = :signal_type => "Signal Strength",
) *
(visual(Scatter) + visual(Lines))
end
# NOTE: freq_high_demand == 1 intersect weak_signal_quality_level == 1 is excluded, as the low demand states are never explored, so the price response to demand signal is not defined
f24 = draw(
plt24,
legend = (
position = :top,
titleposition = :left,
framevisible = true,
padding = 5,
titlesize = 10,
labelsize = 10,
),
axis = (
# title = x -> string(x, "aaa"),
# subtitle = "(Solid line is profit for symmetric prices, shaded region shows range based on price options)",
xlabel = "High Demand Frequency",
ylabel = "Profit Gain",
xticks = 0.5:0.1:1,
yticks = 0:0.2:1,
aspect = 1,
limits = (0.5, 1.05, 0.0, 1.0),
),
)
save("plots/dddc/plot_24.svg", f24)
plt25 = @chain df_summary begin
stack(
[
:convergence_profit_demand_high_weak_signal_player,
:convergence_profit_demand_low_weak_signal_player,
:convergence_profit_demand_high_strong_signal_player,
:convergence_profit_demand_low_strong_signal_player,
],
variable_name = :convergence_profit_type,
value_name = :convergence_profit,
)
@subset((:signal_is_strong == "Bool[1, 0]") & (:frequency_high_demand != 1))
@sort(:frequency_high_demand)
@transform(
:convergence_profit_type =
replace(:convergence_profit_type, "convergence_profit_" => "")
)
@transform(:convergence_profit_type = replace(:convergence_profit_type, "_" => " "))
data(_) *
mapping(
:frequency_high_demand => "High Demand Frequency",
:convergence_profit => "Average Profit",
color = :convergence_profit_type => nonnumeric => "Demand Level",
layout = :weak_signal_quality_level_str => nonnumeric,
) *
(visual(Scatter) + visual(Lines))
end
# NOTE: freq_high_demand == 1 intersect weak_signal_quality_level == 1 is excluded, as the low demand states are never explored, so the price response to demand signal is not defined
f25 = draw(
plt25,
legend = (
position = :top,
titleposition = :left,
framevisible = true,
padding = 5,
titlesize = 10,
labelsize = 10,
nbanks = 2,
),
# axis = (width = 100, height = 100),
)
save("plots/dddc/plot_25.svg", f25)
plt26 = @chain df_summary begin
stack(
[
:percent_unexplored_states_weak_signal_player,
:percent_unexplored_states_strong_signal_player,
],
variable_name = :percent_unexplored_states_type,
value_name = :percent_unexplored_states_value,
)
@subset((:signal_is_strong == "Bool[1, 0]"))
@sort(:frequency_high_demand)
@transform(
:percent_unexplored_states_type = replace(
:percent_unexplored_states_type,
"percent_unexplored_states_" => "",
)
)
@transform(
:percent_unexplored_states_type =
replace(:percent_unexplored_states_type, "_" => " ")
)
data(_) *
mapping(
:frequency_high_demand => "High Demand Frequency",
:percent_unexplored_states_value => "Frequency Unexplored States",
color = :percent_unexplored_states_type => nonnumeric => "Signal Strength",
layout = :weak_signal_quality_level_str => nonnumeric => "Weak Signal Strength",
) *
(visual(Scatter) + visual(Lines))
end
# NOTE: freq_high_demand == 1 intersect weak_signal_quality_level == 1 is excluded, as the low demand states are never explored, so the price response to demand signal is not defined
f26 = draw(
plt26,
legend = (
position = :top,
titleposition = :left,
framevisible = true,
padding = 5,
titlesize = 10,
labelsize = 10,
nbanks = 2,
),
axis = (yticks = 0:0.1:1, xticks = 0:0.1:1, limits = (0.5, 1.02, 0.0, 0.85)),
)
save("plots/dddc/plot_26.svg", f26)
plt27 = @chain df_summary begin
stack(
[
:profit_gain_demand_high_weak_signal_player,
:profit_gain_demand_low_weak_signal_player,
:profit_gain_demand_high_strong_signal_player,
:profit_gain_demand_low_strong_signal_player,
],
variable_name = :profit_gain_type,
value_name = :profit_gain,
)
@subset((:signal_is_strong == "Bool[1, 0]") & (:frequency_high_demand != 1))
@sort(:frequency_high_demand)
@transform(
:demand_level =
replace(:profit_gain_type, r"profit_gain_demand_([a-z]+)_.*" => s"\1")
)
@transform(
:signal_type = replace(
:profit_gain_type,
r"profit_gain_demand_[a-z]+_([a-z_]+)_signal_player" => s"\1",
)
)
@transform(:signal_type = uppercasefirst(:signal_type) * " Signal Player")
@transform(:demand_level = uppercasefirst(:demand_level) * " Demand")
data(_) *
mapping(
:frequency_high_demand => "High Demand Frequency",
:profit_gain => "Profit Gain",
color = :weak_signal_quality_level => nonnumeric => "Weak Signal Strength",
row = :demand_level => nonnumeric => "Demand Level",
col = :signal_type => nonnumeric => "Signal Strength",
) *
(visual(Scatter) + visual(Lines))
end
# NOTE: freq_high_demand == 1 intersect weak_signal_quality_level == 1 is excluded, as the low demand states are never explored, so the price response to demand signal is not defined
f27 = draw(
plt27,
legend = (
position = :top,
titleposition = :left,
framevisible = true,
padding = 5,
titlesize = 10,
labelsize = 10,
),
)
save("plots/dddc/plot_27.svg", f27)
df_weak_weak_outcomes = @chain df begin
@subset((:signal_is_strong == "Bool[0, 0]") & (:frequency_high_demand < 1.0))
@transform(
:compensating_profit_gain =
(:profit_gain_demand_high[1] > :profit_gain_demand_high[2]) !=
(:profit_gain_demand_low[1] > :profit_gain_demand_low[2])
)
@groupby(:weak_signal_quality_level, :frequency_high_demand)
@combine(:pct_compensating_profit_gain = mean(:compensating_profit_gain),)
@sort(:frequency_high_demand)
end
plt_28 = @chain df_weak_weak_outcomes begin
data(_) *
mapping(
:frequency_high_demand => "High Demand Frequency",
:pct_compensating_profit_gain => "Frequency of Weak-Weak Outcomes with Compensating Profit Gain",
color = :weak_signal_quality_level => nonnumeric => "Weak Signal Strength",
) *
visual(Lines)
end
f28 = draw(
plt_28,
axis = (xticks = 0.5:0.1:1, yticks = 0:0.1:1, limits = (0.5, 1.02, 0.0, 1.0)),
)
save("plots/dddc/plot_28.svg", f28)
plt3 = @chain df_summary begin
@sort(:frequency_high_demand)
@transform(
:signal_is_strong =
:signal_is_strong == "Bool[0, 0]" ? "Weak-Weak" : "Strong-Weak"
)
data(_) *
mapping(
:frequency_high_demand => "High Demand Frequency",
:iterations_until_convergence_mean => "Iterations Until Convergence",
color = :weak_signal_quality_level => nonnumeric => "Weak Signal Strength",
layout = :signal_is_strong => nonnumeric,
) *
(visual(Scatter) + visual(Lines))
end
f3 = draw(plt3, axis = (xticks = 0.5:0.1:1,))
save("plots/dddc/plot_3.svg", f3)
plt4 = @chain df_summary begin
data(_) *
mapping(
:frequency_high_demand => "High Demand Frequency",
:profit_min_mean => "Minimum Player Profit per Trial",
color = :signal_is_strong => nonnumeric,
) *
(visual(Scatter) + linear())
end
f4 = draw(plt4)
save("plots/dddc/plot_4.svg", f4)
plt5 = @chain df_summary begin
data(_) *
mapping(
:frequency_high_demand => "High Demand Frequency",
:profit_max_mean => "Maximum Player Profit per Trial",
color = :signal_is_strong => nonnumeric,
) *
(visual(Scatter) + linear())
end
f5 = draw(plt5)
save("plots/dddc/plot_5.svg", f5)
α = Float64(0.125)
β = Float64(4e-1)
δ = 0.95
ξ = 0.1
n_prices = 15
max_iter = Int(1e6) # 1e8
price_index = 1:n_prices
competition_params_dict = Dict(
:high => CompetitionParameters(0.25, -0.25, (2, 2), (1, 1)),
:low => CompetitionParameters(0.25, 0.25, (2, 2), (1, 1)),
)
competition_solution_dict =
Dict(d_ => CompetitionSolution(competition_params_dict[d_]) for d_ in [:high, :low])
data_demand_digital_params = DataDemandDigitalParams(
weak_signal_quality_level = 0.99,
strong_signal_quality_level = 0.995,
signal_is_strong = [true, false],
frequency_high_demand = 0.9,
)
hyperparams = DDDCHyperParameters(
α,
β,
δ,
max_iter,
competition_solution_dict,
data_demand_digital_params;
convergence_threshold = Int(1e5),
)
plt = draw_price_diagnostic(hyperparams)
f6 = draw(
plt,
axis = (
title = "Profit Levels across Price Options",
subtitle = "(Solid line is profit for symmetric prices, shaded region shows range based on price options)",
xlabel = "Competitor's Price Choice",
),
legend = (position = :bottom, titleposition = :left, framevisible = true, padding = 5),
)
save("plots/dddc/plot_6.svg", f6)
| AlgorithmicCompetition | https://github.com/jeremiahpslewis/AlgorithmicCompetition.jl.git |
|
[
"MIT"
] | 0.1.5 | f09ca454bd05ecdf946ce4ebeaf8a7d687356d44 | code | 213 | using YAML
include("a1_viz.jl")
include("dddc_viz.jl")
d_ = Dict(
:n_simulations_aiapc => n_simulations_aiapc,
:n_simulations_dddc => n_simulations_dddc,
)
YAML.write_file("plots/viz_metadata.yml", d_)
| AlgorithmicCompetition | https://github.com/jeremiahpslewis/AlgorithmicCompetition.jl.git |
|
[
"MIT"
] | 0.1.5 | f09ca454bd05ecdf946ce4ebeaf8a7d687356d44 | docs | 1566 | # AlgorithmicCompetition.jl
[](https://zenodo.org/badge/latestdoi/570286360)
[](https://github.com/jeremiahpslewis/AlgorithmicCompetition.jl/actions/workflows/CI.yml)
[](https://www.bestpractices.dev/projects/7837)
[](https://github.com/SciML/ColPrac)
Tools for structuring and scaling research into algorithmic competition.
Components:
- Reinforcement learning models of algorithmic competition
## How to Run
```julia
import AlgorithmicCompetition
using Chain
using Statistics
using DataFrameMacros
using CSV
using ParallelDataTransfer
using Distributed
n_procs_ = 2 # update number of parallel processes
_procs = addprocs(
n_procs_,
topology = :master_worker,
exeflags = ["--threads=1", "--project=$(Base.active_project())"],
)
@everywhere begin
using Pkg
Pkg.instantiate()
using AlgorithmicCompetition
end
aiapc_results = AlgorithmicCompetition.run_aiapc()
```
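
The DDDC experiment has an analogous entry point. The sketch below mirrors the call used in this package's test suite; the parameter values are illustrative, not recommendations:

```julia
AlgorithmicCompetition.run_dddc(
    n_parameter_iterations = 1,
    max_iter = Int(2e5),
    convergence_threshold = Int(1e5),
    n_grid_increments = 3,
)
```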
For citations of works this project is based on, see `citations.bib`.
## AI / LLM Usage Statement
This project uses [Github Copilot](https://github.com/features/copilot) and [Chat-GPT 3](https://chat.openai.com) to assist software development and optimize code performance.
| AlgorithmicCompetition | https://github.com/jeremiahpslewis/AlgorithmicCompetition.jl.git |
|
[
"MIT"
] | 0.1.5 | f09ca454bd05ecdf946ce4ebeaf8a7d687356d44 | docs | 1152 | # Reproducing Data, Demand, and Digital Competition Graphics
In order to reproduce the graphics in *Data, Demand, and Digital Competition*, please do the following:
1. Install [Julia `v1.10`](https://julialang.org).
2. Set your current directory in a terminal shell to the `viz/` folder of this project.
3. Open a new Julia session.
4. Activate the local environment, `using Pkg; Pkg.activate(); Pkg.instantiate()`
5. Run the file `viz/run_viz.jl` (see the sketch below).
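
For reference, steps 4 and 5 amount to the following in a Julia session started from `viz/` (a minimal sketch, assuming the repository's default layout):

```julia
using Pkg
Pkg.activate(".")  # activate the viz/ environment
Pkg.instantiate()
include("run_viz.jl")
```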
If you would like to regenerate the data used in the visualizations, you need to install the `AlgorithmicCompetition` package located [here](https://github.com/jeremiahpslewis/AlgorithmicCompetition.jl), via `using Pkg; Pkg.add("https://github.com/jeremiahpslewis/AlgorithmicCompetition.jl")`. Then run either the `src/multiprocessing_template_aiapc.jl` or the `src/multiprocessing_template_dddc.jl` depending on which study you would like to reproduce, adjusting the number of cores and other parameters to match your system.
If you have questions, please ping me by creating an issue at the [Algorithmic Competition Github Repository](https://github.com/jeremiahpslewis/AlgorithmicCompetition.jl).
| AlgorithmicCompetition | https://github.com/jeremiahpslewis/AlgorithmicCompetition.jl.git |
|
[
"MIT"
] | 0.1.6 | 3016edfb32059c5a332b88ec3606b21c5c930ec8 | code | 491 | using Documenter, LorentzVectorHEP
makedocs(;
modules=[LorentzVectorHEP],
format = Documenter.HTML(
prettyurls = get(ENV, "CI", nothing) == "true",
assets=String[],
),
pages=[
"Introduction" => "index.md",
],
repo="https://github.com/JuliaHEP/LorentzVectorHEP.jl/blob/{commit}{path}#L{line}",
sitename="LorentzVectorHEP.jl",
authors="Jerry Ling and contributors",
)
deploydocs(;
repo="github.com/JulieHEP/LorentzVectorHEP.jl",
)
| LorentzVectorHEP | https://github.com/JuliaHEP/LorentzVectorHEP.jl.git |
|
[
"MIT"
] | 0.1.6 | 3016edfb32059c5a332b88ec3606b21c5c930ec8 | code | 728 | module LorentzVectorHEP
using LorentzVectors # provides x, y, z, t
export LorentzVectorCyl, LorentzVector
export px, py, pz, energy, fast_mass, pt, rapidity, eta, phi, mass
export deltaphi, deltar, deltaeta
export ΔR, Δϕ, Δη
export fromPtEtaPhiE
include("cartesian.jl")
include("cylindrical.jl")
# conversion
function LorentzVector(v::LorentzVectorCyl)
x = px(v)
y = py(v)
z = pz(v)
t = energy(v)
return LorentzVector(t, x, y, z)
end
function LorentzVectorCyl(v::LorentzVector)
t, x, y, z = v.t, v.x, v.y, v.z
pt2 = muladd(x, x, y^2)
pt = sqrt(pt2)
eta = asinh(z/pt)
phi = atan(y, x)
mass = sqrt(max(t^2 - pt2 - z^2, 0))
return LorentzVectorCyl(pt, eta, phi, mass)
end
end
| LorentzVectorHEP | https://github.com/JuliaHEP/LorentzVectorHEP.jl.git |
|
[
"MIT"
] | 0.1.6 | 3016edfb32059c5a332b88ec3606b21c5c930ec8 | code | 2367 | Base.zero(lv::T) where T<:LorentzVector = T(0,0,0,0)
mass2(lv::LorentzVector) = dot(lv, lv)
"""mass value - returns a negative number for spacelike 4-vectors"""
mass(lv::LorentzVector) = mass2(lv) < 0.0 ? -sqrt(-mass2(lv)) : sqrt(mass2(lv))
pt2(lv::LorentzVector) = muladd(lv.x, lv.x, lv.y^2)
pt(lv::LorentzVector) = sqrt(pt2(lv))
mt2(lv::LorentzVector) = lv.t^2 - lv.z^2
mt(lv::LorentzVector) = mt2(lv)<0 ? -sqrt(-mt2(lv)) : sqrt(mt2(lv))
mag(lv::LorentzVector) = sqrt(muladd(lv.x, lv.x, lv.y^2) + lv.z^2)
energy(lv::LorentzVector) = lv.t
px(lv::LorentzVector) = lv.x
py(lv::LorentzVector) = lv.y
pz(lv::LorentzVector) = lv.z
@inline function CosTheta(lv::LorentzVector)
fZ = lv.z
ptot = mag(lv)
return ifelse(ptot == 0.0, 1.0, fZ / ptot)
end
"""Pseudorapidity"""
function eta(lv::LorentzVector)
cosTheta = CosTheta(lv)
(cosTheta^2 < 1.0) && return -0.5 * log((1.0 - cosTheta) / (1.0 + cosTheta))
fZ = lv.z
iszero(fZ) && return 0.0
# Warning("PseudoRapidity","transverse momentum = 0! return +/- 10e10");
fZ > 0.0 && return 10e10
return -10e10
end
const η = eta
"""Rapidity"""
function rapidity(lv::LorentzVector)
pt_squared = pt2(lv)
abspz = abs(pz(lv))
if (energy(lv) == abspz) && (pt_squared == 0.0)
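# momentum is purely longitudinal with E == |pz|: rapidity diverges, so return a large signed sentinel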
return (-1)^(pz(lv) < 0)*(1e5 + abspz) # a very large value that depends on pz
end
m2 = max((energy(lv) + pz(lv))*(energy(lv) - pz(lv)) - pt_squared, 0.0) # mass^2
E_plus_z = energy(lv) + abspz
return (-1)^(pz(lv) > 0) * 0.5*log((pt_squared + m2)/(E_plus_z^2))
end
# Don't export "y" as an alias as for a normal 4-vector it's the second space coordinate
function phi(lv::LorentzVector)
return (lv.x == 0.0 && lv.y == 0.0) ? 0.0 : atan(lv.y, lv.x)
end
const ϕ = phi
function phi02pi(lv::LorentzVector)
return phi(lv) < 0.0 ? phi(lv) + 2π : phi(lv)
end
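"""Normalize an angle to the interval [-π, π)."""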
function phi_mpi_pi(x)
twopi = 2pi
while (x >= pi)
x -= twopi
end
while (x < -pi)
x += twopi
end
return x
end
deltaeta(lv1, lv2) = eta(lv1) - eta(lv2)
deltaphi(lv1, lv2) = phi_mpi_pi(phi(lv1) - phi(lv2))
function deltar(lv1, lv2)
deta = eta(lv1) - eta(lv2)
dphi = deltaphi(lv1, lv2)
return sqrt(fma(deta, deta, dphi * dphi))
end
function fromPxPyPzM(px, py, pz, m)
e = sqrt(muladd(px, px, py^2) + muladd(pz, pz, m^2))
return LorentzVector(e, px, py, pz)
end
| LorentzVectorHEP | https://github.com/JuliaHEP/LorentzVectorHEP.jl.git |
|
[
"MIT"
] | 0.1.6 | 3016edfb32059c5a332b88ec3606b21c5c930ec8 | code | 3648 | struct LorentzVectorCyl{T <: Number}
pt::T
eta::T
phi::T
mass::T
end
LorentzVectorCyl(pt, eta, phi, mass) = LorentzVectorCyl(promote(pt, eta, phi, mass)...)
Base.show(io::IO, v::LorentzVectorCyl) = print(io, "$(typeof(v))(pt=$(v.pt), eta=$(v.eta), phi=$(v.phi), mass=$(v.mass))")
Base.broadcastable(v::LorentzVectorCyl) = Ref(v)
"""
zero(LorentzVectorCyl)
zero(v::LorentzVectorCyl{T})
Constructs a zero four-vector.
"""
Base.zero(::Type{LorentzVectorCyl{T}}) where T = LorentzVectorCyl{T}(zero(T), zero(T), zero(T), zero(T))
Base.zero(::Type{LorentzVectorCyl}) = zero(LorentzVectorCyl{Float64})
Base.zero(lv::T) where T<:LorentzVectorCyl = T(0,0,0,0)
pt(lv::LorentzVectorCyl) = lv.pt
pt2(lv::LorentzVectorCyl) = lv.pt^2
eta(lv::LorentzVectorCyl) = lv.eta
phi(lv::LorentzVectorCyl) = lv.phi
mass(lv::LorentzVectorCyl) = lv.mass
mass2(lv::LorentzVectorCyl) = lv.mass^2
px(v::LorentzVectorCyl) = v.pt * cos(v.phi)
py(v::LorentzVectorCyl) = v.pt * sin(v.phi)
pz(v::LorentzVectorCyl) = v.pt * sinh(v.eta)
energy(v::LorentzVectorCyl) = sqrt(px(v)^2 + py(v)^2 + pz(v)^2 + v.mass^2)
function Base.:*(v::LorentzVectorCyl{T}, k::Real) where T
LorentzVectorCyl{T}(v.pt*k, v.eta, v.phi, v.mass*k)
end
function Base.:+(v1::LorentzVectorCyl{T}, v2::LorentzVectorCyl{W}) where {T,W}
m1, m2 = max(v1.mass, zero(v1.pt)), max(v2.mass, zero(v2.pt))
px1, px2 = px(v1), px(v2)
py1, py2 = py(v1), py(v2)
pz1, pz2 = pz(v1), pz(v2)
e1 = sqrt(px1^2 + py1^2 + pz1^2 + m1^2)
e2 = sqrt(px2^2 + py2^2 + pz2^2 + m2^2)
sumpx = px1+px2
sumpy = py1+py2
sumpz = pz1+pz2
ptsq = sumpx^2 + sumpy^2
pt = sqrt(ptsq)
eta = asinh(sumpz/pt)
phi = atan(sumpy, sumpx)
mass = sqrt(max(muladd(m1, m1, m2^2) + 2*e1*e2 - 2*(muladd(px1, px2, py1*py2) + pz1*pz2), zero(v1.pt)))
return LorentzVectorCyl(pt,eta,phi,mass)
end
function fast_mass(v1::LorentzVectorCyl, v2::LorentzVectorCyl)
# Calculate mass directly. Same as (v1+v2).mass except
# this skips the intermediate pt, eta, phi calculations.
# ~4x faster than (v1+v2).mass
pt1, pt2 = v1.pt, v2.pt
eta1, eta2 = v1.eta, v2.eta
phi1, phi2 = v1.phi, v2.phi
m1, m2 = v1.mass, v2.mass
# note, massless approximation is
# mass = sqrt(max(2*pt1*pt2*(cosh(eta1-eta2) - cos(phi1-phi2)), zero(pt1)))
sinheta1 = sinh(eta1)
sinheta2 = sinh(eta2)
tpt12 = 2*pt1*pt2
return @fastmath sqrt(max(fma(m1, m1, m2^2)
+ 2*sqrt((pt1^2*(1+sinheta1^2) + m1^2)*(pt2^2*(1+sinheta2^2) + m2^2))
- tpt12*sinheta1*sinheta2
- tpt12*cos(phi1-phi2), zero(pt1)))
end
"Rapidity"
function rapidity(lv::LorentzVectorCyl)
num = sqrt(lv.mass^2 + lv.pt^2 * cosh(lv.eta)^2) + lv.pt * sinh(lv.eta)
den = sqrt(lv.mass^2 + lv.pt^2)
return log(num/den)
end
# https://root.cern.ch/doc/v606/GenVector_2VectorUtil_8h_source.html#l00061
@inline function deltaphi(v1::LorentzVectorCyl, v2::LorentzVectorCyl)
dphi = v2.phi - v1.phi
dphi = dphi - 2*pi*(dphi > pi) + 2*pi*(dphi <= -pi)
return dphi
end
const Δϕ = deltaphi
@inline function deltaeta(v1::LorentzVectorCyl, v2::LorentzVectorCyl)
return v2.eta - v1.eta
end
const Δη = deltaeta
@inline function deltar2(v1::LorentzVectorCyl, v2::LorentzVectorCyl)
dphi = deltaphi(v1,v2)
deta = deltaeta(v1,v2)
return muladd(dphi, dphi, deta^2)
end
deltar(v1::LorentzVectorCyl, v2::LorentzVectorCyl) = sqrt(deltar2(v1, v2))
const ΔR = deltar
function fromPtEtaPhiE(pt, eta, phi, E)
m2 = E^2 - pt^2 - (sinh(eta) * pt)^2
m = sign(m2) * sqrt(abs(m2))
return LorentzVectorCyl(pt, eta, phi, m)
end
| LorentzVectorHEP | https://github.com/JuliaHEP/LorentzVectorHEP.jl.git |
|
[
"MIT"
] | 0.1.6 | 3016edfb32059c5a332b88ec3606b21c5c930ec8 | code | 3389 | using LorentzVectorHEP
using Test
@testset "calculations" begin
v1 = LorentzVectorCyl(1761.65,-2.30322,-2.5127,0.105652)
v2 = LorentzVectorCyl(115.906,-2.28564,-2.50781,0.105713)
@test energy(v1) ≈ 8901.870789524375 atol=1e-6
@test px(v1) ≈ -1424.610065192358 atol=1e-6
@test py(v1) ≈ -1036.2899616674022 atol=1e-6
@test pz(v1) ≈ -8725.817601790963 atol=1e-6
@test rapidity(v1) ≈ -2.3032199982371715 atol=1e-6
@test LorentzVectorHEP.pt2(v1) ≈ 3.1034107225e6 atol=1e-6
@test LorentzVectorHEP.mass2(v1) ≈ 0.011162345103999998 atol=1e-6
@test isapprox((v1+v2).mass, 8.25741602000877, atol=1e-6)
@test isapprox(fast_mass(v1,v2), 8.25741602000877, atol=1e-6)
v1 = LorentzVectorCyl(43.71242f0, 1.4733887f0, 1.6855469f0, 0.10571289f0)
v2 = LorentzVectorCyl(36.994347f0, 0.38684082f0, -1.3935547f0, 0.10571289f0)
@test (v1+v2).mass == 92.55651f0
@test fast_mass(v1,v2) == 92.55651f0
@test isapprox(deltar(v1,v2), 3.265188f0, atol=1e-6)
@test isapprox(deltaphi(v1,v2), -3.0791016f0, atol=1e-6)
v3 = v1*5
@test v3.pt == 5*v1.pt
@test v3.mass == 5*v1.mass
@test v3.eta == v1.eta
@test v3.phi == v1.phi
vcart1 = LorentzVector(10.0, -2.3, 4.5, 0.23)
@test rapidity(vcart1) ≈ 0.02300405695442185 atol=1e-9
@test eta(vcart1) ≈ 0.045495409709778126 atol=1e-9
@test phi(vcart1) ≈ 2.0432932623119604 atol=1e-9
vcart2 = LorentzVector(10.0, 2.7, -4.1, -0.21)
@test rapidity(vcart2) ≈ -0.021003087817077763 atol=1e-9
@test eta(vcart2) ≈ -0.04276400891568771 atol=1e-9
@test phi(vcart2) ≈ -0.9884433806509134 atol=1e-9
@test LorentzVectorHEP.phi02pi(vcart2) ≈ 5.294741926528673 atol=1e-9
@test deltaeta(vcart1, vcart2) ≈ 0.08825941862546584 atol=1e-9
@test deltaphi(vcart1, vcart2) ≈ 3.0317366429628736 atol=1e-9
vcart3 = LorentzVector(66.0, 0.0, 0.0, 66.0)
@test rapidity(vcart3) ≈ 100066.0 atol=1e-9
vcart4 = LorentzVector(4.4, 8.1, 2.2, 3.3)
@test mass(vcart4) ≈ -7.872737770305829 atol=1e-9
end
@testset "summing" begin
v1 = LorentzVectorCyl(1761.65,-2.30322,-2.5127,0.105652)
v2 = LorentzVectorCyl(115.906,-2.28564,-2.50781,0.105713)
v3 = LorentzVectorCyl(43.71242f0, 1.4733887f0, 1.6855469f0, 0.10571289f0)
v4 = LorentzVectorCyl(36.994347f0, 0.38684082f0, -1.3935547f0, 0.10571289f0)
vs = [v1, v2, v3, v4]
@test sum(vs).mass ≈ 2153.511000993
@test sum(LorentzVectorCyl[]).mass ≈ 0
end
@testset "broadcasting" begin
pts = [1761.65,115.906,43.712420,36.994347]
etas = [-2.30322,-2.28564,1.4733887,0.38684082]
phis = [-2.5127,-2.50781,1.6855469,-1.3935547]
mass = 0.105652
vs = LorentzVectorCyl.(pts, etas, phis, mass)
@test all([v.mass for v in vs] .== mass)
@test fast_mass.(vs[1], vs[2:end]) == fast_mass.(Ref(vs[1]), vs[2:end])
end
@testset "conversions" begin
v1 = LorentzVectorCyl(1761.65,-2.30322,-2.5127,0.105652)
v2 = LorentzVectorCyl(LorentzVector(v1))
@test v1.pt ≈ v2.pt
@test v1.eta ≈ v2.eta
@test v1.phi ≈ v2.phi
@test v1.mass ≈ v2.mass atol=1e-6
for func in (px, py, pz, energy, pt, eta, phi, mass)
        @test func(v1) ≈ func(LorentzVector(v1))
end
end
@testset "fromPtEtaPhiE" begin
v1 = LorentzVectorCyl(1761.65,-2.30322,-2.5127,0.105652)
v2 = fromPtEtaPhiE(v1.pt, v1.eta, v1.phi, energy(v1))
@test v1.mass ≈ v2.mass atol=1e-6
end | LorentzVectorHEP | https://github.com/JuliaHEP/LorentzVectorHEP.jl.git |
|
[
"MIT"
] | 0.1.6 | 3016edfb32059c5a332b88ec3606b21c5c930ec8 | docs | 784 | # LorentzVectorHEP
Provides two types (and the conversion between the two):
- `LorentzVector` (energy, px, py, pz) (from [LorentzVectors.jl](https://github.com/JLTastet/LorentzVectors.jl))
- `LorentzVectorCyl` (pt, eta, phi, mass)
You can also use `fromPtEtaPhiE(pt, eta, phi, energy) --> LorentzVectorCyl`.
The following functions are defined for both of them:
```julia
px, py, pz, energy, pt, rapidity, eta, phi, mass
```
as well as these utility functions:
```julia
deltar, deltaphi, deltaeta, mt, mt2
```
(some of them have aliases, `ΔR, Δϕ, Δη`)
There are some unexported methods which are useful for more specialist use cases:
```julia
mass2, pt2, mt, mt2, mag
```
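A minimal usage sketch (the numeric values are only illustrative):
```julia
using LorentzVectorHEP
v1 = LorentzVectorCyl(1761.65, -2.30322, -2.5127, 0.105652)  # pt, eta, phi, mass
v2 = LorentzVectorCyl(115.906, -2.28564, -2.50781, 0.105713)
(v1 + v2).mass      # invariant mass of the pair
fast_mass(v1, v2)   # same quantity, skipping the intermediate pt/eta/phi
deltar(v1, v2)      # ΔR separation in (eta, phi)
vcart = LorentzVector(v1)  # convert to Cartesian (t, x, y, z)
eta(vcart), phi(vcart)
```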
## LHC coordinate system

| LorentzVectorHEP | https://github.com/JuliaHEP/LorentzVectorHEP.jl.git |
|
[
"MIT"
] | 0.1.6 | 3016edfb32059c5a332b88ec3606b21c5c930ec8 | docs | 53 | # APIs
```@autodocs
Modules = [LorentzVectorHEP]
```
| LorentzVectorHEP | https://github.com/JuliaHEP/LorentzVectorHEP.jl.git |
|
[
"MIT"
] | 2.0.1 | dfe487a88864ca8e58ff2c7c8149d0b6dc2bc94d | code | 568 | using Documenter, AWSBatch
makedocs(;
modules=[AWSBatch],
format=Documenter.HTML(
prettyurls=get(ENV, "CI", nothing) == "true",
assets=[
"assets/invenia.css",
],
),
pages=[
"Home" => "index.md",
],
repo="https://github.com/JuliaCloud/AWSBatch.jl/blob/{commit}{path}#L{line}",
sitename="AWSBatch.jl",
authors="Invenia Technical Computing",
strict = true,
checkdocs = :none,
)
deploydocs(;
repo="github.com/JuliaCloud/AWSBatch.jl",
push_preview=true,
devbranch = "main"
)
| AWSBatch | https://github.com/JuliaCloud/AWSBatch.jl.git |
|
[
"MIT"
] | 2.0.1 | dfe487a88864ca8e58ff2c7c8149d0b6dc2bc94d | code | 7370 | module AWSBatch
using AWS
using AutoHashEquals
using OrderedCollections: OrderedDict
using Dates
using Memento
using Mocking
@service Batch
@service Cloudwatch_Logs
export BatchJob, ComputeEnvironment, BatchEnvironmentError, BatchJobError
export JobQueue, JobDefinition, JobState, LogEvent
export run_batch, describe, status, status_reason, wait, log_events, isregistered, register, deregister
export list_job_queues, list_job_definitions, create_compute_environment, create_job_queue
const logger = getlogger(@__MODULE__)
# Register the module level logger at runtime so that folks can access the logger via `getlogger(MyModule)`
# NOTE: If this line is not included then the precompiled `MyModule.logger` won't be registered at runtime.
__init__() = Memento.register(logger)
include("exceptions.jl")
include("log_event.jl")
include("compute_environment.jl")
include("job_queue.jl")
include("job_state.jl")
include("job_definition.jl")
include("batch_job.jl")
"""
run_batch(;
name::AbstractString="",
queue::AbstractString="",
region::AbstractString="",
definition::Union{AbstractString, JobDefinition, Nothing}=nothing,
image::AbstractString="",
vcpus::Integer=1,
memory::Integer=-1,
role::AbstractString="",
cmd::Cmd=``,
num_jobs::Integer=1,
parameters::Dict{String, String}=Dict{String, String}(),
) -> BatchJob
Handles submitting a BatchJob based on various potential defaults.
For example, default job fields can be inferred from an existing job definition or an
existing job (if currently running in a batch job).
Order of priority from highest to lowest:
1. Explicit arguments passed in via `kwargs`.
2. Inferred environment (e.g., `AWS_BATCH_JOB_ID` environment variable set)
3. Job definition parameters
If no valid job definition exists (see [`AWSBatch.job_definition_arn`](@ref)) then a new job
definition will be created and registered based on the job parameters.
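# Example
A hypothetical submission; the queue name, image URI, and role ARN below are placeholders:
```julia
job = run_batch(;
    name = "demo",
    definition = "demo",
    queue = "HighPriority",
    image = "000000000000.dkr.ecr.us-east-1.amazonaws.com/demo:latest",
    role = "arn:aws:iam::000000000000:role/demo-job-role",
    vcpus = 1,
    memory = 1024,
    cmd = `julia -e 'println("Hello World!")'`,
)
wait(job, [AWSBatch.SUCCEEDED])
```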
"""
function run_batch(;
name::AbstractString="",
queue::AbstractString="",
region::AbstractString="",
definition::Union{AbstractString, JobDefinition, Nothing}=nothing,
image::AbstractString="",
vcpus::Integer=1,
memory::Integer=-1,
role::AbstractString="",
cmd::Cmd=``,
num_jobs::Integer=1,
parameters::Dict{String, String}=Dict{String, String}(),
allow_job_registration::Bool=true,
aws_config::AbstractAWSConfig=global_aws_config(),
)
if isa(definition, AbstractString)
definition = isempty(definition) ? nothing : definition
end
# Determine if the job definition already exists and update the default job parameters
if definition !== nothing
response = describe_job_definition(definition; aws_config=aws_config)
if !isempty(response["jobDefinitions"])
details = first(response["jobDefinitions"])
container = details["containerProperties"]
isempty(image) && (image = container["image"])
isempty(role) && (role = container["jobRoleArn"])
# Update container override parameters
vcpus == 1 && (vcpus = container["vcpus"])
memory < 0 && (memory = container["memory"])
isempty(cmd) && (cmd = Cmd(Vector{String}(container["command"])))
end
end
# Get inferred environment parameters
if haskey(ENV, "AWS_BATCH_JOB_ID")
# Environmental variables set by the AWS Batch service. They were discovered by
# inspecting the running AWS Batch job in the ECS task interface.
job_id = ENV["AWS_BATCH_JOB_ID"]
job_queue = ENV["AWS_BATCH_JQ_NAME"]
# if not specified, get region from the aws_config
isempty(region) && (region = aws_config.region)
# Requires permissions to access to "batch:DescribeJobs"
response = @mock Batch.describe_jobs([job_id]; aws_config=aws_config)
# Use the job's description to only update fields that are using the default
        # values since explicit arguments passed in via `kwargs` have higher priority
if length(response["jobs"]) > 0
details = first(response["jobs"])
# Update the job's required parameters
isempty(name) && (name = details["jobName"])
definition === nothing && (definition = details["jobDefinition"])
isempty(queue) && (queue = job_queue)
# Update the container parameters
container = details["container"]
isempty(image) && (image = container["image"])
isempty(role) && (role = container["jobRoleArn"])
# Update container overrides
vcpus == 1 && (vcpus = container["vcpus"])
memory < 0 && (memory = container["memory"])
isempty(cmd) && (cmd = Cmd(Vector{String}(container["command"])))
else
warn(logger, "No jobs found with id: $job_id.")
end
end
# Error if required parameters were not explicitly set and cannot be inferred
if isempty(name) || isempty(queue) || memory < 0
throw(BatchEnvironmentError(
"Unable to perform AWS Batch introspection when not running within " *
"an AWS Batch job. Current job parameters are: " *
"\nname=$name" *
"\nqueue=$queue" *
"\nmemory=$memory"
))
end
# Reuse a previously registered job definition if available.
if isa(definition, AbstractString)
reusable_job_definition_arn = job_definition_arn(definition; image=image, role=role)
if reusable_job_definition_arn !== nothing
definition = JobDefinition(reusable_job_definition_arn)
end
elseif definition === nothing
        # Use the job name as the definition name since the definition name was not specified
definition = name
end
# If no job definition exists that can be reused, a new job definition is created
# under the current job specifications.
if isa(definition, AbstractString)
if allow_job_registration
definition = register(
definition;
image=image,
role=role,
vcpus=vcpus,
memory=memory,
cmd=cmd,
parameters=parameters,
aws_config=aws_config,
)
else
throw(BatchEnvironmentError(string(
"Attempting to register job definition \"$definition\" but registering ",
"job definitions is disallowed. Current job definition parameters are: ",
"\nimage=$image",
"\nrole=$role",
"\nvcpus=$vcpus",
"\nmemory=$memory",
"\ncmd=$cmd",
"\nparameters=$parameters",
)))
end
end
# Parameters that can be overridden are `memory`, `vcpus`, `command`, and `environment`
# See https://docs.aws.amazon.com/batch/latest/APIReference/API_ContainerOverrides.html
container_overrides = Dict(
"vcpus" => vcpus,
"memory" => memory,
"command" => cmd.exec,
)
return submit(
name,
definition,
JobQueue(queue);
container=container_overrides,
parameters=parameters,
num_jobs=num_jobs,
)
end
end # AWSBatch
| AWSBatch | https://github.com/JuliaCloud/AWSBatch.jl.git |
|
[
"MIT"
] | 2.0.1 | dfe487a88864ca8e58ff2c7c8149d0b6dc2bc94d | code | 5883 |
"""
BatchJob
Stores a batch job id in order to:
- `describe` a job and its parameters
- check on the `status` of a job
- `wait` for a job to complete
- fetch `log_events`
# Fields
- `id::AbstractString`: jobId
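For example, to wrap an existing job id (the id shown is a placeholder):
```julia
job = BatchJob("00000000-0000-0000-0000-000000000000")
status(job)
```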
"""
@auto_hash_equals struct BatchJob
id::AbstractString
end
"""
submit(
name::AbstractString,
definition::JobDefinition,
queue::JobQueue;
container::AbstractDict=Dict(),
parameters::Dict{String,String}=Dict{String, String}(),
num_jobs::Integer=1,
) -> BatchJob
Handles submitting the batch job. Returns a `BatchJob` wrapper for the id.
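For example, a sketch of a direct submission (the ARN and names are placeholders):
```julia
definition = JobDefinition("arn:aws:batch:us-east-1:000000000000:job-definition/demo:1")
job = submit("demo-job", definition, JobQueue("HighPriority"))
```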
"""
function submit(
name::AbstractString,
definition::JobDefinition,
queue::JobQueue;
container::AbstractDict=Dict(),
parameters::Dict{String,String}=Dict{String, String}(),
num_jobs::Integer=1,
aws_config::AbstractAWSConfig=global_aws_config(),
)
debug(logger, "Submitting job \"$name\"")
input = OrderedDict(
"parameters" => parameters,
"containerOverrides" => container,
)
if num_jobs > 1
# https://docs.aws.amazon.com/batch/latest/userguide/array_jobs.html
@assert 2 <= num_jobs <= 10_000
push!(input, "arrayProperties" => Dict("size" => num_jobs))
end
debug(logger, "Input: $input")
response = @mock Batch.submit_job(definition.arn, name, queue.arn, input; aws_config=aws_config)
job = BatchJob(response["jobId"])
if num_jobs > 1
info(logger, "Submitted array job \"$(name)\" ($(job.id), n=$(num_jobs))")
else
info(logger, "Submitted job \"$(name)\" ($(job.id))")
end
return job
end
"""
describe(job::BatchJob) -> Dict
Provides details about the AWS batch job.
"""
function describe(job::BatchJob; aws_config::AbstractAWSConfig=global_aws_config())
response = @mock Batch.describe_jobs([job.id]; aws_config=aws_config)
isempty(response["jobs"]) && error(logger, "Job $(job.id) not found.")
debug(logger, "Job $(job.id): $response")
return first(response["jobs"])
end
"""
JobDefinition
Returns the job definition corresponding to a batch job.
"""
function JobDefinition(job::BatchJob; aws_config::AbstractAWSConfig=global_aws_config())
JobDefinition(describe(job)["jobDefinition"]; aws_config=aws_config)
end
"""
status(job::BatchJob) -> JobState
Returns the current status of a job.
"""
function status(job::BatchJob; aws_config::AbstractAWSConfig=global_aws_config())::JobState
details = describe(job; aws_config=aws_config)
return parse(JobState, details["status"])
end
"""
status_reason(job::BatchJob) -> Union{String, Nothing}
A short, human-readable string to provide additional details about the current status of the
job.
"""
function status_reason(job::BatchJob; aws_config::AbstractAWSConfig=global_aws_config())
details = describe(job; aws_config=aws_config)
return get(details, "statusReason", nothing)
end
"""
wait(
cond::Function,
job::BatchJob;
timeout=600,
delay=5
)
Polls the batch job state until `cond(state)` returns `false`.
The loop will not catch any exceptions raised while polling.
The polling interval can be controlled with `delay` and `timeout` provides a maximum
polling time.
# Examples
```julia
julia> wait(state -> state < SUCCEEDED, job)
true
```
"""
function Base.wait(
cond::Function,
job::BatchJob;
timeout=600,
delay=5,
aws_config::AbstractAWSConfig=global_aws_config(),
)
completed = false
last_state = PENDING
initial = true
start_time = time() # System time in seconds since epoch
while time() - start_time < timeout
state = status(job; aws_config=aws_config)
if state != last_state || initial
info(logger, "$(job.id) status $state")
if !cond(state)
completed = true
break
end
last_state = state
end
initial && (initial = false)
sleep(delay)
end
if !completed
message = "Waiting on job $(job.id) timed out"
if !initial
            message *= ". Last known state $last_state"
end
throw(BatchJobError(job.id, message))
end
return completed
end
"""
wait(
job::BatchJob,
cond::Vector{JobState}=[RUNNING, SUCCEEDED],
failure::Vector{JobState}=[FAILED];
kwargs...,
)
Polls the batch job state until it hits one of the conditions in `cond`.
The loop will exit if it hits a `failure` condition and will not catch any exceptions.
The polling interval can be controlled with `delay` and `timeout` provides a maximum
polling time.
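# Examples
For example, to block until the job succeeds (throwing a `BatchJobError` on failure):
```julia
julia> wait(job, [AWSBatch.SUCCEEDED]; timeout=900)
true
```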
"""
function Base.wait(
job::BatchJob,
cond::Vector{JobState}=[RUNNING, SUCCEEDED],
failure::Vector{JobState}=[FAILED];
kwargs...,
)
wait(job; kwargs...) do state
if state in cond
false
elseif state in failure
throw(BatchJobError(job.id, "Job $(job.id) hit failure condition $state"))
false
else
true
end
end
end
"""
log_events(job::BatchJob) -> Union{Vector{LogEvent}, Nothing}
Fetches the `logStreamName` from the job description, retrieves the CloudWatch logs, and returns a vector of log events.
If the log stream does not currently exist then `nothing` is returned.
NOTES:
- The `logStreamName` isn't available until the job is RUNNING, so you may want to use
`wait(job)` or `wait(job, [AWSBatch.SUCCEEDED])` prior to calling this function.
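A typical pattern (sketch):
```julia
wait(job, [AWSBatch.SUCCEEDED])
events = log_events(job)
events === nothing || print(stdout, events)
```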
"""
function log_events(job::BatchJob)
job_details = describe(job)
if haskey(job_details["container"], "logStreamName")
stream = job_details["container"]["logStreamName"]
else
return nothing
end
info(logger, "Fetching log events from $stream")
return log_events("/aws/batch/job", stream)
end
| AWSBatch | https://github.com/JuliaCloud/AWSBatch.jl.git |
|
[
"MIT"
] | 2.0.1 | dfe487a88864ca8e58ff2c7c8149d0b6dc2bc94d | code | 3182 |
"""
ComputeEnvironment
An object representing an AWS Batch compute environment.
See [`AWSBatch.create_compute_environment`](@ref).
"""
struct ComputeEnvironment
arn::String
function ComputeEnvironment(ce::AbstractString; aws_config::AbstractAWSConfig=global_aws_config())
arn = compute_environment_arn(ce; aws_config=aws_config)
arn === nothing && error("No compute environment ARN found for $ce")
new(arn)
end
end
Base.:(==)(a::ComputeEnvironment, b::ComputeEnvironment) = a.arn == b.arn
function describe(ce::ComputeEnvironment; aws_config::AbstractAWSConfig=global_aws_config())
describe_compute_environment(ce; aws_config=aws_config)
end
function max_vcpus(ce::ComputeEnvironment; aws_config::AbstractAWSConfig=global_aws_config())
describe(ce; aws_config=aws_config)["computeResources"]["maxvCpus"]
end
"""
create_compute_environment(name;
managed=true,
role="",
resources=Dict(),
enabled=true,
tags=Dict(),
aws_config=global_aws_config())
Create a managed (the default) or unmanaged compute environment named `name`.
See the AWS docs [here](https://docs.aws.amazon.com/batch/latest/APIReference/API_CreateComputeEnvironment.html).
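For example (a sketch: the ARNs and subnet id are placeholders, and the `resources` keys
follow the AWS `computeResources` schema):
```julia
create_compute_environment("demo-ce";
    role="arn:aws:iam::000000000000:role/AWSBatchServiceRole",
    resources=Dict(
        "type" => "EC2",
        "minvCpus" => 0,
        "maxvCpus" => 16,
        "instanceTypes" => ["optimal"],
        "instanceRole" => "arn:aws:iam::000000000000:instance-profile/ecsInstanceRole",
        "subnets" => ["subnet-00000000"],
    ))
```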
"""
function create_compute_environment(name::AbstractString;
managed::Bool=true,
role::AbstractString="",
resources::AbstractDict=Dict{String,Any}(),
enabled::Bool=true,
tags::AbstractDict=Dict{String,Any}(),
aws_config::AbstractAWSConfig=global_aws_config())
type = managed ? "MANAGED" : "UNMANAGED"
args = Dict{String,Any}()
isempty(role) || (args["serviceRole"] = role)
isempty(resources) || (args["computeResources"] = resources)
isempty(tags) || (args["tags"] = tags)
enabled || (args["state"] = "DISABLED")
return @mock Batch.create_compute_environment(name, type, args; aws_config=aws_config)
end
function compute_environment_arn(ce::AbstractString; aws_config::AbstractAWSConfig=global_aws_config())
startswith(ce, "arn:") && return ce
json = describe_compute_environment(ce; aws_config=aws_config)
isempty(json) ? nothing : json["computeEnvironmentArn"]
end
function describe_compute_environment(ce::ComputeEnvironment;
aws_config::AbstractAWSConfig=global_aws_config())
describe_compute_environment(ce.arn; aws_config=aws_config)
end
function describe_compute_environment(ce::AbstractString;
aws_config::AbstractAWSConfig=global_aws_config())::OrderedDict
json = @mock Batch.describe_compute_environments(Dict("computeEnvironments" => [ce]);
aws_config=aws_config)
envs = json["computeEnvironments"]
len = length(envs)::Int
@assert len <= 1
return len == 1 ? first(envs) : OrderedDict()
end
| AWSBatch | https://github.com/JuliaCloud/AWSBatch.jl.git |
|
[
"MIT"
] | 2.0.1 | dfe487a88864ca8e58ff2c7c8149d0b6dc2bc94d | code | 345 | struct BatchEnvironmentError <: Exception
message::String
end
Base.showerror(io::IO, e::BatchEnvironmentError) = print(io, "BatchEnvironmentError: ", e.message)
struct BatchJobError <: Exception
job_id::AbstractString
message::String
end
Base.showerror(io::IO, e::BatchJobError) = print(io, "BatchJobError: $(e.job_id): ", e.message)
| AWSBatch | https://github.com/JuliaCloud/AWSBatch.jl.git |
|
[
"MIT"
] | 2.0.1 | dfe487a88864ca8e58ff2c7c8149d0b6dc2bc94d | code | 5876 | """
JobDefinition
Stores the job definition arn including the revision.
"""
@auto_hash_equals struct JobDefinition
arn::AbstractString
function JobDefinition(name::AbstractString; aws_config::AbstractAWSConfig=global_aws_config())
if startswith(name, "arn:")
new(name)
else
arn = job_definition_arn(name; aws_config=aws_config)
arn === nothing && error("No job definition ARN found for $name")
new(arn)
end
end
end
"""
job_definition_arn(
definition_name::AbstractString;
image::AbstractString="",
role::AbstractString="",
aws_config::AbstractAWSConfig=global_aws_config(),
) -> Union{AbstractString, Nothing}
Looks up the ARN (Amazon Resource Name) for the latest job definition that can be reused.
Returns a JobDefinition with the ARN that can be reused or `nothing`.
A job definition can only be reused if:
1. status = ACTIVE
2. type = container
3. image = the current job's image
4. jobRoleArn = the current job's role
"""
function job_definition_arn(
definition_name::AbstractString;
image::AbstractString="",
role::AbstractString="",
aws_config::AbstractAWSConfig=global_aws_config(),
)
response = describe_job_definition(definition_name; aws_config=aws_config)
if !isempty(response["jobDefinitions"])
latest = first(response["jobDefinitions"])
for definition in response["jobDefinitions"]
if definition["status"] == "ACTIVE" && definition["revision"] > latest["revision"]
latest = definition
end
end
if (
latest["status"] == "ACTIVE" &&
latest["type"] == "container" &&
(latest["containerProperties"]["image"] == image || isempty(image)) &&
(latest["containerProperties"]["jobRoleArn"] == role || isempty(role))
)
info(
logger,
string(
"Found previously registered job definition: ",
"\"$(latest["jobDefinitionArn"])\"",
)
)
return latest["jobDefinitionArn"]
end
end
notice(
logger,
string(
"Did not find a previously registered ACTIVE job definition for ",
"\"$definition_name\".",
)
)
return nothing
end
"""
register(
definition_name::AbstractString;
role::AbstractString="",
image::AbstractString="",
vcpus::Integer=1,
memory::Integer=1024,
cmd::Cmd=``,
region::AbstractString="",
parameters::Dict{String,String}=Dict{String, String}(),
) -> JobDefinition
Registers a new job definition.
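For example (a sketch: the image URI and role ARN are placeholders):
```julia
definition = register("demo";
    image="000000000000.dkr.ecr.us-east-1.amazonaws.com/julia:latest",
    role="arn:aws:iam::000000000000:role/demo-job-role",
    vcpus=1,
    memory=1024,
    cmd=`julia -e 'println("Hello World!")'`)
```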
"""
function register(
definition_name::AbstractString;
image::AbstractString="",
role::AbstractString="",
type::AbstractString="container",
vcpus::Integer=1,
memory::Integer=1024,
cmd::Cmd=``,
parameters::Dict{String, String}=Dict{String, String}(),
aws_config::AbstractAWSConfig=global_aws_config(),
)
debug(logger, "Registering job definition \"$definition_name\"")
input = OrderedDict(
"parameters" => parameters,
"containerProperties" => OrderedDict(
"image" => image,
"vcpus" => vcpus,
"memory" => memory,
"command" => cmd.exec,
"jobRoleArn" => role,
),
)
response = @mock Batch.register_job_definition(definition_name, type, input; aws_config=aws_config)
definition = JobDefinition(response["jobDefinitionArn"]; aws_config=aws_config)
info(logger, "Registered job definition \"$(definition.arn)\"")
return definition
end
"""
    deregister(definition::JobDefinition)
Deregisters an AWS Batch job definition.
"""
function deregister(definition::JobDefinition; aws_config::AbstractAWSConfig=global_aws_config())
debug(logger, "Deregistering job definition \"$(definition.arn)\"")
resp = @mock Batch.deregister_job_definition(definition.arn; aws_config=aws_config)
info(logger, "Deregistered job definition \"$(definition.arn)\"")
end
"""
isregistered(definition::JobDefinition; aws_config=global_aws_config()) -> Bool
Checks if a JobDefinition is registered.
"""
function isregistered(definition::JobDefinition; aws_config::AbstractAWSConfig=global_aws_config())
j = describe(definition; aws_config=aws_config)
return any(d -> d["status"] == "ACTIVE", get(j, "jobDefinitions", []))
end
"""
list_job_definitions(;aws_config=global_aws_config())
Get a list of `JobDefinition` objects via `Batch.describe_job_definitions()`.
"""
function list_job_definitions(;aws_config::AbstractAWSConfig=global_aws_config())
job_definitions = Batch.describe_job_definitions(; aws_config=aws_config)["jobDefinitions"]
    return [JobDefinition(jd["jobDefinitionArn"]) for jd in job_definitions]
end
"""
describe(definition::JobDefinition; aws_config=global_aws_config()) -> Dict
Describes a job definition as a dictionary. Requires the IAM permission
"batch:DescribeJobDefinitions".
"""
function describe(definition::JobDefinition; aws_config::AbstractAWSConfig=global_aws_config())
describe_job_definition(definition; aws_config=aws_config)
end
function describe_job_definition(definition::JobDefinition;
aws_config::AbstractAWSConfig=global_aws_config())
describe_job_definition(definition.arn; aws_config=aws_config)
end
function describe_job_definition(definition::AbstractString;
aws_config::AbstractAWSConfig=global_aws_config())
query = if startswith(definition, "arn:")
Dict("jobDefinitions" => [definition])
else
Dict("jobDefinitionName" => definition)
end
return @mock Batch.describe_job_definitions(query; aws_config=aws_config)
end
| AWSBatch | https://github.com/JuliaCloud/AWSBatch.jl.git |
|
[
"MIT"
] | 2.0.1 | dfe487a88864ca8e58ff2c7c8149d0b6dc2bc94d | code | 3557 |
"""
JobQueue
An object representing an AWS Batch job queue.
See [`AWSBatch.create_job_queue`](@ref).
"""
struct JobQueue
arn::String
function JobQueue(queue::AbstractString; aws_config::AbstractAWSConfig=global_aws_config())
arn = job_queue_arn(queue; aws_config=aws_config)
arn === nothing && error("No job queue ARN found for: $queue")
new(arn)
end
end
Base.:(==)(a::JobQueue, b::JobQueue) = a.arn == b.arn
function describe(queue::JobQueue; aws_config::AbstractAWSConfig=global_aws_config())
return describe_job_queue(queue; aws_config=aws_config)
end
function describe_job_queue(queue::JobQueue; aws_config::AbstractAWSConfig=global_aws_config())
return describe_job_queue(queue.arn; aws_config=aws_config)
end
function max_vcpus(queue::JobQueue; aws_config::AbstractAWSConfig=global_aws_config())
sum(max_vcpus(ce; aws_config=aws_config) for ce in compute_environments(queue; aws_config=aws_config))
end
function _create_compute_environment_order(envs)
map(enumerate(envs)) do (i, env)
Dict{String,Any}("computeEnvironment"=>env, "order"=>i)
end
end
"""
create_job_queue(name, envs, priority=1; aws_config=global_aws_config())
Create a job queue with name `name` and priority `priority`, returning the API response.
`envs` must be an iterator of compute environments given by ARN.
See the AWS docs [here](https://docs.aws.amazon.com/batch/latest/APIReference/API_CreateJobQueue.html).
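For example (the compute environment ARN is a placeholder):
```julia
create_job_queue("demo-queue",
                 ["arn:aws:batch:us-east-1:000000000000:compute-environment/demo-ce"],
                 2)
```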
"""
function create_job_queue(name::AbstractString, envs, priority::Integer=1;
enabled::Bool=true,
tags::AbstractDict=Dict{String,Any}(),
aws_config::AbstractAWSConfig=global_aws_config())
env = _create_compute_environment_order(envs)
args = Dict{String,Any}()
enabled || (args["state"] = "DISABLED")
isempty(tags) || (args["tags"] = tags)
return @mock Batch.create_job_queue(env, name, priority, args; aws_config=aws_config)
end
"""
list_job_queues(;aws_config=global_aws_config())
Get a list of `JobQueue` objects as returned by `Batch.describe_job_queues()`.
"""
function list_job_queues(;aws_config::AbstractAWSConfig=global_aws_config())
[JobQueue(q["jobQueueArn"]) for q ∈ Batch.describe_job_queues(;aws_config=aws_config)["jobQueues"]]
end
"""
compute_environments(queue::JobQueue; aws_config=global_aws_config())
Get a list of `ComputeEnvironment` objects associated with the `JobQueue`.
"""
function compute_environments(queue::JobQueue; aws_config::AbstractAWSConfig=global_aws_config())
ce_order = describe(queue; aws_config=aws_config)["computeEnvironmentOrder"]
compute_envs = Vector{ComputeEnvironment}(undef, length(ce_order))
for ce in ce_order
i, arn = ce["order"], ce["computeEnvironment"]
compute_envs[i] = ComputeEnvironment(arn)
end
return compute_envs
end
function job_queue_arn(queue::AbstractString; aws_config::AbstractAWSConfig=global_aws_config())
startswith(queue, "arn:") && return queue
json = describe_job_queue(queue; aws_config=aws_config)
isempty(json) ? nothing : json["jobQueueArn"]
end
function describe_job_queue(queue::AbstractString;
aws_config::AbstractAWSConfig=global_aws_config())::OrderedDict
json = @mock Batch.describe_job_queues(Dict("jobQueues" => [queue]); aws_config=aws_config)
queues = json["jobQueues"]
len = length(queues)::Int
@assert len <= 1
return len == 1 ? first(queues) : OrderedDict()
end
| AWSBatch | https://github.com/JuliaCloud/AWSBatch.jl.git |
|
[
"MIT"
] | 2.0.1 | dfe487a88864ca8e58ff2c7c8149d0b6dc2bc94d | code | 476 | # https://docs.aws.amazon.com/batch/latest/userguide/job_states.html
@doc """
JobState
An enum for representing different possible AWS Batch job states.
See [docs](http://docs.aws.amazon.com/batch/latest/userguide/job_states.html) for details.
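For example, states parse from strings and compare by progression:
```julia
parse(JobState, "RUNNING") < AWSBatch.SUCCEEDED  # true
```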
""" JobState
@enum JobState SUBMITTED PENDING RUNNABLE STARTING RUNNING SUCCEEDED FAILED
const STATE_MAP = Dict(string(s) => s for s in instances(JobState))
Base.parse(::Type{JobState}, str::AbstractString) = STATE_MAP[str]
| AWSBatch | https://github.com/JuliaCloud/AWSBatch.jl.git |
|
[
"MIT"
] | 2.0.1 | dfe487a88864ca8e58ff2c7c8149d0b6dc2bc94d | code | 2193 | """
LogEvent
A struct for representing an event in an AWS Batch job log.
"""
struct LogEvent
id::String
ingestion_time::DateTime # in UTC
timestamp::DateTime # in UTC
message::String
end
function Base.convert(::Type{LogEvent}, d::AbstractDict)
LogEvent(
d["eventId"],
Dates.unix2datetime(d["ingestionTime"] / 1000),
Dates.unix2datetime(d["timestamp"] / 1000),
d["message"],
)
end
function Base.print(io::IO, event::LogEvent)
print(io, rpad(event.timestamp, 23), " ", event.message)
end
function Base.print(io::IO, log_events::Vector{LogEvent})
for event in log_events
println(io, event)
end
end
"""
log_events(log_group, log_stream) -> Union{Vector{LogEvent}, Nothing}
Fetches the CloudWatch log from the specified log group and stream as a `Vector` of
`LogEvent`s. If the log stream does not exist then `nothing` will be returned.
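For example (the stream name is a placeholder):
```julia
log_events("/aws/batch/job", "demo/default/0123456789abcdef")
```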
"""
function log_events(log_group::AbstractString, log_stream::AbstractString;
aws_config::AbstractAWSConfig=global_aws_config())
events = LogEvent[]
curr_token = nothing
next_token = nothing
# We've hit the end of the stream if the next token matches the current one.
while next_token != curr_token || next_token === nothing
response = try
@mock Cloudwatch_Logs.get_log_events(
log_group, log_stream,
Dict("nextToken"=>next_token);
aws_config=aws_config,
)
catch e
# The specified log stream does not exist. Specifically, this can occur when
# a batch job has a reference to a log stream but the stream has not yet been
# created.
if (
e isa AWSExceptions.AWSException &&
e.cause.status == 400 &&
e.info["message"] == "The specified log stream does not exist."
)
return nothing
end
rethrow()
end
append!(events, convert.(LogEvent, response["events"]))
curr_token = next_token
next_token = response["nextForwardToken"]
end
return events
end
| AWSBatch | https://github.com/JuliaCloud/AWSBatch.jl.git |
|
[
"MIT"
] | 2.0.1 | dfe487a88864ca8e58ff2c7c8149d0b6dc2bc94d | code | 3589 | @testset "BatchJob" begin
job = BatchJob("00000000-0000-0000-0000-000000000000")
@testset "status_reason" begin
@testset "not provided" begin
patch = @patch function AWSBatch.Batch.describe_jobs(args...; kwargs...)
Dict(
"jobs" => [
Dict()
]
)
end
apply(patch) do
@test status_reason(job) === nothing
end
end
@testset "provided" begin
reason = "Essential container in task exited"
patch = @patch function AWSBatch.Batch.describe_jobs(args...; kwargs...)
Dict(
"jobs" => [
Dict("statusReason" => reason)
]
)
end
apply(patch) do
@test status_reason(job) == reason
end
end
end
@testset "log_events" begin
@testset "Stream not yet created" begin
# When a AWS Batch job is first submitted the description of the job will not
# contain a reference to a log stream
patches = log_events_patches(log_stream_name=nothing)
apply(patches) do
@test log_events(job) === nothing
end
end
end
@testset "wait" begin
# Generate a patch which returns the next status each time it is requested
function status_patch(states)
index = 1
return @patch function AWSBatch.Batch.describe_jobs(args...; kwargs...)
json = Dict(
"jobs" => [
Dict("status" => states[index])
]
)
if index < length(states)
index += 1
end
return json
end
end
@testset "success" begin
# Encounter all states possible for a successful job
states = ["SUBMITTED", "PENDING", "RUNNABLE", "STARTING", "RUNNING", "SUCCEEDED"]
apply(status_patch(states)) do
@test_log logger "info" r"^[\d-]+ status \w+" begin
@test wait(state -> state < AWSBatch.SUCCEEDED, job; delay=0.1) == true
end
@test status(job) == AWSBatch.SUCCEEDED
end
end
@testset "failed" begin
# Encounter all states possible for a failed job
states = ["SUBMITTED", "PENDING", "RUNNABLE", "STARTING", "RUNNING", "FAILED"]
apply(status_patch(states)) do
@test_log logger "info" r"^[\d-]+ status \w+" begin
@test wait(state -> state < AWSBatch.SUCCEEDED, job; delay=0.1) == true
end
@test status(job) == AWSBatch.FAILED
end
end
@testset "timeout" begin
apply(status_patch(["SUBMITTED", "RUNNING", "SUCCEEDED"])) do
started = time()
@test_nolog logger "info" r".*" begin
@test_throws BatchJobError wait(
state -> state < AWSBatch.SUCCEEDED,
job;
delay=0.1,
timeout=0
)
end
duration = time() - started
@test status(job) != AWSBatch.SUCCEEDED # Requires a minimum of 3 states
@test duration < 1 # Less than 1 second
end
end
end
end
| AWSBatch | https://github.com/JuliaCloud/AWSBatch.jl.git |
|
[
"MIT"
] | 2.0.1 | dfe487a88864ca8e58ff2c7c8149d0b6dc2bc94d | code | 1188 | using OrderedCollections: OrderedDict
@testset "ComputeEnvironment" begin
@testset "constructor" begin
arn = "arn:aws:batch:us-east-1:000000000000:compute-environment/ce"
patch = describe_compute_environments_patch(
OrderedDict(
"computeEnvironmentName" => "ce-name",
"computeEnvironmentArn" => arn,
)
)
apply(patch) do
@test ComputeEnvironment(arn).arn == arn
end
apply(patch) do
@test ComputeEnvironment("ce-name").arn == arn
end
patch = describe_compute_environments_patch()
apply(patch) do
@test_throws ErrorException ComputeEnvironment("ce-name")
end
end
@testset "max_vcpus" begin
ce = ComputeEnvironment("arn:aws:batch:us-east-1:000000000000:compute-environment/ce")
patch = describe_compute_environments_patch(
OrderedDict(
"computeEnvironmentArn" => ce.arn,
"computeResources" => OrderedDict("maxvCpus" => 5),
),
)
apply(patch) do
@test AWSBatch.max_vcpus(ce) == 5
end
end
end
| AWSBatch | https://github.com/JuliaCloud/AWSBatch.jl.git |
|
[
"MIT"
] | 2.0.1 | dfe487a88864ca8e58ff2c7c8149d0b6dc2bc94d | code | 2949 | using OrderedCollections: OrderedDict
@testset "JobQueue" begin
@testset "constructor" begin
arn = "arn:aws:batch:us-east-1:000000000000:job-queue/queue"
patch = describe_job_queues_patch(
OrderedDict(
"jobQueueName" => "queue-name",
"jobQueueArn" => arn,
)
)
apply(patch) do
@test JobQueue(arn).arn == arn
end
apply(patch) do
@test JobQueue("queue-name").arn == arn
end
patch = describe_job_queues_patch()
apply(patch) do
@test_throws ErrorException JobQueue("queue-name")
end
end
@testset "compute_environments" begin
# Note: to date we've only used queues with a single compute environment
queue = JobQueue("arn:aws:batch:us-east-1:000000000000:job-queue/queue")
patch = describe_job_queues_patch(
OrderedDict(
"jobQueueArn" => queue.arn,
"computeEnvironmentOrder" => [
OrderedDict("order" => 2, "computeEnvironment" => "arn:aws:batch:us-east-1:000000000000:compute-environment/two"),
OrderedDict("order" => 1, "computeEnvironment" => "arn:aws:batch:us-east-1:000000000000:compute-environment/one"),
],
)
)
expected = [
ComputeEnvironment("arn:aws:batch:us-east-1:000000000000:compute-environment/one"),
ComputeEnvironment("arn:aws:batch:us-east-1:000000000000:compute-environment/two"),
]
apply(patch) do
@test AWSBatch.compute_environments(queue) == expected
end
end
@testset "max_vcpus" begin
queue = JobQueue("arn:aws:batch:us-east-1:000000000000:job-queue/queue")
patches = [
describe_job_queues_patch(
OrderedDict(
"jobQueueArn" => queue.arn,
"computeEnvironmentOrder" => [
OrderedDict("order" => 1, "computeEnvironment" => "arn:aws:batch:us-east-1:000000000000:compute-environment/one"),
OrderedDict("order" => 2, "computeEnvironment" => "arn:aws:batch:us-east-1:000000000000:compute-environment/two"),
],
)
)
describe_compute_environments_patch([
OrderedDict(
"computeEnvironmentArn" => "arn:aws:batch:us-east-1:000000000000:compute-environment/one",
"computeResources" => OrderedDict("maxvCpus" => 7),
),
OrderedDict(
"computeEnvironmentArn" => "arn:aws:batch:us-east-1:000000000000:compute-environment/two",
"computeResources" => OrderedDict("maxvCpus" => 8),
)
])
]
apply(patches) do
@test AWSBatch.max_vcpus(queue) == 15
end
end
end
| AWSBatch | https://github.com/JuliaCloud/AWSBatch.jl.git |
|
[
"MIT"
] | 2.0.1 | dfe487a88864ca8e58ff2c7c8149d0b6dc2bc94d | code | 703 | using AWSBatch: JobState, SUBMITTED, PENDING, RUNNABLE, STARTING, RUNNING, SUCCEEDED, FAILED
@testset "JobState" begin
@testset "parse" begin
@test length(instances(JobState)) == 7
@test parse(JobState, "SUBMITTED") == SUBMITTED
@test parse(JobState, "PENDING") == PENDING
@test parse(JobState, "RUNNABLE") == RUNNABLE
@test parse(JobState, "STARTING") == STARTING
@test parse(JobState, "RUNNING") == RUNNING
@test parse(JobState, "SUCCEEDED") == SUCCEEDED
@test parse(JobState, "FAILED") == FAILED
end
@testset "order" begin
@test SUBMITTED < PENDING < RUNNABLE < STARTING < RUNNING < SUCCEEDED < FAILED
end
end
| AWSBatch | https://github.com/JuliaCloud/AWSBatch.jl.git |
|
[
"MIT"
] | 2.0.1 | dfe487a88864ca8e58ff2c7c8149d0b6dc2bc94d | code | 2804 | @testset "LogEvent" begin
@testset "constructor" begin
event = AWSBatch.LogEvent("123", DateTime(2018, 1, 2), DateTime(2018, 1, 1), "hello world!")
@test event.id == "123"
@test event.ingestion_time == DateTime(2018, 1, 2)
@test event.timestamp == DateTime(2018, 1, 1)
@test event.message == "hello world!"
end
@testset "convert" begin
d = Dict(
"eventId" => "456",
"ingestionTime" => 1,
"timestamp" => 2,
"message" => "from a dict",
)
event = convert(AWSBatch.LogEvent, d)
@test event.id == "456"
@test event.ingestion_time == DateTime(1970, 1, 1, 0, 0, 0, 1)
@test event.timestamp == DateTime(1970, 1, 1, 0, 0, 0, 2)
@test event.message == "from a dict"
end
@testset "print" begin
event = AWSBatch.LogEvent("123", DateTime(2018, 1, 2), DateTime(2018, 1, 1), "hello world!")
@test sprint(print, event) == "2018-01-01T00:00:00 hello world!"
@test sprint(print, [event, event]) == """
2018-01-01T00:00:00 hello world!
2018-01-01T00:00:00 hello world!
"""
end
end
@testset "log_events" begin
@testset "Stream DNE" begin
dne_exception = AWSException(
HTTP.StatusError(
400,
"",
"",
HTTP.Messages.Response(
400,
Dict("Content-Type" => "application/x-amz-json-1.1");
body="""{"__type":"ResourceNotFoundException","message":"The specified log stream does not exist."}"""
)
)
)
patches = log_events_patches(exception=dne_exception)
apply(patches) do
@test log_events("group", "dne-stream") === nothing # TODO: Suppress "Fetching log events from"
end
end
@testset "Stream with no events" begin
patches = log_events_patches(events=[])
apply(patches) do
@test log_events("group", "stream") == LogEvent[]
end
end
@testset "Stream with events" begin
events = [
Dict(
"eventId" => "0" ^ 56,
"ingestionTime" => 1573672813145,
"timestamp" => 1573672813145,
"message" => "hello world!",
)
]
patches = log_events_patches(events=events)
apply(patches) do
@test log_events("group", "stream") == [
LogEvent(
"0" ^ 56,
DateTime(2019, 11, 13, 19, 20, 13, 145),
DateTime(2019, 11, 13, 19, 20, 13, 145),
"hello world!",
)
]
end
end
end
| AWSBatch | https://github.com/JuliaCloud/AWSBatch.jl.git |
|
[
"MIT"
] | 2.0.1 | dfe487a88864ca8e58ff2c7c8149d0b6dc2bc94d | code | 4754 | import Base: AbstractCmd, CmdRedirect
using OrderedCollections: OrderedDict
const BATCH_ENVS = (
"AWS_BATCH_JOB_ID" => "24fa2d7a-64c4-49d2-8b47-f8da4fbde8e9",
"AWS_BATCH_JQ_NAME" => "HighPriority"
)
const SUBMIT_JOB_RESP = Dict(
"jobName" => "example",
"jobId" => "24fa2d7a-64c4-49d2-8b47-f8da4fbde8e9",
)
const REGISTER_JOB_DEF_RESP = Dict(
"jobDefinitionName" => "sleep60",
"jobDefinitionArn" => "arn:aws:batch:us-east-1:012345678910:job-definition/sleep60:1",
"revision"=>1,
)
const DESCRIBE_JOBS_DEF_RESP = Dict(
"jobDefinitions" => [
Dict(
"type" => "container",
"containerProperties" => Dict(
"command" => [
"sleep",
"60"
],
"environment" => [
],
"image" => "busybox",
"memory" => 128,
"mountPoints" => [
],
"ulimits" => [
],
"vcpus" => 1,
"volumes" => [
],
"jobRoleArn" => "arn:aws:iam::012345678910:role/sleep60",
),
"jobDefinitionArn" => "arn:aws:batch:us-east-1:012345678910:job-definition/sleep60:1",
"jobDefinitionName" => "sleep60",
"revision" => 1,
"status" => "ACTIVE"
)
]
)
const DESCRIBE_JOBS_RESP = Dict(
"jobs" => [
Dict(
"container" => Dict(
"command" => [
"sleep",
"60"
],
"containerInstanceArn" => "arn:aws:ecs:us-east-1:012345678910:container-instance/5406d7cd-58bd-4b8f-9936-48d7c6b1526c",
"environment" => [
],
"exitCode" => 0,
"image" => "busybox",
"memory" => 128,
"mountPoints" => [
],
"ulimits" => [
],
"vcpus" => 1,
"volumes" => [
],
"jobRoleArn" => "arn:aws:iam::012345678910:role/sleep60",
),
"createdAt" => 1480460782010,
"dependsOn" => [
],
"jobDefinition" => "sleep60",
"jobId" => "24fa2d7a-64c4-49d2-8b47-f8da4fbde8e9",
"jobName" => "example",
"jobQueue" => "arn:aws:batch:us-east-1:012345678910:job-queue/HighPriority",
"parameters" => Dict(
),
"startedAt" => 1480460816500,
"status" => "SUCCEEDED",
"stoppedAt" => 1480460880699
)
]
)
function describe_compute_environments_patch(output::Vector=[])
@patch function AWSBatch.Batch.describe_compute_environments(d::Dict; aws_config=aws_config)
compute_envs = d["computeEnvironments"]
@assert length(compute_envs) == 1
ce = first(compute_envs)
key = startswith(ce, "arn:") ? "computeEnvironmentArn" : "computeEnvironmentName"
results = filter(d -> d[key] == ce, output)
OrderedDict("computeEnvironments" => results)
end
end
function describe_compute_environments_patch(output::OrderedDict)
describe_compute_environments_patch([output])
end
function describe_job_queues_patch(output::Vector=[])
@patch function AWSBatch.Batch.describe_job_queues(d::Dict; aws_config=aws_config)
queues = d["jobQueues"]
@assert length(queues) == 1
queue = first(queues)
key = startswith(queue, "arn:") ? "jobQueueArn" : "jobQueueName"
results = filter(d -> d[key] == queue, output)
OrderedDict("jobQueues" => output)
end
end
function describe_job_queues_patch(output::OrderedDict)
describe_job_queues_patch([output])
end
function log_events_patches(; log_stream_name="mock_stream", events=[], exception=nothing)
job_descriptions = if log_stream_name === nothing
Dict("jobs" => [Dict("container" => Dict())])
else
Dict("jobs" => [Dict("container" => Dict("logStreamName" => log_stream_name))])
end
get_log_events_patch = if exception !== nothing
@patch AWSBatch.Cloudwatch_Logs.get_log_events(args...; kwargs...) = throw(exception)
else
@patch function AWSBatch.Cloudwatch_Logs.get_log_events(grp, stream, params; kwargs...)
if get(params, "nextToken", nothing) === nothing
Dict("events" => events, "nextForwardToken" => "0")
else
Dict("events" => [], "nextForwardToken" => "0")
end
end
end
return [
@patch AWSBatch.Batch.describe_jobs(args...; kwargs...) = job_descriptions
get_log_events_patch
]
end
| AWSBatch | https://github.com/JuliaCloud/AWSBatch.jl.git |
|
[
"MIT"
] | 2.0.1 | dfe487a88864ca8e58ff2c7c8149d0b6dc2bc94d | code | 5471 | function _register_job_def(name, type, input, expected)
@test name == expected["jobDefinitionName"]
@test type == expected["type"]
@test input["parameters"] == expected["parameters"]
@test input["containerProperties"] == expected["containerProperties"]
return REGISTER_JOB_DEF_RESP
end
function _submit_job(def, name, queue, input, expected::AbstractDict)
@test def == expected["jobDefinition"]
@test name == expected["jobName"]
@test queue == expected["jobQueue"]
@test input["parameters"] == expected["parameters"]
@test input["containerOverrides"] == expected["containerOverrides"]
return SUBMIT_JOB_RESP
end
@testset "run_batch" begin
@testset "Defaults" begin
withenv("AWS_BATCH_JOB_ID" => nothing) do
@test_throws AWSBatch.BatchEnvironmentError run_batch()
end
end
queue_arn = "arn:aws:batch:us-east-1:000000000000:job-queue/HighPriority"
queue_patch = describe_job_queues_patch(OrderedDict("jobQueueName"=>"HighPriority",
"jobQueueArn"=>queue_arn))
aws_config = global_aws_config()
@testset "From Job Definition" begin
expected_job = OrderedDict(
"jobName" => "example",
"jobQueue" => queue_arn,
"jobDefinition" => "arn:aws:batch:us-east-1:012345678910:job-definition/sleep60:1",
"parameters" => Dict{String,String}(),
"containerOverrides" => Dict(
"command" => ["sleep", "60"],
"memory" => 128,
"vcpus" => 1,
),
)
patches = [
queue_patch
@patch AWSBatch.Batch.describe_job_definitions(args...; kw...) = DESCRIBE_JOBS_DEF_RESP
@patch AWSBatch.Batch.submit_job(def, name, queue, input; kw...) =
_submit_job(def, name, queue, input, expected_job)
]
apply(patches) do
job = run_batch(; name="example", definition="sleep60", queue="HighPriority")
@test job.id == "24fa2d7a-64c4-49d2-8b47-f8da4fbde8e9"
job = run_batch(;
name="example", definition="sleep60", queue="HighPriority", num_jobs=4
)
@test job.id == "24fa2d7a-64c4-49d2-8b47-f8da4fbde8e9"
end
end
@testset "From Current Job" begin
withenv(BATCH_ENVS...) do
expected_job = OrderedDict(
"jobName" => "example",
"jobQueue" => queue_arn,
"jobDefinition" => "arn:aws:batch:us-east-1:012345678910:job-definition/sleep60:1",
"parameters" => Dict{String,String}(),
"containerOverrides" => Dict(
"command" => ["sleep", "60"],
"memory" => 128,
"vcpus" => 1,
),
)
expected_job_def = OrderedDict(
"type" => "container",
"parameters" => Dict{String,String}(),
"containerProperties" => OrderedDict(
"image" => "busybox",
"vcpus" => 1,
"memory" => 128,
"command" => ["sleep", "60"],
"jobRoleArn" => "arn:aws:iam::012345678910:role/sleep60",
),
"jobDefinitionName" => "sleep60",
)
patches = [
queue_patch
@patch AWSBatch.Batch.describe_jobs(args...; kw...) = DESCRIBE_JOBS_RESP
@patch AWSBatch.Batch.describe_job_definitions(args...; kw...) = Dict("jobDefinitions" => Dict())
@patch AWSBatch.Batch.register_job_definition(name, type, input; kw...) =
_register_job_def(name, type, input, expected_job_def)
@patch AWSBatch.Batch.submit_job(def, name, queue, input; kw...) =
_submit_job(def, name, queue, input, expected_job)
]
apply(patches) do
job = run_batch()
@test job.id == "24fa2d7a-64c4-49d2-8b47-f8da4fbde8e9"
end
end
end
@testset "Using a Job Definition" begin
withenv(BATCH_ENVS...) do
expected_job = OrderedDict(
"jobName" => "example",
"jobQueue" => queue_arn,
"jobDefinition" => "arn:aws:batch:us-east-1:012345678910:job-definition/sleep60:1",
"parameters" => Dict{String,String}(),
"containerOverrides" => Dict(
"command" => ["sleep", "60"],
"memory" => 128,
"vcpus" => 1,
),
)
patches = [
queue_patch
@patch AWSBatch.Batch.describe_jobs(args...; kw...) = DESCRIBE_JOBS_RESP
@patch AWSBatch.Batch.describe_job_definitions(args...; kw...) = Dict("jobDefinitions" => Dict())
@patch AWSBatch.Batch.submit_job(def, name, queue, input; kw...) =
_submit_job(def, name, queue, input, expected_job)
]
apply(patches) do
definition = JobDefinition("arn:aws:batch:us-east-1:012345678910:job-definition/sleep60:1")
job = run_batch(definition=definition)
@test job.id == "24fa2d7a-64c4-49d2-8b47-f8da4fbde8e9"
end
end
end
end
| AWSBatch | https://github.com/JuliaCloud/AWSBatch.jl.git |
|
[
"MIT"
] | 2.0.1 | dfe487a88864ca8e58ff2c7c8149d0b6dc2bc94d | code | 12690 | using AWS
using AWSTools.CloudFormation: stack_output
using AWSBatch
using Dates
using HTTP: HTTP
using Memento
using Memento.TestUtils: @test_log, @test_nolog
using Mocking
using Test
using AWS.AWSExceptions: AWSException
Mocking.activate()
# Controls the running of various tests: "local", "batch"
const TESTS = strip.(split(get(ENV, "TESTS", "local"), r"\s*,\s*"))
# Run the tests on a stack created with the "test/batch.yml" CloudFormation template
# found in AWSClusterManagers.jl
const AWS_STACKNAME = get(ENV, "AWS_STACKNAME", "")
const STACK = !isempty(AWS_STACKNAME) ? stack_output(AWS_STACKNAME) : Dict()
const JOB_TIMEOUT = 900
const LOG_TIMEOUT = 30
const JULIA_BAKED_IMAGE = let
output = read(`git ls-remote --tags https://github.com/JuliaLang/julia`, String)
tags = split(replace(output, r".*\/" => ""))
versions = VersionNumber.(filter(v -> !endswith(v, "^{}"), tags))
latest_version = maximum(versions)
docker_tag = VERSION > latest_version ? "nightly" : "$VERSION"
"468665244580.dkr.ecr.us-east-1.amazonaws.com/julia-baked:$docker_tag"
end
Memento.config!("debug"; fmt="[{level} | {name}]: {msg}")
const logger = getlogger(AWSBatch)
setlevel!(logger, "info")
# We've been having issues with the log stream being created but no log events are present.
# - https://gitlab.invenia.ca/invenia/AWSBatch.jl/issues/28
# - https://gitlab.invenia.ca/invenia/AWSBatch.jl/issues/30
#
# This function allows for us to wait if logs are not present but avoids blocking when
# logs are ready.
#
# Note: The timeout duration expects the job has reached the SUCCEEDED or FAILED state and
# is not expected to last long enough for a job to complete running.
function wait_for_log_events(job::BatchJob)
events = nothing
# Convert to Float64 until this is merged and we no longer use versions of Julia
# without PR (probably Julia 1.5+): https://github.com/JuliaLang/julia/pull/35103
timedwait(Float64(LOG_TIMEOUT); pollint=Float64(5)) do
events = log_events(job)
# Note: These warnings should assist in determining the special circumstances
# the log events not being present. Eventually warnings should be removed.
if events === nothing
notice(logger, "Log stream for $(job.id) does not exist")
elseif isempty(events)
notice(logger, "No log events for $(job.id)")
end
# Wait for log stream to exist and contain at least one event
events !== nothing && !isempty(events)
end
return events
end
include("mock.jl")
@testset "AWSBatch.jl" begin
if "local" in TESTS
# need to define these to make sure we don't inadvertently try to talk to AWS
withenv("AWS_ACCESS_KEY_ID" => "", "AWS_SECRET_ACCESS_KEY" => "") do
include("compute_environment.jl")
include("job_queue.jl")
include("log_event.jl")
include("job_state.jl")
include("batch_job.jl")
include("run_batch.jl")
end
else
warn(logger, "Skipping \"local\" tests. Set `ENV[\"TESTS\"] = \"local\"` to run.")
end
if "batch" in TESTS && !isempty(AWS_STACKNAME)
@testset "AWS Batch" begin
info(logger, "Running AWS Batch tests")
@testset "Job Submission" begin
definition = "aws-batch-test"
# Append the job ID to the definition when running on the CI. Doing this
# will allow this test to successfully reuse the job definition later when
# concurrent CI jobs are running AWS Batch tests at the same time.
if haskey(ENV, "CI_JOB_ID")
definition *= "-" * ENV["CI_JOB_ID"]
end
job = run_batch(;
name = "aws-batch-test",
definition = definition,
queue = STACK["JobQueueArn"],
image = JULIA_BAKED_IMAGE,
vcpus = 1,
memory = 1024,
role = STACK["JobRoleArn"],
cmd = `julia -e 'println("Hello World!")'`,
parameters = Dict{String, String}("region" => "us-east-1"),
)
@test wait(job, [AWSBatch.SUCCEEDED]; timeout=JOB_TIMEOUT) == true
@test status(job) == AWSBatch.SUCCEEDED
events = wait_for_log_events(job)
@test length(events) == 1
@test first(events).message == "Hello World!"
# Test job details were set correctly
job_details = describe(job)
@test job_details["jobName"] == "aws-batch-test"
@test occursin(STACK["JobQueueArn"], job_details["jobQueue"])
@test job_details["parameters"] == Dict("region" => "us-east-1")
# Test job definition and container parameters were set correctly
job_definition = JobDefinition(job)
@test isregistered(job_definition) == true
job_definition_details = first(describe(job_definition)["jobDefinitions"])
@test job_definition_details["jobDefinitionName"] == definition
@test job_definition_details["status"] == "ACTIVE"
@test job_definition_details["type"] == "container"
container_properties = job_definition_details["containerProperties"]
@test container_properties["image"] == JULIA_BAKED_IMAGE
@test container_properties["vcpus"] == 1
@test container_properties["memory"] == 1024
@test container_properties["command"] == [
"julia",
"-e",
"println(\"Hello World!\")"
]
@test container_properties["jobRoleArn"] == STACK["JobRoleArn"]
# Reuse job definition
job = run_batch(;
name = "aws-batch-test",
definition = definition,
queue = STACK["JobQueueArn"],
image = JULIA_BAKED_IMAGE,
vcpus = 1,
memory = 1024,
role = STACK["JobRoleArn"],
cmd = `julia -e 'println("Hello World!")'`,
parameters = Dict{String, String}("region" => "us-east-1"),
)
@test wait(job, [AWSBatch.SUCCEEDED]; timeout=JOB_TIMEOUT) == true
@test status(job) == AWSBatch.SUCCEEDED
# Test job definition and container parameters were set correctly
job_definition_2 = JobDefinition(job)
@test job_definition_2 == job_definition
deregister(job_definition)
end
@testset "Job registration disallowed" begin
@test_throws BatchEnvironmentError run_batch(;
name = "aws-batch-no-job-registration-test",
queue = STACK["JobQueueArn"],
image = JULIA_BAKED_IMAGE,
role = STACK["JobRoleArn"],
cmd = `julia -e 'println("Hello World!")'`,
parameters = Dict{String, String}("region" => "us-east-1"),
allow_job_registration = false,
)
end
@testset "Job parameters" begin
# Use parameter substitution placeholders in the command field
command = Cmd(["julia", "-e", "Ref::juliacmd"])
# Set a default output string when registering the job definition
job_definition = register(
"aws-batch-parameters-test";
image=JULIA_BAKED_IMAGE,
role=STACK["JobRoleArn"],
vcpus=1,
memory=1024,
cmd=command,
parameters=Dict("juliacmd" => "println(\"Default String\")"),
)
# Override the default output string
job = run_batch(;
name = "aws-batch-parameters-test",
definition = job_definition,
queue = STACK["JobQueueArn"],
image = JULIA_BAKED_IMAGE,
vcpus = 1,
memory = 1024,
role = STACK["JobRoleArn"],
cmd = command,
parameters=Dict("juliacmd" => "println(\"Hello World!\")"),
)
@test wait(state -> state < AWSBatch.SUCCEEDED, job; timeout=JOB_TIMEOUT)
@test status(job) == AWSBatch.SUCCEEDED
events = wait_for_log_events(job)
@test length(events) == 1
@test first(events).message == "Hello World!"
# Test job details were set correctly
job_details = describe(job)
@test job_details["parameters"] == Dict(
"juliacmd" => "println(\"Hello World!\")"
)
job_definition = JobDefinition(job)
job_definition_details = first(describe(job_definition)["jobDefinitions"])
job_definition_details["parameters"] = Dict(
"juliacmd" => "println(\"Default String\")"
)
container_properties = job_definition_details["containerProperties"]
@test container_properties["command"] == ["julia", "-e", "Ref::juliacmd"]
# Deregister job definition
deregister(job_definition)
end
@testset "Array job" begin
job = run_batch(;
name = "aws-batch-array-job-test",
definition = "aws-batch-array-job-test",
queue = STACK["JobQueueArn"],
image = JULIA_BAKED_IMAGE,
vcpus = 1,
memory = 1024,
role = STACK["JobRoleArn"],
cmd = `julia -e 'println("Hello World!")'`,
num_jobs = 3,
)
@test wait(state -> state < AWSBatch.SUCCEEDED, job; timeout=JOB_TIMEOUT)
@test status(job) == AWSBatch.SUCCEEDED
# Test array job was submitted properly
status_summary = Dict(
"STARTING" => 0, "FAILED" => 0, "RUNNING" => 0, "SUCCEEDED" => 3,
"RUNNABLE" => 0, "SUBMITTED" => 0, "PENDING" => 0,
)
job_details = describe(job)
@test job_details["arrayProperties"]["statusSummary"] == status_summary
@test job_details["arrayProperties"]["size"] == 3
# No log stream will exist for the parent job
events = log_events(job)
@test events === nothing
# Test logs for each individual job that is part of the job array
for i in 0:2
job_id = "$(job.id):$i"
events = wait_for_log_events(BatchJob(job_id))
@test length(events) == 1
@test first(events).message == "Hello World!"
end
# Deregister the job definition
job_definition = JobDefinition(job)
deregister(job_definition)
end
@testset "Failed Job" begin
info(logger, "Testing job failure")
job = run_batch(;
name = "aws-batch-failed-job-test",
definition = "aws-batch-failed-job-test",
queue = STACK["JobQueueArn"],
image = JULIA_BAKED_IMAGE,
vcpus = 1,
memory = 1024,
role = STACK["JobRoleArn"],
cmd = `julia -e 'error("Testing job failure")'`,
)
job_definition = JobDefinition(job)
@test isregistered(job_definition) == true
@test_throws BatchJobError wait(
job,
[AWSBatch.SUCCEEDED];
timeout=JOB_TIMEOUT
)
events = wait_for_log_events(job)
@test first(events).message == "ERROR: Testing job failure"
deregister(job_definition)
end
end
else
warn(
logger,
"Skipping \"batch\" tests. Set `ENV[\"TESTS\"] = \"batch\"` and " *
"`ENV[\"AWS_STACKNAME\"]` to run."
)
end
end
| AWSBatch | https://github.com/JuliaCloud/AWSBatch.jl.git |
|
[
"MIT"
] | 2.0.1 | dfe487a88864ca8e58ff2c7c8149d0b6dc2bc94d | docs | 749 | # AWSBatch
[](https://github.com/JuliaCloud/AWSBatch.jl/actions/workflows/CI.yml)
[](https://juliacloud.github.io/AWSBatch.jl/stable)
[](https://juliacloud.github.io/AWSBatch.jl/dev)
## Running the tests

To run the online AWS Batch tests you must first set the environment variables `TESTS` and
`AWS_STACKNAME`:
```julia
ENV["TESTS"] = "batch"
ENV["AWS_STACKNAME"] = "aws-batch-manager-test"
```
To create an `aws-batch-manager-test`-compatible stack, you can use the CloudFormation template [test/batch.yml](./test/batch.yml).
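With both variables set, the batch tests run as part of the package's test suite, for example:

```julia
using Pkg
Pkg.test("AWSBatch")
```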
| AWSBatch | https://github.com/JuliaCloud/AWSBatch.jl.git |
|
[
"MIT"
] | 2.0.1 | dfe487a88864ca8e58ff2c7c8149d0b6dc2bc94d | docs | 2892 | # AWSBatch
[](https://juliacloud.github.io/AWSBatch.jl/stable)
[](https://github.com/JuliaCloud/AWSBatch.jl/actions/workflows/CI.yml)
AWSBatch.jl provides a small set of methods for working with AWS Batch jobs from Julia.
## Installation
AWSBatch assumes that you already have an AWS account configured with:
1. An [ECR repository](https://aws.amazon.com/ecr/) and a docker image pushed to it [[1]](http://docs.aws.amazon.com/AmazonECR/latest/userguide/docker-push-ecr-image.html).
2. An [IAM role](http://docs.aws.amazon.com/IAM/latest/UserGuide/id_roles.html) to apply to the batch jobs.
3. A compute environment and job queue for submitting jobs to [[2]](http://docs.aws.amazon.com/batch/latest/userguide/Batch_GetStarted.html#first-run-step-2).
Please review the
["Getting Started with AWS Batch"](http://docs.aws.amazon.com/batch/latest/userguide/Batch_GetStarted.html) guide and example
[CloudFormation template](https://s3-us-west-2.amazonaws.com/cloudformation-templates-us-west-2/Managed_EC2_Batch_Environment.template) for more details.
## Basic Usage
```julia
julia> using AWSBatch
julia> job = run_batch(
    name = "Demo",
    definition = "AWSBatchJobDefinition",
    queue = "AWSBatchJobQueue",
    image = "000000000000.dkr.ecr.us-east-1.amazonaws.com/demo:latest",
    role = "arn:aws:iam::000000000000:role/AWSBatchJobRole",
    vcpus = 1,
    memory = 1024,
    cmd = `julia -e 'println("Hello World!")'`,
)
AWSBatch.BatchJob("00000000-0000-0000-0000-000000000000")
julia> wait(job, [AWSBatch.SUCCEEDED])
true
julia> results = log_events(job)
1-element Array{AWSBatch.LogEvent,1}:
AWSBatch.LogEvent("00000000000000000000000000000000000000000000000000000000", 2018-04-23T19:41:18.765, 2018-04-23T19:41:18.677, "Hello World!")
```
AWSBatch also supports Memento logging, which can provide more detailed information about what the package is doing.
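For example, to see debug-level messages (a minimal sketch; it assumes AWSBatch logs through a Memento logger registered under the package's name):

```julia
using Memento

# Lower the level of the (assumed) "AWSBatch" logger so debug records are shown.
setlevel!(getlogger("AWSBatch"), "debug")
```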
## API
```@docs
run_batch()
```
### BatchJob
```@docs
AWSBatch.BatchJob
AWSBatch.submit
AWSBatch.describe(::BatchJob)
AWSBatch.JobDefinition(::BatchJob)
AWSBatch.status(::BatchJob)
Base.wait(::Function, ::BatchJob)
Base.wait(::BatchJob, ::Vector{JobState}, ::Vector{JobState})
AWSBatch.log_events(::BatchJob)
```
### JobDefinition
```@docs
AWSBatch.JobDefinition
AWSBatch.ComputeEnvironment
AWSBatch.create_compute_environment
AWSBatch.list_job_definitions
AWSBatch.job_definition_arn(::AbstractString)
AWSBatch.register(::AbstractString)
AWSBatch.deregister(::JobDefinition)
AWSBatch.isregistered(::JobDefinition)
AWSBatch.describe(::JobDefinition)
```
### JobQueue
```@docs
AWSBatch.JobQueue
AWSBatch.create_job_queue
AWSBatch.list_job_queues
```
### JobState
```@docs
AWSBatch.JobState
```
### LogEvent
```@docs
AWSBatch.LogEvent
```
| AWSBatch | https://github.com/JuliaCloud/AWSBatch.jl.git |
|
[
"MIT"
] | 0.1.0 | d064f853e5eca1d684a85b943723dacc9e0f6d7c | code | 773 | module ReparametrizableDistributionsReverseDiffExt
using ReparametrizableDistributions, ReverseDiff, Distributions
import ReparametrizableDistributions: _logcdf, _invlogcdf
import ReverseDiff: TrackedReal
ReverseDiff.@grad_from_chainrules _logcdf(d, x::TrackedReal)
ReverseDiff.@grad_from_chainrules _invlogcdf(d, x::TrackedReal)
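# `_logcdf`/`_invlogcdf` carry ChainRules rules, which the macro above wires
# into ReverseDiff for tracked inputs; the `value` overloads below strip
# tracking from distribution parameters so primal computations run on plain
# numbers.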
ReverseDiff.value(d::Gamma{<:TrackedReal}) = Gamma(ReverseDiff.value.(params(d))...)
# ReverseDiff.@grad_from_chainrules logcdf(d::Gamma{<:TrackedReal}, x::Real)
ReverseDiff.@grad_from_chainrules _invlogcdf(d::Gamma{<:TrackedReal}, x::Real)
ReverseDiff.value(d::NoncentralChisq{<:TrackedReal}) = NoncentralChisq(ReverseDiff.value.(params(d))...)
ReverseDiff.@grad_from_chainrules _invlogcdf(d::NoncentralChisq{<:TrackedReal}, x::Real)
end | ReparametrizableDistributions | https://github.com/nsiccha/ReparametrizableDistributions.jl.git |
|
[
"MIT"
] | 0.1.0 | d064f853e5eca1d684a85b943723dacc9e0f6d7c | code | 1958 | module ReparametrizableDistributions
export LocScaleHierarchy, ScaleHierarchy, TScaleHierarchy, MeanShift, GammaSimplex, HSGP, PHSGP, R2D2, RHS, Directional, ReparametrizablePosterior, ReparametrizableBSLDP, FixedDistribution, find_reparametrization
export log_transform
using WarmupHMC, Distributions, LogDensityProblems, LogExpFunctions
using SpecialFunctions, HypergeometricFunctions, ChainRulesCore
using BridgeStan, JSON
import WarmupHMC: reparametrization_parameters, optimization_reparametrization_parameters, reparametrize, lpdf_and_invariants, lja_and_reparametrize, to_array, to_nt, lpdf_update, lja_update, find_reparametrization
import LogDensityProblemsAD: ADgradient, ADGradientWrapper
kmap_(f, args...; kwargs...) = map((args...)->f(args...; kwargs...), args...)
kmap(f, args...; kwargs...) = kmap_(f, args...; kwargs...)
kmap(f, arg::NamedTuple, args...; kwargs...) = kmap_(f, arg, ensure_like.(Ref(arg), args)...; kwargs...)
ensure_like(::NamedTuple{names}, rhs::NamedTuple) where {names} = NamedTuple{names}(rhs)
ensure_like(::NamedTuple{names}, rhs) where {names} = NamedTuple{names}((rhs for name in names))
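# Illustrative behavior (not exported API): `kmap` maps `f` like `map`, except
# that when the first argument is a NamedTuple, the remaining arguments are
# expanded to NamedTuples with matching keys, e.g.
#   kmap(+, (a=1, b=2), 10) == (a=11, b=12)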
include("utils/StackedArray.jl")
include("utils/finite_unconstraining.jl")
include("utils/quantile_and_cdf.jl")
include("utils/transform.jl")
include("distributions/AbstractReparametrizableDistribution.jl")
include("distributions/ScaleHierarchy.jl")
include("distributions/MeanShift.jl")
include("distributions/Directional.jl")
include("distributions/GammaSimplex.jl")
include("distributions/AbstractWrappedDistribution.jl")
include("distributions/FixedDistribution.jl")
include("distributions/AbstractCompositeReparametrizableDistribution.jl")
include("distributions/HSGP.jl")
include("distributions/R2D2.jl")
include("distributions/RHS.jl")
include("distributions/ReparametrizablePosterior.jl")
include("distributions/ReparametrizableBSLDP.jl")
include("utils/convenience.jl")
end # module ReparametrizableDistributions
| ReparametrizableDistributions | https://github.com/nsiccha/ReparametrizableDistributions.jl.git |
|
[
"MIT"
] | 0.1.0 | d064f853e5eca1d684a85b943723dacc9e0f6d7c | code | 1084 | abstract type AbstractCompositeReparametrizableDistribution <: AbstractReparametrizableDistribution end
ACRD = AbstractCompositeReparametrizableDistribution
# IMPLEMENT THIS
parts(::ACRD) = error("unimplemented")
reparametrization_parameters(source::ACRD) = map(reparametrization_parameters, parts(source))
optimization_parameters_fn(source::ACRD) = map(optimization_parameters_fn, parts(source))
reparametrize(source::ACRD, parameters::NamedTuple) = recombine(
source, map(reparametrize, parts(source), parameters)
)
# IMPLEMENT THIS
lpdf_update(::ACRD, ::NamedTuple, lpdf=0.) = error("unimplemented")
lja_update(source::ACRD, target::ACRD, invariants::NamedTuple, lja=0.) = begin
intermediates = kmap(lja_and_reparametrize, parts(source), parts(target), invariants, lja)
(;lja=sum(getproperty.(values(intermediates), :lja)), intermediates...)
end
divide(source::ACRD, draws::AbstractVector{<:NamedTuple}) = parts(source), (;
((key, getproperty.(draws, key)) for key in keys(parts(source)))...
)
# IMPLEMENT THIS
recombine(::ACRD, ::Any) = error("unimplemented") | ReparametrizableDistributions | https://github.com/nsiccha/ReparametrizableDistributions.jl.git |
|
[
"MIT"
] | 0.1.0 | d064f853e5eca1d684a85b943723dacc9e0f6d7c | code | 3964 | abstract type AbstractReparametrizableDistribution <: ContinuousMultivariateDistribution end
Broadcast.broadcastable(source::AbstractReparametrizableDistribution) = Ref(source)
Base.getproperty(source::T, key::Symbol) where {T<:AbstractReparametrizableDistribution} = hasfield(T, key) ? getfield(source, key) : getproperty(info(source), key)
info(source::AbstractReparametrizableDistribution) = getfield(source, :info)
Base.length(source::AbstractReparametrizableDistribution) = sum(lengths(source))
lengths(source::AbstractReparametrizableDistribution) = map(length, parts(source))
# IMPLEMENT THIS
parts(::AbstractReparametrizableDistribution) = error("unimplemented")
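# `parts` should return a NamedTuple of the distribution's named components;
# `length`, the array <-> NamedTuple conversions, and the default splitting
# into subproblems are all derived from it.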
# IMPLEMENTING THIS FOR WarmupHMC.jl
to_nt(source::AbstractReparametrizableDistribution, draw::AbstractArray) = views(
parts(source), draw
)
to_array_limited(source, draw) = view(to_array(source, draw), 1:length(source))
to_array(source::AbstractReparametrizableDistribution, draw::NamedTuple) = vcat(
kmap(to_array_limited, parts(source), draw)...
)
# IMPLEMENT THIS
reparametrization_parameters(::AbstractReparametrizableDistribution) = error("unimplemented")
# IMPLEMENTING THIS FOR WarmupHMC.jl
optimization_reparametrization_parameters(source::AbstractReparametrizableDistribution) = vcat(
map(
broadcast,
ensure_like(reparametrization_parameters(source), optimization_parameters_fn(source)),
reparametrization_parameters(source)
)...
)
# MAY IMPLEMENT THIS
optimization_parameters_fn(::AbstractReparametrizableDistribution) = identity
# IMPLEMENTING THIS FOR WarmupHMC.jl
reparametrize(source::AbstractReparametrizableDistribution, parameters::AbstractVector) = reparametrize(
source,
map(
broadcast,
map(inverse, ensure_like(reparametrization_parameters(source), optimization_parameters_fn(source))),
views(reparametrization_parameters(source), parameters)
)
)
# MAY IMPLEMENT THIS or THE ABOVE
reparametrize(source::T, parameters::NamedTuple) where {T<:AbstractReparametrizableDistribution} = T.name.wrapper(merge(info(source), parameters))
# IMPLEMENT THIS or THE ABOVE
# reparametrize(::AbstractReparametrizableDistribution, ::NamedTuple) = error("unimplemented")
# MAY IMPLEMENT THIS
# to_array(::AbstractReparametrizableDistribution, ::NamedTuple)
# IMPLEMENT THIS
lpdf_update(::AbstractReparametrizableDistribution, ::NamedTuple, lpdf=0.) = error("unimplemented")
# IMPLEMENT THIS
lja_update(::AbstractReparametrizableDistribution, ::AbstractReparametrizableDistribution, ::NamedTuple, lja=0.) = error("unimplemented")
# IMPLEMENTING THIS FOR WarmupHMC.jl
find_reparametrization(source::AbstractReparametrizableDistribution, draw::AbstractVector{<:NamedTuple}; kwargs...) = begin
subsources, subdraws = divide(source, draw)
if length(subsources) == 1
find_reparametrization(:Optim, source, draw; kwargs...)
else
recombine(source, kmap(find_reparametrization, subsources, subdraws; kwargs...))
end
end
find_reparametrization(source::AbstractReparametrizableDistribution, draws::AbstractMatrix; kwargs...) = recombine(
source, kmap(find_reparametrization, divide(source, draws)...; kwargs...)
)
# MAY IMPLEMENT THIS
divide(source, draws::AbstractMatrix) = divide(
source, lpdf_and_invariants(source, draws, Ignore())
)
# MAY IMPLEMENT THIS
divide(source, draws::AbstractVector{<:NamedTuple}) = (source, ), (draws, )
# MAY IMPLEMENT THIS
recombine(::Any, resources::NTuple{1}) = resources[1]
# recombine(::Any, ::Any) = error("unimplemented")
# IMPLEMENTING THIS FOR LogDensityProblems.jl
LogDensityProblems.dimension(source::AbstractReparametrizableDistribution) = length(source)
LogDensityProblems.logdensity(source::AbstractReparametrizableDistribution, draw::AbstractVector) = try
lpdf_and_invariants(source, draw).lpdf
catch e
@warn """
Failed to evaluate log density:
$source
$draw
$(WarmupHMC.exception_to_string(e))
"""
-Inf
end | ReparametrizableDistributions | https://github.com/nsiccha/ReparametrizableDistributions.jl.git |
|
[
"MIT"
] | 0.1.0 | d064f853e5eca1d684a85b943723dacc9e0f6d7c | code | 1050 | abstract type AbstractWrappedDistribution <: AbstractReparametrizableDistribution end
Base.parent(source::AbstractWrappedDistribution) = source.wrapped
parts(source::AbstractWrappedDistribution) = parts(parent(source))
reparametrization_parameters(source::AbstractWrappedDistribution) = reparametrization_parameters(parent(source))
reparametrize(source::AbstractWrappedDistribution, parameters::NamedTuple) = recombine(
source, reparametrize(parent(source), parameters)
)
lpdf_update(source::AbstractWrappedDistribution, draw::NamedTuple, lpdf=0.) = lpdf_update(
parent(source), draw, lpdf
)
lja_update(source::AbstractWrappedDistribution, target::AbstractWrappedDistribution, draw::NamedTuple, lpdf=0.) = lja_update(
parent(source), parent(target), draw, lpdf
)
find_reparametrization(source::AbstractWrappedDistribution, draws::AbstractMatrix; kwargs...) = recombine(
source,
find_reparametrization(parent(source), draws; kwargs...)
)
# IMPLEMENT THIS
recombine(::AbstractWrappedDistribution, reparent) = error("unimplemented") | ReparametrizableDistributions | https://github.com/nsiccha/ReparametrizableDistributions.jl.git |
|
[
"MIT"
] | 0.1.0 | d064f853e5eca1d684a85b943723dacc9e0f6d7c | code | 61 | struct Dirac0{V}
value::V
end
Base.length(::Dirac0) = 0
| ReparametrizableDistributions | https://github.com/nsiccha/ReparametrizableDistributions.jl.git |
|
[
"MIT"
] | 0.1.0 | d064f853e5eca1d684a85b943723dacc9e0f6d7c | code | 1942 | abstract type AbstractDirectional <: AbstractReparametrizableDistribution end
parts(source::AbstractDirectional) = (;source.direction)
struct NormalDirectional{I} <: AbstractDirectional
info::I
end
NormalDirectional(dimension, c1, c2) = NormalDirectional(
(;
dimension,
c1, c2,
radius_squared=truncated(Normal(c1, c2); lower=0)
)
)
reparametrization_parameters(source::NormalDirectional) = (;source.c1, source.c2)
optimization_parameters_fn(::NormalDirectional) = finite_log
reparametrize(source::NormalDirectional, parameters::NamedTuple) = NormalDirectional(source.dimension, parameters...)
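# Construction note: the `direction` draw is standard normal, so its squared
# norm is Chisq(dimension)-distributed a priori. `lpdf_update` divides out that
# implied radius distribution and replaces it with the target `radius_squared`
# distribution, leaving the normalized direction uniform on the sphere.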
lpdf_update(source::AbstractDirectional, draw::NamedTuple, lpdf=0.) = begin
radius_squared = 1e-8 + sum(draw.direction .^ 2)
direction = draw.direction ./ sqrt(radius_squared)
lpdf += sum_logpdf(Normal(), draw.direction)
lpdf += (
_logpdf(source.radius_squared, radius_squared)
- _logpdf(Chisq(source.dimension), radius_squared)
)
(;lpdf, direction, radius_squared)
end
lja_update(source::AbstractDirectional, target::AbstractDirectional, invariants::NamedTuple, lja=0.) = begin
radius_squared = quantile_cdf(
target.radius_squared, source.radius_squared, invariants.radius_squared
)
direction = invariants.direction .* sqrt(radius_squared)
lja += sum_logpdf(Normal(), direction)
lja += (
_logpdf(target.radius_squared, radius_squared)
- _logpdf(Chisq(target.dimension), radius_squared)
)
(;lja, direction, radius_squared)
end
# struct Directional{I} <: AbstractDirectional
# info::I
# end
# Directional(dimension, non_centrality) = Directional(
# (;
# dimension,
# non_centrality,
# radius_squared=NoncentralChisq(dimension, non_centrality),
# )
# )
# reparametrize(source::Directional, parameters::AbstractVector) = Directional(source.info.dimension, exp(parameters[1])) | ReparametrizableDistributions | https://github.com/nsiccha/ReparametrizableDistributions.jl.git |
|
[
"MIT"
] | 0.1.0 | d064f853e5eca1d684a85b943723dacc9e0f6d7c | code | 223 | struct FixedDistribution{W} <: AbstractWrappedDistribution
wrapped::W
end
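# Opts the wrapped distribution out of adaptation: both `reparametrize` and
# `find_reparametrization` return the source unchanged.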
reparametrize(source::FixedDistribution, ::Any) = source
find_reparametrization(source::FixedDistribution, ::AbstractMatrix; kwargs...) = source | ReparametrizableDistributions | https://github.com/nsiccha/ReparametrizableDistributions.jl.git |
|
[
"MIT"
] | 0.1.0 | d064f853e5eca1d684a85b943723dacc9e0f6d7c | code | 1976 | struct GammaSimplex{I} <: AbstractReparametrizableDistribution
info::I
end
GammaSimplex(target::AbstractVector) = GammaSimplex(Dirichlet(target))
GammaSimplex(target::AbstractVector, parametrization::AbstractVector) = GammaSimplex(
Dirichlet(target), Dirichlet(parametrization)
)
GammaSimplex(target::Dirichlet) = GammaSimplex(target, target)
GammaSimplex(target::Dirichlet, parametrization::Dirichlet) = GammaSimplex((
target_distribution=target,
parametrization_distribution=parametrization,
parametrization_gammas=Gamma.(parametrization.alpha),
sum_gamma=Gamma(sum(parametrization.alpha)),
))
parts(source::GammaSimplex) = (;weights=source.target_distribution)
reparametrization_parameters(source::GammaSimplex) = (;
parametrization=source.parametrization_distribution.alpha
)
optimization_parameters_fn(::GammaSimplex) = finite_log
reparametrize(source::GammaSimplex, parameters::NamedTuple) = GammaSimplex(
source.target_distribution, Dirichlet(parameters.parametrization)
)
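# Construction note: the standard-normal draw is pushed through the
# parametrization's Gamma quantiles and normalized to a simplex point. Since
# normalized independent Gamma(alpha_i) variates are Dirichlet(alpha)
# distributed, correcting by the density ratio between the target and
# parametrization Dirichlets yields the target distribution over `weights`.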
lpdf_update(source::GammaSimplex, draw::NamedTuple, lpdf=0.) = begin
unnormalized_weights = quantile_cdf.(source.parametrization_gammas, Normal(), draw.weights)
weights_sum = sum(unnormalized_weights)
weights = unnormalized_weights ./ weights_sum
lpdf += sum_logpdf(Normal(), draw.weights)
lpdf += logpdf(source.target_distribution, weights)
lpdf -= logpdf(source.parametrization_distribution, weights)
(;lpdf, weights, weights_sum)
end
lja_update(source::GammaSimplex, target::GammaSimplex, invariants::NamedTuple, lja=0.) = begin
weights_sum = quantile_cdf(target.sum_gamma, source.sum_gamma, invariants.weights_sum)
unnormalized_weights = invariants.weights .* weights_sum
weights = quantile_cdf.(Normal(), target.parametrization_gammas, unnormalized_weights)
lja += sum_logpdf(Normal(), weights)
lja -= logpdf(target.parametrization_distribution, invariants.weights)
(;lja, weights, weights_sum)
end
| ReparametrizableDistributions | https://github.com/nsiccha/ReparametrizableDistributions.jl.git |
|
[
"MIT"
] | 0.1.0 | d064f853e5eca1d684a85b943723dacc9e0f6d7c | code | 3914 |
# abstract type AbstractHSGP <: AbstractReparametrizableDistribution end
struct HSGP{I} <: AbstractCompositeReparametrizableDistribution
info::I
end
HSGP(intercept, log_sd, log_lengthscale; intercept_shift, centeredness, kwargs...) = HSGP(
MeanShift(intercept, intercept_shift),
log_sd, log_lengthscale,
ScaleHierarchy([], centeredness); kwargs...
)
HSGP(intercept, log_sd, log_lengthscale, hierarchy; kwargs...) = HSGP(
(;intercept, log_sd, log_lengthscale, hierarchy, hsgp_extra(;n_functions=length(hierarchy), kwargs...)...)
)
hsgp_extra(;x, n_functions::Integer=32, boundary_factor::Real=1.5) = begin
idxs = 1:n_functions
# sin(diag_post_multiply(rep_matrix(pi()/(2*L) * (x+L), M), linspaced_vector(M, 1, M)))/sqrt(L);
X = sin.((x .+ boundary_factor) .* (pi/(2*boundary_factor)) .* idxs') ./ sqrt(boundary_factor)
# alpha * sqrt(sqrt(2*pi()) * rho) * exp(-0.25*(rho*pi()/2/L)^2 * linspaced_vector(M, 1, M)^2);
pre_eig = (-.25 * (pi/2/boundary_factor)^2) .* idxs .^ 2
(;X, pre_eig)
end
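# `X` holds the Hilbert-space sine basis functions evaluated at `x`, and
# `pre_eig` the lengthscale-independent part of the per-basis-function log
# scales; `lpdf_update` combines it with the sampled sd and lengthscale.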
parts(source::HSGP) = (;source.intercept, source.log_sd, source.log_lengthscale, source.hierarchy)
lpdf_update(source::HSGP, draw::NamedTuple, lpdf=0.) = begin
# alpha * sqrt(sqrt(2*pi()) * rho) * exp(-0.25*(rho*pi()/2/L)^2 * linspaced_vector(M, 1, M)^2);
lengthscale = exp.(draw.log_lengthscale)
log_scale = (
draw.log_sd .+ .25 * log(2*pi) .+ .5 * draw.log_lengthscale
) .+ lengthscale.^2 .* source.pre_eig
log_scale = logaddexp.(log(1e-8), log_scale)
hierarchy = lpdf_and_invariants(source.hierarchy, (;log_scale, weights=draw.hierarchy), lpdf)
intercept = lpdf_and_invariants(source.intercept, (;draw.intercept, hierarchy.weights), lpdf)
lpdf += intercept.lpdf
lpdf += sum_logpdf(source.log_sd, draw.log_sd)
lpdf += sum_logpdf(source.log_lengthscale, draw.log_lengthscale)
lpdf += hierarchy.lpdf
y = intercept.intercept .+ source.X * hierarchy.weights
(;lpdf, intercept, hierarchy, y)
end
recombine(source::HSGP, reparts::NamedTuple) = HSGP(merge(info(source), reparts))
struct PHSGP{I} <: AbstractCompositeReparametrizableDistribution
info::I
end
PHSGP(log_sd, log_lengthscale; centeredness, kwargs...) = PHSGP(
log_sd, log_lengthscale,
ScaleHierarchy([], centeredness); kwargs...
)
PHSGP(log_sd, log_lengthscale, hierarchy; kwargs...) = PHSGP(
(;log_sd, log_lengthscale, hierarchy, phsgp_extra(;n_functions=length(hierarchy), kwargs...)...)
)
phsgp_extra(;x, n_functions::Integer=32, boundary_factor::Real=1.5) = begin
idxs = 1:(n_functions ÷ 2)
# return append_col(
# cos(diag_post_multiply(rep_matrix(2*pi()*x/L, M/2), linspaced_vector(M/2, 1, M/2))),
# sin(diag_post_multiply(rep_matrix(2*pi()*x/L, M/2), linspaced_vector(M/2, 1, M/2)))
# );
xi = (2 .* pi .* x ./ boundary_factor) .* idxs'
X = hcat(cos.(xi), sin.(xi))
(;X, idxs)
end
parts(source::PHSGP) = (;source.log_sd, source.log_lengthscale, source.hierarchy)
lpdf_update(source::PHSGP, draw::NamedTuple, lpdf=0.) = begin
# real a = exp(-2*log_lengthscale);
# vector[M/2] q = log_sd + 0.5 * (log(2) - a + to_vector(log_modified_bessel_first_kind(linspaced_int_array(M/2, 1, M/2), a)));
# return append_row(q,q);
a = exp.(-2 .* draw.log_lengthscale)
log_scale = (
# Let's see whether this is stable
draw.log_sd .+ .5 * (log(2) .+ log.(besselix.(source.idxs, a)))
)
log_scale = logaddexp.(log(1e-8), log_scale)
log_scale = vcat(log_scale, log_scale)
hierarchy = lpdf_and_invariants(source.hierarchy, (;log_scale, weights=draw.hierarchy), lpdf)
lpdf += sum_logpdf(source.log_sd, draw.log_sd)
lpdf += sum_logpdf(source.log_lengthscale, draw.log_lengthscale)
lpdf += hierarchy.lpdf
y = source.X * hierarchy.weights
(;lpdf, hierarchy, y)
end
recombine(source::PHSGP, reparts::NamedTuple) = PHSGP(merge(info(source), reparts)) | ReparametrizableDistributions | https://github.com/nsiccha/ReparametrizableDistributions.jl.git |
|
[
"MIT"
] | 0.1.0 | d064f853e5eca1d684a85b943723dacc9e0f6d7c | code | 663 | struct MeanShift{I} <: AbstractReparametrizableDistribution
info::I
end
MeanShift(intercept, mean_shift) = MeanShift((;intercept, mean_shift))
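# Shifts the sampled intercept by sum(weights .* mean_shift) before evaluating
# its prior; `lja_update` applies the inverse shift, so the invariant
# `intercept` is independent of the chosen `mean_shift`.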
parts(source::MeanShift) = (;source.intercept)
reparametrization_parameters(source::MeanShift) = (;source.mean_shift)
lpdf_update(source::MeanShift, draw::NamedTuple, lpdf=0.) = begin
intercept = draw.intercept .+ sum(draw.weights .* source.mean_shift)
lpdf += sum_logpdf(source.intercept, intercept)
(;lpdf, intercept)
end
lja_update(::MeanShift, target::MeanShift, invariants::NamedTuple, lja=0.) = begin
(;lja, intercept=invariants.intercept .- sum(invariants.weights .* target.mean_shift))
end | ReparametrizableDistributions | https://github.com/nsiccha/ReparametrizableDistributions.jl.git |
|
[
"MIT"
] | 0.1.0 | d064f853e5eca1d684a85b943723dacc9e0f6d7c | code | 901 | struct R2D2{I} <: AbstractCompositeReparametrizableDistribution
info::I
end
R2D2(log_sigma, logit_R2, simplex, hierarchy) = R2D2((;log_sigma, logit_R2, simplex, hierarchy))
parts(source::R2D2) = source.info
# reparametrize(source::R2D2, parameters::NamedTuple) = R2D2(map(reparametrize, info(source), parameters))
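# Decomposition note: the explained-variance fraction R2 is mapped to a global
# scale tau = R2/(1-R2), which the simplex distributes across coefficients,
# giving per-coefficient scales sigma * tau * sqrt(weights).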
lpdf_update(source::R2D2, draw::NamedTuple, lpdf=0.) = begin
sigma = exp.(draw.log_sigma)
R2 = logistic.(draw.logit_R2)
tau = R2 ./ (1 .- R2)
simplex = lpdf_and_invariants(source.simplex, draw.simplex)
log_scale = log.((sigma.*tau) .* sqrt.(simplex.weights))
hierarchy = lpdf_and_invariants(source.hierarchy, (;log_scale, weights=draw.hierarchy))
lpdf += sum_logpdf(source.log_sigma, draw.log_sigma)
lpdf += sum_logpdf(source.logit_R2, draw.logit_R2)
lpdf += simplex.lpdf
lpdf += hierarchy.lpdf
(;lpdf, simplex, hierarchy, sigma, R2, tau)
end | ReparametrizableDistributions | https://github.com/nsiccha/ReparametrizableDistributions.jl.git |
|
[
"MIT"
] | 0.1.0 | d064f853e5eca1d684a85b943723dacc9e0f6d7c | code | 1613 | struct RHS{I} <: AbstractCompositeReparametrizableDistribution
info::I
end
RHS(nu_global, nu_local, slab_scale, slab_df, scale_global, centeredness) = RHS((;
log_c=.5log_transform(slab_scale^2 * InverseGamma(.5slab_df, .5slab_df)),
log_lambda=fill(log_transform(TDist(nu_local)), size(centeredness)),
log_tau=log_transform(2*scale_global * TDist(nu_global)),
hierarchy=ScaleHierarchy((), centeredness)
))
parts(source::RHS) = info(source)
recombine(source::RHS, reparts::NamedTuple) = RHS(merge(info(source), reparts))
lpdf_update(source::RHS, draw::NamedTuple, lpdf=0.) = begin
# https://github.com/avehtari/casestudies/blob/967cdb3a6432e8985886b96fda306645fe156a29/Birthdays/gpbf8rhs.stan#L87-L91
# real c_f4 = slab_scale * sqrt(caux_f4); // slab scale
# beta ~ normal(0, sqrt( c^2 * square(lambda) ./ (c^2 + tau^2*square(lambda)))*tau);
# scale = c * lambda ./ sqrt(c^2 + tau^2*square(lambda)))*tau
# lambda ~ student_t(nu_local, 0, 1);
# tau ~ student_t(nu_global, 0, scale_global*2);
# caux ~ inv_gamma(0.5*slab_df, 0.5*slab_df);
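# In log space the coefficient scale above is
#   log(c) + log(lambda) + log(tau) - 0.5 * log(c^2 + tau^2 * lambda^2),
# computed below with logaddexp for numerical stability.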
log_c, log_lambda, log_tau = draw.log_c, draw.log_lambda, draw.log_tau
log_scale = log_c .+ log_lambda .- .5 .* logaddexp.(2 .* log_c, 2 .* (log_tau .+ log_lambda)) .+ log_tau;
hierarchy = lpdf_and_invariants(source.hierarchy, (;log_scale, weights=draw.hierarchy))
lpdf += sum_logpdf(source.log_c, draw.log_c)
lpdf += sum_logpdf(source.log_lambda, draw.log_lambda)
lpdf += sum_logpdf(source.log_tau, draw.log_tau)
lpdf += hierarchy.lpdf
(;lpdf, hierarchy, weights=hierarchy.weights)
end | ReparametrizableDistributions | https://github.com/nsiccha/ReparametrizableDistributions.jl.git |