# GCIdentifier v0.3.5 source code and documentation (MIT license)
# Repository: https://github.com/ClapeyronThermo/GCIdentifier.jl.git
"""
GCPair(smarts,name;group_order = 1, multiplicity = 1)
Struct used to hold the description of a group. It contains the SMARTS string necessary to match the group within a SMILES query, along with the assigned name.
The `group_order` parameter is used for groups that follow a Constantinou-Gani approach: the list of `GCPair`s with `group_order = 1` is matched with strict coverage (failing if any atoms are left uncovered), while second-order groups and above are not strictly checked for total coverage. Each group order is matched independently. The `multiplicity` parameter sets how many groups each SMARTS match counts as (1 by default).
"""
struct GCPair
smarts::String
name::String
group_order::Int
multiplicity::Int
end
GCPair(smarts,name;group_order = 1, multiplicity = 1) = GCPair(smarts,name,group_order,multiplicity)
export GCPair
smarts(x::GCPair) = x.smarts
name(x::GCPair) = x.name
group_order(x::GCPair) = x.group_order
first_group_order(x::GCPair) = x.group_order == 1
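# Illustrative usage sketch (not part of the package itself; the group below is only an example):
#   ch3 = GCPair(raw"[CX4H3]", "CH3")       # first-order group by default
#   smarts(ch3)             # -> "[CX4H3]"
#   name(ch3)               # -> "CH3"
#   first_group_order(ch3)  # -> true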
#sorting comparison between 2 smatches
function _isless_smatch(smatch1,smatch2)
#fallback, if one is not matched, throw to the end
len_smatch1 = length(smatch1)
len_smatch2 = length(smatch2)
if len_smatch1 == 0 || len_smatch2 == 0
return len_smatch1 < len_smatch2
end
#first criterion: bigger groups go first
#(the group whose matches cover more atoms)
atom_size1 = length(smatch1[1]["atoms"])
atom_size2 = length(smatch2[1]["atoms"])
if atom_size1 != atom_size2
return atom_size1 < atom_size2
end
#second criterion:
#if the size of the match is the same,
#return the one with the fewest matches first
if len_smatch1 != len_smatch2
return len_smatch1 > len_smatch2
end
#third criterion:
#return the match with more bonds
bond_count1 = length(smatch1[1]["bonds"])
bond_count2 = length(smatch2[1]["bonds"])
if bond_count1 != bond_count2
return bond_count1 < bond_count2
end
#no more comparisons, consider them equal
return false
end
function unique_groups!(groups)
n = length(groups)
counts = zeros(Int,n)
to_delete = fill(false,n)
#step 1: find uniques and group those
for i in 1:n #include the last group so its count is preserved
str,val =groups[i]
counts[i] = val
for j in (i+1):n
str2,vals2 = groups[j]
if str2 == str && !to_delete[j]
to_delete[j] = true
counts[i] += vals2
counts[j] = 0
end
end
end
#step 2: set new values
for i in 1:n
str,val =groups[i]
groups[i] = str => counts[i]
end
#step 3: delete groups with zero values
return deleteat!(groups,to_delete)
end
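# Example of the intended behaviour (sketch): repeated group names are merged in place, e.g.
#   unique_groups!(["CH3" => 1, "CH2" => 2, "CH3" => 1])
# returns ["CH3" => 2, "CH2" => 2].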
"""
get_grouplist(x)
Should return a `Vector{GCPair}` containing the available groups for SMILES matching.
"""
function get_grouplist end
get_grouplist(x::Vector{GCPair}) = x
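# Hypothetical extension sketch: a downstream package can plug its own group database into
# `get_groups_from_smiles` by extending `get_grouplist` for its own type. `MyGroupSource`
# below is a made-up name used purely for illustration:
#   struct MyGroupSource end
#   GCIdentifier.get_grouplist(::MyGroupSource) = [GCPair(raw"[CX4H3]","CH3"), GCPair(raw"[CX4H2]","CH2")]
#   get_groups_from_smiles("CCCC", MyGroupSource())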
"""
get_groups_from_smiles(smiles::String,groups;connectivity = false,check = true)
Given a SMILES string and a group list (`groups::Vector{GCPair}`), returns a list of groups and the number of times each occurs.
If `connectivity` is true, then it will additionally return a vector containing the number of bonds between each pair of groups.
## Examples
```julia
julia> get_groups_from_smiles("CCO",UNIFACGroups)
("CCO", ["CH3" => 1, "CH2" => 1, "OH(P)" => 1])
julia> get_groups_from_smiles("CCO",JobackGroups,connectivity = true)
("CCO", ["-CH3" => 1, "-CH2-" => 1, "-OH (alcohol)" => 1], [("-CH3", "-CH2-") => 1, ("-CH2-", "-OH (alcohol)") => 1])
```
"""
function get_groups_from_smiles(smiles::String,groups;connectivity = false,check = true)
groups = get_grouplist(groups)
count(first_group_order,groups) == length(groups) && return _get_groups_from_smiles(smiles,groups,connectivity,check)
group_orders = group_order.(groups) |> unique! |> sort!
#find all group orders, perform a match for each order, then join the results.
conectivity_result = Vector{Pair{Tuple{String,String},Int}}[]
results = Tuple{String,Vector{Pair{String,Int}}}[]
for order in group_orders
groups_n = filter(x -> group_order(x) == order,groups)
if order == 1
result1 = _get_groups_from_smiles(smiles,groups_n,connectivity,check)
if connectivity
push!(conectivity_result,result1[3])
end
push!(results,(result1[1],result1[2]))
else
result_n = _get_groups_from_smiles(smiles,groups_n,false,false)
push!(results,result_n)
end
end
gc_pairs = mapreduce(last,vcat,results)
smiles_res = results[1][1]
if connectivity
return (smiles_res,gc_pairs,reduce(vcat,conectivity_result[1]))
else
return (smiles_res,gc_pairs)
end
end
function _get_groups_from_smiles(smiles::String,groups::Vector{GCPair},connectivity=false,check = true)
mol = get_mol(smiles)
atoms = get_atoms(mol)
natoms = length(atoms)
__bonds = __getbondlist(mol)
group_id_expanded, bond_mat_minimum = get_expanded_groups(mol, groups, atoms, __bonds, check, smiles)
group_id = unique(group_id_expanded)
group_occ_list = [sum(group_id_expanded .== i) for i in group_id]
gcpairs = [name(groups[group_id[i]]) => group_occ_list[i]*groups[group_id[i]].multiplicity for i in 1:length(group_id)]
if check
if sum(bond_mat_minimum) != natoms
error("Could not find all groups for "*smiles)
end
end
if connectivity
return (smiles,gcpairs,get_connectivity(mol,group_id,groups))
else
return (smiles,gcpairs)
end
end
function find_covered_atoms(mol, groups, atoms, __bonds, check)
smatches = Vector{Dict{String, Vector{Int64}}}[]
smatches_idx = Int[]
#step 0.a, find all groups that could get a match
for i in 1:length(groups)
query_i = get_qmol(smarts(groups[i]))
if has_substruct_match(mol,query_i)
push!(smatches,get_substruct_matches(mol,query_i,__bonds))
push!(smatches_idx,i)
end
end
#step 0.b, sort the matches by the amount of matched atoms. biggest groups come first.
perm = sortperm(smatches,lt = _isless_smatch,rev = true)
smatches = smatches[perm]
smatches_idx = smatches_idx[perm]
# Expand smatches so that each individual match is listed alongside its group index
smatches_expanded = [smatches[i][j] for i in 1:length(smatches) for j in 1:length(smatches[i])]
smatches_idx_expanded = [smatches_idx[i] for i in 1:length(smatches) for j in 1:length(smatches[i])]
ngroups = length(smatches_expanded)
natoms = length(atoms)
# Create a matrix with the atoms that are in each group
bond_mat = zeros(Int64, ngroups, natoms)
for i in 1:ngroups
smatches_expanded_i_atoms = smatches_expanded[i]["atoms"]
for j in 1:length(smatches_expanded_i_atoms)
bond_mat[i, smatches_expanded_i_atoms[j]+1] = 1
end
end
if check
if any(sum(bond_mat,dims=2).==0)
error("Could not find all groups for "*smiles)
end
end
return smatches_idx_expanded, bond_mat
end
function get_connectivity(mol,group_id,groups)
ngroups = length(group_id)
A = zeros(ngroups,ngroups)
connectivity = Pair{NTuple{2,String},Int}[]
for i in 1:ngroups
gci = groups[group_id[i]]
smart1 = smarts(gci)
smart2 = smarts(gci)
querie = get_qmol(smart1*smart2)
smatch = get_substruct_matches(mol,querie)
name_i = name(gci)
A[i,i] = length(unique(smatch))
if A[i,i]!=0
append!(connectivity,[(name_i,name_i)=>Int(A[i,i])])
end
for j in i+1:ngroups
gcj = groups[group_id[j]]
smart2 = smarts(gcj)
querie = get_qmol(smart1*smart2)
smatch = get_substruct_matches(mol,querie)
querie = get_qmol(smart2*smart1)
append!(smatch,get_substruct_matches(mol,querie))
A[i,j] = length(unique(smatch))
name_j = name(gcj)
if A[i,j]!=0
append!(connectivity,[(name_i,name_j)=>Int(A[i,j])])
end
end
end
return connectivity
end
function get_expanded_groups(mol, groups, atoms, __bonds, check, smiles)
smatches_idx_expanded, bond_mat = find_covered_atoms(mol, groups, atoms, __bonds, check)
# Find all atoms that are in more than one group
overlap = findall(sum(bond_mat, dims=1)[:] .> 1)
# non_overlap = findall(sum(bond_mat, dims=1)[:] .== 1)
# Split the groups in two sets: those that overlap and those that don't
overlap_groups = findall(sum(bond_mat[:, overlap], dims=2)[:] .> 0)
non_overlap_groups = [i for i in 1:length(smatches_idx_expanded) if !(i in overlap_groups)]
# Remove the overlapping groups from the non-overlapping groups
if !isempty(overlap)
# Reduce the bond_mat to only the overlapping atoms
bond_mat_overlap = bond_mat[overlap_groups, :]
# remove columns with only zeros
bond_mat_overlap = bond_mat_overlap[:, any(bond_mat_overlap .> 0, dims=1)[:]]
# Generate all possible combinations of groups which cover all atoms
i = 1
while sum(bond_mat_overlap, dims=1) != ones(Int64, 1, size(bond_mat_overlap, 2))
if i > length(overlap_groups)
error("Could not find all groups for "*smiles)
break
end
# for group i, check which other groups it's overlapping with
overlapping_group_i = findall((sum(bond_mat_overlap[:,bond_mat_overlap[i, :].==1], dims=2).>=1 .&& 1:size(bond_mat_overlap,1).!==i)[:])
# for each of those groups, check if they can be removed (i.e. do all of their atoms have overlaps)
can_remove = zeros(Bool, length(overlapping_group_i))
for j in 1:length(overlapping_group_i)
covered_atoms_j = sum(bond_mat_overlap[:,bond_mat_overlap[overlapping_group_i[j], :].==1], dims =1) .> 1
if all(covered_atoms_j)
can_remove[j] = true
end
end
if all(can_remove)
bond_mat_overlap = bond_mat_overlap[setdiff(1:size(bond_mat_overlap, 1), overlapping_group_i), :]
overlap_groups = overlap_groups[setdiff(1:length(overlap_groups), overlapping_group_i)]
end
i += 1
end
push!(non_overlap_groups, overlap_groups...)
end
bond_mat_minimum = bond_mat[non_overlap_groups, :]
group_id_expanded = smatches_idx_expanded[non_overlap_groups]
return group_id_expanded, bond_mat_minimum
end
#TODO: move this to Clapeyron?
"""
@gcstring_str(str)
given a string of the form "Group1:n1;Group2:n2", returns ["Group1" => n1,"Group2" => n2]
"""
macro gcstring_str(str)
gcpairs = split(str,';')
res = Pair{String,Int}[]
for gci in gcpairs
gc,_ni = split_2(gci,':')
ni = parse(Int,_ni)
push!(res,gc => ni)
end
res
end
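# Example (sketch): gcstring"CH3:2;CH2:2" expands to ["CH3" => 2, "CH2" => 2],
# the form used throughout the test suite.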
"""
group_replace(grouplist,keys...)
given a group list generated by [`get_groups_from_smiles`](@ref), replaces certain groups in `grouplist` with the values specified in `keys`.
## Examples
```
groups1 = get_groups_from_smiles("CCO", UNIFACGroups) #["CH3" => 1, "CH2" => 1, "OH(P)" => 1]
#we replace each "OH(P)" with 1 "OH" group
#and each "CH3" group with 3 "H" group and 1 "C" group
groups2 = group_replace(groups1[2],"OH(P)" => ("OH" => 1), "CH3" => [("C" => 1),("H" => 3)])
```
"""
function group_replace(grouplist,group_keys...)
res = Dict{String,Int}(grouplist)
for (k,v) in group_keys
if haskey(res,k)
multiplier = res[k]
res[k] = 0
if v isa Pair
v = (v,)
end
for vals in v
knew = first(vals)
vnew = last(vals)
if haskey(res,knew)
val_old = res[knew]
res[knew] = val_old + vnew*multiplier
else
res[knew] = vnew*multiplier
end
end
end
end
for kk in keys(res)
if res[kk] == 0
delete!(res,kk)
end
end
return [k => v for (k,v) in pairs(res)]
end
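# Continuing the docstring example above (sketch): with groups1[2] == ["CH3" => 1, "CH2" => 1, "OH(P)" => 1],
# `groups2` contains, up to ordering, ["CH2" => 1, "OH" => 1, "C" => 1, "H" => 3].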
export get_groups_from_name, get_groups_from_smiles, group_replace

# ============================ next source file ============================

"""
find_missing_groups_from_smiles(smiles::String, groups;max_group_size = nothing, environment=false, reduced=false)
Given a SMILES string and a group list (`groups::Vector{GCPair}`), returns a list of potential groups (`new_groups::Vector{GCPair}`) which could cover those atoms not covered within `groups`. If no `groups` vector is provided, it will simply generate all possible groups for the molecule.
A set of heuristics is built into the code when it comes to combining heavy atoms into larger groups:
1. If a carbon atom is bonded to another carbon atom, the two will not be combined into a group, unless only one of the carbons is on a ring.
2. All other combinations of atoms are allowed.
The logic behind the first heuristic is that neighbouring atoms with similar electronegativities won't have a great impact on each other's properties. As such, they are not combined into a group. In the future, this approach could be extended to use HNMR data to determine which atoms can be combined into the same group.
Optional arguments:
- `max_group_size::Int`: The maximum number of atoms within a group to be generated. If `nothing`, the maximum size is however many atoms a central atom is bonded to.
- `environment::Bool`: If true, the group SMARTS will include information about the environment the group is in. For example, in pentane, if `environment` is false, there will only be one CH2 group, whereas, if `environment` is true, there will be two CH2 groups: one bonded to CH3 and one bonded to another CH2.
- `reduced::Bool`: If true, the groups will be generated such that the minimum number of groups required to represent the molecule, based on `max_group_size`, will be generated. If false, all possible groups will be generated.
## Example
```julia
julia> find_missing_groups_from_smiles("CC(=O)O")
7-element Vector{GCIdentifier.GCPair}:
GCIdentifier.GCPair("[CX4;H3;!R]", "CH3")
GCIdentifier.GCPair("[CX3;H0;!R]", "C=")
GCIdentifier.GCPair("[OX1;H0;!R]", "O=")
GCIdentifier.GCPair("[OX2;H1;!R]", "OH")
GCIdentifier.GCPair("[CX3;H0;!R](=[OX1;H0;!R])", "C=O=")
GCIdentifier.GCPair("[CX3;H0;!R]([OX2;H1;!R])", "C=OH")
GCIdentifier.GCPair("[CX3;H0;!R](=[OX1;H0;!R])([OX2;H1;!R])", "C=O=OH")
```
"""
function find_missing_groups_from_smiles(smiles, groups=nothing; max_group_size=nothing, environment=false, reduced=false)
mol = get_mol(smiles)
__bonds = __getbondlist(mol)
atoms = get_atoms(mol)
if isnothing(groups)
missing_atoms = ones(Bool, length(atoms))
else
if count(first_group_order,groups) == length(groups)
first_order_groups = groups
else
first_order_groups = filter(x -> group_order(x) == 1, groups)
end
smatches_idx_expanded, atom_coverage = find_covered_atoms(mol, first_order_groups, atoms, __bonds, false)
missing_atoms = (sum(atom_coverage, dims=1) .== 0)[:]
end
atom_type = string.(MolecularGraph.atom_symbol(mol))
graph = MolecularGraph.to_dict(mol)["graph"]
bond_mat = zeros(Int, length(atom_type), length(atom_type))
for i in 1:length(graph)
bond_mat[graph[i][1], graph[i][2]] = MolecularGraph.props(mol, graph[i][1], graph[i][2]).order
bond_mat[graph[i][2], graph[i][1]] = MolecularGraph.props(mol, graph[i][1], graph[i][2]).order
end
atom_type = atom_type[missing_atoms]
bond_mat = bond_mat[missing_atoms, missing_atoms]
is_bonded = bond_mat .> 0
ring = MolecularGraph.is_in_ring(mol)[missing_atoms]
ring_string = [if i "" else "!" end for i in ring]
hydrogens = MolecularGraph.total_hydrogens(mol)[missing_atoms]
bonds = MolecularGraph.connectivity(mol)[missing_atoms]
aromatic = MolecularGraph.is_aromatic(mol)[missing_atoms]
hybrid = MolecularGraph.hybridization(mol)[missing_atoms]
atom_type = [if aromatic[i] lowercase(atom_type[i]) else atom_type[i] end for i in 1:length(atom_type)]
natoms = length(atom_type)
smarts = @. "["*atom_type*"X"*string(bonds)*";H"*string(hydrogens)*";"*ring_string*"R"*"]"
names = @. generate_group_name(atom_type, bonds, hydrogens, ring, aromatic, hybrid)
if environment
new_smarts = deepcopy(smarts)
new_names = deepcopy(names)
for i in 1:natoms
bond_orders = bond_mat[i, is_bonded[i,:]]
bonded_smarts = smarts[is_bonded[i,:]]
aromatic_bondable = aromatic[is_bonded[i,:]]
new_smarts[i] = new_smarts[i][1:end-1]*raw";$("*new_smarts[i]*prod([if aromatic_bondable[j] "("*bonded_smarts[j]*")"
elseif bond_orders[j]==2 "(="*bonded_smarts[j]*")"
elseif bond_orders[j]==3 "(#"*bonded_smarts[j]*")"
else "("*bonded_smarts[j]*")" end for j in 1:length(bonded_smarts)])*")]"
new_names[i] = new_names[i]*"("*prod(names[is_bonded[i,:]])*")"
end
smarts = new_smarts
names = new_names
end
if max_group_size == 1
unique_smarts = unique(smarts)
unique_names = String[]
for i in 1:length(unique_smarts)
push!(unique_names, names[findall(isequal(unique_smarts[i]), smarts)[1]])
end
new_groups = [GCPair(unique_smarts[i], unique_names[i]) for i in 1:length(unique_smarts)]
return new_groups
end
occurrence = ones(Int, length(smarts))
for i in 1:natoms
# println(occurrence)
# println(smarts[i])
# If carbon atom on ring
if (atom_type[i] == "C" || atom_type[i] == "c") && ring[i] && (occurrence[i] > 0 || reduced)
for j in 1:natoms
if is_bonded[i,j]
# Bond it to any other atom that is non-carbon atom on a ring or carbon atom not on a ring
if (atom_type[j] == "C" && !ring[j] || (atom_type[j] != "C" && atom_type[j] != "c"))
push!(smarts, smarts[i]*smarts[j])
push!(names, names[i]*names[j])
occurrence[i] -= 1
occurrence[j] -= 1
append!(occurrence, [1])
end
end
end
elseif ((atom_type[i] != "C" && atom_type[i] != "c") || (atom_type[i] == "C" && !ring[i])) && (occurrence[i] > 0 || !reduced)
# Bond it to any other atom that is not a carbon atom
nbonds = sum(is_bonded[i,:].&occurrence[1:natoms] .> 0)
bondable_atom_types = atom_type[1:natoms][is_bonded[i,:].&occurrence[1:natoms] .> 0]
is_carbon = (bondable_atom_types .== "C") .| (bondable_atom_types .== "c")
ncarbons = sum(is_carbon)
nbonds = nbonds - ncarbons
idx_bondable = is_bonded[i,:].&occurrence[1:natoms] .> 0
bondable_smarts = smarts[1:natoms][is_bonded[i,:].&occurrence[1:natoms] .> 0][.!(is_carbon)]
bondable_names = names[1:natoms][is_bonded[i,:].&occurrence[1:natoms] .> 0][.!(is_carbon)]
bondable_atom_types = bondable_atom_types[.!(is_carbon)]
bond_orders = bond_mat[i, is_bonded[i,:].&occurrence[1:natoms] .> 0][.!(is_carbon)]
if isnothing(max_group_size)
max_size = nbonds+1
else
max_size = max_group_size
end
if reduced
min_size = max_size-1
else
min_size = 1
end
# println(smarts[i])
# println(max_size-1)
# println(min_size)
# println(nbonds)
for k in min_size:max_size-1
# println(k)
combs = Combinatorics.combinations(1:nbonds, k)
for comb in combs
# println(bondable_atom_types[comb])
if any((bondable_atom_types[comb] .== "C") .| (bondable_atom_types[comb] .== "c"))
# println(comb)
continue
else
new_smarts = deepcopy(smarts[i])
new_names = deepcopy(names[i])
for l in 1:length(comb)
if bond_orders[comb[l]] == 2
new_smarts *= "(="*bondable_smarts[comb[l]]*")"
elseif bond_orders[comb[l]] == 3
new_smarts *= "(#"*bondable_smarts[comb[l]]*")"
else
new_smarts *= "("*bondable_smarts[comb[l]]*")"
end
new_names *= bondable_names[comb[l]]
end
push!(smarts, new_smarts)
push!(names, new_names)
occurrence[i] -= 1
idx = 1:natoms
idx = idx[idx_bondable][.!(is_carbon)][comb]
occurrence[idx] .-= 1
append!(occurrence, [1])
end
end
end
end
end
if reduced
unique_smarts = unique(smarts[occurrence .> 0])
else
unique_smarts = unique(smarts)
end
# find the names of the unique smarts
unique_names = String[]
occurrence = zeros(Int, length(unique_smarts))
for i in 1:length(unique_smarts)
push!(unique_names, names[findall(x->x==unique_smarts[i], smarts)[1]])
query_i = get_qmol(unique_smarts[i])
occurrence[i] = length(get_substruct_matches(mol,query_i,__bonds))
# println(unique_smarts[i], " ", unique_names[i], " ", occurrence[i])
end
new_groups = [GCPair(unique_smarts[i], unique_names[i]) for i in 1:length(unique_smarts)]
return new_groups
end
function generate_group_name(atom_type::String, bond::Int, hydrogens::Int, ring::Bool, aromatic::Bool, hybrid::Symbol)
name = ""
if !ring # If is not on ring
if hydrogens == 0
name = atom_type
elseif hydrogens == 1
name = atom_type*"H"
else
name = atom_type*"H"*string(hydrogens)
end
elseif ring && lowercase(atom_type) != atom_type # If is on ring and not aromatic
if hydrogens == 0
name = "c"*atom_type
elseif hydrogens == 1
name = "c"*atom_type*"H"
else
name = "c"*atom_type*"H"*string(hydrogens)
end
elseif lowercase(atom_type) == atom_type # If is aromatic
if hydrogens == 0
name = "a"*uppercase(atom_type)
elseif hydrogens == 1
name = "a"*uppercase(atom_type)*"H"
else
name = "a"*uppercase(atom_type)*"H"*string(hydrogens)
end
end
if hybrid == :sp2 && !aromatic
name *= "="
elseif hybrid == :sp
name *= "#"
end
return name
end
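# Worked examples of the naming scheme above (sketch, traced through the function):
#   generate_group_name("C", 4, 3, false, false, :sp3) == "CH3"  # aliphatic, non-ring carbon
#   generate_group_name("c", 3, 1, true,  true,  :sp2) == "aCH"  # aromatic ring carbon
#   generate_group_name("C", 3, 0, false, false, :sp2) == "C="   # non-ring sp2 carbon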
export find_missing_groups_from_smiles

# ============================ next source file ============================

# We use MolecularGraph for Windows, but we need the same API.
function get_mol(smiles)
mol = MolecularGraph.smilestomol(smiles)
return mol
end
function get_qmol(smarts)
return MolecularGraph.smartstomol(smarts)
end
function get_atoms(mol)
0:(length(mol.graph.fadjlist) - 1)
end
function has_substruct_match(mol,query)
MolecularGraph.has_substruct_match(mol,query)
end
function __getbondlist(mol)
res = Set{NTuple{2,Int}}()
adjlist = mol.graph.fadjlist
for (i,xi) in pairs(adjlist)
for j in xi
push!(res,minmax(i,j))
end
end
rvec = sort!(collect(res))
end
function get_substruct_matches(mol,query,bonds = __getbondlist(mol))
matches = MolecularGraph.substruct_matches(mol,query)
res = Dict{String,Vector{Int}}[]
#convert to RDKitLib expected struct.
for match in matches
dictᵢ = Dict{String,Vector{Int}}()
atomsᵢ = collect(keys(match))
if length(atomsᵢ) == 1
bondsᵢ = Int[]
else
bondsᵢ = __getbonds_mg(bonds,atomsᵢ)
end
atomsᵢ .-= 1
dictᵢ["atoms"] = atomsᵢ
dictᵢ["bonds"] = bondsᵢ
push!(res,dictᵢ)
end
return res
end
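# Shape of the returned value (sketch): one Dict per match, e.g. a two-atom match could be
#   Dict("atoms" => [0, 1], "bonds" => [0])
# Atom and bond indices are shifted to be 0-based, mirroring the RDKitLib-style output
# that the rest of the package expects.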
function __getbonds_mg(list,atoms)
res_set = Set{Int}()
n = length(atoms)
for i in 1:n
for j in (i+1):n
ij = minmax(atoms[i],atoms[j])
bond_idx = findfirst(isequal(ij),list)
if bond_idx !== nothing
push!(res_set,bond_idx)
end
end
end
res = sort!(collect(res_set))
res .-= 1
return res
end
# ============================ next source file ============================

const JobackGroups = [GCPair(raw"[CX4H3]","-CH3"),
GCPair(raw"[!R;CX4H2]","-CH2-"),
GCPair(raw"[!R;CX4H]",">CH-"),
GCPair(raw"[!R;CX4H0]",">C<"),
GCPair(raw"[CX3H2][CX3H1]","CH2=CH-"),
GCPair(raw"[CX3H1][CX3H1]","-CH=CH-"),
GCPair(raw"[$([!R;#6X3H0]);!$([!R;#6X3H0]=[#8])]","=C<"),
GCPair(raw"[$([CX2H0](=*)=*)]","=C="),
GCPair(raw"[$([CX2H1]#[!#7])]","CH"),
GCPair(raw"[$([CX2H0]#[!#7])]","C"),
GCPair(raw"[R;CX4H2]","ring-CH2-"),
GCPair(raw"[R;CX4H]","ring>CH-"),
GCPair(raw"[R;CX4H0]","ring>C<"),
GCPair(raw"[R;CX3H1,cX3H1]","ring=CH-"),
GCPair(raw"[$([R;#6X3H0]);!$([R;#6X3H0]=[#8])]","ring=C<"),
GCPair(raw"[F]","-F"),
GCPair(raw"[Cl]","-Cl"),
GCPair(raw"[Br]","-Br"),
GCPair(raw"[I]","-I"),
GCPair(raw"[OX2H;!$([OX2H]-[#6]=[O]);!$([OX2H]-a)]","-OH (alcohol)"),
GCPair(raw"[O;H1;$(O-!@c)]","-OH (phenol)"),
GCPair(raw"[OX2H0;!R;!$([OX2H0]-[#6]=[#8])]","-O- (non-ring)"),
GCPair(raw"[#8X2H0;R;!$([#8X2H0]~[#6]=[#8])]","-O- (ring)"),
GCPair(raw"[$([CX3H0](=[OX1]));!$([CX3](=[OX1])-[OX2]);!R]=O",">C=O (non-ring)"),
GCPair(raw"[$([#6X3H0](=[OX1]));!$([#6X3](=[#8X1])~[#8X2]);R]=O",">C=O (ring)"),
GCPair(raw"[CH;D2](=O)","O=CH- (aldehyde)"),
GCPair(raw"[OX2H]-[C]=O","-COOH (acid)"),
GCPair(raw"[#6X3H0;!$([#6X3H0](~O)(~O)(~O))](=[#8X1])[#8X2H0]","-COO- (ester)"),
GCPair(raw"[OX1H0;!$([OX1H0]~[#6X3]);!$([OX1H0]~[#7X3]~[#8])]","=O (other than above)"),
GCPair(raw"[NX3H2]","-NH2"),
GCPair(raw"[NX3H1;!R]",">NH (non-ring)"),
GCPair(raw"[#7X3H1;R]",">NH (ring)"),
GCPair(raw"[#7X3H0;!$([#7](~O)~O)]",">N- (non-ring)"),
GCPair(raw"[#7X2H0;!R]","-N= (non-ring)"),
GCPair(raw"[#7X2H0;R]","-N= (ring)"),
GCPair(raw"[#7X2H1]","=NH"),
GCPair(raw"[#6X2]#[#7X1H0]","-CN"),
GCPair(raw"[$([#7X3,#7X3+][!#8])](=[O])~[O-]","-NO2"),
GCPair(raw"[SX2H]","-SH"),
GCPair(raw"[#16X2H0;!R]","-S- (non-ring)"),
GCPair(raw"[#16X2H0;R]","-S- (ring)")
]
export JobackGroups

# ============================ next source file ============================

const SAFTgammaMieGroups = [GCPair(raw"[CX4H3]","CH3"),
GCPair(raw"[!R;CX4H2]","CH2"),
GCPair(raw"[!R;CX4H]","CH"),
GCPair(raw"[!R;CX4H0]","C"),
GCPair(raw"[cX3;H1]","aCH"),
GCPair(raw"[cX3;H0][CX4;H2]","aCCH2"),
GCPair(raw"[cX3;H0][CX4;H1]","aCCH"),
GCPair(raw"[CX3H2]","CH2="),
GCPair(raw"[!R;CX3H1;!$([CX3H1](=O))]","CH="),
GCPair(raw"[CH2;R]","cCH2"),
GCPair(raw"[OX2H]-[C]=O","COOH"),
GCPair(raw"[#6X3H0;!$([#6X3H0](~O)(~O)(~O))](=[#8X1])[#8X2H0]","COO"),
GCPair(raw"[OX2H;!$([OX2H]-[#6]=[O]);!$([OX2H]-a)]","OH"),
GCPair(raw"[CX4;H2;!R][OH1]","CH2OH"),
GCPair(raw"[CX4;H1;!R][OH1]","CHOH"),
GCPair(raw"[NX3H2]","NH2"),
GCPair(raw"[NX3H1;!R]","NH"),
GCPair(raw"[#7X3H0;!$([#7](~O)~O)]","N"),
GCPair(raw"[#7X3H1;R]","cNH"),
GCPair(raw"[#7X3H0;R]","cN"),
GCPair(raw"[!R;CX3H0;!$([CX3H0](=O))]","CH="),
GCPair(raw"[cX3;H0][CX4;H3]","aCCH3"),
GCPair(raw"[cX3;H0;R][OX2;H1]","aCOH"),
GCPair(raw"[CH1;R]","cCH"),
GCPair(raw"[CH1;R][NH1;!R]","cCHNH"),
GCPair(raw"[CH1;R][NH0;!R]","cCHN"),
GCPair(raw"[cH0][C;!R](=O)[cH0]","aCCOaC"),
GCPair(raw"[OX2H]-[C](=O)[cH0]","aCCOOH"),
GCPair(raw"[cH0][NH1;!R][cH0]","aCNHaC"),
GCPair(raw"[CH3][CX3](=O)","CH3CO"),
GCPair(raw"[OH0;!R;$([OH0;!R][CH3;!R]);$([OH0;!R][CH2;!R])]","eO"),
GCPair(raw"[OH0;!R;$([OH0;!R][CH2;!R])]","cO")
]
export SAFTgammaMieGroups

# ============================ next source file ============================

const UNIFACGroups = [GCPair(raw"[CX4;H3;!R]","CH3"),
GCPair(raw"[CX4;H2;!R]","CH2"),
GCPair(raw"[CX4;H1;!R]","CH"),
GCPair(raw"[CX4;H0;!R]","C"),
GCPair(raw"[CX3;H2]=[CX3;H1]","CH2=CH"),
GCPair(raw"[CX3;H1]=[CX3;H1]","CH=CH"),
GCPair(raw"[CX3;H2]=[CX3;H0]","CH2=C"),
GCPair(raw"[CX3;H1]=[CX3;H0]","CH=C"),
GCPair(raw"[OH1;$([OH1][CX4H2])]","OH(P)"),
GCPair(raw"[CX4;H3][OX2;H1]","CH3OH"),
GCPair(raw"[OH2]","H2O"),
GCPair(raw"[cX3;H0;R][OX2;H1]","ACOH"),
GCPair(raw"[CX4;H3][CX3;!H1](=O)","CH3CO"),
GCPair(raw"[CX4;H2][CX3;!H1](=O)","CH2CO"),
GCPair(raw"[CX3H1](=O)","CHO"),
GCPair(raw"[CH3][CX3;H0](=[O])[OH0]","CH3COO"),
GCPair(raw"[CX4;H2][CX3](=[OX1])[OX2]","CH2COO"),
GCPair(raw"[CX3;H1](=[OX1])[OX2]","HCOO"),
GCPair(raw"[CH3;!R][OH0;!R]","CH3O"),
GCPair(raw"[CH2;!R][OH0;!R]","CH2O"),
GCPair(raw"[C;H1;!R][OH0;!R]","CHO"),
GCPair(raw"[cX3;H1]","ACH"),
GCPair(raw"[cX3;H0]","AC"),
GCPair(raw"[cX3;H0][CX4;H3]","ACCH3"),
GCPair(raw"[cX3;H0][CX4;H2]","ACCH2"),
GCPair(raw"[cX3;H0][CX4;H1]","ACCH"),
GCPair(raw"[CX4;H2;R;$(C(C)OCC)][OX2;R][CX4;H2;R]","THF"),
GCPair(raw"[CX4;H3][NX3;H2]","CH3NH2"),
GCPair(raw"[CX4;H2][NX3;H2]","CH2NH2"),
GCPair(raw"[CX4;H1][NX3;H2]","CHNH2"),
GCPair(raw"[CX4;H3][NX3;H1]","CH3NH"),
GCPair(raw"[CX4;H2][NX3;H1]","CH2NH"),
GCPair(raw"[CX4;H1][NX3;H1]","CHNH"),
GCPair(raw"[CX4;H3][NX3;H0]","CH3N"),
GCPair(raw"[CX4;H2][NX3;H0]","CH2N"),
GCPair(raw"[c][NX3;H2]","ACNH2"),
GCPair(raw"[cX3H1][n][cX3H1]","AC2H2N"),
GCPair(raw"[cX3H0][n][cX3H1]","AC2HN"),
GCPair(raw"[cX3H0][n][cX3H0]","AC2N"),
GCPair(raw"[CX4;H3][CX2]#[NX1]","CH3CN"),
GCPair(raw"[CX4;H2][CX2]#[NX1]","CH2CN"),
GCPair(raw"[CX3,cX3](=[OX1])[OX2H0,oX2H0]","COO"),
GCPair(raw"[CX3](=[OX1])[O;H1]","COOH"),
GCPair(raw"[CX3;H1](=[OX1])[OX2;H1]","HCOOH"),
GCPair(raw"[CX4;H2;!$(C(Cl)(Cl))](Cl)","CH2CL"),
GCPair(raw"[CX4;H1;!$(C(Cl)(Cl))](Cl)","CHCL"),
GCPair(raw"[CX4;H0](Cl)","CCL"),
GCPair(raw"[CX4;H2;!$(C(Cl)(Cl)(Cl))](Cl)(Cl)","CH2CL2"),
GCPair(raw"[CX4;H1;!$(C(Cl)(Cl)(Cl))](Cl)(Cl)","CHCL2"),
GCPair(raw"[CX4;H0;!$(C(Cl)(Cl)(Cl))](Cl)(Cl)","CCL2"),
GCPair(raw"[CX4;H1;!$([CX4;H0](Cl)(Cl)(Cl)(Cl))](Cl)(Cl)(Cl)","CHCL3"),
GCPair(raw"[CX4;H0;!$([CX4;H0](Cl)(Cl)(Cl)(Cl))](Cl)(Cl)(Cl)","CCL3"),
GCPair(raw"[CX4;H0]([Cl])([Cl])([Cl])([Cl])","CCL4"),
GCPair(raw"[c][Cl]","ACCL"),
GCPair(raw"[CX4;H3][NX3](=[OX1])([OX1])","CH3NO2"),
GCPair(raw"[CX4;H2][NX3](=[OX1])([OX1])","CH2NO2"),
GCPair(raw"[CX4;H1][NX3](=[OX1])([OX1])","CHNO2"),
GCPair(raw"[cX3][NX3](=[OX1])([OX1])","ACNO2"),
GCPair(raw"C(=S)=S","CS2"),
GCPair(raw"[SX2H][CX4;H3]","CH3SH"),
GCPair(raw"[SX2H][CX4;H2]","CH2SH"),
GCPair(raw"c1cc(oc1)C=O","FURFURAL"),
GCPair(raw"[OX2;H1][CX4;H2][CX4;H2][OX2;H1]","DOH"),
GCPair(raw"[I]","I"),
GCPair(raw"[Br]","BR"),
GCPair(raw"[CX2;H1]#[CX2;H0]","CH=-C"),
GCPair(raw"[CX2;H0]#[CX2;H0]","C=-C"),
GCPair(raw"[SX3H0](=[OX1])([CX4;H3])[CX4;H3]","DMSO"),
GCPair(raw"[CX3;H2]=[CX3;H1][CX2;H0]#[NX1;H0]","ACRY"),
GCPair(raw"[$([Cl;H0]([C]=[C]))]","CL-(C=C)"),
GCPair(raw"[CX3;H0]=[CX3;H0]","C=C"),
GCPair(raw"[cX3][F]","ACF"),
GCPair(raw"[CX4;H3][N]([CX4;H3])[CX3;H1]=[O]","DMF"),
GCPair(raw"[NX3]([CX4;H2])([CX4;H2])[CX3;H1](=[OX1])","HCON(CH2)2"),
GCPair(raw"C(F)(F)F","CF3"),
GCPair(raw"C(F)F","CF2"),
GCPair(raw"C(F)","CF"),
GCPair(raw"[CH2;R]","CY-CH2"),
GCPair(raw"[CH1;R]","CY-CH"),
GCPair(raw"[CH0;R]","CY-C"),
GCPair(raw"[OH1;$([OH1][CX4H1])]","OH(S)"),
GCPair(raw"[OH1;$([OH1][CX4H0])]","OH(T)"),
GCPair(raw"[CX4H2;R][OX2;R;$(O(CC)C)][CX4H2;R][OX2;R][CX4H2;R]","CY-CH2O", 1, 2),
GCPair(raw"[CX4H2;R][OX2;R;$(O(C)C)]","TRIOXAN"),
GCPair(raw"[CX4H0][NH2]","CNH2"),
GCPair(raw"[OX1H0]=[C;R][NX3H0;R][CH3]","NMP"),
GCPair(raw"[OX1H0]=[CH0X3;R][H0;R][CH2]","NEP"),
GCPair(raw"[OX1H0;!R]=[CX3H0;R][NX3H0;R][C;!R]","NIPP"),
GCPair(raw"[OX1H0;!R]=[CH0X3;R][NX3H0;R][CH0;!R]","NTBP"),
GCPair(raw"[CX3H0](=[OX1H0])[NX3H2]","CONH2"),
GCPair(raw"[OX1H0;!R]=[CX3H0;!R][NH1X3;!R][CH3;!R]","CONHCH3"),
GCPair(raw"[CH2X4;!R][NH1X3;!R][CX3H0;!R]=[OX1H0;!R]","CONHCH2")]
export UNIFACGroups

# ============================ next source file ============================

include("Joback.jl")
include("SAFTgammaMie.jl")
include("ogUNIFAC.jl")
include("UNIFAC.jl")
include("gcPCSAFT.jl")
include("gcPPCSAFT.jl") | GCIdentifier | https://github.com/ClapeyronThermo/GCIdentifier.jl.git |
|
[
"MIT"
] | 0.3.5 | 3e53af9fa1ce5e05ed4201758f5518b6268303b9 | code | 780 | const gcPCSAFTGroups = [
GCPair(raw"[CX4H3]", "CH3"),
GCPair(raw"[!R;CX4H2]", "CH2"),
GCPair(raw"[!R;CX4H]", "CH"),
GCPair(raw"[!R;CX4H0]", "C"),
GCPair(raw"[CX3H2]", "CH2="),
GCPair(raw"[!R;CX3H1;!$([CX3H1](=O))]", "CH="),
GCPair(raw"[$([!R;#6X3H0]);!$([!R;#6X3H0]=[#8])]", "=C<"),
GCPair(raw"[CX2;H1]#[CX2;H0]", "C#CH"),
GCPair(raw"[CH2;R1;$(C1CCCC1)]", "cCH2_pen"),
GCPair(raw"[CH1;R1;$(C1CCCC1)]", "cCH_pen"),
GCPair(raw"[CH2;R1;$(C1CCCCC1)]", "cCH2_hex"),
GCPair(raw"[CH1;R1;$(C1CCCCC1)]", "cCH_hex"),
GCPair(raw"[cX3;H1]", "aCH"),
GCPair(raw"[cX3;H0]", "aCH"),
GCPair(raw"[OX2H;!$([OX2H]-[#6]=[O]);!$([OX2H]-a)]", "OH"),
GCPair(raw"[NX3H2]", "NH2"),
GCPair(raw"[O;H2]", "H2O")
]
export gcPCSAFTGroups
# ============================ next source file ============================

const gcPPCSAFTGroups = [GCPair(raw"[CX4H3]","CH3"),
GCPair(raw"[!R;CX4H2]","CH2"),
GCPair(raw"[!R;CX4H]","CH"),
GCPair(raw"[!R;CX4H0]","C"),
GCPair(raw"[CX3H2]","CH2="),
GCPair(raw"[!R;CX3H1;!$([CX3H1](=O))]","CH="),
GCPair(raw"[$([!R;#6X3H0]);!$([!R;#6X3H0]=[#8])]","C="),
GCPair(raw"[CX2;H1]#[CX2;H0]","CH#C"),
GCPair(raw"[CH2;R1;$(C1AAAA1)]","cCH2_pent"),
GCPair(raw"[CH1;R1;$(C1AAAA1)]","cCH_pent"),
GCPair(raw"[CH2;R1;$(C1AAAAA1)]","cCH2_hex"),
GCPair(raw"[CH1;R1;$(C1AAAAA1)]","cCH_hex"),
GCPair(raw"[cX3;H1]","aCH"),
GCPair(raw"[cX3;H0]","aC"),
GCPair(raw"[OX2H;!$([OX2H]-[#6]=[O]);!$([OX2H]-a)]","OH"),
GCPair(raw"[NX3H2]","NH2"),
GCPair(raw"[CX3H0](=O)","C=O"),
GCPair(raw"[CH;D2](=O)","CH=O"),
GCPair(raw"[#6X3H0](=[#8X1])[#8X2H0]","COO"),
GCPair(raw"[CX4H3][OX2H0;!R;!$([OX2H0]-[#6]=[#8])]","OCH3"),
GCPair(raw"[CX4H2][OX2H0;!R;!$([OX2H0]-[#6]=[#8])]","OCH2"),
GCPair(raw"[CH;D2](=O)[OX2H0;!R;!$([OX2H0]-[#6]=[#8])]","HCOO")]
export gcPPCSAFTGroups

# ============================ next source file ============================

const ogUNIFACGroups = [GCPair(raw"[CX4;H3;!R]","CH3"),
GCPair(raw"[CX4;H2;!R]","CH2"),
GCPair(raw"[CX4;H1;!R]","CH"),
GCPair(raw"[CX4;H0;!R]","C"),
GCPair(raw"[CX3;H2]=[CX3;H1]","CH2=CH"),
GCPair(raw"[CX3;H1]=[CX3;H1]","CH=CH"),
GCPair(raw"[CX3;H2]=[CX3;H0]","CH2=C"),
GCPair(raw"[CX3;H1]=[CX3;H0]","CH=C"),
GCPair(raw"[cX3;H1]","ACH"),
GCPair(raw"[cX3;H0]","AC"),
GCPair(raw"[cX3;H0][CX4;H3]","ACCH3"),
GCPair(raw"[cX3;H0][CX4;H2]","ACCH2"),
GCPair(raw"[cX3;H0][CX4;H1]","ACCH"),
GCPair(raw"[OH1;$([OH1][CX4H2])]","OH(P)"),
GCPair(raw"[CX4;H3][OX2;H1]","CH3OH"),
GCPair(raw"[OH2]","H2O"),
GCPair(raw"[cX3;H0;R][OX2;H1]","ACOH"),
GCPair(raw"[CX4;H3][CX3](=O)","CH3CO"),
GCPair(raw"[CX4;H2][CX3](=O)","CH2CO"),
GCPair(raw"[CX3H1](=O)","CHO"),
GCPair(raw"[CH3][CX3;H0](=[O])[O]","CH3COO"),
GCPair(raw"[CX4;H2][CX3](=[OX1])[OX2]","CH2COO"),
GCPair(raw"[CX3;H1](=[OX1])[OX2]","HCOO"),
GCPair(raw"[CH3;!R][OH0;!R]","CH3O"),
GCPair(raw"[CH2;!R][OH0;!R]","CH2O"),
GCPair(raw"[C;H1;!R][OH0;!R]","CHO"),
GCPair(raw"[CX3H1](=O)","HCO"),
GCPair(raw"[CX4;H2;R][OX2;R][CX4;H2;R]","THF"),
GCPair(raw"[CX4;H3][NX3;H2]","CH3NH2"),
GCPair(raw"[CX4;H2][NX3;H2]","CH2NH2"),
GCPair(raw"[CX4;H1][NX3;H2]","CHNH2"),
GCPair(raw"[CX4;H3][NX3;H1]","CH3NH"),
GCPair(raw"[CX4;H2][NX3;H1]","CH2NH"),
GCPair(raw"[CX4;H1][NX3;H1]","CHNH"),
GCPair(raw"[CX4;H3][NX3;H0]","CH3N"),
GCPair(raw"[CX4;H2][NX3;H0]","CH2N"),
GCPair(raw"[c][NX3;H2]","ACNH2"),
GCPair(raw"[cX3H1][n][cX3H1]","AC2H2N"),
GCPair(raw"[cX3H0][n][cX3H1]","AC2HN"),
GCPair(raw"[cX3H0][n][cX3H0]","AC2N"),
GCPair(raw"[CX4;H3][CX2]#[NX1]","CH3CN"),
GCPair(raw"[CX4;H2][CX2]#[NX1]","CH2CN"),
GCPair(raw"[CX3,cX3](=[OX1])[OX2H0,oX2H0]","COO"),
GCPair(raw"[CX3](=[OX1])[O;H1]","COOH"),
GCPair(raw"[CX3;H1](=[OX1])[OX2;H1]","HCOOH"),
GCPair(raw"[CX4;H2;!$(C(Cl)(Cl))](Cl)","CH2CL"),
GCPair(raw"[CX4;H1;!$(C(Cl)(Cl))](Cl)","CHCL"),
GCPair(raw"[CX4;H0](Cl)","CCL"),
GCPair(raw"[CX4;H2;!$(C(Cl)(Cl)(Cl))](Cl)(Cl)","CH2CL2"),
GCPair(raw"[CX4;H1;!$(C(Cl)(Cl)(Cl))](Cl)(Cl)","CHCL2"),
GCPair(raw"[CX4;H0;!$(C(Cl)(Cl)(Cl))](Cl)(Cl)","CCL2"),
GCPair(raw"[CX4;H1;!$([CX4;H0](Cl)(Cl)(Cl)(Cl))](Cl)(Cl)(Cl)","CHCL3"),
GCPair(raw"[CX4;H0;!$([CX4;H0](Cl)(Cl)(Cl)(Cl))](Cl)(Cl)(Cl)","CCL3"),
GCPair(raw"[CX4;H0]([Cl])([Cl])([Cl])([Cl])","CCL4"),
GCPair(raw"[c][Cl]","ACCL"),
GCPair(raw"[CX4;H3][NX3](=[OX1])([OX1])","CH3NO2"),
GCPair(raw"[CX4;H2][NX3](=[OX1])([OX1])","CH2NO2"),
GCPair(raw"[CX4;H1][NX3](=[OX1])([OX1])","CHNO2"),
GCPair(raw"[cX3][NX3](=[OX1])([OX1])","ACNO2"),
GCPair(raw"C(=S)=S","CS2"),
GCPair(raw"[SX2H][CX4;H3]","CH3SH"),
GCPair(raw"[SX2H][CX4;H2]","CH2SH"),
GCPair(raw"c1cc(oc1)C=O","FURFURAL"),
GCPair(raw"[OX2;H1][CX4;H2][CX4;H2][OX2;H1]","DOH"),
GCPair(raw"[I]","I"),
GCPair(raw"[Br]","BR"),
GCPair(raw"[CX2;H1]#[CX2;H0]","CH=-C"),
GCPair(raw"[CX2;H0]#[CX2;H0]","C=-C"),
GCPair(raw"[SX3H0](=[OX1])([CX4;H3])[CX4;H3]","DMSO"),
GCPair(raw"[CX3;H2]=[CX3;H1][CX2;H0]#[NX1;H0]","ACRY"),
GCPair(raw"[$([Cl;H0]([C]=[C]))]","CL-(C=C)"),
GCPair(raw"[CX3;H0]=[CX3;H0]","C=C"),
GCPair(raw"[cX3][F]","ACF"),
GCPair(raw"[CX4;H3][N]([CX4;H3])[CX3;H1]=[O]","DMF"),
GCPair(raw"[NX3]([CX4;H2])([CX4;H2])[CX3;H1](=[OX1])","HCON(CH2)2"),
GCPair(raw"C(F)(F)F","CF3"),
GCPair(raw"C(F)F","CF2"),
GCPair(raw"C(F)","CF"),
GCPair(raw"[OH1;$([OH1][CX4H1])]","OH(S)"),
GCPair(raw"[OH1;$([OH1][CX4H0])]","OH(T)"),
GCPair(raw"[CX4H2;R][OX2;R]","TRIOXAN"),
GCPair(raw"[CX4H0][NH2]","CNH2"),
GCPair(raw"[OX1H0]=[C;R][NX3H0;R][CH3]","NMP"),
GCPair(raw"[OX1H0]=[CH0X3;R][H0;R][CH2]","NEP"),
GCPair(raw"[OX1H0;!R]=[CX3H0;R][NX3H0;R][C;!R]","NIPP"),
GCPair(raw"[OX1H0;!R]=[CH0X3;R][NX3H0;R][CH0;!R]","NTBP"),
GCPair(raw"[CX3H0](=[OX1H0])[NX3H2]","CONH2"),
GCPair(raw"[OX1H0;!R]=[CX3H0;!R][NH1X3;!R][CH3;!R]","CONHCH3"),
GCPair(raw"[CH2X4;!R][NH1X3;!R][CX3H0;!R]=[OX1H0;!R]","CONHCH2")]
export ogUNIFACGroups

# ============================ next source file ============================

using GCIdentifier
using Test
using GCIdentifier: @gcstring_str
@testset "All tests" begin
include("test_group_search.jl")
include("test_missing_groups.jl")
include("test_ext_clapeyron.jl")
include("test_ext_chemicalidentifiers.jl")
end

# ============================ next source file ============================

using ChemicalIdentifiers
@testset "Extension - ChemicalIdentifiers" begin
name = "ibuprofen"
(component, groups) = get_groups_from_name(name, UNIFACGroups)
@test isequal(groups, ["COOH" => 1,
"CH3" => 3,
"CH" => 1,
"ACH" => 4,
"ACCH2" => 1,
"ACCH" => 1])
end

# ============================ next source file ============================

using Clapeyron
@testset "Extension - Clapeyron" begin
smiles = "c1ccccc1C(CCCC)(CCCC(C))C"
(component, groups) = get_groups_from_smiles(smiles, JobackIdeal)
@test isequal(groups,["-CH3" => 3,
"-CH2-" => 7,
">C<" => 1,
"ring=CH-" => 5,
"ring=C<" => 1])
end

# ============================ next source file ============================

function test_gcmatch(groups,smiles,result)
evaluated = Set(get_groups_from_smiles(smiles,groups,check = false)[2])
reference = Set(result)
@test isequal(evaluated,reference)
end
test_gcmatch(groups) = (smiles,result) -> test_gcmatch(groups,smiles,result)
@testset "UNIFAC examples" begin
#http://www.aim.env.uea.ac.uk/aim/info/UNIFACgroups.html
unifac = test_gcmatch(UNIFACGroups)
#alkane group
unifac("CC",gcstring"CH3:2") #ethane
unifac("CCCC",gcstring"CH3:2;CH2:2") #n-butane
unifac("CC(C)C",gcstring"CH3:3;CH:1") #isobutane
unifac("CC(C)(C)C",gcstring"CH3:4;C:1") #neopentane
#alpha-olefin group
unifac("CCCCC=C",gcstring"CH3:1;CH2:3;CH2=CH:1") #hexene-1
unifac("CCCC=CC",gcstring"CH3:2;CH2:2;CH=CH:1") #hexene-2
unifac("CCC(C)=C",gcstring"CH3:2;CH2:1;CH2=C:1") #2-methyl-1-butene
unifac("CC=C(C)C",gcstring"CH3:3;CH=C:1") #2-methyl-2-butene
unifac("CC(=C(C)C)C",gcstring"CH3:4;C=C:1") #2,3-dimethylbutene
#aromatic carbon
unifac("c1c2ccccc2ccc1",gcstring"ACH:8;AC:2") #napthaline
unifac("c1ccccc1C=C",gcstring"ACH:5;AC:1;CH2=CH:1") #styrene
#aromatic carbon-alkane
unifac("Cc1ccccc1",gcstring"ACH:5;ACCH3:1") #toluene
unifac("CCc1ccccc1",gcstring"ACH:5;ACCH2:1;CH3:1") #ethylbenzene
unifac("CC(C)c1ccccc1",gcstring"ACH:5;ACCH:1;CH3:2") #cumene
#alcohol
unifac("CC(O)C",gcstring"CH3:2;CH:1;OH(S):1") #2-propanol
unifac("CCO",gcstring"CH3:1;CH2:1;OH(P):1") #ethanol
#methanol
unifac("CO",gcstring"CH3OH:1") #methanol
#water
unifac("O",gcstring"H2O:1") #water
#aromatic carbon-alcohol
unifac("Oc1ccccc1",gcstring"ACH:5;ACOH:1") #phenol
#carbonyl
unifac("O=C(C)CC",gcstring"CH3:1;CH2:1;CH3CO:1") #butanone
unifac("O=C(CC)CC",gcstring"CH3:2;CH2:1;CH2CO:1") #pentanone-3
#aldehyde
unifac("CCC=O",gcstring"CH3:1;CH2:1;CHO:1") #propionaldehyde , fails at the matching stage
#acetate group
unifac("CCCCOC(=O)C",gcstring"CH3:1;CH2:3;CH3COO:1") #Butyl acetate #fails at the matching stage
unifac("O=C(OC)CC",gcstring"CH3:2;CH2COO:1") #methyl propionate #fails at the matching stage
#formate group
unifac("O=COCC",gcstring"CH3:1;CH2:1;HCOO:1") #ethyl formate
#ether
unifac("COC",gcstring"CH3:1;CH3O:1") #dimethyl ether
unifac("CCOCC",gcstring"CH3:2;CH2:1;CH2O:1") #diethyl ether
unifac("O(C(C)C)C(C)C",gcstring"CH3:4;CH:1;CHO:1") #diisopropyl ether
unifac("C1CCOC1",gcstring"CY-CH2:2;THF:1") #tetrahydrofuran #check?
#primary amine
unifac("CN",gcstring"CH3NH2:1") #methylamine
unifac("CCN",gcstring"CH3:1;CH2NH2:1") #ethylamine
unifac("CC(C)N",gcstring"CH3:2;CHNH2:1") #isopropyl amine
#secondary amine group
unifac("CNC",gcstring"CH3:1;CH3NH:1") #dimethylamine
unifac("CCNCC",gcstring"CH3:2;CH2:1;CH2NH:1") #diethylamine
unifac("CC(C)NC(C)C",gcstring"CH3:4;CH:1;CHNH:1") #diisopropyl amine
#tertiary amine
unifac("CN(C)C",gcstring"CH3:2;CH3N:1") #trimethylamine
unifac("CCN(CC)CC",gcstring"CH3:3;CH2:2;CH2N:1") #triethylamine
#aromatic amine
unifac("c1ccc(cc1)N",gcstring"ACH:5;ACNH2:1") #aniline
#furfural
unifac("c1cc(oc1)C=O",gcstring"FURFURAL:1") #furfural
#carboxylic acids
unifac("CC(=O)O",gcstring"CH3:1;COOH:1") #acetic acid
#cyclic ethers
unifac("C1OCOCO1",gcstring"TRIOXAN:3")
unifac("C1OCOCC1",gcstring"CY-CH2:1;CY-CH2O:2")
unifac("C1OCCC1",gcstring"CY-CH2:2;THF:1")
#non-unique group assignment
unifac("c1ccccc1COCCOCC",gcstring"ACH:5;ACCH2:1;CH2O:2;CH2:1;CH3:1")
end
@testset "gcPPC-SAFT Connectivity" begin
# Test a highly branched hydrocarbon
smiles = "c1ccccc1C(CCCC)(CCCC(C))C"
(component, groups, connectivity) = get_groups_from_smiles(smiles, gcPPCSAFTGroups; connectivity=true)
@test isequal(["CH2" => 7, "aCH" => 5, "CH3" => 3, "aC" => 1, "C" => 1] |> Set,Set(groups))
@test isequal([("aCH", "aCH") => 4, ("CH2", "C") => 2, ("C", "aC") => 1, ("aCH", "aC") => 2, ("CH3", "CH2") => 3, ("CH2", "CH2") => 5, ("CH3", "C") => 1] |> Set,Set(connectivity))
# Test case where order matters
# Test a highly branched hydrocarbon
smiles = "CC(=O)OC"
(component, groups, connectivity) = get_groups_from_smiles(smiles, gcPPCSAFTGroups; connectivity=true)
@test isequal(["CH3" => 2, "COO" => 1] |> Set,Set(groups))
@test isequal([("CH3", "COO") => 2] |> Set,Set(connectivity))
end

# ============================ next source file ============================

@testset "Missing groups functionality" begin
@testset "Ketones in SAFT-γ Mie" begin
smiles = "CCC(=O)CC"
@test_throws ErrorException get_groups_from_smiles(smiles,SAFTgammaMieGroups)
new_groups = find_missing_groups_from_smiles(smiles, SAFTgammaMieGroups)
@test isequal(new_groups,[GCIdentifier.GCPair("[CX3;H0;!R]", "C="),
GCIdentifier.GCPair("[OX1;H0;!R]", "O="),
GCIdentifier.GCPair("[CX3;H0;!R](=[OX1;H0;!R])", "C=O=")])
new_groups = find_missing_groups_from_smiles(smiles, SAFTgammaMieGroups; max_group_size=1)
@test isequal(new_groups,[GCIdentifier.GCPair("[CX3;H0;!R]", "C="),
GCIdentifier.GCPair("[OX1;H0;!R]", "O=")])
new_groups = find_missing_groups_from_smiles(smiles, SAFTgammaMieGroups; reduced = true)
@test isequal(new_groups,[GCIdentifier.GCPair("[CX3;H0;!R](=[OX1;H0;!R])", "C=O=")])
new_groups = find_missing_groups_from_smiles("CCC(=O)CC", SAFTgammaMieGroups; environment=true)
@test isequal(new_groups, [GCIdentifier.GCPair("[CX3;H0;!R;\$([CX3;H0;!R](=[OX1;H0;!R]))]", "C=(O=)"),
GCIdentifier.GCPair("[OX1;H0;!R;\$([OX1;H0;!R](=[CX3;H0;!R]))]", "O=(C=)"),
GCIdentifier.GCPair("[CX3;H0;!R;\$([CX3;H0;!R](=[OX1;H0;!R]))](=[OX1;H0;!R;\$([OX1;H0;!R](=[CX3;H0;!R]))])", "C=(O=)O=(C=)")])
end
@testset "Generic groups for AMP" begin
smiles = "C1=NC(=C2C(=N1)N(C=N2)C3C(C(C(O3)COP(=O)(O)O)O)O)N"
groups = find_missing_groups_from_smiles(smiles)
@test isequal(groups[end-1], GCIdentifier.GCPair("[PX4;H0;!R]([OX2;H0;!R])(=[OX1;H0;!R])([OX2;H1;!R])([OX2;H1;!R])", "POO=OHOH"))
groups = find_missing_groups_from_smiles(smiles; reduced = true)
@test isequal(groups[end-2], GCIdentifier.GCPair("[OX2;H0;!R]([PX4;H0;!R])", "OP"))
groups = find_missing_groups_from_smiles(smiles; reduced = true, max_group_size = 5)
@test isequal(groups[end], GCIdentifier.GCPair("[PX4;H0;!R]([OX2;H0;!R])(=[OX1;H0;!R])([OX2;H1;!R])([OX2;H1;!R])", "POO=OHOH"))
end
end
# ============================ next file (docs) ============================

[Build Status](https://github.com/ClapeyronThermo/GCIdentifier.jl/actions)
[Coverage](https://codecov.io/gh/ClapeyronThermo/GCIdentifier.jl)
[Documentation](https://clapeyronthermo.github.io/GCIdentifier.jl/dev)
[Chat on Zulip](https://julialang.zulipchat.com/#narrow/stream/265161-Clapeyron.2Ejl)
[DOI](https://zenodo.org/badge/latestdoi/595426258)
# GCIdentifier.jl
Welcome to GCIdentifier! This module provides utilities needed to fragment a given molecular SMILES (or name) based on the groups provided in existing group-contribution methods (such as UNIFAC, Joback's method and SAFT-$\gamma$ Mie). Additional functionalities have been provided to automatically identify and propose new groups.

## Basic Usage
Once installed (more details below), GCIdentifier can easily be called upon:
```julia
julia> using GCIdentifier
```
Let's consider the case where we want to get the groups for ibuprofen from the UNIFAC group-contribution method. The SMILES representation for ibuprofen is CC(Cc1ccc(cc1)C(C(=O)O)C)C. To get the corresponding groups, simply use:
```julia
julia> (component,groups) = get_groups_from_smiles("CC(Cc1ccc(cc1)C(C(=O)O)C)C", UNIFACGroups)
("CC(Cc1ccc(cc1)C(C(=O)O)C)C", ["COOH" => 1, "CH3" => 3, "CH" => 1, "ACH" => 4, "ACCH2" => 1, "ACCH" => 1])
```
If the SMILES representation is not known, it is possible to use GCIdentifier in conjunction with [ChemicalIdentifiers](https://github.com/longemen3000/ChemicalIdentifiers.jl) where one can simply specify the molecule name:
```julia
julia> using ChemicalIdentifiers
julia> (component,groups) = get_groups_from_name("ibuprofen",UNIFACGroups)
("ibuprofen", ["COOH" => 1, "CH3" => 3, "CH" => 1, "ACH" => 4, "ACCH2" => 1, "ACCH" => 1])
```
These groups can then be used in packages such as [Clapeyron](https://github.com/ClapeyronThermo/Clapeyron.jl) to be used to obtain our desired properties:
```julia
julia> using Clapeyron
julia> model = UNIFAC(["water",(component,groups)])
julia> activity_coefficient(model,1e5,298.15,[1.,0.])
2-element Vector{Float64}:
1.0
421519.07740198134
```
More details have been provided in the [docs](https://clapeyronthermo.github.io/GCIdentifier.jl/dev/) where we provide additional details regarding how one can obtain the connectivity between groups, identifying new groups within a structure and how one could apply their own GC method.
## Installing GCIdentifier
The minimum supported version is Julia 1.6. To install GCIdentifier, launch Julia with
```julia
> julia
```
Hit the ```]``` key to enter Pkg mode, then type
```julia
Pkg> add GCIdentifier
```
Exit Pkg mode by hitting backspace.
Now you may begin using functions from the GCIdentifier library by entering the command
```julia
using GCIdentifier
```
To remove the package, hit the ```]``` key to enter Pkg mode, then type
```julia
Pkg> rm GCIdentifier
```
## Contributing
If you'd like to make a contribution to GCIdentifier, you can:
* Report an issue: If you encounter an error in which groups are assigned or any other type of error, feel free to raise an issue and one of the developers will try to address it. If you've found the source of the error and fixed it yourself, feel free to also open a Pull Request so that it can be reviewed and pushed to the main branch.
* Implement your own modifications: We are always open to making changes to the source code and documentation! If you'd like to add your own groups and/or functionalities to the package, feel free to open a pull request and one of the developers will review it before merging it with the main branch.
* General support: If you have questions regarding the usage of this package or are having difficulties getting started, feel free to open a Discussion on GitHub or contact us directly on Zulip (link in badge above)!

```@meta
CurrentModule = GCIdentifier
```
## Contents
```@contents
Pages = ["api.md"]
```
## Index
```@index
Pages = ["api.md"]
```
## types and methods
```@docs
GCIdentifier.GCPair
GCIdentifier.get_groups_from_smiles
GCIdentifier.get_groups_from_name
GCIdentifier.find_missing_groups_from_smiles
GCIdentifier.get_grouplist
GCIdentifier.@gcstring_str
GCIdentifier.group_replace
```

# Defining Custom Groups
Within GCIdentifier, we support the following group-contribution methods:
* Joback's Method
* Original UNIFAC
* (Dortmund) UNIFAC
* gcPC-SAFT
* gcPPC-SAFT
* SAFT-$\gamma$ Mie
There are many more available that we have yet to implement. If you wish to do so yourself, all that needs to be done is to define a vector of `GCPair`s. `GCPair` is a struct containing the group SMARTS and name:
```julia
julia> group = GCPair("[CX4H3]","CH3")
julia> group.name
"CH3"
julia> group.smarts
"[CX4H3]"
```
While the group name is entirely arbitrary, one must be very careful when defining the SMARTS as this is the information used by GCIdentifier (and MolecularGraph) to identify the groups. To learn more about SMARTS, one can consult:
* [Guide to understanding the SMARTS nomenclature](https://www.daylight.com/dayhtml/doc/theory/theory.smarts.html)
* [Tool to visualise SMARTS](https://smarts.plus)
Once the user is certain of their SMARTS representation and made the vector of `GCPair`s, then one can simply feed in this vector (`GroupList`) into `get_groups_from_smiles`:
```julia
julia> GroupList = [GCPair("[CX4H3]","CH3"),GCPair("[CX4H2]","CH2")]
julia> get_groups_from_smiles("CCCC", GroupList)
("CCCC", ["CH3" => 2, "CH2" => 2])
```
If you've defined your own `GroupList` for an existing (or new) group-contribution method, feel free to open a pull request!
# Group Assignment
The primary function of GCIdentifier is to automatically assign groups to a molecule given a specific group-contribution method. This assumes that the groups in the method being used are extensive enough to cover most molecules that might be of interest.
Let's consider the case where we want to get the groups for ibuprofen from the UNIFAC group-contribution method. The SMILES representation for ibuprofen is CC(Cc1ccc(cc1)C(C(=O)O)C)C. To get the corresponding groups, simply use:
```julia
julia> (smiles,groups) = get_groups_from_smiles("CC(Cc1ccc(cc1)C(C(=O)O)C)C", UNIFACGroups)
("CC(Cc1ccc(cc1)C(C(=O)O)C)C", ["COOH" => 1, "CH3" => 3, "CH" => 1, "ACH" => 4, "ACCH2" => 1, "ACCH" => 1])
```
where `smiles` is the molecular SMILES and `groups` is a vector of pairs whose first element is the group name and whose second element is the number of times that group occurs within the molecule. If this function fails, it is usually because either the SMILES is unphysical or the method used does not cover all atoms present in the molecule. In the latter case, one can still obtain the groups that have been identified by specifying `check=false` in the optional arguments. For example, SAFT-$\gamma$ Mie does not have a functional group for ketones:
```julia
julia> (smiles,groups) = get_groups_from_smiles("CCC(=O)CC", SAFTgammaMieGroups; check=false)
("CCC(=O)CC", ["CH3" => 2, "CH2" => 2])
```
To propose new groups that cover the missing atoms, take a look at our [`find_missing_groups` function](./missing_groups.md).
## Connectivity
There are certain group-contribution approaches, such as gcPPC-SAFT and s-SAFT-$\gamma$ Mie, where information about how groups are linked to each other is required. It is possible to obtain this connectivity information from GCIdentifier by simply specifying `connectivity=true` within the `get_groups_from_smiles` function. For example, in the case of acetone:
```julia
julia> (smiles,groups,connectivity) = get_groups_from_smiles("CC(=O)C", gcPPCSAFTGroups; connectivity=true)
("CC(=O)C", ["C=O" => 1, "CH3" => 2], [("C=O", "CH3") => 2])
```
where `connectivity` is a vector of pairs whose first element is the pair of groups involved in the link and whose second element is the number of times this link occurs.
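For instance (a small sketch building on the acetone example above), the connectivity vector can be turned into a dictionary to look up the number of bonds between a specific pair of groups:
```julia
julia> Dict(connectivity)[("C=O", "CH3")]
2
```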
## Extensions
Unfortunately, the process of obtaining the SMILES representation of a molecule can itself be a challenge. As such, we have included an extension of GCIdentifier where, if used in conjunction with [ChemicalIdentifiers](https://github.com/longemen3000/ChemicalIdentifiers.jl), one can simply specify the molecule name:
```julia
julia> using ChemicalIdentifiers
julia> (component,groups) = get_groups_from_name("ibuprofen",UNIFACGroups)
("ibuprofen", ["COOH" => 1, "CH3" => 3, "CH" => 1, "ACH" => 4, "ACCH2" => 1, "ACCH" => 1])
```
This should greatly simplify the use of GCIdentifier. These groups can then be used in packages such as [Clapeyron](https://github.com/ClapeyronThermo/Clapeyron.jl) to obtain our desired properties, such as the solubility of ibuprofen in water:
```julia
julia> using Clapeyron
julia> liquid = UNIFAC(["water",(component,groups)])
julia> solid = SolidHfus(["water","ibuprofen"])
julia> model = CompositeModel(["water","ibuprofen"]; solid=solid, liquid=liquid)
julia> sle_solubility(model,1e5,298.15, [1.,0.]; solute=["ibuprofen"])[2]
5.547514144547524e-7
```

```@meta
CurrentModule = GCIdentifier
```
# GCIdentifier.jl
Welcome to GCIdentifier! This module provides utilities needed to fragment a given molecular SMILES (or name) based on the groups provided in existing group-contribution methods (such as UNIFAC, Joback's method and SAFT-$\gamma$ Mie). Additional functionalities have been provided to automatically identify and propose new groups.
Group-contribution approaches are vital when it comes to computer-aided molecular design (CAMD) of, for example, novel refrigerants or in drug discovery, where their ability to accurately predict physical properties for new species aids in evaluating the performance of a hypothetical molecule. Here, the assignment of groups must be done thousands of times and, in some cases, for rather complex molecules. This is the primary motivator for the development of GCIdentifier.
The documentation is laid out as follows:
- **Group Assignment**: Find out how to assign groups to a species within a group-contribution method.
- **Finding Missing Groups**: Find out how to identify missing groups for a given species.
- **Custom Groups**: Find out how to implement your own groups within GCIdentifier.
- **API**: A list of all available methods.
### Authors
- [Pierre J. Walker](mailto:[email protected]), California Institute of Technology
- [Andrés Riedemann](mailto:[email protected]), University of Concepción
### License
GCIdentifier.jl is licensed under the [MIT license](https://github.com/ClapeyronThermo/GCIdentifier.jl/blob/master/LICENSE.md).
### Installation
GCIdentifier.jl is a registered package; it can be installed from the general registry by:
```
pkg> add GCIdentifier
```

# Find Missing Groups
In some cases, our group contribution method will not have every group needed to cover every atom within a molecule. The example we gave previously was how SAFT-$\gamma$ Mie lacked a group for ketones:
```julia
julia> (smiles,groups) = get_groups_from_smiles("CCC(=O)CC", SAFTgammaMieGroups)
ERROR: Could not find all groups for CCC(=O)CC
```
In this case, it could be that GCIdentifier simply hasn't included the missing group within its database, or that the group has yet to be parameterised. To find out which groups might help fill in the missing atoms, one can use `find_missing_groups_from_smiles`:
```julia
julia> groups = find_missing_groups_from_smiles("CCC(=O)CC", SAFTgammaMieGroups)
3-element Vector{GCPair}:
GCPair("[CX3;H0;!R]", "C=")
GCPair("[OX1;H0;!R]", "O=")
GCPair("[CX3;H0;!R](=[OX1;H0;!R])", "C=O=")
```
where `groups` is a vector of `GCPair`s with proposed names of the groups. As we can see, GCIdentifier has proposed three potential groups, where the last is a combination of the first two. From this list, the users can decide which group might be best to parameterise. However, we also have our own [internal heuristics](./api.md) for proposing a _minimal_ group representation of molecules within GCIdentifier which will reduce this list to what we recommend:
```julia
julia> groups = find_missing_groups_from_smiles("CCC(=O)CC", SAFTgammaMieGroups; reduced=true)
1-element Vector{GCPair}:
GCPair("[CX3;H0;!R](=[OX1;H0;!R])", "C=O=")
```
In the case of the ketone, we would only really want to parameterise this group.
## Automatically fragment a molecule
Let us now consider an extreme case where we are trying to fragment a molecule into groups with no reference group-contribution approach. In the case of our ketone, the code will fragment the molecule into atomic groups, along with combinations of those atomic groups:
```julia
julia> groups = find_missing_groups_from_smiles("CCCC(=O)CCC")
5-element Vector{GCPair}:
GCPair("[CX4;H3;!R]", "CH3")
GCPair("[CX4;H2;!R]", "CH2")
GCPair("[CX3;H0;!R]", "C=")
GCPair("[OX1;H0;!R]", "O=")
GCPair("[CX3;H0;!R](=[OX1;H0;!R])", "C=O=")
julia> groups = find_missing_groups_from_smiles("CCCC(=O)CCC"; reduced=true)
3-element Vector{GCPair}:
GCPair("[CX4;H3;!R]", "CH3")
GCPair("[CX4;H2;!R]", "CH2")
GCPair("[CX3;H0;!R](=[OX1;H0;!R])", "C=O=")
```
As we can see, GCIdentifier proposes all the groups one requires to represent this ketone. However, the methylene (CH$_2$) groups bonded to the ketone group and those bonded to the methyl group could arguably be treated differently, owing to the different environments in which they sit. This distinction can be made by adding the `environment=true` optional argument:
```julia
julia> groups = find_missing_groups_from_smiles("CCCC(=O)CCC"; reduced=true, environment=true)
6-element Vector{GCPair}:
GCPair("[CX4;H3;!R;$([CX4;H3;!R]([CX4;H2;!R]))]", "CH3(CH2)")
GCPair("[CX4;H2;!R;$([CX4;H2;!R]([CX4;H3;!R])([CX4;H2;!R]))]", "CH2(CH3CH2)")
GCPair("[CX4;H2;!R;$([CX4;H2;!R]([CX4;H2;!R])([CX3;H0;!R]))]", "CH2(CH2C=)")
GCPair("[CX3;H0;!R;$([CX3;H0;!R]([CX4;H2;!R])(=[OX1;H0;!R])([CX4;H2;!R]))](=[OX1;H0;!R;$([OX1;H0;!R](=[CX3;H0;!R]))])", "C=(CH2O=CH2)O=(C=)")
GCPair("[CX4;H2;!R;$([CX4;H2;!R]([CX3;H0;!R])([CX4;H2;!R]))]", "CH2(C=CH2)")
GCPair("[CX4;H2;!R;$([CX4;H2;!R]([CX4;H2;!R])([CX4;H3;!R]))]", "CH2(CH2CH3)")
```
As we can see above, GCIdentifier now proposes many more groups, since the environment within which each group exists is taken into account.
One last flexible element of the `find_missing_groups_from_smiles` function is related to the size of the groups. In all of the above cases, the groups proposed only involve one or two heavy atoms. This is fine for most small molecules. However, for larger ones, such as adenylic acid, the groups proposed may not necessarily be the best representation:
```julia
julia> groups = find_missing_groups_from_smiles("C1=NC(=C2C(=N1)N(C=N2)C3C(C(C(O3)COP(=O)(O)O)O)O)N"; reduced=true)
12-element Vector{GCPair}:
GCPair("[cX3;H1;R][nX2;H0;R]", "aCHaN")
GCPair("[cX3;H0;R][nX2;H0;R]", "aCaN")
GCPair("[cX3;H0;R][NX3;H2;!R]", "aCNH2")
GCPair("[cX3;H0;R][nX3;H0;R]", "aCaN")
GCPair("[cX3;H1;R][nX3;H0;R]", "aCHaN")
GCPair("[CX4;H1;R][nX3;H0;R]", "cCHaN")
GCPair("[CX4;H1;R][OX2;H0;R]", "cCHcO")
GCPair("[CX4;H1;R][OX2;H1;!R]", "cCHOH")
GCPair("[CX4;H1;R][CX4;H2;!R]", "cCHCH2")
GCPair("[OX2;H0;!R]([PX4;H0;!R])", "OP")
GCPair("[OX1;H0;!R]", "O=")
GCPair("[OX2;H1;!R]", "OH")
```
Most groups here are quite reasonable, with the exception of the "O=" and "OH" groups, as those will be bonded directly to the phosphorus atom, which we would expect to result in very different "O=" and "OH" groups from those found in, for example, alcohols. While we could again use the `environment` capability of GCIdentifier to specify the environment in which these groups exist, this would result in far more groups being proposed. Ideally, we would like to treat the phosphate group as a single group. This can be done by specifying a larger `max_group_size` in the optional arguments:
```julia
julia> groups = find_missing_groups_from_smiles("C1=NC(=C2C(=N1)N(C=N2)C3C(C(C(O3)COP(=O)(O)O)O)O)N"; reduced=true, max_group_size=5)
10-element Vector{GCPair}:
GCPair("[cX3;H1;R][nX2;H0;R]", "aCHaN")
GCPair("[cX3;H0;R][nX2;H0;R]", "aCaN")
GCPair("[cX3;H0;R][NX3;H2;!R]", "aCNH2")
GCPair("[cX3;H0;R][nX3;H0;R]", "aCaN")
GCPair("[cX3;H1;R][nX3;H0;R]", "aCHaN")
GCPair("[CX4;H1;R][nX3;H0;R]", "cCHaN")
GCPair("[CX4;H1;R][OX2;H0;R]", "cCHcO")
GCPair("[CX4;H1;R][OX2;H1;!R]", "cCHOH")
GCPair("[CX4;H1;R][CX4;H2;!R]", "cCHCH2")
GCPair("[PX4;H0;!R]([OX2;H0;!R])(=[OX1;H0;!R])([OX2;H1;!R])([OX2;H1;!R])", "POO=OHOH")
```
This is now a very reasonable representation of adenylic acid. | GCIdentifier | https://github.com/ClapeyronThermo/GCIdentifier.jl.git |
|
[
"MIT"
] | 0.3.5 | 3e53af9fa1ce5e05ed4201758f5518b6268303b9 | docs | 6368 | ---
title: 'GCIdentifier.jl: A Julia package for identifying molecular fragments from SMILES'
tags:
- Julia
- Group-contribution
- Thermodynamics
- Molecular Design
authors:
- name: Pierre J. Walker
orcid: 0000-0001-8628-6561
corresponding: true
affiliation: "1, 2"
- name: Andrés Riedemann
corresponding: true
affiliation: 3
- name: Zhen-Gang Wang
affiliation: 1
orcid: 0000-0002-3361-6114
affiliations:
- name: Division of Chemistry and Chemical Engineering, California Institute of Technology, Pasadena, California 91125, United States
index: 1
- name: Department of Chemical Engineering, Imperial College, London SW7 2AZ, United Kingdom
index: 2
- name: Departamento de Ingeniería Química, Universidad de Concepción, Concepción 4030000, Chile
index: 3
date: 9 February 2024
bibliography: paper.bib
---
# Summary
GCIdentifier.jl is an open-source toolkit for the automatic identification of group fragments based on the name of a molecule or its SMILES. Obtaining chemical properties of species, such as heat capacities [@bensonAdditivityRulesEstimation1958] or solvation free energies [@plattsEstimationMolecularLinear2000], typically involves a set of parameters that represent a given species. For example, the ideal isobaric heat capacity of a pure component over a range of temperatures can be obtained using Reid polynomials with just four parameters ($a$, $b$, $c$ and $d$). Unfortunately, the parameters obtained in this way apply only to a specific species and cannot be transferred to others (e.g. the $a$, $b$, $c$, $d$ parameters for water cannot then be used to model ibuprofen). A solution is to split a set of molecules with similar chemical structures into moieties, known as groups, each with its own associated parameters, and to adjust these parameters against experimental data for all of these molecules. The combination of these groups (and their associated parameters) can then be used to predict the properties of a molecule such as ibuprofen. In the case of the Joback method [@jobackEstimationPureComponentProperties1987], the Reid polynomial parameters are obtained by summing over the group-specific parameters ($a_i$, $b_i$, $c_i$ and $d_i$) weighted by the occurrence of those groups in a species. The benefit of such approaches is that the groups can be combined in many different ways, so that a limited set of group parameters represents a large variety of molecules. This type of approach is known as group contribution; many such approaches exist [@weidlichModifiedUNIFACModel1987;@walkerNewPredictiveGroupContribution2020;@chungGroupContributionMachine2022;@papaioannouGroupContributionMethodology2014] and can be used to predict a range of properties, such as pharmaceutical solubilities [@wehbePhaseBehaviourPHsolubility2022], interfacial tensions [@rehnerSurfactantModelingUsing2021] and thermal conductivities [@hoppThermalConductivityEntropy2019]. An example of this process is shown in figure 1.

Unfortunately, the main challenge in using group-contribution approaches is assigning the groups that represent a given species. While this assignment can be done manually, it is more convenient and, as discussed later, more efficient to automate this process. Indeed, this is the exact objective of GCIdentifier. By simply feeding a species name or SMILES, along with the group-contribution approach one wishes to use, the group assignment is done automatically:
```julia
using GCIdentifier, ChemicalIdentifiers
groups = get_groups_from_name("ibuprofen", UNIFACGroups)
```
The output from this function can then be used in other packages, such as Clapeyron [@walkerClapeyronJlExtensible2022], to obtain chemical properties.
# Statement of need
Group-contribution approaches are vital when it comes to computer-aided molecular design (CAMD) of, for example, novel refrigerants [@sahinidisDesignAlternativeRefrigerants2003] or in drug discovery [@houADMEEvaluationDrug2004]. Here, the assignment of groups must be done thousands of times and, in some cases, for rather complex molecules. This is the primary motivator for the development of GCIdentifier. While other packages [@degenArtCompilingUsing2008;@liuBreakOrderBuild2017;@mullerFlexibleHeuristicAlgorithm2019] with similar functionalities have been developed in other languages, GCIdentifier.jl stands apart for multiple reasons.
GCIdentifier.jl is the first such package to be compatible with multiple group-contribution approaches, such as UNIFAC and SAFT-$\gamma$ Mie. By standardising the representation of groups using SMARTS and leveraging the powerful MolecularGraph [@seiji_matsuoka_2024_10478701] package, our group-identification code can be used with any existing group-contribution thermodynamic model. This extends to group-contribution approaches which require information about the connectivity between groups [@sauerComparisonHomoHeterosegmented2014]: by simply specifying `connectivity=true` within the `get_groups_from_name` function, the connectivity matrix between groups is generated automatically.
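As an illustrative sketch (the exact shape of the returned object follows the package documentation; the point here is only the keyword argument):
```julia
using GCIdentifier, ChemicalIdentifiers
result = get_groups_from_name("ibuprofen", UNIFACGroups; connectivity=true)
```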
While packages in other languages are able to generate groups from _existing_ group databases, GCIdentifier.jl is able to systematically propose _new_ groups for a given molecule. Consider a case where an existing group-contribution framework is unable to cover all atoms present in a molecule. GCIdentifier.jl is able to consider these unrepresented atoms and propose a list of new groups. From this list, users can determine which groups they should obtain new parameters for. In the extreme case where we wish to generate a list of all possible groups that represent a molecule, GCIdentifier.jl will automatically split the molecule into groups, from which either the user or a set of built-in heuristics can then decide which set best represents the molecule.
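For example, for a ketone that SAFT-$\gamma$ Mie cannot fully cover, the proposal step reduces to a single call (mirroring the package documentation):
```julia
new_groups = find_missing_groups_from_smiles("CCC(=O)CC", SAFTgammaMieGroups; reduced=true)
```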
These two features present within GCIdentifier.jl have potential applications beyond thermodynamic modelling, such as the development of molecular dynamics forcefields which could be integrated into packages such as Molly [@greenerDifferentiableSimulationDevelop2023].
# Acknowledgments
Z-G.W. acknowledges funding from Hong Kong Quantum AI Lab, AIR\@InnoHK of the Hong Kong Government.
# References
| GCIdentifier | https://github.com/ClapeyronThermo/GCIdentifier.jl.git |
|
[
"MIT"
] | 0.1.0 | 330289636fb8107c5f32088d2741e9fd7a061a5c | code | 607 | using SIMDTypes
using Documenter
DocMeta.setdocmeta!(SIMDTypes, :DocTestSetup, :(using SIMDTypes); recursive=true)
makedocs(;
modules=[SIMDTypes],
authors="Julia Computing, Inc. and Contributors",
repo="https://github.com/JuliaSIMD/SIMDTypes.jl/blob/{commit}{path}#{line}",
sitename="SIMDTypes.jl",
format=Documenter.HTML(;
prettyurls=get(ENV, "CI", "false") == "true",
canonical="https://JuliaSIMD.github.io/SIMDTypes.jl",
assets=String[],
),
pages=[
"Home" => "index.md",
],
)
deploydocs(;
repo="github.com/JuliaSIMD/SIMDTypes.jl",
)
| SIMDTypes | https://github.com/JuliaSIMD/SIMDTypes.jl.git |
|
[
"MIT"
] | 0.1.0 | 330289636fb8107c5f32088d2741e9fd7a061a5c | code | 714 | module SIMDTypes
# using Static: StaticInt
struct Bit; data::Bool; end # Dummy for Ptr
# @inline Base.convert(::Type{Bool}, b::Bit) = getfield(b, :data)
const FloatingTypes = Union{Float16,Float32,Float64}
const SignedHW = Union{Int8,Int16,Int32,Int64}
const UnsignedHW = Union{UInt8,UInt16,UInt32,UInt64}
const IntegerTypesHW = Union{SignedHW,UnsignedHW}
# const IntegerTypes = Union{StaticInt,IntegerTypesHW}
const NativeTypesExceptBitandFloat16 = Union{Bool,Base.HWReal}
const NativeTypesExceptBit = Union{Bool,Base.HWReal,Float16}
const NativeTypesExceptFloat16 = Union{Bool,Base.HWReal,Bit}
const NativeTypes = Union{NativeTypesExceptBit, Bit}
const _Vec{W,T<:Number} = NTuple{W,Core.VecElement{T}}
end
| SIMDTypes | https://github.com/JuliaSIMD/SIMDTypes.jl.git |
|
[
"MIT"
] | 0.1.0 | 330289636fb8107c5f32088d2741e9fd7a061a5c | code | 1196 | using SIMDTypes
using Test
@testset "SIMDTypes.jl" begin
@test SIMDTypes.Bit(true).data
@test !SIMDTypes.Bit(false).data
@test SIMDTypes.Bit(true) isa SIMDTypes.NativeTypes
@test !(SIMDTypes.Bit(true) isa SIMDTypes.NativeTypesExceptBit)
@test SIMDTypes.Bit(true) isa SIMDTypes.NativeTypesExceptFloat16
@test !(SIMDTypes.Bit(true) isa SIMDTypes.NativeTypesExceptBitandFloat16)
@test Float16(1) isa SIMDTypes.NativeTypes
@test Float16(1) isa SIMDTypes.NativeTypesExceptBit
@test !(Float16(1) isa SIMDTypes.NativeTypesExceptFloat16)
@test !(Float16(1) isa SIMDTypes.NativeTypesExceptBitandFloat16)
@test 1f0 isa SIMDTypes.NativeTypesExceptBitandFloat16
@test 1.3 isa SIMDTypes.FloatingTypes
# @test !(1.3 isa SIMDTypes.IntegerTypes)
@test !(1.3 isa SIMDTypes.IntegerTypesHW)
# @test 1 isa SIMDTypes.IntegerTypes
@test 1 isa SIMDTypes.SignedHW
@test !(1 isa SIMDTypes.UnsignedHW)
@test 1 isa SIMDTypes.IntegerTypesHW
# @test one(UInt) isa SIMDTypes.IntegerTypes
@test !(one(UInt) isa SIMDTypes.SignedHW)
@test one(UInt) isa SIMDTypes.UnsignedHW
@test one(UInt) isa SIMDTypes.IntegerTypesHW
@test ntuple(Core.VecElement, 2) isa SIMDTypes._Vec
end
| SIMDTypes | https://github.com/JuliaSIMD/SIMDTypes.jl.git |
|
[
"MIT"
] | 0.1.0 | 330289636fb8107c5f32088d2741e9fd7a061a5c | docs | 499 | # SIMDTypes
[](https://JuliaSIMD.github.io/SIMDTypes.jl/stable)
[](https://JuliaSIMD.github.io/SIMDTypes.jl/dev)
[](https://github.com/JuliaSIMD/SIMDTypes.jl/actions)
[](https://codecov.io/gh/JuliaSIMD/SIMDTypes.jl)
| SIMDTypes | https://github.com/JuliaSIMD/SIMDTypes.jl.git |
|
[
"MIT"
] | 0.1.0 | 330289636fb8107c5f32088d2741e9fd7a061a5c | docs | 182 | ```@meta
CurrentModule = SIMDTypes
```
# SIMDTypes
Documentation for [SIMDTypes](https://github.com/JuliaSIMD/SIMDTypes.jl).
```@index
```
```@autodocs
Modules = [SIMDTypes]
```
| SIMDTypes | https://github.com/JuliaSIMD/SIMDTypes.jl.git |
|
[
"BSD-3-Clause"
] | 0.1.4 | 8a4516144f4b231223cfeb2ed99e37644ad7e1d0 | code | 1110 | __precompile__()
module TSAnalysis
# Libraries
using Dates, Distributed, Logging;
using LinearAlgebra, Distributions, Optim, Statistics;
# Custom dependencies
const local_path = dirname(@__FILE__);
include("$local_path/types.jl");
include("$local_path/methods.jl");
include("$local_path/kalman.jl");
include("$local_path/subsampling.jl");
include("$local_path/uc_models.jl");
# Export types
export JVector, JArray, FloatVector, FloatMatrix, FloatArray, SymMatrix, DiagMatrix,
KalmanSettings, ImmutableKalmanSettings, MutableKalmanSettings, KalmanStatus,
ARIMASettings, VARIMASettings;
# Export methods
export check_bounds, isnothing, error_info, verb_message, interpolate, soft_thresholding, isconverged,
mean_skipmissing, std_skipmissing, is_vector_in_matrix, demean, lag, companion_form;
# Export functions
export kfilter!, kforecast, ksmoother, fmin_uc_models, arima, varima, forecast,
block_jackknife, optimal_d, artificial_jackknife, moving_block_bootstrap, stationary_block_bootstrap;
end
| TSAnalysis | https://github.com/fipelle/TSAnalysis.jl.git |
|
[
"BSD-3-Clause"
] | 0.1.4 | 8a4516144f4b231223cfeb2ed99e37644ad7e1d0 | code | 9472 | """
apriori(X::FloatVector, settings::KalmanSettings)
Kalman filter a-priori prediction for X.
# Arguments
- `X`: Last expected value of the states
- `settings`: KalmanSettings struct
apriori(P::SymMatrix, settings::KalmanSettings)
Kalman filter a-priori prediction for P.
# Arguments
- `P`: Last conditional covariance the states
- `settings`: KalmanSettings struct
"""
apriori(X::FloatVector, settings::KalmanSettings) = settings.C * X;
apriori(P::SymMatrix, settings::KalmanSettings) = Symmetric(settings.C * P * settings.C' + settings.V)::SymMatrix;
"""
apriori!(::Type{Nothing}, settings::KalmanSettings, status::KalmanStatus)
Kalman filter a-priori prediction for t==1.
# Arguments
- `::Type{Nothing}`: first prediction
- `settings`: KalmanSettings struct
- `status`: KalmanStatus struct
apriori!(::Type{FloatVector}, settings::KalmanSettings, status::KalmanStatus)
Kalman filter a-priori prediction.
# Arguments
- `::Type{FloatVector}`: standard prediction
- `settings`: KalmanSettings struct
- `status`: KalmanStatus struct
"""
function apriori!(::Type{Nothing}, settings::KalmanSettings, status::KalmanStatus)
status.X_prior = apriori(settings.X0, settings);
status.P_prior = apriori(settings.P0, settings);
if settings.compute_loglik == true
status.loglik = 0.0;
end
if settings.store_history == true
status.history_X_prior = Array{FloatVector,1}();
status.history_X_post = Array{FloatVector,1}();
status.history_P_prior = Array{SymMatrix,1}();
status.history_P_post = Array{SymMatrix,1}();
end
end
function apriori!(::Type{FloatVector}, settings::KalmanSettings, status::KalmanStatus)
status.X_prior = apriori(status.X_post, settings);
status.P_prior = apriori(status.P_post, settings);
end
"""
find_observed_data(settings::KalmanSettings, status::KalmanStatus)
Return position of the observed measurements at time t.
# Arguments
- `settings`: KalmanSettings struct
- `status`: KalmanStatus struct
"""
function find_observed_data(settings::KalmanSettings, status::KalmanStatus)
if status.t <= settings.T
Y_t_all = @view settings.Y[:, status.t];
ind_not_missings = findall(ismissing.(Y_t_all) .== false);
if length(ind_not_missings) > 0
return ind_not_missings;
end
end
end
"""
update_loglik!(status::KalmanStatus, ε_t::FloatVector, Σ_t::SymMatrix)
Update status.loglik.
# Arguments
- `status`: KalmanStatus struct
- `ε_t`: Forecast error
- `Σ_t`: Forecast error covariance
"""
function update_loglik!(status::KalmanStatus, ε_t::FloatVector, Σ_t::SymMatrix)
status.loglik -= 0.5*(logdet(Σ_t) + ε_t'*inv(Σ_t)*ε_t);
end
"""
aposteriori!(settings::KalmanSettings, status::KalmanStatus, ind_not_missings::Array{Int64,1})
Kalman filter a-posteriori update. Measurements are observed (or partially observed) at time t.
# Arguments
- `settings`: KalmanSettings struct
- `status`: KalmanStatus struct
- `ind_not_missings`: Position of the observed measurements
aposteriori!(settings::KalmanSettings, status::KalmanStatus, ind_not_missings::Nothing)
Kalman filter a-posteriori update. All measurements are not observed at time t.
# Arguments
- `settings`: KalmanSettings struct
- `status`: KalmanStatus struct
- `ind_not_missings`: Empty array
"""
function aposteriori!(settings::KalmanSettings, status::KalmanStatus, ind_not_missings::Array{Int64,1})
Y_t = @view settings.Y[ind_not_missings, status.t];
B_t = @view settings.B[ind_not_missings, :];
R_t = @view settings.R[ind_not_missings, ind_not_missings];
# Forecast error
ε_t = Y_t - B_t*status.X_prior;
Σ_t = Symmetric(B_t*status.P_prior*B_t' + R_t)::SymMatrix;
# Kalman gain
K_t = status.P_prior*B_t'*inv(Σ_t);
# A posteriori estimates
status.X_post = status.X_prior + K_t*ε_t;
status.P_post = Symmetric(status.P_prior - K_t*B_t*status.P_prior)::SymMatrix;
# Update log likelihood
if settings.compute_loglik == true
update_loglik!(status, ε_t, Σ_t);
end
end
function aposteriori!(settings::KalmanSettings, status::KalmanStatus, ind_not_missings::Nothing)
status.X_post = copy(status.X_prior);
status.P_post = copy(status.P_prior);
end
"""
kfilter!(settings::KalmanSettings, status::KalmanStatus)
Kalman filter: a-priori prediction and a-posteriori update.
# Model
The state space model used below is,
``Y_{t} = B*X_{t} + e_{t}``
``X_{t} = C*X_{t-1} + v_{t}``
Where ``e_{t} ~ N(0, R)`` and ``v_{t} ~ N(0, V)``.
# Arguments
- `settings`: KalmanSettings struct
- `status`: KalmanStatus struct
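# Examples
A minimal sketch with simulated data (all numbers and variable names are illustrative):
```julia
using LinearAlgebra, TSAnalysis;
Y = randn(1, 100);                              # 1 series, 100 observations
B = ones(1,1); R = Symmetric(1e-4*ones(1,1));   # measurement equation
C = 0.9*ones(1,1); V = Symmetric(ones(1,1));    # transition equation
ksettings = ImmutableKalmanSettings(Y, B, R, C, V);
kstatus = KalmanStatus();
for t=1:ksettings.T
    kfilter!(ksettings, kstatus);
end
```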
"""
function kfilter!(settings::KalmanSettings, status::KalmanStatus)
# Update status.t
status.t += 1;
# A-priori prediction
apriori!(typeof(status.X_prior), settings, status);
# Handle missing observations
ind_not_missings = find_observed_data(settings, status);
# Ex-post update
aposteriori!(settings, status, ind_not_missings);
# Update history of *_prior and *_post
if settings.store_history == true
push!(status.history_X_prior, status.X_prior);
push!(status.history_X_post, status.X_post);
push!(status.history_P_prior, status.P_prior);
push!(status.history_P_post, status.P_post);
end
end
"""
kforecast(settings::KalmanSettings, X::Union{FloatVector, Nothing}, h::Int64)
Forecast X up to h-step ahead.
kforecast(settings::KalmanSettings, X::Union{FloatVector, Nothing}, P::Union{SymMatrix, Nothing}, h::Int64)
Forecast X and P up to h-step ahead.
# Model
The state space model used below is,
``Y_{t} = B*X_{t} + e_{t}``
``X_{t} = C*X_{t-1} + v_{t}``
Where ``e_{t} ~ N(0, R)`` and ``v_{t} ~ N(0, V)``.
"""
function kforecast(settings::KalmanSettings, Xt::Union{FloatVector, Nothing}, h::Int64)
# Initialise forecast history
history_X = Array{FloatVector,1}();
X = copy(Xt);
# Loop over forecast horizons
for horizon=1:h
X = apriori(X, settings);
push!(history_X, X);
end
# Return output
return history_X;
end
function kforecast(settings::KalmanSettings, Xt::Union{FloatVector, Nothing}, Pt::Union{SymMatrix, Nothing}, h::Int64)
# Initialise forecast history
history_X = Array{FloatVector,1}();
history_P = Array{SymMatrix,1}();
X = copy(Xt);
P = copy(Pt);
# Loop over forecast horizons
for horizon=1:h
X = apriori(X, settings);
P = apriori(P, settings);
push!(history_X, X);
push!(history_P, P);
end
# Return output
return history_X, history_P;
end
"""
compute_J1(Pf_lagged::SymMatrix, Pp::SymMatrix, settings::KalmanSettings)
Compute J_{t-1} as in Shumway and Stoffer (2011, chapter 6) to operate the RTS smoother.
"""
compute_J1(Pf_lagged::SymMatrix, Pp::SymMatrix, settings::KalmanSettings) = Pf_lagged*settings.C'*inv(Pp);
"""
backwards_pass(Xf_lagged::FloatVector, J1::FloatArray, Xs::FloatVector, Xp::FloatVector)
Backwards pass for X to get the smoothed states at time t-1.
# Arguments
- `Xf_lagged`: Filtered states for time t-1
- `J1`: J_{t-1} as in Shumway and Stoffer (2011, chapter 6)
- `Xs`: Smoothed states for time t
- `Xp`: A-priori states for time t
backwards_pass(Pf_lagged::SymMatrix, J1::FloatArray, Ps::SymMatrix, Pp::SymMatrix)
Backwards pass for P to get the smoothed conditional covariance at time t-1.
# Arguments
- `Pf_lagged`: Filtered conditional covariance for time t-1
- `J1`: J_{t-1} as in Shumway and Stoffer (2011, chapter 6)
- `Ps`: Smoothed conditional covariance for time t
- `Pp`: A-priori conditional covariance for time t
"""
backwards_pass(Xf_lagged::FloatVector, J1::FloatArray, Xs::FloatVector, Xp::FloatVector) = Xf_lagged + J1*(Xs-Xp);
backwards_pass(Pf_lagged::SymMatrix, J1::FloatArray, Ps::SymMatrix, Pp::SymMatrix) = Symmetric(Pf_lagged + J1*(Ps-Pp)*J1')::SymMatrix;
"""
ksmoother(settings::KalmanSettings, status::KalmanStatus)
Kalman smoother: RTS smoother from the last evaluated time period in status to t==0.
# Model
The state space model used below is,
``Y_{t} = B*X_{t} + e_{t}``
``X_{t} = C*X_{t-1} + v_{t}``
Where ``e_{t} ~ N(0, R)`` and ``v_{t} ~ N(0, V)``.
# Arguments
- `settings`: KalmanSettings struct
- `status`: KalmanStatus struct
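# Examples
A minimal sketch (illustrative numbers), running the filter forwards and then smoothing backwards:
```julia
using LinearAlgebra, TSAnalysis;
Y = randn(1, 100);
ksettings = ImmutableKalmanSettings(Y, ones(1,1), Symmetric(1e-4*ones(1,1)), 0.9*ones(1,1), Symmetric(ones(1,1)));
kstatus = KalmanStatus();
for t=1:ksettings.T
    kfilter!(ksettings, kstatus);
end
history_X, history_P, X0, P0 = ksmoother(ksettings, kstatus);
```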
"""
function ksmoother(settings::KalmanSettings, status::KalmanStatus)
# Initialise smoother history
history_X = Array{FloatVector,1}();
history_P = Array{SymMatrix,1}();
push!(history_X, copy(status.X_post));
push!(history_P, copy(status.P_post));
# Loop over t
for t=status.t:-1:2
# Pointers
Xp = status.history_X_prior[t];
Pp = status.history_P_prior[t];
Xf_lagged = status.history_X_post[t-1];
Pf_lagged = status.history_P_post[t-1];
Xs = history_X[1];
Ps = history_P[1];
# Smoothed estimates for t-1
J1 = compute_J1(Pf_lagged, Pp, settings);
pushfirst!(history_X, backwards_pass(Xf_lagged, J1, Xs, Xp));
pushfirst!(history_P, backwards_pass(Pf_lagged, J1, Ps, Pp));
end
# Pointers
Xp = status.history_X_prior[1];
Pp = status.history_P_prior[1];
Xs = history_X[1];
Ps = history_P[1];
# Compute smoothed estimates for t==0
J1 = compute_J1(settings.P0, Pp, settings);
X0 = backwards_pass(settings.X0, J1, Xs, Xp);
P0 = backwards_pass(settings.P0, J1, Ps, Pp);
# Return output
return history_X, history_P, X0, P0;
end
| TSAnalysis | https://github.com/fipelle/TSAnalysis.jl.git |
|
[
"BSD-3-Clause"
] | 0.1.4 | 8a4516144f4b231223cfeb2ed99e37644ad7e1d0 | code | 10351 | #=
--------------------------------------------------------------------------------------------------------------------------------
Base and generic math
--------------------------------------------------------------------------------------------------------------------------------
=#
"""
isnothing(::Any)
True if the argument is ```nothing``` (false otherwise).
"""
isnothing(::Any) = false;
isnothing(::Nothing) = true;
"""
verb_message(verb::Bool, message::String)
@info `message` if `verb` is true.
"""
verb_message(verb::Bool, message::String) = verb ? @info(message) : nothing;
"""
check_bounds(X::Number, LB::Number, UB::Number)
Check whether `X` is greater than or equal to `LB` and less than or equal to `UB`; throw a `DomainError` otherwise.
    check_bounds(X::Number, LB::Number)
Check whether `X` is greater than or equal to `LB`; throw a `DomainError` otherwise.
"""
check_bounds(X::Number, LB::Number, UB::Number) = X < LB || X > UB ? throw(DomainError(X, "X must lie between LB and UB")) : nothing
check_bounds(X::Number, LB::Number) = X < LB ? throw(DomainError(X, "X must be greater than or equal to LB")) : nothing
"""
error_info(err::Exception)
error_info(err::RemoteException)
Return error main information
"""
error_info(err::Exception) = (err, err.msg, stacktrace(catch_backtrace()));
error_info(err::RemoteException) = (err.captured.ex, err.captured.ex.msg, [err.captured.processed_bt[i][1] for i=1:length(err.captured.processed_bt)]);
"""
mean_skipmissing(X::AbstractArray{Float64,1})
mean_skipmissing(X::AbstractArray{Union{Missing, Float64},1})
Compute the mean of the observed values in `X`.
mean_skipmissing(X::AbstractArray{Float64})
mean_skipmissing(X::AbstractArray{Union{Missing, Float64}})
Compute the mean of the observed values in `X` along its second dimension (i.e., one value per row / series).
# Examples
```jldoctest
julia> mean_skipmissing([1.0; missing; 3.0])
2.0
julia> mean_skipmissing([1.0 2.0; missing 3.0; 3.0 5.0])
3-element Array{Float64,1}:
1.5
3.0
4.0
```
"""
mean_skipmissing(X::AbstractArray{Float64,1}) = mean(X);
mean_skipmissing(X::AbstractArray{Float64}) = mean(X, dims=2);
mean_skipmissing(X::AbstractArray{Union{Missing, Float64},1}) = mean(skipmissing(X));
mean_skipmissing(X::AbstractArray{Union{Missing, Float64}}) = vcat([mean_skipmissing(X[i,:]) for i=1:size(X,1)]...);
"""
std_skipmissing(X::AbstractArray{Float64,1})
std_skipmissing(X::AbstractArray{Union{Missing, Float64},1})
Compute the standard deviation of the observed values in `X`.
std_skipmissing(X::AbstractArray{Float64})
std_skipmissing(X::AbstractArray{Union{Missing, Float64}})
Compute the standard deviation of the observed values in `X` along its second dimension (i.e., one value per row / series).
# Examples
```jldoctest
julia> std_skipmissing([1.0; missing; 3.0])
1.4142135623730951
julia> std_skipmissing([1.0 2.0; missing 3.0; 3.0 5.0])
3-element Array{Float64,1}:
0.7071067811865476
NaN
1.4142135623730951
```
"""
std_skipmissing(X::AbstractArray{Float64,1}) = std(X);
std_skipmissing(X::AbstractArray{Float64}) = std(X, dims=2);
std_skipmissing(X::AbstractArray{Union{Missing, Float64},1}) = std(skipmissing(X));
std_skipmissing(X::AbstractArray{Union{Missing, Float64}}) = vcat([std_skipmissing(X[i,:]) for i=1:size(X,1)]...);
"""
is_vector_in_matrix(vect::AbstractVector, matr::AbstractMatrix)
Check whether the vector `vect` is included in the matrix `matr`.
# Examples
```jldoctest
julia> is_vector_in_matrix([1;2], [1 2; 2 3])
true
```
"""
is_vector_in_matrix(vect::AbstractVector, matr::AbstractMatrix) = sum(sum(vect .== matr, dims=1) .== length(vect)) > 0;
"""
isconverged(new::Float64, old::Float64, tol::Float64, ε::Float64, increasing::Bool)
Check whether `new` is close enough to `old`.
# Arguments
- `new`: new objective or loss
- `old`: old objective or loss
- `tol`: tolerance
- `ε`: small Float64
- `increasing`: true if `new` increases, at each iteration, with respect to `old`
"""
isconverged(new::Float64, old::Float64, tol::Float64, ε::Float64, increasing::Bool) = increasing ? (new-old)./(abs(old)+ε) <= tol : -(new-old)./(abs(old)+ε) <= tol;
"""
soft_thresholding(z::Float64, ζ::Float64)
Soft thresholding operator.
"""
soft_thresholding(z::Float64, ζ::Float64) = sign(z)*max(abs(z)-ζ, 0);
"""
square_vandermonde_matrix(λ::FloatVector)
Construct square vandermonde matrix on the basis of a vector of eigenvalues λ.
"""
square_vandermonde_matrix(λ::FloatVector) = λ'.^collect(length(λ)-1:-1:0);
#=
--------------------------------------------------------------------------------------------------------------------------------
Parameter transformations
--------------------------------------------------------------------------------------------------------------------------------
=#
"""
get_bounded_log(Θ_unbound::Float64, MIN::Float64)
Compute parameters with bounded support using a generalised log transformation.
"""
get_bounded_log(Θ_unbound::Float64, MIN::Float64) = exp(Θ_unbound) + MIN;
"""
get_unbounded_log(Θ_bound::Float64, MIN::Float64)
Compute parameters with unbounded support using a generalised log transformation.
"""
get_unbounded_log(Θ_bound::Float64, MIN::Float64) = log(Θ_bound - MIN);
"""
get_bounded_logit(Θ_unbound::Float64, MIN::Float64, MAX::Float64)
Compute parameters with bounded support using a generalised logit transformation.
"""
get_bounded_logit(Θ_unbound::Float64, MIN::Float64, MAX::Float64) = (MIN + (MAX * exp(Θ_unbound))) / (1 + exp(Θ_unbound));
"""
get_unbounded_logit(Θ_bound::Float64, MIN::Float64, MAX::Float64)
Compute parameters with unbounded support using a generalised logit transformation.
"""
get_unbounded_logit(Θ_bound::Float64, MIN::Float64, MAX::Float64) = log((Θ_bound - MIN) / (MAX - Θ_bound));
#=
--------------------------------------------------------------------------------------------------------------------------------
Time series
--------------------------------------------------------------------------------------------------------------------------------
=#
"""
demean(X::FloatVector)
demean(X::JVector)
Demean data.
demean(X::FloatMatrix)
demean(X::JArray)
Demean data.
# Examples
```jldoctest
julia> demean([1.0; 1.5; 2.0; 2.5; 3.0])
5-element Array{Float64,1}:
-1.0
-0.5
0.0
0.5
1.0
julia> demean([1.0 3.5 1.5 4.0 2.0; 4.5 2.5 5.0 3.0 5.5])
2×5 Array{Float64,2}:
-1.4 1.1 -0.9 1.6 -0.4
0.4 -1.6 0.9 -1.1 1.4
```
"""
demean(X::FloatVector) = X .- mean(X);
demean(X::FloatMatrix) = X .- mean(X,dims=2);
demean(X::JVector) = X .- mean_skipmissing(X);
demean(X::JArray) = X .- mean_skipmissing(X);
"""
interpolate(X::JArray{Float64}, n::Int64, T::Int64)
Interpolate each series in `X`, in turn, by replacing missing observations with the sample average of the non-missing values.
# Arguments
- `X`: observed measurements (`nxT`)
- `n` and `T` are the number of series and observations
    interpolate(X::Array{Float64}, n::Int64, T::Int64)
Return `X` unchanged, since it does not contain missing observations to interpolate.
# Arguments
- `X`: observed measurements (`nxT`)
- `n` and `T` are the number of series and observations
"""
function interpolate(X::JArray{Float64}, n::Int64, T::Int64)
data = copy(X);
for i=1:n
data[i, ismissing.(X[i, :])] .= mean_skipmissing(X[i, :]);
end
data = data |> FloatArray;
return data;
end
interpolate(X::FloatArray, n::Int64, T::Int64) = X;
"""
lag(X::FloatArray, p::Int64)
Construct the data required to run a standard vector autoregression.
# Arguments
- `X`: observed measurements (`nxT`), where `n` and `T` are the number of series and observations.
- `p`: number of lags in the vector autoregression
# Output
- `X_{t}`
- `X_{t-1}`
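# Examples
A small illustrative case (values chosen arbitrarily):
```julia
X = [1.0 2.0 3.0 4.0];
X_t, X_lagged = lag(X, 1); # X_t == [2.0 3.0 4.0], X_lagged == [1.0 2.0 3.0]
```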
"""
function lag(X::FloatArray, p::Int64)
# VAR(p) data
X_t = X[:, 1+p:end];
X_lagged = vcat([X[:, p-j+1:end-j] for j=1:p]...);
# Return output
return X_t, X_lagged;
end
"""
companion_form(θ::FloatVector)
Construct the companion form parameters of θ.
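# Examples
A small illustrative case (values chosen arbitrarily):
```julia
companion_form([0.9; -0.2]) # returns [0.9 -0.2; 1.0 0.0]
```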
"""
function companion_form(θ::FloatVector)
# Number of parameters in θ
k = length(θ);
# Return companion form
return [permutedims(θ); Matrix(I, k-1, k-1) zeros(k-1)];
end
#=
-------------------------------------------------------------------------------------------------------------------------------
Combinatorics and probability
-------------------------------------------------------------------------------------------------------------------------------
=#
"""
no_combinations(n::Int64, k::Int64)
Compute the binomial coefficient of `n` observations and `k` groups, for big integers.
# Examples
```jldoctest
julia> no_combinations(1000000,100000)
7.333191945934207610471288280331309569215030711272858517142085449641265002716664e+141178
```
"""
no_combinations(n::Int64, k::Int64) = factorial(big(n))/(factorial(big(k))*factorial(big(n-k)));
"""
rand_without_replacement(nT::Int64, d::Int64)
Draw `length(P)-d` elements from the positional vector `P` without replacement.
`P` is permanently changed in the process.
rand_without_replacement(n::Int64, T::Int64, d::Int64)
Draw `length(P)-d` elements from the positional vector `P` without replacement.
In the sampling process, no more than n-1 elements are removed for each point in time.
`P` is permanently changed in the process.
# Examples
```jldoctest
julia> rand_without_replacement(20, 5)
15-element Array{Int64,1}:
1
2
3
5
7
8
10
11
13
14
16
17
18
19
20
```
"""
function rand_without_replacement(nT::Int64, d::Int64)
# Positional vector
P = collect(1:nT);
# Draw without replacement d times
for i=1:d
deleteat!(P, findall(P.==rand(P)));
end
# Return output
return setdiff(1:nT, P);
end
function rand_without_replacement(n::Int64, T::Int64, d::Int64)
# Positional vector
P = collect(1:n*T);
# Full set of coordinates
coord = [repeat(1:n, T) kron(1:T, convert(Array{Int64}, ones(n)))];
# Counter
coord_counter = convert(Array{Int64}, zeros(T));
# Loop over d
for i=1:d
while true
# New candidate draw
draw = rand(P);
coord_draw = @view coord[draw, :];
# Accept the draw if all observations are not missing for time t = coord[draw, :][2]
if coord_counter[coord_draw[2]] < n-1
coord_counter[coord_draw[2]] += 1;
# Draw without replacement
deleteat!(P, findall(P.==draw));
break;
end
end
end
# Return output
return setdiff(1:n*T, P);
end
| TSAnalysis | https://github.com/fipelle/TSAnalysis.jl.git |
|
[
"BSD-3-Clause"
] | 0.1.4 | 8a4516144f4b231223cfeb2ed99e37644ad7e1d0 | code | 9160 | #=
--------------------------------------------------------------------------------------------------------------------------------
Subsampling: Jackknife
--------------------------------------------------------------------------------------------------------------------------------
=#
"""
block_jackknife(Y::Union{FloatMatrix, JArray{Float64,2}}, subsample::Float64)
Generate block jackknife (Kunsch, 1989) samples. This implementation is described in Pellegrino (2020).
This technique subsamples a time series dataset by removing, in turn, all the blocks of consecutive observations with a given size.
# Arguments
- `Y`: Observed measurements (`nxT`), where `n` and `T` are the number of series and observations.
- `subsample`: Block size as a percentage of number of observed periods. It is bounded between 0 and 1.
# References
Kunsch (1989) and Pellegrino (2020).
"""
function block_jackknife(Y::Union{FloatMatrix, JArray{Float64,2}}, subsample::Float64)
# Check inputs
check_bounds(subsample, 0, 1);
# Dimensions
n, T = size(Y);
# Block size
block_size = Int64(ceil(subsample*T));
if block_size == 0
error("subsample is too small!");
end
# Number of block jackknifes samples - as in Kunsch (1989)
samples = T-block_size+1;
# Initialise jackknife_data
jackknife_data = JArray{Float64,3}(undef, n, T, samples);
# Loop over j=1, ..., samples
for j=1:samples
# Index of missings
ind_j = collect(j:j+block_size-1);
# Block jackknife data
jackknife_data[:, :, j] = Y;
jackknife_data[:, ind_j, j] .= missing;
end
# Return jackknife_data
return jackknife_data;
end
"""
optimal_d(n::Int64, T::Int64)
Select the optimal value for d. See artificial_jackknife (...) for more details on d.
# Arguments
- `n`: Number of series
- `T`: Number of observations
"""
function optimal_d(n::Int64, T::Int64)
objfun_array = zeros(n*T);
for d=1:fld(n*T,2)
objfun_array[d] = objfun_optimal_d(n, T, d);
end
return argmax(objfun_array);
end
"""
objfun_optimal_d(n::Int64, T::Int64, d::Int64)
Objective function to select the optimal value for d.
# Arguments
- `n`: Number of series
- `T`: Number of observations
- `d`: Candidate d
"""
function objfun_optimal_d(n::Int64, T::Int64, d::Int64)
fun = no_combinations(n*T, d) - (d>=n)*no_combinations(n*T-n, max(d-n, 0))*T; # the correction term applies only when d >= n
for i=2:fld(d, n)
fun -= ((-1)^(i-1))*no_combinations(T, i)*no_combinations(n*T-i*n, d-i*n); # alternating inclusion-exclusion signs
end
return fun;
end
"""
artificial_jackknife(Y::Union{FloatMatrix, JArray{Float64,2}}, subsample::Float64, max_samples::Int64)
Generate artificial jackknife samples as in Pellegrino (2020).
The artificial delete-d jackknife is an extension of the delete-d jackknife for dependent data problems.
- This technique replaces the actual data removal step with a fictitious deletion, which consists of imposing `d`-dimensional (artificial) patterns of missing observations to the data.
- This approach does not alter the data order nor destroy the correlation structure.
# Arguments
- `Y`: Observed measurements (`nxT`), where `n` and `T` are the number of series and observations.
- `subsample`: `d` as a percentage of the original sample size. It is bounded between 0 and 1.
- `max_samples`: If `C(n*T,d)` is large, artificial_jackknife would generate `max_samples` jackknife samples.
# References
Pellegrino (2020).
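# Examples
A sketch with simulated data (sizes are illustrative):
```julia
Y = randn(3, 100);
jackknife_samples = artificial_jackknife(Y, 0.2, 1000); # 3x100x1000 array with artificial patterns of missings
```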
"""
function artificial_jackknife(Y::Union{FloatMatrix, JArray{Float64,2}}, subsample::Float64, max_samples::Int64)
# Dimensions
n, T = size(Y);
nT = n*T;
# Check inputs
check_bounds(subsample, 0, 1);
# Set d using subsample
d = Int64(ceil(subsample*nT));
# Error management
if d == 0
error("subsample is too small!");
end
# Warning
if subsample > 0.5
@warn "this algorithm might be unstable for `subsample` larger than 0.5!";
end
# Get vec(Y)
vec_Y = convert(JArray{Float64}, Y[:]);
# Initialise loop (controls)
C_nT_d = no_combinations(nT, d);
samples = convert(Int64, min(C_nT_d, max_samples));
zeros_vec = zeros(d);
# Initialise loop (output)
ind_missings = Array{Int64}(zeros(d, samples));
jackknife_data = JArray{Float64,3}(undef, n, T, samples);
# Loop over j=1, ..., samples
for j=1:samples
# First draw
if j == 1
if samples == C_nT_d
ind_missings[:,j] = rand_without_replacement(nT, d);
else
ind_missings[:,j] = rand_without_replacement(n, T, d);
end
# Iterates until ind_missings[:,j] is neither a vector of zeros, nor already included in ind_missings
else
while ind_missings[:,j] == zeros_vec || is_vector_in_matrix(ind_missings[:,j], ind_missings[:, 1:j-1])
if samples == C_nT_d
ind_missings[:,j] = rand_without_replacement(nT, d);
else
ind_missings[:,j] = rand_without_replacement(n, T, d);
end
end
end
# Add (artificial) missing observations
vec_Y_j = copy(vec_Y);
vec_Y_j[ind_missings[:,j]] .= missing;
# Store data
jackknife_data[:, :, j] = reshape(vec_Y_j, n, T);
end
# Return jackknife_data
return jackknife_data;
end
#=
--------------------------------------------------------------------------------------------------------------------------------
Subsampling: Bootstrap
--------------------------------------------------------------------------------------------------------------------------------
=#
"""
moving_block_bootstrap(Y::Union{FloatMatrix, JArray{Float64,2}}, subsample::Float64, samples::Int64)
Generate moving block bootstrap samples.
The moving block bootstrap randomly subsamples a time series into ordered and overlapped blocks of consecutive observations.
# Arguments
- `Y`: Observed measurements (`nxT`), where `n` and `T` are the number of series and observations.
- `subsample`: Block size as a percentage of number of observed periods. It is bounded between 0 and 1.
- `samples`: Number of bootstrap samples.
# References
Kunsch (1989) and Liu and Singh (1992).
"""
function moving_block_bootstrap(Y::Union{FloatMatrix, JArray{Float64,2}}, subsample::Float64, samples::Int64)
# Check inputs
check_bounds(subsample, 0, 1);
# Dimensions
n, T = size(Y);
# Block size
block_size = Int64(ceil(subsample*T));
if block_size == 0
error("subsample is too small!");
end
# Initialise bootstrap_data
bootstrap_data = JArray{Float64,3}(undef, n, block_size, samples);
# Loop over j=1, ..., samples
for j=1:samples
# Starting point for the moving block
ind_j = rand(1:T-block_size+1);
# Bootstrap data
bootstrap_data[:, :, j] .= Y[:, ind_j:ind_j+block_size-1];
end
# Return bootstrap_data
return bootstrap_data;
end
"""
stationary_block_bootstrap(Y::Union{FloatMatrix, JArray{Float64,2}}, subsample::Float64, samples::Int64)
Generate stationary block bootstrap samples.
The stationary bootstrap is similar to the block bootstrap proposed in independently in Kunsch (1989) and Liu and Singh (1992).
There are two main differences:
- The blocks have random length
- In order to achieve stationarity, the stationary (block) bootstrap "wraps" the data around in a "circle" so that the first observation follows the last.
Note: Block size is exponentially distributed with mean `Int64(ceil(subsample*T))`.
# Arguments
- `Y`: Observed measurements (`nxT`), where `n` and `T` are the number of series and observations.
- `subsample`: Block size as a percentage of number of observed periods. It is bounded between 0 and 1.
- `samples`: Number of bootstrap samples.
# References
Politis and Romano (1994).
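# Examples
A sketch with simulated data (sizes are illustrative):
```julia
Y = randn(2, 100);
bootstrap_samples = stationary_block_bootstrap(Y, 0.1, 500); # 2x100x500 array of resampled paths
```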
"""
function stationary_block_bootstrap(Y::Union{FloatMatrix, JArray{Float64,2}}, subsample::Float64, samples::Int64)
# Check inputs
check_bounds(subsample, 0, 1);
# Dimensions
n, T = size(Y);
# Block length is exponentially distributed with mean
avg_block_size = Int64(ceil(subsample*T));
if avg_block_size == 0
error("subsample is too small!");
end
# Initialise bootstrap_data
bootstrap_data = JArray{Float64,3}(undef, n, T, samples);
# Loop over j=1, ..., samples
for j=1:samples
# Merge multiple blocks of random size
ind_j = zeros(T) |> Array{Int64,1};
ind_j[1] = rand(1:T);
# Loop over t=2,...,T
for t=2:T
# Let ind_j[t] be picked at random
if rand() < 1/avg_block_size;
ind_j[t] = rand(1:T);
# Let ind_j[t] be ind_j[t-1] + 1
else
if ind_j[t-1] == T
ind_j[t] = 1;
else
ind_j[t] = ind_j[t-1] + 1;
end
end
end
# Generate j-th bootstrap sample
bootstrap_data[:, :, j] .= Y[:, ind_j];
end
# Return bootstrap_data
return bootstrap_data;
end
| TSAnalysis | https://github.com/fipelle/TSAnalysis.jl.git |
|
[
"BSD-3-Clause"
] | 0.1.4 | 8a4516144f4b231223cfeb2ed99e37644ad7e1d0 | code | 7260 | # Aliases (types)
const FloatVector = Array{Float64,1};
const FloatMatrix = Array{Float64,2};
const FloatArray = Array{Float64};
const SymMatrix = Symmetric{Float64,Array{Float64,2}};
const DiagMatrix = Diagonal{Float64,Array{Float64,1}};
const JVector{T} = Array{Union{Missing, T}, 1};
const JArray{T, N} = Array{Union{Missing, T}, N};
#=
--------------------------------------------------------------------------------------------------------------------------------
Kalman structures
--------------------------------------------------------------------------------------------------------------------------------
=#
abstract type KalmanSettings end
"""
ImmutableKalmanSettings(...)
Define an immutable structure that includes all the Kalman filter and smoother inputs.
# Model
The state space model used below is,
``Y_{t} = B*X_{t} + e_{t}``
``X_{t} = C*X_{t-1} + v_{t}``
Where ``e_{t} ~ N(0, R)`` and ``v_{t} ~ N(0, V)``.
# Arguments
- `Y`: Observed measurements (`nxT`)
- `B`: Measurement equations' coefficients
- `R`: Covariance matrix of the measurement equations' error terms
- `C`: Transition equations' coefficients
- `V`: Covariance matrix of the transition equations' error terms
- `X0`: Mean vector for the states at time t=0
- `P0`: Covariance matrix for the states at time t=0
- `n`: Number of series
- `T`: Number of observations
- `m`: Number of latent states
- `compute_loglik`: Boolean (true for computing the loglikelihood in the Kalman filter)
- `store_history`: Boolean (true to store the history of the filter and smoother)
"""
struct ImmutableKalmanSettings <: KalmanSettings
Y::Union{FloatMatrix, JArray{Float64,2}}
B::FloatMatrix
R::SymMatrix
C::FloatMatrix
V::SymMatrix
X0::FloatVector
P0::SymMatrix
n::Int64
T::Int64
m::Int64
compute_loglik::Bool
store_history::Bool
end
# ImmutableKalmanSettings constructor
function ImmutableKalmanSettings(Y::Union{FloatMatrix, JArray{Float64,2}}, B::FloatMatrix, R::SymMatrix, C::FloatMatrix, V::SymMatrix; compute_loglik::Bool=true, store_history::Bool=true)
# Compute default value for missing parameters
n, T = size(Y);
m = size(B,2);
X0 = zeros(m);
P0 = Symmetric(reshape((I-kron(C, C))\V[:], m, m));
# Return ImmutableKalmanSettings
return ImmutableKalmanSettings(Y, B, R, C, V, X0, P0, n, T, m, compute_loglik, store_history);
end
# ImmutableKalmanSettings constructor
function ImmutableKalmanSettings(Y::Union{FloatMatrix, JArray{Float64,2}}, B::FloatMatrix, R::SymMatrix, C::FloatMatrix, V::SymMatrix, X0::FloatVector, P0::SymMatrix; compute_loglik::Bool=true, store_history::Bool=true)
# Compute default value for missing parameters
n, T = size(Y);
m = size(B,2);
# Return ImmutableKalmanSettings
return ImmutableKalmanSettings(Y, B, R, C, V, X0, P0, n, T, m, compute_loglik, store_history);
end
"""
MutableKalmanSettings(...)
Define a mutable structure identical in shape to ImmutableKalmanSettings.
See the docstring of ImmutableKalmanSettings for more information.
"""
mutable struct MutableKalmanSettings <: KalmanSettings
Y::Union{FloatMatrix, JArray{Float64,2}}
B::FloatMatrix
R::SymMatrix
C::FloatMatrix
V::SymMatrix
X0::FloatVector
P0::SymMatrix
n::Int64
T::Int64
m::Int64
compute_loglik::Bool
store_history::Bool
end
# MutableKalmanSettings constructor
function MutableKalmanSettings(Y::Union{FloatMatrix, JArray{Float64,2}}, B::FloatMatrix, R::SymMatrix, C::FloatMatrix, V::SymMatrix; compute_loglik::Bool=true, store_history::Bool=true)
# Compute default value for missing parameters
n, T = size(Y);
m = size(B,2);
X0 = zeros(m);
P0 = Symmetric(reshape((I-kron(C, C))\V[:], m, m));
# Return MutableKalmanSettings
return MutableKalmanSettings(Y, B, R, C, V, X0, P0, n, T, m, compute_loglik, store_history);
end
# MutableKalmanSettings constructor
function MutableKalmanSettings(Y::Union{FloatMatrix, JArray{Float64,2}}, B::FloatMatrix, R::SymMatrix, C::FloatMatrix, V::SymMatrix, X0::FloatVector, P0::SymMatrix; compute_loglik::Bool=true, store_history::Bool=true)
# Compute default value for missing parameters
n, T = size(Y);
m = size(B,2);
# Return MutableKalmanSettings
return MutableKalmanSettings(Y, B, R, C, V, X0, P0, n, T, m, compute_loglik, store_history);
end
"""
KalmanStatus(...)
Define a mutable structure to manage the status of the Kalman filter and smoother.
# Arguments
- `t`: Current point in time
- `loglik`: Loglikelihood
- `X_prior`: Latest a-priori X
- `X_post`: Latest a-posteriori X
- `P_prior`: Latest a-priori P
- `P_post`: Latest a-posteriori P
- `history_X_prior`: History of a-priori X
- `history_X_post`: History of a-posteriori X
- `history_P_prior`: History of a-priori P
- `history_P_post`: History of a-posteriori P
"""
mutable struct KalmanStatus
t::Int64
loglik::Union{Float64, Nothing}
X_prior::Union{FloatVector, Nothing}
X_post::Union{FloatVector, Nothing}
P_prior::Union{SymMatrix, Nothing}
P_post::Union{SymMatrix, Nothing}
history_X_prior::Union{Array{FloatVector,1}, Nothing}
history_X_post::Union{Array{FloatVector,1}, Nothing}
history_P_prior::Union{Array{SymMatrix,1}, Nothing}
history_P_post::Union{Array{SymMatrix,1}, Nothing}
end
# KalmanStatus constructors
KalmanStatus() = KalmanStatus(0, [nothing for i=1:9]...);
#=
--------------------------------------------------------------------------------------------------------------------------------
UC models: structures
--------------------------------------------------------------------------------------------------------------------------------
=#
abstract type UCSettings end
"""
VARIMASettings(...)
Define an immutable structure to manage VARIMA specifications.
# Arguments
- `Y_levels`: Observed measurements (`nxT`) - in levels
- `Y`: Observed measurements (`nxT`) - differenced and demeaned
- `μ`: Sample average (per series)
- `n`: Number of series
- `d`: Degree of differencing
- `p`: Order of the autoregressive model
- `q`: Order of the moving average model
- `nr`: n*max(p, q+1)
- `np`: n*p
- `nq`: n*q
- `nnp`: (n^2)*p
- `nnq`: (n^2)*q
"""
struct VARIMASettings <: UCSettings
Y_levels::Union{FloatMatrix, JArray{Float64,2}}
Y::Union{FloatMatrix, JArray{Float64,2}}
μ::FloatVector
n::Int64
d::Int64
p::Int64
q::Int64
nr::Int64
np::Int64
nq::Int64
nnp::Int64
nnq::Int64
end
# VARIMASettings constructor
function VARIMASettings(Y_levels::Union{FloatMatrix, JArray{Float64,2}}, d::Int64, p::Int64, q::Int64)
# Initialise
n = size(Y_levels, 1);
r = max(p, q+1);
# Differenciate data
Y = copy(Y_levels);
if d > 0
for i=1:d
Y = diff(Y, dims=2);
end
end
# Mean
μ = mean_skipmissing(Y)[:,1];
# Demean data
Y = demean(Y);
# VARIMASettings
return VARIMASettings(Y_levels, Y, μ, n, d, p, q, n*r, n*p, n*q, (n^2)*p, (n^2)*q);
end
"""
ARIMASettings(...)
Define an alias of VARIMASettings for arima models.
"""
ARIMASettings(Y_levels::Union{FloatMatrix, JArray{Float64,2}}, d::Int64, p::Int64, q::Int64) = VARIMASettings(Y_levels, d, p, q);
| TSAnalysis | https://github.com/fipelle/TSAnalysis.jl.git |
|
[
"BSD-3-Clause"
] | 0.1.4 | 8a4516144f4b231223cfeb2ed99e37644ad7e1d0 | code | 10867 | #=
--------------------------------------------------------------------------------------------------------------------------------
UC models: general interface
--------------------------------------------------------------------------------------------------------------------------------
=#
"""
penalty_eigen(λ::Float64)
Return penalty value for a single eigenvalue λ.
"""
penalty_eigen(λ::Float64) = abs(λ) < 1 ? abs(λ)/(1-abs(λ)) : Inf;
penalty_eigen(λ::Complex{Float64}) = abs(λ) < 1 ? abs(λ)/(1-abs(λ)) : Inf;
"""
fmin_uc_models(θ_unbound::FloatVector, lb::FloatVector, ub::FloatVector, transform_id::Array{Int64,1}, model_structure::Function, settings::UCSettings)
Return fmin for the UC model specified by model_structure(settings).
# Arguments
- `θ_unbound`: Model parameters (with unbounded support)
- `lb`: Lower bound for the parameters
- `ub`: Upper bound for the parameters
- `transform_id`: Type of transformation required for the parameters (0 = none, 1 = generalised log, 2 = generalised logit)
- `model_structure`: Function to setup the state-space structure
- `settings`: Settings for model_structure
- `tightness`: Controls the strength of the penalty (if any)
"""
function fmin_uc_models(θ_unbound::FloatVector, lb::FloatVector, ub::FloatVector, transform_id::Array{Int64,1}, model_structure::Function, uc_settings::UCSettings, tightness::Float64)
# Compute parameters with bounded support
θ = copy(θ_unbound);
for i=1:length(θ)
if transform_id[i] == 1
θ[i] = get_bounded_log(θ_unbound[i], lb[i]);
elseif transform_id[i] == 2
θ[i] = get_bounded_logit(θ_unbound[i], lb[i], ub[i]);
end
end
# Kalman status and settings
status = KalmanStatus();
model_instance, model_penalty = model_structure(θ, uc_settings);
if ~isinf(model_penalty)
settings = ImmutableKalmanSettings(model_instance...);
# Compute loglikelihood for t = 1, ..., T
for t=1:size(settings.Y,2)
kfilter!(settings, status);
end
# Return fmin
return -status.loglik + tightness*model_penalty;
else
return 1/eps();
end
end
"""
forecast(settings::KalmanSettings, h::Int64)
Compute the h-step ahead forecast for the data included in settings.
# Arguments
- `settings`: KalmanSettings struct
- `h`: Forecast horizon
forecast(settings::KalmanSettings, X::FloatVector, h::Int64)
Compute the h-step ahead forecast for the data included in settings, starting from X.
# Arguments
- `settings`: KalmanSettings struct
- `X`: Last known value of the latent states
- `h`: Forecast horizon
"""
function forecast(settings::KalmanSettings, h::Int64)
# Initialise Kalman status
status = KalmanStatus();
# Compute the period referring to the last observation of each series
last_observations = zeros(settings.n) |> Array{Int64,1};
for i=1:settings.n
last_observations[i] = findall(.~ismissing.(settings.Y[i,:]))[end];
end
# Starting point for the forecast
starting_point = minimum(last_observations);
# Filter for t=1,...,T
for t=1:settings.T
kfilter!(settings, status);
end
# Initial forecast: series with a shorter history are forecasted until they match the others
fc = zeros(settings.m, settings.T-starting_point+h);
if starting_point < settings.T
fc[:,1:settings.T-starting_point] = hcat(status.history_X_post[starting_point+1:settings.T]...);
end
# h-steps ahead forecast of the states from last observed point
fc[:,settings.T-starting_point+1:end] = hcat(kforecast(settings, status.X_post, h)...);
# Compute forecast for Y
Y_fc = settings.B*fc;
if settings.T-starting_point > 0
Y_fc[last_observations.==settings.T, settings.T-starting_point] .= NaN;
end
# Return forecast for Y
return Y_fc;
end
forecast(settings::KalmanSettings, X::FloatVector, h::Int64) = settings.B*hcat(kforecast(settings, X, h)...);
#=
--------------------------------------------------------------------------------------------------------------------------------
VARIMA model
--------------------------------------------------------------------------------------------------------------------------------
=#
"""
varma_structure(θ::FloatVector, settings::VARIMASettings)
VARMA(p,q) representation similar to the form reported in Hamilton (1994) for ARIMA(p,q) models.
# Arguments
- `θ`: Model parameters
- `settings`: VARIMASettings struct
"""
function varma_structure(θ::FloatVector, settings::VARIMASettings)
# Initialise
ϑ = copy(θ);
I_n = Matrix(I, settings.n, settings.n) |> FloatMatrix;
UT_n = UpperTriangular(ones(settings.n, settings.n)) |> FloatMatrix;
UT_n[I_n.==1] .= 0;
# Observation equation
B = [I_n reshape(ϑ[1:settings.nnq], settings.n, settings.nq) zeros(settings.n, settings.nr-settings.nq-settings.n)];
R = Symmetric(I_n*1e-8);
# Transition equation: coefficients
C = [reshape(ϑ[settings.nnq+1:settings.nnq+settings.nnp], settings.n, settings.np) zeros(settings.n, settings.nr-settings.np);
Matrix(I, settings.nr-settings.n, settings.nr-settings.n) zeros(settings.nr-settings.n, settings.n)];
# VARMA(p,q) var-cov matrix
V1 = Diagonal(ϑ[settings.nnq+settings.nnp+1:settings.nnq+settings.nnp+settings.n]) |> FloatMatrix;
# Transition equation: variance
V = Symmetric(cat(dims=[1,2], V1, zeros(settings.nr-settings.n, settings.nr-settings.n)));
# Companion form for the moving average part
companion_vma = [B[:,settings.n+1:settings.n+settings.nq];
Matrix(I, settings.nq-settings.n, settings.nq-settings.n) zeros(settings.nq-settings.n, settings.n)];
# Compute penalty
varma_penalty = 0.0;
for λ = [eigvals(C); eigvals(companion_vma)]
varma_penalty += penalty_eigen(λ);
end
# Return state-space structure
return (settings.Y, B, R, C, V), varma_penalty;
end
"""
varima(θ::FloatVector, settings::VARIMASettings)
Return KalmanSettings for a varima(d,p,q) model with parameters θ.
# Arguments
- `θ`: Model parameters
- `settings`: VARIMASettings struct
varima(settings::VARIMASettings, args...; tightness::Float64=1.0)
Estimate varima(d,p,q) model.
# Arguments
- `settings`: VARIMASettings struct
- `args`: Arguments for Optim.optimize
- `tightness`: Controls the strength of the penalty for the non-causal / non-invertible case (default = 1)
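# Examples
A sketch for a univariate ARIMA(1,1,1), estimated with Optim's NelderMead (data and options are purely illustrative):
```julia
using Optim, TSAnalysis;
Y = cumsum(randn(1, 200), dims=2);   # simulated I(1) series
settings = VARIMASettings(Y, 1, 1, 1); # d=1, p=1, q=1
ksettings = varima(settings, NelderMead(), Optim.Options(iterations=10000));
fc = forecast(ksettings, 12, settings); # 12-step ahead forecast, in levels
```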
"""
function varima(θ::FloatVector, settings::VARIMASettings)
# Compute state-space parameters
model_instance, _ = varma_structure(θ, settings);
output = ImmutableKalmanSettings(model_instance...);
# TBD: update the warnings
#=
# Warning 1: invertibility (in the past)
eigval_ma = eigvals(companion_form(output.B[2:end]));
if maximum(abs.(eigval_ma)) >= 1
@warn("Invertibility (in the past) is not properly enforced! \n Re-estimate the model increasing the degree of differencing.");
end
# Warning 2: causality
eigval_ar = eigvals(output.C);
if maximum(abs.(eigval_ar)) >= 1
@warn("Causality is not properly enforced! \n Re-estimate the model increasing the degree of differencing.");
end
# Warning 3: parameter redundancy
intersection_ar_ma = intersect(eigval_ar, eigval_ma);
if length(intersection_ar_ma) > 0
@warn("Parameter redundancy! \n Check the AR and MA polynomials.");
end
=#
# Return output
return output
end
function varima(settings::VARIMASettings, args...; tightness::Float64=1.0)
# Starting point
θ_starting = 1e-4*ones(settings.nnp+settings.nnq+settings.n);
# Bounds
lb = [-0.99*ones(settings.nnp+settings.nnq); 1e-6*ones(settings.n)];
ub = [0.99*ones(settings.nnp+settings.nnq); Inf*ones(settings.n)];
transform_id = [2*ones(settings.nnp+settings.nnq); ones(settings.n)] |> Array{Int64,1};
# Estimate the model
res = Optim.optimize(θ_unbound->fmin_uc_models(θ_unbound, lb, ub, transform_id, varma_structure, settings, tightness), θ_starting, args...);
# Apply bounds
θ_minimizer = copy(res.minimizer);
for i=1:length(θ_minimizer)
if transform_id[i] == 1
θ_minimizer[i] = get_bounded_log(θ_minimizer[i], lb[i]);
elseif transform_id[i] == 2
θ_minimizer[i] = get_bounded_logit(θ_minimizer[i], lb[i], ub[i]);
end
end
# Return output
return varima(θ_minimizer, settings);
end
"""
arima(settings::VARIMASettings, args...; tightness::Float64=1.0)
Define an alias of the varima function for arima models.
"""
arima(settings::VARIMASettings, args...; tightness::Float64=1.0) = varima(settings, args..., tightness=tightness);
"""
forecast(settings::KalmanSettings, h::Int64, varima_settings::VARIMASettings)
Compute the h-step ahead forecast for the data included in settings (in the varima_settings.Y_levels scale).
# Arguments
- `settings`: KalmanSettings struct
- `h`: Forecast horizon
- `varima_settings`: VARIMASettings struct
"""
function forecast(settings::KalmanSettings, h::Int64, varima_settings::VARIMASettings)
# VARIMA
if varima_settings.d > 0
Y = zeros(varima_settings.d, varima_settings.n);
# Loop over each series
for i=1:varima_settings.n
# Initialise Y_all
Y_all = zeros(varima_settings.d, varima_settings.d);
# Last observed point
last_observation = findall(.~ismissing.(varima_settings.Y_levels[i,:]))[end];
# The first row of Y_all is the data in levels
Y_all[1,:] = varima_settings.Y_levels[i, last_observation-varima_settings.d+1:last_observation];
# Differenced data, ex. (1-L)^d * Y_levels
for j=1:varima_settings.d-1
Y_all[1+j,:] = [NaN * ones(1,j) permutedims(diff(Y_all[j,:]))];
end
# Cut Y_all
Y[:,i] = permutedims(Y_all[:,end]);
end
# Initial cumulated forecast
fc_differenced = forecast(settings, h) .+ varima_settings.μ;
fc = zeros(size(fc_differenced));
# Loop over d to compute a prediction for the levels
for i=1:varima_settings.n
starting_point = findall(.~isnan.(fc_differenced[i,:]))[1];
fc[i,starting_point:end] .= cumsum(fc_differenced[i,starting_point:end], dims=2);
for j=varima_settings.d:-1:1
fc[i,starting_point:end] .+= Y[j,i];
if j != 1
fc[i,starting_point:end] = cumsum(fc[i,starting_point:end]);
end
end
end
# Insert NaNs
fc[isnan.(fc_differenced)] .= NaN;
# VARMA
else
# Compute forecast for varima_settings.Y (adjusted by its mean)
fc = forecast(settings, h) .+ varima_settings.μ;
end
return fc;
end
| TSAnalysis | https://github.com/fipelle/TSAnalysis.jl.git |
|
[
"BSD-3-Clause"
] | 0.1.4 | 8a4516144f4b231223cfeb2ed99e37644ad7e1d0 | code | 8461 | """
ksettings_input_test(ksettings::KalmanSettings, Y::JArray, B::FloatMatrix, R::SymMatrix, C::FloatMatrix, V::SymMatrix; compute_loglik::Bool=true, store_history::Bool=true)
Return true if the entries of ksettings are correct (false otherwise).
"""
function ksettings_input_test(ksettings::KalmanSettings, Y::JArray, B::FloatMatrix, R::SymMatrix, C::FloatMatrix, V::SymMatrix; compute_loglik::Bool=true, store_history::Bool=true)
    return all([ksettings.Y == Y;
                ksettings.B == B;
                ksettings.R == R;
                ksettings.C == C;
                ksettings.V == V;
                ksettings.compute_loglik == compute_loglik;
                ksettings.store_history == store_history]);
end
"""
kalman_test(Y::JArray, B::FloatMatrix, R::SymMatrix, C::FloatMatrix, V::SymMatrix, benchmark_data::Tuple)
Run a series of tests to check whether the kalman.jl functions work.
"""
function kalman_test(Y::JArray, B::FloatMatrix, R::SymMatrix, C::FloatMatrix, V::SymMatrix, benchmark_data::Tuple)
# Benchmark data
benchmark_X0, benchmark_P0, benchmark_X_prior, benchmark_P_prior, benchmark_X_post, benchmark_P_post, benchmark_X_fc, benchmark_P_fc, benchmark_loglik,
benchmark_X0_sm, benchmark_P0_sm, benchmark_X_sm, benchmark_P_sm = benchmark_data;
# Loop over ImmutableKalmanSettings and MutableKalmanSettings
for ksettings_type = [ImmutableKalmanSettings; MutableKalmanSettings]
# Tests on KalmanSettings
ksettings1 = ksettings_type(Y, B, R, C, V, compute_loglik=true, store_history=true);
@test ksettings_input_test(ksettings1, Y, B, R, C, V, compute_loglik=true, store_history=true);
ksettings2 = ksettings_type(Y, B, R, C, V, compute_loglik=false, store_history=true);
@test ksettings_input_test(ksettings2, Y, B, R, C, V, compute_loglik=false, store_history=true);
ksettings3 = ksettings_type(Y, B, R, C, V, compute_loglik=true, store_history=false);
@test ksettings_input_test(ksettings3, Y, B, R, C, V, compute_loglik=true, store_history=false);
ksettings4 = ksettings_type(Y, B, R, C, V, compute_loglik=false, store_history=false);
@test ksettings_input_test(ksettings4, Y, B, R, C, V, compute_loglik=false, store_history=false);
ksettings5 = ksettings_type(Y, B, R, C, V);
@test ksettings_input_test(ksettings5, Y, B, R, C, V);
# Initial conditions
@test round.(ksettings1.X0, digits=10) == benchmark_X0;
@test round.(ksettings1.P0, digits=10) == benchmark_P0;
@test ksettings1.X0 == ksettings2.X0;
@test ksettings1.X0 == ksettings3.X0;
@test ksettings1.X0 == ksettings4.X0;
@test ksettings1.X0 == ksettings5.X0;
@test ksettings1.P0 == ksettings2.P0;
@test ksettings1.P0 == ksettings3.P0;
@test ksettings1.P0 == ksettings4.P0;
@test ksettings1.P0 == ksettings5.P0;
# Set default ksettings
ksettings = ksettings5;
# Initialise kstatus
kstatus = KalmanStatus();
for t=1:size(Y,2)
# Run filter
kfilter!(ksettings, kstatus);
# A-priori
@test round.(kstatus.X_prior, digits=10) == benchmark_X_prior[t];
@test round.(kstatus.P_prior, digits=10) == benchmark_P_prior[t];
# A-posteriori
@test round.(kstatus.X_post, digits=10) == benchmark_X_post[t];
@test round.(kstatus.P_post, digits=10) == benchmark_P_post[t];
# 12-step ahead forecast
@test round.(kforecast(ksettings, kstatus.X_post, 12)[end], digits=10) == benchmark_X_fc[t];
@test round.(kforecast(ksettings, kstatus.X_post, kstatus.P_post, 12)[1][end], digits=10) == benchmark_X_fc[t];
@test round.(kforecast(ksettings, kstatus.X_post, kstatus.P_post, 12)[2][end], digits=10) == benchmark_P_fc[t];
end
# Final value of the loglikelihood
@test round.(kstatus.loglik, digits=10) == benchmark_loglik;
# Kalman smoother
X_sm, P_sm, X0_sm, P0_sm = ksmoother(ksettings, kstatus);
for t=1:size(Y,2)
@test round.(X_sm[t], digits=10) == benchmark_X_sm[t];
@test round.(P_sm[t], digits=10) == benchmark_P_sm[t];
end
@test round.(X0_sm, digits=10) == benchmark_X0_sm;
@test round.(P0_sm, digits=10) == benchmark_P0_sm;
end
end
@testset "univariate model" begin
# Initialise data and state-space parameters
Y = [0.35 0.62 missing missing 1.11 missing 2.76 2.73 3.45 3.66];
B = ones(1,1);
R = Symmetric(1e-4*ones(1,1));
C = 0.9*ones(1,1);
V = Symmetric(ones(1,1));
# Correct estimates: initial conditions
benchmark_X0 = read_test_input("./input/univariate/benchmark_X0");
benchmark_P0 = read_test_input("./input/univariate/benchmark_P0");
# Correct estimates: a priori
benchmark_X_prior = read_test_input("./input/univariate/benchmark_X_prior");
benchmark_P_prior = read_test_input("./input/univariate/benchmark_P_prior");
# Correct estimates: a posteriori
benchmark_X_post = read_test_input("./input/univariate/benchmark_X_post");
benchmark_P_post = read_test_input("./input/univariate/benchmark_P_post");
# Correct estimates: 12-step ahead forecast
benchmark_X_fc = read_test_input("./input/univariate/benchmark_X_fc");
benchmark_P_fc = read_test_input("./input/univariate/benchmark_P_fc");
# Correct estimates: loglikelihood
benchmark_loglik = read_test_input("./input/univariate/benchmark_loglik")[1];
# Correct estimates: kalman smoother (smoothed initial conditions)
benchmark_X0_sm = read_test_input("./input/univariate/benchmark_X0_sm");
benchmark_P0_sm = read_test_input("./input/univariate/benchmark_P0_sm");
# Correct estimates: kalman smoother
benchmark_X_sm = read_test_input("./input/univariate/benchmark_X_sm");
benchmark_P_sm = read_test_input("./input/univariate/benchmark_P_sm");
# Benchmark data
benchmark_data = (benchmark_X0, benchmark_P0, benchmark_X_prior, benchmark_P_prior, benchmark_X_post, benchmark_P_post, benchmark_X_fc, benchmark_P_fc, benchmark_loglik,
benchmark_X0_sm, benchmark_P0_sm, benchmark_X_sm, benchmark_P_sm);
# Run tests
kalman_test(Y, B, R, C, V, benchmark_data);
end
@testset "multivariate model" begin
# Initialise data and state-space parameters
Y = [0.72 missing 1.86 missing missing 2.52 2.98 3.81 missing 4.36;
0.95 0.70 missing missing missing missing 2.84 3.88 3.84 4.63];
B = [1.0 0.0; 1.0 1.0];
R = Symmetric(1e-4*Matrix(I,2,2));
C = [0.9 0.0; 0.0 0.1];
V = Symmetric(1.0*Matrix(I,2,2));
# Correct estimates: initial conditions
benchmark_X0 = read_test_input("./input/multivariate/benchmark_X0");
benchmark_P0 = read_test_input("./input/multivariate/benchmark_P0");
# Correct estimates: a priori
benchmark_X_prior = read_test_input("./input/multivariate/benchmark_X_prior");
benchmark_P_prior = read_test_input("./input/multivariate/benchmark_P_prior");
# Correct estimates: a posteriori
benchmark_X_post = read_test_input("./input/multivariate/benchmark_X_post");
benchmark_P_post = read_test_input("./input/multivariate/benchmark_P_post");
# Correct estimates: 12-step ahead forecast
benchmark_X_fc = read_test_input("./input/multivariate/benchmark_X_fc");
benchmark_P_fc = read_test_input("./input/multivariate/benchmark_P_fc");
# Correct estimates: loglikelihood
benchmark_loglik = read_test_input("./input/multivariate/benchmark_loglik")[1];
# Correct estimates: kalman smoother (smoothed initial conditions)
benchmark_X0_sm = read_test_input("./input/multivariate/benchmark_X0_sm");
benchmark_P0_sm = read_test_input("./input/multivariate/benchmark_P0_sm");
# Correct estimates: kalman smoother
benchmark_X_sm = read_test_input("./input/multivariate/benchmark_X_sm");
benchmark_P_sm = read_test_input("./input/multivariate/benchmark_P_sm");
# Benchmark data
benchmark_data = (benchmark_X0, benchmark_P0, benchmark_X_prior, benchmark_P_prior, benchmark_X_post, benchmark_P_post, benchmark_X_fc, benchmark_P_fc, benchmark_loglik,
benchmark_X0_sm, benchmark_P0_sm, benchmark_X_sm, benchmark_P_sm);
# Run tests
kalman_test(Y, B, R, C, V, benchmark_data);
end
| TSAnalysis | https://github.com/fipelle/TSAnalysis.jl.git |
|
[
"BSD-3-Clause"
] | 0.1.4 | 8a4516144f4b231223cfeb2ed99e37644ad7e1d0 | code | 100 | include("./tools.jl");
include("./kalman.jl");
include("./subsampling.jl");
include("./varima.jl");
| TSAnalysis | https://github.com/fipelle/TSAnalysis.jl.git |
|
[
"BSD-3-Clause"
] | 0.1.4 | 8a4516144f4b231223cfeb2ed99e37644ad7e1d0 | code | 1545 | """
subsampling_test(folder_name::String, subsampling_method::Function, args...)
Run a series of tests to check whether the subsampling functions in subsampling.jl work.
"""
function subsampling_test(folder_name::String, subsampling_method::Function, args...)
# Load data
Y = read_test_input("./input/subsampling/data");
# Copy Y and subsample
Y_copy = deepcopy(Y);
args_copy = deepcopy(args);
# Run `subsampling_method` with fixed random seed
Random.seed!(1);
output = subsampling_method(Y, args...);
# Load benchmark output
benchmark = read_test_input("./input/subsampling/$(folder_name)/output_chunk1");
benchmark_size = length(readdir("./input/subsampling/$(folder_name)"));
for i=2:size(output,3)
benchmark = cat(dims=3, benchmark, read_test_input("./input/subsampling/$(folder_name)/output_chunk$(i)"));
end
# Run tests
@test Y_copy == Y;
@test args_copy == args;
@test size(output,3) == benchmark_size;
@test sum(output .=== benchmark) == prod(size(output));
end
@testset "block jackknife" begin
subsampling_test("block_jackknife", block_jackknife, 0.2)
end
@testset "artificial jackknife" begin
subsampling_test("artificial_jackknife", artificial_jackknife, 0.2, 100)
end
@testset "moving block bootstrap" begin
subsampling_test("moving_block_bootstrap", moving_block_bootstrap, 0.2, 100)
end
@testset "stationary block bootstrap" begin
subsampling_test("stationary_block_bootstrap", stationary_block_bootstrap, 0.2, 100)
end
| TSAnalysis | https://github.com/fipelle/TSAnalysis.jl.git |
|
[
"BSD-3-Clause"
] | 0.1.4 | 8a4516144f4b231223cfeb2ed99e37644ad7e1d0 | code | 522 | using LinearAlgebra, Optim, Random, Test, TSAnalysis;
"""
read_test_input(filepath::String)
Read input data necessary to run the test for the Kalman routines. It does not use external dependencies to read input files.
"""
function read_test_input(filepath::String)
# Load CSV into Array{SubString{String},1}
data_str = split(read(open("$filepath.txt"), String), "\n");
deleteat!(data_str, findall(x->x=="", data_str));
# Return output
data = eval(Meta.parse(data_str[1]));
return data;
end
| TSAnalysis | https://github.com/fipelle/TSAnalysis.jl.git |
|
[
"BSD-3-Clause"
] | 0.1.4 | 8a4516144f4b231223cfeb2ed99e37644ad7e1d0 | code | 5541 | """
varima_test(Y::JArray{Float64,2}, d::Int64, p::Int64, q::Int64, benchmark_data::Tuple)
Run a series of tests to check whether the varima functions in uc_models.jl work.
"""
function varima_test(Y::JArray{Float64,2}, d::Int64, p::Int64, q::Int64, benchmark_data::Tuple)
# Benchmark data
benchmark_X0, benchmark_P0, benchmark_B, benchmark_R, benchmark_C, benchmark_V, benchmark_fc = benchmark_data;
# Tests on VARIMASettings
varima_settings = VARIMASettings(Y, d, p, q);
@test varima_settings.d == d;
@test varima_settings.p == p;
@test varima_settings.q == q;
@test varima_settings.n == size(Y,1);
@test varima_settings.nr == size(Y,1)*max(p, q+1);
@test varima_settings.np == size(Y,1)*p;
@test varima_settings.nq == size(Y,1)*q;
@test varima_settings.nnp == size(Y,1)^2*p;
@test varima_settings.nnq == size(Y,1)^2*q;
# Estimate parameters
varima_out = varima(varima_settings, NelderMead(), Optim.Options(iterations=10000, f_tol=1e-2, x_tol=1e-2, g_tol=1e-2, show_trace=true, show_every=500));
# Test on the parameters
@test round.(varima_out.B, digits=10) == benchmark_B;
@test round.(varima_out.R, digits=10) == benchmark_R;
@test round.(varima_out.C, digits=10) == benchmark_C;
@test round.(varima_out.V, digits=10) == benchmark_V;
@test round.(varima_out.X0, digits=10) == benchmark_X0;
@test round.(varima_out.P0, digits=10) == benchmark_P0;
# 12-step ahead forecast
fc = forecast(varima_out, 12, varima_settings);
@test round.(fc, digits=10) == benchmark_fc;
end
@testset "arma" begin
# Load data
Y = permutedims(read_test_input("./input/arma/data"));
# Settings for ARMA(1,1)
d = 0;
p = 1;
q = 1;
# Correct estimates: initial conditions
benchmark_X0 = read_test_input("./input/arma/benchmark_X0");
benchmark_P0 = read_test_input("./input/arma/benchmark_P0");
# Correct estimates: observation equation
benchmark_B = read_test_input("./input/arma/benchmark_B");
benchmark_R = read_test_input("./input/arma/benchmark_R");
# Correct estimates: transition equation
benchmark_C = read_test_input("./input/arma/benchmark_C");
benchmark_V = read_test_input("./input/arma/benchmark_V");
# Correct estimates: forecast
benchmark_fc = read_test_input("./input/arma/benchmark_fc");
# Benchmark data
benchmark_data = (benchmark_X0, benchmark_P0, benchmark_B, benchmark_R, benchmark_C, benchmark_V, benchmark_fc);
# Run tests
varima_test(Y, d, p, q, benchmark_data);
end
@testset "arima" begin
# Load data
Y = permutedims(read_test_input("./input/arima/data"));
    # Settings for ARIMA(1,1,1)
d = 1;
p = 1;
q = 1;
# Correct estimates: initial conditions
benchmark_X0 = read_test_input("./input/arima/benchmark_X0");
benchmark_P0 = read_test_input("./input/arima/benchmark_P0");
# Correct estimates: observation equation
benchmark_B = read_test_input("./input/arima/benchmark_B");
benchmark_R = read_test_input("./input/arima/benchmark_R");
# Correct estimates: transition equation
benchmark_C = read_test_input("./input/arima/benchmark_C");
benchmark_V = read_test_input("./input/arima/benchmark_V");
# Correct estimates: forecast
benchmark_fc = read_test_input("./input/arima/benchmark_fc");
# Benchmark data
benchmark_data = (benchmark_X0, benchmark_P0, benchmark_B, benchmark_R, benchmark_C, benchmark_V, benchmark_fc);
# Run tests
varima_test(Y, d, p, q, benchmark_data);
end
@testset "varma" begin
# Load data
Y = read_test_input("./input/varma/data");
    # Settings for VARMA(1,1)
d = 0;
p = 1;
q = 1;
# Correct estimates: initial conditions
benchmark_X0 = read_test_input("./input/varma/benchmark_X0");
benchmark_P0 = read_test_input("./input/varma/benchmark_P0");
# Correct estimates: observation equation
benchmark_B = read_test_input("./input/varma/benchmark_B");
benchmark_R = read_test_input("./input/varma/benchmark_R");
# Correct estimates: transition equation
benchmark_C = read_test_input("./input/varma/benchmark_C");
benchmark_V = read_test_input("./input/varma/benchmark_V");
# Correct estimates: forecast
benchmark_fc = read_test_input("./input/varma/benchmark_fc");
# Benchmark data
benchmark_data = (benchmark_X0, benchmark_P0, benchmark_B, benchmark_R, benchmark_C, benchmark_V, benchmark_fc);
# Run tests
varima_test(Y, d, p, q, benchmark_data);
end
@testset "varima" begin
# Load data
Y = read_test_input("./input/varima/data");
    # Settings for VARIMA(1,1,1)
d = 1;
p = 1;
q = 1;
# Correct estimates: initial conditions
benchmark_X0 = read_test_input("./input/varima/benchmark_X0");
benchmark_P0 = read_test_input("./input/varima/benchmark_P0");
# Correct estimates: observation equation
benchmark_B = read_test_input("./input/varima/benchmark_B");
benchmark_R = read_test_input("./input/varima/benchmark_R");
# Correct estimates: transition equation
benchmark_C = read_test_input("./input/varima/benchmark_C");
benchmark_V = read_test_input("./input/varima/benchmark_V");
# Correct estimates: forecast
benchmark_fc = read_test_input("./input/varima/benchmark_fc");
# Benchmark data
benchmark_data = (benchmark_X0, benchmark_P0, benchmark_B, benchmark_R, benchmark_C, benchmark_V, benchmark_fc);
# Run tests
varima_test(Y, d, p, q, benchmark_data);
end
| TSAnalysis | https://github.com/fipelle/TSAnalysis.jl.git |
|
[
"BSD-3-Clause"
] | 0.1.4 | 8a4516144f4b231223cfeb2ed99e37644ad7e1d0 | docs | 15059 | # TSAnalysis.jl
```TSAnalysis``` includes basic tools for time series analysis, compatible with incomplete data.
```julia
import Pkg;
Pkg.add("TSAnalysis")
```
## Preface
The Kalman filter and smoother included in this package use symmetric matrices (via ```LinearAlgebra```). This is particularly beneficial for the stability and speed of estimation algorithms (e.g., the EM algorithm in Shumway and Stoffer, 1982), and to handle high-dimensional forecasting problems.
For the examples below, I used economic data from FRED (https://fred.stlouisfed.org/) downloaded via the ```FredData``` package. The dependencies for the examples can be installed via:
```julia
import Pkg;
Pkg.add("FredData");
Pkg.add("Optim");
Pkg.add("Plots");
Pkg.add("Measures");
```
Make sure that your FRED API is accessible to ```FredData``` (as in https://github.com/micahjsmith/FredData.jl).
To run the examples below, execute first the following block of code:
```julia
using Dates, DataFrames, LinearAlgebra, FredData, Optim, Plots, Measures;
using TSAnalysis;
# Plots backend
plotlyjs();
# Initialise FredData
f = Fred();
"""
download_fred_vintage(tickers::Array{String,1}, transformations::Array{String,1})
Download multivariate data from FRED2.
"""
function download_fred_vintage(tickers::Array{String,1}, transformations::Array{String,1})
# Initialise output
output_data = DataFrame();
# Loop over tickers
for i=1:length(tickers)
# Download from FRED2
fred_data = get_data(f, tickers[i], observation_start="1984-01-01", units=transformations[i]).data[:, [:date, :value]];
rename!(fred_data, Symbol.(["date", tickers[i]]));
# Store current vintage
if i == 1
output_data = copy(fred_data);
else
output_data = join(output_data, fred_data, on=:date, kind = :outer);
end
end
# Return output
return output_data;
end
```
## Examples
- [ARIMA models](#arima-models)
- [VARIMA models](#varima-models)
- [Kalman filter and smoother](#kalman-filter-and-smoother)
- [Estimation of state-space models](#estimation-of-state-space-models)
- [Bootstrap and jackknife subsampling](#subsampling)
### ARIMA models
#### Data
Use the following lines of code to download the data for the examples on the ARIMA models:
```julia
# Download data of interest
Y_df = download_fred_vintage(["INDPRO"], ["log"]);
# Convert to JArray{Float64}
Y = Y_df[:,2:end] |> JArray{Float64};
Y = permutedims(Y);
```
#### Estimation
Suppose that we want to estimate an ARIMA(1,1,1) model for the Industrial Production Index. ```TSAnalysis``` provides a simple interface for that:
```julia
# Estimation settings for an ARIMA(1,1,1)
d = 1;
p = 1;
q = 1;
arima_settings = ARIMASettings(Y, d, p, q);
# Estimation
arima_out = arima(arima_settings, NelderMead(), Optim.Options(iterations=10000, f_tol=1e-2, x_tol=1e-2, g_tol=1e-2, show_trace=true, show_every=500));
```
Please note that in the estimation process of the underlying ARMA(p,q), the model is constrained to be causal and invertible in the past by default, for all candidate parameters. This behaviour can be controlled via the ```tightness``` keyword argument of the ```arima``` function.
#### Forecast
The standard forecast function generates prediction for the data in levels. In the example above, this implies that the standard forecast would be referring to industrial production in log-levels:
```julia
# 12-step ahead forecast
max_hz = 12;
fc = forecast(arima_out, max_hz, arima_settings);
```
This can be easily plotted via
```julia
# Extend date vector
date_ext = Y_df[!,:date] |> Array{Date,1};
for hz=1:max_hz
last_month = month(date_ext[end]);
last_year = year(date_ext[end]);
if last_month == 12
last_month = 1;
last_year += 1;
else
last_month += 1;
end
push!(date_ext, Date("01/$(last_month)/$(last_year)", "dd/mm/yyyy"))
end
# Generate plot
p_arima = plot(date_ext, [Y[1,:]; NaN*ones(max_hz)], label="Data", color=RGB(0,0,200/255),
xtickfont=font(8, "Helvetica Neue"), ytickfont=font(8, "Helvetica Neue"),
title="INDPRO", titlefont=font(10, "Helvetica Neue"), framestyle=:box,
legend=:right, size=(800,250), dpi=300, margin = 5mm);
plot!(date_ext, [NaN*ones(length(date_ext)-size(fc,2)); fc[1,:]], label="Forecast", color=RGB(0,0,200/255), line=:dot)
```
<img src="./img/arima.svg">
### VARIMA models
#### Data
Use the following lines of code to download the data for the examples on the VARIMA models:
```julia
# Tickers for data of interest
tickers = ["INDPRO", "PAYEMS", "CPIAUCSL"];
# Transformations for data of interest
transformations = ["log", "log", "log"];
# Download data of interest
Y_df = download_fred_vintage(tickers, transformations);
# Convert to JArray{Float64}
Y = Y_df[:,2:end] |> JArray{Float64};
Y = permutedims(Y);
```
#### Estimation
Suppose that we want to estimate a VARIMA(1,1,1) model. This can be done using:
```julia
# Estimation settings for a VARIMA(1,1,1)
d = 1;
p = 1;
q = 1;
varima_settings = VARIMASettings(Y, d, p, q);
# Estimation
varima_out = varima(varima_settings, NelderMead(), Optim.Options(iterations=20000, f_tol=1e-2, x_tol=1e-2, g_tol=1e-2, show_trace=true, show_every=500));
```
Please note that in the estimation process of the underlying VARMA(p,q), the model is constrained to be causal and invertible in the past by default, for all candidate parameters. This behaviour can be controlled via the ```tightness``` keyword argument of the ```varima``` function.
#### Forecast
The standard forecast function generates prediction for the data in levels. In the example above, this implies that the standard forecast would be referring to data in log-levels:
```julia
# 12-step ahead forecast
max_hz = 12;
fc = forecast(varima_out, max_hz, varima_settings);
```
This can be easily plotted via
```julia
# Extend date vector
date_ext = Y_df[!,:date] |> Array{Date,1};
for hz=1:max_hz
last_month = month(date_ext[end]);
last_year = year(date_ext[end]);
if last_month == 12
last_month = 1;
last_year += 1;
else
last_month += 1;
end
push!(date_ext, Date("01/$(last_month)/$(last_year)", "dd/mm/yyyy"))
end
# Generate plot
figure = Array{Any,1}(undef, varima_settings.n)
for i=1:varima_settings.n
figure[i] = plot(date_ext, [Y[i,:]; NaN*ones(max_hz)], label="Data", color=RGB(0,0,200/255),
xtickfont=font(8, "Helvetica Neue"), ytickfont=font(8, "Helvetica Neue"),
title=tickers[i], titlefont=font(10, "Helvetica Neue"), framestyle=:box,
legend=:right, size=(800,250), dpi=300, margin = 5mm);
plot!(date_ext, [NaN*ones(length(date_ext)-size(fc,2)); fc[i,:]], label="Forecast", color=RGB(0,0,200/255), line=:dot);
end
```
Industrial production (log-levels)
```julia
figure[1]
```
<img src="./img/varima_p1.svg">
Non-farm payrolls (log-levels)
```julia
figure[2]
```
<img src="./img/varima_p2.svg">
Headline CPI (log-levels)
```julia
figure[3]
```
<img src="./img/varima_p3.svg">
### Kalman filter and smoother
#### Data
The following examples show how to perform a standard univariate state-space decomposition (local linear trend + seasonal + noise decomposition) using the implementations of the Kalman filter and smoother in ```TSAnalysis```. These examples use non-seasonally adjusted (NSA) data that can be downloaded via:
```julia
# Download data of interest
Y_df = download_fred_vintage(["IPGMFN"], ["log"]);
# Convert to JArray{Float64}
Y = Y_df[:,2:end] |> JArray{Float64};
Y = permutedims(Y);
```
#### Kalman filter
```julia
# Initialise the Kalman filter and smoother status
kstatus = KalmanStatus();
# Specify the state-space structure
# Observation equation
B = hcat([1.0 0.0], [[1.0 0.0] for j=1:6]...);
R = Symmetric(ones(1,1)*0.01);
# Transition equation
C = cat(dims=[1,2], [1.0 1.0; 0.0 1.0], [[cos(2*pi*j/12) sin(2*pi*j/12); -sin(2*pi*j/12) cos(2*pi*j/12)] for j=1:6]...);
V = Symmetric(cat(dims=[1,2], [1e-4 0.0; 0.0 1e-4], 1e-4*Matrix(I,12,12)));
# Initial conditions
X0 = zeros(14);
P0 = Symmetric(cat(dims=[1,2], 1e3*Matrix(I,2,2), 1e3*Matrix(I,12,12)));
# Settings
ksettings = ImmutableKalmanSettings(Y, B, R, C, V, X0, P0);
# Filter for t = 1, ..., T (the output is dynamically stored into kstatus)
for t=1:size(Y,2)
kfilter!(ksettings, kstatus);
end
# Filtered trend
trend_llts = hcat(kstatus.history_X_post...)[1,:];
```
#### Kalman filter (out-of-sample forecast)
```TSAnalysis``` allows the computation of *h*-step ahead forecasts for the latent states without resetting the Kalman filter. This is particularly efficient for applications in which the number of observed time periods is large, or for heavy out-of-sample exercises.
An easy way to compute the 12-step ahead prediction is to edit the block
```julia
# Filter for t = 1, ..., T (the output is dynamically stored into kstatus)
for t=1:size(Y,2)
kfilter!(ksettings, kstatus);
end
```
into
```julia
# Initialise forecast history
forecast_history = Array{Array{Float64,1},1}();
# 12-step ahead forecast
max_hz = 12;
# Filter for t = 1, ..., T (the output is dynamically stored into kstatus)
for t=1:size(Y,2)
kfilter!(ksettings, kstatus);
# Multiplying for B gives the out-of-sample forecast of the data
push!(forecast_history, (B*hcat(kforecast(ksettings, kstatus.X_post, max_hz)...))[:]);
end
```
#### Kalman smoother
At any point in time, the Kalman smoother can be executed via
```julia
history_Xs, history_Ps, X0s, P0s = ksmoother(ksettings, kstatus);
```
### Estimation of state-space models
State-space models without a high-level interface can be estimated using ```TSAnalysis``` and ```Optim``` jointly.
The state-space model described in the previous section can be estimated following the steps below.
```julia
function llt_seasonal_noise(θ_bound, Y, s)
# Initialise the Kalman filter and smoother status
kstatus = KalmanStatus();
# Specify the state-space structure
s_half = Int64(s/2);
# Observation equation
B = hcat([1.0 0.0], [[1.0 0.0] for j=1:s_half]...);
R = Symmetric(ones(1,1)*θ_bound[1]);
# Transition equation
C = cat(dims=[1,2], [1.0 1.0; 0.0 1.0], [[cos(2*pi*j/s) sin(2*pi*j/s); -sin(2*pi*j/s) cos(2*pi*j/s)] for j=1:s_half]...);
V = Symmetric(cat(dims=[1,2], [θ_bound[2] 0.0; 0.0 θ_bound[3]], θ_bound[4]*Matrix(I,s,s)));
# Initial conditions
X0 = zeros(2+s);
P0 = Symmetric(cat(dims=[1,2], 1e3*Matrix(I,2+s,2+s)));
# Settings
ksettings = ImmutableKalmanSettings(Y, B, R, C, V, X0, P0);
# Filter for t = 1, ..., T (the output is dynamically stored into kstatus)
for t=1:size(Y,2)
kfilter!(ksettings, kstatus);
end
return ksettings, kstatus;
end
function fmin(θ_unbound, Y; s::Int64=12)
# Apply bounds
θ_bound = copy(θ_unbound);
for i=1:length(θ_bound)
θ_bound[i] = TSAnalysis.get_bounded_log(θ_bound[i], 1e-8);
end
# Compute loglikelihood
ksettings, kstatus = llt_seasonal_noise(θ_bound, Y, s)
# Return -loglikelihood
return -kstatus.loglik;
end
# Starting point
θ_starting = 1e-8*ones(4);
# Estimate the model
res = Optim.optimize(θ_unbound->fmin(θ_unbound, Y, s=12), θ_starting, NelderMead(),
Optim.Options(iterations=10000, f_tol=1e-4, x_tol=1e-4, show_trace=true, show_every=500));
# Apply bounds
θ_bound = copy(res.minimizer);
for i=1:length(θ_bound)
θ_bound[i] = TSAnalysis.get_bounded_log(θ_bound[i], 1e-8);
end
```
More options for the optimisation can be found at https://github.com/JuliaNLSolvers/Optim.jl.
The results of the estimation can be visualised using ```Plots```.
```julia
# Kalman smoother estimates
ksettings, kstatus = llt_seasonal_noise(θ_bound, Y, 12);
history_Xs, history_Ps, X0s, P0s = ksmoother(ksettings, kstatus);
# Data vs trend
p_trend = plot(Y_df[!,:date], permutedims(Y), label="Data", color=RGB(185/255,185/255,185/255),
xtickfont=font(8, "Helvetica Neue"), ytickfont=font(8, "Helvetica Neue"),
title="IPGMFN", titlefont=font(10, "Helvetica Neue"), framestyle=:box,
legend=:right, size=(800,250), dpi=300, margin = 5mm);
plot!(Y_df[!,:date], hcat(history_Xs...)[1,:], label="Trend", color=RGB(0,0,200/255))
```
<img src="./img/ks_trend.svg">
and
```julia
# Slope (of the trend)
p_slope = plot(Y_df[!,:date], hcat(history_Xs...)[2,:], label="Slope", color=RGB(0,0,200/255),
xtickfont=font(8, "Helvetica Neue"), ytickfont=font(8, "Helvetica Neue"),
titlefont=font(10, "Helvetica Neue"), framestyle=:box,
legend=:right, size=(800,250), dpi=300, margin = 5mm)
```
<img src="./img/ks_slope_trend.svg">
### Bootstrap and jackknife subsampling
```TSAnalysis``` provides support for the bootstrap and jackknife subsampling methods introduced in Kunsch (1989), Liu and Singh (1992), Pellegrino (2020), Politis and Romano (1994):
* Artificial delete-*d* jackknife
* Block bootstrap
* Block jackknife
* Stationary bootstrap
#### Data
Use the following lines of code to download the data for the examples below:
```julia
# Tickers for data of interest
tickers = ["INDPRO", "PAYEMS", "CPIAUCSL"];
# Transformations for data of interest
transformations = ["log", "log", "log"];
# Download data of interest
Y_df = download_fred_vintage(tickers, transformations);
# Convert to JArray{Float64}
Y = Y_df[:,2:end] |> JArray{Float64};
Y = 100*permutedims(diff(Y, dims=1));
```
#### Subsampling
##### Artificial delete-*d* jackknife
```julia
# Optimal d. See Pellegrino (2020) for more details.
d_hat = optimal_d(size(Y)...);
# 100 artificial jackknife samples
output_ajk = artificial_jackknife(Y, d_hat/prod(size(Y)), 100);
```
##### Block bootstrap
```julia
# Block size
block_size = 10;
# 100 block bootstrap samples
output_bb = moving_block_bootstrap(Y, block_size/size(Y,2), 100);
```
##### Block jackknife
```julia
# Block size
block_size = 10;
# Block jackknife samples (full collection)
output_bjk = block_jackknife(Y, block_size/size(Y,2));
```
##### Stationary bootstrap
```julia
# Average block size
avg_block_size = 10;
# 100 stationary bootstrap samples
output_sb = stationary_block_bootstrap(Y, avg_block_size/size(Y,2), 100);
```
## Bibliography
* Kunsch, H. R. (1989). The jackknife and the bootstrap for general stationary observations. The annals of Statistics, 1217-1241.
* Liu, R. Y., & Singh, K. (1992). Moving blocks jackknife and bootstrap capture weak dependence. Exploring the limits of bootstrap, 225, 248.
* Pellegrino, F. (2020). Selecting time-series hyperparameters with the artificial jackknife. arXiv preprint arXiv:2002.04697.
* Politis, D. N., & Romano, J. P. (1994). The stationary bootstrap. Journal of the American Statistical association, 89(428), 1303-1313.
* Shumway, R. H., & Stoffer, D. S. (1982). An approach to time series smoothing and forecasting using the EM algorithm. Journal of time series analysis, 3(4), 253-264.
| TSAnalysis | https://github.com/fipelle/TSAnalysis.jl.git |
|
[
"MIT"
] | 6.10.0 | ba29564853a3f7b10eb699f1e99a62cdb6a3770f | code | 636 | using Documenter, Surrogates
cp("./docs/Manifest.toml", "./docs/src/assets/Manifest.toml", force = true)
cp("./docs/Project.toml", "./docs/src/assets/Project.toml", force = true)
# Make sure that plots don't throw a bunch of warnings / errors!
ENV["GKSwstype"] = "100"
using Plots
include("pages.jl")
makedocs(sitename = "Surrogates.jl",
linkcheck = true,
warnonly = [:missing_docs],
format = Documenter.HTML(analytics = "UA-90474609-3",
assets = ["assets/favicon.ico"],
canonical = "https://docs.sciml.ai/Surrogates/stable/"),
pages = pages)
deploydocs(repo = "github.com/SciML/Surrogates.jl.git")
| Surrogates | https://github.com/SciML/Surrogates.jl.git |
|
[
"MIT"
] | 6.10.0 | ba29564853a3f7b10eb699f1e99a62cdb6a3770f | code | 1796 | pages = ["index.md"
"Tutorials" => [
"Basics" => "tutorials.md",
"Radials" => "radials.md",
"Kriging" => "kriging.md",
"Gaussian Process" => "abstractgps.md",
"Lobachevsky" => "lobachevsky.md",
"Linear" => "LinearSurrogate.md",
"InverseDistance" => "InverseDistance.md",
"RandomForest" => "randomforest.md",
"SecondOrderPolynomial" => "secondorderpoly.md",
"NeuralSurrogate" => "neural.md",
"Wendland" => "wendland.md",
"Polynomial Chaos" => "polychaos.md",
"Variable Fidelity" => "variablefidelity.md",
"Gradient Enhanced Kriging" => "gek.md",
"GEKPLS" => "gekpls.md",
"MOE" => "moe.md",
"Parallel Optimization" => "parallel.md"
]
"User guide" => [
"Samples" => "samples.md",
"Surrogates" => "surrogate.md",
"Optimization" => "optimizations.md"
]
"Benchmarks" => [
"Sphere function" => "sphere_function.md",
"Lp norm" => "lp.md",
"Rosenbrock" => "rosenbrock.md",
"Tensor product" => "tensor_prod.md",
"Cantilever beam" => "cantilever.md",
"Water Flow function" => "water_flow.md",
"Welded beam function" => "welded_beam.md",
"Branin function" => "BraninFunction.md",
"Improved Branin function" => "ImprovedBraninFunction.md",
"Ackley function" => "ackley.md",
"Gramacy & Lee Function" => "gramacylee.md",
"Salustowicz Benchmark" => "Salustowicz.md",
"Multi objective optimization" => "multi_objective_opt.md"
]]
| Surrogates | https://github.com/SciML/Surrogates.jl.git |
|
[
"MIT"
] | 6.10.0 | ba29564853a3f7b10eb699f1e99a62cdb6a3770f | code | 1559 | module SurrogatesAbstractGPs
import Surrogates: add_point!, AbstractSurrogate, std_error_at_point, _check_dimension
export AbstractGPSurrogate, var_at_point, logpdf_surrogate
using AbstractGPs
mutable struct AbstractGPSurrogate{X, Y, GP, GP_P, S} <: AbstractSurrogate
x::X
y::Y
gp::GP
gp_posterior::GP_P
Σy::S
end
# constructor
function AbstractGPSurrogate(x, y; gp = GP(Matern52Kernel()), Σy = 0.1)
AbstractGPSurrogate(x, y, gp, posterior(gp(x, Σy), y), Σy)
end
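# Usage sketch (assumes `x`, `y` are training samples, e.g. generated with `Surrogates.sample`,
# and that a squared-exponential kernel is appropriate for the data):
#   g = AbstractGPSurrogate(x, y, gp = GP(SqExponentialKernel()), Σy = 0.05)
#   g([2.5]) # posterior mean prediction at a new point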
# predictor
function (g::AbstractGPSurrogate)(val)
# Check to make sure dimensions of input matches expected dimension of surrogate
_check_dimension(g, val)
return only(mean(g.gp_posterior([val])))
end
# for add point
# copies of x and y need to be made because we get
#"Error: cannot resize array with shared data " if we push! directly to x and y
function add_point!(g::AbstractGPSurrogate, new_x, new_y)
if new_x in g.x
println("Adding a sample that already exists, cannot build AbstracgGPSurrogate.")
return
end
x_copy = copy(g.x)
push!(x_copy, new_x)
y_copy = copy(g.y)
push!(y_copy, new_y)
updated_posterior = posterior(g.gp(x_copy, g.Σy), y_copy)
g.x, g.y, g.gp_posterior = x_copy, y_copy, updated_posterior
nothing
end
function std_error_at_point(g::AbstractGPSurrogate, val)
return sqrt(only(var(g.gp_posterior([val]))))
end
# Log marginal posterior predictive probability.
function logpdf_surrogate(g::AbstractGPSurrogate)
return logpdf(g.gp_posterior(g.x), g.y)
end
end # module
| Surrogates | https://github.com/SciML/Surrogates.jl.git |
|
[
"MIT"
] | 6.10.0 | ba29564853a3f7b10eb699f1e99a62cdb6a3770f | code | 4675 | using SafeTestsets, Test
using Surrogates: sample, SobolSample
@safetestset "AbstractGPSurrogate" begin
using Surrogates
using SurrogatesAbstractGPs
using AbstractGPs
using Zygote
@testset "1D -> 1D" begin
lb = 0.0
ub = 3.0
f = x -> log(x) * exp(x)
x = sample(5, lb, ub, SobolSample())
y = f.(x)
agp1D = AbstractGPSurrogate(x, y, gp = GP(SqExponentialKernel()), Σy = 0.05)
x_new = 2.5
y_actual = f.(x_new)
y_predicted = agp1D([x_new])
@test isapprox(y_predicted, y_actual, atol = 0.1)
end
@testset "add points 1D" begin
lb = 0.0
ub = 3.0
f = x -> x^2
x_points = sample(5, lb, ub, SobolSample())
y_points = f.(x_points)
agp1D = AbstractGPSurrogate([x_points[1]], [y_points[1]],
gp = GP(SqExponentialKernel()), Σy = 0.05)
x_new = 2.5
y_actual = f.(x_new)
for i in 2:length(x_points)
add_point!(agp1D, x_points[i], y_points[i])
end
y_predicted = agp1D([x_new])
@test isapprox(y_predicted, y_actual, atol = 0.1)
end
@testset "2D -> 1D" begin
lb = [0.0; 0.0]
ub = [2.0; 2.0]
log_exp_f = x -> log(x[1]) * exp(x[2])
x = sample(50, lb, ub, SobolSample())
y = log_exp_f.(x)
agp_2D = AbstractGPSurrogate(x, y)
x_new_2D = (2.0, 1.0)
y_actual = log_exp_f(x_new_2D)
y_predicted = agp_2D(x_new_2D)
@test isapprox(y_predicted, y_actual, atol = 0.1)
end
@testset "add points 2D" begin
lb = [0.0; 0.0]
ub = [2.0; 2.0]
sphere = x -> x[1]^2 + x[2]^2
x = sample(20, lb, ub, SobolSample())
y = sphere.(x)
agp_2D = AbstractGPSurrogate([x[1]], [y[1]])
logpdf_vals = []
push!(logpdf_vals, logpdf_surrogate(agp_2D))
for i in 2:length(x)
add_point!(agp_2D, x[i], y[i])
push!(logpdf_vals, logpdf_surrogate(agp_2D))
end
@test first(logpdf_vals) < last(logpdf_vals) #as more points are added log marginal posterior predictive probability increases
end
@testset "check ND prediction" begin
lb = [-1.0; -1.0; -1.0]
ub = [1.0; 1.0; 1.0]
f = x -> hypot(x...)
x = sample(25, lb, ub, SobolSample())
y = f.(x)
agpND = AbstractGPSurrogate(x, y, gp = GP(SqExponentialKernel()), Σy = 0.05)
x_new = (-0.8, 0.8, 0.8)
@test agpND(x_new)≈f(x_new) atol=0.2
end
@testset "Optimization 1D" begin
objective_function = x -> 2 * x + 1
lb = 0.0
ub = 6.0
x = [2.0, 4.0, 6.0]
y = [5.0, 9.0, 13.0]
p = 2
a = 2
b = 6
my_k_EI1 = AbstractGPSurrogate(x, y)
surrogate_optimize(objective_function, EI(), a, b, my_k_EI1, RandomSample(),
maxiters = 200, num_new_samples = 155)
end
@testset "Optimization ND" begin
objective_function_ND = z -> 3 * hypot(z...) + 1
x = [(1.2, 3.0), (3.0, 3.5), (5.2, 5.7)]
y = objective_function_ND.(x)
theta = [2.0, 2.0]
lb = [1.0, 1.0]
ub = [6.0, 6.0]
my_k_E1N = AbstractGPSurrogate(x, y)
surrogate_optimize(objective_function_ND, EI(), lb, ub, my_k_E1N, RandomSample())
end
@testset "check working of logpdf_surrogate 1D" begin
lb = 0.0
ub = 3.0
f = x -> log(x) * exp(x)
x = sample(5, lb, ub, SobolSample())
y = f.(x)
agp1D = AbstractGPSurrogate(x, y, gp = GP(SqExponentialKernel()), Σy = 0.05)
logpdf_surrogate(agp1D)
end
@testset "check working of logpdf_surrogate ND" begin
lb = [0.0; 0.0]
ub = [2.0; 2.0]
f = x -> log(x[1]) * exp(x[2])
x = sample(5, lb, ub, SobolSample())
y = f.(x)
agpND = AbstractGPSurrogate(x, y, gp = GP(SqExponentialKernel()), Σy = 0.05)
logpdf_surrogate(agpND)
end
lb = 0.0
ub = 3.0
n = 10
x = sample(n, lb, ub, SobolSample())
f = x -> x^2
y = f.(x)
#AbstractGP 1D
@testset "AbstractGP 1D" begin
agp1D = AbstractGPSurrogate(x, y, gp = GP(SqExponentialKernel()), Σy = 0.05)
g = x -> agp1D'(x)
g([2.0])
end
lb = [0.0, 0.0]
ub = [10.0, 10.0]
n = 5
x = sample(n, lb, ub, SobolSample())
f = x -> x[1] * x[2]
y = f.(x)
# AbstractGP ND
@testset "AbstractGPSurrogate ND" begin
my_agp = AbstractGPSurrogate(x, y, gp = GP(SqExponentialKernel()), Σy = 0.05)
g = x -> Zygote.gradient(my_agp, x)
#g([(2.0,5.0)])
g((2.0, 5.0))
end
end
| Surrogates | https://github.com/SciML/Surrogates.jl.git |
|
[
"MIT"
] | 6.10.0 | ba29564853a3f7b10eb699f1e99a62cdb6a3770f | code | 1570 | module SurrogatesFlux
import Surrogates: add_point!, AbstractSurrogate, _check_dimension
export NeuralSurrogate
using Flux
mutable struct NeuralSurrogate{X, Y, M, L, O, P, N, A, U} <: AbstractSurrogate
x::X
y::Y
model::M
loss::L
opt::O
ps::P
n_echos::N
lb::A
ub::U
end
"""
NeuralSurrogate(x,y,lb,ub,model,loss,opt,n_echos)
- model: Flux layers
- loss: loss function
- opt: optimization function
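- n_echos: number of training epochs
A minimal 1-D sketch (assuming samples `x`, `y`, bounds `lb`, `ub`, and that `Flux` is loaded):
```julia
model = Chain(Dense(1, 1), first)
loss(x, y) = Flux.mse(model(x), y)
surr = NeuralSurrogate(x, y, lb, ub, model = model, loss = loss, opt = Descent(0.01), n_echos = 1)
surr(5.0)
```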
"""
function NeuralSurrogate(x, y, lb, ub; model = Chain(Dense(length(x[1]), 1), first),
loss = (x, y) -> Flux.mse(model(x), y), opt = Descent(0.01),
n_echos::Int = 1)
X = vec.(collect.(x))
data = zip(X, y)
ps = Flux.params(model)
for epoch in 1:n_echos
Flux.train!(loss, ps, data, opt)
end
return NeuralSurrogate(x, y, model, loss, opt, ps, n_echos, lb, ub)
end
function (my_neural::NeuralSurrogate)(val)
# Check to make sure dimensions of input matches expected dimension of surrogate
_check_dimension(my_neural, val)
v = [val...]
out = my_neural.model(v)
if length(out) == 1
return out[1]
else
return out
end
end
function add_point!(my_n::NeuralSurrogate, x_new, y_new)
if eltype(x_new) == eltype(my_n.x)
append!(my_n.x, x_new)
append!(my_n.y, y_new)
else
push!(my_n.x, x_new)
push!(my_n.y, y_new)
end
X = vec.(collect.(my_n.x))
data = zip(X, my_n.y)
for epoch in 1:(my_n.n_echos)
Flux.train!(my_n.loss, my_n.ps, data, my_n.opt)
end
nothing
end
end # module
| Surrogates | https://github.com/SciML/Surrogates.jl.git |
|
[
"MIT"
] | 6.10.0 | ba29564853a3f7b10eb699f1e99a62cdb6a3770f | code | 4695 | using SafeTestsets
@safetestset "SurrogatesFlux" begin
using Surrogates
using Surrogates: SobolSample
using Flux
using SurrogatesFlux
using LinearAlgebra
using Zygote
#1D
a = 0.0
b = 10.0
obj_1D = x -> 2 * x + 3
x = sample(10, 0.0, 10.0, SobolSample())
y = obj_1D.(x)
my_model = Chain(Dense(1, 1), first)
my_loss(x, y) = Flux.mse(my_model(x), y)
my_opt = Descent(0.01)
n_echos = 1
my_neural = NeuralSurrogate(x, y, a, b, model = my_model, loss = my_loss, opt = my_opt,
n_echos = 1)
my_neural_kwargs = NeuralSurrogate(x, y, a, b)
add_point!(my_neural, 8.5, 20.0)
add_point!(my_neural, [3.2, 3.5], [7.4, 8.0])
val = my_neural(5.0)
#ND
lb = [0.0, 0.0]
ub = [5.0, 5.0]
x = sample(5, lb, ub, SobolSample())
obj_ND_neural(x) = x[1] * x[2]
y = obj_ND_neural.(x)
my_model = Chain(Dense(2, 1), first)
my_loss(x, y) = Flux.mse(my_model(x), y)
my_opt = Descent(0.01)
n_echos = 1
my_neural = NeuralSurrogate(x, y, lb, ub, model = my_model, loss = my_loss,
opt = my_opt, n_echos = 1)
my_neural_kwargs = NeuralSurrogate(x, y, lb, ub)
my_neural((3.5, 1.49))
my_neural([3.4, 1.4])
add_point!(my_neural, (3.5, 1.4), 4.9)
add_point!(my_neural, [(3.5, 1.4), (1.5, 1.4), (1.3, 1.2)], [1.3, 1.4, 1.5])
# Multi-output #98
f = x -> [x^2, x]
lb = 1.0
ub = 10.0
x = sample(5, lb, ub, SobolSample())
push!(x, 2.0)
y = f.(x)
my_model = Chain(Dense(1, 2))
my_loss(x, y) = Flux.mse(my_model(x), y)
surrogate = NeuralSurrogate(x, y, lb, ub, model = my_model, loss = my_loss,
opt = my_opt, n_echos = 1)
surr_kwargs = NeuralSurrogate(x, y, lb, ub)
f = x -> [x[1], x[2]^2]
lb = [1.0, 2.0]
ub = [10.0, 8.5]
x = sample(20, lb, ub, SobolSample())
push!(x, (1.0, 2.0))
y = f.(x)
my_model = Chain(Dense(2, 2))
my_loss(x, y) = Flux.mse(my_model(x), y)
surrogate = NeuralSurrogate(x, y, lb, ub, model = my_model, loss = my_loss,
opt = my_opt, n_echos = 1)
surrogate_kwargs = NeuralSurrogate(x, y, lb, ub)
surrogate((1.0, 2.0))
x_new = (2.0, 2.0)
y_new = f(x_new)
add_point!(surrogate, x_new, y_new)
#Optimization
lb = [1.0, 1.0]
ub = [6.0, 6.0]
x = sample(5, lb, ub, SobolSample())
objective_function_ND = z -> 3 * norm(z) + 1
y = objective_function_ND.(x)
model = Chain(Dense(2, 1), first)
loss(x, y) = Flux.mse(model(x), y)
opt = Descent(0.01)
n_echos = 1
my_neural_ND_neural = NeuralSurrogate(x, y, lb, ub)
surrogate_optimize(objective_function_ND, SRBF(), lb, ub, my_neural_ND_neural,
SobolSample(), maxiters = 15)
# AD Compatibility
lb = 0.0
ub = 3.0
n = 10
x = sample(n, lb, ub, SobolSample())
f = x -> x^2
y = f.(x)
#NN
@testset "NN" begin
my_model = Chain(Dense(1, 1), first)
my_loss(x, y) = Flux.mse(my_model(x), y)
my_opt = Descent(0.01)
n_echos = 1
my_neural = NeuralSurrogate(x, y, lb, ub, model = my_model, loss = my_loss,
opt = my_opt, n_echos = 1)
g = x -> my_neural'(x)
g(3.4)
end
lb = [0.0, 0.0]
ub = [10.0, 10.0]
n = 5
x = sample(n, lb, ub, SobolSample())
f = x -> x[1] * x[2]
y = f.(x)
#NN
@testset "NN ND" begin
my_model = Chain(Dense(2, 1), first)
my_loss(x, y) = Flux.mse(my_model(x), y)
my_opt = Descent(0.01)
n_echos = 1
my_neural = NeuralSurrogate(x, y, lb, ub, model = my_model, loss = my_loss,
opt = my_opt, n_echos = 1)
g = x -> Zygote.gradient(my_neural, x)
g((2.0, 5.0))
end
# ###### ND -> ND ######
lb = [0.0, 0.0]
ub = [10.0, 2.0]
n = 5
x = sample(n, lb, ub, SobolSample())
f = x -> [x[1]^2, x[2]]
y = f.(x)
#NN
@testset "NN ND -> ND" begin
my_model = Chain(Dense(2, 2))
my_loss(x, y) = Flux.mse(my_model(x), y)
my_opt = Descent(0.01)
n_echos = 1
my_neural = NeuralSurrogate(x, y, lb, ub, model = my_model, loss = my_loss,
opt = my_opt, n_echos = 1)
Zygote.gradient(x -> sum(my_neural(x)), (2.0, 5.0))
my_rad = RadialBasis(x, y, lb, ub, rad = linearRadial())
Zygote.gradient(x -> sum(my_rad(x)), (2.0, 5.0))
my_p = 1.4
my_inverse = InverseDistanceSurrogate(x, y, lb, ub, p = my_p)
my_inverse((2.0, 5.0))
Zygote.gradient(x -> sum(my_inverse(x)), (2.0, 5.0))
my_second = SecondOrderPolynomialSurrogate(x, y, lb, ub)
Zygote.gradient(x -> sum(my_second(x)), (2.0, 5.0))
end
end
| Surrogates | https://github.com/SciML/Surrogates.jl.git |
|
[
"MIT"
] | 6.10.0 | ba29564853a3f7b10eb699f1e99a62cdb6a3770f | code | 13588 | module SurrogatesMOE
import Surrogates: AbstractSurrogate, linearRadial, cubicRadial, multiquadricRadial,
thinplateRadial, RadialBasisStructure, RadialBasis,
InverseDistanceSurrogate, Kriging, LobachevskyStructure,
LobachevskySurrogate, NeuralStructure, PolyChaosStructure,
LinearSurrogate, add_point!
export MOE
using GaussianMixtures
using Random
using Distributions
using LinearAlgebra
using SurrogatesFlux
using SurrogatesPolyChaos
using SurrogatesRandomForest
using XGBoost
mutable struct MOE{X, Y, C, D, M, E, ND, NC} <: AbstractSurrogate
x::X
y::Y
c::C #clusters (C) - vector of gaussian mixture clusters
d::D #distributions (D) - vector of frozen multivariate distributions
    m::M # models (M) - vector of trained models corresponding to clusters (C) and distributions (D)
e::E #expert types
nd::ND #number of dimensions
nc::NC #number of clusters
end
"""
MOE(x, y, expert_types; ndim=1, n_clusters=2, quantile=10)
constructor for MOE; takes in x, y and expert types and returns an MOE struct
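A minimal 1-D sketch (assuming `f` is the target function and `Surrogates` is loaded for `sample`, `SobolSample` and the expert structures):
```julia
x = sample(50, -1.0, 1.0, SobolSample())
y = f.(x)
expert_types = [KrigingStructure(p = 1.0, theta = 1.0), InverseDistanceStructure(p = 1.0)]
moe = MOE(x, y, expert_types)
moe(0.0)
```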
"""
function MOE(x, y, expert_types; ndim = 1, n_clusters = 2, quantile = 10)
if (ndim > 1)
#x = _vector_of_tuples_to_matrix(x)
X = _vector_of_tuples_to_matrix(x)
values = hcat(X, y)
else
values = hcat(x, y)
end
x_and_y_test, x_and_y_train = _extract_part(values, quantile)
# We get posdef error without jitter; And if values repeat we get NaN vals
# https://github.com/davidavdav/GaussianMixtures.jl/issues/21
jitter_vals = ((rand(eltype(x_and_y_train), size(x_and_y_train))) ./ 10000)
gm_cluster = GMM(n_clusters, x_and_y_train + jitter_vals, kind = :full, nInit = 50,
nIter = 20)
mvn_distributions = _create_clusters_distributions(gm_cluster, ndim, n_clusters)
cluster_classifier_train = _cluster_predict(gm_cluster, x_and_y_train)
clusters_train = _cluster_values(x_and_y_train, cluster_classifier_train, n_clusters)
cluster_classifier_test = _cluster_predict(gm_cluster, x_and_y_test)
clusters_test = _cluster_values(x_and_y_test, cluster_classifier_test, n_clusters)
best_models = []
for i in 1:n_clusters
best_model = _find_best_model(clusters_train[i], clusters_test[i], ndim,
expert_types)
push!(best_models, best_model)
end
# X = values[:, 1:ndim]
# y = values[:, 2]
#return MOE(X, y, gm_cluster, mvn_distributions, best_models)
return MOE(x, y, gm_cluster, mvn_distributions, best_models, expert_types, ndim,
n_clusters)
end
"""
(moe::MOE)(val::Number)
predictor for 1D inputs
"""
function (moe::MOE)(val::Number)
val = [val]
weights = GaussianMixtures.weights(moe.c)
rvs = [Distributions.pdf(moe.d[k], val) for k in 1:length(weights)]
probs = weights .* rvs
rad = sum(probs)
if rad > 0
probs = probs / rad
end
max_index = argmax(probs)
prediction = moe.m[max_index](val[1])
return prediction
end
"""
(moe::MOE)(val)
predictor for ndimensional inputs
"""
function (moe::MOE)(val)
val = collect(val) #to handle inputs that may sometimes be tuples
weights = GaussianMixtures.weights(moe.c)
rvs = [Distributions.pdf(moe.d[k], val) for k in 1:length(weights)]
probs = weights .* rvs
rad = sum(probs)
if rad > 0
probs = probs ./ rad
end
max_index = argmax(probs)
prediction = moe.m[max_index](val)
return prediction
end
"""
_cluster_predict(gmm::GMM, X::Matrix)
gmm - a trained Gaussian Mixture Model
X - a matrix of points with dimensions equal to the inputs used for the
training of the model
Return - Clusters to which each of the points belong to (starts at int 1)
Example:
X = [1.0 2; 1 4; 1 0; 10 2; 10 4; 10 0] + rand(Float64, (6, 2))
gm = GMM(2, X)
_cluster_predict(gm, [0.0 0.0; 12.0 3.0]) #returns [1,2]
"""
function _cluster_predict(gmm::GMM, X::Matrix)
llpg_X = llpg(gmm, X) #log likelihood probability of X belonging to each of the clusters in the gaussian mixture
return map(argmax, eachrow(llpg_X))
end
"""
_extract_part(values, quantile)
values - a matrix containing all the input values (n test points by d dimensions)
quantile - the interval between rows
returns a test values matrix and a training values matrix
Ex:
values = [1.0 2.0; 3.0 4.0; 5.0 6.0; 7.0 8.0; 9.0 10]
quantile = 4
test, train = _extract_part(values, quantile)
test # [1.0 2.0; 9.0 10.0]
train # [3.0 4.0; 5.0 6.0; 7.0 8.0]
"""
function _extract_part(values, quantile)
num = size(values, 1)
indices = collect(1:quantile:num)
mask = falses(num)
mask[indices] .= true
#mask
return values[mask, :], values[.~mask, :]
end
"""
_cluster_values(values, cluster_classifier, num_clusters)
values - a concatenation of input and output values
cluster_classifier - a vector of integers representing which cluster each data point belongs to
num_clusters - number of clusters
output
clusters - values grouped by clusters
## Ex:
vals = [1.0 2.0; 3.0 4.0; 5.0 6.0; 7.0 8.0; 9.0 10.0]
cluster_classifier = [1, 2, 2, 2, 1]
num_clusters = 2
clusters = _cluster_values(vals, cluster_classifier, num_clusters)
@show clusters #prints values below
[[1.0, 2.0], [9.0, 10.0]]
[[3.0, 4.0], [5.0, 6.0], [7.0, 8.0]]
"""
function _cluster_values(values, cluster_classifier, num_clusters)
num = length(cluster_classifier)
if (size(values, 1) != num)
error("Number of values don't match number of cluster_classifier points")
end
clusters = [[] for n in 1:num_clusters]
for i in 1:num
push!(clusters[cluster_classifier[i]], (values[i, :]))
end
return clusters
end
"""
_create_clusters_distributions(gmm::GMM, ndim, n_clusters)
gmm - a gaussian mixture model with concatenated X and y values that have been clustered
ndim - number of dimensions in X
n_clusters - number of clusters
output
distribs - a vector containing frozen multivariate normal distributions for each cluster
"""
function _create_clusters_distributions(gmm::GMM, ndim, n_clusters)
means = gmm.μ
cov = covars(gmm)
distribs = []
for k in 1:n_clusters
meansk = means[k, 1:ndim]
covk = cov[k][1:ndim, 1:ndim]
mvn = MvNormal(meansk, covk) # todo - check if we need allow_singular=True and implement
push!(distribs, mvn)
end
return distribs
end
"""
_find_upper_lower_bounds(X::Matrix)
returns upper and lower bounds in vector form
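Ex (illustrative):
X = [1.0 2.0; 3.0 0.5]
lb, ub = _find_upper_lower_bounds(X)
lb # [1.0, 0.5]
ub # [3.0, 2.0]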
"""
function _find_upper_lower_bounds(X::Matrix)
ub = []
lb = []
for col in eachcol(X)
push!(ub, findmax(col)[1])
push!(lb, findmin(col)[1])
end
if (size(X, 2) == 1)
return lb[1][1], ub[1][1]
else
return lb, ub
end
end
"""
_find_best_model(clustered_train_values, clustered_test_values, dim, enabled_expert_types)
finds best model for each set of clustered values by validating against the clustered_test_values
"""
function _find_best_model(clustered_train_values, clustered_test_values, dim,
enabled_expert_types)
# find upper and lower bounds for clustered_train and test values concatenated
x_vec = [a[1:dim] for a in clustered_train_values]
y_vec = [last(a) for a in clustered_train_values]
x_test_vec = [a[1:dim] for a in clustered_test_values]
y_test_vec = [last(a) for a in clustered_test_values]
if (dim == 1)
xtrain_mat = reshape(x_vec, (size(clustered_train_values, 1), dim))
xtest_mat = reshape(x_test_vec, (size(clustered_test_values, 1), dim))
else
xtrain_mat = _vector_of_tuples_to_matrix(x_vec)
xtest_mat = _vector_of_tuples_to_matrix(x_test_vec)
end
X = !isnothing(xtest_mat) ? vcat(xtrain_mat, xtest_mat) : xtrain_mat
x_test_vec = !isnothing(xtest_mat) ? x_test_vec : x_vec
y_test_vec = !isnothing(xtest_mat) ? y_test_vec : y_vec
lb, ub = _find_upper_lower_bounds(X)
# call on _surrogate_builder with clustered_train_vals, enabled expert types, lb, ub
surr_vec = _surrogate_builder(
enabled_expert_types, length(enabled_expert_types), x_vec,
y_vec, lb, ub)
# use the models to find best model after validating against test data and return best model
best_rmse = Inf
best_model = surr_vec[1] #initial assignment can be any model
for surr_model in surr_vec
pred = surr_model.(x_test_vec)
rmse = norm(pred - y_test_vec, 2)
if (rmse < best_rmse)
best_rmse = rmse
best_model = surr_model
end
end
return best_model
end
"""
_surrogate_builder(local_kind, k, x, y, lb, ub)
takes in an array of surrogate types and the number of clusters, builds the surrogates, and returns
an array of surrogate objects
"""
function _surrogate_builder(local_kind, k, x, y, lb, ub)
local_surr = []
for i in 1:k
if local_kind[i][1] == "RadialBasis"
#fit and append to local_surr
my_local_i = RadialBasis(x, y, lb, ub,
rad = local_kind[i].radial_function,
scale_factor = local_kind[i].scale_factor,
sparse = local_kind[i].sparse)
push!(local_surr, my_local_i)
elseif local_kind[i][1] == "Kriging"
#because Kriging takes abs of two vectors
if (length(lb) == 1)
x = [a[1] for a in x]
end
my_local_i = Kriging(x, y, lb, ub, p = local_kind[i].p,
theta = local_kind[i].theta)
push!(local_surr, my_local_i)
elseif local_kind[i][1] == "GEK"
my_local_i = GEK(x, y, lb, ub, p = local_kind[i].p,
theta = local_kind[i].theta)
push!(local_surr, my_local_i)
elseif local_kind[i] == "LinearSurrogate"
my_local_i = LinearSurrogate(x, y, lb, ub)
push!(local_surr, my_local_i)
elseif local_kind[i][1] == "InverseDistanceSurrogate"
            my_local_i = InverseDistanceSurrogate(x, y, lb, ub, p = local_kind[i].p)
push!(local_surr, my_local_i)
elseif local_kind[i][1] == "LobachevskySurrogate"
            my_local_i = LobachevskySurrogate(x, y, lb, ub,
                alpha = local_kind[i].alpha,
                n = local_kind[i].n,
                sparse = local_kind[i].sparse)
push!(local_surr, my_local_i)
elseif local_kind[i][1] == "NeuralSurrogate"
my_local_i = NeuralSurrogate(x, y, lb, ub,
model = local_kind[i].model,
loss = local_kind[i].loss, opt = local_kind[i].opt,
n_echos = local_kind[i].n_echos)
push!(local_surr, my_local_i)
elseif local_kind[i][1] == "RandomForestSurrogate"
my_local_i = RandomForestSurrogate(x, y, lb, ub,
num_round = local_kind[i].num_round)
push!(local_surr, my_local_i)
elseif local_kind[i] == "SecondOrderPolynomialSurrogate"
my_local_i = SecondOrderPolynomialSurrogate(x, y, lb, ub)
push!(local_surr, my_local_i)
elseif local_kind[i][1] == "Wendland"
            my_local_i = Wendland(x, y, lb, ub, eps = local_kind[i].eps,
maxiters = local_kind[i].maxiters, tol = local_kind[i].tol)
push!(local_surr, my_local_i)
elseif local_kind[i][1] == "PolynomialChaosSurrogate"
my_local_i = PolynomialChaosSurrogate(x, y, lb, ub, op = local_kind[i].op)
push!(local_surr, my_local_i)
else
throw("A surrogate with name provided does not exist or is not currently supported with MOE.")
end
end
return local_surr
end
"""
add_point!(m::MOE, new_x, new_y)
add a new point to the dataset.
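Ex (illustrative; assumes `moe` was built with the `MOE` constructor on 1-D inputs):
add_point!(moe, 0.5, 5.0)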
"""
function add_point!(m::MOE, x, y)
#function add_point!(m) #this works
push!(m.x, x)
push!(m.y, y)
quantile = 10
    if (m.nd > 1) #number of dimensions
X = _vector_of_tuples_to_matrix(m.x)
values = hcat(X, m.y)
else
values = hcat(m.x, m.y)
end
x_and_y_test, x_and_y_train = _extract_part(values, quantile)
# We get posdef error without jitter; And if values repeat we get NaN vals
# https://github.com/davidavdav/GaussianMixtures.jl/issues/21
jitter_vals = ((rand(eltype(x_and_y_train), size(x_and_y_train))) ./ 10000)
gm_cluster = GMM(m.nc, x_and_y_train + jitter_vals, kind = :full, nInit = 50,
nIter = 20)
mvn_distributions = _create_clusters_distributions(gm_cluster, m.nd, m.nc)
cluster_classifier_train = _cluster_predict(gm_cluster, x_and_y_train)
clusters_train = _cluster_values(x_and_y_train, cluster_classifier_train, m.nc)
cluster_classifier_test = _cluster_predict(gm_cluster, x_and_y_test)
clusters_test = _cluster_values(x_and_y_test, cluster_classifier_test, m.nc)
best_models = []
for i in 1:(m.nc)
best_model = _find_best_model(clusters_train[i], clusters_test[i], m.nd,
m.e)
push!(best_models, best_model)
end
m.c = gm_cluster
m.d = mvn_distributions
m.m = best_models
end
"""
_vector_of_tuples_to_matrix(v)
takes in a vector of tuples or vector of vectors and converts it into a matrix
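Ex (illustrative):
v = [(1.0, 2.0), (3.0, 4.0)]
_vector_of_tuples_to_matrix(v) # returns [1.0 2.0; 3.0 4.0]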
"""
function _vector_of_tuples_to_matrix(v)
if !isempty(v)
num_rows = length(v)
num_cols = length(first(v))
K = zeros(num_rows, num_cols)
for row in 1:num_rows
for col in 1:num_cols
K[row, col] = v[row][col]
end
end
return K
end
return nothing
end
end #module
| Surrogates | https://github.com/SciML/Surrogates.jl.git |
|
[
"MIT"
] | 6.10.0 | ba29564853a3f7b10eb699f1e99a62cdb6a3770f | code | 5484 | using SafeTestsets
using StableRNGs, Random
const SEED = 42
Random.seed!(StableRNG(SEED), SEED)
# #test 1D function that is discontinuous
@safetestset "1D" begin
using Surrogates
using SurrogatesMOE
function discont_1D(x)
if x < 0.0
return -5.0
elseif x >= 0.0
return 5.0
end
end
lb = -1.0
ub = 1.0
x = sample(50, lb, ub, SobolSample())
y = discont_1D.(x)
# Radials vs MOE
RAD_1D = RadialBasis(x, y, lb, ub, rad = linearRadial(), scale_factor = 1.0,
sparse = false)
expert_types = [
RadialBasisStructure(radial_function = linearRadial(), scale_factor = 1.0,
sparse = false),
RadialBasisStructure(radial_function = cubicRadial(), scale_factor = 1.0,
sparse = false)
]
MOE_1D_RAD_RAD = MOE(x, y, expert_types)
MOE_at0 = MOE_1D_RAD_RAD(0.0)
RAD_at0 = RAD_1D(0.0)
true_val = 5.0
@test (abs(RAD_at0 - true_val) > abs(MOE_at0 - true_val))
# Krig vs MOE
KRIG_1D = Kriging(x, y, lb, ub, p = 1.0, theta = 1.0)
expert_types = [InverseDistanceStructure(p = 1.0),
KrigingStructure(p = 1.0, theta = 1.0)
]
MOE_1D_INV_KRIG = MOE(x, y, expert_types)
MOE_at0 = MOE_1D_INV_KRIG(0.0)
KRIG_at0 = KRIG_1D(0.0)
true_val = 5.0
@test (abs(KRIG_at0 - true_val) > abs(MOE_at0 - true_val))
end
@safetestset "ND" begin
using Surrogates
using SurrogatesMOE
# helper to test accuracy of predictors
function rmse(a, b)
a = vec(a)
b = vec(b)
if (size(a) != size(b))
println("error in inputs")
return
end
n = size(a, 1)
return sqrt(sum((a - b) .^ 2) / n)
end
# multidimensional input function
function discont_NDIM(x)
if (x[1] >= 0.0 && x[2] >= 0.0)
return sum(x .^ 2) + 5
else
return sum(x .^ 2) - 5
end
end
lb = [-1.0, -1.0]
ub = [1.0, 1.0]
n = 150
x = sample(n, lb, ub, SobolSample())
y = discont_NDIM.(x)
x_test = sample(9, lb, ub, GoldenSample())
expert_types = [
KrigingStructure(p = [1.0, 1.0], theta = [1.0, 1.0]),
RadialBasisStructure(radial_function = linearRadial(), scale_factor = 1.0,
sparse = false)
]
moe_nd_krig_rad = MOE(x, y, expert_types, ndim = 2, quantile = 5)
moe_pred_vals = moe_nd_krig_rad.(x_test)
true_vals = discont_NDIM.(x_test)
moe_rmse = rmse(true_vals, moe_pred_vals)
rbf = RadialBasis(x, y, lb, ub)
rbf_pred_vals = rbf.(x_test)
rbf_rmse = rmse(true_vals, rbf_pred_vals)
krig = Kriging(x, y, lb, ub, p = [1.0, 1.0], theta = [1.0, 1.0])
krig_pred_vals = krig.(x_test)
krig_rmse = rmse(true_vals, krig_pred_vals)
@test (rbf_rmse > moe_rmse)
@test (krig_rmse > moe_rmse)
end
@safetestset "Miscellaneous" begin
using Surrogates
using SurrogatesMOE
using SurrogatesFlux
using Flux
# multidimensional input function
function discont_NDIM(x)
if (x[1] >= 0.0 && x[2] >= 0.0)
return sum(x .^ 2) + 5
else
return sum(x .^ 2) - 5
end
end
lb = [-1.0, -1.0]
ub = [1.0, 1.0]
n = 120
x = sample(n, lb, ub, LatinHypercubeSample())
y = discont_NDIM.(x)
x_test = sample(10, lb, ub, GoldenSample())
# test if MOE handles 3 experts including SurrogatesFlux
expert_types = [
RadialBasisStructure(radial_function = linearRadial(), scale_factor = 1.0,
sparse = false),
LinearStructure(),
InverseDistanceStructure(p = 1.0)
]
moe_nd_3_experts = MOE(x, y, expert_types, ndim = 2, n_clusters = 3)
moe_pred_vals = moe_nd_3_experts.(x_test)
# test if MOE handles SurrogatesFlux
model = Chain(Dense(2, 1), first)
loss(x, y) = Flux.mse(model(x), y)
opt = Descent(0.01)
n_echos = 1
expert_types = [
NeuralStructure(model = model, loss = loss, opt = opt, n_echos = n_echos),
LinearStructure()
]
moe_nn_ln = MOE(x, y, expert_types, ndim = 2)
moe_pred_vals = moe_nn_ln.(x_test)
end
@safetestset "Add Point 1D" begin
using Surrogates
using SurrogatesMOE
function discont_1D(x)
if x < 0.0
return -5.0
elseif x >= 0.0
return 5.0
end
end
lb = -1.0
ub = 1.0
x = sample(50, lb, ub, SobolSample())
y = discont_1D.(x)
expert_types = [
RadialBasisStructure(radial_function = linearRadial(), scale_factor = 1.0,
sparse = false),
RadialBasisStructure(radial_function = cubicRadial(), scale_factor = 1.0,
sparse = false)
]
moe = MOE(x, y, expert_types)
add_point!(moe, 0.5, 5.0)
end
@safetestset "Add Point ND" begin
using Surrogates
using SurrogatesMOE
# multidimensional input function
function discont_NDIM(x)
if (x[1] >= 0.0 && x[2] >= 0.0)
return sum(x .^ 2) + 5
else
return sum(x .^ 2) - 5
end
end
lb = [-1.0, -1.0]
ub = [1.0, 1.0]
n = 110
x = sample(n, lb, ub, LatinHypercubeSample())
y = discont_NDIM.(x)
expert_types = [InverseDistanceStructure(p = 1.0),
RadialBasisStructure(radial_function = linearRadial(), scale_factor = 1.0,
sparse = false)
]
moe_nd_inv_rad = MOE(x, y, expert_types, ndim = 2)
add_point!(moe_nd_inv_rad, (0.5, 0.5), sum((0.5, 0.5) .^ 2) + 5)
end
| Surrogates | https://github.com/SciML/Surrogates.jl.git |
|
[
"MIT"
] | 6.10.0 | ba29564853a3f7b10eb699f1e99a62cdb6a3770f | code | 3435 | module SurrogatesPolyChaos
import Surrogates: AbstractSurrogate, add_point!, _check_dimension
export PolynomialChaosSurrogate
using PolyChaos
mutable struct PolynomialChaosSurrogate{X, Y, L, U, C, O, N} <: AbstractSurrogate
x::X
y::Y
lb::L
ub::U
coeff::C
ortopolys::O
num_of_multi_indexes::N
end
function _calculatepce_coeff(x, y, num_of_multi_indexes, op::AbstractCanonicalOrthoPoly)
n = length(x)
A = zeros(eltype(x), n, num_of_multi_indexes)
for i in 1:n
A[i, :] = PolyChaos.evaluate(x[i], op)
end
return (A' * A) \ (A' * y)
end
function PolynomialChaosSurrogate(x, y, lb::Number, ub::Number;
op::AbstractCanonicalOrthoPoly = GaussOrthoPoly(2))
n = length(x)
poly_degree = op.deg
num_of_multi_indexes = 1 + poly_degree
if n < 2 + 3 * num_of_multi_indexes
throw("To avoid numerical problems, it's strongly suggested to have at least $(2+3*num_of_multi_indexes) samples")
end
coeff = _calculatepce_coeff(x, y, num_of_multi_indexes, op)
return PolynomialChaosSurrogate(x, y, lb, ub, coeff, op, num_of_multi_indexes)
end
function (pc::PolynomialChaosSurrogate)(val::Number)
# Check to make sure dimensions of input matches expected dimension of surrogate
_check_dimension(pc, val)
return sum([pc.coeff[i] * PolyChaos.evaluate(val, pc.ortopolys)[i]
for i in 1:(pc.num_of_multi_indexes)])
end
function _calculatepce_coeff(x, y, num_of_multi_indexes, op::MultiOrthoPoly)
n = length(x)
d = length(x[1])
A = zeros(eltype(x[1]), n, num_of_multi_indexes)
for i in 1:n
xi = zeros(eltype(x[1]), d)
for j in 1:d
xi[j] = x[i][j]
end
A[i, :] = PolyChaos.evaluate(xi, op)
end
return (A' * A) \ (A' * y)
end
function PolynomialChaosSurrogate(x, y, lb, ub;
op::MultiOrthoPoly = MultiOrthoPoly([GaussOrthoPoly(2)
for j in 1:length(lb)],
2))
n = length(x)
d = length(lb)
poly_degree = op.deg
num_of_multi_indexes = binomial(d + poly_degree, poly_degree)
if n < 2 + 3 * num_of_multi_indexes
throw("To avoid numerical problems, it's strongly suggested to have at least $(2+3*num_of_multi_indexes) samples")
end
coeff = _calculatepce_coeff(x, y, num_of_multi_indexes, op)
return PolynomialChaosSurrogate(x, y, lb, ub, coeff, op, num_of_multi_indexes)
end
function (pcND::PolynomialChaosSurrogate)(val)
# Check to make sure dimensions of input matches expected dimension of surrogate
_check_dimension(pcND, val)
sum = zero(eltype(val[1]))
for i in 1:(pcND.num_of_multi_indexes)
sum = sum +
pcND.coeff[i] *
first(PolyChaos.evaluate(pcND.ortopolys.ind[i, :], collect(val),
pcND.ortopolys))
end
return sum
end
function add_point!(polych::PolynomialChaosSurrogate, x_new, y_new)
if length(polych.lb) == 1
#1D
polych.x = vcat(polych.x, x_new)
polych.y = vcat(polych.y, y_new)
polych.coeff = _calculatepce_coeff(polych.x, polych.y, polych.num_of_multi_indexes,
polych.ortopolys)
else
polych.x = vcat(polych.x, x_new)
polych.y = vcat(polych.y, y_new)
polych.coeff = _calculatepce_coeff(polych.x, polych.y, polych.num_of_multi_indexes,
polych.ortopolys)
end
nothing
end
end # module
| Surrogates | https://github.com/SciML/Surrogates.jl.git |
|
[
"MIT"
] | 6.10.0 | ba29564853a3f7b10eb699f1e99a62cdb6a3770f | code | 3198 | using SafeTestsets
@safetestset "PolynomialChaosSurrogates" begin
using Surrogates
using PolyChaos
using Surrogates: sample, SobolSample
using SurrogatesPolyChaos
using Zygote
#1D
n = 20
lb = 0.0
ub = 4.0
f = x -> 2 * x
x = sample(n, lb, ub, SobolSample())
y = f.(x)
my_pce = PolynomialChaosSurrogate(x, y, lb, ub)
val = my_pce(2.0)
add_point!(my_pce, 3.0, 6.0)
my_pce_changed = PolynomialChaosSurrogate(x, y, lb, ub, op = Uniform01OrthoPoly(1))
#ND
n = 60
lb = [0.0, 0.0]
ub = [5.0, 5.0]
f = x -> x[1] * x[2]
x = sample(n, lb, ub, SobolSample())
y = f.(x)
my_pce = PolynomialChaosSurrogate(x, y, lb, ub)
val = my_pce((2.0, 2.0))
add_point!(my_pce, (2.0, 3.0), 6.0)
op1 = Uniform01OrthoPoly(1)
op2 = Beta01OrthoPoly(2, 2, 1.2)
ops = [op1, op2]
multi_poly = MultiOrthoPoly(ops, min(1, 2))
my_pce_changed = PolynomialChaosSurrogate(x, y, lb, ub, op = multi_poly)
# Surrogate optimization test
lb = 0.0
ub = 15.0
p = 1.99
a = 2
b = 6
objective_function = x -> 2 * x + 1
x = sample(20, lb, ub, SobolSample())
y = objective_function.(x)
my_poly1d = PolynomialChaosSurrogate(x, y, lb, ub)
@test_broken surrogate_optimize(objective_function, SRBF(), a, b, my_poly1d,
LowDiscrepancySample(; base = 2))
lb = [0.0, 0.0]
ub = [10.0, 10.0]
obj_ND = x -> log(x[1]) * exp(x[2])
x = sample(40, lb, ub, RandomSample())
y = obj_ND.(x)
my_polyND = PolynomialChaosSurrogate(x, y, lb, ub)
surrogate_optimize(obj_ND, SRBF(), lb, ub, my_polyND, SobolSample(), maxiters = 15)
# AD Compatibility
lb = 0.0
ub = 3.0
n = 10
x = sample(n, lb, ub, SobolSample())
f = x -> x^2
y = f.(x)
# #Polynomialchaos
@testset "Polynomial Chaos" begin
f = x -> x^2
n = 50
x = sample(n, lb, ub, SobolSample())
y = f.(x)
my_poli = PolynomialChaosSurrogate(x, y, lb, ub)
g = x -> my_poli'(x)
g(3.0)
end
# #PolynomialChaos
@testset "Polynomial Chaos ND" begin
n = 50
lb = [0.0, 0.0]
ub = [10.0, 10.0]
x = sample(n, lb, ub, SobolSample())
f = x -> x[1] * x[2]
y = f.(x)
my_poli_ND = PolynomialChaosSurrogate(x, y, lb, ub)
g = x -> Zygote.gradient(my_poli_ND, x)
g((1.0, 1.0))
n = 10
d = 2
lb = [0.0, 0.0]
ub = [5.0, 5.0]
x = sample(n, lb, ub, SobolSample())
f = x -> x[1]^2 + x[2]^2
y1 = f.(x)
grad1 = x -> 2 * x[1]
grad2 = x -> 2 * x[2]
function create_grads(n, d, grad1, grad2, y)
c = 0
y2 = zeros(eltype(y[1]), n * d)
for i in 1:n
y2[i + c] = grad1(x[i])
y2[i + c + 1] = grad2(x[i])
c = c + 1
end
return y2
end
y2 = create_grads(n, d, grad1, grad2, y)
y = vcat(y1, y2)
my_gek_ND = GEK(x, y, lb, ub)
g = x -> Zygote.gradient(my_gek_ND, x)
@test_broken g((2.0, 5.0)) #breaks after Zygote version 0.6.43
end
end
| Surrogates | https://github.com/SciML/Surrogates.jl.git |
|
[
"MIT"
] | 6.10.0 | ba29564853a3f7b10eb699f1e99a62cdb6a3770f | code | 1738 | module SurrogatesRandomForest
using SurrogatesBase
using XGBoost: xgboost, predict
export RandomForestSurrogate, update!
mutable struct RandomForestSurrogate{X, Y, B, L, U, N} <:
SurrogatesBase.AbstractDeterministicSurrogate
x::X
y::Y
bst::B
lb::L
ub::U
num_round::N
end
"""
    RandomForestSurrogate(x, y, lb, ub; num_round::Int = 1)
Build a random forest surrogate using XGBoost; `num_round` is the number of boosting rounds (trees).
## Arguments
- `x`: Input data points.
- `y`: Output data points.
- `lb`: Lower bound of input data points.
- `ub`: Upper bound of input data points.
## Keyword Arguments
- `num_round`: number of rounds of training.
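## Example
```julia
# illustrative 1D usage (mirrors the package tests)
x = [1.0, 2.0, 3.0, 4.0, 5.0]
y = 3 .* x .+ 1
surrogate = RandomForestSurrogate(x, y, 0.0, 10.0; num_round = 2)
surrogate(3.5)
```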
"""
function RandomForestSurrogate(x, y, lb, ub; num_round::Int = 1)
X = Array{Float64, 2}(undef, length(x), length(x[1]))
if length(lb) == 1
for j in eachindex(x)
X[j, 1] = x[j]
end
else
for j in eachindex(x)
X[j, :] = x[j]
end
end
bst = xgboost((X, y); num_round)
RandomForestSurrogate(x, y, bst, lb, ub, num_round)
end
function (rndfor::RandomForestSurrogate)(val::Number)
return rndfor([val])
end
function (rndfor::RandomForestSurrogate)(val)
return predict(rndfor.bst, reshape(val, length(val), 1))[1]
end
function SurrogatesBase.update!(rndfor::RandomForestSurrogate, x_new, y_new)
rndfor.x = vcat(rndfor.x, x_new)
rndfor.y = vcat(rndfor.y, y_new)
if length(rndfor.lb) == 1
rndfor.bst = xgboost((reshape(rndfor.x, length(rndfor.x), 1), rndfor.y);
num_round = rndfor.num_round)
else
rndfor.bst = xgboost(
(transpose(reduce(hcat, rndfor.x)), rndfor.y); num_round = rndfor.num_round)
end
nothing
end
end # module
| Surrogates | https://github.com/SciML/Surrogates.jl.git |
|
[
"MIT"
] | 6.10.0 | ba29564853a3f7b10eb699f1e99a62cdb6a3770f | code | 1299 | using SafeTestsets
@safetestset "RandomForestSurrogate" begin
using Surrogates
using SurrogatesRandomForest
using Test
using XGBoost: xgboost, predict
@testset "1D" begin
obj_1D = x -> 3 * x + 1
x = [1.0, 2.0, 3.0, 4.0, 5.0]
y = obj_1D.(x)
a = 0.0
b = 10.0
num_round = 2
my_forest_1D = RandomForestSurrogate(x, y, a, b; num_round = 2)
xgboost1 = xgboost((reshape(x, length(x), 1), y); num_round = 2)
val = my_forest_1D(3.5)
@test predict(xgboost1, [3.5;;])[1] == val
update!(my_forest_1D, [6.0], [19.0])
update!(my_forest_1D, [7.0, 8.0], obj_1D.([7.0, 8.0]))
end
@testset "ND" begin
lb = [0.0, 0.0, 0.0]
ub = [10.0, 10.0, 10.0]
x = collect.(sample(5, lb, ub, SobolSample()))
obj_ND = x -> x[1] * x[2]^2 * x[3]
y = obj_ND.(x)
my_forest_ND = RandomForestSurrogate(x, y, lb, ub; num_round = 2)
xgboostND = xgboost((reduce(hcat, x)', y); num_round = 2)
val = my_forest_ND([1.0, 1.0, 1.0])
@test predict(xgboostND, reshape([1.0, 1.0, 1.0], 3, 1))[1] == val
update!(my_forest_ND, [[1.0, 1.0, 1.0]], [1.0])
update!(my_forest_ND, [[1.2, 1.2, 1.0], [1.5, 1.5, 1.0]], [1.728, 3.375])
end
end
| Surrogates | https://github.com/SciML/Surrogates.jl.git |
|
[
"MIT"
] | 6.10.0 | ba29564853a3f7b10eb699f1e99a62cdb6a3770f | code | 1777 | module SurrogatesSVM
using SurrogatesBase
using LIBSVM
export SVMSurrogate, update!
mutable struct SVMSurrogate{X, Y, M, L, U} <: AbstractDeterministicSurrogate
x::X
y::Y
model::M
lb::L
ub::U
end
"""
SVMSurrogate(x, y, lb, ub)
Builds a SVM Surrogate using [LIBSVM](https://github.com/JuliaML/LIBSVM.jl).
## Arguments
- `x`: Input data points.
- `y`: Output data points.
- `lb`: Lower bound of input data points.
- `ub`: Upper bound of input data points.
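## Example
```julia
# illustrative sketch; `sample` and `SobolSample` come from Surrogates.jl
x = sample(5, 0.0, 10.0, SobolSample())
y = (xi -> 2 * xi + 1).(x)
svm = SVMSurrogate(x, y, 0.0, 10.0)
svm(5.0)
```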
"""
function SVMSurrogate(x, y, lb, ub)
X = Array{Float64, 2}(undef, length(x), length(first(x)))
if length(lb) == 1
for j in eachindex(x)
X[j, 1] = x[j]
end
else
for j in eachindex(x)
X[j, :] = x[j]
end
end
model = LIBSVM.fit!(SVC(), X, y)
SVMSurrogate(x, y, model, lb, ub)
end
function (svmsurr::SVMSurrogate)(val::Number)
return svmsurr([val])
end
function (svmsurr::SVMSurrogate)(val)
n = length(val)
return LIBSVM.predict(svmsurr.model, reshape(val, 1, n))[1]
end
"""
update!(svmsurr::SVMSurrogate, x_new, y_new)
Add new sample points to the training set of `svmsurr` and refit the underlying SVM.
## Arguments
- `svmsurr`: Surrogate of type [`SVMSurrogate`](@ref).
- `x_new`: Vector of new data points to be added to the training set of SVMSurrogate.
- `y_new`: Vector of new output points to be added to the training set of SVMSurrogate.
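## Example
```julia
# assumes `svmsurr` is an existing SVMSurrogate (mirrors the package tests)
update!(svmsurr, [3.1], [7.2])            # add a single point
update!(svmsurr, [3.2, 3.5], [7.4, 8.0])  # add several points at once
```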
"""
function SurrogatesBase.update!(svmsurr::SVMSurrogate, x_new, y_new)
svmsurr.x = vcat(svmsurr.x, x_new)
svmsurr.y = vcat(svmsurr.y, y_new)
if length(svmsurr.lb) == 1
svmsurr.model = LIBSVM.fit!(
SVC(), reshape(svmsurr.x, length(svmsurr.x), 1), svmsurr.y)
else
svmsurr.model = LIBSVM.fit!(SVC(), transpose(reduce(hcat, svmsurr.x)), svmsurr.y)
end
end
end # module
| Surrogates | https://github.com/SciML/Surrogates.jl.git |
|
[
"MIT"
] | 6.10.0 | ba29564853a3f7b10eb699f1e99a62cdb6a3770f | code | 1518 | using SafeTestsets
@safetestset "SVMSurrogate" begin
using SurrogatesSVM
using Surrogates
using LIBSVM
using Test
@testset "1D" begin
obj_1D = x -> 2 * x + 1
a = 0.0
b = 10.0
x = sample(5, a, b, SobolSample())
y = obj_1D.(x)
svm = LIBSVM.fit!(SVC(), reshape(x, length(x), 1), y)
my_svm_1D = SVMSurrogate(x, y, a, b)
val = my_svm_1D([5.0])
@test LIBSVM.predict(svm, [5.0;;])[1] == val
update!(my_svm_1D, [3.1], [7.2])
update!(my_svm_1D, [3.2, 3.5], [7.4, 8.0])
svm = LIBSVM.fit!(SVC(), reshape(my_svm_1D.x, length(my_svm_1D.x), 1), my_svm_1D.y)
val = my_svm_1D(3.1)
@test LIBSVM.predict(svm, [3.1;;])[1] == val
end
@testset "ND" begin
obj_N = x -> x[1]^2 * x[2]
lb = [0.0, 0.0]
ub = [10.0, 10.0]
x = collect.(sample(100, lb, ub, RandomSample()))
y = obj_N.(x)
svm = LIBSVM.fit!(SVC(), transpose(reduce(hcat, x)), y)
my_svm_ND = SVMSurrogate(x, y, lb, ub)
x_test = [5.0, 1.2]
val = my_svm_ND(x_test)
@test LIBSVM.predict(svm, reshape(x_test, 1, 2))[1] == val
update!(my_svm_ND, [[1.0, 1.0]], [1.0])
update!(my_svm_ND, [[1.2, 1.2], [1.5, 1.5]], [1.728, 3.375])
svm = LIBSVM.fit!(SVC(), transpose(reduce(hcat, my_svm_ND.x)), my_svm_ND.y)
x_test = [1.0, 1.0]
val = my_svm_ND(x_test)
@test LIBSVM.predict(svm, reshape(x_test, 1, 2))[1] == val
end
end
| Surrogates | https://github.com/SciML/Surrogates.jl.git |
|
[
"MIT"
] | 6.10.0 | ba29564853a3f7b10eb699f1e99a62cdb6a3770f | code | 13588 | using LinearAlgebra
mutable struct EarthSurrogate{X, Y, L, U, B, C, P, M, N, R, G, I, T} <: AbstractSurrogate
x::X
y::Y
lb::L
ub::U
basis::B
coeff::C
penalty::P
n_min_terms::M
n_max_terms::N
rel_res_error::R
rel_GCV::G
intercept::I
maxiters::T
end
_hinge(x::Number, knot::Number) = max(0, x - knot)
_hinge_mirror(x::Number, knot::Number) = max(0, knot - x)
function _coeff_1d(x, y, basis)
n = length(x)
d = length(basis)
X = zeros(eltype(x[1]), n, d)
@inbounds for i in 1:n
for j in 1:d
X[i, j] = basis[j](x[i])
end
end
return (X' * X) \ (X' * y)
end
function _forward_pass_1d(x, y, n_max_terms, rel_res_error, maxiters)
n = length(x)
basis = Array{Function}(undef, 0)
current_sse = +Inf
intercept = sum([y[i] for i in 1:length(y)]) / length(y)
num_terms = 0
pos_of_knot = 0
iters = 0
    while num_terms < n_max_terms && iters < maxiters
#Look for best addition:
new_addition = false
for i in 1:length(x)
#Add or not add the knot var_i?
var_i = x[i]
new_basis = copy(basis)
#select best new pair
hinge1 = x -> _hinge(x, var_i)
hinge2 = x -> _hinge_mirror(x, var_i)
push!(new_basis, hinge1)
push!(new_basis, hinge2)
#find coefficients
d = length(new_basis)
X = zeros(eltype(x[1]), n, d)
@inbounds for i in 1:n
for j in 1:d
X[i, j] = new_basis[j](x[i])
end
end
if (cond(X' * X) > 1e8)
condition_number = false
new_sse = +Inf
else
condition_number = true
coeff = (X' * X) \ (X' * y)
new_sse = zero(y[1])
d = length(new_basis)
for i in 1:n
val_i = sum(coeff[j] * new_basis[j](x[i]) for j in 1:d) + intercept
new_sse = new_sse + (y[i] - val_i)^2
end
end
#is the i-esim the best?
if ((new_sse < current_sse) && (abs(current_sse - new_sse) >= rel_res_error) &&
condition_number)
#Add the hinge function to the basis
pos_of_knot = i
current_sse = new_sse
new_addition = true
end
end
iters = iters + 1
if new_addition
best_hinge1 = z -> _hinge(z, x[pos_of_knot])
best_hinge2 = z -> _hinge_mirror(z, x[pos_of_knot])
push!(basis, best_hinge1)
push!(basis, best_hinge2)
num_terms = num_terms + 1
else
break
end
end
if length(basis) == 0
throw("Earth surrogate did not add any term, just the intercept. It is advised to double check the parameters.")
end
return basis
end
function _backward_pass_1d(x, y, n_min_terms, basis, penalty, rel_GCV)
n = length(x)
d = length(basis)
intercept = sum([y[i] for i in 1:length(y)]) / length(y)
coeff = _coeff_1d(x, y, basis)
sse = zero(y[1])
for i in 1:n
val_i = sum(coeff[j] * basis[j](x[i]) for j in 1:d) + intercept
sse = sse + (y[i] - val_i)^2
end
effect_num_params = d + penalty * (d - 1) / 2
current_gcv = sse / (n * (1 - effect_num_params / n)^2)
    num_terms = d
    new_gcv = +Inf
    basis_to_remove = 0
    while (num_terms > n_min_terms)
#Basis-> select worst performing element-> eliminate it
if num_terms <= 1
break
end
found_new_to_eliminate = false
        for i in 1:num_terms
            current_basis = copy(basis)
            #remove i-esim element from current basis
            deleteat!(current_basis, i)
            coef = _coeff_1d(x, y, current_basis)
            new_sse = zero(y[1])
            current_num_terms = num_terms - 1
            for a in 1:n
                val_a = sum(coef[j] * current_basis[j](x[a]) for j in 1:current_num_terms) +
                        intercept
                new_sse = new_sse + (y[a] - val_a)^2
            end
            effect_num_params = current_num_terms + penalty * (current_num_terms - 1) / 2
            i_gcv = new_sse / (n * (1 - effect_num_params / n)^2)
if i_gcv < current_gcv
basis_to_remove = i
new_gcv = i_gcv
found_new_to_eliminate = true
end
end
if !found_new_to_eliminate
break
end
if abs(current_gcv - new_gcv) < rel_GCV
break
else
num_terms = num_terms - 1
deleteat!(basis, basis_to_remove)
end
end
return basis
end
function EarthSurrogate(x, y, lb::Number, ub::Number; penalty::Number = 2.0,
n_min_terms::Int = 2, n_max_terms::Int = 10,
rel_res_error::Number = 1e-2, rel_GCV::Number = 1e-2,
maxiters = 100)
intercept = sum([y[i] for i in 1:length(y)]) / length(y)
basis_after_forward = _forward_pass_1d(x, y, n_max_terms, rel_res_error, maxiters)
basis = _backward_pass_1d(x, y, n_min_terms, basis_after_forward, penalty, rel_GCV)
coeff = _coeff_1d(x, y, basis)
return EarthSurrogate(x, y, lb, ub, basis, coeff, penalty, n_min_terms, n_max_terms,
rel_res_error, rel_GCV, intercept, maxiters)
end
function (earth::EarthSurrogate)(val::Number)
# Check to make sure dimensions of input matches expected dimension of surrogate
_check_dimension(earth, val)
return sum([earth.coeff[i] * earth.basis[i](val) for i in 1:length(earth.coeff)]) +
earth.intercept
end
#ND
#inside arr_hing I have functions like g(x) = x -> _hinge(x,5.0) or g(x) = one(x)
#_product_hinge(val,arr_hing) = prod([arr_hing[i](val[i]) for i = 1:length(val)])
function _coeff_nd(x, y, basis)
n = length(x)
base_len = length(basis)
d = length(x[1])
X = zeros(eltype(x[1]), n, base_len)
@inbounds for a in 1:n
for b in 1:base_len
X[a, b] = prod([basis[b][c](x[a][c]) for c in 1:d])
end
end
return (X' * X) \ (X' * y)
end
function _forward_pass_nd(x, y, n_max_terms, rel_res_error, maxiters)
n = length(x)
basis = Array{Array{Function, 1}}(undef, 0) #ex of basis: push!(x,[x->1.0,x->1.0,x->_hinge(x,knot)]) so basis is of the form arr_hing and then I can pass my val
current_sse = +Inf
const_1 = x -> 1.0
intercept = sum([y[i] for i in 1:length(y)]) / length(y)
num_terms = 0
d = length(x[1])
best_hinge1 = best_hinge2 = [x -> one(eltype(x[1])) for j in 1:d]
new_addition = false
iters = 0
    while num_terms <= n_max_terms && iters < maxiters
current_basis = copy(basis)
new_addition = false
for i in 1:n
for j in 1:d
for k in 1:n
for l in 1:d
new_basis = copy(basis)
#model with interaction between x[i][j] and x[k][l]
new_hinge1 = [w != j ? x -> one(eltype(x[1])) :
z -> _hinge(z, x[i][j]) for w in 1:d]
new_hinge2 = [w != l ? x -> one(eltype(x[1])) :
z -> _hinge_mirror(z, x[k][l]) for w in 1:d]
push!(new_basis, new_hinge1)
push!(new_basis, new_hinge2)
#build the model, find sse and check if it's better than current best, if so save the params
bas_len = length(new_basis)
X = zeros(eltype(x[1]), n, bas_len)
@inbounds for a in 1:n
for b in 1:bas_len
X[a, b] = prod([new_basis[b][c](x[a][c]) for c in 1:d])
end
end
if (cond(X' * X) > 1e8)
condition_number = false
new_sse = +Inf
else
condition_number = true
coeff = (X' * X) \ (X' * y)
new_sse = zero(y[1])
for a in 1:n
val_a = sum(coeff[b] *
prod([new_basis[b][c](x[a][c]) for c in 1:d])
for b in 1:bas_len) + intercept
new_sse = new_sse + (y[a] - val_a)^2
end
end
#is the i-esim the best?
if ((new_sse < current_sse) &&
(abs(current_sse - new_sse) >= rel_res_error) &&
condition_number)
#Add the hinge function to the basis
best_hinge1 = new_hinge1
best_hinge2 = new_hinge2
current_sse = new_sse
new_addition = true
end
end
end
end
end
iters = iters + 1
if new_addition
push!(basis, best_hinge1)
push!(basis, best_hinge2)
num_terms = num_terms + 1
else
break
end
end
if (length(basis) == 0)
throw("Earth surrogate did not add any term, just the intercept. It is advised to double check the parameters.")
end
return basis
end
function _backward_pass_nd(x, y, n_min_terms, basis, penalty, rel_GCV)
n = length(x)
d = length(x[1])
base_len = length(basis)
intercept = sum([y[i] for i in 1:length(y)]) / length(y)
coeff = _coeff_nd(x, y, basis)
sse = zero(y[1])
for a in 1:n
val_a = sum(coeff[b] * prod([basis[b][c](x[a][c]) for c in 1:d])
for b in 1:base_len) +
intercept
sse = sse + (y[a] - val_a)^2
end
effect_num_params = base_len + penalty * (base_len - 1) / 2
current_gcv = sse / (n * (1 - effect_num_params / n)^2)
num_terms = base_len
new_gcv = +Inf
basis_to_remove = 0
while (num_terms > n_min_terms)
#Basis-> select worst performing element-> eliminate it
if num_terms <= 1
break
end
found_new_to_eliminate = false
for i in 1:num_terms
current_basis = copy(basis)
#remove i-esim element from current basis
deleteat!(current_basis, i)
coef = _coeff_nd(x, y, current_basis)
new_sse = zero(y[i])
current_base_len = num_terms - 1
for a in 1:n
            val_a = sum(coef[b] * prod([current_basis[b][c](x[a][c]) for c in 1:d])
for b in 1:current_base_len) + intercept
new_sse = new_sse + (y[a] - val_a)^2
end
curr_effect_num_params = current_base_len + penalty * (current_base_len - 1) / 2
i_gcv = new_sse / (n * (1 - curr_effect_num_params / n)^2)
if i_gcv < current_gcv
basis_to_remove = i
new_gcv = i_gcv
found_new_to_eliminate = true
end
end
if !found_new_to_eliminate
break
elseif abs(current_gcv - new_gcv) < rel_GCV
break
else
num_terms = num_terms - 1
deleteat!(basis, basis_to_remove)
end
end
return basis
end
function EarthSurrogate(x, y, lb, ub; penalty::Number = 2.0, n_min_terms::Int = 2,
n_max_terms::Int = 10, rel_res_error::Number = 1e-2,
rel_GCV::Number = 1e-2, maxiters = 100)
intercept = sum([y[i] for i in 1:length(y)]) / length(y)
basis_after_forward = _forward_pass_nd(x, y, n_max_terms, rel_res_error, maxiters)
basis = _backward_pass_nd(x, y, n_min_terms, basis_after_forward, penalty, rel_GCV)
coeff = _coeff_nd(x, y, basis)
return EarthSurrogate(x, y, lb, ub, basis, coeff, penalty, n_min_terms, n_max_terms,
rel_res_error, rel_GCV, intercept, maxiters)
end
function (earth::EarthSurrogate)(val)
# Check to make sure dimensions of input matches expected dimension of surrogate
_check_dimension(earth, val)
return sum([earth.coeff[i] * prod([earth.basis[i][j](val[j]) for j in 1:length(val)])
for i in 1:length(earth.coeff)]) + earth.intercept
end
function add_point!(earth::EarthSurrogate, x_new, y_new)
if length(earth.x[1]) == 1
#1D
earth.x = vcat(earth.x, x_new)
earth.y = vcat(earth.y, y_new)
earth.intercept = sum([earth.y[i] for i in 1:length(earth.y)]) / length(earth.y)
basis_after_forward = _forward_pass_1d(earth.x, earth.y, earth.n_max_terms,
earth.rel_res_error, earth.maxiters)
earth.basis = _backward_pass_1d(earth.x, earth.y, earth.n_min_terms,
basis_after_forward, earth.penalty, earth.rel_GCV)
earth.coeff = _coeff_1d(earth.x, earth.y, earth.basis)
nothing
else
#ND
earth.x = vcat(earth.x, x_new)
earth.y = vcat(earth.y, y_new)
earth.intercept = sum([earth.y[i] for i in 1:length(earth.y)]) / length(earth.y)
basis_after_forward = _forward_pass_nd(earth.x, earth.y, earth.n_max_terms,
earth.rel_res_error, earth.maxiters)
earth.basis = _backward_pass_nd(earth.x, earth.y, earth.n_min_terms,
basis_after_forward, earth.penalty, earth.rel_GCV)
earth.coeff = _coeff_nd(earth.x, earth.y, earth.basis)
nothing
end
end
| Surrogates | https://github.com/SciML/Surrogates.jl.git |
|
[
"MIT"
] | 6.10.0 | ba29564853a3f7b10eb699f1e99a62cdb6a3770f | code | 6611 | mutable struct GEK{X, Y, L, U, P, T, M, B, S, R} <: AbstractSurrogate
x::X
y::Y
lb::L
ub::U
p::P
theta::T
mu::M
b::B
sigma::S
inverse_of_R::R
end
function _calc_gek_coeffs(x, y, p::Number, theta::Number)
nd1 = length(y) #2n
n = length(x)
R = zeros(eltype(x[1]), nd1, nd1)
#top left
@inbounds for i in 1:n
for j in 1:n
R[i, j] = exp(-theta * abs(x[i] - x[j])^p)
end
end
#top right
@inbounds for i in 1:n
for j in (n + 1):nd1
R[i, j] = 2 * theta * (x[i] - x[j - n]) * exp(-theta * abs(x[i] - x[j - n])^p)
end
end
#bottom left
@inbounds for i in (n + 1):nd1
for j in 1:n
R[i, j] = -2 * theta * (x[i - n] - x[j]) * exp(-theta * abs(x[i - n] - x[j])^p)
end
end
#bottom right
@inbounds for i in (n + 1):nd1
for j in (n + 1):nd1
R[i, j] = -4 * theta * (x[i - n] - x[j - n])^2 *
exp(-theta * abs(x[i - n] - x[j - n])^p)
end
end
one = ones(eltype(x[1]), nd1, 1)
for i in (n + 1):nd1
one[i] = zero(eltype(x[1]))
end
one_t = one'
inverse_of_R = inv(R)
mu = (one_t * inverse_of_R * y) / (one_t * inverse_of_R * one)
b = inverse_of_R * (y - one * mu)
sigma = ((y - one * mu)' * inverse_of_R * (y - one * mu)) / n
mu[1], b, sigma[1], inverse_of_R
end
function std_error_at_point(k::GEK, val::Number)
# Check to make sure dimensions of input matches expected dimension of surrogate
_check_dimension(k, val)
phi(z) = exp(-(abs(z))^k.p)
nd1 = length(k.y)
n = length(k.x)
r = zeros(eltype(k.x[1]), nd1, 1)
@inbounds for i in 1:n
r[i] = phi(val - k.x[i])
end
one = ones(eltype(k.x[1]), nd1, 1)
@inbounds for i in (n + 1):nd1
one[i] = zero(eltype(k.x[1]))
end
one_t = one'
a = r' * k.inverse_of_R * r
a = a[1]
b = one_t * k.inverse_of_R * one
b = b[1]
mean_squared_error = k.sigma * (1 - a + (1 - a)^2 / (b))
return sqrt(abs(mean_squared_error))
end
function (k::GEK)(val::Number)
# Check to make sure dimensions of input matches expected dimension of surrogate
_check_dimension(k, val)
phi = z -> exp(-(abs(z))^k.p)
n = length(k.x)
prediction = zero(eltype(k.x[1]))
@inbounds for i in 1:n
prediction = prediction + k.b[i] * phi(val - k.x[i])
end
prediction = k.mu + prediction
return prediction
end
function GEK(x, y, lb::Number, ub::Number; p = 1.0, theta = 1.0)
if length(x) != length(unique(x))
println("There exists a repetition in the samples, cannot build Kriging.")
return
end
mu, b, sigma, inverse_of_R = _calc_gek_coeffs(x, y, p, theta)
return GEK(x, y, lb, ub, p, theta, mu, b, sigma, inverse_of_R)
end
function _calc_gek_coeffs(x, y, p, theta)
nd = length(y)
n = length(x)
d = length(x[1])
R = zeros(eltype(x[1]), nd, nd)
#top left
@inbounds for i in 1:n
for j in 1:n
R[i, j] = prod([exp(-theta[l] * (x[i][l] - x[j][l])) for l in 1:d])
end
end
#top right
@inbounds for i in 1:n
jr = 1
for j in (n + 1):d:nd
for l in 1:d
R[i, j + l - 1] = +2 * theta[l] * (x[i][l] - x[jr][l]) * R[i, jr]
end
jr = jr + 1
end
end
#bottom left
@inbounds for j in 1:n
ir = 1
for i in (n + 1):d:nd
for l in 1:d
R[i + l - 1, j] = -2 * theta[l] * (x[ir][l] - x[j][l]) * R[ir, j]
end
ir = ir + 1
end
end
#bottom right
ir = 1
@inbounds for i in (n + 1):d:nd
for j in (n + 1):d:nd
jr = 1
for l in 1:d
for k in 1:d
R[i + l - 1, j + k - 1] = -4 * theta[l] * theta[k] *
(x[ir][l] - x[jr][l]) *
(x[ir][k] - x[jr][k]) * R[ir, jr]
end
end
jr = jr + 1
end
        ir = ir + 1
end
one = ones(eltype(x[1]), nd, 1)
for i in (n + 1):nd
one[i] = zero(eltype(x[1]))
end
one_t = one'
inverse_of_R = inv(R)
mu = (one_t * inverse_of_R * y) / (one_t * inverse_of_R * one)
b = inverse_of_R * (y - one * mu)
sigma = ((y - one * mu)' * inverse_of_R * (y - one * mu)) / n
mu[1], b, sigma[1], inverse_of_R
end
function std_error_at_point(k::GEK, val)
# Check to make sure dimensions of input matches expected dimension of surrogate
_check_dimension(k, val)
nd1 = length(k.y)
n = length(k.x)
d = length(k.x[1])
r = zeros(eltype(k.x[1]), nd1, 1)
@inbounds for i in 1:n
sum = zero(eltype(k.x[1]))
for l in 1:d
sum = sum + k.theta[l] * norm(val[l] - k.x[i][l])^(k.p[l])
end
r[i] = exp(-sum)
end
one = ones(eltype(k.x[1]), nd1, 1)
@inbounds for i in (n + 1):nd1
one[i] = zero(eltype(k.x[1]))
end
one_t = one'
a = r' * k.inverse_of_R * r
a = a[1]
b = one_t * k.inverse_of_R * one
b = b[1]
mean_squared_error = k.sigma * (1 - a + (1 - a)^2 / (b))
return sqrt(abs(mean_squared_error))
end
function (k::GEK)(val)
# Check to make sure dimensions of input matches expected dimension of surrogate
_check_dimension(k, val)
d = length(val)
n = length(k.x)
return k.mu +
sum(k.b[i] *
exp(-sum(k.theta[j] * norm(val[j] - k.x[i][j])^k.p[j] for j in 1:d))
for i in 1:n)
end
function GEK(x, y, lb, ub; p = collect(one.(x[1])), theta = collect(one.(x[1])))
if length(x) != length(unique(x))
println("There exists a repetition in the samples, cannot build Kriging.")
return
end
mu, b, sigma, inverse_of_R = _calc_gek_coeffs(x, y, p, theta)
GEK(x, y, lb, ub, p, theta, mu, b, sigma, inverse_of_R)
end
function add_point!(k::GEK, new_x, new_y)
if new_x in k.x
println("Adding a sample that already exists, cannot build Kriging.")
return
end
if (length(new_x) == 1 && length(new_x[1]) == 1) ||
(length(new_x) > 1 && length(new_x[1]) == 1 && length(k.theta) > 1)
n = length(k.x)
k.x = insert!(k.x, n + 1, new_x)
k.y = insert!(k.y, n + 1, new_y)
else
n = length(k.x)
k.x = insert!(k.x, n + 1, new_x)
k.y = insert!(k.y, n + 1, new_y)
end
k.mu, k.b, k.sigma, k.inverse_of_R = _calc_gek_coeffs(k.x, k.y, k.p, k.theta)
nothing
end
| Surrogates | https://github.com/SciML/Surrogates.jl.git |
|
[
"MIT"
] | 6.10.0 | ba29564853a3f7b10eb699f1e99a62cdb6a3770f | code | 21626 | using LinearAlgebra
using Statistics
"""
GEKPLS(x, y, x_matrix, y_matrix, grads, xlimits, delta_x, extra_points, n_comp, beta, gamma, theta,
reduced_likelihood_function_value,
           X_offset, X_scale, X_after_std, pls_mean_reshaped, y_mean, y_std)
Gradient-Enhanced Kriging with Partial Least Squares (GEKPLS) surrogate type.
"""
mutable struct GEKPLS{T, X, Y} <: AbstractSurrogate
x::X
y::Y
x_matrix::Matrix{T} #1
y_matrix::Matrix{T} #2
grads::Matrix{T} #3
xl::Matrix{T} #xlimits #4
delta::T #5
extra_points::Int #6
num_components::Int #7
beta::Vector{T} #8
gamma::Matrix{T} #9
theta::Vector{T} #10
reduced_likelihood_function_value::T #11
X_offset::Matrix{T} #12
X_scale::Matrix{T} #13
X_after_std::Matrix{T} #14 - X after standardization
pls_mean::Matrix{T} #15
y_mean::T #16
y_std::T #17
end
"""
bounds_error(x, xl)
Returns `true` if any value in `x` falls outside the corresponding lower/upper bounds in `xl`, and `false` otherwise.
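## Example
```julia
xl = [0.0 1.0; 0.0 1.0]       # per-dimension [lower upper] bounds
bounds_error([0.5 0.5], xl)   # false: the point lies inside the bounds
bounds_error([1.5 0.5], xl)   # true: first coordinate exceeds its upper bound
```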
"""
function bounds_error(x, xl)
num_x_rows = size(x, 1)
num_dim = size(xl, 1)
for i in 1:num_x_rows
for j in 1:num_dim
if (x[i, j] < xl[j, 1] || x[i, j] > xl[j, 2])
return true
end
end
end
return false
end
"""
GEKPLS(X, y, grads, n_comp, delta_x, lb, ub, extra_points, theta)
Constructor for GEKPLS Struct
- x_vec: vector of tuples with x values
- y_vec: vector of floats with outputs
- grads_vec: gradients associated with each of the X points
- n_comp: number of components
- lb: lower bounds
- ub: upper bounds
- delta_x: step size while doing Taylor approximation
- extra_points: number of points to consider
- theta: initial expected variance of PLS regression components
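## Example
```julia
# a minimal sketch; `sample`/`SobolSample` come from Surrogates.jl and the
# gradient tuples below are written by hand for f(x) = x[1]^2 + x[2]^2
f = x -> x[1]^2 + x[2]^2
lb = [-1.0, -1.0]; ub = [1.0, 1.0]
x = sample(30, lb, ub, SobolSample())
y = f.(x)
grads = [([2 * xi[1], 2 * xi[2]],) for xi in x]  # one (gradient,) tuple per sample
g = GEKPLS(x, y, grads, 2, 1e-4, lb, ub, 2, [0.01, 0.01])
g((0.2, 0.3))
```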
"""
function GEKPLS(x_vec, y_vec, grads_vec, n_comp, delta_x, lb, ub, extra_points, theta)
xlimits = hcat(lb, ub)
X = vector_of_tuples_to_matrix(x_vec)
y = reshape(y_vec, (size(X, 1), 1))
grads = vector_of_tuples_to_matrix2(grads_vec)
#ensure that X values are within the upper and lower bounds
if bounds_error(X, xlimits)
println("X values outside bounds")
return
end
pls_mean, X_after_PLS, y_after_PLS = _ge_compute_pls(X, y, n_comp, grads, delta_x,
xlimits, extra_points)
X_after_std, y_after_std, X_offset, y_mean, X_scale, y_std = standardization(
X_after_PLS,
y_after_PLS)
D, ij = cross_distances(X_after_std)
pls_mean_reshaped = reshape(pls_mean, (size(X, 2), n_comp))
d = componentwise_distance_PLS(D, "squar_exp", n_comp, pls_mean_reshaped)
nt, nd = size(X_after_PLS)
beta, gamma, reduced_likelihood_function_value = _reduced_likelihood_function(theta,
"squar_exp",
d, nt, ij,
y_after_std)
return GEKPLS(x_vec, y_vec, X, y, grads, xlimits, delta_x, extra_points, n_comp, beta,
gamma, theta,
reduced_likelihood_function_value,
X_offset, X_scale, X_after_std, pls_mean_reshaped, y_mean, y_std)
end
"""
(g::GEKPLS)(X_test)
Take in a point (given as a tuple or vector of coordinates) and return the predicted approximate output at that point.
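## Example
```julia
g((0.2, 0.3))   # predicted value at one point, assuming `g` is a 2D GEKPLS surrogate
```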
"""
function (g::GEKPLS)(x_vec)
_check_dimension(g, x_vec)
X_test = prep_data_for_pred(x_vec)
n_eval, n_features_x = size(X_test)
X_cont = (X_test .- g.X_offset) ./ g.X_scale
dx = differences(X_cont, g.X_after_std)
pred_d = componentwise_distance_PLS(dx, "squar_exp", g.num_components, g.pls_mean)
nt = size(g.X_after_std, 1)
r = transpose(reshape(squar_exp(g.theta, pred_d), (nt, n_eval)))
f = ones(n_eval, 1)
y_ = (f * g.beta) + (r * g.gamma)
y = g.y_mean .+ g.y_std * y_
return y[1]
end
"""
add_point!(g::GEKPLS, new_x, new_y, new_grads)
Add a new point (with its gradient) to the dataset and rebuild the GEKPLS surrogate.
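## Example
```julia
# assumes `g` is an existing 2D GEKPLS surrogate (the point and gradient below are illustrative)
add_point!(g, (0.3, 0.2), 0.13, ([0.6, 0.4],))
```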
"""
function add_point!(g::GEKPLS, x_tup, y_val, grad_tup)
new_x = prep_data_for_pred(x_tup)
new_grads = prep_data_for_pred(grad_tup)
if vec(new_x) in eachrow(g.x_matrix)
println("Adding a sample that already exists. Cannot build GEKPLS")
return
end
if bounds_error(new_x, g.xl)
println("x values outside bounds")
return
end
temp_y = copy(g.y) #without the copy here, we get error ("cannot resize array with shared data")
push!(g.x, x_tup)
push!(temp_y, y_val)
g.y = temp_y
g.x_matrix = vcat(g.x_matrix, new_x)
g.y_matrix = vcat(g.y_matrix, y_val)
g.grads = vcat(g.grads, new_grads)
pls_mean, X_after_PLS, y_after_PLS = _ge_compute_pls(g.x_matrix, g.y_matrix,
g.num_components,
g.grads, g.delta, g.xl,
g.extra_points)
g.X_after_std, y_after_std, g.X_offset, g.y_mean, g.X_scale, g.y_std = standardization(
X_after_PLS,
y_after_PLS)
D, ij = cross_distances(g.X_after_std)
g.pls_mean = reshape(pls_mean, (size(g.x_matrix, 2), g.num_components))
d = componentwise_distance_PLS(D, "squar_exp", g.num_components, g.pls_mean)
nt, nd = size(X_after_PLS)
g.beta, g.gamma, g.reduced_likelihood_function_value = _reduced_likelihood_function(
g.theta,
"squar_exp",
d,
nt,
ij,
y_after_std)
end
"""
_ge_compute_pls(X, y, n_comp, grads, delta_x, xlimits, extra_points)
## Gradient-enhanced PLS-coefficients.
Parameters
- X: [n_obs,dim] - The input variables.
- y: [n_obs,ny] - The output variable
- n_comp: int - Number of principal components used.
- gradients: - The gradient values. Matrix size (n_obs,dim)
- delta_x: real - The step used in the First Order Taylor Approximation
- xlimits: [dim, 2]- The upper and lower var bounds.
- extra_points: int - The number of extra points per each training point.
Returns
* * *
- Coeff_pls: [dim, n_comp] - The PLS-coefficients.
- X: Concatenation of XX: [extra_points*nt, dim] - Extra points added (when extra_points > 0) and X
- y: Concatenation of yy[extra_points*nt, 1]- Extra points added (when extra_points > 0) and y
"""
function _ge_compute_pls(X, y, n_comp, grads, delta_x, xlimits, extra_points)
# this function is equivalent to a combination of
# https://github.com/SMTorg/smt/blob/f124c01ffa78c04b80221dded278a20123dac742/smt/utils/kriging_utils.py#L1036
# and https://github.com/SMTorg/smt/blob/f124c01ffa78c04b80221dded278a20123dac742/smt/surrogate_models/gekpls.py#L48
nt, dim = size(X)
XX = zeros(0, dim)
yy = zeros(0, size(y)[2])
coeff_pls = zeros((dim, n_comp, nt))
for i in 1:nt
if dim >= 3
bb_vals = circshift(boxbehnken(dim, 1), 1)
else
bb_vals = [0.0 0.0; #center
1.0 0.0; #right
0.0 1.0; #up
-1.0 0.0; #left
0.0 -1.0; #down
1.0 1.0; #right up
-1.0 1.0; #left up
-1.0 -1.0; #left down
1.0 -1.0]
end
_X = zeros((size(bb_vals)[1], dim))
_y = zeros((size(bb_vals)[1], 1))
bb_vals = bb_vals .* (delta_x * (xlimits[:, 2] - xlimits[:, 1]))' #smt calls this sign. I've called it bb_vals
_X = X[i, :]' .+ bb_vals
bb_vals = bb_vals .* grads[i, :]'
_y = y[i, :] .+ sum(bb_vals, dims = 2)
#_pls.fit(_X, _y) # relic from sklearn version; retained for future reference.
#coeff_pls[:, :, i] = _pls.x_rotations_ #relic from sklearn version; retained for future reference.
coeff_pls[:, :, i] = _modified_pls(_X, _y, n_comp) #_modified_pls returns the equivalent of SKLearn's _pls.x_rotations_
if extra_points != 0
start_index = max(1, length(coeff_pls[:, 1, i]) - extra_points + 1)
max_coeff = sortperm(broadcast(abs, coeff_pls[:, 1, i]))[start_index:end]
for ii in max_coeff
XX = [XX; transpose(X[i, :])]
XX[end, ii] += delta_x * (xlimits[ii, 2] - xlimits[ii, 1])
yy = [yy; y[i]]
yy[end] += grads[i, ii] * delta_x * (xlimits[ii, 2] - xlimits[ii, 1])
end
end
end
if extra_points != 0
X = [X; XX]
y = [y; yy]
end
pls_mean = mean(broadcast(abs, coeff_pls), dims = 3)
return pls_mean, X, y
end
######start of bbdesign######
#
# Adapted from 'ExperimentalDesign.jl: Design of Experiments in Julia'
# https://github.com/phrb/ExperimentalDesign.jl
# MIT License
# ExperimentalDesign.jl: Design of Experiments in Julia
# Copyright (C) 2019 Pedro Bruel <[email protected]>
# Permission is hereby granted, free of charge, to any person obtaining a copy of
# this software and associated documentation files (the "Software"), to deal in
# the Software without restriction, including without limitation the rights to
# use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of
# the Software, and to permit persons to whom the Software is furnished to do so,
# subject to the following conditions:
# The above copyright notice and this permission notice (including the next
# paragraph) shall be included in all copies or substantial portions of the
# Software.
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS
# FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR
# COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
# IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
# CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
#
function boxbehnken(matrix_size::Int)
boxbehnken(matrix_size, matrix_size)
end
function boxbehnken(matrix_size::Int, center::Int)
@assert matrix_size >= 3
A_fact = explicit_fullfactorial(Tuple([-1, 1] for i in 1:2))
rows = floor(Int, (0.5 * matrix_size * (matrix_size - 1)) * size(A_fact)[1])
A = zeros(rows, matrix_size)
l = 0
for i in 1:(matrix_size - 1)
for j in (i + 1):matrix_size
l = l + 1
A[(max(0, (l - 1) * size(A_fact)[1]) + 1):(l * size(A_fact)[1]), i] = A_fact[:,
1]
A[(max(0, (l - 1) * size(A_fact)[1]) + 1):(l * size(A_fact)[1]), j] = A_fact[:,
2]
end
end
if center == matrix_size
if matrix_size <= 16
points = [0, 0, 3, 3, 6, 6, 6, 8, 9, 10, 12, 12, 13, 14, 15, 16]
center = points[matrix_size]
end
end
A = transpose(hcat(transpose(A), transpose(zeros(center, matrix_size))))
end
function explicit_fullfactorial(factors::Tuple)
explicit_fullfactorial(fullfactorial(factors))
end
function explicit_fullfactorial(iterator::Base.Iterators.ProductIterator)
hcat(vcat.(collect(iterator)...)...)
end
function fullfactorial(factors::Tuple)
Base.Iterators.product(factors...)
end
######end of bb design######
"""
We subtract the mean from each variable. Then, we divide the values of each
variable by its standard deviation.
## Parameters
X - The input variables.
y - The output variable.
## Returns
X: [n_obs, dim]
The standardized input matrix.
y: [n_obs, 1]
The standardized output vector.
X_offset: The mean (or the min if scale_X_to_unit=True) of each input variable.
y_mean: The mean of the output variable.
X_scale: The standard deviation of each input variable.
y_std: The standard deviation of the output variable.
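## Example
```julia
X = [1.0 10.0; 2.0 20.0; 3.0 30.0]
y = reshape([1.0, 2.0, 3.0], 3, 1)
Xs, ys, X_offset, y_mean, X_scale, y_std = standardization(X, y)
# each column of Xs (and of ys) now has zero mean and unit standard deviation
```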
"""
function standardization(X, y)
#Equivalent of https://github.com/SMTorg/smt/blob/4a4df255b9259965439120091007f9852f41523e/smt/utils/kriging_utils.py#L21
X_offset = mean(X, dims = 1)
X_scale = std(X, dims = 1)
X_scale = map(x -> (x == 0.0) ? x = 1 : x = x, X_scale) #to prevent division by 0 below
y_mean = mean(y)
y_std = std(y)
y_std = map(y -> (y == 0) ? y = 1 : y = y, y_std) #to prevent division by 0 below
X = (X .- X_offset) ./ X_scale
y = (y .- y_mean) ./ y_std
return X, y, X_offset, y_mean, X_scale, y_std
end
"""
Computes the nonzero componentwise cross-distances between the vectors
in X
## Parameters
X: [n_obs, dim]
## Returns
D: [n_obs * (n_obs - 1) / 2, dim]
- The cross-distances between the vectors in X.
ij: [n_obs * (n_obs - 1) / 2, 2]
- The indices i and j of the vectors in X associated to the cross-
distances in D.
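## Example
```julia
X = [0.0 0.0; 1.0 0.0; 0.0 2.0]
D, ij = cross_distances(X)
# ij == [1 2; 1 3; 2 3], and each row k satisfies D[k, :] == X[ij[k, 1], :] - X[ij[k, 2], :]
```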
"""
function cross_distances(X)
# equivalent of https://github.com/SMTorg/smt/blob/4a4df255b9259965439120091007f9852f41523e/smt/utils/kriging_utils.py#L86
n_samples, n_features = size(X)
n_nonzero_cross_dist = (n_samples * (n_samples - 1)) ÷ 2
ij = zeros((n_nonzero_cross_dist, 2))
D = zeros((n_nonzero_cross_dist, n_features))
ll_1 = 0
for k in 1:(n_samples - 1)
ll_0 = ll_1 + 1
ll_1 = ll_0 + n_samples - k - 1
ij[ll_0:ll_1, 1] .= k
ij[ll_0:ll_1, 2] = (k + 1):1:n_samples
D[ll_0:ll_1, :] = -(X[(k + 1):n_samples, :] .- X[k, :]')
end
return D, Int.(ij)
end
"""
Computes the nonzero componentwise cross-spatial-correlation-distance
between the vectors in X.
Equivalent of https://github.com/SMTorg/smt/blob/4a4df255b9259965439120091007f9852f41523e/smt/utils/kriging_utils.py#L1257
with some simplifications (removed theta and return_derivative as it's not required for GEKPLS)
Parameters
----------
D: [n_obs * (n_obs - 1) / 2, dim]
- The L1 cross-distances between the vectors in X.
corr: str
- Name of the correlation function used.
squar_exp or abs_exp.
n_comp: int
- Number of principal components used.
coeff_pls: [dim, n_comp]
- The PLS-coefficients.
Returns
-------
D_corr: [n_obs * (n_obs - 1) / 2, n_comp]
- The componentwise cross-spatial-correlation-distance between the
vectors in X.
"""
function componentwise_distance_PLS(D, corr, n_comp, coeff_pls)
#equivalent of https://github.com/SMTorg/smt/blob/4a4df255b9259965439120091007f9852f41523e/smt/utils/kriging_utils.py#L1257
#todo
#figure out how to handle this computation in the case of very large matrices
#similar to what SMT has done
#at https://github.com/SMTorg/smt/blob/4a4df255b9259965439120091007f9852f41523e/smt/utils/kriging_utils.py#L1257
D_corr = zeros((size(D)[1], n_comp))
if corr == "squar_exp"
D_corr = D .^ 2 * coeff_pls .^ 2
else #abs_exp
D_corr = abs.(D) * abs.(coeff_pls)
end
return D_corr
end
"""
## Squared exponential correlation model.
Equivalent of https://github.com/SMTorg/smt/blob/4a4df255b9259965439120091007f9852f41523e/smt/utils/kriging_utils.py#L604
Parameters:
theta : Hyperparameters of the correlation model
d: componentwise distances from componentwise_distance_PLS
## Returns:
r: array containing the values of the autocorrelation model
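## Example
```julia
theta = [2.0]
d = reshape([0.0, 0.5, 1.0], 3, 1)
squar_exp(theta, d)   # 3×1 matrix: [exp(0.0), exp(-1.0), exp(-2.0)]
```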
"""
function squar_exp(theta, d)
n_components = size(d)[2]
theta = reshape(theta, (1, n_components))
return exp.(-sum(theta .* d, dims = 2))
end
"""
differences(X, Y)
return differences between two arrays
given an input like this:
X = [1.0 1.0 1.0; 2.0 2.0 2.0]
Y = [1.0 2.0 3.0; 4.0 5.0 6.0; 7.0 8.0 9.0]
diff = differences(X,Y)
We get an output (diff) that looks like this:
[ 0. -1. -2.
-3. -4. -5.
-6. -7. -8.
1. 0. -1.
-2. -3. -4.
-5. -6. -7.]
"""
function differences(X, Y)
#equivalent of https://github.com/SMTorg/smt/blob/4a4df255b9259965439120091007f9852f41523e/smt/utils/kriging_utils.py#L392
#code credit: Elias Carvalho - https://stackoverflow.com/questions/72392010/row-wise-operations-between-matrices-in-julia
Rx = repeat(X, inner = (size(Y, 1), 1))
Ry = repeat(Y, size(X, 1))
return Rx - Ry
end
"""
_reduced_likelihood_function(theta, kernel_type, d, nt, ij, y_norma, noise = 0.0)
Compute the reduced likelihood function value and other coefficients necessary for prediction
This function is a loose translation of SMT code from
https://github.com/SMTorg/smt/blob/4a4df255b9259965439120091007f9852f41523e/smt/surrogate_models/krg_based.py#L247
It determines the BLUP parameters and evaluates the reduced likelihood function for the given theta.
## Parameters
theta: array containing the parameters at which the Gaussian Process model parameters should be determined.
kernel_type: name of the correlation function.
d: The componentwise cross-spatial-correlation-distance between the vectors in X.
nt: number of training points
ij: The indices i and j of the vectors in X associated to the cross-distances in D.
y_norma: Standardized y values
noise: noise hyperparameter - increasing noise reduces reduced_likelihood_function_value
## Returns
reduced_likelihood_function_value: real
- The value of the reduced likelihood function associated with the given autocorrelation parameters theta.
beta: Generalized least-squares regression weights
gamma: Gaussian Process weights.
"""
function _reduced_likelihood_function(theta, kernel_type, d, nt, ij, y_norma, noise = 0.0)
#equivalent of https://github.com/SMTorg/smt/blob/4a4df255b9259965439120091007f9852f41523e/smt/surrogate_models/krg_based.py#L247
reduced_likelihood_function_value = -Inf
nugget = 1000000.0 * eps() #a jitter for numerical stability; reducing the multiple from 1000000.0 results in positive definite error for Cholesky decomposition;
if kernel_type == "squar_exp" #todo - add other kernel type abs_exp etc.
r = squar_exp(theta, d)
end
R = (I + zeros(nt, nt)) .* (1.0 + nugget + noise)
for k in 1:size(ij)[1]
R[ij[k, 1], ij[k, 2]] = r[k]
R[ij[k, 2], ij[k, 1]] = r[k]
end
C = cholesky(R).L #todo - #values diverge at this point from SMT code; verify impact
F = ones(nt, 1) #todo - examine if this should be a parameter for this function
Ft = C \ F
Q, G = qr(Ft)
Q = Array(Q)
Yt = C \ y_norma
#todo - in smt, they check if the matrix is ill-conditioned using SVD. Verify and include if necessary
beta = G \ [(transpose(Q) ⋅ Yt)]
rho = Yt .- (Ft .* beta)
gamma = transpose(C) \ rho
sigma2 = sum((rho) .^ 2, dims = 1) / nt
detR = prod(diag(C) .^ (2.0 / nt))
reduced_likelihood_function_value = -nt * log10(sum(sigma2)) - nt * log10(detR)
return beta, gamma, reduced_likelihood_function_value
end
### MODIFIED PLS BELOW ###
# The code below is a simplified version of
# SKLearn's PLS
# https://github.com/scikit-learn/scikit-learn/blob/80598905e/sklearn/cross_decomposition/_pls.py
# It is completely self-contained (no dependencies)
function _center_scale(X, Y)
x_mean = mean(X, dims = 1)
X .-= x_mean
y_mean = mean(Y, dims = 1)
Y .-= y_mean
x_std = std(X, dims = 1)
x_std[x_std .== 0] .= 1.0
X ./= x_std
y_std = std(Y, dims = 1)
y_std[y_std .== 0] .= 1.0
Y ./= y_std
return X, Y
end
function _svd_flip_1d(u, v)
# equivalent of https://github.com/scikit-learn/scikit-learn/blob/80598905e517759b4696c74ecc35c6e2eb508cff/sklearn/cross_decomposition/_pls.py#L149
biggest_abs_val_idx = findmax(abs.(vec(u)))[2]
sign_ = sign(u[biggest_abs_val_idx])
u .*= sign_
v .*= sign_
end
function _get_first_singular_vectors_power_method(X, Y)
my_eps = eps()
y_score = vec(Y)
x_weights = transpose(X)y_score / dot(y_score, y_score)
x_weights ./= (sqrt(dot(x_weights, x_weights)) + my_eps)
x_score = X * x_weights
y_weights = transpose(Y)x_score / dot(x_score, x_score)
y_score = Y * y_weights / (dot(y_weights, y_weights) + my_eps)
#Equivalent in intent to https://github.com/scikit-learn/scikit-learn/blob/80598905e517759b4696c74ecc35c6e2eb508cff/sklearn/cross_decomposition/_pls.py#L66
if any(isnan.(x_weights)) || any(isnan.(y_weights))
return false, false
end
return x_weights, y_weights
end
function _modified_pls(X, Y, n_components)
x_weights_ = zeros(size(X, 2), n_components)
_x_scores = zeros(size(X, 1), n_components)
x_loadings_ = zeros(size(X, 2), n_components)
Xk, Yk = _center_scale(X, Y)
for k in 1:n_components
x_weights, y_weights = _get_first_singular_vectors_power_method(Xk, Yk)
if x_weights == false
break
end
_svd_flip_1d(x_weights, y_weights)
x_scores = Xk * x_weights
x_loadings = transpose(x_scores)Xk / dot(x_scores, x_scores)
Xk = Xk - (x_scores * x_loadings)
y_loadings = transpose(x_scores) * Yk / dot(x_scores, x_scores)
Yk = Yk - x_scores * y_loadings
x_weights_[:, k] = x_weights
_x_scores[:, k] = x_scores
x_loadings_[:, k] = vec(x_loadings)
end
x_rotations_ = x_weights_ * pinv(transpose(x_loadings_)x_weights_)
return x_rotations_
end
### MODIFIED PLS ABOVE ###
### BELOW ARE HELPER FUNCTIONS TO HELP MODIFY VECTORS INTO ARRAYS
function vector_of_tuples_to_matrix(v)
num_rows = length(v)
num_cols = length(first(v))
K = zeros(num_rows, num_cols)
for row in 1:num_rows
for col in 1:num_cols
K[row, col] = v[row][col]
end
end
return K
end
function vector_of_tuples_to_matrix2(v)
#convert gradients into matrix form
num_rows = length(v)
num_cols = length(first(first(v)))
K = zeros(num_rows, num_cols)
for row in 1:num_rows
for col in 1:num_cols
K[row, col] = v[row][1][col]
end
end
return K
end
function prep_data_for_pred(v)
l = length(first(v))
if (l == 1)
return [tup[k] for k in 1:1, tup in v]
end
return [tup[k] for tup in v, k in 1:l]
end
| Surrogates | https://github.com/SciML/Surrogates.jl.git |
|
[
"MIT"
] | 6.10.0 | ba29564853a3f7b10eb699f1e99a62cdb6a3770f | code | 1747 | """
    InverseDistanceSurrogate{X, Y, L, U, P} <: AbstractSurrogate
The inverse distance weighting model is an interpolating method: unknown points are
estimated as a weighted average of the sampled points, with weights proportional to
norm(val - x[i])^(-p), where p is a positive real number called the power parameter.
p > 1 is needed for the derivative to be continuous.
"""
mutable struct InverseDistanceSurrogate{X, Y, L, U, P} <: AbstractSurrogate
x::X
y::Y
lb::L
ub::U
p::P
end
function InverseDistanceSurrogate(x, y, lb, ub; p::Number = 1.0)
return InverseDistanceSurrogate(x, y, lb, ub, p)
end
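# Illustrative usage (a minimal sketch):
#   x = [1.0, 2.0, 3.0]
#   y = [1.0, 4.0, 9.0]
#   surr = InverseDistanceSurrogate(x, y, 0.0, 5.0, p = 2.0)
#   surr(2.0)   # returns 4.0 exactly, since 2.0 is a sample point
#   surr(1.5)   # inverse-distance weighted average of all samples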
function (inverSurr::InverseDistanceSurrogate)(val)
# Check to make sure dimensions of input matches expected dimension of surrogate
_check_dimension(inverSurr, val)
if val in inverSurr.x
return inverSurr.y[findfirst(x -> x == val, inverSurr.x)]
else
if length(inverSurr.lb) == 1
num = sum(inverSurr.y[i] * (norm(val .- inverSurr.x[i]))^(-inverSurr.p)
for i in 1:length(inverSurr.x))
den = sum(norm(val .- inverSurr.x[i])^(-inverSurr.p)
for i in 1:length(inverSurr.x))
return num / den
else
βᵢ = [norm(val .- inverSurr.x[i])^(-inverSurr.p) for i in 1:length(inverSurr.x)]
num = sum(inverSurr.y[i] * βᵢ[i] for i in 1:length(inverSurr.y))
den = sum(βᵢ)
return num / den
end
end
end
function add_point!(inverSurr::InverseDistanceSurrogate, x_new, y_new)
if eltype(x_new) == eltype(inverSurr.x)
#1D
append!(inverSurr.x, x_new)
append!(inverSurr.y, y_new)
else
#ND
push!(inverSurr.x, x_new)
push!(inverSurr.y, y_new)
end
nothing
end
| Surrogates | https://github.com/SciML/Surrogates.jl.git |
|
[
"MIT"
] | 6.10.0 | ba29564853a3f7b10eb699f1e99a62cdb6a3770f | code | 7669 | #=
One-dimensional Kriging method, following these papers:
"Efficient Global Optimization of Expensive Black Box Functions" and
"A Taxonomy of Global Optimization Methods Based on Response Surfaces"
both by DONALD R. JONES
=#
mutable struct Kriging{X, Y, L, U, P, T, M, B, S, R} <: AbstractSurrogate
x::X
y::Y
lb::L
ub::U
p::P
theta::T
mu::M
b::B
sigma::S
inverse_of_R::R
end
"""
Gives the current estimate for array 'val' with respect to the Kriging object k.
"""
function (k::Kriging)(val)
# Check to make sure dimensions of input matches expected dimension of surrogate
_check_dimension(k, val)
n = length(k.x)
d = length(val)
return k.mu +
sum(k.b[i] *
exp(-sum(k.theta[j] * norm(val[j] - k.x[i][j])^k.p[j] for j in 1:d))
for i in 1:n)
end
"""
Returns sqrt of expected mean_squared_error error at the point.
"""
function std_error_at_point(k::Kriging, val)
# Check to make sure dimensions of input matches expected dimension of surrogate
_check_dimension(k, val)
n = length(k.x)
d = length(k.x[1])
r = zeros(eltype(k.x[1]), n, 1)
r = [let
sum = zero(eltype(k.x[1]))
for l in 1:d
sum = sum + k.theta[l] * norm(val[l] - k.x[i][l])^(k.p[l])
end
exp(-sum)
end
for i in 1:n]
one = ones(eltype(k.x[1]), n, 1)
one_t = one'
a = r' * k.inverse_of_R * r
b = one_t * k.inverse_of_R * one
mean_squared_error = k.sigma * (1 - a[1] + (1 - a[1])^2 / b[1])
return sqrt(abs(mean_squared_error))
end
"""
Gives the current estimate for 'val' with respect to the Kriging object k.
"""
function (k::Kriging)(val::Number)
# Check to make sure dimensions of input matches expected dimension of surrogate
_check_dimension(k, val)
n = length(k.x)
return k.mu + sum(k.b[i] * exp(-sum(k.theta * abs(val - k.x[i])^k.p)) for i in 1:n)
end
"""
Returns sqrt of expected mean_squared_error error at the point.
"""
function std_error_at_point(k::Kriging, val::Number)
# Check to make sure dimensions of input matches expected dimension of surrogate
_check_dimension(k, val)
n = length(k.x)
r = [exp(-k.theta * abs(val - k.x[i])^k.p) for i in 1:n]
one = ones(eltype(k.x), n)
one_t = one'
a = r' * k.inverse_of_R * r
b = one_t * k.inverse_of_R * one
mean_squared_error = k.sigma * (1 - a[1] + (1 - a[1])^2 / b[1])
return sqrt(abs(mean_squared_error))
end
"""
    Kriging(x, y, lb::Number, ub::Number; p::Number = 2.0,
            theta::Number = 0.5 / max(1e-6 * abs(ub - lb), std(x))^p)
Constructor for type Kriging.
## Arguments
- `(x, y)`: sampled points
- `p`: value between 0 and 2 modeling the smoothness of the function being approximated, 0 -> rough, 2 -> C^infinity
- `theta`: value > 0 modeling how quickly the function changes.
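## Example
```julia
x = [1.0, 2.0, 3.0, 4.0]
y = sin.(x)
k = Kriging(x, y, 0.0, 5.0; p = 1.9)
k(2.5)                      # prediction at a new point
std_error_at_point(k, 2.5)  # estimated standard error of that prediction
```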
"""
function Kriging(x, y, lb::Number, ub::Number; p = 2.0,
theta = 0.5 / max(1e-6 * abs(ub - lb), std(x))^p)
if length(x) != length(unique(x))
println("There exists a repetition in the samples, cannot build Kriging.")
return
end
if p > 2.0 || p < 0.0
throw(ArgumentError("Hyperparameter p must be between 0 and 2! Got: $p."))
end
if theta ≤ 0
throw(ArgumentError("Hyperparameter theta must be positive! Got: $theta"))
end
mu, b, sigma, inverse_of_R = _calc_kriging_coeffs(x, y, p, theta)
Kriging(x, y, lb, ub, p, theta, mu, b, sigma, inverse_of_R)
end
function _calc_kriging_coeffs(x, y, p::Number, theta::Number)
n = length(x)
R = [exp(-theta * abs(x[i] - x[j])^p) for i in 1:n, j in 1:n]
# Estimate nugget based on maximum allowed condition number
# This regularizes R to allow for points being close to each other without R becoming
# singular, at the cost of slightly relaxing the interpolation condition
# Derived from "An analytic comparison of regularization methods for Gaussian Processes"
# by Mohammadi et al (https://arxiv.org/pdf/1602.00853.pdf)
λ = eigen(R).values
λmax = λ[end]
λmin = λ[1]
κmax = 1e8
λdiff = λmax - κmax * λmin
if λdiff ≥ 0
nugget = λdiff / (κmax - 1)
else
nugget = 0.0
end
one = ones(eltype(x[1]), n)
one_t = one'
R = R + Diagonal(nugget * one)
inverse_of_R = inv(R)
mu = (one_t * inverse_of_R * y) / (one_t * inverse_of_R * one)
b = inverse_of_R * (y - one * mu)
sigma = ((y - one * mu)' * b) / n
mu[1], b, sigma[1], inverse_of_R
end
"""
    Kriging(x, y, lb, ub; p = 2.0 .* collect(one.(x[1])),
            theta = [0.5 / max(1e-6 * norm(ub .- lb), std(x_i[i] for x_i in x))^p[i] for i in 1:length(x[1])])
Constructor for Kriging surrogate.
- `(x, y)`: sampled points
- `p`: array of values `0 <= p[i] <= 2` modeling the smoothness of the function being approximated in the i-th variable; low p -> rough, high p -> smooth
- `theta`: array of values > 0 modeling how much the function is changing in the i-th variable.
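## Example
```julia
x = [(0.0, 0.0), (1.0, 0.5), (0.5, 1.0)]
y = [0.0, 1.25, 1.25]
k = Kriging(x, y, [0.0, 0.0], [1.0, 1.0])
k((0.25, 0.25))
```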
"""
function Kriging(x, y, lb, ub; p = 2.0 .* collect(one.(x[1])),
theta = [0.5 / max(1e-6 * norm(ub .- lb), std(x_i[i] for x_i in x))^p[i]
for i in 1:length(x[1])])
if length(x) != length(unique(x))
println("There exists a repetition in the samples, cannot build Kriging.")
return
end
for i in 1:length(x[1])
if p[i] > 2.0 || p[i] < 0.0
throw(ArgumentError("All p must be between 0 and 2! Got: $p."))
end
if theta[i] ≤ 0.0
throw(ArgumentError("All theta must be positive! Got: $theta."))
end
end
mu, b, sigma, inverse_of_R = _calc_kriging_coeffs(x, y, p, theta)
Kriging(x, y, lb, ub, p, theta, mu, b, sigma, inverse_of_R)
end
function _calc_kriging_coeffs(x, y, p, theta)
n = length(x)
d = length(x[1])
R = [let
sum = zero(eltype(x[1]))
for l in 1:d
sum = sum + theta[l] * norm(x[i][l] - x[j][l])^p[l]
end
exp(-sum)
end
for j in 1:n, i in 1:n]
# Estimate nugget based on maximum allowed condition number
# This regularizes R to allow for points being close to each other without R becoming
# singular, at the cost of slightly relaxing the interpolation condition
# Derived from "An analytic comparison of regularization methods for Gaussian Processes"
# by Mohammadi et al (https://arxiv.org/pdf/1602.00853.pdf)
λ = eigen(R).values
λmax = λ[end]
λmin = λ[1]
κmax = 1e8
λdiff = λmax - κmax * λmin
if λdiff ≥ 0
nugget = λdiff / (κmax - 1)
else
nugget = 0.0
end
one = ones(eltype(x[1]), n)
one_t = one'
R = R + Diagonal(nugget * one[:, 1])
inverse_of_R = inv(R)
mu = (one_t * inverse_of_R * y) / (one_t * inverse_of_R * one)
y_minus_1μ = y - one * mu
b = inverse_of_R * y_minus_1μ
sigma = (y_minus_1μ' * b) / n
mu[1], b, sigma[1], inverse_of_R
end
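# Illustrative usage sketch (added for clarity; not part of the original library source).
# It assumes the full package context (e.g. `_check_dimension`) is loaded; the sample
# data below are made up.
let
    x_demo = [(0.0, 0.0), (0.5, 0.5), (1.0, 0.0), (0.0, 1.0), (1.0, 1.0)]
    y_demo = [xy[1]^2 + xy[2]^2 for xy in x_demo]
    krig_nd = Kriging(x_demo, y_demo, [0.0, 0.0], [1.0, 1.0])
    krig_nd((0.25, 0.75))                     # prediction at an unsampled point
    std_error_at_point(krig_nd, (0.25, 0.75)) # estimated standard error there
end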
"""
add_point!(k::Kriging,new_x,new_y)
Adds the new point and its respective value to the sample points.
Warning: If you are just adding a single point, you have to wrap it with [].
Updates the Kriging model in place and returns `nothing`.
"""
function add_point!(k::Kriging, new_x, new_y)
if new_x in k.x
println("Adding a sample that already exists, cannot build Kriging.")
return
end
if (length(new_x) == 1 && length(new_x[1]) == 1) ||
(length(new_x) > 1 && length(new_x[1]) == 1 && length(k.theta) > 1)
push!(k.x, new_x)
push!(k.y, new_y)
else
append!(k.x, new_x)
append!(k.y, new_y)
end
k.mu, k.b, k.sigma, k.inverse_of_R = _calc_kriging_coeffs(k.x, k.y, k.p, k.theta)
nothing
end
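# Illustrative usage sketch (added for clarity; not part of the original library source).
# It assumes the full package context (e.g. `_check_dimension`) is loaded; the sample
# data below are made up.
let
    x_demo = [0.0, 0.5, 1.0, 1.5, 2.0]
    y_demo = sin.(x_demo)
    krig_1d = Kriging(x_demo, y_demo, 0.0, 2.0)
    krig_1d(0.75)                        # prediction at an unsampled point
    std_error_at_point(krig_1d, 0.75)    # estimated standard error there
    add_point!(krig_1d, 1.25, sin(1.25)) # add one more sample and refit the coefficients
end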
| Surrogates | https://github.com/SciML/Surrogates.jl.git |
|
[
"MIT"
] | 6.10.0 | ba29564853a3f7b10eb699f1e99a62cdb6a3770f | code | 2249 | using GLM
mutable struct LinearSurrogate{X, Y, C, L, U} <: AbstractSurrogate
x::X
y::Y
coeff::C
lb::L
ub::U
end
function LinearSurrogate(x, y, lb::Number, ub::Number)
ols = lm(reshape(x, length(x), 1), y)
LinearSurrogate(x, y, coef(ols), lb, ub)
end
function add_point!(my_linear::LinearSurrogate, new_x, new_y)
if length(my_linear.lb) == 1
#1D
my_linear.x = vcat(my_linear.x, new_x)
my_linear.y = vcat(my_linear.y, new_y)
md = lm(reshape(my_linear.x, length(my_linear.x), 1), my_linear.y)
my_linear.coeff = coef(md)
else
#ND
n_previous = length(my_linear.x)
a = vcat(my_linear.x, new_x)
n_after = length(a)
dim_new = n_after - n_previous
n = length(my_linear.x)
d = length(my_linear.x[1])
tot_dim = n + dim_new
X = Array{Float64, 2}(undef, tot_dim, d)
for j in 1:n
X[j, :] = vec(collect(my_linear.x[j]))
end
if dim_new == 1
X[n + 1, :] = vec(collect(new_x))
else
i = 1
for j in (n + 1):tot_dim
X[j, :] = vec(collect(new_x[i]))
i = i + 1
end
end
my_linear.x = vcat(my_linear.x, new_x)
my_linear.y = vcat(my_linear.y, new_y)
md = lm(X, my_linear.y)
my_linear.coeff = coef(md)
end
nothing
end
function (lin::LinearSurrogate)(val)
# Check to make sure dimensions of input matches expected dimension of surrogate
_check_dimension(lin, val)
return lin.coeff' * [val...]
end
"""
LinearSurrogate(x,y,lb,ub)
Builds a linear surrogate using GLM.jl
"""
function LinearSurrogate(x, y, lb, ub)
#X = Array{eltype(x[1]),2}(undef,length(x),length(x[1]))
#=
X = Array{eltype(x[1]),2}(undef,length(x),length(x[1]))
for j = 1:length(x)
X[j,:] = vec(collect(x[j]))
end
=#
T = collect(reshape(collect(Base.Iterators.flatten(x)), (length(x[1]), length(x)))')
#T = transpose(reshape(reinterpret(eltype(x[1]), x), length(x[1]), length(x)))
X = Array{eltype(x[1]), 2}(undef, length(x), length(x[1]))
X = copy(T)
ols = lm(X, y)
return LinearSurrogate(x, y, coef(ols), lb, ub)
end
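# Illustrative usage sketch (added for clarity; not part of the original library source).
# It assumes the full package context is loaded; the data below are noiseless samples
# of 2*x1 + 3*x2, so the linear coefficients are recovered exactly.
let
    x_demo = [(1.0, 1.0), (2.0, 1.0), (1.0, 2.0), (3.0, 2.0), (2.0, 3.0)]
    y_demo = [2 * p[1] + 3 * p[2] for p in x_demo]
    lin_demo = LinearSurrogate(x_demo, y_demo, [0.0, 0.0], [5.0, 5.0])
    lin_demo((1.5, 2.5)) # ≈ 2 * 1.5 + 3 * 2.5 = 10.5
end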
| Surrogates | https://github.com/SciML/Surrogates.jl.git |
|
[
"MIT"
] | 6.10.0 | ba29564853a3f7b10eb699f1e99a62cdb6a3770f | code | 5926 | using ExtendableSparse
mutable struct LobachevskySurrogate{X, Y, A, N, L, U, C, S} <: AbstractSurrogate
x::X
y::Y
alpha::A
n::N
lb::L
ub::U
coeff::C
sparse::S
end
function phi_nj1D(point, x, alpha, n)
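    # `false * x[1]` is a strong zero: it produces a zero of the same numeric type as
    # the samples, so the accumulator below stays type-stable.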
val = false * x[1]
for l in 0:n
a = sqrt(n / 3) * alpha * (point - x) + (n - 2 * l)
if a > 0
if l % 2 == 0
val += binomial(n, l) * a^(n - 1)
else
val -= binomial(n, l) * a^(n - 1)
end
end
end
val *= sqrt(n / 3) / (2^n * factorial(n - 1))
return val
end
function _calc_loba_coeff1D(x, y, alpha, n, sparse)
dim = length(x)
if sparse
D = ExtendableSparseMatrix{eltype(x), Int}(dim, dim)
else
D = zeros(eltype(x[1]), dim, dim)
end
for i in 1:dim
for j in 1:dim
D[i, j] = phi_nj1D(x[i], x[j], alpha, n)
end
end
Sym = Symmetric(D, :U)
return Sym \ y
end
"""
Lobachevsky interpolation, suggested parameters: 0 <= alpha <= 4, n must be even.
"""
function LobachevskySurrogate(
x, y, lb::Number, ub::Number; alpha::Number = 1.0, n::Int = 4,
sparse = false)
if alpha > 4 || alpha < 0
error("Alpha must be between 0 and 4")
end
if n % 2 != 0
error("Parameter n must be even")
end
coeff = _calc_loba_coeff1D(x, y, alpha, n, sparse)
LobachevskySurrogate(x, y, alpha, n, lb, ub, coeff, sparse)
end
function (loba::LobachevskySurrogate)(val::Number)
# Check to make sure dimensions of input matches expected dimension of surrogate
_check_dimension(loba, val)
return sum(loba.coeff[j] * phi_nj1D(val, loba.x[j], loba.alpha, loba.n)
for j in 1:length(loba.x))
end
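# Illustrative usage sketch (added for clarity; not part of the original library source).
# It assumes the full package context is loaded; alpha and n follow the suggested
# ranges in the docstring above, and the sample data are made up.
let
    x_demo = [0.0, 0.4, 0.8, 1.2, 1.6, 2.0]
    y_demo = exp.(-x_demo)
    loba_demo = LobachevskySurrogate(x_demo, y_demo, 0.0, 2.0; alpha = 2.0, n = 4)
    loba_demo(1.0) # interpolated value at an unsampled point
end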
function phi_njND(point, x, alpha, n)
return prod(phi_nj1D(point[h], x[h], alpha[h], n) for h in 1:length(x))
end
function _calc_loba_coeffND(x, y, alpha, n, sparse)
dim = length(x)
if sparse
D = ExtendableSparseMatrix{eltype(x[1]), Int}(dim, dim)
else
D = zeros(eltype(x[1]), dim, dim)
end
for i in 1:dim
for j in 1:dim
D[i, j] = phi_njND(x[i], x[j], alpha, n)
end
end
Sym = Symmetric(D, :U)
return Sym \ y
end
"""
LobachevskySurrogate(x, y, lb, ub; alpha = collect(one.(x[1])), n::Int = 4, sparse = false)
Build the Lobachevsky surrogate with parameters alpha and n.
"""
function LobachevskySurrogate(x, y, lb, ub; alpha = collect(one.(x[1])), n::Int = 4,
sparse = false)
if n % 2 != 0
error("Parameter n must be even")
end
coeff = _calc_loba_coeffND(x, y, alpha, n, sparse)
LobachevskySurrogate(x, y, alpha, n, lb, ub, coeff, sparse)
end
function (loba::LobachevskySurrogate)(val)
# Check to make sure dimensions of input matches expected dimension of surrogate
_check_dimension(loba, val)
return sum(loba.coeff[j] * phi_njND(val, loba.x[j], loba.alpha, loba.n)
for j in 1:length(loba.x))
end
function add_point!(loba::LobachevskySurrogate, x_new, y_new)
if length(loba.x[1]) == 1
#1D
append!(loba.x, x_new)
append!(loba.y, y_new)
loba.coeff = _calc_loba_coeff1D(loba.x, loba.y, loba.alpha, loba.n, loba.sparse)
else
#ND
loba.x = vcat(loba.x, x_new)
loba.y = vcat(loba.y, y_new)
loba.coeff = _calc_loba_coeffND(loba.x, loba.y, loba.alpha, loba.n, loba.sparse)
end
nothing
end
#Lobachevsky integrals
function _phi_int(point, n)
res = zero(eltype(point))
for k in 0:n
c = sqrt(n / 3) * point + (n - 2 * k)
if c > 0
res = res + (-1)^k * binomial(n, k) * c^n
end
end
res *= 1 / (2^n * factorial(n))
end
function lobachevsky_integral(loba::LobachevskySurrogate, lb::Number, ub::Number)
val = zero(eltype(loba.y[1]))
n = length(loba.x)
for i in 1:n
a = loba.alpha * (ub - loba.x[i])
b = loba.alpha * (lb - loba.x[i])
int = 1 / loba.alpha * (_phi_int(a, loba.n) - _phi_int(b, loba.n))
val = val + loba.coeff[i] * int
end
return val
end
"""
lobachevsky_integral(loba::LobachevskySurrogate,lb,ub)
Calculates the integral of the Lobachevsky surrogate, which has a closed form.
"""
function lobachevsky_integral(loba::LobachevskySurrogate, lb, ub)
d = length(lb)
val = zero(eltype(loba.y[1]))
for j in 1:length(loba.x)
I = 1.0
for i in 1:d
upper = loba.alpha[i] * (ub[i] - loba.x[j][i])
lower = loba.alpha[i] * (lb[i] - loba.x[j][i])
I *= 1 / loba.alpha[i] * (_phi_int(upper, loba.n) - _phi_int(lower, loba.n))
end
val = val + loba.coeff[j] * I
end
return val
end
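# Illustrative sketch of the closed-form integral (added for clarity; not part of the
# original library source). It assumes the full package context is loaded.
let
    x_demo = [0.0, 0.4, 0.8, 1.2, 1.6, 2.0]
    y_demo = exp.(-x_demo)
    loba_demo = LobachevskySurrogate(x_demo, y_demo, 0.0, 2.0; alpha = 2.0, n = 4)
    # Approximates the integral of exp(-x) over [0, 2] (about 0.865), up to the
    # surrogate's own approximation error.
    lobachevsky_integral(loba_demo, 0.0, 2.0)
end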
"""
lobachevsky_integrate_dimension(loba::LobachevskySurrogate, lb, ub, dim::Int)
Integrates the surrogate over the selected dimension `dim`, returning a surrogate with that dimension removed.
"""
function lobachevsky_integrate_dimension(loba::LobachevskySurrogate, lb, ub, dim::Int)
gamma_d = zero(loba.coeff[1])
n = length(loba.x)
for i in 1:n
a = loba.alpha[dim] * (ub[dim] - loba.x[i][dim])
b = loba.alpha[dim] * (lb[dim] - loba.x[i][dim])
int = 1 / loba.alpha[dim] * (_phi_int(a, loba.n) - _phi_int(b, loba.n))
gamma_d = gamma_d + loba.coeff[i] * int
end
new_coeff = loba.coeff .* gamma_d
if length(lb) == 2
# Integrating one dimension -> 1D
new_x = zeros(eltype(loba.x[1][1]), n)
for i in 1:n
new_x[i] = deleteat!(collect(loba.x[i]), dim)[1]
end
else
dummy = loba.x[1]
dummy = deleteat!(collect(dummy), dim)
new_x = typeof(Tuple(dummy))[]
for i in 1:n
push!(new_x, Tuple(deleteat!(collect(loba.x[i]), dim)))
end
end
new_lb = deleteat!(lb, dim)
new_ub = deleteat!(ub, dim)
new_loba = deleteat!(loba.alpha, dim)
return LobachevskySurrogate(new_x, loba.y, loba.alpha, loba.n, new_lb, new_ub,
new_coeff, loba.sparse)
end
| Surrogates | https://github.com/SciML/Surrogates.jl.git |
|
[
"MIT"
] | 6.10.0 | ba29564853a3f7b10eb699f1e99a62cdb6a3770f | code | 9829 | #=
using Clustering
#using GaussianMixtures
using LinearAlgebra
mutable struct MOE{X,Y,L,U,S,K,M,V,W} <: AbstractSurrogate
x::X
y::Y
lb::L
ub::U
local_surr::S
k::K
means::M
varcov::V
weights::W
end
function MOE(x,y,lb::Number,ub::Number; scale_factor::Number = 1.0, k::Int = 2, local_kind = [RadialBasisStructure(radial_function = linearRadial, scale_factor=1.0,sparse = false),RadialBasisStructure(radial_function = cubicRadial, scale_factor=1.0, sparse = false)])
if k != length(local_kind)
throw("Number of mixtures = $k is not equal to length of local surrogates")
end
n = length(x)
x = x ./ scale_factor
y = y ./ scale_factor
# find weight, mean and variance for each mixture
# For GaussianMixtures I need nxd matrix
X_G = reshape(x,(n,1))
moe_gmm = GaussianMixtures.GMM(k,X_G)
weights = GaussianMixtures.weights(moe_gmm)
means = GaussianMixtures.means(moe_gmm)
variances = moe_gmm.Σ
#cluster the points
#For clustering I need dxn matrix
X_C = reshape(x,(1,n))
KNN = kmeans(X_C, k)
x_c = [ Array{eltype(x)}(undef,0) for i = 1:k]
y_c = [ Array{eltype(y)}(undef,0) for i = 1:k]
a = assignments(KNN)
@inbounds for i = 1:n
pos = a[i]
append!(x_c[pos],x[i])
append!(y_c[pos],y[i])
end
local_surr = Dict()
for i = 1:k
if local_kind[i][1] == "RadialBasis"
#fit and append to local_surr
my_local_i = RadialBasis(x_c[i],y_c[i],lb,ub,rad = local_kind[i].radial_function, scale_factor = local_kind[i].scale_factor, sparse = local_kind[i].sparse)
local_surr[i] = my_local_i
elseif local_kind[i][1] == "Kriging"
my_local_i = Kriging(x_c[i], y_c[i],lb,ub, p = local_kind[i].p, theta = local_kind[i].theta)
local_surr[i] = my_local_i
elseif local_kind[i][1] == "GEK"
my_local_i = GEK(x_c[i], y_c[i],lb,ub, p = local_kind[i].p, theta = local_kind[i].theta)
local_surr[i] = my_local_i
elseif local_kind[i] == "LinearSurrogate"
my_local_i = LinearSurrogate(x_c[i], y_c[i],lb,ub)
local_surr[i] = my_local_i
elseif local_kind[i][1] == "InverseDistanceSurrogate"
my_local_i = InverseDistanceSurrogate(x_c[i], y_c[i],lb,ub, local_kind[i].p)
local_surr[i] = my_local_i
elseif local_kind[i][1] == "LobachevskySurrogate"
my_local_i = LobachevskySurrogate(x_c[i], y_c[i],lb,ub,alpha = local_kind[i].alpha , n = local_kind[i].n, sparse = local_kind[i].sparse)
local_surr[i] = my_local_i
elseif local_kind[i][1] == "NeuralSurrogate"
my_local_i = NeuralSurrogate(x_c[i], y_c[i],lb,ub, model = local_kind[i].model , loss = local_kind[i].loss ,opt = local_kind[i].opt ,n_echos = local_kind[i].n_echos)
local_surr[i] = my_local_i
elseif local_kind[i][1] == "RandomForestSurrogate"
my_local_i = RandomForestSurrogate(x_c[i], y_c[i],lb,ub, num_round = local_kind[i].num_round)
local_surr[i] = my_local_i
elseif local_kind[i] == "SecondOrderPolynomialSurrogate"
my_local_i = SecondOrderPolynomialSurrogate(x_c[i], y_c[i],lb,ub)
local_surr[i] = my_local_i
elseif local_kind[i][1] == "Wendland"
my_local_i = Wendand(x_c[i], y_c[i],lb,ub, eps = local_kind[i].eps, maxiters = local_kind[i].maxiters, tol = local_kind[i].tol)
local_surr[i] = my_local_i
elseif local_kind[i][1] == "PolynomialChaosSurrogate"
my_local_i = PolynomialChaosSurrogate(x,y,lb,ub, op = local_kind[i].op)
local_surr[i] = my_local_i
else
throw("A surrogate with name provided does not exist or is not currently supported with MOE.")
end
end
return MOE(x,y,lb,ub,local_surr,k,means,variances,weights)
end
function MOE(x,y,lb,ub; k::Int = 2, scale_factor::Number = 1.0,
local_kind = [RadialBasisStructure(radial_function = linearRadial, scale_factor=1.0, sparse = false),RadialBasisStructure(radial_function = cubicRadial, scale_factor=1.0, sparse = false)])
n = length(x)
d = length(lb)
for i = 1:n
x[i] = x[i] ./ scale_factor
end
y = y ./ scale_factor
#GMM parameters:
X_G = collect(reshape(collect(Base.Iterators.flatten(x)), (d,n))')
my_gmm = GMM(k,X_G,kind = :full)
weights = my_gmm.w
means = my_gmm.μ
varcov = my_gmm.Σ
#cluster the points
X_C = collect(reshape(collect(Base.Iterators.flatten(x)), (d,n)))
KNN = kmeans(X_C, k)
x_c = [ Array{eltype(x)}(undef,0) for i = 1:k]
y_c = [ Array{eltype(y)}(undef,0) for i = 1:k]
a = assignments(KNN)
@inbounds for i = 1:n
pos = a[i]
x_c[pos] = vcat(x_c[pos],x[i])
append!(y_c[pos],y[i])
end
local_surr = Dict()
for i = 1:k
if local_kind[i][1] == "RadialBasis"
#fit and append to local_surr
my_local_i = RadialBasis(x_c[i],y_c[i],lb,ub,rad = local_kind[i].radial_function, scale_factor = local_kind[i].scale_factor, sparse = local_kind[i].sparse)
local_surr[i] = my_local_i
elseif local_kind[i][1] == "Kriging"
my_local_i = Kriging(x_c[i], y_c[i],lb,ub, p = local_kind[i].p, theta = local_kind[i].theta)
local_surr[i] = my_local_i
elseif local_kind[i][1] == "GEK"
my_local_i = GEK(x_c[i], y_c[i],lb,ub, p = local_kind[i].p, theta = local_kind[i].theta)
local_surr[i] = my_local_i
elseif local_kind[i] == "LinearSurrogate"
my_local_i = LinearSurrogate(x_c[i], y_c[i],lb,ub)
local_surr[i] = my_local_i
elseif local_kind[i][1] == "InverseDistanceSurrogate"
my_local_i = InverseDistanceSurrogate(x_c[i], y_c[i],lb,ub, local_kind[i].p)
local_surr[i] = my_local_i
elseif local_kind[i][1] == "LobachevskySurrogate"
my_local_i = LobachevskySurrogate(x_c[i], y_c[i],lb,ub,alpha = local_kind[i].alpha , n = local_kind[i].n, sparse = local_kind[i].sparse)
local_surr[i] = my_local_i
elseif local_kind[i][1] == "NeuralSurrogate"
my_local_i = NeuralSurrogate(x_c[i], y_c[i],lb,ub, model = local_kind[i].model , loss = local_kind[i].loss ,opt = local_kind[i].opt ,n_echos = local_kind[i].n_echos)
local_surr[i] = my_local_i
elseif local_kind[i][1] == "RandomForestSurrogate"
my_local_i = RandomForestSurrogate(x_c[i], y_c[i],lb,ub, num_round = local_kind[i].num_round)
local_surr[i] = my_local_i
elseif local_kind[i] == "SecondOrderPolynomialSurrogate"
my_local_i = SecondOrderPolynomialSurrogate(x_c[i], y_c[i],lb,ub)
local_surr[i] = my_local_i
elseif local_kind[i][1] == "Wendland"
my_local_i = Wendand(x_c[i], y_c[i],lb,ub, eps = local_kind[i].eps, maxiters = local_kind[i].maxiters, tol = local_kind[i].tol)
local_surr[i] = my_local_i
elseif local_kind[i][1] == "PolynomialChaosSurrogate"
my_local_i = PolynomialChaosSurrogate(x,y,lb,ub, op = local_kind[i].op)
local_surr[i] = my_local_i
else
throw("A surrogate with name "* local_kind[i][1] *" does not exist or is not currently supported with MOE.")
end
end
return MOE(x,y,lb,ub,local_surr,k,means,varcov,weights)
end
function _prob_x_in_i(x::Number,i,mu,varcov,alpha,k)
num = (1/sqrt(varcov[i]))*alpha[i]*exp(-0.5(x-mu[i])*(1/varcov[i])*(x-mu[i]))
den = sum([(1/sqrt(varcov[j]))*alpha[j]*exp(-0.5(x-mu[j])*(1/varcov[j])*(x-mu[j])) for j = 1:k])
return num/den
end
function _prob_x_in_i(x,i,mu,varcov,alpha,k)
num = (1/sqrt(det(varcov[i])))*alpha[i]*exp(-0.5*(x .- mu[i,:])'*(inv(varcov[i]))*(x .- mu[i,:]))
den = sum([(1/sqrt(det(varcov[j])))*alpha[j]*exp(-0.5*(x .- mu[j,:])'*(inv(varcov[j]))*(x .- mu[j,:])) for j = 1:k])
return num/den
end
function (moe::MOE)(val)
return prod([moe.local_surr[i](val)*_prob_x_in_i(val,i,moe.means,moe.varcov,moe.weights,moe.k) for i = 1:moe.k])
end
function add_point!(my_moe::MOE,x_new,y_new)
if length(my_moe.x[1]) == 1
#1D
my_moe.x = vcat(my_moe.x,x_new)
my_moe.y = vcat(my_moe.y,y_new)
n = length(my_moe.x)
#New mixture parameters
X_G = reshape(my_moe.x,(n,1))
moe_gmm = GaussianMixtures.GMM(my_moe.k,X_G)
my_moe.weights = GaussianMixtures.weights(moe_gmm)
my_moe.means = GaussianMixtures.means(moe_gmm)
my_moe.varcov = moe_gmm.Σ
#Find cluster of new point(s):
n_added = length(x_new)
X_C = reshape(my_moe.x,(1,n))
KNN = kmeans(X_C, my_moe.k)
a = assignments(KNN)
#Recalculate only relevant surrogates
for i = 1:n_added
pos = a[n-n_added+i]
add_point!(my_moe.local_surr[i],my_moe.x[n-n_added+i],my_moe.y[n-n_added+i])
end
else
#ND
my_moe.x = vcat(my_moe.x,x_new)
my_moe.y = vcat(my_moe.y,y_new)
n = length(my_moe.x)
d = length(my_moe.lb)
#New mixture parameters
X_G = collect(reshape(collect(Base.Iterators.flatten(my_moe.x)), (d,n))')
my_gmm = GMM(my_moe.k,X_G,kind = :full)
my_moe.weights = my_gmm.w
my_moe.means = my_gmm.μ
my_moe.varcov = my_gmm.Σ
#cluster the points
X_C = collect(reshape(collect(Base.Iterators.flatten(my_moe.x)), (d,n)))
KNN = kmeans(X_C, my_moe.k)
a = assignments(KNN)
n_added = length(x_new)
for i = 1:n_added
pos = a[n-n_added+i]
add_point!(my_moe.local_surr[i],my_moe.x[n-n_added+i],my_moe.y[n-n_added+i])
end
end
nothing
end
=#
| Surrogates | https://github.com/SciML/Surrogates.jl.git |
|
[
"MIT"
] | 6.10.0 | ba29564853a3f7b10eb699f1e99a62cdb6a3770f | code | 75924 | using LinearAlgebra
using Distributions
using Zygote
abstract type SurrogateOptimizationAlgorithm end
abstract type ParallelStrategy end
struct KrigingBeliever <: ParallelStrategy end
struct KrigingBelieverUpperBound <: ParallelStrategy end
struct KrigingBelieverLowerBound <: ParallelStrategy end
struct MinimumConstantLiar <: ParallelStrategy end
struct MaximumConstantLiar <: ParallelStrategy end
struct MeanConstantLiar <: ParallelStrategy end
#single objective optimization
struct SRBF <: SurrogateOptimizationAlgorithm end
struct LCBS <: SurrogateOptimizationAlgorithm end
struct EI <: SurrogateOptimizationAlgorithm end
struct DYCORS <: SurrogateOptimizationAlgorithm end
struct SOP{P} <: SurrogateOptimizationAlgorithm
p::P
end
#multi objective optimization
struct SMB <: SurrogateOptimizationAlgorithm end
struct RTEA{K, Z, P, N, S} <: SurrogateOptimizationAlgorithm
k::K
z::Z
p::P
n_c::N
sigma::S
end
function merit_function(point, w, surr::AbstractSurrogate, s_max, s_min, d_max, d_min,
box_size)
if length(point) == 1
D_x = box_size + 1
for i in 1:length(surr.x)
distance = norm(surr.x[i] - point)
if distance < D_x
D_x = distance
end
end
return w * (surr(point) - s_min) / (s_max - s_min) +
(1 - w) * ((d_max - D_x) / (d_max - d_min))
else
D_x = norm(box_size) + 1
for i in 1:length(surr.x)
distance = norm(surr.x[i] .- point)
if distance < D_x
D_x = distance
end
end
return w * (surr(point) - s_min) / (s_max - s_min) +
(1 - w) * ((d_max - D_x) / (d_max - d_min))
end
end
"""
The main idea is to pick the new evaluations from a set of candidate points, where each candidate point is generated as an N(0, sigma^2)
distributed perturbation from the current best solution.
The value of sigma is modified based on progress and follows the same logic as
in many trust region methods: we increase sigma if we make a lot of progress
(the surrogate is accurate) and decrease sigma when we aren’t able to make progress
(the surrogate model is inaccurate).
More details about how sigma is updated are given in the original papers.
After generating the candidate points, we predict their objective function value
and compute the minimum distance to the previously evaluated point.
Let the candidate points be denoted by C and let the function value predictions
be s(x\\_i) and the distance values be d(x\\_i), both rescaled through a
linear transformation to the interval [0,1]. This is done to put the values on
the same scale.
The next point selected for evaluation is the candidate point x that minimizes
the weighted-distance merit function:
``merit(x) = ws(x) + (1-w)(1-d(x))``
where ``0 \\leq w \\leq 1``.
That is, we want a small function value prediction and a large minimum distance
from the previously evaluated points.
The weight w is commonly cycled between
a few values to achieve both exploitation and exploration.
When w is close to zero, we do pure exploration, while w close to 1 corresponds to exploitation.
"""
function surrogate_optimize(obj::Function, ::SRBF, lb, ub, surr::AbstractSurrogate,
sample_type::SamplingAlgorithm; maxiters = 100,
num_new_samples = 100, needs_gradient = false)
scale = 0.2
success = 0
failure = 0
w_range = [0.3, 0.5, 0.7, 0.95]
#Vector containing size in each direction
box_size = lb - ub
success = 0
failures = 0
dtol = 1e-3 * norm(ub - lb)
d = length(surr.x)
num_of_iterations = 0
for w in Iterators.cycle(w_range)
num_of_iterations += 1
if num_of_iterations == maxiters
index = argmin(surr.y)
return (surr.x[index], surr.y[index])
end
for k in 1:maxiters
incumbent_value = minimum(surr.y)
incumbent_x = surr.x[argmin(surr.y)]
new_lb = incumbent_x .- 3 * scale * norm(incumbent_x .- lb)
new_ub = incumbent_x .+ 3 * scale * norm(incumbent_x .- ub)
new_lb = vec(max.(new_lb, lb))
new_ub = vec(min.(new_ub, ub))
new_sample = sample(num_new_samples, new_lb, new_ub, sample_type)
s = zeros(eltype(surr.x[1]), num_new_samples)
for j in 1:num_new_samples
s[j] = surr(new_sample[j])
end
s_max = maximum(s)
s_min = minimum(s)
d_min = norm(box_size .+ 1)
d_max = 0.0
for r in 1:length(surr.x)
for c in 1:num_new_samples
distance_rc = norm(surr.x[r] .- new_sample[c])
if distance_rc > d_max
d_max = distance_rc
end
if distance_rc < d_min
d_min = distance_rc
end
end
end
#3)Evaluate merit function in the sampled points
evaluation_of_merit_function = zeros(float(eltype(surr.x[1])), num_new_samples)
@inbounds for r in 1:num_new_samples
evaluation_of_merit_function[r] = merit_function(new_sample[r], w, surr,
s_max, s_min, d_max, d_min,
box_size)
end
new_addition = false
adaptive_point_x = Tuple{}
diff_x = zeros(eltype(surr.x[1]), d)
while new_addition == false
#find minimum
new_min_y = minimum(evaluation_of_merit_function)
min_index = argmin(evaluation_of_merit_function)
new_min_x = new_sample[min_index]
for l in 1:d
diff_x[l] = norm(surr.x[l] .- new_min_x)
end
bit_x = diff_x .> dtol
#new_min_x has to have some distance from krig.x
if false in bit_x
#The new_point is not actually that new, discard it!
deleteat!(evaluation_of_merit_function, min_index[1])
deleteat!(new_sample, min_index)
if length(new_sample) == 0
println("Out of sampling points")
index = argmin(surr.y)
return (surr.x[index], surr.y[index])
end
else
new_addition = true
adaptive_point_x = Tuple(new_min_x)
end
end
#4) Evaluate objective function at adaptive point
adaptive_point_y = obj(adaptive_point_x)
#5) Update surrogate with (adaptive_point,objective(adaptive_point)
if (needs_gradient)
adaptive_grad = Zygote.gradient(obj, adaptive_point_x)
add_point!(surr, adaptive_point_x, adaptive_point_y, adaptive_grad)
else
add_point!(surr, adaptive_point_x, adaptive_point_y)
end
#6) How to go on?
if surr(adaptive_point_x) < incumbent_value
#success
incumbent_x = adaptive_point_x
incumbent_value = adaptive_point_y
if failure == 0
success += 1
else
failure = 0
success += 1
end
else
#failure
if success == 0
failure += 1
else
success = 0
failure += 1
end
end
if success == 3
scale = scale * 2
if scale > 0.8 * norm(ub - lb)
println("Exiting, scale too big")
index = argmin(surr.y)
return (surr.x[index], surr.y[index])
end
success = 0
failure = 0
end
if failure == 5
scale = scale / 2
#check bounds and go on only if > 1e-5*interval
if scale < 1e-5
println("Exiting, too narrow")
index = argmin(surr.y)
return (surr.x[index], surr.y[index])
end
success = 0
failure = 0
end
end
end
end
"""
SRBF 1D:
surrogate_optimize(obj::Function,::SRBF,lb::Number,ub::Number,surr::AbstractSurrogate,sample_type::SamplingAlgorithm;maxiters=100,num_new_samples=100)
"""
function surrogate_optimize(obj::Function, ::SRBF, lb::Number, ub::Number,
surr::AbstractSurrogate, sample_type::SamplingAlgorithm;
maxiters = 100, num_new_samples = 100)
#Suggested by:
#https://www.mathworks.com/help/gads/surrogate-optimization-algorithm.html
scale = 0.2
success = 0
failure = 0
w_range = [0.3, 0.5, 0.7, 0.95]
box_size = lb - ub
success = 0
failures = 0
dtol = 1e-3 * norm(ub - lb)
num_of_iterations = 0
for w in Iterators.cycle(w_range)
num_of_iterations += 1
if num_of_iterations == maxiters
index = argmin(surr.y)
return (surr.x[index], surr.y[index])
end
for k in 1:maxiters
#1) Sample near incumbent (the 2 fraction is arbitrary here)
incumbent_value = minimum(surr.y)
incumbent_x = surr.x[argmin(surr.y)]
new_lb = incumbent_x - scale * norm(incumbent_x - lb)
new_ub = incumbent_x + scale * norm(incumbent_x - ub)
if new_lb < lb
new_lb = lb
end
if new_ub > ub
new_ub = ub
end
new_sample = sample(num_new_samples, new_lb, new_ub, sample_type)
#2) Create merit function
s = zeros(eltype(surr.x[1]), num_new_samples)
for j in 1:num_new_samples
s[j] = surr(new_sample[j])
end
s_max = maximum(s)
s_min = minimum(s)
d_min = box_size + 1
d_max = 0.0
for r in 1:length(surr.x)
for c in 1:num_new_samples
distance_rc = norm(surr.x[r] - new_sample[c])
if distance_rc > d_max
d_max = distance_rc
end
if distance_rc < d_min
d_min = distance_rc
end
end
end
#3) Evaluate merit function at the sampled points
evaluation_of_merit_function = merit_function.(new_sample, w, surr, s_max,
s_min, d_max, d_min, box_size)
new_addition = false
adaptive_point_x = zero(eltype(new_sample[1]))
while new_addition == false
#find minimum
new_min_y = minimum(evaluation_of_merit_function)
min_index = argmin(evaluation_of_merit_function)
new_min_x = new_sample[min_index]
diff_x = abs.(surr.x .- new_min_x)
bit_x = diff_x .> dtol
#new_min_x has to have some distance from krig.x
if false in bit_x
#The new_point is not actually that new, discard it!
deleteat!(evaluation_of_merit_function, min_index)
deleteat!(new_sample, min_index)
if length(new_sample) == 0
println("Out of sampling points")
index = argmin(surr.y)
return (surr.x[index], surr.y[index])
end
else
new_addition = true
adaptive_point_x = new_min_x
end
end
#4) Evaluate objective function at adaptive point
adaptive_point_y = obj(adaptive_point_x)
#5) Update surrogate with (adaptive_point,objective(adaptive_point)
add_point!(surr, adaptive_point_x, adaptive_point_y)
#6) How to go on?
if surr(adaptive_point_x) < incumbent_value
#success
incumbent_x = adaptive_point_x
incumbent_value = adaptive_point_y
if failure == 0
success += 1
else
failure = 0
success += 1
end
else
#failure
if success == 0
failure += 1
else
success = 0
failure += 1
end
end
if success == 3
scale = scale * 2
#check bounds cannot go more than [a,b]
if scale > 0.8 * norm(ub - lb)
println("Exiting, scale too big")
index = argmin(surr.y)
return (surr.x[index], surr.y[index])
end
success = 0
failure = 0
end
if failure == 5
scale = scale / 2
#check bounds and go on only if > 1e-5*interval
if scale < 1e-5
println("Exiting, too narrow")
index = argmin(surr.y)
return (surr.x[index], surr.y[index])
end
success = 0
failure = 0
end
end
end
end
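# Hedged usage sketch (added for clarity; not original library code). The `sample`
# helper and `SobolSample()` sampler are assumed to be available from the package's
# sampling utilities, so the snippet is left as a comment:
#
#     f(x) = (x - 0.3)^2
#     lb, ub = 0.0, 1.0
#     xs = sample(10, lb, ub, SobolSample())
#     my_k = Kriging(xs, f.(xs), lb, ub)
#     xmin, ymin = surrogate_optimize(f, SRBF(), lb, ub, my_k, SobolSample();
#                                     maxiters = 30, num_new_samples = 50)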
# Ask SRBF ND
function potential_optimal_points(::SRBF, strategy, lb, ub, surr::AbstractSurrogate,
sample_type::SamplingAlgorithm, n_parallel;
num_new_samples = 500)
scale = 0.2
w_range = [0.3, 0.5, 0.7, 0.95]
w_cycle = Iterators.cycle(w_range)
w, state = iterate(w_cycle)
#Vector containing size in each direction
box_size = lb - ub
dtol = 1e-3 * norm(ub - lb)
d = length(surr.x)
incumbent_x = surr.x[argmin(surr.y)]
new_lb = incumbent_x .- 3 * scale * norm(incumbent_x .- lb)
new_ub = incumbent_x .+ 3 * scale * norm(incumbent_x .- ub)
@inbounds for i in 1:length(new_lb)
if new_lb[i] < lb[i]
new_lb = collect(new_lb)
new_lb[i] = lb[i]
end
if new_ub[i] > ub[i]
new_ub = collect(new_ub)
new_ub[i] = ub[i]
end
end
new_sample = sample(num_new_samples, new_lb, new_ub, sample_type)
s = zeros(eltype(surr.x[1]), num_new_samples)
for j in 1:num_new_samples
s[j] = surr(new_sample[j])
end
s_max = maximum(s)
s_min = minimum(s)
d_min = norm(box_size .+ 1)
d_max = 0.0
for r in 1:length(surr.x)
for c in 1:num_new_samples
distance_rc = norm(surr.x[r] .- new_sample[c])
if distance_rc > d_max
d_max = distance_rc
end
if distance_rc < d_min
d_min = distance_rc
end
end
end
tmp_surr = deepcopy(surr)
new_addition = 0
diff_x = zeros(eltype(surr.x[1]), d)
evaluation_of_merit_function = zeros(float(eltype(surr.x[1])), num_new_samples)
proposed_points_x = Vector{typeof(surr.x[1])}(undef, n_parallel)
merit_of_proposed_points = zeros(Float64, n_parallel)
while new_addition < n_parallel
#find minimum
@inbounds for r in eachindex(evaluation_of_merit_function)
evaluation_of_merit_function[r] = merit_function(new_sample[r], w, tmp_surr,
s_max, s_min, d_max, d_min,
box_size)
end
min_index = argmin(evaluation_of_merit_function)
new_min_x = new_sample[min_index]
min_x_merit = evaluation_of_merit_function[min_index]
for l in 1:d
diff_x[l] = norm(surr.x[l] .- new_min_x)
end
bit_x = diff_x .> dtol
#new_min_x has to have some distance from krig.x
if false in bit_x
#The new_point is not actually that new, discard it!
deleteat!(evaluation_of_merit_function, min_index)
deleteat!(new_sample, min_index)
if length(new_sample) == 0
println("Out of sampling points")
index = argmin(surr.y)
return (surr.x[index], surr.y[index])
end
else
new_addition += 1
proposed_points_x[new_addition] = new_min_x
merit_of_proposed_points[new_addition] = min_x_merit
# Update temporary surrogate using provided strategy
calculate_liars(strategy, tmp_surr, surr, new_min_x)
end
#4) Update w
w, state = iterate(w_cycle, state)
end
return (proposed_points_x, merit_of_proposed_points)
end
# Ask SRBF 1D
function potential_optimal_points(::SRBF, strategy, lb::Number, ub::Number,
surr::AbstractSurrogate,
sample_type::SamplingAlgorithm, n_parallel;
num_new_samples = 500)
scale = 0.2
success = 0
w_range = [0.3, 0.5, 0.7, 0.95]
w_cycle = Iterators.cycle(w_range)
w, state = iterate(w_cycle)
box_size = lb - ub
success = 0
failures = 0
dtol = 1e-3 * norm(ub - lb)
num_of_iterations = 0
#1) Sample near incumbent (the 2 fraction is arbitrary here)
incumbent_x = surr.x[argmin(surr.y)]
new_lb = incumbent_x - scale * norm(incumbent_x - lb)
new_ub = incumbent_x + scale * norm(incumbent_x - ub)
if new_lb < lb
new_lb = lb
end
if new_ub > ub
new_ub = ub
end
new_sample = sample(num_new_samples, new_lb, new_ub, sample_type)
#2) Create merit function
s = zeros(eltype(surr.x[1]), num_new_samples)
for j in 1:num_new_samples
s[j] = surr(new_sample[j])
end
s_max = maximum(s)
s_min = minimum(s)
d_min = box_size + 1
d_max = 0.0
for r in 1:length(surr.x)
for c in 1:num_new_samples
distance_rc = norm(surr.x[r] - new_sample[c])
if distance_rc > d_max
d_max = distance_rc
end
if distance_rc < d_min
d_min = distance_rc
end
end
end
new_addition = 0
proposed_points_x = zeros(eltype(new_sample[1]), n_parallel)
merit_of_proposed_points = zeros(eltype(new_sample[1]), n_parallel)
# Temporary surrogate for virtual points
tmp_surr = deepcopy(surr)
# Loop until we have n_parallel new points
while new_addition < n_parallel
#3) Evaluate merit function at the sampled points in parallel
evaluation_of_merit_function = merit_function.(new_sample, w, tmp_surr, s_max,
s_min, d_max, d_min, box_size)
#find minimum
min_index = argmin(evaluation_of_merit_function)
new_min_x = new_sample[min_index]
min_x_merit = evaluation_of_merit_function[min_index]
diff_x = abs.(tmp_surr.x .- new_min_x)
bit_x = diff_x .> dtol
#new_min_x has to have some distance from krig.x
if false in bit_x
#The new_point is not actually that new, discard it!
deleteat!(evaluation_of_merit_function, min_index)
deleteat!(new_sample, min_index)
if length(new_sample) == 0
println("Out of sampling points")
return (proposed_points_x[1:new_addition],
merit_of_proposed_points[1:new_addition])
end
else
new_addition += 1
proposed_points_x[new_addition] = new_min_x
merit_of_proposed_points[new_addition] = min_x_merit
# Update temporary surrogate using provided strategy
calculate_liars(strategy, tmp_surr, surr, new_min_x)
end
#4) Update w
w, state = iterate(w_cycle, state)
end
return (proposed_points_x, merit_of_proposed_points)
end
"""
This is an implementation of Lower Confidence Bound (LCB),
a popular acquisition function in Bayesian optimization.
Under a Gaussian process (GP) prior, the goal is to minimize:
``LCB(x) := E[x] - k * \\sqrt{V[x]}``
with default value ``k = 2``.
"""
function surrogate_optimize(obj::Function, ::LCBS, lb::Number, ub::Number, krig,
sample_type::SamplingAlgorithm; maxiters = 100,
num_new_samples = 100, k = 2.0)
dtol = 1e-3 * norm(ub - lb)
for i in 1:maxiters
new_sample = sample(num_new_samples, lb, ub, sample_type)
evaluations = zeros(eltype(krig.x[1]), num_new_samples)
for j in 1:num_new_samples
evaluations[j] = krig(new_sample[j]) +
k * std_error_at_point(krig, new_sample[j])
end
new_addition = false
min_add_x = zero(eltype(new_sample[1]))
min_add_y = zero(eltype(krig.y[1]))
while new_addition == false
#find minimum
new_min_y = minimum(evaluations)
min_index = argmin(evaluations)
new_min_x = new_sample[min_index]
diff_x = abs.(krig.x .- new_min_x)
bit_x = diff_x .> dtol
#new_min_x has to have some distance from krig.x
if false in bit_x
#The new_point is not actually that new, discard it!
deleteat!(evaluations, min_index)
deleteat!(new_sample, min_index)
if length(new_sample) == 0
println("Out of sampling points")
index = argmin(krig.y)
return (krig.x[index], krig.y[index])
end
else
new_addition = true
min_add_x = new_min_x
min_add_y = new_min_y
end
end
if min_add_y < 1e-6 * (maximum(krig.y) - minimum(krig.y))
return
else
            if isinf(min_add_y) || isnan(min_add_y)
println("New point being added is +Inf or NaN, skipping.\n")
else
add_point!(krig, min_add_x, min_add_y)
end
end
end
end
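# Hedged usage sketch (added for clarity; not original library code). The `sample`
# helper and `SobolSample()` sampler are assumed to be available from the package's
# sampling utilities, so the snippet is left as a comment:
#
#     f(x) = (x - 0.3)^2
#     xs = sample(10, 0.0, 1.0, SobolSample())
#     my_k = Kriging(xs, f.(xs), 0.0, 1.0)
#     surrogate_optimize(f, LCBS(), 0.0, 1.0, my_k, SobolSample(); maxiters = 30, k = 2.0)
#
# In the loop above, the candidate score that gets minimized is computed as
# `krig(x) + k * std_error_at_point(krig, x)`.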
"""
This is an implementation of Lower Confidence Bound (LCB),
a popular acquisition function in Bayesian optimization.
Under a Gaussian process (GP) prior, the goal is to minimize:
``LCB(x) := E[x] - k * \\sqrt{V[x]}``
with default value ``k = 2``.
"""
function surrogate_optimize(obj::Function, ::LCBS, lb, ub, krig,
sample_type::SamplingAlgorithm; maxiters = 100,
num_new_samples = 100, k = 2.0)
dtol = 1e-3 * norm(ub - lb)
for i in 1:maxiters
d = length(krig.x)
new_sample = sample(num_new_samples, lb, ub, sample_type)
evaluations = zeros(eltype(krig.x[1]), num_new_samples)
for j in 1:num_new_samples
evaluations[j] = krig(new_sample[j]) +
k * std_error_at_point(krig, new_sample[j])
end
new_addition = false
min_add_x = Tuple{}
min_add_y = zero(eltype(krig.y[1]))
diff_x = zeros(eltype(krig.x[1]), d)
while new_addition == false
#find minimum
new_min_y = minimum(evaluations)
min_index = argmin(evaluations)
new_min_x = new_sample[min_index]
for l in 1:d
diff_x[l] = norm(krig.x[l] .- new_min_x)
end
bit_x = diff_x .> dtol
#new_min_x has to have some distance from krig.x
if false in bit_x
#The new_point is not actually that new, discard it!
deleteat!(evaluations, min_index)
deleteat!(new_sample, min_index)
if length(new_sample) == 0
println("Out of sampling points")
index = argmin(krig.y)
return (krig.x[index], krig.y[index])
end
else
new_addition = true
min_add_x = new_min_x
min_add_y = new_min_y
end
end
if min_add_y < 1e-6 * (maximum(krig.y) - minimum(krig.y))
index = argmin(krig.y)
return (krig.x[index], krig.y[index])
else
min_add_y = obj(min_add_x) # I actually add the objc function at that point
            if isinf(min_add_y) || isnan(min_add_y)
println("New point being added is +Inf or NaN, skipping.\n")
else
add_point!(krig, Tuple(min_add_x), min_add_y)
end
end
end
end
"""
Expected improvement method 1D
"""
function surrogate_optimize(obj::Function, ::EI, lb::Number, ub::Number, krig,
sample_type::SamplingAlgorithm; maxiters = 100,
num_new_samples = 100)
dtol = 1e-3 * norm(ub - lb)
eps = 0.01
for i in 1:maxiters
# Sample lots of points from the design space -- we will evaluate the EI function at these points
new_sample = sample(num_new_samples, lb, ub, sample_type)
# Find the best point so far
f_min = minimum(krig.y)
# Allocate some arrays
evaluations = zeros(eltype(krig.x[1]), num_new_samples) # Holds EI function evaluations
point_found = false # Whether we have found a new point to test
new_x_max = zero(eltype(krig.x[1])) # New x point
new_EI_max = zero(eltype(krig.x[1])) # EI at new x point
while point_found == false
# For each point in the sample set, evaluate the Expected Improvement function
for j in 1:length(new_sample)
std = std_error_at_point(krig, new_sample[j])
u = krig(new_sample[j])
if abs(std) > 1e-6
z = (f_min - u - eps) / std
else
z = 0
end
# Evaluate EI at point new_sample[j]
evaluations[j] = (f_min - u - eps) * cdf(Normal(), z) +
std * pdf(Normal(), z)
end
# find the sample which maximizes the EI function
index_max = argmax(evaluations)
x_new = new_sample[index_max] # x point which maximized EI
y_new = maximum(evaluations) # EI at the new point
diff_x = abs.(krig.x .- x_new)
bit_x = diff_x .> dtol
#new_min_x has to have some distance from krig.x
if false in bit_x
#The new_point is not actually that new, discard it!
deleteat!(evaluations, index_max)
deleteat!(new_sample, index_max)
if length(new_sample) == 0
println("Out of sampling points")
index = argmin(krig.y)
return (krig.x[index], krig.y[index])
end
else
point_found = true
new_x_max = x_new
new_EI_max = y_new
end
end
# if the EI is less than some tolerance times the difference between the maximum and minimum points
# in the surrogate, then we terminate the optimizer.
if new_EI_max < 1e-6 * norm(maximum(krig.y) - minimum(krig.y))
index = argmin(krig.y)
println("Termination tolerance reached.")
return (krig.x[index], krig.y[index])
end
# Otherwise, evaluate the true objective function at the new point and repeat.
add_point!(krig, new_x_max, obj(new_x_max))
end
println("Completed maximum number of iterations")
end
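# Hedged usage sketch (added for clarity; not original library code). The `sample`
# helper and `SobolSample()` sampler are assumed to be available from the package's
# sampling utilities, so the snippet is left as a comment:
#
#     f(x) = (x - 0.3)^2
#     xs = sample(10, 0.0, 1.0, SobolSample())
#     my_k = Kriging(xs, f.(xs), 0.0, 1.0)
#     surrogate_optimize(f, EI(), 0.0, 1.0, my_k, SobolSample(); maxiters = 50)
#
# The loop above scores each candidate with the closed-form expected improvement
# (f_min - u - eps) * cdf(Normal(), z) + std * pdf(Normal(), z), where u and std are the
# surrogate prediction and standard error at the candidate and z = (f_min - u - eps) / std.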
# Ask EI 1D & ND
function potential_optimal_points(::EI, strategy, lb, ub, krig,
sample_type::SamplingAlgorithm, n_parallel::Number;
num_new_samples = 100)
lb = krig.lb
ub = krig.ub
dtol = 1e-3 * norm(ub - lb)
eps = 0.01
tmp_krig = deepcopy(krig) # Temporary copy of the kriging model to store virtual points
new_x_max = Vector{typeof(tmp_krig.x[1])}(undef, n_parallel) # New x point
new_EI_max = zeros(eltype(tmp_krig.x[1]), n_parallel) # EI at new x point
for i in 1:n_parallel
# Sample lots of points from the design space -- we will evaluate the EI function at these points
new_sample = sample(num_new_samples, lb, ub, sample_type)
# Find the best point so far
f_min = minimum(tmp_krig.y)
# Allocate some arrays
evaluations = zeros(eltype(tmp_krig.x[1]), num_new_samples) # Holds EI function evaluations
point_found = false # Whether we have found a new point to test
while point_found == false
# For each point in the sample set, evaluate the Expected Improvement function
for j in eachindex(new_sample)
std = std_error_at_point(tmp_krig, new_sample[j])
u = tmp_krig(new_sample[j])
if abs(std) > 1e-6
z = (f_min - u - eps) / std
else
z = 0
end
# Evaluate EI at point new_sample[j]
evaluations[j] = (f_min - u - eps) * cdf(Normal(), z) +
std * pdf(Normal(), z)
end
# find the sample which maximizes the EI function
index_max = argmax(evaluations)
x_new = new_sample[index_max] # x point which maximized EI
y_new = maximum(evaluations) # EI at the new point
diff_x = [norm(prev_point .- x_new) for prev_point in tmp_krig.x]
bit_x = [diff_x_point .> dtol for diff_x_point in diff_x]
#new_min_x has to have some distance from tmp_krig.x
if false in bit_x
#The new_point is not actually that new, discard it!
deleteat!(evaluations, index_max)
deleteat!(new_sample, index_max)
if length(new_sample) == 0
println("Out of sampling points")
index = argmin(tmp_krig.y)
return (tmp_krig.x[index], tmp_krig.y[index])
end
else
point_found = true
new_x_max[i] = x_new
new_EI_max[i] = y_new
calculate_liars(strategy, tmp_krig, krig, x_new)
end
end
end
return (new_x_max, new_EI_max)
end
"""
This is an implementation of Expected Improvement (EI),
arguably the most popular acquisition function in Bayesian optimization.
Under a Gaussian process (GP) prior, the goal is to
maximize expected improvement:
``EI(x) := E[max(f_{best}-f(x),0)]``
"""
function surrogate_optimize(obj::Function, ::EI, lb, ub, krig,
sample_type::SamplingAlgorithm; maxiters = 100,
num_new_samples = 100)
dtol = 1e-3 * norm(ub - lb)
eps = 0.01
for i in 1:maxiters
d = length(krig.x)
# Sample lots of points from the design space -- we will evaluate the EI function at these points
new_sample = sample(num_new_samples, lb, ub, sample_type)
# Find the best point so far
f_min = minimum(krig.y)
# Allocate some arrays
evaluations = zeros(eltype(krig.x[1]), num_new_samples) # Holds EI function evaluations
point_found = false # Whether we have found a new point to test
new_x_max = zero(eltype(krig.x[1])) # New x point
new_EI_max = zero(eltype(krig.x[1])) # EI at new x point
diff_x = zeros(eltype(krig.x[1]), d)
while point_found == false
# For each point in the sample set, evaluate the Expected Improvement function
for j in 1:length(new_sample)
std = std_error_at_point(krig, new_sample[j])
u = krig(new_sample[j])
if abs(std) > 1e-6
z = (f_min - u - eps) / std
else
z = 0
end
# Evaluate EI at point new_sample[j]
evaluations[j] = (f_min - u - eps) * cdf(Normal(), z) +
std * pdf(Normal(), z)
end
# find the sample which maximizes the EI function
index_max = argmax(evaluations)
x_new = new_sample[index_max] # x point which maximized EI
EI_new = maximum(evaluations) # EI at the new point
for l in 1:d
diff_x[l] = norm(krig.x[l] .- x_new)
end
bit_x = diff_x .> dtol
#new_min_x has to have some distance from krig.x
if false in bit_x
#The new_point is not actually that new, discard it!
deleteat!(evaluations, index_max)
deleteat!(new_sample, index_max)
if length(new_sample) == 0
println("Out of sampling points.")
index = argmin(krig.y)
return (krig.x[index], krig.y[index])
end
else
point_found = true
new_x_max = x_new
new_EI_max = EI_new
end
end
# if the EI is less than some tolerance times the difference between the maximum and minimum points
# in the surrogate, then we terminate the optimizer.
if new_EI_max < 1e-6 * norm(maximum(krig.y) - minimum(krig.y))
index = argmin(krig.y)
println("Termination tolerance reached.")
return (krig.x[index], krig.y[index])
end
# Otherwise, evaluate the true objective function at the new point and repeat.
add_point!(krig, Tuple(new_x_max), obj(new_x_max))
end
println("Completed maximum number of iterations.")
end
function adjust_step_size(sigma_n, sigma_min, C_success, t_success, C_fail, t_fail)
if C_success >= t_success
sigma_n = 2 * sigma_n
C_success = 0
end
if C_fail >= t_fail
sigma_n = max(sigma_n / 2, sigma_min)
C_fail = 0
end
return sigma_n, C_success, C_fail
end
function select_evaluation_point_1D(new_points1, surr1::AbstractSurrogate, numb_iters,
maxiters)
v = [0.3, 0.5, 0.8, 0.95]
k = 4
n = length(surr1.x)
if mod(maxiters - 1, 4) != 0
w_nR = v[mod(maxiters - 1, 4)]
else
w_nR = v[4]
end
w_nD = 1 - w_nR
l = length(new_points1)
evaluations1 = zeros(eltype(surr1.y[1]), l)
for i in 1:l
evaluations1[i] = surr1(new_points1[i])
end
s_max = maximum(evaluations1)
s_min = minimum(evaluations1)
V_nR = zeros(eltype(surr1.y[1]), l)
for i in 1:l
if abs(s_max - s_min) <= 10e-6
V_nR[i] = 1.0
else
V_nR[i] = (evaluations1[i] - s_min) / (s_max - s_min)
end
end
#Compute score V_nD
V_nD = zeros(eltype(surr1.y[1]), l)
delta_n_x = zeros(eltype(surr1.x[1]), l)
delta = zeros(eltype(surr1.x[1]), n)
for j in 1:l
for i in 1:n
delta[i] = norm(new_points1[j] - surr1.x[i])
end
delta_n_x[j] = minimum(delta)
end
delta_n_max = maximum(delta_n_x)
delta_n_min = minimum(delta_n_x)
for i in 1:l
if abs(delta_n_max - delta_n_min) <= 10e-6
V_nD[i] = 1.0
else
V_nD[i] = (delta_n_max - delta_n_x[i]) / (delta_n_max - delta_n_min)
end
end
#Compute weighted score
W_n = w_nR * V_nR + w_nD * V_nD
return new_points1[argmin(W_n)]
end
"""
surrogate_optimize(obj::Function,::DYCORS,lb::Number,ub::Number,surr1::AbstractSurrogate,sample_type::SamplingAlgorithm;maxiters=100,num_new_samples=100)
DYCORS optimization method in 1D, following closely: "Combining radial basis function
surrogates and dynamic coordinate search in high-dimensional expensive black-box optimization".
"""
function surrogate_optimize(obj::Function, ::DYCORS, lb::Number, ub::Number,
surr1::AbstractSurrogate, sample_type::SamplingAlgorithm;
maxiters = 100, num_new_samples = 100)
x_best = argmin(surr1.y)
y_best = minimum(surr1.y)
sigma_n = 0.2 * norm(ub - lb)
d = length(lb)
sigma_min = 0.2 * (0.5)^6 * norm(ub - lb)
t_success = 3
t_fail = max(d, 5)
C_success = 0
C_fail = 0
for k in 1:maxiters
p_select = min(20 / d, 1) * (1 - log(k)) / log(maxiters - 1)
# In 1D I_perturb is always equal to one, no need to sample
d = 1
I_perturb = d
new_points = zeros(eltype(surr1.x[1]), num_new_samples)
for i in 1:num_new_samples
new_points[i] = x_best + rand(Normal(0, sigma_n))
while new_points[i] < lb || new_points[i] > ub
if new_points[i] > ub
#reflection
new_points[i] = max(lb,
maximum(surr1.x) -
norm(new_points[i] - maximum(surr1.x)))
end
if new_points[i] < lb
#reflection
new_points[i] = min(ub,
minimum(surr1.x) +
norm(new_points[i] - minimum(surr1.x)))
end
end
end
x_new = select_evaluation_point_1D(new_points, surr1, k, maxiters)
f_new = obj(x_new)
if f_new < y_best
C_success = C_success + 1
C_fail = 0
else
C_fail = C_fail + 1
C_success = 0
end
sigma_n, C_success, C_fail = adjust_step_size(sigma_n, sigma_min, C_success,
t_success, C_fail, t_fail)
if f_new < y_best
x_best = x_new
y_best = f_new
end
add_point!(surr1, x_new, f_new)
end
index = argmin(surr1.y)
return (surr1.x[index], surr1.y[index])
end
function select_evaluation_point_ND(new_points, surrn::AbstractSurrogate, numb_iters,
maxiters)
v = [0.3, 0.5, 0.8, 0.95]
k = 4
n = size(surrn.x, 1)
d = size(surrn.x, 2)
if mod(maxiters - 1, 4) != 0
w_nR = v[mod(maxiters - 1, 4)]
else
w_nR = v[4]
end
w_nD = 1 - w_nR
l = size(new_points, 1)
evaluations = zeros(eltype(surrn.y[1]), l)
for i in 1:l
evaluations[i] = surrn(Tuple(new_points[i, :]))
end
s_max = maximum(evaluations)
s_min = minimum(evaluations)
V_nR = zeros(eltype(surrn.y[1]), l)
for i in 1:l
if abs(s_max - s_min) <= 10e-6
V_nR[i] = 1.0
else
V_nR[i] = (evaluations[i] - s_min) / (s_max - s_min)
end
end
#Compute score V_nD
V_nD = zeros(eltype(surrn.y[1]), l)
delta_n_x = zeros(eltype(surrn.x[1]), l)
delta = zeros(eltype(surrn.x[1]), n)
for j in 1:l
for i in 1:n
delta[i] = norm(new_points[j, :] - collect(surrn.x[i]))
end
delta_n_x[j] = minimum(delta)
end
delta_n_max = maximum(delta_n_x)
delta_n_min = minimum(delta_n_x)
for i in 1:l
if abs(delta_n_max - delta_n_min) <= 10e-6
V_nD[i] = 1.0
else
V_nD[i] = (delta_n_max - delta_n_x[i]) / (delta_n_max - delta_n_min)
end
end
#Compute weighted score
W_n = w_nR * V_nR + w_nD * V_nD
return new_points[argmin(W_n), :]
end
"""
surrogate_optimize(obj::Function,::DYCORS,lb,ub,surrn::AbstractSurrogate,sample_type::SamplingAlgorithm;maxiters=100,num_new_samples=100)
This is an implementation of the DYCORS strategy by Regis and Shoemaker:
Rommel G Regis and Christine A Shoemaker.
Combining radial basis function surrogates and dynamic coordinate search in high-dimensional expensive black-box optimization.
Engineering Optimization, 45(5): 529–555, 2013.
This is an extension of the SRBF strategy that changes how the
candidate points are generated. The main idea is that many objective
functions depend only on a few directions, so it may be advantageous to
perturb only a few directions. In particular, we use a perturbation probability
to perturb a given coordinate and decrease this probability after each function
evaluation, so fewer coordinates are perturbed later in the optimization.
"""
function surrogate_optimize(obj::Function, ::DYCORS, lb, ub, surrn::AbstractSurrogate,
sample_type::SamplingAlgorithm; maxiters = 100,
num_new_samples = 100)
x_best = collect(surrn.x[argmin(surrn.y)])
y_best = minimum(surrn.y)
sigma_n = 0.2 * norm(ub - lb)
d = length(lb)
sigma_min = 0.2 * (0.5)^6 * norm(ub - lb)
t_success = 3
t_fail = max(d, 5)
C_success = 0
C_fail = 0
for k in 1:maxiters
p_select = min(20 / d, 1) * (1 - log(k)) / log(maxiters - 1)
new_points = zeros(eltype(surrn.x[1]), num_new_samples, d)
for j in 1:num_new_samples
w = sample(d, 0, 1, sample_type)
I_perturb = w .< p_select
if ~(true in I_perturb)
val = rand(1:d)
I_perturb = vcat(zeros(Int, val - 1), 1, zeros(Int, d - val))
end
I_perturb = Int.(I_perturb)
for i in 1:d
if I_perturb[i] == 1
new_points[j, i] = x_best[i] + rand(Normal(0, sigma_n))
else
new_points[j, i] = x_best[i]
end
end
end
for i in 1:num_new_samples
for j in 1:d
while new_points[i, j] < lb[j] || new_points[i, j] > ub[j]
if new_points[i, j] > ub[j]
new_points[i, j] = max(lb[j],
maximum(surrn.x)[j] -
norm(new_points[i, j] - maximum(surrn.x)[j]))
end
if new_points[i, j] < lb[j]
new_points[i, j] = min(ub[j],
minimum(surrn.x)[j] +
norm(new_points[i] - minimum(surrn.x)[j]))
end
end
end
end
#ND version
x_new = select_evaluation_point_ND(new_points, surrn, k, maxiters)
f_new = obj(x_new)
if f_new < y_best
C_success = C_success + 1
C_fail = 0
else
C_fail = C_fail + 1
C_success = 0
end
sigma_n, C_success, C_fail = adjust_step_size(sigma_n, sigma_min, C_success,
t_success, C_fail, t_fail)
if f_new < y_best
x_best = x_new
y_best = f_new
end
add_point!(surrn, Tuple(x_new), f_new)
end
index = argmin(surrn.y)
return (surrn.x[index], surrn.y[index])
end
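# Hedged usage sketch (added for clarity; not original library code). The `sample`
# helper and `SobolSample()` sampler are assumed to be available from the package's
# sampling utilities, so the snippet is left as a comment:
#
#     f(x) = x[1]^2 + x[2]^2
#     lb, ub = [-1.0, -1.0], [1.0, 1.0]
#     xs = sample(15, lb, ub, SobolSample())
#     my_k = Kriging(xs, [f(x) for x in xs], lb, ub)
#     surrogate_optimize(f, DYCORS(), lb, ub, my_k, SobolSample(); maxiters = 30)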
function obj2_1D(value, points)
min = +Inf
my_p = filter(x -> abs(x - value) > 10^-6, points)
for i in 1:length(my_p)
new_val = norm(my_p[i] - value)
if new_val < min
min = new_val
end
end
return min
end
function I_tier_ranking_1D(P, surrSOP::AbstractSurrogate)
#obj1 = objective_function
#obj2 = obj2_1D
Fronts = Dict{Int, Array{eltype(surrSOP.x[1]), 1}}()
i = 1
while true
F = []
j = 1
for p in P
n_p = 0
k = 1
for q in P
#I use equality with floats because p and q are in surrSOP.x
#for sure at this stage
p_index = j
q_index = k
val1_p = surrSOP.y[p_index]
val2_p = obj2_1D(p, P)
val1_q = surrSOP.y[q_index]
val2_q = obj2_1D(q, P)
p_dominates_q = (val1_p < val1_q || abs(val1_p - val1_q) <= 10^-5) &&
(val2_p < val2_q || abs(val2_p - val2_q) <= 10^-5) &&
((val1_p < val1_q) || (val2_p < val2_q))
q_dominates_p = (val1_p < val1_q || abs(val1_p - val1_q) < 10^-5) &&
(val2_p < val2_q || abs(val2_p - val2_q) < 10^-5) &&
((val1_p < val1_q) || (val2_p < val2_q))
if q_dominates_p
n_p += 1
end
k = k + 1
end
if n_p == 0
# no individual dominates p
push!(F, p)
end
j = j + 1
end
if length(F) > 0
Fronts[i] = F
P = setdiff(P, F)
i = i + 1
else
return Fronts
end
end
return F
end
function II_tier_ranking_1D(D::Dict, srg::AbstractSurrogate)
for i in 1:length(D)
pos = []
yn = []
for j in 1:length(D[i])
push!(pos, findall(e -> e == D[i][j], srg.x))
push!(yn, srg.y[pos[j]])
end
D[i] = D[i][sortperm(D[i])]
end
return D
end
function Hypervolume_Pareto_improving(f1_new, f2_new, Pareto_set)
if size(Pareto_set, 1) == 1
area_before = zero(eltype(f1_new))
else
my_p = Pareto_set
#Area before
v_ref = [maximum(Pareto_set[:, 1]), maximum(Pareto_set[:, 2])]
my_p = vcat(my_p, v_ref)
v = sortperm(my_p[:, 2])
my_p[:, 1] = my_p[:, 1][v]
my_p[:, 2] = my_p[:, 2][v]
area_before = zero(eltype(f1_new))
for j in 1:(length(v) - 1)
            area_before += (my_p[j + 1, 2] - my_p[j, 2]) * (v_ref[1] - my_p[j, 1])
end
end
#Area after
Pareto_set = vcat(Pareto_set, [f1_new f2_new])
v_ref = [maximum(Pareto_set[:, 1]) maximum(Pareto_set[:, 2])]
Pareto_set = vcat(Pareto_set, v_ref)
v = sortperm(Pareto_set[:, 2])
Pareto_set[:, 1] = Pareto_set[:, 1][v]
Pareto_set[:, 2] = Pareto_set[:, 2][v]
area_after = zero(eltype(f1_new))
for j in 1:(length(v) - 1)
        area_after += (Pareto_set[j + 1, 2] - Pareto_set[j, 2]) * (v_ref[1] - Pareto_set[j, 1])
end
return area_after - area_before
end
"""
surrogate_optimize(obj::Function,::SOP,lb::Number,ub::Number,surr::AbstractSurrogate,sample_type::SamplingAlgorithm;maxiters=100,num_new_samples=100)
SOP surrogate optimization method, following closely these papers:
- SOP: parallel surrogate global optimization with Pareto center selection for computationally expensive single objective problems by Tipaluck Krityakierne
- Multiobjective Optimization Using Evolutionary Algorithms by Kalyan Deb
#Suggested number of new_samples = min(500*d,5000)
"""
function surrogate_optimize(obj::Function, sop1::SOP, lb::Number, ub::Number,
surrSOP::AbstractSurrogate, sample_type::SamplingAlgorithm;
maxiters = 100, num_new_samples = min(500 * 1, 5000))
d = length(lb)
N_fail = 3
N_tenure = 5
tau = 10^-5
num_P = sop1.p
centers_global = surrSOP.x
r_centers_global = 0.2 * norm(ub - lb) * ones(length(surrSOP.x))
N_failures_global = zeros(length(surrSOP.x))
tabu = []
N_tenures_tabu = []
for k in 1:maxiters
N_tenures_tabu .+= 1
#deleting points that have been in tabu for too long
del = N_tenures_tabu .> N_tenure
if length(del) > 0
for i in 1:length(del)
if del[i]
del[i] = i
end
end
deleteat!(N_tenures_tabu, del)
deleteat!(tabu, del)
end
##### P CENTERS ######
C = []
#S(x) set of points already evaluated
#Rank points in S with:
#1) Non dominated sorting
Fronts_I = I_tier_ranking_1D(centers_global, surrSOP)
#2) Second tier ranking
Fronts = II_tier_ranking_1D(Fronts_I, surrSOP)
ranked_list = []
for i in 1:length(Fronts)
for j in 1:length(Fronts[i])
push!(ranked_list, Fronts[i][j])
end
end
ranked_list = eltype(surrSOP.x[1]).(ranked_list)
centers_full = 0
i = 1
while i <= length(ranked_list) && centers_full == 0
flag = 0
for j in 1:length(ranked_list)
for m in 1:length(tabu)
if abs(ranked_list[j] - tabu[m]) < tau
flag = 1
end
end
for l in 1:length(centers_global)
if abs(ranked_list[j] - centers_global[l]) < tau
flag = 1
end
end
end
if flag == 1
                nothing # candidate too close to a tabu point or an existing center; skip it
else
push!(C, ranked_list[i])
if length(C) == num_P
centers_full = 1
end
end
i = i + 1
end
#I examined all the points in the ranked list but num_selected < num_p
#I just iterate again using only radius rule
if length(C) < num_P
i = 1
while i <= length(ranked_list) && centers_full == 0
flag = 0
for j in 1:length(ranked_list)
for m in 1:length(centers_global)
if abs(centers_global[j] - ranked_list[m]) < tau
flag = 1
end
end
end
if flag == 1
                    nothing # candidate too close to an existing center; skip it
else
push!(C, ranked_list[i])
if length(C) == num_P
centers_full = 1
end
end
i = i + 1
end
end
#If I still have num_selected < num_P, I double down on some centers iteratively
if length(C) < num_P
i = 1
while i <= length(ranked_list)
push!(C, ranked_list[i])
if length(C) == num_P
centers_full = 1
end
i = i + 1
end
end
#Here I have selected C = [] containing the centers
r_centers = 0.2 * norm(ub - lb) * ones(num_P)
N_failures = zeros(num_P)
#2.3 Candidate search
new_points = zeros(eltype(surrSOP.x[1]), num_P, 2)
for i in 1:num_P
N_candidates = zeros(eltype(surrSOP.x[1]), num_new_samples)
#Using phi(n) just like DYCORS, merit function = surrogate
#Like in DYCORS, I_perturb = 1 always
evaluations = zeros(eltype(surrSOP.y[1]), num_new_samples)
for j in 1:num_new_samples
a = lb - C[i]
b = ub - C[i]
N_candidates[j] = C[i] + rand(truncated(Normal(0, r_centers[i]), a, b))
evaluations[j] = surrSOP(N_candidates[j])
end
x_best = N_candidates[argmin(evaluations)]
y_best = minimum(evaluations)
new_points[i, 1] = x_best
new_points[i, 2] = y_best
end
#new_points[i] now contains:
#[x_1,y_1; x_2,y_2,...,x_{num_new_samples},y_{num_new_samples}]
#2.4 Adaptive learning and tabu archive
for i in 1:num_P
if new_points[i, 1] in centers_global
r_centers[i] = r_centers_global[i]
N_failures[i] = N_failures_global[i]
end
f_1 = obj(new_points[i, 1])
f_2 = obj2_1D(f_1, surrSOP.x)
l = length(Fronts[1])
Pareto_set = zeros(eltype(surrSOP.x[1]), l, 2)
for j in 1:l
val = obj2_1D(Fronts[1][j], surrSOP.x)
Pareto_set[j, 1] = obj(Fronts[1][j])
Pareto_set[j, 2] = val
end
if (Hypervolume_Pareto_improving(f_1, f_2, Pareto_set) < tau)
#failure
r_centers[i] = r_centers[i] / 2
N_failures[i] += 1
if N_failures[i] > N_fail
push!(tabu, C[i])
push!(N_tenures_tabu, 0)
end
else
#P_i is success
#Adaptive_learning
add_point!(surrSOP, new_points[i, 1], new_points[i, 2])
push!(r_centers_global, r_centers[i])
push!(N_failures_global, N_failures[i])
end
end
end
index = argmin(surrSOP.y)
return (surrSOP.x[index], surrSOP.y[index])
end
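# Hedged usage sketch (added for clarity; not original library code). The `sample`
# helper and `SobolSample()` sampler are assumed to be available from the package's
# sampling utilities, so the snippet is left as a comment. `SOP(5)` sets the number of
# centers selected per iteration:
#
#     f(x) = (x - 0.3)^2
#     xs = sample(20, 0.0, 1.0, SobolSample())
#     my_k = Kriging(xs, f.(xs), 0.0, 1.0)
#     surrogate_optimize(f, SOP(5), 0.0, 1.0, my_k, SobolSample(); maxiters = 15)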
function obj2_ND(value, points)
min = +Inf
my_p = filter(x -> norm(x .- value) > 10^-6, points)
for i in 1:length(my_p)
new_val = norm(my_p[i] .- value)
if new_val < min
min = new_val
end
end
return min
end
function I_tier_ranking_ND(P, surrSOPD::AbstractSurrogate)
#obj1 = objective_function
#obj2 = obj2_1D
Fronts = Dict{Int, Array{eltype(surrSOPD.x), 1}}()
i = 1
while true
F = Array{eltype(surrSOPD.x), 1}()
j = 1
for p in P
n_p = 0
k = 1
for q in P
#I use equality with floats because p and q are in surrSOP.x
#for sure at this stage
p_index = j
q_index = k
val1_p = surrSOPD.y[p_index]
val2_p = obj2_ND(p, P)
val1_q = surrSOPD.y[q_index]
val2_q = obj2_ND(q, P)
p_dominates_q = (val1_p < val1_q || abs(val1_p - val1_q) <= 10^-5) &&
(val2_p < val2_q || abs(val2_p - val2_q) <= 10^-5) &&
((val1_p < val1_q) || (val2_p < val2_q))
q_dominates_p = (val1_p < val1_q || abs(val1_p - val1_q) < 10^-5) &&
(val2_p < val2_q || abs(val2_p - val2_q) < 10^-5) &&
((val1_p < val1_q) || (val2_p < val2_q))
if q_dominates_p
n_p += 1
end
k = k + 1
end
if n_p == 0
# no individual dominates p
push!(F, p)
end
j = j + 1
end
if length(F) > 0
Fronts[i] = F
P = setdiff(P, F)
i = i + 1
else
return Fronts
end
end
return F
end
function II_tier_ranking_ND(D::Dict, srgD::AbstractSurrogate)
for i in 1:length(D)
pos = []
yn = []
for j in 1:length(D[i])
push!(pos, findall(e -> e == D[i][j], srgD.x))
push!(yn, srgD.y[pos[j]])
end
D[i] = D[i][sortperm(D[i])]
end
return D
end
function surrogate_optimize(obj::Function, sopd::SOP, lb, ub, surrSOPD::AbstractSurrogate,
sample_type::SamplingAlgorithm; maxiters = 100,
num_new_samples = min(500 * length(lb), 5000))
d = length(lb)
N_fail = 3
N_tenure = 5
tau = 10^-5
num_P = sopd.p
centers_global = surrSOPD.x
r_centers_global = 0.2 * norm(ub .- lb) * ones(length(surrSOPD.x))
N_failures_global = zeros(length(surrSOPD.x))
tabu = []
N_tenures_tabu = []
for k in 1:maxiters
N_tenures_tabu .+= 1
#deleting points that have been in tabu for too long
        del = N_tenures_tabu .> N_tenure
        if any(del)
            #deleteat! accepts the boolean mask directly
            deleteat!(N_tenures_tabu, del)
            deleteat!(tabu, del)
        end
##### P CENTERS ######
C = Array{eltype(surrSOPD.x), 1}()
#S(x) set of points already evaluated
#Rank points in S with:
#1) Non dominated sorting
Fronts_I = I_tier_ranking_ND(centers_global, surrSOPD)
#2) Second tier ranking
Fronts = II_tier_ranking_ND(Fronts_I, surrSOPD)
ranked_list = Array{eltype(surrSOPD.x), 1}()
for i in 1:length(Fronts)
for j in 1:length(Fronts[i])
push!(ranked_list, Fronts[i][j])
end
end
centers_full = 0
i = 1
while i <= length(ranked_list) && centers_full == 0
flag = 0
for j in 1:length(ranked_list)
for m in 1:length(tabu)
if norm(ranked_list[j] .- tabu[m]) < tau
flag = 1
end
end
for l in 1:length(centers_global)
if norm(ranked_list[j] .- centers_global[l]) < tau
flag = 1
end
end
end
if flag == 1
                #candidate is too close to a tabu point or an existing center; skip it
else
push!(C, ranked_list[i])
if length(C) == num_P
centers_full = 1
end
end
i = i + 1
end
#I examined all the points in the ranked list but num_selected < num_p
#I just iterate again using only radius rule
if length(C) < num_P
i = 1
while i <= length(ranked_list) && centers_full == 0
flag = 0
for j in 1:length(ranked_list)
for m in 1:length(centers_global)
                        if norm(ranked_list[j] .- centers_global[m]) < tau
flag = 1
end
end
end
if flag == 1
                    #candidate is too close to an existing center; skip it
else
push!(C, ranked_list[i])
if length(C) == num_P
centers_full = 1
end
end
i = i + 1
end
end
#If I still have num_selected < num_P, I double down on some centers iteratively
if length(C) < num_P
i = 1
while i <= length(ranked_list)
push!(C, ranked_list[i])
if length(C) == num_P
centers_full = 1
end
i = i + 1
end
end
#Here I have selected C = [(1.0,2.0),(3.0,4.0),.....] containing the centers
r_centers = 0.2 * norm(ub .- lb) * ones(num_P)
N_failures = zeros(num_P)
#2.3 Candidate search
new_points_x = Array{eltype(surrSOPD.x), 1}()
new_points_y = zeros(eltype(surrSOPD.y[1]), num_P)
for i in 1:num_P
N_candidates = zeros(eltype(surrSOPD.x[1]), num_new_samples, d)
#Using phi(n) just like DYCORS, merit function = surrogate
#Like in DYCORS, I_perturb = 1 always
evaluations = zeros(eltype(surrSOPD.y[1]), num_new_samples)
for j in 1:num_new_samples
for k in 1:d
a = lb[k] - C[i][k]
b = ub[k] - C[i][k]
N_candidates[j, k] = C[i][k] +
rand(truncated(Normal(0, r_centers[i]), a, b))
end
evaluations[j] = surrSOPD(Tuple(N_candidates[j, :]))
end
x_best = Tuple(N_candidates[argmin(evaluations), :])
y_best = minimum(evaluations)
push!(new_points_x, x_best)
new_points_y[i] = y_best
end
        #new_points is split into new_points_x and new_points_y, which now hold
        #[x_1,...,x_{num_P}] and [y_1,...,y_{num_P}] respectively
#2.4 Adaptive learning and tabu archive
for i in 1:num_P
if new_points_x[i] in centers_global
r_centers[i] = r_centers_global[i]
N_failures[i] = N_failures_global[i]
end
f_1 = obj(Tuple(new_points_x[i]))
f_2 = obj2_ND(f_1, surrSOPD.x)
l = length(Fronts[1])
Pareto_set = zeros(eltype(surrSOPD.x[1]), l, 2)
for j in 1:l
val = obj2_ND(Fronts[1][j], surrSOPD.x)
Pareto_set[j, 1] = obj(Tuple(Fronts[1][j]))
Pareto_set[j, 2] = val
end
if (Hypervolume_Pareto_improving(f_1, f_2, Pareto_set) < tau)#check this
#failure
r_centers[i] = r_centers[i] / 2
N_failures[i] += 1
if N_failures[i] > N_fail
push!(tabu, C[i])
push!(N_tenures_tabu, 0)
end
else
#P_i is success
#Adaptive_learning
add_point!(surrSOPD, new_points_x[i], new_points_y[i])
push!(r_centers_global, r_centers[i])
push!(N_failures_global, N_failures[i])
end
end
end
index = argmin(surrSOPD.y)
return (surrSOPD.x[index], surrSOPD.y[index])
end
#EGO
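# x dominates y when x is no worse in every objective and strictly better in at least one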
_dominates(x, y) = all(x .<= y) && any(x .< y)
function _nonDominatedSorting(arr::Array{Float64, 2})
fronts::Array{Array, 1} = Array[]
ind::Array{Int64, 1} = collect(1:size(arr, 1))
while !isempty(arr)
s = size(arr, 1)
red = dropdims(
sum([_dominates(arr[i, :], arr[j, :]) for i in 1:s, j in 1:s],
dims = 1) .== 0,
dims = 1)
a = 1:s
sel::Array{Int64, 1} = a[red]
push!(fronts, ind[sel])
da::Array{Int64, 1} = deleteat!(collect(1:s), sel)
ind = deleteat!(ind, sel)
arr = arr[da, :]
end
return fronts
end
function surrogate_optimize(obj::Function, sbm::SMB, lb::Number, ub::Number,
surrSMB::AbstractSurrogate, sample_type::SamplingAlgorithm;
maxiters = 100, n_new_look = 1000)
#obj contains a function for each output dimension
dim_out = length(surrSMB.y[1])
d = 1
x_to_look = sample(n_new_look, lb, ub, sample_type)
for iter in 1:maxiters
index_min = 0
min_mean = +Inf
for i in 1:n_new_look
new_mean = sum(obj(x_to_look[i])) / dim_out
if new_mean < min_mean
min_mean = new_mean
index_min = i
end
end
x_new = x_to_look[index_min]
deleteat!(x_to_look, index_min)
n_new_look = n_new_look - 1
# evaluate the true function at that point
y_new = obj(x_new)
#update the surrogate
add_point!(surrSMB, x_new, y_new)
end
#Find and return Pareto
y = surrSMB.y
y = permutedims(reshape(hcat(y...), (length(y[1]), length(y)))) #2d matrix
Fronts = _nonDominatedSorting(y) #this returns the indexes
pareto_front_index = Fronts[1]
pareto_set = []
pareto_front = []
for i in 1:length(pareto_front_index)
push!(pareto_set, surrSMB.x[pareto_front_index[i]])
push!(pareto_front, surrSMB.y[pareto_front_index[i]])
end
return pareto_set, pareto_front
end
function surrogate_optimize(obj::Function, smb::SMB, lb, ub, surrSMBND::AbstractSurrogate,
sample_type::SamplingAlgorithm; maxiters = 100,
n_new_look = 1000)
#obj contains a function for each output dimension
dim_out = length(surrSMBND.y[1])
d = length(lb)
x_to_look = sample(n_new_look, lb, ub, sample_type)
for iter in 1:maxiters
index_min = 0
min_mean = +Inf
for i in 1:n_new_look
new_mean = sum(obj(x_to_look[i])) / dim_out
if new_mean < min_mean
min_mean = new_mean
index_min = i
end
end
x_new = x_to_look[index_min]
deleteat!(x_to_look, index_min)
n_new_look = n_new_look - 1
# evaluate the true function at that point
y_new = obj(x_new)
#update the surrogate
add_point!(surrSMBND, x_new, y_new)
end
#Find and return Pareto
y = surrSMBND.y
y = permutedims(reshape(hcat(y...), (length(y[1]), length(y)))) #2d matrix
Fronts = _nonDominatedSorting(y) #this returns the indexes
pareto_front_index = Fronts[1]
pareto_set = []
pareto_front = []
for i in 1:length(pareto_front_index)
push!(pareto_set, surrSMBND.x[pareto_front_index[i]])
push!(pareto_front, surrSMBND.y[pareto_front_index[i]])
end
return pareto_set, pareto_front
end
# RTEA (noisy model-based multi-objective optimization + standard RTEA by Fieldsend and Everson):
# use it for very noisy objective functions, since points are re-evaluated many times
function surrogate_optimize(obj, rtea::RTEA, lb::Number, ub::Number,
surrRTEA::AbstractSurrogate, sample_type::SamplingAlgorithm;
maxiters = 100, n_new_look = 1000)
Z = rtea.z
K = rtea.k
p_cross = rtea.p
n_c = rtea.n_c
sigma = rtea.sigma
#find pareto set of the first evaluations: (estimated pareto)
y = surrRTEA.y
y = permutedims(reshape(hcat(y...), (length(y[1]), length(y)))) #2d matrix
Fronts = _nonDominatedSorting(y) #this returns the indexes
pareto_front_index = Fronts[1]
pareto_set = []
pareto_front = []
for i in 1:length(pareto_front_index)
push!(pareto_set, surrRTEA.x[pareto_front_index[i]])
push!(pareto_front, surrRTEA.y[pareto_front_index[i]])
end
number_of_revaluations = zeros(Int, length(pareto_set))
iter = 1
d = 1
dim_out = length(surrRTEA.y[1])
while iter < maxiters
if iter < (1 - Z) * maxiters
#1) propose new point x_new
#sample randomly from (estimated) pareto v and u
if length(pareto_set) < 2
throw("Starting pareto set is too small, increase number of sampling point of the surrogate")
end
u = pareto_set[rand(1:length(pareto_set))]
v = pareto_set[rand(1:length(pareto_set))]
#children
if rand() < p_cross
mu = rand()
if mu <= 0.5
beta = (2 * mu)^(1 / n_c + 1)
else
beta = (1 / (2 * (1 - mu)))^(1 / n_c + 1)
end
x = 0.5 * ((1 + beta) * v + (1 - beta) * u)
else
x = v
end
#mutation
x_new = x + rand(Normal(0, sigma))
y_new = obj(x_new)
#update pareto
new_to_pareto = false
for i in 1:length(pareto_set)
counter = zeros(Int, dim_out)
#compare the y_new values to pareto, if there is at least one entry where it dominates all the others, then it can be in pareto
for l in 1:dim_out
if y_new[l] < pareto_front[i][l]
                        counter[l] += 1
                    end
                end
                #y_new can enter the pareto set only if it improves on this point in every output dimension
                if sum(counter) == dim_out
                    new_to_pareto = true
                end
            end
if new_to_pareto == true
push!(pareto_set, x_new)
push!(pareto_front, y_new)
push!(number_of_revaluations, 0)
end
            add_point!(surrRTEA, x_new, y_new)
end
for k in 1:K
val, pos = findmin(number_of_revaluations)
x_r = pareto_set[pos]
y_r = obj(x_r)
            number_of_revaluations[pos] += 1
#check if it is again in the pareto set or not, if not eliminate it from pareto
still_in_pareto = false
for i in 1:length(pareto_set)
counter = zeros(Int, dim_out)
for l in 1:dim_out
if y_r[l] < pareto_front[i][l]
                        counter[l] += 1
                    end
                end
                #the re-evaluated point keeps its spot only if it still wins in every output dimension
                if sum(counter) == dim_out
                    still_in_pareto = true
                end
            end
if still_in_pareto == false
#remove from pareto
deleteat!(pareto_set, pos)
deleteat!(pareto_front, pos)
                deleteat!(number_of_revaluations, pos)
end
end
iter = iter + 1
end
return pareto_set, pareto_front
end
function surrogate_optimize(obj, rtea::RTEA, lb, ub, surrRTEAND::AbstractSurrogate,
sample_type::SamplingAlgorithm; maxiters = 100,
n_new_look = 1000)
Z = rtea.z
K = rtea.k
p_cross = rtea.p
n_c = rtea.n_c
sigma = rtea.sigma
#find pareto set of the first evaluations: (estimated pareto)
y = surrRTEAND.y
y = permutedims(reshape(hcat(y...), (length(y[1]), length(y)))) #2d matrix
Fronts = _nonDominatedSorting(y) #this returns the indexes
pareto_front_index = Fronts[1]
pareto_set = []
pareto_front = []
for i in 1:length(pareto_front_index)
push!(pareto_set, surrRTEAND.x[pareto_front_index[i]])
push!(pareto_front, surrRTEAND.y[pareto_front_index[i]])
end
number_of_revaluations = zeros(Int, length(pareto_set))
iter = 1
d = length(lb)
dim_out = length(surrRTEAND.y[1])
while iter < maxiters
if iter < (1 - Z) * maxiters
#sample pareto_set
if length(pareto_set) < 2
throw("Starting pareto set is too small, increase number of sampling point of the surrogate")
end
u = pareto_set[rand(1:length(pareto_set))]
v = pareto_set[rand(1:length(pareto_set))]
#children
if rand() < p_cross
mu = rand()
if mu <= 0.5
beta = (2 * mu)^(1 / n_c + 1)
else
beta = (1 / (2 * (1 - mu)))^(1 / n_c + 1)
end
x = 0.5 * ((1 + beta) * v + (1 - beta) * u)
else
x = v
end
#mutation
            x_new = [x[i] + rand(Normal(0, sigma)) for i in 1:d]
y_new = obj(x_new)
#update pareto
new_to_pareto = false
for i in 1:length(pareto_set)
counter = zeros(Int, dim_out)
#compare the y_new values to pareto, if there is at least one entry where it dominates all the others, then it can be in pareto
for l in 1:dim_out
if y_new[l] < pareto_front[i][l]
                        counter[l] += 1
                    end
                end
                #y_new can enter the pareto set only if it improves on this point in every output dimension
                if sum(counter) == dim_out
                    new_to_pareto = true
                end
            end
if new_to_pareto == true
push!(pareto_set, x_new)
push!(pareto_front, y_new)
push!(number_of_revaluations, 0)
end
            add_point!(surrRTEAND, x_new, y_new)
end
for k in 1:K
val, pos = findmin(number_of_revaluations)
x_r = pareto_set[pos]
y_r = obj(x_r)
            number_of_revaluations[pos] += 1
#check if it is again in the pareto set or not, if not eliminate it from pareto
still_in_pareto = false
for i in 1:length(pareto_set)
counter = zeros(Int, dim_out)
for l in 1:dim_out
if y_r[l] < pareto_front[i][l]
                        counter[l] += 1
                    end
                end
                #the re-evaluated point keeps its spot only if it still wins in every output dimension
                if sum(counter) == dim_out
                    still_in_pareto = true
                end
            end
if still_in_pareto == false
#remove from pareto
deleteat!(pareto_set, pos)
deleteat!(pareto_front, pos)
                deleteat!(number_of_revaluations, pos)
end
end
iter = iter + 1
end
return pareto_set, pareto_front
end
function surrogate_optimize(
obj::Function, ::EI, lb::AbstractArray, ub::AbstractArray, krig,
sample_type::SectionSample;
maxiters = 100, num_new_samples = 100)
dtol = 1e-3 * norm(ub - lb)
eps = 0.01
for i in 1:maxiters
d = length(krig.x)
# Sample lots of points from the design space -- we will evaluate the EI function at these points
new_sample = sample(num_new_samples, lb, ub, sample_type)
# Find the best point so far
f_min = minimum(krig.y)
# Allocate some arrays
evaluations = zeros(eltype(krig.x[1]), num_new_samples) # Holds EI function evaluations
point_found = false # Whether we have found a new point to test
new_x_max = zero(eltype(krig.x[1])) # New x point
new_EI_max = zero(eltype(krig.x[1])) # EI at new x point
diff_x = zeros(eltype(krig.x[1]), d)
# For each point in the sample set, evaluate the Expected Improvement function
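        # EI(x) = (f_min - μ(x) - eps) * Φ(z) + σ(x) * φ(z), with z = (f_min - μ(x) - eps) / σ(x),
        # where Φ and φ are the standard normal cdf and pdf (computed in the loop below)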
while point_found == false
for j in 1:length(new_sample)
std = std_error_at_point(krig, new_sample[j])
u = krig(new_sample[j])
if abs(std) > 1e-6
z = (f_min - u - eps) / std
else
z = 0
end
# Evaluate EI at point new_sample[j]
evaluations[j] = (f_min - u - eps) * cdf(Normal(), z) +
std * pdf(Normal(), z)
end
# find the sample which maximizes the EI function
index_max = argmax(evaluations)
x_new = new_sample[index_max] # x point which maximized EI
EI_new = maximum(evaluations) # EI at the new point
for l in 1:d
diff_x[l] = norm(krig.x[l] .- x_new)
end
bit_x = diff_x .> dtol
#new_min_x has to have some distance from krig.x
if false in bit_x
#The new_point is not actually that new, discard it!
deleteat!(evaluations, index_max)
deleteat!(new_sample, index_max)
if length(new_sample) == 0
println("Out of sampling points.")
return section_sampler_returner(sample_type, krig.x, krig.y, lb, ub,
krig)
end
else
point_found = true
new_x_max = x_new
new_EI_max = EI_new
end
end
# if the EI is less than some tolerance times the difference between the maximum and minimum points
# in the surrogate, then we terminate the optimizer.
if new_EI_max < 1e-6 * norm(maximum(krig.y) - minimum(krig.y))
println("Termination tolerance reached.")
return section_sampler_returner(sample_type, krig.x, krig.y, lb, ub, krig)
end
add_point!(krig, Tuple(new_x_max), obj(new_x_max))
end
println("Completed maximum number of iterations.")
end
function section_sampler_returner(sample_type::SectionSample, surrn_x, surrn_y,
lb, ub, surrn)
d_fixed = fixed_dimensions(sample_type)
@assert length(surrn_y) == size(surrn_x)[1]
surrn_xy = [(surrn_x[y], surrn_y[y]) for y in 1:length(surrn_y)]
section_surr1_xy = filter(xyz -> xyz[1][d_fixed] == Tuple(sample_type.x0[d_fixed]),
surrn_xy)
section_surr1_x = [xy[1] for xy in section_surr1_xy]
section_surr1_y = [xy[2] for xy in section_surr1_xy]
if length(section_surr1_xy) == 0
@debug "No new point added - surrogate locally stable"
N_NEW_POINTS = 100
section_surr1_x = sample(N_NEW_POINTS, lb, ub, sample_type)
section_surr1_y = zeros(N_NEW_POINTS)
for i in 1:size(section_surr1_x, 1)
xi = Tuple([section_surr1_x[i, :]...])[1]
section_surr1_y[i] = surrn(xi)
end
end
index = argmin(section_surr1_y)
return (section_surr1_x[index, :][1], section_surr1_y[index])
end
| Surrogates | https://github.com/SciML/Surrogates.jl.git |
|
[
"MIT"
] | 6.10.0 | ba29564853a3f7b10eb699f1e99a62cdb6a3770f | code | 3291 | using PolyChaos
mutable struct PolynomialChaosSurrogate{X, Y, L, U, C, O, N} <: AbstractSurrogate
x::X
y::Y
lb::L
ub::U
coeff::C
ortopolys::O
num_of_multi_indexes::N
end
function _calculatepce_coeff(x, y, num_of_multi_indexes, op::AbstractCanonicalOrthoPoly)
n = length(x)
A = zeros(eltype(x), n, num_of_multi_indexes)
for i in 1:n
A[i, :] = PolyChaos.evaluate(x[i], op)
end
return (A' * A) \ (A' * y)
end
function PolynomialChaosSurrogate(x, y, lb::Number, ub::Number;
op::AbstractCanonicalOrthoPoly = GaussOrthoPoly(2))
n = length(x)
poly_degree = op.deg
num_of_multi_indexes = 1 + poly_degree
if n < 2 + 3 * num_of_multi_indexes
throw("To avoid numerical problems, it's strongly suggested to have at least $(2+3*num_of_multi_indexes) samples")
end
coeff = _calculatepce_coeff(x, y, num_of_multi_indexes, op)
return PolynomialChaosSurrogate(x, y, lb, ub, coeff, op, num_of_multi_indexes)
end
function (pc::PolynomialChaosSurrogate)(val::Number)
# Check to make sure dimensions of input matches expected dimension of surrogate
_check_dimension(pc, val)
return sum([pc.coeff[i] * PolyChaos.evaluate(val, pc.ortopolys)[i]
for i in 1:(pc.num_of_multi_indexes)])
end
function _calculatepce_coeff(x, y, num_of_multi_indexes, op::MultiOrthoPoly)
n = length(x)
d = length(x[1])
A = zeros(eltype(x[1]), n, num_of_multi_indexes)
for i in 1:n
xi = zeros(eltype(x[1]), d)
for j in 1:d
xi[j] = x[i][j]
end
A[i, :] = PolyChaos.evaluate(xi, op)
end
return (A' * A) \ (A' * y)
end
function PolynomialChaosSurrogate(x, y, lb, ub;
op::MultiOrthoPoly = MultiOrthoPoly([GaussOrthoPoly(2)
for j in 1:length(lb)],
2))
n = length(x)
d = length(lb)
poly_degree = op.deg
num_of_multi_indexes = binomial(d + poly_degree, poly_degree)
if n < 2 + 3 * num_of_multi_indexes
throw("To avoid numerical problems, it's strongly suggested to have at least $(2+3*num_of_multi_indexes) samples")
end
coeff = _calculatepce_coeff(x, y, num_of_multi_indexes, op)
return PolynomialChaosSurrogate(x, y, lb, ub, coeff, op, num_of_multi_indexes)
end
function (pcND::PolynomialChaosSurrogate)(val)
# Check to make sure dimensions of input matches expected dimension of surrogate
_check_dimension(pcND, val)
sum = zero(eltype(val[1]))
for i in 1:(pcND.num_of_multi_indexes)
sum = sum +
pcND.coeff[i] *
first(PolyChaos.evaluate(pcND.ortopolys.ind[i, :], collect(val),
pcND.ortopolys))
end
return sum
end
function add_point!(polych::PolynomialChaosSurrogate, x_new, y_new)
if length(polych.lb) == 1
#1D
polych.x = vcat(polych.x, x_new)
polych.y = vcat(polych.y, y_new)
polych.coeff = _calculatepce_coeff(polych.x, polych.y, polych.num_of_multi_indexes,
polych.ortopolys)
else
polych.x = vcat(polych.x, x_new)
polych.y = vcat(polych.y, y_new)
polych.coeff = _calculatepce_coeff(polych.x, polych.y, polych.num_of_multi_indexes,
polych.ortopolys)
end
nothing
end
| Surrogates | https://github.com/SciML/Surrogates.jl.git |
|
[
"MIT"
] | 6.10.0 | ba29564853a3f7b10eb699f1e99a62cdb6a3770f | code | 7259 | using LinearAlgebra
using ExtendableSparse
_copy(t::Tuple) = t
_copy(t) = copy(t)
mutable struct RadialBasis{F, Q, X, Y, L, U, C, S, D} <: AbstractSurrogate
phi::F
dim_poly::Q
x::X
y::Y
lb::L
ub::U
coeff::C
scale_factor::S
sparse::D
end
mutable struct RadialFunction{Q, P}
q::Q # degree of polynomial
phi::P
end
linearRadial() = RadialFunction(0, z -> norm(z))
cubicRadial() = RadialFunction(1, z -> norm(z)^3)
multiquadricRadial(c = 1.0) = RadialFunction(1, z -> sqrt((c * norm(z))^2 + 1))
thinplateRadial() = RadialFunction(2, z -> begin
result = norm(z)^2 * log(norm(z))
ifelse(iszero(z), zero(result), result)
end)
"""
    RadialBasis(x, y, lb, ub; rad::RadialFunction = linearRadial(), scale_factor::Real = 0.5, sparse = false)
Constructor for RadialBasis surrogate, of the form
``f(x) = \\sum_{i=1}^{N} w_i \\phi(|x - \\bold{c}_i|) + \\bold{v}^{\\mathrm{T}} [ 1; \\bold{x} ]``
where ``w_i`` are the weights of polyharmonic splines ``\\phi(x)`` and ``\\bold{v}`` are coefficients
of a polynomial term.
References:
https://en.wikipedia.org/wiki/Polyharmonic_spline
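# Example
A minimal 1-D usage sketch (the sampled function, bounds, and sample count below are purely illustrative):
```julia
using Surrogates
lb, ub = 0.0, 10.0
x = sample(20, lb, ub, SobolSample())
y = sin.(x)
my_rad = RadialBasis(x, y, lb, ub; rad = cubicRadial())
my_rad(2.5)                       # surrogate prediction at a new point
add_point!(my_rad, 3.0, sin(3.0)) # add a sample and refit the coefficients
```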
"""
function RadialBasis(x, y, lb, ub; rad::RadialFunction = linearRadial(),
scale_factor::Real = 0.5, sparse = false)
q = rad.q
phi = rad.phi
coeff = _calc_coeffs(x, y, lb, ub, phi, q, scale_factor, sparse)
return RadialBasis(phi, q, x, y, lb, ub, coeff, scale_factor, sparse)
end
function _calc_coeffs(x, y, lb, ub, phi, q, scale_factor, sparse)
nd = length(first(x))
num_poly_terms = binomial(q + nd, q)
D = _construct_rbf_interp_matrix(x, first(x), lb, ub, phi, q, scale_factor, sparse)
Y = _construct_rbf_y_matrix(y, first(y), length(y) + num_poly_terms)
if (typeof(y) == Vector{Float64}) #single output case
coeff = _copy(transpose(D \ y))
else
coeff = _copy(transpose(D \ Y[1:size(D)[1], :])) #if y is multi output;
end
return coeff
end
function _construct_rbf_interp_matrix(x, x_el::Number, lb, ub, phi, q, scale_factor, sparse)
n = length(x)
if sparse
D = ExtendableSparseMatrix{eltype(x_el), Int}(n, n)
else
D = zeros(eltype(x_el), n, n)
end
@inbounds for i in 1:n
for j in i:n
D[i, j] = phi((x[i] .- x[j]) ./ scale_factor)
end
end
D_sym = Symmetric(D, :U)
return D_sym
end
function _construct_rbf_interp_matrix(x, x_el, lb, ub, phi, q, scale_factor, sparse)
n = length(x)
nd = length(x_el)
if sparse
D = ExtendableSparseMatrix{eltype(x_el), Int}(n, n)
else
D = zeros(eltype(x_el), n, n)
end
@inbounds for i in 1:n
for j in i:n
D[i, j] = phi((x[i] .- x[j]) ./ scale_factor)
end
end
D_sym = Symmetric(D, :U)
return D_sym
end
function _construct_rbf_y_matrix(y, y_el::Number, m)
[i <= length(y) ? y[i] : zero(y_el) for i in 1:m]
end
function _construct_rbf_y_matrix(y, y_el, m)
[i <= length(y) ? y[i][j] : zero(first(y_el)) for i in 1:m, j in 1:length(y_el)]
end
using Zygote: Buffer
using ChainRulesCore: @non_differentiable
function _make_combination(n, d, ix)
exponents_combinations = [e
for e
in collect(Iterators.product(Iterators.repeated(0:n,
d)...))[:]
if sum(e) <= n]
return exponents_combinations[ix + 1]
end
# TODO: Is this correct? Do we ever want to differentiate w.r.t n, d, or ix?
# @non_differentiable tells AD to skip differentiation w.r.t. n, d, ix (they receive no tangent)
@non_differentiable _make_combination(n, d, ix)
"""
multivar_poly_basis(x, ix, d, n)
Evaluates in `x` the `ix`-th element of the multivariate polynomial basis of maximum
degree `n` and `d` dimensions.
Time complexity: `(n+1)^d.`
# Example
For n=2, d=2 the multivariate polynomial basis is
````
1,
x,y
x^2,y^2,xy
````
Therefore the 3rd (ix=3) element is `y` .
Therefore when x=(13,43) and ix=3 this function will return 43.
"""
function multivar_poly_basis(x, ix, d, n)
if n == 0
return one(eltype(x))
else
prod(a^d
for (a, d)
in zip(x, _make_combination(n, d, ix)))
end
end
"""
Calculates current estimate of value 'val' with respect to the RadialBasis object.
"""
function (rad::RadialBasis)(val)
# Check to make sure dimensions of input matches expected dimension of surrogate
_check_dimension(rad, val)
approx = _approx_rbf(val, rad)
return _match_container(approx, first(rad.y))
end
function _approx_rbf(val::Number, rad::RadialBasis)
n = length(rad.x)
approx = zero(rad.coeff[:, 1])
for i in 1:n
approx += rad.coeff[:, i] * rad.phi((val .- rad.x[i]) / rad.scale_factor)
end
return approx
end
function _make_approx(val, rad::RadialBasis)
l = size(rad.coeff, 1)
return Buffer(zeros(eltype(val), l), false)
end
function _add_tmp_to_approx!(approx, i, tmp, rad::RadialBasis; f = identity)
@inbounds @simd ivdep for j in 1:size(rad.coeff, 1)
approx[j] += rad.coeff[j, i] * f(tmp)
end
end
# specialise when only single output dimension
function _make_approx(val,
::RadialBasis{F, Q, X, <:AbstractArray{<:Number}}) where {F, Q, X}
return Ref(zero(eltype(val)))
end
function _add_tmp_to_approx!(approx::Base.RefValue, i, tmp,
rad::RadialBasis{F, Q, X, <:AbstractArray{<:Number}};
f = identity) where {F, Q, X}
@inbounds @simd ivdep for j in 1:size(rad.coeff, 1)
approx[] += rad.coeff[j, i] * f(tmp)
end
end
_ret_copy(v::Base.RefValue) = v[]
_ret_copy(v) = copy(v)
function _approx_rbf(val, rad::RadialBasis)
n = length(rad.x)
# make sure @inbounds is safe
if n > size(rad.coeff, 2)
throw("Length of model's x vector exceeds number of calculated coefficients ($n != $(size(rad.coeff, 2))).")
end
approx = _make_approx(val, rad)
if rad.phi === linearRadial().phi
for i in 1:n
tmp = zero(eltype(val))
@inbounds @simd ivdep for j in 1:length(val)
tmp += ((val[j] - rad.x[i][j]) / rad.scale_factor)^2
end
tmp = sqrt(tmp)
_add_tmp_to_approx!(approx, i, tmp, rad)
end
else
tmp = collect(val)
@inbounds for i in 1:n
tmp = (val .- rad.x[i]) ./ rad.scale_factor
_add_tmp_to_approx!(approx, i, tmp, rad; f = rad.phi)
end
end
return _ret_copy(approx)
end
_scaled_chebyshev(x, k, lb, ub) = cos(k * acos(-1 + 2 * (x - lb) / (ub - lb)))
_center_bounds(x::Tuple, lb, ub) = ntuple(i -> (ub[i] - lb[i]) / 2, length(x))
_center_bounds(x, lb, ub) = (ub .- lb) ./ 2
"""
add_point!(rad::RadialBasis,new_x,new_y)
Add new samples `new_x` and `new_y` and update the coefficients in place; returns `nothing`.
"""
function add_point!(rad::RadialBasis, new_x, new_y)
if (length(new_x) == 1 && length(new_x[1]) == 1) ||
(length(new_x) > 1 && length(new_x[1]) == 1 && length(rad.lb) > 1)
push!(rad.x, new_x)
push!(rad.y, new_y)
else
append!(rad.x, new_x)
append!(rad.y, new_y)
end
rad.coeff = _calc_coeffs(rad.x, rad.y, rad.lb, rad.ub, rad.phi, rad.dim_poly,
rad.scale_factor, rad.sparse)
nothing
end
| Surrogates | https://github.com/SciML/Surrogates.jl.git |
|
[
"MIT"
] | 6.10.0 | ba29564853a3f7b10eb699f1e99a62cdb6a3770f | code | 4579 | using QuasiMonteCarlo
using QuasiMonteCarlo: SamplingAlgorithm
# We need to convert the matrix that QuasiMonteCarlo produces into a vector of Tuples like Surrogates expects
# This will eventually be removed once we refactor the rest of the code to work with d x n matrices instead
# of vectors of Tuples
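# For example (shapes only, the values are illustrative): QuasiMonteCarlo.sample(3, [0.0, 0.0], [1.0, 1.0], SobolSample())
# yields a 2x3 matrix, which the wrapper below converts into a 3-element Vector of 2-Tuples,
# while a 1-D request comes back as a plain Vector of numbers.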
function sample(args...; kwargs...)
s = QuasiMonteCarlo.sample(args...; kwargs...)
if isone(size(s, 1))
# 1D case: s is a Vector
return vec(s)
else
# ND case: s is a d x n matrix, where d is the dimension and n is the number of samples
return collect(reinterpret(reshape, NTuple{size(s, 1), eltype(s)}, s))
end
end
#### SectionSample ####
"""
SectionSample{T}(x0, sa)
`SectionSample(x0, sampler)` where `sampler` is any sampler above and `x0` is a vector of either `NaN` for a free dimension or some scalar for a constrained dimension.
"""
struct SectionSample{
R <: Real,
I <: Integer,
VR <: AbstractVector{R},
VI <: AbstractVector{I}
} <: SamplingAlgorithm
x0::VR
sa::SamplingAlgorithm
fixed_dims::VI
end
fixed_dimensions(section_sampler::SectionSample)::Vector{Int64} = findall(x -> x == false,
isnan.(section_sampler.x0))
free_dimensions(section_sampler::SectionSample)::Vector{Int64} = findall(x -> x == true,
isnan.(section_sampler.x0))
"""
sample(n,lb,ub,K::SectionSample)
Returns Tuples constrained to a section.
In surrogate-based identification and control, optimization can alternate between unconstrained sampling in the full-dimensional parameter space, and sampling constrained on specific sections (e.g. a plane in a 3D volume).
A SectionSample allows sampling and optimizing on a subset of 'free' dimensions while keeping 'fixed' ones constrained.
The sampler is defined as in e.g.
`section_sampler_y_is_10 = SectionSample([NaN64, NaN64, 10.0, 10.0], RandomSample())`
where the first argument is a Vector{T} in which numbers are fixed coordinates and `NaN`s correspond to free dimensions, and the second argument is a SamplingAlgorithm which is used to sample in the free dimensions.
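# Example
A minimal sketch (bounds and the fixed value are illustrative): keep the second coordinate fixed at 5.0 and sample the first one freely.
```julia
s = SectionSample([NaN64, 5.0], RandomSample())
pts = sample(10, [0.0, 0.0], [10.0, 10.0], s)   # Tuples whose second entry is always 5.0
```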
"""
function sample(n::Integer,
lb::T,
ub::T,
section_sampler::SectionSample) where {
T <: Union{Base.AbstractVecOrTuple, Number}}
@assert n>0 ZERO_SAMPLES_MESSAGE
QuasiMonteCarlo._check_sequence(lb, ub, length(lb))
if lb isa Number
if isnan(section_sampler.x0[1])
return vec(sample(n, lb, ub, section_sampler.sa))
else
return fill(section_sampler.x0[1], n)
end
else
d_free = free_dimensions(section_sampler)
@info d_free
new_samples = QuasiMonteCarlo.sample(n, lb[d_free], ub[d_free], section_sampler.sa)
out_as_vec = collect(repeat(section_sampler.x0', n, 1)')
for y in 1:size(out_as_vec, 2)
for (xi, d) in enumerate(d_free)
out_as_vec[d, y] = new_samples[xi, y]
end
end
return isone(size(out_as_vec, 1)) ? vec(out_as_vec) :
collect(reinterpret(reshape,
NTuple{size(out_as_vec, 1), eltype(out_as_vec)},
out_as_vec))
end
end
function SectionSample(x0::AbstractVector, sa::SamplingAlgorithm)
SectionSample(x0, sa, findall(isnan, x0))
end
"""
    sample(n, d, K::SectionSample)
In surrogate-based identification and control, optimization can alternate between unconstrained sampling in the full-dimensional parameter space, and sampling constrained on specific sections (e.g. planes in a 3D volume).
`SectionSample` allows sampling and optimizing on a subset of 'free' dimensions while keeping 'fixed' ones constrained.
The sampler is defined
`SectionSample([NaN64, NaN64, 10.0, 10.0], RandomSample())`
where the first argument is a Vector{T} in which numbers are fixed coordinates and `NaN`s correspond to free dimensions, and the second argument is a SamplingAlgorithm which is used to sample in the free dimensions.
"""
function sample(n::Integer,
d::Integer,
section_sampler::SectionSample,
T = eltype(section_sampler.x0))
QuasiMonteCarlo._check_sequence(n)
@assert eltype(section_sampler.x0) == T
@assert length(section_sampler.fixed_dims) == d
return sample(n, section_sampler)
end
@views function sample(n::Integer, section_sampler::SectionSample{T}) where {T}
samples = Matrix{T}(undef, n, length(section_sampler.x0))
fixed_dims = section_sampler.fixed_dims
samples[:, fixed_dims] .= sample(n, length(fixed_dims), section_sampler.sa, T)
return vec(samples)
end
| Surrogates | https://github.com/SciML/Surrogates.jl.git |
|
[
"MIT"
] | 6.10.0 | ba29564853a3f7b10eb699f1e99a62cdb6a3770f | code | 2244 | """
    mutable struct SecondOrderPolynomialSurrogate{X, Y, B, L, U} <: AbstractSurrogate
The second-order polynomial model can be expressed by 𝐲 = 𝐗β + ϵ, with β = (𝐗ᵗ𝐗)⁻¹𝐗ᵗ𝐲
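# Example
A minimal 1-D sketch (the data below are illustrative):
```julia
x = [1.0, 2.0, 3.0, 4.0, 5.0, 6.0]
y = x .^ 2 .- x
sec = SecondOrderPolynomialSurrogate(x, y, 0.0, 10.0)
sec(2.5)   # ≈ 3.75, since the underlying data are exactly quadratic
```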
"""
mutable struct SecondOrderPolynomialSurrogate{X, Y, B, L, U} <: AbstractSurrogate
x::X
y::Y
β::B
lb::L
ub::U
end
function SecondOrderPolynomialSurrogate(x, y, lb, ub)
X = _construct_2nd_order_interp_matrix(x, first(x))
Y = _construct_y_matrix(y, first(y))
β = X \ Y
return SecondOrderPolynomialSurrogate(x, y, β, lb, ub)
end
function _construct_2nd_order_interp_matrix(x, x_el)
n = length(x)
d = length(x_el)
D = 1 + 2 * d + d * (d - 1) ÷ 2
X = ones(eltype(x_el), n, D)
for i in 1:n, j in 1:d
X[i, j + 1] = x[i][j]
end
idx = d + 1
for j in 1:d, k in (j + 1):d
idx += 1
for i in 1:n
X[i, idx] = x[i][j] * x[i][k]
end
end
for i in 1:n, j in 1:d
X[i, j + 1 + d + d * (d - 1) ÷ 2] = x[i][j]^2
end
return X
end
_construct_y_matrix(y, y_el::Number) = y
_construct_y_matrix(y, y_el) = [y[i][j] for i in 1:length(y), j in 1:length(y_el)]
function (my_second_ord::SecondOrderPolynomialSurrogate)(val)
# Check to make sure dimensions of input matches expected dimension of surrogate
_check_dimension(my_second_ord, val)
#just create the val vector as X and multiply
d = length(val)
y = my_second_ord.β[1, :]
for j in 1:d
y += val[j] * my_second_ord.β[j + 1, :]
end
idx = d + 1
for j in 1:d, k in (j + 1):d
idx += 1
y += val[j] * val[k] * my_second_ord.β[idx, :]
end
for j in 1:d
y += val[j]^2 * my_second_ord.β[j + 1 + d + d * (d - 1) ÷ 2, :]
end
return _match_container(y, first(my_second_ord.y))
end
function add_point!(my_second::SecondOrderPolynomialSurrogate, x_new, y_new)
if eltype(x_new) == eltype(my_second.x)
append!(my_second.x, x_new)
append!(my_second.y, y_new)
else
push!(my_second.x, x_new)
push!(my_second.y, y_new)
end
X = _construct_2nd_order_interp_matrix(my_second.x, first(my_second.x))
Y = _construct_y_matrix(my_second.y, first(my_second.y))
β = X \ Y
my_second.β = β
nothing
end
| Surrogates | https://github.com/SciML/Surrogates.jl.git |
|
[
"MIT"
] | 6.10.0 | ba29564853a3f7b10eb699f1e99a62cdb6a3770f | code | 3765 | module Surrogates
using LinearAlgebra
using Distributions
abstract type AbstractSurrogate <: Function end
include("utils.jl")
include("Radials.jl")
include("Kriging.jl")
include("Sampling.jl")
include("Optimization.jl")
include("Lobachevsky.jl")
include("LinearSurrogate.jl")
include("InverseDistanceSurrogate.jl")
include("SecondOrderPolynomialSurrogate.jl")
include("Wendland.jl")
include("MOE.jl") #rewrite gaussian mixture with own algorithm to fix deps issue
include("VariableFidelity.jl")
include("Earth.jl")
include("GEK.jl")
include("GEKPLS.jl")
include("VirtualStrategy.jl")
current_surrogates = ["Kriging", "LinearSurrogate", "LobachevskySurrogate",
"NeuralSurrogate",
"RadialBasis", "RandomForestSurrogate", "SecondOrderPolynomialSurrogate",
"Wendland", "GEK", "PolynomialChaosSurrogate"]
#Radial structure:
function RadialBasisStructure(; radial_function, scale_factor, sparse)
return (name = "RadialBasis", radial_function = radial_function,
scale_factor = scale_factor, sparse = sparse)
end
#Kriging structure:
function KrigingStructure(; p, theta)
return (name = "Kriging", p = p, theta = theta)
end
function GEKStructure(; p, theta)
return (name = "GEK", p = p, theta = theta)
end
#Linear structure
function LinearStructure()
return (name = "LinearSurrogate")
end
#InverseDistance structure
function InverseDistanceStructure(; p)
return (name = "InverseDistanceSurrogate", p = p)
end
#Lobachevsky structure
function LobachevskyStructure(; alpha, n, sparse)
return (name = "LobachevskySurrogate", alpha = alpha, n = n, sparse = sparse)
end
#Neural structure
function NeuralStructure(; model, loss, opt, n_echos)
return (name = "NeuralSurrogate", model = model, loss = loss, opt = opt,
n_echos = n_echos)
end
#Random forest structure
function RandomForestStructure(; num_round)
return (name = "RandomForestSurrogate", num_round = num_round)
end
#Second order poly structure
function SecondOrderPolynomialStructure()
return (name = "SecondOrderPolynomialSurrogate")
end
#Wendland structure
function WendlandStructure(; eps, maxiters, tol)
return (name = "Wendland", eps = eps, maxiters = maxiters, tol = tol)
end
#Polychaos structure
function PolyChaosStructure(; op)
return (name = "PolynomialChaosSurrogate", op = op)
end
export current_surrogates
export GEKPLS
export RadialBasisStructure, KrigingStructure, LinearStructure, InverseDistanceStructure
export LobachevskyStructure,
NeuralStructure, RandomForestStructure,
SecondOrderPolynomialStructure
export WendlandStructure
export AbstractSurrogate, SamplingAlgorithm
export Kriging, RadialBasis, add_point!, std_error_at_point
# Parallelization Strategies
export potential_optimal_points
export MinimumConstantLiar, MaximumConstantLiar, MeanConstantLiar, KrigingBeliever,
KrigingBelieverUpperBound, KrigingBelieverLowerBound
# radial basis functions
export linearRadial, cubicRadial, multiquadricRadial, thinplateRadial
# samplers
export sample, GridSample, RandomSample, SobolSample, LatinHypercubeSample,
HaltonSample
export RandomSample, KroneckerSample, GoldenSample, SectionSample
# Optimization algorithms
export SRBF, LCBS, EI, DYCORS, SOP, RTEA, SMB, surrogate_optimize
export LobachevskySurrogate, lobachevsky_integral, lobachevsky_integrate_dimension
export LinearSurrogate
export InverseDistanceSurrogate
export SecondOrderPolynomialSurrogate
export Wendland
export RadialBasisStructure, KrigingStructure, LinearStructure, InverseDistanceStructure
export LobachevskyStructure,
NeuralStructure, RandomForestStructure,
SecondOrderPolynomialStructure
export WendlandStructure
#export MOE
export VariableFidelitySurrogate
export EarthSurrogate
export GEK
end
| Surrogates | https://github.com/SciML/Surrogates.jl.git |
|
[
"MIT"
] | 6.10.0 | ba29564853a3f7b10eb699f1e99a62cdb6a3770f | code | 6863 | mutable struct VariableFidelitySurrogate{X, Y, L, U, N, F, E} <: AbstractSurrogate
x::X
y::Y
lb::L
ub::U
num_high_fidel::N
low_fid_surr::F
eps_surr::E
end
function VariableFidelitySurrogate(x, y, lb, ub;
num_high_fidel = Int(floor(length(x) / 2)),
low_fid_structure = RadialBasisStructure(radial_function = linearRadial(),
scale_factor = 1.0,
sparse = false),
high_fid_structure = RadialBasisStructure(radial_function = cubicRadial(),
scale_factor = 1.0,
sparse = false))
x_high = x[1:num_high_fidel]
x_low = x[(num_high_fidel + 1):end]
y_high = y[1:num_high_fidel]
y_low = y[(num_high_fidel + 1):end]
#Fit low fidelity surrogate:
if low_fid_structure[1] == "RadialBasis"
#fit and append to local_surr
low_fid_surr = RadialBasis(x_low, y_low, lb, ub,
rad = low_fid_structure.radial_function,
scale_factor = low_fid_structure.scale_factor,
sparse = low_fid_structure.sparse)
elseif low_fid_structure[1] == "Kriging"
low_fid_surr = Kriging(x_low, y_low, lb, ub, p = low_fid_structure.p,
theta = low_fid_structure.theta)
elseif low_fid_structure[1] == "GEK"
low_fid_surr = GEK(x_low, y_low, lb, ub, p = low_fid_structure.p,
theta = low_fid_structure.theta)
elseif low_fid_structure == "LinearSurrogate"
low_fid_surr = LinearSurrogate(x_low, y_low, lb, ub)
elseif low_fid_structure[1] == "InverseDistanceSurrogate"
low_fid_surr = InverseDistanceSurrogate(x_low, y_low, lb, ub,
p = low_fid_structure.p)
elseif low_fid_structure[1] == "LobachevskySurrogate"
low_fid_surr = LobachevskySurrogate(x_low, y_low, lb, ub,
alpha = low_fid_structure.alpha,
n = low_fid_structure.n,
sparse = low_fid_structure.sparse)
elseif low_fid_structure[1] == "NeuralSurrogate"
low_fid_surr = NeuralSurrogate(x_low, y_low, lb, ub,
model = low_fid_structure.model,
loss = low_fid_structure.loss,
opt = low_fid_structure.opt,
n_echos = low_fid_structure.n_echos)
elseif low_fid_structure[1] == "RandomForestSurrogate"
low_fid_surr = RandomForestSurrogate(x_low, y_low, lb, ub,
num_round = low_fid_structure.num_round)
elseif low_fid_structure == "SecondOrderPolynomialSurrogate"
low_fid_surr = SecondOrderPolynomialSurrogate(x_low, y_low, lb, ub)
elseif low_fid_structure[1] == "Wendland"
        low_fid_surr = Wendland(x_low, y_low, lb, ub, eps = low_fid_structure.eps,
                                maxiters = low_fid_structure.maxiters,
                                tol = low_fid_structure.tol)
else
throw("A surrogate with the name provided does not exist or is not currently supported with VariableFidelity")
end
    #Fit surrogate eps on high fidelity data with objective function y_high - low_fid_surr
y_eps = zeros(eltype(y), num_high_fidel)
@inbounds for i in 1:num_high_fidel
y_eps[i] = y_high[i] - low_fid_surr(x_high[i])
end
if high_fid_structure[1] == "RadialBasis"
#fit and append to local_surr
eps = RadialBasis(x_high, y_eps, lb, ub, rad = high_fid_structure.radial_function,
scale_factor = high_fid_structure.scale_factor,
sparse = high_fid_structure.sparse)
elseif high_fid_structure[1] == "Kriging"
eps = Kriging(x_high, y_eps, lb, ub, p = high_fid_structure.p,
theta = high_fid_structure.theta)
elseif high_fid_structure == "LinearSurrogate"
eps = LinearSurrogate(x_high, y_eps, lb, ub)
elseif high_fid_structure[1] == "InverseDistanceSurrogate"
        eps = InverseDistanceSurrogate(x_high, y_eps, lb, ub, p = high_fid_structure.p)
elseif high_fid_structure[1] == "LobachevskySurrogate"
eps = LobachevskySurrogate(x_high, y_eps, lb, ub, alpha = high_fid_structure.alpha,
n = high_fid_structure.n,
sparse = high_fid_structure.sparse)
elseif high_fid_structure[1] == "NeuralSurrogate"
eps = NeuralSurrogate(x_high, y_eps, lb, ub, model = high_fid_structure.model,
loss = high_fid_structure.loss, opt = high_fid_structure.opt,
n_echos = high_fid_structure.n_echos)
elseif high_fid_structure[1] == "RandomForestSurrogate"
eps = RandomForestSurrogate(x_high, y_eps, lb, ub,
num_round = high_fid_structure.num_round)
elseif high_fid_structure == "SecondOrderPolynomialSurrogate"
eps = SecondOrderPolynomialSurrogate(x_high, y_eps, lb, ub)
elseif high_fid_structure[1] == "Wendland"
        eps = Wendland(x_high, y_eps, lb, ub, eps = high_fid_structure.eps,
                       maxiters = high_fid_structure.maxiters, tol = high_fid_structure.tol)
else
throw("A surrogate with the name provided does not exist or is not currently supported with VariableFidelity")
end
return VariableFidelitySurrogate(x, y, lb, ub, num_high_fidel, low_fid_surr, eps)
end
#=
function (varfid::VariableFidelitySurrogate)(val::Number)
return varfid.eps_surr(val) + varfid.low_fid_surr(val)
end
"""
VariableFidelitySurrogate(x,y,lb,ub;
num_high_fidel = Int(floor(length(x)/2))
low_fid = RadialBasisStructure(radial_function = linearRadial, scale_factor=1.0, sparse = false),
high_fid = RadialBasisStructure(radial_function = cubicRadial ,scale_factor=1.0,sparse=false))
First section (1:num_high_fidel) of samples are high fidelity, second section are low fidelity
"""
function VariableFidelitySurrogate(x,y,lb,ub;
num_high_fidel = Int(floor(length(x)/2))
low_fid = RadialBasisStructure(radial_function = linearRadial, scale_factor=1.0, sparse = false),
high_fid = RadialBasisStructure(radial_function = cubicRadial ,scale_factor=1.0,sparse=false))
end
=#
function (varfid::VariableFidelitySurrogate)(val)
return varfid.eps_surr(val) + varfid.low_fid_surr(val)
end
"""
add_point!(varfid::VariableFidelitySurrogate,x_new,y_new)
New points are treated as low-fidelity data, and the low-fidelity surrogate is refit accordingly.
"""
function add_point!(varfid::VariableFidelitySurrogate, x_new, y_new)
if length(varfid.x[1]) == 1
#1D
varfid.x = vcat(varfid.x, x_new)
varfid.y = vcat(varfid.y, y_new)
#I added a new lowfidelity datapoint, I need to update the low_fid_surr:
add_point!(varfid.low_fid_surr, x_new, y_new)
else
#ND
varfid.x = vcat(varfid.x, x_new)
varfid.y = vcat(varfid.y, y_new)
#I added a new lowfidelity datapoint, I need to update the low_fid_surr:
add_point!(varfid.low_fid_surr, x_new, y_new)
end
end
| Surrogates | https://github.com/SciML/Surrogates.jl.git |
|
[
"MIT"
] | 6.10.0 | ba29564853a3f7b10eb699f1e99a62cdb6a3770f | code | 1272 | # Minimum Constant Liar
function calculate_liars(::MinimumConstantLiar,
tmp_surr::AbstractSurrogate,
surr::AbstractSurrogate,
new_x)
new_y = minimum(surr.y)
add_point!(tmp_surr, new_x, new_y)
end
# Maximum Constant Liar
function calculate_liars(::MaximumConstantLiar,
tmp_surr::AbstractSurrogate,
surr::AbstractSurrogate,
new_x)
new_y = maximum(surr.y)
add_point!(tmp_surr, new_x, new_y)
end
# Mean Constant Liar
function calculate_liars(::MeanConstantLiar,
tmp_surr::AbstractSurrogate,
surr::AbstractSurrogate,
new_x)
new_y = mean(surr.y)
add_point!(tmp_surr, new_x, new_y)
end
# Kriging Believer
function calculate_liars(::KrigingBeliever, tmp_k::Kriging, k::Kriging, new_x)
new_y = k(new_x)
add_point!(tmp_k, new_x, new_y)
end
# Kriging Believer Upper Bound
function calculate_liars(::KrigingBelieverUpperBound, tmp_k::Kriging, k::Kriging, new_x)
new_y = k(new_x) + 3 * std_error_at_point(k, new_x)
add_point!(tmp_k, new_x, new_y)
end
# Kriging Believer Lower Bound
function calculate_liars(::KrigingBelieverLowerBound, tmp_k::Kriging, k::Kriging, new_x)
new_y = k(new_x) - 3 * std_error_at_point(k, new_x)
add_point!(tmp_k, new_x, new_y)
end
| Surrogates | https://github.com/SciML/Surrogates.jl.git |
|
[
"MIT"
] | 6.10.0 | ba29564853a3f7b10eb699f1e99a62cdb6a3770f | code | 1922 | using IterativeSolvers
using ExtendableSparse
using LinearAlgebra
mutable struct Wendland{X, Y, L, U, C, I, T, E} <: AbstractSurrogate
x::X
y::Y
lb::L
ub::U
coeff::C
maxiters::I
tol::T
eps::E
end
@inline _l(s, k) = floor(s / 2) + k + 1
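# The kernel below is the compactly supported Wendland function (only k = 1 is implemented):
# for r = eps * ||x|| <= 1 it evaluates (1 - r)^(ℓ + 1) * ((ℓ + 1) * r + 1) with ℓ = _l(d, 1),
# and it is identically zero outside that support.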
function _wendland(x, eps)
r = eps * norm(x)
val = (1.0 - r)
if val >= 0
dim = length(x)
#at the moment only k = 1 is supported, but we could also support
# missing wendland (k=1/2,k=3/2,k=5/2), and different k's.
ell = _l(dim, 1)
powerTerm = ell + 1.0
val = (1.0 - r)
return val^powerTerm * (powerTerm * r + 1.0)
else
return zero(eltype(x[1]))
end
end
function _calc_coeffs_wend(x, y, eps, maxiters, tol)
n = length(x)
W = ExtendableSparseMatrix{eltype(x[1]), Int}(n, n)
@inbounds for i in 1:n
k = i #wendland is symmetric
for j in k:n
W[i, j] = _wendland(x[i] .- x[j], eps)
end
end
U = Symmetric(W, :U)
return cg(U, y, maxiter = maxiters, reltol = tol)
end
function Wendland(x, y, lb, ub; eps = 1.0, maxiters = 300, tol = 1e-6)
c = _calc_coeffs_wend(x, y, eps, maxiters, tol)
return Wendland(x, y, lb, ub, c, maxiters, tol, eps)
end
function (wend::Wendland)(val)
# Check to make sure dimensions of input matches expected dimension of surrogate
_check_dimension(wend, val)
    #center the kernel at each stored sample, mirroring the collocation matrix built in _calc_coeffs_wend
    return sum(wend.coeff[j] * _wendland(val .- wend.x[j], wend.eps)
               for j in 1:length(wend.coeff))
end
function add_point!(wend::Wendland, new_x, new_y)
if (length(new_x) == 1 && length(new_x[1]) == 1) ||
(length(new_x) > 1 && length(new_x[1]) == 1 && length(wend.lb) > 1)
push!(wend.x, new_x)
push!(wend.y, new_y)
else
append!(wend.x, new_x)
append!(wend.y, new_y)
end
wend.coeff = _calc_coeffs_wend(wend.x, wend.y, wend.eps, wend.maxiters, wend.tol)
nothing
end
| Surrogates | https://github.com/SciML/Surrogates.jl.git |
|
[
"MIT"
] | 6.10.0 | ba29564853a3f7b10eb699f1e99a62cdb6a3770f | code | 448 | remove_tracker(x) = x
_match_container(y, y_el::Number) = first(y)
_match_container(y, y_el) = y
_expected_dimension(x) = length(x[1])
function _check_dimension(surr, input)
expected_dim = _expected_dimension(surr.x)
input_dim = length(input)
if input_dim != expected_dim
throw(ArgumentError("This surrogate expects $expected_dim-dimensional inputs, but the input had dimension $input_dim."))
end
return nothing
end
| Surrogates | https://github.com/SciML/Surrogates.jl.git |
|
[
"MIT"
] | 6.10.0 | ba29564853a3f7b10eb699f1e99a62cdb6a3770f | code | 4993 | using Surrogates
using LinearAlgebra
using Zygote
using Test
#using Zygote: @nograd
#=
#FORWARD
###### 1D ######
lb = 0.0
ub = 10.0
n = 5
x = sample(n,lb,ub,SobolSample())
f = x -> x^2
y = f.(x)
#Radials
my_rad = RadialBasis(x,y,lb,ub,x->norm(x),2)
g = x -> ForwardDiff.derivative(my_rad,x)
g(5.0)
#Kriging
p = 1.5
my_krig = Kriging(x,y,p)
g = x -> ForwardDiff.derivative(my_krig,x)
g(5.0)
#Linear Surrogate
my_linear = LinearSurrogate(x,y,lb,ub)
g = x -> ForwardDiff.derivative(my_linear,x)
g(5.0)
#Inverse distance
p = 1.4
my_inverse = InverseDistanceSurrogate(x,y,p,lb,ub)
g = x -> ForwardDiff.derivative(my_inverse,x)
g(5.0)
#Lobachevsky
n = 4
α = 2.4
my_loba = LobachevskySurrogate(x,y,α,n,lb,ub)
g = x -> ForwardDiff.derivative(my_loba,x)
g(5.0)
#Second order polynomial
my_second = SecondOrderPolynomialSurrogate(x,y,lb,ub)
g = x -> ForwardDiff.derivative(my_second,x)
g(5.0)
###### ND ######
lb = [0.0,0.0]
ub = [10.0,10.0]
n = 5
x = sample(n,lb,ub,SobolSample())
f = x -> x[1]*x[2]
y = f.(x)
#Radials
my_rad = RadialBasis(x,y,[lb,ub],z->norm(z),2)
g = x -> ForwardDiff.gradient(my_rad,x)
g([2.0,5.0])
#Kriging
theta = [2.0,2.0]
p = [1.9,1.9]
my_krig = Kriging(x,y,p,theta)
g = x -> ForwardDiff.gradient(my_krig,x)
g([2.0,5.0])
#Linear Surrogate
my_linear = LinearSurrogate(x,y,lb,ub)
g = x -> ForwardDiff.gradient(my_linear,x)
g([2.0,5.0])
#Inverse Distance
p = 1.4
my_inverse = InverseDistanceSurrogate(x,y,p,lb,ub)
g = x -> ForwardDiff.gradient(my_inverse,x)
g([2.0,5.0])
#Lobachevsky
alpha = [1.4,1.4]
n = 4
my_loba_ND = LobachevskySurrogate(x,y,alpha,n,lb,ub)
g = x -> ForwardDiff.gradient(my_loba_ND,x)
g([2.0,5.0])
#Second order polynomial
my_second = SecondOrderPolynomialSurrogate(x,y,lb,ub)
g = x -> ForwardDiff.gradient(my_second,x)
g([2.0,5.0])
=#
##############
### ZYGOTE ###
##############
############
#### 1D ####
############
lb = 0.0
ub = 3.0
n = 10
x = sample(n, lb, ub, SobolSample())
f = x -> x^2
y = f.(x)
#Radials
@testset "Radials 1D" begin
my_rad = RadialBasis(x, y, lb, ub, rad = linearRadial())
g = x -> my_rad'(x)
g(5.0)
end
# #Kriging
@testset "Kriging 1D" begin
my_p = 1.5
my_krig = Kriging(x, y, lb, ub, p = my_p)
g = x -> my_krig'(x)
g(5.0)
end
# #Linear Surrogate
@testset "Linear Surrogate" begin
my_linear = LinearSurrogate(x, y, lb, ub)
g = x -> my_linear'(x)
g(5.0)
end
#Inverse distance
@testset "Inverse Distance" begin
my_p = 1.4
my_inverse = InverseDistanceSurrogate(x, y, lb, ub, p = my_p)
g = x -> my_inverse'(x)
g(5.0)
end
#Second order polynomial
@testset "Second Order Polynomial" begin
my_second = SecondOrderPolynomialSurrogate(x, y, lb, ub)
g = x -> my_second'(x)
g(5.0)
end
#Lobachevsky
@testset "Lobachevsky" begin
n = 4
α = 2.4
my_loba = LobachevskySurrogate(x, y, lb, ub, alpha = α, n = 4)
g = x -> my_loba'(x)
g(0.0)
end
#Wendland
@testset "Wendland" begin
my_wend = Wendland(x, y, lb, ub)
g = x -> my_wend'(x)
g(3.0)
end
# MOE and VariableFidelity for free because they are Linear combinations
# of differentiable surrogates
# #Gek
@testset "GEK" begin
n = 10
lb = 0.0
ub = 5.0
x = sample(n, lb, ub, SobolSample())
f = x -> x^2
y1 = f.(x)
der = x -> 2 * x
y2 = der.(x)
y = vcat(y1, y2)
my_gek = GEK(x, y, lb, ub)
g = x -> my_gek'(x)
g(3.0)
end
################
###### ND ######
################
lb = [0.0, 0.0]
ub = [10.0, 10.0]
n = 5
x = sample(n, lb, ub, SobolSample())
f = x -> x[1] * x[2]
y = f.(x)
#Radials
@testset "Radials ND" begin
my_rad = RadialBasis(x, y, lb, ub, rad = linearRadial(), scale_factor = 2.1)
g = x -> Zygote.gradient(my_rad, x)
g((2.0, 5.0))
end
# Kriging
@testset "Kriging ND" begin
my_theta = [2.0, 2.0]
my_p = [1.9, 1.9]
my_krig = Kriging(x, y, lb, ub, p = my_p, theta = my_theta)
g = x -> Zygote.gradient(my_krig, x)
g((2.0, 5.0))
end
# Linear Surrogate
@testset "Linear Surrogate ND" begin
my_linear = LinearSurrogate(x, y, lb, ub)
g = x -> Zygote.gradient(my_linear, x)
g((2.0, 5.0))
end
#Inverse Distance
@testset "Inverse Distance ND" begin
my_p = 1.4
my_inverse = InverseDistanceSurrogate(x, y, lb, ub, p = my_p)
g = x -> Zygote.gradient(my_inverse, x)
g((2.0, 5.0))
end
#Lobachevsky not working yet weird issue with Zygote @nograd
#=
Zygote.refresh()
alpha = [1.4,1.4]
n = 4
my_loba_ND = LobachevskySurrogate(x,y,alpha,n,lb,ub)
g = x -> Zygote.gradient(my_loba_ND,x)
g((2.0,5.0))
=#
# Second order polynomial mutating arrays
@testset "SecondOrderPolynomialSurrogate ND" begin
my_second = SecondOrderPolynomialSurrogate(x, y, lb, ub)
g = x -> Zygote.gradient(my_second, x)
g((2.0, 5.0))
end
#wendland
@testset "Wendland ND" begin
my_wend_ND = Wendland(x, y, lb, ub)
g = x -> Zygote.gradient(my_wend_ND, x)
g((2.0, 5.0))
end
# #MOE and VariableFidelity for free because they are Linear combinations
# #of differentiable surrogates
| Surrogates | https://github.com/SciML/Surrogates.jl.git |
|
[
"MIT"
] | 6.10.0 | ba29564853a3f7b10eb699f1e99a62cdb6a3770f | code | 1251 | using Surrogates
using Test
#1D
n = 10
lb = 0.0
ub = 5.0
x = sample(n, lb, ub, SobolSample())
f = x -> x^2
y1 = f.(x)
der = x -> 2 * x
y2 = der.(x)
y = vcat(y1, y2)
my_gek = GEK(x, y, lb, ub)
val = my_gek(2.0)
std_err = std_error_at_point(my_gek, 1.0)
add_point!(my_gek, 2.5, 2.5^2)
# Test that input dimension is properly checked for 1D GEK surrogates
@test_throws ArgumentError my_gek(Float64[])
@test_throws ArgumentError my_gek((2.0, 3.0, 4.0))
#ND
n = 10
d = 2
lb = [0.0, 0.0]
ub = [5.0, 5.0]
x = sample(n, lb, ub, SobolSample())
f = x -> x[1]^2 + x[2]^2
y1 = f.(x)
grad1 = x -> 2 * x[1]
grad2 = x -> 2 * x[2]
function create_grads(n, d, grad1, grad2, y)
c = 0
y2 = zeros(eltype(y[1]), n * d)
for i in 1:n
y2[i + c] = grad1(x[i])
y2[i + c + 1] = grad2(x[i])
c = c + 1
end
return y2
end
y2 = create_grads(n, d, grad1, grad2, y)
y = vcat(y1, y2)
my_gek_ND = GEK(x, y, lb, ub)
val = my_gek_ND((1.0, 1.0))
std_err = std_error_at_point(my_gek_ND, (1.0, 1.0))
add_point!(my_gek_ND, (2.0, 2.0), 8.0)
# Test that input dimension is properly checked for ND GEK surrogates
@test_throws ArgumentError my_gek_ND(Float64[])
@test_throws ArgumentError my_gek_ND(2.0)
@test_throws ArgumentError my_gek_ND((2.0, 3.0, 4.0))
| Surrogates | https://github.com/SciML/Surrogates.jl.git |
|
[
"MIT"
] | 6.10.0 | ba29564853a3f7b10eb699f1e99a62cdb6a3770f | code | 8125 | using Surrogates
using Zygote
using Test
# #water flow function tests
function water_flow(x)
r_w = x[1]
r = x[2]
T_u = x[3]
H_u = x[4]
T_l = x[5]
H_l = x[6]
L = x[7]
K_w = x[8]
log_val = log(r / r_w)
return (2 * pi * T_u * (H_u - H_l)) /
(log_val * (1 + (2 * L * T_u / (log_val * r_w^2 * K_w)) + T_u / T_l))
end
n = 1000
lb = [0.05, 100, 63070, 990, 63.1, 700, 1120, 9855]
ub = [0.15, 50000, 115600, 1110, 116, 820, 1680, 12045]
x = sample(n, lb, ub, SobolSample())
grads = gradient.(water_flow, x)
y = water_flow.(x)
n_test = 100
x_test = sample(n_test, lb, ub, GoldenSample())
y_true = water_flow.(x_test)
@testset "Test 1: Water Flow Function Test (dimensions = 8; n_comp = 2; extra_points = 2)" begin
n_comp = 2
delta_x = 0.0001
extra_points = 2
initial_theta = [0.01 for i in 1:n_comp]
g = GEKPLS(x, y, grads, n_comp, delta_x, lb, ub, extra_points, initial_theta)
y_pred = g.(x_test)
rmse = sqrt(sum(((y_pred - y_true) .^ 2) / n_test))
@test isapprox(rmse, 0.03, atol = 0.02) #rmse: 0.039
end
@testset "Test 2: Water Flow Function Test (dimensions = 8; n_comp = 3; extra_points = 2)" begin
n_comp = 3
delta_x = 0.0001
extra_points = 2
initial_theta = [0.01 for i in 1:n_comp]
g = GEKPLS(x, y, grads, n_comp, delta_x, lb, ub, extra_points, initial_theta)
y_pred = g.(x_test)
rmse = sqrt(sum(((y_pred - y_true) .^ 2) / n_test))
@test isapprox(rmse, 0.02, atol = 0.01) #rmse: 0.027
end
@testset "Test 3: Water Flow Function Test (dimensions = 8; n_comp = 3; extra_points = 3)" begin
n_comp = 3
delta_x = 0.0001
extra_points = 3
initial_theta = [0.01 for i in 1:n_comp]
g = GEKPLS(x, y, grads, n_comp, delta_x, lb, ub, extra_points, initial_theta)
y_pred = g.(x_test)
rmse = sqrt(sum(((y_pred - y_true) .^ 2) / n_test))
@test isapprox(rmse, 0.02, atol = 0.01) #rmse: 0.027
end
# ## welded beam tests
function welded_beam(x)
h = x[1]
l = x[2]
t = x[3]
a = 6000 / (sqrt(2) * h * l)
b = (6000 * (14 + 0.5 * l) * sqrt(0.25 * (l^2 + (h + t)^2))) /
(2 * (0.707 * h * l * (l^2 / 12 + 0.25 * (h + t)^2)))
return (sqrt(a^2 + b^2 + l * a * b)) / (sqrt(0.25 * (l^2 + (h + t)^2)))
end
n = 1000
lb = [0.125, 5.0, 5.0]
ub = [1.0, 10.0, 10.0]
x = sample(n, lb, ub, SobolSample())
grads = gradient.(welded_beam, x)
y = welded_beam.(x)
n_test = 100
x_test = sample(n_test, lb, ub, GoldenSample())
y_true = welded_beam.(x_test)
@testset "Test 4: Welded Beam Function Test (dimensions = 3; n_comp = 3; extra_points = 2)" begin
n_comp = 3
delta_x = 0.0001
extra_points = 2
initial_theta = [0.01 for i in 1:n_comp]
g = GEKPLS(x, y, grads, n_comp, delta_x, lb, ub, extra_points, initial_theta)
y_pred = g.(x_test)
rmse = sqrt(sum(((y_pred - y_true) .^ 2) / n_test))
@test isapprox(rmse, 50.0, atol = 0.5)#rmse: 38.988
end
@testset "Test 5: Welded Beam Function Test (dimensions = 3; n_comp = 2; extra_points = 2)" begin
n_comp = 2
delta_x = 0.0001
extra_points = 2
initial_theta = [0.01 for i in 1:n_comp]
g = GEKPLS(x, y, grads, n_comp, delta_x, lb, ub, extra_points, initial_theta)
y_pred = g.(x_test)
rmse = sqrt(sum(((y_pred - y_true) .^ 2) / n_test))
@test isapprox(rmse, 51.0, atol = 0.5) #rmse: 39.481
end
## increasing extra points increases accuracy
@testset "Test 6: Welded Beam Function Test (dimensions = 3; n_comp = 2; extra_points = 4)" begin
n_comp = 2
delta_x = 0.0001
extra_points = 4
initial_theta = [0.01 for i in 1:n_comp]
g = GEKPLS(x, y, grads, n_comp, delta_x, lb, ub, extra_points, initial_theta)
y_pred = g.(x_test)
rmse = sqrt(sum(((y_pred - y_true) .^ 2) / n_test))
@test isapprox(rmse, 49.0, atol = 0.5) #rmse: 37.87
end
## sphere function tests
function sphere_function(x)
return sum(x .^ 2)
end
## 3D
n = 100
lb = [-5.0, -5.0, -5.0]
ub = [5.0, 5.0, 5.0]
x = sample(n, lb, ub, SobolSample())
grads = gradient.(sphere_function, x)
y = sphere_function.(x)
n_test = 100
x_test = sample(n_test, lb, ub, GoldenSample())
y_true = sphere_function.(x_test)
@testset "Test 7: Sphere Function Test (dimensions = 3; n_comp = 2; extra_points = 2)" begin
n_comp = 2
delta_x = 0.0001
extra_points = 2
initial_theta = [0.01 for i in 1:n_comp]
g = GEKPLS(x, y, grads, n_comp, delta_x, lb, ub, extra_points, initial_theta)
y_pred = g.(x_test)
rmse = sqrt(sum(((y_pred - y_true) .^ 2) / n_test))
@test isapprox(rmse, 0.001, atol = 0.05) #rmse: 0.00083
end
## 2D
n = 50
lb = [-10.0, -10.0]
ub = [10.0, 10.0]
x = sample(n, lb, ub, SobolSample())
grads = gradient.(sphere_function, x)
y = sphere_function.(x)
n_test = 10
x_test = sample(n_test, lb, ub, GoldenSample())
y_true = sphere_function.(x_test)
@testset "Test 8: Sphere Function Test (dimensions = 2; n_comp = 2; extra_points = 2" begin
n_comp = 2
delta_x = 0.0001
extra_points = 2
initial_theta = [0.01 for i in 1:n_comp]
g = GEKPLS(x, y, grads, n_comp, delta_x, lb, ub, extra_points, initial_theta)
y_pred = g.(x_test)
rmse = sqrt(sum(((y_pred - y_true) .^ 2) / n_test))
@test isapprox(rmse, 0.1, atol = 0.5) #rmse: 0.0022
end
@testset "Test 9: Add Point Test (dimensions = 3; n_comp = 2; extra_points = 2)" begin
#first we create a surrogate model with just 3 input points
initial_x_vec = [(1.0, 2.0, 3.0), (4.0, 5.0, 6.0), (7.0, 8.0, 9.0)]
initial_y = sphere_function.(initial_x_vec)
initial_grads = gradient.(sphere_function, initial_x_vec)
lb = [-5.0, -5.0, -5.0]
ub = [10.0, 10.0, 10.0]
n_comp = 2
delta_x = 0.0001
extra_points = 2
initial_theta = [0.01 for i in 1:n_comp]
g = GEKPLS(initial_x_vec, initial_y, initial_grads, n_comp, delta_x, lb, ub,
extra_points,
initial_theta)
n_test = 100
x_test = sample(n_test, lb, ub, GoldenSample())
y_true = sphere_function.(x_test)
y_pred1 = g.(x_test)
rmse1 = sqrt(sum(((y_pred1 - y_true) .^ 2) / n_test)) #rmse1 = 31.91
#then we update the model with more points to see if performance improves
n = 100
x = sample(n, lb, ub, SobolSample())
grads = gradient.(sphere_function, x)
y = sphere_function.(x)
for i in 1:size(x, 1)
add_point!(g, x[i], y[i], grads[i][1])
end
y_pred2 = g.(x_test)
rmse2 = sqrt(sum(((y_pred2 - y_true) .^ 2) / n_test)) #rmse2 = 0.0015
@test (rmse2 < rmse1)
end
@testset "Test 10: Check optimization (dimensions = 3; n_comp = 2; extra_points = 2)" begin
lb = [-5.0, -5.0, -5.0]
ub = [10.0, 10.0, 10.0]
n_comp = 2
delta_x = 0.0001
extra_points = 2
initial_theta = [0.01 for i in 1:n_comp]
n = 100
x = sample(n, lb, ub, SobolSample())
grads = gradient.(sphere_function, x)
y = sphere_function.(x)
g = GEKPLS(x, y, grads, n_comp, delta_x, lb, ub, extra_points, initial_theta)
x_point, minima = surrogate_optimize(sphere_function, SRBF(), lb, ub, g,
RandomSample(); maxiters = 20,
num_new_samples = 20, needs_gradient = true)
@test isapprox(minima, 0.0, atol = 0.0001)
end
@testset "Test 11: Check gradient (dimensions = 3; n_comp = 2; extra_points = 3)" begin
lb = [-5.0, -5.0, -5.0]
ub = [10.0, 10.0, 10.0]
n_comp = 2
delta_x = 0.0001
extra_points = 3
initial_theta = [0.01 for i in 1:n_comp]
n = 100
x = sample(n, lb, ub, SobolSample())
grads = gradient.(sphere_function, x)
y = sphere_function.(x)
g = GEKPLS(x, y, grads, n_comp, delta_x, lb, ub, extra_points, initial_theta)
grad_surr = gradient(g, (1.0, 1.0, 1.0))
#test at a single point
grad_true = gradient(sphere_function, (1.0, 1.0, 1.0))
    bool_tup = isapprox.((grad_surr[1] .- grad_true[1]), (0.0, 0.0, 0.0), atol = 0.001)
@test (true, true, true) == bool_tup
#test for a bunch of points
grads_surr_vec = gradient.(g, x)
sum_of_rmse = 0.0
for i in eachindex(grads_surr_vec)
sum_of_rmse += sqrt((sum((grads_surr_vec[i][1] .- grads[i][1]) .^ 2) / 3.0))
end
@test isapprox(sum_of_rmse, 0.05, atol = 0.01)
end
| Surrogates | https://github.com/SciML/Surrogates.jl.git |
|
[
"MIT"
] | 6.10.0 | ba29564853a3f7b10eb699f1e99a62cdb6a3770f | code | 4020 | using LinearAlgebra
using Surrogates
using Test
using Statistics
#1D
lb = 0.0
ub = 10.0
f = x -> log(x) * exp(x)
x = sample(5, lb, ub, SobolSample())
y = f.(x)
my_p = 1.9
# Check hyperparameter validation for constructing 1D Kriging surrogates
@test_throws ArgumentError my_k=Kriging(x, y, lb, ub, p = -1.0)
@test_throws ArgumentError my_k=Kriging(x, y, lb, ub, p = 3.0)
@test_throws ArgumentError my_k=Kriging(x, y, lb, ub, theta = -2.0)
my_k = Kriging(x, y, lb, ub, p = my_p)
@test my_k.theta ≈ 0.5 * std(x)^(-my_p)
# Check input dimension validation for 1D Kriging surrogates
@test_throws ArgumentError my_k(rand(3))
@test_throws ArgumentError my_k(Float64[])
add_point!(my_k, 4.0, 75.68)
add_point!(my_k, [5.0, 6.0], [238.86, 722.84])
pred = my_k(5.5)
@test 238.86 ≤ pred ≤ 722.84
@test my_k(5.0) ≈ 238.86
@test std_error_at_point(my_k, 5.0) < 1e-3 * my_k(5.0)
#WITHOUT ADD POINT
x = [1.0, 2.0, 3.0]
y = [4.0, 5.0, 6.0]
my_p = 1.3
my_k = Kriging(x, y, lb, ub, p = my_p)
est = my_k(1.0)
@test est == 4.0
std_err = std_error_at_point(my_k, 1.0)
@test std_err < 10^(-6)
#WITH ADD POINT adding singleton
x = [1.0, 2.0, 3.0]
y = [4.0, 5.0, 6.0]
my_p = 1.3
my_k = Kriging(x, y, lb, ub, p = my_p)
add_point!(my_k, 4.0, 9.0)
est = my_k(4.0)
std_err = std_error_at_point(my_k, 4.0)
@test std_err < 10^(-6)
#WITH ADD POINT adding more
x = [1.0, 2.0, 3.0]
y = [4.0, 5.0, 6.0]
my_p = 1.3
my_k = Kriging(x, y, lb, ub, p = my_p)
add_point!(my_k, [4.0, 5.0, 6.0], [9.0, 13.0, 15.0])
est = my_k(4.0)
std_err = std_error_at_point(my_k, 4.0)
@test std_err < 10^(-6)
#Testing kwargs 1D
kwar_krig = Kriging(x, y, lb, ub);
# Check hyperparameter initialization for 1D Kriging surrogates
p_expected = 2.0
@test kwar_krig.p == p_expected
@test kwar_krig.theta == 0.5 / std(x)^p_expected
#ND
lb = [0.0, 0.0, 1.0]
ub = [5.0, 7.5, 10.0]
x = sample(5, lb, ub, SobolSample())
f = x -> x[1] + x[2] * x[3]
y = f.(x)
my_theta = [2.0, 2.0, 2.0]
my_p = [1.9, 1.9, 1.9]
my_k = Kriging(x, y, lb, ub, p = my_p, theta = my_theta)
add_point!(my_k, (4.0, 3.2, 9.5), 34.4)
add_point!(my_k, [(1.0, 4.65, 6.4), (2.3, 5.4, 6.7)], [30.76, 38.48])
pred = my_k((3.5, 5.5, 6.5))
#test sets
#WITHOUT ADD POINT
x = [(1.0, 2.0, 3.0), (4.0, 5.0, 6.0), (7.0, 8.0, 9.0)]
y = [1.0, 2.0, 3.0]
my_p = [1.0, 1.0, 1.0]
my_theta = [2.0, 2.0, 2.0]
my_k = Kriging(x, y, lb, ub, p = my_p, theta = my_theta)
est = my_k((1.0, 2.0, 3.0))
std_err = std_error_at_point(my_k, (1.0, 2.0, 3.0))
#WITH ADD POINT adding singleton
x = [(1.0, 2.0, 3.0), (4.0, 5.0, 6.0), (7.0, 8.0, 9.0)]
y = [1.0, 2.0, 3.0]
my_p = [1.0, 1.0, 1.0]
my_theta = [2.0, 2.0, 2.0]
my_k = Kriging(x, y, lb, ub, p = my_p, theta = my_theta)
add_point!(my_k, (10.0, 11.0, 12.0), 4.0)
est = my_k((10.0, 11.0, 12.0))
std_err = std_error_at_point(my_k, (10.0, 11.0, 12.0))
@test std_err < 10^(-6)
#WITH ADD POINT ADDING MORE
x = [(1.0, 2.0, 3.0), (4.0, 5.0, 6.0), (7.0, 8.0, 9.0)]
y = [1.0, 2.0, 3.0]
my_p = [1.0, 1.0, 1.0]
my_theta = [2.0, 2.0, 2.0]
my_k = Kriging(x, y, lb, ub, p = my_p, theta = my_theta)
add_point!(my_k, [(10.0, 11.0, 12.0), (13.0, 14.0, 15.0)], [4.0, 5.0])
est = my_k((10.0, 11.0, 12.0))
std_err = std_error_at_point(my_k, (10.0, 11.0, 12.0))
@test std_err < 10^(-6)
#test kwargs ND (hyperparameter initialization)
kwarg_krig_ND = Kriging(x, y, lb, ub)
# Check hyperparameter validation for ND kriging surrogate construction
@test_throws ArgumentError Kriging(x, y, lb, ub, p = 3 * my_p)
@test_throws ArgumentError Kriging(x, y, lb, ub, p = -my_p)
@test_throws ArgumentError Kriging(x, y, lb, ub, theta = -my_theta)
# Check input dimension validation for ND kriging surrogates
@test_throws ArgumentError kwarg_krig_ND(1.0)
@test_throws ArgumentError kwarg_krig_ND([1.0])
@test_throws ArgumentError kwarg_krig_ND([2.0, 3.0])
@test_throws ArgumentError kwarg_krig_ND(ones(5))
# Test hyperparameter initialization
d = length(x[3])
p_expected = 2.0
@test all(==(p_expected), kwarg_krig_ND.p)
@test all(kwarg_krig_ND.theta .≈ [0.5 / std(x_i[ℓ] for x_i in x)^p_expected for ℓ in 1:d])
| Surrogates | https://github.com/SciML/Surrogates.jl.git |
|
[
"MIT"
] | 6.10.0 | ba29564853a3f7b10eb699f1e99a62cdb6a3770f | code | 736 | using Surrogates
#=
#1D MOE
n = 30
lb = 0.0
ub = 5.0
x = Surrogates.sample(n,lb,ub,RandomSample())
f = x-> 2*x
y = f.(x)
#Standard definition
my_moe = MOE(x,y,lb,ub)
val = my_moe(3.0)
#Local surrogates redefinition
my_local_kind = [InverseDistanceStructure(p = 1.0),
KrigingStructure(p=1.0, theta=1.0)]
my_moe = MOE(x,y,lb,ub,k = 2,local_kind = my_local_kind)
#ND MOE
n = 30
lb = [0.0,0.0]
ub = [5.0,5.0]
x = sample(n,lb,ub,LatinHypercubeSample())
f = x -> x[1]*x[2]
y = f.(x)
my_moe_ND = MOE(x,y,lb,ub)
val = my_moe_ND((1.0,1.0))
#Local surr redefinition
my_locals = [InverseDistanceStructure(p = 1.0),
RandomForestStructure(num_round=1)]
my_moe_redef = MOE(x,y,lb,ub,k = 2,local_kind = my_locals)
=#
| Surrogates | https://github.com/SciML/Surrogates.jl.git |
|
[
"MIT"
] | 6.10.0 | ba29564853a3f7b10eb699f1e99a62cdb6a3770f | code | 5111 | using Base
using Test
using LinearAlgebra
using Surrogates
#1D
lb = 0.0
ub = 4.0
x = [1.0, 2.0, 3.0]
y = [4.0, 5.0, 6.0]
linear = x -> norm(x)
cubic = x -> x^3
λ = 2.3
multiquadr = x -> sqrt(x^2 + λ^2)
q = 1
my_rad = RadialBasis(x, y, lb, ub, rad = linearRadial())
est = my_rad(3.0)
@test est ≈ 6.0
add_point!(my_rad, 4.0, 10.0)
est = my_rad(3.0)
@test est ≈ 6.0
add_point!(my_rad, [3.2, 3.3, 3.4], [8.0, 9.0, 10.0])
est = my_rad(3.0)
@test est ≈ 6.0
my_rad = RadialBasis(x, y, lb, ub, rad = cubicRadial())
q = 2
my_rad = RadialBasis(x, y, lb, ub, rad = multiquadricRadial())
# Test that input dimension is properly checked for 1D radial surrogates
@test_throws ArgumentError my_rad(Float64[])
@test_throws ArgumentError my_rad((2.0, 3.0, 4.0))
#ND
x = [(1.0, 2.0, 3.0), (4.0, 5.0, 6.0), (7.0, 8.0, 9.0)]
y = [4.0, 5.0, 6.0]
lb = [0.0, 3.0, 6.0]
ub = [4.0, 7.0, 10.0]
#bounds = [[0.0, 3.0, 6.0], [4.0, 7.0, 10.0]]
my_rad = RadialBasis(x, y, lb, ub)
est = my_rad((1.0, 2.0, 3.0))
@test est ≈ 4.0
#WITH ADD_POINT, adding singleton
x = [(1.0, 2.0, 3.0), (4.0, 5.0, 6.0), (7.0, 8.0, 9.0)]
y = [4.0, 5.0, 6.0]
lb = [0.0, 3.0, 6.0]
ub = [4.0, 7.0, 10.0]
#bounds = [[0.0,3.0,6.0],[4.0,7.0,10.0]]
my_rad = RadialBasis(x, y, lb, ub, rad = linearRadial(), scale_factor = 1.0)
add_point!(my_rad, (9.0, 10.0, 11.0), 10.0)
est = my_rad((1.0, 2.0, 3.0))
@test est ≈ 4.0
#WITH ADD_POINT, adding more
x = [(1.0, 2.0, 3.0), (4.0, 5.0, 6.0), (7.0, 8.0, 9.0)]
y = [4.0, 5.0, 6.0]
#bounds = [[0.0,3.0,6.0],[4.0,7.0,10.0]]
lb = [0.0, 3.0, 6.0]
ub = [4.0, 7.0, 10.0]
my_rad = RadialBasis(x, y, lb, ub)
add_point!(my_rad, [(9.0, 10.0, 11.0), (12.0, 13.0, 14.0)], [10.0, 11.0])
est = my_rad((1.0, 2.0, 3.0))
@test est ≈ 4.0
lb = [0.0, 0.0, 0.0]
ub = [10.0, 10.0, 10.0]
#bounds = [lb,ub]
x = [(1.0, 2.0, 3.0), (4.0, 5.0, 6.0), (7.0, 8.0, 9.0)]
y = [4.0, 5.0, 6.0]
my_rad_ND = RadialBasis(x, y, lb, ub)
add_point!(my_rad_ND, (3.5, 4.5, 1.2), 18.9)
add_point!(my_rad_ND, [(3.2, 1.2, 6.7), (3.4, 9.5, 7.4)], [25.72, 239.0])
my_rad_ND = RadialBasis(x, y, lb, ub, rad = cubicRadial())
my_rad_ND = RadialBasis(x, y, lb, ub, rad = multiquadricRadial())
prediction = my_rad_ND((1.0, 1.0, 1.0))
f = x -> x[1] * x[2]
lb = [1.0, 2.0]
ub = [10.0, 8.5]
x = sample(500, lb, ub, SobolSample())
push!(x, (1.0, 2.0))
y = f.(x)
my_radial_basis = RadialBasis(x, y, lb, ub, rad = linearRadial())
@test my_radial_basis((1.0, 2.0)) ≈ 2
my_radial_basis = RadialBasis(x, y, lb, ub, rad = linearRadial())
@test my_radial_basis((1.0, 2.0)) ≈ 2
f = x -> x[1] * x[2]
lb = [1.0, 2.0]
ub = [10.0, 8.5]
x = sample(5, lb, ub, SobolSample())
push!(x, (1.0, 2.0))
y = f.(x)
my_radial_basis = RadialBasis(x, y, lb, ub, rad = linearRadial())
@test my_radial_basis((1.0, 2.0)) ≈ 2
# Test that input dimension is properly checked for ND radial surrogates
@test_throws ArgumentError my_radial_basis((1.0,))
@test_throws ArgumentError my_radial_basis((2.0, 3.0, 4.0))
# Multi-output
f = x -> [x^2, x]
lb = 1.0
ub = 10.0
x = sample(5, lb, ub, SobolSample())
push!(x, 2.0)
y = f.(x)
my_radial_basis = RadialBasis(x, y, lb, ub, rad = linearRadial())
my_radial_basis(2.0)
@test my_radial_basis(2.0) ≈ [4, 2]
f = x -> [x[1] * x[2], x[1] + x[2]^2]
lb = [1.0, 2.0]
ub = [10.0, 8.5]
x = sample(5, lb, ub, SobolSample())
push!(x, (1.0, 2.0))
y = f.(x)
my_radial_basis = RadialBasis(x, y, lb, ub, rad = linearRadial())
my_radial_basis((1.0, 2.0))
@test my_radial_basis((1.0, 2.0)) ≈ [2, 5]
x_new = (2.0, 2.0)
y_new = f(x_new)
add_point!(my_radial_basis, x_new, y_new)
@test my_radial_basis(x_new) ≈ y_new
#sparse
#1D
lb = 0.0
ub = 4.0
x = [1.0, 2.0, 3.0]
y = [4.0, 5.0, 6.0]
my_rad = RadialBasis(x, y, lb, ub, rad = linearRadial(), sparse = true)
#ND
x = [(1.0, 2.0, 3.0), (4.0, 5.0, 6.0), (7.0, 8.0, 9.0)]
y = [4.0, 5.0, 6.0]
lb = [0.0, 3.0, 6.0]
ub = [4.0, 7.0, 10.0]
#bounds = [[0.0, 3.0, 6.0], [4.0, 7.0, 10.0]]
my_rad = RadialBasis(x, y, lb, ub, sparse = true)
#test to verify multiquadricRadial with default scale_factor
lb = [0.0, 0.0, 0.0]
ub = [3.0, 3.0, 3.0]
n_samples = 100
g(x) = sqrt(x[1]^2 + x[2]^2 + x[3]^2)
x = sample(n_samples, lb, ub, SobolSample())
y = g.(x)
mq_rad = RadialBasis(x, y, lb, ub, rad = multiquadricRadial())
@test isapprox(mq_rad([2.0, 2.0, 1.0]), g([2.0, 2.0, 1.0]), atol = 0.0001)
mq_rad = RadialBasis(x, y, lb, ub, rad = multiquadricRadial(0.9)) # different shape parameter should not be as accurate
@test !isapprox(mq_rad([2.0, 2.0, 1.0]), g([2.0, 2.0, 1.0]), atol = 0.0001)
# Issue 316
x = sample(1024, [-0.45, -0.4, -0.9], [0.40, 0.55, 0.35], SobolSample())
lb = [-0.45 -0.4 -0.9]
ub = [0.40 0.55 0.35]
function mockvalues(in)
x, y, z = in
p1 = reverse(vec([1.09903695e+01 -1.01500500e+01 -4.06629740e+01 -1.41834931e+01 1.00604784e+01 4.34951623e+00 -1.06519689e-01 -1.93335202e-03]))
p2 = vec([2.12791877 2.12791877 4.23881665 -1.05464575])
f = evalpoly(z, p1)
f += p2[1] * x^2 + p2[2] * y^2 + p2[3] * x^2 * y + p2[4] * x * y^2
f
end
y = mockvalues.(x)
rbf = RadialBasis(x, y, lb, ub, rad = multiquadricRadial(1.788))
test = (lb .+ ub) ./ 2
@test isapprox(rbf(test), mockvalues(test), atol = 0.001)
| Surrogates | https://github.com/SciML/Surrogates.jl.git |
|
[
"MIT"
] | 6.10.0 | ba29564853a3f7b10eb699f1e99a62cdb6a3770f | code | 1221 | """
Sample x+y+z on [0,10]^3,
then minimize it on the section where z is fixed to 10.0,
and verify that the minimum is at (x,y,z)=(0,0,10)
rather than at (0,0,0).
"""
using QuasiMonteCarlo
using Surrogates
using Test
lb = [0.0, 0.0, 0.0]
ub = [10.0, 10.0, 10.0]
x = Surrogates.sample(10, lb, ub, LatinHypercubeSample())
f = x -> x[1] + x[2] + x[3]
y = f.(x)
f([0, 0, 0]) == 0
f_hat = Kriging(x, y, lb, ub)
f_hat([0, 0, 0])
isapprox(f([0, 0, 0]), f_hat([0, 0, 0]))
""" The global minimum is at (0,0) """
(xy_min, f_hat_min) = surrogate_optimize(f,
DYCORS(), lb, ub,
f_hat,
SobolSample())
isapprox(xy_min[1], 0.0, atol = 1e-1)
""" The minimum on the (0,10) section is around (0,10) """
section_sampler_z_is_10 = SectionSample([NaN64, NaN64, 10.0],
Surrogates.RandomSample())
@test [3] == Surrogates.fixed_dimensions(section_sampler_z_is_10)
@test [1, 2] == Surrogates.free_dimensions(section_sampler_z_is_10)
Surrogates.sample(5, lb, ub, section_sampler_z_is_10)
(xy_min, f_hat_min) = surrogate_optimize(f,
EI(), lb, ub,
f_hat,
section_sampler_z_is_10, maxiters = 1000)
isapprox(xy_min[1], 0.0, atol = 0.1)
isapprox(xy_min[2], 0.0, atol = 0.1)
isapprox(xy_min[3], 10.0, atol = 0.1)
| Surrogates | https://github.com/SciML/Surrogates.jl.git |
|
[
"MIT"
] | 6.10.0 | ba29564853a3f7b10eb699f1e99a62cdb6a3770f | code | 1031 | using Surrogates
#1D
n = 10
lb = 0.0
ub = 10.0
x = sample(n, lb, ub, SobolSample())
f = x -> 2 * x
y = f.(x)
my_varfid = VariableFidelitySurrogate(x, y, lb, ub)
val = my_varfid(3.0)
add_point!(my_varfid, 3.0, 6.0)
val = my_varfid(3.0)
my_varfid_change_struct = VariableFidelitySurrogate(x, y, lb, ub, num_high_fidel = 2,
low_fid_structure = InverseDistanceStructure(p = 1.0),
high_fid_structure = RadialBasisStructure(radial_function = linearRadial(),
scale_factor = 1.0,
sparse = false))
#ND
n = 10
lb = [0.0, 0.0]
ub = [5.0, 5.0]
x = sample(n, lb, ub, SobolSample())
f = x -> x[1] * x[2]
y = f.(x)
my_varfidND = VariableFidelitySurrogate(x, y, lb, ub)
val = my_varfidND((2.0, 2.0))
add_point!(my_varfidND, (3.0, 3.0), 9.0)
my_varfidND_change_struct = VariableFidelitySurrogate(x, y, lb, ub, num_high_fidel = 2,
low_fid_structure = InverseDistanceStructure(p = 1.0),
high_fid_structure = RadialBasisStructure(radial_function = linearRadial(),
scale_factor = 1.0,
sparse = false))
| Surrogates | https://github.com/SciML/Surrogates.jl.git |
|
[
"MIT"
] | 6.10.0 | ba29564853a3f7b10eb699f1e99a62cdb6a3770f | code | 842 | using Surrogates
#1D
x = [1.0, 2.0, 3.0]
y = [3.0, 5.0, 7.0]
lb = 0.0
ub = 5.0
my_wend = Wendland(x, y, lb, ub)
add_point!(my_wend, 0.5, 4.0)
val = my_wend(0.5)
# Test that input dimension is properly checked for 1D Wendland surrogates
@test_throws ArgumentError my_wend(Float64[])
@test_throws ArgumentError my_wend((2.0, 3.0, 4.0))
#ND
lb = [0.0, 0.0]
ub = [4.0, 4.0]
x = sample(5, lb, ub, SobolSample())
f = x -> x[1] + x[2]
y = f.(x)
my_wend_ND = Wendland(x, y, lb, ub)
est = my_wend_ND((1.0, 2.0))
add_point!(my_wend_ND, (3.0, 3.5), 4.0)
add_point!(my_wend_ND, [(9.0, 10.0), (12.0, 13.0)], [10.0, 11.0])
# Test that input dimension is properly checked for ND Wendland surrogates
@test_throws ArgumentError my_wend_ND(Float64[])
@test_throws ArgumentError my_wend_ND(2.0)
@test_throws ArgumentError my_wend_ND((2.0, 3.0, 4.0))
#todo
| Surrogates | https://github.com/SciML/Surrogates.jl.git |
|
[
"MIT"
] | 6.10.0 | ba29564853a3f7b10eb699f1e99a62cdb6a3770f | code | 820 | using Surrogates
#1D
lb = 0.0
ub = 5.0
n = 20
x = sample(n, lb, ub, SobolSample())
f = x -> 2 * x + x^2
y = f.(x)
my_ear1d = EarthSurrogate(x, y, lb, ub)
val = my_ear1d(3.0)
add_point!(my_ear1d, 6.0, 48.0)
# Test that input dimension is properly checked for 1D Earth surrogates
@test_throws ArgumentError my_ear1d(Float64[])
@test_throws ArgumentError my_ear1d((2.0, 3.0, 4.0))
#ND
lb = [0.0, 0.0]
ub = [10.0, 10.0]
n = 30
x = sample(n, lb, ub, SobolSample())
f = x -> x[1] * x[2] + x[1]
y = f.(x)
my_earnd = EarthSurrogate(x, y, lb, ub)
val = my_earnd((2.0, 2.0))
add_point!(my_earnd, (2.0, 2.0), 6.0)
# Test that input dimension is properly checked for ND Earth surrogates
@test_throws ArgumentError my_earnd(Float64[])
@test_throws ArgumentError my_earnd(2.0)
@test_throws ArgumentError my_earnd((2.0, 3.0, 4.0))
| Surrogates | https://github.com/SciML/Surrogates.jl.git |
|
[
"MIT"
] | 6.10.0 | ba29564853a3f7b10eb699f1e99a62cdb6a3770f | code | 1888 | using Surrogates
using Test
using QuasiMonteCarlo
#1D
obj = x -> sin(x) + sin(x)^2 + sin(x)^3
lb = 0.0
ub = 10.0
x = sample(5, lb, ub, HaltonSample())
y = obj.(x)
p = 3.5
InverseDistance = InverseDistanceSurrogate(x, y, lb, ub, p = 2.4)
InverseDistance_kwargs = InverseDistanceSurrogate(x, y, lb, ub)
prediction = InverseDistance(5.0)
add_point!(InverseDistance, 5.0, -0.91)
add_point!(InverseDistance, [5.1, 5.2], [1.0, 2.0])
# Test that input dimension is properly checked for 1D inverse distance surrogates
@test_throws ArgumentError InverseDistance(Float64[])
@test_throws ArgumentError InverseDistance((2.0, 3.0, 4.0))
#ND
lb = [0.0, 0.0]
ub = [10.0, 10.0]
n = 100
x = sample(n, lb, ub, SobolSample())
f = x -> x[1] * x[2]^2
y = f.(x)
p = 3.0
InverseDistance = InverseDistanceSurrogate(x, y, lb, ub, p = p)
prediction = InverseDistance((1.0, 2.0))
add_point!(InverseDistance, (5.0, 3.4), -0.91)
add_point!(InverseDistance, [(5.1, 5.2), (5.3, 6.7)], [1.0, 2.0])
# Test that input dimension is properly checked for 1D inverse distance surrogates
@test_throws ArgumentError InverseDistance(Float64[])
@test_throws ArgumentError InverseDistance(2.0)
@test_throws ArgumentError InverseDistance((2.0, 3.0, 4.0))
# Multi-output #98
f = x -> [x^2, x]
lb = 1.0
ub = 10.0
x = sample(5, lb, ub, SobolSample())
push!(x, 2.0)
y = f.(x)
surrogate = InverseDistanceSurrogate(x, y, lb, ub, p = 1.2)
surrogate_kwargs = InverseDistanceSurrogate(x, y, lb, ub)
@test surrogate(2.0) ≈ [4, 2]
f = x -> [x[1], x[2]^2]
lb = [1.0, 2.0]
ub = [10.0, 8.5]
x = sample(20, lb, ub, SobolSample())
push!(x, (1.0, 2.0))
y = f.(x)
surrogate = InverseDistanceSurrogate(x, y, lb, ub, p = 1.2)
surrogate_kwargs = InverseDistanceSurrogate(x, y, lb, ub)
@test surrogate((1.0, 2.0)) ≈ [1, 4]
x_new = (2.0, 2.0)
y_new = f(x_new)
add_point!(surrogate, x_new, y_new)
@test surrogate(x_new) ≈ y_new
surrogate((0.0, 0.0))
| Surrogates | https://github.com/SciML/Surrogates.jl.git |
|
[
"MIT"
] | 6.10.0 | ba29564853a3f7b10eb699f1e99a62cdb6a3770f | code | 963 | using Surrogates
#1D
x = [1.0, 2.0, 3.0]
y = [1.5, 3.5, 5.3]
lb = 0.0
ub = 7.0
my_linear_surr_1D = LinearSurrogate(x, y, lb, ub)
val = my_linear_surr_1D(5.0)
add_point!(my_linear_surr_1D, 4.0, 7.2)
add_point!(my_linear_surr_1D, [5.0, 6.0], [8.3, 9.7])
# Test that input dimension is properly checked for 1D Linear surrogates
@test_throws ArgumentError my_linear_surr_1D(Float64[])
@test_throws ArgumentError my_linear_surr_1D((2.0, 3.0, 4.0))
#ND
lb = [0.0, 0.0]
ub = [10.0, 10.0]
x = sample(5, lb, ub, SobolSample())
y = [4.0, 5.0, 6.0, 7.0, 8.0]
my_linear_ND = LinearSurrogate(x, y, lb, ub)
add_point!(my_linear_ND, (10.0, 11.0), 9.0)
add_point!(my_linear_ND, [(8.0, 5.0), (9.0, 9.5)], [4.0, 5.0])
val = my_linear_ND((10.0, 11.0))
# Test that input dimension is properly checked for ND Linear surrogates
@test_throws ArgumentError my_linear_ND(Float64[])
@test_throws ArgumentError my_linear_ND(1.0)
@test_throws ArgumentError my_linear_ND((2.0, 3.0, 4.0))
| Surrogates | https://github.com/SciML/Surrogates.jl.git |
|
[
"MIT"
] | 6.10.0 | ba29564853a3f7b10eb699f1e99a62cdb6a3770f | code | 2233 | using Surrogates
using LinearAlgebra
using Test
using QuadGK
using Cubature
#1D
obj = x -> 3 * x + log(x)
a = 1.0
b = 4.0
x = sample(2000, a, b, SobolSample())
y = obj.(x)
alpha = 2.0
n = 6
my_loba = LobachevskySurrogate(x, y, a, b, alpha = 2.0, n = 6)
val = my_loba(3.83)
# Test that input dimension is properly checked for 1D Lobachevsky surrogates
@test_throws ArgumentError my_loba(Float64[])
@test_throws ArgumentError my_loba((2.0, 3.0, 4.0))
#1D integral
int_1D = lobachevsky_integral(my_loba, a, b)
int = quadgk(obj, a, b)
int_val_true = int[1] - int[2]
@test abs(int_1D - int_val_true) < 2 * 10^-5
add_point!(my_loba, 3.7, 12.1)
add_point!(my_loba, [1.23, 3.45], [5.20, 109.67])
#ND
obj = x -> x[1] + log(x[2])
lb = [0.0, 0.0]
ub = [8.0, 8.0]
alpha = [2.4, 2.4]
n = 8
x = sample(3200, lb, ub, SobolSample())
y = obj.(x)
my_loba_ND = LobachevskySurrogate(x, y, lb, ub, alpha = [2.4, 2.4], n = 8)
my_loba_kwargs = LobachevskySurrogate(x, y, lb, ub)
pred = my_loba_ND((1.0, 2.0))
# Test that input dimension is properly checked for ND Lobachevsky surrogates
@test_throws ArgumentError my_loba_ND(Float64[])
@test_throws ArgumentError my_loba_ND(1.0)
@test_throws ArgumentError my_loba_ND((2.0, 3.0, 4.0))
#ND
int_ND = lobachevsky_integral(my_loba_ND, lb, ub)
int = hcubature(obj, lb, ub)
int_val_true = int[1] - int[2]
@test abs(int_ND - int_val_true) < 10^-1
add_point!(my_loba_ND, (10.0, 11.0), 4.0)
add_point!(my_loba_ND, [(12.0, 15.0), (13.0, 14.0)], [4.0, 5.0])
lobachevsky_integrate_dimension(my_loba_ND, lb, ub, 2)
obj = x -> x[1] + log(x[2]) + exp(x[3])
lb = [0.0, 0.0, 0.0]
ub = [8.0, 8.0, 8.0]
alpha = [2.4, 2.4, 2.4]
x = sample(50, lb, ub, SobolSample())
y = obj.(x)
n = 4
my_loba_ND = LobachevskySurrogate(x, y, lb, ub)
lobachevsky_integrate_dimension(my_loba_ND, lb, ub, 2)
#Sparse
#1D
obj = x -> 3 * x + log(x)
a = 1.0
b = 4.0
x = sample(100, a, b, SobolSample())
y = obj.(x)
alpha = 2.0
n = 6
my_loba = LobachevskySurrogate(x, y, a, b, alpha = 2.0, n = 6, sparse = true)
#ND
obj = x -> x[1] + log(x[2])
lb = [0.0, 0.0]
ub = [8.0, 8.0]
alpha = [2.4, 2.4]
n = 8
x = sample(100, lb, ub, SobolSample())
y = obj.(x)
my_loba_ND = LobachevskySurrogate(x, y, lb, ub, alpha = [2.4, 2.4], n = 8, sparse = true)
| Surrogates | https://github.com/SciML/Surrogates.jl.git |
|
[
"MIT"
] | 6.10.0 | ba29564853a3f7b10eb699f1e99a62cdb6a3770f | code | 8891 | using Surrogates
using LinearAlgebra
using QuasiMonteCarlo
#######SRBF############
##### 1D #####
lb = 0.0
ub = 15.0
objective_function = x -> 2 * x + 1
x = [2.5, 4.0, 6.0]
y = [6.0, 9.0, 13.0]
# In 1D values of p closer to 2 make the det(R) closer and closer to 0,
#this does not happen in higher dimensions because p would be a vector and not
#all components are generally C^inf
p = 1.99
a = 2
b = 6
#Using Kriging
x = [2.5, 4.0, 6.0]
y = [6.0, 9.0, 13.0]
my_k_SRBF1 = Kriging(x, y, lb, ub; p)
xstar, fstar = surrogate_optimize(objective_function, SRBF(), a, b, my_k_SRBF1,
RandomSample())
#Using RadialBasis
x = [2.5, 4.0, 6.0]
y = [6.0, 9.0, 13.0]
my_rad_SRBF1 = RadialBasis(x, y, a, b, rad = linearRadial())
(xstar, fstar) = surrogate_optimize(objective_function, SRBF(), a, b, my_rad_SRBF1,
RandomSample())
x = [2.5, 4.0, 6.0]
y = [6.0, 9.0, 13.0]
my_wend_1d = Wendland(x, y, lb, ub)
xstar, fstar = surrogate_optimize(objective_function, SRBF(), a, b, my_wend_1d,
RandomSample())
x = [2.5, 4.0, 6.0]
y = [6.0, 9.0, 13.0]
my_earth1d = EarthSurrogate(x, y, lb, ub)
xstar, fstar = surrogate_optimize(objective_function, SRBF(), a, b, my_earth1d,
HaltonSample())
##### ND #####
objective_function_ND = z -> 3 * norm(z) + 1
lb = [1.0, 1.0]
ub = [6.0, 6.0]
x = sample(5, lb, ub, SobolSample())
y = objective_function_ND.(x)
#Kriging
my_k_SRBFN = Kriging(x, y, lb, ub)
#Every optimization method now returns the y_min and its position
x_min, y_min = surrogate_optimize(objective_function_ND, SRBF(), lb, ub, my_k_SRBFN,
RandomSample())
#Radials
lb = [1.0, 1.0]
ub = [6.0, 6.0]
x = sample(5, lb, ub, SobolSample())
objective_function_ND = z -> 3 * norm(z) + 1
y = objective_function_ND.(x)
my_rad_SRBFN = RadialBasis(x, y, lb, ub, rad = linearRadial())
surrogate_optimize(objective_function_ND, SRBF(), lb, ub, my_rad_SRBFN, RandomSample())
# Lobachevsky
x = sample(5, lb, ub, RandomSample())
y = objective_function_ND.(x)
alpha = [2.0, 2.0]
n = 4
my_loba_ND = LobachevskySurrogate(x, y, lb, ub)
surrogate_optimize(objective_function_ND, SRBF(), lb, ub, my_loba_ND, RandomSample())
#Linear
lb = [1.0, 1.0]
ub = [6.0, 6.0]
x = sample(500, lb, ub, SobolSample())
objective_function_ND = z -> 3 * norm(z) + 1
y = objective_function_ND.(x)
my_linear_ND = LinearSurrogate(x, y, lb, ub)
surrogate_optimize(objective_function_ND, SRBF(), lb, ub, my_linear_ND, SobolSample(),
maxiters = 15)
#=
#SVM
lb = [1.0,1.0]
ub = [6.0,6.0]
x = sample(5,lb,ub,SobolSample())
objective_function_ND = z -> 3*norm(z)+1
y = objective_function_ND.(x)
my_SVM_ND = SVMSurrogate(x,y,lb,ub)
surrogate_optimize(objective_function_ND,SRBF(),lb,ub,my_SVM_ND,SobolSample(),maxiters=15)
=#
#Inverse distance surrogate
lb = [1.0, 1.0]
ub = [6.0, 6.0]
x = sample(5, lb, ub, SobolSample())
objective_function_ND = z -> 3 * norm(z) + 1
my_p = 2.5
y = objective_function_ND.(x)
my_inverse_ND = InverseDistanceSurrogate(x, y, lb, ub, p = my_p)
surrogate_optimize(objective_function_ND, SRBF(), lb, ub, my_inverse_ND, SobolSample(),
maxiters = 15)
#SecondOrderPolynomialSurrogate
lb = [0.0, 0.0]
ub = [10.0, 10.0]
obj_ND = x -> log(x[1]) * exp(x[2])
x = sample(15, lb, ub, RandomSample())
y = obj_ND.(x)
my_second_order_poly_ND = SecondOrderPolynomialSurrogate(x, y, lb, ub)
surrogate_optimize(obj_ND, SRBF(), lb, ub, my_second_order_poly_ND, SobolSample(),
maxiters = 15)
####### LCBS #########
######1D######
objective_function = x -> 2 * x + 1
lb = 0.0
ub = 15.0
x = [2.0, 4.0, 6.0]
y = [5.0, 9.0, 13.0]
p = 1.8
a = 2.0
b = 6.0
my_k_LCBS1 = Kriging(x, y, lb, ub)
surrogate_optimize(objective_function, LCBS(), a, b, my_k_LCBS1, RandomSample())
##### ND #####
objective_function_ND = z -> 3 * norm(z) + 1
x = [(1.2, 3.0), (3.0, 3.5), (5.2, 5.7)]
y = objective_function_ND.(x)
p = [1.2, 1.2]
theta = [2.0, 2.0]
lb = [1.0, 1.0]
ub = [6.0, 6.0]
#Kriging
my_k_LCBSN = Kriging(x, y, lb, ub)
surrogate_optimize(objective_function_ND, LCBS(), lb, ub, my_k_LCBSN, RandomSample())
##### EI ######
###1D###
objective_function = x -> (x + 1)^2 - x + 2 # Minimum of this function is at x = -0.5, y = 2.75
true_min_x = -0.5
true_min_y = objective_function(true_min_x)
lb = -5
ub = 5
x = sample(5, lb, ub, SobolSample())
y = objective_function.(x)
my_k_EI1 = Kriging(x, y, lb, ub; p = 2)
surrogate_optimize(objective_function, EI(), lb, ub, my_k_EI1, SobolSample(),
maxiters = 200, num_new_samples = 155)
# Check that EI is correctly minimizing the objective
y_min, index_min = findmin(my_k_EI1.y)
x_min = my_k_EI1.x[index_min]
@test norm(x_min - true_min_x) < 0.05 * norm(ub .- lb)
@test abs(y_min - true_min_y) < 0.05 * (objective_function(ub) - objective_function(lb))
###ND###
objective_function_ND = z -> 3 * norm(z) + 1 # this is minimized at x = (0, 0), y = 1
true_min_x = (0.0, 0.0)
true_min_y = objective_function_ND(true_min_x)
x = [(1.2, 3.0), (3.0, 3.5), (5.2, 5.7)]
y = objective_function_ND.(x)
min_y = minimum(y)
p = [1.2, 1.2]
theta = [2.0, 2.0]
lb = [-1.0, -1.0]
ub = [6.0, 6.0]
#Kriging
my_k_EIN = Kriging(x, y, lb, ub)
surrogate_optimize(objective_function_ND, EI(), lb, ub, my_k_EIN, SobolSample())
# Check that EI is correctly minimizing instead of maximizing
y_min, index_min = findmin(my_k_EIN.y)
x_min = my_k_EIN.x[index_min]
@test norm(x_min .- true_min_x) < 0.05 * norm(ub .- lb)
@test abs(y_min - true_min_y) <
0.05 * (objective_function_ND(ub) - objective_function_ND(lb))
###ND with SectionSampler###
# We will make sure the EI function finds the minimum when constrained to a specific slice of 3D space
objective_function_section = x -> x[1]^2 + x[2]^2 + x[3]^2 # this is minimized at x = (0, 0, 0), y = 0
# We will constrain x[2] to some value
x2_constraint = 2.0
true_min_x = (0.0, x2_constraint, 0.0)
true_min_y = objective_function_section(true_min_x)
sampler = SectionSample([NaN64, x2_constraint, NaN64], SobolSample())
lb = [-1.0, x2_constraint, -1.0]
ub = [6.0, x2_constraint, 6.0]
x = sample(5, lb, ub, sampler)
y = objective_function_section.(x)
#Kriging
my_k_EIN_section = Kriging(x, y, lb, ub)
# Constrain our sampling to the plane where x[2] = 1
surrogate_optimize(objective_function_section, EI(), lb, ub, my_k_EIN_section, sampler)
# Check that EI is correctly minimizing instead of maximizing
y_min, index_min = findmin(my_k_EIN_section.y)
x_min = my_k_EIN_section.x[index_min]
@test norm(x_min .- true_min_x) < 0.05 * norm(ub .- lb)
@test abs(y_min - true_min_y) <
0.05 * (objective_function_section(ub) - objective_function_section(lb))
## DYCORS ##
#1D#
objective_function = x -> 3 * x + 1
x = [2.1, 2.5, 4.0, 6.0]
y = objective_function.(x)
p = 1.9
lb = 2.0
ub = 6.0
my_k_DYCORS1 = Kriging(x, y, lb, ub, p = 1.9)
surrogate_optimize(objective_function, DYCORS(), lb, ub, my_k_DYCORS1, RandomSample())
my_rad_DYCORS1 = RadialBasis(x, y, lb, ub, rad = linearRadial())
surrogate_optimize(objective_function, DYCORS(), lb, ub, my_rad_DYCORS1, RandomSample())
#ND#
objective_function_ND = z -> 2 * norm(z) + 1
x = [(2.3, 2.2), (1.4, 1.5)]
y = objective_function_ND.(x)
p = [1.5, 1.5]
theta = [2.0, 2.0]
lb = [1.0, 1.0]
ub = [6.0, 6.0]
my_k_DYCORSN = Kriging(x, y, lb, ub)
surrogate_optimize(objective_function_ND, DYCORS(), lb, ub, my_k_DYCORSN, RandomSample(),
maxiters = 30)
my_rad_DYCORSN = RadialBasis(x, y, lb, ub, rad = linearRadial())
surrogate_optimize(objective_function_ND, DYCORS(), lb, ub, my_rad_DYCORSN, RandomSample(),
maxiters = 30)
my_wend_ND = Wendland(x, y, lb, ub)
surrogate_optimize(objective_function_ND, DYCORS(), lb, ub, my_wend_ND, RandomSample(),
maxiters = 30)
### SOP ###
# 1D
objective_function = x -> 3 * x + 1
x = sample(20, 1.0, 6.0, SobolSample())
y = objective_function.(x)
p = 1.9
lb = 1.0
ub = 6.0
num_centers = 2
my_k_SOP1 = Kriging(x, y, lb, ub, p = 1.9)
surrogate_optimize(objective_function, SOP(num_centers), lb, ub, my_k_SOP1, SobolSample(),
maxiters = 60)
#ND
objective_function_ND = z -> 2 * norm(z) + 1
x = [(2.3, 2.2), (1.4, 1.5)]
y = objective_function_ND.(x)
p = [1.5, 1.5]
theta = [2.0, 2.0]
lb = [1.0, 1.0]
ub = [6.0, 6.0]
my_k_SOPND = Kriging(x, y, lb, ub)
num_centers = 2
surrogate_optimize(objective_function_ND, SOP(num_centers), lb, ub, my_k_SOPND,
SobolSample(), maxiters = 20)
#multi optimization
#=
f = x -> [x^2, x]
lb = 1.0
ub = 10.0
x = sample(100, lb, ub, SobolSample())
y = f.(x)
my_radial_basis_smb = RadialBasis(x, y, lb, ub, rad = linearRadial())
surrogate_optimize(f,SMB(),lb,ub,my_radial_basis_smb,SobolSample())
f = x -> [x^2, x]
lb = 1.0
ub = 10.0
x = sample(100, lb, ub, SobolSample())
y = f.(x)
my_radial_basis_rtea = RadialBasis(x, y, lb, ub, rad = linearRadial())
Z = 0.8 #percentage
K = 2 #number of revaluations
p_cross = 0.5 #crossing vs copy
n_c = 1.0 # hyperparameter for children creation
sigma = 1.5 # mutation
surrogate_optimize(f,RTEA(Z,K,p_cross,n_c,sigma),lb,ub,my_radial_basis_rtea,SobolSample())
=#
| Surrogates | https://github.com/SciML/Surrogates.jl.git |
|
[
"MIT"
] | 6.10.0 | ba29564853a3f7b10eb699f1e99a62cdb6a3770f | code | 1507 | using Surrogates
using Test
using Revise
#1D
lb = 0.0
ub = 10.0
f = x -> log(x) * exp(x)
x = sample(5, lb, ub, SobolSample())
y = f.(x)
# Test lengths of new_x and EI (1D)
# TODO
my_k = Kriging(x, y, lb, ub)
new_x, eis = potential_optimal_points(EI(),
MeanConstantLiar(),
lb,
ub,
my_k,
SobolSample(),
3)
@test length(new_x) == 3
@test length(eis) == 3
# Test lengths of new_x and SRBF (1D)
my_surr = RadialBasis(x, y, lb, ub)
new_x, eis = potential_optimal_points(SRBF(),
MeanConstantLiar(),
lb,
ub,
my_surr,
SobolSample(),
3)
@test length(new_x) == 3
@test length(eis) == 3
# Test lengths of new_x and EI (ND)
lb = [0.0, 0.0, 1.0]
ub = [5.0, 7.5, 10.0]
x = sample(5, lb, ub, SobolSample())
f = x -> x[1] + x[2] * x[3]
y = f.(x)
my_k = Kriging(x, y, lb, ub)
new_x, eis = potential_optimal_points(EI(),
MeanConstantLiar(),
lb,
ub,
my_k,
SobolSample(),
5)
@test length(new_x) == 5
@test length(eis) == 5
@test length(new_x[1]) == 3
# Test lengths of new_x and SRBF (ND)
my_surr = RadialBasis(x, y, lb, ub)
new_x, eis = potential_optimal_points(SRBF(),
MeanConstantLiar(),
lb,
ub,
my_surr,
SobolSample(),
5)
@test length(new_x) == 5
@test length(eis) == 5
@test length(new_x[1]) == 3
# # Check hyperparameter validation for potential_optimal_points
@test_throws ArgumentError new_x, eis=potential_optimal_points(EI(),
MeanConstantLiar(),
lb,
ub,
my_k,
SobolSample(),
-1)
| Surrogates | https://github.com/SciML/Surrogates.jl.git |
|
[
"MIT"
] | 6.10.0 | ba29564853a3f7b10eb699f1e99a62cdb6a3770f | code | 388 | using Surrogates, Aqua
@testset "Aqua" begin
Aqua.find_persistent_tasks_deps(Surrogates)
Aqua.test_ambiguities(Surrogates, recursive = false)
Aqua.test_deps_compat(Surrogates)
Aqua.test_piracies(Surrogates)
Aqua.test_project_extras(Surrogates)
Aqua.test_stale_deps(Surrogates)
Aqua.test_unbound_args(Surrogates)
Aqua.test_undefined_exports(Surrogates)
end
| Surrogates | https://github.com/SciML/Surrogates.jl.git |
|
[
"MIT"
] | 6.10.0 | ba29564853a3f7b10eb699f1e99a62cdb6a3770f | code | 2196 | using Surrogates
using Test
using SafeTestsets
using Pkg
function dev_subpkg(subpkg)
subpkg_path = joinpath(dirname(@__DIR__), "lib", subpkg)
Pkg.develop(PackageSpec(path = subpkg_path))
end
@testset "Surrogates" begin
@safetestset "Quality Assurance" begin
include("qa.jl")
end
@testset "Libs" begin
@testset "$pkg" for pkg in [
"SurrogatesAbstractGPs", "SurrogatesFlux",
"SurrogatesPolyChaos", "SurrogatesMOE",
"SurrogatesRandomForest", "SurrogatesSVM"]
@time begin
dev_subpkg(pkg)
Pkg.test(pkg)
end
end
end
@testset "Algorithms" begin
@time @safetestset "GEKPLS" begin
include("GEKPLS.jl")
end
@time @safetestset "Radials" begin
include("Radials.jl")
end
@time @safetestset "Kriging" begin
include("Kriging.jl")
end
@time @safetestset "Sampling" begin
include("sampling.jl")
end
@time @safetestset "Optimization" begin
include("optimization.jl")
end
@time @safetestset "LinearSurrogate" begin
include("linearSurrogate.jl")
end
@time @safetestset "Lobachevsky" begin
include("lobachevsky.jl")
end
@time @safetestset "InverseDistanceSurrogate" begin
include("inverseDistanceSurrogate.jl")
end
@time @safetestset "SecondOrderPolynomialSurrogate" begin
include("secondOrderPolynomialSurrogate.jl")
end
# @time @safetestset "AD_Compatibility" begin include("AD_compatibility.jl") end
@time @safetestset "Wendland" begin
include("Wendland.jl")
end
@time @safetestset "VariableFidelity" begin
include("VariableFidelity.jl")
end
@time @safetestset "Earth" begin
include("earth.jl")
end
@time @safetestset "Gradient Enhanced Kriging" begin
include("GEK.jl")
end
@time @safetestset "Section Samplers" begin
include("SectionSampleTests.jl")
end
end
end
| Surrogates | https://github.com/SciML/Surrogates.jl.git |
|
[
"MIT"
] | 6.10.0 | ba29564853a3f7b10eb699f1e99a62cdb6a3770f | code | 3857 | using Surrogates
using QuasiMonteCarlo
using QuasiMonteCarlo: KroneckerSample, GoldenSample
using Distributions: Cauchy, Normal
using Test
#1D
lb = 0.0
ub = 5.0
n = 5
d = 1
## Sampling methods from QuasiMonteCarlo.jl ##
# GridSample
s = Surrogates.sample(n, lb, ub, GridSample())
@test s isa Vector{Float64} && length(s) == n && all(x -> lb ≤ x ≤ ub, s)
# RandomSample
s = Surrogates.sample(n, lb, ub, RandomSample())
@test s isa Vector{Float64} && length(s) == n && all(x -> lb ≤ x ≤ ub, s)
# SobolSample
s = Surrogates.sample(n, lb, ub, SobolSample())
@test s isa Vector{Float64} && length(s) == n && all(x -> lb ≤ x ≤ ub, s)
# LatinHypercubeSample
s = Surrogates.sample(n, lb, ub, LatinHypercubeSample())
@test s isa Vector{Float64} && length(s) == n && all(x -> lb ≤ x ≤ ub, s)
# LowDiscrepancySample
s = Surrogates.sample(20, lb, ub, HaltonSample())
@test s isa Vector{Float64} && length(s) == 20 && all(x -> lb ≤ x ≤ ub, s)
# LatticeRuleSample (not originally in Surrogates.jl, now available through QuasiMonteCarlo.jl)
s = Surrogates.sample(20, lb, ub, LatticeRuleSample())
@test s isa Vector{Float64} && length(s) == 20 && all(x -> lb ≤ x ≤ ub, s)
# Distribution sampling (Cauchy)
s = Surrogates.sample(n, d, Cauchy())
@test s isa Vector{Float64} && length(s) == n
# Distributions sampling (Normal)
s = Surrogates.sample(n, d, Normal(0, 4))
@test s isa Vector{Float64} && length(s) == n
## Sampling methods specific to Surrogates.jl ##
# KroneckerSample
s = Surrogates.sample(n, lb, ub, KroneckerSample([sqrt(2)], NoRand()))
@test s isa Vector{Float64} && length(s) == n && all(x -> lb ≤ x ≤ ub, s)
# GoldenSample
s = Surrogates.sample(n, lb, ub, GoldenSample())
@test s isa Vector{Float64} && length(s) == n && all(x -> lb ≤ x ≤ ub, s)
# SectionSample
constrained_val = 1.0
s = Surrogates.sample(n, lb, ub, SectionSample([NaN64], RandomSample()))
@test s isa Vector{Float64} && length(s) == n && all(x -> lb ≤ x ≤ ub, s)
s = Surrogates.sample(n, lb, ub, SectionSample([constrained_val], RandomSample()))
@test s isa Vector{Float64} && length(s) == n && all(x -> lb ≤ x ≤ ub, s)
@test all(==(constrained_val), s)
# ND
# Now that we use QuasiMonteCarlo.jl, these tests are to make sure that we transform the output
# from a Matrix to a Vector of Tuples properly for ND problems.
lb = [0.1, -0.5]
ub = [1.0, 20.0]
n = 5
d = 2
#GridSample{T}
s = Surrogates.sample(n, lb, ub, GridSample())
@test isa(s, Array{Tuple{typeof(s[1][1]), typeof(s[1][1])}, 1}) == true
#RandomSample()
s = Surrogates.sample(n, lb, ub, RandomSample())
@test isa(s, Array{Tuple{typeof(s[1][1]), typeof(s[1][1])}, 1}) == true
#SobolSample()
s = Surrogates.sample(n, lb, ub, SobolSample())
@test isa(s, Array{Tuple{typeof(s[1][1]), typeof(s[1][1])}, 1}) == true
#LHS
s = Surrogates.sample(n, lb, ub, LatinHypercubeSample())
@test isa(s, Array{Tuple{typeof(s[1][1]), typeof(s[1][1])}, 1}) == true
#LDS
s = Surrogates.sample(n, lb, ub, HaltonSample())
@test isa(s, Array{Tuple{typeof(s[1][1]), typeof(s[1][1])}, 1}) == true
#Distribution 1
s = Surrogates.sample(n, d, Cauchy())
@test isa(s, Array{Tuple{typeof(s[1][1]), typeof(s[1][1])}, 1}) == true
#Distribution 2
s = Surrogates.sample(n, d, Normal(3, 5))
@test isa(s, Array{Tuple{typeof(s[1][1]), typeof(s[1][1])}, 1}) == true
#Kronecker
s = Surrogates.sample(n, lb, ub, KroneckerSample([sqrt(2), 3.1415], NoRand()))
@test isa(s, Array{Tuple{typeof(s[1][1]), typeof(s[1][1])}, 1}) == true
#Golden
s = Surrogates.sample(n, lb, ub, GoldenSample())
@test isa(s, Array{Tuple{typeof(s[1][1]), typeof(s[1][1])}, 1}) == true
# SectionSample
constrained_val = 1.0
s = Surrogates.sample(n, lb, ub, SectionSample([NaN64, constrained_val], RandomSample()))
@test all(x -> x[end] == constrained_val, s)
@test isa(s, Array{Tuple{typeof(s[1][1]), typeof(s[1][1])}, 1}) == true
@test all(x -> lb[1] ≤ x[1] ≤ ub[1], s)
| Surrogates | https://github.com/SciML/Surrogates.jl.git |
|
[
"MIT"
] | 6.10.0 | ba29564853a3f7b10eb699f1e99a62cdb6a3770f | code | 2293 | using Surrogates
using Test
#1D
lb = 0.0
ub = 5.0
obj_1D = x -> log(x) * exp(x)
x = sample(5, lb, ub, SobolSample())
y = obj_1D.(x)
my_second_order_poly = SecondOrderPolynomialSurrogate(x, y, lb, ub)
val = my_second_order_poly(5.0)
add_point!(my_second_order_poly, 5.0, 238.86)
add_point!(my_second_order_poly, [6.0, 7.0], [722.84, 2133.94])
# Test that input dimension is properly checked for 1D SecondOrderPolynomial surrogates
@test_throws ArgumentError my_second_order_poly(Float64[])
@test_throws ArgumentError my_second_order_poly((2.0, 3.0, 4.0))
#ND
lb = [0.0, 0.0]
ub = [10.0, 10.0]
obj_ND = x -> log(x[1]) * exp(x[2])
x = sample(10, lb, ub, RandomSample())
y = obj_ND.(x)
my_second_order_poly = SecondOrderPolynomialSurrogate(x, y, lb, ub)
val = my_second_order_poly((5.0, 7.0))
add_point!(my_second_order_poly, (5.0, 7.0), 1764.96)
add_point!(my_second_order_poly, [(1.5, 1.5), (3.4, 5.4)], [1.817, 270.95])
# Test that input dimension is properly checked for ND SecondOrderPolynomial surrogates
@test_throws ArgumentError my_second_order_poly(Float64[])
@test_throws ArgumentError my_second_order_poly(2.0)
@test_throws ArgumentError my_second_order_poly((2.0, 3.0, 4.0))
# Multi-output #98
f = x -> [x^2, x]
lb = 1.0
ub = 10.0
x = sample(5, lb, ub, SobolSample())
push!(x, 2.0)
y = f.(x)
surrogate = SecondOrderPolynomialSurrogate(x, y, lb, ub)
# should be exact
@test surrogate.β ≈ [0 0; 0 1; 1 0]
@test surrogate(2.0) ≈ [4, 2]
@test surrogate(1.0) ≈ [1, 1]
f = x -> [x[1], x[2]^2]
lb = [1.0, 2.0]
ub = [10.0, 8.5]
x = sample(20, lb, ub, SobolSample())
push!(x, (1.0, 2.0))
y = f.(x)
surrogate = SecondOrderPolynomialSurrogate(x, y, lb, ub)
@test surrogate.β ≈ [0 0; 1 0; 0 0; 0 0; 0 0; 0 1]
@test surrogate((1.0, 2.0)) ≈ [1, 4]
x_new = (2.0, 2.0)
y_new = f(x_new)
@test surrogate(x_new) ≈ y_new
add_point!(surrogate, x_new, y_new)
@test surrogate(x_new) ≈ y_new
# surrogate should recover 2nd order polynomial
function second_order_target(x; a = 0.3, b = [0.7, 0.1], c = [0.3 0.4; 0.4 0.1])
a + b' * x + x' * c * x
end
second_order_target(x::Tuple; kwargs...) = second_order_target([x...]; kwargs...)
lb = fill(-5.0, 2);
ub = fill(5.0, 2);
n = 10^3;
x = sample(n, lb, ub, SobolSample())
y = second_order_target.(x)
sec = SecondOrderPolynomialSurrogate(x, y, lb, ub)
@test y ≈ sec.(x)
| Surrogates | https://github.com/SciML/Surrogates.jl.git |
|
[
"MIT"
] | 6.10.0 | ba29564853a3f7b10eb699f1e99a62cdb6a3770f | docs | 700 | # Contributing
1. Fork the repository on github. (Click the `Fork` button in the top-right corner)
2. Clone the repository you have just forked. `git clone https://github.com/YOUR_USERNAME/Surrogates.jl.git`
3. `cd Surrogates.jl` Enter the repository's directory.
4. `julia` Open the Julia REPL.
5. `]` Enter package mode.
6. `activate .` Activate the local environment.
7. `instantiate` Install the dependencies.
8. Perform your edits (Atom with Juno, or VSCode with the Julia plugin are good editor choices)
9. Stage, Commit, and Push your changes
10. [Open a Pull Request](https://help.github.com/en/github/collaborating-with-issues-and-pull-requests/creating-a-pull-request-from-a-fork)
| Surrogates | https://github.com/SciML/Surrogates.jl.git |
|
[
"MIT"
] | 6.10.0 | ba29564853a3f7b10eb699f1e99a62cdb6a3770f | docs | 2493 | ## Surrogates.jl
[](https://julialang.zulipchat.com/#narrow/stream/279055-sciml-bridged)
[](https://docs.sciml.ai/Surrogates/stable/)
[](https://codecov.io/gh/SciML/Surrogates.jl)
[](https://github.com/SciML/Surrogates.jl/actions?query=workflow%3ACI)
[](https://github.com/SciML/ColPrac)
[](https://github.com/SciML/SciMLStyle)
A surrogate model is an approximation method that mimics the behavior of a computationally
expensive simulation. In more mathematical terms: suppose we are attempting to optimize a function
`f(p)`, but each calculation of `f` is very expensive. It may be the case that we need to solve a PDE for each point, or use advanced numerical linear algebra machinery, which is usually costly. The idea is then to develop a surrogate model `g` which approximates `f` by training on previous data collected from evaluations of `f`.
The construction of a surrogate model can be seen as a three-step process:
1. Sample selection
2. Construction of the surrogate model
3. Surrogate optimization
Sampling can be done through [QuasiMonteCarlo.jl](https://github.com/SciML/QuasiMonteCarlo.jl); all the sampling functions available there can be used in Surrogates.jl.
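A minimal, illustrative sketch of these three steps (the toy objective `f`, the bounds, and the choice of Kriging below are only for demonstration; any surrogate, sampler, or optimization method from the lists below can be swapped in):

```julia
using Surrogates

# 1. Sample selection: 30 Sobol points on [0, 10]
f = x -> sin(x) + x^2 / 20
lower_bound = 0.0
upper_bound = 10.0
x = sample(30, lower_bound, upper_bound, SobolSample())
y = f.(x)

# 2. Construction of the surrogate model
krig = Kriging(x, y, lower_bound, upper_bound)

# 3. Surrogate optimization: search for a minimum of f through the surrogate
surrogate_optimize(f, SRBF(), lower_bound, upper_bound, krig, SobolSample())
```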
## ALL the currently available surrogate models:
- Kriging
- Kriging using Stheno
- Radial Basis
- Wendland
- Linear
- Second Order Polynomial
- Support Vector Machines (Waiting for LIBSVM resolution)
- Neural Networks
- Random Forests
- Lobachevsky
- Inverse-distance
- Polynomial expansions
- Variable fidelity
- Mixture of experts (Waiting for the GaussianMixtures package to work on v1.5)
- Earth
- Gradient Enhanced Kriging
## ALL the currently available optimization methods:
- SRBF
- LCBS
- DYCORS
- EI
- SOP
- Multi-optimization: SMB and RTEA
## Installing Surrogates package
```julia
using Pkg
Pkg.add("Surrogates")
```
| Surrogates | https://github.com/SciML/Surrogates.jl.git |
|
[
"MIT"
] | 6.10.0 | ba29564853a3f7b10eb699f1e99a62cdb6a3770f | docs | 2749 | # Branin Function
The Branin function is commonly used as a test function for metamodelling in computer experiments, especially in the context of optimization.
The expression of the Branin Function is given as:
``f(x) = (x_2 - \frac{5.1}{4\pi^2}x_1^{2} + \frac{5}{\pi}x_1 - 6)^2 + 10(1-\frac{1}{8\pi})\cos(x_1) + 10``
where ``x = (x_1, x_2)`` with ``-5\leq x_1 \leq 10, 0 \leq x_2 \leq 15``
First of all, we will import these two packages: `Surrogates` and `Plots`.
```@example BraninFunction
using Surrogates
using Plots
default()
```
Now, let's define our objective function:
```@example BraninFunction
function branin(x)
x1 = x[1]
x2 = x[2]
b = 5.1 / (4 * pi^2)
c = 5 / pi
r = 6
a = 1
s = 10
t = 1 / (8 * pi)
term1 = a * (x2 - b * x1^2 + c * x1 - r)^2
term2 = s * (1 - t) * cos(x1)
y = term1 + term2 + s
end
```
Now, let's plot it:
```@example BraninFunction
n_samples = 80
lower_bound = [-5, 0]
upper_bound = [10, 15]
xys = sample(n_samples, lower_bound, upper_bound, SobolSample())
zs = branin.(xys);
x, y = -5.00:10.00, 0.00:15.00
p1 = surface(x, y, (x1, x2) -> branin((x1, x2)))
xs = [xy[1] for xy in xys]
ys = [xy[2] for xy in xys]
scatter!(xs, ys, zs)
p2 = contour(x, y, (x1, x2) -> branin((x1, x2)))
scatter!(xs, ys)
plot(p1, p2, title = "True function")
```
Now it's time to try fitting different surrogates, and then we will plot them.
First, we will have a look at the radial basis surrogate, built with `RadialBasis`:
```@example BraninFunction
radial_surrogate = RadialBasis(xys, zs, lower_bound, upper_bound)
```
```@example BraninFunction
p1 = surface(x, y, (x, y) -> radial_surrogate([x y]))
scatter!(xs, ys, zs, marker_z = zs)
p2 = contour(x, y, (x, y) -> radial_surrogate([x y]))
scatter!(xs, ys, marker_z = zs)
plot(p1, p2, title = "Radial Surrogate")
```
Now, we will have a look at `Inverse Distance Surrogate`:
```@example BraninFunction
InverseDistance = InverseDistanceSurrogate(xys, zs, lower_bound, upper_bound)
```
```@example BraninFunction
p1 = surface(x, y, (x, y) -> InverseDistance([x y])) # hide
scatter!(xs, ys, zs, marker_z = zs) # hide
p2 = contour(x, y, (x, y) -> InverseDistance([x y])) # hide
scatter!(xs, ys, marker_z = zs) # hide
plot(p1, p2, title = "Inverse Distance Surrogate") # hide
```
Now, let's talk about `Lobachevsky Surrogate`:
```@example BraninFunction
Lobachevsky = LobachevskySurrogate(
xys, zs, lower_bound, upper_bound, alpha = [2.8, 2.8], n = 8)
```
```@example BraninFunction
p1 = surface(x, y, (x, y) -> Lobachevsky([x y])) # hide
scatter!(xs, ys, zs, marker_z = zs) # hide
p2 = contour(x, y, (x, y) -> Lobachevsky([x y])) # hide
scatter!(xs, ys, marker_z = zs) # hide
plot(p1, p2, title = "Lobachevsky Surrogate") # hide
```
| Surrogates | https://github.com/SciML/Surrogates.jl.git |
|
[
"MIT"
] | 6.10.0 | ba29564853a3f7b10eb699f1e99a62cdb6a3770f | docs | 1857 | # Branin Function
The Branin Function is commonly used as a test function for metamodelling in computer experiments, especially in the context of optimization.
# Modifications for Improved Branin Function:
To enhance the Branin function, changes were made to introduce irregularities, variability, and a dynamic aspect to its landscape. Here's an example:
```@example improved_branin
function improved_branin(x, time_step)
x1 = x[1]
x2 = x[2]
b = 5.1 / (4 * pi^2)
c = 5 / pi
r = 6
a = 1
s = 10
t = 1 / (8 * pi)
# Adding noise to the function's output
noise = randn() * time_step # Simulating time-varying noise
term1 = a * (x2 - b * x1^2 + c * x1 - r)^2
term2 = s * (1 - t) * cos(x1 + noise) # Introducing dynamic component
y = term1 + term2 + s
end
```
This improved function now incorporates irregularities, variability, and a dynamic aspect. These changes aim to make the optimization landscape more challenging and realistic.
# Using the Improved Branin Function:
After defining the improved Branin function, you can proceed to test different surrogates and visualize their performance using the updated function. Here's an example of using the improved function with the Radial Basis surrogate:
```@example improved_branin
using Surrogates, Plots
n_samples = 80
lower_bound = [-5, 0]
upper_bound = [10, 15]
xys = sample(n_samples, lower_bound, upper_bound, SobolSample())
zs = [improved_branin(xy, 0.1) for xy in xys]
radial_surrogate = RadialBasis(xys, zs, lower_bound, upper_bound)
x, y = -5.00:10.00, 0.00:15.00
xs = [xy[1] for xy in xys]
ys = [xy[2] for xy in xys]
p1 = surface(x, y, (x, y) -> radial_surrogate([x, y]))
scatter!(xs, ys, marker_z = zs)
p2 = contour(x, y, (x, y) -> radial_surrogate([x, y]))
scatter!(xs, ys, marker_z = zs)
plot(p1, p2, title = "Radial Surrogate")
```
| Surrogates | https://github.com/SciML/Surrogates.jl.git |
|
[
"MIT"
] | 6.10.0 | ba29564853a3f7b10eb699f1e99a62cdb6a3770f | docs | 5380 | The **Inverse Distance Surrogate** is an interpolating method, and in this method, the unknown points are calculated with a weighted average of the sampling points. This model uses the inverse distance between the unknown and training points to predict the unknown point. We do not need to fit this model because the response of an unknown point x is computed with respect to the distance between x and the training points.
Let's optimize the following function using the Inverse Distance Surrogate:
$f(x) = \sin(x) + \sin(x)^2 + \sin(x)^3$.
First of all, we have to import these two packages: `Surrogates` and `Plots`.
```@example Inverse_Distance1D
using Surrogates
using Plots
default()
```
### Sampling
We choose to sample f at 25 points between 0 and 10 using the `sample` function. The sampling points are chosen using a low-discrepancy sequence; this can be done by passing `HaltonSample()` to the `sample` function.
```@example Inverse_Distance1D
f(x) = sin(x) + sin(x)^2 + sin(x)^3
n_samples = 25
lower_bound = 0.0
upper_bound = 10.0
x = sample(n_samples, lower_bound, upper_bound, HaltonSample())
y = f.(x)
scatter(x, y, label = "Sampled points", xlims = (lower_bound, upper_bound), legend = :top)
plot!(f, label = "True function", xlims = (lower_bound, upper_bound), legend = :top)
```
## Building a Surrogate
```@example Inverse_Distance1D
InverseDistance = InverseDistanceSurrogate(x, y, lower_bound, upper_bound)
add_point!(InverseDistance, 5.0, f(5.0))
add_point!(InverseDistance, [5.1, 5.2], [f(5.1), f(5.2)])
prediction = InverseDistance(5.0)
```
Now, we will simply plot `InverseDistance`:
```@example Inverse_Distance1D
plot(x, y, seriestype = :scatter, label = "Sampled points",
xlims = (lower_bound, upper_bound), legend = :top)
plot!(f, label = "True function", xlims = (lower_bound, upper_bound), legend = :top)
plot!(InverseDistance, label = "Surrogate function",
xlims = (lower_bound, upper_bound), legend = :top)
```
## Optimizing
Having built a surrogate, we can now use it to search for minima in our original function `f`.
To optimize using our surrogate, we call the `surrogate_optimize` method. We choose to use Stochastic RBF as the optimization technique and Sobol sampling as the sampling technique.
```@example Inverse_Distance1D
@show surrogate_optimize(
f, SRBF(), lower_bound, upper_bound, InverseDistance, SobolSample())
scatter(x, y, label = "Sampled points", legend = :top)
plot!(f, label = "True function", xlims = (lower_bound, upper_bound), legend = :top)
plot!(InverseDistance, label = "Surrogate function",
xlims = (lower_bound, upper_bound), legend = :top)
```
## Inverse Distance Surrogate Tutorial (ND):
First of all, we will define the `Schaffer` function we are going to build a surrogate for. Notice how its argument is a vector of numbers, one for each coordinate, and how its output is a scalar.
```@example Inverse_DistanceND
using Plots # hide
default(c = :matter, legend = false, xlabel = "x", ylabel = "y") # hide
using Surrogates # hide
function schaffer(x)
x1 = x[1]
x2 = x[2]
fact1 = (sin(x1^2 - x2^2))^2 - 0.5
fact2 = (1 + 0.001 * (x1^2 + x2^2))^2
y = 0.5 + fact1 / fact2
end
```
### Sampling
Let's define our bounds; this time we are working in two dimensions. In particular, we want our first dimension `x` to have bounds `-5, 10`, and the second dimension to have bounds `0, 15`. We are taking 60 samples of the space using Sobol sequences. We then evaluate our function on all the sampling points.
```@example Inverse_DistanceND
n_samples = 60
lower_bound = [-5.0, 0.0]
upper_bound = [10.0, 15.0]
xys = sample(n_samples, lower_bound, upper_bound, SobolSample())
zs = schaffer.(xys);
```
```@example Inverse_DistanceND
x, y = -5:10, 0:15 # hide
p1 = surface(x, y, (x1, x2) -> schaffer((x1, x2))) # hide
xs = [xy[1] for xy in xys] # hide
ys = [xy[2] for xy in xys] # hide
scatter!(xs, ys, zs) # hide
p2 = contour(x, y, (x1, x2) -> schaffer((x1, x2))) # hide
scatter!(xs, ys) # hide
plot(p1, p2, title = "True function") # hide
```
### Building a surrogate
Using the sampled points, we build the surrogate; the steps are analogous to the 1-dimensional case.
```@example Inverse_DistanceND
InverseDistance = InverseDistanceSurrogate(xys, zs, lower_bound, upper_bound)
```
```@example Inverse_DistanceND
p1 = surface(x, y, (x, y) -> InverseDistance([x y])) # hide
scatter!(xs, ys, zs, marker_z = zs) # hide
p2 = contour(x, y, (x, y) -> InverseDistance([x y])) # hide
scatter!(xs, ys, marker_z = zs) # hide
plot(p1, p2, title = "Surrogate") # hide
```
### Optimizing
With our surrogate, we can now search for the minima of the function.
Notice how the new sampled points, which were created during the optimization process, are appended to the `xys` array.
This is why its size changes.
```@example Inverse_DistanceND
size(xys)
```
```@example Inverse_DistanceND
surrogate_optimize(schaffer, SRBF(), lower_bound, upper_bound,
InverseDistance, SobolSample(), maxiters = 10)
```
```@example Inverse_DistanceND
size(xys)
```
```@example Inverse_DistanceND
p1 = surface(x, y, (x, y) -> InverseDistance([x y])) # hide
xs = [xy[1] for xy in xys] # hide
ys = [xy[2] for xy in xys] # hide
zs = schaffer.(xys) # hide
scatter!(xs, ys, zs, marker_z = zs) # hide
p2 = contour(x, y, (x, y) -> InverseDistance([x y])) # hide
scatter!(xs, ys, marker_z = zs) # hide
plot(p1, p2) # hide
```
| Surrogates | https://github.com/SciML/Surrogates.jl.git |
|
[
"MIT"
] | 6.10.0 | ba29564853a3f7b10eb699f1e99a62cdb6a3770f | docs | 5073 | ## Linear Surrogate
A linear surrogate models the relationship between a scalar response (dependent variable) and one or more explanatory variables with a linear function fitted to the data. We will use a linear surrogate to optimize the following function:
$f(x) = \sin(x) + \log(x)$
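Schematically, the surrogate fits a model of the form $g(x) = \beta_0 + \beta_1 x$ to the sampled data by least squares; in the multidimensional case, the single slope $\beta_1$ is replaced by a coefficient vector, $g(x) = \beta_0 + \beta^\top x$.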
First of all we have to import these two packages: `Surrogates` and `Plots`.
```@example linear_surrogate1D
using Surrogates
using Plots
default()
```
### Sampling
We choose to sample f at 20 points between 5.2 and 12.5 using the `sample` function. The sampling points are chosen using a Sobol sequence; this can be done by passing `SobolSample()` to the `sample` function.
```@example linear_surrogate1D
f(x) = sin(x) + log(x)
n_samples = 20
lower_bound = 5.2
upper_bound = 12.5
x = sample(n_samples, lower_bound, upper_bound, SobolSample())
y = f.(x)
scatter(x, y, label = "Sampled points", xlims = (lower_bound, upper_bound))
plot!(f, label = "True function", xlims = (lower_bound, upper_bound))
```
## Building a Surrogate
With our sampled points, we can build the **Linear Surrogate** using the `LinearSurrogate` function.
We can then evaluate the surrogate at any point.
```@example linear_surrogate1D
my_linear_surr_1D = LinearSurrogate(x, y, lower_bound, upper_bound)
add_point!(my_linear_surr_1D, 4.0, 7.2)
add_point!(my_linear_surr_1D, [5.0, 6.0], [8.3, 9.7])
val = my_linear_surr_1D(5.0)
```
Now, we will simply plot the surrogate:
```@example linear_surrogate1D
plot(x, y, seriestype = :scatter, label = "Sampled points",
xlims = (lower_bound, upper_bound))
plot!(f, label = "True function", xlims = (lower_bound, upper_bound))
plot!(my_linear_surr_1D, label = "Surrogate function", xlims = (lower_bound, upper_bound))
```
## Optimizing
Having built a surrogate, we can now use it to search for minima in our original function `f`.
To optimize using our surrogate, we call the `surrogate_optimize` method. We choose to use Stochastic RBF as the optimization technique and again Sobol sampling as the sampling technique.
```@example linear_surrogate1D
@show surrogate_optimize(
f, SRBF(), lower_bound, upper_bound, my_linear_surr_1D, SobolSample())
scatter(x, y, label = "Sampled points")
plot!(f, label = "True function", xlims = (lower_bound, upper_bound))
plot!(my_linear_surr_1D, label = "Surrogate function", xlims = (lower_bound, upper_bound))
```
## Linear Surrogate tutorial (ND)
First of all, we will define the `Egg Holder` function we are going to build a surrogate for. Notice how its argument is a vector of numbers, one for each coordinate, and its output is a scalar.
```@example linear_surrogateND
using Plots # hide
default(c = :matter, legend = false, xlabel = "x", ylabel = "y") # hide
using Surrogates # hide
function egg(x)
x1 = x[1]
x2 = x[2]
term1 = -(x2 + 47) * sin(sqrt(abs(x2 + x1 / 2 + 47)))
term2 = -x1 * sin(sqrt(abs(x1 - (x2 + 47))))
y = term1 + term2
end
```
### Sampling
Let's define our bounds; this time we are working in two dimensions. In particular, we want the first dimension `x` to have bounds `-10, 5`, and the second dimension to have bounds `0, 15`. We take 50 samples of the space using a Sobol sequence and then evaluate our function at all of the sampled points.
```@example linear_surrogateND
n_samples = 50
lower_bound = [-10.0, 0.0]
upper_bound = [5.0, 15.0]
xys = sample(n_samples, lower_bound, upper_bound, SobolSample())
zs = egg.(xys);
```
```@example linear_surrogateND
x, y = -10:5, 0:15 # hide
p1 = surface(x, y, (x1, x2) -> egg((x1, x2))) # hide
xs = [xy[1] for xy in xys] # hide
ys = [xy[2] for xy in xys] # hide
scatter!(xs, ys, zs) # hide
p2 = contour(x, y, (x1, x2) -> egg((x1, x2))) # hide
scatter!(xs, ys) # hide
plot(p1, p2, title = "True function") # hide
```
### Building a surrogate
Using the sampled points, we build the surrogate; the steps are analogous to the 1-dimensional case.
```@example linear_surrogateND
my_linear_ND = LinearSurrogate(xys, zs, lower_bound, upper_bound)
```
```@example linear_surrogateND
p1 = surface(x, y, (x, y) -> my_linear_ND([x y])) # hide
scatter!(xs, ys, zs, marker_z = zs) # hide
p2 = contour(x, y, (x, y) -> my_linear_ND([x y])) # hide
scatter!(xs, ys, marker_z = zs) # hide
plot(p1, p2, title = "Surrogate") # hide
```
### Optimizing
With our surrogate, we can now search for the minima of the function.
Notice how the new sampled points, which were created during the optimization process, are appended to the `xys` array.
This is why its size changes.
```@example linear_surrogateND
size(xys)
```
```@example linear_surrogateND
surrogate_optimize(
egg, SRBF(), lower_bound, upper_bound, my_linear_ND, SobolSample(), maxiters = 10)
```
```@example linear_surrogateND
size(xys)
```
```@example linear_surrogateND
p1 = surface(x, y, (x, y) -> my_linear_ND([x y])) # hide
xs = [xy[1] for xy in xys] # hide
ys = [xy[2] for xy in xys] # hide
zs = egg.(xys) # hide
scatter!(xs, ys, zs, marker_z = zs) # hide
p2 = contour(x, y, (x, y) -> my_linear_ND([x y])) # hide
scatter!(xs, ys, marker_z = zs) # hide
plot(p1, p2) # hide
```
| Surrogates | https://github.com/SciML/Surrogates.jl.git |

["MIT"] | 6.10.0 | ba29564853a3f7b10eb699f1e99a62cdb6a3770f | docs | 2757 |

## Salustowicz Benchmark Function
The true underlying function HyGP had to approximate is the 1D Salustowicz function. The function can be evaluated in the given domain:
``x \in [0, 10]``.
The Salustowicz benchmark function is as follows:
``f(x) = e^{-x} x^3 \cos(x) \sin(x) (\cos(x) \sin^2(x) - 1)``
Let's import these two packages `Surrogates` and `Plots`:
```@example salustowicz1D
using Surrogates
using Plots
default()
```
Now, let's define our objective function:
```@example salustowicz1D
function salustowicz(x)
    term1 = exp(-x) * x^3 * cos(x) * sin(x) # e^(-x), matching the formula above
term2 = (cos(x) * sin(x) * sin(x) - 1)
y = term1 * term2
end
```
Let's sample `f` at 30 points between 0 and 10 using the `sample` function. The sampling points are chosen using a Sobol sequence; this can be done by passing `SobolSample()` to the `sample` function.
```@example salustowicz1D
n_samples = 30
lower_bound = 0
upper_bound = 10
x = sample(n_samples, lower_bound, upper_bound, SobolSample())
y = salustowicz.(x)
xs = lower_bound:0.001:upper_bound
scatter(x, y, label = "Sampled points", xlims = (lower_bound, upper_bound), legend = :top)
plot!(xs, salustowicz.(xs), label = "True function", legend = :top)
```
Now, let's fit the Salustowicz function with different surrogates:
```@example salustowicz1D
InverseDistance = InverseDistanceSurrogate(x, y, lower_bound, upper_bound)
lobachevsky_surrogate = LobachevskySurrogate(
x, y, lower_bound, upper_bound, alpha = 2.0, n = 6)
scatter(
x, y, label = "Sampled points", xlims = (lower_bound, upper_bound), legend = :topright)
plot!(xs, salustowicz.(xs), label = "True function", legend = :topright)
plot!(xs, InverseDistance.(xs), label = "InverseDistanceSurrogate", legend = :topright)
plot!(xs, lobachevsky_surrogate.(xs), label = "Lobachevsky", legend = :topright)
```
Now let's see the Kriging surrogate with different values of the hyperparameter `p`:
```@example salustowicz1D
kriging_surrogate1 = Kriging(x, y, lower_bound, upper_bound, p = 0.9);
kriging_surrogate2 = Kriging(x, y, lower_bound, upper_bound, p = 1.5);
kriging_surrogate3 = Kriging(x, y, lower_bound, upper_bound, p = 1.9);
scatter(
x, y, label = "Sampled points", xlims = (lower_bound, upper_bound), legend = :topright)
plot!(xs, salustowicz.(xs), label = "True function", legend = :topright)
plot!(xs, kriging_surrogate1.(xs), label = "kriging_surrogate1",
ribbon = p -> std_error_at_point(kriging_surrogate1, p), legend = :topright)
plot!(xs, kriging_surrogate2.(xs), label = "kriging_surrogate2",
ribbon = p -> std_error_at_point(kriging_surrogate2, p), legend = :topright)
plot!(xs, kriging_surrogate3.(xs), label = "kriging_surrogate3",
ribbon = p -> std_error_at_point(kriging_surrogate3, p), legend = :topright)
```
| Surrogates | https://github.com/SciML/Surrogates.jl.git |

["MIT"] | 6.10.0 | ba29564853a3f7b10eb699f1e99a62cdb6a3770f | docs | 4606 |

# Gaussian Process Surrogate Tutorial
!!! note
This surrogate requires the `SurrogatesAbstractGPs` module, which can be added by entering `]add SurrogatesAbstractGPs` in the Julia REPL.
Gaussian Process regression in Surrogates.jl is implemented as a simple wrapper around the [AbstractGPs.jl](https://github.com/JuliaGaussianProcesses/AbstractGPs.jl) package. AbstractGPs comes with a variety of covariance functions (kernels). See [KernelFunctions.jl](https://github.com/JuliaGaussianProcesses/KernelFunctions.jl/) for examples.
!!! tip
The examples below demonstrate the use of AbstractGPs with out-of-the-box settings without hyperparameter optimization (i.e. without changing parameters like lengthscale, signal variance, and noise variance). Beyond hyperparameter optimization, careful initialization of hyperparameters and priors on the parameters is required for this surrogate to work properly. For more details on how to fit GPs in practice, check out [A Practical Guide to Gaussian Processes](https://infallible-thompson-49de36.netlify.app/).
Also see this [example](https://juliagaussianprocesses.github.io/AbstractGPs.jl/stable/examples/1-mauna-loa/#Hyperparameter-Optimization) to understand hyperparameter optimization with AbstractGPs.
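As a concrete illustration of the customization the tip above refers to, the sketch below builds a GP prior with an explicit lengthscale and signal variance using kernels from KernelFunctions.jl (re-exported by AbstractGPs). The numeric values are illustrative assumptions rather than tuned hyperparameters, and this block is not part of the executed tutorial examples.

```julia
using AbstractGPs  # kernels from KernelFunctions.jl are re-exported by AbstractGPs

# Illustrative (untuned) hyperparameter values
lengthscale = 0.5
signal_variance = 2.0

# Squared-exponential kernel with an explicit lengthscale, scaled by the signal variance
k = signal_variance * with_lengthscale(SqExponentialKernel(), lengthscale)
gp_prior = GP(k)  # zero-mean GP prior with the customized kernel
```

A prior built this way can be passed to `AbstractGPSurrogate` through its `gp` keyword, just as the examples below do with other kernels.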
## 1D Example
In the example below, the `gp_surrogate` assignment lines can be commented/uncommented to see how the different kernels influence the predictions.
```@example gp_tutorial1d
using Surrogates
using Plots
default()
using AbstractGPs #required to access different types of kernels
using SurrogatesAbstractGPs
f(x) = (6 * x - 2)^2 * sin(12 * x - 4)
n_samples = 4
lower_bound = 0.0
upper_bound = 1.0
xs = lower_bound:0.001:upper_bound
x = sample(n_samples, lower_bound, upper_bound, SobolSample())
y = f.(x)
#gp_surrogate = AbstractGPSurrogate(x,y, gp=GP(SqExponentialKernel()), Σy=0.05) #example of Squared Exponential Kernel
#gp_surrogate = AbstractGPSurrogate(x,y, gp=GP(MaternKernel()), Σy=0.05) #example of MaternKernel
gp_surrogate = AbstractGPSurrogate(
x, y, gp = GP(PolynomialKernel(; c = 2.0, degree = 5)), Σy = 0.25)
plot(x, y, seriestype = :scatter, label = "Sampled points",
xlims = (lower_bound, upper_bound), ylims = (-7, 17), legend = :top)
plot!(xs, f.(xs), label = "True function", legend = :top)
plot!(0:0.001:1, gp_surrogate.gp_posterior; label = "Posterior", ribbon_scale = 2)
```
## Optimization Example
This example shows the use of AbstractGP Surrogates to find the minima of a function:
```@example abstractgps_tutorial_optimization
using Surrogates
using Plots
using AbstractGPs
using SurrogatesAbstractGPs
f(x) = (x - 2)^2
n_samples = 4
lower_bound = 0.0
upper_bound = 4.0
xs = lower_bound:0.1:upper_bound
x = sample(n_samples, lower_bound, upper_bound, SobolSample())
y = f.(x)
gp_surrogate = AbstractGPSurrogate(x, y)
@show surrogate_optimize(f, SRBF(), lower_bound, upper_bound, gp_surrogate, SobolSample())
```
Plotting the function and the sampled points:
```@example abstractgps_tutorial_optimization
scatter(gp_surrogate.x, gp_surrogate.y, label = "Sampled points",
ylims = (-1.0, 5.0), legend = :top)
plot!(xs, gp_surrogate.(xs), label = "Surrogate function",
ribbon = p -> std_error_at_point(gp_surrogate, p), legend = :top)
plot!(xs, f.(xs), label = "True function", legend = :top)
```
## ND Example
```@example abstractgps_tutorialnd
using Plots
default(c = :matter, legend = false, xlabel = "x", ylabel = "y")
using Surrogates
using AbstractGPs
using SurrogatesAbstractGPs
hypot_func = z -> 3 * hypot(z...) + 1
n_samples = 50
lower_bound = [-1.0, -1.0]
upper_bound = [1.0, 1.0]
xys = sample(n_samples, lower_bound, upper_bound, SobolSample())
zs = hypot_func.(xys);
x, y = -2:2, -2:2
p1 = surface(x, y, (x1, x2) -> hypot_func((x1, x2)))
xs = [xy[1] for xy in xys]
ys = [xy[2] for xy in xys]
scatter!(xs, ys, zs)
p2 = contour(x, y, (x1, x2) -> hypot_func((x1, x2)))
scatter!(xs, ys)
plot(p1, p2, title = "True function")
```
Now let's see how our surrogate performs:
```@example abstractgps_tutorialnd
gp_surrogate = AbstractGPSurrogate(xys, zs)
p1 = surface(x, y, (x, y) -> gp_surrogate([x y]))
scatter!(xs, ys, zs, marker_z = zs)
p2 = contour(x, y, (x, y) -> gp_surrogate([x y]))
scatter!(xs, ys, marker_z = zs)
plot(p1, p2, title = "Surrogate")
```
```@example abstractgps_tutorialnd
@show gp_surrogate((0.2, 0.2))
```
```@example abstractgps_tutorialnd
@show hypot_func((0.2, 0.2))
```
And this is our log marginal posterior predictive probability:
```@example abstractgps_tutorialnd
@show logpdf_surrogate(gp_surrogate)
```
| Surrogates | https://github.com/SciML/Surrogates.jl.git |

["MIT"] | 6.10.0 | ba29564853a3f7b10eb699f1e99a62cdb6a3770f | docs | 1889 |

# Ackley Function
The Ackley function is defined as:
``f(x) = -a\exp\left(-b\sqrt{\frac{1}{d}\sum_{i=1}^d x_i^2}\right) - \exp\left(\frac{1}{d}\sum_{i=1}^d \cos(c x_i)\right) + a + \exp(1)``
Usually the recommended values are ``a = 20``, ``b = 0.2``, and ``c = 2\pi``.
Let's see the 1D case.
```@example ackley
using Surrogates
using Plots
default()
```
Now, let's define the `Ackley` function:
```@example ackley
function ackley(x)
a, b, c = 20.0, 0.2, 2.0 * π
len_recip = inv(length(x))
sum_sqrs = zero(eltype(x))
sum_cos = sum_sqrs
for i in x
sum_cos += cos(c * i)
sum_sqrs += i^2
end
    return (-a * exp(-b * sqrt(len_recip * sum_sqrs)) -
            exp(len_recip * sum_cos) + a + exp(1)) # a + e, matching the formula above
end
```
```@example ackley
n = 100
lb = -32.768
ub = 32.768
x = sample(n, lb, ub, SobolSample())
y = ackley.(x)
xs = lb:0.001:ub
scatter(x, y, label = "Sampled points", xlims = (lb, ub), ylims = (0, 30), legend = :top)
plot!(xs, ackley.(xs), label = "True function", legend = :top)
```
```@example ackley
my_rad = RadialBasis(x, y, lb, ub)
my_loba = LobachevskySurrogate(x, y, lb, ub)
```
```@example ackley
scatter(x, y, label = "Sampled points", xlims = (lb, ub), ylims = (0, 30), legend = :top)
plot!(xs, ackley.(xs), label = "True function", legend = :top)
plot!(xs, my_rad.(xs), label = "Radial basis", legend = :top)
plot!(xs, my_loba.(xs), label = "Lobachevsky", legend = :top)
```
The fit looks good. Let's now see if we are able to find the minimum value using
optimization methods:
```@example ackley
surrogate_optimize(ackley, DYCORS(), lb, ub, my_rad, RandomSample())
scatter(x, y, label = "Sampled points", xlims = (lb, ub), ylims = (0, 30), legend = :top)
plot!(xs, ackley.(xs), label = "True function", legend = :top)
plot!(xs, my_rad.(xs), label = "Radial basis optimized", legend = :top)
```
The DYCORS method successfully finds the minimum.
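As a quick sanity check (a sketch reusing `lb`, `ub`, and `my_rad` from the example above, not part of the original tutorial), we can scan a fine grid of the optimized surrogate and compare its best value against the known global minimum of the Ackley function, ``f(0) = 0``:

```julia
# Scan a fine grid of the optimized radial-basis surrogate and report the best
# point/value it predicts; the true global minimum of Ackley is 0 at x = 0.
xs_fine = lb:0.001:ub
vals = my_rad.(xs_fine)
i_best = argmin(vals)
xs_fine[i_best], vals[i_best]
```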
| Surrogates | https://github.com/SciML/Surrogates.jl.git |

["MIT"] | 6.10.0 | ba29564853a3f7b10eb699f1e99a62cdb6a3770f | docs | 1790 |

# Cantilever Beam Function
The Cantilever Beam function is defined as:
``f(w,t) = \frac{4L^3}{Ewt}\sqrt{\left(\frac{Y}{t^2}\right)^2 + \left(\frac{X}{w^2}\right)^2}``
with the parameters ``L``, ``E``, ``X``, and ``Y`` given.
Let's import Surrogates and Plots:
```@example beam
using Surrogates
using SurrogatesPolyChaos
using Plots
default()
```
Define the objective function:
```@example beam
function f(x)
t = x[1]
w = x[2]
L = 100.0
E = 2.770674127819261e7
X = 530.8038576066307
Y = 997.8714938733949
return (4 * L^3) / (E * w * t) * sqrt((Y / t^2)^2 + (X / w^2)^2)
end
```
Let's plot it:
```@example beam
n = 100
lb = [1.0, 1.0]
ub = [8.0, 8.0]
xys = sample(n, lb, ub, SobolSample());
zs = f.(xys);
x, y = 0.0:8.0, 0.0:8.0
p1 = surface(x, y, (x1, x2) -> f((x1, x2)))
xs = [xy[1] for xy in xys]
ys = [xy[2] for xy in xys]
scatter!(xs, ys, zs)
p2 = contour(x, y, (x1, x2) -> f((x1, x2)))
scatter!(xs, ys)
plot(p1, p2, title = "True function")
```
Fitting different surrogates:
```@example beam
mypoly = PolynomialChaosSurrogate(xys, zs, lb, ub)
loba = LobachevskySurrogate(xys, zs, lb, ub)
rad = RadialBasis(xys, zs, lb, ub)
```
Plotting:
```@example beam
p1 = surface(x, y, (x, y) -> mypoly([x y]))
scatter!(xs, ys, zs, marker_z = zs)
p2 = contour(x, y, (x, y) -> mypoly([x y]))
scatter!(xs, ys, marker_z = zs)
plot(p1, p2, title = "Polynomial expansion")
```
```@example beam
p1 = surface(x, y, (x, y) -> loba([x y]))
scatter!(xs, ys, zs, marker_z = zs)
p2 = contour(x, y, (x, y) -> loba([x y]))
scatter!(xs, ys, marker_z = zs)
plot(p1, p2, title = "Lobachevsky")
```
```@example beam
p1 = surface(x, y, (x, y) -> rad([x y]))
scatter!(xs, ys, zs, marker_z = zs)
p2 = contour(x, y, (x, y) -> rad([x y]))
scatter!(xs, ys, marker_z = zs)
plot(p1, p2, title = "Radial basis")
```
| Surrogates | https://github.com/SciML/Surrogates.jl.git |