licenses (sequence, lengths 1-3) | version (string, 677 classes) | tree_hash (string, length 40) | path (string, 1 class) | type (string, 2 classes) | size (string, lengths 2-8) | text (string, lengths 25-67.1M) | package_name (string, lengths 2-41) | repo (string, lengths 33-86) |
---|---|---|---|---|---|---|---|---|
[
"MIT"
] | 4.9.0 | 60ced4d92c6d6cc473df31cc95d1d561beaeeb20 | code | 587 | """
# precis
$(SIGNATURES)
"""
function precis(nt::NamedTuple; io = stdout, digits = 2, depth = Inf, alpha = 0.11)
post = rand(nt.distr, 10_000)
df = DataFrame(post', [keys(nt.coef)...])
Text(precis(df; io=String))
end
"""
# precis
$(SIGNATURES)
"""
function precis(m::DynamicPPL.Model;
io = stdout, digits = 2, depth = Inf, alpha = 0.11,
sampler=NUTS(0.65), nsamples=2000, nchains=4)
chns = mapreduce(c -> sample(m, sampler, nsamples), chainscat, 1:nchains)
df = DataFrame(Array(chns), names(chns, [:parameters]))
Text(precis(df; io=String))
end
| StatisticalRethinking | https://github.com/StatisticalRethinkingJulia/StatisticalRethinking.jl.git |
|
[
"MIT"
] | 4.9.0 | 60ced4d92c6d6cc473df31cc95d1d561beaeeb20 | code | 1752 | # Run like
# using Turing
# @model height(heights) = begin
# μ ~ Normal(178, 20)
# σ ~ Uniform(0, 50)
# heights .~ Normal(μ, σ)
# end
# m = height(d2.height)
# res = quap(m)
# Find the MAP via optimization and get the information matrix at that point.
# Look if your solution converged. Sometimes even solutions that didn't converge
# might be pretty good, on the other hand, just because the solver converged that
# doesn't mean you got the point you are looking for; this isn't even global
# optimization. In any case, trying other optimization methods might help.
function turing_quap(model::Turing.Model, args...; kwargs...)
opt = optimize(model, MAP(), args...; kwargs...)
coef = opt.values.array
var_cov_matrix = informationmatrix(opt)
sym_var_cov_matrix = Symmetric(var_cov_matrix) # lest MvNormal complains, loudly
converged = Optim.converged(opt.optim_result)
distr = if length(coef) == 1
Normal(coef[1], √sym_var_cov_matrix[1]) # Normal expects stddev
else
MvNormal(coef, sym_var_cov_matrix) # MvNormal expects variance matrix
end
params = StatsBase.params(model)
params_tuple = tuple(Symbol.(params)...)
(
coef = NamedTuple{params_tuple}(coef),
vcov = sym_var_cov_matrix,
converged = converged,
distr = distr,
params = [params...]
)
end
function StatsBase.params(model::Turing.Model)
nt = model |> Turing.VarInfo |> Turing.tonamedtuple
p = String[]
for (a, v) in nt
arr = a[1]
var = v[1]
if length(arr) != 1
append!(p, ["$var[$i]" for i in 1:length(arr)])
else
push!(p, var)
end
end
return p
end
export
turing_quap
| StatisticalRethinking | https://github.com/StatisticalRethinkingJulia/StatisticalRethinking.jl.git |
|
[
"MIT"
] | 4.9.0 | 60ced4d92c6d6cc473df31cc95d1d561beaeeb20 | code | 450 | using StanSample, NamedTupleTools
using StatisticalRethinking
using Test
tests = ["srtools", "link", "simulate", "lppd"]
stan_tests = ["wd-loo-compare",]
stan_exists()::Bool = ("JULIA_CMDSTAN_HOME" in keys(ENV) || "CMDSTAN" in keys(ENV))
for t ∈ tests
@testset "$t" begin
include("test_$t.jl")
end
end
if stan_exists()
for t ∈ stan_tests
@testset "$t" begin
#include("test_$t.jl")
end
end
end
| StatisticalRethinking | https://github.com/StatisticalRethinkingJulia/StatisticalRethinking.jl.git |
|
[
"MIT"
] | 4.9.0 | 60ced4d92c6d6cc473df31cc95d1d561beaeeb20 | code | 175 | using DataFrames
d = DataFrame(:a => [1,2,3], :b => [2,3,4])
ref = [[3,5,7], [5,8,11]]
@test link(d, [:a, :b], 1:2) == ref
@test link(d, (r,x) -> r.a + r.b * x, 1:2) == ref
| StatisticalRethinking | https://github.com/StatisticalRethinkingJulia/StatisticalRethinking.jl.git |
|
[
"MIT"
] | 4.9.0 | 60ced4d92c6d6cc473df31cc95d1d561beaeeb20 | code | 232 | using DataFrames
d = DataFrame(:mu => [0.0, 0.1, 0.2], :sigma => [0.1, 0.1, 0.1])
@test lppd(d, (r, x) -> Normal(r.mu + x, r.sigma), 1:4, 4:-1:1) ≈ [-391.7149657288782, -31.71476226597971, -49.71493819252957, -449.71496572887867]
| StatisticalRethinking | https://github.com/StatisticalRethinkingJulia/StatisticalRethinking.jl.git |
|
[
"MIT"
] | 4.9.0 | 60ced4d92c6d6cc473df31cc95d1d561beaeeb20 | code | 449 | using DataFrames, Distributions
d = DataFrame(:mu => [1.0, 2.0], :sigma => [0.1, 0.2])
res = [
[1.0297287984535461, 2.0382395967790607],
[1.8804731046543537, 2.997910951072525]
]
res_dist = [
[Normal(1.0, 0.1), Normal(2.0, 0.1)],
[Normal(2.0, 0.2), Normal(3.0, 0.2)],
]
fun = (r, x) -> Normal(r.mu + x, r.sigma)
@test simulate(d, fun, 0:1, seed=1) ≈ res atol=0.6
@test simulate(d, fun, 0:1, seed=1, return_dist=true) == res_dist
| StatisticalRethinking | https://github.com/StatisticalRethinkingJulia/StatisticalRethinking.jl.git |
|
[
"MIT"
] | 4.9.0 | 60ced4d92c6d6cc473df31cc95d1d561beaeeb20 | code | 136 |
@testset "PI" begin
r = PI(1:100)
@test r ≈ [6.445, 94.555]
r = PI(1:100; perc_prob=0.1)
@test r ≈ [45.55, 55.45]
end
| StatisticalRethinking | https://github.com/StatisticalRethinkingJulia/StatisticalRethinking.jl.git |
|
[
"MIT"
] | 4.9.0 | 60ced4d92c6d6cc473df31cc95d1d561beaeeb20 | code | 5230 | using ParetoSmooth, AxisKeys
import ParetoSmooth: psis_loo, loo_compare
using NamedTupleTools, Distributions
using StanSample
using StatisticalRethinking
using Test
df = CSV.read(sr_datadir("WaffleDivorce.csv"), DataFrame);
df[!, :M] = zscore(df.Marriage)
df[!, :A] = zscore(df.MedianAgeMarriage)
df[!, :D] = zscore(df.Divorce)
data = (N=size(df, 1), D=df.D, A=df.A, M=df.M)
stan5_1 = "
data {
int < lower = 1 > N; // Sample size
vector[N] D; // Outcome
vector[N] A; // Predictor
}
parameters {
real a; // Intercept
real bA; // Slope (regression coefficients)
real < lower = 0 > sigma; // Error SD
}
transformed parameters {
vector[N] mu; // mu is a vector
for (i in 1:N)
mu[i] = a + bA * A[i];
}
model {
a ~ normal(0, 0.2); //Priors
bA ~ normal(0, 0.5);
sigma ~ exponential(1);
D ~ normal(mu , sigma); // Likelihood
}
generated quantities {
vector[N] loglik;
for (i in 1:N)
loglik[i] = normal_lpdf(D[i] | mu[i], sigma);
}
";
stan5_2 = "
data {
int N;
vector[N] D;
vector[N] M;
}
parameters {
real a;
real bM;
real<lower=0> sigma;
}
transformed parameters {
vector[N] mu;
for (i in 1:N)
mu[i]= a + bM * M[i];
}
model {
a ~ normal( 0 , 0.2 );
bM ~ normal( 0 , 0.5 );
sigma ~ exponential( 1 );
D ~ normal( mu , sigma );
}
generated quantities {
vector[N] loglik;
for (i in 1:N)
loglik[i] = normal_lpdf(D[i] | mu[i], sigma);
}
";
stan5_3 = "
data {
int N;
vector[N] D;
vector[N] M;
vector[N] A;
}
parameters {
real a;
real bA;
real bM;
real<lower=0> sigma;
}
transformed parameters {
vector[N] mu;
for (i in 1:N)
mu[i] = a + bA * A[i] + bM * M[i];
}
model {
a ~ normal( 0 , 0.2 );
bA ~ normal( 0 , 0.5 );
bM ~ normal( 0 , 0.5 );
sigma ~ exponential( 1 );
D ~ normal( mu , sigma );
}
generated quantities{
vector[N] loglik;
for (i in 1:N)
loglik[i] = normal_lpdf(D[i] | mu[i], sigma);
}
";
m5_1s = SampleModel("m5.1s", stan5_1)
rc5_1s = stan_sample(m5_1s; data)
m5_2s = SampleModel("m5.2s", stan5_2)
rc5_2s = stan_sample(m5_2s; data)
m5_3s = SampleModel("m5.3s", stan5_3)
rc5_3s = stan_sample(m5_3s; data)
function loo_compare2(models::Vector{SampleModel};
loglikelihood_name="loglik",
model_names=nothing,
sort_models=true,
show_psis=true)
nmodels = length(models)
model_names = isnothing(model_names) ? [models[i].name for i in 1:nmodels] : model_names # keep user-supplied names if given
chains_vec = read_samples.(models, :dataframe) # Obtain chains as DataFrames
chains_vec = DataFrame.(chains_vec, :loglik)
chains_vec = Array.(chains_vec)
println(length(chains_vec))
println(typeof(chains_vec[1]))
new_vec = Array{Float64, 3}[]
for i in 1:nmodels
chains_vec[i] = permutedims(chains_vec[i], (2, 1))
num_params = size(chains_vec[i], 1)
num_samples = models[i].num_samples
num_chains = models[i].num_chains
println([i, size(chains_vec[i]), num_params, num_samples, num_chains])
println()
chn = reshape(chains_vec[i], (num_params, num_samples, num_chains))
append!(new_vec, [chn])
println([i, length(new_vec), size(new_vec[i])])
end
loo_compare2(new_vec; loglikelihood_name, model_names, sort_models, show_psis)
end
function loo_compare2(ll_vec::Vector{<: Array};
loglikelihood_name="loglik",
model_names=nothing,
sort_models=true,
show_psis=true)
nmodels = length(ll_vec)
#ll_vec = Array.(matrix.(chains_vec, loglikelihood_name)) # Extract loglik matrix
#ll_vecp = map(to_paretosmooth, ll_vec) # Permute dims for ParetoSmooth
psis_vec = psis_loo.(ll_vec) # Compute PsisLoo for all models
if show_psis # If a printout is needed
for i in 1:nmodels
psis_vec[i] |> display
end
end
loo_compare(psis_vec...; model_names, sort_models)
end
if success(rc5_1s) && success(rc5_2s) && success(rc5_3s)
println()
nt5_1s = read_samples(m5_1s, :particles)
NamedTupleTools.select(nt5_1s, (:a, :bA, :sigma)) |> display
println()
nt5_2s = read_samples(m5_2s, :particles)
NamedTupleTools.select(nt5_2s, (:a, :bM, :sigma)) |> display
println()
nt5_3s = read_samples(m5_3s, :particles)
NamedTupleTools.select(nt5_3s, (:a, :bA, :bM, :sigma)) |> display
println("\n")
models = [m5_1s, m5_2s, m5_3s]
loo_comparison = loo_compare2(models)
println()
loo_comparison |> display
println()
end
@testset "loo_compare" begin
@test loo_comparison.estimates(Symbol("m5.1s"), :cv_elpd) ≈ 0 atol=0.01
@test loo_comparison.estimates(Symbol("m5.1s"), :cv_avg) ≈ 0 atol=0.01
@test loo_comparison.estimates(Symbol("m5.1s"), :weight) ≈ 0.7 atol=0.1
@test loo_comparison.estimates(Symbol("m5.2s"), :cv_elpd) ≈ -6.9 atol=0.6
@test loo_comparison.estimates(Symbol("m5.2s"), :cv_avg) ≈ -0.13 atol=0.02
@test loo_comparison.estimates(Symbol("m5.2s"), :weight) ≈ 0.0 atol=0.1
@test loo_comparison.estimates(Symbol("m5.3s"), :cv_elpd) ≈ -0.65 atol=0.6
@test loo_comparison.estimates(Symbol("m5.3s"), :cv_avg) ≈ -0.01 atol=0.02
@test loo_comparison.estimates(Symbol("m5.3s"), :weight) ≈ 0.34 atol=0.1
end
| StatisticalRethinking | https://github.com/StatisticalRethinkingJulia/StatisticalRethinking.jl.git |
|
[
"MIT"
] | 4.9.0 | 60ced4d92c6d6cc473df31cc95d1d561beaeeb20 | code | 3564 | using ParetoSmooth, AxisKeys
import ParetoSmooth: psis_loo, loo_compare
using NamedTupleTools, Distributions
using StanSample
using StatisticalRethinking
using Test
df = CSV.read(sr_datadir("WaffleDivorce.csv"), DataFrame);
df[!, :M] = zscore(df.Marriage)
df[!, :A] = zscore(df.MedianAgeMarriage)
df[!, :D] = zscore(df.Divorce)
data = (N=size(df, 1), D=df.D, A=df.A, M=df.M)
stan5_1 = "
data {
int < lower = 1 > N; // Sample size
vector[N] D; // Outcome
vector[N] A; // Predictor
}
parameters {
real a; // Intercept
real bA; // Slope (regression coefficients)
real < lower = 0 > sigma; // Error SD
}
transformed parameters {
vector[N] mu; // mu is a vector
for (i in 1:N)
mu[i] = a + bA * A[i];
}
model {
a ~ normal(0, 0.2); //Priors
bA ~ normal(0, 0.5);
sigma ~ exponential(1);
D ~ normal(mu , sigma); // Likelihood
}
generated quantities {
vector[N] loglik;
for (i in 1:N)
loglik[i] = normal_lpdf(D[i] | mu[i], sigma);
}
";
stan5_2 = "
data {
int N;
vector[N] D;
vector[N] M;
}
parameters {
real a;
real bM;
real<lower=0> sigma;
}
transformed parameters {
vector[N] mu;
for (i in 1:N)
mu[i]= a + bM * M[i];
}
model {
a ~ normal( 0 , 0.2 );
bM ~ normal( 0 , 0.5 );
sigma ~ exponential( 1 );
D ~ normal( mu , sigma );
}
generated quantities {
vector[N] loglik;
for (i in 1:N)
loglik[i] = normal_lpdf(D[i] | mu[i], sigma);
}
";
stan5_3 = "
data {
int N;
vector[N] D;
vector[N] M;
vector[N] A;
}
parameters {
real a;
real bA;
real bM;
real<lower=0> sigma;
}
transformed parameters {
vector[N] mu;
for (i in 1:N)
mu[i] = a + bA * A[i] + bM * M[i];
}
model {
a ~ normal( 0 , 0.2 );
bA ~ normal( 0 , 0.5 );
bM ~ normal( 0 , 0.5 );
sigma ~ exponential( 1 );
D ~ normal( mu , sigma );
}
generated quantities{
vector[N] loglik;
for (i in 1:N)
loglik[i] = normal_lpdf(D[i] | mu[i], sigma);
}
";
m5_1s = SampleModel("m5.1s", stan5_1)
rc5_1s = stan_sample(m5_1s; data)
m5_2s = SampleModel("m5.2s", stan5_2)
rc5_2s = stan_sample(m5_2s; data)
m5_3s = SampleModel("m5.3s", stan5_3)
rc5_3s = stan_sample(m5_3s; data)
if success(rc5_1s) && success(rc5_2s) && success(rc5_3s)
println()
nt5_1s = read_samples(m5_1s, :particles)
NamedTupleTools.select(nt5_1s, (:a, :bA, :sigma)) |> display
println()
nt5_2s = read_samples(m5_2s, :particles)
NamedTupleTools.select(nt5_2s, (:a, :bM, :sigma)) |> display
println()
nt5_3s = read_samples(m5_3s, :particles)
NamedTupleTools.select(nt5_3s, (:a, :bA, :bM, :sigma)) |> display
println("\n")
models = [m5_1s, m5_2s, m5_3s]
loo_comparison = loo_compare(models)
println()
loo_comparison |> display
println()
end
@testset "loo_compare" begin
@test loo_comparison.estimates(Symbol("m5.1s"), :cv_elpd) ≈ 0 atol=0.01
@test loo_comparison.estimates(Symbol("m5.1s"), :cv_avg) ≈ 0 atol=0.01
@test loo_comparison.estimates(Symbol("m5.1s"), :weight) ≈ 0.7 atol=0.1
@test loo_comparison.estimates(Symbol("m5.2s"), :cv_elpd) ≈ -6.9 atol=0.6
@test loo_comparison.estimates(Symbol("m5.2s"), :cv_avg) ≈ -0.13 atol=0.02
@test loo_comparison.estimates(Symbol("m5.2s"), :weight) ≈ 0.0 atol=0.1
@test loo_comparison.estimates(Symbol("m5.3s"), :cv_elpd) ≈ -0.65 atol=0.6
@test loo_comparison.estimates(Symbol("m5.3s"), :cv_avg) ≈ -0.01 atol=0.02
@test loo_comparison.estimates(Symbol("m5.3s"), :weight) ≈ 0.3 atol=0.1
end
| StatisticalRethinking | https://github.com/StatisticalRethinkingJulia/StatisticalRethinking.jl.git |
|
[
"MIT"
] | 4.9.0 | 60ced4d92c6d6cc473df31cc95d1d561beaeeb20 | docs | 10549 | # StatisticalRethinking v4
| **Project Status** | **Documentation** | **Build Status** |
|:-------------------------------------------------------------------------------:|:-------------------------------------------------------------------------------:|:-----------------------------------------------------------------------------------------------:|
|![][project-status-img] | [![][docs-stable-img]][docs-stable-url] [![][docs-dev-img]][docs-dev-url] | ![][CI-build] |
## Note
After many years I have decided to step away from my work with Stan and Julia. My plan is to be around until the end of 2024 for support if someone decides to step in and take over further development and maintenance work.
At the end of 2024 I'll archive the different packages and projects included in the Github organisations StanJulia, StatisticalRethinkingJulia and RegressionAndOtherStoriesJulia if no one is interested (and time-wise able!) to take on this work.
I have thoroughly enjoyed working on both Julia and Stan and see both projects mature during the last 15 or so years. And I will always be grateful for the many folks who have helped me on numerous occasions. Both the Julia and the Stan community are awesome to work with! Thanks a lot!
## Purpose of this package
The StatisticalRethinking.jl `package` contains functions comparable to the functions in the R package "rethinking" associated with the book [Statistical Rethinking](https://xcelab.net/rm/statistical-rethinking/) by Richard McElreath.
These functions are used in Jupyter and Pluto notebook `projects` specifically intended for hands-on use while studying the book or taking the course.
Currently there are 3 of these notebook projects:
1. Max Lapan's [rethinking-2ed-julia](https://github.com/Shmuma/rethinking-2ed-julia) which uses Turing.jl and Jupyter notebooks.
2. The [SR2TuringPluto.jl](https://github.com/StatisticalRethinkingJulia/SR2TuringPluto.jl) project, also Turing.jl based but using Pluto.jl instead of Jupyter. It is based on Max Lapan's work above.
3. The [SR2StanPluto.jl](https://github.com/StatisticalRethinkingJulia/SR2StanPluto.jl) project, which uses Stan as implemented in StanSample.jl and StanQuap.jl. See [StanJulia](https://github.com/StanJulia).
There is a fourth option to study the Turing.jl versions of the models in the Statistical Rethinking book which is in the form of a package and Franklin web pages: [TuringModels.jl](https://github.com/StatisticalRethinkingJulia/TuringModels.jl).
## Why a StatisticalRethinking v4?
Over time, more options have become available to express the material covered in Statistical Rethinking, e.g. the use of KeyedArrays (provided by [AxisKeys.jl](https://github.com/mcabbott/AxisKeys.jl)) for the representation of mcmc chains.
Other examples are the recently developed [ParetoSmooth.jl](https://github.com/TuringLang/ParetoSmooth.jl), which could be used in the PSIS-related examples as a replacement for ParetoSmoothedImportanceSampling.jl, and the preliminary work by [SHMUMA](https://github.com/Shmuma/Dagitty.jl) on Dagitty.jl (a potential replacement for StructuralCausalModels.jl).
While StatisticalRethinking v3 focused on making StatisticalRethinking.jl mcmc package independent, StatisticalRethinking v4 aims at de-coupling it from a specific graphical package and thus enables new choices for graphics, e.g. using Makie.jl and AlgebraOfGraphics.jl.
StatisticalRethinking.jl v4 also fits better with the new setup of Pluto notebooks which keep track of used package versions in the notebooks themselves ([see here](https://github.com/fonsp/Pluto.jl/wiki/🎁-Package-management)).
## Workflow of StatisticalRethinkingJulia (v4):
1. Data preparation, typically using CSV.jl, DataFrames.jl and some statistical methods from StatsBase.jl and Statistics.jl. In some cases simulations are used which need Distributions.jl and a few special methods (available in StatisticalRethinking.jl).
2. Define the mcmc model, e.g. using StanSample.jl or Turing.jl, and obtain draws from the model.
3. Capture the draws for further processing. In Turing that is usually done using MCMCChains.jl, in StanSample.jl v4 it's mostly in the form of a DataFrame, a StanTable, or a KeyedArray of chains (obtained from AxisKeys.jl).
4. For further processing, the projects nearly always convert chains to a DataFrame.
5. Inspect the chains using statistical and visual methods. In many cases this will need one or more statistical packages and one of the graphical options.
Currently visual options are StatsPlots/Plots based, e.g. in MCMCChains.jl and StatisticalRethinkingPlots.jl.
The above 5 steps could all be done by just using StanSample.jl or Turing.jl; a minimal sketch of these steps follows below.
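The following is a minimal, hypothetical sketch of steps 1-5 using Turing.jl; the file name `data.csv`, the column `:y` and the model are placeholders, not part of any of the projects:
```
using CSV, DataFrames, Turing

df = CSV.read("data.csv", DataFrame)          # 1. data preparation (hypothetical file)

@model function gauss(y)                      # 2. define the mcmc model
  μ ~ Normal(0, 10)
  σ ~ Exponential(1)
  y .~ Normal(μ, σ)
end

chn = sample(gauss(df.y), NUTS(0.65), 1000)   # 2. obtain draws from the model
post = DataFrame(chn)                         # 3./4. capture the draws as a DataFrame
describe(post[:, [:μ, :σ]])                   # 5. inspect the chains
```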
**The book Statistical Rethinking has a different objective and studies how models compare, how models can help (or mislead) and why multilevel modeling might help in some cases.**
For this, additional packages are available, explained and demonstrated, e.g. StructuralCausalModels.jl, ParetoSmoothedImportanceSampling.jl and quite a few more.
## Using StatisticalRethinking v4
To work through the StatisticalRethinking book using Julia and Turing or Stan, download either one of the above mentioned `projects` and start Pluto (or Jupyter).
An early, experimental version of [StructuralCausalModels.jl](https://github.com/StatisticalRethinkingJulia/StructuralCausalModels.jl) is also included as a dependency in the StatisticalRethinking.jl package.
In the meantime I will definitely keep my eyes on [Dagitty.jl](https://github.com/Shmuma/Dagitty.jl), [Omega.jl](https://github.com/zenna/Omega.jl) and [CausalInference.jl](https://github.com/mschauer/CausalInference.jl). In particular Dagitty.jl has very similar objectives as StructuralCausalModels.jl and over time might replace it in the StatisticalRethinkingJulia ecosystem. For now, StructuralCausalModels does provide ways to convert DAGs to Dagitty and ggm formats.
Similarly, a dependency [ParetoSmoothedImportanceSampling.jl](https://github.com/StatisticalRethinkingJulia/ParetoSmoothedImportanceSampling.jl) is included, which provides PSIS and WAIC statistics for model comparison.
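As a minimal, hypothetical illustration of the `compare()` abstraction (see the version notes below), assuming each entry is a matrix of pointwise log-likelihood draws with rows as draws and columns as observations (this orientation is an assumption, not documented here):
```
using StatisticalRethinking

# Two stand-in "models": random pointwise log-likelihood matrices (1000 draws × 10 observations).
loglik_mats = [log.(rand(1000, 10)) for _ in 1:2]
compare(loglik_mats, Val(:waic))
```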
## Versions
As listed in issue [#145](https://github.com/StatisticalRethinkingJulia/StatisticalRethinking.jl/issues/145#issue-1064657635), it was recently noticed that some very old Jupyter notebook files are still present, which makes an initial download, e.g. when `dev`-ing the package, rather long. This is not a problem when you just `add` the package.
I am planning to address that in v5.
### Version 4
- Drop the heavy use of @reexport.
- Enable a future switch to Makie.jl and AlgebraOfGraphics.jl by moving all graphics to StatisticalRethinkingPlots and StatisticalRethinkingMakie (in the future).
- Many more improvements by Max Lapan (@shmuma).
### Versions 3.2.1 - 3.3.6
- Improvements by Max Lapan.
- Added trankplot.jl.
- Add compare() and plot_models() abstractions.
### Version 3.2.0
- Option to retrieve sampling results as a NamedTuple.
- Added new method to plotbounds() to handle NamedTuples.
- Added plotlines().
### Versions v3.1.1 - 3.1.8
- Updates from CompatHelper.
- Switch to Github actions (CI, Documenter).
- Updates from Rik Huijzer (link function).
- Redo quap() based on StanOptimize.
- Started updating notebooks in chapters 2-8 using the new quap().
- Redoing and updating the models in the models subdirectory.
### Version 3.1.0
Align (stanbased) quap with Turing quap. quap() now returns a NamedTuple that includes a field `distr` which represents the quadratic Normal (MvNormal) approximation.
### Version 3.0.0
StatisticalRethinking.jl v3 is independent of the underlying mcmc package. All scripts previously in StatisticalRethinking.jl v2 holding the snippets have been replaced by Pluto notebooks in the above mentioned mcmc specific `project` repositories.
Initially SR2TuringPluto.jl will lag SR2StanPluto.jl somewhat but later this year both will cover the same chapters.
It is the intention to develop *tests* for StatisticalRethinking.jl v3 that work across the different mcmc implementations. This will limit dependencies to the `test/Project.toml`.
### Version 2.2.9
Currently the latest release available in the StatisticalRethinking.jl v2 format.
## Installation
To install the package (from the REPL):
```
] add StatisticalRethinking
```
but in most cases this package will be a dependency of another package or project, e.g. SR2StanPluto.jl or SR2TuringPluto.jl.
## Documentation
- [**STABLE**][docs-stable-url] — **documentation of the most recently tagged version.**
- [**DEVEL**][docs-dev-url] — *documentation of the in-development version.*
## Acknowledgements
Of course, without the excellent textbook by Richard McElreath, this package would not have been possible. The author has also been supportive of this work and gave permission to use the datasets.
## Questions and issues
Questions and contributions are very welcome, as are feature requests and suggestions. Please open an [issue][issues-url] if you encounter any problems or have a question.
[docs-dev-img]: https://img.shields.io/badge/docs-dev-blue.svg
[docs-dev-url]: https://statisticalrethinkingjulia.github.io/StatisticalRethinking.jl/latest
[docs-stable-img]: https://img.shields.io/badge/docs-stable-blue.svg
[docs-stable-url]: https://statisticalrethinkingjulia.github.io/StatisticalRethinking.jl/stable
[CI-build]: https://github.com/StatisticalRethinkingJulia/StatisticalRethinking.jl/workflows/CI/badge.svg?branch=master
[![codecov][codecov-img]][codecov-url]
[Coverage Status](https://coveralls.io/github/StatisticalRethinkingJulia/StatisticalRethinking.jl?branch=master)
[codecov-img]: https://codecov.io/gh/StatisticalRethinkingJulia/StatisticalRethinking.jl/branch/master/graph/badge.svg
[codecov-url]: https://codecov.io/gh/StatisticalRethinkingJulia/StatisticalRethinking.jl
[issues-url]: https://github.com/StatisticalRethinkingJulia/StatisticalRethinking.jl/issues
[project-status-img]: https://img.shields.io/badge/lifecycle-stable-green.svg
| StatisticalRethinking | https://github.com/StatisticalRethinkingJulia/StatisticalRethinking.jl.git |
|
[
"MIT"
] | 4.9.0 | 60ced4d92c6d6cc473df31cc95d1d561beaeeb20 | docs | 64 | # To do
1. Additional models and chapters
2. Documentation
3.
| StatisticalRethinking | https://github.com/StatisticalRethinkingJulia/StatisticalRethinking.jl.git |
|
[
"MIT"
] | 4.9.0 | 60ced4d92c6d6cc473df31cc95d1d561beaeeb20 | docs | 1096 | ## Acknowledgements
Of course, without this excellent textbook by Richard McElreath, this package would not have been possible. The author has also been supportive of this work and gave permission to use the datasets.
Richard Torkar has taken the lead in developing the Turing versions of the models in chapter 8 and subsequent chapters.
Tamas Papp has been very helpful during the development of the DynamicHMC versions of the models.
The TuringLang team and #turing contributors on Slack have been extremely helpful! The Turing examples by Cameron Pfiffer are followed closely in several example scripts.
The increasing use of Particles to represent quap approximations is possible thanks to the package [MonteCarloMeasurements.jl](https://github.com/baggepinnen/MonteCarloMeasurements.jl). [Soss.jl](https://github.com/cscherrer/Soss.jl) and [related write-ups](https://cscherrer.github.io) introduced me to that option.
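As a minimal illustration of that option (a hypothetical sketch, not code from this package):
```
using MonteCarloMeasurements, Distributions

# Represent a quadratic (Normal) posterior approximation with 2000 particles.
μ̂ = Particles(2000, Normal(178.0, 0.4))
println(μ̂)  # prints a mean ± std summary
```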
Developing `rethinking` must have been an on-going process over several years, `StatisticalRethinking.jl` and associated packages will likely follow a similar path.
| StatisticalRethinking | https://github.com/StatisticalRethinkingJulia/StatisticalRethinking.jl.git |
|
[
"MIT"
] | 4.9.0 | 60ced4d92c6d6cc473df31cc95d1d561beaeeb20 | docs | 881 | ```@meta
CurrentModule = StatisticalRethinking
```
## sr\_path
```@docs
sr_path(parts...)
```
## sr\_datadir
```@docs
sr_datadir(parts...)
```
## link
```@docs
link
```
## lppd
```@docs
lppd
```
## rescale
```@docs
rescale(x::Vector{Float64}, xbar::Float64, xstd::Float64)
```
## sample
```@docs
sample(df::DataFrame, n; replace=true, ordered=false)
```
## hpdi
```@docs
hpdi(x::Vector{T}; alpha::Real=0.05) where {T<:Real}
```
## meanlowerupper
```@docs
meanlowerupper(data, PI = (0.055, 0.945))
```
## compare
```@docs
compare(m::Vector{Matrix{Float64}}, ::Val{:waic})
```
## create\_observation\_matrix
```@docs
create_observation_matrix(x::Vector, k::Int)
```
## r2\_is\_bad
```@docs
r2_is_bad(model::NamedTuple, df::DataFrame)
```
## PI
```@docs
PI
```
## var2
```@docs
var2(x)
```
## sim\_happiness
```@docs
sim_happiness
```
## simulate
```@docs
simulate
```
| StatisticalRethinking | https://github.com/StatisticalRethinkingJulia/StatisticalRethinking.jl.git |
|
[
"MIT"
] | 4.9.0 | 60ced4d92c6d6cc473df31cc95d1d561beaeeb20 | docs | 1709 | ## References
This package is based on:
1. [McElreath: Statistical Rethinking 2nd edition](http://xcelab.net/rm/statistical-rethinking/)
There is no shortage of additional good books on Bayesian statistics. A few of my favorites are:
2. [Bolstad: Introduction to Bayesian statistics](http://www.wiley.com/WileyCDA/WileyTitle/productCd-1118593227.html)
3. [Bolstad: Understanding Computational Bayesian Statistics](http://www.wiley.com/WileyCDA/WileyTitle/productCd-0470046090.html)
4. [Gelman, Hill: Data Analysis Using Regression and Multilevel/Hierarchical Models](http://www.stat.columbia.edu/~gelman/arm/)
5. [Kruschke: Doing Bayesian Data Analysis](https://sites.google.com/site/doingbayesiandataanalysis/what-s-new-in-2nd-ed)
6. [Lee, Wagenmakers: Bayesian Cognitive Modeling](https://www.cambridge.org/us/academic/subjects/psychology/psychology-research-methods-and-statistics/bayesian-cognitive-modeling-practical-course?format=PB&isbn=9781107603578)
7. [Gelman, Carlin, and others: Bayesian Data Analysis](http://www.stat.columbia.edu/~gelman/book/)
8. [Causal Inference in Statistics - A Primer](https://www.wiley.com/en-us/Causal+Inference+in+Statistics%3A+A+Primer-p-9781119186847)
9. [Betancourt: A Conceptual Introduction to Hamiltonian Monte Carlo](https://arxiv.org/abs/1701.02434)
10. [Pearl, Mackenzie: The Book of Why](https://www.basicbooks.com/titles/judea-pearl/the-book-of-why/9780465097616/)
Special mention is appropriate for the new book:
11. [Gelman, Hill, Vehtari: Regression and Other Stories](https://www.cambridge.org/highereducation/books/regression-and-other-stories/DD20DD6C9057118581076E54E40C372C#overview)
which in a sense is a major update to item 4. above. | StatisticalRethinking | https://github.com/StatisticalRethinkingJulia/StatisticalRethinking.jl.git |
|
[
"MIT"
] | 4.9.0 | 60ced4d92c6d6cc473df31cc95d1d561beaeeb20 | docs | 6030 | ## Github organization
StatisticalRethinking.jl is part of the broader [StatisticalRethinkingJulia](https://github.com/StatisticalRethinkingJulia) Github organization.
## Purpose of this package
The StatisticalRethinking.jl `package` contains functions comparable to the functions in the R package "rethinking" associated with the book [Statistical Rethinking](https://xcelab.net/rm/statistical-rethinking/) by Richard McElreath.
These functions are used in Jupyter and Pluto notebook `projects` specifically intended for hands-on use while studying the book or taking the course.
Currently there are 3 of these notebook projects:
1. Max Lapan's [rethinking-2ed-julia](https://github.com/Shmuma/rethinking-2ed-julia) which uses Turing.jl and Jupyter notebooks. This has been forked, renamed to SR2TuringJupyter.jl and modified in a few places (e.g. data files are obtained from StatisticalRethinking.jl).
2. The [SR2TuringPluto.jl](https://github.com/StatisticalRethinkingJulia/SR2TuringPluto.jl) project, also Turing.jl based but using Pluto.jl instead of Jupyter. It is based on Max Lapan's work above.
3. The [SR2StanPluto.jl](https://github.com/StatisticalRethinkingJulia/SR2StanPluto.jl) project, which uses Stan as implemented in StanSample.jl and StanQuap.jl. See [StanJulia](https://github.com/StanJulia).
There is a fourth option to study the (Turing.jl) models in the Statistical Rethinking book which is in the form of a package and Franklin web pages: [TuringModels.jl](https://github.com/StatisticalRethinkingJulia/TuringModels.jl).
## Why a StatisticalRethinking v4?
Over time, more and better options have become available to express the material covered in Statistical Rethinking, e.g. the use of KeyedArrays (provided by [AxisKeys.jl](https://github.com/mcabbott/AxisKeys.jl)) for the representation of mcmc chains.
Other examples are the recently developed [ParetoSmooth.jl](https://github.com/TuringLang/ParetoSmooth.jl), which could be used in the PSIS-related examples as a replacement for ParetoSmoothedImportanceSampling.jl, and the preliminary work by [SHMUMA](https://github.com/Shmuma/Dagitty.jl) on Dagitty.jl (a potential replacement for StructuralCausalModels.jl).
While StatisticalRethinking v3 focused on making StatisticalRethinking.jl mcmc package independent, StatisticalRethinking v4 aims at de-coupling it from a specific graphical package and thus enables new choices for graphics, e.g. using Makie.jl and AlgebraOfGraphics.jl.
Also, an attempt has been made to make StatisticalRethinking.jl fit better with the new setup of Pluto notebooks which keep track of used package versions in the notebooks themselves ([see here](https://github.com/fonsp/Pluto.jl/wiki/🎁-Package-management)).
## Workflow of StatisticalRethinkingJulia (v4):
1. Data preparation, typically using CSV.jl, DataFrames.jl and some statistical methods from StatsBase.jl and Statistics.jl. In some cases simulations are used which need Distributions.jl and a few special methods (available in StatisticalRethinking.jl).
2. Define the mcmc model, e.g. using StanSample.jl or Turing.jl, and obtain draws from the model.
3. Capture the draws for further processing. In Turing that is usually done using MCMCChains.jl, in StanSample.jl v4 it's mostly in the form of a DataFrame, a StanTable, or a KeyedArray of chains (obtained from AxisKeys.jl).
4. Inspect the chains using statistical and visual methods. In many cases this will need one or more statistical packages and one of the graphical options.
Currently visual options are StatsPlots/Plots based, e.g. in MCMCChains.jl and StatisticalRethinkingPlots.jl.
The above 4 items could all be done by just using StanSample.jl or Turing.jl; a minimal sketch follows below.
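The following is a minimal, hypothetical sketch of items 1-4 with StanSample.jl; the data and the Stan program are placeholders:
```
using DataFrames, StanSample

data = (N = 3, y = [0.1, -0.2, 0.3])             # 1. data preparation (hypothetical)

stan_model = "
data { int N; vector[N] y; }
parameters { real mu; real<lower=0> sigma; }
model {
  mu ~ normal(0, 1);
  sigma ~ exponential(1);
  y ~ normal(mu, sigma);
}
"

sm = SampleModel("gauss", stan_model)            # 2. define the model
rc = stan_sample(sm; data)                       # 2. obtain draws

if success(rc)
  df = read_samples(sm, :dataframe)              # 3. capture the draws as a DataFrame
  first(df, 5)                                   # 4. inspect the chains
end
```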
**The book Statistical Rethinking has a different objective and studies how models compare, how models can help (or mislead) and why multilevel modeling might help in some cases.**
For this, additional packages are available, explained and demonstrated, e.g. StructuralCausalModels.jl, ParetoSmoothedImportanceSampling.jl and quite a few more.
## How to use StatisticalRethinking.jl
To work through the StatisticalRethinking book using Julia and Turing, download either of the `projects` [SR2TuringJupyter.jl](https://github.com/StatisticalRethinkingJulia/SR2TuringJupyter.jl) or [SR2TuringPluto.jl](https://github.com/StatisticalRethinkingJulia/SR2TuringPluto.jl).
To work through the StatisticalRethinking book using Julia and Stan, download `project` [SR2StanPluto.jl](https://github.com/StatisticalRethinkingJulia/SR2StanPluto.jl).
All three projects create a Julia environment where most needed packages are available and can be imported.
In addition to providing a Julia package environment, these also contain chapter by chapter Jupyter or Pluto notebooks to work through the Statistical Rethinking book.
To tailor StatisticalRethinking.jl for Stan, use (in that order!):
```
using StanSample
using StatisticalRethinking
```
or, for Turing:
```
using Turing
using StatisticalRethinking
```
See the notebook examples in the projects for other often used packages.
## Structure of StatisticalRethinkingJulia (v4):
In order to keep environment packages relatively simple (i.e. have a limited set of dependencies on other Julia packages), StatisticalRethinking consists of 2 layers: a top layer containing mcmc-dependent methods (e.g. a model comparison method taking Turing.jl or StanSample.jl derived objects), which in turn call common methods in the bottom layer. The same applies to the graphics packages. This feature relies on Requires.jl, and the mcmc-dependent methods can be found in the `src/require` directories.
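A hypothetical sketch of this mechanism (module name, file path and UUID below are placeholders, not the package's actual layout):
```
module MyRethinking

using Requires

function __init__()
  # The UUID below is a placeholder; @require needs StanSample.jl's real UUID.
  @require StanSample="c1514b29-d3a0-5178-b312-660c88baa699" begin
    include("require/StanSample.jl")   # mcmc-dependent top-layer methods
  end
end

end # module
```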
Consequently, the StatisticalRethinkingJulia ecosystem contains 4 layers:
1. The lowest layer provides mcmc methods, currently Turing.jl and StanSample.jl.
2. Common (mcmc independent) bottom layer in StatisticalRethinking (and StatisticalRethinkingPlots).
3. MCMC dependent top layer in StatisticalRethinking (and StatisticalRethinkingPlots).
4. Chapter by chapter notebooks.
| StatisticalRethinking | https://github.com/StatisticalRethinkingJulia/StatisticalRethinking.jl.git |
|
[
"MIT"
] | 0.1.1 | 5a50dd846a6e8431c653f91ae3572433893ce1a4 | code | 688 | using MonolithicFEMVLFS
using Documenter
DocMeta.setdocmeta!(MonolithicFEMVLFS, :DocTestSetup, :(using MonolithicFEMVLFS); recursive=true)
makedocs(;
modules=[MonolithicFEMVLFS],
authors="Oriol Colomes <[email protected]>",
repo="https://github.com/oriolcg/MonolithicFEMVLFS.jl/blob/{commit}{path}#{line}",
sitename="MonolithicFEMVLFS.jl",
format=Documenter.HTML(;
prettyurls=get(ENV, "CI", "false") == "true",
canonical="https://oriolcg.github.io/MonolithicFEMVLFS.jl",
assets=String[],
),
pages=[
"Home" => "index.md",
],
)
deploydocs(;
repo="github.com/oriolcg/MonolithicFEMVLFS.jl",
devbranch="main",
)
| MonolithicFEMVLFS | https://github.com/oriolcg/MonolithicFEMVLFS.jl.git |
|
[
"MIT"
] | 0.1.1 | 5a50dd846a6e8431c653f91ae3572433893ce1a4 | code | 5065 |
function run_5_1_1_periodic_beam_sapatial_convergence()
# Define Execution function
function run_5_1_1(case::Periodic_Beam_params)
case_name = savename(case)
println("-------------")
println("Case: ",case_name)
e_ϕ, e_η, = run_periodic_beam(case)
e_ϕ_i = last(e_ϕ)
e_η_i = last(e_η)
case_name_suffix = savename(case,"jld2";digits=10)
file = datadir("5-1-1-periodic-beam-spatial-convergence", case_name_suffix)
prefix, data, suffix = DrWatson.parse_savename(case_name_suffix, parsetypes=(Int, Float64))
push!(data, "e_ϕ_i"=>e_ϕ_i, "e_η_i"=>e_η_i)
@tagsave(file,data)
return data
end
# Warm-up case
k = 1
H = 1.0
g = 9.81
ω = √(g*k*tanh(k*H))
T = 2π/ω
Δt = T/1
n = 3
order = 2
path = datadir("5-1-1-periodic-beam-spatial-convergence")
case = Periodic_Beam_params(
name="11Warm-up",
n=n,
dt=Δt,
tf=T,
k=k,
orderϕ=order,
orderη=order,
vtk_output=false
)
produce_or_load(path,case,run_5_1_1;digits=8)
# Element size Convergence
Δt = 1.0e-6
tf = 1.0e-4
k = 15
e_ϕ_n = Float64[]
e_η_n = Float64[]
for order in 2:4
for i in 1:5
nelem = 2^(i+1)
case = Periodic_Beam_params(
name="spatialConvergence",
n=nelem,
dt=Δt,
tf=tf,
k=k,
orderϕ=order,
orderη=order
)
data, file = produce_or_load(path,case,run_5_1_1;digits=8)
push!(e_ϕ_n,data["e_ϕ_i"])
push!(e_η_n,data["e_η_i"])
end
end
plot_case = Periodic_Beam_params(
name="spatialConvergence",
dt=Δt,
tf=tf,
k=k,
)
# Element size Convergence with orderϕ = 2
Δt = 1.0e-6
tf = 1.0e-4
k = 15
e_ϕ_n = Float64[]
e_η_n = Float64[]
for order in 2:4
for i in 1:5
nelem = 2^(i+1)
case = Periodic_Beam_params(
name="spatialConvergence",
n=nelem,
dt=Δt,
tf=tf,
k=k,
orderϕ=2,
orderη=order
)
data, file = produce_or_load(path,case,run_5_1_1;digits=8)
push!(e_ϕ_n,data["e_ϕ_i"])
push!(e_η_n,data["e_η_i"])
end
end
plot_case = Periodic_Beam_params(
name="spatialConvergence",
dt=Δt,
tf=tf,
k=k,
)
plotName = savename(plot_case;ignores=("n", "orderϕ", "orderη"),digits=8)
res = collect_results(path)
println("Ploting h-convergence")
plt1 = plot(
fontsize=12,
legend=:bottomleft,
legendfontsize=10
)
plt2 = plot(
fontsize=12,
legend=:bottomleft,
legendfontsize=10
)
plt3 = plot(
fontsize=12,
legend=:bottomleft,
legendfontsize=10
)
xlabel!(plt1,"Number of elements in x-direction")
xlabel!(plt2,"Number of elements in x-direction")
xlabel!(plt3,"Number of elements in x-direction")
ylabel!(plt1,"Error")
ylabel!(plt2,"Error")
ylabel!(plt3,"Error")
nelems = [2^(i+1) for i in 1:5]
styles = [:dash,:dashdot,:dashdotdot]
shapes = [:square,:circle,:utriangle]
factors_plt1 = [2,5,10]
factors_plt2 = [10,30,80]
for (iorder,order) in enumerate(2:4)
res_order = @linq res |> where(:orderϕ .== order, :orderη .== order, :k .== k, :dt .== Δt, :tf .== tf) |> orderby(:n)
res_order_fixed = @linq res |> where(:orderϕ .== 2, :orderη .== order, :k .== k, :dt .== Δt, :tf .== tf) |> orderby(:n)
local_errors_ϕ = res_order[!,:e_ϕ_i]
local_errors_η = res_order[!,:e_η_i]
local_errors_η_fixed = res_order_fixed[!,:e_η_i]
plot!(plt1,
nelems,local_errors_ϕ,
xaxis=:log,
yaxis=:log,
shape=shapes[iorder],
color=:blue,
style=styles[iorder],
msize=4,
label="r=$(order)"
)
plot!(plt2,
nelems,local_errors_η,
xaxis=:log,
yaxis=:log,
shape=shapes[iorder],
color=:red,
style=styles[iorder],
msize=4,
label="r=$(order+1)"
)
plot!(plt3,
nelems,local_errors_η_fixed,
xaxis=:log,
yaxis=:log,
shape=shapes[iorder],
color=:red,
style=styles[iorder],
msize=4,
label="r=$(order+1)"
)
end
for (iorder,order) in enumerate(2:4)
rate_label = latexstring("n^{-"*"$(order+1)"*"}")
plot!(plt1,
nelems,factors_plt1[iorder]*nelems.^(-float(order+1)),
color=:black,
style=styles[iorder],
label=rate_label,
xticks=(nelems,[string(2*nelem) for nelem in nelems])
)
plot!(plt2,
nelems,factors_plt2[iorder]*nelems.^(-float(order+1)),
color=:black,
style=styles[iorder],
label=rate_label,
xticks=(nelems,[string(2*nelem) for nelem in nelems])
)
plot!(plt3,
nelems,factors_plt2[iorder]*nelems.^(-float(order+1)),
color=:black,
style=styles[iorder],
label=rate_label,
xticks=(nelems,[string(2*nelem) for nelem in nelems])
)
end
savefig(plt1,plotsdir("5-1-1-periodic-beam-spatial-convergence",plotName)*"_phi.png")
savefig(plt2,plotsdir("5-1-1-periodic-beam-spatial-convergence",plotName)*"_eta.png")
savefig(plt3,plotsdir("5-1-1-periodic-beam-spatial-convergence",plotName)*"_eta-fixed-order.png")
end
| MonolithicFEMVLFS | https://github.com/oriolcg/MonolithicFEMVLFS.jl.git |
|
[
"MIT"
] | 0.1.1 | 5a50dd846a6e8431c653f91ae3572433893ce1a4 | code | 3286 | function run_5_1_2_periodic_beam_time_convergence()
# Define Execution function
function run_5_1_2(case::Periodic_Beam_params)
case_name = savename(case;digits=8)
println("-------------")
println("Case: ",case_name)
e_ϕ, e_η, = run_periodic_beam(case)
e_ϕ_i = last(e_ϕ)
e_η_i = last(e_η)
case_name_suffix = savename(case,"jld2";digits=8)
file = datadir("5-1-2-periodic-beam-time-convergence", case_name_suffix)
prefix, data, suffix = DrWatson.parse_savename(case_name_suffix, parsetypes=(Int, Float64))
push!(data, "e_ϕ_i"=>e_ϕ_i, "e_η_i"=>e_η_i)
@tagsave(file,data)
return data
end
# Warm-up case
k = 1
H = 1.0
g = 9.81
ω = √(g*k*tanh(k*H))
T = 2π/ω
Δt = T/1
n = 3
order = 2
path = datadir("5-1-2-periodic-beam-time-convergence")
case = Periodic_Beam_params(
name="1Warm-up",
n=n,
dt=Δt,
tf=T,
k=k,
orderϕ=order,
orderη=order,
vtk_output=false
)
produce_or_load(path,case,run_5_1_2;digits=8)
# Element size Convergence
nelem = 64
tf = 1.0
k = 1
order = 4
e_ϕ_n = Float64[]
e_η_n = Float64[]
factor = 1.0
for i in 0:5
Δt = tf*factor*2.0^(-i)
case = Periodic_Beam_params(
name="timeConvergence",
n=nelem,
dt=Δt,
tf=tf,
k=k,
orderϕ=order,
orderη=order
)
data, file = produce_or_load(path,case,run_5_1_2;digits=8)
println("dt ",Δt," e ",data["e_ϕ_i"])
push!(e_ϕ_n,data["e_ϕ_i"])
push!(e_η_n,data["e_η_i"])
end
plot_case = Periodic_Beam_params(
name="timeConvergence",
dt=Δt,
tf=tf,
k=k,
n=nelem,
orderϕ=order,
orderη=order,
)
plotName = savename(plot_case;ignores=("dt"),digits=8)
res = collect_results(path)
println("Ploting Δt-convergence")
plt1 = plot(
fontsize=12,
legend=:topleft,
legendfontsize=10
)
plt2 = plot(
fontsize=12,
legend=:topleft,
legendfontsize=10
)
xlabel!(plt1,"Time step size")
xlabel!(plt2,"Time step size")
ylabel!(plt1,"Error")
ylabel!(plt2,"Error")
styles = [:dash,:dashdot,:dashdotdot]
shapes = [:square,:circle,:utriangle]
res_dt = @linq res |> where(:orderϕ .== order, :orderη .== order, :k .== k, :n .== nelem, :tf .== tf) #|> orderby(:dt)
errors_ϕ = res_dt[!,:e_ϕ_i]
errors_η = res_dt[!,:e_η_i]
Δts = res_dt[!,:dt]
println(Δts)
println(errors_η)
plot!(plt1,
Δts,errors_ϕ,
xaxis=:log,
yaxis=:log,
shape=shapes[order-1],
color=:blue,
style=styles[order-1],
msize=5,
label=L"\|\phi-\phi_h\|"
)
plot!(plt2,
Δts,errors_η,
xaxis=:log,
yaxis=:log,
shape=shapes[order-1],
color=:red,
style=styles[order-1],
msize=5,
label=L"\|\eta-\eta_h\|"#latexstring("\|\eta_h-\eta\|")
)
rate_label = latexstring("dt^{-2}")
plot!(plt1,
Δts,0.1*Δts.^(2),
color=:black,
style=styles[order-1],
label=rate_label,
xticks=(Δts,[string(Δt) for Δt in Δts])
)
plot!(plt2,
Δts,0.04*Δts.^(2),
color=:black,
style=styles[order-1],
label=rate_label,
xticks=(Δts,[string(Δt) for Δt in Δts])
)
savefig(plt1,plotsdir("5-1-2-periodic-beam-time-convergence",plotName)*"_phi.png")
savefig(plt2,plotsdir("5-1-2-periodic-beam-time-convergence",plotName)*"_eta.png")
end
| MonolithicFEMVLFS | https://github.com/oriolcg/MonolithicFEMVLFS.jl.git |
|
[
"MIT"
] | 0.1.1 | 5a50dd846a6e8431c653f91ae3572433893ce1a4 | code | 4315 | function run_5_1_3_periodic_beam_energy()
# Define Execution function
function run_5_1_3(case::Periodic_Beam_params)
case_name = savename(case)
println("-------------")
println("Case: ",case_name)
(e_ϕ, e_η, E_kin_f , E_pot_f, E_kin_s, E_ela_s, E_kin_f₀, E_kin_s₀, E_pot_f₀, E_ela_s₀, time) = run_periodic_beam(case)
e_ϕ_i = last(e_ϕ)
e_η_i = last(e_η)
case_name_suffix = savename(case,"jld2";digits=8)
file = datadir("5-1-3-periodic-beam-energy", case_name_suffix)
prefix, data, suffix = DrWatson.parse_savename(case_name_suffix, parsetypes=(Int, Float64))
push!(data,
"E_kin_f"=>E_kin_f,
"E_pot_f"=>E_pot_f,
"E_kin_s"=>E_kin_s,
"E_ela_s"=>E_ela_s,
"E_kin_f₀"=>E_kin_f₀,
"E_kin_s₀"=>E_kin_s₀,
"E_pot_f₀"=>E_pot_f₀,
"E_ela_s₀"=>E_ela_s₀,
"time"=>time
)
@tagsave(file,data)
return data
end
# Warm-up case
k = 1
H = 1.0
g = 9.81
ω = √(g*k*tanh(k*H))
T = 2π/ω
Δt = T/1
n = 3
order = 2
path = datadir("5-1-3-periodic-beam-energy")
case = Periodic_Beam_params(
name="11Warm-up",
n=n,
dt=Δt,
tf=T,
k=k,
orderϕ=order,
orderη=order,
vtk_output=false
)
produce_or_load(path,case,run_5_1_3;digits=8)
# parameters
k = 15
ω = √(9.81*k*tanh(k*1.0))
T = 2π/ω
tf1 = 10*T
# Element size Convergence
Δt = 1.0e-3
order = 4
for i in 1:5
nelem = 2^(i+1)
case = Periodic_Beam_params(
name="EnergyEvolution",
n=nelem,
dt=Δt,
tf=tf1,
k=k,
orderϕ=order,
orderη=order
)
data, file = produce_or_load(path,case,run_5_1_3;digits=8)
end
# Time step size Convergence
order = 4
nelem = 64
tf2 = 1*T
for i in 1:5
Δt = tf2 * 2^(-1.0-i)
case = Periodic_Beam_params(
name="EnergyEvolution",
n=nelem,
dt=Δt,
tf=tf2,
k=k,
orderϕ=order,
orderη=order
)
data, file = produce_or_load(path,case,run_5_1_3;digits=8)
end
res = collect_results(path)
println("Ploting Energy evolution")
plt1 = plot(
fontsize=12,
legend=:outerright,
legendfontsize=10,
)
plt2 = plot(
fontsize=12,
legend=:topleft,
legendfontsize=10,
xaxis=:log,
yaxis=:log,
)
xlabel!(plt1,"Time")
xlabel!(plt2,"Time step size")
ylabel!(plt1,"Relative energy error")
ylabel!(plt2,"Relative energy error")
styles = [:dash,:dashdot,:dashdotdot,:dot]
shapes = [:square,:circle,:utriangle,:diamond]
colors = cgrad(:winter,4,categorical=true)
factors_plt2 = [10,30,80,80]
# Plot energy evolution vs elements
colors = cgrad(:winter,5,categorical=true)
for i in 2:5
order = 4
Δti = 1.0e-3
nelem = 2^(i+1)
nx = 2*nelem
res_elem = @linq res |> where(:orderϕ .== order, :orderη .== order, :k .== k, :dt .== Δti, :n.==nelem, :tf .== round(tf1,digits=8))
t = res_elem[!,:time]
E_tot = res_elem[!,:E_kin_f]+res_elem[!,:E_pot_f]+res_elem[!,:E_kin_s]+res_elem[!,:E_ela_s]
E_tot₀ = res_elem[!,:E_kin_f₀]+res_elem[!,:E_pot_f₀]+res_elem[!,:E_kin_s₀]+res_elem[!,:E_ela_s₀]
E_tot = abs.(E_tot[1] .- E_tot₀) ./ E_tot₀
plot!(plt1,
t[1],E_tot,
yaxis=:log,
color=colors[i],
label="nx=$nx"
)
end
# Plot energy evolution vs time step size
Eh = Float64[]
Δts = Float64[]
for i in 1:5
order = 4
nelem = 64
Δt = round(tf2 * 2^(-1.0-i),digits=8)
res_time = @linq res |> where(:orderϕ .== order, :orderη .== order, :k .== k, :dt .== Δt, :n.==nelem, :tf .== round(tf2,digits=8))
t = res_time[!,:time]
E_tot = res_time[!,:E_kin_f]+res_time[!,:E_pot_f]+res_time[!,:E_kin_s]+res_time[!,:E_ela_s]
E_tot₀ = res_time[!,:E_kin_f₀]+res_time[!,:E_pot_f₀]+res_time[!,:E_kin_s₀]+res_time[!,:E_ela_s₀]
E_tot = abs.(E_tot[1] .- E_tot₀) ./ E_tot₀
push!(Eh,E_tot[end])
push!(Δts,Δt)
end
plot!(plt2,
Δts,Eh,
shape=shapes[3],
color=:red,
style=styles[3],
label="r=4, n=128"
)
plot!(plt2,
Δts,0.4*Δts.^(2),
color=:black,
style=styles[3],
label=latexstring("dt^{-2}"),
xticks=(Δts,["T/$(2^(i+1))" for i in 1:length(Δts)])
)
# Save
savefig(plt1,plotsdir("5-1-3-periodic-beam-energy","error_mesh_vs_time.png"))
savefig(plt2,plotsdir("5-1-3-periodic-beam-energy","error_convergence_time.png"))
end
| MonolithicFEMVLFS | https://github.com/oriolcg/MonolithicFEMVLFS.jl.git |
|
[
"MIT"
] | 0.1.1 | 5a50dd846a6e8431c653f91ae3572433893ce1a4 | code | 4292 | function run_5_1_4_periodic_beam_free_surface_energy()
# Define Execution function
function run_5_1_4(case::Periodic_Beam_FS_params)
case_name = savename(case)
println("-------------")
println("Case: ",case_name)
(e_ϕ, e_η, E_kin_f , E_pot_f, E_kin_s, E_ela_s, E_kin_f₀, E_kin_s₀, E_pot_f₀, E_ela_s₀, time) = run_periodic_beam_FS(case)
e_ϕ_i = last(e_ϕ)
e_η_i = last(e_η)
case_name_suffix = savename(case,"jld2";digits=8)
file = datadir("5-1-4-periodic-beam-free-surface-energy", case_name_suffix)
prefix, data, suffix = DrWatson.parse_savename(case_name_suffix, parsetypes=(Int, Float64))
push!(data,
"E_kin_f"=>E_kin_f,
"E_pot_f"=>E_pot_f,
"E_kin_s"=>E_kin_s,
"E_ela_s"=>E_ela_s,
"E_kin_f₀"=>E_kin_f₀,
"E_kin_s₀"=>E_kin_s₀,
"E_pot_f₀"=>E_pot_f₀,
"E_ela_s₀"=>E_ela_s₀,
"time"=>time
)
@tagsave(file,data)
return data
end
# Warm-up case
k = 1
H = 1.0
g = 9.81
ω = √(g*k*tanh(k*H))
T = 2π/ω
Δt = T/1
n = 3
order = 2
path = datadir("5-1-4-periodic-beam-free-surface-energy")
case = Periodic_Beam_FS_params(
name="11Warm-up",
n=n,
dt=Δt,
tf=T,
k=k,
order=order,
vtk_output=false
)
produce_or_load(path,case,run_5_1_4;digits=8)
# parameters
k = 15
ω = √(9.81*k*tanh(k*1.0))
T = 2π/ω
tf1 = 10*T
# Element size Convergence
Δt = 1.0e-3
order = 4
for i in 1:5
nelem = 2^(i+1)
case = Periodic_Beam_FS_params(
name="EnergyEvolution",
n=nelem,
dt=Δt,
tf=tf1,
k=k,
order=order
)
data, file = produce_or_load(path,case,run_5_1_4;digits=8)
end
# Time step size Convergence
order = 4
nelem = 64
tf2 = 1*T
for i in 1:5
Δt = T * 2^(-1.0-i)
case = Periodic_Beam_FS_params(
name="EnergyEvolution",
n=nelem,
dt=Δt,
tf=tf2,
k=k,
order=order
)
data, file = produce_or_load(path,case,run_5_1_4;digits=8)
end
res = collect_results(path)
println("Ploting Energy evolution")
plt1 = plot(
fontsize=12,
legend=:outerright,
legendfontsize=10,
)
plt2 = plot(
fontsize=12,
legend=:topleft,
legendfontsize=10,
xaxis=:log,
yaxis=:log,
)
xlabel!(plt1,"Time")
xlabel!(plt2,"Time step size")
ylabel!(plt1,"Relative energy error")
ylabel!(plt2,"Relative energy error")
styles = [:dash,:dashdot,:dashdotdot,:dot]
shapes = [:square,:circle,:utriangle,:diamond]
colors = cgrad(:winter,4,categorical=true)
factors_plt2 = [10,30,80,80]
# Plot energy evolution vs elements
colors = cgrad(:winter,5,categorical=true)
for i in 2:5
order = 4
Δti = 1.0e-3
nelem = 2^(i+1)
nx = 2*nelem
res_elem = @linq res |> where(:order .== order, :k .== k, :dt .== Δti, :n.==nelem, :tf .== round(tf1,digits=8))
t = res_elem[!,:time]
E_tot = res_elem[!,:E_kin_f]+res_elem[!,:E_pot_f]+res_elem[!,:E_kin_s]+res_elem[!,:E_ela_s]
E_tot₀ = res_elem[!,:E_kin_f₀]+res_elem[!,:E_pot_f₀]+res_elem[!,:E_kin_s₀]+res_elem[!,:E_ela_s₀]
E_tot = abs.(E_tot[1] .- E_tot₀) ./ E_tot₀
plot!(plt1,
t[1],E_tot,
yaxis=:log,
color=colors[i],
label="nx=$nx"
)
end
# Plot energy evolution vs time step size
Eh = Float64[]
Δts = Float64[]
for i in 1:5
order = 4
nelem = 64
Δt = round(T * 2^(-1.0-i),digits=8)
res_time = @linq res |> where(:order .== order, :k .== k, :dt .== Δt, :n.==nelem, :tf .== round(tf2,digits=8))
t = res_time[!,:time]
E_tot = res_time[!,:E_kin_f]+res_time[!,:E_pot_f]+res_time[!,:E_kin_s]+res_time[!,:E_ela_s]
E_tot₀ = res_time[!,:E_kin_f₀]+res_time[!,:E_pot_f₀]+res_time[!,:E_kin_s₀]+res_time[!,:E_ela_s₀]
E_tot = abs.(E_tot[1] .- E_tot₀) ./ E_tot₀
push!(Eh,E_tot[end])
push!(Δts,Δt)
end
plot!(plt2,
Δts,Eh,
shape=shapes[3],
color=:red,
style=styles[3],
label="r=4, n=128"
)
plot!(plt2,
Δts,0.2*Δts.^(2),
color=:black,
style=styles[3],
label=latexstring("dt^{-2}"),
xticks=(Δts,["T/$(2^(i+1))" for i in 1:length(Δts)])
)
# Save
savefig(plt1,plotsdir("5-1-4-periodic-beam-free-surface-energy","error_mesh_vs_time.png"))
savefig(plt2,plotsdir("5-1-4-periodic-beam-free-surface-energy","error_convergence_time.png"))
end
| MonolithicFEMVLFS | https://github.com/oriolcg/MonolithicFEMVLFS.jl.git |
|
[
"MIT"
] | 0.1.1 | 5a50dd846a6e8431c653f91ae3572433893ce1a4 | code | 2887 | function run_5_2_1_Khavakhpasheva_freq_domain()
# Define execution function
function run_5_2_1(case::Khabakhpasheva_freq_domain_params)
case_name = savename(case)
println("-------------")
println("Case: ",case_name)
x,η = run_Khabakhpasheva_freq_domain(case)
case_name_suffix = savename(case,"jld2";digits=8)
file = datadir("5-2-1-Khabakhpasheva-freq-domain", case_name_suffix)
prefix,data,suffix = DrWatson.parse_savename(case_name_suffix,parsetypes=(Int, Float64))
push!(data,"x"=>x, "η"=>η)
save(file,data)
return data
end
# Warm-up case
nx = 10
ny = 1
order = 2
path = datadir("5-2-1-Khabakhpasheva-freq-domain")
case = Khabakhpasheva_freq_domain_params(
name="2Warm-up",
nx=nx,
ny=ny,
order=order
)
produce_or_load(path,case,run_5_2_1;digits=8)
# Case 1: with joint
case = Khabakhpasheva_freq_domain_params(name="xi-0")
data, file = produce_or_load(path,case,run_5_2_1)
# Case 2: without joint
case = Khabakhpasheva_freq_domain_params(ξ=625,name="xi-625")
data, file = produce_or_load(path,case,run_5_2_1)
# Gather data
res = collect_results(path)
@show res
# Reference data
Khabakhpasheva_data = CSV.File(datadir("Ref_data/Khabakhpasheva","Khabakhpasheva_with_joint.csv");header=false)
Riyansyah_data = CSV.File(datadir("Ref_data/Khabakhpasheva","Riyansyah_with_joint.csv");header=false)
Khabakhpasheva_woj_data = CSV.File(datadir("Ref_data/Khabakhpasheva","Khabakhpasheva_without_joint.csv");header=false)
Riyansyah_woj_data = CSV.File(datadir("Ref_data/Khabakhpasheva","Riyansyah_without_joint.csv");header=false)
# Plot case 1
res1 = @linq res |> where(:ξ.==0.0, :order .== 4)
xs1 = res1[!,:x]
η_xs1 = res1[!,:η]
plt1 = plot(xs1,η_xs1,
xlims=(0,1),
lw=2,
label="Monolithic CG/DG",
palette=:rainbow)
plot!(plt1,Khabakhpasheva_data.Column1,Khabakhpasheva_data.Column2,marker="o",line=false,label="Khabakhpasheva et al.")
plot!(plt1,Riyansyah_data.Column1,Riyansyah_data.Column2,
ls=:dash,
lw=2,
label="Riyansyah et al.")
xlabel!("x/L")
ylabel!("|η|/η₀")
savefig(plt1, plotsdir("5-2-1-Khabakhpasheva-freq-domain","with_joint"))
# Plot case 2
res2 = @linq res |> where(:ξ.==625.0, :order .== 4)
xs2 = res2[!,:x]
η_xs2 = res2[!,:η]
plt2 = plot(xs2,η_xs2,
xlims=(0,1),
lw=2,
label="Monolithic CG/DG",
palette=:rainbow)
plot!(plt2,Khabakhpasheva_woj_data.Column1,Khabakhpasheva_woj_data.Column2,marker="o",line=false,label="Khabakhpasheva et al.")
plot!(plt2,Riyansyah_woj_data.Column1,Riyansyah_woj_data.Column2,
ls=:dash,
lw=2,
label="Riyansyah et al.")
xlabel!("x/L")
ylabel!("|η|/η₀")
savefig(plt2, plotsdir("5-2-1-Khabakhpasheva-freq-domain","without_joint"))
end
| MonolithicFEMVLFS | https://github.com/oriolcg/MonolithicFEMVLFS.jl.git |
|
[
"MIT"
] | 0.1.1 | 5a50dd846a6e8431c653f91ae3572433893ce1a4 | code | 3751 | function run_5_2_2_Khavakhpasheva_time_domain()
# Define execution function
function run_5_2_2(case::Khabakhpasheva_time_domain_params)
case_name = savename(case)
println("-------------")
println("Case: ",case_name)
t,x,ηt = run_Khabakhpasheva_time_domain(case)
case_name_suffix = savename(case,"jld2";digits=8)
file = datadir("5-2-2-Khabakhpasheva-time-domain", case_name_suffix)
prefix,data,suffix = DrWatson.parse_savename(case_name_suffix,parsetypes=(Int, Float64))
push!(data,"t"=>t, "x"=>x, "ηt"=>ηt)
save(file,data)
return data
end
# Warm-up case
nx = 10
ny = 1
order = 2
path = datadir("5-2-2-Khabakhpasheva-time-domain")
case = Khabakhpasheva_time_domain_params(
name="2Warm-up",
nx=nx,
ny=ny,
order=order
)
produce_or_load(path,case,run_5_2_2;digits=8)
# Case 1: with joint
case = Khabakhpasheva_time_domain_params(name="xi-0")
data, file = produce_or_load(path,case,run_5_2_2)
# Case 2: without joint
case = Khabakhpasheva_time_domain_params(ξ=625,name="xi-625")
data, file = produce_or_load(path,case,run_5_2_2)
# Gather data
res = collect_results(path)
@show res
# Reference data
Khabakhpasheva_data = CSV.File(datadir("Ref_data/Khabakhpasheva","Khabakhpasheva_with_joint.csv");header=false)
Riyansyah_data = CSV.File(datadir("Ref_data/Khabakhpasheva","Riyansyah_with_joint.csv");header=false)
Khabakhpasheva_woj_data = CSV.File(datadir("Ref_data/Khabakhpasheva","Khabakhpasheva_without_joint.csv");header=false)
Riyansyah_woj_data = CSV.File(datadir("Ref_data/Khabakhpasheva","Riyansyah_without_joint.csv");header=false)
# Plot case 1
res1 = @linq res |> where(:ξ.==0.0, :order .== 4)
ts1 = res1[!,:t][1]
xs1 = res1[!,:x][1]
η_xs1 = res1[!,:ηt][1]
ηxps = permutedims(reshape(hcat(η_xs1...),length(xs1),length(ts1)))
η_max = [maximum(abs.(ηxps[1000:2000,it])) for it in 1:length(xs1)]
colors = cgrad(:Blues_9,7,categorical=true,rev=true)
plt1 = plot(xs1,η_max,
xlims=(0,1),
lw=2,
label="Monolithic CG/DG (envelope)",
palette=:rainbow)
for (i,it) in enumerate(1000:5:1015)
t_it = round(ts1[it],digits=3)
plot!(plt1,xs1,η_xs1[it],
xlims=(0,1),
lw=1,
label="Monolithic CG/DG t=$t_it",
color=colors[i])
end
plot!(plt1,Khabakhpasheva_data.Column1,Khabakhpasheva_data.Column2,marker="o",line=false,label="Khabakhpasheva et al.")
plot!(plt1,Riyansyah_data.Column1,Riyansyah_data.Column2,
ls=:dash,
lw=2,
label="Riyansyah et al.",
color=:green)
xlabel!("x/L")
ylabel!("|η|/η₀")
savefig(plt1, plotsdir("5-2-2-Khabakhpasheva-time-domain","with_joint"))
# Plot case 2
res2 = @linq res |> where(:ξ.==625.0, :order .== 4)
ts2 = res2[!,:t][1]
xs2 = res2[!,:x][1]
η_xs2 = res2[!,:ηt][1]
ηxps = permutedims(reshape(hcat(η_xs2...),length(xs2),length(ts2)))
η_max = [maximum(abs.(ηxps[1000:2000,it])) for it in 1:length(xs2)]
plt2 = plot(xs2,η_max,
xlims=(0,1),
lw=2,
label="Monolithic CG/DG (envelope)",
palette=:rainbow)
for (i,it) in enumerate(1000:5:1015)
t_it = round(ts2[it],digits=3)
plot!(plt2,xs2,η_xs2[it],
xlims=(0,1),
lw=1,
label="Monolithic CG/DG t=$t_it",
color=colors[i])
end
plot!(plt2,Khabakhpasheva_woj_data.Column1,Khabakhpasheva_woj_data.Column2,marker="o",line=false,label="Khabakhpasheva et al.")
plot!(plt2,Riyansyah_woj_data.Column1,Riyansyah_woj_data.Column2,
ls=:dash,
lw=2,
label="Riyansyah et al.",
color=:green)
xlabel!("x/L")
ylabel!("|η|/η₀")
savefig(plt2, plotsdir("5-2-2-Khabakhpasheva-time-domain","without_joint"))
end
| MonolithicFEMVLFS | https://github.com/oriolcg/MonolithicFEMVLFS.jl.git |
|
[
"MIT"
] | 0.1.1 | 5a50dd846a6e8431c653f91ae3572433893ce1a4 | code | 1963 | function run_5_3_1_Liu()
# Define execution function
function run_5_3_1(case::Liu_params)
case_name = savename(case)
println("-------------")
println("Case: ",case_name)
x,η = run_Liu(case)
case_name_suffix = savename(case,"jld2";digits=8)
file = datadir("5-3-1-Liu", case_name_suffix)
prefix,data,suffix = DrWatson.parse_savename(case_name_suffix,parsetypes=(Int, Float64))
push!(data,"x"=>x, "η"=>η)
save(file,data)
return data
end
# Case 1: ω=0.4
path = datadir("5-3-1-Liu")
case = Liu_params(ω=0.4,name="omega-04")
@show case
data, file = produce_or_load(path,case,run_5_3_1)
# Case 2: ω=0.8
case = Liu_params(ω=0.8,name="omega-08")
@show case
data, file = produce_or_load(path,case,run_5_3_1)
# Gather data
res = collect_results(path)
# Reference data
Liu_data_04 = CSV.File(datadir("Ref_data/Liu","omega_04.csv");header=false)
Liu_data_08 = CSV.File(datadir("Ref_data/Liu","omega_08.csv");header=false)
# Plot case 1
res1 = @linq res |> where(:ω .== 0.4)
xs1 = res1[!,:x][1]
η_xs1 = res1[!,:η][1]
p = sortperm(xs1)
plt1 = plot(xs1[p],η_xs1[p],
xlims=(0,1),
ylims=(0.6,1.4),
lw=2,
label="Monolithic CG/DG",
palette=:rainbow)
plot!(plt1,Liu_data_04.Column1,Liu_data_04.Column2,marker="o",line=false,label="Liu et al.")
xlabel!("x/L")
ylabel!("|η|/η₀")
savefig(plt1, plotsdir("5-3-1-Liu","omega_04"))
# Plot case 2
res1 = @linq res |> where(:ω .== 0.8)
xs1 = res1[!,:x][1]
η_xs1 = res1[!,:η][1]
p = sortperm(xs1)
plt1 = plot(xs1[p],η_xs1[p],
xlims=(0,1),
ylims=(0,1.2),
lw=2,
label="Monolithic CG/DG",
palette=:rainbow)
plot!(plt1,Liu_data_08.Column1,Liu_data_08.Column2,marker="o",line=false,label="Liu et al.")
xlabel!("x/L")
ylabel!("|η|/η₀")
savefig(plt1, plotsdir("5-3-1-Liu","omega_08"))
end
| MonolithicFEMVLFS | https://github.com/oriolcg/MonolithicFEMVLFS.jl.git |
|
[
"MIT"
] | 0.1.1 | 5a50dd846a6e8431c653f91ae3572433893ce1a4 | code | 3775 | function run_5_4_1_Yago()
# Define execution function
function run_5_4_1(case::Yago_freq_domain_params)
case_name = savename(case)
println("-------------")
println("Case: ",case_name)
x,η = run_Yago_freq_domain(case)
case_name_suffix = savename(case,"jld2";digits=8)
file = datadir("5-4-1-Yago", case_name_suffix)
prefix,data,suffix = DrWatson.parse_savename(case_name_suffix,parsetypes=(Int, Float64))
push!(data,"x"=>x, "η"=>η)
@tagsave(file,data)
return data
end
# Warm-up case
path = datadir("5-4-1-Yago")
nx = 2
ny = 2
nz = 1
r = 2
case = Yago_freq_domain_params(
name="4warm-up",
order=r,
nx=nx,ny=ny,nz=nz,
λfactor=0.4,
vtk_output=true
)
data, file = produce_or_load(path,case,run_5_4_1;digits=8)
# Case 1: λfactor=0.4
λfactor = 0.4
nx = 32
ny = 4
nz = 3
r = 4
dfactor=4
case = Yago_freq_domain_params(
name="lambda-04",
order=r,
nx=nx,ny=ny,nz=nz,
λfactor=λfactor,
dfactor=dfactor,
vtk_output=true
)
data, file = produce_or_load(path,case,run_5_4_1;digits=8)
# Case 2: λfactor=0.6
λfactor = 0.6
case = Yago_freq_domain_params(
name="lambda-06",
order=r,
nx=nx,ny=ny,nz=nz,
λfactor=λfactor,
dfactor=dfactor,
vtk_output=true
)
data, file = produce_or_load(path,case,run_5_4_1;digits=8)
# Case 3: λfactor=0.8
λfactor = 0.8
case = Yago_freq_domain_params(
name="lambda-08",
order=r,
nx=nx,ny=ny,nz=nz,
λfactor=λfactor,
dfactor=dfactor,
vtk_output=true
)
data, file = produce_or_load(path,case,run_5_4_1;digits=8)
# Gather data
res = collect_results(path)
Yago_data = []
for i in 1:10
α = round(0.1*i,digits=2)
push!(Yago_data, CSV.File(datadir("Ref_data/Yago","lambda_L_$α.csv");header=false))
end
Fu_data = []
for case in ["L_04" "L_06" "L_08"]
push!(Fu_data, CSV.File(datadir("Ref_data/Fu","$case.csv");header=false))
end
# Plot case 1
λfactor = 0.4
plt1 = plot(legend=:top)
res_λ = @linq res |> where(:order .== r, :nx .== nx, :ny .== ny, :nz .== nz, :λfactor .== λfactor, :dfactor.== dfactor )
ηs = res_λ[!,:η][1]
xps = res_λ[!,:x][1]
p = sortperm(xps)
plot!(plt1,xps[p],ηs[p],label="Monolithic CG/DG",ylims=(0,1.3),lw=2,palette=:rainbow)#,marker="o",line=false)
plot!(plt1,-2.0.*(Fu_data[1].Column1.-0.5),Fu_data[1].Column2,ls=:dash,label="Fu et al.")#color=:black,
plot!(plt1,Yago_data[4].Column1,Yago_data[4].Column2,marker="o",line=false,label="Yago et al.")
savefig(plt1,plotsdir("5-4-1-Yago","L_factor_04"))
# Plot case 2
λfactor = 0.6
plt1 = plot(legend=:top)
res_λ = @linq res |> where(:order .== r, :nx .== nx, :ny .== ny, :nz .== nz, :λfactor .== λfactor, :dfactor.== dfactor )
ηs = res_λ[!,:η][1]
xps = res_λ[!,:x][1]
p = sortperm(xps)
plot!(plt1,xps[p],ηs[p],label="Monolithic CG/DG",ylims=(0,1.3),lw=2,palette=:rainbow)#,color=colors[i])
plot!(plt1,-2.0.*(Fu_data[2].Column1.-0.5),Fu_data[2].Column2,ls=:dash,label="Fu et al.")
plot!(plt1,Yago_data[6].Column1,Yago_data[6].Column2,marker="o",line=false,label="Yago et al.")
savefig(plt1,plotsdir("5-4-1-Yago","L_factor_06"))
# Plot case 3
λfactor = 0.8
plt1 = plot(legend=:top)
res_λ = @linq res |> where(:order .== r, :nx .== nx, :ny .== ny, :nz .== nz, :λfactor .== λfactor, :dfactor.== dfactor )
ηs = res_λ[!,:η][1]
xps = res_λ[!,:x][1]
p = sortperm(xps)
plot!(plt1,xps[p],ηs[p],label="Monolithic CG/DG",ylims=(0,1.3),lw=2,palette=:rainbow)#,color=colors[i])
plot!(plt1,-2.0.*(Fu_data[3].Column1.-0.5),Fu_data[3].Column2,ls=:dash,label="Fu et al.")
plot!(plt1,Yago_data[8].Column1,Yago_data[8].Column2,marker="o",line=false,label="Yago et al.")
savefig(plt1,plotsdir("5-4-1-Yago","L_factor_08"))
end
| MonolithicFEMVLFS | https://github.com/oriolcg/MonolithicFEMVLFS.jl.git |
|
[
"MIT"
] | 0.1.1 | 5a50dd846a6e8431c653f91ae3572433893ce1a4 | code | 8481 | module Khabakhpasheva_freq_domain
using Gridap
using Gridap.Geometry
using Gridap.FESpaces
using Gridap.CellData
using Plots
using Parameters
export run_Khabakhpasheva_freq_domain
export Khabakhpasheva_freq_domain_params
@with_kw struct Khabakhpasheva_freq_domain_params
name::String = "KhabakhpashevaFreqDomain"
nx::Int = 20
ny::Int = 5
order::Int = 4
ξ::Float64 = 0.0
vtk_output::Bool = true
end
function run_Khabakhpasheva_freq_domain(params::Khabakhpasheva_freq_domain_params)
# Unpack input parameters
@unpack name, nx, ny, order, ξ, vtk_output = params
# Fixed parameters
Lb = 12.5
m = 8.36
EI₁ = 47100.0
EI₂ = 471.0
β = 0.2
H = 1.1
α = 0.249
# Domain size
Ld = Lb # damping zone length
LΩ = 2Ld + 2Lb
x₀ = 0.0
xdᵢₙ = x₀ + Ld
xb₀ = xdᵢₙ + Lb/2
xbⱼ = xb₀ + β*Lb
xb₁ = xb₀ + Lb
xdₒᵤₜ = LΩ - Ld
@show Ld
@show LΩ
@show x₀
@show xdᵢₙ
@show xb₀
@show xbⱼ
@show xb₁
@show xdₒᵤₜ
# Physics
g = 9.81
ρ = 1025
d₀ = m/ρ
a₁ = EI₁/ρ
a₂ = EI₂/ρ
kᵣ = ξ*a₁/Lb
# wave properties
λ = α*Lb
k = 2π/λ
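# Linear finite-depth dispersion relation: ω² = g·k·tanh(kH)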
ω = sqrt(g*k*tanh(k*H))
T = 2π/ω
η₀ = 0.01
ηᵢₙ(x) = η₀*exp(im*k*x[1])
ϕᵢₙ(x) = -im*(η₀*ω/k)*(cosh(k*x[2]) / sinh(k*H))*exp(im*k*x[1])
vᵢₙ(x) = (η₀*ω)*(cosh(k*x[2]) / sinh(k*H))*exp(im*k*x[1])
vzᵢₙ(x) = -im*ω*η₀*exp(im*k*x[1])
# Numerics constants
nx_total = Int(ceil(nx/β)*ceil(LΩ/Lb))
h = LΩ / nx_total
γ = 1.0*order*(order-1)/h
βₕ = 0.5
αₕ = -im*ω/g * (1-βₕ)/βₕ
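# βₕ and αₕ define the stabilized free-surface condition: in the weak form below
# the kinematic test function is augmented as (u + αₕ*w).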
@show nx_total
@show h
@show βₕ
@show αₕ
# Damping
μ₀ = 2.5
μ₁ᵢₙ(x) = μ₀*(1.0 - sin(π/2*(x[1])/Ld))
μ₁ₒᵤₜ(x) = μ₀*(1.0 - cos(π/2*(x[1]-xdₒᵤₜ)/Ld))
μ₂ᵢₙ(x) = μ₁ᵢₙ(x)*k
μ₂ₒᵤₜ(x) = μ₁ₒᵤₜ(x)*k
ηd(x) = μ₂ᵢₙ(x)*ηᵢₙ(x)
∇ₙϕd(x) = μ₁ᵢₙ(x)*vzᵢₙ(x)
# Fluid model
domain = (x₀, LΩ, 0.0, H)
partition = (nx_total,ny)
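# f_y grades the vertical coordinate geometrically so that cells cluster
# towards the free surface (y = H), where the potential varies fastest.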
function f_y(x)
if x == H
return H
end
i = x / (H/ny)
return H-H/(2.5^i)
end
map(x) = VectorValue(x[1], f_y(x[2]))
𝒯_Ω = CartesianDiscreteModel(domain,partition,map=map)
# Labelling
labels_Ω = get_face_labeling(𝒯_Ω)
add_tag_from_tags!(labels_Ω,"surface",[3,4,6]) # assign the label "surface" to the entity 3,4 and 6 (top corners and top side)
add_tag_from_tags!(labels_Ω,"bottom",[1,2,5]) # assign the label "bottom" to the entity 1,2 and 5 (bottom corners and bottom side)
add_tag_from_tags!(labels_Ω,"inlet",[7]) # assign the label "inlet" to the entity 7 (left side)
add_tag_from_tags!(labels_Ω,"outlet",[8]) # assign the label "outlet" to the entity 8 (right side)
add_tag_from_tags!(labels_Ω, "water", [9]) # assign the label "water" to the entity 9 (interior)
# Triangulations
Ω = Interior(𝒯_Ω)
Γ = Boundary(𝒯_Ω,tags="surface")
Γin = Boundary(𝒯_Ω,tags="inlet")
# Auxiliary functions
function is_beam1(xs) # Check if an element is inside the beam1
n = length(xs)
x = (1/n)*sum(xs)
(xb₀ <= x[1] <= xbⱼ ) * ( x[2] ≈ H)
end
function is_beam2(xs) # Check if an element is inside the beam2
n = length(xs)
x = (1/n)*sum(xs)
(xbⱼ <= x[1] <= xb₁ ) * ( x[2] ≈ H)
end
function is_damping1(xs) # Check if an element is inside the damping zone 1
n = length(xs)
x = (1/n)*sum(xs)
(x₀ <= x[1] <= xdᵢₙ ) * ( x[2] ≈ H)
end
function is_damping2(xs) # Check if an element is inside the damping zone 2
n = length(xs)
x = (1/n)*sum(xs)
(xdₒᵤₜ <= x[1] ) * ( x[2] ≈ H)
end
function is_beam_boundary(xs) # Check if an element is on the beam boundary
is_on_xb₀ = [x[1]≈xb₀ for x in xs] # array of booleans of size the number of points in an element (for points, it will be an array of size 1)
is_on_xb₁ = [x[1]≈xb₁ for x in xs]
element_on_xb₀ = minimum(is_on_xb₀) # "true" only if all points of the element lie on x = xb₀
element_on_xb₁ = minimum(is_on_xb₁) # "true" only if all points of the element lie on x = xb₁
element_on_xb₀ | element_on_xb₁ # Return "true" if any of the two cases is true
end
function is_a_joint(xs) # Check if an element is a joint
is_on_xbⱼ = [x[1]≈xbⱼ && x[2]≈H for x in xs] # array of booleans of size the number of points in an element (for points, it will be an array of size 1)
element_on_xbⱼ = minimum(is_on_xbⱼ) # "true" only if all points of the element lie on the joint (x = xbⱼ, y = H)
element_on_xbⱼ
end
# Beam triangulations
xΓ = get_cell_coordinates(Γ)
Γb1_to_Γ_mask = lazy_map(is_beam1,xΓ)
Γb2_to_Γ_mask = lazy_map(is_beam2,xΓ)
Γd1_to_Γ_mask = lazy_map(is_damping1,xΓ)
Γd2_to_Γ_mask = lazy_map(is_damping2,xΓ)
Γb1_to_Γ = findall(Γb1_to_Γ_mask)
Γb2_to_Γ = findall(Γb2_to_Γ_mask)
Γd1_to_Γ = findall(Γd1_to_Γ_mask)
Γd2_to_Γ = findall(Γd2_to_Γ_mask)
Γf_to_Γ = findall(!,Γb1_to_Γ_mask .| Γb2_to_Γ_mask .| Γd1_to_Γ_mask .| Γd2_to_Γ_mask)
Γη_to_Γ = findall(Γb1_to_Γ_mask .| Γb2_to_Γ_mask )
Γκ_to_Γ = findall(!,Γb1_to_Γ_mask .| Γb2_to_Γ_mask )
Γb1 = Triangulation(Γ,Γb1_to_Γ)
Γb2 = Triangulation(Γ,Γb2_to_Γ)
Γd1 = Triangulation(Γ,Γd1_to_Γ)
Γd2 = Triangulation(Γ,Γd2_to_Γ)
Γfs = Triangulation(Γ,Γf_to_Γ)
Γη = Triangulation(Γ,Γη_to_Γ)
Γκ = Triangulation(Γ,Γκ_to_Γ)
Λb1 = Skeleton(Γb1)
Λb2 = Skeleton(Γb2)
# Construct the mask for the joint
Γ_mask_in_Ω_dim_0 = get_face_mask(labels_Ω,"surface",0)
grid_dim_0_Γ = GridPortion(Grid(ReferenceFE{0},𝒯_Ω),Γ_mask_in_Ω_dim_0)
xΓ_dim_0 = get_cell_coordinates(grid_dim_0_Γ)
Λj_to_Γ_mask = lazy_map(is_a_joint,xΓ_dim_0)
Λj = Skeleton(Γ,Λj_to_Γ_mask)
if vtk_output == true
filename = "data/VTKOutput/5-2-1-Khabakhpasheva-freq-domain/"*name
writevtk(Ω,filename*"_O")
writevtk(Γ,filename*"_G")
writevtk(Γb1,filename*"_Gb1")
writevtk(Γb2,filename*"_Gb2")
writevtk(Γd1,filename*"_Gd1")
writevtk(Γd2,filename*"_Gd2")
writevtk(Γfs,filename*"_Gfs")
writevtk(Λb1,filename*"_L1")
writevtk(Λb2,filename*"_L2")
writevtk(Λj,filename*"_Lj")
end
# Measures
degree = 2*order
dΩ = Measure(Ω,degree)
dΓb1 = Measure(Γb1,degree)
dΓb2 = Measure(Γb2,degree)
dΓd1 = Measure(Γd1,degree)
dΓd2 = Measure(Γd2,degree)
dΓfs = Measure(Γfs,degree)
dΓin = Measure(Γin,degree)
dΛb1 = Measure(Λb1,degree)
dΛb2 = Measure(Λb2,degree)
dΛj = Measure(Λj,degree)
# Normals
nΛb1 = get_normal_vector(Λb1)
nΛb2 = get_normal_vector(Λb2)
nΛj = get_normal_vector(Λj)
# FE spaces
reffe = ReferenceFE(lagrangian,Float64,order)
V_Ω = TestFESpace(Ω, reffe, conformity=:H1, vector_type=Vector{ComplexF64})
V_Γκ = TestFESpace(Γκ, reffe, conformity=:H1, vector_type=Vector{ComplexF64})
V_Γη = TestFESpace(Γη, reffe, conformity=:H1, vector_type=Vector{ComplexF64})
U_Ω = TrialFESpace(V_Ω)
U_Γκ = TrialFESpace(V_Γκ)
U_Γη = TrialFESpace(V_Γη)
X = MultiFieldFESpace([U_Ω,U_Γκ,U_Γη])
Y = MultiFieldFESpace([V_Ω,V_Γκ,V_Γη])
# Weak form
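# Monolithic frequency-domain system. The dΛb1/dΛb2 terms weakly enforce C¹
# continuity of the deflection within each beam patch (interior penalty), while
# the dΛj term couples the two patches through the rotational joint stiffness
# kᵣ (ξ = 0 gives kᵣ = 0, i.e. a free hinge).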
∇ₙ(ϕ) = ∇(ϕ)⋅VectorValue(0.0,1.0)
a((ϕ,κ,η),(w,u,v)) = ∫( ∇(w)⋅∇(ϕ) )dΩ +
∫( βₕ*(u + αₕ*w)*(g*κ - im*ω*ϕ) + im*ω*w*κ )dΓfs +
∫( βₕ*(u + αₕ*w)*(g*κ - im*ω*ϕ) + im*ω*w*κ - μ₂ᵢₙ*κ*w + μ₁ᵢₙ*∇ₙ(ϕ)*(u + αₕ*w) )dΓd1 +
∫( βₕ*(u + αₕ*w)*(g*κ - im*ω*ϕ) + im*ω*w*κ - μ₂ₒᵤₜ*κ*w + μ₁ₒᵤₜ*∇ₙ(ϕ)*(u + αₕ*w) )dΓd2 +
∫( ( v*((-ω^2*d₀ + g)*η - im*ω*ϕ) + a₁*Δ(v)*Δ(η) ) + im*ω*w*η )dΓb1 +
∫( ( v*((-ω^2*d₀ + g)*η - im*ω*ϕ) + a₂*Δ(v)*Δ(η) ) + im*ω*w*η )dΓb2 +
∫( a₁ * ( - jump(∇(v)⋅nΛb1) * mean(Δ(η)) - mean(Δ(v)) * jump(∇(η)⋅nΛb1) + γ*( jump(∇(v)⋅nΛb1) * jump(∇(η)⋅nΛb1) ) ) )dΛb1 +
∫( a₂ * ( - jump(∇(v)⋅nΛb2) * mean(Δ(η)) - mean(Δ(v)) * jump(∇(η)⋅nΛb2) + γ*( jump(∇(v)⋅nΛb2) * jump(∇(η)⋅nΛb2) ) ) )dΛb2 +
∫( (jump(∇(v)⋅nΛj) * kᵣ * jump(∇(η)⋅nΛj)) )dΛj
l((w,u,v)) = ∫( w*vᵢₙ )dΓin - ∫( ηd*w - ∇ₙϕd*(u + αₕ*w) )dΓd1
# Solution
op = AffineFEOperator(a,l,X,Y)
(ϕₕ,κₕ,ηₕ) = solve(op)
if vtk_output == true
writevtk(Ω,filename * "_O_solution.vtu",cellfields = ["phi_re" => real(ϕₕ),"phi_im" => imag(ϕₕ)],nsubcells=10)
writevtk(Γκ,filename * "_Gk_solution.vtu",cellfields = ["eta_re" => real(κₕ),"eta_im" => imag(κₕ)],nsubcells=10)
writevtk(Γη,filename * "_Ge_solution.vtu",cellfields = ["eta_re" => real(ηₕ),"eta_im" => imag(ηₕ)],nsubcells=10)
end
# Postprocess
xy_cp = get_cell_points(get_fe_dof_basis(V_Γη)).cell_phys_point
x_cp = [[xy_ij[1] for xy_ij in xy_i] for xy_i in xy_cp]
η_cdv = get_cell_dof_values(ηₕ)
p = sortperm(x_cp[1])
x_cp_sorted = [x_i[p] for x_i in x_cp]
η_cdv_sorted = [η_i[p] for η_i in η_cdv]
xs = [(x_i-xb₀)/Lb for x_i in vcat(x_cp_sorted...)]
η_rel_xs = [abs(η_i)/η₀ for η_i in vcat(η_cdv_sorted...)]
return (xs,η_rel_xs)
end
end
| MonolithicFEMVLFS | https://github.com/oriolcg/MonolithicFEMVLFS.jl.git |
|
[
"MIT"
] | 0.1.1 | 5a50dd846a6e8431c653f91ae3572433893ce1a4 | code | 9823 | module Khabakhpasheva_time_domain
using Gridap
using Gridap.Geometry
using Gridap.FESpaces
using Gridap.CellData
using Plots
using Parameters
using WriteVTK
export run_Khabakhpasheva_time_domain
export Khabakhpasheva_time_domain_params
@with_kw struct Khabakhpasheva_time_domain_params
name::String = "KhabakhpashevaTime"
nx::Int = 20
ny::Int = 5
order::Int = 4
ξ::Float64 = 0.0
vtk_output::Bool = true
end
function run_Khabakhpasheva_time_domain(params::Khabakhpasheva_time_domain_params)
# Unpack input parameters
@unpack name, nx, ny, order, ξ, vtk_output= params
# Fixed parameters
Lb = 12.5
mᵨ = 8.36
EI₁ = 47100.0
EI₂ = 471.0
β = 0.2
H = 1.1
α = 0.249
# Domain size
Ld = Lb # damping zone length
LΩ = 2Ld + 2Lb
x₀ = 0.0
xdᵢₙ = x₀ + Ld
xb₀ = xdᵢₙ + Lb/2
xbⱼ = xb₀ + β*Lb
xb₁ = xb₀ + Lb
xdₒᵤₜ = LΩ - Ld
@show Ld
@show LΩ
@show x₀
@show xdᵢₙ
@show xb₀
@show xbⱼ
@show xb₁
@show xdₒᵤₜ
# Physics
g = 9.81
ρ = 1025
d₀ = mᵨ/ρ
a₁ = EI₁/ρ
a₂ = EI₂/ρ
kᵣ = ξ*a₁/Lb
# wave properties
λ = α*Lb
k = 2π/λ
ω = sqrt(g*k*tanh(k*H))
T = 2π/ω
η₀ = 0.01
ηᵢₙ(x,t) = η₀*cos(k*x[1]-ω*t)
ϕᵢₙ(x,t) = (η₀*ω/k)*(cosh(k*x[2]) / sinh(k*H))*sin(k*x[1]-ω*t)
vᵢₙ(x,t) = -(η₀*ω)*(cosh(k*x[2]) / sinh(k*H))*cos(k*x[1]-ω*t)
vzᵢₙ(x,t) = ω*η₀*sin(k*x[1]-ω*t)
ηᵢₙ(t::Real) = x -> ηᵢₙ(x,t)
ϕᵢₙ(t::Real) = x -> ϕᵢₙ(x,t)
vᵢₙ(t::Real) = x -> vᵢₙ(x,t)
vzᵢₙ(t::Real) = x -> vzᵢₙ(x,t)
# Time stepping
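# Newmark-β parameters: γₜ = 1/2, βₜ = 1/4 give the implicit average-acceleration
# scheme (unconditionally stable, second-order accurate).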
γₜ = 0.5
βₜ = 0.25
t₀ = 0.0
Δt = T/40
tf = 50*T
∂uₜ_∂u = γₜ/(βₜ*Δt)
∂uₜₜ_∂u = 1/(βₜ*Δt^2)
# Numerics constants
nx_total = Int(ceil(nx/β)*ceil(LΩ/Lb))
h = LΩ / nx_total
γ = 1.0*order*(order-1)/h
βₕ = 0.5
αₕ = ∂uₜ_∂u/g * (1-βₕ)/βₕ
# Damping
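# Relaxation (damping) zones: μ₁ acts on the potential and μ₂ = k·μ₁ on the
# elevation; they ramp from μ₀ to 0 across the inlet zone and from 0 to μ₀
# across the outlet zone to absorb outgoing waves.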
μ₀ = 2.5
μ₁ᵢₙ(x::VectorValue) = μ₀*(1.0 - sin(π/2*(x[1])/Ld))
μ₁ₒᵤₜ(x::VectorValue) = μ₀*(1.0 - cos(π/2*(x[1]-xdₒᵤₜ)/Ld))
μ₂ᵢₙ(x) = μ₁ᵢₙ(x)*k
μ₂ₒᵤₜ(x) = μ₁ₒᵤₜ(x)*k
ηd(t) = x -> μ₂ᵢₙ(x)*ηᵢₙ(x,t)
∇ₙϕd(t) = x -> μ₁ᵢₙ(x)*vzᵢₙ(x,t)
# Fluid model
domain = (x₀, LΩ, 0.0, H)
partition = (nx_total,ny)
function f_y(x)
if x == H
return H
end
i = x / (H/ny)
return H-H/(2.5^i)
end
map(x) = VectorValue(x[1], f_y(x[2]))
𝒯_Ω = CartesianDiscreteModel(domain,partition,map=map)
# Labelling
labels_Ω = get_face_labeling(𝒯_Ω)
add_tag_from_tags!(labels_Ω,"surface",[3,4,6]) # assign the label "surface" to the entity 3,4 and 6 (top corners and top side)
add_tag_from_tags!(labels_Ω,"bottom",[1,2,5]) # assign the label "bottom" to the entity 1,2 and 5 (bottom corners and bottom side)
add_tag_from_tags!(labels_Ω,"inlet",[7]) # assign the label "inlet" to the entity 7 (left side)
add_tag_from_tags!(labels_Ω,"outlet",[8]) # assign the label "outlet" to the entity 8 (right side)
add_tag_from_tags!(labels_Ω, "water", [9]) # assign the label "water" to the entity 9 (interior)
# Triangulations
Ω = Interior(𝒯_Ω)
Γ = Boundary(𝒯_Ω,tags="surface")
Γin = Boundary(𝒯_Ω,tags="inlet")
# Auxiliary functions
function is_beam1(xs) # Check if an element is inside the beam1
n = length(xs)
x = (1/n)*sum(xs)
(xb₀ <= x[1] <= xbⱼ ) * ( x[2] ≈ H)
end
function is_beam2(xs) # Check if an element is inside the beam2
n = length(xs)
x = (1/n)*sum(xs)
(xbⱼ <= x[1] <= xb₁ ) * ( x[2] ≈ H)
end
function is_damping1(xs) # Check if an element is inside the damping zone 1
n = length(xs)
x = (1/n)*sum(xs)
(x₀ <= x[1] <= xdᵢₙ ) * ( x[2] ≈ H)
end
function is_damping2(xs) # Check if an element is inside the damping zone 2
n = length(xs)
x = (1/n)*sum(xs)
(xdₒᵤₜ <= x[1] ) * ( x[2] ≈ H)
end
function is_beam_boundary(xs) # Check if an element is on the beam boundary
is_on_xb₀ = [x[1]≈xb₀ for x in xs] # array of booleans of size the number of points in an element (for points, it will be an array of size 1)
is_on_xb₁ = [x[1]≈xb₁ for x in xs]
element_on_xb₀ = minimum(is_on_xb₀) # "true" only if all points of the element lie on x = xb₀
element_on_xb₁ = minimum(is_on_xb₁) # "true" only if all points of the element lie on x = xb₁
element_on_xb₀ | element_on_xb₁ # Return "true" if any of the two cases is true
end
function is_a_joint(xs) # Check if an element is a joint
is_on_xbⱼ = [x[1]≈xbⱼ && x[2]≈H for x in xs] # array of booleans of size the number of points in an element (for points, it will be an array of size 1)
element_on_xbⱼ = minimum(is_on_xbⱼ) # "true" only if all points of the element lie on the joint (x = xbⱼ, y = H)
element_on_xbⱼ
end
# Beam triangulations
xΓ = get_cell_coordinates(Γ)
Γb1_to_Γ_mask = lazy_map(is_beam1,xΓ)
Γb2_to_Γ_mask = lazy_map(is_beam2,xΓ)
Γd1_to_Γ_mask = lazy_map(is_damping1,xΓ)
Γd2_to_Γ_mask = lazy_map(is_damping2,xΓ)
Γb1_to_Γ = findall(Γb1_to_Γ_mask)
Γb2_to_Γ = findall(Γb2_to_Γ_mask)
Γd1_to_Γ = findall(Γd1_to_Γ_mask)
Γd2_to_Γ = findall(Γd2_to_Γ_mask)
Γf_to_Γ = findall(!,Γb1_to_Γ_mask .| Γb2_to_Γ_mask .| Γd1_to_Γ_mask .| Γd2_to_Γ_mask)
Γη_to_Γ = findall(Γb1_to_Γ_mask .| Γb2_to_Γ_mask )
Γκ_to_Γ = findall(!,Γb1_to_Γ_mask .| Γb2_to_Γ_mask )
Γb1 = Triangulation(Γ,Γb1_to_Γ)
Γb2 = Triangulation(Γ,Γb2_to_Γ)
Γd1 = Triangulation(Γ,Γd1_to_Γ)
Γd2 = Triangulation(Γ,Γd2_to_Γ)
Γfs = Triangulation(Γ,Γf_to_Γ)
Γη = Triangulation(Γ,Γη_to_Γ)
Γκ = Triangulation(Γ,Γκ_to_Γ)
Λb1 = Skeleton(Γb1)
Λb2 = Skeleton(Γb2)
# Construct the mask for the joint
Γ_mask_in_Ω_dim_0 = get_face_mask(labels_Ω,"surface",0)
grid_dim_0_Γ = GridPortion(Grid(ReferenceFE{0},𝒯_Ω),Γ_mask_in_Ω_dim_0)
xΓ_dim_0 = get_cell_coordinates(grid_dim_0_Γ)
Λj_to_Γ_mask = lazy_map(is_a_joint,xΓ_dim_0)
Λj = Skeleton(Γ,Λj_to_Γ_mask)
if vtk_output == true
filename = "data/VTKOutput/5-2-2-Khabakhpasheva-time-domain/"*name
writevtk(Ω,filename*"_O")
writevtk(Γ,filename*"_G")
writevtk(Γb1,filename*"_Gb1")
writevtk(Γb2,filename*"_Gb2")
writevtk(Γd1,filename*"_Gd1")
writevtk(Γd2,filename*"_Gd2")
writevtk(Γfs,filename*"_Gfs")
writevtk(Λb1,filename*"_L1")
writevtk(Λb2,filename*"_L2")
writevtk(Λj,filename*"_Lj")
end
# Measures
degree = 2*order
dΩ = Measure(Ω,degree)
dΓb1 = Measure(Γb1,degree)
dΓb2 = Measure(Γb2,degree)
dΓd1 = Measure(Γd1,degree)
dΓd2 = Measure(Γd2,degree)
dΓfs = Measure(Γfs,degree)
dΓin = Measure(Γin,degree)
dΛb1 = Measure(Λb1,degree)
dΛb2 = Measure(Λb2,degree)
dΛj = Measure(Λj,degree)
# Normals
nΛb1 = get_normal_vector(Λb1)
nΛb2 = get_normal_vector(Λb2)
nΛj = get_normal_vector(Λj)
# FE spaces
reffe = ReferenceFE(lagrangian,Float64,order)
V_Ω = TestFESpace(Ω, reffe, conformity=:H1)
V_Γκ = TestFESpace(Γκ, reffe, conformity=:H1)
V_Γη = TestFESpace(Γη, reffe, conformity=:H1)
U_Ω = TransientTrialFESpace(V_Ω)
U_Γκ = TransientTrialFESpace(V_Γκ)
U_Γη = TransientTrialFESpace(V_Γη)
X = TransientMultiFieldFESpace([U_Ω,U_Γκ,U_Γη])
Y = MultiFieldFESpace([V_Ω,V_Γκ,V_Γη])
# Weak form
∇ₙ(ϕ) = ∇(ϕ)⋅VectorValue(0.0,1.0)
m((ϕₜₜ,κₜₜ,ηₜₜ),(w,u,v)) = ∫( d₀*ηₜₜ*v )dΓb1 + ∫( d₀*ηₜₜ*v )dΓb2
c((ϕₜ,κₜ,ηₜ),(w,u,v)) = ∫( βₕ*ϕₜ*(u + αₕ*w) - κₜ*w )dΓfs +
∫( βₕ*ϕₜ*(u + αₕ*w) - κₜ*w )dΓd1 +
∫( βₕ*ϕₜ*(u + αₕ*w) - κₜ*w )dΓd2 +
∫( ϕₜ*v - ηₜ*w )dΓb1 +
∫( ϕₜ*v - ηₜ*w )dΓb2
a((ϕ,κ,η),(w,u,v)) = ∫( ∇(w)⋅∇(ϕ) )dΩ +
∫( βₕ*(u + αₕ*w)*(g*κ) )dΓfs +
∫( βₕ*(u + αₕ*w)*(g*κ) - μ₂ᵢₙ*κ*w + μ₁ᵢₙ*∇ₙ(ϕ)*(u + αₕ*w) )dΓd1 +
∫( βₕ*(u + αₕ*w)*(g*κ) - μ₂ₒᵤₜ*κ*w + μ₁ₒᵤₜ*∇ₙ(ϕ)*(u + αₕ*w) )dΓd2 +
∫( ( v*(g*η) + a₁*Δ(v)*Δ(η) ) )dΓb1 +
∫( ( v*(g*η) + a₂*Δ(v)*Δ(η) ) )dΓb2 +
∫( a₁ * ( - jump(∇(v)⋅nΛb1) * mean(Δ(η)) - mean(Δ(v)) * jump(∇(η)⋅nΛb1) + γ*( jump(∇(v)⋅nΛb1) * jump(∇(η)⋅nΛb1) ) ) )dΛb1 +
∫( a₂ * ( - jump(∇(v)⋅nΛb2) * mean(Δ(η)) - mean(Δ(v)) * jump(∇(η)⋅nΛb2) + γ*( jump(∇(v)⋅nΛb2) * jump(∇(η)⋅nΛb2) ) ) )dΛb2 +
∫( (jump(∇(v)⋅nΛj) * kᵣ * jump(∇(η)⋅nΛj)) )dΛj
b(t,(w,u,v)) = ∫( w*vᵢₙ(t) )dΓin - ∫( ηd(t)*w - ∇ₙϕd(t)*(u + αₕ*w) )dΓd1
# Solution
op = TransientConstantMatrixFEOperator(m,c,a,b,X,Y)
ls = LUSolver()
ode_solver = Newmark(ls,Δt,γₜ,βₜ)
# Initial solution
x₀ = interpolate_everywhere([0.0,0.0,0.0],X(0.0))
v₀ = interpolate_everywhere([0.0,0.0,0.0],X(0.0))
a₀ = interpolate_everywhere([0.0,0.0,0.0],X(0.0))
xₜ = solve(ode_solver,op,(x₀,v₀,a₀),t₀,tf)
if vtk_output == true
pvd_Ω = paraview_collection(filename * "_O_solution", append=false)
pvd_Γκ = paraview_collection(filename * "_Gk_solution", append=false)
pvd_Γη = paraview_collection(filename * "_Ge_solution", append=false)
end
# Postprocess
xy_cp = get_cell_points(get_fe_dof_basis(V_Γη)).cell_phys_point
x_cp = [[xy_ij[1] for xy_ij in xy_i] for xy_i in xy_cp]
p = sortperm(x_cp[1])
x_cp_sorted = [x_i[p] for x_i in x_cp]
xs = [(x_i-1.5*Lb)/Lb for x_i in vcat(x_cp_sorted...)]
ts = Float64[]
ηxps_t = []
for ((ϕₕ,κₕ,ηₕ),tₙ) in xₜ
println("t = $tₙ")
η_cdv = get_cell_dof_values(ηₕ)
η_cdv_sorted = [η_i[p] for η_i in η_cdv]
η_rel_xs = [abs(η_i)/η₀ for η_i in vcat(η_cdv_sorted...)]
push!(ηxps_t,η_rel_xs)
push!(ts,tₙ)
if vtk_output == true
pvd_Ω[tₙ] = createvtk(Ω,filename * "_O_solution" * "_$tₙ.vtu",cellfields = ["phi" => ϕₕ])#,nsubcells=10)
pvd_Γκ[tₙ] = createvtk(Γκ,filename * "_Gk_solution" * "_$tₙ.vtu",cellfields = ["kappa" => κₕ],nsubcells=10)
pvd_Γη[tₙ] = createvtk(Γη,filename * "_Ge_solution" * "_$tₙ.vtu",cellfields = ["eta" => ηₕ],nsubcells=10)
end
end
if vtk_output == true
vtk_save(pvd_Ω)
vtk_save(pvd_Γκ)
vtk_save(pvd_Γη)
end
return (ts,xs,ηxps_t)
end
end
| MonolithicFEMVLFS | https://github.com/oriolcg/MonolithicFEMVLFS.jl.git |
|
[
"MIT"
] | 0.1.1 | 5a50dd846a6e8431c653f91ae3572433893ce1a4 | code | 3933 | module Liu
using Gridap
using Gridap.Geometry
using Gridap.FESpaces
using Parameters
using Roots
export run_Liu
export Liu_params
@with_kw struct Liu_params
name::String = "Liu"
ω::Real = 0.2
end
function run_Liu(params::Liu_params)
@unpack name, ω = params
# Fixed parameters
m = 500
EI = 1.0e10
H₀ = 60
Lb = 300.0
# Physics
g = 9.81
ρ = 1025
d₀ = m/ρ
a₁ = EI/ρ
# wave properties
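# The wave number is obtained numerically from the finite-depth dispersion
# relation ω² = g·k·tanh(k·H₀) with Roots.find_zero.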
f(k) = sqrt(g*k*tanh(k*H₀)) - ω
k = abs(find_zero(f, 0.2)) # wave number
λ = 2*π / k # wavelength
@show λ, λ/Lb
η₀ = 0.01
ηᵢₙ(x) = η₀*exp(im*k*x[1])
ϕᵢₙ(x) = -im*(η₀*ω/k)*(cosh(k*(x[2]+0.075*Lb)) / sinh(k*H₀))*exp(im*k*x[1])
vᵢₙ(x) = (η₀*ω)*(cosh(k*(x[2]+0.075*Lb)) / sinh(k*H₀))*exp(im*k*x[1])
vzᵢₙ(x) = -im*ω*η₀*exp(im*k*x[1])
# Numerics constants
order = 4
h = Lb/50
γ = 1.0*order*(order-1)/h
βₕ = 0.5
αₕ = -im*ω/g * (1-βₕ)/βₕ
# Damping
μ₀ = 6.0
Ld = 4*Lb
xdₒᵤₜ = 9*Lb
μ₁ᵢₙ(x) = μ₀*(1.0 - sin(π/2*(x[1])/Ld))
μ₁ₒᵤₜ(x) = μ₀*(1.0 - cos(π/2*(x[1]-xdₒᵤₜ)/Ld))
μ₂ᵢₙ(x) = μ₁ᵢₙ(x)*k
μ₂ₒᵤₜ(x) = μ₁ₒᵤₜ(x)*k
ηd(x) = μ₂ᵢₙ(x)*ηᵢₙ(x)
∇ₙϕd(x) = μ₁ᵢₙ(x)*vzᵢₙ(x)
# Fluid model
𝒯_Ω = DiscreteModelFromFile("models/Liu.json")
# Triangulations
Ω = Interior(𝒯_Ω)
Γ = Boundary(𝒯_Ω,tags=["beam","free_surface","damping_in","damping_out"])
Γᵢₙ = Boundary(𝒯_Ω,tags="inlet")
Γb = Boundary(𝒯_Ω,tags="beam")
Γd1 = Boundary(𝒯_Ω,tags="damping_in")
Γd2 = Boundary(𝒯_Ω,tags="damping_out")
Γf = Boundary(𝒯_Ω,tags="free_surface")
Γκ = Boundary(𝒯_Ω,tags=["free_surface","damping_in","damping_out"])
Λb = Skeleton(Γb)
filename = "data/VTKOutput/5-3-1-Liu/"*name
writevtk(Ω,filename*"_O_trian")
writevtk(Γb,filename*"_Gb_trian")
writevtk(Γd1,filename*"_Gd1_trian")
writevtk(Γd2,filename*"_Gd2_trian")
writevtk(Γf,filename*"_Gf_trian")
writevtk(Λb,filename*"_Lb_trian")
# Measures
degree = 2*order
dΩ = Measure(Ω,degree)
dΓb = Measure(Γb,degree)
dΓd1 = Measure(Γd1,degree)
dΓd2 = Measure(Γd2,degree)
dΓf = Measure(Γf,degree)
dΓᵢₙ = Measure(Γᵢₙ,degree)
dΛb = Measure(Λb,degree)
# Normals
nΛb = get_normal_vector(Λb)
# FE spaces
reffe = ReferenceFE(lagrangian,Float64,order)
V_Ω = TestFESpace(Ω, reffe, conformity=:H1, vector_type=Vector{ComplexF64})
V_Γκ = TestFESpace(Γκ, reffe, conformity=:H1, vector_type=Vector{ComplexF64})
V_Γη = TestFESpace(Γb, reffe, conformity=:H1, vector_type=Vector{ComplexF64})
U_Ω = TrialFESpace(V_Ω)
U_Γκ = TrialFESpace(V_Γκ)
U_Γη = TrialFESpace(V_Γη)
X = MultiFieldFESpace([U_Ω,U_Γκ,U_Γη])
Y = MultiFieldFESpace([V_Ω,V_Γκ,V_Γη])
# Weak form
∇ₙ(ϕ) = ∇(ϕ)⋅VectorValue(0.0,1.0)
a((ϕ,κ,η),(w,u,v)) = ∫( ∇(w)⋅∇(ϕ) )dΩ +
∫( βₕ*(u + αₕ*w)*(g*κ - im*ω*ϕ) + im*ω*w*κ )dΓf +
∫( βₕ*(u + αₕ*w)*(g*κ - im*ω*ϕ) + im*ω*w*κ - μ₂ᵢₙ*κ*w + μ₁ᵢₙ*∇ₙ(ϕ)*(u + αₕ*w) )dΓd1 +
∫( βₕ*(u + αₕ*w)*(g*κ - im*ω*ϕ) + im*ω*w*κ - μ₂ₒᵤₜ*κ*w + μ₁ₒᵤₜ*∇ₙ(ϕ)*(u + αₕ*w) )dΓd2 +
∫( ( v*((-ω^2*d₀ + g)*η - im*ω*ϕ) + a₁*Δ(v)*Δ(η) ) + im*ω*w*η )dΓb +
∫( a₁ * ( - jump(∇(v)⋅nΛb) * mean(Δ(η)) - mean(Δ(v)) * jump(∇(η)⋅nΛb) + γ*( jump(∇(v)⋅nΛb) * jump(∇(η)⋅nΛb) ) ) )dΛb
l((w,u,v)) = ∫( w*vᵢₙ )dΓᵢₙ - ∫( ηd*w - ∇ₙϕd*(u + αₕ*w) )dΓd1
op = AffineFEOperator(a,l,X,Y)
(ϕₕ,κₕ,ηₕ) = Gridap.solve(op)
xy_cp = get_cell_points(get_fe_dof_basis(V_Γη)).cell_phys_point
x_cp = [[xy_ij[1] for xy_ij in xy_i] for xy_i in xy_cp]
η_cdv = get_cell_dof_values(ηₕ)
p = sortperm(x_cp[1])
x_cp_sorted = [x_i[p] for x_i in x_cp]
η_cdv_sorted = [η_i[p] for η_i in η_cdv]
xs = [(x_i-6*Lb)/Lb for x_i in vcat(x_cp_sorted...)]
η_rel_xs = [abs(η_i)/η₀ for η_i in vcat(η_cdv_sorted...)]
writevtk(Γκ,filename*"_kappa",cellfields=["eta_re"=>real(κₕ),"eta_im"=>imag(κₕ)])
writevtk(Γb,filename*"_eta",cellfields=["eta_re"=>real(ηₕ),"eta_im"=>imag(ηₕ)])
writevtk(Ω,filename*"_phi",cellfields=["phi_re"=>real(ϕₕ),"phi_im"=>imag(ϕₕ)])
return (xs,η_rel_xs)
end
end
| MonolithicFEMVLFS | https://github.com/oriolcg/MonolithicFEMVLFS.jl.git |
|
[
"MIT"
] | 0.1.1 | 5a50dd846a6e8431c653f91ae3572433893ce1a4 | code | 3390 | module MonolithicFEMVLFS
using DrWatson
@quickactivate "MonolithicFEMVLFS"
using Plots
using LaTeXStrings
using DataFrames
using DataFramesMeta
using CSV
export run_tests
# Include source files
include("Periodic_Beam.jl")
include("Periodic_Beam_FS.jl")
include("Khabakhpasheva_freq_domain.jl")
include("Khabakhpasheva_time_domain.jl")
include("Liu.jl")
include("Yago_freq_domain.jl")
using .Periodic_Beam: Periodic_Beam_params, run_periodic_beam
using .Periodic_Beam_FS: Periodic_Beam_FS_params, run_periodic_beam_FS
using .Khabakhpasheva_freq_domain: Khabakhpasheva_freq_domain_params, run_Khabakhpasheva_freq_domain
using .Khabakhpasheva_time_domain: Khabakhpasheva_time_domain_params, run_Khabakhpasheva_time_domain
using .Liu: Liu_params, run_Liu
using .Yago_freq_domain: Yago_freq_domain_params, run_Yago_freq_domain
# Extend DrWatson functions
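# `allaccess` selects which fields enter DrWatson's auto-generated savename and
# `default_prefix` prepends the case name, so `produce_or_load` stores results
# as "<name>_<field=value>_(...).jld2".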
DrWatson.allaccess(c::Periodic_Beam_params) = (:n, :dt, :tf, :orderϕ, :orderη, :k)
DrWatson.default_prefix(c::Periodic_Beam_params) = c.name
DrWatson.allaccess(c::Periodic_Beam_FS_params) = (:n, :dt, :tf, :order, :k)
DrWatson.default_prefix(c::Periodic_Beam_FS_params) = c.name
DrWatson.allaccess(c::Khabakhpasheva_freq_domain_params) = (:nx, :ny, :order, :ξ, :vtk_output)
DrWatson.default_prefix(c::Khabakhpasheva_freq_domain_params) = c.name
DrWatson.allaccess(c::Khabakhpasheva_time_domain_params) = (:nx, :ny, :order, :ξ, :vtk_output)
DrWatson.default_prefix(c::Khabakhpasheva_time_domain_params) = c.name
DrWatson.allaccess(c::Liu_params) = (:ω,)
DrWatson.default_prefix(c::Liu_params) = c.name
DrWatson.allaccess(c::Yago_freq_domain_params) = (:nx, :ny, :nz, :order, :λfactor, :dfactor)
DrWatson.default_prefix(c::Yago_freq_domain_params) = c.name
# Include script files
include("../scripts/5-1-1-periodic-beam-spatial-convergence.jl")
include("../scripts/5-1-2-periodic-beam-time-convergence.jl")
include("../scripts/5-1-3-periodic-beam-energy.jl")
include("../scripts/5-1-4-periodic-beam-free-surface-energy.jl")
include("../scripts/5-2-1-Khabakhpasheva-freq-domain.jl")
include("../scripts/5-2-2-Khabakhpasheva-time-domain.jl")
include("../scripts/5-3-1-Liu.jl")
include("../scripts/5-4-1-Yago.jl")
function run_tests(test::String)
if test=="all"
run_5_1_1_periodic_beam_sapatial_convergence()
run_5_1_2_periodic_beam_time_convergence()
run_5_1_3_periodic_beam_energy()
run_5_1_4_periodic_beam_free_surface_energy()
run_5_2_1_Khavakhpasheva_freq_domain()
run_5_2_2_Khavakhpasheva_time_domain()
run_5_3_1_Liu()
run_5_4_1_Yago()
elseif test == "5-1-1" || test == "5-1-1-periodic-beam-spatial-convergence"
run_5_1_1_periodic_beam_sapatial_convergence()
elseif test == "5-1-2" || test == "5-1-2-periodic-beam-time-convergence"
run_5_1_2_periodic_beam_time_convergence()
elseif test == "5-1-3" || test == "5-1-3-periodic-beam-energy"
run_5_1_3_periodic_beam_energy()
elseif test == "5-1-4" || test == "5-1-4-periodic-beam-free-surface-energy"
run_5_1_4_periodic_beam_free_surface_energy()
elseif test == "5-2-1" || test == "5-2-1-Khavakhpasheva-freq-domain"
run_5_2_1_Khavakhpasheva_freq_domain()
elseif test == "5-2-2" || test == "5-2-2-Khavakhpasheva-time-domain"
run_5_2_2_Khavakhpasheva_time_domain()
elseif test == "5-3-1" || test == "5-3-1-Liu"
run_5_3_1_Liu()
elseif test == "5-4-1" || test == "5-4-1-Yago"
run_5_4_1_Yago()
end
end
end
| MonolithicFEMVLFS | https://github.com/oriolcg/MonolithicFEMVLFS.jl.git |
|
[
"MIT"
] | 0.1.1 | 5a50dd846a6e8431c653f91ae3572433893ce1a4 | code | 4332 | module Periodic_Beam
using Gridap
using Gridap.Geometry
using Gridap.FESpaces
using WriteVTK
using Parameters
export run_periodic_beam
export Periodic_Beam_params
@with_kw struct Periodic_Beam_params
name::String = "PeriodicBeam"
n::Int = 4
dt::Real = 0.001
tf::Real = 1.0
orderϕ::Int = 2
orderη::Int = 2
k::Int = 10
vtk_output = false
end
function run_periodic_beam(params)
# Unpack input parameters
@unpack name, n, dt, tf, orderϕ, orderη, k, vtk_output = params
# Fixed parameters
## Geometry
L = 2.0*π
H = 1.0
## Physics
g = 9.81
ρ_w = 1.0e3
ρ_b = 1.0e2
h_b = 1.0e-2
λ = 2*π/ k
ω = √(g*k*tanh(k*H))
EI_b = ρ_b*h_b*ω^2/(k^4) # chosen so the beam inertia and bending terms cancel for the (k,ω) wave, making the Airy solution exact
d₀ = ρ_b*h_b/ρ_w
Dᵨ = EI_b/ρ_w
η₀ = 0.01
η(x,t) = η₀*cos(k*x[1]-ω*t)
ϕ(x,t) = η₀*ω/k * cosh(k*x[2]) / sinh(k*H) * sin(k*x[1]-ω*t)
η(t::Real) = x -> η(x,t)
ϕ(t::Real) = x -> ϕ(x,t)
## Numerics (time discretization)
γ_t = 0.5
β_t = 0.25
t₀ = 0.0
## Numerics (space discretization)
h = L/n
γ = 10.0*orderη*(orderη+1)
# Define fluid domain
println("Defining fluid domain")
domain = (0.0, L, 0.0, H)
partition = (2*n,n)
model_Ω = CartesianDiscreteModel(domain,partition,isperiodic=(true,false))
# Define beam domain
println("Defining beam domain")
labels = get_face_labeling(model_Ω)
add_tag_from_tags!(labels,"bottom",[1,2,5])
add_tag_from_tags!(labels,"beam",[3,4,6])
# Triangulations
println("Defining triangulations")
Ω = Interior(model_Ω)
Γ = Boundary(model_Ω,tags="beam")
Λ = Skeleton(Γ)
nΛ = get_normal_vector(Λ)
# Quadratures
println("Defining quadratures")
degree = 2*orderη
dΩ = Measure(Ω,degree)
dΓ = Measure(Γ,degree)
dΛ = Measure(Λ,degree)
# FE spaces
println("Defining FE spaces")
reffe_Ω = ReferenceFE(lagrangian,Float64,orderϕ)
reffe_Γ = ReferenceFE(lagrangian,Float64,orderη)
V_Ω = TestFESpace(Ω,reffe_Ω,conformity=:H1)
V_Γ = TestFESpace(Γ,reffe_Γ,conformity=:H1)
U_Ω = TransientTrialFESpace(V_Ω)
U_Γ = TransientTrialFESpace(V_Γ)
Y = MultiFieldFESpace([V_Ω,V_Γ])
X = TransientMultiFieldFESpace([U_Ω,U_Γ])
# Weak form
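# C/DG weak form: Dᵨ*Δ(η)*Δ(v) is the beam bending term; the skeleton (dΛ)
# integrals weakly enforce C¹ continuity across element edges through the
# symmetric consistency terms and the γ/h interior penalty.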
m((ϕₜₜ,ηₜₜ),(w,v)) = ∫( d₀*ηₜₜ*v )dΓ
c((ϕₜ,ηₜ),(w,v)) = ∫( ϕₜ*v - ηₜ*w )dΓ
a((ϕ,η),(w,v)) = ∫( ∇(ϕ)⋅∇(w) )dΩ +
∫( g*η*v + Dᵨ*Δ(η)*Δ(v) )dΓ +
∫( Dᵨ*( - mean(Δ(η))*jump(∇(v)⋅nΛ) -
jump(∇(η)⋅nΛ)*mean(Δ(v)) +
γ/h*jump(∇(v)⋅nΛ)*jump(∇(η)⋅nΛ) ) )dΛ
b((w,v)) = ∫( 0.0 * w )dΩ
op = TransientConstantFEOperator(m,c,a,b,X,Y)
# Solver
ls = LUSolver()
ode_solver = Newmark(ls,dt,γ_t,β_t)
# Initial solution
x₀ = interpolate_everywhere([ϕ(0.0),η(0.0)],X(0.0))
v₀ = interpolate_everywhere([∂t(ϕ)(0.0),∂t(η)(0.0)],X(0.0))
a₀ = interpolate_everywhere([∂tt(ϕ)(0.0),∂tt(η)(0.0)],X(0.0))
# Solution
xₜ = solve(ode_solver,op,(x₀,v₀,a₀),t₀,tf)
# Auxiliary functions
l2_Ω(x) = √(∑( ∫( x⋅x )dΩ ))
l2_Γ(x) = √(∑( ∫( x⋅x )dΓ ))
t_global = Float64[]
e_ϕ = Float64[]
e_η = Float64[]
E_kin_f = Float64[]
E_pot_f = Float64[]
E_kin_s = Float64[]
E_ela_s = Float64[]
E_kin_f₀ = 0.25 * d₀ * ω^2 * η₀^2 * L
E_kin_s₀ = 0.25 * g * η₀^2 * L
E_pot_f₀ = 0.25 * Dᵨ * k^4 * η₀^2 * L
E_ela_s₀ = 0.25 * g * η₀^2 * L
if vtk_output == true
filename = "data/VTKOutput/5-1-1-periodic-beam/"*name
pvd_Ω = paraview_collection(filename * "_O", append=false)
pvd_Γ = paraview_collection(filename * "_G", append=false)
end
global ηₙ = x₀[2]
global ηₙ_fv = get_free_dof_values(ηₙ)
for ((ϕₕ,ηₕ),tₙ) in xₜ
push!(e_ϕ,l2_Ω(ϕ(tₙ) - ϕₕ))
push!(e_η,l2_Γ(η(tₙ) - ηₕ))
ηₜ = (ηₕ-ηₙ)/dt
push!(E_kin_f, 0.5*∑( ∫( ∇(ϕₕ)⋅∇(ϕₕ) )dΩ ) )
push!(E_pot_f, 0.5*g*∑( ∫( ηₕ*ηₕ )dΓ ) )
push!(E_kin_s, 0.5*d₀*∑( ∫( ηₜ*ηₜ )dΓ ) )
push!(E_ela_s, 0.5*Dᵨ*∑( ∫( Δ(ηₕ)*Δ(ηₕ) )dΓ ) )
push!(t_global,tₙ)
if vtk_output == true
pvd_Ω[tₙ] = createvtk(Ω,filename * "_O" * "_$tₙ.vtu",cellfields = ["phi" => ϕₕ],nsubcells=10)
pvd_Γ[tₙ] = createvtk(Γ,filename * "_G" * "_$tₙ.vtu",cellfields = ["eta" => ηₕ],nsubcells=10)
end
ηₙ=interpolate!(ηₕ,ηₙ_fv,U_Γ(tₙ))
end
if vtk_output == true
vtk_save(pvd_Ω)
vtk_save(pvd_Γ)
end
return e_ϕ, e_η, E_kin_f , E_pot_f, E_kin_s, E_ela_s, E_kin_f₀, E_kin_s₀, E_pot_f₀, E_ela_s₀, t_global
end
end
| MonolithicFEMVLFS | https://github.com/oriolcg/MonolithicFEMVLFS.jl.git |
|
[
"MIT"
] | 0.1.1 | 5a50dd846a6e8431c653f91ae3572433893ce1a4 | code | 6287 | module Periodic_Beam_FS
using Gridap
using Gridap.Geometry
using Gridap.FESpaces
using WriteVTK
using Parameters
export run_periodic_beam_FS
export Periodic_Beam_FS_params
@with_kw struct Periodic_Beam_FS_params
name::String = "PeriodicBeamFS"
n::Int = 4
dt::Real = 0.001
tf::Real = 1.0
order::Int = 2
k::Int = 10
vtk_output = false
end
function run_periodic_beam_FS(params)
# Unpack input parameters
@unpack name, n, dt, tf, order, k, vtk_output = params
# Fixed parameters
## Geometry
L = 2.0*π
H = 1.0
## Physics
g = 9.81
ρ_w = 1.0e3
ρ_b = 1.0e2
h_b = 1.0e-2
λ = 2*π/ k
ω = √(g*k*tanh(k*H))
EI_b = ρ_b*h_b*ω^2/(k^4) # chosen so the beam inertia and bending terms cancel for the (k,ω) wave, making the Airy solution exact
d₀ = ρ_b*h_b/ρ_w
Dᵨ = EI_b/ρ_w
η₀ = 0.01
η(x,t) = η₀*cos(k*x[1]-ω*t)
ϕ(x,t) = η₀*ω/k * cosh(k*x[2]) / sinh(k*H) * sin(k*x[1]-ω*t)
η(t::Real) = x -> η(x,t)
ϕ(t::Real) = x -> ϕ(x,t)
## Numerics (time discretization)
γ_t = 0.5
β_t = 0.25
t₀ = 0.0
∂uₜ_∂u = γ_t/(β_t*dt)
∂uₜₜ_∂u = 1/(β_t*dt^2)
βₕ = 0.5
αₕ = ∂uₜ_∂u/g * (1-βₕ)/βₕ
## Numerics (space discretization)
h = L/n
γ = 10.0*order*(order-1)/h
# Define fluid domain
println("Defining fluid domain")
domain = (0.0, L, 0.0, H)
partition = (2*n,n)
𝒯_Ω = CartesianDiscreteModel(domain,partition,isperiodic=(true,false))
# Domain size
Lb = π
x₀ = 0.0
xb₀ = 0.5π
xb₁ = xb₀ + Lb
# Labelling
labels_Ω = get_face_labeling(𝒯_Ω)
add_tag_from_tags!(labels_Ω,"surface",[3,4,6]) # assign the label "surface" to the entity 3,4 and 6 (top corners and top side)
add_tag_from_tags!(labels_Ω,"bottom",[1,2,5]) # assign the label "bottom" to the entity 1,2 and 5 (bottom corners and bottom side)
add_tag_from_tags!(labels_Ω, "water", [9]) # assign the label "water" to the entity 9 (interior)
# Triangulations
Ω = Interior(𝒯_Ω)
Γ = Boundary(𝒯_Ω,tags="surface")
# Auxiliary functions
function is_beam(xs) # Check if an element is inside the beam
n = length(xs)
x = (1/n)*sum(xs)
(xb₀ <= x[1] <= xb₁ ) * ( x[2] ≈ H)
end
function is_beam_boundary(xs) # Check if an element is on the beam boundary
is_on_xb₀ = [x[1]≈xb₀ for x in xs] # array of booleans of size the number of points in an element (for points, it will be an array of size 1)
is_on_xb₁ = [x[1]≈xb₁ for x in xs]
element_on_xb₀ = minimum(is_on_xb₀) # "true" only if all points of the element lie on x = xb₀
element_on_xb₁ = minimum(is_on_xb₁) # "true" only if all points of the element lie on x = xb₁
element_on_xb₀ | element_on_xb₁ # Return "true" if any of the two cases is true
end
# Beam triangulations
xΓ = get_cell_coordinates(Γ)
Γb_to_Γ_mask = lazy_map(is_beam,xΓ)
Γb_to_Γ = findall(Γb_to_Γ_mask)
Γf_to_Γ = findall(!,Γb_to_Γ_mask)
Γb = Triangulation(Γ,Γb_to_Γ)
Γfs = Triangulation(Γ,Γf_to_Γ)
Λb = Skeleton(Γb)
if vtk_output == true
filename = "data/VTKOutput/5-1-4-periodic-beam-free-surface/"*name
writevtk(Ω,filename*"_O")
writevtk(Γ,filename*"_G")
writevtk(Γb,filename*"_Gb")
writevtk(Γfs,filename*"_Gfs")
writevtk(Λb,filename*"_L")
end
# Measures
degree = 2*order
dΩ = Measure(Ω,degree)
dΓ = Measure(Γ,degree)
dΓb = Measure(Γb,degree)
dΓfs = Measure(Γfs,degree)
dΛb = Measure(Λb,degree)
# Normals
nΛb = get_normal_vector(Λb)
# FE spaces
reffe = ReferenceFE(lagrangian,Float64,order)
V_Ω = TestFESpace(Ω, reffe, conformity=:H1)
V_Γfs = TestFESpace(Γfs, reffe, conformity=:H1)
V_Γb = TestFESpace(Γb, reffe, conformity=:H1)
U_Ω = TransientTrialFESpace(V_Ω)
U_Γfs = TransientTrialFESpace(V_Γfs)
U_Γb = TransientTrialFESpace(V_Γb)
X = TransientMultiFieldFESpace([U_Ω,U_Γfs,U_Γb])
Y = MultiFieldFESpace([V_Ω,V_Γfs,V_Γb])
# Weak form
∇ₙ(ϕ) = ∇(ϕ)⋅VectorValue(0.0,1.0)
m((ϕₜₜ,κₜₜ,ηₜₜ),(w,u,v)) = ∫( d₀*ηₜₜ*v )dΓb
c((ϕₜ,κₜ,ηₜ),(w,u,v)) = ∫( βₕ*ϕₜ*(u + αₕ*w) - κₜ*w )dΓfs +
∫( ϕₜ*v - ηₜ*w )dΓb
a((ϕ,κ,η),(w,u,v)) = ∫( ∇(w)⋅∇(ϕ) )dΩ +
∫( βₕ*(u + αₕ*w)*(g*κ) )dΓfs +
∫( ( v*(g*η) + Dᵨ*Δ(v)*Δ(η) ) )dΓb +
∫( Dᵨ * ( - jump(∇(v)⋅nΛb) * mean(Δ(η)) - mean(Δ(v)) * jump(∇(η)⋅nΛb) + γ*( jump(∇(v)⋅nΛb) * jump(∇(η)⋅nΛb) ) ) )dΛb
b((w,v)) = ∫( 0.0 * w )dΩ
op = TransientConstantFEOperator(m,c,a,b,X,Y)
# Solver
ls = LUSolver()
ode_solver = Newmark(ls,dt,γ_t,β_t)
# Initial solution
x₀ = interpolate_everywhere([ϕ(0.0),η(0.0),η(0.0)],X(0.0))
v₀ = interpolate_everywhere([∂t(ϕ)(0.0),∂t(η)(0.0),∂t(η)(0.0)],X(0.0))
a₀ = interpolate_everywhere([∂tt(ϕ)(0.0),∂tt(η)(0.0),∂tt(η)(0.0)],X(0.0))
# Solution
xₜ = solve(ode_solver,op,(x₀,v₀,a₀),t₀,tf)
# Auxiliary functions
l2_Ω(x) = √(∑( ∫( x⋅x )dΩ ))
l2_Γfs(x) = √(∑( ∫( x⋅x )dΓfs ))
l2_Γb(x) = √(∑( ∫( x⋅x )dΓb ))
t_global = Float64[]
e_ϕ = Float64[]
e_η = Float64[]
E_kin_f = Float64[]
E_pot_f = Float64[]
E_kin_s = Float64[]
E_ela_s = Float64[]
ηₜ₀ = CellField(∂t(η)(0.0),Γb)
∇ϕ₀ = CellField(∇(ϕ(0.0)),Ω)
Δη₀ = CellField(Δ(η(0.0)),Γb)
η_0 = CellField(η(0.0),Γ)
E_kin_s₀ = 0.25 * d₀ * ω^2 * η₀^2 * Lb
E_kin_f₀ = 0.25 * g * η₀^2 * L
E_ela_s₀ = 0.25 * Dᵨ * k^4 * η₀^2 * Lb
E_pot_f₀ = 0.25 * g * η₀^2 * L
if vtk_output == true
filename = "data/VTKOutput/5-1-4-periodic-beam-free-surface/"*name
pvd_Ω = paraview_collection(filename * "_O", append=false)
pvd_Γ = paraview_collection(filename * "_G", append=false)
end
global ηₙ = x₀[2]
global ηₙ_fv = get_free_dof_values(ηₙ)
for ((ϕₕ,κₕ,ηₕ),tₙ) in xₜ
push!(e_ϕ,l2_Ω(ϕ(tₙ) - ϕₕ))
push!(e_η,l2_Γfs(η(tₙ) - κₕ)+l2_Γb(η(tₙ) - ηₕ))
ηₜ = (ηₕ-ηₙ)/dt
push!(E_kin_f, 0.5*∑( ∫( ∇(ϕₕ)⋅∇(ϕₕ) )dΩ ) )
push!(E_pot_f, 0.5*g*∑( ∫( κₕ*κₕ )dΓfs ) + 0.5*g*∑( ∫( ηₕ*ηₕ )dΓb ))
push!(E_kin_s, 0.5*d₀*∑( ∫( ηₜ*ηₜ )dΓb ) )
push!(E_ela_s, 0.5*Dᵨ*∑( ∫( Δ(ηₕ)*Δ(ηₕ) )dΓb ) )
push!(t_global,tₙ)
if vtk_output == true
pvd_Ω[tₙ] = createvtk(Ω,filename * "_O" * "_$tₙ.vtu",cellfields = ["phi" => ϕₕ],nsubcells=10)
pvd_Γ[tₙ] = createvtk(Γ,filename * "_G" * "_$tₙ.vtu",cellfields = ["eta" => ηₕ],nsubcells=10)
end
ηₙ=interpolate!(ηₕ,ηₙ_fv,U_Γb(tₙ))
end
if vtk_output == true
vtk_save(pvd_Ω)
vtk_save(pvd_Γ)
end
return e_ϕ, e_η, E_kin_f , E_pot_f, E_kin_s, E_ela_s, E_kin_f₀, E_kin_s₀, E_pot_f₀, E_ela_s₀, t_global
end
end
| MonolithicFEMVLFS | https://github.com/oriolcg/MonolithicFEMVLFS.jl.git |
|
[
"MIT"
] | 0.1.1 | 5a50dd846a6e8431c653f91ae3572433893ce1a4 | code | 5175 | module Yago_freq_domain
using Gridap
using Gridap.Geometry
using Gridap.TensorValues
using Parameters
export run_Yago_freq_domain
export Yago_freq_domain_params
@with_kw struct Yago_freq_domain_params
name::String = "YagoFreq"
nx::Int = 32
ny::Int = 4
nz::Int = 4
order::Int = 4
λfactor::Real = 0.4
dfactor::Real = 3
vtk_output = false
end
function run_Yago_freq_domain(params)
# Unpack input parameters
@unpack name, nx, ny, nz, order, λfactor, dfactor, vtk_output = params
# Fixed parameters
## Geometry
L = 300
B = 60
H = 58.5
hb = 2.0
nLΩ = 10
nBΩ = 18
LΩ = nLΩ*L
BΩ = nBΩ*B
xb₀ = 4.5*L
xb₁ = xb₀ + L
yb₀ = -B/2
yb₁ = yb₀ + B
## Physics
g = 9.81
ρ = 1025
E = 11.9e9
ρb = 256.25
ν = 0.13
D = 4.77e11
d₀ = ρb*hb/ρ
δ(x,y) = ==(x,y)
C = SymFourthOrderTensorValue{3,Float64}
μ = E/(2*(1+ν))
λ = ν*E/(1-ν^2)
I = hb^3/12
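# Assemble the density-scaled fourth-order plate bending tensor
# C_ijkl = I/ρ*(μ*(δ_ik*δ_jl + δ_il*δ_jk) + λ*δ_ij*δ_kl),
# filling only the in-plane indices i,j,k,l ∈ {1,2}; all other entries stay zero.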
Cvals = zeros(Float64,36)
for i in 1:2
for j in 1:2
for k in 1:2
for l in 1:2
Cvals[data_index(C,i,j,k,l)] = I/ρ*(μ*(δ(i,k)*δ(j,l) +δ(i,l)*δ(j,k)) + λ*(δ(i,j)*δ(k,l)))
end
end
end
end
C = SymFourthOrderTensorValue(Cvals...)
# Wave properties
λ = L*λfactor
k = 2π/λ
ω = √(g*k*tanh(k*H))
T = 2π/ω
η₀ = 0.01
ηᵢₙ(x) = η₀*exp(im*k*x[1])
ϕᵢₙ(x) = -im*(η₀*ω/k)*(cosh(k*x[3]) / sinh(k*H))*exp(im*k*x[1])
vᵢₙ(x) = (η₀*ω)*(cosh(k*x[3]) / sinh(k*H))*exp(im*k*x[1])
vzᵢₙ(x) = -im*ω*η₀*exp(im*k*x[1])
## Numerics (space discretization)
h = LΩ/(nLΩ*nx)
βₕ = 0.5
αₕ = -im*ω/g * (1-βₕ)/βₕ
γ = 1.0*order*(order+1)/h
# Damping method 5
μ₀ = 2.5
Ld = dfactor*L
Ld₀ = dfactor*L
xd = LΩ-Ld
xd₀ = Ld₀
μ₁(x) = μ₀*(1.0 - cos(π/2*(x[1]-xd)/Ld)) * (x[1]>xd) + μ₀*(1-cos(π/2*(Ld₀-x[1])/Ld₀)) * (x[1]<xd₀)
μ₂(x) = μ₁(x)*k
ηd(x) = μ₂(x)*ηᵢₙ(x)*(x[1]<xd₀)
∇ₙϕd(x) = μ₁(x)*vzᵢₙ(x)*(x[1]<xd₀)
# Define fluid model
println("Defining fluid model")
domain = (0.0, LΩ, -BΩ/2, BΩ/2, 0.0, H)
partition = (nLΩ*nx,nBΩ*ny,nz)
function f_z(x)
if x == H
return H
end
i = x / (H/nz)
return H-H/((2.5)^i)
end
map(x) = VectorValue(x[1], x[2], f_z(x[3]))
model_Ω = CartesianDiscreteModel(domain,partition,map=map)
# Add labels to Ω
labels_Ω = get_face_labeling(model_Ω)
add_tag_from_tags!(labels_Ω,"surface",[22])
add_tag_from_tags!(labels_Ω,"inlet",[25])
# Triangulations
Ω = Interior(model_Ω)
Γ = Boundary(model_Ω,tags="surface")
Γᵢₙ = Boundary(model_Ω,tags="inlet")
# Create masks in Γ
function is_plate(x)
is_in = ([(xb₀ <= xm[1]) * (xm[1] <= xb₁) * (yb₀ <= xm[2]) * (xm[2] <= yb₁) for xm in x])
minimum(is_in)
end
xΓ = get_cell_coordinates(Γ)
Γb_to_Γ_mask = lazy_map(is_plate,xΓ)
Γb_to_Γ = findall(Γb_to_Γ_mask)
Γf_to_Γ = findall(!,Γb_to_Γ_mask)
Γb = Triangulation(Γ,Γb_to_Γ)
Γf = Triangulation(Γ,Γf_to_Γ)
Λb = Skeleton(Γb)
# Measures
degree = 2*order
dΩ = Measure(Ω,degree)
dΓb = Measure(Γb,degree)
dΓf = Measure(Γf,degree)
dΓᵢₙ = Measure(Γᵢₙ,degree)
dΛb = Measure(Λb,degree)
# Normals
nΛb = get_normal_vector(Λb)
# FE spaces
println("Defining FE spaces")
reffeη = ReferenceFE(lagrangian,Float64,order)
reffeκ = ReferenceFE(lagrangian,Float64,order)
reffeᵩ = ReferenceFE(lagrangian,Float64,2)
V_Ω = TestFESpace(Ω, reffeᵩ, vector_type=Vector{ComplexF64})
V_Γκ = TestFESpace(Γf, reffeκ, vector_type=Vector{ComplexF64})
V_Γη = TestFESpace(Γb, reffeη, vector_type=Vector{ComplexF64})
U_Ω = TrialFESpace(V_Ω)
U_Γκ = TrialFESpace(V_Γκ)
U_Γη = TrialFESpace(V_Γη)
X = MultiFieldFESpace([U_Ω,U_Γκ,U_Γη])
Y = MultiFieldFESpace([V_Ω,V_Γκ,V_Γη])
# Weak form
println("Defining weak form")
∇ₙ(ϕ) = ∇(ϕ)⋅VectorValue(0.0,0.0,1.0)
a((ϕ,κ,η),(w,u,v)) = ∫( ∇(ϕ)⋅∇(w) )dΩ +
∫( βₕ*(g*κ-im*ω*ϕ)*(u + αₕ*w) + im*ω*κ*w - μ₂*κ*w + μ₁*∇ₙ(ϕ)*(u + αₕ*w) )dΓf +
∫( ((g-ω^2*d₀)*η-im*ω*ϕ)*v + im*ω*η*w + (∇∇(v)⊙(C⊙∇∇(η))) )dΓb +
∫( ( - jump(∇(v))⊙(mean(C⊙∇∇(η))⋅nΛb.⁺) - (mean((C⊙∇∇(v)))⋅nΛb.⁺) ⊙jump(∇(η)) + D/ρ*γ*jump(∇(v))⊙jump(∇(η)) ) )dΛb
b((w,u,v)) = ∫( vᵢₙ*w )dΓᵢₙ - ∫( ηd*w - ∇ₙϕd*(u + αₕ*w) )dΓf
op = AffineFEOperator(a,b,X,Y)
# Solution
println("Defining solution")
xₕ = solve(op)
if vtk_output == true
filename = "data/VTKOutput/5-4-1-Yago/"*name
end
# Output points
xps = []
indices = []
x_η = get_cell_coordinates(Γb)
for (ie,xie) in enumerate(x_η)
for (in,xi) in enumerate(xie)
if -1 <= -2*(xi[1]-(xb₀+L/2))/L <=1 && xi[2]==0.0
push!(indices,(ie,in))
push!(xps,-2*(xi[1]-(xb₀+L/2))/L)
end
end
end
println("Computing solution")
(ϕₕ,κₕ,ηₕ) = xₕ
# Store values
η_x = get_cell_dof_values(ηₕ)
ηxps = Float64[]
for i in 1:length(indices)
(ie,in) = indices[i]
push!(ηxps,abs(η_x[ie][in])/η₀)
end
if vtk_output == true
writevtk(Ω,filename * "_O",cellfields = ["phi_re" => real(ϕₕ),"phi_im" => imag(ϕₕ)],nsubcells=10)
writevtk(Γf,filename * "_Gk",cellfields = ["eta_re" => real(κₕ),"eta_im" => imag(κₕ)],nsubcells=10)
writevtk(Γb,filename * "_Ge",cellfields = ["eta_re" => real(ηₕ),"eta_im" => imag(ηₕ)],nsubcells=10)
end
return (xps,ηxps)
end
end
| MonolithicFEMVLFS | https://github.com/oriolcg/MonolithicFEMVLFS.jl.git |
|
[
"MIT"
] | 0.1.1 | 5a50dd846a6e8431c653f91ae3572433893ce1a4 | code | 2127 | using Gridap
using WriteVTK
# Parameters
L = 2.0*π; H = 1.0; k = 10; η₀ = 0.01; g = 9.81
ρ_w = 1.0e3; λ = 2*π/k; ω = √(g*k*tanh(k*H))
ρ_b = 1.0e2; h_b = 1.0e-2; d₀ = ρ_b*h_b/ρ_w
Dρ = ρ_b*h_b*ω^2/(k^4)/ρ_w
dt = 0.001; γ_t = 0.5; β_t = 0.25; t₀ = 0.0; tf = 1.0
order = 2; degree = 2*order; n = 30; h = L/n
γ = 1.0*order*(order+1)
# Finite element mesh
domain = (0.0,L,0.0,H); partition = (2*n,n)
model_Ω = CartesianDiscreteModel(
domain,partition,isperiodic=(true,false))
labels = get_face_labeling(model_Ω)
add_tag_from_tags!(labels,"bottom",[1,2,5])
add_tag_from_tags!(labels,"beam",[3,4,6])
# Domains
Ω = Interior(model_Ω)
Γ = Boundary(model_Ω,tags="beam")
Λ = Skeleton(Γ); nΛ = get_normal_vector(Λ)
dΩ = Measure(Ω,degree);
dΓ = Measure(Γ,degree)
dΛ = Measure(Λ,degree)
# FE spaces
reffe = ReferenceFE(lagrangian,Float64,order)
V_Ω = TestFESpace(Ω,reffe)
V_Γ = TestFESpace(Γ,reffe)
U_Ω = TransientTrialFESpace(V_Ω)
U_Γ = TransientTrialFESpace(V_Γ)
Y = MultiFieldFESpace([V_Ω,V_Γ])
X = TransientMultiFieldFESpace([U_Ω,U_Γ])
# Weak form
m((ϕₜₜ,ηₜₜ),(w,v)) = ∫( d₀*ηₜₜ*v )dΓ
c((ϕₜ,ηₜ),(w,v)) = ∫( ϕₜ*v - ηₜ*w )dΓ
a((ϕ,η),(w,v)) =
∫( ∇(ϕ)⋅∇(w) )dΩ +
∫( g*η*v + Dρ*Δ(η)*Δ(v) )dΓ +
∫( Dρ*(
- mean(Δ(η))*jump(∇(v)⋅nΛ) -
jump(∇(η)⋅nΛ)*mean(Δ(v)) +
γ/h*jump(∇(v)⋅nΛ)*jump(∇(η)⋅nΛ) )
)dΛ
b((w,v)) = ∫( 0.0 * w )dΩ
op = TransientConstantFEOperator(m,c,a,b,X,Y)
# Initial condition
η(x,t) = η₀*cos(k*x[1]-ω*t)
ϕ(x,t) =
η₀*ω/k*cosh(k*x[2])/sinh(k*H)*sin(k*x[1]-ω*t)
η(t::Real) = x->η(x,t); ϕ(t::Real) = x->ϕ(x,t)
x₀ = interpolate_everywhere(
[ϕ(0.0),η(0.0)],X(0.0))
v₀ = interpolate_everywhere(
[∂t(ϕ)(0.0),∂t(η)(0.0)],X(0.0))
a₀ = interpolate_everywhere(
[∂tt(ϕ)(0.0),∂tt(η)(0.0)],X(0.0))
# Time stepping and Paraview output
ode_solver = Newmark(LUSolver(),dt,γ_t,β_t)
xₜ = solve(ode_solver,op,(x₀,v₀,a₀),t₀,tf)
pvd_Ω = paraview_collection("Ω",append=false)
pvd_Γ = paraview_collection("Γ",append=false)
for ((ϕₕ,ηₕ),tₙ) in xₜ
pvd_Ω[tₙ] = createvtk(
Ω,"Ω_$tₙ.vtu",cellfields=["phi"=>ϕₕ])
pvd_Γ[tₙ] = createvtk(
Γ,"Γ_$tₙ.vtu",cellfields=["eta"=>ηₕ])
end
vtk_save(pvd_Ω); vtk_save(pvd_Γ)
| MonolithicFEMVLFS | https://github.com/oriolcg/MonolithicFEMVLFS.jl.git |
|
[
"MIT"
] | 0.1.1 | 5a50dd846a6e8431c653f91ae3572433893ce1a4 | code | 102 | using MonolithicFEMVLFS
# @testset "MonolithicFEMVLFS.jl" begin
# # Write your tests here.
# end
| MonolithicFEMVLFS | https://github.com/oriolcg/MonolithicFEMVLFS.jl.git |
|
[
"MIT"
] | 0.1.1 | 5a50dd846a6e8431c653f91ae3572433893ce1a4 | docs | 4112 | # MonolithicFEMVLFS
[](https://oriolcg.github.io/MonolithicFEMVLFS.jl/stable)
[](https://oriolcg.github.io/MonolithicFEMVLFS.jl/dev)
[](https://github.com/oriolcg/MonolithicFEMVLFS.jl/actions/workflows/CI.yml?query=branch%3Amain)
A monolithic Finite Element formulation for the hydroelastic analysis of Very Large Floating Structures
This repository contains all the tests performed in the manuscript:
*A Monolithic Finite Element formulation for Very Large Floating Structures* by Oriol Colomés, Francesc Verdugo and Ido Akkerman. If you use this formulation, please cite:
```
@article{colomes2022monolithic,
doi = {10.48550/ARXIV.2206.12410},
url = {https://arxiv.org/abs/2206.12410},
author = {Colomés, Oriol and Verdugo, Francesc and Akkerman, Ido},
keywords = {Computational Engineering, Finance, and Science (cs.CE), Numerical Analysis (math.NA), FOS: Computer and information sciences, FOS: Computer and information sciences, FOS: Mathematics, FOS: Mathematics},
title = {A monolithic Finite Element formulation for the hydroelastic analysis of Very Large Floating Structures},
publisher = {arXiv},
year = {2022},
copyright = {Creative Commons Attribution 4.0 International}
}
```
and
```
@software{Colomes_MonolithicFEMVLFS_2022,
author = {Colomes, Oriol},
doi = {10.4121/19601419},
month = {4},
title = {{MonolithicFEMVLFS.jl}},
url = {https://github.com/oriolcg/MonolithicFEMVLFS.jl},
year = {2022},
version = {0.1.0}
}
```
### Abstract
In this work we present a novel monolithic Finite Element Method (FEM) for the hydroelastic analysis of Very Large Floating Structures (VLFS) with arbitrary shapes that is stable, energy-conserving and overcomes the need for an iterative algorithm. The new formulation enables a fully monolithic solution of the linear free-surface flow, described by linear potential flow, coupled with floating thin structures, described by the Euler-Bernoulli beam or Poisson-Kirchhoff plate equations.
The formulation presented in this work is general in the sense that solutions can be found in the frequency and time domains, it avoids the need for elements with $C^1$ continuity by employing a continuous/discontinuous Galerkin (C/DG) approach, and it is suitable for finite elements of arbitrary order.
We show that the proposed approach can accurately describe the hydroelastic phenomena of VLFS in a variety of tests, including structures with elastic joints, variable bathymetry and arbitrary structural shapes.
## Installation
`MonolithicFEMVLFS` is a package registered in the official [Julia package registry](https://github.com/JuliaRegistries/General). Thus, the installation of this package is straightforward using [Julia's package manager](https://julialang.github.io/Pkg.jl/v1/). Open the Julia REPL, type `]` to enter package mode, and install as follows
```julia
pkg> add MonolithicFEMVLFS
```
## Usage
To run all the test cases in the paper do:
```julia
using MonolithicFEMVLFS
run_tests("all")
```
To run only a specific test, for example the Khabakhpasheva test in frequency domain, do:
```julia
using MonolithicFEMVLFS
run_tests("5-2-1-Khabakhpasheva-freq-domain.jl")
```
Note that the numbers at the start of each script name indicate the corresponding section in the manuscript.
After execution, the data will be stored in the respective folder `data/<Section-number>-<test-name>`. If the flag to generate VTK files is active, the VTK output will be stored in `data/VTKOutput/<Section-number>-<test-name>`. The plots shown in the manuscript are stored in `plots/<Section-number>-<test-name>`.
This repository uses the DrWatson package; the data will only be generated the first time the tests are executed. If the data is already stored, the scripts will only regenerate the figures.
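Once a test has been executed, the stored results can be inspected with DrWatson's collection utilities. As a minimal sketch (assuming the default data layout used by the scripts, here for the Liu test):

```julia
using DrWatson
@quickactivate "MonolithicFEMVLFS"
res = collect_results(datadir("5-3-1-Liu")) # DataFrame with one row per stored case
@show res
```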
The code snippet appearing in Figure 3 of the manuscript can be found in `src/lst_periodic_beam.jl`.
|
[
"MIT"
] | 0.1.1 | 5a50dd846a6e8431c653f91ae3572433893ce1a4 | docs | 31 | Folder to store test 5-1-1 data | MonolithicFEMVLFS | https://github.com/oriolcg/MonolithicFEMVLFS.jl.git |
|
[
"MIT"
] | 0.1.1 | 5a50dd846a6e8431c653f91ae3572433893ce1a4 | docs | 31 | Folder to store test 5-1-2 data | MonolithicFEMVLFS | https://github.com/oriolcg/MonolithicFEMVLFS.jl.git |
|
[
"MIT"
] | 0.1.1 | 5a50dd846a6e8431c653f91ae3572433893ce1a4 | docs | 31 | Folder to store test 5-1-3 data | MonolithicFEMVLFS | https://github.com/oriolcg/MonolithicFEMVLFS.jl.git |
|
[
"MIT"
] | 0.1.1 | 5a50dd846a6e8431c653f91ae3572433893ce1a4 | docs | 31 | Folder to store test 5-1-4 data | MonolithicFEMVLFS | https://github.com/oriolcg/MonolithicFEMVLFS.jl.git |
|
[
"MIT"
] | 0.1.1 | 5a50dd846a6e8431c653f91ae3572433893ce1a4 | docs | 31 | Folder to store test 5-2-1 data | MonolithicFEMVLFS | https://github.com/oriolcg/MonolithicFEMVLFS.jl.git |
|
[
"MIT"
] | 0.1.1 | 5a50dd846a6e8431c653f91ae3572433893ce1a4 | docs | 31 | Folder to store test 5-2-2 data | MonolithicFEMVLFS | https://github.com/oriolcg/MonolithicFEMVLFS.jl.git |
|
[
"MIT"
] | 0.1.1 | 5a50dd846a6e8431c653f91ae3572433893ce1a4 | docs | 31 | Folder to store test 5-3-1 data | MonolithicFEMVLFS | https://github.com/oriolcg/MonolithicFEMVLFS.jl.git |
|
[
"MIT"
] | 0.1.1 | 5a50dd846a6e8431c653f91ae3572433893ce1a4 | docs | 31 | Folder to store test 5-4-1 data | MonolithicFEMVLFS | https://github.com/oriolcg/MonolithicFEMVLFS.jl.git |
|
[
"MIT"
] | 0.1.1 | 5a50dd846a6e8431c653f91ae3572433893ce1a4 | docs | 36 | Folder to store test 5-1-1 VTK files | MonolithicFEMVLFS | https://github.com/oriolcg/MonolithicFEMVLFS.jl.git |
|
[
"MIT"
] | 0.1.1 | 5a50dd846a6e8431c653f91ae3572433893ce1a4 | docs | 36 | Folder to store test 5-1-4 VTK files | MonolithicFEMVLFS | https://github.com/oriolcg/MonolithicFEMVLFS.jl.git |
|
[
"MIT"
] | 0.1.1 | 5a50dd846a6e8431c653f91ae3572433893ce1a4 | docs | 36 | Folder to store test 5-2-1 VTK files | MonolithicFEMVLFS | https://github.com/oriolcg/MonolithicFEMVLFS.jl.git |
|
[
"MIT"
] | 0.1.1 | 5a50dd846a6e8431c653f91ae3572433893ce1a4 | docs | 36 | Folder to store test 5-2-2 VTK files | MonolithicFEMVLFS | https://github.com/oriolcg/MonolithicFEMVLFS.jl.git |
|
[
"MIT"
] | 0.1.1 | 5a50dd846a6e8431c653f91ae3572433893ce1a4 | docs | 36 | Folder to store test 5-3-1 VTK files | MonolithicFEMVLFS | https://github.com/oriolcg/MonolithicFEMVLFS.jl.git |
|
[
"MIT"
] | 0.1.1 | 5a50dd846a6e8431c653f91ae3572433893ce1a4 | docs | 36 | Folder to store test 5-4-1 VTK files | MonolithicFEMVLFS | https://github.com/oriolcg/MonolithicFEMVLFS.jl.git |
|
[
"MIT"
] | 0.1.1 | 5a50dd846a6e8431c653f91ae3572433893ce1a4 | docs | 220 | ```@meta
CurrentModule = MonolithicFEMVLFS
```
# MonolithicFEMVLFS
Documentation for [MonolithicFEMVLFS](https://github.com/oriolcg/MonolithicFEMVLFS.jl).
```@index
```
```@autodocs
Modules = [MonolithicFEMVLFS]
```
| MonolithicFEMVLFS | https://github.com/oriolcg/MonolithicFEMVLFS.jl.git |
|
[
"MIT"
] | 0.1.1 | 5a50dd846a6e8431c653f91ae3572433893ce1a4 | docs | 32 | Folder to store test 5-1-1 plots | MonolithicFEMVLFS | https://github.com/oriolcg/MonolithicFEMVLFS.jl.git |
|
[
"MIT"
] | 0.1.1 | 5a50dd846a6e8431c653f91ae3572433893ce1a4 | docs | 32 | Folder to store test 5-1-2 plots
|
[
"MIT"
] | 0.1.1 | 5a50dd846a6e8431c653f91ae3572433893ce1a4 | docs | 32 | Folder to store test 5-1-3 plots
|
[
"MIT"
] | 0.1.1 | 5a50dd846a6e8431c653f91ae3572433893ce1a4 | docs | 32 | Folder to store test 5-1-4 plots
|
[
"MIT"
] | 0.1.1 | 5a50dd846a6e8431c653f91ae3572433893ce1a4 | docs | 32 | Folder to store test 5-2-1 plots | MonolithicFEMVLFS | https://github.com/oriolcg/MonolithicFEMVLFS.jl.git |
|
[
"MIT"
] | 0.1.1 | 5a50dd846a6e8431c653f91ae3572433893ce1a4 | docs | 32 | Folder to store test 5-2-2 plots | MonolithicFEMVLFS | https://github.com/oriolcg/MonolithicFEMVLFS.jl.git |
|
[
"MIT"
] | 0.1.1 | 5a50dd846a6e8431c653f91ae3572433893ce1a4 | docs | 32 | Folder to store test 5-3-1 plots | MonolithicFEMVLFS | https://github.com/oriolcg/MonolithicFEMVLFS.jl.git |
|
[
"MIT"
] | 0.1.1 | 5a50dd846a6e8431c653f91ae3572433893ce1a4 | docs | 32 | Folder to store test 5-4-1 plots | MonolithicFEMVLFS | https://github.com/oriolcg/MonolithicFEMVLFS.jl.git |
|
[
"MIT"
] | 0.1.0 | 8283438f37b103c995488de3ad1237910bfbefac | code | 4103 | module StructuredPrinting
import Crayons
export @structured_print, Options
"""
Options(
[types...];
match_only::Bool = false,
print_types::Bool = false,
recursion_types = (UnionAll, DataType),
recursion_depth = 1000
)
Printing options for `@structured_print`:
- `match_only`: only print properties that match the given types
- `print_types`: print types (e.g., `prop::typeof(prop)`)
- `recursion_types`: types whose properties are not recursed into (by default `UnionAll` and `DataType`),
in order to avoid infinite recursion
- `recursion_depth`: limit recursion depth (to avoid infinite recursion)
## Example
```julia
struct Leaf{T} end
struct Branch{A,B,C}
leafA::A
leafB::B
leafC::C
end
struct Tree{A,B,C}
branchA::A
branchB::B
branchC::C
end
t = Tree(
Branch(Leaf{(:A1, :L1)}(), Leaf{(:B1, :L2)}(), Leaf{(:C1, :L3)}()),
Branch(Leaf{(:A2, :L1)}(), Leaf{(:B2, :L2)}(), Leaf{(:C2, :L3)}()),
Branch(Leaf{(:A3, :L1)}(), Leaf{(:B3, :L2)}(), Leaf{(:C3, :L3)}()),
)
using StructuredPrinting
# Print struct alone
@structured_print t
# Print struct with type highlighted
@structured_print t Options(typeof(t.branchB))
# Print struct with Tuple of types highlighted
@structured_print t Options((typeof(t.branchB), typeof(t.branchA)))
```
"""
struct Options{T}
types::T
match_only::Bool
print_types::Bool
recursion_types::Tuple
recursion_depth::Int
function Options(
types...;
match_only = false,
print_types = false,
recursion_types = (UnionAll, DataType),
recursion_depth = 1000
)
if (types isa AbstractArray || types isa Tuple) && length(types) > 0
types = types[1]
else
types = (Union{},)
end
return new{typeof(types)}(
types,
match_only,
print_types,
recursion_types,
recursion_depth
)
end
end
Options(type::Type; kwargs...) = Options((type, ); kwargs...)
Options() = Options(())
function _structured_print(io, obj, pc; o::Options, name, counter=0)
counter > o.recursion_depth && return
for pn in propertynames(obj)
prop = getproperty(obj, pn)
pc_full = (pc..., ".", pn)
pc_string = name*string(join(pc_full))
if any(map(type -> prop isa type, o.types))
suffix = o.print_types ? "::$(typeof(prop))" : ""
pc_colored = Crayons.Box.RED_FG(pc_string)
println(io, "$pc_colored$suffix")
if !any(map(x->prop isa x, o.recursion_types))
_structured_print(io, prop, pc_full; o, name, counter=counter+1)
counter > o.recursion_depth && return
end
else
if !o.match_only
suffix = o.print_types ? "::$(typeof(prop))" : ""
println(io, "$pc_string$suffix")
end
_structured_print(io, prop, pc_full; o, name, counter=counter+1)
counter > o.recursion_depth && return
end
end
end
print_name(io, name, o) = o.match_only || println(io, name)
function structured_print(io, obj, name, o::Options = Options())
print_name(io, name, o)
_structured_print(
io,
obj,
(); # pc
o,
name,
)
println(io, "")
end
"""
@structured_print obj options
Recursively print out propertynames of
`obj` given options `options`. See
[`Options`](@ref) for more information
on available options.
"""
macro structured_print(obj, o)
return :(
structured_print(
stdout,
$(esc(obj)),
$(string(obj)),
$(esc(o)),
)
)
end
"""
@structured_print obj options
Recursively print out propertynames of
`obj` given options `options`. See
[`Options`](@ref) for more information
on available options.
"""
macro structured_print(obj)
return :(
structured_print(
stdout,
$(esc(obj)),
$(string(obj)),
)
)
end
end # module
| StructuredPrinting | https://github.com/charleskawczynski/StructuredPrinting.jl.git |
|
[
"MIT"
] | 0.1.0 | 8283438f37b103c995488de3ad1237910bfbefac | code | 1426 | using Test
using StructuredPrinting
struct Leaf{T} end
struct Branch{A,B,C}
leafA::A
leafB::B
leafC::C
end
struct Tree{A,B,C}
branchA::A
branchB::B
branchC::C
end
t = Tree(
Branch(Leaf{(:A1, :L1)}(), Leaf{(:B1, :L2)}(), Leaf{(:C1, :L3)}()),
Branch(Leaf{(:A2, :L1)}(), Leaf{(:B2, :L2)}(), Leaf{(:C2, :L3)}()),
Branch(Leaf{(:A3, :L1)}(), Leaf{(:B3, :L2)}(), Leaf{(:C3, :L3)}()),
)
@testset "StructuredPrinting" begin
# Print struct alone
@structured_print t
end
@testset "StructuredPrinting with types" begin
# Print struct with type
@structured_print t Options(typeof(t.branchB))
# Print struct with Tuple of types
types = (typeof(t.branchB), typeof(t.branchA))
@structured_print t Options(types)
end
@testset "StructuredPrinting with types and matching" begin
# Print struct with type
@structured_print t Options(typeof(t.branchB); match_only=true)
# Print struct with Tuple of types
types = (typeof(t.branchB), typeof(t.branchA))
@structured_print t Options(types; match_only=true)
end
@testset "StructuredPrinting with types and matching" begin
# Print struct with type
@structured_print t Options(typeof(t.branchB); match_only=true, print_types = true)
# Print struct with Tuple of types
types = (typeof(t.branchB), typeof(t.branchA))
@structured_print t Options(types; match_only=true, print_types = true)
end
| StructuredPrinting | https://github.com/charleskawczynski/StructuredPrinting.jl.git |
|
[
"MIT"
] | 0.1.0 | 8283438f37b103c995488de3ad1237910bfbefac | docs | 2701 | # StructuredPrinting.jl
A simple Julia package for printing structs in a structured way, while offering a way to filter and highlight specified information. This package was developed for debugging.
<details>
<summary>The story of how this started</summary>
One day, I was trying to find out if `UnionAll` objects existed in a very large OrdinaryDiffEq integrator. I ended up writing:
```julia
import Crayons
function getpropertyviz(obj, pc = (), indent = "")
for pn in propertynames(obj)
prop = getproperty(obj, pn)
pc_full = (pc..., ".", pn)
pc_string = string(join(pc_full))
if prop isa UnionAll || prop isa DataType
pc_colored = Crayons.Box.RED_FG(pc_string)
println("$indent $pc_colored :: $(typeof(prop)), FLAGME!")
else
println("$indent $pc_string :: $(typeof(prop))")
getpropertyviz(prop, pc_full, indent*" ")
end
end
end
getpropertyviz(integrator)
```
Which ended up highlighting 3 (of the thousands of) structs that were either `UnionAll`, or `DataType`, and this helped me to identify which structs were responsible for hurting compiler inference in a large codebase.
Since this code was so generic, I thought it might be useful to write a small tool for it and add some bells and whistles. Enter StructuredPrinting.jl.
</details>
## Demo
Here's a demo of this package in action (directly from the test suite):
```julia
struct Leaf{T} end
struct Branch{A,B,C}
leafA::A
leafB::B
leafC::C
end
struct Tree{A,B,C}
branchA::A
branchB::B
branchC::C
end
t = Tree(
Branch(Leaf{(:A1, :L1)}(), Leaf{(:B1, :L2)}(), Leaf{(:C1, :L3)}()),
Branch(Leaf{(:A2, :L1)}(), Leaf{(:B2, :L2)}(), Leaf{(:C2, :L3)}()),
Branch(Leaf{(:A3, :L1)}(), Leaf{(:B3, :L2)}(), Leaf{(:C3, :L3)}()),
)
using StructuredPrinting
# Print struct alone
@structured_print t
# Print struct with type highlighted
@structured_print t Options(typeof(t.branchB))
# Print struct with Tuple of types highlighted
@structured_print t Options((typeof(t.branchB), typeof(t.branchA)))
```
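`Options` also takes `match_only`, `print_types` and recursion-control keywords (see the docstring). A minimal sketch, reusing the `t` defined above:

```julia
# Show only matching entries, with their types printed
@structured_print t Options(typeof(t.branchB); match_only=true, print_types=true)
# Cap how deep the printer recurses
@structured_print t Options(typeof(t.branchB); recursion_depth=1)
```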
StructuredPrinting can be useful to find which objects match certain types, which can be helpful to identify potential inference issues:
```julia
struct Foo{A}
a::A
end
bar(obj, i::Int) = obj.type(i)
obj = (; type = Foo, x = 1, y = 2) # using a (<:Type)::DataType is a performance issue
bar(obj, 3) # make sure this is callable
@code_warntype bar(obj, 3) # demo performance issue
using StructuredPrinting
@structured_print obj Options((UnionAll, DataType)) # highlight `UnionAll` and `DataType`s
# Or, print types directly:
@structured_print obj Options((UnionAll, DataType); print_types=true)
```
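The `match_only` filter works here as well, if you only want to see the offending entries:

```julia
@structured_print obj Options((UnionAll, DataType); match_only=true)
```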
| StructuredPrinting | https://github.com/charleskawczynski/StructuredPrinting.jl.git |
|
[
"MIT"
] | 2.1.4 | 488da266cd18e792ba8cde04197d6c7817753650 | code | 1135 | using DynamicHMCModels
Random.seed!(1)
cd(@__DIR__)
include("simulateGaussian.jl")
struct GaussianProb{TY <: AbstractVector}
"Observations."
y::TY
end
function (problem::GaussianProb)(θ)
@unpack y = problem # extract the data
@unpack mu, sigma = θ
loglikelihood(Normal(mu, sigma), y) + logpdf(Normal(0,1), mu) +
logpdf(Truncated(Cauchy(0,5),0,Inf), sigma)
end
# Define problem with data and inits.
data = simulateGaussian(;Nd=100)
p = GaussianProb(data.y);
p((mu = 0.0, sigma = 1.0))
# Write a function to return properly dimensioned transformation.
problem_transformation(p::GaussianProb) =
as((mu = as(Real, -25, 25), sigma = asℝ₊), )
# Use ForwardDiff for the gradient.
P = TransformedLogDensity(problem_transformation(p), p)
∇P = ADgradient(:ForwardDiff, P)
#import Zygote
#∇P = ADgradient(:Zygote, P)
# Sample from the posterior.
chain, NUTS_tuned = NUTS_init_tune_mcmc(∇P, 4000);
# Undo the transformation to obtain the posterior from the chain.
posterior = TransformVariables.transform.(Ref(problem_transformation(p)),
get_position.(chain));
chns = nptochain(posterior, NUTS_tuned)
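# Illustrative check (not part of the original script): posterior means read
# straight off the NamedTuples, to compare with the simulating values μ = 0, σ = 1.
println("posterior mean mu ≈ ", mean(θ -> θ.mu, posterior))
println("posterior mean sigma ≈ ", mean(θ -> θ.sigma, posterior))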
describe(chns) | DynamicHMCModels | https://github.com/StatisticalRethinkingJulia/DynamicHMCModels.jl.git |
|
[
"MIT"
] | 2.1.4 | 488da266cd18e792ba8cde04197d6c7817753650 | code | 123 | using Distributions, Random
function simulateGaussian(;μ=0, σ=1, Nd, kwargs...)
(y = rand(Normal(μ,σ), Nd), N = Nd)
end
| DynamicHMCModels | https://github.com/StatisticalRethinkingJulia/DynamicHMCModels.jl.git |
|
[
"MIT"
] | 2.1.4 | 488da266cd18e792ba8cde04197d6c7817753650 | code | 1421 | using DynamicHMCModels
Random.seed!(1)
ProjDir = @__DIR__
cd(ProjDir)
include("simulatePoisson.jl")
struct PoissonProb
y::Array{Int64,1}
x::Array{Float64,1}
idx::Array{Int64,1}
N::Int64
Ns::Int64
end
function (problem::PoissonProb)(θ)
@unpack y, x, idx, N, Ns = problem # extract the data
@unpack a0,a1,a0s,a0_sig = θ
LL = 0.0
LL += logpdf(Cauchy(0, 1),a0_sig)
LL += sum(logpdf.(Normal(0,a0_sig),a0s))
  LL += logpdf(Normal(0, 10), a0)
  LL += logpdf(Normal(0, 1), a1)
for i in 1:N
λ = exp(a0 + a0s[idx[i]] + a1*x[i])
LL += logpdf(Poisson(λ),y[i])
end
return LL
end
y, x, idx, N, Ns = simulatePoisson(;Nd=1,Ns=10,a0=1.0,a1=.5,a0_sig=.3)
p = PoissonProb(y,x,idx,N,Ns)
p((a0=0.0,a1=0.0,a0s=fill(0.0,Ns),a0_sig=.3))
# Write a function to return properly dimensioned transformation.
problem_transformation(p::PoissonProb) =
as( (a0 = asℝ, a1 = asℝ, a0s = as(Array, Ns), a0_sig = asℝ₊) )
# Use Zygote for the gradient.
P = TransformedLogDensity(problem_transformation(p), p)
#∇P = LogDensityRejectErrors(ADgradient(:ForwardDiff, P))
import Zygote
∇P = ADgradient(:Zygote, P)
# Sample from the posterior.
chain, NUTS_tuned = NUTS_init_tune_mcmc(∇P, 1000);
# Undo the transformation to obtain the posterior from the chain.
posterior = TransformVariables.transform.(Ref(problem_transformation(p)), get_position.(chain));
chns = nptochain(posterior,NUTS_tuned)
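# Illustrative check: posterior means of the group intercepts a0s; their
# spread should be comparable to the simulating hyper-parameter a0_sig = .3.
a0s_draws = reduce(hcat, (θ.a0s for θ in posterior))   # Ns × n_samples
println("std of group-intercept means ≈ ", std(vec(mean(a0s_draws, dims = 2))))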
| DynamicHMCModels | https://github.com/StatisticalRethinkingJulia/DynamicHMCModels.jl.git |
|
[
"MIT"
] | 2.1.4 | 488da266cd18e792ba8cde04197d6c7817753650 | code | 502 | using Distributions, Random
function simulatePoisson(;Nd=1,Ns=10,a0=1.0,a1=.5,a0_sig=.3,kwargs...)
N = Nd*Ns
y = fill(0,N)
x = fill(0.0,N)
idx = similar(y)
i = 0
for s in 1:Ns
a0s = rand(Normal(0,a0_sig))
logpop = rand(Normal(9,1.5))
λ = exp(a0 + a0s + a1*logpop)
for nd in 1:Nd
i+=1
x[i] = logpop
idx[i] = s
y[i] = rand(Poisson(λ))
end
end
return (y=y,x=x,idx=idx,N=N,Ns=Ns)
end
| DynamicHMCModels | https://github.com/StatisticalRethinkingJulia/DynamicHMCModels.jl.git |
|
[
"MIT"
] | 2.1.4 | 488da266cd18e792ba8cde04197d6c7817753650 | code | 4041 | using Distributions, Parameters, DynamicHMC, LogDensityProblems, TransformVariables
using Random, StatsFuns
import Distributions: pdf, logpdf, rand
export LBA,pdf,logpdf,rand
Base.@kwdef struct LBA{T1,T2,T3,T4} <: ContinuousUnivariateDistribution
ν::T1
A::T2
k::T3
τ::T4
σ::Float64 = 1.0
end
Base.broadcastable(x::LBA) = Ref(x)
###
### simulation
###
function selectWinner(dt)
if any(x -> x > 0,dt)
mi, mv = 0, Inf
for (i, t) in enumerate(dt)
if (t > 0) && (t < mv)
mi = i
mv = t
end
end
else
return 1,-1.0
end
return mi,mv
end
function sampleDriftRates(ν,σ)
noPositive=true
v = similar(ν)
while noPositive
v = [rand(Normal(d,σ)) for d in ν]
any(x->x>0, v) ? noPositive=false : nothing
end
return v
end
function rand(d::LBA)
@unpack τ,A,k,ν,σ = d
b=A+k
N = length(ν)
v = sampleDriftRates(ν, σ)
a = rand(Uniform(0, A), N)
dt = @. (b-a)/v
choice,mn = selectWinner(dt)
rt = τ .+ mn
return choice,rt
end
function rand(d::LBA, N::Int)
choice = fill(0, N)
rt = fill(0.0, N)
for i in 1:N
choice[i], rt[i] = rand(d)
end
return (choice=choice, rt=rt)
end
function simulateLBA(;Nd, v=[1.0,1.5,2.0], A=.8, k=.2, tau=.4, kwargs...)
return (rand(LBA(ν=v, A=A, k=k, τ=tau), Nd)..., N=Nd, Nc=length(v))
end
###
### log densities
###
function logpdf(d::LBA,data::T) where {T<:NamedTuple}
return sum(logpdf.(d,data...))
end
logpdf(dist::LBA,data::Array{<:Tuple,1}) = sum(d -> logpdf(dist, d), data)
function logpdf(d::LBA, c, rt)
@unpack τ,A,k,ν,σ = d
b = A + k
logden = 0.0
rt < τ && return -Inf
for (i,v) in enumerate(ν)
if c == i
logden += log_dens_win(d, v, rt)
else
logden += log1mexp(log_dens_lose(d, v, rt))
end
end
return logden - log1mexp(logpnegative(d))
end
logpdf(d::LBA, data::Tuple) = logpdf(d, data...)
function log_dens_win(d::LBA, v, rt)
@unpack τ,A,k,σ = d
dt = rt-τ; b=A+k
n1 = (b-A-dt*v)/(dt*σ)
n2 = (b-dt*v)/(dt*σ)
Δcdfs = cdf(Normal(0,1),n2) - cdf(Normal(0,1),n1)
Δpdfs = pdf(Normal(0,1),n1) - pdf(Normal(0,1),n2)
return -log(A) + logaddexp(log(σ) + log(Δpdfs), log(v) + log(Δcdfs))
end
function log_dens_lose(d::LBA, v, rt)
@unpack τ,A,k,σ = d
dt = rt-τ; b=A+k
n1 = (b-A-dt*v)/(dt*σ)
n2 = (b-dt*v)/(dt*σ)
cm = 1 + ((b-A-dt*v)/A)*cdf(Normal(0, 1), n1) -
((b-dt*v)/A)*cdf(Normal(0, 1), n2) + ((dt*σ)/A)*pdf(Normal(0, 1), n1) -
((dt*σ)/A)*pdf(Normal(0, 1), n2)
cm = max(cm, 1e-10)
return log(cm)
end
function logpnegative(d::LBA)
@unpack ν,σ=d
sum(v -> logcdf(Normal(0, 1), -v/σ), ν)
end
struct LBAProb{T}
data::T
N::Int
Nc::Int
end
function (problem::LBAProb)(θ)
@unpack data=problem
@unpack v,A,k,tau=θ
d = LBA(ν=v, A=A, k=k, τ=tau)
minRT = minimum(last, data)
logprior = (sum(logpdf.(TruncatedNormal(0, 3, 0, Inf), v)) +
logpdf(TruncatedNormal(.8, .4, 0, Inf) ,A) +
logpdf(TruncatedNormal(.2, .3, 0, Inf), k) +
logpdf(TruncatedNormal(.4, .1, 0, minRT), tau))
    loglike = logpdf(d, data)
    return logprior + loglike
end
function sampleDHMC(choice, rt, N, Nc, nsamples)
data = [(c,r) for (c,r) in zip(choice,rt)]
return sampleDHMC(data, N, Nc, nsamples)
end
#Random.seed!(54548)
Random.seed!(123)
N = 10
data = simulateLBA(Nd = N)
p = LBAProb(collect(zip(data.choice, data.rt)), N, data.Nc)
p((v=fill(.5, data.Nc),A=.8, k=.2, tau=.4))
trans = as((v=as(Array,asℝ₊,data.Nc),A=asℝ₊,k=asℝ₊,tau=asℝ₊))
#minRT = minimum(data.rt)
#trans = as((v=as(Array,asℝ₊,data.Nc),A=asℝ₊,k=asℝ₊,tau=as(Real,0,minRT)))
P = TransformedLogDensity(trans, p)
∇P = ADgradient(:ForwardDiff, P)
results = mcmc_with_warmup(Random.GLOBAL_RNG, ∇P, 2000; warmup_stages =
default_warmup_stages(; local_optimization = nothing, M = DynamicHMC.Symmetric))
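# Illustrative summary (assumes sampling succeeded): posterior means of the
# drift rates, to compare with the simulating values v = [1.0, 1.5, 2.0].
let θs = trans.(results.chain)
    println("posterior mean v ≈ ", vec(mean(reduce(hcat, (θ.v for θ in θs)), dims = 2)))
end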
posterior = trans.(results.chain) | DynamicHMCModels | https://github.com/StatisticalRethinkingJulia/DynamicHMCModels.jl.git |
|
[
"MIT"
] | 2.1.4 | 488da266cd18e792ba8cde04197d6c7817753650 | code | 4354 | using Distributions, Parameters, DynamicHMC, LogDensityProblems, TransformVariables
using Random, StatsFuns, MCMCChains
import Distributions: pdf, logpdf, rand
export LBA,pdf,logpdf,rand
Base.@kwdef struct LBA{T1,T2,T3,T4} <: ContinuousUnivariateDistribution
ν::T1
A::T2
k::T3
τ::T4
σ::Float64 = 1.0
end
Base.broadcastable(x::LBA) = Ref(x)
###
### simulation
###
function selectWinner(dt)
if any(x -> x > 0,dt)
mi, mv = 0, Inf
for (i, t) in enumerate(dt)
if (t > 0) && (t < mv)
mi = i
mv = t
end
end
else
return 1,-1.0
end
return mi,mv
end
function sampleDriftRates(ν,σ)
noPositive=true
v = similar(ν)
while noPositive
v = [rand(Normal(d,σ)) for d in ν]
any(x->x>0, v) ? noPositive=false : nothing
end
return v
end
function rand(d::LBA)
@unpack τ,A,k,ν,σ = d
b=A+k
N = length(ν)
v = sampleDriftRates(ν, σ)
a = rand(Uniform(0, A), N)
dt = @. (b-a)/v
choice,mn = selectWinner(dt)
rt = τ .+ mn
return choice,rt
end
function rand(d::LBA, N::Int)
choice = fill(0, N)
rt = fill(0.0, N)
for i in 1:N
choice[i], rt[i] = rand(d)
end
return (choice=choice, rt=rt)
end
function simulateLBA(;Nd, v=[1.0,1.5,2.0], A=.8, k=.2, tau=.4, kwargs...)
return (rand(LBA(ν=v, A=A, k=k, τ=tau), Nd)..., N=Nd, Nc=length(v))
end
###
### log densities
###
function logpdf(d::LBA,data::T) where {T<:NamedTuple}
return sum(logpdf.(d,data...))
end
logpdf(dist::LBA,data::Array{<:Tuple,1}) = sum(d -> logpdf(dist, d), data)
function logpdf(d::LBA, c, rt)
@unpack τ,A,k,ν,σ = d
b = A + k
logden = 0.0
rt < τ && return -Inf
for (i,v) in enumerate(ν)
if c == i
logden += log_dens_win(d, v, rt)
else
logden += log1mexp(log_dens_lose(d, v, rt))
end
end
return logden - log1mexp(logpnegative(d))
end
logpdf(d::LBA, data::Tuple) = logpdf(d, data...)
function log_dens_win(d::LBA, v, rt)
@unpack τ,A,k,σ = d
dt = rt-τ; b=A+k
n1 = (b-A-dt*v)/(dt*σ)
n2 = (b-dt*v)/(dt*σ)
dens = (1/A)*(-v*cdf(Normal(0, 1), n1) + σ*pdf(Normal(0, 1), n1) +
v*cdf(Normal(0, 1), n2) - σ*pdf(Normal(0, 1), n2))
dens = max(dens, 1e-10)
return log(dens)
end
function log_dens_lose(d::LBA, v, rt)
@unpack τ,A,k,σ = d
dt = rt-τ; b=A+k
n1 = (b-A-dt*v)/(dt*σ)
n2 = (b-dt*v)/(dt*σ)
cm = 1 + ((b-A-dt*v)/A)*cdf(Normal(0, 1), n1) -
((b-dt*v)/A)*cdf(Normal(0, 1), n2) + ((dt*σ)/A)*pdf(Normal(0, 1), n1) -
((dt*σ)/A)*pdf(Normal(0, 1), n2)
cm = max(cm, 1e-10)
return log(cm)
end
function logpnegative(d::LBA)
@unpack ν,σ=d
sum(v -> logcdf(Normal(0, 1), -v/σ), ν)
end
struct LBAProb{T}
data::T
N::Int
Nc::Int
end
function (problem::LBAProb)(θ)
@unpack data=problem
@unpack v,A,k,tau=θ
d = LBA(ν=v, A=A, k=k, τ=tau)
minRT = minimum(last, data)
logprior = (sum(logpdf.(TruncatedNormal(0, 3, 0, Inf), v)) +
logpdf(TruncatedNormal(.8, .4, 0, Inf) ,A) +
logpdf(TruncatedNormal(.2, .3, 0, Inf), k) +
logpdf(TruncatedNormal(.4, .1, 0, minRT), tau))
    loglike = logpdf(d, data)
    return logprior + loglike
end
function sampleDHMC(choice, rt, N, Nc, nsamples)
data = [(c,r) for (c,r) in zip(choice,rt)]
return sampleDHMC(data, N, Nc, nsamples)
end
#Random.seed!(1333)
N = 10
data = simulateLBA(Nd = N)
p = LBAProb(collect(zip(data.choice, data.rt)), N, data.Nc)
p((v=fill(.5, data.Nc),A=.8, k=.2, tau=.4))
trans = as((v=as(Array,asℝ₊,data.Nc),A=asℝ₊,k=asℝ₊,tau=asℝ₊))
P = TransformedLogDensity(trans, p)
∇P = ADgradient(:ForwardDiff, P)
results = mcmc_with_warmup(Random.GLOBAL_RNG, ∇P, 2000;
# warmup_stages = default_warmup_stages(local_optimization=nothing)
)
posterior = trans.(results.chain)
parameter_names = ["v[1]", "v[2]", "v[3]", "A", "k", "tau"]
# Create a3d
a3d = Array{Float64, 3}(undef, 2000, 6, 1);
for j in 1:1
for i in 1:2000
a3d[i, 1:3, j] = values(posterior[i].v)
a3d[i, 4, j] = values(posterior[i].A)
a3d[i, 5, j] = values(posterior[i].k)
a3d[i, 6, j] = values(posterior[i].tau)
end
end
chns = MCMCChains.Chains(a3d,
vcat(parameter_names),
Dict(
:parameters => parameter_names
)
)
describe(chns)
| DynamicHMCModels | https://github.com/StatisticalRethinkingJulia/DynamicHMCModels.jl.git |
|
[
"MIT"
] | 2.1.4 | 488da266cd18e792ba8cde04197d6c7817753650 | code | 4425 | using Distributions, Parameters, DynamicHMC, LogDensityProblems, TransformVariables
using Random, StatsFuns
import Distributions: pdf, logpdf, rand
export LBA,pdf,logpdf,rand
Base.@kwdef struct LBA{T1,T2,T3,T4} <: ContinuousUnivariateDistribution
ν::T1
A::T2
k::T3
τ::T4
σ::Float64 = 1.0
end
Base.broadcastable(x::LBA) = Ref(x)
###
### simulation
###
function selectWinner(dt)
if any(x -> x > 0,dt)
mi, mv = 0, Inf
for (i, t) in enumerate(dt)
if (t > 0) && (t < mv)
mi = i
mv = t
end
end
else
return 1,-1.0
end
return mi,mv
end
function sampleDriftRates(ν,σ)
noPositive=true
v = similar(ν)
while noPositive
v = [rand(Normal(d,σ)) for d in ν]
any(x->x>0, v) ? noPositive=false : nothing
end
return v
end
function rand(d::LBA)
@unpack τ,A,k,ν,σ = d
b=A+k
N = length(ν)
v = sampleDriftRates(ν, σ)
a = rand(Uniform(0, A), N)
dt = @. (b-a)/v
choice,mn = selectWinner(dt)
rt = τ .+ mn
return choice,rt
end
function rand(d::LBA, N::Int)
choice = fill(0, N)
rt = fill(0.0, N)
for i in 1:N
choice[i], rt[i] = rand(d)
end
return (choice=choice, rt=rt)
end
function simulateLBA(;Nd, v=[1.0,1.5,2.0], A=.8, k=.2, tau=.4, kwargs...)
return (rand(LBA(ν=v, A=A, k=k, τ=tau), Nd)..., N=Nd, Nc=length(v))
end
###
### log densities
###
function logpdf(d::LBA,data::T) where {T<:NamedTuple}
return sum(logpdf.(d,data...))
end
logpdf(dist::LBA,data::Array{<:Tuple,1}) = sum(d -> logpdf(dist, d), data)
function logpdf(d::LBA, c, rt)
@unpack τ,A,k,ν,σ = d
b = A + k
logden = 0.0
rt < τ && return -Inf
for (i,v) in enumerate(ν)
if c == i
logden += log_dens_win(d, v, rt)
else
logden += log1mexp(log_dens_lose(d, v, rt))
end
end
return logden - log1mexp(logpnegative(d))
end
logpdf(d::LBA, data::Tuple) = logpdf(d, data...)
function log_dens_win(d::LBA, v, rt)
@unpack τ,A,k,σ = d
dt = rt-τ; b=A+k
n1 = (b-A-dt*v)/(dt*σ)
n2 = (b-dt*v)/(dt*σ)
Δcdfs = cdf(Normal(0,1),n2) - cdf(Normal(0,1),n1)
Δpdfs = pdf(Normal(0,1),n1) - pdf(Normal(0,1),n2)
return -log(A) + logaddexp(log(σ) + log(Δpdfs), log(v) + log(Δcdfs))
end
function log_dens_lose(d::LBA, v, rt)
@unpack τ,A,k,σ = d
dt = rt-τ; b=A+k
n1 = (b-A-dt*v)/(dt*σ)
n2 = (b-dt*v)/(dt*σ)
cm = 1 + ((b-A-dt*v)/A)*cdf(Normal(0, 1), n1) -
((b-dt*v)/A)*cdf(Normal(0, 1), n2) + ((dt*σ)/A)*pdf(Normal(0, 1), n1) -
((dt*σ)/A)*pdf(Normal(0, 1), n2)
cm = max(cm, 1e-10)
return log(cm)
end
function logpnegative(d::LBA)
@unpack ν,σ=d
sum(v -> logcdf(Normal(0, 1), -v/σ), ν)
end
struct LBAProb{T}
data::T
N::Int
Nc::Int
end
function (problem::LBAProb)(θ)
@unpack data=problem
@unpack v,A,k,tau=θ
d = LBA(ν=v, A=A, k=k, τ=tau)
minRT = minimum(last, data)
logprior = (sum(logpdf.(TruncatedNormal(0, 3, 0, Inf), v)) +
logpdf(TruncatedNormal(.8, .4, 0, Inf) ,A) +
logpdf(TruncatedNormal(.2, .3, 0, Inf), k) +
logpdf(TruncatedNormal(.4, .1, 0, minRT), tau))
    loglike = logpdf(d, data)
    return logprior + loglike
end
function sampleDHMC(choice, rt, N, Nc, nsamples)
data = [(c,r) for (c,r) in zip(choice,rt)]
return sampleDHMC(data, N, Nc, nsamples)
end
Random.seed!(54548)
#Random.seed!(123)
N = 10
data = simulateLBA(Nd = N)
p = LBAProb(collect(zip(data.choice, data.rt)), N, data.Nc)
p((v=fill(.5, data.Nc),A=.8, k=.2, tau=.4))
#trans = as((v=as(Array,asℝ₊,data.Nc),A=asℝ₊,k=asℝ₊,tau=asℝ₊))
minRT = minimum(data.rt)
trans = as((v=as(Array,asℝ₊,data.Nc),A=asℝ₊,k=asℝ₊,tau=as(Real,0,minRT)))
P = TransformedLogDensity(trans, p)
∇P = ADgradient(:ForwardDiff, P)
bad_xs = LogDensityProblems.stresstest(LogDensityProblems.logdensity, P; scale = 0.001)
bad_xs |> display
bad_θs = trans.(bad_xs)
bad_θs |> display
bad_gxs = LogDensityProblems.stresstest(LogDensityProblems.logdensity_and_gradient, P; scale = 0.001)
bad_gxs |> display
bad_gθs = trans.(bad_gxs)
bad_gθs |> display
#LogDensityProblems.logdensity(P, bad_xs[1])
#p(bad_θs[1])
#=
results = mcmc_with_warmup(Random.GLOBAL_RNG, ∇P, 2000; warmup_stages =
default_warmup_stages(; local_optimization = nothing, M = DynamicHMC.Symmetric))
posterior = trans.(results.chain)
=# | DynamicHMCModels | https://github.com/StatisticalRethinkingJulia/DynamicHMCModels.jl.git |
|
[
"MIT"
] | 2.1.4 | 488da266cd18e792ba8cde04197d6c7817753650 | code | 2974 | using Distributions, Parameters, Random
import Distributions: pdf,logpdf,rand
export LBA,pdf,logpdf,rand
# See discussions at https://discourse.julialang.org/t/dynamichmc-reached-maximum-number-of-iterations/24721
############################################
# Model functions
############################################
mutable struct LBA{T1,T2,T3,T4} <: ContinuousUnivariateDistribution
ν::T1
A::T2
k::T3
τ::T4
σ::Float64
end
Base.broadcastable(x::LBA)=Ref(x)
LBA(;τ,A,k,ν,σ=1.0) = LBA(ν,A,k,τ,σ)
function selectWinner(dt)
if any(x->x >0,dt)
mi,mv = 0,Inf
for (i,t) in enumerate(dt)
if (t > 0) && (t < mv)
mi = i
mv = t
end
end
else
return 1,-1.0
end
return mi,mv
end
function sampleDriftRates(ν,σ)
noPositive=true
v = similar(ν)
while noPositive
v = [rand(Normal(d,σ)) for d in ν]
any(x->x>0,v) ? noPositive=false : nothing
end
return v
end
function rand(d::LBA)
@unpack τ,A,k,ν,σ = d
b=A+k
N = length(ν)
v = sampleDriftRates(ν,σ)
a = rand(Uniform(0,A),N)
dt = @. (b-a)/v
choice,mn = selectWinner(dt)
rt = τ .+ mn
return choice,rt
end
function rand(d::LBA,N::Int)
choice = fill(0,N)
rt = fill(0.0,N)
for i in 1:N
choice[i],rt[i]=rand(d)
end
return (choice=choice,rt=rt)
end
logpdf(d::LBA,choice,rt) = log(pdf(d,choice,rt))
function logpdf(d::LBA,data::T) where {T<:NamedTuple}
return sum(logpdf.(d,data...))
end
function logpdf(dist::LBA,data::Array{<:Tuple,1})
LL = 0.0
for d in data
LL += logpdf(dist,d...)
end
return LL
end
function pdf(d::LBA,c,rt)
@unpack τ,A,k,ν,σ = d
b=A+k; den = 1.0
rt < τ ? (return 1e-10) : nothing
for (i,v) in enumerate(ν)
if c == i
den *= dens(d,v,rt)
else
den *= (1-cummulative(d,v,rt))
end
end
pneg = pnegative(d)
den = den/(1-pneg)
den = max(den,1e-10)
isnan(den) ? (return 0.0) : (return den)
end
function dens(d::LBA,v,rt)
@unpack τ,A,k,ν,σ = d
dt = rt-τ; b=A+k
n1 = (b-A-dt*v)/(dt*σ)
n2 = (b-dt*v)/(dt*σ)
dens = (1/A)*(-v*cdf(Normal(0,1),n1) + σ*pdf(Normal(0,1),n1) +
v*cdf(Normal(0,1),n2) - σ*pdf(Normal(0,1),n2))
return dens
end
function cummulative(d::LBA,v,rt)
@unpack τ,A,k,ν,σ = d
dt = rt-τ; b=A+k
n1 = (b-A-dt*v)/(dt*σ)
n2 = (b-dt*v)/(dt*σ)
cm = 1 + ((b-A-dt*v)/A)*cdf(Normal(0,1),n1) -
((b-dt*v)/A)*cdf(Normal(0,1),n2) + ((dt*σ)/A)*pdf(Normal(0,1),n1) -
((dt*σ)/A)*pdf(Normal(0,1),n2)
return cm
end
function pnegative(d::LBA)
@unpack ν,σ=d
p=1.0
for v in ν
p*= cdf(Normal(0,1),-v/σ)
end
return p
end
function simulateLBA(;Nd=10,v=[1.0,1.5,2.0],A=.8,k=.2,tau=.4,kwargs...)
return (rand(LBA(ν=v,A=A,k=k,τ=tau),Nd)...,N=Nd,Nc=length(v))
end
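# Minimal usage sketch (kept as comments so that `include`-ing this file
# stays side-effect free):
#   choice, rt, _, _ = simulateLBA(Nd = 5)
#   d = LBA(ν = [1.0, 1.5, 2.0], A = .8, k = .2, τ = .4)
#   logpdf(d, [(c, r) for (c, r) in zip(choice, rt)])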
| DynamicHMCModels | https://github.com/StatisticalRethinkingJulia/DynamicHMCModels.jl.git |
|
[
"MIT"
] | 2.1.4 | 488da266cd18e792ba8cde04197d6c7817753650 | code | 1920 | using DynamicHMCModels, MCMCChains, Random
#Random.seed!(1233)
ProjDir = @__DIR__
cd(ProjDir)
include(joinpath(@__DIR__, "LBA_functions.jl"))
Base.@kwdef struct LBAModel{T}
data::T
N::Int
Nc::Int
end
function make_transformation(model::LBAModel)
    as((v=as(Array,asℝ₊,model.Nc), A=asℝ₊, k=asℝ₊, tau=asℝ₊))
end
N = 10
v = [1.0, 1.5]
Nc = length(v)
data=simulateLBA(;Nd=N,v=v,A=.8,k=.2,tau=.4)
#dist = LBA(ν=[1.0,1.5,2.0],A=.8,k=.2,τ=.4)
#data = rand(dist,N)
model = LBAModel(; data=data, N=N, Nc=Nc)
function (model::LBAModel)(θ)
@unpack data=model
@unpack v,A,k,tau=θ
d=LBA(ν=v,A=A,k=k,τ=tau)
minRT = minimum(x->x[2],data)
logpdf(d,data)+sum(logpdf.(TruncatedNormal(0,3,0,Inf),v)) +
logpdf(TruncatedNormal(.8,.4,0,Inf),A)+logpdf(TruncatedNormal(.2,.3,0,Inf),k)+
logpdf(TruncatedNormal(.4,.1,0,minRT),tau)
end
d = [(c,r) for (c,r) in zip(data.choice,data.rt)]
p = LBAModel(d,N,Nc)
p((v=fill(.5,Nc),A=.8,k=.2,tau=.4))
# Use ForwardDiff for the gradient.
P = TransformedLogDensity(make_transformation(p), p)
∇P = ADgradient(:ForwardDiff, P)
# Sample from the posterior.
results = mcmc_with_warmup(Random.GLOBAL_RNG, ∇P, 1000;
warmup_stages = default_warmup_stages(local_optimization=nothing),
reporter = NoProgressReport()
)
posterior = P.transformation.(results.chain)
@show DynamicHMC.Diagnostics.EBFMI(results.tree_statistics)
println()
@show DynamicHMC.Diagnostics.summarize_tree_statistics(results.tree_statistics)
println()
parameter_names = ["v[1]", "v[2]", "A", "k", "tau"]
# Create a3d
a3d = Array{Float64, 3}(undef, 1000, 5, 1);
for j in 1:1
for i in 1:1000
a3d[i, 1:2, j] = values(posterior[i].v)
a3d[i, 3, j] = values(posterior[i].A)
a3d[i, 4, j] = values(posterior[i].k)
a3d[i, 5, j] = values(posterior[i].tau)
end
end
chns = MCMCChains.Chains(a3d,
vcat(parameter_names),
Dict(
:parameters => parameter_names
)
)
describe(chns)
| DynamicHMCModels | https://github.com/StatisticalRethinkingJulia/DynamicHMCModels.jl.git |
|
[
"MIT"
] | 2.1.4 | 488da266cd18e792ba8cde04197d6c7817753650 | code | 1235 | using DynamicHMCModels
Random.seed!(1)
cd(@__DIR__)
include("simulateLR.jl")
struct RegressionProb
x::Array{Float64,2}
y::Array{Float64,1}
Nd::Int64
Nc::Int64
end
function (problem::RegressionProb)(θ)
@unpack x,y,Nd,Nc = problem # extract the data
@unpack B0,B,sigma = θ
μ = B0 .+x*B
sum(logpdf.(Normal.(μ,sigma),y)) + logpdf(Normal(0,10),B0) +
loglikelihood(Normal(0,10),B) + logpdf(Truncated(Cauchy(0,5),0,Inf),sigma)
end
# Define problem with data and inits.
x, y, Nd, Nc = simulateLR()
p = RegressionProb(x, y, Nd, Nc)
p((B0 = 0.0, B = fill(0.0, Nc), sigma = 1.0))
# Write a function to return properly dimensioned transformation.
problem_transformation(p::RegressionProb) =
as((B0=asℝ, B=as(Array, asℝ, Nc), sigma = asℝ₊))
# Use ForwardDiff for the gradient.
P = TransformedLogDensity(problem_transformation(p), p)
∇P = ADgradient(:ForwardDiff, P)
#import Zygote
#∇P = ADgradient(:Zygote, P)
# Sample from the posterior.
chain, NUTS_tuned = NUTS_init_tune_mcmc(∇P, 4000);
# Undo the transformation to obtain the posterior from the chain.
posterior = TransformVariables.transform.(Ref(problem_transformation(p)), get_position.(chain));
chns = nptochain(posterior,NUTS_tuned)
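# Illustrative check: posterior means of the intercept and slope, to compare
# with the simulating values β0 = 1 and β = .5.
println("posterior mean B0 ≈ ", mean(θ -> θ.B0, posterior))
println("posterior mean B ≈ ", mean(θ -> θ.B[1], posterior))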
| DynamicHMCModels | https://github.com/StatisticalRethinkingJulia/DynamicHMCModels.jl.git |
|
[
"MIT"
] | 2.1.4 | 488da266cd18e792ba8cde04197d6c7817753650 | code | 226 | using Distributions, Random
function simulateLR(;Nd = 4, Nc = 1,β0 = 1., β = fill(.5, Nc), σ = 1, kwargs...)
x = rand(Normal(10,5),Nd,Nc)
y = β0 .+ x*β .+ rand(Normal(0,σ),Nd)
return (x=x, y=y, Nd=Nd, Nc=Nc)
end
| DynamicHMCModels | https://github.com/StatisticalRethinkingJulia/DynamicHMCModels.jl.git |
|
[
"MIT"
] | 2.1.4 | 488da266cd18e792ba8cde04197d6c7817753650 | code | 1085 | using DynamicHMCModels
Random.seed!(1)
cd(@__DIR__)
include("sdt_functions.jl")
include("simulateSDT.jl")
struct SDTProblem
hits::Int64
fas::Int64
Nd::Int64
end
function (problem::SDTProblem)(θ)
@unpack hits,fas,Nd=problem # extract the data
@unpack d,c=θ
logpdf(SDT(d,c),[hits,fas,Nd])+logpdf(Normal(0,1/sqrt(2)),d) +
logpdf(Normal(0,1/sqrt(2)),c)
end
# Define problem with data and inits.
data = simulateSDT(;Nd=100)
p = SDTProblem(data...)
p((d=2.0,c=.0))
# Write a function to return properly dimensioned transformation.
problem_transformation(p::SDTProblem) =
as((d=asℝ,c=asℝ))
# Use ForwardDiff for the gradient.
P = TransformedLogDensity(problem_transformation(p), p)
∇P = ADgradient(:ForwardDiff, P);
#import Zygote
#∇P = ADgradient(:Zygote, P);
# Sample from the posterior.
chain, NUTS_tuned = NUTS_init_tune_mcmc(∇P, 4000);
# Undo the transformation to obtain the posterior from the chain.
posterior = TransformVariables.transform.(Ref(problem_transformation(p)),
get_position.(chain));
chns = nptochain(posterior,NUTS_tuned)
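# Illustrative check: posterior means of d and c, to compare with the
# simulating values d = 2, c = 0.
println("posterior mean d ≈ ", mean(θ -> θ.d, posterior),
    ", c ≈ ", mean(θ -> θ.c, posterior))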
| DynamicHMCModels | https://github.com/StatisticalRethinkingJulia/DynamicHMCModels.jl.git |
|
[
"MIT"
] | 2.1.4 | 488da266cd18e792ba8cde04197d6c7817753650 | code | 526 | using Distributions, Parameters
import Distributions: logpdf, pdf
struct SDT{T1,T2} <: ContinuousUnivariateDistribution
d::T1
c::T2
end
logpdf(d::SDT,data::Vector{Int64}) = logpdf(d,data...)
logpdf(d::SDT,data::Tuple{Vararg{Int64}}) = logpdf(d,data...)
function logpdf(d::SDT,hits,fas,Nd)
@unpack d,c=d
θhit=cdf(Normal(0,1),d/2-c)
θfa=cdf(Normal(0,1),-d/2-c)
loghits = logpdf(Binomial(Nd,θhit),hits)
logfas = logpdf(Binomial(Nd,θfa),fas)
return loghits+logfas
end
pdf(d::SDT,data::Vector{Int64}) = exp(logpdf(d,data...))
| DynamicHMCModels | https://github.com/StatisticalRethinkingJulia/DynamicHMCModels.jl.git |
|
[
"MIT"
] | 2.1.4 | 488da266cd18e792ba8cde04197d6c7817753650 | code | 251 | using Distributions, Random
function simulateSDT(;d=2.,c=0.,Nd,kwargs...)
θhit=cdf(Normal(0,1),d/2-c)
θfa=cdf(Normal(0,1),-d/2-c)
hits = rand(Binomial(Nd,θhit))
fas = rand(Binomial(Nd,θfa))
return (hits=hits,fas=fas,Nd=Nd)
end
| DynamicHMCModels | https://github.com/StatisticalRethinkingJulia/DynamicHMCModels.jl.git |
|
[
"MIT"
] | 2.1.4 | 488da266cd18e792ba8cde04197d6c7817753650 | code | 1274 | # # Estimate Binomial draw probability
using DynamicHMCModels
Random.seed!(1356779)
# Define a structure to hold the data.
Base.@kwdef struct BernoulliProblem
"Total number of draws in the data."
n::Int
"Number of draws ' == 1' "
obs::Vector{Int}
end;
# Write a function to return properly dimensioned transformation.
make_transformation(model::BernoulliProblem) =
as((p = as𝕀, ))
# Add data
model = BernoulliProblem(; n = 9, obs = rand(Binomial(9, 2/3), 3))
# Make the type callable with the parameters *as a single argument*.
function (model::BernoulliProblem)(θ)
@unpack n, obs = model # extract the data
@unpack p = θ
loglikelihood(Binomial(n, p), obs)
end
# Use a flat prior (the default, omitted) for p
P = TransformedLogDensity(make_transformation(model), model)
∇P = ADgradient(:ForwardDiff, P);
# Sample chain
results = mcmc_with_warmup(Random.GLOBAL_RNG, ∇P, 1000;
reporter = NoProgressReport()
)
posterior = P.transformation.(results.chain)
# Create Particles NamedTuple object
println()
p = as_particles(posterior)
p |> display
println()
DynamicHMC.Diagnostics.EBFMI(results.tree_statistics) |> display
println()
DynamicHMC.Diagnostics.summarize_tree_statistics(results.tree_statistics) |> display
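# Illustrative check: the posterior mean of p should sit near the simulating
# probability 2/3 used above to generate `obs`.
println("posterior mean p ≈ ", mean(θ -> θ.p, posterior))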
| DynamicHMCModels | https://github.com/StatisticalRethinkingJulia/DynamicHMCModels.jl.git |
|
[
"MIT"
] | 2.1.4 | 488da266cd18e792ba8cde04197d6c7817753650 | code | 1866 | # # Heights_1 problem
# We estimate a simple linear regression model with a half-T prior.
using DynamicHMCModels
ProjDir = @__DIR__
cd(ProjDir)
# Import the dataset.
delim = ';'
data = CSV.read(joinpath("..", "..", "data", "Howell1.csv"), DataFrame; delim);
# Use only adults and standardize
df = filter(row -> row[:age] >= 18, data);
# Half-T for `σ`, see below.
Base.@kwdef mutable struct Heights_1{Ty <: AbstractVector, Tν <: Real}
"Observations."
y::Ty
"Degrees of freedom for prior on sigma."
v::Tν
end;
# Write a function to return properly dimensioned transformation.
function make_transformation(model::Heights_1)
as((σ = asℝ₊, μ = as(Real, 100, 250)), )
end
model = Heights_1(;y = df[:, :height], v=1.0)
# Then make the type callable with the parameters *as a single argument*.
function (model::Heights_1)(θ)
@unpack y, v = model # extract the data
@unpack μ, σ = θ
loglikelihood(Normal(μ, σ), y) + logpdf(TDist(v), σ)
end;
# Wrap the problem with a transformation, then use ForwardDiff for the gradient.
P = TransformedLogDensity(make_transformation(model), model)
∇P = ADgradient(:ForwardDiff, P);
# Tune and sample.
results = mcmc_with_warmup(Random.GLOBAL_RNG, ∇P, 1000)
posterior = P.transformation.(results.chain)
p = as_particles(posterior)
display(p)
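# Illustrative cross-check against the CmdStan estimates quoted below.
println("DynamicHMC: mu ≈ ", mean(θ -> θ.μ, posterior),
    ", sigma ≈ ", mean(θ -> θ.σ, posterior))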
# Stan.jl results
cmdstan_result = "
Iterations = 1:1000
Thinning interval = 1
Chains = 1,2,3,4
Samples per chain = 1000
Empirical Posterior Estimates:
Mean SD Naive SE MCSE ESS
sigma 7.7641872 0.29928194 0.004732063 0.0055677898 1000
mu 154.6055177 0.41989355 0.006639100 0.0085038356 1000
Quantiles:
2.5% 25.0% 50.0% 75.0% 97.5%
sigma 7.21853 7.5560625 7.751355 7.9566775 8.410391
mu 153.77992 154.3157500 154.602000 154.8820000 155.431000
";
# end of m4.1d.jl | DynamicHMCModels | https://github.com/StatisticalRethinkingJulia/DynamicHMCModels.jl.git |
|
[
"MIT"
] | 2.1.4 | 488da266cd18e792ba8cde04197d6c7817753650 | code | 2314 | # # Heights_1 problem
# We estimate a simple linear regression model with a half-T prior.
using StatisticalRethinking, DynamicHMCModels, MCMCChains
ProjDir = @__DIR__
cd(ProjDir)
# Import the dataset.
data = CSV.read(rel_path("..", "data", "Howell1.csv"), DataFrame; delim=';')
# Use only adults and standardize
df = filter(row -> row[:age] >= 18, data);
# Half-T for `σ`, see below.
Base.@kwdef mutable struct Heights_1{Ty <: AbstractVector, Tν <: Real}
"Observations."
y::Ty
"Degrees of freedom for prior on sigma."
v::Tν
end;
# Write a function to return properly dimensioned transformation.
function make_transformation(model::Heights_1)
as((σ = asℝ₊, μ = as(Real, 100, 250)), )
end
model = Heights_1(;y = df[:, :height], v=1.0)
# Then make the type callable with the parameters *as a single argument*.
function (model::Heights_1)(θ)
@unpack y, v = model # extract the data
@unpack μ, σ = θ
loglikelihood(Normal(μ, σ), y) + logpdf(TDist(v), σ)
end;
# Wrap the problem with a transformation, then use ForwardDiff for the gradient.
P = TransformedLogDensity(make_transformation(model), model)
∇P = ADgradient(:ForwardDiff, P);
# Tune and sample.
results = mcmc_with_warmup(Random.GLOBAL_RNG, ∇P, 1000)
posterior = P.transformation.(results.chain)
DynamicHMC.Diagnostics.EBFMI(results.tree_statistics) |> display
println()
DynamicHMC.Diagnostics.summarize_tree_statistics(results.tree_statistics) |> display
println()
a3d = Array{Float64, 3}(undef, 1000, 2, 1);
for j in 1:1
for i in 1:1000
a3d[i, 1, j] = values(posterior[i].μ)
a3d[i, 2, j] = values(posterior[i].σ)
end
end
pnames = ["μ", "σ"]
sections = Dict(
:parameters => pnames,
)
chns = create_mcmcchains(a3d, pnames, sections, start=1);
# Stan.jl results
cmdstan_result = "
Iterations = 1:1000
Thinning interval = 1
Chains = 1,2,3,4
Samples per chain = 1000
Empirical Posterior Estimates:
Mean SD Naive SE MCSE ESS
sigma 7.7641872 0.29928194 0.004732063 0.0055677898 1000
mu 154.6055177 0.41989355 0.006639100 0.0085038356 1000
Quantiles:
2.5% 25.0% 50.0% 75.0% 97.5%
sigma 7.21853 7.5560625 7.751355 7.9566775 8.410391
mu 153.77992 154.3157500 154.602000 154.8820000 155.431000
";
show(chns)
# end of m4.1d.jl | DynamicHMCModels | https://github.com/StatisticalRethinkingJulia/DynamicHMCModels.jl.git |
|
[
"MIT"
] | 2.1.4 | 488da266cd18e792ba8cde04197d6c7817753650 | code | 1791 | # # Heights_2 problem with restricted prior on mu.
using DynamicHMCModels
ProjDir = @__DIR__
cd(ProjDir)
# Import the dataset.
data = CSV.read(joinpath("..", "..", "data", "Howell1.csv"), DataFrame; delim=';')
# Use only adults and standardize
df = filter(row -> row[:age] >= 18, data);
# Flat `σ`, see below.
Base.@kwdef mutable struct Heights_2{Ty <: AbstractVector}
"Observations."
y::Ty
end;
# Write a function to return properly dimensioned transformation.
function make_transformation(model::Heights_2)
as((σ = asℝ₊, μ = as(Real, 100, 250)), )
end
model = Heights_2(;y = df[:, :height])
# Then make the type callable with the parameters *as a single argument*. Very constrained prior on μ. Flat prior on σ.
function (model::Heights_2)(θ)
@unpack y = model # extract the data
@unpack μ, σ = θ
loglikelihood(Normal(μ, σ), y) + logpdf(Normal(178, 0.1), μ) +
logpdf(Uniform(0, 50), σ)
end;
# Wrap the problem with a transformation, then use ForwardDiff for the gradient.
P = TransformedLogDensity(make_transformation(model), model)
∇P = ADgradient(:ForwardDiff, P);
# Tune and sample.
results = mcmc_with_warmup(Random.GLOBAL_RNG, ∇P, 1000)
posterior = P.transformation.(results.chain)
p = as_particles(posterior)
p |> display
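# Illustrative: the Normal(178, 0.1) prior dominates the likelihood here, so
# the posterior mean of μ stays close to 178 (cf. mu in the output below).
println("posterior mean μ ≈ ", mean(θ -> θ.μ, posterior))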
# cmdstan result
stan_result = "
Iterations = 1:1000
Thinning interval = 1
Chains = 1,2,3,4
Samples per chain = 1000
Empirical Posterior Estimates:
Mean SD Naive SE MCSE ESS
sigma 24.604616 0.946911707 0.0149719887 0.0162406632 1000
mu 177.864069 0.102284043 0.0016172527 0.0013514459 1000
Quantiles:
2.5% 25.0% 50.0% 75.0% 97.5%
sigma 22.826377 23.942275 24.56935 25.2294 26.528368
mu 177.665000 177.797000 177.86400 177.9310 178.066000
";
# end of m4.2d.jl | DynamicHMCModels | https://github.com/StatisticalRethinkingJulia/DynamicHMCModels.jl.git |
|
[
"MIT"
] | 2.1.4 | 488da266cd18e792ba8cde04197d6c7817753650 | code | 2730 | # # Polynomial weight model
using DynamicHMCModels
ProjDir = @__DIR__
cd(ProjDir)
# Import the dataset.
data = CSV.read(joinpath("..", "..", "data", "Howell1.csv"), DataFrame; delim=';')
# Use only adults and standardize
df = filter(row -> row[:age] >= 18, data);
df[!, :weight] = convert(Vector{Float64}, df[:, :weight]);
df[!, :weight_s] = (df[:, :weight] .- mean(df[:, :weight])) / std(df[:, :weight]);
df[!, :weight_s2] = df[:, :weight_s] .^ 2;
# LR model ``y ∼ xβ + ϵ``, where ``ϵ ∼ N(0, σ²)`` IID.
Base.@kwdef mutable struct ConstraintHeightProblem{Ty <: AbstractVector,
Tx <: AbstractMatrix}
"Observations."
y::Ty
"Covariates"
x::Tx
end;
# Write a function to return a properly dimensioned transformation.
function make_transformation(model::ConstraintHeightProblem)
as((β = as(Array, size(model.x, 2)), σ = asℝ₊))
end
N = size(df, 1)
x = hcat(ones(N), hcat(df[:, :weight_s], df[:, :weight_s2]));
model = ConstraintHeightProblem(;y = df[:, :height], x=x)
# Pack the parameters in a single argument θ.
function (problem::ConstraintHeightProblem)(θ)
@unpack y, x = problem # extract the data
@unpack β, σ = θ # works on the named tuple too
ll = 0.0
  ll += logpdf(Normal(178, 100), β[1]) # a = β[1]
  ll += logpdf(Normal(0, 10), β[2]) # b1 = β[2]
  ll += logpdf(Normal(0, 10), β[3]) # b2 = β[3]
ll += logpdf(TDist(1.0), σ)
ll += loglikelihood(Normal(0, σ), y .- x*β)
ll
end
# Evaluate the model function at some initial values
println()
model((β = [1.0, 2.0, 3.0], σ = 1.0)) |> display
println()
# Wrap the problem with a transformation, then use ForwardDiff for the gradient.
P = TransformedLogDensity(make_transformation(model), model)
∇P = ADgradient(:ForwardDiff, P);
# Tune and sample.
results = mcmc_with_warmup(Random.GLOBAL_RNG, ∇P, 1000)
posterior = P.transformation.(results.chain)
p = as_particles(posterior)
display(p)
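# Illustrative posterior prediction: at the mean weight (weight_s = 0 and
# weight_s2 = 0) the expected height reduces to the intercept β[1].
β_hat = vec(mean(reduce(hcat, (θ.β for θ in posterior)), dims = 2))
println("expected height at mean weight ≈ ", β_hat[1])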
stan_result = "
Iterations = 1:1000
Thinning interval = 1
Chains = 1,2,3,4
Samples per chain = 1000
Empirical Posterior Estimates:
Mean SD Naive SE MCSE ESS
a 154.609019750 0.36158389 0.0057171433 0.0071845548 1000
b1 5.838431778 0.27920926 0.0044146860 0.0048693502 1000
b2 -0.009985954 0.22897191 0.0036203637 0.0047224478 1000
sigma 5.110136300 0.19096315 0.0030193925 0.0030728192 1000
Quantiles:
2.5% 25.0% 50.0% 75.0% 97.5%
a 153.92392500 154.3567500 154.60700000 154.8502500 155.32100000
b1 5.27846200 5.6493250 5.83991000 6.0276275 6.39728200
b2 -0.45954687 -0.1668285 -0.01382935 0.1423620 0.43600905
sigma 4.76114350 4.9816850 5.10326000 5.2300450 5.51500975
";
# end of m4.5d.jl | DynamicHMCModels | https://github.com/StatisticalRethinkingJulia/DynamicHMCModels.jl.git |
|
[
"MIT"
] | 2.1.4 | 488da266cd18e792ba8cde04197d6c7817753650 | code | 2781 | # Estimate a polynomial linear regression model with a half-T prior.
using DynamicHMCModels
ProjDir = @__DIR__
cd(ProjDir)
# Import the dataset.
data = CSV.read(joinpath("..", "..", "data", "Howell1.csv"), DataFrame; delim=';')
# Use only adults and standardize
df = filter(row -> row[:age] >= 18, data);
df[!, :weight] = convert(Vector{Float64}, df[:, :weight]);
df[!, :weight_s] = (df[:, :weight] .- mean(df[:, :weight])) / std(df[:, :weight]);
df[!, :weight_s2] = df[:, :weight_s] .^ 2;
# Define a structure to hold the data: observables, covariates,
# and the degrees of freedom for the prior.
"""
Linear regression model ``y ∼ Xβ + ϵ``, where ``ϵ ∼ N(0, σ²)`` IID.
Flat prior for `β`, half-T for `σ`.
"""
Base.@kwdef mutable struct LinearRegressionModel{Ty <: AbstractVector, Tx <: AbstractMatrix,
Tv <: Real}
"Observations."
y::Ty
"Covariates"
x::Tx
"Degrees of freedom for prior."
v::Tv
end
# Write a function to return a properly dimensioned transformation.
function make_transformation(model::LinearRegressionModel)
as((β = as(Array, size(model.x, 2)), σ = asℝ₊))
end
N = size(df, 1)
x = hcat(ones(N), hcat(df[:, :weight_s], df[:, :weight_s2]));
model = LinearRegressionModel(;y = df[:, :height], x=x, v=1.0)
# Pack parameters *as a single argument*.
function (model::LinearRegressionModel)(θ)
@unpack y, x, v = model # extract data
@unpack β, σ = θ # extract parameters
loglikelihood(Normal(0, σ), y .- x*β) + logpdf(TDist(v), σ)
end
# Evaluate the model function at some initial values
println()
model((β = [1.0, 2.0, 3.0], σ = 1.0)) |> display
println()
# Wrap the problem with a transformation, then use ForwardDiff for the gradient.
P = TransformedLogDensity(make_transformation(model), model)
∇P = ADgradient(:ForwardDiff, P);
# Tune and sample.
results = mcmc_with_warmup(Random.GLOBAL_RNG, ∇P, 1000)
posterior = P.transformation.(results.chain)
p = as_particles(posterior)
display(p)
stan_result = "
Iterations = 1:1000
Thinning interval = 1
Chains = 1,2,3,4
Samples per chain = 1000
Empirical Posterior Estimates:
Mean SD Naive SE MCSE ESS
a 154.609019750 0.36158389 0.0057171433 0.0071845548 1000
b1 5.838431778 0.27920926 0.0044146860 0.0048693502 1000
b2 -0.009985954 0.22897191 0.0036203637 0.0047224478 1000
sigma 5.110136300 0.19096315 0.0030193925 0.0030728192 1000
Quantiles:
2.5% 25.0% 50.0% 75.0% 97.5%
a 153.92392500 154.3567500 154.60700000 154.8502500 155.32100000
b1 5.27846200 5.6493250 5.83991000 6.0276275 6.39728200
b2 -0.45954687 -0.1668285 -0.01382935 0.1423620 0.43600905
sigma 4.76114350 4.9816850 5.10326000 5.2300450 5.51500975
";
# end of m4.5d.jl | DynamicHMCModels | https://github.com/StatisticalRethinkingJulia/DynamicHMCModels.jl.git |
|
[
"MIT"
] | 2.1.4 | 488da266cd18e792ba8cde04197d6c7817753650 | code | 2366 | # # Linear regression
using DynamicHMCModels
ProjDir = @__DIR__
cd(ProjDir)
# Import the dataset.
# ### snippet 5.1
df = CSV.read(joinpath("..", "..", "data", "WaffleDivorce.csv"), DataFrame; delim=';')
mean_ma = mean(df[:, :MedianAgeMarriage])
df[!, :MedianAgeMarriage_s] = convert(Vector{Float64},
(df[:, :MedianAgeMarriage]) .- mean_ma)/std(df[:, :MedianAgeMarriage]);
# Model ``y - Xβ ∼ Normal(0, σ)``. Normal priors on the coefficients, half-T for `σ`.
Base.@kwdef mutable struct WaffleDivorce{Ty <: AbstractVector,
Tx <: AbstractMatrix}
"Observations."
y::Ty
"Covariates"
x::Tx
end
# Write a function to return a properly dimensioned transformation.
function make_transformation(model::WaffleDivorce)
as((β = as(Array, size(model.x, 2)), σ = asℝ₊))
end
# Instantiate the model with data and inits.
x = hcat(ones(size(df, 1)), df[:, :MedianAgeMarriage_s]);
model = WaffleDivorce(;y=df[:, :Divorce], x=x);
# Make the model callable with the parameters *as a single argument*.
function (model::WaffleDivorce)(θ)
@unpack y, x = model # extract the data
@unpack β, σ = θ # works on the named tuple too
ll = 0.0
    ll += logpdf(Normal(10, 10), β[1]) # alpha
    ll += logpdf(Normal(0, 1), β[2]) # beta
ll += logpdf(TDist(1.0), σ)
ll += loglikelihood(Normal(0, σ), y .- x*β)
ll
end
println()
model((β = [1.0, 2.0], σ = 1.0)) |> display
println()
# Wrap the problem with a transformation, then use ForwardDiff for the gradient.
P = TransformedLogDensity(make_transformation(model), model)
∇P = ADgradient(:ForwardDiff, P);
# Tune and sample.
results = mcmc_with_warmup(Random.GLOBAL_RNG, ∇P, 1000)
posterior = P.transformation.(results.chain)
p = as_particles(posterior)
display(p)
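# Illustrative: the slope on standardized median age at marriage (β[2], bA in
# the CmdStan output below) should be clearly negative.
println("posterior mean bA ≈ ", mean(θ -> θ.β[2], posterior))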
stan_result = "
Iterations = 1:1000
Thinning interval = 1
Chains = 1,2,3,4
Samples per chain = 1000
Empirical Posterior Estimates:
Mean SD Naive SE MCSE ESS
a 9.6882466 0.22179190 0.0035068378 0.0031243061 1000
bA -1.0361742 0.21650514 0.0034232469 0.0034433245 1000
sigma 1.5180337 0.15992781 0.0025286807 0.0026279593 1000
Quantiles:
2.5% 25.0% 50.0% 75.0% 97.5%
a 9.253141 9.5393175 9.689585 9.84221500 10.11121000
bA -1.454571 -1.1821025 -1.033065 -0.89366925 -0.61711705
sigma 1.241496 1.4079225 1.504790 1.61630750 1.86642750
";
# end of m5.1d.jl | DynamicHMCModels | https://github.com/StatisticalRethinkingJulia/DynamicHMCModels.jl.git |
|
[
"MIT"
] | 2.1.4 | 488da266cd18e792ba8cde04197d6c7817753650 | code | 2789 | # # Linear regression
using DynamicHMCModels
ProjDir = @__DIR__
cd(ProjDir)
# Import the dataset.
# ### snippet 5.4
df = CSV.read(joinpath("..", "..", "data", "WaffleDivorce.csv"), DataFrame; delim=';')
mean_ma = mean(df[:, :Marriage])
df[!, :Marriage_s] = convert(Vector{Float64},
(df[:, :Marriage]) .- mean_ma)/std(df[:, :Marriage]);
mean_mam = mean(df[:, :MedianAgeMarriage])
df[!, :MedianAgeMarriage_s] = convert(Vector{Float64},
(df[:, :MedianAgeMarriage]) .- mean_mam)/std(df[:, :MedianAgeMarriage]);
# Model ``y ∼ Xβ + ϵ``, where ``ϵ ∼ N(0, σ²)`` IID. Student-t prior on σ.
Base.@kwdef mutable struct WaffleDivorce{Ty <: AbstractVector, Tx <: AbstractMatrix}
"Observations."
y::Ty
"Covariates"
x::Tx
end
# Write a function to return a properly dimensioned transformation.
function make_transformation(model::WaffleDivorce)
as((β = as(Array, size(model.x, 2)), σ = asℝ₊))
end
# Instantiate the model with data and inits.
x = hcat(ones(size(df, 1)), df[:, :Marriage_s], df[:, :MedianAgeMarriage_s]);
model = WaffleDivorce(;y=df[:, :Divorce], x=x);
# Make the type callable with the parameters *as a single argument*.
function (model::WaffleDivorce)(θ)
@unpack y, x = model # extract the data
@unpack β, σ = θ # works on the named tuple too
ll = 0.0
    ll += logpdf(Normal(10, 10), β[1])
    ll += logpdf(Normal(0, 1), β[2])
    ll += logpdf(Normal(0, 1), β[3])
ll += logpdf(TDist(1.0), σ)
ll += loglikelihood(Normal(0, σ), y .- x*β)
ll
end
println()
model((β = [1.0, 2.0, 3.0], σ = 1.0)) |> display
println()
# Wrap the problem with a transformation, then use ForwardDiff for the gradient.
P = TransformedLogDensity(make_transformation(model), model)
∇P = ADgradient(:ForwardDiff, P);
# Tune and sample.
results = mcmc_with_warmup(Random.GLOBAL_RNG, ∇P, 1000)
posterior = P.transformation.(results.chain)
p = as_particles(posterior)
display(p)
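# Illustrative: controlling for median age at marriage, the marriage-rate
# slope β[2] (bM in the CmdStan output below) is expected to be near zero.
println("posterior mean bM ≈ ", mean(θ -> θ.β[2], posterior))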
stan_result = "
Iterations = 1:1000
Thinning interval = 1
Chains = 1,2,3,4
Samples per chain = 1000
Empirical Posterior Estimates:
Mean SD Naive SE MCSE ESS
a 9.69137275 0.21507432 0.0034006235 0.0038501180 1000
bA -1.12184710 0.29039965 0.0045916216 0.0053055477 1000
bM -0.12106472 0.28705400 0.0045387223 0.0051444688 1000
sigma 1.52326545 0.16272599 0.0025729239 0.0034436330 1000
Quantiles:
2.5% 25.0% 50.0% 75.0% 97.5%
a 9.2694878 9.5497650 9.6906850 9.83227750 10.11643500
bA -1.6852295 -1.3167700 -1.1254650 -0.92889225 -0.53389157
bM -0.6889247 -0.3151695 -0.1231065 0.07218513 0.45527243
sigma 1.2421182 1.4125950 1.5107700 1.61579000 1.89891925
";
# end of m5.3d.jl | DynamicHMCModels | https://github.com/StatisticalRethinkingJulia/DynamicHMCModels.jl.git |
|
[
"MIT"
] | 2.1.4 | 488da266cd18e792ba8cde04197d6c7817753650 | code | 2411 | # Load Julia packages (libraries) needed for this script
using DynamicHMCModels
ProjDir = @__DIR__
cd(ProjDir)
# Read the milk data
df = CSV.read(joinpath("..", "..", "data", "milk.csv"), DataFrame; delim=';')
df = filter(row -> !(row[:neocortex_perc] == "NA"), df)
#df[:, :kcal_per_g] = convert(Vector{Float64}, df[:, :kcal_per_g])
df[!, :log_mass] = log.(convert(Vector{Float64}, df[:, :mass]))
# Define the model struct
Base.@kwdef mutable struct MilkModel{Ty <: AbstractVector, Tx <: AbstractMatrix}
"Observations."
y::Ty
"Covariates"
x::Tx
end
# Write a function to return properly dimensioned transformation.
function make_transformation(model::MilkModel)
as((β = as(Array, size(model.x, 2)), σ = asℝ₊))
end
# Instantiate the model with data and inits.
x = hcat(ones(size(df, 1)), df[:, :log_mass]);
model = MilkModel(;y=df[:, :kcal_per_g], x=x)
# Make the type callable with the parameters *as a single argument*.
function (model::MilkModel)(θ)
@unpack y, x, = model # extract the data
@unpack β, σ = θ # works on the named tuple too
ll = 0.0
    ll += logpdf(Normal(0, 100), β[1])
    ll += logpdf(Normal(0, 1), β[2])
ll += logpdf(TDist(1.0), σ)
ll += loglikelihood(Normal(0, σ), y .- x*β)
ll
end
println()
model((β = [1.0, 2.0], σ = 1.0))
println()
# Wrap the problem with a transformation, then use ForwardDiff for the gradient.
P = TransformedLogDensity(make_transformation(model), model)
∇P = ADgradient(:ForwardDiff, P);
# Tune and sample.
results = mcmc_with_warmup(Random.GLOBAL_RNG, ∇P, 1000)
posterior = P.transformation.(results.chain)
p = as_particles(posterior)
display(p)
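# Illustrative: a quick 89% interval for the log-mass slope β[2] (bm below).
bm_draws = [θ.β[2] for θ in posterior]
println("bm 89% interval ≈ ", quantile(bm_draws, [0.055, 0.945]))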
stan_result = "
Iterations = 1:1000
Thinning interval = 1
Chains = 1,2,3,4
Samples per chain = 1000
Empirical Posterior Estimates:
Mean SD Naive SE MCSE ESS
a 0.70472876 0.057040655 0.00090189195 0.0011398893 1000
bm -0.03150330 0.023642759 0.00037382484 0.0004712342 1000
sigma 0.18378372 0.039212805 0.00062000888 0.0011395979 1000
Quantiles:
2.5% 25.0% 50.0% 75.0% 97.5%
a 0.59112968 0.66848775 0.70444950 0.741410500 0.81915225
bm -0.07729257 -0.04708425 -0.03104865 -0.015942925 0.01424901
sigma 0.12638780 0.15605950 0.17800600 0.204319250 0.27993590
";
# End of `05/5.6d.jl`
| DynamicHMCModels | https://github.com/StatisticalRethinkingJulia/DynamicHMCModels.jl.git |
|
[
"MIT"
] | 2.1.4 | 488da266cd18e792ba8cde04197d6c7817753650 | code | 2059 | # Load Julia packages (libraries) needed for this script
using DynamicHMCModels
ProjDir = @__DIR__
cd(ProjDir)
# Read in data
delim = ';'
df = CSV.read(joinpath("..", "..", "data", "rugged.csv"), DataFrame; delim)
df = filter(row -> !(ismissing(row[:rgdppc_2000])), df)
df.log_gdp = log.(df.rgdppc_2000)
df.cont_africa = Array{Float64}(convert(Array{Int}, df.cont_africa))
Base.@kwdef mutable struct RuggedModel{Ty <: AbstractVector,
Tx <: AbstractMatrix}
"Observations."
y::Ty
"Covariates"
x::Tx
end
# Write a function to return properly dimensioned transformation.
function make_transformation(model::RuggedModel)
as((β = as(Array, size(model.x, 2)), σ = asℝ₊))
end
# Instantiate the model with data and inits.
x = hcat(ones(size(df, 1)), df[:, :rugged], df[:, :cont_africa],
df[:, :rugged] .* df[:, :cont_africa]);
model = RuggedModel(;y=df[:, :log_gdp], x=x)
# Model callable with *a single argument*.
function (problem::RuggedModel)(θ)
@unpack y, x = problem # extract the data
@unpack β, σ = θ # works on the named tuple too
ll = 0.0
    ll += logpdf(Normal(0, 100), β[1])
    ll += logpdf(Normal(0, 10), β[2])
    ll += logpdf(Normal(0, 10), β[3])
    ll += logpdf(Normal(0, 10), β[4])
ll += logpdf(TDist(1.0), σ)
ll += loglikelihood(Normal(0, σ), y .- x*β)
ll
end
println()
model((β = [1.0, 2.0, 1.0, 2.0], σ = 1.0))
println()
# Wrap the problem with a transformation, then use ForwardDiff for the gradient.
P = TransformedLogDensity(make_transformation(model), model)
∇P = ADgradient(:ForwardDiff, P);
# Tune and sample.
results = mcmc_with_warmup(Random.GLOBAL_RNG, ∇P, 1000)
posterior = P.transformation.(results.chain)
p = as_particles(posterior)
display(p)
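# Illustrative: the interaction β[4] (bAR in the rethinking output below)
# should be positive, flipping the ruggedness slope inside Africa.
println("posterior mean bAR ≈ ", mean(θ -> θ.β[4], posterior))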
# Result rethinking
rethinking = "
mean sd 5.5% 94.5% n_eff Rhat
a 9.22 0.14 9.00 9.46 282 1
bR -0.21 0.08 -0.33 -0.08 275 1
bA -1.94 0.24 -2.33 -1.59 268 1
bAR 0.40 0.14 0.18 0.62 271 1
sigma 0.96 0.05 0.87 1.04 339 1
"
# End of `08/m8.1d.jl`
| DynamicHMCModels | https://github.com/StatisticalRethinkingJulia/DynamicHMCModels.jl.git |
|
[
"MIT"
] | 2.1.4 | 488da266cd18e792ba8cde04197d6c7817753650 | code | 2058 | using DynamicHMCModels, LinearAlgebra, StatsFuns
ProjDir = @__DIR__
cd(ProjDir)
delim = ';'
df = CSV.read(joinpath("..", "..", "data", "chimpanzees.csv"), DataFrame; delim)
df.pulled_left = convert(Array{Int64}, df.pulled_left)
df.prosoc_left = convert(Array{Int64}, df.prosoc_left)
first(df, 5)
Base.@kwdef mutable struct Chimpanzees{Ty <: AbstractVector,
Tx <: AbstractMatrix}
"Observations."
y::Ty
"Covariates"
x::Tx
"Number of observations"
N::Int
end
# Write a function to return properly dimensioned transformation.
function make_transformation(model::Chimpanzees)
as( (β = as(Array, size(model.x, 2)), ) )
end
# Instantiate the model with data and inits.
N = size(df, 1)
x = hcat(ones(Int64, N), df[:, :prosoc_left]);
y = df[:, :pulled_left]
model = Chimpanzees(;y=y, x=x, N=N);
# Make the model callable with a single argument.
function (model::Chimpanzees)(θ)
@unpack y, x, N = model # extract the data
@unpack β = θ # works on the named tuple too
ll = 0.0
ll += sum(logpdf.(Normal(0, 10), β)) # a & bp
ll += sum([loglikelihood(Binomial(1, logistic(dot(x[i, :], β))), [y[i]]) for i in 1:N])
ll
end
println()
θ = (β = [1.0, 2.0],)
model(θ)
println()
# Wrap the problem with a transformation, then use ForwardDiff for the gradient.
P = TransformedLogDensity(make_transformation(model), model)
∇P = ADgradient(:ForwardDiff, P)
results = mcmc_with_warmup(Random.GLOBAL_RNG, ∇P, 1000)
posterior = P.transformation.(results.chain)
p = as_particles(posterior)
display(p)
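# Illustrative: map the linear predictor onto the probability scale for the
# prosocial-left condition.
println("P(pulled_left | prosoc_left = 1) ≈ ",
    mean(θ -> logistic(θ.β[1] + θ.β[2]), posterior))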
stan_result = "
Iterations = 1:1000
Thinning interval = 1
Chains = 1,2,3,4
Samples per chain = 1000
Empirical Posterior Estimates:
Mean SD Naive SE MCSE ESS
a 0.05103234 0.12579086 0.0019889282 0.0035186307 1000
bp 0.55711212 0.18074275 0.0028577937 0.0040160451 1000
Quantiles:
2.5% 25.0% 50.0% 75.0% 97.5%
a -0.19755400 -0.029431425 0.05024655 0.12978825 0.30087758
bp 0.20803447 0.433720250 0.55340400 0.67960975 0.91466915
";
# End of `10/m10.2d.jl`
| DynamicHMCModels | https://github.com/StatisticalRethinkingJulia/DynamicHMCModels.jl.git |
|
[
"MIT"
] | 2.1.4 | 488da266cd18e792ba8cde04197d6c7817753650 | code | 2147 | using DynamicHMCModels, StatsFuns
ProjDir = @__DIR__
delim = ';'
df = CSV.read(joinpath(ProjDir, "..", "..", "data", "chimpanzees.csv"), DataFrame; delim)
Base.@kwdef struct Chimpanzees_02
"Number of actors"
N_actors::Int
pulled_left::Vector{Int}
prosoc_left::Vector{Int}
condition::Vector{Int}
actor::Vector{Int}
end
function make_transformation(model::Chimpanzees_02)
as((a = as(Vector, model.N_actors), bp = asℝ, bpC = asℝ))
end
model = Chimpanzees_02(; N_actors = maximum(df.actor), pulled_left = df.pulled_left,
prosoc_left = df.prosoc_left, condition = df.condition,
actor = df.actor)
function (model::Chimpanzees_02)(θ)
@unpack pulled_left, prosoc_left, condition, actor = model
@unpack a, bp, bpC = θ
ℓ_likelihood = mapreduce(+, actor, condition, prosoc_left,
pulled_left) do actor, condition, prosoc_left, pulled_left
p = logistic(a[actor] + (bp + bpC * condition) * prosoc_left)
logpdf(Bernoulli(p), pulled_left)
end
P = Normal(0, 10)
ℓ_prior = logpdf(P, bpC) + logpdf(P, bp) + sum(a -> logpdf(P, a), a)
ℓ_prior + ℓ_likelihood
end
P = TransformedLogDensity(make_transformation(model), model)
∇P = ADgradient(:ForwardDiff, P)
results = mcmc_with_warmup(Random.GLOBAL_RNG, ∇P, 1000)
posterior = P.transformation.(results.chain)
p = as_particles(posterior)
display(p)
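# Illustrative: actor 2 pulled left on almost every trial, so its intercept
# a[2] is large and weakly identified (note the wide interval below).
println("posterior mean a[2] ≈ ", mean(θ -> θ.a[2], posterior))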
# Result rethinking
rethinking = "
Iterations = 1:1000
Thinning interval = 1
Chains = 1,2,3,4
Samples per chain = 1000
Empirical Posterior Estimates:
Mean SD Naive SE MCSE ESS
a.1 -0.74503184 0.26613979 0.0042080396 0.0060183398 1000
a.2 10.77955494 5.32538998 0.0842018089 0.1269148045 1000
a.3 -1.04982353 0.28535997 0.0045119373 0.0049074219 1000
a.4 -1.04898135 0.28129307 0.0044476339 0.0056325117 1000
a.5 -0.74390933 0.26949936 0.0042611590 0.0052178124 1000
a.6 0.21599365 0.26307574 0.0041595927 0.0045153523 1000
a.7 1.81090866 0.39318577 0.0062168129 0.0071483527 1000
bp 0.83979926 0.26284676 0.0041559722 0.0059795826 1000
bpC -0.12913322 0.29935741 0.0047332562 0.0049519863 1000
";
| DynamicHMCModels | https://github.com/StatisticalRethinkingJulia/DynamicHMCModels.jl.git |
|
[
"MIT"
] | 2.1.4 | 488da266cd18e792ba8cde04197d6c7817753650 | code | 3211 | using DynamicHMCModels, LinearAlgebra
ProjDir = @__DIR__
delim = ';'
df = CSV.read(joinpath(ProjDir, "..", "..", "data", "Kline.csv"), DataFrame; delim)
# New col logpop, set log() for population data
df.logpop = map((x) -> log(x), df.population);
df.society = 1:10;
Base.@kwdef mutable struct KlineModel{Ty <: AbstractVector,
Tx <: AbstractMatrix, Ts <: AbstractVector}
"Observations (total_tools)."
y::Ty
"Covariates (logpop)"
x::Tx
"Society"
s::Ts
"Number of observations (10)"
N::Int
"Number of societies (also 10)"
N_societies::Int
end
function make_transformation(model::KlineModel)
as( (β = as(Array, size(model.x, 2)), α = as(Array, model.N_societies), σ = asℝ₊) )
end
# Instantiate the model with data and inits.
N = size(df, 1)
N_societies = length(unique(df[:, :society]))
x = hcat(ones(Int64, N), df[:, :logpop]);
s = df[:, :society]
y = df[:, :total_tools]
model = KlineModel(; y=y, x=x, s=s, N=N, N_societies=N_societies)
# Make the type callable with the parameters *as a single argument*.
function (model::KlineModel)(θ)
@unpack y, x, s, N, N_societies = model # data
@unpack β, α, σ = θ # parameters
ll = 0.0
ll += logpdf(Cauchy(0, 1), σ)
ll += sum(logpdf.(Normal(0, σ), α)) # α[1:10]
    ll += logpdf(Normal(0, 10), β[1]) # a (intercept)
    ll += logpdf(Normal(0, 1), β[2]) # bp (slope on logpop)
ll += sum(
[loglikelihood(Poisson(exp(α[s[i]] + dot(x[i, :], β))), [y[i]]) for i in 1:N]
)
ll
end
println()
θ = (β = [1.0, 0.25], α = rand(Normal(0, 1), N_societies), σ = 0.2)
model(θ) |> display
println()
# Wrap the problem with a transformation, then use ForwardDiff for the gradient.
P = TransformedLogDensity(make_transformation(model), model)
∇P = ADgradient(:ForwardDiff, P);
results = mcmc_with_warmup(Random.GLOBAL_RNG, ∇P, 1000)
posterior = P.transformation.(results.chain)
p = as_particles(posterior)
display(p)
stan_result = "
Iterations = 1:1000
Thinning interval = 1
Chains = 1,2,3,4
Samples per chain = 1000
Empirical Posterior Estimates:
Mean SD Naive SE MCSE ESS
a 1.076167468 0.7704872560 0.01218247319 0.0210530022 1000.000000
bp 0.263056273 0.0823415805 0.00130193470 0.0022645077 1000.000000
a_society.1 -0.191723568 0.2421382537 0.00382854195 0.0060563054 1000.000000
a_society.2 0.054569029 0.2278506876 0.00360263570 0.0051693148 1000.000000
a_society.3 -0.035935050 0.1926364647 0.00304584994 0.0039948433 1000.000000
a_society.4 0.334355037 0.1929971201 0.00305155241 0.0063871707 913.029080
a_society.5 0.049747513 0.1801287716 0.00284808595 0.0043631095 1000.000000
a_society.6 -0.311903245 0.2096126337 0.00331426674 0.0053000536 1000.000000
a_society.7 0.148637507 0.1744680594 0.00275858223 0.0047660246 1000.000000
a_society.8 -0.164567976 0.1821341074 0.00287979309 0.0034297298 1000.000000
a_society.9 0.277066965 0.1758237250 0.00278001719 0.0055844175 991.286501
a_society.10 -0.094149204 0.2846206232 0.00450024719 0.0080735022 1000.000000
sigma_society 0.310352849 0.1374834682 0.00217380450 0.0057325226 575.187461
";
| DynamicHMCModels | https://github.com/StatisticalRethinkingJulia/DynamicHMCModels.jl.git |
|
[
"MIT"
] | 2.1.4 | 488da266cd18e792ba8cde04197d6c7817753650 | code | 4018 | using DynamicHMCModels, MCMCChains
ProjDir = @__DIR__
delim = ';'
df = CSV.read(joinpath(ProjDir, "..", "..", "data", "Kline.csv"), DataFrame; delim)
# New column logpop: the log of the population data
df.logpop = log.(df.population);
df.society = 1:10;
Base.@kwdef mutable struct KlineModel{Ty <: AbstractVector,
Tx <: AbstractMatrix, Ts <: AbstractVector}
"Observations (total_tools)."
y::Ty
"Covariates (logpop)"
x::Tx
"Society"
s::Ts
"Number of observations (10)"
N::Int
"Number of societies (also 10)"
N_societies::Int
end
function make_transformation(model::KlineModel)
as( (β = as(Array, size(model.x, 2)), α = as(Array, model.N_societies), σ = asℝ₊) )
end
# Instantiate the model with data and inits.
N = size(df, 1)
N_societies = length(unique(df[:, :society]))
x = hcat(ones(Int64, N), df[:, :logpop]);
s = df[:, :society]
y = df[:, :total_tools]
model = KlineModel(; y=y, x=x, s=s, N=N, N_societies=N_societies)
# Make the type callable with the parameters *as a single argument*.
function (model::KlineModel)(θ)
@unpack y, x, s, N, N_societies = model # data
@unpack β, α, σ = θ # parameters
ll = 0.0
ll += logpdf(Cauchy(0, 1), σ)
ll += sum(logpdf.(Normal(0, σ), α)) # α[1:10]
    ll += logpdf(Normal(0, 10), β[1]) # a (intercept)
    ll += logpdf(Normal(0, 1), β[2]) # bp (slope on logpop)
ll += sum(
[loglikelihood(Poisson(exp(α[s[i]] + dot(x[i, :], β))), [y[i]]) for i in 1:N]
)
ll
end
println()
θ = (β = [1.0, 0.25], α = rand(Normal(0, 1), N_societies), σ = 0.2)
model(θ) |> display
println()
# Wrap the problem with a transformation, then use ForwardDiff for the gradient.
P = TransformedLogDensity(make_transformation(model), model)
∇P = ADgradient(:ForwardDiff, P);
results = mcmc_with_warmup(Random.GLOBAL_RNG, ∇P, 1000)
posterior = P.transformation.(results.chain)
println()
DynamicHMC.Diagnostics.EBFMI(results.tree_statistics) |> display
println()
DynamicHMC.Diagnostics.summarize_tree_statistics(results.tree_statistics) |> display
println()
# Set variable names
parameter_names = ["a", "bp", "sigma_society"]
pooled_parameter_names = ["a_society[$i]" for i in 1:10]
# Create a3d
a3d = Array{Float64, 3}(undef, 1000, 13, 1);
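# Column layout matches the names assembled below: columns 1:2 hold β
# (a, bp), column 3 holds σ (sigma_society), and columns 4:13 hold α
# (a_society[1] through a_society[10]).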
for j in 1:1
for i in 1:1000
a3d[i, 1:2, j] = values(posterior[i].β)
a3d[i, 3, j] = values(posterior[i].σ)
a3d[i, 4:13, j] = values(posterior[i].α)
end
end
chns = MCMCChains.Chains(a3d,
vcat(parameter_names, pooled_parameter_names),
Dict(
:parameters => parameter_names,
:pooled => pooled_parameter_names
)
);
stan_result = "
Iterations = 1:1000
Thinning interval = 1
Chains = 1,2,3,4
Samples per chain = 1000
Empirical Posterior Estimates:
Mean SD Naive SE MCSE ESS
a 1.076167468 0.7704872560 0.01218247319 0.0210530022 1000.000000
bp 0.263056273 0.0823415805 0.00130193470 0.0022645077 1000.000000
a_society.1 -0.191723568 0.2421382537 0.00382854195 0.0060563054 1000.000000
a_society.2 0.054569029 0.2278506876 0.00360263570 0.0051693148 1000.000000
a_society.3 -0.035935050 0.1926364647 0.00304584994 0.0039948433 1000.000000
a_society.4 0.334355037 0.1929971201 0.00305155241 0.0063871707 913.029080
a_society.5 0.049747513 0.1801287716 0.00284808595 0.0043631095 1000.000000
a_society.6 -0.311903245 0.2096126337 0.00331426674 0.0053000536 1000.000000
a_society.7 0.148637507 0.1744680594 0.00275858223 0.0047660246 1000.000000
a_society.8 -0.164567976 0.1821341074 0.00287979309 0.0034297298 1000.000000
a_society.9 0.277066965 0.1758237250 0.00278001719 0.0055844175 991.286501
a_society.10 -0.094149204 0.2846206232 0.00450024719 0.0080735022 1000.000000
sigma_society 0.310352849 0.1374834682 0.00217380450 0.0057325226 575.187461
";
# Describe the chain
describe(chns) |> display
println()
# Describe the chain
describe(chns, sections=[:pooled])
| DynamicHMCModels | https://github.com/StatisticalRethinkingJulia/DynamicHMCModels.jl.git |
|
[
"MIT"
] | 2.1.4 | 488da266cd18e792ba8cde04197d6c7817753650 | code | 404 | module DynamicHMCModels
using Reexport, Requires
@reexport using DynamicHMC, LogDensityProblems, TransformVariables
@reexport using Distributions, Random, Statistics
@reexport using Parameters, CSV, DataFrames
@reexport using MonteCarloMeasurements
function __init__()
@require MCMCChains="c7f686f2-ff18-58e9-bc7b-31028e88f75d" include("require/chains.jl")
end
include("particles.jl")
end # module | DynamicHMCModels | https://github.com/StatisticalRethinkingJulia/DynamicHMCModels.jl.git |
|
[
"MIT"
] | 2.1.4 | 488da266cd18e792ba8cde04197d6c7817753650 | code | 1689 | """
Particles constructor for DynamicHMC
Convert DynamicHMC samples to a NamedTuple of Particles
* `posterior`: an array of NamedTuples of MCMC samples
"""
function as_particles(posterior)
d = Dict{Symbol, Union{Particles, Vector{Particles}}}()
pnt = posterior[1]
for parm in keys(pnt)
if size(pnt[parm], 1) > 1
d[parm] = Particles[]
for i in 1:size(pnt[parm], 1)
temp = Float64[]
for post in posterior
push!(temp, post[parm][i])
end
push!(d[parm], Particles(temp))
end
else
temp = Float64[]
for post in posterior
push!(temp, post[parm])
end
d[parm] = Particles(temp)
end
end
return (; d...)
end
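# Usage sketch (added for illustration; `results` would come from
# DynamicHMC's mcmc_with_warmup, as in the example scripts):
#
#   posterior = P.transformation.(results.chain)
#   p = as_particles(posterior)
#   mean(p.σ)   # posterior mean of a scalar parameter
#   mean.(p.β)  # elementwise means of a vector parameter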
function nptoa3d(posterior)
Np = length(vcat(posterior[1]...))
Ns = length(posterior)
a3d = Array{Float64,3}(undef,Ns,Np,1)
for (i,post) in enumerate(posterior)
temp = Float64[]
for p in post
push!(temp,values(p)...)
end
a3d[i,:,1] = temp'
end
parameter_names = getnames(posterior)
return (a3d, parameter_names)
end
function getnames(post)
nt = post[1]
    Np = length(vcat(nt...))
parm_names = fill("",Np)
cnt = 0
for (k,v) in pairs(nt)
N = length(v)
if isa(v,Array)
for i in 1:N
cnt += 1
parm_names[cnt] = string(k,"[",i,"]")
end
else
            cnt += 1
parm_names[cnt] = string(k)
end
end
return Symbol.(parm_names)
end
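# Illustration (added): for draws shaped like (β = [0.1, 0.2], σ = 0.5),
# getnames returns [Symbol("β[1]"), Symbol("β[2]"), :σ].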
export
Particles,
as_particles | DynamicHMCModels | https://github.com/StatisticalRethinkingJulia/DynamicHMCModels.jl.git |
|
[
"MIT"
] | 2.1.4 | 488da266cd18e792ba8cde04197d6c7817753650 | code | 1827 | import MCMCChains: Chains
function create_a3d(noofsamples, noofvariables, noofchains)
a3d = fill(0.0, noofsamples, noofvariables, noofchains)
a3d
end
function insert_chain!(a3d, chain, posterior, trans)
for i in 1:size(a3d, 1)
a3d[i,:,chain] = inverse(trans, posterior[i])
end
end
function insert_chain!(a3d, chain, posterior)
for i in 1:size(a3d, 1)
a3d[i,:,chain] = posterior[i, :]
end
end
function create_mcmcchains(a3d, cnames;start=1)
Chains(a3d, cnames; start=start)
end
function create_mcmcchains(a3d, cnames, sections::Dict{Symbol, Vector{String}};
start=1)
Chains(a3d, cnames, sections; start=start)
end
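# Usage sketch (added; sizes and names are illustrative):
#
#   a3d = create_a3d(1000, 3, 4)              # samples × parameters × chains
#   for chain in 1:4
#       # `posterior` is a samples × parameters matrix for this chain, or
#       # pass (a3d, chain, posterior, trans) to flatten untransformed draws
#       insert_chain!(a3d, chain, posterior)
#   end
#   chns = create_mcmcchains(a3d, ["a", "bp", "sigma"])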
"""
Convert DynamicHMC samples to an MCMCChains.Chains object
* `posterior`: an array of NamedTuples of MCMC samples
"""
function nptochain(posterior,tune)
    Np = length(vcat(posterior[1]...)) + 1 # include lf_eps (the leapfrog step size)
Ns = length(posterior)
a3d = Array{Float64,3}(undef,Ns,Np,1)
    ϵ = tune.ϵ
for (i,post) in enumerate(posterior)
temp = Float64[]
for p in post
push!(temp,values(p)...)
end
push!(temp,ϵ)
a3d[i,:,1] = temp'
end
parameter_names = getnames(posterior)
push!(parameter_names,"lf_eps")
chns = MCMCChains.Chains(a3d,parameter_names,
Dict(:internals => ["lf_eps"]))
return chns
end
function getnames(post)
nt = post[1]
    Np = length(vcat(nt...))
parm_names = fill("",Np)
cnt = 0
for (k,v) in pairs(nt)
N = length(v)
if isa(v,Array)
for i in 1:N
cnt += 1
parm_names[cnt] = string(k,"[",i,"]")
end
else
            cnt += 1
parm_names[cnt] = string(k)
end
end
return parm_names
end
export
create_a3d,
insert_chain!,
nptochain,
create_mcmcchains
| DynamicHMCModels | https://github.com/StatisticalRethinkingJulia/DynamicHMCModels.jl.git |
|
[
"MIT"
] | 2.1.4 | 488da266cd18e792ba8cde04197d6c7817753650 | code | 472 | using DynamicHMCModels
using Test
scripts = [
"../scripts/02/m2.1d.jl",
"../scripts/04/m4.1d.jl",
"../scripts/04/m4.2d.jl",
"../scripts/04/m4.5d.jl",
"../scripts/04/m4.5d1.jl",
"../scripts/05/m5.1d.jl",
"../scripts/05/m5.3d.jl",
"../scripts/05/m5.6d.jl",
"../scripts/10/m10.2d.jl",
"../scripts/10/m10.4d.jl",
"../scripts/12/m12.6d.jl"
]
for script in scripts
println("\n * $script *\n")
include(script)
println("\n * $script completed\n")
end | DynamicHMCModels | https://github.com/StatisticalRethinkingJulia/DynamicHMCModels.jl.git |
|
[
"MIT"
] | 2.1.4 | 488da266cd18e792ba8cde04197d6c7817753650 | code | 1478 | using DynamicHMCModels
#=
using LinearAlgebra
using StaticArrays
using TransformVariables
using LogDensityProblems
using DynamicHMC
using Parameters
using Random
using MCMCChains
=#
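# Editorial note (an interpretation, not from the original source): this
# evaluates the log-likelihood of a single measurement-record increment dyDep
# for a two-level system; the update of `rho` through the operator M resembles
# one Euler step of a diffusive quantum-trajectory (stochastic master
# equation) model.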
function LogLikelihoodMin(ω,dyDep)
dt = 0.1
cD = [0 -1im ; 1im 0]
H = (ω/2.) * cD
rho=[0.05 0 ; 0 0.95]
M = I - ((cD'*cD)/2) * dt + (cD * dyDep) - 1im * H * dt
newRho = M * rho * M'
lklhood = real(tr(newRho))- (ω* dt/2)^2
return log(lklhood)
end
struct Experiment
dyDep::Float64
end
function (problem::Experiment)((ω,)::NamedTuple{(:ω,)})
@unpack dyDep = problem # extract the data
LogLikelihoodMin(ω,dyDep)
end
dyDepObs=0.690691
p1 = Experiment(dyDepObs)
println(p1((ω=.4,)))
trans_single = as((ω=as(Real, 2, 4),))
P1 = TransformedLogDensity(trans_single, p1)
∇P1 = ADgradient(:ForwardDiff, P1)
# Sample 4 chains
a3d = Array{Float64, 3}(undef, 1000, 1, 4);
for j in 1:4
global results = mcmc_with_warmup(Random.GLOBAL_RNG, ∇P1, 1000;
reporter = NoProgressReport())
global posterior = P1.transformation.(results.chain)
for i in 1:1000
a3d[i, 1, j] = values(posterior[i].ω)
end
end
# Create MCMCChains object
parameter_names = ["ω"]
sections = Dict(
:parameters => parameter_names,
)
chns = create_mcmcchains(a3d, parameter_names, sections, start=1)
show(chns)
println()
DynamicHMC.Diagnostics.EBFMI(results.tree_statistics) |> display
println()
DynamicHMC.Diagnostics.summarize_tree_statistics(results.tree_statistics)
| DynamicHMCModels | https://github.com/StatisticalRethinkingJulia/DynamicHMCModels.jl.git |
|
[
"MIT"
] | 2.1.4 | 488da266cd18e792ba8cde04197d6c7817753650 | code | 1100 | using DynamicHMCModels, MCMCChains, Random
#Random.seed!(1233)
ProjDir = @__DIR__
cd(ProjDir)
include(joinpath(@__DIR__, "../Benchmarks/LBA/LBA_functions.jl"))
Base.@kwdef struct LBAModel{T}
data::T
N::Int
Nc::Int
end
function make_transformation(model::LBAModel)
    as((v = as(Array, asℝ₊, model.Nc), A = asℝ₊, k = asℝ₊, tau = asℝ₊))
end
for i = 1:3
N = 10
v = [1.0, 1.5]
Nc = length(v)
data=simulateLBA(;Nd=N,v=v,A=.8,k=.2,tau=.4)
model = LBAModel(; data=data, N=N, Nc=Nc)
function (model::LBAModel)(θ)
@unpack data=model
@unpack v,A,k,tau=θ
d=LBA(ν=v,A=A,k=k,τ=tau)
minRT = minimum(x->x[2],data)
logpdf(d,data)+sum(logpdf.(TruncatedNormal(0,3,0,Inf),v)) +
logpdf(TruncatedNormal(.8,.4,0,Inf),A)+logpdf(TruncatedNormal(.2,.3,0,Inf),k)+
logpdf(TruncatedNormal(.4,.1,0,minRT),tau)
end
d = [(c,r) for (c,r) in zip(data.choice,data.rt)]
p = LBAModel(d,N,Nc)
A = rand(Normal(.8, 1.0), 1)[1]
k = rand(Normal(.2, 1.0), 1)[1]
tau = rand(Normal(.4, 1.0), 1)[1]
v=rand(Normal(0.0, 1.0),Nc)
p((v=v,A=A,k=k,tau=tau))
display(d)
end | DynamicHMCModels | https://github.com/StatisticalRethinkingJulia/DynamicHMCModels.jl.git |
|
[
"MIT"
] | 2.1.4 | 488da266cd18e792ba8cde04197d6c7817753650 | docs | 2360 | # DynamicHMCModels
| **Project Status** | **Documentation** | **Build Status** |
|:-------------------------------------------------------------------------------:|:-------------------------------------------------------------------------------:|:-----------------------------------------------------------------------------------------------:|
|![][project-status-img] | [![][docs-stable-img]][docs-stable-url] [![][docs-dev-img]][docs-dev-url] | [![][travis-img]][travis-url] |
## Introduction
This package contains Julia versions of the MCMC models contained in the R package "rethinking" associated with the book [Statistical Rethinking](https://xcelab.net/rm/statistical-rethinking/) by Richard McElreath. It is part of the [StatisticalRethinkingJulia](https://github.com/StatisticalRethinkingJulia) Github organization of packages.
This package implements the models using [DynamicHMC](https://github.com/tpapp/DynamicHMC.jl).
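A minimal sketch of running one of the model scripts (assuming a cloned or `dev`ed copy of the repository, so that the `scripts` folder is available, and Julia ≥ 1.4 for `pkgdir`):

```julia
using DynamicHMCModels
# Run, e.g., the chapter 4 height model; the path mirrors test/runtests.jl.
include(joinpath(pkgdir(DynamicHMCModels), "scripts", "04", "m4.1d.jl"))
```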
## Note
Converted to DynamicHMC 2.0
## Acknowledgements
Tamas Papp has been very helpful during the development of the DynamicHMC versions of the models.
## Questions and issues
Questions and contributions are very welcome, as are feature requests and suggestions. Please open an [issue][issues-url] if you encounter any problems or have a question.
[docs-dev-img]: https://img.shields.io/badge/docs-dev-blue.svg
[docs-dev-url]: https://statisticalrethinkingjulia.github.io/DynamicHMCModels.jl/latest
[docs-stable-img]: https://img.shields.io/badge/docs-stable-blue.svg
[docs-stable-url]: https://statisticalrethinkingjulia.github.io/DynamicHMCModels.jl/stable
[travis-img]: https://travis-ci.com/StatisticalRethinkingJulia/DynamicHMCModels.jl.svg?branch=master
[travis-url]: https://travis-ci.com/StatisticalRethinkingJulia/DynamicHMCModels.jl
[codecov-img]: https://codecov.io/gh/StatisticalRethinkingJulia/DynamicHMCModels.jl/branch/master/graph/badge.svg
[codecov-url]: https://codecov.io/gh/StatisticalRethinkingJulia/DynamicHMCModels.jl
[issues-url]: https://github.com/StatisticalRethinkingJulia/DynamicHMCModels.jl/issues
[project-status-img]: https://img.shields.io/badge/lifecycle-wip-orange.svg
| DynamicHMCModels | https://github.com/StatisticalRethinkingJulia/DynamicHMCModels.jl.git |
|
[
"MIT"
] | 0.5.0 | 50fe5082e08b83183a2f1521a8c52214988ef972 | code | 515 | using Documenter, FinEtools, FinEtoolsMultithreading
makedocs(
modules = [FinEtoolsMultithreading],
doctest = false, clean = true,
warnonly = Documenter.except(:linkcheck, :footnote),
format = Documenter.HTML(prettyurls = false),
authors = "Petr Krysl",
sitename = "FinEtoolsMultithreading.jl",
pages = Any[
"Home" => "index.md",
"How to guide" => "guide/guide.md",
"Types and Functions" => Any[
"man/man.md"]
]
)
deploydocs(
repo = "github.com/PetrKryslUCSD/FinEtoolsMultithreading.jl.git",
)
| FinEtoolsMultithreading | https://github.com/PetrKryslUCSD/FinEtoolsMultithreading.jl.git |
|
[
"MIT"
] | 0.5.0 | 50fe5082e08b83183a2f1521a8c52214988ef972 | code | 991 | println("Current folder: $(pwd())")
if length(ARGS) < 1
    error("I need at least one argument: N (mesh subdivision)")
end
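# Usage sketch (added; the script file name below is illustrative):
#   julia -t <nthreads> run_parallel_driver.jl N [ntasks] [assembly_only]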
using Pkg
# Pkg.add("ThreadPinning")
Pkg.activate(".")
Pkg.instantiate()
using LinearAlgebra
LinearAlgebra.BLAS.set_num_threads(1)
# Turn off thread pinning because it seems to interfere with the graph coloring library.
# using ThreadPinning
# ThreadPinning.Prefs.set_os_warning(false)
# pinthreads(:cores)
N = parse(Int, ARGS[1])
ntasks = Threads.nthreads()
if length(ARGS) > 1
ntasks = parse(Int, ARGS[2])
end
assembly_only = true
if length(ARGS) > 2
assembly_only = parse(Bool, ARGS[3])
end
include(raw"sphere_modes_parallel.jl")
using .sphere_modes_parallel;
using FinEtoolsMultithreading
Pkg.status("FinEtoolsMultithreading")
NTRIALS = 5
for trial in 1:NTRIALS
@info "Trial $(trial) out of $(NTRIALS): nthreads=$(Threads.nthreads()), ntasks=$(ntasks), N=$(N)"
sphere_modes_parallel.run(N, ntasks, assembly_only)
GC.gc(true)
end
| FinEtoolsMultithreading | https://github.com/PetrKryslUCSD/FinEtoolsMultithreading.jl.git |
|
[
"MIT"
] | 0.5.0 | 50fe5082e08b83183a2f1521a8c52214988ef972 | code | 498 | println("Current folder: $(pwd())")
if length(ARGS) < 1
error("I need one argument: N (mesh subdivision)")
end
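# Usage sketch (added; the script file name below is illustrative):
#   julia run_serial_driver.jl N [assembly_only]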
using Pkg
Pkg.activate(".")
Pkg.instantiate()
N = parse(Int, ARGS[1])
assembly_only = true
if length(ARGS) > 1
assembly_only = parse(Bool, ARGS[2])
end
include(raw"sphere_modes_serial.jl")
using .sphere_modes_serial;
NTRIALS = 5
for trial in 1:NTRIALS
@info "Trial $(trial) out of $(NTRIALS): N=$(N)"
sphere_modes_serial.run(N, assembly_only)
GC.gc(true)
end
| FinEtoolsMultithreading | https://github.com/PetrKryslUCSD/FinEtoolsMultithreading.jl.git |
|
[
"MIT"
] | 0.5.0 | 50fe5082e08b83183a2f1521a8c52214988ef972 | code | 10463 | module sphere_modes_parallel
using FinEtools
using FinEtools.AlgoBaseModule: matrix_blocked
using FinEtoolsAcoustics
using FinEtoolsMultithreading
using FinEtoolsMultithreading.Exports
using FinEtoolsMultithreading: decompose,
parallel_matrix_assembly!, SysmatAssemblerSparsePatt, SysmatAssemblerSparsePattwLookup
using FinEtools.MeshExportModule
using ECLGraphColor
using LinearAlgebra
using Arpack: eigs
using DataDrop
# For the data
# rho = 1.2*phun("kg/m^3");# mass density
# c = 340.0*phun("m/s");# sound speed
# bulk = c^2*rho;
# R = 1000.0*phun("mm");# radius of the piston
# the reference
# @article{GAO2013914,
# title = {Eigenvalue analysis for acoustic problem in 3D by boundary element method with the block Sakurai–Sugiura method},
# journal = {Engineering Analysis with Boundary Elements},
# volume = {37},
# number = {6},
# pages = {914-923},
# year = {2013},
# issn = {0955-7997},
# doi = {https://doi.org/10.1016/j.enganabound.2013.03.015},
# url = {https://www.sciencedirect.com/science/article/pii/S0955799713000714},
# author = {Haifeng Gao and Toshiro Matsumoto and Toru Takahashi and Hiroshi Isakari},
# keywords = {Eigenvalues, Acoustic, The block SS method, Boundary element method, Burton–Miller's method},
# abstract = {This paper presents accurate numerical solutions for nonlinear eigenvalue analysis of three-dimensional acoustic cavities by boundary element method (BEM). To solve the nonlinear eigenvalue problem (NEP) formulated by BEM, we employ a contour integral method, called block Sakurai–Sugiura (SS) method, by which the NEP is converted to a standard linear eigenvalue problem and the dimension of eigenspace is reduced. The block version adopted in present work can also extract eigenvalues whose multiplicity is larger than one, but for the complex connected region which includes a internal closed boundary, the methodology yields fictitious eigenvalues. The application of the technique is demonstrated through the eigenvalue calculation of sphere with unique homogenous boundary conditions, cube with mixed boundary conditions and a complex connected region formed by cubic boundary and spherical boundary, however, the fictitious eigenvalues can be identified by Burton–Miller's method. These numerical results are supported by appropriate convergence study and comparisons with close form.}
# }
# shows the wave numbers in Table 1.
#=
The multiplicity of the Dirichlet eigenvalues.
Wavenumber*R Multiplicity
3.14159, 6.28319, 9.42478 1
4.49340, 7.72525, 10.90412 3
5.76346, 9.09501, 12.32294 5
6.98793, 10.41711, 13.69802 7
8.18256, 11.70491, 15.03966 9
9.35581, 12.96653, 16.35471 11
=#
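# Worked check (added for illustration): the lowest Dirichlet mode has
# k*R = pi, so with c = 1500 m/s and R = 0.5 m used below,
# f = (k*R) * c / (2*pi*R) = pi * 1500 / (2*pi*0.5) = 1500 Hz, i.e. c/(2R).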
# Sphere with Dirichlet boundary conditions: modal analysis.
# Sphere of radius $(R), in WATER.
# Hexahedral H8 mesh.
# Exact fundamental frequency: $(c/2/R)
function run(N=2, ntasks=Threads.nthreads(), assembly_only=false)
rho = 1000 * phun("kg/m^3")# mass density
c = 1500.0 * phun("m/s")# sound speed
bulk = c^2 * rho
R = 500.0 * phun("mm")# radius of the sphere
tolerance = R / 1e3
neigvs = 7
wn_table = [
([3.14159, 6.28319, 9.42478], 1),
([4.49340, 7.72525, 10.90412], 3),
([5.76346, 9.09501, 12.32294], 5),
([6.98793, 10.41711, 13.69802], 7),
([8.18256, 11.70491, 15.03966], 9),
([9.35581, 12.96653, 16.35471], 11),
]
# @info "Reference frequencies"
# for i in axes(wn_table, 1)
# fq = wn_table[i][1] ./ R .* c / (2 * pi)
# @info "$(fq), multiplicity $(wn_table[i][2])"
# end
fens, fes = H8sphere(R, N)
renumb(c) = c[[1, 4, 3, 2, 5, 8, 7, 6]]
fens1, fes1 = mirrormesh(
fens,
fes,
[-1.0, 0.0, 0.0],
[0.0, 0.0, 0.0],
renumb=renumb,
)
fens, newfes1, fes2 = mergemeshes(fens1, fes1, fens, fes, tolerance)
fes = cat(newfes1, fes2)
fens1, fes1 = mirrormesh(
fens,
fes,
[0.0, -1.0, 0.0],
[0.0, 0.0, 0.0],
renumb=renumb,
)
fens, newfes1, fes2 = mergemeshes(fens1, fes1, fens, fes, tolerance)
fes = cat(newfes1, fes2)
fens1, fes1 = mirrormesh(
fens,
fes,
[0.0, 0.0, -1.0],
[0.0, 0.0, 0.0],
renumb=renumb,
)
fens, newfes1, fes2 = mergemeshes(fens1, fes1, fens, fes, tolerance)
fes = cat(newfes1, fes2)
@info "$(count(fens)) nodes"
geom = NodalField(fens.xyz)
P = NodalField(zeros(size(fens.xyz, 1), 1))
bfes = meshboundary(fes)
setebc!(P, connectednodes(bfes))
numberdofs!(P)
mass_times = Dict{String,Vector{Float64}}()
t1 = time()
n2e = FENodeToFEMapThr(fes, nnodes(P))
mass_times["FENodeToFEMap"] = [time() - t1]
println("Make node to element map = $(mass_times["FENodeToFEMap"]) [s]")
material = MatAcoustFluid(bulk, rho)
GC.enable(false)
AT = SysmatAssemblerSparsePatt
# AT = SysmatAssemblerSparsePattwLookup
t0 = time()
t1 = time()
e2e = FElemToNeighborsMap(n2e, fes, ECLGraphColor.int_type())
mass_times["FElemToNeighborsMap"] = [time() - t1]
println(" Make element to neighbor map = $(mass_times["FElemToNeighborsMap"]) [s]")
t1 = time()
coloring = FinEtoolsMultithreading.element_coloring(fes, e2e, ntasks)
mass_times["ElementColors"] = [time() - t1]
println(" Compute element colors = $(mass_times["ElementColors"]) [s]")
t1 = time()
n2n = FENodeToNeighborsMap(n2e, fes)
mass_times["FENodeToNeighborsMap"] = [time() - t1]
println(" Make node to neighbor map = $(mass_times["FENodeToNeighborsMap"]) [s]")
t1 = time()
K_pattern = csc_symmetric_pattern(P.dofnums, nalldofs(P), n2n, eltype(P.values))
mass_times["SparsityPattern"] = [time() - t1]
println(" Sparsity pattern = $(mass_times["SparsityPattern"]) [s]")
t1 = time()
decomposition = decompose(fes, coloring,
(fessubset) -> FEMMAcoust(IntegDomain(fessubset, GaussRule(3, 2)), material), ntasks)
mass_times["DomainDecomposition"] = [time() - t1]
println(" Domain decomposition = $(mass_times["DomainDecomposition"]) [s]")
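    # Warm-up of the assembler on a dummy 24x24 element matrix (presumably to
    # trigger compilation of the assembly path before the timed run below).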
assembler = AT(K_pattern)
startassembly!(assembler, 24, 24, 1000, nalldofs(P), nalldofs(P))
assemble!(assembler, zeros(24, 24), 1:24, 1:24)
t1 = time()
Ma = parallel_matrix_assembly!(
AT(K_pattern),
decomposition,
(femm, assmblr) -> acousticmass(femm, assmblr, geom, P),
)
mass_times["AssemblyOfValues"] = [time() - t1]
println(" Add to matrix = $(mass_times["AssemblyOfValues"]) [s]")
mass_times["TotalAssemblyMass"] = [time() - t0]
println("Assembly MASS total = $(mass_times["TotalAssemblyMass"]) [s]")
GC.enable(true)
GC.enable(false)
stiffness_times = Dict{String,Vector{Float64}}()
t0 = time()
t1 = time()
e2e = FElemToNeighborsMap(n2e, fes)
stiffness_times["FElemToNeighborsMap"] = [time() - t1]
println(" Make element to neighbor map = $(stiffness_times["FElemToNeighborsMap"]) [s]")
t1 = time()
coloring = FinEtoolsMultithreading.element_coloring(fes, e2e)
stiffness_times["ElementColors"] = [time() - t1]
println(" Compute element colors = $(stiffness_times["ElementColors"]) [s]")
t1 = time()
n2n = FENodeToNeighborsMap(n2e, fes)
stiffness_times["FENodeToNeighborsMap"] = [time() - t1]
println(" Make node to neighbor map = $(stiffness_times["FENodeToNeighborsMap"]) [s]")
t1 = time()
K_pattern = csc_symmetric_pattern(P.dofnums, nalldofs(P), n2n, eltype(P.values))
stiffness_times["SparsityPattern"] = [time() - t1]
println(" Sparsity pattern = $(stiffness_times["SparsityPattern"]) [s]")
t1 = time()
decomposition = decompose(fes, coloring,
(fessubset) -> FEMMAcoust(IntegDomain(fessubset, GaussRule(3, 2)), material), ntasks)
stiffness_times["DomainDecomposition"] = [time() - t1]
println(" Domain decomposition = $(stiffness_times["DomainDecomposition"]) [s]")
t1 = time()
@time Ka = parallel_matrix_assembly!(
AT(K_pattern),
decomposition,
(femm, assmblr) -> acousticstiffness(femm, assmblr, geom, P),
)
stiffness_times["AssemblyOfValues"] = [time() - t1]
println(" Add to matrix = $(stiffness_times["AssemblyOfValues"]) [s]")
stiffness_times["TotalAssemblyStiffness"] = [time() - t0]
println("Assembly STIFFNESS total = $(stiffness_times["TotalAssemblyStiffness"]) [s]")
GC.enable(true)
if assembly_only
isdir("$(N)") || mkdir("$(N)")
n = DataDrop.with_extension(joinpath("$(N)", "sphere_modes_parallel-timing-parallel-stiffness-nth=$(ntasks)"), "json")
if isfile(n)
storedtimes = DataDrop.retrieve_json(n)
for k in keys(storedtimes)
stiffness_times[k] = cat(stiffness_times[k], storedtimes[k], dims=1)
end
end
DataDrop.store_json(n, stiffness_times)
n = DataDrop.with_extension(joinpath("$(N)", "sphere_modes_parallel-timing-parallel-mass-nth=$(ntasks)"), "json")
if isfile(n)
storedtimes = DataDrop.retrieve_json(n)
for k in keys(storedtimes)
mass_times[k] = cat(mass_times[k], storedtimes[k], dims=1)
end
end
DataDrop.store_json(n, mass_times)
return
end
Ma_ff = matrix_blocked(Ma, nfreedofs(P), nfreedofs(P))[:ff]
Ka_ff = matrix_blocked(Ka, nfreedofs(P), nfreedofs(P))[:ff]
d, v, nconv = eigs(Ka_ff, Ma_ff; nev=neigvs, which=:SM, explicittransform=:none)
v = real.(v)
fs = real(sqrt.(complex(d))) ./ (2 * pi)
@info("Frequencies (1:5): $(fs[1:5]) [Hz]")
@info "Reference frequencies"
for i in axes(wn_table, 1)
fq = wn_table[i][1] ./ R .* c / (2 * pi)
@info "$(fq), multiplicity $(wn_table[i][2])"
end
ks = (2 * pi) .* fs ./ c ./ phun("m")
    # @info("Wavenumbers: $(ks) [1/m]")
File = "sphere_modes_parallel.vtk"
scalarllist = Any[]
for n = [2, 5, 7]
scattersysvec!(P, v[:, n])
push!(scalarllist, ("Pressure_mode_$n", deepcopy(P.values)))
end
vtkexportmesh(
File,
connasarray(fes),
geom.values,
FinEtools.MeshExportModule.VTK.H8;
scalars=scalarllist,
)
# @async run(`"paraview.exe" $File`)
true
end # run
end # module sphere_modes_parallel
nothing
| FinEtoolsMultithreading | https://github.com/PetrKryslUCSD/FinEtoolsMultithreading.jl.git |
|
[
"MIT"
] | 0.5.0 | 50fe5082e08b83183a2f1521a8c52214988ef972 | code | 6970 | module sphere_modes_parallel
using FinEtools
using FinEtools.AlgoBaseModule: matrix_blocked
using FinEtoolsAcoustics
using FinEtoolsMultithreading.Exports
using FinEtools.MeshExportModule
using LinearAlgebra
using Arpack: eigs
using DataDrop
# For the data
# rho = 1.2*phun("kg/m^3");# mass density
# c = 340.0*phun("m/s");# sound speed
# bulk = c^2*rho;
# R = 1000.0*phun("mm");# radius of the piston
# the reference
# @article{GAO2013914,
# title = {Eigenvalue analysis for acoustic problem in 3D by boundary element method with the block Sakurai–Sugiura method},
# journal = {Engineering Analysis with Boundary Elements},
# volume = {37},
# number = {6},
# pages = {914-923},
# year = {2013},
# issn = {0955-7997},
# doi = {https://doi.org/10.1016/j.enganabound.2013.03.015},
# url = {https://www.sciencedirect.com/science/article/pii/S0955799713000714},
# author = {Haifeng Gao and Toshiro Matsumoto and Toru Takahashi and Hiroshi Isakari},
# keywords = {Eigenvalues, Acoustic, The block SS method, Boundary element method, Burton–Miller's method},
# abstract = {This paper presents accurate numerical solutions for nonlinear eigenvalue analysis of three-dimensional acoustic cavities by boundary element method (BEM). To solve the nonlinear eigenvalue problem (NEP) formulated by BEM, we employ a contour integral method, called block Sakurai–Sugiura (SS) method, by which the NEP is converted to a standard linear eigenvalue problem and the dimension of eigenspace is reduced. The block version adopted in present work can also extract eigenvalues whose multiplicity is larger than one, but for the complex connected region which includes a internal closed boundary, the methodology yields fictitious eigenvalues. The application of the technique is demonstrated through the eigenvalue calculation of sphere with unique homogenous boundary conditions, cube with mixed boundary conditions and a complex connected region formed by cubic boundary and spherical boundary, however, the fictitious eigenvalues can be identified by Burton–Miller's method. These numerical results are supported by appropriate convergence study and comparisons with close form.}
# }
# shows the wave numbers in Table 1.
#=
The multiplicity of the Dirichlet eigenvalues.
Wavenumber*R Multiplicity
3.14159, 6.28319, 9.42478 1
4.49340, 7.72525, 10.90412 3
5.76346, 9.09501, 12.32294 5
6.98793, 10.41711, 13.69802 7
8.18256, 11.70491, 15.03966 9
9.35581, 12.96653, 16.35471 11
=#
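# Worked check (added for illustration): the lowest Dirichlet mode has
# k*R = pi, so with c = 1500 m/s and R = 0.5 m used below,
# f = (k*R) * c / (2*pi*R) = pi * 1500 / (2*pi*0.5) = 1500 Hz, i.e. c/(2R).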
# Sphere with Dirichlet boundary conditions: modal analysis.
# Sphere of radius $(R), in WATER.
# Hexahedral H8 mesh.
# Exact fundamental frequency: $(c/2/R)
function run(N = 2, ntasks = Threads.nthreads(), assembly_only = false)
times = Dict{String, Vector{Float64}}()
rho = 1000 * phun("kg/m^3")# mass density
c = 1500.0 * phun("m/s")# sound speed
bulk = c^2 * rho
R = 500.0 * phun("mm")# radius of the sphere
tolerance = R / 1e3
neigvs = 7
wn_table = [
([3.14159, 6.28319, 9.42478], 1),
([4.49340, 7.72525, 10.90412], 3),
([5.76346, 9.09501, 12.32294], 5),
([6.98793, 10.41711, 13.69802], 7),
([8.18256, 11.70491, 15.03966], 9),
([9.35581, 12.96653, 16.35471], 11),
]
# @info "Reference frequencies"
# for i in axes(wn_table, 1)
# fq = wn_table[i][1] ./ R .* c / (2 * pi)
# @info "$(fq), multiplicity $(wn_table[i][2])"
# end
fens, fes = H8sphere(R, N)
renumb(c) = c[[1, 4, 3, 2, 5, 8, 7, 6]]
fens1, fes1 = mirrormesh(
fens,
fes,
[-1.0, 0.0, 0.0],
[0.0, 0.0, 0.0],
renumb = renumb,
)
fens, newfes1, fes2 = mergemeshes(fens1, fes1, fens, fes, tolerance)
fes = cat(newfes1, fes2)
fens1, fes1 = mirrormesh(
fens,
fes,
[0.0, -1.0, 0.0],
[0.0, 0.0, 0.0],
renumb = renumb,
)
fens, newfes1, fes2 = mergemeshes(fens1, fes1, fens, fes, tolerance)
fes = cat(newfes1, fes2)
fens1, fes1 = mirrormesh(
fens,
fes,
[0.0, 0.0, -1.0],
[0.0, 0.0, 0.0],
renumb = renumb,
)
fens, newfes1, fes2 = mergemeshes(fens1, fes1, fens, fes, tolerance)
fes = cat(newfes1, fes2)
@info "$(count(fens)) nodes"
geom = NodalField(fens.xyz)
P = NodalField(zeros(size(fens.xyz, 1), 1))
bfes = meshboundary(fes)
setebc!(P, connectednodes(bfes))
numberdofs!(P)
t1 = time()
n2e = FENodeToFEMapThr(fes, nnodes(P))
times["FENodeToFEMap"] = [time() - t1]
println("Make node to element map = $(times["FENodeToFEMap"]) [s]")
material = MatAcoustFluid(bulk, rho)
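    # Note (added): parallel_make_matrix bundles the steps spelled out in the
    # fine-grained variant of this script: node/element neighbor maps, element
    # coloring, sparsity-pattern construction, domain decomposition, and the
    # numeric fill.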
t1 = time()
Ma = parallel_make_matrix(
fes,
P.dofnums,
nalldofs(P),
eltype(P.values),
n2e,
(fessubset) -> FEMMAcoust(IntegDomain(fessubset, GaussRule(3, 2)), material),
(femm, assmblr) -> acousticmass(femm, assmblr, geom, P),
ntasks,
:CSC
)
# Ma = acousticmass(femm, geom, P)
times["AssembleMass"] = [time() - t1]
println("Assemble mass = $(times["AssembleMass"]) [s]")
t1 = time()
Ka = parallel_make_matrix(
fes,
P.dofnums,
nalldofs(P),
eltype(P.values),
n2e,
(fessubset) -> FEMMAcoust(IntegDomain(fessubset, GaussRule(3, 2)), material),
(femm, assmblr) -> acousticstiffness(femm, assmblr, geom, P),
ntasks,
:CSC
)
# Ka = acousticstiffness(femm, geom, P)
times["AssembleStiffness"] = [time() - t1]
println("Assemble stiffness = $(times["AssembleStiffness"]) [s]")
if assembly_only
isdir("$(N)") || mkdir("$(N)")
n = DataDrop.with_extension(joinpath("$(N)", "sphere_modes_parallel-timing-parallel-nth=$(ntasks)"), "json")
if isfile(n)
storedtimes = DataDrop.retrieve_json(n)
for k in keys(storedtimes)
times[k] = cat(times[k], storedtimes[k], dims = 1)
end
end
DataDrop.store_json(n, times)
return
end
Ma_ff = matrix_blocked(Ma, nfreedofs(P), nfreedofs(P))[:ff]
Ka_ff = matrix_blocked(Ka, nfreedofs(P), nfreedofs(P))[:ff]
d, v, nconv = eigs(Ka_ff, Ma_ff; nev = neigvs, which = :SM, explicittransform = :none)
v = real.(v)
fs = real(sqrt.(complex(d))) ./ (2 * pi)
# @info("Frequencies (1:5): $(fs[1:5]) [Hz]")
ks = (2 * pi) .* fs ./ c ./ phun("m")
    # @info("Wavenumbers: $(ks) [1/m]")
File = "sphere_modes_parallel.vtk"
scalarllist = Any[]
for n = [2, 5, 7]
scattersysvec!(P, v[:, n])
push!(scalarllist, ("Pressure_mode_$n", deepcopy(P.values)))
end
vtkexportmesh(
File,
connasarray(fes),
geom.values,
FinEtools.MeshExportModule.VTK.H8;
scalars = scalarllist,
)
# @async run(`"paraview.exe" $File`)
true
end # run
end # module sphere_modes_parallel
nothing
| FinEtoolsMultithreading | https://github.com/PetrKryslUCSD/FinEtoolsMultithreading.jl.git |