| licenses | version | tree_hash | path | type | size | text | package_name | repo |
|---|---|---|---|---|---|---|---|---|
[
"MIT"
] | 0.5.1 | 516ef8294ebd05e00e48c4fc9c73b8c545404f9d | code | 835 | @testset "reflect" begin
@test FactorRotations.reflect_cols(ones(8, 3)) == ones(1, 3)
@test FactorRotations.reflect_cols(hcat(ones(8), -ones(8))) == [1 -1]
@test FactorRotations.reflect_cols(hcat(-ones(8), zeros(8), ones(8))) == [-1 1 1]
@testset "FactorRotation" begin
Imat = diagm(ones(Float64, 3))
r = FactorRotation(hcat(ones(4), zeros(4), -ones(4)), Imat, ones(4))
expectation = FactorRotation(
hcat(ones(4), zeros(4), ones(4)),
diagm(Float64[1, 1, -1]),
ones(4),
)
@test reflect(r).T == expectation.T
@test reflect(r).L == expectation.L
@test reflect(r).phi == expectation.phi
reflect!(r)
@test r.T == expectation.T
@test r.L == expectation.L
@test r.phi == expectation.phi
end
end
| FactorRotations | https://github.com/p-gw/FactorRotations.jl.git |
|
[
"MIT"
] | 0.5.1 | 516ef8294ebd05e00e48c4fc9c73b8c545404f9d | code | 6295 | @testset "rotate" begin
# initial values
@test_throws ArgumentError rotate(A, Varimax(), init = rand(10, 10))
@test_throws ArgumentError rotate(A, Varimax(), init = rand(8, 2))
rot_default_init = rotate(A, Varimax(), g_atol = 1e-7)
rot_identity_init = rotate(A, Varimax(); g_atol = 1e-7, init)
@test loadings(rot_default_init) ≈ loadings(rot_identity_init) atol = 1e-7
@test rotation(rot_default_init) ≈ rotation(rot_identity_init) atol = 1e-7
@test factor_correlation(rot_default_init) ≈ factor_correlation(rot_identity_init) atol =
1e-7
# in-place rotation
B = copy(A)
rotate!(B, Quartimax())
@test B == loadings(rotate(A, Quartimax()))
# random starts
@test_warn "Ignoring initial starting values" rotate(
A,
Varimax(),
randomstarts = 3,
init = I(2),
)
@test_nowarn rotate(A, Varimax(), randomstarts = 3)
# convergence
struct NonConverging <: RotationMethod{Orthogonal} end
function FactorRotations.criterion_and_gradient!(
∇Q::Union{Nothing,AbstractMatrix},
::NonConverging,
m::AbstractMatrix,
)
isnothing(∇Q) || fill!(∇Q, one(eltype(∇Q)))
return 1.0
end
@test_warn "did not converge" rotate(A, NonConverging())
@test_warn "did not converge" rotate(A, NonConverging(), randomstarts = 3)
@test_warn "did not converge" rotate(
ones(8, 2),
LinearRightConstant(1.0),
randomstarts = true,
)
# normalize
B = copy(A)
rotate(B, Varimax(), normalize = true)
@test B == A
@testset "parse_randomstarts" begin
@test FactorRotations.parse_randomstarts(true) == 100
@test FactorRotations.parse_randomstarts(true; default = 10) == 10
@test FactorRotations.parse_randomstarts(false) == 0
@test FactorRotations.parse_randomstarts(9) == 9
@test_throws ArgumentError FactorRotations.parse_randomstarts(-1)
@test_throws ArgumentError FactorRotations.parse_randomstarts(0)
end
@testset "RotationState" begin
orthogonal_state = FactorRotations.RotationState(Orthogonal, init, A)
@test orthogonal_state.init == init
@test orthogonal_state.A == A
@test orthogonal_state.T == init
@test orthogonal_state.L == A * init
@test orthogonal_state.iterations == FactorRotations.IterationState[]
@test isnan(FactorRotations.minimumQ(orthogonal_state))
oblique_state = FactorRotations.RotationState(Oblique, init, A)
@test oblique_state.init == init
@test oblique_state.A == A
@test oblique_state.T == init
@test oblique_state.Ti == inv(init)
@test oblique_state.L == A * inv(init)'
@test oblique_state.iterations == FactorRotations.IterationState[]
@test isnan(FactorRotations.minimumQ(oblique_state))
struct BadRotation <: FactorRotations.RotationType end
@test_throws "Unsupported rotation type BadRotation" FactorRotations.RotationState(
BadRotation,
init,
A,
)
end
@testset "gradient_f!" begin
orthogonal_state = FactorRotations.RotationState(Orthogonal, init, A)
g = fill!(similar(init), NaN)
@test g === FactorRotations.gradient_f!(g, orthogonal_state, zeros(size(A)))
@test FactorRotations.gradient_f!(g, orthogonal_state, zeros(size(A))) ==
zeros(size(init))
oblique_state = FactorRotations.RotationState(Oblique, init, A)
fill!(g, NaN)
@test g === FactorRotations.gradient_f!(g, oblique_state, zeros(size(A)))
@test FactorRotations.gradient_f!(g, oblique_state, zeros(size(A))) ==
zeros(size(init))
end
@testset "project_G!" begin
Gp = zeros(Float64, size(init))
G = rand(size(init)...)
orthogonal_state = FactorRotations.RotationState(Orthogonal, init, A)
@test Gp === FactorRotations.project_G!(Gp, orthogonal_state, G)
@test FactorRotations.project_G!(Gp, orthogonal_state, G) != zeros(size(init))
@test Gp != zeros(size(init))
Gp = zeros(Float64, size(init))
oblique_state = FactorRotations.RotationState(Oblique, init, A)
@test Gp === FactorRotations.project_G!(Gp, oblique_state, G)
@test FactorRotations.project_G!(Gp, oblique_state, G) != zeros(size(init))
@test Gp != zeros(size(init))
end
@testset "project_X!" begin
state = FactorRotations.RotationState(Orthogonal, init, A)
X = fill!(similar(init), NaN)
@test X === FactorRotations.project_X!(X, state, I(2))
@test FactorRotations.project_X!(X, state, I(2)) == I(2)
state = FactorRotations.RotationState(Oblique, init, A)
fill!(X, NaN)
@test X === FactorRotations.project_X!(X, state, I(2))
@test FactorRotations.project_X!(X, state, I(2)) == I(2)
end
@testset "update_state!" begin
Tt = [1 1; 1 0]
orthogonal_state = FactorRotations.RotationState(Orthogonal, init, A)
@test FactorRotations.update_state!(orthogonal_state, Tt) == A * Tt
@test orthogonal_state.L == A * Tt
oblique_state = FactorRotations.RotationState(Oblique, init, A)
@test FactorRotations.update_state!(oblique_state, Tt) == A * inv(Tt)'
@test oblique_state.Ti == inv(Tt)
@test oblique_state.L == A * inv(Tt)'
end
@testset "MultivariateStatsExt" begin
using MultivariateStats
X = rand(3, 100)
methods = [FactorAnalysis, PCA, PPCA]
for method in methods
model = fit(method, X; maxoutdim = 2)
raw_loadings = MultivariateStats.loadings(model)
rot = rotate(raw_loadings, Varimax())
rotated_loadings = FactorRotations.loadings(rot)
@test size(raw_loadings) == (3, 2)
@test size(FactorRotations.loadings(rotate(model, Varimax()))) == (3, 2)
@test rotated_loadings == FactorRotations.loadings(rotate(model, Varimax()))
@test MultivariateStats.loadings(model) == raw_loadings
@test rotated_loadings == rotate!(model, Varimax())
@test MultivariateStats.loadings(model) == rotated_loadings
end
end
end
| FactorRotations | https://github.com/p-gw/FactorRotations.jl.git |
|
[
"MIT"
] | 0.5.1 | 516ef8294ebd05e00e48c4fc9c73b8c545404f9d | code | 415 | using FactorRotations
using LinearAlgebra
using Statistics
using Test
A = [
0.830 -0.396
0.818 -0.469
0.777 -0.470
0.798 -0.401
0.786 0.500
0.672 0.458
0.594 0.444
0.647 0.333
];
init = Matrix{Float64}(I, 2, 2)
@testset "FactorRotations.jl" begin
include("utils.jl")
include("normalize.jl")
include("methods.jl")
include("rotate.jl")
include("reflect.jl")
end
| FactorRotations | https://github.com/p-gw/FactorRotations.jl.git |
|
[
"MIT"
] | 0.5.1 | 516ef8294ebd05e00e48c4fc9c73b8c545404f9d | code | 1662 | @testset "utility functions" begin
@testset "zerodiag" begin
m = ones(3, 3)
@test FactorRotations.zerodiag!(m) == ones(3, 3) - I
@test diag(m) == zeros(3)
end
@testset "nthsmallest" begin
@test FactorRotations.nthsmallest(1:10, 6) == 6
@test FactorRotations.nthsmallest(1:10, 1) == 1
@test FactorRotations.nthsmallest([1, 10, 100], 3) == 100
@test FactorRotations.nthsmallest([1 3; 7 13], 2) == 3
end
@testset "random_orthogonal_matrix" begin
m = FactorRotations.random_orthogonal_matrix(10)
@test size(m) == (10, 10)
end
@testset "setverbosity!" begin
@test FactorRotations.VERBOSITY[] == false
@test_logs (:info, "FactorRotations logging is disabled globally.") setverbosity!(false)
@test_logs (:info, "FactorRotations logging is enabled globally.") setverbosity!(true)
@test FactorRotations.VERBOSITY[] == true
setverbosity!(false) # disable logging for following tests
end
@testset "set_autodiff_backend" begin
@test FactorRotations.AUTODIFF_BACKEND[] == FactorRotations.AutodiffBackend(:Enzyme)
FactorRotations.set_autodiff_backend(:ABC)
@test FactorRotations.AUTODIFF_BACKEND[] == FactorRotations.AutodiffBackend(:ABC)
FactorRotations.set_autodiff_backend(:Enzyme)
@test FactorRotations.AUTODIFF_BACKEND[] == FactorRotations.AutodiffBackend(:Enzyme)
end
@testset "centercols!" begin
x = [1 2; 1 2]
@test FactorRotations.centercols!(copy(x)) ≈ x .- mean(x, dims = 1)
@test FactorRotations.centercols!(x) == [0 0; 0 0]
end
end
| FactorRotations | https://github.com/p-gw/FactorRotations.jl.git |
|
[
"MIT"
] | 0.5.1 | 516ef8294ebd05e00e48c4fc9c73b8c545404f9d | docs | 1980 | # FactorRotations.jl
[](https://p-gw.github.io/FactorRotations.jl/stable/)
[](https://p-gw.github.io/FactorRotations.jl/dev/)
[](https://github.com/p-gw/FactorRotations.jl/actions/workflows/CI.yml?query=branch%3Amain)
[](https://codecov.io/gh/p-gw/FactorRotations.jl)
[FactorRotations.jl](https://github.com/p-gw/FactorRotations.jl) implements factor rotations using the gradient projection algorithms described
in Bernaards & Jennrich (2005).
## Installation
To install FactorRotations.jl you can use the Julia package manager,
```julia
] add FactorRotations
```
## Getting started
FactorRotations.jl provides methods to rotate factor loading matrices, e.g. from
exploratory factor analysis or principal component analysis.
Assume you have acquired a factor loading matrix `L`. You can then rotate the matrix by calling
the `rotate` function. The `rotate` function takes the factor loading matrix as the first
argument and an instance of a rotation method as the second argument.
```julia
L = [
0.830 -0.396
0.818 -0.469
0.777 -0.470
0.798 -0.401
0.786 0.500
0.672 0.458
0.594 0.444
0.647 0.333
]
rotate(L, Varimax())
```
For a complete list of available methods see the [Rotation Methods](https://github.com/p-gw/FactorRotations.jl/rotation_methods.jl) section of the documentation.
For a fully worked example see the [Guides](https://github.com/p-gw/FactorRotations.jl/guides/index.html) section of the documentation.
# References
Bernaards, C. A., & Jennrich, R. I. (2005). Gradient projection algorithms and software for arbitrary rotation criteria in factor analysis. *Educational and Psychological Measurement, 65*(5), 676-696.
| FactorRotations | https://github.com/p-gw/FactorRotations.jl.git |
|
[
"MIT"
] | 0.5.1 | 516ef8294ebd05e00e48c4fc9c73b8c545404f9d | docs | 759 | ```@meta
CurrentModule = FactorRotations
```
# API
## Rotation criteria
```@docs
Absolmin
Biquartimax
Biquartimin
ComponentLoss
Concave
CrawfordFerguson
Equamax
Geomin
Infomax
KatzRohlf
LinearRightConstant
MinimumEntropy
MinimumEntropyRatio
Oblimax
Oblimin
Parsimax
PatternSimplicity
Quartimax
Simplimax
TandemCriteria
TandemCriterionI
TandemCriterionII
TargetRotation
Varimax
```
## User Interface
```@docs
factor_correlation
isoblique
isorthogonal
kaiser_denormalize
kaiser_denormalize!
kaiser_normalize
kaiser_normalize!
loadings
reflect
reflect!
rotate
rotate!
rotation
rotation_type
setverbosity!
FactorRotations.set_autodiff_backend
```
## Internals
```@docs
FactorRotation
Oblique
Orthogonal
RotationMethod
criterion
criterion_and_gradient!
```
| FactorRotations | https://github.com/p-gw/FactorRotations.jl.git |
|
[
"MIT"
] | 0.5.1 | 516ef8294ebd05e00e48c4fc9c73b8c545404f9d | docs | 1891 | # FactorRotations.jl
[](https://p-gw.github.io/FactorRotations.jl/stable/)
[](https://p-gw.github.io/FactorRotations.jl/dev/)
[](https://github.com/p-gw/FactorRotations.jl/actions/workflows/CI.yml?query=branch%3Amain)
[](https://codecov.io/gh/p-gw/FactorRotations.jl)
[FactorRotations.jl](https://github.com/p-gw/FactorRotations.jl) implements factor rotations using the gradient projection algorithms described
in Bernaards & Jennrich (2005).
## Installation
To install FactorRotations.jl you can use the Julia package manager,
```julia
] add FactorRotations
```
## Getting started
*FactorRotations.jl* provides methods to rotate factor loading matrices, e.g. from
exploratory factor analysis or principal component analysis.
Assume you have acquired a factor loading matrix `L`. You can then rotate the matrix by calling
the `rotate` function. The `rotate` function takes the factor loading matrix as the first
argument and an instance of a rotation method as the second argument.
```julia
L = [
0.830 -0.396
0.818 -0.469
0.777 -0.470
0.798 -0.401
0.786 0.500
0.672 0.458
0.594 0.444
0.647 0.333
]
rotate(L, Varimax())
```
For a complete list of available methods see the [Rotation Methods](@ref rotation_methods) section of the documentation.
For a fully worked example see the [Guides](@ref guides) section of the documentation.
# References
Bernaards, C. A., & Jennrich, R. I. (2005). Gradient projection algorithms and software for arbitrary rotation criteria in factor analysis. *Educational and Psychological Measurement, 65*(5), 676-696.
| FactorRotations | https://github.com/p-gw/FactorRotations.jl.git |
|
[
"MIT"
] | 0.5.1 | 516ef8294ebd05e00e48c4fc9c73b8c545404f9d | docs | 4041 | # [Rotation Methods](@id rotation_methods)
*FactorRotations.jl* implements multiple *orthogonal* and *oblique* rotation methods.
Let us consider the *p*-by-*k* factor loadings matrix *L* for *p* variables and *k* factors.
Most of the rotation methods aim to find the full-rank *k*-by-*k* rotation matrix *U*,
so that the rotated loadings matrix *Λ = L × U* optimizes the given *criterion* function *Q(Λ)*.
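For an orthogonal method this relation can be checked directly. A minimal sketch, using a random loading matrix purely for illustration:
```julia
using FactorRotations, LinearAlgebra

L = rand(8, 2)              # any p-by-k loading matrix
rot = rotate(L, Varimax())  # an orthogonal rotation method
U = rotation(rot)           # the k-by-k rotation matrix

loadings(rot) ≈ L * U       # the rotated loadings satisfy Λ = L × U
U' * U ≈ I                  # U is orthogonal for orthogonal methods
```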
## Orthogonal methods
*Orthogonal* criteria restrict the rotation matrix *U* to be orthogonal.
| criterion | reference | note |
| ----------------------------- | ------------------------------------ | ------------------------------------------------------- |
| [`Biquartimax`](@ref) | | equivalent to `Oblimin(gamma = 0.5, orthogonal = true)` |
| [`Biquartimin`](@ref) | [jennrich2011](@citet) |
| [`ComponentLoss`](@ref) | [jennrich2004, jennrich2006](@citet) |
| [`CrawfordFerguson`](@ref) | [crawford1970](@citet) |
| [`Equamax`](@ref) | [crawford1970](@citet) | equivalent to `Oblimin(gamma = k/2, orthogonal = true)` |
| [`Infomax`](@ref) | [browne2001](@citet) | based on the unpublished manuscript McKeon (1968) |
| [`KatzRohlf`](@ref) | |
| [`LinearRightConstant`](@ref) | [jennrich2004](@citet) |
| [`MinimumEntropyRatio`](@ref) | [mccammon1966](@citet) |
| [`MinimumEntropy`](@ref) | [jennrich2004](@citet) |
| [`Oblimax`](@ref) | |
| [`Oblimin`](@ref) | |
| [`Parsimax`](@ref) | [crawford1970](@citet) | equivalent to `Oblimin(gamma = p*(k-1)/(p+k-2), orthogonal = true)`|
| [`PatternSimplicity`](@ref) | [bentler1977](@citet) |
| [`Quartimax`](@ref) | [neuhaus1954](@citet) | equivalent to `Oblimin(gamma = 0, orthogonal = true)` |
| [`TandemCriteria`](@ref) | [comrey1967](@citet) |
| [`TandemCriterionII`](@ref) | [comrey1967](@citet) | second step of [`TandemCriteria`](@ref) |
| [`TandemCriterionI`](@ref) | [comrey1967](@citet) | first step of [`TandemCriteria`](@ref) |
| [`TargetRotation`](@ref) | |
| [`Varimax`](@ref) | [kaiser1958](@citet) | equivalent to `Oblimin(gamma = 1, orthogonal = true)` |
## Oblique methods
*Oblique* criteria allow the rotation matrix *U* to be an arbitrary full-rank *k*-by-*k* matrix.
| criterion                   | reference                            | note                                               |
| --------------------------- | ------------------------------------ | ------------------------------------------------- |
| [`Absolmin`](@ref) | [jennrich2006](@citet) |
| [`Biquartimin`](@ref) | [jennrich2011](@citet) |
| [`ComponentLoss`](@ref) | [jennrich2004, jennrich2006](@citet) |
| [`Concave`](@ref) | [jennrich2006](@citet) |
| [`CrawfordFerguson`](@ref) | [crawford1970](@citet) |
| [`Geomin`](@ref) | |
| [`Infomax`](@ref) | [browne2001](@citet) | based on the unpublished manuscript McKeon (1968) |
| [`Oblimax`](@ref) | |
| [`Oblimin`](@ref) | |
| [`PatternSimplicity`](@ref) | [bentler1977](@citet) |
| [`Simplimax`](@ref) | |
| [`TargetRotation`](@ref) | |
## References
```@bibliography
Pages = ["rotation_methods.md"]
```
| FactorRotations | https://github.com/p-gw/FactorRotations.jl.git |
|
[
"MIT"
] | 0.5.1 | 516ef8294ebd05e00e48c4fc9c73b8c545404f9d | docs | 3653 | # Rotate an existing loading matrix
This guide considers the basic use case of FactorRotations.jl: Given an existing factor loading matrix `L`, calculate the rotation of the loading matrix according to some rotation criterion. In this example we will first consider the simple case of orthogonal _Varimax_ rotation. At a later stage we will see how to easily switch the factor rotation criterion to arrive at a different rotation.
First, we assume a factor loading matrix `L`.
In this example we will use the loading matrix given by Bernaards & Jennrich (2005),
```jldoctest basic_example
julia> using FactorRotations
julia> L = [
0.830 -0.396
0.818 -0.469
0.777 -0.470
0.798 -0.401
0.786 0.500
0.672 0.458
0.594 0.444
0.647 0.333
]
8×2 Matrix{Float64}:
0.83 -0.396
0.818 -0.469
0.777 -0.47
0.798 -0.401
0.786 0.5
0.672 0.458
0.594 0.444
0.647 0.333
```
Rotating the loading matrix consists of a single call to [`rotate`](@ref). This function takes the unrotated loading matrix as the first argument, and an instance of a factor rotation method as a second argument.
For clarity we first set up our [`Varimax`](@ref) rotation method,
```jldoctest basic_example
julia> criterion = Varimax()
Varimax()
```
Finally we perform the rotation using [`rotate`](@ref),
```jldoctest basic_example; filter = r"([0-9]*)\.([0-9]{4})[0-9]+" => s"\1.\2"
julia> L_rotated = rotate(L, criterion)
FactorRotation{Float64} with loading matrix:
8×2 Matrix{Float64}:
0.886061 0.246196
0.924934 0.183253
0.894664 0.155581
0.865205 0.221416
0.264636 0.893176
0.206218 0.786653
0.156572 0.724884
0.269424 0.67595
```
A different rotation can be achieved by simply changing `criterion`, or by passing a different method directly to [`rotate`](@ref).
```jldoctest basic_example; filter = r"([0-9]*)\.([0-9]{4})[0-9]+" => s"\1.\2"
julia> L_rotated = rotate(L, MinimumEntropy())
FactorRotation{Float64} with loading matrix:
8×2 Matrix{Float64}:
0.90711 0.151221
0.939117 0.084524
0.906093 0.0602051
0.883753 0.128783
0.357504 0.860225
0.28816 0.760468
0.232268 0.704289
0.339319 0.643709
```
The resulting [`FactorRotation`](@ref) object contains the rotated loading matrix, the rotation
matrix, and the factor correlation matrix. To access the fields you can use [`loadings`](@ref),
[`rotation`](@ref), and [`factor_correlation`](@ref) respectively.
```jldoctest basic_example; filter = r"([0-9]*)\.([0-9]{4})[0-9]+" => s"\1.\2"
julia> loadings(L_rotated)
8×2 Matrix{Float64}:
0.90711 0.151221
0.939117 0.084524
0.906093 0.0602051
0.883753 0.128783
0.357504 0.860225
0.28816 0.760468
0.232268 0.704289
0.339319 0.643709
julia> rotation(L_rotated)
2×2 Matrix{Float64}:
0.819445 0.573158
-0.573158 0.819445
julia> factor_correlation(L_rotated)
2×2 Matrix{Float64}:
1.0 -1.66533e-16
-1.66533e-16 1.0
```
## In-place rotation
In some cases it can be useful to modify `L` directly.
For this use case the package provides an in-place rotation, [`rotate!`](@ref) with the same function signature as before.
!!! warning
Contrary to [`rotate`](@ref), the in-place [`rotate!`](@ref) returns the loading matrix instead of a [`FactorRotation`](@ref) object.
```jldoctest basic_example; filter = r"([0-9]*)\.([0-9]{4})[0-9]+" => s"\1.\2"
julia> rotate!(L, MinimumEntropy())
8×2 Matrix{Float64}:
0.90711 0.151221
0.939117 0.084524
0.906093 0.0602051
0.883753 0.128783
0.357504 0.860225
0.28816 0.760468
0.232268 0.704289
0.339319 0.643709
julia> L == loadings(L_rotated)
true
```
| FactorRotations | https://github.com/p-gw/FactorRotations.jl.git |
|
[
"MIT"
] | 0.5.1 | 516ef8294ebd05e00e48c4fc9c73b8c545404f9d | docs | 6459 | # Implementing a rotation method
If you wish to implement your own factor rotation method or extend this package, you can do so in two ways:
- Implementing a rotation method without specifying the gradient
- Implementing a rotation method with gradients
In the following guide we will walk through both ways of implementing a rotation method.
As an example we will reimplement [`Quartimax`](@ref), which maximizes
```math
Q = \sum_p \sum_k \lambda_{pk}^4
```
where ``p`` and ``k`` are the row and column indices of factor loading matrix ``\Lambda`` and ``\lambda_{pk}`` are the entries of the factor loading matrix.
The first step to a custom implementation is to define a new `struct` for the rotation method.
FactorRotations.jl requires that all rotation methods inherit from [`RotationMethod`](@ref).
One must also specify whether the new method can be used for orthogonal rotation, oblique rotation, or both.
For orthogonal rotation it is required that `T <: RotationMethod{Orthogonal}`.
Oblique rotations must satisfy `T <: RotationMethod{Oblique}`.
Methods that can be used for both orthogonal and oblique rotation are defined `T{RT} <: RotationMethod{RT}`.
Since Quartimax is an orthogonal rotation method, we define it as such.
```jldoctest implementing_rotation_methods
julia> using FactorRotations
julia> struct MyQuartimax <: RotationMethod{Orthogonal} end
```
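A method that supports both rotation types can carry the rotation type as a type parameter instead. A minimal sketch, where the field and the `orthogonal` keyword are purely illustrative and not part of the package API:
```julia
struct MyFlexibleMethod{RT} <: RotationMethod{RT}
    gamma::Float64  # hypothetical tuning parameter, for illustration only
end

# choose orthogonal or oblique rotation when constructing the method
MyFlexibleMethod(; gamma = 0.0, orthogonal = false) =
    MyFlexibleMethod{orthogonal ? Orthogonal : Oblique}(gamma)
```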
## Defining the rotation quality criterion
The easiest way to define a rotation method is to implement the
[`criterion_and_gradient!(::Nothing, ...)`](@ref criterion_and_gradient!) function,
which calculates the rotation quality criterion.
Fixing the first argument to `nothing` specifies that
this implementation does not calculate the criterion gradient.
```jldoctest implementing_rotation_methods
julia> import FactorRotations: criterion_and_gradient!
julia> function criterion_and_gradient!(::Nothing, method::MyQuartimax, Λ::AbstractMatrix)
return -sum(Λ .^ 4)
end;
julia> criterion(MyQuartimax(), ones(10, 2))
-20.0
```
!!! note
Since the algorithm in this package minimizes the criterion value, we have to make sure to return `-sum(...)` instead of the original criterion for Quartimax.
*FactorRotations.jl* will apply [Automatic Differentiation](https://en.wikipedia.org/wiki/Automatic_differentiation) to derive the gradient for your quality criterion
and use it during rotation optimization.
By default, *FactorRotations.jl* uses [*Enzyme.jl*](https://github.com/EnzymeAD/Enzyme.jl) as the autodiff engine,
but this can be changed with [`FactorRotations.set_autodiff_backend`](@ref).
To enable autodiff in *FactorRotations.jl*, the corresponding autodiff package should be loaded first:
```jldoctest implementing_rotation_methods
julia> using Enzyme
julia> grad = fill(NaN, 10, 2);
julia> criterion_and_gradient!(grad, MyQuartimax(), ones(10, 2))
-20.0
julia> grad
10×2 Matrix{Float64}:
-4.0 -4.0
-4.0 -4.0
-4.0 -4.0
-4.0 -4.0
-4.0 -4.0
-4.0 -4.0
-4.0 -4.0
-4.0 -4.0
-4.0 -4.0
-4.0 -4.0
```
```jldoctest implementing_rotation_methods; filter = r"([0-9]*)\.([0-9]{4})[0-9]+" => s"\1.\2"
julia> L = [
0.830 -0.396
0.818 -0.469
0.777 -0.470
0.798 -0.401
0.786 0.500
0.672 0.458
0.594 0.444
0.647 0.333
];
julia> L_rotated = rotate(L, MyQuartimax())
FactorRotation{Float64} with loading matrix:
8×2 Matrix{Float64}:
0.898755 0.194824
0.933943 0.129749
0.902131 0.103864
0.876508 0.171284
0.315572 0.876476
0.251123 0.773489
0.198007 0.714678
0.307857 0.659334
```
Checking against the [`Quartimax`](@ref) implementation shows that the results are approximately equal.
```jldoctest implementing_rotation_methods; filter = r"([0-9]*)\.([0-9]{4})[0-9]+" => s"\1.\2"
julia> L_reference = rotate(L, Quartimax())
FactorRotation{Float64} with loading matrix:
8×2 Matrix{Float64}:
0.898755 0.194823
0.933943 0.129748
0.902132 0.103864
0.876508 0.171284
0.315572 0.876476
0.251124 0.773489
0.198008 0.714678
0.307858 0.659334
```
```jldoctest implementing_rotation_methods
julia> isapprox(loadings(L_rotated), loadings(L_reference), atol = 1e-5)
true
```
## Defining the rotation quality gradient
When a formula for the gradient is available, the [`criterion_and_gradient!`](@ref) method
can be extended to accept `∇Q::AbstractMatrix` as the first argument.
In this case *FactorRotations.jl* expects the `criterion_and_gradient!(∇Q, method, Λ)` call
to set `∇Q` to ``∇Q(Λ)`` in-place and also return ``Q(Λ)``.
This variant of `criterion_and_gradient!` allows reusing intermediate computations between
the criterion and its gradient, as well as providing more efficient gradient calculation
than the autodiff-based one.
Continuing the example of `MyQuartimax`:
```jldoctest implementing_rotation_methods
julia> import FactorRotations: criterion_and_gradient!
julia> function criterion_and_gradient!(∇Q::Union{AbstractMatrix, Nothing}, method::MyQuartimax, Λ::AbstractMatrix)
Q = -sum(Λ .^ 4)
if !isnothing(∇Q)
∇Q .= -Λ.^3
end
return Q
end;
```
A user-defined `criterion_and_gradient!` takes priority over the default autodiff-based one:
```jldoctest implementing_rotation_methods
julia> grad = fill(NaN, 10, 2);
julia> criterion_and_gradient!(grad, MyQuartimax(), ones(10, 2))
-20.0
julia> grad
10×2 Matrix{Float64}:
-1.0 -1.0
-1.0 -1.0
-1.0 -1.0
-1.0 -1.0
-1.0 -1.0
-1.0 -1.0
-1.0 -1.0
-1.0 -1.0
-1.0 -1.0
-1.0 -1.0
```
[`rotate`](@ref) will now also use the custom [`criterion_and_gradient!`](@ref):
```jldoctest implementing_rotation_methods; filter = r"([0-9]*)\.([0-9]{4})[0-9]+" => s"\1.\2"
julia> L_rotated = rotate(L, MyQuartimax())
FactorRotation{Float64} with loading matrix:
8×2 Matrix{Float64}:
0.898755 0.194824
0.933943 0.129749
0.902131 0.103865
0.876508 0.171285
0.315572 0.876476
0.251123 0.773489
0.198007 0.714678
0.307857 0.659335
julia> isapprox(loadings(L_rotated), loadings(L_reference), atol = 1e-5)
true
```
Note that in our `criterion_and_gradient!` method the gradient calculation is optional
and can be skipped by passing `nothing`. This variant of the method is used by [`criterion`](@ref):
```jldoctest implementing_rotation_methods; filter = r"([0-9]*)\.([0-9]{4})[0-9]+" => s"\1.\2"
julia> criterion(MyQuartimax(), L)
-2.8829327011730004
```
| FactorRotations | https://github.com/p-gw/FactorRotations.jl.git |
|
[
"MIT"
] | 0.5.1 | 516ef8294ebd05e00e48c4fc9c73b8c545404f9d | docs | 151 | # [Guides](@id guides)
- [Rotate an existing loading matrix](./basic_usage.md)
- [Implementing a rotation method](./implementing_rotation_methods.md)
| FactorRotations | https://github.com/p-gw/FactorRotations.jl.git |
|
[
"MIT"
] | 0.5.1 | 516ef8294ebd05e00e48c4fc9c73b8c545404f9d | docs | 2299 | # Working with MultivariateStats.jl
FactorRotations.jl provides direct support for models fitted by [MultivariateStats.jl](https://github.com/JuliaStats/MultivariateStats.jl). Specifically, you can fit a [factor analysis](https://juliastats.org/MultivariateStats.jl/stable/fa/) or [principal component analysis](https://juliastats.org/MultivariateStats.jl/stable/pca/) model and directly rotate the resulting loading matrix.
To use FactorRotations.jl with MultivariateStats.jl we first need to fit a factor analysis or principal component analysis model.
In this tutorial we will be using the `bfi` data provided by the [`psych`](https://cran.r-project.org/web/packages/psych/psych.pdf) R package.
It contains 25 self-report items concerning personality.
!!! note
For performance reasons we just use the first 200 observations of the dataset.
!!! warning
Missing values are dropped for purposes of this tutorial.
Properly handle missing data in your own analysis!
```jldoctest multivariatestats
julia> using FactorRotations, MultivariateStats, RDatasets
julia> data = dataset("psych", "bfi")[:, 2:26] |> dropmissing!;
julia> data = Matrix(data[1:200, :]); # use just the first 200 observations
julia> model = fit(FactorAnalysis, data', maxoutdim = 5)
Factor Analysis(indim = 25, outdim = 5)
```
After fitting the model, the loadings can be extracted using `MultivariateStats.loadings` and then rotated:
```jldoctest multivariatestats
julia> raw_loadings = MultivariateStats.loadings(model);
julia> rotated_loadings = FactorRotations.loadings(rotate(raw_loadings, Geomin()));
```
However, FactorRotations.jl provides convenience functions that accept `model` directly.
Analogous to rotating raw loading matrices, there are two ways to rotate a MultivariateStats.jl solution: regular and in-place.
For both we can pass our `model` to [`rotate`](@ref) or [`rotate!`](@ref) respectively.
Using [`rotate`](@ref),
```jldoctest multivariatestats
julia> rot = rotate(model, Geomin());
julia> FactorRotations.loadings(rot) == rotated_loadings
true
```
Similarly we can use [`rotate!`](@ref) to change the loading matrix of `model` in-place.
```jldoctest multivariatestats
julia> rotate!(model, Geomin());
julia> MultivariateStats.loadings(model) == rotated_loadings
true
```
| FactorRotations | https://github.com/p-gw/FactorRotations.jl.git |
|
[
"MIT"
] | 0.1.2 | ddda82a2158fb9914768d6334572398f61e508f6 | code | 3612 | module DotMaps
export DotMap
"""
DotMaps.DotMap(::AbstractDict)
Constructs a DotMap from a Dict. This provides the same functionality as dictionaries, but allows indexing with `.` instead of (or in addition to) `[""]`.
"""
struct DotMap
__dict__ ::Dict{Symbol,Any}
DotMap() = new(Dict{Symbol,Any}())
end
function DotMap(d::AbstractDict)
dm = DotMap()
for (k, v) in d
dm.__dict__[Symbol(k)] = DotMap(v)
end
return dm
end
DotMap(d::Any) = d
"""
DotMaps.todict(::DotMap; keys_as_strings=false)
Constructs a Dict from a DotMap. If `keys_as_strings`, the keys will be `String` instead of `Symbol`.
"""
function todict(obj::DotMap; keys_as_strings::Bool = false)
dict = Dict()
for (k, v) in obj
nk = keys_as_strings ? string(k) : k
dict[nk] = todict(v, keys_as_strings = keys_as_strings)
end
return dict
end
# return at leaves
todict(obj::Any; keys_as_strings::Bool = false) = obj
# make dots work
function Base.getproperty(obj::DotMap, name::Symbol)
if name == :__dict__
return getfield(obj, name)
else
return obj.__dict__[name]
end
end
# make dictionary indexing work
Base.getindex(obj::DotMap, name::Symbol) = Base.getindex(obj.__dict__, name)
Base.getindex(obj::DotMap, name::Any) = Base.getindex(obj, Symbol(name))
Base.setindex!(obj::DotMap, x, name::Symbol) = Base.setindex!(obj.__dict__, x, name)
Base.setindex!(obj::DotMap, x::Dict, name::Symbol) = Base.setindex!(obj.__dict__, DotMap(x), name)
Base.setindex!(obj::DotMap, x, name) = Base.setindex!(obj, x, Symbol(name))
# assignment with dots
Base.setproperty!(obj::DotMap, name::Symbol, x) = Base.setindex!(obj, x, name)
# iteration
Base.iterate(obj::DotMap) = Base.iterate(obj.__dict__)
Base.iterate(obj::DotMap, s::Any) = Base.iterate(obj.__dict__, s)
Base.length(obj::DotMap) = Base.length(obj.__dict__)
Base.firstindex(obj::DotMap) = Base.firstindex(obj.__dict__)
Base.lastindex(obj::DotMap) = Base.lastindex(obj.__dict__)
# dictionary methods
Base.keys(obj::DotMap) = Base.keys(obj.__dict__)
Base.values(obj::DotMap) = Base.values(obj.__dict__)
Base.collect(obj::DotMap) = Base.collect(obj.__dict__)
# retrieval/modification
Base.get(obj::DotMap, key::Symbol, default) = Base.get(obj.__dict__, key, default)
Base.get(obj::DotMap, key::Any, default) = Base.get(obj.__dict__, Symbol(key), default)
Base.get!(obj::DotMap, key::Symbol, default) = Base.get!(obj.__dict__, key, default)
Base.get!(obj::DotMap, key::Any, default) = Base.get!(obj.__dict__, Symbol(key), default)
Base.getkey(obj::DotMap, key::Symbol, default) = Base.getkey(obj.__dict__, key, default)
Base.getkey(obj::DotMap, key::Any, default) = Base.getkey(obj.__dict__, Symbol(key), default)
Base.pop!(obj::DotMap, key::Symbol) = Base.pop!(obj.__dict__, key)
Base.pop!(obj::DotMap, key::Any) = Base.pop!(obj.__dict__, Symbol(key))
Base.delete!(obj::DotMap, key::Symbol) = Base.delete!(obj.__dict__, key)
Base.delete!(obj::DotMap, key::Any) = Base.delete!(obj.__dict__, Symbol(key))
Base.filter!(pred, obj::DotMap) = Base.filter!(pred, obj.__dict__)
Base.filter(pred, obj::DotMap) = DotMap(Base.filter(pred, obj.__dict__))
# about this dict
Base.propertynames(obj::DotMap) = Base.keys(obj.__dict__)
Base.isempty(obj::DotMap) = Base.isempty(obj.__dict__)
Base.show(obj::DotMap) = Base.show(obj.__dict__)
# containment
Base.in(key::Symbol, obj::DotMap) = Base.in(key, obj.__dict__)
Base.in(key::Any, obj::DotMap) = Base.in(Symbol(key), obj.__dict__)
Base.haskey(obj::DotMap, key::Symbol) = Base.haskey(obj.__dict__, key)
Base.haskey(obj::DotMap, key::Any) = Base.haskey(obj.__dict__, Symbol(key))
end
| DotMaps | https://github.com/mcmcgrath13/DotMaps.jl.git |
|
[
"MIT"
] | 0.1.2 | ddda82a2158fb9914768d6334572398f61e508f6 | code | 1058 | using DotMaps
using Test
@testset "DotMaps.jl" begin
dict = Dict("a"=>1, "b"=>2, "c" => Dict("d"=>3))
DM = DotMap(dict)
@test DM.a == 1
@test DM.c.d == 3
DM.c.e = 4
@test DM.c.e == 4
delete!(DM.c, "e")
@test !("e" in keys(DM.c))
@test get!(DM.c, "e", 5) == 5
@test DM.c.e == 5
@test DM["c"].e == 5
DM.c.f = Dict("g" => 6)
@test DM.c.f.g == 6
for (k, v) in DM
@test isa(k, Symbol)
end
@test length(DM) == 3
@test 3 in collect(values(DM.c))
@test pop!(DM.c, "e") == 5
filtered = filter(x -> isa(last(x), Int), DM)
@test !("c" in keys(filtered))
@test filtered.a == 1
filter!(x -> isa(last(x), Int), DM)
@test !("c" in keys(DM))
@test haskey(DM, "a")
@test DM.a == 1
@test :a in propertynames(DM)
@test isempty(DotMap())
d = DotMap()
d.a = Dict("b" => 2)
dd = DotMaps.todict(d, keys_as_strings=true)
@test dd == Dict("a" => Dict("b" => 2))
dds = DotMaps.todict(d)
@test dds == Dict(:a => Dict(:b => 2))
end
| DotMaps | https://github.com/mcmcgrath13/DotMaps.jl.git |
|
[
"MIT"
] | 0.1.2 | ddda82a2158fb9914768d6334572398f61e508f6 | docs | 726 | # DotMaps
[](https://github.com/mcmcgrath13/DotMaps.jl/actions)
A wrapper for dictionaries that allows dot notation indexing as well as traditional bracket indexing.
```julia
dict = Dict("a"=>1, "b"=>2, "c" => Dict("d"=>3))
dm = DotMap(dict)
dm.c.d # returns 3
dm.c.e = 5
dm["c"].e # returns 5
DotMaps.todict(dm, keys_as_strings=true) # returns Dict("a"=>1, "b"=>2, "c" => Dict("d"=>3, "e"=>5))
DotMaps.todict(dm) # returns Dict(:a=>1, :b=>2, :c => Dict(:d=>3, :e=>5))
```
**NOTE** This is not as performant as using plain dictionaries, but it is convenient for accessing deeply nested dictionary structures, such as large config/YAML/JSON files.
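A sketch of that use case (the configuration content here is made up for illustration):
```julia
using DotMaps

# nested configuration, e.g. parsed from a JSON/YAML/TOML file into a Dict
config = Dict(
    "database" => Dict("host" => "localhost", "port" => 5432),
    "logging" => Dict("level" => "info"),
)

cfg = DotMap(config)
cfg.database.host                          # "localhost"
cfg.logging.level = "debug"                # update a nested entry
DotMaps.todict(cfg, keys_as_strings=true)  # back to a plain Dict with string keys
```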
| DotMaps | https://github.com/mcmcgrath13/DotMaps.jl.git |
|
[
"MIT"
] | 4.0.1 | fee08bd8cf985794c222c61ec31fbb621507caa2 | code | 329 | module PSID
using XLSX, DataDeps, DataFrames, CSV, LightXML, AbstractTrees, JSONTables, JSON3
using DataFramesMeta, SHA, DataStructures, Parsers
include("types.jl")
include("init.jl")
include("process_codebook.jl")
include("use_codebook.jl")
include("unzip_data.jl")
include("construct_alldata.jl")
export makePSID
end # module
| PSID | https://github.com/aaowens/PSID.jl.git |
|
[
"MIT"
] | 4.0.1 | fee08bd8cf985794c222c61ec31fbb621507caa2 | code | 7160 | inrange(x, l, u) = l <= x <= u
inrange(x::Missing, l, u) = false
function famid(A)
# A is length 1 or 2 (depending on having spouse)
# This needs to be symmetric. Order of A can't matter
if length(A) == 1
return A[1]
elseif length(A) == 2
return 1_000_000*minimum(A) + maximum(A)
else
#error("$A")
return missing
end
end
function construct_alldata(famdatas, inddata; codemissings = true)
## Combine the VarInfo5 array with the data
readme = JSON3.read(read("output/user_output.json", String), Vector{VarInfo5})
#readme = procvar
readme_ind = JSON3.read(read("output/ind_output.json", String), Vector{VarInfo5})
years = [collect(1968:1997); collect(1999:2:2021)]
newdatas_ind = [DataFrame() for x in famdatas]
# Go year by year
for y_nx in eachindex(years)
year = years[y_nx]
df = newdatas_ind[y_nx]
data = inddata
#df[!, :id_ind] = 1:nrow(inddata)
df[!, :id_ind] = Int.(1000data.ER30001 .+ data.ER30002)
df[!, :famid_1968] = Int.(data.ER30001)
df[!, :year] .= year
for vari in readme_ind
## Check if vari is in the data this year
if !haskey(vari.yeardict, string(year))
continue
end
ss = vari.yeardict[string(year)][1]
sym = Symbol(ss)
dat1 = data[!, sym]
# Apply missing value codes
sm = vari.yeardict[string(year)][3]
finalname = Symbol("$(vari.name_user)_$(vari.unit)")
finalname_code = Symbol("$(finalname)_code_ind")
if codemissings
dat2 = [x in sm ? missing : x for x in dat1 ]
else
dat2 = [x for x in dat1]
end
if vari.iscontinuous == false
labs = Dict(parse(Int, k) => v for (k, v) in vari.labeldict)
strlabel = [ismissing(x) ? missing : labs[x] for x in dat2]
finalname_label = Symbol("$(vari.name_user)_$(vari.unit)_label")
df[!, finalname_label] = strlabel
end
df[!, finalname] = dat2
df[!, finalname_code] .= ss
end
end
newdatas2 = (headdata = [DataFrame() for x in famdatas], spousedata = [DataFrame() for x in famdatas])
## Heads, spouses
for ishead in (false, true)
# Go year by year
for y_nx in eachindex(years)
year = years[y_nx]
df = ishead ? newdatas2.headdata[y_nx] : newdatas2.spousedata[y_nx]
data = famdatas[year]
for vari in readme
## Check if vari is in the data this year
if !haskey(vari.yeardict, string(year))
continue
end
ss = vari.yeardict[string(year)][1]
sym = Symbol(ss)
if vari.unit == "family"
finalname = Symbol("$(vari.name_user)_family")
elseif vari.unit == "head" && ishead
finalname = Symbol("$(vari.name_user)_ind")
elseif vari.unit == "spouse" && ishead
finalname = Symbol("$(vari.name_user)_spouse")
elseif vari.unit == "head" && !ishead
finalname = Symbol("$(vari.name_user)_spouse")
elseif vari.unit == "spouse" && !ishead
finalname = Symbol("$(vari.name_user)_ind")
else
error("Variable unit for $(vari.name_user) must be family, head, or spouse")
end
finalname_code = Symbol("$(finalname)_code_fam")
if hasproperty(data, sym)
dat1 = data[!, sym]
# Apply missing value codes
sm = vari.yeardict[string(year)][3]
if codemissings
dat2 = [x in sm ? missing : x for x in dat1 ]
else
dat2 = [x for x in dat1]
end
# if categorical
newdat = dat2
if vari.iscontinuous == false
labs = Dict(parse(Int, k) => v for (k, v) in vari.labeldict)
strlabel = [ismissing(x) ? missing : labs[x] for x in dat2]
finalname_label = Symbol("$(finalname)_label")
df[!, finalname_label] = strlabel
end
else
newdat = [missing for i in 1:nrow(data)]
println("Warning: $sym, $finalname in $year was supposed to be in the data but isn't there")
end
df[!, finalname] = newdat
df[!, finalname_code] .= ss
end
df[!, :year] .= year
df[!, :ishead] .= ishead
end
end
allinds = DataFrame()
for y_nx in eachindex(years)
di = newdatas_ind[y_nx]
if y_nx > 1 # Sequence number not in 1968
di = @subset(di, inrange.(:seq_num_ind, 1, 20))
end
dj_heads = @subset(di, in.(:rel_head_ind, (1, 10) |> Set |> Ref))
dj_spouses = @subset(di, in.(:rel_head_ind, (2, 20, 22) |> Set |> Ref))
djall = vcat(dj_heads, dj_spouses)
#famids = by(djall, [:id_family, :year], (:id_ind,) => x -> (famid = famid(x.id_ind),))
famids = combine(groupby(djall, [:id_family, :year]), AsTable(:id_ind) => (x -> famid(x.id_ind)) => :famid)
# join the heads with the head family file
hi = innerjoin(dj_heads, newdatas2.headdata[y_nx], on = [:id_family, :year])
si = innerjoin(dj_spouses, newdatas2.spousedata[y_nx], on = [:id_family, :year])
hi = innerjoin(hi, famids, on = [:id_family, :year])
si = innerjoin(si, famids, on = [:id_family, :year])
allinds = vcat(allinds, hi, cols = :union)
allinds = vcat(allinds, si, cols = :union)
end
CSV.write("output/allinds.csv", allinds)
println("Finished constructing individual data, saved to output/allinds.csv")
return allinds
end
"""
makePSID(userinput_json; codemissings = true)
Constructs the PSID panel of individuals using the variables in the input JSON.
Arguments: `userinput_json` - A string naming the input JSON file
Keyword arguments: `codemissings` - A bool indicating whether values detected as missing according to the codebook
will be converted to `missing`. Defaults to true.
"""
function makePSID(userinput_json; codemissings = true)
isfile(userinput_json) || error("$userinput_json not found in current directory")
x = dirname(pathof(PSID))
fx = "$x/allfiles_hash.json"
@assert isfile(fx)
PSID.verifyfiles(fx)
isdir("output") || mkdir("output")
isdir("datafiles") || mkdir("datafiles")
println("Making codebook")
PSID.process_codebook()
println("Processing input JSON")
PSID.process_input(userinput_json)
println("Reading data files")
famdatas, inddata = PSID.unzip_data()
println("Constructing data")
PSID.construct_alldata(famdatas, inddata, codemissings = codemissings)
end
| PSID | https://github.com/aaowens/PSID.jl.git |
|
[
"MIT"
] | 4.0.1 | fee08bd8cf985794c222c61ec31fbb621507caa2 | code | 645 | function checkhash(filename)
filename |> read |> sha256 |> bytes2hex
end
function verifyfiles(allfilesjson; skip = false)
allfiles_dict = JSON3.read(read(allfilesjson, String), SortedDict{String, String})
for (f, v) in allfiles_dict
if !isfile(f)
if !skip
error("$f not found and is required.")
else
@warn "$f not found, skipping"
continue
end
end
fh = checkhash(f)
if fh == v
println("Found file $f, hash OK")
else
@warn "Hash for file $f is $fh, expected $v"
end
end
end
| PSID | https://github.com/aaowens/PSID.jl.git |
|
[
"MIT"
] | 4.0.1 | fee08bd8cf985794c222c61ec31fbb621507caa2 | code | 1755 | AbstractTrees.children(x::AbstractXMLNode) = collect(child_elements(x));
AbstractTrees.printnode(io::IO, x::AbstractXMLNode) = print(io, name(x));
#Processes XML codebook tree into a usable JSON table
function process_codebook()
xdoc = parse_file("PSIDCodebook.xml");
#@infiltrate
r = root(xdoc);
#t = Tree(r)
# Travel down the tree until we reach the list of variables
#name(t[1])
current_node = children(r)[1]
name(current_node)
current_node = children(current_node)[1]
name(current_node)
current_node = children(current_node)[2]
name(current_node)
ce = current_node["VARIABLE"]
# Ingest the variables into a DataFrame
outdf = DataFrame()
outdf.NAME = [content(ce[i]["NAME"][1]) for i in 1:length(ce)]
outdf.YEAR = [content(ce[i]["YEAR"][1]) for i in 1:length(ce)]
outdf.QTEXT = [content(ce[i]["QTEXT"][1]) for i in 1:length(ce)]
outdf.ETEXT = [content(ce[i]["ETEXT"][1]) for i in 1:length(ce)]
outdf.TYPE_ID = [content(ce[i]["TYPE_ID"][1]) for i in 1:length(ce)]
outdf.LABEL = [content(ce[i]["LABEL"][1]) for i in 1:length(ce)]
# codes
list_codes = [ce[i]["LIST_CODE"][1] for i in 1:length(ce)]
list_codes[1]
# Take a codexml vector of some length containing (value, text) pairs
# Return a dict
function process_codes(codexml)
codes = codexml["CODE"]
vals = [content(c["VALUE"][1]) for c in codes]
texts = [content(c["TEXT"][1]) for c in codes]
Dict(v => t for (v, t) in zip(vals, texts))
end
process_codes(list_codes[1])
morecodes = process_codes.(list_codes)
outdf.codedict = morecodes
#j = objecttable(outdf)
j = arraytable(outdf)
write("output/codebook.json", j)
end
| PSID | https://github.com/aaowens/PSID.jl.git |
|
[
"MIT"
] | 4.0.1 | fee08bd8cf985794c222c61ec31fbb621507caa2 | code | 519 | mutable struct VarInfo5
name_user::String
unit::String
yeardict::Dict{String, Tuple{String, String, Vector{Float64}}}
iscontinuous::Bool
labeldict::Dict{String, String}
VarInfo5() = new()
VarInfo5(x...) = new(x...)
end
JSON3.StructType(::Type{VarInfo5}) = JSON3.Mutable()
mutable struct VarInput
name_user::String
varID::String
unit::String #family, head, or spouse
VarInput() = new()
VarInput(x...) = new(x...)
end
JSON3.StructType(::Type{VarInput}) = JSON3.Mutable()
| PSID | https://github.com/aaowens/PSID.jl.git |
|
[
"MIT"
] | 4.0.1 | fee08bd8cf985794c222c61ec31fbb621507caa2 | code | 2208 | function parsestring(s)
s = replace(s, "long" => "") # In the 2017 do file they declared some variables as long
tokens = String[]
n = 0
i = 1
while i <= lastindex(s)
if s[i] == ' '
i = i + 1
else
nx = findnext(" ", s, i)
if nx === nothing
push!(tokens, s[i:end])
break
else
push!(tokens, s[i:(first(nx) - 1) ])
i = first(nx)
end
end
end
tokens
end
function process_tok(tok)
alltokens = String[]
ready = false
for i = 1:length(tok)
w = tok[i]
if "infix" ⊆ w
ready = true
continue
end
"using" ⊆ w && break
ready && append!(alltokens, parsestring(w))
end
alltokens
end
function str2range(s)
myr = r"\d+"
d = eachmatch(myr, s)
c = collect(d)
parse(Int, first(c).match):parse(Int, last(c).match)
end
function read_fixedwidth(data, toks)
names = String[tok[1] for tok in toks]
dat = zeros(length(data), length(toks))
for i in eachindex(data)
line = data[i]
for j in eachindex(toks)
tok = toks[j]
r = tok[2]
dat[i, j] = Parsers.parse(Float64, line[r])
end
end
DataFrame(dat, Symbol.(names))
end
function readPSID(zipname)
zipp = "$zipname.zip"
ZIPNAME = uppercase(zipname)
#t = mktempdir()
t = "datafiles/$zipname"
#run(`unzip $zipname -d $t`)
isdir(t) || run(DataDeps.unpack_cmd("$zipp", "$t", ".zip", ""))
tok = readlines("$t/$ZIPNAME.do")
alltokens = process_tok(tok)
toks = [(alltokens[i], str2range(alltokens[i+1])) for i in 1:2:length(alltokens)]
data = readlines("$t/$ZIPNAME.txt")
Base.GC.gc() # memory was going too high
out = read_fixedwidth(data, toks)
end
function unzip_data()
years = [collect(1968:1997); collect(1999:2:2021)]
filenames = [year <= 1993 ? "fam$year" : "fam$(year)er" for year in years]
datas = SortedDict(year => readPSID(filename) for (year, filename) in zip(years, filenames))
inddata = readPSID("ind2021er")
(famdatas = datas, inddata = inddata)
end
| PSID | https://github.com/aaowens/PSID.jl.git |
|
[
"MIT"
] | 4.0.1 | fee08bd8cf985794c222c61ec31fbb621507caa2 | code | 5636 | #Transforms a string like \"This time in 1996\" to \"This time in YEAR\"
function year2year(s)
rtest = r"(19|20)\d{2}"
replace(s, rtest => "YEAR")
end
#If x or y are supersets of each other, keep the superset. Otherwise OR them
function checkerror(x, y)
x, y = year2year(x), year2year(y)
if x == y
return x
elseif x ⊆ y
return y
elseif y ⊆ x
return x
else # give up
return "$x PSIDOR $y"
end
end
dropY(s) = parse(Int, match(r"(19|20)\d{2}", s).match)
#Check if this label describes a missing value code
function checkmissing(s)
for r in (r"NA", r"DK", r"Inap.", r"Wild code", r"Missing")
occursin(r, s) && return true
end
return false
end
#Check if this is a continuous variable
function iscontinuous(k)
for key in k
out = tryparse(Float64, key)
if out === nothing
return true
end
end
return false
end
dropcomma(s) = String([c for c in s if !(c == ',')])
#Try to parse this value as a float
function parse2(s, v)
out = tryparse(Float64, dropcomma(s))
# if this isn't a Float, maybe it was a range "-89.0 - -0.4"
# TODO fix this
if out === nothing
#@show s v
return missing
else
return out
end
end
narrowtypes(A) = [a for a in A]
"""
Inputs:
name: The variable ID we want to match
var2ind_dict: The crosswalk table
df_vars: The data
codebook_df: The codebook table
fastfind: Dict mapping from variable IDs to their index in the codebook
Processes a variable ID, finds all years thats match, and collects the labels
"""
function process_varname(name, var2ind_dict, df_vars, codebook_df, fastfind)
## Find the row in the crosswalk we can find this variable in
myrow = var2ind_dict[name]
## Fetch all the names in that row
dfvar = df_vars[myrow, :]
mynames = [ r for r in dfvar if r !== missing]
## Need to figure out the variable label expansion
# Can I just take the union?
codevec = [fastfind[s] for s in values(mynames)]
codedict = [codebook_df.codedict[i] for i in codevec]
un = Dict{String, String}()
merge!(checkerror, un, codedict...)
map!(trimlabel, values(un))
varnames = Dict{String, Tuple{String, String, Vector{Float64}}}(
codebook_df.YEAR[i] => (codebook_df.NAME[i], codebook_df.LABEL[i],
codebook_df.excluding[i]) for i in codevec)
varnames, iscontinuous(keys(un)), un
end
"""
Sometimes the labels uses a comma in one year and a semicolon in another,
but are otherwise identical.
This function parses the different labels and drops these duplicates.
It also keeps only labels which are unique after cleaning, and constructs
a label which is a union of the parts (A OR B OR C)
"""
function trimlabel(s)
sp = strip.(split(s, "PSIDOR"))
# find common substrings
# For each string in s, check if it occurs in another string in s
# If so, drop it from s
# If not, push it to the new string list
# For each index in s
# Check if s[i] is in s \ excluded
# If so, add this index to the excluded list
clean(x) = lowercase(dropcomma(x))
setsp = Set(sp)
#cleaned = Set(clean.(sp))
excluded = Int[]
# We want to find the unique (after cleaning) labels
# Iterate through the set and check if we have seen this label before
# If not, add it to the seen list
for i in eachindex(sp)
targind = setdiff(1:length(sp), union(i, excluded))
cleaned = clean.(sp[targind])
any(clean(sp[i]) ⊆ c for c in cleaned) && push!(excluded, i)
end
newsp = sp[setdiff(1:length(sp), excluded)]
if length(newsp) == 1
return newsp[1]
else
return reduce((x, y) -> "$x OR $y", newsp[2:end], init = newsp[1])
end
end
"""
Processes input JSON file
Reads the crosswalk and codebook table from disk and
harmonizes the labels. Constructs the output JSON
"""
function process_input(inputjson)
@assert last(splitext(inputjson)) == ".json"
codebook_json = jsontable(read("output/codebook.json", String));
codebook_df = DataFrame(codebook_json);
codebook_df.codedict = [Dict(string(x) => y for (x, y) in dt) for dt in codebook_df.codedict]
#@infiltrate
crosswalk_df = DataFrame(XLSX.readtable("psid.xlsx", "MATRIX"))
crosswalk_df = mapcols(narrowtypes, crosswalk_df)
## Need a map from VAR to the right row
df_vars = crosswalk_df[!, r"^Y.+"]
var2ind_dict = Dict{String, Int}()
##
for col in eachcol(df_vars)
x = Dict(col[i] => i for i in 1:length(col) if col[i] !== missing)
merge!(checkerror, var2ind_dict, x)
end
## Need to figure out the variable label expansion
fastfind = Dict(codebook_df.NAME[i] => i for i in 1:length(codebook_df.NAME))
# Check if this label denotes a missing value code. If so, this value is an excluding value
codebook_df.excluding = [[parse2(k, v) for (k, v) in d if checkmissing(v)] |> skipmissing |> narrowtypes for d in codebook_df.codedict]
### Do the final processing of the input JSON, produce the output
read_input = JSON3.read(read(inputjson, String), Vector{VarInput})
process_varinput(v::VarInput) = VarInfo5(v.name_user, v.unit, process_varname(v.varID, var2ind_dict, df_vars, codebook_df, fastfind)...)
procvar = process_varinput.(read_input)
write("output/user_output.json", JSON3.write(procvar))
modpath = dirname(pathof(PSID))
indpath = "$modpath/ind_input.json"
read_input = JSON3.read(read(indpath, String), Vector{VarInput})
procvar = process_varinput.(read_input)
write("output/ind_output.json", JSON3.write(procvar))
end
| PSID | https://github.com/aaowens/PSID.jl.git |
|
[
"MIT"
] | 4.0.1 | fee08bd8cf985794c222c61ec31fbb621507caa2 | code | 1874 | #using Pkg
#pkg"activate ."
using PSID
makePSID("user_input.json")
using CSV, DataFrames, DataFramesMeta
using Test
using Missings
@testset "Data looks ok" begin
alldata = CSV.read("output/allinds.csv", DataFrame, copycols = true)
@test length(unique(alldata.age_spouse)) < 120
@test minimum(alldata.age_spouse |> skipmissing) >= 0
@test maximum(alldata.age_spouse|> skipmissing) <= 120
@test length(unique(alldata.age_ind)) < 120
@test minimum(alldata.age_ind |> skipmissing) >= 0
@test maximum(alldata.age_ind|> skipmissing) <= 120
@test nrow(alldata) >= 472609
@test ncol(alldata) == 41
nrows_byind = [nrow(sdf) for sdf in groupby(alldata, "id_ind")]
@test minimum(nrows_byind) == 1
@test maximum(nrows_byind) >= 41
@test maximum(nrows_byind) <= maximum(alldata.year) - minimum(alldata.year)
@test minimum(alldata.year) == 1968
@test maximum(alldata.year) == 2021
## fix income since it changed in 1993
inds = (alldata.year .<= 1993) .& (alldata.ishead .== true)
alldata.labor_inc_spouse[inds] .= alldata.labor_inc_pre_spouse[inds]
inds = (alldata.year .<= 1993) .& (alldata.ishead .== false)
alldata.labor_inc_ind[inds] .= alldata.labor_inc_pre_ind[inds]
## keep only SRC sample
alldata = @subset(alldata, :famid_1968 .< 3000) # Keep only SRC sample
## assume missing income is 0
re(x, val) = Missings.replace(x, val) |> collect # Replace missing with value
alldata.labor_inc_ind = re(alldata.labor_inc_ind, 0.)
alldata.hours_ind = re(alldata.hours_ind, 0.)
##
using Statistics
inc_byind = [mean(sdf.labor_inc_ind) for sdf in groupby(alldata, "id_ind")]
hours_byind = [mean(sdf.hours_ind) for sdf in groupby(alldata, "id_ind")]
wages_byind = [mean(sdf.labor_inc_ind ./ sdf.hours_ind) for sdf in groupby(alldata, "id_ind")]
@test 10 <= median((w for w in wages_byind if w > 0)) <= 15
@test 15_000 <= median((w for w in inc_byind if w > 0)) <= 25_000
end | PSID | https://github.com/aaowens/PSID.jl.git |
|
[
"MIT"
] | 4.0.1 | fee08bd8cf985794c222c61ec31fbb621507caa2 | docs | 6108 | # PSID.jl
[](https://travis-ci.com/aaowens/PSID.jl)
[](https://codecov.io/gh/aaowens/PSID.jl)
The Panel Study of Income Dynamics (PSID) is a longitudinal public dataset which has been following a collection of families and their descendants since 1968. It provides a breadth of information about labor supply and life-cycle dynamics. More information is available at https://psidonline.isr.umich.edu/.
This package produces a labeled panel of individuals with a consistent individual ID across time. You provide a JSON file describing the variables you want. An example input file can be found at [examples/user_input.json](https://github.com/aaowens/PSID.jl/blob/master/examples/user_input.json). Currently only variables in the family files can be added, but in the future it should be possible to support variables in the individual files or the supplements.
# Example
An example workflow can be found on my blog post [here](https://aaowens.github.io/julia/2020/02/11/Using-the-Panel-Study-of-Income-Dynamics.html).
# Instructions
To add this package, use
```
(@v1.6) pkg> add PSID
```
Next, download the PSID data files yourself. The package can't automatically fetch them because the PSID requires you to register for a free account before using the data.
The list of data files required to be in the current directory can be found [here](https://github.com/aaowens/PSID.jl/blob/master/src/allfiles_hash.json). These files are
1. The PSID codebook in XML format. You used to be able to download this from the PSID here https://simba.isr.umich.edu/downloads/PSIDCodebook.zip , but the link is broken. I put it in my Google Drive here https://drive.google.com/file/d/1CPwM5tsphdezi4RqlHGMkS1hiLLRZIT7/view .
2. The zipped PSID family files and cross-year individual file, which can be downloaded here https://simba.isr.umich.edu/Zips/ZipMain.aspx. Do not extract the files; leave them zipped. You need to download every family file from 1968 to 2021, and you also need to download the cross-year individual file.
3. The XLSX cross-year index for the variables, which can be downloaded here https://psidonline.isr.umich.edu/help/xyr/psid.xlsx.
After acquiring the data, run
```
julia> using PSID
julia> makePSID("user_input.json")
# to not code missings, makePSID("user_input.json", codemissings = false)
```
It will verify the required files exist and then construct the data. If successful, it will print `Finished constructing individual data, saved to output/allinds.csv` after about 5 minutes.
This will use about 15 GB of RAM. It may not work on machines without much memory.
## The input JSON file
The file passed to `makePSID` describes the variables you want.
```
{
"name_user": "hours",
"varID": "V465",
"unit": "head"
},
```
There are three fields, `name_user`, `varID`, and `unit`. `name_user` is a name chosen by you. `varID` is one of the codes assigned by the PSID to this variable. These can be looked up in the PSID [cross-year index](https://simba.isr.umich.edu/VS/i.aspx). For example, hours above can be found in the crosswalk at ` Family Public Data Index 01>WORK 02>Hours and Weeks 03>annual in prior year 04>head 05>total:`. Clicking on the variable info will show the the list of years and associated IDs when that variable is available. Choose any of the IDs for `varID`, it does not matter. `PSID.jl` will look up all available years for that variable in the crosswalk. You must also indicate the unit, which can be `head`, `spouse`, or `family`. This makes sure the variable is assigned to the correct individual.
# Features
This package provides the following features:
1. Automatically labels missing values by searching the value labels from the codebook for strings like "NA", "Inap.", or "Missing".
2. Tries to produce consistent value labels across years for categorical variables. This is difficult because the labels in the PSID sometimes change between years. This package uses an algorithm to try to harmonize the labels when possible by removing common subsets. For example, in one year race is labeled as "Asian" but in the next year it is "Asian, Pacific Islander". The first is a subset of the second, so the final label will be "Asian, Pacific Islander". When this is not possible, the final label will be "A or B or C" for however many incomparable labels were found.
3. Matches the individuals across time to produce a panel with consistent (ID, year) keys and their associated variables.
4. Produces consistent individual or spouse variables for individuals. In the input JSON file, you must indicate whether a variable is family level, household head level, or household spouse level. The final output will have variables of the form `VAR_family`, `VAR_ind`, or `VAR_spouse`. When the individual is a household head, `VAR_ind` will come from the household head version of that variable, and `VAR_spouse` will come from the household spouse version. If the individual is a household spouse, it is the reverse. Both individuals will get all family level variables.
5. It's easiest to track individuals, but this package also produces a consistent family ID by treating a family as a combination of head and spouse (if spouse exists). If you keep only household heads and drop years before 1970, (famid, year) should be an ID.
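To make the label harmonization in point 2 concrete, here is a minimal sketch of the subset rule described above. It is an illustration only, not the package's actual implementation.
```
function harmonize_labels(labels)
    kept = String[]
    for l in unique(labels)
        # skip l if an already-kept label is more specific (l is a subset of it)
        any(k -> occursin(l, k), kept) && continue
        # drop kept labels that are subsets of l, then keep l
        filter!(k -> !occursin(k, l), kept)
        push!(kept, l)
    end
    return join(kept, " or ")
end

harmonize_labels(["Asian", "Asian, Pacific Islander"])  # "Asian, Pacific Islander"
harmonize_labels(["White", "Black"])                    # "White or Black"
```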
# Notable Omissions
Certain variables are not in the family files. For example, the wealth data are in separate files, and there is some unique information in the individual file directly. In the future I plan to add support for these data, but you can manually add them by constructing the unique individual ID yourself as (ER30001 * 1000) + ER30002, and then joining your data on that ID with the dataset produced by PSID.jl.
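For example, a manual merge could look like the sketch below. The file name `wealth.csv` and the identifier column name `id_ind` are hypothetical; check the actual column names in your extract and in `output/allinds.csv`.
```
using CSV, DataFrames

allinds = CSV.read("output/allinds.csv", DataFrame)
wealth = CSV.read("wealth.csv", DataFrame)  # data you prepared yourself

# unique individual ID constructed as described above
wealth.id_ind = wealth.ER30001 .* 1000 .+ wealth.ER30002

merged = leftjoin(allinds, wealth, on = :id_ind)
```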
Please file issues if you find a bug.
# Donate your input JSON
If you've made an input JSON file containing variables useful for some topic, feel free to file an issue or make a PR to add your file to the examples.
| PSID | https://github.com/aaowens/PSID.jl.git |
|
[
"MIT"
] | 0.7.10 | 54b92e6e1bcc3444013cf29729637e86bf62dcd0 | code | 548 | using KitBase, CUDA
u = collect(-5.0:0.0001:5.0)
primL = [1.0, 0.0, 0.5]
primR = [0.125, 0.0, 0.625]
dt = 1.0
fL = maxwellian(u, primL)
fR = maxwellian(u, primR)
ff = similar(fL)
sfL = zero(fL)
sfR = zero(fR)
@time flux_kfvs!(ff, fL, fR, u, dt, sfL, sfR)
# 0.001932 seconds (6 allocations: 2.289 MiB)
u = collect(-5.0:0.0001:5.0) |> CuArray
fL = maxwellian(u, primL)
fR = maxwellian(u, primR)
ff = similar(fL)
sfL = zero(fL)
sfR = zero(fR)
@time flux_kfvs!(ff, fL, fR, u, dt, sfL, sfR)
# 0.000219 seconds (203 allocations: 15.125 KiB)
| Kinetic | https://github.com/vavrines/Kinetic.jl.git |
|
[
"MIT"
] | 0.7.10 | 54b92e6e1bcc3444013cf29729637e86bf62dcd0 | code | 1399 | """
This file compares the performance by using dynamic and static arrays to store solutions.
"""
using Kinetic, StaticArrays, BenchmarkTools, OffsetArrays
cd(@__DIR__)
#--- dynamic ---#
ks, ctr, face, t = Kinetic.initialize("bench_ds.txt")
@btime Kinetic.solve!(ks, ctr, face, t)
"""The result on my NUC is around 707 ms."""
#--- static ---#
ctr2 = deepcopy(ctr)
for i in eachindex(ctr2)
if i <= ks.pSpace.nx ÷ 2
ctr2[i] = ControlVolume1D2F(
ks.pSpace.x[i],
ks.pSpace.dx[i],
MVector{length(ks.ib.wL)}(ks.ib.wL),
MVector{length(ks.ib.wL)}(ks.ib.primL),
MVector{length(ks.ib.hL)}(ks.ib.hL),
MVector{length(ks.ib.hL)}(ks.ib.bL),
)
else
ctr2[i] = ControlVolume1D2F(
ks.pSpace.x[i],
ks.pSpace.dx[i],
MVector{length(ks.ib.wL)}(ks.ib.wR),
MVector{length(ks.ib.wL)}(ks.ib.primR),
MVector{length(ks.ib.hL)}(ks.ib.hR),
MVector{length(ks.ib.hL)}(ks.ib.bR),
)
end
end
face2 = deepcopy(face)
for i = 1:ks.pSpace.nx+1
face2[i] = Interface1D2F(
MVector{length(ks.ib.wL)}(ks.ib.wL),
MVector{length(ks.ib.hL)}(ks.ib.hL),
)
end
@btime Kinetic.solve!(ks, ctr2, face2, t)
"""
The result on my NUC is around 523 ms.
Further improvements are possible if we make all structs (ks, ctr, face) static.
"""
| Kinetic | https://github.com/vavrines/Kinetic.jl.git |
|
[
"MIT"
] | 0.7.10 | 54b92e6e1bcc3444013cf29729637e86bf62dcd0 | code | 2819 | using Distributed
addprocs(3)
@everywhere using KitBase, SharedArrays
begin
vars = Dict{String,Any}()
vars["case"] = "sod"
vars["space"] = "1d0f0v"
vars["flux"] = "kfvs"
vars["collision"] = "bgk"
vars["nSpecies"] = 1
vars["interpOrder"] = 1
vars["limiter"] = "vanleer"
vars["boundary"] = "fix"
vars["cfl"] = 0.5
vars["maxTime"] = 0.2
vars["x0"] = 0.0
vars["x1"] = 1.0
vars["nx"] = 2000
vars["pMeshType"] = "uniform"
vars["nxg"] = 0
vars["knudsen"] = 0.001
vars["mach"] = 0.0
vars["prandtl"] = 1.0
vars["inK"] = 0.0
vars["omega"] = 0.81
vars["alphaRef"] = 1.0
vars["omegaRef"] = 0.5
end
set = KitBase.set_setup(vars)
pSpace = KitBase.set_geometry(vars)
vSpace = KitBase.set_velocity(vars)
gas = KitBase.set_property(vars)
ib = KitBase.set_ib(vars, set, vSpace, gas)
folder = @__DIR__
ks = KitBase.SolverSet(set, pSpace, vSpace, gas, ib, folder)
dt = ks.pSpace.dx[1] / (5.0 + KitBase.sound_speed(ks.ib.primL, ks.gas.γ))
nt = floor(ks.set.maxTime / dt) |> Int
#--- parallel ---#
wp = SharedArray{Float64}((ks.pSpace.nx, 3), init = A -> (A = zeros(ks.pSpace.nx, 3)))
for i = 1:ks.pSpace.nx
if i <= ks.pSpace.nx ÷ 2
wp[i, :] .= ks.ib.wL
else
wp[i, :] .= ks.ib.wR
end
end
fwp = SharedArray{Float64}(
(ks.pSpace.nx + 1, 3),
init = A -> (A = zeros(ks.pSpace.nx + 1, 3)),
)
@time for iter = 1:nt÷3
@sync @distributed for i = 2:ks.pSpace.nx
flux = @view fwp[i, :]
KitBase.flux_gks!(
flux,
wp[i-1, :],
wp[i, :],
ks.gas.γ,
ks.gas.K,
ks.gas.μᵣ,
ks.gas.ω,
dt,
0.5 * ks.pSpace.dx[i-1],
0.5 * ks.pSpace.dx[i],
)
end
@sync @distributed for i = 2:ks.pSpace.nx-1
for j = 1:3
wp[i, j] += (fwp[i, j] - fwp[i+1, j]) / ks.pSpace.dx[i]
end
end
end
"""~13.620491 seconds (2.26 M allocations: 101.219 MiB, 0.22% gc time)"""
#--- serial ---#
w = zeros(ks.pSpace.nx, 3)
for i = 1:ks.pSpace.nx
if i <= ks.pSpace.nx ÷ 2
w[i, :] .= ks.ib.wL
else
w[i, :] .= ks.ib.wR
end
end
fw = zeros(ks.pSpace.nx + 1, 3)
@time for iter = 1:nt÷3
for i = 2:ks.pSpace.nx
flux = @view fw[i, :]
KitBase.flux_gks!(
flux,
w[i-1, :],
w[i, :],
ks.gas.γ,
ks.gas.K,
ks.gas.μᵣ,
ks.gas.ω,
dt,
0.5 * ks.pSpace.dx[i-1],
0.5 * ks.pSpace.dx[i],
)
end
for i = 2:ks.pSpace.nx-1
for j = 1:3
w[i, j] += (fw[i, j] - fw[i+1, j]) / ks.pSpace.dx[i]
end
end
end
"""~20.830331 seconds (323.96 M allocations: 24.472 GiB, 16.89% gc time)"""
| Kinetic | https://github.com/vavrines/Kinetic.jl.git |
|
[
"MIT"
] | 0.7.10 | 54b92e6e1bcc3444013cf29729637e86bf62dcd0 | code | 2161 | push!(LOAD_PATH, "..")
import Pkg
Pkg.add("Documenter")
using Documenter, Kinetic
using Kinetic: KitBase, KitML
tutorial_page = [
"Examples" => "tutorial.md",
"Advection diffusion" => "eg_advection.md",
"Burgers" => "eg_burgers.md",
"Shock tube" => "eg_shock.md",
"Lid-driven cavity" => "eg_cavity.md",
]
type_page = [
"Configuration" => "type_config.md",
"Setup" => "type_setup.md",
"Domain" => "type_domain.md",
"Velocity" => "type_velocity.md",
"Property" => "type_property.md",
"Condition" => "type_ib.md",
"FVM" => "type_fvm.md",
]
solver_page = [
"Framework" => "solver.md",
"Preprocess" => "solver_pre.md",
"Timestep" => "solver_timestep.md",
"Reconstruction" => "solver_reconstruction.md",
"Flux" => "solver_flux.md",
"Update" => "solver_update.md",
"Postprocess" => "solver_post.md",
]
utility_page = [
"I / O" => "api_io.md",
"Math" => "api_math.md",
"Theory" => "api_theory.md",
"Physical space" => "api_geo.md",
"Phase space" => "api_phase.md",
"Configuration" => "api_config.md",
"Stepper" => "api_step.md",
]
parallel_page = [
"General" => "parallel.md",
"Threading" => "para_thread.md",
"Distributed" => "para_dist.md",
"CUDA" => "para_cuda.md",
]
ml_page = ["KitML" => "kitml1.md", "UBE" => "kitml2.md"]
fortran_page = ["KitFort" => "fortran1.md", "Benchmark" => "fortran2.md"]
format = Documenter.HTML(assets = ["assets/favicon.ico"], collapselevel = 1)
makedocs(
sitename = "Kinetic.jl",
modules = [Kinetic, KitBase, KitML],
pages = Any[
"Home"=>"index.md",
"Installation"=>"install.md",
"Physics"=>"physics.md",
"Type"=>type_page,
"Solver"=>solver_page,
"Tutorial"=>tutorial_page,
"Parallelization"=>parallel_page,
"Utility"=>utility_page,
"SciML"=>ml_page,
"Fortran"=>fortran_page,
"Index"=>"function_index.md",
"Python"=>"python.md",
"Contribution"=>"contribution.md",
"Reference"=>"reference.md",
],
format = format,
)
deploydocs(repo = "github.com/vavrines/Kinetic.jl.git")
| Kinetic | https://github.com/vavrines/Kinetic.jl.git |
|
[
"MIT"
] | 0.7.10 | 54b92e6e1bcc3444013cf29729637e86bf62dcd0 | code | 760 | using KitBase, Plots
using KitBase: ib_cavity
using KitBase.ProgressMeter: @showprogress
set = Setup(
    case = "cavity",
space = "2d2f2v",
boundary = ["maxwell", "maxwell", "maxwell", "maxwell"],
limiter = "minmod",
cfl = 0.5,
maxTime = 10, # time
)
ps = PSpace2D(0, 1, 45, 0, 1, 45)
vs = VSpace2D(-5, 5, 28, -5, 5, 28)
gas = Gas(Kn = 0.075, K = 1.0)
ib = IB2F(ib_cavity(set, ps, vs, gas)...)
ks = SolverSet(set, ps, vs, gas, ib)
ctr, a1face, a2face = init_fvm(ks)
t = 0.0
dt = timestep(ks, ctr, 0.0)
nt = ks.set.maxTime ÷ dt |> Int
res = zeros(4)
@showprogress for iter = 1:nt
evolve!(ks, ctr, a1face, a2face, dt)
update!(ks, ctr, a1face, a2face, dt, res)
if maximum(res) < 1e-6
break
end
end
plot(ks, ctr)
| Kinetic | https://github.com/vavrines/Kinetic.jl.git |
|
[
"MIT"
] | 0.7.10 | 54b92e6e1bcc3444013cf29729637e86bf62dcd0 | code | 2066 | using KitBase, Plots
using KitBase.ProgressMeter: @showprogress
pyplot()
begin
set = Setup(
case = "cylinder",
space = "2d2f2v",
boundary = ["maxwell", "extra", "mirror", "mirror"],
limiter = "minmod",
cfl = 0.5,
maxTime = 2.0, # time
)
ps = CSpace2D(1.0, 6.0, 30, 0.0, π, 50, 1, 1)
vs = VSpace2D(-10.0, 10.0, 48, -10.0, 10.0, 48)
gas = Gas(Kn = 1e-3, Ma = 4.0, K = 1.0)
prim0 = [1.0, 0.0, 0.0, 1.0]
prim1 = [1.0, gas.Ma * sound_speed(1.0, gas.γ), 0.0, 1.0]
fw = (args...) -> prim_conserve(prim1, gas.γ)
ff = function (args...)
prim = conserve_prim(fw(args...), gas.γ)
h = maxwellian(vs.u, vs.v, prim)
b = h .* gas.K / 2 / prim[end]
return h, b
end
bc = function (x, y, args...)
if abs(x^2 + y^2 - 1) < 1e-3
return prim0
else
return prim1
end
end
ib = IB2F(fw, ff, bc, NamedTuple())
ks = SolverSet(set, ps, vs, gas, ib)
end
ctr, a1face, a2face = init_fvm(ks)
cd(@__DIR__)
t = 0.0
dt = timestep(ks, ctr, 0.0)
nt = ks.set.maxTime ÷ dt |> Int
res = zeros(4)
@showprogress for iter = 1:nt
evolve!(ks, ctr, a1face, a2face, dt)
update!(ks, ctr, a1face, a2face, dt, res)
for j = ks.ps.nθ÷2+1:ks.ps.nθ
ctr[ks.ps.nr+1, j].w .= ks.ib.fw(6, 0)
ctr[ks.ps.nr+1, j].prim .= conserve_prim(ctr[ks.ps.nr+1, j].w, ks.gas.γ)
ctr[ks.ps.nr+1, j].sw .= 0.0
ctr[ks.ps.nr+1, j].h .= maxwellian(ks.vs.u, ks.vs.v, ctr[ks.ps.nr+1, j].prim)
ctr[ks.ps.nr+1, j].b .=
ctr[ks.ps.nr+1, j].h .* ks.gas.K ./ 2 ./ ctr[ks.ps.nr+1, j].prim[end]
end
global t += dt
if maximum(res) < 1e-6
break
end
end
begin
sol = zeros(ks.ps.nr, ks.ps.nθ, 4)
for i in axes(sol, 1), j in axes(sol, 2)
sol[i, j, :] .= ctr[i, j].prim
sol[i, j, end] = 1 / sol[i, j, end]
end
contourf(
ps.x[1:ks.ps.nr, 1:ks.ps.nθ],
ps.y[1:ks.ps.nr, 1:ks.ps.nθ],
sol[:, :, 4],
ratio = 1,
)
end
| Kinetic | https://github.com/vavrines/Kinetic.jl.git |
|
[
"MIT"
] | 0.7.10 | 54b92e6e1bcc3444013cf29729637e86bf62dcd0 | code | 1152 | using KitBase, OrdinaryDiffEq, Plots
set = config_ntuple(u0 = -8, u1 = 8, nu = 80, t1 = 8, nt = 30, Kn = 1)
tspan = (0, set.t1)
tsteps = linspace(tspan[1], tspan[2], set.nt)
γ = 3.0
vs = VSpace1D(set.u0, set.u1, set.nu)
f0 = @. 0.5 * (1 / π)^0.5 * (exp.(-(vs.u - 2)^2) + 0.5 * exp(-(vs.u + 2)^2))
prim0 = conserve_prim(moments_conserve(f0, vs.u, vs.weights), γ)
M0 = maxwellian(vs.u, prim0)
mu0 = ref_vhs_vis(set.Kn, set.α, set.ω)
τ0 = mu0 * 2.0 * prim0[end]^(0.5) / prim0[1]
# BGK
prob1 = ODEProblem(bgk_ode!, f0, tspan, [M0, τ0])
sol_bgk = solve(prob1, Tsit5(), saveat = tsteps)
# Shakhov
q = heat_flux(f0, prim0, vs.u, vs.weights)
S0 = shakhov(vs.u, M0, q, prim0, 2 / 3)
prob2 = ODEProblem(bgk_ode!, f0, tspan, [M0 .+ S0, τ0])
sol_shakhov = solve(prob2, Tsit5(), saveat = tsteps)
# ES-BGK
prob3 = ODEProblem(esbgk_ode!, f0, tspan, [vs.u, vs.weights, prim0, 2 / 3, τ0])
sol_es = solve(prob3, Tsit5(), saveat = tsteps)
idx = (rand() * set.nt |> round |> Int) + 1
begin
plot(vs.u, sol_bgk[idx]; label = "BGK")
plot!(vs.u, sol_shakhov[idx]; label = "Shakhov", line = :dash)
scatter!(vs.u, sol_es[idx]; label = "ES", alpha = 0.5)
end
| Kinetic | https://github.com/vavrines/Kinetic.jl.git |
|
[
"MIT"
] | 0.7.10 | 54b92e6e1bcc3444013cf29729637e86bf62dcd0 | code | 4647 | using KitBase
using KitBase.OffsetArrays
using KitBase.ProgressMeter: @showprogress
cd(@__DIR__)
D = read_dict("shear.txt")
for key in keys(D)
s = Symbol(key)
@eval $s = $(D[key])
end
begin
γ = heat_capacity_ratio(inK, 2)
set = set_setup(D)
pSpace = PSpace1D(x0, x1, nx, nxg)
vSpace =
VSpace2D(umin, umax, nu, vmin, vmax, nv; type = vMeshType, ngu = nug, ngv = nvg)
μᵣ = ref_vhs_vis(knudsen, alphaRef, omegaRef)
gas = set_property(D)
primL = [1.0, 0.0, 1.0, 1.0]
primR = [1.0, 0.0, -1.0, 2.0]
wL = prim_conserve(primL, γ)
wR = prim_conserve(primR, γ)
HL = maxwellian(vSpace.u, vSpace.v, primL)
HR = maxwellian(vSpace.u, vSpace.v, primR)
BL = HL .* inK ./ (2.0 * primL[end])
BR = HR .* inK ./ (2.0 * primR[end])
bc = zeros(4)
p = (
x0 = x0,
x1 = x1,
wL = wL,
wR = wR,
primL = primL,
primR = primR,
HL = HL,
HR = HR,
BL = BL,
BR = BR,
)
fw = function (x, p)
if x <= (p.x0 + p.x1) / 2
return p.wL
else
return p.wR
end
end
ff = function (x, p)
if x <= (p.x0 + p.x1) / 2
return p.HL, p.BL
else
return p.HR, p.BR
end
end
bc = function (x, p)
if x <= (p.x0 + p.x1) / 2
return p.primL
else
return p.primR
end
end
ib = IB2F(fw, ff, bc, p)
ks = SolverSet(set, pSpace, vSpace, gas, ib)
end
begin
ctr = OffsetArray{ControlVolume2F}(undef, axes(ks.pSpace.x, 1))
face = Array{Interface2F}(undef, ks.pSpace.nx + 1)
idx0 = (eachindex(pSpace.x)|>collect)[1]
idx1 = (eachindex(pSpace.x)|>collect)[end]
for i in eachindex(ctr)
w = ks.ib.fw(ks.ps.x[i], ks.ib.p)
prim = conserve_prim(w, γ)
h, b = ks.ib.ff(ks.ps.x[i], ks.ib.p)
ctr[i] = ControlVolume(w, prim, h, b, 2)
end
for i = 1:ks.pSpace.nx+1
fw = deepcopy(ks.ib.fw(ks.ps.x[1], ks.ib.p))
ff = deepcopy(ks.ib.ff(ks.ps.x[1], ks.ib.p)[1])
face[i] = Interface(fw, ff, ff, 2)
end
end
begin
iter = 0
res = zeros(4)
simTime = 0.0
dt = Kinetic.timestep(ks, ctr, simTime)
maxTime = vhs_collision_time(ks.ib.bc(ks.ps.x0, ks.ib.p), μᵣ, omega)
nt = Int(floor(maxTime / dt))
end
# There is no default solver for a 1D physical space combined with a 2D velocity setting
# Let's do it manually
@showprogress for iter = 1:nt
#Kinetic.reconstruct!(ks, ctr)
@inbounds Threads.@threads for i in eachindex(face)
flux_kfvs!(
face[i].fw,
face[i].fh,
face[i].fb,
ctr[i-1].h,
ctr[i-1].b,
ctr[i].h,
ctr[i].b,
ks.vSpace.u,
ks.vSpace.v,
ks.vSpace.weights,
dt,
1.0,
)
end
@inbounds Threads.@threads for i = 1:ks.pSpace.nx
#--- store W^n and calculate shakhov term ---#
w_old = deepcopy(ctr[i].w)
#--- update W^{n+1} ---#
@. ctr[i].w += (face[i].fw - face[i+1].fw) / ks.ps.dx[i]
ctr[i].prim .= conserve_prim(ctr[i].w, ks.gas.γ)
#--- calculate M^{n+1} and tau^{n+1} ---#
MH = maxwellian(ks.vSpace.u, ks.vSpace.v, ctr[i].prim)
MB = MH .* ks.gas.K ./ (2.0 * ctr[i].prim[end])
τ = vhs_collision_time(ctr[i].prim, ks.gas.μᵣ, ks.gas.ω)
#--- update distribution function ---#
for q in axes(MH, 2), p in axes(MH, 1)
ctr[i].h[p, q] =
(
ctr[i].h[p, q] +
(face[i].fh[p, q] - face[i+1].fh[p, q]) / ks.ps.dx[i] +
dt / τ * MH[p, q]
) / (1.0 + dt / τ)
ctr[i].b[p, q] =
(
ctr[i].b[p, q] +
(face[i].fb[p, q] - face[i+1].fb[p, q]) / ks.ps.dx[i] +
dt / τ * MB[p, q]
) / (1.0 + dt / τ)
end
end
end
sol = zeros(ks.pSpace.nx, 10)
for i = 1:ks.pSpace.nx
sol[i, 1:3] = ctr[i].prim[1:3]
sol[i, 4] = 1.0 / ctr[i].prim[4]
end
using Plots
plot(ks.pSpace.x[1:ks.pSpace.nx], sol[:, 1])
plot(ks.pSpace.x[1:ks.pSpace.nx], sol[:, 2])
plot(ks.pSpace.x[1:ks.pSpace.nx], sol[:, 3])
plot(ks.pSpace.x[1:ks.pSpace.nx], sol[:, 4])
u1d = VSpace1D(umin, umax, nu)
f = zeros(ks.vSpace.nv)
for j in axes(ctr[1].h, 2)
f[j] =
0.5 * (
sum(@. u1d.weights * ctr[ks.pSpace.nx÷2].h[:, j]) +
sum(@. u1d.weights * ctr[ks.pSpace.nx÷2+1].h[:, j])
)
end
plot(ks.vSpace.v[end÷2, :, 1], f)
| Kinetic | https://github.com/vavrines/Kinetic.jl.git |
|
[
"MIT"
] | 0.7.10 | 54b92e6e1bcc3444013cf29729637e86bf62dcd0 | code | 120 | using KitBase, Plots
cd(@__DIR__)
ks, ctr, face, t = initialize("sod.txt")
t = solve!(ks, ctr, face, t)
plot(ks, ctr)
| Kinetic | https://github.com/vavrines/Kinetic.jl.git |
|
[
"MIT"
] | 0.7.10 | 54b92e6e1bcc3444013cf29729637e86bf62dcd0 | code | 636 | using KitBase, Plots
using KitBase.ProgressMeter: @showprogress
cd(@__DIR__)
ks = SolverSet("mixture_shock.txt")
ctr, face = init_fvm(ks)
begin
iter = 0
res = zeros(3, 2)
t = 0.0
dt = timestep(ks, ctr, t)
nt = Int(floor(ks.set.maxTime / dt))
end
@showprogress for iter = 1:nt
reconstruct!(ks, ctr)
evolve!(ks, ctr, face, dt)
update!(ks, ctr, face, dt, res; bc = :fix)
end
sol = zeros(ks.ps.nx, 6)
for i in axes(sol, 1)
sol[i, 1:3] .= ctr[i].prim[:, 1]
sol[i, 4:6] .= ctr[i].prim[:, 2]
end
plot(ks.ps.x[1:ks.ps.nx], sol[:, 1] ./ ks.gas.mi)
plot!(ks.ps.x[1:ks.ps.nx], sol[:, 4] ./ ks.gas.me)
| Kinetic | https://github.com/vavrines/Kinetic.jl.git |
|
[
"MIT"
] | 0.7.10 | 54b92e6e1bcc3444013cf29729637e86bf62dcd0 | code | 1981 | using Kinetic, OrdinaryDiffEq, SciMLSensitivity, Plots
using KitML.Solaris
using KitML.Solaris.Flux: Adam, throttle
set = Setup(
case = "shock",
space = "1d1f1v",
collision = "shakhov",
interpOrder = 1,
maxTime = 10,
)
ps = PSpace1D(-25, 25, 50, 0)
gas = Gas(Kn = 1, Ma = 2, Pr = 2 / 3, K = 0, γ = 3)
vs = VSpace1D(-10, 10, 72)
fw, ff, bc, p = Kinetic.KitBase.ib_rh(set, ps, vs, gas)
ib = IB1F(fw, ff, bc, p)
ks = SolverSet(set, ps, vs, gas, ib)
ctr, face = init_fvm(ks)
dt = timestep(ks, ctr, 0.0)
sumRes = zeros(3)
for iter = 1:123
evolve!(ks, ctr, face, dt)
update!(ks, ctr, face, dt, sumRes)
end
X = Array{Float32}(undef, ks.vSpace.nu, ks.pSpace.nx)
for i = 1:ks.pSpace.nx
    X[:, i] .= ctr[i].f
end
function shakhov!(df, f, p, t)
M, tau = p
df .= (M .- f) ./ tau
end
M = Array{Float32}(undef, vs.nu, size(X, 2))
SM = Array{Float32}(undef, vs.nu, size(X, 2))
τ = Array{Float32}(undef, 1, size(X, 2))
for i in axes(X, 2)
M[:, i] .= maxwellian(ks.vSpace.u, ctr[i].prim)
q = heat_flux(ctr[i].f, ctr[i].prim, ks.vSpace.u, ks.vSpace.weights)
S = shakhov(ks.vSpace.u, M[:, i], q, ctr[i].prim, ks.gas.Pr)
SM[:, i] .= M[:, i] .+ S
τ[1, i] = vhs_collision_time(ctr[i].prim, ks.gas.μᵣ, ks.gas.ω)
end
P = [SM, τ]
tspan = (0, dt)
prob = ODEProblem(shakhov!, X, tspan, P)
Y = solve(prob, Euler(), dt = dt) |> Array
model_univ = FnChain(FnDense(vs.nu, vs.nu * 2, tanh), FnDense(vs.nu * 2, vs.nu))
p_model = init_params(model_univ)
function dfdt(f, p, t)
df = (M .- f) ./ τ .+ model_univ(f .- M, p)
end
prob_ube = ODEProblem(dfdt, X, tspan, p_model)
function loss(p)
sol_ube = solve(prob_ube, Euler(), u0 = X, p = p, dt = dt)
loss = sum(abs2, Array(sol_ube) .- Y)
return loss
end
cb = function (p, l)
display(l)
return false
end
res = sci_train(loss, p_model, Adam(), cb = throttle(cb, 1), maxiters = 200)
sol = solve(prob_ube, Euler(), u0 = X, p = res.u, dt = dt)
contour(ks.ps.x, ks.vs.u, sol.u[end])
| Kinetic | https://github.com/vavrines/Kinetic.jl.git |
|
[
"MIT"
] | 0.7.10 | 54b92e6e1bcc3444013cf29729637e86bf62dcd0 | code | 873 | using KitBase
using KitBase.ProgressMeter: @showprogress
cd(@__DIR__)
ks, ctr, face, t = initialize("briowu_1d.txt")
dt = timestep(ks, ctr, t)
nt = Int(floor(ks.set.maxTime / dt)) + 1
res = zeros(5, 2)
@showprogress for iter = 1:nt
reconstruct!(ks, ctr)
evolve!(ks, ctr, face, dt; mode = :kcu, isPlasma = true)
update!(ks, ctr, face, dt, res; coll = :bgk, bc = :extra, isMHD = true)
end
sol = zeros(ks.ps.nx, 10, 2)
for i = 1:ks.ps.nx
sol[i, 1, 1] = ctr[i].prim[1, 1]
sol[i, 1, 2] = ctr[i].prim[1, 2] / ks.gas.me
sol[i, 2:4, 1] .= ctr[i].prim[2:4, 1]
sol[i, 2:4, 2] .= ctr[i].prim[2:4, 2]
sol[i, 5, 1] = 1.0 / ctr[i].prim[5, 1]
sol[i, 5, 2] = ks.gas.me / ctr[i].prim[5, 2]
sol[i, 6, 1] = ctr[i].B[2]
sol[i, 6, 2] = ctr[i].E[1]
end
using Plots
plot(ks.ps.x[1:ks.ps.nx], sol[:, 1, :])
plot(ks.ps.x[1:ks.ps.nx], sol[:, 6, :])
| Kinetic | https://github.com/vavrines/Kinetic.jl.git |
|
[
"MIT"
] | 0.7.10 | 54b92e6e1bcc3444013cf29729637e86bf62dcd0 | code | 4979 | using KitBase, KitBase.OffsetArrays
using KitBase.ProgressMeter: @showprogress
cd(@__DIR__)
D = read_dict("briowu_2d.txt")
for key in keys(D)
s = Symbol(key)
@eval $s = $(D[key])
end
begin
    γ = heat_capacity_ratio(inK, 3)
set = set_setup(D)
ps = set_geometry(D)
ue0 = umin * sqrt(mi / me)
ue1 = umax * sqrt(mi / me)
ve0 = vmin * sqrt(mi / me)
ve1 = vmax * sqrt(mi / me)
kne = knudsen * (me / mi)
vs = MVSpace2D(
umin,
umax,
ue0,
ue1,
nu,
vmin,
vmax,
ve0,
ve1,
nv;
type = vMeshType,
ngu = nug,
ngv = nvg,
)
plasma = Plasma1D(
[knudsen, kne],
mach,
prandtl,
inK,
γ,
mi,
ni,
me,
ne,
lD,
rL,
sol,
echi,
bnu,
)
begin
# upstream
primL = zeros(5, 2)
primL[1, 1] = 1.0 * mi
primL[2, 1] = 0.0
primL[3, 1] = 0.0
primL[4, 1] = 0.0
primL[5, 1] = mi / 1.0
primL[1, 2] = 1.0 * me
primL[2, 2] = 0.0
primL[3, 2] = 0.0
primL[4, 2] = 0.0
primL[5, 2] = me / 1.0
wL = mixture_prim_conserve(primL, γ)
h0L = mixture_maxwellian(vs.u, vs.v, primL)
h1L = similar(h0L)
h2L = similar(h0L)
for j in axes(h0L, 3)
h1L[:, :, j] .= primL[4, j] .* h0L[:, :, j]
h2L[:, :, j] .= (primL[4, j]^2 + 1.0 / (2.0 * primL[end, j])) .* h0L[:, :, j]
end
EL = zeros(3)
BL = zeros(3)
BL[1] = 0.75
BL[2] = 1.0
# downstream
primR = zeros(5, 2)
primR[1, 1] = 0.125 * mi
primR[2, 1] = 0.0
primR[3, 1] = 0.0
primR[4, 1] = 0.0
primR[5, 1] = mi * 1.25
primR[1, 2] = 0.125 * me
primR[2, 2] = 0.0
primR[3, 2] = 0.0
primR[4, 2] = 0.0
primR[5, 2] = me * 1.25
wR = mixture_prim_conserve(primR, γ)
h0R = mixture_maxwellian(vs.u, vs.v, primR)
h1R = similar(h0R)
h2R = similar(h0R)
for j in axes(h0R, 3)
h1R[:, :, j] .= primR[4, j] .* h0R[:, :, j]
h2R[:, :, j] .= (primR[4, j]^2 + 1.0 / (2.0 * primR[end, j])) .* h0R[:, :, j]
end
ER = zeros(3)
BR = zeros(3)
BR[1] = 0.75
BR[2] = -1.0
lorenzL = zeros(3, 2)
lorenzR = zeros(3, 2)
bcL = zeros(5, 2)
bcR = zeros(5, 2)
p = (
x0 = x0,
x1 = x1,
wL = wL,
wR = wR,
primL = primL,
primR = primR,
h0L = h0L,
h1L = h1L,
h2L = h2L,
h0R = h0R,
h1R = h1R,
h2R = h2R,
EL = EL,
ER = ER,
BL = BL,
BR = BR,
lorenzL = lorenzL,
lorenzR = lorenzR,
)
fw = function (x, p)
if x <= (p.x0 + p.x1) / 2
return p.wL
else
return p.wR
end
end
ff = function (x, p)
if x <= (p.x0 + p.x1) / 2
return p.h0L, p.h1L, p.h2L
else
return p.h0R, p.h1R, p.h2R
end
end
fE = function (x, p)
if x <= (p.x0 + p.x1) / 2
return p.EL
else
return p.ER
end
end
fB = function (x, p)
if x <= (p.x0 + p.x1) / 2
return p.BL
else
return p.BR
end
end
fL = function (x, p)
if x <= (p.x0 + p.x1) / 2
return p.lorenzL
else
return p.lorenzR
end
end
bc = function (x, p)
if x <= (p.x0 + p.x1) / 2
return p.primL
else
return p.primR
end
end
ib = IB3F(fw, ff, fE, fB, fL, bc, p)
end
ks = SolverSet(set, ps, vs, plasma, ib)
end
ctr, face = init_fvm(ks)
begin
t = 0.0
dt = timestep(ks, ctr, t)
nt = Int(floor(ks.set.maxTime / dt)) + 1
res = zeros(5, 2)
end
@showprogress for iter = 1:nt
reconstruct!(ks, ctr)
evolve!(ks, ctr, face, dt; mode = :kcu, isPlasma = :true)
update!(ks, ctr, face, dt, res, isMHD = true)
end
solution = zeros(ks.ps.nx, 10, 2)
for i = 1:ks.ps.nx
    solution[i, 1, 1] = ctr[i].prim[1, 1]
    solution[i, 1, 2] = ctr[i].prim[1, 2] / ks.gas.me
    solution[i, 2:4, 1] .= ctr[i].prim[2:4, 1]
    solution[i, 2:4, 2] .= ctr[i].prim[2:4, 2]
    solution[i, 5, 1] = 1.0 / ctr[i].prim[5, 1]
    solution[i, 5, 2] = ks.gas.me / ctr[i].prim[5, 2]
    solution[i, 6, 1] = ctr[i].B[2]
    solution[i, 6, 2] = ctr[i].E[1]
end
using Plots
plot(ks.ps.x[1:ks.ps.nx], solution[:, 1, 1:2])
plot(ks.ps.x[1:ks.ps.nx], solution[:, 6, 1:2])
| Kinetic | https://github.com/vavrines/Kinetic.jl.git |
|
[
"MIT"
] | 0.7.10 | 54b92e6e1bcc3444013cf29729637e86bf62dcd0 | code | 1909 | using KitBase
using KitBase.ProgressMeter: @showprogress
begin
# space
x0 = -1.5
x1 = 1.5
y0 = -1.5
y1 = 1.5
nx = 100
ny = 100
dx = (x1 - x0) / nx
dy = (y1 - y0) / ny
pspace = PSpace2D(x0, x1, nx, y0, y1, ny)
# time
tEnd = 1.0
cfl = 0.95
# quadrature
quadratureorder = 6
points, weights = octa_quadrature(quadratureorder)
nq = size(points, 1)
# particle
SigmaS = 1 * ones(ny + 4, nx + 4)
SigmaA = 0 * ones(ny + 4, nx + 4)
SigmaT = SigmaS + SigmaA
end
# initial distribution
phi = zeros(nq, nx, ny)
s2 = 0.03^2
flr = 1e-4
init_field(x, y) = max(flr, 1.0 / (4.0 * pi * s2) * exp(-(x^2 + y^2) / 4.0 / s2))
for j = 1:nx
for i = 1:ny
y = y0 + dy / 2 + (i - 3) * dy
x = x0 + dx / 2 + (j - 3) * dx
for q = 1:nq
phi[q, i, j] = init_field(x, y) / 4.0 / π
end
end
end
dt = cfl / 2 * (dx * dy) / (dx + dy)
global t = 0.0
flux1 = zeros(nq, nx + 1, ny)
flux2 = zeros(nq, nx, ny + 1)
@showprogress for iter = 1:50
for i = 2:nx, j = 1:ny
tmp = @view flux1[:, i, j]
flux_kfvs!(tmp, phi[:, i-1, j], phi[:, i, j], points[:, 1], dt)
end
for i = 1:nx, j = 2:ny
tmp = @view flux2[:, i, j]
flux_kfvs!(tmp, phi[:, i, j-1], phi[:, i, j], points[:, 2], dt)
end
for j = 1:ny, i = 1:nx
integral = discrete_moments(phi[:, i, j], weights)
integral *= 1.0 / 4.0 / pi
for q = 1:nq
phi[q, i, j] =
phi[q, i, j] +
(flux1[q, i, j] - flux1[q, i+1, j]) / dx +
(flux2[q, i, j] - flux2[q, i, j+1]) / dy +
(integral - phi[q, i, j]) * dt
end
end
global t += dt
end
ρ = zeros(nx, ny)
for i = 1:nx, j = 1:ny
ρ[i, j] = discrete_moments(phi[:, i, j], weights)
end
using Plots
contourf(pspace.x[1:nx, 1], pspace.y[1, 1:ny], ρ[:, :])
| Kinetic | https://github.com/vavrines/Kinetic.jl.git |
|
[
"MIT"
] | 0.7.10 | 54b92e6e1bcc3444013cf29729637e86bf62dcd0 | code | 878 | using KitBase, Plots
set = Setup(
matter = "scalar", # material
case = "advection", # test case
space = "1d0f0v", # phase space
flux = "gks", # flux
collision = "", # collision: for scalar conservation laws there are none
interpOrder = 1, # interpolation order
boundary = "period", # boundary
cfl = 0.5, # cfl
maxTime = 1.0, # simulation time
)
pSpace = PSpace1D(0.0, 1.0, 100, 1)
vSpace = nothing
property = Scalar(1.0, 1e-6)
ib = IB((x, p) -> sin(2π * x), property)
ks = SolverSet(set, pSpace, vSpace, property, ib)
ctr, face = init_fvm(ks)
t = 0.0
dt = timestep(ks, ctr, t)
nt = ks.set.maxTime ÷ dt |> Int
anim = @animate for iter = 1:nt
reconstruct!(ks, ctr)
evolve!(ks, ctr, face, dt)
update!(ks, ctr, face, dt, 0.0)
plot(ks, ctr, xlabel = "x", label = "u", ylims = [-1, 1])
end
gif(anim, "advection.gif", fps = 45)
| Kinetic | https://github.com/vavrines/Kinetic.jl.git |
|
[
"MIT"
] | 0.7.10 | 54b92e6e1bcc3444013cf29729637e86bf62dcd0 | code | 307 | """
Kinetic.jl: A Portable Framework for Scientific and Neural Computing
Copyright (c) 2020-2023 Tianbai Xiao ([email protected])
"""
module Kinetic
export 転
"""
A lightweight framework for scientific and neural-network computing
"転" means "rolling" in Japanese
"""
const 転 = Kinetic
using Reexport
@reexport using KitBase
@reexport using KitML
end
| Kinetic | https://github.com/vavrines/Kinetic.jl.git |
|
[
"MIT"
] | 0.7.10 | 54b92e6e1bcc3444013cf29729637e86bf62dcd0 | code | 185 | using Kinetic, Plots
# initialization
set, ctr, xface, yface, t = initialize("config.toml")
# solution algorithm
t = solve!(set, ctr, xface, yface, t)
# visualization
plot(set, ctr)
| Kinetic | https://github.com/vavrines/Kinetic.jl.git |
|
[
"MIT"
] | 0.7.10 | 54b92e6e1bcc3444013cf29729637e86bf62dcd0 | docs | 58 | # Known Issues
- FFT doesn't work well with offset arrays | Kinetic | https://github.com/vavrines/Kinetic.jl.git |
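A minimal illustration of one possible workaround, assuming FFTW is the FFT backend: strip the offset axes before calling the transform (collecting the array into a plain `Array` also works).
```julia
using FFTW, OffsetArrays

a = OffsetArray(rand(ComplexF64, 16), -8:7)
# fft(a)                              # fails: FFTW requires 1-based indexing
fft(OffsetArrays.no_offset_view(a))   # operates on the underlying 1-based data
```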
|
[
"MIT"
] | 0.7.10 | 54b92e6e1bcc3444013cf29729637e86bf62dcd0 | docs | 8632 | <div align="center">
<h1>Kinetic</h1>
<img
src="https://i.postimg.cc/ncXfgjXd/dancing-circles.gif"
alt="Kinetic Logo" width="300">
</img>
[](https://juliahub.com/ui/Packages/Kinetic/wrVmu)
[](https://xiaotianbai.com/Kinetic.jl/dev/)
[](https://xiaotianbai.com/Kinetic.jl/stable/)
[](https://joss.theoj.org/papers/65d56efef938caf92c2cc942d2c25ea4)
[](https://pkgs.genieframework.com?packages=Kinetic)
<!--[](https://pkgs.genieframework.com?packages=Kinetic)-->
| [Kinetic](https://github.com/vavrines/Kinetic.jl) | [KitBase](https://github.com/vavrines/KitBase.jl) | [KitML](https://github.com/vavrines/KitML.jl) | [KitFort](https://github.com/vavrines/KitFort.jl) |
| ---------- | --------- | ---------------- | ------ |
|  |  |  |  |
| [](https://codecov.io/gh/vavrines/Kinetic.jl) | [](https://codecov.io/gh/vavrines/KitBase.jl) | [](https://codecov.io/gh/vavrines/KitML.jl) | [](https://codecov.io/gh/vavrines/KitFort.jl) |
 |  |  | 
</div>
<!--



[](https://github.com/SciML/ColPrac)
[](https://github.com/vavrines/Kinetic.jl)
[](https://badges.pufler.dev)
-->
<!--<div align="center"> <img
src="https://i.postimg.cc/ncXfgjXd/dancing-circles.gif"
alt="Kinetic Logo" width="300"></img>
</div>-->
<!--
# Kinetic.jl
<img src="https://i.postimg.cc/ncXfgjXd/dancing-circles.gif" width="300"/>
-->
**Kinetic** is a computational fluid dynamics toolbox written in <a href="https://julialang.org"><img src="https://raw.githubusercontent.com/JuliaLang/julia-logo-graphics/master/images/julia.ico" width="16em">Julia.</a>
It aims to furnish efficient modeling and simulation methodologies for fluid dynamics, augmented by the power of machine learning.
Based on differentiable programming, mechanical and neural network models are fused and solved in a unified framework.
Simultaneous 1-3 dimensional numerical simulations can be performed on CPUs and GPUs.
The ecosystem follows the modular design philosophy.
Depending on the specific use case, the main module is split into portable components to reduce the latency caused by the LLVM [just-in-time](https://llvm.org/docs/tutorial/index.html#building-a-jit-in-llvm) compiler:
- [KitBase](https://github.com/vavrines/KitBase.jl): physical models and numerical schemes
- [KitML](https://github.com/vavrines/KitML.jl): neural models and machine learning methods
- [KitFort](https://github.com/vavrines/KitFort.jl): optional high-performance Fortran backend
- [FluxReconstruction](https://github.com/vavrines/FluxReconstruction.jl): high-fidelity solution algorithms
- [Langevin](https://github.com/vavrines/Langevin.jl): intrusive uncertainty quantification methods
- [kineticpy](https://github.com/vavrines/kineticpy): Python interface built on top of [pyjulia](https://github.com/JuliaPy/pyjulia)
## Installation
Kinetic is a registered package in the official [Julia package registry](https://github.com/JuliaRegistries/General).
We recommend installing it with the Julia package manager.
From the Julia REPL, you can get in the package manager (by pressing `]`) and add the package
```julia
julia> ]
(v1.9) pkg> add Kinetic
```
This will automatically install a currently stable release and all its dependencies.
## Physics
Kinetic models and simulates fluid dynamics problems from the perspective of particle transport.
Any advection-diffusion-type equation of different particles, including molecules, photons, plasmas, neutrons, etc., can be solved within the framework.
Special attention has been paid to Hilbert's sixth problem, i.e. building the numerical passage between the kinetic theory of gases, e.g. the Boltzmann equation, and continuum mechanics, e.g. the Euler and Navier-Stokes equations.
A partial list of currently supported models and equations includes:
- Boltzmann equation
- radiative transfer equation
- Fokker-Planck-Landau equation
- direct simulation Monte Carlo
- advection-diffusion equation
- Burgers equation
- Euler equations
- Navier-Stokes equations
- Magnetohydrodynamical equations
- Maxwell's equations
## Structure
The structure of Kinetic is shown in the schematic below:
```mermaid
flowchart LR
subgraph Com[Component]
KitBase
KitML
end
subgraph Backend
CPU
CUDA
end
subgraph Mesh
FiniteMesh
end
subgraph SciML[Scientific Machine Learning]
Solaris(Solaris)
Flux(Flux)
TensorFlow[TensorFlow]
end
subgraph AD[Automatic Differentiation]
ForwardDiff
Zygote
end
subgraph Parallel[Parallel Computing]
Threads
Distributed
MPI["MPI (experimental)"]
end
subgraph Serial[Serialization]
CSV
JLD2
BSON
end
subgraph Opt[Optimization]
Optimisers
Optim
Optimization
end
subgraph Ar[Array]
Array
StaticArrays
StructArrays
end
Kt(Kinetic)
Com --> Kt
Ar --> Kt
Mesh --> Kt
Backend --> Kt
AD --> Kt
Serial --> Kt
Kt --> Parallel
Kt --> SciML
Kt --> Opt
```
## Documentation
For the detailed implementation and usage of the package, please
check the documentation:
- [**STABLE**](https://xiaotianbai.com/Kinetic.jl/stable/): latest tagged version of the package
- [**LATEST**](https://xiaotianbai.com/Kinetic.jl/dev/): in-development version of the package
## Citing
If you benefit from Kinetic in your research, teaching, or otherwise, we would be happy if you could mention or cite it:
```
@article{xiao2021kinetic,
doi = {10.21105/joss.03060},
url = {https://doi.org/10.21105/joss.03060},
year = {2021},
publisher = {The Open Journal},
volume = {6},
number = {62},
pages = {3060},
author = {Tianbai Xiao},
title = {Kinetic.jl: A portable finite volume toolbox for scientific and neural computing},
journal = {Journal of Open Source Software}
}
```
## Contributing
Feel free to dive in! If you have any questions or ideas, please [open an issue](https://github.com/vavrines/Kinetic.jl/issues/new) or submit pull requests.
If you're new to the open source community and looking for a cool little project to work on that fits your interests, we're happy to help along the way.
## License
[MIT](LICENSE) © Tianbai Xiao
| Kinetic | https://github.com/vavrines/Kinetic.jl.git |
|
[
"MIT"
] | 0.7.10 | 54b92e6e1bcc3444013cf29729637e86bf62dcd0 | docs | 62 | # Configuration
```@docs
ib_rh
ib_sod
ib_briowu
ib_cavity
``` | Kinetic | https://github.com/vavrines/Kinetic.jl.git |
|
[
"MIT"
] | 0.7.10 | 54b92e6e1bcc3444013cf29729637e86bf62dcd0 | docs | 126 | # Physical Space
```@docs
global_frame
local_frame
uniform_mesh
meshgrid
mesh_connectivity_2D
mesh_center_2D
mesh_area_2D
``` | Kinetic | https://github.com/vavrines/Kinetic.jl.git |
|
[
"MIT"
] | 0.7.10 | 54b92e6e1bcc3444013cf29729637e86bf62dcd0 | docs | 32 | # I / O
```@docs
read_dict
``` | Kinetic | https://github.com/vavrines/Kinetic.jl.git |
|
[
"MIT"
] | 0.7.10 | 54b92e6e1bcc3444013cf29729637e86bf62dcd0 | docs | 158 | # Math
```@docs
linspace
heaviside
fortsign
mat_split
central_diff
central_diff!
upwind_diff
upwind_diff!
unstruct_diff
KitBase.lgwt
KitBase.extract_last
``` | Kinetic | https://github.com/vavrines/Kinetic.jl.git |
|
[
"MIT"
] | 0.7.10 | 54b92e6e1bcc3444013cf29729637e86bf62dcd0 | docs | 95 | # Phase Space
```@docs
newton_cotes
legendre_quadrature
octa_quadrature
quadrature_weights
``` | Kinetic | https://github.com/vavrines/Kinetic.jl.git |
|
[
"MIT"
] | 0.7.10 | 54b92e6e1bcc3444013cf29729637e86bf62dcd0 | docs | 38 | # Stepper
```@docs
KitBase.step!
```
| Kinetic | https://github.com/vavrines/Kinetic.jl.git |
|
[
"MIT"
] | 0.7.10 | 54b92e6e1bcc3444013cf29729637e86bf62dcd0 | docs | 608 | # Theory
```@docs
prim_conserve
conserve_prim
mixture_prim_conserve
mixture_conserve_prim
em_coefficients
advection_flux
burgers_flux
euler_flux
euler_jacobi
gauss_moments
mixture_gauss_moments
moments_conserve
mixture_moments_conserve
pdf_slope
mixture_pdf_slope
moments_conserve_slope
mixture_moments_conserve_slope
discrete_moments
stress
heat_flux
maxwellian
mixture_maxwellian
shakhov
reduce_distribution
full_distribution
ref_vhs_vis
vhs_collision_time
aap_hs_collision_time
aap_hs_prim
aap_hs_diffeq!
shift_pdf!
hs_boltz_kn
kernel_mode
boltzmann_fft
boltzmann_fft!
heat_capacity_ratio
sound_speed
``` | Kinetic | https://github.com/vavrines/Kinetic.jl.git |
|
[
"MIT"
] | 0.7.10 | 54b92e6e1bcc3444013cf29729637e86bf62dcd0 | docs | 5414 | # Guide for Contributors
Thank you for considering contributing to Kinetic! This short guide will
give you ideas on how you can contribute and help you make a contribution.
Please feel free to ask us questions and chat with us at any time if you're
unsure about anything.
## What can I do?
* Tackle an existing issue.
* Try to run Kinetic and play around with it to simulate your favorite
fluid and kinetic physics. If you run into any problems or find it difficult
to use or understand, please open an issue!
* Write up an example or tutorial on how to do something useful with
Kinetic, like how to set up a new physical configuration.
* Improve documentation or comments if you found something hard to use.
* Implement a new feature if you need it to use Kinetic.
If you're interested in working on something, let us know by commenting on
existing issues or by opening a new issue. This is to make sure no one else
is working on the same issue and so we can help and guide you in case there
is anything you need to know beforehand.
## Philosophy
* Each pull request should consist of a logical collection of changes. You can
include multiple bug fixes in a single pull request, but they should be related.
For unrelated changes, please submit multiple pull requests.
* Do not commit changes to files that are irrelevant to your feature or bugfix
(e.g., .gitignore).
* Be willing to accept criticism and work on improving your code; we don't want
to break other users' code, so care must be taken not to introduce bugs. We
discuss pull requests and keep working on them until we believe we've done a
good job.
* Be aware that the pull request review process is not immediate, and is
generally proportional to the size of the pull request.
## Reporting a bug
The easiest way to get involved is to report issues you encounter when using
Kinetic or by requesting something you think is missing.
* Head over to the issues in [KitBase](https://github.com/vavrines/KitBase.jl/issues) or [KitML](https://github.com/vavrines/KitML.jl/issues) page.
* Search to see if your issue already exists or has even been solved previously.
* If you indeed have a new issue or request, click the "New Issue" button.
* Please be as specific as possible. Include the version of the code you were using, as
well as what operating system you are running. The output of Julia's `versioninfo()`
and `] status` is helpful to include. If possible, include complete, minimal example
code that reproduces the problem.
## Setting up your development environment
* Install [Julia](https://julialang.org/) on your system.
* Install git on your system if it is not already there (install XCode command line tools on
a Mac or git bash on Windows).
* Login to your GitHub account and make a fork of the
[KitBase](https://github.com/vavrines/KitBase.jl) or [KitML](https://github.com/vavrines/KitML.jl) by
clicking the "Fork" button.
* Clone your fork of the Kinetic repository (in terminal on Mac/Linux or git shell/
GUI on Windows) in the location you'd like to keep it.
```
git clone https://github.com/your-user-name/KitBase.jl.git or https://github.com/your-user-name/KitML.jl.git
```
* Navigate to that folder in the terminal, or in the git shell/command prompt if you're on Windows.
* Connect your repository to the upstream (main project).
```git remote add KitBase https://github.com/vavrines/KitBase.jl.git``` or
```git remote add KitML https://github.com/vavrines/KitML.jl.git```
* Create the development environment by opening Julia via `julia --project` then
typing in `] instantiate`. This will install all the dependencies in the Project.toml
file.
* You can test to make sure Kinetic works by typing in `] test` which will run all
the tests (this can take a while).
Your development environment is now ready!
## Pull requests
Changes and contributions should be made via GitHub pull requests against the ``master`` branch.
When you're done making changes, commit the changes you made. Chris Beams has
written a [guide](https://chris.beams.io/posts/git-commit/) on how to write
good commit messages.
When you think your changes are ready to be merged into the main repository,
push to your fork and submit a pull request at https://github.com/vavrines/KitBase.jl/compare/ or https://github.com/vavrines/KitML.jl/compare/.
**Working on your first Pull Request?** You can learn how from the video series
[How to Contribute to an Open Source Project on GitHub](https://egghead.io/courses/how-to-contribute-to-an-open-source-project-on-github), Aaron Meurer's [tutorial on the git workflow](https://www.asmeurer.com/git-workflow/), or the guide [“How to Contribute to Open Source"](https://opensource.guide/how-to-contribute/).
## Documentation
Now that you've made your awesome contribution, it's time to tell the world how to use it.
Writing documentation strings is really important to make sure others use your functionality
properly. Didn't write new functions? That's fine, but be sure that the documentation for
the code you touched is still in great shape. It is not uncommon to find some strange wording
or clarification that you can take care of while you are here.
## Credits
This contributor's guide is based on the [MetPy contributor's guide](https://github.com/Unidata/MetPy/blob/master/CONTRIBUTING.md) and [Oceananigans](https://github.com/CliMA/Oceananigans.jl). | Kinetic | https://github.com/vavrines/Kinetic.jl.git |
|
[
"MIT"
] | 0.7.10 | 54b92e6e1bcc3444013cf29729637e86bf62dcd0 | docs | 1723 | # Advection diffusion
The first example is the scalar advection-diffusion equation.
It's a one-dimensional problem in the spatial domain ``x``.
Let's first configure the solver setup.
```julia
using Kinetic, Plots
set = Setup(
matter = "scalar", # material
case = "advection", # test case
space = "1d0f0v", # phase space
flux = "gks", # flux
collision = "", # collision: for scalar conservation laws there are none
interpOrder = 1, # interpolation order
boundary = "period", # boundary condition
cfl = 0.5, # cfl
maxTime = 1.0, # simulation time
)
```
Then we generate the computational mesh.
Since we solve the macroscopic transport equation directly, the velocity space is set to `nothing`.
```julia
ps = PSpace1D(0.0, 1.0, 100, 1)
vs = nothing
```
Next we define the physical properties of the material.
For the advection-diffusion equation, the two fields are the advection speed and viscosity respectively.
```julia
property = Scalar(1.0, 1e-6)
```
A sine wave is used as the initial condition.
```julia
ib = IB((x, p...) -> sin(2π * x), property)
```
For brevity, the above setups can be integrated into a single structure.
We also allocate the structures for cell-centered solutions and interface fluxes.
```julia
ks = SolverSet(set, ps, vs, property, ib)
ctr, face = init_fvm(ks)
```
The solution algorithm can be processed together with visualization.
```julia
t = 0.0
dt = KitBase.timestep(ks, ctr, t)
nt = ks.set.maxTime ÷ dt |> Int
anim = @animate for iter = 1:nt
reconstruct!(ks, ctr)
evolve!(ks, ctr, face, dt)
update!(ks, ctr, face, dt, 0.0)
plot(ks, ctr, xlabel="x", label="u", ylims=[-1, 1])
end
gif(anim, "advection.gif", fps = 45)
```

| Kinetic | https://github.com/vavrines/Kinetic.jl.git |
|
[
"MIT"
] | 0.7.10 | 54b92e6e1bcc3444013cf29729637e86bf62dcd0 | docs | 1229 | # Burgers
Now we can turn to the Burgers equation.
It's a typical hyperbolic conservation law, where discontinuous solutions can emerge as the system evolves.
Let's consider the same initial configuration as in the advection-diffusion example.
```julia
using Kinetic, Plots
set = Setup(
matter = "scalar", # material
case = "burgers", # test case
space = "1d0f0v", # phase space
flux = "gks", # flux
collision = "", # collision: for scalar conservation laws there are none
interpOrder = 1, # interpolation order
boundary = "period", # boundary condition
cfl = 0.5, # cfl
maxTime = 1.0, # simulation time
)
ps = PSpace1D(0.0, 1.0, 100, 1)
vs = nothing
property = Scalar(0, 1e-6)
ib = IB(x -> sin(2π * x), property)
ks = SolverSet(set, ps, vs, property, ib)
ctr, face = init_fvm(ks)
```
The solution algorithm can be processed together with visualization.
```julia
t = 0.0
dt = KitBase.timestep(ks, ctr, t)
nt = ks.set.maxTime ÷ dt |> Int
anim = @animate for iter = 1:nt
reconstruct!(ks, ctr)
evolve!(ks, ctr, face, dt)
update!(ks, ctr, face, dt, 0.0)
plot(ks, ctr, xlabel="x", label="u", ylims=[-1, 1])
end
gif(anim, "burgers.gif", fps = 45)
```

| Kinetic | https://github.com/vavrines/Kinetic.jl.git |
|
[
"MIT"
] | 0.7.10 | 54b92e6e1bcc3444013cf29729637e86bf62dcd0 | docs | 6398 | # Lid-driven cavity
We then turn to the lid-driven cavity problem.
It's a four-dimensional problem, with two dimensions in the physical domain ``(x,y)`` and two in the particle velocity domain ``(u,v)``.
Similarly, we prepare the configuration file as
```
# setup
matter = gas
case = cavity
space = 2d2f2v
flux = kfvs
collision = bgk
nSpecies = 1
interpOrder = 2
limiter = vanleer
boundary = maxwell
cfl = 0.8
maxTime = 5.0
# phase space
x0 = 0.0
x1 = 1.0
nx = 45
y0 = 0.0
y1 = 1.0
ny = 45
pMeshType = uniform
nxg = 0
nyg = 0
# velocity space
umin = -5.0
umax = 5.0
nu = 28
vmin = -5.0
vmax = 5.0
nv = 28
vMeshType = rectangle
nug = 0
nvg = 0
# gas
knudsen = 0.075
mach = 0.0
prandtl = 1.0
inK = 1.0
omega = 0.72
alphaRef = 1.0
omegaRef = 0.5
# boundary
uLid = 0.15
vLid = 0.0
tLid = 1.0
```
We then execute the following code to conduct a simulation
```julia
using Kinetic
ks, ctr, a1face, a2face, t = initialize("config.txt")
t = solve!(ks, ctr, a1face, a2face, t)
```
The high-level solver `solve!` is equivalent to the following low-level procedures
```julia
using ProgressMeter
res = zeros(4)
dt = timestep(ks, ctr, t)
nt = floor(ks.set.maxTime / dt) |> Int
@showprogress for iter = 1:nt
reconstruct!(ks, ctr)
evolve!(ks, ctr, a1face, a2face, dt; mode = Symbol(ks.set.flux), bc = Symbol(ks.set.boundary))
update!(ks, ctr, a1face, a2face, dt, res; coll = Symbol(ks.set.collision), bc = Symbol(ks.set.boundary))
end
```
It can be further expanded into the lower-level backend.
```julia
# lower-level backend
@showprogress for iter = 1:nt
# horizontal flux
@inbounds Threads.@threads for j = 1:ks.pSpace.ny
for i = 2:ks.pSpace.nx
KitBase.flux_kfvs!(
a1face[i, j].fw,
a1face[i, j].fh,
a1face[i, j].fb,
ctr[i-1, j].h,
ctr[i-1, j].b,
ctr[i, j].h,
ctr[i, j].b,
ks.vSpace.u,
ks.vSpace.v,
ks.vSpace.weights,
dt,
a1face[i, j].len,
)
end
end
# vertical flux
vn = ks.vSpace.v
vt = -ks.vSpace.u
@inbounds Threads.@threads for j = 2:ks.pSpace.ny
for i = 1:ks.pSpace.nx
KitBase.flux_kfvs!(
a2face[i, j].fw,
a2face[i, j].fh,
a2face[i, j].fb,
ctr[i, j-1].h,
ctr[i, j-1].b,
ctr[i, j].h,
ctr[i, j].b,
vn,
vt,
ks.vSpace.weights,
dt,
a2face[i, j].len,
)
a2face[i, j].fw .= KitBase.global_frame(a2face[i, j].fw, 0., 1.)
end
end
# boundary flux
@inbounds Threads.@threads for j = 1:ks.pSpace.ny
KitBase.flux_boundary_maxwell!(
a1face[1, j].fw,
a1face[1, j].fh,
a1face[1, j].fb,
ks.ib.bcL,
ctr[1, j].h,
ctr[1, j].b,
ks.vSpace.u,
ks.vSpace.v,
ks.vSpace.weights,
ks.gas.K,
dt,
ctr[1, j].dy,
1.,
)
KitBase.flux_boundary_maxwell!(
a1face[ks.pSpace.nx+1, j].fw,
a1face[ks.pSpace.nx+1, j].fh,
a1face[ks.pSpace.nx+1, j].fb,
ks.ib.bcR,
ctr[ks.pSpace.nx, j].h,
ctr[ks.pSpace.nx, j].b,
ks.vSpace.u,
ks.vSpace.v,
ks.vSpace.weights,
ks.gas.K,
dt,
ctr[ks.pSpace.nx, j].dy,
-1.,
)
end
@inbounds Threads.@threads for i = 1:ks.pSpace.nx
KitBase.flux_boundary_maxwell!(
a2face[i, 1].fw,
a2face[i, 1].fh,
a2face[i, 1].fb,
ks.ib.bcD,
ctr[i, 1].h,
ctr[i, 1].b,
vn,
vt,
ks.vSpace.weights,
ks.gas.K,
dt,
ctr[i, 1].dx,
1,
)
a2face[i, 1].fw .= KitBase.global_frame(a2face[i, 1].fw, 0., 1.)
KitBase.flux_boundary_maxwell!(
a2face[i, ks.pSpace.ny+1].fw,
a2face[i, ks.pSpace.ny+1].fh,
a2face[i, ks.pSpace.ny+1].fb,
[1., 0.0, -0.15, 1.0],
ctr[i, ks.pSpace.ny].h,
ctr[i, ks.pSpace.ny].b,
vn,
vt,
ks.vSpace.weights,
ks.gas.K,
dt,
ctr[i, ks.pSpace.ny].dy,
-1,
)
a2face[i, ks.pSpace.ny+1].fw .= KitBase.global_frame(
a2face[i, ks.pSpace.ny+1].fw,
0.,
1.,
)
end
# update
@inbounds for j = 1:ks.pSpace.ny
for i = 1:ks.pSpace.nx
KitBase.step!(
ctr[i, j].w,
ctr[i, j].prim,
ctr[i, j].h,
ctr[i, j].b,
a1face[i, j].fw,
a1face[i, j].fh,
a1face[i, j].fb,
a1face[i+1, j].fw,
a1face[i+1, j].fh,
a1face[i+1, j].fb,
a2face[i, j].fw,
a2face[i, j].fh,
a2face[i, j].fb,
a2face[i, j+1].fw,
a2face[i, j+1].fh,
a2face[i, j+1].fb,
ks.vSpace.u,
ks.vSpace.v,
ks.vSpace.weights,
ks.gas.K,
ks.gas.γ,
ks.gas.μᵣ,
ks.gas.ω,
ks.gas.Pr,
ctr[i, j].dx * ctr[i, j].dy,
dt,
zeros(4),
zeros(4),
:bgk,
)
end
end
end
```
The result can be visualized with the built-in function `plot_contour`, which presents the contours of gas density, U-velocity, V-velocity and temperature inside the cavity.
```julia
KitBase.plot_contour(ks, ctr)
```

It is equivalent to the following low-level backend.
```julia
begin
using Plots
sol = zeros(4, ks.pSpace.nx, ks.pSpace.ny)
for i in axes(sol, 2)
for j in axes(sol, 3)
sol[1:3, i, j] .= ctr[i, j].prim[1:3]
sol[4, i, j] = 1.0 / ctr[i, j].prim[4]
end
end
contourf(ks.pSpace.x[1:ks.pSpace.nx, 1], ks.pSpace.y[1, 1:ks.pSpace.ny], sol[3, :, :]')
end
```
| Kinetic | https://github.com/vavrines/Kinetic.jl.git |
|
[
"MIT"
] | 0.7.10 | 54b92e6e1bcc3444013cf29729637e86bf62dcd0 | docs | 2758 | # Shock tube problem
We then use the Boltzmann equation to solve the shock tube problem in gas dynamics.
It's a two-dimensional problem, with one dimension in the physical domain ``x`` and one in the particle velocity domain ``u``.
First let us prepare the configuration file as
```
# case
matter = gas
case = sod
space = 1d2f1v
nSpecies = 1
flux = kfvs
collision = bgk
interpOrder = 2
limiter = vanleer
boundary = fix
cfl = 0.5
maxTime = 0.2
# physical space
x0 = 0
x1 = 1
nx = 200
pMeshType = uniform
nxg = 1
# velocity space
vMeshType = rectangle
umin = -5
umax = 5
nu = 28
nug = 0
# gas
knudsen = 0.0001
mach = 0.0
prandtl = 1
inK = 2
omega = 0.81
alphaRef = 1.0
omegaRef = 0.5
```
The configuration file can be understood as follows:
- The simulation case is the standard Sod shock tube
- A phase space in 1D physical and 1D velocity space is created with two particle distribution functions inside
- The numerical flux function is the kinetic flux vector splitting method and the collision term uses the BGK relaxation
- The reconstruction step employs the van Leer limiter to create 2nd-order interpolation
- The two boundaries are fixed with Dirichlet boundary conditions
- The timestep is determined with a CFL number of 0.5
- The maximum simulation time is 0.2
- The physical space spans [0, 1] with 200 uniform cells
- The velocity space spans [-5, 5] with 28 uniform cells
- The reference Knudsen number is set as 1e-4
- The reference Mach number is absent
- The reference Prandtl number is 1
- The gas molecule contains two internal degrees of freedom
- The viscosity is evaluated with the following formulas
```math
\mu = \mu_{ref} \left(\frac{T}{T_{ref}}\right)^{\omega}
```
```math
\mu_{ref}=\frac{5(\alpha+1)(\alpha+2) \sqrt{\pi}}{4 \alpha(5-2 \omega)(7-2 \omega)} Kn_{ref}
```
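For example, the reference viscosity implied by the second formula can be evaluated with the built-in helper; the sketch below simply plugs in the configuration values above.
```julia
using KitBase

μᵣ = ref_vhs_vis(1e-4, 1.0, 0.5) # knudsen, alphaRef, omegaRef
```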
The configuration file directly generates variables at runtime via meta-programming in Julia,
and it can be stored in any text format (txt, toml, cfg, etc.).
For example, once `config.txt` is created,
we execute the following code to conduct a simulation
```julia
using Kinetic
set, ctr, face, t = initialize("config.txt")
t = solve!(set, ctr, face, t)
```
The computational setup is stored in `set` and the control volume solutions are stored in `ctr` and `face`.
The high-level solver `solve!` is equivalent to the following low-level procedures
```julia
dt = timestep(ks, ctr, t)
nt = Int(floor(ks.set.maxTime / dt))
res = zeros(3)
for iter = 1:nt
reconstruct!(ks, ctr)
evolve!(ks, ctr, face, dt)
update!(ks, ctr, face, dt, res)
end
```
The result can be visualized with the built-in function `plot_line`, which presents the profiles of gas density, velocity and temperature inside the tube.
```julia
plot_line(set, ctr)
```

| Kinetic | https://github.com/vavrines/Kinetic.jl.git |
|
[
"MIT"
] | 0.7.10 | 54b92e6e1bcc3444013cf29729637e86bf62dcd0 | docs | 1781 | # KitFort and high performance computing
Numerical simulations of nonlinear models and differential equations are essentially connected with supercomputers and high-performance computing (HPC).
The performance of a supercomputer or a software program is commonly measured in floating-point operations per second (FLOPS).
Through the milestone astronomy research of [Celeste](https://juliacomputing.com/case-studies/celeste/), Julia has entered the PetaFLOPS club (together with C/C++ and Fortran) since 2017.
Julia is experiencing a dramatic rise in HPC and elsewhere, which is why Kinetic is organized in Julia.
However, compared with the mature C/C++ ecosystem, equivalent execution efficiency cannot be guaranteed at all times and in all situations.
Some existing hardware architectures, e.g. [Sunway TaihuLight](https://en.wikipedia.org/wiki/Sunway_TaihuLight), previously the fastest supercomputer in the [TOP500](https://www.top500.org/) list, are built upon 40,960 Chinese-designed SW26010 manycore 64-bit RISC processors, which are not specifically optimized for Julia.
Therefore, we've developed an accompanying package, [KitFort.jl](https://github.com/vavrines/KitFort.jl).
The Fortran codes have been linked to the Julia syntax with the built-in `ccall` function.
It's not a default submodule of Kinetic, since we believe the Julia code is sufficient for general users and developers and avoids the two-language problem.
However, it can be imported manually when execution efficiency becomes the first priority by executing
```julia
julia> ]
(v1.8) pkg> add KitFort
```
After that, use or import the package.
```julia
julia> using KitFort
```
It can be updated to the latest tagged release from the package manager by executing
```julia
(v1.8) pkg> update KitFort
```
| Kinetic | https://github.com/vavrines/Kinetic.jl.git |
|
[
"MIT"
] | 0.7.10 | 54b92e6e1bcc3444013cf29729637e86bf62dcd0 | docs | 3100 | # Benchmark
Here we provide a benchmark to identify the performance variation between the Julia and Fortran implementations.
For brevity, we directly make use of the dynamic library [kitmod.so](https://github.com/vavrines/KitFort.jl/tree/main/src/fortran) via the `ccall` function in Julia, and
compare the efficiency of computing numerical fluxes with [BenchmarkTools.jl](https://github.com/JuliaCI/BenchmarkTools.jl).
```julia
using Kinetic, BenchmarkTools
begin
u = collect(-5.0:0.1:5.0)
nu = length(u)
weights = ones(nu) .* 0.5
fw = zeros(3)
fh = zeros(nu)
fb = zeros(nu)
inK = 2
γ = 5.0 / 3.0
primL = [1., 0., 1.]
wL = prim_conserve(primL, γ)
hL = maxwellian(u, primL) |> Array;
bL = hL .* 2 ./ (2.)
shL = zeros(nu)
sbL = zeros(nu)
lenL = 0.1
primR = [0.5, 0., 1.]
wR = prim_conserve(primR, γ)
hR = maxwellian(u, primR) |> Array;
bR = hR .* 2 ./ (2.)
shR = zeros(nu)
sbR = zeros(nu)
lenR = 0.1
muref = 0.001
omega = 0.72
prandtl = 1.0
dt = 1e-4
end
#--- kfvs ---#
@btime ccall(
(:__kinetic_MOD_flux_kfvs_2f1v, "kitmod.so"),
Nothing,
(
Ref{Float64},
Ref{Float64},
Ref{Float64},
Ref{Float64},
Ref{Float64},
Ref{Float64},
Ref{Float64},
Ref{Float64},
Ref{Float64},
Ref{Int},
Ref{Float64},
Ref{Float64},
Ref{Float64},
Ref{Float64},
Ref{Float64},
),
fw,
fh,
fb,
hL,
bL,
hR,
bR,
u,
weights,
nu,
dt,
shL,
sbL,
shR,
sbR,
)
@btime flux_kfvs!(fw, fh, fb, hL, bL, hR, bR, u, weights, dt, shL, sbL, shR, sbR)
#--- ugks ---#
@btime ccall(
(:__kinetic_MOD_flux_ugks_2f1v, "kitmod.so"),
Nothing,
(
Ref{Float64},
Ref{Float64},
Ref{Float64},
Ref{Float64},
Ref{Float64},
Ref{Float64},
Ref{Float64},
Ref{Float64},
Ref{Float64},
Ref{Float64},
Ref{Float64},
Ref{Int},
Ref{Float64},
Ref{Float64},
Ref{Float64},
Ref{Float64},
Ref{Float64},
Ref{Float64},
Ref{Float64},
Ref{Float64},
Ref{Float64},
Ref{Float64},
Ref{Float64},
Ref{Float64},
),
fw,
fh,
fb,
wL,
hL,
bL,
wR,
hR,
bR,
u,
weights,
nu,
inK,
γ,
muref,
omega,
prandtl,
dt,
lenL,
lenR,
shL,
sbL,
shR,
sbR,
)
@btime flux_ugks!(fw, fh, fb, wL, hL, bL, wR, hR, bR, u, weights, inK, γ, muref, omega, prandtl, dt, lenL, lenR, shL, sbL, shR, sbR)
```
The results on an Intel NUC8i7BEH (i7-8559U) with 101 velocity points are as follows
Kinetic.jl
- KFVS flux ~ 6.747 μs (13 allocations: 11.38 KiB)
- UGKS flux ~ 13.344 μs (123 allocations: 20.94 KiB)
KitFort.jl
- KFVS flux ~ 5.421 μs (37 allocations: 800 bytes)
- UGKS flux ~ 11.413 μs (55 allocations: 1.09 KiB)
As presented, the Fortran backend yields an efficiency improvement of around 15%.
|
[
"MIT"
] | 0.7.10 | 54b92e6e1bcc3444013cf29729637e86bf62dcd0 | docs | 43 | # Index of Types and Methods
```@index
``` | Kinetic | https://github.com/vavrines/Kinetic.jl.git |
|
[
"MIT"
] | 0.7.10 | 54b92e6e1bcc3444013cf29729637e86bf62dcd0 | docs | 3307 | # Kinetic.jl
Kinetic is a computational fluid dynamics toolbox written in Julia. Based on differentiable programming, mechanical and neural network models are fused and solved in a unified framework. Simultaneous 1-3 dimensional numerical simulations can be performed on CPUs and GPUs.
The ecosystem follows the modular design philosophy. Depending on the specific use case, the main module is split into portable components to reduce the latency caused by the LLVM [just-in-time](https://llvm.org/docs/tutorial/index.html#building-a-jit-in-llvm) compiler:
- [KitBase.jl](https://github.com/vavrines/KitBase.jl): physical models and numerical schemes
- [KitML.jl](https://github.com/vavrines/KitML.jl): neural models and machine learning methods
- [KitFort.jl](https://github.com/vavrines/KitFort.jl): optional high-performance Fortran backend
- [kineticpy](https://github.com/vavrines/kineticpy): Python interface built on top of [pyjulia](https://github.com/JuliaPy/pyjulia)
## Scope of application
Kinetic models and simulates fluid dynamics problems from the perspective of particle transport.
Any advection-diffusion-type equation of different particles, including molecules, photons, plasmas, neutrons, etc., can be solved within the framework.
Special attention has been paid to Hilbert's sixth problem, i.e. building the numerical passage between the [kinetic theory of gases](https://en.wikipedia.org/wiki/Kinetic_theory_of_gases), e.g. the Boltzmann equation, and continuum mechanics, e.g. the Euler and Navier-Stokes equations. A partial list of currently supported models and equations includes:
- linear Boltzmann equation
- nonlinear Boltzmann equation
- multi-component Boltzmann equation
- Fokker-Planck-Landau equation
- direct simulation Monte Carlo
- advection-diffusion equation
- Burgers' equation
- Euler equations
- Navier-Stokes equations
- extended hydrodynamical equations from asymptotic expansion
- magnetohydrodynamical equations
- Maxwell's equations
## Design philosophy
The code hierarchy is designed to be as intuitive and neat as possible.
It is dedicated to providing a friendly interface for educational usage in kinetic theory and rich functionality for scientific research.
Benefiting from the brilliant expressiveness and low-overhead abstraction provided by the [Julia programming language](https://julialang.org/),
we provide different levels of APIs to allow the users to focus on physics and to cooperate with the existing packages in the Julia ecosystem.
## What is new?
Finite volume method is a proven approach for simulating conservation laws.
Compared with the existing open-source software, e.g. [OpenFOAM](https://openfoam.org/), [SU2](https://su2code.github.io/) and [Clawpack](https://www.clawpack.org/),
Kinetic holds the novelty through the following points:
- 100% Julia stack that encounters no two-language problem
- Comprehensive support for kinetic theory and phase-space equations
- Lightweight design to ensure the flexibility for secondary development
- Closely coupling with scientific machine learning
## How to get help?
If you are interested in using Kinetic or are trying to figure out how to use it, please feel free to get in touch and raise questions.
Do open an issue or pull request if you have questions, suggestions, or solutions. | Kinetic | https://github.com/vavrines/Kinetic.jl.git |
|
[
"MIT"
] | 0.7.10 | 54b92e6e1bcc3444013cf29729637e86bf62dcd0 | docs | 1335 | # Installation Instructions
Kinetic is a registered Julia package in the official entry.
We recommend installing it with the built-in Julia package manager.
It automatically installs a currently stable and tagged release.
From the Julia REPL, you can add the package.
```julia
julia> ]
(v1.8) pkg> add Kinetic
```
This will automatically install Kinetic and all its dependencies, so there is no need to build the package manually.
You can also build the dependencies if some of them were removed by mistake.
```julia
julia> ]
(v1.8) pkg> build Kinetic
```
After that, we can `using` or `import` the package.
`using` will load the module and make its exported names available for direct use.
```julia
julia> using Kinetic
julia> linspace(0, 1, 5)
5-element Vector{Float64}:
0.0
0.25
0.5
0.75
1.0
```
Correspondingly, `import` only loads the module, while the names need to be accessed with dot syntax.
```julia
julia> import Kinetic
julia> Kinetic.linspace(0, 1, 5)
5-element Vector{Float64}:
0.0
0.25
0.5
0.75
1.0
```
Kinetic can be updated to the latest tagged release from the package manager.
```julia
(v1.8) pkg> update Kinetic
```
!!! tip "Use Julia 1.3 or newer"
    Kinetic matches perfectly with Julia 1.3 and newer versions.
    Installing it with an older version of Julia will result in incomplete functionality.
|
[
"MIT"
] | 0.7.10 | 54b92e6e1bcc3444013cf29729637e86bf62dcd0 | docs | 3330 | # KitML and scientific machine learning
Machine learning is building its momentum in scientific computing.
Given the nonlinear structure of differential and integral equations, it is promising to incorporate the universal function approximator from machine learning models into the governing equations and achieve the balance between efficiency and accuracy.
KitML is designed as a scientific machine learning toolbox, which devotes to fusing mechanical and neural models.
For example, the Boltzmann collision operator can be divided into a combination of relaxation model and neural network, i.e. the so-called universal Boltzmann equation.
```math
\frac{df}{dt} = \int_{\mathcal{R}^{3}} \int_{\mathcal{S}^{2}} \mathcal{B}(\cos \beta, g)\left[f\left(\mathbf{u}^{\prime}\right) f\left(\mathbf{u}_{*}^{\prime}\right)-f(\mathbf{u}) f\left(\mathbf{u}_{*}\right)\right] d \mathbf{\Omega} d \mathbf{u}_{*} \simeq \nu(\mathcal{M}-f)+\mathrm{NN}_{\theta}(\mathcal{M}-f)
```
The UBE has the following benefits.
First, it automatically ensures the asymptotic limits.
Let's consider the Chapman-Enskog method for solving the Boltzmann equation, where the distribution function is approximated with an expansion series.
```math
f \simeq f^{(0)}+f^{(1)}+f^{(2)}+\cdots, \quad f^{(0)}=\mathcal{M}
```
Take the zeroth order truncation, and consider an illustrative multi-layer perceptron.
```math
\mathrm{NN}_{\theta}(x)=\operatorname{layer}_{n}\left(\ldots \text { layer }_{2}\left({\sigma}\left(\text { layer }_{1}(x)\right)\right)\right), \quad \operatorname{layer}(x)=w x
```
Given the zero input from ``M − f``, the contribution from the collision term is absent, and the moment equation naturally leads to the Euler equations.
```math
\frac{\partial}{\partial t}\left(\begin{array}{c}
\rho \\
\rho \mathbf{U} \\
\rho E
\end{array}\right)+\nabla_{\mathbf{x}} \cdot\left(\begin{array}{c}
\rho \mathbf{U} \\
\rho \mathbf{U} \otimes \mathbf{U} \\
\mathbf{U}(\rho E+p)
\end{array}\right)=\int\left(\begin{array}{c}
1 \\
\mathbf{u} \\
\frac{1}{2} \mathbf{u}^{2}
\end{array}\right)\left(\mathcal{M}_{t}+\mathbf{u} \cdot \nabla_{\mathbf{x}} \mathcal{M}\right) d \mathbf{u}=0
```
KitML provides two functions to construct the universal Boltzmann equation, and they work seamlessly with any modern ODE solver in [DifferentialEquations.jl](https://github.com/SciML/DifferentialEquations.jl).
```@docs
ube_dfdt
ube_dfdt!
```
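As a minimal sketch of how these fit into the SciML ecosystem (assuming a Maxwellian `M`, relaxation time `τ`, initial distribution `f0`, and a trained network `nn` with parameters `p` — all placeholders here; see the universal Boltzmann equation tutorial for a full workflow):

```julia
using OrdinaryDiffEq

# the hybrid right-hand side is just an ODE function: df/dt = (M - f)/τ + NN(M - f)
prob = ODEProblem(ube_dfdt, f0, (0.0, 1.0), [M, τ, (nn, p)])
sol = solve(prob, Midpoint(), saveat = 0.1)
```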
Besides, we provide an input convex neural network (ICNN) developed by Amos et al.
The neural network parameters are constrained such that the output of the network is a convex function of the inputs.
The structure of the ICNN is shown below. It allows for efficient inference via optimization over some inputs to the network given others, and can be applied to settings including structured prediction, data imputation, and reinforcement learning.
It is important for entropy-based modelling, since the minimization principle works exclusively with convex functions.

```@docs
ICNNLayer
ICNNChain
```
We also provide scientific machine learning training interfaces and I/O methods.
They are consistent with both the [Flux.jl](https://github.com/FluxML/Flux.jl) and [DiffEqFlux.jl](https://github.com/SciML/DiffEqFlux.jl) ecosystems.
```@docs
sci_train
sci_train!
load_data
save_model
``` | Kinetic | https://github.com/vavrines/Kinetic.jl.git |
|
[
"MIT"
] | 0.7.10 | 54b92e6e1bcc3444013cf29729637e86bf62dcd0 | docs | 4946 | # Universal Boltzmann equation
In the following, we present a universal differential equation strategy to construct the neural network enhanced Boltzmann equation.
The complicated fivefold integral operator is replaced by a combination of relaxation and neural models.
It promises a completely differential structure and thus the neural ODE type training and computing becomes possible.
The approach reduces the computational cost by up to three orders of magnitude while preserving the accuracy.
The detailed theory and implementation can be found in [Tianbai Xiao & Martin Frank, Using neural networks to accelerate the solution of the Boltzmann equation](https://www.sciencedirect.com/science/article/pii/S0021999121004162).
First we load all the packages needed and set up the configurations.
```julia
using Flux, Kinetic, OrdinaryDiffEq, Plots, SciMLSensitivity, Solaris
begin
t1 = 3
nt = 16
u0 = -5
u1 = 5
nu = 80
v0 = -5
v1 = 5
nv = 28
w0 = -5
w1 = 5
nw = 28
knudsen = 1
inK = 0
alpha = 1.0
omega = 0.5
nh = 8
end
```
The dataset is produced by the fast spectral method, which solves the nonlinear Boltzmann integral with fast Fourier transformation.
```julia
begin
tspan = (0.0, t1)
tsteps = linspace(tspan[1], tspan[2], nt)
γ = heat_capacity_ratio(inK, 3)
vs = VSpace3D(u0, u1, nu, v0, v1, nv, w0, w1, nw)
f0 = @. 0.5 * (1 / π)^1.5 *
(exp(-(vs.u - 1) ^ 2) + exp(-(vs.u + 1) ^ 2)) *
exp(-vs.v ^ 2) * exp(-vs.w ^ 2)
prim0 =
conserve_prim(moments_conserve(f0, vs.u, vs.v, vs.w, vs.weights), γ)
M0 = maxwellian(vs.u, vs.v, vs.w, prim0)
mu_ref = ref_vhs_vis(knudsen, alpha, omega)
τ0 = mu_ref * 2.0 * prim0[end]^(0.5) / prim0[1]
# Boltzmann
prob = ODEProblem(boltzmann_ode!, f0, tspan, fsm_kernel(vs, mu_ref))
data_boltz = solve(prob, Tsit5(), saveat = tsteps) |> Array
# BGK
prob1 = ODEProblem(bgk_ode!, f0, tspan, [M0, τ0])
data_bgk = solve(prob1, Tsit5(), saveat = tsteps) |> Array
data_boltz_1D = zeros(Float64, axes(data_boltz, 1), axes(data_boltz, 4))
data_bgk_1D = zeros(Float64, axes(data_bgk, 1), axes(data_bgk, 4))
for j in axes(data_boltz_1D, 2)
data_boltz_1D[:, j] .=
reduce_distribution(data_boltz[:, :, :, j], vs.weights[1, :, :])
data_bgk_1D[:, j] .=
reduce_distribution(data_bgk[:, :, :, j], vs.weights[1, :, :])
end
f0_1D = reduce_distribution(f0, vs.weights[1, :, :])
M0_1D = reduce_distribution(M0, vs.weights[1, :, :])
X = Array{Float64}(undef, vs.nu, 1)
for i in axes(X, 2)
X[:, i] .= f0_1D
end
Y = Array{Float64}(undef, vs.nu, 1, nt)
for i in axes(Y, 2)
Y[:, i, :] .= data_boltz_1D
end
M = Array{Float64}(undef, nu, size(X, 2))
for i in axes(M, 2)
M[:, i] .= M0_1D
end
τ = Array{Float64}(undef, 1, size(X, 2))
for i in axes(τ, 2)
τ[1, i] = τ0
end
end
```
Then we define the neural network and construct the unified model with mechanical and neural parts.
The training is conducted by Solaris.jl with the Adam optimizer.
```julia
begin
model_univ = FnChain(
FnDense(nu, nu * nh, tanh),
FnDense(nu * nh, nu),
)
p_model = init_params(model_univ)
function dfdt(df, f, p, t)
df .= (M .- f) ./ τ .+ model_univ(M .- f, p)
end
prob_ube = ODEProblem(dfdt, X, tspan, p_model)
function loss(p)
sol_ube = solve(prob_ube, Midpoint(), u0 = X, p = p, saveat = tsteps)
loss = sum(abs2, Array(sol_ube) .- Y)
return loss
end
his = []
cb = function (p, l)
display(l)
push!(his, l)
return false
end
end
res = sci_train(loss, p_model, Adam(); cb = cb, maxiters = 200)
res = sci_train(loss, res.u, Adam(); cb = cb, maxiters = 200)
```
Once we have trained the hybrid Boltzmann collision term, we can solve it as a normal differential equation with any desired solver.
Taking the midpoint rule as an example, the solution algorithm and visualization can be organized as follows.
```julia
ube = ODEProblem(ube_dfdt, f0_1D, tspan, [M0_1D, τ0, (model_univ, res.u)])
sol = solve(
ube,
Midpoint(),
u0 = f0_1D,
p = [M0_1D, τ0, (model_univ, res.u)],
saveat = tsteps,
)
plot(
vs.u[:, vs.nv÷2, vs.nw÷2],
data_boltz_1D[:, 1],
lw = 2,
label = "Initial",
color = :gray32,
xlabel = "u",
ylabel = "particle distribution",
)
plot!(
vs.u[:, vs.nv÷2, vs.nw÷2],
data_boltz_1D[:, 2],
lw = 2,
label = "Boltzmann",
color = 1,
)
plot!(
vs.u[:, vs.nv÷2, vs.nw÷2],
data_bgk_1D[:, 2],
lw = 2,
line = :dash,
label = "BGK",
color = 2,
)
plot!(
vs.u[:, vs.nv÷2, vs.nw÷2],
M0_1D,
lw = 2,
label = "Maxwellian",
color = 10,
)
scatter!(vs.u[:, vs.nv÷2, vs.nw÷2], sol.u[2], lw = 2, label = "UBE", color = 3)
```

| Kinetic | https://github.com/vavrines/Kinetic.jl.git |
|
[
"MIT"
] | 0.7.10 | 54b92e6e1bcc3444013cf29729637e86bf62dcd0 | docs | 2913 | # GPU computing
The thriving development of GPUs provides an alternative choice for scientific computing.
Kinetic enables computation on the graphical architecture on the basis of [CUDA.jl](https://github.com/JuliaGPU/CUDA.jl).
It provides the main programming interface for working with NVIDIA CUDA GPUs.
It features a user-friendly array abstraction, a compiler for writing CUDA kernels in Julia, and wrappers for various CUDA libraries.
In the following, we present an illustrative test of kinetic flux vector splitting method to evaluate upwind flux of the Boltzmann equation.
The test is conducted on a Tesla K80 GPU on [nextjournal.com](https://nextjournal.com).
We first load all the modules, and do a CPU-based computation.
```julia
import Pkg
Pkg.add("Revise")
Pkg.add("KitBase")
Pkg.add("CUDA")
Pkg.add("BenchmarkTools")
using Revise, CUDA, BenchmarkTools, KitBase
dt = 1e-3
primL = [1.0, 0.0, 0.5]
primR = [0.125, 0.0, 0.625]
u = collect(-5.0:0.01:5.0)
fL = maxwellian(u, primL)
fR = maxwellian(u, primR)
ff = similar(fL)
sfL = zero(fL)
sfR = zero(fR)
@btime flux_kfvs!(ff, fL, fR, u, dt, sfL, sfR)
```
The benchmark result on an Intel NUC8i7BEH desktop is around `5.244 μs (3 allocations: 24.00 KiB)`.
Then let's turn to GPU.
```julia
u = collect(-5.0:0.01:5.0) |> CuArray
fL = maxwellian(u, primL)
fR = maxwellian(u, primR)
ff = similar(fL)
sfL = zero(fL)
sfR = zero(fR)
@btime flux_kfvs!(ff, fL, fR, u, dt, sfL, sfR)
```
The benchmark result is around `32.965 μs (187 allocations: 10.73 KiB)`.
As can be seen, due to the relatively small input size, the GPU threads aren't fully occupied, and therefore the CPU is more efficient in this case.
Then let's increase the input vector size, i.e. consider more discrete particle velocity points for the distribution functions.
```julia
u = collect(-5.0:0.001:5.0)
fL = maxwellian(u, primL)
fR = maxwellian(u, primR)
ff = similar(fL)
sfL = zero(fL)
sfR = zero(fR)
@btime flux_kfvs!(ff, fL, fR, u, dt, sfL, sfR)
u = collect(-5.0:0.001:5.0) |> CuArray
fL = maxwellian(u, primL)
fR = maxwellian(u, primR)
ff = similar(fL)
sfL = zero(fL)
sfR = zero(fR)
@btime flux_kfvs!(ff, fL, fR, u, dt, sfL, sfR)
```
The results become around `50.011 μs (6 allocations: 234.80 KiB)` for CPU and `33.640 μs (187 allocations: 10.73 KiB)` for GPU.
We could further increase the computation size.
```julia
u = collect(-5.0:0.0001:5.0)
fL = maxwellian(u, primL)
fR = maxwellian(u, primR)
ff = similar(fL)
sfL = zero(fL)
sfR = zero(fR)
@btime flux_kfvs!(ff, fL, fR, u, dt, sfL, sfR)
u = collect(-5.0:0.0001:5.0) |> CuArray
fL = maxwellian(u, primL)
fR = maxwellian(u, primR)
ff = similar(fL)
sfL = zero(fL)
sfR = zero(fR)
@btime flux_kfvs!(ff, fL, fR, u, dt, sfL, sfR)
```
The results become around `507.960 μs (6 allocations: 2.29 MiB)` for CPU and `32.021 μs (187 allocations: 10.73 KiB)` for GPU.
At this problem size, the GPU brings around a 16x efficiency improvement.
|
[
"MIT"
] | 0.7.10 | 54b92e6e1bcc3444013cf29729637e86bf62dcd0 | docs | 4176 | # Distributed computing
The distributed computation is built upon Julia's `@distributed` macro in the `Distributed` module.
```julia
using Distributed
@distributed [reducer] for var = range
body
end
```
It provides an MPI-type parallelization with a leaner code size.
The specified range is partitioned and locally executed across all workers.
In case an optional reducer function is specified, `@distributed` performs local reductions on each worker with a final reduction on the calling process.
Without a reducer function, `@distributed` executes asynchronously, i.e. it spawns independent tasks on all available workers and returns immediately without waiting for completion.
To make it wait for completion, prefix the call with `@sync` like:
```julia
@sync @distributed for var = range
body
end
```
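For completeness, the reducer form performs a local reduction on each worker and a final reduction on the calling process. A tiny self-contained example (plain Julia, independent of Kinetic):

```julia
using Distributed
addprocs(4)

# count heads in 2000 coin flips, summing the per-iteration results across all workers
nheads = @distributed (+) for i = 1:2000
    Int(rand(Bool))
end
```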
In the following, we present an example of distributed computing with the help of Julia's `SharedArrays` module, which creates arrays shared by all the processors.
More massive computing can be made with [DistributedArrays](https://github.com/JuliaParallel/DistributedArrays.jl).
First, we consider the distributed computation.
```julia
using Distributed, SharedArrays
addprocs(3)
@everywhere using KitBase
begin
vars = Dict{Symbol,Any}()
vars[:matter] = "gas"
vars[:case] = "sod"
vars[:space] = "1d0f0v"
vars[:flux] = "kfvs"
vars[:collision] = "bgk"
vars[:nSpecies] = 1
vars[:interpOrder] = 1
vars[:limiter] = "vanleer"
vars[:boundary] = "fix"
vars[:cfl] = 0.5
vars[:maxTime] = 0.2
vars[:x0] = 0.0
vars[:x1] = 1.0
vars[:nx] = 2000
vars[:pMeshType] = "uniform"
vars[:nxg] = 0
vars[:knudsen] = 0.001
vars[:mach] = 0.0
vars[:prandtl] = 1.0
vars[:inK] = 0.0
vars[:omega] = 0.81
vars[:alphaRef] = 1.0
vars[:omegaRef] = 0.5
end
set = KitBase.set_setup(vars)
pSpace = KitBase.set_geometry(vars)
vSpace = KitBase.set_velocity(vars)
gas = KitBase.set_property(vars)
ib = KitBase.set_ib(vars, set, vSpace, gas)
folder = @__DIR__
ks = KitBase.SolverSet(set, pSpace, vSpace, gas, ib, folder)
dt = ks.pSpace.dx[1] / (5.0 + KitBase.sound_speed(ks.ib.primL, ks.gas.γ))
nt = floor(ks.set.maxTime / dt) |> Int
wp = SharedArray{Float64}((ks.pSpace.nx, 3), init=A->(A=zeros(ks.pSpace.nx, 3)))
for i in 1:ks.pSpace.nx
if i <= ks.pSpace.nx ÷ 2
wp[i,:] .= ks.ib.wL
else
wp[i,:] .= ks.ib.wR
end
end
fwp = SharedArray{Float64}((ks.pSpace.nx+1, 3), init=A->(A=zeros(ks.pSpace.nx+1, 3)))
@time for iter = 1:nt÷3
@sync @distributed for i in 2:ks.pSpace.nx
flux = @view fwp[i,:]
KitBase.flux_gks!(
flux,
wp[i-1,:],
wp[i,:],
ks.gas.γ,
ks.gas.K,
ks.gas.μᵣ,
ks.gas.ω,
dt,
0.5 * ks.pSpace.dx[i-1],
0.5 * ks.pSpace.dx[i],
)
end
@sync @distributed for i in 2:ks.pSpace.nx-1
for j in 1:3
wp[i,j] += (fwp[i,j] - fwp[i+1,j]) / ks.pSpace.dx[i]
end
end
end
```
The benchmark result on an Intel NUC8i7BEH desktop is around `13.620491 seconds (2.26 M allocations: 101.219 MiB, 0.22% gc time)`.
Then, we compare the efficiency with a serial execution.
```julia
w = zeros(ks.pSpace.nx, 3)
for i in 1:ks.pSpace.nx
if i <= ks.pSpace.nx ÷ 2
w[i,:] .= ks.ib.wL
else
w[i,:] .= ks.ib.wR
end
end
fw = zeros(ks.pSpace.nx+1, 3)
@time for iter = 1:nt÷3
for i in 2:ks.pSpace.nx
flux = @view fw[i,:]
KitBase.flux_gks!(
flux,
w[i-1,:],
w[i,:],
ks.gas.γ,
ks.gas.K,
ks.gas.μᵣ,
ks.gas.ω,
dt,
0.5 * ks.pSpace.dx[i-1],
0.5 * ks.pSpace.dx[i],
)
end
for i in 2:ks.pSpace.nx-1
for j in 1:3
w[i,j] += (fw[i,j] - fw[i+1,j]) / ks.pSpace.dx[i]
end
end
end
```
The result on the same desktop is around `20.830331 seconds (323.96 M allocations: 24.472 GiB, 16.89% gc time)`.
With more grid cells being used, the performance deviation is expected to be more significant. | Kinetic | https://github.com/vavrines/Kinetic.jl.git |
|
[
"MIT"
] | 0.7.10 | 54b92e6e1bcc3444013cf29729637e86bf62dcd0 | docs | 1681 | # Multiple threading
The multi-threading computation is built upon Julia's `@threads` macro.
```julia
Base.Threads.@threads for ... end
```
It provides an OpenMP type parallelization.
The iteration space is split among multiple tasks and those tasks are parallelized on threads according to a scheduling policy.
A barrier is placed at the end of the loop which waits for all tasks to finish execution.
In Kinetic, `@threads` is set in front of the loops for reconstruction, evolution and update.
For example, the evaluation of fluxes is conducted as follows.
```julia
@inbounds Threads.@threads for i = idx0:idx1
flux_gks!(
face[i].fw,
ctr[i-1].w .+ 0.5 .* ctr[i-1].dx .* ctr[i-1].sw,
ctr[i].w .- 0.5 .* ctr[i].dx .* ctr[i].sw,
KS.gas.γ,
KS.gas.K,
KS.gas.μᵣ,
KS.gas.ω,
dt,
0.5 * ctr[i-1].dx,
0.5 * ctr[i].dx,
ctr[i-1].sw,
ctr[i].sw,
)
end
```
It automatically makes use of multiple threads if Julia is initialized with
```bash
julia -t n
```
Besides of `@threads`, finer dispatch can be made with `@spawn` macro.
```julia
Base.Threads.@spawn
```
It creates and runs a task on any available thread.
To wait for the task to finish, call `wait` on the result of this macro, or call `fetch` to wait and then obtain its return value.
Values can be interpolated into `@spawn` via `$`, which copies the value directly into the constructed underlying closure.
This allows the user to insert the value of a variable, isolating the asynchronous code from changes to the variable's value in the current task.
This pattern can be used with the low-level reconstruction, flux and step functions.
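A small sketch of this pattern (plain Julia; `heavy_work` is a hypothetical stand-in for a low-level flux or step kernel):

```julia
heavy_work(x) = sum(abs2, x)            # hypothetical stand-in for a low-level kernel
data = rand(10^6)

# interpolate `data` with $ so later changes to the variable don't affect the task
task = Threads.@spawn heavy_work($data)
result = fetch(task)                    # wait for the task and obtain its return value
```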
|
[
"MIT"
] | 0.7.10 | 54b92e6e1bcc3444013cf29729637e86bf62dcd0 | docs | 946 | # Parallel computing
Julia supports different categories of parallel programming natively:
- Asynchronous "tasks", or coroutines
- Multi-threading
- Distributed computing
Kinetic integrates the latter two mechanisms along with CUDA-based GPU computing.
An initialization function is built into Kinetic.
```julia
function __init__()
np = nworkers()
nt = Threads.nthreads()
if nt > 1 || np > 1
@info "Kinetic will run with $np processors and $nt threads"
else
@info "Kinetic will run serially"
end
if has_cuda()
@info "Kinetic will run with CUDA"
for (i, dev) in enumerate(CUDA.devices())
@info "$i: $(CUDA.name(dev))"
end
@info "Scalar operation is disabled in CUDA"
CUDA.allowscalar(false)
end
end
```
When the package is imported, it reports the computational resources (processors, threads and CUDA devices) that will be utilized.
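For example, starting Julia with multiple threads and worker processes before loading the package triggers the corresponding report (a sketch; the printed numbers simply follow the launch flags, per the `@info` call above):

```bash
julia -t 4 -p 2
```

```julia
julia> using Kinetic
[ Info: Kinetic will run with 2 processors and 4 threads
```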
|
[
"MIT"
] | 0.7.10 | 54b92e6e1bcc3444013cf29729637e86bf62dcd0 | docs | 5394 | # Basic Physics
## Microscopic formulation
The physical world shows a diverse set of behaviors on different characteristic scales.
Consider the molecular motion of gases as an example.
Down to the finest scale of a many-particle system, Newton's second law depicts particle motions via
```math
\mathbf{F} = m \mathbf{a}.
```
As a first order system it reads
```math
\frac{d \mathbf x}{dt} = \mathbf v, \ \frac{d \mathbf v}{dt} = \frac{\mathbf F}{m},
```
where ``\mathbf F`` is external force and ``m`` is particle mass.
An intuitive numerical algorithm is to take all the particles on board and track their trajectories.
A typical example is the [Molecular Dynamics](https://en.wikipedia.org/wiki/Molecular_dynamics).
This is not efficient since there are more than `2e25` molecules per cubic meter in the standard atmosphere, and things get even more complicated when the N-body interactions are counted all the time.
Some methods have been proposed to simplify the computation.
As an example, the [Direct simulation Monte Carlo](https://en.wikipedia.org/wiki/Direct_simulation_Monte_Carlo) employs certain molecular models and conducts the intermolecular collisions in a stochastic manner.
It significantly reduces the computational cost, while the trade-off is the artificial fluctuations.
Many realizations must be simulated successively to average the solutions and reduce the errors.
## Mesoscopic formulation
An alternative strategy is made from ensemble averaging, where the coarse-grained modeling is used to provide a bottom-up view.
At the mean free path and collision time scale of molecules, particles travel freely during most of the time, with mild intermolecular collisions.
Such dynamics can be described with an operator splitting approach, i.e. the kinetic transport equation
```math
\frac{\partial f}{\partial t}+ \mathbf v \cdot \nabla_\mathbf x f + \mathbf a \cdot \nabla_\mathbf v f = Q(f),
```
where ``f`` denotes the probability of finding a particle at a certain location in phase space.
The left-hand side of the equation above models the transport phenomena due to the inhomogeneous distribution of particles and the external force field,
while the right-hand side depicts intermolecular collisions.
Different collision models can be inserted into such equation.
If the particles only collide with a background material, one obtains the linear Boltzmann collision operator
```math
Q(f)=\int_{\mathbb R^3} \mathcal B(\mathbf v_*, \mathbf v) \left[ f(\mathbf v_*)-f(\mathbf v)\right] d\mathbf v_*,
```
where the collision kernel ``\mathcal B`` models the strength of collisions at different velocities.
If the interactions among particles are considered, the collision operator becomes nonlinear.
For example, the two-body collision results in the nonlinear Boltzmann equation
```math
Q(f)=\int_{\mathbb R^3} \int_{\mathcal S^2} \mathcal B(\cos \beta, |\mathbf{v}-\mathbf{v_*}|) \left[ f(\mathbf v')f(\mathbf v_*')-f(\mathbf v)f(\mathbf v_*)\right] d\mathbf \Omega d\mathbf v_*.
```
To solve the Boltzmann equation, a discretized phase space needs to be introduced and the solution algorithm is called [discrete ordinates method](https://en.wikipedia.org/wiki/Discrete_ordinates_method) or discrete velocity method.
Due to the complicated fivefold integral in the nonlinear Boltzmann collision operator, it is sometimes replaced by simplified models in the discrete velocity method, e.g. the relaxation model
```math
Q(f) = \nu (\mathcal M - f).
```
From the [H-theorem](https://en.wikipedia.org/wiki/H-theorem), we learn that an isolated system evolves in the direction of increasing entropy.
The maximal entropy state corresponds to the well-known Maxwellian distribution
```math
\mathcal M = n\left(\frac{m}{2\pi k T}\right)^{D/2}\exp \left( -\frac{m}{2kT} (\mathbf v - \mathbf V)^2 \right),
```
where ``k`` is the Boltzmann constant, ``\mathbf V`` and ``T`` are the bulk velocity and temperature.
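This distribution can be evaluated numerically with the built-in `maxwellian` function (a quick sketch; in the one-dimensional reduced setting the primitive variables are ordered as density, bulk velocity, and ``λ``, which is proportional to the inverse temperature):

```julia
using Kinetic

u = collect(-5:0.01:5)      # discrete particle velocity points
prim = [1.0, 0.0, 1.0]      # density, bulk velocity, λ
M = maxwellian(u, prim)
sum(M) * 0.01               # ≈ 1.0, recovering the density by numerical integration
```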
The Boltzmann dynamics can be projected onto a lower dimensionality.
For example, with a one-dimensional velocity space formulation, the high-dimensional particle distribution can be integrated with respect to the remaining coordinates as
```math
h_0 = \int_{-\infty}^{\infty} \int_{-\infty}^{\infty} f d v dw, \ h_1 = \int_{-\infty}^{\infty} \int_{-\infty}^{\infty} (v^2+w^2) f dv dw,
```
where ``h_0`` and ``h_1`` are called reduced distribution functions and form a so-called `1d2f1v` system.
## Macroscopic formulation
Meanwhile, with the enlargement of modeling scale to a macroscopic hydrodynamic level, the accumulating effect of particle collisions results in an equalization of local temperature and velocity,
where the moderate non-equilibrium effects can be well described by viscous transport, heat conduction and mass diffusion,
i.e., the so-called transport phenomena.
Large-scale dynamics presents the property of waves, and the macroscopic transport equations can be constructed to describe the bulk behaviors of fluids.
Typical examples are the Euler and Navier-Stokes equations
```math
\frac{\partial \mathbf W}{\partial t} + \nabla_\mathbf x \cdot \mathbf F = \mathbf S
```
From microscopic particle transport to macroscopic fluid motion, there is a continuous variation of flow dynamics.
We pay special attention to Hilbert's sixth problem, i.e. building the numerical passage between the kinetic theory of gases and continuum mechanics.
| Kinetic | https://github.com/vavrines/Kinetic.jl.git |
|
[
"MIT"
] | 0.7.10 | 54b92e6e1bcc3444013cf29729637e86bf62dcd0 | docs | 1435 | # Calling from Python
For maximum convenience, a wrapper [kineticpy](https://github.com/vavrines/kineticpy) has been built to locate all the methods from Python.
## How to use?
Let's start by cloning the repository and changing into the directory.
```bash
git clone https://github.com/vavrines/kineticpy.git
cd kineticpy
```
Next, we start `python`.
The Julia main module can be installed and initialized by
```python
>>> import kineticpy
>>> kineticpy.install()
```
The basic structs and methods are stored in the base module, and can be imported via
```python
>>> from kineticpy import base
```
## Example
We provide a quick tutorial for kineticpy here.
```python
>>> from kineticpy import base
>>> import numpy as np
>>> u = np.linspace(-5, 5, 28) # velocity space
>>> prim_var = np.array([1.0, 0.0, 1.0]) # primitive flow variables
>>> M = base.maxwellian(u, prim_var) # compute Maxwellian distribution
>>> M.view()
array([7.83543327e-12, 2.77323769e-10, 7.46041809e-09, 1.52542631e-07,
2.37067103e-06, 2.80029217e-05, 2.51412806e-04, 1.71562923e-03,
8.89839075e-03, 3.50793472e-02, 1.05109877e-01, 2.39379825e-01,
4.14365469e-01, 5.45169515e-01, 5.45169515e-01, 4.14365469e-01,
2.39379825e-01, 1.05109877e-01, 3.50793472e-02, 8.89839075e-03,
1.71562923e-03, 2.51412806e-04, 2.80029217e-05, 2.37067103e-06,
1.52542631e-07, 7.46041809e-09, 2.77323769e-10, 7.83543327e-12])
``` | Kinetic | https://github.com/vavrines/Kinetic.jl.git |
|
[
"MIT"
] | 0.7.10 | 54b92e6e1bcc3444013cf29729637e86bf62dcd0 | docs | 1663 | # Reference
- Bezanson, J., Edelman, A., Karpinski, S., & Shah, V. B. (2017). Julia: A fresh approach to numerical computing. SIAM review, 59(1), 65-98.
- Chapman, S., Cowling, T. G., & Burnett, D. (1990). The mathematical theory of non-uniform gases: an account of the kinetic theory of viscosity, thermal conduction and diffusion in gases. Cambridge University Press.
- Landau, L.D., & Lifshitz E. M. (1959). Fluid mechanics, London: Pergamon Press.
- Blazek, J. (2015). Computational fluid dynamics: principles and applications. Butterworth-Heinemann.
- Xu, K., & Huang, J. C. (2010). A unified gas-kinetic scheme for continuum and rarefied flows. Journal of Computational Physics, 229(20), 7747-7764.
- Bird, G. A. (1994). Molecular gas dynamics and the direct simulation of gas flows. Molecular gas dynamics and the direct simulation of gas flows.
- Xiao, T., Cai, Q., & Xu, K. (2017). A well-balanced unified gas-kinetic scheme for multiscale flow transport under gravitational field. Journal of Computational Physics, 332, 475-491.
- Xiao, T., Xu, K., & Cai, Q. (2019). A unified gas-kinetic scheme for multiscale and multicomponent flow transport. Applied Mathematics and Mechanics, 40(3), 355-372.
- Xiao, T., Liu, C., Xu, K., & Cai, Q. (2020). A velocity-space adaptive unified gas kinetic scheme for continuum and rarefied flows. Journal of Computational Physics, 415, 109535.
- Xiao, T., & Frank, M. (2020). Using neural networks to accelerate the solution of the Boltzmann equation. arXiv:2010.13649.
- Amos, B., Xu, L., & Kolter, J. Z. (2017, July). Input convex neural networks. In International Conference on Machine Learning (pp. 146-155). PMLR. | Kinetic | https://github.com/vavrines/Kinetic.jl.git |
|
[
"MIT"
] | 0.7.10 | 54b92e6e1bcc3444013cf29729637e86bf62dcd0 | docs | 457 | # General framework
Kinetic employs the finite volume method (FVM) for modeling and simulation.
The general solution algorithm is summarized below, where both explicit and implicit methods are implemented.

The high-level solver function is
```@docs
solve!
```
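A typical high-level workflow calls the solver right after initialization, following the lid-driven cavity example elsewhere in the documentation (a sketch; `config.toml` is a configuration file as described in the parameter settings section):

```julia
using Kinetic

set, ctr, xface, yface, t = initialize("config.toml")
t = solve!(set, ctr, xface, yface, t)
plot_contour(set, ctr)
```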
The detailed solution procedure consists of the following steps:
- pre-process
- timestep calculation
- reconstruction
- evolution
- update
- post-process
| Kinetic | https://github.com/vavrines/Kinetic.jl.git |
|
[
"MIT"
] | 0.7.10 | 54b92e6e1bcc3444013cf29729637e86bf62dcd0 | docs | 522 | # Evolution
```@docs
evolve!
```
The evolution solver calculates the interface numerical fluxes based on two neighboring cells.
Different flux functions can be used with the option `model`.
- macroscopic: Godunov, Lax, Roe, HLL, wave-propagation
- mesoscopic: upwind, central-upwind, gas-kinetic scheme
The available flux solvers are listed as follows.
```@docs
flux_lax!
flux_hll!
flux_roe!
flux_gks
flux_gks!
flux_kfvs!
flux_kcu!
flux_ugks!
flux_boundary_maxwell!
flux_boundary_specular!
flux_em!
flux_emx!
flux_emy!
```
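As an illustration of the mesoscopic flux API, the kinetic flux vector splitting flux can be evaluated from two neighboring distribution functions (a sketch following the GPU computing tutorial; `zero(fL)` and `zero(fR)` stand for the slopes of the left and right distributions):

```julia
using Kinetic

dt = 1e-3
u = collect(-5:0.01:5)
fL = maxwellian(u, [1.0, 0.0, 0.5])      # left distribution function
fR = maxwellian(u, [0.125, 0.0, 0.625])  # right distribution function
ff = similar(fL)                         # interface flux (output)
flux_kfvs!(ff, fL, fR, u, dt, zero(fL), zero(fR))
```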
| Kinetic | https://github.com/vavrines/Kinetic.jl.git |
|
[
"MIT"
] | 0.7.10 | 54b92e6e1bcc3444013cf29729637e86bf62dcd0 | docs | 133 | # Postprocess
The post-process solver handles the simulation data and visualization.
```@docs
plot_line
plot_contour
write_jld
```
| Kinetic | https://github.com/vavrines/Kinetic.jl.git |
|
[
"MIT"
] | 0.7.10 | 54b92e6e1bcc3444013cf29729637e86bf62dcd0 | docs | 279 | # Preprocess
```@docs
initialize
```
The pre-process solver initializes the simulation and returns the solver set, control volumes, interfaces, and current time.
It could be a new simulation or the restart of an interrupted one.
- new run: .txt / .cfg / .toml / etc.
- restart: .jld2
| Kinetic | https://github.com/vavrines/Kinetic.jl.git |
|
[
"MIT"
] | 0.7.10 | 54b92e6e1bcc3444013cf29729637e86bf62dcd0 | docs | 346 | # Reconstruction
```@docs
reconstruct!
```
The reconstruction solver interpolates piecewise solutions with the desired order of accuracy.
The reconstruction stencils can be based on 2 or 3 cells.
```@docs
reconstruct2
reconstruct2!
reconstruct3
reconstruct3!
```
The available schemes are
```@docs
vanleer
minmod
superbee
vanalbaba
weno5
```
| Kinetic | https://github.com/vavrines/Kinetic.jl.git |
|
[
"MIT"
] | 0.7.10 | 54b92e6e1bcc3444013cf29729637e86bf62dcd0 | docs | 149 | # Timestep
```@docs
timestep
```
The timestep solver returns the time interval used for the upcoming solution loop based on the current variables.
| Kinetic | https://github.com/vavrines/Kinetic.jl.git |
|
[
"MIT"
] | 0.7.10 | 54b92e6e1bcc3444013cf29729637e86bf62dcd0 | docs | 735 | # Update
The update solver calculates the variables at the n+1 time step based on the numerical fluxes and in-cell collisions.
```@docs
update!
```
The current solver supports different collision models, for example:
- `:bgk`: BGK relaxation model
- `:shakhov`: Shakhov relaxation model
- `:boltzmann`: original Boltzmann collision integral
Different boundary conditions are supported:
- `:fix`: fixed Dirichlet boundary
- `:period`: periodic boundary
- `:extra`: extrapolation
- `:maxwell`: Maxwell's diffusive boundary
The current solver adopts the implicit-explicit (IMEX) time integration uniformly.
Furthermore, multi-step time integrators can be used in conjunction with the method of lines in [DifferentialEquations.jl](https://github.com/SciML/DifferentialEquations.jl).
| Kinetic | https://github.com/vavrines/Kinetic.jl.git |
|
[
"MIT"
] | 0.7.10 | 54b92e6e1bcc3444013cf29729637e86bf62dcd0 | docs | 809 | # Illustrative examples
Thanks to the brilliant expressiveness and low-overhead abstraction in Julia, we provide different levels of solution algorithm for modeling and simulating advection-diffusion dynamics.
The high-level solver is able to solve complex physics in a few lines, while the low-level APIs keep all the detailed implementations and benefit the secondary development.
The low-level methods are easy to call from Python and C.
In the following, we present some quick tutorials to illustrate the usage of Kinetic.
For more examples, please refer to the example directories in [Kinetic.jl](https://github.com/vavrines/Kinetic.jl/tree/master/example), [KitBase.jl](https://github.com/vavrines/KitBase.jl/tree/main/example) and [KitML.jl](https://github.com/vavrines/KitML.jl/tree/main/example).
| Kinetic | https://github.com/vavrines/Kinetic.jl.git |
|
[
"MIT"
] | 0.7.10 | 54b92e6e1bcc3444013cf29729637e86bf62dcd0 | docs | 769 | # Configuration of solver
Kinetic is organized with data structures and methods that offer both generality and convenience.
While the low-level methods can be applied to multi-dimensional arrays directly, we provide a set of domain-specific structs that handle multiple dispatch in an elegant way.
For a solver pending execution, its configuration is handled in a `SolverSet <: AbstractSolverSet` struct.
```@docs
SolverSet
```
It contains six fields:
- set: general setup of a simulation
- pSpace: physical space settings
- vSpace: particle velocity space settings
- gas: properties of the simulated substance
- ib: initial and boundary conditions
- outputFolder: file directory for the output results
This struct plays a key role in the solution algorithm.
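A minimal construction sketch following the distributed computing tutorial (here `vars` is a configuration dictionary holding the entries listed in the parameter settings section):

```julia
set = KitBase.set_setup(vars)
pSpace = KitBase.set_geometry(vars)
vSpace = KitBase.set_velocity(vars)
gas = KitBase.set_property(vars)
ib = KitBase.set_ib(vars, set, vSpace, gas)
ks = KitBase.SolverSet(set, pSpace, vSpace, gas, ib, @__DIR__)
```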
|
[
"MIT"
] | 0.7.10 | 54b92e6e1bcc3444013cf29729637e86bf62dcd0 | docs | 628 | # Physical space
A struct `pSpace <: AbstractPhysicalSpace` defines the geometric setup of a simulation.
For structured topologies, structs for 1- and 2-dimensional physical space are provided.
```@docs
PSpace1D
PSpace2D
```
It contains:
- x0 (y0): location of starting point
- x1 (y1): location of ending point
- nx (ny): number of cells in one direction
- x (y): locations of middle points of all cells
- dx (dy): intervals of all cell points
Besides, an unstructured mesh struct is provided, which supports 1-3 dimensional geometries.
```@docs
UnstructPSpace
```
It can be created by the built-in mesh reader.
```@docs
read_mesh
```
| Kinetic | https://github.com/vavrines/Kinetic.jl.git |
|
[
"MIT"
] | 0.7.10 | 54b92e6e1bcc3444013cf29729637e86bf62dcd0 | docs | 1045 | # Finite volume data
In the finite volume method, the data is stored separately throughout the cells.
Therefore, we provide `AbstractControlVolume` and `AbstractInterface` structs for processing in-cell and edge information,
which are used as arrays of structs (AoS) in numerical simulations.
Considering a one-dimensional physical space ``x``, we provide the following control volume structs.
The structs differ in the number of particle distribution functions they carry.
```@docs
ControlVolume1D
ControlVolume1D1F
ControlVolume1D2F
ControlVolume1D3F
ControlVolume1D4F
```
Within each cell, different numbers of particle distribution functions can be tracked.
The interface data is stored correspondingly.
```@docs
Interface1D
Interface1D1F
Interface1D2F
Interface1D3F
Interface1D4F
```
The 2D control volume structs are implemented as well.
```@docs
ControlVolume2D
ControlVolume2D1F
ControlVolume2D2F
ControlVolume2D3F
```
The numerical fluxes are evaluated through `AbstractInterface` structs.
```@docs
Interface2D
Interface2D1F
Interface2D2F
```
| Kinetic | https://github.com/vavrines/Kinetic.jl.git |
|
[
"MIT"
] | 0.7.10 | 54b92e6e1bcc3444013cf29729637e86bf62dcd0 | docs | 421 | # Initial and boundary conditions
A struct `ib <: AbstractCondition` defines the initial and boundary conditions of a simulation.
It contains the values of the conservative and primitive variables, and the particle distribution functions at the left and right (up and down) sides of the domain for both initial and boundary conditions.
It is set this way to easily deal with discontinuous initial conditions.
```@docs
IB
IB1F
IB2F
IB3F
IB4F
``` | Kinetic | https://github.com/vavrines/Kinetic.jl.git |
|
[
"MIT"
] | 0.7.10 | 54b92e6e1bcc3444013cf29729637e86bf62dcd0 | docs | 857 | # Particle properties
A struct `gas <: AbstractProperty` defines the properties of the particle model.
It currently supports the following models:
- scalar
- gas-type molecule
- plasma
```@docs
Scalar
Gas
Mixture
Plasma1D
Plasma2D
```
The fields denote, for example:
- Kn: reference Knudsen number
- Ma: reference Mach number
- Pr: reference Prandtl number
- K: internal degree of freedom of molecule
- γ: adiabatic index
- ω: viscosity index
- αᵣ: reference ``\alpha`` in viscosity evaluation
- ωᵣ: reference ``\omega`` in viscosity evaluation
- μᵣ: reference viscosity
- m: mass of each particle
- np: number of particles
The viscosity is evaluated with the following hard-sphere model.
```math
\mu = \mu_{ref} \left(\frac{T}{T_{ref}}\right)^{\omega}
```
```math
\mu_{ref}=\frac{5(\alpha+1)(\alpha+2) \sqrt{\pi}}{4 \alpha(5-2 \omega)(7-2 \omega)} Kn_{ref}
```
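The reference viscosity can be obtained with the helper used throughout the tutorials (a sketch; the arguments are the reference Knudsen number and the two reference viscosity indices):

```julia
Kn, α, ω = 0.075, 1.0, 0.5
μᵣ = ref_vhs_vis(Kn, α, ω)   # reference viscosity of a variable hard-sphere gas
```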
| Kinetic | https://github.com/vavrines/Kinetic.jl.git |
|
[
"MIT"
] | 0.7.10 | 54b92e6e1bcc3444013cf29729637e86bf62dcd0 | docs | 662 | # Parameter Settings
A struct `set <: AbstractSetup` defines the general setup of a simulation.
```@docs
Setup
```
It contains
- matter: fluid substance
- case: simulation case name
- space: ``n_1 d n_2 f n_3 v``, which denotes the physical dimensionality, numbers of particle distribution functions and velocity dimensionality
- flux: numerical flux function name
- collision: collision operator of kinetic equation
- nSpecies: number of species
- interpOrder: order of accuracy for reconstruction
- limiter: limiter function name
- boundary: boundary condition
- cfl: Courant-Friedrichs-Lewy number for determining time step
- maxTime: maximum simulation time | Kinetic | https://github.com/vavrines/Kinetic.jl.git |
|
[
"MIT"
] | 0.7.10 | 54b92e6e1bcc3444013cf29729637e86bf62dcd0 | docs | 892 | # Particle velocity space
A struct `vSpace <: AbstractSetup` defines the particle velocity setup of a simulation.
Structs for 1-3 dimensional particle velocity space are built.
```@docs
VSpace1D
VSpace2D
VSpace3D
```
It contains
- u0 (v0, w0): location of starting point
- u1 (v1, w1): location of ending point
- nu (nv, nw): number of cells in one direction
- u (v, w): locations of middle points of all cells
- du (dv, dw): intervals of all cell points
- weights: quadrature weights for numerical integral
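A short construction sketch (assuming the constructors take the bounds and the number of points in each direction, in the same pattern as the three-dimensional case used in the tutorials):

```julia
vs1 = VSpace1D(-5.0, 5.0, 80)                  # 1D velocity space
vs2 = VSpace2D(-5.0, 5.0, 28, -5.0, 5.0, 28)   # 2D velocity space
vs1.u, vs1.weights                             # cell midpoints and quadrature weights
```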
Note that the one-dimensional velocity space can be used to handle 1-3 dimensional unstructured topology as well.
In addition, velocity space structs for multi-component substances are implemented.
```@docs
MVSpace1D
MVSpace2D
MVSpace3D
```
For the simulation cases where no phase-space evolution is involved, `vSpace` can be set as `nothing` directly.
```julia
vSpace = nothing
``` | Kinetic | https://github.com/vavrines/Kinetic.jl.git |
|
[
"MIT"
] | 0.7.10 | 54b92e6e1bcc3444013cf29729637e86bf62dcd0 | docs | 9349 | ---
title: 'Kinetic.jl: A portable finite volume toolbox for scientific and neural computing'
tags:
- kinetic theory
- computational fluid dynamics
- scientific machine learning
- julia
authors:
- name: Tianbai Xiao
orcid: 0000-0001-9127-9497
affiliation: 1
affiliations:
- name: Karlsruhe Institute of Technology, 76131 Karlsruhe, Germany
index: 1
date: 06 January 2021
bibliography: paper.bib
---
# Summary
Kinetic.jl is a lightweight finite volume toolbox written in the Julia programming language for the study of computational physics and scientific machine learning.
It is an open-source project hosted on GitHub and distributed under the MIT license.
The main module consists of KitBase.jl for basic physics and KitML.jl for neural dynamics.
The function library provides a rich set of numerical fluxes and source terms for differential and integral equations.
Any advection-diffusion type mechanical or neural equation can be set up and solved within the framework.
Machine learning methods can be seamlessly integrated to build data-driven closure models and accelerate the calculation of nonlinear terms.
The package is designed to balance programming flexibility for scientific research, algorithmic efficiency for applications, and simplicity for educational usage.
# Statement of need
A physical system can perform a wonderfully diverse set of acts on different characteristic scales.
It is challenging to propose a universal theory that can be applied to describing multi-scale physical evolutions quantitatively.
For example, particle transport can be depicted statistically by fluid mechanics at a macroscopic level [@batchelor2000], but needs to be followed in more detail by the kinetic theory of gases at the molecular mean free path scale [@chapman1990].
With rapidly advancing computing power, the finite volume method (FVM) provides a prevalent method to conduct direct numerical simulations based on first physical principles.
Most existing FVM libraries, e.g., OpenFOAM [@jasak2007], are dedicated to solving the Euler and the Navier-Stokes equations.
Very limited work has been done for phase-field models [@zhu2017; @krause2021].
Since classical fluid dynamics basically requires an one-shot simulation process from initial to final solution fields, these libraries are mostly written in compiled languages (C/C++ and Fortran).
Such approaches enjoy good execution efficiency but sacrifice the flexibility of secondary development.
This makes it cumbersome to integrate existing numerical solvers with scientific machine learning (SciML) packages, as interactive programming is becoming a mainstream practice in data science.
This also causes considerable difficulties to general or educational users who are not familiar with the package in configuring environments and compiling binaries.
One compromise can be made by using a combination of static and dynamic languages [@clawpack2020], where the high-level front-ends and the low-level computational back-ends are split.
This methodology benefits general users, while researchers still need to work on the back-end if a new feature is required.
The so-called two-language problem introduces additional tradeoffs in both development and execution.
For example, a two-tiered system brings unavoidable challenges for type domain transition and memory management.
Special attention needs to be paid to optimizing the high-level code, e.g., vectorizing the heavy computation parts, which can be unnatural in a physical simulation and might generate additional temporary objects.
In addition, interfacing between layers may add significant overhead and makes whole-program optimization much more difficult [@bezanson2012].
Unlike these packages, Kinetic.jl is built upon the Julia programming language [@bezanson2017], which is dynamically typed and designed for high performance computing for a broad range of devices.
Based on type inference and multiple dispatch, it is a promising choice to solve the two-language problem.
Kinetic.jl focuses on the theoretical and numerical studies of many-particle systems of gases, photons, plasmas, neutrons, etc. [@xiao2017; @xiao2020a]
A hierarchy of abstractions is implemented in the library.
At the highest level, it is feasible to model and simulate a fluid dynamic problem within ten lines of code.
At the lowest level, we designed methods for general numbers and arrays so that it is possible to cooperate with existing packages in the Julia ecosystem.
For example, Flux.jl [@Flux2018] can be used to create and train scientific machine learning models.
Innovations of the package are:
- 100% Julia stack that encounters no two-language problem
- Comprehensive support for kinetic theory and phase-space equations
- Lightweight design to ensure the flexibility for secondary development
- Close coupling with scientific machine learning
# KitBase.jl
The main module of Kinetic.jl is split into two pieces to reduce the just-in-time (JIT) compilation time for domain-specific applications.
The basic physical laws and finite volume method are implemented in KitBase.jl.
It provides a variety of solvers for the Boltzmann equation, Maxwell's equations, advection-diffusion equation, Burgers' equation, Euler and Navier-Stokes equations, etc.
Different parallel computing techniques are provided, e.g., multi-threading, distributed computing, and CUDA programming.
In the following, we present an illustrative example of solving a lid-driven cavity problem with the Boltzmann equation.
Two initialization methods, i.e., configuration text and Julia script, are available for setting up the solver.
With the configuration file `config.toml` set as below,
```toml
# setup
matter = gas # material
case = cavity # case
space = 2d2f2v # phase
flux = kfvs # flux function
collision = bgk # intermolecular collision
nSpecies = 1 # number of species
interpOrder = 2 # interpolation order of accuracy
limiter = vanleer # limiter function
boundary = maxwell # boundary condition
cfl = 0.8 # CFL number
maxTime = 5.0 # maximal simulation time
# physical space
x0 = 0.0 # starting point in x
x1 = 1.0 # ending point in x
nx = 45 # number of cells in x
y0 = 0.0 # starting point in y
y1 = 1.0 # ending point in y
ny = 45 # number of cells in y
pMeshType = uniform # mesh type
nxg = 0 # number of ghost cell in x
nyg = 0 # number of ghost cell in y
# velocity space
umin = -5.0 # starting point in u
umax = 5.0 # ending point in u
nu = 28 # number of cells in u
vmin = -5.0 # starting point in v
vmax = 5.0 # ending point in v
nv = 28 # number of cells in v
vMeshType = rectangle # mesh type
nug = 0 # number of ghost cell in u
nvg = 0 # number of ghost cell in v
# gas property
knudsen = 0.075 # Knudsen number
mach = 0.0 # Mach number
prandtl = 1.0 # Prandtl number
inK = 1.0 # molecular inner degree of freedom
omega = 0.72 # viscosity index of hard-sphere gas
alphaRef = 1.0 # viscosity index of hard-sphere gas in reference state
omegaRef = 0.5 # viscosity index of hard-sphere gas in reference state
# boundary condition
uLid = 0.15 # U-velocity of moving wall
vLid = 0.0 # V-velocity of moving wall
tLid = 1.0 # temperature of wall
```
we can execute the following codes
```julia
using Kinetic
set, ctr, xface, yface, t = initialize("config.toml")
t = solve!(set, ctr, xface, yface, t)
plot_contour(set, ctr)
```
In the above codes, the computational setup is stored in `set`.
The solutions over control volumes are represented in an array `ctr`, while `xface` and `yface` record the interface fluxes along x and y directions.
In this example, the structured mesh is generated automatically by Kinetic.jl, while a non-structured mesh file can also be imported and used for computation.
The result is visualized with built-in function `plot_contour`, which presents the distributions of gas density, velocity, and temperature inside the cavity.

Fig. 1: macroscopic variables in the lid-driven cavity (top left: density, top right: U-velocity, bottom left: V-velocity, bottom right: temperature).
# KitML.jl
Machine learning has increasing momentum in scientific computing.
Given the nonlinear structure of differential and integral equations, it is promising to incorporate the universal function approximators from machine learning surrogate models into the governing equations and achieve a better balance between efficiency and accuracy.
In KitML.jl, we implement strategies to construct hybrid mechanical-neural differential operators and form structure-preserving data-driven closure models.
The detailed background can be found in @xiao2020b.
# Extension
Numerical simulations of nonlinear models and differential equations are essentially connected with supercomputers and high-performance computing (HPC).
Considering that some existing hardware architecture, e.g., Sunway TaihuLight with Chinese-designed SW26010 processors, only provides optimization for specific languages, we have developed an accompanying package KitFort.jl.
This is not a default component of Kinetic.jl but can be manually imported.
In addition, a wrapper, kineticpy, has been built to locate structures and methods from the Python ecosystem.
# Acknowledgements
The current work is funded by the Alexander von Humboldt Foundation (Ref3.5-CHN-1210132-HFST-P).
# References
| Kinetic | https://github.com/vavrines/Kinetic.jl.git |
|
[
"MIT"
] | 0.1.1 | 10c04258080b378ecf9b81b6739206eca3f63535 | code | 946 | module TopOptProblems
const PENALTY_BEFORE_INTERPOLATION = true
using Ferrite, StaticArrays, LinearAlgebra
using SparseArrays, Setfield, Requires
import Distributions
using VTKDataTypes
import Ferrite: assemble!
abstract type AbstractTopOptProblem end
include("Utilities/Utilities.jl")
using .Utilities
using .Utilities: @forward_property
include("utils.jl")
include("grids.jl")
include("metadata.jl")
include("problem_types.jl")
include("multiload.jl")
include("elementmatrix.jl")
include("matrices_and_vectors.jl")
include("elementinfo.jl")
include("assemble.jl")
include("buckling.jl")
include(joinpath("IO", "IO.jl"))
using .InputOutput
export RayProblem, PointLoadCantilever, HalfMBB, LBeam, TieBeam, InpStiffness, StiffnessTopOptProblem, AbstractTopOptProblem, GlobalFEAInfo, ElementFEAInfo, YoungsModulus, assemble, assemble_f!, RaggedArray, ElementMatrix, rawmatrix, bcmatrix, save_mesh, RandomMagnitude, MultiLoad
end # module
| TopOptProblems | https://github.com/JuliaTopOpt/TopOptProblems.jl.git |
|
[
"MIT"
] | 0.1.1 | 10c04258080b378ecf9b81b6739206eca3f63535 | code | 5420 | function assemble(problem::StiffnessTopOptProblem{dim,T}, elementinfo::ElementFEAInfo{dim, T}, vars = ones(T, getncells(getdh(problem).grid)), penalty = PowerPenalty(T(1)), xmin = T(0.001)) where {dim,T}
globalinfo = GlobalFEAInfo(problem)
assemble!(globalinfo, problem, elementinfo, vars, penalty, xmin)
return globalinfo
end
function assemble!(globalinfo::GlobalFEAInfo{T}, problem::StiffnessTopOptProblem{dim,T}, elementinfo::ElementFEAInfo{dim, T, TK}, vars = ones(T, getncells(getdh(problem).grid)), penalty = PowerPenalty(T(1)), xmin = T(0.001); assemble_f = true) where {dim, T, TK}
ch = problem.ch
dh = ch.dh
K, f = globalinfo.K, globalinfo.f
if assemble_f
f .= elementinfo.fixedload
end
Kes, fes = elementinfo.Kes, elementinfo.fes
black = problem.black
white = problem.white
varind = problem.varind
_K = K isa Symmetric ? K.data : K
_K.nzval .= 0
assembler = Ferrite.AssemblerSparsityPattern(_K, f, Int[], Int[])
global_dofs = zeros(Int, ndofs_per_cell(dh))
fe = zeros(typeof(fes[1]))
Ke = zeros(T, size(rawmatrix(Kes[1])))
celliterator = CellIterator(dh)
for (i,cell) in enumerate(celliterator)
celldofs!(global_dofs, dh, i)
fe = fes[i]
_Ke = rawmatrix(Kes[i])
Ke = _Ke isa Symmetric ? _Ke.data : _Ke
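        # three cases per element: black (always solid, raw Ke), white (always void, scaled by xmin),
        # and a design variable interpolated through the penalty and density functions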
if black[i]
if assemble_f
Ferrite.assemble!(assembler, global_dofs, Ke, fe)
else
Ferrite.assemble!(assembler, global_dofs, Ke)
end
elseif white[i]
if PENALTY_BEFORE_INTERPOLATION
px = xmin
else
px = penalty(xmin)
end
Ke = px * Ke
if assemble_f
fe = px * fe
Ferrite.assemble!(assembler, global_dofs, Ke, fe)
else
Ferrite.assemble!(assembler, global_dofs, Ke)
end
else
if PENALTY_BEFORE_INTERPOLATION
px = density(penalty(vars[varind[i]]), xmin)
else
px = penalty(density(vars[varind[i]], xmin))
end
Ke = px * Ke
if assemble_f
fe = px * fe
Ferrite.assemble!(assembler, global_dofs, Ke, fe)
else
Ferrite.assemble!(assembler, global_dofs, Ke)
end
end
end
_K = TK <: Symmetric ? K.data : K
apply!(_K, f, ch)
return
end
function assemble_f(problem::StiffnessTopOptProblem{dim,T}, elementinfo::ElementFEAInfo{dim, T}, vars::AbstractVector{T}, penalty, xmin = T(1)/1000) where {dim, T}
f = get_f(problem, vars)
assemble_f!(f, problem, elementinfo, vars, penalty, xmin)
return f
end
get_f(problem, vars::Array) = zeros(eltype(vars), ndofs(problem.ch.dh))
function assemble_f!(f::AbstractVector, problem::StiffnessTopOptProblem,
elementinfo::ElementFEAInfo, vars::AbstractVector, penalty, xmin)
black = elementinfo.black
white = elementinfo.white
varind = elementinfo.varind
fes = elementinfo.fes
dof_cells = elementinfo.metadata.dof_cells
update_f!(f, fes, elementinfo.fixedload, dof_cells, black,
white, penalty, vars, varind, xmin)
return f
end
function update_f!(f::Vector, fes, fixedload, dof_cells, black,
white, penalty, vars, varind, xmin)
@inbounds for dofidx in 1:length(f)
f[dofidx] = fixedload[dofidx]
r = dof_cells.offsets[dofidx] : dof_cells.offsets[dofidx+1]-1
for i in r
cellidx, localidx = dof_cells.values[i]
if black[cellidx]
f[dofidx] += fes[cellidx][localidx]
elseif white[cellidx]
if PENALTY_BEFORE_INTERPOLATION
px = xmin
else
px = penalty(xmin)
end
f[dofidx] += px * fes[cellidx][localidx]
else
if PENALTY_BEFORE_INTERPOLATION
px = density(penalty(vars[varind[cellidx]]), xmin)
else
px = penalty(density(vars[varind[cellidx]], xmin))
end
f[dofidx] += px * fes[cellidx][localidx]
end
end
end
return
end
function assemble_f!(f::AbstractVector, problem, dloads)
metadata = problem.metadata
dof_cells = metadata.dof_cells
update_f!(f, dof_cells, dloads)
return f
end
function update_f!(f::Vector, dof_cells, dloads)
for dofidx in 1:length(f)
r = dof_cells.offsets[dofidx] : dof_cells.offsets[dofidx+1]-1
for i in r
cellidx, localidx = dof_cells.values[i]
f[dofidx] += dloads[cellidx][localidx]
end
end
return
end
#=
function update_f!(f::CuVector, dof_cells, dloads)
args = (f, dof_cells.offsets, dof_cells.values, dloads)
callkernel(dev, assemble_kernel2, args)
CUDAdrv.synchronize(ctx)
return
end
function assemble_kernel2(f, dof_cells_offsets, dof_cells_values, dloads)
i = @thread_global_index()
offset = @total_threads()
@inbounds while i <= length(f)
r = dof_cells_offsets[i] : dof_cells_offsets[i+1]-1
for i in r
cellidx, localidx = dof_cells_values[i]
f[i] += dloads[cellidx][localidx]
end
i += offset
end
return
end
=#
| TopOptProblems | https://github.com/JuliaTopOpt/TopOptProblems.jl.git |
|
[
"MIT"
] | 0.1.1 | 10c04258080b378ecf9b81b6739206eca3f63535 | code | 3314 | using Einsum
function get_Kσs(sp::StiffnessTopOptProblem{xdim, TT}, u_dofs, cellvalues) where {xdim, TT}
E = getE(sp)
ν = getν(sp)
dh = sp.ch.dh
# usually ndof_pc = xdim * n_basefuncs
# ? number of nodes per cell == n_basefuncs per cell
ndof_pc = ndofs_per_cell(dh)
n_basefuncs = getnbasefunctions(cellvalues)
global_dofs = zeros(Int, ndof_pc)
Kσs = [zeros(TT, ndof_pc, ndof_pc) for i in 1:getncells(dh.grid)]
Kσ_e = zeros(TT, ndof_pc, ndof_pc)
# block-diagonal - block σ_e = σ_ij, i,j in xdim
# ! shouldn't this be xdim*xdim by xdim*xdim?
# ? ψ_e = zeros(TT, xdim*ndof_pc, xdim*ndof_pc)
ψ_e = zeros(TT, xdim*xdim, xdim*xdim)
# ? G = zeros(TT, xdim*ndof_pc, ndof_pc)
G = zeros(TT, xdim*xdim, xdim*n_basefuncs)
δ = Matrix(TT(1.0)I, xdim, xdim)
ϵ = zeros(TT, xdim, xdim)
σ = zeros(TT, xdim, xdim)
# u_i,j: partial derivative
u_p = zeros(TT, xdim, xdim)
for (cellidx, cell) in enumerate(CellIterator(dh))
Kσ_e .= 0
reinit!(cellvalues, cell)
# get cell's dof's global dof indices, i.e. CC_a^e
celldofs!(global_dofs, dh, cellidx)
for q_point in 1:getnquadpoints(cellvalues)
dΩ = getdetJdV(cellvalues, q_point)
for d in 1:xdim
ψ_e[(d-1)*xdim+1:d*xdim, (d-1)*xdim+1:d*xdim] .= 0
end
for a in 1:n_basefuncs
∇ϕ = shape_gradient(cellvalues, q_point, a)
_u = @view u_dofs[(@view global_dofs[xdim*(a-1) .+ (1:xdim)])]
# u_i,j, i for spatial xdim, j for partial derivative
@einsum u_p[i,j] = _u[i]*∇ϕ[j]
                # the effect of the quadratic term in the strain formula on the stress field is ignored
@einsum ϵ[i,j] = 1/2*(u_p[i,j] + u_p[j,i])
# isotropic solid
@einsum σ[i,j] = E*ν/(1-ν^2)*δ[i,j]*ϵ[k,k] + E*ν*(1+ν)*ϵ[i,j]
for d in 1:xdim
# block diagonal
ψ_e[(d-1)*xdim .+ 1:d*xdim, (d-1)*xdim .+ 1:d*xdim] .+= σ
G[(xdim*(d-1)+1):(xdim*d), (a-1)*xdim+d] .= ∇ϕ
end
end
Kσ_e .+= G'*ψ_e*G*dΩ
end
Kσs[cellidx] .= Kσ_e
end
return Kσs
end
function buckling(problem::StiffnessTopOptProblem{xdim, T}, ginfo, einfo) where {xdim, T}
dh = problem.ch.dh
u = ginfo.K \ ginfo.f
Kσs = get_Kσs(problem, u, einfo.cellvalues)
Kσ = deepcopy(ginfo.K)
if Kσ isa Symmetric
Kσ.data.nzval .= 0
assembler = Ferrite.AssemblerSparsityPattern(Kσ.data, T[], Int[], Int[])
else
Kσ.nzval .= 0
assembler = Ferrite.AssemblerSparsityPattern(Kσ, T[], Int[], Int[])
end
# * assemble global geometric stiffness matrix
global_dofs = zeros(Int, ndofs_per_cell(dh))
Kσ_e = zeros(T, size(Kσs[1]))
celliteratortype = CellIterator{typeof(dh).parameters...}
_celliterator::celliteratortype = CellIterator(dh)
TK = eltype(Kσs)
for (i,cell) in enumerate(_celliterator)
celldofs!(global_dofs, dh, i)
if TK <: Symmetric
Ferrite.assemble!(assembler, global_dofs, Kσs[i].data)
else
Ferrite.assemble!(assembler, global_dofs, Kσs[i])
end
end
return ginfo.K, Kσ
end
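# Example (sketch): the pair (K, Kσ) returned by `buckling` defines the linear buckling
# eigenvalue problem K*v = -λ*Kσ*v, whose smallest positive λ is the critical load factor.
# A dense solve (only sensible for small meshes), assuming `problem`, `ginfo` and `einfo`
# were built and assembled elsewhere and `LinearAlgebra` is loaded, could look like:
#
#     K, Kσ = buckling(problem, ginfo, einfo)
#     λs, vs = eigen(Matrix(K), -Matrix(Kσ))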
| TopOptProblems | https://github.com/JuliaTopOpt/TopOptProblems.jl.git |
|
[
"MIT"
] | 0.1.1 | 10c04258080b378ecf9b81b6739206eca3f63535 | code | 6352 | """
struct ElementFEAInfo{dim, T}
Kes::AbstractVector{<:AbstractMatrix{T}}
fes::AbstractVector{<:AbstractVector{T}}
fixedload::AbstractVector{T}
cellvolumes::AbstractVector{T}
cellvalues::CellValues{dim, T}
facevalues::FaceValues{<:Any, T}
metadata::Metadata
black::AbstractVector
white::AbstractVector
varind::AbstractVector{Int}
cells
end
An instance of the `ElementFEAInfo` type stores element information such as:
- `Kes`: the element stiffness matrices,
- `fes`: the element load vectors,
- `fixedload`: the assembled global load vector due to concentrated (nodal) and distributed (face) loads,
- `cellvolumes`: the element volumes,
- `cellvalues` and `facevalues`: two `Ferrite` types that facilitate cell and face iteration and queries.
- `metadata`: that stores degree of freedom (dof) to node mapping, dof to cell mapping, etc.
- `black`: a `BitVector` such that `black[i]` is 1 iff element `i` must be part of any feasible design.
- `white`: a `BitVector` such that `white[i]` is 1 iff element `i` must never be part of any feasible design.
- `varind`: a vector such that `varind[i]` gives the decision variable index of element `i`.
- `cells`: the cell connectivities.
"""
@params struct ElementFEAInfo{dim, T}
Kes::AbstractVector{<:AbstractMatrix{T}}
fes::AbstractVector{<:AbstractVector{T}}
fixedload::AbstractVector{T}
cellvolumes::AbstractVector{T}
cellvalues::CellValues{dim, T, <:Any}
facevalues::FaceValues{<:Any, T, <:Any}
metadata::Metadata
black::AbstractVector
white::AbstractVector
varind::AbstractVector{Int}
cells
end
function Base.show(io::Base.IO, ::MIME"text/plain", efeainfo::ElementFEAInfo)
print(io, "ElementFEAInfo: Kes |$(length(efeainfo.Kes))|, fes |$(length(efeainfo.fes))|, fixedload |$(length(efeainfo.fixedload))|, cells |$(length(efeainfo.cells))|")
end
"""
ElementFEAInfo(sp, quad_order=2, ::Type{Val{mat_type}}=Val{:Static}) where {mat_type}
Constructs an instance of `ElementFEAInfo` from a stiffness problem `sp` using a Gaussian quadrature order of `quad_order`. The element matrix and vector types will be:
1. `SMatrix` and `SVector` if `mat_type` is `:SMatrix` or `:Static`, the default,
2. `MMatrix` and `MVector` if `mat_type` is `:MMatrix`, or
3. `Matrix` and `Vector` otherwise.
The static matrices and vectors are more performant and GPU-compatible, so they are used by default.
"""
function ElementFEAInfo(
sp,
quad_order = 2,
::Type{Val{mat_type}} = Val{:Static},
) where {mat_type}
Kes, weights, dloads, cellvalues, facevalues = make_Kes_and_fes(
sp,
quad_order,
Val{mat_type},
)
element_Kes = convert(
Vector{<:ElementMatrix},
Kes;
bc_dofs = sp.ch.prescribed_dofs,
dof_cells = sp.metadata.dof_cells,
)
fixedload = Vector(make_cload(sp))
assemble_f!(fixedload, sp, dloads)
cellvolumes = get_cell_volumes(sp, cellvalues)
cells = sp.ch.dh.grid.cells
ElementFEAInfo(
element_Kes,
weights,
fixedload,
cellvolumes,
cellvalues,
facevalues,
sp.metadata,
sp.black,
sp.white,
sp.varind,
cells,
)
end
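# Example (sketch, assuming `sp` is an existing `StiffnessTopOptProblem` such as a
# `PointLoadCantilever`): build the element-level FEA data with second-order quadrature
# and static element matrices.
#
#     einfo = ElementFEAInfo(sp, 2, Val{:Static})
#     einfo.Kes[1]       # stiffness matrix of the first element
#     einfo.fixedload    # assembled concentrated + distributed load vector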
"""
struct GlobalFEAInfo{T}
    K::AbstractMatrix{T}
    f::AbstractVector{T}
    cholK
    qrK
end
An instance of `GlobalFEAInfo` hosts the global stiffness matrix `K`, the load vector `f`, a Cholesky factorization of `K` (`cholK`) and a QR factorization of `K` (`qrK`).
"""
@params mutable struct GlobalFEAInfo{T}
K::AbstractMatrix{T}
f::AbstractVector{T}
cholK
qrK
end
Base.show(::IO, ::MIME{Symbol("text/plain")}, ::GlobalFEAInfo) = println("TopOpt global FEA information")
"""
GlobalFEAInfo(::Type{T}=Float64) where {T}
Constructs an empty instance of `GlobalFEAInfo` where the field `K` is an empty sparse matrix of element type `T` and the field `f` is an empty dense vector of element type `T`.
"""
GlobalFEAInfo(::Type{T}=Float64) where {T} = GlobalFEAInfo{T}()
function GlobalFEAInfo{T}() where {T}
return GlobalFEAInfo(sparse(zeros(T, 0, 0)), zeros(T, 0), cholesky(one(T)), qr(one(T)))
end
"""
GlobalFEAInfo(sp::StiffnessTopOptProblem)
Constructs an instance of `GlobalFEAInfo` where the field `K` is a sparse matrix with the correct size and sparsity pattern for the problem instance `sp`. The field `f` is a dense vector of the appropriate size. The values in `K` and `f` are meaningless though and require calling the function `assemble!` to update.
"""
function GlobalFEAInfo(sp::StiffnessTopOptProblem)
K = initialize_K(sp)
f = initialize_f(sp)
return GlobalFEAInfo(K, f)
end
function GlobalFEAInfo(
K::Union{AbstractSparseMatrix, Symmetric{<:Any, <:AbstractSparseMatrix}},
f,
)
chol = cholesky(spdiagm(0=>ones(size(K, 1))))
qrfact = qr(spdiagm(0=>ones(size(K, 1))))
return GlobalFEAInfo{eltype(K), typeof(K), typeof(f), typeof(chol), typeof(qrfact)}(K, f, chol, qrfact)
end
"""
GlobalFEAInfo(K, f)
Constructs an instance of `GlobalFEAInfo` with global stiffness matrix `K` and load vector `f`.
"""
function GlobalFEAInfo(K, f)
chol = cholesky(Matrix{eltype(K)}(I, size(K)...))
qrfact = qr(Matrix{eltype(K)}(I, size(K)...))
return GlobalFEAInfo(K, f, chol, qrfact)
end
"""
get_cell_volumes(sp::StiffnessTopOptProblem{dim, T}, cellvalues)
Calculates an approximation of the element volumes by approximating the volume integral of 1 over each element using Gaussian quadrature. `cellvalues` is a `Ferrite` struct that facilitates the computation of the integral. To initialize `cellvalues` for an element with index `cell`, `Ferrite.reinit!(cellvalues, cell)` can be called. Calling `Ferrite.getdetJdV(cellvalues, q_point)` then computes the value of the determinant of the Jacobian of the geometric basis functions at the point `q_point` in the reference element. The sum of such values for all integration points is the volume approximation.
"""
function get_cell_volumes(sp::StiffnessTopOptProblem{dim, T}, cellvalues) where {dim, T}
dh = sp.ch.dh
cellvolumes = zeros(T, getncells(dh.grid))
for (i, cell) in enumerate(CellIterator(dh))
reinit!(cellvalues, cell)
cellvolumes[i] = sum(Ferrite.getdetJdV(cellvalues, q_point) for q_point in 1:Ferrite.getnquadpoints(cellvalues))
end
return cellvolumes
end
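# Example (sketch): the element volumes are what a volume-fraction constraint would use.
# Assuming `sp` and a matching `cellvalues` (e.g. from `make_Kes_and_fes`) are available:
#
#     vols = get_cell_volumes(sp, cellvalues)
#     total_volume = sum(vols)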
| TopOptProblems | https://github.com/JuliaTopOpt/TopOptProblems.jl.git |
|
[
"MIT"
] | 0.1.1 | 10c04258080b378ecf9b81b6739206eca3f63535 | code | 5466 | """
struct ElementMatrix{T, TM <: AbstractMatrix{T}} <: AbstractMatrix{T}
matrix::TM
mask
meandiag::T
end
An element stiffness matrix. `matrix` is the unconstrained element stiffness matrix. `mask` is a `BitVector` where `mask[i]` is 1 iff the local degree of freedom `i` is not constrained by a Dirichlet boundary condition. `meandiag` is the mean of the diagonal of the unconstrained element stiffness matrix.
"""
@params struct ElementMatrix{T, TM <: AbstractMatrix{T}} <: AbstractMatrix{T}
matrix::TM
mask
meandiag::T
end
ElementMatrix(matrix, mask) = ElementMatrix(matrix, mask, sumdiag(matrix)/size(matrix, 1))
Base.show(::IO, ::MIME{Symbol("text/plain")}, ::ElementMatrix) = println("TopOpt element matrix")
"""
rawmatrix(m::ElementMatrix)
Returns the unconstrained element stiffness matrix `m.matrix`.
"""
rawmatrix(m::ElementMatrix) = m.matrix
rawmatrix(m::Symmetric{T, <:ElementMatrix{T}}) where {T} = Symmetric(m.data.matrix)
"""
bcmatrix(m::ElementMatrix{T, TM}) where {dim, T, TM <: StaticMatrix{dim, dim, T}}
Returns the constrained element stiffness matrix where the elements in the rows and columns corresponding to any local degree of freedom with a Dirichlet boundary condition are replaced by 0.
"""
@generated function bcmatrix(m::ElementMatrix{T, TM}) where {dim, T, TM <: StaticMatrix{dim, dim, T}}
expr = Expr(:tuple)
for j in 1:dim, i in 1:dim
push!(expr.args, :(ifelse(m.mask[$i] && m.mask[$j], m.matrix[$i,$j], zero(T))))
end
return :($(Expr(:meta, :inline)); $TM($expr))
end
@generated function bcmatrix(m::Symmetric{T, <:ElementMatrix{T, TM}}) where {dim, T, TM <: StaticMatrix{dim, dim, T}}
expr = Expr(:tuple)
for j in 1:dim, i in 1:dim
push!(expr.args, :(ifelse(m.data.mask[$i] && m.data.mask[$j], m.data.matrix[$i,$j], zero(T))))
end
return :($(Expr(:meta, :inline)); Symmetric($TM($expr)))
end
Base.size(m::ElementMatrix) = size(m.matrix)
Base.getindex(m::ElementMatrix, i...) = m.matrix[i...]
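# Example (sketch, requires StaticArrays): for a 2x2 static element matrix whose second
# local dof is constrained (mask entry false), `rawmatrix` returns the original matrix
# while `bcmatrix` zeroes the corresponding row and column.
#
#     m = ElementMatrix(SMatrix{2,2}(4.0, 1.0, 1.0, 4.0), SVector(true, false))
#     rawmatrix(m)   # [4.0 1.0; 1.0 4.0]
#     bcmatrix(m)    # [4.0 0.0; 0.0 0.0]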
"""
convert(::Type{Vector{<:ElementMatrix}}, Kes::Vector{<:AbstractMatrix})
Converts the element stiffness matrices `Kes` from an abstract vector of matrices to a vector of instances of the type `ElementMatrix`.
"""
function Base.convert(
::Type{Vector{<:ElementMatrix}},
Kes::Vector{TM};
bc_dofs,
dof_cells,
) where {
N, T, TM <: StaticMatrix{N, N, T},
}
fill_matrix = zero(TM)
fill_mask = ones(SVector{N, Bool})
element_Kes = fill(ElementMatrix(fill_matrix, fill_mask), length(Kes))
for i in bc_dofs
d_cells = dof_cells[i]
for c in d_cells
(cellid, localdof) = c
Ke = element_Kes[cellid]
new_Ke = @set Ke.mask[localdof] = false
element_Kes[cellid] = new_Ke
end
end
for e in 1:length(element_Kes)
Ke = element_Kes[e]
matrix = Kes[e]
Ke = @set Ke.matrix = matrix
element_Kes[e] = @set Ke.meandiag = sumdiag(Ke.matrix)
end
return element_Kes
end
function Base.convert(
::Type{Vector{<:ElementMatrix}},
Kes::Vector{Symmetric{T, TM}};
bc_dofs,
dof_cells,
) where {
N, T, TM <: StaticMatrix{N, N, T},
}
fill_matrix = zero(TM)
fill_mask = ones(SVector{N, Bool})
element_Kes = fill(Symmetric(ElementMatrix(fill_matrix, fill_mask)), length(Kes))
for i in bc_dofs
d_cells = dof_cells[i]
for c in d_cells
(cellid, localdof) = c
Ke = element_Kes[cellid].data
new_Ke = @set Ke.mask[localdof] = false
element_Kes[cellid] = Symmetric(new_Ke)
end
end
for e in 1:length(element_Kes)
Ke = element_Kes[e].data
matrix = Kes[e].data
Ke = @set Ke.matrix = matrix
element_Kes[e] = Symmetric(@set Ke.meandiag = sumdiag(Ke.matrix))
end
return element_Kes
end
function Base.convert(
::Type{Vector{<:ElementMatrix}},
Kes::Vector{TM};
bc_dofs,
dof_cells,
) where {
T, TM <: AbstractMatrix{T},
}
N = size(Kes[1], 1)
fill_matrix = zero(TM)
fill_mask = ones(Bool, N)
element_Kes = [deepcopy(ElementMatrix(fill_matrix, fill_mask)) for i in 1:length(Kes)]
for i in bc_dofs
d_cells = dof_cells[i]
for c in d_cells
(cellid, localdof) = c
Ke = element_Kes[cellid]
Ke.mask[localdof] = false
end
end
return element_Kes
end
function Base.convert(
::Type{Vector{<:ElementMatrix}},
Kes::Vector{Symmetric{T, TM}};
bc_dofs,
dof_cells,
) where {
T, TM <: AbstractMatrix{T},
}
N = size(Kes[1], 1)
fill_matrix = zero(TM)
fill_mask = ones(Bool, N)
element_Kes = [
Symmetric(deepcopy(ElementMatrix(fill_matrix, fill_mask))) for i in 1:length(Kes)
]
for i in bc_dofs
d_cells = dof_cells[i]
for c in d_cells
(cellid, localdof) = c
Ke = element_Kes[cellid].data
Ke.mask[localdof] = false
end
end
return element_Kes
end
for TM in (:(StaticMatrix{m, m, T}), :(Symmetric{T, <:StaticMatrix{m, m, T}}))
@eval begin
@generated function sumdiag(K::$TM) where {m,T}
return reduce((ex1,ex2) -> :($ex1 + $ex2), [:(K[$j,$j]) for j in 1:m])
end
end
end
@doc """
sumdiag(K::Union{StaticMatrix, Symmetric{<:Any, <:StaticMatrix}})
Computes the sum of the diagonal of the static matrix `K`.
""" sumdiag
| TopOptProblems | https://github.com/JuliaTopOpt/TopOptProblems.jl.git |
|
[
"MIT"
] | 0.1.1 | 10c04258080b378ecf9b81b6739206eca3f63535 | code | 2223 | using ..CUDASupport
using ..TopOpt: @init_cuda
@init_cuda()
using ..GPUUtils
import ..TopOpt: whichdevice
get_f(problem, vars::CuArray) = zeros(typeof(vars), ndofs(problem.ch.dh))
function update_f!(f::CuVector{T}, fes, fixedload, dof_cells, black,
white, penalty, vars, varind, xmin) where {T}
args = (f, fes, fixedload, dof_cells.offsets, dof_cells.values, black,
white, penalty, vars, varind, xmin, length(f))
callkernel(dev, assemble_kernel1, args)
CUDAdrv.synchronize(ctx)
end
function assemble_kernel1(f, fes, fixedload, dof_cells_offsets, dof_cells_values, black,
white, penalty, vars, varind, xmin, ndofs)
dofidx = @thread_global_index()
offset = @total_threads()
while dofidx <= ndofs
f[dofidx] = fixedload[dofidx]
r = dof_cells_offsets[dofidx] : dof_cells_offsets[dofidx+1]-1
for i in r
cellidx, localidx = dof_cells_values[i]
if black[cellidx]
f[dofidx] += fes[cellidx][localidx]
elseif white[cellidx]
px = xmin
f[dofidx] += px * fes[cellidx][localidx]
else
if PENALTY_BEFORE_INTERPOLATION
px = density(penalty(vars[varind[cellidx]]), xmin)
else
px = penalty(density(vars[varind[cellidx]], xmin))
end
f[dofidx] += px * fes[cellidx][localidx]
end
end
dofidx += offset
end
return
end
whichdevice(p::StiffnessTopOptProblem) = whichdevice(p.ch)
whichdevice(ch::ConstraintHandler) = whichdevice(ch.dh)
whichdevice(dh::DofHandler) = whichdevice(dh.grid)
whichdevice(g::Ferrite.Grid) = whichdevice(g.cells)
@define_cu(ElementFEAInfo, :Kes, :fes, :fixedload, :cellvolumes, :metadata, :black, :white, :varind, :cells)
@define_cu(TopOptProblems.Metadata, :cell_dofs, :dof_cells, :node_cells, :node_dofs)
@define_cu(Ferrite.ConstraintHandler, :values, :prescribed_dofs, :dh)
@define_cu(Ferrite.DofHandler, :grid)
@define_cu(Ferrite.Grid, :cells)
for T in (PointLoadCantilever, HalfMBB, LBeam, TieBeam, InpStiffness)
@eval @define_cu($T, :ch, :black, :white, :varind)
end
| TopOptProblems | https://github.com/JuliaTopOpt/TopOptProblems.jl.git |
|
[
"MIT"
] | 0.1.1 | 10c04258080b378ecf9b81b6739206eca3f63535 | code | 2108 | const dev = CUDAdrv.device()
const ctx = CUDAdrv.CuContext(dev)
macro thread_local_index()
:((threadIdx().z - 1) * blockDim().y * blockDim().x + (threadIdx().y - 1) * blockDim().x + threadIdx().x)
end
macro total_threads_per_block()
:(blockDim().z * blockDim().y * blockDim().x)
end
macro block_index()
:(blockIdx().x + (blockIdx().y - 1) * gridDim().x + (blockIdx().z - 1) * gridDim().x * gridDim().y)
end
macro total_blocks()
:(gridDim().z * gridDim().x * gridDim().y)
end
macro thread_global_index()
:((@block_index() - 1) * (blockDim().x * blockDim().y * blockDim().z) + @thread_local_index())
end
macro total_threads()
:(@total_blocks() * @total_threads_per_block())
end
function callkernel(dev, kernel, args)
blocks, threads = getvalidconfig(dev, kernel, args)
@cuda blocks=blocks threads=threads kernel(args...)
return
end
function getvalidconfig(dev, kernel, parallel_args)
R = parallel_args[1]
Rlength = length(R)
Ssize = size(R)
Slength = prod(Ssize)
GC.@preserve parallel_args begin
parallel_kargs = cudaconvert.(parallel_args)
parallel_tt = Tuple{Core.Typeof.(parallel_kargs)...}
parallel_kernel = cufunction(kernel, parallel_tt)
# we are limited in how many threads we can launch...
## by the kernel
kernel_threads = CUDAnative.maxthreads(parallel_kernel)
## by the device
block_threads = (x=CUDAdrv.attribute(dev, CUDAdrv.MAX_BLOCK_DIM_X),
y=CUDAdrv.attribute(dev, CUDAdrv.MAX_BLOCK_DIM_Y),
total=CUDAdrv.attribute(dev, CUDAdrv.MAX_THREADS_PER_BLOCK))
# figure out a legal launch configuration
y_thr = min(nextpow(2, Rlength ÷ 512 + 1), 512, block_threads.y, kernel_threads)
x_thr = min(512 ÷ y_thr, Slength, block_threads.x,
ceil(Int, block_threads.total/y_thr),
ceil(Int, kernel_threads/y_thr))
blk, thr = (Rlength - 1) ÷ y_thr + 1, (x_thr, y_thr, 1)
blk = min(blk, ceil(Int, Rlength / prod(thr)))
end
return blk, thr
end
| TopOptProblems | https://github.com/JuliaTopOpt/TopOptProblems.jl.git |
|
[
"MIT"
] | 0.1.1 | 10c04258080b378ecf9b81b6739206eca3f63535 | code | 23668 | abstract type AbstractGrid{dim, T} end
const Vec = Ferrite.Vec
"""
```
struct RectilinearGrid{dim, T, N, M, TG<:Ferrite.Grid{dim, <:Ferrite.Cell{dim,N,M}, T}} <: AbstractGrid{dim, T}
grid::TG
nels::NTuple{dim, Int}
sizes::NTuple{dim, T}
corners::NTuple{2, Vec{dim, T}}
white_cells::BitVector
black_cells::BitVector
constant_cells::BitVector
end
```
A type that represents a rectilinear grid with corner points `corners`.
- `dim`: dimension of the problem
- `T`: number type for computations and coordinates
- `N`: number of nodes in a cell of the grid
- `M`: number of faces in a cell of the grid
- `grid`: a Ferrite.Grid struct
- `nels`: number of elements in every dimension
- `sizes`: dimensions of each rectilinear cell
- `corners`: 2 corner points of the rectilinear grid
- `white_cells`: cells fixed to be void during optimization
- `black_cells`: cells fixed to have material during optimization
- `constant_cells`: cells fixed to be either void or have material during optimization
"""
@params struct RectilinearGrid{dim, T, N, M, TG<:Ferrite.Grid{dim, <:Ferrite.Cell{dim,N,M}, T}} <: AbstractGrid{dim, T}
grid::TG
nels::NTuple{dim, Int}
sizes::NTuple{dim, T}
corners::NTuple{2, Vec{dim,T}}
white_cells::BitVector
black_cells::BitVector
constant_cells::BitVector
end
"""
RectilinearGrid(::Type{Val{CellType}}, nels::NTuple{dim,Int}, sizes::NTuple{dim,T}) where {dim, T, CellType}
Constructs an instance of [`RectilinearGrid`](@ref).
- `dim`: dimension of the problem
- `T`: number type for coordinates
- `nels`: number of elements in every dimension
- `sizes`: dimensions of each rectilinear cell
Example:
```
rectgrid = RectilinearGrid(Val{:Linear}, (60,20), (1.0,1.0))
```
"""
function RectilinearGrid(::Type{Val{CellType}}, nels::NTuple{dim,Int}, sizes::NTuple{dim,T}) where {dim, T, CellType}
if dim === 2
if CellType === :Linear
geoshape = Quadrilateral
else
geoshape = QuadraticQuadrilateral
end
else
geoshape = Hexahedron
end
corner1 = Vec{dim}(fill(T(0), dim))
corner2 = Vec{dim}((nels .* sizes))
grid = generate_grid(geoshape, nels, corner1, corner2);
N = nnodes(geoshape)
M = Ferrite.nfaces(geoshape)
ncells = prod(nels)
return RectilinearGrid(grid, nels, sizes, (corner1, corner2), falses(ncells), falses(ncells), falses(ncells))
end
nnodespercell(::RectilinearGrid{dim,T,N,M}) where {dim, T, N, M} = N
nfacespercell(::RectilinearGrid{dim,T,N,M}) where {dim, T, N, M} = M
left(rectgrid::RectilinearGrid, x) = x[1] ≈ rectgrid.corners[1][1]
right(rectgrid::RectilinearGrid, x) = x[1] ≈ rectgrid.corners[2][1]
bottom(rectgrid::RectilinearGrid, x) = x[2] ≈ rectgrid.corners[1][2]
top(rectgrid::RectilinearGrid, x) = x[2] ≈ rectgrid.corners[2][2]
back(rectgrid::RectilinearGrid, x) = x[3] ≈ rectgrid.corners[1][3]
front(rectgrid::RectilinearGrid, x) = x[3] ≈ rectgrid.corners[2][3]
middlex(rectgrid::RectilinearGrid, x) = x[1] ≈ (rectgrid.corners[1][1] + rectgrid.corners[2][1]) / 2
middley(rectgrid::RectilinearGrid, x) = x[2] ≈ (rectgrid.corners[1][2] + rectgrid.corners[2][2]) / 2
middlez(rectgrid::RectilinearGrid, x) = x[3] ≈ (rectgrid.corners[1][3] + rectgrid.corners[2][3]) / 2
nnodes(cell::Type{Ferrite.Cell{dim,N,M}}) where {dim, N, M} = N
nnodes(cell::Ferrite.Cell) = nnodes(typeof(cell))
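# Example (sketch): the predicate helpers above are meant to be composed into anonymous
# functions for Ferrite's set constructors, e.g. to tag nodes on the left edge and the
# top-right corner of an existing `rectgrid`:
#
#     addnodeset!(rectgrid.grid, "left_edge", x -> left(rectgrid, x))
#     addnodeset!(rectgrid.grid, "top_right", x -> top(rectgrid, x) && right(rectgrid, x))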
"""
LGrid(::Type{Val{CellType}}, ::Type{T}; length = 100, height = 100, upperslab = 50, lowerslab = 50) where {T, CellType}
LGrid(::Type{Val{CellType}}, nel1::NTuple{2,Int}, nel2::NTuple{2,Int}, LL::Vec{2,T}, UR::Vec{2,T}, MR::Vec{2,T}) where {CellType, T}
Constructs a `Ferrite.Grid` that represents the following L-shaped grid.
```
upperslab UR
............
. .
. .
. .
height . . MR
. ......................
. .
. . lowerslab
. .
.................................
LL length
```
Examples:
```
LGrid(Val{:Linear}, Float64, upperslab = 30, lowerslab = 70)
LGrid(Val{:Linear}, (2, 4), (2, 2), Vec{2,Float64}((0.0,0.0)), Vec{2,Float64}((2.0, 4.0)), Vec{2,Float64}((4.0, 2.0)))
```
"""
function LGrid(::Type{Val{CellType}}, ::Type{T}; length = 100, height = 100, upperslab = 50, lowerslab = 50) where {T, CellType}
@assert length > upperslab
@assert height > lowerslab
LGrid(Val{CellType}, (upperslab, height), (length-upperslab, lowerslab),
Vec{2,T}((0.0,0.0)), Vec{2,T}((T(upperslab), T(height))),
Vec{2,T}((T(length), T(lowerslab))))
end
function LGrid(::Type{Val{CellType}}, nel1::NTuple{2,Int}, nel2::NTuple{2,Int},
LL::Vec{2,T}, UR::Vec{2,T}, MR::Vec{2,T}) where {CellType, T}
if CellType === :Linear
return _LinearLGrid(nel1, nel2, LL, UR, MR)
else
return _QuadraticLGrid(nel1, nel2, LL, UR, MR)
end
end
function _LinearLGrid(nel1::NTuple{2,Int}, nel2::NTuple{2,Int},
LL::Vec{2,T}, UR::Vec{2,T}, MR::Vec{2,T}) where {T}
@assert nel1[2] > nel2[2]
midpointindy = round(Int, nel2[2]/2) + 1
nodes = Node{2,T}[]
cells = Quadrilateral[]
boundary = Tuple{Int,Int}[]
facesets = Dict{String, Set{Tuple{Int,Int}}}()
facesets["right"] = Set{Tuple{Int,Int}}()
facesets["top"] = Set{Tuple{Int,Int}}()
nodesets = Dict{String, Set{Int}}()
nodesets["load"] = Set{Int}()
# Lower left rectangle
nel_x1 = nel1[1]; nel_y1 = nel2[2];
n_nodes_x1 = nel_x1 + 1; n_nodes_y1 = nel_y1 + 1
n_nodes1 = n_nodes_x1 * n_nodes_y1
_LR = Vec{2,T}((UR[1], LL[2]))
_UL = Vec{2,T}((LL[1], MR[2]))
_UR = Vec{2,T}((UR[1], MR[2]))
Ferrite._generate_2d_nodes!(nodes, n_nodes_x1, n_nodes_y1, LL, _LR, _UR, _UL)
node_array1 = reshape(collect(1:n_nodes1), (n_nodes_x1, n_nodes_y1))
for j in 1:nel_y1, i in 1:nel_x1
push!(cells, Quadrilateral((node_array1[i,j], node_array1[i+1,j], node_array1[i+1,j+1], node_array1[i,j+1])))
if i == 1
push!(boundary, (length(cells), 4))
end
if j == 1
push!(boundary, (length(cells), 1))
end
end
# Lower right rectangle
offsetstep = (MR[1] - _LR[1])/nel2[1]
indexoffset = length(nodes)
nel_x2 = nel2[1] - 1; nel_y2 = nel2[2]
n_nodes_x2 = nel_x2 + 1; n_nodes_y2 = nel_y2 + 1
n_nodes2 = n_nodes_x2 * n_nodes_y2
_LL = Vec{2,T}((_LR[1] + offsetstep, _LR[2]))
_LR = Vec{2,T}((MR[1], LL[2]))
_UL = Vec{2,T}((_UR[1] + offsetstep, MR[2]))
Ferrite._generate_2d_nodes!(nodes, n_nodes_x2, n_nodes_y2, _LL, _LR, MR, _UL)
node_array2 = reshape(collect(indexoffset+1:indexoffset+n_nodes2), (n_nodes_x2, n_nodes_y2))
for j in 1:nel_y2
push!(cells, Quadrilateral((node_array1[end,j], node_array2[1,j], node_array2[1,j+1], node_array1[end,j+1])))
j == 1 && push!(boundary, (length(cells), 1))
j == nel_y2 && push!(boundary, (length(cells), 3))
if nel_x2 == 1
push!(boundary, (length(cells), 2))
push!(facesets["right"], (length(cells), 2))
end
for i in 1:nel_x2
push!(cells, Quadrilateral((node_array2[i,j], node_array2[i+1,j], node_array2[i+1,j+1], node_array2[i,j+1])))
if i == nel_x2
push!(boundary, (length(cells), 2))
push!(facesets["right"], (length(cells), 2))
end
j == 1 && push!(boundary, (length(cells), 1))
j == nel_y2 && push!(boundary, (length(cells), 3))
end
end
push!(nodesets["load"], node_array2[end, midpointindy])
# Upper left rectangle
offsetstep = (UR[2] - MR[2])/(nel1[2] - nel2[2])
indexoffset = length(nodes)
nel_x3 = nel1[1]; nel_y3 = nel1[2] - nel2[2] - 1
n_nodes_x3 = nel_x3 + 1; n_nodes_y3 = nel_y3 + 1
n_nodes3 = n_nodes_x3 * n_nodes_y3
_LL = Vec{2,T}((LL[1], MR[2] + offsetstep))
_LR = Vec{2,T}((UR[1], MR[2] + offsetstep))
_UL = Vec{2,T}((LL[1], UR[2]))
Ferrite._generate_2d_nodes!(nodes, n_nodes_x3, n_nodes_y3, _LL, _LR, UR, _UL)
# Generate cells
node_array3 = reshape(collect(indexoffset+1:indexoffset+n_nodes3), (n_nodes_x3, n_nodes_y3))
for i in 1:nel_x3
push!(cells, Quadrilateral((node_array1[i,end], node_array1[i+1,end], node_array3[i+1,1], node_array3[i,1])))
i == 1 && push!(boundary, (length(cells), 4))
i == nel_x3 && push!(boundary, (length(cells), 2))
end
for j in 1:nel_y3, i in 1:nel_x3
push!(cells, Quadrilateral((node_array3[i,j], node_array3[i+1,j], node_array3[i+1,j+1], node_array3[i,j+1])))
i == 1 && push!(boundary, (length(cells), 4))
i == nel_x3 && push!(boundary, (length(cells), 2))
if j == nel_y3
push!(boundary, (length(cells), 3))
push!(facesets["top"], (length(cells), 3))
end
end
boundary_matrix = Ferrite.boundaries_to_sparse(boundary)
return Grid(cells, nodes, facesets=facesets, nodesets=nodesets,
boundary_matrix=boundary_matrix)
end
function _QuadraticLGrid(nel1::NTuple{2,Int}, nel2::NTuple{2,Int},
LL::Vec{2,T}, UR::Vec{2,T}, MR::Vec{2,T}) where {T}
@assert nel1[2] > nel2[2]
midpointindy = round(Int, nel2[2]/2) + 1
nodes = Node{2,T}[]
cells = QuadraticQuadrilateral[]
boundary = Tuple{Int,Int}[]
facesets = Dict{String, Set{Tuple{Int,Int}}}()
facesets["right"] = Set{Tuple{Int,Int}}()
facesets["top"] = Set{Tuple{Int,Int}}()
nodesets = Dict{String, Set{Int}}()
nodesets["load"] = Set{Int}()
# Lower left rectangle
nel_x1 = nel1[1]; nel_y1 = nel2[2];
n_nodes_x1 = 2*nel_x1 + 1; n_nodes_y1 = 2*nel_y1 + 1
n_nodes1 = n_nodes_x1 * n_nodes_y1
_LR = Vec{2,T}((UR[1], LL[2]))
_UL = Vec{2,T}((LL[1], MR[2]))
_UR = Vec{2,T}((UR[1], MR[2]))
Ferrite._generate_2d_nodes!(nodes, n_nodes_x1, n_nodes_y1, LL, _LR, _UR, _UL)
node_array1 = reshape(collect(1:n_nodes1), (n_nodes_x1, n_nodes_y1))
for j in 1:nel_y1, i in 1:nel_x1
push!(cells, QuadraticQuadrilateral((node_array1[2*i-1,2*j-1], node_array1[2*i+1,2*j-1],
node_array1[2*i+1,2*j+1],node_array1[2*i-1,2*j+1],
node_array1[2*i,2*j-1],node_array1[2*i+1,2*j],
node_array1[2*i,2*j+1],node_array1[2*i-1,2*j],
node_array1[2*i,2*j])))
if i == 1
push!(boundary, (length(cells), 4))
end
if j == 1
push!(boundary, (length(cells), 1))
end
end
# Lower right rectangle
offsetstep = (MR[1] - _LR[1])/nel2[1]/2
indexoffset = length(nodes)
nel_x2 = nel2[1] - 1; nel_y2 = nel2[2]
n_nodes_x2 = 2*nel_x2 + 2; n_nodes_y2 = 2*nel_y2 + 1
n_nodes2 = n_nodes_x2 * n_nodes_y2
_LL = Vec{2,T}((_LR[1] + offsetstep, _LR[2]))
_LR = Vec{2,T}((MR[1], LL[2]))
_UL = Vec{2,T}((_UR[1] + offsetstep, MR[2]))
Ferrite._generate_2d_nodes!(nodes, n_nodes_x2, n_nodes_y2, _LL, _LR, MR, _UL)
node_array2 = reshape(collect(indexoffset+1:indexoffset+n_nodes2), (n_nodes_x2, n_nodes_y2))
for j in 1:nel_y2
push!(cells, QuadraticQuadrilateral((node_array1[end,2*j-1], node_array2[2,2*j-1],
node_array2[2,2*j+1], node_array1[end,2*j+1],
node_array2[1,2*j-1], node_array2[2,2*j],
node_array2[1,2*j+1], node_array1[end,2*j],
node_array2[1,2*j])))
j == 1 && push!(boundary, (length(cells), 1))
j == nel_y2 && push!(boundary, (length(cells), 3))
if nel_x2 == 1
push!(boundary, (length(cells), 2))
push!(facesets["right"], (length(cells), 2))
end
for i in 1:nel_x2
push!(cells, QuadraticQuadrilateral((node_array2[2*i,2*j-1],
node_array2[2*i+2,2*j-1],
node_array2[2*i+2,2*j+1],
node_array2[2*i,2*j+1],
node_array2[2*i+1,2*j-1],
node_array2[2*i+2,2*j],
node_array2[2*i+1,2*j+1],
node_array2[2*i,2*j],
node_array2[2*i+1,2*j])))
if i == nel_x2
push!(boundary, (length(cells), 2))
push!(facesets["right"], (length(cells), 2))
end
j == 1 && push!(boundary, (length(cells), 1))
j == nel_y2 && push!(boundary, (length(cells), 3))
end
end
push!(nodesets["load"], node_array2[end, midpointindy])
# Upper left rectangle
offsetstep = (UR[2] - MR[2])/(nel1[2] - nel2[2])/2
indexoffset = length(nodes)
nel_x3 = nel1[1]; nel_y3 = nel1[2] - nel2[2] - 1
n_nodes_x3 = 2*nel_x3 + 1; n_nodes_y3 = 2*nel_y3 + 2
n_nodes3 = n_nodes_x3 * n_nodes_y3
_LL = Vec{2,T}((LL[1], MR[2] + offsetstep))
_LR = Vec{2,T}((UR[1], MR[2] + offsetstep))
_UL = Vec{2,T}((LL[1], UR[2]))
Ferrite._generate_2d_nodes!(nodes, n_nodes_x3, n_nodes_y3, _LL, _LR, UR, _UL)
# Generate cells
node_array3 = reshape(collect(indexoffset+1:indexoffset+n_nodes3), (n_nodes_x3, n_nodes_y3))
for i in 1:nel_x3
push!(cells, QuadraticQuadrilateral((node_array1[2i-1,end], node_array1[2i+1,end],
node_array3[2i+1,2], node_array3[2i-1,2],
node_array1[2i,end], node_array3[2i+1,1],
node_array3[2i,2], node_array3[2i-1,1],
node_array3[2i,1])))
i == 1 && push!(boundary, (length(cells), 4))
i == nel_x3 && push!(boundary, (length(cells), 2))
end
for j in 1:nel_y3, i in 1:nel_x3
push!(cells, QuadraticQuadrilateral((node_array3[2i-1,2j],
node_array3[2i+1,2j],
node_array3[2i+1,2j+2],
node_array3[2i-1,2j+2],
node_array3[2i,2j],
node_array3[2i+1,2j+1],
node_array3[2i,2j+2],
node_array3[2i-1,2j+1],
node_array3[2i,2j+1])))
i == 1 && push!(boundary, (length(cells), 4))
i == nel_x3 && push!(boundary, (length(cells), 2))
if j == nel_y3
push!(boundary, (length(cells), 3))
push!(facesets["top"], (length(cells), 3))
end
end
boundary_matrix = Ferrite.boundaries_to_sparse(boundary)
return Grid(cells, nodes, facesets=facesets, nodesets=nodesets,
boundary_matrix=boundary_matrix)
end
function TieBeamGrid(::Type{Val{CellType}}, ::Type{T}=Float64, refine=1) where {T, CellType}
if CellType === :Linear
return _LinearTieBeamGrid(T, refine)
else
return _QuadraticTieBeamGrid(T, refine)
end
end
function _LinearTieBeamGrid(::Type{T}=Float64, refine=1) where {T}
nodes = Node{2,T}[]
cells = Quadrilateral[]
boundary = Tuple{Int,Int}[]
facesets = Dict{String, Set{Tuple{Int,Int}}}()
facesets["leftfixed"] = Set{Tuple{Int,Int}}()
facesets["toproller"] = Set{Tuple{Int,Int}}()
facesets["rightload"] = Set{Tuple{Int,Int}}()
facesets["bottomload"] = Set{Tuple{Int,Int}}()
# Lower left rectangle
nel_x1 = 32 * refine; nel_y1 = 3 * refine;
n_nodes_x1 = nel_x1 + 1; n_nodes_y1 = nel_y1 + 1
n_nodes1 = n_nodes_x1 * n_nodes_y1
LL = Vec{2,T}((0, 0))
LR = Vec{2,T}((T(nel_x1 / refine), T(0)))
UR = Vec{2,T}((T(nel_x1 / refine), T(nel_y1 / refine)))
UL = Vec{2,T}((T(0), T(nel_y1 / refine)))
Ferrite._generate_2d_nodes!(nodes, n_nodes_x1, n_nodes_y1, LL, LR, UR, UL)
node_array1 = reshape(collect(1:n_nodes1), (n_nodes_x1, n_nodes_y1))
for j in 1:nel_y1, i in 1:nel_x1
push!(cells, Quadrilateral((node_array1[i,j], node_array1[i+1,j],
node_array1[i+1,j+1],node_array1[i,j+1])))
if i == 1
cidx = length(cells)
push!(boundary, (cidx, 4))
push!(facesets["leftfixed"], (cidx, 4))
end
if i == nel_x1
cidx = length(cells)
push!(boundary, (cidx, 2))
push!(facesets["rightload"], (cidx, 2))
end
if j == 1
cidx = length(cells)
push!(boundary, (cidx, 1))
if i == 31
push!(facesets["bottomload"], (cidx, 1))
end
end
if j == nel_y1 && i != 31
cidx = length(cells)
push!(boundary, (cidx, 3))
end
end
nel_x2 = 1 * refine; nel_y2 = 3 * refine + refine - 1
n_nodes_x2 = nel_x2 + 1; n_nodes_y2 = nel_y2 + 1
n_nodes2 = n_nodes_x2 * n_nodes_y2
indexoffset = length(nodes)
LL = Vec{2,T}((T(30), (nel_y1 + T(1)) / refine))
LR = Vec{2,T}((T(31), (nel_y1 + T(1)) / refine))
UR = Vec{2,T}((T(31), nel_y1 / refine + T(4)))
UL = Vec{2,T}((T(30), nel_y1 / refine + T(4)))
Ferrite._generate_2d_nodes!(nodes, n_nodes_x2, n_nodes_y2, LL, LR, UR, UL)
node_array2 = reshape(collect(indexoffset+1:indexoffset+n_nodes2), (n_nodes_x2, n_nodes_y2))
t = 30
for i in 1:refine
push!(cells, Quadrilateral((node_array1[t*refine+i, nel_y1 + 1],
node_array1[t*refine+i+1, nel_y1 + 1],
node_array2[i+1,1],
node_array2[i,1])))
if i == 1
cidx = length(cells)
push!(boundary, (cidx, 4))
end
if i == refine
cidx = length(cells)
push!(boundary, (cidx, 2))
end
end
for j in 1:nel_y2, i in 1:nel_x2
push!(cells, Quadrilateral((node_array2[i,j], node_array2[i+1,j],
node_array2[i+1,j+1], node_array2[i,j+1])))
if i == 1
cidx = length(cells)
push!(boundary, (cidx, 4))
end
if i == nel_x2
cidx = length(cells)
push!(boundary, (cidx, 2))
end
if j == nel_y2
cidx = length(cells)
push!(boundary, (cidx, 3))
push!(facesets["toproller"], (cidx, 3))
end
end
boundary_matrix = Ferrite.boundaries_to_sparse(boundary)
return Grid(cells, nodes, facesets=facesets,
boundary_matrix=boundary_matrix)
end
function _QuadraticTieBeamGrid(::Type{T}=Float64, refine = 1) where {T}
nodes = Node{2,T}[]
cells = QuadraticQuadrilateral[]
boundary = Tuple{Int,Int}[]
facesets = Dict{String, Set{Tuple{Int,Int}}}()
facesets["leftfixed"] = Set{Tuple{Int,Int}}()
facesets["toproller"] = Set{Tuple{Int,Int}}()
facesets["rightload"] = Set{Tuple{Int,Int}}()
facesets["bottomload"] = Set{Tuple{Int,Int}}()
# Lower left rectangle
nel_x1 = 32*refine; nel_y1 = 3*refine;
n_nodes_x1 = 2*nel_x1 + 1; n_nodes_y1 = 2*nel_y1 + 1
n_nodes1 = n_nodes_x1 * n_nodes_y1
LL = Vec{2,T}((0, 0))
LR = Vec{2,T}((T(nel_x1/refine), T(0)))
UR = Vec{2,T}((T(nel_x1/refine), T(nel_y1/refine)))
UL = Vec{2,T}((T(0), T(nel_y1/refine)))
Ferrite._generate_2d_nodes!(nodes, n_nodes_x1, n_nodes_y1, LL, LR, UR, UL)
node_array1 = reshape(collect(1:n_nodes1), (n_nodes_x1, n_nodes_y1))
for j in 1:nel_y1, i in 1:nel_x1
push!(cells, QuadraticQuadrilateral((node_array1[2*i-1,2*j-1], node_array1[2*i+1,2*j-1],
node_array1[2*i+1,2*j+1],node_array1[2*i-1,2*j+1],
node_array1[2*i,2*j-1],node_array1[2*i+1,2*j],
node_array1[2*i,2*j+1],node_array1[2*i-1,2*j],
node_array1[2*i,2*j])))
if i == 1
cidx = length(cells)
push!(boundary, (cidx, 4))
push!(facesets["leftfixed"], (cidx, 4))
end
if i == nel_x1
cidx = length(cells)
push!(boundary, (cidx, 2))
push!(facesets["rightload"], (cidx, 2))
end
if j == 1
cidx = length(cells)
push!(boundary, (cidx, 1))
if i == 31
push!(facesets["bottomload"], (cidx, 1))
end
end
if j == nel_y1 && i != 31
cidx = length(cells)
push!(boundary, (cidx, 3))
end
end
nel_x2 = 1*refine; nel_y2 = 3*refine + refine - 1
n_nodes_x2 = 2*nel_x2 + 1; n_nodes_y2 = 2*nel_y2 + 2
n_nodes2 = n_nodes_x2 * n_nodes_y2
indexoffset = length(nodes)
LL = Vec{2,T}((T(30), (nel_y1 + T(0.5)) / refine))
LR = Vec{2,T}((T(31), (nel_y1 + T(0.5)) / refine))
UR = Vec{2,T}((T(31), nel_y1/refine + T(4)))
UL = Vec{2,T}((T(30), nel_y1/refine + T(4)))
Ferrite._generate_2d_nodes!(nodes, n_nodes_x2, n_nodes_y2, LL, LR, UR, UL)
node_array2 = reshape(collect(indexoffset+1:indexoffset+n_nodes2), (n_nodes_x2, n_nodes_y2))
t = 30
for i in 1:refine
push!(cells, QuadraticQuadrilateral((node_array1[2*(refine*t+i-1)+1, 2*nel_y1+1],
node_array1[2*(refine*t+i-1)+3, 2*nel_y1+1],
node_array2[1+2i, 2],
node_array2[2i-1, 2],
node_array1[2*(refine*t+i-1)+2, 2*nel_y1+1],
node_array2[1+2i, 1],
node_array2[2i, 2],
node_array2[2i-1, 1],
node_array2[2i, 1])))
if i == 1
cidx = length(cells)
push!(boundary, (cidx, 4))
end
if i == refine
cidx = length(cells)
push!(boundary, (cidx, 2))
end
end
for j in 1:nel_y2, i in 1:nel_x2
push!(cells, QuadraticQuadrilateral((node_array2[2*i-1,2*j], node_array2[2*i+1,2*j],
node_array2[2*i+1,2*j+2],node_array2[2*i-1,2*j+2],
node_array2[2*i,2*j],node_array2[2*i+1,2*j+1],
node_array2[2*i,2*j+2],node_array2[2*i-1,2*j+1],
node_array2[2*i,2*j+1])))
if i == 1
cidx = length(cells)
push!(boundary, (cidx, 4))
end
if i == nel_x2
cidx = length(cells)
push!(boundary, (cidx, 2))
end
if j == nel_y2
cidx = length(cells)
push!(boundary, (cidx, 3))
push!(facesets["toproller"], (cidx, 3))
end
end
boundary_matrix = Ferrite.boundaries_to_sparse(boundary)
return Grid(cells, nodes, facesets=facesets,
boundary_matrix=boundary_matrix)
end
| TopOptProblems | https://github.com/JuliaTopOpt/TopOptProblems.jl.git |
|
[
"MIT"
] | 0.1.1 | 10c04258080b378ecf9b81b6739206eca3f63535 | code | 9509 | function gettypes(
::Type{T}, # number type
::Type{Val{:Static}}, # matrix type
::Type{Val{Kesize}}, # matrix size
) where {T, Kesize}
return SMatrix{Kesize, Kesize, T, Kesize^2}, SVector{Kesize, T}
end
function gettypes(
::Type{T}, # number type
::Type{Val{:SMatrix}}, # matrix type
::Type{Val{Kesize}}, # matrix size
) where {T, Kesize}
return SMatrix{Kesize, Kesize, T, Kesize^2}, SVector{Kesize, T}
end
function gettypes(
::Type{T}, # number type
::Type{Val{:MMatrix}}, # matrix type
::Type{Val{Kesize}}, # matrix size
) where {T, Kesize}
return MMatrix{Kesize, Kesize, T, Kesize^2}, MVector{Kesize, T}
end
function gettypes(
::Type{BigFloat}, # number type
::Type{Val{:Static}}, # matrix type
::Type{Val{Kesize}}, # matrix size
) where {Kesize}
return SizedMatrix{Kesize, Kesize, BigFloat, Kesize^2}, SizedVector{Kesize, BigFloat}
end
function gettypes(
::Type{BigFloat}, # number type
::Type{Val{:SMatrix}}, # matrix type
::Type{Val{Kesize}}, # matrix size
) where {Kesize}
return SizedMatrix{Kesize, Kesize, BigFloat, Kesize^2}, SizedVector{Kesize, BigFloat}
end
function gettypes(
::Type{BigFloat}, # number type
::Type{Val{:MMatrix}}, # matrix type
::Type{Val{Kesize}}, # matrix size
) where {Kesize}
return SizedMatrix{Kesize, Kesize, BigFloat, Kesize^2}, SizedVector{Kesize, BigFloat}
end
function gettypes(
::Type{T}, # number type
::Any, # matrix type
::Any, # matrix size
) where {T}
return Matrix{T}, Vector{T}
end
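# Example (sketch): for an 8-dof element with Float64 entries the mappings above give
#
#     gettypes(Float64, Val{:Static}, Val{8})   # -> (SMatrix{8,8,Float64,64}, SVector{8,Float64})
#     gettypes(Float64, Val{:Dense}, Val{8})    # -> (Matrix{Float64}, Vector{Float64}), the fallback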
initialize_K(sp::StiffnessTopOptProblem) = Symmetric(create_sparsity_pattern(sp.ch.dh))
initialize_f(sp::StiffnessTopOptProblem{dim, T}) where {dim, T} = zeros(T, ndofs(sp.ch.dh))
function make_Kes_and_fes(problem, quad_order=2)
make_Kes_and_fes(problem, quad_order, Val{:Static})
end
function make_Kes_and_fes(problem, ::Type{Val{mat_type}}) where mat_type
make_Kes_and_fes(problem, 2, Val{mat_type})
end
function make_Kes_and_fes(problem, quad_order, ::Type{Val{mat_type}}) where {mat_type}
T = floattype(problem)
dim = getdim(problem)
geom_order = getgeomorder(problem)
dh = getdh(problem)
E = getE(problem)
ν = getν(problem)
ρ = getdensity(problem)
refshape = Ferrite.getrefshape(dh.field_interpolations[1])
λ = E*ν / ((1 + ν) * (1 - 2*ν))
μ = E / (2*(1 + ν))
δ(i,j) = i == j ? T(1) : T(0)
g(i,j,k,l) = λ*δ(i,j)*δ(k,l) + μ*(δ(i,k)*δ(j,l) + δ(i,l)*δ(j,k))
C = SymmetricTensor{4, dim}(g)
# Shape functions and quadrature rule
interpolation_space = Lagrange{dim, refshape, geom_order}()
quadrature_rule = QuadratureRule{dim, refshape}(quad_order)
cellvalues = CellScalarValues(quadrature_rule, interpolation_space)
facevalues = FaceScalarValues(QuadratureRule{dim-1, refshape}(quad_order), interpolation_space)
# Calculate element stiffness matrices
n_basefuncs = getnbasefunctions(cellvalues)
Kesize = dim*n_basefuncs
MatrixType, VectorType = gettypes(T, Val{mat_type}, Val{Kesize})
Kes, weights = _make_Kes_and_weights(dh, Tuple{MatrixType, VectorType}, Val{n_basefuncs}, Val{dim*n_basefuncs}, C, ρ, quadrature_rule, cellvalues)
dloads = _make_dloads(weights, problem, facevalues)
return Kes, weights, dloads, cellvalues, facevalues
end
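# Example (sketch, assuming `problem` is a `StiffnessTopOptProblem`): element stiffness
# matrices and load data with second-order quadrature and static element arrays.
#
#     Kes, weights, dloads, cellvalues, facevalues = make_Kes_and_fes(problem, 2, Val{:Static})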
const g = [0., 9.81, 0.] # N/kg or m/s^2
# Element stiffness matrices are StaticArrays
# `weights`: one self-weight load vector per element, indexed by element id
function _make_Kes_and_weights(dh::DofHandler{dim, N, T}, ::Type{Tuple{MatrixType, VectorType}},
::Type{Val{n_basefuncs}}, ::Type{Val{Kesize}}, C, ρ, quadrature_rule, cellvalues) where {
dim, N, T, MatrixType <: StaticArray, VectorType, n_basefuncs, Kesize}
# Calculate element stiffness matrices
nel = getncells(dh.grid)
body_force = ρ .* g # Force per unit volume
Kes = Symmetric{T, MatrixType}[]
sizehint!(Kes, nel)
weights = [zeros(VectorType) for i in 1:nel]
Ke_e = zeros(T, dim, dim)
fe = zeros(T, Kesize)
Ke_0 = Matrix{T}(undef, Kesize, Kesize)
celliterator = CellIterator(dh)
for (k, cell) in enumerate(celliterator)
Ke_0 .= 0
reinit!(cellvalues, cell)
fe = weights[k]
for q_point in 1:getnquadpoints(cellvalues)
dΩ = getdetJdV(cellvalues, q_point)
for b in 1:n_basefuncs
∇ϕb = shape_gradient(cellvalues, q_point, b)
ϕb = shape_value(cellvalues, q_point, b)
for d2 in 1:dim
fe = @set fe[(b-1)*dim + d2] += ϕb * body_force[d2] * dΩ
for a in 1:n_basefuncs
∇ϕa = shape_gradient(cellvalues, q_point, a)
Ke_e .= dotdot(∇ϕa, C, ∇ϕb) * dΩ
for d1 in 1:dim
#if dim*(b-1) + d2 >= dim*(a-1) + d1
Ke_0[dim*(a-1) + d1, dim*(b-1) + d2] += Ke_e[d1,d2]
#end
end
end
end
end
end
weights[k] = fe
if MatrixType <: SizedMatrix # Work around because full constructor errors
push!(Kes, Symmetric(SizedMatrix{Kesize,Kesize,T}(Ke_0)))
else
push!(Kes, Symmetric(MatrixType(Ke_0)))
end
end
return Kes, weights
end
# Fallback
function _make_Kes_and_weights(
dh::DofHandler{dim, N, T},
::Type{Tuple{MatrixType, VectorType}},
::Type{Val{n_basefuncs}},
::Type{Val{Kesize}},
C,
ρ,
quadrature_rule,
cellvalues,
) where {
dim, N, T, MatrixType, VectorType, n_basefuncs, Kesize,
}
# Calculate element stiffness matrices
nel = getncells(dh.grid)
body_force = ρ .* g # Force per unit volume
Kes = let Kesize=Kesize, nel=nel
[Symmetric(zeros(T, Kesize, Kesize), :U) for i = 1:nel]
end
weights = let Kesize=Kesize, nel=nel
[zeros(T, Kesize) for i = 1:nel]
end
Ke_e = zeros(T, dim, dim)
celliterator = CellIterator(dh)
for (k, cell) in enumerate(celliterator)
reinit!(cellvalues, cell)
fe = weights[k]
for q_point in 1:getnquadpoints(cellvalues)
dΩ = getdetJdV(cellvalues, q_point)
for b in 1:n_basefuncs
∇ϕb = shape_gradient(cellvalues, q_point, b)
ϕb = shape_value(cellvalues, q_point, b)
for d2 in 1:dim
fe[(b-1)*dim + d2] += ϕb * body_force[d2] * dΩ
for a in 1:n_basefuncs
∇ϕa = shape_gradient(cellvalues, q_point, a)
Ke_e .= dotdot(∇ϕa, C, ∇ϕb) * dΩ
for d1 in 1:dim
#if dim*(b-1) + d2 >= dim*(a-1) + d1
Kes[k].data[dim*(a-1) + d1, dim*(b-1) + d2] += Ke_e[d1,d2]
#end
end
end
end
end
end
end
return Kes, weights
end
"""
_make_dloads(fes, problem, facevalues)
Assemble element load vectors for boundary (face) distributed loads (pressures).
"""
function _make_dloads(fes, problem, facevalues)
dim = getdim(problem)
N = nnodespercell(problem)
T = floattype(problem)
dloads = deepcopy(fes)
for i in 1:length(dloads)
if eltype(dloads) <: SArray
dloads[i] = zero(eltype(dloads))
else
dloads[i] .= 0
end
end
pressuredict = getpressuredict(problem)
dh = getdh(problem)
grid = dh.grid
boundary_matrix = grid.boundary_matrix
cell_coords = zeros(Ferrite.Vec{dim, T}, N)
n_basefuncs = getnbasefunctions(facevalues)
for k in keys(pressuredict)
t = -pressuredict[k] # traction = negative the pressure
faceset = getfacesets(problem)[k]
for (cellid, faceid) in faceset
boundary_matrix[faceid, cellid] || throw("Face $((cellid, faceid)) not on boundary.")
fe = dloads[cellid]
getcoordinates!(cell_coords, grid, cellid)
reinit!(facevalues, cell_coords, faceid)
for q_point in 1:getnquadpoints(facevalues)
dΓ = getdetJdV(facevalues, q_point) # Face area
normal = getnormal(facevalues, q_point) # Normal vector at quad point
for i in 1:n_basefuncs
ϕ = shape_value(facevalues, q_point, i) # Shape function value
for d = 1:dim
if fe isa SArray
fe = @set fe[(i-1)*dim + d] += ϕ * t * normal[d] * dΓ
else
fe[(i-1)*dim + d] += ϕ * t * normal[d] * dΓ
end
end
end
end
dloads[cellid] = fe
end
end
return dloads
end
"""
make_cload(problem)
Assemble a sparse vector for concentrated loads
"""
function make_cload(problem)
T = floattype(problem)
dim = getdim(problem)
cloads = getcloaddict(problem)
dh = getdh(problem)
metadata = getmetadata(problem)
node_dofs = metadata.node_dofs
inds = Int[]
vals = T[]
for nodeidx in keys(cloads)
for (dofidx, force) in enumerate(cloads[nodeidx])
if force != 0
dof = node_dofs[(nodeidx-1)*dim+dofidx]
push!(inds, dof)
push!(vals, force)
end
end
end
return sparsevec(inds, vals, ndofs(dh))
end
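# Example (sketch): this mirrors how `ElementFEAInfo` combines the two load contributions
# for a problem `sp`: the concentrated loads are densified and then augmented with the
# assembled distributed (face) loads `dloads`.
#
#     fixedload = Vector(make_cload(sp))
#     assemble_f!(fixedload, sp, dloads)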
| TopOptProblems | https://github.com/JuliaTopOpt/TopOptProblems.jl.git |
|
[
"MIT"
] | 0.1.1 | 10c04258080b378ecf9b81b6739206eca3f63535 | code | 4934 | """
struct Metadata
cell_dofs
dof_cells
node_cells
node_dofs
end
An instance of the `Metadata` type stores cell-node-dof relationship information such as:
- `cell_dofs`: a `ndof_per_cell` x `ncells` matrix that maps `[localdofidx, cellidx]` into dof idx
cell_dofs[j,cellidx] = j-th nodal dof of cell `cellidx`
- `dof_cells`: a `ncells_per_dof` x `ndofs` `RaggedArray` of `Tuple{Int, Int}` that maps each dof to its cells and its local dof idx in each cell
dof_cells[dofidx] = [(cellidx, cell's local dof idx), ...]
- `node_cells`: a `ncells_per_node` x `nnodes` `RaggedArray` of `Tuple{Int, Int}` that maps each node to its cells and its local idx in each cell
node_cells[node_idx] = [(cellidx, cell's local node idx), ...]
- `node_dofs`: a `ndofspernode x nnodes` Matrix that maps `[localdofidx, node_idx]` into dof indices
node_dofs[j,nodeidx] = j-th dof of node `nodeidx`
"""
@params struct Metadata
cell_dofs
dof_cells
#node_first_cells::TTupleVec
node_cells
node_dofs
end
function Metadata(dh::DofHandler{dim}) where dim
cell_dofs = get_cell_dofs_matrix(dh)
dof_cells = get_dof_cells_matrix(dh, cell_dofs)
#node_first_cells = get_node_first_cells(dh)
node_cells = get_node_cells(dh)
node_dofs = get_node_dofs(dh)
meta = Metadata(cell_dofs, dof_cells, node_cells, node_dofs)
end
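# Example (sketch): given a closed `DofHandler` `dh`, the maps can be queried as
#
#     meta = Metadata(dh)
#     meta.cell_dofs[j, cellidx]   # global index of the j-th local dof of cell `cellidx`
#     meta.node_dofs[:, nodeidx]   # all dofs attached to node `nodeidx`
#     meta.dof_cells[dofidx]       # (cellidx, local dof idx) pairs touching dof `dofidx`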
"""
Returns a `ndof_per_cell` x `ncells` matrix that maps `[localdofidx, cellidx]` into dof index
"""
function get_cell_dofs_matrix(dh)
cell_dofs = zeros(Int, ndofs_per_cell(dh), getncells(dh.grid))
for i in 1:size(cell_dofs, 2)
r = dh.cell_dofs_offset[i]:dh.cell_dofs_offset[i+1]-1
for j in 1:length(r)
cell_dofs[j,i] = dh.cell_dofs[r[j]]
end
end
cell_dofs
end
"""
Inputs
======
- `dh`: DofHandler
- `cell_dofs`: matrix `ndof_per_cell` x `ncells`: cell_dofs[j,i] = j-th nodal dof of cell i
Returns
=======
- a RaggedArray that maps dof into cell and its local dof idx
`ndof` x (Vector of Tuple{Int, Int})
dof_cells_matrix[dofidx] = [(cellidx, cell's local dof idx), ...]
"""
function get_dof_cells_matrix(dh, cell_dofs)
dof_cells_vecofvecs = [Vector{Tuple{Int,Int}}() for i in 1:ndofs(dh)]
l = 0
for cellidx in 1:size(cell_dofs, 2)
for localidx in 1:size(cell_dofs, 1)
dofidx = cell_dofs[localidx, cellidx]
push!(dof_cells_vecofvecs[dofidx], (cellidx, localidx))
l += 1
end
end
return RaggedArray(dof_cells_vecofvecs)
end
function get_node_first_cells(dh)
node_first_cells = fill((0,0), getnnodes(dh.grid))
visited = falses(getnnodes(dh.grid))
for cellidx in 1:getncells(dh.grid)
for (local_node_idx, global_node_idx) in enumerate(dh.grid.cells[cellidx].nodes)
if !visited[global_node_idx]
visited[global_node_idx] = true
node_first_cells[global_node_idx] = (cellidx, local_node_idx)
end
end
end
return node_first_cells
end
"""
Returns
=======
- A RaggedArray that maps each node index to its connected cells and the node's local index in each cell
`nnodes` x (Vector of Tuple{Int, Int})
node_cells[nodeidx] = [(cellidx, cell's local node idx), ...]
"""
function get_node_cells(dh)
node_cells_vecofvecs = [Vector{Tuple{Int,Int}}() for i in 1:ndofs(dh)]
l = 0
for (cellidx, cell) in enumerate(CellIterator(dh))
for (localidx, nodeidx) in enumerate(cell.nodes)
push!(node_cells_vecofvecs[nodeidx], (cellidx, localidx))
l += 1
end
end
return RaggedArray(node_cells_vecofvecs)
end
node_field_offset(dh, f) = sum(view(dh.field_dims, 1:f-1))
"""
Returns
=======
- A `ndofspernode x nnodes` Matrix that maps `[localdofidx, node_idx]` into dof indices
"""
function get_node_dofs(dh::DofHandler)
ndofspernode = sum(dh.field_dims)
nfields = length(dh.field_dims)
nnodes = getnnodes(dh.grid)
interpol_points = ndofs_per_cell(dh)
_celldofs = fill(0, ndofs_per_cell(dh))
node_dofs = zeros(Int, ndofspernode, nnodes)
visited = falses(nnodes)
for field in 1:nfields
field_dim = dh.field_dims[field]
node_offset = node_field_offset(dh, field)
offset = Ferrite.field_offset(dh, dh.field_names[field])
for (cellidx, cell) in enumerate(dh.grid.cells)
celldofs!(_celldofs, dh, cellidx) # update the dofs for this cell
for idx in 1:min(interpol_points, length(cell.nodes))
node = cell.nodes[idx]
if !visited[node]
noderange = (offset + (idx-1)*field_dim + 1):(offset + idx*field_dim) # the dofs in this node
for i in 1:field_dim
node_dofs[node_offset+i,node] = _celldofs[noderange[i]]
end
visited[node] = true
end
end
end
end
return node_dofs
end
| TopOptProblems | https://github.com/JuliaTopOpt/TopOptProblems.jl.git |
|
[
"MIT"
] | 0.1.1 | 10c04258080b378ecf9b81b6739206eca3f63535 | code | 3285 | """
Usage example:
```
using Distributions, LinearAlgebra, TopOpt
f1 = RandomMagnitude([0, -1], Uniform(0.5, 1.5))
f2 = RandomMagnitude(normalize([1, -1]), Uniform(0.5, 1.5))
f3 = RandomMagnitude(normalize([-1, -1]), Uniform(0.5, 1.5))
base_problem = PointLoadCantilever(Val{:Linear}, (160, 40), (1.0, 1.0), 1.0, 0.3, 1.0)
problem = MultiLoad(base_problem, 10000, [(160, 20) => f1, (80, 40) => f2, (120, 0) => f3])
```
"""
struct MultiLoad{dim, T, TP <: StiffnessTopOptProblem{dim, T}, TF} <: StiffnessTopOptProblem{dim, T}
problem::TP
F::TF
end
@forward_property MultiLoad problem
for F in (:getE, :getν, :nnodespercell, :getcloaddict, :getdim, :getpressuredict, :getfacesets)
@eval $F(p::MultiLoad) = $F(p.problem)
end
function MultiLoad(problem::StiffnessTopOptProblem, N::Int, load_rules::Vector{<:Pair})
I = Int[]
J = Int[]
V = Float64[]
for (pos, f) in load_rules
dofs = find_nearest_dofs(problem, pos)
for i in 1:N
load = f()
append!(I, dofs)
push!(J, fill(i, length(dofs))...)
append!(V, load)
end
end
F = sparse(I, J, V, ndofs(problem.ch.dh), N)
return MultiLoad(problem, F)
end
function MultiLoad(
problem::StiffnessTopOptProblem,
N::Int,
dist::Distributions.Distribution = Uniform(-2, 2),
)
F = generate_random_loads(problem, N, dist, random_direction)
return MultiLoad(problem, F)
end
function find_nearest_dofs(problem, p)
grid = problem.ch.dh.grid
shortest = Inf
closest = 0
for (i, n) in enumerate(grid.nodes)
dist = norm(n.x .- p)
if dist < shortest
shortest = dist
closest = i
end
end
@assert closest != 0
return problem.metadata.node_dofs[:, closest]
end
struct RandomMagnitude{Tf, Tdist} <: Function
f::Tf
dist::Tdist
end
(rm::RandomMagnitude)() = rm.f .* rand(rm.dist)
function random_direction()
theta = rand() * 2 * π
return [cos(theta), sin(theta)]
end
function get_surface_dofs(problem::StiffnessTopOptProblem)
dh = problem.ch.dh
boundary_matrix = dh.grid.boundary_matrix
interpolation = dh.field_interpolations[1]
celliterator = Ferrite.CellIterator(dh)
node_dofs = problem.metadata.node_dofs
faces, cells, _ = findnz(boundary_matrix)
surface_node_inds = Int[]
for i in 1:length(cells)
cellind = cells[i]
faceind = faces[i]
face = [Ferrite.faces(interpolation)[faceind]...]
Ferrite.reinit!(celliterator, cellind)
nodeinds = celliterator.nodes[face]
append!(surface_node_inds, nodeinds)
end
unique!(surface_node_inds)
return setdiff(node_dofs[:, surface_node_inds], problem.ch.prescribed_dofs)
end
function generate_random_loads(
problem::StiffnessTopOptProblem,
N::Int,
scalar::Distributions.Distribution = Distributions.Uniform(-2, 2),
direction::Function = random_direction,
)
loadrule = () -> direction() .* rand(scalar)
surface_dofs = get_surface_dofs(problem)
FI = Int[]
FJ = Int[]
FV = Float64[]
nodeinds = rand(1:size(surface_dofs, 2), N)
for i in 1:N
load = loadrule()
dofs = surface_dofs[:, nodeinds[i]]
append!(FI, dofs)
push!(FJ, i, i)
append!(FV, load)
end
return sparse(FI, FJ, FV)
end
| TopOptProblems | https://github.com/JuliaTopOpt/TopOptProblems.jl.git |
|
[
"MIT"
] | 0.1.1 | 10c04258080b378ecf9b81b6739206eca3f63535 | code | 25165 | using Ferrite: Cell
"""
abstract type StiffnessTopOptProblem{dim, T} <: AbstractTopOptProblem end
An abstract stiffness topology optimization problem. All subtypes must have the following fields:
- `ch`: a `Ferrite.ConstraintHandler` struct
- `metadata`: Metadata having various cell-node-dof relationships
- `black`: a `BitVector` of length equal to the number of elements where `black[e]` is 1 iff the `e`^th element must be part of the final design
- `white`: a `BitVector` of length equal to the number of elements where `white[e]` is 1 iff the `e`^th element must not be part of the final design
- `varind`: an `AbstractVector{Int}` of length equal to the number of elements where `varind[e]` gives the index of the decision variable corresponding to element `e`. Because some elements can be fixed to be black or white, not every element has a decision variable associated.
"""
abstract type StiffnessTopOptProblem{dim, T} <: AbstractTopOptProblem end
# Fallbacks
getdim(::StiffnessTopOptProblem{dim, T}) where {dim, T} = dim
floattype(::StiffnessTopOptProblem{dim, T}) where {dim, T} = T
getE(p::StiffnessTopOptProblem) = p.E
getν(p::StiffnessTopOptProblem) = p.ν
getgeomorder(p::StiffnessTopOptProblem) = nnodespercell(p) == 9 ? 2 : 1
getdensity(::StiffnessTopOptProblem{dim, T}) where {dim, T} = T(0)
getmetadata(p::StiffnessTopOptProblem) = p.metadata
getdh(p::StiffnessTopOptProblem) = p.ch.dh
getcloaddict(p::StiffnessTopOptProblem{dim, T}) where {dim, T} = Dict{String, Vector{T}}()
getpressuredict(p::StiffnessTopOptProblem{dim, T}) where {dim, T} = Dict{String, T}()
getfacesets(p::StiffnessTopOptProblem{dim, T}) where {dim, T} = Dict{String, Tuple{Int, T}}()
Ferrite.getncells(problem::StiffnessTopOptProblem) = Ferrite.getncells(getdh(problem).grid)
"""
```
///**********************************
///* *
///* * |
///* * |
///********************************** v
@params struct PointLoadCantilever{dim, T, N, M} <: StiffnessTopOptProblem{dim, T}
rect_grid::RectilinearGrid{dim, T, N, M}
E::T
ν::T
ch::ConstraintHandler{<:DofHandler{dim, <:Cell{dim,N,M}, T}, T}
force::T
force_dof::Integer
black::AbstractVector
white::AbstractVector
varind::AbstractVector{Int}
metadata::Metadata
end
```
- `dim`: dimension of the problem
- `T`: number type for computations and coordinates
- `N`: number of nodes in a cell of the grid
- `M`: number of faces in a cell of the grid
- `rect_grid`: a RectilinearGrid struct
- `E`: Young's modulus
- `ν`: Poisson's ratio
- `force`: force at the center right of the cantilever beam (positive is downward)
- `force_dof`: dof number at which the force is applied
- `ch`: a `Ferrite.ConstraintHandler` struct
- `metadata`: Metadata having various cell-node-dof relationships
- `black`: a `BitVector` of length equal to the number of elements where `black[e]` is 1 iff the `e`^th element must be part of the final design
- `white`: a `BitVector` of length equal to the number of elements where `white[e]` is 1 iff the `e`^th element must not be part of the final design
- `varind`: an `AbstractVector{Int}` of length equal to the number of elements where `varind[e]` gives the index of the decision variable corresponding to element `e`. Because some elements can be fixed to be black or white, not every element has a decision variable associated.
"""
@params struct PointLoadCantilever{dim, T, N, M} <: StiffnessTopOptProblem{dim, T}
rect_grid::RectilinearGrid{dim, T, N, M}
E::T
ν::T
ch::ConstraintHandler{<:DofHandler{dim, <:Cell{dim,N,M}, T}, T}
force::T
force_dof::Integer
black::AbstractVector
white::AbstractVector
varind::AbstractVector{Int}
metadata::Metadata
end
Base.show(::IO, ::MIME{Symbol("text/plain")}, ::PointLoadCantilever) = println("TopOpt point load cantilever beam problem")
"""
PointLoadCantilever(::Type{Val{CellType}}, nels::NTuple{dim,Int}, sizes::NTuple{dim}, E, ν, force) where {dim, CellType}
- `dim`: dimension of the problem
- `E`: Young's modulus
- `ν`: Poisson's ratio
- `force`: force at the center right of the cantilever beam (positive is downward)
- `nels`: number of elements in each direction, a 2-tuple for 2D problems and a 3-tuple for 3D problems
- `sizes`: the size of each element in each direction, a 2-tuple for 2D problems and a 3-tuple for 3D problems
- `CellType`: can be either `:Linear` or `:Quadratic` to determine the order of the geometric and field basis functions and element type. Only isoparametric elements are supported for now.
Example:
```
nels = (60,20);
sizes = (1.0,1.0);
E = 1.0;
ν = 0.3;
force = 1.0;
# Linear elements and linear basis functions
celltype = :Linear
# Quadratic elements and quadratic basis functions
#celltype = :Quadratic
problem = PointLoadCantilever(Val{celltype}, nels, sizes, E, ν, force)
```
"""
function PointLoadCantilever(::Type{Val{CellType}}, nels::NTuple{dim,Int}, sizes::NTuple{dim}, E = 1.0, ν = 0.3, force = 1.0) where {dim, CellType}
iseven(nels[2]) && (length(nels) < 3 || iseven(nels[3])) || throw("Grid does not have an even number of elements along the y and/or z axes.")
_T = promote_type(eltype(sizes), typeof(E), typeof(ν), typeof(force))
if _T <: Integer
T = Float64
else
T = _T
end
if CellType === :Linear || dim === 3
rect_grid = RectilinearGrid(Val{:Linear}, nels, T.(sizes))
else
rect_grid = RectilinearGrid(Val{:Quadratic}, nels, T.(sizes))
end
if haskey(rect_grid.grid.facesets, "fixed_all")
pop!(rect_grid.grid.facesets, "fixed_all")
end
#addfaceset!(rect_grid.grid, "fixed_all", x -> left(rect_grid, x));
addnodeset!(rect_grid.grid, "fixed_all", x -> left(rect_grid, x));
if haskey(rect_grid.grid.nodesets, "down_force")
pop!(rect_grid.grid.nodesets, "down_force")
end
addnodeset!(rect_grid.grid, "down_force", x -> right(rect_grid, x) && middley(rect_grid, x));
# Create displacement field u
dh = DofHandler(rect_grid.grid)
if CellType === :Linear || dim === 3
push!(dh, :u, dim) # Add a displacement field
else
ip = Lagrange{2, RefCube, 2}()
push!(dh, :u, dim, ip) # Add a displacement field
end
close!(dh)
ch = ConstraintHandler(dh)
#dbc = Dirichlet(:u, getfaceset(rect_grid.grid, "fixed_all"), (x,t) -> zeros(T, dim), collect(1:dim))
dbc = Dirichlet(:u, getnodeset(rect_grid.grid, "fixed_all"), (x,t) -> zeros(T, dim), collect(1:dim))
add!(ch, dbc)
close!(ch)
t = T(0)
update!(ch, t)
metadata = Metadata(dh)
fnode = Tuple(getnodeset(rect_grid.grid, "down_force"))[1]
node_dofs = metadata.node_dofs
force_dof = node_dofs[2, fnode]
N = nnodespercell(rect_grid)
M = nfacespercell(rect_grid)
black, white = find_black_and_white(dh)
varind = find_varind(black, white)
return PointLoadCantilever(rect_grid, E, ν, ch, force, force_dof, black, white, varind, metadata)
end
"""
```
|
|
v
O*********************************
O* *
O* *
O* *
O*********************************
O
struct HalfMBB{dim, T, N, M} <: StiffnessTopOptProblem{dim, T}
rect_grid::RectilinearGrid{dim, T, N, M}
E::T
ν::T
ch::ConstraintHandler{<:DofHandler{dim, <:Cell{dim,N,M}, T}, T}
force::T
force_dof::Integer
black::AbstractVector
white::AbstractVector
varind::AbstractVector{Int}
metadata::Metadata
end
```
- `dim`: dimension of the problem
- `T`: number type for computations and coordinates
- `N`: number of nodes in a cell of the grid
- `M`: number of faces in a cell of the grid
- `rect_grid`: a RectilinearGrid struct
- `E`: Young's modulus
- `ν`: Poisson's ratio
- `force`: force at the top left of half the MBB (positive is downward)
- `force_dof`: dof number at which the force is applied
- `ch`: a `Ferrite.ConstraintHandler` struct
- `metadata`: Metadata having various cell-node-dof relationships
- `black`: a `BitVector` of length equal to the number of elements where `black[e]` is 1 iff the `e`^th element must be part of the final design
- `white`: a `BitVector` of length equal to the number of elements where `white[e]` is 1 iff the `e`^th element must not be part of the final design
- `varind`: an `AbstractVector{Int}` of length equal to the number of elements where `varind[e]` gives the index of the decision variable corresponding to element `e`. Because some elements can be fixed to be black or white, not every element has a decision variable associated.
"""
@params struct HalfMBB{dim, T, N, M} <: StiffnessTopOptProblem{dim, T}
rect_grid::RectilinearGrid{dim, T, N, M}
E::T
ν::T
ch::ConstraintHandler{<:DofHandler{dim, <:Cell{dim,N,M}, T}, T}
force::T
force_dof::Integer
black::AbstractVector
white::AbstractVector
varind::AbstractVector{Int}
metadata::Metadata
end
Base.show(::IO, ::MIME{Symbol("text/plain")}, ::HalfMBB) = println("TopOpt half MBB problem")
"""
HalfMBB(::Type{Val{CellType}}, nels::NTuple{dim,Int}, sizes::NTuple{dim}, E, ν, force) where {dim, CellType}
- `dim`: dimension of the problem
- `E`: Young's modulus
- `ν`: Poisson's ratio
- `force`: force at the top left of half the MBB (positive is downward)
- `nels`: number of elements in each direction, a 2-tuple for 2D problems and a 3-tuple for 3D problems
- `sizes`: the size of each element in each direction, a 2-tuple for 2D problems and a 3-tuple for 3D problems
- `CellType`: can be either `:Linear` or `:Quadratic` to determine the order of the geometric and field basis functions and element type. Only isoparametric elements are supported for now.
Example:
```
nels = (60,20);
sizes = (1.0,1.0);
E = 1.0;
ν = 0.3;
force = -1.0;
# Linear elements and linear basis functions
celltype = :Linear
# Quadratic elements and quadratic basis functions
#celltype = :Quadratic
problem = HalfMBB(Val{celltype}, nels, sizes, E, ν, force)
```
"""
function HalfMBB(::Type{Val{CellType}}, nels::NTuple{dim,Int}, sizes::NTuple{dim}, E = 1.0, ν = 0.3, force = 1.0) where {dim, CellType}
_T = promote_type(eltype(sizes), typeof(E), typeof(ν), typeof(force))
if _T <: Integer
T = Float64
else
T = _T
end
if CellType === :Linear || dim === 3
rect_grid = RectilinearGrid(Val{:Linear}, nels, T.(sizes))
else
rect_grid = RectilinearGrid(Val{:Quadratic}, nels, T.(sizes))
end
if haskey(rect_grid.grid.facesets, "fixed_u1")
pop!(rect_grid.grid.facesets, "fixed_u1")
end
#addfaceset!(rect_grid.grid, "fixed_u1", x -> left(rect_grid, x));
addnodeset!(rect_grid.grid, "fixed_u1", x -> left(rect_grid, x));
if haskey(rect_grid.grid.nodesets, "fixed_u2")
pop!(rect_grid.grid.nodesets, "fixed_u2")
end
addnodeset!(rect_grid.grid, "fixed_u2", x -> bottom(rect_grid, x) && right(rect_grid, x));
if haskey(rect_grid.grid.nodesets, "down_force")
pop!(rect_grid.grid.nodesets, "down_force")
end
addnodeset!(rect_grid.grid, "down_force", x -> top(rect_grid, x) && left(rect_grid, x));
# Create displacement field u
dh = DofHandler(rect_grid.grid)
if CellType === :Linear || dim === 3
push!(dh, :u, dim)
else
ip = Lagrange{2, RefCube, 2}()
push!(dh, :u, dim, ip)
end
close!(dh)
ch = ConstraintHandler(dh)
#dbc1 = Dirichlet(:u, getfaceset(rect_grid.grid, "fixed_u1"), (x,t)->T[0], [1])
dbc1 = Dirichlet(:u, getnodeset(rect_grid.grid, "fixed_u1"), (x,t)->T[0], [1])
add!(ch, dbc1)
dbc2 = Dirichlet(:u, getnodeset(rect_grid.grid, "fixed_u2"), (x,t)->T[0], [2])
add!(ch, dbc2)
close!(ch)
t = T(0)
update!(ch, t)
metadata = Metadata(dh)
fnode = Tuple(getnodeset(rect_grid.grid, "down_force"))[1]
node_dofs = metadata.node_dofs
force_dof = node_dofs[2, fnode]
N = nnodespercell(rect_grid)
M = nfacespercell(rect_grid)
black, white = find_black_and_white(dh)
varind = find_varind(black, white)
return HalfMBB(rect_grid, E, ν, ch, force, force_dof, black, white, varind, metadata)
end
nnodespercell(p::Union{PointLoadCantilever, HalfMBB}) = nnodespercell(p.rect_grid)
function getcloaddict(p::Union{PointLoadCantilever{dim, T}, HalfMBB{dim, T}}) where {dim, T}
f = T[0, -p.force, 0]
fnode = Tuple(getnodeset(p.rect_grid.grid, "down_force"))[1]
return Dict{Int, Vector{T}}(fnode => f)
end
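# Illustrative usage sketch (added for exposition, not part of the original source): builds
# the cantilever problem from the docstring example above and reads back its concentrated
# load dictionary, which maps the loaded node id to the force vector constructed in
# `getcloaddict`. `_example_cantilever_load` is a hypothetical name.
function _example_cantilever_load()
    problem = PointLoadCantilever(Val{:Linear}, (60, 20), (1.0, 1.0), 1.0, 0.3, 1.0)
    cloads = getcloaddict(problem)   # Dict(fnode => [0.0, -1.0, 0.0]) for these inputs
    return problem, cloads
end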
"""
```
////////////
............
. .
. .
. .
. .
. ......................
. .
. .
. . |
................................. v
force
struct LBeam{T, N, M} <: StiffnessTopOptProblem{2, T}
E::T
ν::T
ch::ConstraintHandler{<:DofHandler{2, <:Cell{2,N,M}, T}, T}
force::T
force_dof::Integer
black::AbstractVector
white::AbstractVector
varind::AbstractVector{Int}
metadata::Metadata
end
```
- `T`: number type for computations and coordinates
- `N`: number of nodes in a cell of the grid
- `M`: number of faces in a cell of the grid
- `E`: Young's modulus
- `ν`: Poisson's ratio
- `force`: force applied on the right side of the L-beam's lower arm (positive is downward)
- `force_dof`: dof number at which the force is applied
- `ch`: a `Ferrite.ConstraintHandler` struct
- `metadata`: Metadata having various cell-node-dof relationships
- `black`: a `BitVector` of length equal to the number of elements where `black[e]` is 1 iff the `e`^th element must be part of the final design
- `white`: a `BitVector` of length equal to the number of elements where `white[e]` is 1 iff the `e`^th element must not be part of the final design
- `varind`: an `AbstractVector{Int}` of length equal to the number of elements where `varind[e]` gives the index of the decision variable corresponding to element `e`. Because some elements can be fixed to be black or white, not every element has a decision variable associated.
"""
@params struct LBeam{T, N, M} <: StiffnessTopOptProblem{2, T}
E::T
ν::T
ch::ConstraintHandler{<:DofHandler{2, <:Cell{2,N,M}, T}, T}
force::T
force_dof::Integer
black::AbstractVector
white::AbstractVector
varind::AbstractVector{Int}
metadata::Metadata
end
Base.show(::IO, ::MIME{Symbol("text/plain")}, ::LBeam) = println("TopOpt L-beam problem")
"""
LBeam(::Type{Val{CellType}}, ::Type{T}=Float64; length = 100, height = 100, upperslab = 50, lowerslab = 50, E = 1.0, ν = 0.3, force = 1.0) where {T, CellType}
- `T`: number type for computations and coordinates
- `E`: Young's modulus
- `ν`: Poisson's ratio
- `force`: force applied on the right side of the L-beam's lower arm (positive is downward)
- `length`, `height`, `upperslab` and `lowerslab` are explained in [`LGrid`](@ref).
- `CellType`: can be either `:Linear` or `:Quadratic` to determine the order of the geometric and field basis functions and element type. Only isoparametric elements are supported for now.
Example:
```
E = 1.0;
ν = 0.3;
force = 1.0;
# Linear elements and linear basis functions
celltype = :Linear
# Quadratic elements and quadratic basis functions
#celltype = :Quadratic
problem = LBeam(Val{celltype}, E = E, ν = ν, force = force)
```
"""
function LBeam(::Type{Val{CellType}}, ::Type{T}=Float64; length = 100, height = 100, upperslab = 50, lowerslab = 50, E = 1.0, ν = 0.3, force = 1.0) where {T, CellType}
# Create displacement field u
grid = LGrid(Val{CellType}, T, length=length, height=height, upperslab=upperslab,
lowerslab=lowerslab)
dh = DofHandler(grid)
if CellType === :Linear
push!(dh, :u, 2)
else
ip = Lagrange{2, RefCube, 2}()
push!(dh, :u, 2, ip)
end
close!(dh)
ch = ConstraintHandler(dh)
dbc = Dirichlet(:u, getfaceset(grid, "top"), (x,t)->T[0, 0], [1, 2])
add!(ch, dbc)
close!(ch)
t = T(0)
update!(ch, t)
metadata = Metadata(dh)
fnode = Tuple(getnodeset(grid, "load"))[1]
node_dofs = metadata.node_dofs
force_dof = node_dofs[2, fnode]
black, white = find_black_and_white(dh)
varind = find_varind(black, white)
return LBeam(E, ν, ch, force, force_dof, black, white, varind, metadata)
end
function boundingbox(grid::Ferrite.Grid{dim}) where dim
xmin1 = minimum(n->n.x[1], grid.nodes)
xmax1 = maximum(n->n.x[1], grid.nodes)
xmin2 = minimum(n->n.x[2], grid.nodes)
xmax2 = maximum(n->n.x[2], grid.nodes)
if dim === 2
return ((xmin1, xmin2), (xmax1, xmax2))
else
xmin3 = minimum(n->n.x[3], grid.nodes)
xmax3 = maximum(n->n.x[3], grid.nodes)
return ((xmin1, xmin2, xmin3), (xmax1, xmax2, xmax3))
end
end
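# Illustrative sketch (added for exposition, not part of the original source): `boundingbox`
# returns a pair of corner tuples. For a 2 x 2 quadrilateral grid generated with Ferrite's
# default corners this would be ((-1.0, -1.0), (1.0, 1.0)); the default-corner values are an
# assumption about Ferrite's defaults. `_example_boundingbox` is a hypothetical name.
function _example_boundingbox()
    grid = generate_grid(Quadrilateral, (2, 2))
    return boundingbox(grid)
end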
function RectilinearTopology(b, topology = ones(getncells(getdh(b).grid)))
bb = boundingbox(getdh(b).grid)
go = getgeomorder(b)
nels = Int.(round.(bb[2] .- bb[1]))
dim = length(nels)
if go === 1
rectgrid = generate_grid(Quadrilateral, nels, Vec{dim}(bb[1]), Vec{dim}(bb[2]))
elseif go === 2
rectgrid = generate_grid(QuadraticQuadrilateral, nels, Vec{dim}(bb[1]), Vec{dim}(bb[2]))
else
throw("Unsupported geometry.")
end
new_topology = zeros(prod(nels))
for (i, cell) in enumerate(CellIterator(getdh(b)))
sub = Int.(round.((cell.coords[1]...,))) .+ (1, 1)
ind = LinearIndices(nels)[sub...]
new_topology[ind] = topology[i]
end
return copy(reshape(new_topology, nels)')
end
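# Illustrative sketch (added for exposition, not part of the original source): the index
# mapping used in `RectilinearTopology` above. A cell whose first node sits at integer
# coordinates `coord` in a grid of `nels` unit cells maps to the linear index of the
# Cartesian index `coord .+ 1`. `_sketch_cell_index` is a hypothetical name.
_sketch_cell_index(coord::NTuple{2,Int}, nels::NTuple{2,Int}) = LinearIndices(nels)[(coord .+ 1)...]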
nnodespercell(p::LBeam{T, N}) where {T, N} = N
getdim(::LBeam) = 2
function getcloaddict(p::LBeam{T}) where {T}
f = T[0, -p.force]
fnode = Tuple(getnodeset(getdh(p).grid, "load"))[1]
return Dict{Int, Vector{T}}(fnode => f)
end
"""
```
1
OOO
...
. .
4 . .
30 . .
/ . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . <-
/ . . <- 2 f
/ . 3 . <-
/ . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . <-
^^^
|||
1 f
struct TieBeam{T, N, M} <: StiffnessTopOptProblem{2, T}
E::T
ν::T
force::T
ch::ConstraintHandler{<:DofHandler{2, <:Cell{2,N,M}, T}, T}
black::AbstractVector
white::AbstractVector
varind::AbstractVector{Int}
metadata::Metadata
end
```
- `T`: number type for computations and coordinates
- `N`: number of nodes in a cell of the grid
- `M`: number of faces in a cell of the grid
- `E`: Young's modulus
- `ν`: Poisson's ratio
- `force`: the magnitude used to define the pressure loads on the right and bottom edges of the tie-beam (see `getpressuredict`: `2 * force` on the `"rightload"` faceset and `-force` on the `"bottomload"` faceset)
- `ch`: a `Ferrite.ConstraintHandler` struct
- `metadata`: Metadata having various cell-node-dof relationships
- `black`: a `BitVector` of length equal to the number of elements where `black[e]` is 1 iff the `e`^th element must be part of the final design
- `white`: a `BitVector` of length equal to the number of elements where `white[e]` is 1 iff the `e`^th element must not be part of the final design
- `varind`: an `AbstractVector{Int}` of length equal to the number of elements where `varind[e]` gives the index of the decision variable corresponding to element `e`. Because some elements can be fixed to be black or white, not every element has a decision variable associated.
"""
@params struct TieBeam{T, N, M} <: StiffnessTopOptProblem{2, T}
E::T
ν::T
force::T
ch::ConstraintHandler{<:DofHandler{2, <:Cell{2,N,M}, T}, T}
black::AbstractVector
white::AbstractVector
varind::AbstractVector{Int}
metadata::Metadata
end
Base.show(::IO, ::MIME{Symbol("text/plain")}, ::TieBeam) = println("TopOpt tie-beam problem")
"""
TieBeam(::Type{Val{CellType}}, ::Type{T} = Float64, refine = 1, force = T(1); E = T(1), ν = T(0.3)) where {T, CellType}
- `T`: number type for computations and coordinates
- `E`: Young's modulus
- `ν`: Poisson's ratio
- `force`: the magnitude used to define the pressure loads on the right and bottom edges of the tie-beam (see `getpressuredict`: `2 * force` on the `"rightload"` faceset and `-force` on the `"bottomload"` faceset)
- `refine`: an integer value of 1 or greater that specifies the mesh refinement extent. A value of 1 gives the standard tie-beam problem in literature.
- `CellType`: can be either `:Linear` or `:Quadratic` to determine the order of the geometric and field basis functions and element type. Only isoparametric elements are supported for now.
"""
function TieBeam(::Type{Val{CellType}}, ::Type{T} = Float64, refine = 1, force = T(1); E = T(1), ν = T(0.3)) where {T, CellType}
grid = TieBeamGrid(Val{CellType}, T, refine)
dh = DofHandler(grid)
if CellType === :Linear
push!(dh, :u, 2)
else
ip = Lagrange{2, RefCube, 2}()
push!(dh, :u, 2, ip)
end
close!(dh)
ch = ConstraintHandler(dh)
dbc1 = Dirichlet(:u, getfaceset(grid, "leftfixed"), (x,t)->T[0, 0], [1, 2])
add!(ch, dbc1)
dbc2 = Dirichlet(:u, getfaceset(grid, "toproller"), (x,t)->T[0], [2])
add!(ch, dbc2)
close!(ch)
t = T(0)
update!(ch, t)
metadata = Metadata(dh)
black, white = find_black_and_white(dh)
varind = find_varind(black, white)
return TieBeam(E, ν, force, ch, black, white, varind, metadata)
end
getdim(::TieBeam) = 2
nnodespercell(::TieBeam{T, N}) where {T, N} = N
getpressuredict(p::TieBeam{T}) where {T} = Dict{String, T}("rightload"=>2*p.force, "bottomload"=>-p.force)
getfacesets(p::TieBeam) = getdh(p).grid.facesets
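# Illustrative usage sketch (added for exposition, not part of the original source): the
# tie-beam is loaded through pressures on facesets rather than a concentrated load;
# `getpressuredict` maps faceset names to pressure values derived from `force`.
# `_example_tiebeam_loads` is a hypothetical name.
function _example_tiebeam_loads()
    problem = TieBeam(Val{:Quadratic}, Float64)
    return getpressuredict(problem)   # Dict("rightload" => 2.0, "bottomload" => -1.0) for the defaults
end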
"""
```
******************************
* Pin1 F1 ―――> *
* o | *
* v *
* Pin2 *
* o F2 ―――> *
* | *
* v *
******************************
```
RayProblem(nels, pins, loads)
Constructs an instance of the type `RayProblem` that is a 2D beam with:
- Number of elements `nels`, e.g. `(60, 20)`, where each element is a 1 x 1 square,
- Pinned locations `pins`, where each pinned location is a `Vector` of length 2, e.g. `[[1, 18], [2, 8]]`, and
- Loads specified in `loads`, a dictionary mapping the location of each load to its vector value, e.g. `Dict([10, 18] => [1.0, -1.0], [5, 5] => [1.0, -1.0])`, which defines a load of `[1.0, -1.0]` at the point located at `[10, 18]` and a similar load at the point located at `[5, 5]`.
"""
@params struct RayProblem{T, N, M} <: StiffnessTopOptProblem{2, T}
rect_grid::RectilinearGrid{2, T, N, M}
E::T
ν::T
ch::ConstraintHandler{<:DofHandler{2, <:Cell{2, N, M}, T}, T}
loads::Dict
black::AbstractVector
white::AbstractVector
varind::AbstractVector{Int}
metadata::Metadata
end
function RayProblem(nels::NTuple{2, Int}, pins::Vector{<:Vector}, loads::Dict{<:Vector, <:Vector})
T = Float64
rect_grid = RectilinearGrid(Val{:Linear}, nels, (1.0, 1.0))
dim = length(nels)
for (i, pin) in enumerate(pins)
if haskey(rect_grid.grid.nodesets, "fixed$i")
pop!(rect_grid.grid.nodesets, "fixed$i")
end
addnodeset!(rect_grid.grid, "fixed$i", x -> x ≈ pin)
end
for (i, k) in enumerate(keys(loads))
if haskey(rect_grid.grid.nodesets, "force$i")
pop!(rect_grid.grid.nodesets, "force$i")
end
addnodeset!(rect_grid.grid, "force$i", x -> x ≈ k);
end
# Create displacement field u
dh = DofHandler(rect_grid.grid)
ip = Lagrange{2, RefCube, 1}()
push!(dh, :u, dim, ip) # Add a displacement field
close!(dh)
ch = ConstraintHandler(dh)
for i in 1:length(pins)
dbc = Dirichlet(:u, getnodeset(rect_grid.grid, "fixed$i"), (x,t) -> zeros(T, dim), collect(1:dim))
add!(ch, dbc)
end
close!(ch)
t = T(0)
update!(ch, t)
metadata = Metadata(dh)
black, white = find_black_and_white(dh)
varind = find_varind(black, white)
loadsdict = Dict{Int, Vector{Float64}}(map(enumerate(keys(loads))) do (i, k)
fnode = Tuple(getnodeset(rect_grid.grid, "force$i"))[1]
(fnode => loads[k])
end)
return RayProblem(rect_grid, 1.0, 0.3, ch, loadsdict, black, white, varind, metadata)
end
nnodespercell(p::RayProblem) = nnodespercell(p.rect_grid)
getcloaddict(p::RayProblem) = p.loads
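# Illustrative usage sketch (added for exposition, not part of the original source):
# constructs the `RayProblem` from the docstring above — two pins and two point loads on a
# 60 x 20 grid of unit elements. `_example_ray_problem` is a hypothetical name.
function _example_ray_problem()
    pins = [[1, 18], [2, 8]]
    loads = Dict([10, 18] => [1.0, -1.0], [5, 5] => [1.0, -1.0])
    problem = RayProblem((60, 20), pins, loads)
    return problem, getcloaddict(problem)   # node id => load vector for each load
end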
| TopOptProblems | https://github.com/JuliaTopOpt/TopOptProblems.jl.git |
|
[
"MIT"
] | 0.1.1 | 10c04258080b378ecf9b81b6739206eca3f63535 | code | 142 | module InputOutput
export InpStiffness,
save_mesh
include(joinpath("INP", "INP.jl"))
using .INP
include("VTK.jl")
using .VTK
end
| TopOptProblems | https://github.com/JuliaTopOpt/TopOptProblems.jl.git |
|
[
"MIT"
] | 0.1.1 | 10c04258080b378ecf9b81b6739206eca3f63535 | code | 1876 | module VTK
using ...TopOptProblems: TopOptProblems, StiffnessTopOptProblem, Ferrite
using WriteVTK
export save_mesh
function save_mesh(filename, problem::StiffnessTopOptProblem)
topology = ones(Ferrite.getncells(TopOptProblems.getdh(problem).grid))
vtkfile = WriteVTK.vtk_grid(filename, problem, topology)
outfiles = WriteVTK.vtk_save(vtkfile)
end
function save_mesh(filename, problem, solver)
save_mesh(filename, problem, solver.vars)
end
function save_mesh(filename, alg)
problem = alg.obj.problem
vars = alg.optimizer.obj.solver.vars
save_mesh(filename, problem, vars)
end
function save_mesh(filename, problem, vars::AbstractVector)
vtkfile = WriteVTK.vtk_grid(filename, problem, vars)
outfiles = WriteVTK.vtk_save(vtkfile)
end
function WriteVTK.vtk_grid(filename::AbstractString, problem::StiffnessTopOptProblem{dim, T}, vars::AbstractVector{T}) where {dim, T}
varind = problem.varind
black = problem.black
white = problem.white
grid = problem.ch.dh.grid
full_top = length(vars) == length(TopOptProblems.getdh(problem).grid.cells)
celltype = Ferrite.cell_to_vtkcell(Ferrite.getcelltype(grid))
cls = Ferrite.MeshCell[]
for (i, cell) in enumerate(Ferrite.CellIterator(grid))
if full_top
if vars[i] >= 0.5
push!(cls, Ferrite.MeshCell(celltype, copy(Ferrite.getnodes(cell))))
end
else
if black[i]
push!(cls, Ferrite.MeshCell(celltype, copy(Ferrite.getnodes(cell))))
elseif !white[i]
if vars[varind[i]] >= 0.5
push!(cls, Ferrite.MeshCell(celltype, copy(Ferrite.getnodes(cell))))
end
end
end
end
coords = reshape(reinterpret(T, Ferrite.getnodes(grid)), (dim, Ferrite.getnnodes(grid)))
return vtk_grid(filename, coords, cls)
end
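# Illustrative usage sketch (added for exposition, not part of the original source): saving
# a design to a VTK file keeps only the cells whose (fixed or optimized) density is at least
# 0.5, as implemented in the `vtk_grid` method above. `_example_save_design` is a
# hypothetical name; `problem` and `vars` are assumed to come from the caller.
function _example_save_design(problem::StiffnessTopOptProblem, vars::AbstractVector)
    return save_mesh("design", problem, vars)   # writes "design.vtu" via WriteVTK
end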
end
| TopOptProblems | https://github.com/JuliaTopOpt/TopOptProblems.jl.git |
|
[
"MIT"
] | 0.1.1 | 10c04258080b378ecf9b81b6739206eca3f63535 | code | 371 | module INP
export InpStiffness
using ...TopOptProblems: Metadata, StiffnessTopOptProblem
using Ferrite
using ...Utilities: find_black_and_white, find_varind
import ...TopOptProblems: nnodespercell, getE, getν, getgeomorder, getdensity, getpressuredict, getcloaddict, getfacesets
include(joinpath("Parser", "Parser.jl"))
using .Parser
include("inpstiffness.jl")
end
| TopOptProblems | https://github.com/JuliaTopOpt/TopOptProblems.jl.git |