licenses
sequencelengths 1
3
| version
stringclasses 677
values | tree_hash
stringlengths 40
40
| path
stringclasses 1
value | type
stringclasses 2
values | size
stringlengths 2
8
| text
stringlengths 25
67.1M
| package_name
stringlengths 2
41
| repo
stringlengths 33
86
|
---|---|---|---|---|---|---|---|---|
[
"MIT"
] | 0.2.0 | a5c4f933e59401f83e1c36ccfc40e4ac2fd87e73 | code | 7050 | function similarity(cache::AbstractCache, indices::Vector{Int}, embedding::Any)
    # Thread-safe entry point: takes `items_lock` before delegating to the
    # cache-type-specific `similarity` method that scans `cache.items`.
    return lock(cache.items_lock) do
        similarity(cache, cache.items, indices, embedding)
    end
end
"""
    similarity(cache::SemanticCache, items::Vector{CachedItem},
        indices::Vector{Int}, embedding::Vector{Float32})

Finds the most similar item in the cache to the given embedding. Search is done via cosine similarity (dot product).

# Arguments
- `cache::SemanticCache`: The cache to search in.
- `items::Vector{CachedItem}`: The items to search in.
- `indices::Vector{Int}`: The indices of the items to search in.
- `embedding::Vector{Float32}`: The embedding to search for.

# Returns
A tuple `(max_sim, max_idx)` where
- `max_sim`: The maximum similarity (`-1.0f0` if `indices` is empty).
- `max_idx`: The index of the most similar item (`0` if `indices` is empty).

# Throws
- `ArgumentError`: If any index in `indices` is out of bounds for `items`.

# Notes
- The returned item is not guaranteed to be very similar; you need to check if the similarity is high enough.
- We assume that embeddings are normalized to have L2 norm 1, so cosine similarity is the same as dot product.
"""
function similarity(cache::SemanticCache, items::Vector{CachedItem},
        indices::Vector{Int}, embedding::Vector{Float32})
    ## Bugfix: return a `(sim, idx)` tuple (previously `Float32[]`) so callers can
    ## always destructure the result the same way
    isempty(indices) && return (-1.0f0, 0)
    ## Validate inputs with `ArgumentError` rather than `@assert` (asserts may be
    ## compiled out); indices are 1-based, so 0 is invalid -- the original check
    ## (`0 <= minimum`) accepted 0, which would crash at `items[0]` below
    maximum(indices) <= length(items) ||
        throw(ArgumentError("all `indices` must be less than or equal to the length of `items`"))
    minimum(indices) >= 1 ||
        throw(ArgumentError("all `indices` must be positive (1-based indexing)"))
    ## Track the best match; the Float32 literal keeps the loop type-stable
    ## (the original `max_sim = -1` mixed Int and Float32)
    max_sim = -1.0f0
    max_idx = 0
    ## `@inbounds` is safe: all indices were validated against `length(items)` above
    @inbounds for i in indices
        sim = dot(items[i].embedding, embedding)
        if sim > max_sim
            max_sim = sim
            max_idx = i
        end
    end
    return (max_sim, max_idx)
end
"""
(cache::SemanticCache)(
key::String, fuzzy_input::String; verbose::Integer = 0, min_similarity::Real = 0.95)
Finds the item that EXACTLY matches the provided cache `key` and is the most similar given its embedding. Similarity must be at least `min_similarity`.
Search is done via cosine similarity (dot product).
# Arguments
- `key::String`: The key to match exactly.
- `fuzzy_input::String`: The input to embed and compare to the cache.
- `verbose::Integer = 0`: The verbosity level.
- `min_similarity::Real = 0.95`: The minimum similarity.
# Returns
A `CachedItem`:
- If the similarity is above `min_similarity`, the `output` field is set to the cached output.
- If the similarity is below `min_similarity`, the `output` field is set to `nothing`.
You can validate if an item has been found by checking if `output` is not `nothing` or simply `isvalid(item)`.
# Example
```julia
cache = SemanticCache()
item = cache("key1", "fuzzy_input"; min_similarity=0.95)
## add it to cache if new
if !isvalid(item)
# calculate the expensive output
output = expensive_calculation()
item.output = output
## add it to cache
push!(cache, item)
end
# If you ask again, it will be faster because it's in the cache
item = cache("key1", "fuzzy_input"; min_similarity=0.95)
```
"""
function (cache::SemanticCache)(
        key::String, fuzzy_input::String; verbose::Integer = 0, min_similarity::Real = 0.95)
    # Candidate items are restricted to those stored under the exact `key`;
    # unknown keys yield an empty index list (handled below as a miss)
    indices = get(cache, key, Int[])
    (verbose >= 2) && @info "Candidates for $(key): $(length(indices)) items"
    ## Embed and Normalize
    # `split_instead_trunc = true`: long inputs are split into chunks and the
    # chunk embeddings are averaged (`mean` branch); normalization makes the
    # later dot product equivalent to cosine similarity
    emb_result = FlashRank.embed(EMBEDDER, fuzzy_input; split_instead_trunc = true)
    embedding = if size(emb_result.embeddings, 2) > 1
        mean(emb_result.embeddings; dims = 2) |> vec |> normalize
    else
        emb_result.embeddings |> vec |> normalize
    end
    (verbose >= 2) && @info "Embedding computed in $(round(emb_result.elapsed, digits=3))s"
    hash_ = hash(fuzzy_input)
    # No candidates for this key -> cache miss: `output` stays `nothing`,
    # ie, `isvalid(item) == false`
    isempty(indices) && return CachedItem(; input_hash = hash_, embedding, key)
    ## Calculate similarity
    max_sim, max_idx = similarity(cache, indices, embedding)
    ## Find the highest and check if it's above the threshold
    output = max_sim >= min_similarity ? cache.items[max_idx].output : nothing
    (verbose >= 1) &&
        @info (isnothing(output) ?
               "No cache match found (max. sim: $(round(max_sim, digits=3)))" :
               "Match found (max. sim: $(round(max_sim, digits=3)))")
    ##
    return CachedItem(; input_hash = hash_, embedding, key, output)
end
"""
    similarity(cache::HashCache, items::Vector{CachedItem},
        indices::Vector{Int}, hash::UInt64)

Finds the first item (among `items[indices]`) whose `input_hash` exactly matches `hash`.

# Returns
A tuple `(max_sim, max_idx)` where
- `max_sim`: `1` if an exact hash match was found, `-1` otherwise.
- `max_idx`: The index of the matching item (`0` if there is no match).

# Throws
- `ArgumentError`: If any index in `indices` is out of bounds for `items`.
"""
function similarity(cache::HashCache, items::Vector{CachedItem},
        indices::Vector{Int}, hash::UInt64)
    ## Bugfix: return a `(sim, idx)` tuple (previously `Float32[]`) so callers can
    ## always destructure the result the same way
    isempty(indices) && return (-1, 0)
    ## Validate inputs with `ArgumentError` rather than `@assert`; indices are
    ## 1-based, so 0 is invalid (the original check accepted it)
    maximum(indices) <= length(items) ||
        throw(ArgumentError("all `indices` must be less than or equal to the length of `items`"))
    minimum(indices) >= 1 ||
        throw(ArgumentError("all `indices` must be positive (1-based indexing)"))
    ## Exact match: the first hit wins and we stop scanning
    max_sim = -1
    max_idx = 0
    @inbounds for i in indices
        if items[i].input_hash == hash
            ## use `1` (not `true`) so `max_sim` stays an `Int` (type-stable);
            ## numerically equivalent for the caller's `>= min_similarity` check
            max_sim = 1
            max_idx = i
            break
        end
    end
    return (max_sim, max_idx)
end
"""
(cache::HashCache)(key::String, fuzzy_input::String; verbose::Integer = 0, min_similarity::Real = 1.0)
Finds the item that EXACTLY matches the provided cache `key` and EXACTLY matches the hash of `fuzzy_input`.
# Arguments
- `key::String`: The key to match exactly.
- `fuzzy_input::String`: The input to compare the hash of.
- `verbose::Integer = 0`: The verbosity level.
- `min_similarity::Real = 1.0`: The minimum similarity (we expect exact match defined as 1.0).
# Returns
A `CachedItem`:
- If an exact match is found, the `output` field is set to the cached output.
- If no exact match is found, the `output` field is set to `nothing`.
You can validate if an item has been found by checking if `output` is not `nothing` or simply `isvalid(item)`.
# Example
```julia
cache = HashCache()
item = cache("key1", "fuzzy_input")
## add it to cache if new
if !isvalid(item)
# calculate the expensive output
output = expensive_calculation()
item.output = output
## add it to cache
push!(cache, item)
end
# If you ask again, it will be faster because it's in the cache
item = cache("key1", "fuzzy_input")
```
"""
function (cache::HashCache)(
        key::String, fuzzy_input::String; verbose::Integer = 0, min_similarity::Real = 1.0)
    # Candidates are restricted to the items stored under the exact `key`
    candidate_idxs = get(cache, key, Int[])
    (verbose >= 2) && @info "Candidates for $(key): $(length(candidate_idxs)) items"
    # No embedding here -- the input hash acts as the request "fingerprint"
    input_hash = hash(fuzzy_input)
    if isempty(candidate_idxs)
        # Cache miss: `output` stays `nothing`, ie, `isvalid(item) == false`
        return CachedItem(; input_hash, key)
    end
    ## Calculate similarity (exact-match: above threshold only on a hash hit)
    best_sim, best_idx = similarity(cache, candidate_idxs, input_hash)
    cached_output = best_sim >= min_similarity ? cache.items[best_idx].output : nothing
    (verbose >= 1) &&
        @info (isnothing(cached_output) ? "No cache match found" : "Match found")
    return CachedItem(; input_hash, key, output = cached_output)
end
| SemanticCaches | https://github.com/svilupp/SemanticCaches.jl.git |
|
[
"MIT"
] | 0.2.0 | a5c4f933e59401f83e1c36ccfc40e4ac2fd87e73 | code | 3108 | @kwdef mutable struct CachedItem
    key::String                             # exact-match cache key (eg, model name + params)
    input_hash::UInt64                      # hash of the raw input string (used by `HashCache`)
    embedding::Vector{Float32} = Float32[]  # normalized input embedding (used by `SemanticCache`)
    output::Any = nothing                   # cached result; `nothing` marks a miss (see `isvalid`)
    created_at::DateTime = now()            # creation time (caches must be invalidated manually)
end
# A cached item is "valid" (ie, a cache hit) once its `output` has been filled in.
Base.isvalid(item::CachedItem) = !isnothing(item.output)
# Common supertype of `SemanticCache` and `HashCache`; the generic methods below
# expect subtypes to have `items`, `lookup`, `items_lock`, and `lookup_lock` fields.
abstract type AbstractCache end
"""
SemanticCache
A cache that stores embeddings and uses semantic search to find the most relevant items.
Any incoming request must match `key` exactly (in `lookup`), otherwise it's not accepted.
`key` represents what user finds meaningful to be strictly matching (eg, model name, temperature, etc).
# Fields
- `items`: A vector of cached items (type `CachedItem`)
- `lookup`: A dictionary that maps keys to the indices of the items that have that key.
- `items_lock`: A lock for the items vector.
- `lookup_lock`: A lock for the lookup dictionary.
"""
@kwdef mutable struct SemanticCache <: AbstractCache
    items::Vector{CachedItem} = CachedItem[]                         # cached items, in insertion order
    lookup::Dict{String, Vector{Int}} = Dict{String, Vector{Int}}()  # key -> indices into `items`
    items_lock::ReentrantLock = ReentrantLock()                      # guards reads/writes of `items`
    lookup_lock::ReentrantLock = ReentrantLock()                     # guards reads/writes of `lookup`
end
"""
HashCache
A cache that uses string hashes to find the exactly matching items. Useful for long input strings, which cannot be embedded quickly.
Any incoming request must match `key` exactly (in `lookup`), otherwise it's not accepted.
`key` represents what user finds meaningful to be strictly matching (eg, model name, temperature, etc).
# Fields
- `items`: A vector of cached items (type `CachedItem`)
- `lookup`: A dictionary that maps keys to the indices of the items that have that key.
- `items_lock`: A lock for the items vector.
- `lookup_lock`: A lock for the lookup dictionary.
"""
@kwdef mutable struct HashCache <: AbstractCache
    items::Vector{CachedItem} = CachedItem[]                         # cached items, in insertion order
    lookup::Dict{String, Vector{Int}} = Dict{String, Vector{Int}}()  # key -> indices into `items`
    items_lock::ReentrantLock = ReentrantLock()                      # guards reads/writes of `items`
    lookup_lock::ReentrantLock = ReentrantLock()                     # guards reads/writes of `lookup`
end
## Show methods
function Base.show(io::IO, cache::AbstractCache)
    # One-line summary, eg, "SemanticCache with 3 items"
    print(io, nameof(typeof(cache)), " with ", length(cache.items), " items")
end
function Base.show(io::IO, item::CachedItem)
    # Only report whether an output is present; the payload itself may be huge
    status = isnothing(item.output) ? "<no output>" : "<has output>"
    print(io, "CachedItem with key: ", item.key, " and output: ", status)
end
"""
    Base.push!(cache::AbstractCache, item::CachedItem)

Adds `item` to the cache (both the `items` vector and the `lookup` index) and
returns the cache. Thread-safe.
"""
function Base.push!(cache::AbstractCache, item::CachedItem)
    ## Take both locks (lookup first, then items) so the two structures stay in sync.
    ## Bugfix: the do-block form of `lock` releases the locks even if an error is
    ## thrown; the previous explicit lock/unlock pairs would leak both locks on error.
    lock(cache.lookup_lock) do
        lock(cache.items_lock) do
            ## Add to items vector
            push!(cache.items, item)
            idx = length(cache.items)
            ## Add to lookup, creating the index list for previously unseen keys
            indices = get!(() -> Int[], cache.lookup, item.key)
            push!(indices, idx)
        end
    end
    return cache
end
function Base.getindex(cache::AbstractCache, key::String)
    # Read the lookup table under its lock; throws `KeyError` for unknown keys
    return lock(cache.lookup_lock) do
        cache.lookup[key]
    end
end
function Base.get(cache::AbstractCache, key::String, default = Int[])
    # Thread-safe read of `cache.lookup`; unlike `getindex`, unknown keys
    # return `default` instead of throwing a `KeyError`.
    return lock(() -> get(cache.lookup, key, default), cache.lookup_lock)
end | SemanticCaches | https://github.com/svilupp/SemanticCaches.jl.git |
|
[
"MIT"
] | 0.2.0 | a5c4f933e59401f83e1c36ccfc40e4ac2fd87e73 | code | 372 | using SemanticCaches
using Dates
using Test
using Aqua
# Test runner: Aqua.jl package-quality checks first, then the unit tests
# included from the sibling files.
@testset "SemanticCaches.jl" begin
    @testset "Code quality (Aqua.jl)" begin
        # disable ambiguities due to upstream packages
        Aqua.test_all(SemanticCaches; ambiguities = false)
    end
    @testset "SemanticCache" begin
        # `types.jl` covers the structs; `similarity_lookup.jl` covers lookups
        include("types.jl")
        include("similarity_lookup.jl")
    end
end
| SemanticCaches | https://github.com/svilupp/SemanticCaches.jl.git |
|
[
"MIT"
] | 0.2.0 | a5c4f933e59401f83e1c36ccfc40e4ac2fd87e73 | code | 1930 |
# End-to-end lookup tests: `SemanticCache` (fuzzy, embedding-based) and
# `HashCache` (exact, hash-based). The SemanticCache tests embed inputs with the
# local model, so the first run may need model weights downloaded.
@testset "similarity_lookup" begin
    # Test 1: Basic similarity lookup with SemanticCache
    cache = SemanticCache()
    item = cache("key1", "fuzzy_input"; min_similarity = 0.95)
    @test !isvalid(item) # Expecting a cache miss since the cache is empty
    # Test 2: Adding and retrieving an item from SemanticCache
    cache = SemanticCache()
    item = cache("key1", "fuzzy_input"; min_similarity = 0.95)
    if !isvalid(item)
        item.output = "expensive result"
        push!(cache, item)
    end
    item = cache("key1", "fuzzy_input"; min_similarity = 0.95)
    @test isvalid(item) && item.output == "expensive result" # Expecting a cache hit
    # Test 3: Similarity threshold in SemanticCache
    cache = SemanticCache()
    item = cache("key1", "this is my input"; min_similarity = 0.95)
    if !isvalid(item)
        item.output = "expensive result"
        push!(cache, item)
    end
    item = cache("key1", "very different text"; min_similarity = 0.95)
    @test !isvalid(item) # Expecting a cache miss due to low similarity
    # Test 4: Basic similarity lookup with HashCache
    cache = HashCache()
    item = cache("key1", "fuzzy_input")
    @test !isvalid(item) # Expecting a cache miss since the cache is empty
    # Test 5: Adding and retrieving an item from HashCache
    cache = HashCache()
    item = cache("key1", "fuzzy_input")
    if !isvalid(item)
        item.output = "expensive result"
        push!(cache, item)
    end
    item = cache("key1", "fuzzy_input")
    @test isvalid(item) && item.output == "expensive result" # Expecting a cache hit
    # Test 6: Exact match requirement in HashCache
    cache = HashCache()
    item = cache("key1", "fuzzy_input")
    if !isvalid(item)
        item.output = "expensive result"
        push!(cache, item)
    end
    item = cache("key1", "different_input")
    @test !isvalid(item) # Expecting a cache miss due to different input hash
end
| SemanticCaches | https://github.com/svilupp/SemanticCaches.jl.git |
|
[
"MIT"
] | 0.2.0 | a5c4f933e59401f83e1c36ccfc40e4ac2fd87e73 | code | 7252 |
# Unit tests for `CachedItem` construction, `SemanticCache` state management
# (`push!`/`getindex`/`get`), thread-safety of concurrent inserts, and `show`.
# NOTE: later asserts depend on state built up by earlier statements (eg, the
# item counts), so the statements must run in this exact order.
@testset "CachedItem,SemanticCache" begin
    # Test 1: Create a CachedItem and check its fields
    item = CachedItem(; key = "key1", input_hash = hash("input1"))
    @test item.key == "key1" # Check key field
    @test item.input_hash == hash("input1") # Check input field
    @test isnothing(item.output) # Check output field is nothing initially
    @test item.created_at <= now() # Check created_at field is a DateTime
    @test isvalid(item) == false
    # Test 2: Create a SemanticCache and check its initial state
    cache = SemanticCache()
    @test length(cache.items) == 0 # Check items vector is empty
    @test length(cache.lookup) == 0 # Check lookup dictionary is empty
    # Test 3: Add a CachedItem to SemanticCache and check its state
    push!(cache, item)
    @test length(cache.items) == 1 # Check items vector has one item
    @test haskey(cache.lookup, "key1") # Check lookup dictionary has the key
    @test cache.lookup["key1"] == [1] # Check lookup dictionary points to the correct index
    # Test 4: Retrieve an item from SemanticCache using getindex
    idxs = cache["key1"]
    @test idxs == [1] # Check the retrieved index is correct
    # Test 5: Retrieve an item from SemanticCache using get
    idxs = get(cache, "key1")
    @test idxs == [1] # Check the retrieved index is correct
    # Test 6: Add another CachedItem with the same key and check the state
    item2 = CachedItem(; key = "key1", input_hash = hash("input2"))
    push!(cache, item2)
    @test length(cache.items) == 2 # Check items vector has two items
    @test cache.lookup["key1"] == [1, 2] # Check lookup dictionary points to both indices
    # Test 7: Add a CachedItem with a different key and check the state
    item3 = CachedItem(; key = "key2", input_hash = hash("input3"))
    push!(cache, item3)
    @test length(cache.items) == 3 # Check items vector has three items
    @test haskey(cache.lookup, "key2") # Check lookup dictionary has the new key
    @test cache.lookup["key2"] == [3] # Check lookup dictionary points to the correct index
    # Test 8: Retrieve an item with a non-existent key using getindex
    @test_throws KeyError cache["non_existent_key"]
    # Test 9: Retrieve an item with a non-existent key using get
    idxs = get(cache, "non_existent_key")
    @test idxs == Int64[] # Check the retrieved index is an empty array
    # Test 10: Ensure thread safety by adding items concurrently
    Threads.@threads for i in 1:100
        item = CachedItem(; key = "key$i", input_hash = hash("input$i"))
        push!(cache, item)
    end
    @test length(cache.items) == 103 # Check items vector has 103 items
    for i in 1:100
        @test haskey(cache.lookup, "key$i") # Check lookup dictionary has the new keys
    end
    ## Show methods
    # Test 1: Show an empty SemanticCache
    sem_cache = SemanticCache()
    io = IOBuffer()
    show(io, sem_cache)
    @test String(take!(io)) == "SemanticCache with 0 items" # Check the output for an empty SemanticCache
    # Test 2: Show a SemanticCache with one item
    item = CachedItem(; key = "key1", input_hash = hash("input1"))
    push!(sem_cache, item)
    io = IOBuffer()
    show(io, sem_cache)
    @test String(take!(io)) == "SemanticCache with 1 items" # Check the output for a SemanticCache with one item
    # Test 3: Show a SemanticCache with multiple items
    item2 = CachedItem(; key = "key2", input_hash = hash("input2"))
    push!(sem_cache, item2)
    io = IOBuffer()
    show(io, sem_cache)
    @test String(take!(io)) == "SemanticCache with 2 items" # Check the output for a SemanticCache with multiple items
    # Test 4: CachedItem
    io = IOBuffer()
    show(io, item)
    @test String(take!(io)) ==
          "CachedItem with key: key1 and output: <no output>"
    # with output
    item.output = "output1"
    show(io, item)
    @test String(take!(io)) ==
          "CachedItem with key: key1 and output: <has output>"
end
# Unit tests for `HashCache`: state management, concurrent inserts, and `show`.
# Mirrors the SemanticCache testset above; statements are order-dependent.
@testset "HashCache" begin
    # Test 1: Create a HashCache and check its initial state
    hash_cache = HashCache()
    @test length(hash_cache.items) == 0 # Check items vector is empty
    @test length(hash_cache.lookup) == 0 # Check lookup dictionary is empty
    # Test 2: Add a CachedItem to HashCache and check its state
    item = CachedItem(; key = "key1", input_hash = hash("input1"))
    push!(hash_cache, item)
    @test length(hash_cache.items) == 1 # Check items vector has one item
    @test haskey(hash_cache.lookup, "key1") # Check lookup dictionary has the key
    @test hash_cache.lookup["key1"] == [1] # Check lookup dictionary points to the correct index
    # Test 3: Retrieve an item from HashCache using getindex
    idxs = hash_cache["key1"]
    @test idxs == [1] # Check the retrieved index is correct
    # Test 4: Retrieve an item from HashCache using get
    idxs = get(hash_cache, "key1")
    @test idxs == [1] # Check the retrieved index is correct
    # Test 5: Add another CachedItem with the same key and check the state
    item2 = CachedItem(; key = "key1", input_hash = hash("input2"))
    push!(hash_cache, item2)
    @test length(hash_cache.items) == 2 # Check items vector has two items
    @test hash_cache.lookup["key1"] == [1, 2] # Check lookup dictionary points to both indices
    # Test 6: Add a CachedItem with a different key and check the state
    item3 = CachedItem(; key = "key2", input_hash = hash("input3"))
    push!(hash_cache, item3)
    @test length(hash_cache.items) == 3 # Check items vector has three items
    @test haskey(hash_cache.lookup, "key2") # Check lookup dictionary has the new key
    @test hash_cache.lookup["key2"] == [3] # Check lookup dictionary points to the correct index
    # Test 7: Retrieve an item with a non-existent key using getindex
    @test_throws KeyError hash_cache["non_existent_key"]
    # Test 8: Retrieve an item with a non-existent key using get
    idxs = get(hash_cache, "non_existent_key")
    @test idxs == Int64[] # Check the retrieved index is an empty array
    # Test 9: Ensure thread safety by adding items concurrently
    Threads.@threads for i in 1:100
        item = CachedItem(; key = "key$i", input_hash = hash("input$i"))
        push!(hash_cache, item)
    end
    @test length(hash_cache.items) == 103 # Check items vector has 103 items
    for i in 1:100
        @test haskey(hash_cache.lookup, "key$i") # Check lookup dictionary has the new keys
    end
    ## Show methods
    # Test 4: Show an empty HashCache
    hash_cache = HashCache()
    io = IOBuffer()
    show(io, hash_cache)
    @test String(take!(io)) == "HashCache with 0 items" # Check the output for an empty HashCache
    # Test 5: Show a HashCache with one item
    item3 = CachedItem(; key = "key3", input_hash = hash("input3"))
    push!(hash_cache, item3)
    io = IOBuffer()
    show(io, hash_cache)
    @test String(take!(io)) == "HashCache with 1 items" # Check the output for a HashCache with one item
    # Test 6: Show a HashCache with multiple items
    item4 = CachedItem(; key = "key4", input_hash = hash("input4"))
    push!(hash_cache, item4)
    io = IOBuffer()
    show(io, hash_cache)
    @test String(take!(io)) == "HashCache with 2 items" # Check the output for a HashCache with multiple items
end | SemanticCaches | https://github.com/svilupp/SemanticCaches.jl.git |
|
[
"MIT"
] | 0.2.0 | a5c4f933e59401f83e1c36ccfc40e4ac2fd87e73 | docs | 577 | # Changelog
All notable changes to this project will be documented in this file.
The format is based on [Keep a Changelog](https://keepachangelog.com/en/1.0.0/),
and this project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0.html).
## [Unreleased]
### Added
### Fixed
## [0.2.0]
### Fixed
- Remove statements that automatically allow DATADEPS downloading. Now simply catches the error and prints a message to the user. For users, remember to set `ENV["DATADEPS_ALWAYS_ACCEPT"] = "true"` before running the package.
## [0.1.0]
### Added
- Initial release | SemanticCaches | https://github.com/svilupp/SemanticCaches.jl.git |
|
[
"MIT"
] | 0.2.0 | a5c4f933e59401f83e1c36ccfc40e4ac2fd87e73 | docs | 12382 | # SemanticCaches.jl
[](https://svilupp.github.io/SemanticCaches.jl/dev/)
[](https://github.com/svilupp/SemanticCaches.jl/actions/workflows/CI.yml?query=branch%3Amain)
[](https://codecov.io/gh/svilupp/SemanticCaches.jl)
[](https://github.com/JuliaTesting/Aqua.jl)
SemanticCaches.jl is a very hacky implementation of a semantic cache for AI applications to save time and money with repeated requests.
It's not particularly fast, because we're trying to prevent API calls that can take even 20 seconds.
Note that we're using a tiny BERT model with a maximum chunk size of 512 tokens to provide fast local embeddings running on a CPU.
For longer sentences, we split them into several chunks and consider their average embedding, but use it carefully! The latency can skyrocket and become worse than simply calling the original API.
## Installation
To install SemanticCaches.jl, simply add the package using the Julia package manager:
```julia
using Pkg;
Pkg.activate(".")
Pkg.add("SemanticCaches")
```
## Quick Start Guide
```julia
## This line is very important to be able to download the models!!!
ENV["DATADEPS_ALWAYS_ACCEPT"] = "true"
using SemanticCaches
sem_cache = SemanticCache()
# First argument: the key must always match exactly, eg, model, temperature, etc
# Second argument: the input text to be compared with the cache, can be fuzzy matched
item = sem_cache("key1", "say hi!"; verbose = 1) # notice the verbose flag it can 0,1,2 for different level of detail
if !isvalid(item)
@info "cache miss!"
item.output = "expensive result X"
# Save the result to the cache for future reference
push!(sem_cache, item)
end
# In practice, long texts may take too long to embed even with our tiny model
# so let's not compare anything above 2000 tokens =~ 5000 characters (threshold of c. 100ms)
hash_cache = HashCache()
input = "say hi"
input = "say hi "^1000
active_cache = length(input) > 5000 ? hash_cache : sem_cache
item = active_cache("key1", input; verbose = 1)
if !isvalid(item)
@info "cache miss!"
item.output = "expensive result X"
push!(active_cache, item)
end
```
## How it Works
The primary objective of building this package was to cache expensive API calls to GenAI models.
The system offers exact matching (faster, `HashCache`) and semantic similarity lookup (slower, `SemanticCache`) of STRING inputs.
In addition, all requests are first compared on a “cache key”, which represents a key that must always match exactly for requests to be considered interchangeable (eg, same model, same provider, same temperature, etc).
You need to choose the appropriate cache key and input depending on your use case. The default choice for the cache key should be the model name.
What happens when you call the cache (provide `cache_key` and `string_input`)?
- All cached outputs are stored in a vector `cache.items`.
- When we receive a request, the `cache_key` is looked up to find indices of the corresponding items in `items`. If `cache_key` is not found, we return `CachedItem` with an empty `output` field (ie, `isvalid(item) == false`).
- We embed the `string_input` using a tiny BERT model and normalize the embeddings (to make it easier to compare the cosine distance later).
- We then compare the cosine distance with the embeddings of the cached items.
- If the cosine distance is higher than `min_similarity` threshold, we return the cached item (The output can be found in the field `item.output`).
If we haven't found any cached item, we return `CachedItem` with an empty `output` field (ie, `isvalid(item) == false`).
Once you calculate the response and save it in `item.output`, you can push the item to the cache by calling `push!(cache, item)`.
## Suitable Use Cases
- This package is great if you know you will have a smaller volume of requests (eg, <10k per session or machine).
- It’s ideal to reduce the costs of running your evals, because even when you change your RAG pipeline configuration many of the calls will be repeated and can take advantage of caching.
- Lastly, this package can be really useful for demos and small user applications, where you can know some of the system inputs upfront, so you can cache them and show incredible response times!
- This package is NOT suitable for production systems with hundreds of thousands of requests and remember that this is a very basic cache that you need to manually invalidate over time!
## Advanced Usage
### Caching HTTP Requests
Based on your knowledge of the API calls made, you need determine the: 1) cache key (separate store of cached items, eg, different models or temperatures) and 2) how to unpack the HTTP request into a string (eg, unwrap and join the formatted message contents for OpenAI API).
Here's a brief outline of how you can use SemanticCaches.jl with [PromptingTools.jl](https://github.com/svilupp/PromptingTools.jl).
```julia
using PromptingTools
using SemanticCaches
using HTTP
## Define the new caching mechanism as a layer for HTTP
## See documentation [here](https://juliaweb.github.io/HTTP.jl/stable/client/#Quick-Examples)
module MyCache
using HTTP, JSON3
using SemanticCaches
const SEM_CACHE = SemanticCache()
const HASH_CACHE = HashCache()
function cache_layer(handler)
return function (req; cache_key::Union{AbstractString,Nothing}=nothing, kw...)
# only apply the cache layer if the user passed `cache_key`
# we could also use the contents of the payload, eg, `cache_key = get(body, "model", "unknown")`
if req.method == "POST" && cache_key !== nothing
body = JSON3.read(copy(req.body))
if occursin("v1/chat/completions", req.target)
## We're in chat completion endpoint
input = join([m["content"] for m in body["messages"]], " ")
elseif occursin("v1/embeddings", req.target)
## We're in embedding endpoint
input = body["input"]
else
## Skip, unknown API
return handler(req; kw...)
end
## Check the cache
@info "Check if we can cache this request ($(length(input)) chars)"
active_cache = length(input) > 5000 ? HASH_CACHE : SEM_CACHE
item = active_cache("key1", input; verbose=2) # change verbosity to 0 to disable detailed logs
if !isvalid(item)
@info "Cache miss! Pinging the API"
# pass the request along to the next layer by calling `cache_layer` arg `handler`
resp = handler(req; kw...)
item.output = resp
# Let's remember it for the next time
push!(active_cache, item)
end
## Return the calculated or cached result
return item.output
end
# pass the request along to the next layer by calling `cache_layer` arg `handler`
# also pass along the trailing keyword args `kw...`
return handler(req; kw...)
end
end
# Create a new client with the auth layer added
HTTP.@client [cache_layer]
end # module
# Let's push the layer globally in all HTTP.jl requests
HTTP.pushlayer!(MyCache.cache_layer)
# HTTP.poplayer!() # to remove it later
# Let's call the API
@time msg = aigenerate("What is the meaning of life?"; http_kwargs=(; cache_key="key1"))
# The first call will be slow as usual, but any subsequent call should be pretty quick - try it a few times!
```
You can also use it for embeddings, eg,
```julia
@time msg = aiembed("how is it going?"; http_kwargs=(; cache_key="key2")) # 0.7s
@time msg = aiembed("how is it going?"; http_kwargs=(; cache_key="key2")) # 0.02s
# Even with a tiny difference (no question mark), it still picks the right cache
@time msg = aiembed("how is it going"; http_kwargs=(; cache_key="key2")) # 0.02s
```
You can remove the cache layer by calling `HTTP.poplayer!()` (and add it again if you made some changes).
You can probe the cache by calling `MyCache.SEM_CACHE` (eg, `MyCache.SEM_CACHE.items[1]`).
## Frequently Asked Questions
**How is the performance?**
The majority of time will be spent in 1) tiny embeddings (for large texts, eg, thousands of tokens) and in calculating cosine similarity (for large caches, eg, over 10k items).
For reference, embedding smaller texts like questions to embed takes only a few milliseconds. Embedding 2000 tokens can take anywhere from 50-100ms.
When it comes to the caching system, there are many locks to avoid faults, but the overhead is still negligible - I ran experiments with 100k sequential insertions and the time per item was only a few milliseconds (dominated by the cosine similarity). If your bottleneck is in the cosine similarity calculation (c. 4ms for 100k items), consider moving vectors into a matrix for continuous memory and/or use Boolean embeddings with Hamming distance (XOR operator, c. order of magnitude speed up).
All in all, the system is faster than necessary for normal workloads with thousands of cached items. You’re more likely to have GC and memory problems if your payloads are big (consider swapping to disk) than to face compute bounds. Remember that the motivation is to prevent API calls that take anywhere between 1-20 seconds!
**How to measure the time it takes to do X?**
Have a look at the example snippets below - time whichever part of it you’re interested in.
```julia
sem_cache = SemanticCache()
# First argument: the key must always match exactly, eg, model, temperature, etc
# Second argument: the input text to be compared with the cache, can be fuzzy matched
item = sem_cache("key1", "say hi!"; verbose = 1) # notice the verbose flag it can 0,1,2 for different level of detail
if !isvalid(item)
@info "cache miss!"
item.output = "expensive result X"
# Save the result to the cache for future reference
push!(sem_cache, item)
end
```
Embedding only (to tune the `min_similarity` threshold or to time the embedding)
```julia
using SemanticCaches.FlashRank: embed
using SemanticCaches: EMBEDDER
@time res = embed(EMBEDDER, "say hi")
# 0.000903 seconds (104 allocations: 19.273 KiB)
# see res.elapsed or res.embeddings
# long inputs (split into several chunks and then combining the embeddings)
@time embed(EMBEDDER, "say hi "^1000)
# 0.032148 seconds (8.11 k allocations: 662.656 KiB)
```
**How to set the `min_similarity` threshold?**
You can set the `min_similarity` threshold by adding the kwarg `active_cache("key1", input; verbose=2, min_similarity=0.95)`.
The default is 0.95, which is a very high threshold. For practical purposes, I'd recommend ~0.9. If you're expecting some typos, you can go even a bit lower (eg, 0.85).
> [!WARNING]
> Be careful with similarity thresholds. It's hard to embed super short sequences well! You might want to adjust the threshold depending on the length of the input.
> Always test them with your inputs!!
If you want to calculate the cosine similarity, remember to `normalize` the embeddings first or divide the dot product by the norms.
```julia
using SemanticCaches.LinearAlgebra: normalize, norm, dot
cosine_similarity = dot(r1.embeddings, r2.embeddings) / (norm(r1.embeddings) * norm(r2.embeddings))
# remember that 1 is the best similarity, -1 is the exact opposite
```
You can compare different inputs to determine the best threshold for your use cases
```julia
emb1 = embed(EMBEDDER, "How is it going?") |> x -> vec(x.embeddings) |> normalize
emb2 = embed(EMBEDDER, "How is it goin'?") |> x -> vec(x.embeddings) |> normalize
dot(emb1, emb2) # 0.944
emb1 = embed(EMBEDDER, "How is it going?") |> x -> vec(x.embeddings) |> normalize
emb2 = embed(EMBEDDER, "How is it goin'") |> x -> vec(x.embeddings) |> normalize
dot(emb1, emb2) # 0.920
```
**How to debug it?**
Enable verbose logging by adding the kwarg `verbose = 2`, eg, `item = active_cache("key1", input; verbose=2)`.
## Roadmap
[ ] Time-based cache validity
[ ] Speed up the embedding process / consider pre-processing the inputs
[ ] Native integration with PromptingTools and the API schemas | SemanticCaches | https://github.com/svilupp/SemanticCaches.jl.git |
|
[
"MIT"
] | 0.2.0 | a5c4f933e59401f83e1c36ccfc40e4ac2fd87e73 | docs | 205 | ```@meta
CurrentModule = SemanticCaches
```
# API Reference
API reference for [SemanticCaches](https://github.com/svilupp/SemanticCaches.jl).
```@index
```
```@autodocs
Modules = [SemanticCaches]
```
| SemanticCaches | https://github.com/svilupp/SemanticCaches.jl.git |
|
[
"MIT"
] | 0.2.0 | a5c4f933e59401f83e1c36ccfc40e4ac2fd87e73 | docs | 11688 | ```@meta
CurrentModule = SemanticCaches
```
# SemanticCaches.jl
Documentation for [SemanticCaches.jl](https://github.com/svilupp/SemanticCaches.jl).
SemanticCaches.jl is a very hacky implementation of a semantic cache for AI applications to save time and money with repeated requests.
It's not particularly fast, because we're trying to prevent API calls that can take even 20 seconds.
Note that we're using a tiny BERT model with a maximum chunk size of 512 tokens to provide fast local embeddings running on a CPU.
For longer sentences, we split them into several chunks and consider their average embedding, but use it carefully! The latency can skyrocket and become worse than simply calling the original API.
## Installation
To install SemanticCaches.jl, simply add the package using the Julia package manager:
```julia
using Pkg;
Pkg.activate(".")
Pkg.add("SemanticCaches")
```
## Quick Start Guide
```julia
## This line is very important to be able to download the models!!!
ENV["DATADEPS_ALWAYS_ACCEPT"] = "true"
using SemanticCaches
sem_cache = SemanticCache()
# First argument: the key must always match exactly, eg, model, temperature, etc
# Second argument: the input text to be compared with the cache, can be fuzzy matched
item = sem_cache("key1", "say hi!"; verbose = 1) # notice the verbose flag it can 0,1,2 for different level of detail
if !isvalid(item)
@info "cache miss!"
item.output = "expensive result X"
# Save the result to the cache for future reference
push!(sem_cache, item)
end
# In practice, long texts may take too long to embed even with our tiny model
# so let's not compare anything above 2000 tokens =~ 5000 characters (threshold of c. 100ms)
hash_cache = HashCache()
input = "say hi"
input = "say hi "^1000
active_cache = length(input) > 5000 ? hash_cache : sem_cache
item = active_cache("key1", input; verbose = 1)
if !isvalid(item)
@info "cache miss!"
item.output = "expensive result X"
push!(active_cache, item)
end
```
## How it Works
The primary objective of building this package was to cache expensive API calls to GenAI models.
The system offers exact matching (faster, `HashCache`) and semantic similarity lookup (slower, `SemanticCache`) of STRING inputs.
In addition, all requests are first compared on a “cache key”, which is a key that must always match exactly for requests to be considered interchangeable (eg, same model, same provider, same temperature, etc).
You need to choose the appropriate cache key and input depending on your use case. A sensible default choice for the cache key is the model name.
What happens when you call the cache (provide `cache_key` and `string_input`)?
- All cached outputs are stored in a vector `cache.items`.
- When we receive a request, the `cache_key` is looked up to find indices of the corresponding items in `items`. If `cache_key` is not found, we return `CachedItem` with an empty `output` field (ie, `isvalid(item) == false`).
- We embed the `string_input` using a tiny BERT model and normalize the embeddings (to make it easier to compare the cosine distance later).
- We then compute the cosine similarity between this embedding and the embeddings of the cached items.
- If the cosine similarity is higher than the `min_similarity` threshold, we return the cached item (the output can be found in the field `item.output`).
If we haven't found any cached item, we return `CachedItem` with an empty `output` field (ie, `isvalid(item) == false`).
Once you calculate the response and save it in `item.output`, you can push the item to the cache by calling `push!(cache, item)`.
## Suitable Use Cases
- This package is great if you know you will have a smaller volume of requests (eg, <10k per session or machine).
- It’s ideal to reduce the costs of running your evals, because even when you change your RAG pipeline configuration many of the calls will be repeated and can take advantage of caching.
- Lastly, this package can be really useful for demos and small user applications, where you can know some of the system inputs upfront, so you can cache them and show incredible response times!
- This package is NOT suitable for production systems with hundreds of thousands of requests and remember that this is a very basic cache that you need to manually invalidate over time!
## Advanced Usage
### Caching HTTP Requests
Based on your knowledge of the API calls made, you need to determine: 1) the cache key (separate store of cached items, eg, different models or temperatures) and 2) how to unpack the HTTP request into a string (eg, unwrap and join the formatted message contents for OpenAI API).
Here's a brief outline of how you can use SemanticCaches.jl with [PromptingTools.jl](https://github.com/svilupp/PromptingTools.jl).
```julia
using PromptingTools
using SemanticCaches
using HTTP
## Define the new caching mechanism as a layer for HTTP
## See documentation [here](https://juliaweb.github.io/HTTP.jl/stable/client/#Quick-Examples)
module MyCache
using HTTP, JSON3
using SemanticCaches
const SEM_CACHE = SemanticCache()
const HASH_CACHE = HashCache()
function cache_layer(handler)
return function (req; cache_key::Union{AbstractString,Nothing}=nothing, kw...)
# only apply the cache layer if the user passed `cache_key`
# we could also use the contents of the payload, eg, `cache_key = get(body, "model", "unknown")`
if req.method == "POST" && cache_key !== nothing
body = JSON3.read(copy(req.body))
if occursin("v1/chat/completions", req.target)
## We're in chat completion endpoint
input = join([m["content"] for m in body["messages"]], " ")
elseif occursin("v1/embeddings", req.target)
## We're in embedding endpoint
input = body["input"]
else
## Skip, unknown API
return handler(req; kw...)
end
## Check the cache
@info "Check if we can cache this request ($(length(input)) chars)"
active_cache = length(input) > 5000 ? HASH_CACHE : SEM_CACHE
item = active_cache("key1", input; verbose=2) # change verbosity to 0 to disable detailed logs
if !isvalid(item)
@info "Cache miss! Pinging the API"
# pass the request along to the next layer by calling `cache_layer` arg `handler`
resp = handler(req; kw...)
item.output = resp
# Let's remember it for the next time
push!(active_cache, item)
end
## Return the calculated or cached result
return item.output
end
# pass the request along to the next layer by calling `cache_layer` arg `handler`
# also pass along the trailing keyword args `kw...`
return handler(req; kw...)
end
end
# Create a new client with the auth layer added
HTTP.@client [cache_layer]
end # module
# Let's push the layer globally in all HTTP.jl requests
HTTP.pushlayer!(MyCache.cache_layer)
# HTTP.poplayer!() # to remove it later
# Let's call the API
@time msg = aigenerate("What is the meaning of life?"; http_kwargs=(; cache_key="key1"))
# The first call will be slow as usual, but any subsequent call should be pretty quick - try it a few times!
```
You can also use it for embeddings, eg,
```julia
@time msg = aiembed("how is it going?"; http_kwargs=(; cache_key="key2")) # 0.7s
@time msg = aiembed("how is it going?"; http_kwargs=(; cache_key="key2")) # 0.02s
# Even with a tiny difference (no question mark), it still picks the right cache
@time msg = aiembed("how is it going"; http_kwargs=(; cache_key="key2")) # 0.02s
```
You can remove the cache layer by calling `HTTP.poplayer!()` (and add it again if you made some changes).
You can probe the cache by calling `MyCache.SEM_CACHE` (eg, `MyCache.SEM_CACHE.items[1]`).
## Frequently Asked Questions
**How is the performance?**
The majority of time will be spent in 1) tiny embeddings (for large texts, eg, thousands of tokens) and in calculating cosine similarity (for large caches, eg, over 10k items).
For reference, embedding smaller texts like questions takes only a few milliseconds. Embedding 2000 tokens can take anywhere from 50-100ms.
When it comes to the caching system, there are many locks to avoid faults, but the overhead is still negligible - I ran experiments with 100k sequential insertions and the time per item was only a few milliseconds (dominated by the cosine similarity). If your bottleneck is in the cosine similarity calculation (c. 4ms for 100k items), consider moving vectors into a matrix for continuous memory and/or use Boolean embeddings with Hamming distance (XOR operator, c. order of magnitude speed up).
All in all, the system is faster than necessary for normal workloads with thousands of cached items. You’re more likely to have GC and memory problems if your payloads are big (consider swapping to disk) than to face compute bounds. Remember that the motivation is to prevent API calls that take anywhere between 1-20 seconds!
**How to measure the time it takes to do X?**
Have a look at the example snippets below - time whichever part of it you’re interested in.
```julia
sem_cache = SemanticCache()
# First argument: the key must always match exactly, eg, model, temperature, etc
# Second argument: the input text to be compared with the cache, can be fuzzy matched
item = sem_cache("key1", "say hi!"; verbose = 1) # notice the verbose flag it can 0,1,2 for different level of detail
if !isvalid(item)
@info "cache miss!"
item.output = "expensive result X"
# Save the result to the cache for future reference
push!(sem_cache, item)
end
```
Embedding only (to tune the `min_similarity` threshold or to time the embedding)
```julia
using SemanticCaches.FlashRank: embed
using SemanticCaches: EMBEDDER
@time res = embed(EMBEDDER, "say hi")
# 0.000903 seconds (104 allocations: 19.273 KiB)
# see res.elapsed or res.embeddings
# long inputs (split into several chunks and then combining the embeddings)
@time embed(EMBEDDER, "say hi "^1000)
# 0.032148 seconds (8.11 k allocations: 662.656 KiB)
```
**How to set the `min_similarity` threshold?**
You can set the `min_similarity` threshold by adding the kwarg `active_cache("key1", input; verbose=2, min_similarity=0.95)`.
The default is 0.95, which is a very high threshold. For practical purposes, I'd recommend ~0.9. If you're expecting some typos, you can go even a bit lower (eg, 0.85).
Be careful though - it's hard to embed super short sequences well! You might want to adjust the threshold depending on the length of the input.
If you want to calculate the cosine similarity, remember to `normalize` the embeddings first or divide the dot product by the norms.
```julia
using SemanticCaches.LinearAlgebra: normalize, norm, dot
cosine_similarity = dot(r1.embeddings, r2.embeddings) / (norm(r1.embeddings) * norm(r2.embeddings))
# remember that 1 is the best similarity, -1 is the exact opposite
```
You can compare different inputs to determine the best threshold for your use cases
```julia
emb1 = embed(EMBEDDER, "How is it going?") |> x -> vec(x.embeddings) |> normalize
emb2 = embed(EMBEDDER, "How is it goin'?") |> x -> vec(x.embeddings) |> normalize
dot(emb1, emb2) # 0.944
emb1 = embed(EMBEDDER, "How is it going?") |> x -> vec(x.embeddings) |> normalize
emb2 = embed(EMBEDDER, "How is it goin'") |> x -> vec(x.embeddings) |> normalize
dot(emb1, emb2) # 0.920
```
**How to debug it?**
Enable verbose logging by adding the kwarg `verbose = 2`, eg, `item = active_cache("key1", input; verbose=2)`.
| SemanticCaches | https://github.com/svilupp/SemanticCaches.jl.git |
|
[
"MIT"
] | 3.4.1 | 7a714b856fe00d6194774bc38e387375f06963fe | code | 609 | using Documenter, DiffEqGPU
# Documentation build script for DiffEqGPU.jl (run as `julia docs/make.jl`, e.g. by CI).
include("pages.jl")

# Copy the documentation environment files into the built site's assets so readers
# can reproduce the exact package versions used for the docs build.
cp("./docs/Manifest.toml", "./docs/src/assets/Manifest.toml", force = true)
cp("./docs/Project.toml", "./docs/src/assets/Project.toml", force = true)

makedocs(sitename = "DiffEqGPU.jl",
    authors = "Chris Rackauckas",
    modules = [DiffEqGPU],
    clean = true, doctest = false, linkcheck = true,
    # Only warn (don't fail the build) on undocumented exported symbols.
    warnonly = [:missing_docs],
    format = Documenter.HTML(assets = ["assets/favicon.ico"],
        canonical = "https://docs.sciml.ai/DiffEqGPU/stable/"),
    pages = pages)

deploydocs(repo = "github.com/SciML/DiffEqGPU.jl.git";
    push_preview = true)
| DiffEqGPU | https://github.com/SciML/DiffEqGPU.jl.git |
|
[
"MIT"
] | 3.4.1 | 7a714b856fe00d6194774bc38e387375f06963fe | code | 877 | # Put in a separate page so it can be used by SciMLDocs.jl
# Site navigation tree consumed by Documenter's `makedocs(; pages = pages)`;
# kept in a standalone file so SciMLDocs.jl can also include it.
pages = ["index.md",
    "getting_started.md",
    "Tutorials" => Any["GPU Ensembles" => Any["tutorials/gpu_ensemble_basic.md",
            "tutorials/parallel_callbacks.md",
            "tutorials/multigpu.md",
            "tutorials/lower_level_api.md",
            "tutorials/weak_order_conv_sde.md"],
        "Within-Method GPU" => Any["tutorials/within_method_gpu.md"]],
    "Examples" => Any["GPU Ensembles" => Any["examples/sde.md",
            "examples/ad.md",
            "examples/reductions.md"],
        "Within-Method GPU" => Any["examples/reaction_diffusion.md",
            "examples/bruss.md"]],
    "Manual" => Any["manual/ensemblegpukernel.md",
        "manual/ensemblegpuarray.md",
        "manual/backends.md",
        "manual/optimal_trajectories.md",
        "manual/choosing_ensembler.md"],
]
| DiffEqGPU | https://github.com/SciML/DiffEqGPU.jl.git |
|
[
"MIT"
] | 3.4.1 | 7a714b856fe00d6194774bc38e387375f06963fe | code | 530 | module AMDGPUExt
# On Julia with package extensions (>= 1.9), AMDGPU is a real dependency of this
# extension; on older Julia, Requires.jl loads this file inside DiffEqGPU, where
# AMDGPU is available as a relative binding (`..AMDGPU`).
isdefined(Base, :get_extension) ? (using AMDGPU) : (using ..AMDGPU)
import DiffEqGPU
using .AMDGPU
import .AMDGPU: ROCBackend

# Convenience constructor: `EnsembleGPUArray(cpu_offload)` targets AMD GPUs via ROCm.
function DiffEqGPU.EnsembleGPUArray(cpu_offload::Float64)
    DiffEqGPU.EnsembleGPUArray(ROCBackend(), cpu_offload)
end

# Kernel-launch tuning for the ROCm backend.
DiffEqGPU.maxthreads(::ROCBackend) = 256
DiffEqGPU.maybe_prefer_blocks(::ROCBackend) = ROCBackend()

# Not yet implemented in AMDGPU
# function DiffEqGPU.lufact!(::ROCBackend, W)
#     AMDGPU.rocBLAS.getrf_strided_batched!(W, false)
#     return nothing
# end
end
| DiffEqGPU | https://github.com/SciML/DiffEqGPU.jl.git |
|
[
"MIT"
] | 3.4.1 | 7a714b856fe00d6194774bc38e387375f06963fe | code | 505 | module CUDAExt
# On Julia with package extensions (>= 1.9), CUDA is a real dependency of this
# extension; on older Julia, Requires.jl loads this file inside DiffEqGPU, where
# CUDA is available as a relative binding (`..CUDA`).
isdefined(Base, :get_extension) ? (using CUDA) : (using ..CUDA)
import DiffEqGPU
using .CUDA
import .CUDA: CUDABackend

# Convenience constructor: `EnsembleGPUArray(cpu_offload)` targets NVIDIA GPUs via CUDA.
function DiffEqGPU.EnsembleGPUArray(cpu_offload::Float64)
    DiffEqGPU.EnsembleGPUArray(CUDABackend(), cpu_offload)
end

# Kernel-launch tuning for the CUDA backend.
DiffEqGPU.maxthreads(::CUDABackend) = 256
DiffEqGPU.maybe_prefer_blocks(::CUDABackend) = CUDABackend(; prefer_blocks = true)

# Batched in-place LU factorization of the stacked W matrices via cuBLAS.
# NOTE(review): the second argument `false` presumably disables pivoting —
# confirm against the CUBLAS `getrf_strided_batched!` wrapper.
function DiffEqGPU.lufact!(::CUDABackend, W)
    CUDA.CUBLAS.getrf_strided_batched!(W, false)
    return nothing
end
end
| DiffEqGPU | https://github.com/SciML/DiffEqGPU.jl.git |
|
[
"MIT"
] | 3.4.1 | 7a714b856fe00d6194774bc38e387375f06963fe | code | 253 | module MetalExt
# On Julia with package extensions (>= 1.9), Metal is a real dependency of this
# extension; on older Julia, Requires.jl loads this file inside DiffEqGPU, where
# Metal is available as a relative binding (`..Metal`).
isdefined(Base, :get_extension) ? (using Metal) : (using ..Metal)
import DiffEqGPU
using .Metal
import .Metal: MetalBackend

# Kernel-launch tuning for the Apple Metal backend.
DiffEqGPU.maxthreads(::MetalBackend) = 256
DiffEqGPU.maybe_prefer_blocks(::MetalBackend) = MetalBackend()
end
| DiffEqGPU | https://github.com/SciML/DiffEqGPU.jl.git |
|
[
"MIT"
] | 3.4.1 | 7a714b856fe00d6194774bc38e387375f06963fe | code | 262 | module oneAPIExt
# On Julia with package extensions (>= 1.9), oneAPI is a real dependency of this
# extension; on older Julia, Requires.jl loads this file inside DiffEqGPU, where
# oneAPI is available as a relative binding (`..oneAPI`).
isdefined(Base, :get_extension) ? (using oneAPI) : (using ..oneAPI)
import DiffEqGPU
using .oneAPI
import .oneAPI: oneAPIBackend

# Kernel-launch tuning for the Intel oneAPI backend.
DiffEqGPU.maxthreads(::oneAPIBackend) = 256
DiffEqGPU.maybe_prefer_blocks(::oneAPIBackend) = oneAPIBackend()
end
| DiffEqGPU | https://github.com/SciML/DiffEqGPU.jl.git |
|
[
"MIT"
] | 3.4.1 | 7a714b856fe00d6194774bc38e387375f06963fe | code | 3747 | """
$(DocStringExtensions.README)
"""
module DiffEqGPU

using DocStringExtensions
using KernelAbstractions
import KernelAbstractions: get_backend, allocate
using SciMLBase, DiffEqBase, LinearAlgebra, Distributed
using ForwardDiff
import ChainRulesCore
import ChainRulesCore: NoTangent
using RecursiveArrayTools
import ZygoteRules
import Base.Threads
using LinearSolve
#For gpu_tsit5
using Adapt, SimpleDiffEq, StaticArrays
using Parameters, MuladdMacro
using Random
using Setfield
# NOTE: a duplicate `using ForwardDiff` was removed here (it already appears above).
import StaticArrays: StaticVecOrMat, @_inline_meta
# import LinearAlgebra: \
import StaticArrays: LU, StaticLUMatrix, arithmetic_closure

# Ensemble algorithm hierarchy: array-based (one big batched ODE) vs.
# kernel-based (one generated GPU kernel per ensemble).
abstract type EnsembleArrayAlgorithm <: SciMLBase.EnsembleAlgorithm end
abstract type EnsembleKernelAlgorithm <: SciMLBase.EnsembleAlgorithm end

##Solvers for EnsembleGPUKernel
abstract type GPUODEAlgorithm <: DiffEqBase.AbstractODEAlgorithm end
abstract type GPUSDEAlgorithm <: DiffEqBase.AbstractSDEAlgorithm end
abstract type GPUODEImplicitAlgorithm{AD} <: GPUODEAlgorithm end

# EnsembleGPUArray machinery
include("ensemblegpuarray/callbacks.jl")
include("ensemblegpuarray/kernels.jl")
include("ensemblegpuarray/problem_generation.jl")
include("ensemblegpuarray/lowerlevel_solve.jl")

# EnsembleGPUKernel machinery: integrators, nonlinear solvers, per-solver steppers
include("ensemblegpukernel/callbacks.jl")
include("ensemblegpukernel/lowerlevel_solve.jl")
include("ensemblegpukernel/gpukernel_algorithms.jl")
include("ensemblegpukernel/linalg/lu.jl")
include("ensemblegpukernel/linalg/linsolve.jl")
include("ensemblegpukernel/alg_utils.jl")
include("ensemblegpukernel/integrators/nonstiff/types.jl")
include("ensemblegpukernel/integrators/stiff/types.jl")
include("ensemblegpukernel/integrators/integrator_utils.jl")
include("ensemblegpukernel/integrators/stiff/interpolants.jl")
include("ensemblegpukernel/integrators/nonstiff/interpolants.jl")
include("ensemblegpukernel/nlsolve/type.jl")
include("ensemblegpukernel/nlsolve/utils.jl")
include("ensemblegpukernel/kernels.jl")
include("ensemblegpukernel/perform_step/gpu_tsit5_perform_step.jl")
include("ensemblegpukernel/perform_step/gpu_vern7_perform_step.jl")
include("ensemblegpukernel/perform_step/gpu_vern9_perform_step.jl")
include("ensemblegpukernel/perform_step/gpu_em_perform_step.jl")
include("ensemblegpukernel/perform_step/gpu_siea_perform_step.jl")
include("ensemblegpukernel/perform_step/gpu_rosenbrock23_perform_step.jl")
include("ensemblegpukernel/perform_step/gpu_rodas4_perform_step.jl")
include("ensemblegpukernel/perform_step/gpu_rodas5P_perform_step.jl")
include("ensemblegpukernel/perform_step/gpu_kvaerno3_perform_step.jl")
include("ensemblegpukernel/perform_step/gpu_kvaerno5_perform_step.jl")
include("ensemblegpukernel/tableaus/verner_tableaus.jl")
include("ensemblegpukernel/tableaus/rodas_tableaus.jl")
include("ensemblegpukernel/tableaus/kvaerno_tableaus.jl")
include("ensemblegpukernel/problems/ode_problems.jl")

include("utils.jl")
include("algorithms.jl")
include("solve.jl")

export EnsembleCPUArray, EnsembleGPUArray, EnsembleGPUKernel, LinSolveGPUSplitFactorize

export GPUTsit5, GPUVern7, GPUVern9, GPUEM, GPUSIEA

## Stiff ODE solvers
export GPURosenbrock23, GPURodas4, GPURodas5P, GPUKvaerno3, GPUKvaerno5

export terminate!

# This symbol is only defined on Julia versions that support extensions;
# on older versions, fall back to Requires.jl-based conditional loading of
# the GPU backend glue code.
if !isdefined(Base, :get_extension)
    using Requires
end

@static if !isdefined(Base, :get_extension)
    function __init__()
        @require CUDA="052768ef-5323-5732-b1bb-66c8b64840ba" include("../ext/CUDAExt.jl")
        @require AMDGPU="21141c5a-9bdb-4563-92ae-f87d6854732e" include("../ext/AMDGPUExt.jl")
        @require oneAPI="8f75cd03-7ff8-4ecb-9b8f-daf728133b1b" include("../ext/oneAPIExt.jl")
        @require Metal="dde4c033-4e86-420c-a63e-0dd931031962" include("../ext/MetalExt.jl")
    end
end

end # module
| DiffEqGPU | https://github.com/SciML/DiffEqGPU.jl.git |
|
[
"MIT"
] | 3.4.1 | 7a714b856fe00d6194774bc38e387375f06963fe | code | 6217 | """
```julia
EnsembleCPUArray(cpu_offload = 0.2)
```

An `EnsembleArrayAlgorithm` which utilizes the CPU kernels to parallelize each ODE solve
with their separate ODE integrator on each kernel. This method is meant to be a debugging
counterpart to `EnsembleGPUArray`, having the same behavior and using the same
KernelAbstractions.jl process to build the combined ODE, but without the restrictions of
`f` being a GPU-compatible kernel function.

It is unlikely that this method is useful beyond library development and debugging, as
almost any case should be faster with `EnsembleThreads` or `EnsembleDistributed`.
"""
struct EnsembleCPUArray <: EnsembleArrayAlgorithm end  # singleton: carries no configuration state
"""
```julia
EnsembleGPUArray(backend, cpu_offload = 0.2)
```

An `EnsembleArrayAlgorithm` which utilizes the GPU kernels to parallelize each ODE solve
with their separate ODE integrator on each kernel.

## Positional Arguments

  - `backend`: the KernelAbstractions backend for performing the computation.
  - `cpu_offload`: the percentage of trajectories to offload to the CPU. Default is 0.2 or
    20% of trajectories.

## Limitations

`EnsembleGPUArray` requires being able to generate a kernel for `f` using
KernelAbstractions.jl and solving the resulting ODE defined over `CuArray` input types.
This introduces the following limitations on its usage:

  - Not all standard Julia `f` functions are allowed. Only Julia `f` functions which are
    capable of being compiled into a GPU kernel are allowed. This notably means that
    certain features of Julia can cause issues inside of kernel, like:

      + Allocating memory (building arrays)
      + Linear algebra (anything that calls BLAS)
      + Broadcast

  - Not all ODE solvers are allowed, only those from OrdinaryDiffEq.jl. The tested feature
    set from OrdinaryDiffEq.jl includes:

      + Explicit Runge-Kutta methods
      + Implicit Runge-Kutta methods
      + Rosenbrock methods
      + DiscreteCallbacks and ContinuousCallbacks

  - Stiff ODEs require the analytical solution of every derivative function it requires.
    For example, Rosenbrock methods require the Jacobian and the gradient with respect to
    time, and so these two functions are required to be given. Note that they can be
    generated by the
    [modelingtoolkitize](https://docs.juliadiffeq.org/latest/tutorials/advanced_ode_example/#Automatic-Derivation-of-Jacobian-Functions-1)
    approach.

  - To use multiple GPUs over clusters, one must manually set up one process per GPU. See the
    multi-GPU tutorial for more details.

!!! warning

    Callbacks with `terminate!` do not work well with `EnsembleGPUArray` as the entire
    integration will halt when any of the trajectories halt. Use with caution.

## Example

```julia
using DiffEqGPU, CUDA, OrdinaryDiffEq
function lorenz(du, u, p, t)
    du[1] = p[1] * (u[2] - u[1])
    du[2] = u[1] * (p[2] - u[3]) - u[2]
    du[3] = u[1] * u[2] - p[3] * u[3]
end

u0 = Float32[1.0;0.0;0.0]
tspan = (0.0f0,100.0f0)
p = [10.0f0,28.0f0,8/3f0]
prob = ODEProblem(lorenz,u0,tspan,p)
prob_func = (prob,i,repeat) -> remake(prob,p=rand(Float32,3).*p)
monteprob = EnsembleProblem(prob, prob_func = prob_func, safetycopy=false)
@time sol = solve(monteprob,Tsit5(),EnsembleGPUArray(CUDA.CUDABackend()),trajectories=10_000,saveat=1.0f0)
```
"""
struct EnsembleGPUArray{Backend} <: EnsembleArrayAlgorithm
    backend::Backend      # KernelAbstractions backend the batched solve runs on
    cpu_offload::Float64  # fraction (0-1) of trajectories solved on the CPU
end
"""
```julia
EnsembleGPUKernel(backend, cpu_offload = 0.0)
```

A massively-parallel ensemble algorithm which generates a unique GPU kernel for the entire
ODE which is then executed. This leads to a very low overhead GPU code generation, but
imparts some extra limitations on the use.

## Positional Arguments

  - `backend`: the KernelAbstractions backend for performing the computation.
  - `cpu_offload`: the percentage of trajectories to offload to the CPU. Default is 0.0 or
    0% of trajectories.

## Limitations

  - Not all standard Julia `f` functions are allowed. Only Julia `f` functions which are
    capable of being compiled into a GPU kernel are allowed. This notably means that
    certain features of Julia can cause issues inside a kernel, like:

      + Allocating memory (building arrays)
      + Linear algebra (anything that calls BLAS)
      + Broadcast

  - Only out-of-place `f` definitions are allowed. Coupled with the requirement of not
    allowing for memory allocations, this means that the ODE must be defined with
    `StaticArray` initial conditions.

  - Only specific ODE solvers are allowed. This includes:

      + GPUTsit5
      + GPUVern7
      + GPUVern9

    (this package also exports the stiff kernel solvers `GPURosenbrock23`, `GPURodas4`,
    `GPURodas5P`, `GPUKvaerno3`, and `GPUKvaerno5`)

  - To use multiple GPUs over clusters, one must manually set up one process per GPU. See the
    multi-GPU tutorial for more details.

## Example

```julia
using DiffEqGPU, CUDA, OrdinaryDiffEq, StaticArrays

function lorenz(u, p, t)
    σ = p[1]
    ρ = p[2]
    β = p[3]
    du1 = σ * (u[2] - u[1])
    du2 = u[1] * (ρ - u[3]) - u[2]
    du3 = u[1] * u[2] - β * u[3]
    return SVector{3}(du1, du2, du3)
end

u0 = @SVector [1.0f0; 0.0f0; 0.0f0]
tspan = (0.0f0, 10.0f0)
p = @SVector [10.0f0, 28.0f0, 8 / 3.0f0]
prob = ODEProblem{false}(lorenz, u0, tspan, p)
prob_func = (prob, i, repeat) -> remake(prob, p = (@SVector rand(Float32, 3)) .* p)
monteprob = EnsembleProblem(prob, prob_func = prob_func, safetycopy = false)

@time sol = solve(monteprob, GPUTsit5(), EnsembleGPUKernel(CUDA.CUDABackend()),
    trajectories = 10_000,
    adaptive = false, dt = 0.1f0)
```
"""
struct EnsembleGPUKernel{Dev} <: EnsembleKernelAlgorithm
    dev::Dev              # KernelAbstractions backend the kernels are launched on
    cpu_offload::Float64  # fraction (0-1) of trajectories solved on the CPU
end
# Map each non-stiff GPU kernel algorithm type to its CPU fallback algorithms used
# for the `cpu_offload` fraction of trajectories: (fixed-step variant, adaptive variant).
# Declared `const` so the global binding is concretely typed and the lookups in
# `__solve` do not go through an `Any`-typed global.
# NOTE(review): the stiff GPU algorithms (GPURosenbrock23, ...) have no entry here,
# so using `cpu_offload > 0` with them would throw a KeyError — confirm intended.
const cpu_alg = Dict(GPUTsit5 => (GPUSimpleTsit5(), GPUSimpleATsit5()),
    GPUVern7 => (GPUSimpleVern7(), GPUSimpleAVern7()),
    GPUVern9 => (GPUSimpleVern9(), GPUSimpleAVern9()))
# Work around the fact that Zygote cannot handle the task system
# Work around the fact that Zygote isderiving fails with constants?
# Convenience constructor: default to offloading 20% of trajectories to the CPU.
EnsembleGPUArray(dev) = EnsembleGPUArray(dev, 0.2)
# Convenience constructor: by default run every trajectory on the GPU (no CPU offload).
EnsembleGPUKernel(dev) = EnsembleGPUKernel(dev, 0.0)
# ChainRules reverse rule for the `EnsembleGPUArray` constructor: the ensemble
# algorithm carries no differentiable state, so the pullback returns `NoTangent()`.
# NOTE(review): `EnsembleGPUArray(0.0)` resolves to the single-`Float64` method
# defined in the GPU backend extensions — confirm an extension is loaded whenever
# this rrule can be hit.
function ChainRulesCore.rrule(::Type{<:EnsembleGPUArray})
    EnsembleGPUArray(0.0), _ -> NoTangent()
end

# Zygote adjoint for the one-argument constructor; forces `cpu_offload = 0.0`
# under AD (see the workaround comments above) and contributes no gradient.
ZygoteRules.@adjoint function EnsembleGPUArray(dev)
    EnsembleGPUArray(dev, 0.0), _ -> nothing
end
| DiffEqGPU | https://github.com/SciML/DiffEqGPU.jl.git |
|
[
"MIT"
] | 3.4.1 | 7a714b856fe00d6194774bc38e387375f06963fe | code | 16364 | function SciMLBase.__solve(ensembleprob::SciMLBase.AbstractEnsembleProblem,
alg::Union{SciMLBase.DEAlgorithm, Nothing,
    DiffEqGPU.GPUODEAlgorithm, DiffEqGPU.GPUSDEAlgorithm},
ensemblealg::Union{EnsembleArrayAlgorithm,
    EnsembleKernelAlgorithm};
trajectories, batch_size = trajectories,
unstable_check = (dt, u, p, t) -> false, adaptive = true,
kwargs...)
    # Entry point for GPU ensemble solves: splits `trajectories` into a GPU
    # portion (solved in `batch_size` batches) and an optional CPU portion
    # (solved concurrently on a background task).
    # A single trajectory gains nothing from batching; solve it serially.
    if trajectories == 1
        return SciMLBase.__solve(ensembleprob, alg, EnsembleSerial(); trajectories = 1,
            kwargs...)
    end
    # CPU offload is only used with the default reduction and when no callback is
    # given (the CPU fallback path does not support callbacks here).
    cpu_trajectories = ((ensemblealg isa EnsembleGPUArray ||
                         ensemblealg isa EnsembleGPUKernel) &&
                        ensembleprob.reduction === SciMLBase.DEFAULT_REDUCTION) &&
                       (haskey(kwargs, :callback) ? kwargs[:callback] === nothing : true) ?
                       round(Int, trajectories * ensemblealg.cpu_offload) : 0
    gpu_trajectories = trajectories - cpu_trajectories
    # Number of GPU batches, rounding up for the remainder.
    num_batches = gpu_trajectories ÷ batch_size
    num_batches * batch_size != gpu_trajectories && (num_batches += 1)
    if cpu_trajectories != 0 && ensembleprob.reduction === SciMLBase.DEFAULT_REDUCTION
        # The tail trajectories run on the CPU with an equivalent CPU algorithm.
        cpu_II = (gpu_trajectories + 1):trajectories
        _alg = if alg isa GPUODEAlgorithm
            # Pick the fixed-step or adaptive CPU counterpart from `cpu_alg`.
            if adaptive == false
                cpu_alg[typeof(alg)][1]
            else
                cpu_alg[typeof(alg)][2]
            end
        elseif alg isa GPUSDEAlgorithm
            if adaptive == false
                SimpleEM()
            else
                error("Adaptive EM is not supported yet.")
            end
        else
            alg
        end
        function f()
            SciMLBase.solve_batch(ensembleprob, _alg, EnsembleThreads(), cpu_II, nothing;
                kwargs...)
        end
        # Run the CPU batch on a background task; result is handed back through a
        # typed single-slot channel so it can overlap with the GPU batches below.
        cpu_sols = Channel{Core.Compiler.return_type(f, Tuple{})}(1)
        t = @task begin
            put!(cpu_sols, f())
        end
        schedule(t)
    end
    if num_batches == 1 && ensembleprob.reduction === SciMLBase.DEFAULT_REDUCTION
        # Fast path: a single GPU batch, optionally concatenated with the CPU results.
        time = @elapsed sol = batch_solve(ensembleprob, alg, ensemblealg,
            1:gpu_trajectories, adaptive;
            unstable_check = unstable_check, kwargs...)
        if cpu_trajectories != 0
            wait(t)
            sol = vcat(sol, take!(cpu_sols))
        end
        return SciMLBase.EnsembleSolution(sol, time, true)
    end
    # NOTE(review): `converged` appears unused below — candidate for removal.
    converged::Bool = false
    # Seed for a custom reduction: either the user-provided `u_init` or an empty
    # container shaped like one batch's output.
    u = ensembleprob.u_init === nothing ?
        similar(batch_solve(ensembleprob, alg, ensemblealg, 1:batch_size, adaptive;
            unstable_check = unstable_check, kwargs...), 0) :
        ensembleprob.u_init
    if nprocs() == 1
        # While pmap works, this makes much better error messages.
        time = @elapsed begin
            sols = map(1:num_batches) do i
                # The last batch may be short (remainder trajectories).
                if i == num_batches
                    I = (batch_size * (i - 1) + 1):gpu_trajectories
                else
                    I = (batch_size * (i - 1) + 1):(batch_size * i)
                end
                batch_data = batch_solve(ensembleprob, alg, ensemblealg, I, adaptive;
                    unstable_check = unstable_check, kwargs...)
                if ensembleprob.reduction !== SciMLBase.DEFAULT_REDUCTION
                    u, _ = ensembleprob.reduction(u, batch_data, I)
                    return u
                else
                    batch_data
                end
            end
        end
    else
        # Multi-process path: distribute batches across workers with pmap.
        time = @elapsed begin
            sols = pmap(1:num_batches) do i
                if i == num_batches
                    I = (batch_size * (i - 1) + 1):gpu_trajectories
                else
                    I = (batch_size * (i - 1) + 1):(batch_size * i)
                end
                x = batch_solve(ensembleprob, alg, ensemblealg, I, adaptive;
                    unstable_check = unstable_check, kwargs...)
                yield()
                if ensembleprob.reduction !== SciMLBase.DEFAULT_REDUCTION
                    u, _ = ensembleprob.reduction(u, x, I)
                else
                    x
                end
            end
        end
    end
    if ensembleprob.reduction === SciMLBase.DEFAULT_REDUCTION
        # Flatten the per-batch solution vectors, appending CPU results if any.
        if cpu_trajectories != 0
            wait(t)
            sols = vcat(reduce(vcat, vec.(sols)), take!(cpu_sols))
        else
            sols = reduce(vcat, sols)
        end
        SciMLBase.EnsembleSolution(sols, time, true)
    else
        # Custom reduction: the final accumulated value is the last entry.
        SciMLBase.EnsembleSolution(sols[end], time, true)
    end
end
"""
    batch_solve(ensembleprob, alg, ensemblealg, I, adaptive; kwargs...)

Solve the subset `I` of ensemble trajectories as one batch. For
`EnsembleGPUKernel`, the problems are made kernel-compatible and dispatched to
the generated-kernel solvers; otherwise the trajectories are fused into one
batched problem via `batch_solve_up`. Returns a vector of per-trajectory
solutions (after applying `ensembleprob.output_func`).
"""
function batch_solve(ensembleprob, alg,
        ensemblealg::Union{EnsembleArrayAlgorithm, EnsembleKernelAlgorithm}, I,
        adaptive;
        kwargs...)
    @assert !isempty(I)
    #@assert all(p->p.f === probs[1].f,probs)
    if ensemblealg isa EnsembleGPUKernel
        # Materialize the per-trajectory problems, converted to a form the GPU
        # kernels can consume. `safetycopy` controls whether prob_func may mutate.
        if ensembleprob.safetycopy
            probs = map(I) do i
                make_prob_compatible(ensembleprob.prob_func(deepcopy(ensembleprob.prob),
                    i,
                    1))
            end
        else
            probs = map(I) do i
                make_prob_compatible(ensembleprob.prob_func(ensembleprob.prob, i, 1))
            end
        end
        # Using inner saveat requires all of them to be of same size,
        # because the dimension of CuMatrix is decided by it.
        # The columns of it are accessed at each thread.
        if !all(Base.Fix2((prob1, prob2) -> isequal(prob1.tspan, prob2.tspan),
                probs[1]),
            probs)
            # Heterogeneous tspans: only allowed under restrictive conditions.
            if !iszero(ensemblealg.cpu_offload)
                error("Different time spans in an Ensemble Simulation with CPU offloading is not supported yet.")
            end
            if get(probs[1].kwargs, :saveat, nothing) === nothing && !adaptive &&
               get(kwargs, :save_everystep, true)
                error("Using different time-spans require either turning off save_everystep or using saveat. If using saveat, it should be of same length across the ensemble.")
            end
            # All per-problem saveats must have the same length (same output width).
            if !all(Base.Fix2((prob1, prob2) -> isequal(sizeof(get(prob1.kwargs, :saveat,
                            nothing)),
                        sizeof(get(prob2.kwargs, :saveat,
                            nothing))), probs[1]),
                probs)
                error("Using different saveat in EnsembleGPUKernel requires all of them to be of same length. Use saveats of same size only.")
            end
        end
        if alg isa Union{GPUODEAlgorithm, GPUSDEAlgorithm}
            # Get inner saveat if global one isn't specified
            _saveat = get(probs[1].kwargs, :saveat, nothing)
            saveat = _saveat === nothing ? get(kwargs, :saveat, nothing) : _saveat
            solts, solus = batch_solve_up_kernel(ensembleprob, probs, alg, ensemblealg, I,
                adaptive; saveat = saveat, kwargs...)
            # Build one DiffEq solution per trajectory from the dense output
            # matrices: column i holds trajectory i's times/states.
            [begin
                ts = @view solts[:, i]
                us = @view solus[:, i]
                # Unused tail slots still hold tspan[1]; trim them. A short fill
                # means the integration terminated early (callback `terminate!`).
                sol_idx = findlast(x -> x != probs[i].tspan[1], ts)
                if sol_idx === nothing
                    @error "No solution found" tspan=probs[i].tspan[1] ts
                    error("Batch solve failed")
                end
                @views ensembleprob.output_func(SciMLBase.build_solution(probs[i],
                        alg,
                        ts[1:sol_idx],
                        us[1:sol_idx],
                        k = nothing,
                        stats = nothing,
                        calculate_error = false,
                        retcode = sol_idx !=
                                  length(ts) ?
                                  ReturnCode.Terminated :
                                  ReturnCode.Success),
                    i)[1]
            end
             for i in eachindex(probs)]
        else
            error("We don't have solvers implemented for this algorithm yet")
        end
    else
        # EnsembleGPUArray / EnsembleCPUArray path: fuse trajectories into one
        # batched problem whose state is the hcat of all initial conditions.
        if ensembleprob.safetycopy
            probs = map(I) do i
                ensembleprob.prob_func(deepcopy(ensembleprob.prob), i, 1)
            end
        else
            probs = map(I) do i
                ensembleprob.prob_func(ensembleprob.prob, i, 1)
            end
        end
        u0 = reduce(hcat, Array(probs[i].u0) for i in 1:length(I))
        if !all(Base.Fix2((prob1, prob2) -> isequal(prob1.tspan, prob2.tspan),
                probs[1]),
            probs)
            # Heterogeneous tspans: rescale every trajectory to the unit interval
            # and carry the original tspan alongside the parameters.
            # Requires prob.p to be isbits otherwise it wouldn't work with ParamWrapper
            @assert all(prob -> isbits(prob.p), probs)
            # Remaking the problem to normalize time span values
            p = reduce(hcat,
                ParamWrapper(probs[i].p, probs[i].tspan)
                for i in 1:length(I))
            # Change the tspan of first problem to (0,1)
            orig_prob = probs[1]
            probs[1] = remake(probs[1];
                tspan = (zero(probs[1].tspan[1]), one(probs[1].tspan[2])))
            sol, solus = batch_solve_up(ensembleprob, probs, alg, ensemblealg, I,
                u0, p; adaptive = adaptive, kwargs...)
            probs[1] = orig_prob
            # Map the unit-interval times back to each trajectory's real tspan.
            [ensembleprob.output_func(SciMLBase.build_solution(probs[i], alg,
                 map(t -> probs[i].tspan[1] +
                          (probs[i].tspan[2] -
                           probs[i].tspan[1]) *
                          t, sol.t), solus[i],
                 stats = sol.stats,
                 retcode = sol.retcode), i)[1]
             for i in 1:length(probs)]
        else
            # Homogeneous tspans: batch the parameters directly.
            p = reduce(hcat,
                probs[i].p isa AbstractArray ? Array(probs[i].p) : probs[i].p
                for i in 1:length(I))
            sol, solus = batch_solve_up(ensembleprob, probs, alg, ensemblealg, I, u0, p;
                adaptive = adaptive, kwargs...)
            [ensembleprob.output_func(SciMLBase.build_solution(probs[i], alg, sol.t,
                 solus[i],
                 stats = sol.stats,
                 retcode = sol.retcode), i)[1]
             for i in 1:length(probs)]
        end
    end
end
"""
    batch_solve_up_kernel(ensembleprob, probs, alg, ensemblealg, I, adaptive; kwargs...)

Run the generated-kernel GPU solvers on the (already kernel-compatible) problems
`probs`. Returns `(solts, solus)`: host `Array`s of times and states with one
column per trajectory.
"""
function batch_solve_up_kernel(ensembleprob, probs, alg, ensemblealg, I, adaptive;
        kwargs...)
    # Convert user callbacks into their GPU-compatible counterparts.
    _callback = CallbackSet(generate_callback(probs[1], length(I), ensemblealg; kwargs...))
    _callback = CallbackSet(convert.(DiffEqGPU.GPUDiscreteCallback,
            _callback.discrete_callbacks)...,
        convert.(DiffEqGPU.GPUContinuousCallback,
            _callback.continuous_callbacks)...)
    # Move the problem array onto the compute device.
    dev = ensemblealg.dev
    probs = adapt(dev, probs)
    #Adaptive version only works with saveat
    if adaptive
        ts, us = vectorized_asolve(probs, ensembleprob.prob, alg;
            kwargs..., callback = _callback)
    else
        ts, us = vectorized_solve(probs, ensembleprob.prob, alg;
            kwargs..., callback = _callback)
    end
    # Copy results back to host memory.
    solus = Array(us)
    solts = Array(ts)
    (solts, solus)
end
# Solve a batch of trajectories by stacking states/parameters as matrix
# columns and integrating one "wide" problem built by `generate_problem`.
# Returns `(sol, solus)` where `solus[j]` holds views of trajectory `j`'s
# state at every saved time point.
function batch_solve_up(ensembleprob, probs, alg, ensemblealg, I, u0, p; kwargs...)
    # Move the stacked initial states and parameters to the array backend.
    if ensemblealg isa EnsembleGPUArray
        backend = ensemblealg.backend
        u0 = adapt(backend, u0)
        p = adapt(backend, p)
    end
    len = length(probs[1].u0)
    if SciMLBase.has_jac(probs[1].f)
        # One len×len Jacobian slice per trajectory (block-diagonal storage).
        if ensemblealg isa EnsembleGPUArray
            backend = ensemblealg.backend
            jac_prototype = allocate(backend, Float32, (len, len, length(I)))
            fill!(jac_prototype, 0.0)
        else
            jac_prototype = zeros(Float32, len, len, length(I))
        end
        if probs[1].f.colorvec !== nothing
            colorvec = repeat(probs[1].f.colorvec, length(I))
        else
            colorvec = repeat(1:length(probs[1].u0), length(I))
        end
    else
        jac_prototype = nothing
        colorvec = nothing
    end
    _callback = generate_callback(probs[1], length(I), ensemblealg; kwargs...)
    prob = generate_problem(probs[1], u0, p, jac_prototype, colorvec)
    if hasproperty(alg, :linsolve)
        # Swap in the batched LU solver; nfacts (-1) is filled in at init time.
        _alg = remake(alg, linsolve = LinSolveGPUSplitFactorize(len, -1))
    else
        _alg = alg
    end
    sol = solve(prob, _alg; kwargs..., callback = _callback, merge_callbacks = false,
        internalnorm = diffeqgpunorm)
    us = Array.(sol.u)
    # solus[j][i] is a view of trajectory j's state at save index i.
    solus = [[@view(us[i][:, j]) for i in 1:length(us)] for j in 1:length(probs)]
    (sol, solus)
end
# Seed the parameter matrix `x` with ForwardDiff partials (tag `T`) so one
# forward solve yields sensitivities w.r.t. every parameter. The chunk size
# N spans a full column of `x` (all parameters of one trajectory); parameter
# row `i` receives unit seed `i`.
function seed_duals(x::Matrix{V}, ::Type{T},
        ::ForwardDiff.Chunk{N} = ForwardDiff.Chunk(@view(x[:, 1]),
            typemax(Int64))) where {V, T,
        N}
    seeds = ForwardDiff.construct_seeds(ForwardDiff.Partials{N, V})
    duals = [ForwardDiff.Dual{T}(x[i, j], seeds[i])
             for i in 1:size(x, 1), j in 1:size(x, 2)]
end
# Split batched dual-number solutions into per-trajectory sensitivity blocks.
# `us` is a vector over saved time steps; each element is a
# (state × trajectory) matrix of `ForwardDiff.Dual` entries. Returns, for each
# trajectory `k`, a vector (over time) of (state × npartials) matrices.
function extract_dus(us)
    jsize = size(us[1], 1), ForwardDiff.npartials(us[1][1])
    utype = typeof(ForwardDiff.value(us[1][1]))
    map(1:size(us[1], 2)) do k
        map(us) do u
            du_i = zeros(utype, jsize)
            # Collect the partials of every state component. The previous
            # `for i in size(u, 1)` iterated once over the integer
            # `size(u, 1)` and therefore filled only the last row.
            for i in 1:size(u, 1)
                du_i[i, :] = ForwardDiff.partials(u[i, k])
            end
            du_i
        end
    end
end
# ForwardDiff tag type labeling the dual numbers seeded for the adjoint pass.
struct DiffEqGPUAdjTag end
# Reverse rule for `batch_solve_up` (forward-over-reverse): the forward pass
# solves once with parameters seeded as ForwardDiff duals, and the pullback
# contracts the stored per-trajectory sensitivities with the incoming
# cotangent of the solution values — no second solve is needed.
function ChainRulesCore.rrule(::typeof(batch_solve_up), ensembleprob, probs, alg,
        ensemblealg, I, u0, p; kwargs...)
    pdual = seed_duals(p, DiffEqGPUAdjTag)
    u0 = convert.(eltype(pdual), u0)
    if ensemblealg isa EnsembleGPUArray
        backend = ensemblealg.backend
        u0 = adapt(backend, u0)
        pdual = adapt(backend, pdual)
    end
    len = length(probs[1].u0)
    if SciMLBase.has_jac(probs[1].f)
        # Per-trajectory Jacobian slices, as in `batch_solve_up`.
        if ensemblealg isa EnsembleGPUArray
            backend = ensemblealg.backend
            jac_prototype = allocate(backend, Float32, (len, len, length(I)))
            fill!(jac_prototype, 0.0)
        else
            jac_prototype = zeros(Float32, len, len, length(I))
        end
        if probs[1].f.colorvec !== nothing
            colorvec = repeat(probs[1].f.colorvec, length(I))
        else
            colorvec = repeat(1:length(probs[1].u0), length(I))
        end
    else
        jac_prototype = nothing
        colorvec = nothing
    end
    _callback = generate_callback(probs[1], length(I), ensemblealg)
    prob = generate_problem(probs[1], u0, pdual, jac_prototype, colorvec)
    if hasproperty(alg, :linsolve)
        _alg = remake(alg, linsolve = LinSolveGPUSplitFactorize(len, -1))
    else
        _alg = alg
    end
    sol = solve(prob, _alg; kwargs..., callback = _callback, merge_callbacks = false,
        internalnorm = diffeqgpunorm)
    us = Array.(sol.u)
    # Primal output: strip the dual parts before handing values back.
    solus = [[ForwardDiff.value.(@view(us[i][:, j])) for i in 1:length(us)]
             for j in 1:length(probs)]
    function batch_solve_up_adjoint(Δ)
        # dus[j][i] is trajectory j's (state × nparams) sensitivity at step i.
        dus = extract_dus(us)
        _Δ = Δ[2]
        adj = map(eachindex(dus)) do j
            sum(eachindex(dus[j])) do i
                J = dus[j][i]
                if _Δ[j] isa AbstractVector
                    v = _Δ[j][i]
                else
                    v = @view _Δ[j][i]
                end
                # vector-Jacobian product accumulated over time steps
                J'v
            end
        end
        # Only `p` (argument 8) receives a tangent; the rest are NoTangent.
        (ntuple(_ -> NoTangent(), 7)..., Array(VectorOfArray(adj)))
    end
    (sol, solus), batch_solve_up_adjoint
end
# Multithreaded ensemble batching: split the trajectory indices `II` into
# `nthreads() - 1` chunks and solve each chunk serially on its own thread.
function solve_batch(prob, alg, ensemblealg::EnsembleThreads, II, pmap_batch_size;
        kwargs...)
    # Degenerate cases fall back to the serial path.
    if length(II) == 1 || Threads.nthreads() == 1
        return SciMLBase.solve_batch(prob, alg, EnsembleSerial(), II, pmap_batch_size;
            kwargs...)
    end
    # NOTE(review): `probs` is built here (per-thread deepcopies for jump
    # problems) but never used below — `tmap` closes over `prob` directly.
    # Looks like a leftover; confirm against upstream before removing.
    if prob.prob isa SciMLBase.AbstractJumpProblem && length(II) != 1
        probs = [deepcopy(prob.prob) for i in 1:Threads.nthreads()]
    else
        probs = prob.prob
    end
    #
    batch_size = length(II) ÷ (Threads.nthreads() - 1)
    batch_data = tmap(1:(Threads.nthreads() - 1)) do i
        # The last chunk absorbs the remainder of the division.
        if i == Threads.nthreads() - 1
            I_local = II[(batch_size * (i - 1) + 1):end]
        else
            I_local = II[(batch_size * (i - 1) + 1):(batch_size * i)]
        end
        SciMLBase.solve_batch(prob, alg, EnsembleSerial(), I_local, pmap_batch_size;
            kwargs...)
    end
    SciMLBase.tighten_container_eltype(batch_data)
end
# Threaded `map`: apply `f` elementwise over the zipped `args` with
# `Threads.@threads`, then flatten the per-element results with `vcat`.
# The element type is taken from inference on the first elements.
function tmap(f, args...)
    n = length(args[1])
    T = Core.Compiler.return_type(f, Tuple{typeof.(getindex.(args, 1))...})
    out = Vector{T}(undef, n)
    Threads.@threads for idx in 1:n
        out[idx] = f(getindex.(args, idx)...)
    end
    return reduce(vcat, out)
end
| DiffEqGPU | https://github.com/SciML/DiffEqGPU.jl.git |
|
[
"MIT"
# Internal norm used by the batched solvers: root-mean-square over all stacked
# states; for ForwardDiff duals only the value part contributes.
diffeqgpunorm(u::AbstractArray, t) = sqrt.(sum(abs2, u) ./ length(u))
function diffeqgpunorm(u::Union{AbstractFloat, Complex}, t)
    return abs(u)
end
function diffeqgpunorm(u::AbstractArray{<:ForwardDiff.Dual}, t)
    return sqrt.(sum(abs2 ∘ ForwardDiff.value, u) ./ length(u))
end
function diffeqgpunorm(u::ForwardDiff.Dual, t)
    return abs(ForwardDiff.value(u))
end
# Convert problems into kernel-compatible forms; the generic fallback is a
# no-op, while ODE problems become immutable so they can live in GPU memory.
make_prob_compatible(prob) = prob
function make_prob_compatible(prob::T) where {T <: ODEProblem}
    convert(ImmutableODEProblem, prob)
end
| DiffEqGPU | https://github.com/SciML/DiffEqGPU.jl.git |
|
[
"MIT"
# Lift a scalar `ContinuousCallback` into a batched `VectorContinuousCallback`
# whose condition/affect launch one kernel work-item per trajectory column.
# Kernel-based ensembles handle callbacks inside the solver kernel itself and
# keep the callback untouched.
function generate_callback(callback::ContinuousCallback, I, ensemblealg)
    ensemblealg isa EnsembleGPUKernel && return callback
    user_condition = callback.condition
    user_affect! = callback.affect!
    user_affect_neg! = callback.affect_neg!
    function batched_condition(out, u, t, integrator)
        backend = get_backend(u)
        wgs = workgroupsize(backend, size(u, 2))
        continuous_condition_kernel(backend)(user_condition, out, u, t, integrator.p;
            ndrange = size(u, 2),
            workgroupsize = wgs)
        return nothing
    end
    function batched_affect!(integrator, event_idx)
        backend = get_backend(integrator.u)
        wgs = workgroupsize(backend, size(integrator.u, 2))
        continuous_affect!_kernel(backend)(user_affect!, event_idx, integrator.u,
            integrator.t, integrator.p;
            ndrange = size(integrator.u, 2),
            workgroupsize = wgs)
    end
    function batched_affect_neg!(integrator, event_idx)
        backend = get_backend(integrator.u)
        wgs = workgroupsize(backend, size(integrator.u, 2))
        continuous_affect!_kernel(backend)(user_affect_neg!, event_idx, integrator.u,
            integrator.t, integrator.p;
            ndrange = size(integrator.u, 2),
            workgroupsize = wgs)
    end
    return VectorContinuousCallback(batched_condition, batched_affect!,
        batched_affect_neg!, I,
        save_positions = callback.save_positions)
end
# Convert every member of a `CallbackSet` (continuous first, then discrete)
# and re-bundle the results into a fresh set.
function generate_callback(callback::CallbackSet, I, ensemblealg)
    members = (callback.continuous_callbacks..., callback.discrete_callbacks...)
    converted = map(cb -> generate_callback(cb, I, ensemblealg), members)
    return CallbackSet(converted...)
end
# An empty callback tuple means the user supplied no callbacks.
function generate_callback(::Tuple{}, I, ensemblealg)
    return nothing
end
# Fallback for unsupported callback types.
# NOTE(review): the comment says this catches VectorContinuousCallbacks, but
# this method takes one argument while the conversion entry points take three
# (`cb, I, ensemblealg`), so a 3-argument call with a VectorContinuousCallback
# would instead hit the generic `(prob, I, ensemblealg)` method — confirm.
function generate_callback(x)
    # will catch any VectorContinuousCallbacks
    error("Callback unsupported")
end
# Entry point used by the batch solvers: gather callbacks attached to the
# problem and (when `merge_callbacks = true`) those passed as solver kwargs,
# convert both, and merge them. Returns `nothing` when there are none.
function generate_callback(prob, I, ensemblealg; kwargs...)
    prob_cb = get(prob.kwargs, :callback, ())
    kwarg_cb = get(kwargs, :merge_callbacks, false) ? get(kwargs, :callback, ()) : ()
    # `=== nothing` must be checked first: `isempty(nothing)` would throw.
    has_prob_cb = !(prob_cb === nothing || isempty(prob_cb))
    has_kwarg_cb = !(kwarg_cb === nothing || isempty(kwarg_cb))
    if !has_prob_cb && !has_kwarg_cb
        return nothing
    end
    return CallbackSet(generate_callback(prob_cb, I, ensemblealg),
        generate_callback(kwarg_cb, I, ensemblealg))
end
| DiffEqGPU | https://github.com/SciML/DiffEqGPU.jl.git |
|
[
"MIT"
"""
    ParamWrapper(params, data)

Wrapper for modifying parameters to carry additional per-trajectory data.
Useful for simulating trajectories with different time span values: the
original `tspan` travels with the parameters while the integration itself
runs on a normalized interval.
"""
struct ParamWrapper{P, T}
    params::P
    data::T
end

# Adapt both halves so the wrapper can move to and from device memory.
function Adapt.adapt_structure(to, ps::ParamWrapper{P, T}) where {P, T}
    return ParamWrapper(adapt(to, ps.params), adapt(to, ps.data))
end
# Batched RHS kernel (in-place) for time-span-normalized problems: each
# work-item advances one trajectory column. The derivative is rescaled by
# (tf - t0) because integration runs on the unit interval.
@kernel function gpu_kernel(f, du, @Const(u),
        @Const(params::AbstractArray{ParamWrapper{P, T}}),
        @Const(t)) where {P, T}
    i = @index(Global, Linear)
    @inbounds p = params[i].params
    @inbounds tspan = params[i].data
    @views @inbounds f(du[:, i], u[:, i], p, t)
    @inbounds for j in 1:size(du, 1)
        du[j, i] = du[j, i] * (tspan[2] - tspan[1])
    end
end
# Out-of-place variant: the user RHS returns the derivative, which is copied
# into `du` column-by-column with the same time-span rescaling.
@kernel function gpu_kernel_oop(f, du, @Const(u),
        @Const(params::AbstractArray{ParamWrapper{P, T}}),
        @Const(t)) where {P, T}
    i = @index(Global, Linear)
    @inbounds p = params[i].params
    @inbounds tspan = params[i].data
    @views @inbounds x = f(u[:, i], p, t)
    @inbounds for j in 1:size(du, 1)
        du[j, i] = x[j] * (tspan[2] - tspan[1])
    end
end
# Batched RHS kernels without time-span rescaling. When the parameter eltype
# is numeric, parameters are stacked as columns of a matrix; otherwise `p` is
# an array of per-trajectory parameter objects.
@kernel function gpu_kernel(f, du, @Const(u), @Const(p), @Const(t))
    i = @index(Global, Linear)
    if eltype(p) <: Number
        @views @inbounds f(du[:, i], u[:, i], p[:, i], t)
    else
        @views @inbounds f(du[:, i], u[:, i], p[i], t)
    end
end
# Out-of-place variant: copy the returned derivative into `du`.
@kernel function gpu_kernel_oop(f, du, @Const(u), @Const(p), @Const(t))
    i = @index(Global, Linear)
    if eltype(p) <: Number
        @views @inbounds x = f(u[:, i], p[:, i], t)
    else
        @views @inbounds x = f(u[:, i], p[i], t)
    end
    @inbounds for j in 1:size(du, 1)
        du[j, i] = x[j]
    end
end
# Batched Jacobian kernels for time-span-normalized problems: each work-item
# writes its trajectory's diagonal block of the big block-diagonal `J`,
# rescaled by (tf - t0). The 0-based `i` makes the block range arithmetic
# simpler; `params`/`u` are indexed with `i + 1`.
@kernel function jac_kernel(f, J, @Const(u),
        @Const(params::AbstractArray{ParamWrapper{P, T}}),
        @Const(t)) where {P, T}
    i = @index(Global, Linear) - 1
    section = (1 + (i * size(u, 1))):((i + 1) * size(u, 1))
    @inbounds p = params[i + 1].params
    @inbounds tspan = params[i + 1].data
    @views @inbounds f(J[section, section], u[:, i + 1], p, t)
    @inbounds for j in section, k in section
        J[k, j] = J[k, j] * (tspan[2] - tspan[1])
    end
end
# Out-of-place variant: the user Jacobian returns a matrix that is copied
# into the trajectory's block with the same rescaling.
@kernel function jac_kernel_oop(f, J, @Const(u),
        @Const(params::AbstractArray{ParamWrapper{P, T}}),
        @Const(t)) where {P, T}
    i = @index(Global, Linear) - 1
    section = (1 + (i * size(u, 1))):((i + 1) * size(u, 1))
    @inbounds p = params[i + 1].params
    @inbounds tspan = params[i + 1].data
    @views @inbounds x = f(u[:, i + 1], p, t)
    @inbounds for j in section, k in section
        J[k, j] = x[k, j] * (tspan[2] - tspan[1])
    end
end
# Batched Jacobian kernels without time-span rescaling; parameter handling
# mirrors `gpu_kernel` (matrix columns for numeric eltype, array of objects
# otherwise).
@kernel function jac_kernel(f, J, @Const(u), @Const(p), @Const(t))
    i = @index(Global, Linear) - 1
    section = (1 + (i * size(u, 1))):((i + 1) * size(u, 1))
    if eltype(p) <: Number
        @views @inbounds f(J[section, section], u[:, i + 1], p[:, i + 1], t)
    else
        @views @inbounds f(J[section, section], u[:, i + 1], p[i + 1], t)
    end
end
# Out-of-place variant: copy the returned Jacobian into the diagonal block.
@kernel function jac_kernel_oop(f, J, @Const(u), @Const(p), @Const(t))
    i = @index(Global, Linear) - 1
    section = (1 + (i * size(u, 1))):((i + 1) * size(u, 1))
    if eltype(p) <: Number
        @views @inbounds x = f(u[:, i + 1], p[:, i + 1], t)
    else
        @views @inbounds x = f(u[:, i + 1], p[i + 1], t)
    end
    @inbounds for j in section, k in section
        J[k, j] = x[k, j]
    end
end
# Evaluate a user discrete-callback condition for every trajectory column,
# storing the Bool results in `cur`. The user function sees a FakeIntegrator
# exposing that trajectory's state/parameters.
@kernel function discrete_condition_kernel(condition, cur, @Const(u), @Const(t), @Const(p))
    i = @index(Global, Linear)
    @views @inbounds cur[i] = condition(u[:, i], t, FakeIntegrator(u[:, i], t, p[:, i]))
end
# Apply the user affect! to every trajectory whose condition fired.
@kernel function discrete_affect!_kernel(affect!, cur, u, t, p)
    i = @index(Global, Linear)
    @views @inbounds cur[i] && affect!(FakeIntegrator(u[:, i], t, p[:, i]))
end
# Continuous-callback condition: write each trajectory's rootfinding value.
@kernel function continuous_condition_kernel(condition, out, @Const(u), @Const(t),
        @Const(p))
    i = @index(Global, Linear)
    @views @inbounds out[i] = condition(u[:, i], t, FakeIntegrator(u[:, i], t, p[:, i]))
end
# Apply the user affect! only to the trajectories listed in `event_idx`.
@kernel function continuous_affect!_kernel(affect!, event_idx, u, t, p)
    for i in event_idx
        @views @inbounds affect!(FakeIntegrator(u[:, i], t, p[:, i]))
    end
end
# Backend launch-configuration helpers for the CPU backend.
maxthreads(::CPU) = 1024
maybe_prefer_blocks(::CPU) = CPU()
# Cap the workgroup size by the backend's thread limit and the problem size.
function workgroupsize(backend, n)
    min(maxthreads(backend), n)
end
# Batched assembly of W = gamma*J - I (in-place Jacobian) for time-span-
# normalized problems: each work-item fills its trajectory's len×len slice,
# rescaling J by (tf - t0) to match the unit-interval integration.
@kernel function W_kernel(jac, W, @Const(u),
        @Const(params::AbstractArray{ParamWrapper{P, T}}), @Const(gamma),
        @Const(t)) where {P, T}
    i = @index(Global, Linear)
    len = size(u, 1)
    _W = @inbounds @view(W[:, :, i])
    @inbounds p = params[i].params
    @inbounds tspan = params[i].data
    @views @inbounds jac(_W, u[:, i], p, t)
    # Scale by gamma and the trajectory's time-span length (loop-local `i`
    # shadows the work-item index; harmless since _W/p/tspan are captured).
    @inbounds for i in eachindex(_W)
        _W[i] = gamma * _W[i] * (tspan[2] - tspan[1])
    end
    # Subtract the identity: W = gamma*J - I.
    _one = one(eltype(_W))
    @inbounds for i in 1:len
        _W[i, i] = _W[i, i] - _one
    end
end
# Same assembly without time-span rescaling (column-stacked parameters).
@kernel function W_kernel(jac, W, @Const(u), @Const(p), @Const(gamma), @Const(t))
    i = @index(Global, Linear)
    len = size(u, 1)
    _W = @inbounds @view(W[:, :, i])
    @views @inbounds jac(_W, u[:, i], p[:, i], t)
    @inbounds for i in eachindex(_W)
        _W[i] = gamma * _W[i]
    end
    _one = one(eltype(_W))
    @inbounds for i in 1:len
        _W[i, i] = _W[i, i] - _one
    end
end
# Out-of-place counterparts of `W_kernel`: the user Jacobian returns a matrix
# that is copied into the trajectory's W slice before scaling by gamma and
# subtracting the identity.
@kernel function W_kernel_oop(jac, W, @Const(u),
        @Const(params::AbstractArray{ParamWrapper{P, T}}),
        @Const(gamma),
        @Const(t)) where {P, T}
    i = @index(Global, Linear)
    len = size(u, 1)
    @inbounds p = params[i].params
    @inbounds tspan = params[i].data
    _W = @inbounds @view(W[:, :, i])
    @views @inbounds x = jac(u[:, i], p, t)
    # Copy with (tf - t0) rescaling for the normalized time span.
    @inbounds for j in 1:length(_W)
        _W[j] = x[j] * (tspan[2] - tspan[1])
    end
    @inbounds for i in eachindex(_W)
        _W[i] = gamma * _W[i]
    end
    _one = one(eltype(_W))
    @inbounds for i in 1:len
        _W[i, i] = _W[i, i] - _one
    end
end
# Variant without time-span rescaling.
@kernel function W_kernel_oop(jac, W, @Const(u), @Const(p), @Const(gamma), @Const(t))
    i = @index(Global, Linear)
    len = size(u, 1)
    _W = @inbounds @view(W[:, :, i])
    @views @inbounds x = jac(u[:, i], p[:, i], t)
    @inbounds for j in 1:length(_W)
        _W[j] = x[j]
    end
    @inbounds for i in eachindex(_W)
        _W[i] = gamma * _W[i]
    end
    _one = one(eltype(_W))
    @inbounds for i in 1:len
        _W[i, i] = _W[i, i] - _one
    end
end
# Batched assembly of the transformed W operator: Wt = J - inv(gamma)*I.
# The `f::AbstractArray` variants hold one function object per trajectory.
# NOTE(review): these pull `f[i].tgrad` and use it as the Jacobian — that
# field name is surprising for a W factorization; confirm against the caller
# that `.tgrad` indeed stores the intended per-trajectory function here.
@kernel function Wt_kernel(f::AbstractArray{T}, W, @Const(u), @Const(p), @Const(gamma),
        @Const(t)) where {T}
    i = @index(Global, Linear)
    len = size(u, 1)
    _W = @inbounds @view(W[:, :, i])
    @inbounds jac = f[i].tgrad
    @views @inbounds jac(_W, u[:, i], p[:, i], t)
    @inbounds for i in 1:len
        _W[i, i] = -inv(gamma) + _W[i, i]
    end
end
# Shared-Jacobian, in-place variant.
@kernel function Wt_kernel(jac, W, @Const(u), @Const(p), @Const(gamma), @Const(t))
    i = @index(Global, Linear)
    len = size(u, 1)
    _W = @inbounds @view(W[:, :, i])
    @views @inbounds jac(_W, u[:, i], p[:, i], t)
    @inbounds for i in 1:len
        _W[i, i] = -inv(gamma) + _W[i, i]
    end
end
# Out-of-place variants: copy the returned matrix, then shift the diagonal.
@kernel function Wt_kernel_oop(f::AbstractArray{T}, W, @Const(u), @Const(p), @Const(gamma),
        @Const(t)) where {T}
    i = @index(Global, Linear)
    len = size(u, 1)
    _W = @inbounds @view(W[:, :, i])
    @inbounds jac = f[i].tgrad
    @views @inbounds x = jac(u[:, i], p[:, i], t)
    @inbounds for j in 1:length(_W)
        _W[j] = x[j]
    end
    @inbounds for i in 1:len
        _W[i, i] = -inv(gamma) + _W[i, i]
    end
end
@kernel function Wt_kernel_oop(jac, W, @Const(u), @Const(p), @Const(gamma), @Const(t))
    i = @index(Global, Linear)
    len = size(u, 1)
    _W = @inbounds @view(W[:, :, i])
    @views @inbounds x = jac(u[:, i], p[:, i], t)
    @inbounds for j in 1:length(_W)
        _W[j] = x[j]
    end
    @inbounds for i in 1:len
        _W[i, i] = -inv(gamma) + _W[i, i]
    end
end
# Batched time-gradient kernels: evaluate each trajectory's `tgrad` function
# (stored per trajectory in `f`) into its column of `du`.
@kernel function gpu_kernel_tgrad(f::AbstractArray{T}, du, @Const(u), @Const(p),
        @Const(t)) where {T}
    i = @index(Global, Linear)
    @inbounds f = f[i].tgrad
    if eltype(p) <: Number
        @views @inbounds f(du[:, i], u[:, i], p[:, i], t)
    else
        @views @inbounds f(du[:, i], u[:, i], p[i], t)
    end
end
# Out-of-place variant: copy the returned gradient into `du`.
@kernel function gpu_kernel_oop_tgrad(f::AbstractArray{T}, du, @Const(u), @Const(p),
        @Const(t)) where {T}
    i = @index(Global, Linear)
    @inbounds f = f[i].tgrad
    if eltype(p) <: Number
        @views @inbounds x = f(u[:, i], p[:, i], t)
    else
        @views @inbounds x = f(u[:, i], p[i], t)
    end
    @inbounds for j in 1:size(du, 1)
        du[j, i] = x[j]
    end
end
# Factorize each len×len slice of the batched W array in place (CPU path);
# the slices are LU-factorized without pivoting by `generic_lufact!`.
function lufact!(::CPU, W)
    n = size(W, 1)
    for slice in axes(W, 3)
        Wslice = @inbounds @view(W[:, :, slice])
        generic_lufact!(Wslice, n)
    end
    return nothing
end
# Minimal integrator stand-in handed to user callback functions inside the
# batched kernels; exposes only the per-trajectory state, time, and parameters.
struct FakeIntegrator{uType, tType, P}
    u::uType
    t::tType
    p::P
end
### GPU Factorization
"""
A parameter-parallel `SciMLLinearSolveAlgorithm`.
"""
struct LinSolveGPUSplitFactorize <: LinearSolve.SciMLLinearSolveAlgorithm
    len::Int     # state dimension of a single trajectory
    nfacts::Int  # number of factorizations (trajectories) in the batch
end
LinSolveGPUSplitFactorize() = LinSolveGPUSplitFactorize(0, 0)
LinearSolve.needs_concrete_A(::LinSolveGPUSplitFactorize) = true
# At solver-init time, replace the placeholder count (constructed as -1 in
# `batch_solve_up`) with the actual batch size derived from the state length.
function LinearSolve.init_cacheval(linsol::LinSolveGPUSplitFactorize, A, b, u, Pl, Pr,
        maxiters::Int, abstol, reltol, verbose::Bool,
        assumptions::LinearSolve.OperatorAssumptions)
    LinSolveGPUSplitFactorize(linsol.len, length(u) ÷ linsol.len)
end
# Batched triangular solve: `A` already holds per-trajectory LU factors (see
# `lufact!`), so only the forward/backward substitution kernel is launched,
# one work-item per trajectory, operating on `x` in place.
function SciMLBase.solve!(cache::LinearSolve.LinearCache, alg::LinSolveGPUSplitFactorize,
        args...; kwargs...)
    p = cache.cacheval
    A = cache.A
    b = cache.b
    x = cache.u
    version = get_backend(b)
    copyto!(x, b)
    wgs = workgroupsize(version, p.nfacts)
    # Note that the matrix is already factorized, only ldiv is needed.
    ldiv!_kernel(version)(A, x, p.len, p.nfacts;
        ndrange = p.nfacts,
        workgroupsize = wgs)
    SciMLBase.build_linear_solution(alg, x, nothing, cache)
end
# Old stuff
# Legacy DiffEqBase linsolve-functor interface, kept for backward
# compatibility with the pre-LinearSolve.jl API.
function (p::LinSolveGPUSplitFactorize)(x, A, b, update_matrix = false; kwargs...)
    version = get_backend(b)
    copyto!(x, b)
    wgs = workgroupsize(version, p.nfacts)
    ldiv!_kernel(version)(A, x, p.len, p.nfacts;
        ndrange = p.nfacts,
        workgroupsize = wgs)
    return nothing
end
# Legacy init hook: build the solver sized from the prototype state.
function (p::LinSolveGPUSplitFactorize)(::Type{Val{:init}}, f, u0_prototype)
    LinSolveGPUSplitFactorize(size(u0_prototype)...)
end
# Per-trajectory triangular solve kernel: work-item i solves its LU slice
# against its contiguous segment of the stacked right-hand side `u`.
@kernel function ldiv!_kernel(W, u, @Const(len), @Const(nfacts))
    i = @index(Global, Linear)
    section = (1 + ((i - 1) * len)):(i * len)
    _W = @inbounds @view(W[:, :, i])
    _u = @inbounds @view u[section]
    naivesolve!(_W, _u, len)
end
# In-place LU factorization of the leading minmn×minmn block of `A` without
# pivoting (safe only for the well-conditioned W matrices built upstream).
# After the call, the strict lower triangle holds L (unit diagonal implied)
# and the upper triangle holds U.
function generic_lufact!(A::AbstractMatrix{T}, minmn) where {T}
    @inbounds for pivot in 1:minmn
        # Scale the pivot column to form the L entries.
        pivinv = inv(A[pivot, pivot])
        for row in (pivot + 1):minmn
            A[row, pivot] *= pivinv
        end
        # Rank-1 update of the trailing submatrix.
        for col in (pivot + 1):minmn, row in (pivot + 1):minmn
            A[row, col] -= A[row, pivot] * A[pivot, col]
        end
    end
    return nothing
end
# Lightweight triangular views over a packed LU factor (no pivoting):
# MyL reads the strict lower triangle with an implicit unit diagonal,
# MyU reads the upper triangle including the diagonal.
struct MyL{T} # UnitLowerTriangular
    data::T
end
struct MyU{T} # UpperTriangular
    data::T
end

# Backward substitution: overwrite `b` with U \ b.
function naivesub!(A::MyU, b::AbstractVector, n)
    @inbounds for col in n:-1:1
        b[col] = A.data[col, col] \ b[col]
        pivot = b[col]
        for row in (col - 1):-1:1
            b[row] -= A.data[row, col] * pivot
        end
    end
    return nothing
end
# Forward substitution with unit diagonal: overwrite `b` with L \ b.
function naivesub!(A::MyL, b::AbstractVector, n)
    @inbounds for col in 1:n
        pivot = b[col]
        for row in (col + 1):n
            b[row] -= A.data[row, col] * pivot
        end
    end
    return nothing
end

# Solve (L*U) x = b in place, given the packed LU factor `A`.
function naivesolve!(A::AbstractMatrix, x::AbstractVector, n)
    naivesub!(MyL(A), x, n)
    naivesub!(MyU(A), x, n)
    return nothing
end
| DiffEqGPU | https://github.com/SciML/DiffEqGPU.jl.git |
|
[
"MIT"
"""
    vectorized_map_solve(probs, alg,
        ensemblealg::Union{EnsembleArrayAlgorithm}, I,
        adaptive)

Lower-level API for `EnsembleArrayAlgorithm` that avoids converting the
solution to CPU arrays.

## Arguments

  - `probs`: the GPU-ready problems generated by the ensemble.
  - `alg`: the kernel-based differential equation solver. Most solvers from
    OrdinaryDiffEq.jl are supported.
  - `ensemblealg`: the `EnsembleGPUArray()` algorithm.
  - `I`: the iterator of trajectory indices; e.g. `1:10_000` simulates
    10,000 trajectories.
  - `adaptive`: Boolean toggle for adaptive time-stepping (`true` enables it).

## Keyword Arguments

Only a subset of the common solver arguments are supported.
"""
function vectorized_map_solve end
function vectorized_map_solve(probs, alg,
        ensemblealg::Union{EnsembleArrayAlgorithm}, I,
        adaptive;
        kwargs...)
    # Stack each trajectory's initial condition and parameters as the columns
    # of a single matrix, then run one batched solve.
    # NOTE(review): all problems are assumed to share a time span here and
    # `SciMLBase.NullParameters` is not special-cased (unlike the commented
    # variant that existed previously) — confirm callers guarantee both.
    u0 = hcat([Array(probs[j].u0) for j in 1:length(I)]...)
    p = hcat([Array(probs[j].p) for j in 1:length(I)]...)
    vectorized_map_solve_up(probs[1], alg, ensemblealg, I, u0, p;
        adaptive = adaptive, kwargs...)
end
# Build and solve the single "wide" problem for `vectorized_map_solve`.
# Mirrors `batch_solve_up` but takes one representative `prob` and returns
# the raw solution without per-trajectory splitting.
function vectorized_map_solve_up(prob, alg, ensemblealg, I, u0, p; kwargs...)
    # Move stacked states/parameters to the array backend.
    if ensemblealg isa EnsembleGPUArray
        backend = ensemblealg.backend
        u0 = adapt(backend, u0)
        p = adapt(backend, p)
    end
    len = length(prob.u0)
    if SciMLBase.has_jac(prob.f)
        # One len×len Jacobian slice per trajectory.
        if ensemblealg isa EnsembleGPUArray
            backend = ensemblealg.backend
            jac_prototype = allocate(backend, Float32, (len, len, length(I)))
            fill!(jac_prototype, 0.0)
        else
            jac_prototype = zeros(Float32, len, len, length(I))
        end
        if prob.f.colorvec !== nothing
            colorvec = repeat(prob.f.colorvec, length(I))
        else
            colorvec = repeat(1:length(prob.u0), length(I))
        end
    else
        jac_prototype = nothing
        colorvec = nothing
    end
    _callback = generate_callback(prob, length(I), ensemblealg; kwargs...)
    prob = generate_problem(prob, u0, p, jac_prototype, colorvec)
    if hasproperty(alg, :linsolve)
        # Swap in the batched LU solver; nfacts (-1) is fixed up at init.
        _alg = remake(alg, linsolve = LinSolveGPUSplitFactorize(len, -1))
    else
        _alg = alg
    end
    sol = solve(prob, _alg; kwargs..., callback = _callback, merge_callbacks = false,
        internalnorm = diffeqgpunorm)
end
| DiffEqGPU | https://github.com/SciML/DiffEqGPU.jl.git |
|
[
"MIT"
# Build the "wide" ODE problem whose state matrix stacks one trajectory per
# column. The user's RHS / Jacobian / time-gradient are wrapped in closures
# that launch one kernel work-item per column; W closures additionally run
# the batched LU factorization so the linear solver only substitutes.
function generate_problem(prob::SciMLBase.AbstractODEProblem,
        u0,
        p,
        jac_prototype,
        colorvec)
    rhs = let f = prob.f.f,
        kernel = DiffEqBase.isinplace(prob) ? gpu_kernel : gpu_kernel_oop

        function (du, u, p, t)
            backend = get_backend(u)
            wgs = workgroupsize(backend, size(u, 2))
            kernel(backend)(f, du, u, p, t; ndrange = size(u, 2),
                workgroupsize = wgs)
        end
    end
    if SciMLBase.has_jac(prob.f)
        wfact = let jac = prob.f.jac,
            kernel = DiffEqBase.isinplace(prob) ? W_kernel : W_kernel_oop

            function (W, u, p, gamma, t)
                backend = get_backend(u)
                wgs = workgroupsize(backend, size(u, 2))
                kernel(backend)(jac, W, u, p, gamma, t;
                    ndrange = size(u, 2),
                    workgroupsize = wgs)
                # Factorize every trajectory's W slice right away.
                lufact!(backend, W)
            end
        end
        wfact_t = let jac = prob.f.jac,
            kernel = DiffEqBase.isinplace(prob) ? Wt_kernel : Wt_kernel_oop

            function (W, u, p, gamma, t)
                backend = get_backend(u)
                wgs = workgroupsize(backend, size(u, 2))
                kernel(backend)(jac, W, u, p, gamma, t;
                    ndrange = size(u, 2),
                    workgroupsize = wgs)
                lufact!(backend, W)
            end
        end
    else
        wfact = nothing
        wfact_t = nothing
    end
    if SciMLBase.has_tgrad(prob.f)
        tgrad = let tg = prob.f.tgrad,
            kernel = DiffEqBase.isinplace(prob) ? gpu_kernel : gpu_kernel_oop

            function (J, u, p, t)
                backend = get_backend(u)
                wgs = workgroupsize(backend, size(u, 2))
                kernel(backend)(tg, J, u, p, t;
                    ndrange = size(u, 2),
                    workgroupsize = wgs)
            end
        end
    else
        tgrad = nothing
    end
    batched_f = ODEFunction(rhs, Wfact = wfact,
        Wfact_t = wfact_t,
        #colorvec=colorvec,
        jac_prototype = jac_prototype,
        tgrad = tgrad)
    return ODEProblem(batched_f, u0, prob.tspan, p;
        prob.kwargs...)
end
# SDE counterpart of `generate_problem`: like the ODE version, but also wraps
# the diffusion function `g` in a batched kernel closure.
function generate_problem(prob::SDEProblem, u0, p, jac_prototype, colorvec)
    # Batched drift.
    _f = let f = prob.f.f, kernel = DiffEqBase.isinplace(prob) ? gpu_kernel : gpu_kernel_oop
        function (du, u, p, t)
            version = get_backend(u)
            wgs = workgroupsize(version, size(u, 2))
            kernel(version)(f, du, u, p, t;
                ndrange = size(u, 2),
                workgroupsize = wgs)
        end
    end
    # Batched diffusion.
    _g = let f = prob.f.g, kernel = DiffEqBase.isinplace(prob) ? gpu_kernel : gpu_kernel_oop
        function (du, u, p, t)
            version = get_backend(u)
            wgs = workgroupsize(version, size(u, 2))
            kernel(version)(f, du, u, p, t;
                ndrange = size(u, 2),
                workgroupsize = wgs)
        end
    end
    if SciMLBase.has_jac(prob.f)
        # W closures also factorize each trajectory's slice immediately.
        _Wfact! = let jac = prob.f.jac,
            kernel = DiffEqBase.isinplace(prob) ? W_kernel : W_kernel_oop

            function (W, u, p, gamma, t)
                version = get_backend(u)
                wgs = workgroupsize(version, size(u, 2))
                kernel(version)(jac, W, u, p, gamma, t;
                    ndrange = size(u, 2),
                    workgroupsize = wgs)
                lufact!(version, W)
            end
        end
        _Wfact!_t = let jac = prob.f.jac,
            kernel = DiffEqBase.isinplace(prob) ? Wt_kernel : Wt_kernel_oop

            function (W, u, p, gamma, t)
                version = get_backend(u)
                wgs = workgroupsize(version, size(u, 2))
                kernel(version)(jac, W, u, p, gamma, t;
                    ndrange = size(u, 2),
                    workgroupsize = wgs)
                lufact!(version, W)
            end
        end
    else
        _Wfact! = nothing
        _Wfact!_t = nothing
    end
    if SciMLBase.has_tgrad(prob.f)
        _tgrad = let tgrad = prob.f.tgrad,
            kernel = DiffEqBase.isinplace(prob) ? gpu_kernel : gpu_kernel_oop

            function (J, u, p, t)
                version = get_backend(u)
                wgs = workgroupsize(version, size(u, 2))
                kernel(version)(tgrad, J, u, p, t;
                    ndrange = size(u, 2),
                    workgroupsize = wgs)
            end
        end
    else
        _tgrad = nothing
    end
    f_func = SDEFunction(_f, _g, Wfact = _Wfact!,
        Wfact_t = _Wfact!_t,
        #colorvec=colorvec,
        jac_prototype = jac_prototype,
        tgrad = _tgrad)
    prob = SDEProblem(f_func, _g, u0, prob.tspan, p;
        prob.kwargs...)
end
| DiffEqGPU | https://github.com/SciML/DiffEqGPU.jl.git |
|
[
"MIT"
"""
    alg_order(alg)

Return the order of convergence of a GPU kernel algorithm; errors for
algorithms without a declared order.
"""
function alg_order(alg::Union{GPUODEAlgorithm, GPUSDEAlgorithm})
    error("Order is not defined for this algorithm")
end
alg_order(::GPUTsit5) = 5
alg_order(::GPUVern7) = 7
alg_order(::GPUVern9) = 9
alg_order(::GPURosenbrock23) = 2
alg_order(::GPURodas4) = 4
alg_order(::GPURodas5P) = 5
alg_order(::GPUKvaerno3) = 3
alg_order(::GPUKvaerno5) = 5
alg_order(::GPUEM) = 1
alg_order(::GPUSIEA) = 2
# Dense forward-difference Jacobian of `f` at the static array `x`: builds a
# mutable MMatrix column-by-column and returns an immutable SMatrix.
# NOTE(review): `f(x)` is re-evaluated for every column; hoisting it before
# the loop would save length(x)-1 evaluations if `f` is pure — confirm.
function finite_diff_jac(f, jac_prototype, x)
    dx = sqrt(eps(DiffEqBase.RecursiveArrayTools.recursive_bottom_eltype(x)))
    jac = MMatrix{size(x, 1), size(x, 1), eltype(x)}(1I)
    for i in eachindex(x)
        # Perturb one component at a time, round-tripping through MArray
        # because SArrays are immutable.
        x_dx = convert(MArray, x)
        x_dx[i] = x_dx[i] + dx
        x_dx = convert(SArray, x_dx)
        jac[:, i] .= (f(x_dx) - f(x)) / dx
    end
    convert(SMatrix, jac)
end
# Whether an implicit GPU algorithm uses automatic differentiation for its
# Jacobians; explicit algorithms have no autodiff option and error.
function alg_autodiff(alg::GPUODEAlgorithm)
    error("This algorithm does not have an autodifferentiation option defined.")
end
alg_autodiff(::GPUODEImplicitAlgorithm{AD}) where {AD} = AD
|
[
"MIT"
] | 3.4.1 | 7a714b856fe00d6194774bc38e387375f06963fe | code | 5111 | struct GPUDiscreteCallback{F1, F2, F3, F4, F5} <: SciMLBase.AbstractDiscreteCallback
condition::F1
affect!::F2
initialize::F3
finalize::F4
save_positions::F5
function GPUDiscreteCallback(condition::F1, affect!::F2,
initialize::F3, finalize::F4,
save_positions::F5) where {F1, F2, F3, F4, F5}
if save_positions != (false, false)
error("Callback `save_positions` are incompatible with kernel-based GPU ODE solvers due requiring static sizing. Please ensure `save_positions = (false,false)` is set in all callback definitions used with such solvers.")
end
new{F1, F2, F3, F4, F5}(condition,
affect!, initialize, finalize, save_positions)
end
end
function GPUDiscreteCallback(condition, affect!;
initialize = SciMLBase.INITIALIZE_DEFAULT,
finalize = SciMLBase.FINALIZE_DEFAULT,
save_positions = (false, false))
GPUDiscreteCallback(condition, affect!, initialize, finalize, save_positions)
end
function Base.convert(::Type{GPUDiscreteCallback}, x::T) where {T <: DiscreteCallback}
GPUDiscreteCallback(x.condition, x.affect!, x.initialize, x.finalize,
Tuple(x.save_positions))
end
# Continuous-callback variant for kernel-based GPU solvers; mirrors
# `SciMLBase.ContinuousCallback` but enforces `save_positions == (false,
# false)` because kernel solutions are statically sized.
struct GPUContinuousCallback{F1, F2, F3, F4, F5, F6, T, T2, T3, I, R} <:
       SciMLBase.AbstractContinuousCallback
    condition::F1
    affect!::F2
    affect_neg!::F3
    initialize::F4
    finalize::F5
    idxs::I
    rootfind::SciMLBase.RootfindOpt
    interp_points::Int
    save_positions::F6
    dtrelax::R
    abstol::T
    reltol::T2
    repeat_nudge::T3
    function GPUContinuousCallback(condition::F1, affect!::F2, affect_neg!::F3,
            initialize::F4, finalize::F5, idxs::I, rootfind,
            interp_points, save_positions::F6, dtrelax::R, abstol::T,
            reltol::T2,
            repeat_nudge::T3) where {F1, F2, F3, F4, F5, F6, T, T2,
            T3, I, R,
            }
        if save_positions != (false, false)
            error("Callback `save_positions` are incompatible with kernel-based GPU ODE solvers due requiring static sizing. Please ensure `save_positions = (false,false)` is set in all callback definitions used with such solvers.")
        end
        new{F1, F2, F3, F4, F5, F6, T, T2, T3, I, R}(condition,
            affect!, affect_neg!,
            initialize, finalize, idxs, rootfind,
            interp_points,
            save_positions,
            dtrelax, abstol, reltol, repeat_nudge)
    end
end
# Keyword constructor with an explicit negative-crossing affect.
function GPUContinuousCallback(condition, affect!, affect_neg!;
        initialize = SciMLBase.INITIALIZE_DEFAULT,
        finalize = SciMLBase.FINALIZE_DEFAULT,
        idxs = nothing,
        rootfind = LeftRootFind,
        save_positions = (false, false),
        interp_points = 10,
        dtrelax = 1,
        abstol = 10eps(Float32), reltol = 0,
        repeat_nudge = 1 // 100)
    GPUContinuousCallback(condition, affect!, affect_neg!, initialize, finalize,
        idxs,
        rootfind, interp_points,
        save_positions,
        dtrelax, abstol, reltol, repeat_nudge)
end
# Keyword constructor where the same affect handles both crossing directions
# (affect_neg! defaults to affect!).
function GPUContinuousCallback(condition, affect!;
        initialize = SciMLBase.INITIALIZE_DEFAULT,
        finalize = SciMLBase.FINALIZE_DEFAULT,
        idxs = nothing,
        rootfind = LeftRootFind,
        save_positions = (false, false),
        affect_neg! = affect!,
        interp_points = 10,
        dtrelax = 1,
        abstol = 10eps(Float32), reltol = 0, repeat_nudge = 1 // 100)
    GPUContinuousCallback(condition, affect!, affect_neg!, initialize, finalize, idxs,
        rootfind, interp_points,
        save_positions,
        dtrelax, abstol, reltol, repeat_nudge)
end
# Convert a CPU ContinuousCallback into its GPU counterpart.
# NOTE(review): the absolute tolerance is replaced by a fixed
# 100 * eps(Float32) instead of forwarding `x.abstol` — presumably because a
# Float64-scale tolerance is too tight for Float32 GPU kernels; confirm this
# override is intentional.
function Base.convert(::Type{GPUContinuousCallback}, x::T) where {T <: ContinuousCallback}
    GPUContinuousCallback(x.condition, x.affect!, x.affect_neg!, x.initialize, x.finalize,
        x.idxs, x.rootfind, x.interp_points,
        Tuple(x.save_positions), x.dtrelax, 100 * eps(Float32), x.reltol,
        x.repeat_nudge)
end
# Lift a scalar DiscreteCallback into a batched one: the per-trajectory
# condition results are written into `cur`, the event fires if any trajectory
# triggered, and the affect is applied only to the triggered columns.
# Kernel-based ensembles return the callback unchanged.
function generate_callback(callback::DiscreteCallback, I,
        ensemblealg)
    if ensemblealg isa EnsembleGPUArray
        backend = ensemblealg.backend
        cur = adapt(backend, [false for i in 1:I])
    elseif ensemblealg isa EnsembleGPUKernel
        return callback
    else
        cur = [false for i in 1:I]
    end
    _condition = callback.condition
    _affect! = callback.affect!
    condition = function (u, t, integrator)
        version = get_backend(u)
        wgs = workgroupsize(version, size(u, 2))
        discrete_condition_kernel(version)(_condition, cur, u, t, integrator.p;
            ndrange = size(u, 2),
            workgroupsize = wgs)
        # Fire if at least one trajectory's condition held.
        any(cur)
    end
    affect! = function (integrator)
        version = get_backend(integrator.u)
        wgs = workgroupsize(version, size(integrator.u, 2))
        discrete_affect!_kernel(version)(_affect!, cur, integrator.u, integrator.t,
            integrator.p;
            ndrange = size(integrator.u, 2),
            workgroupsize = wgs)
    end
    return DiscreteCallback(condition, affect!, save_positions = callback.save_positions)
end
| DiffEqGPU | https://github.com/SciML/DiffEqGPU.jl.git |
|
[
"MIT"
"""
    GPUTsit5()

A specialized implementation of the 5th order `Tsit5` method built for kernel
generation with EnsembleGPUKernel. For a comparable CPU implementation, see
SimpleATsit5 from SimpleDiffEq.jl.
"""
struct GPUTsit5 <: GPUODEAlgorithm end

"""
    GPUVern7()

A specialized implementation of the 7th order `Vern7` method built for kernel
generation with EnsembleGPUKernel.
"""
struct GPUVern7 <: GPUODEAlgorithm end

"""
    GPUVern9()

A specialized implementation of the 9th order `Vern9` method built for kernel
generation with EnsembleGPUKernel.
"""
struct GPUVern9 <: GPUODEAlgorithm end

"""
    GPURosenbrock23()

A specialized implementation of the W-method `Rosenbrock23` built for kernel
generation with EnsembleGPUKernel. Accepts an `autodiff` keyword (see below).
"""
struct GPURosenbrock23{AD} <: GPUODEImplicitAlgorithm{AD} end

"""
    GPURodas4()

A specialized implementation of the `Rodas4` method built for kernel
generation with EnsembleGPUKernel. Accepts an `autodiff` keyword (see below).
"""
struct GPURodas4{AD} <: GPUODEImplicitAlgorithm{AD} end

"""
    GPURodas5P()

A specialized implementation of the `Rodas5P` method built for kernel
generation with EnsembleGPUKernel. Accepts an `autodiff` keyword (see below).
"""
struct GPURodas5P{AD} <: GPUODEImplicitAlgorithm{AD} end

"""
    GPUKvaerno3()

A specialized implementation of the `Kvaerno3` method built for kernel
generation with EnsembleGPUKernel. Accepts an `autodiff` keyword (see below).
"""
struct GPUKvaerno3{AD} <: GPUODEImplicitAlgorithm{AD} end

"""
    GPUKvaerno5()

A specialized implementation of the `Kvaerno5` method built for kernel
generation with EnsembleGPUKernel. Accepts an `autodiff` keyword (see below).
"""
struct GPUKvaerno5{AD} <: GPUODEImplicitAlgorithm{AD} end

# Give every implicit algorithm a keyword constructor selecting whether its
# Jacobians use automatic differentiation (`autodiff = Val{true}()` default).
for Alg in (:GPURosenbrock23, :GPURodas4, :GPURodas5P, :GPUKvaerno3, :GPUKvaerno5)
    @eval function $Alg(; autodiff = Val{true}())
        $Alg{SciMLBase._unwrap_val(autodiff)}()
    end
end

"""
    GPUEM()

A specialized implementation of the Euler-Maruyama method (weak order 1.0)
built for kernel generation with EnsembleGPUKernel.
"""
struct GPUEM <: GPUSDEAlgorithm end

"""
    GPUSIEA()

A specialized implementation of the weak order 2.0 method for Ito SDEs built
for kernel generation with EnsembleGPUKernel.
"""
struct GPUSIEA <: GPUSDEAlgorithm end
| DiffEqGPU | https://github.com/SciML/DiffEqGPU.jl.git |
|
[
"MIT"
] | 3.4.1 | 7a714b856fe00d6194774bc38e387375f06963fe | code | 3079 |
# GPU kernel for the fixed-step ODE solvers: each thread integrates one
# independent problem. `_ts`/`_us` are (len × nprobs) output matrices; column i
# receives the trajectory of `probs[i]`. `saveat` selects interpolated save
# points; otherwise every step (or only the endpoints) is stored, controlled
# by the compile-time flag `save_everystep`.
@kernel function ode_solve_kernel(@Const(probs), alg, _us, _ts, dt, callback,
        tstops, nsteps,
        saveat, ::Val{save_everystep}) where {save_everystep}
    i = @index(Global, Linear)
    # get the actual problem for this thread
    prob = @inbounds probs[i]
    # get the input/output arrays for this thread
    ts = @inbounds view(_ts, :, i)
    us = @inbounds view(_us, :, i)
    # a per-problem `saveat` passed through the problem kwargs overrides the global one
    _saveat = get(prob.kwargs, :saveat, nothing)
    saveat = _saveat === nothing ? saveat : _saveat
    integ = init(alg, prob.f, false, prob.u0, prob.tspan[1], dt, prob.p, tstops,
        callback, save_everystep, saveat)
    u0 = prob.u0
    tspan = prob.tspan
    # cur_t indexes the next `saveat` entry to write; step_idx indexes the next
    # save-everystep slot. Record the initial condition before stepping.
    integ.cur_t = 0
    if saveat !== nothing
        integ.cur_t = 1
        if prob.tspan[1] == saveat[1]
            integ.cur_t += 1
            @inbounds us[1] = u0
        end
    else
        @inbounds ts[integ.step_idx] = prob.tspan[1]
        @inbounds us[integ.step_idx] = prob.u0
    end
    integ.step_idx += 1
    # FSAL
    # Main integration loop; `step!` returns whether a callback already saved
    # this step, so we avoid saving twice.
    while integ.t < tspan[2] && integ.retcode != DiffEqBase.ReturnCode.Terminated
        saved_in_cb = step!(integ, ts, us)
        !saved_in_cb && savevalues!(integ, ts, us)
    end
    if integ.t > tspan[2] && saveat === nothing
        ## Interpolate to tf when the last fixed step overshot the end time
        @inbounds us[end] = integ(tspan[2])
        @inbounds ts[end] = tspan[2]
    end
    if saveat === nothing && !save_everystep
        # endpoint-only mode: slot 1 holds u0, slot 2 the final state
        @inbounds us[2] = integ.u
        @inbounds ts[2] = integ.t
    end
end
# GPU kernel for the adaptive-step ODE solvers: each thread integrates one
# independent problem with its own error-controlled step size. Output layout
# matches `ode_solve_kernel`: column i of `_ts`/`_us` holds problem i.
@kernel function ode_asolve_kernel(@Const(probs), alg, _us, _ts, dt, callback, tstops,
        abstol, reltol,
        saveat,
        ::Val{save_everystep}) where {save_everystep}
    i = @index(Global, Linear)
    # get the actual problem for this thread
    prob = @inbounds probs[i]
    # get the input/output arrays for this thread
    ts = @inbounds view(_ts, :, i)
    us = @inbounds view(_us, :, i)
    # TODO: optimize contiguous view to return a CuDeviceArray
    # a per-problem `saveat` passed through the problem kwargs overrides the global one
    _saveat = get(prob.kwargs, :saveat, nothing)
    saveat = _saveat === nothing ? saveat : _saveat
    u0 = prob.u0
    tspan = prob.tspan
    f = prob.f
    p = prob.p
    t = tspan[1]
    tf = prob.tspan[2]
    integ = init(alg, prob.f, false, prob.u0, prob.tspan[1], prob.tspan[2], dt,
        prob.p,
        abstol, reltol, DiffEqBase.ODE_DEFAULT_NORM, tstops, callback,
        saveat)
    # Record the initial condition (see ode_solve_kernel for the cur_t convention).
    integ.cur_t = 0
    if saveat !== nothing
        integ.cur_t = 1
        if tspan[1] == saveat[1]
            integ.cur_t += 1
            @inbounds us[1] = u0
        end
    else
        @inbounds ts[1] = tspan[1]
        @inbounds us[1] = u0
    end
    # Adaptive stepping loop; callbacks may save within `step!`.
    while integ.t < tspan[2] && integ.retcode != DiffEqBase.ReturnCode.Terminated
        saved_in_cb = step!(integ, ts, us)
        !saved_in_cb && savevalues!(integ, ts, us)
    end
    if integ.t > tspan[2] && saveat === nothing
        ## Interpolate to tf when the last accepted step overshot the end time
        @inbounds us[end] = integ(tspan[2])
        @inbounds ts[end] = tspan[2]
    end
    if saveat === nothing && !save_everystep
        # endpoint-only mode: slot 2 holds the final state
        @inbounds us[2] = integ.u
        @inbounds ts[2] = integ.t
    end
end
| DiffEqGPU | https://github.com/SciML/DiffEqGPU.jl.git |
|
[
"MIT"
] | 3.4.1 | 7a714b856fe00d6194774bc38e387375f06963fe | code | 8985 | """
```julia
vectorized_solve(probs, prob::Union{ODEProblem, SDEProblem}, alg;
                 dt, saveat = nothing,
                 save_everystep = true,
                 debug = false, callback = CallbackSet(nothing), tstops = nothing)
```
A lower level interface to the kernel generation solvers of EnsembleGPUKernel with fixed
time-stepping.
## Arguments
- `probs`: the GPU-setup problems generated by the ensemble.
- `prob`: the quintessential problem form. Can be just `probs[1]`
- `alg`: the kernel-based differential equation solver. Must be one of the
  EnsembleGPUKernel specialized methods.
## Keyword Arguments
Only a subset of the common solver arguments are supported.
## Returns
A tuple `(ts, us)` of backend arrays: column `i` holds the times and states of
problem `i`.
"""
function vectorized_solve end
# Fixed-step GPU solve for ODE ensembles: allocates the (len × nprobs) output
# matrices on the problem's backend and launches `ode_solve_kernel`.
# Returns `(ts, us)` still on the device; solution objects are built by the caller.
function vectorized_solve(probs, prob::ODEProblem, alg;
        dt, saveat = nothing,
        save_everystep = true,
        debug = false, callback = CallbackSet(nothing), tstops = nothing,
        kwargs...)
    backend = get_backend(probs)
    backend = maybe_prefer_blocks(backend)
    # if saveat is specified, we'll use a vector of timestamps.
    # otherwise it's a matrix that may be different for each ODE.
    timeseries = prob.tspan[1]:dt:prob.tspan[2]
    nsteps = length(timeseries)
    prob = convert(ImmutableODEProblem, prob)
    dt = convert(eltype(prob.tspan), dt)
    if saveat === nothing
        if save_everystep
            len = length(prob.tspan[1]:dt:prob.tspan[2])
            if tstops !== nothing
                # extra rows for tstops that don't already coincide with a step time
                len += length(tstops) - count(x -> x in tstops, timeseries)
                nsteps += length(tstops) - count(x -> x in tstops, timeseries)
            end
        else
            # endpoint-only mode: initial condition + final state
            len = 2
        end
        ts = allocate(backend, typeof(dt), (len, length(probs)))
        fill!(ts, prob.tspan[1])
        us = allocate(backend, typeof(prob.u0), (len, length(probs)))
    else
        # Normalize saveat into a GPU-compatible range/vector with the tspan eltype.
        # StepRangeLen with an Int32 offset is used for Float32 to keep the
        # struct isbits-friendly on device.
        saveat = if saveat isa AbstractRange
            _saveat = range(convert(eltype(prob.tspan), first(saveat)),
                convert(eltype(prob.tspan), last(saveat)),
                length = length(saveat))
            convert(StepRangeLen{
                    eltype(_saveat),
                    eltype(_saveat),
                    eltype(_saveat),
                    eltype(_saveat) === Float32 ? Int32 : Int64,
                },
                _saveat)
        elseif saveat isa AbstractVector
            adapt(backend, convert.(eltype(prob.tspan), saveat))
        else
            # scalar saveat: interpret as a fixed save interval
            _saveat = prob.tspan[1]:convert(eltype(prob.tspan), saveat):prob.tspan[end]
            convert(StepRangeLen{
                    eltype(_saveat),
                    eltype(_saveat),
                    eltype(_saveat),
                    eltype(_saveat) === Float32 ? Int32 : Int64,
                },
                _saveat)
        end
        ts = allocate(backend, typeof(dt), (length(saveat), length(probs)))
        fill!(ts, prob.tspan[1])
        us = allocate(backend, typeof(prob.u0), (length(saveat), length(probs)))
    end
    tstops = adapt(backend, tstops)
    kernel = ode_solve_kernel(backend)
    if backend isa CPU
        @warn "Running the kernel on CPU"
    end
    kernel(probs, alg, us, ts, dt, callback, tstops, nsteps, saveat,
        Val(save_everystep);
        ndrange = length(probs))
    # we build the actual solution object on the CPU because the GPU would create one
    # containing CuDeviceArrays, which we cannot use on the host (not GC tracked,
    # no useful operations, etc). That's unfortunate though, since this loop is
    # generally slower than the entire GPU execution, and necessitates synchronization
    #EDIT: Done when using with DiffEqGPU
    ts, us
end
# SDEProblems over GPU cannot support u0 as a Number type, because GPU kernels compiled only through u0 being StaticArrays
# SDEProblems over GPU cannot support u0 as a Number type, because GPU kernels compiled only through u0 being StaticArrays
# Fixed-step GPU solve for SDE ensembles. Allocates (len × nprobs) output
# matrices on the problem's backend, selects the kernel for `alg`, and launches
# it. Returns `(ts, us)` still on the device.
function vectorized_solve(probs, prob::SDEProblem, alg;
        dt, saveat = nothing,
        save_everystep = true,
        debug = false,
        kwargs...)
    backend = get_backend(probs)
    backend = maybe_prefer_blocks(backend)
    dt = convert(eltype(prob.tspan), dt)
    if saveat === nothing
        if save_everystep
            len = length(prob.tspan[1]:dt:prob.tspan[2])
        else
            # endpoint-only mode: initial condition + final state
            len = 2
        end
        ts = allocate(backend, typeof(dt), (len, length(probs)))
        fill!(ts, prob.tspan[1])
        us = allocate(backend, typeof(prob.u0), (len, length(probs)))
    else
        # Normalize saveat into a range/vector with the tspan eltype.
        saveat = if saveat isa AbstractRange
            range(convert(eltype(prob.tspan), first(saveat)),
                convert(eltype(prob.tspan), last(saveat)),
                length = length(saveat))
        elseif saveat isa AbstractVector
            convert.(eltype(prob.tspan), adapt(backend, saveat))
        else
            # scalar saveat: interpret as a fixed save interval
            prob.tspan[1]:convert(eltype(prob.tspan), saveat):prob.tspan[end]
        end
        ts = allocate(backend, typeof(dt), (length(saveat), length(probs)))
        fill!(ts, prob.tspan[1])
        us = allocate(backend, typeof(prob.u0), (length(saveat), length(probs)))
    end
    # Select the kernel for the requested algorithm. Previously an unsupported
    # `alg` left `kernel` undefined and surfaced as a cryptic UndefVarError;
    # raise an explicit error instead.
    if alg isa GPUEM
        kernel = em_kernel(backend)
    elseif alg isa GPUSIEA
        # GPUSIEA is only valid for diagonal noise
        SciMLBase.is_diagonal_noise(prob) ||
            error("The algorithm is not compatible with the chosen noise type. Please see the documentation on the solver methods")
        kernel = siea_kernel(backend)
    else
        error("Unsupported GPU SDE algorithm $(typeof(alg)). Expected GPUEM or GPUSIEA.")
    end
    if backend isa CPU
        @warn "Running the kernel on CPU"
    end
    kernel(probs, us, ts, dt, saveat, Val(save_everystep);
        ndrange = length(probs))
    ts, us
end
"""
```julia
vectorized_asolve(probs, prob::ODEProblem, alg;
                  dt = 0.1f0, saveat = nothing,
                  save_everystep = false,
                  abstol = 1.0f-6, reltol = 1.0f-3,
                  callback = CallbackSet(nothing), tstops = nothing)
```
A lower level interface to the kernel generation solvers of EnsembleGPUKernel with adaptive
time-stepping.
## Arguments
- `probs`: the GPU-setup problems generated by the ensemble.
- `prob`: the quintessential problem form. Can be just `probs[1]`
- `alg`: the kernel-based differential equation solver. Must be one of the
  EnsembleGPUKernel specialized methods.
## Keyword Arguments
Only a subset of the common solver arguments are supported.
## Returns
A tuple `(ts, us)` of backend arrays: column `i` holds the times and states of
problem `i`.
"""
function vectorized_asolve end
# Adaptive-step GPU solve for ODE ensembles: allocates outputs on the problem's
# backend and launches `ode_asolve_kernel` with the given tolerances.
# Returns `(ts, us)` still on the device.
function vectorized_asolve(probs, prob::ODEProblem, alg;
        dt = 0.1f0, saveat = nothing,
        save_everystep = false,
        abstol = 1.0f-6, reltol = 1.0f-3,
        debug = false, callback = CallbackSet(nothing), tstops = nothing,
        kwargs...)
    backend = get_backend(probs)
    backend = maybe_prefer_blocks(backend)
    prob = convert(ImmutableODEProblem, prob)
    dt = convert(eltype(prob.tspan), dt)
    abstol = convert(eltype(prob.tspan), abstol)
    reltol = convert(eltype(prob.tspan), reltol)
    # if saveat is specified, we'll use a vector of timestamps.
    # otherwise it's a matrix that may be different for each ODE.
    if saveat === nothing
        if save_everystep
            # with adaptive steps the number of steps is unknown a priori, so a
            # fixed-size output buffer cannot be preallocated
            error("Don't use adaptive version with saveat == nothing and save_everystep = true")
        else
            # endpoint-only mode: initial condition + final state
            len = 2
        end
        # if tstops !== nothing
        #     len += length(tstops)
        # end
        ts = allocate(backend, typeof(dt), (len, length(probs)))
        fill!(ts, prob.tspan[1])
        us = allocate(backend, typeof(prob.u0), (len, length(probs)))
    else
        # Normalize saveat into a range/vector with the tspan eltype.
        saveat = if saveat isa AbstractRange
            range(convert(eltype(prob.tspan), first(saveat)),
                convert(eltype(prob.tspan), last(saveat)),
                length = length(saveat))
        elseif saveat isa AbstractVector
            adapt(backend, convert.(eltype(prob.tspan), saveat))
        else
            # scalar saveat: interpret as a fixed save interval
            prob.tspan[1]:convert(eltype(prob.tspan), saveat):prob.tspan[end]
        end
        ts = allocate(backend, typeof(dt), (length(saveat), length(probs)))
        fill!(ts, prob.tspan[1])
        us = allocate(backend, typeof(prob.u0), (length(saveat), length(probs)))
    end
    us = adapt(backend, us)
    ts = adapt(backend, ts)
    tstops = adapt(backend, tstops)
    kernel = ode_asolve_kernel(backend)
    if backend isa CPU
        @warn "Running the kernel on CPU"
    end
    kernel(probs, alg, us, ts, dt, callback, tstops,
        abstol, reltol, saveat, Val(save_everystep);
        ndrange = length(probs))
    # we build the actual solution object on the CPU because the GPU would create one
    # containing CuDeviceArrays, which we cannot use on the host (not GC tracked,
    # no useful operations, etc). That's unfortunate though, since this loop is
    # generally slower than the entire GPU execution, and necessitates synchronization
    #EDIT: Done when using with DiffEqGPU
    ts, us
end
# Adaptive-step solve is not implemented for any GPU SDE algorithm; fail loudly.
# (The previous message named only GPUEM, but this method catches every
# GPUSDEAlgorithm, so report the actual algorithm instead.)
function vectorized_asolve(probs, prob::SDEProblem, alg;
        dt, saveat = nothing,
        save_everystep = true,
        debug = false,
        kwargs...)
    error("Adaptive time-stepping is not supported yet with GPU SDE solvers (got $(typeof(alg))).")
end
| DiffEqGPU | https://github.com/SciML/DiffEqGPU.jl.git |
|
[
"MIT"
# Build the PI step-size controller constants for an adaptive GPU integrator,
# all converted to the time/state eltype `T`.
# Returns `(beta1, beta2, qmax, qmin, gamma, qoldinit, qold)` where `qold`
# starts out equal to `qoldinit`.
function build_adaptive_controller_cache(alg::A, ::Type{T}) where {A, T}
    order = alg_order(alg)
    # PI controller exponents, scaled by the method order
    beta1 = T(7 / (10 * order))
    beta2 = T(2 / (5 * order))
    # bounds on the step-size ratio and the safety factor
    qmax = T(10.0)
    qmin = T(1 / 5)
    gamma = T(9 / 10)
    # initial value of the previous error-ratio estimate
    qoldinit = T(1e-4)
    return beta1, beta2, qmax, qmin, gamma, qoldinit, qoldinit
end
# Save the integrator's state into the per-problem output columns `ts`/`us`.
# In save-everystep mode the current (t, u) is appended at `step_idx`; in
# saveat mode every pending save point ≤ t is filled via dense-output
# interpolation between tprev and t. Returns `(saved, savedexactly)`.
@inline function savevalues!(integrator::DiffEqBase.AbstractODEIntegrator{
            AlgType,
            IIP,
            S,
            T,
        }, ts,
        us,
        force = false) where {AlgType <: GPUODEAlgorithm, IIP, S, T}
    saved, savedexactly = false, false
    saveat = integrator.saveat
    save_everystep = integrator.save_everystep
    if saveat === nothing && save_everystep
        saved = true
        savedexactly = true
        @inbounds us[integrator.step_idx] = integrator.u
        @inbounds ts[integrator.step_idx] = integrator.t
        integrator.step_idx += 1
    elseif saveat !== nothing
        saved = true
        savedexactly = true
        # flush all save points that the last step crossed
        while integrator.cur_t <= length(saveat) && saveat[integrator.cur_t] <= integrator.t
            savet = saveat[integrator.cur_t]
            # relative position of the save point inside the last step
            Θ = (savet - integrator.tprev) / integrator.dt
            @inbounds us[integrator.cur_t] = _ode_interpolant(Θ, integrator.dt,
                integrator.uprev, integrator)
            @inbounds ts[integrator.cur_t] = savet
            integrator.cur_t += 1
        end
    end
    saved, savedexactly
end
# Stop the integration by setting the integrator's return code
# (default: `ReturnCode.Terminated`); the kernel stepping loops check
# `retcode` and exit on the next iteration.
@inline function DiffEqBase.terminate!(
        integrator::DiffEqBase.AbstractODEIntegrator{AlgType, IIP, S, T},
        retcode = ReturnCode.Terminated) where {AlgType <: GPUODEAlgorithm, IIP, S, T}
    integrator.retcode = retcode
end
# Apply a single discrete callback: if its condition holds at the current
# state, save values first (so saveat points inside the step are flushed
# before `affect!` mutates the state) and then run the affect.
# Returns `(u_modified, saved_in_cb)`.
@inline function apply_discrete_callback!(integrator::DiffEqBase.AbstractODEIntegrator{
            AlgType,
            IIP,
            S, T,
        },
        ts, us,
        callback::GPUDiscreteCallback) where {
        AlgType <:
        GPUODEAlgorithm,
        IIP, S, T}
    saved_in_cb = false
    if callback.condition(integrator.u, integrator.t, integrator)
        # handle saveat
        _, savedexactly = savevalues!(integrator, ts, us)
        saved_in_cb = true
        integrator.u_modified = true
        callback.affect!(integrator)
    end
    integrator.u_modified, saved_in_cb
end
# Recursive entry point for a tuple of discrete callbacks: apply the first
# one, then fold its (modified, saved) flags through the remaining callbacks.
@inline function apply_discrete_callback!(integrator::DiffEqBase.AbstractODEIntegrator{
            AlgType,
            IIP,
            S, T,
        },
        ts, us,
        callback::GPUDiscreteCallback,
        args...) where {AlgType <: GPUODEAlgorithm, IIP,
        S, T}
    apply_discrete_callback!(integrator, ts, us,
        apply_discrete_callback!(integrator, ts, us, callback)...,
        args...)
end
# Recursion step with accumulated flags and more callbacks remaining:
# OR the flags from the next callback into the accumulators.
@inline function apply_discrete_callback!(integrator::DiffEqBase.AbstractODEIntegrator{
            AlgType,
            IIP,
            S, T,
        },
        ts, us,
        discrete_modified::Bool,
        saved_in_cb::Bool, callback::GPUDiscreteCallback,
        args...) where {AlgType <: GPUODEAlgorithm, IIP,
        S, T}
    bool, saved_in_cb2 = apply_discrete_callback!(integrator, ts, us,
        apply_discrete_callback!(integrator, ts,
            us, callback)...,
        args...)
    discrete_modified || bool, saved_in_cb || saved_in_cb2
end
# Recursion base case: last callback in the tuple; OR its flags into the
# accumulated results.
@inline function apply_discrete_callback!(integrator::DiffEqBase.AbstractODEIntegrator{
            AlgType,
            IIP,
            S, T,
        },
        ts, us,
        discrete_modified::Bool,
        saved_in_cb::Bool,
        callback::GPUDiscreteCallback) where {
        AlgType <:
        GPUODEAlgorithm,
        IIP, S, T}
    bool, saved_in_cb2 = apply_discrete_callback!(integrator, ts, us, callback)
    discrete_modified || bool, saved_in_cb || saved_in_cb2
end
# Dense-output evaluation at time `t` within the last step, using the
# 7-stage interpolation weights from `integrator.rs` (Tsit5-style
# coefficients via SimpleDiffEq.bθs) applied to the stored stages k1..k7.
@inline function interpolate(integrator::DiffEqBase.AbstractODEIntegrator{
            AlgType,
            IIP,
            S,
            T,
        },
        t) where {AlgType <: GPUODEAlgorithm, IIP, S, T}
    θ = (t - integrator.tprev) / integrator.dt
    b1θ, b2θ, b3θ, b4θ, b5θ, b6θ, b7θ = SimpleDiffEq.bθs(integrator.rs, θ)
    return integrator.uprev +
           integrator.dt *
           (b1θ * integrator.k1 + b2θ * integrator.k2 + b3θ * integrator.k3 +
            b4θ * integrator.k4 + b5θ * integrator.k5 + b6θ * integrator.k6 +
            b7θ * integrator.k7)
end
# Move the integrator's current time backwards inside the last step by
# interpolating the state to `t`. Only valid for t between tprev and the
# current t (in the direction of integration); errors otherwise.
@inline function _change_t_via_interpolation!(integrator::DiffEqBase.AbstractODEIntegrator{
            AlgType,
            IIP,
            S,
            T,
        },
        t,
        modify_save_endpoint::Type{Val{T1}}) where {
        AlgType <:
        GPUODEAlgorithm,
        IIP,
        S,
        T,
        T1,
    }
    # Can get rid of an allocation here with a function
    # get_tmp_arr(integrator.cache) which gives a pointer to some
    # cache array which can be modified.
    if integrator.tdir * t < integrator.tdir * integrator.tprev
        error("Current interpolant only works between tprev and t")
    elseif t != integrator.t
        integrator.u = integrator(t)
        # rewind the save index by the number of fixed steps we stepped back over
        integrator.step_idx -= Int(round((integrator.t - t) / integrator.dt))
        integrator.t = t
        #integrator.dt = integrator.t - integrator.tprev
    end
end
# Public DiffEqBase entry point; forwards to the internal implementation.
# `modify_save_endpoint` is accepted for interface compatibility.
@inline function DiffEqBase.change_t_via_interpolation!(integrator::DiffEqBase.AbstractODEIntegrator{
            AlgType,
            IIP,
            S,
            T,
        },
        t,
        modify_save_endpoint::Type{Val{T1}} = Val{
            false,
        }) where {
        AlgType <:
        GPUODEAlgorithm,
        IIP,
        S,
        T,
        T1,
    }
    _change_t_via_interpolation!(integrator, t, modify_save_endpoint)
end
# Apply a continuous callback at the located crossing time: rewind the
# integrator to tprev + cb_time, flush pending saves, and run `affect!`
# (downcrossing, prev_sign < 0) or `affect_neg!` (upcrossing, prev_sign > 0).
# Returns `(true, saved_in_cb)`.
@inline function apply_callback!(integrator::DiffEqBase.AbstractODEIntegrator{AlgType, IIP,
            S, T},
        callback::GPUContinuousCallback,
        cb_time, prev_sign, event_idx, ts,
        us) where {AlgType <: GPUODEAlgorithm, IIP, S, T}
    DiffEqBase.change_t_via_interpolation!(integrator, integrator.tprev + cb_time)
    # handle saveat
    _, savedexactly = savevalues!(integrator, ts, us)
    saved_in_cb = true
    integrator.u_modified = true
    if prev_sign < 0
        # condition crossed from negative to positive
        if callback.affect! === nothing
            integrator.u_modified = false
        else
            callback.affect!(integrator)
        end
    elseif prev_sign > 0
        # condition crossed from positive to negative
        if callback.affect_neg! === nothing
            integrator.u_modified = false
        else
            callback.affect_neg!(integrator)
        end
    end
    true, saved_in_cb
end
# Process all callbacks after a step: locate and apply the first continuous
# callback crossing (if any), then apply all discrete callbacks.
# Returns `(modified, saved_in_cb)` flags for the stepping loop.
@inline function handle_callbacks!(integrator::DiffEqBase.AbstractODEIntegrator{AlgType,
            IIP, S, T},
        ts, us) where {AlgType <: GPUODEAlgorithm, IIP, S, T}
    discrete_callbacks = integrator.callback.discrete_callbacks
    continuous_callbacks = integrator.callback.continuous_callbacks
    atleast_one_callback = false
    continuous_modified = false
    discrete_modified = false
    saved_in_cb = false
    if !(continuous_callbacks isa Tuple{})
        event_occurred = false
        time, upcrossing, event_occurred, event_idx, idx, counter = DiffEqBase.find_first_continuous_callback(integrator,
            continuous_callbacks...)
        if event_occurred
            # record which callback fired for the repeat-crossing guard in
            # determine_event_occurance
            integrator.event_last_time = idx
            integrator.vector_event_last_time = event_idx
            continuous_modified, saved_in_cb = apply_callback!(integrator,
                continuous_callbacks[1],
                time, upcrossing,
                event_idx, ts, us)
        else
            integrator.event_last_time = 0
            integrator.vector_event_last_time = 1
        end
    end
    if !(discrete_callbacks isa Tuple{})
        discrete_modified, saved_in_cb = apply_discrete_callback!(integrator, ts, us,
            discrete_callbacks...)
        return discrete_modified, saved_in_cb
    end
    return false, saved_in_cb
end
# Locate the time (relative to tprev) at which the continuous callback's
# condition crosses zero within the last step, using bisection on the
# dense-output interpolant. Returns `(new_t, prev_sign, event_occurred, event_idx)`.
@inline function DiffEqBase.find_callback_time(integrator::DiffEqBase.AbstractODEIntegrator{
            AlgType,
            IIP,
            S,
            T,
        },
        callback::DiffEqGPU.GPUContinuousCallback,
        counter) where {AlgType <: GPUODEAlgorithm,
        IIP, S, T}
    event_occurred, interp_index, prev_sign, prev_sign_index, event_idx = DiffEqBase.determine_event_occurance(integrator,
        callback,
        counter)
    if event_occurred
        if callback.condition === nothing
            new_t = zero(typeof(integrator.t))
        else
            top_t = integrator.t
            bottom_t = integrator.tprev
            if callback.rootfind != SciMLBase.NoRootFind
                # condition evaluated on the interpolated state at absolute time abst
                function zero_func(abst, p = nothing)
                    DiffEqBase.get_condition(integrator, callback, abst)
                end
                if zero_func(top_t) == 0
                    Θ = top_t
                else
                    if integrator.event_last_time == counter &&
                       abs(zero_func(bottom_t)) <= 100abs(integrator.last_event_error) &&
                       prev_sign_index == 1
                        # Determined that there is an event by derivative
                        # But floating point error may make the end point negative
                        bottom_t += integrator.dt * callback.repeat_nudge
                        sign_top = sign(zero_func(top_t))
                        sign(zero_func(bottom_t)) * sign_top >= zero(sign_top) &&
                            error("Double callback crossing floating pointer reducer errored. Report this issue.")
                    end
                    Θ = DiffEqBase.bisection(zero_func, (bottom_t, top_t),
                        isone(integrator.tdir),
                        callback.rootfind, callback.abstol,
                        callback.reltol)
                    # remember the residual at the root to guard against
                    # re-detecting the same crossing next step
                    integrator.last_event_error = DiffEqBase.ODE_DEFAULT_NORM(zero_func(Θ),
                        Θ)
                end
                # convert the absolute root time to an offset from tprev
                new_t = Θ - integrator.tprev
            else
                # If no solve and no interpolants, just use endpoint
                new_t = integrator.dt
            end
        end
    else
        new_t = zero(typeof(integrator.t))
    end
    new_t, prev_sign, event_occurred, event_idx
end
# GPU integrators carry no scratch cache arrays; return `nothing` to satisfy
# the SciMLBase interface.
@inline function SciMLBase.get_tmp_cache(integrator::DiffEqBase.AbstractODEIntegrator{
            AlgType,
            IIP,
            S, T}) where {
        AlgType <:
        GPUODEAlgorithm,
        IIP,
        S,
        T,
    }
    return nothing
end
# Evaluate the callback condition at absolute time `abst`, reusing the stored
# endpoints when possible and interpolating otherwise.
@inline function DiffEqBase.get_condition(integrator::DiffEqBase.AbstractODEIntegrator{
            AlgType,
            IIP,
            S, T,
        },
        callback,
        abst) where {AlgType <: GPUODEAlgorithm, IIP, S, T
    }
    if abst == integrator.t
        tmp = integrator.u
    elseif abst == integrator.tprev
        tmp = integrator.uprev
    else
        # inside the last step: use the dense-output interpolant
        tmp = integrator(abst)
    end
    return callback.condition(tmp, abst, integrator)
end
# interp_points = 0 or equivalently nothing
# Decide whether the callback condition changed sign across the last step.
# Returns `(event_occurred, interp_index, prev_sign, prev_sign_index, event_idx)`.
@inline function DiffEqBase.determine_event_occurance(integrator::DiffEqBase.AbstractODEIntegrator{
            AlgType,
            IIP,
            S,
            T,
        },
        callback::DiffEqGPU.GPUContinuousCallback,
        counter) where {
        AlgType <:
        GPUODEAlgorithm, IIP,
        S, T}
    event_occurred = false
    interp_index = 0
    # Check if the event occurred
    previous_condition = callback.condition(integrator.uprev, integrator.tprev,
        integrator)
    prev_sign = zero(integrator.t)
    next_sign = zero(integrator.t)
    # @show typeof(0)
    if integrator.event_last_time == counter &&
       minimum(DiffEqBase.ODE_DEFAULT_NORM(previous_condition, integrator.t)) <=
       100DiffEqBase.ODE_DEFAULT_NORM(integrator.last_event_error, integrator.t)
        # If there was a previous event, utilize the derivative at the start to
        # choose the previous sign. If the derivative is positive at tprev, then
        # we treat `prev_sign` as negative, and if the derivative is negative then we
        # treat `prev_sign` as positive, regardless of the positivity/negativity
        # of the true value due to it being =0 sans floating point issues.
        # Only do this if the discontinuity did not move it far away from an event
        # Since near events we use direction instead of location to reset
        # Evaluate condition slightly in future
        abst = integrator.tprev + integrator.dt * callback.repeat_nudge
        tmp_condition = DiffEqBase.get_condition(integrator, callback, abst)
        prev_sign = sign(tmp_condition)
    else
        prev_sign = sign(previous_condition)
    end
    prev_sign_index = 1
    abst = integrator.t
    next_condition = DiffEqBase.get_condition(integrator, callback, abst)
    next_sign = sign(next_condition)
    # an event fires only if the relevant affect exists for the crossing
    # direction and the condition changed sign (or hit zero)
    if ((prev_sign < 0 && callback.affect! !== nothing) ||
        (prev_sign > 0 && callback.affect_neg! !== nothing)) && prev_sign * next_sign <= 0
        event_occurred = true
    end
    event_idx = 1
    event_occurred, interp_index, prev_sign, prev_sign_index, event_idx
end
| DiffEqGPU | https://github.com/SciML/DiffEqGPU.jl.git |
|
[
"MIT"
] | 3.4.1 | 7a714b856fe00d6194774bc38e387375f06963fe | code | 11819 | # Default: Hermite Interpolation
# Default dense output: cubic Hermite interpolation between (y₀, k1) at the
# start of the step and (y₁, k2) at the end, evaluated at relative time
# Θ ∈ [0, 1]. Used by any GPU algorithm without a specialized interpolant.
@inline @muladd function _ode_interpolant(Θ, dt, y₀,
        integ::DiffEqBase.AbstractODEIntegrator{AlgType,
            IIP, S, T,
        }) where {
        AlgType <:
        GPUODEAlgorithm,
        IIP,
        S,
        T,
    }
    y₁ = integ.u
    k1 = integ.k1
    k2 = integ.k2
    out = (1 - Θ) * y₀ + Θ * y₁ +
          Θ * (Θ - 1) * ((1 - 2Θ) * (y₁ - y₀) + (Θ - 1) * dt * k1 + Θ * dt * k2)
    return out
end
# Evaluate the Vern7 interpolation weight polynomials b_i(Θ) from the rᵢⱼ
# coefficients stored in the tableau. Weights for stages 2/3 and 10 are
# identically zero in this tableau and are omitted from the return tuple.
@inline function bΘs(integ::T, Θ) where {T <: Union{GPUV7I, GPUAV7I}}
    @unpack r011, r012, r013, r014, r015, r016, r017, r042, r043, r044, r045, r046, r047,
    r052, r053, r054, r055, r056, r057, r062, r063, r064, r065, r066, r067, r072, r073,
    r074, r075, r076, r077, r082, r083, r084, r085, r086, r087, r092, r093, r094, r095,
    r096, r097, r112, r113, r114, r115, r116, r117, r122, r123, r124, r125, r126, r127,
    r132, r133, r134, r135, r136, r137, r142, r143, r144, r145, r146, r147, r152, r153,
    r154, r155, r156, r157, r162, r163, r164, r165, r166, r167 = integ.tab.interp
    # each b_iΘ is a polynomial in Θ with no constant term
    b1Θ = @evalpoly(Θ, 0, r011, r012, r013, r014, r015, r016, r017)
    b4Θ = @evalpoly(Θ, 0, 0, r042, r043, r044, r045, r046, r047)
    b5Θ = @evalpoly(Θ, 0, 0, r052, r053, r054, r055, r056, r057)
    b6Θ = @evalpoly(Θ, 0, 0, r062, r063, r064, r065, r066, r067)
    b7Θ = @evalpoly(Θ, 0, 0, r072, r073, r074, r075, r076, r077)
    b8Θ = @evalpoly(Θ, 0, 0, r082, r083, r084, r085, r086, r087)
    b9Θ = @evalpoly(Θ, 0, 0, r092, r093, r094, r095, r096, r097)
    b11Θ = @evalpoly(Θ, 0, 0, r112, r113, r114, r115, r116, r117)
    b12Θ = @evalpoly(Θ, 0, 0, r122, r123, r124, r125, r126, r127)
    b13Θ = @evalpoly(Θ, 0, 0, r132, r133, r134, r135, r136, r137)
    b14Θ = @evalpoly(Θ, 0, 0, r142, r143, r144, r145, r146, r147)
    b15Θ = @evalpoly(Θ, 0, 0, r152, r153, r154, r155, r156, r157)
    b16Θ = @evalpoly(Θ, 0, 0, r162, r163, r164, r165, r166, r167)
    return b1Θ, b4Θ, b5Θ, b6Θ, b7Θ, b8Θ, b9Θ, b11Θ, b12Θ, b13Θ, b14Θ, b15Θ, b16Θ
end
# Vern7 dense output: computes the six extra interpolation stages k11..k16
# (lazy — only needed when interpolating, not for stepping) and combines them
# with the stored stages using the weights from bΘs.
@inline @muladd function _ode_interpolant(Θ, dt, y₀,
        integ::T) where {T <:
                         Union{GPUV7I, GPUAV7I}}
    b1Θ, b4Θ, b5Θ, b6Θ, b7Θ, b8Θ, b9Θ, b11Θ, b12Θ, b13Θ, b14Θ, b15Θ, b16Θ = bΘs(integ, Θ)
    @unpack c11, a1101, a1104, a1105, a1106, a1107, a1108, a1109, c12, a1201, a1204,
    a1205, a1206, a1207, a1208, a1209, a1211, c13, a1301, a1304, a1305, a1306, a1307,
    a1308, a1309, a1311, a1312, c14, a1401, a1404, a1405, a1406, a1407, a1408, a1409,
    a1411, a1412, a1413, c15, a1501, a1504, a1505, a1506, a1507, a1508, a1509, a1511,
    a1512, a1513, c16, a1601, a1604, a1605, a1606, a1607, a1608, a1609,
    a1611, a1612, a1613 = integ.tab.extra
    @unpack k1, k2, k3, k4, k5, k6, k7, k8, k9, k10, uprev, f, t, p = integ
    # extra function evaluations at c11..c16 for the interpolant
    k11 = f(uprev +
            dt * (a1101 * k1 + a1104 * k4 + a1105 * k5 + a1106 * k6 +
             a1107 * k7 + a1108 * k8 + a1109 * k9), p, t + c11 * dt)
    k12 = f(uprev +
            dt * (a1201 * k1 + a1204 * k4 + a1205 * k5 + a1206 * k6 +
             a1207 * k7 + a1208 * k8 + a1209 * k9 + a1211 * k11), p,
        t + c12 * dt)
    k13 = f(uprev +
            dt * (a1301 * k1 + a1304 * k4 + a1305 * k5 + a1306 * k6 +
             a1307 * k7 + a1308 * k8 + a1309 * k9 + a1311 * k11 +
             a1312 * k12), p, t + c13 * dt)
    k14 = f(uprev +
            dt * (a1401 * k1 + a1404 * k4 + a1405 * k5 + a1406 * k6 +
             a1407 * k7 + a1408 * k8 + a1409 * k9 + a1411 * k11 +
             a1412 * k12 + a1413 * k13), p, t + c14 * dt)
    k15 = f(uprev +
            dt * (a1501 * k1 + a1504 * k4 + a1505 * k5 + a1506 * k6 +
             a1507 * k7 + a1508 * k8 + a1509 * k9 + a1511 * k11 +
             a1512 * k12 + a1513 * k13), p, t + c15 * dt)
    k16 = f(uprev +
            dt * (a1601 * k1 + a1604 * k4 + a1605 * k5 + a1606 * k6 +
             a1607 * k7 + a1608 * k8 + a1609 * k9 + a1611 * k11 +
             a1612 * k12 + a1613 * k13), p, t + c16 * dt)
    # weighted combination of stored and extra stages
    return y₀ +
           dt * (integ.k1 * b1Θ
            + integ.k4 * b4Θ + integ.k5 * b5Θ + integ.k6 * b6Θ + integ.k7 * b7Θ +
            integ.k8 * b8Θ + integ.k9 * b9Θ
            + k11 * b11Θ + k12 * b12Θ + k13 * b13Θ +
            k14 * b14Θ + k15 * b15Θ + k16 * b16Θ)
end
# Evaluate the Vern9 interpolation weight polynomials b_i(Θ) from the rᵢⱼ
# coefficients stored in the tableau. Weights for stages 2..7 and 16 are
# identically zero in this tableau and are omitted from the return tuple.
@inline function bΘs(integ::T, Θ) where {T <: Union{GPUV9I, GPUAV9I}}
    @unpack r011, r012, r013, r014, r015, r016, r017, r018, r019, r082, r083, r084, r085,
    r086, r087, r088, r089, r092, r093, r094, r095, r096, r097, r098, r099, r102, r103,
    r104, r105, r106, r107, r108, r109, r112, r113, r114, r115, r116, r117, r118, r119,
    r122, r123, r124, r125, r126, r127, r128, r129, r132, r133, r134, r135, r136, r137,
    r138, r139, r142, r143, r144, r145, r146, r147, r148, r149, r152, r153, r154, r155,
    r156, r157, r158, r159, r172, r173, r174, r175, r176, r177, r178, r179, r182, r183,
    r184, r185, r186, r187, r188, r189, r192, r193, r194, r195, r196, r197, r198, r199,
    r202, r203, r204, r205, r206, r207, r208, r209, r212, r213, r214, r215, r216, r217,
    r218, r219, r222, r223, r224, r225, r226, r227, r228, r229, r232, r233, r234, r235,
    r236, r237, r238, r239, r242, r243, r244, r245, r246, r247, r248, r249, r252, r253,
    r254, r255, r256, r257, r258, r259, r262, r263, r264, r265, r266,
    r267, r268, r269 = integ.tab.interp
    # each b_iΘ is a polynomial in Θ with no constant term
    b1Θ = @evalpoly(Θ, 0, r011, r012, r013, r014, r015, r016, r017, r018, r019)
    b8Θ = @evalpoly(Θ, 0, 0, r082, r083, r084, r085, r086, r087, r088, r089)
    b9Θ = @evalpoly(Θ, 0, 0, r092, r093, r094, r095, r096, r097, r098, r099)
    b10Θ = @evalpoly(Θ, 0, 0, r102, r103, r104, r105, r106, r107, r108, r109)
    b11Θ = @evalpoly(Θ, 0, 0, r112, r113, r114, r115, r116, r117, r118, r119)
    b12Θ = @evalpoly(Θ, 0, 0, r122, r123, r124, r125, r126, r127, r128, r129)
    b13Θ = @evalpoly(Θ, 0, 0, r132, r133, r134, r135, r136, r137, r138, r139)
    b14Θ = @evalpoly(Θ, 0, 0, r142, r143, r144, r145, r146, r147, r148, r149)
    b15Θ = @evalpoly(Θ, 0, 0, r152, r153, r154, r155, r156, r157, r158, r159)
    b17Θ = @evalpoly(Θ, 0, 0, r172, r173, r174, r175, r176, r177, r178, r179)
    b18Θ = @evalpoly(Θ, 0, 0, r182, r183, r184, r185, r186, r187, r188, r189)
    b19Θ = @evalpoly(Θ, 0, 0, r192, r193, r194, r195, r196, r197, r198, r199)
    b20Θ = @evalpoly(Θ, 0, 0, r202, r203, r204, r205, r206, r207, r208, r209)
    b21Θ = @evalpoly(Θ, 0, 0, r212, r213, r214, r215, r216, r217, r218, r219)
    b22Θ = @evalpoly(Θ, 0, 0, r222, r223, r224, r225, r226, r227, r228, r229)
    b23Θ = @evalpoly(Θ, 0, 0, r232, r233, r234, r235, r236, r237, r238, r239)
    b24Θ = @evalpoly(Θ, 0, 0, r242, r243, r244, r245, r246, r247, r248, r249)
    b25Θ = @evalpoly(Θ, 0, 0, r252, r253, r254, r255, r256, r257, r258, r259)
    b26Θ = @evalpoly(Θ, 0, 0, r262, r263, r264, r265, r266, r267, r268, r269)
    return b1Θ, b8Θ, b9Θ, b10Θ, b11Θ, b12Θ, b13Θ, b14Θ, b15Θ, b17Θ, b18Θ, b19Θ, b20Θ,
    b21Θ, b22Θ, b23Θ, b24Θ, b25Θ, b26Θ
end
# Vern9 dense output: computes the ten extra interpolation stages (tableau
# stages 17..26, held in locals k11..k20) lazily and combines them with the
# stored stages using the weights from bΘs. NOTE(review): `k10` is unpacked
# but unused here; the b-weight for stage 16 is identically zero.
@inline @muladd function _ode_interpolant(Θ, dt, y₀,
        integ::T) where {T <:
                         Union{GPUV9I, GPUAV9I}}
    b1Θ, b8Θ, b9Θ, b10Θ, b11Θ, b12Θ, b13Θ, b14Θ, b15Θ, b17Θ, b18Θ, b19Θ, b20Θ,
    b21Θ, b22Θ, b23Θ, b24Θ, b25Θ, b26Θ = bΘs(integ, Θ)
    @unpack c17, a1701, a1708, a1709, a1710, a1711, a1712, a1713, a1714, a1715, c18, a1801,
    a1808, a1809, a1810, a1811, a1812, a1813, a1814, a1815, a1817, c19, a1901, a1908, a1909,
    a1910, a1911, a1912, a1913, a1914, a1915, a1917, a1918, c20, a2001, a2008, a2009, a2010,
    a2011, a2012, a2013, a2014, a2015, a2017, a2018, a2019, c21, a2101, a2108, a2109, a2110,
    a2111, a2112, a2113, a2114, a2115, a2117, a2118, a2119, a2120, c22, a2201, a2208, a2209,
    a2210, a2211, a2212, a2213, a2214, a2215, a2217, a2218, a2219, a2220, a2221, c23, a2301,
    a2308, a2309, a2310, a2311, a2312, a2313, a2314, a2315, a2317, a2318, a2319, a2320,
    a2321, c24, a2401, a2408, a2409, a2410, a2411, a2412, a2413, a2414, a2415, a2417,
    a2418, a2419, a2420, a2421, c25, a2501, a2508, a2509, a2510, a2511, a2512, a2513,
    a2514, a2515, a2517, a2518, a2519, a2520, a2521, c26, a2601, a2608, a2609, a2610,
    a2611, a2612, a2613, a2614, a2615, a2617, a2618, a2619, a2620, a2621 = integ.tab.extra
    @unpack k1, k2, k3, k4, k5, k6, k7, k8, k9, k10, uprev, f, t, p = integ
    # extra function evaluations at c17..c26 for the interpolant
    k11 = f(uprev +
            dt * (a1701 * k1 + a1708 * k2 + a1709 * k3 + a1710 * k4 +
             a1711 * k5 + a1712 * k6 + a1713 * k7 + a1714 * k8 + a1715 * k9),
        p, t + c17 * dt)
    k12 = f(uprev +
            dt * (a1801 * k1 + a1808 * k2 + a1809 * k3 + a1810 * k4 +
             a1811 * k5 + a1812 * k6 + a1813 * k7 + a1814 * k8 +
             a1815 * k9 + a1817 * k11), p, t + c18 * dt)
    k13 = f(uprev +
            dt * (a1901 * k1 + a1908 * k2 + a1909 * k3 + a1910 * k4 +
             a1911 * k5 + a1912 * k6 + a1913 * k7 + a1914 * k8 +
             a1915 * k9 + a1917 * k11 + a1918 * k12), p, t + c19 * dt)
    k14 = f(uprev +
            dt * (a2001 * k1 + a2008 * k2 + a2009 * k3 + a2010 * k4 +
             a2011 * k5 + a2012 * k6 + a2013 * k7 + a2014 * k8 +
             a2015 * k9 + a2017 * k11 + a2018 * k12 + a2019 * k13), p,
        t + c20 * dt)
    k15 = f(uprev +
            dt * (a2101 * k1 + a2108 * k2 + a2109 * k3 + a2110 * k4 +
             a2111 * k5 + a2112 * k6 + a2113 * k7 + a2114 * k8 +
             a2115 * k9 + a2117 * k11 + a2118 * k12 + a2119 * k13 +
             a2120 * k14), p, t + c21 * dt)
    k16 = f(uprev +
            dt * (a2201 * k1 + a2208 * k2 + a2209 * k3 + a2210 * k4 +
             a2211 * k5 + a2212 * k6 + a2213 * k7 + a2214 * k8 +
             a2215 * k9 + a2217 * k11 + a2218 * k12 + a2219 * k13 +
             a2220 * k14 + a2221 * k15), p, t + c22 * dt)
    k17 = f(uprev +
            dt * (a2301 * k1 + a2308 * k2 + a2309 * k3 + a2310 * k4 +
             a2311 * k5 + a2312 * k6 + a2313 * k7 + a2314 * k8 +
             a2315 * k9 + a2317 * k11 + a2318 * k12 + a2319 * k13 +
             a2320 * k14 + a2321 * k15), p, t + c23 * dt)
    k18 = f(uprev +
            dt * (a2401 * k1 + a2408 * k2 + a2409 * k3 + a2410 * k4 +
             a2411 * k5 + a2412 * k6 + a2413 * k7 + a2414 * k8 +
             a2415 * k9 + a2417 * k11 + a2418 * k12 + a2419 * k13 +
             a2420 * k14 + a2421 * k15), p, t + c24 * dt)
    k19 = f(uprev +
            dt * (a2501 * k1 + a2508 * k2 + a2509 * k3 + a2510 * k4 +
             a2511 * k5 + a2512 * k6 + a2513 * k7 + a2514 * k8 +
             a2515 * k9 + a2517 * k11 + a2518 * k12 + a2519 * k13 +
             a2520 * k14 + a2521 * k15), p, t + c25 * dt)
    k20 = f(uprev +
            dt * (a2601 * k1 + a2608 * k2 + a2609 * k3 + a2610 * k4 +
             a2611 * k5 + a2612 * k6 + a2613 * k7 + a2614 * k8 +
             a2615 * k9 + a2617 * k11 + a2618 * k12 + a2619 * k13 +
             a2620 * k14 + a2621 * k15), p, t + c26 * dt)
    # weighted combination of stored and extra stages
    return y₀ +
           dt *
           (integ.k1 * b1Θ + integ.k2 * b8Θ + integ.k3 * b9Θ + integ.k4 * b10Θ +
            integ.k5 * b11Θ +
            integ.k6 * b12Θ + integ.k7 * b13Θ + integ.k8 * b14Θ + integ.k9 * b15Θ +
            k11 * b17Θ +
            k12 * b18Θ + k13 * b19Θ + k14 * b20Θ + k15 * b21Θ +
            k16 * b22Θ +
            k17 * b23Θ + k18 * b24Θ + k19 * b25Θ + k20 * b26Θ)
end
# Tsit5 dense output: free interpolant built from the seven stored FSAL stages
# using the interpolation coefficients `rs` (weights via SimpleDiffEq.bθs).
@inline @muladd function _ode_interpolant(Θ, dt, y₀,
        integ::T) where {T <:
                         Union{GPUT5I, GPUAT5I}}
    b1θ, b2θ, b3θ, b4θ, b5θ, b6θ, b7θ = SimpleDiffEq.bθs(integ.rs, Θ)
    return y₀ +
           dt *
           (b1θ * integ.k1 + b2θ * integ.k2 + b3θ * integ.k3 +
            b4θ * integ.k4 + b5θ * integ.k5 + b6θ * integ.k6 +
            b7θ * integ.k7)
end
# Rosenbrock23 dense output: quadratic interpolant over the last step built
# from the two stored stages, evaluated at relative time Θ ∈ [0, 1].
@inline @muladd function _ode_interpolant(Θ, dt, y₀,
        integ::T) where {T <: Union{GPURB23I, GPUARB23I}}
    d = integ.d
    # both stage weights share the same denominator
    denom = 1 - 2 * d
    w1 = Θ * (1 - Θ) / denom
    w2 = Θ * (Θ - 2 * d) / denom
    return y₀ + dt * (w1 * integ.k1 + w2 * integ.k2)
end
| DiffEqGPU | https://github.com/SciML/DiffEqGPU.jl.git |
|
[
"MIT"
] | 3.4.1 | 7a714b856fe00d6194774bc38e387375f06963fe | code | 15201 | ## Fixed TimeStep Integrator
## Fixed TimeStep Integrator
# Adapt an ODEProblem for a device backend: adapt each component (f, u0,
# tspan, p, kwargs) to `to` while preserving the in-placeness parameter `iip`.
function Adapt.adapt_structure(to, prob::ODEProblem{<:Any, <:Any, iip}) where {iip}
    ODEProblem{iip, true}(adapt(to, prob.f),
        adapt(to, prob.u0),
        adapt(to, prob.tspan),
        adapt(to, prob.p);
        adapt(to, prob.kwargs)...)
end
# GPU-compatible fixed-step Tsit5 integrator state, one instance per problem
# per thread. All fields are concrete so the struct specializes inside kernels.
mutable struct GPUTsit5Integrator{IIP, S, T, ST, P, F, TS, CB, AlgType} <:
               DiffEqBase.AbstractODEIntegrator{AlgType, IIP, S, T}
    alg::AlgType
    f::F # eom
    uprev::S # previous state
    u::S # current state
    tmp::S # dummy, same as state
    tprev::T # previous time
    t::T # current time
    t0::T # initial time, only for reinit
    dt::T # step size
    tdir::T # integration direction (sign of dt)
    p::P # parameter container
    u_modified::Bool # set by callbacks that changed u
    tstops::TS # required stopping times
    tstops_idx::Int # next tstop to hit
    callback::CB
    save_everystep::Bool
    saveat::ST # requested save times (or nothing)
    cur_t::Int # index of next saveat entry to write
    step_idx::Int # index of next save-everystep slot
    event_last_time::Int # which continuous callback fired last
    vector_event_last_time::Int
    last_event_error::T # condition residual at the last event root
    k1::S #interpolants
    k2::S
    k3::S
    k4::S
    k5::S
    k6::S
    k7::S
    cs::SVector{6, T} # ci factors cache: time coefficients
    as::SVector{21, T} # aij factors cache: solution coefficients
    rs::SVector{22, T} # rij factors cache: interpolation coefficients
    retcode::DiffEqBase.ReturnCode.T
end
const GPUT5I = GPUTsit5Integrator
# Dense-output functor: evaluate the interpolant at time `t`, with Θ the
# normalized position of `t` inside the current step [tprev, tprev + dt].
@inline function (integrator::GPUTsit5Integrator)(t)
    Θ = (t - integrator.tprev) / integrator.dt
    _ode_interpolant(Θ, integrator.dt, integrator.uprev, integrator)
end
# Record whether a callback mutated `u` during event handling.
@inline function DiffEqBase.u_modified!(integrator::GPUTsit5Integrator, bool::Bool)
    integrator.u_modified = bool
end
DiffEqBase.isinplace(::GPUT5I{IIP}) where {IIP} = IIP
## Adaptive TimeStep Integrator
"""
Adaptive-timestep integrator state for the GPU Tsit5 method. Extends the
fixed-step layout with `tf`/`dtnew`, the `btildes` error coefficients, and
the step-size-controller state (`qold`, `abstol`, `reltol`, `internalnorm`).
"""
mutable struct GPUATsit5Integrator{IIP, S, T, ST, P, F, N, TOL, Q, TS, CB, AlgType} <:
               DiffEqBase.AbstractODEIntegrator{AlgType, IIP, S, T}
    alg::AlgType
    f::F # eom
    uprev::S # previous state
    u::S # current state
    tmp::S # dummy, same as state
    tprev::T # previous time
    t::T # current time
    t0::T # initial time, only for reinit
    tf::T
    dt::T # step size
    dtnew::T
    tdir::T
    p::P # parameter container
    u_modified::Bool
    tstops::TS
    tstops_idx::Int
    callback::CB
    save_everystep::Bool
    saveat::ST
    cur_t::Int
    step_idx::Int
    event_last_time::Int
    vector_event_last_time::Int
    last_event_error::T
    k1::S # interpolants of the algorithm
    k2::S
    k3::S
    k4::S
    k5::S
    k6::S
    k7::S
    cs::SVector{6, T} # ci factors cache: time coefficients
    as::SVector{21, T} # aij factors cache: solution coefficients
    btildes::SVector{7, T}
    rs::SVector{22, T} # rij factors cache: interpolation coefficients
    qold::Q
    abstol::TOL
    reltol::TOL
    internalnorm::N # function that computes the error EEst based on state
    retcode::DiffEqBase.ReturnCode.T
end
const GPUAT5I = GPUATsit5Integrator
# Dense-output functor: interpolate the solution at time `t` inside the step.
@inline function (integrator::GPUATsit5Integrator)(t)
    Θ = (t - integrator.tprev) / integrator.dt
    _ode_interpolant(Θ, integrator.dt, integrator.uprev, integrator)
end
# Record whether a callback mutated `u` during event handling.
@inline function DiffEqBase.u_modified!(integrator::GPUATsit5Integrator, bool::Bool)
    integrator.u_modified = bool
end
## Vern7
"""
Fixed-timestep integrator state for the GPU Vern7 method. Stage values
`k1`–`k10` are retained for dense output; `tab` holds the `Vern7Tableau`.
"""
mutable struct GPUV7Integrator{IIP, S, T, ST, P, F, TS, CB, TabType, AlgType} <:
               DiffEqBase.AbstractODEIntegrator{AlgType, IIP, S, T}
    alg::AlgType
    f::F # eom
    uprev::S # previous state
    u::S # current state
    tmp::S # dummy, same as state
    tprev::T # previous time
    t::T # current time
    t0::T # initial time, only for reinit
    dt::T # step size
    tdir::T
    p::P # parameter container
    u_modified::Bool
    tstops::TS
    tstops_idx::Int
    callback::CB
    save_everystep::Bool
    saveat::ST
    cur_t::Int
    step_idx::Int
    event_last_time::Int
    vector_event_last_time::Int
    last_event_error::T
    k1::S # interpolants
    k2::S
    k3::S
    k4::S
    k5::S
    k6::S
    k7::S
    k8::S
    k9::S
    k10::S
    tab::TabType
    retcode::DiffEqBase.ReturnCode.T
end
const GPUV7I = GPUV7Integrator
# Dense-output functor: interpolate the solution at time `t` inside the step.
@inline function (integrator::GPUV7I)(t)
    Θ = (t - integrator.tprev) / integrator.dt
    _ode_interpolant(Θ, integrator.dt, integrator.uprev, integrator)
end
"""
Adaptive-timestep integrator state for the GPU Vern7 method. Extends the
fixed-step layout with `tf`/`dtnew` and the step-size-controller state
(`qold`, `abstol`, `reltol`, `internalnorm`).
"""
mutable struct GPUAV7Integrator{IIP, S, T, ST, P, F, N, TOL, Q, TS, CB, TabType, AlgType} <:
               DiffEqBase.AbstractODEIntegrator{AlgType, IIP, S, T}
    alg::AlgType
    f::F # eom
    uprev::S # previous state
    u::S # current state
    tmp::S # dummy, same as state
    tprev::T # previous time
    t::T # current time
    t0::T # initial time, only for reinit
    tf::T
    dt::T # step size
    dtnew::T
    tdir::T
    p::P # parameter container
    u_modified::Bool
    tstops::TS
    tstops_idx::Int
    callback::CB
    save_everystep::Bool
    saveat::ST
    cur_t::Int
    step_idx::Int
    event_last_time::Int
    vector_event_last_time::Int
    last_event_error::T
    k1::S # interpolants of the algorithm
    k2::S
    k3::S
    k4::S
    k5::S
    k6::S
    k7::S
    k8::S
    k9::S
    k10::S
    tab::TabType
    qold::Q
    abstol::TOL
    reltol::TOL
    internalnorm::N # function that computes the error EEst based on state
    retcode::DiffEqBase.ReturnCode.T
end
const GPUAV7I = GPUAV7Integrator
# Dense-output functor: interpolate the solution at time `t` inside the step.
@inline function (integrator::GPUAV7I)(t)
    Θ = (t - integrator.tprev) / integrator.dt
    _ode_interpolant(Θ, integrator.dt, integrator.uprev, integrator)
end
## Vern9
"""
Fixed-timestep integrator state for the GPU Vern9 method. Stage values
`k1`–`k10` are retained for dense output; `tab` holds the `Vern9Tableau`.
"""
mutable struct GPUV9Integrator{IIP, S, T, ST, P, F, TS, CB, TabType, AlgType} <:
               DiffEqBase.AbstractODEIntegrator{AlgType, IIP, S, T}
    alg::AlgType
    f::F # eom
    uprev::S # previous state
    u::S # current state
    tmp::S # dummy, same as state
    tprev::T # previous time
    t::T # current time
    t0::T # initial time, only for reinit
    dt::T # step size
    tdir::T
    p::P # parameter container
    u_modified::Bool
    tstops::TS
    tstops_idx::Int
    callback::CB
    save_everystep::Bool
    saveat::ST
    cur_t::Int
    step_idx::Int
    event_last_time::Int
    vector_event_last_time::Int
    last_event_error::T
    k1::S # interpolants
    k2::S
    k3::S
    k4::S
    k5::S
    k6::S
    k7::S
    k8::S
    k9::S
    k10::S
    tab::TabType
    retcode::DiffEqBase.ReturnCode.T
end
const GPUV9I = GPUV9Integrator
# Dense-output functor: interpolate the solution at time `t` inside the step.
@inline function (integrator::GPUV9I)(t)
    Θ = (t - integrator.tprev) / integrator.dt
    _ode_interpolant(Θ, integrator.dt, integrator.uprev, integrator)
end
"""
Adaptive-timestep integrator state for the GPU Vern9 method. Extends the
fixed-step layout with `tf`/`dtnew` and the step-size-controller state
(`qold`, `abstol`, `reltol`, `internalnorm`).
"""
mutable struct GPUAV9Integrator{IIP, S, T, ST, P, F, N, TOL, Q, TS, CB, TabType, AlgType} <:
               DiffEqBase.AbstractODEIntegrator{AlgType, IIP, S, T}
    alg::AlgType
    f::F # eom
    uprev::S # previous state
    u::S # current state
    tmp::S # dummy, same as state
    tprev::T # previous time
    t::T # current time
    t0::T # initial time, only for reinit
    tf::T
    dt::T # step size
    dtnew::T
    tdir::T
    p::P # parameter container
    u_modified::Bool
    tstops::TS
    tstops_idx::Int
    callback::CB
    save_everystep::Bool
    saveat::ST
    cur_t::Int
    step_idx::Int
    event_last_time::Int
    vector_event_last_time::Int
    last_event_error::T
    k1::S # interpolants of the algorithm
    k2::S
    k3::S
    k4::S
    k5::S
    k6::S
    k7::S
    k8::S
    k9::S
    k10::S
    tab::TabType
    qold::Q
    abstol::TOL
    reltol::TOL
    internalnorm::N # function that computes the error EEst based on state
    retcode::DiffEqBase.ReturnCode.T
end
const GPUAV9I = GPUAV9Integrator
# Dense-output functor: interpolate the solution at time `t` inside the step.
@inline function (integrator::GPUAV9I)(t)
    Θ = (t - integrator.tprev) / integrator.dt
    _ode_interpolant(Θ, integrator.dt, integrator.uprev, integrator)
end
#######################################################################################
# Initialization of Integrators
#######################################################################################
"""
    init(alg::GPUTsit5, f, IIP, u0, t0, dt, p, tstops, callback, save_everystep, saveat)

Construct a fixed-step `GPUTsit5Integrator` positioned at `t0`. Every state
and stage slot starts as a copy of `u0`.
"""
@inline function init(alg::GPUTsit5, f::F, IIP::Bool, u0::S, t0::T, dt::T,
        p::P, tstops::TS, callback::CB, save_everystep::Bool,
        saveat::ST) where {F, P, T, S, TS, CB, ST}
    cs, as, rs = SimpleDiffEq._build_tsit5_caches(T)
    !IIP && @assert S <: SArray
    # "No event yet" sentinels for the callback machinery.
    event_last_time = 1
    vector_event_last_time = 0
    last_event_error = zero(T)
    return GPUT5I{IIP, S, T, ST, P, F, TS, CB, typeof(alg)}(alg, f, copy(u0),
        copy(u0), copy(u0), t0, t0, t0, dt, sign(dt), p, true, tstops, 1,
        callback, save_everystep, saveat, 1, 1, event_last_time,
        vector_event_last_time, last_event_error, copy(u0), copy(u0),
        copy(u0), copy(u0), copy(u0), copy(u0), copy(u0), cs, as, rs,
        DiffEqBase.ReturnCode.Default)
end
"""
    init(alg::GPUTsit5, f, IIP, u0, t0, tf, dt, p, abstol, reltol, internalnorm,
         tstops, callback, saveat)

Construct an adaptive-step `GPUATsit5Integrator` integrating from `t0` to `tf`.
"""
@inline function init(alg::GPUTsit5, f::F, IIP::Bool, u0::S, t0::T, tf::T, dt::T,
        p::P, abstol::TOL, reltol::TOL, internalnorm::N, tstops::TS,
        callback::CB, saveat::ST) where {F, P, S, T, N, TOL, TS, CB, ST}
    cs, as, btildes, rs = SimpleDiffEq._build_atsit5_caches(T)
    !IIP && @assert S <: SArray
    # Initial value of the PI-controller memory.
    qoldinit = T(1e-4)
    # "No event yet" sentinels for the callback machinery.
    event_last_time = 1
    vector_event_last_time = 0
    last_event_error = zero(T)
    return GPUAT5I{IIP, S, T, ST, P, F, N, TOL, typeof(qoldinit), TS, CB, typeof(alg)}(
        alg, f, copy(u0), copy(u0), copy(u0), t0, t0, t0, tf, dt, dt,
        sign(tf - t0), p, true, tstops, 1, callback, false, saveat, 1, 1,
        event_last_time, vector_event_last_time, last_event_error, copy(u0),
        copy(u0), copy(u0), copy(u0), copy(u0), copy(u0), copy(u0), cs, as,
        btildes, rs, qoldinit, abstol, reltol, internalnorm,
        DiffEqBase.ReturnCode.Default)
end
"""
    init(alg::GPUVern7, f, IIP, u0, t0, dt, p, tstops, callback, save_everystep, saveat)

Construct a fixed-step `GPUV7Integrator` positioned at `t0`.
"""
@inline function init(alg::GPUVern7, f::F, IIP::Bool, u0::S, t0::T, dt::T,
        p::P, tstops::TS, callback::CB, save_everystep::Bool,
        saveat::ST) where {F, P, T, S, TS, CB, ST}
    tab = Vern7Tableau(T, T)
    !IIP && @assert S <: SArray
    # "No event yet" sentinels for the callback machinery.
    event_last_time = 1
    vector_event_last_time = 0
    last_event_error = zero(T)
    return GPUV7I{IIP, S, T, ST, P, F, TS, CB, typeof(tab), typeof(alg)}(alg, f,
        copy(u0), copy(u0), copy(u0), t0, t0, t0, dt, sign(dt), p, true,
        tstops, 1, callback, save_everystep, saveat, 1, 1, event_last_time,
        vector_event_last_time, last_event_error, copy(u0), copy(u0),
        copy(u0), copy(u0), copy(u0), copy(u0), copy(u0), copy(u0), copy(u0),
        copy(u0), tab, DiffEqBase.ReturnCode.Default)
end
"""
    init(alg::GPUVern7, f, IIP, u0, t0, tf, dt, p, abstol, reltol, internalnorm,
         tstops, callback, saveat)

Construct an adaptive-step `GPUAV7Integrator` integrating from `t0` to `tf`.
"""
@inline function init(alg::GPUVern7, f::F, IIP::Bool, u0::S, t0::T, tf::T, dt::T,
        p::P, abstol::TOL, reltol::TOL, internalnorm::N, tstops::TS,
        callback::CB, saveat::ST) where {F, P, S, T, N, TOL, TS, CB, ST}
    !IIP && @assert S <: SArray
    tab = Vern7Tableau(T, T)
    # Initial value of the PI-controller memory.
    qoldinit = T(1e-4)
    # "No event yet" sentinels for the callback machinery.
    event_last_time = 1
    vector_event_last_time = 0
    last_event_error = zero(T)
    return GPUAV7I{IIP, S, T, ST, P, F, N, TOL, typeof(qoldinit), TS, CB, typeof(tab),
        typeof(alg)}(alg, f, copy(u0), copy(u0), copy(u0), t0, t0, t0, tf, dt,
        dt, sign(tf - t0), p, true, tstops, 1, callback, false, saveat, 1, 1,
        event_last_time, vector_event_last_time, last_event_error, copy(u0),
        copy(u0), copy(u0), copy(u0), copy(u0), copy(u0), copy(u0), copy(u0),
        copy(u0), copy(u0), tab, qoldinit, abstol, reltol, internalnorm,
        DiffEqBase.ReturnCode.Default)
end
"""
    init(alg::GPUVern9, f, IIP, u0, t0, dt, p, tstops, callback, save_everystep, saveat)

Construct a fixed-step `GPUV9Integrator` positioned at `t0`.
"""
@inline function init(alg::GPUVern9, f::F, IIP::Bool, u0::S, t0::T, dt::T,
        p::P, tstops::TS, callback::CB, save_everystep::Bool,
        saveat::ST) where {F, P, T, S, TS, CB, ST}
    tab = Vern9Tableau(T, T)
    !IIP && @assert S <: SArray
    # "No event yet" sentinels for the callback machinery.
    event_last_time = 1
    vector_event_last_time = 0
    last_event_error = zero(T)
    return GPUV9I{IIP, S, T, ST, P, F, TS, CB, typeof(tab), typeof(alg)}(alg, f,
        copy(u0), copy(u0), copy(u0), t0, t0, t0, dt, sign(dt), p, true,
        tstops, 1, callback, save_everystep, saveat, 1, 1, event_last_time,
        vector_event_last_time, last_event_error, copy(u0), copy(u0),
        copy(u0), copy(u0), copy(u0), copy(u0), copy(u0), copy(u0), copy(u0),
        copy(u0), tab, DiffEqBase.ReturnCode.Default)
end
"""
    init(alg::GPUVern9, f, IIP, u0, t0, tf, dt, p, abstol, reltol, internalnorm,
         tstops, callback, saveat)

Construct an adaptive-step `GPUAV9Integrator` integrating from `t0` to `tf`.
"""
@inline function init(alg::GPUVern9, f::F, IIP::Bool, u0::S, t0::T, tf::T, dt::T,
        p::P, abstol::TOL, reltol::TOL, internalnorm::N, tstops::TS,
        callback::CB, saveat::ST) where {F, P, S, T, N, TOL, TS, CB, ST}
    !IIP && @assert S <: SArray
    tab = Vern9Tableau(T, T)
    # Initial value of the PI-controller memory.
    qoldinit = T(1e-4)
    # "No event yet" sentinels for the callback machinery.
    event_last_time = 1
    vector_event_last_time = 0
    last_event_error = zero(T)
    return GPUAV9I{IIP, S, T, ST, P, F, N, TOL, typeof(qoldinit), TS, CB, typeof(tab),
        typeof(alg)}(alg, f, copy(u0), copy(u0), copy(u0), t0, t0, t0, tf, dt,
        dt, sign(tf - t0), p, true, tstops, 1, callback, false, saveat, 1, 1,
        event_last_time, vector_event_last_time, last_event_error, copy(u0),
        copy(u0), copy(u0), copy(u0), copy(u0), copy(u0), copy(u0), copy(u0),
        copy(u0), copy(u0), tab, qoldinit, abstol, reltol, internalnorm,
        DiffEqBase.ReturnCode.Default)
end
| DiffEqGPU | https://github.com/SciML/DiffEqGPU.jl.git |
|
[
"MIT"
] | 3.4.1 | 7a714b856fe00d6194774bc38e387375f06963fe | code | 482 | @inline @muladd function _ode_interpolant(Θ, dt, y₀,
integ::T) where {
T <:
Union{GPURodas4I, GPUARodas4I}}
Θ1 = 1 - Θ
y₁ = integ.u
return Θ1 * y₀ + Θ * (y₁ + Θ1 * (integ.k1 + Θ * integ.k2))
end
@inline @muladd function _ode_interpolant(Θ, dt, y₀,
        integ::T) where {T <: Union{GPURodas5PI, GPUARodas5PI}}
    # Rodas5P dense output: blend of y₀ and the new state y₁ plus a cubic
    # correction built from the stored stage combinations k1..k3.
    Θm1 = 1 - Θ
    y₁ = integ.u
    correction = integ.k1 + Θ * (integ.k2 + Θ * integ.k3)
    return Θm1 * y₀ + Θ * (y₁ + Θm1 * correction)
end
| DiffEqGPU | https://github.com/SciML/DiffEqGPU.jl.git |
|
[
"MIT"
] | 3.4.1 | 7a714b856fe00d6194774bc38e387375f06963fe | code | 21422 | @inline function (integrator::DiffEqBase.AbstractODEIntegrator{
AlgType,
IIP,
S,
T,
})(t) where {
AlgType <:
GPUODEAlgorithm,
IIP,
S,
T,
}
Θ = (t - integrator.tprev) / integrator.dt
_ode_interpolant(Θ, integrator.dt, integrator.uprev, integrator)
end
# Generic fallback for all GPU ODE integrators: record whether a callback
# mutated `u` during event handling.
@inline function DiffEqBase.u_modified!(
        integrator::DiffEqBase.AbstractODEIntegrator{AlgType, IIP, S, T},
        bool::Bool) where {AlgType <: GPUODEAlgorithm, IIP, S, T}
    integrator.u_modified = bool
end
"""
Fixed-timestep integrator state for the GPU Rosenbrock23 method. `d` is the
method constant 1/(2 + √2); `k1`/`k2` are retained for dense output.
"""
mutable struct GPURosenbrock23Integrator{IIP, S, T, ST, P, F, TS, CB, AlgType} <:
               DiffEqBase.AbstractODEIntegrator{AlgType, IIP, S, T}
    alg::AlgType
    f::F # eom
    uprev::S # previous state
    u::S # current state
    tmp::S # dummy, same as state
    tprev::T # previous time
    t::T # current time
    t0::T # initial time, only for reinit
    dt::T # step size
    tdir::T
    p::P # parameter container
    u_modified::Bool
    tstops::TS
    tstops_idx::Int
    callback::CB
    save_everystep::Bool
    saveat::ST
    cur_t::Int
    step_idx::Int
    event_last_time::Int
    vector_event_last_time::Int
    last_event_error::T
    k1::S # interpolants
    k2::S
    d::T
    retcode::DiffEqBase.ReturnCode.T
end
const GPURB23I = GPURosenbrock23Integrator
"""
    init(alg::GPURosenbrock23, f, IIP, u0, t0, dt, p, tstops, callback,
         save_everystep, saveat)

Construct a fixed-step `GPURosenbrock23Integrator` positioned at `t0`.
"""
@inline function init(alg::GPURosenbrock23, f::F, IIP::Bool, u0::S, t0::T, dt::T,
        p::P, tstops::TS, callback::CB, save_everystep::Bool,
        saveat::ST) where {F, P, T, S, TS, CB, ST}
    !IIP && @assert S <: SArray
    # "No event yet" sentinels for the callback machinery.
    event_last_time = 1
    vector_event_last_time = 0
    last_event_error = zero(T)
    # Method constant d = 1/(2 + √2).
    d = T(2)
    d = 1 / (d + sqrt(d))
    return GPURB23I{IIP, S, T, ST, P, F, TS, CB, typeof(alg)}(alg, f, copy(u0),
        copy(u0), copy(u0), t0, t0, t0, dt, sign(dt), p, true, tstops, 1,
        callback, save_everystep, saveat, 1, 1, event_last_time,
        vector_event_last_time, last_event_error, copy(u0), copy(u0), d,
        DiffEqBase.ReturnCode.Default)
end
"""
Adaptive-timestep integrator state for the GPU Rosenbrock23 method. Extends
the fixed-step layout with `tf`/`dtnew`, a third stage slot `k3`, and the
step-size-controller state (`qold`, `abstol`, `reltol`, `internalnorm`).
"""
mutable struct GPUARosenbrock23Integrator{
    IIP,
    S,
    T,
    ST,
    P,
    F,
    N,
    TOL,
    Q,
    TS,
    CB,
    AlgType,
} <:
               DiffEqBase.AbstractODEIntegrator{AlgType, IIP, S, T}
    alg::AlgType
    f::F # eom
    uprev::S # previous state
    u::S # current state
    tmp::S # dummy, same as state
    tprev::T # previous time
    t::T # current time
    t0::T # initial time, only for reinit
    tf::T
    dt::T # step size
    dtnew::T
    tdir::T
    p::P # parameter container
    u_modified::Bool
    tstops::TS
    tstops_idx::Int
    callback::CB
    save_everystep::Bool
    saveat::ST
    cur_t::Int
    step_idx::Int
    event_last_time::Int
    vector_event_last_time::Int
    last_event_error::T
    k1::S # interpolants of the algorithm
    k2::S
    k3::S
    d::T
    qold::Q
    abstol::TOL
    reltol::TOL
    internalnorm::N # function that computes the error EEst based on state
    retcode::DiffEqBase.ReturnCode.T
end
const GPUARB23I = GPUARosenbrock23Integrator
"""
    init(alg::GPURosenbrock23, f, IIP, u0, t0, tf, dt, p, abstol, reltol,
         internalnorm, tstops, callback, saveat)

Construct an adaptive-step `GPUARosenbrock23Integrator` from `t0` to `tf`.
"""
@inline function init(alg::GPURosenbrock23, f::F, IIP::Bool, u0::S, t0::T, tf::T,
        dt::T, p::P, abstol::TOL, reltol::TOL, internalnorm::N, tstops::TS,
        callback::CB, saveat::ST) where {F, P, S, T, N, TOL, TS, CB, ST}
    !IIP && @assert S <: SArray
    # Initial value of the PI-controller memory.
    qoldinit = T(1e-4)
    # "No event yet" sentinels for the callback machinery.
    event_last_time = 1
    vector_event_last_time = 0
    last_event_error = zero(T)
    # Method constant d = 1/(2 + √2).
    d = T(2)
    d = 1 / (d + sqrt(d))
    return GPUARB23I{IIP, S, T, ST, P, F, N, TOL, typeof(qoldinit), TS, CB, typeof(alg)}(
        alg, f, copy(u0), copy(u0), copy(u0), t0, t0, t0, tf, dt, dt,
        sign(tf - t0), p, true, tstops, 1, callback, false, saveat, 1, 1,
        event_last_time, vector_event_last_time, last_event_error, copy(u0),
        copy(u0), copy(u0), d, qoldinit, abstol, reltol, internalnorm,
        DiffEqBase.ReturnCode.Default)
end
##########################
# Rodas 4
##########################
# Fixed Step
"""
Fixed-timestep integrator state for the GPU Rodas4 method. `tab` holds the
`Rodas4Tableau`; `k1`/`k2` are retained for dense output.
"""
mutable struct GPURodas4Integrator{IIP, S, T, ST, P, F, TS, CB, TabType, AlgType} <:
               DiffEqBase.AbstractODEIntegrator{AlgType, IIP, S, T}
    alg::AlgType
    f::F # eom
    uprev::S # previous state
    u::S # current state
    tmp::S # dummy, same as state
    tprev::T # previous time
    t::T # current time
    t0::T # initial time, only for reinit
    dt::T # step size
    tdir::T
    p::P # parameter container
    u_modified::Bool
    tstops::TS
    tstops_idx::Int
    callback::CB
    save_everystep::Bool
    saveat::ST
    cur_t::Int
    step_idx::Int
    event_last_time::Int
    vector_event_last_time::Int
    last_event_error::T
    k1::S # interpolants
    k2::S # interpolants
    tab::TabType
    retcode::DiffEqBase.ReturnCode.T
end
const GPURodas4I = GPURodas4Integrator
"""
    init(alg::GPURodas4, f, IIP, u0, t0, dt, p, tstops, callback,
         save_everystep, saveat)

Construct a fixed-step `GPURodas4Integrator` positioned at `t0`.
"""
@inline function init(alg::GPURodas4, f::F, IIP::Bool, u0::S, t0::T, dt::T,
        p::P, tstops::TS, callback::CB, save_everystep::Bool,
        saveat::ST) where {F, P, T, S, TS, CB, ST}
    !IIP && @assert S <: SArray
    # "No event yet" sentinels for the callback machinery.
    event_last_time = 1
    vector_event_last_time = 0
    last_event_error = zero(T)
    tab = Rodas4Tableau(T, T)
    return GPURodas4I{IIP, S, T, ST, P, F, TS, CB, typeof(tab), typeof(alg)}(alg,
        f, copy(u0), copy(u0), copy(u0), t0, t0, t0, dt, sign(dt), p, true,
        tstops, 1, callback, save_everystep, saveat, 1, 1, event_last_time,
        vector_event_last_time, last_event_error, copy(u0), copy(u0), tab,
        DiffEqBase.ReturnCode.Default)
end
# Adaptive Step
"""
Adaptive-timestep integrator state for the GPU Rodas4 method. Extends the
fixed-step layout with `tf`/`dtnew` and the step-size-controller state
(`qold`, `abstol`, `reltol`, `internalnorm`).
"""
mutable struct GPUARodas4Integrator{
    IIP,
    S,
    T,
    ST,
    P,
    F,
    N,
    TOL,
    Q,
    TS,
    CB,
    TabType,
    AlgType,
} <:
               DiffEqBase.AbstractODEIntegrator{AlgType, IIP, S, T}
    alg::AlgType
    f::F # eom
    uprev::S # previous state
    u::S # current state
    tmp::S # dummy, same as state
    tprev::T # previous time
    t::T # current time
    t0::T # initial time, only for reinit
    tf::T
    dt::T # step size
    dtnew::T
    tdir::T
    p::P # parameter container
    u_modified::Bool
    tstops::TS
    tstops_idx::Int
    callback::CB
    save_everystep::Bool
    saveat::ST
    cur_t::Int
    step_idx::Int
    event_last_time::Int
    vector_event_last_time::Int
    last_event_error::T
    k1::S # interpolants
    k2::S # interpolants
    tab::TabType
    qold::Q
    abstol::TOL
    reltol::TOL
    internalnorm::N # function that computes the error EEst based on state
    retcode::DiffEqBase.ReturnCode.T
end
const GPUARodas4I = GPUARodas4Integrator
"""
    init(alg::GPURodas4, f, IIP, u0, t0, tf, dt, p, abstol, reltol,
         internalnorm, tstops, callback, saveat)

Construct an adaptive-step `GPUARodas4Integrator` from `t0` to `tf`.
"""
@inline function init(alg::GPURodas4, f::F, IIP::Bool, u0::S, t0::T, tf::T,
        dt::T, p::P, abstol::TOL, reltol::TOL, internalnorm::N, tstops::TS,
        callback::CB, saveat::ST) where {F, P, S, T, N, TOL, TS, CB, ST}
    !IIP && @assert S <: SArray
    # Initial value of the PI-controller memory.
    qoldinit = T(1e-4)
    # "No event yet" sentinels for the callback machinery.
    event_last_time = 1
    vector_event_last_time = 0
    last_event_error = zero(T)
    tab = Rodas4Tableau(T, T)
    return GPUARodas4I{IIP, S, T, ST, P, F, N, TOL, typeof(qoldinit), TS, CB, typeof(tab),
        typeof(alg)}(alg, f, copy(u0), copy(u0), copy(u0), t0, t0, t0, tf, dt,
        dt, sign(tf - t0), p, true, tstops, 1, callback, false, saveat, 1, 1,
        event_last_time, vector_event_last_time, last_event_error, copy(u0),
        copy(u0), tab, qoldinit, abstol, reltol, internalnorm,
        DiffEqBase.ReturnCode.Default)
end
##########################
# Rodas 5P
##########################
# Fixed Step
"""
Fixed-timestep integrator state for the GPU Rodas5P method. `tab` holds the
`Rodas5PTableau`; `k1`–`k3` are retained for dense output.
"""
mutable struct GPURodas5PIntegrator{IIP, S, T, ST, P, F, TS, CB, TabType, AlgType} <:
               DiffEqBase.AbstractODEIntegrator{AlgType, IIP, S, T}
    alg::AlgType
    f::F # eom
    uprev::S # previous state
    u::S # current state
    tmp::S # dummy, same as state
    tprev::T # previous time
    t::T # current time
    t0::T # initial time, only for reinit
    dt::T # step size
    tdir::T
    p::P # parameter container
    u_modified::Bool
    tstops::TS
    tstops_idx::Int
    callback::CB
    save_everystep::Bool
    saveat::ST
    cur_t::Int
    step_idx::Int
    event_last_time::Int
    vector_event_last_time::Int
    last_event_error::T
    k1::S # interpolants
    k2::S # interpolants
    k3::S # interpolants
    tab::TabType
    retcode::DiffEqBase.ReturnCode.T
end
const GPURodas5PI = GPURodas5PIntegrator
"""
    init(alg::GPURodas5P, f, IIP, u0, t0, dt, p, tstops, callback,
         save_everystep, saveat)

Construct a fixed-step `GPURodas5PIntegrator` positioned at `t0`.
"""
@inline function init(alg::GPURodas5P, f::F, IIP::Bool, u0::S, t0::T, dt::T,
        p::P, tstops::TS, callback::CB, save_everystep::Bool,
        saveat::ST) where {F, P, T, S, TS, CB, ST}
    !IIP && @assert S <: SArray
    # "No event yet" sentinels for the callback machinery.
    event_last_time = 1
    vector_event_last_time = 0
    last_event_error = zero(T)
    tab = Rodas5PTableau(T, T)
    return GPURodas5PI{IIP, S, T, ST, P, F, TS, CB, typeof(tab), typeof(alg)}(alg,
        f, copy(u0), copy(u0), copy(u0), t0, t0, t0, dt, sign(dt), p, true,
        tstops, 1, callback, save_everystep, saveat, 1, 1, event_last_time,
        vector_event_last_time, last_event_error, copy(u0), copy(u0),
        copy(u0), tab, DiffEqBase.ReturnCode.Default)
end
# Adaptive Step
"""
Adaptive-timestep integrator state for the GPU Rodas5P method. Extends the
fixed-step layout with `tf`/`dtnew` and the step-size-controller state
(`qold`, `abstol`, `reltol`, `internalnorm`).
"""
mutable struct GPUARodas5PIntegrator{IIP, S, T, ST, P, F, N, TOL, Q, TS, CB, TabType,
    AlgType,
} <:
               DiffEqBase.AbstractODEIntegrator{AlgType, IIP, S, T}
    alg::AlgType
    f::F # eom
    uprev::S # previous state
    u::S # current state
    tmp::S # dummy, same as state
    tprev::T # previous time
    t::T # current time
    t0::T # initial time, only for reinit
    tf::T
    dt::T # step size
    dtnew::T
    tdir::T
    p::P # parameter container
    u_modified::Bool
    tstops::TS
    tstops_idx::Int
    callback::CB
    save_everystep::Bool
    saveat::ST
    cur_t::Int
    step_idx::Int
    event_last_time::Int
    vector_event_last_time::Int
    last_event_error::T
    k1::S # interpolants
    k2::S # interpolants
    k3::S # interpolants
    tab::TabType
    qold::Q
    abstol::TOL
    reltol::TOL
    internalnorm::N # function that computes the error EEst based on state
    retcode::DiffEqBase.ReturnCode.T
end
const GPUARodas5PI = GPUARodas5PIntegrator
"""
    init(alg::GPURodas5P, f, IIP, u0, t0, tf, dt, p, abstol, reltol,
         internalnorm, tstops, callback, saveat)

Construct an adaptive-step `GPUARodas5PIntegrator` from `t0` to `tf`.
"""
@inline function init(alg::GPURodas5P, f::F, IIP::Bool, u0::S, t0::T, tf::T,
        dt::T, p::P, abstol::TOL, reltol::TOL, internalnorm::N, tstops::TS,
        callback::CB, saveat::ST) where {F, P, S, T, N, TOL, TS, CB, ST}
    !IIP && @assert S <: SArray
    # Initial value of the PI-controller memory.
    qoldinit = T(1e-4)
    # "No event yet" sentinels for the callback machinery.
    event_last_time = 1
    vector_event_last_time = 0
    last_event_error = zero(T)
    tab = Rodas5PTableau(T, T)
    return GPUARodas5PI{IIP, S, T, ST, P, F, N, TOL, typeof(qoldinit), TS, CB, typeof(tab),
        typeof(alg)}(alg, f, copy(u0), copy(u0), copy(u0), t0, t0, t0, tf, dt,
        dt, sign(tf - t0), p, true, tstops, 1, callback, false, saveat, 1, 1,
        event_last_time, vector_event_last_time, last_event_error, copy(u0),
        copy(u0), copy(u0), tab, qoldinit, abstol, reltol, internalnorm,
        DiffEqBase.ReturnCode.Default)
end
##########################
# Kvaerno3
##########################
# Fixed Step
"""
Fixed-timestep integrator state for the GPU Kvaerno3 method. `tab` holds the
`Kvaerno3Tableau`; `k1`/`k2` are retained for dense output.
"""
mutable struct GPUKvaerno3Integrator{IIP, S, T, ST, P, F, TS, CB, TabType, AlgType} <:
               DiffEqBase.AbstractODEIntegrator{AlgType, IIP, S, T}
    alg::AlgType
    f::F # eom
    uprev::S # previous state
    u::S # current state
    tmp::S # dummy, same as state
    tprev::T # previous time
    t::T # current time
    t0::T # initial time, only for reinit
    dt::T # step size
    tdir::T
    p::P # parameter container
    u_modified::Bool
    tstops::TS
    tstops_idx::Int
    callback::CB
    save_everystep::Bool
    saveat::ST
    cur_t::Int
    step_idx::Int
    event_last_time::Int
    vector_event_last_time::Int
    last_event_error::T
    k1::S # interpolants
    k2::S # interpolants
    tab::TabType
    retcode::DiffEqBase.ReturnCode.T
end
const GPUKvaerno3I = GPUKvaerno3Integrator
"""
    init(alg::GPUKvaerno3, f, IIP, u0, t0, dt, p, tstops, callback,
         save_everystep, saveat)

Construct a fixed-step `GPUKvaerno3Integrator` positioned at `t0`.
"""
@inline function init(alg::GPUKvaerno3, f::F, IIP::Bool, u0::S, t0::T, dt::T,
        p::P, tstops::TS, callback::CB, save_everystep::Bool,
        saveat::ST) where {F, P, T, S, TS, CB, ST}
    !IIP && @assert S <: SArray
    # "No event yet" sentinels for the callback machinery.
    event_last_time = 1
    vector_event_last_time = 0
    last_event_error = zero(T)
    tab = Kvaerno3Tableau(T, T)
    return GPUKvaerno3I{IIP, S, T, ST, P, F, TS, CB, typeof(tab), typeof(alg)}(alg,
        f, copy(u0), copy(u0), copy(u0), t0, t0, t0, dt, sign(dt), p, true,
        tstops, 1, callback, save_everystep, saveat, 1, 1, event_last_time,
        vector_event_last_time, last_event_error, copy(u0), copy(u0), tab,
        DiffEqBase.ReturnCode.Default)
end
# Adaptive Step
"""
Adaptive-timestep integrator state for the GPU Kvaerno3 method. Extends the
fixed-step layout with `tf`/`dtnew` and the step-size-controller state
(`qold`, `abstol`, `reltol`, `internalnorm`).
"""
mutable struct GPUAKvaerno3Integrator{IIP, S, T, ST, P, F, N, TOL, Q, TS, CB, TabType,
    AlgType,
} <:
               DiffEqBase.AbstractODEIntegrator{AlgType, IIP, S, T}
    alg::AlgType
    f::F # eom
    uprev::S # previous state
    u::S # current state
    tmp::S # dummy, same as state
    tprev::T # previous time
    t::T # current time
    t0::T # initial time, only for reinit
    tf::T
    dt::T # step size
    dtnew::T
    tdir::T
    p::P # parameter container
    u_modified::Bool
    tstops::TS
    tstops_idx::Int
    callback::CB
    save_everystep::Bool
    saveat::ST
    cur_t::Int
    step_idx::Int
    event_last_time::Int
    vector_event_last_time::Int
    last_event_error::T
    k1::S # interpolants
    k2::S # interpolants
    tab::TabType
    qold::Q
    abstol::TOL
    reltol::TOL
    internalnorm::N # function that computes the error EEst based on state
    retcode::DiffEqBase.ReturnCode.T
end
const GPUAKvaerno3I = GPUAKvaerno3Integrator
"""
    init(alg::GPUKvaerno3, f, IIP, u0, t0, tf, dt, p, abstol, reltol,
         internalnorm, tstops, callback, saveat)

Construct an adaptive-step `GPUAKvaerno3Integrator` from `t0` to `tf`.
"""
@inline function init(alg::GPUKvaerno3, f::F, IIP::Bool, u0::S, t0::T, tf::T,
        dt::T, p::P, abstol::TOL, reltol::TOL, internalnorm::N, tstops::TS,
        callback::CB, saveat::ST) where {F, P, S, T, N, TOL, TS, CB, ST}
    !IIP && @assert S <: SArray
    # Initial value of the PI-controller memory.
    qoldinit = T(1e-4)
    # "No event yet" sentinels for the callback machinery.
    event_last_time = 1
    vector_event_last_time = 0
    last_event_error = zero(T)
    tab = Kvaerno3Tableau(T, T)
    return GPUAKvaerno3I{IIP, S, T, ST, P, F, N, TOL, typeof(qoldinit), TS, CB,
        typeof(tab), typeof(alg)}(alg, f, copy(u0), copy(u0), copy(u0), t0,
        t0, t0, tf, dt, dt, sign(tf - t0), p, true, tstops, 1, callback,
        false, saveat, 1, 1, event_last_time, vector_event_last_time,
        last_event_error, copy(u0), copy(u0), tab, qoldinit, abstol, reltol,
        internalnorm, DiffEqBase.ReturnCode.Default)
end
##########################
# Kvaerno5
##########################
# Fixed Step
"""
Fixed-timestep integrator state for the GPU Kvaerno5 method. `tab` holds the
`Kvaerno5Tableau`; `k1`/`k2` are retained for dense output.
"""
mutable struct GPUKvaerno5Integrator{IIP, S, T, ST, P, F, TS, CB, TabType, AlgType} <:
               DiffEqBase.AbstractODEIntegrator{AlgType, IIP, S, T}
    alg::AlgType
    f::F # eom
    uprev::S # previous state
    u::S # current state
    tmp::S # dummy, same as state
    tprev::T # previous time
    t::T # current time
    t0::T # initial time, only for reinit
    dt::T # step size
    tdir::T
    p::P # parameter container
    u_modified::Bool
    tstops::TS
    tstops_idx::Int
    callback::CB
    save_everystep::Bool
    saveat::ST
    cur_t::Int
    step_idx::Int
    event_last_time::Int
    vector_event_last_time::Int
    last_event_error::T
    k1::S # interpolants
    k2::S # interpolants
    tab::TabType
    retcode::DiffEqBase.ReturnCode.T
end
const GPUKvaerno5I = GPUKvaerno5Integrator
"""
    init(alg::GPUKvaerno5, f, IIP, u0, t0, dt, p, tstops, callback,
         save_everystep, saveat)

Construct a fixed-step `GPUKvaerno5Integrator` positioned at `t0`.
"""
@inline function init(alg::GPUKvaerno5, f::F, IIP::Bool, u0::S, t0::T, dt::T,
        p::P, tstops::TS, callback::CB, save_everystep::Bool,
        saveat::ST) where {F, P, T, S, TS, CB, ST}
    !IIP && @assert S <: SArray
    # "No event yet" sentinels for the callback machinery.
    event_last_time = 1
    vector_event_last_time = 0
    last_event_error = zero(T)
    tab = Kvaerno5Tableau(T, T)
    return GPUKvaerno5I{IIP, S, T, ST, P, F, TS, CB, typeof(tab), typeof(alg)}(alg,
        f, copy(u0), copy(u0), copy(u0), t0, t0, t0, dt, sign(dt), p, true,
        tstops, 1, callback, save_everystep, saveat, 1, 1, event_last_time,
        vector_event_last_time, last_event_error, copy(u0), copy(u0), tab,
        DiffEqBase.ReturnCode.Default)
end
# Adaptive Step
"""
Adaptive-timestep integrator state for the GPU Kvaerno5 method. Extends the
fixed-step layout with `tf`/`dtnew` and the step-size-controller state
(`qold`, `abstol`, `reltol`, `internalnorm`).
"""
mutable struct GPUAKvaerno5Integrator{IIP, S, T, ST, P, F, N, TOL, Q, TS, CB, TabType,
    AlgType,
} <:
               DiffEqBase.AbstractODEIntegrator{AlgType, IIP, S, T}
    alg::AlgType
    f::F # eom
    uprev::S # previous state
    u::S # current state
    tmp::S # dummy, same as state
    tprev::T # previous time
    t::T # current time
    t0::T # initial time, only for reinit
    tf::T
    dt::T # step size
    dtnew::T
    tdir::T
    p::P # parameter container
    u_modified::Bool
    tstops::TS
    tstops_idx::Int
    callback::CB
    save_everystep::Bool
    saveat::ST
    cur_t::Int
    step_idx::Int
    event_last_time::Int
    vector_event_last_time::Int
    last_event_error::T
    k1::S # interpolants
    k2::S # interpolants
    tab::TabType
    qold::Q
    abstol::TOL
    reltol::TOL
    internalnorm::N # function that computes the error EEst based on state
    retcode::DiffEqBase.ReturnCode.T
end
const GPUAKvaerno5I = GPUAKvaerno5Integrator
"""
    init(alg::GPUKvaerno5, f, IIP, u0, t0, tf, dt, p, abstol, reltol,
         internalnorm, tstops, callback, saveat)

Construct an adaptive-step `GPUAKvaerno5Integrator` from `t0` to `tf`.
"""
@inline function init(alg::GPUKvaerno5, f::F, IIP::Bool, u0::S, t0::T, tf::T,
        dt::T, p::P, abstol::TOL, reltol::TOL, internalnorm::N, tstops::TS,
        callback::CB, saveat::ST) where {F, P, S, T, N, TOL, TS, CB, ST}
    !IIP && @assert S <: SArray
    # Initial value of the PI-controller memory.
    qoldinit = T(1e-4)
    # "No event yet" sentinels for the callback machinery.
    event_last_time = 1
    vector_event_last_time = 0
    last_event_error = zero(T)
    tab = Kvaerno5Tableau(T, T)
    return GPUAKvaerno5I{IIP, S, T, ST, P, F, N, TOL, typeof(qoldinit), TS, CB,
        typeof(tab), typeof(alg)}(alg, f, copy(u0), copy(u0), copy(u0), t0,
        t0, t0, tf, dt, dt, sign(tf - t0), p, true, tstops, 1, callback,
        false, saveat, 1, 1, event_last_time, vector_event_last_time,
        last_event_error, copy(u0), copy(u0), tab, qoldinit, abstol, reltol,
        internalnorm, DiffEqBase.ReturnCode.Default)
end
| DiffEqGPU | https://github.com/SciML/DiffEqGPU.jl.git |
|
[
"MIT"
] | 3.4.1 | 7a714b856fe00d6194774bc38e387375f06963fe | code | 3342 | # Credits: StaticArrays.jl
# https://github.com/JuliaArrays/StaticArrays.jl/blob/master/src/solve.jl
# Entry point: dispatch on the compile-time sizes of `A` and `b` so the
# unrolled 1×1/2×2/3×3 kernels are selected when available.
@inline function linear_solve(A::StaticMatrix, b::StaticVecOrMat)
    return _linear_solve(Size(A), Size(b), A, b)
end
@inline function _linear_solve(::Size{(1, 1)},
        ::Size{(1,)},
        a::StaticMatrix{<:Any, <:Any, Ta},
        b::StaticVector{<:Any, Tb}) where {Ta, Tb}
    # 1×1 system: a single scalar left-division.
    @inbounds x = a[1] \ b[1]
    return similar_type(b, typeof(x))(x)
end
@inline function _linear_solve(::Size{(2, 2)},
        ::Size{(2,)},
        a::StaticMatrix{<:Any, <:Any, Ta},
        b::StaticVector{<:Any, Tb}) where {Ta, Tb}
    # 2×2 system solved by Cramer's rule.
    d = det(a)
    # Result eltype: promotion of Ta*Tb divided by the determinant's type.
    T = typeof((one(Ta) * zero(Tb) + one(Ta) * zero(Tb)) / d)
    @inbounds x1 = (a[2, 2] * b[1] - a[1, 2] * b[2]) / d
    @inbounds x2 = (a[1, 1] * b[2] - a[2, 1] * b[1]) / d
    return similar_type(b, T)(x1, x2)
end
# 3×3 system solved by Cramer's rule via cofactor expansion.
@inline function _linear_solve(::Size{(3, 3)},
        ::Size{(3,)},
        a::StaticMatrix{<:Any, <:Any, Ta},
        b::StaticVector{<:Any, Tb}) where {Ta, Tb}
    d = det(a)
    # Result eltype: promotion of Ta*Tb divided by the determinant's type.
    T = typeof((one(Ta) * zero(Tb) + one(Ta) * zero(Tb)) / d)
    @inbounds return similar_type(b, T)(((a[2, 2] * a[3, 3] - a[2, 3] * a[3, 2]) * b[1] +
                                         (a[1, 3] * a[3, 2] - a[1, 2] * a[3, 3]) * b[2] +
                                         (a[1, 2] * a[2, 3] - a[1, 3] * a[2, 2]) * b[3]) / d,
                                        ((a[2, 3] * a[3, 1] - a[2, 1] * a[3, 3]) * b[1] +
                                         (a[1, 1] * a[3, 3] - a[1, 3] * a[3, 1]) * b[2] +
                                         (a[1, 3] * a[2, 1] - a[1, 1] * a[2, 3]) * b[3]) / d,
                                        ((a[2, 1] * a[3, 2] - a[2, 2] * a[3, 1]) * b[1] +
                                         (a[1, 2] * a[3, 1] - a[1, 1] * a[3, 2]) * b[2] +
                                         (a[1, 1] * a[2, 2] - a[1, 2] * a[2, 1]) * b[3]) / d)
end
# Matrix right-hand sides for the unrolled 2×2 and 3×3 kernels: solve
# column-by-column using the vector-RHS methods above.
for Sa in [(2, 2), (3, 3)] # not needed for Sa = (1, 1);
    @eval begin
        @inline function _linear_solve(::Size{$Sa},
                ::Size{Sb},
                a::StaticMatrix{<:Any, <:Any, Ta},
                b::StaticMatrix{<:Any, <:Any, Tb}) where {Sb, Ta, Tb}
            d = det(a)
            T = typeof((one(Ta) * zero(Tb) + one(Ta) * zero(Tb)) / d)
            if isbitstype(T)
                # This if block can be removed when https://github.com/JuliaArrays/StaticArrays.jl/pull/749 is merged.
                c = similar(b, T)
                for col in 1:Sb[2]
                    @inbounds c[:, col] = _linear_solve(Size($Sa),
                        Size($Sa[1]),
                        a,
                        b[:, col])
                end
                return similar_type(b, T)(c)
            else
                return _linear_solve_general($(Size(Sa)), Size(Sb), a, b)
            end
        end
    end # @eval
end
# Fallback for sizes without a hand-unrolled kernel: generic LU-based solve.
@inline _linear_solve(sa::Size, sb::Size, a::StaticMatrix, b::StaticVecOrMat) =
    _linear_solve_general(sa, sb, a, b)
# Generic solve via static LU: dimension check at generation time, then
# forward/back substitution with the pivoted right-hand side.
@generated function _linear_solve_general(::Size{Sa},
        ::Size{Sb},
        a::StaticMatrix{<:Any, <:Any, Ta},
        b::StaticVecOrMat{Tb}) where {Sa, Sb, Ta, Tb}
    if Sa[1] != Sb[1]
        return quote
            throw(DimensionMismatch("Left and right hand side first dimensions do not match in backdivide (got sizes $Sa and $Sb)"))
        end
    end
    quote
        @_inline_meta
        LUp = static_lu(a)
        # Permute b by the pivot vector, then solve L then U.
        LUp.U \ (LUp.L \ $(length(Sb) > 1 ? :(b[LUp.p, :]) : :(b[LUp.p])))
    end
end
| DiffEqGPU | https://github.com/SciML/DiffEqGPU.jl.git |
|
[
"MIT"
] | 3.4.1 | 7a714b856fe00d6194774bc38e387375f06963fe | code | 5006 |
#Credits: StaticArrays.jl
# https://github.com/JuliaArrays/StaticArrays.jl/blob/master/src/lu.jl
# LU decomposition
# Accepted pivot arguments: the legacy Val{true}/Val{false} plus, on Julia
# ≥ 1.7, the LinearAlgebra PivotingStrategy singletons.
pivot_options = if isdefined(LinearAlgebra, :PivotingStrategy) # introduced in Julia v1.7
    (:(Val{true}), :(Val{false}), :NoPivot, :RowMaximum)
else
    (:(Val{true}), :(Val{false}))
end
for pv in pivot_options
    # ... define each `pivot::Val{true/false}` method individually to avoid ambiguties
    @eval function static_lu(A::StaticLUMatrix, pivot::$pv; check = true)
        L, U, p = _lu(A, pivot, check)
        LU(L, U, p)
    end
    # For the square version, return explicit lower and upper triangular matrices.
    # We would do this for the rectangular case too, but Base doesn't support that.
    @eval function static_lu(A::StaticLUMatrix{N, N}, pivot::$pv; check = true) where {N}
        L, U, p = _lu(A, pivot, check)
        LU(LowerTriangular(L), UpperTriangular(U), p)
    end
end
# Default: partial (row) pivoting.
static_lu(A::StaticLUMatrix; check = true) = static_lu(A, Val(true); check = check)
# location of the first zero on the diagonal, 0 when not found
# location of the first zero on the diagonal, 0 when not found
function _first_zero_on_diagonal(A::StaticLUMatrix{M, N, T}) where {M, N, T}
    if @generated
        # Generated path: fully unrolled diagonal scan.
        quote
            $(map(i -> :(A[$i, $i] == zero(T) && return $i), 1:min(M, N))...)
            0
        end
    else
        # Fallback interpreter path: plain loop over the diagonal.
        for i in 1:min(M, N)
            A[i, i] == 0 && return i
        end
        0
    end
end
# Normalize the pivot argument to Val{true}/Val{false} at generation time,
# run the recursive factorization, and optionally check for singularity.
@generated function _lu(A::StaticLUMatrix{M, N, T}, pivot, check) where {M, N, T}
    _pivot = if isdefined(LinearAlgebra, :PivotingStrategy) # v1.7 feature
        pivot === RowMaximum ? Val(true) : pivot === NoPivot ? Val(false) : pivot()
    else
        pivot()
    end
    quote
        L, U, P = __lu(A, $(_pivot))
        if check
            # A zero on U's diagonal means the factorization is singular.
            i = _first_zero_on_diagonal(U)
            i == 0 || throw(SingularException(i))
        end
        L, U, P
    end
end
# Base cases of the recursive LU: empty, single-row, and single-column-free
# shapes where the factors can be written down directly.
function __lu(A::StaticMatrix{0, 0, T}, ::Val{Pivot}) where {T, Pivot}
    (SMatrix{0, 0, typeof(one(T))}(), A, SVector{0, Int}())
end
function __lu(A::StaticMatrix{0, 1, T}, ::Val{Pivot}) where {T, Pivot}
    (SMatrix{0, 0, typeof(one(T))}(), A, SVector{0, Int}())
end
function __lu(A::StaticMatrix{0, N, T}, ::Val{Pivot}) where {T, N, Pivot}
    (SMatrix{0, 0, typeof(one(T))}(), A, SVector{0, Int}())
end
function __lu(A::StaticMatrix{1, 0, T}, ::Val{Pivot}) where {T, Pivot}
    (SMatrix{1, 0, typeof(one(T))}(), SMatrix{0, 0, T}(), SVector{1, Int}(1))
end
function __lu(A::StaticMatrix{M, 0, T}, ::Val{Pivot}) where {T, M, Pivot}
    (SMatrix{M, 0, typeof(one(T))}(), SMatrix{0, 0, T}(), SVector{M, Int}(1:M))
end
function __lu(A::StaticMatrix{1, 1, T}, ::Val{Pivot}) where {T, Pivot}
    (SMatrix{1, 1}(one(T)), A, SVector(1))
end
function __lu(A::LinearAlgebra.HermOrSym{T, <:StaticMatrix{1, 1, T}},
        ::Val{Pivot}) where {T, Pivot}
    (SMatrix{1, 1}(one(T)), A.data, SVector(1))
end
function __lu(A::StaticMatrix{1, N, T}, ::Val{Pivot}) where {N, T, Pivot}
    (SMatrix{1, 1, T}(one(T)), A, SVector{1, Int}(1))
end
# Single-column case: pick the pivot row (largest magnitude when Pivot),
# U is the 1×1 pivot, L is the column scaled by its inverse.
function __lu(A::StaticMatrix{M, 1}, ::Val{Pivot}) where {M, Pivot}
    @inbounds begin
        kp = 1
        if Pivot
            # Partial pivoting: find the row with the largest magnitude.
            amax = abs(A[1, 1])
            for i in 2:M
                absi = abs(A[i, 1])
                if absi > amax
                    kp = i
                    amax = absi
                end
            end
        end
        ps = tailindices(Val{M})
        if kp != 1
            # Swap row 1 with the pivot row in the permutation tail.
            ps = setindex(ps, 1, kp - 1)
        end
        U = SMatrix{1, 1}(A[kp, 1])
        # Scale first column
        Akkinv = inv(A[kp, 1])
        Ls = A[ps, 1] * Akkinv
        if !isfinite(Akkinv)
            # Singular pivot: zero the multipliers instead of propagating Inf/NaN.
            Ls = zeros(typeof(Ls))
        end
        L = [SVector{1}(one(eltype(Ls))); Ls]
        p = [SVector{1, Int}(kp); ps]
    end
    return (SMatrix{M, 1}(L), U, p)
end
# General recursive LU factorization (one column eliminated per level): pivot
# the first column, scale it to obtain L's first column, then recurse on the
# Schur complement of the trailing block and assemble the block-structured
# `L`, `U` and permutation `p`.
function __lu(A::StaticLUMatrix{M, N, T}, ::Val{Pivot}) where {M, N, T, Pivot}
    @inbounds begin
        kp = 1
        if Pivot
            # Partial pivoting on the first column.
            amax = abs(A[1, 1])
            for i in 2:M
                absi = abs(A[i, 1])
                if absi > amax
                    kp = i
                    amax = absi
                end
            end
        end
        ps = tailindices(Val{M})
        if kp != 1
            ps = setindex(ps, 1, kp - 1)
        end
        Ufirst = SMatrix{1, N}(A[kp, :])
        # Scale first column
        Akkinv = inv(A[kp, 1])
        Ls = A[ps, 1] * Akkinv
        if !isfinite(Akkinv)
            # Zero pivot: leave the multipliers zero; the singularity is
            # detected later by `_first_zero_on_diagonal`.
            Ls = zeros(typeof(Ls))
        end
        # Update the rest
        Arest = A[ps, tailindices(Val{N})] - Ls * Ufirst[:, tailindices(Val{N})]
        Lrest, Urest, prest = __lu(Arest, Val(Pivot))
        # Compose the permutation and assemble the block L and U factors.
        p = [SVector{1, Int}(kp); ps[prest]]
        L = [[SVector{1}(one(eltype(Ls))); Ls[prest]] [zeros(typeof(SMatrix{1}(Lrest[1,
                                                                                     :])));
                                                       Lrest]]
        U = [Ufirst; [zeros(typeof(Urest[:, 1])) Urest]]
    end
    return (L, U, p)
end
# `SVector(2, 3, …, M)`: the indices of all rows below the first, built at
# compile time so that indexing with it keeps the result statically sized.
@generated function tailindices(::Type{Val{M}}) where {M}
    :(SVector{$(M - 1), Int}($(tuple(2:M...))))
end
| DiffEqGPU | https://github.com/SciML/DiffEqGPU.jl.git |
|
[
"MIT"
] | 3.4.1 | 7a714b856fe00d6194774bc38e387375f06963fe | code | 2192 | abstract type AbstractNLSolver end
# Supertype for caches associated with nonlinear solvers.
abstract type AbstractNLSolverCache end
# Immutable state container for the simplified Newton iteration (`nlsolve`)
# used by the implicit GPU integrators. Fields are replaced functionally with
# `@set!` rather than mutated.
struct NLSolver{uType, gamType, tmpType, tType, JType, WType, pType} <: AbstractNLSolver
    z::uType      # current stage value (the nonlinear iterate)
    tmp::uType    # DIRK and multistep methods only use tmp
    tmp2::tmpType # for GLM if necessary
    ztmp::uType   # scratch stage value
    γ::gamType    # implicit coefficient of the current stage
    c::tType      # stage time fraction; the stage is evaluated at `t + c*dt`
    α::tType
    κ::tType      # tolerance coefficient (set to 1/100 in `build_nlsolver`)
    J::JType      # Jacobian closure from `build_J_W`
    W::WType      # iteration-matrix closure `-I + γ*dt*J` from `build_J_W`
    dt::tType
    t::tType
    p::pType      # problem parameters
    iter::Int     # iteration counter
    maxiters::Int # maximum number of Newton iterations
end
# Convenience constructor: converts `c`, `α` and `κ` to `tType` and infers the
# remaining type parameters from the arguments. `tmp2` defaults to `nothing`
# and is only needed by methods that use a second temporary.
function NLSolver{tType}(z, tmp, ztmp, γ, c, α, κ, J, W, dt, t, p,
        iter, maxiters, tmp2 = nothing) where {tType}
    cT = convert(tType, c)
    αT = convert(tType, α)
    κT = convert(tType, κ)
    return NLSolver{typeof(z), typeof(γ), typeof(tmp2), tType, typeof(J),
        typeof(W), typeof(p)}(z, tmp, tmp2, ztmp, γ, cT, αT, κT,
        J, W, dt, t, p, iter, maxiters)
end
# Build closures for the Jacobian `J(u, p, t)` and the iteration matrix
# `W(u, p, t) = -I + γ*dt*J(u, p, t)`. Note the sign convention: `W` is the
# negated Newton matrix, which `nlsolve` pairs with the residual `dt*f - z`.
# The Jacobian is, in order of preference: the user-supplied `f.jac`,
# a ForwardDiff Jacobian (when the algorithm enables autodiff), or a finite
# difference approximation.
@inline function build_J_W(alg, f, γ, dt)
    J(u, p, t) =
        if DiffEqBase.has_jac(f)
            f.jac(u, p, t)
        elseif alg_autodiff(alg)
            ForwardDiff.jacobian(u -> f(u, p, t), u)
        else
            finite_diff_jac(u -> f(u, p, t), f.jac_prototype, u)
        end
    W(u, p, t) = -LinearAlgebra.I + γ * dt * J(u, p, t)
    J, W
end
# Build a closure `tgrad(u, p, t)` for the time derivative ∂f/∂t, preferring a
# user-supplied `f.tgrad`, then ForwardDiff, then a forward finite difference.
@inline function build_tgrad(alg, f)
    function tgrad(u, p, t)
        if DiffEqBase.has_tgrad(f)
            f.tgrad(u, p, t)
        elseif alg_autodiff(alg)
            ForwardDiff.derivative(t -> f(u, p, t), t)
        else
            # derivative using finite difference
            begin
                # Step size ≈ sqrt(eps): standard choice for a first-order
                # forward difference.
                dt = sqrt(eps(eltype(t)))
                (f(u, p, t + dt) - f(u, p, t)) / dt
            end
        end
    end
    tgrad
end
# Convenience method: default the coefficient `α` to 1.
@inline function build_nlsolver(alg, u, p, t, dt, f, γ, c)
    return build_nlsolver(alg, u, p, t, dt, f, γ, c, 1)
end
# Assemble an `NLSolver` for one implicit stage: the stage fields all start
# from the current state `u`, the Jacobian/W closures come from `build_J_W`,
# and the iteration limit and tolerance coefficient are fixed constants.
@inline function build_nlsolver(alg, u, p,
        t, dt,
        f,
        γ, c, α)
    # define fields of non-linear solver
    z = u
    tmp = u
    ztmp = u
    J, W = build_J_W(alg, f, γ, dt)
    maxiters = 30
    κ = 1 / 100
    return NLSolver{typeof(dt)}(z, tmp, ztmp, γ, c, α, κ,
        J, W, dt, t, p, 0, maxiters)
end
| DiffEqGPU | https://github.com/SciML/DiffEqGPU.jl.git |
|
[
"MIT"
] | 3.4.1 | 7a714b856fe00d6194774bc38e387375f06963fe | code | 693 | @inline function nlsolve(nlsolver::NLType, integrator::IntegType) where {NLType, IntegType}
    # Simplified Newton iteration for the stage equation z = dt*f(tmp + γ*z).
    # Each iteration solves W*Δz = dt*f - z with W = -I + γ*dt*J (see
    # `build_J_W`; the sign conventions cancel so this is a standard Newton
    # step) and updates z ← z - Δz, stopping when the residual norm at the
    # updated iterate drops below ~100 machine epsilons or after `maxiters`.
    maxiters = nlsolver.maxiters
    dt = nlsolver.dt
    p = integrator.p
    t = nlsolver.t
    γ = nlsolver.γ
    tmp = nlsolver.tmp
    z_i = nlsolver.z
    c = nlsolver.c
    abstol = 100eps(eltype(z_i))
    for i in 1:maxiters
        W_eval = nlsolver.W(tmp + γ * z_i, p, t + c * dt)
        f_eval = integrator.f(tmp + γ * z_i, p, t + c * dt)
        f_rhs = dt * f_eval - z_i
        Δz = linear_solve(W_eval, f_rhs)
        z_i = z_i - Δz
        # Convergence check on the residual at the updated iterate.
        if norm(dt * integrator.f(tmp + γ * z_i, p, t + c * dt) - z_i) < abstol
            break
        end
    end
    # Return the solver with the converged (or last) stage value stored.
    @set! nlsolver.z = z_i
end
| DiffEqGPU | https://github.com/SciML/DiffEqGPU.jl.git |
|
[
"MIT"
] | 3.4.1 | 7a714b856fe00d6194774bc38e387375f06963fe | code | 1923 | @kernel function em_kernel(@Const(probs), _us, _ts, dt,
        saveat, ::Val{save_everystep}) where {save_everystep}
    # GPU kernel: fixed-step Euler–Maruyama integration of one SDE trajectory
    # per thread, writing times/states into the thread's column of `_ts`/`_us`.
    i = @index(Global, Linear)
    # get the actual problem for this thread
    prob = @inbounds probs[i]
    Random.seed!(prob.seed)
    # get the input/output arrays for this thread
    ts = @inbounds view(_ts, :, i)
    us = @inbounds view(_us, :, i)
    # A per-problem `saveat` (from prob.kwargs) overrides the global one.
    _saveat = get(prob.kwargs, :saveat, nothing)
    saveat = _saveat === nothing ? saveat : _saveat
    f = prob.f
    g = prob.g
    u0 = prob.u0
    tspan = prob.tspan
    p = prob.p
    is_diagonal_noise = SciMLBase.is_diagonal_noise(prob)
    cur_t = 0
    if saveat !== nothing
        cur_t = 1
        if tspan[1] == saveat[1]
            cur_t += 1
            @inbounds us[1] = u0
        end
    else
        @inbounds ts[1] = tspan[1]
        @inbounds us[1] = u0
    end
    sqdt = sqrt(dt)
    u = copy(u0)
    t = copy(tspan[1])
    n = length(tspan[1]:dt:tspan[2])
    for j in 2:n
        uprev = u
        # Euler–Maruyama update: u += f*dt + g*ΔW with ΔW = sqrt(dt)*randn,
        # elementwise for diagonal noise, matrix-vector otherwise.
        if is_diagonal_noise
            u = uprev + f(uprev, p, t) * dt +
                sqdt * g(uprev, p, t) .* randn(typeof(u0))
        else
            u = uprev + f(uprev, p, t) * dt +
                sqdt * g(uprev, p, t) * randn(typeof(prob.noise_rate_prototype[1, :]))
        end
        t += dt
        if saveat === nothing && save_everystep
            @inbounds us[j] = u
            @inbounds ts[j] = t
        elseif saveat !== nothing
            # Emit every requested save time that falls within this step.
            while cur_t <= length(saveat) && saveat[cur_t] <= t
                savet = saveat[cur_t]
                Θ = (savet - (t - dt)) / dt
                # Linear Interpolation
                @inbounds us[cur_t] = uprev + (u - uprev) * Θ
                @inbounds ts[cur_t] = savet
                cur_t += 1
            end
        end
    end
    if saveat === nothing && !save_everystep
        # Only start and end states are stored.
        @inbounds us[2] = u
        @inbounds ts[2] = t
    end
end
| DiffEqGPU | https://github.com/SciML/DiffEqGPU.jl.git |
|
[
"MIT"
] | 3.4.1 | 7a714b856fe00d6194774bc38e387375f06963fe | code | 5664 | @inline function step!(integ::GPUKvaerno3I{false, S, T}, ts, us) where {T, S}
    # Fixed-step Kvaerno3 step (four implicit stages, solved with `nlsolve`)
    # for the out-of-place GPU integrator. Returns whether a callback saved.
    dt = integ.dt
    t = integ.t
    p = integ.p
    tmp = integ.tmp
    f = integ.f
    integ.uprev = integ.u
    uprev = integ.u
    @unpack γ, a31, a32, a41, a42, a43, btilde1, btilde2, btilde3, btilde4, c3, α31, α32 = integ.tab
    integ.tprev = t
    saved_in_cb = false
    adv_integ = true
    ## Check if tstops are within the range of time-series
    if integ.tstops !== nothing && integ.tstops_idx <= length(integ.tstops) &&
       (integ.tstops[integ.tstops_idx] - integ.t - integ.dt - 100 * eps(T) < 0)
        integ.t = integ.tstops[integ.tstops_idx]
        ## Set correct dt
        dt = integ.t - integ.tprev
        integ.tstops_idx += 1
    else
        ##Advance the integrator
        integ.t += dt
    end
    if integ.u_modified
        k1 = f(uprev, p, t)
        integ.u_modified = false
    else
        @inbounds k1 = integ.k1
    end
    ## Build nlsolver
    nlsolver = build_nlsolver(integ.alg, integ.u, integ.p, integ.t, integ.dt, integ.f,
        integ.tab.γ,
        integ.tab.c3)
    ## Steps
    # FSAL Step 1
    z₁ = dt * k1
    ##### Step 2
    @set! nlsolver.z = z₁
    @set! nlsolver.tmp = uprev + γ * z₁
    @set! nlsolver.c = γ
    nlsolver = nlsolve(nlsolver, integ)
    z₂ = nlsolver.z
    ##### Step 3
    # Initial guess extrapolated from the earlier stages (α coefficients).
    z₃ = α31 * z₁ + α32 * z₂
    @set! nlsolver.z = z₃
    @set! nlsolver.tmp = uprev + a31 * z₁ + a32 * z₂
    @set! nlsolver.c = c3
    nlsolver = nlsolve(nlsolver, integ)
    z₃ = nlsolver.z
    ################################## Solve Step 4
    @set! nlsolver.z = z₄ = a31 * z₁ + a32 * z₂ + γ * z₃ # use yhat as prediction
    @set! nlsolver.tmp = uprev + a41 * z₁ + a42 * z₂ + a43 * z₃
    @set! nlsolver.c = one(nlsolver.c)
    nlsolver = nlsolve(nlsolver, integ)
    z₄ = nlsolver.z
    integ.u = nlsolver.tmp + γ * z₄
    k2 = z₄ ./ dt
    @inbounds begin # Necessary for interpolation
        integ.k1 = f(integ.u, p, t)
        integ.k2 = k2
    end
    _, saved_in_cb = handle_callbacks!(integ, ts, us)
    return saved_in_cb
end
# Adaptive Kvaerno3 step: repeats the four-stage implicit step with a shrinking
# `dt` until the embedded error estimate `EEst` ≤ 1, using a PI step-size
# controller, then commits the state and proposes the next step size.
@inline function step!(integ::GPUAKvaerno3I{false, S, T}, ts, us) where {T, S}
    beta1, beta2, qmax, qmin, gamma, qoldinit, _ = build_adaptive_controller_cache(integ.alg,
        T)
    dt = integ.dtnew
    t = integ.t
    p = integ.p
    tf = integ.tf
    tmp = integ.tmp
    f = integ.f
    integ.uprev = integ.u
    uprev = integ.u
    qold = integ.qold
    abstol = integ.abstol
    reltol = integ.reltol
    @unpack γ, a31, a32, a41, a42, a43, btilde1, btilde2, btilde3, btilde4, c3, α31, α32 = integ.tab
    if integ.u_modified
        k1 = f(uprev, p, t)
        integ.u_modified = false
    else
        @inbounds k1 = integ.k1
    end
    EEst = convert(T, Inf)
    while EEst > convert(T, 1.0)
        dt < convert(T, 1.0f-14) && error("dt<dtmin")
        ## Steps
        nlsolver = build_nlsolver(integ.alg, integ.u, integ.p, integ.t, dt, integ.f,
            integ.tab.γ,
            integ.tab.c3)
        # FSAL Step 1
        k1 = f(uprev, p, t)
        z₁ = dt * k1
        ##### Step 2
        @set! nlsolver.z = z₁
        @set! nlsolver.tmp = uprev + γ * z₁
        @set! nlsolver.c = γ
        nlsolver = nlsolve(nlsolver, integ)
        z₂ = nlsolver.z
        ##### Step 3
        z₃ = α31 * z₁ + α32 * z₂
        @set! nlsolver.z = z₃
        @set! nlsolver.tmp = uprev + a31 * z₁ + a32 * z₂
        @set! nlsolver.c = c3
        nlsolver = nlsolve(nlsolver, integ)
        z₃ = nlsolver.z
        ################################## Solve Step 4
        @set! nlsolver.z = z₄ = a31 * z₁ + a32 * z₂ + γ * z₃ # use yhat as prediction
        @set! nlsolver.tmp = uprev + a41 * z₁ + a42 * z₂ + a43 * z₃
        @set! nlsolver.c = one(nlsolver.c)
        nlsolver = nlsolve(nlsolver, integ)
        z₄ = nlsolver.z
        u = nlsolver.tmp + γ * z₄
        k2 = z₄ ./ dt
        # Smoothed embedded error estimate: solve W * err = Σ btildeᵢ * zᵢ,
        # then take the tolerance-scaled norm.
        W_eval = nlsolver.W(nlsolver.tmp + nlsolver.γ * z₄, p, t + nlsolver.c * dt)
        err = linear_solve(W_eval,
            btilde1 * z₁ + btilde2 * z₂ + btilde3 * z₃ + btilde4 * z₄)
        tmp = (err) ./
              (abstol .+ max.(abs.(uprev), abs.(u)) * reltol)
        EEst = DiffEqBase.ODE_DEFAULT_NORM(tmp, t)
        if iszero(EEst)
            q = inv(qmax)
        else
            q11 = EEst^beta1
            q = q11 / (qold^beta2)
        end
        if EEst > 1
            # Step rejected: shrink dt and retry.
            dt = dt / min(inv(qmin), q11 / gamma)
        else # EEst <= 1
            # Step accepted: propose next dt and commit integrator state.
            q = max(inv(qmax), min(inv(qmin), q / gamma))
            qold = max(EEst, qoldinit)
            dtnew = dt / q #dtnew
            dtnew = min(abs(dtnew), abs(tf - t - dt))
            @inbounds begin # Necessary for interpolation
                integ.k1 = k1
                integ.k2 = k2
            end
            integ.dt = dt
            integ.dtnew = dtnew
            integ.qold = qold
            integ.tprev = t
            integ.u = u
            if (tf - t - dt) < convert(T, 1.0f-14)
                integ.t = tf
            else
                if integ.tstops !== nothing && integ.tstops_idx <= length(integ.tstops) &&
                   integ.tstops[integ.tstops_idx] - integ.t - integ.dt -
                   100 * eps(T) < 0
                    integ.t = integ.tstops[integ.tstops_idx]
                    integ.u = integ(integ.t)
                    dt = integ.t - integ.tprev
                    integ.tstops_idx += 1
                else
                    ##Advance the integrator
                    integ.t += dt
                end
            end
        end
    end
    _, saved_in_cb = handle_callbacks!(integ, ts, us)
    return saved_in_cb
end
| DiffEqGPU | https://github.com/SciML/DiffEqGPU.jl.git |
|
[
"MIT"
] | 3.4.1 | 7a714b856fe00d6194774bc38e387375f06963fe | code | 7736 | @inline function step!(integ::GPUKvaerno5I{false, S, T}, ts, us) where {T, S}
    # Fixed-step Kvaerno5 step (seven implicit stages, solved with `nlsolve`)
    # for the out-of-place GPU integrator. Returns whether a callback saved.
    dt = integ.dt
    t = integ.t
    p = integ.p
    tmp = integ.tmp
    f = integ.f
    integ.uprev = integ.u
    uprev = integ.u
    @unpack γ, a31, a32, a41, a42, a43, a51, a52, a53, a54, a61, a63, a64, a65, a71, a73, a74, a75, a76, c3, c4, c5, c6 = integ.tab
    @unpack btilde1, btilde3, btilde4, btilde5, btilde6, btilde7 = integ.tab
    @unpack α31, α32, α41, α42, α43, α51, α52, α53, α61, α62, α63 = integ.tab
    integ.tprev = t
    saved_in_cb = false
    adv_integ = true
    ## Check if tstops are within the range of time-series
    if integ.tstops !== nothing && integ.tstops_idx <= length(integ.tstops) &&
       (integ.tstops[integ.tstops_idx] - integ.t - integ.dt - 100 * eps(T) < 0)
        integ.t = integ.tstops[integ.tstops_idx]
        ## Set correct dt
        dt = integ.t - integ.tprev
        integ.tstops_idx += 1
    else
        ##Advance the integrator
        integ.t += dt
    end
    if integ.u_modified
        k1 = f(uprev, p, t)
        integ.u_modified = false
    else
        @inbounds k1 = integ.k1
    end
    ## Build nlsolver
    nlsolver = build_nlsolver(integ.alg, integ.u, integ.p, integ.t, integ.dt, integ.f,
        integ.tab.γ,
        integ.tab.c3)
    ## Steps
    # FSAL Step 1
    z₁ = dt * k1
    ##### Step 2
    @set! nlsolver.z = z₁
    @set! nlsolver.tmp = uprev + γ * z₁
    @set! nlsolver.c = γ
    nlsolver = nlsolve(nlsolver, integ)
    z₂ = nlsolver.z
    ##### Step 3
    # Each stage's initial guess is extrapolated from the earlier stages
    # (α coefficients), then refined by the Newton solve.
    z₃ = α31 * z₁ + α32 * z₂
    @set! nlsolver.z = z₃
    @set! nlsolver.tmp = uprev + a31 * z₁ + a32 * z₂
    @set! nlsolver.c = c3
    nlsolver = nlsolve(nlsolver, integ)
    z₃ = nlsolver.z
    ################################## Solve Step 4
    z₄ = α41 * z₁ + α42 * z₂ + α43 * z₃
    @set! nlsolver.z = z₄
    @set! nlsolver.tmp = uprev + a41 * z₁ + a42 * z₂ + a43 * z₃
    @set! nlsolver.c = c4
    nlsolver = nlsolve(nlsolver, integ)
    z₄ = nlsolver.z
    z₅ = α51 * z₁ + α52 * z₂ + α53 * z₃
    @set! nlsolver.z = z₅
    @set! nlsolver.tmp = uprev + a51 * z₁ + a52 * z₂ + a53 * z₃ + a54 * z₄
    @set! nlsolver.c = c5
    nlsolver = nlsolve(nlsolver, integ)
    z₅ = nlsolver.z
    z₆ = α61 * z₁ + α62 * z₂ + α63 * z₃
    @set! nlsolver.z = z₆
    @set! nlsolver.tmp = uprev + a61 * z₁ + a63 * z₃ + a64 * z₄ + a65 * z₅
    @set! nlsolver.c = c6
    nlsolver = nlsolve(nlsolver, integ)
    z₆ = nlsolver.z
    z₇ = a61 * z₁ + a63 * z₃ + a64 * z₄ + a65 * z₅ + γ * z₆
    @set! nlsolver.z = z₇
    @set! nlsolver.tmp = uprev + a71 * z₁ + a73 * z₃ + a74 * z₄ + a75 * z₅ + a76 * z₆
    @set! nlsolver.c = one(nlsolver.c)
    nlsolver = nlsolve(nlsolver, integ)
    z₇ = nlsolver.z
    integ.u = nlsolver.tmp + γ * z₇
    k2 = z₇ ./ dt
    @inbounds begin # Necessary for interpolation
        integ.k1 = f(integ.u, p, t)
        integ.k2 = k2
    end
    _, saved_in_cb = handle_callbacks!(integ, ts, us)
    return saved_in_cb
end
# Adaptive Kvaerno5 step: repeats the seven-stage implicit step with a shrinking
# `dt` until the embedded error estimate `EEst` ≤ 1, using a PI step-size
# controller, then commits the state and proposes the next step size.
@inline function step!(integ::GPUAKvaerno5I{false, S, T}, ts, us) where {T, S}
    beta1, beta2, qmax, qmin, gamma, qoldinit, _ = build_adaptive_controller_cache(integ.alg,
        T)
    dt = integ.dtnew
    t = integ.t
    p = integ.p
    tf = integ.tf
    tmp = integ.tmp
    f = integ.f
    integ.uprev = integ.u
    uprev = integ.u
    qold = integ.qold
    abstol = integ.abstol
    reltol = integ.reltol
    @unpack γ, a31, a32, a41, a42, a43, a51, a52, a53, a54, a61, a63, a64, a65, a71, a73, a74, a75, a76, c3, c4, c5, c6 = integ.tab
    @unpack btilde1, btilde3, btilde4, btilde5, btilde6, btilde7 = integ.tab
    @unpack α31, α32, α41, α42, α43, α51, α52, α53, α61, α62, α63 = integ.tab
    if integ.u_modified
        k1 = f(uprev, p, t)
        integ.u_modified = false
    else
        @inbounds k1 = integ.k1
    end
    EEst = convert(T, Inf)
    while EEst > convert(T, 1.0)
        dt < convert(T, 1.0f-14) && error("dt<dtmin")
        ## Steps
        nlsolver = build_nlsolver(integ.alg, integ.u, integ.p, integ.t, dt, integ.f,
            integ.tab.γ,
            integ.tab.c3)
        # FSAL Step 1
        k1 = f(uprev, p, t)
        z₁ = dt * k1
        ##### Step 2
        @set! nlsolver.z = z₁
        @set! nlsolver.tmp = uprev + γ * z₁
        @set! nlsolver.c = γ
        nlsolver = nlsolve(nlsolver, integ)
        z₂ = nlsolver.z
        ##### Step 3
        z₃ = α31 * z₁ + α32 * z₂
        @set! nlsolver.z = z₃
        @set! nlsolver.tmp = uprev + a31 * z₁ + a32 * z₂
        @set! nlsolver.c = c3
        nlsolver = nlsolve(nlsolver, integ)
        z₃ = nlsolver.z
        ################################## Solve Step 4
        z₄ = α41 * z₁ + α42 * z₂ + α43 * z₃
        @set! nlsolver.z = z₄
        @set! nlsolver.tmp = uprev + a41 * z₁ + a42 * z₂ + a43 * z₃
        @set! nlsolver.c = c4
        nlsolver = nlsolve(nlsolver, integ)
        z₄ = nlsolver.z
        ################################## Solve Step 5
        z₅ = α51 * z₁ + α52 * z₂ + α53 * z₃
        @set! nlsolver.z = z₅
        @set! nlsolver.tmp = uprev + a51 * z₁ + a52 * z₂ + a53 * z₃ + a54 * z₄
        @set! nlsolver.c = c5
        nlsolver = nlsolve(nlsolver, integ)
        z₅ = nlsolver.z
        ################################## Solve Step 6
        z₆ = α61 * z₁ + α62 * z₂ + α63 * z₃
        @set! nlsolver.z = z₆
        @set! nlsolver.tmp = uprev + a61 * z₁ + a63 * z₃ + a64 * z₄ + a65 * z₅
        @set! nlsolver.c = c6
        nlsolver = nlsolve(nlsolver, integ)
        z₆ = nlsolver.z
        ################################## Solve Step 7
        z₇ = a61 * z₁ + a63 * z₃ + a64 * z₄ + a65 * z₅ + γ * z₆
        @set! nlsolver.z = z₇
        @set! nlsolver.tmp = uprev + a71 * z₁ + a73 * z₃ + a74 * z₄ + a75 * z₅ + a76 * z₆
        @set! nlsolver.c = one(nlsolver.c)
        nlsolver = nlsolve(nlsolver, integ)
        z₇ = nlsolver.z
        u = nlsolver.tmp + γ * z₇
        k2 = z₇ ./ dt
        # Smoothed embedded error estimate: solve W * err = Σ btildeᵢ * zᵢ.
        W_eval = nlsolver.W(nlsolver.tmp + nlsolver.γ * z₇, p, t + nlsolver.c * dt)
        err = linear_solve(W_eval,
            btilde1 * z₁ + btilde3 * z₃ + btilde4 * z₄ + btilde5 * z₅ + btilde6 * z₆ +
            btilde7 * z₇)
        tmp = (err) ./
              (abstol .+ max.(abs.(uprev), abs.(u)) * reltol)
        EEst = DiffEqBase.ODE_DEFAULT_NORM(tmp, t)
        if iszero(EEst)
            q = inv(qmax)
        else
            q11 = EEst^beta1
            q = q11 / (qold^beta2)
        end
        if EEst > 1
            # Step rejected: shrink dt and retry.
            dt = dt / min(inv(qmin), q11 / gamma)
        else # EEst <= 1
            # Step accepted: propose next dt and commit integrator state.
            q = max(inv(qmax), min(inv(qmin), q / gamma))
            qold = max(EEst, qoldinit)
            dtnew = dt / q #dtnew
            dtnew = min(abs(dtnew), abs(tf - t - dt))
            @inbounds begin # Necessary for interpolation
                integ.k1 = k1
                integ.k2 = k2
            end
            integ.dt = dt
            integ.dtnew = dtnew
            integ.qold = qold
            integ.tprev = t
            integ.u = u
            if (tf - t - dt) < convert(T, 1.0f-14)
                integ.t = tf
            else
                if integ.tstops !== nothing && integ.tstops_idx <= length(integ.tstops) &&
                   integ.tstops[integ.tstops_idx] - integ.t - integ.dt -
                   100 * eps(T) < 0
                    integ.t = integ.tstops[integ.tstops_idx]
                    integ.u = integ(integ.t)
                    dt = integ.t - integ.tprev
                    integ.tstops_idx += 1
                else
                    ##Advance the integrator
                    integ.t += dt
                end
            end
        end
    end
    _, saved_in_cb = handle_callbacks!(integ, ts, us)
    return saved_in_cb
end
| DiffEqGPU | https://github.com/SciML/DiffEqGPU.jl.git |
|
[
"MIT"
] | 3.4.1 | 7a714b856fe00d6194774bc38e387375f06963fe | code | 7635 | @inline function step!(integ::GPURodas4I{false, S, T}, ts, us) where {T, S}
    # Fixed-step Rodas4 (Rosenbrock) step: six linear solves against a single
    # W matrix per step; no Newton iteration is needed.
    dt = integ.dt
    t = integ.t
    p = integ.p
    tmp = integ.tmp
    f = integ.f
    integ.uprev = integ.u
    uprev = integ.u
    @unpack a21, a31, a32, a41, a42, a43, a51, a52, a53, a54, C21, C31, C32, C41, C42, C43,
    C51, C52, C53, C54, C61, C62, C63, C64, C65, γ, c2, c3, c4, d1, d2, d3, d4 = integ.tab
    integ.tprev = t
    saved_in_cb = false
    adv_integ = true
    ## Check if tstops are within the range of time-series
    if integ.tstops !== nothing && integ.tstops_idx <= length(integ.tstops) &&
       (integ.tstops[integ.tstops_idx] - integ.t - integ.dt - 100 * eps(T) < 0)
        integ.t = integ.tstops[integ.tstops_idx]
        ## Set correct dt
        dt = integ.t - integ.tprev
        integ.tstops_idx += 1
    else
        ##Advance the integrator
        integ.t += dt
    end
    if integ.u_modified
        k1 = f(uprev, p, t)
        integ.u_modified = false
    else
        @inbounds k1 = integ.k1
    end
    # Jacobian
    Jf, _ = build_J_W(integ.alg, f, γ, dt)
    J = Jf(uprev, p, t)
    Tgrad = build_tgrad(integ.alg, f)
    dT = Tgrad(uprev, p, t)
    # Precalculations
    dtC21 = C21 / dt
    dtC31 = C31 / dt
    dtC32 = C32 / dt
    dtC41 = C41 / dt
    dtC42 = C42 / dt
    dtC43 = C43 / dt
    dtC51 = C51 / dt
    dtC52 = C52 / dt
    dtC53 = C53 / dt
    dtC54 = C54 / dt
    dtC61 = C61 / dt
    dtC62 = C62 / dt
    dtC63 = C63 / dt
    dtC64 = C64 / dt
    dtC65 = C65 / dt
    dtd1 = dt * d1
    dtd2 = dt * d2
    dtd3 = dt * d3
    dtd4 = dt * d4
    dtgamma = dt * γ
    # Starting
    # Sign convention: W = -(I/(dt*γ) - J); every stage solves W*k = -rhs,
    # which is equivalent to the standard Rosenbrock linear system.
    W = J - I * inv(dtgamma)
    du = f(uprev, p, t)
    # Step 1
    linsolve_tmp = du + dtd1 * dT
    k1 = linear_solve(W, -linsolve_tmp)
    u = uprev + a21 * k1
    du = f(u, p, t + c2 * dt)
    # Step 2
    linsolve_tmp = du + dtd2 * dT + dtC21 * k1
    k2 = linear_solve(W, -linsolve_tmp)
    u = uprev + a31 * k1 + a32 * k2
    du = f(u, p, t + c3 * dt)
    # Step 3
    linsolve_tmp = du + dtd3 * dT + (dtC31 * k1 + dtC32 * k2)
    k3 = linear_solve(W, -linsolve_tmp)
    u = uprev + a41 * k1 + a42 * k2 + a43 * k3
    du = f(u, p, t + c4 * dt)
    # Step 4
    linsolve_tmp = du + dtd4 * dT + (dtC41 * k1 + dtC42 * k2 + dtC43 * k3)
    k4 = linear_solve(W, -linsolve_tmp)
    u = uprev + a51 * k1 + a52 * k2 + a53 * k3 + a54 * k4
    du = f(u, p, t + dt)
    # Step 5
    linsolve_tmp = du + (dtC52 * k2 + dtC54 * k4 + dtC51 * k1 + dtC53 * k3)
    k5 = linear_solve(W, -linsolve_tmp)
    u = u + k5
    du = f(u, p, t + dt)
    # Step 6
    linsolve_tmp = du + (dtC61 * k1 + dtC62 * k2 + dtC65 * k5 + dtC64 * k4 + dtC63 * k3)
    k6 = linear_solve(W, -linsolve_tmp)
    integ.u = u + k6
    @inbounds begin # Necessary for interpolation
        # Dense-output coefficients (h-weights) combine the stages.
        @unpack h21, h22, h23, h24, h25, h31, h32, h33, h34, h35 = integ.tab
        integ.k1 = h21 * k1 + h22 * k2 + h23 * k3 + h24 * k4 + h25 * k5
        integ.k2 = h31 * k1 + h32 * k2 + h33 * k3 + h34 * k4 + h35 * k5
        # integ.k1 = k1
        # integ.k2 = k2
    end
    _, saved_in_cb = handle_callbacks!(integ, ts, us)
    return saved_in_cb
end
# Adaptive Rodas4 step: repeats the six-stage Rosenbrock step with a shrinking
# `dt` until the error estimate (based on the final stage `k6`) satisfies
# `EEst` ≤ 1, using a PI step-size controller.
@inline function step!(integ::GPUARodas4I{false, S, T}, ts, us) where {T, S}
    beta1, beta2, qmax, qmin, gamma, qoldinit, _ = build_adaptive_controller_cache(integ.alg,
        T)
    dt = integ.dtnew
    t = integ.t
    p = integ.p
    tf = integ.tf
    tmp = integ.tmp
    f = integ.f
    integ.uprev = integ.u
    uprev = integ.u
    qold = integ.qold
    abstol = integ.abstol
    reltol = integ.reltol
    @unpack a21, a31, a32, a41, a42, a43, a51, a52, a53, a54, C21, C31, C32, C41, C42, C43,
    C51, C52, C53, C54, C61, C62, C63, C64, C65, γ, c2, c3, c4, d1, d2, d3,
    d4 = integ.tab
    # Jacobian
    # Note: `Jf` does not depend on dt, so it is safe to build once outside
    # the retry loop.
    Jf, _ = build_J_W(integ.alg, f, γ, dt)
    J = Jf(uprev, p, t)
    Tgrad = build_tgrad(integ.alg, f)
    dT = Tgrad(uprev, p, t)
    if integ.u_modified
        k1 = f(uprev, p, t)
        integ.u_modified = false
    else
        @inbounds k1 = integ.k1
    end
    EEst = convert(T, Inf)
    while EEst > convert(T, 1.0)
        dt < convert(T, 1.0f-14) && error("dt<dtmin")
        # Precalculations
        dtC21 = C21 / dt
        dtC31 = C31 / dt
        dtC32 = C32 / dt
        dtC41 = C41 / dt
        dtC42 = C42 / dt
        dtC43 = C43 / dt
        dtC51 = C51 / dt
        dtC52 = C52 / dt
        dtC53 = C53 / dt
        dtC54 = C54 / dt
        dtC61 = C61 / dt
        dtC62 = C62 / dt
        dtC63 = C63 / dt
        dtC64 = C64 / dt
        dtC65 = C65 / dt
        dtd1 = dt * d1
        dtd2 = dt * d2
        dtd3 = dt * d3
        dtd4 = dt * d4
        dtgamma = dt * γ
        # Starting
        W = J - I * inv(dtgamma)
        du = f(uprev, p, t)
        # Step 1
        linsolve_tmp = du + dtd1 * dT
        k1 = linear_solve(W, -linsolve_tmp)
        u = uprev + a21 * k1
        du = f(u, p, t + c2 * dt)
        # Step 2
        linsolve_tmp = du + dtd2 * dT + dtC21 * k1
        k2 = linear_solve(W, -linsolve_tmp)
        u = uprev + a31 * k1 + a32 * k2
        du = f(u, p, t + c3 * dt)
        # Step 3
        linsolve_tmp = du + dtd3 * dT + (dtC31 * k1 + dtC32 * k2)
        k3 = linear_solve(W, -linsolve_tmp)
        u = uprev + a41 * k1 + a42 * k2 + a43 * k3
        du = f(u, p, t + c4 * dt)
        # Step 4
        linsolve_tmp = du + dtd4 * dT + (dtC41 * k1 + dtC42 * k2 + dtC43 * k3)
        k4 = linear_solve(W, -linsolve_tmp)
        u = uprev + a51 * k1 + a52 * k2 + a53 * k3 + a54 * k4
        du = f(u, p, t + dt)
        # Step 5
        linsolve_tmp = du + (dtC52 * k2 + dtC54 * k4 + dtC51 * k1 + dtC53 * k3)
        k5 = linear_solve(W, -linsolve_tmp)
        u = u + k5
        du = f(u, p, t + dt)
        # Step 6
        linsolve_tmp = du + (dtC61 * k1 + dtC62 * k2 + dtC65 * k5 + dtC64 * k4 + dtC63 * k3)
        k6 = linear_solve(W, -linsolve_tmp)
        u = u + k6
        # The last stage increment doubles as the local error estimate.
        tmp = k6 ./ (abstol .+ max.(abs.(uprev), abs.(u)) * reltol)
        EEst = DiffEqBase.ODE_DEFAULT_NORM(tmp, t)
        if iszero(EEst)
            q = inv(qmax)
        else
            q11 = EEst^beta1
            q = q11 / (qold^beta2)
        end
        if EEst > 1
            # Step rejected: shrink dt and retry.
            dt = dt / min(inv(qmin), q11 / gamma)
        else # EEst <= 1
            # Step accepted: propose next dt and commit integrator state.
            q = max(inv(qmax), min(inv(qmin), q / gamma))
            qold = max(EEst, qoldinit)
            dtnew = dt / q #dtnew
            dtnew = min(abs(dtnew), abs(tf - t - dt))
            @inbounds begin # Necessary for interpolation
                @unpack h21, h22, h23, h24, h25, h31, h32, h33, h34, h35 = integ.tab
                integ.k1 = h21 * k1 + h22 * k2 + h23 * k3 + h24 * k4 + h25 * k5
                integ.k2 = h31 * k1 + h32 * k2 + h33 * k3 + h34 * k4 + h35 * k5
            end
            integ.dt = dt
            integ.dtnew = dtnew
            integ.qold = qold
            integ.tprev = t
            integ.u = u
            if (tf - t - dt) < convert(T, 1.0f-14)
                integ.t = tf
            else
                if integ.tstops !== nothing && integ.tstops_idx <= length(integ.tstops) &&
                   integ.tstops[integ.tstops_idx] - integ.t - integ.dt -
                   100 * eps(T) < 0
                    integ.t = integ.tstops[integ.tstops_idx]
                    integ.u = integ(integ.t)
                    dt = integ.t - integ.tprev
                    integ.tstops_idx += 1
                else
                    ##Advance the integrator
                    integ.t += dt
                end
            end
        end
    end
    _, saved_in_cb = handle_callbacks!(integ, ts, us)
    return saved_in_cb
end
| DiffEqGPU | https://github.com/SciML/DiffEqGPU.jl.git |
|
[
"MIT"
] | 3.4.1 | 7a714b856fe00d6194774bc38e387375f06963fe | code | 10206 | @inline function step!(integ::GPURodas5PI{false, S, T}, ts, us) where {T, S}
    # Fixed-step Rodas5P (Rosenbrock) step: eight linear solves against a
    # single W matrix per step; no Newton iteration is needed.
    dt = integ.dt
    t = integ.t
    p = integ.p
    tmp = integ.tmp
    f = integ.f
    integ.uprev = integ.u
    uprev = integ.u
    @unpack a21, a31, a32, a41, a42, a43, a51, a52, a53, a54, a61, a62, a63, a64, a65,
    C21, C31, C32, C41, C42, C43, C51, C52, C53, C54, C61, C62, C63, C64, C65, C71, C72, C73, C74, C75, C76,
    C81, C82, C83, C84, C85, C86, C87, γ, d1, d2, d3, d4, d5, c2, c3, c4, c5 = integ.tab
    integ.tprev = t
    saved_in_cb = false
    adv_integ = true
    ## Check if tstops are within the range of time-series
    if integ.tstops !== nothing && integ.tstops_idx <= length(integ.tstops) &&
       (integ.tstops[integ.tstops_idx] - integ.t - integ.dt - 100 * eps(T) < 0)
        integ.t = integ.tstops[integ.tstops_idx]
        ## Set correct dt
        dt = integ.t - integ.tprev
        integ.tstops_idx += 1
    else
        ##Advance the integrator
        integ.t += dt
    end
    if integ.u_modified
        k1 = f(uprev, p, t)
        integ.u_modified = false
    else
        @inbounds k1 = integ.k1
    end
    # Jacobian
    Jf, _ = build_J_W(integ.alg, f, γ, dt)
    J = Jf(uprev, p, t)
    Tgrad = build_tgrad(integ.alg, f)
    dT = Tgrad(uprev, p, t)
    # Precalculations
    dtC21 = C21 / dt
    dtC31 = C31 / dt
    dtC32 = C32 / dt
    dtC41 = C41 / dt
    dtC42 = C42 / dt
    dtC43 = C43 / dt
    dtC51 = C51 / dt
    dtC52 = C52 / dt
    dtC53 = C53 / dt
    dtC54 = C54 / dt
    dtC61 = C61 / dt
    dtC62 = C62 / dt
    dtC63 = C63 / dt
    dtC64 = C64 / dt
    dtC65 = C65 / dt
    dtC71 = C71 / dt
    dtC72 = C72 / dt
    dtC73 = C73 / dt
    dtC74 = C74 / dt
    dtC75 = C75 / dt
    dtC76 = C76 / dt
    dtC81 = C81 / dt
    dtC82 = C82 / dt
    dtC83 = C83 / dt
    dtC84 = C84 / dt
    dtC85 = C85 / dt
    dtC86 = C86 / dt
    dtC87 = C87 / dt
    dtd1 = dt * d1
    dtd2 = dt * d2
    dtd3 = dt * d3
    dtd4 = dt * d4
    dtd5 = dt * d5
    dtgamma = dt * γ
    # Starting
    # Sign convention: W = -(I/(dt*γ) - J); each stage solves W*k = -rhs.
    W = J - I * inv(dtgamma)
    du = f(uprev, p, t)
    # Step 1
    linsolve_tmp = du + dtd1 * dT
    k1 = linear_solve(W, -linsolve_tmp)
    u = uprev + a21 * k1
    du = f(u, p, t + c2 * dt)
    # Step 2
    linsolve_tmp = du + dtd2 * dT + dtC21 * k1
    k2 = linear_solve(W, -linsolve_tmp)
    u = uprev + a31 * k1 + a32 * k2
    du = f(u, p, t + c3 * dt)
    # Step 3
    linsolve_tmp = du + dtd3 * dT + (dtC31 * k1 + dtC32 * k2)
    k3 = linear_solve(W, -linsolve_tmp)
    u = uprev + a41 * k1 + a42 * k2 + a43 * k3
    du = f(u, p, t + c4 * dt)
    # Step 4
    linsolve_tmp = du + dtd4 * dT + (dtC41 * k1 + dtC42 * k2 + dtC43 * k3)
    k4 = linear_solve(W, -linsolve_tmp)
    u = uprev + a51 * k1 + a52 * k2 + a53 * k3 + a54 * k4
    du = f(u, p, t + c5 * dt)
    # Step 5
    linsolve_tmp = du + dtd5 * dT + (dtC52 * k2 + dtC54 * k4 + dtC51 * k1 + dtC53 * k3)
    k5 = linear_solve(W, -linsolve_tmp)
    u = uprev + a61 * k1 + a62 * k2 + a63 * k3 + a64 * k4 + a65 * k5
    du = f(u, p, t + dt)
    # Step 6
    linsolve_tmp = du + (dtC61 * k1 + dtC62 * k2 + dtC63 * k3 + dtC64 * k4 + dtC65 * k5)
    k6 = linear_solve(W, -linsolve_tmp)
    u = u + k6
    du = f(u, p, t + dt)
    # Step 7
    linsolve_tmp = du + (dtC71 * k1 + dtC72 * k2 + dtC73 * k3 + dtC74 * k4 + dtC75 * k5 +
                    dtC76 * k6)
    k7 = linear_solve(W, -linsolve_tmp)
    u = u + k7
    du = f(u, p, t + dt)
    # Step 8
    linsolve_tmp = du + (dtC81 * k1 + dtC82 * k2 + dtC83 * k3 + dtC84 * k4 + dtC85 * k5 +
                    dtC86 * k6 + dtC87 * k7)
    k8 = linear_solve(W, -linsolve_tmp)
    integ.u = u + k8
    @inbounds begin # Necessary for interpolation
        # Dense-output coefficients (h-weights) combine all eight stages.
        @unpack h21, h22, h23, h24, h25, h26, h27, h28, h31, h32, h33, h34,
        h35, h36, h37, h38, h41, h42, h43, h44, h45, h46, h47, h48 = integ.tab
        integ.k1 = h21 * k1 + h22 * k2 + h23 * k3 + h24 * k4 + h25 * k5 + h26 * k6 +
                   h27 * k7 + h28 * k8
        integ.k2 = h31 * k1 + h32 * k2 + h33 * k3 + h34 * k4 + h35 * k5 + h36 * k6 +
                   h37 * k7 + h38 * k8
        integ.k3 = h41 * k1 + h42 * k2 + h43 * k3 + h44 * k4 + h45 * k5 + h46 * k6 +
                   h47 * k7 + h48 * k8
    end
    _, saved_in_cb = handle_callbacks!(integ, ts, us)
    return saved_in_cb
end
# Adaptive Rodas5P step: repeats the eight-stage Rosenbrock step with a
# shrinking `dt` until the error estimate (based on the final stage `k8`)
# satisfies `EEst` ≤ 1, using a PI step-size controller.
@inline function step!(integ::GPUARodas5PI{false, S, T}, ts, us) where {T, S}
    beta1, beta2, qmax, qmin, gamma, qoldinit, _ = build_adaptive_controller_cache(integ.alg,
        T)
    dt = integ.dtnew
    t = integ.t
    p = integ.p
    tf = integ.tf
    tmp = integ.tmp
    f = integ.f
    integ.uprev = integ.u
    uprev = integ.u
    qold = integ.qold
    abstol = integ.abstol
    reltol = integ.reltol
    @unpack a21, a31, a32, a41, a42, a43, a51, a52, a53, a54, a61, a62, a63, a64, a65,
    C21, C31, C32, C41, C42, C43, C51, C52, C53, C54, C61, C62, C63, C64, C65, C71, C72, C73, C74, C75, C76,
    C81, C82, C83, C84, C85, C86, C87, γ, d1, d2, d3, d4, d5, c2, c3, c4, c5 = integ.tab
    # Jacobian
    # Note: `Jf` does not depend on dt, so it is safe to build once outside
    # the retry loop.
    Jf, _ = build_J_W(integ.alg, f, γ, dt)
    J = Jf(uprev, p, t)
    Tgrad = build_tgrad(integ.alg, f)
    dT = Tgrad(uprev, p, t)
    if integ.u_modified
        k1 = f(uprev, p, t)
        integ.u_modified = false
    else
        @inbounds k1 = integ.k1
    end
    EEst = convert(T, Inf)
    while EEst > convert(T, 1.0)
        dt < convert(T, 1.0f-14) && error("dt<dtmin")
        # Precalculations
        dtC21 = C21 / dt
        dtC31 = C31 / dt
        dtC32 = C32 / dt
        dtC41 = C41 / dt
        dtC42 = C42 / dt
        dtC43 = C43 / dt
        dtC51 = C51 / dt
        dtC52 = C52 / dt
        dtC53 = C53 / dt
        dtC54 = C54 / dt
        dtC61 = C61 / dt
        dtC62 = C62 / dt
        dtC63 = C63 / dt
        dtC64 = C64 / dt
        dtC65 = C65 / dt
        dtC71 = C71 / dt
        dtC72 = C72 / dt
        dtC73 = C73 / dt
        dtC74 = C74 / dt
        dtC75 = C75 / dt
        dtC76 = C76 / dt
        dtC81 = C81 / dt
        dtC82 = C82 / dt
        dtC83 = C83 / dt
        dtC84 = C84 / dt
        dtC85 = C85 / dt
        dtC86 = C86 / dt
        dtC87 = C87 / dt
        dtd1 = dt * d1
        dtd2 = dt * d2
        dtd3 = dt * d3
        dtd4 = dt * d4
        dtd5 = dt * d5
        dtgamma = dt * γ
        # Starting
        W = J - I * inv(dtgamma)
        du = f(uprev, p, t)
        # Step 1
        linsolve_tmp = du + dtd1 * dT
        k1 = linear_solve(W, -linsolve_tmp)
        u = uprev + a21 * k1
        du = f(u, p, t + c2 * dt)
        # Step 2
        linsolve_tmp = du + dtd2 * dT + dtC21 * k1
        k2 = linear_solve(W, -linsolve_tmp)
        u = uprev + a31 * k1 + a32 * k2
        du = f(u, p, t + c3 * dt)
        # Step 3
        linsolve_tmp = du + dtd3 * dT + (dtC31 * k1 + dtC32 * k2)
        k3 = linear_solve(W, -linsolve_tmp)
        u = uprev + a41 * k1 + a42 * k2 + a43 * k3
        du = f(u, p, t + c4 * dt)
        # Step 4
        linsolve_tmp = du + dtd4 * dT + (dtC41 * k1 + dtC42 * k2 + dtC43 * k3)
        k4 = linear_solve(W, -linsolve_tmp)
        u = uprev + a51 * k1 + a52 * k2 + a53 * k3 + a54 * k4
        du = f(u, p, t + c5 * dt)
        # Step 5
        linsolve_tmp = du + dtd5 * dT + (dtC52 * k2 + dtC54 * k4 + dtC51 * k1 + dtC53 * k3)
        k5 = linear_solve(W, -linsolve_tmp)
        u = uprev + a61 * k1 + a62 * k2 + a63 * k3 + a64 * k4 + a65 * k5
        du = f(u, p, t + dt)
        # Step 6
        linsolve_tmp = du + (dtC61 * k1 + dtC62 * k2 + dtC63 * k3 + dtC64 * k4 + dtC65 * k5)
        k6 = linear_solve(W, -linsolve_tmp)
        u = u + k6
        du = f(u, p, t + dt)
        # Step 7
        linsolve_tmp = du +
                       (dtC71 * k1 + dtC72 * k2 + dtC73 * k3 + dtC74 * k4 + dtC75 * k5 +
                        dtC76 * k6)
        k7 = linear_solve(W, -linsolve_tmp)
        u = u + k7
        du = f(u, p, t + dt)
        # Step 8
        linsolve_tmp = du +
                       (dtC81 * k1 + dtC82 * k2 + dtC83 * k3 + dtC84 * k4 + dtC85 * k5 +
                        dtC86 * k6 + dtC87 * k7)
        k8 = linear_solve(W, -linsolve_tmp)
        u = u + k8
        # The last stage increment doubles as the local error estimate.
        tmp = k8 ./ (abstol .+ max.(abs.(uprev), abs.(u)) * reltol)
        EEst = DiffEqBase.ODE_DEFAULT_NORM(tmp, t)
        if iszero(EEst)
            q = inv(qmax)
        else
            q11 = EEst^beta1
            q = q11 / (qold^beta2)
        end
        if EEst > 1
            # Step rejected: shrink dt and retry.
            dt = dt / min(inv(qmin), q11 / gamma)
        else # EEst <= 1
            # Step accepted: propose next dt and commit integrator state.
            q = max(inv(qmax), min(inv(qmin), q / gamma))
            qold = max(EEst, qoldinit)
            dtnew = dt / q #dtnew
            dtnew = min(abs(dtnew), abs(tf - t - dt))
            @inbounds begin # Necessary for interpolation
                @unpack h21, h22, h23, h24, h25, h26, h27, h28, h31, h32, h33, h34,
                h35, h36, h37, h38, h41, h42, h43, h44, h45, h46, h47, h48 = integ.tab
                integ.k1 = h21 * k1 + h22 * k2 + h23 * k3 + h24 * k4 + h25 * k5 + h26 * k6 +
                           h27 * k7 + h28 * k8
                integ.k2 = h31 * k1 + h32 * k2 + h33 * k3 + h34 * k4 + h35 * k5 + h36 * k6 +
                           h37 * k7 + h38 * k8
                integ.k3 = h41 * k1 + h42 * k2 + h43 * k3 + h44 * k4 + h45 * k5 + h46 * k6 +
                           h47 * k7 + h48 * k8
            end
            integ.dt = dt
            integ.dtnew = dtnew
            integ.qold = qold
            integ.tprev = t
            integ.u = u
            if (tf - t - dt) < convert(T, 1.0f-14)
                integ.t = tf
            else
                if integ.tstops !== nothing && integ.tstops_idx <= length(integ.tstops) &&
                   integ.tstops[integ.tstops_idx] - integ.t - integ.dt -
                   100 * eps(T) < 0
                    integ.t = integ.tstops[integ.tstops_idx]
                    integ.u = integ(integ.t)
                    dt = integ.t - integ.tprev
                    integ.tstops_idx += 1
                else
                    ##Advance the integrator
                    integ.t += dt
                end
            end
        end
    end
    _, saved_in_cb = handle_callbacks!(integ, ts, us)
    return saved_in_cb
end
| DiffEqGPU | https://github.com/SciML/DiffEqGPU.jl.git |
|
[
"MIT"
] | 3.4.1 | 7a714b856fe00d6194774bc38e387375f06963fe | code | 4972 | @inline function step!(integ::GPURB23I{false, S, T}, ts, us) where {T, S}
    # Fixed-step Rosenbrock23-style step: two linear solves against W per step,
    # with mass-matrix support.
    dt = integ.dt
    t = integ.t
    p = integ.p
    tmp = integ.tmp
    f = integ.f
    integ.uprev = integ.u
    uprev = integ.u
    d = integ.d
    integ.tprev = t
    saved_in_cb = false
    adv_integ = true
    ## Check if tstops are within the range of time-series
    if integ.tstops !== nothing && integ.tstops_idx <= length(integ.tstops) &&
       (integ.tstops[integ.tstops_idx] - integ.t - integ.dt - 100 * eps(T) < 0)
        integ.t = integ.tstops[integ.tstops_idx]
        ## Set correct dt
        dt = integ.t - integ.tprev
        integ.tstops_idx += 1
    else
        ##Advance the integrator
        integ.t += dt
    end
    if integ.u_modified
        k1 = f(uprev, p, t)
        integ.u_modified = false
    else
        @inbounds k1 = integ.k1
    end
    γ = dt * d
    dto2 = dt / 2
    dto6 = dt / 6
    Jf, _ = build_J_W(integ.alg, f, γ, dt)
    J = Jf(uprev, p, t)
    Tgrad = build_tgrad(integ.alg, f)
    dT = Tgrad(uprev, p, t)
    mass_matrix = integ.f.mass_matrix
    W = mass_matrix - γ * J
    W_fact = W
    # F = lu(W)
    F₀ = f(uprev, p, t)
    k1 = linear_solve(W_fact, F₀ + γ * dT)
    F₁ = f(uprev + dto2 * k1, p, t + dto2)
    # For an identity mass matrix the `mass_matrix * k1` product reduces to k1.
    if mass_matrix === I
        k2 = linear_solve(W_fact, (F₁ - k1)) + k1
    else
        k2 = linear_solve(W_fact, (F₁ - mass_matrix * k1)) + k1
    end
    integ.u = uprev + dt * k2
    @inbounds begin # Necessary for interpolation
        integ.k1 = k1
        integ.k2 = k2
    end
    _, saved_in_cb = handle_callbacks!(integ, ts, us)
    return saved_in_cb
end
#############################Adaptive Version#####################################
@inline function step!(integ::GPUARB23I{false, S, T}, ts, us) where {S, T}
    # Adaptive Rosenbrock23 step: retries with shrinking dt until the embedded
    # 3rd-order error estimate satisfies EEst <= 1, then commits the step and
    # proposes `dtnew` via a PI step-size controller.
    beta1, beta2, qmax, qmin, gamma, qoldinit, _ = build_adaptive_controller_cache(integ.alg,
        T)
    dt = integ.dtnew
    t = integ.t
    p = integ.p
    tf = integ.tf
    tmp = integ.tmp
    f = integ.f
    integ.uprev = integ.u
    uprev = integ.u
    d = integ.d
    qold = integ.qold
    abstol = integ.abstol
    reltol = integ.reltol
    if integ.u_modified
        # A callback changed u; the cached derivative is stale.
        k1 = f(uprev, p, t)
        integ.u_modified = false
    else
        @inbounds k1 = integ.k1
    end
    EEst = convert(T, Inf)
    mass_matrix = integ.f.mass_matrix
    while EEst > convert(T, 1.0)
        # Guard against the controller shrinking dt to effectively zero.
        dt < convert(T, 1.0f-14) && error("dt<dtmin")
        γ = dt * d
        dto2 = dt / 2
        dto6 = dt / 6
        Jf, _ = build_J_W(integ.alg, f, γ, dt)
        J = Jf(uprev, p, t)
        Tgrad = build_tgrad(integ.alg, f)
        dT = Tgrad(uprev, p, t)
        W = mass_matrix - γ * J
        W_fact = W
        # F = lu(W)
        F₀ = f(uprev, p, t)
        k1 = linear_solve(W_fact, F₀ + γ * dT)
        F₁ = f(uprev + dto2 * k1, p, t + dto2)
        if mass_matrix === I
            k2 = linear_solve(W_fact, F₁ - k1) + k1
        else
            k2 = linear_solve(W_fact, F₁ - mass_matrix * k1) + k1
        end
        u = uprev + dt * k2
        # Third stage is only used for the embedded error estimate.
        e32 = T(6) + sqrt(T(2))
        F₂ = f(u, p, t + dt)
        if mass_matrix === I
            k3 = linear_solve(W_fact, F₂ - e32 * (k2 - F₁) - 2 * (k1 - F₀) + dt * dT)
        else
            k3 = linear_solve(W_fact,
                F₂ - mass_matrix * (e32 * k2 + 2 * k1) +
                e32 * F₁ + 2 * F₀ + dt * dT)
        end
        # Error normalized by the mixed absolute/relative tolerance.
        tmp = dto6 * (k1 - 2 * k2 + k3)
        tmp = tmp ./ (abstol .+ max.(abs.(uprev), abs.(u)) * reltol)
        EEst = DiffEqBase.ODE_DEFAULT_NORM(tmp, t)
        if iszero(EEst)
            q = inv(qmax)
        else
            q11 = EEst^beta1
            q = q11 / (qold^beta2)
        end
        if EEst > 1
            # Rejected step: shrink dt and retry. (`q11` is always defined here
            # since EEst > 1 implies the non-zero branch above was taken.)
            dt = dt / min(inv(qmin), q11 / gamma)
        else # EEst <= 1
            q = max(inv(qmax), min(inv(qmin), q / gamma))
            qold = max(EEst, qoldinit)
            dtnew = dt / q #dtnew
            dtnew = min(abs(dtnew), abs(tf - t - dt))
            @inbounds begin # Necessary for interpolation
                integ.k1 = k1
                integ.k2 = k2
            end
            integ.dt = dt
            integ.dtnew = dtnew
            integ.qold = qold
            integ.tprev = t
            integ.u = u
            if (tf - t - dt) < convert(T, 1.0f-14)
                # Close enough to the final time: clamp exactly to tf.
                integ.t = tf
            else
                if integ.tstops !== nothing && integ.tstops_idx <= length(integ.tstops) &&
                   integ.tstops[integ.tstops_idx] - integ.t - integ.dt -
                   100 * eps(T) < 0
                    # Land exactly on the next tstop; re-evaluate u there via
                    # the integrator's interpolant.
                    integ.t = integ.tstops[integ.tstops_idx]
                    integ.u = integ(integ.t)
                    dt = integ.t - integ.tprev
                    integ.tstops_idx += 1
                else
                    ##Advance the integrator
                    integ.t += dt
                end
            end
        end
    end
    _, saved_in_cb = handle_callbacks!(integ, ts, us)
    return saved_in_cb
end
|
[
"MIT"
] | 3.4.1 | 7a714b856fe00d6194774bc38e387375f06963fe | code | 3540 | struct SIESMEConstantCache{T, T2}
    # Method coefficients for the SIE/SME family of SDE integrators.
    # `T` is the state element type, `T2` the time-like coefficient type.
    # Consumed via `@unpack` in `siea_kernel` below.
    α1::T
    α2::T
    γ1::T
    λ1::T
    λ2::T
    λ3::T
    µ1::T
    µ2::T
    µ3::T
    µ0::T2
    µbar0::T2
    λ0::T
    λbar0::T
    ν1::T
    ν2::T
    β2::T
    β3::T
    δ2::T
    δ3::T
end
"""
    SIEAConstantCache(::Type{T}, ::Type{T2}) where {T, T2}

Build the `SIESMEConstantCache` holding the SIEA method coefficients, with
state coefficients of type `T` and time-like coefficients of type `T2`.
All entries are exact binary fractions (±1, ±1/2, ±1/4, 0), so the values
are identical for any floating-point `T`/`T2`.
"""
function SIEAConstantCache(::Type{T}, ::Type{T2}) where {T, T2}
    # Reused exact constants.
    half = convert(T, 0.5)
    quarter = convert(T, 0.25)
    unit = one(T)
    naught = zero(T)
    return SIESMEConstantCache(half, half, half,        # α1, α2, γ1
        quarter, -quarter, quarter,                     # λ1, λ2, λ3
        quarter, quarter, -quarter,                     # µ1, µ2, µ3
        one(T2), one(T2),                               # µ0, µbar0
        unit, unit,                                     # λ0, λbar0
        unit, naught,                                   # ν1, ν2
        unit, naught,                                   # β2, β3
        -unit, naught)                                  # δ2, δ3
end
@kernel function siea_kernel(@Const(probs), _us, _ts, dt,
    saveat, ::Val{save_everystep}) where {save_everystep}
    # Fixed-step SIEA SDE integration kernel: each GPU thread solves one
    # problem from `probs`, writing times/states into its own column of
    # `_ts`/`_us`. Output is either every step, only the endpoint, or linearly
    # interpolated onto `saveat` points.
    i = @index(Global, Linear)
    # get the actual problem for this thread
    prob = @inbounds probs[i]
    # Per-problem seed so each trajectory's noise stream is reproducible.
    Random.seed!(prob.seed)
    # get the input/output arrays for this thread
    ts = @inbounds view(_ts, :, i)
    us = @inbounds view(_us, :, i)
    # A per-problem `saveat` in kwargs overrides the ensemble-wide one.
    _saveat = get(prob.kwargs, :saveat, nothing)
    saveat = _saveat === nothing ? saveat : _saveat
    f = prob.f
    g = prob.g
    u0 = prob.u0
    tspan = prob.tspan
    p = prob.p
    is_diagonal_noise = SciMLBase.is_diagonal_noise(prob)
    cur_t = 0
    if saveat !== nothing
        cur_t = 1
        if tspan[1] == saveat[1]
            cur_t += 1
            @inbounds us[1] = u0
        end
    else
        @inbounds ts[1] = tspan[1]
        @inbounds us[1] = u0
    end
    # NOTE(review): these two unconditional writes duplicate / override the
    # branch-dependent initialization above; when `saveat !== nothing` and
    # `saveat[1] != tspan[1]` they store values the saveat loop may expect to
    # fill itself — confirm whether they are intentional.
    @inbounds ts[1] = prob.tspan[1]
    @inbounds us[1] = prob.u0
    sqdt = sqrt(dt)
    u = copy(u0)
    t = copy(tspan[1])
    n = length(tspan[1]:dt:tspan[2])
    cache = SIEAConstantCache(eltype(u0), typeof(t))
    @unpack α1, α2, γ1, λ1, λ2, λ3, µ1, µ2, µ3, µ0, µbar0, λ0, λbar0, ν1, ν2, β2, β3, δ2, δ3 = cache
    for j in 2:n
        uprev = u
        # compute stage values
        k0 = f(uprev, p, t)
        g0 = g(uprev, p, t)
        # NOTE(review): only diagonal noise is handled — `u` is not advanced at
        # all for non-diagonal noise problems; confirm this is guarded upstream.
        if is_diagonal_noise
            # Wiener increment and its higher powers used by the stages.
            dW = sqdt * randn(typeof(u0))
            W2 = (dW) .^ 2 / sqdt
            W3 = ν2 * (dW) .^ 3 / dt
            k1 = f(uprev + λ0 * k0 * dt + ν1 * g0 .* dW + g0 .* W3, p, t + µ0 * dt)
            g1 = g(uprev + λbar0 * k0 * dt + β2 * g0 * sqdt + β3 * g0 .* W2, p,
                t + µbar0 * dt)
            g2 = g(uprev + λbar0 * k0 * dt + δ2 * g0 * sqdt + δ3 * g0 .* W2, p,
                t + µbar0 * dt)
            u = uprev + (α1 * k0 + α2 * k1) * dt
            u += γ1 * g0 .* dW + (λ1 .* dW .+ λ2 * sqdt + λ3 .* W2) .* g1 +
                 (µ1 .* dW .+ µ2 * sqdt + µ3 .* W2) .* g2
        end
        t += dt
        if saveat === nothing && save_everystep
            @inbounds us[j] = u
            @inbounds ts[j] = t
        elseif saveat !== nothing
            # Emit every requested save point that this step passed over.
            while cur_t <= length(saveat) && saveat[cur_t] <= t
                savet = saveat[cur_t]
                Θ = (savet - (t - dt)) / dt
                # Linear Interpolation
                @inbounds us[cur_t] = uprev + (u - uprev) * Θ
                @inbounds ts[cur_t] = savet
                cur_t += 1
            end
        end
    end
    if saveat === nothing && !save_everystep
        # Endpoint-only mode: slot 2 holds the final state.
        @inbounds us[2] = u
        @inbounds ts[2] = t
    end
end
| DiffEqGPU | https://github.com/SciML/DiffEqGPU.jl.git |
|
[
"MIT"
] | 3.4.1 | 7a714b856fe00d6194774bc38e387375f06963fe | code | 5286 | @inline function step!(integ::GPUT5I{false, S, T}, ts, us) where {T, S}
    # Fixed-step Tsitouras 5(4) step for one GPU ensemble trajectory.
    # Stores all seven stage derivatives on `integ` for dense interpolation.
    c1, c2, c3, c4, c5, c6 = integ.cs
    dt = integ.dt
    t = integ.t
    p = integ.p
    a21, a31, a32, a41, a42, a43, a51, a52, a53, a54,
    a61, a62, a63, a64, a65, a71, a72, a73, a74, a75, a76 = integ.as
    tmp = integ.tmp
    f = integ.f
    integ.uprev = integ.u
    uprev = integ.u
    integ.tprev = t
    saved_in_cb = false
    adv_integ = true
    ## Check if tstops are within the range of time-series
    if integ.tstops !== nothing && integ.tstops_idx <= length(integ.tstops) &&
       (integ.tstops[integ.tstops_idx] - integ.t - integ.dt - 100 * eps(T) < 0)
        # Land exactly on the next tstop instead of overshooting it.
        integ.t = integ.tstops[integ.tstops_idx]
        ## Set correct dt
        dt = integ.t - integ.tprev
        integ.tstops_idx += 1
    else
        ##Advance the integrator
        integ.t += dt
    end
    if integ.u_modified
        # A callback changed u: the FSAL derivative is stale, recompute it.
        k1 = f(uprev, p, t)
        integ.u_modified = false
    else
        # FSAL: last stage of the previous step is this step's first stage.
        @inbounds k1 = integ.k7
    end
    tmp = uprev + dt * a21 * k1
    k2 = f(tmp, p, t + c1 * dt)
    tmp = uprev + dt * (a31 * k1 + a32 * k2)
    k3 = f(tmp, p, t + c2 * dt)
    tmp = uprev + dt * (a41 * k1 + a42 * k2 + a43 * k3)
    k4 = f(tmp, p, t + c3 * dt)
    tmp = uprev + dt * (a51 * k1 + a52 * k2 + a53 * k3 + a54 * k4)
    k5 = f(tmp, p, t + c4 * dt)
    tmp = uprev + dt * (a61 * k1 + a62 * k2 + a63 * k3 + a64 * k4 + a65 * k5)
    k6 = f(tmp, p, t + dt)
    integ.u = uprev +
              dt * ((a71 * k1 + a72 * k2 + a73 * k3 + a74 * k4) + a75 * k5 + a76 * k6)
    k7 = f(integ.u, p, t + dt)
    @inbounds begin # Necessary for interpolation
        integ.k1 = k1
        integ.k2 = k2
        integ.k3 = k3
        integ.k4 = k4
        integ.k5 = k5
        integ.k6 = k6
        integ.k7 = k7
    end
    _, saved_in_cb = handle_callbacks!(integ, ts, us)
    return saved_in_cb
end
#############################Adaptive Version#####################################
@inline function step!(integ::GPUAT5I{false, S, T}, ts, us) where {S, T}
    # Adaptive Tsitouras 5(4) step: retries with shrinking dt until the
    # embedded error estimate passes, then commits and proposes `dtnew` via a
    # PI controller.
    beta1, beta2, qmax, qmin, gamma, qoldinit, _ = build_adaptive_controller_cache(integ.alg,
        T)
    c1, c2, c3, c4, c5, c6 = integ.cs
    dt = integ.dtnew
    t = integ.t
    p = integ.p
    tf = integ.tf
    a21, a31, a32, a41, a42, a43, a51, a52, a53, a54,
    a61, a62, a63, a64, a65, a71, a72, a73, a74, a75, a76 = integ.as
    btilde1, btilde2, btilde3, btilde4, btilde5, btilde6, btilde7 = integ.btildes
    tmp = integ.tmp
    f = integ.f
    integ.uprev = integ.u
    uprev = integ.u
    qold = integ.qold
    abstol = integ.abstol
    reltol = integ.reltol
    if integ.u_modified
        # Callback changed u: FSAL derivative is stale.
        k1 = f(uprev, p, t)
        integ.u_modified = false
    else
        # FSAL reuse of the previous step's last stage.
        @inbounds k1 = integ.k7
    end
    EEst = convert(T, Inf)
    while EEst > convert(T, 1.0)
        dt < convert(T, 1.0f-14) && error("dt<dtmin")
        tmp = uprev + dt * a21 * k1
        k2 = f(tmp, p, t + c1 * dt)
        tmp = uprev + dt * (a31 * k1 + a32 * k2)
        k3 = f(tmp, p, t + c2 * dt)
        tmp = uprev + dt * (a41 * k1 + a42 * k2 + a43 * k3)
        k4 = f(tmp, p, t + c3 * dt)
        tmp = uprev + dt * (a51 * k1 + a52 * k2 + a53 * k3 + a54 * k4)
        k5 = f(tmp, p, t + c4 * dt)
        tmp = uprev + dt * (a61 * k1 + a62 * k2 + a63 * k3 + a64 * k4 + a65 * k5)
        k6 = f(tmp, p, t + dt)
        u = uprev + dt * (a71 * k1 + a72 * k2 + a73 * k3 + a74 * k4 + a75 * k5 + a76 * k6)
        k7 = f(u, p, t + dt)
        # Embedded-order difference gives the local error estimate.
        tmp = dt * (btilde1 * k1 + btilde2 * k2 + btilde3 * k3 + btilde4 * k4 +
               btilde5 * k5 + btilde6 * k6 + btilde7 * k7)
        tmp = tmp ./ (abstol .+ max.(abs.(uprev), abs.(u)) * reltol)
        EEst = DiffEqBase.ODE_DEFAULT_NORM(tmp, t)
        if iszero(EEst)
            q = inv(qmax)
        else
            q11 = EEst^beta1
            q = q11 / (qold^beta2)
        end
        if EEst > 1
            # Rejected: shrink dt and retry.
            dt = dt / min(inv(qmin), q11 / gamma)
        else # EEst <= 1
            q = max(inv(qmax), min(inv(qmin), q / gamma))
            qold = max(EEst, qoldinit)
            dtnew = dt / q #dtnew
            dtnew = min(abs(dtnew), abs(tf - t - dt))
            @inbounds begin # Necessary for interpolation
                integ.k1 = k1
                integ.k2 = k2
                integ.k3 = k3
                integ.k4 = k4
                integ.k5 = k5
                integ.k6 = k6
                integ.k7 = k7
            end
            integ.dt = dt
            integ.dtnew = dtnew
            integ.qold = qold
            integ.tprev = t
            integ.u = u
            if (tf - t - dt) < convert(T, 1.0f-14)
                # Clamp the final step exactly onto tf.
                integ.t = tf
            else
                if integ.tstops !== nothing && integ.tstops_idx <= length(integ.tstops) &&
                   integ.tstops[integ.tstops_idx] - integ.t - integ.dt -
                   100 * eps(T) < 0
                    integ.t = integ.tstops[integ.tstops_idx]
                    integ.u = integ(integ.t)
                    dt = integ.t - integ.tprev
                    integ.tstops_idx += 1
                else
                    ##Advance the integrator
                    integ.t += dt
                end
            end
        end
    end
    _, saved_in_cb = handle_callbacks!(integ, ts, us)
    return saved_in_cb
end
| DiffEqGPU | https://github.com/SciML/DiffEqGPU.jl.git |
|
[
"MIT"
] | 3.4.1 | 7a714b856fe00d6194774bc38e387375f06963fe | code | 6725 | @inline function step!(integ::GPUV7I{false, S, T}, ts, us) where {T, S}
    # Fixed-step Verner 7 step for one GPU ensemble trajectory; stores the
    # stage derivatives on `integ` for interpolation.
    @unpack dt = integ
    t = integ.t
    p = integ.p
    @unpack c2, c3, c4, c5, c6, c7, c8, a021, a031, a032, a041, a043, a051, a053, a054,
    a061, a063, a064, a065, a071, a073, a074, a075, a076, a081, a083, a084,
    a085, a086, a087, a091, a093, a094, a095, a096, a097, a098, a101, a103,
    a104, a105, a106, a107, b1, b4, b5, b6, b7, b8, b9 = integ.tab
    tmp = integ.tmp
    f = integ.f
    integ.uprev = integ.u
    uprev = integ.u
    integ.tprev = t
    saved_in_cb = false
    adv_integ = true
    ## Check if tstops are within the range of time-series
    if integ.tstops !== nothing && integ.tstops_idx <= length(integ.tstops) &&
       (integ.tstops[integ.tstops_idx] - integ.t - integ.dt - 100 * eps(T) < 0)
        # Land exactly on the next tstop.
        integ.t = integ.tstops[integ.tstops_idx]
        dt = integ.t - integ.tprev
        integ.tstops_idx += 1
    else
        ##Advance the integrator
        integ.t += dt
    end
    if integ.u_modified
        k1 = f(uprev, p, t)
        integ.u_modified = false
    else
        @inbounds k1 = integ.k1
    end
    # NOTE(review): this unconditional recompute makes the `u_modified` branch
    # above dead code — confirm whether reusing the cached k1 was intended.
    k1 = f(uprev, p, t)
    a = dt * a021
    k2 = f(uprev + a * k1, p, t + c2 * dt)
    k3 = f(uprev + dt * (a031 * k1 + a032 * k2), p, t + c3 * dt)
    k4 = f(uprev + dt * (a041 * k1 + a043 * k3), p, t + c4 * dt)
    k5 = f(uprev + dt * (a051 * k1 + a053 * k3 + a054 * k4), p, t + c5 * dt)
    k6 = f(uprev + dt * (a061 * k1 + a063 * k3 + a064 * k4 + a065 * k5), p, t + c6 * dt)
    k7 = f(uprev + dt * (a071 * k1 + a073 * k3 + a074 * k4 + a075 * k5 + a076 * k6), p,
        t + c7 * dt)
    k8 = f(uprev +
           dt * (a081 * k1 + a083 * k3 + a084 * k4 + a085 * k5 + a086 * k6 + a087 * k7), p,
        t + c8 * dt)
    g9 = uprev +
         dt *
         (a091 * k1 + a093 * k3 + a094 * k4 + a095 * k5 + a096 * k6 + a097 * k7 + a098 * k8)
    g10 = uprev +
          dt * (a101 * k1 + a103 * k3 + a104 * k4 + a105 * k5 + a106 * k6 + a107 * k7)
    k9 = f(g9, p, t + dt)
    k10 = f(g10, p, t + dt)
    integ.u = uprev +
              dt * (b1 * k1 + b4 * k4 + b5 * k5 + b6 * k6 + b7 * k7 + b8 * k8 + b9 * k9)
    @inbounds begin # Necessary for interpolation
        integ.k1 = k1
        integ.k2 = k2
        integ.k3 = k3
        integ.k4 = k4
        integ.k5 = k5
        integ.k6 = k6
        integ.k7 = k7
        integ.k8 = k8
        integ.k9 = k9
        integ.k10 = k10
    end
    _, saved_in_cb = handle_callbacks!(integ, ts, us)
    return saved_in_cb
end
#############################Adaptive Version#####################################
@inline function step!(integ::GPUAV7I{false, S, T}, ts, us) where {S, T}
    # Adaptive Verner 7 step with a PI step-size controller; retries until
    # the embedded error estimate EEst <= 1, then commits the step.
    beta1, beta2, qmax, qmin, gamma, qoldinit, _ = build_adaptive_controller_cache(integ.alg,
        T)
    dt = integ.dtnew
    t = integ.t
    p = integ.p
    tf = integ.tf
    @unpack c2, c3, c4, c5, c6, c7, c8, a021, a031, a032, a041, a043, a051, a053, a054,
    a061, a063, a064, a065, a071, a073, a074, a075, a076, a081, a083, a084,
    a085, a086, a087, a091, a093, a094, a095, a096, a097, a098, a101, a103,
    a104, a105, a106, a107, b1, b4, b5, b6, b7, b8, b9, btilde1, btilde4,
    btilde5, btilde6, btilde7, btilde8, btilde9, btilde10, extra, interp = integ.tab
    tmp = integ.tmp
    f = integ.f
    integ.uprev = integ.u
    uprev = integ.u
    qold = integ.qold
    abstol = integ.abstol
    reltol = integ.reltol
    if integ.u_modified
        k1 = f(uprev, p, t)
        integ.u_modified = false
    else
        @inbounds k1 = integ.k1
    end
    EEst = convert(T, Inf)
    while EEst > convert(T, 1.0)
        dt < convert(T, 1.0f-14) && error("dt<dtmin")
        # NOTE(review): k1 = f(uprev, p, t) is loop-invariant (uprev, t fixed
        # inside the retry loop) and also shadows the branch above.
        k1 = f(uprev, p, t)
        a = dt * a021
        k2 = f(uprev + a * k1, p, t + c2 * dt)
        k3 = f(uprev + dt * (a031 * k1 + a032 * k2), p, t + c3 * dt)
        k4 = f(uprev + dt * (a041 * k1 + a043 * k3), p, t + c4 * dt)
        k5 = f(uprev + dt * (a051 * k1 + a053 * k3 + a054 * k4), p, t + c5 * dt)
        k6 = f(uprev + dt * (a061 * k1 + a063 * k3 + a064 * k4 + a065 * k5), p, t + c6 * dt)
        k7 = f(uprev + dt * (a071 * k1 + a073 * k3 + a074 * k4 + a075 * k5 + a076 * k6), p,
            t + c7 * dt)
        k8 = f(uprev +
               dt * (a081 * k1 + a083 * k3 + a084 * k4 + a085 * k5 + a086 * k6 + a087 * k7),
            p,
            t + c8 * dt)
        g9 = uprev +
             dt *
             (a091 * k1 + a093 * k3 + a094 * k4 + a095 * k5 + a096 * k6 + a097 * k7 +
              a098 * k8)
        g10 = uprev +
              dt * (a101 * k1 + a103 * k3 + a104 * k4 + a105 * k5 + a106 * k6 + a107 * k7)
        k9 = f(g9, p, t + dt)
        k10 = f(g10, p, t + dt)
        u = uprev +
            dt * (b1 * k1 + b4 * k4 + b5 * k5 + b6 * k6 + b7 * k7 + b8 * k8 + b9 * k9)
        # Embedded error estimate normalized by tolerances.
        tmp = dt *
              (btilde1 * k1 + btilde4 * k4 + btilde5 * k5 + btilde6 * k6 + btilde7 * k7 +
               btilde8 * k8 + btilde9 * k9 + btilde10 * k10)
        tmp = tmp ./ (abstol .+ max.(abs.(uprev), abs.(u)) * reltol)
        EEst = DiffEqBase.ODE_DEFAULT_NORM(tmp, t)
        if iszero(EEst)
            q = inv(qmax)
        else
            q11 = EEst^beta1
            q = q11 / (qold^beta2)
        end
        if EEst > 1
            # Rejected: shrink dt and retry.
            dt = dt / min(inv(qmin), q11 / gamma)
        else # EEst <= 1
            q = max(inv(qmax), min(inv(qmin), q / gamma))
            qold = max(EEst, qoldinit)
            dtnew = dt / q #dtnew
            dtnew = min(abs(dtnew), abs(tf - t - dt))
            @inbounds begin # Necessary for interpolation
                integ.k1 = k1
                integ.k2 = k2
                integ.k3 = k3
                integ.k4 = k4
                integ.k5 = k5
                integ.k6 = k6
                integ.k7 = k7
                integ.k8 = k8
                integ.k9 = k9
                integ.k10 = k10
            end
            integ.dt = dt
            integ.dtnew = dtnew
            integ.qold = qold
            integ.tprev = t
            integ.u = u
            if (tf - t - dt) < convert(T, 1.0f-14)
                integ.t = tf
            else
                if integ.tstops !== nothing && integ.tstops_idx <= length(integ.tstops) &&
                   integ.tstops[integ.tstops_idx] - integ.t - integ.dt -
                   100 * eps(T) < 0
                    integ.t = integ.tstops[integ.tstops_idx]
                    integ.u = integ(integ.t)
                    dt = integ.t - integ.tprev
                    integ.tstops_idx += 1
                else
                    ##Advance the integrator
                    integ.t += dt
                end
            end
        end
    end
    _, saved_in_cb = handle_callbacks!(integ, ts, us)
    return saved_in_cb
end
| DiffEqGPU | https://github.com/SciML/DiffEqGPU.jl.git |
|
[
"MIT"
] | 3.4.1 | 7a714b856fe00d6194774bc38e387375f06963fe | code | 9949 | @inline function step!(integ::GPUV9I{false, S, T}, ts, us) where {T, S}
    # Fixed-step Verner 9 step for one GPU ensemble trajectory. Only 10 of the
    # 16 stages are cached on `integ` (k1 and k8–k16) for interpolation.
    @unpack dt = integ
    t = integ.t
    p = integ.p
    @unpack c1, c2, c3, c4, c5, c6, c7, c8, c9, c10, c11, c12, c13, a0201, a0301, a0302,
    a0401, a0403, a0501, a0503, a0504, a0601, a0604, a0605, a0701, a0704, a0705, a0706,
    a0801, a0806, a0807, a0901, a0906, a0907, a0908, a1001, a1006, a1007, a1008, a1009,
    a1101, a1106, a1107, a1108, a1109, a1110, a1201, a1206, a1207, a1208, a1209, a1210,
    a1211, a1301, a1306, a1307, a1308, a1309, a1310, a1311, a1312, a1401, a1406, a1407,
    a1408, a1409, a1410, a1411, a1412, a1413, a1501, a1506, a1507, a1508, a1509, a1510,
    a1511, a1512, a1513, a1514, a1601, a1606, a1607, a1608, a1609, a1610, a1611, a1612,
    a1613, b1, b8, b9, b10, b11, b12, b13, b14, b15, btilde1, btilde8, btilde9, btilde10,
    btilde11, btilde12, btilde13, btilde14, btilde15, btilde16 = integ.tab
    tmp = integ.tmp
    f = integ.f
    integ.uprev = integ.u
    uprev = integ.u
    integ.tprev = t
    saved_in_cb = false
    adv_integ = true
    ## Check if tstops are within the range of time-series
    if integ.tstops !== nothing && integ.tstops_idx <= length(integ.tstops) &&
       (integ.tstops[integ.tstops_idx] - integ.t - integ.dt - 100 * eps(T) < 0)
        # Land exactly on the next tstop.
        integ.t = integ.tstops[integ.tstops_idx]
        ## Set correct dt
        dt = integ.t - integ.tprev
        integ.tstops_idx += 1
    else
        ##Advance the integrator
        integ.t += dt
    end
    if integ.u_modified
        k1 = f(uprev, p, t)
        integ.u_modified = false
    else
        @inbounds k1 = integ.k1
    end
    # NOTE(review): this unconditional recompute makes the `u_modified` branch
    # above dead code — confirm whether reusing the cached k1 was intended.
    k1 = f(uprev, p, t)
    a = dt * a0201
    k2 = f(uprev + a * k1, p, t + c1 * dt)
    k3 = f(uprev + dt * (a0301 * k1 + a0302 * k2), p, t + c2 * dt)
    k4 = f(uprev + dt * (a0401 * k1 + a0403 * k3), p, t + c3 * dt)
    k5 = f(uprev + dt * (a0501 * k1 + a0503 * k3 + a0504 * k4), p, t + c4 * dt)
    k6 = f(uprev + dt * (a0601 * k1 + a0604 * k4 + a0605 * k5), p, t + c5 * dt)
    k7 = f(uprev + dt * (a0701 * k1 + a0704 * k4 + a0705 * k5 + a0706 * k6), p, t + c6 * dt)
    k8 = f(uprev + dt * (a0801 * k1 + a0806 * k6 + a0807 * k7), p, t + c7 * dt)
    k9 = f(uprev + dt * (a0901 * k1 + a0906 * k6 + a0907 * k7 + a0908 * k8), p, t + c8 * dt)
    k10 = f(uprev + dt * (a1001 * k1 + a1006 * k6 + a1007 * k7 + a1008 * k8 + a1009 * k9),
        p, t + c9 * dt)
    k11 = f(uprev +
            dt *
            (a1101 * k1 + a1106 * k6 + a1107 * k7 + a1108 * k8 + a1109 * k9 + a1110 * k10),
        p, t + c10 * dt)
    k12 = f(uprev +
            dt *
            (a1201 * k1 + a1206 * k6 + a1207 * k7 + a1208 * k8 + a1209 * k9 + a1210 * k10 +
             a1211 * k11), p, t + c11 * dt)
    k13 = f(uprev +
            dt *
            (a1301 * k1 + a1306 * k6 + a1307 * k7 + a1308 * k8 + a1309 * k9 + a1310 * k10 +
             a1311 * k11 + a1312 * k12), p, t + c12 * dt)
    k14 = f(uprev +
            dt *
            (a1401 * k1 + a1406 * k6 + a1407 * k7 + a1408 * k8 + a1409 * k9 + a1410 * k10 +
             a1411 * k11 + a1412 * k12 + a1413 * k13), p, t + c13 * dt)
    g15 = uprev +
          dt *
          (a1501 * k1 + a1506 * k6 + a1507 * k7 + a1508 * k8 + a1509 * k9 + a1510 * k10 +
           a1511 * k11 + a1512 * k12 + a1513 * k13 + a1514 * k14)
    g16 = uprev +
          dt *
          (a1601 * k1 + a1606 * k6 + a1607 * k7 + a1608 * k8 + a1609 * k9 + a1610 * k10 +
           a1611 * k11 + a1612 * k12 + a1613 * k13)
    k15 = f(g15, p, t + dt)
    k16 = f(g16, p, t + dt)
    integ.u = uprev +
              dt *
              (b1 * k1 + b8 * k8 + b9 * k9 + b10 * k10 + b11 * k11 + b12 * k12 + b13 * k13 +
               b14 * k14 + b15 * k15)
    @inbounds begin # Necessary for interpolation
        # Slots k2..k10 intentionally hold stages 8..16, not stages 2..10.
        integ.k1 = k1
        integ.k2 = k8
        integ.k3 = k9
        integ.k4 = k10
        integ.k5 = k11
        integ.k6 = k12
        integ.k7 = k13
        integ.k8 = k14
        integ.k9 = k15
        integ.k10 = k16
    end
    _, saved_in_cb = handle_callbacks!(integ, ts, us)
    return saved_in_cb
end
#############################Adaptive Version#####################################
@inline function step!(integ::GPUAV9I{false, S, T}, ts, us) where {S, T}
    # Adaptive Verner 9 step with a PI step-size controller; retries until the
    # embedded error estimate EEst <= 1, then commits the step.
    beta1, beta2, qmax, qmin, gamma, qoldinit, _ = build_adaptive_controller_cache(integ.alg,
        T)
    dt = integ.dtnew
    t = integ.t
    p = integ.p
    tf = integ.tf
    @unpack c1, c2, c3, c4, c5, c6, c7, c8, c9, c10, c11, c12, c13, a0201, a0301, a0302,
    a0401, a0403, a0501, a0503, a0504, a0601, a0604, a0605, a0701, a0704, a0705, a0706,
    a0801, a0806, a0807, a0901, a0906, a0907, a0908, a1001, a1006, a1007, a1008, a1009,
    a1101, a1106, a1107, a1108, a1109, a1110, a1201, a1206, a1207, a1208, a1209, a1210,
    a1211, a1301, a1306, a1307, a1308, a1309, a1310, a1311, a1312, a1401, a1406, a1407,
    a1408, a1409, a1410, a1411, a1412, a1413, a1501, a1506, a1507, a1508, a1509, a1510,
    a1511, a1512, a1513, a1514, a1601, a1606, a1607, a1608, a1609, a1610, a1611, a1612,
    a1613, b1, b8, b9, b10, b11, b12, b13, b14, b15, btilde1, btilde8, btilde9, btilde10,
    btilde11, btilde12, btilde13, btilde14, btilde15, btilde16 = integ.tab
    tmp = integ.tmp
    f = integ.f
    integ.uprev = integ.u
    uprev = integ.u
    qold = integ.qold
    abstol = integ.abstol
    reltol = integ.reltol
    if integ.u_modified
        k1 = f(uprev, p, t)
        integ.u_modified = false
    else
        @inbounds k1 = integ.k1
    end
    EEst = convert(T, Inf)
    while EEst > convert(T, 1.0)
        dt < convert(T, 1.0f-14) && error("dt<dtmin")
        # NOTE(review): loop-invariant recompute of k1 (uprev, t fixed in the
        # retry loop); also shadows the u_modified branch above.
        k1 = f(uprev, p, t)
        a = dt * a0201
        k2 = f(uprev + a * k1, p, t + c1 * dt)
        k3 = f(uprev + dt * (a0301 * k1 + a0302 * k2), p, t + c2 * dt)
        k4 = f(uprev + dt * (a0401 * k1 + a0403 * k3), p, t + c3 * dt)
        k5 = f(uprev + dt * (a0501 * k1 + a0503 * k3 + a0504 * k4), p, t + c4 * dt)
        k6 = f(uprev + dt * (a0601 * k1 + a0604 * k4 + a0605 * k5), p, t + c5 * dt)
        k7 = f(uprev + dt * (a0701 * k1 + a0704 * k4 + a0705 * k5 + a0706 * k6), p,
            t + c6 * dt)
        k8 = f(uprev + dt * (a0801 * k1 + a0806 * k6 + a0807 * k7), p, t + c7 * dt)
        k9 = f(uprev + dt * (a0901 * k1 + a0906 * k6 + a0907 * k7 + a0908 * k8), p,
            t + c8 * dt)
        k10 = f(uprev +
                dt * (a1001 * k1 + a1006 * k6 + a1007 * k7 + a1008 * k8 + a1009 * k9),
            p, t + c9 * dt)
        k11 = f(uprev +
                dt *
                (a1101 * k1 + a1106 * k6 + a1107 * k7 + a1108 * k8 + a1109 * k9 +
                 a1110 * k10),
            p, t + c10 * dt)
        k12 = f(uprev +
                dt *
                (a1201 * k1 + a1206 * k6 + a1207 * k7 + a1208 * k8 + a1209 * k9 +
                 a1210 * k10 +
                 a1211 * k11), p, t + c11 * dt)
        k13 = f(uprev +
                dt *
                (a1301 * k1 + a1306 * k6 + a1307 * k7 + a1308 * k8 + a1309 * k9 +
                 a1310 * k10 +
                 a1311 * k11 + a1312 * k12), p, t + c12 * dt)
        k14 = f(uprev +
                dt *
                (a1401 * k1 + a1406 * k6 + a1407 * k7 + a1408 * k8 + a1409 * k9 +
                 a1410 * k10 +
                 a1411 * k11 + a1412 * k12 + a1413 * k13), p, t + c13 * dt)
        g15 = uprev +
              dt *
              (a1501 * k1 + a1506 * k6 + a1507 * k7 + a1508 * k8 + a1509 * k9 +
               a1510 * k10 +
               a1511 * k11 + a1512 * k12 + a1513 * k13 + a1514 * k14)
        g16 = uprev +
              dt *
              (a1601 * k1 + a1606 * k6 + a1607 * k7 + a1608 * k8 + a1609 * k9 +
               a1610 * k10 +
               a1611 * k11 + a1612 * k12 + a1613 * k13)
        k15 = f(g15, p, t + dt)
        k16 = f(g16, p, t + dt)
        u = uprev +
            dt *
            (b1 * k1 + b8 * k8 + b9 * k9 + b10 * k10 + b11 * k11 + b12 * k12 + b13 * k13 +
             b14 * k14 + b15 * k15)
        # Embedded error estimate normalized by tolerances.
        tmp = dt * (btilde1 * k1 + btilde8 * k8 + btilde9 * k9 + btilde10 * k10 +
               btilde11 * k11 + btilde12 * k12 + btilde13 * k13 + btilde14 * k14 +
               btilde15 * k15 + btilde16 * k16)
        tmp = tmp ./ (abstol .+ max.(abs.(uprev), abs.(u)) * reltol)
        EEst = DiffEqBase.ODE_DEFAULT_NORM(tmp, t)
        if iszero(EEst)
            q = inv(qmax)
        else
            q11 = EEst^beta1
            q = q11 / (qold^beta2)
        end
        if EEst > 1
            # Rejected: shrink dt and retry.
            dt = dt / min(inv(qmin), q11 / gamma)
        else # EEst <= 1
            q = max(inv(qmax), min(inv(qmin), q / gamma))
            qold = max(EEst, qoldinit)
            dtnew = dt / q #dtnew
            dtnew = min(abs(dtnew), abs(tf - t - dt))
            @inbounds begin # Necessary for interpolation
                # Slots k2..k10 hold stages 8..16 (same layout as the
                # fixed-step GPUV9I version).
                integ.k1 = k1
                integ.k2 = k8
                integ.k3 = k9
                integ.k4 = k10
                integ.k5 = k11
                integ.k6 = k12
                integ.k7 = k13
                integ.k8 = k14
                integ.k9 = k15
                integ.k10 = k16
            end
            integ.dt = dt
            integ.dtnew = dtnew
            integ.qold = qold
            integ.tprev = t
            integ.u = u
            # NOTE(review): this method uses the Float64 literal `1e-14` while
            # the sibling steppers use `1.0f-14` — values differ slightly after
            # convert when T == Float32; confirm which was intended.
            if (tf - t - dt) < convert(T, 1e-14)
                integ.t = tf
            else
                if integ.tstops !== nothing && integ.tstops_idx <= length(integ.tstops) &&
                   integ.tstops[integ.tstops_idx] - integ.t - integ.dt -
                   100 * eps(T) < 0
                    integ.t = integ.tstops[integ.tstops_idx]
                    integ.u = integ(integ.t)
                    dt = integ.t - integ.tprev
                    integ.tstops_idx += 1
                else
                    ##Advance the integrator
                    integ.t += dt
                end
            end
        end
    end
    _, saved_in_cb = handle_callbacks!(integ, ts, us)
    return saved_in_cb
end
| DiffEqGPU | https://github.com/SciML/DiffEqGPU.jl.git |
|
[
"MIT"
] | 3.4.1 | 7a714b856fe00d6194774bc38e387375f06963fe | code | 4141 | import SciMLBase: @add_kwonly, AbstractODEProblem, AbstractODEFunction,
FunctionWrapperSpecialize, StandardODEProblem, prepare_initial_state, promote_tspan,
warn_paramtype
"""
    ImmutableODEProblem{uType, tType, isinplace, P, F, K, PT}

Immutable counterpart of `SciMLBase.ODEProblem`, used so problem data can be
isbits and shipped to GPU kernels. Field meanings mirror `ODEProblem`.
"""
struct ImmutableODEProblem{uType, tType, isinplace, P, F, K, PT} <:
       AbstractODEProblem{uType, tType, isinplace}
    """The ODE is `du = f(u,p,t)` for out-of-place and f(du,u,p,t) for in-place."""
    f::F
    """The initial condition is `u(tspan[1]) = u0`."""
    u0::uType
    """The solution `u(t)` will be computed for `tspan[1] ≤ t ≤ tspan[2]`."""
    tspan::tType
    """Constant parameters to be supplied as the second argument of `f`."""
    p::P
    """A callback to be applied to every solver which uses the problem."""
    kwargs::K
    """An internal argument for storing traits about the solving process."""
    problem_type::PT
    @add_kwonly function ImmutableODEProblem{iip}(f::AbstractODEFunction{iip},
            u0, tspan, p = NullParameters(),
            problem_type = StandardODEProblem();
            kwargs...) where {iip}
        # Canonical inner constructor: normalizes u0/tspan and captures all
        # type parameters from the processed values.
        _u0 = prepare_initial_state(u0)
        _tspan = promote_tspan(tspan)
        warn_paramtype(p)
        new{typeof(_u0), typeof(_tspan),
            isinplace(f), typeof(p), typeof(f),
            typeof(kwargs),
            typeof(problem_type)}(f,
            _u0,
            _tspan,
            p,
            kwargs,
            problem_type)
    end
    """
        ImmutableODEProblem{isinplace}(f,u0,tspan,p=NullParameters(),callback=CallbackSet())

    Define an ODE problem with the specified function.
    `isinplace` optionally sets whether the function is inplace or not.
    This is determined automatically, but not inferred.
    """
    function ImmutableODEProblem{iip}(f,
            u0,
            tspan,
            p = NullParameters();
            kwargs...) where {iip}
        # Wrap a raw callable into an ODEFunction before delegating above.
        _u0 = prepare_initial_state(u0)
        _tspan = promote_tspan(tspan)
        _f = ODEFunction{iip, DEFAULT_SPECIALIZATION}(f)
        ImmutableODEProblem(_f, _u0, _tspan, p; kwargs...)
    end
    @add_kwonly function ImmutableODEProblem{iip, recompile}(f, u0, tspan,
            p = NullParameters();
            kwargs...) where {iip, recompile}
        # Explicit specialization level requested via the second type param.
        ImmutableODEProblem{iip}(ODEFunction{iip, recompile}(f), u0, tspan, p; kwargs...)
    end
    function ImmutableODEProblem{iip, FunctionWrapperSpecialize}(f, u0, tspan,
            p = NullParameters();
            kwargs...) where {iip}
        _u0 = prepare_initial_state(u0)
        _tspan = promote_tspan(tspan)
        if !(f isa FunctionWrappersWrappers.FunctionWrappersWrapper)
            if iip
                ff = ODEFunction{iip, FunctionWrapperSpecialize}(wrapfun_iip(f,
                    (_u0, _u0, p,
                        _tspan[1])))
            else
                ff = ODEFunction{iip, FunctionWrapperSpecialize}(wrapfun_oop(f,
                    (_u0, p,
                        _tspan[1])))
            end
        else
            # BUGFIX: `ff` was previously left undefined on this path (when `f`
            # is already wrapped), causing an UndefVarError below. An already
            # wrapped function is passed through unchanged.
            ff = f
        end
        ImmutableODEProblem{iip}(ff, _u0, _tspan, p; kwargs...)
    end
end
"""
    ImmutableODEProblem(f::ODEFunction,u0,tspan,p=NullParameters(),callback=CallbackSet())

Define an ODE problem from an [`ODEFunction`](@ref).
"""
function ImmutableODEProblem(f::AbstractODEFunction, u0, tspan, args...; kwargs...)
    # In-place-ness is read off the function wrapper; delegate to the
    # type-parameterized constructor.
    ImmutableODEProblem{isinplace(f)}(f, u0, tspan, args...; kwargs...)
end
"""
    ImmutableODEProblem(f, u0, tspan, p = NullParameters(); kwargs...)

Construct an `ImmutableODEProblem` from a raw callable `f`, detecting whether
it is in-place and wrapping it in an `ODEFunction` first.
"""
function ImmutableODEProblem(f, u0, tspan, p = NullParameters(); kwargs...)
    # `isinplace(f, 4)`: an in-place rhs takes 4 arguments (du, u, p, t).
    iip = isinplace(f, 4)
    _u0 = prepare_initial_state(u0)
    _tspan = promote_tspan(tspan)
    _f = ODEFunction{iip, DEFAULT_SPECIALIZATION}(f)
    ImmutableODEProblem(_f, _u0, _tspan, p; kwargs...)
end
# Convert dynamically sized vectors/matrices to their StaticArrays
# equivalents (presumably so problem data becomes isbits/GPU-friendly —
# TODO confirm); any other value passes through unchanged.
staticarray_itize(x) = x
staticarray_itize(x::Vector) = SVector{length(x)}(x)
staticarray_itize(x::SizedVector) = SVector{length(x)}(x)
staticarray_itize(x::Matrix) = SMatrix{size(x)...}(x)
staticarray_itize(x::SizedMatrix) = SMatrix{size(x)...}(x)
"""
    convert(::Type{ImmutableODEProblem}, prob::ODEProblem)

Rebuild `prob` as an `ImmutableODEProblem`, converting its state and
parameters to static arrays via `staticarray_itize` where possible.
"""
function Base.convert(::Type{ImmutableODEProblem}, prob::T) where {T <: ODEProblem}
    ImmutableODEProblem(prob.f,
        staticarray_itize(prob.u0),
        prob.tspan,
        staticarray_itize(prob.p),
        prob.problem_type;
        prob.kwargs...)
end
| DiffEqGPU | https://github.com/SciML/DiffEqGPU.jl.git |
|
[
"MIT"
] | 3.4.1 | 7a714b856fe00d6194774bc38e387375f06963fe | code | 4628 | struct Kvaerno3Tableau{T, T2}
    # Coefficient tableau for the Kvaerno3 ESDIRK method.
    # `T` holds stage coefficients, `T2` time-like/interpolation coefficients.
    γ::T2
    a31::T
    a32::T
    a41::T
    a42::T
    a43::T
    btilde1::T   # embedded-error coefficients (bhat - b)
    btilde2::T
    btilde3::T
    btilde4::T
    c3::T2
    α31::T2      # stage-prediction (extrapolation) coefficients
    α32::T2
    α41::T2
    α42::T2
end
"""
    Kvaerno3Tableau(T, T2)

Construct the Kvaerno3 ESDIRK tableau with stage coefficients of element type
`T` and time-like coefficients of type `T2`. The `α3x` prediction weights are
derived from Hermite extrapolation at θ = c3/c2 (see the inline formulas).
"""
function Kvaerno3Tableau(T, T2)
    # NOTE(review): γ is written to 10 digits here; confirm against the
    # method's full-precision value if tightening tolerances.
    γ = convert(T2, 0.4358665215)
    a31 = convert(T, 0.490563388419108)
    a32 = convert(T, 0.073570090080892)
    a41 = convert(T, 0.308809969973036)
    a42 = convert(T, 1.490563388254106)
    a43 = -convert(T, 1.235239879727145)
    # bhat1 = convert(T,0.490563388419108)
    # bhat2 = convert(T,0.073570090080892)
    # bhat3 = convert(T,0.4358665215)
    # bhat4 = convert(T,0.0)
    btilde1 = convert(T, 0.181753418446072) # bhat1-a41
    btilde2 = convert(T, -1.416993298173214) # bhat2-a42
    btilde3 = convert(T, 1.671106401227145) # bhat3-a43
    btilde4 = convert(T, -γ) # bhat4-γ
    c3 = convert(T2, 1)
    c2 = 2γ
    # Hermite-extrapolation weights for predicting stage 3 from stages 1-2.
    θ = c3 / c2
    α31 = ((1 + (-4θ + 3θ^2)) + (6θ * (1 - θ) / c2) * γ)
    α32 = ((-2θ + 3θ^2) + (6θ * (1 - θ) / c2) * γ)
    α41 = convert(T2, 0.0)
    α42 = convert(T2, 0.0)
    Kvaerno3Tableau(γ, a31, a32, a41, a42, a43, btilde1, btilde2, btilde3, btilde4, c3, α31,
        α32, α41, α42)
end
struct Kvaerno5Tableau{T, T2}
    # Coefficient tableau for the Kvaerno5 ESDIRK method.
    # `T` holds stage coefficients, `T2` time-like/prediction coefficients.
    γ::T2
    a31::T
    a32::T
    a41::T
    a42::T
    a43::T
    a51::T
    a52::T
    a53::T
    a54::T
    a61::T
    a63::T
    a64::T
    a65::T
    a71::T
    a73::T
    a74::T
    a75::T
    a76::T
    btilde1::T   # embedded-error coefficients (row differences, see ctor)
    btilde3::T
    btilde4::T
    btilde5::T
    btilde6::T
    btilde7::T
    c3::T2
    c4::T2
    c5::T2
    c6::T2
    α31::T2      # stage-prediction (extrapolation) coefficients
    α32::T2
    α41::T2
    α42::T2
    α43::T2
    α51::T2
    α52::T2
    α53::T2
    α61::T2
    α62::T2
    α63::T2
end
#=
# Kvaerno5
# Predict z3 from Hermite z2 and z1
c2 = 2γ
θ = c3/c2
dt = c2
((1 + (-4θ + 3θ^2)) + (6θ*(1-θ)/c2)*γ)
((-2θ + 3θ^2) + (6θ*(1-θ)/c2)*γ)
# Predict others from z1 and z3 since it covers [0,1.23]
dt = c3 since interval is [c1,c3] and c1 = 0
θ = c4/c3, c5/c3, c6/c3, c7/c3
z = dt*k
z₁ + Θ*(-4dt*z₁ - 2dt*z₃ - 6y₀ + Θ*(3dt*z₁ + 3z₃ + 6y₀ - 6y₁ ) + 6y₁)/dt
(1 + (-4θ + 3θ^2))*z₁ + (-2θ + 3θ^2)*z₃ + (6θ*(1-θ)/dt)*(y₁-y₀)
y₀ = uprev
y₁ = uprev + a31*z₁ + a32*z₂ + γ*z₃
y₁-y₀ = a31*z₁ + a32*z₂ + γ*z₃
(1 + (-4θ + 3θ^2) + a31*(6θ*(1-θ)/dt))*z₁ +
(-2θ + 3θ^2 + γ*(6θ*(1-θ)/dt))*z₃ + (6θ*(1-θ)/dt)*a32*z₂
dt = c3
θ = c4/c3
(1 + (-4θ + 3θ^2) + a31*(6θ*(1-θ)/dt))
(6θ*(1-θ)/dt)*a32
(-2θ + 3θ^2 + γ*(6θ*(1-θ)/dt))
θ = c5/c3
(1 + (-4θ + 3θ^2) + a31*(6θ*(1-θ)/dt))
(6θ*(1-θ)/dt)*a32
(-2θ + 3θ^2 + γ*(6θ*(1-θ)/dt))
θ = c6/c3
(1 + (-4θ + 3θ^2) + a31*(6θ*(1-θ)/dt))
(6θ*(1-θ)/dt)*a32
(-2θ + 3θ^2 + γ*(6θ*(1-θ)/dt))
=#
"""
    Kvaerno5Tableau(T, T2)

Construct the Kvaerno5 ESDIRK tableau with stage coefficients of element type
`T` and time-like coefficients of type `T2`.

Fixes relative to the previous version:
  * removed the dead local `α21` (not a field of `Kvaerno5Tableau`);
  * `c3`–`c6` are converted directly to `T2` — the struct declares them `::T2`
    and converting through `T` first could double-round in mixed precision
    (this also matches `Kvaerno3Tableau`).
"""
function Kvaerno5Tableau(T, T2)
    γ = convert(T2, 0.26)
    a31 = convert(T, 0.13)
    a32 = convert(T, 0.84033320996790809)
    a41 = convert(T, 0.22371961478320505)
    a42 = convert(T, 0.47675532319799699)
    a43 = -convert(T, 0.06470895363112615)
    a51 = convert(T, 0.16648564323248321)
    a52 = convert(T, 0.10450018841591720)
    a53 = convert(T, 0.03631482272098715)
    a54 = -convert(T, 0.13090704451073998)
    a61 = convert(T, 0.13855640231268224)
    a63 = -convert(T, 0.04245337201752043)
    a64 = convert(T, 0.02446657898003141)
    a65 = convert(T, 0.61943039072480676)
    a71 = convert(T, 0.13659751177640291)
    a73 = -convert(T, 0.05496908796538376)
    a74 = -convert(T, 0.04118626728321046)
    a75 = convert(T, 0.62993304899016403)
    a76 = convert(T, 0.06962479448202728)
    # Embedded-error coefficients: differences between the 6th and 7th rows.
    btilde1 = convert(T, 0.00195889053627933) # a61-a71
    btilde3 = convert(T, 0.01251571594786333) # a63-a73
    btilde4 = convert(T, 0.06565284626324187) # a64-a74
    btilde5 = -convert(T, 0.01050265826535727) # a65-a75
    btilde6 = convert(T, 0.19037520551797272) # γ-a76
    btilde7 = -γ
    # Stage-prediction (extrapolation) weights; see the derivation in the
    # comment block above this function.
    α31 = convert(T2, -1.366025403784441)
    α32 = convert(T2, 2.3660254037844357)
    α41 = convert(T2, -0.19650552613122207)
    α42 = convert(T2, 0.8113579546496623)
    α43 = convert(T2, 0.38514757148155954)
    α51 = convert(T2, 0.10375304369958693)
    α52 = convert(T2, 0.937994698066431)
    α53 = convert(T2, -0.04174774176601781)
    α61 = convert(T2, -0.17281112873898072)
    α62 = convert(T2, 0.6235784481025847)
    α63 = convert(T2, 0.5492326806363959)
    c3 = convert(T2, 1.230333209967908)
    c4 = convert(T2, 0.895765984350076)
    c5 = convert(T2, 0.436393609858648)
    c6 = convert(T2, 1)
    Kvaerno5Tableau(γ, a31, a32, a41, a42, a43, a51, a52, a53, a54,
        a61, a63, a64, a65, a71, a73, a74, a75, a76,
        btilde1, btilde3, btilde4, btilde5, btilde6, btilde7,
        c3, c4, c5, c6, α31, α32, α41, α42, α43, α51, α52, α53,
        α61, α62, α63)
end
| DiffEqGPU | https://github.com/SciML/DiffEqGPU.jl.git |
|
[
"MIT"
] | 3.4.1 | 7a714b856fe00d6194774bc38e387375f06963fe | code | 7093 | struct Rodas4Tableau{T, T2}
    # Coefficient tableau for the Rodas4 Rosenbrock method.
    # `T` holds stage coefficients, `T2` time-like coefficients (c2-c4).
    a21::T
    a31::T
    a32::T
    a41::T
    a42::T
    a43::T
    a51::T
    a52::T
    a53::T
    a54::T
    C21::T   # Rosenbrock C-matrix entries (linear-solve combinations)
    C31::T
    C32::T
    C41::T
    C42::T
    C43::T
    C51::T
    C52::T
    C53::T
    C54::T
    C61::T
    C62::T
    C63::T
    C64::T
    C65::T
    γ::T
    c2::T2
    c3::T2
    c4::T2
    d1::T    # time-derivative weights
    d2::T
    d3::T
    d4::T
    h21::T   # dense-output (interpolation) coefficients
    h22::T
    h23::T
    h24::T
    h25::T
    h31::T
    h32::T
    h33::T
    h34::T
    h35::T
end
"""
    Rodas4Tableau(T, T2)

Construct the Rodas4 Rosenbrock tableau with stage coefficients of element
type `T` and time-like coefficients of type `T2`.

Generalized signature: the previous `(T::Type{T1}, T2::Type{T1}) where {T1}`
forced both type arguments to be identical even though the struct is
`Rodas4Tableau{T, T2}`; distinct types are now accepted (existing calls with
equal types are unaffected).
"""
function Rodas4Tableau(T::Type{T1}, T2::Type{S}) where {T1, S}
    γ = convert(T, 0.25)
    #BET2P=0.0317D0
    #BET3P=0.0635D0
    #BET4P=0.3438D0
    a21 = convert(T, 1.544000000000000)
    a31 = convert(T, 0.9466785280815826)
    a32 = convert(T, 0.2557011698983284)
    a41 = convert(T, 3.314825187068521)
    a42 = convert(T, 2.896124015972201)
    a43 = convert(T, 0.9986419139977817)
    a51 = convert(T, 1.221224509226641)
    a52 = convert(T, 6.019134481288629)
    a53 = convert(T, 12.53708332932087)
    a54 = -convert(T, 0.6878860361058950)
    C21 = -convert(T, 5.668800000000000)
    C31 = -convert(T, 2.430093356833875)
    C32 = -convert(T, 0.2063599157091915)
    C41 = -convert(T, 0.1073529058151375)
    C42 = -convert(T, 9.594562251023355)
    C43 = -convert(T, 20.47028614809616)
    C51 = convert(T, 7.496443313967647)
    C52 = -convert(T, 10.24680431464352)
    C53 = -convert(T, 33.99990352819905)
    C54 = convert(T, 11.70890893206160)
    C61 = convert(T, 8.083246795921522)
    C62 = -convert(T, 7.981132988064893)
    C63 = -convert(T, 31.52159432874371)
    C64 = convert(T, 16.31930543123136)
    C65 = -convert(T, 6.058818238834054)
    c2 = convert(T2, 0.386)
    c3 = convert(T2, 0.21)
    c4 = convert(T2, 0.63)
    d1 = convert(T, 0.2500000000000000)
    d2 = -convert(T, 0.1043000000000000)
    d3 = convert(T, 0.1035000000000000)
    d4 = -convert(T, 0.03620000000000023)
    # Dense-output (interpolation) coefficients.
    h21 = convert(T, 10.12623508344586)
    h22 = -convert(T, 7.487995877610167)
    h23 = -convert(T, 34.80091861555747)
    h24 = -convert(T, 7.992771707568823)
    h25 = convert(T, 1.025137723295662)
    h31 = -convert(T, 0.6762803392801253)
    h32 = convert(T, 6.087714651680015)
    h33 = convert(T, 16.43084320892478)
    h34 = convert(T, 24.76722511418386)
    h35 = -convert(T, 6.594389125716872)
    Rodas4Tableau(a21, a31, a32, a41, a42, a43, a51, a52, a53, a54,
        C21, C31, C32, C41, C42, C43, C51, C52, C53, C54, C61, C62, C63, C64, C65,
        γ, c2, c3, c4, d1, d2, d3, d4,
        h21, h22, h23, h24, h25, h31, h32, h33, h34, h35)
end
"""
    Rodas5PTableau{T, T2}

Coefficient tableau for the Rodas5P Rosenbrock method. `T` is the coefficient
eltype; `T2` holds the time-like quantities (note that here `γ` is stored as
`T2`, unlike `Rodas4Tableau` where it is `T`).
"""
struct Rodas5PTableau{T, T2}
    # a_ij: stage combination coefficients
    a21::T
    a31::T
    a32::T
    a41::T
    a42::T
    a43::T
    a51::T
    a52::T
    a53::T
    a54::T
    a61::T
    a62::T
    a63::T
    a64::T
    a65::T
    # C_ij: coefficients applied to earlier stage values in the stage updates
    C21::T
    C31::T
    C32::T
    C41::T
    C42::T
    C43::T
    C51::T
    C52::T
    C53::T
    C54::T
    C61::T
    C62::T
    C63::T
    C64::T
    C65::T
    C71::T
    C72::T
    C73::T
    C74::T
    C75::T
    C76::T
    C81::T
    C82::T
    C83::T
    C84::T
    C85::T
    C86::T
    C87::T
    # diagonal coefficient, stored in the time type here
    γ::T2
    # d_i coefficients
    d1::T
    d2::T
    d3::T
    d4::T
    d5::T
    # c_i: abscissae
    c2::T2
    c3::T2
    c4::T2
    c5::T2
    # h_ij: presumably dense-output (interpolation) weights — TODO confirm
    h21::T
    h22::T
    h23::T
    h24::T
    h25::T
    h26::T
    h27::T
    h28::T
    h31::T
    h32::T
    h33::T
    h34::T
    h35::T
    h36::T
    h37::T
    h38::T
    h41::T
    h42::T
    h43::T
    h44::T
    h45::T
    h46::T
    h47::T
    h48::T
end
"""
    Rodas5PTableau(T, T2)

Construct the `Rodas5PTableau` coefficient set, converting method coefficients
to `T` and time-like quantities (`γ`, `c2`–`c5`) to `T2`.
"""
function Rodas5PTableau(T, T2)
    # NOTE(review): γ is converted to the time type T2 here (cf. `γ::T` and
    # `convert(T, 0.25)` in Rodas4Tableau) — consistent with the struct's
    # `γ::T2` field, so this looks intentional.
    γ = convert(T2, 0.21193756319429014)
    # a_ij stage coefficients
    a21 = convert(T, 3.0)
    a31 = convert(T, 2.849394379747939)
    a32 = convert(T, 0.45842242204463923)
    a41 = convert(T, -6.954028509809101)
    a42 = convert(T, 2.489845061869568)
    a43 = convert(T, -10.358996098473584)
    a51 = convert(T, 2.8029986275628964)
    a52 = convert(T, 0.5072464736228206)
    a53 = convert(T, -0.3988312541770524)
    a54 = convert(T, -0.04721187230404641)
    a61 = convert(T, -7.502846399306121)
    a62 = convert(T, 2.561846144803919)
    a63 = convert(T, -11.627539656261098)
    a64 = convert(T, -0.18268767659942256)
    a65 = convert(T, 0.030198172008377946)
    # C_ij coefficients
    C21 = convert(T, -14.155112264123755)
    C31 = convert(T, -17.97296035885952)
    C32 = convert(T, -2.859693295451294)
    C41 = convert(T, 147.12150275711716)
    C42 = convert(T, -1.41221402718213)
    C43 = convert(T, 71.68940251302358)
    C51 = convert(T, 165.43517024871676)
    C52 = convert(T, -0.4592823456491126)
    C53 = convert(T, 42.90938336958603)
    C54 = convert(T, -5.961986721573306)
    C61 = convert(T, 24.854864614690072)
    C62 = convert(T, -3.0009227002832186)
    C63 = convert(T, 47.4931110020768)
    C64 = convert(T, 5.5814197821558125)
    C65 = convert(T, -0.6610691825249471)
    C71 = convert(T, 30.91273214028599)
    C72 = convert(T, -3.1208243349937974)
    C73 = convert(T, 77.79954646070892)
    C74 = convert(T, 34.28646028294783)
    C75 = convert(T, -19.097331116725623)
    C76 = convert(T, -28.087943162872662)
    C81 = convert(T, 37.80277123390563)
    C82 = convert(T, -3.2571969029072276)
    C83 = convert(T, 112.26918849496327)
    C84 = convert(T, 66.9347231244047)
    C85 = convert(T, -40.06618937091002)
    C86 = convert(T, -54.66780262877968)
    C87 = convert(T, -9.48861652309627)
    # abscissae in the time type
    c2 = convert(T2, 0.6358126895828704)
    c3 = convert(T2, 0.4095798393397535)
    c4 = convert(T2, 0.9769306725060716)
    c5 = convert(T2, 0.4288403609558664)
    # d_i coefficients
    d1 = convert(T, 0.21193756319429014)
    d2 = convert(T, -0.42387512638858027)
    d3 = convert(T, -0.3384627126235924)
    d4 = convert(T, 1.8046452872882734)
    d5 = convert(T, 2.325825639765069)
    # h_ij weights (presumably for dense output — TODO confirm)
    h21 = convert(T, 25.948786856663858)
    h22 = convert(T, -2.5579724845846235)
    h23 = convert(T, 10.433815404888879)
    h24 = convert(T, -2.3679251022685204)
    h25 = convert(T, 0.524948541321073)
    h26 = convert(T, 1.1241088310450404)
    h27 = convert(T, 0.4272876194431874)
    h28 = convert(T, -0.17202221070155493)
    h31 = convert(T, -9.91568850695171)
    h32 = convert(T, -0.9689944594115154)
    h33 = convert(T, 3.0438037242978453)
    h34 = convert(T, -24.495224566215796)
    h35 = convert(T, 20.176138334709044)
    h36 = convert(T, 15.98066361424651)
    h37 = convert(T, -6.789040303419874)
    h38 = convert(T, -6.710236069923372)
    h41 = convert(T, 11.419903575922262)
    h42 = convert(T, 2.8879645146136994)
    h43 = convert(T, 72.92137995996029)
    h44 = convert(T, 80.12511834622643)
    h45 = convert(T, -52.072871366152654)
    h46 = convert(T, -59.78993625266729)
    h47 = convert(T, -0.15582684282751913)
    h48 = convert(T, 4.883087185713722)
    Rodas5PTableau(a21, a31, a32, a41, a42, a43, a51, a52, a53, a54,
        a61, a62, a63, a64, a65,
        C21, C31, C32, C41, C42, C43, C51, C52, C53, C54,
        C61, C62, C63, C64, C65, C71, C72, C73, C74, C75, C76,
        C81, C82, C83, C84, C85, C86, C87,
        γ, d1, d2, d3, d4, d5, c2, c3, c4, c5,
        h21, h22, h23, h24, h25, h26, h27, h28, h31, h32, h33, h34, h35, h36,
        h37,
        h38, h41, h42, h43, h44, h45, h46, h47, h48)
end
| DiffEqGPU | https://github.com/SciML/DiffEqGPU.jl.git |
|
[
"MIT"
] | 3.4.1 | 7a714b856fe00d6194774bc38e387375f06963fe | code | 38674 | ## Vern7
"""
    Vern7ExtraStages{T, T2}

Extra-stage coefficients (stages 11–16) for the Vern7 method, used by the
dense-output interpolant (paired with `Vern7InterpolationCoefficients`).
`T` is the coefficient eltype, `T2` the abscissa (time) eltype.
Each group is one stage: its abscissa `c1x` followed by its `a1x__`
coefficients; note some entries (e.g. `a1210`) are absent, i.e. zero.
"""
struct Vern7ExtraStages{T, T2}
    c11::T2
    a1101::T
    a1104::T
    a1105::T
    a1106::T
    a1107::T
    a1108::T
    a1109::T
    c12::T2
    a1201::T
    a1204::T
    a1205::T
    a1206::T
    a1207::T
    a1208::T
    a1209::T
    a1211::T
    c13::T2
    a1301::T
    a1304::T
    a1305::T
    a1306::T
    a1307::T
    a1308::T
    a1309::T
    a1311::T
    a1312::T
    c14::T2
    a1401::T
    a1404::T
    a1405::T
    a1406::T
    a1407::T
    a1408::T
    a1409::T
    a1411::T
    a1412::T
    a1413::T
    c15::T2
    a1501::T
    a1504::T
    a1505::T
    a1506::T
    a1507::T
    a1508::T
    a1509::T
    a1511::T
    a1512::T
    a1513::T
    c16::T2
    a1601::T
    a1604::T
    a1605::T
    a1606::T
    a1607::T
    a1608::T
    a1609::T
    a1611::T
    a1612::T
    a1613::T
end
"""
    Vern7ExtraStages(T, T2)

Construct the `Vern7ExtraStages` coefficients, converting the `a` coefficients
to `T` and the abscissae `c11`–`c16` to `T2`.

The two types may now differ (matching the struct's `{T, T2}` parameters); the
previous signature forced `T === T2`. Passing the same type for both
reproduces the old behavior exactly.
"""
function Vern7ExtraStages(::Type{T}, ::Type{T2}) where {T, T2}
    # stage 11
    c11 = convert(T2, 1)
    a1101 = convert(T, 0.04715561848627222)
    a1104 = convert(T, 0.25750564298434153)
    a1105 = convert(T, 0.2621665397741262)
    a1106 = convert(T, 0.15216092656738558)
    a1107 = convert(T, 0.49399691700324844)
    a1108 = convert(T, -0.29430311714032503)
    a1109 = convert(T, 0.0813174723249511)
    # stage 12
    c12 = convert(T2, 0.29)
    a1201 = convert(T, 0.0523222769159969)
    a1204 = convert(T, 0.22495861826705715)
    a1205 = convert(T, 0.017443709248776376)
    a1206 = convert(T, -0.007669379876829393)
    a1207 = convert(T, 0.03435896044073285)
    a1208 = convert(T, -0.0410209723009395)
    a1209 = convert(T, 0.025651133005205617)
    a1211 = convert(T, -0.0160443457)
    # stage 13
    c13 = convert(T2, 0.125)
    a1301 = convert(T, 0.053053341257859085)
    a1304 = convert(T, 0.12195301011401886)
    a1305 = convert(T, 0.017746840737602496)
    a1306 = convert(T, -0.0005928372667681495)
    a1307 = convert(T, 0.008381833970853752)
    a1308 = convert(T, -0.01293369259698612)
    a1309 = convert(T, 0.009412056815253861)
    a1311 = convert(T, -0.005353253107275676)
    a1312 = convert(T, -0.06666729992455811)
    # stage 14
    c14 = convert(T2, 0.25)
    a1401 = convert(T, 0.03887903257436304)
    a1404 = convert(T, -0.0024403203308301317)
    a1405 = convert(T, -0.0013928917214672623)
    a1406 = convert(T, -0.00047446291558680135)
    a1407 = convert(T, 0.00039207932413159514)
    a1408 = convert(T, -0.00040554733285128004)
    a1409 = convert(T, 0.00019897093147716726)
    a1411 = convert(T, -0.00010278198793179169)
    a1412 = convert(T, 0.03385661513870267)
    a1413 = convert(T, 0.1814893063199928)
    # stage 15
    c15 = convert(T2, 0.53)
    a1501 = convert(T, 0.05723681204690013)
    a1504 = convert(T, 0.22265948066761182)
    a1505 = convert(T, 0.12344864200186899)
    a1506 = convert(T, 0.04006332526666491)
    a1507 = convert(T, -0.05269894848581452)
    a1508 = convert(T, 0.04765971214244523)
    a1509 = convert(T, -0.02138895885042213)
    a1511 = convert(T, 0.015193891064036402)
    a1512 = convert(T, 0.12060546716289655)
    a1513 = convert(T, -0.022779423016187374)
    # stage 16
    c16 = convert(T2, 0.79)
    a1601 = convert(T, 0.051372038802756814)
    a1604 = convert(T, 0.5414214473439406)
    a1605 = convert(T, 0.350399806692184)
    a1606 = convert(T, 0.14193112269692182)
    a1607 = convert(T, 0.10527377478429423)
    a1608 = convert(T, -0.031081847805874016)
    a1609 = convert(T, -0.007401883149519145)
    a1611 = convert(T, -0.006377932504865363)
    a1612 = convert(T, -0.17325495908361865)
    a1613 = convert(T, -0.18228156777622026)
    Vern7ExtraStages(c11, a1101, a1104, a1105, a1106, a1107, a1108, a1109, c12, a1201,
        a1204, a1205, a1206, a1207, a1208, a1209, a1211, c13, a1301, a1304,
        a1305, a1306, a1307, a1308, a1309, a1311, a1312, c14, a1401, a1404,
        a1405, a1406, a1407, a1408, a1409, a1411, a1412, a1413, c15, a1501,
        a1504, a1505, a1506, a1507, a1508, a1509, a1511, a1512, a1513, c16,
        a1601, a1604, a1605, a1606, a1607, a1608, a1609, a1611, a1612, a1613)
end
"""
    Vern7InterpolationCoefficients{T}

Polynomial coefficients `r__x` for the Vern7 dense-output interpolant.
Fields are grouped per stage (`r01_`, `r04_`, …, `r16_`), each holding the
powers of the interpolation variable. Stages 2–3 and 10 have no fields, i.e.
they do not contribute to the interpolant.
"""
struct Vern7InterpolationCoefficients{T}
    r011::T
    r012::T
    r013::T
    r014::T
    r015::T
    r016::T
    r017::T
    r042::T
    r043::T
    r044::T
    r045::T
    r046::T
    r047::T
    r052::T
    r053::T
    r054::T
    r055::T
    r056::T
    r057::T
    r062::T
    r063::T
    r064::T
    r065::T
    r066::T
    r067::T
    r072::T
    r073::T
    r074::T
    r075::T
    r076::T
    r077::T
    r082::T
    r083::T
    r084::T
    r085::T
    r086::T
    r087::T
    r092::T
    r093::T
    r094::T
    r095::T
    r096::T
    r097::T
    r112::T
    r113::T
    r114::T
    r115::T
    r116::T
    r117::T
    r122::T
    r123::T
    r124::T
    r125::T
    r126::T
    r127::T
    r132::T
    r133::T
    r134::T
    r135::T
    r136::T
    r137::T
    r142::T
    r143::T
    r144::T
    r145::T
    r146::T
    r147::T
    r152::T
    r153::T
    r154::T
    r155::T
    r156::T
    r157::T
    r162::T
    r163::T
    r164::T
    r165::T
    r166::T
    r167::T
end
"""
    Vern7InterpolationCoefficients(T)

Construct the `Vern7InterpolationCoefficients` dense-output coefficients,
converting every entry to `T`. Only stage 1 has a constant term (`r011`);
all other stages start at the linear coefficient `r_x2`.
"""
function Vern7InterpolationCoefficients(::Type{T}) where {T}
    r011 = convert(T, 1)
    r012 = convert(T, -8.413387198332767)
    r013 = convert(T, 33.675508884490895)
    r014 = convert(T, -70.80159089484886)
    r015 = convert(T, 80.64695108301298)
    r016 = convert(T, -47.19413969837522)
    r017 = convert(T, 11.133813442539243)
    r042 = convert(T, 8.754921980674396)
    r043 = convert(T, -88.4596828699771)
    r044 = convert(T, 346.9017638429916)
    r045 = convert(T, -629.2580030059837)
    r046 = convert(T, 529.6773755604193)
    r047 = convert(T, -167.35886986514018)
    r052 = convert(T, 8.913387586637922)
    r053 = convert(T, -90.06081846893218)
    r054 = convert(T, 353.1807459217058)
    r055 = convert(T, -640.6476819744374)
    r056 = convert(T, 539.2646279047156)
    r057 = convert(T, -170.38809442991547)
    r062 = convert(T, 5.1733120298478)
    r063 = convert(T, -52.271115900055385)
    r064 = convert(T, 204.9853867374073)
    r065 = convert(T, -371.8306118563603)
    r066 = convert(T, 312.9880934374529)
    r067 = convert(T, -98.89290352172495)
    r072 = convert(T, 16.79537744079696)
    r073 = convert(T, -169.70040000059728)
    r074 = convert(T, 665.4937727009246)
    r075 = convert(T, -1207.1638892336007)
    r076 = convert(T, 1016.1291515818546)
    r077 = convert(T, -321.06001557237494)
    r082 = convert(T, -10.005997536098665)
    r083 = convert(T, 101.1005433052275)
    r084 = convert(T, -396.47391512378437)
    r085 = convert(T, 719.1787707014183)
    r086 = convert(T, -605.3681033918824)
    r087 = convert(T, 191.27439892797935)
    r092 = convert(T, 2.764708833638599)
    r093 = convert(T, -27.934602637390462)
    r094 = convert(T, 109.54779186137893)
    r095 = convert(T, -198.7128113064482)
    r096 = convert(T, 167.26633571640318)
    r097 = convert(T, -52.85010499525706)
    r112 = convert(T, -2.1696320280163506)
    r113 = convert(T, 22.016696037569876)
    r114 = convert(T, -86.90152427798948)
    r115 = convert(T, 159.22388973861476)
    r116 = convert(T, -135.9618306534588)
    r117 = convert(T, 43.792401183280006)
    r122 = convert(T, -4.890070188793804)
    r123 = convert(T, 22.75407737425176)
    r124 = convert(T, -30.78034218537731)
    r125 = convert(T, -2.797194317207249)
    r126 = convert(T, 31.369456637508403)
    r127 = convert(T, -15.655927320381801)
    r132 = convert(T, 10.862170929551967)
    r133 = convert(T, -50.542971417827104)
    r134 = convert(T, 68.37148040407511)
    r135 = convert(T, 6.213326521632409)
    r136 = convert(T, -69.68006323194157)
    r137 = convert(T, 34.776056794509195)
    r142 = convert(T, -11.37286691922923)
    r143 = convert(T, 130.79058078246717)
    r144 = convert(T, -488.65113677785604)
    r145 = convert(T, 832.2148793276441)
    r146 = convert(T, -664.7743368554426)
    r147 = convert(T, 201.79288044241662)
    r152 = convert(T, -5.919778732715007)
    r153 = convert(T, 63.27679965889219)
    r154 = convert(T, -265.432682088738)
    r155 = convert(T, 520.1009254140611)
    r156 = convert(T, -467.412109533902)
    r157 = convert(T, 155.3868452824017)
    r162 = convert(T, -10.492146197961823)
    r163 = convert(T, 105.35538525188011)
    r164 = convert(T, -409.43975011988937)
    r165 = convert(T, 732.831448907654)
    r166 = convert(T, -606.3044574733512)
    r167 = convert(T, 188.0495196316683)
    Vern7InterpolationCoefficients(r011, r012, r013, r014, r015, r016, r017, r042, r043,
        r044, r045, r046, r047, r052, r053, r054, r055, r056,
        r057, r062, r063, r064, r065, r066, r067, r072, r073,
        r074, r075, r076, r077, r082, r083, r084, r085, r086,
        r087, r092, r093, r094, r095, r096, r097, r112, r113,
        r114, r115, r116, r117, r122, r123, r124, r125, r126,
        r127, r132, r133, r134, r135, r136, r137, r142, r143,
        r144, r145, r146, r147, r152, r153, r154, r155, r156,
        r157, r162, r163, r164, r165, r166, r167)
end
"""
    Vern7Tableau{T, T2}

Coefficient tableau for Verner's 7th-order explicit Runge-Kutta method.
`T` is the coefficient eltype, `T2` the abscissa (time) eltype. Holds the
main-method coefficients plus `extra`/`interp` for dense output.
"""
struct Vern7Tableau{T, T2}
    # c_i: abscissae
    c2::T2
    c3::T2
    c4::T2
    c5::T2
    c6::T2
    c7::T2
    c8::T2
    # a0ij: stage coefficients (absent entries are zero)
    a021::T
    a031::T
    a032::T
    a041::T
    a043::T
    a051::T
    a053::T
    a054::T
    a061::T
    a063::T
    a064::T
    a065::T
    a071::T
    a073::T
    a074::T
    a075::T
    a076::T
    a081::T
    a083::T
    a084::T
    a085::T
    a086::T
    a087::T
    a091::T
    a093::T
    a094::T
    a095::T
    a096::T
    a097::T
    a098::T
    a101::T
    a103::T
    a104::T
    a105::T
    a106::T
    a107::T
    # b_i: solution weights
    b1::T
    b4::T
    b5::T
    b6::T
    b7::T
    b8::T
    b9::T
    # btilde_i: error-estimate weights
    btilde1::T
    btilde4::T
    btilde5::T
    btilde6::T
    btilde7::T
    btilde8::T
    btilde9::T
    btilde10::T
    # dense-output data
    extra::Vern7ExtraStages{T, T2}
    interp::Vern7InterpolationCoefficients{T}
end
"""
    Vern7Tableau(T, T2)

Construct the full `Vern7Tableau`, converting method coefficients to `T` and
abscissae to `T2`, and building the `extra`/`interp` dense-output tables.

The two types may now differ (matching the struct's `{T, T2}` parameters); the
previous signature forced `T === T2`. Passing the same type for both
reproduces the old behavior exactly. Note the nested `Vern7ExtraStages(T, T2)`
call still requires whatever types that constructor accepts.
"""
function Vern7Tableau(::Type{T}, ::Type{T2}) where {T, T2}
    # abscissae
    c2 = convert(T2, 0.005)
    c3 = convert(T2, 0.10888888888888888)
    c4 = convert(T2, 0.16333333333333333)
    c5 = convert(T2, 0.4555)
    c6 = convert(T2, 0.6095094489978381)
    c7 = convert(T2, 0.884)
    c8 = convert(T2, 0.925)
    # stage coefficients
    a021 = convert(T, 0.005)
    a031 = convert(T, -1.07679012345679)
    a032 = convert(T, 1.185679012345679)
    a041 = convert(T, 0.04083333333333333)
    a043 = convert(T, 0.1225)
    a051 = convert(T, 0.6389139236255726)
    a053 = convert(T, -2.455672638223657)
    a054 = convert(T, 2.272258714598084)
    a061 = convert(T, -2.6615773750187572)
    a063 = convert(T, 10.804513886456137)
    a064 = convert(T, -8.3539146573962)
    a065 = convert(T, 0.820487594956657)
    a071 = convert(T, 6.067741434696772)
    a073 = convert(T, -24.711273635911088)
    a074 = convert(T, 20.427517930788895)
    a075 = convert(T, -1.9061579788166472)
    a076 = convert(T, 1.006172249242068)
    a081 = convert(T, 12.054670076253203)
    a083 = convert(T, -49.75478495046899)
    a084 = convert(T, 41.142888638604674)
    a085 = convert(T, -4.461760149974004)
    a086 = convert(T, 2.042334822239175)
    a087 = convert(T, -0.09834843665406107)
    a091 = convert(T, 10.138146522881808)
    a093 = convert(T, -42.6411360317175)
    a094 = convert(T, 35.76384003992257)
    a095 = convert(T, -4.3480228403929075)
    a096 = convert(T, 2.0098622683770357)
    a097 = convert(T, 0.3487490460338272)
    a098 = convert(T, -0.27143900510483127)
    a101 = convert(T, -45.030072034298676)
    a103 = convert(T, 187.3272437654589)
    a104 = convert(T, -154.02882369350186)
    a105 = convert(T, 18.56465306347536)
    a106 = convert(T, -7.141809679295079)
    a107 = convert(T, 1.3088085781613787)
    # solution weights
    b1 = convert(T, 0.04715561848627222)
    b4 = convert(T, 0.25750564298434153)
    b5 = convert(T, 0.26216653977412624)
    b6 = convert(T, 0.15216092656738558)
    b7 = convert(T, 0.4939969170032485)
    b8 = convert(T, -0.29430311714032503)
    b9 = convert(T, 0.08131747232495111)
    # bhat weights of the embedded method, kept commented for reference:
    # bhat1 = convert(T,0.044608606606341174)
    # bhat4 = convert(T,0.26716403785713727)
    # bhat5 = convert(T,0.22010183001772932)
    # bhat6 = convert(T,0.2188431703143157)
    # bhat7 = convert(T,0.22898717054112028)
    # bhat10 = convert(T,0.02029518466335628)
    # error-estimate weights (b - bhat)
    btilde1 = convert(T, 0.002547011879931045)
    btilde4 = convert(T, -0.00965839487279575)
    btilde5 = convert(T, 0.04206470975639691)
    btilde6 = convert(T, -0.0666822437469301)
    btilde7 = convert(T, 0.2650097464621281)
    btilde8 = convert(T, -0.29430311714032503)
    btilde9 = convert(T, 0.08131747232495111)
    btilde10 = convert(T, -0.02029518466335628)
    # dense-output tables
    extra = Vern7ExtraStages(T, T2)
    interp = Vern7InterpolationCoefficients(T)
    Vern7Tableau(c2, c3, c4, c5, c6, c7, c8, a021, a031, a032, a041, a043, a051, a053, a054,
        a061, a063, a064, a065, a071, a073, a074, a075, a076, a081, a083, a084,
        a085, a086, a087, a091, a093, a094, a095, a096, a097, a098, a101, a103,
        a104, a105, a106, a107, b1, b4, b5, b6, b7, b8, b9, btilde1, btilde4,
        btilde5, btilde6, btilde7, btilde8, btilde9, btilde10, extra, interp)
end
## Vern9
"""
    Vern9ExtraStages{T, T2}

Extra-stage coefficients (stages 17–26) for the Vern9 method's dense-output
interpolant: stages 17–21 for the order-8 interpolant and 22–26 for the
order-9 interpolant (see the constructor's comments). `T` is the coefficient
eltype, `T2` the abscissa (time) eltype; absent `a` entries are zero.
"""
struct Vern9ExtraStages{T, T2}
    c17::T2
    a1701::T
    a1708::T
    a1709::T
    a1710::T
    a1711::T
    a1712::T
    a1713::T
    a1714::T
    a1715::T
    c18::T2
    a1801::T
    a1808::T
    a1809::T
    a1810::T
    a1811::T
    a1812::T
    a1813::T
    a1814::T
    a1815::T
    a1817::T
    c19::T2
    a1901::T
    a1908::T
    a1909::T
    a1910::T
    a1911::T
    a1912::T
    a1913::T
    a1914::T
    a1915::T
    a1917::T
    a1918::T
    c20::T2
    a2001::T
    a2008::T
    a2009::T
    a2010::T
    a2011::T
    a2012::T
    a2013::T
    a2014::T
    a2015::T
    a2017::T
    a2018::T
    a2019::T
    c21::T2
    a2101::T
    a2108::T
    a2109::T
    a2110::T
    a2111::T
    a2112::T
    a2113::T
    a2114::T
    a2115::T
    a2117::T
    a2118::T
    a2119::T
    a2120::T
    c22::T2
    a2201::T
    a2208::T
    a2209::T
    a2210::T
    a2211::T
    a2212::T
    a2213::T
    a2214::T
    a2215::T
    a2217::T
    a2218::T
    a2219::T
    a2220::T
    a2221::T
    c23::T2
    a2301::T
    a2308::T
    a2309::T
    a2310::T
    a2311::T
    a2312::T
    a2313::T
    a2314::T
    a2315::T
    a2317::T
    a2318::T
    a2319::T
    a2320::T
    a2321::T
    c24::T2
    a2401::T
    a2408::T
    a2409::T
    a2410::T
    a2411::T
    a2412::T
    a2413::T
    a2414::T
    a2415::T
    a2417::T
    a2418::T
    a2419::T
    a2420::T
    a2421::T
    c25::T2
    a2501::T
    a2508::T
    a2509::T
    a2510::T
    a2511::T
    a2512::T
    a2513::T
    a2514::T
    a2515::T
    a2517::T
    a2518::T
    a2519::T
    a2520::T
    a2521::T
    c26::T2
    a2601::T
    a2608::T
    a2609::T
    a2610::T
    a2611::T
    a2612::T
    a2613::T
    a2614::T
    a2615::T
    a2617::T
    a2618::T
    a2619::T
    a2620::T
    a2621::T
end
"""
    Vern9ExtraStages(T, T2)

Construct the `Vern9ExtraStages` coefficients, converting the `a` coefficients
to `T` and the abscissae `c17`–`c26` to `T2`.

The two types may now differ (matching the struct's `{T, T2}` parameters); the
previous signature forced `T === T2`. Passing the same type for both
reproduces the old behavior exactly.
"""
function Vern9ExtraStages(::Type{T}, ::Type{T2}) where {T, T2}
    # FIVE ADDITIONAL STAGES FOR INTERPOLANT OF ORDER 8
    c17 = convert(T2, 1)
    a1701 = convert(T, 0.014611976858423152)
    a1708 = convert(T, -0.3915211862331339)
    a1709 = convert(T, 0.23109325002895065)
    a1710 = convert(T, 0.12747667699928525)
    a1711 = convert(T, 0.2246434176204158)
    a1712 = convert(T, 0.5684352689748513)
    a1713 = convert(T, 0.058258715572158275)
    a1714 = convert(T, 0.13643174034822156)
    a1715 = convert(T, 0.030570139830827976)
    c18 = convert(T2, 0.7404185470631561)
    a1801 = convert(T, 0.015499736681895594)
    a1808 = convert(T, 0.3355153219059635)
    a1809 = convert(T, 0.20036139441918607)
    a1810 = convert(T, 0.12520606592835493)
    a1811 = convert(T, 0.22986763931842066)
    a1812 = convert(T, -0.20202506534761813)
    a1813 = convert(T, 0.05917103230665457)
    a1814 = convert(T, -0.026518347830476387)
    a1815 = convert(T, -0.023840946021309713)
    a1817 = convert(T, 0.027181715702085017)
    c19 = convert(T2, 0.888)
    a1901 = convert(T, 0.013024539431143383)
    a1908 = convert(T, -0.7452850902413112)
    a1909 = convert(T, 0.2643867896429301)
    a1910 = convert(T, 0.1313961758372754)
    a1911 = convert(T, 0.21672538151229273)
    a1912 = convert(T, 0.8734117564076053)
    a1913 = convert(T, 0.011859056439357767)
    a1914 = convert(T, 0.05876002941689551)
    a1915 = convert(T, 0.003266518630202088)
    a1917 = convert(T, -0.00895930864841793)
    a1918 = convert(T, 0.06941415157202692)
    c20 = convert(T2, 0.696)
    a2001 = convert(T, 0.013970899969259426)
    a2008 = convert(T, -0.46657653359576745)
    a2009 = convert(T, 0.24163727872162571)
    a2010 = convert(T, 0.12903633413456747)
    a2011 = convert(T, 0.22167006717351054)
    a2012 = convert(T, 0.6257275123364645)
    a2013 = convert(T, 0.04355312415679284)
    a2014 = convert(T, 0.10119624916672908)
    a2015 = convert(T, 0.01808582254679721)
    a2017 = convert(T, -0.020798755876891697)
    a2018 = convert(T, -0.09022232517086219)
    a2019 = convert(T, -0.12127967356222542)
    c21 = convert(T2, 0.487)
    a2101 = convert(T, 0.016046388883181127)
    a2108 = convert(T, 0.09517712399458336)
    a2109 = convert(T, 0.13591872646553177)
    a2110 = convert(T, 0.1237765280959854)
    a2111 = convert(T, 0.2335656264102966)
    a2112 = convert(T, -0.09051508172625873)
    a2113 = convert(T, -0.02537574270006131)
    a2114 = convert(T, -0.13596316968871622)
    a2115 = convert(T, -0.04679214284145113)
    a2117 = convert(T, 0.05177958859391748)
    a2118 = convert(T, 0.09672595677476774)
    a2119 = convert(T, 0.14773126903407427)
    a2120 = convert(T, -0.11507507129585039)
    # FIVE ADDITIONAL STAGES FOR INTERPOLANT OF ORDER 9
    c22 = convert(T2, 0.025)
    a2201 = convert(T, 0.018029186238936207)
    a2208 = convert(T, 0.06983601042028874)
    a2209 = convert(T, -0.025412476607916634)
    a2210 = convert(T, 0.008487827035463275)
    a2211 = convert(T, -0.002427525516089802)
    a2212 = convert(T, -0.10478397528938199)
    a2213 = convert(T, -0.014731477952480419)
    a2214 = convert(T, -0.03916338390816177)
    a2215 = convert(T, -0.010056573432939595)
    a2217 = convert(T, 0.011025103922048344)
    a2218 = convert(T, 0.005092830749095398)
    a2219 = convert(T, 0.04759715599420645)
    a2220 = convert(T, 0.03386307003288383)
    a2221 = convert(T, 0.02764422831404798)
    c23 = convert(T2, 0.15)
    a2301 = convert(T, 0.01677431640522778)
    a2308 = convert(T, 0.6220437408820475)
    a2309 = convert(T, -0.2060859809768842)
    a2310 = convert(T, 0.11563949897660589)
    a2311 = convert(T, 0.026641017933783588)
    a2312 = convert(T, -0.937681079341877)
    a2313 = convert(T, -0.13678064667021603)
    a2314 = convert(T, -0.3678480995268297)
    a2315 = convert(T, -0.09547871314402478)
    a2317 = convert(T, 0.10134920184223697)
    a2318 = convert(T, -0.08911323084568594)
    a2319 = convert(T, 0.46641409889747604)
    a2320 = convert(T, 0.450273629235458)
    a2321 = convert(T, 0.18385224633268188)
    c24 = convert(T2, 0.32)
    a2401 = convert(T, 0.010711497314914442)
    a2408 = convert(T, -0.07094336118221108)
    a2409 = convert(T, 0.10021649003400916)
    a2410 = convert(T, 0.13834539804680251)
    a2411 = convert(T, 0.17963306335781634)
    a2412 = convert(T, 0.09048246545576182)
    a2413 = convert(T, -0.005460662294523339)
    a2414 = convert(T, -0.030004579051196197)
    a2415 = convert(T, -0.011451920269627991)
    a2417 = convert(T, 0.010033946861093851)
    a2418 = convert(T, -0.09506485282809046)
    a2419 = convert(T, 0.04853358804093592)
    a2420 = convert(T, 0.08013325919783924)
    a2421 = convert(T, -0.1251643326835242)
    c25 = convert(T2, 0.78)
    a2501 = convert(T, 0.014101720888692213)
    a2508 = convert(T, -0.3713379753704491)
    a2509 = convert(T, 0.22312655481171803)
    a2510 = convert(T, 0.12870053459181202)
    a2511 = convert(T, 0.22246006596754947)
    a2512 = convert(T, 0.5382853042550702)
    a2513 = convert(T, 0.05417202616988763)
    a2514 = convert(T, 0.1256968791308744)
    a2515 = convert(T, 0.027844927890020542)
    a2517 = convert(T, -0.0307740924620506)
    a2518 = convert(T, 0.008569805293689777)
    a2519 = convert(T, -0.15351746905870445)
    a2520 = convert(T, -0.021799570305481963)
    a2521 = convert(T, 0.014471288197371868)
    c26 = convert(T2, 0.96)
    a2601 = convert(T, 0.014246004117356466)
    a2608 = convert(T, -0.3767107393295407)
    a2609 = convert(T, 0.22523997807304214)
    a2610 = convert(T, 0.128360307629253)
    a2611 = convert(T, 0.22302387052616926)
    a2612 = convert(T, 0.5463127827750747)
    a2613 = convert(T, 0.0552619079137578)
    a2614 = convert(T, 0.12856135087499826)
    a2615 = convert(T, 0.028572506812964065)
    a2617 = convert(T, -0.02398761886357109)
    a2618 = convert(T, 0.055562244589105095)
    a2619 = convert(T, -0.017406756507628386)
    a2620 = convert(T, -0.03815462365996979)
    a2621 = convert(T, 0.011118785048989178)
    Vern9ExtraStages(c17, a1701, a1708, a1709, a1710, a1711, a1712, a1713, a1714, a1715,
        c18, a1801, a1808, a1809, a1810, a1811, a1812, a1813, a1814, a1815,
        a1817, c19, a1901, a1908, a1909, a1910, a1911, a1912, a1913, a1914,
        a1915, a1917, a1918, c20, a2001, a2008, a2009, a2010, a2011, a2012,
        a2013, a2014, a2015, a2017, a2018, a2019, c21, a2101, a2108, a2109,
        a2110, a2111, a2112, a2113, a2114, a2115, a2117, a2118, a2119, a2120,
        c22, a2201, a2208, a2209, a2210, a2211, a2212, a2213, a2214, a2215,
        a2217, a2218, a2219, a2220, a2221, c23, a2301, a2308, a2309, a2310,
        a2311, a2312, a2313, a2314, a2315, a2317, a2318, a2319, a2320, a2321,
        c24, a2401, a2408, a2409, a2410, a2411, a2412, a2413, a2414, a2415,
        a2417, a2418, a2419, a2420, a2421, c25, a2501, a2508, a2509, a2510,
        a2511, a2512, a2513, a2514, a2515, a2517, a2518, a2519, a2520, a2521,
        c26, a2601, a2608, a2609, a2610, a2611, a2612, a2613, a2614, a2615,
        a2617, a2618, a2619, a2620, a2621)
end
"""
    Vern9InterpolationCoefficients{T}

Polynomial coefficients `r__x` for the Vern9 dense-output interpolant.
Fields are grouped per stage (`r01_`, `r08_`–`r15_`, `r17_`–`r26_`), each
holding the powers of the interpolation variable; stages without fields do
not contribute to the interpolant.
"""
struct Vern9InterpolationCoefficients{T}
    r011::T
    r012::T
    r013::T
    r014::T
    r015::T
    r016::T
    r017::T
    r018::T
    r019::T
    r082::T
    r083::T
    r084::T
    r085::T
    r086::T
    r087::T
    r088::T
    r089::T
    r092::T
    r093::T
    r094::T
    r095::T
    r096::T
    r097::T
    r098::T
    r099::T
    r102::T
    r103::T
    r104::T
    r105::T
    r106::T
    r107::T
    r108::T
    r109::T
    r112::T
    r113::T
    r114::T
    r115::T
    r116::T
    r117::T
    r118::T
    r119::T
    r122::T
    r123::T
    r124::T
    r125::T
    r126::T
    r127::T
    r128::T
    r129::T
    r132::T
    r133::T
    r134::T
    r135::T
    r136::T
    r137::T
    r138::T
    r139::T
    r142::T
    r143::T
    r144::T
    r145::T
    r146::T
    r147::T
    r148::T
    r149::T
    r152::T
    r153::T
    r154::T
    r155::T
    r156::T
    r157::T
    r158::T
    r159::T
    r172::T
    r173::T
    r174::T
    r175::T
    r176::T
    r177::T
    r178::T
    r179::T
    r182::T
    r183::T
    r184::T
    r185::T
    r186::T
    r187::T
    r188::T
    r189::T
    r192::T
    r193::T
    r194::T
    r195::T
    r196::T
    r197::T
    r198::T
    r199::T
    r202::T
    r203::T
    r204::T
    r205::T
    r206::T
    r207::T
    r208::T
    r209::T
    r212::T
    r213::T
    r214::T
    r215::T
    r216::T
    r217::T
    r218::T
    r219::T
    r222::T
    r223::T
    r224::T
    r225::T
    r226::T
    r227::T
    r228::T
    r229::T
    r232::T
    r233::T
    r234::T
    r235::T
    r236::T
    r237::T
    r238::T
    r239::T
    r242::T
    r243::T
    r244::T
    r245::T
    r246::T
    r247::T
    r248::T
    r249::T
    r252::T
    r253::T
    r254::T
    r255::T
    r256::T
    r257::T
    r258::T
    r259::T
    r262::T
    r263::T
    r264::T
    r265::T
    r266::T
    r267::T
    r268::T
    r269::T
end
"""
    Vern9InterpolationCoefficients(T)

Construct the `Vern9InterpolationCoefficients` dense-output coefficients,
converting every entry to `T`. Only stage 1 has a constant term (`r011`);
all other stages start at the linear coefficient `r_x2`.
"""
function Vern9InterpolationCoefficients(::Type{T}) where {T}
    r011 = convert(T, 1)
    r012 = convert(T, -28.330488700617398)
    r013 = convert(T, 257.6535452078578)
    r014 = convert(T, -1152.1544557434572)
    r015 = convert(T, 2909.390878345409)
    r016 = convert(T, -4355.005172868188)
    r017 = convert(T, 3834.083497036262)
    r018 = convert(T, -1835.419052683407)
    r019 = convert(T, 368.7958613829998)
    r082 = convert(T, 2.649656243770091)
    r083 = convert(T, -96.30312807816006)
    r084 = convert(T, 869.3095462492796)
    r085 = convert(T, -3395.688567551074)
    r086 = convert(T, 6796.933987158715)
    r087 = convert(T, -7340.848417712072)
    r088 = convert(T, 4082.8488969923656)
    r089 = convert(T, -919.2934944890586)
    r092 = convert(T, -1.5639451819287329)
    r093 = convert(T, 56.8423973927286)
    r094 = convert(T, -513.1052300304285)
    r095 = convert(T, 2004.2867021103232)
    r096 = convert(T, -4011.8533059139295)
    r097 = convert(T, 4332.895839278586)
    r098 = convert(T, -2409.8793479371448)
    r099 = convert(T, 542.6079835318221)
    r102 = convert(T, -0.8627103334967224)
    r103 = convert(T, 31.355653751851733)
    r104 = convert(T, -283.0413682227354)
    r105 = convert(T, 1105.613463426007)
    r106 = convert(T, -2213.0362006784526)
    r107 = convert(T, 2390.1310977541207)
    r108 = convert(T, -1329.3482661468738)
    r109 = convert(T, 299.31580712657853)
    r112 = convert(T, -1.5202953379012147)
    r113 = convert(T, 55.25592121120227)
    r114 = convert(T, -498.7844190970741)
    r115 = convert(T, 1948.346888525776)
    r116 = convert(T, -3899.8821364075516)
    r117 = convert(T, 4211.964345158858)
    r118 = convert(T, -2342.619408856117)
    r119 = convert(T, 527.4637482204279)
    r122 = convert(T, -3.8469388441255234)
    r123 = convert(T, 139.81898409868404)
    r124 = convert(T, -1262.1186876216004)
    r125 = convert(T, 4930.075848057311)
    r126 = convert(T, -9868.21948606954)
    r127 = convert(T, 10657.908924348867)
    r128 = convert(T, -5927.738759872814)
    r129 = convert(T, 1334.688551172191)
    r132 = convert(T, -0.39427130612001415)
    r133 = convert(T, 14.329994760676497)
    r134 = convert(T, -129.35406659945582)
    r135 = convert(T, 505.28160770025175)
    r136 = convert(T, -1011.3900801394333)
    r137 = convert(T, 1092.3250517818917)
    r138 = convert(T, -607.531701930281)
    r139 = convert(T, 136.79172444804232)
    r142 = convert(T, -0.9233145622082102)
    r143 = convert(T, 33.55834582309799)
    r144 = convert(T, -302.9246397549736)
    r145 = convert(T, 1183.2813069678675)
    r146 = convert(T, -2368.4989867901113)
    r147 = convert(T, 2558.034559755808)
    r148 = convert(T, -1422.7331755778803)
    r149 = convert(T, 320.3423358787482)
    r152 = convert(T, -0.20688628029300538)
    r153 = convert(T, 7.519388975651663)
    r154 = convert(T, -67.87605708082904)
    r155 = convert(T, 265.136799698415)
    r156 = convert(T, -530.7074807559026)
    r157 = convert(T, 573.176549564149)
    r158 = convert(T, -318.7905688834869)
    r159 = convert(T, 71.77882490212657)
    r172 = convert(T, -0.44724419067440996)
    r173 = convert(T, 16.44684676010504)
    r174 = convert(T, -154.40861059212955)
    r175 = convert(T, 641.8986298540249)
    r176 = convert(T, -1391.9392256879823)
    r177 = convert(T, 1643.890568302952)
    r178 = convert(T, -1004.0652972233179)
    r179 = convert(T, 248.6243327770223)
    r182 = convert(T, -0.1507876007899798)
    r183 = convert(T, 5.527328824824632)
    r184 = convert(T, -51.33833743084619)
    r185 = convert(T, 209.60220027032804)
    r186 = convert(T, -442.7692650421826)
    r187 = convert(T, 505.0579312588053)
    r188 = convert(T, -295.63364106156195)
    r189 = convert(T, 69.70457078142275)
    r192 = convert(T, -0.6413652207435296)
    r193 = convert(T, 23.510132486246846)
    r194 = convert(T, -218.36426832469724)
    r195 = convert(T, 891.5292818535365)
    r196 = convert(T, -1883.290177206008)
    r197 = convert(T, 2148.2309544883997)
    r198 = convert(T, -1257.4584015217124)
    r199 = convert(T, 296.4838434449778)
    r202 = convert(T, 1.8107293134448457)
    r203 = convert(T, -66.37479657295337)
    r204 = convert(T, 616.4952025401107)
    r205 = convert(T, -2517.0030307773227)
    r206 = convert(T, 5316.984175781034)
    r207 = convert(T, -6064.976140789574)
    r208 = convert(T, 3550.1095388883914)
    r209 = convert(T, -837.0456783831302)
    r212 = convert(T, 0.05176008760353718)
    r213 = convert(T, -1.8973378625803488)
    r214 = convert(T, 17.622648207936294)
    r215 = convert(T, -71.94907400242467)
    r216 = convert(T, 151.9871383765666)
    r217 = convert(T, -173.36864987478606)
    r218 = convert(T, 101.4806461521468)
    r219 = convert(T, -23.927131084462175)
    r222 = convert(T, 31.321782556688)
    r223 = convert(T, -355.6570858339106)
    r224 = convert(T, 1752.6852824895159)
    r225 = convert(T, -4708.092293138363)
    r226 = convert(T, 7370.900776193489)
    r227 = convert(T, -6716.504964764566)
    r228 = convert(T, 3303.940398161186)
    r229 = convert(T, -678.5938956640391)
    r232 = convert(T, -2.7196073341859246)
    r233 = convert(T, 86.64045615858264)
    r234 = convert(T, -454.1926030939031)
    r235 = convert(T, 1014.7492211005434)
    r236 = convert(T, -1133.583456714544)
    r237 = convert(T, 610.4671827718666)
    r238 = convert(T, -109.02334994495438)
    r239 = convert(T, -12.337842943405471)
    r242 = convert(T, 3.1772148014329233)
    r243 = convert(T, -113.8098697715143)
    r244 = convert(T, 978.0935981825675)
    r245 = convert(T, -3575.1293776236703)
    r246 = convert(T, 6764.3615198384505)
    r247 = convert(T, -6987.161043852012)
    r248 = convert(T, 3751.9057627895713)
    r249 = convert(T, -821.4378043648254)
    r252 = convert(T, 0.877284308346553)
    r253 = convert(T, -31.51810423988375)
    r254 = convert(T, 273.1229151353221)
    r255 = convert(T, -993.2198643101782)
    r256 = convert(T, 1787.888078312664)
    r257 = convert(T, -1677.394835799641)
    r258 = convert(T, 781.3579535062688)
    r259 = convert(T, -141.11342691289855)
    r262 = convert(T, 1.7194275817987157)
    r263 = convert(T, -62.89867309250732)
    r264 = convert(T, 580.333550787398)
    r265 = convert(T, -2348.110620506761)
    r266 = convert(T, 4921.119298612906)
    r267 = convert(T, -5597.912448707917)
    r268 = convert(T, 3288.5977751496216)
    r269 = convert(T, -782.8483098245397)
    Vern9InterpolationCoefficients(r011, r012, r013, r014, r015, r016, r017, r018, r019,
        r082, r083, r084, r085, r086, r087, r088, r089, r092,
        r093, r094, r095, r096, r097, r098, r099, r102, r103,
        r104, r105, r106, r107, r108, r109, r112, r113, r114,
        r115, r116, r117, r118, r119, r122, r123, r124, r125,
        r126, r127, r128, r129, r132, r133, r134, r135, r136,
        r137, r138, r139, r142, r143, r144, r145, r146, r147,
        r148, r149, r152, r153, r154, r155, r156, r157, r158,
        r159, r172, r173, r174, r175, r176, r177, r178, r179,
        r182, r183, r184, r185, r186, r187, r188, r189, r192,
        r193, r194, r195, r196, r197, r198, r199, r202, r203,
        r204, r205, r206, r207, r208, r209, r212, r213, r214,
        r215, r216, r217, r218, r219, r222, r223, r224, r225,
        r226, r227, r228, r229, r232, r233, r234, r235, r236,
        r237, r238, r239, r242, r243, r244, r245, r246, r247,
        r248, r249, r252, r253, r254, r255, r256, r257, r258,
        r259, r262, r263, r264, r265, r266, r267, r268, r269)
end
"""
From Verner's Webiste
"""
struct Vern9Tableau{T, T2}
    # Nodes (c-coefficients): the intermediate time fractions of the 16-stage
    # Verner 9(8) pair. `T2` is the time type; `T` is the coefficient type.
    c1::T2
    c2::T2
    c3::T2
    c4::T2
    c5::T2
    c6::T2
    c7::T2
    c8::T2
    c9::T2
    c10::T2
    c11::T2
    c12::T2
    c13::T2
    # Coupling coefficients a[stage][previous stage]; entries that are zero in
    # the tableau are omitted entirely rather than stored as zeros.
    a0201::T
    a0301::T
    a0302::T
    a0401::T
    a0403::T
    a0501::T
    a0503::T
    a0504::T
    a0601::T
    a0604::T
    a0605::T
    a0701::T
    a0704::T
    a0705::T
    a0706::T
    a0801::T
    a0806::T
    a0807::T
    a0901::T
    a0906::T
    a0907::T
    a0908::T
    a1001::T
    a1006::T
    a1007::T
    a1008::T
    a1009::T
    a1101::T
    a1106::T
    a1107::T
    a1108::T
    a1109::T
    a1110::T
    a1201::T
    a1206::T
    a1207::T
    a1208::T
    a1209::T
    a1210::T
    a1211::T
    a1301::T
    a1306::T
    a1307::T
    a1308::T
    a1309::T
    a1310::T
    a1311::T
    a1312::T
    a1401::T
    a1406::T
    a1407::T
    a1408::T
    a1409::T
    a1410::T
    a1411::T
    a1412::T
    a1413::T
    a1501::T
    a1506::T
    a1507::T
    a1508::T
    a1509::T
    a1510::T
    a1511::T
    a1512::T
    a1513::T
    a1514::T
    a1601::T
    a1606::T
    a1607::T
    a1608::T
    a1609::T
    a1610::T
    a1611::T
    a1612::T
    a1613::T
    # Quadrature weights of the 9th-order solution (stages with zero weight
    # are omitted).
    b1::T
    b8::T
    b9::T
    b10::T
    b11::T
    b12::T
    b13::T
    b14::T
    b15::T
    # Error-estimator weights (difference between the 9th- and embedded
    # 8th-order weights), used for adaptive step-size control.
    btilde1::T
    btilde8::T
    btilde9::T
    btilde10::T
    btilde11::T
    btilde12::T
    btilde13::T
    btilde14::T
    btilde15::T
    btilde16::T
    # Additional stages and dense-output (interpolation) coefficient tables.
    extra::Vern9ExtraStages{T, T2}
    interp::Vern9InterpolationCoefficients{T}
end
"""
    Vern9Tableau(T, T2)

Construct the `Vern9Tableau`, converting every stage/weight coefficient to the
value type `T` and every node `c_i` to the time type `T2`.

The previous signature, `Vern9Tableau(T::Type{T1}, T2::Type{T1}) where {T1}`,
constrained both type arguments to be the *same* type, even though the struct
is parameterized by two independent types `Vern9Tableau{T, T2}`; a
mixed-precision call (e.g. `Float32` coefficients with `Float64` time) would
raise a `MethodError`. The constraint is dropped — strictly more general, and
every previously working call still dispatches here.
"""
function Vern9Tableau(T, T2)
    # Nodes.
    c1 = convert(T2, 0.03462)
    c2 = convert(T2, 0.09702435063878045)
    c3 = convert(T2, 0.14553652595817068)
    c4 = convert(T2, 0.561)
    c5 = convert(T2, 0.22900791159048503)
    c6 = convert(T2, 0.544992088409515)
    c7 = convert(T2, 0.645)
    c8 = convert(T2, 0.48375)
    c9 = convert(T2, 0.06757)
    c10 = convert(T2, 0.25)
    c11 = convert(T2, 0.6590650618730999)
    c12 = convert(T2, 0.8206)
    c13 = convert(T2, 0.9012)
    # Stage-coupling coefficients.
    a0201 = convert(T, 0.03462)
    a0301 = convert(T, -0.03893354388572875)
    a0302 = convert(T, 0.13595789452450918)
    a0401 = convert(T, 0.03638413148954267)
    a0403 = convert(T, 0.10915239446862801)
    a0501 = convert(T, 2.0257639143939694)
    a0503 = convert(T, -7.638023836496291)
    a0504 = convert(T, 6.173259922102322)
    a0601 = convert(T, 0.05112275589406061)
    a0604 = convert(T, 0.17708237945550218)
    a0605 = convert(T, 0.0008027762409222536)
    a0701 = convert(T, 0.13160063579752163)
    a0704 = convert(T, -0.2957276252669636)
    a0705 = convert(T, 0.08781378035642955)
    a0706 = convert(T, 0.6213052975225274)
    a0801 = convert(T, 0.07166666666666667)
    a0806 = convert(T, 0.33055335789153195)
    a0807 = convert(T, 0.2427799754418014)
    a0901 = convert(T, 0.071806640625)
    a0906 = convert(T, 0.3294380283228177)
    a0907 = convert(T, 0.1165190029271823)
    a0908 = convert(T, -0.034013671875)
    a1001 = convert(T, 0.04836757646340646)
    a1006 = convert(T, 0.03928989925676164)
    a1007 = convert(T, 0.10547409458903446)
    a1008 = convert(T, -0.021438652846483126)
    a1009 = convert(T, -0.10412291746271944)
    a1101 = convert(T, -0.026645614872014785)
    a1106 = convert(T, 0.03333333333333333)
    a1107 = convert(T, -0.1631072244872467)
    a1108 = convert(T, 0.03396081684127761)
    a1109 = convert(T, 0.1572319413814626)
    a1110 = convert(T, 0.21522674780318796)
    a1201 = convert(T, 0.03689009248708622)
    a1206 = convert(T, -0.1465181576725543)
    a1207 = convert(T, 0.2242577768172024)
    a1208 = convert(T, 0.02294405717066073)
    a1209 = convert(T, -0.0035850052905728597)
    a1210 = convert(T, 0.08669223316444385)
    a1211 = convert(T, 0.43838406519683376)
    a1301 = convert(T, -0.4866012215113341)
    a1306 = convert(T, -6.304602650282853)
    a1307 = convert(T, -0.2812456182894729)
    a1308 = convert(T, -2.679019236219849)
    a1309 = convert(T, 0.5188156639241577)
    a1310 = convert(T, 1.3653531876033418)
    a1311 = convert(T, 5.8850910885039465)
    a1312 = convert(T, 2.8028087862720628)
    a1401 = convert(T, 0.4185367457753472)
    a1406 = convert(T, 6.724547581906459)
    a1407 = convert(T, -0.42544428016461133)
    a1408 = convert(T, 3.3432791530012653)
    a1409 = convert(T, 0.6170816631175374)
    a1410 = convert(T, -0.9299661239399329)
    a1411 = convert(T, -6.099948804751011)
    a1412 = convert(T, -3.002206187889399)
    a1413 = convert(T, 0.2553202529443446)
    a1501 = convert(T, -0.7793740861228848)
    a1506 = convert(T, -13.937342538107776)
    a1507 = convert(T, 1.2520488533793563)
    a1508 = convert(T, -14.691500408016868)
    a1509 = convert(T, -0.494705058533141)
    a1510 = convert(T, 2.2429749091462368)
    a1511 = convert(T, 13.367893803828643)
    a1512 = convert(T, 14.396650486650687)
    a1513 = convert(T, -0.79758133317768)
    a1514 = convert(T, 0.4409353709534278)
    a1601 = convert(T, 2.0580513374668867)
    a1606 = convert(T, 22.357937727968032)
    a1607 = convert(T, 0.9094981099755646)
    a1608 = convert(T, 35.89110098240264)
    a1609 = convert(T, -3.442515027624454)
    a1610 = convert(T, -4.865481358036369)
    a1611 = convert(T, -18.909803813543427)
    a1612 = convert(T, -34.26354448030452)
    a1613 = convert(T, 1.2647565216956427)
    # 9th-order weights.
    b1 = convert(T, 0.014611976858423152)
    b8 = convert(T, -0.3915211862331339)
    b9 = convert(T, 0.23109325002895065)
    b10 = convert(T, 0.12747667699928525)
    b11 = convert(T, 0.2246434176204158)
    b12 = convert(T, 0.5684352689748513)
    b13 = convert(T, 0.058258715572158275)
    b14 = convert(T, 0.13643174034822156)
    b15 = convert(T, 0.030570139830827976)
    # Embedded 8th-order weights, kept for reference; only the differences
    # (btilde below) are stored in the tableau.
    # bhat1 =convert(T,0.01996996514886773)
    # bhat8 =convert(T,2.19149930494933)
    # bhat9 =convert(T,0.08857071848208438)
    # bhat10 =convert(T,0.11405602348659656)
    # bhat11 =convert(T,0.2533163805345107)
    # bhat12 =convert(T,-2.056564386240941)
    # bhat13 =convert(T,0.340809679901312)
    # bhat16 =convert(T,0.04834231373823958)
    btilde1 = convert(T, -0.005357988290444578)
    btilde8 = convert(T, -2.583020491182464)
    btilde9 = convert(T, 0.14252253154686625)
    btilde10 = convert(T, 0.013420653512688676)
    btilde11 = convert(T, -0.02867296291409493)
    btilde12 = convert(T, 2.624999655215792)
    btilde13 = convert(T, -0.2825509643291537)
    btilde14 = convert(T, 0.13643174034822156)
    btilde15 = convert(T, 0.030570139830827976)
    btilde16 = convert(T, -0.04834231373823958)
    # Extra stages and interpolation coefficients live in their own tables.
    extra = Vern9ExtraStages(T, T2)
    interp = Vern9InterpolationCoefficients(T)
    Vern9Tableau(c1, c2, c3, c4, c5, c6, c7, c8, c9, c10, c11, c12, c13, a0201, a0301,
        a0302, a0401, a0403, a0501, a0503, a0504, a0601, a0604, a0605, a0701,
        a0704, a0705, a0706, a0801, a0806, a0807, a0901, a0906, a0907, a0908,
        a1001, a1006, a1007, a1008, a1009, a1101, a1106, a1107, a1108, a1109,
        a1110, a1201, a1206, a1207, a1208, a1209, a1210, a1211, a1301, a1306,
        a1307, a1308, a1309, a1310, a1311, a1312, a1401, a1406, a1407, a1408,
        a1409, a1410, a1411, a1412, a1413, a1501, a1506, a1507, a1508, a1509,
        a1510, a1511, a1512, a1513, a1514, a1601, a1606, a1607, a1608, a1609,
        a1610, a1611, a1612, a1613, b1, b8, b9, b10, b11, b12, b13, b14, b15,
        btilde1, btilde8, btilde9, btilde10, btilde11, btilde12, btilde13,
        btilde14, btilde15, btilde16, extra, interp)
end
| DiffEqGPU | https://github.com/SciML/DiffEqGPU.jl.git |
|
[
"MIT"
] | 3.4.1 | 7a714b856fe00d6194774bc38e387375f06963fe | code | 1721 | using Distributed
# Distributed multi-GPU ensemble test: spin up two worker processes and check
# that EnsembleGPUArray produces distinct per-trajectory results and preserves
# each trajectory's parameters when batches are farmed out to workers.
addprocs(2)
@everywhere using DiffEqGPU, OrdinaryDiffEq, Test, Random
@everywhere include("utils.jl")
# Everything referenced by the prob_func closure (RHS, seeded parameter draws)
# must exist on every worker process, hence the @everywhere block.
@everywhere begin
    function lorenz_distributed(du, u, p, t)
        du[1] = p[1] * (u[2] - u[1])
        du[2] = u[1] * (p[2] - u[3]) - u[2]
        du[3] = u[1] * u[2] - p[3] * u[3]
    end
    u0 = Float32[1.0; 0.0; 0.0]
    tspan = (0.0f0, 100.0f0)
    p = (10.0f0, 28.0f0, 8 / 3.0f0)
    # Seed so pre_p_distributed is identical on every process.
    Random.seed!(1)
    pre_p_distributed = [rand(Float32, 3) for i in 1:10]
    function prob_func_distributed(prob, i, repeat)
        remake(prob, p = pre_p_distributed[i] .* p)
    end
end
prob = ODEProblem(lorenz_distributed, u0, tspan, p)
monteprob = EnsembleProblem(prob, prob_func = prob_func_distributed)
#Performance check with nvvp
# CUDAnative.CUDAdrv.@profile
@time sol = solve(monteprob, Tsit5(), EnsembleGPUArray(backend), trajectories = 10,
    saveat = 1.0f0)
# At least one trajectory must differ from the first (parameters vary).
@test length(filter(x -> x.u != sol.u[1].u, sol.u)) != 0 # 0 element array
@time sol = solve(monteprob, ROCK4(), EnsembleGPUArray(backend), trajectories = 10,
    saveat = 1.0f0)
@time sol2 = solve(monteprob, Tsit5(), EnsembleGPUArray(backend), trajectories = 10,
    batch_size = 5, saveat = 1.0f0)
@test length(filter(x -> x.u != sol.u[1].u, sol.u)) != 0 # 0 element array
@test length(filter(x -> x.u != sol2.u[6].u, sol.u)) != 0 # 0 element array
# Each returned trajectory carries the exact parameters used to generate it.
@test all(all(sol[i].prob.p .== pre_p_distributed[i] .* p) for i in 1:10)
@test all(all(sol2[i].prob.p .== pre_p_distributed[i] .* p) for i in 1:10)
#To set 1 GPU per device:
#=
using Distributed
addprocs(numgpus)
import CUDAdrv, CUDAnative
let gpuworkers = asyncmap(collect(zip(workers(), CUDAdrv.devices()))) do (p, d)
    remotecall_wait(CUDAnative.device!, p, d)
    p
end
=#
| DiffEqGPU | https://github.com/SciML/DiffEqGPU.jl.git |
|
[
"MIT"
] | 3.4.1 | 7a714b856fe00d6194774bc38e387375f06963fe | code | 8517 | using DiffEqGPU, OrdinaryDiffEq, Test
include("utils.jl")
# In-place Lorenz-63 vector field; p = (σ, ρ, β), u = (x, y, z).
function lorenz(du, u, p, t)
    σ, ρ, β = p[1], p[2], p[3]
    x, y, z = u[1], u[2], u[3]
    du[1] = σ * (y - x)
    du[2] = x * (ρ - z) - y
    du[3] = x * y - β * z
end
# Shared Lorenz fixture: one base problem plus ten random parameter scalings,
# held in `pre_p` so that GPU results can be checked against the inputs.
u0 = Float32[1.0; 0.0; 0.0]
tspan = (0.0f0, 100.0f0)
p = (10.0f0, 28.0f0, 8 / 3.0f0)
prob = ODEProblem(lorenz, u0, tspan, p)
const pre_p = [rand(Float32, 3) for i in 1:10]
prob_func = (prob, i, repeat) -> remake(prob, p = pre_p[i] .* p)
monteprob = EnsembleProblem(prob, prob_func = prob_func)
@info "Explicit Methods"
#Performance check with nvvp
# CUDAnative.CUDAdrv.@profile
@time sol = solve(monteprob, Tsit5(), EnsembleGPUArray(backend), trajectories = 10,
    saveat = 1.0f0)
# Trajectories must not all be identical (parameters differ per trajectory).
@test length(filter(x -> x.u != sol.u[1].u, sol.u)) != 0 # 0 element array
@time sol = solve(monteprob, ROCK4(), EnsembleGPUArray(backend), trajectories = 10,
    saveat = 1.0f0)
@time sol2 = solve(monteprob, Tsit5(), EnsembleGPUArray(backend), trajectories = 10,
    batch_size = 5, saveat = 1.0f0)
@test length(filter(x -> x.u != sol.u[1].u, sol.u)) != 0 # 0 element array
@test length(filter(x -> x.u != sol2.u[6].u, sol.u)) != 0 # 0 element array
# Each solution retains the parameters it was remade with, even when batched.
@test all(all(sol[i].prob.p .== pre_p[i] .* p) for i in 1:10)
@test all(all(sol2[i].prob.p .== pre_p[i] .* p) for i in 1:10)
# CPU-side ensembles, mainly as timing references.
@time solve(monteprob, Tsit5(), EnsembleCPUArray(), trajectories = 10, saveat = 1.0f0)
@time solve(monteprob, Tsit5(), EnsembleThreads(), trajectories = 10, saveat = 1.0f0)
@time solve(monteprob, Tsit5(), EnsembleSerial(), trajectories = 10, saveat = 1.0f0)
#=
solve(monteprob,TRBDF2(),EnsembleCPUArray(),dt=0.1,trajectories=2,saveat=1.0f0)
solve(monteprob,TRBDF2(),EnsembleGPUArray(backend),dt=0.1,trajectories=2,saveat=1.0f0)
@test_broken solve(monteprob,TRBDF2(),EnsembleGPUArray(backend),dt=0.1,trajectories=2,saveat=1.0f0)
=#
@info "Implicit Methods"
# In-place Jacobian of the Lorenz vector field with respect to the state u.
function lorenz_jac(J, u, p, t)
    σ, ρ, β = p[1], p[2], p[3]
    x, y, z = u[1], u[2], u[3]
    J[1, 1] = -σ
    J[1, 2] = σ
    J[1, 3] = 0
    J[2, 1] = ρ - z
    J[2, 2] = -1
    J[2, 3] = -x
    J[3, 1] = y
    J[3, 2] = x
    J[3, 3] = -β
end
# Time gradient of the Lorenz RHS: the system is autonomous, so ∂f/∂t ≡ 0
# and there is nothing to write.
lorenz_tgrad(J, u, p, t) = nothing
# Stiff/implicit solvers use the analytic Jacobian and time-gradient supplied
# through the ODEFunction.
func = ODEFunction(lorenz, jac = lorenz_jac, tgrad = lorenz_tgrad)
prob_jac = ODEProblem(func, u0, tspan, p)
monteprob_jac = EnsembleProblem(prob_jac, prob_func = prob_func)
@time solve(monteprob_jac, Rodas5(), EnsembleCPUArray(), dt = 0.1,
    trajectories = 10,
    saveat = 1.0f0)
@time solve(monteprob_jac, TRBDF2(), EnsembleCPUArray(), dt = 0.1,
    trajectories = 10,
    saveat = 1.0f0)
# GPU implicit solves are only run on CUDA here — presumably because the other
# backends lack the required factorization support; TODO confirm.
if GROUP == "CUDA"
    @time solve(monteprob_jac, Rodas5(), EnsembleGPUArray(backend), dt = 0.1,
        trajectories = 10,
        saveat = 1.0f0)
    @time solve(monteprob_jac, TRBDF2(), EnsembleGPUArray(backend), dt = 0.1,
        trajectories = 10,
        saveat = 1.0f0)
end
@info "Callbacks"
condition = function (u, t, integrator)
@inbounds u[1] > 5
end
affect! = function (integrator)
@inbounds integrator.u[1] = -4
end
# test discrete
discrete_callback = DiscreteCallback(condition, affect!, save_positions = (false, false))
callback_prob = ODEProblem(lorenz, u0, tspan, p, callback = discrete_callback)
callback_monteprob = EnsembleProblem(callback_prob, prob_func = prob_func)
@time solve(callback_monteprob, Tsit5(), EnsembleGPUArray(backend), trajectories = 10,
saveat = 1.0f0)
c_condition = function (u, t, integrator)
@inbounds u[1] - 3
end
c_affect! = function (integrator)
@inbounds integrator.u[1] += 20
end
# test continuous
continuous_callback = ContinuousCallback(c_condition, c_affect!,
save_positions = (false, false))
callback_prob = ODEProblem(lorenz, u0, tspan, p, callback = continuous_callback)
callback_monteprob = EnsembleProblem(callback_prob, prob_func = prob_func)
solve(callback_monteprob, Tsit5(), EnsembleGPUArray(backend), trajectories = 2,
saveat = 1.0f0)
# test callback set
callback_set = CallbackSet(discrete_callback, continuous_callback)
callback_prob = ODEProblem(lorenz, u0, tspan, p, callback = callback_set)
callback_monteprob = EnsembleProblem(callback_prob, prob_func = prob_func)
solve(callback_monteprob, Tsit5(), EnsembleGPUArray(backend), trajectories = 2,
saveat = 1.0f0)
# test merge
callback_prob = ODEProblem(lorenz, u0, tspan, p, callback = discrete_callback)
callback_monteprob = EnsembleProblem(callback_prob, prob_func = prob_func)
solve(callback_monteprob, Tsit5(), EnsembleGPUArray(backend), trajectories = 2,
saveat = 1.0f0,
callback = continuous_callback)
@info "ROBER"
#=
using OrdinaryDiffEq, LinearAlgebra, ParameterizedFunctions
LinearAlgebra.BLAS.set_num_threads(1)
rober = @ode_def begin
dy₁ = -k₁*y₁+k₃*y₂*y₃
dy₂ = k₁*y₁-k₂*y₂^2-k₃*y₂*y₃
dy₃ = k₂*y₂^2
end k₁ k₂ k₃
=#
# In-place Robertson stiff ODE right-hand side.
# p = (k₁, k₂, k₃) rate constants; u = species concentrations (y₁, y₂, y₃).
function rober_f(du, u, p, t)
    @inbounds begin
        k₁, k₂, k₃ = p[1], p[2], p[3]
        y₁, y₂, y₃ = u[1], u[2], u[3]
        du[1] = -k₁ * y₁ + k₃ * y₂ * y₃
        du[2] = k₁ * y₁ - k₂ * y₂^2 - k₃ * y₂ * y₃
        du[3] = k₂ * y₂^2
    end
    nothing
end
# In-place Jacobian of the Robertson ODE with respect to the state u.
function rober_jac(J, u, p, t)
    @inbounds begin
        k₁, k₂, k₃ = p[1], p[2], p[3]
        y₂, y₃ = u[2], u[3]
        J[1, 1] = -k₁
        J[1, 2] = k₃ * y₃
        J[1, 3] = k₃ * y₂
        J[2, 1] = k₁
        J[2, 2] = -2 * k₂ * y₂ - k₃ * y₃
        J[2, 3] = -k₃ * y₂
        J[3, 1] = 0
        J[3, 2] = 2 * k₂ * y₂
        J[3, 3] = 0
    end
    nothing
end
# Robertson is autonomous: time gradient is identically zero, nothing to fill.
rober_tgrad(J, u, p, t) = nothing
# Classic Robertson stiff benchmark, solved serially and then as a GPU ensemble.
rober_prob = ODEProblem(ODEFunction(rober_f, jac = rober_jac, tgrad = rober_tgrad),
    Float32[1.0, 0.0, 0.0], (0.0f0, 1.0f5), (0.04f0, 3.0f7, 1.0f4))
sol = solve(rober_prob, Rodas5(), abstol = 1.0f-8, reltol = 1.0f-8)
sol = solve(rober_prob, TRBDF2(), abstol = 1.0f-4, reltol = 1.0f-1)
rober_monteprob = EnsembleProblem(rober_prob, prob_func = prob_func)
# GPU implicit solves only on CUDA (same gating as the implicit Lorenz tests).
if GROUP == "CUDA"
    @time sol = solve(rober_monteprob, Rodas5(),
        EnsembleGPUArray(backend), trajectories = 10,
        saveat = 1.0f0,
        abstol = 1.0f-8,
        reltol = 1.0f-8)
    @time sol = solve(rober_monteprob, TRBDF2(),
        EnsembleGPUArray(backend), trajectories = 10,
        saveat = 1.0f0,
        abstol = 1.0f-4,
        reltol = 1.0f-1)
end
@time sol = solve(rober_monteprob, TRBDF2(), EnsembleThreads(),
    trajectories = 10,
    abstol = 1e-4, reltol = 1e-1, saveat = 1.0f0)
@info "Struct parameters"
# Lorenz parameters carried in a struct instead of a tuple; exercises
# non-tuple parameter objects going through the GPU ensemble machinery.
struct LorenzParameters
    σ::Float32
    ρ::Float32
    β::Float32
end
# In-place Lorenz RHS reading its coefficients from `LorenzParameters` fields.
function lorenzp(du, u, p::LorenzParameters, t)
    x, y, z = u[1], u[2], u[3]
    du[1] = p.σ * (y - x)
    du[2] = x * (p.ρ - z) - y
    du[3] = x * y - p.β * z
end
u0 = Float32[1.0; 0.0; 0.0]
tspan = (0.0f0, 100.0f0)
p = LorenzParameters(10.0f0, 28.0f0, 8 / 3.0f0)
prob = ODEProblem(lorenzp, u0, tspan, p)
# Build a fresh LorenzParameters per trajectory from the shared pre_p draws.
function param_prob_func(prob, i, repeat)
    p = LorenzParameters(pre_p[i][1] .* 10.0f0,
        pre_p[i][2] .* 28.0f0,
        pre_p[i][3] .* 8 / 3.0f0)
    remake(prob; p)
end
monteprob = EnsembleProblem(prob, prob_func = param_prob_func)
@time sol = solve(monteprob, Tsit5(), EnsembleGPUArray(backend), trajectories = 10,
    saveat = 1.0f0)
@test length(filter(x -> x.u != sol.u[1].u, sol.u)) != 0 # 0 element array
@info "Different time-spans"
# Each trajectory integrates to a different final time.
saveats = 1.0f0:1.0f0:10.0f0
# NOTE(review): this rebuilt `prob` appears unused — the ensemble below wraps
# `prob_jac`, not `prob`; confirm whether `prob` was intended here.
prob = ODEProblem(lorenz, u0, tspan, p)
monteprob = EnsembleProblem(prob_jac,
    prob_func = (prob, i, repeat) -> remake(prob;
        tspan = (0.0f0,
            saveats[i])))
sol = solve(monteprob, Tsit5(), EnsembleGPUArray(backend, 0.0), trajectories = 10,
    adaptive = false, dt = 0.01f0, save_everystep = false)
if GROUP == "CUDA"
    sol = solve(monteprob, Rosenbrock23(), EnsembleGPUArray(backend, 0.0),
        trajectories = 10,
        adaptive = false, dt = 0.01f0, save_everystep = false)
end
| DiffEqGPU | https://github.com/SciML/DiffEqGPU.jl.git |
|
[
"MIT"
] | 3.4.1 | 7a714b856fe00d6194774bc38e387375f06963fe | code | 1072 | using OrdinaryDiffEq, DiffEqGPU, ForwardDiff, Test
include("utils.jl")
function lorenz(du, u, p, t)
du[1] = p[1] * (u[2] - u[1])
du[2] = u[1] * (p[2] - u[3]) - u[2]
du[3] = u[1] * u[2] - p[3] * u[3]
end
u0 = [ForwardDiff.Dual(1.0f0, (1.0, 0.0, 0.0)), ForwardDiff.Dual(0.0f0, (0.0, 1.0, 0.0)),
ForwardDiff.Dual(0.0f0, (0.0, 0.0, 1.0))]
tspan = (0.0f0, 100.0f0)
p = (10.0f0, 28.0f0, 8 / 3.0f0)
prob = ODEProblem{true, SciMLBase.FullSpecialize}(lorenz, u0, tspan, p)
prob_func = (prob, i, repeat) -> remake(prob, p = rand(Float32, 3) .* p)
monteprob = EnsembleProblem(prob, prob_func = prob_func)
@time sol = solve(monteprob, Tsit5(), EnsembleGPUArray(backend), trajectories = 10_000,
saveat = 1.0f0)
#=
u0 = [1f0u"m";0u"m";0u"m"]
tspan = (0.0f0u"s",100.0f0u"s")
p = (10.0f0,28.0f0,8/3f0)
prob = ODEProblem(lorenz,u0,tspan,p)
prob_func = (prob,i,repeat) -> remake(prob,p=rand(Float32,3).*p)
monteprob = EnsembleProblem(prob, prob_func = prob_func)
@test_broken sol = solve(monteprob,Tsit5(),EnsembleGPUArray(),trajectories=10_000,saveat=1.0f0u"s")
=#
| DiffEqGPU | https://github.com/SciML/DiffEqGPU.jl.git |
|
[
"MIT"
] | 3.4.1 | 7a714b856fe00d6194774bc38e387375f06963fe | code | 1174 | using DiffEqGPU, OrdinaryDiffEq, StaticArrays
include("utils.jl")
function lorenz(u, p, t)
du1 = p[1] * (u[2] - u[1])
du2 = u[1] * (p[2] - u[3]) - u[2]
du3 = u[1] * u[2] - p[3] * u[3]
SA[du1, du2, du3]
end
function lorenz_jac(u, p, t)
σ = p[1]
ρ = p[2]
β = p[3]
x = u[1]
y = u[2]
z = u[3]
SA[-σ σ 0
ρ-z -1 -x
y x -β]
end
function lorenz_tgrad(u, p, t)
SA[0.0, 0.0, 0.0]
end
func = ODEFunction(lorenz, jac = lorenz_jac, tgrad = lorenz_tgrad)
u0 = SA[1.0f0; 0.0f0; 0.0f0]
tspan = (0.0f0, 100.0f0)
p = SA[10.0f0, 28.0f0, 8 / 3.0f0]
prob = ODEProblem(func, u0, tspan, p)
prob_func = (prob, i, repeat) -> remake(prob, p = rand(Float32, 3) .* p)
monteprob = EnsembleProblem(prob, prob_func = prob_func, safetycopy = false)
@time sol = solve(monteprob, Tsit5(), EnsembleGPUArray(backend), trajectories = 10_000,
saveat = 1.0f0)
if GROUP == "CUDA"
@time sol = solve(monteprob, Rosenbrock23(), EnsembleGPUArray(backend),
trajectories = 10_000,
saveat = 1.0f0)
@time sol = solve(monteprob, TRBDF2(), EnsembleGPUArray(backend),
trajectories = 10_000,
saveat = 1.0f0)
end
| DiffEqGPU | https://github.com/SciML/DiffEqGPU.jl.git |
|
[
"MIT"
] | 3.4.1 | 7a714b856fe00d6194774bc38e387375f06963fe | code | 829 | using DiffEqGPU, StochasticDiffEq, Test
include("utils.jl")
function lorenz(du, u, p, t)
du[1] = p[1] * (u[2] - u[1])
du[2] = u[1] * (p[2] - u[3]) - u[2]
du[3] = u[1] * u[2] - p[3] * u[3]
end
function multiplicative_noise(du, u, p, t)
du[1] = 0.1 * u[1]
du[2] = 0.1 * u[2]
du[3] = 0.1 * u[3]
end
u0 = Float32[1.0; 0.0; 0.0]
tspan = (0.0f0, 10.0f0)
p = (10.0f0, 28.0f0, 8 / 3.0f0)
prob = SDEProblem(lorenz, multiplicative_noise, u0, tspan, p)
const pre_p = [rand(Float32, 3) for i in 1:10]
prob_func = (prob, i, repeat) -> remake(prob, p = pre_p[i] .* p)
monteprob = EnsembleProblem(prob, prob_func = prob_func)
@info "Explicit Methods"
#Performance check with nvvp
# CUDAnative.CUDAdrv.@profile
@time sol = solve(monteprob, SOSRI(), EnsembleGPUArray(backend), trajectories = 10,
saveat = 1.0f0)
| DiffEqGPU | https://github.com/SciML/DiffEqGPU.jl.git |
|
[
"MIT"
] | 3.4.1 | 7a714b856fe00d6194774bc38e387375f06963fe | code | 2033 | using DiffEqGPU, StaticArrays, Adapt, OrdinaryDiffEq
include("utils.jl")
@info "Testing lower level API for EnsembleGPUKernel"
trajectories = 10_000
function lorenz(u, p, t)
σ = p[1]
ρ = p[2]
β = p[3]
du1 = σ * (u[2] - u[1])
du2 = u[1] * (ρ - u[3]) - u[2]
du3 = u[1] * u[2] - β * u[3]
return SVector{3}(du1, du2, du3)
end
function lorenz_jac(u, p, t)
σ = p[1]
ρ = p[2]
β = p[3]
x = u[1]
y = u[2]
z = u[3]
J11 = -σ
J21 = ρ - z
J31 = y
J12 = σ
J22 = -1
J32 = x
J13 = 0
J23 = -x
J33 = -β
return SMatrix{3, 3}(J11, J21, J31, J12, J22, J32, J13, J23, J33)
end
function lorenz_tgrad(u, p, t)
return SVector{3, eltype(u)}(0.0, 0.0, 0.0)
end
u0 = @SVector [1.0f0; 0.0f0; 0.0f0]
tspan = (0.0f0, 10.0f0)
p = @SVector [10.0f0, 28.0f0, 8 / 3.0f0]
func = ODEFunction(lorenz, jac = lorenz_jac, tgrad = lorenz_tgrad)
prob = ODEProblem{false}(func, u0, tspan, p)
## Building different problems for different parameters
batch = 1:trajectories
probs = map(batch) do i
DiffEqGPU.make_prob_compatible(remake(prob, p = (@SVector rand(Float32, 3)) .* p))
end
## Move the arrays to the GPU
gpu_probs = adapt(backend, probs)
## Finally use the lower API for faster solves! (Fixed time-stepping)
algs = (GPUTsit5(), GPUVern7(), GPUVern9(), GPURosenbrock23(), GPURodas4())
for alg in algs
@info alg
DiffEqGPU.vectorized_solve(gpu_probs, prob, alg;
save_everystep = false, dt = 0.1f0)
DiffEqGPU.vectorized_asolve(gpu_probs, prob, alg;
save_everystep = false, dt = 0.1f0)
end
@info "Testing lower level API for EnsembleGPUArray"
@time sol = DiffEqGPU.vectorized_map_solve(probs, Tsit5(), EnsembleGPUArray(backend, 0.0),
batch, false, dt = 0.001f0,
save_everystep = false, dense = false)
## Adaptive time-stepping (Notice the boolean argument)
@time sol = DiffEqGPU.vectorized_map_solve(probs, Tsit5(), EnsembleGPUArray(backend, 0.0),
batch, true, dt = 0.001f0,
save_everystep = false, dense = false)
| DiffEqGPU | https://github.com/SciML/DiffEqGPU.jl.git |
|
[
"MIT"
] | 3.4.1 | 7a714b856fe00d6194774bc38e387375f06963fe | code | 1177 | # ode checks
using OrdinaryDiffEq, DiffEqGPU, Test
include("utils.jl")
# Seeded scaling factors so all ensemble backends see identical trajectories.
seed = 100
using Random;
Random.seed!(seed);
ra = rand(100)
# Simple scalar exponential growth ODE.
function f!(du, u, p, t)
    du[1] = 1.01 * u[1]
end
prob = ODEProblem(f!, [0.5], (0.0, 1.0))
# Keep only the final state of each trajectory; `false` = do not rerun.
function output_func(sol, i)
    last(sol), false
end
function prob_func(prob, i, repeat)
    remake(prob, u0 = ra[i] * prob.u0)
end
# Ensemble reduction: fold the batch sum into the running accumulator `u`;
# the `false` flag tells the ensemble machinery the batch need not be rerun.
reduction(u, batch, I) = (u .+ sum(batch), false)
# no reduction
prob1 = EnsembleProblem(prob, prob_func = prob_func, output_func = output_func)
sim1 = @time solve(prob1, Tsit5(), trajectories = 100, batch_size = 20)
# reduction and EnsembleThreads()
prob2 = EnsembleProblem(prob, prob_func = prob_func, output_func = output_func,
    reduction = reduction, u_init = Vector{eltype(prob.u0)}([0.0]))
sim2 = @time solve(prob2, Tsit5(), trajectories = 100, batch_size = 20)
# EnsembleCPUArray() and EnsembleGPUArray()
sim3 = @time solve(prob2, Tsit5(), EnsembleCPUArray(), trajectories = 100, batch_size = 20)
sim4 = @time solve(prob2, Tsit5(), EnsembleGPUArray(backend), trajectories = 100,
    batch_size = 20)
@info sim2[1]
# The reduced value must equal the plain sum, identically on every backend.
@test sum(sim1.u) ≈ sim2.u
@test sim2.u ≈ sim3.u
@test sim2.u ≈ sim4.u
| DiffEqGPU | https://github.com/SciML/DiffEqGPU.jl.git |
|
[
"MIT"
] | 3.4.1 | 7a714b856fe00d6194774bc38e387375f06963fe | code | 850 | using OrdinaryDiffEq, Flux, DiffEqGPU, Test
include("utils.jl")
function modelf(du, u, p, t)
du[1] = 1.01 * u[1] * p[1] * p[2]
end
function model()
prob = ODEProblem(modelf, u0, (0.0, 1.0), pa)
function prob_func(prob, i, repeat)
remake(prob, u0 = 0.5 .+ i / 100 .* prob.u0)
end
ensemble_prob = EnsembleProblem(prob, prob_func = prob_func)
solve(ensemble_prob, Tsit5(), EnsembleGPUArray(backend), saveat = 0.1,
trajectories = 10)
end
# loss function
loss() = sum(abs2, 1.0 .- Array(model()))
data = Iterators.repeated((), 10)
cb = function () # callback function to observe training
@show loss()
end
pa = [1.0, 2.0]
u0 = [3.0]
opt = ADAM(0.1)
println("Starting to train")
l1 = loss()
for epoch in 1:10
Flux.train!(loss, Flux.params([pa]), data, opt; cb = cb)
end
l2 = loss()
@test 3l2 < l1
| DiffEqGPU | https://github.com/SciML/DiffEqGPU.jl.git |
|
[
"MIT"
] | 3.4.1 | 7a714b856fe00d6194774bc38e387375f06963fe | code | 3366 | # Pkg.test runs with --check_bounds=1, forcing all bounds checks.
# This is incompatible with GPUifyLoops.
# TODO: Is this needed any longer?
# If forced bounds-checking is on, re-exec the test suite in a child Julia
# process without the flag, then exit this process.
if Base.JLOptions().check_bounds == 1
    cmd = Cmd(filter(arg -> !startswith(arg, "--check-bounds"), Base.julia_cmd().exec))
    code = """
    $(Base.load_path_setup_code(false))
    cd($(repr(@__DIR__)))
    include($(repr(@__FILE__)))
    """
    run(`$cmd --eval $code`)
    exit()
end
@assert Base.JLOptions().check_bounds == 0
# Backend capability sets used to gate groups of tests below.
const SUPPORTS_LUFACT = Set(["CUDA", "AMDGPU"])
const SUPPORTS_DOUBLE_PRECISION = Set(["CUDA", "AMDGPU"])
const GROUP = get(ENV, "GROUP", "CUDA")
using SafeTestsets, Test
@time @safetestset "GPU Kernelized Stiff ODE Mass Matrix" begin
    include("gpu_kernel_de/stiff_ode/gpu_ode_mass_matrix.jl")
end
@time @testset "GPU Kernelized Non Stiff ODE Regression" begin
    include("gpu_kernel_de/gpu_ode_regression.jl")
end
@time @safetestset "GPU Kernelized Non Stiff ODE DiscreteCallback" begin
    include("gpu_kernel_de/gpu_ode_discrete_callbacks.jl")
end
@time @testset "GPU Kernelized Stiff ODE Regression" begin
    include("gpu_kernel_de/stiff_ode/gpu_ode_regression.jl")
end
@time @safetestset "GPU Kernelized Stiff ODE DiscreteCallback" begin
    include("gpu_kernel_de/stiff_ode/gpu_ode_discrete_callbacks.jl")
end
@time @safetestset "GPU Kernelized ForwardDiff tests" begin
    include("gpu_kernel_de/forward_diff.jl")
end
@time @safetestset "GPU Kernelized FiniteDiff tests" begin
    include("gpu_kernel_de/finite_diff.jl")
end
@time @safetestset "GPU Kernelized Auto-Conversion tests" begin
    include("gpu_kernel_de/conversions.jl")
end
if GROUP in SUPPORTS_LUFACT
    @time @safetestset "EnsembleGPUArray" begin
        include("ensemblegpuarray.jl")
    end
    @time @safetestset "EnsembleGPUArray OOP" begin
        include("ensemblegpuarray_oop.jl")
    end
end
# EnsembleGPUArray kernels has Int64 arguments, causing them to fail with Metal and oneAPI
if GROUP in SUPPORTS_DOUBLE_PRECISION
    @time @safetestset "EnsembleGPUArray SDE" begin
        include("ensemblegpuarray_sde.jl")
    end
    @time @safetestset "EnsembleGPUArray Input Types" begin
        include("ensemblegpuarray_inputtypes.jl")
    end
    @time @safetestset "Reduction" begin
        include("reduction.jl")
    end
    @time @safetestset "Reverse Mode AD" begin
        include("reverse_ad_tests.jl")
    end
    # Not safe because distributed doesn't play nicely with modules.
    @time @testset "Distributed Multi-GPU" begin
        include("distributed_multi_gpu.jl")
    end
    @time @testset "Lower level API" begin
        include("lower_level_api.jl")
    end
end
# Callbacks currently error on v1.10
if GROUP == "CUDA" && VERSION <= v"1.9"
    # Causes dynamic function invocation
    @time @testset "GPU Kernelized Non Stiff ODE ContinuousCallback" begin
        include("gpu_kernel_de/gpu_ode_continuous_callbacks.jl")
    end
    @time @testset "GPU Kernelized Stiff ODE ContinuousCallback" begin
        include("gpu_kernel_de/stiff_ode/gpu_ode_continuous_callbacks.jl")
    end
    # device Random not implemented yet
    @time @testset "GPU Kernelized SDE Regression" begin
        include("gpu_kernel_de/gpu_sde_regression.jl")
    end
    @time @testset "GPU Kernelized SDE Convergence" begin
        include("gpu_kernel_de/gpu_sde_convergence.jl")
    end
end
| DiffEqGPU | https://github.com/SciML/DiffEqGPU.jl.git |
|
[
"MIT"
] | 3.4.1 | 7a714b856fe00d6194774bc38e387375f06963fe | code | 401 | const GROUP = get(ENV, "GROUP", "CUDA")
const backend = if GROUP == "CUDA"
using CUDA
CUDA.CUDABackend()
elseif GROUP == "AMDGPU"
using AMDGPU
AMDGPU.ROCBackend()
elseif GROUP == "oneAPI"
using oneAPI
oneAPI.oneAPIBackend()
elseif GROUP == "Metal"
using Metal
Metal.MetalBackend()
end
import GPUArraysCore
GPUArraysCore.allowscalar(false)
@info "Testing on " backend
| DiffEqGPU | https://github.com/SciML/DiffEqGPU.jl.git |
|
[
"MIT"
] | 3.4.1 | 7a714b856fe00d6194774bc38e387375f06963fe | code | 1697 | using DiffEqGPU, OrdinaryDiffEq, StaticArrays, LinearAlgebra, Test
include("../utils.jl")
function lorenz(u, p, t)
σ = p[1]
ρ = p[2]
β = p[3]
du1 = σ * (u[2] - u[1])
du2 = u[1] * (ρ - u[3]) - u[2]
du3 = u[1] * u[2] - β * u[3]
return SVector{3}(du1, du2, du3)
end
u0 = [1.0f0; 0.0f0; 0.0f0]
tspan = (0.0f0, 10.0f0)
p = [10.0f0, 28.0f0, 8 / 3.0f0]
prob = ODEProblem{false}(lorenz, u0, tspan, p)
prob_func = (prob, i, repeat) -> remake(prob, p = (@SVector rand(Float32, 3)) .* p)
monteprob = EnsembleProblem(prob, prob_func = prob_func, safetycopy = false)
## Don't test the problems in which GPUs don't support FP64 completely yet
## Creating StepRangeLen causes some param types to be FP64 inferred by `float` function
if ENV["GROUP"] ∉ ("Metal", "oneAPI")
@test solve(monteprob, GPUTsit5(), EnsembleGPUKernel(backend),
trajectories = 10_000,
saveat = 1:10)[1].t == Float32.(1:10)
@test solve(monteprob, GPUTsit5(), EnsembleGPUKernel(backend),
trajectories = 10_000,
saveat = 1:0.1:10)[1].t == 1.0f0:0.1f0:10.0f0
@test solve(monteprob, GPUTsit5(), EnsembleGPUKernel(backend),
trajectories = 10_000,
saveat = 1:(1.0f0):10)[1].t == 1:1.0f0:10
@test solve(monteprob, GPUTsit5(), EnsembleGPUKernel(backend),
trajectories = 10_000,
saveat = 1.0)[1].t == 0.0f0:1.0f0:10.0f0
end
@test solve(monteprob, GPUTsit5(), EnsembleGPUKernel(backend),
trajectories = 10_000,
saveat = [1.0f0, 5.0f0, 10.0f0])[1].t == [1.0f0, 5.0f0, 10.0f0]
@test solve(monteprob, GPUTsit5(), EnsembleGPUKernel(backend),
trajectories = 10_000,
saveat = [1.0, 5.0, 10.0])[1].t == [1.0f0, 5.0f0, 10.0f0]
| DiffEqGPU | https://github.com/SciML/DiffEqGPU.jl.git |
|
[
"MIT"
] | 3.4.1 | 7a714b856fe00d6194774bc38e387375f06963fe | code | 1051 |
using DiffEqGPU, OrdinaryDiffEq, StaticArrays, LinearAlgebra
include("../utils.jl")
using ForwardDiff
# Finite-difference Jacobian test for the GPU stiff kernel solvers
# (`autodiff = false`), checked against a CPU Rodas5P reference solve.
function f(u, p, t)
    du1 = -p[1] * u[1] * u[1]
    return SVector{1}(du1)
end
u0 = @SVector [10.0f0]
p = @SVector [1.0f0]
tspan = (0.0f0, 10.0f0)
prob = ODEProblem{false}(f, u0, tspan, p)
prob_func = (prob, i, repeat) -> remake(prob, p = p)
monteprob = EnsembleProblem(prob, prob_func = prob_func, safetycopy = false)
# CPU reference trajectory.
osol = solve(prob, Rodas5P(), dt = 0.01f0, save_everystep = false)
for alg in (GPURosenbrock23(autodiff = false), GPURodas4(autodiff = false),
    GPURodas5P(autodiff = false), GPUKvaerno3(autodiff = false),
    GPUKvaerno5(autodiff = false))
    @info alg
    sol = solve(monteprob, alg, EnsembleGPUKernel(backend, 0.0),
        trajectories = 2, save_everystep = false, adaptive = true, dt = 0.01f0)
    @test norm(sol[1].u - osol.u) < 2e-4
    # massive threads
    sol = solve(monteprob, alg, EnsembleGPUKernel(backend, 0.0),
        trajectories = 10_000, save_everystep = false, adaptive = true, dt = 0.01f0)
end
| DiffEqGPU | https://github.com/SciML/DiffEqGPU.jl.git |
|
[
"MIT"
] | 3.4.1 | 7a714b856fe00d6194774bc38e387375f06963fe | code | 1483 |
using DiffEqGPU, OrdinaryDiffEq, StaticArrays, LinearAlgebra
include("../utils.jl")
using ForwardDiff
# ForwardDiff.Dual propagation through the GPU kernel solvers: u0 and p carry
# a 6-wide dual seed (3 state + 3 parameter sensitivities).
function lorenz(u, p, t)
    σ = p[1]
    ρ = p[2]
    β = p[3]
    du1 = σ * (u[2] - u[1])
    du2 = u[1] * (ρ - u[3]) - u[2]
    du3 = u[1] * u[2] - β * u[3]
    return SVector{3}(du1, du2, du3)
end
u0 = @SVector [ForwardDiff.Dual(1.0f0, (1.0f0, 0.0f0, 0.0f0, 0.0f0, 0.0f0, 0.0f0));
    ForwardDiff.Dual(0.0f0, (0.0f0, 1.0f0, 0.0f0, 0.0f0, 0.0f0, 0.0f0));
    ForwardDiff.Dual(0.0f0, (0.0f0, 0.0f0, 1.0f0, 0.0f0, 0.0f0, 0.0f0))]
p = @SVector [
    ForwardDiff.Dual(10.0f0, (0.0f0, 0.0f0, 0.0f0, 1.0f0, 0.0f0, 0.0f0)),
    ForwardDiff.Dual(28.0f0, (0.0f0, 0.0f0, 0.0f0, 0.0f0, 1.0f0, 0.0f0)),
    ForwardDiff.Dual(8 / 3.0f0, (0.0f0, 0.0f0, 0.0f0, 0.0f0, 0.0f0, 1.0f0)),
]
tspan = (0.0f0, 10.0f0)
prob = ODEProblem{false}(lorenz, u0, tspan, p)
prob_func = (prob, i, repeat) -> remake(prob, p = p)
monteprob = EnsembleProblem(prob, prob_func = prob_func, safetycopy = false)
# Smoke-test fixed-step and adaptive solves for every kernel algorithm.
for alg in (GPUTsit5(), GPUVern7(), GPUVern9(), GPURosenbrock23(autodiff = false),
    GPURodas4(autodiff = false), GPURodas5P(autodiff = false),
    GPUKvaerno3(autodiff = false), GPUKvaerno5(autodiff = false))
    @info alg
    sol = solve(monteprob, alg, EnsembleGPUKernel(backend, 0.0),
        trajectories = 2, save_everystep = false, adaptive = false, dt = 0.01f0)
    asol = solve(monteprob, alg, EnsembleGPUKernel(backend, 0.0),
        trajectories = 2, adaptive = true, dt = 0.01f0)
end
| DiffEqGPU | https://github.com/SciML/DiffEqGPU.jl.git |
|
[
"MIT"
] | 3.4.1 | 7a714b856fe00d6194774bc38e387375f06963fe | code | 4402 | using DiffEqGPU, OrdinaryDiffEq, StaticArrays, LinearAlgebra
@info "Callbacks"
include("../utils.jl")
function f(u, p, t)
du1 = u[2]
du2 = -p[1]
return SVector{2}(du1, du2)
end
u0 = @SVector[45.0f0, 0.0f0]
tspan = (0.0f0, 15.0f0)
p = @SVector [10.0f0]
prob = ODEProblem{false}(f, u0, tspan, p)
prob_func = (prob, i, repeat) -> remake(prob, p = prob.p)
monteprob = EnsembleProblem(prob, safetycopy = false)
function affect!(integrator)
integrator.u += @SVector[0.0f0, -2.0f0] .* integrator.u
end
function condition(u, t, integrator)
u[1]
end
algs = [GPUTsit5(), GPUVern7()]
diffeq_algs = [Tsit5(), Vern7()]
for (alg, diffeq_alg) in zip(algs, diffeq_algs)
@info typeof(alg)
cb = ContinuousCallback(condition, affect!; save_positions = (false, false))
@info "Unadaptive version"
local sol = solve(monteprob, alg, EnsembleGPUKernel(backend),
trajectories = 2,
adaptive = false, dt = 0.1f0, callback = cb, merge_callbacks = true)
bench_sol = solve(prob, diffeq_alg,
adaptive = false, dt = 0.1f0, callback = cb, merge_callbacks = true)
@test norm(bench_sol.u - sol[1].u) < 7e-4
@info "Callback: CallbackSets"
cb = CallbackSet(cb, cb)
local sol = solve(monteprob, alg, EnsembleGPUKernel(backend),
trajectories = 2,
adaptive = false, dt = 0.1f0, callback = cb, merge_callbacks = true)
bench_sol = solve(prob, diffeq_alg,
adaptive = false, dt = 0.1f0, callback = cb, merge_callbacks = true)
@test norm(bench_sol.u - sol[1].u) < 7e-4
@info "saveat and callbacks"
local sol = solve(monteprob, alg, EnsembleGPUKernel(backend),
trajectories = 2,
adaptive = false, dt = 1.0f0, callback = cb, merge_callbacks = true,
saveat = [0.0f0, 9.1f0])
bench_sol = solve(prob, diffeq_alg,
adaptive = false, dt = 1.0f0, callback = cb, merge_callbacks = true,
saveat = [0.0f0, 9.1f0])
@test norm(bench_sol.u - sol[1].u) < 2e-3
@info "save_everystep and callbacks"
local sol = solve(monteprob, alg, EnsembleGPUKernel(backend),
trajectories = 2,
adaptive = false, dt = 0.1f0, callback = cb, merge_callbacks = true,
save_everystep = false)
bench_sol = solve(prob, diffeq_alg,
adaptive = false, dt = 0.1f0, callback = cb, merge_callbacks = true,
save_everystep = false)
@test norm(bench_sol.u - sol[1].u) < 7e-4
@info "Adaptive version"
cb = ContinuousCallback(condition, affect!; save_positions = (false, false))
local sol = solve(monteprob, alg, EnsembleGPUKernel(backend),
trajectories = 2,
adaptive = true, dt = 0.1f0, callback = cb, merge_callbacks = true)
bench_sol = solve(prob, diffeq_alg,
adaptive = true, save_everystep = false, dt = 0.1f0, callback = cb,
merge_callbacks = true)
@test norm(bench_sol.u - sol[1].u) < 2e-3
@info "Callback: CallbackSets"
cb = CallbackSet(cb, cb)
local sol = solve(monteprob, alg, EnsembleGPUKernel(backend),
trajectories = 2,
adaptive = true, dt = 0.1f0, callback = cb, merge_callbacks = true)
bench_sol = solve(prob, diffeq_alg,
adaptive = true, dt = 0.1f0, save_everystep = false, callback = cb,
merge_callbacks = true)
@test norm(bench_sol.u - sol[1].u) < 2e-3
@info "saveat and callbacks"
local sol = solve(monteprob, alg, EnsembleGPUKernel(backend),
trajectories = 2,
adaptive = true, dt = 0.1f0, callback = cb, merge_callbacks = true,
saveat = [0.0f0, 9.1f0], reltol = 1.0f-6, abstol = 1.0f-6)
bench_sol = solve(prob, diffeq_alg,
adaptive = true, save_everystep = false, dt = 0.1f0, callback = cb,
merge_callbacks = true,
tstops = [24.0f0, 40.0f0], saveat = [0.0f0, 9.1f0], reltol = 1.0f-6,
abstol = 1.0f-6)
@test norm(bench_sol.u - sol[1].u) < 8e-4
@info "Unadaptive and Adaptive comparison"
local sol = solve(monteprob, alg, EnsembleGPUKernel(backend),
trajectories = 2,
adaptive = false, dt = 0.1f0, callback = cb, merge_callbacks = true,
saveat = [0.0f0, 9.1f0])
asol = solve(monteprob, alg, EnsembleGPUKernel(backend),
trajectories = 2,
adaptive = true, dt = 0.1f0, callback = cb, merge_callbacks = true,
saveat = [0.0f0, 9.1f0])
@test norm(asol[1].u - sol[1].u) < 7e-4
end
| DiffEqGPU | https://github.com/SciML/DiffEqGPU.jl.git |
|
[
"MIT"
] | 3.4.1 | 7a714b856fe00d6194774bc38e387375f06963fe | code | 6768 | using DiffEqGPU, OrdinaryDiffEq, StaticArrays, LinearAlgebra
@info "Callbacks"
include("../utils.jl")
function f(u, p, t)
du1 = -u[1]
return SVector{1}(du1)
end
u0 = @SVector [10.0f0]
prob = ODEProblem{false}(f, u0, (0.0f0, 10.0f0))
prob_func = (prob, i, repeat) -> remake(prob, p = prob.p)
monteprob = EnsembleProblem(prob, safetycopy = false)
algs = [GPUTsit5(), GPUVern7(), GPUVern9()]
for alg in algs
@info typeof(alg)
condition(u, t, integrator) = t == 2.40f0
affect!(integrator) = integrator.u += @SVector[10.0f0]
cb = DiscreteCallback(condition, affect!; save_positions = (false, false))
@info "Unadaptive version"
local sol = solve(monteprob, alg, EnsembleGPUKernel(backend),
trajectories = 2,
adaptive = false, dt = 1.0f0, callback = cb, merge_callbacks = true,
tstops = [2.40f0])
bench_sol = solve(prob, Vern9(),
adaptive = false, dt = 1.0f0, callback = cb, merge_callbacks = true,
tstops = [2.40f0])
@test norm(bench_sol(2.40f0) - sol[1](2.40f0)) < 2e-3
@test norm(bench_sol.u - sol[1].u) < 5e-3
#Test the truncation error due to floating point math, encountered when adjusting t for tstops
local sol = solve(monteprob, alg, EnsembleGPUKernel(backend),
trajectories = 2,
adaptive = false, dt = 0.01f0, callback = cb, merge_callbacks = true,
tstops = [4.0f0])
bench_sol = solve(prob, Vern9(),
adaptive = false, dt = 0.01f0, callback = cb, merge_callbacks = true,
tstops = [4.0f0])
@test norm(bench_sol(4.0f0) - sol[1](4.0f0)) < 2e-6
@test norm(bench_sol.u - sol[1].u) < 3e-5
@info "Callback: CallbackSets"
condition_1(u, t, integrator) = t == 2.40f0
condition_2(u, t, integrator) = t == 4.0f0
cb_1 = DiscreteCallback(condition_1, affect!; save_positions = (false, false))
cb_2 = DiscreteCallback(condition_2, affect!; save_positions = (false, false))
cb = CallbackSet(cb_1, cb_2)
local sol = solve(monteprob, alg, EnsembleGPUKernel(backend),
trajectories = 2,
adaptive = false, dt = 1.0f0, callback = cb, merge_callbacks = true,
tstops = [2.40f0, 4.0f0])
bench_sol = solve(prob, Vern9(),
adaptive = false, dt = 1.0f0, callback = cb, merge_callbacks = true,
tstops = [2.40f0, 4.0f0])
@test norm(bench_sol(2.40f0) - sol[1](2.40f0)) < 2e-3
@test norm(bench_sol(4.0f0) - sol[1](4.0f0)) < 3e-3
@test norm(bench_sol.u - sol[1].u) < 7e-3
@info "saveat and callbacks"
local sol = solve(monteprob, alg, EnsembleGPUKernel(backend),
trajectories = 2,
adaptive = false, dt = 1.0f0, callback = cb, merge_callbacks = true,
tstops = [2.40f0, 4.0f0], saveat = [0.0f0, 6.0f0])
bench_sol = solve(prob, Vern9(),
adaptive = false, dt = 1.0f0, callback = cb, merge_callbacks = true,
tstops = [2.40f0, 4.0f0], saveat = [0.0f0, 6.0f0])
@test norm(bench_sol(2.40f0) - sol[1](2.40f0)) < 1e-3
@test norm(bench_sol(6.0f0) - sol[1](6.0f0)) < 3e-3
@test norm(bench_sol.u - sol[1].u) < 3e-3
@info "save_everystep and callbacks"
local sol = solve(monteprob, alg, EnsembleGPUKernel(backend),
trajectories = 2,
adaptive = false, dt = 1.0f0, callback = cb, merge_callbacks = true,
tstops = [2.40f0, 4.0f0], save_everystep = false)
bench_sol = solve(prob, Vern9(),
adaptive = false, dt = 1.0f0, callback = cb, merge_callbacks = true,
tstops = [2.40f0, 4.0f0], save_everystep = false)
@test norm(bench_sol(2.40f0) - sol[1](2.40f0)) < 3e-5
@test norm(bench_sol(4.0f0) - sol[1](4.0f0)) < 5e-5
@test norm(bench_sol.u - sol[1].u) < 2e-4
@info "Adaptive version"
cb = DiscreteCallback(condition, affect!; save_positions = (false, false))
local sol = solve(monteprob, alg, EnsembleGPUKernel(backend),
trajectories = 2,
adaptive = true, dt = 1.0f0, callback = cb, merge_callbacks = true,
tstops = [4.0f0])
bench_sol = solve(prob, Vern9(),
adaptive = true, save_everystep = false, dt = 1.0f0, callback = cb,
merge_callbacks = true,
tstops = [4.0f0])
@test norm(bench_sol(4.0f0) - sol[1](4.0f0)) < 5e-5
@test norm(bench_sol.u - sol[1].u) < 2e-4
@info "Callback: CallbackSets"
local sol = solve(monteprob, alg, EnsembleGPUKernel(backend),
trajectories = 2,
adaptive = true, dt = 1.0f0, callback = cb, merge_callbacks = true,
tstops = [2.40f0, 4.0f0])
bench_sol = solve(prob, Vern9(),
adaptive = true, dt = 1.0f0, save_everystep = false, callback = cb,
merge_callbacks = true,
tstops = [2.40f0, 4.0f0])
@test norm(bench_sol(2.40f0) - sol[1](2.40f0)) < 6e-4
@test norm(bench_sol(4.0f0) - sol[1](4.0f0)) < 1e-3
@test norm(bench_sol.u - sol[1].u) < 3e-3
@info "saveat and callbacks"
local sol = solve(monteprob, alg, EnsembleGPUKernel(backend),
trajectories = 2,
adaptive = true, dt = 1.0f0, callback = cb, merge_callbacks = true,
tstops = [2.40f0, 4.0f0], saveat = [0.0f0, 6.0f0], reltol = 1.0f-7,
abstol = 1.0f-7)
bench_sol = solve(prob, Vern9(),
adaptive = true, save_everystep = false, dt = 1.0f0, callback = cb,
merge_callbacks = true,
tstops = [2.40f0, 4.0f0], saveat = [0.0f0, 6.0f0], reltol = 1.0f-7,
abstol = 1.0f-7)
@test norm(bench_sol(2.40f0) - sol[1](2.40f0)) < 7e-3
@test norm(bench_sol(6.0f0) - sol[1](6.0f0)) < 2e-2
@test norm(bench_sol.u - sol[1].u) < 2e-2
@info "Unadaptive and Adaptive comparison"
local sol = solve(monteprob, alg, EnsembleGPUKernel(backend),
trajectories = 2,
adaptive = false, dt = 0.1f0, callback = cb, merge_callbacks = true,
tstops = [2.40f0, 4.0f0], saveat = [0.0f0, 4.0f0])
asol = solve(monteprob, alg, EnsembleGPUKernel(backend),
trajectories = 2,
adaptive = true, dt = 1.0f0, callback = cb, merge_callbacks = true,
tstops = [2.40f0, 4.0f0], saveat = [0.0f0, 4.0f0])
@test norm(asol[1](2.40f0) - sol[1](2.40f0)) < 3e-3
@test norm(asol[1](4.0f0) - sol[1](4.0f0)) < 5e-3
@test norm(asol[1].u - sol[1].u) < 5e-3
@info "Terminate callback"
cb = DiscreteCallback(condition, affect!; save_positions = (false, false))
local sol = solve(monteprob, alg, EnsembleGPUKernel(backend),
trajectories = 2,
adaptive = false, dt = 1.0f0, callback = cb, merge_callbacks = true,
tstops = [2.40f0])
bench_sol = solve(prob, Vern9(),
adaptive = false, dt = 1.0f0, callback = cb, merge_callbacks = true,
tstops = [2.40f0])
@test norm(bench_sol.t - sol[1].t) < 2e-3
@test norm(bench_sol.u - sol[1].u) < 5e-3
end
| DiffEqGPU | https://github.com/SciML/DiffEqGPU.jl.git |
|
[
"MIT"
] | 3.4.1 | 7a714b856fe00d6194774bc38e387375f06963fe | code | 4127 | using DiffEqGPU, OrdinaryDiffEq, StaticArrays, LinearAlgebra
include("../utils.jl")
function lorenz(u, p, t)
σ = p[1]
ρ = p[2]
β = p[3]
du1 = σ * (u[2] - u[1])
du2 = u[1] * (ρ - u[3]) - u[2]
du3 = u[1] * u[2] - β * u[3]
return SVector{3}(du1, du2, du3)
end
u0 = @SVector [1.0f0; 0.0f0; 0.0f0]
tspan = (0.0f0, 10.0f0)
p = @SVector [10.0f0, 28.0f0, 8 / 3.0f0]
prob = ODEProblem{false}(lorenz, u0, tspan, p)
algs = (GPUTsit5(), GPUVern7(), GPUVern9())
for alg in algs
prob_func = (prob, i, repeat) -> remake(prob, p = p)
monteprob = EnsembleProblem(prob, prob_func = prob_func, safetycopy = false)
@info typeof(alg)
local sol = solve(monteprob, alg, EnsembleGPUKernel(backend), trajectories = 10,
adaptive = false, dt = 0.01f0)
asol = solve(monteprob, alg, EnsembleGPUKernel(backend), trajectories = 10,
adaptive = true, dt = 0.1f-1, abstol = 1.0f-7, reltol = 1.0f-7)
@test sol.converged == true
@test asol.converged == true
## Regression test
bench_sol = solve(prob, Vern9(), adaptive = false, dt = 0.01f0)
bench_asol = solve(prob, Vern9(), dt = 0.1f-1, save_everystep = false, abstol = 1.0f-7,
reltol = 1.0f-7)
@test norm(bench_sol.u[end] - sol[1].u[end]) < 5e-3
@test norm(bench_asol.u - asol[1].u) < 8e-4
### solve parameters
saveat = [2.0f0, 4.0f0]
local sol = solve(monteprob, alg, EnsembleGPUKernel(backend), trajectories = 2,
adaptive = false, dt = 0.01f0, saveat = saveat)
asol = solve(monteprob, alg, EnsembleGPUKernel(backend), trajectories = 2,
adaptive = true, dt = 0.1f-1, abstol = 1.0f-7, reltol = 1.0f-7,
saveat = saveat)
bench_sol = solve(prob, Vern9(), adaptive = false, dt = 0.01f0, saveat = saveat)
bench_asol = solve(prob, Vern9(), dt = 0.1f-1, save_everystep = false, abstol = 1.0f-7,
reltol = 1.0f-7, saveat = saveat)
@test norm(asol[1].u[end] - sol[1].u[end]) < 5e-3
@test norm(bench_sol.u - sol[1].u) < 2e-4
@test norm(bench_asol.u - asol[1].u) < 2e-4
@test length(sol[1].u) == length(saveat)
@test length(asol[1].u) == length(saveat)
saveat = collect(0.0f0:0.1f0:10.0f0)
local sol = solve(monteprob, alg, EnsembleGPUKernel(backend), trajectories = 2,
adaptive = false, dt = 0.01f0, saveat = saveat)
asol = solve(monteprob, alg, EnsembleGPUKernel(backend), trajectories = 2,
adaptive = true, dt = 0.1f-1, abstol = 1.0f-7, reltol = 1.0f-7,
saveat = saveat)
bench_sol = solve(prob, Vern9(), adaptive = false, dt = 0.01f0, saveat = saveat)
bench_asol = solve(prob, Vern9(), dt = 0.1f-1, save_everystep = false, abstol = 1.0f-7,
reltol = 1.0f-7, saveat = saveat)
@test norm(asol[1].u[end] - sol[1].u[end]) < 6e-3
@test norm(bench_sol.u - sol[1].u) < 2e-3
@test norm(bench_asol.u - asol[1].u) < 4e-3
@test length(sol[1].u) == length(saveat)
@test length(asol[1].u) == length(saveat)
local sol = solve(monteprob, alg, EnsembleGPUKernel(backend), trajectories = 2,
adaptive = false, dt = 0.01f0, save_everystep = false)
bench_sol = solve(prob, Vern9(), adaptive = false, dt = 0.01f0, save_everystep = false)
@test norm(bench_sol.u - sol[1].u) < 5e-3
@test length(sol[1].u) == length(bench_sol.u)
### Huge number of threads
local sol = solve(monteprob, alg, EnsembleGPUKernel(backend), trajectories = 10_000,
adaptive = false, dt = 0.01f0, save_everystep = false)
local sol = solve(monteprob, alg, EnsembleGPUKernel(backend), trajectories = 10_000,
adaptive = true, dt = 0.01f0, save_everystep = false)
## With random parameters
prob_func = (prob, i, repeat) -> remake(prob, p = (@SVector rand(Float32, 3)) .* p)
monteprob = EnsembleProblem(prob, prob_func = prob_func, safetycopy = false)
local sol = solve(monteprob, alg, EnsembleGPUKernel(backend), trajectories = 10,
adaptive = false, dt = 0.1f0)
asol = solve(monteprob, alg, EnsembleGPUKernel(backend), trajectories = 10,
adaptive = true, dt = 0.1f-1, abstol = 1.0f-7, reltol = 1.0f-7)
end
| DiffEqGPU | https://github.com/SciML/DiffEqGPU.jl.git |
|
[
"MIT"
] | 3.4.1 | 7a714b856fe00d6194774bc38e387375f06963fe | code | 1064 | using DiffEqGPU, OrdinaryDiffEq, StaticArrays, LinearAlgebra, Statistics
# Weak-convergence-order tests for the GPU SDE solvers on scalar geometric
# Brownian motion dX = p1*X dt + p2*X dW. DiffEqDevTools.test_convergence
# estimates the weak order from ensemble means at several dt values and it is
# checked against the theoretical order (EM ≈ 1, SIEA ≈ 2).
using DiffEqDevTools
include("../utils.jl")  # provides `backend`
u₀ = SA[0.1f0]
f(u, p, t) = SA[p[1] * u[1]]  # drift
g(u, p, t) = SA[p[2] * u[1]]  # diffusion (small, so E[X_T] ≈ u₀*exp(p1*T))
tspan = (0.0f0, 1.0f0)
p = SA[1.5f0, 0.01f0]
prob = SDEProblem(f, g, u₀, tspan, p; seed = 1234)
dts = 1 .// 2 .^ (5:-1:2)
# Only the final value of each trajectory is kept for the weak-error estimate.
ensemble_prob = EnsembleProblem(prob;
    output_func = (sol, i) -> (sol[end], false))
@info "EM"
dts = 1 .// 2 .^ (12:-1:8)  # overrides the dts above for the EM run
sim = test_convergence(Float32.(dts), ensemble_prob, GPUEM(),
    EnsembleGPUKernel(backend, 0.0),
    save_everystep = false, trajectories = Int(1e5),
    weak_timeseries_errors = false,
    expected_value = SA[u₀ * exp((p[1]))])
@show sim.𝒪est[:weak_final]
# Euler–Maruyama: weak order 1 expected.
@test abs(sim.𝒪est[:weak_final] - 1.0) < 0.1
@info "GPUSIEA"
dts = 1 .// 2 .^ (6:-1:4)
sim = test_convergence(Float32.(dts), ensemble_prob, GPUSIEA(),
    EnsembleGPUKernel(backend, 0.0),
    save_everystep = false, trajectories = Int(5e4),
    expected_value = SA[u₀ * exp((p[1]))])
@show sim.𝒪est[:weak_final]
# SIEA: weak order 2 expected (checked around 2.1 with a wide band).
@test abs(sim.𝒪est[:weak_final] - 2.1) < 0.4
| DiffEqGPU | https://github.com/SciML/DiffEqGPU.jl.git |
|
[
"MIT"
] | 3.4.1 | 7a714b856fe00d6194774bc38e387375f06963fe | code | 2828 | using DiffEqGPU, OrdinaryDiffEq, StaticArrays, LinearAlgebra, Statistics
include("../utils.jl")
@info "Convergence Test"
algs = [GPUEM(), GPUSIEA()]
for alg in algs
@info alg
# dX_t = u*dt + udW_t
f(u, p, t) = u
g(u, p, t) = u
u0 = @SVector [0.5f0]
tspan = (0.0f0, 1.0f0)
prob = SDEProblem(f, g, u0, tspan; seed = 123)
monteprob = EnsembleProblem(prob)
dt = Float32(1 // 2^(8))
## solve using off-loading on CPU
sol = solve(monteprob, alg, EnsembleGPUKernel(backend), dt = dt, trajectories = 1000,
adaptive = false)
sol = solve(monteprob, alg, EnsembleGPUKernel(backend, 0.0), dt = dt,
trajectories = 1000,
adaptive = false)
sol_array = Array(sol)
us = reshape(mean(sol_array, dims = 3), size(sol_array, 2))
us_exact = 0.5f0 * exp.(sol[1].t)
@test norm(us - us_exact, Inf) < 6e-2
@info "Diagonal Noise"
function lorenz(u, p, t)
du1 = 10.0(u[2] - u[1])
du2 = u[1] * (28.0 - u[3]) - u[2]
du3 = u[1] * u[2] - (8 / 3) * u[3]
return SVector{3}(du1, du2, du3)
end
function σ_lorenz(u, p, t)
return SVector{3}(3.0f0, 3.0f0, 3.0f0)
end
u0 = @SVector [1.0f0, 0.0f0, 0.0f0]
tspan = (0.0f0, 10.0f0)
prob = SDEProblem(lorenz, σ_lorenz, u0, tspan)
monteprob = EnsembleProblem(prob)
dt = Float32(1 // 2^(8))
sol = solve(monteprob, alg, EnsembleGPUKernel(backend, 0.0), dt = dt, trajectories = 10,
adaptive = false)
@test sol.converged == true
sol = solve(monteprob, alg, EnsembleGPUKernel(backend, 0.0), dt = dt, trajectories = 10,
adaptive = false, save_everystep = false)
@test sol.converged == true
@test length(sol[1].u) == 2
saveat = [0.3f0, 0.5f0]
sol = solve(monteprob, alg, EnsembleGPUKernel(backend, 0.0), dt = dt, trajectories = 10,
adaptive = false, saveat = saveat)
end
@info "Non-Diagonal Noise"
function f(u, p, t)
return 1.01 .* u
end
function g(u, p, t)
du1_1 = 0.3u[1]
du1_2 = 0.6u[1]
du1_3 = 0.9u[1]
du1_4 = 0.12u[1]
du2_1 = 1.2u[2]
du2_2 = 0.2u[2]
du2_3 = 0.3u[2]
du2_4 = 1.8u[2]
return SMatrix{2, 4}(du1_1, du1_2, du1_3, du1_4, du2_1, du2_2, du2_3, du2_4)
end
u0 = @SVector ones(Float32, 2)
dt = Float32(1 // 2^(8))
noise_rate_prototype = @SMatrix zeros(Float32, 2, 4)
prob = SDEProblem(f, g, u0, (0.0f0, 1.0f0), noise_rate_prototype = noise_rate_prototype)
monteprob = EnsembleProblem(prob)
sol = solve(monteprob, GPUEM(), EnsembleGPUKernel(backend, 0.0), dt = dt, trajectories = 10,
adaptive = false)
@test sol.converged == true
sol = solve(monteprob, GPUEM(), EnsembleGPUKernel(backend, 0.0), dt = dt, trajectories = 10,
adaptive = false, save_everystep = false)
@test sol.converged == true
@test length(sol[1].u) == 2
| DiffEqGPU | https://github.com/SciML/DiffEqGPU.jl.git |
|
[
"MIT"
] | 3.4.1 | 7a714b856fe00d6194774bc38e387375f06963fe | code | 4630 | using DiffEqGPU, StaticArrays, OrdinaryDiffEq, LinearAlgebra
include("../../utils.jl")
function f(u, p, t)
du1 = u[2]
du2 = -p[1]
return SVector{2}(du1, du2)
end
function f_jac(u, p, t)
J11 = 0.0
J21 = 0.0
J12 = 1.0
J22 = 0.0
return SMatrix{2, 2, eltype(u)}(J11, J21, J12, J22)
end
function f_tgrad(u, p, t)
return SVector{2, eltype(u)}(0.0, 0.0)
end
u0 = @SVector[45.0f0, 0.0f0]
tspan = (0.0f0, 16.5f0)
p = @SVector [10.0f0]
func = ODEFunction(f, jac = f_jac, tgrad = f_tgrad)
prob = ODEProblem{false}(func, u0, tspan, p)
prob_func = (prob, i, repeat) -> remake(prob, p = prob.p)
monteprob = EnsembleProblem(prob, safetycopy = false)
function affect!(integrator)
integrator.u += @SVector[0.0f0, -2.0f0] .* integrator.u
end
function condition(u, t, integrator)
u[1]
end
algs = [GPURosenbrock23(), GPURodas4()]
for alg in algs
@info typeof(alg)
cb = ContinuousCallback(condition, affect!; save_positions = (false, false))
@info "Unadaptive version"
local sol = solve(monteprob, alg, EnsembleGPUKernel(backend),
trajectories = 2,
adaptive = false, dt = 0.1f0, callback = cb, merge_callbacks = true)
bench_sol = solve(prob, Rosenbrock23(),
adaptive = false, dt = 0.1f0, callback = cb, merge_callbacks = true)
@test norm(bench_sol.u - sol[1].u) < 8e-4
@info "Callback: CallbackSets"
cb = CallbackSet(cb, cb)
local sol = solve(monteprob, alg, EnsembleGPUKernel(backend),
trajectories = 2,
adaptive = false, dt = 0.1f0, callback = cb, merge_callbacks = true)
bench_sol = solve(prob, Rosenbrock23(),
adaptive = false, dt = 0.1f0, callback = cb, merge_callbacks = true)
@test norm(bench_sol.u - sol[1].u) < 8e-4
@info "saveat and callbacks"
local sol = solve(monteprob, alg, EnsembleGPUKernel(backend),
trajectories = 2,
adaptive = false, dt = 1.0f0, callback = cb, merge_callbacks = true,
saveat = [0.0f0, 9.1f0])
bench_sol = solve(prob, Rosenbrock23(),
adaptive = false, dt = 1.0f0, callback = cb, merge_callbacks = true,
saveat = [0.0f0, 9.1f0])
@test norm(bench_sol.u - sol[1].u) < 5e-4
@info "save_everystep and callbacks"
local sol = solve(monteprob, alg, EnsembleGPUKernel(backend),
trajectories = 2,
adaptive = false, dt = 0.1f0, callback = cb, merge_callbacks = true,
save_everystep = false)
bench_sol = solve(prob, Rosenbrock23(),
adaptive = false, dt = 0.1f0, callback = cb, merge_callbacks = true,
save_everystep = false)
@test norm(bench_sol.u - sol[1].u) < 6e-4
@info "Adaptive version"
cb = ContinuousCallback(condition, affect!; save_positions = (false, false))
local sol = solve(monteprob, alg, EnsembleGPUKernel(backend),
trajectories = 2,
adaptive = true, dt = 0.1f0, callback = cb, merge_callbacks = true)
bench_sol = solve(prob, Rosenbrock23(),
adaptive = true, save_everystep = false, dt = 0.1f0, callback = cb,
merge_callbacks = true)
@test norm(bench_sol.u - sol[1].u) < 2e-3
@info "Callback: CallbackSets"
cb = CallbackSet(cb, cb)
local sol = solve(monteprob, alg, EnsembleGPUKernel(backend),
trajectories = 2,
adaptive = true, dt = 0.1f0, callback = cb, merge_callbacks = true)
bench_sol = solve(prob, Rosenbrock23(),
adaptive = true, dt = 0.1f0, save_everystep = false, callback = cb,
merge_callbacks = true)
@test norm(bench_sol.u - sol[1].u) < 2e-3
@info "saveat and callbacks"
local sol = solve(monteprob, alg, EnsembleGPUKernel(backend),
trajectories = 2,
adaptive = true, dt = 0.1f0, callback = cb, merge_callbacks = true,
saveat = [0.0f0, 9.1f0], reltol = 1.0f-6, abstol = 1.0f-6)
bench_sol = solve(prob, Rosenbrock23(),
adaptive = true, save_everystep = false, dt = 0.1f0, callback = cb,
merge_callbacks = true,
tstops = [24.0f0, 40.0f0], saveat = [0.0f0, 9.1f0], reltol = 1.0f-6,
abstol = 1.0f-6)
@test norm(bench_sol.u - sol[1].u) < 6e-4
@info "Unadaptive and Adaptive comparison"
local sol = solve(monteprob, alg, EnsembleGPUKernel(backend),
trajectories = 2,
adaptive = false, dt = 0.1f0, callback = cb, merge_callbacks = true,
saveat = [0.0f0, 9.1f0])
asol = solve(monteprob, alg, EnsembleGPUKernel(backend),
trajectories = 2,
adaptive = true, dt = 0.1f0, callback = cb, merge_callbacks = true,
saveat = [0.0f0, 9.1f0])
@test norm(asol[1].u - sol[1].u) < 7e-4
end
| DiffEqGPU | https://github.com/SciML/DiffEqGPU.jl.git |
|
[
"MIT"
] | 3.4.1 | 7a714b856fe00d6194774bc38e387375f06963fe | code | 6406 | using DiffEqGPU, OrdinaryDiffEq, StaticArrays, LinearAlgebra
@info "Callbacks"
include("../../utils.jl")
function f(u, p, t)
du1 = -u[1]
return SVector{1}(du1)
end
function f_jac(u, p, t)
return SMatrix{1, 1, eltype(u)}(-1)
end
function f_tgrad(u, p, t)
return SVector{1, eltype(u)}(0.0)
end
func = ODEFunction(f, jac = f_jac, tgrad = f_tgrad)
u0 = @SVector [10.0f0]
prob = ODEProblem{false}(func, u0, (0.0f0, 10.0f0))
prob_func = (prob, i, repeat) -> remake(prob, p = prob.p)
monteprob = EnsembleProblem(prob, safetycopy = false)
algs = [GPURosenbrock23(), GPURodas4()]
diffeq_algs = [Rosenbrock23(), Rodas4()]
for (alg, diffeq_alg) in zip(algs, diffeq_algs)
@info typeof(alg)
condition(u, t, integrator) = t == 2.40f0
affect!(integrator) = integrator.u += @SVector[10.0f0]
cb = DiscreteCallback(condition, affect!; save_positions = (false, false))
@info "Unadaptive version"
local sol = solve(monteprob, alg, EnsembleGPUKernel(backend),
trajectories = 2,
adaptive = false, dt = 1.0f0, callback = cb, merge_callbacks = true,
tstops = [2.40f0])
bench_sol = solve(prob, diffeq_alg,
adaptive = false, dt = 1.0f0, callback = cb, merge_callbacks = true,
tstops = [2.40f0])
@test norm(bench_sol(2.40f0) - sol[1](2.40f0)) < 2e-3
@test norm(bench_sol.u - sol[1].u) < 5e-3
#Test the truncation error due to floating point math, encountered when adjusting t for tstops
local sol = solve(monteprob, alg, EnsembleGPUKernel(backend),
trajectories = 2,
adaptive = false, dt = 0.01f0, callback = cb, merge_callbacks = true,
tstops = [4.0f0])
bench_sol = solve(prob, diffeq_alg,
adaptive = false, dt = 0.01f0, callback = cb, merge_callbacks = true,
tstops = [4.0f0])
@test norm(bench_sol(4.0f0) - sol[1](4.0f0)) < 2e-6
@test norm(bench_sol.u - sol[1].u) < 3e-5
@info "Callback: CallbackSets"
condition_1(u, t, integrator) = t == 2.40f0
condition_2(u, t, integrator) = t == 4.0f0
cb_1 = DiscreteCallback(condition_1, affect!; save_positions = (false, false))
cb_2 = DiscreteCallback(condition_2, affect!; save_positions = (false, false))
cb = CallbackSet(cb_1, cb_2)
local sol = solve(monteprob, alg, EnsembleGPUKernel(backend),
trajectories = 2,
adaptive = false, dt = 1.0f0, callback = cb, merge_callbacks = true,
tstops = [2.40f0, 4.0f0])
bench_sol = solve(prob, diffeq_alg,
adaptive = false, dt = 1.0f0, callback = cb, merge_callbacks = true,
tstops = [2.40f0, 4.0f0])
@test norm(bench_sol(2.40f0) - sol[1](2.40f0)) < 2e-3
@test norm(bench_sol(4.0f0) - sol[1](4.0f0)) < 3e-3
@test norm(bench_sol.u - sol[1].u) < 7e-3
@info "saveat and callbacks"
local sol = solve(monteprob, alg, EnsembleGPUKernel(backend),
trajectories = 2,
adaptive = false, dt = 1.0f0, callback = cb, merge_callbacks = true,
tstops = [2.40f0, 4.0f0], saveat = [0.0f0, 6.0f0])
bench_sol = solve(prob, diffeq_alg,
adaptive = false, dt = 1.0f0, callback = cb, merge_callbacks = true,
tstops = [2.40f0, 4.0f0], saveat = [0.0f0, 6.0f0])
@test norm(bench_sol(2.40f0) - sol[1](2.40f0)) < 1e-3
@test norm(bench_sol(6.0f0) - sol[1](6.0f0)) < 3e-3
@test norm(bench_sol.u - sol[1].u) < 3e-3
@info "save_everystep and callbacks"
local sol = solve(monteprob, alg, EnsembleGPUKernel(backend),
trajectories = 2,
adaptive = false, dt = 1.0f0, callback = cb, merge_callbacks = true,
tstops = [2.40f0, 4.0f0], save_everystep = false)
bench_sol = solve(prob, diffeq_alg,
adaptive = false, dt = 1.0f0, callback = cb, merge_callbacks = true,
tstops = [2.40f0, 4.0f0], save_everystep = false)
@test norm(bench_sol(2.40f0) - sol[1](2.40f0)) < 3e-5
@test norm(bench_sol(4.0f0) - sol[1](4.0f0)) < 5e-5
@test norm(bench_sol.u - sol[1].u) < 2e-4
@info "Adaptive version"
cb = DiscreteCallback(condition, affect!; save_positions = (false, false))
local sol = solve(monteprob, alg, EnsembleGPUKernel(backend),
trajectories = 2,
adaptive = true, dt = 1.0f0, callback = cb, merge_callbacks = true,
tstops = [4.0f0])
bench_sol = solve(prob, diffeq_alg,
adaptive = true, save_everystep = false, dt = 1.0f0, callback = cb,
merge_callbacks = true,
tstops = [4.0f0])
@test norm(bench_sol(4.0f0) - sol[1](4.0f0)) < 5e-5
@test norm(bench_sol.u - sol[1].u) < 2e-4
@info "Callback: CallbackSets"
local sol = solve(monteprob, alg, EnsembleGPUKernel(backend),
trajectories = 2,
adaptive = true, dt = 1.0f0, callback = cb, merge_callbacks = true,
tstops = [2.40f0, 4.0f0])
bench_sol = solve(prob, diffeq_alg,
adaptive = true, dt = 1.0f0, save_everystep = false, callback = cb,
merge_callbacks = true,
tstops = [2.40f0, 4.0f0])
@test norm(bench_sol(2.40f0) - sol[1](2.40f0)) < 6e-4
@test norm(bench_sol(4.0f0) - sol[1](4.0f0)) < 1e-3
@test norm(bench_sol.u - sol[1].u) < 3e-3
@info "saveat and callbacks"
local sol = solve(monteprob, alg, EnsembleGPUKernel(backend),
trajectories = 2,
adaptive = true, dt = 1.0f0, callback = cb, merge_callbacks = true,
tstops = [2.40f0, 4.0f0], saveat = [0.0f0, 6.0f0], reltol = 1.0f-7,
abstol = 1.0f-7)
bench_sol = solve(prob, diffeq_alg,
adaptive = true, save_everystep = false, dt = 1.0f0, callback = cb,
merge_callbacks = true,
tstops = [2.40f0, 4.0f0], saveat = [0.0f0, 6.0f0], reltol = 1.0f-7,
abstol = 1.0f-7)
@test norm(bench_sol(2.40f0) - sol[1](2.40f0)) < 7e-3
@test norm(bench_sol(6.0f0) - sol[1](6.0f0)) < 2e-2
@test norm(bench_sol.u - sol[1].u) < 2e-2
@info "Terminate callback"
cb = DiscreteCallback(condition, affect!; save_positions = (false, false))
local sol = solve(monteprob, alg, EnsembleGPUKernel(backend),
trajectories = 2,
adaptive = false, dt = 1.0f0, callback = cb, merge_callbacks = true,
tstops = [2.40f0])
bench_sol = solve(prob, diffeq_alg,
adaptive = false, dt = 1.0f0, callback = cb, merge_callbacks = true,
tstops = [2.40f0])
@test norm(bench_sol.t - sol[1].t) < 2e-3
@test norm(bench_sol.u - sol[1].u) < 5e-3
end
| DiffEqGPU | https://github.com/SciML/DiffEqGPU.jl.git |
|
[
"MIT"
] | 3.4.1 | 7a714b856fe00d6194774bc38e387375f06963fe | code | 1102 | using DiffEqGPU, StaticArrays, OrdinaryDiffEq, LinearAlgebra
include("../../utils.jl")
function rober(u, p, t)
y₁, y₂, y₃ = u
k₁, k₂, k₃ = p
return @SVector [
-k₁ * y₁ + k₃ * y₂ * y₃,
k₁ * y₁ - k₂ * y₂^2 - k₃ * y₂ * y₃,
y₁ + y₂ + y₃ - 1]
end
function rober_jac(u, p, t)
y₁, y₂, y₃ = u
k₁, k₂, k₃ = p
return @SMatrix[(k₁*-1) (y₃*k₃) (k₃*y₂)
k₁ (y₂ * k₂ * -2+y₃ * k₃ * -1) (k₃*y₂*-1)
0 (y₂*2*k₂) (0)]
end
M = @SMatrix [1.0f0 0.0f0 0.0f0
0.0f0 1.0f0 0.0f0
0.0f0 0.0f0 0.0f0]
ff = ODEFunction(rober, mass_matrix = M)
prob = ODEProblem(ff, @SVector([1.0f0, 0.0f0, 0.0f0]), (0.0f0, 1.0f5),
(0.04f0, 3.0f7, 1.0f4))
monteprob = EnsembleProblem(prob, safetycopy = false)
alg = GPURosenbrock23()
bench_sol = solve(prob, Rosenbrock23(), dt = 0.1, abstol = 1.0f-5, reltol = 1.0f-5)
sol = solve(monteprob, alg, EnsembleGPUKernel(backend),
trajectories = 2,
dt = 0.1f0,
adaptive = true, abstol = 1.0f-5, reltol = 1.0f-5)
@test norm(bench_sol.u[1] - sol[1].u[1]) < 8e-4
@test norm(bench_sol.u[end] - sol[1].u[end]) < 8e-4
| DiffEqGPU | https://github.com/SciML/DiffEqGPU.jl.git |
|
[
"MIT"
] | 3.4.1 | 7a714b856fe00d6194774bc38e387375f06963fe | code | 4801 | using DiffEqGPU, StaticArrays, OrdinaryDiffEq, LinearAlgebra
include("../../utils.jl")
function f(u, p, t)
du1 = -p[1] * u[1]
return SVector{1}(du1)
end
function f_jac(u, p, t)
return @SMatrix [-1.0f0]
end
function f_tgrad(u, p, t)
return SVector{1, eltype(u)}(0.0)
end
u0 = @SVector [10.0f0]
tspan = (0.0f0, 10.0f0)
p = @SVector [1.0f0]
func = ODEFunction(f, jac = f_jac, tgrad = f_tgrad)
prob = ODEProblem{false}(func, u0, tspan, p)
function f_large(u::AbstractArray{T}, p, t) where {T}
return T(1.01) * u
end
large_u0 = @SVector rand(Float32, 15)
large_prob = ODEProblem(f_large, large_u0, (0.0f0, 10.0f0))
algs = (GPURosenbrock23(), GPURodas4(), GPURodas5P(), GPUKvaerno3(), GPUKvaerno5())
for alg in algs
prob_func = (prob, i, repeat) -> remake(prob, p = p)
monteprob = EnsembleProblem(prob, prob_func = prob_func, safetycopy = false)
@info typeof(alg)
local sol = solve(monteprob, alg, EnsembleGPUKernel(backend, 0.0), trajectories = 10,
adaptive = false, dt = 0.01f0)
asol = solve(monteprob, alg, EnsembleGPUKernel(backend, 0.0), trajectories = 10,
adaptive = true, dt = 0.1f-1)
@test sol.converged == true
@test asol.converged == true
## Regression test
bench_sol = solve(prob, Rosenbrock23(), adaptive = false, dt = 0.01f0)
bench_asol = solve(prob, Rosenbrock23(), dt = 0.1f-1, save_everystep = false,
abstol = 1.0f-7,
reltol = 1.0f-7)
@test norm(bench_sol.u[end] - sol[1].u[end]) < 5e-3
@test norm(bench_asol.u - asol[1].u) < 6e-3
### solve parameters
saveat = [2.0f0, 4.0f0]
local sol = solve(monteprob, alg, EnsembleGPUKernel(backend, 0.0), trajectories = 2,
adaptive = false, dt = 0.01f0, saveat = saveat)
asol = solve(monteprob, alg, EnsembleGPUKernel(backend, 0.0), trajectories = 2,
adaptive = true, dt = 0.1f-1, abstol = 1.0f-7, reltol = 1.0f-7,
saveat = saveat)
bench_sol = solve(prob, Rosenbrock23(), adaptive = false, dt = 0.01f0, saveat = saveat)
bench_asol = solve(prob, Rosenbrock23(), dt = 0.1f-1, save_everystep = false,
abstol = 1.0f-7,
reltol = 1.0f-7, saveat = saveat)
@test norm(asol[1].u[end] - sol[1].u[end]) < 4e-2
@test norm(bench_sol.u - sol[1].u) < 2e-4
#Use to fail for 2e-4
@test norm(bench_asol.u - asol[1].u) < 2e-3
@test length(sol[1].u) == length(saveat)
@test length(asol[1].u) == length(saveat)
saveat = collect(0.0f0:0.1f0:10.0f0)
local sol = solve(monteprob, alg, EnsembleGPUKernel(backend), trajectories = 2,
adaptive = false, dt = 0.01f0, saveat = saveat)
asol = solve(monteprob, alg, EnsembleGPUKernel(backend), trajectories = 2,
adaptive = true, dt = 0.1f-1, abstol = 1.0f-7, reltol = 1.0f-7,
saveat = saveat)
bench_sol = solve(prob, Rosenbrock23(), adaptive = false, dt = 0.01f0, saveat = saveat)
bench_asol = solve(prob, Rosenbrock23(), dt = 0.1f-1, save_everystep = false,
abstol = 1.0f-7,
reltol = 1.0f-7, saveat = saveat)
#Fails also OrdinaryDiffEq.jl
# @test norm(asol[1].u[end] - sol[1].u[end]) < 6e-3
@test norm(bench_sol.u - sol[1].u) < 2e-3
@test norm(bench_asol.u - asol[1].u) < 3e-2
@test length(sol[1].u) == length(saveat)
@test length(asol[1].u) == length(saveat)
local sol = solve(monteprob, alg, EnsembleGPUKernel(backend), trajectories = 2,
adaptive = false, dt = 0.01f0, save_everystep = false)
bench_sol = solve(prob, Rosenbrock23(), adaptive = false, dt = 0.01f0,
save_everystep = false)
@test norm(bench_sol.u - sol[1].u) < 5e-3
@test length(sol[1].u) == length(bench_sol.u)
### Huge number of threads
local sol = solve(monteprob, alg, EnsembleGPUKernel(backend, 0.0),
trajectories = 10_000,
adaptive = false, dt = 0.01f0, save_everystep = false)
local sol = solve(monteprob, alg, EnsembleGPUKernel(backend, 0.0),
trajectories = 10_000,
adaptive = true, dt = 0.01f0, save_everystep = false)
## With random parameters
prob_func = (prob, i, repeat) -> remake(prob, p = (@SVector rand(Float32, 1)) .* p)
monteprob = EnsembleProblem(prob, prob_func = prob_func, safetycopy = false)
local sol = solve(monteprob, alg, EnsembleGPUKernel(backend, 0.0), trajectories = 10,
adaptive = false, dt = 0.1f0)
asol = solve(monteprob, alg, EnsembleGPUKernel(backend, 0.0), trajectories = 10,
adaptive = true, dt = 0.1f-1, abstol = 1.0f-7, reltol = 1.0f-7)
## large no. of dimensions
if GROUP == "CUDA"
monteprob = EnsembleProblem(large_prob, safetycopy = false)
local sol = solve(monteprob, alg, EnsembleGPUKernel(backend, 0.0), trajectories = 2,
adaptive = true, dt = 0.1f0)
end
end
| DiffEqGPU | https://github.com/SciML/DiffEqGPU.jl.git |
|
[
"MIT"
] | 3.4.1 | 7a714b856fe00d6194774bc38e387375f06963fe | docs | 5174 | # DiffEqGPU
[](https://julialang.zulipchat.com/#narrow/stream/279055-sciml-bridged)
[](https://docs.sciml.ai/DiffEqGPU/stable/)
[](https://buildkite.com/julialang/diffeqgpu-dot-jl)
[](https://codecov.io/gh/SciML/DiffEqGPU.jl)
[](https://github.com/SciML/ColPrac)
[](https://github.com/SciML/SciMLStyle)
This library is a component package of the DifferentialEquations.jl ecosystem. It includes
functionality for making use of GPUs in the differential equation solvers.
## The two ways to accelerate ODE solvers with GPUs
There are two very different ways that one can
accelerate an ODE solution with GPUs. There is one case where `u` is very big and `f`
is very expensive but very structured, and you use GPUs to accelerate the computation
of said `f`. The other use case is where `u` is very small but you want to solve the ODE
`f` over many different initial conditions (`u0`) or parameters `p`. In that case, you can
use GPUs to parallelize over different parameters and initial conditions. In other words:
| Type of Problem | SciML Solution |
|:----------------------------------------- |:-------------------------------------------------------------------------------------------------------- |
| Accelerate a big ODE | Use [CUDA.jl's](https://cuda.juliagpu.org/stable/) CuArray as `u0` |
| Solve the same ODE with many `u0` and `p` | Use [DiffEqGPU.jl's](https://docs.sciml.ai/DiffEqGPU/stable/) `EnsembleGPUArray` and `EnsembleGPUKernel` |
## Supported GPUs
SciML's GPU support extends to a wide array of hardware, including:
| GPU Manufacturer | GPU Kernel Language | Julia Support Package | Backend Type |
|:---------------- |:------------------- |:-------------------------------------------------- |:------------------------ |
| NVIDIA | CUDA | [CUDA.jl](https://github.com/JuliaGPU/CUDA.jl) | `CUDA.CUDABackend()` |
| AMD | ROCm | [AMDGPU.jl](https://github.com/JuliaGPU/AMDGPU.jl) | `AMDGPU.ROCBackend()` |
| Intel | OneAPI | [OneAPI.jl](https://github.com/JuliaGPU/oneAPI.jl) | `oneAPI.oneAPIBackend()` |
| Apple (M-Series) | Metal | [Metal.jl](https://github.com/JuliaGPU/Metal.jl) | `Metal.MetalBackend()` |
For this tutorial we will demonstrate the CUDA backend for NVIDIA GPUs, though any of the other GPUs can be
used by simply swapping out the `backend` choice.
## Example of Within-Method GPU Parallelism
```julia
using OrdinaryDiffEq, CUDA, LinearAlgebra
u0 = cu(rand(1000))
A = cu(randn(1000, 1000))
f(du, u, p, t) = mul!(du, A, u)
prob = ODEProblem(f, u0, (0.0f0, 1.0f0)) # Float32 is better on GPUs!
sol = solve(prob, Tsit5())
```
## Example of Parameter-Parallelism with GPU Ensemble Methods
```julia
using DiffEqGPU, CUDA, OrdinaryDiffEq, StaticArrays
# Out-of-place Lorenz system RHS returning an SVector, as required by the
# kernel-compiled EnsembleGPUKernel solvers. p = (σ, ρ, β).
function lorenz(u, p, t)
    σ = p[1]
    ρ = p[2]
    β = p[3]
    du1 = σ * (u[2] - u[1])
    du2 = u[1] * (ρ - u[3]) - u[2]
    du3 = u[1] * u[2] - β * u[3]
    return SVector{3}(du1, du2, du3)
end
u0 = @SVector [1.0f0; 0.0f0; 0.0f0]
tspan = (0.0f0, 10.0f0)
p = @SVector [10.0f0, 28.0f0, 8 / 3.0f0]
prob = ODEProblem{false}(lorenz, u0, tspan, p)
prob_func = (prob, i, repeat) -> remake(prob, p = (@SVector rand(Float32, 3)) .* p)
monteprob = EnsembleProblem(prob, prob_func = prob_func, safetycopy = false)
@time sol = solve(monteprob, GPUTsit5(), EnsembleGPUKernel(CUDA.CUDABackend()),
trajectories = 10_000, adaptive = false, dt = 0.1f0)
```
## Benchmarks
Curious about our claims? See [https://github.com/utkarsh530/GPUODEBenchmarks](https://github.com/utkarsh530/GPUODEBenchmarks) for a comparison of our GPU solvers against CPU and GPU implementations in C++, JAX and PyTorch.
## Citation
If you are using `DiffEqGPU.jl` in your work, consider citing our paper:
```
@article{utkarsh2024automated,
title={Automated translation and accelerated solving of differential equations on multiple GPU platforms},
author={Utkarsh, Utkarsh and Churavy, Valentin and Ma, Yingbo and Besard, Tim and Srisuma, Prakitr and Gymnich, Tim and Gerlach, Adam R and Edelman, Alan and Barbastathis, George and Braatz, Richard D and others},
journal={Computer Methods in Applied Mechanics and Engineering},
volume={419},
pages={116591},
year={2024},
publisher={Elsevier}
}
```
| DiffEqGPU | https://github.com/SciML/DiffEqGPU.jl.git |
|
[
"MIT"
] | 3.4.1 | 7a714b856fe00d6194774bc38e387375f06963fe | docs | 4354 | # Getting Started with GPU-Accelerated Differential Equations in Julia
## The two ways to accelerate ODE solvers with GPUs
There are two very different ways that one can
accelerate an ODE solution with GPUs. There is one case where `u` is very big and `f`
is very expensive but very structured, and you use GPUs to accelerate the computation
of said `f`. The other use case is where `u` is very small, but you want to solve the ODE
`f` over many different initial conditions (`u0`) or parameters `p`. In that case, you can
use GPUs to parallelize over different parameters and initial conditions. In other words:
| Type of Problem | SciML Solution |
|:----------------------------------------- |:-------------------------------------------------------------------------------------------------------- |
| Accelerate a big ODE | Use [CUDA.jl's](https://cuda.juliagpu.org/stable/) CuArray as `u0` |
| Solve the same ODE with many `u0` and `p` | Use [DiffEqGPU.jl's](https://docs.sciml.ai/DiffEqGPU/stable/) `EnsembleGPUArray` and `EnsembleGPUKernel` |
## Supported GPUs
SciML's GPU support extends to a wide array of hardware, including:
| GPU Manufacturer | GPU Kernel Language | Julia Support Package | Backend Type |
|:---------------- |:------------------- |:-------------------------------------------------- |:------------------------ |
| NVIDIA | CUDA | [CUDA.jl](https://github.com/JuliaGPU/CUDA.jl) | `CUDA.CUDABackend()` |
| AMD | ROCm | [AMDGPU.jl](https://github.com/JuliaGPU/AMDGPU.jl) | `AMDGPU.ROCBackend()` |
| Intel | OneAPI | [OneAPI.jl](https://github.com/JuliaGPU/oneAPI.jl) | `oneAPI.oneAPIBackend()` |
| Apple (M-Series) | Metal | [Metal.jl](https://github.com/JuliaGPU/Metal.jl) | `Metal.MetalBackend()` |
For this tutorial we will demonstrate the CUDA backend for NVIDIA GPUs, though any of the other GPUs can be
used by simply swapping out the `backend` choice.
## Simple Example of Within-Method GPU Parallelism
The following is a quick and dirty example of doing within-method GPU parallelism.
Let's say we had a simple but large ODE with many linear algebra or map/broadcast
operations:
```@example basic
using OrdinaryDiffEq, LinearAlgebra
u0 = rand(1000)
A = randn(1000, 1000)
f(du, u, p, t) = mul!(du, A, u)
prob = ODEProblem(f, u0, (0.0, 1.0))
sol = solve(prob, Tsit5())
```
Translating this to a GPU-based solve of the ODE simply requires moving the arrays for
the initial condition, parameters, and caches to the GPU. This looks like:
```@example basic
using OrdinaryDiffEq, CUDA, LinearAlgebra
u0 = cu(rand(1000))
A = cu(randn(1000, 1000))
f(du, u, p, t) = mul!(du, A, u)
prob = ODEProblem(f, u0, (0.0f0, 1.0f0)) # Float32 is better on GPUs!
sol = solve(prob, Tsit5())
```
Notice that the solution values `sol[i]` are CUDA-based arrays, which can be moved back
to the CPU using `Array(sol[i])`.
More details on effective use of within-method GPU parallelism can be found in
[the within-method GPU parallelism tutorial](@ref withingpu).
## Example of Parameter-Parallelism with GPU Ensemble Methods
On the other side of the spectrum, what if we want to solve tons of small ODEs? For this
use case, we would use the ensemble methods to solve the same ODE many times with
different parameters. This looks like:
```@example basic
using DiffEqGPU, OrdinaryDiffEq, StaticArrays, CUDA
# Out-of-place Lorenz system RHS returning an SVector (immutable, stack-allocated
# state needed by the GPU-kernel ensemble solvers). p = (σ, ρ, β).
function lorenz(u, p, t)
    σ = p[1]
    ρ = p[2]
    β = p[3]
    du1 = σ * (u[2] - u[1])
    du2 = u[1] * (ρ - u[3]) - u[2]
    du3 = u[1] * u[2] - β * u[3]
    return SVector{3}(du1, du2, du3)
end
u0 = @SVector [1.0f0; 0.0f0; 0.0f0]
tspan = (0.0f0, 10.0f0)
p = @SVector [10.0f0, 28.0f0, 8 / 3.0f0]
prob = ODEProblem{false}(lorenz, u0, tspan, p)
prob_func = (prob, i, repeat) -> remake(prob, p = (@SVector rand(Float32, 3)) .* p)
monteprob = EnsembleProblem(prob, prob_func = prob_func, safetycopy = false)
sol = solve(monteprob, GPUTsit5(), EnsembleGPUKernel(CUDA.CUDABackend()),
trajectories = 10_000)
```
To dig more into this example, see the [ensemble GPU solving tutorial](@ref lorenz).
| DiffEqGPU | https://github.com/SciML/DiffEqGPU.jl.git |
|
[
"MIT"
] | 3.4.1 | 7a714b856fe00d6194774bc38e387375f06963fe | docs | 2609 | # DiffEqGPU: Massively Data-Parallel GPU Solving of ODEs
This library is a component package of the DifferentialEquations.jl ecosystem. It includes
functionality for making use of GPUs in the differential equation solvers.
## Installation
To install DiffEqGPU.jl, use the Julia package manager:
```julia
using Pkg
Pkg.add("DiffEqGPU")
```
This will also install all the dependencies, including the
[CUDA.jl](https://cuda.juliagpu.org/stable/), which will also install all the required
versions of CUDA and CuDNN required by these libraries. Note that the same requirements
of CUDA.jl apply to DiffEqGPU, such as requiring a GPU with CUDA v11 compatibility. For
more information on these requirements, see
[the requirements of CUDA.jl](https://cuda.juliagpu.org/stable/installation/overview/).
## Contributing
- Please refer to the
[SciML ColPrac: Contributor's Guide on Collaborative Practices for Community Packages](https://github.com/SciML/ColPrac/blob/master/README.md)
for guidance on PRs, issues, and other matters relating to contributing to SciML.
- There are a few community forums:
+ the #diffeq-bridged channel in the [Julia Slack](https://julialang.org/slack/)
+ [JuliaDiffEq](https://gitter.im/JuliaDiffEq/Lobby) on Gitter
+ on the [Julia Discourse forums](https://discourse.julialang.org)
+ see also [SciML Community page](https://sciml.ai/community/)
## Reproducibility
```@raw html
<details><summary>The documentation of this SciML package was built using these direct dependencies,</summary>
```
```@example
using Pkg # hide
Pkg.status() # hide
```
```@raw html
</details>
```
```@raw html
<details><summary>and using this machine and Julia version.</summary>
```
```@example
using InteractiveUtils # hide
versioninfo() # hide
```
```@raw html
</details>
```
```@raw html
<details><summary>A more complete overview of all dependencies and their versions is also provided.</summary>
```
```@example
using Pkg # hide
Pkg.status(; mode = PKGMODE_MANIFEST) # hide
```
```@raw html
</details>
```
```@eval
using TOML
using Markdown
version = TOML.parse(read("../../Project.toml", String))["version"]
name = TOML.parse(read("../../Project.toml", String))["name"]
link_manifest = "https://github.com/SciML/" * name * ".jl/tree/gh-pages/v" * version *
"/assets/Manifest.toml"
link_project = "https://github.com/SciML/" * name * ".jl/tree/gh-pages/v" * version *
"/assets/Project.toml"
Markdown.parse("""You can also download the
[manifest]($link_manifest)
file and the
[project]($link_project)
file.
""")
```
| DiffEqGPU | https://github.com/SciML/DiffEqGPU.jl.git |
|
[
"MIT"
] | 3.4.1 | 7a714b856fe00d6194774bc38e387375f06963fe | docs | 1979 | # Using GPU-accelerated Ensembles with Automatic Differentiation
`EnsembleGPUArray` comes with derivative overloads for reverse mode automatic differentiation,
and thus can be thrown into deep learning training loops. The following is an example
of this use:
```@example ad
using OrdinaryDiffEq, SciMLSensitivity, Flux, DiffEqGPU, CUDA, Test
CUDA.allowscalar(false)
# In-place scalar ODE RHS: du/dt = 1.01 * p[1] * p[2] * u (state stored in u[1]).
function modelf(du, u, p, t)
    du[1] = 1.01 * u[1] * p[1] * p[2]
end
# Build and solve a 10-trajectory GPU ensemble of the scalar model ODE.
# Reads the globals `u0` and `pa` (defined later in the script) at call time.
function model()
    prob = ODEProblem(modelf, u0, (0.0, 1.0), pa)
    # Each trajectory perturbs the initial condition deterministically by its index.
    function prob_func(prob, i, repeat)
        remake(prob, u0 = 0.5 .+ i / 100 .* prob.u0)
    end
    ensemble_prob = EnsembleProblem(prob, prob_func = prob_func)
    solve(ensemble_prob, Tsit5(), EnsembleGPUArray(CUDA.CUDABackend()), saveat = 0.1,
        trajectories = 10)
end
# loss function
loss() = sum(abs2, 1.0 .- Array(model()))
data = Iterators.repeated((), 10)
cb = function () # callback function to observe training
@show loss()
end
pa = [1.0, 2.0]
u0 = [3.0]
opt = ADAM(0.1)
println("Starting to train")
l1 = loss()
Flux.train!(loss, Flux.params([pa]), data, opt; cb = cb)
```
Forward-mode automatic differentiation works as well, as demonstrated by its capability
to recompile for Dual number arithmetic:
```@example ad
using OrdinaryDiffEq, DiffEqGPU, ForwardDiff, Test
# In-place Lorenz system RHS; p = (σ, ρ, β). Written generically so it also
# accepts ForwardDiff.Dual states for forward-mode differentiation (see below).
function lorenz(du, u, p, t)
    du[1] = p[1] * (u[2] - u[1])
    du[2] = u[1] * (p[2] - u[3]) - u[2]
    du[3] = u[1] * u[2] - p[3] * u[3]
end
u0 = [ForwardDiff.Dual(1.0f0, (1.0, 0.0, 0.0)), ForwardDiff.Dual(0.0f0, (0.0, 1.0, 0.0)),
ForwardDiff.Dual(0.0f0, (0.0, 0.0, 1.0))]
tspan = (0.0f0, 100.0f0)
p = (10.0f0, 28.0f0, 8 / 3.0f0)
prob = ODEProblem{true, SciMLBase.FullSpecialize}(lorenz, u0, tspan, p)
prob_func = (prob, i, repeat) -> remake(prob, p = rand(Float32, 3) .* p)
monteprob = EnsembleProblem(prob, prob_func = prob_func)
@time sol = solve(monteprob, Tsit5(), EnsembleGPUArray(CUDA.CUDABackend()),
trajectories = 10_000,
saveat = 1.0f0)
```
| DiffEqGPU | https://github.com/SciML/DiffEqGPU.jl.git |
|
[
"MIT"
] | 3.4.1 | 7a714b856fe00d6194774bc38e387375f06963fe | docs | 3164 | # GPU-Acceleration of a Stiff Nonlinear Partial Differential Equation
The following is a demonstration of a GPU-accelerated implicit solve of a stiff
nonlinear partial differential equation (the Brusselator model):
```@example bruss
using OrdinaryDiffEq, CUDA, LinearAlgebra
const N = 32
const xyd_brusselator = range(0, stop = 1, length = N)
# Localized forcing: strength 5.0 inside a disc of radius 0.1 centered at (0.3, 0.6),
# switched on only for t >= 1.1 (the boolean factors multiply to 0 or 1).
brusselator_f(x, y, t) = (((x - 0.3)^2 + (y - 0.6)^2) <= 0.1^2) * (t >= 1.1) * 5.0
# Periodic wrap of a 1-based grid index: N+1 -> 1 and 0 -> N, otherwise unchanged.
limit(a, N) = a == N + 1 ? 1 : a == 0 ? N : a
# Per-grid-point update for the first field u of the Brusselator state.
# The `let` block captures N, the coordinate vector, and the spacing so the
# closure carries no non-constant global references.
kernel_u! = let N = N, xyd = xyd_brusselator, dx = step(xyd_brusselator)
    @inline function (du, u, A, B, α, II, I, t)
        i, j = Tuple(I)   # 2D grid coordinates of this point
        x = xyd[I[1]]
        y = xyd[I[2]]
        # Periodic boundaries: wrap neighbor indices around the grid.
        ip1 = limit(i + 1, N)
        im1 = limit(i - 1, N)
        jp1 = limit(j + 1, N)
        jm1 = limit(j - 1, N)
        # 5-point Laplacian stencil + Brusselator reaction terms + forcing.
        du[II[i, j, 1]] = α * (u[II[im1, j, 1]] + u[II[ip1, j, 1]] + u[II[i, jp1, 1]] +
                           u[II[i, jm1, 1]] - 4u[II[i, j, 1]]) +
                          B + u[II[i, j, 1]]^2 * u[II[i, j, 2]] - (A + 1) * u[II[i, j, 1]] +
                          brusselator_f(x, y, t)
    end
end
# Per-grid-point update for the second field v of the Brusselator state
# (same stencil as kernel_u!, different reaction terms, no external forcing).
kernel_v! = let N = N, xyd = xyd_brusselator, dx = step(xyd_brusselator)
    @inline function (du, u, A, B, α, II, I, t)
        i, j = Tuple(I)
        # Periodic boundaries: wrap neighbor indices around the grid.
        ip1 = limit(i + 1, N)
        im1 = limit(i - 1, N)
        jp1 = limit(j + 1, N)
        jm1 = limit(j - 1, N)
        # 5-point Laplacian stencil + Brusselator reaction terms.
        du[II[i, j, 2]] = α * (u[II[im1, j, 2]] + u[II[ip1, j, 2]] + u[II[i, jp1, 2]] +
                           u[II[i, jm1, 2]] - 4u[II[i, j, 2]]) +
                          A * u[II[i, j, 1]] - u[II[i, j, 1]]^2 * u[II[i, j, 2]]
    end
end
# Full RHS for the 2D Brusselator: broadcasts the per-point kernels over every
# grid point. The state `u`/`du` is a flat N×N×2 array addressed through
# `II = LinearIndices((N, N, 2))`; p = (A, B, diffusion, dx).
# Note: the previously computed offsets ii1/ii2/ii3 were never used and have
# been removed.
brusselator_2d = let N = N, xyd = xyd_brusselator, dx = step(xyd_brusselator)
    function (du, u, p, t)
        @inbounds begin
            A = p[1]
            B = p[2]
            α = p[3] / dx^2   # diffusion coefficient scaled by the grid spacing
            II = LinearIndices((N, N, 2))
            # `Ref` protects the arrays/index tables from being broadcast over;
            # only the CartesianIndices iterate.
            kernel_u!.(Ref(du), Ref(u), A, B, α, Ref(II), CartesianIndices((N, N)), t)
            kernel_v!.(Ref(du), Ref(u), A, B, α, Ref(II), CartesianIndices((N, N)), t)
            return nothing
        end
    end
end
p = (3.4, 1.0, 10.0, step(xyd_brusselator))
# Construct the Brusselator initial condition on the square grid defined by the
# coordinate vector `xyd`: field 1 depends on the y-coordinate, field 2 on the
# x-coordinate. Returns an N×N×2 Float64 array.
function init_brusselator_2d(xyd)
    n = length(xyd)
    grid = zeros(n, n, 2)
    for j in 1:n, i in 1:n
        x = xyd[i]
        y = xyd[j]
        grid[i, j, 1] = 22 * (y * (1 - y))^(3 / 2)
        grid[i, j, 2] = 27 * (x * (1 - x))^(3 / 2)
    end
    return grid
end
u0 = init_brusselator_2d(xyd_brusselator)
prob_ode_brusselator_2d = ODEProblem(brusselator_2d, u0, (0.0, 11.5), p)
du = similar(u0)
brusselator_2d(du, u0, p, 0.0)
du[34] # 802.9807693762164
du[1058] # 985.3120721709204
du[2000] # -403.5817880634729
du[end] # 1431.1460373522068
du[521] # -323.1677459142322
du2 = similar(u0)
brusselator_2d(du2, u0, p, 1.3)
du2[34] # 802.9807693762164
du2[1058] # 985.3120721709204
du2[2000] # -403.5817880634729
du2[end] # 1431.1460373522068
du2[521] # -318.1677459142322
prob_ode_brusselator_2d_cuda = ODEProblem(brusselator_2d, CuArray(u0), (0.0f0, 11.5f0), p,
tstops = [1.1f0])
solve(prob_ode_brusselator_2d_cuda, Rosenbrock23(), save_everystep = false);
```
| DiffEqGPU | https://github.com/SciML/DiffEqGPU.jl.git |
|
[
"MIT"
] | 3.4.1 | 7a714b856fe00d6194774bc38e387375f06963fe | docs | 184 | # GPU-Accelerated Stochastic Partial Differential Equations
```@raw html
<meta http-equiv="Refresh" content="0; url='https://docs.sciml.ai/Overview/stable/showcase/gpu_spde/'" />
```
| DiffEqGPU | https://github.com/SciML/DiffEqGPU.jl.git |
|
[
"MIT"
] | 3.4.1 | 7a714b856fe00d6194774bc38e387375f06963fe | docs | 1254 | # Batched Reductions for Lowering Peak Memory Requirements
Just as in the regular form of the
[DifferentialEquations.jl ensemble interface](https://docs.sciml.ai/DiffEqDocs/stable/features/ensemble/),
a `reduction` function can be given to reduce between batches. Here we show an example
of running 20 ODEs at a time, grabbing its value at the end, and reducing by summing all
the values. This then allows for only saving the sum of the previous batches, boosting
the trajectory count to an amount that is higher than would fit in memory, and only saving
the summed values.
```@example reductions
using OrdinaryDiffEq, DiffEqGPU, CUDA
seed = 100
using Random;
Random.seed!(seed);
ra = rand(100)
# In-place linear growth ODE: du/dt = 1.01u (scalar state stored in u[1]).
function f!(du, u, p, t)
    du[1] = 1.01 * u[1]
end
prob = ODEProblem(f!, [0.5], (0.0, 1.0))
# Ensemble output function: keep only the final saved value of each trajectory.
# The `false` flag tells the ensemble interface not to rerun this trajectory.
function output_func(sol, i)
    last(sol), false
end
# Scale the initial condition of trajectory `i` by the pre-drawn random number
# `ra[i]` (global vector drawn once above, so the sweep is reproducible).
function prob_func(prob, i, repeat)
    remake(prob, u0 = ra[i] * prob.u0)
end
# Between-batch reduction: fold each batch into the accumulator `u` by summing,
# so only the running sum is kept in memory. `false` means "do not terminate early".
function reduction(u, batch, I)
    u .+ sum(batch), false
end
prob2 = EnsembleProblem(prob, prob_func = prob_func, output_func = output_func,
reduction = reduction, u_init = Vector{eltype(prob.u0)}([0.0]))
sim4 = solve(prob2, Tsit5(), EnsembleGPUArray(CUDA.CUDABackend()), trajectories = 100,
batch_size = 20)
```
| DiffEqGPU | https://github.com/SciML/DiffEqGPU.jl.git |
|
[
"MIT"
] | 3.4.1 | 7a714b856fe00d6194774bc38e387375f06963fe | docs | 1105 | # GPU Parallel Solving of Stochastic Differential Equations
One major application of DiffEqGPU is for computing ensemble statistics of SDE solutions
using `EnsembleGPUArray`. The following demonstrates using this technique to generate
large ensembles of solutions for a diagonal noise SDE with a high order adaptive method:
```@example sde
using DiffEqGPU, CUDA, StochasticDiffEq
# In-place drift term of the stochastic Lorenz system; p = (σ, ρ, β).
function lorenz(du, u, p, t)
    du[1] = p[1] * (u[2] - u[1])
    du[2] = u[1] * (p[2] - u[3]) - u[2]
    du[3] = u[1] * u[2] - p[3] * u[3]
end
# Diagonal multiplicative noise: each component receives 10% proportional noise.
function multiplicative_noise(du, u, p, t)
    du[1] = 0.1 * u[1]
    du[2] = 0.1 * u[2]
    du[3] = 0.1 * u[3]
end
CUDA.allowscalar(false)
u0 = Float32[1.0; 0.0; 0.0]
tspan = (0.0f0, 10.0f0)
p = (10.0f0, 28.0f0, 8 / 3.0f0)
prob = SDEProblem(lorenz, multiplicative_noise, u0, tspan, p)
const pre_p = [rand(Float32, 3) for i in 1:10_000]
prob_func = (prob, i, repeat) -> remake(prob, p = pre_p[i] .* p)
monteprob = EnsembleProblem(prob, prob_func = prob_func)
sol = solve(monteprob, SOSRI(), EnsembleGPUArray(CUDA.CUDABackend()), trajectories = 10_000,
saveat = 1.0f0)
```
| DiffEqGPU | https://github.com/SciML/DiffEqGPU.jl.git |
|
[
"MIT"
] | 3.4.1 | 7a714b856fe00d6194774bc38e387375f06963fe | docs | 1086 | # Compute Backends (GPU Choices)
DiffEqGPU.jl supports a multitude of different GPU devices. These must be chosen during the
construction of `EnsembleGPUArray` and `EnsembleGPUKernel` and correspond
to the compute backends of [KernelAbstractions.jl](https://github.com/JuliaGPU/KernelAbstractions.jl).
The choices for backends are:
- `CUDA.CUDABackend()`: For NVIDIA GPUs via code generation for CUDA kernels.
- `AMDGPU.ROCBackend()`: For AMD GPUs via code generation for ROCm kernels.
- `oneAPI.oneAPIBackend()`: For Intel GPUs via code generation for OneAPI kernels.
- `Metal.MetalBackend()`: For Apple Silicon (M-Series such as M1 or M2) via code generation
for Metal kernels.
This is used for example like `EnsembleGPUKernel(oneAPI.oneAPIBackend())` to enable the
computations for Intel GPUs. The choice of backend is mandatory and requires the installation
of the respective package. Thus for example, using the OneAPI backend requires that the
user has successfully installed [oneAPI.jl](https://github.com/JuliaGPU/oneAPI.jl) and has
an Intel GPU.
| DiffEqGPU | https://github.com/SciML/DiffEqGPU.jl.git |
|
[
"MIT"
] | 3.4.1 | 7a714b856fe00d6194774bc38e387375f06963fe | docs | 931 | # Choosing the Ensemble: EnsembleGPUArray vs EnsembleGPUKernel
The short answer for how to choose an ensemble method is that,
if `EnsembleGPUKernel` works on your problem, you should use it.
A more complex discussion is the following:
- `EnsembleGPUKernel` is more asynchronous and has lower kernel call counts than
`EnsembleGPUArray`. This should amount to lower overhead in any case where the algorithms
are the same.
- `EnsembleGPUKernel` is restrictive on the types of ODE solvers that have been implemented
for it. If the most efficient method is not in the list of GPU kernel methods, it may be
more efficient to use `EnsembleGPUArray` with the better method.
- `EnsembleGPUKernel` requires equations to be written in out-of-place form, along with a
few other restrictions, and thus in some cases can be less automatic than
`EnsembleGPUArray` depending on how the code was originally written.
| DiffEqGPU | https://github.com/SciML/DiffEqGPU.jl.git |
|
[
"MIT"
] | 3.4.1 | 7a714b856fe00d6194774bc38e387375f06963fe | docs | 75 | # EnsembleGPUArray
## API
```@docs
EnsembleGPUArray
EnsembleCPUArray
```
| DiffEqGPU | https://github.com/SciML/DiffEqGPU.jl.git |
|
[
"MIT"
] | 3.4.1 | 7a714b856fe00d6194774bc38e387375f06963fe | docs | 358 | # EnsembleGPUKernel
## [API](@id egk_doc)
```@docs
EnsembleGPUKernel
```
### [Specialized Solvers](@id specialsolvers)
```@docs
GPUTsit5
GPUVern7
GPUVern9
GPUEM
GPUSIEA
GPURosenbrock23
GPURodas4
GPURodas5P
GPUKvaerno3
GPUKvaerno5
```
## Lower Level API
```@docs
DiffEqGPU.vectorized_solve
DiffEqGPU.vectorized_asolve
DiffEqGPU.vectorized_map_solve
```
| DiffEqGPU | https://github.com/SciML/DiffEqGPU.jl.git |
|
[
"MIT"
] | 3.4.1 | 7a714b856fe00d6194774bc38e387375f06963fe | docs | 1264 | # Choosing Optimal Numbers of Trajectories
There is a balance between two things for choosing the number of trajectories:
- The number of trajectories needs to be high enough that the work per kernel
is sufficient to overcome the kernel call cost.
- More trajectories means that every trajectory will need more time steps, since
the adaptivity syncs all solves.
From our testing, the balance is found at around 10,000 trajectories being optimal for
EnsembleGPUArray, since it has higher kernel call costs because every internal operation
of the ODE solver requires a kernel call. Thus, for larger sets of trajectories, use a
batch size of 10,000. Of course, benchmark for yourself on your own setup, as all GPUs
are different.
On the other hand, EnsembleGPUKernel fuses the entire GPU solve into a single kernel,
greatly reducing the kernel call cost. This means longer or more expensive ODE solves
will require even less of a percentage of time kernel launching, making the cutoff
much smaller. We see some cases with around 100 ODEs being viable with EnsembleGPUKernel.
Again, this is highly dependent on the ODE and the chosen GPU, so one will need
to benchmark to get accurate numbers for their system; this is merely a ballpark estimate.
| DiffEqGPU | https://github.com/SciML/DiffEqGPU.jl.git |
|
[
"MIT"
] | 3.4.1 | 7a714b856fe00d6194774bc38e387375f06963fe | docs | 3943 | # [Massively Data-Parallel ODE Solving the Lorenz Equation](@id lorenz)
For example, the following solves the Lorenz equation with 10,000 separate random parameters on the GPU. To start, we create a normal
[`EnsembleProblem` as per DifferentialEquations.jl](https://docs.sciml.ai/DiffEqDocs/stable/features/ensemble/). Here's a perfectly good multithreaded CPU parallelized Lorenz solve
over randomized parameters:
```@example lorenz
using DiffEqGPU, OrdinaryDiffEq, CUDA
# In-place Lorenz system RHS; p = (σ, ρ, β) holds the classic parameters.
function lorenz(du, u, p, t)
    σ, ρ, β = p
    du[1] = σ * (u[2] - u[1])
    du[2] = u[1] * (ρ - u[3]) - u[2]
    du[3] = u[1] * u[2] - β * u[3]
end
u0 = Float32[1.0; 0.0; 0.0]
tspan = (0.0f0, 100.0f0)
p = [10.0f0, 28.0f0, 8 / 3.0f0]
prob = ODEProblem(lorenz, u0, tspan, p)
prob_func = (prob, i, repeat) -> remake(prob, p = rand(Float32, 3) .* p)
monteprob = EnsembleProblem(prob, prob_func = prob_func, safetycopy = false)
sol = solve(monteprob, Tsit5(), EnsembleThreads(), trajectories = 10_000, saveat = 1.0f0);
```
Changing this to being GPU-parallelized is as simple as changing the ensemble method to
`EnsembleGPUArray`:
```@example lorenz
sol = solve(monteprob, Tsit5(), EnsembleGPUArray(CUDA.CUDABackend()), trajectories = 10_000,
saveat = 1.0f0);
```
and voilà, the method is re-compiled to parallelize the solves over a GPU!
While `EnsembleGPUArray` has a bit of overhead due to its form of GPU code construction,
`EnsembleGPUKernel` is a more restrictive GPU-itizing algorithm that achieves a much lower
overhead in kernel launching costs. However, it requires this problem to be written in
out-of-place form and use [special solvers](@ref specialsolvers). This looks like:
```@example lorenz
using DiffEqGPU, OrdinaryDiffEq, StaticArrays, CUDA
# Out-of-place Lorenz RHS returning an SVector, as required by EnsembleGPUKernel
# (the kernel-compiled solvers need immutable, stack-allocated states).
function lorenz2(u, p, t)
    σ = p[1]
    ρ = p[2]
    β = p[3]
    du1 = σ * (u[2] - u[1])
    du2 = u[1] * (ρ - u[3]) - u[2]
    du3 = u[1] * u[2] - β * u[3]
    return SVector{3}(du1, du2, du3)
end
u0 = @SVector [1.0f0; 0.0f0; 0.0f0]
tspan = (0.0f0, 10.0f0)
p = @SVector [10.0f0, 28.0f0, 8 / 3.0f0]
prob = ODEProblem{false}(lorenz2, u0, tspan, p)
prob_func = (prob, i, repeat) -> remake(prob, p = (@SVector rand(Float32, 3)) .* p)
monteprob = EnsembleProblem(prob, prob_func = prob_func, safetycopy = false)
sol = solve(monteprob, GPUTsit5(), EnsembleGPUKernel(CUDA.CUDABackend()),
trajectories = 10_000,
saveat = 1.0f0)
```
Note that this form is also compatible with `EnsembleThreads()`, and `EnsembleGPUArray()`,
so `EnsembleGPUKernel()` simply supports a subset of possible problem types. For more
information on the limitations of `EnsembleGPUKernel()`, see [its docstring](@ref egk_doc).
## Using Stiff ODE Solvers with EnsembleGPUArray
DiffEqGPU also supports more advanced features than shown above. Other tutorials dive into
[handling events or callbacks](@ref events) and [multi-GPU parallelism](@ref multigpu).
But the simplest thing to show is that the generality of solvers allows for other types of
equations. For example, one can handle stiff ODEs with `EnsembleGPUArray` simply by using a
stiff ODE solver. Note that, as explained in the docstring, analytical derivatives
(Jacobian and time gradient) must be supplied. For the Lorenz equation, this looks like:
```@example lorenz
# Analytical Jacobian of the Lorenz RHS, written in-place into J,
# with J[i, j] = ∂f_i/∂u_j for state u = (x, y, z).
function lorenz_jac(J, u, p, t)
    σ = p[1]
    ρ = p[2]
    β = p[3]
    x = u[1]
    y = u[2]
    z = u[3]
    J[1, 1] = -σ
    J[2, 1] = ρ - z
    J[3, 1] = y
    J[1, 2] = σ
    J[2, 2] = -1
    J[3, 2] = x
    J[1, 3] = 0
    J[2, 3] = -x
    J[3, 3] = -β
end
# Analytical time gradient ∂f/∂t. The Lorenz system is autonomous, so the
# gradient is zero; this body intentionally writes nothing into J.
# NOTE(review): this assumes the solver hands over a zero-initialized buffer —
# confirm, otherwise J should be explicitly zeroed here.
function lorenz_tgrad(J, u, p, t)
    nothing
end
u0 = Float32[1.0; 0.0; 0.0]
tspan = (0.0f0, 100.0f0)
p = [10.0f0, 28.0f0, 8 / 3.0f0]
func = ODEFunction(lorenz, jac = lorenz_jac, tgrad = lorenz_tgrad)
prob_jac = ODEProblem(func, u0, tspan, p)
monteprob_jac = EnsembleProblem(prob_jac, prob_func = prob_func)
solve(monteprob_jac, Rodas5(), EnsembleGPUArray(CUDA.CUDABackend()), trajectories = 10_000,
saveat = 1.0f0)
```
| DiffEqGPU | https://github.com/SciML/DiffEqGPU.jl.git |
|
[
"MIT"
] | 3.4.1 | 7a714b856fe00d6194774bc38e387375f06963fe | docs | 3543 | # [Using the Lower Level API for Decreased Overhead with GPU acclerated Ensembles](@id lowerlevel)
`EnsembleGPUKernel` is designed to match the SciML ensemble interface in order to allow for directly
converting CPU code to GPU code without any code changes. However, this hiding of the GPU aspects
decreases the overall performance as it always transfers the problem to the GPU and the result back
to the CPU for the user. These overheads can be removed by directly using the lower level API elements
of EnsembleGPUKernel.
The example below provides a way to generate solves using the lower level API with lower overheads:
```@example lower_level
using DiffEqGPU, OrdinaryDiffEq, StaticArrays, CUDA, DiffEqBase
trajectories = 10_000
# Out-of-place Lorenz RHS returning an SVector (immutable state for the
# kernel-level GPU solve API). p = (σ, ρ, β).
function lorenz(u, p, t)
    σ = p[1]
    ρ = p[2]
    β = p[3]
    du1 = σ * (u[2] - u[1])
    du2 = u[1] * (ρ - u[3]) - u[2]
    du3 = u[1] * u[2] - β * u[3]
    return SVector{3}(du1, du2, du3)
end
u0 = @SVector [1.0f0; 0.0f0; 0.0f0]
tspan = (0.0f0, 10.0f0)
p = @SVector [10.0f0, 28.0f0, 8 / 3.0f0]
prob = ODEProblem{false}(lorenz, u0, tspan, p)
## Building different problems for different parameters
probs = map(1:trajectories) do i
DiffEqGPU.make_prob_compatible(remake(prob, p = (@SVector rand(Float32, 3)) .* p))
end
## Move the arrays to the GPU
probs = cu(probs)
## Finally use the lower API for faster solves! (Fixed time-stepping)
# Run once for compilation
@time CUDA.@sync ts, us = DiffEqGPU.vectorized_solve(probs, prob, GPUTsit5();
save_everystep = false, dt = 0.1f0)
@time CUDA.@sync ts, us = DiffEqGPU.vectorized_solve(probs, prob, GPUTsit5();
save_everystep = false, dt = 0.1f0)
## Adaptive time-stepping
# Run once for compilation
@time CUDA.@sync ts, us = DiffEqGPU.vectorized_asolve(probs, prob, GPUTsit5();
save_everystep = false, dt = 0.1f0)
@time CUDA.@sync ts, us = DiffEqGPU.vectorized_asolve(probs, prob, GPUTsit5();
save_everystep = false, dt = 0.1f0)
```
Note that the core is the function `DiffEqGPU.vectorized_solve` which is the solver for the CUDA-based `probs`
which uses the manually converted problems, and returns `us` which is a vector of CuArrays for the solution.
Similarly, there exists a lower-level API for `EnsembleGPUArray` as well, primarily for benchmarking purposes. The solution
returned for state (`sol.u`) is a matrix having columns as different parameter-parallel solutions for the ensemble problem.
An example is shown below:
```@example lower_level
using DiffEqGPU, OrdinaryDiffEq, StaticArrays, CUDA, DiffEqBase
trajectories = 10_000
# Out-of-place Lorenz RHS returning an SVector (immutable state for the
# lower-level `vectorized_map_solve` API). p = (σ, ρ, β).
function lorenz(u, p, t)
    σ = p[1]
    ρ = p[2]
    β = p[3]
    du1 = σ * (u[2] - u[1])
    du2 = u[1] * (ρ - u[3]) - u[2]
    du3 = u[1] * u[2] - β * u[3]
    return SVector{3}(du1, du2, du3)
end
u0 = @SVector [1.0f0; 0.0f0; 0.0f0]
tspan = (0.0f0, 10.0f0)
p = @SVector [10.0f0, 28.0f0, 8 / 3.0f0]
prob = ODEProblem{false}(lorenz, u0, tspan, p)
## Building different problems for different parameters
batch = 1:trajectories
probs = map(batch) do i
remake(prob, p = (@SVector rand(Float32, 3)) .* p)
end
## Finally use the lower API for faster solves! (Fixed time-stepping)
@time CUDA.@sync sol = DiffEqGPU.vectorized_map_solve(probs, Tsit5(), EnsembleGPUArray(0.0),
batch, false, dt = 0.001f0,
save_everystep = false, dense = false)
## Adaptive time-stepping (Notice the boolean argument)
@time CUDA.@sync sol = DiffEqGPU.vectorized_map_solve(probs, Tsit5(), EnsembleGPUArray(0.0),
batch, true, dt = 0.001f0,
save_everystep = false, dense = false)
```
| DiffEqGPU | https://github.com/SciML/DiffEqGPU.jl.git |
|
[
"MIT"
] | 3.4.1 | 7a714b856fe00d6194774bc38e387375f06963fe | docs | 3705 | # [Setting Up Multi-GPU Parallel Parameter Sweeps](@id multigpu)
!!! note
This tutorial assumes one already has familiarity with EnsembleGPUArray and
EnsembleGPUKernel. Please see [the Lorenz equation tutorial](@ref lorenz) before
reading this tutorial!
In this tutorial, we will show how to increase the number of trajectories that can be
computed in parallel by setting up and using a multi-GPU solve. For this, we will set up
one Julia process for each GPU and let the internal `pmap` system of `EnsembleGPUArray`
parallelize the system across all of our GPUs. Let's dig in.
## Setting Up a Multi-GPU Julia Environment
To set up a multi-GPU environment, first set up processes such that each process
has a different GPU. For example:
```julia
# Setup processes with different CUDA devices
using Distributed
numgpus = 1
addprocs(numgpus)
import CUDA
let gpuworkers = asyncmap(collect(zip(workers(), CUDA.devices()))) do (p, d)
remotecall_wait(CUDA.device!, p, d)
p
end
end
```
Then set up the calls to work with distributed processes:
```julia
@everywhere using DiffEqGPU, CUDA, OrdinaryDiffEq, Test, Random
@everywhere begin
function lorenz_distributed(du, u, p, t)
du[1] = p[1] * (u[2] - u[1])
du[2] = u[1] * (p[2] - u[3]) - u[2]
du[3] = u[1] * u[2] - p[3] * u[3]
end
CUDA.allowscalar(false)
u0 = Float32[1.0; 0.0; 0.0]
tspan = (0.0f0, 100.0f0)
p = [10.0f0, 28.0f0, 8 / 3.0f0]
Random.seed!(1)
function prob_func_distributed(prob, i, repeat)
remake(prob, p = rand(3) .* p)
end
end
```
Now each batch will run on separate GPUs. Thus, we need to use the `batch_size`
keyword argument from the Ensemble interface to ensure there are multiple batches.
Let's solve 40,000 trajectories, batching 10,000 trajectories at a time:
```julia
prob = ODEProblem(lorenz_distributed, u0, tspan, p)
monteprob = EnsembleProblem(prob, prob_func = prob_func_distributed)
@time sol2 = solve(monteprob, Tsit5(), EnsembleGPUArray(CUDA.CUDABackend()),
trajectories = 40_000,
batch_size = 10_000, saveat = 1.0f0)
```
This will `pmap` over the batches, and thus if you have 4 processes each with
a GPU, each batch of 10,000 trajectories will be run simultaneously. If you have
two processes with two GPUs, this will do two sets of 10,000 at a time.
## Example Multi-GPU Script
In this example, we know we have a 2-GPU system (1 eGPU), and we split the work
across the two by directly defining the devices on the two worker processes:
```julia
using DiffEqGPU, CUDA, OrdinaryDiffEq, Test
CUDA.device!(0)
using Distributed
addprocs(2)
@everywhere using DiffEqGPU, CUDA, OrdinaryDiffEq, Test, Random
@everywhere begin
function lorenz_distributed(du, u, p, t)
du[1] = p[1] * (u[2] - u[1])
du[2] = u[1] * (p[2] - u[3]) - u[2]
du[3] = u[1] * u[2] - p[3] * u[3]
end
CUDA.allowscalar(false)
u0 = Float32[1.0; 0.0; 0.0]
tspan = (0.0f0, 100.0f0)
p = [10.0f0, 28.0f0, 8 / 3.0f0]
Random.seed!(1)
pre_p_distributed = [rand(Float32, 3) for i in 1:100_000]
function prob_func_distributed(prob, i, repeat)
remake(prob, p = pre_p_distributed[i] .* p)
end
end
@sync begin
@spawnat 2 begin
CUDA.allowscalar(false)
CUDA.device!(0)
end
@spawnat 3 begin
CUDA.allowscalar(false)
CUDA.device!(1)
end
end
CUDA.allowscalar(false)
prob = ODEProblem(lorenz_distributed, u0, tspan, p)
monteprob = EnsembleProblem(prob, prob_func = prob_func_distributed)
@time sol = solve(monteprob, Tsit5(), EnsembleGPUArray(CUDA.CUDABackend()),
trajectories = 100_000,
batch_size = 50_000, saveat = 1.0f0)
```
| DiffEqGPU | https://github.com/SciML/DiffEqGPU.jl.git |
|
[
"MIT"
] | 3.4.1 | 7a714b856fe00d6194774bc38e387375f06963fe | docs | 796 | # [Massively Parallel ODE Solving with Event Handling and Callbacks](@id events)
```@example parallel_callbacks
using DiffEqGPU, StaticArrays, OrdinaryDiffEq, CUDA
function f(u, p, t)
du1 = -u[1]
return SVector{1}(du1)
end
u0 = @SVector [10.0f0]
prob = ODEProblem{false}(f, u0, (0.0f0, 10.0f0))
prob_func = (prob, i, repeat) -> remake(prob, p = prob.p)
monteprob = EnsembleProblem(prob, safetycopy = false)
condition(u, t, integrator) = t == 4.0f0
affect!(integrator) = integrator.u += @SVector[10.0f0]
gpu_cb = DiscreteCallback(condition, affect!; save_positions = (false, false))
sol = solve(monteprob, GPUTsit5(), EnsembleGPUKernel(CUDA.CUDABackend()),
trajectories = 10,
adaptive = false, dt = 0.01f0, callback = gpu_cb, merge_callbacks = true,
tstops = [4.0f0])
```
| DiffEqGPU | https://github.com/SciML/DiffEqGPU.jl.git |
|
[
"MIT"
] | 3.4.1 | 7a714b856fe00d6194774bc38e387375f06963fe | docs | 1282 | # [Using the EnsembleGPUKernel SDE solvers for the expectation of SDEs ](@id sdeweakconv)
Solving the `SDEProblem` using weak methods on multiple trajectories helps to generate the expectation of the stochastic process.
With the lower overhead of `EnsembleGPUKernel` API, these calculations can be done in parallel on GPU, potentially being faster.
The example below provides a way to calculate the expectation time-series of a linear SDE:
```@example kernel_sde
using DiffEqGPU, OrdinaryDiffEq, StaticArrays, LinearAlgebra, Statistics
num_trajectories = 10_000
# Defining the Problem
# dX = pudt + qudW
u₀ = SA[0.1f0]
f(u, p, t) = SA[p[1] * u[1]]
g(u, p, t) = SA[p[2] * u[1]]
tspan = (0.0f0, 1.0f0)
p = SA[1.5f0, 0.01f0]
prob = SDEProblem(f, g, u₀, tspan, p; seed = 1234)
monteprob = EnsembleProblem(prob)
sol = solve(monteprob, GPUEM(), EnsembleGPUKernel(0.0), dt = Float32(1 // 2^8),
trajectories = num_trajectories, adaptive = false)
sol_array = Array(sol)
ts = sol[1].t
us_calc = reshape(mean(sol_array, dims = 3), size(sol_array, 2))
us_expect = u₀ .* exp.(p[1] * ts)
using Plots
plot(ts, us_expect, lw = 5,
xaxis = "Time (t)", yaxis = "y(t)", label = "True Expected value")
plot!(ts, us_calc, lw = 3, ls = :dash, label = "Calculated Expected value")
```
| DiffEqGPU | https://github.com/SciML/DiffEqGPU.jl.git |
|
[
"MIT"
] | 3.4.1 | 7a714b856fe00d6194774bc38e387375f06963fe | docs | 2080 | # [Within-Method GPU Parallelism of Ordinary Differential Equation Solves](@id withingpu)
Within-Method GPU Parallelism for ODE solvers is a method for accelerating large ODE
solves with regularity, i.e., only using array-based “vectorized” operations like
linear algebra, maps, and broadcast statements. In these cases, the solve can be GPU
accelerated simply by placing the initial condition array on the GPU. As a quick example:
```@example within_gpu
using OrdinaryDiffEq, CUDA, LinearAlgebra
function f(du, u, p, t)
mul!(du, A, u)
end
A = cu(-rand(3, 3))
u0 = cu([1.0; 0.0; 0.0])
tspan = (0.0f0, 100.0f0)
prob = ODEProblem(f, u0, tspan)
sol = solve(prob, Tsit5())
sol = solve(prob, Rosenbrock23())
```
Notice that both stiff and non-stiff ODE solvers were used here.
!!! note
Time span was changed to `Float32` types, as GPUs generally have very slow `Float64`
operations, usually around 1/32 of the speed of `Float32`. `cu(x)` on an array
automatically changes an `Array{Float64}` to a `CuArray{Float32}`. If this is not
intended, use the `CuArray` constructor directly. For more information on GPU
`Float64` performance issues, search around Google for
[discussions like this](https://www.techpowerup.com/forums/threads/nerfed-fp64-performance-in-consumer-gpu-cards.272732/).
!!! warning
`Float32` precision is sometimes not enough precision to accurately solve a
stiff ODE. Make sure that the precision is necessary by investigating the condition
number of the Jacobian. If this value is well-above `1e8`, use `Float32` with caution!
## Restrictions of CuArrays
Note that all the rules of [CUDA.jl](https://cuda.juliagpu.org/stable/) apply when
`CuArrays` are being used in the solver. While for most of the `AbstractArray` interface
they act similarly to `Array`s, such as having valid broadcasting operations (`x .* y`)
defined, they will work on GPUs. For more information on the rules and restrictions of
`CuArrays`, see
[this page from the CUDA.jl documentation](https://cuda.juliagpu.org/stable/usage/array/).
| DiffEqGPU | https://github.com/SciML/DiffEqGPU.jl.git |
|
[
"MIT"
] | 0.1.5 | e5757caef58785c978da4bc16bf234f166c058a4 | code | 652 | using Wilsonloop
using Documenter
# Make `using Wilsonloop` implicit in every doctest block of the package.
DocMeta.setdocmeta!(Wilsonloop, :DocTestSetup, :(using Wilsonloop); recursive=true)

# Build the HTML documentation with Documenter.jl.
makedocs(;
    modules=[Wilsonloop],
    authors="cometscome <[email protected]> and contributors",
    repo="https://github.com/cometscome/Wilsonloop.jl/blob/{commit}{path}#{line}",
    sitename="Wilsonloop.jl",
    format=Documenter.HTML(;
        # Pretty URLs only on CI; local builds keep plain file links.
        prettyurls=get(ENV, "CI", "false") == "true",
        canonical="https://cometscome.github.io/Wilsonloop.jl",
        assets=String[],
    ),
    pages=[
        "Home" => "index.md",
    ],
)

# Publish the built docs to the gh-pages branch.
deploydocs(;
    repo="github.com/cometscome/Wilsonloop.jl",
    devbranch="main",
)
| Wilsonloop | https://github.com/akio-tomiya/Wilsonloop.jl.git |
|
[
"MIT"
] | 0.1.5 | e5757caef58785c978da4bc16bf234f166c058a4 | code | 22417 | module Wilsonloop
export make_staple,
Wilsonline,
make_staple_and_loop,
derive_U,
make_Cμ,
make_plaq_staple,
make_plaq,
loops_staple_prime,
get_position,
derive_Udag,
make_loops_fromname,
make_chair,
get_rightlinks,
get_leftlinks,
get_direction,
loops_plaq,
loops_rect,
check_plaqset,
isdag
using LaTeXStrings
using LinearAlgebra
import Base
import Base.:(==)
# Supertype for a single lattice gauge link (or its adjoint) in Dim dimensions.
abstract type Gaugelink{Dim} end

"""
    GLink{Dim}(direction, position, isdag = false)

A single gauge link ``U_{\\mu}(n)`` on a `Dim`-dimensional lattice.

- `direction`: lattice direction μ of the link.
- `position`: relative lattice site `n`, stored as a `Dim`-tuple of integer offsets.
- `isdag`: `true` if this link stands for the hermitian conjugate ``U^{\\dagger}_{\\mu}(n)``.
"""
struct GLink{Dim} <: Gaugelink{Dim}
    direction::Int8
    position::NTuple{Dim,Int64}
    isdag::Bool
    function GLink{Dim}(direction, position, isdag = false) where {Dim}
        return new{Dim}(direction, position, isdag)
    end
end
# Lazy adjoint wrapper around a `GLink`.
# NOTE(review): the package currently encodes daggered links via `GLink.isdag`
# instead of this wrapper; it is not constructed anywhere in this file and is
# kept only for backward compatibility.
struct Adjoint_GLink{Dim} <: Gaugelink{Dim}
    parent::GLink{Dim}
end
"""
    adjoint(glink::Adjoint_GLink)

Undo the adjoint wrapper: ``(U^{\\dagger})^{\\dagger} = U``, so return the wrapped link.

The previous implementation read `glink.direction` and `glink.position`, fields
that do not exist on `Adjoint_GLink` (they live on `glink.parent`), so any call
raised a field-access error.
"""
function LinearAlgebra.adjoint(glink::Adjoint_GLink{Dim}) where {Dim}
    return glink.parent
end
# Hermitian conjugate of a link: same direction and position, flipped dagger flag.
function LinearAlgebra.adjoint(glink::GLink{Dim}) where {Dim}
    return GLink{Dim}(glink.direction, glink.position, !(glink.isdag))
    # return Adjoint_GLink{Dim}(glink)
end
# Lattice direction μ of the link.
function get_direction(glink::GLink)
    return glink.direction
end

# Relative lattice site n of the link, as a Dim-tuple of offsets.
function get_position(glink::GLink)
    return glink.position
end

# Whether the link represents a hermitian-conjugated (daggered) matrix.
function isdag(glink::GLink)
    return glink.isdag
end

# Copy of the link placed at a new relative position (GLink is immutable).
function set_position(glink::GLink{Dim}, position) where {Dim}
    return GLink{Dim}(glink.direction, position, glink.isdag)
end
"""
    Wilsonline(segments; Dim = 4)

An ordered product of gauge links on a `Dim`-dimensional lattice.

Can be built empty, from an explicit vector of `GLink`s, or from a list of
`(direction, length)` segments such as `[(1, 1), (2, 1), (1, -1), (2, -1)]`
(a plaquette): positive lengths hop forward, negative lengths hop backward
and produce daggered links. Positions are tracked relative to the starting
site `n`.
"""
mutable struct Wilsonline{Dim}
    glinks::Vector{GLink{Dim}}
    #glinks::Array{Union{GLink{Dim},Adjoint_GLink{Dim}},1}
    Wilsonline(; Dim = 4) = new{Dim}([])
    Wilsonline(glinks; Dim = 4) = new{Dim}(glinks)
    function Wilsonline(segments_in::Array{Tuple{T,T},1}; Dim = 4) where {T<:Integer}
        # Expand multi-step segments into unit hops first.
        segments = make_links(segments_in)
        numline = length(segments)
        glinks = Array{GLink{Dim},1}(undef, numline)
        #glinks = Array{Union{GLink{Dim},Adjoint_GLink{Dim}},1}(undef,numline)
        # Current lattice offset from the starting site n.
        position = zeros(Int64, Dim)
        for (i, segment) in enumerate(segments)
            dimension = segment[1]
            hoppingdirection = segment[2]
            if hoppingdirection == 1
                # Forward hop: link sits at the current site, then advance.
                glinks[i] = GLink{Dim}(dimension, Tuple(position))
                position[dimension] += 1
            elseif hoppingdirection == -1
                # Backward hop: step back first, then attach the daggered link there.
                position[dimension] += -1
                glinks[i] = GLink{Dim}(dimension, Tuple(position))'
            else
                error(
                    "hoppingdirection in segment should be 1 or -1. But now $hoppingdirection",
                )
            end
        end
        return new{Dim}(glinks)
    end
end
# Two links are equal iff the dagger flag, the direction, and the relative
# position all agree.
function ==(x::GLink{Dim}, y::GLink{Dim}) where {Dim}
    x.isdag == y.isdag || return false
    return x.direction == y.direction && x.position == y.position
end
# Two Wilson lines are equal iff they have the same length and their links
# agree pairwise, in order.
# (The previous version carried an unused local `flag` and a manual loop.)
function ==(x::Wilsonline{Dim}, y::Wilsonline{Dim}) where {Dim}
    length(x) == length(y) || return false
    return all(i -> x[i] == y[i], 1:length(x))
end
"""
    DwDU{Dim}

Derivative of a Wilson line with respect to one occurrence of a link
``U_{\\mu}``: the line splits into the factors left and right of that link,
``\\partial W / \\partial U_{\\mu} = \\text{left} \\otimes \\text{right}``.

- `parent`: the differentiated Wilson line.
- `insertindex`: index of the removed link inside `parent`.
- `position`: lattice offset of the removed link.
- `leftlinks`/`rightlinks`: factors to the left/right of the removed link.
- `μ`: direction of the removed link.
"""
struct DwDU{Dim}
    parent::Wilsonline{Dim}
    insertindex::Int64
    position::NTuple{Dim,Int64}
    leftlinks::Wilsonline{Dim}
    rightlinks::Wilsonline{Dim}
    μ::Int8
end

# Factor standing to the left of the removed link.
function get_leftlinks(dw::DwDU)
    return dw.leftlinks
end

# Factor standing to the right of the removed link.
function get_rightlinks(dw::DwDU)
    return dw.rightlinks
end

# Lattice offset of the removed link.
function get_position(dw::DwDU)
    return dw.position
end
# A Wilsonline behaves as an indexable container of its gauge links.

# Append a single link to the end of the line.
function Base.push!(w::Wilsonline, link)
    push!(w.glinks, link)
end

# Append all links of another Wilson line.
function Base.append!(w::Wilsonline, a::Wilsonline)
    append!(w.glinks, a.glinks)
end

# Number of links in the line.
function Base.length(w::Wilsonline)
    return length(w.glinks)
end

# i-th link of the line.
function Base.getindex(w::Wilsonline, i)
    return w.glinks[i]
end

# Support w[end].
function Base.lastindex(w::Wilsonline)
    return length(w)
end
# Hermitian conjugate of a Wilson line: dagger every link and reverse the order,
# (U_1 U_2 … U_k)† = U_k† … U_2† U_1†.
function LinearAlgebra.adjoint(w::Wilsonline{Dim}) where {Dim}
    reversed = [w[i]' for i = length(w):-1:1]
    return Wilsonline(reversed; Dim = Dim)
end
# Element-wise adjoint of a collection of Wilson lines.
function LinearAlgebra.adjoint(ws::Array{<:Wilsonline{Dim},1}) where {Dim}
    return eltype(ws)[line' for line in ws]
end
# Ordinal suffix used when labeling loops in printed output ("1-st", "2-nd", ...).
# Previously this four-way branch was copy-pasted into both `show` and `display`.
function _ordinal_suffix(i)
    if i == 1
        return "st"
    elseif i == 2
        return "nd"
    elseif i == 3
        return "rd"
    else
        return "th"
    end
end

# Print each loop in the collection with an ordinal header, writing to `io`.
function Base.show(io::IO, ws::Array{<:Wilsonline{Dim},1}) where {Dim}
    for i = 1:length(ws)
        st = _ordinal_suffix(i)
        println(io, "$i-$st loop")
        show(io, ws[i])
        #display(io,ws[i])
    end
end

# Display each loop in the collection with an ordinal header (stdout/REPL).
function Base.display(ws::Array{<:Wilsonline{Dim},1}) where {Dim}
    for i = 1:length(ws)
        st = _ordinal_suffix(i)
        println("$i-$st loop")
        display(ws[i])
    end
end
# Build the LaTeX site label for a link, e.g. "n+e_{1}-2e_{3}" from its
# relative position tuple.  A zero offset in a direction contributes nothing.
function get_printstring_direction(glink::Gaugelink{Dim}) where {Dim}
    nstring = "n"
    position = get_position(glink)
    for μ = 1:Dim
        m = position[μ]
        if m != 0
            if abs(m) == 1
                # Unit offsets omit the coefficient: ±e_{μ}.
                if m > 0
                    nstring = nstring * "+e_{$(μ)}"
                else
                    nstring = nstring * "-e_{$(μ)}"
                end
            else
                # Larger offsets carry the coefficient: ±|m|e_{μ}.
                if m > 0
                    nstring = nstring * "+$(m)e_{$(μ)}"
                else
                    nstring = nstring * "-$(abs(m))e_{$(μ)}"
                end
            end
        end
    end
    return nstring
end
# Full LaTeX form of one link, e.g. "U^{\dagger}_{2}(n+e_{1})".
function get_printstring(glink::Gaugelink{Dim}) where {Dim}
    dag = glink.isdag ? "^{\\dagger}" : ""
    site = get_printstring_direction(glink)
    return "U$(dag)_{$(get_direction(glink))}($(site))"
end
# Render a single link as a LaTeXString on `io`.
# NOTE(review): `show` methods conventionally return `nothing`; this one also
# returns the LaTeXString — kept for backward compatibility.
function Base.show(io::IO, glink::Gaugelink{Dim}) where {Dim}
    outputstring = get_printstring(glink)
    show(io, latexstring(outputstring))
    return latexstring(outputstring)
end
# Render a Wilson line as the concatenated LaTeX of its links.
function Base.show(io::IO, w::Wilsonline{Dim}) where {Dim}
    body = ""
    for glink in w.glinks
        body *= get_printstring(glink)
    end
    show(io, latexstring(body))
    println("\t")
end
# Staples of w with respect to direction μ: for every occurrence of U_{μ} in w,
# concatenate the links to its right with the links to its left.
function make_staple(w::Wilsonline{Dim}, μ) where {Dim}
    dwdUs = derive_U(w, μ)
    staple = Array{typeof(w),1}(undef, length(dwdUs))
    for (idx, dwdU) in enumerate(dwdUs)
        line = Wilsonline(Dim = Dim)
        append!(line, get_rightlinks(dwdU))
        append!(line, get_leftlinks(dwdU))
        staple[idx] = line
    end
    return staple
end
# C_{μ}: the daggered staples of both w and w† with respect to direction μ.
function make_Cμ(w::Wilsonline{Dim}, μ) where {Dim}
    staples = vcat(make_staple(w, μ), make_staple(w', μ))
    return [s' for s in staples]
end
"""
    make_staple_and_loop(w::Wilsonline{Dim}, μ)

Return the closed loops ``C^{\\dagger} U^{\\dagger}_{\\mu}(n)`` built from the
staples `C` of `w` and `w'` with respect to direction `μ`.
"""
function make_staple_and_loop(w::Wilsonline{Dim}, μ) where {Dim}
    C = make_staple(w, μ)
    append!(C, make_staple(w', μ))
    numstaple = length(C)
    CUdag = Array{typeof(w),1}(undef, numstaple)
    # U_{μ}(n)^† at the origin.  The zero offset must match Dim; the previous
    # hard-coded (0, 0, 0, 0) only worked for Dim = 4.
    Udag = GLink{Dim}(μ, ntuple(_ -> 0, Dim))'
    for i = 1:numstaple
        # adjoint already builds a fresh Wilsonline, so no deepcopy is needed.
        CUdag[i] = C[i]'
        push!(CUdag[i], Udag)
    end
    return CUdag
end
# Indices of the non-daggered links of w that point in direction μ.
function check_link(w, μ)
    return Int64[i for i = 1:length(w) if !w[i].isdag && w[i].direction == μ]
end
# Indices of the daggered links of w that point in direction μ.
function check_link_dag(w, μ)
    return Int64[i for i = 1:length(w) if w[i].isdag && get_direction(w[i]) == μ]
end
"""
like U U U U -> U U otimes U
"""
function derive_U(w::Wilsonline{Dim}, μ) where {Dim}
numlinks = length(w)
linkindices = check_link(w, μ)
numstaples = length(linkindices)
dwdU = Array{DwDU{Dim},1}(undef, numstaples)
for (i, ith) in enumerate(linkindices)
#wi =Wilsonline(Dim=Dim)
rightlinks = Wilsonline(Dim = Dim)
leftlinks = Wilsonline(Dim = Dim)
origin = w[ith].position
position = zero(collect(origin))
position[w[ith].direction] += 1
for j = ith+1:numlinks
link = w[j]
if link.isdag == false
#if typeof(link) <: GLink
link_rev = set_position(link, Tuple(position))
position[get_direction(link)] += 1
else
position[get_direction(link)] += -1
link_rev = set_position(link, Tuple(position))
end
push!(rightlinks, link_rev)
#push!(rightlinks,link)
end
for j = 1:ith-1
link = w[j]
position = collect(get_position(link)) .- origin
link_rev = set_position(link, Tuple(position))
push!(leftlinks, link_rev)
#push!(leftlinks,link)
end
dwdU[i] = DwDU{Dim}(w, ith, origin, leftlinks, rightlinks, μ)
#println("μ = ",μ)
#display(wi)
end
return dwdU
end
"""
like U U U U -> U U otimes U
"""
function derive_Udag(w::Wilsonline{Dim}, μ) where {Dim}
#error("not yet")
numlinks = length(w)
linkindices = check_link_dag(w, μ)
numstaples = length(linkindices)
dwdUdag = Array{DwDU{Dim},1}(undef, numstaples)
for (i, ith) in enumerate(linkindices)
#wi =Wilsonline(Dim=Dim)
rightlinks = Wilsonline(Dim = Dim)
leftlinks = Wilsonline(Dim = Dim)
origin = get_position(w[ith]) #.position
position = zero(collect(origin))
#position[w[ith].direction] += 1
for j = ith+1:numlinks
link = w[j]
push!(rightlinks, link)
end
for j = 1:ith-1
link = w[j]
push!(leftlinks, link)
end
dwdUdag[i] = DwDU{Dim}(w, ith, origin, leftlinks, rightlinks, μ)
#println("μ = ",μ)
#display(wi)
end
return dwdUdag
end
# Print a derivative term as "left ⊗ right δ_{m,n+...}" in LaTeX source form.
# An empty factor is rendered as the identity "I".
function Base.display(dwdU::DwDU{Dim}) where {Dim}
    outputstring = ""
    if length(dwdU.leftlinks.glinks) == 0
        outputstring = outputstring * "I "
    else
        for glink in dwdU.leftlinks.glinks
            outputstring = outputstring * get_printstring(glink)
        end
    end
    outputstring = outputstring * " \\otimes "
    if length(dwdU.rightlinks.glinks) == 0
        outputstring = outputstring * "I "
    else
        for glink in dwdU.rightlinks.glinks
            outputstring = outputstring * get_printstring(glink)
        end
    end
    # Kronecker delta fixing the site of the removed link.
    nstring = get_printstring_direction(dwdU.parent.glinks[dwdU.insertindex])
    outputstring = outputstring * "\\delta_{m,$(nstring)}"
    println(outputstring)
    return outputstring
end
# Print each derivative term with an ordinal header.
# Fixed: the previous version called `println(...)` and `show(dwdU[i])`
# without `io`, writing to stdout and ignoring the stream it was given.
function Base.show(io::IO, dwdU::Array{DwDU{Dim},1}) where {Dim}
    for i = 1:length(dwdU)
        if i == 1
            st = "st"
        elseif i == 2
            st = "nd"
        elseif i == 3
            st = "rd"
        else
            st = "th"
        end
        println(io, "$i-$st loop")
        show(io, dwdU[i])
    end
end
# Render a derivative term "left ⊗ right δ_{m,n+...}" as a LaTeXString on `io`.
# NOTE(review): the trailing `println("\t")` writes to stdout, not `io`, and
# the method returns the raw string instead of `nothing` — kept as-is for
# backward compatibility.
function Base.show(io::IO, dwdU::DwDU{Dim}) where {Dim}
    outputstring = ""
    if length(dwdU.leftlinks.glinks) == 0
        outputstring = outputstring * "I "
    else
        for glink in dwdU.leftlinks.glinks
            outputstring = outputstring * get_printstring(glink)
        end
    end
    outputstring = outputstring * " \\otimes "
    if length(dwdU.rightlinks.glinks) == 0
        outputstring = outputstring * "I "
    else
        for glink in dwdU.rightlinks.glinks
            outputstring = outputstring * get_printstring(glink)
        end
    end
    # Kronecker delta fixing the site of the removed link.
    nstring = get_printstring_direction(dwdU.parent.glinks[dwdU.insertindex])
    outputstring = outputstring * "\\delta_{m,$(nstring)}"
    show(io, latexstring(outputstring))
    println("\t")
    #println(outputstring)
    return outputstring
end
# Expand (direction, length) segments into unit hops:
# (μ, 3) becomes three (μ, 1) entries, (μ, -2) becomes two (μ, -1) entries,
# and a zero-length segment is kept verbatim as (μ, 0).
function make_links(segments::Array{Tuple{T,T},1}) where {T<:Integer}
    links = Tuple{Int8,Int8}[]
    for (dimension, hops) in segments
        if hops == 0
            push!(links, (dimension, 0))
        else
            step = sign(hops)
            for _ = 1:abs(hops)
                push!(links, (dimension, step))
            end
        end
    end
    return links
end
# Elementary plaquette in the μ-ν plane:
# U_μ(n) U_ν(n+e_μ) U_μ†(n+e_ν) U_ν†(n).
function make_plaq(μ, ν; Dim = 4)
    return Wilsonline([(μ, 1), (ν, 1), (μ, -1), (ν, -1)], Dim = Dim)
end
# All independent plaquettes (μ < ν) of a Dim-dimensional lattice.
function make_plaq(; Dim = 4)
    plaquettes = Wilsonline{Dim}[]
    for μ = 1:Dim, ν = (μ+1):Dim
        push!(plaquettes, make_plaq(μ, ν, Dim = Dim))
    end
    return plaquettes
end
# Precomputed lookup table (Dim, μ, ν) -> plaquette, for Dim = 1:4 and μ < ν.
function construct_plaq()
    table = Dict{Tuple{Int8,Int8,Int8},Any}()
    for Dim = 1:4, μ = 1:Dim, ν = (μ+1):Dim
        table[(Dim, μ, ν)] = make_plaq(μ, ν, Dim = Dim)
    end
    return table
end
# All 1x2 and 2x1 rectangular loops (μ < ν) of a Dim-dimensional lattice.
function make_rect(; Dim = 4)
    rects = Wilsonline{Dim}[]
    for μ = 1:Dim, ν = (μ+1):Dim
        # 1x2 rectangle in the μ-ν plane.
        push!(rects, Wilsonline([(μ, 1), (ν, 2), (μ, -1), (ν, -2)], Dim = Dim))
        # 2x1 rectangle in the μ-ν plane.
        push!(rects, Wilsonline([(μ, 2), (ν, 1), (μ, -2), (ν, -1)], Dim = Dim))
    end
    return rects
end
# Precomputed lookup table (Dim, μ, ν, variant) -> rectangular loop, where
# variant 1 is the 1x2 and variant 2 the 2x1 rectangle.
function construct_rect()
    table = Dict{Tuple{Int8,Int8,Int8,Int8},Any}()
    for Dim = 1:4, μ = 1:Dim, ν = (μ+1):Dim
        table[(Dim, μ, ν, 1)] =
            Wilsonline([(μ, 1), (ν, 2), (μ, -1), (ν, -2)], Dim = Dim)
        table[(Dim, μ, ν, 2)] =
            Wilsonline([(μ, 2), (ν, 1), (μ, -2), (ν, -1)], Dim = Dim)
    end
    return table
end
"""
    make_cloverloops(μ, ν; Dim = 4)

The four plaquettes around site `n` in the μ-ν plane (the clover leaves),
in the order right-top, left-top, right-bottom, left-bottom.

Fixed: `Dim` is now forwarded to each `Wilsonline`; previously the keyword
was silently ignored and every loop was built with the default `Dim = 4`.
"""
function make_cloverloops(μ, ν; Dim = 4)
    loops = Wilsonline{Dim}[]
    loop_righttop = Wilsonline([(μ, 1), (ν, 1), (μ, -1), (ν, -1)], Dim = Dim)
    loop_lefttop = Wilsonline([(ν, 1), (μ, -1), (ν, -1), (μ, 1)], Dim = Dim)
    loop_rightbottom = Wilsonline([(ν, -1), (μ, 1), (ν, 1), (μ, -1)], Dim = Dim)
    loop_leftbottom = Wilsonline([(μ, -1), (ν, -1), (μ, 1), (ν, 1)], Dim = Dim)
    push!(loops, loop_righttop)
    push!(loops, loop_lefttop)
    push!(loops, loop_rightbottom)
    push!(loops, loop_leftbottom)
    return loops
end
# A single Polyakov-type straight line of length Lμ in direction μ,
# returned as a one-element vector of loops.
function make_polyakov(μ, Lμ; Dim = 4)
    loops = Wilsonline{Dim}[]
    loop1 = Wilsonline([(μ, Lμ)], Dim = Dim)
    push!(loops, loop1)
    return loops
end
# One straight line of length Lμ for each of the three spatial directions.
function make_polyakov_xyz(Lμ; Dim = 4)
    return Wilsonline{Dim}[Wilsonline([(μ, Lμ)], Dim = Dim) for μ = 1:3]
end
"""
    make_loopforactions(couplinglist, L)

Build the loop sets for a gauge action from a list of term names
(`"plaquette"`, `"rectangular"`, `"chair"`, `"polyakov_x/y/z/t"`).
`L` is the lattice extent per direction; its length fixes `Dim` and supplies
the Polyakov-line lengths.  Returns one vector of loops per name.
"""
function make_loopforactions(couplinglist, L)
    Dim = length(L)
    loops = Array{Array{Wilsonline{Dim},1},1}(undef, length(couplinglist))
    for (i, name) in enumerate(couplinglist)
        if name == "plaquette"
            loops[i] = make_plaq(Dim = Dim)
        elseif name == "rectangular"
            loops[i] = make_rect(Dim = Dim)
        elseif name == "chair"
            loops[i] = make_chair(Dim = Dim)
        elseif name == "polyakov_t"
            # Time direction is by convention the last one.
            μ = Dim
            loops[i] = make_polyakov(μ, L[μ], Dim = Dim)
        elseif name == "polyakov_z"
            @assert Dim > 3 "Dimension should be Dim > 3 but now Dim = $Dim"
            μ = 3
            loops[i] = make_polyakov(μ, L[μ], Dim = Dim)
        elseif name == "polyakov_y"
            @assert Dim > 2 "Dimension should be Dim > 2 but now Dim = $Dim"
            μ = 2
            loops[i] = make_polyakov(μ, L[μ], Dim = Dim)
        elseif name == "polyakov_x"
            μ = 1
            loops[i] = make_polyakov(μ, L[μ], Dim = Dim)
        else
            error("$name is not supported!")
        end
    end
    return loops
end
"""
    make_loops_fromname(name; Dim = 4, L = nothing)

Build a single named loop set (`"plaquette"`, `"rectangular"`, `"chair"`,
`"polyakov_x/y/z/t"`).  Polyakov loops additionally require the lattice
extents `L` (with `length(L) == Dim`) to fix the line length.
"""
function make_loops_fromname(name; Dim = 4, L = nothing)
    if L != nothing
        @assert Dim == length(L)
    end
    if name == "plaquette"
        loops = make_plaq(Dim = Dim)
    elseif name == "rectangular"
        loops = make_rect(Dim = Dim)
    elseif name == "chair"
        loops = make_chair(Dim = Dim)
    elseif name == "polyakov_t"
        @assert L != nothing "system size should be given to obtain polyakov loops. please do like make_loops(\"polyakov_t\";Dim=4,L=[4,4,4,4])"
        # Time direction is by convention the last one.
        μ = Dim
        loops = make_polyakov(μ, L[μ], Dim = Dim)
    elseif name == "polyakov_z"
        @assert L != nothing "system size should be given to obtain polyakov loops. please do like make_loops(\"polyakov_z\";Dim=4,L=[4,4,4,4])"
        @assert Dim > 3 "Dimension should be Dim > 3 but now Dim = $Dim"
        μ = 3
        loops = make_polyakov(μ, L[μ], Dim = Dim)
    elseif name == "polyakov_y"
        @assert L != nothing "system size should be given to obtain polyakov loops. please do like make_loops(\"polyakov_y\";Dim=4,L=[4,4,4,4])"
        @assert Dim > 2 "Dimension should be Dim > 2 but now Dim = $Dim"
        μ = 2
        loops = make_polyakov(μ, L[μ], Dim = Dim)
    elseif name == "polyakov_x"
        @assert L != nothing "system size should be given to obtain polyakov loops. please do like make_loops(\"polyakov_x\";Dim=4,L=[4,4,4,4])"
        μ = 1
        loops = make_polyakov(μ, L[μ], Dim = Dim)
    else
        error("$name is not supported!")
    end
    return loops
end
# All chair-type (bent six-link) loops in four dimensions: for each choice of
# three distinct directions, the six loops obtained by permuting the roles of
# (mu, nu, rho), in the same order as the original hand-unrolled code.
function make_chair(; Dim = 4)
    @assert Dim == 4 "only Dim = 4 is supported now"
    loopset = Wilsonline{Dim}[]
    direction_sets = ((1, 2, 3), (1, 2, 4), (2, 3, 4), (1, 3, 4))
    # Index permutations reproducing the original assignment order:
    # (mu,nu,rho), (mu,rho,nu), (nu,rho,mu), (nu,mu,rho), (rho,mu,nu), (rho,nu,mu).
    perms = ((1, 2, 3), (1, 3, 2), (3, 1, 2), (2, 1, 3), (2, 3, 1), (3, 2, 1))
    for set in direction_sets
        for perm in perms
            mu = set[perm[1]]
            nu = set[perm[2]]
            rho = set[perm[3]]
            segments = [(mu, 1), (nu, 1), (rho, 1), (mu, -1), (rho, -1), (nu, -1)]
            push!(loopset, Wilsonline(segments, Dim = Dim))
        end
    end
    return loopset
end
# All plaquette staples with respect to direction μ: for every plaquette,
# collect the staples of both the plaquette and its adjoint.
function make_plaq_staple(μ; Dim = 4)
    staples = Wilsonline{Dim}[]
    for plaq in make_plaq(Dim = Dim)
        append!(staples, make_staple(plaq, μ))
        append!(staples, make_staple(plaq', μ))
    end
    return staples
end
# Precomputed lookup table (Dim, μ) -> adjoint of the plaquette staples.
function construct_staple_prime()
    loops_staple_prime = Dict{Tuple{Int8,Int8},Any}()
    for Dim = 1:4
        for μ = 1:Dim
            loops_staple_prime[(Dim, μ)] = make_plaq_staple(μ, Dim = Dim)'
        end
    end
    return loops_staple_prime
end
# Precomputed lookup table (Dim, μ) -> plaquette staples.
function construct_staple()
    loops_staple = Dict{Tuple{Int8,Int8},Any}()
    for Dim = 1:4
        for μ = 1:Dim
            loops_staple[(Dim, μ)] = make_plaq_staple(μ, Dim = Dim)
        end
    end
    return loops_staple
end
# Module-level caches, built once at load time, so repeated lookups of the
# standard loop sets do not rebuild them.
const loops_staple_prime = construct_staple_prime()
const loops_staple = construct_staple()
const loops_plaq = construct_plaq()
const loops_rect = construct_rect()
"""
    check_plaqset(wi::Wilsonline{Dim})

Return `true` iff `wi` equals one of the canonical plaquettes in
`loops_plaq` for this dimension.

Fixed: the original `if flag; continue; end` after the inner loop was a
no-op (clearly intended to short-circuit the outer loop), so the search
kept scanning after a match; this version returns on the first hit.
"""
function check_plaqset(wi::Wilsonline{Dim}) where {Dim}
    for μ = 1:Dim, ν = (μ+1):Dim
        if loops_plaq[(Dim, μ, ν)] == wi
            return true
        end
    end
    return false
end
# True iff every loop in the (non-empty) collection is a canonical plaquette.
# NOTE: an empty vector returns false, matching the original accumulator logic.
function check_plaqset(w::Vector{Wilsonline{Dim}}) where {Dim}
    isempty(w) && return false
    for wi in w
        check_plaqset(wi) || return false
    end
    return true
end
"""
    check_rectset(wi::Wilsonline{Dim})

Return `(flag, (μ, ν))` where `flag` is `true` iff `wi` equals one of the
canonical rectangular loops in `loops_rect`, and `(μ, ν)` is the matching
plane (or `(0, 0)` when there is no match).

Fixed: the original `if flag; continue; end` after the inner loop was a
no-op (clearly intended to short-circuit the outer loop), so the search
kept scanning after a match; this version returns on the first hit.
"""
function check_rectset(wi::Wilsonline{Dim}) where {Dim}
    for μ = 1:Dim, ν = (μ+1):Dim
        # Variant 1 is the 1x2 rectangle, variant 2 the 2x1 rectangle.
        for variant = 1:2
            if loops_rect[(Dim, μ, ν, variant)] == wi
                return true, (μ, ν)
            end
        end
    end
    return false, (0, 0)
end
# Check that every loop in the collection is a canonical rectangle.
# Returns `(flag, direction)`.
# NOTE(review): when all elements match, `direction` is the plane of the LAST
# element only (elements may lie in different planes); an empty vector yields
# `(false, (0, 0))`.  Kept as-is for backward compatibility.
function check_rectset(w::Vector{Wilsonline{Dim}}) where {Dim}
    flag = false
    direction = (0, 0)
    for wi in w
        flag, direction = check_rectset(wi)
        if flag != true
            return false, direction
        end
    end
    return flag, direction
end
end
| Wilsonloop | https://github.com/akio-tomiya/Wilsonloop.jl.git |
|
[
"MIT"
] | 0.1.5 | e5757caef58785c978da4bc16bf234f166c058a4 | code | 3085 | using Wilsonloop
using Test
# Smoke test: exercises loop construction, staples, and derivatives, and
# prints their LaTeX representations.  Output is inspected by eye; the
# function makes no assertions itself.
function test()
    # Plaquettes and their staples in all four directions.
    println("plaq")
    plaq = make_plaq()
    display(plaq)
    for μ=1:4
        println("μ = $μ")
        staples = make_plaq_staple(μ)
        display(staples)
    end

    show(loops_staple_prime[(4,4)])
    #return

    # Derivatives of single forward/backward links.
    loop = [(1,+1)]
    println(loop)
    w = Wilsonline(loop)
    loop2 = [(1,-1)]
    w2 = Wilsonline(loop2)
    V1 = derive_U(w,1)
    V2 = derive_U(w2,1)
    show(V1)
    println("d")
    show(V2)
    println("d")

    # A plaquette, its adjoint, and its staples.
    loop = [(1,+1),(2,+1),(1,-1),(2,-1)]
    println(loop)
    w = Wilsonline(loop)
    println("P: ")
    show(w)
    println("P^+: ")
    show(w')
    println("staple")
    for μ=1:4
        println("μ = $μ")
        V1 = make_staple(w,μ)
        V2 = make_staple(w',μ)
        show(V1)
        show(V2)
    end

    println("derive w")
    for μ=1:4
        dU = derive_U(w,μ)
        for i=1:length(dU)
            show(dU[i])
        end
    end

    # C_{μ} and its derivatives with respect to U_{ν}.
    println("-------------------------------------------------------")
    println("C and dC/dU")
    for μ=1:4
        C = make_Cμ(w,μ)
        #=
        V1 = make_staple(w,μ)
        V2 = make_staple(w',μ)
        C = eltype(V1)[]
        for i=1:length(V1)
            push!(C,V1[i]')
        end
        for i=1:length(V2)
            push!(C,V2[i]')
        end
        =#
        println("-------------------------------------------")
        println("μ = $μ")
        for i=1:length(C)
            println("---------------------------------------")
            println("C[$i]: ")
            show(C[i])
            for ν=1:4
                println("-----------------------------")
                println("ν = $ν")
                dCdU = derive_U(C[i],ν)
                println("dC_{$μ}/dU_{$ν}: ")
                for j=1:length(dCdU)
                    show(dCdU[j])
                end
            end
        end
    end

    # C_{μ} and its derivatives with respect to U†_{ν}.
    println("-------------------------------------------------------")
    println("C and dC/dUdag")
    for μ=1:4
        C = make_Cμ(w,μ)
        #=
        V1 = make_staple(w,μ)
        V2 = make_staple(w',μ)
        C = eltype(V1)[]
        for i=1:length(V1)
            push!(C,V1[i]')
        end
        for i=1:length(V2)
            push!(C,V2[i]')
        end
        =#
        println("-------------------------------------------")
        println("μ = $μ")
        for i=1:length(C)
            println("---------------------------------------")
            println("C[$i]: ")
            show(C[i])
            for ν=1:4
                println("-----------------------------")
                println("ν = $ν")
                dCdU = derive_Udag(C[i],ν)
                println("dC_{$μ}/dUdag_{$ν}: ")
                for j=1:length(dCdU)
                    show(dCdU[j])
                end
            end
        end
    end
    #=
    w = Wilsonline(loop)
    println("P: ")
    display(w)
    println("P^+: ")
    display(w')
    println("staple")
    make_staples(w)
    =#
end
@testset "Wilsonloop.jl" begin
test()
@test true
# Write your tests here.
end
| Wilsonloop | https://github.com/akio-tomiya/Wilsonloop.jl.git |
|
[
"MIT"
] | 0.1.5 | e5757caef58785c978da4bc16bf234f166c058a4 | docs | 9420 | # Wilsonloop.jl [](https://github.com/akio-tomiya/Wilsonloop.jl/actions/workflows/CI.yml)
# Abstract
In Lattice Quantum Chromo-Dynamics (QCD), the gauge action is constructed by gauge invariant objects, Wilson loops, in discretized spacetime.
Wilsonloop.jl helps us handle Wilson loops and generic Wilson lines for any Nc and in any number of dimensions.
This is a package for lattice QCD codes.
<img src="LQCDjl_block.png" width=300>
This package will be used in [LatticeQCD.jl](https://github.com/akio-tomiya/LatticeQCD.jl).
# What this package can do
- From a symbolic definition of Wilson lines, this returns SU(Nc)-valued Wilson lines as objects
- Constructing all staples from given symbolic Wilson lines
- Constructing derivatives of given symbolic Wilson lines (auto-grad for SU(Nc) variables)
# How to install
```julia
add Wilsonloop
```
# Notation warning
In Julia, adjoint represents *hermitian conjugate*, and we follow this terminology.
For example ``Adjoint_GLink`` means hermitian conjugate of a gauge link, not the link in the adjoint representation.
Please do not confuse with a link in the adjoint representation in conventional lattice QCD context.
We do not support links in adjoint representation.
# Basic idea
This package defines ```Wilsonline{Dim}``` type.
```julia
mutable struct Wilsonline{Dim}
glinks::Array{GLink{Dim},1}
end
```
This is a array of ```GLink{Dim}```.
The ```GLink{Dim}``` is defined as
```julia
abstract type Gaugelink{Dim} end
struct GLink{Dim} <: Gaugelink{Dim}
direction::Int8
position::NTuple{Dim,Int64}
isdag::Bool
end
```
```GLink{Dim}``` has a direction of a bond on the lattice and relative position $U_{\mu}(n)$.
The direction and position are obtained by ```get_direction(a)``` and ```get_position(a)```, respectively.
For example if we want to have 2nd link of the Wilson loop ```w```, just do ```get_position(w[2])```.
# How to use
## Plaquette and its staple
We can easily generate a plaquette.
```julia
println("plaq")
plaq = make_plaq()
display(plaq)
```
The output is
```
plaq
1-st loop
L"$U_{1}(n)U_{2}(n+e_{1})U^{\dagger}_{1}(n+e_{2})U^{\dagger}_{2}(n)$"
2-nd loop
L"$U_{1}(n)U_{3}(n+e_{1})U^{\dagger}_{1}(n+e_{3})U^{\dagger}_{3}(n)$"
3-rd loop
L"$U_{1}(n)U_{4}(n+e_{1})U^{\dagger}_{1}(n+e_{4})U^{\dagger}_{4}(n)$"
4-th loop
L"$U_{2}(n)U_{3}(n+e_{2})U^{\dagger}_{2}(n+e_{3})U^{\dagger}_{3}(n)$"
5-th loop
L"$U_{2}(n)U_{4}(n+e_{2})U^{\dagger}_{2}(n+e_{4})U^{\dagger}_{4}(n)$"
6-th loop
L"$U_{3}(n)U_{4}(n+e_{3})U^{\dagger}_{3}(n+e_{4})U^{\dagger}_{4}(n)$"
```
If we want to consider 2D system, we can do ```make_plaq(Dim=2)```.
The staple of the plaquette is given as
```julia
for μ=1:4
println("μ = $μ")
staples = make_plaq_staple(μ)
display(staples)
end
```
The output is
```
μ = 1
1-st loop
L"$U_{2}(n+e_{1})U^{\dagger}_{1}(n+e_{2})U^{\dagger}_{2}(n)$"
2-nd loop
L"$U^{\dagger}_{2}(n+e_{1}-e_{2})U^{\dagger}_{1}(n-e_{2})U_{2}(n-e_{2})$"
3-rd loop
L"$U_{3}(n+e_{1})U^{\dagger}_{1}(n+e_{3})U^{\dagger}_{3}(n)$"
4-th loop
L"$U^{\dagger}_{3}(n+e_{1}-e_{3})U^{\dagger}_{1}(n-e_{3})U_{3}(n-e_{3})$"
5-th loop
L"$U_{4}(n+e_{1})U^{\dagger}_{1}(n+e_{4})U^{\dagger}_{4}(n)$"
6-th loop
L"$U^{\dagger}_{4}(n+e_{1}-e_{4})U^{\dagger}_{1}(n-e_{4})U_{4}(n-e_{4})$"
μ = 2
1-st loop
L"$U^{\dagger}_{1}(n-e_{1}+e_{2})U^{\dagger}_{2}(n-e_{1})U_{1}(n-e_{1})$"
2-nd loop
L"$U_{1}(n+e_{2})U^{\dagger}_{2}(n+e_{1})U^{\dagger}_{1}(n)$"
3-rd loop
L"$U_{3}(n+e_{2})U^{\dagger}_{2}(n+e_{3})U^{\dagger}_{3}(n)$"
4-th loop
L"$U^{\dagger}_{3}(n+e_{2}-e_{3})U^{\dagger}_{2}(n-e_{3})U_{3}(n-e_{3})$"
5-th loop
L"$U_{4}(n+e_{2})U^{\dagger}_{2}(n+e_{4})U^{\dagger}_{4}(n)$"
6-th loop
L"$U^{\dagger}_{4}(n+e_{2}-e_{4})U^{\dagger}_{2}(n-e_{4})U_{4}(n-e_{4})$"
μ = 3
1-st loop
L"$U^{\dagger}_{1}(n-e_{1}+e_{3})U^{\dagger}_{3}(n-e_{1})U_{1}(n-e_{1})$"
2-nd loop
L"$U_{1}(n+e_{3})U^{\dagger}_{3}(n+e_{1})U^{\dagger}_{1}(n)$"
3-rd loop
L"$U^{\dagger}_{2}(n-e_{2}+e_{3})U^{\dagger}_{3}(n-e_{2})U_{2}(n-e_{2})$"
4-th loop
L"$U_{2}(n+e_{3})U^{\dagger}_{3}(n+e_{2})U^{\dagger}_{2}(n)$"
5-th loop
L"$U_{4}(n+e_{3})U^{\dagger}_{3}(n+e_{4})U^{\dagger}_{4}(n)$"
6-th loop
L"$U^{\dagger}_{4}(n+e_{3}-e_{4})U^{\dagger}_{3}(n-e_{4})U_{4}(n-e_{4})$"
μ = 4
1-st loop
L"$U^{\dagger}_{1}(n-e_{1}+e_{4})U^{\dagger}_{4}(n-e_{1})U_{1}(n-e_{1})$"
2-nd loop
L"$U_{1}(n+e_{4})U^{\dagger}_{4}(n+e_{1})U^{\dagger}_{1}(n)$"
3-rd loop
L"$U^{\dagger}_{2}(n-e_{2}+e_{4})U^{\dagger}_{4}(n-e_{2})U_{2}(n-e_{2})$"
4-th loop
L"$U_{2}(n+e_{4})U^{\dagger}_{4}(n+e_{2})U^{\dagger}_{2}(n)$"
5-th loop
L"$U^{\dagger}_{3}(n-e_{3}+e_{4})U^{\dagger}_{4}(n-e_{3})U_{3}(n-e_{3})$"
6-th loop
L"$U_{3}(n+e_{4})U^{\dagger}_{4}(n+e_{3})U^{\dagger}_{3}(n)$"
1-st loop
L"$U^{\dagger}_{1}(n-e_{1})U_{4}(n-e_{1})U_{1}(n-e_{1}+e_{4})$"
2-nd loop
L"$U_{1}(n)U_{4}(n+e_{1})U^{\dagger}_{1}(n+e_{4})$"
3-rd loop
L"$U^{\dagger}_{2}(n-e_{2})U_{4}(n-e_{2})U_{2}(n-e_{2}+e_{4})$"
4-th loop
L"$U_{2}(n)U_{4}(n+e_{2})U^{\dagger}_{2}(n+e_{4})$"
5-th loop
L"$U^{\dagger}_{3}(n-e_{3})U_{4}(n-e_{3})U_{3}(n-e_{3}+e_{4})$"
6-th loop
L"$U_{3}(n)U_{4}(n+e_{3})U^{\dagger}_{3}(n+e_{4})$"
```
## Input loops
The arbitrary Wilson loop is constructed as
```julia
loop = [(1,+1),(2,+1),(1,-1),(2,-1)]
println(loop)
w = Wilsonline(loop)
println("P: ")
show(w)
```
Its adjoint is calculated as
```julia
println("P^+: ")
show(w')
```
Its staple is calculated as
```julia
println("staple")
for μ=1:4
println("μ = $μ")
V1 = make_staple(w,μ)
V2 = make_staple(w',μ)
show(V1)
show(V2)
end
```
## Derivatives
The derivative of the lines $dw/dU_{\mu}$ is calculated as
```julia
println("derive w")
for μ=1:4
dU = derive_U(w,μ)
for i=1:length(dU)
show(dU[i])
end
end
```
Note that the derivative is a rank-4 tensor.
The output is
```
L"$I \otimes U_{2}(n+e_{1})U^{\dagger}_{1}(n+e_{2})U^{\dagger}_{2}(n)\delta_{m,n}$"
L"$U_{1}(n-e_{1}) \otimes U^{\dagger}_{1}(n-e_{1}+e_{2})U^{\dagger}_{2}(n-e_{1})\delta_{m,n+e_{1}}$"
```
The derivatives are usually used for making the smearing of the gauge fields (Stout smearing can be used in Gaugefields.jl).
# Examples
## Long lines and its staple
```julia
mu = 1
nu = 2
rho = 3
loops = [(mu,2),(nu,1),(rho,3),(mu,-2),(rho,-3),(nu,-1)]
w = Wilsonline(loops)
```
```
L"$U_{1}(n)U_{1}(n+e_{1})U_{2}(n+2e_{1})U_{3}(n+2e_{1}+e_{2})U_{3}(n+2e_{1}+e_{2}+e_{3})U_{3}(n+2e_{1}+e_{2}+2e_{3})U^{\dagger}_{1}(n+e_{1}+e_{2}+3e_{3})U^{\dagger}_{1}(n+e_{2}+3e_{3})U^{\dagger}_{3}(n+e_{2}+2e_{3})U^{\dagger}_{3}(n+e_{2}+e_{3})U^{\dagger}_{3}(n+e_{2})U^{\dagger}_{2}(n)$"
```
Its staple:
```julia
staple = make_staple(w,mu)
```
```
1-st loop
L"$U_{1}(n+e_{1})U_{2}(n+2e_{1})U_{3}(n+2e_{1}+e_{2})U_{3}(n+2e_{1}+e_{2}+e_{3})U_{3}(n+2e_{1}+e_{2}+2e_{3})U^{\dagger}_{1}(n+e_{1}+e_{2}+3e_{3})U^{\dagger}_{1}(n+e_{2}+3e_{3})U^{\dagger}_{3}(n+e_{2}+2e_{3})U^{\dagger}_{3}(n+e_{2}+e_{3})U^{\dagger}_{3}(n+e_{2})U^{\dagger}_{2}(n)$"
2-nd loop
L"$U_{2}(n+e_{1})U_{3}(n+e_{1}+e_{2})U_{3}(n+e_{1}+e_{2}+e_{3})U_{3}(n+e_{1}+e_{2}+2e_{3})U^{\dagger}_{1}(n+e_{2}+3e_{3})U^{\dagger}_{1}(n-e_{1}+e_{2}+3e_{3})U^{\dagger}_{3}(n-e_{1}+e_{2}+2e_{3})U^{\dagger}_{3}(n-e_{1}+e_{2}+e_{3})U^{\dagger}_{3}(n-e_{1}+e_{2})U^{\dagger}_{2}(n-e_{1})U_{1}(n-e_{1})$"
```
## Derivative of Wilson line
The derivative of the staple
```julia
dev = derive_U(staple[1],nu)
```
```
L"$U_{1}(n-e_{1}) \otimes U_{3}(n+e_{2})U_{3}(n+e_{2}+e_{3})U_{3}(n+e_{2}+2e_{3})U^{\dagger}_{1}(n-e_{1}+e_{2}+3e_{3})U^{\dagger}_{1}(n-2e_{1}+e_{2}+3e_{3})U^{\dagger}_{3}(n-2e_{1}+e_{2}+2e_{3})U^{\dagger}_{3}(n-2e_{1}+e_{2}+e_{3})U^{\dagger}_{3}(n-2e_{1}+e_{2})U^{\dagger}_{2}(n-2e_{1})\delta_{m,n+2e_{1}}$"
```
The derivative of the Wilson loops with respect to a link is a rank-4 tensor ([ref](https://arxiv.org/abs/2103.11965)), which is expressed as
<img src="https://latex.codecogs.com/svg.image?\frac{\partial&space;V}{\partial&space;U}&space;=&space;\sum_{i}&space;A_i&space;\otimes&space;B_i" title="\frac{\partial V}{\partial U} = \sum_{i} A_i \otimes B_i" />
, where A and B are matrices.
We can get the A and B matrices, expressed by ```Wilsonline{Dim}``` type :
```julia
devl = get_leftlinks(dev[1])
devr = get_rightlinks(dev[1])
```
## The derivative of the action
The action is usually expressed as
<img src="https://latex.codecogs.com/svg.image?S&space;=&space;\sum_{n}&space;{\rm&space;Tr}&space;U(n)V(n)" title="S = \sum_{n} {\rm Tr} U(n)V(n)" />
The derivative of the action is
<img src="https://latex.codecogs.com/svg.image?\frac{\partial&space;S}{\partial&space;U_{\mu}(m)}&space;=&space;\sum_{n}&space;\frac{\partial&space;{\rm&space;Tr}U_{\mu}(n)V(n)&space;}{\partial&space;U_{\mu}(m)}&space;=&space;&space;V(m)" title="\frac{\partial S}{\partial U_{\mu}(m)} = \sum_{n} \frac{\partial {\rm Tr}U_{\mu}(n)V(n) }{\partial U_{\mu}(m)} = V(m)" />
Therefore, the staple V is important to get the derivative.
Note that we define the derivative as
<img src="https://latex.codecogs.com/svg.image?\left[&space;\frac{\partial&space;S}{\partial&space;U_{\mu}(m)}&space;\right]_{ij}&space;\equiv&space;\frac{\partial&space;S}{\partial&space;[U_{\mu}(m)]_{ji}&space;}&space;" title="\left[ \frac{\partial S}{\partial U_{\mu}(m)} \right]_{ij} \equiv \frac{\partial S}{\partial [U_{\mu}(m)]_{ji} } " />
| Wilsonloop | https://github.com/akio-tomiya/Wilsonloop.jl.git |
|
[
"MIT"
] | 0.1.5 | e5757caef58785c978da4bc16bf234f166c058a4 | docs | 188 | ```@meta
CurrentModule = Wilsonloop
```
# Wilsonloop
Documentation for [Wilsonloop](https://github.com/akio-tomiya/Wilsonloop.jl).
```@index
```
```@autodocs
Modules = [Wilsonloop]
```
| Wilsonloop | https://github.com/akio-tomiya/Wilsonloop.jl.git |
|
[
"MIT"
] | 0.1.1 | e819b0d814ad86f0b6edbd52278fe6f5660c7fec | code | 2450 | """
Created in April, 2022 by
[chifi - an open source software dynasty.](https://github.com/orgs/ChifiSource)
by team
[odd-data](https://github.com/orgs/ChifiSource/teams/odd-data)
This software is MIT-licensed.
### IPyCells.jl
IPyCells.jl provides a consistent cell format across three different cell
file types: it handles .ipynb files, plain .jl files, and Pluto .jl files
through one easy-to-use module.
"""
module IPyCells
import Base: string, read, getindex
using JSON
using Random
__precompile__()
"""
### abstract type AbstractCell
An abstracted cell type, primarily used for the IPy.Cell constructor.
##### Consistencies
- id::String
- source::String
- outputs::Any
- n::Int64
"""
abstract type AbstractCell end
"""
## Cell(::Any, ::String, ::Any, ::Dict, ::Integer) <: AbstractCell
The cell type is just a Julian equivalent to the JSON data that is read in
for Jupyter cells.
### fields
- id::String
- outputs::Any - Output of the cell
- type::String - Cell type (code/md)
- source::String - The content of the cell
- n::Integer - The execution position of the cell.
### Constructors
- Cell(n::Int64, type::String, content::String, outputs::Any = "") Constructs cells from a dictionary of cell-data.
"""
mutable struct Cell{T} <: AbstractCell
id::String
type::String
source::String
outputs::Any
n::Int64
function Cell(n::Int64, type::String, content::String,
outputs::Any = ""; id::String = "")
if id == ""
Random.seed!(rand(1:100000))
id = randstring(10)::String
end
new{Symbol(type)}(id, type, content, outputs, n)::Cell{<:Any}
end
end
"""
## string(cell::Cell{<:Any}) -> ::String
Converts a cell to a `String`, used by `IPy.save` to write different cell types.
### example
```julia
cells = read_plto("myfile.jl")
```
"""
function string(cell::Cell{<:Any})
if cell.source != ""
return(*(cell.source,
"\n#==output[$(cell.type)]\n$(string(cell.outputs))\n==#\n#==|||==#\n"))::String
end
""::String
end
# Markdown cells serialize as a triple-quoted string literal followed by the
# cell separator.
string(cell::Cell{:markdown}) = "\"\"\"$(cell.source)\"\"\"\n#==|||==#\n"

# Documentation cells serialize as a bare triple-quoted block, with no
# trailing cell separator.
string(cell::Cell{:doc}) = "\"\"\"\n$(cell.source)\n\"\"\""
"""
## getindex(v::Vector{Cell{<:Any}}, s::String) -> ::Cell
Looks up a cell by its `id`, so `cells["a1b2c3d4e5"]` works. Throws a
`KeyError` when no cell with that id exists.
"""
function getindex(v::Vector{Cell{<:Any}}, s::String)
    # findfirst stops at the first match (findall(...)[1] scanned the whole
    # vector) and lets us raise a meaningful KeyError for a missing id
    # instead of an opaque BoundsError on an empty result.
    pos = findfirst(c -> c.id == s, v)
    pos === nothing && throw(KeyError(s))
    v[pos]
end
include("IPyRW.jl")
export ipynbjl
end # module
| IPyCells | https://github.com/ChifiSource/IPyCells.jl.git |
|
[
"MIT"
] | 0.1.1 | e819b0d814ad86f0b6edbd52278fe6f5660c7fec | code | 6341 |
"""
    plto_cell_lines(uri::String)

Scans the Pluto notebook at `uri` and returns a `Dict` mapping a running cell
number to that cell's line range (marker line through the line before the
next marker). Cells are delimited by lines containing the "# ╔═╡" marker;
the region after the final marker is not recorded.
"""
function plto_cell_lines(uri::String)
    cellpos = Dict()
    cellstart = 0
    ncells = 0
    for (lineno, line) in enumerate(readlines(uri))
        occursin("# ╔═╡", line) || continue
        if cellstart == 0
            # first marker seen: remember where the first cell begins
            cellstart = lineno
        else
            # a new marker closes the previous cell
            ncells += 1
            push!(cellpos, ncells => cellstart:lineno - 1)
            cellstart = lineno
        end
    end
    return cellpos
end
"""
## read_plto(path::String) -> ::Vector{<:AbstractCell}
Reads a pluto file into IPy cells.
### example
```julia
cells = read_plto("myfile.jl")
```
"""
function read_plto(uri::String)
cells::Vector{Cell} = []
cellpos = plto_cell_lines(uri)
x = readlines(uri)
[begin
unprocessed_uuid = x[cell[1]]
text_data = x[cell[2:end]]
Cell(n, "code", string(text_data))
end for (n, cell) in enumerate(values(cellpos))]
end
"""
## read_jlcells(path::String) -> ::Vector{<:AbstractCell}
Reads in an `IPy.save` saved Julia file.
### example
```julia
cells = read_jlcells("myfile.jl")
```
"""
function jlcells(str::String)
lines = split(str, "#==|||==#")
[begin
if contains(s, "#==output")
outpfirst = findfirst("#==output", s)
ctypeend = findnext("]", s, maximum(outpfirst))[1]
celltype = s[maximum(outpfirst) + 2:ctypeend - 1]
outpend = findnext("==#", s, outpfirst[1])
outp = ""
if ~(isnothing(outpend))
outp = s[ctypeend + 2:outpend[1] - 1]
end
inp = s[1:outpfirst[1] - 2]
Cell(n, string(celltype), string(inp), string(outp))
elseif contains(s, "\"\"\"")
rp = replace(s, "\n" => "")
if contains(rp[1:3], "\"\"\"") && contains(rp[length(rp) - 4:length(rp)], "\"\"\"")
inp = replace(s, "\"\"\"" => "")
Cell(n, "markdown", string(inp))
else
Cell(n, "code", string(s))
end
else
Cell(n, "code", string(s))
end
end for (n, s) in enumerate(lines)]::AbstractVector
end
read_jlcells(path::String) = jlcells(read(path, String))
"""
## read_jl(path::String) -> ::Vector{<:AbstractCell}
Reads in a Vector of cells from a Julia file. If the file is found to contain
IPy style output, this function will promptly redirect to `read_jlcells`. If
the file is found to contain `Pluto` output, it will be redirected to
`read_plto`.
### example
```julia
cells = read_jl("myfile.jl")
```
"""
function read_jl(uri::String)
readin = read(uri, String)
if contains(readin, "═╡")
return(read_plto(uri))
end
if contains(readin, "#==output[") && contains(readin, "#==|||==#")
return(read_jlcells(uri))
end
lines = split(readin, "\n\n")
[Cell(n, "code", string(s)) for (n, s) in enumerate(lines)]::AbstractVector
end
"""
## save(cells::Vector{<:AbstractCell}, path::String) -> _
Saves cells as Julia file.
### example
```julia
cells = read_jl("myfile.jl")
save(cells, "myfile.jl")
```
"""
function save(cells::Vector{<:AbstractCell}, path::String)
open(path, "w") do file
output::String = join([string(cell) for cell in cells])
write(file, output)
end
end
"""
## save_ipynb(cells::Vector{<:AbstractCell}, path::String) -> _
Saves cells as IPython notebook file. **Note that as of right now, this currently
breaks the IJulia reading of the file -- this will (hopefully) be fixed in future
IPy releases**.
### example
```julia
cells = read_jl("myfile.jl")
save(cells, "myfile.jl")
```
"""
function save_ipynb(cells::Vector{<:AbstractCell}, path::String)
newd = Dict{String, Any}("nbformat_minor" => 4, "nbformat" => 4)
newcells = Dict{String, Any}()
metadata = Dict{String, Any}()
lang_info = Dict{String, Any}("file_extension" => ".jl",
"mimetype" => "application/julia", "name" => "julia",
"version" => string(VERSION))
kern_spec = Dict{String, Any}("name" => "julia-$(split(string(VERSION), ".")[1:2])",
"display_name" => "Julia $(string(VERSION))", "language" => "julia")
push!(metadata, "language_info" => lang_info,
"kernelspec" => kern_spec)
ncells = Dict([begin
cell.n = e
outp = Dict{String, Any}("output_type" => "execute_result",
"data" => Dict{String, Any}("text/plain" => Any["$(cell.outputs)"]),
"metadata" => Dict{String, Any}(),
"execution_count" => cell.n)
cell.n => Dict(cell.n => Dict{String, Any}("execution_count" => cell.n,
"metadata" => Dict{String, Any}(), "source" => Any[cell.source],
"cell_type" => cell.type, "outputs" => outp))
end for (e, cell) in enumerate(cells)])
push!(newd, "metadata" => metadata, "cells" => ncells)
open(path, "w") do io
JSON.print(io, newd)
end
newd
end
"""
## read_ipynb(f::String) -> ::Vector{Cell}
Reads an IPython notebook into a vector of cells.
### example
```
cells = read_ipynb("helloworld.ipynb")
```
"""
function read_ipynb(f::String)
file::String = read(f, String)
j::Dict = JSON.parse(file)
[begin
outputs = ""
ctype = cell["cell_type"]
source = string(join(cell["source"]))
if "outputs" in keys(cell)
#== if length(cell["outputs"][1]["data"]) > 0
println(cell["outputs"][1]["data"])
outputs = join([v for v in values(cell["outputs"][1]["data"])])
end ==#
end
Cell(n, ctype, source, outputs)
end for (n, cell) in enumerate(j["cells"])]::AbstractVector
end
"""
## ipynbjl(ipynb_path::String, output_path::String)
Reads notebook at **ipynb_path** and then outputs as .jl Julia file to
**output_path**.
### example
```
ipynbjl("helloworld.ipynb", "helloworld.jl")
```
"""
function ipyjl(ipynb_path::String, output_path::String)
cells = read_ipynb(ipynb_path)
output = save(cells, output_path)
end
"""
### sep(::Any) -> ::String
---
Separates and parses lines of individual cell content via an array of strings.
Returns string of concetenated text. Basically, the goal of sep is to ignore
n exit code inside of the document.
### example
```
```
"""
function sep(content::Any)
total = string()
if length(content) == 0
return("")
end
for line in content
total = total * string(line)
end
total
end
| IPyCells | https://github.com/ChifiSource/IPyCells.jl.git |
|
[
"MIT"
] | 0.1.1 | e819b0d814ad86f0b6edbd52278fe6f5660c7fec | code | 198 | # 1
# Comment example
# 2
println("Hello world!")
# 3
function this(is::Int64)
multiple = "lines"
end
# 0
cellmd0 = """# This should be a multi-line string (Markdown)"""
# 4
println("yay")
# 0
| IPyCells | https://github.com/ChifiSource/IPyCells.jl.git |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.