licenses
sequencelengths 1
3
| version
stringclasses 677
values | tree_hash
stringlengths 40
40
| path
stringclasses 1
value | type
stringclasses 2
values | size
stringlengths 2
8
| text
stringlengths 25
67.1M
| package_name
stringlengths 2
41
| repo
stringlengths 33
86
|
---|---|---|---|---|---|---|---|---|
[
"MIT"
] | 0.4.0 | 72f9df4854d3ee73097ce9f89871557848692541 | code | 1081 | abstract type ShortReads{A<:DNAAlphabet} <: ReadDatastore{LongSequence{A}} end
@inline BioSequences.bits_per_symbol(prds::ShortReads{A}) where {A<:DNAAlphabet} = BioSequences.bits_per_symbol(A())
@inline _offset_of_sequence(sr::ShortReads, idx::Integer) = _read_data_begin(sr) + (_bytes_per_read(sr) * (idx - 1))
@inline function inbounds_load_sequence!(ds::ShortReads{A}, i::Integer, seq::LongSequence{A}) where {A<:DNAAlphabet}
pos = _offset_of_sequence(ds, i)
seek(stream(ds), pos)
seqlen = read(stream(ds), UInt64)
resize!(seq, seqlen)
return _load_sequence_data!(ds, seq)
end
@inline function load_sequence!(sr::ShortReads{A}, idx::Integer, seq::LongSequence{A}) where {A<:DNAAlphabet}
checkbounds(sr, idx)
return inbounds_load_sequence!(sr, idx, seq)
end
@inline function Base.getindex(sr::ShortReads{A}, idx::Integer) where {A<:DNAAlphabet}
@boundscheck checkbounds(sr, idx)
seq = eltype(sr)(max_read_length(sr))
return inbounds_load_sequence!(sr, idx, seq)
end
function Base.show(io::IO, sr::ShortReads)
summary(io, sr)
end | ReadDatastores | https://github.com/BioJulia/ReadDatastores.jl.git |
|
[
"MIT"
] | 0.4.0 | 72f9df4854d3ee73097ce9f89871557848692541 | code | 3588 | @testset "Linked read datastores" begin
function ucdavis_tag(rec::FASTQ.Record)
id = FASTQ.identifier(rec)
tag = zero(UInt32)
@inbounds for i in 1:16
tag <<= 2
letter = id[i]
if letter == 'C'
tag = tag + 1
elseif letter == 'G'
tag = tag + 2
elseif letter == 'T'
tag = tag + 3
elseif letter != 'A'
tag = zero(UInt32)
break
end
end
return tag
end
function get_fastq_seqs(::Type{A}, r1, r2, maxlen) where {A<:DNAAlphabet}
r1s = open(FASTQ.Reader, r1) do rdr
collect(rdr)
end
r2s = open(FASTQ.Reader, r2) do rdr
collect(rdr)
end
tags = map(r1s) do rec
ucdavis_tag(rec)
end
keep = tags .!= zero(UInt32)
tags = tags[keep]
r1s = FASTQ.sequence.(LongSequence{A}, r1s[keep])
r2s = FASTQ.sequence.(LongSequence{A}, r2s[keep])
for seq in r1s
if length(seq) > maxlen
resize!(seq, maxlen)
end
end
for seq in r1s
if length(seq) > maxlen
resize!(seq, maxlen)
end
end
order = sortperm(tags)
return r1s[order], r2s[order]
end
function check_round_trip(::Type{A}, R1, R2, maxlen, chunksize = 1000000) where {A<:DNAAlphabet}
r1_seqs, r2_seqs = get_fastq_seqs(A, R1, R2, maxlen)
fqa = open(FASTQ.Reader, R1)
fqb = open(FASTQ.Reader, R2)
ds = LinkedReads{A}(fqa, fqb, "10xtest", Symbol("ucdavis-test"), UCDavis10x, maxlen, chunksize)
ds2 = open(LinkedReads{A}, "10xtest.lrseq")
@test ReadDatastores.deduce_datastore_type("10xtest.lrseq") == LinkedReads{A}
ds_seqs = collect(ds)
ds2_seqs = collect(ds2)
return ds_seqs[1:2:end] == ds2_seqs[1:2:end] == r1_seqs && ds_seqs[2:2:end] == ds2_seqs[2:2:end] == r2_seqs
end
function check_show(ds, msg)
buf = IOBuffer()
show(buf, ds)
return String(take!(buf)) == msg
end
@test check_round_trip(DNAAlphabet{2}, "10x_tester_R1.fastq", "10x_tester_R2.fastq", 250, 10)
@test check_round_trip(DNAAlphabet{4}, "10x_tester_R1.fastq", "10x_tester_R2.fastq", 250, 10)
ds = open(LinkedReads{DNAAlphabet{4}}, "10xtest.lrseq")
@test ReadDatastores.name(ds) == Symbol("ucdavis-test")
@test ReadDatastores.max_read_length(ds) == 250
@test check_show(ds, "Linked Read Datastore 'ucdavis-test': 166 reads (83 pairs)")
@test firstindex(ds) == 1
@test lastindex(ds) == 166
@test Base.IteratorSize(ds) == Base.HasLength()
@test Base.IteratorEltype(ds) == Base.HasEltype()
@test Base.eltype(ds) == LongSequence{DNAAlphabet{4}}
@test_throws BoundsError ds[200]
@test_throws BoundsError buffer(ds)[200]
@test_throws ErrorException buffer(ds, 1)
@test_throws ErrorException buffer(ds, ReadDatastores._bytes_per_read(ds) - 1)
@test_throws BoundsError load_sequence!(ds, 200, dna"")
@test collect(ds) == collect(buffer(ds)) == open(LinkedReads{DNAAlphabet{4}}, "10xtest.lrseq") do ds
collect(ds)
end
@test_throws ReadDatastores.DatastoreVersionError{LinkedReads{DNAAlphabet{2}}} open(LinkedReads{DNAAlphabet{2}}, "10xtest-old.lrseq")
@test_throws ReadDatastores.DatastoreEncodingError{LinkedReads{DNAAlphabet{2}}} open(LinkedReads{DNAAlphabet{2}}, "10xtest.lrseq")
end | ReadDatastores | https://github.com/BioJulia/ReadDatastores.jl.git |
|
[
"MIT"
] | 0.4.0 | 72f9df4854d3ee73097ce9f89871557848692541 | code | 2653 | @testset "Long read datastores" begin
function get_fastq_seqs(file)
seqs = map(open(FASTQ.Reader, file) do rdr
collect(rdr)
end) do rec
FASTQ.sequence(LongDNASeq, rec)
end
return seqs
end
function check_round_trip(::Type{A}, FQ) where {A<:DNAAlphabet}
seqs = get_fastq_seqs(FQ)
fq = open(FASTQ.Reader, FQ)
ds = LongReads{A}(fq, "human-nanopore", "human-nanopore", 0)
ds2 = open(LongReads{A}, "human-nanopore.loseq")
ds_seqs = collect(ds)
ds2_seqs = collect(ds2)
return ds_seqs == seqs == ds2_seqs
end
function check_show(ds, msg)
buf = IOBuffer()
show(buf, ds)
return String(take!(buf)) == msg
end
function run_tests(::Type{A}) where {A<:DNAAlphabet}
@test check_round_trip(A, "human_nanopore_tester_2D.fastq")
ds = open(LongReads{A}, "human-nanopore.loseq")
ds2 = open(LongReads{A}, "human-nanopore.loseq")
@test ReadDatastores.deduce_datastore_type("human-nanopore.loseq") == LongReads{A}
@test ReadDatastores.index(ds) == ReadDatastores.index(ds2)
@test ReadDatastores.index(ds)[1] == ReadDatastores.index(ds2)[1]
@test firstindex(ds) == firstindex(ds2) == 1
@test lastindex(ds) == lastindex(ds2) == 10
@test check_show(ds, "Long Read Datastore 'human-nanopore': 10 reads")
@test Base.IteratorSize(ds) == Base.IteratorSize(ds2) == Base.HasLength()
@test Base.IteratorEltype(ds) == Base.IteratorEltype(ds2) == Base.HasEltype()
@test Base.eltype(ds) == Base.eltype(ds2) == LongSequence{A}
@test collect(ds) == collect(buffer(ds)) == collect(buffer(ds, 1))
@test ds[5] == buffer(ds)[5] == load_sequence!(ds, 5, LongSequence{A}()) == load_sequence!(buffer(ds), 5, LongSequence{A}())
bds = buffer(ds)
@test eltype(bds) == eltype(ds)
@test firstindex(bds) == firstindex(ds)
@test eachindex(bds) == eachindex(ds)
@test Base.IteratorSize(bds) == Base.IteratorSize(ds)
@test Base.IteratorEltype(bds) == Base.IteratorEltype(ds)
end
@testset "Human oxford nanopore 2D consensus reads tester" begin
run_tests(DNAAlphabet{4})
run_tests(DNAAlphabet{2})
@test_throws ReadDatastores.DatastoreVersionError{LongReads{DNAAlphabet{2}}} open(LongReads{DNAAlphabet{2}}, "human-nanopore-old.loseq")
@test_throws ReadDatastores.DatastoreEncodingError{LongReads{DNAAlphabet{4}}} open(LongReads{DNAAlphabet{4}}, "human-nanopore.loseq")
end
end | ReadDatastores | https://github.com/BioJulia/ReadDatastores.jl.git |
|
[
"MIT"
] | 0.4.0 | 72f9df4854d3ee73097ce9f89871557848692541 | code | 2736 | @testset "Paired read datastores" begin
function get_fastq_seqs(::Type{A}, file) where {A<:DNAAlphabet}
seqs = map(open(FASTQ.Reader, file) do rdr
collect(rdr)
end) do rec
FASTQ.sequence(LongSequence{A}, rec)
end
for seq in seqs
if length(seq) > 300
resize!(seq, 300)
end
end
return seqs
end
function check_round_trip(::Type{A}, R1, R2) where {A<:DNAAlphabet}
r1_seqs = get_fastq_seqs(A, R1)
r2_seqs = get_fastq_seqs(A, R2)
fqa = open(FASTQ.Reader, R1)
fqb = open(FASTQ.Reader, R2)
ds = PairedReads{A}(fqa, fqb, "ecoli-pe", "ecoli-pe", 250, 300, 0, FwRv)
ds_seqs = collect(ds)
return ds_seqs[1:2:end] == r1_seqs && ds_seqs[2:2:end] == r2_seqs
end
function check_show(ds, msg)
buf = IOBuffer()
show(buf, ds)
return String(take!(buf)) == msg
end
function run_checks(::Type{A}) where {A<:DNAAlphabet}
@test check_round_trip(A, "ecoli_tester_R1.fastq", "ecoli_tester_R2.fastq")
ds = open(PairedReads{A}, "ecoli-pe.prseq")
@test ReadDatastores.deduce_datastore_type("ecoli-pe.prseq") == PairedReads{A}
@test ReadDatastores.name(ds) == Symbol("ecoli-pe")
@test ReadDatastores.max_read_length(ds) == 300
@test ReadDatastores.orientation(ds) == FwRv
@test check_show(ds, "Paired Read Datastore 'ecoli-pe': 20 reads (10 pairs)")
@test firstindex(ds) == 1
@test lastindex(ds) == 20
@test Base.IteratorSize(ds) == Base.HasLength()
@test Base.IteratorEltype(ds) == Base.HasEltype()
@test Base.eltype(ds) == LongSequence{A}
@test_throws BoundsError ds[100]
@test_throws BoundsError buffer(ds)[100]
@test_throws ErrorException buffer(ds, 1)
@test_throws ErrorException buffer(ds, ReadDatastores._bytes_per_read(ds) - 1)
@test_throws BoundsError load_sequence!(ds, 100, LongSequence{A}())
@test collect(ds) == collect(buffer(ds)) == open(PairedReads{A}, "ecoli-pe.prseq") do ds
collect(ds)
end
@test ds[5] == buffer(ds)[5] == load_sequence!(ds, 5, LongSequence{A}()) == load_sequence!(buffer(ds), 5, LongSequence{A}())
end
run_checks(DNAAlphabet{4})
run_checks(DNAAlphabet{2})
@test_throws ReadDatastores.DatastoreVersionError{PairedReads{DNAAlphabet{2}}} open(PairedReads{DNAAlphabet{2}}, "ecoli-paired-old.prseq")
@test_throws ReadDatastores.DatastoreEncodingError{PairedReads{DNAAlphabet{4}}} open(PairedReads{DNAAlphabet{4}}, "ecoli-pe.prseq")
end | ReadDatastores | https://github.com/BioJulia/ReadDatastores.jl.git |
|
[
"MIT"
] | 0.4.0 | 72f9df4854d3ee73097ce9f89871557848692541 | code | 2055 | module TestReadDatastores
using Test, FASTX, ReadDatastores, BioSequences
include("paired-reads.jl")
include("long-reads.jl")
include("linked-reads.jl")
@testset "Error Messages" begin
buf = IOBuffer()
Base.showerror(buf, ReadDatastores.MissingMagicError("myreads.prseq"))
@test String(take!(buf)) == "MissingMagicError: the file myreads.prseq does not appear to be a valid read datastore file, it does not begin with the expected magic bytes."
Base.showerror(buf, ReadDatastores.DatastoreTypeError{PairedReads{DNAAlphabet{2}}}("myreads.prseq", ReadDatastores.LongDS))
@test String(take!(buf)) == "DatastoreTypeError: myreads.prseq contains a long read datastore and cannot be opened as a ReadDatastores.PairedReads{BioSequences.DNAAlphabet{2}}"
Base.showerror(buf, ReadDatastores.DatastoreVersionError{PairedReads{DNAAlphabet{2}}}(UInt16(2)))
@test String(take!(buf)) == "DatastoreVersionError: file format version of paired read datastore file (v2) is deprecated: this version of ReadDatastores.jl supports v$(Int(ReadDatastores.PairedDS_Version))"
Base.showerror(buf, ReadDatastores.DatastoreVersionError{LongReads{DNAAlphabet{2}}}(UInt16(2)))
@test String(take!(buf)) == "DatastoreVersionError: file format version of long read datastore file (v2) is deprecated: this version of ReadDatastores.jl supports v$(Int(ReadDatastores.LongDS_Version))"
Base.showerror(buf, ReadDatastores.DatastoreVersionError{LinkedReads{DNAAlphabet{2}}}(UInt16(2)))
@test String(take!(buf)) == "DatastoreVersionError: file format version of linked read datastore file (v2) is deprecated: this version of ReadDatastores.jl supports v$(Int(ReadDatastores.LinkedDS_Version))"
Base.showerror(buf, ReadDatastores.DatastoreEncodingError{PairedReads{DNAAlphabet{2}}}("myreads.prseq", 4))
@test String(take!(buf)) == "DatastoreEncodingError: myreads.prseq encodes reads using 4 bits per element and cannot be opened as a ReadDatastores.PairedReads{BioSequences.DNAAlphabet{2}}"
end
end # module
| ReadDatastores | https://github.com/BioJulia/ReadDatastores.jl.git |
|
[
"MIT"
] | 0.4.0 | 72f9df4854d3ee73097ce9f89871557848692541 | docs | 5297 | # <img src="./sticker.svg" width="30%" align="right" /> ReadDatastores
[](https://github.com/BioJulia/ReadDatastores.jl/releases/latest)
[](https://github.com/BioJulia/ReadDatastores.jl/blob/master/LICENSE)
[](https://zenodo.org/badge/latestdoi/195041644)
[](https://biojulia.github.io/ReadDatastores.jl/stable)
[](https://biojulia.github.io/ReadDatastores.jl/latest/)
[](https://www.repostatus.org/#wi)
[](https://gitter.im/BioJulia/ReadDatastores.jl)
## Description
Not your papa's FASTQ files.
ReadDatastores provides a set of datastore types for storing and randomly accessing sequences
from read datasets from disk. Each datastore type is optimised to the type of read data stored.
Using these data-stores grants greater performance than using text files that
store reads (see FASTX.jl, XAM.jl, etc.)
since the sequences are stored in BioSequences.jl succinct bit encodings already,
and preset formats/layouts of the binary files means no need to constantly validate the input.
- A paired read datastore is provided for paired-end reads and long mate-pairs (Illumina MiSeq etc).
- A long read datastore is provided for long-reads (Nanopore, PacBio etc.)
- A linked read datastore is provided for shorter reads that are linked or grouped using some additional
(typically proximity based) tag (10x).
Also included is the ability to buffer these datastores, sacrificing some RAM,
for faster iteration / sequential access of the reads in the datastore.
## Installation
You can install ReadDatastores from the julia
REPL. Press `]` to enter pkg mode again, and enter the following:
```julia
add ReadDatastores
```
If you are interested in the cutting edge of the development, please check out
the master branch to try new features before release.
## Testing
ReadDatastores is tested against Julia `1.X` on Linux, OS X, and Windows.
**Latest build status:**


[](https://codecov.io/gh/BioJulia/ReadDatastores.jl)
## Contributing
We appreciate contributions from users including reporting bugs, fixing
issues, improving performance and adding new features.
Take a look at the [contributing files](https://github.com/BioJulia/Contributing)
detailed contributor and maintainer guidelines, and code of conduct.
### Financial contributions
We also welcome financial contributions in full transparency on our
[open collective](https://opencollective.com/biojulia).
Anyone can file an expense. If the expense makes sense for the development
of the community, it will be "merged" in the ledger of our open collective by
the core contributors and the person who filed the expense will be reimbursed.
## Backers & Sponsors
Thank you to all our backers and sponsors!
Love our work and community? [Become a backer](https://opencollective.com/biojulia#backer).
[](https://opencollective.com/biojulia#backers)
Does your company use BioJulia? Help keep BioJulia feature rich and healthy by
[sponsoring the project](https://opencollective.com/biojulia#sponsor)
Your logo will show up here with a link to your website.
[](https://opencollective.com/biojulia/sponsor/0/website)
[](https://opencollective.com/biojulia/sponsor/1/website)
[](https://opencollective.com/biojulia/sponsor/2/website)
[](https://opencollective.com/biojulia/sponsor/3/website)
[](https://opencollective.com/biojulia/sponsor/4/website)
[](https://opencollective.com/biojulia/sponsor/5/website)
[](https://opencollective.com/biojulia/sponsor/6/website)
[](https://opencollective.com/biojulia/sponsor/7/website)
[](https://opencollective.com/biojulia/sponsor/8/website)
[](https://opencollective.com/biojulia/sponsor/9/website)
## Questions?
If you have a question about contributing or using BioJulia software, come
on over and chat to us on [Gitter](https://gitter.im/BioJulia/General), or you can try the
[Bio category of the Julia discourse site](https://discourse.julialang.org/c/domain/bio).
| ReadDatastores | https://github.com/BioJulia/ReadDatastores.jl.git |
|
[
"MIT"
] | 0.4.0 | 72f9df4854d3ee73097ce9f89871557848692541 | docs | 1874 | <!--- Provide a general summary of the issue in the Title above -->
> _This template is rather extensive. Fill out all that you can, if are a new contributor or you're unsure about any section, leave it unchanged and a reviewer will help you_ :smile:. _This template is simply a tool to help everyone remember the BioJulia guidelines, if you feel anything in this template is not relevant, simply delete it._
## Expected Behavior
<!--- If you're describing a bug, tell us what you expect to happen -->
<!--- If you're suggesting a change/improvement, tell us how it should work -->
## Current Behavior
<!--- If describing a bug, tell us what happens instead of the expected behavior -->
<!--- If suggesting a change/improvement, explain the difference from current behavior -->
## Possible Solution / Implementation
<!--- If describing a bug, suggest a fix/reason for the bug (optional) -->
<!--- If you're suggesting a change/improvement, suggest ideas how to implement the addition or change -->
## Steps to Reproduce (for bugs)
<!--- You may include copy/pasteable snippets or a list of steps to reproduce the bug -->
1.
2.
3.
4.
<!--- Optionally, provide a link to a live example -->
<!--- You can use [this tool](https://www.cockos.com/licecap/) -->
<!--- ...Or [this tool](https://github.com/colinkeenan/silentcast) -->
<!--- ...Or [this tool](https://github.com/GNOME/byzanz) on Linux -->
## Context
<!--- How has this issue affected you? What are you trying to accomplish? -->
<!--- Providing context helps us come up with a solution that is most useful in the real world -->
## Your Environment
<!--- Include as many relevant details about the environment you experienced the bug in -->
- Package Version used:
- Julia Version used:
- Operating System and version (desktop or mobile):
- Link to your project:
<!-- Can you list installed packages here? -->
| ReadDatastores | https://github.com/BioJulia/ReadDatastores.jl.git |
|
[
"MIT"
] | 0.4.0 | 72f9df4854d3ee73097ce9f89871557848692541 | docs | 2791 | # A clear and descriptive title (No issue numbers please)
> _This template is rather extensive. Fill out all that you can, if are a new contributor or you're unsure about any section, leave it unchanged and a reviewer will help you_ :smile:. _This template is simply a tool to help everyone remember the BioJulia guidelines, if you feel anything in this template is not relevant, simply delete it._
## Types of changes
This PR implements the following changes:
_(Please tick any or all of the following that are applicable)_
* [ ] :sparkles: New feature (A non-breaking change which adds functionality).
* [ ] :bug: Bug fix (A non-breaking change, which fixes an issue).
* [ ] :boom: Breaking change (fix or feature that would cause existing functionality to change).
## :clipboard: Additional detail
- If you have implemented new features or behaviour
- **Provide a description of the addition** in as many details as possible.
- **Provide justification of the addition**.
- **Provide a runnable example of use of your addition**. This lets reviewers
and others try out the feature before it is merged or makes it's way to release.
- If you have changed current behaviour...
- **Describe the behaviour prior to you changes**
- **Describe the behaviour after your changes** and justify why you have made the changes,
Please describe any breakages you anticipate as a result of these changes.
- **Does your change alter APIs or existing exposed methods/types?**
If so, this may cause dependency issues and breakages, so the maintainer
will need to consider this when versioning the next release.
- If you are implementing changes that are intended to increase performance, you
should provide the results of a simple performance benchmark exercise
demonstrating the improvement. Especially if the changes make code less legible.
## :ballot_box_with_check: Checklist
- [ ] :art: The changes implemented is consistent with the [julia style guide](https://docs.julialang.org/en/stable/manual/style-guide/).
- [ ] :blue_book: I have updated and added relevant docstrings, in a manner consistent with the [documentation styleguide](https://docs.julialang.org/en/stable/manual/documentation/).
- [ ] :blue_book: I have added or updated relevant user and developer manuals/documentation in `docs/src/`.
- [ ] :ok: There are unit tests that cover the code changes I have made.
- [ ] :ok: The unit tests cover my code changes AND they pass.
- [ ] :pencil: I have added an entry to the `[UNRELEASED]` section of the manually curated `CHANGELOG.md` file for this repository.
- [ ] :ok: All changes should be compatible with the latest stable version of Julia.
- [ ] :thought_balloon: I have commented liberally for any complex pieces of internal code.
| ReadDatastores | https://github.com/BioJulia/ReadDatastores.jl.git |
|
[
"MIT"
] | 0.4.0 | 72f9df4854d3ee73097ce9f89871557848692541 | docs | 270 | # Additional methods
All `ReadDatastores` permit construction from fastq files, iteration, and
indexing.
But some specific `ReadDatastore` types also have additional specific methods.
They are listed here.
```@docs
read_tag
name
max_read_length
orientation
stream
``` | ReadDatastores | https://github.com/BioJulia/ReadDatastores.jl.git |
|
[
"MIT"
] | 0.4.0 | 72f9df4854d3ee73097ce9f89871557848692541 | docs | 4300 | ```@meta
CurrentModule = ReadDatastores
DocTestSetup = quote
using BioSequences
using ReadDatastores
using FASTX
println(pwd())
fwq = open(FASTQ.Reader, "../test/ecoli_tester_R1.fastq")
rvq = open(FASTQ.Reader, "../test/ecoli_tester_R2.fastq")
PairedReads{DNAAlphabet{2}}(fwq, rvq, "ecoli-test-paired", "my-ecoli-test", 250, 300, 0, FwRv)
end
```
# Building Read Datastores
To build a read datastore you first need to decide what sort of read data you have.
The process of building these datastores is consistent, but for each datastore
there are datastore specific options the constructor will accept:
```@docs
PairedReads
LongReads
LinkedReads
```
# Loading Read Datastores
Once you have built a datastore, you can open it in other projects again using
a `Base.open` method:
```jldoctest
julia> ds = open(PairedReads{DNAAlphabet{2}}, "ecoli-test-paired.prseq", "my-ecoli-pe")
Paired Read Datastore 'my-ecoli-pe': 20 reads (10 pairs)
```
The open method takes a `ReadDatastore` type, the filename of the datastore,
and, optionally a name for the datastore, if you omit the name, the datastore
will use the default name that was specified on construction.
You can also use `do` block notation when opening a datastore and that will
ensure that the underlying stream of the reader will close.
## Using the macros and string literals
If you try to open a datastore with the `open` method as above, and you provide
the wrong type you will get an error.
This will protect you from trying to open a long read datastore as a paired read
datastore and such, but it's not always convenient.
For example, if you have a paired read datastore storing reads in 2-bit format
and you tried to open it as a `PairedReads{DNAAlphabet{4}}` type you will still
get an error.
This is obviously correct behaviour, you don't want to be loading sequences
using a different encoding to the one they were saved with!
However, in a practical setting this will get annoying: maybe you want to use
some long reads you put in a datastore a while ago but don't remember if your
datastore file is a `LongReads{DNAAlphabet{4}}` or a `LongReads{DNAAlphabet{2}}`.
Or maybe you get a `somefile.prseq` file from a colleague, and from the extension,
you deduce it is paired reads but even then that's not guaranteed.
To solve this problem a few convenience macros are provided for you, so you can
load datastores without specifying the datastore type, yet still avoid type
uncertainty in your generated code.
The macro `@openreads` accepts a filepath, and optionally a datastore name.
The macro is evaluated and expanded before you julia code is compiled.
During that time, the header of the datastore file is peeked at, and the correct
ReadDatastore subtype is deduced, and a correctly typed `open` statement is generated.
For example if the file `myreads.prseq` was a 2-bit encoded paired read datastore,
and you had the following statement in your script: `ds = @openreads "myreads.prseq"`
The statement would be expanded to: `ds = open(PairedReads{DNAAlphabet{2}}, "myreads.prseq")`
You can also open a datastore using a string literal e.g. `reads"myreads.prseq"`.
When you do this, type type of the datastore is detected as with `@openreads`,
however, rather than returning the expression
`ds = open(PairedReads{DNAAlphabet{2}}, "myreads.prseq")`, as `@openreads` does,
the `open` is executed and the macro returns the value of `ds`.
!!! note
In order for `@openreads` or the literals to work properly in any script,
the datastore file must exist prior to running the script.
This is unavoidable because macros are evaluated and expanded first before
the resulting expanded code is compiled and run.
So creating a datastore file and loading it again using `@openreads` within the
same script will not work, and `@openreads` will try to peek at the file and
deduce its contents before the script can generate the datastore file.
You will get an error telling you the file can't be found / does not exist.
In an interactive setting, in which statements are entered, compiled and run
by the REPL one at a time, this should rarely be a problem.
**In summary: If in doubt about the datastore type of a file, simply use `@openreads`** | ReadDatastores | https://github.com/BioJulia/ReadDatastores.jl.git |
|
[
"MIT"
] | 0.4.0 | 72f9df4854d3ee73097ce9f89871557848692541 | docs | 5170 | # ReadDatastores
[](https://github.com/BioJulia/ReadDatastores.jl/releases/latest)
[](https://github.com/BioJulia/ReadDatastores.jl/blob/master/LICENSE)
[](https://zenodo.org/badge/latestdoi/195041644)
[](https://biojulia.github.io/ReadDatastores.jl/stable)
[](https://biojulia.github.io/ReadDatastores.jl/latest/)
[](https://www.repostatus.org/#wi)
[](https://gitter.im/BioJulia/ReadDatastores.jl)
## Description
Not your papa's FASTQ files.
ReadDatastores provides a set of datastore types for storing and randomly accessing sequences
from read datasets from disk. Each datastore type is optimised to the type of read data stored.
Using these data-stores grants greater performance than using text files that
store reads (see FASTX.jl, XAM.jl, etc.)
since the sequences are stored in BioSequences.jl succinct bit encodings already,
and preset formats/layouts of the binary files means no need to constantly validate the input.
- A paired read datastore is provided for paired-end reads and long mate-pairs (Illumina MiSeq etc).
- A long read datastore is provided for long-reads (Nanopore, PacBio etc.)
- A linked read datastore is provided for shorter reads that are linked or grouped using some additional
(typically proximity based) tag (10x).
Also included is the ability to buffer these datastores, sacrificing some RAM,
for faster iteration / sequential access of the reads in the datastore.
## Installation
You can install ReadDatastores from the julia
REPL. Press `]` to enter pkg mode again, and enter the following:
```julia
add ReadDatastores
```
If you are interested in the cutting edge of the development, please check out
the master branch to try new features before release.
## Testing
ReadDatastores is tested against Julia `1.X` on Linux, OS X, and Windows.
[](https://travis-ci.com/BioJulia/ReadDatastores.jl)
[](https://codecov.io/gh/BioJulia/ReadDatastores.jl)
## Contributing
We appreciate contributions from users including reporting bugs, fixing
issues, improving performance and adding new features.
Take a look at the [contributing files](https://github.com/BioJulia/Contributing)
detailed contributor and maintainer guidelines, and code of conduct.
### Financial contributions
We also welcome financial contributions in full transparency on our
[open collective](https://opencollective.com/biojulia).
Anyone can file an expense. If the expense makes sense for the development
of the community, it will be "merged" in the ledger of our open collective by
the core contributors and the person who filed the expense will be reimbursed.
## Backers & Sponsors
Thank you to all our backers and sponsors!
Love our work and community? [Become a backer](https://opencollective.com/biojulia#backer).
[](https://opencollective.com/biojulia#backers)
Does your company use BioJulia? Help keep BioJulia feature rich and healthy by
[sponsoring the project](https://opencollective.com/biojulia#sponsor)
Your logo will show up here with a link to your website.
[](https://opencollective.com/biojulia/sponsor/0/website)
[](https://opencollective.com/biojulia/sponsor/1/website)
[](https://opencollective.com/biojulia/sponsor/2/website)
[](https://opencollective.com/biojulia/sponsor/3/website)
[](https://opencollective.com/biojulia/sponsor/4/website)
[](https://opencollective.com/biojulia/sponsor/5/website)
[](https://opencollective.com/biojulia/sponsor/6/website)
[](https://opencollective.com/biojulia/sponsor/7/website)
[](https://opencollective.com/biojulia/sponsor/8/website)
[](https://opencollective.com/biojulia/sponsor/9/website)
## Questions?
If you have a question about contributing or using BioJulia software, come
on over and chat to us on [Gitter](https://gitter.im/BioJulia/General), or you can try the
[Bio category of the Julia discourse site](https://discourse.julialang.org/c/domain/bio).
| ReadDatastores | https://github.com/BioJulia/ReadDatastores.jl.git |
|
[
"MIT"
] | 0.4.0 | 72f9df4854d3ee73097ce9f89871557848692541 | docs | 3407 | ```@meta
CurrentModule = ReadDatastores
DocTestSetup = quote
using BioSequences, ReadDatastores
end
```
# Indexing
`Base.getindex` is defined for ReadDatastores:
```jldoctest indexing
julia> ds = open(PairedReads{DNAAlphabet{2}}, "ecoli-test-paired.prseq", "my-ecoli-pe")
Paired Read Datastore 'my-ecoli-pe': 20 reads (10 pairs)
julia> ds[5]
300nt DNA Sequence:
ACATGCACTTCAACGGCATTACTGGTGACCTCTTCGTCC…TCTATCAACGCAAAAGGGTTACACAGATAATCGTCAGCT
```
Indexing a read datastore creates a new sequence. If you want to load a sequence
from a datastore into an existing sequence, then you can use the `load_sequence!`
method.
```jldoctest indexing
julia> seq = LongSequence{DNAAlphabet{2}}()
0nt DNA Sequence:
< EMPTY SEQUENCE >
julia> load_sequence!(ds, 6, seq)
300nt DNA Sequence:
ATTACTGCGATTACTGCTGCGAATTTTTTCATGTTTATT…GTCCACTGGTTTACACAAGGTCGTAAGGGAAAAGAGGCG
julia> seq
300nt DNA Sequence:
ATTACTGCGATTACTGCTGCGAATTTTTTCATGTTTATT…GTCCACTGGTTTACACAAGGTCGTAAGGGAAAAGAGGCG
```
# Iteration
The ReadDatastore types also support the `Base.iterate` interface:
```jldoctest indexing
julia> collect(ds)
20-element Array{LongSequence{DNAAlphabet{2}},1}:
GGGCTTTAAAATCCACTTTTTCCATATCGATAGTCACGT…ATTTCTTCGATTCTTCTTTGTCACCGCAGCCAGCAAGAG
GTGGGTTTTTATCGGCTGGCACATGTGTTGGGACAATTT…GGCTTTCAATACGCTGTTTTCCCTCGTTGTTTCATCTGT
TGAACTCCACATCCTGCGGATCGTAAACCGTCACCTCTT…TCTTCCAGGCAGGCCGCCAGGGTATCACCTTCCAGACCA
GATGAATCTGGCGGTTATTAACGGTAACAATAACCAGCA…AGACGGCAAACCGGCTGCAGGCGGTAGGTTGTTGCAGGT
ACATGCACTTCAACGGCATTACTGGTGACCTCTTCGTCC…TCTATCAACGCAAAAGGGTTACACAGATAATCGTCAGCT
ATTACTGCGATTACTGCTGCGAATTTTTTCATGTTTATT…GTCCACTGGTTTACACAAGGTCGTAAGGGAAAAGAGGCG
CGGTTGAGTTCAAAGGCAAAGATTTGCTTGCGCTGTCGC…TTTTCCGGCGGCGAGAAAAAGCGCAACGATTTTTTGCAA
TTCGTCCCTGATATAGCACATGAACGTAATCAGGCTTGA…AATCTTCCGGCATCTTCAGGAGAGCGATTTTCTCTTCCA
ACGACACATTACCGGAAATTCAGGCCGACCCGGACAGGC…TTGAACAACACGGTGGTACAATTCAGGTCGCAAGCCAGG
TCCACCACCAGAATATCGATATTATCGTGCGTCATCCTT…TCACGCCCGCGCCGCTTTCGCTGGCCGTCACGCTAATCA
CGTAACTTTATTCATATCTCTTCCCCCTCCCTGTACTTC…CTGTTACCGCATGGCGGCAGTGCGCTGGTCGATATGACC
ATCGGGTAGGGGACGGAACGAATACGACAGTCAATATTC…AAGACTTTATCGTGCGGTCCGAACCGACTTTGTGGCGGC
GCCCTGGAGCTGGTGAAAGAAGGTCGAGCGCAAGCCTGT…CAATCCTCGCGTGGCGTTGCTCAATATTGGTGAAGAAGA
GAAAGGAACATCCTGACAACACCTTCCATCGTCTTTAAT…ATAAAGGCAAATTGCACCACCATGATGCTGTCCCAATCA
GTCTGGTGGTGCCTCTTTACTTAAGGAATTTCATCCTGT…AACGATGCCAGGCACCTGCGAAACTTTCCTGCACCAGCC
GACCGTTTTTCCCCAATCCGAGAACGCATAAATCCAGAC…TTTCTTCCCGGTAATGATACGTCACTATTGGAGTGGCCC
AGAGGCCACAGCGCGCCCATAATGGCGACTGAAAGCCAG…TTCACCGCGGTGACCGGAATCAGGGCAAATTCGACATGT
AAAAGGATCGCCGACCTTAACCATTCTGAATGTGATTGG…CTGGTGCCTGTCATATTTCGAACTCTGGGGGGACAGCAT
TGAGCAAATATGCCCGACCCAGCCTCATGACAGCGATAT…ACCGAAAAAAAAGTAATCGTCGGCATGTCCGGCGGTGTC
AGGCTTTAAATTTGATCTCTTTGTTGCACAGAATATCCG…GCCAGGAAGAAACGGAGGAACCGACACCGCCGGCCATGC
```
# Buffers
When iterating over a `ReadDatastore` either using `Base.getindex` or `load_sequence!`,
you can sacrifice some memory for a buffer, to reduce the number of times the
hard disk is read, and speed up sequential access and iteration. You can use the
`buffer` method to wrap a datastore in such a buffer.
```jldoctest indexing
julia> bds = buffer(ds)
Buffered Paired Read Datastore 'my-ecoli-pe': 20 reads (10 pairs)
julia> for i in eachindex(bds)
load_sequence!(bds, i, seq)
println(length(seq))
end
297
300
299
300
300
300
299
300
300
300
300
300
299
300
300
300
300
300
300
300
``` | ReadDatastores | https://github.com/BioJulia/ReadDatastores.jl.git |
|
[
"MIT"
] | 0.4.0 | 72f9df4854d3ee73097ce9f89871557848692541 | docs | 1323 | # `ReadDatastore` types
ReadDatastores.jl implements several different types that index read sequence
data on disk, permitting efficient random access and iteration, without having
to keep every read in memory at once.
Currently there are three types of read datastore:
`PairedReads` handles short (somewhat regular in length) paired reads such as
those sequenced from a paired-end library, or a long mate pair library.
`LongReads` handles long reads of inconsistent length, such as you would get from
single-molecule real-time sequencing of a long read library.
`LinkedReads` handles short (somewhat regular in length) paired reads,
however unlike typical paired-end reads pairs are also linked to each other,
through some proximity tagging mechanism (like 10x).
All 3 types are subtypes of the `ReadDatastore` abstract type, and their API and
behaviour is consistent, except in cases where the read data itself demands a
divergence in behaviour (e.g. you can't ask for the tag of a read from a `PairedReads`
dataset, but you can with a `LinkedReads` dataset).
The three types of read processed by the three types of datastore are not specific
to any one technology, sequencing machine, or company. Rather the datastores
were chosen by considering the different characteristics of read datasets
currently produced. | ReadDatastores | https://github.com/BioJulia/ReadDatastores.jl.git |
|
[
"MIT"
] | 1.3.5 | ac69089b500226d09cb611688ae67015bb107b58 | code | 1212 | using NaturalNeighbours
using Documenter
DocMeta.setdocmeta!(NaturalNeighbours, :DocTestSetup, :(using NaturalNeighbours); recursive=true)
makedocs(;
modules=[NaturalNeighbours],
authors="Daniel VandenHeuvel <[email protected]>",
repo="https://github.com/DanielVandH/NaturalNeighbours.jl/blob/{commit}{path}#{line}",
sitename="NaturalNeighbours.jl",
format=Documenter.HTML(;
prettyurls=get(ENV, "CI", "false") == "true",
canonical="https://DanielVandH.github.io/NaturalNeighbours.jl",
edit_link="main",
assets=String[],
),
pages=[
"Home" => "index.md",
"Examples" => [
"Interpolation" => "interpolation.md",
"Differentiation" => "differentiation.md",
"Switzerland Elevation Data" => "swiss.md"
],
"Comparison of Interpolation Methods" => "compare.md",
"Mathematical Details" => [
"Interpolation Details" => "interpolation_math.md",
"Differentiation Details" => "differentiation_math.md"
]
],
warnonly=true
)
deploydocs(;
repo="github.com/DanielVandH/NaturalNeighbours.jl",
devbranch="main",
push_preview=true
)
| NaturalNeighbours | https://github.com/DanielVandH/NaturalNeighbours.jl.git |
|
[
"MIT"
] | 1.3.5 | ac69089b500226d09cb611688ae67015bb107b58 | code | 2633 | module NaturalNeighbours
import DelaunayTriangulation: DelaunayTriangulation,
triangulate,
integer_type,
num_points,
InsertionEventHistory,
add_point!,
each_added_triangle,
Triangulation,
triangulate,
is_ghost_vertex,
get_neighbours,
construct_triangle,
Adjacent,
add_triangle!,
edge_type,
get_adjacent,
num_triangles,
number_type,
previndex_circular,
nextindex_circular,
get_point,
get_points,
triangle_circumcenter,
num_points,
number_type,
getxy,
polygon_features,
getpoint,
num_solid_vertices,
has_ghost_triangles,
add_ghost_triangles!,
is_collinear,
has_boundary_nodes,
get_triangulation,
is_on,
is_degenerate,
point_position_relative_to_triangle,
point_position_on_line_segment,
point_position_relative_to_line,
is_ghost_triangle,
distance_to_polygon,
initial,
terminal,
find_edge,
triangle_type,
is_boundary_triangle,
replace_boundary_triangle_with_ghost_triangle,
each_solid_triangle,
jump_and_march,
jump_to_voronoi_polygon,
iterated_neighbourhood,
sort_triangle,
each_point,
iterated_neighbourhood!,
triangle_area,
get_convex_hull_vertices,
get_boundary_nodes,
triangle_vertices
import ChunkSplitters: chunks
using ElasticArrays
using LinearAlgebra
using Random
#num_points(::NTuple{N,F}) where {N,F} = N
#getpoint(p::NTuple{N,F}, i::Integer) where {N,F} = p[i]
export interpolate
export differentiate
export generate_derivatives, generate_gradients
export identify_exterior_points
export Sibson, Triangle, Nearest, Laplace, Direct, Iterative, Farin, Hiyoshi
include("data_structures/natural_coordinates.jl")
include("data_structures/neighbour_cache.jl")
include("data_structures/interpolant.jl")
include("data_structures/derivative_cache.jl")
include("data_structures/differentiator.jl")
include("interpolation/extrapolation.jl")
include("interpolation/interpolate.jl")
include("interpolation/eval.jl")
include("interpolation/coordinates/sibson.jl")
include("interpolation/coordinates/triangle.jl")
include("interpolation/coordinates/nearest.jl")
include("interpolation/coordinates/laplace.jl")
include("interpolation/coordinates/farin.jl")
include("interpolation/coordinates/hiyoshi.jl")
include("interpolation/utils.jl")
include("differentiation/generate.jl")
include("differentiation/differentiate.jl")
include("differentiation/methods/direct.jl")
include("differentiation/methods/iterative.jl")
include("differentiation/utils.jl")
include("utils.jl")
end # module NaturalNeighbours | NaturalNeighbours | https://github.com/DanielVandH/NaturalNeighbours.jl.git |
|
[
"MIT"
] | 1.3.5 | ac69089b500226d09cb611688ae67015bb107b58 | code | 1494 | """
identify_exterior_points(x, y, points, boundary_nodes; tol = 0.0)
Given a polygon described by `(points, boundary_nodes)`, matching the
specification of polygons in DelaunayTriangulation.jl,
returns a vector of indices of the points defined by `(x, y)` that are outside of the polygon.
Use `tol` to specify a tolerance for the distance to the polygon.
"""
function identify_exterior_points(x, y, points, boundary_nodes; tol = 0.0)
@assert length(x) == length(y) "x and y must have the same length."
exterior_points = Int64[]
sizehint!(exterior_points, isqrt(length(x)))
for i in eachindex(x, y)
xᵢ = x[i]
yᵢ = y[i]
q = (xᵢ, yᵢ)
δ = distance_to_polygon(q, points, boundary_nodes)
if δ < tol
push!(exterior_points, i)
end
end
return exterior_points
end
"""
identify_exterior_points(x, y, itp::NaturalNeighboursInterpolant; tol = 0.0)
Returns the indices of the points defined by the vectors `(x, y)` that are
outside of the underlying triangulation to the interpolant `itp`.
Use `tol` to specify a tolerance for the distance to the triangulation.
"""
function identify_exterior_points(x, y, itp::NaturalNeighboursInterpolant; tol=0.0)
tri = get_triangulation(itp)
points = get_points(tri)
if !has_boundary_nodes(tri)
bn = get_convex_hull_vertices(tri)
else
bn = get_boundary_nodes(tri)
end
return identify_exterior_points(x, y, points, bn; tol=tol)
end | NaturalNeighbours | https://github.com/DanielVandH/NaturalNeighbours.jl.git |
|
[
"MIT"
] | 1.3.5 | ac69089b500226d09cb611688ae67015bb107b58 | code | 1696 | struct DerivativeCache{I, F}
iterated_neighbourhood::Set{I}
second_iterated_neighbourhood::Set{I}
linear_matrix::ElasticMatrix{F, Vector{F}}
quadratic_matrix::ElasticMatrix{F, Vector{F}}
quadratic_matrix_no_cubic::ElasticMatrix{F, Vector{F}}
rhs_vector::Vector{F}
end
get_iterated_neighbourhood(cache::DerivativeCache) = cache.iterated_neighbourhood
get_second_iterated_neighbourhood(cache::DerivativeCache) = cache.second_iterated_neighbourhood
get_linear_matrix(cache::DerivativeCache) = cache.linear_matrix
get_quadratic_matrix(cache::DerivativeCache) = cache.quadratic_matrix
get_quadratic_matrix_no_cubic(cache::DerivativeCache) = cache.quadratic_matrix_no_cubic
get_rhs_vector(cache::DerivativeCache) = cache.rhs_vector
function DerivativeCache(tri::Triangulation{P,T,BN,W,I,E,Es,BC,BEM,GVM,GVR,BPL,C,BE}) where {P,T,BN,W,I,E,Es,BC,BEM,GVM,GVR,BPL,C,BE}
iterated_neighbourhood = Set{I}()
second_iterated_neighbourhood = Set{I}()
F = number_type(tri)
linear_matrix = ElasticMatrix{F, Vector{F}}(undef, 2, 0)
quadratic_matrix = ElasticMatrix{F, Vector{F}}(undef, 9, 0)
quadratic_matrix_no_cubic = ElasticMatrix{F, Vector{F}}(undef, 5, 0)
rhs_vector = zeros(F, 0)
sizehint_m = 2^5
sizehint!(iterated_neighbourhood, sizehint_m)
sizehint!(second_iterated_neighbourhood, sizehint_m)
sizehint!(linear_matrix, (2, sizehint_m))
sizehint!(quadratic_matrix, (9, sizehint_m))
sizehint!(quadratic_matrix_no_cubic, (5, sizehint_m))
sizehint!(rhs_vector, sizehint_m)
return DerivativeCache(iterated_neighbourhood, second_iterated_neighbourhood, linear_matrix, quadratic_matrix, quadratic_matrix_no_cubic, rhs_vector)
end | NaturalNeighbours | https://github.com/DanielVandH/NaturalNeighbours.jl.git |
|
[
"MIT"
] | 1.3.5 | ac69089b500226d09cb611688ae67015bb107b58 | code | 695 | struct NaturalNeighboursDifferentiator{I,O}
interpolant::I
function NaturalNeighboursDifferentiator(itp::I, order) where {I}
@assert order ∈ (1, 2) "order must be 1 or 2."
return new{I, order}(itp)
end
end
get_interpolant(d::NaturalNeighboursDifferentiator) = d.interpolant
function Base.show(io::IO, ::MIME"text/plain", d::NaturalNeighboursDifferentiator{I,O}) where {I,O}
z = get_z(get_interpolant(d))
∇ = get_gradient(get_interpolant(d))
ℋ = get_hessian(get_interpolant(d))
println(io, "Natural Neighbour Differentiator")
println(io, " Order: ", O)
println(io, " z: ", z)
println(io, " ∇: ", ∇)
print(io, " H: ", ℋ)
end | NaturalNeighbours | https://github.com/DanielVandH/NaturalNeighbours.jl.git |
|
[
"MIT"
] | 1.3.5 | ac69089b500226d09cb611688ae67015bb107b58 | code | 2990 | struct NaturalNeighboursInterpolant{T<:Triangulation,F,G,H,N,D}
triangulation::T
z::Vector{F}
gradient::G # (∂ˣf, ∂ʸf)
hessian::H # (∂ˣˣf, ∂ʸʸf, ∂ˣʸf)
neighbour_cache::N
derivative_cache::D
function NaturalNeighboursInterpolant(
tri::T,
z::AbstractVector{F},
gradient=nothing,
hessian=nothing;
derivatives=false,
kwargs...
) where {T,F}
@assert num_points(tri) == length(z) "The number of points in the triangulation must equal the length of the data vector."
!has_ghost_triangles(tri) && add_ghost_triangles!(tri)
if has_boundary_nodes(tri)
@warn "Natural neighbour interpolation is only defined over unconstrained triangulations.\nYou may find unexpected results when interpolating near the boundaries or constrained edges, and especially near non-convex boundaries or outside of the triangulation.\nIn your later evaluations of this interpolant, consider using project=false." maxlog=1
end
nt = Base.Threads.nthreads()
derivative_caches = [DerivativeCache(tri) for _ in 1:nt]
neighbour_caches = [NaturalNeighboursCache(tri) for _ in 1:nt]
D = typeof(derivative_caches)
N = typeof(neighbour_caches)
if derivatives
∇, ℋ = generate_derivatives(tri, z, derivative_caches, neighbour_caches; kwargs...)
else
∇ = nothing # TODO: In 2.0, change these to be NTuple{2, F}[] and NTuple{3, F}[]
ℋ = nothing
end
if isnothing(gradient)
gradient = ∇
end
if isnothing(hessian)
hessian = ℋ
end
G = typeof(gradient)
H = typeof(hessian)
return new{T,F,G,H,N,D}(tri, z, gradient, hessian, neighbour_caches, derivative_caches)
end
end
function Base.show(io::IO, ::MIME"text/plain", nc::NaturalNeighboursInterpolant)
z = get_z(nc)
println(io, "Natural Neighbour Interpolant")
println(io, " z: ", z)
println(io, " ∇: ", get_gradient(nc))
print(io, " H: ", get_hessian(nc))
end
get_triangulation(ni::NaturalNeighboursInterpolant) = ni.triangulation
get_z(ni::NaturalNeighboursInterpolant) = ni.z
get_z(ni::NaturalNeighboursInterpolant, i) = ni.z[i]
get_neighbour_cache(ni::NaturalNeighboursInterpolant) = ni.neighbour_cache
get_neighbour_cache(ni::NaturalNeighboursInterpolant, id) = ni.neighbour_cache[id]
get_derivative_cache(ni::NaturalNeighboursInterpolant) = ni.derivative_cache
get_derivative_cache(ni::NaturalNeighboursInterpolant, id) = ni.derivative_cache[id]
get_gradient(ni::NaturalNeighboursInterpolant) = ni.gradient
get_gradient(ni::NaturalNeighboursInterpolant, i) = ni.gradient[i]
get_hessian(ni::NaturalNeighboursInterpolant) = ni.hessian
get_hessian(ni::NaturalNeighboursInterpolant, i) = ni.hessian[i]
has_gradient(ni::NaturalNeighboursInterpolant) = !isnothing(get_gradient(ni))
has_hessian(ni::NaturalNeighboursInterpolant) = !isnothing(get_hessian(ni)) | NaturalNeighbours | https://github.com/DanielVandH/NaturalNeighbours.jl.git |
|
[
"MIT"
] | 1.3.5 | ac69089b500226d09cb611688ae67015bb107b58 | code | 792 | struct NaturalCoordinates{F,I,T<:Triangulation}
coordinates::Vector{F}
indices::Vector{I}
interpolation_point::NTuple{2,F}
triangulation::T
end
function Base.show(io::IO, ::MIME"text/plain", nc::NaturalCoordinates{F,I}) where {F,I}
coordinates = get_coordinates(nc)
indices = get_indices(nc)
interpolation_point = get_interpolation_point(nc)
println(io, "NaturalCoordinates{", F, ",", I, "}")
println(io, " u: ", interpolation_point)
println(io, " λ: ", coordinates)
print(io, " k: ", indices)
end
get_coordinates(nc::NaturalCoordinates) = nc.coordinates
get_indices(nc::NaturalCoordinates) = nc.indices
get_interpolation_point(nc::NaturalCoordinates) = nc.interpolation_point
get_triangulation(nc::NaturalCoordinates) = nc.triangulation
| NaturalNeighbours | https://github.com/DanielVandH/NaturalNeighbours.jl.git |
|
[
"MIT"
] | 1.3.5 | ac69089b500226d09cb611688ae67015bb107b58 | code | 1192 | struct NaturalNeighboursCache{F,I,H,E,R}
coordinates::Vector{F}
envelope::Vector{I}
insertion_event_history::H
poly_points::Vector{NTuple{2,F}}
temp_adjacent::Adjacent{I,E}
last_triangle::R
end
get_coordinates(cache::NaturalNeighboursCache) = cache.coordinates
get_envelope(cache::NaturalNeighboursCache) = cache.envelope
get_insertion_event_history(cache::NaturalNeighboursCache) = cache.insertion_event_history
get_poly_points(cache::NaturalNeighboursCache) = cache.poly_points
get_temp_adjacent(cache::NaturalNeighboursCache) = cache.temp_adjacent
get_last_triangle(cache::NaturalNeighboursCache) = cache.last_triangle
function NaturalNeighboursCache(tri::Triangulation{P,T,BN,W,I,E,Es,BC,BEM,GVM,GVR,BPL,C,BE}) where {P,T,BN,W,I,E,Es,BC,BEM,GVM,GVR,BPL,C,BE}
coordinates = number_type(tri)[]
envelope = I[]
insertion_event_history = InsertionEventHistory(tri)
poly_points = NTuple{2,number_type(tri)}[]
temp_adjacent = Adjacent{I,E}()
last_triangle = (Ref ∘ triangle_vertices ∘ first ∘ each_solid_triangle)(tri)
return NaturalNeighboursCache(coordinates, envelope, insertion_event_history, poly_points, temp_adjacent, last_triangle)
end | NaturalNeighbours | https://github.com/DanielVandH/NaturalNeighbours.jl.git |
|
[
"MIT"
] | 1.3.5 | ac69089b500226d09cb611688ae67015bb107b58 | code | 9169 | """
differentiate(itp::NaturalNeighboursInterpolant, order)
Differentiate a given interpolant `itp` up to degree `order` (1 or 2). The returned object is a
`NaturalNeighboursDifferentiator` struct, which is callable.
!!! warning "Missing vertices"
When the underlying triangulation, `tri`, has points in `get_points(tri)` that are not
vertices of the triangulation itself, the associated derivatives at these points
will be set to zero.
For calling the resulting struct, we define the following methods:
(∂::NaturalNeighboursDifferentiator)(x, y, zᵢ, nc, id::Integer=1; parallel=false, method=default_diff_method(∂), kwargs...)
(∂::NaturalNeighboursDifferentiator)(x, y, id::Integer=1; parallel=false, method=default_diff_method(∂), interpolant_method=Sibson(), rng=Random.default_rng(), project = true, kwargs...)
(∂::NaturalNeighboursDifferentiator)(vals::AbstractVector, x::AbstractVector, y::AbstractVector; parallel=true, method=default_diff_method(∂), interpolant_method=Sibson(), kwargs...)
(∂::NaturalNeighboursDifferentiator{I, O})(x::AbstractVector, y::AbstractVector; parallel=true, method=default_diff_method(∂), interpolant_method=Sibson(), kwargs...) where {I, O}
1. This method is useful if you already have an estimate for the function value, `zᵢ`, at the data site, `(x, y)`, provided you also provide the `NaturalCoordinates` `nc`. `id` is the thread id.
2. This method is for scalars, with `id` referring to a thread id.
3. This method is an in-place method for vectors, storing `∂(x[i], y[i], 1)` into `vals[i]`.
4. This method is similar to (3), but `vals` is constructed and returned.
The available keyword arguments are:
- `parallel=true`: Whether to use multithreading. Ignored for the first two methods.
- `method=default_diff_method(∂)`: Default method for evaluating the interpolant. `default_diff_method(∂)` returns `Direct()`. The method must be a [`AbstractDifferentiator`](@ref).
- `interpolant_method=Sibson()`: The method used for evaluating the interpolant to estimate `zᵢ` for the latter three methods. See [`AbstractInterpolator`](@ref) for the available methods.
- `rng=Random.default_rng()`: The random number generator used for estimating `zᵢ` for the latter three methods, or for constructing the natural coordinates.
- `project=false`: Whether to project any extrapolated points onto the boundary of the convex hull of the data sites and perform two-point interpolation, or to simply replace any extrapolated values with `Inf`, when evaluating the interpolant in the latter three methods.
- `use_cubic_terms=true`: If estimating second order derivatives, whether to use cubic terms. Only relevant for `method == Direct()`.
- `alpha=0.1`: If estimating second order derivatives, the weighting parameter used for estimating the second order derivatives. Only relevant for `method == Iterative()`.
- `use_sibson_weight=true`: Whether to weight the residuals in the associated least squares problems by the associated Sibson coordinates. Only relevant for `method == Iterative()` if `order == 2`.
The outputs are:
- `order == 1`: The scalar methods return a `Tuple` of the form `(∂x, ∂y)`, while the vector methods return a vector of `Tuple`s of the form `(∂x, ∂y)`.
- `order == 2`: The scalar methods return a `(∇, ℋ)`, where `∇` is a `Tuple` of the form `(∂x, ∂y)` and `ℋ` is a `Tuple` of the form `(∂xx, ∂yy, ∂xy)`. The vector methods return a vector of `(∇, ℋ)`s.
!!! warning
Until we implement ghost point extrapolation, behaviour near the convex hull of your data sites may in some cases be undesirable,
despite the extrapolation method we describe above, even for points that are inside the convex hull. If you want to control this
behaviour so that you discard any points that are very close to the convex hull, see `identify_exterior_points` and the `tol`
keyword argument.
"""
differentiate(itp::NaturalNeighboursInterpolant, order) = NaturalNeighboursDifferentiator(itp, order)
# Core evaluation: estimate the order-O derivative(s) of the interpolant at p,
# given an estimate zᵢ of the function value at p and the natural coordinates
# nc of p. id selects the thread-local derivative cache.
function _eval_differentiator(method::AbstractDifferentiator, ∂::NaturalNeighboursDifferentiator{I,O}, p, zᵢ::F, nc, id;
    use_cubic_terms=true,
    alpha=0.1,
    use_sibson_weight=true) where {I,O,F}
    itp = get_interpolant(∂)
    tri = get_triangulation(itp)
    z = get_z(itp)
    d_cache = get_derivative_cache(itp, id)
    initial_gradients = get_gradient(itp)
    # Iterative refinement needs gradients at the data sites as a starting point.
    if method == Iterative() && isnothing(initial_gradients)
        throw(ArgumentError("initial_gradients must be provided for iterative derivative estimation. Consider using e.g. interpolate(tri, z; derivatives = true)."))
    end
    S = get_iterated_neighbourhood(d_cache)
    S′ = get_second_iterated_neighbourhood(d_cache)
    if O == 1
        λ, E = get_taylor_neighbourhood!(S, S′, tri, 1, nc)
        if length(λ) == 1 && !isfinite(λ[1]) # this happens when we extrapolate
            return (F(Inf), F(Inf))
        end
        ∇ = generate_first_order_derivatives(method, tri, z, zᵢ, p, λ, E, d_cache; use_cubic_terms, alpha, use_sibson_weight, initial_gradients)
        return ∇
    else # O == 2
        # Direct() fits higher-degree terms (degree 2 + use_cubic_terms), so it
        # needs a deeper neighbourhood than the Iterative() scheme.
        if method == Direct()
            λ, E = get_taylor_neighbourhood!(S, S′, tri, 2 + use_cubic_terms, nc)
        else
            λ, E = get_taylor_neighbourhood!(S, S′, tri, 1, nc)
        end
        if length(λ) == 1 && !isfinite(λ[1]) # this happens when we extrapolate
            return (F(Inf), F(Inf)), (F(Inf), F(Inf), F(Inf))
        end
        ∇, H = generate_second_order_derivatives(method, tri, z, zᵢ, p, λ, E, d_cache; use_cubic_terms, alpha, use_sibson_weight, initial_gradients)
        return ∇, H
    end
end
# The default differentiation method is always Direct().
@inline default_diff_method(∂) = Direct() # isnothing(get_gradient(get_interpolant(∂))) ? Direct() : Iterative()
# Compute the natural coordinates of p and the corresponding interpolated value
# zᵢ, dispatching on the smoothness order D of the interpolation method:
# D == 0 uses function values only, D == 1 also uses gradients, and D == 2
# additionally uses Hessians.
function _get_nc_and_z(method::AbstractInterpolator{D}, p, z, gradients, hessians, tri, cache=NaturalNeighboursCache(tri); rng=Random.default_rng(), project=true) where {D}
    if (method isa Triangle) || method == Nearest() # coordinates need to be the natural neighbours
        nc = compute_natural_coordinates(Sibson(), tri, p, cache; rng, project)
    else
        nc = compute_natural_coordinates(method, tri, p, cache; rng, project)
    end
    if D == 0
        zᵢ = _eval_natural_coordinates(nc, z)
    elseif D == 1
        zᵢ = _eval_natural_coordinates(method, nc, z, gradients, tri)
    else # D == 2
        zᵢ = _eval_natural_coordinates(method, nc, z, gradients, hessians, tri)
    end
    return nc, zᵢ
end
# Evaluate the differentiator at (x, y) given a precomputed function value zᵢ
# and the natural coordinates nc of the point; id selects a thread-local cache.
@inline function (∂::NaturalNeighboursDifferentiator)(x, y, zᵢ, nc, id::Integer=1; parallel=false, method=default_diff_method(∂), kwargs...)
    diff_method = dwrap(method)
    tri = get_triangulation(get_interpolant(∂))
    F = number_type(tri)
    return _eval_differentiator(diff_method, ∂, (F(x), F(y)), zᵢ, nc, id; kwargs...)
end
# Scalar evaluation at (x, y): first interpolate to estimate the function value
# zᵢ at the point (and obtain its natural coordinates), then differentiate
# using that estimate. id is a thread id.
function (∂::NaturalNeighboursDifferentiator)(x, y, id::Integer=1; parallel=false, method=default_diff_method(∂), interpolant_method=Sibson(), project=false, rng=Random.default_rng(), kwargs...)
    method = dwrap(method)
    interpolant_method = iwrap(interpolant_method)
    itp = get_interpolant(∂)
    tri = get_triangulation(itp)
    F = number_type(tri)
    p = (F(x), F(y))
    n_cache = get_neighbour_cache(itp, id)
    z = get_z(itp)
    gradients = get_gradient(itp)
    hessians = get_hessian(itp)
    nc, zᵢ = _get_nc_and_z(interpolant_method, p, z, gradients, hessians, tri, n_cache; rng, project)
    return ∂(F(x), F(y), zᵢ, nc, id; parallel, method, kwargs...)
end
# In-place vector evaluation: vals[i] is set to the derivative(s) at
# (x[i], y[i]). When parallel, the work is chunked across threads and each
# chunk uses its own thread-local cache (indexed by chunk_id).
function (∂::NaturalNeighboursDifferentiator)(vals::AbstractVector, x::AbstractVector, y::AbstractVector; parallel=true, method=default_diff_method(∂), interpolant_method=Sibson(), kwargs...)
    # Explicit exception for input validation; @assert may be compiled out.
    length(x) == length(y) == length(vals) || throw(DimensionMismatch("x, y, and vals must have the same length."))
    method = dwrap(method)
    interpolant_method = iwrap(interpolant_method)
    # populate_cache! fills the Triangle method's cache once, up front.
    interpolant_method isa Triangle && populate_cache!(interpolant_method, get_triangulation(get_interpolant(∂)))
    if !parallel
        for i in eachindex(x, y)
            vals[i] = ∂(x[i], y[i], 1; method, interpolant_method, kwargs...)
        end
    else
        nt = Base.Threads.nthreads()
        chunked_iterator = chunks(vals, nt)
        Threads.@threads for (xrange, chunk_id) in chunked_iterator
            for i in xrange
                vals[i] = ∂(x[i], y[i], chunk_id; method, interpolant_method, kwargs...)
            end
        end
    end
    return nothing
end
# Vector evaluation: allocates and returns the results vector. The element type
# depends on the order O: order 1 gives gradients (∂x, ∂y); order 2 gives
# (gradient, Hessian) pairs.
function (∂::NaturalNeighboursDifferentiator{I,O})(x::AbstractVector, y::AbstractVector; parallel=true, method=default_diff_method(∂), interpolant_method=Sibson(), kwargs...) where {I,O}
    # Explicit exception for input validation; @assert may be compiled out.
    length(x) == length(y) || throw(DimensionMismatch("x and y must have the same length."))
    n = length(x)
    itp = get_interpolant(∂)
    tri = get_triangulation(itp)
    F = number_type(tri)
    if O == 1
        vals = Vector{NTuple{2,F}}(undef, n)
    else # O == 2
        vals = Vector{Tuple{NTuple{2,F},NTuple{3,F}}}(undef, n)
    end
    method = dwrap(method)
    interpolant_method = iwrap(interpolant_method)
    interpolant_method isa Triangle && populate_cache!(interpolant_method, tri)
    # Delegate to the in-place method above.
    ∂(vals, x, y; method, interpolant_method, parallel, kwargs...)
    return vals
end
|
[
"MIT"
] | 1.3.5 | ac69089b500226d09cb611688ae67015bb107b58 | code | 8301 | """
abstract type AbstractDifferentiator end
Abstract type for defining the method used for differentiating an interpolant or generating derivatives at data sites.
- `Direct()`: Generate derivatives directly with one least squares problem.
- `Iterative()`: Generate derivatives iteratively: Gradients are estimated first, and then both gradients and Hessians are estimated with the initial gradients used to refine the results.
"""
abstract type AbstractDifferentiator end
@doc """
Direct()
Generate derivatives directly with one least squares problem.
""" struct Direct <: AbstractDifferentiator end
@doc """
Iterative()
Generate derivatives iteratively: Gradients are estimated first, and then both gradients and Hessians are estimated with the initial gradients used to refine the results.
""" struct Iterative <: AbstractDifferentiator end
dwrap(d::AbstractDifferentiator) = d
function dwrap(d::Symbol)
if d == :direct
return Direct()
elseif d == :iterative
return Iterative()
else
throw(ArgumentError("Unknown differentiator: $d"))
end
end
"""
generate_derivatives(
tri,
z,
derivative_caches=[DerivativeCache(tri) for _ in 1:Base.Threads.nthreads()],
neighbour_caches=[NaturalNeighboursCache(tri) for _ in 1:Base.Threads.nthreads()];
parallel=true,
method=Direct(),
use_cubic_terms=true,
alpha=0.1,
initial_gradients=dwrap(method) == Direct() ? nothing : generate_gradients(tri, z, derivative_caches, neighbour_caches; method=dwrap(method), parallel, rng)
)
Generate derivatives at the data sites defined by the triangulation `tri` with associated function values `tri`.
# Arguments
- `tri`: A `Triangulation` object.
- `z`: A vector of function values at the data sites.
- `derivative_caches=[DerivativeCache(tri) for _ in 1:Base.Threads.nthreads()]`: A vector of `DerivativeCache` objects, one for each thread.
- `neighbour_caches=[NaturalNeighboursCache(tri) for _ in 1:Base.Threads.nthreads()]`: A vector of `NaturalNeighboursCache` objects, one for each thread.
# Keyword Arguments
- `parallel=true`: Whether to use multithreading or not.
- `method=Direct()`: The method used for generating the derivatives. See [`AbstractDifferentiator`](@ref).
- `use_cubic_terms=true`: Whether to use cubic terms for estimating the second order derivatives. Only relevant for `method == Direct()`.
- `alpha=0.1`: The weighting parameter used for estimating the second order derivatives. Only relevant for `method == Iterative()`.
- `initial_gradients=dwrap(method) == Direct() ? nothing : generate_gradients(tri, z, derivative_caches, neighbour_caches; method=dwrap(method), parallel)`: The initial gradients used for estimating the second order derivatives. Only relevant for `method == Iterative()`.
# Output
- `∇`: A vector of gradients at the data sites. Each element is a `Tuple` defining the gradient entries.
- `ℋ`: A vector of Hessians at the data sites. Each element is a `Tuple` defining the Hessian entries in the form `(H[1, 1], H[2, 2], H[1, 2])` (`H[2, 1]` is the same as `H[2, 2]`).
"""
function generate_derivatives(
tri,
z,
derivative_caches=[DerivativeCache(tri) for _ in 1:Base.Threads.nthreads()],
neighbour_caches=[NaturalNeighboursCache(tri) for _ in 1:Base.Threads.nthreads()];
parallel=true,
method=Direct(),
use_cubic_terms=true,
alpha=0.1,
initial_gradients=dwrap(method) == Direct() ? nothing : generate_gradients(tri, z, derivative_caches, neighbour_caches; parallel)
)
n = length(z)
F = number_type(tri)
∇ = Vector{NTuple{2,F}}(undef, n)
ℋ = Vector{NTuple{3,F}}(undef, n)
if !parallel
generate_second_order_derivatives!(∇, ℋ, method, tri, z, eachindex(z), derivative_caches, neighbour_caches, 1; alpha, use_cubic_terms, initial_gradients)
else
nt = length(derivative_caches)
chunked_iterator = chunks(z, nt)
Base.Threads.@threads for (zrange, chunk_id) in chunked_iterator
generate_second_order_derivatives!(∇, ℋ, method, tri, z, zrange, derivative_caches, neighbour_caches, chunk_id; alpha, use_cubic_terms, initial_gradients)
end
end
return ∇, ℋ
end
"""
generate_gradients(
tri,
z,
derivative_caches=[DerivativeCache(tri) for _ in 1:Base.Threads.nthreads()],
neighbour_caches=[NaturalNeighboursCache(tri) for _ in 1:Base.Threads.nthreads()];
parallel=true
)
Generate gradients at the data sites defined by the triangulation `tri` with associated function values `tri`.
# Arguments
- `tri`: A `Triangulation` object.
- `z`: A vector of function values at the data sites.
- `derivative_caches=[DerivativeCache(tri) for _ in 1:Base.Threads.nthreads()]`: A vector of `DerivativeCache` objects, one for each thread.
- `neighbour_caches=[NaturalNeighboursCache(tri) for _ in 1:Base.Threads.nthreads()]`: A vector of `NaturalNeighboursCache` objects, one for each thread.
# Keyword Arguments
- `parallel=true`: Whether to use multithreading or not.
# Output
- `∇`: A vector of gradients at the data sites. Each element is a `Tuple` defining the gradient entries.
"""
function generate_gradients(
tri,
z,
derivative_caches=[DerivativeCache(tri) for _ in 1:Base.Threads.nthreads()],
neighbour_caches=[NaturalNeighboursCache(tri) for _ in 1:Base.Threads.nthreads()];
parallel=true
)
n = length(z)
F = number_type(tri)
∇ = Vector{NTuple{2,F}}(undef, n)
if !parallel
generate_first_order_derivatives!(∇, Direct(), tri, z, eachindex(z), derivative_caches, neighbour_caches, 1)
else
nt = length(derivative_caches)
chunked_iterator = chunks(∇, nt)
Base.Threads.@threads for (zrange, chunk_id) in chunked_iterator
generate_first_order_derivatives!(∇, Direct(), tri, z, zrange, derivative_caches, neighbour_caches, chunk_id)
end
end
return ∇
end
# need these redirections to avoid Boxing
# Fill ∇[i] and ℋ[i] for each i in zrange, using the thread-local caches at
# index id. Points present in get_points(tri) but not in the triangulation get
# zero derivatives (see the `differentiate` docstring warning).
@inline function generate_second_order_derivatives!(∇, ℋ, method, tri, z, zrange, derivative_caches, neighbour_caches, id; alpha, use_cubic_terms, initial_gradients)
    for i in zrange
        zᵢ = z[i]
        if !DelaunayTriangulation.has_vertex(tri, i)
            # Not a vertex of the triangulation: derivatives default to zero.
            ∇[i] = (zero(zᵢ), zero(zᵢ))
            ℋ[i] = (zero(zᵢ), zero(zᵢ), zero(zᵢ))
        else
            d_cache = derivative_caches[id]
            n_cache = neighbour_caches[id]
            S = get_iterated_neighbourhood(d_cache)
            S′ = get_second_iterated_neighbourhood(d_cache)
            # Direct() needs a deeper neighbourhood (degree 2 + use_cubic_terms)
            # than Iterative() (degree 1).
            # NOTE(review): a method other than Direct()/Iterative() would leave
            # λ, E undefined here — callers must pass one of these two.
            if method == Direct()
                λ, E = get_taylor_neighbourhood!(S, S′, tri, i, 2 + use_cubic_terms, n_cache)
            elseif method == Iterative()
                λ, E = get_taylor_neighbourhood!(S, S′, tri, i, 1, n_cache)
            end
            ∇[i], ℋ[i] = generate_second_order_derivatives(method, tri, z, zᵢ, i, λ, E, derivative_caches, id; alpha, use_cubic_terms, initial_gradients)
        end
    end
    return nothing
end
# Fill ∇[i] for each i in zrange using the thread-local caches selected by `id`.
@inline function generate_first_order_derivatives!(∇, method, tri, z, zrange, derivative_caches, neighbour_caches, id)
    d_cache = derivative_caches[id]
    n_cache = neighbour_caches[id]
    for i in zrange
        zᵢ = z[i]
        if !DelaunayTriangulation.has_vertex(tri, i)
            # Points in get_points(tri) that are not vertices of the triangulation
            # get a zero gradient (documented behaviour of interpolate).
            ∇[i] = (zero(zᵢ), zero(zᵢ))
            continue
        end
        S = get_iterated_neighbourhood(d_cache)
        S′ = get_second_iterated_neighbourhood(d_cache)
        λ, E = get_taylor_neighbourhood!(S, S′, tri, i, 1, n_cache)
        ∇[i] = generate_first_order_derivatives(method, tri, z, zᵢ, i, λ, E, derivative_caches, id)
    end
    return nothing
end
# Cache-indexing redirection: pick out the id-th thread-local cache here and
# forward to the single-cache method (kept separate to avoid closure boxing in
# the threaded drivers).
@inline function generate_first_order_derivatives(method::AbstractDifferentiator, tri, z, zᵢ, i, λ, E, derivative_caches, id; kwargs...)
    cache = derivative_caches[id]
    return generate_first_order_derivatives(method, tri, z, zᵢ, i, λ, E, cache; kwargs...)
end
# Cache-indexing redirection for the second-order estimators; see the note on
# boxing avoidance above the first-order redirection.
@inline function generate_second_order_derivatives(method::AbstractDifferentiator, tri, z, zᵢ, i, λ, E, derivative_caches, id; kwargs...)
    cache = derivative_caches[id]
    return generate_second_order_derivatives(method, tri, z, zᵢ, i, λ, E, cache; kwargs...)
end
|
[
"MIT"
] | 1.3.5 | ac69089b500226d09cb611688ae67015bb107b58 | code | 2219 | function get_taylor_neighbourhood!(S, S′, tri, i::Integer, d, ::Any; rng=nothing)
iterated_neighbourhood!(S, tri, i, d)
return one(i), S
end
# Taylor neighbourhood of an arbitrary point `p`: first compute the natural
# (Sibson) coordinates of `p`, then expand that neighbourhood according to the
# expansion degree `d`. `S` and `S′` are scratch sets.
function get_taylor_neighbourhood!(S, S′, tri, p, d, n_cache=NaturalNeighboursCache(tri); rng=Random.default_rng())
    # Input validation: @assert is for internal invariants and may be compiled
    # out at higher optimisation levels, so throw an explicit error instead.
    d ∈ (1, 2, 3) || throw(ArgumentError("d must be 1, 2 or 3."))
    nc = compute_natural_coordinates(Sibson(), tri, p, n_cache; rng)
    return get_taylor_neighbourhood!(S, S′, tri, d, nc)
end
# Expand the natural-neighbour envelope of `nc` into a degree-`d` Taylor
# neighbourhood. If the interpolation point coincides with a data site (a
# natural coordinate of exactly 1), delegate to the vertex method, which builds
# a d-ring neighbourhood around that vertex instead.
function get_taylor_neighbourhood!(S, S′, tri, d, nc::NaturalCoordinates)
    coordinates = get_coordinates(nc)
    envelope = get_indices(nc)
    idx = findfirst(isone, coordinates)
    if !isnothing(idx)
        return get_taylor_neighbourhood!(S, S′, tri, envelope[idx], d, nc)
    elseif d == 2
        # Bit hard to use iterated_neighbourhood here, since we want to make sure we don't overwrite any
        # coordinates from the initial set of natural neighbours. So we just do it manually. Will
        # need to generalise if we ever want higher order derivatives.
        empty!(S′)
        for i in envelope
            for j in get_neighbours(tri, i)
                if !is_ghost_vertex(j) && j ∉ envelope
                    push!(S′, j)
                end
            end
        end
        # S′ now holds the second ring; append it so `envelope` covers both rings.
        for s in S′
            push!(envelope, s)
        end
    elseif d == 3
        # Second ring, as in the d == 2 case.
        empty!(S′)
        for i in envelope
            for j in get_neighbours(tri, i)
                if !is_ghost_vertex(j) && j ∉ envelope
                    push!(S′, j)
                end
            end
        end
        # Third ring: neighbours of the second ring not already seen.
        empty!(S)
        for s in S′
            for j in get_neighbours(tri, s)
                if !is_ghost_vertex(j) && j ∉ envelope && j ∉ S′
                    push!(S, j)
                end
            end
        end
        for s in S
            push!(envelope, s)
        end
        for s in S′
            push!(envelope, s)
        end
    end
    # NOTE(review): `coordinates` only covers the original natural neighbours;
    # callers use get_λ to assign unit weight to the vertices appended above.
    return coordinates, envelope
end
# Weight attached to the j-th neighbour when assembling the weighted
# least-squares systems. When Sibson weighting is disabled, or j indexes past
# the stored coordinates (the vertices appended for degree-2/3 neighbourhoods
# carry no natural coordinate), the weight is simply one.
function get_λ(coordinates::Vector{F}, j, use_sibson_weight) where {F}
    weighted = use_sibson_weight && j ≤ lastindex(coordinates)
    return weighted ? coordinates[j] : one(F)
end
# A scalar "coordinates" argument means no per-neighbour weights exist at all.
get_λ(coordinates::F, j, use_sibson_weight) where {F<:Number} = one(F)
| NaturalNeighbours | https://github.com/DanielVandH/NaturalNeighbours.jl.git |
|
[
"MIT"
] | 1.3.5 | ac69089b500226d09cb611688ae67015bb107b58 | code | 3116 | function _generate_first_order_derivatives_direct(
tri,
z,
zᵢ,
i,
λ,
E,
d_cache=DerivativeCache(tri);
use_sibson_weight=!(i isa Integer))
X = get_linear_matrix(d_cache)
b = get_rhs_vector(d_cache)
p = get_point(tri, i)
xᵢ, yᵢ = getxy(p)
m = length(E)
resize!(X, 2, m)
resize!(b, m)
for (j, s) in enumerate(E)
λₛ = get_λ(λ, j, use_sibson_weight)
zₛ = z[s]
pₛ = get_point(tri, s)
xₛ, yₛ = getxy(pₛ)
δ = sqrt((xᵢ - xₛ)^2 + (yᵢ - yₛ)^2)
βₛ = sqrt(λₛ) * inv(δ)
X[1, j] = βₛ * (xₛ - xᵢ)
X[2, j] = βₛ * (yₛ - yᵢ)
b[j] = βₛ * (zₛ - zᵢ)
end
@static if VERSION < v"1.7.0"
qr_X = qr!(Matrix(X'))
else
qr_X = qr!(X')
end
∇ = copy(b) # This is the same fix in https://github.com/JuliaLang/julia/pull/43510 to avoid views, avoiding shared data issues
ldiv!(qr_X, ∇)
return (∇[1], ∇[2])
end
# Jointly estimate the gradient and Hessian at data site `i` by a weighted
# least-squares fit of a quadratic (optionally cubic) Taylor expansion over the
# neighbourhood `E`. Returns ((∂x, ∂y), (∂xx, ∂yy, ∂xy)); cubic coefficients,
# if fitted, are discarded.
function _generate_second_order_derivatives_direct(
    tri,
    z,
    zᵢ,
    i,
    E,
    d_cache=DerivativeCache(tri);
    use_cubic_terms=true)
    # Scratch design matrix: 9 rows with cubic terms, 5 without.
    X = if use_cubic_terms
        get_quadratic_matrix(d_cache)
    else
        get_quadratic_matrix_no_cubic(d_cache)
    end
    b = get_rhs_vector(d_cache)
    p = get_point(tri, i)
    xᵢ, yᵢ = getxy(p)
    m = length(E)
    resize!(X, 5 + 4use_cubic_terms, m)
    resize!(b, m)
    for (j, s) in enumerate(E)
        zₛ = z[s]
        pₛ = get_point(tri, s)
        xₛ, yₛ = getxy(pₛ)
        # Inverse-distance weight, applied to both the row and the RHS entry.
        δ = sqrt((xᵢ - xₛ)^2 + (yᵢ - yₛ)^2)
        βₛ = inv(δ)
        X[1, j] = βₛ * (xₛ - xᵢ)
        X[2, j] = βₛ * (yₛ - yᵢ)
        X[3, j] = βₛ * (xₛ - xᵢ)^2 / 2
        X[4, j] = βₛ * (yₛ - yᵢ)^2 / 2
        X[5, j] = βₛ * (xₛ - xᵢ) * (yₛ - yᵢ)
        if use_cubic_terms
            X[6, j] = βₛ * (xₛ - xᵢ)^3 / 6
            X[7, j] = βₛ * (yₛ - yᵢ)^3 / 6
            X[8, j] = βₛ * (xₛ - xᵢ)^2 * (yₛ - yᵢ) / 2
            X[9, j] = βₛ * (xₛ - xᵢ) * (yₛ - yᵢ)^2 / 2
        end
        b[j] = βₛ * (zₛ - zᵢ)
    end
    @static if VERSION < v"1.7.0"
        # In-place qr!/ldiv! on an adjoint is not available pre-1.7; fall back to `\`.
        ∇ℋ = Matrix(X') \ b
        return (∇ℋ[1], ∇ℋ[2]), (∇ℋ[3], ∇ℋ[4], ∇ℋ[5])
    else
        qr_X = qr!(X')
        ∇ℋ = copy(b) # This is the same fix in https://github.com/JuliaLang/julia/pull/43510 to avoid views, avoiding shared data issues
        5 + 4use_cubic_terms > m && resize!(∇ℋ, 5 + 4use_cubic_terms) # See Issue #11
        ldiv!(qr_X, ∇ℋ)
        return (∇ℋ[1], ∇ℋ[2]), (∇ℋ[3], ∇ℋ[4], ∇ℋ[5])
    end
end
# Single-cache entry point for first-order derivative estimation. All
# differentiators (Iterative() included) use the direct weighted least-squares
# gradient fit. Sibson weighting defaults on only for non-vertex evaluation
# points: an Integer `i` denotes a data site.
function generate_first_order_derivatives(method::AbstractDifferentiator, tri, z, zᵢ, i, λ, E, d_cache=DerivativeCache(tri);
    use_sibson_weight=!(i isa Integer), kwargs...)
    return _generate_first_order_derivatives_direct(tri, z, zᵢ, i, λ, E, d_cache; use_sibson_weight)
end
# Single-cache entry point for the Direct() second-order estimator. The weights
# λ are accepted for interface parity with the first-order method but unused by
# the joint quadratic fit.
function generate_second_order_derivatives(::Direct, tri, z, zᵢ, i, λ, E, d_cache=DerivativeCache(tri);
    use_cubic_terms=true, kwargs...)
    return _generate_second_order_derivatives_direct(tri, z, zᵢ, i, E, d_cache; use_cubic_terms)
end
| NaturalNeighbours | https://github.com/DanielVandH/NaturalNeighbours.jl.git |
|
[
"MIT"
] | 1.3.5 | ac69089b500226d09cb611688ae67015bb107b58 | code | 2392 | function _generate_second_order_derivatives_iterative(
tri,
z,
zᵢ,
i,
λ,
E,
initial_gradients,
d_cache=DerivativeCache(tri),
alpha=0.1,
use_sibson_weight=true
)
X = get_quadratic_matrix_no_cubic(d_cache)
b = get_rhs_vector(d_cache)
p = get_point(tri, i)
xᵢ, yᵢ = getxy(p)
m = length(E)
resize!(X, 5, 3m)
resize!(b, 3m)
α = alpha
α′ = one(alpha) - alpha
for (j, s) in enumerate(E)
λₛ = get_λ(λ, j, use_sibson_weight)
zₛ = z[s]
pₛ = get_point(tri, s)
xₛ, yₛ = getxy(pₛ)
∇ₛ¹, ∇ₛ² = initial_gradients[s]
δ = (xᵢ - xₛ)^2 + (yᵢ - yₛ)^2
βₛ = λₛ * inv(δ)
γₛ = sqrt(α * βₛ)
γₛ′ = sqrt(α′ * βₛ)
X[1, j] = γₛ * (xₛ - xᵢ)
X[2, j] = γₛ * (yₛ - yᵢ)
X[3, j] = γₛ * (xₛ - xᵢ)^2 / 2
X[4, j] = γₛ * (yₛ - yᵢ)^2 / 2
X[5, j] = γₛ * (xₛ - xᵢ) * (yₛ - yᵢ)
j′ = m + j
X[1, j′] = γₛ′
X[2, j′] = zero(γₛ′)
X[3, j′] = γₛ′ * (xₛ - xᵢ)
X[4, j′] = zero(γₛ′)
X[5, j′] = γₛ′ * (yₛ - yᵢ)
j′′ = 2m + j
X[1, j′′] = zero(γₛ′)
X[2, j′′] = γₛ′
X[3, j′′] = zero(γₛ′)
X[4, j′′] = γₛ′ * (yₛ - yᵢ)
X[5, j′′] = γₛ′ * (xₛ - xᵢ)
z̃ₛ = zₛ - zᵢ
b[j] = γₛ * z̃ₛ
b[j′] = γₛ′ * ∇ₛ¹
b[j′′] = γₛ′ * ∇ₛ²
end
@static if VERSION < v"1.7.0"
∇ℋ = Matrix(X') \ b
return (∇ℋ[1], ∇ℋ[2]), (∇ℋ[3], ∇ℋ[4], ∇ℋ[5])
else
qr_X = qr!(X')
∇ℋ = copy(b) # This is the same fix in https://github.com/JuliaLang/julia/pull/43510 to avoid views, avoiding shared data issues
5 > 3m && resize!(∇ℋ, 5)
ldiv!(qr_X, ∇ℋ)
return (∇ℋ[1], ∇ℋ[2]), (∇ℋ[3], ∇ℋ[4], ∇ℋ[5])
end
end
# Iterative joint gradient/Hessian estimation; requires initial gradient
# estimates at all data sites (see generate_gradients). `use_cubic_terms` is
# accepted for interface compatibility with Direct() but ignored.
function generate_second_order_derivatives(method::Iterative, tri, z, zᵢ, i, λ, E, d_cache=DerivativeCache(tri);
    alpha=0.1, initial_gradients=nothing, use_cubic_terms=nothing, use_sibson_weight=true)
    isnothing(initial_gradients) && throw(ArgumentError("initial_gradients must be provided for iterative derivative estimation. Consider using e.g. interpolate(tri, z; derivatives = true)."))
    return _generate_second_order_derivatives_iterative(tri, z, zᵢ, i, λ, E, initial_gradients, d_cache, alpha, use_sibson_weight)
end
|
[
"MIT"
] | 1.3.5 | ac69089b500226d09cb611688ae67015bb107b58 | code | 4316 | @inline function _eval_natural_coordinates(coordinates, indices, z)
val = zero(eltype(z))
for (λ, k) in zip(coordinates, indices)
zₖ = z[k]
val += λ * zₖ
end
return val
end
# Unpack a NaturalCoordinates object and evaluate the interpolant as the
# λ-weighted sum of the data values at the natural neighbours.
@inline _eval_natural_coordinates(nc::NaturalCoordinates{F}, z) where {F} =
    _eval_natural_coordinates(get_coordinates(nc), get_indices(nc), z)
# Sibson-1 (C(1)) evaluation: blend the Sibson-0 value with the gradient-based
# estimate ζ using the weights α, β. With ≤ 2 natural neighbours (2 means
# extrapolation, 1 means evaluation at a data site) only the Sibson-0 value
# is defined, so return it directly.
function _eval_natural_coordinates(::Sibson{1}, nc::NaturalCoordinates{F}, z, gradients, tri) where {F}
    base_value = _eval_natural_coordinates(nc, z)
    length(get_coordinates(nc)) ≤ 2 && return base_value
    ζ, α, β = _compute_sibson_1_coordinates(nc, tri, z, gradients)
    return (α * base_value + β * ζ) / (α + β)
end
# Farin (C(1)) evaluation, falling back to the plain weighted sum when the
# neighbourhood is degenerate (≤ 2 coordinates: extrapolation or a data site).
@inline function _eval_natural_coordinates(::Farin, nc::NaturalCoordinates{F}, z, gradients, tri) where {F}
    degenerate = length(get_coordinates(nc)) ≤ 2
    return degenerate ? _eval_natural_coordinates(nc, z) : _compute_farin_coordinates(nc, tri, z, gradients)
end
@inline function _eval_natural_coordinates(::Hiyoshi{2}, nc::NaturalCoordinates{F}, z, gradients, hessians, tri) where {F}
return _compute_hiyoshi_coordinates(nc, tri, z, gradients, hessians)
end
# Fallback evaluation for coordinate-based C(0) methods (Sibson-0, Laplace,
# Nearest): compute the method's natural coordinates at p and form the
# weighted sum of the data values.
@inline function _eval_interp(method, itp::NaturalNeighboursInterpolant, p, cache; kwargs...)
    nc = compute_natural_coordinates(method, get_triangulation(itp), p, cache; kwargs...)
    return _eval_natural_coordinates(nc, get_z(itp))
end
# Piecewise-linear (Triangle) evaluation at p. Locates the containing triangle
# via jump-and-march, warm-started from the last triangle found by this cache,
# then evaluates the plane through the three data values.
@inline function _eval_interp(method::Triangle, itp::NaturalNeighboursInterpolant, p, cache; project=true, kwargs...)
    tri = get_triangulation(itp)
    z = get_z(itp)
    last_triangle = get_last_triangle(cache)
    V = jump_and_march(tri, p; try_points=last_triangle[], kwargs...)
    i, j, return_flag = check_for_extrapolation(tri, V, p, last_triangle)
    if return_flag
        # p is outside the convex hull: either project onto the hull edge (i, j)
        # and interpolate linearly along it, or flag the value as Inf.
        F = number_type(tri)
        if project
            t = two_point_interpolate(tri, i, j, p)
            return z[i] * (1 - t) + z[j] * t
        else
            return typemax(F)
        end
    else
        F = number_type(tri)
        i, j, k = triangle_vertices(sort_triangle(V))
        # Use precomputed shape coefficients when the method's cache is populated.
        if method.allow_cache && !isempty(method.s)
            s₁, s₂, s₃, s₄, s₅, s₆, s₇, s₈, s₉ = method.s[(i, j, k)]
        else
            s₁, s₂, s₃, s₄, s₅, s₆, s₇, s₈, s₉ = _compute_triangle_shape_coefficients(tri, i, j, k)
        end
        # Coefficients of the interpolating plane αx + βy + γ over this triangle.
        α = s₁ * z[i] + s₂ * z[j] + s₃ * z[k]
        β = s₄ * z[i] + s₅ * z[j] + s₆ * z[k]
        γ = s₇ * z[i] + s₈ * z[j] + s₉ * z[k]
        x, y = getxy(p)
        return F(α * x + β * y + γ)
    end
end
# Gradient-based interpolation (Sibson-1 / Farin). Falls back to the plain
# Sibson-0 value when ≤ 2 natural neighbours are found (2 means extrapolation,
# 1 means evaluation at a data site), since the C(1) formulas need a proper
# neighbourhood.
@inline function _eval_interp(method::Union{<:Farin,Sibson{1}}, itp::NaturalNeighboursInterpolant, p, cache; kwargs...)
    ∇ = get_gradient(itp)
    isnothing(∇) && throw(ArgumentError("Gradients must be provided for Sibson-1, Farin, or Hiyoshi-2 interpolation. Consider using e.g. interpolate(tri, z; derivatives = true)."))
    tri = get_triangulation(itp)
    nc = compute_natural_coordinates(Sibson(), tri, p, cache; kwargs...)
    z = get_z(itp)
    length(get_coordinates(nc)) ≤ 2 && return _eval_natural_coordinates(nc, z)
    return _eval_natural_coordinates(method, nc, z, ∇, tri)
end
# Hiyoshi-2 (C(2)) evaluation; needs both gradients and Hessians at the data
# sites. Degenerate neighbourhoods (≤ 2 coordinates) fall back to the plain
# weighted sum.
@inline function _eval_interp(method::Hiyoshi{2}, itp::NaturalNeighboursInterpolant, p, cache; kwargs...)
    ∇ = get_gradient(itp)
    ℋ = get_hessian(itp)
    if isnothing(∇) || isnothing(ℋ)
        throw(ArgumentError("Gradients and Hessians must be provided for Hiyoshi-2 interpolation. Consider using e.g. interpolate(tri, z; derivatives = true)."))
    end
    tri = get_triangulation(itp)
    nc = compute_natural_coordinates(Sibson(), tri, p, cache; kwargs...)
    z = get_z(itp)
    length(get_coordinates(nc)) ≤ 2 && return _eval_natural_coordinates(nc, z)
    return _eval_natural_coordinates(method, nc, z, ∇, ℋ, tri)
end
|
[
"MIT"
] | 1.3.5 | ac69089b500226d09cb611688ae67015bb107b58 | code | 2671 | function two_point_interpolate(tri, i, j, c)
# Project c onto the line through (a, b): https://stackoverflow.com/a/15187473
# The orthogonal projection is not guaranteed to be on the line segment (corresponding
# to t < 0 or t > 1), in which case the weights are no longer a convex combination.
a, b = get_point(tri, i, j)
ax, ay = getxy(a)
bx, by = getxy(b)
cx, cy = getxy(c)
ℓ² = (ax - bx)^2 + (ay - by)^2
t = (cx - ax) * (bx - ax) + (cy - ay) * (by - ay)
t /= ℓ²
return t
end
# Two-point (hull-edge) coordinates for r. With `project`, r is projected onto
# the segment (i, j) and assigned the barycentric pair (1 - t, t); otherwise a
# single Inf coordinate flags the value as extrapolated/undefined.
function two_point_interpolate!(coordinates::AbstractVector{F}, envelope, tri, i, j, r, project=true) where {F}
    if project
        t = two_point_interpolate(tri, i, j, r)
        resize!(coordinates, 2)
        resize!(envelope, 2)
        coordinates[1], coordinates[2] = one(t) - t, t
        envelope[1], envelope[2] = i, j
    else
        resize!(coordinates, 1)
        resize!(envelope, 1)
        coordinates[1] = F(Inf)
        envelope[1] = i
    end
    return NaturalCoordinates(coordinates, envelope, r, tri)
end
# Decide whether evaluating at `interpolation_point` requires extrapolation.
# Returns (i, j, flag): when flag is true, (i, j) is the hull edge to project
# onto; when false, V was an interior triangle. Also updates `last_triangle`
# (a Ref) to warm-start the next point-location query.
function check_for_extrapolation(tri, V, interpolation_point, last_triangle)
    if is_ghost_triangle(V)
        V = sort_triangle(V)
        i, j, _ = triangle_vertices(V)
        last_triangle[] = (i, j, get_adjacent(tri, j, i))
    else
        last_triangle[] = triangle_vertices(V)
    end
    F = number_type(tri)
    if is_boundary_triangle(tri, V)
        # The point may sit exactly on the hull edge of a boundary triangle, in
        # which case we treat it as a ghost-triangle (extrapolation) case.
        _V = replace_boundary_triangle_with_ghost_triangle(tri, V)
        _u, _w, _ = triangle_vertices(_V)
        cert = point_position_relative_to_line(tri, _u, _w, interpolation_point)
        if is_collinear(cert)
            cert = point_position_on_line_segment(tri, _u, _w, interpolation_point)
            if is_on(cert) || is_degenerate(cert)
                V = _V
            end
        elseif F ≠ Float64
            # DelaunayTriangulation.jl only guarantees exact triangulations for Float64 types,
            # due to limitations in ExactPredicates.jl. If you use Float64 everywhere, then this check
            # is redundant. But if you use Float32 anywhere, then the predicate above could fail. Even
            # missing a Float32 triangulation and a provided Float64 query point can cause issues.
            p, q = get_point(tri, _u, _w)
            A = triangle_area(p, q, interpolation_point)
            if abs(A) < F(1e-14)
                V = _V
            end
        end
    end
    if is_ghost_triangle(V)
        V = sort_triangle(V)
        i, j, _ = triangle_vertices(V)
        return i, j, true
    end
    i, j, _ = triangle_vertices(V)
    return i, j, false
end
| NaturalNeighbours | https://github.com/DanielVandH/NaturalNeighbours.jl.git |
|
[
"MIT"
] | 1.3.5 | ac69089b500226d09cb611688ae67015bb107b58 | code | 10644 | """
interpolate(tri::Triangulation, z; gradient=nothing, hessian=nothing, derivatives=false, kwargs...)
interpolate(points, z; gradient=nothing, hessian=nothing, derivatives=false, kwargs...)
interpolate(x::AbstractVector, y::AbstractVector, z; gradient=nothing, hessian=nothing, derivatives=false, kwargs...)
Construct an interpolant over the data `z` at the sites defined by the triangulation `tri` (or `points`, or `(x, y)`). See the Output
section for a description of how to use the interpolant `itp`.
!!! warning "Missing vertices"
When the underlying triangulation, `tri`, has points in `get_points(tri)` that are not
vertices of the triangulation itself, the associated derivatives (relevant only if `derivatives=true`) at these points
will be set to zero.
# Keyword Arguments
- `gradient=nothing`: The gradients at the corresponding data sites of `z`. Will be generated if `isnothing(gradient)` and `derivatives==true`.
- `hessian=nothing`: The hessians at the corresponding data sites of `z`. Will be generated if `isnothing(hessian)` and `derivatives==true`.
- `derivatives=false`: Whether to generate derivatives at the data sites of `z`. See also [`generate_derivatives`](@ref).
- `kwargs...`: Keyword arguments passed to [`generate_derivatives`](@ref).
# Output
The returned value is a `NaturalNeighboursInterpolant` struct. This struct is callable, with the following methods defined:
(itp::NaturalNeighboursInterpolant)(x, y, id::Integer=1; parallel=false, method=Sibson(), project = true, kwargs...)
(itp::NaturalNeighboursInterpolant)(vals::AbstractVector, x::AbstractVector, y::AbstractVector; parallel=true, method=Sibson(), project = true, kwargs...)
(itp::NaturalNeighboursInterpolant)(x::AbstractVector, y::AbstractVector; parallel=true, method=Sibson(), project = true, kwargs...)
1. The first method is for scalars, with `id` referring to a thread id.
2. This method is an in-place method for vectors, storing `itp(x[i], y[i])` into `vals[i]`.
3. This method is similar to (2), but `vals` is constructed and returned.
In each method, `method` defines the method used for evaluating the interpolant, which is some [`AbstractInterpolator`](@ref). For the first
method, `parallel` is ignored, but for the latter two methods it defines whether to use multithreading or not for evaluating the interpolant at
all the points. The `kwargs...` argument is passed into `add_point!` from DelaunayTriangulation.jl, e.g. you could pass some `rng`. Lastly,
the `project` argument determines whether extrapolation is performed by projecting any exterior points onto the boundary of the convex hull
of the data sites and performing two-point interpolation, or to simply replace any extrapolated values with `Inf`.
!!! performance
For the best performance when evaluating the interpolant at many points, either of the second or
third methods are preferred over repeatedly calling the first.
!!! warning
Until we implement ghost point extrapolation, behaviour near the convex hull of your data sites may in some cases be undesirable,
despite the extrapolation method we describe above, even for points that are inside the convex hull. If you want to control this
behaviour so that you discard any points that are very close to the convex hull, see `identify_exterior_points` and the `tol`
keyword argument.
"""
interpolate(tri::Triangulation, z; gradient=nothing, hessian=nothing, kwargs...) = NaturalNeighboursInterpolant(tri, z, gradient, hessian; kwargs...)
"""
abstract type AbstractInterpolator{D}
Abstract type for defining the method used for evaluating an interpolant. `D` is, roughly, defined to be
the smoothness at the data sites (currently only relevant for `Sibson`). The available subtypes are:
- `Sibson(d)`: Interpolate via the Sibson interpolant, with `C(d)` continuity at the data sites. Only defined for `D ∈ (0, 1)`. If `D == 1`, gradients must be provided.
- `Triangle(d)`: Interpolate based on vertices of the triangle that the point resides in, with `C(0)` continuity at the data sites. `D` is ignored.
- `Nearest(d)`: Interpolate by returning the function value at the nearest data site. `D` doesn't mean much here (it could be `D = ∞`), and so it is ignored and replaced with `0`.
- `Laplace(d)`: Interpolate via the Laplace interpolant, with `C(0)` continuity at the data sites. `D` is ignored.
- `Farin(d)`: Interpolate using the Farin interpolant, with `C(1)` continuity at the data sites. `d` is ignored.
- `Hiyoshi(d)`: Interpolate using the Hiyoshi interpolant, with `C(d)` continuity at the data sites. Currently, only defined for `d == 2`.
Our implementation of `Sibson(0)`'s coordinates follows [this article](https://gwlucastrig.github.io/TinfourDocs/NaturalNeighborTinfourAlgorithm/index.html) with some simple modifications.
"""
abstract type AbstractInterpolator{D} end
@doc """
Sibson(d=0)
Interpolate using Sibson's coordinates with `C(d)` continuity at the data sites.
""" struct Sibson{D} <: AbstractInterpolator{D}
Sibson(d) = d ∈ (0, 1) ? new{d}() : throw(ArgumentError("The Sibson interpolant is only defined for d ∈ (0, 1)."))
Sibson() = new{0}()
end
# Piecewise-linear interpolation over the triangulation. `s` caches the nine
# shape-function coefficients of each (sorted) triangle when `allow_cache` is
# set; see populate_cache!.
struct Triangle{D} <: AbstractInterpolator{D}
    allow_cache::Bool
    s::Dict{NTuple{3,Int},NTuple{9,Float64}}
end
# Fix: the cache was constructed as Dict{NTuple{3,Int},Float64}(), mismatching
# the field's NTuple{9,Float64} value type — it only worked because converting
# an *empty* Dict is permissive. Construct it with the correct type directly.
Triangle{D}(; allow_cache=true) where {D} = Triangle{D}(allow_cache, Dict{NTuple{3,Int},NTuple{9,Float64}}())
Base.empty!(method::Triangle) = empty!(method.s)
# Fill the Triangle method's shape-coefficient cache for `tri`. A cache whose
# size already matches the triangulation is assumed current; a nonempty cache of
# the wrong size means the user switched triangulations, so it is rebuilt.
function populate_cache!(method::Triangle, tri::Triangulation)
    method.allow_cache || return method
    n_cached = length(method.s)
    n_cached == DelaunayTriangulation.num_solid_triangles(tri) && return method
    n_cached > 0 && empty!(method) # user is using a new triangulation
    for T in each_solid_triangle(tri)
        i, j, k = triangle_vertices(sort_triangle(T))
        method.s[(i, j, k)] = _compute_triangle_shape_coefficients(tri, i, j, k)
    end
    return method
end
struct Nearest{D} <: AbstractInterpolator{D} end
struct Laplace{D} <: AbstractInterpolator{D} end
struct Farin{D} <: AbstractInterpolator{D} end
struct Hiyoshi{D} <: AbstractInterpolator{D} end
@doc """
Triangle(; allow_cache = true)
Interpolate using a piecewise linear interpolant over the underlying triangulation.
!!! note "Cached coordinates with `allow_cache=true`"
The `Triangle()` interpolator is special as it will cache the coordinates used
for each triangle. In particular, when an interpolator is evaluated with the
`Triangle()` method, the object returned from `Triangle()` will store all
the coordinates. For this reason, if you want to reuse `Triangle()` for different
evaluations of the interpolant, you should be sure to reuse the same instance rather
than reinstantiating it every single time. If you do not want this behaviour, set
`allow_cache = false`.
If you only ever call the scalar-argument version of the interpolant, no caching will
be done even with `allow_cache = true`.
""" Triangle(; allow_cache=true) = Triangle{0}(; allow_cache)
Triangle(d; allow_cache=true) = Triangle(; allow_cache)
@doc """
Nearest()
Interpolate by taking the function value at the nearest data site.
""" Nearest() = Nearest{0}()
Nearest(d) = Nearest{d}()
@doc """
Laplace()
Interpolate using Laplace's coordinates.
""" Laplace() = Laplace{0}()
Laplace(d) = Laplace()
@doc """
Farin()
Interpolate using Farin's C(1) interpolant.
""" Farin() = Farin{1}()
Farin(d) = Farin()
@doc """
Hiyoshi(d)
Interpolate using Hiyoshi's C(d) interpolant. Hiyoshi's interpolant C(0) is not yet implemented,
but we do not make any conversions to C(2) like in e.g. `Farin()`, e.g. `Farin()` gets
converted to `Farin(1)` but, to support possible later versions, `Hiyoshi()` does not get
converted to `Hiyoshi(2)`.
""" Hiyoshi() = Hiyoshi{0}()
Hiyoshi(d) = Hiyoshi{d}()
# Normalise a user-supplied method specifier to an AbstractInterpolator.
@inline iwrap(s::AbstractInterpolator) = s
# Symbol → interpolator translation, kept for backwards compatibility (prefer
# passing the interpolator object directly).
@inline function iwrap(s::Symbol)
    s === :sibson && return Sibson()
    s === :sibson_1 && return Sibson(1)
    s === :triangle && return Triangle()
    s === :nearest && return Nearest()
    s === :laplace && return Laplace()
    s === :farin && return Farin()
    s === :hiyoshi_2 && return Hiyoshi(2)
    throw(ArgumentError("Unknown interpolator."))
end
# Convenience constructor from a raw point set: triangulate (keeping ghost
# triangles, which point location relies on) and defer to the Triangulation
# method.
@inline interpolate(points, z; gradient=nothing, hessian=nothing, kwargs...) =
    interpolate(triangulate(points, delete_ghosts=false), z; gradient, hessian, kwargs...)
# Convenience constructor from separate coordinate vectors; see the
# Triangulation method for keyword documentation.
@inline function interpolate(x::AbstractVector, y::AbstractVector, z; gradient=nothing, hessian=nothing, kwargs...)
    # Input validation: @assert may be compiled out, so throw explicitly.
    length(x) == length(y) == length(z) || throw(DimensionMismatch("x, y, and z must have the same length."))
    points = [(ξ, η) for (ξ, η) in zip(x, y)]
    return interpolate(points, z; gradient, hessian, kwargs...)
end
function (itp::NaturalNeighboursInterpolant)(x, y, id::Integer=1; parallel=false, method=Sibson(), kwargs...)
tri = get_triangulation(itp)
F = number_type(tri)
p = (F(x), F(y))
cache = get_neighbour_cache(itp, id)
method = iwrap(method)
# method isa Triangle && populate_cache!(method, tri)
return _eval_interp(method, itp, p, cache; kwargs...)
end
# In-place evaluation: stores itp(x[i], y[i]) into vals[i]. With parallel=true
# the work is split into one chunk per cached neighbour cache so each task has
# private scratch space.
function (itp::NaturalNeighboursInterpolant)(vals::AbstractVector, x::AbstractVector, y::AbstractVector; parallel=true, method=Sibson(), kwargs...)
    # Input validation: @assert may be compiled out, so throw explicitly.
    length(x) == length(y) == length(vals) || throw(DimensionMismatch("x, y, and vals must have the same length."))
    method = iwrap(method)
    # Populate the Triangle shape-coefficient cache once, up front, so the
    # per-point evaluations below are cheap lookups.
    method isa Triangle && populate_cache!(method, get_triangulation(itp))
    if !parallel
        for i in eachindex(x, y)
            vals[i] = itp(x[i], y[i], 1; method, kwargs...)
        end
    else
        caches = get_neighbour_cache(itp)
        nt = length(caches)
        chunked_iterator = chunks(vals, nt)
        Threads.@threads for (xrange, chunk_id) in chunked_iterator
            for i in xrange
                vals[i] = itp(x[i], y[i], chunk_id; method, kwargs...)
            end
        end
    end
    return nothing
end
# Allocating evaluation: returns a vector with itp(x[i], y[i]) for each i.
@inline function (itp::NaturalNeighboursInterpolant)(x::AbstractVector, y::AbstractVector; parallel=true, method=Sibson(), kwargs...)
    # Input validation: @assert may be compiled out, so throw explicitly.
    length(x) == length(y) || throw(DimensionMismatch("x and y must have the same length."))
    tri = get_triangulation(itp)
    F = number_type(tri)
    vals = zeros(F, length(x))
    method = iwrap(method)
    # Pre-populate the Triangle cache once before the per-point work.
    method isa Triangle && populate_cache!(method, tri)
    itp(vals, x, y; method, parallel, kwargs...)
    return vals
end
| NaturalNeighbours | https://github.com/DanielVandH/NaturalNeighbours.jl.git |
|
[
"MIT"
] | 1.3.5 | ac69089b500226d09cb611688ae67015bb107b58 | code | 6994 | function compute_bowyer_envelope!(envelope, tri::Triangulation, history::InsertionEventHistory, temp_adjacent::Adjacent, point; kwargs...) #kwargs are add_point! kwargs
empty!(history)
empty!(envelope)
empty!(get_adjacent(temp_adjacent))
n = num_points(tri)
I = integer_type(tri)
V = add_point!(tri, point; store_event_history=Val(true), event_history=history, peek=Val(true), kwargs...)
all_triangles = each_added_triangle(history)
if isempty(all_triangles) # This is possible for constrained triangulations
return envelope, temp_adjacent, history, V
end
for T in all_triangles
add_triangle!(temp_adjacent, T)
end
T = first(all_triangles)
i, j, _ = triangle_vertices(T)
v = i == I(n + 1) ? j : i # get a vertex on the envelope
push!(envelope, v)
for i in 2:num_triangles(all_triangles)
v = get_adjacent(temp_adjacent, I(n + 1), v)
push!(envelope, v)
end
push!(envelope, envelope[begin])
return envelope, temp_adjacent, history, V
end
function compute_bowyer_envelope!(envelope, tri::Triangulation, point; kwargs...)
I = integer_type(tri)
E = edge_type(tri)
A = Adjacent{I,E}()
return compute_bowyer_envelope!(envelope, tri, InsertionEventHistory(tri), A, point; kwargs...)
end
function compute_bowyer_envelope(tri::Triangulation, point; kwargs...)
I = integer_type(tri)
envelope = I[]
return compute_bowyer_envelope!(envelope, tri, point; kwargs...)
end
_num_points(points::Tuple) = length(points)
_num_points(points) = num_points(points)
_get_point(points::Tuple, i...) = ntuple(j->points[i[j]], length(i))
_get_point(points, i...) = get_point(points, i...)
function polygon_area(points)
n = _num_points(points)
p, q, r, s = _get_point(points, 1, 2, n, n - 1)
px, py = getxy(p)
_, qy = getxy(q)
rx, ry = getxy(r)
_, sy = getxy(s)
area = px * (qy - ry) + rx * (py - sy)
for i in 2:(n-1)
p, q, r = _get_point(points, i, i + 1, i - 1)
px, py = getxy(p)
_, qy = getxy(q)
rx, ry = getxy(r)
area += px * (qy - ry)
end
return area / 2
end
function get_barycentric_deviation(natural_coordinates::NaturalCoordinates{F}) where {F}
coordinates = get_coordinates(natural_coordinates)
indices = get_indices(natural_coordinates)
interpolation_point = get_interpolation_point(natural_coordinates)
triangulation = get_triangulation(natural_coordinates)
x̂ = zero(F)
ŷ = zero(F)
for (λ, k) in zip(coordinates, indices)
p = get_point(triangulation, k)
px, py = getxy(p)
x̂ += λ * px
ŷ += λ * py
end
x, y = getxy(interpolation_point)
δ² = (x - x̂)^2 + (y - ŷ)^2
return sqrt(δ²)
end
function handle_duplicate_points!(tri, interpolation_point, coordinates::AbstractVector{F}, envelope, u, prev_u, next_u) where {F}
p, q, r = get_point(tri, u, prev_u, next_u)
xy = getxy(interpolation_point)
envelope_idx = if xy == p
u
elseif xy == q
prev_u
elseif xy == r
next_u
else
idx = findfirst(i -> get_point(tri, i) == getxy(interpolation_point), envelope)
envelope[idx]
end
resize!(coordinates, 1)
resize!(envelope, 1)
envelope[begin] = envelope_idx
coordinates[begin] = one(F)
return NaturalCoordinates(coordinates, envelope, interpolation_point, tri)
end
# Sort five values into nondecreasing order and return them as a tuple.
# Implemented as an unrolled selection-style network of pairwise minmax
# exchanges, so it allocates nothing.
function sort_five(i, j, k, ℓ, m)
    i, j = minmax(i, j)
    i, k = minmax(i, k)
    i, ℓ = minmax(i, ℓ)
    i, m = minmax(i, m) # i is now the minimum
    j, k = minmax(j, k)
    j, ℓ = minmax(j, ℓ)
    j, m = minmax(j, m) # j is now the second smallest
    k, ℓ = minmax(k, ℓ)
    k, m = minmax(k, m) # k is now the median
    ℓ, m = minmax(ℓ, m) # final exchange orders the last pair
    return i, j, k, ℓ, m
end
# Number of distinct values among five *already sorted* inputs: five minus one
# for every adjacent equal pair (Bools subtract as 0/1).
function count_unique_sorted(i, j, k, ℓ, m) # assumes sorted
    return 5 - (i == j) - (j == k) - (k == ℓ) - (ℓ == m)
end
#=
Returns (standard_sort, case)
Standard forms:
Case 1. iiiii
Case 2. iiiij
Case 3. iiijj
Case 4. iiijk
Case 5. iijjk
Case 6. iijkℓ
Case 7. ijkℓm
=#
# Sort five indices and rearrange them into the standard form for their
# multiplicity pattern (see the case table in the comment block above).
# Returns (standard_tuple, case) where case ∈ 1:7 selects the Hiyoshi
# contribution formula. Within each case the repeated index comes first.
function group_sort(i, j, k, ℓ, m)
    i′, j′, k′, ℓ′, m′ = sort_five(i, j, k, ℓ, m)
    num_unique = count_unique_sorted(i′, j′, k′, ℓ′, m′)
    if num_unique == 1
        # uuuuu
        u = i′
        return (u, u, u, u, u), 1
    elseif num_unique == 2
        # Either a 4+1 split (case 2) or a 3+2 split (case 3); since the inputs
        # are sorted, the repeated run is a prefix or a suffix.
        if i′ == j′ == k′ == ℓ′ # iiiij
            u, v = i′, m′
            return (u, u, u, u, v), 2
        elseif j′ == k′ == ℓ′ == m′ # jiiii
            u, v = j′, i′
            return (u, u, u, u, v), 2
        elseif i′ == j′ == k′ # iiijj
            u, v = i′, ℓ′
            return (u, u, u, v, v), 3
        else # jjiii
            u, v = k′, i′
            return (u, u, u, v, v), 3
        end
    elseif num_unique == 3
        # Either a 3+1+1 split (case 4) or a 2+2+1 split (case 5).
        if i′ == j′ == k′ # iiijk
            u, v, w = i′, ℓ′, m′
            return (u, u, u, v, w), 4
        elseif k′ == ℓ′ == m′ # jkiii
            u, v, w, = k′, i′, j′
            return (u, u, u, v, w), 4
        elseif j′ == k′ == ℓ′ # ijjjk
            u, v, w = j′, i′, m′
            return (u, u, u, v, w), 4
        elseif (i′ == j′) && (k′ == ℓ′) # iijjk
            u, v, w = i′, k′, m′
            return (u, u, v, v, w), 5
        elseif (j′ == k′) && (ℓ′ == m′) # kiijj
            u, v, w = j′, ℓ′, i′
            return (u, u, v, v, w), 5
        else # iikjj
            u, v, w = i′, ℓ′, k′
            return (u, u, v, v, w), 5
        end
    elseif num_unique == 4
        # 2+1+1+1 split: locate the single repeated pair among the sorted run.
        if i′ == j′ # iijkℓ
            u, v, w, x = i′, k′, ℓ′, m′
        elseif j′ == k′ # jiikℓ
            u, v, w, x = j′, i′, ℓ′, m′
        elseif k′ == ℓ′ # jkiiℓ
            u, v, w, x = k′, i′, j′, m′
        else # jkℓii
            u, v, w, x = ℓ′, i′, j′, k′
        end
        return (u, u, v, w, x), 6
    else # ijkℓm
        return (i′, j′, k′, ℓ′, m′), 7
    end
end
# computes dot(∇ᵢ, xⱼ - xᵢ)
# zᵢⱼ: the directional derivative dot(∇ᵤ, x_v - x_u), where u = N₀[i] and
# v = N₀[j] — i.e. the slope of the gradient stored at the i-th neighbour in
# the direction of the j-th neighbour.
function directional_derivative(tri, i, j, N₀, ∇)
    u, v = N₀[i], N₀[j]
    pᵤ, pᵥ = get_point(tri, u, v)
    ux, uy = getxy(pᵤ)
    vx, vy = getxy(pᵥ)
    gx, gy = getxy(∇[u])
    return (vx - ux) * gx + (vy - uy) * gy
end
# computes dot(xⱼ - xᵢ, B(xₖ - xᵢ))
# zᵢ,ⱼₖ: the bilinear form (xⱼ - xᵢ)ᵀ B (xₖ - xᵢ), where B is the symmetric
# Hessian stored at the u-th neighbour as the triple (B₁₁, B₂₂, B₁₂).
function hessian_form(tri, u, v, w, N₀, H)
    i, j, k = N₀[u], N₀[v], N₀[w]
    pᵢ, pⱼ, pₖ = get_point(tri, i, j, k)
    h₁₁, h₂₂, h₁₂ = H[i]
    aₓ, aᵧ = pⱼ[1] - pᵢ[1], pⱼ[2] - pᵢ[2]
    bₓ, bᵧ = pₖ[1] - pᵢ[1], pₖ[2] - pᵢ[2]
    return aₓ * (bₓ * h₁₁ + bᵧ * h₁₂) + aᵧ * (bₓ * h₁₂ + bᵧ * h₂₂)
end
|
[
"MIT"
] | 1.3.5 | ac69089b500226d09cb611688ae67015bb107b58 | code | 2236 | function _compute_farin_coordinates(nc::NaturalCoordinates{F}, tri::Triangulation, z, ∇) where {F}
λ = get_coordinates(nc)
N₀ = get_indices(nc)
result = zero(F)
for i in eachindex(λ)
for j in i:lastindex(λ)
for k in j:lastindex(λ)
bezier_height, multiplicity = get_contrib(tri, i, j, k, N₀, ∇, z)
λ_prod = λ[i] * λ[j] * λ[k]
result += 6bezier_height * λ_prod / multiplicity
end
end
end
return result
end
# Classify a cubic Bézier control-point multi-index (i, j, k):
# all three equal → vertex ordinate; at least two equal → edge ordinate;
# all distinct → face ordinate.
function is_bezier_point(i, j, k)
    return i == j && j == k
end
function is_bezier_edge(i, j, k)
    return i == j || j == k || k == i
end
function is_bezier_face(i, j, k)
    return i != j && j != k && k != i
end
# For an edge-type multi-index (exactly two of i, j, k equal), return the pair
# (repeated_index, remaining_index).
function find_bezier_edge(i, j, k)
    i == j && return (i, k)
    i == k && return (i, j)
    return (j, i) # j == k is the only remaining possibility
end
# Bézier ordinate for a vertex control point: the raw data value at the i-th
# natural neighbour, paired with divisor 6 (each contribution is scaled by
# 6/multiplicity in _compute_farin_coordinates).
bezier_point_contribution(i, N₀, z) = (z[N₀[i]], 6)
function bezier_edge_contribution(tri, i, j, N₀, ∇, z)
u = N₀[i]
zᵤ = z[u]
bezier_height = zᵤ + directional_derivative(tri, i, j, N₀, ∇) / 3
return bezier_height, 2
end
function bezier_face_contribution(tri, i, j, k, N₀, ∇, z)
u = N₀[i]
v = N₀[j]
w = N₀[k]
point_contrib = (z[u] + z[v] + z[w]) / 3
edge_contrib = (
directional_derivative(tri, i, j, N₀, ∇) +
directional_derivative(tri, i, k, N₀, ∇) +
directional_derivative(tri, j, i, N₀, ∇) +
directional_derivative(tri, j, k, N₀, ∇) +
directional_derivative(tri, k, i, N₀, ∇) +
directional_derivative(tri, k, j, N₀, ∇)
) / 12
return point_contrib + edge_contrib, 1
end
function get_contrib(tri, i, j, k, N₀, ∇, z)
if is_bezier_point(i, j, k)
return bezier_point_contribution(i, N₀, z)
elseif is_bezier_edge(i, j, k)
i′, j′ = find_bezier_edge(i, j, k)
return bezier_edge_contribution(tri, i′, j′, N₀, ∇, z)
else # is_bezier_face(i,j,k)
return bezier_face_contribution(tri, i, j, k, N₀, ∇, z)
end
end
# Farin's interpolant blends over the same natural-neighbour coordinates as
# Sibson's method; only the blending differs, so the coordinates themselves are
# delegated to the Sibson computation.
function compute_natural_coordinates(::Farin, tri, interpolation_point, cache=NaturalNeighboursCache(tri); kwargs...)
    return _compute_sibson_coordinates(tri, interpolation_point, cache; kwargs...)
end | NaturalNeighbours | https://github.com/DanielVandH/NaturalNeighbours.jl.git |
|
[
"MIT"
] | 1.3.5 | ac69089b500226d09cb611688ae67015bb107b58 | code | 8383 | function _compute_hiyoshi_coordinates(nc::NaturalCoordinates{F}, tri::Triangulation, z, ∇, H) where {F}
    # Evaluate Hiyoshi's C(2) interpolant: a quintic Bézier simplex over the natural
    # coordinates λ, with control ordinates built from the data `z`, the derivative
    # data `∇`, and the Hessian data `H` at the natural neighbours N₀.
    λ = get_coordinates(nc)
    N₀ = get_indices(nc)
    result = zero(F)
    # Sum over all sorted multi-indices (i, j, k, ℓ, m) of the quintic patch.
    # 120 / multiplicity is the multinomial coefficient 5!/(repetition factor) of
    # the corresponding Bernstein polynomial; `case` encodes the repetition pattern
    # of the sorted multi-index as classified by group_sort.
    for i in eachindex(λ)
        for j in i:lastindex(λ)
            for k in j:lastindex(λ)
                for ℓ in k:lastindex(λ)
                    for m in ℓ:lastindex(λ)
                        (i′, j′, k′, ℓ′, m′), case = group_sort(i, j, k, ℓ, m) # could (??) be faster to sort individually at each loop step, but not bothered with that currently
                        bezier_height, multiplicity = get_contrib(tri, i′, j′, k′, ℓ′, m′, N₀, ∇, H, z, case)
                        λ_prod = λ[i′] * λ[j′] * λ[k′] * λ[ℓ′] * λ[m′]
                        result += 120bezier_height * λ_prod / multiplicity
                    end
                end
            end
        end
    end
    return result
end
# Ordinate fᵢᵢᵢᵢᵢ (all five indices equal): simply the data value at the i-th
# natural neighbour.
_hiyoshi_case_1(i, N₀, z) = z[N₀[i]]
# Ordinate fᵢᵢᵢᵢⱼ (pattern aaaab): the value at neighbour i plus one fifth of
# the directional derivative from i towards j.
function _hiyoshi_case_2(tri, i, j, N₀, ∇, z) # iiiij
    return z[N₀[i]] + directional_derivative(tri, i, j, N₀, ∇) / 5
end
# Ordinate fᵢᵢᵢⱼⱼ (pattern aaabb): combines the value at i, the first directional
# derivative towards j, and the second-order (Hessian) form along (j, j).
function _hiyoshi_case_3(tri, i, j, N₀, ∇, H, z) # iiijj
    zᵢ = z[N₀[i]]
    d₁ = directional_derivative(tri, i, j, N₀, ∇)
    d₂ = hessian_form(tri, i, j, j, N₀, H)
    return zᵢ + 2d₁ / 5 + d₂ / 20
end
# Ordinate fᵢᵢᵢⱼₖ (pattern aaabc): the value at i, one fifth of the directional
# derivatives towards j and k, and one twentieth of the mixed Hessian form (j, k).
function _hiyoshi_case_4(tri, i, j, k, N₀, ∇, H, z) # iiijk
    zᵢ = z[N₀[i]]
    dⱼ = directional_derivative(tri, i, j, N₀, ∇)
    dₖ = directional_derivative(tri, i, k, N₀, ∇)
    h = hessian_form(tri, i, j, k, N₀, H)
    return zᵢ + (dⱼ + dₖ) / 5 + h / 20
end
function _hiyoshi_case_5(tri, i, j, k, N₀, ∇, H, z) # iijjk
    # Ordinate fᵢᵢⱼⱼₖ (pattern aabbc). Blends the three data values with all six
    # pairwise directional derivatives among {i, j, k} and the three mixed Hessian
    # forms; the rational coefficients come from Hiyoshi's C(2) construction.
    zᵢ = z[N₀[i]]
    zⱼ = z[N₀[j]]
    zₖ = z[N₀[k]]
    zᵢⱼ = directional_derivative(tri, i, j, N₀, ∇)
    zⱼᵢ = directional_derivative(tri, j, i, N₀, ∇)
    zᵢₖ = directional_derivative(tri, i, k, N₀, ∇)
    zⱼₖ = directional_derivative(tri, j, k, N₀, ∇)
    zₖᵢ = directional_derivative(tri, k, i, N₀, ∇)
    zₖⱼ = directional_derivative(tri, k, j, N₀, ∇)
    zᵢⱼₖ = hessian_form(tri, i, j, k, N₀, H)
    zⱼᵢₖ = hessian_form(tri, j, i, k, N₀, H)
    zₖᵢⱼ = hessian_form(tri, k, i, j, N₀, H)
    fᵢᵢⱼⱼₖ = 13(zᵢ + zⱼ) / 30 + 2zₖ / 15 + (zᵢⱼ + zⱼᵢ) / 9 + 7(zᵢₖ + zⱼₖ) / 90 + 2(zₖᵢ + zₖⱼ) / 45 + (zᵢⱼₖ + zⱼᵢₖ + zₖᵢⱼ) / 45
    # Alternative formulation left by the author (unused):
    # fᵢᵢⱼⱼₖ = (zᵢ + zⱼ) / 2 + 3(zᵢⱼ + zⱼᵢ) / 20 + (zᵢₖ + zⱼₖ) / 10 + (zᵢⱼₖ + zⱼᵢₖ) / 30 + (zᵢⱼⱼ + zⱼᵢᵢ) / 120
    return fᵢᵢⱼⱼₖ
end
function _hiyoshi_case_6(tri, i, j, k, ℓ, N₀, ∇, H, z) # iijkℓ
    # Ordinate fᵢᵢⱼₖₗ (pattern aabcd). Blends the four data values with all twelve
    # pairwise directional derivatives among {i, j, k, ℓ} and the twelve mixed
    # Hessian forms; the rational coefficients come from Hiyoshi's C(2) construction.
    zᵢ = z[N₀[i]]
    zⱼ = z[N₀[j]]
    zₖ = z[N₀[k]]
    zₗ = z[N₀[ℓ]]
    zᵢⱼ = directional_derivative(tri, i, j, N₀, ∇)
    zᵢₖ = directional_derivative(tri, i, k, N₀, ∇)
    zᵢₗ = directional_derivative(tri, i, ℓ, N₀, ∇)
    zᵢⱼₖ = hessian_form(tri, i, j, k, N₀, H)
    zᵢⱼₗ = hessian_form(tri, i, j, ℓ, N₀, H)
    zᵢₖₗ = hessian_form(tri, i, k, ℓ, N₀, H)
    zⱼᵢ = directional_derivative(tri, j, i, N₀, ∇)
    zⱼₖ = directional_derivative(tri, j, k, N₀, ∇)
    zⱼₗ = directional_derivative(tri, j, ℓ, N₀, ∇)
    zₖᵢ = directional_derivative(tri, k, i, N₀, ∇)
    zₖⱼ = directional_derivative(tri, k, j, N₀, ∇)
    zₖₗ = directional_derivative(tri, k, ℓ, N₀, ∇)
    zₗᵢ = directional_derivative(tri, ℓ, i, N₀, ∇)
    zₗⱼ = directional_derivative(tri, ℓ, j, N₀, ∇)
    zₗₖ = directional_derivative(tri, ℓ, k, N₀, ∇)
    zⱼᵢₖ = hessian_form(tri, j, i, k, N₀, H)
    zⱼᵢₗ = hessian_form(tri, j, i, ℓ, N₀, H)
    zⱼₖₗ = hessian_form(tri, j, k, ℓ, N₀, H)
    zₖᵢⱼ = hessian_form(tri, k, i, j, N₀, H)
    zₖᵢₗ = hessian_form(tri, k, i, ℓ, N₀, H)
    zₖⱼₗ = hessian_form(tri, k, j, ℓ, N₀, H)
    zₗᵢⱼ = hessian_form(tri, ℓ, i, j, N₀, H)
    zₗᵢₖ = hessian_form(tri, ℓ, i, k, N₀, H)
    zₗⱼₖ = hessian_form(tri, ℓ, j, k, N₀, H)
    # Alternative formulation left by the author (unused):
    #=
    fᵢᵢⱼₖₗ = 7(zᵢ + zⱼ + zₖ + zₗ) / 10 +
             11(zᵢⱼ + zᵢₖ + zᵢₗ) / 90 +
             (zᵢⱼₖ + zᵢⱼₗ + zᵢₖₗ) / 45 +
             (zⱼᵢ + zⱼₖ + zⱼₗ + zₖᵢ + zₖⱼ + zₖₗ + zₗᵢ + zₗⱼ + zₗₖ) / 45 +
             (zⱼᵢₖ + zⱼᵢₗ + zⱼₖₗ + zₖᵢⱼ + zₖᵢₗ + zₖⱼₗ + zₗᵢⱼ + zₗᵢₖ + zₗⱼₖ) / 180
    =#
    fᵢᵢⱼₖₗ = zᵢ / 2 + (zⱼ + zₖ + zₗ) / 6 + 7(zᵢⱼ + zᵢₖ + zᵢₗ) / 90 + 2(zⱼᵢ + zₖᵢ + zₗᵢ) / 45 +
             (zⱼₖ + zⱼₗ + zₖⱼ + zₖₗ + zₗⱼ + zₗₖ) / 30 + (zᵢⱼₖ + zᵢⱼₗ + zᵢₖₗ) / 90 +
             (zⱼᵢₖ + zⱼᵢₗ + zₖᵢⱼ + zₖᵢₗ + zₗᵢⱼ + zₗᵢₖ) / 90 + (zⱼₖₗ + zₖⱼₗ + zₗⱼₖ) / 180
    return fᵢᵢⱼₖₗ
end
# Ordinate fᵢⱼₖₗₘ for a multi-index with five distinct entries (the central
# control ordinates of the quintic patch). Hiyoshi's symmetric formula:
#   fᵢⱼₖₗₘ = Σ z_v / 5 + Σ z_vw / 30 + Σ z_{v,wx} / 180,
# summed over the 5 vertices, the 20 ordered pairs (v, w), and the 30 triples
# (v, {w, x}) with w, x distinct from v.
function _hiyoshi_case_7(tri, i, j, k, ℓ, m, N₀, ∇, H, z) # ijkℓm
    zᵢ = z[N₀[i]]
    zⱼ = z[N₀[j]]
    zₖ = z[N₀[k]]
    zₗ = z[N₀[ℓ]]
    zₘ = z[N₀[m]]
    zᵢⱼ = directional_derivative(tri, i, j, N₀, ∇)
    zᵢₖ = directional_derivative(tri, i, k, N₀, ∇)
    zᵢₗ = directional_derivative(tri, i, ℓ, N₀, ∇)
    zᵢₘ = directional_derivative(tri, i, m, N₀, ∇)
    zⱼᵢ = directional_derivative(tri, j, i, N₀, ∇)
    zⱼₖ = directional_derivative(tri, j, k, N₀, ∇)
    zⱼₗ = directional_derivative(tri, j, ℓ, N₀, ∇)
    zⱼₘ = directional_derivative(tri, j, m, N₀, ∇)
    zₖᵢ = directional_derivative(tri, k, i, N₀, ∇)
    zₖⱼ = directional_derivative(tri, k, j, N₀, ∇)
    zₖₗ = directional_derivative(tri, k, ℓ, N₀, ∇)
    zₖₘ = directional_derivative(tri, k, m, N₀, ∇)
    zₗᵢ = directional_derivative(tri, ℓ, i, N₀, ∇)
    zₗⱼ = directional_derivative(tri, ℓ, j, N₀, ∇)
    zₗₖ = directional_derivative(tri, ℓ, k, N₀, ∇)
    zₗₘ = directional_derivative(tri, ℓ, m, N₀, ∇)
    zₘᵢ = directional_derivative(tri, m, i, N₀, ∇)
    zₘⱼ = directional_derivative(tri, m, j, N₀, ∇)
    zₘₖ = directional_derivative(tri, m, k, N₀, ∇)
    zₘₗ = directional_derivative(tri, m, ℓ, N₀, ∇)
    zᵢⱼₖ = hessian_form(tri, i, j, k, N₀, H)
    zᵢⱼₗ = hessian_form(tri, i, j, ℓ, N₀, H)
    zᵢⱼₘ = hessian_form(tri, i, j, m, N₀, H)
    zᵢₖₗ = hessian_form(tri, i, k, ℓ, N₀, H)
    zᵢₖₘ = hessian_form(tri, i, k, m, N₀, H)
    zᵢₗₘ = hessian_form(tri, i, ℓ, m, N₀, H)
    zⱼᵢₖ = hessian_form(tri, j, i, k, N₀, H)
    zⱼᵢₗ = hessian_form(tri, j, i, ℓ, N₀, H)
    # FIX: this term was previously hessian_form(tri, i, i, m, N₀, H), which is
    # identically zero (the first difference vector vanishes); the symmetric
    # formula requires the z_{j,im} term here.
    zⱼᵢₘ = hessian_form(tri, j, i, m, N₀, H)
    zⱼₖₗ = hessian_form(tri, j, k, ℓ, N₀, H)
    zⱼₖₘ = hessian_form(tri, j, k, m, N₀, H)
    zⱼₗₘ = hessian_form(tri, j, ℓ, m, N₀, H)
    zₖᵢⱼ = hessian_form(tri, k, i, j, N₀, H)
    zₖᵢₗ = hessian_form(tri, k, i, ℓ, N₀, H)
    zₖᵢₘ = hessian_form(tri, k, i, m, N₀, H)
    zₖⱼₗ = hessian_form(tri, k, j, ℓ, N₀, H)
    zₖⱼₘ = hessian_form(tri, k, j, m, N₀, H)
    zₖₗₘ = hessian_form(tri, k, ℓ, m, N₀, H)
    zₗᵢⱼ = hessian_form(tri, ℓ, i, j, N₀, H)
    zₗᵢₖ = hessian_form(tri, ℓ, i, k, N₀, H)
    zₗᵢₘ = hessian_form(tri, ℓ, i, m, N₀, H)
    zₗⱼₖ = hessian_form(tri, ℓ, j, k, N₀, H)
    zₗⱼₘ = hessian_form(tri, ℓ, j, m, N₀, H)
    zₗₖₘ = hessian_form(tri, ℓ, k, m, N₀, H)
    zₘᵢⱼ = hessian_form(tri, m, i, j, N₀, H)
    zₘᵢₖ = hessian_form(tri, m, i, k, N₀, H)
    zₘᵢₗ = hessian_form(tri, m, i, ℓ, N₀, H)
    zₘⱼₖ = hessian_form(tri, m, j, k, N₀, H)
    zₘⱼₗ = hessian_form(tri, m, j, ℓ, N₀, H)
    zₘₖₗ = hessian_form(tri, m, k, ℓ, N₀, H)
    fᵢⱼₖₗₘ = (zᵢ + zⱼ + zₖ + zₗ + zₘ) / 5 +
             (zᵢⱼ + zᵢₖ + zᵢₗ + zᵢₘ + zⱼᵢ + zⱼₖ + zⱼₗ + zⱼₘ + zₖᵢ + zₖⱼ + zₖₗ +
              zₖₘ + zₗᵢ + zₗⱼ + zₗₖ + zₗₘ + zₘᵢ + zₘⱼ + zₘₖ + zₘₗ) / 30 +
             (zᵢⱼₖ + zᵢⱼₗ + zᵢⱼₘ + zᵢₖₗ + zᵢₖₘ + zᵢₗₘ + zⱼᵢₖ + zⱼᵢₗ + zⱼᵢₘ +
              zⱼₖₗ + zⱼₖₘ + zⱼₗₘ + zₖᵢⱼ + zₖᵢₗ + zₖᵢₘ + zₖⱼₗ + zₖⱼₘ + zₖₗₘ +
              zₗᵢⱼ + zₗᵢₖ + zₗᵢₘ + zₗⱼₖ + zₗⱼₘ + zₗₖₘ + zₘᵢⱼ + zₘᵢₖ + zₘᵢₗ +
              zₘⱼₖ + zₘⱼₗ + zₘₖₗ) / 180
    return fᵢⱼₖₗₘ
end
# Dispatch a sorted quintic multi-index (classified by `case`) to the matching
# ordinate computation, returning (bezier_height, multiplicity). The multiplicity
# is the product of factorials of the repetition counts, so that
# 120 / multiplicity is the Bernstein multinomial coefficient.
function get_contrib(tri, i, j, k, ℓ, m, N₀, ∇, H, z, case)
    if case == 1      # iiiii
        return _hiyoshi_case_1(m, N₀, z), 120
    elseif case == 2  # iiiij
        return _hiyoshi_case_2(tri, ℓ, m, N₀, ∇, z), 24
    elseif case == 3  # iiijj
        return _hiyoshi_case_3(tri, k, ℓ, N₀, ∇, H, z), 12
    elseif case == 4  # iiijk
        return _hiyoshi_case_4(tri, k, ℓ, m, N₀, ∇, H, z), 6
    elseif case == 5  # iijjk
        return _hiyoshi_case_5(tri, i, k, m, N₀, ∇, H, z), 4
    elseif case == 6  # iijkℓ
        return _hiyoshi_case_6(tri, j, k, ℓ, m, N₀, ∇, H, z), 2
    else              # ijkℓm (all distinct)
        return _hiyoshi_case_7(tri, i, j, k, ℓ, m, N₀, ∇, H, z), 1
    end
end
# Hiyoshi's interpolant blends over the same natural-neighbour coordinates as
# Sibson's method, so the coordinate computation is delegated to it.
function compute_natural_coordinates(::Hiyoshi, tri, interpolation_point, cache=NaturalNeighboursCache(tri); kwargs...)
    return _compute_sibson_coordinates(tri, interpolation_point, cache; kwargs...)
end | NaturalNeighbours | https://github.com/DanielVandH/NaturalNeighbours.jl.git |
|
[
"MIT"
] | 1.3.5 | ac69089b500226d09cb611688ae67015bb107b58 | code | 2462 | function _compute_laplace_coordinates(
    tri::Triangulation{P,T,BN,W,I,E,Es,BC,BEM,GVM,GVR,BPL,C,BE},
    interpolation_point,
    cache::NaturalNeighboursCache{F}=NaturalNeighboursCache(tri);
    project=true,
    kwargs...
) where {P,T,BN,W,I,E,Es,BC,BEM,GVM,GVR,BPL,C,BE,F}
    # Compute the Laplace (non-Sibsonian) natural-neighbour coordinates of
    # `interpolation_point`, reusing workspace buffers from `cache`.
    coordinates = get_coordinates(cache)
    envelope = get_envelope(cache)
    insertion_event_history = get_insertion_event_history(cache)
    temp_adjacent = get_temp_adjacent(cache)
    last_triangle = get_last_triangle(cache)
    # Determine the natural neighbours (the Bowyer–Watson envelope) of the point.
    envelope, temp_adjacent, insertion_event_history, V = compute_bowyer_envelope!(envelope, tri, insertion_event_history, temp_adjacent, interpolation_point; try_points=last_triangle[], kwargs...) #kwargs are add_point! kwargs
    # Points outside the hull fall back to a two-point (extrapolation) formula.
    i, j, return_flag = check_for_extrapolation(tri, V, interpolation_point, last_triangle)
    return_flag && return two_point_interpolate!(coordinates, envelope, tri, i, j, interpolation_point, project)
    resize!(coordinates, length(envelope) - 1)
    w = zero(number_type(tri))
    # Unnormalised Laplace weight of each envelope generator; `w` accumulates the
    # normalisation constant. A NaN ratio flags a generator duplicating the point.
    for i in firstindex(envelope):(lastindex(envelope)-1)
        ratio, u, prev_u, next_u = laplace_ratio(tri, envelope, i, interpolation_point) # could reuse a circumcenter here, but it's not the dominating part of the computation anyway.
        isnan(ratio) && return handle_duplicate_points!(tri, interpolation_point, coordinates, envelope, u, prev_u, next_u)
        coordinates[i] = max(ratio, zero(ratio)) # coordinate types like Float32 can sometimes get e.g. -1f-8
        w += coordinates[i]
    end
    # The envelope closes on itself; drop the repeated vertex and normalise so the
    # coordinates sum to one.
    pop!(envelope)
    coordinates ./= w
    return NaturalCoordinates(coordinates, envelope, interpolation_point, tri)
end
# Unnormalised Laplace weight of the i-th envelope generator u: the length of the
# Voronoi facet shared by the interpolation point and u, divided by the distance
# from the interpolation point to u. Also returns u and its two envelope
# neighbours so callers can handle degenerate (duplicate-point) cases.
function laplace_ratio(tri, envelope, i, interpolation_point)
    u = envelope[i]
    prev_u = envelope[previndex_circular(envelope, i)]
    next_u = envelope[nextindex_circular(envelope, i)]
    p, q, r = get_point(tri, u, prev_u, next_u)
    # The shared Voronoi facet is bounded by the circumcentres of the two
    # triangles formed with the neighbouring envelope generators.
    c₁ = triangle_circumcenter(q, p, interpolation_point)
    c₂ = triangle_circumcenter(p, r, interpolation_point)
    c₁x, c₁y = getxy(c₁)
    c₂x, c₂y = getxy(c₂)
    facet_len² = (c₁x - c₂x)^2 + (c₁y - c₂y)^2
    px, py = getxy(p)
    x, y = getxy(interpolation_point)
    dist² = (px - x)^2 + (py - y)^2
    return sqrt(facet_len² / dist²), u, prev_u, next_u
end
# Entry point for Laplace natural-neighbour coordinates.
function compute_natural_coordinates(::Laplace, tri, interpolation_point, cache=NaturalNeighboursCache(tri); kwargs...)
    return _compute_laplace_coordinates(tri, interpolation_point, cache; kwargs...)
end | NaturalNeighbours | https://github.com/DanielVandH/NaturalNeighbours.jl.git |
|
[
"MIT"
] | 1.3.5 | ac69089b500226d09cb611688ae67015bb107b58 | code | 875 | function _compute_nearest_coordinates(
    tri::Triangulation{P,T,BN,W,I,E,Es,BC,BEM,GVM,GVR,BPL,C,BE},
    interpolation_point,
    cache::NaturalNeighboursCache{F}=NaturalNeighboursCache(tri);
    kwargs...
) where {P,T,BN,W,I,E,Es,BC,BEM,GVM,GVR,BPL,C,BE,F}
    # Nearest-neighbour "coordinates": a single coordinate of 1 assigned to the
    # generator whose Voronoi polygon contains the interpolation point.
    coordinates = get_coordinates(cache)
    envelope = get_envelope(cache)
    last_triangle = get_last_triangle(cache)
    # Jump-and-march from the last used triangle to locate the containing polygon.
    i = jump_to_voronoi_polygon(tri, interpolation_point; try_points=last_triangle[])
    resize!(coordinates, 1)
    resize!(envelope, 1)
    coordinates[1] = one(number_type(tri))
    envelope[1] = i
    return NaturalCoordinates(coordinates, envelope, interpolation_point, tri)
end
# Entry point for nearest-neighbour (piecewise-constant) coordinates.
function compute_natural_coordinates(::Nearest, tri, interpolation_point, cache=NaturalNeighboursCache(tri); kwargs...)
    return _compute_nearest_coordinates(tri, interpolation_point, cache; kwargs...)
end | NaturalNeighbours | https://github.com/DanielVandH/NaturalNeighbours.jl.git |
|
[
"MIT"
] | 1.3.5 | ac69089b500226d09cb611688ae67015bb107b58 | code | 4990 | # Our implementation of these coordinates follows https://gwlucastrig.github.io/TinfourDocs/NaturalNeighborTinfourAlgorithm/index.html with some simple modifications.
function _compute_sibson_coordinates(
    tri::Triangulation{P,T,BN,W,I,E,Es,BC,BEM,GVM,GVR,BPL,C,BE},
    interpolation_point,
    cache::NaturalNeighboursCache{F}=NaturalNeighboursCache(tri);
    project=true,
    kwargs...
) where {P,T,BN,W,I,E,Es,BC,BEM,GVM,GVR,BPL,C,BE,F}
    # Compute Sibson's natural-neighbour coordinates of `interpolation_point`:
    # each generator's weight is the area its Voronoi cell would cede to the
    # point if the point were inserted, computed as (pre-insertion area) minus
    # (post-insertion area). Workspace buffers are reused from `cache`.
    coordinates = get_coordinates(cache)
    envelope = get_envelope(cache)
    insertion_event_history = get_insertion_event_history(cache)
    poly_points = get_poly_points(cache)
    temp_adjacent = get_temp_adjacent(cache)
    last_triangle = get_last_triangle(cache)
    # Determine the natural neighbours (the Bowyer–Watson envelope) of the point.
    envelope, temp_adjacent, insertion_event_history, V = compute_bowyer_envelope!(envelope, tri, insertion_event_history, temp_adjacent, interpolation_point; try_points=last_triangle[], kwargs...) #kwargs are add_point! kwargs
    # Points outside the hull fall back to a two-point (extrapolation) formula.
    i, j, return_flag = check_for_extrapolation(tri, V, interpolation_point, last_triangle)
    return_flag && return two_point_interpolate!(coordinates, envelope, tri, i, j, interpolation_point, project)
    resize!(coordinates, length(envelope) - 1)
    w = zero(number_type(tri))
    for i in firstindex(envelope):(lastindex(envelope)-1)
        pre = pre_insertion_area!(poly_points, envelope, i, tri)
        post, u, prev_u, next_u = post_insertion_area(envelope, i, tri, interpolation_point)
        # NaN flags a degenerate circumcircle, i.e. the interpolation point
        # duplicates an existing data site.
        isnan(post) && return handle_duplicate_points!(tri, interpolation_point, coordinates, envelope, u, prev_u, next_u)
        coordinates[i] = max(pre - post, zero(pre)) # coordinate types like Float32 can sometimes get e.g. -1f-8
        w += coordinates[i]
    end
    # Drop the envelope's repeated closing vertex and normalise to sum to one.
    pop!(envelope)
    coordinates ./= w
    return NaturalCoordinates(coordinates, envelope, interpolation_point, tri)
end
# pre-insertion area component from the envelope[i]th generator
# Builds (into `poly_points`) the part of the generator's Voronoi cell that is
# relevant before the interpolation point is inserted: bounded by the midpoints
# of the edges to the neighbouring envelope generators and the circumcentres of
# the triangles encountered while walking around the generator, then returns its
# polygon area.
function pre_insertion_area!(poly_points, envelope, i, tri::Triangulation)
    empty!(poly_points)
    u = envelope[i]
    prev_u = envelope[previndex_circular(envelope, i)]
    next_u = envelope[nextindex_circular(envelope, i)]
    v = next_u
    ux, uy = get_point(tri, u)
    vx, vy = get_point(tri, v)
    # Midpoint of the edge (u, next_u) opens the polygon.
    mx1, my1 = (ux + vx) / 2, (uy + vy) / 2
    push!(poly_points, (mx1, my1))
    # Walk the triangles adjacent to u from next_u around to prev_u, collecting
    # their circumcentres (Voronoi vertices of u's cell).
    while v ≠ prev_u
        w = get_adjacent(tri, u, v)
        cx, cy = triangle_circumcenter(tri, (u, v, w))
        push!(poly_points, (cx, cy))
        v = w
    end
    # Midpoint of the edge (u, prev_u), then close the polygon back at the start.
    vx, vy = get_point(tri, v)
    mx, my = (ux + vx) / 2, (uy + vy) / 2
    push!(poly_points, (mx, my))
    push!(poly_points, (mx1, my1))
    return polygon_area(poly_points)
end
# post-insertion area component from the envelope[i]th generator
# returns: (area, u, prev_u, next_u); `area` is NaN when either circumcenter is
# degenerate, i.e. when the interpolation point duplicates an existing point.
function post_insertion_area(envelope, i, tri::Triangulation, interpolation_point)
    u = envelope[i]
    prev_u = envelope[previndex_circular(envelope, i)]
    next_u = envelope[nextindex_circular(envelope, i)]
    p, q, r = get_point(tri, u, prev_u, next_u)
    px, py = getxy(p)
    qx, qy = getxy(q)
    rx, ry = getxy(r)
    # Midpoints of the edges to the neighbouring generators, and the circumcentres
    # of the two triangles the inserted interpolation point would form with them.
    mpq = (px + qx) / 2, (py + qy) / 2
    mpr = (px + rx) / 2, (py + ry) / 2
    g1 = triangle_circumcenter(p, r, interpolation_point)
    F = number_type(tri)
    if !all(isfinite, g1)
        # The circumcenter is NaN when the triangle is degenerate,
        # meaning one of the points is a duplicate with another.
        # Since the triangulation is assumed to be valid, it must be that
        # interpolation_point is one of p or r. In particular, the new point
        # is just one of the others, and so there will be no changes
        # in the area. We return NaN as a flag.
        return F(NaN), u, prev_u, next_u
    end
    g2 = triangle_circumcenter(q, p, interpolation_point)
    !all(isfinite, g2) && return F(NaN), u, prev_u, next_u
    # Quadrilateral (closed back to the first midpoint) clipped from u's cell.
    points = (mpq, mpr, g1, g2, mpq)
    return polygon_area(points), u, prev_u, next_u
end
# Entry point for Sibson natural-neighbour coordinates.
function compute_natural_coordinates(::Sibson, tri, interpolation_point, cache=NaturalNeighboursCache(tri); kwargs...)
    return _compute_sibson_coordinates(tri, interpolation_point, cache; kwargs...)
end
function _compute_sibson_1_coordinates(nc::NaturalCoordinates, tri::Triangulation, z, ∇) # has to be a different form since Sib0 blends two functions
    # Accumulate the quantities (ζ, α, β) used to blend the Sibson-1 interpolant:
    #   γ = Σ λₖ/rₖ,  ζ = (Σ ζₖ λₖ/rₖ)/γ,  α = (Σ λₖ rₖ)/γ,  β = Σ λₖ rₖ²,
    # where rₖ is the distance from the interpolation point p₀ to neighbour k and
    # ζₖ is the first-order Taylor estimate of the data at p₀ from neighbour k.
    λ = get_coordinates(nc)
    N₀ = get_indices(nc)
    p₀ = get_interpolation_point(nc)
    x₀, y₀ = getxy(p₀)
    F = number_type(tri)
    α = zero(F)
    β = zero(F)
    ζ = zero(F)
    γ = zero(F)
    for (λₖ, k) in zip(λ, N₀)
        ∇ₖ = ∇[k]
        zₖ = z[k]
        pₖ = get_point(tri, k)
        xₖ, yₖ = getxy(pₖ)
        rₖ² = (xₖ - x₀)^2 + (yₖ - y₀)^2
        rₖ = sqrt(rₖ²)
        γₖ = λₖ / rₖ
        # First-order Taylor expansion of the data about neighbour k, at p₀.
        ζₖ = zₖ + (x₀ - xₖ) * ∇ₖ[1] + (y₀ - yₖ) * ∇ₖ[2]
        αₖ = λₖ * rₖ
        γ += γₖ
        β += λₖ * rₖ²
        α += αₖ
        ζ += ζₖ * γₖ
        # γ becomes infinite when rₖ = 0, i.e. p₀ coincides with a data site;
        # return (0, 1, 0) so that the downstream blend reduces to the data
        # value — NOTE(review): confirm against the caller that does the blend.
        if !isfinite(γ)
            return zero(F), one(F), zero(F)
        end
    end
    ζ /= γ
    α /= γ
    return ζ, α, β
end | NaturalNeighbours | https://github.com/DanielVandH/NaturalNeighbours.jl.git |
|
[
"MIT"
] | 1.3.5 | ac69089b500226d09cb611688ae67015bb107b58 | code | 580 | function _compute_triangle_shape_coefficients(tri, i, j, k)
    # Coefficients of the linear (barycentric) shape functions of triangle
    # (i, j, k): the n-th barycentric coordinate of a point (x, y) is
    # sₙ x + sₙ₊₃ y + sₙ₊₆ for n = 1, 2, 3.
    p, q, r = get_point(tri, i, j, k)
    px, py = getxy(p)
    qx, qy = getxy(q)
    rx, ry = getxy(r)
    # Δ is twice the signed area of the triangle (p, q, r).
    Δ = qx * ry - qy * rx - px * ry + rx * py + px * qy - qx * py
    s₁ = (qy - ry) / Δ
    s₂ = (ry - py) / Δ
    s₃ = (py - qy) / Δ
    s₄ = (rx - qx) / Δ
    s₅ = (px - rx) / Δ
    s₆ = (qx - px) / Δ
    s₇ = (qx * ry - rx * qy) / Δ
    s₈ = (rx * py - px * ry) / Δ
    s₉ = (px * qy - qx * py) / Δ
    shape_function_coefficients = (s₁, s₂, s₃, s₄, s₅, s₆, s₇, s₈, s₉)
    return shape_function_coefficients
end | NaturalNeighbours | https://github.com/DanielVandH/NaturalNeighbours.jl.git |
|
[
"MIT"
] | 1.3.5 | ac69089b500226d09cb611688ae67015bb107b58 | code | 4208 | using ..NaturalNeighbours
using DelaunayTriangulation
using CairoMakie
using JET
using Aqua
# Package-quality checks (Aqua.jl) followed by JET.jl static-analysis checks that
# the public interpolation/differentiation API is free of inference problems.
Aqua.test_all(NaturalNeighbours; ambiguities=false, project_extras=false) # don't care about julia < 1.2
Aqua.test_ambiguities(NaturalNeighbours) # don't pick up Base and Core...
# Small random data set shared by all of the JET checks below.
pts = [(rand(), rand()) for _ in 1:50]
tri = triangulate(pts, delete_ghosts=false)
f = (x, y) -> sin(x * y) - cos(x - y) * exp(-(x - y)^2)
z = [f(x, y) for (x, y) in pts]
interpolate(tri, z; derivatives=false, parallel=false)
res = report_package(NaturalNeighbours; target_modules=(@__MODULE__,))
# @test_opt fails if JET detects possible runtime dispatch / optimisation issues.
@test_opt target_modules = (@__MODULE__,) interpolate(tri, z; derivatives=false, parallel=false)
@test_opt target_modules = (@__MODULE__,) interpolate(tri, z; derivatives=true, parallel=false)
@test_opt target_modules = (@__MODULE__,) interpolate(tri, z; derivatives=true, parallel=true)
@test_opt target_modules = (@__MODULE__,) interpolate(tri, z; derivatives=true, parallel=true)
@test_opt target_modules = (@__MODULE__,) interpolate(tri, z)
itp = interpolate(tri, z)
@test_opt target_modules = (@__MODULE__,) differentiate(itp, 1)
@test_opt target_modules = (@__MODULE__,) differentiate(itp, 2)
∂ = differentiate(itp, 1)
@test_opt target_modules = (@__MODULE__,) ∂(0.3, 0.5; method = Iterative())
@test_opt target_modules = (@__MODULE__,) ∂(0.3, 0.5; method = Direct())
@test_opt target_modules = (@__MODULE__,) ∂(0.3, 0.5; method = Iterative(), project=false)
@test_opt target_modules = (@__MODULE__,) ∂(0.3, 0.5; method = Direct(),project=false)
@test_opt target_modules = (@__MODULE__,) ∂([0.3], [0.5], method=Iterative())
@test_opt target_modules = (@__MODULE__,) ∂([0.3], [0.5], method=Direct())
@test_opt target_modules = (@__MODULE__,) ∂([0.3], [0.5], method=Direct(), project=false)
@test_opt target_modules = (@__MODULE__,) ∂([0.3], [0.5], method=Iterative(),project=false)
# Every interpolation method, scalar and vector call forms.
@test_opt target_modules = (@__MODULE__,) itp(0.3, 0.5, method=Sibson())
@test_opt target_modules = (@__MODULE__,) itp(0.3, 0.5, method=Sibson(1))
@test_opt target_modules = (@__MODULE__,) itp(0.3, 0.5, method=Farin())
@test_opt target_modules = (@__MODULE__,) itp(0.3, 0.5, method=Hiyoshi())
@test_opt target_modules = (@__MODULE__,) itp(0.3, 0.5, method=Laplace())
@test_opt target_modules = (@__MODULE__,) itp(0.3, 0.5, method=Triangle())
@test_opt target_modules = (@__MODULE__,) itp(0.3, 0.5, method=Nearest())
@test_opt target_modules = (@__MODULE__,) itp([0.3], [0.5], method=Sibson(), parallel=false)
@test_opt target_modules = (@__MODULE__,) itp([0.3], [0.5], method=Sibson(1), parallel=true)
@test_opt target_modules = (@__MODULE__,) itp([0.3], [0.5], method=Farin())
@test_opt target_modules = (@__MODULE__,) itp([0.3], [0.5], method=Hiyoshi())
@test_opt target_modules = (@__MODULE__,) itp([0.3], [0.5], method=Laplace())
@test_opt target_modules = (@__MODULE__,) itp([0.3], [0.5], method=Triangle(; allow_cache = false))
@test_opt target_modules = (@__MODULE__,) itp([0.3], [0.5], method=Nearest())
# @test_call checks the same calls for detectable runtime errors.
@test_call target_modules = (@__MODULE__,) interpolate(tri, z; derivatives=false, parallel=false)
@test_call target_modules = (@__MODULE__,) interpolate(tri, z; derivatives=true, parallel=false)
@test_call target_modules = (@__MODULE__,) interpolate(tri, z; derivatives=true, parallel=true)
@test_call target_modules = (@__MODULE__,) interpolate(tri, z; derivatives=true, parallel=true)
@test_call target_modules = (@__MODULE__,) interpolate(tri, z)
itp = interpolate(tri, z)
@test_call target_modules = (@__MODULE__,) differentiate(itp, 1)
@test_call target_modules = (@__MODULE__,) differentiate(itp, 2)
∂ = differentiate(itp, 1)
@test_call target_modules = (@__MODULE__,) ∂(0.3, 0.5)
@test_call target_modules = (@__MODULE__,) itp([0.3], [0.5], method=Sibson())
@test_call target_modules = (@__MODULE__,) itp([0.3], [0.5], method=Sibson(1))
@test_call target_modules = (@__MODULE__,) itp([0.3], [0.5], method=Farin())
@test_call target_modules = (@__MODULE__,) itp([0.3], [0.5], method=Hiyoshi())
@test_call target_modules = (@__MODULE__,) itp([0.3], [0.5], method=Laplace())
@test_call target_modules = (@__MODULE__,) itp([0.3], [0.5], method=Triangle())
@test_call target_modules = (@__MODULE__,) itp([0.3], [0.5], method=Nearest()) | NaturalNeighbours | https://github.com/DanielVandH/NaturalNeighbours.jl.git |
|
[
"MIT"
] | 1.3.5 | ac69089b500226d09cb611688ae67015bb107b58 | code | 2525 | using NaturalNeighbours
using Test
using SafeTestsets
include("jet_aqua.jl")
# Top-level test suite: each @safetestset runs its include in a fresh module so
# the test files cannot leak state into one another.
@testset "Interpolation" begin
    @testset "Coordinates" begin
        @info "Testing NaturalCoordinates"
        @safetestset "Natural Coordinates" begin
            include("interpolation/coordinates/natural_coordinates.jl")
        end
        @info "Testing Utils"
        @safetestset "Utils" begin
            include("interpolation/coordinates/utils.jl")
        end
    end
    @info "Testing Basic Tests"
    @safetestset "Basic Tests" begin
        include("interpolation/basic_tests.jl")
    end
    @info "Testing Precision"
    @safetestset "Precision" begin
        include("interpolation/precision.jl")
    end
    @info "Testing Extrapolation"
    @safetestset "Extrapolation" begin
        include("interpolation/extrapolation.jl")
    end
    @info "Testing Structs"
    @safetestset "Structs" begin
        include("interpolation/structs.jl")
    end
    @info "Testing Influence"
    @safetestset "Influence" begin
        include("interpolation/influence.jl")
    end
    @info "Testing Constrained Triangulation"
    @safetestset "Constrained Triangulations" begin
        include("interpolation/constrained.jl")
    end
end
@testset "Differentiation" begin
    @info "Testing Basic Tests"
    @safetestset "Basic Tests" begin
        include("differentiation/basic_tests.jl")
    end
    @info "Testing Structs"
    @safetestset "Structs" begin
        include("differentiation/structs.jl")
    end
    @info "Testing Utils"
    @safetestset "Utils" begin
        include("differentiation/utils.jl")
    end
end
@testset "Documentation Examples" begin
    @info "Testing README Example"
    @safetestset "README Example" begin
        include("doc_examples/readme_example.jl")
    end
    @info "Testing Interpolation Example"
    @safetestset "Interpolation Example" begin
        include("doc_examples/interpolation.jl")
    end
    @info "Testing Differentiation Example"
    @safetestset "Differentiation Example" begin
        include("doc_examples/differentiation.jl")
    end
    @info "Testing Interpolation Math"
    @safetestset "Interpolation Math" begin
        include("doc_examples/interpolation_math.jl")
    end
    @info "Testing Switzerland"
    @safetestset "Switzerland" begin
        include("doc_examples/swiss.jl")
    end
    # Comparison example is expensive; kept disabled (previously CI-gated).
    #if get(ENV, "CI", "false") == "false"
    #    @safetestset "Comparison" begin
    #        include("doc_examples/interpolant_comparisons.jl")
    #    end
    #end
end | NaturalNeighbours | https://github.com/DanielVandH/NaturalNeighbours.jl.git |
|
[
"MIT"
] | 1.3.5 | ac69089b500226d09cb611688ae67015bb107b58 | code | 21449 | using ..NaturalNeighbours
const NNI = NaturalNeighbours
using DelaunayTriangulation
const DT = DelaunayTriangulation
using LinearAlgebra
using Optimization
using OptimizationNLopt
using Random
using StableRNGs
using LinearAlgebra
include(normpath(@__DIR__, "../.", "helper_functions", "slow_derivative_tests.jl"))
include(normpath(@__DIR__, "../.", "helper_functions", "point_generator.jl"))
include(normpath(@__DIR__, "../.", "helper_functions", "test_functions.jl"))
@testset "Estimating derivatives with weighted least squares" begin
    # Exact test function with known gradient f′ and Hessian f′′, sampled on a
    # structured 101×101 grid; derivative estimates are compared against both the
    # reference least-squares solvers and the analytical derivatives.
    tri = triangulate_rectangle(0, 10, 0, 10, 101, 101)
    tri = triangulate(get_points(tri), randomise=false)
    f = (x, y) -> sin(x - y) + cos(x + y)
    f′ = (x, y) -> [cos(x - y) - sin(x + y), -cos(x - y) - sin(x + y)]
    f′′ = (x, y) -> [-sin(x - y)-cos(x + y) sin(x - y)-cos(x + y)
        sin(x - y)-cos(x + y) -sin(x - y)-cos(x + y)]
    z = [f(x, y) for (x, y) in DelaunayTriangulation.each_point(tri)]
    @testset "Direct" begin
        @testset "At data sites" begin
            flag = 0
            for _ in 1:100
                i = rand(1:DelaunayTriangulation.num_points(tri))
                p = get_point(tri, i)
                # Gradient
                ∇opt, ∇ls = estimate_gradient_direct(tri, i, z)
                @test ∇opt ≈ ∇ls rtol = 1e-2
                λ, E = NNI.get_taylor_neighbourhood!(Set{Int64}(), Set{Int64}(), tri, i, 1)
                ∇2 = (
                    NNI.generate_first_order_derivatives(NNI.Direct(), tri, z, z[i], i, λ, E),
                    NNI._generate_first_order_derivatives_direct(tri, z, z[i], i, λ, E)
                )
                @test collect(∇2[1]) ≈ collect(∇2[2]) # not exactly == sometimes due to rng
                @test collect(∇2[1]) ≈ ∇opt rtol = 1e-3
                flag += isapprox(f′(p...), collect(∇2[1]), rtol=1e-1)
                # Hessian: Cubic
                (∇opt, ℋopt), (∇ls, ℋls) = estimate_gradient_hessian_cubic_direct(tri, i, z)
                @test ∇opt ≈ ∇ls rtol = 1e-4
                @test ℋopt ≈ ℋls rtol = 1e-4
                λ, E = NNI.get_taylor_neighbourhood!(Set{Int64}(), Set{Int64}(), tri, i, 3)
                ∇ℋ2 = (
                    NNI.generate_second_order_derivatives(NNI.Direct(), tri, z, z[i], i, λ, E),
                    NNI._generate_second_order_derivatives_direct(tri, z, z[i], i, E, NNI.DerivativeCache(tri))
                )
                ∇2_1, ℋ2_1 = ∇ℋ2[1]
                ∇2_2, ℋ2_2 = ∇ℋ2[2]
                @test collect(∇2_1) ≈ collect(∇2_2)
                @test collect(ℋ2_1) ≈ collect(ℋ2_2)
                @test collect(∇2_1) ≈ ∇opt rtol = 1e-4
                @test collect(ℋ2_1) ≈ ℋopt rtol = 1e-4
                @test f′(p...) ≈ collect(∇2_1) rtol = 1e-2
                @test f′′(p...) ≈ [ℋ2_1[1] ℋ2_1[3]; ℋ2_1[3] ℋ2_1[2]] rtol = 1e-1
                # Hessian: Quadratic
                (∇opt, ℋopt), (∇ls, ℋls) = estimate_gradient_hessian_quadratic_direct(tri, i, z)
                @test ∇opt ≈ ∇ls rtol = 1e-4
                @test ℋopt ≈ ℋls rtol = 1e-4
                λ, E = NNI.get_taylor_neighbourhood!(Set{Int64}(), Set{Int64}(), tri, i, 2)
                ∇ℋ2 = (
                    NNI.generate_second_order_derivatives(NNI.Direct(), tri, z, z[i], i, λ, E; use_cubic_terms=false),
                    NNI._generate_second_order_derivatives_direct(tri, z, z[i], i, E; use_cubic_terms=false)
                )
                ∇2_1, ℋ2_1 = ∇ℋ2[1]
                ∇2_2, ℋ2_2 = ∇ℋ2[2]
                @test collect(∇2_1) ≈ collect(∇2_2)
                @test collect(ℋ2_1) ≈ collect(ℋ2_2)
                @test collect(∇2_1) ≈ ∇opt rtol = 1e-5
                @test collect(ℋ2_1) ≈ ℋopt rtol = 1e-5
                @test f′(p...) ≈ collect(∇2_1) rtol = 1e-1
                @test f′′(p...) ≈ [ℋ2_1[1] ℋ2_1[3]; ℋ2_1[3] ℋ2_1[2]] rtol = 0.5 atol = 1e-1
            end
            @test flag / 100 > 0.95
        end
        @testset "At off-site points" begin
            flag = 0
            rng = StableRNG(35)
            for _ in 1:100
                # Gradient
                itp = interpolate(tri, z; derivatives=false)
                p = random_points_in_convex_hull(tri, 1; rng)[1]
                ∇opt, ∇ls = estimate_gradient_direct(tri, p, z)
                @test ∇opt ≈ ∇ls rtol = 1e-1
                λ, E = NNI.get_taylor_neighbourhood!(Set{Int64}(), Set{Int64}(), tri, p, 1)
                ∇2 = (
                    NNI.generate_first_order_derivatives(NNI.Direct(), tri, z, itp(p...), p, λ, E),
                    NNI._generate_first_order_derivatives_direct(tri, z, itp(p...), p, λ, E)
                )
                @test collect(∇2[1]) ≈ collect(∇2[2]) rtol = 1e-1
                @test collect(∇2[1]) ≈ ∇opt rtol = 0.3
                @test f′(p...) ≈ collect(∇2[1]) rtol = 1e-1 atol = 0.2
                # Hessian: Cubic
                p = random_points_in_convex_hull(tri, 1; rng)[1]
                (∇opt, ℋopt), (∇ls, ℋls) = estimate_gradient_hessian_cubic_direct(tri, p, z)
                @test ∇opt ≈ ∇ls rtol = 1e-2
                @test ℋopt ≈ ℋls rtol = 1e-2
                λ, E = NNI.get_taylor_neighbourhood!(Set{Int64}(), Set{Int64}(), tri, p, 3)
                ∇ℋ2 = (
                    NNI.generate_second_order_derivatives(NNI.Direct(), tri, z, itp(p...), p, λ, E),
                    NNI._generate_second_order_derivatives_direct(tri, z, itp(p...), p, E, NNI.DerivativeCache(tri))
                )
                ∇2_1, ℋ2_1 = ∇ℋ2[1]
                ∇2_2, ℋ2_2 = ∇ℋ2[2]
                @test collect(∇2_1) ≈ collect(∇2_2)
                @test collect(ℋ2_1) ≈ collect(ℋ2_2)
                @test collect(∇2_1) ≈ ∇opt rtol = 1e-4
                @test collect(ℋ2_1) ≈ ℋopt rtol = 1e-4
                @test f′(p...) ≈ collect(∇2_1) rtol = 1e-1
                flag += isapprox(f′′(p...), [ℋ2_1[1] ℋ2_1[3]; ℋ2_1[3] ℋ2_1[2]], rtol=1e-1, atol=1e-1)
                # Hessian: Quadratic
                (∇opt, ℋopt), (∇ls, ℋls) = estimate_gradient_hessian_quadratic_direct(tri, p, z)
                @test ∇opt ≈ ∇ls rtol = 1e-4
                @test ℋopt ≈ ℋls rtol = 1e-4
                λ, E = NNI.get_taylor_neighbourhood!(Set{Int64}(), Set{Int64}(), tri, p, 2)
                ∇ℋ2 = (
                    NNI.generate_second_order_derivatives(NNI.Direct(), tri, z, itp(p...), p, λ, E; use_cubic_terms=false),
                    NNI._generate_second_order_derivatives_direct(tri, z, itp(p...), p, E, NNI.DerivativeCache(tri); use_cubic_terms=false)
                )
                ∇2_1, ℋ2_1 = ∇ℋ2[1]
                ∇2_2, ℋ2_2 = ∇ℋ2[2]
                @test collect(∇2_1) ≈ collect(∇2_2)
                @test collect(ℋ2_1) ≈ collect(ℋ2_2)
                @test collect(∇2_1) ≈ ∇opt rtol = 1e-4
                @test collect(ℋ2_1) ≈ ℋopt rtol = 1e-4
                @test f′(p...) ≈ collect(∇2_1) rtol = 1e-1
                flag += isapprox(f′′(p...), [ℋ2_1[1] ℋ2_1[3]; ℋ2_1[3] ℋ2_1[2]], rtol=0.2)
            end
            @test flag / 200 > 0.8
        end
    end
    @testset "Iterative" begin
        tri = triangulate_rectangle(0, 10, 0, 10, 101, 101)
        tri = triangulate(get_points(tri), randomise=false)
        f = (x, y) -> sin(x - y) + cos(x + y)
        f′ = (x, y) -> [cos(x - y) - sin(x + y), -cos(x - y) - sin(x + y)]
        f′′ = (x, y) -> [-sin(x - y)-cos(x + y) sin(x - y)-cos(x + y)
            sin(x - y)-cos(x + y) -sin(x - y)-cos(x + y)]
        z = [f(x, y) for (x, y) in DelaunayTriangulation.each_point(tri)]
        nt = Base.Threads.nthreads()
        derivative_caches = [NNI.DerivativeCache(tri) for _ in 1:nt]
        neighbour_caches = [NNI.NaturalNeighboursCache(tri) for _ in 1:nt]
        derivative_method = :iterative
        method = NNI.dwrap(derivative_method)
        parallel_derivatives = true
        initial_gradients = NNI.generate_gradients(tri, z, derivative_caches, neighbour_caches; parallel=parallel_derivatives)
        _initial_gradients = deepcopy(initial_gradients)
        for i in eachindex(initial_gradients)
            G1, G2 = estimate_gradient_direct(tri, i, z; use_sibson_weight=true)
            G3 = collect(initial_gradients[i])
            @test G1 ≈ G2 rtol = 1e-4
            @test G1 ≈ G3 rtol = 1e-4
            @test G2 ≈ G3 rtol = 1e-4
        end
        # Parallel and serial derivative generation must agree exactly.
        Gpar = NNI.generate_gradients(tri, z, (derivative_caches), neighbour_caches; parallel=true)
        Gser = NNI.generate_gradients(tri, z, (derivative_caches), neighbour_caches; parallel=false)
        @test Gpar == Gser
        ∇par, ℋpar = NNI.generate_derivatives(tri, z, (derivative_caches), neighbour_caches; method, initial_gradients, parallel=true)
        ∇ser, ℋser = NNI.generate_derivatives(tri, z, (derivative_caches), neighbour_caches; method, initial_gradients, parallel=false)
        @test initial_gradients == _initial_gradients # make sure initial_gradients is not modified
        @test ∇par == ∇ser
        @test ℋpar == ℋser
        # Sweep the smoothing parameter α and count agreement rates per criterion.
        _α = (0.0001, 0.001, 0.01, 0.1, 0.2, 0.3, 0.4, 0.5)
        flags = zeros(Int64, 8, length(_α))
        for (j, α) in enumerate(_α)
            ∇, ℋ = NNI.generate_derivatives(tri, z, (derivative_caches), neighbour_caches; method, initial_gradients, parallel=true, alpha=α)
            for i in eachindex(initial_gradients)
                (G1, H1), (G2, H2) = estimate_gradient_hessian_from_initial_gradients(tri, i, z, α; initial_gradients)
                G3 = collect(∇[i])
                H3 = collect(ℋ[i])
                flags[1, j] += isapprox(G1, G2, rtol=1e-2)
                flags[2, j] += isapprox(G1, G3, rtol=1e-2)
                flags[3, j] += isapprox(G2, G3, rtol=1e-2)
                flags[4, j] += isapprox(H1, H2, rtol=1e-1)
                flags[5, j] += isapprox(H1, H3, rtol=1e-1)
                flags[6, j] += isapprox(H2, H3, rtol=1e-1)
                pᵢ = get_point(tri, i)
                xᵢ, yᵢ = getxy(pᵢ)
                G4 = f′(xᵢ, yᵢ)
                H4 = f′′(xᵢ, yᵢ)[[1, 4, 2]]
                flags[7, j] += isapprox(G3, G4, rtol=1e-1, atol=1e-1)
                flags[8, j] += isapprox(H3, H4, rtol=1e-1, atol=1e-1)
            end
        end
        normalised_flags = flags ./ length(initial_gradients)
        @test all(>(0.85), normalised_flags[:, 1])
        @test all(>(0.85), normalised_flags[:, 2])
        @test all(>(0.85), normalised_flags[:, 3])
        @test all(>(0.85), normalised_flags[:, 4])
        @test all(>(0.85), normalised_flags[:, 5])
        @test all(>(0.85), normalised_flags[:, 6])
        @test all(>(0.75), normalised_flags[:, 7])
        @test all(>(0.75), normalised_flags[[2, 4, 5, 6, 7, 8], 8])
        @test all(>(0.15), normalised_flags[[1, 3], 8])
    end
end
@testset "Checking that keyword arguments are passed fine" begin
tri = triangulate_rectangle(0, 10, 0, 10, 101, 101)
tri = triangulate(get_points(tri), randomise=false)
f = (x, y) -> x^2 + y^2 + x^3 * y
f′ = (x, y) -> [2x + 3x^2 * y; 2y + x^3]
f′′ = (x, y) -> [2+6x*y 3x^2; 3x^2 2]
z = [f(x, y) for (x, y) in DelaunayTriangulation.each_point(tri)]
itp = interpolate(tri, z)
∂ = differentiate(itp, 2)
@test_throws ArgumentError ∂(0.0, 0.0; method=Iterative())
@test_throws ArgumentError ∂(0.0, 0.0; method=:iterative)
itp1 = interpolate(tri, z; derivatives=false)
itp2 = interpolate(tri, z; derivatives=true)
∂11 = differentiate(itp1, 1)
∂12 = differentiate(itp1, 2)
∂21 = differentiate(itp2, 1)
∂22 = differentiate(itp2, 2)
@test slow_test_derivative(itp1, itp2, ∂11, ∂12, ∂21, ∂22;
x=5.872, y=3.45, rng=StableRNG(29991),
method=Direct(), interpolant_method=Sibson(),
alpha=0.01, use_cubic_terms=true,
use_sibson_weight=true,
tri=tri, z=z)
@test slow_test_derivative(itp1, itp2, ∂11, ∂12, ∂21, ∂22;
x=5.872, y=3.45, rng=StableRNG(29991),
method=Direct(), interpolant_method=Laplace(),
alpha=0.01, use_cubic_terms=true,
use_sibson_weight=true,
tri=tri, z=z)
@test slow_test_derivative(itp1, itp2, ∂11, ∂12, ∂21, ∂22;
x=5.872, y=3.45, rng=StableRNG(29991),
method=Direct(), interpolant_method=Laplace(),
alpha=0.01, use_cubic_terms=true,
use_sibson_weight=false,
tri=tri, z=z)
@test slow_test_derivative(itp1, itp2, ∂11, ∂12, ∂21, ∂22;
x=5.872, y=3.45, rng=StableRNG(29991),
method=Direct(), interpolant_method=Sibson(),
alpha=0.01, use_cubic_terms=false,
use_sibson_weight=true,
tri=tri, z=z)
@test slow_test_derivative(itp1, itp2, ∂11, ∂12, ∂21, ∂22;
x=5.872, y=3.45, rng=StableRNG(29991),
method=Direct(), interpolant_method=Sibson(),
alpha=0.13, use_cubic_terms=false,
use_sibson_weight=true,
tri=tri, z=z)
@test_throws ArgumentError slow_test_derivative(itp1, itp2, ∂11, ∂12, ∂21, ∂22;
x=5.872, y=3.45, rng=StableRNG(29991),
method=Iterative(), interpolant_method=Sibson(),
alpha=0.01, use_cubic_terms=false,
use_sibson_weight=true,
tri=tri, z=z)
itp1 = interpolate(tri, z; derivatives=true)
itp2 = interpolate(tri, z; derivatives=true)
∂11 = differentiate(itp1, 1)
∂12 = differentiate(itp1, 2)
∂21 = differentiate(itp2, 1)
∂22 = differentiate(itp2, 2)
@test slow_test_derivative(itp1, itp2, ∂11, ∂12, ∂21, ∂22;
x=5.872, y=3.45, rng=StableRNG(29991),
method=Iterative(), interpolant_method=Sibson(),
alpha=0.1, use_cubic_terms=false,
use_sibson_weight=true,
tri=tri, z=z)
end
@testset "Check multithreading is working" begin
tri = triangulate_rectangle(0, 10, 0, 10, 101, 101)
tri = triangulate(get_points(tri), randomise=false)
f = (x, y) -> x^2 + y^2 + x^3 * y
f′ = (x, y) -> [2x + 3x^2 * y; 2y + x^3]
f′′ = (x, y) -> [2+6x*y 3x^2; 3x^2 2]
z = [f(x, y) for (x, y) in DelaunayTriangulation.each_point(tri)]
itp = interpolate(tri, z; derivatives=true)
∂1 = differentiate(itp, 1)
∂2 = differentiate(itp, 2)
x = 10rand(100)
y = 10rand(100)
@test collect.(∂1(x, y; parallel=true)) ≈ collect.(∂1(x, y; parallel=false)) rtol = 1e-13 # not == because of internal rng
@test collect.(∂1(x, y; interpolant_method=Sibson(1), parallel=true)) ≈ collect.(∂1(x, y; parallel=false, interpolant_method=Sibson(1))) rtol = 1e-7
@test collect.(getindex.(∂2(x, y; parallel=true), 1)) ≈ collect.(getindex.(∂2(x, y; parallel=false), 1)) rtol = 1e-7
@test collect.(getindex.(∂2(x, y; parallel=true), 2)) ≈ collect.(getindex.(∂2(x, y; parallel=false), 2)) rtol = 1e-7
@test collect.(getindex.(∂2(x, y; parallel=true, interpolant_method=Sibson(1)), 1)) ≈ collect.(getindex.(∂2(x, y; parallel=false, interpolant_method=Sibson(1)), 1)) rtol = 1e-7
@test collect.(getindex.(∂2(x, y; parallel=true, interpolant_method=Sibson(1)), 2)) ≈ collect.(getindex.(∂2(x, y; parallel=false, interpolant_method=Sibson(1)), 2)) rtol = 1e-7
end
@testset "Check that Iterative() errors without gradients" begin
tri = triangulate_rectangle(0, 10, 0, 10, 101, 101)
tri = triangulate(get_points(tri), randomise=false)
f = (x, y) -> x^2 + y^2 + x^3 * y
f′ = (x, y) -> [2x + 3x^2 * y; 2y + x^3]
f′′ = (x, y) -> [2+6x*y 3x^2; 3x^2 2]
z = [f(x, y) for (x, y) in DelaunayTriangulation.each_point(tri)]
itp = interpolate(tri, z; derivatives=false)
∂2 = differentiate(itp, 2)
@test_throws ArgumentError("initial_gradients must be provided for iterative derivative estimation. Consider using e.g. interpolate(tri, z; derivatives = true).") ∂2(0.5, 0.5; method=Iterative())
end
@testset "Test Float32" begin
rng = StableRNG(123)
xs = randn(rng, 100)
ys = randn(rng, 100)
tri1 = triangulate([Float32.(xs)'; Float32.(ys)']; rng)
tri2 = triangulate([xs'; ys']; rng)
zs = sin.(xs) .* cos.(ys)
itp1 = interpolate(tri1, Float32.(zs); derivatives=true)
itp2 = interpolate(tri2, zs; derivatives=true)
∂1 = differentiate(itp1, 2)
∂2 = differentiate(itp2, 2)
for ∂ in (∂1, ∂2)
@inferred ∂(0.5, 0.5; method=Iterative())
@inferred ∂(0.5, 0.5; method=Direct())
@inferred ∂(0.5, 0.5; method=Iterative(), project=false)
@inferred ∂(0.5, 0.5; method=Direct(), project=false)
@inferred ∂(0.5f0, 0.5f0; method=Iterative())
@inferred ∂(0.5f0, 0.5f0; method=Direct())
@inferred ∂(0.5f0, 0.5f0; method=Iterative(), project=false)
@inferred ∂(0.5f0, 0.5f0; method=Direct(), project=false)
@inferred ∂(0.5f0, 0.5; method=Iterative())
@inferred ∂(0.5f0, 0.5; method=Direct())
@inferred ∂(0.5f0, 0.5; method=Iterative(), project=false)
@inferred ∂(0.5f0, 0.5; method=Direct(), project=false)
@inferred ∂(0.5, 0.5f0; method=Iterative())
@inferred ∂(0.5, 0.5f0; method=Direct())
@inferred ∂(0.5, 0.5f0; method=Iterative(), project=false)
@inferred ∂(0.5, 0.5f0; method=Direct(), project=false)
end
let ∇H1 = ∂1(0.5f0, 0.5f0; method=Iterative()), ∇H2 = ∂2(0.5f0, 0.5f0; method=Iterative())
∇1, H1 = ∇H1
∇2, H2 = ∇H2
@test collect(∇1) ≈ collect(∇2)
@test collect(H1) ≈ collect(H2)
end
let ∇H1 = ∂1(0.5f0, 0.5f0; method=Direct()), ∇H2 = ∂2(0.5f0, 0.5f0; method=Direct())
∇1, H1 = ∇H1
∇2, H2 = ∇H2
@test collect(∇1) ≈ collect(∇2)
@test collect(H1) ≈ collect(H2)
end
let ∇H1 = ∂1(0.5f0, 0.5f0; method=Iterative(), project=false), ∇H2 = ∂2(0.5f0, 0.5f0; method=Iterative(), project=false)
∇1, H1 = ∇H1
∇2, H2 = ∇H2
@test collect(∇1) ≈ collect(∇2)
@test collect(H1) ≈ collect(H2)
end
let ∇H1 = ∂1(0.5f0, 0.5f0; method=Direct(), project=false), ∇H2 = ∂2(0.5f0, 0.5f0; method=Direct(), project=false)
∇1, H1 = ∇H1
∇2, H2 = ∇H2
@test collect(∇1) ≈ collect(∇2)
@test collect(H1) ≈ collect(H2)
end
∂1 = differentiate(itp1, 1)
∂2 = differentiate(itp2, 1)
for ∂ in (∂1, ∂2)
@inferred ∂(0.5, 0.5; method=Iterative())
@inferred ∂(0.5, 0.5; method=Direct())
@inferred ∂(0.5, 0.5; method=Iterative(), project=false)
@inferred ∂(0.5, 0.5; method=Direct(), project=false)
@inferred ∂(0.5f0, 0.5f0; method=Iterative())
@inferred ∂(0.5f0, 0.5f0; method=Direct())
@inferred ∂(0.5f0, 0.5f0; method=Iterative(), project=false)
@inferred ∂(0.5f0, 0.5f0; method=Direct(), project=false)
@inferred ∂(0.5f0, 0.5; method=Iterative())
@inferred ∂(0.5f0, 0.5; method=Direct())
@inferred ∂(0.5f0, 0.5; method=Iterative(), project=false)
@inferred ∂(0.5f0, 0.5; method=Direct(), project=false)
@inferred ∂(0.5, 0.5f0; method=Iterative())
@inferred ∂(0.5, 0.5f0; method=Direct())
@inferred ∂(0.5, 0.5f0; method=Iterative(), project=false)
@inferred ∂(0.5, 0.5f0; method=Direct(), project=false)
end
let ∇1 = ∂1(0.5f0, 0.5f0; method=Iterative()), ∇2 = ∂2(0.5f0, 0.5f0; method=Iterative())
@test collect(∇1) ≈ collect(∇2)
end
let ∇1 = ∂1(0.5f0, 0.5f0; method=Direct()), ∇2 = ∂2(0.5f0, 0.5f0; method=Direct())
@test collect(∇1) ≈ collect(∇2)
end
let ∇1 = ∂1(0.5f0, 0.5f0; method=Iterative(), project=false), ∇2 = ∂2(0.5f0, 0.5f0; method=Iterative(), project=false)
@test collect(∇1) ≈ collect(∇2)
end
let ∇1 = ∂1(0.5f0, 0.5f0; method=Direct(), project=false), ∇2 = ∂2(0.5f0, 0.5f0; method=Direct(), project=false)
@test collect(∇1) ≈ collect(∇2)
end
xrange = LinRange(-3, 3, 1000) .|> Float32
yrange = LinRange(-3, 3, 1000) .|> Float32
itp_xs = [xrange[i] for i in 1:length(xrange), j in 1:length(yrange)]
itp_ys = [yrange[j] for i in 1:length(xrange), j in 1:length(yrange)]
_itp_xs = vec(itp_xs)
_itp_ys = vec(itp_ys)
∂1 = differentiate(itp1, 2)
∂2 = differentiate(itp2, 2)
vals1 = ∂1(_itp_xs, _itp_ys; method=Iterative())
vals2 = ∂2(_itp_xs, _itp_ys; method=Iterative())
∇err = [norm(g1 .- g2) for (g1, g2) in zip(first.(vals1), first.(vals2))]
Herr = [norm(h1 .- h2) for (h1, h2) in zip(last.(vals1), last.(vals2))]
points = get_points(tri1)
ch = get_convex_hull_vertices(tri1)
bad_idx = identify_exterior_points(_itp_xs, _itp_ys, points, ch; tol=1e-2) # boundary effects _really_ matter...
deleteat!(∇err, bad_idx)
deleteat!(Herr, bad_idx)
@test norm(∇err) ≈ 0 atol = 1e-2
@test norm(Herr) ≈ 0 atol = 1e-1
∂1 = differentiate(itp1, 1)
∂2 = differentiate(itp2, 1)
vals1 = ∂1(_itp_xs, _itp_ys; method=Iterative())
vals2 = ∂2(_itp_xs, _itp_ys; method=Iterative())
∇err = [norm(g1 .- g2) for (g1, g2) in zip(first.(vals1), first.(vals2))]
points = get_points(tri1)
ch = get_convex_hull_vertices(tri1)
bad_idx = identify_exterior_points(_itp_xs, _itp_ys, points, ch; tol=1e-2) # boundary effects _really_ matter...
deleteat!(∇err, bad_idx)
@test norm(∇err) ≈ 0 atol = 1e-1
end
@testset "Check that nothing breaks with a small neighbourhood" begin
    # Only m = 3 data sites — a slightly perturbed equilateral triangle on the unit
    # circle — exercising the smallest neighbourhood the derivative machinery sees.
    m = 3
    pts = [(cos(θ) + 1e-6randn(), sin(θ) + 1e-6randn()) for θ = LinRange(0, 2π, (m + 1))][1:end-1]
    tri = triangulate(pts)
    f = (x, y) -> sin(x) * cos(y)
    z = [f(x, y) for (x, y) in DelaunayTriangulation.each_point(tri)]
    # Constructing with derivatives=true forces gradient/Hessian generation to run
    # on this tiny point set; test_interpolant then checks interpolation at the sites.
    itp = interpolate(tri, z; derivatives=true)
    test_interpolant(itp, first.(pts), last.(pts), f)
end | NaturalNeighbours | https://github.com/DanielVandH/NaturalNeighbours.jl.git |
|
[
"MIT"
] | 1.3.5 | ac69089b500226d09cb611688ae67015bb107b58 | code | 2468 | using DelaunayTriangulation
using ..NaturalNeighbours
const NNI = NaturalNeighbours
const DT = DelaunayTriangulation
using ElasticArrays
@testset "DerivativeCache" begin
tri = triangulate_rectangle(0, 10, 0, 10, 101, 101)
derivative_cache = NNI.DerivativeCache(tri)
@test NNI.get_iterated_neighbourhood(derivative_cache) == derivative_cache.iterated_neighbourhood == Set{Int64}()
@test NNI.get_second_iterated_neighbourhood(derivative_cache) == derivative_cache.second_iterated_neighbourhood == Set{Int64}()
@test NNI.get_linear_matrix(derivative_cache) == derivative_cache.linear_matrix == ElasticMatrix{Float64}(undef, 2, 0)
@test NNI.get_quadratic_matrix(derivative_cache) == derivative_cache.quadratic_matrix == ElasticMatrix{Float64}(undef, 9, 0)
@test NNI.get_rhs_vector(derivative_cache) == derivative_cache.rhs_vector == Float64[]
@test NNI.get_quadratic_matrix_no_cubic(derivative_cache) == derivative_cache.quadratic_matrix_no_cubic == ElasticMatrix{Float64}(undef, 5, 0)
end
@testset "dwrap" begin
@test NNI.dwrap(NNI.Direct()) == NNI.Direct()
@test NNI.dwrap(:direct) == NNI.Direct()
@test_throws ArgumentError NNI.dwrap(:dir)
@test NNI.dwrap(NNI.Iterative()) == NNI.Iterative()
@test NNI.dwrap(:iterative) == NNI.Iterative()
@test_throws ArgumentError NNI.dwrap(:iter)
end
@testset "show" begin
tri = triangulate_rectangle(0, 1, 0, 1, 2, 5)
tri = Triangulation(tri.points, tri.triangles, tri.convex_hull.vertices)
f = (x, y) -> sin(x) + cos(x - y)
unlock_convex_hull!(tri)
x = getx.(tri.points)
y = gety.(tri.points)
z = f.(x, y)
∇ = z .^ 2
H = z .^ (1 / 5)
itp = interpolate(tri, z; hessian=∇, gradient=H)
∂ = differentiate(itp, 2)
@test sprint() do io
Base.show(io, MIME"text/plain"(), ∂)
end ==
"Natural Neighbour Differentiator\n Order: 2\n z: [1.0, 1.3817732906760363, 0.9689124217106447, 1.5731598536817173, 0.8775825618903728, 1.7190535466982693, 0.7316888688738209, 1.8103834065185413, 0.5403023058681398, 1.8414709848078965]\n ∇: [1.0, 1.0668106895787572, 0.9937036950756749, 1.094849869590562, 0.9742212470670031, 1.114443051978001, 0.9394318704459826, 1.1260407621773936, 0.8841528765017798, 1.1298817035265263]\n H: [1.0, 1.909297426825682, 0.9387912809451863, 2.474831925235882, 0.7701511529340699, 2.9551450964158987, 0.5353686008338515, 3.277488078597678, 0.2919265817264289, 3.391015387889364]"
end | NaturalNeighbours | https://github.com/DanielVandH/NaturalNeighbours.jl.git |
|
[
"MIT"
] | 1.3.5 | ac69089b500226d09cb611688ae67015bb107b58 | code | 4423 | using ..NaturalNeighbours
using DelaunayTriangulation
using StableRNGs
const NNI = NaturalNeighbours
using Random
const DT = DelaunayTriangulation
include(normpath(@__DIR__, "../.", "helper_functions", "point_generator.jl"))
@testset "taylor_neighbourhood" begin
rng = StableRNG(123)
for _ in 1:50
pts = [(rand(rng), rand(rng)) for _ in 1:50]
tri = triangulate(pts, rng=rng, delete_ghosts=false)
f = (x, y) -> sin(x * y) - cos(x - y) * exp(-(x - y)^2)
z = [f(x, y) for (x, y) in pts]
S = Set{Int64}()
S′ = Set{Int64}()
n_cache = NNI.NaturalNeighboursCache(tri)
i = 7
d = 1
λ, E = NNI.get_taylor_neighbourhood!(S, S′, tri, i, d, n_cache)
@test all(isone, λ)
@test sort(collect(E)) == sort(collect(DT.iterated_neighbourhood(tri, i, 1)))
p = get_point(tri, i)
λ, E = NNI.get_taylor_neighbourhood!(S, S′, tri, p, d, n_cache)
@test all(isone, λ)
@test λ == 1
@test sort(collect(E)) == sort(collect(DT.iterated_neighbourhood(tri, i, 1)))
p = random_points_in_convex_hull(tri, 1)[1]
λ, E = NNI.get_taylor_neighbourhood!(S, S′, tri, p, d, NNI.NaturalNeighboursCache(tri); rng=StableRNG(881))
nc = NNI.compute_natural_coordinates(Sibson(), tri, p, NNI.NaturalNeighboursCache(tri); rng=StableRNG(881))
@test λ == nc.coordinates
@test E == nc.indices
p = random_points_in_convex_hull(tri, 1)[1]
for _ in 1:100 # test rng is passed correctly
λ, E = NNI.get_taylor_neighbourhood!(S, S′, tri, p, d, NNI.NaturalNeighboursCache(tri); rng=StableRNG(125))
nc = NNI.compute_natural_coordinates(Sibson(), tri, p, NNI.NaturalNeighboursCache(tri); rng=StableRNG(125))
@test λ == nc.coordinates
@test E == nc.indices
end
d = 2
λ, E = NNI.get_taylor_neighbourhood!(S, S′, tri, i, d, n_cache)
@test λ == 1
_S = DT.iterated_neighbourhood(tri, i, 2)
@test sort(collect(E)) == sort(collect(_S))
p = get_point(tri, i)
λ, E = NNI.get_taylor_neighbourhood!(S, S′, tri, p, d, n_cache)
@test λ == 1
@test sort(collect(E)) == sort(collect(_S))
p = random_points_in_convex_hull(tri, 1)[1]
λ, E = NNI.get_taylor_neighbourhood!(S, S′, tri, p, d, n_cache)
nc = NNI.compute_natural_coordinates(Sibson(), tri, p)
_S = [get_neighbours(tri, i) for i in nc.indices]
_S1 = copy(nc.indices)
push!(_S1, reduce(union, _S)...)
filter!(!DT.is_ghost_vertex, _S1)
unique!(_S1)
@test sort(E) == sort(_S1)
for i in eachindex(E)
if 1 ≤ i ≤ length(λ)
@test NNI.get_λ(λ, i, true) == λ[i]
@test NNI.get_λ(λ, i, false) == 1
else
@test NNI.get_λ(λ, i, true) == NNI.get_λ(λ, i, false) == 1
end
end
d = 3
λ, E = NNI.get_taylor_neighbourhood!(S, S′, tri, i, d, n_cache)
@test λ == 1
_S = DT.iterated_neighbourhood(tri, i, 3)
@test sort(collect(E)) == sort(collect(_S))
p = get_point(tri, i)
λ, E = NNI.get_taylor_neighbourhood!(S, S′, tri, p, d, n_cache)
@test λ == 1
@test sort(collect(E)) == sort(collect(_S))
p = random_points_in_convex_hull(tri, 1)[1]
λ, E = NNI.get_taylor_neighbourhood!(S, S′, tri, p, d, n_cache)
nc = NNI.compute_natural_coordinates(Sibson(), tri, p)
_S = [get_neighbours(tri, i) for i in nc.indices]
_S1 = copy(nc.indices)
push!(_S1, reduce(union, _S)...)
filter!(!DT.is_ghost_vertex, _S1)
unique!(_S1)
_S2 = [get_neighbours(tri, i) for i in _S1]
push!(_S1, reduce(union, _S2)...)
filter!(!DT.is_ghost_vertex, _S1)
unique!(_S1)
@test sort(E) == sort(_S1)
for i in eachindex(E)
if 1 ≤ i ≤ length(λ)
@test NNI.get_λ(λ, i, true) == λ[i]
@test NNI.get_λ(λ, i, false) == 1
else
@test NNI.get_λ(λ, i, true) == NNI.get_λ(λ, i, false) == 1
end
end
end
end
@testset "get_λ" begin
@test NNI.get_λ([1.0, 2.0, 3.0], 2, true) == 2.0
@test NNI.get_λ([1.0, 2.0, 3.0, 4.0], 5, true) == 1.0
@test NNI.get_λ([2.3, 5.0], 1, false) == 1.0
@test NNI.get_λ(1.0, 3, true) == 1.0
end | NaturalNeighbours | https://github.com/DanielVandH/NaturalNeighbours.jl.git |
|
[
"MIT"
] | 1.3.5 | ac69089b500226d09cb611688ae67015bb107b58 | code | 12961 | using ..NaturalNeighbours
using DelaunayTriangulation
using CairoMakie
using ReferenceTests
using StableRNGs
using LinearAlgebra
f = (x, y) -> 0.75 * exp(-((9 * x - 2)^2 + (9 * y - 2)^2) / 4) + 0.75 * exp(-(9 * x + 1)^2 / 49 - (9 * y + 1) / 10) + 0.5 * exp(-((9 * x - 7)^2 + (9 * y - 3)^2) / 4) - 0.2 * exp(-(9 * x - 4)^2 - (9 * y - 7)^2)
f′ = (x, y) -> [(exp(-(9 * x - 4)^2 - (9 * y - 7)^2) * (162 * x - 72)) / 5 - (3 * exp(-(9 * x - 2)^2 / 4 - (9 * y - 2)^2 / 4) * ((81 * x) / 2 - 9)) / 4 - (exp(-(9 * x - 7)^2 / 4 - (9 * y - 3)^2 / 4) * ((81 * x) / 2 - 63 / 2)) / 2 - (3 * exp(-(9 * y) / 10 - (9 * x + 1)^2 / 49 - 1 / 10) * ((162 * x) / 49 + 18 / 49)) / 4
(exp(-(9 * x - 4)^2 - (9 * y - 7)^2) * (162 * y - 126)) / 5 - (3 * exp(-(9 * x - 2)^2 / 4 - (9 * y - 2)^2 / 4) * ((81 * y) / 2 - 9)) / 4 - (exp(-(9 * x - 7)^2 / 4 - (9 * y - 3)^2 / 4) * ((81 * y) / 2 - 27 / 2)) / 2 - (27 * exp(-(9 * y) / 10 - (9 * x + 1)^2 / 49 - 1 / 10)) / 40]
f′′ = (x, y) -> [(162*exp(-(9 * x - 4)^2 - (9 * y - 7)^2))/5-(243*exp(-(9 * y) / 10 - (9 * x + 1)^2 / 49 - 1 / 10))/98-(243*exp(-(9 * x - 2)^2 / 4 - (9 * y - 2)^2 / 4))/8-(81*exp(-(9 * x - 7)^2 / 4 - (9 * y - 3)^2 / 4))/4+(3*exp(-(9 * y) / 10 - (9 * x + 1)^2 / 49 - 1 / 10)*((162*x)/49+18/49)^2)/4+(3*exp(-(9 * x - 2)^2 / 4 - (9 * y - 2)^2 / 4)*((81*x)/2-9)^2)/4+(exp(-(9 * x - 7)^2 / 4 - (9 * y - 3)^2 / 4)*((81*x)/2-63/2)^2)/2-(exp(-(9 * x - 4)^2 - (9 * y - 7)^2)*(162*x-72)^2)/5 (27*exp(-(9 * y) / 10 - (9 * x + 1)^2 / 49 - 1 / 10)*((162*x)/49+18/49))/40+(3*exp(-(9 * x - 2)^2 / 4 - (9 * y - 2)^2 / 4)*((81*x)/2-9)*((81*y)/2-9))/4+(exp(-(9 * x - 7)^2 / 4 - (9 * y - 3)^2 / 4)*((81*x)/2-63/2)*((81*y)/2-27/2))/2-(exp(-(9 * x - 4)^2 - (9 * y - 7)^2)*(162*x-72)*(162*y-126))/5
(27*exp(-(9 * y) / 10 - (9 * x + 1)^2 / 49 - 1 / 10)*((162*x)/49+18/49))/40+(3*exp(-(9 * x - 2)^2 / 4 - (9 * y - 2)^2 / 4)*((81*x)/2-9)*((81*y)/2-9))/4+(exp(-(9 * x - 7)^2 / 4 - (9 * y - 3)^2 / 4)*((81*x)/2-63/2)*((81*y)/2-27/2))/2-(exp(-(9 * x - 4)^2 - (9 * y - 7)^2)*(162*x-72)*(162*y-126))/5 (243*exp(-(9 * y) / 10 - (9 * x + 1)^2 / 49 - 1 / 10))/400+(162*exp(-(9 * x - 4)^2 - (9 * y - 7)^2))/5-(243*exp(-(9 * x - 2)^2 / 4 - (9 * y - 2)^2 / 4))/8-(81*exp(-(9 * x - 7)^2 / 4 - (9 * y - 3)^2 / 4))/4+(3*exp(-(9 * x - 2)^2 / 4 - (9 * y - 2)^2 / 4)*((81*y)/2-9)^2)/4+(exp(-(9 * x - 7)^2 / 4 - (9 * y - 3)^2 / 4)*((81*y)/2-27/2)^2)/2-(exp(-(9 * x - 4)^2 - (9 * y - 7)^2)*(162*y-126)^2)/5]
"""
    plot_f(fig, x, y, vals, title, i, show_3d=true, zlabel="z")

Draw a filled-contour plot of `vals` over the grid `(x, y)` into column `i` of row 1
of `fig`, and, when `show_3d` is true, a matching 3D surface plot into row 2 of the
same column. Returns the contour plot object (e.g. for attaching a colorbar).
"""
function plot_f(fig, x, y, vals, title, i, show_3d=true, zlabel="z")
    contour_ax = Axis(fig[1, i], xlabel="x", ylabel="y", width=600, height=600, title=title, titlealign=:left)
    contour_plot = contourf!(contour_ax, x, y, vals, colormap=:viridis, extendhigh=:auto)
    if show_3d
        surface_ax = Axis3(fig[2, i], xlabel="x", ylabel="y", zlabel=zlabel, width=600, height=600, title=" ", titlealign=:left, azimuth=0.49)
        surface!(surface_ax, x, y, vals, color=vals, colormap=:viridis)
    end
    return contour_plot
end
x = LinRange(0, 1, 100)
y = LinRange(0, 1, 100)
z = [f(x, y) for x in x, y in y]
∇ = [f′(x, y) for x in x, y in y]
∇₁ = first.(∇)
∇₂ = last.(∇)
H = [f′′(x, y) for x in x, y in y]
H₁₁ = getindex.(H, 1)
H₁₂ = getindex.(H, 2)
H₂₂ = getindex.(H, 4)
fig = Figure(fontsize=50)
plot_f(fig, x, y, z, "(a): f", 1, true, "z")
plot_f(fig, x, y, ∇₁, "(b): ∂f/∂x", 2, true, "∂f/∂x")
plot_f(fig, x, y, ∇₂, "(c): ∂f/∂y", 3, true, "∂f/∂y")
plot_f(fig, x, y, H₁₁, "(d): ∂²f/∂x²", 4, true, "∂²f/∂x²")
plot_f(fig, x, y, H₂₂, "(f): ∂²f/∂y²", 5, true, "∂²f/∂y²")
plot_f(fig, x, y, H₁₂, "(e): ∂²f/∂x∂y", 6, true, "∂²f/∂x∂y")
resize_to_layout!(fig)
@test_reference normpath(@__DIR__, "../..", "docs", "src", "figures", "differentiation_exact_surfaces.png") fig
# The data set
rng = StableRNG(9199)
x = rand(rng, 500)
y = rand(rng, 500)
z = f.(x, y)
tri = triangulate([x'; y'])
vorn = voronoi(tri)
fig = Figure(fontsize=50, size=(1800, 600))
ax = Axis(fig[1, 1], xlabel="x", ylabel="y", width=600, height=600, title="(a): Data and triangulation", titlealign=:left)
scatter!(ax, x, y, color=:black, markersize=9)
triplot!(ax, tri, strokecolor=:black, strokewidth=2)
voronoiplot!(ax, vorn, strokecolor=:blue, color=(:white, 0.0))
xlims!(ax, 0, 1)
ylims!(ax, 0, 1)
ax = Axis3(fig[1, 2], xlabel="x", ylabel="y", zlabel="z", width=600, height=600, azimuth=0.25, title="(b): Function values", titlealign=:left)
triangles = [T[j] for T in each_solid_triangle(tri), j in 1:3]
surface!(ax, x, y, z)
scatter!(ax, x, y, z, color=:black, markersize=9)
colgap!(fig.layout, 1, 75)
resize_to_layout!(fig)
fig
@test_reference normpath(@__DIR__, "../..", "docs", "src", "figures", "example_data.png") fig
# Generating gradients at the data sites
# Scattered-data variant of plot_f: contours via tricontourf over the triangulation
# `tri`, with an explicit `levels` range so all panels share one colour scale.
function plot_f2(fig, x, y, vals, title, i, tri, levels, show_3d=true, zlabel="z")
    # Connectivity as an (n_triangles × 3) matrix of vertex indices, as Makie expects.
    triangles = [T[j] for T in each_solid_triangle(tri), j in 1:3]
    ax = Axis(fig[1, i], xlabel="x", ylabel="y", width=600, height=600, title=title, titlealign=:left)
    c = tricontourf!(ax, x, y, vals, triangulation=triangles', colormap=:viridis, extendhigh=:auto, levels=levels)
    if show_3d
        ax = Axis3(fig[2, i], xlabel="x", ylabel="y", zlabel=zlabel, width=600, height=600, title=" ", titlealign=:left, azimuth=0.49)
        mesh!(ax, hcat(x, y, vals), triangles, color=vals, colormap=:viridis, colorrange=extrema(levels))
    end
    return c
end
# Plot the estimated gradients ∇g at the data sites next to the exact gradient f′,
# plus a panel of the pointwise error norm. Returns (fig, ε) where ε is the
# relative RMS gradient error as a percentage.
function plot_gradients(∇g, tri, f′, x, y)
    ∇g1 = first.(∇g)  # estimated ∂f/∂x components
    ∇g2 = last.(∇g)   # estimated ∂f/∂y components
    fig = Figure(fontsize=50, size=(2400, 600))
    # Panel indices interleave estimates (columns 1, 3) with exact values (2, 4).
    plot_f2(fig, x, y, ∇g1, "(a): ∂f̂/∂x", 1, tri, -3.5:0.5:3.0, true, "∂f̂/∂x")
    plot_f2(fig, x, y, ∇g2, "(b): ∂f̂/∂y", 3, tri, -3.5:0.5:3.0, true, "∂f̂/∂y")
    plot_f2(fig, x, y, getindex.(f′.(x, y), 1), "(c): ∂f/∂x", 2, tri, -3.5:0.5:3.0, true, "∂f/∂x")
    plot_f2(fig, x, y, getindex.(f′.(x, y), 2), "(d): ∂f/∂y", 4, tri, -3.5:0.5:3.0, true, "∂f/∂y")
    plot_f2(fig, x, y, norm.(collect.(∇g) .- f′.(x, y)), "(e): Gradient error", 5, tri, 0:0.1:0.5, true, "|∇ε|")
    resize_to_layout!(fig)
    # 100·√(Σ‖∇g − ∇f‖² / Σ‖∇g‖²): relative RMS error over all data sites (%).
    ε = 100sqrt(sum((norm.(collect.(∇g) .- f′.(x, y))) .^ 2) / sum(norm.(∇g) .^ 2))
    return fig, ε
end
points = [x'; y']
z = f.(x, y)
tri = triangulate(points)
∇g = generate_gradients(tri, z)
fig, ε = plot_gradients(∇g, tri, f′, x, y)
@test_reference normpath(@__DIR__, "../..", "docs", "src", "figures", "gradient_data.png") fig
@test ε ≈ 10.2511800
∇gr, _ = generate_derivatives(tri, z; method=Direct())
fig, ε = plot_gradients(∇gr, tri, f′, x, y)
@test_reference normpath(@__DIR__, "../..", "docs", "src", "figures", "joint_gradient_data.png") fig
@test ε ≈ 7.7177915977
# Hessians
# Expand a packed symmetric Hessian (H₁₁, H₂₂, H₁₂) into the full 2 × 2 matrix
# [H₁₁ H₁₂; H₁₂ H₂₂].
function to_mat(H::NTuple{3,Float64})
    h11, h22, h12 = H
    return [h11 h12; h12 h22]
end
# Plot the estimated Hessian components at the data sites against the exact Hessian
# f′′. H holds packed tuples (H₁₁, H₂₂, H₁₂); note index 3 is the mixed derivative.
# Returns (fig, ε) with ε the relative RMS Hessian error as a percentage.
function plot_hessians(H, tri, f′′, x, y)
    H₁₁ = getindex.(H, 1)
    H₁₂ = getindex.(H, 3)  # packed storage puts the mixed term third
    H₂₂ = getindex.(H, 2)
    fig = Figure(fontsize=50, size=(2400, 600))
    # Estimates in columns 1, 3, 5; exact values in columns 2, 4, 6.
    plot_f2(fig, x, y, H₁₁, "(a): ∂²f̂/∂x²", 1, tri, -35:5:30, true, "∂²f̂/∂x²")
    plot_f2(fig, x, y, H₂₂, "(c): ∂²f̂/∂y²", 3, tri, -35:5:30, true, "∂²f̂/∂y²")
    plot_f2(fig, x, y, H₁₂, "(e): ∂²f̂/∂x∂y", 5, tri, -35:5:30, true, "∂²f̂/∂x∂y")
    plot_f2(fig, x, y, getindex.(f′′.(x, y), 1), "(b): ∂²f/∂x²", 2, tri, -35:5:30, true, "∂²f/∂x²")
    plot_f2(fig, x, y, getindex.(f′′.(x, y), 4), "(d): ∂²f/∂y²", 4, tri, -35:5:30, true, "∂²f/∂y²")
    plot_f2(fig, x, y, getindex.(f′′.(x, y), 2), "(f): ∂²f/∂x∂y", 6, tri, -35:5:30, true, "∂²f/∂x∂y")
    resize_to_layout!(fig)
    # Relative RMS error (%) computed on the full 2×2 matrices via to_mat.
    ε = 100sqrt(sum((norm.(to_mat.(H) .- f′′.(x, y))) .^ 2) / sum(norm.(to_mat.(H)) .^ 2))
    return fig, ε
end
_, Hg = generate_derivatives(tri, z)
fig, ε = plot_hessians(Hg, tri, f′′, x, y)
@test ε ≈ 42.085578794
@test_reference normpath(@__DIR__, "../..", "docs", "src", "figures", "hessian_data.png") fig
_, Hg = generate_derivatives(tri, z, use_cubic_terms=false)
fig, ε = plot_hessians(Hg, tri, f′′, x, y)
@test ε ≈ 35.20873081
@test_reference normpath(@__DIR__, "../..", "docs", "src", "figures", "hessian_data_no_cubic.png") fig
_, Hg = generate_derivatives(tri, z, method=Iterative()) # the gradients will be generated first automatically
fig, ε = plot_hessians(Hg, tri, f′′, x, y)
@test ε ≈ 39.584816
@test_reference normpath(@__DIR__, "../..", "docs", "src", "figures", "hessian_data_iterative.png") fig
# Away from the data sites
itp = interpolate(tri, z; derivatives=true, method=Direct(), use_cubic_terms=false)
∂ = differentiate(itp, 1)
xg = LinRange(0, 1, 500)
yg = LinRange(0, 1, 500)
_x = vec([x for x in xg, _ in yg])
_y = vec([y for _ in xg, y in yg])
∇g = ∂(_x, _y; interpolant_method=Sibson(1))
"""
    rrmserr(z, ẑ)

Relative root-mean-square error, as a percentage, of the estimates `z` against the
reference values `ẑ`: 100·√(Σ‖zᵢ − ẑᵢ‖² / Σ‖ẑᵢ‖²), accumulated pairwise. Any pair
containing a non-finite component is excluded from both sums.
"""
function rrmserr(z, ẑ)
    sqerr = 0.0  # Σ ‖zᵢ − ẑᵢ‖²
    sqref = 0.0  # Σ ‖ẑᵢ‖²
    for (zᵢ, ẑᵢ) in zip(z, ẑ)
        all(isfinite, (zᵢ..., ẑᵢ...)) || continue
        sqerr += norm(zᵢ .- ẑᵢ)^2
        sqref += norm(ẑᵢ)^2
    end
    return 100sqrt(sqerr / sqref)
end
# Method of plot_f2 for gridded data (coordinate vectors x, y and a matrix of
# values): uses contourf/surface rather than the triangulation-based method.
function plot_f2(fig, x, y, vals, title, i, levels, show_3d=true, zlabel="z")
    ax = Axis(fig[1, i], xlabel="x", ylabel="y", width=600, height=600, title=title, titlealign=:left)
    c = contourf!(ax, x, y, vals, colormap=:viridis, extendhigh=:auto, levels=levels)
    if show_3d
        ax = Axis3(fig[2, i], xlabel="x", ylabel="y", zlabel=zlabel, width=600, height=600, title=" ", titlealign=:left, azimuth=0.49)
        surface!(ax, x, y, vals, color=vals, colormap=:viridis, colorrange=extrema(levels))
    end
    return c
end
# Grid-based method of plot_gradients: ∇g is a flat vector of gradient estimates
# over the (xg, yg) grid, reshaped to a matrix for plotting. Returns (fig, ε) with
# ε the relative RMS gradient error (%) from rrmserr.
function plot_gradients(∇g, f′, xg, yg)
    ∇g = reshape(∇g, (length(xg), length(yg)))
    ∇g1 = first.(∇g)  # estimated ∂f/∂x
    ∇g2 = last.(∇g)   # estimated ∂f/∂y
    ∇f = [f′(x, y) for x in xg, y in yg]  # exact gradients on the grid
    fig = Figure(fontsize=50, size=(2400, 600))
    # Estimates in columns 1, 3; exact values in columns 2, 4; error in column 5.
    plot_f2(fig, xg, yg, ∇g1, "(a): ∂f̂/∂x", 1, -3.5:0.5:3.0, true, "∂f̂/∂x")
    plot_f2(fig, xg, yg, ∇g2, "(b): ∂f̂/∂y", 3, -3.5:0.5:3.0, true, "∂f̂/∂y")
    plot_f2(fig, xg, yg, first.(∇f), "(c): ∂f/∂x", 2, -3.5:0.5:3.0, true, "∂f/∂x")
    plot_f2(fig, xg, yg, last.(∇f), "(d): ∂f/∂y", 4, -3.5:0.5:3.0, true, "∂f/∂y")
    plot_f2(fig, xg, yg, norm.(collect.(∇g) .- ∇f), "(e): Gradient error", 5, 0:0.1:0.5, true, "|∇ε|")
    resize_to_layout!(fig)
    ε = rrmserr(∇f, collect.(∇g))
    return fig, ε
end
fig, ε = plot_gradients(∇g, f′, xg, yg)
@test_reference normpath(@__DIR__, "../..", "docs", "src", "figures", "gradient_surface.png") fig
@test ε ≈ 13.1857476
other_methods = [Sibson(), Laplace(), Nearest(), Triangle()]
∇gs = [∂(_x, _y; interpolant_method=method) for method in other_methods]
∇f = [f′(x, y) for x in xg, y in yg]
εs = [rrmserr(∇f, collect.(∇g)) for ∇g in ∇gs]
@test εs ≈ [28.6753, 25.499, 69.5744, 27.7737] rtol = 0.5
# Second order
function plot_hessians(H, f′′, xg, yg)
H = reshape(H, (length(xg), length(yg)))
H₁₁ = getindex.(H, 1)
H₁₂ = getindex.(H, 3)
H₂₂ = getindex.(H, 2)
Hf = [f′′(x, y) for x in xg, y in yg]
fig = Figure(fontsize=50, size=(2400, 600))
plot_f2(fig, xg, yg, H₁₁, "(a): ∂²f̂/∂x²", 1, -35:5:30, true, "∂²f̂/∂x²")
plot_f2(fig, xg, yg, H₂₂, "(c): ∂²f̂/∂y²", 3, -35:5:30, true, "∂²f̂/∂y²")
plot_f2(fig, xg, yg, H₁₂, "(e): ∂²f̂/∂x∂y", 5, -35:5:30, true, "∂²f̂/∂x∂y")
plot_f2(fig, xg, yg, getindex.(Hf, 1), "(b): ∂²f/∂x²", 2, -35:5:30, true, "∂²f/∂x²")
plot_f2(fig, xg, yg, getindex.(Hf, 4), "(d): ∂²f/∂y²", 4, -35:5:30, true, "∂²f/∂y²")
plot_f2(fig, xg, yg, getindex.(Hf, 2), "(f): ∂²f/∂x∂y", 6, -35:5:30, true, "∂²f/∂x∂y")
resize_to_layout!(fig)
ε = rrmserr(Hf, to_mat.(H))
return fig, ε
end
∂ = differentiate(itp, 2)
∇Hg = ∂(_x, _y; interpolant_method=Sibson(1), method=Iterative())
∇g = first.(∇Hg)
Hg = last.(∇Hg)
fig∇, ε∇ = plot_gradients(∇g, f′, xg, yg)
figH, εH = plot_hessians(Hg, f′′, xg, yg)
zlims!(figH.content[4], -25, 25)
@test_reference normpath(@__DIR__, "../..", "docs", "src", "figures", "hessian_surface.png") figH by = psnr_equality(18)
@test_reference normpath(@__DIR__, "../..", "docs", "src", "figures", "gradient_surface_2.png") fig∇ by = psnr_equality(18)
@test ε∇ ≈ 19.07546882353911 rtol = 1e-1
@test εH ≈ 51.1267212244942 rtol = 1e-1
∇Hg = ∂(_x, _y; interpolant_method=Sibson(1), method=Direct())
∇g = first.(∇Hg)
Hg = last.(∇Hg)
fig∇, ε∇ = plot_gradients(∇g, f′, xg, yg)
figH, εH = plot_hessians(Hg, f′′, xg, yg)
zlims!(figH.content[4], -25, 25)
@test_reference normpath(@__DIR__, "../..", "docs", "src", "figures", "hessian_surface_direct.png") figH
@test_reference normpath(@__DIR__, "../..", "docs", "src", "figures", "gradient_surface_2_direct.png") fig∇
@test εH ≈ 46.7610990050276
@test ε∇ ≈ 9.853286514069882
# Method of rrmserr that additionally excludes query points close to the convex
# hull of the interpolant's triangulation, since natural-neighbour derivative
# estimates degrade near the boundary. Pairs with non-finite components are also
# skipped. Returns the relative RMS error as a percentage.
function rrmserr(z, ẑ, ∂, x, y)
    tri = ∂.interpolant.triangulation
    num = 0.0  # Σ ‖zᵢ − ẑᵢ‖² over retained points
    den = 0.0  # Σ ‖ẑᵢ‖² over retained points
    points = get_points(tri)
    ch = get_convex_hull_vertices(tri)
    for (zᵢ, ẑᵢ, xᵢ, yᵢ) in zip(z, ẑ, x, y)
        q = (xᵢ, yᵢ)
        # Distance from q to the hull polygon; δ > 1e-1 keeps only points well
        # inside the hull — NOTE(review): assumes positive means interior, confirm
        # against DelaunayTriangulation.distance_to_polygon's sign convention.
        δ = DelaunayTriangulation.distance_to_polygon(q, points, ch)
        if δ > 1e-1 && all(isfinite, (zᵢ..., ẑᵢ...))
            num += norm(zᵢ .- ẑᵢ)^2
            den += norm(ẑᵢ)^2
        end
    end
    # num /= length(ẑ)
    return 100sqrt(num / den)
end
εH_nohull = rrmserr(f′′.(_x, _y), to_mat.(Hg), ∂, _x, _y)
ε∇_nohull = rrmserr(f′.(_x, _y), ∇g, ∂, _x, _y)
@test ε∇_nohull ≈ 7.479964687673911
@test εH_nohull ≈ 38.884740966379056 | NaturalNeighbours | https://github.com/DanielVandH/NaturalNeighbours.jl.git |
|
[
"MIT"
] | 1.3.5 | ac69089b500226d09cb611688ae67015bb107b58 | code | 26861 | using ..NaturalNeighbours
using CairoMakie
using ReferenceTests
using StableRNGs
using DelaunayTriangulation
using StaticArrays
using LinearAlgebra
using DataFrames
using StatsBase
using BenchmarkTools
using AlgebraOfGraphics
const NNI = NaturalNeighbours
## Some methods and constants
const itp_methods = (
Sibson(0),
Triangle(),
Nearest(),
Laplace(),
Sibson(1),
Farin(1),
Hiyoshi(2)
)
const diff_methods = (
Direct(),
Iterative()
)
const itp_aliases = (:Sibson0, :Triangle, :Nearest, :Laplace, :Sibson1, :Farin, :Hiyoshi)
const diff_aliases = (:Direct, :Iterative)
const itp_alias_map = Dict(itp_methods .=> itp_aliases)
const diff_alias_map = Dict(diff_methods .=> diff_aliases)
const colors = Dict(itp_aliases .=> [:red, :blue, :green, :orange, :purple, :black, :brown])
const linestyles = Dict(diff_aliases .=> [:solid, :dashdotdot])
const line_elements = [
LineElement(color=color,
linewidth=22,
linestyle=:solid) for color in values(colors)
]
const style_elements = [
LineElement(color=:black,
linewidth=22,
linestyle=linestyle) for linestyle in values(linestyles)
]
const azimuths = [0.3, 0.8, 0.3, 0.6, 0.6, 0.6, 0.45]
rng = StableRNG(123)
xg = LinRange(0, 1, 25)
yg = LinRange(0, 1, 25)
x = vec([x for x in xg, _ in yg])
y = vec([y for _ in xg, y in yg])
xg2 = LinRange(0, 1, 250)
yg2 = LinRange(0, 1, 250)
xq = vec([x for x in xg2, _ in yg2])
yq = vec([y for _ in xg2, y in yg2])
tol = 1e-2
tri = triangulate([x'; y']; rng=rng)
triq = triangulate([xq'; yq']; rng=rng)
exterior_idx = identify_exterior_points(xq, yq, get_points(tri), get_convex_hull_vertices(tri); tol=tol)
interior_idx = filter(∉(exterior_idx), eachindex(xq, yq))
## The test functions
const f = [
(x, y) -> 0.75 * exp(-((9 * x - 2)^2 + (9 * y - 2)^2) / 4) + 0.75 * exp(-(9 * x + 1)^2 / 49 - (9 * y + 1) / 10) + 0.5 * exp(-((9 * x - 7)^2 + (9 * y - 3)^2) / 4) - 0.2 * exp(-(9 * x - 4)^2 - (9 * y - 7)^2)
(x, y) -> (1 / 9) * (tanh(9 * y - 9 * x) + 1)
(x, y) -> (1.25 + cos(5.4 * y)) / (6 * (1 + (3 * x - 1)^2))
(x, y) -> (1 / 3) * exp(-(81 / 16) * ((x - 1 / 2)^2 + (y - 1 / 2)^2))
(x, y) -> (1 / 3) * exp(-(81 / 4) * ((x - 1 / 2)^2 + (y - 1 / 2)^2))
(x, y) -> (1 / 9) * (64 - 81 * ((x - 1 / 2)^2 + (y - 1 / 2)^2))^(1 / 2) - 1 / 2
(x, y) -> sin(27 * x * y) - exp(-(x - y)^2 / 4) * cos(13 * x - 13 * y)
]
const ∇f = [
(x, y) -> @SVector[(exp(-(9 * x - 4)^2 - (9 * y - 7)^2) * (162 * x - 72)) / 5 - (3 * exp(-(9 * x - 2)^2 / 4 - (9 * y - 2)^2 / 4) * ((81 * x) / 2 - 9)) / 4 - (exp(-(9 * x - 7)^2 / 4 - (9 * y - 3)^2 / 4) * ((81 * x) / 2 - 63 / 2)) / 2 - (3 * exp(-(9 * y) / 10 - (9 * x + 1)^2 / 49 - 1 / 10) * ((162 * x) / 49 + 18 / 49)) / 4
(exp(-(9 * x - 4)^2 - (9 * y - 7)^2) * (162 * y - 126)) / 5 - (3 * exp(-(9 * x - 2)^2 / 4 - (9 * y - 2)^2 / 4) * ((81 * y) / 2 - 9)) / 4 - (exp(-(9 * x - 7)^2 / 4 - (9 * y - 3)^2 / 4) * ((81 * y) / 2 - 27 / 2)) / 2 - (27 * exp(-(9 * y) / 10 - (9 * x + 1)^2 / 49 - 1 / 10)) / 40]
(x, y) -> @SVector[tanh(9 * x - 9 * y)^2 - 1
1 - tanh(9 * x - 9 * y)^2]
(x, y) -> @SVector[-((108 * x - 36) * (cos((27 * y) / 5) + 5 / 4)) / (6 * (3 * x - 1)^2 + 6)^2
-(27 * sin((27 * y) / 5)) / (5 * (6 * (3 * x - 1)^2 + 6))]
(x, y) -> @SVector[-(exp(-(81 * (x - 1 / 2)^2) / 16 - (81 * (y - 1 / 2)^2) / 16) * ((81 * x) / 8 - 81 / 16)) / 3
-(exp(-(81 * (x - 1 / 2)^2) / 16 - (81 * (y - 1 / 2)^2) / 16) * ((81 * y) / 8 - 81 / 16)) / 3]
(x, y) -> @SVector[-(exp(-(81 * (x - 1 / 2)^2) / 4 - (81 * (y - 1 / 2)^2) / 4) * ((81 * x) / 2 - 81 / 4)) / 3
-(exp(-(81 * (x - 1 / 2)^2) / 4 - (81 * (y - 1 / 2)^2) / 4) * ((81 * y) / 2 - 81 / 4)) / 3]
(x, y) -> @SVector[-(162 * x - 81) / (18 * (64 - 81 * (y - 1 / 2)^2 - 81 * (x - 1 / 2)^2)^(1 / 2))
-(162 * y - 81) / (18 * (64 - 81 * (y - 1 / 2)^2 - 81 * (x - 1 / 2)^2)^(1 / 2))]
(x, y) -> @SVector[27 * y * cos(27 * x * y) + 13 * exp(-(x - y)^2 / 4) * sin(13 * x - 13 * y) + exp(-(x - y)^2 / 4) * cos(13 * x - 13 * y) * (x / 2 - y / 2)
27 * x * cos(27 * x * y) - 13 * exp(-(x - y)^2 / 4) * sin(13 * x - 13 * y) - exp(-(x - y)^2 / 4) * cos(13 * x - 13 * y) * (x / 2 - y / 2)]
]
# Closed-form Hessians of the test functions in `f` (machine-generated; do not edit by
# hand). Entry i is (x, y) -> Hfᵢ(x, y), returned as a symmetric static 2 × 2 matrix.
const Hf = [
    (x, y) -> @SMatrix[(162*exp(-(9 * x - 4)^2 - (9 * y - 7)^2))/5-(243*exp(-(9 * y) / 10 - (9 * x + 1)^2 / 49 - 1 / 10))/98-(243*exp(-(9 * x - 2)^2 / 4 - (9 * y - 2)^2 / 4))/8-(81*exp(-(9 * x - 7)^2 / 4 - (9 * y - 3)^2 / 4))/4+(3*exp(-(9 * y) / 10 - (9 * x + 1)^2 / 49 - 1 / 10)*((162*x)/49+18/49)^2)/4+(3*exp(-(9 * x - 2)^2 / 4 - (9 * y - 2)^2 / 4)*((81*x)/2-9)^2)/4+(exp(-(9 * x - 7)^2 / 4 - (9 * y - 3)^2 / 4)*((81*x)/2-63/2)^2)/2-(exp(-(9 * x - 4)^2 - (9 * y - 7)^2)*(162*x-72)^2)/5 (27*exp(-(9 * y) / 10 - (9 * x + 1)^2 / 49 - 1 / 10)*((162*x)/49+18/49))/40+(3*exp(-(9 * x - 2)^2 / 4 - (9 * y - 2)^2 / 4)*((81*x)/2-9)*((81*y)/2-9))/4+(exp(-(9 * x - 7)^2 / 4 - (9 * y - 3)^2 / 4)*((81*x)/2-63/2)*((81*y)/2-27/2))/2-(exp(-(9 * x - 4)^2 - (9 * y - 7)^2)*(162*x-72)*(162*y-126))/5
        (27*exp(-(9 * y) / 10 - (9 * x + 1)^2 / 49 - 1 / 10)*((162*x)/49+18/49))/40+(3*exp(-(9 * x - 2)^2 / 4 - (9 * y - 2)^2 / 4)*((81*x)/2-9)*((81*y)/2-9))/4+(exp(-(9 * x - 7)^2 / 4 - (9 * y - 3)^2 / 4)*((81*x)/2-63/2)*((81*y)/2-27/2))/2-(exp(-(9 * x - 4)^2 - (9 * y - 7)^2)*(162*x-72)*(162*y-126))/5 (243*exp(-(9 * y) / 10 - (9 * x + 1)^2 / 49 - 1 / 10))/400+(162*exp(-(9 * x - 4)^2 - (9 * y - 7)^2))/5-(243*exp(-(9 * x - 2)^2 / 4 - (9 * y - 2)^2 / 4))/8-(81*exp(-(9 * x - 7)^2 / 4 - (9 * y - 3)^2 / 4))/4+(3*exp(-(9 * x - 2)^2 / 4 - (9 * y - 2)^2 / 4)*((81*y)/2-9)^2)/4+(exp(-(9 * x - 7)^2 / 4 - (9 * y - 3)^2 / 4)*((81*y)/2-27/2)^2)/2-(exp(-(9 * x - 4)^2 - (9 * y - 7)^2)*(162*y-126)^2)/5]
    (x, y) -> @SMatrix[-2*tanh(9 * x - 9 * y)*(9*tanh(9 * x - 9 * y)^2-9) 2*tanh(9 * x - 9 * y)*(9*tanh(9 * x - 9 * y)^2-9)
        2*tanh(9 * x - 9 * y)*(9*tanh(9 * x - 9 * y)^2-9) -2*tanh(9 * x - 9 * y)*(9*tanh(9 * x - 9 * y)^2-9)]
    (x, y) -> @SMatrix[(2*(108*x-36)^2*(cos((27 * y) / 5)+5/4))/(6*(3*x-1)^2+6)^3-(108*(cos((27 * y) / 5)+5/4))/(6*(3*x-1)^2+6)^2 (27*sin((27 * y) / 5)*(108*x-36))/(5*(6*(3*x-1)^2+6)^2)
        (27*sin((27 * y) / 5)*(108*x-36))/(5*(6*(3*x-1)^2+6)^2) -(729 * cos((27 * y) / 5))/(25*(6*(3*x-1)^2+6))]
    (x, y) -> @SMatrix[(exp(-(81 * (x - 1 / 2)^2) / 16 - (81 * (y - 1 / 2)^2) / 16)*((81*x)/8-81/16)^2)/3-(27*exp(-(81 * (x - 1 / 2)^2) / 16 - (81 * (y - 1 / 2)^2) / 16))/8 (exp(-(81 * (x - 1 / 2)^2) / 16 - (81 * (y - 1 / 2)^2) / 16)*((81*x)/8-81/16)*((81*y)/8-81/16))/3
        (exp(-(81 * (x - 1 / 2)^2) / 16 - (81 * (y - 1 / 2)^2) / 16)*((81*x)/8-81/16)*((81*y)/8-81/16))/3 (exp(-(81 * (x - 1 / 2)^2) / 16 - (81 * (y - 1 / 2)^2) / 16)*((81*y)/8-81/16)^2)/3-(27*exp(-(81 * (x - 1 / 2)^2) / 16 - (81 * (y - 1 / 2)^2) / 16))/8]
    (x, y) -> @SMatrix[(exp(-(81 * (x - 1 / 2)^2) / 4 - (81 * (y - 1 / 2)^2) / 4)*((81*x)/2-81/4)^2)/3-(27*exp(-(81 * (x - 1 / 2)^2) / 4 - (81 * (y - 1 / 2)^2) / 4))/2 (exp(-(81 * (x - 1 / 2)^2) / 4 - (81 * (y - 1 / 2)^2) / 4)*((81*x)/2-81/4)*((81*y)/2-81/4))/3
        (exp(-(81 * (x - 1 / 2)^2) / 4 - (81 * (y - 1 / 2)^2) / 4)*((81*x)/2-81/4)*((81*y)/2-81/4))/3 (exp(-(81 * (x - 1 / 2)^2) / 4 - (81 * (y - 1 / 2)^2) / 4)*((81*y)/2-81/4)^2)/3-(27*exp(-(81 * (x - 1 / 2)^2) / 4 - (81 * (y - 1 / 2)^2) / 4))/2]
    (x, y) -> @SMatrix[-(162 * x - 81)^2/(36*(64-81*(y-1/2)^2-81*(x-1/2)^2)^(3/2))-9/(64-81*(y-1/2)^2-81*(x-1/2)^2)^(1/2) -((162 * x - 81) * (162 * y - 81))/(36*(64-81*(y-1/2)^2-81*(x-1/2)^2)^(3/2))
        -((162 * x - 81) * (162 * y - 81))/(36*(64-81*(y-1/2)^2-81*(x-1/2)^2)^(3/2)) -(162 * y - 81)^2/(36*(64-81*(y-1/2)^2-81*(x-1/2)^2)^(3/2))-9/(64-81*(y-1/2)^2-81*(x-1/2)^2)^(1/2)]
    (x, y) -> @SMatrix[(339*exp(-(x - y)^2 / 4)*cos(13 * x - 13 * y))/2-729*y^2*sin(27 * x * y)-exp(-(x - y)^2 / 4)*cos(13 * x - 13 * y)*(x/2-y/2)^2-26*exp(-(x - y)^2 / 4)*sin(13 * x - 13 * y)*(x/2-y/2) 27*cos(27 * x * y)-(339*exp(-(x - y)^2 / 4)*cos(13 * x - 13 * y))/2-729*x*y*sin(27 * x * y)+exp(-(x - y)^2 / 4)*cos(13 * x - 13 * y)*(x/2-y/2)^2+26*exp(-(x - y)^2 / 4)*sin(13 * x - 13 * y)*(x/2-y/2)
        27*cos(27 * x * y)-(339*exp(-(x - y)^2 / 4)*cos(13 * x - 13 * y))/2-729*x*y*sin(27 * x * y)+exp(-(x - y)^2 / 4)*cos(13 * x - 13 * y)*(x/2-y/2)^2+26*exp(-(x - y)^2 / 4)*sin(13 * x - 13 * y)*(x/2-y/2) (339*exp(-(x - y)^2 / 4)*cos(13 * x - 13 * y))/2-729*x^2*sin(27 * x * y)-exp(-(x - y)^2 / 4)*cos(13 * x - 13 * y)*(x/2-y/2)^2-26*exp(-(x - y)^2 / 4)*sin(13 * x - 13 * y)*(x/2-y/2)]
]
## Functions for assessing qualities
function normal_to_triangle(p₁, p₂, p₃, z₁, z₂, z₃)
    # Unit normal of the plane through the three lifted points (pᵢ, zᵢ).
    x₁, y₁ = getxy(p₁)
    x₂, y₂ = getxy(p₂)
    x₃, y₃ = getxy(p₃)
    # Twice the signed area of the planar triangle (shape-function denominator).
    Δ = x₁ * y₂ - x₂ * y₁ - x₁ * y₃ + x₃ * y₁ + x₂ * y₃ - x₃ * y₂
    # Slopes of the linear interpolant over the triangle: α = ∂z/∂x, β = ∂z/∂y.
    α = ((y₂ - y₃) / Δ) * z₁ + ((y₃ - y₁) / Δ) * z₂ + ((y₁ - y₂) / Δ) * z₃
    β = ((x₃ - x₂) / Δ) * z₁ + ((x₁ - x₃) / Δ) * z₂ + ((x₂ - x₁) / Δ) * z₃
    n = @SVector[-α, -β, 1.0]
    return n / sqrt(1 + α^2 + β^2)
end
function normal_to_triangle(tri, z, i, j, k)
    # Convenience overload: look up the vertices i, j, k of `tri` and their heights in
    # `z`, then defer to the point-based method.
    pᵢ, pⱼ, pₖ = get_point(tri, i, j, k)
    return normal_to_triangle(pᵢ, pⱼ, pₖ, z[i], z[j], z[k])
end
function ∠(v₁, v₂)
    # Angle between two vectors via Kahan's numerically stable triangle formula;
    # acos is not reliable: https://people.eecs.berkeley.edu/~wkahan/Triangle.pdf,
    # https://scicomp.stackexchange.com/a/27694/42528
    a, b, c = norm(v₁), norm(v₂), norm(v₁ - v₂)
    if a < b
        a, b = b, a # ensure a ≥ b before applying the formula
    end
    μ = b ≥ c ? c - (a - b) : b - (a - c)
    num = ((a - b) + c) * μ
    den = (a + (b + c)) * ((a - c) + b)
    return 2atan(sqrt(num / den))
end
function ∠(tri, z, i, j, k)
    # Angle at the lifted vertex (pᵢ, zᵢ) between the edges towards (pⱼ, zⱼ) and (pₖ, zₖ).
    pᵢ, pⱼ, pₖ = get_point(tri, i, j, k)
    xᵢ, yᵢ = getxy(pᵢ)
    xⱼ, yⱼ = getxy(pⱼ)
    xₖ, yₖ = getxy(pₖ)
    e₁ = @SVector[xⱼ - xᵢ, yⱼ - yᵢ, z[j] - z[i]]
    e₂ = @SVector[xₖ - xᵢ, yₖ - yᵢ, z[k] - z[i]]
    return ∠(e₁, e₂)
end
function average_normal_vector(tri, z, i)
    # Mean-weighted-angle vertex normal: https://doi.org/10.1007/s00371-004-0271-1.
    # Each incident solid triangle contributes its normal weighted by its apex angle.
    accum = @SVector[0.0, 0.0, 0.0]
    for (j, k) in get_adjacent2vertex(tri, i)
        DelaunayTriangulation.is_ghost_triangle(i, j, k) && continue
        weight = ∠(tri, z, i, j, k)
        accum = accum + weight * normal_to_triangle(tri, z, i, j, k)
    end
    return accum / norm(accum)
end
function compare_normal_vectors(tri, z, i, ∇f::Function)
    # Angle (in degrees) between the averaged estimated normal at vertex i and the
    # exact surface normal derived from ∇f.
    # Maybe this is similar to https://doi.org/10.1007/978-3-319-40548-3_19?
    # The description is so vague.
    x, y = getxy(get_point(tri, i))
    n̄̂ = average_normal_vector(tri, z, i)
    gx, gy = ∇f(x, y)
    exact = @SVector[-gx, -gy, 1.0]
    n̂ = exact / norm(exact)
    return rad2deg(∠(n̄̂, n̂))
end
function compare_normal_vectors(tri, z, ∇f::Function, interior_idx)
    # Vectorised overload: normal-vector error at each interior vertex.
    return map(i -> compare_normal_vectors(tri, z, i, ∇f), interior_idx)
end
function compare_quantities(ŷ, y, interior_idx)
    # Symmetric relative error 2‖ŷ − y‖ / ‖ŷ + y‖ per element, restricted to the
    # interior points and floored by `to_unit` so values survive a log-scaled axis.
    diffs = norm.(ŷ .- y)
    sums = norm.(ŷ .+ y)
    ε = 2 .* diffs ./ sums
    return to_unit(ε[interior_idx])
end
# Floor values at √eps(Float64) so they stay strictly positive (needed for log-scale plots).
to_unit(ε) = max.(ε, sqrt(eps(Float64)))
to_mat(H) = @SMatrix[H[1] H[3]; H[3] H[2]]
## The analysis function
# Evaluate test function `fidx` with the given interpolation (`itp_method`) and
# differentiation (`diff_method`) methods on the query points `(xq, yq)`, compare
# against the exact values/gradients/Hessians, and append the per-point results to
# `df` (which is also returned).
function analysis_function!(df, tri, triq, x, y, xq, yq, fidx, itp_method, diff_method, interior_idx)
    g = f[fidx]
    ∇g = ∇f[fidx]
    Hg = Hf[fidx]
    z = g.(x, y) # data values at the interpolation points
    itp = interpolate(tri, z; derivatives=true, method=diff_method)
    ∂ = differentiate(itp, 2) # second-order differentiator: gradient + Hessian
    ẑ = itp(xq, yq; method=itp_method)
    ∇̂Ĥ = ∂(xq, yq; method=diff_method, interpolant_method=itp_method)
    ∇̂ = SVector{2,Float64}.(first.(∇̂Ĥ))
    Ĥ = to_mat.(last.(∇̂Ĥ))
    z = g.(xq, yq) # reuse `z` for the exact values at the query points
    ∇ = ∇g.(xq, yq)
    H = Hg.(xq, yq)
    εz = compare_quantities(ẑ, z, interior_idx)
    ε∇ = compare_quantities(∇̂, ∇, interior_idx)
    εH = compare_quantities(Ĥ, H, interior_idx)
    εn = compare_normal_vectors(triq, ẑ, ∇g, interior_idx)
    _df = DataFrame(
        :z_exact => z[interior_idx],
        :z_approx => ẑ[interior_idx],
        :∇_exact => ∇[interior_idx],
        :∇_approx => ∇̂[interior_idx],
        :H_exact => H[interior_idx],
        :H_approx => Ĥ[interior_idx],
        :z_error => εz,
        :∇_error => ε∇,
        :H_error => εH,
        :n_error => εn,
        :itp_method => itp_alias_map[itp_method],
        :diff_method => diff_alias_map[diff_method],
        :f_idx => fidx
    )
    append!(df, _df)
    return df
end
function analysis_function(tri, triq, x, y, xq, yq, interior_idx)
    # Run every (test function, interpolant, differentiator) combination through
    # `analysis_function!`, accumulating all results into a single DataFrame.
    df = DataFrame(
        z_exact=Float64[],
        z_approx=Float64[],
        ∇_exact=SVector{2,Float64}[],
        ∇_approx=SVector{2,Float64}[],
        H_exact=SMatrix{2,2,Float64}[],
        H_approx=SMatrix{2,2,Float64}[],
        z_error=Float64[],
        ∇_error=Float64[],
        H_error=Float64[],
        n_error=Float64[],
        itp_method=Symbol[],
        diff_method=Symbol[],
        f_idx=Int[]
    )
    for fidx in eachindex(f, ∇f, Hf), itp_method in itp_methods, diff_method in diff_methods
        analysis_function!(df, tri, triq, x, y, xq, yq, fidx, itp_method, diff_method, interior_idx)
    end
    return df
end
## Complete analysis
# Run the full comparison and group the results by test function, interpolant, and
# differentiator for plotting.
df = analysis_function(tri, triq, x, y, xq, yq, interior_idx)
gdf = groupby(df, [:f_idx, :itp_method, :diff_method])
## Plot the results
const alph = join('a':'z') # subfigure labels (a), (b), ...
fig = Figure(fontsize=64)
# One row of axes per test function; columns: z / ∇ / H error ECDFs, normal-error ECDF,
# and the exact surface.
z_ax = [Axis(fig[i, 1], xlabel=L"\varepsilon", ylabel=L"F(\varepsilon)",
    title=L"(%$(alph[i])1): $z$ $\varepsilon$ for $f_{%$i}", titlealign=:left,
    width=600, height=400, xscale=log10) for i in eachindex(f, ∇f, Hf)]
∇_ax = [Axis(fig[i, 2], xlabel=L"\varepsilon", ylabel=L"F(\varepsilon)",
    title=L"(%$(alph[i])2): $\nabla$ $\varepsilon$ for $f_{%$i}", titlealign=:left,
    width=600, height=400, xscale=log10) for i in eachindex(f, ∇f, Hf)]
H_ax = [Axis(fig[i, 3], xlabel=L"\varepsilon", ylabel=L"F(\varepsilon)",
    title=L"(%$(alph[i])3): $H$ $\varepsilon$ for $f_{%$i}", titlealign=:left,
    width=600, height=400, xscale=log10) for i in eachindex(f, ∇f, Hf)]
n_ax = [Axis(fig[i, 4], xlabel=L"\varepsilon", ylabel=L"F(\varepsilon)",
    title=L"(%$(alph[i])4): $n$ $\varepsilon$ for $f_{%$i}", titlealign=:left,
    width=600, height=400) for i in eachindex(f, ∇f, Hf)]
f_ax = [Axis3(fig[i, 5], xlabel=L"x", ylabel=L"y", zlabel=L"f_{%$i}(x, y)",
    title=L"(%$(alph[i])5): $f_{%$i}$'s surface", titlealign=:left,
    width=600, height=400, azimuth=azimuths[i]) for i in eachindex(f, ∇f, Hf)]
# Hand-tuned axis limits; rows are test functions, columns are (z, ∇, H, n) errors.
xℓ = [
    (1e-5, 1.0) (1e-3, 1.0) (1e-2, 1.0) (0.0, 5.0)
    (1e-5, 1.0) (1e-2, 1.0) (1e-1, 1.0) (0.0, 5.0)
    (1e-6, 1e-1) (1e-5, 1.0) (1e-2, 1.0) (0.0, 2.0)
    (1e-6, 1e-1) (1e-4, 1e-1) (1e-2, 1.0) (0.0, 1.0)
    (1e-5, 1e-1) (1e-3, 1.0) (1e-2, 1.0) (0.0, 2.0)
    (1e-8, 1e-1) (1e-5, 1e-1) (1e-2, 1e-1) (0.0, 0.5)
    (1e-2, 1.0) (1e-2, 1.0) (1e-1, 1.0) (0.0, 15.0)
]
for i in eachindex(f)
    xlims!(z_ax[i], xℓ[i, 1]...)
    xlims!(∇_ax[i], xℓ[i, 2]...)
    xlims!(H_ax[i], xℓ[i, 3]...)
    xlims!(n_ax[i], xℓ[i, 4]...)
end
# Draw one ECDF per (function, interpolant, differentiator) group; colour encodes the
# interpolant, line style the differentiator. The nearest-neighbour interpolant is
# skipped for the normal errors (its piecewise-constant surface has no useful normals).
for (f_idx, itp_alias, diff_alias) in keys(gdf)
    _df = gdf[(f_idx, itp_alias, diff_alias)]
    clr = colors[itp_alias]
    ls = linestyles[diff_alias]
    _z_ax = z_ax[f_idx]
    _∇_ax = ∇_ax[f_idx]
    _H_ax = H_ax[f_idx]
    _n_ax = n_ax[f_idx]
    z_error = _df.z_error
    ∇_error = _df.∇_error
    H_error = _df.H_error
    n_error = _df.n_error
    ecdfplot!(_z_ax, z_error, color=clr, linestyle=ls, linewidth=7)
    ecdfplot!(_∇_ax, ∇_error, color=clr, linestyle=ls, linewidth=7)
    ecdfplot!(_H_ax, H_error, color=clr, linestyle=ls, linewidth=7)
    if itp_alias ≠ :Nearest
        ecdfplot!(_n_ax, n_error, color=clr, linestyle=ls, linewidth=7)
    end
end
# Exact surfaces in the last column, for reference.
for f_idx in eachindex(f)
    g = f[f_idx]
    fz = [g(x, y) for x in xg2, y in yg2]
    _f_ax = f_ax[f_idx]
    surface!(_f_ax, xg2, yg2, fz)
end
# Shared legends spanning pairs of rows.
[Legend(
    fig[i:(i+1), 6],
    [line_elements, style_elements],
    [string.(keys(colors)), string.(keys(linestyles))],
    ["Interpolant", "Differentiator"],
    titlesize=78,
    labelsize=78,
    patchsize=(100, 30)
) for i in (1, 3, 5)]
resize_to_layout!(fig)
fig
@test_reference normpath(@__DIR__, "../..", "docs", "src", "figures", "interpolant_comparison.png") fig
## Visual comparisons
# 3D surfaces of every interpolant applied to every test function (gradient-based
# differentiator only), compared against a reference image.
considered_itp = eachindex(itp_methods)
considered_fidx = eachindex(f)
fig = Figure(fontsize=72, size=(4800, 4900))
ax = [
    Axis3(fig[i, j],
        xlabel=L"x",
        ylabel=L"y",
        zlabel=L"f(x, y)",
        title=L"(%$(alph[i])%$(j)): ($f_{%$i}$, %$(itp_aliases[j]))",
        titlealign=:left,
        width=600,
        height=600,
        azimuth=azimuths[i]
    )
    for i in considered_fidx, j in considered_itp
]
for (j, i) in enumerate(considered_fidx)
    for (ℓ, k) in enumerate(considered_itp)
        _gdf = gdf[(i, itp_aliases[k], diff_aliases[1])]
        _ax = ax[j, ℓ]
        _z = _gdf.z_approx
        surface!(_ax, xq[interior_idx], yq[interior_idx], _z)
        xlims!(_ax, 0, 1)
        ylims!(_ax, 0, 1)
        hidedecorations!(_ax)
    end
end
@test_reference normpath(@__DIR__, "../..", "docs", "src", "figures", "3d_visual_interpolant_comparison.png") fig
function plot_errors(considered_fidx, considered_itp, gdf, interior_idx, error_type, colorranges)
    # One heatmap of `error_type` per (test function, interpolant) pair, always using
    # the first differentiator alias. Returns the assembled Figure.
    fig = Figure(fontsize=72)
    ax = [
        Axis(fig[i, j],
            xlabel=L"x",
            ylabel=L"y",
            title=L"(%$(alph[i])%$(j)): ($f_{%$i}$, %$(itp_aliases[j]))",
            titlealign=:left,
            width=600,
            height=600,
        )
        for i in considered_fidx, j in considered_itp
    ]
    for (row, fidx) in enumerate(considered_fidx), (col, itp) in enumerate(considered_itp)
        group = gdf[(fidx, itp_aliases[itp], diff_aliases[1])]
        _ax = ax[row, col]
        heatmap!(_ax, xq[interior_idx], yq[interior_idx], group[!, error_type], colorrange=colorranges[row])
        xlims!(_ax, 0, 1)
        ylims!(_ax, 0, 1)
        hidedecorations!(_ax)
    end
    resize_to_layout!(fig)
    return fig
end
# Heatmaps of each error type with hand-tuned colour ranges (one range per test function).
z_colorranges = [(1e-4, 0.01), (1e-5, 0.1), (1e-2, 0.02), (1e-3, 0.005), (1e-2, 0.05), (1e-3, 0.005), (1e-2, 0.5)]
fig = plot_errors(considered_fidx, considered_itp, gdf, interior_idx, :z_error, z_colorranges)
@test_reference normpath(@__DIR__, "../..", "docs", "src", "figures", "2d_visual_interpolant_comparison_z_error.png") fig
∇_colorranges = [(1e-2, 0.2), (1e-2, 1.0), (1e-2, 0.01), (1e-3, 0.01), (1e-2, 0.05), (1e-3, 0.01), (1e-2, 0.25)]
fig = plot_errors(considered_fidx, considered_itp, gdf, interior_idx, :∇_error, ∇_colorranges)
@test_reference normpath(@__DIR__, "../..", "docs", "src", "figures", "2d_visual_interpolant_comparison_grad_error.png") fig
H_colorranges = [(1e-1, 0.5), (0.2, 0.8), (1e-1, 0.2), (1e-2, 0.2), (1e-1, 0.25), (1e-2, 0.1), (1e-1, 0.8)]
fig = plot_errors(considered_fidx, considered_itp, gdf, interior_idx, :H_error, H_colorranges)
@test_reference normpath(@__DIR__, "../..", "docs", "src", "figures", "2d_visual_interpolant_comparison_hess_error.png") fig
n_colorranges = [(0, 5), (0, 5), (0, 2), (0, 1), (0, 2), (0, 2.5), (0, 15)]
fig = plot_errors(considered_fidx, considered_itp, gdf, interior_idx, :n_error, n_colorranges)
@test_reference normpath(@__DIR__, "../..", "docs", "src", "figures", "2d_visual_interpolant_comparison_normal_error.png") fig
## Random analysis
if get(ENV, "CI", "false") == "false"
function rrmse(y, ŷ) # interior_indices already applied
num = 0.0
den = 0.0
for (yᵢ, ŷᵢ) in zip(y, ŷ)
if all(isfinite, (yᵢ..., ŷᵢ...))
num += norm(yᵢ .- ŷᵢ)^2
den += norm(ŷᵢ)^2
end
end
return 100sqrt(num / den)
end
function median_edge_length(tri)
lengths = zeros(DelaunayTriangulation.num_solid_edges(tri))
for (k, (i, j)) in (enumerate ∘ each_solid_edge)(tri)
p, q = get_point(tri, i, j)
px, py = getxy(p)
qx, qy = getxy(q)
ℓ = sqrt((qx - px)^2 + (qy - py)^2)
lengths[k] = ℓ
end
return median(lengths)
end
function random_analysis_function(nsamples, triq, xq, yq, tol, rng)
npoints = rand(rng, 50:2500)
xs = [rand(rng, 50) for _ in 1:nsamples]
ys = [rand(rng, 50) for _ in 1:nsamples]
tris = [triangulate(tuple.(x, y); rng) for (x, y) in zip(xs, ys)]
[refine!(tri; max_points=npoints) for tri in tris]
xs = [first.(get_points(tri)) for tri in tris]
ys = [last.(get_points(tri)) for tri in tris]
exterior_idxs = [identify_exterior_points(xq, yq, get_points(tri), get_convex_hull_vertices(tri); tol=tol) for tri in tris]
interior_idxs = [filter(∉(exterior_idx), eachindex(xq, yq)) for exterior_idx in exterior_idxs]
median_lengths = [median_edge_length(tri) for tri in tris]
sortidx = sortperm(median_lengths)
[permute!(obj, sortidx) for obj in (xs, ys, tris, exterior_idxs, interior_idxs, median_lengths)]
dfs = Channel{DataFrame}(nsamples)
Base.Threads.@threads for i in 1:nsamples
tri = tris[i]
x = xs[i]
y = ys[i]
interior_idx = interior_idxs[i]
put!(dfs, analysis_function(tri, triq, x, y, xq, yq, interior_idx))
println("Processed simulation $i.")
end
close(dfs)
dfs = collect(dfs)
df = DataFrame(
f_idx=Int64[],
itp_method=Symbol[],
diff_method=Symbol[],
z_rrmse=Float64[],
∇_rrmse=Float64[],
H_rrmse=Float64[],
n_error_median=Float64[],
median_edge_length=Float64[]
)
for (i, _df) in enumerate(dfs)
_gdf = groupby(_df, [:f_idx, :itp_method, :diff_method])
_cgdf = combine(_gdf,
[:z_exact, :z_approx] => ((z_exact, z_approx) -> rrmse(z_exact, z_approx)) => :z_rrmse,
[:∇_exact, :∇_approx] => ((∇_exact, ∇_approx) -> rrmse(∇_exact, ∇_approx)) => :∇_rrmse,
[:H_exact, :H_approx] => ((H_exact, H_approx) -> rrmse(H_exact, H_approx)) => :H_rrmse,
:n_error => median => :n_error_median)
_cgdf[!, :median_edge_length] .= median_lengths[i]
append!(df, _cgdf)
end
_gdf = groupby(df, [:f_idx, :itp_method, :diff_method])
return _gdf
end
    # Run the randomised study and plot each error statistic against the median edge
    # length of the triangulation (one row of axes per test function).
    nsamples = 50
    rng = StableRNG(998881)
    tol = 1e-1
    random_results = random_analysis_function(nsamples, triq, xq, yq, tol, rng)
    fig = Figure(fontsize=64)
    z_ax = [Axis(fig[i, 1], xlabel=L"$ $Median edge length", ylabel=L"$z$ error",
        title=L"(%$(alph[i])1): $f_{%$i}", titlealign=:left,
        width=600, height=400, yscale=log10) for i in eachindex(f, ∇f, Hf)]
    ∇_ax = [Axis(fig[i, 2], xlabel=L"$ $Median edge length", ylabel=L"$\nabla$ error",
        title=L"(%$(alph[i])2): $f_{%$i}", titlealign=:left,
        width=600, height=400, yscale=log10) for i in eachindex(f, ∇f, Hf)]
    H_ax = [Axis(fig[i, 3], xlabel=L"$ $Median edge length", ylabel=L"$H$ error",
        title=L"(%$(alph[i])3): $f_{%$i}", titlealign=:left,
        width=600, height=400, yscale=log10) for i in eachindex(f, ∇f, Hf)]
    n_ax = [Axis(fig[i, 4], xlabel=L"$ $Median edge length", ylabel=L"$n$ error",
        title=L"(%$(alph[i])4): $f_{%$i}", titlealign=:left,
        width=600, height=400, yscale=log10) for i in eachindex(f, ∇f, Hf)]
    for (f_idx, itp_alias, diff_alias) in keys(random_results)
        _df = random_results[(f_idx, itp_alias, diff_alias)]
        _df = filter(:itp_method => !=(:Nearest), _df) # nearest-neighbour excluded
        clr = colors[itp_alias]
        ls = linestyles[diff_alias]
        _z_ax = z_ax[f_idx]
        _∇_ax = ∇_ax[f_idx]
        _H_ax = H_ax[f_idx]
        _n_ax = n_ax[f_idx]
        x = _df.median_edge_length
        z_error = _df.z_rrmse
        ∇_error = _df.∇_rrmse
        H_error = _df.H_rrmse
        n_error = _df.n_error_median
        lines!(_z_ax, x, z_error, color=clr, linestyle=ls, linewidth=7)
        lines!(_∇_ax, x, ∇_error, color=clr, linestyle=ls, linewidth=7)
        lines!(_H_ax, x, H_error, color=clr, linestyle=ls, linewidth=7)
        lines!(_n_ax, x, n_error, color=clr, linestyle=ls, linewidth=7)
    end
    [Legend(
        fig[i:(i+1), 6],
        [line_elements, style_elements],
        [string.(keys(colors)), string.(keys(linestyles))],
        ["Interpolant", "Differentiator"],
        titlesize=78,
        labelsize=78,
        patchsize=(100, 30)
    ) for i in (1, 3, 5)]
    resize_to_layout!(fig)
    fig
    @test_reference normpath(@__DIR__, "../..", "docs", "src", "figures", "median_edge_length_comparisons.png") fig by = psnr_equality(10)
end
## Computation times
if get(ENV, "CI", "false") == "false"
function circular_example(m) # extra points are added outside of the circular barrier for derivative generation
pts = [(cos(θ) + 1e-6randn(), sin(θ) + 1e-6randn()) for θ = LinRange(0, 2π, (m + 1))][1:end-1] # avoid cocircular points
extra_pts = NTuple{2,Float64}[]
while length(extra_pts) < 50
p = (5randn(), 5randn())
if norm(p) > 1.01
push!(extra_pts, p)
end
end
append!(pts, extra_pts)
tri = triangulate(pts)
return tri
end
    # Benchmark evaluation of `itp_method` at the origin for each neighbourhood size in
    # `m_range`, using interpolants of `g` over `circular_example(m)`. Returns a
    # DataFrame of minimum running times (ms).
    function running_time_analysis(itp_method, m_range, g)
        running_times = zeros(length(m_range))
        for (i, m) in enumerate(m_range)
            tri = circular_example(m)
            z = [g(x, y) for (x, y) in DelaunayTriangulation.each_point(tri)]
            itp = interpolate(tri, z; derivatives=true)
            # `$`-interpolation avoids benchmarking global-variable access.
            b = @benchmark $itp($0.0, $0.0; method=$itp_method)
            running_times[i] = minimum(b.times) / 1e6 # ms
        end
        return DataFrame(
            running_times=running_times,
            method=itp_alias_map[itp_method],
            m=m_range
        )
    end
function running_time_analysis(m_range, g)
df = DataFrame(
running_times=Float64[],
method=Symbol[],
m=Int[]
)
for itp_method in itp_methods
_running_times = running_time_analysis(itp_method, m_range, g)
append!(df, _running_times)
end
return df
end
    # Benchmark the last (hardest) test function and plot running time vs. the number
    # of natural neighbours; the dashed line marks m = 6.
    m_range = 3:20
    g = f[end]
    running_times = running_time_analysis(m_range, g)
    fig = data(running_times) *
          mapping(:m, :running_times) *
          mapping(color=:method) *
          visual(Scatter, markersize=14) |> plt ->
        draw(plt; axis=(width=600, height=400, yscale=log10, xlabel=L"$ $Number of natural neighbours", ylabel=L"$t$ (ms)"))
    vlines!(fig.figure[1, 1], [6], linewidth=3, linestyle=:dash, color=:black)
    @test_reference normpath(@__DIR__, "../..", "docs", "src", "figures", "method_benchmarks.png") fig by = psnr_equality(10)
end | NaturalNeighbours | https://github.com/DanielVandH/NaturalNeighbours.jl.git |
|
[
"MIT"
] | 1.3.5 | ac69089b500226d09cb611688ae67015bb107b58 | code | 2894 | using ..NaturalNeighbours
using DelaunayTriangulation
using CairoMakie
using ReferenceTests
using StableRNGs
include(normpath(@__DIR__, "../.", "helper_functions", "test_functions.jl"))
# Test surface and random data sites; the interpolant is built with derivatives so
# Sibson(1) is available. Query points form a 100 × 100 grid on [0, 1]².
f = (x, y) -> (1 / 9) * (tanh(9 * y - 9 * x) + 1)
rng = StableRNG(123)
x = rand(rng, 100)
y = rand(rng, 100)
z = f.(x, y)
itp = interpolate(x, y, z; derivatives=true)
xg = LinRange(0, 1, 100)
yg = LinRange(0, 1, 100)
_x = vec([x for x in xg, _ in yg])
_y = vec([y for _ in xg, y in yg])
sib_vals = itp(_x, _y)
sib1_vals = itp(_x, _y; method=Sibson(1))
function plot_itp(fig, x, y, vals, title, i, show_data_sites, itp, xd=nothing, yd=nothing, show_3d=true, levels=-0.1:0.05:0.3)
    # Filled contour of `vals` in column i (optionally with data sites and the
    # triangulation's convex hull overlaid), plus an optional 3D surface beneath.
    # Returns the contour plot object (so a Colorbar can be attached).
    ax2 = Axis(fig[1, i], xlabel="x", ylabel="y", width=600, height=600, title=title, titlealign=:left)
    c = contourf!(ax2, x, y, vals, colormap=:viridis, levels=levels, extendhigh=:auto)
    if show_data_sites
        scatter!(ax2, xd, yd, color=:red, markersize=9)
    end
    tri = itp.triangulation
    hull_points = [get_point(tri, v) for v in get_convex_hull_vertices(tri)]
    lines!(ax2, hull_points, color=:white, linewidth=4)
    if show_3d
        ax3 = Axis3(fig[2, i], xlabel="x", ylabel="y", zlabel=L"z", width=600, height=600, title=" ", titlealign=:left, azimuth=0.49)
        surface!(ax3, x, y, vals, color=vals, colormap=:viridis, colorrange=extrema(levels))
        zlims!(ax3, 0, 0.25)
    end
    return c
end
# Sibson vs Sibson-1 vs exact, with and without projection, then absolute errors.
fig = Figure(fontsize=36)
plot_itp(fig, _x, _y, sib_vals, "(a): Sibson", 1, false, itp, x, y)
plot_itp(fig, _x, _y, sib1_vals, "(b): Sibson-1", 2, false, itp, x, y)
plot_itp(fig, _x, _y, f.(_x, _y), "(c): Exact", 3, true, itp, x, y)
resize_to_layout!(fig)
fig
@test_reference normpath(@__DIR__, "../..", "docs", "src", "figures", "sibson_vs_sibson1.png") fig
# Same comparison with project=false (extrapolated points become NaN instead of being
# projected onto the convex hull).
sib_vals = itp(_x, _y, project=false)
sib1_vals = itp(_x, _y; method=Sibson(1), project=false)
fig = Figure(fontsize=36)
plot_itp(fig, _x, _y, sib_vals, "(a): Sibson", 1, false, itp, x, y)
plot_itp(fig, _x, _y, sib1_vals, "(b): Sibson-1", 2, false, itp, x, y)
plot_itp(fig, _x, _y, f.(_x, _y), "(c): Exact", 3, true, itp, x, y)
resize_to_layout!(fig)
fig
@test_reference normpath(@__DIR__, "../..", "docs", "src", "figures", "sibson_vs_sibson1_project_false.png") fig
# Pointwise absolute errors (scaled by 100) for both methods.
sib_vals = itp(_x, _y)
sib1_vals = itp(_x, _y; method=Sibson(1))
sib_errs = @. 100abs(sib_vals - f(_x, _y))
sib1_errs = @. 100abs(sib1_vals - f(_x, _y))
fig = Figure(fontsize=36)
plot_itp(fig, _x, _y, sib_errs, "(a): Sibson", 1, true, itp, x, y, false, 0:0.5:3)
c = plot_itp(fig, _x, _y, sib1_errs, "(b): Sibson-1", 2, true, itp, x, y, false, 0:0.5:3)
Colorbar(fig[1, 3], c)
resize_to_layout!(fig)
fig
@test_reference normpath(@__DIR__, "../..", "docs", "src", "figures", "sibson_vs_sibson1_errors.png") fig by=psnr_equality(20)
# Global relative errors for each method.
# NOTE(review): both denominators use sib_vals — presumably deliberate so the two
# errors share a common scale, but confirm esib1 should not use sib1_vals instead.
esib0 = 100sqrt(sum((sib_vals .- f.(_x, _y)).^2) / sum(sib_vals.^2))
esib1 = 100sqrt(sum((sib1_vals .- f.(_x, _y)).^2) / sum(sib_vals.^2))
@test esib1 < esib0 | NaturalNeighbours | https://github.com/DanielVandH/NaturalNeighbours.jl.git |
|
[
"MIT"
] | 1.3.5 | ac69089b500226d09cb611688ae67015bb107b58 | code | 6483 | using ..NaturalNeighbours
using DelaunayTriangulation
using CairoMakie
using ReferenceTests
using StableRNGs
include(normpath(@__DIR__, "../.", "helper_functions", "test_functions.jl"))
## Example of a tessellation
# A small point set whose Voronoi tessellation is plotted and compared against a
# reference image.
points = [
    (0.0, 0.0), (-1.0, 1.0), (-0.5, 1.0), (0.0, 1.0), (0.5, 1.0), (1.0, 1.0),
    (1.0, 0.8), (1.0, 0.0), (1.0, -0.5), (1.0, -1.0),
    (0.1, -1.0), (-0.8, -1.0), (-1.0, -1.0),
    (-1.0, -0.7), (-1.0, -0.1), (-1.0, 0.6),
    (-0.1, -0.8), (0.2, -0.8),
    (-0.6, -0.4), (0.9, 0.0), (-0.5, 0.5), (-0.4, 0.6), (-0.1, 0.8)
]
tri = triangulate(points)
vorn = voronoi(tri)
fig, ax, sc = voronoiplot(vorn, axis=(width=400, height=400), markercolor=:red)
resize_to_layout!(fig)
fig
@test_reference normpath(@__DIR__, "../..", "docs", "src", "figures", "example_tessellation.png") fig
## Nearest neighbour interpolant
# Nearest-neighbour interpolant of test function 4 next to the exact surface.
f, f′, f′′ = test_4()
x1, y1 = point_set_1()
z1 = f.(x1, y1)
xg = LinRange(0, 1, 50)
yg = LinRange(0, 1, 50)
x = vec([x for x in xg, _ in yg])
y = vec([y for _ in xg, y in yg])
ze = f.(x, y) |> x -> reshape(x, length(xg), length(yg))
itp = interpolate(x1, y1, z1)
vals = itp(x, y, method=:nearest) |> x -> reshape(x, length(xg), length(yg))
fig = Figure(fontsize=36, size=(1700, 600))
ax = Axis3(fig[1, 1], xlabel=L"x", ylabel=L"y", zlabel=L"z", title=L"(a): $f^{\text{NEAR}}$", titlealign=:left)
surface!(ax, xg, yg, vals)
scatter!(ax, x1, y1, z1, color=:red, strokewidth=2, markersize=4)
ax = Axis3(fig[1, 2], xlabel=L"x", ylabel=L"y", zlabel=L"z", title=L"(b): $f$", titlealign=:left)
surface!(ax, xg, yg, ze)
scatter!(ax, x1, y1, z1, color=:red, strokewidth=2, markersize=4)
@test_reference normpath(@__DIR__, "../..", "docs", "src", "figures", "fnear_example.png") fig
## Laplace interpolation
# Illustrate the virtual Voronoi tile created when a query point q is inserted into
# the tessellation; the new tile is shaded on top of the original diagram.
rng = StableRNG(872973)
pts = [(0.0, 8.0), (0.0, 0.0), (14.0, 0.0),
    (14.0, 8.0), (4.0, 4.0), (10.0, 6.0),
    (6.0, 2.0), (12.0, 4.0), (0.0, 4.0),
    (2.5, 5.0), (7.0, 3.3), (4.5, 5.2),
    (13.0, 0.5), (12.0, 6.0), (8.5, 3.5),
    (0.5, 6.0), (1.5, 6.0), (3.5, 6.0),
    (0.5, 2.0), (2.5, 2.0), (2.5, 2.5),
    (9.0, 2.0), (8.5, 6.0), (4.0, 2.0)]
tri = triangulate(pts, randomise=false, delete_ghosts=false, rng=rng)
vorn = voronoi(tri)
q = (5.0, 4.0)
tri2 = deepcopy(tri)
add_point!(tri2, q, rng=rng)
vorn2 = voronoi(tri2)
V = get_polygon(vorn2, DelaunayTriangulation.num_points(tri2))
AX2 = get_area(vorn2, DelaunayTriangulation.num_points(tri2))
fig, ax, sc = voronoiplot(vorn, axis=(width=400, height=400), markercolor=:red, markersize=7, color=:white)
xlims!(ax, 3, 9)
ylims!(ax, 1.5, 7)
Vcoords = [get_polygon_point(vorn2, i) for i in V]
poly!(ax, Vcoords, color=(:blue, 0.2), strokewidth=2, strokecolor=:blue)
scatter!(ax, [q], color=:magenta, markersize=14)
resize_to_layout!(fig)
@test_reference normpath(@__DIR__, "../..", "docs", "src", "figures", "new_tile.png") fig
## Laplace interpolant
# The next four sections follow the same template: interpolate test function 4 on
# point set 1 with a given method, and plot the interpolant next to the exact surface.
f, f′, f′′ = test_4()
x1, y1 = point_set_1()
z1 = f.(x1, y1)
xg = LinRange(0, 1, 50)
yg = LinRange(0, 1, 50)
x = vec([x for x in xg, _ in yg])
y = vec([y for _ in xg, y in yg])
ze = f.(x, y) |> x -> reshape(x, length(xg), length(yg))
itp = interpolate(x1, y1, z1)
vals = itp(x, y, method=Laplace()) |> x -> reshape(x, length(xg), length(yg))
fig = Figure(fontsize=36, size=(1700, 600))
ax = Axis3(fig[1, 1], xlabel=L"x", ylabel=L"y", zlabel=L"z", title=L"(a): $f^{\text{LAP}}$", titlealign=:left)
surface!(ax, xg, yg, vals)
scatter!(ax, x1, y1, z1, color=:red, strokewidth=2, markersize=4)
ax = Axis3(fig[1, 2], xlabel=L"x", ylabel=L"y", zlabel=L"z", title=L"(b): $f$", titlealign=:left)
surface!(ax, xg, yg, ze)
scatter!(ax, x1, y1, z1, color=:red, strokewidth=2, markersize=4)
@test_reference normpath(@__DIR__, "../..", "docs", "src", "figures", "flap_example.png") fig
## Sibson interpolant
f, f′, f′′ = test_4()
x1, y1 = point_set_1()
z1 = f.(x1, y1)
xg = LinRange(0, 1, 50)
yg = LinRange(0, 1, 50)
x = vec([x for x in xg, _ in yg])
y = vec([y for _ in xg, y in yg])
ze = f.(x, y) |> x -> reshape(x, length(xg), length(yg))
itp = interpolate(x1, y1, z1)
vals = itp(x, y, method=Sibson()) |> x -> reshape(x, length(xg), length(yg))
fig = Figure(fontsize=36, size=(1700, 600))
ax = Axis3(fig[1, 1], xlabel=L"x", ylabel=L"y", zlabel=L"z", title=L"(a): $f^{\text{SIB}0}$", titlealign=:left)
surface!(ax, xg, yg, vals)
scatter!(ax, x1, y1, z1, color=:red, strokewidth=2, markersize=4)
ax = Axis3(fig[1, 2], xlabel=L"x", ylabel=L"y", zlabel=L"z", title=L"(b): $f$", titlealign=:left)
surface!(ax, xg, yg, ze)
scatter!(ax, x1, y1, z1, color=:red, strokewidth=2, markersize=4)
@test_reference normpath(@__DIR__, "../..", "docs", "src", "figures", "fsib0_example.png") fig
## Sibson-1 interpolant
# Sibson(1) needs gradient information, hence derivatives=true.
f, f′, f′′ = test_4()
x1, y1 = point_set_1()
z1 = f.(x1, y1)
xg = LinRange(0, 1, 50)
yg = LinRange(0, 1, 50)
x = vec([x for x in xg, _ in yg])
y = vec([y for _ in xg, y in yg])
ze = f.(x, y) |> x -> reshape(x, length(xg), length(yg))
itp = interpolate(x1, y1, z1, derivatives=true)
vals = itp(x, y, method=Sibson(1)) |> x -> reshape(x, length(xg), length(yg))
fig = Figure(fontsize=36, size=(1700, 600))
ax = Axis3(fig[1, 1], xlabel=L"x", ylabel=L"y", zlabel=L"z", title=L"(a): $f^{\text{SIB}1}$", titlealign=:left)
surface!(ax, xg, yg, vals)
scatter!(ax, x1, y1, z1, color=:red, strokewidth=2, markersize=4)
ax = Axis3(fig[1, 2], xlabel=L"x", ylabel=L"y", zlabel=L"z", title=L"(b): $f$", titlealign=:left)
surface!(ax, xg, yg, ze)
scatter!(ax, x1, y1, z1, color=:red, strokewidth=2, markersize=4)
@test_reference normpath(@__DIR__, "../..", "docs", "src", "figures", "fsib1_example.png") fig
## Triangle interpolant
f, f′, f′′ = test_4()
x1, y1 = point_set_1()
z1 = f.(x1, y1)
xg = LinRange(0, 1, 50)
yg = LinRange(0, 1, 50)
x = vec([x for x in xg, _ in yg])
y = vec([y for _ in xg, y in yg])
ze = f.(x, y) |> x -> reshape(x, length(xg), length(yg))
itp = interpolate(x1, y1, z1, derivatives=true)
vals = itp(x, y, method=Triangle(; allow_cache = false)) |> x -> reshape(x, length(xg), length(yg))
fig = Figure(fontsize=36, size=(1700, 600))
ax = Axis3(fig[1, 1], xlabel=L"x", ylabel=L"y", zlabel=L"z", title=L"(a): $f^{\text{TRI}}$", titlealign=:left)
surface!(ax, xg, yg, vals)
scatter!(ax, x1, y1, z1, color=:red, strokewidth=2, markersize=4)
ax = Axis3(fig[1, 2], xlabel=L"x", ylabel=L"y", zlabel=L"z", title=L"(b): $f$", titlealign=:left)
surface!(ax, xg, yg, ze)
scatter!(ax, x1, y1, z1, color=:red, strokewidth=2, markersize=4)
@test_reference normpath(@__DIR__, "../..", "docs", "src", "figures", "ftri_example.png") fig
| NaturalNeighbours | https://github.com/DanielVandH/NaturalNeighbours.jl.git |
|
[
"MIT"
] | 1.3.5 | ac69089b500226d09cb611688ae67015bb107b58 | code | 2111 | using ..NaturalNeighbours
using CairoMakie
using ReferenceTests
using StableRNGs
## The data
f = (x, y) -> sin(x * y) - cos(x - y) * exp(-(x - y)^2)
x = vec([(i - 1) / 9 for i in (1, 3, 4, 5, 8, 9, 10), j in (1, 2, 3, 5, 6, 7, 9, 10)])
y = vec([(j - 1) / 9 for i in (1, 3, 4, 5, 8, 9, 10), j in (1, 2, 3, 5, 6, 7, 9, 10)])
z = f.(x, y)
## The interpolant and grid
itp = interpolate(x, y, z; derivatives=true)
xg = LinRange(0, 1, 100)
yg = LinRange(0, 1, 100)
_x = vec([x for x in xg, _ in yg])
_y = vec([y for _ in xg, y in yg])
exact = f.(_x, _y)
## Evaluate some interpolants
sibson_vals = itp(_x, _y; method=Sibson())
triangle_vals = itp(_x, _y; method=Triangle())
laplace_vals = itp(_x, _y; method=Laplace())
sibson_1_vals = itp(_x, _y; method=Sibson(1))
nearest_vals = itp(_x, _y; method=Nearest())
farin_vals = itp(_x, _y; method=Farin())
hiyoshi_vals = itp(_x, _y; method=Hiyoshi(2))
## Plot
function plot_2d(fig, i, j, title, vals, xg, yg, x, y, show_scatter=true)
    # Filled contour of `vals` (column-major reshape onto the xg × yg grid) at fig[i, j],
    # optionally overlaying the data sites.
    ax = Axis(fig[i, j], xlabel="x", ylabel="y", width=600, height=600, title=title, titlealign=:left)
    zgrid = reshape(vals, (length(xg), length(yg)))
    contourf!(ax, xg, yg, zgrid, colormap=:viridis, levels=-1:0.05:0, extendlow=:auto, extendhigh=:auto)
    show_scatter && scatter!(ax, x, y, color=:red, markersize=14)
end
function plot_3d(fig, i, j, title, vals, xg, yg)
    # 3D surface of `vals` over the xg × yg grid at fig[i, j], coloured by height.
    ax = Axis3(fig[i, j], xlabel="x", ylabel="y", width=600, height=600, title=title, titlealign=:left)
    zgrid = reshape(vals, (length(xg), length(yg)))
    surface!(ax, xg, yg, zgrid, color=vals, colormap=:viridis)
end
# Plot every method (plus the exact surface, which gets no scatter overlay) in a
# two-row grid and compare against the repository's example image.
all_vals = (sibson_vals, triangle_vals, laplace_vals, sibson_1_vals, nearest_vals, farin_vals, hiyoshi_vals, exact)
titles = ("(a): Sibson", "(b): Triangle", "(c): Laplace", "(d): Sibson-1", "(e): Nearest", "(f): Farin", "(g): Hiyoshi", "(h): Exact")
fig = Figure(fontsize=55)
for (i, (vals, title)) in enumerate(zip(all_vals, titles))
    plot_2d(fig, 1, i, title, vals, xg, yg, x, y, !(vals === exact))
    plot_3d(fig, 2, i, " ", vals, xg, yg)
end
resize_to_layout!(fig)
fig
@test_reference normpath(@__DIR__, "../..", "example.png") fig by=psnr_equality(20)
| NaturalNeighbours | https://github.com/DanielVandH/NaturalNeighbours.jl.git |
|
[
"MIT"
] | 1.3.5 | ac69089b500226d09cb611688ae67015bb107b58 | code | 9683 | using ..NaturalNeighbours
using CairoMakie
using DelaunayTriangulation
using DelimitedFiles
using Downloads
using StableRNGs
using StatsBase
using ReferenceTests
## Download and setup the data
# Elevation samples and a boundary polygon, hosted as gists.
data_url = "https://gist.githubusercontent.com/DanielVandH/13687b0918e45a416a5c93cd52c91449/raw/a8da6cdc94859fd66bcff85a2307f0f9cd57a18c/data.txt"
boundary_url = "https://gist.githubusercontent.com/DanielVandH/13687b0918e45a416a5c93cd52c91449/raw/a8da6cdc94859fd66bcff85a2307f0f9cd57a18c/boundary.txt"
data_dir = Downloads.download(data_url)
boundary_dir = Downloads.download(boundary_url)
# Both files carry a 6-line header that is skipped.
data = readdlm(data_dir, skipstart=6)
data[:, 3] ./= 1000.0 # m -> km
boundary = readdlm(boundary_dir, skipstart=6)
# Keep only sites with non-negative elevation.
good_elevation = findall(≥(0), @view data[:, 3])
data = @views data[good_elevation, :]
data_sites = [(data[i, 1], data[i, 2]) for i in axes(data, 1)]
elevation_data = @views data[:, 3]
boundary_points = [(boundary[i, 1], boundary[i, 2]) for i in axes(boundary, 1)]
## Setup the data for plotting
## Need to get the triangulation for tricontourf, and we need to identify indices in data_sites for boundary_points
function nearest_tuple(q, data)
    # Return the index of the point in `data` closest to `q`, comparing
    # squared Euclidean distances (no sqrt needed for the argmin).
    best_dist = Inf
    best_idx = 0
    qx, qy = getxy(q)
    for (idx, pt) in pairs(data)
        px, py = getxy(pt)
        d2 = (qx - px)^2 + (qy - py)^2
        if d2 < best_dist
            best_dist = d2
            best_idx = idx
        end
    end
    return best_idx
end
function update_boundary(boundary_points, data_sites)
    # Snap each boundary point to its nearest data site, deduplicate, close the
    # polygon by repeating the first node, and reverse so the boundary is
    # traversed clockwise. Returns (snapped boundary points, data sites, node indices).
    boundary_nodes = [nearest_tuple(q, data_sites) for q in boundary_points]
    unique!(boundary_nodes)
    push!(boundary_nodes, boundary_nodes[begin])
    reverse!(boundary_nodes) # clockwise traversal
    return data_sites[boundary_nodes], data_sites, boundary_nodes
end
boundary_points, data_sites, boundary_nodes = update_boundary(boundary_points, data_sites)
## Downsample the data
# Reproducible 5000-site subsample of the full elevation data.
rng = StableRNG(123)
desample_idx = sample(rng, axes(data, 1), 5000, replace=false)
ds_data = data[desample_idx, :]
ds_data_sites = [(ds_data[i, 1], ds_data[i, 2]) for i in axes(ds_data, 1)]
ds_elevation_data = @views ds_data[:, 3]
ds_boundary_points, ds_data_sites, ds_boundary_nodes = update_boundary(boundary_points, ds_data_sites)
reverse!(ds_boundary_nodes) # so that the boundary is traversed clockwise
ds_tri = triangulate(ds_data_sites, boundary_nodes=ds_boundary_nodes)
# Triangle connectivity as an n × 3 matrix for mesh!.
ds_triangles = [T[j] for T in each_solid_triangle(ds_tri), j in 1:3]
## Plot the data
colorrange = (0, 4)
levels = LinRange(colorrange..., 40)
fig = Figure(fontsize=24)
ax1 = Axis3(fig[1, 1], xlabel="Longitude", ylabel="Latitude", zlabel="Elevation (km)", width=600, height=400, azimuth=0.9, title="(c): Downsampled height data (n = $(length(ds_elevation_data)))", titlealign=:left)
mesh!(ax1, ds_data, ds_triangles, color=ds_elevation_data, colorrange=colorrange)
ax2 = Axis(fig[1, 2], xlabel="Longitude", ylabel="Latitude", width=600, height=400, title="(d): Downsampled height data (n = $(length(ds_elevation_data)))", titlealign=:left)
tf = tricontourf!(ax2, ds_tri, ds_elevation_data, levels=levels)
Colorbar(fig[1:2, 3], tf)
resize_to_layout!(fig)
# save(normpath(@__DIR__, "..", "docs", "src", "figures", "swiss_heights.png"), fig)
@test_reference normpath(@__DIR__, "../..", "docs", "src", "figures", "swiss_heights.png") fig
## Define the interpolant
interpolant = interpolate(ds_data_sites, ds_elevation_data; derivatives=true)
## Evaluate the interpolant
# Bounding box of the (downsampled) domain, then a 250 × 250 query grid.
a, b, c, d = DelaunayTriangulation.polygon_bounds(ds_data_sites, ds_boundary_nodes)
nx = 250
ny = 250
xg = LinRange(a, b, nx)
yg = LinRange(c, d, ny)
x = [xg[i] for i in 1:nx, j in 1:ny] |> vec
y = [yg[j] for i in 1:nx, j in 1:ny] |> vec
sibson_vals = interpolant(x, y; method=Sibson(), parallel=true)
sibson_1_vals = interpolant(x, y; method=Sibson(1), parallel=true)
laplace_vals = interpolant(x, y; method=Laplace(), parallel=true)
triangle_vals = interpolant(x, y; method=Triangle(), parallel=true)
nearest_vals = interpolant(x, y; method=Nearest(), parallel=true)
farin_vals = interpolant(x, y; method=Farin(), parallel=true)
hiyoshi_vals = interpolant(x, y; method=Hiyoshi(2), parallel=true)
## Plot the results
# Triangulation of the query grid itself, used for surface meshes of the results.
query_tri = triangulate([x'; y']; randomise=false)
query_triangles = [T[j] for T in each_solid_triangle(query_tri), j in 1:3]
# Add one 3D surface panel (at (i1, j1)) and one filled-contour panel (at (i2, j2))
# for a single method's interpolated values. Relies on the globals `colorrange`,
# `levels`, and `ds_boundary_points` defined in the surrounding script.
# Returns the mesh plot object (used later to attach a Colorbar).
function plot_results!(fig, i1, j1, i2, j2, x, y, xg, yg, vals, title1, title2, query_triangles, query_tri, a, b, c, d, e, f, nx, ny)
    ax = Axis3(fig[i1, j1], xlabel="Longitude", ylabel="Latitude", zlabel="Elevation (km)", width=600, height=400, azimuth=0.9, title=title1, titlealign=:left)
    m = mesh!(ax, hcat(x, y, vals), query_triangles, color=vals, colorrange=colorrange)
    xlims!(ax, a, b)
    ylims!(ax, c, d)
    zlims!(ax, e, f)
    ax = Axis(fig[i2, j2], xlabel="Longitude", ylabel="Latitude", width=600, height=400, title=title2, titlealign=:left)
    contourf!(ax, xg, yg, reshape(vals, (nx, ny)), levels=levels)
    # Overlay the convex hull of the query grid (dashed red) and the domain boundary (white).
    lines!(ax, [get_point(query_tri, i) for i in get_convex_hull_vertices(query_tri)], color=:red, linewidth=4, linestyle=:dash)
    lines!(ax, ds_boundary_points, color=:white, linewidth=4)
    xlims!(ax, a, b)
    ylims!(ax, c, d)
    return m
end
# Assemble the full comparison figure: one 3D + 2D panel pair per interpolation
# method, plus the original (downsampled) height data in the last row.
# Uses the globals `x`, `y`, `xg`, `yg`, and `levels` from the surrounding script.
function plot_results(sibson_vals, sibson_1_vals, laplace_vals, triangle_vals, nearest_vals, farin_vals, hiyoshi_vals, query_triangles, interpolant, a, b, c, d, e, f, nx, ny, data, triangles, elevation_data, tri)
    fig = Figure(fontsize=24)
    m1 = plot_results!(fig, 1, 1, 1, 2, x, y, xg, yg, sibson_vals, "(a): Sibson", "(b): Sibson", query_triangles, interpolant.triangulation, a, b, c, d, e, f, nx, ny)
    m2 = plot_results!(fig, 1, 3, 1, 4, x, y, xg, yg, sibson_1_vals, "(c): Sibson-1", "(d): Sibson-1", query_triangles, interpolant.triangulation, a, b, c, d, e, f, nx, ny)
    m3 = plot_results!(fig, 2, 1, 2, 2, x, y, xg, yg, laplace_vals, "(e): Laplace", "(f): Laplace", query_triangles, interpolant.triangulation, a, b, c, d, e, f, nx, ny)
    m4 = plot_results!(fig, 2, 3, 2, 4, x, y, xg, yg, triangle_vals, "(g): Triangle", "(h): Triangle", query_triangles, interpolant.triangulation, a, b, c, d, e, f, nx, ny)
    m5 = plot_results!(fig, 3, 1, 3, 2, x, y, xg, yg, nearest_vals, "(i): Nearest", "(j): Nearest", query_triangles, interpolant.triangulation, a, b, c, d, e, f, nx, ny)
    m6 = plot_results!(fig, 3, 3, 3, 4, x, y, xg, yg, farin_vals, "(k): Farin", "(ℓ): Farin", query_triangles, interpolant.triangulation, a, b, c, d, e, f, nx, ny)
    m7 = plot_results!(fig, 4, 1, 4, 2, x, y, xg, yg, hiyoshi_vals, "(m): Hiyoshi", "(n): Hiyoshi", query_triangles, interpolant.triangulation, a, b, c, d, e, f, nx, ny)
    # Final row: the raw (downsampled) data for visual reference.
    ax = Axis3(fig[4, 3], xlabel="Longitude", ylabel="Latitude", zlabel="Elevation (km)", width=600, height=400, azimuth=0.9, title="(k): Original height data", titlealign=:left)
    mesh!(ax, data, triangles, color=elevation_data, colorrange=(0, 4))
    xlims!(ax, a, b)
    ylims!(ax, c, d)
    zlims!(ax, e, f)
    ax = Axis(fig[4, 4], xlabel="Longitude", ylabel="Latitude", width=600, height=400, title="(o): Original height data", titlealign=:left)
    tricontourf!(ax, tri, elevation_data, levels=levels)
    xlims!(ax, a, b)
    ylims!(ax, c, d)
    Colorbar(fig[1:4, 5], m1)
    resize_to_layout!(fig)
    return fig
end
# Common z-axis limits for all surface panels.
e, f = 0.0, 4.5
fig = plot_results(sibson_vals, sibson_1_vals, laplace_vals, triangle_vals, nearest_vals, farin_vals, hiyoshi_vals, query_triangles, interpolant, a, b, c, d, e, f, nx, ny, ds_data, ds_triangles, ds_elevation_data, ds_tri)
# save(normpath(@__DIR__, "..", "docs", "src", "figures", "swiss_heights_interpolated.png"), fig)
@test_reference normpath(@__DIR__, "../..", "docs", "src", "figures", "swiss_heights_interpolated.png") fig
## Plot the results with project = false
# Re-evaluate without projecting exterior query points onto the boundary.
sibson_vals_p = interpolant(x, y; method=Sibson(), parallel=true, project=false)
sibson_1_vals_p = interpolant(x, y; method=Sibson(1), parallel=true, project=false)
laplace_vals_p = interpolant(x, y; method=Laplace(), parallel=true, project=false)
triangle_vals_p = interpolant(x, y; method=Triangle(; allow_cache = false), parallel=true, project=false)
nearest_vals_p = interpolant(x, y; method=Nearest(), parallel=true, project=false)
farin_vals_p = interpolant(x, y; method=Farin(), parallel=true, project=false)
hiyoshi_vals_p = interpolant(x, y; method=Hiyoshi(2), parallel=true, project=false)
fig = plot_results(sibson_vals_p, sibson_1_vals_p, laplace_vals_p, triangle_vals_p, nearest_vals_p, farin_vals_p, hiyoshi_vals_p, query_triangles, interpolant, a, b, c, d, e, f, nx, ny, ds_data, ds_triangles, ds_elevation_data, ds_tri)
# save(normpath(@__DIR__, "..", "docs", "src", "figures", "swiss_heights_interpolated_projected.png"), fig)
@test_reference normpath(@__DIR__, "../..", "docs", "src", "figures", "swiss_heights_interpolated_projected.png") fig
## Plot the results, replacing all points outside of the boundary
# Mask exterior query points with Inf so they render as gaps.
exterior_idx = identify_exterior_points(x, y, ds_data_sites, ds_boundary_nodes)
sibson_vals_p[exterior_idx] .= Inf
sibson_1_vals_p[exterior_idx] .= Inf
laplace_vals_p[exterior_idx] .= Inf
triangle_vals_p[exterior_idx] .= Inf
nearest_vals_p[exterior_idx] .= Inf
farin_vals_p[exterior_idx] .= Inf
hiyoshi_vals_p[exterior_idx] .= Inf
fig = plot_results(sibson_vals_p, sibson_1_vals_p, laplace_vals_p, triangle_vals_p, nearest_vals_p, farin_vals_p, hiyoshi_vals_p, query_triangles, interpolant, a, b, c, d, e, f, nx, ny, ds_data, ds_triangles, ds_elevation_data, ds_tri)
# save(normpath(@__DIR__, "..", "docs", "src", "figures", "swiss_heights_interpolated_projected_boundary.png"), fig)
@test_reference normpath(@__DIR__, "../..", "docs", "src", "figures", "swiss_heights_interpolated_projected_boundary.png") fig
| NaturalNeighbours | https://github.com/DanielVandH/NaturalNeighbours.jl.git |
|
[
"MIT"
] | 1.3.5 | ac69089b500226d09cb611688ae67015bb107b58 | code | 566 | function random_points_in_convex_hull(tri::Triangulation, n; rng=Random.default_rng()) # bit slow. oh well
    # Rejection-sample `n` points uniformly from the convex hull of `tri`:
    # draw candidates from the hull's bounding box and keep those strictly inside.
    boundary_nodes = get_convex_hull_vertices(tri)
    points = get_points(tri)
    bbox = DT.polygon_bounds(points, boundary_nodes)
    F = DT.number_type(tri)
    pts = NTuple{2,F}[]
    while length(pts) < n
        # Uniform candidate inside the bounding box (bbox = (xmin, xmax, ymin, ymax)).
        p = (rand(rng, F) * (bbox[2] - bbox[1]) + bbox[1], rand(rng, F) * (bbox[4] - bbox[3]) + bbox[3])
        δ = DT.distance_to_polygon(p, points, boundary_nodes)
        # Positive signed distance ⟹ strictly inside the hull polygon.
        if δ > 0
            push!(pts, p)
        end
    end
    return pts
end
|
[
"MIT"
] | 1.3.5 | ac69089b500226d09cb611688ae67015bb107b58 | code | 13891 | function objective_function_gradient_direct(θ, p)
    # Weighted least-squares objective for a first-order (gradient-only) Taylor
    # fit about (xᵢ, yᵢ): θ = (β₁, β₂) is the candidate gradient, and p carries
    # the triangulation, neighbourhood indices S, data z, expansion point, and weights λ.
    tri, S, z, xᵢ, yᵢ, zᵢ, λ = p
    ℓ = 0.0
    β₁, β₂ = θ
    # NOTE: the loop variable λ shadows the weight vector λ unpacked from p;
    # zip(λ, S) is evaluated with the outer (vector) λ before the shadowing takes effect.
    for (λ, s) in zip(λ, S)
        zₛ = z[s]
        pₛ = get_point(tri, s)
        xₛ, yₛ = getxy(pₛ)
        # Inverse-squared-distance weight, scaled by the per-neighbour weight λ.
        wᵢ = λ / ((xₛ - xᵢ)^2 + (yₛ - yᵢ)^2)
        εᵢ² = wᵢ * (zₛ - zᵢ - β₁ * (xₛ - xᵢ) - β₂ * (yₛ - yᵢ))^2
        ℓ += εᵢ²
    end
    return ℓ
end
# Reference gradient estimator at `r` (a vertex index or a point) from data `z`:
# builds a weighted least-squares linear fit over the natural/1-ring neighbourhood,
# then refines it with Nelder–Mead on `objective_function_gradient_direct`.
# Returns (optimised gradient, least-squares gradient).
function estimate_gradient_direct(tri, r, z; use_sibson_weight=false, rng=Random.default_rng())
    if r isa Integer
        # At a data site: 1-ring neighbourhood, unit weights.
        S = DT.iterated_neighbourhood(tri, r, 1)
        zᵢ = z[r]
        xᵢ, yᵢ = getxy(get_point(tri, r))
        S = collect(S)
        λ = similar(S)
        fill!(λ, 1)
    else
        # At an arbitrary point: natural neighbours, with zᵢ the Sibson-interpolated value.
        nc = NNI.compute_natural_coordinates(Sibson(), tri, r, rng=rng)
        S = NNI.get_indices(nc)
        λ = NNI.get_coordinates(nc)
        zᵢ = 0.0
        for (a, b) in zip(λ, S)
            zₖ = z[b]
            zᵢ += a * zₖ
        end
        xᵢ, yᵢ = getxy(r)
        if !use_sibson_weight
            fill!(λ, 1.0)
        end
    end
    r = (xᵢ, yᵢ)
    # Weighted least squares: rows (Δx, Δy), weights λ / ‖Δ‖² (squared below).
    X1 = [getx(get_point(tri, s)) - getx(r) for s in S]
    X2 = [gety(get_point(tri, s)) - gety(r) for s in S]
    X = hcat(X1, X2)
    W = [sqrt(λ) / norm(r .- get_point(tri, s)) for (λ, s) in zip(λ, S)]
    W .= W .^ 2
    W = Diagonal(W)
    Z = [z[s] - zᵢ for s in S]
    # Normal equations: ∇ = (XᵀWX)⁻¹ XᵀWZ, used as the optimiser's initial guess.
    ∇ = (X' * W * X) \ (X' * W * Z)
    prob = OptimizationProblem(
        objective_function_gradient_direct,
        copy(∇),
        (tri, S, z, xᵢ, yᵢ, zᵢ, λ)
    )
    sol = solve(prob, NLopt.LN_NELDERMEAD)
    return sol.u, ∇
end
function objective_function_gradient_hessian_cubic_direct(θ, p)
    # Weighted least-squares objective for a cubic Taylor expansion about (xᵢ, yᵢ).
    # θ packs the candidate gradient (2 entries), Hessian (3 unique entries),
    # and cubic coefficients (4 entries); p carries the fit data.
    tri, S, z, xᵢ, yᵢ, zᵢ = p
    g1, g2, h11, h22, h12, q1, q2, q3, q4 = θ
    total = 0.0
    for idx in S
        px, py = getxy(get_point(tri, idx))
        dx = px - xᵢ
        dy = py - yᵢ
        # Inverse-squared-distance weight.
        w = 1 / (dx^2 + dy^2)
        linear = g1 * dx + g2 * dy
        quad = h11 * dx^2 / 2 + h22 * dy^2 / 2 + h12 * dx * dy
        cubic = q1 * dx^3 / 6 + q2 * dx^2 * dy / 2 + q3 * dx * dy^2 / 2 + q4 * dy^3 / 6
        resid = z[idx] - (zᵢ + linear + quad + cubic)
        total += w * resid^2
    end
    return total
end
# Reference gradient + Hessian estimator via a cubic Taylor fit: weighted least
# squares over a widened neighbourhood, refined with Nelder–Mead.
# Returns ((optimised ∇, optimised H), (least-squares ∇, least-squares H)),
# with H stored as its 3 unique entries.
function estimate_gradient_hessian_cubic_direct(tri, r, z; rng=Random.default_rng())
    if r isa Integer
        # At a data site: 3-ring neighbourhood (cubic fit needs 9 coefficients).
        S = DT.iterated_neighbourhood(tri, r, 3)
        zᵢ = z[r]
        xᵢ, yᵢ = getxy(get_point(tri, r))
    else
        # At an arbitrary point: natural neighbours, zᵢ Sibson-interpolated,
        # then the neighbourhood is expanded twice (union of neighbours' neighbours).
        nc = NNI.compute_natural_coordinates(Sibson(), tri, r, rng=rng)
        S = NNI.get_indices(nc)
        λ = NNI.get_coordinates(nc)
        zᵢ = 0.0
        for (a, b) in zip(λ, S)
            zₖ = z[b]
            zᵢ += a * zₖ
        end
        xᵢ, yᵢ = getxy(r)
        _S = [get_neighbours(tri, i) for i in nc.indices]
        _S1 = copy(nc.indices)
        push!(_S1, reduce(union, _S)...)
        filter!(!DT.is_ghost_vertex, _S1)
        unique!(_S1)
        _S2 = [get_neighbours(tri, i) for i in _S1]
        push!(_S1, reduce(union, _S2)...)
        filter!(!DT.is_ghost_vertex, _S1)
        unique!(_S1)
        S = _S1
    end
    r = (xᵢ, yᵢ)
    x = [getx(get_point(tri, s)) - getx(r) for s in S]
    y = [gety(get_point(tri, s)) - gety(r) for s in S]
    # NOTE(review): the cubic columns here are ordered x³/6, y³/6, x²y/2, xy²/2,
    # whereas the objective interprets θ[6:9] as x³/6, x²y/2, xy²/2, y³/6 — so the
    # initial guess for the cubic terms is permuted. Only sol.u[1:5] is returned,
    # so the final answer is unaffected, but worth confirming intent.
    X = @. [x y x^2 / 2 y^2 / 2 x * y x^3 / 6 y^3 / 6 x^2 * y / 2 x * y^2 / 2]
    W = [1 / norm(r .- get_point(tri, s)) for s in S]
    W .= W .^ 2
    W = Diagonal(W)
    Z = [z[s] - zᵢ for s in S]
    # Normal equations give the least-squares coefficients / optimiser seed.
    ∇ℋ = (X' * W * X) \ (X' * W * Z)
    prob = OptimizationProblem(
        objective_function_gradient_hessian_cubic_direct,
        copy(∇ℋ),
        (tri, S, z, xᵢ, yᵢ, zᵢ)
    )
    sol = solve(prob, NLopt.LN_NELDERMEAD)
    return (sol.u[1:2], sol.u[3:5]), (∇ℋ[1:2], ∇ℋ[3:5])
end
function objective_function_gradient_hessian_quadratic_direct(θ, p)
    # Weighted least-squares objective for a quadratic Taylor expansion about
    # (xᵢ, yᵢ). θ packs the candidate gradient (2 entries) and Hessian (3 unique
    # entries); p carries the fit data.
    tri, S, z, xᵢ, yᵢ, zᵢ = p
    g1, g2, h11, h22, h12 = θ
    total = 0.0
    for idx in S
        px, py = getxy(get_point(tri, idx))
        dx = px - xᵢ
        dy = py - yᵢ
        # Inverse-squared-distance weight.
        w = 1 / (dx^2 + dy^2)
        linear = g1 * dx + g2 * dy
        quad = h11 * dx^2 / 2 + h22 * dy^2 / 2 + h12 * dx * dy
        resid = z[idx] - (zᵢ + linear + quad)
        total += w * resid^2
    end
    return total
end
# Reference gradient + Hessian estimator via a quadratic Taylor fit: weighted
# least squares over a 2-ring / expanded natural neighbourhood, refined with
# Nelder–Mead. Returns ((optimised ∇, optimised H), (least-squares ∇, least-squares H)).
function estimate_gradient_hessian_quadratic_direct(tri, r, z; rng=Random.default_rng())
    if r isa Integer
        # At a data site: 2-ring neighbourhood (quadratic fit needs 5 coefficients).
        S = DT.iterated_neighbourhood(tri, r, 2)
        zᵢ = z[r]
        xᵢ, yᵢ = getxy(get_point(tri, r))
    else
        # At an arbitrary point: natural neighbours, zᵢ Sibson-interpolated,
        # then one round of neighbourhood expansion.
        nc = NNI.compute_natural_coordinates(Sibson(), tri, r, rng=rng)
        S = NNI.get_indices(nc)
        λ = NNI.get_coordinates(nc)
        zᵢ = 0.0
        for (a, b) in zip(λ, S)
            zₖ = z[b]
            zᵢ += a * zₖ
        end
        xᵢ, yᵢ = getxy(r)
        _S = [get_neighbours(tri, i) for i in nc.indices]
        _S1 = copy(nc.indices)
        push!(_S1, reduce(union, _S)...)
        filter!(!DT.is_ghost_vertex, _S1)
        unique!(_S1)
        S = _S1
    end
    r = (xᵢ, yᵢ)
    x = [getx(get_point(tri, s)) - getx(r) for s in S]
    y = [gety(get_point(tri, s)) - gety(r) for s in S]
    # Design matrix columns: Δx, Δy, Δx²/2, Δy²/2, ΔxΔy (matching the objective's θ order).
    X = @. [x y x^2 / 2 y^2 / 2 x * y]
    W = [1 / norm(r .- get_point(tri, s)) for s in S]
    W .= W .^ 2
    W = Diagonal(W)
    Z = [z[s] - zᵢ for s in S]
    # Normal equations give the least-squares coefficients / optimiser seed.
    ∇ℋ = (X' * W * X) \ (X' * W * Z)
    prob = OptimizationProblem(
        objective_function_gradient_hessian_quadratic_direct,
        copy(∇ℋ),
        (tri, S, z, xᵢ, yᵢ, zᵢ)
    )
    sol = solve(prob, NLopt.LN_NELDERMEAD)
    return (sol.u[1:2], sol.u[3:5]), (∇ℋ[1:2], ∇ℋ[3:5])
end
function objective_function_gradient_hessian_from_initial_gradients(θ, p)
    # Objective that blends a value-fit term (weight α) with a fit to
    # previously-estimated gradients (weight 1 - α), per neighbour, each scaled
    # by λ / ‖Δ‖². θ packs the candidate gradient and the 3 unique Hessian entries.
    tri, S, z, xᵢ, yᵢ, zᵢ, λ, initial_gradients, α = p
    g1, g2, h11, h22, h12 = θ
    total = 0.0
    for (w, idx) in zip(λ, S)
        zₛ = z[idx]
        gₛ1, gₛ2 = initial_gradients[idx]
        px, py = getxy(get_point(tri, idx))
        H = [h11 h12; h12 h22]
        ∇ = [g1; g2]
        ∇ₛ = [gₛ1; gₛ2]
        d = [px - xᵢ; py - yᵢ]
        # Quadratic-model value residual at the neighbour.
        value_term = α * (zᵢ + ∇' * d + d' * H * d / 2 - zₛ)^2
        # Mismatch between the model's gradient at the neighbour and its initial gradient.
        grad_term = (1 - α) * norm(H * d + ∇ - ∇ₛ)^2
        total = total + (w / norm(d)^2) * (value_term + grad_term)
    end
    return total
end
# Reference joint gradient + Hessian estimator that combines function values
# (weight α) with previously-computed `initial_gradients` (weight 1 - α):
# stacks the value equations (A), the x-gradient equations (B), and the
# y-gradient equations (C) into one weighted least-squares system, then refines
# with Nelder–Mead. Returns ((optimised ∇, H), (least-squares ∇, H)).
function estimate_gradient_hessian_from_initial_gradients(tri, r, z, α=0.1; use_sibson_weight=false, initial_gradients, rng=Random.default_rng())
    if r isa Integer
        # At a data site: 1-ring neighbourhood, unit weights.
        S = DT.iterated_neighbourhood(tri, r, 1)
        zᵢ = z[r]
        xᵢ, yᵢ = getxy(get_point(tri, r))
        S = collect(S)
        λ = similar(S)
        fill!(λ, 1)
    else
        # At an arbitrary point: natural neighbours, zᵢ Sibson-interpolated.
        nc = NNI.compute_natural_coordinates(Sibson(), tri, r, rng=rng)
        S = NNI.get_indices(nc)
        λ = NNI.get_coordinates(nc)
        zᵢ = 0.0
        for (a, b) in zip(λ, S)
            zₖ = z[b]
            zᵢ += a * zₖ
        end
        xᵢ, yᵢ = getxy(r)
        if !use_sibson_weight
            fill!(λ, 1.0)
        end
    end
    r = (xᵢ, yᵢ)
    X1 = [getx(get_point(tri, s)) - getx(r) for s in S]
    X2 = [gety(get_point(tri, s)) - gety(r) for s in S]
    # A: value equations (quadratic model), rows weighted by sqrt(α λ / ‖Δ‖²).
    A = @. [X1 X2 X1^2 / 2 X2^2 / 2 X1 * X2]
    for i in axes(A, 1)
        A[i, :] .*= sqrt(α * λ[i] / norm(r .- get_point(tri, S[i]))^2)
    end
    # B: ∂/∂x equations matching initial gradients, weighted by sqrt((1-α) λ / ‖Δ‖²).
    B = [ones(size(A, 1), 1) zeros(size(A, 1), 1) X1 zeros(size(A, 1), 1) X2]
    for i in axes(B, 1)
        B[i, :] .*= sqrt((1 - α) * λ[i] / norm(r .- get_point(tri, S[i]))^2)
    end
    # C: ∂/∂y equations, same weighting as B.
    C = [zeros(size(A, 1), 1) ones(size(A, 1), 1) zeros(size(A, 1), 1) X2 X1]
    for i in axes(C, 1)
        C[i, :] .*= sqrt((1 - α) * λ[i] / norm(r .- get_point(tri, S[i]))^2)
    end
    D = [A; B; C]
    # Right-hand side stacks value differences and the two gradient components,
    # weighted consistently with the corresponding rows of D.
    c = vcat(zᵢ .- z[S], [initial_gradients[s][1] for s in S], [initial_gradients[s][2] for s in S])
    for i in axes(A, 1)
        c[i] *= sqrt(α * λ[i] / norm(r .- get_point(tri, S[i]))^2)
        c[i+length(S)] *= sqrt((1 - α) * λ[i] / norm(r .- get_point(tri, S[i]))^2)
        c[i+2*length(S)] *= sqrt((1 - α) * λ[i] / norm(r .- get_point(tri, S[i]))^2)
    end
    ∇ℋ = D \ c
    prob = OptimizationProblem(
        objective_function_gradient_hessian_from_initial_gradients,
        copy(∇ℋ),
        (tri, S, z, xᵢ, yᵢ, zᵢ, λ, initial_gradients, α)
    )
    sol = solve(prob, NLopt.LN_NELDERMEAD)
    return (sol.u[1:2], sol.u[3:5]), (∇ℋ[1:2], ∇ℋ[3:5])
end
"""
    slow_test_derivative(; x, y, tri, z, order, method, interpolant_method,
        alpha, use_cubic_terms, use_sibson_weight, initial_gradients, rng)

Reference routine used to validate the package's derivative generation. Estimates the
gradient (`order == 1`) or the gradient and Hessian (`order == 2`) at `(x, y)` three ways —
via direct optimisation, via weighted least squares, and via the package's own
`generate_*_order_derivatives` — and returns the three results for comparison.
For `order == 1` returns `(∇opt, ∇ls, ∇man)`; for `order == 2` returns
`((∇opt, Hopt), (∇ls, Hls), (∇man, Hman))`.
"""
function slow_test_derivative(;
    x,
    y,
    tri,
    z,
    order,
    method,
    interpolant_method,
    alpha,
    use_cubic_terms,
    use_sibson_weight,
    initial_gradients,
    rng)
    itp = interpolate(tri, z)
    # The original nested if/else both returned, making a trailing
    # `throw("Invalid arguments.")` unreachable; it has been removed and the
    # nesting flattened into an if/elseif/else chain.
    if order == 1
        # First order: optimisation + least squares vs the package's estimate.
        ∇opt, ∇ls = estimate_gradient_direct(tri, (x, y), z; use_sibson_weight=use_sibson_weight, rng=deepcopy(rng))
        _rng = deepcopy(rng)
        S = Set{Int64}()
        S′ = Set{Int64}()
        λ, E = NNI.get_taylor_neighbourhood!(S, S′, tri, (x, y), 1, NNI.NaturalNeighboursCache(tri); rng=_rng)
        ∇man = NNI.generate_first_order_derivatives(
            method,
            tri,
            z,
            itp(x, y; method=interpolant_method, rng=_rng),
            (x, y),
            λ,
            E,
            use_sibson_weight=use_sibson_weight)
        return ∇opt, ∇ls, collect(∇man)
    elseif method == Direct()
        # Second order, direct Taylor fit (quadratic, or cubic if requested).
        if use_cubic_terms
            (∇opt, Hopt), (∇ls, Hls) = estimate_gradient_hessian_cubic_direct(tri, (x, y), z, rng=deepcopy(rng))
        else
            (∇opt, Hopt), (∇ls, Hls) = estimate_gradient_hessian_quadratic_direct(tri, (x, y), z, rng=deepcopy(rng))
        end
        _rng = deepcopy(rng)
        S = Set{Int64}()
        S′ = Set{Int64}()
        λ, E = NNI.get_taylor_neighbourhood!(S, S′, tri, (x, y), 2 + use_cubic_terms, NNI.NaturalNeighboursCache(tri); rng=_rng)
        ∇man, Hman = NNI.generate_second_order_derivatives(
            method,
            tri,
            z,
            itp(x, y; method=interpolant_method, rng=_rng),
            (x, y),
            λ,
            E,
            use_cubic_terms=use_cubic_terms
        )
        return (∇opt, Hopt), (∇ls, Hls), (collect(∇man), collect(Hman))
    else
        # Second order, iterative fit seeded with `initial_gradients` (weight `alpha`).
        (∇opt, Hopt), (∇ls, Hls) = estimate_gradient_hessian_from_initial_gradients(tri, (x, y), z, alpha; use_sibson_weight=use_sibson_weight, initial_gradients=initial_gradients, rng=deepcopy(rng))
        _rng = deepcopy(rng)
        S = Set{Int64}()
        S′ = Set{Int64}()
        λ, E = NNI.get_taylor_neighbourhood!(S, S′, tri, (x, y), 1, NNI.NaturalNeighboursCache(tri); rng=_rng)
        ∇man, Hman = NNI.generate_second_order_derivatives(
            method,
            tri,
            z,
            itp(x, y; method=interpolant_method, rng=_rng),
            (x, y),
            λ,
            E,
            alpha=alpha,
            use_cubic_terms=use_cubic_terms,
            initial_gradients=initial_gradients
        )
        return (∇opt, Hopt), (∇ls, Hls), (collect(∇man), collect(Hman))
    end
end
"""
    slow_test_derivative(itp1, itp2, ∂11, ∂12, ∂21, ∂22; x, y, rng, tri, z, kwargs...)

Check that the differentiators `∂11`/`∂12` (first/second order, built from `itp1`) and
`∂21`/`∂22` (from `itp2`) agree — to 10% relative tolerance — with the
optimisation-based, least-squares, and manual reference estimates produced by the
keyword method of `slow_test_derivative`, and that each differentiator is deterministic
for a fixed RNG. Returns `true` on success; otherwise prints the failing flag indices
and returns `false`.
"""
function slow_test_derivative(itp1, itp2, ∂11, ∂12, ∂21, ∂22; x, y, rng, tri, z, kwargs...)
    grad11 = collect(∂11(x, y; rng=deepcopy(rng), kwargs...))
    grad12, hess12 = collect.(∂12(x, y; rng=deepcopy(rng), kwargs...))
    grad21 = collect(∂21(x, y; rng=deepcopy(rng), kwargs...))
    grad22, hess22 = collect.(∂22(x, y; rng=deepcopy(rng), kwargs...))
    gradopt11, gradls11, gradman11 = collect.(slow_test_derivative(; x, y, rng=deepcopy(rng), tri, z, order=1, initial_gradients=itp1.gradient, kwargs...))
    (gradopt12, hessopt12), (gradls12, hessls12), (gradman12, hessman12) = collect.(slow_test_derivative(; x, y, tri, z, rng=deepcopy(rng), order=2, initial_gradients=itp1.gradient, kwargs...))
    gradopt21, gradls21, gradman21 = collect.(slow_test_derivative(; x, y, rng=deepcopy(rng), order=1, tri, z, initial_gradients=itp2.gradient, kwargs...))
    (gradopt22, hessopt22), (gradls22, hessls22), (gradman22, hessman22) = collect.(slow_test_derivative(; x, y, tri, z, rng=deepcopy(rng), order=2, initial_gradients=itp2.gradient, kwargs...))
    # Differentiators vs the manual (package-internal) reference.
    # (The original code assigned flag2/flag3 twice with identical right-hand
    # sides; the dead duplicate assignments have been removed.)
    flag1 = isapprox(grad11, gradman11, rtol=1e-1)
    flag2 = isapprox(grad12, gradman12, rtol=1e-1)
    flag3 = isapprox(hess12, hessman12, rtol=1e-1)
    flag4 = isapprox(grad21, gradman21, rtol=1e-1)
    flag5 = isapprox(grad22, gradman22, rtol=1e-1)
    flag6 = isapprox(hess22, hessman22, rtol=1e-1)
    # Differentiators vs the optimisation-based reference.
    flag7 = isapprox(grad11, gradopt11, rtol=1e-1)
    flag8 = isapprox(grad12, gradopt12, rtol=1e-1)
    flag9 = isapprox(hess12, hessopt12, rtol=1e-1)
    flag10 = isapprox(grad21, gradopt21, rtol=1e-1)
    flag11 = isapprox(grad22, gradopt22, rtol=1e-1)
    flag12 = isapprox(hess22, hessopt22, rtol=1e-1)
    # Differentiators vs the least-squares reference.
    flag13 = isapprox(grad11, gradls11, rtol=1e-1)
    flag14 = isapprox(grad12, gradls12, rtol=1e-1)
    flag15 = isapprox(hess12, hessls12, rtol=1e-1)
    flag16 = isapprox(grad21, gradls21, rtol=1e-1)
    flag17 = isapprox(grad22, gradls22, rtol=1e-1)
    flag18 = isapprox(hess22, hessls22, rtol=1e-1)
    # Determinism: identical RNG copies must produce identical results.
    flag19 = ∂11(x, y; rng=deepcopy(rng), kwargs...) |> collect == ∂11(x, y; rng=deepcopy(rng), kwargs...) |> collect
    flag20 = ∂12(x, y; rng=deepcopy(rng), kwargs...) |> collect == ∂12(x, y; rng=deepcopy(rng), kwargs...) |> collect
    flag21 = ∂21(x, y; rng=deepcopy(rng), kwargs...) |> collect == ∂21(x, y; rng=deepcopy(rng), kwargs...) |> collect
    flag22 = ∂22(x, y; rng=deepcopy(rng), kwargs...) |> collect == ∂22(x, y; rng=deepcopy(rng), kwargs...) |> collect
    all_flags = (flag1, flag2, flag3, flag4, flag5, flag6, flag7, flag8, flag9,
        flag10, flag11, flag12, flag13, flag14, flag15, flag16, flag17, flag18,
        flag19, flag20, flag21, flag22)
    final_flag = all(all_flags)
    if !final_flag
        idx = findall(!, all_flags)
        println("Failed at index: $idx")
        return false
    end
    return true
end
|
[
"MIT"
] | 1.3.5 | ac69089b500226d09cb611688ae67015bb107b58 | code | 11034 | using CairoMakie, ReferenceTests, StableRNGs
# https://hdl.handle.net/10945/35052
"""
    test_1()

Return `(f, f′, f′′)` for the first test surface (a sum of four Gaussian-type
bumps): the function, its analytic gradient (2-vector), and its analytic
Hessian (2×2 matrix), each a callable of `(x, y)`.
"""
function test_1()
    f = (x, y) -> 0.75 * exp(-((9 * x - 2)^2 + (9 * y - 2)^2) / 4) + 0.75 * exp(-(9 * x + 1)^2 / 49 - (9 * y + 1) / 10) + 0.5 * exp(-((9 * x - 7)^2 + (9 * y - 3)^2) / 4) - 0.2 * exp(-(9 * x - 4)^2 - (9 * y - 7)^2)
    f′ = (x, y) -> [(exp(-(9 * x - 4)^2 - (9 * y - 7)^2) * (162 * x - 72)) / 5 - (3 * exp(-(9 * x - 2)^2 / 4 - (9 * y - 2)^2 / 4) * ((81 * x) / 2 - 9)) / 4 - (exp(-(9 * x - 7)^2 / 4 - (9 * y - 3)^2 / 4) * ((81 * x) / 2 - 63 / 2)) / 2 - (3 * exp(-(9 * y) / 10 - (9 * x + 1)^2 / 49 - 1 / 10) * ((162 * x) / 49 + 18 / 49)) / 4
        (exp(-(9 * x - 4)^2 - (9 * y - 7)^2) * (162 * y - 126)) / 5 - (3 * exp(-(9 * x - 2)^2 / 4 - (9 * y - 2)^2 / 4) * ((81 * y) / 2 - 9)) / 4 - (exp(-(9 * x - 7)^2 / 4 - (9 * y - 3)^2 / 4) * ((81 * y) / 2 - 27 / 2)) / 2 - (27 * exp(-(9 * y) / 10 - (9 * x + 1)^2 / 49 - 1 / 10)) / 40]
    f′′ = (x, y) -> [(162*exp(-(9 * x - 4)^2 - (9 * y - 7)^2))/5-(243*exp(-(9 * y) / 10 - (9 * x + 1)^2 / 49 - 1 / 10))/98-(243*exp(-(9 * x - 2)^2 / 4 - (9 * y - 2)^2 / 4))/8-(81*exp(-(9 * x - 7)^2 / 4 - (9 * y - 3)^2 / 4))/4+(3*exp(-(9 * y) / 10 - (9 * x + 1)^2 / 49 - 1 / 10)*((162*x)/49+18/49)^2)/4+(3*exp(-(9 * x - 2)^2 / 4 - (9 * y - 2)^2 / 4)*((81*x)/2-9)^2)/4+(exp(-(9 * x - 7)^2 / 4 - (9 * y - 3)^2 / 4)*((81*x)/2-63/2)^2)/2-(exp(-(9 * x - 4)^2 - (9 * y - 7)^2)*(162*x-72)^2)/5 (27*exp(-(9 * y) / 10 - (9 * x + 1)^2 / 49 - 1 / 10)*((162*x)/49+18/49))/40+(3*exp(-(9 * x - 2)^2 / 4 - (9 * y - 2)^2 / 4)*((81*x)/2-9)*((81*y)/2-9))/4+(exp(-(9 * x - 7)^2 / 4 - (9 * y - 3)^2 / 4)*((81*x)/2-63/2)*((81*y)/2-27/2))/2-(exp(-(9 * x - 4)^2 - (9 * y - 7)^2)*(162*x-72)*(162*y-126))/5
        (27*exp(-(9 * y) / 10 - (9 * x + 1)^2 / 49 - 1 / 10)*((162*x)/49+18/49))/40+(3*exp(-(9 * x - 2)^2 / 4 - (9 * y - 2)^2 / 4)*((81*x)/2-9)*((81*y)/2-9))/4+(exp(-(9 * x - 7)^2 / 4 - (9 * y - 3)^2 / 4)*((81*x)/2-63/2)*((81*y)/2-27/2))/2-(exp(-(9 * x - 4)^2 - (9 * y - 7)^2)*(162*x-72)*(162*y-126))/5 (243*exp(-(9 * y) / 10 - (9 * x + 1)^2 / 49 - 1 / 10))/400+(162*exp(-(9 * x - 4)^2 - (9 * y - 7)^2))/5-(243*exp(-(9 * x - 2)^2 / 4 - (9 * y - 2)^2 / 4))/8-(81*exp(-(9 * x - 7)^2 / 4 - (9 * y - 3)^2 / 4))/4+(3*exp(-(9 * x - 2)^2 / 4 - (9 * y - 2)^2 / 4)*((81*y)/2-9)^2)/4+(exp(-(9 * x - 7)^2 / 4 - (9 * y - 3)^2 / 4)*((81*y)/2-27/2)^2)/2-(exp(-(9 * x - 4)^2 - (9 * y - 7)^2)*(162*y-126)^2)/5]
    return f, f′, f′′
end
function test_2()
    # Second test surface: a tanh ridge along the line y = x.
    # Returns `(f, ∇f, Hf)`, each a callable of `(x, y)`.
    value(x, y) = (1 / 9) * (tanh(9 * y - 9 * x) + 1)
    function gradient(x, y)
        t = tanh(9 * x - 9 * y)
        return [t^2 - 1
            1 - t^2]
    end
    function hessian(x, y)
        t = tanh(9 * x - 9 * y)
        d = 2 * t * (9 * t^2 - 9)
        return [-d d
            d -d]
    end
    return value, gradient, hessian
end
"""
    test_3()

Return `(f, f′, f′′)` for the third test surface, a cosine ridge damped by a
rational factor in `x`: the function, its analytic gradient, and its analytic
Hessian, each a callable of `(x, y)`.
"""
function test_3()
    f = (x, y) -> (1.25 + cos(5.4 * y)) / (6 * (1 + (3 * x - 1)^2))
    f′ = (x, y) -> [-((108 * x - 36) * (cos((27 * y) / 5) + 5 / 4)) / (6 * (3 * x - 1)^2 + 6)^2
        -(27 * sin((27 * y) / 5)) / (5 * (6 * (3 * x - 1)^2 + 6))]
    f′′ = (x, y) -> [(2*(108*x-36)^2*(cos((27 * y) / 5)+5/4))/(6*(3*x-1)^2+6)^3-(108*(cos((27 * y) / 5)+5/4))/(6*(3*x-1)^2+6)^2 (27*sin((27 * y) / 5)*(108*x-36))/(5*(6*(3*x-1)^2+6)^2)
        (27*sin((27 * y) / 5)*(108*x-36))/(5*(6*(3*x-1)^2+6)^2) -(729 * cos((27 * y) / 5))/(25*(6*(3*x-1)^2+6))]
    return f, f′, f′′
end
function test_4()
    # Fourth test surface: a wide Gaussian bump centred at (1/2, 1/2).
    # Returns `(f, ∇f, Hf)`, each a callable of `(x, y)`.
    value(x, y) = (1 / 3) * exp(-(81 / 16) * ((x - 1 / 2)^2 + (y - 1 / 2)^2))
    function gradient(x, y)
        E = exp(-(81 * (x - 1 / 2)^2) / 16 - (81 * (y - 1 / 2)^2) / 16)
        gx = (81 * x) / 8 - 81 / 16
        gy = (81 * y) / 8 - 81 / 16
        return [-(E * gx) / 3
            -(E * gy) / 3]
    end
    function hessian(x, y)
        E = exp(-(81 * (x - 1 / 2)^2) / 16 - (81 * (y - 1 / 2)^2) / 16)
        gx = (81 * x) / 8 - 81 / 16
        gy = (81 * y) / 8 - 81 / 16
        return [(E*gx^2)/3-(27*E)/8 (E*gx*gy)/3
            (E*gx*gy)/3 (E*gy^2)/3-(27*E)/8]
    end
    return value, gradient, hessian
end
"""
    test_5()

Return `(f, f′, f′′)` for the fifth test surface, a narrow Gaussian bump
centred at (1/2, 1/2) (sharper than `test_4`): the function, its analytic
gradient, and its analytic Hessian, each a callable of `(x, y)`.
"""
function test_5()
    f = (x, y) -> (1 / 3) * exp(-(81 / 4) * ((x - 1 / 2)^2 + (y - 1 / 2)^2))
    f′ = (x, y) -> [-(exp(-(81 * (x - 1 / 2)^2) / 4 - (81 * (y - 1 / 2)^2) / 4) * ((81 * x) / 2 - 81 / 4)) / 3
        -(exp(-(81 * (x - 1 / 2)^2) / 4 - (81 * (y - 1 / 2)^2) / 4) * ((81 * y) / 2 - 81 / 4)) / 3]
    f′′ = (x, y) -> [(exp(-(81 * (x - 1 / 2)^2) / 4 - (81 * (y - 1 / 2)^2) / 4)*((81*x)/2-81/4)^2)/3-(27*exp(-(81 * (x - 1 / 2)^2) / 4 - (81 * (y - 1 / 2)^2) / 4))/2 (exp(-(81 * (x - 1 / 2)^2) / 4 - (81 * (y - 1 / 2)^2) / 4)*((81*x)/2-81/4)*((81*y)/2-81/4))/3
        (exp(-(81 * (x - 1 / 2)^2) / 4 - (81 * (y - 1 / 2)^2) / 4)*((81*x)/2-81/4)*((81*y)/2-81/4))/3 (exp(-(81 * (x - 1 / 2)^2) / 4 - (81 * (y - 1 / 2)^2) / 4)*((81*y)/2-81/4)^2)/3-(27*exp(-(81 * (x - 1 / 2)^2) / 4 - (81 * (y - 1 / 2)^2) / 4))/2]
    return f, f′, f′′
end
function test_6()
    # Sixth test surface: a hemisphere of radius 8/9 centred at (1/2, 1/2),
    # shifted down by 1/2. Returns `(f, ∇f, Hf)`, each a callable of `(x, y)`.
    value(x, y) = (1 / 9) * (64 - 81 * ((x - 1 / 2)^2 + (y - 1 / 2)^2))^(1 / 2) - 1 / 2
    function gradient(x, y)
        s = 64 - 81 * (y - 1 / 2)^2 - 81 * (x - 1 / 2)^2
        return [-(162 * x - 81) / (18 * s^(1 / 2))
            -(162 * y - 81) / (18 * s^(1 / 2))]
    end
    function hessian(x, y)
        s = 64 - 81 * (y - 1 / 2)^2 - 81 * (x - 1 / 2)^2
        u = 162 * x - 81
        v = 162 * y - 81
        return [-u^2/(36*s^(3/2))-9/s^(1/2) -(u*v)/(36*s^(3/2))
            -(u*v)/(36*s^(3/2)) -v^2/(36*s^(3/2))-9/s^(1/2)]
    end
    return value, gradient, hessian
end
"""
    point_set_1()

Return `(x, y)` coordinate vectors for the first benchmark point set: 100
scattered sites covering (approximately) the unit square, taken from the
interpolation-testing literature (see the reference linked above).
"""
function point_set_1()
    # 100 × 2 matrix of (x, y) pairs; returned as separate coordinate vectors.
    A = [0.022703 -0.031021
        0.021701 0.257692
        0.001903 0.494360
        0.039541 0.699342
        0.031583 0.910765
        0.132419 0.050133
        0.125444 0.259297
        0.076758 0.417112
        0.062649 0.655223
        0.095867 0.914652
        0.264560 0.029294
        0.208899 0.266878
        0.171473 0.480174
        0.190921 0.687880
        0.230463 0.904651
        0.366317 0.039695
        0.383239 0.238955
        0.346632 0.490299
        0.387316 0.644523
        0.379536 0.893803
        0.414977 -0.028462
        0.420001 0.226247
        0.485566 0.389142
        0.479258 0.632425
        0.397776 0.848971
        0.053989 0.158674
        0.017513 0.341401
        0.050968 0.578285
        0.048706 0.747019
        0.041878 0.996289
        0.109027 0.091855
        0.093454 0.338159
        0.145187 0.561556
        0.145273 0.752407
        0.069556 0.963242
        0.239164 0.060230
        0.276733 0.369604
        0.226678 0.594059
        0.186765 0.818558
        0.242622 0.980541
        0.385766 0.068448
        0.317909 0.312413
        0.377659 0.519930
        0.381292 0.820379
        0.280351 0.971172
        0.427768 0.156096
        0.466363 0.317509
        0.409203 0.508495
        0.481228 0.751101
        0.402732 0.997873
        0.584869 -0.027195
        0.606389 0.270927
        0.574131 0.425942
        0.599010 0.673378
        0.609697 0.924241
        0.661693 0.025596
        0.639647 0.200834
        0.700118 0.489070
        0.690895 0.669783
        0.671889 0.936610
        0.773694 0.028537
        0.741042 0.193658
        0.730603 0.471423
        0.821453 0.668505
        0.807664 0.847679
        0.842457 0.038050
        0.836692 0.208309
        0.847812 0.433563
        0.917570 0.630738
        0.927987 0.904231
        1.044982 -0.012090
        0.985788 0.269584
        1.012929 0.439605
        1.001985 0.694152
        1.041468 0.868208
        0.573008 0.127243
        0.501389 0.347773
        0.610695 0.608471
        0.538062 0.723524
        0.502619 1.030876
        0.642784 0.070783
        0.670396 0.325984
        0.633359 0.509632
        0.689564 0.775957
        0.683767 1.006451
        0.763533 0.102140
        0.825898 0.323577
        0.808661 0.609159
        0.729064 0.802281
        0.817095 1.051236
        0.868405 0.090205
        0.941846 0.331849
        0.859958 0.591014
        0.859633 0.814484
        0.851280 0.969603
        0.967063 0.133411
        0.967631 0.379528
        0.965704 0.504442
        1.035930 0.745992
        0.947151 0.980141]
    return A[:, 1], A[:, 2]
end
function point_set_2()
    # Second benchmark point set: 33 scattered sites on the unit square,
    # returned as separate `(x, y)` coordinate vectors.
    coords = [
        0.00 0.00
        0.00 1.00
        0.00 0.50
        0.50 1.00
        0.10 0.15
        0.15 0.30
        0.30 0.35
        0.10 0.75
        0.05 0.45
        1.00 0.00
        1.00 1.00
        0.50 0.00
        1.00 0.50
        0.20 0.10
        0.25 0.20
        0.60 0.25
        0.90 0.35
        0.80 0.40
        0.70 0.20
        0.95 0.90
        0.60 0.65
        0.65 0.70
        0.35 0.85
        0.60 0.85
        0.90 0.80
        0.85 0.25
        0.80 0.65
        0.75 0.85
        0.70 0.90
        0.70 0.65
        0.75 0.10
        0.75 0.35
        0.55 0.95
    ]
    return coords[:, 1], coords[:, 2]
end
function point_set_3()
    # Third benchmark point set: 25 scattered sites (some slightly outside the
    # unit square), returned as separate `(x, y)` coordinate vectors.
    coords = [
        0.1375 0.97500
        0.9125 0.98750
        0.7125 0.76250
        0.2250 0.83750
        0.0500 0.41250
        0.4750 0.63750
        0.0500 -0.05000
        0.4500 1.03750
        1.0875 0.55000
        0.5375 0.80000
        0.0375 0.75000
        0.1875 0.57500
        0.7125 0.55000
        0.8500 0.43750
        0.7000 0.31250
        0.2750 0.42500
        0.4500 0.28750
        0.8125 0.18750
        0.4500 -0.03750
        1.0000 0.26250
        0.5000 0.46250
        0.1875 0.26250
        0.5875 0.12500
        1.0500 -0.06125
        0.1000 0.11250
    ]
    return coords[:, 1], coords[:, 2]
end
# Exercise the interpolant `itp` at the sites (x, y) across every method and
# every call form (scalar, vectorised, in-place, serial, parallel), repeatedly,
# and @test each result against `f` — either a function evaluated at the query
# point, or a vector of known values. For `:nearest`, the expected value is
# taken at the nearest data site instead of the query point.
function test_interpolant(itp, x, y, f)
    for method in (Farin(1), :sibson, :triangle, :nearest, :laplace, Sibson(1), Hiyoshi(2))
        for _ in 1:25
            # All four evaluation entry points must agree.
            vals = itp(x, y; parallel=false, method)
            vals2 = similar(vals)
            itp(vals2, x, y; parallel=false, method)
            vals3 = itp(x, y; parallel=true, method)
            vals4 = similar(vals3)
            itp(vals4, x, y; parallel=true, method)
            for i in eachindex(x, y)
                _x = x[i]
                _y = y[i]
                if method ≠ :nearest
                    _z = f isa Function ? f(_x, _y) : f[i]
                else
                    # Nearest-neighbour interpolation returns the value at the
                    # closest Voronoi generator, not at the query point.
                    m = DT.jump_to_voronoi_polygon(itp.triangulation, (_x, _y))
                    _z = f isa Function ? f(get_point(itp.triangulation, m)...) : f[m]
                end
                @test all(val -> isapprox(val, _z, rtol=1e-1), (itp(_x, _y; method), vals[i], vals2[i], vals3[i], vals4[i]))
            end
        end
    end
end
function rrmse(z, ẑ)
    # Relative root-mean-square error, in percent: 100 · √(Σ‖zᵢ − ẑᵢ‖² / Σ‖ẑᵢ‖²).
    # Pairs containing any non-finite component (NaN/Inf) are skipped entirely.
    err_sq = 0.0
    ref_sq = 0.0
    for (zᵢ, ẑᵢ) in zip(z, ẑ)
        all(isfinite, (zᵢ..., ẑᵢ...)) || continue
        err_sq += norm(zᵢ .- ẑᵢ)^2
        ref_sq += norm(ẑᵢ)^2
    end
    return 100sqrt(err_sq / ref_sq)
end
| NaturalNeighbours | https://github.com/DanielVandH/NaturalNeighbours.jl.git |
|
[
"MIT"
] | 1.3.5 | ac69089b500226d09cb611688ae67015bb107b58 | code | 9078 | using ..NaturalNeighbours
const NNI = NaturalNeighbours
using DelaunayTriangulation
const DT = DelaunayTriangulation
using StableRNGs
using LinearAlgebra
using CairoMakie
include(normpath(@__DIR__, "../.", "helper_functions", "test_functions.jl"))
@testset "Interpolation" begin
    # Random scattered sites with a smooth test surface.
    rng = StableRNG(123)
    pts = [(rand(rng), rand(rng)) for _ in 1:50]
    tri = triangulate(pts, rng=rng, delete_ghosts=false)
    f = (x, y) -> sin(x * y) - cos(x - y) * exp(-(x - y)^2)
    z = [f(x, y) for (x, y) in pts]
    itp = interpolate(tri, z; derivatives=true, parallel=false)
    # Accessor functions must mirror the interpolant's fields; caches are per-thread.
    @test DT.get_triangulation(itp) == tri
    @test NNI.get_z(itp) == z
    @test length(NNI.get_neighbour_cache(itp)) == Base.Threads.nthreads()
    @test length(NNI.get_derivative_cache(itp)) == Base.Threads.nthreads()
    @test NNI.get_neighbour_cache(itp, 1) == itp.neighbour_cache[1]
    Base.Threads.nthreads() > 1 && @test NNI.get_neighbour_cache(itp, 2) == itp.neighbour_cache[2]
    @test NNI.get_derivative_cache(itp) == itp.derivative_cache
    @test NNI.get_derivative_cache(itp, 1) == itp.derivative_cache[1]
    Base.Threads.nthreads() > 1 && @test NNI.get_derivative_cache(itp, 2) == itp.derivative_cache[2]
    @test NNI.get_gradient(itp) == itp.gradient
    @test !isnothing(NNI.get_gradient(itp))
    @test NNI.get_gradient(itp, 1) == itp.gradient[1]
    @test NNI.get_gradient(itp, 2) == itp.gradient[2]
    @test NNI.get_hessian(itp) == itp.hessian
    @test !isnothing(NNI.get_hessian(itp))
    @test NNI.get_hessian(itp, 1) == itp.hessian[1]
    @test NNI.get_hessian(itp, 2) == itp.hessian[2]
    # Without derivatives=true, gradient/hessian are not stored.
    _itp = interpolate(tri, z; derivatives=false, parallel=false)
    @test NNI.get_gradient(_itp) === nothing
    @test NNI.get_hessian(_itp) === nothing
    @test itp isa NNI.NaturalNeighboursInterpolant
    # Mismatched data length must be rejected.
    @test_throws AssertionError interpolate(tri, z[1:end-1])
    # User-supplied gradient/hessian vectors are stored as-is (identity, not copies).
    w = rand(length(z))
    y = rand(length(z))
    __itp = interpolate(tri, z; derivatives=false, parallel=false, gradient=w)
    @test NNI.get_gradient(__itp) === w
    __itp = interpolate(tri, z; derivatives=false, parallel=false, gradient=w, hessian=y)
    @test NNI.get_gradient(__itp) === w
    @test NNI.get_hessian(__itp) === y
    # Full evaluation sweep at the data sites, against both f and the raw values.
    x = getx.(pts)
    y = gety.(pts)
    test_interpolant(itp, x, y, f)
    test_interpolant(itp, x, y, z)
    # Repeat on a structured grid, evaluating both off-site and at the sites.
    tri = triangulate_rectangle(0.0, 1.0, 0.0, 1.0, 30, 30)
    z = [f(x, y) for (x, y) in DelaunayTriangulation.each_point(tri)]
    itp = interpolate(get_points(tri), z; derivatives=true)
    xx = LinRange(0, 1, 50)
    yy = LinRange(0, 1, 50)
    x = vec([x for x in xx, _ in yy])
    y = vec([y for _ in xx, y in yy])
    test_interpolant(itp, x, y, f)
    x = getx.(get_points(tri))
    y = gety.(get_points(tri))
    test_interpolant(itp, x, y, f)
    test_interpolant(itp, x, y, z)
end
@testset "Sibson(1) errors without gradients" begin
tri = triangulate_rectangle(0, 10, 0, 10, 101, 101)
tri = triangulate(get_points(tri), randomise=false)
f = (x, y) -> x^2 + y^2 + x^3 * y
f′ = (x, y) -> [2x + 3x^2 * y; 2y + x^3]
f′′ = (x, y) -> [2+6x*y 3x^2; 3x^2 2]
z = [f(x, y) for (x, y) in DelaunayTriangulation.each_point(tri)]
itp = interpolate(tri, z; derivatives=false)
@test_throws ArgumentError("Gradients must be provided for Sibson-1, Farin, or Hiyoshi-2 interpolation. Consider using e.g. interpolate(tri, z; derivatives = true).") itp(0.5, 0.5; method=Sibson(1))
end
@testset "Hiyoshi(2) errors without gradients and Hessians" begin
tri = triangulate_rectangle(0, 10, 0, 10, 101, 101)
tri = triangulate(get_points(tri), randomise=false)
f = (x, y) -> x^2 + y^2 + x^3 * y
f′ = (x, y) -> [2x + 3x^2 * y; 2y + x^3]
f′′ = (x, y) -> [2+6x*y 3x^2; 3x^2 2]
z = [f(x, y) for (x, y) in DelaunayTriangulation.each_point(tri)]
itp = interpolate(tri, z; derivatives=false)
@test_throws ArgumentError("Gradients and Hessians must be provided for Hiyoshi-2 interpolation. Consider using e.g. interpolate(tri, z; derivatives = true).") itp(0.5, 0.5; method=Hiyoshi(2))
end
@testset "Test Float32" begin
rng = StableRNG(123)
xs = randn(rng, 100)
ys = randn(rng, 100)
tri1 = triangulate([Float32.(xs)'; Float32.(ys)']; rng)
tri2 = triangulate([xs'; ys']; rng)
zs = sin.(xs) .* cos.(ys)
itp1 = interpolate(tri1, Float32.(zs); derivatives=true)
itp2 = interpolate(tri2, zs; derivatives=true)
for itp in (itp1, itp2)
for method in (Sibson(1), Sibson(), Laplace(), Farin(1), Hiyoshi(2), Triangle(), Triangle(; allow_cache = false), Nearest())
@inferred itp(rand(), rand(); method=method)
@inferred itp(rand(), rand(); method=method, project=false)
@inferred itp(rand(Float32), rand(Float32); method=method)
@inferred itp(rand(Float32), rand(Float32); method=method, project=false)
@inferred itp(rand(Float32), rand(Float64); method=method)
@inferred itp(rand(Float32), rand(Float64); method=method, project=false)
@inferred itp(rand(Float64), rand(Float32); method=method)
@inferred itp(rand(Float64), rand(Float32); method=method, project=false)
end
end
for method in (Sibson(1), Sibson(), Laplace(), Farin(1), Hiyoshi(2), Triangle(), Nearest())
p, q = rand(2)
@test itp1(p, q; method=method) ≈ itp2(p, q; method=method)
@test itp1(p, q; method=method, project=false) ≈ itp2(p, q; method=method, project=false)
@test itp1(Float32(p), Float32(q); method=method) ≈ itp2(Float32(p), Float32(q); method=method)
@test itp1(Float32(p), Float32(q); method=method, project=false) ≈ itp2(Float32(p), Float32(q); method=method, project=false)
@test itp1(Float32(p), q; method=method) ≈ itp2(Float32(p), q; method=method)
@test itp1(Float32(p), q; method=method, project=false) ≈ itp2(Float32(p), q; method=method, project=false)
@test itp1(p, Float32(q); method=method) ≈ itp2(p, Float32(q); method=method)
@test itp1(p, Float32(q); method=method, project=false) ≈ itp2(p, Float32(q); method=method, project=false)
end
test_interpolant(itp1, xs, ys, zs)
test_interpolant(itp2, xs, ys, zs)
xrange = LinRange(-3, 3, 1000) .|> Float32
yrange = LinRange(-3, 3, 1000) .|> Float32
itp_xs = [xrange[i] for i in 1:length(xrange), j in 1:length(yrange)]
itp_ys = [yrange[j] for i in 1:length(xrange), j in 1:length(yrange)]
_itp_xs = vec(itp_xs)
_itp_ys = vec(itp_ys)
vals1 = itp1(_itp_xs, _itp_ys; method=Sibson(1))
vals2 = itp2(_itp_xs, _itp_ys; method=Sibson(1))
err = abs.(vals1 .- vals2)
points = get_points(tri1)
ch = get_convex_hull_vertices(tri1)
bad_idx = identify_exterior_points(_itp_xs, _itp_ys, points, ch; tol=1e-3) # boundary effects _really_ matter...
deleteat!(err, bad_idx)
@test norm(err) ≈ 0 atol = 1e-2
end
@testset "Test that the derivatives are all zero for missing vertices" begin
R₁ = 0.2
R₂ = 1.0
θ = collect(LinRange(0, 2π, 100))
θ[end] = 0.0 # get the endpoints to match
x = [
[R₂ .* cos.(θ)], # outer first
[reverse(R₁ .* cos.(θ))] # then inner - reverse to get clockwise orientation
]
y = [
[R₂ .* sin.(θ)], #
[reverse(R₁ .* sin.(θ))]
]
boundary_nodes, points = convert_boundary_points_to_indices(x, y)
tri = triangulate(points; boundary_nodes)
A = get_area(tri)
refine!(tri; max_area=1e-4A)
itp = interpolate(tri, ones(DelaunayTriangulation.num_points(tri)), derivatives=true, parallel=false)
ind = findall(DelaunayTriangulation.each_point_index(tri)) do i
!DelaunayTriangulation.has_vertex(tri, i)
end
for i in ind
∇ = NaturalNeighbours.get_gradient(itp, i)
@test all(iszero, ∇)
H = NaturalNeighbours.get_hessian(itp, i)
@test all(iszero, H)
end
end
@testset "Testing Triangle()'s cache" begin
tri = triangulate(rand(2, 50))
method2 = Triangle()
method = Triangle(; allow_cache=false)
s = Dict{NTuple{3,Int},NTuple{9,Float64}}()
for T in each_solid_triangle(tri)
V = DelaunayTriangulation.sort_triangle(T)
s[V] = NaturalNeighbours._compute_triangle_shape_coefficients(tri, V...)
@test sum(s[V]) ≈ 1.0
end
itp = interpolate(tri, rand(50))
itp(rand(50), rand(50); method)
@test isempty(method.s)
itp(1 / 2, 1 / 2; method=method2)
@test isempty(method2.s)
itp(rand(50), rand(50); method=method2)
@test !isempty(method2.s)
@test method2.s == s
tri2 = triangulate(rand(2, 68))
itp = interpolate(tri2, rand(68))
itp(rand(50), rand(50); method=method2)
s = Dict{NTuple{3,Int},NTuple{9,Float64}}()
for T in each_solid_triangle(tri2)
V = DelaunayTriangulation.sort_triangle(T)
s[V] = NaturalNeighbours._compute_triangle_shape_coefficients(tri2, V...)
@inferred NaturalNeighbours._compute_triangle_shape_coefficients(tri2, V...)
@test sum(s[V]) ≈ 1.0
end
@test method2.s == s # make sure that method2 knows to regenerate the cache
end
| NaturalNeighbours | https://github.com/DanielVandH/NaturalNeighbours.jl.git |
|
[
"MIT"
] | 1.3.5 | ac69089b500226d09cb611688ae67015bb107b58 | code | 4932 | using ..NaturalNeighbours
const NNI = NaturalNeighbours
using DelaunayTriangulation
const DT = DelaunayTriangulation
using LinearAlgebra
using ReferenceTests
using CairoMakie
# Draw a filled-contour panel of `vals` (reshaped onto the xg × yg grid) into
# `fig[i, j]`, optionally overlaying the fixed set of data sites in red.
# NOTE(review): `x` and `y` are accepted but unused here — the scatter uses the
# hard-coded site grid below; the parameters are kept for call-site compatibility.
function plot_2d(fig, i, j, title, vals, xg, yg, x, y, show_scatter=true)
    ax = Axis(fig[i, j], xlabel="x", ylabel="y", width=600, height=600, title=title, titlealign=:left)
    zmat = reshape(vals, (length(xg), length(yg)))
    contourf!(ax, xg, yg, zmat, colormap=:viridis, levels=-1:0.05:0, extendlow=:auto, extendhigh=:auto)
    show_scatter || return false
    site_x = vec([(a - 1) / 9 for a in (1, 3, 4, 5, 8, 9, 10), b in (1, 2, 3, 5, 6, 7, 9, 10)])
    site_y = vec([(b - 1) / 9 for a in (1, 3, 4, 5, 8, 9, 10), b in (1, 2, 3, 5, 6, 7, 9, 10)])
    scatter!(ax, site_x, site_y, color=:red, markersize=14)
end
# Draw a 3D surface panel of `vals` (reshaped onto the xg × yg grid) into `fig[i, j]`.
function plot_3d(fig, i, j, title, vals, xg, yg)
    ax = Axis3(fig[i, j], xlabel="x", ylabel="y", width=600, height=600, title=title, titlealign=:left)
    zmat = reshape(vals, (length(xg), length(yg)))
    surface!(ax, xg, yg, zmat, color=vals, colormap=:viridis)
end
@testset "A domain with no holes" begin
a, b, c, d = 0.0, 1.0, 0.0, 1.0
nx, ny = 10, 10
tri = triangulate_rectangle(a, b, c, d, nx, ny)
f = (x, y) -> sin(x * y) - cos(x - y) * exp(-(x - y)^2)
z = [f(x, y) for (x, y) in DelaunayTriangulation.each_point(tri)]
itp = interpolate(tri, z; derivatives=true)
xg = LinRange(0, 1, 100)
yg = LinRange(0, 1, 100)
_x = vec([x for x in xg, _ in yg])
_y = vec([y for _ in xg, y in yg])
exact = f.(_x, _y)
sibson_vals = itp(_x, _y; method=Sibson())
triangle_vals = itp(_x, _y; method=Triangle(; allow_cache = false))
laplace_vals = itp(_x, _y; method=Laplace())
sibson_1_vals = itp(_x, _y; method=Sibson(1))
nearest_vals = itp(_x, _y; method=Nearest())
farin_vals = itp(_x, _y; method=Farin())
hiyoshi_vals = itp(_x, _y; method=Hiyoshi(2))
all_vals = (sibson_vals, triangle_vals, laplace_vals, sibson_1_vals, nearest_vals, farin_vals, hiyoshi_vals, exact)
titles = ("(a): Sibson", "(b): Triangle", "(c): Laplace", "(d): Sibson-1", "(e): Nearest", "(f): Farin", "(g): Hiyoshi", "(h): Exact")
fig = Figure(fontsize=55)
for (i, (vals, title)) in enumerate(zip(all_vals, titles))
plot_2d(fig, 1, i, title, vals, xg, yg, first.(DelaunayTriangulation.each_point(tri)), last.(DelaunayTriangulation.each_point(tri)), !(vals == exact))
plot_3d(fig, 2, i, " ", vals, xg, yg)
end
resize_to_layout!(fig)
@test_reference normpath(@__DIR__, "../..", "example.png") fig by = psnr_equality(20)
end
@testset "A domain with holes" begin
R₁ = 2.0
R₂ = 3.0
θ = (collect ∘ LinRange)(0, 2π, 250)
θ[end] = θ[begin]
x = [
[R₂ .* cos.(θ)],
[reverse(R₁ .* cos.(θ))] # inner boundaries are clockwise
]
y = [
[R₂ .* sin.(θ)],
[reverse(R₁ .* sin.(θ))] # inner boundaries are clockwise
]
boundary_nodes, points = convert_boundary_points_to_indices(x, y)
tri = triangulate(points; boundary_nodes)
A = get_area(tri)
D = 6.25e-4
Tf = (x, y) -> let r = sqrt(x^2 + y^2)
(R₂^2 - r^2) / (4D) + R₁^2 * log(r / R₂) / (2D)
end
_safe_Tf = (x, y) -> let r = sqrt(x^2 + y^2)
!(R₁ ≤ r ≤ R₂) && return Inf
return Tf(x, y)
end
z = [Tf(x, y) for (x, y) in DelaunayTriangulation.each_point(tri)]
x = first.(DelaunayTriangulation.each_point(tri))
y = last.(DelaunayTriangulation.each_point(tri))
triangles = [T[j] for T in each_solid_triangle(tri), j in 1:3]
itp = interpolate(tri, z; derivatives=true)
xg = LinRange(-R₂, R₂, 75)
yg = LinRange(-R₂, R₂, 75)
_x = vec([x for x in xg, _ in yg])
_y = vec([y for _ in xg, y in yg])
exact = _safe_Tf.(_x, _y)
sibson_vals = itp(_x, _y; method=Sibson(), project=false)
triangle_vals = itp(_x, _y; method=Triangle(), project=false)
laplace_vals = itp(_x, _y; method=Laplace(), project=false)
sibson_1_vals = itp(_x, _y; method=Sibson(1), project=false)
nearest_vals = itp(_x, _y; method=Nearest(), project=false)
farin_vals = itp(_x, _y; method=Farin(), project=false)
hiyoshi_vals = itp(_x, _y; method=Hiyoshi(2), project=false)
all_vals = (sibson_vals, triangle_vals, laplace_vals, sibson_1_vals, nearest_vals, farin_vals, hiyoshi_vals, exact)
titles = ("(a): Sibson", "(b): Triangle", "(c): Laplace", "(d): Sibson-1", "(e): Nearest", "(f): Farin", "(g): Hiyoshi", "(h): Exact")
_tri = triangulate([_x'; _y'])
_triangles = [T[j] for T in each_solid_triangle(_tri), j in 1:3]
fig = Figure(fontsize=55)
for (i, (vals, title)) in enumerate(zip(all_vals, titles))
ax = Axis(fig[1, i], width=600, height=600, title=title, titlealign=:left)
_vals = copy(vals)
_vals[isinf.(vals)] .= Inf
contourf!(ax, _x, _y, _vals, levels=0:50:900)
end
resize_to_layout!(fig)
fig
@test_reference normpath(@__DIR__, "example_constrained.png") fig by=psnr_equality(15)
end | NaturalNeighbours | https://github.com/DanielVandH/NaturalNeighbours.jl.git |
|
[
"MIT"
] | 1.3.5 | ac69089b500226d09cb611688ae67015bb107b58 | code | 3300 | using ..NaturalNeighbours
const NNI = NaturalNeighbours
using DelaunayTriangulation
const DT = DelaunayTriangulation
using LinearAlgebra
@testset "Two-point interpolations" begin
for _ in 1:10
tri = triangulate(rand(2, 500))
coordinates = zeros(5)
envelope = zeros(Int, 5)
for _ in 1:100
e = (rand ∘ each_edge)(tri)
i, j = DT.edge_vertices(e)
t = rand()
p = (1 - t) .* get_point(tri, i) .+ t .* get_point(tri, j)
nc = NNI.two_point_interpolate!(coordinates, envelope, tri, i, j, p)
@test sum(NNI.get_coordinates(nc)) ≈ 1
@test NNI.get_indices(nc) == [i, j]
@test NNI.get_interpolation_point(nc) == p
@test NNI.get_triangulation(nc) == tri
@test NNI.get_coordinates(nc) ≈ [1 - t, t]
λ, k = NNI.get_coordinates(nc), NNI.get_indices(nc)
@test collect(p) ≈ collect(λ[1] .* get_point(tri, k[1]) .+ λ[2] .* get_point(tri, k[2]))
@test NNI.get_barycentric_deviation(nc) ≈ 0 atol = 1e-8
end
end
end
@testset "Basic extrapolation" begin
tri = triangulate_rectangle(0.0, 1.0, 0.0, 1.0, 5, 10)
f = (x, y) -> sin(x * y) - cos(x - y) * exp(-(x - y)^2)
pts = get_points(tri)
z = [f(x, y) for (x, y) in DelaunayTriangulation.each_point(tri)]
itp = interpolate(pts, z, derivatives=true)
p = (1.5, 0.7)
V = jump_and_march(tri, p)
_V = DT.sort_triangle(V)
i, j = triangle_vertices(_V)
a, b = get_point(tri, i, j)
dab = norm(b .- a)
dbp = norm((1.0, 0.7) .- b)
t = dbp / dab
_z = t * z[i] + (1 - t) * z[j]
__z = itp(getx(p), gety(p); method=:triangle)
@test _z ≈ itp(getx(p), gety(p); method=:triangle)
@test _z ≈ itp(getx(p), gety(p); method=:sibson)
@test _z ≈ itp(getx(p), gety(p); method=:laplace)
@test __z ≈ itp(1.8, 0.7; method=:triangle)
@test __z ≈ itp(1.8, 0.7; method=:sibson)
@test __z ≈ itp(1.8, 0.7; method=:laplace)
_z = itp(getx(p), gety(p); method=Sibson(1))
@test _z ≈ itp(getx(p), gety(p); method=Sibson(1))
@test _z ≈ itp(1.8, 0.7; method=Sibson(1))
@test _z ≈ itp(1.8, 0.7; method=Farin(1))
@test _z ≈ itp(1.8, 0.7; method=Hiyoshi(2))
@test isinf(itp(getx(p), gety(p); method=:triangle, project=false))
@test isinf(itp(getx(p), gety(p); method=:sibson, project=false))
@test isinf(itp(getx(p), gety(p); method=:laplace, project=false))
@test isinf(itp(getx(p), gety(p); method=Sibson(1), project=false))
@test isinf(itp(getx(p), gety(p); method=Farin(1), project=false))
@test isinf(itp(getx(p), gety(p); method=Hiyoshi(2), project=false))
@test isinf(itp(1.8, 0.7; method=:triangle, project=false))
@test isinf(itp(1.8, 0.7; method=:sibson, project=false))
@test isinf(itp(1.8, 0.7; method=:laplace, project=false))
@test isinf(itp(1.8, 0.7; method=Sibson(1), project=false))
@test isinf(itp(1.8, 0.7; method=Farin(1), project=false))
@test isinf(itp(1.8, 0.7; method=Hiyoshi(2), project=false))
∂ = differentiate(itp, 1)
@test all(isinf, ∂(getx(p), gety(p); project=false))
∂ = differentiate(itp, 2)
@test all(isinf, ∂(getx(p), gety(p); project=false)[1])
@test all(isinf, ∂(getx(p), gety(p); project=false)[2])
end | NaturalNeighbours | https://github.com/DanielVandH/NaturalNeighbours.jl.git |
|
[
"MIT"
] | 1.3.5 | ac69089b500226d09cb611688ae67015bb107b58 | code | 3236 | using ..NaturalNeighbours
using DelaunayTriangulation
using CairoMakie
using ReferenceTests
const NNI = NaturalNeighbours
const DT = DelaunayTriangulation
# Data sites with a single unit spike at the first point; interpolating this
# indicator function visualises each method's "region of influence".
points = [
    (0.0, 0.0), (-1.0, 1.0), (-0.5, 1.0), (0.0, 1.0), (0.5, 1.0), (1.0, 1.0),
    (1.0, 0.8), (1.0, 0.0), (1.0, -0.5), (1.0, -1.0),
    (0.1, -1.0), (-0.8, -1.0), (-1.0, -1.0),
    (-1.0, -0.7), (-1.0, -0.1), (-1.0, 0.6),
    (-0.1, -0.8), (0.2, -0.8),
    (-0.6, -0.4), (0.9, 0.0), (-0.5, 0.5), (-0.4, 0.6), (-0.1, 0.8)
]
z = zeros(length(points))
z[1] = 1.0
itp = interpolate(points, z, derivatives=true)
vorn = voronoi(itp.triangulation)
# Dense evaluation grid over [-1, 1]², flattened for vector evaluation.
xg = LinRange(-1, 1, 250)
yg = LinRange(-1, 1, 250)
x = vec([x for x in xg, _ in yg])
y = vec([y for _ in xg, y in yg])
# Evaluate every method on the grid, reshaping back to matrices for plotting.
sibson_vals = itp(x, y; method=:sibson) |> x -> reshape(x, (length(xg), length(yg)))
triangle_vals = itp(x, y; method=:triangle) |> x -> reshape(x, (length(xg), length(yg)))
laplace_vals = itp(x, y; method=:laplace) |> x -> reshape(x, (length(xg), length(yg)))
sibson_1_vals = itp(x, y; method=Sibson(1)) |> x -> reshape(x, (length(xg), length(yg)))
nearest_vals = itp(x, y; method=:nearest) |> x -> reshape(x, (length(xg), length(yg)))
farin_vals = itp(x, y; method=Farin(1)) |> x -> reshape(x, (length(xg), length(yg)))
hiyoshi_vals = itp(x, y; method=Hiyoshi(2)) |> x -> reshape(x, (length(xg), length(yg)))
# Filled 2D contour panel at figure position (i, j), overlaying the Voronoi
# tessellation (red edges) and the data sites.
# NOTE(review): this reads the global `fig` instead of receiving it as an
# argument (unlike `plot_2d` elsewhere in the suite) — presumably acceptable
# in this script-style file; confirm before reusing.
function plot_influence(i, j, title, vals, xg, yg, vorn, points)
    ax = Axis(fig[i, j], xlabel="x", ylabel="y", width=600, height=600, title=title, titlealign=:left)
    contourf!(ax, xg, yg, vals, colormap=:viridis, levels=0:0.05:1, extendlow=:auto, extendhigh=:auto)
    voronoiplot!(ax, vorn, strokecolor=:red, color=(:white,0.0))
    scatter!(ax, points, color=:red)
    xlims!(ax, -1, 1)
    ylims!(ax, -1, 1)
end
# 3D surface panel at figure position (i, j) with the data sites overlaid.
# NOTE(review): `vorn` is accepted but unused, and the global `fig` is read
# directly — kept as-is for call-site compatibility with `plot_influence`.
function plot_3d_influence(i, j, title, vals, xg, yg, vorn, points, z)
    ax = Axis3(fig[i, j], xlabel="x", ylabel="y", width=600, height=600, title=title, titlealign=:left)
    scatter!(ax, first.(points), last.(points), z, color=:red)
    surface!(ax, xg, yg, vals, color=vals, colormap=:viridis)
    xlims!(ax, -1, 1)
    ylims!(ax, -1, 1)
end
# Assemble the 2×7 comparison figure (2D contours on top, 3D surfaces below)
# and compare it against the committed documentation reference image.
fig = Figure(fontsize=36)
plot_influence(1, 1, "(a): Sibson", sibson_vals, xg, yg, vorn, points)
plot_influence(1, 2, "(b): Triangle", triangle_vals, xg, yg, vorn, points)
plot_influence(1, 3, "(c): Laplace", laplace_vals, xg, yg, vorn, points)
plot_influence(1, 4, "(d): Sibson-1", sibson_1_vals, xg, yg, vorn, points)
plot_influence(1, 5, "(e): Nearest", nearest_vals, xg, yg, vorn, points)
plot_influence(1, 6, "(f): Farin", farin_vals, xg, yg, vorn, points)
plot_influence(1, 7, "(g): Hiyoshi-2", hiyoshi_vals, xg, yg, vorn, points)
plot_3d_influence(2, 1, " ", sibson_vals, xg, yg, vorn, points, z)
plot_3d_influence(2, 2, " ", triangle_vals, xg, yg, vorn, points, z)
plot_3d_influence(2, 3, " ", laplace_vals, xg, yg, vorn, points, z)
plot_3d_influence(2, 4, " ", sibson_1_vals, xg, yg, vorn, points, z)
plot_3d_influence(2, 5, " ", nearest_vals, xg, yg, vorn, points, z)
plot_3d_influence(2, 6, " ", farin_vals, xg, yg, vorn, points, z)
plot_3d_influence(2, 7, " ", hiyoshi_vals, xg, yg, vorn, points, z)
resize_to_layout!(fig)
@test_reference normpath(@__DIR__, "../..", "docs", "src", "figures", "influence.png") fig
| NaturalNeighbours | https://github.com/DanielVandH/NaturalNeighbours.jl.git |
|
[
"MIT"
] | 1.3.5 | ac69089b500226d09cb611688ae67015bb107b58 | code | 4703 | using ..NaturalNeighbours
const NNI = NaturalNeighbours
using DelaunayTriangulation
const DT = DelaunayTriangulation
using StableRNGs
using LinearAlgebra
include(normpath(@__DIR__, "../.", "helper_functions", "test_functions.jl"))
@testset "Does Sibson-0 reproduce linear functions p ↦ a + bᵀp?" begin
a = 0.9881
b = [1.7, 2.3]
p = [0.3, 0.7]
f = (x, y) -> a + b' * [x, y]
xx = LinRange(-10, 10, 25)
yy = LinRange(-10, 10, 25)
x = vec([x for x in xx, _ in yy])
y = vec([y for _ in xx, y in yy])
z = f.(x, y)
itp = interpolate(x, y, z; derivatives=true)
xg = LinRange(-10, 10, 250)
yg = LinRange(-10, 10, 250)
_x = vec([x for x in xg, _ in yg])
_y = vec([y for _ in xg, y in yg])
vals = itp(_x, _y; method=Sibson(0))
for i in eachindex(vals)
ξ, η = _x[i], _y[i]
if DT.distance_to_polygon((ξ, η), get_points(itp.triangulation), get_convex_hull_vertices(itp.triangulation)) > 1e-7
@test vals[i] ≈ f(_x[i], _y[i]) atol = 1e-12
end
end
end
@testset "Does Laplace reproduce linear functions p ↦ a + bᵀp?" begin
a = 0.5673634
b = [11.7, 62.3]
p = [0.6, -0.7]
f = (x, y) -> a + b' * [x, y]
xx = LinRange(-10, 10, 25)
yy = LinRange(-10, 10, 25)
x = vec([x for x in xx, _ in yy])
y = vec([y for _ in xx, y in yy])
z = f.(x, y)
itp = interpolate(x, y, z; derivatives=true)
xg = LinRange(-10, 10, 250)
yg = LinRange(-10, 10, 250)
_x = vec([x for x in xg, _ in yg])
_y = vec([y for _ in xg, y in yg])
vals = itp(_x, _y; method=Laplace())
for i in eachindex(vals)
ξ, η = _x[i], _y[i]
if DT.distance_to_polygon((ξ, η), get_points(itp.triangulation), get_convex_hull_vertices(itp.triangulation)) > 1e-7
@test vals[i] ≈ f(_x[i], _y[i]) atol = 1e-12
end
end
end
@testset "Does Sibson-1 reproduce spherical quadratics p ↦ μ(p-a)'(p-a)?" begin
μ = 0.05
a = [0.3, 0.7]
f = (x, y) -> let p = [x, y]
μ * (p - a)' * (p - a)
end
xx = LinRange(a[1] - 5, a[1] + 5, 25)
yy = LinRange(a[2] - 5, a[2] + 5, 25)
x = vec([x for x in xx, _ in yy])
y = vec([y for _ in xx, y in yy])
z = f.(x, y)
itp = interpolate(x, y, z; derivatives=true)
xg = LinRange(a[1] - 5, a[1] + 5, 250)
yg = LinRange(a[2] - 5, a[2] + 5, 250)
_x = vec([x for x in xg, _ in yg])
_y = vec([y for _ in xg, y in yg])
vals = itp(_x, _y; method=Sibson(1))
for i in eachindex(vals)
ξ, η = _x[i], _y[i]
if DT.distance_to_polygon((ξ, η), get_points(itp.triangulation), get_convex_hull_vertices(itp.triangulation)) > 1e-7
@test vals[i] ≈ f(_x[i], _y[i]) atol = 1e-14
end
end
end
@testset "Does Farin reproduce quadratics p ↦ a + bᵀx + xᵀQx, Q = [c d; 0 f]?" begin
a = 0.29912
b = [1.7, -2.11]
Q = [2.0 1.01; 1.01 -2.30]
f = (x, y) -> a + b' * [x, y] + [x, y]' * Q * [x, y]
xx = LinRange(-10, 10, 25)
yy = LinRange(-10, 10, 25)
x = vec([x for x in xx, _ in yy])
y = vec([y for _ in xx, y in yy])
z = f.(x, y)
itp = interpolate(x, y, z; derivatives=true)
xg = LinRange(-10, 10, 250)
yg = LinRange(-10, 10, 250)
_x = vec([x for x in xg, _ in yg])
_y = vec([y for _ in xg, y in yg])
vals = itp(_x, _y; method=Farin(1))
for i in eachindex(vals)
ξ, η = _x[i], _y[i]
if DT.distance_to_polygon((ξ, η), get_points(itp.triangulation), get_convex_hull_vertices(itp.triangulation)) > 1e-7
@test vals[i] ≈ f(_x[i], _y[i]) atol = 1e-12
end
end
end
@testset "Does Hiyoshi reproduce cubics?" begin
A, B, C, D, a, b, c, d, e, F = 1e-2*[
0.0575923
0.630772
0.529953
0.710556
0.549044
0.363113
0.587485
0.0871768
0.820668
0.399854
]
f = (x, y) -> A + B * x + C * y + D * x * y + a * x^2 + b * y^2 + c * x^2 * y + d * x * y^2 + e * x^3 + F * y^3
xx = LinRange(-1, 1, 25)
yy = LinRange(-1, 1, 25)
x = vec([x for x in xx, _ in yy])
y = vec([y for _ in xx, y in yy])
z = f.(x, y)
itp = interpolate(x, y, z; derivatives=true)
xg = LinRange(-1, 1, 250)
yg = LinRange(-1, 1, 250)
_x = vec([x for x in xg, _ in yg])
_y = vec([y for _ in xg, y in yg])
vals = itp(_x, _y; method=Hiyoshi(2))
for i in eachindex(vals)
ξ, η = _x[i], _y[i]
if DT.distance_to_polygon((ξ, η), get_points(itp.triangulation), get_convex_hull_vertices(itp.triangulation)) > 1e-7
@test vals[i] ≈ f(_x[i], _y[i]) atol=1e-6
end
end
end
# NOTE(review): stale pasted output from a previous failing test run ("4717 passed, 4955 failed, 0 errored, 0 broken") — verify the suite now passes and remove this record.
|
[
"MIT"
] | 1.3.5 | ac69089b500226d09cb611688ae67015bb107b58 | code | 2139 | using ..NaturalNeighbours
using DelaunayTriangulation
const NNI = NaturalNeighbours
@testset "iwrap" begin
@test NNI.iwrap(NNI.Sibson()) == NNI.Sibson()
@test NNI.iwrap(NNI.Triangle()) isa NNI.Triangle
@test NNI.iwrap(NNI.Nearest()) == NNI.Nearest()
@test NNI.iwrap(NNI.Laplace()) == NNI.Laplace()
@test NNI.iwrap(:sibson) == NNI.Sibson()
@test NNI.iwrap(:triangle) isa NNI.Triangle
@test NNI.iwrap(:nearest) == NNI.Nearest()
@test NNI.iwrap(:laplace) == NNI.Laplace()
@test_throws ArgumentError NNI.iwrap(:lap)
@test NNI.iwrap(NNI.Sibson(1)) == NNI.Sibson(1)
@test NNI.iwrap(NNI.Triangle(1)) isa NNI.Triangle{0}
@test_throws ArgumentError NNI.Sibson(5)
@test NNI.iwrap(NNI.Laplace(1)) == NNI.Laplace(0)
@test NNI.iwrap(NNI.Hiyoshi(2)) == NNI.Hiyoshi(2)
@test NNI.iwrap(NNI.Hiyoshi()) == NNI.Hiyoshi(0)
@test NNI.iwrap(NNI.Farin()) == NNI.Farin(1)
@test NNI.iwrap(:sibson_1) == NNI.Sibson(1)
@test NNI.iwrap(:farin) == NNI.Farin(1)
@test NNI.iwrap(:hiyoshi_2) == NNI.Hiyoshi(2)
end
@testset "show" begin
tri = triangulate_rectangle(0, 1, 0, 1, 2, 5)
tri = Triangulation(tri.points, tri.triangles, tri.convex_hull.vertices)
f = (x, y) -> sin(x) + cos(x - y)
unlock_convex_hull!(tri)
x = getx.(tri.points)
y = gety.(tri.points)
z = f.(x, y)
∇ = z .^ 2
H = z .^ (1 / 5)
itp = interpolate(tri, z; hessian=∇, gradient=H)
@test sprint() do io
Base.show(io, MIME"text/plain"(), itp)
end ==
"Natural Neighbour Interpolant\n z: [1.0, 1.3817732906760363, 0.9689124217106447, 1.5731598536817173, 0.8775825618903728, 1.7190535466982693, 0.7316888688738209, 1.8103834065185413, 0.5403023058681398, 1.8414709848078965]\n ∇: [1.0, 1.0668106895787572, 0.9937036950756749, 1.094849869590562, 0.9742212470670031, 1.114443051978001, 0.9394318704459826, 1.1260407621773936, 0.8841528765017798, 1.1298817035265263]\n H: [1.0, 1.909297426825682, 0.9387912809451863, 2.474831925235882, 0.7701511529340699, 2.9551450964158987, 0.5353686008338515, 3.277488078597678, 0.2919265817264289, 3.391015387889364]"
end | NaturalNeighbours | https://github.com/DanielVandH/NaturalNeighbours.jl.git |
|
[
"MIT"
] | 1.3.5 | ac69089b500226d09cb611688ae67015bb107b58 | code | 17881 | using ..NaturalNeighbours
using StableRNGs
using DelaunayTriangulation
using Random
using LinearAlgebra
const DT = DelaunayTriangulation
const NNI = NaturalNeighbours
include(normpath(@__DIR__, "../..", "helper_functions", "point_generator.jl"))
# Assemble the symmetric 2×2 Hessian from its packed form (H₁₁, H₂₂, H₁₂).
function to_mat(H)
    h11, h22, h12 = H[1], H[2], H[3]
    return [h11 h12; h12 h22]
end
@testset "Natural coordinates" begin
for method in (Sibson(), Nearest(), Laplace())
method = NNI.iwrap(method)
pts = [(0.0, 8.0), (0.0, 0.0), (14.0, 0.0), (14.0, 8.0), (4.0, 4.0), (10.0, 6.0), (6.0, 2.0), (12.0, 4.0), (0.0, 4.0)]
tri = triangulate(pts, randomise=false, delete_ghosts=false)
n = 250
pts = random_points_in_convex_hull(tri, n)
for p in Iterators.flatten((pts, DelaunayTriangulation.each_point(tri)))
natural_coordinates = NNI.compute_natural_coordinates(method, tri, p)
@test sum(NNI.get_coordinates(natural_coordinates)) ≈ 1
δ = NNI.get_barycentric_deviation(natural_coordinates)
if method ≠ NNI.Nearest()
@test δ ≈ 0 atol = 1e-5
else
@test δ ≈ norm(p .- get_point(tri, NNI.get_indices(natural_coordinates)[1]))
end
end
for _ in 1:5
pts = [(randn() + rand(), rand() - 2randn()) for _ in 1:250]
tri = triangulate(pts, delete_ghosts=false)
n = 500
random_points = random_points_in_convex_hull(tri, n)
for p in Iterators.flatten((random_points, DT.each_point(tri)))
natural_coordinates = NNI.compute_natural_coordinates(method, tri, p)
@test sum(NNI.get_coordinates(natural_coordinates)) ≈ 1
δ = NNI.get_barycentric_deviation(natural_coordinates)
if method ≠ NNI.Nearest()
@test δ ≈ 0 atol = 1e-5
else
@test δ ≈ norm(p .- get_point(tri, NNI.get_indices(natural_coordinates)[1]))
end
end
end
end
end
# Compare two cyclic sequences up to rotation: drop a duplicated closing entry
# from either input, rotate B so its entry closest to A's first entry leads,
# then compare with `by` (default `isequal`). Slightly tweaked version of
# `circular_equality` from DelaunayTriangulation.jl.
function _circular_equality(A, B, by=isequal; kwargs...)
    _drop_closing(v) = DT.is_circular(v) ? (@views v[begin:(end-1)]) : v
    open_A = _drop_closing(A)
    open_B = _drop_closing(B)
    # Align B so that its entry nearest to A's first entry comes first.
    pivot = argmin(abs.(open_A[begin] .- open_B))
    aligned_B = circshift(open_B, 1 - pivot)
    return by(open_A, aligned_B; kwargs...)
end
@testset "Test coefficient values for each method" begin # used GeoGebra
for _ in 1:3 # make sure rng is getting passed consistently
# Setup
rng = StableRNG(872973)
pts = [(0.0, 8.0), (0.0, 0.0), (14.0, 0.0),
(14.0, 8.0), (4.0, 4.0), (10.0, 6.0),
(6.0, 2.0), (12.0, 4.0), (0.0, 4.0),
(2.5, 5.0), (7.0, 3.3), (4.5, 5.2),
(13.0, 0.5), (12.0, 6.0), (8.5, 3.5),
(0.5, 6.0), (1.5, 6.0), (3.5, 6.0),
(0.5, 2.0), (2.5, 2.0), (2.5, 2.5),
(9.0, 2.0), (8.5, 6.0), (4.0, 2.0)]
tri = triangulate(pts, randomise=false, delete_ghosts=false, rng=rng)
vorn = voronoi(tri, clip=false)
q = (5.0, 4.0)
tri2 = deepcopy(tri)
add_point!(tri2, q, rng=rng)
vorn2 = voronoi(tri2, clip=false)
V = get_polygon(vorn2, DelaunayTriangulation.num_points(tri2))
AX2 = get_area(vorn2, DelaunayTriangulation.num_points(tri2))
# Sibson
nc = NNI.compute_natural_coordinates(NNI.Sibson(), tri, q; rng=rng)
AF1G1D1B1A1 = 1.1739978952813 # E = 5
A1B1C1 = 0.062500000375 # Z = 24
B1D1E1C1 = 0.2540749084958 # G = 7
H1I1E1D1G1 = 0.7376579777378 # K = 11
F1G1J1K1 = 0.9489911839831 # L = 12
K1I1J1 = 0.003313286868 # W = 23
AX = AF1G1D1B1A1 + A1B1C1 + B1D1E1C1 + H1I1E1D1G1 + F1G1J1K1 + K1I1J1
@test AX ≈ AX2 rtol = 1e-3
@test _circular_equality(nc.indices, [23, 12, 5, 24, 7, 11])
@test _circular_equality(nc.coordinates, [K1I1J1, F1G1J1K1, AF1G1D1B1A1, A1B1C1, B1D1E1C1, H1I1E1D1G1] ./ AX, ≈, rtol = 1e-2)
# Laplace
nc = NNI.compute_natural_coordinates(NNI.Laplace(), tri, q; rng=rng)
dqw = 4.0311288741493
dqk = 2.1189620100417
dqg = 2.2360679774998
dqz = 2.2360679774998
dqe = 1.0
dqℓ = 1.3
dc1b1 = 2.2893491697301
da1b1 = 0.0572608008105
df1b1 = 2.2260773066834
df1e1 = 1.4888476232694
de1d1 = 0.5650898843856
dd1c1 = 0.9335156761474
k = dc1b1 / dqk
w = da1b1 / dqw
ℓ = df1b1 / dqℓ
e = df1e1 / dqe
z = de1d1 / dqz
g = dd1c1 / dqg
tot = k + w + ℓ + e + z + g
k /= tot
w /= tot
ℓ /= tot
e /= tot
z /= tot
g /= tot
@test _circular_equality(nc.indices, [23, 12, 5, 24, 7, 11])
@test _circular_equality(nc.coordinates, [w, ℓ, e, z, g, k], ≈, rtol = 1e-2)
# Nearest
nc = NNI.compute_natural_coordinates(NNI.Nearest(), tri, q; rng=rng)
@test nc.indices == [5]
@test nc.coordinates ≈ [1.0]
# Sibson(1)
tri = triangulate_rectangle(0, 10, 0, 10, 101, 101)
tri = triangulate(get_points(tri), randomise=false)
f = (x, y) -> sin(x - y) + cos(x + y)
z = [f(x, y) for (x, y) in DT.each_point(tri)]
itp = interpolate(tri, z; derivatives=true)
q = (5.0, 5.0) # a data site
nc = NNI.compute_natural_coordinates(NNI.Sibson(), tri, q; rng=rng)
∇ = NNI.get_gradient(itp)
ζ, α, β = NNI._compute_sibson_1_coordinates(nc, tri, z, ∇)
@test ζ == 0.0
@test α == 1.0
@test β == 0.0
q = (5.37841, 1.3881)
nc = NNI.compute_natural_coordinates(NNI.Sibson(), tri, q; rng=rng)
ζ1, α1, β1 = NNI._compute_sibson_1_coordinates(nc, tri, z, ∇)
λ, N₀ = NNI.get_coordinates(nc), NNI.get_indices(nc)
r = [norm(q .- get_point(tri, i)) for i in N₀]
γ = λ ./ r
ζ = z[N₀] .+ [dot(∇[i], q .- get_point(tri, i)) for i in N₀]
α = dot(λ, r) / sum(γ)
β = dot(λ, r .^ 2)
ζ = dot(ζ, γ) / sum(γ)
@test α ≈ α1
@test β ≈ β1
@test ζ ≈ ζ1
# Farin(1)
tri = triangulate_rectangle(0, 10, 0, 10, 101, 101)
tri = triangulate(get_points(tri), randomise=false)
f = (x, y) -> sin(x - y) + cos(x + y)
z = [f(x, y) for (x, y) in DT.each_point(tri)]
itp = interpolate(tri, z; derivatives=true)
q = (5.37841, 1.3881)
nc = NNI.compute_natural_coordinates(NNI.Sibson(), tri, q)
∇ = NNI.get_gradient(itp)
λ = NNI.get_coordinates(nc)
N₀ = NNI.get_indices(nc)
@test NNI.is_bezier_point(1, 1, 1)
@test !NNI.is_bezier_point(1, 2, 1)
@test NNI.is_bezier_edge(1, 2, 2)
@test !NNI.is_bezier_edge(1, 2, 3)
@test NNI.is_bezier_face(1, 2, 3)
@test !NNI.is_bezier_face(1, 2, 2)
@test NNI.find_bezier_edge(1, 1, 2) == (1, 2)
@test NNI.find_bezier_edge(1, 2, 1) == (1, 2)
@test NNI.find_bezier_edge(1, 2, 2) == (2, 1)
@test collect(NNI.bezier_point_contribution(1, N₀, z)) ≈ [z[N₀[1]], 6]
@test collect(NNI.bezier_edge_contribution(tri, 2, 5, N₀, ∇, z)) ≈ [z[N₀[2]] + (1 / 3) * dot(get_point(tri, N₀[5]) .- get_point(tri, N₀[2]), ∇[N₀[2]]), 2]
@test NNI.bezier_face_contribution(tri, 2, 3, 6, N₀, ∇, z)[1] ≈ (1 / 3) * (
z[N₀[2]] +
z[N₀[3]] +
z[N₀[6]]
) + (1 / 12) * (
dot(get_point(tri, N₀[3]) .- get_point(tri, N₀[2]), ∇[N₀[2]]) +
dot(get_point(tri, N₀[6]) .- get_point(tri, N₀[2]), ∇[N₀[2]]) +
dot(get_point(tri, N₀[2]) .- get_point(tri, N₀[3]), ∇[N₀[3]]) +
dot(get_point(tri, N₀[6]) .- get_point(tri, N₀[3]), ∇[N₀[3]]) +
dot(get_point(tri, N₀[2]) .- get_point(tri, N₀[6]), ∇[N₀[6]]) +
dot(get_point(tri, N₀[3]) .- get_point(tri, N₀[6]), ∇[N₀[6]])
)
@test NNI.bezier_face_contribution(tri, 2, 3, 6, N₀, ∇, z)[2] == 1
@test collect(NNI.get_contrib(tri, 1, 1, 1, N₀, ∇, z)) ≈ collect(NNI.bezier_point_contribution(1, N₀, z))
@test collect(NNI.get_contrib(tri, 1, 2, 2, N₀, ∇, z)) ≈ collect(NNI.bezier_edge_contribution(tri, 2, 1, N₀, ∇, z))
@test collect(NNI.get_contrib(tri, 1, 2, 3, N₀, ∇, z)) ≈ collect(NNI.bezier_face_contribution(tri, 1, 2, 3, N₀, ∇, z))
n = length(λ)
s1 = 0.0
for i in 1:n, j in 1:n, k in 1:n
s1 += NNI.get_contrib(tri, i, j, k, N₀, ∇, z)[1] * λ[i] * λ[j] * λ[k]
end
@test s1 ≈ NNI._compute_farin_coordinates(nc, tri, z, ∇)
@test NNI._eval_interp(Farin(1), itp, q, NNI.NaturalNeighboursCache(tri)) ≈ NNI._compute_farin_coordinates(nc, tri, z, ∇)
@test NNI._eval_natural_coordinates(Farin(1), nc, z, ∇, tri) ≈ NNI._compute_farin_coordinates(nc, tri, z, ∇)
@test itp(q..., method=Farin(1)) ≈ NNI._compute_farin_coordinates(nc, tri, z, ∇)
@test itp(5.0, 5.0, method=Farin(1)) ≈ f(5.0, 5.0)
@test itp(5.0632, 5.0632, method=Farin(1)) ≈ f(5.0632, 5.0632) rtol = 1e-3
tri = triangulate_rectangle(0.0, 1.0, 0.0, 1.0, 300, 300)
z = [f(x, y) for (x, y) in DT.each_point(tri)]
xx = LinRange(0, 1, 50)
yy = LinRange(0, 1, 50)
x = vec([x for x in xx, _ in yy])
y = vec([y for _ in xx, y in yy])
itp = interpolate(tri, z; derivatives=true)
q = (x[52], y[52])
nc = NNI.compute_natural_coordinates(NNI.Sibson(), tri, q)
∇ = NNI.get_gradient(itp)
λ = NNI.get_coordinates(nc)
N₀ = NNI.get_indices(nc)
n = length(λ)
s1 = 0.0
for i in 1:n, j in 1:n, k in 1:n
s1 += NNI.get_contrib(tri, i, j, k, N₀, ∇, z)[1] * λ[i] * λ[j] * λ[k]
end
@test s1 ≈ NNI._compute_farin_coordinates(nc, tri, z, ∇) rtol = 1e-5
@test NNI._eval_interp(Farin(1), itp, q, NNI.NaturalNeighboursCache(tri)) ≈ NNI._compute_farin_coordinates(nc, tri, z, ∇)
@test NNI._eval_natural_coordinates(Farin(1), nc, z, ∇, tri) ≈ NNI._compute_farin_coordinates(nc, tri, z, ∇)
@test itp(q..., method=Farin(1)) ≈ NNI._compute_farin_coordinates(nc, tri, z, ∇)
@test itp(q..., method=Farin(1)) ≈ f(q...) rtol = 1e-4
# Hiyoshi(2)
tri = triangulate_rectangle(0, 10, 0, 10, 101, 101)
tri = triangulate(get_points(tri), randomise=false)
f = (x, y) -> sin(x - y) + cos(x + y)
z = [f(x, y) for (x, y) in DT.each_point(tri)]
itp = interpolate(tri, z; derivatives=true)
q = (5.37841, 1.3881)
nc = NNI.compute_natural_coordinates(NNI.Sibson(), tri, q)
∇ = NNI.get_gradient(itp)
λ = NNI.get_coordinates(nc)
N₀ = NNI.get_indices(nc)
H = NNI.get_hessian(itp)
_z(i) = z[N₀[i]]
_z(i, j) = dot(∇[N₀[i]], get_point(tri, N₀[j]) .- get_point(tri, N₀[i]))
_z(i, j, k) = collect(get_point(tri, N₀[j]) .- get_point(tri, N₀[i]))' * to_mat(H[N₀[i]]) * collect(get_point(tri, N₀[k]) .- get_point(tri, N₀[i]))
@test NNI._hiyoshi_case_1(1, N₀, z) ≈ _z(1)
let i = 1, j = 2
@test NNI._hiyoshi_case_2(tri, i, j, N₀, ∇, z) ≈ _z(i) + _z(i, j) / 5
end
let i = 1, j = 2
@test NNI._hiyoshi_case_3(tri, i, j, N₀, ∇, H, z) ≈ _z(i) + 2_z(i, j) / 5 + _z(i, j, j) / 20
end
let i = 1, j = 2, k = 3
@test NNI._hiyoshi_case_4(tri, i, j, k, N₀, ∇, H, z) ≈ _z(i) + (_z(i, j) + _z(i, k)) / 5 + _z(i, j, k) / 20
end
let i = 1, j = 2, k = 3
@test NNI._hiyoshi_case_5(tri, i, j, k, N₀, ∇, H, z) ≈
(13 / 30) * (_z(i) + _z(j)) + (2 / 15) * _z(k) +
(1 / 9) * (_z(i, j) + _z(j, i)) + (7 / 90) * (_z(i, k) + _z(j, k)) +
(2 / 45) * (_z(k, i) + _z(k, j)) + (1 / 45) * (_z(i, j, k) + _z(j, i, k) + _z(k, i, j))
end
let i = 1, j = 2, k = 3, ℓ = 4
@test NNI._hiyoshi_case_6(tri, i, j, k, ℓ, N₀, ∇, H, z) ≈
(1 / 2) * _z(i) + (1 / 6) * (_z(j) + _z(k) + _z(ℓ)) +
(7 / 90) * (_z(i, j) + _z(i, k) + _z(i, ℓ)) +
(2 / 45) * (_z(j, i) + _z(k, i) + _z(ℓ, i)) +
(1 / 30) * (_z(j, k) + _z(j, ℓ) + _z(k, j) + _z(k, ℓ) + _z(ℓ, j) + _z(ℓ, k)) +
(1 / 90) * (_z(i, j, k) + _z(i, j, ℓ) + _z(i, k, ℓ)) +
(1 / 90) * (_z(j, i, k) + _z(j, i, ℓ) + _z(k, i, j) + _z(k, i, ℓ) + _z(ℓ, i, j) + _z(ℓ, i, k)) +
(1 / 180) * (_z(j, k, ℓ) + _z(k, j, ℓ) + _z(ℓ, j, k))
end
let i = 1, j = 2, k = 3, ℓ = 4, m = 5
@test NNI._hiyoshi_case_7(tri, i, j, k, ℓ, m, N₀, ∇, H, z) ≈
(_z(i) + _z(j) + _z(k) + _z(ℓ) + _z(m)) / 5 +
(1 / 30) * (_z(i, j) + _z(i, k) + _z(i, ℓ) + _z(i, m) + _z(j, i) + _z(j, k) + _z(j, ℓ) +
_z(j, m) + _z(k, i) + _z(k, j) + _z(k, ℓ) + _z(k, m) + _z(ℓ, i) + _z(ℓ, j) + _z(ℓ, k) + _z(ℓ, m) +
_z(m, i) + _z(m, j) + _z(m, k) + _z(m, ℓ)) +
(1 / 180) * (_z(i, j, k) + _z(i, j, ℓ) + _z(i, j, m) + _z(i, k, ℓ) + _z(i, k, m) + _z(i, ℓ, m) +
_z(j, i, ℓ) + _z(j, i, k) + _z(i, i, m) + _z(j, k, ℓ) + _z(j, k, m) + _z(j, ℓ, m) + _z(k, i, j) +
_z(k, i, ℓ) + _z(k, i, m) + _z(k, j, ℓ) + _z(k, j, m) + _z(k, ℓ, m) + _z(ℓ, i, j) + _z(ℓ, i, k) +
_z(ℓ, i, m) + _z(ℓ, j, k) + _z(ℓ, j, m) + _z(ℓ, k, m) + _z(m, i, j) + _z(m, i, k) + _z(m, i, ℓ) +
_z(m, j, k) + _z(m, j, ℓ) + _z(m, k, ℓ))
end
n = length(λ)
global ss = 0.0
for a in 1:n, b in 1:n, c in 1:n, d in 1:n, e in 1:n
prod = λ[a] * λ[b] * λ[c] * λ[d] * λ[e]
(i, j, k, ℓ, m), case = NNI.group_sort(a, b, c, d, e)
local s = 0.0
if case == 1
let i = i
s += _z(i)
s *= prod
end
elseif case == 2
let i = ℓ, j = m
s += _z(i) + _z(i, j) / 5
s *= prod
end
elseif case == 3
let i = i, j = m
s += _z(i) + 2_z(i, j) / 5 + _z(i, j, j) / 20
s *= prod
end
elseif case == 4
let i = i, j = ℓ, k = m
s += _z(i) + (_z(i, j) + _z(i, k)) / 5 + _z(i, j, k) / 20
s *= prod
end
elseif case == 5
let i = i, j = k, k = m
s += (13 / 30) * (_z(i) + _z(j)) + (2 / 15) * _z(k) +
(1 / 9) * (_z(i, j) + _z(j, i)) + (7 / 90) * (_z(i, k) + _z(j, k)) +
(2 / 45) * (_z(k, i) + _z(k, j)) + (1 / 45) * (_z(i, j, k) + _z(j, i, k) + _z(k, i, j))
s *= prod
end
elseif case == 6
let i = i, j = k, k = ℓ, ℓ = m
s += (1 / 2) * _z(i) + (1 / 6) * (_z(j) + _z(k) + _z(ℓ)) +
(7 / 90) * (_z(i, j) + _z(i, k) + _z(i, ℓ)) +
(2 / 45) * (_z(j, i) + _z(k, i) + _z(ℓ, i)) +
(1 / 30) * (_z(j, k) + _z(j, ℓ) + _z(k, j) + _z(k, ℓ) + _z(ℓ, j) + _z(ℓ, k)) +
(1 / 90) * (_z(i, j, k) + _z(i, j, ℓ) + _z(i, k, ℓ)) +
(1 / 90) * (_z(j, i, k) + _z(j, i, ℓ) + _z(k, i, j) + _z(k, i, ℓ) + _z(ℓ, i, j) + _z(ℓ, i, k)) +
(1 / 180) * (_z(j, k, ℓ) + _z(k, j, ℓ) + _z(ℓ, j, k))
s *= prod
end
elseif case == 7
s += (_z(i) + _z(j) + _z(k) + _z(ℓ) + _z(m)) / 5 +
(1 / 30) * (_z(i, j) + _z(i, k) + _z(i, ℓ) + _z(i, m) + _z(j, i) + _z(j, k) + _z(j, ℓ) +
_z(j, m) + _z(k, i) + _z(k, j) + _z(k, ℓ) + _z(k, m) + _z(ℓ, i) + _z(ℓ, j) + _z(ℓ, k) + _z(ℓ, m) +
_z(m, i) + _z(m, j) + _z(m, k) + _z(m, ℓ)) +
(1 / 180) * (_z(i, j, k) + _z(i, j, ℓ) + _z(i, j, m) + _z(i, k, ℓ) + _z(i, k, m) + _z(i, ℓ, m) +
_z(j, i, ℓ) + _z(j, i, k) + _z(i, i, m) + _z(j, k, ℓ) + _z(j, k, m) + _z(j, ℓ, m) + _z(k, i, j) +
_z(k, i, ℓ) + _z(k, i, m) + _z(k, j, ℓ) + _z(k, j, m) + _z(k, ℓ, m) + _z(ℓ, i, j) + _z(ℓ, i, k) +
_z(ℓ, i, m) + _z(ℓ, j, k) + _z(ℓ, j, m) + _z(ℓ, k, m) + _z(m, i, j) + _z(m, i, k) + _z(m, i, ℓ) +
_z(m, j, k) + _z(m, j, ℓ) + _z(m, k, ℓ))
s *= prod
end
ss += s
end
@test ss ≈ NNI._compute_hiyoshi_coordinates(nc, tri, z, ∇, H) rtol = 1e-3
@test NNI._eval_interp(Hiyoshi(2), itp, q, NNI.NaturalNeighboursCache(tri)) ≈ NNI._compute_hiyoshi_coordinates(nc, tri, z, ∇, H)
@test NNI._eval_natural_coordinates(Hiyoshi(2), nc, z, ∇, H, tri) ≈ NNI._compute_hiyoshi_coordinates(nc, tri, z, ∇, H)
@test itp(q..., method=Hiyoshi(2)) ≈ NNI._compute_hiyoshi_coordinates(nc, tri, z, ∇, H)
@test itp(q..., method=Hiyoshi(2)) ≈ f(q...) rtol = 1e-4
end
end
@testset "show" begin
interpolation_point = (0.3, 0.7)
indices = [1, 4, 6, 10]
coordinates = [0.371, 0.392, 0.4991, 491.20]
triangulation = triangulate(rand(2, 50))
nc = NNI.NaturalCoordinates(coordinates, indices, interpolation_point, triangulation)
@test sprint() do io
Base.show(io, MIME"text/plain"(), nc)
end == "NaturalCoordinates{Float64,Int64}\n u: (0.3, 0.7)\n λ: [0.371, 0.392, 0.4991, 491.2]\n k: [1, 4, 6, 10]"
end | NaturalNeighbours | https://github.com/DanielVandH/NaturalNeighbours.jl.git |
|
[
"MIT"
] | 1.3.5 | ac69089b500226d09cb611688ae67015bb107b58 | code | 7440 | using ..NaturalNeighbours
using Test
const NNI = NaturalNeighbours
using DelaunayTriangulation
const DT = DelaunayTriangulation
using Combinatorics
using StatsBase
using LinearAlgebra
@testset "Computing the Bowyer-Watson envelope" begin
pts = [(0.0, 8.0), (0.0, 0.0), (14.0, 0.0), (14.0, 8.0), (4.0, 4.0), (10.0, 6.0), (6.0, 2.0), (12.0, 4.0), (0.0, 4.0)]
tri = triangulate(pts, randomise=false, delete_ghosts=false)
envelope = Int64[]
history = DT.InsertionEventHistory(tri)
A = DT.Adjacent{Int64,NTuple{2,Int64}}()
envelope, A, history, V = NNI.compute_bowyer_envelope!(envelope, tri, (6.0, 4.0))
@test !DT.is_outside(DT.point_position_relative_to_triangle(tri, V, (6.0, 4.0)))
@test DT.circular_equality(envelope, [1, 5, 7, 6, 1])
envelope, A, history, V = NNI.compute_bowyer_envelope!(envelope, tri, (6.0, 1.0))
@test !DT.is_outside(DT.point_position_relative_to_triangle(tri, V, (6.0, 1.0)))
@test DT.circular_equality(envelope, [2, 3, 8, 7, 5, 2])
envelope, A, history, V = NNI.compute_bowyer_envelope!(envelope, tri, (7.0, -1.0))
@test !DT.is_outside(DT.point_position_relative_to_triangle(tri, V, (7.0, -1.0)))
@test DT.circular_equality(envelope, [2, DT.GhostVertex, 3, 8, 7, 2])
envelope, A, history, V = NNI.compute_bowyer_envelope(tri, (7.0, -1.0))
@test !DT.is_outside(DT.point_position_relative_to_triangle(tri, V, (7.0, -1.0)))
@test DT.circular_equality(envelope, [2, DT.GhostVertex, 3, 8, 7, 2])
envelope, A, history, V = NNI.compute_bowyer_envelope!(envelope, tri, (0.0, 3.0))
@test DT.is_on(DT.point_position_relative_to_triangle(tri, V, (0.0, 3.0)))
envelope, A, history, V = NNI.compute_bowyer_envelope!(envelope, tri, history, A, (6.0, 4.0))
@test !DT.is_outside(DT.point_position_relative_to_triangle(tri, V, (6.0, 4.0)))
for T in DT.each_added_triangle(history)
i, j, k = triangle_vertices(T)
@test DT.get_adjacent(A, i, j) == k
@test DT.get_adjacent(A, j, k) == i
@test DT.get_adjacent(A, k, i) == j
end
@test length(A.adjacent) == 3length(DT.each_added_triangle(history))
envelope, A, history, V = NNI.compute_bowyer_envelope!(envelope, tri, history, A, (6.0, 1.0))
@test !DT.is_outside(DT.point_position_relative_to_triangle(tri, V, (6.0, 1.0)))
@test DT.circular_equality(envelope, [2, 3, 8, 7, 5, 2])
for T in DT.each_added_triangle(history)
i, j, k = triangle_vertices(T)
@test DT.get_adjacent(A, i, j) == k
@test DT.get_adjacent(A, j, k) == i
@test DT.get_adjacent(A, k, i) == j
end
@test length(A.adjacent) == 3length(DT.each_added_triangle(history))
@test DT.circular_equality(envelope, [2, 3, 8, 7, 5, 2])
envelope, A, history, V = NNI.compute_bowyer_envelope!(envelope, tri, history, A, (7.0, -1.0))
@test !DT.is_outside(DT.point_position_relative_to_triangle(tri, V, (7.0, -1.0)))
for T in DT.each_added_triangle(history)
i, j, k = triangle_vertices(T)
@test DT.get_adjacent(A, i, j) == k
@test DT.get_adjacent(A, j, k) == i
@test DT.get_adjacent(A, k, i) == j
end
@test length(A.adjacent) == 3length(DT.each_added_triangle(history))
@test DT.circular_equality(envelope, [2, DT.GhostVertex, 3, 8, 7, 2])
end
@testset "polygon_area" begin
θ = LinRange(0, 2π - 0.1, 25)
z = exp.(im * θ)
points = [(real(z[i]), imag(z[i])) for i in eachindex(z)]
push!(points, points[begin])
A = NNI.polygon_area(points)
_A, _ = DT.polygon_features(points, [1:length(points)-1; 1])
@test A ≈ _A
pts = rand(2, 50)
tri = triangulate(pts)
boundary_nodes = get_convex_hull_vertices(tri)
A = NNI.polygon_area(pts[:, boundary_nodes])
_A = DT.get_area(tri)
@test A ≈ _A
end
@testset "Testing sort_five" begin
@test NNI.sort_five(5, 4, 3, 2, 1) == (1, 2, 3, 4, 5)
@test NNI.sort_five(1, 2, 3, 4, 5) == (1, 2, 3, 4, 5)
@test NNI.sort_five(3, 1, 4, 5, 2) == (1, 2, 3, 4, 5)
@test NNI.sort_five(5, 5, 5, 5, 5) == (5, 5, 5, 5, 5)
@test NNI.sort_five(1, 1, 2, 2, 2) == (1, 1, 2, 2, 2)
@test NNI.sort_five(2, 2, 2, 1, 1) == (1, 1, 2, 2, 2)
@testset "random tests" begin
for _ in 1:10000
arr = rand(1:5, 5)
@test NNI.sort_five(arr...) == Tuple(sort(arr))
end
end
end
@testset "Testing count_unique_sorted" begin
for _ in 1:10000
arr = rand(1:5, 5)
i, j, k, ℓ, m = NNI.sort_five(arr...)
ct = NNI.count_unique_sorted(i, j, k, ℓ, m)
@test ct == length(unique(arr))
end
end
@testset "Testing group_sort" begin
_up(i, j, k, ℓ, m) = (unique ∘ permutations)((i, j, k, ℓ, m))
for _ in 1:10000
arr = rand(1:5, 5)
(a, b, c, d, e), case = NNI.group_sort(arr...)
@inferred NNI.group_sort(arr...)
if case == 1
# iiiii
@test a == b == c == d == e
elseif case == 2
# iiiij
@test (a == b == c == d) && (d ≠ e)
elseif case == 3
# iiijj
@test (a == b == c) && (c ≠ d) && (d == e)
elseif case == 4
# iiijk
@test (a == b == c) && (c ≠ d) && (d ≠ e)
elseif case == 5
# iijjk
@test (a == b) && (b ≠ c) && (c == d) && (d ≠ e) && (e ≠ a)
elseif case == 6
# iijkℓ
@test (a == b) && (b ≠ c) && (c ≠ d) && (d ≠ c) && (d ≠ a) && (e ≠ d) && (e ≠ c) && (e ≠ a)
elseif case == 7
# ijkℓm
@test (a ≠ b) && (b ≠ c) && (c ≠ d) && (d ≠ e) && (e ≠ a)
end
end
for i in 1:5, perm in _up(i, i, i, i, i)
_, case = NNI.group_sort(perm...)
@test case == 1
end
for i in 1:5, j in 6:10, perm in _up(i, i, i, i, j)
_, case = NNI.group_sort(perm...)
@test case == 2
end
for i in 1:5, j in 6:10, perm in _up(i, i, i, j, j)
_, case = NNI.group_sort(perm...)
@test case == 3
end
for i in 1:5, j in 6:10, k in 11:15, perm in _up(i, i, i, j, k)
_, case = NNI.group_sort(perm...)
@test case == 4
end
for i in 1:5, j in 6:10, k in 11:15, perm in _up(i, i, j, j, k)
_, case = NNI.group_sort(perm...)
@test case == 5
end
for i in 1:5, j in 6:10, k in 11:15, ℓ in 16:20, perm in _up(i, i, j, k, ℓ)
_, case = NNI.group_sort(perm...)
@test case == 6
end
for i in 1:5, j in 6:10, k in 11:15, ℓ in 16:20, m in 21:25, perm in _up(i, j, k, ℓ, m)
_, case = NNI.group_sort(perm...)
@test case == 7
end
end
@testset "directional_derivative" begin
for _ in 1:200
tri = triangulate(rand(2, 500))
i = rand(1:10)
j = rand(1:10)
∇ = [rand(2) for _ in 1:500]
N₀ = sample(1:500, 10, replace=false)
@test NNI.directional_derivative(tri, i, j, N₀, ∇) ≈ dot(get_point(tri, N₀[j]) .- get_point(tri, N₀[i]), ∇[N₀[i]])
end
end
to_mat(H) = [H[1] H[3]; H[3] H[2]]
@testset "hessian_form" begin
for _ in 1:200
tri = triangulate(rand(2, 500))
i = rand(1:10)
j = rand(1:10)
k = rand(1:10)
H = [rand(3) for _ in 1:500]
N₀ = sample(1:500, 10, replace=false)
@test NNI.hessian_form(tri, i, j, k, N₀, H) ≈ collect(get_point(tri, N₀[j]) .- get_point(tri, N₀[i]))' * to_mat(H[N₀[i]]) * collect(get_point(tri, N₀[k]) .- get_point(tri, N₀[i]))
end
end | NaturalNeighbours | https://github.com/DanielVandH/NaturalNeighbours.jl.git |
|
[
"MIT"
] | 1.3.5 | ac69089b500226d09cb611688ae67015bb107b58 | docs | 4153 | # NaturalNeighbours
[](https://DanielVandH.github.io/NaturalNeighbours.jl/stable/)
[](https://DanielVandH.github.io/NaturalNeighbours.jl/dev/)
[](https://github.com/DanielVandH/NaturalNeighbours.jl/actions/workflows/CI.yml?query=branch%3Amain)
[](https://codecov.io/gh/DanielVandH/NaturalNeighbours.jl)
[](https://zenodo.org/badge/latestdoi/638929469)
This is a package for performing [natural neighbour interpolation](https://en.wikipedia.org/wiki/Natural_neighbor_interpolation) over planar data sets (amongst some others, like piecewise linear interpolation via triangles or nearest neighbour interpolation -- see the docs), using [DelaunayTriangulation.jl](https://github.com/DanielVandH/DelaunayTriangulation.jl) to construct the Voronoi tessellation that represents the spatial information. Most of the work in this package is based on [this great thesis](https://kluedo.ub.rptu.de/frontdoor/deliver/index/docId/2104/file/diss.bobach.natural.neighbor.20090615.pdf).
This is not the only package for interpolation. If the methods available here do not suit your needs, see [Interpolations.jl](https://github.com/JuliaMath/Interpolations.jl) and the packages it links to in its README.
Here is a quick example of how to use the package, demonstrating the available methods for interpolation. See the docs for more examples, including examples for derivative generation. In this example, note that even though we evaluate the interpolant at $100^2$ points, the runtime is extremely fast thanks to the interpolant being local rather than global.
```julia
using NaturalNeighbours
using CairoMakie
## The data
f = (x, y) -> sin(x * y) - cos(x - y) * exp(-(x - y)^2)
x = vec([(i - 1) / 9 for i in (1, 3, 4, 5, 8, 9, 10), j in (1, 2, 3, 5, 6, 7, 9, 10)])
y = vec([(j - 1) / 9 for i in (1, 3, 4, 5, 8, 9, 10), j in (1, 2, 3, 5, 6, 7, 9, 10)])
z = f.(x, y)
## The interpolant and grid
itp = interpolate(x, y, z; derivatives=true)
xg = LinRange(0, 1, 100)
yg = LinRange(0, 1, 100)
_x = vec([x for x in xg, _ in yg])
_y = vec([y for _ in xg, y in yg])
exact = f.(_x, _y)
## Evaluate some interpolants
sibson_vals = itp(_x, _y; method=Sibson())
triangle_vals = itp(_x, _y; method=Triangle())
laplace_vals = itp(_x, _y; method=Laplace())
sibson_1_vals = itp(_x, _y; method=Sibson(1))
nearest_vals = itp(_x, _y; method=Nearest())
farin_vals = itp(_x, _y; method=Farin())
hiyoshi_vals = itp(_x, _y; method=Hiyoshi(2))
## Plot
function plot_2d(fig, i, j, title, vals, xg, yg, x, y, show_scatter=true)
ax = Axis(fig[i, j], xlabel="x", ylabel="y", width=600, height=600, title=title, titlealign=:left)
contourf!(ax, xg, yg, reshape(vals, (length(xg), length(yg))), colormap=:viridis, levels=-1:0.05:0, extendlow=:auto, extendhigh=:auto)
show_scatter && scatter!(ax, x, y, color=:red, markersize=14)
end
function plot_3d(fig, i, j, title, vals, xg, yg)
ax = Axis3(fig[i, j], xlabel="x", ylabel="y", width=600, height=600, title=title, titlealign=:left)
surface!(ax, xg, yg, reshape(vals, (length(xg), length(yg))), color=vals, colormap=:viridis)
end
all_vals = (sibson_vals, triangle_vals, laplace_vals, sibson_1_vals, nearest_vals, farin_vals, hiyoshi_vals, exact)
titles = ("(a): Sibson", "(b): Triangle", "(c): Laplace", "(d): Sibson-1", "(e): Nearest", "(f): Farin", "(g): Hiyoshi", "(h): Exact")
fig = Figure(fontsize=55)
for (i, (vals, title)) in enumerate(zip(all_vals, titles))
plot_2d(fig, 1, i, title, vals, xg, yg, x, y, !(vals === exact))
plot_3d(fig, 2, i, " ", vals, xg, yg)
end
resize_to_layout!(fig)
fig
# could keep going and differentiating, etc...
# ∂ = differentiate(itp, 2) -- see the docs.
```

| NaturalNeighbours | https://github.com/DanielVandH/NaturalNeighbours.jl.git |
|
[
"MIT"
] | 1.3.5 | ac69089b500226d09cb611688ae67015bb107b58 | docs | 37555 | ```@meta
CurrentModule = NaturalNeighbours
```
# Comparison of Interpolation Methods
In this section, we will compare the methods available in this package for interpolation. We consider both quantitative and qualitative comparisons. We first make comparisons using errors around each point (local analysis), and later we compare errors using global metrics (global analysis). Note that there are many papers that compare interpolation methods, so if you really want a formal analysis that also considers other interpolation methods (like kriging or radial basis methods), you can search for them. The purpose here is to just show comparisons between the methods in this package, not to, say, argue that natural neighbour interpolation fits every possible situation, or to suggest that this is even the best way to perform such an analysis. You can find some good references in [Bobach's thesis](https://kluedo.ub.rptu.de/frontdoor/deliver/index/docId/2104/file/diss.bobach.natural.neighbor.20090615.pdf). A conclusion is given at the end of this section.
# Setting up the Analysis
To setup this analysis, we use the following packages:
```julia
using NaturalNeighbours
using CairoMakie
using StableRNGs
using DelaunayTriangulation
using StaticArrays
using LinearAlgebra
using DataFrames
using StatsBase
using AlgebraOfGraphics
using BenchmarkTools
const NNI = NaturalNeighbours
```
We also define the following constants and other useful variables:
```julia
const itp_methods = (
Sibson(0),
Triangle(),
Nearest(),
Laplace(),
Sibson(1),
Farin(1),
Hiyoshi(2)
)
const diff_methods = (
Direct(),
Iterative()
)
const itp_aliases = (:Sibson0, :Triangle, :Nearest, :Laplace, :Sibson1, :Farin, :Hiyoshi)
const diff_aliases = (:Direct, :Iterative)
const itp_alias_map = Dict(itp_methods .=> itp_aliases)
const diff_alias_map = Dict(diff_methods .=> diff_aliases)
const colors = Dict(itp_aliases .=> [:red, :blue, :green, :orange, :purple, :black, :brown])
const linestyles = Dict(diff_aliases .=> [:solid, :dashdotdot])
const line_elements = [
LineElement(color=color,
linewidth=22,
linestyle=:solid) for color in values(colors)
]
const style_elements = [
LineElement(color=:black,
linewidth=22,
linestyle=linestyle) for linestyle in values(linestyles)
]
const azimuths = [0.3, 0.8, 0.3, 0.6, 0.6, 0.6, 0.45]
rng = StableRNG(123)
xg = LinRange(0, 1, 25)
yg = LinRange(0, 1, 25)
x = vec([x for x in xg, _ in yg])
y = vec([y for _ in xg, y in yg])
xg2 = LinRange(0, 1, 250)
yg2 = LinRange(0, 1, 250)
xq = vec([x for x in xg2, _ in yg2])
yq = vec([y for _ in xg2, y in yg2])
tol = 1e-2
tri = triangulate([x'; y']; rng=rng)
triq = triangulate([xq'; yq']; rng=rng)
exterior_idx = identify_exterior_points(xq, yq, get_points(tri), get_convex_hull_vertices(tri); tol=tol)
interior_idx = filter(∉(exterior_idx), eachindex(xq, yq))
```
Lastly, we define the following test functions (the first six come from [this report](https://hdl.handle.net/10945/35052)).
```julia
const f = [
(x, y) -> 0.75 * exp(-((9 * x - 2)^2 + (9 * y - 2)^2) / 4) + 0.75 * exp(-(9 * x + 1)^2 / 49 - (9 * y + 1) / 10) + 0.5 * exp(-((9 * x - 7)^2 + (9 * y - 3)^2) / 4) - 0.2 * exp(-(9 * x - 4)^2 - (9 * y - 7)^2)
(x, y) -> (1 / 9) * (tanh(9 * y - 9 * x) + 1)
(x, y) -> (1.25 + cos(5.4 * y)) / (6 * (1 + (3 * x - 1)^2))
(x, y) -> (1 / 3) * exp(-(81 / 16) * ((x - 1 / 2)^2 + (y - 1 / 2)^2))
(x, y) -> (1 / 3) * exp(-(81 / 4) * ((x - 1 / 2)^2 + (y - 1 / 2)^2))
(x, y) -> (1 / 9) * (64 - 81 * ((x - 1 / 2)^2 + (y - 1 / 2)^2))^(1 / 2) - 1 / 2
(x, y) -> sin(27 * x * y) - exp(-(x - y)^2 / 4) * cos(13 * x - 13 * y)
]
const ∇f = [
(x, y) -> @SVector[(exp(-(9 * x - 4)^2 - (9 * y - 7)^2) * (162 * x - 72)) / 5 - (3 * exp(-(9 * x - 2)^2 / 4 - (9 * y - 2)^2 / 4) * ((81 * x) / 2 - 9)) / 4 - (exp(-(9 * x - 7)^2 / 4 - (9 * y - 3)^2 / 4) * ((81 * x) / 2 - 63 / 2)) / 2 - (3 * exp(-(9 * y) / 10 - (9 * x + 1)^2 / 49 - 1 / 10) * ((162 * x) / 49 + 18 / 49)) / 4
(exp(-(9 * x - 4)^2 - (9 * y - 7)^2) * (162 * y - 126)) / 5 - (3 * exp(-(9 * x - 2)^2 / 4 - (9 * y - 2)^2 / 4) * ((81 * y) / 2 - 9)) / 4 - (exp(-(9 * x - 7)^2 / 4 - (9 * y - 3)^2 / 4) * ((81 * y) / 2 - 27 / 2)) / 2 - (27 * exp(-(9 * y) / 10 - (9 * x + 1)^2 / 49 - 1 / 10)) / 40]
(x, y) -> @SVector[tanh(9 * x - 9 * y)^2 - 1
1 - tanh(9 * x - 9 * y)^2]
(x, y) -> @SVector[-((108 * x - 36) * (cos((27 * y) / 5) + 5 / 4)) / (6 * (3 * x - 1)^2 + 6)^2
-(27 * sin((27 * y) / 5)) / (5 * (6 * (3 * x - 1)^2 + 6))]
(x, y) -> @SVector[-(exp(-(81 * (x - 1 / 2)^2) / 16 - (81 * (y - 1 / 2)^2) / 16) * ((81 * x) / 8 - 81 / 16)) / 3
-(exp(-(81 * (x - 1 / 2)^2) / 16 - (81 * (y - 1 / 2)^2) / 16) * ((81 * y) / 8 - 81 / 16)) / 3]
(x, y) -> @SVector[-(exp(-(81 * (x - 1 / 2)^2) / 4 - (81 * (y - 1 / 2)^2) / 4) * ((81 * x) / 2 - 81 / 4)) / 3
-(exp(-(81 * (x - 1 / 2)^2) / 4 - (81 * (y - 1 / 2)^2) / 4) * ((81 * y) / 2 - 81 / 4)) / 3]
(x, y) -> @SVector[-(162 * x - 81) / (18 * (64 - 81 * (y - 1 / 2)^2 - 81 * (x - 1 / 2)^2)^(1 / 2))
-(162 * y - 81) / (18 * (64 - 81 * (y - 1 / 2)^2 - 81 * (x - 1 / 2)^2)^(1 / 2))]
(x, y) -> @SVector[27 * y * cos(27 * x * y) + 13 * exp(-(x - y)^2 / 4) * sin(13 * x - 13 * y) + exp(-(x - y)^2 / 4) * cos(13 * x - 13 * y) * (x / 2 - y / 2)
27 * x * cos(27 * x * y) - 13 * exp(-(x - y)^2 / 4) * sin(13 * x - 13 * y) - exp(-(x - y)^2 / 4) * cos(13 * x - 13 * y) * (x / 2 - y / 2)]
]
const Hf = [
(x, y) -> @SMatrix[(162*exp(-(9 * x - 4)^2 - (9 * y - 7)^2))/5-(243*exp(-(9 * y) / 10 - (9 * x + 1)^2 / 49 - 1 / 10))/98-(243*exp(-(9 * x - 2)^2 / 4 - (9 * y - 2)^2 / 4))/8-(81*exp(-(9 * x - 7)^2 / 4 - (9 * y - 3)^2 / 4))/4+(3*exp(-(9 * y) / 10 - (9 * x + 1)^2 / 49 - 1 / 10)*((162*x)/49+18/49)^2)/4+(3*exp(-(9 * x - 2)^2 / 4 - (9 * y - 2)^2 / 4)*((81*x)/2-9)^2)/4+(exp(-(9 * x - 7)^2 / 4 - (9 * y - 3)^2 / 4)*((81*x)/2-63/2)^2)/2-(exp(-(9 * x - 4)^2 - (9 * y - 7)^2)*(162*x-72)^2)/5 (27*exp(-(9 * y) / 10 - (9 * x + 1)^2 / 49 - 1 / 10)*((162*x)/49+18/49))/40+(3*exp(-(9 * x - 2)^2 / 4 - (9 * y - 2)^2 / 4)*((81*x)/2-9)*((81*y)/2-9))/4+(exp(-(9 * x - 7)^2 / 4 - (9 * y - 3)^2 / 4)*((81*x)/2-63/2)*((81*y)/2-27/2))/2-(exp(-(9 * x - 4)^2 - (9 * y - 7)^2)*(162*x-72)*(162*y-126))/5
(27*exp(-(9 * y) / 10 - (9 * x + 1)^2 / 49 - 1 / 10)*((162*x)/49+18/49))/40+(3*exp(-(9 * x - 2)^2 / 4 - (9 * y - 2)^2 / 4)*((81*x)/2-9)*((81*y)/2-9))/4+(exp(-(9 * x - 7)^2 / 4 - (9 * y - 3)^2 / 4)*((81*x)/2-63/2)*((81*y)/2-27/2))/2-(exp(-(9 * x - 4)^2 - (9 * y - 7)^2)*(162*x-72)*(162*y-126))/5 (243*exp(-(9 * y) / 10 - (9 * x + 1)^2 / 49 - 1 / 10))/400+(162*exp(-(9 * x - 4)^2 - (9 * y - 7)^2))/5-(243*exp(-(9 * x - 2)^2 / 4 - (9 * y - 2)^2 / 4))/8-(81*exp(-(9 * x - 7)^2 / 4 - (9 * y - 3)^2 / 4))/4+(3*exp(-(9 * x - 2)^2 / 4 - (9 * y - 2)^2 / 4)*((81*y)/2-9)^2)/4+(exp(-(9 * x - 7)^2 / 4 - (9 * y - 3)^2 / 4)*((81*y)/2-27/2)^2)/2-(exp(-(9 * x - 4)^2 - (9 * y - 7)^2)*(162*y-126)^2)/5]
(x, y) -> @SMatrix[-2*tanh(9 * x - 9 * y)*(9*tanh(9 * x - 9 * y)^2-9) 2*tanh(9 * x - 9 * y)*(9*tanh(9 * x - 9 * y)^2-9)
2*tanh(9 * x - 9 * y)*(9*tanh(9 * x - 9 * y)^2-9) -2*tanh(9 * x - 9 * y)*(9*tanh(9 * x - 9 * y)^2-9)]
(x, y) -> @SMatrix[(2*(108*x-36)^2*(cos((27 * y) / 5)+5/4))/(6*(3*x-1)^2+6)^3-(108*(cos((27 * y) / 5)+5/4))/(6*(3*x-1)^2+6)^2 (27*sin((27 * y) / 5)*(108*x-36))/(5*(6*(3*x-1)^2+6)^2)
(27*sin((27 * y) / 5)*(108*x-36))/(5*(6*(3*x-1)^2+6)^2) -(729 * cos((27 * y) / 5))/(25*(6*(3*x-1)^2+6))]
(x, y) -> @SMatrix[(exp(-(81 * (x - 1 / 2)^2) / 16 - (81 * (y - 1 / 2)^2) / 16)*((81*x)/8-81/16)^2)/3-(27*exp(-(81 * (x - 1 / 2)^2) / 16 - (81 * (y - 1 / 2)^2) / 16))/8 (exp(-(81 * (x - 1 / 2)^2) / 16 - (81 * (y - 1 / 2)^2) / 16)*((81*x)/8-81/16)*((81*y)/8-81/16))/3
(exp(-(81 * (x - 1 / 2)^2) / 16 - (81 * (y - 1 / 2)^2) / 16)*((81*x)/8-81/16)*((81*y)/8-81/16))/3 (exp(-(81 * (x - 1 / 2)^2) / 16 - (81 * (y - 1 / 2)^2) / 16)*((81*y)/8-81/16)^2)/3-(27*exp(-(81 * (x - 1 / 2)^2) / 16 - (81 * (y - 1 / 2)^2) / 16))/8]
(x, y) -> @SMatrix[(exp(-(81 * (x - 1 / 2)^2) / 4 - (81 * (y - 1 / 2)^2) / 4)*((81*x)/2-81/4)^2)/3-(27*exp(-(81 * (x - 1 / 2)^2) / 4 - (81 * (y - 1 / 2)^2) / 4))/2 (exp(-(81 * (x - 1 / 2)^2) / 4 - (81 * (y - 1 / 2)^2) / 4)*((81*x)/2-81/4)*((81*y)/2-81/4))/3
(exp(-(81 * (x - 1 / 2)^2) / 4 - (81 * (y - 1 / 2)^2) / 4)*((81*x)/2-81/4)*((81*y)/2-81/4))/3 (exp(-(81 * (x - 1 / 2)^2) / 4 - (81 * (y - 1 / 2)^2) / 4)*((81*y)/2-81/4)^2)/3-(27*exp(-(81 * (x - 1 / 2)^2) / 4 - (81 * (y - 1 / 2)^2) / 4))/2]
(x, y) -> @SMatrix[-(162 * x - 81)^2/(36*(64-81*(y-1/2)^2-81*(x-1/2)^2)^(3/2))-9/(64-81*(y-1/2)^2-81*(x-1/2)^2)^(1/2) -((162 * x - 81) * (162 * y - 81))/(36*(64-81*(y-1/2)^2-81*(x-1/2)^2)^(3/2))
-((162 * x - 81) * (162 * y - 81))/(36*(64-81*(y-1/2)^2-81*(x-1/2)^2)^(3/2)) -(162 * y - 81)^2/(36*(64-81*(y-1/2)^2-81*(x-1/2)^2)^(3/2))-9/(64-81*(y-1/2)^2-81*(x-1/2)^2)^(1/2)]
(x, y) -> @SMatrix[(339*exp(-(x - y)^2 / 4)*cos(13 * x - 13 * y))/2-729*y^2*sin(27 * x * y)-exp(-(x - y)^2 / 4)*cos(13 * x - 13 * y)*(x/2-y/2)^2-26*exp(-(x - y)^2 / 4)*sin(13 * x - 13 * y)*(x/2-y/2) 27*cos(27 * x * y)-(339*exp(-(x - y)^2 / 4)*cos(13 * x - 13 * y))/2-729*x*y*sin(27 * x * y)+exp(-(x - y)^2 / 4)*cos(13 * x - 13 * y)*(x/2-y/2)^2+26*exp(-(x - y)^2 / 4)*sin(13 * x - 13 * y)*(x/2-y/2)
27*cos(27 * x * y)-(339*exp(-(x - y)^2 / 4)*cos(13 * x - 13 * y))/2-729*x*y*sin(27 * x * y)+exp(-(x - y)^2 / 4)*cos(13 * x - 13 * y)*(x/2-y/2)^2+26*exp(-(x - y)^2 / 4)*sin(13 * x - 13 * y)*(x/2-y/2) (339*exp(-(x - y)^2 / 4)*cos(13 * x - 13 * y))/2-729*x^2*sin(27 * x * y)-exp(-(x - y)^2 / 4)*cos(13 * x - 13 * y)*(x/2-y/2)^2-26*exp(-(x - y)^2 / 4)*sin(13 * x - 13 * y)*(x/2-y/2)]
]
```
## Assessment Metrics
We define here the methods we will use for assessing the quality of an interpolant.
### Surface Smoothness
There are many ways to measure how rough or how smooth a surface is. I don't consider anything so complicated here, and instead I just compare normal vectors at each point. For a function $f$, the normal vector at a point $(x, y, f(x, y))$ is given by $(-\partial_xf, -\partial_yf, 1)/\sqrt{1 + \partial_xf^2 + \partial_yf^2}$.
For the interpolated surface, we cannot rely so readily on the generated gradients for this purpose. We instead triangulate the interpolated surface and then, for each point on the surface, take an angle-weighted average of the normal vectors at each triangle adjoining that point. The functions for computing this average are given below.
```julia
function normal_to_triangle(p₁, p₂, p₃, z₁, z₂, z₃)
x₁, y₁ = getxy(p₁)
x₂, y₂ = getxy(p₂)
x₃, y₃ = getxy(p₃)
Δ = x₁ * y₂ - x₂ * y₁ - x₁ * y₃ + x₃ * y₁ + x₂ * y₃ - x₃ * y₂
s₁ = (y₂ - y₃) / Δ
s₂ = (y₃ - y₁) / Δ
s₃ = (y₁ - y₂) / Δ
s₄ = (x₃ - x₂) / Δ
s₅ = (x₁ - x₃) / Δ
s₆ = (x₂ - x₁) / Δ
α = s₁ * z₁ + s₂ * z₂ + s₃ * z₃
β = s₄ * z₁ + s₅ * z₂ + s₆ * z₃
∇norm = sqrt(1 + α^2 + β^2)
∇ = @SVector[-α, -β, 1.0]
return ∇ / ∇norm
end
function normal_to_triangle(tri, z, i, j, k)
p₁, p₂, p₃ = get_point(tri, i, j, k)
z₁, z₂, z₃ = z[i], z[j], z[k]
return normal_to_triangle(p₁, p₂, p₃, z₁, z₂, z₃)
end
function ∠(v₁, v₂)
# acos is not reliable: https://people.eecs.berkeley.edu/~wkahan/Triangle.pdf, https://scicomp.stackexchange.com/a/27694/42528
a = norm(v₁)
b = norm(v₂)
c = norm(v₁ - v₂)
if a < b
a, b = b, a
end
μ = if b ≥ c
c - (a - b)
else
b - (a - c)
end
num = ((a - b) + c) * μ
den = (a + (b + c)) * ((a - c) + b)
θ = 2atan(sqrt(num / den))
return θ
end
function ∠(tri, z, i, j, k)
# Angle between pᵢpⱼ and pᵢpₖ
p₁, p₂, p₃ = get_point(tri, i, j, k)
z₁, z₂, z₃ = z[i], z[j], z[k]
px, py = getxy(p₁)
qx, qy = getxy(p₂)
rx, ry = getxy(p₃)
v₁ = @SVector[qx - px, qy - py, z₂ - z₁]
v₂ = @SVector[rx - px, ry - py, z₃ - z₁]
return ∠(v₁, v₂)
end
function average_normal_vector(tri, z, i)
# Using the mean-weighted-angle formula: https://doi.org/10.1007/s00371-004-0271-1
n = @SVector[0.0, 0.0, 1.0]
neighbouring_edges = get_adjacent2vertex(tri, i)
for (j, k) in neighbouring_edges
if !DelaunayTriangulation.is_ghost_triangle(i, j, k)
ψ = ∠(tri, z, i, j, k)
n = n + ψ * normal_to_triangle(tri, z, i, j, k)
end
end
return n / norm(n)
end
function compare_normal_vectors(tri, z, i, ∇f::Function)
# Maybe this is similar to https://doi.org/10.1007/978-3-319-40548-3_19?
# The description is so vague.
p = get_point(tri, i)
x, y = getxy(p)
n̄̂ = average_normal_vector(tri, z, i)
nx, ny = ∇f(x, y)
n = @SVector[-nx, -ny, 1.0]
n̂ = n / norm(n)
return rad2deg(∠(n̄̂, n̂))
end
function compare_normal_vectors(tri, z, ∇f::Function, interior_idx)
return [compare_normal_vectors(tri, z, i, ∇f) for i in interior_idx]
end
```
### Comparing Raw Values Locally
To compare raw values, such as heights or Hessians, we use the error definition $\varepsilon(y, \hat{y}) = 2\|y - \hat{y}\| / \|y + \hat{y}\|$:
```julia
function compare_quantities(ŷ, y, interior_idx)
ε = 2norm.(ŷ .- y) ./ norm.(ŷ .+ y)
return to_unit(ε[interior_idx])
end
function to_unit(μ)
return max.(μ, sqrt(eps(Float64)))
end
to_mat(H) = @SMatrix[H[1] H[3]; H[3] H[2]]
```
## Local Analysis Function
The function we use for our local analysis is defined below.
```julia
function analysis_function!(df, tri, triq, x, y, xq, yq, fidx, itp_method, diff_method, interior_idx)
g = f[fidx]
∇g = ∇f[fidx]
Hg = Hf[fidx]
z = g.(x, y)
itp = interpolate(tri, z; derivatives=true, method=diff_method)
∂ = differentiate(itp, 2)
ẑ = itp(xq, yq; method=itp_method)
∇̂Ĥ = ∂(xq, yq; method=diff_method, interpolant_method=itp_method)
∇̂ = SVector{2,Float64}.(first.(∇̂Ĥ))
Ĥ = to_mat.(last.(∇̂Ĥ))
z = g.(xq, yq)
∇ = ∇g.(xq, yq)
H = Hg.(xq, yq)
εz = compare_quantities(ẑ, z, interior_idx)
ε∇ = compare_quantities(∇̂, ∇, interior_idx)
εH = compare_quantities(Ĥ, H, interior_idx)
εn = compare_normal_vectors(triq, ẑ, ∇g, interior_idx)
_df = DataFrame(
:z_exact => z[interior_idx],
:z_approx => ẑ[interior_idx],
:∇_exact => ∇[interior_idx],
:∇_approx => ∇̂[interior_idx],
:H_exact => H[interior_idx],
:H_approx => Ĥ[interior_idx],
:z_error => εz,
:∇_error => ε∇,
:H_error => εH,
:n_error => εn,
:itp_method => itp_alias_map[itp_method],
:diff_method => diff_alias_map[diff_method],
:f_idx => fidx
)
append!(df, _df)
return df
end
function analysis_function(tri, triq, x, y, xq, yq, interior_idx)
df = DataFrame(
z_exact=Float64[],
z_approx=Float64[],
∇_exact=SVector{2,Float64}[],
∇_approx=SVector{2,Float64}[],
H_exact=SMatrix{2, 2, Float64}[],
H_approx=SMatrix{2, 2, Float64}[],
z_error=Float64[],
∇_error=Float64[],
H_error=Float64[],
n_error=Float64[],
itp_method=Symbol[],
diff_method=Symbol[],
f_idx=Int[]
)
for fidx in eachindex(f, ∇f, Hf)
for itp_method in itp_methods
for diff_method in diff_methods
analysis_function!(df, tri, triq, x, y, xq, yq, fidx, itp_method, diff_method, interior_idx)
end
end
end
return df
end
```
# Quantitative Local Analysis
Let's now give the results for our quantitative local analysis. We use our `analysis_function` as:
```julia
df = analysis_function(tri, triq, x, y, xq, yq, interior_idx)
gdf = groupby(df, [:f_idx, :itp_method, :diff_method])
```
We plot these results as follows.
```julia
const alph = join('a':'z')
fig = Figure(fontsize=64)
z_ax = [Axis(fig[i, 1], xlabel=L"\varepsilon", ylabel=L"F(\varepsilon)",
title=L"(%$(alph[i])1): $z$ $\varepsilon$ for $f_{%$i}", titlealign=:left,
width=600, height=400, xscale=log10) for i in eachindex(f, ∇f, Hf)]
∇_ax = [Axis(fig[i, 2], xlabel=L"\varepsilon", ylabel=L"F(\varepsilon)",
title=L"(%$(alph[i])2): $\nabla$ $\varepsilon$ for $f_{%$i}", titlealign=:left,
width=600, height=400, xscale=log10) for i in eachindex(f, ∇f, Hf)]
H_ax = [Axis(fig[i, 3], xlabel=L"\varepsilon", ylabel=L"F(\varepsilon)",
title=L"(%$(alph[i])3): $H$ $\varepsilon$ for $f_{%$i}", titlealign=:left,
width=600, height=400, xscale=log10) for i in eachindex(f, ∇f, Hf)]
n_ax = [Axis(fig[i, 4], xlabel=L"\varepsilon", ylabel=L"F(\varepsilon)",
title=L"(%$(alph[i])4): $n$ $\varepsilon$ for $f_{%$i}", titlealign=:left,
width=600, height=400) for i in eachindex(f, ∇f, Hf)]
f_ax = [Axis3(fig[i, 5], xlabel=L"x", ylabel=L"y", zlabel=L"f_{%$i}(x, y)",
title=L"(%$(alph[i])5): $f_{%$i}$'s surface", titlealign=:left,
width=600, height=400, azimuth=azimuths[i]) for i in eachindex(f, ∇f, Hf)]
xℓ = [
(1e-5, 1.0) (1e-3, 1.0) (1e-2, 1.0) (0.0, 5.0)
(1e-5, 1.0) (1e-2, 1.0) (1e-1, 1.0) (0.0, 5.0)
(1e-6, 1e-1) (1e-5, 1.0) (1e-2, 1.0) (0.0, 2.0)
(1e-6, 1e-1) (1e-4, 1e-1) (1e-2, 1.0) (0.0, 1.0)
(1e-5, 1e-1) (1e-3, 1.0) (1e-2, 1.0) (0.0, 2.0)
(1e-8, 1e-1) (1e-5, 1e-1) (1e-2, 1e-1) (0.0, 0.5)
(1e-2, 1.0) (1e-2, 1.0) (1e-1, 1.0) (0.0, 15.0)
]
for i in eachindex(f)
xlims!(z_ax[i], xℓ[i, 1]...)
xlims!(∇_ax[i], xℓ[i, 2]...)
xlims!(H_ax[i], xℓ[i, 3]...)
xlims!(n_ax[i], xℓ[i, 4]...)
end
for (f_idx, itp_alias, diff_alias) in keys(gdf)
_df = gdf[(f_idx, itp_alias, diff_alias)]
clr = colors[itp_alias]
ls = linestyles[diff_alias]
_z_ax = z_ax[f_idx]
_∇_ax = ∇_ax[f_idx]
_H_ax = H_ax[f_idx]
_n_ax = n_ax[f_idx]
z_error = _df.z_error
∇_error = _df.∇_error
H_error = _df.H_error
n_error = _df.n_error
ecdfplot!(_z_ax, z_error, color=clr, linestyle=ls, linewidth=7)
ecdfplot!(_∇_ax, ∇_error, color=clr, linestyle=ls, linewidth=7)
ecdfplot!(_H_ax, H_error, color=clr, linestyle=ls, linewidth=7)
if itp_alias ≠ :Nearest
ecdfplot!(_n_ax, n_error, color=clr, linestyle=ls, linewidth=7)
end
end
for f_idx in eachindex(f)
g = f[f_idx]
fz = [g(x, y) for x in xg2, y in yg2]
_f_ax = f_ax[f_idx]
surface!(_f_ax, xg2, yg2, fz)
end
[Legend(
fig[i:(i+1), 6],
[line_elements, style_elements],
[string.(keys(colors)), string.(keys(linestyles))],
["Interpolant", "Differentiator"],
titlesize=78,
labelsize=78,
patchsize=(100, 30)
) for i in (1, 3, 5)]
resize_to_layout!(fig)
fig
```
```@raw html
<figure>
    <img src='../figures/interpolant_comparison.png' alt='Comparing interpolants'><br>
</figure>
```
For these plots:
1. The first column shows the errors for the height values.
2. The second column shows the errors in the gradients.
3. The third column shows the errors in the Hessians.
4. The fourth column shows the errors in the normal vectors. The nearest neighbour interpolant is not shown for this column since it is (unsurprisingly) consistently the worst method.
5. The fifth column shows the exact surface.
Note that we use the empirical cumulative distribution function, and the function $F(\varepsilon)$ on the $y$-axis could also be interpreted as the ``fraction of data''. For example, the plot in (f3) shows the curve for the Hiyoshi interpolant being much further to the left than the other curves, implying that Hiyoshi has by far outperformed the others.
Let us discuss what we see from each column.
1. The smooth interpolants (`Sibson(1)`, `Farin(1)`, and `Hiyoshi(2)`), with derivative information generated with the direct method, appear to perform the best when measuring differences in heights, with the nearest neighbour interpolant having the worst performance. The `Laplace()`, `Triangle()`, and `Sibson(0)` interpolants seem to have similar performance, although the `Triangle()` interpolant probably wouldn't have as high a performance if we had e.g. randomly spaced data (analysed later).
2. The gradient estimates seem to depend only on the smoothness of the interpolant rather than the interpolant itself, and the `Direct()` method seems to outperform the `Iterative()` method for generating derivatives. In particular, for generating gradients, the `Hiyoshi(2)`, `Sibson(1)`, or `Farin(1)` interpolants perform equally as well, provided the `Direct()` method is used.
3. The results for the Hessians are similar to the gradient results.
4. For the smoothness of the interpolated surfaces, the `Hiyoshi(2)` and `Farin(1)` results are consistently the best, with `Sibson(1)` a close competitor. The `Triangle()` interpolant leads to the least smooth surface of those considered. Similarly to the gradient and Hessian results, the `Direct()` approach leads to the best results compared to `Iterative()`.
Overall, the smooth interpolants lead to the best results, and of the non-smooth interpolants (`Sibson(0)`, `Laplace()`, `Triangle()`, `Nearest()`), `Sibson(0)` seems to have the best results. For derivative generation, `Direct()` seems to give the best results.
Note that the analysis here does not consider whether using cubic terms in `Direct()` methods makes a difference, or whether varying `alpha` for the `Iterative()` approach makes a difference.
# Qualitative Local Analysis
Now we will visualise the surfaces produced by the interpolants. Based on the above results, we will only consider `Direct()` for derivative generation. Let us first look at the surfaces themselves.
```julia
considered_itp = eachindex(itp_methods)
considered_fidx = eachindex(f)
fig = Figure(fontsize=72, resolution = (4800, 4900))
ax = [
Axis3(fig[i, j],
xlabel=L"x",
ylabel=L"y",
zlabel=L"f(x, y)",
title=L"(%$(alph[i])%$(j)): ($f_{%$i}$, %$(itp_aliases[j]))",
titlealign=:left,
width=600,
height=600,
azimuth=azimuths[i]
)
for i in considered_fidx, j in considered_itp
]
for (j, i) in enumerate(considered_fidx)
for (ℓ, k) in enumerate(considered_itp)
_gdf = gdf[(i, itp_aliases[k], diff_aliases[1])]
_ax = ax[j, ℓ]
_z = _gdf.z_approx
surface!(_ax, xq[interior_idx], yq[interior_idx], _z)
xlims!(_ax, 0, 1)
ylims!(_ax, 0, 1)
hidedecorations!(_ax)
end
end
fig
```
```@raw html
<figure>
    <img src='../figures/3d_visual_interpolant_comparison.png' alt='Comparing interpolant surfaces'><br>
</figure>
```
We can clearly see some of the roughness produced by the non-smooth interpolants. `Hiyoshi(2)` seems to have the best visual quality.
To assess these results further, we can look at the errors. The function we use for this is:
```julia
function plot_errors(considered_fidx, considered_itp, gdf, interior_idx, error_type, colorranges)
fig = Figure(fontsize=72)
ax = [
Axis(fig[i, j],
xlabel=L"x",
ylabel=L"y",
title=L"(%$(alph[i])%$(j)): ($f_{%$i}$, %$(itp_aliases[j]))",
titlealign=:left,
width=600,
height=600,
)
for i in considered_fidx, j in considered_itp
]
for (j, i) in enumerate(considered_fidx)
for (ℓ, k) in enumerate(considered_itp)
_gdf = gdf[(i, itp_aliases[k], diff_aliases[1])]
_ax = ax[j, ℓ]
ε = _gdf[!, error_type]
heatmap!(_ax, xq[interior_idx], yq[interior_idx], ε, colorrange=colorranges[j])
xlims!(_ax, 0, 1)
ylims!(_ax, 0, 1)
hidedecorations!(_ax)
end
end
resize_to_layout!(fig)
fig
end
```
For the height errors:
```julia
z_colorranges = [(1e-4, 0.01), (1e-5, 0.1), (1e-3, 0.05), (1e-4, 0.01), (1e-3, 0.1), (1e-4, 0.01), (1e-2, 0.5)]
fig = plot_errors(considered_fidx, considered_itp, gdf, interior_idx, :z_error, z_colorranges)
```
```@raw html
<figure>
    <img src='../figures/2d_visual_interpolant_comparison_z_error.png' alt='Heatmap of 2D height errors'><br>
</figure>
```
To compare the gradients, we use:
```julia
∇_colorranges = [(1e-2, 0.2), (1e-2, 1.0), (1e-2, 0.01), (1e-3, 0.01), (1e-2, 0.05), (1e-3, 0.01), (1e-2, 0.25)]
fig = plot_errors(considered_fidx, considered_itp, gdf, interior_idx, :∇_error, ∇_colorranges)
```
```@raw html
<figure>
    <img src='../figures/2d_visual_interpolant_comparison_grad_error.png' alt='Heatmap of 2D gradient errors'><br>
</figure>
```
Next, the Hessians are compared:
```julia
H_colorranges = [(1e-1, 0.5), (0.2, 0.8), (1e-1, 0.2), (1e-2, 0.2), (1e-1, 0.25), (1e-2, 0.1), (1e-1, 0.8)]
fig = plot_errors(considered_fidx, considered_itp, gdf, interior_idx, :H_error, H_colorranges)
```
```@raw html
<figure>
    <img src='../figures/2d_visual_interpolant_comparison_hess_error.png' alt='Heatmap of 2D Hessian errors'><br>
</figure>
```
Finally, we compare the normal vector fields:
```julia
n_colorranges = [(0, 5), (0, 5), (0, 2), (0, 1), (0, 2), (0, 2.5), (0, 15)]
fig = plot_errors(considered_fidx, considered_itp, gdf, interior_idx, :n_error, n_colorranges)
```
```@raw html
<figure>
    <img src='../figures/2d_visual_interpolant_comparison_normal_error.png' alt='Heatmap of 2D normal errors'><br>
</figure>
```
Judging from these results, again the `Hiyoshi(2)` and `Farin(1)` methods have the best performance across all metrics.
# Quantitative Global Analysis
Now we will use global metrics to assess the interpolation quality. A limitation of the above discussion is that we are considering a fixed data set. Here, we instead consider random data sets (with the same test functions) and weighted averages of the local errors. We will measure the errors as a function of the median edge length of the data set's underlying triangulation. Note that, in these random data sets, we will not maintain a convex hull of $[0, 1]^2$. Lastly, we will use a stricter tolerance on whether to classify a point as being inside of the convex hull in this case, now using a `tol = 0.1` rather than `tol = 0.01`. The global metric we use is $100\sqrt{\frac{\sum_i \|y_i - \hat y_i\|^2}{\sum_i \|\hat y_i\|^2}}$:
```julia
function rrmse(y, ŷ) # interior_indices already applied
num = 0.0
den = 0.0
for (yᵢ, ŷᵢ) in zip(y, ŷ)
if all(isfinite, (yᵢ..., ŷᵢ...))
num += norm(yᵢ .- ŷᵢ)^2
den += norm(ŷᵢ)^2
end
end
return 100sqrt(num / den)
end
```
For comparing the normal vector errors, we will just use the median. To compute the median edge length of a triangulation, we use:
```julia
function median_edge_length(tri)
lengths = zeros(DelaunayTriangulation.num_solid_edges(tri))
for (k, (i, j)) in (enumerate ∘ each_solid_edge)(tri)
p, q = get_point(tri, i, j)
px, py = getxy(p)
qx, qy = getxy(q)
ℓ = sqrt((qx - px)^2 + (qy - py)^2)
lengths[k] = ℓ
end
return median(lengths)
end
```
The function we use for performing our random analysis is:
```julia
function random_analysis_function(nsamples, triq, xq, yq, tol, rng)
npoints = rand(rng, 50:2500)
xs = [rand(rng, 50) for _ in 1:nsamples]
ys = [rand(rng, 50) for _ in 1:nsamples]
tris = [triangulate(tuple.(x, y); rng) for (x, y) in zip(xs, ys)]
[refine!(tri; max_points=npoints) for tri in tris]
xs = [first.(get_points(tri)) for tri in tris]
ys = [last.(get_points(tri)) for tri in tris]
exterior_idxs = [identify_exterior_points(xq, yq, get_points(tri), get_convex_hull_vertices(tri); tol=tol) for tri in tris]
interior_idxs = [filter(∉(exterior_idx), eachindex(xq, yq)) for exterior_idx in exterior_idxs]
median_lengths = [median_edge_length(tri) for tri in tris]
sortidx = sortperm(median_lengths)
[permute!(obj, sortidx) for obj in (xs, ys, tris, exterior_idxs, interior_idxs, median_lengths)]
dfs = Channel{DataFrame}(nsamples)
Base.Threads.@threads for i in 1:nsamples
tri = tris[i]
x = xs[i]
y = ys[i]
interior_idx = interior_idxs[i]
put!(dfs, analysis_function(tri, triq, x, y, xq, yq, interior_idx))
println("Processed simulation $i.")
end
close(dfs)
dfs = collect(dfs)
df = DataFrame(
f_idx=Int64[],
itp_method=Symbol[],
diff_method=Symbol[],
z_rrmse=Float64[],
∇_rrmse=Float64[],
H_rrmse=Float64[],
n_error_median=Float64[],
median_edge_length=Float64[]
)
for (i, _df) in enumerate(dfs)
_gdf = groupby(_df, [:f_idx, :itp_method, :diff_method])
_cgdf = combine(_gdf,
[:z_exact, :z_approx] => ((z_exact, z_approx) -> rrmse(z_exact, z_approx)) => :z_rrmse,
[:∇_exact, :∇_approx] => ((∇_exact, ∇_approx) -> rrmse(∇_exact, ∇_approx)) => :∇_rrmse,
[:H_exact, :H_approx] => ((H_exact, H_approx) -> rrmse(H_exact, H_approx)) => :H_rrmse,
:n_error => median => :n_error_median)
_cgdf[!, :median_edge_length] .= median_lengths[i]
append!(df, _cgdf)
end
_gdf = groupby(df, [:f_idx, :itp_method, :diff_method])
return _gdf
end
```
Now let's do the actual analysis.
```julia
nsamples = 50
rng = StableRNG(998881)
tol = 1e-1
random_results = random_analysis_function(nsamples, triq, xq, yq, tol, rng)
```
With these results, we can plot the errors for each method.
```julia
fig = Figure(fontsize=64)
z_ax = [Axis(fig[i, 1], xlabel=L"$ $Median edge length", ylabel=L"$z$ error",
title=L"(%$(alph[i])1): $f_{%$i}", titlealign=:left,
width=600, height=400, yscale=log10) for i in eachindex(f, ∇f, Hf)]
∇_ax = [Axis(fig[i, 2], xlabel=L"$ $Median edge length", ylabel=L"$\nabla$ error",
title=L"(%$(alph[i])2): $f_{%$i}", titlealign=:left,
width=600, height=400, yscale=log10) for i in eachindex(f, ∇f, Hf)]
H_ax = [Axis(fig[i, 3], xlabel=L"$ $Median edge length", ylabel=L"$H$ error",
title=L"(%$(alph[i])3): $f_{%$i}", titlealign=:left,
width=600, height=400, yscale=log10) for i in eachindex(f, ∇f, Hf)]
n_ax = [Axis(fig[i, 4], xlabel=L"$ $Median edge length", ylabel=L"$n$ error",
title=L"(%$(alph[i])4): $f_{%$i}", titlealign=:left,
width=600, height=400, yscale=log10) for i in eachindex(f, ∇f, Hf)]
for (f_idx, itp_alias, diff_alias) in keys(random_results)
_df = random_results[(f_idx, itp_alias, diff_alias)]
_df = filter(:itp_method => !=(:Nearest), _df)
clr = colors[itp_alias]
ls = linestyles[diff_alias]
_z_ax = z_ax[f_idx]
_∇_ax = ∇_ax[f_idx]
_H_ax = H_ax[f_idx]
_n_ax = n_ax[f_idx]
x = _df.median_edge_length
z_error = _df.z_rrmse
∇_error = _df.∇_rrmse
H_error = _df.H_rrmse
n_error = _df.n_error_median
lines!(_z_ax, x, z_error, color=clr, linestyle=ls, linewidth=7)
lines!(_∇_ax, x, ∇_error, color=clr, linestyle=ls, linewidth=7)
lines!(_H_ax, x, H_error, color=clr, linestyle=ls, linewidth=7)
lines!(_n_ax, x, n_error, color=clr, linestyle=ls, linewidth=7)
end
[Legend(
fig[i:(i+1), 6],
[line_elements, style_elements],
[string.(keys(colors)), string.(keys(linestyles))],
["Interpolant", "Differentiator"],
titlesize=78,
labelsize=78,
patchsize=(100, 30)
) for i in (1, 3, 5)]
resize_to_layout!(fig)
fig
```
```@raw html
<figure>
    <img src='../figures/median_edge_length_comparisons.png' alt='Median edge length comparisons'><br>
</figure>
```
Once again, the `Hiyoshi(2)` and `Farin(1)` methods seem preferable, and `Direct()` seems to give better results than `Iterative()`.
# Computation Times
It is important to note that using the smooth interpolants comes at a cost of greater running time. If $n$ is the number of natural neighbours around a point $\boldsymbol x_0$, then computing $f^{\text{HIY}}(\boldsymbol x_0)$ is about $\mathcal O(n^5)$, and $f^{\text{FAR}}(\boldsymbol x_0)$ is $\mathcal O(n^3)$. Derivative generation also has this complexity when using these interpolants (since it involves solving a least squares problem). Of course, this complexity doesn't typically matter so much since (1) many points are being evaluated using multithreading and (2) points have, on average, only six natural neighbours in most triangulations.
Let us explore here how long it takes to compute the interpolant as a function of the number of natural neighbours. There are many ways to measure this properly, e.g. collecting large samples of computation times from random data sets, but here we take a simple approach where we construct a triangulation with a point $\boldsymbol x_1 = \boldsymbol 0$ surrounded by $m$ points on a circle. This point $\boldsymbol x_1$ will have approximately $m$ natural neighbours. (Note that we do not care about the number of data points in the dataset since these interpolants are local.) The function we use for this is:
```julia
function circular_example(m) # extra points are added outside of the circular barrier for derivative generation
pts = [(cos(θ) + 1e-6randn(), sin(θ) + 1e-6randn()) for θ = LinRange(0, 2π, (m + 1))][1:end-1] # avoid cocircular points
extra_pts = NTuple{2, Float64}[]
while length(extra_pts) < 50
p = (5randn(), 5randn())
if norm(p) > 1.01
push!(extra_pts, p)
end
end
append!(pts, extra_pts)
tri = triangulate(pts)
return tri
end
```
To perform the benchmarks, we use:
```julia
function running_time_analysis(itp_method, m_range, g)
running_times = zeros(length(m_range))
for (i, m) in enumerate(m_range)
tri = circular_example(m)
z = [g(x, y) for (x, y) in DelaunayTriangulation.each_point(tri)]
itp = interpolate(tri, z; derivatives=true)
b = @benchmark $itp($0.0, $0.0; method=$itp_method)
running_times[i] = minimum(b.times) / 1e6 # ms
end
return DataFrame(
running_times = running_times,
method = itp_alias_map[itp_method],
m = m_range
)
end
function running_time_analysis(m_range, g)
df = DataFrame(
running_times = Float64[],
method = Symbol[],
m = Int[]
)
for itp_method in itp_methods
_running_times = running_time_analysis(itp_method, m_range, g)
append!(df, _running_times)
end
return df
end
```
Now let us benchmark and plot the results.
```julia
m_range = 3:20
g = f[end]
running_times = running_time_analysis(m_range, g)
fig = data(running_times) *
mapping(:m, :running_times) *
mapping(color=:method) *
visual(Scatter, markersize=14) |> plt ->
draw(plt; axis=(width=600, height=400, yscale=log10, xlabel=L"$ $Number of natural neighbours", ylabel=L"$t$ (ms)"))
vlines!(fig.figure[1, 1], [6], linewidth=3, linestyle=:dash, color=:black)
```
```@raw html
<figure>
    <img src='../figures/method_benchmarks.png' alt='Performance of interpolants as a function of the number of natural neighbours'><br>
</figure>
```
The benchmarks are shown above, with a vertical black line at $m = 6$ (the expected number of neighbours in a triangulation). We see that the `Triangle()` and `Nearest()` methods are the fastest, as we'd expect, and are of course independent of $m$. The other methods which are $C^0$ at the data sites, `Sibson(0)` and `Laplace()`, are fast and have about the same runtime (both of them essentially perform the same number of operations). When considering smooth interpolants, `Sibson(1)` is about the same as `Sibson(0)`. The remaining two methods, `Farin(1)` and `Hiyoshi(2)`, are the slowest as we expect. `Hiyoshi(2)` can even take more than 10 seconds with 20 natural neighbours (which is not typical, but could happen on real datasets).
Overall, while `Hiyoshi(2)` seems to be the best interpolant, `Farin(1)` could probably serve as a better default if you are concerned about runtime and don't need second derivative information.
To give another benchmark, here are some benchmarks where we take a structured triangulation and evaluate the interpolants at $10,000,000$ query points.
```julia
x = LinRange(0, 1, 25)
y = LinRange(0, 1, 25)
pts = vec([(x, y) for x in x, y in y])
tri = triangulate(pts)
z = [g(x, y) for (x, y) in DelaunayTriangulation.each_point(tri)]
itp = interpolate(tri, z; derivatives = true)
n = 10_000_000
xq = rand(n)
yq = rand(n)
```
```julia-repl
julia> @time itp(xq, yq; method = Sibson(0));
1.418889 seconds (8.42 k allocations: 76.902 MiB, 0.49% compilation time)
julia> @time itp(xq, yq; method = Triangle());
0.552120 seconds (8.27 k allocations: 76.854 MiB, 1.38% compilation time)
julia> @time itp(xq, yq; method = Nearest());
0.592610 seconds (8.27 k allocations: 76.854 MiB, 1.25% compilation time)
julia> @time itp(xq, yq; method = Laplace());
1.142635 seconds (8.27 k allocations: 76.854 MiB, 0.64% compilation time)
julia> @time itp(xq, yq; method = Sibson(1));
1.498346 seconds (8.27 k allocations: 76.854 MiB, 0.47% compilation time)
julia> @time itp(xq, yq; method = Farin(1));
2.187066 seconds (8.27 k allocations: 76.855 MiB, 0.36% compilation time)
julia> @time itp(xq, yq; method = Hiyoshi(2));
13.762652 seconds (9.26 k allocations: 76.920 MiB, 0.06% compilation time)
```
# Conclusion
Overall, the smooth interpolants have the best performance, with `Farin(1)` and `Hiyoshi(2)` typically beating most interpolants. `Hiyoshi(2)` is much slower than the other interpolants, though, and `Farin(1)` may be a preferable interpolant if $C^1$ continuity at the data sites is sufficient. For generating derivatives, the `Direct()` method seems to beat the `Iterative()` method in most situations. | NaturalNeighbours | https://github.com/DanielVandH/NaturalNeighbours.jl.git |
|
[
"MIT"
] | 1.3.5 | ac69089b500226d09cb611688ae67015bb107b58 | docs | 19818 | ```@meta
CurrentModule = NaturalNeighbours
```
# Differentiation Example
The purpose of this example is to explore derivative generation. For this, it is important to note that we are thinking of _generating_ derivatives rather than _estimating_ them: Following [Alfeld (1989)](https://doi.org/10.1016/B978-0-12-460515-2.50005-6), derivative generation only seeks to find derivatives that best fit our assumptions of the data, i.e. that give a most satisfactory interpolant, rather than trying to find exact derivative values. The complete quote for this by [Alfeld (1989)](https://doi.org/10.1016/B978-0-12-460515-2.50005-6) is below:
> It seems inevitable that in order to obtain an interpolant that is both local and smooth one has to supply derivative data. Typically, such data are not part of the interpolation problem and have to be made up from existing functional data. This process is usually referred as derivative estimation, but this is probably a misnomer. The objective is not to estimate existing but unknown values of derivatives. Instead, it is to generate values that will yield a satisfactory interpolant. Even if an underlying primitive function did exist it might be preferable to use derivative values that differ from the exact ones. (For example, a maximum error might be decreased by using the "wrong" derivative values.) Therefore, I prefer the term derivative generation rather than derivative estimation.
For the purpose of this exploration, we use Franke's test function. This function, introduced by [Franke and Nielson (1980)](https://doi.org/10.1002/nme.1620151110), is given by
```math
\begin{align*}
f(x, y) &= \frac34\exp\left\{-\frac{(9x-2)^2 + (9y-2)^2}{4}\right\} + \frac34\exp\left\{-\frac{(9x+1)^2}{49} - \frac{9y+1}{10}\right\} \\
&+ \frac12\exp\left\{-\frac{(9x-7)^2 + (9y-3)^2}{4}\right\} - \frac15\exp\left\{-(9x-4)^2 - (9y-7)^2\right\}.
\end{align*}
```
This function, and its derivatives, are defined below.
```julia
f = (x, y) -> 0.75 * exp(-((9 * x - 2)^2 + (9 * y - 2)^2) / 4) + 0.75 * exp(-(9 * x + 1)^2 / 49 - (9 * y + 1) / 10) + 0.5 * exp(-((9 * x - 7)^2 + (9 * y - 3)^2) / 4) - 0.2 * exp(-(9 * x - 4)^2 - (9 * y - 7)^2)
f′ = (x, y) -> [(exp(-(9 * x - 4)^2 - (9 * y - 7)^2) * (162 * x - 72)) / 5 - (3 * exp(-(9 * x - 2)^2 / 4 - (9 * y - 2)^2 / 4) * ((81 * x) / 2 - 9)) / 4 - (exp(-(9 * x - 7)^2 / 4 - (9 * y - 3)^2 / 4) * ((81 * x) / 2 - 63 / 2)) / 2 - (3 * exp(-(9 * y) / 10 - (9 * x + 1)^2 / 49 - 1 / 10) * ((162 * x) / 49 + 18 / 49)) / 4
(exp(-(9 * x - 4)^2 - (9 * y - 7)^2) * (162 * y - 126)) / 5 - (3 * exp(-(9 * x - 2)^2 / 4 - (9 * y - 2)^2 / 4) * ((81 * y) / 2 - 9)) / 4 - (exp(-(9 * x - 7)^2 / 4 - (9 * y - 3)^2 / 4) * ((81 * y) / 2 - 27 / 2)) / 2 - (27 * exp(-(9 * y) / 10 - (9 * x + 1)^2 / 49 - 1 / 10)) / 40]
f′′ = (x, y) -> [(162*exp(-(9 * x - 4)^2 - (9 * y - 7)^2))/5-(243*exp(-(9 * y) / 10 - (9 * x + 1)^2 / 49 - 1 / 10))/98-(243*exp(-(9 * x - 2)^2 / 4 - (9 * y - 2)^2 / 4))/8-(81*exp(-(9 * x - 7)^2 / 4 - (9 * y - 3)^2 / 4))/4+(3*exp(-(9 * y) / 10 - (9 * x + 1)^2 / 49 - 1 / 10)*((162*x)/49+18/49)^2)/4+(3*exp(-(9 * x - 2)^2 / 4 - (9 * y - 2)^2 / 4)*((81*x)/2-9)^2)/4+(exp(-(9 * x - 7)^2 / 4 - (9 * y - 3)^2 / 4)*((81*x)/2-63/2)^2)/2-(exp(-(9 * x - 4)^2 - (9 * y - 7)^2)*(162*x-72)^2)/5 (27*exp(-(9 * y) / 10 - (9 * x + 1)^2 / 49 - 1 / 10)*((162*x)/49+18/49))/40+(3*exp(-(9 * x - 2)^2 / 4 - (9 * y - 2)^2 / 4)*((81*x)/2-9)*((81*y)/2-9))/4+(exp(-(9 * x - 7)^2 / 4 - (9 * y - 3)^2 / 4)*((81*x)/2-63/2)*((81*y)/2-27/2))/2-(exp(-(9 * x - 4)^2 - (9 * y - 7)^2)*(162*x-72)*(162*y-126))/5
(27*exp(-(9 * y) / 10 - (9 * x + 1)^2 / 49 - 1 / 10)*((162*x)/49+18/49))/40+(3*exp(-(9 * x - 2)^2 / 4 - (9 * y - 2)^2 / 4)*((81*x)/2-9)*((81*y)/2-9))/4+(exp(-(9 * x - 7)^2 / 4 - (9 * y - 3)^2 / 4)*((81*x)/2-63/2)*((81*y)/2-27/2))/2-(exp(-(9 * x - 4)^2 - (9 * y - 7)^2)*(162*x-72)*(162*y-126))/5 (243*exp(-(9 * y) / 10 - (9 * x + 1)^2 / 49 - 1 / 10))/400+(162*exp(-(9 * x - 4)^2 - (9 * y - 7)^2))/5-(243*exp(-(9 * x - 2)^2 / 4 - (9 * y - 2)^2 / 4))/8-(81*exp(-(9 * x - 7)^2 / 4 - (9 * y - 3)^2 / 4))/4+(3*exp(-(9 * x - 2)^2 / 4 - (9 * y - 2)^2 / 4)*((81*y)/2-9)^2)/4+(exp(-(9 * x - 7)^2 / 4 - (9 * y - 3)^2 / 4)*((81*y)/2-27/2)^2)/2-(exp(-(9 * x - 4)^2 - (9 * y - 7)^2)*(162*y-126)^2)/5]
```
Here is the surface of $f$ along with its derivatives.
```julia
using CairoMakie
function plot_f(fig, x, y, vals, title, i, show_3d=true, zlabel="z")
ax = Axis(fig[1, i], xlabel="x", ylabel="y", width=600, height=600, title=title, titlealign=:left)
c = contourf!(ax, x, y, vals, colormap=:viridis, extendhigh=:auto)
if show_3d
ax = Axis3(fig[2, i], xlabel="x", ylabel="y", zlabel=zlabel, width=600, height=600, title=" ", titlealign=:left, azimuth=0.49)
surface!(ax, x, y, vals, color=vals, colormap=:viridis)
end
return c
end
x = LinRange(0, 1, 100)
y = LinRange(0, 1, 100)
z = [f(x, y) for x in x, y in y]
∇ = [f′(x, y) for x in x, y in y]
∇₁ = first.(∇)
∇₂ = last.(∇)
H = [f′′(x, y) for x in x, y in y]
H₁₁ = getindex.(H, 1)
H₁₂ = getindex.(H, 2)
H₂₂ = getindex.(H, 4)
fig = Figure(fontsize = 36)
plot_f(fig, x, y, z, "(a): f", 1, true, "z")
plot_f(fig, x, y, ∇₁, "(b): ∂f/∂x", 2, true, "∂f/∂x")
plot_f(fig, x, y, ∇₂, "(c): ∂f/∂y", 3, true, "∂f/∂y")
plot_f(fig, x, y, H₁₁, "(d): ∂²f/∂x²", 4, true, "∂²f/∂x²")
plot_f(fig, x, y, H₂₂, "(f): ∂²f/∂y²", 5, true, "∂²f/∂y²")
plot_f(fig, x, y, H₁₂, "(e): ∂²f/∂x∂y", 6, true, "∂²f/∂x∂y")
resize_to_layout!(fig)
fig
```
```@raw html
<figure>
    <img src='../figures/differentiation_exact_surfaces.png' alt='Plots of the interpolants'><br>
</figure>
```
For our analysis, we use the following data set:
```julia
using StableRNGs
using DelaunayTriangulation
using CairoMakie
rng = StableRNG(9199)
x = rand(rng, 500)
y = rand(rng, 500)
z = f.(x, y)
tri = triangulate([x'; y'])
vorn = voronoi(tri)
fig = Figure(fontsize=36, size=(1800, 600))
ax = Axis(fig[1, 1], xlabel="x", ylabel="y", width=600, height=600, title="(a): Data and triangulation", titlealign=:left)
scatter!(ax, x, y, color=:black, markersize=9)
triplot!(ax, tri, strokecolor=:black, strokewidth=2, show_convex_hull=false)
voronoiplot!(ax, vorn, strokecolor=:blue)
xlims!(ax, 0, 1)
ylims!(ax, 0, 1)
ax = Axis3(fig[1, 2], xlabel="x", ylabel="y", zlabel="z", width=600, height=600, azimuth=0.25, title="(b): Function values", titlealign=:left)
triangles = [T[j] for T in each_solid_triangle(tri), j in 1:3]
surface!(ax, x, y, z)
scatter!(ax, x, y, z, color=:black, markersize=9)
colgap!(fig.layout, 1, 75)
resize_to_layout!(fig)
fig
```
```@raw html
<figure>
    <img src='../figures/example_data.png' alt='Plots of the data'><br>
</figure>
```
# Generation at the Data Sites
To start with the example, we consider generating the derivatives at the data sites.
## Gradients
Let us first estimate some gradients using the direct method.
```julia
using NaturalNeighbours
using DelaunayTriangulation
using CairoMakie
using LinearAlgebra
function plot_f2(fig, x, y, vals, title, i, tri, levels, show_3d=true, zlabel="z")
triangles = [T[j] for T in each_solid_triangle(tri), j in 1:3]
ax = Axis(fig[1, i], xlabel="x", ylabel="y", width=600, height=600, title=title, titlealign=:left)
c = tricontourf!(ax, x, y, vals, triangulation=triangles', colormap=:viridis, extendhigh=:auto, levels=levels)
if show_3d
ax = Axis3(fig[2, i], xlabel="x", ylabel="y", zlabel=zlabel, width=600, height=600, title=" ", titlealign=:left, azimuth=0.49)
mesh!(ax, hcat(x, y, vals), triangles, color=vals, colormap=:viridis, colorrange=extrema(levels))
end
return c
end
function plot_gradients(∇g, tri, f′, x, y)
∇g1 = first.(∇g)
∇g2 = last.(∇g)
fig = Figure(fontsize=36, size=(2400, 600))
plot_f2(fig, x, y, ∇g1, "(a): ∂f̂/∂x", 1, tri, -3.5:0.5:3.0, true, "∂f̂/∂x")
plot_f2(fig, x, y, ∇g2, "(b): ∂f̂/∂y", 3, tri, -3.5:0.5:3.0, true, "∂f̂/∂y")
plot_f2(fig, x, y, getindex.(f′.(x, y), 1), "(c): ∂f/∂x", 2, tri, -3.5:0.5:3.0, true, "∂f/∂x")
plot_f2(fig, x, y, getindex.(f′.(x, y), 2), "(d): ∂f/∂y", 4, tri, -3.5:0.5:3.0, true, "∂f/∂y")
plot_f2(fig, x, y, norm.(collect.(∇g) .- f′.(x, y)), "(e): Gradient error", 5, tri, 0:0.1:0.5, true, "|∇ε|")
resize_to_layout!(fig)
ε = 100sqrt(sum((norm.(collect.(∇g) .- f′.(x, y))) .^ 2) / sum(norm.(∇g) .^ 2))
return fig, ε
end
points = [x'; y']
z = f.(x, y)
tri = triangulate(points)
∇g = generate_gradients(tri, z)
fig, ε = plot_gradients(∇g, tri, f′, x, y)
```
```julia-repl
julia> ε
10.251180094083372
```
```@raw html
<figure>
    <img src='../figures/gradient_data.png' alt='Gradients'><br>
</figure>
```
A 10% error is not terrible, and the derivatives we obtain are reasonable.
Let's also look at the results when we jointly estimate the gradients and Hessians (this is the default option).
```julia
∇gr, _ = generate_derivatives(tri, z; method=Direct())
fig, ε = plot_gradients(∇gr, tri, f′, x, y)
```
```julia-repl
julia> ε
7.717791597731752
```
```@raw html
<figure>
    <img src='../figures/joint_gradient_data.png' alt='Joint Gradients'><br>
</figure>
```
The figures are smoother, and the error has now decreased to 7.7% -- an improvement. We could also try and see what happens if we use the `Iterative()` approach, using the first gradients we got as the initial gradients, or we could see what happens with `use_cubic_terms=false` for the `Direct()` method, but we won't show that here.
## Hessians
Let's now look at estimating Hessians. We first consider the direct approach, including cubic terms.
```julia
to_mat(H::NTuple{3,Float64}) = [H[1] H[3]; H[3] H[2]]
function plot_hessians(H, tri, f′′, x, y)
H₁₁ = getindex.(H, 1)
H₁₂ = getindex.(H, 3)
H₂₂ = getindex.(H, 2)
fig = Figure(fontsize=36, size=(2400, 600))
plot_f2(fig, x, y, H₁₁, "(a): ∂²f̂/∂x²", 1, tri, -35:5:30, true, "∂²f̂/∂x²")
plot_f2(fig, x, y, H₂₂, "(c): ∂²f̂/∂y²", 3, tri, -35:5:30, true, "∂²f̂/∂y²")
plot_f2(fig, x, y, H₁₂, "(e): ∂²f̂/∂x∂y", 5, tri, -35:5:30, true, "∂²f̂/∂x∂y")
plot_f2(fig, x, y, getindex.(f′′.(x, y), 1), "(b): ∂²f/∂x²", 2, tri, -35:5:30, true, "∂²f/∂x²")
plot_f2(fig, x, y, getindex.(f′′.(x, y), 4), "(d): ∂²f/∂y²", 4, tri, -35:5:30, true, "∂²f/∂y²")
plot_f2(fig, x, y, getindex.(f′′.(x, y), 2), "(f): ∂²f/∂x∂y", 6, tri, -35:5:30, true, "∂²f/∂x∂y")
resize_to_layout!(fig)
ε = 100sqrt(sum((norm.(to_mat.(H) .- f′′.(x, y))) .^ 2) / sum(norm.(to_mat.(H)) .^ 2))
return fig, ε
end
_, Hg = generate_derivatives(tri, z)
fig, ε = plot_hessians(Hg, tri, f′′, x, y)
```
```julia-repl
julia> ε
42.085578794275605
```
```@raw html
<figure>
    <img src='../figures/hessian_data.png' alt='Cubic Hessians'><br>
</figure>
```
The error is certainly quite large, but remember that we are doing derivative _generation_ here rather than _estimation_. Judging from the figures themselves, the derivatives we have obtained are actually pretty good.
Let's now see what happens if we only go up to quadratic terms.
```julia
_, Hg = generate_derivatives(tri, z, use_cubic_terms=false)
fig, ε = plot_hessians(Hg, tri, f′′, x, y)
```
```julia-repl
julia> ε
35.20873081559232
```
```@raw html
<figure>
    <img src='../figures/hessian_data_no_cubic.png' alt='Quadratic Hessians'><br>
</figure>
```
The error has actually decreased, and the figures do indeed look better. So, in this case, including cubic terms does not improve the results significantly (sometimes it does).
What if we used the iterative approach?
```julia
_, Hg = generate_derivatives(tri, z, method=Iterative()) # the gradients will be generated first automatically
fig, ε = plot_hessians(Hg, tri, f′′, x, y)
```
```julia-repl
julia> ε
39.58481626576425
```
```@raw html
<figure>
    <img src='../figures/hessian_data_iterative.png' alt='Iterative Hessians'><br>
</figure>
```
The results are slightly worse, and varying `alpha` doesn't seem to do much.
# Generation Away from the Data Sites
Now let's consider derivative generation away from the data sites. The function `differentiate` is used for this. We first construct our interpolant, ensuring we set `derivatives=true` so that we get the gradients at the data sites first, and then we `differentiate`.
```julia
itp = interpolate(tri, z; derivatives=true, method = Direct(), use_cubic_terms=false)
∂ = differentiate(itp, 1)
```
The second argument specifies the order of the resulting derivatives. Since we specify order 1, we will get gradients $(\partial_xf,\partial_yf)$.
Let's now define the grid for differentiating.
```julia
xg = LinRange(0, 1, 500)
yg = LinRange(0, 1, 500)
_x = vec([x for x in xg, _ in yg])
_y = vec([y for _ in xg, y in yg])
```
We can now evaluate `∂`. To approximate the function values at each point, we will use the `Sibson(1)` method, since this will incorporate the gradient information. I would really like to eventually get Hiyoshi's $C^2$ interpolant, as discussed in [Section 3.2.7.3 here](https://kluedo.ub.rptu.de/frontdoor/deliver/index/docId/2104/file/diss.bobach.natural.neighbor.20090615.pdf), as this will also give us $C^2$ continuity at the derivative sites and thus give smoother surfaces (noting some complexity issues discussed in Section 6.5 of the linked thesis), but I've just not found the time to comprehend how to best implement it yet / digest the spline notation (see issue [#1](https://github.com/DanielVandH/NaturalNeighbours.jl/issues/1) if you are interested on contributing to this). Note also that, just as with the interpolation methods, it is best to give vectors to `∂`. Lastly, since we are evaluating away from the data sites, remember that the Sibson coordinates are now incorporated into the weights of the associated weighted least squares problem (that you could disable if you for some reason wanted to with `use_sibson_weight=false`).
```julia
∇g = ∂(_x, _y; interpolant_method = Sibson(1))
```
Let's now plot our gradients. Note that there are some `Inf` values in the computed gradients, and these correspond to points evaluated outside of the convex hull of our data sites.
```julia
function rrmserr(z, ẑ)
num = 0.0
den = 0.0
for (zᵢ, ẑᵢ) in zip(z, ẑ)
if all(isfinite, (zᵢ..., ẑᵢ...))
num += norm(zᵢ .- ẑᵢ)^2
den += norm(ẑᵢ)^2
end
end
# num /= length(ẑ)
return 100sqrt(num / den)
end
function plot_f2(fig, x, y, vals, title, i, levels, show_3d=true, zlabel="z")
ax = Axis(fig[1, i], xlabel="x", ylabel="y", width=600, height=600, title=title, titlealign=:left)
c = contourf!(ax, x, y, vals, colormap=:viridis, extendhigh=:auto, levels=levels)
if show_3d
ax = Axis3(fig[2, i], xlabel="x", ylabel="y", zlabel=zlabel, width=600, height=600, title=" ", titlealign=:left, azimuth=0.49)
surface!(ax, x, y, vals, color=vals, colormap=:viridis, colorrange=extrema(levels))
end
return c
end
function plot_gradients(∇g, f′, xg, yg)
∇g = reshape(∇g, (length(xg), length(yg)))
∇g1 = first.(∇g)
∇g2 = last.(∇g)
∇f = [f′(x, y) for x in xg, y in yg]
fig = Figure(fontsize=36, size=(2400, 600))
plot_f2(fig, xg, yg, ∇g1, "(a): ∂f̂/∂x", 1, -3.5:0.5:3.0, true, "∂f̂/∂x")
plot_f2(fig, xg, yg, ∇g2, "(b): ∂f̂/∂y", 3, -3.5:0.5:3.0, true, "∂f̂/∂y")
plot_f2(fig, xg, yg, first.(∇f), "(c): ∂f/∂x", 2, -3.5:0.5:3.0, true, "∂f/∂x")
plot_f2(fig, xg, yg, last.(∇f), "(d): ∂f/∂y", 4, -3.5:0.5:3.0, true, "∂f/∂y")
plot_f2(fig, xg, yg, norm.(collect.(∇g) .- ∇f), "(e): Gradient error", 5, 0:0.1:0.5, true, "|∇ε|")
resize_to_layout!(fig)
ε = rrmserr(∇f, collect.(∇g))
return fig, ε
end
fig, ε = plot_gradients(∇g, f′, xg, yg)
```
```julia-repl
julia> ε
13.185747607565729
```
```@raw html
<figure>
<img src='../figures/gradient_surface.png', alt'Evaluated Gradient'><br>
</figure>
```
There are of course some strange artifacts near the convex hull, but the results are not terrible. Let's see what happens to the error if we instead use the other interpolant methods.
```julia
other_methods = [Sibson(), Laplace(), Nearest(), Triangle()]
∇gs = [∂(_x, _y; interpolant_method=method) for method in other_methods]
∇f = [f′(x, y) for x in xg, y in yg]
εs = [rrmserr(∇f, collect.(∇g)) for ∇g in ∇gs]
```
```julia-repl
julia> hcat(other_methods, εs)
4×2 Matrix{Any}:
Sibson{0}() 28.6753
Laplace{0}() 25.499
Nearest{0}() 69.5744
Triangle{0}() 27.7737
```
Of course, the other methods are much worse.
Now let's go up to second order.
```julia
function plot_hessians(H, f′′, xg, yg)
H = reshape(H, (length(xg), length(yg)))
H₁₁ = getindex.(H, 1)
H₁₂ = getindex.(H, 3)
H₂₂ = getindex.(H, 2)
Hf = [f′′(x, y) for x in xg, y in yg]
fig = Figure(fontsize=36, size=(2400, 600))
plot_f2(fig, xg, yg, H₁₁, "(a): ∂²f̂/∂x²", 1, -35:5:30, true, "∂²f̂/∂x²")
plot_f2(fig, xg, yg, H₂₂, "(c): ∂²f̂/∂y²", 3, -35:5:30, true, "∂²f̂/∂y²")
plot_f2(fig, xg, yg, H₁₂, "(e): ∂²f̂/∂x∂y", 5, -35:5:30, true, "∂²f̂/∂x∂y")
plot_f2(fig, xg, yg, getindex.(Hf, 1), "(b): ∂²f/∂x²", 2, -35:5:30, true, "∂²f/∂x²")
plot_f2(fig, xg, yg, getindex.(Hf, 4), "(d): ∂²f/∂y²", 4, -35:5:30, true, "∂²f/∂y²")
plot_f2(fig, xg, yg, getindex.(Hf, 2), "(f): ∂²f/∂x∂y", 6, -35:5:30, true, "∂²f/∂x∂y")
resize_to_layout!(fig)
ε = rrmserr(Hf, to_mat.(H))
return fig, ε
end
∂ = differentiate(itp, 2)
∇Hg = ∂(_x, _y; interpolant_method=Sibson(1), method = Iterative())
∇g = first.(∇Hg)
Hg = last.(∇Hg)
zlims!(figH.content[4], -25, 25)
fig∇, ε∇ = plot_gradients(∇g, f′, xg, yg)
figH, εH = plot_hessians(Hg, f′′, xg, yg)
```
```julia-repl
julia> ε∇
19.07546882353911
julia> εH
51.1267212244942
```
```@raw html
<figure>
<img src='../figures/gradient_surface_2.png', alt'Evaluated Gradient'><br>
</figure>
```
```@raw html
<figure>
<img src='../figures/hessian_surface.png', alt'Evaluated Hessian'><br>
</figure>
```
The gradients actually look better here, despite the greater error, especially around the convex hull. The Hessians are a bit problematic around the convex hull especially, but we are really asking a lot of the interpolant to get Hessians unfortunately.
Let's see if the direct approach can give us any improvements (the default is `Iterative()` since we have derivative information in the interpolant).
```julia
∇Hg = ∂(_x, _y; interpolant_method=Sibson(1), method=Direct())
∇g = first.(∇Hg)
Hg = last.(∇Hg)
fig∇, ε∇ = plot_gradients(∇g, f′, xg, yg)
figH, εH = plot_hessians(Hg, f′′, xg, yg)
zlims!(figH.content[4], -25, 25)
```
```julia-repl
julia> ε∇
9.853286514069882
julia> εH
46.7610990050276
```
```@raw html
<figure>
<img src='../figures/gradient_surface_2_direct.png', alt'Evaluated Gradient'><br>
</figure>
```
```@raw html
<figure>
<img src='../figures/hessian_surface_direct.png', alt'Evaluated Hessian'><br>
</figure>
```
Indeed, both the gradients and the Hessians appear to have improved, with some difficulties at the convex hull. Perhaps a better way to measure the error is to only include points that are away fro the convex hull. The following function can do this for us:
```julia
function rrmserr(z, ẑ, tri, x, y)
num = 0.0
den = 0.0
points = get_point(tri)
ch = get_convex_hull_vertices(tri)
for (zᵢ, ẑᵢ, xᵢ, yᵢ) in zip(z, ẑ, x, y)
q = (xᵢ, yᵢ)
δ = DelaunayTriangulation.distance_to_polygon(q, points, ch)
if δ > 1e-4 && all(isfinite, (zᵢ..., ẑᵢ...))
num += norm(zᵢ .- ẑᵢ)^2
den += norm(ẑᵢ)^2
end
end
# num /= length(ẑ)
return 100sqrt(num / den)
end
```
If we instead use this metric, then:
```julia-repl
julia> ε∇_nohull = rrmserr(f′.(_x, _y), ∇g, ∂, _x, _y)
7.479964687679311
julia> εH_nohull = rrmserr(f′′.(_x, _y), to_mat.(Hg), ∂, _x, _y)
38.884740966379056
```
The errors are smaller, though not by much. | NaturalNeighbours | https://github.com/DanielVandH/NaturalNeighbours.jl.git |
|
[
"MIT"
] | 1.3.5 | ac69089b500226d09cb611688ae67015bb107b58 | docs | 10755 | ```@meta
CurrentModule = NaturalNeighbours
```
# Differentiation
In this section, we give some of the mathematical detail used for implementing derivative generation, following this [thesis](https://kluedo.ub.rptu.de/frontdoor/deliver/index/docId/2104/file/diss.bobach.natural.neighbor.20090615.pdf). The discussion that follows is primarily sourced from Chapter 6 of the linked thesis. While it is possible to generate derivatives of arbitrary order, our discussion here in this section will be limited to gradient and Hessian generation. These ideas are implemented by the `generate_gradients` and `generate_derivatives` functions, which you should use via the `differentiate` function.
# Generation at Data Sites
Let us first consider generating derivatives at the data points used to define the interpolant, $(\boldsymbol x_1, z_1), \ldots, (\boldsymbol x_n, z_n)$. We consider generating the derivatives at a data site $\boldsymbol x_0$, where $\boldsymbol x_0$ is some point in $(\boldsymbol x_1,\ldots,\boldsymbol x_n)$ so that we also know $z_0$.
## Direct Generation
Let us consider a direct approach first. In this approach, we generate gradients and Hessians jointly. We approximate the underlying function $f$ by a Taylor series expansion,
```math
\tilde f(\boldsymbol x) = z_0 + \tilde f_1(\boldsymbol x) + \tilde f_2(\boldsymbol x) + \tilde f_3(\boldsymbol x),
```
where
```math
\begin{align*}
\tilde f_1(\boldsymbol x) &= \frac{\partial f(\boldsymbol x_0)}{\partial x}(x-x_0) + \frac{\partial f(\boldsymbol x_0)}{\partial y}(y - y_0), \\
\tilde f_2(\boldsymbol x) &= \frac12\frac{\partial^2 f(\boldsymbol x_0)}{\partial x^2}(x - x_0)^2 + \frac12{\partial^2 f(\boldsymbol x_0)}{\partial y^2}(y - y_0)^2 + \frac{\partial^2 f(\boldsymbol x_0)}{\partial x\partial y}(x-x_0)(y-y_0), \\
\tilde f_3(\boldsymbol x) &= \frac16\frac{\partial^3 f(\boldsymbol x_0)}{\partial x^3}(x-x_0)^3 + \frac16\frac{\partial^3 f(\boldsymbol x_0)}{\partial y^3}(y-y_0)^3 \\&+ \frac12\frac{\partial^3 f(\boldsymbol x_0)}{\partial x^2\partial y}(x-x_0)^2(y-y_0) + \frac12\frac{\partial^3 f(\boldsymbol x_0)}{\partial x\partial y^2}(x-x_0)(y-y_0)^2.
\end{align*}
```
For gradient generation only, we need only take up to $\tilde f_1$, but for Hessian generation we could include up to $\tilde f_2$ or up to $\tilde f_3$. Whatever option we choose, the neighbourhood that we use for approximating the derivatives needs to be chosen to match the order of the approximation.
To choose the neighbourhood, define the $d$-times iterated neighbourhood of $\boldsymbol x_0$ by
```math
N_0^d = \bigcup_{i \in N_0^{d-1}} N_i \setminus \{0\}, \quad N_0^1 = N_0.
```
Here, the neighbourhoods are the _Delaunay neighbourhoods_, not the natural neighbours -- for points $\boldsymbol x_0$ that are not one of the existing data sites, natural neighbours are used instead. An example of $N_0^1$ and $N_0^2$ both at a data site and away from a data site is shown below, where $\boldsymbol x_0$ is shown in magenta, $N_0^1$ in blue, and $N_0^2$ in red (and also includes the blue points).
```@raw html
<figure>
<img src='../figures/taylor_neighbourhood.png', alt'Iterated neighbourhood examples'><br>
</figure>
```
### Gradients
Let's now use the notation defined above to define how gradients are generated in `generate_derivatives`, without having to estimate Hessians at the same time. The neighbourhood we use is $N_0^1$, and we take $\tilde f = z_0 + \tilde f_1$. We define the following weighted least squares problem for the estimates $\beta_x$, $\beta_y$ of $\partial f(\boldsymbol x_0)/\partial x$ and $\partial f(\boldsymbol x_0)/\partial y$, respectively:
```math
(\beta_x, \beta_y) = \text{argmin}_{(\beta_x, \beta_y)} \sum_{i \in \mathcal N_0^1} W_i \left(\tilde z_i - \beta_1\tilde x_i - \beta_2\tilde y_i\right)^2,
```
where $W_i = 1/\|\boldsymbol x_i - \boldsymbol x_i\|^2$, $\tilde z_i = z_i-z_0$, $\tilde x_i=x_i-x_0$, and $\tilde y_i = y_i-y_0$. This weighted least squares problem is solved by solving the associated linear system $\tilde{\boldsymbol X}\boldsymbol{\beta} = \tilde{\boldsymbol z}$, where $\tilde{\boldsymbol X} \in \mathbb R^{m \times 2}$ is defined by $(\tilde{\boldsymbol X})_{i1} = \sqrt{W_i}(x_i - x_0)$ and $(\tilde{\boldsymbol X})_{i2} = \sqrt{W_i}(y_i - y_0)$, $\boldsymbol{\beta} = (\beta_1,\beta_2)^T$, and $\tilde{\boldsymbol z} = (\tilde z_1,\ldots,\tilde z_m)^T$.
### Joint Gradients and Hessians
Hessians can similarly be estimated, although currently they must be estimated jointly with gradients. We take $\tilde f = z_0 + \tilde f_1 + \tilde f_2$ in the following discussion, although taking up to $\tilde f_3$ has an obvious extension. (The reason to also allow for estimating up to the cubic terms is because sometimes it provides better estimates for the Hessians than only going up to the quadratic terms -- see the examples in Chapter 6 [here](https://kluedo.ub.rptu.de/frontdoor/deliver/index/docId/2104/file/diss.bobach.natural.neighbor.20090615.pdf).)
Defining $\beta_1 = \partial f(\boldsymbol x_0)/\partial x$, $\beta_2 = \partial f(\boldsymbol x_0)/\partial y$, $\beta_3 = \partial^2 f(\boldsymbol x_0)/\partial x^2$, $\beta_4 = \partial^2 f(\boldsymbol x_0)/\partial y^2$, and $\beta_5 = \partial^2 f(\boldsymbol x_0)/\partial x\partial y$, we have the following weighted least squares problem with $\boldsymbol{\beta}=(\beta_1,\beta_2,\beta_3,\beta_4,\beta_5)^T$:
```math
\boldsymbol{\beta} = \text{argmin}_{\boldsymbol{\beta}} \sum_{i \in N_0^2} W_i\left(\tilde z_i - \beta_1\tilde x_i - \beta_2\tilde y_i - \frac12\beta_3\tilde x_i^2 - \frac12\beta_4\tilde y_i^2 - \beta_5\tilde x_i\tilde y_i\right)^2,
```
using similar notation as in the gradient case. (In the cubic case, use $N_0^3$ and go up to $\beta_9$, discarding $\beta_6,\ldots,\beta_9$ at the end.) The associated linear system in this case has matrix $\tilde{\boldsymbol X} \in \mathbb R^{m \times 2}$ ($m = |N_0^2|$) defined by $(\tilde{\boldsymbol X})_{i1} = \sqrt{W_i}\tilde x_i$, $(\tilde{\boldsymbol X})_{i2} = \sqrt{W_i}\tilde y_i$, $(\tilde{\boldsymbol X})_{i3} = \sqrt{W_i}\tilde x_i^2$, $(\tilde{\boldsymbol X})_{i4} = \sqrt{W_i}\tilde y_i^2$, and $(\tilde{\boldsymbol X})_{i5} = \sqrt{W_i}\tilde x_i\tilde y_i$.
## Iterative Generation
Now we discuss iterative generation. Here, we suppose that we have already estimated gradients at all of the data sites $\boldsymbol x_i$ neighbouring $\boldsymbol x_0$ using the direct approach. To help with the notation, we will let $\boldsymbol g_i^1$ denote our initial estimate of the gradient at a point $\boldsymbol x_i$, and the gradient and Hessian that we are now estimating at $\boldsymbol x_0$ are given by $\boldsymbol g_0^2$ and $\boldsymbol H_0^2$, respectively.
We define the following loss function, where $\beta_i = 1/\|\boldsymbol x_i-\boldsymbol x_0\|$ and $\alpha \in (0, 1)$:
```math
\begin{align*}
\mathcal L(\boldsymbol g_0^2, \boldsymbol H_0^2) &= \sum_{i \in \mathcal N_0} W_i\left[\alpha \mathcal L_1^i(\boldsymbol g_0^2, \boldsymbol H_0^2)^2 + (1-\alpha)L_2^i(\boldsymbol g_0^2, \boldsymbol H_0^2)^2\right], \\
\mathcal L_1^i(\boldsymbol g_0^2, \boldsymbol H_0^2)^2 &= \left[\frac12(\boldsymbol x_i-\boldsymbol x_0)^T\boldsymbol H_0^2(\boldsymbol x_i - \boldsymbol x_0) + (\boldsymbol x_i-\boldsymbol x_0)^T\boldsymbol g_0^2 + z_0-z_i\right]^2, \\
\mathcal L_2^i(\boldsymbol g_0^2, \boldsymbol H_0^2) &= \left\|\boldsymbol H_0^2 \boldsymbol x_i + \boldsymbol g_0^2 - \boldsymbol g_i^1\right\|^2.
\end{align*}
```
This objective function combines the losses between $\tilde f(\boldsymbol x_i)$ and $z_i$, and between $\boldsymbol \nabla \tilde f(\boldsymbol x_i)$ and $\boldsymbol g_i^1$, weighting them by some parameter $\alpha \in (0, 1)$ (typically $\alpha \approx 0.1$ is a reasonable default). After some basic algebra and calculus, it is possible to show that minimising $\mathcal L$ is the same as solving
```math
\overline{\boldsymbol A}^T\overline{\boldsymbol w} + \overline{\boldsymbol B}^T\overline{\boldsymbol g}_1 + \overline{\boldsymbol C}^T\overline{\boldsymbol g}_2 = \left(\overline{\boldsymbol A}^T\overline{\boldsymbol A} + \overline{\boldsymbol B}^T\overline{\boldsymbol B} + \overline{\boldsymbol C}^T\overline{\boldsymbol C}\right)\boldsymbol \theta,
```
where we define:
```math
\begin{align*}
\tilde z_i &= z_i - z_0, \\
W_i &= \frac{1}{\|\boldsymbol x_i-\boldsymbol x_0\|^2},\\
\gamma_i &= \sqrt{\frac{\alpha}{W_i}}, \\
\gamma_i^\prime &= \sqrt{\frac{1-\alpha}{W_i}},\\
\overline{\boldsymbol A}_{i,:} &= \gamma_i \begin{bmatrix} x_i-x_0 & y_i-y_0 & \frac12(x_i-x_0)^2 & \frac12(y_i-y_0)^2 & (x_i-x_0)(y_i-y_0) \end{bmatrix}, \\
\overline{\boldsymbol B}_{i, :} &= \gamma_i^\prime \begin{bmatrix} 1 & 0 & x_i - x_0 & 0 & y_i - y_0 \end{bmatrix}, \\
\overline{\boldsymbol C}_{i, :} &= \gamma_i^\prime \begin{bmatrix} 0 & 1 & 0 & y_i-y_0 & x_i-x_0 \end{bmatrix}, \\
\overline{\boldsymbol w} &= \gamma_i \tilde z_i, \\
\overline{\boldsymbol g}_1 &= \gamma_i^\prime g_{i1}, \\
\overline{\boldsymbol g}_2 &= \gamma_i^\prime g_{i2}, \\
\boldsymbol{\bm\theta} &= \begin{bmatrix} \frac{\partial f(\boldsymbol x_0)}{\partial x} & \frac{\partial f(\boldsymbol x_0)}{\partial y} & \frac{\partial^2 f(\boldsymbol x_0)}{\partial x^2} & \frac{\partial f(\boldsymbol x_0)}{\partial y^2} & \frac{\partial f(\boldsymbol x_0)}{\partial x\partial y} \end{bmatrix}^T.
\end{align*}
```
To solve this linear system, let
```math
\boldsymbol D = \begin{bmatrix} \overline{\boldsymbol A} \\ \overline{\boldsymbol B} \\ \overline{\boldsymbol C} \end{bmatrix}, \quad \boldsymbol c = \begin{bmatrix} \overline{\boldsymbol w} \\ \overline{\boldsymbol g}_1 \\ \overline{\boldsymbol g}_2 \end{bmatrix},
```
so that $\boldsymbol D^T\boldsymbol D\boldsymbol\theta = \boldsymbol D^T\boldsymbol c$. These are just the normal equations for $\boldsymbol D\boldsymbol \theta = \boldsymbol c$, thus we can estimate the gradients and Hessians by simply solving $\boldsymbol D\boldsymbol \theta = \boldsymbol c$.
# Generation Away from the Data Sites
It is possible to extend these ideas so that we can approximate the derivative at any point $\boldsymbol x_0 \in \mathcal C(\boldsymbol X)$. Using the associated interpolant, simply approximate $z_0$ with the value of the interpolant at $\boldsymbol x_0$, and then replace $W_i$ by $\lambda_i/\|\boldsymbol x_i-\boldsymbol x_0\|$, where $\lambda_i$ is the Sibson coordinate at $\boldsymbol x_i$ relative to $\boldsymbol x_0$. If using a direct approach to approximate gradients and Hessians, Sibson coordinates cannot be used (because you can't extend the weights out to $N_0^2$) and so $W_i$ remains as is in that case. Note that the $N_0$ neighbourhoods are now the sets of natural neighbours.
| NaturalNeighbours | https://github.com/DanielVandH/NaturalNeighbours.jl.git |
|
[
"MIT"
] | 1.3.5 | ac69089b500226d09cb611688ae67015bb107b58 | docs | 1062 | ```@meta
CurrentModule = NaturalNeighbours
```
# NaturalNeighbours
Documentation for [NaturalNeighbours](https://github.com/DanielVandH/NaturalNeighbours.jl).
This is a package for working with natural neighbours of planar point sets, enabling scattered data (or spatial) interpolation via [natural neighbour interpolation](https://en.wikipedia.org/wiki/Natural_neighbor_interpolation), as well as derivative generation. We use [DelaunayTriangulation.jl](https://github.com/DanielVandH/DelaunayTriangulation.jl) to define the spatial information. Much of the work in this package is based on [this great thesis](https://kluedo.ub.rptu.de/frontdoor/deliver/index/docId/2104/file/diss.bobach.natural.neighbor.20090615.pdf).
Please see the sections in the sidebar for some more discussion. The relevant docstrings for this package are shown below.
```@docs
interpolate
differentiate
generate_gradients
generate_derivatives
AbstractInterpolator
AbstractDifferentiator
Sibson
Hiyoshi
Farin
Laplace
Triangle
Nearest
Direct
Iterative
identify_exterior_points
```
| NaturalNeighbours | https://github.com/DanielVandH/NaturalNeighbours.jl.git |
|
[
"MIT"
] | 1.3.5 | ac69089b500226d09cb611688ae67015bb107b58 | docs | 8560 | ```@meta
CurrentModule = NaturalNeighbours
```
# Interpolation Example
Let us give an example of how interpolation can be performed. We consider the function
```math
f(x, y) = \frac19\left[\tanh\left(9y-9x\right) + 1\right].
```
First, let us generate some data.
```julia
using NaturalNeighbours
using CairoMakie
using StableRNGs
f = (x, y) -> (1 / 9) * (tanh(9 * y - 9 * x) + 1)
rng = StableRNG(123)
x = rand(rng, 100)
y = rand(rng, 100)
z = f.(x, y)
```
We can now construct our interpolant. To use the Sibson-1 interpolant, we need to have gradient information, so we specify `derivatives=true` to make sure these get generated at the data sites.
```julia
itp = interpolate(x, y, z; derivatives=true)
```
This `itp` is now callable. Let's generate a grid to evaluate `itp` at.
```julia
xg = LinRange(0, 1, 100)
yg = LinRange(0, 1, 100)
_x = vec([x for x in xg, _ in yg])
_y = vec([y for _ in xg, y in yg])
```
We use vectors for this evaluation rather than evaluating like, say, `[itp(x, y) for x in xg, y in yg]`, since `itp`'s evaluation will use multithreading if we give it vectors. Consider the following times (including the time to make the vectors in the vector case):
```julia
using BenchmarkTools
function testf1(itp, xg, yg, parallel)
_x = vec([x for x in xg, _ in yg])
_y = vec([y for _ in xg, y in yg])
return itp(_x, _y; parallel=parallel)
end
testf2(itp, xg, yg) = [itp(x, y) for x in xg, y in yg]
b1 = @benchmark $testf1($itp, $xg, $yg, $true)
b2 = @benchmark $testf2($itp, $xg, $yg)
b3 = @benchmark $testf1($itp, $xg, $yg, $false)
```
```julia-repl
julia> b1
BenchmarkTools.Trial: 2310 samples with 1 evaluation.
Range (min … max): 1.333 ms … 165.550 ms ┊ GC (min … max): 0.00% … 98.28%
Time (median): 1.781 ms ┊ GC (median): 0.00%
Time (mean ± σ): 2.155 ms ± 3.446 ms ┊ GC (mean ± σ): 3.27% ± 2.04%
▄▆█▄▁
▂▂▄▃▅▆█████▆▅▃▃▂▂▂▂▂▂▂▃▂▂▃▃▃▃▃▃▄▃▄▄▄▄▅▅▄▄▄▄▄▃▃▃▄▃▂▂▂▂▁▂▁▁▁▁ ▃
1.33 ms Histogram: frequency by time 3.33 ms <
Memory estimate: 254.33 KiB, allocs estimate: 146.
julia> b2
BenchmarkTools.Trial: 257 samples with 1 evaluation.
Range (min … max): 14.790 ms … 27.120 ms ┊ GC (min … max): 0.00% … 0.00%
Time (median): 18.136 ms ┊ GC (median): 0.00%
Time (mean ± σ): 19.531 ms ± 4.177 ms ┊ GC (mean ± σ): 0.00% ± 0.00%
▅█
▆██▄▄▄▄▂▅▁▁▄▃▄▅▃▃▃▃▁▃▃▁▃▄▃▃▃▂▂▄▂▃▃▂▄▂▄▄▃▂▄▄▃▃▄▃▄▄▃▄▃▃▄▄▂▄▅▄ ▃
14.8 ms Histogram: frequency by time 26.7 ms <
Memory estimate: 78.17 KiB, allocs estimate: 2.
julia> b3
BenchmarkTools.Trial: 267 samples with 1 evaluation.
Range (min … max): 14.986 ms … 27.264 ms ┊ GC (min … max): 0.00% … 0.00%
Time (median): 17.354 ms ┊ GC (median): 0.00%
Time (mean ± σ): 18.710 ms ± 3.750 ms ┊ GC (mean ± σ): 0.00% ± 0.00%
▄█
▄██▇▄▃▃▄▃▃▃▃▃▃▃▄▄▂▃▃▃▃▃▃▃▃▃▃▃▃▂▁▃▃▃▃▃▃▃▃▃▂▃▃▂▃▃▂▂▁▂▃▃▂▃▂▃▄▃ ▃
15 ms Histogram: frequency by time 26.7 ms <
Memory estimate: 234.67 KiB, allocs estimate: 10.
```
See that `itp(_x, _y)` took about 1.3 ms, while the latter two approaches both take around 15 ms. Pretty impressive given that we are evaluating $100^2$ points - this is a big advantage of local interpolation making parallel evaluation so cheap and simple. This effect can be made even more clear if we use more points:
```julia
xg = LinRange(0, 1, 1000)
yg = LinRange(0, 1, 1000)
b1 = @benchmark $testf1($itp, $xg, $yg, $true)
b2 = @benchmark $testf2($itp, $xg, $yg)
b3 = @benchmark $testf1($itp, $xg, $yg, $false)
```
```julia-repl
julia> b1
BenchmarkTools.Trial: 27 samples with 1 evaluation.
Range (min … max): 132.766 ms … 354.348 ms ┊ GC (min … max): 0.00% … 0.00%
Time (median): 144.794 ms ┊ GC (median): 0.00%
Time (mean ± σ): 188.429 ms ± 79.396 ms ┊ GC (mean ± σ): 0.37% ± 2.38%
▁█▃▁
████▄▄▄▁▁▁▁▁▁▄▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▄▁▁▁▁▄▄▁▁▁▁▁▁▁▁▄▁▁▁▁▁▁▁▄▇ ▁
133 ms Histogram: frequency by time 354 ms <
Memory estimate: 22.91 MiB, allocs estimate: 157.
julia> b2
BenchmarkTools.Trial: 2 samples with 1 evaluation.
Range (min … max): 2.564 s … 2.794 s ┊ GC (min … max): 0.00% … 0.00%
Time (median): 2.679 s ┊ GC (median): 0.00%
Time (mean ± σ): 2.679 s ± 162.574 ms ┊ GC (mean ± σ): 0.00% ± 0.00%
█ █
█▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁█ ▁
2.56 s Histogram: frequency by time 2.79 s <
Memory estimate: 7.63 MiB, allocs estimate: 2.
julia> b3
BenchmarkTools.Trial: 2 samples with 1 evaluation.
Range (min … max): 2.557 s … 2.624 s ┊ GC (min … max): 0.00% … 0.00%
Time (median): 2.590 s ┊ GC (median): 0.00%
Time (mean ± σ): 2.590 s ± 46.978 ms ┊ GC (mean ± σ): 0.00% ± 0.00%
█ █
█▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁█ ▁
2.56 s Histogram: frequency by time 2.62 s <
Memory estimate: 22.89 MiB, allocs estimate: 10.
```
Now let's continue with the example. We compare Sibson-0 to Sibson-1 (going back to the original definitions of `xg` and `yg` with $100^2$ points):
```julia
sib_vals = itp(_x, _y)
sib1_vals = itp(_x, _y; method=Sibson(1))
```
Now we can plot.
```julia
function plot_itp(fig, x, y, vals, title, i, show_data_sites, itp, xd=nothing, yd=nothing, show_3d=true, levels=-0.1:0.05:0.3)
    # Filled-contour panel in row 1, column i of the figure.
    ax2d = Axis(fig[1, i], xlabel="x", ylabel="y", width=600, height=600, title=title, titlealign=:left)
    contour_plot = contourf!(ax2d, x, y, vals, colormap=:viridis, levels=levels, extendhigh=:auto)
    # Optionally mark the data sites used for the interpolant.
    if show_data_sites
        scatter!(ax2d, xd, yd, color=:red, markersize=9)
    end
    # Overlay the convex hull of the data sites in white.
    triangulation = itp.triangulation
    hull_vertices = get_convex_hull_vertices(triangulation)
    lines!(ax2d, [get_point(triangulation, j) for j in hull_vertices], color=:white, linewidth=4)
    # Optional 3D surface panel in row 2 of the same column.
    if show_3d
        ax3d = Axis3(fig[2, i], xlabel="x", ylabel="y", zlabel=L"z", width=600, height=600, title=" ", titlealign=:left, azimuth=0.49)
        surface!(ax3d, x, y, vals, color=vals, colormap=:viridis, colorrange=(-0.1, 0.3))
        zlims!(ax3d, 0, 0.25)
    end
    # Return the contour plot so a Colorbar can be attached to it.
    return contour_plot
end
```
```@raw html
<figure>
<img src='../figures/sibson_vs_sibson1.png' alt='Sibson and Sibson-1 Interpolation'><br>
</figure>
```
The red points in (c) show the data used for interpolating. The results are pretty similar, although Sibson-1 is more bumpy in the zero region of the function. Sibson-1 is, however, smoother on the rising front of the function.
Note that we are extrapolating in some parts of this function, where extrapolation refers to evaluating outside of the convex hull of the data sites. This convex hull is shown in white above. If we wanted to avoid extrapolating entirely, you can use `project=false` which replaces any extrapolated values with `Inf`.
```julia
sib_vals = itp(_x, _y, project=false)
sib1_vals = itp(_x, _y; method=Sibson(1), project=false)
fig = Figure(fontsize=36)
plot_itp(fig, _x, _y, sib_vals, "(a): Sibson", 1, false, itp, x, y)
plot_itp(fig, _x, _y, sib1_vals, "(b): Sibson-1", 2, false, itp, x, y)
plot_itp(fig, _x, _y, f.(_x, _y), "(c): Exact", 3, true, itp, x, y)
resize_to_layout!(fig)
fig
```
```@raw html
<figure>
<img src='../figures/sibson_vs_sibson1_project_false.png' alt='Sibson and Sibson-1 Interpolation without Extrapolation'><br>
</figure>
```
To get a better comparison of the two interpolants, lets plot the errors at each point, including extrapolation.
```julia
sib_vals = itp(_x, _y)
sib1_vals = itp(_x, _y; method=Sibson(1))
sib_errs = @. 100abs(sib_vals - f(_x, _y))
sib1_errs = @. 100abs(sib1_vals - f(_x, _y))
fig = Figure(fontsize=36)
plot_itp(fig, _x, _y, sib_errs, "(a): Sibson", 1, true, itp, x, y, false, 0:0.5:3)
c = plot_itp(fig, _x, _y, sib1_errs, "(b): Sibson-1", 2, true, itp, x, y, false, 0:0.5:3)
Colorbar(fig[1, 3], c)
resize_to_layout!(fig)
fig
```
```@raw html
<figure>
<img src='../figures/sibson_vs_sibson1_errors.png' alt='Sibson and Sibson-1 Interpolation Errors'><br>
</figure>
```
We see that the Sibson-1 interpolant has less error overall. To summarise these errors into a single scalar, we can use the relative root mean square error, defined by
```math
\varepsilon_{\text{rrmse}}(\boldsymbol y, \hat{\boldsymbol y}) = 100\sqrt{\frac{\sum_i (y_i - \hat y_i)^2}{\sum_i \hat y_i^2}}.
```
```julia-repl
julia> esib0 = 100sqrt(sum((sib_vals .- f.(_x, _y)).^2) / sum(sib_vals.^2))
8.272516151708604
julia> esib1 = 100sqrt(sum((sib1_vals .- f.(_x, _y)).^2) / sum(sib_vals.^2))
6.974149853003652
```
| NaturalNeighbours | https://github.com/DanielVandH/NaturalNeighbours.jl.git |
|
[
"MIT"
] | 1.3.5 | ac69089b500226d09cb611688ae67015bb107b58 | docs | 26312 | ```@meta
CurrentModule = NaturalNeighbours
```
# Interpolation
In this section, we give some of the mathematical background behind natural neighbour interpolation, and other interpolation methods provided from this package. The discussion here will be limited, but you can see this [thesis](https://kluedo.ub.rptu.de/frontdoor/deliver/index/docId/2104/file/diss.bobach.natural.neighbor.20090615.pdf) or this [Wikipedia article](https://en.wikipedia.org/wiki/Natural_neighbor_interpolation) for more information. The discussion that follows is primarily sourced from the linked thesis. These ideas are implemented by the `interpolate` function.
# Voronoi Tessellation
We need to first define the _Voronoi tessellation_. We consider some set of points $\boldsymbol X = \{\boldsymbol x_1, \ldots, \boldsymbol x_m\} \subseteq \mathbb R^2$. The Voronoi tessellation of $\boldsymbol X$, denoted $\mathcal V(\boldsymbol X)$, is a set of convex polygons $\{V_{\boldsymbol x_1}, \ldots, V_{\boldsymbol x_m}\}$, also called _tiles_, such that
```math
\begin{equation*}
V_{\boldsymbol x_i} = \{\boldsymbol x \in \mathbb R^2 : \|\boldsymbol x - \boldsymbol x_i\| \leq \|\boldsymbol x - \boldsymbol x_j\|,~ \boldsymbol x_j \in \boldsymbol X\}.
\end{equation*}
```
In particular, any point in $V_{\boldsymbol x_i}$ is closer to $\boldsymbol x_i$ than it is to any other point in $\boldsymbol X$. We will also denote $V_{\boldsymbol x_i}$ by $V_i$. [DelaunayTriangulation.jl](https://github.com/DanielVandH/DelaunayTriangulation.jl) is used to build $\mathcal V(\boldsymbol X)$. An example of a Voronoi tessellation is shown below.
```@raw html
<figure>
<img src='../figures/example_tessellation.png' alt='Voronoi Tessellation'><br>
</figure>
```
# Natural Neighbours
See that the tiles of the tessellation in the figure above intersect along a line, called the _Voronoi facet_, that we denote by $\mathcal F_{ij} = \mathcal V_i \cap \mathcal V_j$. Whenever $\mathcal F_{ij} \neq \emptyset$, we say that $\boldsymbol x_i$ and $\boldsymbol x_j$ are _natural neighbours_ in $\boldsymbol X$. We denote the set of natural neighbours to a point $\boldsymbol x \in \boldsymbol X$ by $N(\boldsymbol x) \subseteq \boldsymbol X$, and we denote the corresponding indices by $N_i = \{j : \boldsymbol x_j \in N(\boldsymbol x_i)\}$.
# Natural Neighbour Coordinates
We represent points locally using what are known as natural neighbour coordinates, which are based on the nearby Voronoi tiles. In particular, we make the following definition: Any set of convex coordinates $\boldsymbol \lambda$ (convex means that $\lambda_i \geq 0$ for each $i$ and $\sum_i \lambda_i = 1$) of $\boldsymbol x$ with respect to the natural neighbours $N(\boldsymbol x)$ of $\boldsymbol x$ that satisfies:
1. $\lambda_i > 0 \iff \boldsymbol x_i \in N(\boldsymbol x)$,
2. $\boldsymbol \lambda$ is continuous with respect to $\boldsymbol x$,
is called a set of _natural neighbour coordinates_ of $\boldsymbol x$ in $\boldsymbol X$, or just the natural neighbour coordinates of $\boldsymbol x$, or the _local coordinates_ of $\boldsymbol x$.
# Natural Neighbour Interpolation
Now that we have some definitions, we can actually define the steps involved in natural neighbour interpolation. We are supposing that we have some data $(\boldsymbol x_i, z_i)$ for $i=1,\ldots,m$, and we want to interpolate this data at some point $\boldsymbol x_0 \in \mathcal C(\boldsymbol X)$, where $\boldsymbol X$ is the point set $(\boldsymbol x_1,\ldots,\boldsymbol x_m)$ and $\mathcal C(\boldsymbol X)$ is the convex hull of $\boldsymbol x$. We let $Z$ denote the function values $(z_1,\ldots,z_m)$.
The steps are relatively straight forward.
1. First, compute local coordinates $\boldsymbol \lambda(\boldsymbol x_0)$ with respect to the natural neighbours $N(\boldsymbol x_0)$.
2. Combine the values $z_i$ associated with $\boldsymbol x_i \in N(\boldsymbol x)$ using some blending function $\varphi(\boldsymbol \lambda, Z)$.
To consider the second step, note that a major motivation for working with local coordinates is the following fact: The coordinates $\boldsymbol \lambda$ that we compute can be used to represent our point $\boldsymbol x_0$ as $\boldsymbol x_0 = \sum_{i \in N_0} \lambda_i(\boldsymbol x_0)\boldsymbol x_i$, a property known as the local coordinates property ([Sibson, 1980](https://doi.org/10.1017/S0305004100056589)), or the natural neighbour coordinates property if $\boldsymbol \lambda$ is convex (as we assume them to be).
In particular, the coordinates $\boldsymbol \lambda$ determine how much each point $\boldsymbol x_i \in N(\boldsymbol x_0)$ contributes to the representation of our query point $\boldsymbol x_0$, hence the term "natural". So, a natural interpolant is to simply take this linear combination and replace $\boldsymbol x_i$ by $z_i$, giving the scattered data interpolant
```math
f(\boldsymbol x_0) = \sum_{i \in N_0} \lambda_i z_i.
```
Note that the natural neighbour coordinates property only works for points in the convex hull of $\boldsymbol X$ (otherwise $\boldsymbol \lambda$ is not convex), hence the restriction $\boldsymbol x_0 \in \mathcal C(\boldsymbol X)$.
# Some Local Coordinates
Let us now define all the coordinates we provide in this package.
## Nearest Neighbours
To represent a point $\boldsymbol x_0$, we can use what are known as _nearest neighbour coordinates_, which simply assigns a weight of $1$ to the generator of the tile that $\boldsymbol x_0$ is in:
```math
\lambda_i^{\text{NEAR}} = \begin{cases} 1 & \boldsymbol x_0 \in \mathcal V_i, \\ 0 & \text{otherwise}. \end{cases}
```
The resulting scattered data interpolant $f^{\text{NEAR}}$ is then just
```math
f^{\text{NEAR}}(\boldsymbol x) = z_i,
```
where $\boldsymbol x \in \mathcal V_i$. An example of what this interpolant looks like is given below.
```@raw html
<figure>
<img src='../figures/fnear_example.png' alt='Nearest Neighbour Interpolation'><br>
</figure>
```
## Laplace Coordinates
Here we introduce _Laplace coordinates_, also known as _non-Sibsonian coordinates_. To define these coordinates, let us take some tessellation $\mathcal V(\boldsymbol X)$ and see what happens when we add a point into it.
```@raw html
<figure>
<img src='../figures/new_tile.png' alt='Tessellation with a new point'><br>
</figure>
```
In the figure above, the tiles with the black boundaries and no fill are the tiles of the original tessellation, and we show the tile that would be created by some query point $\boldsymbol x_0$ (the magenta point) with a blue tile. We see that the insertion of $\boldsymbol x_0$ into the tessellation has intersected some of the other tiles, in particular it has modified only the tiles corresponding to its natural neighbours in $N(\boldsymbol x_0)$.
For a given generator $\boldsymbol x_i \in N(\boldsymbol x_0)$, we see that there is a corresponding blue line passing through its tile. Denote this blue line by $\mathcal F_{0i}$, and let $r_i = \|\boldsymbol x_0 - \boldsymbol x_i\|$. With this definition, we define
```math
\lambda_i^{\text{LAP}} = \frac{\hat\lambda_i^{\text{LAP}}}{\sum_{j \in N_0} \hat\lambda_j^{\text{LAP}}}, \quad \hat\lambda_i^{\text{LAP}} = \frac{\ell(\mathcal F_{0i})}{r_i},
```
where $\ell(\mathcal F_{0i})$ is the length of the facet $\mathcal F_{0i}$. In particular, $\hat\lambda_i^{\text{LAP}}$ is the ratio of the blue line inside the tile and the distance between the generator $\boldsymbol x_i$ and the query point $\boldsymbol x_0$. These coordinates are continuous in $\mathcal C(\boldsymbol X)$ with derivative discontinuities at the data sites $\boldsymbol X$. The resulting interpolant $f^{\text{LAP}}$ inherits these properties, where
```math
f^{\text{LAP}}(\boldsymbol x_0) = \sum_{i \in N_0} \lambda_i^{\text{LAP}}z_i.
```
This interpolant has linear precision, meaning it reproduces linear functions.
An example of what this interpolant looks like is given below.
```@raw html
<figure>
<img src='../figures/flap_example.png' alt='Laplace Interpolation'><br>
</figure>
```
## Sibson Coordinates
Now we consider Sibson's coordinates. These coordinates are similar to Laplace's coordinates, except we consider the areas rather than lengths for the facets. In particular, let us return to the figure above, reprinted below for convenience:
```@raw html
<figure>
<img src='../figures/new_tile.png' alt='Tessellation with a new point'><br>
</figure>
```
The idea is to consider how much area this new blue tile "steals" from the tiles of its natural neighbours. Based on the following identity ([Sibson, 1980](https://doi.org/10.1017/S0305004100056589)),
```math
\text{Area}(\mathcal V_{\boldsymbol x_0}) \boldsymbol x = \sum_{i \in N_0} \text{Area}(\mathcal V_{\boldsymbol x} \cap \mathcal V_{\boldsymbol x_i}^{\boldsymbol x_0})\boldsymbol x_i,
```
where $\mathcal V_{\boldsymbol x_0}$ is the new tile shown in blue, and $\mathcal V_{\boldsymbol x_i}^{\boldsymbol x_0}$ is the tile associated with $\boldsymbol x_i$ in the original tessellation, i.e. prior to the insertion of $\boldsymbol x_0$, we define _Sibson's coordinates_ as
```math
\lambda_i^{\text{SIB}} = \frac{\hat\lambda_i^{\text{SIB}}}{\sum_{j \in N_0} \hat\lambda_j^{\text{SIB}}}, \quad \hat\lambda_i^{\text{SIB}} = \text{Area}(\mathcal V_{\boldsymbol x_0} \cap \mathcal V_{\boldsymbol x_i}^{\boldsymbol x_0}).
```
A clearer way to write this is as
```math
\lambda_i^{\text{SIB}} = \frac{A(\boldsymbol x_i)}{A(\boldsymbol x)},
```
where $A(\boldsymbol x_i)$ is the area of the intersection between the original tile for $\boldsymbol x_i$ and the new tile at $\boldsymbol x_0$, and $A(\boldsymbol x)$ is the total area of the new blue tile. These coordinates are $C^1$ continuous in $\mathcal C(\boldsymbol X) \setminus \boldsymbol X$, with derivative discontinuities at the data sites, and so too is the interpolant
```math
f^{\text{SIB}}(\boldsymbol x_0) = \sum_{i \in N_0} \lambda_i^{\text{SIB}}z_i.
```
We may also use $f^{\text{SIB}0}$ and $\lambda_i^{\text{SIB}0}$ rather than $f^{\text{SIB}}$ and $\lambda_i^{\text{SIB}}$, respectively.
This interpolant has linear precision, meaning it reproduces linear functions.
An example of what this interpolant looks like is given below.
```@raw html
<figure>
<img src='../figures/fsib0_example.png' alt='Sibson Interpolation'><br>
</figure>
```
Our implementation of these coordinates follows [this article](https://gwlucastrig.github.io/TinfourDocs/NaturalNeighborTinfourAlgorithm/index.html) with some simple modifications.
## Triangle Coordinates
Now we define triangle coordinates. These are not actually natural coordinates (they are not continuous in $\boldsymbol x$), but they just give a nice comparison with other methods. The idea is to interpolate based on the barycentric coordinates of the triangle that the query point is in, giving rise to a piecewise linear interpolant over $\mathcal C(\boldsymbol X)$.
Let us take our query point $\boldsymbol x=(x,y)$ and suppose it is in some triangle $V$ with coordinates $\boldsymbol x_1 = (x_1,y_1)$, $\boldsymbol x_2 = (x_2,y_2)$, and $\boldsymbol x_3=(x_3,y_3)$. We can then define:
```math
\begin{align*}
\Delta &= (y_2-y_3)(x_1-x_3)+(x_3-x_2)(y_1-y_3), \\
\lambda_1 &= \frac{(y_2-y_3)(x-x_3)+(x_3-x_2)(y-y_3)}{\Delta}, \\
\lambda_2 &= \frac{(y_3-y_1)(x-x_3) + (x_1-x_3)(y-y_3)}{\Delta}, \\
\lambda_3 &= 1-\lambda_1-\lambda_2.
\end{align*}
```
With these definitions, our interpolant is
```math
f^{\text{TRI}}(\boldsymbol x) = \lambda_1z_1 + \lambda_2z_2 + \lambda_3z_3.
```
(Of course, the subscripts would have to be modified to match the actual indices of the points rather than assuming them to be $\boldsymbol x_1$, $\boldsymbol x_2$, and $\boldsymbol x_3$.) This is the same interpolant used in [FiniteVolumeMethod.jl](https://github.com/DanielVandH/FiniteVolumeMethod.jl).
An example of what this interpolant looks like is given below.
```@raw html
<figure>
<img src='../figures/ftri_example.png' alt='Triangle Interpolation'><br>
</figure>
```
# Smooth Interpolation
All the derived interpolants above are not differentiable at the data sites. Here we describe some interpolants that are differentiable at the data sites.
## Sibson's $C^1$ Interpolant
Sibson's $C^1$ interpolant, which we call Sibson-1 interpolation, extends the Sibson coordinates above (also called Sibson-0 coordinates) and is $C^1$ at the data sites. A limitation of it is that it requires an estimate of the gradient $\boldsymbol \nabla_i$ at the data sites $\boldsymbol x_i$, which may be estimated using the derivative generation techniques described in the sidebar.
Following [Bobach's thesis](https://kluedo.ub.rptu.de/frontdoor/deliver/index/docId/2104/file/diss.bobach.natural.neighbor.20090615.pdf) or [Flötotto's thesis](https://theses.hal.science/tel-00832487/PDF/these-flototto.pdf), the Sibson-1 interpolant $f^{\text{SIB}1}$ is a linear combination of $f^{\text{SIB}0} \equiv f^{\text{SIB}}$ and another interpolant $\xi$. We define:
```math
\begin{align*}
r_i &= \|\boldsymbol x_0-\boldsymbol x_i\|, \\
\gamma_i &= \frac{\lambda_i^{\text{SIB}0}}{r_i}, \\
\zeta_i &= z_i + (\boldsymbol x_0 - \boldsymbol x_i)^T\boldsymbol\nabla_i, \\
\zeta &= \frac{\sum_{i\in N_0} \gamma_i\zeta_i}{\sum_{i\in N_0} \gamma_i}, \\
\alpha &= \frac{\sum_{i \in N_0} \lambda_i^{\text{SIB}0}r_i}{\sum_{i \in N_0} \gamma_i}, \\
\beta &= \sum_{i \in N_0} \lambda_i^{\text{SIB}0}r_i^2.
\end{align*}
```
Our interpolant is then defined by
```math
f^{\text{SIB}1}(\boldsymbol x_0) = \frac{\alpha f^{\text{SIB}0} + \beta\zeta}{\alpha + \beta}.
```
This interpolant exactly reproduces spherical quadratics $\boldsymbol x \mapsto \mu(\boldsymbol x - \boldsymbol a)^T(\boldsymbol x - \boldsymbol a)$.
An example of what this interpolant looks like is given below.
```@raw html
<figure>
<img src='../figures/fsib1_example.png' alt='Sibson-1 Interpolation'><br>
</figure>
```
Notice that the peak of the function is much smoother than it was in the other interpolant examples.
## Farin's $C^1$ Interpolant
Farin's $C^1$ interpolant, introduced by [Farin (1990)](https://doi.org/10.1016/0167-8396(90)90036-Q), is another interpolant with $C^1$ continuity at the data sites provided we have estimates of the gradients $\boldsymbol \nabla_i$ at the data sites (see the sidebar for derivative generation methods), and also makes use of the Sibson-0 coordinates. Typically these coordinates are described in the language of Bernstein-Bézier simplices (as in [Bobach's thesis](https://kluedo.ub.rptu.de/frontdoor/deliver/index/docId/2104/file/diss.bobach.natural.neighbor.20090615.pdf) or [Flötotto's thesis](https://theses.hal.science/tel-00832487/PDF/these-flototto.pdf) and Farin's original paper), this language makes things more complicated than they need to be. Instead, we describe the interpolant using symmetric homogeneous polynomials, as in Hiyoshi and Sugihara ([2004](https://doi.org/10.1007/978-3-540-24767-8_8), [2007](https://doi.org/10.1504/IJCSE.2007.014460)). See the references mentioned above for a derivation of the interpolant we describe below.
Let $\boldsymbol x_0$ be some point in $\mathcal C(\boldsymbol X)$ and let $N_0$ be the natural neighbourhood around $\boldsymbol x_0$. We let the natural coordinates be given by Sibson's coordinates $\boldsymbol \lambda = (\lambda_1,\ldots,\lambda_n)$ with corresponding natural neighbours $\boldsymbol x_1,\ldots,\boldsymbol x_n$ (rearranging the indices accordingly to avoid using e.g. $i_1,\ldots, i_n$), where $n = |N_0|$. We define a homogeneous symmetric polynomial
```math
f(\boldsymbol x_0) = \sum_{i \in N_0}\sum_{j \in N_0}\sum_{k \in N_0} f_{ijk}\lambda_i\lambda_j\lambda_k,
```
where the coefficients $f_{ijk}$ are symmetric so that they can be uniquely determined. We define $z_{i, j} = \boldsymbol \nabla_i^T \overrightarrow{\boldsymbol x_i\boldsymbol x_j}$, where $\boldsymbol \nabla_i$ is the estimate of the gradient at $\boldsymbol x_i \in N(\boldsymbol x_0) \subset \boldsymbol X$ and $\overrightarrow{\boldsymbol x_i\boldsymbol x_j} = \boldsymbol x_j - \boldsymbol x_i$. Then, define the coefficients (using symmetry to permute the indices to the standard forms below):
```math
\begin{align*}
f_{iii} &= z_i, \\
f_{iij} &= z_i + \frac{1}{3}z_{i,j}, \\
f_{ijk} &= \frac{z_i+z_j+z_k}{3} + \frac{z_{i,j}+z_{i,k}+z_{j,i}+z_{j,k}+z_{k,i}+z_{k,j}}{12},
\end{align*}
```
where all the $i$, $j$, and $k$ are different. The resulting interpolant $f$ is Farin's $C^1$ interpolant, $f^{\text{FAR}} = f$, and has quadratic precision so that it reproduces quadratic polynomials.
Let us describe how we actually evaluate $\sum_{i \in N_0}\sum_{j \in N_0}\sum_{k \in N_0} f_{ijk}\lambda_i\lambda_j\lambda_k$ efficiently. Write this as
```math
f^{\text{FAR}}(\boldsymbol x_0) = \sum_{1 \leq i, j, k \leq n} f_{ijk}\lambda_i\lambda_j\lambda_k.
```
This looks close to the definition of a [complete homogeneous symmetric polynomial](https://en.wikipedia.org/wiki/Complete_homogeneous_symmetric_polynomial). This page shows the identity
```math
\sum_{1 \leq i \leq j \leq k \leq n} X_iX_jX_k = \sum_{1 \leq i, j, k \leq n} \frac{m_i!m_j!m_k!}{3!}X_iX_jX_k,
```
where $m_\ell$ is the multiplicity of $X_\ell$ in the summand, e.g. if the summand is $X_i^2X_k$ then $m_i=2$ and $m_k = 1$. Thus, transforming the variables accordingly, we can show that
```math
f^{\text{FAR}}(\boldsymbol x_0) = 6\underbrace{\sum_{i=1}^n\sum_{j=i}^n\sum_{k=j}^n}_{\sum_{1 \leq i \leq j \leq k \leq n}} \tilde f_{ijk}\lambda_i\lambda_j\lambda_k,
```
where $\tilde f_{iii} = f_{iii}/3! = f_{iii}/6$, $\tilde f_{iij} = f_{iij}/2$, and $\tilde f_{ijk} = f_{ijk}$. This is the implementation we use.
## Hiyoshi's $C^2$ Interpolant
Hiyoshi's $C^2$ interpolant is similar to Farin's $C^1$ interpolant, except now we have $C^2$ continuity at the data sites and we now, in addition to requiring estimates of the gradients $\boldsymbol \nabla_i$ at the data sites, require estimates of the Hessians $\boldsymbol H_i$ at the data sites (see the sidebar for derivative generation methods). As with Farin's $C^1$ interpolant, we use the language of homogeneous symmetric polynomials rather than Bernstein-Bézier simplices in what follows. There are two definitions of Hiyoshi's $C^2$ interpolant, the first being given by [Hiyoshi and Sugihara (2004)](https://doi.org/10.1007/978-3-540-24767-8_8) and described by [Bobach](https://kluedo.ub.rptu.de/frontdoor/deliver/index/docId/2104/file/diss.bobach.natural.neighbor.20090615.pdf), and the second given three years later again by [Hiyoshi and Sugihara (2007)](https://doi.org/10.1504/IJCSE.2007.014460). We use the 2007 definition - testing shows that they are basically the same, anyway.
Like in the previous section, we let $\boldsymbol x_0$ be some point in $\mathcal C(\boldsymbol X)$ and let $N_0$ be the natural neighbourhood around $\boldsymbol x_0$. We let the natural coordinates be given by Sibson's coordinates $\boldsymbol \lambda = (\lambda_1,\ldots,\lambda_n)$ with corresponding natural neighbours $\boldsymbol x_1,\ldots,\boldsymbol x_n$ (rearranging the indices accordingly to avoid using e.g. $i_1,\ldots, i_n$), where $n = |N_0|$. We define
```math
z_{i, j} = \boldsymbol \nabla_i^T\overrightarrow{\boldsymbol x_i\boldsymbol x_j}, \qquad z_{i, jk} = \overrightarrow{\boldsymbol x_i\boldsymbol x_j}^T\boldsymbol H_i\overrightarrow{\boldsymbol x_i\boldsymbol x_k}.
```
Hiyoshi's $C^2$ interpolant, written $f^{\text{HIY}}$ (or later $f^{\text{HIY}2}$, if ever we can get Hiyoshi's $C^k$ interpolant on $\mathcal C(\boldsymbol X) \setminus \boldsymbol X$ implemented --- see Hiyoshi and Sugihara ([2000](https://doi.org/10.1145/336154.336210), [2002](https://doi.org/10.1016/S0925-7721(01)00052-9)), [Bobach, Bertram, Umlauf (2006)](https://doi.org/10.1007/11919629_20), and Chapter 3.2.5.5 and Chapter 5.5. of [Bobach's thesis]((https://kluedo.ub.rptu.de/frontdoor/deliver/index/docId/2104/file/diss.bobach.natural.neighbor.20090615.pdf))), is defined by the homogeneous symmetric polynomial
```math
f^{\text{HIY}}(\boldsymbol x_0) = \sum_{1 \leq i,j,k,\ell,m \leq n} f_{ijk\ell m}\lambda_i\lambda_j\lambda_k\lambda_\ell\lambda_m,
```
where we define the coefficients (using symmetry to permute the indices to the standard forms below):
```math
\begin{align*}
f_{iiiii} &= z_i, \\
f_{iiiij} &= z_i + \frac15z_{i,j}, \\
f_{iiijj} &= z_i + \frac25z_{i,j} + \frac{1}{20}z_{i,jj}, \\
f_{iiijk} &= z_i + \frac15\left(z_{i, j} + z_{i, k}\right) + \frac{1}{20}z_{i, jk}, \\
f_{iijjk} &= \frac{13}{30}\left(z_i + z_j\right) + \frac{2}{15}z_k + \frac{1}{9}\left(z_{i, j} + z_{j, i}\right) + \frac{7}{90}\left(z_{i, k} + z_{j, k}\right) \\&+ \frac{2}{45}\left(z_{k, i} + z_{k, j}\right) + \frac{1}{45}\left(z_{i, jk} + z_{j, ik} + z_{k, ij}\right), \\
f_{iijk\ell} &= \frac12z_i + \frac16\left(z_j + z_k + z_\ell\right) + \frac{7}{90}\left(z_{i, j} + z_{i, k} + z_{i, \ell}\right) \\&+ \frac{2}{45}\left(z_{j, i} + z_{k, i} + z_{\ell, i}\right) + \frac{1}{30}\left(z_{j, k} + z_{j, \ell} + z_{k, j} + z_{k, \ell} + z_{\ell, j} + z_{\ell, k}\right) \\
&+ \frac{1}{90}\left(z_{i, jk} + z_{i, j\ell} + z_{i, k\ell}\right) + \frac{1}{90}\left(z_{j, ik} + z_{j, i\ell} + z_{k, ij} + z_{k, i\ell} + z_{\ell, ij} + z_{\ell, ik}\right) \\&+ \frac{1}{180}\left(z_{j, k\ell} + z_{k, j\ell} + z_{\ell, jk}\right), \\
f_{ijk\ell m} &= \frac{1}{5}\left(z_i + z_j + z_k + z_\ell + z_m\right) \\
&+ \frac{1}{30}\left(z_{i, j} + z_{i, k} + z_{i, \ell} + z_{i, m} + z_{j, i} + \cdots + z_{m, \ell}\right) \\
&+ \frac{1}{180}\left(z_{i, jk} + z_{i, j\ell} + z_{i, jm} + z_{i, k\ell} + z_{i, km} + z_{i, \ell m} + z_{j, i\ell} + \cdots + z_{m, k\ell}\right),
\end{align*}
```
where all the $i$, $j$, $k$, $\ell$, and $m$ are different. To evaluate $f^{\text{HIY}}$, we use the same relationship between $f^{\text{HIY}}$ and complete homogeneous symmetric polynomials to write
```math
f^{\text{HIY}}(\boldsymbol x_0) = 120\sum_{1 \leq i \leq j \leq k \leq \ell \leq m \leq n} \tilde f_{ijk \ell m} \lambda_i\lambda_j\lambda_k\lambda_\ell \lambda_m,
```
where $\tilde f_{iiiii} = f_{iiiii}/5! = f_{iiiii}/120$, $\tilde f_{iiiij} = f_{iiiij}/24$, $\tilde f_{iiijj} = f_{iiijj}/12$, $\tilde f_{iijjk} = f_{iijjk}/4$, $\tilde f_{iijk\ell} = f_{iijk\ell}/2$, and $\tilde f_{ijk\ell m} = f_{ijk\ell m}$.
This interpolant has cubic precision, meaning it can recover cubic polynomials. Note that this sum has
```math
\sum_{i=1}^n\sum_{j=i}^n\sum_{k=j}^n\sum_{\ell=k}^n\sum_{m=\ell}^n 1 = \frac{n^5}{120} + \frac{n^4}{12} + \cdots = \mathcal O(n^5)
```
terms, which could cause issues with many natural neighbours. For example, with $n = 20$ we have $n^5 = 3,200,000$. In fact, as discussed in Section 6.5 of [Bobach's thesis](https://kluedo.ub.rptu.de/frontdoor/deliver/index/docId/2104/file/diss.bobach.natural.neighbor.20090615.pdf), more than 150 million operations could be required with $n = 20$. We discuss benchmark results in the comparison section of the sidebar.
# Regions of Influence
The _region of influence_ for the natural neighbour coordinates associated with a point $\boldsymbol x_i$ is the interior of the union of all circumcircles coming from the triangles of the underlying triangulation that pass through $\boldsymbol x_i$. We can visualise this below for the coordinates we defined above. (This region of influence definition does not necessarily generalise to the triangle and nearest neighbour coordinates, but we still compare them.)
We take a set of data sites in $[-1, 1]^2$ such that all function values are zero except for $z_1 = 1$ with $\boldsymbol x_1 = \boldsymbol 0$. Using this setup, we obtain the following results (see also Figure 3.6 of Bobach's thesis linked previously):
```@raw html
<figure>
<img src='../figures/influence.png' alt='Region of Influence'><br>
</figure>
```
We can indeed see the effect of the region of influence about this single point $\boldsymbol x_1$. Note also that $f^{\text{SIB}1}$ is much smoother than the others.
# Extrapolation
An important consideration is extrapolation. Currently, all the methods above assume that the query point $\boldsymbol x_0$ is in $\mathcal C(\boldsymbol X)$, and the interpolation near the boundary of $\mathcal C(\boldsymbol X)$ often has some weird effects. There are many approaches available for extrapolation, such as with [ghost points](https://doi.org/10.1016/j.cad.2008.08.007), although these are not implemented in this package (yet!).
The approach we take for any point outside of $\mathcal C(\boldsymbol X)$, or on $\partial\mathcal C(\boldsymbol X)$, is to find the ghost triangle that $\boldsymbol x_0$ is in (ghost triangles are defined [here](https://danielvandh.github.io/DelaunayTriangulation.jl/dev/boundary_handling/#Ghost-Triangles) in the DelaunayTriangulation.jl documentation), which will have some boundary edge $\boldsymbol e_{ij}$. (Similarly, if $\boldsymbol x_0 \in \partial \mathcal C(\boldsymbol X)$, $\boldsymbol e_{ij}$ is the boundary edge that it is on.) We then simply use two-point interpolation, letting
```math
f(\boldsymbol x_0) \approx \lambda_iz_i + \lambda_jz_j,
```
where $\lambda_i = 1-t$, $\lambda_j = t$, $\ell = \|\boldsymbol x_i - \boldsymbol x_j\|$, and $t = [(x_0 - x_i)(x_j - x_i) + (y_0 - y_i)(y_j - y_i)]/\ell^2$. Note also that in this definition of $t$ we have projected $\boldsymbol x_0$ onto the line through $\boldsymbol x_i$ and $\boldsymbol x_j$ -- this projection is not necessarily on $\boldsymbol e_{ij}$, though, so $t$ will not always be in $[0, 1]$, meaning the coordinates are not guaranteed to be (and probably won't be) convex.
This extrapolation will not always be perfect, but it is good enough until we implement more sophisticated methods. If you want to disable this approach, just use the `project = false` keyword argument when evaluating your interpolant.
Similarly, if you have points defining a boundary of some domain that isn't necessarily convex, the function `identify_exterior_points` may be useful to you, provided you have represented your boundary as defined [here in DelaunayTriangulation.jl](https://danielvandh.github.io/DelaunayTriangulation.jl/dev/boundary_handling/#Boundary-Specification). See the Switzerland example in the sidebar for more information.
| NaturalNeighbours | https://github.com/DanielVandH/NaturalNeighbours.jl.git |
|
[
"MIT"
] | 1.3.5 | ac69089b500226d09cb611688ae67015bb107b58 | docs | 13713 | ```@meta
CurrentModule = NaturalNeighbours
```
# Switzerland Elevation Data
Here we consider a more involved example, constructing an interpolant over elevation data of Switzerland. We use data from [geoBoundaries](https://www.geoboundaries.org) who credits [OpenStreetMap](https://www.openstreetmap.org/) for the data (available under an [Open Database License](https://www.openstreetmap.org/copyright)). The data is available as a [gist](https://gist.github.com/DanielVandH/13687b0918e45a416a5c93cd52c91449), which was generated with the following R code (you don't need to run this code - we will download it directly from the gist soon):
```R
## Install and load the packages
#install.packages(c("remotes", "sf", "raster", "elevatr", "dplyr", "magrittr"))
#install.packages(c("dplyr", "magrittr"))
#remotes::install_gitlab("dickoa/rgeoboundaries")
library(rgeoboundaries)
library(sf)
library(raster)
library(elevatr)
#library(dplyr)
#library(magrittr)
## Get the elevation and polygon data
swiss_bound <- rgeoboundaries::geoboundaries("Switzerland")
elevation_data_rast <- elevatr::get_elev_raster(locations = swiss_bound, z = 7, clip = "locations")
boundary_coords <- rasterToPolygons(elevation_data_rast > -Inf, dissolve = TRUE) # https://gis.stackexchange.com/a/187800
elevation_data_xy <- as.data.frame(elevation_data_rast, xy = TRUE)
colnames(elevation_data_xy)[3] <- "elevation"
elevation_data_xy <- elevation_data_xy[complete.cases(elevation_data_xy), ]
all_polygons = boundary_coords@polygons[[1]]@Polygons
## Inspect all the polygons
#conv = function(polygon, id) {
# coords = polygon@coords
# dir = polygon@ringDir
# hole = polygon@hole
# df = tibble(x = coords[, 1], y = coords[, 1], dir = polygon@ringDir, hole = polygon@hole, id = id)
#}
#polygon_df = vector('list', length(all_polygons))
#for (i in seq_along(polygon_df)) {
# polygon_df[[i]] = conv(all_polygons[[i]], i)
#}
#polygon_df %<>% bind_rows(.id = "column_label")
# ^ After inspecting these results, the only polygon of interest is the first one.
polygons = all_polygons[[1]]@coords
x = elevation_data_xy[, 1]
y = elevation_data_xy[, 2]
z = elevation_data_xy[, 3]
bnd_x = polygons[, 1]
bnd_y = polygons[, 2]
```
For this example, load the following packages:
```julia
using NaturalNeighbours
using CairoMakie
using DelaunayTriangulation
using DelimitedFiles
using Downloads
using StableRNGs
using StatsBase
```
# Downloading the Data
To start, let us download and setup the data. We need to get the data sites, the elevation values, and also the boundary points.
```julia
data_url = "https://gist.githubusercontent.com/DanielVandH/13687b0918e45a416a5c93cd52c91449/raw/a8da6cdc94859fd66bcff85a2307f0f9cd57a18c/data.txt"
boundary_url = "https://gist.githubusercontent.com/DanielVandH/13687b0918e45a416a5c93cd52c91449/raw/a8da6cdc94859fd66bcff85a2307f0f9cd57a18c/boundary.txt"
data_dir = Downloads.download(data_url)
boundary_dir = Downloads.download(boundary_url)
data = readdlm(data_dir, skipstart=6)
data[:, 3] ./= 1000.0 # m -> km
boundary = readdlm(boundary_dir, skipstart=6)
good_elevation = findall(≥(0), @view data[:, 3])
data = @views data[good_elevation, :]
data_sites = [(data[i, 1], data[i, 2]) for i in axes(data, 1)]
elevation_data = @views data[:, 3]
boundary_points = [(boundary[i, 1], boundary[i, 2]) for i in axes(boundary, 1)]
```
# Downsampling and Setting up the Data for Plotting
We now setup the data for plotting. We want to use `tricontourf!`, so we need to get a triangulation of the data. Since the `boundary_points` do not actually store a subset of the points from `data_sites`, we can't just do e.g. `indexin(boundary_points, data_sites)` to get the associated boundary indices, so we instead find the closest data site to each boundary point.
```julia
function nearest_tuple(q, data)
δ = Inf
nearest_idx = 0
qx, qy = getxy(q)
for (i, p) in pairs(data)
px, py = getxy(p)
δ₁ = (qx - px)^2 + (qy - py)^2
if δ₁ < δ
δ = δ₁
nearest_idx = i
end
end
return nearest_idx
end
function update_boundary(boundary_points, data_sites)
boundary_nodes = map(q -> nearest_tuple(q, data_sites), boundary_points)
unique!(boundary_nodes)
push!(boundary_nodes, boundary_nodes[begin])
reverse!(boundary_nodes) # so that the boundary is traversed clockwise
boundary_points = data_sites[boundary_nodes]
return boundary_points, data_sites, boundary_nodes
end
boundary_points, data_sites, boundary_nodes = update_boundary(boundary_points, data_sites)
```
Next, before we plot, let us downsample the data. We do this since the data set is quite large, so when we interpolate it'll be useful to have fewer data points for the purpose of this example.
```julia
rng = StableRNG(123)
desample_idx = sample(rng, axes(data, 1), 5000, replace=false)
ds_data = data[desample_idx, :]
ds_data_sites = [(ds_data[i, 1], ds_data[i, 2]) for i in axes(ds_data, 1)]
ds_elevation_data = @views ds_data[:, 3]
ds_boundary_points, ds_data_sites, ds_boundary_nodes = update_boundary(boundary_points, ds_data_sites)
reverse!(ds_boundary_nodes) # so that the boundary is traversed clockwise
ds_tri = triangulate(ds_data_sites, boundary_nodes=ds_boundary_nodes)
ds_triangles = [T[j] for T in each_solid_triangle(ds_tri), j in 1:3]
```
# Looking at the Data
Now let's look at the data.
```julia
colorrange = (0, 4)
levels = LinRange(colorrange..., 40)
fig = Figure(fontsize=24)
ax1 = Axis3(fig[1, 1], xlabel="Longitude", ylabel="Latitude", zlabel="Elevation (km)", width=600, height=400, azimuth=0.9, title="(c): Downsampled height data (n = $(length(ds_elevation_data)))", titlealign=:left)
mesh!(ax1, ds_data, ds_triangles, color=ds_elevation_data, colorrange=colorrange)
ax2 = Axis(fig[1, 2], xlabel="Longitude", ylabel="Latitude", width=600, height=400, title="(d): Downsampled height data (n = $(length(ds_elevation_data)))", titlealign=:left)
tf = tricontourf!(ax2, ds_tri, ds_elevation_data, levels=levels)
Colorbar(fig[1:2, 3], tf)
resize_to_layout!(fig)
```
```@raw html
<figure>
    <img src='../figures/swiss_heights.png' alt='Switzerland Data'><br>
</figure>
```
We see that the downsampled data isn't that much different, despite having $n = 5,000$ points rather than $n = 220,175$ as in the original data set. Of course, the boundary has been trimmed a bit (if we really cared, we probably wouldn't have downsampled the boundary, but instead only downsampled the interior points - not relevant for this example).
# Interpolating
Now let's define and evaluate our interpolant.
```julia
## Define the interpolant
interpolant = interpolate(ds_data_sites, ds_elevation_data; derivatives=true)
## Evaluate the interpolant
a, b, c, d = DelaunayTriangulation.polygon_bounds(ds_data_sites, ds_boundary_nodes)
nx = 250
ny = 250
xg = LinRange(a, b, nx)
yg = LinRange(c, d, ny)
x = [xg[i] for i in 1:nx, j in 1:ny] |> vec
y = [yg[j] for i in 1:nx, j in 1:ny] |> vec
sibson_vals = interpolant(x, y; method=Sibson(), parallel=true)
sibson_1_vals = interpolant(x, y; method=Sibson(1), parallel=true)
laplace_vals = interpolant(x, y; method=Laplace(), parallel=true)
triangle_vals = interpolant(x, y; method=Triangle(), parallel=true)
nearest_vals = interpolant(x, y; method=Nearest(), parallel=true)
farin_vals = interpolant(x, y; method=Farin(), parallel=true)
hiyoshi_vals = interpolant(x, y; method=Hiyoshi(2), parallel=true)
```
Let's look at our results for each of these methods.
```julia
query_tri = triangulate([x'; y']; randomise=false)
query_triangles = [T[j] for T in each_solid_triangle(query_tri), j in 1:3]
function plot_results!(fig, i1, j1, i2, j2, x, y, xg, yg, vals, title1, title2, query_triangles, query_tri, a, b, c, d, e, f, nx, ny)
ax = Axis3(fig[i1, j1], xlabel="Longitude", ylabel="Latitude", zlabel="Elevation (km)", width=600, height=400, azimuth=0.9, title=title1, titlealign=:left)
m = mesh!(ax, hcat(x, y, vals), query_triangles, color=vals, colorrange=colorrange)
xlims!(ax, a, b)
ylims!(ax, c, d)
zlims!(ax, e, f)
ax = Axis(fig[i2, j2], xlabel="Longitude", ylabel="Latitude", width=600, height=400, title=title2, titlealign=:left)
contourf!(ax, xg, yg, reshape(vals, (nx, ny)), levels=levels)
lines!(ax, [get_point(query_tri, i) for i in get_convex_hull_vertices(query_tri)], color=:red, linewidth=4, linestyle=:dash)
lines!(ax, ds_boundary_points, color=:white, linewidth=4)
xlims!(ax, a, b)
ylims!(ax, c, d)
return m
end
function plot_results(sibson_vals, sibson_1_vals, laplace_vals, triangle_vals, nearest_vals, farin_vals, hiyoshi_vals, query_triangles, interpolant, a, b, c, d, e, f, nx, ny, data, triangles, elevation_data, tri)
fig = Figure(fontsize=24)
m1 = plot_results!(fig, 1, 1, 1, 2, x, y, xg, yg, sibson_vals, "(a): Sibson", "(b): Sibson", query_triangles, interpolant.triangulation, a, b, c, d, e, f, nx, ny)
m2 = plot_results!(fig, 1, 3, 1, 4, x, y, xg, yg, sibson_1_vals, "(c): Sibson-1", "(d): Sibson-1", query_triangles, interpolant.triangulation, a, b, c, d, e, f, nx, ny)
m3 = plot_results!(fig, 2, 1, 2, 2, x, y, xg, yg, laplace_vals, "(e): Laplace", "(f): Laplace", query_triangles, interpolant.triangulation, a, b, c, d, e, f, nx, ny)
m4 = plot_results!(fig, 2, 3, 2, 4, x, y, xg, yg, triangle_vals, "(g): Triangle", "(h): Triangle", query_triangles, interpolant.triangulation, a, b, c, d, e, f, nx, ny)
m5 = plot_results!(fig, 3, 1, 3, 2, x, y, xg, yg, nearest_vals, "(i): Nearest", "(j): Nearest", query_triangles, interpolant.triangulation, a, b, c, d, e, f, nx, ny)
m6 = plot_results!(fig, 3, 3, 3, 4, x, y, xg, yg, farin_vals, "(k): Farin", "(ℓ): Farin", query_triangles, interpolant.triangulation, a, b, c, d, e, f, nx, ny)
m7 = plot_results!(fig, 4, 1, 4, 2, x, y, xg, yg, hiyoshi_vals, "(m): Hiyoshi", "(n): Hiyoshi", query_triangles, interpolant.triangulation, a, b, c, d, e, f, nx, ny)
ax = Axis3(fig[4, 3], xlabel="Longitude", ylabel="Latitude", zlabel="Elevation (km)", width=600, height=400, azimuth=0.9, title="(k): Original height data", titlealign=:left)
mesh!(ax, data, triangles, color=elevation_data, colorrange=(0, 4))
xlims!(ax, a, b)
ylims!(ax, c, d)
zlims!(ax, e, f)
ax = Axis(fig[4, 4], xlabel="Longitude", ylabel="Latitude", width=600, height=400, title="(o): Original height data", titlealign=:left)
tricontourf!(ax, tri, elevation_data, levels=levels)
xlims!(ax, a, b)
ylims!(ax, c, d)
Colorbar(fig[1:4, 5], m1)
resize_to_layout!(fig)
return fig
end
e, f = 0.0, 4.5
fig = plot_results(sibson_vals, sibson_1_vals, laplace_vals, triangle_vals, nearest_vals, farin_vals, hiyoshi_vals, query_triangles, interpolant, a, b, c, d, e, f, nx, ny, ds_data, ds_triangles, ds_elevation_data, ds_tri)
```
```@raw html
<figure>
    <img src='../figures/swiss_heights_interpolated.png' alt='Switzerland Data Interpolated'><br>
</figure>
```
We see that the results are pretty similar across the methods except for `Nearest()`. We could compute the errors between the interpolant and the points that we removed from the dataset to quantify this better, but we won't do that --- we're not intending to do a comprehensive analysis here.
# Eliminating Points Outside of the Convex Hull
One issue with the interpolant is that the extrapolated results are distracting. Let's set `project=false` to remove values outside of the convex hull of our data sites.
```julia
sibson_vals_p = interpolant(x, y; method=Sibson(), parallel=true, project=false)
sibson_1_vals_p = interpolant(x, y; method=Sibson(1), parallel=true, project=false)
laplace_vals_p = interpolant(x, y; method=Laplace(), parallel=true, project=false)
triangle_vals_p = interpolant(x, y; method=Triangle(), parallel=true, project=false)
nearest_vals_p = interpolant(x, y; method=Nearest(), parallel=true, project=false)
farin_vals_p = interpolant(x, y; method=Farin(), parallel=true, project=false)
hiyoshi_vals_p = interpolant(x, y; method=Hiyoshi(2), parallel=true, project=false)
fig = plot_results(sibson_vals_p, sibson_1_vals_p, laplace_vals_p, triangle_vals_p, nearest_vals_p, farin_vals_p, hiyoshi_vals_p, query_triangles, interpolant, a, b, c, d, e, f, nx, ny, ds_data, ds_triangles, ds_elevation_data, ds_tri)
```
```@raw html
<figure>
    <img src='../figures/swiss_heights_interpolated_projected.png' alt='Switzerland Data Interpolated without Projection'><br>
</figure>
```
Of course, this is still not perfect because Switzerland is not convex! There's still points being extrapolated, and we have to handle this manually.
# Eliminating Points Outside of Switzerland
The function we need is `identify_exterior_points`, which we use together with a representation of the boundary of Switzerland (hence why we got the boundary nodes earlier). We replace all exterior values with `Inf` so that they don't get plotted (using `NaN` leads to issues with `surface!`'s shading for some reason).
```julia
exterior_idx = identify_exterior_points(x, y, ds_data_sites, ds_boundary_nodes)
sibson_vals_p[exterior_idx] .= Inf
sibson_1_vals_p[exterior_idx] .= Inf
laplace_vals_p[exterior_idx] .= Inf
triangle_vals_p[exterior_idx] .= Inf
nearest_vals_p[exterior_idx] .= Inf
farin_vals_p[exterior_idx] .= Inf
hiyoshi_vals_p[exterior_idx] .= Inf
fig = plot_results(sibson_vals_p, sibson_1_vals_p, laplace_vals_p, triangle_vals_p, nearest_vals_p, farin_vals_p, hiyoshi_vals_p, query_triangles, interpolant, a, b, c, d, e, f, nx, ny, ds_data, ds_triangles, ds_elevation_data, ds_tri)
```
```@raw html
<figure>
    <img src='../figures/swiss_heights_interpolated_projected_boundary.png' alt='Switzerland Data Interpolated Complete'><br>
</figure>
```
Perfect!
| NaturalNeighbours | https://github.com/DanielVandH/NaturalNeighbours.jl.git |
|
[
"MIT"
] | 2.4.0 | b98948c567cbe250d774d01a07833b7a329ec511 | code | 677 | using Documenter, SauterSchwabQuadrature
# Documentation build script for SauterSchwabQuadrature.jl (Documenter.jl).
# Set up doctests so every jldoctest block runs with the package already loaded.
DocMeta.setdocmeta!(SauterSchwabQuadrature, :DocTestSetup, :(using SauterSchwabQuadrature); recursive=true)
# Render the HTML documentation.
makedocs(;
    modules=[SauterSchwabQuadrature],
    sitename="SauterSchwabQuadrature.jl",
    format=Documenter.HTML(;
        # Pretty (directory-style) URLs only on CI; plain files are easier locally.
        prettyurls=get(ENV, "CI", "false") == "true",
        canonical="https://ga96tik.github.io/SauterSchwabQuadrature.jl",
        edit_link="master",
        assets=String[],
    ),
    pages=["Introduction" => "index.md", "Details" => "details.md", "Manual" => "manual.md", "API Reference" => "apiref.md"],
)
# Publish the rendered documentation to the repository's gh-pages branch.
deploydocs(; repo="github.com/ga96tik/SauterSchwabQuadrature.jl.git", devbranch="master")
| SauterSchwabQuadrature | https://github.com/ga96tik/SauterSchwabQuadrature.jl.git |
|
[
"MIT"
] | 2.4.0 | b98948c567cbe250d774d01a07833b7a329ec511 | code | 4002 | using CompScienceMeshes
using BEAST
using SauterSchwabQuadrature
using LinearAlgebra
τ = simplex(
point(-0.9423169199664047, 0.32954812598003336, -0.011695233370427325),
point(-0.9551397067786591, 0.2847288579921698, -0.00779682224695155),
point(-0.9238795325109631, 0.382683432365871, 3.0782976471689056e-18),
)
σ = simplex(
point(-0.9807852804031678, 0.19509032201644239, -7.632783294297951e-17),
point(-0.9238795325109631, 0.38268343236587105, -1.7210136298535816e-16),
point(-0.9551397067786591, 0.2847288579921698, -0.00779682224695155),
)
τ2 = simplex(
point(-0.9238795325109631, 0.382683432365871, 3.0782976471689056e-18),
point(-0.9423169199664047, 0.32954812598003336, -0.011695233370427325),
point(-0.9551397067786591, 0.2847288579921698, -0.00779682224695155),
)
σ2 = simplex(
point(-0.9238795325109631, 0.38268343236587105, -1.7210136298535816e-16),
point(-0.9807852804031678, 0.19509032201644239, -7.632783294297951e-17),
point(-0.9551397067786591, 0.2847288579921698, -0.00779682224695155),
)
τ3 = simplex(
point(-0.9551397067786591, 0.2847288579921698, -0.00779682224695155),
point(-0.9423169199664047, 0.32954812598003336, -0.011695233370427325),
point(-0.9238795325109631, 0.382683432365871, 3.0782976471689056e-18),
)
σ3 = simplex(
point(-0.9551397067786591, 0.2847288579921698, -0.00779682224695155),
point(-0.9807852804031678, 0.19509032201644239, -7.632783294297951e-17),
point(-0.9238795325109631, 0.38268343236587105, -1.7210136298535816e-16),
)
ϕ = BEAST.RTRefSpace{Float64}()
# Difference between the oscillatory kernel exp(-iκR)/(4πR) (κ = 1) and the
# static kernel 1/(4πR), where R = |x - y|. The operation order of the original
# is preserved, so the floating-point result is unchanged.
function integrand(x, y)
    κ = 1.0             # wavenumber
    R = norm(x - y)     # distance between the two evaluation points
    oscillatory = exp(-im * κ * R) / R / 4 / pi
    static = 1 / R / 4 / pi
    return oscillatory - static
end
# Pulled-back integrand on the reference domain: maps reference points û, v̂
# onto the triangles τ2/σ2, evaluates `integrand` at the resulting Cartesian
# points, and weights by both surface Jacobians and an EFIE-style combination
# of RT basis-function values and divergences.
# NOTE(review): reads the globals τ2, σ2 and ϕ defined earlier in this script.
# NOTE(review): the test side uses basis function index [1] while the trial
# side uses [2] — confirm this asymmetry is intended.
function INTEGRAND(û, v̂)
    n1 = neighborhood(τ2, û)
    n2 = neighborhood(σ2, v̂)
    x = cartesian(n1)
    y = cartesian(n2)
    fx = ϕ(n1)[1]
    gy = ϕ(n2)[2]
    return integrand(x, y) * jacobian(n1) * jacobian(n2) * (dot(fx.value, gy.value) - fx.divergence * gy.divergence)
end
results = []
for nodes in 1:20
ce = SauterSchwabQuadrature.CommonEdge(SauterSchwabQuadrature._legendre(nodes, 0.0, 1.0))
result = SauterSchwabQuadrature.sauterschwab_parameterized(INTEGRAND, ce)
push!(results, result)
end
errors = abs.((results .- results[end]) / results[end])
using Plots: Plots
Plots.plot(log10.(abs.(errors[1:(end - 1)])))
p1 = point(0.0, 0.0, 0.0) # the same for both quads
p2 = point(2.0, 0.0, 0.0) # the same for both quads
p3 = point(2.0, 2.0, 0.0)
p4 = point(0.0, 2.0, 0.0)
p3 = point(2.0, -2.0, 0.0) # choose points of second quad such that parametrizations align as required in [1]
p4 = point(0.0, -2.0, 0.0)
using StaticArrays
# Quadrilateral defined by its four corner points p1..p4.
# Parametrized on the coordinate element type so that all fields are concrete
# (`SVector{3}` alone is a UnionAll and would prevent specialization and make
# the struct non-isbits). Backward compatible: `Quadrilateral(p1, p2, p3, p4)`
# with four `SVector{3,Float64}` points works exactly as before.
struct Quadrilateral{T}
    p1::SVector{3,T}
    p2::SVector{3,T}
    p3::SVector{3,T}
    p4::SVector{3,T}
end
# Surface Jacobian (local area scaling) of the bilinear quadrilateral map at
# parameter point u = (u[1], u[2]): the norm of the cross product of the two
# tangent vectors, written out component-wise.
function jacobianDet(quad::Quadrilateral, u)
    aux = quad.p3 - quad.p4 + quad.p1 - quad.p2  # bilinear "twist" term of the map
    ∂ru = quad.p2 - quad.p1 + u[2] * aux         # tangent w.r.t. the first parameter
    ∂rv = quad.p4 - quad.p1 + u[1] * aux         # tangent w.r.t. the second parameter
    # D = |∂ru × ∂rv|², expanded component-wise.
    D = (∂ru[2] * ∂rv[3] - ∂ru[3] * ∂rv[2])^2 + (∂ru[3] * ∂rv[1] - ∂ru[1] * ∂rv[3])^2 + (∂ru[1] * ∂rv[2] - ∂ru[2] * ∂rv[1])^2
    return sqrt(D)
end
# Bilinear parametrization of the quadrilateral over (u, v) ∈ [0,1]²:
# corners map as (0,0) ↦ p1, (1,0) ↦ p2, (1,1) ↦ p3, (0,1) ↦ p4.
function (quad::Quadrilateral)(u, v)
    return quad.p1 + u * (quad.p2 - quad.p1) + v * (quad.p4 - quad.p1) + u * v * (quad.p3 - quad.p4 + quad.p1 - quad.p2) # see, e.g., [1] page 187
end
# Pair of parametrized quadrilaterals over which the weakly singular 1/R
# kernel is evaluated (see the call operator defined below).
struct singularKernel
    quad1::Quadrilateral
    quad2::Quadrilateral
end
# Evaluate the 1/|x - y| kernel between the two quadrilaterals at reference
# points u and v, including both surface Jacobians of the bilinear maps.
function (sK::singularKernel)(u, v)
    x = sK.quad1(u...)
    y = sK.quad2(v...)
    return jacobianDet(sK.quad1, u) * jacobianDet(sK.quad2, v) / norm(x - y)
end
q1 = Quadrilateral(p1, p2, p3, p4)
q2 = Quadrilateral(p1, p2, p3, p4)
sK = singularKernel(q1, q2)
results = []
for nodes in 1:20
ce = SauterSchwabQuadrature.CommonEdge(SauterSchwabQuadrature._legendre(nodes, 0.0, 1.0))
result = SauterSchwabQuadrature.sauterschwab_parameterized(sK, ce)
push!(results, result)
end
errors = abs.((results .- results[end]) / results[end])
Plots.plot(log10.(abs.(errors[1:(end - 1)])))
| SauterSchwabQuadrature | https://github.com/ga96tik/SauterSchwabQuadrature.jl.git |
|
[
"MIT"
] | 2.4.0 | b98948c567cbe250d774d01a07833b7a329ec511 | code | 970 | using CompScienceMeshes
using SauterSchwabQuadrature
pI = point(1, 5, 3)
pII = point(2, 5, 3)
pIII = point(7, 1, 0)
pIV = point(5, 1, -3)
Sourcechart = simplex(pII, pI, pIII)
Testchart = simplex(pII, pI, pIV)
Accuracy = 12
# Kernel: inner product of (x - pI) and (y - pII), weighted by the complex
# exponential kernel exp(-iR)/(4πR) with R = |x - y| (wavenumber 1).
# NOTE(review): reads the global points pI and pII defined earlier in this script.
function integrand(x, y)
    R = norm(x - y)  # computed once instead of twice; same operation order
    return ((x - pI)' * (y - pII)) * exp(-im * 1 * R) / (4pi * R)
end
result = sauterschwabintegral(Sourcechart, Testchart, integrand, Accuracy, Accuracy)
println(result)
#=For those who want to test the sauterschwab_nonparameterized() function,
may uncomment the following five lines=#
#sourcechart = simplex(pI,pIII,pII)
#testchart = simplex(pI,pIV,pII)
#ce = CommonEdge(Accuracy)
#result2 = sauterschwab_nonparameterized(sourcechart, testchart, integrand, ce)
#println(result2)
#=The first argument and the third argument of both simplex() functions are
equal, hence the required conditions are fulfilled. The user may also compare
the two results, and see that both are equal=#
| SauterSchwabQuadrature | https://github.com/ga96tik/SauterSchwabQuadrature.jl.git |
|
[
"MIT"
] | 2.4.0 | b98948c567cbe250d774d01a07833b7a329ec511 | code | 1044 | using CompScienceMeshes
using SauterSchwabQuadrature
Accuracy = 12
ce = CommonEdge(Accuracy)
pI = point(0, 0, 1)
# Kernel: inner product of (x - pI) and (y - pI), weighted by the complex
# exponential kernel exp(-iR)/(4πR), R = |x - y| (wavenumber 1).
# NOTE(review): reads the global point pI defined above.
function integrand(x, y)
    return (((x - pI)' * (y - pI)) * exp(-im * 1 * norm(x - y)) / (4pi * norm(x - y)))
end
function Sc(û)
u = [û[1] / (1 - û[2]), û[2]]#mapping from referencetriangle to square
return (u)
end
function Tc(v̂)
v = [v̂[1] / (1 - v̂[2]), v̂[2]]#mapping from referencetriangle to square
return (v)
end
# Integrand pulled back to the reference triangles: maps the reference
# coordinates through Sc/Tc to spherical angles, evaluates `integrand` on the
# unit sphere, and multiplies by the spherical surface elements and the
# Jacobians of the triangle-to-square maps. The +1 offset in ϕ1 places the
# second patch next to the first so the two share an edge (matching the
# CommonEdge strategy used in this script).
function INTEGRAND(û, v̂)
    ϴ = Sc(û)[1]
    ϕ = Sc(û)[2]
    ϴ1 = Tc(v̂)[1]
    ϕ1 = Tc(v̂)[2] + 1
    x = [sin(ϴ) * cos(ϕ), sin(ϴ) * sin(ϕ), cos(ϴ)] # spherical coordinates
    y = [sin(ϴ1) * cos(ϕ1), sin(ϴ1) * sin(ϕ1), cos(ϴ1)] # spherical coordinates
    output = integrand(x, y) * sin(ϴ) * sin(ϴ1) * (1 / (1 - û[2])) * (1 / (1 - v̂[2]))
    # sin(ϴ)*sin(ϴ1): surface element of spherical coordinates
    # (1/(1-û[2]))*(1/(1-v̂[2])): Jacobians of the two triangle-to-square maps
    return (output)
end
result = sauterschwab_parameterized(INTEGRAND, ce)
println(result)
| SauterSchwabQuadrature | https://github.com/ga96tik/SauterSchwabQuadrature.jl.git |
|
[
"MIT"
] | 2.4.0 | b98948c567cbe250d774d01a07833b7a329ec511 | code | 886 | using CompScienceMeshes
using SauterSchwabQuadrature
pI = point(1, 5, 3)
pII = point(2, 5, 3)
pIII = point(7, 1, 0)
Sourcechart = simplex(pI, pII, pIII)
Testchart = simplex(pII, pIII, pI)
Accuracy = 12
# Kernel: inner product of (x - pI) and (y - pII), weighted by the complex
# exponential kernel exp(-iR)/(4πR), R = |x - y| (wavenumber 1).
# NOTE(review): reads the global points pI and pII defined above.
function integrand(x, y)
    return (((x - pI)' * (y - pII)) * exp(-im * 1 * norm(x - y)) / (4pi * norm(x - y)))
end
result = sauterschwabintegral(Sourcechart, Testchart, integrand, Accuracy, Accuracy)
println(result)
#=For those who want to test the sauterschwab_nonparameterized() function,
may uncomment the following five lines=#
#sourcechart = simplex(pI,pII,pIII)
#testchart = simplex(pI,pII,pIII)
#cf = CommonFace(Accuracy)
#result2 = sauterschwab_nonparameterized(sourcechart, testchart, integrand, cf)
#println(result2)
#=sourcechart = testchart, hence the required condition is fulfilled.
The user may also compare the two results and see that both are equal=#
| SauterSchwabQuadrature | https://github.com/ga96tik/SauterSchwabQuadrature.jl.git |
|
[
"MIT"
] | 2.4.0 | b98948c567cbe250d774d01a07833b7a329ec511 | code | 1181 | using CompScienceMeshes
using SauterSchwabQuadrature
Accuracy = 20
cf = CommonFace(Accuracy)
pI = point(0, 0, 1)
# Kernel: inner product of (x - pI) and (y - pI), weighted by the complex
# exponential kernel exp(-iR)/(4πR), R = |x - y| (wavenumber 1).
# NOTE(review): reads the global point pI defined above.
function integrand(x, y)
    return (((x - pI)' * (y - pI)) * exp(-im * 1 * norm(x - y)) / (4pi * norm(x - y)))
    # (Replace the returned expression with 1 to run the pure-area sanity
    # check described in the comment at the bottom of this file.)
end
function Sc(û)
u = [û[1] / (1 - û[2]), û[2]]#mapping from referencetriangle to square
return (u)
end
function Tc(v̂)
v = [v̂[1] / (1 - v̂[2]), v̂[2]]#mapping from referencetriangle to square
return (v)
end
# Integrand pulled back to the reference triangles: maps the reference
# coordinates through Sc/Tc to spherical angles, evaluates `integrand` on the
# unit sphere, and multiplies by the spherical surface elements and the
# Jacobians of the triangle-to-square maps. Both patches use identical angles,
# so source and test domains coincide (matching the CommonFace strategy used
# in this script).
function INTEGRAND(û, v̂)
    ϴ = Sc(û)[1]
    ϕ = Sc(û)[2]
    ϴ1 = Tc(v̂)[1]
    ϕ1 = Tc(v̂)[2]
    x = [sin(ϴ) * cos(ϕ), sin(ϴ) * sin(ϕ), cos(ϴ)] # spherical coordinates
    y = [sin(ϴ1) * cos(ϕ1), sin(ϴ1) * sin(ϕ1), cos(ϴ1)] # spherical coordinates
    output = integrand(x, y) * sin(ϴ) * sin(ϴ1) * (1 / (1 - û[2])) * (1 / (1 - v̂[2]))
    # sin(ϴ)*sin(ϴ1): surface element of spherical coordinates
    # (1/(1-û[2]))*(1/(1-v̂[2])): Jacobians of the two triangle-to-square maps
    return (output)
end
result = sauterschwab_parameterized(INTEGRAND, cf)
println(result)
#proof of correctness: let integrand be 1, then the integration becomes a simple
#area integral which is equal to (-cos(1)+cos(0))^2
| SauterSchwabQuadrature | https://github.com/ga96tik/SauterSchwabQuadrature.jl.git |
|
[
"MIT"
] | 2.4.0 | b98948c567cbe250d774d01a07833b7a329ec511 | code | 975 | using CompScienceMeshes
using SauterSchwabQuadrature
pI = point(1, 5, 3)
pII = point(2, 5, 3)
pIII = point(7, 1, 0)
pIV = point(5, 1, -3)
pV = point(0, 0, 0)
Sourcechart = simplex(pI, pIII, pII)
Testchart = simplex(pV, pIV, pI)
Accuracy = 12
# Kernel: inner product of (x - pI) and (y - pV), weighted by the complex
# exponential kernel exp(-iR)/(4πR), R = |x - y| (wavenumber 1).
# NOTE(review): reads the global points pI and pV defined above.
function integrand(x, y)
    return (((x - pI)' * (y - pV)) * exp(-im * 1 * norm(x - y)) / (4pi * norm(x - y)))
end
result = sauterschwabintegral(Sourcechart, Testchart, integrand, Accuracy, Accuracy)
println(result)
#=For those who want to test the sauterschwab_nonparameterized() function,
may uncomment the following five lines=#
#sourcechart = simplex(pI,pIII,pII)
#testchart = simplex(pI,pIV,pV)
#cv = CommonVertex(Accuracy)
#result2 = sauterschwab_nonparameterized(sourcechart, testchart, integrand, cv)
#println(result2)
#=The common vertex is the first input argument of both simplex() functions,
hence the required condition is fulfilled. The user may also compare the two
results and see that both are equal=#
| SauterSchwabQuadrature | https://github.com/ga96tik/SauterSchwabQuadrature.jl.git |
|
[
"MIT"
] | 2.4.0 | b98948c567cbe250d774d01a07833b7a329ec511 | code | 1048 | using CompScienceMeshes
using SauterSchwabQuadrature
Accuracy = 12
cv = CommonVertex(Accuracy)
pI = point(0, 0, 1)
# Kernel: inner product of (x - pI) and (y - pI), weighted by the complex
# exponential kernel exp(-iR)/(4πR), R = |x - y| (wavenumber 1).
# NOTE(review): reads the global point pI defined above.
function integrand(x, y)
    return (((x - pI)' * (y - pI)) * exp(-im * 1 * norm(x - y)) / (4pi * norm(x - y)))
end
function Sc(û)
u = [û[1] / (1 - û[2]), û[2]]#mapping from referencetriangle to square
return (u)
end
function Tc(v̂)
v = [v̂[1] / (1 - v̂[2]), v̂[2]]#mapping from referencetriangle to square
return (v)
end
# Integrand pulled back to the reference triangles: maps the reference
# coordinates through Sc/Tc to spherical angles, evaluates `integrand` on the
# unit sphere, and multiplies by the spherical surface elements and the
# Jacobians of the triangle-to-square maps. The +1.5 offset in ϕ1 separates
# the patches in azimuth so they touch in a single point (matching the
# CommonVertex strategy used in this script — TODO confirm geometry).
function INTEGRAND(û, v̂)
    ϴ = Sc(û)[1]
    ϕ = Sc(û)[2]
    ϴ1 = Tc(v̂)[1]
    ϕ1 = Tc(v̂)[2] + 1.5
    x = [sin(ϴ) * cos(ϕ), sin(ϴ) * sin(ϕ), cos(ϴ)] # spherical coordinates
    y = [sin(ϴ1) * cos(ϕ1), sin(ϴ1) * sin(ϕ1), cos(ϴ1)] # spherical coordinates
    output = integrand(x, y) * sin(ϴ) * sin(ϴ1) * (1 / (1 - û[2])) * (1 / (1 - v̂[2]))
    # sin(ϴ)*sin(ϴ1): surface element of spherical coordinates
    # (1/(1-û[2]))*(1/(1-v̂[2])): Jacobians of the two triangle-to-square maps
    return (output)
end
result = sauterschwab_parameterized(INTEGRAND, cv)
println(result)
| SauterSchwabQuadrature | https://github.com/ga96tik/SauterSchwabQuadrature.jl.git |
|
[
"MIT"
] | 2.4.0 | b98948c567cbe250d774d01a07833b7a329ec511 | code | 1321 | using CompScienceMeshes
using SauterSchwabQuadrature
using LinearAlgebra
ô = point(0, 0, 0)
x̂ = point(1, 0, 0)
ŷ = point(0, 1, 0)
ẑ = point(0, 0, 1)
triangle1 = simplex(2x̂, 3ŷ, ô)
triangle2 = simplex(2x̂, 4x̂ + 3ŷ + ẑ, ô)
# Kernel evaluated between two CompScienceMeshes neighborhoods: currently the
# static kernel 1/R with R = |x - y|.
function fxy(p, q)
    x = cartesian(p)
    y = cartesian(q)
    n = normal(p)  # unused by the active kernel; kept for the alternate kernel below
    R = norm(x - y)
    1 / R
    # dot(n, x-y)/R^3   <- alternate kernel using the normal; swap with the line above
end
# Pull-back of fxy onto the reference domain: evaluates fxy at the images of
# the reference points u, v and multiplies by both surface Jacobians.
# NOTE(review): reads the globals triangle1 and triangle2 defined above.
function fuv(u, v)
    p = neighborhood(triangle1, u)
    q = neighborhood(triangle2, v)
    J = jacobian(p) * jacobian(q)
    return fxy(p, q) * J
end
nmax = 30
results_ce = zeros(nmax)
results_pd = zeros(nmax)
errors = zeros(nmax)
errors[1] = 1
for n in 1:nmax
ce = CommonEdge(SauterSchwabQuadrature._legendre(n, 0.0, 1.0))
pd = PositiveDistance(SauterSchwabQuadrature._legendre(n, 0.0, 1.0))
results_ce[n] = sauterschwab_parameterized(fuv, ce)
results_pd[n] = sauterschwab_parameterized(fuv, pd)
n > 1 && (errors[n] = abs(results_ce[n] - results_ce[n - 1]))
@show errors[n]
end
exact = results_ce[end]
@assert abs(errors[end]) < 1e-5
using Plots
plot(; title="convergence SS quadrature")
plot!(log10.(abs.(exact .- results_ce)); label="common edge [A,P1,B]-[A,P2,B]")
plot!(log10.(abs.(exact .- results_pd)); label="positive distance")
# plot!(log10.(abs.(exact .- results3)))
# plot!(log10.(abs.(exact .+ resultspd)))
| SauterSchwabQuadrature | https://github.com/ga96tik/SauterSchwabQuadrature.jl.git |
|
[
"MIT"
] | 2.4.0 | b98948c567cbe250d774d01a07833b7a329ec511 | code | 973 | using CompScienceMeshes
using SauterSchwabQuadrature
pI = point(1, 5, 3)
pII = point(2, 5, 3)
pIII = point(7, 1, 0)
pVI = point(10, 11, 12)
pVII = point(10, 11, 13)
pVIII = point(11, 11, 12)
Sourcechart = simplex(pI, pII, pIII)
Testchart = simplex(pVIII, pVII, pVI)
Accuracy = 12
# Kernel: inner product of (x - pI) and (y - pVII), weighted by the complex
# exponential kernel exp(-iR)/(4πR), R = |x - y| (wavenumber 1).
# NOTE(review): reads the global points pI and pVII defined above.
function integrand(x, y)
    return (((x - pI)' * (y - pVII)) * exp(-im * 1 * norm(x - y)) / (4pi * norm(x - y)))
end
result = sauterschwabintegral(Sourcechart, Testchart, integrand, Accuracy, Accuracy)
println(result)
#=For those who want to test the sauterschwab_nonparameterized() function,
may uncomment the following three lines=#
#pd = PositiveDistance(Accuracy)
#result2 = sauterschwab_nonparameterized(Sourcechart, Testchart, integrand, pd)
#println(result2)
#=In this case the two charts from above can be used and the order of the points
within the simplex() fucntions can be changed arbitrarily. The user may also
compare the two results and see that both are equal.=#
| SauterSchwabQuadrature | https://github.com/ga96tik/SauterSchwabQuadrature.jl.git |
|
[
"MIT"
] | 2.4.0 | b98948c567cbe250d774d01a07833b7a329ec511 | code | 1111 | using CompScienceMeshes
using SauterSchwabQuadrature
Accuracy = 12
pd = PositiveDistance(Accuracy)
pI = point(0, 0, -1)
pII = point(0, 0, 1)
# Kernel: inner product of (x - pI) and (y - pII), weighted by the complex
# exponential kernel exp(-iR)/(4πR), R = |x - y| (wavenumber 1).
# NOTE(review): reads the global points pI and pII defined above.
function integrand(x, y)
    return (((x - pI)' * (y - pII)) * exp(-im * 1 * norm(x - y)) / (4pi * norm(x - y)))
end
function Sc(û)
u = [(pi / 2) * û[1] / (1 - û[2]), û[2]]#mapping from referencetriangle to rectangle
return (u)
end
function Tc(v̂)
v = [v̂[1] / (1 - v̂[2]), v̂[2]]#mapping from referencetriangle to square
return (v)
end
# Integrand pulled back to the reference triangles: maps the reference
# coordinates through Sc/Tc to spherical angles, evaluates `integrand` on the
# unit sphere, and multiplies by the surface elements. The +π/2 shift in ϴ
# places the first patch on the opposite hemisphere from the second, so the
# domains are separated (matching the PositiveDistance strategy used in this
# script).
function INTEGRAND(û, v̂)
    ϴ = Sc(û)[1] + pi / 2
    ϕ = Sc(û)[2]
    ϴ1 = Tc(v̂)[1]
    ϕ1 = Tc(v̂)[2]
    x = [sin(ϴ) * cos(ϕ), sin(ϴ) * sin(ϕ), cos(ϴ)] # spherical coordinates
    y = [sin(ϴ1) * cos(ϕ1), sin(ϴ1) * sin(ϕ1), cos(ϴ1)] # spherical coordinates
    output = integrand(x, y) * sin(ϴ) * sin(ϴ1) * (pi / 2) * (1 / (1 - û[2])) * (1 / (1 - v̂[2]))
    # sin(ϴ)*sin(ϴ1): surface element of spherical coordinates
    # (pi/2)*(1/(1-û[2]))*(1/(1-v̂[2])): Jacobians of the first two mappings
    return (output)
end
result = sauterschwab_parameterized(INTEGRAND, pd)
println(result)
| SauterSchwabQuadrature | https://github.com/ga96tik/SauterSchwabQuadrature.jl.git |
|
[
"MIT"
] | 2.4.0 | b98948c567cbe250d774d01a07833b7a329ec511 | code | 488 | module SauterSchwabQuadrature
# -------- used packages
using LinearAlgebra
using StaticArrays
using TestItems
# -------- exported parts
# types (quadrature strategies for the different singularity configurations)
export SauterSchwabStrategy
export CommonFace, CommonEdge, CommonVertex, PositiveDistance
export CommonFaceQuad, CommonEdgeQuad, CommonVertexQuad
# functions
export sauterschwab_parameterized, reorder
# -------- included files
include("sauterschwabintegral.jl")
include("pulled_back_integrands.jl")
include("reorder_vertices.jl")
end
| SauterSchwabQuadrature | https://github.com/ga96tik/SauterSchwabQuadrature.jl.git |
|
[
"MIT"
] | 2.4.0 | b98948c567cbe250d774d01a07833b7a329ec511 | code | 4594 |
"""
(::CommonFace)(f, η1, η2, η3, ξ)
Regularizing coordinate transform for parametrization on the unit triangle.
Common face case.
"""
function (::CommonFace)(f, η1, η2, η3, ξ)
return (ξ^3) *
((η1)^2) *
(η2) *
(
f((1 - ξ, ξ - ξ * η1 + ξ * η1 * η2), (1 - (ξ - ξ * η1 * η2 * η3), ξ - ξ * η1)) +
f((1 - (ξ - ξ * η1 * η2 * η3), ξ - ξ * η1), (1 - ξ, ξ - ξ * η1 + ξ * η1 * η2)) +
f((1 - ξ, ξ * η1 * (1 - η2 + η2 * η3)), (1 - (ξ - ξ * η1 * η2), ξ * η1 * (1 - η2))) +
f((1 - (ξ - ξ * η1 * η2), ξ * η1 * (1 - η2)), (1 - ξ, ξ * η1 * (1 - η2 + η2 * η3))) +
f((1 - (ξ - ξ * η1 * η2 * η3), ξ * η1 * (1 - η2 * η3)), (1 - ξ, ξ * η1 * (1 - η2))) +
f((1 - ξ, ξ * η1 * (1 - η2)), (1 - (ξ - ξ * η1 * η2 * η3), ξ * η1 * (1 - η2 * η3)))
)
end
"""
(::CommonEdge)(f, η1, η2, η3, ξ)
Regularizing coordinate transform for parametrization on the unit triangle.
Common edge case.
"""
function (::CommonEdge)(f, η1, η2, η3, ξ)
ξη1 = ξ * η1
η1η2 = η1 * η2
η2η3 = η2 * η3
η1η2η3 = η1η2 * η3
# ξη2 = ξ * η2
# ξη3 = ξ * η3
return (ξ^3) * ((η1)^2) * f((1 - ξ, ξη1 * η3), (1 - ξ * (1 - η1η2), ξη1 * (1 - η2))) +
(ξ^3) *
((η1)^2) *
(η2) *
(
f((1 - ξ, ξη1), (1 - ξ * (1 - η1η2η3), ξη1 * η2 * (1 - η3))) +
f((1 - ξ * (1 - η1η2), ξη1 * (1 - η2)), (1 - ξ, ξη1 * η2η3)) +
f((1 - ξ * (1 - η1η2η3), ξη1 * η2 * (1 - η3)), (1 - ξ, ξη1)) +
f((1 - ξ * (1 - η1η2η3), ξη1 * (1 - η2η3)), (1 - ξ, ξη1 * η2))
)
end
"""
(::CommonVertex)(f, η1, η2, η3, ξ)
Regularizing coordinate transform for parametrization on the unit triangle.
Common vertex case.
"""
function (::CommonVertex)(f, η1, η2, η3, ξ)
ξη1 = ξ * η1
ξη2 = ξ * η2
return (ξ^3) * η2 * (f((1 - ξ, ξη1), (1 - ξη2, ξη2 * η3)) + f((1 - ξη2, ξη2 * η3), (1 - ξ, ξη1)))
end
"""
    (::PositiveDistance)(f, η1, η2, η3, ξ)

Coordinate transform for well-separated domains: each pair of unit-square
coordinates is mapped onto the reference triangle; the Jacobians of the two
maps are `1 - η2` and `1 - ξ`.
"""
function (::PositiveDistance)(f, η1, η2, η3, ξ)
    jac_u = 1 - η2
    jac_v = 1 - ξ
    u = [η1 * jac_u, η2]
    v = [η3 * jac_v, ξ]
    return jac_u * jac_v * (f(u, v))
end
"""
(::CommonFaceQuad)(f, η1, η2, η3, ξ)
Regularizing coordinate transform for parametrization on the unit square: [0,1]² ↦ Γ.
Common face case.
"""
function (::CommonFaceQuad)(f, η1, η2, η3, ξ)
ξη1 = ξ * η1 # auxiliary
mξ = (1 - ξ)# auxiliary
mξη1 = (1 - ξη1)# auxiliary
# only 4 different terms occur as argument:
mξη3 = mξ * η3
mξη3p = mξη3 + ξ
mξη1η2 = mξη1 * η2
mξη1η2p = mξη1η2 + ξη1
return ξ *
mξ *
mξη1 *
(
f((mξη3, mξη1η2), (mξη3p, mξη1η2p)) +
f((mξη1η2, mξη3), (mξη1η2p, mξη3p)) +
f((mξη3, mξη1η2p), (mξη3p, mξη1η2)) +
f((mξη1η2, mξη3p), (mξη1η2p, mξη3)) +
f((mξη3p, mξη1η2), (mξη3, mξη1η2p)) +
f((mξη1η2p, mξη3), (mξη1η2, mξη3p)) +
f((mξη3p, mξη1η2p), (mξη3, mξη1η2)) +
f((mξη1η2p, mξη3p), (mξη1η2, mξη3))
)
end
"""
(::CommonEdgeQuad)(f, η1, η2, η3, ξ)
Regularizing coordinate transform for parametrization on the unit square: [0,1]² ↦ Γ.
Common edge case.
"""
function (::CommonEdgeQuad)(f, η1, η2, η3, ξ)
ξη1 = ξ * η1 # occurs as argument (first two kernels calls)
ξη2 = ξ * η2 # occurs as argument (last four kernels calls)
mξ = (1 - ξ) # auxiliary
mξη1 = (1 - ξη1) # auxiliary
mξη3 = mξ * η3# occurs as argument (first two kernels calls)
mξη3p = mξη3 + ξ # occurs as argument (first two kernels calls)
mξη1η3 = mξη1 * η3# occurs as argument (last four kernels calls)
mξη1η3p = mξη1η3 + ξη1# occurs as argument (last four kernels calls)
return (ξ^2) * (
mξ * (f((mξη3p, ξη2), (mξη3, ξη1)) + f((mξη3, ξη2), (mξη3p, ξη1))) +
mξη1 * (
f((mξη1η3p, ξη2), (mξη1η3, ξ)) +
f((mξη1η3p, ξ), (mξη1η3, ξη2)) +
f((mξη1η3, ξη2), (mξη1η3p, ξ)) +
f((mξη1η3, ξ), (mξη1η3p, ξη2))
)
)
end
"""
(::CommonVertexQuad)(f, η1, η2, η3, ξ)
Regularizing coordinate transform for parametrization on the unit square: [0,1]² ↦ Γ.
Common vertex case.
"""
function (::CommonVertexQuad)(f, η1, η2, η3, ξ)
# only 4 different terms occur as argument (ξ is the fourth):
ξη1 = ξ * η1
ξη2 = ξ * η2
ξη3 = ξ * η3
return (ξ^3) * (f((ξ, ξη1), (ξη2, ξη3)) + f((ξη1, ξ), (ξη2, ξη3)) + f((ξη1, ξη2), (ξ, ξη3)) + f((ξη1, ξη2), (ξη3, ξ)))
end
| SauterSchwabQuadrature | https://github.com/ga96tik/SauterSchwabQuadrature.jl.git |
|
[
"MIT"
] | 2.4.0 | b98948c567cbe250d774d01a07833b7a329ec511 | code | 6169 | function reorder(t, s, strat::CommonVertex)
T = eltype(t[1])
tol = 1e3 * eps(T)
# tol = 1e5 * eps(T)
# tol = sqrt(eps(T))
# Find the permutation P of t and s that make
# Pt = [P, A1, A2]
# Ps = [P, B1, B2]
I = zeros(Int, 1)
J = zeros(Int, 1)
e = 1
for i in 1:3
v = t[i]
for j in 1:3
w = s[j]
if norm(w - v) < tol
I[e] = i
J[e] = j
e += 1
break
end
end
e == 2 && break
end
append!(I, setdiff([1, 2, 3], I))
append!(J, setdiff([1, 2, 3], J))
# # inverse permutations
# K = indexin([1,2,3], I)
# L = indexin([1,2,3], J)
K = zeros(Int, 3)
for i in 1:3
for j in 1:3
if I[j] == i
K[i] = j
break
end
end
end
L = zeros(Int, 3)
for i in 1:3
for j in 1:3
if J[j] == i
L[i] = j
break
end
end
end
return I, J, K, L
end
function reorder(t, s, strat::CommonEdge)
    T = eltype(t[1])
    tol = 1e3 * eps(T)  # geometric tolerance for deciding vertex coincidence
    # Locate the two shared vertices; I/J record their positions in t and s.
    I = zeros(Int, 3)
    J = zeros(Int, 3)
    e = 1
    for i in 1:3
        v = t[i]
        for j in 1:3
            w = s[j]
            if norm(w - v) < tol
                I[e] = i
                J[e] = j
                e += 1
                break
            end
        end
    end
    # Append the single unshared vertex of each triangle and rotate the cycles
    # into the order expected by the CommonEdge transform.
    I[3] = setdiff([1, 2, 3], I[1:2])[1]
    J[3] = setdiff([1, 2, 3], J[1:2])[1]
    I = circshift(I, -1)
    J = circshift(J, -1)
    # Inverse permutations (replaces the former hand-rolled nested loops).
    K = invperm(I)
    L = invperm(J)
    return I, J, K, L
end
function reorder(t, s, strat::CommonFace)
    T = eltype(t[1])
    tol = 1e3 * eps(T)  # geometric tolerance for deciding vertex coincidence
    # t keeps its order; J[i] is the position in s of the vertex matching t[i].
    I = [1, 2, 3]
    J = [-1, -1, -1]
    numhits = 0
    for (i, v) in pairs(t)
        for (j, w) in pairs(s)
            if norm(w - v) < tol
                J[i] = j
                numhits += 1
            end
        end
    end
    # Internal sanity checks: identical triangles must match in all vertices.
    @assert numhits == 3
    @assert all(J .!= -1)
    # Inverse permutations (replaces the former hand-rolled nested loops).
    K = invperm(I)
    L = invperm(J)
    return I, J, K, L
end
# Find the permutation P of t and s that make
#   Pt = [P, A1, A2, A3]
#   Ps = [P, B1, B2, B3]
function reorder(t, s, strat::CommonVertexQuad)
    T = eltype(t[1])
    tol = 1e3 * eps(T)  # geometric tolerance for deciding vertex coincidence
    I = zeros(Int, 1)
    J = zeros(Int, 1)
    e = 1
    for i in 1:4
        v = t[i]
        for j in 1:4
            w = s[j]
            if norm(w - v) < tol
                I[e] = i
                J[e] = j
                e += 1
                break
            end
        end
        e == 2 && break  # a single shared vertex suffices
    end
    # Rotate both index cycles so the shared vertex comes first.
    I = circshift([1, 2, 3, 4], 1 - I[1])
    J = circshift([1, 2, 3, 4], 1 - J[1])
    # Quadrilateral strategies do not use the inverse permutations.
    return I, J, nothing, nothing
end
@testitem "reorder CommonVertexQuad" begin
    # Two quadrilaterals sharing only the origin as a common vertex.
    t = [[0.0, 0.0, 0.0], [1.0, 0.0, 0.0], [1.0, 1.0, 0.0], [0.0, 1.0, 0.0]]
    s = [[0.0, -1.0, 0.0], [-1.0, -1.0, 0.0], [-1.0, 0.0, 0.0], [0.0, 0.0, 0.0]]
    quadPW = SauterSchwabQuadrature._legendre(10, 0, 1)
    strat = SauterSchwabQuadrature.CommonVertexQuad(quadPW)
    I, J, K, L = SauterSchwabQuadrature.reorder(t, s, strat)
    # Both permutations must bring the shared vertex to the front.
    @test I[1] == 1
    @test J[1] == 4
    @test t[I[1]] ≈ s[J[1]]
end
function reorder(t, s, strat::CommonEdgeQuad)
    T = eltype(t[1])
    tol = 1e3 * eps(T)  # geometric tolerance for deciding vertex coincidence
    I = zeros(Int, 2)
    J = zeros(Int, 2)
    e = 1
    for i in 1:4
        v = t[i]
        for j in 1:4
            w = s[j]
            if norm(w - v) < tol
                I[e] = i
                J[e] = j
                e += 1
                break
            end
        end
        e == 3 && break  # two shared vertices identify the common edge
    end
    # Orient each quadrilateral so the shared edge is traversed first; reverse
    # the cycle when the two hits are not adjacent in forward order.
    if mod1(I[1] + 1, 4) == I[2]
        I = circshift([1, 2, 3, 4], 1 - I[1])
    else
        I = circshift([4, 3, 2, 1], 1 - (5 - I[1]))
    end
    if mod1(J[1] + 1, 4) == J[2]
        J = circshift([1, 2, 3, 4], 1 - J[1])
    else
        J = circshift([4, 3, 2, 1], 1 - (5 - J[1]))
    end
    # Quadrilateral strategies do not use the inverse permutations.
    return I, J, nothing, nothing
end
@testitem "reorder CommonEdgeQuad" begin
    # Two quadrilaterals sharing the edge between (0,0,0) and (1,0,0).
    t = [[0.0, 0.0, 0.0], [1.0, 0.0, 0.0], [1.0, 1.0, 0.0], [0.0, 1.0, 0.0]]
    s = [[0.0, -1.0, 0.0], [1.0, -1.0, 0.0], [1.0, 0.0, 0.0], [0.0, 0.0, 0.0]]
    quadPW = SauterSchwabQuadrature._legendre(10, 0, 1)
    strat = SauterSchwabQuadrature.CommonEdgeQuad(quadPW)
    I, J, _, _ = SauterSchwabQuadrature.reorder(t, s, strat)
    # The shared edge must come first and run in the same direction.
    @test t[I[1]] ≈ s[J[1]]
    @test t[I[2]] ≈ s[J[2]]
end
function reorder(t, s, strat::CommonFaceQuad)
    # Identical quadrilaterals, possibly with a rotated/reflected vertex order:
    # keep t as-is and record, for every vertex of t, its position in s.
    T = eltype(eltype(t))
    tol = 1e3 * eps(T)
    I = collect(1:4)
    J = fill(-1, 4)
    matches = 0
    for (ti, vt) in pairs(t)
        for (sj, vs) in pairs(s)
            if norm(vs - vt) < tol
                J[ti] = sj
                matches += 1
            end
        end
    end
    # Every vertex must have found exactly one partner.
    @assert matches == 4
    @assert all(J .!= -1)
    return I, J, nothing, nothing
end
@testitem "reorder CommonFaceQuad" begin
    # Same quadrilateral with the vertex cycle rotated by two positions.
    t = [[0.0, 0.0, 0.0], [1.0, 0.0, 0.0], [1.0, 1.0, 0.0], [0.0, 1.0, 0.0]]
    s = [[1.0, 1.0, 0.0], [0.0, 1.0, 0.0], [0.0, 0.0, 0.0], [1.0, 0.0, 0.0]]
    quadPW = SauterSchwabQuadrature._legendre(10, 0, 1)
    strat = SauterSchwabQuadrature.CommonFaceQuad(quadPW)
    I, J, _, _ = SauterSchwabQuadrature.reorder(t, s, strat)
    @test J == [3, 4, 1, 2]
end
| SauterSchwabQuadrature | https://github.com/ga96tik/SauterSchwabQuadrature.jl.git |
|
[
"MIT"
] | 2.4.0 | b98948c567cbe250d774d01a07833b7a329ec511 | code | 2081 | using FastGaussQuadrature
# Supertype of all regularizing-transform strategies below.  Every concrete
# strategy stores `qps`, the 1D quadrature rule (a vector of (point, weight)
# tuples, e.g. produced by `_legendre`) applied along each integration axis.
abstract type SauterSchwabStrategy end
# --- triangle-triangle interaction cases ---
struct CommonFace{A} <: SauterSchwabStrategy
    qps::A
end
struct CommonEdge{A} <: SauterSchwabStrategy
    qps::A
end
struct CommonVertex{A} <: SauterSchwabStrategy
    qps::A
end
struct PositiveDistance{A} <: SauterSchwabStrategy
    qps::A
end
# --- quadrilateral-quadrilateral interaction cases ---
struct CommonFaceQuad{A} <: SauterSchwabStrategy
    qps::A
end
struct CommonEdgeQuad{A} <: SauterSchwabStrategy
    qps::A
end
struct CommonVertexQuad{A} <: SauterSchwabStrategy
    qps::A
end
function _legendre(n, a, b)
    # n-point Gauss-Legendre rule on the reference interval [-1, 1] ...
    nodes, weights = FastGaussQuadrature.gausslegendre(n)
    # ... affinely mapped onto [a, b].
    weights .*= (b - a) / 2
    nodes = (nodes .+ 1) / 2 * (b - a) .+ a
    return collect(zip(nodes, weights))
end
"""
    sauterschwab_parameterized(integrand, strategy::SauterSchwabStrategy)
Compute interaction integrals using the quadrature introduced in [1].
Here, `integrand` is the pull-back of the integrand into the parametric domain
of the two triangles that define the integration domain.
The second argument `strategy` is an object whose type is for triangles one of
- `CommonFace`
- `CommonEdge`
- `CommonVertex`
- `PositiveDistance`
and for quadrilaterals one of
- `CommonFaceQuad`
- `CommonEdgeQuad`
- `CommonVertexQuad`
according to the configuration of the two patches defining the domain of integration.
The constructors of these types take a single argument `qps`: the 1D quadrature
rule (a vector of `(point, weight)` tuples, e.g. produced by `_legendre`) that
is applied along each of the four axes of the final rectangular (ξ,η)
integration domain (see [1], Ch 5).
Note that here we use for a planar triangle the representation:
x = x[3] + u*(x[1]-x[3]) + v*(x[2]-x[3])
with `u` ranging from 0 to 1 and `v` ranging from 0 to 1-`u`. This parameter
domain and representation is different from the one used in [1].
[1] Sauter, S. and Schwab, C., "Boundary Element Methods", Springer Berlin Heidelberg, 2011
"""
function sauterschwab_parameterized(integrand, strategy::SauterSchwabStrategy)
    qps = strategy.qps
    # 4D tensor-product rule; the strategy callable applies the regularizing
    # transform and supplies the corresponding weight factors.
    sum(w1 * w2 * w3 * w4 * strategy(integrand, η1, η2, η3, ξ) for (η1, w1) in qps, (η2, w2) in qps, (η3, w3) in qps, (ξ, w4) in qps)
end
| SauterSchwabQuadrature | https://github.com/ga96tik/SauterSchwabQuadrature.jl.git |
|
[
"MIT"
] | 2.4.0 | b98948c567cbe250d774d01a07833b7a329ec511 | code | 457 |
# This file reproduces just enough functionality of BEAST to allow
# for meaningful testing of non-trivial user cases
# Minimal stand-in for BEAST's Raviart-Thomas (RT) reference space on a triangle.
struct RTRefSpace{T<:Real} end
# Evaluate the three local RT shape functions at the neighborhood `x`.
# Each entry pairs the vector value with the scalar d = 2/j
# (presumably the surface divergence of the shape function — confirm against BEAST).
function (f::RTRefSpace{T})(x) where {T}
    u, v = parametric(x)
    j = jacobian(x)
    tu = tangents(x, 1)
    tv = tangents(x, 2)
    d = 2 / j
    return SVector((((tu * (u - 1) + tv * v) / j, d), ((tu * u + tv * (v - 1)) / j, d), ((tu * u + tv * v) / j, d)))
end
# Three local degrees of freedom per triangle.
numfunctions(::RTRefSpace) = 3
| SauterSchwabQuadrature | https://github.com/ga96tik/SauterSchwabQuadrature.jl.git |
|
[
"MIT"
# Brute-force double quadrature reference: accumulate all pairwise
# shape-function interactions of the two charts into `out`.
function numquad(integrand, test_local_space, trial_local_space, test_chart, trial_chart, out)
    test_qps = quadpoints(test_chart, 10)
    trial_qps = quadpoints(trial_chart, 11)
    M = numfunctions(test_local_space)
    N = numfunctions(trial_local_space)
    for (x, wx) in test_qps
        fvals = test_local_space(x)
        for (y, wy) in trial_qps
            gvals = trial_local_space(y)
            G = integrand(x, y)
            ds = wx * wy
            out += SMatrix{M,N}([dot(fvals[i][1], G * gvals[j][1]) for i in 1:M, j in 1:N]) * ds
        end
    end
    return out
end
# Reference computation for the common-face case: for every outer quadrature
# point x, the trial chart is split into three sub-triangles meeting at x, and
# each sub-triangle is integrated with a product rule built from two 1D rules.
# The many @assert lines are deliberate self-checks of this verification code.
function numquad_cf(integrand, test_local_space, trial_local_space, test_chart, trial_chart, out)
    outer_qps = quadpoints(test_chart, 10)
    # 1D rules on the unit interval for the inner (ksi, eta) integration
    inner_ksi = quadpoints(simplex(point(0), point(1)), 13)
    inner_eta = quadpoints(simplex(point(0), point(1)), 13)
    @assert sum(w for (x, w) in inner_ksi) ≈ 1
    @assert sum(w for (x, w) in inner_eta) ≈ 1
    @assert sum(w for (p, w) in outer_qps) ≈ volume(test_chart)
    @assert sum(w1 * w2 * (1 - cartesian(p2)[1]) for (p1, w1) in inner_ksi for (p2, w2) in inner_eta) ≈ 0.5
    p1, p2, p3 = trial_chart.vertices
    M = numfunctions(test_local_space)
    N = numfunctions(trial_local_space)
    @assert volume(trial_chart) ≈ volume(test_chart)
    @assert M == N == 3
    check = 0.0
    for (x, wx) in outer_qps
        # split the trial chart at x into three sub-triangles
        s1 = simplex(p3, cartesian(x), p2)
        s2 = simplex(p1, cartesian(x), p3)
        s3 = simplex(p2, cartesian(x), p1)
        @assert volume(s1) + volume(s2) + volume(s3) ≈ volume(trial_chart)
        qq = carttobary(trial_chart, cartesian(x))
        @assert 0 <= qq[1] <= 1
        @assert 0 <= qq[2] <= 1
        @assert 0 <= 1 - qq[1] - qq[2] <= 1
        f = test_local_space(x)
        @assert length(f) == 3
        @assert f[1][1] isa SVector
        inner_check = 0.0
        for (ksi, wksi) in inner_ksi
            for (eta, weta) in inner_eta
                p = cartesian(ksi)[1]
                q = cartesian(eta)[1]
                @assert p isa Float64
                @assert q isa Float64
                # map the unit square onto the reference triangle; the factor
                # (1 - q) below is the Jacobian of this map
                u = p * (1 - q)
                v = q
                wuv = wksi * weta * (1 - cartesian(eta)[1])
                @assert 0 <= u <= 1
                @assert 0 <= v <= 1
                @assert 0 <= 1 - u - v <= 1
                y1 = neighborhood(s1, (u, v))
                y2 = neighborhood(s2, (u, v))
                y3 = neighborhood(s3, (u, v))
                u1 = carttobary(s1, cartesian(y1))
                u2 = carttobary(s2, cartesian(y2))
                u3 = carttobary(s3, cartesian(y3))
                @assert 0 <= u1[1] <= 1
                @assert 0 <= u1[2] <= 1
                @assert 0 <= u2[1] <= 1
                @assert 0 <= u2[2] <= 1
                @assert 0 <= u3[1] <= 1
                @assert 0 <= u3[2] <= 1
                # evaluate the trial space at the image points on the original chart
                g1 = trial_local_space(neighborhood(trial_chart, carttobary(trial_chart, cartesian(y1))))
                g2 = trial_local_space(neighborhood(trial_chart, carttobary(trial_chart, cartesian(y2))))
                g3 = trial_local_space(neighborhood(trial_chart, carttobary(trial_chart, cartesian(y3))))
                @assert length(g1) == 3
                @assert length(g2) == 3
                @assert length(g3) == 3
                @assert g1[1][1] isa SVector
                @assert g2[1][1] isa SVector
                @assert g3[1][1] isa SVector
                @assert jacobian(y1) >= 0
                @assert jacobian(y2) >= 0
                @assert jacobian(y3) >= 0
                @assert jacobian(y1) + jacobian(y2) + jacobian(y3) ≈ 2 * volume(test_chart)
                G1 = integrand(x, y1) * jacobian(y1)
                G2 = integrand(x, y2) * jacobian(y2)
                G3 = integrand(x, y3) * jacobian(y3)
                @assert wksi > 0
                @assert weta > 0
                @assert wx * wuv > 0
                check += wx * wuv
                inner_check += wuv
                out +=
                    wx * wuv * (SMatrix{M,N}([dot(f[i][1], (G1 * g1[j][1] + G2 * g2[j][1] + G3 * g3[j][1])) for i in 1:M, j in 1:N]))
            end
        end
        @assert inner_check ≈ 0.5
    end
    @assert check ≈ volume(test_chart) * 0.5
    out
end
| SauterSchwabQuadrature | https://github.com/ga96tik/SauterSchwabQuadrature.jl.git |
|
[
"MIT"
] | 2.4.0 | b98948c567cbe250d774d01a07833b7a329ec511 | code | 1132 |
"""
Generates the integrand for use in the Sauter-Schwab quadrature formulas.

    generate_integrand_uv(kernel, test_local_space, trial_local_space,
        test_chart, trial_chart)

`kernel` is a function that takes two neighborhoods `x` and `y` and returns a
scalar or dyadic value. `kernel` is the part of the integrand that is most
easily described in term of cartesian coordinates, that is non-separable in the
two arguments, and that does not depend on which of the local shape functions
is considered. In boundary element methods it is the Green function (fundamental
solution) of the underlying partial differential equations.
"""
function generate_integrand_uv(kernel, testref, trialref, testel, trialel)
    # Closure mapping parametric points (u on testel, v on trialel) to the
    # 3x3 matrix of shape-function interactions, including both Jacobians.
    function k3(u, v)
        x = neighborhood(testel, u)
        y = neighborhood(trialel, v)
        kernelval = kernel(x, y)
        f = testref(x)
        g = trialref(y)
        jx = jacobian(x)
        jy = jacobian(y)
        ds = jx * jy
        return SMatrix{3,3}([dot(f[i][1], kernelval * g[j][1]) * ds for i in 1:3, j in 1:3])
    end
    return k3
end
| SauterSchwabQuadrature | https://github.com/ga96tik/SauterSchwabQuadrature.jl.git |
|
[
"MIT"
] | 2.4.0 | b98948c567cbe250d774d01a07833b7a329ec511 | code | 1348 |
# ------------------------------------- Geometry definition with parametrization
# Planar quadrilateral given by its four corner points in circulation order.
# Parametric element type T makes the fields concrete (the original
# `SVector{3}` fields left the element type abstract, defeating inference).
struct Quadrilateral{T}
    p1::SVector{3,T}
    p2::SVector{3,T}
    p3::SVector{3,T}
    p4::SVector{3,T}
end
# parametrize planar quadrilateral with u, v ∈ [0,1]
function (quad::Quadrilateral)(u, v)
    # bilinear interpolation of the four corners, see, e.g., [1] page 187
    eu = quad.p2 - quad.p1                        # edge p1 -> p2
    ev = quad.p4 - quad.p1                        # edge p1 -> p4
    tw = quad.p3 - quad.p4 + quad.p1 - quad.p2    # bilinear "twist" term
    return quad.p1 + u * eu + v * ev + u * v * tw
end
# Jacobi determinant of parametrization
function jacobianDet(quad::Quadrilateral, u)
    # Surface element |∂r/∂u × ∂r/∂v| of the bilinear parametrization at the
    # parametric point u = (u[1], u[2]).
    tw = quad.p3 - quad.p4 + quad.p1 - quad.p2  # bilinear "twist" term
    ∂ru = quad.p2 - quad.p1 + u[2] * tw
    ∂rv = quad.p4 - quad.p1 + u[1] * tw
    # norm of the cross product replaces the previously hand-expanded formula
    return norm(∂ru × ∂rv)
end
# ------------------------------------- Kernel definitions
struct singularKernel
    # the two interacting patches
    quad1::Quadrilateral
    quad2::Quadrilateral
end
# Pulled-back weakly singular kernel 1/|x - y|, including both surface Jacobians.
function (sK::singularKernel)(u, v)
    x = sK.quad1(u...)
    y = sK.quad2(v...)
    return jacobianDet(sK.quad1, u) * jacobianDet(sK.quad2, v) / norm(x - y)
end
struct regularKernel
    # the two interacting patches
    quad1::Quadrilateral
    quad2::Quadrilateral
end
# Pulled-back smooth kernel |x - y|, including both surface Jacobians; used to
# check the transforms on a non-singular integrand.
function (rK::regularKernel)(u, v)
    x = rK.quad1(u...)
    y = rK.quad2(v...)
    return jacobianDet(rK.quad1, u) * jacobianDet(rK.quad2, v) * norm(x - y)
end
| SauterSchwabQuadrature | https://github.com/ga96tik/SauterSchwabQuadrature.jl.git |
|
[
"MIT"
] | 2.4.0 | b98948c567cbe250d774d01a07833b7a329ec511 | code | 1082 | using SauterSchwabQuadrature
using Test
using LinearAlgebra
using StaticArrays
using CompScienceMeshes
using JuliaFormatter
# --- testsets
@testset "Testing SauterSchwabQuadrature functionality" begin
    @testset "Triangular " begin
        # helper definitions shared by the triangle tests
        include("parametric_kernel_generator.jl")
        include("local_space.jl")
        include("numquad.jl")
        include("verificationintegral.jl")
        include("test_reordering.jl")
        # one file per singularity configuration
        include("test_cf_tr.jl")
        include("test_ce_tr.jl")
        include("test_cv_tr.jl")
        include("test_pd_tr.jl")
    end
    @testset "Quadrilateral " begin
        include("quadrilateral_defs.jl")
        include("test_cf_quad.jl")
        include("test_ce_quad.jl")
        include("test_cv_quad.jl")
    end
    @testset "Test formatting of files" begin
        pkgpath = pkgdir(SauterSchwabQuadrature) # path of this package including name
        @test format(pkgpath, overwrite=false) # check whether files are formatted according to the .JuliaFormatter.toml
    end
end
using TestItemRunner
# run all @testitem blocks defined throughout the package sources
@run_package_tests
| SauterSchwabQuadrature | https://github.com/ga96tik/SauterSchwabQuadrature.jl.git |
|
[
"MIT"
] | 2.4.0 | b98948c567cbe250d774d01a07833b7a329ec511 | code | 1861 |
@testset "Common Edge" begin
    # quadrilateral defined by four points
    p1 = SVector(0.0, 0.0, 0.0) # the same for both quads
    p2 = SVector(2.0, 0.0, 0.0) # the same for both quads
    p3 = SVector(2.0, 2.0, 0.0)
    p4 = SVector(0.0, 2.0, 0.0)
    q1 = Quadrilateral(p1, p2, p3, p4)
    p3 = SVector(2.0, -2.0, 0.0) # choose points of second quad such that parametrizations align as required in [1]
    p4 = SVector(0.0, -2.0, 0.0)
    q2 = Quadrilateral(p1, p2, p3, p4)
    @testset "Regular Kernel" begin
        # --- Kernel
        K = regularKernel(q1, q2)
        # --- Sauter Schwab strategy
        quadPW = SauterSchwabQuadrature._legendre(10, 0, 1) # quadpoints
        intSauSw = sauterschwab_parameterized(K, CommonEdgeQuad(quadPW)) # compute
        # --- Double quadrature (plain tensor-product rule works here because
        #     the integrand is smooth)
        qOut = SauterSchwabQuadrature._legendre(25, 0, 1)
        qOin = SauterSchwabQuadrature._legendre(25, 0, 1) # quadpoints
        intDQ = sum(w1 * w2 * w3 * w4 * K((η1, η2), (η3, ξ)) for (η1, w1) in qOut, (η2, w2) in qOut, (η3, w3) in qOin, (ξ, w4) in qOin) # compute
        # --- relative difference
        @test abs(intSauSw - intDQ) / intSauSw < 1e-7
    end
    @testset "Singular Kernel" begin
        # --- Kernel
        K = singularKernel(q1, q2)
        # --- Sauter Schwab strategy
        quadPW = SauterSchwabQuadrature._legendre(10, 0, 1) # quadpoints
        intSauSw = sauterschwab_parameterized(K, CommonEdgeQuad(quadPW)) # compute
        # --- Double quadrature (only a rough reference for the singular case)
        qOut = SauterSchwabQuadrature._legendre(10, 0, 1)
        qOin = SauterSchwabQuadrature._legendre(15, 0, 1) # quadpoints
        intDQ = sum(w1 * w2 * w3 * w4 * K((η1, η2), (η3, ξ)) for (η1, w1) in qOut, (η2, w2) in qOut, (η3, w3) in qOin, (ξ, w4) in qOin) # compute
        # --- relative difference
        @test abs(intSauSw - intDQ) / intSauSw < 0.2e-3
    end
end
| SauterSchwabQuadrature | https://github.com/ga96tik/SauterSchwabQuadrature.jl.git |
|
[
"MIT"
] | 2.4.0 | b98948c567cbe250d774d01a07833b7a329ec511 | code | 3493 |
@testset "Common Edge" begin
    # two triangles sharing the edge pI-pII
    pI = point(1, 5, 3)
    pII = point(2, 5, 3)
    pIII = point(7, 1, 0)
    pIV = point(5, 1, -3)
    Sourcechart = simplex(pI, pIII, pII)
    Testchart = simplex(pI, pIV, pII)
    Accuracy = 12
    ce = CommonEdge(SauterSchwabQuadrature._legendre(Accuracy, 0.0, 1.0))
    function integrand(x, y)
        return (((x - pI)' * (y - pII)) * exp(-im * 1 * norm(x - y)) / (4pi * norm(x - y)))
    end
    # pull-back of the integrand into the parametric domains of the two charts
    function INTEGRAND(û, v̂)
        n1 = neighborhood(Testchart, û)
        n2 = neighborhood(Sourcechart, v̂)
        x = cartesian(n1)
        y = cartesian(n2)
        output = integrand(x, y) * jacobian(n1) * jacobian(n2)
        return (output)
    end
    result = sauterschwab_parameterized(INTEGRAND, ce) - verifintegral2(Sourcechart, Testchart, integrand, Accuracy)
    @test norm(result) < 1.e-3
    # second scenario: RT shape functions with a 1/R kernel
    kernel(x, y) = 1 / norm(cartesian(x) - cartesian(y))
    t1 = simplex(@SVector[0.180878, -0.941848, -0.283207], @SVector[0.0, -0.92388, -0.382683], @SVector[0.0, -0.980785, -0.19509])
    t2 = simplex(@SVector[0.180878, -0.941848, -0.283207], @SVector[0.0, -0.92388, -0.382683], @SVector[0.158174, -0.881178, -0.44554])
    @test indexin(t1.vertices, t2.vertices) == [1, 2, nothing]
    rt = RTRefSpace{Float64}()
    igd = generate_integrand_uv(kernel, rt, rt, t1, t2)
    i5 = sauterschwab_parameterized(igd, CommonEdge(SauterSchwabQuadrature._legendre(5, 0.0, 1.0)))
    i10 = sauterschwab_parameterized(igd, CommonEdge(SauterSchwabQuadrature._legendre(10, 0.0, 1.0)))
    i15 = sauterschwab_parameterized(igd, CommonEdge(SauterSchwabQuadrature._legendre(15, 0.0, 1.0)))
    # brute numerical approach
    q1 = quadpoints(t1, 10)
    q2 = quadpoints(t2, 10)
    M = N = numfunctions(rt)
    iref = zero(i5)
    for (x, w1) in q1
        f = rt(x)
        for (y, w2) in q2
            g = rt(y)
            G = kernel(x, y)
            ds = w1 * w2
            iref += SMatrix{M,N}([dot(f[i][1], G * g[j][1]) * ds for i in 1:M, j in 1:N])
        end
    end
    #include(joinpath(dirname(@__FILE__,),"numquad.jl"))
    ibf = numquad(kernel, rt, rt, t1, t2, zero(i5))
    @test i5 ≈ iref atol = 1e-3
    @test i10 ≈ iref atol = 1e-3
    @test i10 ≈ ibf atol = 1e-3
    @test i10 ≈ i15 atol = 1e-5
    # Test the more (or less) singular case of the second kind kernel
    function kernel2nd(x, y)
        r = cartesian(x) - cartesian(y)
        R = norm(r)
        gradgreen = -r / R^3
        @SMatrix [
            0 -gradgreen[3] gradgreen[2]
            gradgreen[3] 0 -gradgreen[1]
            -gradgreen[2] gradgreen[1] 0
        ]
    end
    igd = generate_integrand_uv(kernel2nd, rt, rt, t1, t2)
    i10 = sauterschwab_parameterized(igd, CommonEdge(SauterSchwabQuadrature._legendre(10, 0.0, 1.0)))
    i15 = sauterschwab_parameterized(igd, CommonEdge(SauterSchwabQuadrature._legendre(15, 0.0, 1.0)))
    i20 = sauterschwab_parameterized(igd, CommonEdge(SauterSchwabQuadrature._legendre(20, 0.0, 1.0)))
    iref = numquad(kernel2nd, rt, rt, t1, t2, zero(i15))
    # # Compare to BEAST:
    # tqd = CompScienceMeshes.quadpoints(rt, [t1], (12,))
    # bqd = CompScienceMeshes.quadpoints(rt, [t2], (13,))
    #
    # SE_strategy = BEAST.WiltonSEStrategy(
    #     tqd[1,1],
    #     BEAST.DoubleQuadStrategy(
    #         tqd[1,1],
    #         bqd[1,1]))
    #
    # op = BEAST.MWDoubleLayer3D(0.0)
    # z2 = zeros(3,3)
    # BEAST.momintegrals!(op, rt, rt, t1, t2, z2, SE_strategy)
end
| SauterSchwabQuadrature | https://github.com/ga96tik/SauterSchwabQuadrature.jl.git |
|
[
"MIT"
] | 2.4.0 | b98948c567cbe250d774d01a07833b7a329ec511 | code | 1668 |
@testset "Common Face" begin
    # quadrilateral defined by four points; self-interaction (common face)
    p1 = SVector(0.0, 0.0, 0.0)
    p2 = SVector(2.0, 0.0, 0.0)
    p3 = SVector(2.0, 2.0, 0.0)
    p4 = SVector(0.0, 2.0, 0.0)
    q1 = Quadrilateral(p1, p2, p3, p4)
    @testset "Regular Kernel" begin
        # --- Kernel
        K = regularKernel(q1, q1)
        # --- Sauter Schwab strategy
        quadPW = SauterSchwabQuadrature._legendre(10, 0, 1) # quadpoints
        intSauSw = sauterschwab_parameterized(K, CommonFaceQuad(quadPW)) # compute
        # --- Double quadrature
        qOut = SauterSchwabQuadrature._legendre(25, 0, 1)
        qOin = SauterSchwabQuadrature._legendre(25, 0, 1) # quadpoints
        intDQ = sum(w1 * w2 * w3 * w4 * K((η1, η2), (η3, ξ)) for (η1, w1) in qOut, (η2, w2) in qOut, (η3, w3) in qOin, (ξ, w4) in qOin) # compute
        # --- relative difference
        @test abs(intSauSw - intDQ) / intSauSw < 1e-4
    end
    @testset "Singular Kernel" begin
        # --- Kernel
        K = singularKernel(q1, q1)
        # --- Sauter Schwab strategy
        quadPW = SauterSchwabQuadrature._legendre(10, 0, 1) # quadpoints
        intSauSw = sauterschwab_parameterized(K, CommonFaceQuad(quadPW)) # compute
        # --- Double quadrature (poor reference in the self-patch case, hence
        #     the loose tolerance below)
        qOut = SauterSchwabQuadrature._legendre(10, 0, 1)
        qOin = SauterSchwabQuadrature._legendre(15, 0, 1) # quadpoints
        intDQ = sum(w1 * w2 * w3 * w4 * K((η1, η2), (η3, ξ)) for (η1, w1) in qOut, (η2, w2) in qOut, (η3, w3) in qOin, (ξ, w4) in qOin) # compute
        # --- relative difference
        @test abs(intSauSw - intDQ) / intSauSw < 0.025 # probably replace by analytical solution someday
    end
end
| SauterSchwabQuadrature | https://github.com/ga96tik/SauterSchwabQuadrature.jl.git |
|
[
"MIT"
] | 2.4.0 | b98948c567cbe250d774d01a07833b7a329ec511 | code | 2208 |
@testset "Common Face" begin
    # self-interaction of a single triangle (common face case)
    pI = point(1, 5, 3)
    pII = point(2, 5, 3)
    pIII = point(7, 1, 0)
    Sourcechart = Testchart = simplex(pI, pII, pIII)
    Accuracy = 12
    cf = CommonFace(SauterSchwabQuadrature._legendre(Accuracy, 0.0, 1.0))
    function integrand(x, y)
        return (((x - pI)' * (y - pII)) * exp(-im * 1 * norm(x - y)) / (4pi * norm(x - y)))
    end
    # pull-back of the integrand into the parametric domains of the two charts
    function INTEGRAND(û, v̂)
        n1 = neighborhood(Testchart, û)
        n2 = neighborhood(Sourcechart, v̂)
        x = cartesian(n1)
        y = cartesian(n2)
        output = integrand(x, y) * jacobian(n1) * jacobian(n2)
        return (output)
    end
    result = sauterschwab_parameterized(INTEGRAND, cf) - verifintegral1(Sourcechart, Testchart, integrand, Accuracy)
    @test norm(result) < 1.e-3
    #include(joinpath(dirname(@__FILE__),"numquad.jl"))
    # Test the use of SauterSchwabQuadrature with the kernel generator utility
    # using BEAST
    using CompScienceMeshes
    using StaticArrays
    t1 = simplex(@SVector[0.180878, -0.941848, -0.283207], @SVector[0.0, -0.980785, -0.19509], @SVector[0.0, -0.92388, -0.382683])
    rt = RTRefSpace{Float64}()
    kernel_cf(x, y) = 1 / norm(cartesian(x) - cartesian(y))
    igd = generate_integrand_uv(kernel_cf, rt, rt, t1, t1)
    t1 = simplex(@SVector[0.180878, -0.941848, -0.283207], @SVector[0.0, -0.980785, -0.19509], @SVector[0.0, -0.92388, -0.382683])
    i5 = sauterschwab_parameterized(igd, CommonFace(SauterSchwabQuadrature._legendre(5, 0.0, 1.0)))
    i10 = sauterschwab_parameterized(igd, CommonFace(SauterSchwabQuadrature._legendre(10, 0.0, 1.0)))
    iref = numquad_cf(kernel_cf, rt, rt, t1, t1, zero(i5))
    # # BEAST will arbitrate
    # tqd = BEAST.quadpoints(rt, [t1], (12,))
    # bqd = BEAST.quadpoints(rt, [t1], (13,))
    #
    # SE_strategy = BEAST.WiltonSEStrategy(
    #     tqd[1,1],
    #     BEAST.DoubleQuadStrategy(
    #         tqd[1,1],
    #         bqd[1,1],
    #     ),
    # )
    #
    # op = BEAST.MWSingleLayer3D(0.0, 4.0π, 0.0)
    # z2 = zeros(3,3)
    # BEAST.momintegrals!(op, rt, rt, t1, t1, z2, SE_strategy)
    #
    # @test i5 ≈ iref atol=1e-3
    # @test i10 ≈ iref atol=1e-3
    # @test i10 ≈ i5 atol=1e-6
end
| SauterSchwabQuadrature | https://github.com/ga96tik/SauterSchwabQuadrature.jl.git |
|
[
"MIT"
] | 2.4.0 | b98948c567cbe250d774d01a07833b7a329ec511 | code | 1874 |
@testset "Common Vertex" begin
    # quadrilateral defined by four points
    p1 = SVector(0.0, 0.0, 0.0) # the same for both quads
    p2 = SVector(2.0, 0.0, 0.0)
    p3 = SVector(2.0, 2.0, 0.0)
    p4 = SVector(0.0, 2.0, 0.0)
    q1 = Quadrilateral(p1, p2, p3, p4)
    p2 = SVector(-2.0, 0.0, 0.0) # choose points of second quad such that parametrizations align as required in [1]
    p3 = SVector(-2.0, -2.0, 0.0)
    p4 = SVector(0.0, -2.0, 0.0)
    q2 = Quadrilateral(p1, p2, p3, p4)
    @testset "Regular Kernel" begin
        # --- Kernel
        K = regularKernel(q1, q2)
        # --- Sauter Schwab strategy
        quadPW = SauterSchwabQuadrature._legendre(10, 0, 1) # quadpoints
        intSauSw = sauterschwab_parameterized(K, CommonVertexQuad(quadPW)) # compute
        # --- Double quadrature
        qOut = SauterSchwabQuadrature._legendre(25, 0, 1)
        qOin = SauterSchwabQuadrature._legendre(25, 0, 1) # quadpoints
        intDQ = sum(w1 * w2 * w3 * w4 * K((η1, η2), (η3, ξ)) for (η1, w1) in qOut, (η2, w2) in qOut, (η3, w3) in qOin, (ξ, w4) in qOin) # compute
        # --- relative difference
        @test abs(intSauSw - intDQ) / intSauSw < 1e-13
    end
    @testset "Singular Kernel" begin
        # --- Kernel
        K = singularKernel(q1, q2)
        # --- Sauter Schwab strategy
        quadPW = SauterSchwabQuadrature._legendre(10, 0, 1) # quadpoints
        intSauSw = sauterschwab_parameterized(K, CommonVertexQuad(quadPW)) # compute
        # --- Double quadrature
        qOut = SauterSchwabQuadrature._legendre(15, 0, 1)
        qOin = SauterSchwabQuadrature._legendre(15, 0, 1) # quadpoints
        intDQ = sum(w1 * w2 * w3 * w4 * K((η1, η2), (η3, ξ)) for (η1, w1) in qOut, (η2, w2) in qOut, (η3, w3) in qOin, (ξ, w4) in qOin) # compute
        # --- relative difference
        @test abs(intSauSw - intDQ) / intSauSw < 1e-7
    end
end
| SauterSchwabQuadrature | https://github.com/ga96tik/SauterSchwabQuadrature.jl.git |
|
[
"MIT"
] | 2.4.0 | b98948c567cbe250d774d01a07833b7a329ec511 | code | 2190 |
@testset "Common Vertex" begin
    # two triangles sharing only the vertex pI
    pI = point(1, 5, 3)
    pII = point(2, 5, 3)
    pIII = point(7, 1, 0)
    pIV = point(5, 1, -3)
    pV = point(0, 0, 0)
    Sourcechart = simplex(pI, pIII, pII)
    Testchart = simplex(pI, pIV, pV)
    Accuracy = 12
    cv = CommonVertex(SauterSchwabQuadrature._legendre(Accuracy, 0.0, 1.0))
    function integrand(x, y)
        return (((x - pI)' * (y - pV)) * exp(-im * 1 * norm(x - y)) / (4pi * norm(x - y)))
    end
    # pull-back of the integrand into the parametric domains of the two charts
    function INTEGRAND(û, v̂)
        n1 = neighborhood(Testchart, û)
        n2 = neighborhood(Sourcechart, v̂)
        x = cartesian(n1)
        y = cartesian(n2)
        output = integrand(x, y) * jacobian(n1) * jacobian(n2)
        return (output)
    end
    result = sauterschwab_parameterized(INTEGRAND, cv) - verifintegral2(Sourcechart, Testchart, integrand, Accuracy)
    @test norm(result) < 1.e-3
    # second scenario: RT shape functions with a 1/R kernel
    kernel(x, y) = 1 / norm(cartesian(x) - cartesian(y))
    t1 = simplex(@SVector[0.180878, -0.941848, -0.283207], @SVector[0.0, -0.980785, -0.19509], @SVector[0.0, -0.92388, -0.382683])
    t2 = simplex(
        @SVector[0.180878, -0.941848, -0.283207], @SVector[0.373086, -0.881524, -0.289348], @SVector[0.294908, -0.944921, -0.141962]
    )
    @test indexin(t1.vertices, t2.vertices) == [1, nothing, nothing]
    rt = RTRefSpace{Float64}()
    igd = generate_integrand_uv(kernel, rt, rt, t1, t2)
    i5 = sauterschwab_parameterized(igd, CommonVertex(SauterSchwabQuadrature._legendre(5, 0.0, 1.0)))
    i10 = sauterschwab_parameterized(igd, CommonVertex(SauterSchwabQuadrature._legendre(10, 0.0, 1.0)))
    # brute numerical approach
    q1 = quadpoints(t1, 10)
    q2 = quadpoints(t2, 10)
    M = N = numfunctions(rt)
    iref = zero(i5)
    for (x, w1) in q1
        f = rt(x)
        for (y, w2) in q2
            g = rt(y)
            G = kernel(x, y)
            ds = w1 * w2
            iref += SMatrix{M,N}([dot(f[i][1], G * g[j][1]) * ds for i in 1:M, j in 1:N])
        end
    end
    #include(joinpath(dirname(@__FILE__,),"numquad.jl"))
    ibf = numquad(kernel, rt, rt, t1, t2, zero(i5))
    @test i5 ≈ iref atol = 1e-7
    @test i10 ≈ iref atol = 1e-7
    @test i10 ≈ ibf atol = 1e-8
end
| SauterSchwabQuadrature | https://github.com/ga96tik/SauterSchwabQuadrature.jl.git |
|
[
"MIT"
] | 2.4.0 | b98948c567cbe250d774d01a07833b7a329ec511 | code | 931 |
@testset "Positive Distance" begin
    # two well-separated triangles (regular integrand everywhere)
    pI = point(1, 5, 3)
    pII = point(2, 5, 3)
    pIII = point(7, 1, 0)
    pVI = point(10, 11, 12)
    pVII = point(10, 11, 13)
    pVIII = point(11, 11, 12)
    Sourcechart = simplex(pI, pII, pIII)
    Testchart = simplex(pVI, pVII, pVIII)
    Accuracy = 12
    pd = PositiveDistance(SauterSchwabQuadrature._legendre(Accuracy, 0.0, 1.0))
    function integrand(x, y)
        return (((x - pI)' * (y - pVII)) * exp(-im * 1 * norm(x - y)) / (4pi * norm(x - y)))
    end
    # pull-back of the integrand into the parametric domains of the two charts
    function INTEGRAND(û, v̂)
        n1 = neighborhood(Testchart, û)
        n2 = neighborhood(Sourcechart, v̂)
        x = cartesian(n1)
        y = cartesian(n2)
        output = integrand(x, y) * jacobian(n1) * jacobian(n2)
        return (output)
    end
    result = sauterschwab_parameterized(INTEGRAND, pd) - verifintegral2(Sourcechart, Testchart, integrand, Accuracy)
    @test norm(result) < 1.e-3
end
| SauterSchwabQuadrature | https://github.com/ga96tik/SauterSchwabQuadrature.jl.git |
|
[
"MIT"
] | 2.4.0 | b98948c567cbe250d774d01a07833b7a329ec511 | code | 1449 |
@testset "Reordering" begin
    # The second constructor argument of each strategy is irrelevant for
    # `reorder`, hence the dummy value 1.
    @testset "Common Face" begin
        p11 = point(1, 0, 0)
        p12 = point(3, 0, 1)
        p13 = point(3, 2, 1)
        Sourcechart = SVector(p11, p12, p13)
        Testchart = SVector(p13, p12, p11)
        I, J, K, L = reorder(Testchart, Sourcechart, CommonFace(1))
        @test I == [1, 2, 3]
        @test J == [3, 2, 1]
        @test K == [1, 2, 3]
        @test L == [3, 2, 1]
    end
    @testset "Common Edge" begin
        p11 = point(1, 0, 0)
        p12 = point(3, 0, 0)
        p13 = point(3, 2, 0)
        p21 = point(0, 0, 0)
        p22 = point(1, 0, 0)
        p23 = point(3, 2, 0)
        Sourcechart = SVector(p11, p12, p13)
        Testchart = SVector(p21, p22, p23)
        I, J, K, L = reorder(Testchart, Sourcechart, CommonEdge(1))
        @test I == [3, 1, 2]
        @test J == [3, 2, 1]
        @test K == [2, 3, 1]
        @test L == [3, 2, 1]
    end
    @testset "Common Vertex" begin
        p11 = point(1, 0, 0)
        p12 = point(3, 0, 0)
        p13 = point(3, 2, 0)
        p21 = point(0, 0, 0)
        p22 = point(1, 0, 0) # common point
        p23 = point(0, 1, 2)
        Sourcechart = SVector(p11, p12, p13)
        Testchart = SVector(p21, p22, p23)
        I, J, K, L = reorder(Testchart, Sourcechart, CommonVertex(1))
        @test I == [2, 1, 3]
        @test J == [1, 2, 3]
        @test K == [2, 1, 3]
        @test L == [1, 2, 3]
    end
end
| SauterSchwabQuadrature | https://github.com/ga96tik/SauterSchwabQuadrature.jl.git |
|
[
"MIT"
] | 2.4.0 | b98948c567cbe250d774d01a07833b7a329ec511 | code | 1588 | function k(α, β, γ)
    # Inner integrand for verifintegral1: evaluates Kernel on the three
    # sub-triangles formed by the outer quadrature point x̂ and the source
    # chart vertices p0, p1, p2.
    # NOTE(review): reads the non-const globals Kernel, p0, p1, p2 that are
    # set by verifintegral1 — confirm before reusing this helper elsewhere.
    x̂ = cartesian(α)
    ξ = cartesian(β)[1]
    η = cartesian(γ)[1]
    ŷ = [ξ * (1 - η), η]
    #jacobian of y--->ŷ = (1-y[2])
    chartI = simplex(p1, x̂, p0)
    chartII = simplex(p2, x̂, p1)
    chartIII = simplex(p0, x̂, p2)
    n1 = neighborhood(chartI, ŷ)
    n2 = neighborhood(chartII, ŷ)
    n3 = neighborhood(chartIII, ŷ)
    yI = cartesian(n1)
    yII = cartesian(n2)
    yIII = cartesian(n3)
    # the common factor (1 - η) is the Jacobian of the (ξ, η) ↦ ŷ map
    return (
        Kernel(x̂, yI) * jacobian(n1) * (1 - η) + Kernel(x̂, yII) * jacobian(n2) * (1 - η) + Kernel(x̂, yIII) * jacobian(n3) * (1 - η)
    )
end
# Verification integral for the common-face case (singular integrand): the
# inner integral is computed by the helper `k` via sub-triangle splitting.
function verifintegral1(
    sourcechart::CompScienceMeshes.Simplex{3,2,1,3,Float64},
    testchart::CompScienceMeshes.Simplex{3,2,1,3,Float64},
    integrand,
    accuracy::Int64,
)
    # NOTE(review): communicates with `k` through non-const globals; also,
    # `testchart` is unused here — kept for interface symmetry with
    # verifintegral2.
    global Kernel, p0, p1, p2
    Kernel = integrand
    p0 = sourcechart.vertices[1]
    p1 = sourcechart.vertices[2]
    p2 = sourcechart.vertices[3]
    qps1 = quadpoints(sourcechart, accuracy)
    # 1D rule on the unit interval for the two inner parameters of `k`
    path = simplex(point(0), point(1))
    qps2 = quadpoints(path, accuracy)
    result = sum(w * w2 * w1 * k(α, β, γ) for (β, w1) in qps2, (γ, w2) in qps2, (α, w) in qps1)
    return (result)
end
"""
    verifintegral2(sourcechart, testchart, integrand, accuracy)

Evaluate the double surface integral of `integrand` with a plain tensor-product
Gauss quadrature: `accuracy`-order quadrature points on `testchart` (outer) and
on `sourcechart` (inner), with no regularizing transform.
"""
function verifintegral2(
    sourcechart::CompScienceMeshes.Simplex{3,2,1,3,Float64},
    testchart::CompScienceMeshes.Simplex{3,2,1,3,Float64},
    integrand,
    accuracy::Int64,
)
    source_qp = quadpoints(sourcechart, accuracy)
    test_qp = quadpoints(testchart, accuracy)
    # Weighted double sum over every pair of test/source quadrature points.
    return sum(
        wx * wy * integrand(cartesian(x), cartesian(y)) for (x, wx) in test_qp, (y, wy) in source_qp
    )
end
| SauterSchwabQuadrature | https://github.com/ga96tik/SauterSchwabQuadrature.jl.git |
|
[
"MIT"
] | 2.4.0 | b98948c567cbe250d774d01a07833b7a329ec511 | docs | 817 |
# SauterSchwabQuadrature.jl
[](https://github.com/ga96tik/SauterSchwabQuadrature.jl/actions)
[](http://codecov.io/github/ga96tik/SauterSchwabQuadrature.jl?branch=master)
[](https://ga96tik.github.io/SauterSchwabQuadrature.jl/dev/)
Implementation of the Sauter-Schwab regularizing coordinate transformations [1] for the computation of 4D integrals with Cauchy-singular integral kernels via numerical quadrature.
# References
[1] Sauter S. Schwab C., "Boundary Element Methods (Springer Series in Computational Mathematics)", Chapter 5, Springer, 2010.
| SauterSchwabQuadrature | https://github.com/ga96tik/SauterSchwabQuadrature.jl.git |
|
[
"MIT"
] | 2.4.0 | b98948c567cbe250d774d01a07833b7a329ec511 | docs | 7218 | # Non-Parameterized
The called function in this implementation looks like:
`sauterschwabintegral(sourcechart, testchart, integrand, accuracy, accuracy_pd)`.
`sourcechart` and `testchart` can be created by
`testchart = simplex(P1,P2,P3); sourcechart = simplex(P4,P5,P6)`.
The order of the input arguments within the `simplex()` function does not matter.
`simplex()` generates the mapping and needs input arguments of type `SVector{3,Float64}`; the points P1 to P6 can be created by
`P = point(x,y,z)`.
`x`, `y` and `z` are the coordinates of that particular point and `point()` creates a position vector which is of type `SVector{3,Float64}`.
The `integrand` must be defined as a function with two input arguments; the input arguments must be 3D vectors. The name of this function is then passed as the `integrand` argument.
Later on, the last argument `accuracy` will be discussed.
Since `simplex()` and `point()` are functions of CompScienceMeshes, CompScienceMeshes does not just have to be installed on the user's machine, but also be available in the current workspace; the same applies for this package as well. The two packages can be made available by
`using SauterSchwabQuadrature` and `using CompScienceMeshes`.
These two commands must always be run at the beginning when using this type of implementation.
`sauterschwabintegral()` first modifies `testchart` and `sourcechart` with respect to the order of the arguments, within their `simplex()` functions. Secondly, depending on how many vertices both charts have in common, it generates an object of some type that contains the information of the accuracy and the integration strategy. After all of this has been done, this function will call another function with input arguments of the two modified charts, the original integrand and that new object.
To understand the arguments `accuracy` and `accuracy_pd`, as well as the examples stored in the examples folder, the function called internally will be presented next:
## Integration
According to item 1 on the homepage, four different constellations of the two triangles are possible:
* Equal triangles ``\to`` Common Face
* Two vertices in common ``\to`` Common Edge
* One vertex in common ``\to`` Common Vertex
* Both triangles do not touch at all ``\to`` Positive Distance

As each of those four constellations has its own integration method (because of a possible singularity in the kernel), the function `sauterschwabintegral()` has to call another function that handles the situation suitably; hence, it has four methods.
If `sauterschwabintegral()` has to deal with one of the first three cases, the two area integrals will be transformed into four 1D integrals from zero to one; `accuracy` gives the number of quadrature points on that integration path and is therefore a positive `Int64`. If `sauterschwabintegral()` has to deal with the last case, `accuracy_pd` (again a positive `Int64`) will be considered instead. It selects the rule determining how many quadrature points are created on both triangles. `accuracy_pd` =
* 1 ``\to`` 1
* 2 ``\to`` 3
* 3 ``\to`` 4
* 4 ``\to`` 6
* 5 ``\to`` 7
* 6 ``\to`` 12
* 7 ``\to`` 13
* 8 ``\to`` 36
* 9 ``\to`` 79
* 10 ``\to`` 105
* 11 ``\to`` 120
* 12 ``\to`` 400
* 13 ``\to`` 900
quadrature point(s) is(are) created on each triangle.
The user is now able to understand the examples in the '...non_parameterized.jl' files, or rather their titles. The order of the points within the two `simplex()` functions of `Sourcechart` and `Testchart` can be changed arbitrarily, the result will always remain the same. For those, who are interested in the 'called function', or want to skip `sauterschwabintegral()` and call the integration directly, which is actually only a sorting process, may read on now.
The called function by `sauterschwabintegral()` is:
`sauterschwab_nonparameterized(sourcechart, testchart, integrand, method)`.
`sourcechart` and `testchart` are the modified versions of the original charts; `integrand` is the same as at the beginning, and `method` is the created object. The type of `method` determines which method of `sauterschwab_nonparameterized` is selected. The four methods are listed below:
### Common Face
``\Gamma`` and ``\Gamma'`` are equal; hence, `sourcechart` and `testchart` are equal as well. The two charts have to be created by
`testchart = sourcechart = simplex(P1,P2,P3)`;
where `P1`, `P2` and `P3` are the vertices of that particular triangle. Note that both charts must be equal, which means that the first argument of both charts must be equal, the second argument of both charts must be equal, and the last argument of both charts must be equal.
The last argument can be created by
`cf = CommonFace(x)`.
`cf` is an object of type `CommonFace()`; x is the number of quadrature points on the integration path ``[0,1]``.
An example for this case can be found at the end of the common_face_non_parameterized.jl file in the examples folder.
### Common Edge
``\Gamma`` and ``\Gamma'`` are now different; hence, `sourcechart` and `testchart` are different as well. The two charts have to be created in the following manner:
`testchart = simplex(P1,P2,P3); sourcechart = simplex(P1,P4,P3)`.
Again, the order of the input arguments must be taken into account: The first argument of both charts must be equal, and the last argument of both charts must be equal. Consequently, the first and the last argument are the vertices which both triangles have in common.
The last argument can be created by
`ce = CommonEdge(x)`.
`ce` is an object of type `CommonEdge()`; x is the number of quadrature points on the integration path ``[0,1]``.
An example for this case can be found at the end of the common_edge_non_parameterized.jl file in the examples folder.
### Common Vertex
The two triangles and charts are again different. The two charts have to be created in the following manner:
`sourcechart = simplex(P1,P2,P3); testchart = simplex(P1,P4,P5)`.
Again, the order of the input arguments must be taken into account: the first argument of both charts must be equal; the order of `P2` and `P3` within `sourcechart`, and of `P4` and `P5` within `testchart`, does not matter. Consequently, the first argument is the vertex both triangles have in common.
The last argument is created by
`cv = CommonVertex(x)`.
`cv` is an object of type `CommonVertex()`; x is the number of quadrature points on the integration path ``[0,1]``.
An example for this case can be found at the end of the common_vertex_non_parameterized.jl file in the examples folder.
### Positive Distance
As the triangles do not touch at all, the integration can directly be calculated with Gauss´s quadrature. Therefore, the order of the arguments within the two `simplex()` functions do not matter.
The last argument can be created by
`pd = PositiveDistance(x)`.
`pd` is an object of type `PositiveDistance()`; x is the rule of how many quadrature points are created on both triangles.
An example for this case can be found at the end of the positive_distance_non_parameterized.jl file in the examples folder.
| SauterSchwabQuadrature | https://github.com/ga96tik/SauterSchwabQuadrature.jl.git |
|
[
"MIT"
] | 2.4.0 | b98948c567cbe250d774d01a07833b7a329ec511 | docs | 4046 | # Parameterized
The called function in this implementation looks like:
`sauterschwab_parameterized(integrand, method)`.
As already mentioned on the homepage, the user now has to parameterize the integration areas himself; this means that `integrand` is no longer the original function to be integrated; `integrand` is now the parameterized version of the original integrand, including the surface elements of both charts.
Before the parameterizations/charts (parameterization = chart) are built, the user has to figure out which integration method should be applied, and decide how accurate the integration shall be done. It is recommended, that the user read the page 'Non-Parameterized' before continuing to read here. Because otherwise, he may not be able to apply the concepts of 'integration method' and 'accuracy'.
The parameterization of the sourcetriangle will be called ``\chi_t``, and the parameterization of the testtriangle will be called ``\chi_\tau``. In the following, the parameterization of every single integration method will be presented.
## Common Face
``\Gamma`` and ``\Gamma'`` are equal, and both parameterizations must be equal as well: ``\chi_t(u',v') = \chi_\tau(u,v)``.

The user's task is to find a parameterization which maps the reference triangle (right) onto the real triangle (left). The reference triangle is throughout this package always the same.
The original integrand, which is a function of ``\textbf{x}`` and ``\textbf{y}``, becomes:
```math
f(\chi_\tau(u,v),\chi_t(u',v')) \cdot \|\frac{\partial \chi_\tau}{\partial u}\times\frac{\partial \chi_\tau}{\partial v}\| \cdot\|\frac{\partial \chi_t}{\partial u'}\times\frac{\partial \chi_t}{\partial v'}\|
```.
This function method as well as the following methods, transform the two area integrals in parameters domain into four 1D integrals from zero to one; therefore, the last argument is created by
`cf = CommonFace(x)`.
`cf` is an object of type `CommonFace()`; x is the number of quadrature points on the integration path ``[0,1]``.
An example for this case can be found in the common_face_parameterized.jl file in the examples folder.
## Common Edge
``\Gamma`` and ``\Gamma'`` have an edge in common, and both parameterizations must fulfill the condition ``\chi_t(s,0) = \chi_\tau(s,0)``. For example, this condition could be met if the points ``(u\in[0,1];0)`` and ``(u'\in[0,1];0)`` are mapped on the same point on the common edge.

The modified integrand looks like in the case Common Face.
The last argument can be created by
`ce = CommonEdge(x)`.
`ce` is an object of type `CommonEdge()`; x is the number of quadrature points on the integration path ``[0,1]``.
An example for this case can be found in the common_edge_parameterized.jl file in the examples folder.
## Common Vertex
``\Gamma`` and ``\Gamma'`` have one vertex in common, and both parameterizations must fulfill the condition ``\chi_t(0,0) = \chi_\tau(0,0)``. This condition means that the origin of both reference triangles is mapped onto the common vertex.

The modified integrand looks like in the case Common Face.
The last argument can be created by
`cv = CommonVertex(x)`.
`cv` is an object of type `CommonVertex()`; x is the number of quadrature points on the integration path ``[0,1]``.
An example for this case can be found in the common_vertex_parameterized.jl file in the examples folder.
## Positive Distance
The two triangles do not touch at all, and both parameterizations only need to map from the reference triangle onto the real triangle.

The modified integrand looks like in the case Common Face.
The last argument can be created by
`pd = PositiveDistance(x)`.
`pd` is an object of type `PositiveDistance()`; x is the number of quadrature points on the integration path ``[0,1]``.
An example for this case can be found in the positive_distance_parameterized.jl file in the examples folder.
| SauterSchwabQuadrature | https://github.com/ga96tik/SauterSchwabQuadrature.jl.git |
|
[
"MIT"
] | 2.4.0 | b98948c567cbe250d774d01a07833b7a329ec511 | docs | 74 |
# API Reference
```@autodocs
Modules = [SauterSchwabQuadrature]
``` | SauterSchwabQuadrature | https://github.com/ga96tik/SauterSchwabQuadrature.jl.git |
|
[
"MIT"
] | 2.4.0 | b98948c567cbe250d774d01a07833b7a329ec511 | docs | 2369 |
# Details
The integrals of the form
```math
\int_{\Gamma}\int_{\Gamma'}b_i(\bm{x})\,k(\bm{x},\bm{y})\, b_j(\bm{y})\,\mathrm{d}S(\bm{y})\,\mathrm{d}S(\bm{x})
```
are solved based on relative coordinates.
## Relative Coordinates
It is assumed that by suitable parameter transforms
```math
\chi_\tau: \hat\tau \mapsto \tau
```
with ``\hat\tau = (u,v) \in [0,1]^2`` and
```math
\chi_t: \hat t \mapsto t
```
with ``\hat t = (u',v') \in [0,1]^2`` from the reference element (triangle or square) to the actual ones, the integral is brought into the form
```math
\iint_{\hat \tau}\iint_{\hat t} k'(\chi_\tau(u,v), \chi_t(u',v')) \, \mathrm{d}u \mathrm{d}v \mathrm{d}u' \mathrm{d}u'
```
where ``k'(\bm{x},\bm{y})`` contains ``k``, ``b_i``, ``b_j``, and the Jacobi determinant of the parametrization.
!!! tip
The parametrizations for triangles and quadrilaterals are commonly employed for the integration without regularizations, as well.
Hence, they are often already available.
For the regularizing parametertransform according to [1] four cases are distinguished.
### Common Face Case
``\Gamma`` and ``\Gamma'`` are equal, and both parameterizations must be equal, that is, ``\chi_t = \chi_\tau``.
```@raw html
<div align="center">
<img src="../assets/CommonFace.jpg" width="600"/>
</div>
<br/>
```
### Common Edge Case
``\Gamma`` and ``\Gamma'`` have an edge in common, and both parameterizations must fulfill the condition ``\chi_t(s,0) = \chi_\tau(s,0)``. For example, this condition could be met if the points ``(u\in[0,1];0)`` and ``(u'\in[0,1];0)`` are mapped on the same point on the common edge.
```@raw html
<div align="center">
<img src="../assets/CommonEdge.jpg" width="600"/>
</div>
<br/>
```
### Common Vertex Case
``\Gamma`` and ``\Gamma'`` have one vertex in common, and both parameterizations must fulfill the condition ``\chi_t(0,0) = \chi_\tau(0,0)``.
This condition means that the origin of both reference triangles is mapped onto the common vertex.
```@raw html
<div align="center">
<img src="../assets/CommonVertex.jpg" width="600"/>
</div>
<br/>
```
### Positive Distance Case
The two triangles do not touch at all, and both parameterizations only need to map from the reference triangle onto the real triangle.
```@raw html
<div align="center">
<img src="../assets/PositiveDistance.jpg" width="600"/>
</div>
<br/>
```
| SauterSchwabQuadrature | https://github.com/ga96tik/SauterSchwabQuadrature.jl.git |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.