| licenses (sequence, lengths 1-3) | version (string, 677 values) | tree_hash (string, length 40) | path (string, 1 value) | type (string, 2 values) | size (string, lengths 2-8) | text (string, lengths 25-67.1M) | package_name (string, lengths 2-41) | repo (string, lengths 33-86) |
---|---|---|---|---|---|---|---|---|
[
"MIT"
] | 0.17.2 | e856eef26cf5bf2b0f95f8f4fc37553c72c8641c | code | 266 | using HDF5
using Test
@testset "ros3" begin
@test HDF5.has_ros3()
h5open(
"http://s3.us-east-2.amazonaws.com/hdf5ros3/GMODO-SVM01.h5";
driver=HDF5.Drivers.ROS3()
) do f
@test keys(f) == ["All_Data", "Data_Products"]
end
end
| HDF5 | https://github.com/JuliaIO/HDF5.jl.git |
|
[
"MIT"
] | 0.17.2 | e856eef26cf5bf2b0f95f8f4fc37553c72c8641c | code | 2952 | # Work around JuliaLang/Pkg.jl#2500
if VERSION < v"1.8-"
test_project = first(Base.load_path())
preferences_file = "../LocalPreferences.toml"
test_preferences_file = joinpath(dirname(test_project), "LocalPreferences.toml")
if isfile(preferences_file) && !isfile(test_preferences_file)
cp(preferences_file, test_preferences_file)
@info "copied LocalPreferences.toml to $test_preferences_file"
end
end
using HDF5
using Test
using Pkg
filter_path = joinpath(dirname(pathof(HDF5)), "..", "filters")
if !Base.BinaryPlatforms.CPUID.test_cpu_feature(Base.BinaryPlatforms.CPUID.JL_X86_avx2)
Pkg.add(PackageSpec(; name="Blosc_jll", version=v"1.21.2+0"))
end
Pkg.develop([
PackageSpec(; path=joinpath(filter_path, "H5Zblosc")),
PackageSpec(; path=joinpath(filter_path, "H5Zbzip2")),
PackageSpec(; path=joinpath(filter_path, "H5Zlz4")),
PackageSpec(; path=joinpath(filter_path, "H5Zzstd")),
])
@static if VERSION >= v"1.6"
Pkg.develop(PackageSpec(; path=joinpath(filter_path, "H5Zbitshuffle")))
end
@info "libhdf5 v$(HDF5.API.h5_get_libversion())"
# To debug HDF5.jl tests, uncomment the next line
# ENV["JULIA_DEBUG"] = "Main"
@testset "HDF5.jl" begin
@debug "plain"
include("plain.jl")
@debug "create_dataset"
include("create_dataset.jl")
@debug "strings"
include("strings.jl")
@debug "api"
include("api.jl")
@debug "compound"
include("compound.jl")
@debug "custom"
include("custom.jl")
@debug "reference"
include("reference.jl")
@debug "dataspace"
include("dataspace.jl")
@debug "datatype"
include("datatype.jl")
@debug "hyperslab"
include("hyperslab.jl")
@debug "attributes"
include("attributes.jl")
@debug "readremote"
include("readremote.jl")
@debug "extend_test"
include("extend_test.jl")
@debug "gc"
include("gc.jl")
@debug "external"
include("external.jl")
@debug "swmr"
include("swmr.jl")
@debug "mmap"
include("mmap.jl")
@debug "properties"
include("properties.jl")
@debug "table"
include("table.jl")
@debug "filter"
include("filter.jl")
@debug "chunkstorage"
include("chunkstorage.jl")
@debug "fileio"
include("fileio.jl")
@debug "nonallocating"
include("nonallocating.jl")
@debug "filter test utils"
include("filters/FilterTestUtils.jl")
@debug "objects"
include("objects.jl")
# `h5d_get_space` seems to be broken for virtual datasets for libhdf5 1.10,
# see https://github.com/JuliaIO/HDF5.jl/pull/1061#issuecomment-1571009149
if HDF5.API.h5_get_libversion() >= v"1.12"
@debug "virtual datasets"
include("virtual_dataset.jl")
end
# basic MPI tests, for actual parallel tests we need to run in MPI mode
include("mpio.jl")
if HDF5.has_ros3()
include("ros3.jl")
end
# Clean up after all resources
HDF5.API.h5_close()
end
| HDF5 | https://github.com/JuliaIO/HDF5.jl.git |
|
[
"MIT"
] | 0.17.2 | e856eef26cf5bf2b0f95f8f4fc37553c72c8641c | code | 1067 | using Test
using HDF5
@testset "Strings" begin
# Check creation of variable length string by passing String
fn = tempname()
h5open(fn, "w") do f
ds = create_dataset(f, "strings", String, (4,))
ds[1] = "Hello"
ds[2] = "Hi"
ds[3] = "Bonjour"
ds[4] = GenericString("Hola")
end
h5open(fn, "r") do f
ds = f["strings"]
@test ds[1] == "Hello"
@test ds[2] == "Hi"
@test ds[3] == "Bonjour"
@test ds[4] == "Hola"
end
rm(fn)
# Check multiple assignment
h5open(fn, "w") do f
ds = create_dataset(f, "strings2", String, (3,))
ds[:] = "Guten tag"
end
h5open(fn, "r") do f
ds = f["strings2"]
@test ds[1] == "Guten tag"
@test ds[2] == "Guten tag"
@test ds[3] == "Guten tag"
end
rm(fn)
# Check assignment to a scalar dataset
h5open(fn, "w") do f
ds = write_dataset(f, "string", GenericString("Hi"))
end
h5open(fn) do f
@test f["string"][] == "Hi"
end
rm(fn)
end
| HDF5 | https://github.com/JuliaIO/HDF5.jl.git |
|
[
"MIT"
] | 0.17.2 | e856eef26cf5bf2b0f95f8f4fc37553c72c8641c | code | 4050 | # following https://support.hdfgroup.org/HDF5/doc/RM/RM_H5F.html#File-StartSwmrWrite
# and https://support.hdfgroup.org/HDF5/docNewFeatures/SWMR/HDF5_SWMR_Users_Guide.pdf
using HDF5
using Test
using Distributed
if nprocs() == 1
procs = addprocs(1)
else
procs = Int64[]
end
@everywhere using HDF5
@testset "swmr" begin
fname = tempname()
@testset "swmr modes" begin
h5open(fname, "w"; swmr=true) do h5
h5["foo"] = collect(1:10)
end
h5open(fname, "r"; swmr=true) do h5
@test read(h5["foo"]) == collect(1:10)
end
h5open(fname, "r+"; swmr=true) do h5
@test read(h5["foo"]) == collect(1:10)
end
end
@testset "h5d_oappend" begin
h5open(fname, "w") do h5
g = create_group(h5, "shoe")
d = create_dataset(g, "bar", datatype(Float64), ((1,), (-1,)); chunk=(100,))
dxpl_id = HDF5.get_create_properties(d)
v = [1.0, 2.0]
memtype = datatype(Float64)
# @test HDF5.h5d_oappend(d, dxpl_id, 0, length(v), memtype, v)
end
end
function dataset_write(d, ch_written, ch_read)
for i in 1:10
@assert take!(ch_read) == true
HDF5.set_extent_dims(d, (i * 10,))
inds::UnitRange{Int} = (1:10) .+ (i - 1) * 10
d[inds] = inds
flush(d) # flush the dataset
put!(ch_written, i)
end
end
@everywhere function dataset_read(d, ch_written, ch_read)
n = nlast = length(d)
nbigger = 0
i = 0
put!(ch_read, true)
while n < 100
i = take!(ch_written)
for j in 1:1000 # wait for new data to be available to avoid CI failures
HDF5.refresh(d)
nlast, n = n, length(d)
n > nlast && break
sleep(0.001)
end
vals = read(d)
@assert vals == collect(1:n)
n > nlast && (nbigger += 1)
put!(ch_read, true)
end
return nbigger
end
@everywhere function swmr_reader(fname, ch_written, ch_read)
h5open(fname, "r"; swmr=true) do h5
d = h5["foo"]
dataset_read(d, ch_written, ch_read)
end
end
# Spawn a reader function in a 2nd process and provide two channels for synchronization.
# Run a writing function in this process. The writing function writes, then notifies
# `ch_written`; the reading function reads, then notifies `ch_read`. So read attempts
# should always follow writes, though there may be a delay before the data is available,
# so the reader sleeps in a loop until new data appears.
function remote_test(h5)
ch_written, ch_read = RemoteChannel(1), RemoteChannel(1)
a = @spawn(swmr_reader(fname, ch_written, ch_read))
dataset_write(h5["foo"], ch_written, ch_read)
nbigger = fetch(a)
@test nbigger == 10
end
# create datasets and attributes before starting swmr writing
function prep_h5_file(h5)
d = create_dataset(h5, "foo", datatype(Int), ((1,), (100,)); chunk=(1,))
attributes(h5)["bar"] = "bar"
g = create_group(h5, "group")
end
@testset "create by libver, then start_swmr_write" begin
#test this h5open method with keyword arg
h5open(fname, "w"; libver_bounds=(:latest, :latest), swmr=false) do h5
prep_h5_file(h5)
HDF5.start_swmr_write(h5) # after creating datasets
remote_test(h5)
end
end
@testset "create by swmr mode, then close and open again" begin
h5open(fname, "w"; swmr=true) do h5
prep_h5_file(h5)
end
# close the file after creating datasets, open again with swmr write access but not truncate
h5open(fname, "r+"; swmr=true) do h5
remote_test(h5)
end
end
rm(fname) # cleanup file created by swmr tests
if nprocs() > 1
rmprocs(procs)
end
end # testset swmr
| HDF5 | https://github.com/JuliaIO/HDF5.jl.git |
|
[
"MIT"
] | 0.17.2 | e856eef26cf5bf2b0f95f8f4fc37553c72c8641c | code | 1378 | using HDF5
using Test
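# Exercise the high-level H5TB table API: build a two-column Float64 table,
# then append, overwrite, and read back records.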
hf = h5open(tempname(), "w")
fv = 3.14
data = [1.0, 2.0, 3.0, 4.0, 5.0, 6.0]
floatsize = sizeof(data[1])
h5t = datatype(data[1])
title = "lal"
name = "mym"
nfield = 2
nrec = 3
recsize = nfield * floatsize
colname = ["f1_verylongnameforfun", "f2"]
offset = [0, floatsize]
tid = [h5t.id, h5t.id]
chunk = 7
fillvalue = [3.14, 2.71]
compress = 1
HDF5.API.h5tb_make_table(
title,
hf,
name,
nfield,
nrec,
recsize,
colname,
offset,
tid,
chunk,
fillvalue,
compress,
data
)
fieldsize = [floatsize, floatsize]
HDF5.API.h5tb_append_records(hf, name, nrec, recsize, offset, fieldsize, data)
HDF5.API.h5tb_write_records(
hf, name, 1, 4, recsize, offset, fieldsize, collect(1:8) .+ 20.0
)
buf = fill(0.0, 100)
HDF5.API.h5tb_read_table(hf, name, recsize, offset, fieldsize, buf)
@test buf[1:12] == [1.0, 2.0, 21.0, 22.0, 23.0, 24.0, 25.0, 26.0, 27.0, 28.0, 5.0, 6.0]
buf .= 0.0
HDF5.API.h5tb_read_records(hf, name, 2, 3, recsize, offset, fieldsize, buf)
@test buf[1:6] == collect(23:28)
h5_nfields, h5_nrec = HDF5.API.h5tb_get_table_info(hf, name)
@test h5_nfields == nfield
@test h5_nrec == 6
h5_colname, h5_fieldsize, h5_offset, h5_recsize = HDF5.API.h5tb_get_field_info(hf, name)
@test h5_colname == colname
@test h5_fieldsize == fieldsize
@test h5_offset == offset
@test h5_recsize == recsize
close(hf)
| HDF5 | https://github.com/JuliaIO/HDF5.jl.git |
|
[
"MIT"
] | 0.17.2 | e856eef26cf5bf2b0f95f8f4fc37553c72c8641c | code | 1246 | using Test, HDF5
@testset "virtual dataset" begin
dirname = mktempdir()
filename = joinpath(dirname, "main.hdf5")
h5open(filename, "w") do f
sub0 = joinpath(dirname, "sub-0.hdf5")
f0 = h5open(sub0, "w")
f0["x"] = fill(1.0, 3)
close(f0)
sub1 = joinpath(dirname, "sub-1.hdf5")
f1 = h5open(sub1, "w")
f1["x"] = fill(2.0, 3)
close(f1)
srcspace = dataspace((3,))
vspace = dataspace((3, 2); max_dims=(3, -1))
HDF5.select_hyperslab!(vspace, (1:3, HDF5.BlockRange(1; count=-1)))
d = create_dataset(
f,
"x",
datatype(Float64),
vspace;
virtual=[HDF5.VirtualMapping(vspace, "./sub-%b.hdf5", "x", srcspace)]
)
if Sys.iswindows()
@test_broken size(d) == (3, 2)
@test_broken read(d) == hcat(fill(1.0, 3), fill(2.0, 3))
else
@test size(d) == (3, 2)
@test read(d) == hcat(fill(1.0, 3), fill(2.0, 3))
end
dcpl = HDF5.get_create_properties(d)
@test dcpl.virtual isa HDF5.VirtualLayout
@test length(dcpl.virtual) == 1
@test dcpl.virtual[1] isa HDF5.VirtualMapping
end
end
| HDF5 | https://github.com/JuliaIO/HDF5.jl.git |
|
[
"MIT"
] | 0.17.2 | e856eef26cf5bf2b0f95f8f4fc37553c72c8641c | code | 4083 | """
module FilterTestUtils
This module contains utilities for evaluating and debugging HDF5 Filters.
"""
module FilterTestUtils
import HDF5.API
import H5Zlz4: H5Z_filter_lz4
import H5Zzstd: H5Z_filter_zstd
import H5Zbzip2: H5Z_filter_bzip2
using Test
export test_filter
function test_filter_init(; cd_values=Cuint[], data=ones(UInt8, 1024))
flags = Cuint(0)
nbytes = sizeof(data)
buf_size = Ref(Csize_t(sizeof(data)))
databuf = Libc.malloc(sizeof(data))
data = reinterpret(UInt8, data)
unsafe_copyto!(Ptr{UInt8}(databuf), pointer(data), sizeof(data))
buf = Ref(Ptr{Cvoid}(databuf))
return flags, cd_values, nbytes, buf_size, buf
end
function test_filter_compress!(
filter_func,
flags::Cuint,
cd_values::Vector{Cuint},
nbytes::Integer,
buf_size::Ref{Csize_t},
buf::Ref{Ptr{Cvoid}}
)
nbytes = Csize_t(nbytes)
cd_nelmts = Csize_t(length(cd_values))
GC.@preserve flags cd_nelmts cd_values nbytes buf_size buf begin
ret_code = filter_func(
flags,
cd_nelmts,
pointer(cd_values),
Csize_t(nbytes),
Base.unsafe_convert(Ptr{Csize_t}, buf_size),
Base.unsafe_convert(Ptr{Ptr{Cvoid}}, buf)
)
@debug "Compression:" ret_code buf_size[]
if ret_code <= 0
error("Test compression failed: $ret_code.")
end
end
return ret_code
end
function test_filter_decompress!(
filter_func,
flags::Cuint,
cd_values::Vector{Cuint},
nbytes::Integer,
buf_size::Ref{Csize_t},
buf::Ref{Ptr{Cvoid}}
)
nbytes = Csize_t(nbytes)
cd_nelmts = Csize_t(length(cd_values))
flags |= UInt32(API.H5Z_FLAG_REVERSE)
GC.@preserve flags cd_nelmts cd_values nbytes buf_size buf begin
ret_code = filter_func(
flags,
cd_nelmts,
pointer(cd_values),
Csize_t(nbytes),
Base.unsafe_convert(Ptr{Csize_t}, buf_size),
Base.unsafe_convert(Ptr{Ptr{Cvoid}}, buf)
)
@debug "Decompression:" ret_code buf_size[]
end
return ret_code
end
function test_filter_cleanup!(buf::Ref{Ptr{Cvoid}})
Libc.free(buf[])
end
function test_filter(filter_func; cd_values::Vector{Cuint}=Cuint[], data=ones(UInt8, 1024))
flags, cd_values, nbytes, buf_size, buf = test_filter_init(;
cd_values=cd_values, data=data
)
nbytes_compressed, nbytes_decompressed = 0, 0
try
nbytes_compressed = test_filter_compress!(
filter_func, flags, cd_values, nbytes, buf_size, buf
)
nbytes_decompressed = test_filter_decompress!(
filter_func, flags, cd_values, nbytes_compressed, buf_size, buf
)
if nbytes_decompressed > 0
# ret_code is the number of bytes out
round_trip_data = unsafe_wrap(Array, Ptr{UInt8}(buf[]), nbytes_decompressed)
@debug "Is the data the same after a roundtrip?" data == round_trip_data
end
catch err
rethrow(err)
finally
test_filter_cleanup!(buf)
end
@debug "Compression Ratio" nbytes_compressed / nbytes_decompressed
return nbytes_compressed, nbytes_decompressed
end
function test_bzip2_filter(data=ones(UInt8, 1024))
cd_values = Cuint[8]
test_filter(H5Z_filter_bzip2; cd_values=cd_values, data=data)
end
function test_lz4_filter(data=ones(UInt8, 1024))
cd_values = Cuint[1024]
test_filter(H5Z_filter_lz4; cd_values=cd_values, data=data)
end
function test_zstd_filter(data=ones(UInt8, 1024))
cd_values = Cuint[3] # aggression
test_filter(H5Z_filter_zstd; cd_values=cd_values, data=data)
end
function __init__()
@testset "Compression Filter Unit Tests" begin
@test argmin(test_bzip2_filter()) == 1
@test argmin(test_lz4_filter()) == 1
@test argmin(test_zstd_filter()) == 1
str = codeunits(repeat("foobar", 1000))
@test argmin(test_bzip2_filter(str)) == 1
@test argmin(test_lz4_filter(str)) == 1
@test argmin(test_zstd_filter(str)) == 1
end
end
end
| HDF5 | https://github.com/JuliaIO/HDF5.jl.git |
|
[
"MIT"
] | 0.17.2 | e856eef26cf5bf2b0f95f8f4fc37553c72c8641c | docs | 5374 | # HISTORY
Please also see the [release notes](https://github.com/JuliaIO/HDF5.jl/releases) for additional details.
## v0.18.0 (in development)
* Refactor Dataspaces (#1104)
## v0.17.2
* Fix variable length strings as attributes (#1130)
* Fix `h5_is_library_threadsafe` (#1138)
* Fix `H5F_LIBVER_LATEST` up to HDF5 v1.15 (#1145)
* Add Julia 1.11 nightly to tests (#1146)
* Add fix for `Base.ReinterpretArrays` in Julia 1.11 (#1146)
* Marked Windows virtual dataset tests as broken (#1146)
## v0.17.1
* Added `set_libraries!` to conveniently set the preferences for a custom HDF5 library
* Added new `HDF5.Filters.Registered` module containing information about registered filters
* Miscellaneous documentation fixes
## v0.17.0
* Replace build step by using Preferences.jl to use system HDF5 library
## v0.16.14
* Allow `begin` to work when indexing a dataset
* Simplify MPIO tests and internals
* Minor updates to docs and several docstrings
## v0.16.13
* Writing compound data set support
* Invalidation fixes
* Support read-only s3 virtual driver
* Locks around API calls (may introduce a minor regression around API calls)
## v0.16.12
* Virtual dataset support
* More informative errors when missing a filter
* Add BlockRange object to represent a slab of a hyperslab selection
## v0.16.11
* Doc improvements
* Internal code re-organization and cleanup
* Fixed `track_order` usage in do block syntax
* Implement `copy` for `Properties` type
* Additional H5P functions
* Introduction of experimental context based API for property lists (internal API subject to change)
## v0.16.10
* New attribute dictionary interface
* Additional `h5f` coverage
* Better error handling for iterators
* Expanded DatasetAccessProperties coverage
* New `copyto!` and `similar` methods for Datasets
## v0.16.9
* Resize buffer in h5p_get_external if no null is found
* Fix duplicate h5p_set_meta_block_size
## v0.16.8
* Fix issue related to seg fault when loading with MPI
* Add `create_dataset` convenience forms for dataspace
* Add `meta_block_size` property to H5P and add additional H5P coverage
* Add fapl and fcpl as keywords for h5open
## v0.16.7
* Fix issue related to serial driver loading when MPI is called
## v0.16.6
* Add filespace management API calls
## v0.16.5
* Core driver API support
* Addition of `fill_time` and `fill_value` dataset properties
* Add type order precision API methods
## v0.16.4
* Anonymous dataset support
* Allow property lists to be passed into `create_dataset`
## v0.16.3
* `track_order` support in `write` and `read`, integration with FileIO and `OrderedDict`'s automatic detection
* `ExternalFilter` addition as the public interface and new documentation
* External dataset support
## v0.16.2
* Minimum Blosc.jl version has been updated
* Support for the BITSHUFFLE option with the Blosc filter
## v0.16.1
* Minor bug fix to the test suite to ensure package tests pass
## v0.16.0
* Adds HDF5 support for ARM M1
* Revamped filter interface with the flexibility to allow specification of a filter pipeline and external filter hooks
* New filter compression methods defined by external packaged: `H5Zblosc`, `H5Zlz4`, `H5Zbzip2`, `H5Zzstd`
* `filter` property name renamed to `filters`
* Generalized chunking API to accept `AbstractArray`
* New `move_link` method, which effectively renames an object
* Revamped internal `Properties` interface (non-user facing)
## v0.15.6
* Add `FileIO` integration
## v0.15.5
* Add the ability to use `attributes` for HDF5 datatypes
## v0.15.4
* Minor improvement to an internal `ccall` wrapper
## v0.15.3
* Additional documentation on row/column ordering differences
* Improved iteration to support certain architectures where the existing callbacks were failing.
## v0.15.2
* Fix `show` for `Attribute` printing
## v0.15.1
* Fix build system settings when using system provided HDF5 binaries
## v0.15.0
* Support reading of opaque data recursively
* Add support for a subset of libhdf5 table methods
* Improved error handling
* Improved `show` method printing heuristics
* Improved iteration protocol performance through the use of callbacks
## v0.14.2
* Fix performance of reading long strings
* Add additional `Dataspace` methods
## v0.14.2
* We no longer ship binaries for Linux on i686 and armv7 for the v1.12 release of HDF5_jll.
## v0.14
### Breaking Changes
* The following functions have been unexported and do not have an automatic deprecation warning. Please use the `HDF5` module prefix to call these functions:
- `file`
- `filename`
- `name`
- `get_chunk`
- `get_datasets`
- `iscontiguous`
- `ishdf5`
- `ismmappable`
- `root`
- `readmmap`
- `set_dims!`
- `get_access_properties`
- `get_create_properties`
- `create_external_dataset`
* Properties are now set using keyword arguments instead of by pairs of string and value positional arguments.
For example `dset = d_create(h5f, "A", datatype(Int64), dataspace(10,10), "chunk", (3,3))` is now written as
`dset = d_create(h5f, "A", datatype(Int64), dataspace(10,10), chunk=(3,3))`. Additionally the key type used for
directly setting `HDF5Properties` objects has changed from a `String` to a `Symbol`, e.g.
`apl["fclose_degree"] = H5F_CLOSE_STRONG` is now written as `apl[:fclose_degree] = H5F_CLOSE_STRONG` ([#632](https://github.com/JuliaIO/HDF5.jl/pull/632)).
| HDF5 | https://github.com/JuliaIO/HDF5.jl.git |
|
[
"MIT"
] | 0.17.2 | e856eef26cf5bf2b0f95f8f4fc37553c72c8641c | docs | 3082 | <p align="center">
<picture>
<source media="(prefers-color-scheme: dark)" srcset="./docs/src/assets/logo-dark.svg">
<img alt="HDF5.jl" src="./docs/src/assets/logo.svg" width=350 height=125>
</picture>
</p>
_HDF5 interface for the Julia language_
-
[HDF5](https://www.hdfgroup.org/solutions/hdf5/) is a file format and library for storing and
accessing data, commonly used for scientific data. HDF5 files can be created and read by numerous
[programming languages](https://en.wikipedia.org/wiki/Hierarchical_Data_Format#Interfaces). This
package provides an interface to the HDF5 library for the Julia language.
[](https://JuliaIO.github.io/HDF5.jl/stable)
[](https://github.com/JuliaIO/HDF5.jl/actions)
[](https://codecov.io/gh/JuliaIO/HDF5.jl)
<!-- [](https://coveralls.io/github/JuliaIO/HDF5.jl?branch=master) -->
### Changelog
Please see [HISTORY.md](HISTORY.md) and the [release notes](https://github.com/JuliaIO/HDF5.jl/releases). Most changes have deprecation warnings and may not be listed in the [HISTORY.md](HISTORY.md) file.
### Installation
```julia
julia>]
pkg> add HDF5
```
For custom build instructions please refer to the [documentation](https://juliaio.github.io/HDF5.jl/stable/#Using-custom-or-system-provided-HDF5-binaries).
### Quickstart
```julia
using HDF5
```
To read and write a variable to a file, one approach is to use the filename:
```julia
A = collect(reshape(1:120, 15, 8))
h5write("/tmp/test2.h5", "mygroup2/A", A)
data = h5read("/tmp/test2.h5", "mygroup2/A", (2:3:15, 3:5))
```
where the last line reads back just `A[2:3:15, 3:5]` from the dataset.
More fine-grained control can be obtained using functional syntax:
```julia
h5open("mydata.h5", "w") do file
write(file, "A", A) # alternatively, say "@write file A"
end
c = h5open("mydata.h5", "r") do file
read(file, "A")
end
```
This allows you to add variables as they are generated to an open HDF5 file.
You don't have to use the `do` syntax (`file = h5open("mydata.h5", "w")` works
just fine), but an advantage is that it will automatically close the file (`close(file)`)
for you, even in cases of error.
Julia's high-level wrapper, providing a dictionary-like interface, may
also be of interest:
```julia
using HDF5
h5open("test.h5", "w") do file
g = create_group(file, "mygroup") # create a group
g["dset1"] = 3.2 # create a scalar dataset inside the group
attributes(g)["Description"] = "This group contains only a single dataset" # an attribute
end
```
Convenience functions for attributes attached to datasets are also provided:
```julia
A = Vector{Int}(1:10)
h5write("bar.h5", "foo", A)
h5writeattr("bar.h5", "foo", Dict("c"=>"value for metadata parameter c","d"=>"metadata d"))
h5readattr("bar.h5", "foo")
```
| HDF5 | https://github.com/JuliaIO/HDF5.jl.git |
|
[
"MIT"
] | 0.17.2 | e856eef26cf5bf2b0f95f8f4fc37553c72c8641c | docs | 865 | # Third Party Licenses
HDF5.jl contains several derivative works of open source software.
In particular, the following submodules are licensed as derivative works from third-parties.
Original and derivative code in HDF5.jl is licensed according to [LICENSE.txt](LICENSE.txt)
as permitted by licenses for the original software from which they may be derived.
See the files indicated below for the copyright notices and the licenses of the original
software from which individual submodules are derived.
## Filter Plugins
* [H5Zbzip2](filters/H5Zbzip2/src/H5Zbzip2.jl): See [filters/H5Zbzip2/THIRDPARTY.txt](filters/H5Zbzip2/THIRDPARTY.txt)
* [H5Zlz4](filters/H5Zlz4/src/H5Zlz4.jl): See [filters/H5Zlz4/THIRDPARTY.txt](filters/H5Zlz4/THIRDPARTY.txt)
* [H5Zzstd](filters/H5Zzstd/src/H5Zzstd.jl): See [filters/H5Zzstd/THIRDPARTY.txt](filters/H5Zzstd/THIRDPARTY.txt)
| HDF5 | https://github.com/JuliaIO/HDF5.jl.git |
|
[
"MIT"
] | 0.17.2 | e856eef26cf5bf2b0f95f8f4fc37553c72c8641c | docs | 1870 | # JuliaFormatterTool
## Purpose
The purpose of this tool is to aid in formatting the repository with JuliaFormatter.
Rather than starting a fresh Julia session each time you want to format, it
runs the formatter in a loop. Every time you press enter, it formats the repository.
This avoids the startup delay of loading the JuliaFormatter package each time.
The intended use of this program is to run in a separate terminal or be suspended
(e.g. via Control-Z) while you edit the repository. Resume the program (e.g. `fg`)
and press enter to format the repository before committing.
## Invocation
The format.jl script is meant to be executed directly.
On POSIX systems that understand shebang lines the format.jl can be invoked as follows.
```
./contrib/format/format.jl
```
Supplying the file as an argument to `julia` also works.
```
julia contrib/format/format.jl
```
The script will automatically install itself by resolving and instantiating its environment.
To bypass this install step, specify the project environment:
```
julia --project=contrib/format contrib/format/format.jl
```
## Example Usage
```
$ julia contrib/format/format.jl
Activating project at `~/.julia/dev/HDF5/contrib/format`
No Changes to `~/.julia/dev/HDF5/contrib/format/Project.toml`
No Changes to `~/.julia/dev/HDF5/contrib/format/Manifest.toml`
Welcome to Julia Formatter Tool!
--------------------------------
Press enter to format the directory ~/.julia/dev/HDF5 or `q[enter]` to quit
format.jl>
Applying JuliaFormatter...
┌ Info: Is the current directory formatted?
│ target_dir = "~/.julia/dev/HDF5"
└ format(target_dir) = true
Press enter to format the directory ~/.julia/dev/HDF5 or `q[enter]` to quit
format.jl>
Applying JuliaFormatter...
┌ Info: Is the current directory formatted?
│ target_dir = "~/.julia/dev/HDF5"
└ format(target_dir) = true
```
| HDF5 | https://github.com/JuliaIO/HDF5.jl.git |
|
[
"MIT"
] | 0.17.2 | e856eef26cf5bf2b0f95f8f4fc37553c72c8641c | docs | 12571 | ---
marp: true
---
# HDF5.jl: Hierarchical Data Storage for Julia
Mark Kittisopikul (HHMI), Simon Byrne (Caltech), Mustafa Mohamad (UCalgary)
---
# What is HDF5?
HDF5 stands for Hierarchical Data Format version 5 and is maintained by The HDF Group, formerly part of the National Center for Supercomputing Applications (NCSA).
* HDF5 is a file format with an open specification.
* HDF5 is a C Library and API.
* HDF5 is a data model.
---
# When to use HDF5
* Store numeric arrays and attributes in nested groups.
* Use it when you want to compactly store binary data.
## When not to use HDF5
* You have arrays of variable-length strings. Use fixed-length strings instead (see the sketch after this list).
* You have tables of heterogeneous data. Consider using columnar layouts. Other formats are more optimized for tables.
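A minimal sketch of the fixed-length alternative using the low-level `H5T` wrappers; the 10-byte size, the padded-buffer layout, and the `write_dataset(ds, dt, buf)` method used here are assumptions to illustrate the idea:
```julia
using HDF5

h5open(tempname(), "w") do f
    # fixed-size (10-byte) string type instead of a variable-length one
    dt = HDF5.Datatype(HDF5.API.h5t_copy(HDF5.API.H5T_C_S1))
    HDF5.API.h5t_set_size(dt, 10)
    ds = create_dataset(f, "fixed", dt, dataspace((2,)))
    # fixed-length strings are stored as zero-padded byte columns
    buf = zeros(UInt8, 10, 2)
    for (i, s) in enumerate(["hello", "world"])
        buf[1:ncodeunits(s), i] .= codeunits(s)
    end
    write_dataset(ds, dt, buf)
end
```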
---
# Related formats
HDF5 is used as a base for other formats
* NetCDF - Network Common Data Form v4 (Unidata, UCAR)
* MAT - MATLAB data files v7.3+
* PyTables - Pandas
* JLD/JLD2 - Julia Data Format
---
# HDF5 Specification
The HDF5 specification is open and freely available.

https://docs.hdfgroup.org/hdf5/v1_14/_f_m_t3.html
---
# What is HDF5.jl?
HDF5.jl is a wrapper around the HDF5 C Library.
It consists of
* A low level interface, a direct mapping to the C API
* A mid level interface, lightweight helpers
* A high level interface, a Julia API
<!-- give examples ? -->
---
# Related Julia Packages
* HDF5_jll.jl, C Library from HDF Group (dependency of HDF5.jl)
* MAT.jl, MATLAB files (depends on HDF5.jl)
* JLD.jl, Julia Data Format (depends on HDF5.jl)
* JLD2.jl, Julia Data Format 2: pure Julia implementation of a subset of HDF5
* NetCDF.jl & NCDatasets.jl: wrappers for the NetCDF C library, which incorporates HDF5
---
# HDF5.jl Early and Recent Contributors
* There are many contributors
* Konrad Hinsen initiated Julia's support for HDF5
* Tim Holy and Simon Kornblith were the initial primary authors
* Tom Short, Blake Johnson, Isaiah Norton, Elliot Saba, Steven Johnson, Mike Nolta, Jameson Nash
* Justin Willmert improved many aspects of the C-to-Julia API interface
* Other recent contributors: t-bltg, Hendrik Ranocha, Nathan Zimmerberg, Joshua Lampert, Tamas Gal, David MacMahon, Juan Ignacio Polanco, Michael Schlottke-Lakemper, linwaytin, Dmitri Iouchtchenko, Lorenzo Van Munoz, Jared Wahlstrand, Julian Samaroo, machakann, James Hester, Ralph Kube, Kristoffer Carlsson
---
# HDF5.jl Current Developers
* Mustafa Mohamad, Mark Kittisopikul, and Simon Byrne are the current maintainers
* Mark Kittisopikul has been expanding API coverage, especially with chunking
* Simon Byrne has been working on package organization, filter interface, virtual datasets, and parallelization
## Special mention
- Erik Schnetter for building HDF5 in Yggdrasil
---
# What advantages does Julia bring to HDF5.jl?
<!--reorder-->
* HDF5.jl dynamically creates types to match the stored HDF5 types.
* HDF5.jl can use Julia's reflection capabilities to create corresponding HDF5 types.
* HDF5.jl is easily extensible using multiple dispatch.
* HDF5.jl can create callbacks for C for efficient iteration.
* HDF5.jl wraps the C library directly in Julia via `@ccall`.
* This is partially automated via Clang.jl and https://github.com/mkitti/LibHDF5.jl .
---
# Basic HDF5.jl Usage
```julia
using HDF5
# Write a HDF5 file
h5open("mydata.h5", "w") do h5f
# Store an array
h5f["group_A/group_B/array_C"] = rand(1024,1024)
# Store an attribute
attrs(h5f["group_A"])["access_date"] = "2023_07_21"
end
# Read a HDF5 file
C = h5open("mydata.h5") do h5f
# Access an attribute
println(attrs(h5f["group_A"])["access_date"])
# Load an array and return it as C
h5f["group_A/group_B/array_C"][:,:]
end
```
---
# Exploring a HDF5 file with HDF5.jl
```julia
julia> h5f = h5open("mydata.h5")
🗂️ HDF5.File: (read-only) mydata.h5
└─ 📂 group_A
├─ 🏷️ access_date
└─ 📂 group_B
└─ 🔢 array_C
julia> C = h5f["group_A"]["group_B"]["array_C"][1:16,1:16]
16×16 Matrix{Float64}:
...
julia> close(h5f)
```
---
# Structs and HDF5 Types
```julia
julia> struct Foo
x::Int64
y::Float64
end
julia> HDF5.datatype(Foo)
HDF5.Datatype: H5T_COMPOUND {
H5T_STD_I64LE "x" : 0;
H5T_IEEE_F64LE "y" : 8;
}
```
---
# Reading and writing structs
```julia
julia> h5open("mystruct.h5", "w") do h5f
h5f["Foo"] = [Foo(1, 3.0)]
end
1-element Vector{Foo}:
Foo(1, 3.0)
julia> h5open("mystruct.h5", "r") do h5f
h5f["Foo"][]
end
1-element Vector{NamedTuple{(:x, :y), Tuple{Int64, Float64}}}:
(x = 1, y = 3.0)
julia> h5open("mystruct.h5", "r") do h5f
read(h5f["Foo"], Foo)
end
1-element Vector{Foo}:
Foo(1, 3.0)
```
---
# Chunking and Built-in Gzip Compression Usage
In HDF5.jl version 0.16 we introduced a new general `filters` keyword allowing for the definition of filter pipelines.
```julia
using HDF5
h5open("simple_chunked.h5", "w", libver_bounds=v"1.12") do h5f
h5ds = create_dataset(h5f, "gzipped_data", UInt8, (16,16),
chunk=(4,4),
filters=[HDF5.Filters.Deflate()],
alloc_time = :early
)
end
```
---
# Compression Filter Plugin Packages
<!--During the talk explain what these do and when/why you would want to use them-->
Glue code written in Julia.
* H5Zblosc.jl - Blosc.jl (Thank you, Steven G. Johnson)
* H5Zzstd.jl - CodecZstd.jl
* H5Zlz4.jl - CodecLZ4.jl
* H5Zbzip2.jl - CodecBzip2.jl
* H5Zbitshuffle.jl
Future: Let's figure out how to share these with JLD2.jl!
---
# Chunking and Filter Plugin Usage
```julia
using HDF5, H5Zzstd
h5open("zstd_chunked.h5", "w", libver_bounds=v"1.12") do h5f
h5ds = create_dataset(h5f, "zstd_data", UInt8, (16,16),
chunk=(4,4),
filters=[ZstdFilter(3)]
)
end
```
TODO: Use a package extension loading mechanism when CodecZstd.jl is present.
---
# Using External Native Plugin Filters
The HDF5 C library has a filter plugin mechanism. Plugins are shared libraries located in `/usr/local/hdf5/lib/plugin` or as specified by `$HDF5_PLUGIN_DIR`.
```julia
using HDF5.Filters
bitshuf = ExternalFilter(32008, Cuint[0, 0])
bitshuf_comp = ExternalFilter(32008, Cuint[0, 2])
data_A = rand(0:31, 1024)
data_B = rand(32:63, 1024)
filename, _ = mktemp()
h5open(filename, "w") do h5f
# Indexing style
h5f["ex_data_A", chunk=(32,), filters=bitshuf] = data_A
# Procedural style
d, dt = create_dataset(h5f, "ex_data_B", data_B, chunk=(32,), filters=[bitshuf_comp])
write(d, data_B)
end
```
---
# New with HDF5 1.12.3 and 1.14.0: Efficient Chunk Based Iteration
Where are the compressed chunks and can we decompress them in parallel?
| N Chunks | H5Dchunk_iter | H5Dget_chunk_info | Ratio |
|---|---|---|---|
| 64 | 2e-4 s | 5e-4 s | 2.4 |
| 256 | 7e-4 s | 5e-3 s | 6 |
| 1024 | 3e-3 s | 5e-2 s | 16 |
| 4096 | 1e-2 s | 7e-1 s | 57 |
| 16384 | 6e-2 s | 1e2 s | 208 |
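A rough sketch of chunk iteration from HDF5.jl, assuming the functional `HDF5.API.h5d_chunk_iter(f, dataset)` helper form (check your HDF5.jl release; the callback argument types below are assumptions):
```julia
using HDF5

h5open(tempname(), "w") do f
    d = create_dataset(f, "A", UInt8, (256, 256);
                       chunk=(64, 64), filters=[HDF5.Filters.Deflate()])
    write(d, rand(UInt8, 256, 256))
    rank = ndims(d)
    # callback gets each chunk's logical offset, filter mask,
    # byte address in the file, and stored (compressed) size
    HDF5.API.h5d_chunk_iter(d) do offset, filter_mask, addr, nbytes
        idx = unsafe_wrap(Array, offset, rank)  # offset assumed Ptr{hsize_t}
        println("chunk at $(Tuple(idx)): addr=$addr, $nbytes bytes")
        return HDF5.API.H5_ITER_CONT
    end
end
```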
---
# The HDF5 C API does not allow for multithreaded concurrency
* The HDF5 C library is not directly compatible with multithreading for parallel I/O. The preferred parallelization is via MPI.
* There is a `H5_HAVE_THREADSAFE` compile time option that uses a recursive lock.
* In HDF5.jl we have applied a `ReentrantLock` on all API calls.
* It is now safe to use HDF5.jl with multithreading, but you may not see much of an improvement (see the sketch below).
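A minimal sketch of what the lock buys you (safety, not parallel speed; the file and dataset names are illustrative):
```julia
using HDF5

h5open(tempname(), "w") do f
    # each API call below acquires HDF5.jl's internal ReentrantLock,
    # so concurrent tasks are safe but effectively serialized
    Threads.@threads for i in 1:8
        f["dataset_$i"] = rand(100)
    end
end
```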
---
# Virtual datasets
- Maps multiple datasets into a single dataset
- Can be same or different files
- Supports patterns for sequentially numbered files/datasets
- e.g. consider a dataset made up of 100×10 blocks, across 4 files
- `data00.h5`, `data01.h5`, etc.
```julia
space = dataspace((100,40))
create_dataset(h5f, "dataset", datatype(Float64), space;
virtual=[HDF5.VirtualMapping(
HDF5.hyperslab(space, (1:100, HDF5.BlockRange(1:10; count = -1))), # block pattern
"./data0%b.h5", # filenames (%b block pattern)
"data", # path to source dataset in file
dataspace((100,10)) # view into source dataset
)]
)
```
---
# Parallelization via MPI
- Message Passing Interface (MPI) is an interface for single-program, multiple-data (SPMD) parallelism.
- Launch multiple processes running the same program
```sh
mpiexec -n <nprocs> program ...
```
- Programs determine what they should do based on their identifier (_rank_).
- Each process determines what communication operations it should do (messages)
- Multiple implementations (Open MPI, MPICH, vendor-specific)
- Widely used in HPC for large-scale distributed parallelism.
- MPI.jl provides Julia bindings
---
## Using MPI + HDF5
Load and initialize MPI
```julia
using MPI, HDF5
MPI.Init()
```
Pass MPI communicator to `h5open`, e.g.
```julia
h5 = h5open("data.h5", "w", MPI.COMM_WORLD)
```
- Needs to be _collective_ (all processes at the same time), with the same arguments.
- The file needs to be accessible from all processes (e.g. on a shared file system if distributed).
---
Usage otherwise same as normal:
- metadata operations (`create_dataset`, writing attributes) should be done collectively, with the same arguments (see the sketch below).
- reading/writing data can be independently per-process.
- try to align chunks with processes
- if collective, use `dxpl_mpio=:collective` option with `create_dataset`/`open_dataset`
- some limitations (e.g no datasets with variable-length strings).
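A minimal sketch combining these points (the dataset shape and per-rank column slicing are illustrative):
```julia
using MPI, HDF5
MPI.Init()
comm = MPI.COMM_WORLD
rank = MPI.Comm_rank(comm)

h5open("pdata.h5", "w", comm) do f
    # collective metadata: every rank creates the same dataset
    d = create_dataset(f, "x", datatype(Float64),
                       dataspace((10, MPI.Comm_size(comm))))
    # independent data: each rank writes only its own column
    d[:, rank + 1] = fill(Float64(rank), 10)
end
```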
---
# Configuring HDF5 (in upcoming 0.17 release)
- May want to use specific HDF5 library
- interoperability with other languages (e.g. h5py)
- linked against custom MPI binary
- specific hardware features (burst buffers)
- Preferences.jl to specify custom HDF5 binary
```julia
using Preferences, HDF5
set_preferences!(HDF5,
"libhdf5" => "/path/to/your/libhdf5.so",
"libhdf5_hl" => "/path/to/your/libhdf5_hl.so",
force = true)
```
---
# Applications
* CliMa
* Plots.jl backend
* JLD.jl (dependency) and JLD2.jl (interop reference)
* Checkpointing.jl
---
# Summary
* HDF5 is a format, C library, and data model for storing hierarchical information.
* HDF5.jl is a wrapper providing high and low level access to the HDF5 library.
* HDF5.jl is now safe to use with multiple threads through locking and may expand capabilities beyond those of the HDF5 C library
* HDF5.jl works with MPI.jl to allow for distributed multiprocessing
---
# Questions?
---
# Extra Slides and Advanced Topics
* HDF5 Specification: Superblock and Hex Dump
* Iteration
---
# HDF5 Specification: Superblock
HDF5 structures are variably sized and use Bob Jenkins' Lookup3 checksum for metadata integrity.

https://docs.hdfgroup.org/hdf5/v1_14/_f_m_t3.html#Superblock
---
# A HDF5 Hex Dump
<!--Move to the end-->
```
00000000 89 48 44 46 0d 0a 1a 0a 03 08 08 00 00 00 00 00 |.HDF............|
00000010 00 00 00 00 ff ff ff ff ff ff ff ff 82 08 01 00 |................|
00000020 00 00 00 00 30 00 00 00 00 00 00 00 92 3c c0 2c |....0........<.,|
00000030 4f 48 44 52 02 20 a3 5c ae 64 a3 5c ae 64 a3 5c |OHDR. .\.d.\.d.\|
00000040 ae 64 a3 5c ae 64 78 02 12 00 00 00 00 ff ff ff |.d.\.dx.........|
00000050 ff ff ff ff ff ff ff ff ff ff ff ff ff 0a 02 00 |................|
00000060 01 00 00 06 14 00 00 01 00 09 7a 61 72 72 73 68 |..........zarrsh|
00000070 61 72 64 c3 00 00 00 00 00 00 00 00 40 00 00 00 |ard.........@...|
00000080 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 |................|
```

---
# Iteration
<!--move to end-->
HDF5 has two kinds of interfaces for accessing enumerated data:
1. `h5a_get_name_by_idx(loc_id, obj_name, index_type, order, idx, name, size, lapl_id)`
2. `h5a_iterate(obj_id::hid_t, idx_type::Cint, order::Cint, n::Ptr{hsize_t}, op::Ptr{Cvoid}, op_data::Any)`, `op` is function pointer
The `_by_idx` calls are easy to use via a simple `for` loop but are very inefficient for iterating over many items.
The `_iterate` calls require a C callback, `op`, and can be challenging to use but are efficient.
Based on `h5a_iterate` we have created a new `attrs` API replacing the former `attributes` API.
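For example, the dictionary-like `attrs` interface enumerates names through `h5a_iterate` rather than repeated `_by_idx` calls (a small sketch):
```julia
using HDF5

h5open(tempname(), "w") do f
    attrs(f)["date"] = "2023-07-21"
    attrs(f)["counter"] = 42
    for k in keys(attrs(f))  # keys are gathered via h5a_iterate
        println(k, " => ", attrs(f)[k])
    end
end
```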
---
# Concurrency with Direct I/O
* The HDF5 C library provides byte offsets for contiguous and chunked datasets
* Currently, HDF5.jl allows contiguous datasets to be memory-mapped into arrays, allowing for multithreaded reads (see the sketch below).
* With efficient chunk iteration, could we perform parallel decompression in HDF5.jl by reading compressed chunks directly?
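A sketch of the memory-mapped path for contiguous datasets (file name and sizes are illustrative):
```julia
using HDF5

fn = tempname()
h5open(fn, "w") do f
    f["A"] = rand(Float64, 1000, 1000)  # contiguous layout by default
end
h5open(fn, "r") do f
    A = HDF5.readmmap(f["A"])  # mmap-backed Array; reads bypass the API lock
    colsums = zeros(size(A, 2))
    Threads.@threads for j in 1:size(A, 2)
        colsums[j] = sum(@view A[:, j])  # threads read concurrently
    end
end
```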
| HDF5 | https://github.com/JuliaIO/HDF5.jl.git |
|
[
"MIT"
] | 0.17.2 | e856eef26cf5bf2b0f95f8f4fc37553c72c8641c | docs | 36452 | ```@raw html
<!-- This file is auto-generated and should not be manually edited. To update, run the
gen/gen_wrappers.jl script -->
```
```@meta
CurrentModule = HDF5.API
```
# Low-level library bindings
At the lowest level, `HDF5.jl` operates by calling the public API of the HDF5 shared
library through a set of `ccall` wrapper functions.
This page documents the function names and nominal C argument types of the API which
have bindings in this package.
Note that in many cases, high-level data types are valid arguments through automatic
`ccall` conversions.
For instance, `HDF5.Datatype` objects will be automatically converted to their `hid_t` ID
by Julia's `cconvert`+`unsafe_convert` `ccall` rules.
There are additional helper wrappers (often for out-argument functions) which are not
documented here.
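For instance, high-level handles pass straight through to the low-level wrappers (a small sketch):
```julia
using HDF5

@show HDF5.API.h5_get_libversion()  # helper returns a VersionNumber

f = h5open(tempname(), "w")
# the HDF5.File object converts automatically to its hid_t ID
@show HDF5.API.h5i_get_type(f) == HDF5.API.H5I_FILE
close(f)
```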
---
## [[`H5`](https://docs.hdfgroup.org/hdf5/v1_14/group___h5.html) — General Library Functions](@id H5)
- [`h5_close`](@ref h5_close)
- [`h5_dont_atexit`](@ref h5_dont_atexit)
- [`h5_free_memory`](@ref h5_free_memory)
- [`h5_garbage_collect`](@ref h5_garbage_collect)
- [`h5_get_libversion`](@ref h5_get_libversion)
- [`h5_is_library_threadsafe`](@ref h5_is_library_threadsafe)
- [`h5_open`](@ref h5_open)
- [`h5_set_free_list_limits`](@ref h5_set_free_list_limits)
```@docs
h5_close
h5_dont_atexit
h5_free_memory
h5_garbage_collect
h5_get_libversion
h5_is_library_threadsafe
h5_open
h5_set_free_list_limits
```
---
## [[`H5A`](https://docs.hdfgroup.org/hdf5/v1_14/group___h5_a.html) — Attribute Interface](@id H5A)
- [`h5a_close`](@ref h5a_close)
- [`h5a_create`](@ref h5a_create)
- [`h5a_create_by_name`](@ref h5a_create_by_name)
- [`h5a_delete`](@ref h5a_delete)
- [`h5a_delete_by_idx`](@ref h5a_delete_by_idx)
- [`h5a_delete_by_name`](@ref h5a_delete_by_name)
- [`h5a_exists`](@ref h5a_exists)
- [`h5a_exists_by_name`](@ref h5a_exists_by_name)
- [`h5a_get_create_plist`](@ref h5a_get_create_plist)
- [`h5a_get_name`](@ref h5a_get_name)
- [`h5a_get_name_by_idx`](@ref h5a_get_name_by_idx)
- [`h5a_get_space`](@ref h5a_get_space)
- [`h5a_get_type`](@ref h5a_get_type)
- [`h5a_iterate`](@ref h5a_iterate)
- [`h5a_open`](@ref h5a_open)
- [`h5a_open_by_idx`](@ref h5a_open_by_idx)
- [`h5a_read`](@ref h5a_read)
- [`h5a_rename`](@ref h5a_rename)
- [`h5a_write`](@ref h5a_write)
```@docs
h5a_close
h5a_create
h5a_create_by_name
h5a_delete
h5a_delete_by_idx
h5a_delete_by_name
h5a_exists
h5a_exists_by_name
h5a_get_create_plist
h5a_get_name
h5a_get_name_by_idx
h5a_get_space
h5a_get_type
h5a_iterate
h5a_open
h5a_open_by_idx
h5a_read
h5a_rename
h5a_write
```
---
## [[`H5D`](https://docs.hdfgroup.org/hdf5/v1_14/group___h5_d.html) — Dataset Interface](@id H5D)
- [`h5d_chunk_iter`](@ref h5d_chunk_iter)
- [`h5d_close`](@ref h5d_close)
- [`h5d_create`](@ref h5d_create)
- [`h5d_create_anon`](@ref h5d_create_anon)
- [`h5d_extend`](@ref h5d_extend)
- [`h5d_fill`](@ref h5d_fill)
- [`h5d_flush`](@ref h5d_flush)
- [`h5d_gather`](@ref h5d_gather)
- [`h5d_get_access_plist`](@ref h5d_get_access_plist)
- [`h5d_get_chunk_info`](@ref h5d_get_chunk_info)
- [`h5d_get_chunk_info_by_coord`](@ref h5d_get_chunk_info_by_coord)
- [`h5d_get_chunk_storage_size`](@ref h5d_get_chunk_storage_size)
- [`h5d_get_create_plist`](@ref h5d_get_create_plist)
- [`h5d_get_num_chunks`](@ref h5d_get_num_chunks)
- [`h5d_get_offset`](@ref h5d_get_offset)
- [`h5d_get_space`](@ref h5d_get_space)
- [`h5d_get_space_status`](@ref h5d_get_space_status)
- [`h5d_get_storage_size`](@ref h5d_get_storage_size)
- [`h5d_get_type`](@ref h5d_get_type)
- [`h5d_iterate`](@ref h5d_iterate)
- [`h5d_open`](@ref h5d_open)
- [`h5d_read`](@ref h5d_read)
- [`h5d_read_chunk`](@ref h5d_read_chunk)
- [`h5d_refresh`](@ref h5d_refresh)
- [`h5d_scatter`](@ref h5d_scatter)
- [`h5d_set_extent`](@ref h5d_set_extent)
- [`h5d_vlen_get_buf_size`](@ref h5d_vlen_get_buf_size)
- [`h5d_vlen_reclaim`](@ref h5d_vlen_reclaim)
- [`h5d_write`](@ref h5d_write)
- [`h5d_write_chunk`](@ref h5d_write_chunk)
```@docs
h5d_chunk_iter
h5d_close
h5d_create
h5d_create_anon
h5d_extend
h5d_fill
h5d_flush
h5d_gather
h5d_get_access_plist
h5d_get_chunk_info
h5d_get_chunk_info_by_coord
h5d_get_chunk_storage_size
h5d_get_create_plist
h5d_get_num_chunks
h5d_get_offset
h5d_get_space
h5d_get_space_status
h5d_get_storage_size
h5d_get_type
h5d_iterate
h5d_open
h5d_read
h5d_read_chunk
h5d_refresh
h5d_scatter
h5d_set_extent
h5d_vlen_get_buf_size
h5d_vlen_reclaim
h5d_write
h5d_write_chunk
```
---
## [[`H5E`](https://docs.hdfgroup.org/hdf5/v1_14/group___h5_e.html) — Error Interface](@id H5E)
- [`h5e_close_stack`](@ref h5e_close_stack)
- [`h5e_get_auto`](@ref h5e_get_auto)
- [`h5e_get_current_stack`](@ref h5e_get_current_stack)
- [`h5e_get_msg`](@ref h5e_get_msg)
- [`h5e_get_num`](@ref h5e_get_num)
- [`h5e_set_auto`](@ref h5e_set_auto)
- [`h5e_walk`](@ref h5e_walk)
```@docs
h5e_close_stack
h5e_get_auto
h5e_get_current_stack
h5e_get_msg
h5e_get_num
h5e_set_auto
h5e_walk
```
---
## [[`H5F`](https://docs.hdfgroup.org/hdf5/v1_14/group___h5_f.html) — File Interface](@id H5F)
- [`h5f_clear_elink_file_cache`](@ref h5f_clear_elink_file_cache)
- [`h5f_close`](@ref h5f_close)
- [`h5f_create`](@ref h5f_create)
- [`h5f_delete`](@ref h5f_delete)
- [`h5f_flush`](@ref h5f_flush)
- [`h5f_format_convert`](@ref h5f_format_convert)
- [`h5f_get_access_plist`](@ref h5f_get_access_plist)
- [`h5f_get_create_plist`](@ref h5f_get_create_plist)
- [`h5f_get_dset_no_attrs_hint`](@ref h5f_get_dset_no_attrs_hint)
- [`h5f_get_eoa`](@ref h5f_get_eoa)
- [`h5f_get_file_image`](@ref h5f_get_file_image)
- [`h5f_get_fileno`](@ref h5f_get_fileno)
- [`h5f_get_filesize`](@ref h5f_get_filesize)
- [`h5f_get_free_sections`](@ref h5f_get_free_sections)
- [`h5f_get_freespace`](@ref h5f_get_freespace)
- [`h5f_get_info`](@ref h5f_get_info)
- [`h5f_get_intent`](@ref h5f_get_intent)
- [`h5f_get_mdc_config`](@ref h5f_get_mdc_config)
- [`h5f_get_mdc_hit_rate`](@ref h5f_get_mdc_hit_rate)
- [`h5f_get_mdc_image_info`](@ref h5f_get_mdc_image_info)
- [`h5f_get_mdc_logging_status`](@ref h5f_get_mdc_logging_status)
- [`h5f_get_mdc_size`](@ref h5f_get_mdc_size)
- [`h5f_get_metadata_read_retry_info`](@ref h5f_get_metadata_read_retry_info)
- [`h5f_get_mpi_atomicity`](@ref h5f_get_mpi_atomicity)
- [`h5f_get_name`](@ref h5f_get_name)
- [`h5f_get_obj_count`](@ref h5f_get_obj_count)
- [`h5f_get_obj_ids`](@ref h5f_get_obj_ids)
- [`h5f_get_page_buffering_stats`](@ref h5f_get_page_buffering_stats)
- [`h5f_get_vfd_handle`](@ref h5f_get_vfd_handle)
- [`h5f_increment_filesize`](@ref h5f_increment_filesize)
- [`h5f_is_accessible`](@ref h5f_is_accessible)
- [`h5f_is_hdf5`](@ref h5f_is_hdf5)
- [`h5f_mount`](@ref h5f_mount)
- [`h5f_open`](@ref h5f_open)
- [`h5f_reopen`](@ref h5f_reopen)
- [`h5f_reset_mdc_hit_rate_stats`](@ref h5f_reset_mdc_hit_rate_stats)
- [`h5f_reset_page_buffering_stats`](@ref h5f_reset_page_buffering_stats)
- [`h5f_set_dset_no_attrs_hint`](@ref h5f_set_dset_no_attrs_hint)
- [`h5f_set_libver_bounds`](@ref h5f_set_libver_bounds)
- [`h5f_set_mdc_config`](@ref h5f_set_mdc_config)
- [`h5f_set_mpi_atomicity`](@ref h5f_set_mpi_atomicity)
- [`h5f_start_mdc_logging`](@ref h5f_start_mdc_logging)
- [`h5f_start_swmr_write`](@ref h5f_start_swmr_write)
- [`h5f_stop_mdc_logging`](@ref h5f_stop_mdc_logging)
- [`h5f_unmount`](@ref h5f_unmount)
```@docs
h5f_clear_elink_file_cache
h5f_close
h5f_create
h5f_delete
h5f_flush
h5f_format_convert
h5f_get_access_plist
h5f_get_create_plist
h5f_get_dset_no_attrs_hint
h5f_get_eoa
h5f_get_file_image
h5f_get_fileno
h5f_get_filesize
h5f_get_free_sections
h5f_get_freespace
h5f_get_info
h5f_get_intent
h5f_get_mdc_config
h5f_get_mdc_hit_rate
h5f_get_mdc_image_info
h5f_get_mdc_logging_status
h5f_get_mdc_size
h5f_get_metadata_read_retry_info
h5f_get_mpi_atomicity
h5f_get_name
h5f_get_obj_count
h5f_get_obj_ids
h5f_get_page_buffering_stats
h5f_get_vfd_handle
h5f_increment_filesize
h5f_is_accessible
h5f_is_hdf5
h5f_mount
h5f_open
h5f_reopen
h5f_reset_mdc_hit_rate_stats
h5f_reset_page_buffering_stats
h5f_set_dset_no_attrs_hint
h5f_set_libver_bounds
h5f_set_mdc_config
h5f_set_mpi_atomicity
h5f_start_mdc_logging
h5f_start_swmr_write
h5f_stop_mdc_logging
h5f_unmount
```
---
## [[`H5G`](https://docs.hdfgroup.org/hdf5/v1_14/group___h5_g.html) — Group Interface](@id H5G)
- [`h5g_close`](@ref h5g_close)
- [`h5g_create`](@ref h5g_create)
- [`h5g_get_create_plist`](@ref h5g_get_create_plist)
- [`h5g_get_info`](@ref h5g_get_info)
- [`h5g_get_num_objs`](@ref h5g_get_num_objs)
- [`h5g_get_objname_by_idx`](@ref h5g_get_objname_by_idx)
- [`h5g_open`](@ref h5g_open)
```@docs
h5g_close
h5g_create
h5g_get_create_plist
h5g_get_info
h5g_get_num_objs
h5g_get_objname_by_idx
h5g_open
```
---
## [[`H5I`](https://docs.hdfgroup.org/hdf5/v1_14/group___h5_i.html) — Identifier Interface](@id H5I)
- [`h5i_dec_ref`](@ref h5i_dec_ref)
- [`h5i_get_file_id`](@ref h5i_get_file_id)
- [`h5i_get_name`](@ref h5i_get_name)
- [`h5i_get_ref`](@ref h5i_get_ref)
- [`h5i_get_type`](@ref h5i_get_type)
- [`h5i_inc_ref`](@ref h5i_inc_ref)
- [`h5i_is_valid`](@ref h5i_is_valid)
```@docs
h5i_dec_ref
h5i_get_file_id
h5i_get_name
h5i_get_ref
h5i_get_type
h5i_inc_ref
h5i_is_valid
```
---
## [[`H5L`](https://docs.hdfgroup.org/hdf5/v1_14/group___h5_l.html) — Link Interface](@id H5L)
- [`h5l_create_external`](@ref h5l_create_external)
- [`h5l_create_hard`](@ref h5l_create_hard)
- [`h5l_create_soft`](@ref h5l_create_soft)
- [`h5l_delete`](@ref h5l_delete)
- [`h5l_exists`](@ref h5l_exists)
- [`h5l_get_info`](@ref h5l_get_info)
- [`h5l_get_name_by_idx`](@ref h5l_get_name_by_idx)
- [`h5l_iterate`](@ref h5l_iterate)
- [`h5l_move`](@ref h5l_move)
```@docs
h5l_create_external
h5l_create_hard
h5l_create_soft
h5l_delete
h5l_exists
h5l_get_info
h5l_get_name_by_idx
h5l_iterate
h5l_move
```
---
## [[`H5O`](https://docs.hdfgroup.org/hdf5/v1_14/group___h5_o.html) — Object Interface](@id H5O)
- [`h5o_are_mdc_flushes_disabled`](@ref h5o_are_mdc_flushes_disabled)
- [`h5o_close`](@ref h5o_close)
- [`h5o_copy`](@ref h5o_copy)
- [`h5o_decr_refcount`](@ref h5o_decr_refcount)
- [`h5o_disable_mdc_flushes`](@ref h5o_disable_mdc_flushes)
- [`h5o_enable_mdc_flushes`](@ref h5o_enable_mdc_flushes)
- [`h5o_exists_by_name`](@ref h5o_exists_by_name)
- [`h5o_flush`](@ref h5o_flush)
- [`h5o_get_comment`](@ref h5o_get_comment)
- [`h5o_get_comment_by_name`](@ref h5o_get_comment_by_name)
- [`h5o_get_info`](@ref h5o_get_info)
- [`h5o_get_info1`](@ref h5o_get_info1)
- [`h5o_get_info_by_idx`](@ref h5o_get_info_by_idx)
- [`h5o_get_info_by_name`](@ref h5o_get_info_by_name)
- [`h5o_get_native_info`](@ref h5o_get_native_info)
- [`h5o_get_native_info_by_idx`](@ref h5o_get_native_info_by_idx)
- [`h5o_get_native_info_by_name`](@ref h5o_get_native_info_by_name)
- [`h5o_incr_refcount`](@ref h5o_incr_refcount)
- [`h5o_link`](@ref h5o_link)
- [`h5o_open`](@ref h5o_open)
- [`h5o_open_by_addr`](@ref h5o_open_by_addr)
- [`h5o_open_by_idx`](@ref h5o_open_by_idx)
- [`h5o_refresh`](@ref h5o_refresh)
- [`h5o_set_comment`](@ref h5o_set_comment)
- [`h5o_set_comment_by_name`](@ref h5o_set_comment_by_name)
- [`h5o_token_cmp`](@ref h5o_token_cmp)
- [`h5o_token_from_str`](@ref h5o_token_from_str)
- [`h5o_token_to_str`](@ref h5o_token_to_str)
- [`h5o_visit`](@ref h5o_visit)
- [`h5o_visit_by_name`](@ref h5o_visit_by_name)
```@docs
h5o_are_mdc_flushes_disabled
h5o_close
h5o_copy
h5o_decr_refcount
h5o_disable_mdc_flushes
h5o_enable_mdc_flushes
h5o_exists_by_name
h5o_flush
h5o_get_comment
h5o_get_comment_by_name
h5o_get_info
h5o_get_info1
h5o_get_info_by_idx
h5o_get_info_by_name
h5o_get_native_info
h5o_get_native_info_by_idx
h5o_get_native_info_by_name
h5o_incr_refcount
h5o_link
h5o_open
h5o_open_by_addr
h5o_open_by_idx
h5o_refresh
h5o_set_comment
h5o_set_comment_by_name
h5o_token_cmp
h5o_token_from_str
h5o_token_to_str
h5o_visit
h5o_visit_by_name
```
---
## [[`H5PL`](https://docs.hdfgroup.org/hdf5/v1_14/group___h5_p_l.html) — Plugin Interface](@id H5PL)
- [`h5pl_append`](@ref h5pl_append)
- [`h5pl_get`](@ref h5pl_get)
- [`h5pl_get_loading_state`](@ref h5pl_get_loading_state)
- [`h5pl_insert`](@ref h5pl_insert)
- [`h5pl_prepend`](@ref h5pl_prepend)
- [`h5pl_remove`](@ref h5pl_remove)
- [`h5pl_replace`](@ref h5pl_replace)
- [`h5pl_set_loading_state`](@ref h5pl_set_loading_state)
- [`h5pl_size`](@ref h5pl_size)
```@docs
h5pl_append
h5pl_get
h5pl_get_loading_state
h5pl_insert
h5pl_prepend
h5pl_remove
h5pl_replace
h5pl_set_loading_state
h5pl_size
```
---
## [[`H5P`](https://docs.hdfgroup.org/hdf5/v1_14/group___h5_p.html) — Property Interface](@id H5P)
- [`h5p_add_merge_committed_dtype_path`](@ref h5p_add_merge_committed_dtype_path)
- [`h5p_all_filters_avail`](@ref h5p_all_filters_avail)
- [`h5p_close`](@ref h5p_close)
- [`h5p_close_class`](@ref h5p_close_class)
- [`h5p_copy`](@ref h5p_copy)
- [`h5p_copy_prop`](@ref h5p_copy_prop)
- [`h5p_create`](@ref h5p_create)
- [`h5p_create_class`](@ref h5p_create_class)
- [`h5p_decode`](@ref h5p_decode)
- [`h5p_encode`](@ref h5p_encode)
- [`h5p_equal`](@ref h5p_equal)
- [`h5p_exist`](@ref h5p_exist)
- [`h5p_fill_value_defined`](@ref h5p_fill_value_defined)
- [`h5p_free_merge_committed_dtype_paths`](@ref h5p_free_merge_committed_dtype_paths)
- [`h5p_get`](@ref h5p_get)
- [`h5p_get_alignment`](@ref h5p_get_alignment)
- [`h5p_get_alloc_time`](@ref h5p_get_alloc_time)
- [`h5p_get_append_flush`](@ref h5p_get_append_flush)
- [`h5p_get_attr_creation_order`](@ref h5p_get_attr_creation_order)
- [`h5p_get_attr_phase_change`](@ref h5p_get_attr_phase_change)
- [`h5p_get_btree_ratios`](@ref h5p_get_btree_ratios)
- [`h5p_get_buffer`](@ref h5p_get_buffer)
- [`h5p_get_cache`](@ref h5p_get_cache)
- [`h5p_get_char_encoding`](@ref h5p_get_char_encoding)
- [`h5p_get_chunk`](@ref h5p_get_chunk)
- [`h5p_get_chunk_cache`](@ref h5p_get_chunk_cache)
- [`h5p_get_chunk_opts`](@ref h5p_get_chunk_opts)
- [`h5p_get_class`](@ref h5p_get_class)
- [`h5p_get_class_name`](@ref h5p_get_class_name)
- [`h5p_get_class_parent`](@ref h5p_get_class_parent)
- [`h5p_get_copy_object`](@ref h5p_get_copy_object)
- [`h5p_get_core_write_tracking`](@ref h5p_get_core_write_tracking)
- [`h5p_get_create_intermediate_group`](@ref h5p_get_create_intermediate_group)
- [`h5p_get_data_transform`](@ref h5p_get_data_transform)
- [`h5p_get_driver`](@ref h5p_get_driver)
- [`h5p_get_driver_info`](@ref h5p_get_driver_info)
- [`h5p_get_dset_no_attrs_hint`](@ref h5p_get_dset_no_attrs_hint)
- [`h5p_get_dxpl_mpio`](@ref h5p_get_dxpl_mpio)
- [`h5p_get_edc_check`](@ref h5p_get_edc_check)
- [`h5p_get_efile_prefix`](@ref h5p_get_efile_prefix)
- [`h5p_get_elink_acc_flags`](@ref h5p_get_elink_acc_flags)
- [`h5p_get_elink_cb`](@ref h5p_get_elink_cb)
- [`h5p_get_elink_fapl`](@ref h5p_get_elink_fapl)
- [`h5p_get_elink_file_cache_size`](@ref h5p_get_elink_file_cache_size)
- [`h5p_get_elink_prefix`](@ref h5p_get_elink_prefix)
- [`h5p_get_est_link_info`](@ref h5p_get_est_link_info)
- [`h5p_get_evict_on_close`](@ref h5p_get_evict_on_close)
- [`h5p_get_external`](@ref h5p_get_external)
- [`h5p_get_external_count`](@ref h5p_get_external_count)
- [`h5p_get_family_offset`](@ref h5p_get_family_offset)
- [`h5p_get_fapl_core`](@ref h5p_get_fapl_core)
- [`h5p_get_fapl_family`](@ref h5p_get_fapl_family)
- [`h5p_get_fapl_hdfs`](@ref h5p_get_fapl_hdfs)
- [`h5p_get_fapl_mpio`](@ref h5p_get_fapl_mpio)
- [`h5p_get_fapl_multi`](@ref h5p_get_fapl_multi)
- [`h5p_get_fapl_ros3`](@ref h5p_get_fapl_ros3)
- [`h5p_get_fapl_splitter`](@ref h5p_get_fapl_splitter)
- [`h5p_get_fclose_degree`](@ref h5p_get_fclose_degree)
- [`h5p_get_file_image`](@ref h5p_get_file_image)
- [`h5p_get_file_image_callbacks`](@ref h5p_get_file_image_callbacks)
- [`h5p_get_file_locking`](@ref h5p_get_file_locking)
- [`h5p_get_file_space`](@ref h5p_get_file_space)
- [`h5p_get_file_space_page_size`](@ref h5p_get_file_space_page_size)
- [`h5p_get_file_space_strategy`](@ref h5p_get_file_space_strategy)
- [`h5p_get_fill_time`](@ref h5p_get_fill_time)
- [`h5p_get_fill_value`](@ref h5p_get_fill_value)
- [`h5p_get_filter`](@ref h5p_get_filter)
- [`h5p_get_filter_by_id`](@ref h5p_get_filter_by_id)
- [`h5p_get_gc_references`](@ref h5p_get_gc_references)
- [`h5p_get_hyper_vector_size`](@ref h5p_get_hyper_vector_size)
- [`h5p_get_istore_k`](@ref h5p_get_istore_k)
- [`h5p_get_layout`](@ref h5p_get_layout)
- [`h5p_get_libver_bounds`](@ref h5p_get_libver_bounds)
- [`h5p_get_link_creation_order`](@ref h5p_get_link_creation_order)
- [`h5p_get_link_phase_change`](@ref h5p_get_link_phase_change)
- [`h5p_get_local_heap_size_hint`](@ref h5p_get_local_heap_size_hint)
- [`h5p_get_mcdt_search_cb`](@ref h5p_get_mcdt_search_cb)
- [`h5p_get_mdc_config`](@ref h5p_get_mdc_config)
- [`h5p_get_mdc_image_config`](@ref h5p_get_mdc_image_config)
- [`h5p_get_mdc_log_options`](@ref h5p_get_mdc_log_options)
- [`h5p_get_meta_block_size`](@ref h5p_get_meta_block_size)
- [`h5p_get_metadata_read_attempts`](@ref h5p_get_metadata_read_attempts)
- [`h5p_get_multi_type`](@ref h5p_get_multi_type)
- [`h5p_get_nfilters`](@ref h5p_get_nfilters)
- [`h5p_get_nlinks`](@ref h5p_get_nlinks)
- [`h5p_get_nprops`](@ref h5p_get_nprops)
- [`h5p_get_obj_track_times`](@ref h5p_get_obj_track_times)
- [`h5p_get_object_flush_cb`](@ref h5p_get_object_flush_cb)
- [`h5p_get_page_buffer_size`](@ref h5p_get_page_buffer_size)
- [`h5p_get_preserve`](@ref h5p_get_preserve)
- [`h5p_get_shared_mesg_index`](@ref h5p_get_shared_mesg_index)
- [`h5p_get_shared_mesg_nindexes`](@ref h5p_get_shared_mesg_nindexes)
- [`h5p_get_shared_mesg_phase_change`](@ref h5p_get_shared_mesg_phase_change)
- [`h5p_get_sieve_buf_size`](@ref h5p_get_sieve_buf_size)
- [`h5p_get_size`](@ref h5p_get_size)
- [`h5p_get_sizes`](@ref h5p_get_sizes)
- [`h5p_get_small_data_block_size`](@ref h5p_get_small_data_block_size)
- [`h5p_get_sym_k`](@ref h5p_get_sym_k)
- [`h5p_get_type_conv_cb`](@ref h5p_get_type_conv_cb)
- [`h5p_get_userblock`](@ref h5p_get_userblock)
- [`h5p_get_version`](@ref h5p_get_version)
- [`h5p_get_virtual_count`](@ref h5p_get_virtual_count)
- [`h5p_get_virtual_dsetname`](@ref h5p_get_virtual_dsetname)
- [`h5p_get_virtual_filename`](@ref h5p_get_virtual_filename)
- [`h5p_get_virtual_prefix`](@ref h5p_get_virtual_prefix)
- [`h5p_get_virtual_printf_gap`](@ref h5p_get_virtual_printf_gap)
- [`h5p_get_virtual_srcspace`](@ref h5p_get_virtual_srcspace)
- [`h5p_get_virtual_view`](@ref h5p_get_virtual_view)
- [`h5p_get_virtual_vspace`](@ref h5p_get_virtual_vspace)
- [`h5p_get_vlen_mem_manager`](@ref h5p_get_vlen_mem_manager)
- [`h5p_get_vol_id`](@ref h5p_get_vol_id)
- [`h5p_get_vol_info`](@ref h5p_get_vol_info)
- [`h5p_insert`](@ref h5p_insert)
- [`h5p_isa_class`](@ref h5p_isa_class)
- [`h5p_iterate`](@ref h5p_iterate)
- [`h5p_modify_filter`](@ref h5p_modify_filter)
- [`h5p_register`](@ref h5p_register)
- [`h5p_remove`](@ref h5p_remove)
- [`h5p_remove_filter`](@ref h5p_remove_filter)
- [`h5p_set`](@ref h5p_set)
- [`h5p_set_alignment`](@ref h5p_set_alignment)
- [`h5p_set_alloc_time`](@ref h5p_set_alloc_time)
- [`h5p_set_append_flush`](@ref h5p_set_append_flush)
- [`h5p_set_attr_creation_order`](@ref h5p_set_attr_creation_order)
- [`h5p_set_attr_phase_change`](@ref h5p_set_attr_phase_change)
- [`h5p_set_btree_ratios`](@ref h5p_set_btree_ratios)
- [`h5p_set_buffer`](@ref h5p_set_buffer)
- [`h5p_set_cache`](@ref h5p_set_cache)
- [`h5p_set_char_encoding`](@ref h5p_set_char_encoding)
- [`h5p_set_chunk`](@ref h5p_set_chunk)
- [`h5p_set_chunk_cache`](@ref h5p_set_chunk_cache)
- [`h5p_set_chunk_opts`](@ref h5p_set_chunk_opts)
- [`h5p_set_copy_object`](@ref h5p_set_copy_object)
- [`h5p_set_core_write_tracking`](@ref h5p_set_core_write_tracking)
- [`h5p_set_create_intermediate_group`](@ref h5p_set_create_intermediate_group)
- [`h5p_set_data_transform`](@ref h5p_set_data_transform)
- [`h5p_set_deflate`](@ref h5p_set_deflate)
- [`h5p_set_driver`](@ref h5p_set_driver)
- [`h5p_set_dset_no_attrs_hint`](@ref h5p_set_dset_no_attrs_hint)
- [`h5p_set_dxpl_mpio`](@ref h5p_set_dxpl_mpio)
- [`h5p_set_edc_check`](@ref h5p_set_edc_check)
- [`h5p_set_efile_prefix`](@ref h5p_set_efile_prefix)
- [`h5p_set_elink_acc_flags`](@ref h5p_set_elink_acc_flags)
- [`h5p_set_elink_cb`](@ref h5p_set_elink_cb)
- [`h5p_set_elink_fapl`](@ref h5p_set_elink_fapl)
- [`h5p_set_elink_file_cache_size`](@ref h5p_set_elink_file_cache_size)
- [`h5p_set_elink_prefix`](@ref h5p_set_elink_prefix)
- [`h5p_set_est_link_info`](@ref h5p_set_est_link_info)
- [`h5p_set_evict_on_close`](@ref h5p_set_evict_on_close)
- [`h5p_set_external`](@ref h5p_set_external)
- [`h5p_set_family_offset`](@ref h5p_set_family_offset)
- [`h5p_set_fapl_core`](@ref h5p_set_fapl_core)
- [`h5p_set_fapl_family`](@ref h5p_set_fapl_family)
- [`h5p_set_fapl_hdfs`](@ref h5p_set_fapl_hdfs)
- [`h5p_set_fapl_log`](@ref h5p_set_fapl_log)
- [`h5p_set_fapl_mpio`](@ref h5p_set_fapl_mpio)
- [`h5p_set_fapl_multi`](@ref h5p_set_fapl_multi)
- [`h5p_set_fapl_ros3`](@ref h5p_set_fapl_ros3)
- [`h5p_set_fapl_sec2`](@ref h5p_set_fapl_sec2)
- [`h5p_set_fapl_split`](@ref h5p_set_fapl_split)
- [`h5p_set_fapl_splitter`](@ref h5p_set_fapl_splitter)
- [`h5p_set_fapl_stdio`](@ref h5p_set_fapl_stdio)
- [`h5p_set_fapl_windows`](@ref h5p_set_fapl_windows)
- [`h5p_set_fclose_degree`](@ref h5p_set_fclose_degree)
- [`h5p_set_file_image`](@ref h5p_set_file_image)
- [`h5p_set_file_image_callbacks`](@ref h5p_set_file_image_callbacks)
- [`h5p_set_file_locking`](@ref h5p_set_file_locking)
- [`h5p_set_file_space`](@ref h5p_set_file_space)
- [`h5p_set_file_space_page_size`](@ref h5p_set_file_space_page_size)
- [`h5p_set_file_space_strategy`](@ref h5p_set_file_space_strategy)
- [`h5p_set_fill_time`](@ref h5p_set_fill_time)
- [`h5p_set_fill_value`](@ref h5p_set_fill_value)
- [`h5p_set_filter`](@ref h5p_set_filter)
- [`h5p_set_filter_callback`](@ref h5p_set_filter_callback)
- [`h5p_set_fletcher32`](@ref h5p_set_fletcher32)
- [`h5p_set_gc_references`](@ref h5p_set_gc_references)
- [`h5p_set_hyper_vector_size`](@ref h5p_set_hyper_vector_size)
- [`h5p_set_istore_k`](@ref h5p_set_istore_k)
- [`h5p_set_layout`](@ref h5p_set_layout)
- [`h5p_set_libver_bounds`](@ref h5p_set_libver_bounds)
- [`h5p_set_link_creation_order`](@ref h5p_set_link_creation_order)
- [`h5p_set_link_phase_change`](@ref h5p_set_link_phase_change)
- [`h5p_set_local_heap_size_hint`](@ref h5p_set_local_heap_size_hint)
- [`h5p_set_mcdt_search_cb`](@ref h5p_set_mcdt_search_cb)
- [`h5p_set_mdc_config`](@ref h5p_set_mdc_config)
- [`h5p_set_mdc_image_config`](@ref h5p_set_mdc_image_config)
- [`h5p_set_mdc_log_options`](@ref h5p_set_mdc_log_options)
- [`h5p_set_meta_block_size`](@ref h5p_set_meta_block_size)
- [`h5p_set_metadata_read_attempts`](@ref h5p_set_metadata_read_attempts)
- [`h5p_set_multi_type`](@ref h5p_set_multi_type)
- [`h5p_set_nbit`](@ref h5p_set_nbit)
- [`h5p_set_nlinks`](@ref h5p_set_nlinks)
- [`h5p_set_obj_track_times`](@ref h5p_set_obj_track_times)
- [`h5p_set_object_flush_cb`](@ref h5p_set_object_flush_cb)
- [`h5p_set_page_buffer_size`](@ref h5p_set_page_buffer_size)
- [`h5p_set_preserve`](@ref h5p_set_preserve)
- [`h5p_set_scaleoffset`](@ref h5p_set_scaleoffset)
- [`h5p_set_shared_mesg_index`](@ref h5p_set_shared_mesg_index)
- [`h5p_set_shared_mesg_nindexes`](@ref h5p_set_shared_mesg_nindexes)
- [`h5p_set_shared_mesg_phase_change`](@ref h5p_set_shared_mesg_phase_change)
- [`h5p_set_shuffle`](@ref h5p_set_shuffle)
- [`h5p_set_sieve_buf_size`](@ref h5p_set_sieve_buf_size)
- [`h5p_set_sizes`](@ref h5p_set_sizes)
- [`h5p_set_small_data_block_size`](@ref h5p_set_small_data_block_size)
- [`h5p_set_sym_k`](@ref h5p_set_sym_k)
- [`h5p_set_szip`](@ref h5p_set_szip)
- [`h5p_set_type_conv_cb`](@ref h5p_set_type_conv_cb)
- [`h5p_set_userblock`](@ref h5p_set_userblock)
- [`h5p_set_virtual`](@ref h5p_set_virtual)
- [`h5p_set_virtual_prefix`](@ref h5p_set_virtual_prefix)
- [`h5p_set_virtual_printf_gap`](@ref h5p_set_virtual_printf_gap)
- [`h5p_set_virtual_view`](@ref h5p_set_virtual_view)
- [`h5p_set_vlen_mem_manager`](@ref h5p_set_vlen_mem_manager)
- [`h5p_set_vol`](@ref h5p_set_vol)
- [`h5p_unregister`](@ref h5p_unregister)
```@docs
h5p_add_merge_committed_dtype_path
h5p_all_filters_avail
h5p_close
h5p_close_class
h5p_copy
h5p_copy_prop
h5p_create
h5p_create_class
h5p_decode
h5p_encode
h5p_equal
h5p_exist
h5p_fill_value_defined
h5p_free_merge_committed_dtype_paths
h5p_get
h5p_get_alignment
h5p_get_alloc_time
h5p_get_append_flush
h5p_get_attr_creation_order
h5p_get_attr_phase_change
h5p_get_btree_ratios
h5p_get_buffer
h5p_get_cache
h5p_get_char_encoding
h5p_get_chunk
h5p_get_chunk_cache
h5p_get_chunk_opts
h5p_get_class
h5p_get_class_name
h5p_get_class_parent
h5p_get_copy_object
h5p_get_core_write_tracking
h5p_get_create_intermediate_group
h5p_get_data_transform
h5p_get_driver
h5p_get_driver_info
h5p_get_dset_no_attrs_hint
h5p_get_dxpl_mpio
h5p_get_edc_check
h5p_get_efile_prefix
h5p_get_elink_acc_flags
h5p_get_elink_cb
h5p_get_elink_fapl
h5p_get_elink_file_cache_size
h5p_get_elink_prefix
h5p_get_est_link_info
h5p_get_evict_on_close
h5p_get_external
h5p_get_external_count
h5p_get_family_offset
h5p_get_fapl_core
h5p_get_fapl_family
h5p_get_fapl_hdfs
h5p_get_fapl_mpio
h5p_get_fapl_multi
h5p_get_fapl_ros3
h5p_get_fapl_splitter
h5p_get_fclose_degree
h5p_get_file_image
h5p_get_file_image_callbacks
h5p_get_file_locking
h5p_get_file_space
h5p_get_file_space_page_size
h5p_get_file_space_strategy
h5p_get_fill_time
h5p_get_fill_value
h5p_get_filter
h5p_get_filter_by_id
h5p_get_gc_references
h5p_get_hyper_vector_size
h5p_get_istore_k
h5p_get_layout
h5p_get_libver_bounds
h5p_get_link_creation_order
h5p_get_link_phase_change
h5p_get_local_heap_size_hint
h5p_get_mcdt_search_cb
h5p_get_mdc_config
h5p_get_mdc_image_config
h5p_get_mdc_log_options
h5p_get_meta_block_size
h5p_get_metadata_read_attempts
h5p_get_multi_type
h5p_get_nfilters
h5p_get_nlinks
h5p_get_nprops
h5p_get_obj_track_times
h5p_get_object_flush_cb
h5p_get_page_buffer_size
h5p_get_preserve
h5p_get_shared_mesg_index
h5p_get_shared_mesg_nindexes
h5p_get_shared_mesg_phase_change
h5p_get_sieve_buf_size
h5p_get_size
h5p_get_sizes
h5p_get_small_data_block_size
h5p_get_sym_k
h5p_get_type_conv_cb
h5p_get_userblock
h5p_get_version
h5p_get_virtual_count
h5p_get_virtual_dsetname
h5p_get_virtual_filename
h5p_get_virtual_prefix
h5p_get_virtual_printf_gap
h5p_get_virtual_srcspace
h5p_get_virtual_view
h5p_get_virtual_vspace
h5p_get_vlen_mem_manager
h5p_get_vol_id
h5p_get_vol_info
h5p_insert
h5p_isa_class
h5p_iterate
h5p_modify_filter
h5p_register
h5p_remove
h5p_remove_filter
h5p_set
h5p_set_alignment
h5p_set_alloc_time
h5p_set_append_flush
h5p_set_attr_creation_order
h5p_set_attr_phase_change
h5p_set_btree_ratios
h5p_set_buffer
h5p_set_cache
h5p_set_char_encoding
h5p_set_chunk
h5p_set_chunk_cache
h5p_set_chunk_opts
h5p_set_copy_object
h5p_set_core_write_tracking
h5p_set_create_intermediate_group
h5p_set_data_transform
h5p_set_deflate
h5p_set_driver
h5p_set_dset_no_attrs_hint
h5p_set_dxpl_mpio
h5p_set_edc_check
h5p_set_efile_prefix
h5p_set_elink_acc_flags
h5p_set_elink_cb
h5p_set_elink_fapl
h5p_set_elink_file_cache_size
h5p_set_elink_prefix
h5p_set_est_link_info
h5p_set_evict_on_close
h5p_set_external
h5p_set_family_offset
h5p_set_fapl_core
h5p_set_fapl_family
h5p_set_fapl_hdfs
h5p_set_fapl_log
h5p_set_fapl_mpio
h5p_set_fapl_multi
h5p_set_fapl_ros3
h5p_set_fapl_sec2
h5p_set_fapl_split
h5p_set_fapl_splitter
h5p_set_fapl_stdio
h5p_set_fapl_windows
h5p_set_fclose_degree
h5p_set_file_image
h5p_set_file_image_callbacks
h5p_set_file_locking
h5p_set_file_space
h5p_set_file_space_page_size
h5p_set_file_space_strategy
h5p_set_fill_time
h5p_set_fill_value
h5p_set_filter
h5p_set_filter_callback
h5p_set_fletcher32
h5p_set_gc_references
h5p_set_hyper_vector_size
h5p_set_istore_k
h5p_set_layout
h5p_set_libver_bounds
h5p_set_link_creation_order
h5p_set_link_phase_change
h5p_set_local_heap_size_hint
h5p_set_mcdt_search_cb
h5p_set_mdc_config
h5p_set_mdc_image_config
h5p_set_mdc_log_options
h5p_set_meta_block_size
h5p_set_metadata_read_attempts
h5p_set_multi_type
h5p_set_nbit
h5p_set_nlinks
h5p_set_obj_track_times
h5p_set_object_flush_cb
h5p_set_page_buffer_size
h5p_set_preserve
h5p_set_scaleoffset
h5p_set_shared_mesg_index
h5p_set_shared_mesg_nindexes
h5p_set_shared_mesg_phase_change
h5p_set_shuffle
h5p_set_sieve_buf_size
h5p_set_sizes
h5p_set_small_data_block_size
h5p_set_sym_k
h5p_set_szip
h5p_set_type_conv_cb
h5p_set_userblock
h5p_set_virtual
h5p_set_virtual_prefix
h5p_set_virtual_printf_gap
h5p_set_virtual_view
h5p_set_vlen_mem_manager
h5p_set_vol
h5p_unregister
```
---
## [[`H5R`](https://docs.hdfgroup.org/hdf5/v1_14/group___h5_r.html) — Reference Interface](@id H5R)
- [`h5r_create`](@ref h5r_create)
- [`h5r_dereference`](@ref h5r_dereference)
- [`h5r_get_obj_type`](@ref h5r_get_obj_type)
- [`h5r_get_region`](@ref h5r_get_region)
```@docs
h5r_create
h5r_dereference
h5r_get_obj_type
h5r_get_region
```
---
## [[`H5S`](https://docs.hdfgroup.org/hdf5/v1_14/group___h5_s.html) — Dataspace Interface](@id H5S)
- [`h5s_close`](@ref h5s_close)
- [`h5s_combine_hyperslab`](@ref h5s_combine_hyperslab)
- [`h5s_combine_select`](@ref h5s_combine_select)
- [`h5s_copy`](@ref h5s_copy)
- [`h5s_create`](@ref h5s_create)
- [`h5s_create_simple`](@ref h5s_create_simple)
- [`h5s_extent_copy`](@ref h5s_extent_copy)
- [`h5s_extent_equal`](@ref h5s_extent_equal)
- [`h5s_get_regular_hyperslab`](@ref h5s_get_regular_hyperslab)
- [`h5s_get_select_bounds`](@ref h5s_get_select_bounds)
- [`h5s_get_select_elem_npoints`](@ref h5s_get_select_elem_npoints)
- [`h5s_get_select_elem_pointlist`](@ref h5s_get_select_elem_pointlist)
- [`h5s_get_select_hyper_blocklist`](@ref h5s_get_select_hyper_blocklist)
- [`h5s_get_select_hyper_nblocks`](@ref h5s_get_select_hyper_nblocks)
- [`h5s_get_select_npoints`](@ref h5s_get_select_npoints)
- [`h5s_get_select_type`](@ref h5s_get_select_type)
- [`h5s_get_simple_extent_dims`](@ref h5s_get_simple_extent_dims)
- [`h5s_get_simple_extent_ndims`](@ref h5s_get_simple_extent_ndims)
- [`h5s_get_simple_extent_type`](@ref h5s_get_simple_extent_type)
- [`h5s_is_regular_hyperslab`](@ref h5s_is_regular_hyperslab)
- [`h5s_is_simple`](@ref h5s_is_simple)
- [`h5s_modify_select`](@ref h5s_modify_select)
- [`h5s_offset_simple`](@ref h5s_offset_simple)
- [`h5s_select_adjust`](@ref h5s_select_adjust)
- [`h5s_select_all`](@ref h5s_select_all)
- [`h5s_select_copy`](@ref h5s_select_copy)
- [`h5s_select_elements`](@ref h5s_select_elements)
- [`h5s_select_hyperslab`](@ref h5s_select_hyperslab)
- [`h5s_select_intersect_block`](@ref h5s_select_intersect_block)
- [`h5s_select_shape_same`](@ref h5s_select_shape_same)
- [`h5s_select_valid`](@ref h5s_select_valid)
- [`h5s_set_extent_none`](@ref h5s_set_extent_none)
- [`h5s_set_extent_simple`](@ref h5s_set_extent_simple)
```@docs
h5s_close
h5s_combine_hyperslab
h5s_combine_select
h5s_copy
h5s_create
h5s_create_simple
h5s_extent_copy
h5s_extent_equal
h5s_get_regular_hyperslab
h5s_get_select_bounds
h5s_get_select_elem_npoints
h5s_get_select_elem_pointlist
h5s_get_select_hyper_blocklist
h5s_get_select_hyper_nblocks
h5s_get_select_npoints
h5s_get_select_type
h5s_get_simple_extent_dims
h5s_get_simple_extent_ndims
h5s_get_simple_extent_type
h5s_is_regular_hyperslab
h5s_is_simple
h5s_modify_select
h5s_offset_simple
h5s_select_adjust
h5s_select_all
h5s_select_copy
h5s_select_elements
h5s_select_hyperslab
h5s_select_intersect_block
h5s_select_shape_same
h5s_select_valid
h5s_set_extent_none
h5s_set_extent_simple
```
---
## [[`H5T`](https://docs.hdfgroup.org/hdf5/v1_14/group___h5_t.html) — Datatype Interface](@id H5T)
- [`h5t_array_create`](@ref h5t_array_create)
- [`h5t_close`](@ref h5t_close)
- [`h5t_commit`](@ref h5t_commit)
- [`h5t_committed`](@ref h5t_committed)
- [`h5t_copy`](@ref h5t_copy)
- [`h5t_create`](@ref h5t_create)
- [`h5t_enum_insert`](@ref h5t_enum_insert)
- [`h5t_equal`](@ref h5t_equal)
- [`h5t_get_array_dims`](@ref h5t_get_array_dims)
- [`h5t_get_array_ndims`](@ref h5t_get_array_ndims)
- [`h5t_get_class`](@ref h5t_get_class)
- [`h5t_get_cset`](@ref h5t_get_cset)
- [`h5t_get_ebias`](@ref h5t_get_ebias)
- [`h5t_get_fields`](@ref h5t_get_fields)
- [`h5t_get_member_class`](@ref h5t_get_member_class)
- [`h5t_get_member_index`](@ref h5t_get_member_index)
- [`h5t_get_member_name`](@ref h5t_get_member_name)
- [`h5t_get_member_offset`](@ref h5t_get_member_offset)
- [`h5t_get_member_type`](@ref h5t_get_member_type)
- [`h5t_get_native_type`](@ref h5t_get_native_type)
- [`h5t_get_nmembers`](@ref h5t_get_nmembers)
- [`h5t_get_offset`](@ref h5t_get_offset)
- [`h5t_get_order`](@ref h5t_get_order)
- [`h5t_get_precision`](@ref h5t_get_precision)
- [`h5t_get_sign`](@ref h5t_get_sign)
- [`h5t_get_size`](@ref h5t_get_size)
- [`h5t_get_strpad`](@ref h5t_get_strpad)
- [`h5t_get_super`](@ref h5t_get_super)
- [`h5t_get_tag`](@ref h5t_get_tag)
- [`h5t_insert`](@ref h5t_insert)
- [`h5t_is_variable_str`](@ref h5t_is_variable_str)
- [`h5t_lock`](@ref h5t_lock)
- [`h5t_open`](@ref h5t_open)
- [`h5t_set_cset`](@ref h5t_set_cset)
- [`h5t_set_ebias`](@ref h5t_set_ebias)
- [`h5t_set_fields`](@ref h5t_set_fields)
- [`h5t_set_offset`](@ref h5t_set_offset)
- [`h5t_set_order`](@ref h5t_set_order)
- [`h5t_set_precision`](@ref h5t_set_precision)
- [`h5t_set_size`](@ref h5t_set_size)
- [`h5t_set_strpad`](@ref h5t_set_strpad)
- [`h5t_set_tag`](@ref h5t_set_tag)
- [`h5t_vlen_create`](@ref h5t_vlen_create)
```@docs
h5t_array_create
h5t_close
h5t_commit
h5t_committed
h5t_copy
h5t_create
h5t_enum_insert
h5t_equal
h5t_get_array_dims
h5t_get_array_ndims
h5t_get_class
h5t_get_cset
h5t_get_ebias
h5t_get_fields
h5t_get_member_class
h5t_get_member_index
h5t_get_member_name
h5t_get_member_offset
h5t_get_member_type
h5t_get_native_type
h5t_get_nmembers
h5t_get_offset
h5t_get_order
h5t_get_precision
h5t_get_sign
h5t_get_size
h5t_get_strpad
h5t_get_super
h5t_get_tag
h5t_insert
h5t_is_variable_str
h5t_lock
h5t_open
h5t_set_cset
h5t_set_ebias
h5t_set_fields
h5t_set_offset
h5t_set_order
h5t_set_precision
h5t_set_size
h5t_set_strpad
h5t_set_tag
h5t_vlen_create
```
---
## [[`H5Z`](https://docs.hdfgroup.org/hdf5/v1_14/group___h5_z.html) — Filter Interface](@id H5Z)
- [`h5z_filter_avail`](@ref h5z_filter_avail)
- [`h5z_get_filter_info`](@ref h5z_get_filter_info)
- [`h5z_register`](@ref h5z_register)
- [`h5z_unregister`](@ref h5z_unregister)
```@docs
h5z_filter_avail
h5z_get_filter_info
h5z_register
h5z_unregister
```
---
## [[`H5FD`](https://docs.hdfgroup.org/hdf5/v1_14/_v_f_l.html) — File Drivers](@id H5FD)
- [`h5fd_core_init`](@ref h5fd_core_init)
- [`h5fd_family_init`](@ref h5fd_family_init)
- [`h5fd_log_init`](@ref h5fd_log_init)
- [`h5fd_mpio_init`](@ref h5fd_mpio_init)
- [`h5fd_multi_init`](@ref h5fd_multi_init)
- [`h5fd_sec2_init`](@ref h5fd_sec2_init)
- [`h5fd_stdio_init`](@ref h5fd_stdio_init)
```@docs
h5fd_core_init
h5fd_family_init
h5fd_log_init
h5fd_mpio_init
h5fd_multi_init
h5fd_sec2_init
h5fd_stdio_init
```
---
## [[`H5DO`](https://docs.hdfgroup.org/hdf5/v1_14/group___h5_d_o.html) — Optimized Functions Interface](@id H5DO)
- [`h5do_append`](@ref h5do_append)
- [`h5do_write_chunk`](@ref h5do_write_chunk)
```@docs
h5do_append
h5do_write_chunk
```
---
## [[`H5DS`](https://docs.hdfgroup.org/hdf5/v1_14/group___h5_d_s.html) — Dimension Scale Interface](@id H5DS)
- [`h5ds_attach_scale`](@ref h5ds_attach_scale)
- [`h5ds_detach_scale`](@ref h5ds_detach_scale)
- [`h5ds_get_label`](@ref h5ds_get_label)
- [`h5ds_get_num_scales`](@ref h5ds_get_num_scales)
- [`h5ds_get_scale_name`](@ref h5ds_get_scale_name)
- [`h5ds_is_attached`](@ref h5ds_is_attached)
- [`h5ds_is_scale`](@ref h5ds_is_scale)
- [`h5ds_set_label`](@ref h5ds_set_label)
- [`h5ds_set_scale`](@ref h5ds_set_scale)
```@docs
h5ds_attach_scale
h5ds_detach_scale
h5ds_get_label
h5ds_get_num_scales
h5ds_get_scale_name
h5ds_is_attached
h5ds_is_scale
h5ds_set_label
h5ds_set_scale
```
---
## [[`H5LT`](https://docs.hdfgroup.org/hdf5/v1_14/group___h5_l_t.html) — Lite Interface](@id H5LT)
- [`h5lt_dtype_to_text`](@ref h5lt_dtype_to_text)
```@docs
h5lt_dtype_to_text
```
---
## [[`H5TB`](https://docs.hdfgroup.org/hdf5/v1_14/group___h5_t_b.html) — Table Interface](@id H5TB)
- [`h5tb_append_records`](@ref h5tb_append_records)
- [`h5tb_get_field_info`](@ref h5tb_get_field_info)
- [`h5tb_get_table_info`](@ref h5tb_get_table_info)
- [`h5tb_make_table`](@ref h5tb_make_table)
- [`h5tb_read_records`](@ref h5tb_read_records)
- [`h5tb_read_table`](@ref h5tb_read_table)
- [`h5tb_write_records`](@ref h5tb_write_records)
```@docs
h5tb_append_records
h5tb_get_field_info
h5tb_get_table_info
h5tb_make_table
h5tb_read_records
h5tb_read_table
h5tb_write_records
```
| HDF5 | https://github.com/JuliaIO/HDF5.jl.git |
|
[
"MIT"
] | 0.17.2 | e856eef26cf5bf2b0f95f8f4fc37553c72c8641c | docs | 26462 | ```@meta
CurrentModule = HDF5
DocTestSetup = quote
using HDF5
end
```
# HDF5.jl
## Overview
[HDF5](https://www.hdfgroup.org/solutions/hdf5/) stands for Hierarchical Data Format v5 and is closely modeled on file systems. In HDF5, a "group" is analogous to a directory, a "dataset" is like a file. HDF5 also uses "attributes" to associate metadata with a particular group or dataset. HDF5 uses ASCII names for these different objects, and objects can be accessed by Unix-like pathnames, e.g., "/sample1/tempsensor/firsttrial" for a top-level group "sample1", a subgroup "tempsensor", and a dataset "firsttrial".
For simple types (scalars, strings, and arrays), HDF5 provides sufficient metadata to know how each item is to be interpreted. For example, HDF5 encodes that a given block of bytes is to be interpreted as an array of `Int64`, and represents them in a way that is compatible across different computing architectures.
However, to preserve Julia objects, one generally needs additional type information to be supplied,
which is easy to provide using attributes. This is handled for you automatically by the [JLD](https://github.com/JuliaIO/JLD.jl)/[JLD2](https://github.com/JuliaIO/JLD2.jl) packages. These specific formats (conventions) provide "extra" functionality, but they are still both regular
HDF5 files and are therefore compatible with any HDF5 reader or writer.
Language wrappers for HDF5 are often described as either "low level" or "high level." This package contains both flavors: at the low level, it directly wraps HDF5's functions, thus copying their API and making them available from within Julia. At the high level, it provides a set of functions built on the low-level wrappers which make the usage of this library more convenient.
## Installation
```julia
julia>]
pkg> add HDF5
```
Starting from Julia 1.3, the HDF5 binaries are by default downloaded using the `HDF5_jll` package.
### Using custom or system provided HDF5 binaries
!!! note "Migration from HDF5.jl v0.16 and earlier"
The way to use a system-provided HDF5 library changed in HDF5.jl v0.17. Previously,
the library path was set by the environment variable `JULIA_HDF5_PATH`, which required
rebuilding HDF5.jl afterwards. The environment variable has been removed and no longer has an
effect (for backward compatibility it is still recommended to **also** set the environment
variable). Instead, proceed as described below.
To use system-provided HDF5 binaries instead, set the preferences `libhdf5` and `libhdf5_hl`; see also [Preferences.jl](https://github.com/JuliaPackaging/Preferences.jl). These need to point to the local paths of the libraries `libhdf5` and `libhdf5_hl`.
For example, to use HDF5 (`libhdf5-mpich-dev`) with MPI using system libraries on Ubuntu 20.04, you would run
```sh
$ sudo apt install mpich libhdf5-mpich-dev
```
If your system HDF5 library is compiled with MPI, you need to tell MPI.jl to use the same locally installed MPI implementation. This can be done in Julia by running:
```julia
using MPIPreferences
MPIPreferences.use_system_binary()
```
to set the MPI preferences, see the [documentation of MPI.jl](https://juliaparallel.org/MPI.jl/stable/configuration/). You can set the path to the system library using [Preferences.jl](https://github.com/JuliaPackaging/Preferences.jl) by:
```julia
using Preferences, HDF5
set_preferences!(
HDF5,
"libhdf5" => "/usr/lib/x86_64-linux-gnu/hdf5/mpich/libhdf5.so",
"libhdf5_hl" => "/usr/lib/x86_64-linux-gnu/hdf5/mpich/libhdf5_hl.so", force = true)
```
Alternatively, HDF5.jl provides a convenience function [`HDF5.API.set_libraries!`](@ref) that can be used as follows:
```julia
using HDF5
HDF5.API.set_libraries!("/usr/lib/x86_64-linux-gnu/hdf5/mpich/libhdf5.so", "/usr/lib/x86_64-linux-gnu/hdf5/mpich/libhdf5_hl.so")
```
Going back to the default, i.e. deleting the preferences again, can be done by calling `HDF5.API.set_libraries!()`.
If HDF5 cannot be loaded, it may be useful to use the UUID to change these settings:
```julia
using Preferences, UUIDs
set_preferences!(
UUID("f67ccb44-e63f-5c2f-98bd-6dc0ccc4ba2f"), # UUID of HDF5.jl
"libhdf5" => "/usr/lib/x86_64-linux-gnu/hdf5/mpich/libhdf5.so",
"libhdf5_hl" => "/usr/lib/x86_64-linux-gnu/hdf5/mpich/libhdf5_hl.so", force = true)
```
Also see the file `test/configure_packages.jl` for an example.
Both the MPI preferences and the HDF5.jl preferences are written to a file called LocalPreferences.toml in the project directory. After performing the steps described above, this file could look like the following:
```toml
[MPIPreferences]
_format = "1.0"
abi = "MPICH"
binary = "system"
libmpi = "/software/mpi/lib/libmpi.so"
mpiexec = "/software/mpi/bin/mpiexec"
[HDF5]
libhdf5 = "/usr/lib/x86_64-linux-gnu/hdf5/mpich/libhdf5.so"
libhdf5_hl = "/usr/lib/x86_64-linux-gnu/hdf5/mpich/libhdf5_hl.so"
```
If you want to switch to another HDF5 library or the library moved, you can call the `set_preferences!` commands again (or manually edit LocalPreferences.toml) to set the new paths. Using the default implementation provided by HDF5_jll can be done by simply manually deleting the LocalPreferences.toml file.
## Opening and closing files
"Plain" (i.e., with no extra formatting conventions) HDF5 files are created and/or opened with the `h5open` command:
```julia
fid = h5open(filename, mode)
```
The mode can be any one of the following:
| mode | Meaning |
| :--- | :------------------------------------------------------------------ |
| "r" | read-only |
| "r+" | read-write, preserving any existing contents |
| "cw" | read-write, create file if not existing, preserve existing contents |
| "w" | read-write, destroying any existing contents (if any) |
For example
```@repl main
using HDF5
fname = tempname(); # temporary file
fid = h5open(fname, "w")
```
This produces an object of type `HDF5.File`, a subtype of the abstract type `H5DataStore`. This file will have no elements (groups, datasets, or attributes) that are not explicitly created by the user.
When you're finished with a file, you should close it:
```julia
close(fid)
```
Closing a file also closes any other open objects (e.g., datasets, groups) in that file. In general, you need to close an HDF5 file to "release" it for use by other applications.
## Creating a group or dataset
Groups can be created via the function [`create_group`](@ref)
```@repl main
create_group(fid, "mygroup")
```
We can create a dataset `"mydataset"` by indexing into `fid`; this also writes data to the dataset.
```@repl main
fid["mydataset"] = rand()
```
Alternatively, we can call [`create_dataset`](@ref), which does not write data to the dataset. It merely creates the dataset.
```@repl main
create_dataset(fid, "myvector", Int, (10,))
```
Creating a dataset within a group is as simple as indexing into the group with the name of the dataset or calling [`create_dataset`](@ref) with the group as the first argument.
```@repl main
g = fid["mygroup"]
g["mydataset"] = "Hello World!"
create_dataset(g, "myvector", Int, (10,))
```
The `do` syntax is also supported. The file, group, and dataset handles will automatically be closed after the `do` block terminates.
```@repl main
h5open("example2.h5", "w") do fid
g = create_group(fid, "mygroup")
dset = create_dataset(g, "myvector", Float64, (10,))
    write(dset, rand(10))
end
```
## Opening and closing objects
If you have a file object `fid`, and this has a group or dataset called `"mygroup"` at the top level of a file, you can open it in the following way:
```@repl main
obj = fid["mygroup"]
```
This does not read any data or attributes associated with the object; it's simply a handle for further manipulations. For example:
```@repl main
g = fid["mygroup"]
dset = g["mydataset"]
```
or simply
```@repl main
dset = fid["mygroup/mydataset"]
```
When you're done with an object, you can close it using `close(obj)`. If you forget to do this, it will be closed for you anyway when the file is closed, or if `obj` goes out of scope and gets garbage collected.
## Reading and writing data
Suppose you have a group `g` which contains a dataset with path `"mydataset"`, and that you've also opened this dataset as `dset = g["mydataset"]`.
You can read information in this dataset in any of the following ways:
```julia
A = read(dset)
A = read(g, "mydataset")
Asub = dset[2:3, 1:3]
```
The last syntax reads just a subset of the data array (assuming that `dset` is an array of sufficient size).
libhdf5 has internal mechanisms for slicing arrays, and consequently if you need only a small piece of a large array, it can be faster to read just what you need rather than reading the entire array and discarding most of it.
Datasets can be created with either
```julia
g["mydataset"] = rand(3,5)
write(g, "mydataset", rand(3,5))
```
One can use the high-level interface functions `load` and `save` from `FileIO`, where an optional `OrderedDict` can be passed (with `track_order` then inferred). Note that using `track_order=true` or passing an `OrderedDict` is a promise that the file being read was created with the appropriate ordering flags.
```julia
julia> using OrderedCollections, FileIO
julia> save("track_order.h5", OrderedDict("z"=>1, "a"=>2, "g/f"=>3, "g/b"=>4))
julia> load("track_order.h5"; dict=OrderedDict())
OrderedDict{Any, Any} with 4 entries:
"z" => 1
"a" => 2
"g/f" => 3
"g/b" => 4
```
## Passing parameters
It is often required to pass parameters to specific routines, which are collected
in so-called property lists in HDF5. There are different property lists for
different tasks, e.g. for the access/creation of files, datasets, groups.
In this high level framework multiple parameters can be simply applied by
appending them at the end of function calls as keyword arguments.
```julia
g["A"] = A # basic
g["A", chunk=(5,5)] = A # add chunks
B = h5read(fn,"mygroup/B", # two parameters
fapl_mpio=(ccomm,cinfo), # if parameter requires multiple args use tuples
dxpl_mpio=HDF5.H5FD_MPIO_COLLECTIVE )
```
This will automatically create the correct property lists, add the properties,
and apply the property list while reading/writing the data.
The naming of the properties generally follows that of HDF5, i.e. the key
`fapl_mpio` maps to the HDF5 functions `h5pget/set_fapl_mpio` and the
corresponding property list type `H5P_FILE_ACCESS`.
The complete list of routines and their interfaces is available in the
[H5P: Property List Interface](https://portal.hdfgroup.org/display/HDF5/Property+Lists)
documentation. Note that not all properties are available. When searching
for a property, check whether the corresponding `h5pget/set` functions are
available.
## Chunking and compression
You can also optionally "chunk" and/or compress your data. For example,
```julia
A = rand(100,100)
g["A", chunk=(5,5)] = A
```
stores the matrix `A` in 5-by-5 chunks. Chunking improves efficiency if you
write or extract small segments or slices of an array, if these are not stored
contiguously.
```julia
A = rand(100,100)
g1["A", chunk=(5,5), compress=3] = A
g2["A", chunk=(5,5), shuffle=(), deflate=3] = A
using H5Zblosc # load in Blosc
g3["A", chunk=(5,5), blosc=3] = A
```
Standard compression in HDF5 (`"compress"`) corresponds to (`"deflate"`) and
uses the [deflate/zlib](http://en.wikipedia.org/wiki/DEFLATE) algorithm. The
deflate algorithm is often more efficient if prefixed by a `"shuffle"` filter.
Blosc is generally much faster than deflate -- however, reading Blosc-compressed
HDF5 files requires Blosc to be installed. This is the case for Julia, but often
not for vanilla HDF5 distributions that may be used outside Julia. (In this
case, the structure of the HDF5 file is still accessible, but compressed
datasets cannot be read.) Compression requires chunking, and heuristic chunking
is automatically used if you specify compression but don't specify chunking.
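For instance, a minimal sketch (reusing the group handle `g` from above): compression is requested without a `chunk` keyword, so a chunk layout is chosen heuristically and can be inspected afterwards.
```julia
g["Acomp", compress=3] = rand(100, 100)  # no chunk specified; chunking is chosen automatically
HDF5.get_chunk(g["Acomp"])               # query the heuristically chosen chunk dimensions
```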
It is also possible to write to subsets of an on-disk HDF5 dataset. This is
useful to incrementally save to very large datasets you don't want to keep in
memory. For example,
```julia
dset = create_dataset(g, "B", datatype(Float64), dataspace(1000,100,10), chunk=(100,100,1))
dset[:,1,1] = rand(1000)
```
creates a Float64 dataset in the file or group `g`, with dimensions 1000x100x10, and then
writes to just the first 1000 element slice.
If you know the typical size of the subsets you'll be reading/writing, it can be beneficial to set the chunk dimensions accordingly.
For fine-grained control of filter and compression pipelines, please use the [`filters`](@ref Filters) keyword to define a filter pipeline. For example, this can be used to include external filter packages. This enables the use of Blosc, Bzip2, LZ4, ZStandard, or custom filter plugins.
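For instance, a sketch of an explicit shuffle-then-deflate pipeline using the built-in filter types (equivalent to the `shuffle`/`deflate` keywords above), reusing `A` and a group handle `g`:
```julia
using HDF5.Filters
g["C", chunk=(5,5), filters=[Shuffle(), Deflate(3)]] = A  # shuffle first, then deflate at level 3
```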
## Memory mapping
If you will frequently be accessing individual elements or small regions of array datasets, it can be substantially more efficient to bypass HDF5 routines and use direct [memory mapping](https://en.wikipedia.org/wiki/Memory-mapped_file).
This is possible only under particular conditions: when the dataset is an array of standard "bits" types (e.g., `Float64` or `Int32`) and no chunking/compression is being used.
You can use the `ismmappable` function to test whether this is possible; for example,
```julia
dset = g["x"]
if HDF5.ismmappable(dset)
dset = HDF5.readmmap(dset)
end
val = dset[15]
```
Note that `readmmap` returns an `Array` rather than an HDF5 object.
**Note**: if you use `readmmap` on a dataset and subsequently close the file, the array data are still available---and the file continues to be in use---until all of the arrays are garbage-collected.
This is in contrast to standard HDF5 datasets, where closing the file prevents further access to any of the datasets, but the file is also detached and can safely be rewritten immediately.
Under the default
[allocation-time policy](https://portal.hdfgroup.org/display/HDF5/H5P_SET_ALLOC_TIME),
a newly added `ismmappable` dataset can only be memory mapped after it has been written
to.
The following fails:
```julia
vec_dset = create_dataset(g, "v", datatype(Float64), dataspace(10_000,1))
HDF5.ismmappable(vec_dset) # == true
vec = HDF5.readmmap(vec_dset) # throws ErrorException("Error mmapping array")
```
because although the dataset description has been added, the space within the HDF5 file
has not yet actually been allocated (so the file region cannot be memory mapped by the OS).
The storage can be allocated by making at least one write:
```julia
vec_dset[1,1] = 0.0 # force allocation of /g/v within the file
vec = HDF5.readmmap(vec_dset) # and now the memory mapping can succeed
```
Alternatively, the policy can be set so that the space is allocated immediately upon
creation of the data set with the `alloc_time` keyword:
```julia
mtx_dset = create_dataset(g, "M", datatype(Float64), dataspace(100, 1000),
alloc_time = HDF5.H5D_ALLOC_TIME_EARLY)
mtx = HDF5.readmmap(mtx_dset) # succeeds immediately
```
## In-memory HDF5 files
It is possible to use HDF5 files without writing or reading from disk. This is useful when receiving or sending data over the network. Typically, when sending data, one might want to
1) Create a new file in memory. This can be achieved by passing `Drivers.Core(; backing_store=false)` to `h5open(...)`
2) Add data to the `HDF5.File` object
3) Get a representation of the file as a byte vector. This can be achieved by calling `Vector{UInt8}(...)` on the file object.
This is illustrated in the example below
```julia
using HDF5
# Creates a new file object without storing to disk by setting `backing_store=false`
file_as_bytes = h5open("AnyName_InMemory", "w"; driver=Drivers.Core(; backing_store=false)) do fid
fid["MyData"] = randn(5, 5) # add some data
return Vector{UInt8}(fid) # get a byte vector to send, e.g., using HTTP, MQTT or similar.
end
```
In the same way, when receiving data as a vector of bytes that represents an HDF5 file, one can use `h5open(...)` with the byte vector as the first argument to get a file object.
Creating a file object from a byte vector will also by default open the file in memory, without saving a copy on disk.
```julia
using HDF5
...
h5open(file_as_bytes, "r"; name = "in_memory.h5") do fid
... # Do things with the data
end
...
```
## Supported data types
`HDF5.jl` knows how to store values of the following types: signed and unsigned integers of 8, 16, 32, and 64 bits, `Float32`, `Float64`; `Complex` versions of these numeric types; `Array`s of these numeric types (including complex versions); `String`; and `Array`s of `String`.
`Array`s of strings are supported using HDF5's variable-length-strings facility.
By default `Complex` numbers are stored as compound types with `r` and `i` fields following the `h5py` convention.
When reading data, compound types with matching field names will be loaded as the corresponding `Complex` Julia type.
These field names are configurable with the `HDF5.set_complex_field_names(real::AbstractString, imag::AbstractString)` function and complex support can be completely enabled/disabled with `HDF5.enable/disable_complex_support()`.
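A short sketch of the round trip, assuming a writable file handle `fid` (the `r`/`i` field names are the defaults):
```julia
fid["z"] = [1.0 + 2.0im, 3.0 - 4.0im]  # stored as a compound type with fields "r" and "i"
read(fid["z"])                         # read back as a Vector{ComplexF64}
```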
As of HDF5.jl version 0.16.13, support was added to map Julia structs to compound HDF5 datatypes.
```jldoctest
julia> struct Point3{T}
x::T
y::T
z::T
end
julia> datatype(Point3{Float64})
HDF5.Datatype: H5T_COMPOUND {
H5T_IEEE_F64LE "x" : 0;
H5T_IEEE_F64LE "y" : 8;
H5T_IEEE_F64LE "z" : 16;
}
```
For `Array`s, note that the array dimensionality is preserved, including 0-length
dimensions:
```julia
fid["zero_vector"] = zeros(0)
fid["zero_matrix"] = zeros(0, 0)
size(fid["zero_vector"]) # == (0,)
size(fid["zero_matrix"]) # == (0, 0)
```
An _exception_ to this rule is Julia's 0-dimensional `Array`, which is stored as an HDF5
scalar because there is a value to be preserved:
```julia
fid["zero_dim_value"] = fill(1.0π)
read(fid["zero_dim_value"]) # == 3.141592653589793, != [3.141592653589793]
```
HDF5 also has the concept of a null array which contains a type but has neither size nor
contents, which is represented by the type `HDF5.EmptyArray`:
```julia
fid["empty_array"] = HDF5.EmptyArray{Float32}()
HDF5.isnull(fid["empty_array"]) # == true
size(fid["empty_array"]) # == ()
eltype(fid["empty_array"]) # == Float32
```
This module also supports HDF5's VLEN, OPAQUE, and REFERENCE types, which can be used to encode more complex types. In general, you need to specify how you want to combine these more advanced facilities to represent more complex data types. For many of the data types in Julia, the JLD module implements support. You can likewise define your own file format if, for example, you need to interact with some external program that has explicit formatting requirements.
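As one concrete case, ragged arrays can be stored with HDF5's VLEN type via the `HDF5.VLen` wrapper. A minimal sketch, assuming a writable file handle `fid`:
```julia
fid["ragged"] = HDF5.VLen([[1, 2, 3], [4, 5]])  # rows of differing lengths
read(fid["ragged"])                             # returns a vector of vectors
```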
## Creating groups and attributes
Create a new group in the following way:
```julia
g = create_group(parent, name)
```
The named group will be created as a child of the parent.
Attributes can be created using
```julia
attributes(parent)[name] = value
```
where `attributes` simply indicates that the object referenced by `name` (a string) is an attribute, not another group or dataset. (Datasets cannot have child datasets, but groups can have either.) `value` must be a simple type: `BitsKind`s, strings, and arrays of either of these. The HDF5 standard recommends against storing large objects as attributes.
The value stored in an attribute can be retrieved like
```julia
read_attribute(parent, name)
```
You can also access the value of an attribute by indexing, like so:
```julia
julia> attr = attributes(parent)[name];
julia> attr[]
```
## Getting information
```julia
HDF5.name(obj)
```
will return the full HDF5 pathname of object `obj`.
```julia
keys(g)
```
returns a string array containing the names of all objects inside group `g`. These are relative pathnames, not absolute pathnames.
You can iterate over the objects in a group, i.e.,
```julia
for obj in g
data = read(obj)
println(data)
end
```
This gives you a straightforward way of recursively exploring an entire HDF5 file.
If you need to know whether group `g` has a dataset named `mydata`, you can test that with
```julia
if haskey(g, "mydata")
...
end
tf = haskey(g, "mydata")
```
If instead you want to know whether `g` has an attribute named `myattribute`, do it this way:
```julia
tf = haskey(attributes(g), "myattribute")
```
If you have an HDF5 object, and you want to know where it fits in the hierarchy of the file, the following can be useful:
```julia
p = parent(obj) # p is the parent object (usually a group)
fn = HDF5.filename(obj) # fn is a string
g = HDF5.root(obj) # g is the group "/"
```
For array objects (datasets and attributes) the following methods work:
```
dims = size(dset)
nd = ndims(dset)
len = length(dset)
```
Objects can be created with properties, and you can query those
properties in the following way:
```
p = HDF5.get_create_properties(dset)
chunksz = HDF5.get_chunk(p)
```
The simpler syntax `chunksz = HDF5.get_chunk(dset)` is also available.
Finally, sometimes you need to be able to conveniently test whether a file is an HDF5 file:
```julia
tf = HDF5.ishdf5(filename)
```
## Mid-level routines
Sometimes you might want more fine-grained control, which can be achieved using a different set of routines. For example,
```julia
g = open_group(parent, name)
dset = open_dataset(parent, name[, apl])
attr = open_attribute(parent, name)
t = open_datatype(parent, name)
```
These open the named group, dataset, attribute, and committed datatype, respectively. For datasets, `apl` stands for "access parameter list" and provides opportunities for more sophisticated control (see the [HDF5](https://www.hdfgroup.org/solutions/hdf5/) documentation).
New objects can be created in the following ways:
```julia
g = create_group(parent, name[, lcpl, gcpl]; properties...)
dset = create_dataset(parent, name, data; properties...)
attr = create_attribute(parent, name, data)
```
creates groups, datasets, and attributes without writing any data to them. You can then use `write(obj, data)` to store the data. The optional properties and property lists allow even more fine-grained control. This syntax uses `data` to infer the object's "HDF5.datatype" and "HDF5.dataspace"; for the most explicit control, `data` can be replaced with `dtype, dspace`, where `dtype` is an `HDF5.Datatype` and `dspace` is an `HDF5.Dataspace`.
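For example, a sketch of the two-step create-then-write pattern with an explicit datatype and dataspace:
```julia
dset = create_dataset(fid, "twostep", datatype(Float64), dataspace((4,)))  # no data written yet
write(dset, collect(1.0:4.0))                                              # store the data
```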
Analogously, to create committed data types, use
```julia
t = commit_datatype(parent, name, dtype[, lcpl, tcpl, tapl])
```
You can create and write data in one step,
```julia
write_dataset(parent, name, data; properties...)
write_attribute(parent, name, data)
```
You can use extendible dimensions,
```julia
d = create_dataset(parent, name, dtype, (dims, max_dims), chunk=(chunk_dims))
HDF5.set_extent_dims(d, new_dims)
```
where `dims` is a tuple of integers. For example
```julia
b = create_dataset(fid, "b", Int, ((1000,),(-1,)), chunk=(100,)) #-1 is equivalent to typemax(hsize_t)
HDF5.set_extent_dims(b, (10000,))
b[1:10000] = collect(1:10000)
```
When dimensions are reduced, the truncated data is lost. A maximum dimension of -1 is often referred to as an unlimited dimension, though it is in practice limited by the maximum value of an unsigned integer.
You can copy data from one file to another:
```julia
copy_object(source, data_name, target, name)
copy_object(source[data_name], target, name)
```
Finally, it's possible to delete objects:
```julia
delete_object(parent, name) # for groups, datasets, and datatypes
delete_attribute(parent, name) # for attributes
```
## Low-level routines
Many of the most commonly-used libhdf5 functions have been wrapped in a submodule `API`. The library follows a consistent convention: for example, libhdf5's `H5Adelete` is wrapped with a Julia function called `h5a_delete`. The arguments are exactly as specified in the [HDF5](https://www.hdfgroup.org/solutions/hdf5/) reference manual. Note that the functions in the `API` submodule are not exported, so unless you import them specifically, you need to preface them with `HDF5.API` to use them: for example, `HDF5.API.h5a_delete`.
HDF5 is a large library, and the low-level wrap is not complete. However, many of the most-commonly used functions are wrapped, and in general wrapping a new function takes only a single line of code. Users who need additional functionality are encouraged to contribute it.
Note that Julia's HDF5 directly uses the "2" interfaces, e.g., `H5Dcreate2`, so you need to have version 1.8 of the HDF5 library or later.
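As a sketch, here is the low-level equivalent of `delete_attribute` (the high-level wrapper objects convert to their `hid_t` handles when passed to `API` functions; the attribute name is hypothetical):
```julia
h5open("example.h5", "r+") do fid
    HDF5.API.h5a_delete(fid, "myattribute")  # wraps libhdf5's H5Adelete
end
```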
## Language interoperability with row- and column-major order arrays
There are two main methods for storing multidimensional arrays in linear storage: [row-major order and column-major order](https://en.wikipedia.org/wiki/Row-_and_column-major_order). Julia, like Fortran and MATLAB, stores multidimensional arrays in column-major order, while other languages, including C and Python (NumPy), use row-major order. Therefore, when reading an array in Julia from a row-major-order language, the dimensions may appear inverted.
To read a multidimensional array into the original shape from an HDF5 file written by Python (`numpy` and `h5py`) or C/C++/Objective-C, simply reverse the dimensions. For example, one may add the following line after reading the dataset `dset`:
```julia
dset = permutedims(dset, reverse(1:ndims(dset)))
```
Note that some languages or libraries use both methods, so please check the dataset's description for details. For example, NumPy arrays are row-major by default, but NumPy can use either row-major or column-major ordered arrays.
## Credits
- Konrad Hinsen initiated Julia's support for HDF5
- Tim Holy and Simon Kornblith (primary authors)
- Tom Short contributed code and ideas to the dictionary-like
interface
- Blake Johnson made several improvements, such as support for
iterating over attributes
- Isaiah Norton and Elliot Saba improved installation on Windows and OSX
- Steve Johnson contributed the `do` syntax and Blosc compression
- Mike Nolta and Jameson Nash contributed code or suggestions for
improving the handling of HDF5's constants
- Thanks also to the users who have reported bugs and tested fixes
| HDF5 | https://github.com/JuliaIO/HDF5.jl.git |
|
[
"MIT"
] | 0.17.2 | e856eef26cf5bf2b0f95f8f4fc37553c72c8641c | docs | 5109 | # Parallel HDF5
It is possible to read and write [parallel
HDF5](https://portal.hdfgroup.org/display/HDF5/Parallel+HDF5) files using MPI.
For this, the HDF5 binaries loaded by HDF5.jl must have been compiled with
parallel support, and linked to the specific MPI implementation that will be used for parallel I/O.
Parallel-enabled HDF5 libraries are usually included in computing clusters and
linked to the available MPI implementations.
They are also available via the package manager of a number of Linux
distributions.
Finally, note that the MPI.jl package is lazy-loaded by HDF5.jl
using [Requires](https://github.com/JuliaPackaging/Requires.jl).
In practice, this means that in Julia code, `MPI` must be imported _before_
`HDF5` for parallel functionality to be available.
## Setting-up Parallel HDF5
The following step-by-step guide assumes one already has access to
parallel-enabled HDF5 libraries linked to an existing MPI installation.
### [1. Using system-provided MPI libraries](@id using_system_MPI)
Using a system-provided MPI library can be done with MPIPreferences.jl.
After installing MPIPreferences.jl and running
`julia --project -e 'using MPIPreferences; MPIPreferences.use_system_binary()'`
MPIPreferences.jl identifies any available MPI implementation and stores the information
in a file LocalPreferences.toml.
See the [MPI.jl
docs](https://juliaparallel.org/MPI.jl/stable/configuration/#Using-a-system-provided-MPI-backend)
for details.
### [2. Using parallel HDF5 libraries](@id using_parallel_HDF5)
!!! note "Migration from HDF5.jl v0.16 and earlier"
The way to use a system-provided HDF5 library changed in HDF5.jl v0.17. Previously,
the library path was set by the environment variable `JULIA_HDF5_PATH`, which required
rebuilding HDF5.jl afterwards. The environment variable has been removed and no longer has an
effect (for backward compatibility it is still recommended to **also** set the environment
variable). Instead, proceed as described below.
As detailed in [Using custom or system provided HDF5 binaries](@ref), set the
preferences `libhdf5` and `libhdf5_hl` to the full paths where the parallel HDF5 binaries are located.
This can be done by:
```julia
julia> using HDF5
julia> HDF5.API.set_libraries!("/path/to/your/libhdf5.so", "/path/to/your/libhdf5_hl.so")
```
### 3. Loading MPI-enabled HDF5
In Julia code, MPI.jl must be loaded _before_ HDF5.jl for MPI functionality to
be available:
```julia
using MPI
using HDF5
@assert HDF5.has_parallel()
```
### Notes to HPC cluster administrators
More information for a setup at an HPC cluster can be found in the [docs of MPI.jl](https://juliaparallel.org/MPI.jl/stable/configuration/#Notes-to-HPC-cluster-administrators).
After performing steps [1.](@ref using_system_MPI) and [2.](@ref using_parallel_HDF5), the
LocalPreferences.toml file could look something like the following:
```toml
[MPIPreferences]
_format = "1.0"
abi = "OpenMPI"
binary = "system"
libmpi = "/software/mpi/lib/libmpi.so"
mpiexec = "/software/mpi/bin/mpiexec"
[HDF5]
libhdf5 = "/path/to/your/libhdf5.so"
libhdf5_hl = "/path/to/your/libhdf5_hl.so"
```
### Reading and writing data in parallel
A parallel HDF5 file may be opened by passing a `MPI.Comm` (and optionally a
`MPI.Info`) object to [`h5open`](@ref).
For instance:
```julia
comm = MPI.COMM_WORLD
info = MPI.Info()
ff = h5open(filename, "w", comm, info)
```
MPI-distributed data is typically written by first creating a dataset
describing the global dimensions of the data.
The following example writes a `10 × Nproc` array distributed over `Nproc` MPI
processes.
```julia
Nproc = MPI.Comm_size(comm)
myrank = MPI.Comm_rank(comm)
M = 10
A = fill(myrank, M) # local data
dims = (M, Nproc) # dimensions of global data
# Create dataset
dset = create_dataset(ff, "/data", datatype(eltype(A)), dataspace(dims))
# Write local data
dset[:, myrank + 1] = A
```
Note that metadata operations, such as `create_dataset`, must be called _collectively_ (on all processes at the same time, with the same arguments), but the actual writing to the dataset may be done independently. See [Collective Calling Requirements in Parallel HDF5 Applications](https://portal.hdfgroup.org/display/HDF5/Collective+Calling+Requirements+in+Parallel+HDF5+Applications) for the exact requirements.
Sometimes, it may be more efficient to write data in chunks, so that each
process writes to a separate chunk of the file.
This is especially the case when data is uniformly distributed among MPI
processes.
In this example, this can be achieved by passing `chunk=(M, 1)` to `create_dataset`.
For better performance, it is sometimes preferable to perform [collective
I/O](https://portal.hdfgroup.org/display/HDF5/Introduction+to+Parallel+HDF5)
when reading and writing datasets in parallel.
This is achieved by passing `dxpl_mpio=:collective` to `create_dataset`.
See also the [HDF5 docs](https://portal.hdfgroup.org/display/HDF5/H5P_SET_DXPL_MPIO).
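Combining both suggestions, a sketch of a per-process chunked dataset with collective transfers, reusing `ff`, `dims`, `M`, `A`, and `myrank` from above:
```julia
dset = create_dataset(ff, "/data_collective", datatype(eltype(A)), dataspace(dims);
                      chunk=(M, 1), dxpl_mpio=:collective)
dset[:, myrank + 1] = A  # each process writes its own chunk
```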
A few more examples are available in [`test/mpio.jl`](https://github.com/JuliaIO/HDF5.jl/blob/master/test/mpio.jl).
| HDF5 | https://github.com/JuliaIO/HDF5.jl.git |
|
[
"MIT"
] | 0.17.2 | e856eef26cf5bf2b0f95f8f4fc37553c72c8641c | docs | 1085 | # Additional Resources
## JuliaCon 2023 Presentation
During JuliaCon 2023, Mark Kittisopikul and Simon Byrne presented "HDF5.jl: Hierarchial Data Storage for Julia" with assistance from Mustafa Mohamad. A recording of the talk is available on YouTube below.
```@raw html
<iframe width="560" height="315" src="https://www.youtube.com/embed/YNDDB8uR26Q?si=V5ArUwOkG6s7VesL" title="YouTube video player" frameborder="0" allow="accelerometer; autoplay; clipboard-write; encrypted-media; gyroscope; picture-in-picture; web-share" allowfullscreen></iframe>
```
The slides for the JuliaCon 2023 presentation are also available for [download](https://github.com/JuliaIO/HDF5.jl/blob/master/docs/juliacon_2023/juliacon_2023_presentation.pdf).
## Community Help Channels
* The Julia Discourse forum has a [hdf5 tag](https://discourse.julialang.org/tag/hdf5)
* The [Julia Community](https://julialang.org/community/) also has a number of chat options. See the [#data](https://julialang.slack.com/archives/C674VR0HH) or [#helpdesk](https://julialang.slack.com/archives/C6A044SQH) chat channels.
| HDF5 | https://github.com/JuliaIO/HDF5.jl.git |
|
[
"MIT"
] | 0.17.2 | e856eef26cf5bf2b0f95f8f4fc37553c72c8641c | docs | 322 | # Attributes
```@meta
CurrentModule = HDF5
```
## Dictionary interface
```@docs
attrs
attributes
```
## Mid-level Interface
```@docs
Attribute
open_attribute
create_attribute
read_attribute
write_attribute
delete_attribute
rename_attribute
```
## Convenience interface
```@docs
h5readattr
h5writeattr
num_attrs
```
| HDF5 | https://github.com/JuliaIO/HDF5.jl.git |
|
[
"MIT"
] | 0.17.2 | e856eef26cf5bf2b0f95f8f4fc37553c72c8641c | docs | 295 | # Configuration
```@meta
CurrentModule = HDF5
```
```@docs
has_parallel
has_ros3
```
## Display
```@docs
SHOW_TREE_ICONS
SHOW_TREE_MAX_CHILDREN
SHOW_TREE_MAX_DEPTH
```
## Internals
```@docs
get_context_property
CONTEXT
HDF5Context
```
## System Libraries
```@docs
API.set_libraries!
```
| HDF5 | https://github.com/JuliaIO/HDF5.jl.git |
|
[
"MIT"
] | 0.17.2 | e856eef26cf5bf2b0f95f8f4fc37553c72c8641c | docs | 755 | # Dataset
```@meta
CurrentModule = HDF5
```
Many dataset operations are available through the indexing interface, which is an alias for the functional interface. The functional interface is described below.
```@docs
Dataset
create_dataset
Base.copyto!
Base.similar
create_external_dataset
get_datasets
open_dataset
write_dataset
read_dataset
```
## Chunks
```@docs
do_read_chunk
do_write_chunk
get_chunk_index
get_chunk_info_all
get_chunk_length
get_chunk_offset
get_num_chunks
get_num_chunks_per_dim
read_chunk
write_chunk
```
### Private Implementation
These functions select private implementations of the public high-level API.
They should be used for diagnostic purposes only.
```@docs
_get_chunk_info_all_by_index
_get_chunk_info_all_by_iter
```
| HDF5 | https://github.com/JuliaIO/HDF5.jl.git |
|
[
"MIT"
] | 0.17.2 | e856eef26cf5bf2b0f95f8f4fc37553c72c8641c | docs | 198 | # Dataspaces
```@meta
CurrentModule = HDF5
```
```@docs
Dataspace
dataspace
isnull
get_extent_dims
set_extent_dims
```
# Hyperslab
```@docs
BlockRange
select_hyperslab!
get_regular_hyperslab
``` | HDF5 | https://github.com/JuliaIO/HDF5.jl.git |
|
[
"MIT"
] | 0.17.2 | e856eef26cf5bf2b0f95f8f4fc37553c72c8641c | docs | 84 | # Datatypes
```@meta
CurrentModule = HDF5
```
```@docs
Datatype
open_datatype
```
| HDF5 | https://github.com/JuliaIO/HDF5.jl.git |
|
[
"MIT"
] | 0.17.2 | e856eef26cf5bf2b0f95f8f4fc37553c72c8641c | docs | 110 | # Files
```@meta
CurrentModule = HDF5
```
```@docs
h5open
ishdf5
Base.isopen
Base.read
start_swmr_write
```
| HDF5 | https://github.com/JuliaIO/HDF5.jl.git |
|
[
"MIT"
] | 0.17.2 | e856eef26cf5bf2b0f95f8f4fc37553c72c8641c | docs | 5621 | # Filters
HDF5 supports filters for compression and validation: these are applied sequentially to
each chunk of a dataset when writing data, and in reverse order when reading data.
```@meta
CurrentModule = HDF5
```
These can be set by passing a filter or vector of filters as a `filters` property to
[`DatasetCreateProperties`](@ref) or via the `filters` keyword argument of [`create_dataset`](@ref).
```@meta
CurrentModule = HDF5.Filters
```
## Example
```@docs
Filters
```
## Built-in Filters
```@docs
Deflate
Shuffle
Fletcher32
Szip
NBit
ScaleOffset
ExternalFilter
```
## External Filter Packages
Several external Julia packages implement HDF5 filter plugins in Julia.
As they are independent of HDF5.jl, they must be installed in order to use their plugins.
The
[H5Zblosc.jl](https://github.com/JuliaIO/HDF5.jl/tree/master/filters/H5Zblosc),
[H5Zbzip2.jl](https://github.com/JuliaIO/HDF5.jl/tree/master/filters/H5Zbzip2),
[H5Zlz4.jl](https://github.com/JuliaIO/HDF5.jl/tree/master/filters/H5Zlz4), and
[H5Zzstd.jl](https://github.com/JuliaIO/HDF5.jl/tree/master/filters/H5Zzstd) packages are maintained as
independent subdirectory packages within the HDF5.jl repository.
### H5Zblosc.jl
```@meta
CurrentModule = H5Zblosc
```
```@docs
BloscFilter
```
### H5Zbzip2.jl
```@meta
CurrentModule = H5Zbzip2
```
```@docs
Bzip2Filter
```
### H5Zlz4.jl
```@meta
CurrentModule = H5Zlz4
```
```@docs
Lz4Filter
```
### H5Zzstd.jl
```@meta
CurrentModule = H5Zzstd
```
```@docs
ZstdFilter
```
## Other External Filters
Additional filters can be dynamically loaded by the HDF5 library. See [External Links](@ref) below for more information.
### Using an ExternalFilter
```@meta
CurrentModule = HDF5.Filters
```
[`ExternalFilter`](@ref) can be used to insert a dynamically loaded filter into the [`FilterPipeline`](@ref) in an ad-hoc fashion.
#### Example for `bitshuffle`
If we do not have a defined subtype of [`Filter`](@ref) for the [bitshuffle filter](https://github.com/kiyo-masui/bitshuffle/blob/master/src/bshuf_h5filter.h),
we can create an `ExternalFilter`. From the header file or the list of registered plugins, we see that the bitshuffle filter has a filter ID of `32008`.
Furthermore, the header describes two options:
1. `block_size` (optional). Default is `0`.
2. `compression` - This can be `0` or `BSHUF_H5_COMPRESS_LZ4` (`2` as defined in the C header)
```julia
using HDF5.Filters
bitshuf = ExternalFilter(32008, Cuint[0, 0])
bitshuf_comp = ExternalFilter(32008, Cuint[0, 2])
data_A = rand(0:31, 1024)
data_B = rand(32:63, 1024)
filename, _ = mktemp()
h5open(filename, "w") do h5f
# Indexing style
h5f["ex_data_A", chunk=(32,), filters=bitshuf] = data_A
# Procedural style
d, dt = create_dataset(h5f, "ex_data_B", data_B, chunk=(32,), filters=[bitshuf_comp])
write(d, data_B)
end
```
### Registered Filter Helpers
The HDF Group maintains a list of registered filters which have been assigned a filter ID number.
The module [`Filters.Registered`](@ref) contains information about registered filters including functions
to create an `ExternalFilter` for each registered filter.
### Creating a new Filter type
Examining the [bitshuffle filter source code](https://github.com/kiyo-masui/bitshuffle/blob/0aee87e142c71407aa097c660727f2621c71c493/src/bshuf_h5filter.c#L47-L64), we see that three additional data components are prepended to the options. These are
1. The major version
2. The minor version
3. The element size in bytes of the type via `H5Tget_size`.
```julia
import HDF5.Filters: FILTERS, Filter, FilterPipeline, filterid
using HDF5.API
const H5Z_BSHUF_ID = API.H5Z_filter_t(32008)
struct BitShuffleFilter <: HDF5.Filters.Filter
major::Cuint
minor::Cuint
elem_size::Cuint
block_size::Cuint
compression::Cuint
BitShuffleFilter(block_size, compression) = new(0, 0, 0, block_size, compression)
end
# filterid is the only required method of the filter interface
# since we are using an externally registered filter
filterid(::Type{BitShuffleFilter}) = H5Z_BSHUF_ID
FILTERS[H5Z_BSHUF_ID] = BitShuffleFilter
function Base.push!(p::FilterPipeline, f::BitShuffleFilter)
ref = Ref(f)
GC.@preserve ref begin
API.h5p_set_filter(p.plist, H5Z_BSHUF_ID, API.H5Z_FLAG_OPTIONAL, 2, pointer_from_objref(ref) + sizeof(Cuint)*3)
end
return p
end
```
Because the first three elements are not provided directly via `h5p_set_filter`, we also needed to implement a custom `Base.push!` method for the `FilterPipeline`.
## Filter Interface
```@meta
CurrentModule = HDF5.Filters
```
The filter interface is used to describe filters and obtain information on them.
```@docs
Filter
FilterPipeline
UnknownFilter
FILTERS
EXTERNAL_FILTER_JULIA_PACKAGES
filterid
isavailable
isdecoderenabled
isencoderenabled
decoder_present
encoder_present
ensure_filters_available
filtername
can_apply_func
can_apply_cfunc
set_local_func
set_local_cfunc
filter_func
filter_cfunc
register_filter
```
## Registered Filters
```@autodocs
Modules = [Registered]
```
## External Links
* A [list of registered filter plugins](https://portal.hdfgroup.org/display/support/Registered+Filter+Plugins) can be found on the HDF Group website.
* [See the HDF5 Documentation of HDF5 Filter Plugins for details.](https://portal.hdfgroup.org/display/support/HDF5+Filter+Plugins)
* The source code for many external plugins have been collected in the [HDFGroup hdf5_plugins repository](https://github.com/HDFGroup/hdf5_plugins).
* [Compiled binaries of dynamically loaded plugins](https://portal.hdfgroup.org/display/support/Downloads) can be downloaded from the HDF Group.
| HDF5 | https://github.com/JuliaIO/HDF5.jl.git |
|
[
"MIT"
] | 0.17.2 | e856eef26cf5bf2b0f95f8f4fc37553c72c8641c | docs | 104 | # Groups
```@meta
CurrentModule = HDF5
```
```@docs
Group
create_group
open_group
create_external
```
| HDF5 | https://github.com/JuliaIO/HDF5.jl.git |
|
[
"MIT"
] | 0.17.2 | e856eef26cf5bf2b0f95f8f4fc37553c72c8641c | docs | 85 | # Objects
```@meta
CurrentModule = HDF5
```
```@docs
copy_object
delete_object
```
| HDF5 | https://github.com/JuliaIO/HDF5.jl.git |
|
[
"MIT"
] | 0.17.2 | e856eef26cf5bf2b0f95f8f4fc37553c72c8641c | docs | 1545 | # Properties
```@meta
CurrentModule = HDF5
```
HDF5 property lists are collections of name-value pairs which can be passed to other HDF5
functions to control features that are typically unimportant or whose default values are
usually used. In HDF5.jl, these options are typically handled by keyword arguments to such
functions, which will internally create the appropriate `Properties` objects, and
so users will not usually be required to construct them manually.
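For example, chunking and compression can be requested via keyword arguments, with the corresponding `DatasetCreateProperties` created internally (a minimal sketch; the file name is illustrative):
```julia
using HDF5
data = rand(100, 100)
h5open("example.h5", "w") do f
    # `chunk` and `deflate` are forwarded to the dataset creation property list
    d, _ = create_dataset(f, "A", data; chunk=(10, 10), deflate=3)
    write(d, data)
end
```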
Not all properties defined by the HDF5 library are currently available in HDF5.jl. If you
require additional properties, please open an issue or pull request.
## Common functions
```@docs
setproperties!
```
## `Properties` types
```@docs
AttributeCreateProperties
FileAccessProperties
FileCreateProperties
GroupAccessProperties
GroupCreateProperties
DatasetCreateProperties
DatasetAccessProperties
DatatypeAccessProperties
DatasetTransferProperties
LinkCreateProperties
ObjectCreateProperties
StringCreateProperties
DatatypeCreateProperties
```
## Virtual Datasets
```@docs
VirtualMapping
VirtualLayout
```
## Drivers
```@meta
CurrentModule = HDF5
```
File drivers determine how the HDF5 file is accessed. These can be set as the `driver` property in [`FileAccessProperties`](@ref).
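For example, to open a file with the in-memory `Core` driver (a minimal sketch, assuming `Core`'s default options; the file name is illustrative):
```julia
using HDF5
h5open("example.h5", "w"; driver=HDF5.Drivers.Core()) do f
    f["x"] = collect(1:10)
end
```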
```@meta
CurrentModule = HDF5.Drivers
```
```@docs
Core
POSIX
ROS3
MPIO
```
## Internals
```@meta
CurrentModule = HDF5
```
The following macros are used for defining new properties and property getters/setters.
```@docs
@propertyclass
@bool_property
@enum_property
@tuple_property
``` | HDF5 | https://github.com/JuliaIO/HDF5.jl.git |
|
[
"MIT"
] | 0.17.2 | e856eef26cf5bf2b0f95f8f4fc37553c72c8641c | docs | 275 | # H5Zbitshuffle.jl
Implements the bitshuffle filter for [HDF5.jl](https://github.com/JuliaIO/HDF5.jl) in Julia,
with optional integrated lz4 and zstd (de)compression.
This implements [HDF5 filter ID 32008](https://portal.hdfgroup.org/display/support/Filters#Filters-32008)
| HDF5 | https://github.com/JuliaIO/HDF5.jl.git |
|
[
"MIT"
] | 0.17.2 | e856eef26cf5bf2b0f95f8f4fc37553c72c8641c | docs | 296 | # H5Zblosc.jl
Implements the Blosc filter for [HDF5.jl](https://github.com/JuliaIO/HDF5.jl) in Julia.
See the [documentation](https://juliaio.github.io/HDF5.jl/stable/filters/#H5Zblosc.jl)
This implements [HDF5 filter ID 32001](https://portal.hdfgroup.org/display/support/Filters#Filters-32001) | HDF5 | https://github.com/JuliaIO/HDF5.jl.git |
|
[
"MIT"
] | 0.17.2 | e856eef26cf5bf2b0f95f8f4fc37553c72c8641c | docs | 304 | # H5Zbzip2.jl
Implements the Bzip2 filter for [HDF5.jl](https://github.com/JuliaIO/HDF5.jl) in Julia.
See the [documentation](https://juliaio.github.io/HDF5.jl/stable/filters/#H5Zbzip2.jl)
This implements [HDF5 registered filter id 307](https://portal.hdfgroup.org/display/support/Filters#Filters-307). | HDF5 | https://github.com/JuliaIO/HDF5.jl.git |
|
[
"MIT"
] | 0.17.2 | e856eef26cf5bf2b0f95f8f4fc37553c72c8641c | docs | 303 | # H5Zlz4.jl
Implements the LZ4 filter for [HDF5.jl](https://github.com/JuliaIO/HDF5.jl) in Julia.
See the [documentation](https://juliaio.github.io/HDF5.jl/stable/filters/#H5Zlz4.jl)
This implements [HDF5 registered filter id 32004](https://portal.hdfgroup.org/display/support/Filters#Filters-32004).
| HDF5 | https://github.com/JuliaIO/HDF5.jl.git |
|
[
"MIT"
] | 0.17.2 | e856eef26cf5bf2b0f95f8f4fc37553c72c8641c | docs | 300 | # H5Zzstd.jl
Implements the Zstd filter for [HDF5.jl](https://github.com/JuliaIO/HDF5.jl) in Julia.
See the [documentation](https://juliaio.github.io/HDF5.jl/stable/filters/#H5Zzstd.jl)
This implements [HDF5 ZStandard Filter 32015](https://portal.hdfgroup.org/display/support/Filters#Filters-32015) | HDF5 | https://github.com/JuliaIO/HDF5.jl.git |
|
[
"MIT"
] | 0.3.0 | fdeb55687884a44d464429fd6af7a9da298d7d7e | code | 1056 | using Documenter
using BinBencherBackend
meta = quote
using BinBencherBackend
(path_to_ref_file, path_to_bins_file, ref, binning, genome, bin) = let
dir = joinpath(Base.pkgdir(BinBencherBackend), "files")
path_to_ref_file = joinpath(dir, "ref.json")
path_to_bins_file = joinpath(dir, "clusters.tsv")
ref = open(i -> Reference(i), path_to_ref_file)
genome = first(sort!(collect(genomes(ref)); by=i -> i.name))
bins = open(i -> Binning(i, ref), path_to_bins_file)
bin = first(bins.bins)
(path_to_ref_file, path_to_bins_file, ref, bins, genome, bin)
end
end
DocMeta.setdocmeta!(BinBencherBackend, :DocTestSetup, meta; recursive=true)
makedocs(;
sitename="BinBencherBackend.jl",
modules=[BinBencherBackend],
pages=[
"Home" => "index.md",
"Walkthrough" => "walkthrough.md",
"Reference" => "reference.md",
],
checkdocs=:all,
# doctest = :fix,
)
deploydocs(; repo="github.com/jakobnissen/BinBencherBackend.jl.git", push_preview=true)
| BinBencherBackend | https://github.com/jakobnissen/BinBencherBackend.jl.git |
|
[
"MIT"
] | 0.3.0 | fdeb55687884a44d464429fd6af7a9da298d7d7e | code | 1708 | module BinBencherBackend
using AbstractTrees: AbstractTrees
using CodecZlib: GzipDecompressorStream
using JSON3: JSON3
using StructTypes: StructTypes
using LazilyInitializedFields: @lazy, @isinit, @init!, @uninit!, uninit
using PrecompileTools: @setup_workload, @compile_workload
include("utils.jl")
include("flags.jl")
include("sequence.jl")
include("source.jl")
include("clade.jl")
include("genome.jl")
include("reference.jl")
include("bin.jl")
include("binning.jl")
vector(x)::Vector = x isa Vector ? x : vec(collect(x))::Vector
imap(f) = x -> Iterators.map(f, x)
ifilter(f) = x -> Iterators.filter(f, x)
@setup_workload begin
dir = joinpath(dirname(dirname(pathof(BinBencherBackend))), "files")
refpath = joinpath(dir, "ref.json")
binpath = joinpath(dir, "clusters.tsv")
@compile_workload begin
ref = open(refpath) do io
Reference(io)
end
subset!(ref; sequences=Returns(true), genomes=Returns(true))
gold_standard(ref)
bins = open(binpath) do io
Binning(io, ref)
end
print_matrix(IOBuffer(), bins)
n_recovered(bins, 0.4, 0.2)
n_recovered(bins, 0.4, 0.2; assembly=false)
n_recovered(bins, 0.4, 0.2; level=1)
end
end
export Sequence,
flags,
Flag,
Flags,
FlagSet,
Source,
Clade,
Genome,
Bin,
Reference,
Binning,
n_bins,
n_seqs,
intersecting,
recall_precision,
genomes,
is_organism,
is_virus,
is_plasmid,
top_clade,
gold_standard,
n_recovered,
n_passing_bins,
subset,
subset!,
f1,
passes_f1,
passes_recall_precision,
fscore,
mrca,
print_matrix
end # module
| BinBencherBackend | https://github.com/jakobnissen/BinBencherBackend.jl.git |
|
[
"MIT"
] | 0.3.0 | fdeb55687884a44d464429fd6af7a9da298d7d7e | code | 13189 | """
Bin(name::AbstractString, ref::Reference, sequences)
`Bin`s each represent a bin created by the binner. Conceptually, they are simply
a set of `Sequence` with a name attached.
Practically, every `Bin` is benchmarked against all `Genome`s and `Clade`s of a given `Reference`,
so each `Bin` stores data about its intersection with every genome/clade, e.g. its purity and recall.
Like `Source`s, `Bin`s also have an _assembly size_ for a given `Genome`. This is the number
of base pairs in the genome covered by any sequence in the `Bin`, which can never exceed
the genome's assembly size.
Benchmark statistics for a `Bin`/`Genome` can be done with either _assemblies_
or _genomes_ as the ground truth.
* True positives (TP) are defined as the number of base pairs in the genome's sources covered by any sequence in the bin
* False positives (FP) are the summed length of the bin's sequences that map to some genome, but not to this one
* False negatives (FN) are either the genome's assembly size or its genome size, minus TP.
For `Bin`/`Clade` pairs B/C, recall is the maximal recall of B/Ch for all children Ch of C.
Precision is the sum of lengths of sequences mapping to any child of the clade divided
by the summed length of the bin's sequences that map to any genome at all.
See also: [`Binning`](@ref), [`Genome`](@ref), [`Clade`](@ref)
# Examples
```jldoctest
julia> bin = first(binning.bins)
Bin "C1"
Sequences: 2
Breadth: 65
Intersecting 1 genome
julia> first(bin.sequences)
Sequence("s1", 25)
julia> f1(first(ref.genomes), bin)
0.5714285714285715
```
"""
struct Bin
name::String
sequences::Vector{Sequence}
# Asmsize: Bases covered by sequences in this bin.
# Foreign: Sum of sequences mapping to other sequences, but not this genome
# Total basepairs: Number of basepairs that map to the genome
genomes::Dict{Genome, @NamedTuple{asmsize::Int, total_bp::Int, foreign::Int}}
# Recall is max recall of all children
    # Precision is (sum of lengths of seqs mapping to the clade) / (sum of lengths of seqs mapping to anything)
clades::Dict{
Clade{Genome},
@NamedTuple{asm_recall::Float64, genome_recall::Float64, precision::Float64}
}
    # Sum of lengths of all sequences in the bin, cached for efficiency
breadth::Int
end
# Note: This constructor is user-facing. We use the more efficient
# `bin_by_indices` when constructing Bins in practise
function Bin(
name::AbstractString,
ref::Reference,
sequences, # iterator of Sequence
considered_genomes::Union{Nothing, Set{Genome}}=nothing,
)
indices = [ref.target_index_by_name[s.name] for s in sequences]
scratch = Vector{Tuple{Int, Int}}()
bin_by_indices(name, indices, ref.targets, scratch, considered_genomes)
end
function bin_by_indices(
name::AbstractString,
seq_indices::Vector{<:Integer},
targets::Vector{Tuple{Sequence, Vector{Target}}},
scratch::Vector{Tuple{Int, Int}},
considered_genomes::Union{Nothing, Set{Genome}},
)
seqs = [first(targets[i]) for i in seq_indices]
# Which sequences map to the given genome, ints in bitset is indices into `seqs`.
genome_mapping = Dict{Genome, BitSet}()
source_mapping = Dict{Source{Genome}, Vector{Tuple{Int, Int}}}()
mapping_breadth = 0
for (i, (seq, idx)) in enumerate(zip(seqs, seq_indices))
seq_targets = last(targets[idx])
isempty(seq_targets) || (mapping_breadth += length(seq))
for (source, span) in seq_targets
genome = source.genome
!isnothing(considered_genomes) && !in(genome, considered_genomes) && continue
push!(get!(valtype(genome_mapping), genome_mapping, genome), i)
push!(get!(valtype(source_mapping), source_mapping, source), span)
end
end
genomes = Dict{Genome, @NamedTuple{asmsize::Int, total_bp::Int, foreign::Int}}()
# Set `foreign`, which we can compute simply by knowing which sequences map to the genomes
for (genome, set) in genome_mapping
genomes[genome] = (;
asmsize=0,
total_bp=0,
foreign=mapping_breadth - sum(i -> seqs[i].length, set; init=0),
)
end
# Incrementally update `asmsize`; we need to compute this on a per-source level
for (source, spans) in source_mapping
(; asmsize, total_bp, foreign) = genomes[source.genome]
(new_asmsize, new_total_bp) =
assembly_size!(identity, scratch, spans, source.length)
genomes[source.genome] = (;
asmsize=asmsize + new_asmsize,
total_bp=total_bp + new_total_bp,
foreign=foreign,
)
end
# We store sets of mapping sequences - the set mapping to a clade is the union of those
# mapping to its children.
clade_mapping = Dict{
Clade{Genome},
@NamedTuple{asm_recall::Float64, genome_recall::Float64, mapping_seqs::BitSet}
}()
for (genome, (; asmsize)) in genomes
asm_recall = asmsize / genome.assembly_size
genome_recall = asmsize / genome.genome_size
(old_asm_recall, old_genome_recall, mapping) = get!(
() -> (; asm_recall=0.0, genome_recall=0.0, mapping_seqs=BitSet()),
clade_mapping,
genome.parent,
)
clade_mapping[genome.parent] = (;
asm_recall=max(old_asm_recall, asm_recall),
genome_recall=max(old_genome_recall, genome_recall),
mapping_seqs=union!(mapping, genome_mapping[genome]),
)
end
# Now, iteratively compute clades at a higher and higher level.
# Begin with parents of genome (this generation), then iteratively look at parents and update them
this_generation = Set((clade for clade in keys(clade_mapping)))
next_generation = empty(this_generation)
while true
while !isempty(this_generation)
clade = pop!(this_generation)
parent = clade.parent
# If top level clade: Do not continue to next generation
parent === nothing && continue
(parent_asm_recall, parent_genome_recall, parent_mapping) = get!(
() -> (; asm_recall=0.0, genome_recall=0.0, mapping_seqs=BitSet()),
clade_mapping,
parent,
)
(child_asm_recall, child_genome_recall, child_mapping) = clade_mapping[clade]
clade_mapping[parent] = (
asm_recall=max(parent_asm_recall, child_asm_recall),
genome_recall=max(parent_genome_recall, child_genome_recall),
mapping_seqs=union!(parent_mapping, child_mapping),
)
push!(next_generation, parent)
end
isempty(next_generation) && break
# Reuse the sets for next generation by swapping them and emptying the now-used up
(this_generation, next_generation) = (next_generation, this_generation)
@assert isempty(next_generation)
end
# Now, having computed the sets of mapping contigs, we can compute the actual precision values
clades = Dict{
Clade{Genome},
@NamedTuple{asm_recall::Float64, genome_recall::Float64, precision::Float64}
}()
for (clade, (asm_recall, genome_recall, set)) in clade_mapping
precision = sum(i -> seqs[i].length, set; init=0) / mapping_breadth
clades[clade] =
(; asm_recall=asm_recall, genome_recall=genome_recall, precision=precision)
end
breadth = sum(seqs |> imap(length); init=0)
Bin(String(name), seqs, genomes, clades, breadth)
end
n_seqs(x::Bin) = length(x.sequences)
"""
intersecting([Genome, Clade]=Genome, x::Bin)
Get an iterator of the `Genome`s or `Clade`s that bin `x` intersects with.
`intersecting(::Bin)` defaults to genomes.
# Example
```jldoctest
julia> collect(intersecting(bin))
1-element Vector{Genome}:
Genome(gA)
julia> sort!(collect(intersecting(Clade, bin)); by=i -> i.name)
2-element Vector{Clade{Genome}}:
Species "D", 2 genomes
Genus "F", 3 genomes
```
"""
intersecting(x::Bin) = intersecting(Genome, x)
intersecting(::Type{Genome}, x::Bin) = keys(x.genomes)
intersecting(::Type{<:Clade}, x::Bin) = keys(x.clades)
Base.show(io::IO, x::Bin) = print(io, "Bin(", x.name, ')')
function Base.show(io::IO, ::MIME"text/plain", x::Bin)
if get(io, :compact, false)
show(io, x)
else
ngenomes = length(x.genomes)
suffix = ngenomes > 1 ? "s" : ""
print(
io,
"Bin \"",
x.name,
"\"\n Sequences: ",
n_seqs(x),
"\n Breadth: ",
x.breadth,
"\n Intersecting ",
length(x.genomes),
" genome",
suffix,
)
end
end
function confusion_matrix(genome::Genome, bin::Bin; assembly::Bool=false)
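    # TP = bases of the genome's sources covered by the bin (asmsize).
    # FP = summed length of the bin's mapping sequences that do not map to this genome (foreign).
    # FN = the genome's assembly (or genome) size minus TP.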
(; asmsize, foreign) =
get(bin.genomes, genome, (asmsize=0, total_bp=0, foreign=bin.breadth))
fn = (assembly ? genome.assembly_size : genome.genome_size) - asmsize
(asmsize, foreign, fn)
end
"""
    recall_precision(x::Union{Genome, Clade}, bin::Bin; assembly::Bool=false)
Get the recall, precision `NamedTuple` of `Float64` for the given genome/bin pair.
See the docstring for `Bin` for how this is computed.
See also: [`Bin`](@ref), [`Binning`](@ref)
# Examples
```jldoctest
julia> bingenome = only(intersecting(bin));
julia> recall_precision(bingenome, bin)
(recall = 0.4, precision = 1.0)
julia> recall_precision(bingenome, bin; assembly=false)
(recall = 0.4, precision = 1.0)
julia> recall_precision(bingenome.parent, bin; assembly=false)
(recall = 0.4, precision = 1.0)
```
"""
function recall_precision(genome::Genome, bin::Bin; assembly::Bool=false)
(tp, fp, fn) = confusion_matrix(genome, bin; assembly=assembly)
recall = tp / (tp + fn)
precision = tp / (tp + fp)
(; recall, precision)
end
function recall_precision(clade::Clade{Genome}, bin::Bin; assembly::Bool=false)
(; asm_recall, genome_recall, precision) =
get(bin.clades, clade, (; asm_recall=0.0, genome_recall=0.0, precision=0.0))
assembly ? (; recall=asm_recall, precision) : (; recall=genome_recall, precision)
end
# NB: Returns NaN if recall and precision are both zero
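# F_beta = (1 + b^2) * (recall * precision) / (b^2 * precision + recall)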
function fscore(recall::Real, precision::Real, b::Real)
(1 + b^2) * (recall * precision) / ((b^2 * precision) + recall)
end
f1(recall::Real, precision::Real) = fscore(recall, precision, 1)
function fscore(genome::Genome, bin::Bin, b::Real; assembly::Bool=false)
(; recall, precision) = recall_precision(genome, bin; assembly)
fscore(recall, precision, b)
end
f1(genome::Genome, bin::Bin; assembly::Bool=false) = fscore(genome, bin, 1; assembly)
function recalls_precisions(::Type{Genome}, bin::Bin; assembly::Bool=false)
bin.genomes |> imap() do (genome, (; asmsize, foreign))
fn = (assembly ? genome.assembly_size : genome.genome_size) - asmsize
recall = asmsize / (asmsize + fn)
precision = asmsize / (asmsize + foreign)
(; genome, recall, precision)
end
end
function recalls_precisions(::Type{<:Clade}, bin::Bin; assembly::Bool=false)
bin.clades |> imap() do (clade, (; asm_recall, genome_recall, precision))
recall = assembly ? asm_recall : genome_recall
(; clade, recall, precision)
end
end
function recalls_precisions(bin::Bin; assembly::Bool=false)
recalls_precisions(Genome, bin; assembly)
end
# TODO: Why does this function allocate?
# Compute recall/precision of the genome with highest F1 for this bin
function recall_prec_max_f1(bin::Bin; assembly::Bool=false)
(max_recall, max_precision, max_f1) = (0.0, 0.0, 0.0)
for (; recall, precision) in recalls_precisions(bin; assembly)
this_f1 = f1(recall, precision)
if this_f1 > max_f1
(max_recall, max_precision, max_f1) = (recall, precision, this_f1)
end
end
# This can happen if the bin only contain sequences unassigned to any genome
# in which case recalls_precisions returns an iterable with zero elements
iszero(max_f1) ? nothing : (; recall=max_recall, precision=max_precision)
end
"""
passes_f1(bin::Bin, threshold::Real; assembly::Bool=false)::Bool
Computes if `bin` has an F1 score equal to, or higher than `threshold` for any genome.
# Examples
```jldoctest
julia> obs_f1 = f1(only(intersecting(bin)), bin)
0.5714285714285715
julia> passes_f1(bin, obs_f1)
true
julia> passes_f1(bin, obs_f1 + 0.001)
false
```
"""
function passes_f1(bin::Bin, threshold::Real; assembly::Bool=false)
any(recalls_precisions(Genome, bin; assembly)) do (; recall, precision)
f1(recall, precision) ≥ threshold
end
end
"""
passes_recall_precision(bin::Bin, recall::Real, precision::Real; assembly::Bool=false)::Bool
Computes if `bin` intersects with any `Genome` with at least the given recall and precision thresholds.
# Examples
```jldoctest
julia> (r, p) = recall_precision(only(intersecting(bin)), bin)
(recall = 0.4, precision = 1.0)
julia> passes_recall_precision(bin, 0.40, 1.0)
true
julia> passes_recall_precision(bin, 0.41, 1.0)
false
```
"""
function passes_recall_precision(bin::Bin, rec::Real, prec::Real; assembly::Bool=false)
any(recalls_precisions(Genome, bin; assembly)) do (; recall, precision)
recall ≥ rec && precision ≥ prec
end
end
| BinBencherBackend | https://github.com/jakobnissen/BinBencherBackend.jl.git |
|
[
"MIT"
] | 0.3.0 | fdeb55687884a44d464429fd6af7a9da298d7d7e | code | 23621 | const DEFAULT_RECALLS = (0.6, 0.7, 0.8, 0.9, 0.95, 0.99)
const DEFAULT_PRECISIONS = (0.6, 0.7, 0.8, 0.9, 0.95, 0.99)
struct BinStats
# Recall and precision are computed by pairing each bin
# with the Genome that yields the highest F1 score
mean_bin_recall::Float64
mean_bin_precision::Float64
mean_bin_f1::Float64
end
function BinStats(bins::Vector{Bin}; assembly::Bool=true)
(mean_bin_recall, mean_bin_precision, mean_bin_f1) =
mean_bin_recall_prec(bins; assembly)
BinStats(mean_bin_recall, mean_bin_precision, mean_bin_f1)
end
function mean_bin_recall_prec(bins::Vector{Bin}; assembly::Bool=true)
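    # Fold over bins, skipping bins that intersect no genome
    # (`recall_prec_max_f1` returns `nothing` for those), accumulating the sums
    # of recall, precision and F1 together with the number of bins counted.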
(recall_sum, prec_sum, f1_sum, nbins) = foldl(
Iterators.filter(
!isnothing,
Iterators.map(b -> recall_prec_max_f1(b; assembly), bins),
);
init=(0.0, 0.0, 0.0, 0),
) do (recall_sum, prec_sum, f1_sum, nbins), (; recall, precision)
(recall_sum + recall, prec_sum + precision, f1_sum + f1(recall, precision), nbins + 1)
end
(recall_sum / nbins, prec_sum / nbins, f1_sum / nbins)
end
"""
Binning(::Union{IO, AbstractString}, ::Reference; kwargs...)
A `Binning` represents a set of `Bin`s benchmarked against a `Reference`.
`Binning`s can be created given a set of `Bin`s and a `Reference`, where the
bins may potentially be loaded from a `.tsv` file.
The fields `recovered_asms` and `recovered_genomes` are used for benchmarking;
these are normally printed using the `print_matrix` function.
A `Binning` is loaded from a tsv file, which is specified either as an `IO`,
or its path as an `AbstractString`. If the path ends with `.gz`, automatically
gzip decompress when reading the file.
See also: [`print_matrix`](@ref), [`Bin`](@ref), [`Reference`](@ref)
# Examples
```jldoctest
julia> bins = Binning(path_to_bins_file, ref);
julia> bins isa Binning
true
julia> BinBencherBackend.n_nc(binning)
0
```
# Extended help
Create with:
```julia
open(file) do io
Binning(
io::Union{IO, AbstractString},
ref::Reference;
min_size::Integer=1,
min_seqs::Integer=1,
binsplit_separator::Union{AbstractString, Char, Nothing}=nothing,
disjoint::Bool=true,
recalls=DEFAULT_RECALLS,
precisions=DEFAULT_PRECISIONS,
filter_genomes=Returns(true)
)
```
* `min_size`: Filter away bins with breadth lower than this
* `min_seqs`: Filter away bins with fewer sequences than this
* `binsplit_separator`: Split bins based on this separator
(`nothing` means no binsplitting)
* `disjoint`: Throw an error if the same sequence is seen in multiple bins
* `recalls` and `precisions`: The recall/precision thresholds to benchmark with
* `filter_genomes`: A function `f(genome)::Bool`. Genomes for which it returns
`false` are ignored in benchmarking.
"""
struct Binning
ref::Reference
bins::Vector{Bin}
# One matrix per rank - first is genome, then upwards
# TODO: Should these be a dense 3D tensor instead of a vector of matrices?
recovered_asms::Vector{Matrix{Int}}
recovered_genomes::Vector{Matrix{Int}}
bin_asms::Vector{Matrix{Int}}
bin_genomes::Vector{Matrix{Int}}
recalls::Vector{Float64}
precisions::Vector{Float64}
bin_asm_stats::BinStats
bin_genome_stats::BinStats
end
function Base.show(io::IO, x::Binning)
nc = n_nc(x)
print(io, summary(x), '(')
if nc isa Integer
print(io, "NC = ", nc)
end
print(io, ')')
end
function Base.show(io::IO, ::MIME"text/plain", x::Binning)
if get(io, :compact, false)
show(io, x)
else
buf = IOBuffer()
show(buf, MIME"text/plain"(), x.ref)
seekstart(buf)
println(io, "Binning")
for line in eachline(buf)
println(io, " ", line)
end
print(io, " Bins: ", n_bins(x))
nc = n_nc(x)
if nc isa Integer
print(io, "\n NC genomes: ", nc)
end
npb = n_passing_bins(x, 0.9, 0.95)
if npb isa Integer
print(io, "\n HQ bins: ", npb)
end
for (stats, name) in
[(x.bin_genome_stats, "genome "), (x.bin_asm_stats, "assembly")]
print(
io,
"\n Mean bin ",
name,
" R/P/F1: ",
round(stats.mean_bin_recall; digits=3),
" / ",
round(stats.mean_bin_precision; digits=3),
" / ",
round(stats.mean_bin_f1; digits=3),
)
end
print(io, "\n Precisions: ", repr([round(i; digits=3) for i in x.precisions]))
print(io, "\n Recalls: ", repr([round(i; digits=3) for i in x.recalls]))
print(io, "\n Reconstruction (genomes):\n")
seekstart(buf)
print_matrix(buf, x; level=0, assembly=false)
seekstart(buf)
for line in eachline(buf)
println(io, " ", line)
end
end
end
struct RecallTooHigh end
struct PrecisionTooHigh end
struct RankOutOfRange end
struct ThresholdError
x::Union{RecallTooHigh, PrecisionTooHigh, RankOutOfRange}
end
"""
n_recovered(::Binning, recall, precision; level=0, assembly=false)::Integer
Return the number of genomes or clades reconstructed in the `Binning` at the given recall
and precision levels.
If `assembly` is set, return the number of assemblies reconstructed instead.
The argument `level` sets the taxonomic rank: 0 for `Genome` (or assemblies).
# Examples
```jldoctest
julia> n_recovered(binning, 0.4, 0.71)
1
julia> n_recovered(binning, 0.4, 0.71; assembly=true)
2
julia> n_recovered(binning, 0.4, 0.71; assembly=true, level=2)
1
```
"""
function n_recovered(
binning::Binning,
recall::Real,
precision::Real;
level::Integer=0,
assembly::Bool=false,
)::Union{Integer, ThresholdError}
matrices = assembly ? binning.recovered_asms : binning.recovered_genomes
extract_from_matrix(binning, recall, precision, matrices, level)
end
"""
n_passing_bins(::Binning, recall, precision; level=0, assembly::Bool=false)::Integer
Return the number of bins which correspond to any genome or clade at the given recall
and precision levels.
If `assembly` is set, a recall of 1.0 means a bin corresponds to a whole assembly,
else it corresponds to a whole genome.
The argument `level` sets the taxonomic rank: 0 for `Genome` (or assemblies).
# Examples
```jldoctest
julia> n_passing_bins(binning, 0.4, 0.71)
1
julia> n_passing_bins(binning, 0.65, 0.71)
0
```
"""
function n_passing_bins(
binning::Binning,
recall::Real,
precision::Real;
level::Integer=0,
assembly::Bool=false,
)::Union{Integer, ThresholdError}
matrices = assembly ? binning.bin_asms : binning.bin_genomes
extract_from_matrix(binning, recall, precision, matrices, level)
end
function extract_from_matrix(
binning::Binning,
recall::Real,
precision::Real,
matrices::Vector{<:Matrix},
level::Integer=0,
)::Union{Integer, ThresholdError}
inds = recall_precision_indices(binning, recall, precision)
inds isa ThresholdError && return inds
(ri, pi) = inds
if level + 1 ∉ eachindex(matrices)
return ThresholdError(RankOutOfRange())
end
m = matrices[level + 1]
m[pi, ri]
end
function recall_precision_indices(
binning::Binning,
recall::Real,
precision::Real,
)::Union{Tuple{Int, Int}, ThresholdError}
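    # `searchsortedfirst` selects the smallest benchmarked threshold that is ≥
    # the requested value; if the request exceeds every threshold, return an error value.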
ri = searchsortedfirst(binning.recalls, recall)
ri > length(binning.recalls) && return ThresholdError(RecallTooHigh())
pi = searchsortedfirst(binning.precisions, precision)
pi > length(binning.precisions) && return ThresholdError(PrecisionTooHigh())
(ri, pi)
end
"""
print_matrix(::Binning; level=0, assembly=false)
Print the number of reconstructed assemblies or genomes at the given taxonomic level (rank).
Level 0 corresponds to genomes, level 1 to species, etc.
If `assembly`, print the number of reconstructed assemblies, else print the number
of reconstructed genomes.
See also: [`Binning`](@ref)
"""
print_matrix(x::Binning; kwargs...) = print_matrix(stdout, x; kwargs...)
function print_matrix(io::IO, x::Binning; level::Integer=0, assembly::Bool=false)
ms = assembly ? x.recovered_asms : x.recovered_genomes
m = ms[level + 1]
rnd(x) = string(round(x; digits=3))
digitwidth(x) = sizeof(rnd(x))
width = max(
max(4, ndigits(maximum(m; init=0) + 1)),
maximum(digitwidth, x.recalls; init=0) + 1,
)
col1_width = max(3, maximum(digitwidth, x.precisions))
println(io, rpad("P\\R", col1_width), join([lpad(i, width) for i in x.recalls]))
for (prec_index, prec) in enumerate(x.precisions)
println(
io,
rpad(rnd(prec), col1_width),
join([lpad(i, width) for i in m[prec_index, :]]),
)
end
end
function n_nc(x::Binning)::Union{Int, ThresholdError}
extract_from_matrix(x, 0.9, 0.95, x.recovered_genomes, 0)
end
n_bins(x::Binning) = length(x.bins)
function Binning(path::AbstractString, ref::Reference; kwargs...)
open_perhaps_gzipped(io -> Binning(io, ref; kwargs...), String(path))
end
# This is the most common constructor in practice, because we usually load binnings from files,
# and also the most efficient. I immediately convert the sequences to integers for faster
# processing, then convert back to seqs before instantiating the bins.
function Binning(
io::IO,
ref::Reference;
min_size::Integer=1,
min_seqs::Integer=1,
binsplit_separator::Union{AbstractString, Char, Nothing}=nothing,
disjoint::Bool=true,
recalls=DEFAULT_RECALLS,
precisions=DEFAULT_PRECISIONS,
filter_genomes::Function=Returns(true),
)
idxs_by_binname = parse_bins(io, Dict, ref, binsplit_separator, disjoint)
filter!(idxs_by_binname) do (_, idxs)
length(idxs) ≥ min_seqs &&
sum((length(first(ref.targets[i])) for i in idxs); init=0) ≥ min_size
end
scratch = Tuple{Int, Int}[]
considered_genomes = if filter_genomes === Returns(true)
nothing
else
Set(g for g in genomes(ref) if filter_genomes(g))
end
bins = [
bin_by_indices(binname, seq_idxs, ref.targets, scratch, considered_genomes) for
(binname, seq_idxs) in idxs_by_binname
]
sort!(bins; by=i -> i.name)
# We already checked for disjointedness when parsing bins, so we skip it here
Binning(bins, ref; recalls, precisions, disjoint=false)
end
function Binning(
bins_,
ref::Reference;
recalls=DEFAULT_RECALLS,
precisions=DEFAULT_PRECISIONS,
disjoint::Bool=true,
)
checked_recalls = validate_recall_precision(recalls)
checked_precisions = validate_recall_precision(precisions)
bins = vector(bins_)
disjoint && check_disjoint(bins)
bin_asm_stats = BinStats(bins; assembly=true)
bin_genome_stats = BinStats(bins; assembly=false)
(asm_matrices, genome_matrices, bin_asms, bin_genomes) =
benchmark(ref, bins, checked_recalls, checked_precisions)
Binning(
ref,
bins,
asm_matrices,
genome_matrices,
bin_asms,
bin_genomes,
checked_recalls,
checked_precisions,
bin_asm_stats,
bin_genome_stats,
)
end
"""
gold_standard(
ref::Reference
[sequences, a Binning or an iterable of Sequence];
disjoint=true,
recalls=DEFAULT_RECALLS,
precisions=DEFAULT_PRECISIONS
)::Binning
Create the optimal `Binning` object given a `Reference`, by the optimal binning of
the `Sequence`s in `sequences`.
If `disjoint`, assign each sequence to only a single genome.
If `sequences` is not passed, use all sequences in `ref`. If a `Binning` is passed,
use all sequences in any of its bins. Else, pass an iterable of `Sequence`.
# Extended help
Currently, the `disjoint` option uses a simple greedy algorithm to assign
sequences to genomes.
"""
function gold_standard(ref::Reference; kwargs...)::Binning
gold_standard(ref, Set(first(v) for v in ref.targets)::Set{Sequence}; kwargs...)
end
function gold_standard(ref::Reference, binning::Binning; kwargs...)::Binning
seqs::Set{Sequence} = reduce(binning.bins; init=Set{Sequence}()) do s, bin
union!(s, bin.sequences)
end
gold_standard(ref, seqs; kwargs...)
end
function gold_standard(ref::Reference, sequences; kwargs...)::Binning
gold_standard(ref, Set(sequences)::Set{Sequence}; kwargs...)
end
function gold_standard(
ref::Reference,
sequences::Set{Sequence};
disjoint::Bool=true,
recalls=DEFAULT_RECALLS,
precisions=DEFAULT_PRECISIONS,
)::Binning
sequences_of_genome = Dict{Genome, Set{Sequence}}()
for sequence in sequences
targets = last(ref.targets[ref.target_index_by_name[sequence.name]])
isempty(targets) && continue
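        # Greedy disjoint assignment: keep only the target whose mapped
        # (from, to) span is longest; 0 is a sentinel when not disjoint.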
        best_index = disjoint ? last(findmax(i -> 1 + last(last(i)) - first(last(i)), targets)) : 0
for (i, (source, _)) in enumerate(targets)
disjoint && i != best_index && continue
push!(
get!(valtype(sequences_of_genome), sequences_of_genome, source.genome),
sequence,
)
end
end
scratch = Tuple{Int, Int}[]
bins = [
bin_by_indices(
"bin_" * genome.name,
[ref.target_index_by_name[i.name] for i in seqs],
ref.targets,
scratch,
nothing,
) for (genome, seqs) in sequences_of_genome
]
sort!(bins; by=i -> i.name)
Binning(bins, ref; recalls=recalls, precisions=precisions, disjoint=false)
end
function check_disjoint(bins)
nseq = sum(i -> length(i.sequences), bins; init=0)
seen_seqs = sizehint!(Set{Sequence}(), nseq)
for bin in bins, seq in bin.sequences
in!(seen_seqs, seq) &&
error(lazy"Sequence \"$(seq.name)\" seen twice in disjoint Binning")
end
nothing
end
function validate_recall_precision(xs)::Vector{Float64}
s = Set{Float64}()
for x_ in xs
x = Float64(x_)
x in s && error(lazy"Recall/precision value $x present multiple times")
if !isfinite(x) || x <= 0.0 || x > 1.0
error(lazy"Recall precision value $x is not finite in (0,1]")
end
push!(s, x)
end
isempty(s) && error("Must provide at least 1 recall/precision value")
sort!(collect(s))
end
# TODO: This function is massive. Can I refactor it into smaller functions?
function benchmark(
ref::Reference,
bins::Vector{Bin},
recalls::Vector{Float64},
precisions::Vector{Float64};
)::NTuple{4, Vector{<:Matrix{<:Integer}}}
# We have 8 qualitatively different combinations of the three options below:
# * 1) rank 0 (a Genome), versus 2) another rank (a Clade)
# * Counting 1) unique genomes/clades recovered at a r/p threshold level, or
# 2) bins corresponding to any genome/clade at a r/p level
# * With recall=1.0 defined as 1) The assembly size, or 2) the genome size
# We loop over bins below. So, to tally up recovered genomes/clades, we need to store
# them in a dict.
# Given N distinct precision values, we compute the maximally seen recall value at that
# given precision value. We could also have done it the other way.
max_genome_recall_at_precision = Dict{Genome, Tuple{Vector{Float64}, Vector{Float64}}}()
max_clade_recall_at_precision =
Dict{Clade{Genome}, Tuple{Vector{Float64}, Vector{Float64}}}()
for genome in ref.genomes
max_genome_recall_at_precision[genome] =
(zeros(Float64, length(precisions)), zeros(Float64, length(precisions)))
end
for level in ref.clades, clade in level
max_clade_recall_at_precision[clade] =
(zeros(Float64, length(precisions)), zeros(Float64, length(precisions)))
end
# Same as above, except that because we loop over bins, we can compute at each iteration
# whether the bin passes the threshold. So we don't need to store in a dict, but can
# increment the matrices directly at the end of the loop
bin_max_genome_recall_at_precision =
[Vector{Float64}(undef, length(precisions)) for _ in 1:nranks(ref)]
bin_max_asm_recall_at_precision =
[Vector{Float64}(undef, length(precisions)) for _ in 1:nranks(ref)]
bin_genome_matrices = [
zeros(Int, length(recalls), length(precisions)) for
_ in bin_max_genome_recall_at_precision
]
bin_asm_matrices = [
zeros(Int, length(recalls), length(precisions)) for
_ in bin_max_genome_recall_at_precision
]
for bin in bins
# Since we re-use these vectors for each bin, we need to zero them out between each iteration
for vs in (bin_max_genome_recall_at_precision, bin_max_asm_recall_at_precision)
for v in vs
fill!(v, zero(eltype(v)))
end
end
# First, handle the genomes (as opposed to the clades)
bin_genome_recalls_1 = bin_max_genome_recall_at_precision[1]
bin_asm_recalls_1 = bin_max_asm_recall_at_precision[1]
for (genome, (; asmsize, foreign)) in bin.genomes
(v_asm, v_genome) = max_genome_recall_at_precision[genome]
precision = asmsize / (asmsize + foreign)
asm_recall = asmsize / genome.assembly_size
genome_recall = asmsize / genome.genome_size
# Find the index corresponding to the given precision. If the precision is lower than
# the smallest precision in `precisions`, don't do anything
precision_index = searchsortedlast(precisions, precision)
iszero(precision_index) && continue
# Get maximum recalls for genomes
v_asm[precision_index] = max(asm_recall, v_asm[precision_index])
v_genome[precision_index] = max(genome_recall, v_genome[precision_index])
# Get maximum recalls for bins
bin_genome_recalls_1[precision_index] =
max(bin_genome_recalls_1[precision_index], genome_recall)
bin_asm_recalls_1[precision_index] =
max(bin_asm_recalls_1[precision_index], asm_recall)
end
# Same as above, but for clades instead of genomes
for (clade, (asm_recall, clade_recall, precision)) in bin.clades
precision_index = searchsortedlast(precisions, precision)
iszero(precision_index) && continue
(v_asm, v_genome) = max_clade_recall_at_precision[clade]
# Get maximum recall for clades
for (v, recall) in ((v_asm, asm_recall), (v_genome, clade_recall))
v[precision_index] = max(recall, v[precision_index])
end
# Get maximum recall for bins
for (v, recall) in (
(bin_max_asm_recall_at_precision, asm_recall),
(bin_max_genome_recall_at_precision, clade_recall),
)
vr = v[clade.rank + 1]
vr[precision_index] = max(vr[precision_index], recall)
end
end
# Now that the maximal recall at given precisions for this bin has been filled out,
# we use the data to increment the correct row in the matrix.
for (vs, ms) in (
(bin_max_genome_recall_at_precision, bin_genome_matrices),
(bin_max_asm_recall_at_precision, bin_asm_matrices),
)
for (v, m) in zip(vs, ms)
# First, we make the recall vector reverse cumulative.
# If a bin was seen with recall 0.6 at precision 0.8, then it's also
# seen with at least recall 0.6 at every lower precision level
make_reverse_max!(v)
# Now, increment the correct (recall) row for each (precision) column.
# E.g. if a bin is seen at precision 0.5 with a recall of 0.6, we increment
# the corresponding rows/columns.
# Technically, we need to increment all recall values lower than the given
# recall value (by the same logic as the comment above),
# But we don't need to do this in the inner loop here. We can do it at the end
# where we only need to do it once for each matrix instead of once per bin per matrix.
for (precision_index, recall) in enumerate(v)
recall_index = searchsortedlast(recalls, recall)
iszero(recall_index) && continue
m[recall_index, precision_index] += 1
end
end
end
end
# Just like above with the bin vectors, we need to make the genome/clade vectors reverse max
for (v1, v2) in values(max_genome_recall_at_precision)
make_reverse_max!(v1)
make_reverse_max!(v2)
end
for (v1, v2) in values(max_clade_recall_at_precision)
make_reverse_max!(v1)
make_reverse_max!(v2)
end
# Now make the matrices counting recovered genomes / clades. Similar to above for
# the bins, we increment the matrix at the correct recall value.
asm_matrices = [zeros(Int, length(recalls), length(precisions)) for i in 1:nranks(ref)]
genome_matrices = [copy(i) for i in asm_matrices]
for (v_asm, v_genome) in values(max_genome_recall_at_precision)
for (v, m) in ((v_asm, asm_matrices[1]), (v_genome, genome_matrices[1]))
update_matrix!(m, v, recalls)
end
end
for (clade, (v_asm, v_genome)) in max_clade_recall_at_precision
for (v, matrices) in ((v_asm, asm_matrices), (v_genome, genome_matrices))
update_matrix!(matrices[clade.rank + 1], v, recalls)
end
end
# For all the matrices below, if a bin/genome/whatever is seen at recalls X and
# precision Y, then it's also seen at every lower value. The `make_reverse_max!` calls
# earlier updated the lower precision values with the recall values of the higher precision values.
# Now, we need to do the reverse - update the low recall values with prec. of high rec. values.
# We can do this once here, in the matrix.
for mv in (asm_matrices, genome_matrices, bin_genome_matrices, bin_asm_matrices),
m in mv
make_columnwise_reverse_cumulative!(m)
end
# For historical reasons, the matrices above are transposed.
# Instead of rewriting this massive function, simply transpose each matrix before returning
map(
v -> map(permutedims, v),
(asm_matrices, genome_matrices, bin_asm_matrices, bin_genome_matrices),
)
end
"For each precision column in the matrix, add one to the correct row
given by the recall value at the given precision"
function update_matrix!(
matrix::Matrix{<:Integer},
v::Vector{<:AbstractFloat},
recalls::Vector{Float64},
)
# Since we iterate over increasing precisions, the recall_index must shrink
# or stay the same per iteration. So, we can reduce the search space
# at each iteration by modifying imax
imax = lastindex(recalls)
for (precision_index, recall) in enumerate(v)
recall_index = searchsortedlast(view(recalls, 1:imax), recall)
iszero(recall_index) && continue
imax = min(recall_index, imax)
matrix[recall_index, precision_index] += 1
end
matrix
end
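# Replace v[i] with the suffix maximum max(v[i:end]), in place.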
function make_reverse_max!(v::Vector{<:Real})
@inbounds for i in (length(v) - 1):-1:1
v[i] = max(v[i], v[i + 1])
end
v
end
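# In-place reverse cumulative sum down each column:
# afterwards m[i, j] equals the sum of the original m[i:end, j].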
function make_columnwise_reverse_cumulative!(m::Matrix{<:Real})
for col in eachcol(m)
for i in (length(col) - 1):-1:1
col[i] += col[i + 1]
end
end
end
| BinBencherBackend | https://github.com/jakobnissen/BinBencherBackend.jl.git |
|
[
"MIT"
] | 0.3.0 | fdeb55687884a44d464429fd6af7a9da298d7d7e | code | 3096 | # Type parameter G here is always Genome - I only make it parametric so I can have
# mutually recursive types b/w Clade and Genome
"""
Clade{Genome}(name::AbstractString, child::Union{Clade{Genome}, Genome})
A `Clade` represents any clade above `Genome`. Every `Genome` is expected to belong
to the same number of clades, e.g. there may be exactly 7 levels of clades above every `Genome`.
`Clade`s always have at least one child (which is either a `Genome` or a `Clade` one rank lower),
and a parent, unless it's the unique top clade from which all other clades and genomes descend from.
The rank of a `Genome` is 0, clades that contain genomes have rank 1, and clades containing rank-1
clades have rank 2 etc.
By default, zero-indexed ranks correspond to strain, species, genus, family, order, class, phylum and domain.
# Examples
```
julia> top_clade(ref)
Genus "F", 3 genomes
├─ Species "D", 2 genomes
│ ├─ Genome(gA)
│ └─ Genome(gB)
└─ Species "E", 1 genome
└─ Genome(gC)
julia> top_clade(ref).children
2-element Vector{Clade{Genome}}:
Species "D", 2 genomes
Species "E", 1 genome
```
"""
mutable struct Clade{G}
const name::String
const rank::Int
const children::Union{Vector{Clade{G}}, Vector{G}}
ngenomes::Int
parent::Union{Clade{G}, Nothing}
function Clade(name::String, child::Union{Clade{G}, G}) where {G}
(rank, ngenomes) = if child isa G
(@isinit(child.parent)) &&
existing_parent_error(name, child.name, child.parent.name)
(1, 1)
else
parent = child.parent
parent === nothing || existing_parent_error(name, child.name, parent.name)
(child.rank + 1, child.ngenomes)
end
instance = new{G}(name, rank, [child], ngenomes, nothing)
child.parent = instance
return instance
end
end
@noinline function existing_parent_error(child_name, parent_name, other_parent_name)
error(
"Attempted to add parent \"$parent_name\" to child \"$child_name\", which already has parent \"$other_parent_name\"",
)
end
const RANKS =
["strain", "species", "genus", "family", "order", "class", "phylum", "domain", "top"]
const RANK_BY_NAME = Dict(rank => i - 1 for (i, rank) in enumerate(RANKS))
function Base.show(io::IO, x::Clade)
suffix = x.ngenomes == 1 ? "" : "s"
print(
io,
titlecase(RANKS[x.rank + 1]),
" \"",
x.name,
"\", ",
x.ngenomes,
" genome",
suffix,
)
end
function Base.show(io::IO, ::MIME"text/plain", x::Clade)
if get(io, :compact, false)
show(io, x)
else
buf = IOBuffer()
AbstractTrees.print_tree(buf, x; maxdepth=3)
seekstart(buf)
for (i, line) in zip(1:25, eachline(buf))
println(io, line)
i == 25 && print(io, '⋮')
end
end
end
AbstractTrees.children(x::Clade) = x.children
AbstractTrees.parent(x::Clade) = x.parent
AbstractTrees.treebreadth(x::Clade) = x.ngenomes
nchildren(x::Clade) = length(x.children)
istop(x::Clade) = isnothing(x.parent)
| BinBencherBackend | https://github.com/jakobnissen/BinBencherBackend.jl.git |
|
[
"MIT"
] | 0.3.0 | fdeb55687884a44d464429fd6af7a9da298d7d7e | code | 2365 | baremodule Flags
using Base: @enum
@enum Flag::UInt8 organism virus plasmid
export Flag
end
using .Flags
const NAME_TO_FLAG = Dict(string(f) => f for f in instances(Flags.Flag))
Base.tryparse(::Type{Flag}, s::AbstractString) = get(NAME_TO_FLAG, lowercase(s), nothing)
@doc """
Flag
A flag is a boolean associated with a `Genome`, stored in a `FlagSet`.
A flag may be e.g. `Flags.organism`, signaling that the genome is known to be
an organism.
See also: [`FlagSet`](@ref), [`Genome`](@ref)
# Examples
```jldoctest
julia> tryparse(Flag, "organism") == Flags.organism
true
julia> tryparse(Flag, "Canada") === nothing
true
```
"""
Flags.Flag
"""
FlagSet <: AbstractSet{Flag}
A `FlagSet` is a compact set of `Flag`s associated with a `Genome`.
You can construct them from an iterable of `Flag`, e.g. a 1-element tuple.
`FlagSet` support most set operations efficiently.
See also: [`Flag`](@ref), [`Genome`](@ref)
# Examples
```jldoctest
julia> flags = FlagSet((Flags.organism, Flags.virus));
julia> Flags.virus in flags
true
julia> isdisjoint(flags, FlagSet((Flags.organism,)))
false
```
"""
struct FlagSet <: AbstractSet{Flag}
# We can use fewer bits here, but this struct is currently
# used in Genome, which has 64-bit padding, so we might
# as well have the content be bits here instead of padding
x::UInt64
FlagSet(x::UInt64) = new(x)
end
FlagSet() = FlagSet(zero(UInt64))
function FlagSet(itr)
result = FlagSet()
for i in itr
result = push(result, convert(Flag, i))
end
result
end
function Base.iterate(x::FlagSet, state::UInt64=x.x)
if iszero(state)
nothing
else
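        # The index of the lowest set bit is the next flag's enum value;
        # `state & (state - 1)` clears that bit, yielding the next state.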
(reinterpret(Flag, trailing_zeros(state) % UInt8), state & (state - 1))
end
end
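# Set the bit whose position is the flag's enum value (masked to stay in 0:63).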
push(x::FlagSet, y::Flag) = FlagSet(x.x | (UInt64(1) << (reinterpret(UInt8, y) & 63)))
Base.in(flag::Flag, x::FlagSet) = isodd(x.x >>> (reinterpret(UInt8, flag) & UInt8(63)))
Base.length(x::FlagSet) = count_ones(x.x)
Base.isempty(x::FlagSet) = iszero(x.x)
Base.hash(x::FlagSet, h::UInt) = hash(x.x, h ⊻ (0x116133601de11a93 % UInt))
Base.union(x::FlagSet, y::FlagSet) = FlagSet(x.x | y.x)
Base.intersect(x::FlagSet, y::FlagSet) = FlagSet(x.x & y.x)
Base.setdiff(x::FlagSet, y::FlagSet) = FlagSet(x.x & ~y.x)
Base.issubset(x::FlagSet, y::FlagSet) = isempty(setdiff(x, y))
Base.isdisjoint(x::FlagSet, y::FlagSet) = isempty(x ∩ y)
| BinBencherBackend | https://github.com/jakobnissen/BinBencherBackend.jl.git |
|
[
"MIT"
] | 0.3.0 | fdeb55687884a44d464429fd6af7a9da298d7d7e | code | 5641 | @lazy mutable struct Genome
const name::String
const sources::Set{Source{Genome}}
const flags::FlagSet
@lazy parent::Clade{Genome}
@lazy genome_size::Int
@lazy assembly_size::Int
function Genome(name::AbstractString, flags::FlagSet)
new(String(name), Set{Source{Genome}}(), flags, uninit, uninit, uninit)
end
end
Genome(name::AbstractString) = Genome(name, FlagSet())
"""
Genome(name::AbstractString [flags::FlagSet])
`Genome`s represent individual target genomes (organisms, plasmids, viruses etc),
and are conceptually the lowest-level clade that can be reconstructed.
`Genome`s contain one or more `Source`s, and belong to a single parent `Clade`.
They are identified uniquely among genomes by their name.
A genome has a _genome size_, which is the sum of the lengths of all its sources.
We consider this to be the true size of the biological genome (assuming its full
sequence is contained in its sources), as well as an _assembly size_, which represents
the sum of the assembly sizes of each source.
See also: [`Clade`](@ref), [`Source`](@ref), [`mrca`](@ref)
# Examples
```jldoctest
julia> gA, gB, gC = collect(ref.genomes);
julia> flags(gA)
FlagSet with 1 element:
BinBencherBackend.Flags.organism
julia> mrca(gA, gB)
Species "D", 2 genomes
├─ Genome(gA)
└─ Genome(gB)
```
"""
Genome
const Target = Tuple{Source{Genome}, Tuple{Int, Int}}
const Node = Union{Genome, Clade{Genome}}
"""
flags(g::Genome)::FlagSet
Returns the `Flag`s of the `Genome` as a `FlagSet`.
See also: [`Flag`](@ref), [`FlagSet`](@ref)
# Example
```jldoctest
julia> flags(genome)
FlagSet with 1 element:
BinBencherBackend.Flags.organism
```
"""
flags(g::Genome) = g.flags
"""
is_organism(g::Genome)::Bool
Check if `g` is known to be an organism.
# Example
```jldoctest
julia> is_organism(genome)
true
```
"""
is_organism(g::Genome) = Flags.organism ∈ flags(g)
"""
is_virus(g::Genome)::Bool
Check if `g` is known to be a virus.
# Example
```jldoctest
julia> is_virus(genome)
false
```
"""
is_virus(g::Genome) = Flags.virus ∈ flags(g)
"""
is_plasmid(g::Genome)::Bool
Check if `g` is known to be a plasmid.
# Example
```jldoctest
julia> is_plasmid(genome)
false
```
"""
is_plasmid(g::Genome) = Flags.plasmid ∈ flags(g)
function add_child!(c::Clade{Genome}, g::Node)::Clade
children = c.children
if g isa Genome
push!(children::Vector{Genome}, g)
@init! g.parent = c
else
push!(children::Vector{Clade{Genome}}, g)
g.parent = c
end
c.ngenomes += (g isa Genome ? 1 : g.ngenomes)
c
end
"Delete a child from the clade tree."
function recursively_delete_child!(child::T) where {T <: Node}
parent = child.parent
# If the child is the top level clade, do nothing, as we delete from the bottom up
parent === nothing && return nothing
children = parent.children::Vector{T}
deleteat!(children, findfirst(i -> i === child, children)::Integer)
# If we delete a genome, we remove that genome from all ancestors count
if child isa Genome
p = parent
while p !== nothing
p.ngenomes -= 1
p = p.parent
end
end
# If the clade now has no children, we can delete the clade
isempty(children) && recursively_delete_child!(parent)
end
Base.:(==)(g1::Genome, g2::Genome) = g1.name == g2.name
Base.hash(x::Genome, h::UInt) = hash(x.name, h ⊻ UInt(21323125590))
function add_source!(genome::Genome, name::AbstractString, length::Integer)
@isinit(genome.genome_size) &&
error("Can't add source to genome after calling finish! on it.")
source = Source(genome, name, length)
in(source, genome.sources) &&
error(lazy"Genome $(genome.name) already have source $(source.name)")
push!(genome.sources, source)
genome
end
function finish!(genome::Genome, scratch::Vector{Tuple{Int, Int}})
@isinit(genome.genome_size) && return genome
@isinit(genome.parent) ||
error(lazy"finish! called on genome \"$(genome.name)\" without assigned parent.")
for source in genome.sources
@isinit(source.assembly_size) || finish!(source, scratch)
end
@init! genome.genome_size = sum(i -> i.length, genome.sources; init=0)
@init! genome.assembly_size = sum(i -> i.assembly_size, genome.sources; init=0)
genome
end
Base.show(io::IO, x::Genome) = print(io, "Genome(", x.name, ')')
function Base.show(io::IO, ::MIME"text/plain", x::Genome)
if get(io, :compact, false)
show(io, x)
else
asm = (x.assembly_size / x.genome_size) * 100
print(
io,
"Genome \"",
x.name,
"\"\n Parent: ",
'"',
x.parent.name,
'"',
"\n Genome size: ",
x.genome_size,
"\n Assembly size: ",
x.assembly_size,
" (",
round(asm; digits=1),
" %)",
"\n Sources: ",
length(x.sources),
"\n Flags: ",
Int(x.flags.x),
" (",
join(x.flags, ", "),
')',
)
end
end
"""
mrca(a::Node, b::Node)::Node
Compute the most recent common ancestor (MRCA) of `a` and `b`.
"""
function mrca(a::Node, b::Node)::Node
a === b && return a
ca = a isa Genome ? a.parent : a
cb = b isa Genome ? b.parent : b
(lo, hi) = ca.rank < cb.rank ? (ca, cb) : (cb, ca)
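    # First walk the lower-ranked node up until both are at the same rank,
    # then ascend both lineages in lockstep until they meet.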
while lo.rank < hi.rank
lo = lo.parent::Clade
end
while lo !== hi
lo = lo.parent::Clade
hi = hi.parent::Clade
end
lo
end
| BinBencherBackend | https://github.com/jakobnissen/BinBencherBackend.jl.git |
|
[
"MIT"
] | 0.3.0 | fdeb55687884a44d464429fd6af7a9da298d7d7e | code | 14526 | @lazy mutable struct Reference
const genomes::Set{Genome}
# Sequence name => index into targets
const target_index_by_name::Dict{String, UInt32}
const targets::Vector{Tuple{Sequence, Vector{Target}}}
const clades::Vector{Vector{Clade{Genome}}}
@lazy shortest_seq_len::Int
@lazy fraction_assembled::Float64
end
"""
Reference(::Union{IO, AbstractString})
A `Reference` contains the ground truth to benchmark against.
Conceptually, it consists of the following parts:
* A list of genomes, each with sources
* The full taxonomic tree, as lists of clades
* A list of sequences, each with a list of (source, span) pairs showing where it maps.
Normally, the types `FlagSet` `Genome`, `Source`, `Clade` and `Sequence` do not
need to be constructed manually, but are constructed when the `Reference` is loaded
from a JSON file.
A `Reference` is loaded from a JSON file, which is specified either as an `IO`,
or its path as an `AbstractString`. If the path ends with `.gz`, automatically
gzip decompress when reading the file.
# Examples
```jldoctest
julia> ref = Reference(path_to_ref_file);
julia> ref isa Reference
true
julia> length(genomes(ref))
3
julia> n_seqs(ref)
11
julia> first(ref.genomes) isa Genome
true
```
See also: [`subset`](@ref), [`Genome`](@ref), [`Clade`](@ref)
"""
Reference
function Reference(::Unsafe)
Reference(
Set{Genome}(),
Dict{String, Int}(),
Vector{Tuple{Sequence, Vector{Target}}}(),
Vector{Clade{Genome}}[],
uninit,
uninit,
)
end
Base.show(io::IO, ::Reference) = print(io, "Reference()")
function Base.show(io::IO, ::MIME"text/plain", x::Reference)
if get(io, :compact, false)
show(io, x)
else
print(
io,
"Reference",
"\n Genomes: ",
length(genomes(x)),
"\n Sequences: ",
n_seqs(x),
"\n Ranks: ",
nranks(x),
"\n Seq length: ",
x.shortest_seq_len,
"\n Assembled: ",
round(x.fraction_assembled * 100; digits=1),
" %",
)
end
end
top_clade(x::Reference) = only(last(x.clades))
genomes(x::Reference) = x.genomes
n_seqs(x::Reference) = length(x.targets)
function nranks(x::Reference)
isempty(x.genomes) && return 0
length(x.clades) + 1
end
function finish!(ref::Reference)
@isinit(ref.fraction_assembled) && return ref
scratch = Tuple{Int, Int}[]
foreach(g -> finish!(g, scratch), ref.genomes)
assembly_size = genome_size = 0
for genome in ref.genomes
assembly_size += genome.assembly_size
genome_size += genome.genome_size
end
shortest_seq_len = minimum(i -> length(first(i)), ref.targets; init=typemax(Int))
shortest_seq_len == typemax(Int) &&
error("Cannot initialize a Reference with no sequences")
@init! ref.shortest_seq_len = shortest_seq_len
@init! ref.fraction_assembled = assembly_size / genome_size
ref
end
"""
subset!(
ref::Reference;
sequences::Function=Returns(true),
genomes::Function=Returns(true)
)::Reference
Mutate `ref` in place, removing genomes and sequences.
Keep only sequences S where `sequences(S)` returns `true` and genomes G for which
`genomes(G)` returns `true`.
See also: [`subset`](@ref), [`Reference`](@ref)
# Examples
```jldoctest
julia> ref
Reference
Genomes: 3
Sequences: 11
Ranks: 3
Seq length: 10
Assembled: 61.9 %
julia> subset(ref; genomes=g -> Flags.organism in flags(g))
Reference
Genomes: 2
Sequences: 11
Ranks: 3
Seq length: 10
Assembled: 91.3 %
julia> BinBencherBackend.subset(ref; sequences=s -> length(s) ≥ 25)
Reference
Genomes: 3
Sequences: 9
Ranks: 3
Seq length: 25
Assembled: 56.2 %
```
"""
function subset!(
ref::Reference;
sequences::Function=Returns(true),
genomes::Function=Returns(true),
)::Reference
ref = uninit!(ref)
# Cache the sequences and genomes to remove in order to not
# need to compute the predicates multiple times
genomes_to_remove = Genome[]
sources_to_remove = Set{Source}()
# Update both ref.targets and ref.target_index_by_name
mask = BitVector(sequences(first(i)) for i in ref.targets)
new_idx = cumsum(mask)
keepat!(ref.targets, mask)
filter!(kv -> mask[last(kv)], ref.target_index_by_name)
map!(i -> new_idx[i], values(ref.target_index_by_name))
# Populate genomes_to_remove and remove the genomes from ref.genomes
filter!(ref.genomes) do g
keep = genomes(g)::Bool
keep || push!(genomes_to_remove, g)
keep
end
# Compute sources to remove, and remove the sources from ref.targets
for genome in genomes_to_remove
union!(sources_to_remove, genome.sources)
end
for (_, targets) in ref.targets
filter!(targets) do (source, _)
!in(source, sources_to_remove)
end
end
# Remove references to deleted genomes from parents
# (and recursively remove now-empty Clades)
for genome in genomes_to_remove
recursively_delete_child!(genome)
end
# Remove references to Clades we deleted from the Clade tree,
# but which may still be present in ref.clades
for i in (length(ref.clades) - 1):-1:1
empty!(ref.clades[i])
for parent in ref.clades[i + 1]
union!(ref.clades[i], parent.children::Vector{Clade{Genome}})
end
end
# Remove sequences from sources
for genome in ref.genomes, source in genome.sources
filter!(i -> sequences(first(i))::Bool, source.sequences)
end
# Re-initialize the now completely filtered Reference
finish!(ref)
end
# This deepcopy is quite slow, and it would be nice to optimise it.
# However, manual deepcopying of references is quite error prone, and having
# an incomplete deepcopy could lead to nasty bugs, so I'll just eat it.
"""
subset(ref::Reference; kwargs...)
Non-mutating copying version of `subset!`.
This is currently much slower than `subset!`.
See also: [`subset!`](@ref)
"""
subset(ref::Reference; kwargs...) = subset!(deepcopy(ref); kwargs...)
function uninit!(ref::Reference)
@uninit! ref.fraction_assembled
@uninit! ref.shortest_seq_len
for genome in ref.genomes
@uninit! genome.genome_size
@uninit! genome.assembly_size
for source in genome.sources
@uninit! source.assembly_size
@uninit! source.total_bp
end
end
ref
end
function add_genome!(ref::Reference, genome::Genome)
in(genome, ref.genomes) && error(lazy"Genome $(genome.name) already in reference")
push!(ref.genomes, genome)
ref
end
function add_sequence!(ref::Reference, seq::Sequence, targets::Vector{Target})
for (source, span) in targets
add_sequence!(source, seq, span)
end
L = length(ref.target_index_by_name)
if L == typemax(UInt32)
error("References can only hold 4294967295 sequences")
end
i = get!(ref.target_index_by_name, seq.name, L + 1)
if i != L + 1
error(lazy"Duplicate sequence in reference: $(seq.name)")
end
push!(ref.targets, (seq, targets))
ref
end
function parse_bins(
io::IO,
::Type{Dict},
ref::Reference,
binsplit_sep::Union{Nothing, AbstractString, Char}=nothing,
disjoint::Bool=true,
)::Dict{<:AbstractString, <:Vector{<:Integer}}
lines = eachline(io)
header = "clustername\tcontigname"
it = iterate(lines)
if (isnothing(it) ? nothing : rstrip(first(it))) != header
error(lazy"Expected following header line in cluster file: $(repr(header))")
end
itr = tab_pairs(lines)
itr = isnothing(binsplit_sep) ? itr : binsplit_tab_pairs(itr, binsplit_sep)
seen_indices = falses(length(ref.targets))
idxs_by_binname = Dict{SubString{String}, Vector{UInt32}}()
@inbounds for (binname, seqname) in itr
i = ref.target_index_by_name[seqname]
if seen_indices[i]
name = first(ref.targets[i]).name
error(lazy"Sequence \"$(name)\" seen twice in disjoint Binning")
end
seen_indices[i] = true
push!(get!(valtype(idxs_by_binname), idxs_by_binname, binname), i)
end
idxs_by_binname
end
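# A minimal (hypothetical) cluster file accepted by `parse_bins`, where fields are
# tab-separated and the header line is mandatory:
#   clustername<TAB>contigname
#   bin1<TAB>s1
#   bin1<TAB>s2
#   bin2<TAB>s3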
const JSON_VERSION = 2
# [(name, flags, [(sourcename, length)])]
const GENOMES_JSON_T = Vector{Tuple{String, Int, Vector{Tuple{String, Int}}}}
# [Sequence => sequence_length, [(subject, from, to)]]
const SEQUENCES_JSON_T = Vector{Tuple{String, Int, Vector{Tuple{String, Int, Int}}}}
# [[(child, parent)], [(parent, grandparent)] ...]
const TAXMAPS_JSON_T = Vector{Vector{Tuple{String, String}}}
struct ReferenceJSON
version::Int
genomes::GENOMES_JSON_T
sequences::SEQUENCES_JSON_T
taxmaps::TAXMAPS_JSON_T
end
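# A minimal (hypothetical) reference JSON matching the layout above:
#   {"version": 2,
#    "genomes": [["gA", 1, [["subjA1", 100]]]],
#    "sequences": [["s1", 25, [["subjA1", 5, 29]]]],
#    "taxmaps": [[["gA", "D"]]]}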
StructTypes.StructType(::Type{ReferenceJSON}) = StructTypes.Struct()
Reference(path::AbstractString) = open_perhaps_gzipped(i -> Reference(i), String(path))
Reference(io::IO) = Reference(JSON3.read(io, ReferenceJSON))
function Reference(json_struct::ReferenceJSON)
if json_struct.version != JSON_VERSION
        @warn (
            "Deserializing reference JSON of version $(json_struct.version), " *
            "but the currently loaded version of BinBencherBackend supports version $(JSON_VERSION)."
        )
end
ref = Reference(unsafe)
# Parse genomes
for (genomename, flags, sourcesdict) in json_struct.genomes
genome = Genome(genomename, FlagSet(UInt(flags)))
add_genome!(ref, genome)
for (source_name, source_length) in sourcesdict
add_source!(genome, source_name, source_length)
end
end
# Check for unique sources
source_by_name = Dict{String, Source{Genome}}()
for genome in ref.genomes, source in genome.sources
if haskey(source_by_name, source.name)
error(
lazy"Duplicate source: \"$(source.name)\" belongs to both genome ",
lazy"\"$(source_by_name[source.name].genome.name)\" and \"$(genome.name)\".",
)
end
source_by_name[source.name] = source
end
# Parse sequences
for (seq_name, seq_length, targs) in json_struct.sequences
targets = map(targs) do (source_name, from, to)
source = get(source_by_name, source_name, nothing)
if source === nothing
error(
lazy"Sequence \"$(seq_name)\" maps to source \"$(source_name)\", but no such source in reference",
)
end
(source, (Int(from), Int(to)))
end
seq = Sequence(seq_name, seq_length)
add_sequence!(ref, seq, targets)
end
# Add taxonomy
copy!(ref.clades, parse_taxonomy(ref.genomes, json_struct.taxmaps))
# Finalize the reference
finish!(ref)
end
function save(io::IO, ref::Reference)
json_dict = Dict{Symbol, Any}()
json_dict[:version] = JSON_VERSION
# Genomes
genomes::GENOMES_JSON_T = [
(genome.name, Int(genome.flags.x), [(s.name, s.length) for s in genome.sources]) for genome in ref.genomes
]
json_dict[:genomes] = genomes
# Sequences
sequences::SEQUENCES_JSON_T = [
(
seq.name,
seq.length,
[(source.name, first(span), last(span)) for (source, span) in targets],
) for (seq, targets) in ref.targets
]
json_dict[:sequences] = sequences
# Taxmaps
taxmaps::TAXMAPS_JSON_T =
[[(genome.name, genome.parent.name) for genome in ref.genomes]]
json_dict[:taxmaps] = taxmaps
for clades in ref.clades
length(clades) == 1 && isnothing(only(clades).parent) && break
v = eltype(taxmaps)()
push!(taxmaps, v)
for clade in clades
parent = clade.parent::Clade
push!(v, (clade.name, parent.name))
end
end
JSON3.write(io, json_dict)
end
# Invariants for the input list:
# The input is a vector of ranks, each consisting of (child, parent) names.
# On every rank, the child names form a unique set (each child present once) equal to the parent names of the previous rank:
# - On the first rank, the child names are a unique set of genome names.
# - On the last rank, the parent names can be arbitrary.
# If the last rank holds more than a single unique parent name, a top node will be added.
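# Example (hypothetical): for genomes "gA" and "gB",
#   taxmaps = [[("gA", "D"), ("gB", "D")], [("D", "F")]]
# produces the rank-1 clade "D" and the rank-2 clade "F"; since the last rank
# holds a single unique parent, no extra top node is added.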
function parse_taxonomy(
genomes::Set{Genome},
taxmaps::Vector{Vector{Tuple{String, String}}},
)::Vector{Vector{Clade{Genome}}}
child_by_name = Dict{String, Node}(g.name => g for g in genomes)
assigned_children_names = Set{String}()
parent_by_name = empty(child_by_name)
result = Vector{Vector{Clade{Genome}}}()
for (rank, taxmap) in enumerate(taxmaps)
cladeset = Clade{Genome}[]
empty!(assigned_children_names)
for (child_name, parent_name) in taxmap
child_name ∈ assigned_children_names && error(
lazy"At rank $(rank), child \"$(child_name)\" is present more than once.",
)
# Get child
child = get(child_by_name, child_name, nothing)
child === nothing && error(
lazy"At rank $(rank), found child name \"$(child_name)\", but this does not exist " *
"on the previous rank.",
)
# Create parent if it does not already exist
parent =
get(parent_by_name, parent_name, nothing)::Union{Clade{Genome}, Nothing}
if parent === nothing
parent = Clade(parent_name, child)
parent_by_name[parent_name] = parent
push!(cladeset, parent)
else
add_child!(parent, child)
end
push!(assigned_children_names, child_name)
end
if length(assigned_children_names) != length(child_by_name)
missing_child = first(setdiff(keys(child_by_name), assigned_children_names))
error(lazy"At rank $(rank), child $(missing_child) has no parent")
end
parent_by_name, child_by_name = child_by_name, parent_by_name
empty!(parent_by_name)
push!(result, cladeset)
end
# Create top node, if needed
if length(last(result)) > 1
top = Clade("top", first(last(result)))
for child in @view(result[end][2:end])
add_child!(top, child)
end
push!(result, [top])
end
foreach(i -> sort!(i; by=j -> j.name), result)
return result
end
| BinBencherBackend | https://github.com/jakobnissen/BinBencherBackend.jl.git |
|
[
"MIT"
] | 0.3.0 | fdeb55687884a44d464429fd6af7a9da298d7d7e | code | 1074 | """
Sequence(name::AbstractString, length::Integer)
Type that represents a binnable sequence. Sequences contain no information other
than their name and their length, and are identified by their name.
# Examples
```jldoctest
julia> Sequence("abc", 5)
Sequence("abc", 5)
julia> Sequence("abc", 5) == Sequence("abc", 9)
true
julia> Sequence("abc", 0)
ERROR: ArgumentError: Cannot instantiate an empty sequence
[...]
"""
struct Sequence
name::String
length::Int
function Sequence(name::AbstractString, length::Integer)
str = String(name)
if isempty(str) || isspace(first(str)) || isspace(last(str))
error(
lazy"Sequence name \"$(str)\" cannot be empty or have leading or trailing whitespace",
)
end
length < 1 && throw(ArgumentError("Cannot instantiate an empty sequence"))
new(str, Int(length))
end
end
Base.length(x::Sequence) = x.length
Base.hash(x::Sequence, h::UInt) = hash(x.name, h ⊻ UInt(24364341))
Base.:(==)(s1::Sequence, s2::Sequence) = s1.name == s2.name
| BinBencherBackend | https://github.com/jakobnissen/BinBencherBackend.jl.git |
|
[
"MIT"
] | 0.3.0 | fdeb55687884a44d464429fd6af7a9da298d7d7e | code | 5207 | @lazy mutable struct Source{G}
const name::String
const genome::G
const length::Int
const sequences::Vector{Tuple{Sequence, Tuple{Int, Int}}}
@lazy assembly_size::Int
@lazy total_bp::Int
function Source(genome::G, name::AbstractString, length::Integer) where {G}
length ≤ 0 && error("Source length must be at least 1")
new{G}(
String(name),
genome,
Int(length),
Vector{Tuple{Sequence, Tuple{Int, Int}}}(),
uninit,
uninit,
)
end
end
"""
Source{Genome}(g::Genome, name::AbstractString, length::Integer)
Sources are the "ground truth" sequences that the binning attempts to recreate.
For example, the assembled contigs of the reference genome (typically full, closed circular
contigs) as found in NCBI or elsewhere are each `Source`s.
Many `Genome`s contain only a single `Source`, namely their fully assembled genome.
Each `Source` has a single parent `Genome`, and a unique name which identifies it.
`Source`s have zero or more mapping `Sequence`s, that each map to the `Source` at a given
span given by a 2-tuple `Tuple{Int, Int}`.
`Source`s have an _assembly size_, which is the number of base pairs covered by at least
one mapping sequence.
"""
Source
Base.:(==)(s1::Source, s2::Source) = s1.name == s2.name
Base.hash(x::Source, h::UInt) = hash(x.name, h ⊻ UInt(344509130945))
function add_sequence!(source::Source, seq::Sequence, span::Tuple{Int, Int})
(small, big) = minmax(span...)
(small < 1 || big > source.length) && error(
lazy"Attempted to add sequence \"$(seq.name)\" to source \"$(source.name)\" ",
lazy"at span $(first(span)):$(last(span)), but valid source indices are 1:$(source.length)",
)
@isinit(source.assembly_size) &&
error("Can't add sequence to source after calling finish! on it.")
push!(source.sequences, (seq, span))
source
end
function finish!(source::Source, scratch::Vector{Tuple{Int, Int}})
@isinit(source.assembly_size) && return source
(asm_size, total_bp) = assembly_size!(last, scratch, source.sequences, source.length)
@assert asm_size ≤ source.length
@init! source.assembly_size = asm_size
@init! source.total_bp = total_bp
source
end
"""Compute -> (breadth, total_bp), where breadth is the number of positions in `v` covered at least once,
and total_bp is the sum of the lengths of the mapped spans.
`v` must be a `Vector` such that `all(by(i) isa Tuple{Integer, Integer} for i in v)`.
The `scratch` input is mutated.
"""
function assembly_size!(
by::Function,
scratch::Vector{Tuple{Int, Int}},
v::Vector, # v: Vector of X, where by(X) isa Tuple{Integer, Integer}
source_len::Int,
)::Tuple{Integer, Integer}
# First pass: Convert elements into Tuple{Int, Int}, and count the number
# of circularly mapping spans (i.e. spans mapping from the end of the source
# to the beginning)
n_circular_mappings = 0
resize!(scratch, length(v))
@inbounds for i in eachindex(scratch, v)
(start_, stop_) = by(v[i])::Tuple{Integer, Integer}
(start, stop) = (Int(start_), Int(stop_))
n_circular_mappings += start > stop
scratch[i] = (start, stop)
end
# If we have any circular mappings, then these needs to be broken up into
# two non-circular spans.
# This is probably rare, so this function is written to be significantly
# faster when this branch is not taken
if !iszero(n_circular_mappings)
old_size = length(scratch)
new_size = old_size + n_circular_mappings
resize!(scratch, new_size)
# We write the extra split circular spans from the end of the vector,
# to avoid overwriting elements that we are currently reading
written_extras = 0
@inbounds for i in 1:old_size
(start, stop) = scratch[i]
if start > stop
scratch[i] = (1, stop)
scratch[new_size - written_extras] = (start, source_len)
written_extras += 1
else
scratch[i] = (start, stop)
end
end
# @assert written_extras == n_circular_mappings # (should always hold)
end
# Now we know we have a Vector{Tuple{Int, Int}} with no circular mappings,
# so we can compute the assembly size
size = total_bp = rightmost_end = 0
for span in sort!(scratch; by=first, alg=QuickSort)
size += max(last(span), rightmost_end) - max(first(span) - 1, rightmost_end)
total_bp += last(span) - first(span) + 1
rightmost_end = max(rightmost_end, last(span))
end
(size, total_bp)
end
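# Worked example (hypothetical): for a source of length 100 with spans
# [(95, 4), (10, 20)], the circular span (95, 4) is split into (1, 4) and
# (95, 100); the sorted spans (1, 4), (10, 20), (95, 100) then give
# breadth 4 + 11 + 6 = 21 and total_bp 4 + 11 + 6 = 21.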
Base.show(io::IO, x::Source) = print(io, "Source(", x.name, ", ", x.length, ')')
function Base.show(io::IO, ::MIME"text/plain", x::Source)
if get(io, :compact, false)
show(io, x)
else
print(
io,
"Source \"",
x.name,
"\"\ngenome: ",
x.genome,
"\n Length: ",
x.length,
"\n Assembly size: ",
x.assembly_size,
"\n Sequences: ",
length(x.sequences),
)
end
end
| BinBencherBackend | https://github.com/jakobnissen/BinBencherBackend.jl.git |
|
[
"MIT"
] | 0.3.0 | fdeb55687884a44d464429fd6af7a9da298d7d7e | code | 1551 | struct Unsafe end
const unsafe = Unsafe()
function tab_pairs(lines)
lines |>
imap(strip) |>
ifilter(!isempty) |>
imap() do line
cu = codeunits(line)
t1 = findfirst(isequal(UInt8('\t')), cu)
t1 === nothing && error(lazy"No tabs in line $line")
t2 = findnext(isequal(UInt8('\t')), cu, t1 + 1)
t2 === nothing || error(lazy"More than two tab-sep fields in $line")
f1 = SubString(line, 1:prevind(line, t1))
f2 = SubString(line, (t1 + 1):lastindex(line))
(f1, f2)
end
end
function binsplit_tab_pairs(t_pairs, sep::Union{Char, AbstractString})
t_pairs |> imap() do (binname, seqname)
p = findfirst(sep, seqname)
        p === nothing && error(lazy"Separator $sep not found in seq name $seqname")
before = SubString(seqname, 1:last(p))
new_binname_ = string(before, binname)
new_binname = SubString(new_binname_, 1:lastindex(new_binname_))
(new_binname, seqname)
end
end
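# Example (hypothetical): with sep = 'C' (a binsplit convention used by e.g. Vamb),
# the pair ("bin1", "s1Ccontig4") becomes ("s1Cbin1", "s1Ccontig4"), i.e. the bin
# name is prefixed with the sample part of the sequence name.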
function open_perhaps_gzipped(f::Function, path::String)
if endswith(path, ".gz")
stream = GzipDecompressorStream(open(path; lock=false))
try
f(stream)
finally
close(stream)
end
else
open(f, path; lock=false)
end
end
# This function exists in order to be able to use a Set
# to detect duplicates without hashing and indexing the Set
# twice.
function in!(s::AbstractSet, x)::Bool
xT = convert(eltype(s), x)
L = length(s)
push!(s, xT)
return length(s) == L
end
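# Example: given s = Set{Int}(), the first in!(s, 1) returns false (1 was absent
# and is now inserted), while a second in!(s, 1) returns true.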
| BinBencherBackend | https://github.com/jakobnissen/BinBencherBackend.jl.git |
|
[
"MIT"
] | 0.3.0 | fdeb55687884a44d464429fd6af7a9da298d7d7e | code | 9152 | using Test
using BinBencherBackend:
BinBencherBackend,
FlagSet,
Flag,
Flags,
Reference,
Binning,
Bin,
Genome,
Source,
Sequence,
is_organism,
is_virus,
mrca,
gold_standard,
top_clade,
genomes,
f1,
recall_precision,
n_recovered,
n_seqs,
n_bins,
subset,
subset!
using CodecZlib: GzipCompressor
const DIR = joinpath(dirname(dirname(pathof(BinBencherBackend))), "files")
REF_PATH = joinpath(DIR, "ref.json")
REF_STR = read(REF_PATH, String)
CLUSTERS_PATH = joinpath(DIR, "clusters.tsv")
CLUSTERS_STR = read(CLUSTERS_PATH, String)
@assert isdir(DIR)
ngenomes(ref) = length(genomes(ref))
@testset "Misc" begin
@test isnan(f1(0.0, 0.0))
@test !isnan(f1(1e-6, 1e-6))
end
@testset "Flags" begin
empt = FlagSet()
a = FlagSet([Flags.organism, Flags.plasmid])
b = FlagSet([Flags.virus])
c = FlagSet([Flags.plasmid, Flags.virus])
d = FlagSet([Flags.organism, Flags.plasmid, Flags.plasmid])
@test only(b) in c
@test Set(d) == Set([Flags.organism, Flags.plasmid, Flags.plasmid])
@test only(b) == Flags.virus
@test_throws Exception only(a)
@test_throws Exception only(d)
@test tryparse(Flag, "oRgANisM") == Flags.organism
@test tryparse(Flag, "banana") === nothing
@test FlagSet((tryparse(Flag, "virus"), tryparse(Flag, "organism"))) ==
FlagSet([Flags.organism, Flags.virus])
flagsets = [empt, a, b, c, d]
all_pairs = [(i, j) for i in flagsets for j in flagsets]
@test all(length(i) == length(Set(i)) for i in flagsets)
@test all(isempty(i) == isempty(Set(i)) for i in flagsets)
for f in [issubset, isdisjoint]
@test all(all_pairs) do (a, b)
f(a, b) == f(Set(a), Set(b))
end
end
for f in [union, intersect, setdiff]
@test all(all_pairs) do (a, b)
Set(f(a, b)) == f(Set(a), Set(b))
end
end
end
@testset "Construction" begin
global ref = Reference(IOBuffer(REF_STR))
global binning = Binning(IOBuffer(CLUSTERS_STR), ref)
global bins = sort!(collect(binning.bins); by=i -> i.name)
@test ref isa Reference
@test binning isa Binning
@test bins isa Vector{Bin}
end
function test_is_same_reference(a::Reference, b::Reference)
@test genomes(a) == genomes(b)
@test a.targets == b.targets
@test a.target_index_by_name == b.target_index_by_name
@test n_seqs(a) == n_seqs(b)
@test [[c.name for c in v] for v in a.clades] == [[c.name for c in v] for v in b.clades]
end
@testset "Reference" begin
@test length(genomes(ref)) == 3
@test n_seqs(ref) == 11
buf = IOBuffer()
BinBencherBackend.save(buf, ref)
ref2 = Reference(IOBuffer(take!(buf)))
test_is_same_reference(ref, ref2)
ref3 = Reference(REF_PATH)
test_is_same_reference(ref, ref3)
end
@testset "Sequence" begin
s1 = Sequence("abc", 5)
s2 = Sequence("abc abc", 6)
s3 = Sequence(SubString(" abc", 2:4), 7)
seqs = [s1, s2, s3]
@test_throws Exception Sequence("abc", 0)
@test_throws Exception Sequence("abc", -5)
# Bad names
@test_throws Exception Sequence("", 5)
@test_throws Exception Sequence(" abc", 5)
@test_throws Exception Sequence("abc ", 5)
@test map(length, seqs) == [5, 6, 7]
@test s1 == s3 # we might change this behaviour
@test isequal(s1, s3)
@test hash(s1) === hash(s3)
end
@testset "Genome" begin
gens = sort!(collect(genomes(ref)); by=i -> i.name)
@test is_organism(gens[1])
@test is_organism(gens[2])
@test is_virus(gens[3])
@test !is_organism(gens[3])
@test !is_virus(gens[1])
end
@testset "Clade" begin
(gA, gB, gC) = sort!(collect(genomes(ref)); by=i -> i.name)
@test mrca(gA, gB).name == "D"
@test mrca(gA, gC).name == mrca(gB, gC).name == "F"
D = mrca(gA, gB)
@test mrca(gA, D) === D
F = mrca(D, mrca(gA, gC))
@test mrca(F, F) == F
@test mrca(gA, F) == F
end
@testset "Subsetting" begin
seq_pred = s -> length(s) ≥ 25
genome_pred = !is_virus
ref = Reference(IOBuffer(REF_STR))
ref2 = subset(ref; sequences=seq_pred)
ref3 = subset(ref; genomes=genome_pred)
ref4 = subset(ref3; sequences=seq_pred)
ref5 = subset(ref; sequences=seq_pred, genomes=genome_pred)
refs = (ref, ref2, ref3, ref4, ref5)
for i in 1:4, j in (i + 1):5
@test refs[i] !== refs[j]
end
@test (ngenomes(ref3) + 1 == ngenomes(ref) == ngenomes(ref4) + 1 == ngenomes(ref5) + 1)
@test (n_seqs(ref2) == n_seqs(ref4) == n_seqs(ref5))
@test (n_seqs(ref) == n_seqs(ref3) != n_seqs(ref2))
@test (
top_clade(ref3).ngenomes + 1 ==
top_clade(ref4).ngenomes + 1 ==
top_clade(ref5).ngenomes + 1 ==
top_clade(ref).ngenomes
)
# Test subsetting preserves relationship between seq names and their targets
ref6 = deepcopy(ref)
trgs_of = Dict(name => ref6.targets[i] for (name, i) in ref6.target_index_by_name)
subset!(ref6; sequences=seq_pred)
@test all(ref6.target_index_by_name) do (name, i)
ref6.targets[i] === trgs_of[name]
end
end
function test_is_same_binning(a::Binning, b::Binning)
@test a.ref === b.ref
@test [i.name for i in a.bins] == [i.name for i in b.bins]
for field in [:recovered_asms, :recovered_genomes, :recalls, :precisions]
@test getfield(a, field) == getfield(b, field)
end
end
@testset "Binning" begin
ref = Reference(IOBuffer(REF_STR))
bins = Binning(IOBuffer(CLUSTERS_STR), ref)
@test bins isa Binning
@test n_bins(bins) == 6
@test n_recovered(bins, 0.4, 0.71) == 1
@test n_recovered(bins, 0.4, 0.71; assembly=true) == 2
@test n_recovered(bins, 0.4, 0.71; assembly=true, level=2) == 1
allgenomes = collect(genomes(ref))
for (ir, recall) in enumerate(bins.recalls)
for (ip, precision) in enumerate(bins.precisions)
for (asm, mats) in
[(true, bins.recovered_asms), (false, bins.recovered_genomes)]
found = falses(length(allgenomes))
for (ig, genome) in enumerate(allgenomes), bin in bins.bins
(nr, np) = recall_precision(genome, bin; assembly=asm)
found[ig] |= (nr >= recall && np >= precision)
end
@test sum(found) == mats[1][ip, ir]
for (rank, mat) in zip(ref.clades, mats[2:end])
found = falses(length(rank))
for bin in bins.bins, (ic, clade) in enumerate(rank)
(nr, np) = recall_precision(clade, bin; assembly=asm)
found[ic] |= (nr >= recall && np >= precision)
end
@test sum(found) == mat[ip, ir]
end
end
end
end
# Test filter_genomes works
empty_binning = Binning(IOBuffer(CLUSTERS_STR), ref; filter_genomes=Returns(false))
@test n_recovered(empty_binning, 0.1, 0.1) == 0
@test all(m -> all(iszero, m), empty_binning.recovered_asms)
@test all(m -> all(iszero, m), empty_binning.recovered_genomes)
only_virus = Binning(IOBuffer(CLUSTERS_STR), ref; filter_genomes=is_virus)
@test BinBencherBackend.n_nc(only_virus) == 0
@test n_recovered(only_virus, 0.1, 0.1; assembly=true) == 1
# This test depends on the exact state of the ref and binning used
@test all(m -> all(isone, m), only_virus.recovered_asms)
@test all(m -> all(iszero, m), only_virus.recovered_genomes)
bins2 = Binning(CLUSTERS_PATH, ref)
test_is_same_binning(bins, bins2)
@test bins.bin_genome_stats.mean_bin_recall ≈ 0.4916363636363636
@test bins.bin_genome_stats.mean_bin_precision ≈ 1
@test bins.bin_asm_stats.mean_bin_recall ≈ 0.636734693877551
@test bins.bin_asm_stats.mean_bin_precision ≈ 1
end
@testset "Gold standard" begin
ref = Reference(IOBuffer(REF_STR))
gold_standards = [
gold_standard(ref; disjoint=true)
gold_standard(ref; disjoint=false)
]
for bins in gold_standards
@test bins isa Binning
@test n_bins(bins) == ngenomes(ref)
end
end
@testset "From gzipped" begin
mktempdir() do path
ref_path = joinpath(path, "ref.json.gz")
open(io -> write(io, transcode(GzipCompressor, REF_STR)), ref_path, "w")
ref1 = Reference(REF_PATH)
ref2 = Reference(ref_path)
test_is_same_reference(ref1, ref2)
bins_path = joinpath(path, "bins.tsv.gz")
open(io -> write(io, transcode(GzipCompressor, CLUSTERS_STR)), bins_path, "w")
bins1 = Binning(CLUSTERS_PATH, ref1)
bins2 = Binning(bins_path, ref1)
test_is_same_binning(bins1, bins2)
end
end
@testset "ref.json specifics" begin
ref = Reference(IOBuffer(REF_STR))
sources_by_name = Dict{String, Source{Genome}}()
for genome in genomes(ref), source in genome.sources
sources_by_name[source.name] = source
end
# Circular mapping
C2 = sources_by_name["subjC2"]
@test C2.length == 100
@test C2.assembly_size == 21 + 20 + 21
@test sources_by_name["subjC3"].assembly_size == 0
end
| BinBencherBackend | https://github.com/jakobnissen/BinBencherBackend.jl.git |
|
[
"MIT"
] | 0.3.0 | fdeb55687884a44d464429fd6af7a9da298d7d7e | docs | 189 | # Changelog
This project adheres to [Semantic Versioning](http://semver.org/spec/v2.0.0.html).
Breaking changes, and some notable changes will appear in this file from version 1.0 onwards.
| BinBencherBackend | https://github.com/jakobnissen/BinBencherBackend.jl.git |
|
[
"MIT"
] | 0.3.0 | fdeb55687884a44d464429fd6af7a9da298d7d7e | docs | 1032 | # BinBencherBackend
[](https://jakobnissen.github.io/BinBencherBackend.jl/dev)
[](https://github.com/jakobnissen/BinBencherBackend.jl/releases/latest)
This package is used to benchmark and interactively explore the results of metagenomic binning given a dataset.
This is the Julia backend of the command-line tool [BinBencher [work in progress]](https://github.com/jakobnissen/BinBencher.jl).
## Installation
* Install Julia (ideally using Juliaup: https://github.com/JuliaLang/juliaup) or from the official website `www.julialang.org`
* Use the Julia package manager to install this package: `] add BinBencherBackend`
## Documentation
Basic usage:
```julia
using BinBencherBackend
ref = Reference("files/ref.json")
bins = Binning("files/clusters.tsv", ref)
print_matrix(bins)
```
For more, see the [documentation](https://jakobnissen.github.io/BinBencherBackend.jl/dev)
| BinBencherBackend | https://github.com/jakobnissen/BinBencherBackend.jl.git |
|
[
"MIT"
] | 0.3.0 | fdeb55687884a44d464429fd6af7a9da298d7d7e | docs | 2125 | # BinBencherBackend
BinBencherBackend.jl is a package for efficient benchmarking and interactive exploration of a set of metagenomic assembled genomes (MAGs) against a reference.
This is designed to be used for benchmarking metagenomic binners against a simulated metagenome.
## Installation
* Install Julia - preferably using `juliaup`: https://github.com/JuliaLang/juliaup
* Launch Julia: `julia`
* Press `]` to enter package mode. You can exit package mode with backspace.
* In package mode, type `add BinBencherBackend` to download and install the benchmarking software
## Quickstart
```julia
using BinBencherBackend
ref = Reference("files/ref.json")
bins = Binning("files/clusters.tsv", ref)
print_matrix(bins)
```
## Concepts
* A `Sequence` is a sequence (e.g. contig) clustered by the binner
* A `Genome` is a target genome that should be reconstructed by the binner.
It can be a virus, organism, plasmid etc. Every `Genome` has one or more `Source`s, and one parent `Clade`.
* A `Flag` marks the certainty about a boolean attribute of a genome, like "is this a virus?".
* `Source`s are the sequences that `Genome`s are composed of.
These are typically the reference genome sequences originally obtained by assembly of a purified genome (e.g. clonal colony).
`Sequence`s map to zero or more `Source`s at particular _spans_, i.e. locations.
* A `Clade` contain one or more `Genome`s or `Clade`s. Clades containing genomes are rank 1, and clades containing rank N clades are rank N+1 clades.
All genomes descend from a chain of exactly N ranks of clades, where N > 0.
* A `Bin` is a set of `Sequence`s created by the binner. Every bin is benchmarked against all genomes and clades in the reference.
* A `Reference` is composed of:
- The _genomes_, a set of `Genome`s, each with a set of `Source`s and `Flags`
- The _taxmaps_, a full set of Clades that encompasses every `Genome` at N ranks (where N > 0)
- The _sequences_, a list of `Sequence`s, each with zero or more mappings to `Source`s.
* A `Binning` is a set of `Bin`s benchmarked against a `Reference`
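The following minimal sketch shows how these concepts relate in code (it assumes the demo files bundled with the package, as in the Quickstart above):

```julia
using BinBencherBackend
ref = Reference("files/ref.json")
g = first(genomes(ref))    # a Genome
flags(g)                   # its FlagSet, e.g. organism
src = first(g.sources)     # one of its Sources
g.parent                   # the rank-1 Clade containing it
```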
See the Reference in the left sidebar.
| BinBencherBackend | https://github.com/jakobnissen/BinBencherBackend.jl.git |
|
[
"MIT"
] | 0.3.0 | fdeb55687884a44d464429fd6af7a9da298d7d7e | docs | 88 | # Reference
```@autodocs
Modules = [BinBencherBackend]
Order = [:type, :function]
```
| BinBencherBackend | https://github.com/jakobnissen/BinBencherBackend.jl.git |
|
[
"MIT"
] | 0.3.0 | fdeb55687884a44d464429fd6af7a9da298d7d7e | docs | 7826 | ```@meta
DocTestSetup = quote
using BinBencherBackend
(path_to_ref_file, path_to_bins_file) = let
dir = joinpath(Base.pkgdir(BinBencherBackend), "files")
(joinpath(dir, "ref.json"), joinpath(dir, "clusters.tsv"))
end
end
```
# Example walkthrough
## Loading the reference
First, let's load the reference:
```jldoctest walk
julia> using BinBencherBackend
julia> ref = Reference(path_to_ref_file)
Reference
Genomes: 3
Sequences: 11
Ranks: 3
Seq length: 10
Assembled: 61.9 %
```
This gives us a few statistics about the reference:
* Number of genomes
* Number of sequences
* Number of taxonomic ranks (strain, species, genus...)
* Length of shortest sequence
* Total length of genomes that are assembled
The genomes here contain both plasmids and organisms.
Let's filter the reference using [`subset!`](@ref) to only retain organisms, and sequences of length 10 or more:
```jldoctest walk
julia> subset!(ref;
genomes = is_organism,
sequences = s -> length(s) >= 10
)
Reference
Genomes: 2
Sequences: 11
Ranks: 3
Seq length: 10
Assembled: 91.3 %
```
!!! note
The function [`subset!`](@ref) will mutate the reference, whereas the function [`subset`](@ref)
will create a new independent reference. At the moment, the latter is much slower.
We removed a single genome, namely the one labeled as a virus.
## Genomes
We can explore the genomes contained in the reference with the `genomes` function,
which returns an iterable of `Genome` (in this case, a `Set`):
```jldoctest walk; filter = r"\s+Genome\([A-Za-z0-9\.]+\)"
julia> genomes(ref)
Set{Genome} with 2 elements:
Genome(gA)
Genome(gB)
```
Let's look at a `Genome` in more detail:
```jldoctest walk
julia> genome, genome2 = genomes(ref);
julia> genome
Genome "gA"
Parent: "D"
Genome size: 100
Assembly size: 88 (88.0 %)
Sources: 1
Flags: 1 (organism)
```
The _flags_ can be extracted with the `flags(genome)` function - each genome contains zero or more flags:
```jldoctest walk
julia> flags(genome)
FlagSet with 1 element:
BinBencherBackend.Flags.organism
```
... in this case, this genome is an organism as opposed to a plasmid or virus.
You can see all possible flags with `instances(Flags.Flag)`.
See also the helper functions [`is_organism`](@ref), [`is_virus`](@ref) and [`is_plasmid`](@ref).
## Sources
The `genome` has one source - let's look at that
```jldoctest walk
julia> source = only(genome.sources)
Source "subjA1"
genome: Genome(gA)
Length: 100
Assembly size: 88
Sequences: 6
```
A `Source` is one of the genomic sequences that genomes are composed of.
This is distinct from the assembled sequences that we will be binning - a `Source` represents the reference sequence,
typically the full genome, assembled from a sequencing run on a clonal colony.
For this genome, we can see it has a length of 100 bp, and that 6 sequences map to this source, covering 88 bp.
We can get the sequences mapping to this source:
```jldoctest walk
julia> source.sequences
6-element Vector{Tuple{Sequence, Tuple{Int64, Int64}}}:
(Sequence("s1", 25), (5, 29))
(Sequence("s1", 25), (10, 34))
(Sequence("s2", 40), (1, 40))
(Sequence("s3", 50), (51, 98))
(Sequence("s7", 20), (21, 40))
(Sequence("s8", 25), (2, 26))
```
Here, e.g. the third entry tells us that the sequence "s2", with a length of 40, maps to positions 1:40 (both inclusive).
## Clades
Genomes are organised into a taxonomic hierarchy.
We can find the immediate parent of a genome by accessing the field `genome.parent`.
Let's look at another genome:
```jldoctest walk
julia> genome2
Genome "gB"
Parent: "D"
Genome size: 50
Assembly size: 49 (98.0 %)
Sources: 2
Flags: 1 (organism)
julia> clade = genome2.parent
Species "D", 2 genomes
├─ Genome(gA)
└─ Genome(gB)
```
The parent is an instance of a `Clade`.
`Clade`s are at a specific rank: Rank 1 for species, 2 for genus, 3 for family, etc.
Every clade has one or more children: These are the clades one rank lower.
Conceptually, rank zero corresponds to `Genome`s (OTUs, for this reference dataset)
```jldoctest walk
julia> clade.children
2-element Vector{Genome}:
Genome(gA)
Genome(gB)
```
We can find the most recent common ancestor (MRCA) of `genome` and `genome2` like this:
```jldoctest walk
julia> mrca(genome, genome2)
Species "D", 2 genomes
├─ Genome(gA)
└─ Genome(gB)
```
In this small reference the two genomes belong to the same species, so the species-level clade "D" is their most recent common ancestor.
The top clade can be found with the `top_clade(ref)` function, which is the universal ancestor of all clades in the reference.
## Binnings
A `Binning` is a set of bins benchmarked against a reference.
We can load a set of Vamb bins and turn it into a `Binning` object like this:
```jldoctest walk
julia> binning = Binning(path_to_bins_file, ref)
Binning
Reference
Genomes: 2
Sequences: 11
Ranks: 3
Seq length: 10
Assembled: 91.3 %
Bins: 6
NC genomes: 0
HQ bins: 0
Mean bin genome R/P/F1: 0.51 / 1.0 / 0.672
Mean bin assembly R/P/F1: 0.546 / 1.0 / 0.704
Precisions: [0.6, 0.7, 0.8, 0.9, 0.95, 0.99]
Recalls: [0.6, 0.7, 0.8, 0.9, 0.95, 0.99]
Reconstruction (genomes):
P\R 0.6 0.7 0.8 0.9 0.95 0.99
0.6 1 0 0 0 0 0
0.7 1 0 0 0 0 0
0.8 1 0 0 0 0 0
0.9 1 0 0 0 0 0
0.95 1 0 0 0 0 0
0.99 1 0 0 0 0 0
```
A wealth of information is readily available:
* `binning.ref` gives the underlying `Reference`
* `binning.recalls` and `binning.precisions` give the recall/precision thresholds used in benchmarking
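For example, with the demo binning loaded above, these are plain vectors (a sketch, not a doctest):

```julia
binning.recalls     # the recall thresholds, here [0.6, 0.7, 0.8, 0.9, 0.95, 0.99]
binning.precisions  # the precision thresholds, here the same values
```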
The function `print_matrix` will display the number of recovered genomes/assemblies.
It takes two optional keywords: `level`, the taxonomic rank (defaults to 0, meaning strain level),
and `assembly`, which defaults to `true`. If `assembly` is set to `false`, the number of recovered
genomes is benchmarked instead of the number of recovered assemblies.
```jldoctest walk
julia> print_matrix(binning; level=1, assembly=false)
P\R 0.6 0.7 0.8 0.9 0.95 0.99
0.6 1 0 0 0 0 0
0.7 1 0 0 0 0 0
0.8 1 0 0 0 0 0
0.9 1 0 0 0 0 0
0.95 1 0 0 0 0 0
0.99 1 0 0 0 0 0
```
You can also get the number of genomes or assemblies reconstructed at a given
precision/recall level directly with `n_recovered`:
```jldoctest walk
julia> n_recovered(binning, 0.6, 0.7; assembly=true)
1
julia> n_recovered(binning, 0.66, 0.91; level=1)
0
```
## Bins
The `Binning` object obviously contains our bins.
Let's pick a random bin:
```jldoctest walk
julia> bin = binning.bins[4]
Bin "C4"
Sequences: 3
Breadth: 55
Intersecting 2 genomes
```
The "breadth" here is the sum of the lengths of its sequences.
`bin.sequences` gets an iterable of its sequences:
```jldoctest walk
julia> collect(bin.sequences)
3-element Vector{Sequence}:
Sequence("s5", 25)
Sequence("s6", 10)
Sequence("s7", 20)
```
The "Intersecting 2 genomes" means that the sequences map to 2 different genomes - the only two in the reference.
We can get that with the function [`intersecting`](@ref), then get the precision/recall
with [`recall_precision`](@ref):
```jldoctest walk
julia> Set(intersecting(bin)) == genomes(ref)
true
julia> recall_precision(genome2, bin)
(recall = 0.6, precision = 1.0)
```
Or do the same for a higher clade - let's say a genus. In this case, we get the same result.
```jldoctest walk
julia> genus = only(Iterators.filter(i -> i.rank == 2, intersecting(Clade, bin)));
julia> recall_precision(genus, bin)
(recall = 0.6, precision = 1.0)
```
| BinBencherBackend | https://github.com/jakobnissen/BinBencherBackend.jl.git |
|
[
"MIT"
] | 0.1.0 | a3dd389e90ba295014d1ca87a36ad2e780a5887b | code | 1906 | using DataFrames, CSV
include("../src/sdistance.jl") # the path must be a double-quoted string
include("../test/draw.jl")
include("../src/SignedDistanceFunction.jl")
import .Sdistance: benchmark_floodfill, benchmark_singlecurves_isinside #,signedDistance2D
using .SignedDistanceFunction
import .Draw: parformance_graphs
@enum ExecuteKinds _multicurves _singlecurve _singlecurve_floodfill
function _exe(kinds)
_execute_times = 2
_phi = []
# NOTE: N is for image size(N x N)
init_N = 100
increment_N = 200
runtime = zeros(_execute_times + 1, 2)
N = [init_N + increment_N * item for item = 0:_execute_times]
if kinds == 1
for i = 0:_execute_times
runtime[i+1, 1] = benchmark_floodfill(init_N + increment_N * i, "./test/mock_csv_data/multiple_curves.csv")
runtime[i+1, 2] = benchmark_floodfill(init_N + increment_N * i, "./test/mock_csv_data/multiple_curves.csv", false)
end
parformance_graphs(N, runtime, "multiple_curves", ["Parallel processing", "Normal processing"])
elseif kinds == 2
for i = 0:_execute_times
runtime[i+1, 1] = benchmark_floodfill(init_N + increment_N * i, "./test/mock_csv_data/interface.csv")
runtime[i+1, 2] = benchmark_floodfill(init_N + increment_N * i, "./test/mock_csv_data/interface.csv", false)
end
parformance_graphs(N, runtime, "interface_floodfill", ["Parallel processing", "Normal processing"])
elseif kinds == 3
for i = 0:_execute_times
runtime[i+1, 1] = benchmark_singlecurves_isinside(init_N + increment_N * i, "./test/mock_csv_data/interface.csv")
runtime[i+1, 2] = benchmark_singlecurves_isinside(init_N + increment_N * i, "./test/mock_csv_data/interface.csv", false)
end
parformance_graphs(N, runtime, "the jordan curve", ["Parallel processing", "Normal processing"])
end
end
# _exe(1)
# _exe(2)
_exe(3)
| SignedDistanceFunction | https://github.com/jabelic/SignedDistanceFunction.jl.git |
|
[
"MIT"
] | 0.1.0 | a3dd389e90ba295014d1ca87a36ad2e780a5887b | code | 123 | module SignedDistanceFunction
include("./sdistance.jl")
using .Sdistance # only the exported methods are available
export signedDistance2D
end | SignedDistanceFunction | https://github.com/jabelic/SignedDistanceFunction.jl.git |
|
[
"MIT"
] | 0.1.0 | a3dd389e90ba295014d1ca87a36ad2e780a5887b | code | 3823 | module DistanceFunction
include("./environments.jl")
import DataFrames, DelimitedFiles, Luxor, BenchmarkTools
using DataFrames, DelimitedFiles, Luxor, BenchmarkTools
# Calculate the minimum distance from a given point of the domain to the points of the gamma curve(s).
function distanceToCurve(px::Float64, py::Float64, gem::Array) # TODO: tighten the argument types
min_distance = 10000.0
for i = 1:length(gem[:, 1])
distnow = sqrt((gem[i, 1] - px)^2 + (gem[i, 2] - py)^2)
if (distnow < min_distance)
min_distance = distnow
end
end
return min_distance
end
precompile(distanceToCurve, (Float64, Float64, Array))
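# Example: for a two-point curve gamma = [0.0 0.0; 1.0 0.0],
# distanceToCurve(0.0, 1.0, gamma) == 1.0, since the query point (0, 1)
# is closest to the curve point (0, 0).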
# Multi processing, multi jordan curves.
function create_distance_function_multiprocess(_x::Array, _y::Array, _gamma::Array)
x_length = length(_x[:, 1])
return_value = zeros(Float64, x_length, x_length)
if JULIA_MULTI_PROCESS
Threads.@threads for indexI = 1:length(_y)
for indexJ = 1:length(_x)
return_value[indexI, indexJ] = 1.0 * distanceToCurve(_x[indexJ], _y[indexI], _gamma)
end
end
else
for indexI = 1:length(_y)
for indexJ = 1:length(_x)
return_value[indexI, indexJ] = 1.0 * distanceToCurve(_x[indexJ], _y[indexI], _gamma)
end
end
end
return return_value
end
precompile(create_distance_function_multiprocess, (Array, Array, Array))
function create_distance_function(_x::Array, _y::Array, _gamma::Array)
x_length = length(_x[:, 1])
return_value = zeros(Float64, x_length, x_length)
for indexI = 1:length(_y)
for indexJ = 1:length(_x)
return_value[indexI, indexJ] = 1.0 * distanceToCurve(_x[indexJ], _y[indexI], _gamma)
end
end
return return_value
end
precompile(create_distance_function, (Array, Array, Array))
# Normal processing, a jordan curve.
function create_signed_distance_function(_x::Array, _y::Array, _gamma::Array)
x_length = length(_x[:, 1])
return_value = zeros(Float64, x_length, x_length)
for indexI = 1:length(_y)
for indexJ = 1:length(_x)
sdist = 1.0 * distanceToCurve(_x[indexJ], _y[indexI], _gamma)
            # gamma must be a closed curve.
            # isinside can fail when the point lies exactly on the curve, so handle that case first.
if Point(_x[indexJ], _y[indexI]) in [Point(_gamma[i, 1], _gamma[i, 2]) for i = 1:length(_gamma[:, 1])]
sdist = 0
elseif isinside(Point(_x[indexJ], _y[indexI]), [Point(_gamma[i, 1], _gamma[i, 2]) for i = 1:length(_gamma[:, 1])])
sdist = (-1) * sdist
end
return_value[indexI, indexJ] = sdist
end
end
return return_value
end
precompile(create_signed_distance_function, (Array, Array, Array))
# Multi processing, a jordan curve.
function create_signed_distance_function_multiprocess(_x::Array, _y::Array, _gamma::Array)
x_length = length(_x[:, 1])
return_value = zeros(Float64, x_length, x_length)
Threads.@threads for indexI = 1:length(_y)
for indexJ = 1:length(_x)
sdist = 1.0 * distanceToCurve(_x[indexJ], _y[indexI], _gamma)
if Point(_x[indexJ], _y[indexI]) in [Point(_gamma[i, 1], _gamma[i, 2]) for i = 1:length(_gamma[:, 1])]
sdist = 0
elseif isinside(Point(_x[indexJ], _y[indexI]), [Point(_gamma[i, 1], _gamma[i, 2]) for i = 1:length(_gamma[:, 1])])
sdist = (-1) * sdist
end
return_value[indexI, indexJ] = sdist
end
end
return return_value
end
precompile(create_signed_distance_function_multiprocess, (Array, Array, Array))
export create_signed_distance_function_multiprocess, create_signed_distance_function, distanceToCurve, create_distance_function, create_distance_function_multiprocess
end | SignedDistanceFunction | https://github.com/jabelic/SignedDistanceFunction.jl.git |
|
[
"MIT"
] | 0.1.0 | a3dd389e90ba295014d1ca87a36ad2e780a5887b | code | 60 | JULIA_MULTI_PROCESS = true
L = 1.5
# dev | rls
STAGE = "rls" | SignedDistanceFunction | https://github.com/jabelic/SignedDistanceFunction.jl.git |
|
[
"MIT"
] | 0.1.0 | a3dd389e90ba295014d1ca87a36ad2e780a5887b | code | 10027 | module Floodfill
include("./environments.jl")
#===
Flood-fill (node):
1. Set Q to the empty queue or stack.
2. Add node to the end of Q.
3. While Q is not empty:
4. Set n equal to the first element of Q.
5. Remove first element from Q.
    6. If n is Inside:
            Mark n as filled.
Add the node to the west of n to the end of Q.
Add the node to the east of n to the end of Q.
Add the node to the north of n to the end of Q.
Add the node to the south of n to the end of Q.
7. Continue looping until Q is exhausted.
8. Return.
===#
function floodfill(_phi::Array, N, L, filled, beginx, beginy, filled_index, indexI = nothing, multiprocess = true)
    # -> because if the closed curve touches the boundary in two places, the fill (sign assignment) would finish within that region only.
point_que = [(beginx, beginy)]
closed_zero = L * 2 * 1.42 / (N + 1)
# println("The lattice size: ",size(_phi), " the beginning point: ", point_que)
if indexI !== nothing
bounse_x = size(_phi[:, 1])[1] + 1
bounse_min_x = 0
else
bounse_x = N + 1
bounse_min_x = 0
end
bounse_y = size(_phi)[2] + 1
bounse_min_y = 0
STEP = 2
while length(point_que) > 0
_x = copy(point_que[1][1])
_y = copy(point_que[1][2])
        # flip the sign of the current lattice point
if _phi[_x, _y] <= 0
_phi[_x, _y] *= (-1)
end
filled[filled_index] = point_que[1]
popfirst!(point_que)
filled_index += 1
        # below
        if bounse_min_x < _x - STEP < bounse_x && !((_x - STEP, _y) in filled) && abs(_phi[_x-STEP, _y]) > closed_zero
            if !((_x - STEP, _y) in point_que) && _phi[_x-STEP, _y] <= 0
                append!(point_que, [(_x - STEP, _y)]) # push the lattice point below onto the queue
            end
        end
        # left
        if bounse_min_y < _y - STEP < bounse_y && !((_x, _y - STEP) in filled) && abs(_phi[_x, _y-STEP]) > closed_zero
            if !((_x, _y - STEP) in point_que) && _phi[_x, _y-STEP] <= 0
                append!(point_que, [(_x, _y - STEP)]) # push the lattice point to the left onto the queue
            end
        end
        # above
        if bounse_min_x < _x + STEP < bounse_x && !((_x + STEP, _y) in filled) && abs(_phi[_x+STEP, _y]) > closed_zero
            if !((_x + STEP, _y) in point_que) && _phi[_x+STEP, _y] <= 0
                append!(point_que, [(_x + STEP, _y)]) # push the lattice point above onto the queue
            end
        end
        # right
        if _y + STEP < bounse_y && !((_x, _y + STEP) in filled) && abs(_phi[_x, _y+STEP]) > closed_zero
            if !((_x, _y + STEP) in point_que) && _phi[_x, _y+STEP] <= 0
                append!(point_que, [(_x, _y + STEP)]) # push the lattice point to the right onto the queue
            end
        end
end
# end
if STEP == 2
_phi = assign_signs(_phi, STEP, N, L, multiprocess)
end
return _phi, filled_index#, filled
end
precompile(floodfill, (Array, Int, Float64, Array, Int, Int, Int, Int, Int))
## currently only handles the STEP = 2 case
function assign_signs(_phi, STEP, N, L, multiprocess = true)
    # the interior of the closed curve is negative ("-") by default
    # Int((N-1)/100) # number of passes
loops = Int(log2(STEP))
steps_signed_grid = STEP
steps_unsigned_grid = Int(STEP / 2)
if multiprocess
while loops > 0
if STEP != 1
            Threads.@threads for i = 1:steps_signed_grid:length(_phi[1, :]) # every other row
                for j = 1:steps_signed_grid:length(_phi[:, 1])
                    # not at the end
                    if j + steps_signed_grid <= length(_phi[:, 1]) && j + steps_unsigned_grid <= length(_phi[:, 1])
                        # a negative product -> the boundary lies in between; also the two distances should sum to twice the grid spacing (is numerical error handled properly here?)
                        if _phi[i, j] * _phi[i, j+steps_signed_grid] <= 0
                            if abs(_phi[i, j]) <= abs(_phi[i, j+steps_signed_grid]) # the point at j is closer
                                if _phi[i, j] < 0 # j is inside
                                    _phi[i, j+steps_unsigned_grid] *= (-1) # so the midpoint lies outside
                                end
                            else # the point at j+steps_signed_grid is closer
                                if _phi[i, j] >= 0 # j is outside
                                    _phi[i, j+steps_unsigned_grid] *= (-1)
                                end
                            end
                        elseif _phi[i, j] > 0 && _phi[i, j+steps_signed_grid] > 0 # both outside
                            _phi[i, j+steps_unsigned_grid] *= (-1)
end
end
end
end
Threads.@threads for j = 1:Int(steps_signed_grid / 2):length(_phi[:, 1])
for i = 1:steps_signed_grid:length(_phi[:, 1])
                    # not at the end
                    if i + steps_signed_grid <= length(_phi[:, 1]) && i + steps_unsigned_grid <= length(_phi[:, 1])
                        if _phi[i, j] * _phi[i+steps_signed_grid, j] <= 0
                            if abs(_phi[i, j]) <= abs(_phi[i+steps_signed_grid, j]) # the point at i is closer
                                if _phi[i, j] < 0 # i is inside
                                    _phi[i+steps_unsigned_grid, j] *= (-1) # so the midpoint lies outside
                                end
                            else # the point at i+steps_signed_grid is closer
                                # if _phi[i+steps_signed_grid, j] < 0 # i+steps_signed_grid is inside
                                if _phi[i, j] >= 0 # i is outside
                                    _phi[i+steps_unsigned_grid, j] *= (-1) # so the midpoint lies outside
                                end
                            end
                        elseif _phi[i, j] > 0 && _phi[i+steps_signed_grid, j] > 0 # both outside
_phi[i+steps_unsigned_grid, j] *= (-1)
end
end
end
end
end
steps_signed_grid = steps_signed_grid >= 4 ? deepcopy(Int(steps_signed_grid / 2)) : undef
steps_unsigned_grid = steps_unsigned_grid >= 2 ? deepcopy(Int(steps_unsigned_grid / 2)) : undef
loops -= 1
end
else
while loops > 0
if STEP != 1
                for i = 1:steps_signed_grid:length(_phi[1, :]) # every other row
                    for j = 1:steps_signed_grid:length(_phi[:, 1])
                        # not at the end
                        if j + steps_signed_grid <= length(_phi[:, 1]) && j + steps_unsigned_grid <= length(_phi[:, 1])
                            # a negative product -> the boundary lies in between; also the two distances should sum to twice the grid spacing (is numerical error handled properly here?)
                            if _phi[i, j] * _phi[i, j+steps_signed_grid] <= 0
                                if abs(_phi[i, j]) < abs(_phi[i, j+steps_signed_grid]) # the point at j is closer
                                    if _phi[i, j] < 0 # j is inside
                                        _phi[i, j+steps_unsigned_grid] *= (-1) # so the midpoint lies outside
                                    end
                                else # the point at j+steps_signed_grid is closer
                                    if _phi[i, j] >= 0 # j is outside
                                        _phi[i, j+steps_unsigned_grid] *= (-1)
                                    end
                                end
                            elseif _phi[i, j] > 0 && _phi[i, j+steps_signed_grid] > 0 # both outside
_phi[i, j+steps_unsigned_grid] *= (-1)
end
end
end
end
for j = 1:Int(steps_signed_grid / 2):length(_phi[:, 1])
for i = 1:steps_signed_grid:length(_phi[:, 1])
                        # not at the end
                        if i + steps_signed_grid <= length(_phi[:, 1]) && i + steps_unsigned_grid <= length(_phi[:, 1])
                            if _phi[i, j] * _phi[i+steps_signed_grid, j] <= 0
                                if abs(_phi[i, j]) < abs(_phi[i+steps_signed_grid, j]) # the point at i is closer
                                    if _phi[i, j] < 0 # i is inside
                                        _phi[i+steps_unsigned_grid, j] *= (-1)
                                    end
                                else # the point at i+steps_signed_grid is closer
                                    if _phi[i, j] >= 0 # i is outside
                                        _phi[i+steps_unsigned_grid, j] *= (-1)
                                    end
                                end
                            elseif _phi[i, j] > 0 && _phi[i+steps_signed_grid, j] > 0 # both outside
_phi[i+steps_unsigned_grid, j] *= (-1)
end
end
end
end
end
steps_signed_grid = steps_signed_grid >= 4 ? deepcopy(Int(steps_signed_grid / 2)) : undef
steps_unsigned_grid = steps_unsigned_grid >= 2 ? deepcopy(Int(steps_unsigned_grid / 2)) : undef
loops -= 1
end
end
return _phi
end
function signining_field(_phi::Array, N, L, multiprocess = true)
_phi .*= (-1)
filled = Array{Tuple{Int64,Int64}}(undef, N * N)
filled_index = 1
beginx = 1
beginy = N
indexI = 1
    (_phi, _) = floodfill(_phi, N, L, filled, beginx, beginy, filled_index, indexI, multiprocess)
    return _phi
end
precompile(signining_field, (Array, Int, Float64))
export signining_field
end | SignedDistanceFunction | https://github.com/jabelic/SignedDistanceFunction.jl.git |
|
[
"MIT"
] | 0.1.0 | a3dd389e90ba295014d1ca87a36ad2e780a5887b | code | 5434 | module Sdistance
include("./distance_function.jl")
include("./floodfill.jl")
include("./utils/utils.jl")
include("./environments.jl")
import .DistanceFunction: create_signed_distance_function_multiprocess, create_signed_distance_function, distanceToCurve, create_distance_function, create_distance_function_multiprocess
import .Floodfill: signining_field
import .Utils: is_jordan_curve, interpolation
import CSV, DataFrames, DelimitedFiles, Luxor, BenchmarkTools
using CSV, DataFrames, DelimitedFiles, Luxor, BenchmarkTools
if STAGE == "dev"
import Plots, TimerOutputs
using Plots, TimerOutputs
const tmr = TimerOutput()
end
"""
Benchmark method:
- compares parallel and single-process execution when floodfill is used
"""
function benchmark_floodfill(N::Int = 1000, _csv_datafile::String = "../test/mock_csv_data/interface.csv", multiprocess::Bool = true)
exetimes = 3
runtime = 0
if multiprocess
for i = 1:exetimes
_phi, time = @timed signedDistance2D(_csv_datafile, N, "multi")
runtime += time
end
else
for i = 1:exetimes
_phi, time = @timed signedDistance2D_singleprocess(_csv_datafile, N, "multi")
runtime += time
end
end
return (runtime / exetimes)
end
"""
Benchmark method:
- compares parallel and single-process execution of the `isinside` method for a single curve
"""
function benchmark_singlecurves_isinside(N::Int = 1000, _csv_datafile::String = "../test/mock_csv_data/interface.csv", multiprocess::Bool = true)
exetimes = 3
runtime = 0
if multiprocess
for i = 1:exetimes
_phi, time = @timed signedDistance2D(_csv_datafile, N, "single")
runtime += time
end
else
for i = 1:exetimes
_phi, time = @timed signedDistance2D_singleprocess(_csv_datafile, N, "single")
runtime += time
end
end
return (runtime / exetimes)
end
"""
csv_datafile::Union{String, DataFrame}
N::Int
curves::Union{String, Nothing}
"""
function signedDistance2D(csv_datafile::Union{String,DataFrame}, N::Int = 100, curves::Union{String,Nothing} = nothing)
#=== case: multiple circles ===#
if curves == "multi"
# create the computational domain
_phi = zeros(Float64, N + 1, N + 1)
# loading the csv file(the circle data)
_gamma = Array{Any}(undef, 0, 2)
_gamma = readdlm(csv_datafile, ',', Float64)
if length(_gamma) < 200
_gamma = interpolation(_gamma, Int(floor(log(length(_gamma) / 2, N^1.5))) + 2, true)
elseif Int(floor(log(length(_gamma) / 2, N^1.5))) > 0
_gamma = interpolation(_gamma, Int(floor(log(length(_gamma) / 2, N^1.5))), true)
end
_x = [i for i = -L:2*L/N:L] # len:N+1
_y = [i for i = -L:2*L/N:L] # len:N+1
println("Data format: Multi curves\nThe CSV data size: ", size(_gamma))
_phi = create_distance_function_multiprocess(_x, _y, _gamma)
signining_field(_phi, N + 1, L)
return _phi
#=== case: simple circle ===#
else
# create the computational domain
_phi = zeros(Float64, N + 1, N + 1)
_gamma = readdlm(csv_datafile, ',', Float64)
_x = [i for i = -L:2*L/N:L] # len:N+1
_y = [i for i = -L:2*L/N:L] # len:N+1
        is_jordan_curve(_gamma) # TODO: add a descriptive error message
_gamma = interpolation(_gamma, Int(floor(log(length(_gamma) / 2, 2 * N))) + 1, false)
println("Data format: The jordan curve\nThe CSV data size: ", size(_gamma))
_phi = create_signed_distance_function_multiprocess(_x, _y, _gamma) # parallel processing
return _phi
end
end
function signedDistance2D_singleprocess(csv_datafile::Union{String,DataFrame}, N::Int = 100, curves::Union{String,Nothing} = nothing)
#=== case: double circle ===#
if curves == "multi"
# create the computational domain
_phi = zeros(Float64, N + 1, N + 1)
_gamma = Array{Any}(undef, 0, 2)
_gamma = readdlm(csv_datafile, ',', Float64)
if length(_gamma) < 200
_gamma = interpolation(_gamma, Int(floor(log(length(_gamma) / 2, N^1.5))) + 2, true)
elseif Int(floor(log(length(_gamma) / 2, N^1.5))) > 0
_gamma = interpolation(_gamma, Int(floor(log(length(_gamma) / 2, N^1.5))), true)
end
println("Data format: Multi curves\nThe CSV data size: ", size(_gamma))
_x = [i for i = -L:2*L/N:L] # len:N+1
_y = [i for i = -L:2*L/N:L] # len:N+1
_phi = create_distance_function(_x, _y, _gamma)
signining_field(_phi, N + 1, L, false)
return _phi
#=== case: simple circle ===#
else
# create the computational domain
_phi = zeros(Float64, N + 1, N + 1)
_gamma = readdlm(csv_datafile, ',', Float64)
_x = [i for i = -L:2*L/N:L] # len:N+1
_y = [i for i = -L:2*L/N:L] # len:N+1
        is_jordan_curve(_gamma) # TODO: add a descriptive error message
_gamma = interpolation(_gamma, Int(floor(log(length(_gamma) / 2, 2 * N))) + 1, false)
println("Data format: The jordan curve\nThe CSV data size: ", size(_gamma))
_phi = create_signed_distance_function(_x, _y, _gamma) # single processing
return _phi
end
end
precompile(signedDistance2D, (Union{String,DataFrame}, Int, Union{String,Nothing}))
export signedDistance2D, benchmark_floodfill, benchmark_singlecurves_isinside
end
| SignedDistanceFunction | https://github.com/jabelic/SignedDistanceFunction.jl.git |
|
[
"MIT"
] | 0.1.0 | a3dd389e90ba295014d1ca87a36ad2e780a5887b | code | 530 | #= Sysimage for Developers. Sysimage.so must be written on .gitignore because it is so large file! =#
include("./environments.jl")
using Pkg
Pkg.add("DelimitedFiles")
Pkg.add("Test")
if STAGE == "dev"
Pkg.add("PackageCompiler")
Pkg.add("TimerOutputs")
Pkg.add("Plots")
using PackageCompiler
PackageCompiler.create_sysimage([:CSV, :DataFrames, :Plots, :Luxor, :BenchmarkTools, :TimerOutputs, :Test]; sysimage_path="Sysimage.so")
end
# >> You can then use the precompiled packages with 'import' and 'using':
# $ julia -JSysimage.so
| SignedDistanceFunction | https://github.com/jabelic/SignedDistanceFunction.jl.git |
|
[
"MIT"
] | 0.1.0 | a3dd389e90ba295014d1ca87a36ad2e780a5887b | code | 3161 | module Utils
# Jordan curve: a closed curve without self-intersections.
"""
Whether the array data represents a Jordan closed curve or not.
"""
function is_jordan_curve(_gamma::Array)
progression_of_differences = [sqrt((_gamma[i, 1] - _gamma[i+1, 1])^2 + (_gamma[i, 2] - _gamma[i+1, 2])^2) for i = 1:(length(_gamma[:, 1])-1)]
ave_distance = sum(progression_of_differences) / length(progression_of_differences)
if ave_distance * 2 < abs(_gamma[1, 1] - _gamma[length(_gamma[:, 1]), 1])
return true
else
return false
end
end
# FIXME: allow the number of interpolated points to be specified linearly, not only by repeated doubling.
"""
Interpolate the data between row n and row n+1.
set data between array[n] and array[n+1]: ..., array[n], new data, array[n+1], ...
array size goes from (x, 2) to (x*2, 2)
"""
function set_data_between_array_elements(array::Array, multiple::Bool, average_interval_between_points::Float64)
(x, y) = size(array)
return_value = Array{Float64}(undef, 2 * x, y)
if multiple
for i = 1:x-1
            # the point spacing is within the average, so interpolate -> treat as the same curve
            if sqrt(sum((array[i, :] .- array[i+1, :]) .^ 2)) < average_interval_between_points * 5 # NOTE: this threshold is hard to tune. does it depend on N?
return_value[i*2-1, :] = array[i, :]
return_value[i*2, :] = (array[i, :] .+ array[i+1, :]) ./ 2
            # the points are far apart, so treat them as belonging to different curves
else
return_value[i*2-1, :] = array[i, :]
return_value[i*2, :] = array[i, :]
end
end
return_value[x*2-1, :] = array[x, :]
return_value[x*2, :] = array[x, :]
else
for i = 1:x-1
            # the point spacing is within the average, so interpolate -> treat as the same curve
return_value[i*2-1, :] = array[i, :]
return_value[i*2, :] = (array[i, :] .+ array[i+1, :]) ./ 2
end
return_value[x*2-1, :] = array[x, :]
        return_value[x*2, :] = array[1, :] # Note: append _gamma's first row because of the boundary condition. size: (N+1,2)
end
return return_value
end
precompile(set_data_between_array_elements, (Array, Bool, Float64))
function remove_equal_value_in_next_points(array::Any)
if length(array) == 0
return
end
return_value = Array{Any}(undef, 0, length(array[1, :]))
array_length = length(array[:, 1])
for i = 1:array_length-1
if array[i, :] != array[i+1, :]
return_value = vcat(return_value, array[i, :]')
end
end
return_value = vcat(return_value, array[array_length, :]')
return return_value
end
"""
Interpolate the data, doubling the number of points `times` times.
"""
function interpolation(array::Array, times::Int, multiple = false)
if times > 0
tmp = []
(x, y) = size(array)
"""
        Average spacing between consecutive data points.
"""
average_interval_between_points = sum([sqrt(sum((array[i, :] .- array[i+1, :]) .^ 2)) for i = 1:x-1]) / x
for _ = 1:times
tmp = set_data_between_array_elements(array, multiple, average_interval_between_points)
array = tmp
end
return remove_equal_value_in_next_points(tmp)
end
end
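# Example (hypothetical): interpolation([0.0 0.0; 1.0 0.0; 1.0 1.0], 1) inserts the
# midpoint between each pair of consecutive rows (and, for a single closed curve,
# between the last and first row), then drops consecutive duplicates.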
precompile(interpolation, (Array, Int, Bool))
export is_jordan_curve, interpolation, remove_equal_value_in_next_points
end
| SignedDistanceFunction | https://github.com/jabelic/SignedDistanceFunction.jl.git |
|
[
"MIT"
] | 0.1.0 | a3dd389e90ba295014d1ca87a36ad2e780a5887b | code | 1670 | using DataFrames, CSV
using Profile
include("../src/environments.jl")
if STAGE == "dev"
    include("../src/sdistance.jl") # the path must be a double-quoted string
include("../test/draw.jl")
include("../src/SignedDistanceFunction.jl")
include("../test/APT.jl")
import .Draw: parformance_graphs
using .SignedDistanceFunction
using .APT
# This script run by test.sh
# ====== Debug ======
# p = signedDistance2D("./test/mock_csv_data/interface.csv", parse(Int, ARGS[1]))
# DataFrame(p, :auto) |> CSV.write("./test/result/interface_result_n"*ARGS[1]*".csv", header=false)
# p = signedDistance2D("./test/mock_csv_data/interface.csv", parse(Int, ARGS[1]), "multi")
# DataFrame(p, :auto) |> CSV.write("./test/result/interface_floodfill_result_n"*ARGS[1]*".csv", header=false)
# p = signedDistance2D("./test/mock_csv_data/multiple_curves.csv", parse(Int, ARGS[1]), "multi")
# DataFrame(p, :auto) |> CSV.write("./test/result/multiple_curves_result_n"*ARGS[1]*".csv", header=false)
# ====== profiling =======
# @profile signedDistance2D("./test/mock_csv_data/interface.csv",parse(Int, ARGS[1]))
# @profile signedDistance2D("./test/mock_csv_data/interface.csv",parse(Int, ARGS[1]), "multi")
# Profile.print()
# open("prof.txt", "w") do s
# Profile.print(IOContext(s, :displaysize => (24, 500)))
# end
# ====== memory size =======
p = @allocated signedDistance2D("./test/mock_csv_data/interface.csv", parse(Int, ARGS[1]), "multi")
# p = @allocated signedDistance2D("./test/mock_csv_data/multiple_curves.csv", parse(Int, ARGS[1]), "multi")
println("\nUsed memory size: ",p/(1024*1024), " MB")
end | SignedDistanceFunction | https://github.com/jabelic/SignedDistanceFunction.jl.git |
|
[
"MIT"
] | 0.1.0 | a3dd389e90ba295014d1ca87a36ad2e780a5887b | code | 1017 | using DataFrames, CSV
using Profile
include("../src/sdistance.jl") # the path must be a double-quoted string
include("../test/draw.jl")
include("../src/SignedDistanceFunction.jl")
include("../src/environments.jl")
include("../test/APT.jl")
import .Draw: parformance_graphs
using .SignedDistanceFunction
using .APT
# ====== Application product testing ======
p = plot_for_debug(parse(Int, ARGS[2]), "./test/mock_csv_data/interface.csv")
# p = plot_for_debug(parse(Int, ARGS[2]), "./test/mock_csv_data/circle.csv")
# p = plot_for_debug(parse(Int, ARGS[2]), "./test/mock_csv_data/interface.csv", "multi")
# p = plot_for_debug(parse(Int, ARGS[2]), "./test/mock_csv_data/multiple_curves.csv", "multi")
# p = plot_for_debug(parse(Int, ARGS[2]), "./test/mock_csv_data/double_circle.csv", "multi")
# plots_contours([i for i = 50:50:300], "./test/mock_csv_data/interface.csv", "multi")
# plots_contours([i for i = 50:50:300], "./test/mock_csv_data/interface.csv")
# plots_wireframe([i for i = 50:50:300], "./test/mock_csv_data/interface.csv", "multi") | SignedDistanceFunction | https://github.com/jabelic/SignedDistanceFunction.jl.git |
|
[
"MIT"
] | 0.1.0 | a3dd389e90ba295014d1ca87a36ad2e780a5887b | code | 683 | using SignedDistanceFunction
using Test
using DelimitedFiles
include("../src/utils/utils.jl")
using .Utils
@testset "SignedDistanceFunction.jl" begin
@test signedDistance2D("mock_csv_data/interface.csv", 300) == readdlm("result/interface_result_n300.csv", ',', Float64)
end
@testset "utils.jl" begin
@test remove_equal_value_in_next_points([1 2; 2 3; 3 2; 3 2; 3 2; 1 2; 1 2]) == [1 2; 2 3; 3 2; 1 2]
@test remove_equal_value_in_next_points([100 100; 10000 10000; 21.2 12.0]) == [100 100; 10000 10000; 21.2 12.0]
@test remove_equal_value_in_next_points([10.112233 10.112233]) == [10.112233 10.112233]
@test remove_equal_value_in_next_points([]) === nothing
end | SignedDistanceFunction | https://github.com/jabelic/SignedDistanceFunction.jl.git |
|
[
"MIT"
] | 0.1.0 | a3dd389e90ba295014d1ca87a36ad2e780a5887b | docs | 2666 | # SignedDistanceFunction.jl
<!-- [](https://app.travis-ci.com/jabelic/SignedDistanceFunction.jl) -->
[](https://github.com/jabelic/SignedDistanceFunction.jl/actions/workflows/ci.yml)
[](https://codecov.io/gh/jabelic/SignedDistanceFunction.jl)
[](https://coveralls.io/github/jabelic/SignedDistanceFunction.jl?branch=main)
SignedDistanceFunction.jl is a package to compute signed distance functions.
Main features are:
- Computing the signed distance function of a Jordan (simple closed) curve data set (2D).
- Computing the signed distance function of a data set containing multiple closed curves (2D).
<!-- Provides, from closed-curve data, the signed distance function required as the initial value when computing the level-set function for the level-set method. -->
## Usage
The closed-curve data must be a `.csv` file. `N` is an `Int` giving the number of subdomain divisions; the call returns an `N×N` signed distance function (SDF).
`signedDistance2D("XXXXXX.csv", N)`
`signedDistance2D("XXXXXX.csv", N, "multi")`
<img src="./test/image/ForREADME/2x2interface_multicurves_multiprocess_300.png" width=50%><img src="./test/image/ForREADME/2x2multiple_curves_multicurves_multiprocess_300.png" width=50%>
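
A minimal end-to-end sketch (paths and `N` are illustrative; assumes `DataFrames` and `CSV` are installed):

```julia
using SignedDistanceFunction, DataFrames, CSV

N = 300                                                        # subdomain divisions per axis
p = signedDistance2D("./test/mock_csv_data/interface.csv", N)  # N×N matrix of signed distances
# for data containing multiple closed curves, pass the "multi" flag:
# p = signedDistance2D("./test/mock_csv_data/multiple_curves.csv", N, "multi")
DataFrame(p, :auto) |> CSV.write("sdf_result.csv", header=false)  # save the result
```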
## Contribution
### Setup
#### macOS
`$ julia>`
`$ Pkg(1.5)> add PackageCompiler`
`$ Pkg(1.5)> add DelimitedFiles`
`$ Pkg(1.5)> add TimerOutputs`
`$ Pkg(1.5)> add Test`
`$ Pkg(1.5)> add Plots`
`$ julia> using PackageCompiler`
`$ julia> PackageCompiler.create_sysimage([:CSV, :DataFrames, :Plots, :Luxor, :BenchmarkTools, :TimerOutputs, :Test]; sysimage_path="Sysimage.so")`
`$ Pkg(1.5)> activate .`
`$ (SignedDistanceFunction)>`
`$ julia> using SignedDistanceFunction`
`$ julia> signedDistance2D("xxxxxx.csv", N)`
`$ echo "Sysimage.so" >> .gitignore`
### Debug in REPL
`$julia>`
enter the Pkg mode(`]`)
`$ pkg>`
`$ pkg> activate .`
`(SignedDistanceFunction) pkg>`
return the REPL(`Delete/Backspace`)
`julia> `
`julia> using SignedDistanceFunction`
`julia> signedDistance2D("XXXXXX.csv", N)`
<!-- Plots sample data:
`julia> using CSV, DataFrames, Plots, DelimitedFiles, Luxor, BenchmarkTools`
`julia> gamma = readdlm("data.csv", ',', Float64)`
`julia> plot(gamma[:, 1], gamma[:, 2], st=:scatter, title="infty_shape", markersize=2, grid=false)`
`julia> savefig("interface.png") -->
| SignedDistanceFunction | https://github.com/jabelic/SignedDistanceFunction.jl.git |
|
[
"MIT"
] | 0.2.1 | 3ee7c987e2fbe00cabe6ef0463d7cb9d91135df1 | code | 933 | module DistributedStwdLDA
using Distributions, StatsBase, Statistics, SpecialFunctions
using LinearAlgebra, Distributed
using SharedArrays,DataFrames,CSV
using FileIO,MCMCChains,ProgressMeter
using Random: Random
export Word,getCorp,getSynthData
export getBitarrays
export initShared
export mat2docind, vec2procind, initterms!,initcorp,initcorpBeta,initcorpChain
export getZ,getJ,getW
export nj2token,nw2token
export chunkSharedSV!,sampleSharedSV!,runChainSharedSV
export splitApprox,epochIndex,filterTokens,getIndRand
export findGenePrefix,filterCells,filterGenes,getParamDict,filterPbulkparams,dsLDA_E_step,procsN,getTheta!
include("corpus.jl") # corpus data and methods to get bit arrays from
include("bitarrays.jl") # methods on bit arrays
include("partition.jl") # partitioning and allocation
include("sampling.jl") # sampler code
include("chains.jl") # sampling diagnostics
include("durian.jl") # dslda stuff for durian
end
| DistributedStwdLDA | https://github.com/mkarikom/DistributedStwdLDA.jl.git |
|
[
"MIT"
] | 0.2.1 | 3ee7c987e2fbe00cabe6ef0463d7cb9d91135df1 | code | 4568 | """
getBitarrays(corpus,inds;ttype=Int64,atype=Int64,dtype=Int64)
Get the bit array representation of the corpus data.
Return the lexicon indices `T`, the initial topic assignments `A`, and the document indices `D` of the tokens.
Shape is `Array{type,2}(data,1,length(data))`.
Optionally specify numeric types for the output.
# Examples
```
corp = map(x->Word.(rand(1:100,x),Int8.(rand(Categorical([0.5,0.3,0.2]),x))),[10,50,100])
T,A,D = getBitarrays(corp,1:3;ttype=Int16,atype=Int16)
```
"""
function getBitarrays(corpus,inds;ttype=Int64,atype=Int64,dtype=Int64)
tokens = [map(w->w.t,d) for d in corpus[inds]]
topics = [map(w->w.a,d) for d in corpus[inds]]
docinds = [map(i->d,corpus[d]) for d in inds]
T = Array{ttype}(reduce(vcat,tokens))
A = Array{atype}(reduce(vcat,topics))
D = Array{dtype}(reduce(vcat,docinds))
T = reshape(T,(1,length(T)))
A = reshape(A,(1,length(A)))
D = reshape(D,(1,length(D)))
(T=T,A=A,D=D)
end
"""
getZ(topics,k;inds=nothing)
Generate an indicator-representation of the topic-assignment matrix.
`Z_{i,j} = 1` when token `j` is generated from latent topic `i`
Optionally specify a term index filter `inds` to sample a distributed epoch.
# Examples
```
getZ(StatsBase.sample(1:3,10),3)
```
"""
function getZ(topics,k;inds=nothing)
if !isnothing(inds)
topics = topics[inds]
end
z = zeros(Bool,k,length(topics))
for t in 1:length(topics)
z[topics[t],t] += 1
end
z
end
"""
getJ(corpus;inds=nothing)
Generate the indicator document/topic loadings `J` s.t. `Z*J=nj`.
`J_{i,j} = 1` when token `i` is in document `j`.
Optionally specify a term index filter `inds` to sample a distributed epoch.
# Examples
```
corp = map(x->Word.(rand(1:100,x),Int8.(rand(Categorical([0.5,0.3,0.2]),x))),[10,50,100])
c = getCorp(corp)
J = getJ(c)
```
"""
function getJ(corpus;inds=nothing)
tokens = reduce(vcat,[map(w->w.t,d) for d in corpus])
if !isnothing(inds)
tokens = filter(!isnothing,indexin(tokens,inds))
end
j = zeros(Bool,length(tokens),length(corpus))
ct = 0
for d in 1:length(corpus)
for t in 1:length(corpus[d])
ct+=1
j[ct,d]+=1
end
end
j
end
"""
getJ(corpus,drange,ndocs;inds=nothing)
See `getJ(corpus;inds=nothing)`. Inserts padding columns for vertical concatenation.
Optionally specify a term index filter `inds` to sample a distributed epoch.
# Examples
```
corp = map(x->Word.(rand(1:100,x),Int8.(rand(Categorical([0.5,0.3,0.2]),x))),[10,50,100])
c = getCorp(corp)
J = getJ(c[1:2],1:2,3)
```
"""
function getJ(corpus,drange,ndocs;inds=nothing)
tokens = reduce(vcat,[map(w->w.t,d) for d in corpus])
if !isnothing(inds)
tokens = filter(!isnothing,indexin(tokens,inds))
end
j = zeros(Bool,length(tokens),ndocs)
ct = 0
for d in 1:length(corpus)
for t in 1:length(corpus[d])
ct+=1
j[ct,drange[d]]+=1
end
end
j
end
"""
getW(corpus,l;inds=nothing)
Generate the indicator term/topic loadings `W` s.t. `Z*W=nw`.
`W_{i,j} = 1` when token `i` corresponds to term `j` in the lexicon with length `l`.
Optionally specify a term index filter `inds` to sample a distributed epoch.
# Examples
```
corp = map(x->Word.(rand(1:100,x),Int8.(rand(Categorical([0.5,0.3,0.2]),x))),[10,50,100])
c = getCorp(corp)
W = getW(c,100)
```
"""
function getW(corpus,l;inds=nothing)
tokens = reduce(vcat,[map(w->w.t,d) for d in corpus])
if !isnothing(inds)
tokens = filter(!isnothing,indexin(tokens,inds))
end
w = zeros(Bool,length(tokens),l)
for t in 1:l
w[:,t] .= .!isnothing.(indexin(tokens,[t]))
end
w
end
"""
nj2token(nj,J)
Expand `nj` (the topic x document counts) over tokens.
`NJK_{i,j} = n` where `n` is the number of times topic `j` is assigned to some token in the parent document of token `i`.
# Examples
```
corp = map(x->Word.(rand(1:100,x),Int8.(rand(Categorical([0.5,0.3,0.2]),x))),[10,50,100])
c = getCorp(corp)
Z = getZ(reduce(vcat,[map(w->w.a,d) for d in c]),3)
J = getJ(c)
nj2token(Z*J,J)
```
"""
function nj2token(nj,J)
d = map(i->i[2],findmax(J,dims=2)[2])
NJK = nj[:,vec(d)]
end
"""
nw2token(nw,W)
Expand `nw` (the topic x term counts) over tokens.
`NWK_{i,j} = n` where `n` is the number of times topic `j` is assigned to some token in the parent document of token `i` corresponding to the same term in the lexicon.
# Examples
```
corp = map(x->Word.(rand(1:100,x),Int8.(rand(Categorical([0.5,0.3,0.2]),x))),[10,50,100])
c = getCorp(corp)
Z = getZ(reduce(vcat,[map(w->w.a,d) for d in c]),3)
W = getW(c,100)
nw2token(Z*W,W)
```
"""
function nw2token(nw,W)
t = map(i->i[2],findmax(W,dims=2)[2])
NWK = nw[:,vec(t)]
end
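# Sufficient-statistic identities (sketch): with Z (k × ntokens) from getZ,
# J (ntokens × ndocs) from getJ, and W (ntokens × nlex) from getW,
#   nj = Z*J   counts topic assignments per document (k × ndocs),
#   nw = Z*W   counts topic assignments per lexicon term (k × nlex);
# nj2token/nw2token then broadcast these counts back onto the individual tokens.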
| DistributedStwdLDA | https://github.com/mkarikom/DistributedStwdLDA.jl.git |
|
[
"MIT"
] | 0.2.1 | 3ee7c987e2fbe00cabe6ef0463d7cb9d91135df1 | code | 3181 | """
    getPhi(Zchain,Beta,W,niter,nchains,k,nlex)
get the topic/word posterior for `niter` samples
# Examples
```
getPhi(Zchain,Beta,W,niter,nchains,k,nlex)
```
"""
function getPhi(Zchain,Beta,W,niter,nchains,k,nlex;verbose=false)
println("niter:",niter)
Phi = SharedArray(zeros(niter,nlex,k,nchains))
println("Phi size: ",size(Phi))
for j in 1:nchains
Threads.@threads for i in 1:niter
verbose ? println("getting chain $j, iter $i") : nothing
nw = permutedims(getZ(Zchain[i,:,j],k)*W)
Phi[i,:,:,j] .= (nw .+ Beta) ./ sum(nw .+ Beta)
end
end
Phi
end
"""
    getTheta!(Thetas,Zchain,J,Alpha,doc_id,niter,chain_id,k)
get the thetas for `niter` samples of document `doc_id` on chain `chain_id`, writing them into `Thetas` in place
# Examples
```
getTheta!(Thetas,Zchain,J,Alpha,doc_id,niter,chain_id,k)
```
"""
function getTheta!(Thetas,Zchain,J,Alpha,doc_id,niter,chain_id,k;verbose=false)
@sync @distributed for i = 1:niter
thetaIter!(Thetas,Zchain[i,:,chain_id],J,doc_id,Alpha,i,chain_id,k)
end
end
function thetaIter!(Theta,Z,J,doc_id,Alpha,iter,chain_id,k)
nj = getZ(Z,k)*J
Theta[iter,:,chain_id,doc_id] .= vec((nj[:,doc_id] .+ Alpha) ./ sum(nj[:,doc_id] .+ Alpha))
end
"""
    getTheta(Zchain,J,Alpha,doc_id,niter,nchains,k)
get the thetas for `niter` samples of document `doc_id`
# Examples
```
getTheta(Zchain,J,Alpha,doc_id,niter,nchains,k)
```
"""
function getTheta(Zchain,J,Alpha,doc_id,niter,nchains,k;verbose=false)
Theta = SharedArray(zeros(niter,k,nchains))
for j in 1:nchains
@sync @distributed for i = 1:niter
thetaIterOld!(Theta,Zchain,J,doc_id,Alpha,i,j,k)
end
end
Theta
end
function thetaIterOld!(Theta,Zchain,J,doc_id,Alpha,i,j,k)
nj = getZ(Zchain[i,:,j],k)*J
Theta[i,:,j] .= vec((nj[:,doc_id] .+ Alpha) ./ sum(nj[:,doc_id] .+ Alpha))
end
"""
getThetaHat(thetas,ndocs,ntopic)
Get the estimate of theta by averaging samples across chains
# Examples
```
getThetaHat(thetas,ndocs,ntopic)
```
"""
function getThetaHat(thetas,ndocs,ntopic)
thetahat0 = mean(thetas,dims=[1,3])
thetahat = zeros(ndocs,ntopic)
for i in 1:size(thetahat0)[4]
thetahat[i,:] .= vec(thetahat0[:,:,:,i])
end
thetahat
end
"""
getPhiHat(phis,nlex,ntopic)
Get the estimate of phi by averaging samples across chains
# Examples
```
getPhiHat(phis,nlex,ntopic)
```
"""
function getPhiHat(phis,nlex,ntopic)
phihat0 = mean(phis,dims=[1,4])
phihat = zeros(nlex,ntopic)
for i in 1:size(phihat0)[3]
phihat[:,i] .= phihat0[1,:,i,1]
end
phihat
end
"""
getHPI(x; alpha=0.05)
Get the highest-probability interval
# Examples
```
getHPI(rand(Beta(6,2),10000))
```
"""
function getHPI(x; alpha=0.05)
# this is MCMCChains._hpd @ master "2ff0a26beda7223552e47d25d948d6c7e44baa95"
n = length(x)
m = max(1, ceil(Int, alpha * n))
y = sort(x)
a = y[1:m]
b = y[(n - m + 1):n]
_, i = findmin(b - a)
return [a[i], b[i]]
end
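# Interval sketch (illustrative): for a skewed sample x = rand(Beta(6,2),10000),
# getHPI(x) returns the narrowest interval holding 95% of the mass, while getETI
# (defined below) cuts 2.5% from each tail; for skewed samples the HPI is shorter.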
"""
getETI(x; alpha=0.05)
Get the equal-tailed interval (quantiles)
# Examples
```
getETI(rand(Beta(6,2),10000))
```
"""
function getETI(x; alpha=0.05)
a=quantile!(x,alpha/2)
b=quantile!(x,1-alpha/2)
return [a,b]
end | DistributedStwdLDA | https://github.com/mkarikom/DistributedStwdLDA.jl.git |
|
[
"MIT"
] | 0.2.1 | 3ee7c987e2fbe00cabe6ef0463d7cb9d91135df1 | code | 4888 | """
Word{T,A}
Word corresponding to token `t` and assignment `a`.
# Examples
```
# Generate single words
Word(1,Int16(2))
Word("a",Int16(2))
# Generate a corpus (Int64,full loop)
c = []
k = 3 # number of topics
ndoc = 10 # number of documents
nlex = 100 # number of terms in lexicon
nread = 1000 # number of tokens per document
lexicon = [1:nlex;]
topics = [1:k;]
for i in 1:ndoc
x = rand(lexicon,1000)
y = rand(topics,1000)
arr = Word.(x,y)
push!(c,arr)
end
# Generate a corpus (Int8 topic, map)
corp = map(x->Word.(rand(1:100,x),Int8.(rand(Categorical([0.5,0.3,0.2]),x))),[10,50,100])
```
"""
struct Word{T,A}
t::T # the index of the token in the lexicon
a::A # the latent topic associated with the token
end
"""
getCorp(corp)
Get the parametric representation of a corpus.
# Examples
```
# Generate a corpus (Int8 topic, map)
corp = map(x->Word.(rand(1:100,x),Int8.(rand(Categorical([0.5,0.3,0.2]),x))),[10,50,100])
c = getCorp(corp)
```
"""
getCorp(corp) = Array{Array{Word{typeof(corp[1][1].t),typeof(corp[1][1].a)},1},1}(corp)
"""
mat2docind(mat)
Given a docs × terms matrix of counts, return for each entry the `[start,end]` index pair that its tokens occupy in the row-wise document vector.
# Examples
```
mat2docind([2 1; 0 3])
```
"""
function mat2docind(mat)
ends = cumsum(mat,dims=2)
starts = hcat(ones(Int,size(mat)[1]),(cumsum(mat,dims=2) .+ 1)[:,1:end-1])
[[starts[i],ends[i]] for i in CartesianIndices(mat)]
end
"""
vec2procind(vlen,nproc)
Partition the indices of a vector of length `vlen` into `nproc` approximately equal groups.
# Examples
```
vec2procind(10,3)
```
"""
function vec2procind(vlen,nproc)
pvec = mod.(1:vlen,nproc).+1
[findall(x->x==i,pvec) for i in unique(pvec)]
end
"""
initterms!(data,Tdoc,Adoc,datind,docind,k)
For each term corresponding to `datind` in a row (`data`) from a doc x terms matrix (as in gene expression data), draw initial assignments from `[1:k;]` for tokens (corresponding to `docind`) as `Adoc` and update the token indices in `Tdoc`.
# Examples
```
initterms!(data,Tdoc,Adoc,datind,docind,k)
```
"""
function initterms!(data,Tdoc,Adoc,datind,docind,k)
for i in 1:length(datind)
# println("initializing term:",datind[i])
Tdoc[docind[i][1]:docind[i][2]] .= datind[i]
Adoc[docind[i][1]:docind[i][2]] .= rand([1:k;],data[datind[i]])
end
# println("init complete")
end
"""
initcorp(counts,inds,procinds,doclengths,k,nwork)
Initialize a corpus over the matrix `counts` using `nwork` available workers.
# Examples
```
initcorp(counts,inds,procinds,doclengths,k,nwork)
```
"""
function initcorp(counts,inds,procinds,doclengths,k,nwork)
corp = []
for d in 1:size(counts)[1]
println("initializing doc $d")
Tdoc = SharedArray(zeros(Int32,doclengths[d]))
Adoc = SharedArray(zeros(Int32,doclengths[d]))
# @sync begin
for i in 1:nwork
# p = workers()[i]
# initterms!(counts[d,:],Tdoc,Adoc,procinds[i],inds[d,procinds[i]],k)
initterms!(counts[d,:],Tdoc,Adoc,procinds[i],inds[d,procinds[i]],k)
# @async remotecall_wait(initterms!,p,counts[d,:],Tdoc,Adoc,procinds[i],inds[d,procinds[i]],k)
end
# end
push!(corp,[Word(Tdoc[i],Adoc[i]) for i in 1:doclengths[d]])
end
corp
end
"""
    initcorpBeta(corp,beta)
Return a copy of `corp` with word assignments randomly re-drawn from the normalized topic/word prior `beta`
# Examples
```
newcorp = initcorpBeta(corp,beta)
```
"""
function initcorpBeta(corp,beta)
newcorp = deepcopy(corp)
for i in 1:length(corp)
for j in 1:length(corp[i])
atype = typeof(corp[i][j].a)
betanorm = beta[corp[i][j].t,:] ./ sum(beta[corp[i][j].t,:])
global newcorp[i][j] = Word(corp[i][j].t,atype(rand(Categorical(betanorm))))
end
end
newcorp
end
"""
    initcorpChain(corp,chain_i,k)
Return a copy of `corp` with word assignments set deterministically from the chain id `chain_i`
# Examples
```
newcorp = initcorpChain(corp,chain_i,k)
```
"""
function initcorpChain(corp,chain_i,k)
newcorp = deepcopy(corp)
for i in 1:length(corp)
for j in 1:length(corp[i])
atype = typeof(corp[i][j].a)
global newcorp[i][j] = Word(corp[i][j].t,atype(mod(chain_i,k)+1))
end
end
newcorp
end
"""
    initcorpUnif(corp,k)
Return a copy of `corp` with word assignments drawn uniformly at random from `1:k`
# Examples
```
newcorp = initcorpUnif(corp,k)
```
"""
function initcorpUnif(corp,k)
newcorp = deepcopy(corp)
for i in 1:length(corp)
for j in 1:length(corp[i])
atype = typeof(corp[i][j].a)
global newcorp[i][j] = Word(corp[i][j].t,atype(rand([1:k;])))
end
end
newcorp
end
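# Note: the three initializers above correspond to the `initcorp` options consumed by
# `runChainSharedSV`: "beta" -> initcorpBeta, "chain" -> initcorpChain, "unif" -> initcorpUnif.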
"""
getSynthData(nbulk,ncells,nreads,zeta,K)
Create `nbulk` pseudobulk samples from `ncells` simulated cells.
Generate n x `ncells` reads where n ~ Poisson(λ = `nreads`).
Allocate these bulk reads based to a queue based on fidelity matrix `zeta`.
# Examples
```
getSynthData(nbulk,ncells,nreads,zeta,K)
```
"""
function getSynthData(nbulk,ncells,nreads,zeta,K)
println("done")
end
| DistributedStwdLDA | https://github.com/mkarikom/DistributedStwdLDA.jl.git |
|
[
"MIT"
] | 0.2.1 | 3ee7c987e2fbe00cabe6ef0463d7cb9d91135df1 | code | 18972 | function procsN(n)
if nprocs() < n
addprocs(n-nprocs())
end
end
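# Usage sketch: procsN(4) tops up the worker pool so that nprocs() >= 4
# (a no-op when enough processes already exist).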
function getParamDict(paramarray)
paramnames = []
paramvalues = []
paramtypes = Vector{DataType}()
for i in 1:length(paramarray)
n,v = split(paramarray[i],".")
push!(paramnames,n)
if !isnothing(tryparse(Int,v))
push!(paramtypes,Int64)
push!(paramvalues,parse(Int64,v))
elseif !isnothing(tryparse(Float64,v))
push!(paramtypes,Float64)
push!(paramvalues,parse(Float64,v))
else
push!(paramtypes,String)
push!(paramvalues,v)
end
end
Dict{Symbol,Any}(Symbol.(paramnames) .=> paramvalues)
end
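# Usage sketch (hypothetical "name.value" strings):
#   getParamDict(["niter.100","mode.fast"])  # -> Dict(:niter => 100, :mode => "fast")
# Note: entries are split on ".", so a fractional value written as "alpha.0.1"
# would parse as :alpha => 0 (the trailing ".1" is dropped by the destructuring).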
function filterPbulkparams(pbulkparams,dfrow)
matched = []
ikeys = collect(keys(pbulkparams))
ivals = collect(values(pbulkparams))
for i in 1:length(ikeys)
push!(matched,dfrow[ikeys[i]] == ivals[i])
end
all(matched)
end
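# Usage sketch (hypothetical keys): filterPbulkparams(Dict(:ncells => 50, :mode => "multi"), dfrow)
# is true only when every key/value pair in the Dict matches the DataFrame row `dfrow`.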
"""
findGenePrefix(prefs,genes)
Get the gene names starting with `prefs[i]`
# Examples
```
findGenePrefix(["MT-"],["MT-CO1","ACTB","MT-ND1"])
```
"""
function findGenePrefix(prefs,genes)
inds = []
for i in 1:length(genes)
for j in 1:length(prefs)
if startswith(genes[i],prefs[j])
push!(inds,i)
end
end
end
genes[inds]
end
"""
    filterCells(data,param,thresh=3.0)
Return the `cellID`s whose `param` value lies within `median ± thresh*std` of their cell type
# Examples
```
filterCells(data,:ldalibsize,3.0)
```
"""
function filterCells(data,param,thresh=3.0)
stats = combine(groupby(data,:cellType), [param] =>
((s) -> (
lower=median(s) - thresh*std(s),
upper=median(s) + thresh*std(s))) => AsTable)
datathresh = innerjoin(data,stats,on=:cellType)
inbounds = datathresh[:,:lower] .< datathresh[:,param] .< datathresh[:,:upper]
delete!(datathresh,.!inbounds)
select(datathresh,Not([:lower,:upper]))[:,:cellID]
end
"""
    filterGenes(bulkdata,scdata,genes;minratio=0.05,protectedgenes=[])
Return the subset of `genes` expressed in at least `minratio` of single cells, plus any expressed `protectedgenes`
# Examples
```
filterGenes(bulkdata,scdata,genes;minratio=0.05)
```
"""
function filterGenes(bulkdata,scdata,genes;
minratio=0.05,protectedgenes=[])
protected_expressed = intersect(genes,protectedgenes)
ncells = size(scdata)[1]
n_sample_exp_bulk = vec(sum(Array(bulkdata[:,genes]) .> 0,dims=1))
n_sample_exp_sc = vec(sum(Array(scdata[:,genes]) .> 0,dims=1))
# find genes expressed in at least minratio of cells
genes_thresh = genes[findall(n_sample_exp_sc .>= ncells*minratio)]
protected_plus_thresh = sort(unique([protected_expressed...,genes_thresh...]))
end
"""
dsLDA_E_step()
Perform deconvolution for DURIAN. Function parameters described in comments.
"""
function dsLDA_E_step(
scReads, # the sc reads (samples x genes)
scGenes, # the sc genes
scIDs, # the sc cell ids
scMeta, # the sc metadata
bulkReads, # the bulk reads (samples x genes)
bulkGenes, # the bulk genes
bulkIDs, # the bulk sample ids
outputdir; # "where the output should be stored"
minCellsTopicCorp = 0, # the number of cells that must exist in a population of the topic corpus after qc
ldamodelname="dsLDA", # "name of the model, to prepend sampler output dir"
niter=100, # "number of mcmc iterations"
nparts=4, # "number of partitions for distributed inference, must be ≤ nworkers"
nchains=2, # "number of chains to run"
blocksize=10, # "number of iterations to store per block"
betapseudo=0.0, # "pseudocount to add to beta prior to scaling"
betaeps=0.000001, # "small value to add to allow well defined prob at each term"
alpha=0.1, # "symmetric concentration parameter for cell types theta"
ldanscthresh=3.0, # "filter cells outside nscthresh standard deviations for library size, deconvolution."
scrnscthresh=3.0, # "filter cells outside nscthresh standard deviations for library size, imputation."
ldagenethresh=0.01, # "filter genes expressed in less than genethresh*#cells for each cell type, deconvolution."
scrgenethresh=0.3, # "filter genes expressed in less than genethresh*#cells for each cell type, imputation."
protectedgenes=[], # array of gene names to protect from qc
scalesc="column", # "how to scale the single cell reference."
scalebulk="lognorm", # "how to scale the bulk data."
bulkfactor=10000, # "scale factor for bulk data"
scfactor=1.0, # "scale factor for sc data"
initflavor="unif", # "how to initialize the latent assignments."
verbose=false, # "produce diagnostic output"
philatent=0, # "infer phi, single cell data will be used as beta concentration parameter"
ldageneinds=nothing,
ldascinds=nothing,
thinning=2, # "keep mod(i,thinning) for i=1:niter"
rmchains=true, # "remove saved chains after analysis"
burn=0.5, # "burn in rate (eg 0.5 means burn for n/2). 0 means no burn in, start at n=1"
runqc=false) # "if not set, genethresh and ncthresh will be ignored, intended for real data, not benchmarking already clean pseudobulk"
println("detected $nparts partition")
println("running $ldamodelname with nworkers:",nworkers())
@assert nworkers() >= nparts "must have at least as many workers as partitions"
modelparams = Dict(
"ldamodelname"=>ldamodelname,
"philatent"=>philatent,
"scalebulk"=>scalebulk,
"scalesc"=>scalesc,
"scfactor"=>scfactor,
"bulkfactor"=>bulkfactor,
"betapseudo"=>betapseudo,
"betaeps"=>betaeps,
"alpha"=>alpha)
params = Dict(
"ldanscthresh"=>ldanscthresh,
"scrnscthresh"=>scrnscthresh,
"ldagenethresh"=>ldagenethresh,
"scrgenethresh"=>scrgenethresh,
"mincelltc"=>minCellsTopicCorp,
"runqc"=>runqc,
"ldamodelname"=>ldamodelname,
"philatent"=>philatent,
"scalebulk"=>scalebulk,
"scalesc"=>scalesc,
"scfactor"=>scfactor,
"bulkfactor"=>bulkfactor,
"betapseudo"=>betapseudo,
"betaeps"=>betaeps,
"alpha"=>alpha,
"niter"=>niter,
"nchains"=>nchains,
"initflavor"=>initflavor,
"blocksize"=>blocksize,
"verbose"=>verbose,
"rmchains"=>rmchains,
"thinning"=>thinning,
"burn"=>burn)
modelparamvec = join([string(k,".",get(modelparams,k,nothing)) for k in keys(modelparams)],"-")
paramvec = join([string(k,".",get(params,k,nothing)) for k in keys(params)],"-")
println("dsLDA: loading bulk data")
bulkdata = DataFrame(bulkReads,:auto)
insertcols!(bulkdata, 1, :bulkID=>bulkIDs)
rename!(bulkdata,["bulkID",bulkGenes...])
println("dsLDA: loading sc data")
scdata = DataFrame(scReads,:auto)
insertcols!(scdata, 1, :scID=>scIDs)
rename!(scdata,["cellID",scGenes...])
println("dsLDA: loading meta data")
rename!(scMeta,["cellID","cellType","sampleID"])
scdata = innerjoin(scMeta,scdata,on="cellID")
# ensure all genes are expressed in at least 1 sample
ind_exp_bulk = findall(vec(sum(Array(bulkdata[:,bulkGenes]),dims=1)) .> 0)
bulkdata = bulkdata[:,["bulkID",bulkGenes[ind_exp_bulk]...]]
ind_exp_sc = findall(vec(sum(Array(scdata[:,scGenes]),dims=1)) .> 0)
scdata = scdata[:,["cellID","cellType",scGenes[ind_exp_sc]...]]
# intersect and reorder *expressed* genes for sc and bulk
genes = sort(intersect(names(bulkdata),names(scdata)))
if !isnothing(ldageneinds) && any(ldageneinds)
        ldagenes = scGenes[ldageneinds]
        ldacells = scIDs[ldascinds]
elseif runqc
#######################################################################################
# # begin QC for co-expressed genes and single cells
#######################################################################################
# run the qc normally done in cobos simulations
println("pre-qc size of sc data= ",size(scdata[:,genes]))
println("pre-qc size of bulk data= ",size(bulkdata[:,genes]))
# # filter genes
ldagenes = filterGenes(bulkdata,scdata,genes;minratio=ldagenethresh,protectedgenes=protectedgenes);
scrgenes = filterGenes(bulkdata,scdata,genes;minratio=scrgenethresh,protectedgenes=protectedgenes);
# # filter cells
# add the library size to each sc sample
ldalibsize = vec(sum(Array(scdata[:,ldagenes]),dims=2))
scrlibsize = vec(sum(Array(scdata[:,scrgenes]),dims=2))
insertcols!(scdata,1,:ldalibsize=>ldalibsize)
insertcols!(scdata,1,:scrlibsize=>scrlibsize)
ldacells = filterCells(scdata,:ldalibsize,ldanscthresh)
scrcells = filterCells(scdata,:scrlibsize,scrnscthresh)
celltypes = sort(unique(filter(row -> row.cellID in ldacells, scdata)[:,"cellType"]))
topic_corpus = DataFrame(zeros(length(ldagenes),length(celltypes)),:auto)
celltype_counts = []
qc_cells = filter(row -> row.cellID in ldacells, scdata) # get the data for the qc cells
for i in 1:length(celltypes)
t = celltypes[i]
topic_corpus[:,i] .= vec(mean(Matrix(filter(row -> row.cellID in ldacells, scdata)[findall(x->x==t,qc_cells[:,"cellType"]),ldagenes]),dims=1))
push!(celltype_counts,findall(x->x==t,qc_cells.cellType)) # keep track of the celltypes that pass
end
rename!(topic_corpus,celltypes)
insertcols!(topic_corpus, 1, :gene => ldagenes )
qc_topics = [ct>minCellsTopicCorp for ct in map(x->length(x),celltype_counts)] # find the celltypes that have enough cells after qc
celltypes = celltypes[qc_topics]
topic_corpus = topic_corpus[:,["gene",celltypes...]]
resize_tc_genes = findall(x->x>0,vec(sum(Matrix(topic_corpus[:,2:end]),dims=2)))
ngenes_found = length(resize_tc_genes)
println("found $ngenes_found genes expressed in remaining cell types after removing cell types with < $minCellsTopicCorp cells post-qc")
topic_corpus = topic_corpus[resize_tc_genes,:]
ldagenes = ldagenes[resize_tc_genes]
# save the qc data
if length(outputdir) > 1
qct_fn = joinpath(outputdir,"scldaPostqcT.csv")
qcc_sclda_fn = joinpath(outputdir,"qc_sclda_C.csv")
qcc_scr_fn = joinpath(outputdir,"qc_scr_C.csv")
CSV.write(qct_fn, bulkdata[:,Symbol.(ldagenes)])
CSV.write(qcc_sclda_fn, filter(row -> row.cellID in scrcells, scdata)[:,["cellID","cellType",ldagenes...]])
CSV.write(qcc_scr_fn, filter(row -> row.cellID in scrcells, scdata)[:,["cellID","cellType",scrgenes...]])
end
println("deconvolution post-cell qc size of sc data= ",size(filter(row -> row.cellID in ldacells, scdata)[:,Symbol.(ldagenes)]))
println("imputation post-cell qc size of sc data= ",size(filter(row -> row.cellID in scrcells, scdata)[:,Symbol.(scrgenes)]))
println("post-qc size of bulk data= ",size(bulkdata[:,Symbol.(ldagenes)]))
else
ldacells = scdata[:,"cellID"]
ldagenes = genes
scrcells = ldacells
scrgenes = ldagenes
celltypes = sort(unique(filter(row -> row.cellID in ldacells, scdata)[:,"cellType"]))
topic_corpus = DataFrame(zeros(length(ldagenes),length(celltypes)),:auto)
celltype_counts = []
qc_cells = filter(row -> row.cellID in ldacells, scdata) # get the data for the qc cells
for i in 1:length(celltypes)
t = celltypes[i]
topic_corpus[:,i] .= vec(mean(Matrix(filter(row -> row.cellID in ldacells, scdata)[findall(x->x==t,qc_cells[:,"cellType"]),ldagenes]),dims=1))
push!(celltype_counts,findall(x->x==t,qc_cells.cellType)) # keep track of the celltypes that pass
end
rename!(topic_corpus,celltypes)
insertcols!(topic_corpus, 1, :gene => ldagenes )
qc_topics = [ct>minCellsTopicCorp for ct in map(x->length(x),celltype_counts)] # find the celltypes that have enough cells after qc
celltypes = celltypes[qc_topics]
topic_corpus = topic_corpus[:,["gene",celltypes...]]
println("post filter size(topic_corpus)=$(size(topic_corpus))")
println("post filter unique(celltypes)=$(unique(celltypes))")
resize_tc_genes = findall(x->x>0,vec(sum(Matrix(topic_corpus[:,2:end]),dims=2)))
ngenes_found = length(resize_tc_genes)
println("found $ngenes_found genes expressed in remaining cell types after removing cell types with < $minCellsTopicCorp cells post-qc")
topic_corpus = topic_corpus[resize_tc_genes,:]
ldagenes = ldagenes[resize_tc_genes]
end
#######################################################################################
# # end QC
#######################################################################################
cmat = Matrix{Float64}(bulkdata[:,ldagenes])
if scalebulk == "column"
println("bulk column scaling selected, divide the counts by the total counts for each cell type, multiply by bulkfactor($bulkfactor)")
cmat .= cmat ./ sum(cmat,dims=1)
cmat .= cmat .* bulkfactor
elseif scalebulk == "ident"
println("bulk ident selected, no change")
cmat .= cmat
elseif scalebulk == "lognorm"
println("bulk lognorm scaling selected, divide the counts by the total counts for each sample, multiply by bulkfactor($bulkfactor), add pseudocount of 1, then log transform")
cmat .= cmat ./ sum(cmat,dims=1)
cmat .= cmat .* bulkfactor
cmat .= log.(cmat .+ 1)
elseif scalebulk == "log1p"
println("bulk log1p scaling selected, add pseudocount of 1, then log transform")
cmat .= log.(cmat .+ 1)
end
cmat = SharedArray(Int32.(round.(cmat)))
@assert size(cmat)[2] == length(ldagenes)
topicnames = celltypes
k = length(celltypes)
topic_corpus = permutedims(Matrix(topic_corpus[:,2:end]))
inds = mat2docind(cmat)
procinds = vec2procind(size(cmat)[2],nworkers())
doclengths = [i[2] for i in inds[:,end]]
corpus = initcorp(cmat,inds,procinds,doclengths,k,nworkers())
k,nlex = size(topic_corpus)
println("checking env....")
println("set $nparts for corpus of length $(length(corpus))")
    if nparts > length(corpus)
        println("partition count > length(corpus), reducing nparts to length(corpus)")
    end
    nparts = minimum([nparts,length(corpus)])
###########################################################
# # sampling parameters
###########################################################
c_perm,partition,partmap = splitApprox(corpus,nparts)
@assert c_perm == corpus[partmap] "partition failed"
@assert size(topic_corpus) == (k,nlex)
if scalesc == "column"
println("sc column scaling selected, divide the counts by the total counts for each cell type, add betapsueudo($betapseudo), multiply by scfactor($scfactor)")
betalocal = topic_corpus ./ sum(topic_corpus,dims=2)
betalocal = (betalocal .+ betapseudo) .* scfactor
elseif scalesc == "ident"
println("sc ident selected, add betapseudo($betapseudo),betaeps($betaeps)")
betalocal = topic_corpus .+ betapseudo .+ betaeps
elseif scalesc == "logpseudo"
if betapseudo < 1
println("sc logpseudo scaling selected but betapseudo < 1 (negative values will be created), updating betapseudo = 1")
betapseudo = 1.0
end
println("sc logpseudo scaling selected, add betapseudo($betapseudo), log transform, multiply by scfactor($scfactor), add betaeps($betaeps)")
betalocal = log.(topic_corpus .+ betapseudo) .* scfactor .+ betaeps
elseif scalesc == "symmetric" # debug full lda with dirichlet prior on beta
betalocal = zeros(size(topic_corpus)) .+ betapseudo .+ betaeps
end
@assert size(betalocal) == (k,nlex)
@assert all(betalocal .>= 0) "beta: negative values detected, make sure that betapseudo is large enough to cover log transform"
@assert all(sum(betalocal,dims=1) .> 0) "beta: some genes have zero expression across all cell types"
###########################################################
# # run chains
###########################################################
# @everywhere Random.seed!(myid())
meanrhat_high = [true]
niter_inc = [0]
# thetahat=[nothing]
thetahat = Vector{Matrix{Float64}}(undef,1)
meanrhat=[0.0]
# get thinning inds
if burn > 0
nstart = Int64(round(niter*burn))
else
nstart = 1
end
indthin = [mod(i,thinning)==0 ? i : nothing for i in 1:niter]
indthin = indthin[indthin.!=nothing]
indthin = indthin[indthin .> nstart]
Thetas = SharedArray(zeros(Float64,length(indthin),k,nchains,length(partmap)))
try_niter!(
alpha,betalocal,blocksize,c_perm,initflavor,k,meanrhat,meanrhat_high,niter,
nlex,nparts,outputdir,partition,partmap,philatent,thetahat,
burn,nchains,thinning,topic_corpus,topicnames,Thetas,indthin;
rmchains=rmchains,verbose=verbose)
# back up thetahat to a separate file (useful to debug EM/E-step)
# P_fn = joinpath(outputdir,"P.csv")
# CSV.write(P_fn, DataFrame(thetahat[1],:auto))
Thetachains = []
for i in 1:length(partmap)
push!(Thetachains,Chains(Thetas[:,:,:,i],topicnames))
end
thetaresult = getThetaHat(Thetas,length(partmap),k)[sortperm(partmap),:]
meanrhatresult = mean([mean(summarystats(Thetachains[i])[:,:rhat]) for i in 1:length(Thetachains)])
resultarr = [thetaresult,meanrhatresult,ldagenes,scrgenes,ldacells,scrcells,celltypes]
return resultarr
end
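# Minimal call sketch (see the package tests for a full example; read matrices are samples × genes):
#   theta, meanrhat, ldagenes, scrgenes, ldacells, scrcells, celltypes = dsLDA_E_step(
#       scReads, scGenes, scIDs, scMeta,
#       bulkReads, bulkGenes, bulkIDs, outputdir;
#       nparts=3, nchains=2, niter=1000)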
# increase niter until mean r-hat is below threshold, otherwise this will cause
# imputation iterations to halt
function try_niter!(
alpha,betalocal,blocksize,c_perm,initflavor,k,meanrhat,meanrhat_high,niter,
nlex,nparts,outputdir,partition,partmap,philatent,thetahat,
burn,nchains,thinning,topic_corpus,topicnames,Thetas,indthin;
rhatthresh=1.1,rmchains=rmchains,verbose=verbose)
while meanrhat_high[1]
for i in 1:nchains
println("starting MCMC chain $i with n = $niter")
# @everywhere Random.seed!($i*myid())
runChainSharedSV(c_perm,partition,partmap,betalocal,fill(alpha,k),
k,nlex,philatent,
Int64,Float64,nparts,blocksize,niter,i,
outputdir,initflavor,Thetas,indthin,verbose=verbose)
end
println("analysis: begin convergence diagnostics")
if meanrhat[1] < rhatthresh
meanrhat_high[1] = false
println("\n r-hat < $rhatthresh, deconvolution succeeds \n")
else
niter = niter*2
println("\n r-hat above threshold=$rhatthresh, MCMC restart with n = ",niter,"\n")
end
end
end | DistributedStwdLDA | https://github.com/mkarikom/DistributedStwdLDA.jl.git |
|
[
"MIT"
] | 0.2.1 | 3ee7c987e2fbe00cabe6ef0463d7cb9d91135df1 | code | 6703 | """
getIndRand(cc)
Get the row of the indicator for column `cc`, else return a random row.
# Examples
```
getIndRand(cc)
```
"""
function getIndRand(cc)
if all(cc .== 0)
println("zeros encountered")
return rand(1:length(cc))
else
return findfirst(x->x==1,cc)
end
end
# """
# invEpInd(epochinds,Z)
# Invert the partitioned vector of indicator matrices `Z` and return an array similar to `getA()`.
# # Examples
# ```
# invEpInd(epochinds,Z)
# ```
# """
# function invEpInd(epochinds,Z)
# inds = reduce(vcat,[reduce(vcat,epi) for epi in epochinds])
# a = reduce(hcat,convert.(Array,Z))[:,sortperm(inds)]
# map(t->getIndRand(t), eachcol(a))
# end
"""
initShared(corpus,partition,k,l,alphalocal,betalocal)
Initialize:
1) shared data vectors `T`,`D`
2) shared assignment vector `Z`
3) shared indicator matrices `Z`,`J`,`W`
4) shared sufficient statistics `nj`
5) global sufficient statistics `nwglobal`,`nglobal`
6) shared increment matrices `nwinc`,`ninc`
7) vectors of 'local indices': `indt`,`inda`,`indd`,`indnj`,`indnw` corresponding to each worker id in `wkids`
# Examples
```
using Distributions
corp = map(x->Word.(rand(1:100,x),Int8.(rand(Categorical([0.5,0.3,0.2]),x))),[10,50,100])
c_perm,partition,partmap = splitApprox(corp,2)
initShared(c_perm,partition,3,100,fill(0.1,3),fill(0.1,3,100))
```
"""
function initShared(corpus,partition,k,l,alphalocal,betalocal;statefloat=Float32,stateint=Int32,sstatfloat=Float32,sstatint=Int32)
Z = SharedArray(ones(Bool,k,sum(map(x->length(x),corpus)))) # size Z is k by # corpus tokens
J = SharedArray(ones(Bool,sum(map(x->length(x),corpus)),length(partition))) # size J is # corpus tokens by # docs
W = SharedArray(ones(Bool,sum(map(x->length(x),corpus)),l)) # size W is # corpus tokens by # terms
T = SharedVector(ones(stateint,sum(map(x->length(x),corpus))))
A = SharedVector(ones(stateint,sum(map(x->length(x),corpus))))
D = SharedVector(ones(stateint,sum(map(x->length(x),corpus))))
ends = cumsum(map(x->length(x),corpus)) # ends of the segments for each doc
starts = cumsum(map(x->length(x),corpus)) .- map(x->length(x),corpus) .+ 1 # the starts of the segments for each doc
nwstarts = map(x->x-l,cumsum(fill(l,length(partition)))).+1
nwends = cumsum(fill(l,length(partition)))
nj = SharedArray(zeros(sstatint,k,length(partition)))
nw = SharedArray(zeros(sstatint,k,l,length(partition)))
nwinc = SharedArray(zeros(sstatint,k,l*length(partition)))
indnw = [(1:k,nwstarts[1]:nwends[1])]
indt = [starts[1]:ends[1]]
for i in 2:length(partition)
push!(indnw,(1:k,nwstarts[i]:nwends[i]))
push!(indt,starts[i]:ends[i])
end
indt=SharedArray(indt)
indnw=SharedArray(indnw)
@sync @distributed for i in 1:length(partition)
ba = getBitarrays(corpus,partition[i],ttype=stateint,atype=stateint,dtype=stateint)
T[starts[i]:ends[i]] .= vec(ba[:T])
A[starts[i]:ends[i]] .= vec(ba[:A])
D[starts[i]:ends[i]] .= vec(ba[:D])
Z[1:k,starts[i]:ends[i]] = getZ(ba[:A],k)
J[starts[i]:ends[i],1:length(partition)] = getJ(corpus[partition[i]],partition[i],length(partition))
W[starts[i]:ends[i],1:l] = getW(corpus[partition[i]],l)
nw[:,:,i] .= Z[1:k,starts[i]:ends[i]] * W[starts[i]:ends[i],1:l]
nj[1:k,i] .= vec(Z[1:k,starts[i]:ends[i]]*J[starts[i]:ends[i],1:length(partition)][:,partition[i]])
end
nw = SharedArray{sstatint,2}(sum(nw,dims=3)[:,:,1])
println("initialization complete for:")
println("Z{",typeof(Z),"}: ",Base.summarysize(Z)," bytes")
println("J{",typeof(J),"}: ",Base.summarysize(J)," bytes")
println("W{",typeof(W),"}: ",Base.summarysize(W)," bytes")
println("T{",typeof(T),"}: ",Base.summarysize(T)," bytes")
println("A{",typeof(A),"}: ",Base.summarysize(A)," bytes")
println("D{",typeof(D),"}: ",Base.summarysize(D)," bytes")
println("nwinc{",typeof(nwinc),"}: ",Base.summarysize(nwinc)," bytes")
# copy the global counts to separate sampling vectors
if length(partition) > 1
for p in 2:length(partition)
nw = hcat(nw,nw[:,1:l])
end
println("allocated nw{",typeof(nw),"}: ",sizeof(nw)," bytes")
end
return (T=T,A=A,D=D,Z=Z,J=J,W=W,
nj=nj,nwinc=nwinc,nw=nw,
alpha=SharedArray{sstatfloat,1}(alphalocal),beta=SharedArray{sstatfloat,2}(betalocal),
indt=indt,indnw=indnw)
end
# """
# getwids(arr)
# Get the worker ids of a DArray or SharedArray.
# # Examples
# ```
# getwids(arr)
# ```
# """
# function getwids(arr)
# if typeof(arr) <: DArray
# ids = [(i,@fetchfrom i DistributedArrays.localindices(arr)) for i in workers()]
# elseif typeof(arr) <: SharedArray
# ids = [(i,@fetchfrom i SharedArrays.localindices(arr)) for i in workers()]
# end
# ids = filter(!isnothing,[length(i[2][1]) > 0 ? i[1] : nothing for i in ids])
# end
"""
splitApprox(corpus,n)
Re-order `corpus` into `n` chunks each containing approximately the same number of documents.
Return the shuffled view `c_view`, a vector `partition` of unit-ranges for each chunk, and vector `partmap`, where `c_view[i] = corpus[partmap[i]]`.
# Examples
```
c_perm,partition,partmap = splitApprox(corp,2)
```
"""
function splitApprox(corpus,n)
parts = [findall(x->x==i,mod.([1:length(corpus);],n) .+ 1) for i in 1:n]
partmap = reduce(vcat,parts)
ind1 = [0,cumsum([length(i) for i in parts])[1:end-1]...] .+ 1
ind2 = cumsum([length(i) for i in parts])
partition = UnitRange.(ind1,ind2)
c_perm = corpus[partmap]
c_perm,partition,partmap
end
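# Worked example: for a 5-document corpus and n = 2,
#   mod.(1:5,2) .+ 1 == [2,1,2,1,2], so parts == [[2,4],[1,3,5]],
#   partmap == [2,4,1,3,5], and partition == [1:2, 3:5];
# c_perm[3:5] are then the documents assigned to the second chunk.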
"""
epochIndex(corpus,n,l)
Make random, approximately equal-length partition of docs and terms.
# Examples
```
epochIndex(corpus,n,l)
```
"""
function epochIndex(corpus,n,l)
# partition the documents
docindex = [findall(x->x==i,mod.([1:length(corpus);],n) .+ 1) for i in 1:n]
# partition the terms
termindex = [findall(x->x==i,mod.([1:l;],n) .+ 1) for i in 1:n]
return (docindex=docindex,termindex=termindex)
end
"""
filterTokens(n,D,T,docindex,termindex)
Generate global token epoch indices by filtering concatenated tokens.
# Examples
```
filterTokens(n,D,T,docindex,termindex)
```
"""
function filterTokens(n,D,T,docindex,termindex)
epochs = []
for i in 1:n
termindexShft = circshift(termindex,i-1)
chunks = []
for j in 1:n
docs = reduce(vcat,[findall(d->d==di,vec(D)) for di in docindex[j]])
terms = reduce(vcat,[findall(t->t==ti,vec(T)) for ti in termindexShft[j]])
inds = intersect(docs,terms)
push!(chunks,inds)
end
push!(epochs,chunks)
end
epochs
end
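# Scheduling note: circshift-ing the term partition by one chunk per epoch yields a
# diagonal schedule, so over n epochs every (document-chunk, term-chunk) pair is
# visited exactly once and concurrent chunks never share a term block within an epoch.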
| DistributedStwdLDA | https://github.com/mkarikom/DistributedStwdLDA.jl.git |
|
[
"MIT"
] | 0.2.1 | 3ee7c987e2fbe00cabe6ef0463d7cb9d91135df1 | code | 7272 | """
    chunkSharedSV!(philatent,T,A,D,nj,nwinc,nw,alpha,beta,indt,indnw,nlex; verbose=false)
Iterate over one worker's chunk of data on a SharedArray, updating the local copies of sufficient stats: `nj`,`nwinc`,`nw`, and assignments: `A` for all tokens indexed by `indt`.
Sample the topic index `z ~ Z` from the sufficient stats of the token from document `j` corresponding to lexicon term index `i`.
```math
p(z=k) \\propto \\frac{nw^{\\lnot ij}+\\beta[i]}{n^{\\lnot ij} + W\\beta[i]} (nj^{\\lnot ij} + \\alpha)
```
The integer flag `philatent ∈ {0,1,2}` selects how the word/topic prior `beta` enters the conditional: `0` uses `beta[:,t]` directly, `1` uses `beta[:,t]` normalized over the lexicon, and `2` collapses the current counts `nw` with `beta`.
# Examples
```
chunkSharedSV!(philatent,T,A,D,nj,nwinc,nw,alpha,beta,indt,indnw,nlex; verbose=false)
```
"""
function chunkSharedSV!(philatent,T,A,D,nj,nwinc,nw,alpha,beta,indt,indnw,nlex; verbose=false)
indnw_linear = LinearIndices(nw)[indnw...]
for i in 1:length(indt)
t = T[indt[i]]
a = A[indt[i]]
d = D[indt[i]]
# take current doc/term complement of nj,nw
njI = LinearIndices(nj)
nji = njI[CartesianIndex(a,d)]
nj_old = nj[nji] # copy old for debug
if verbose
println("attempting to access a:$a, t:$t of LinearIndices(nw)[indnw...]:")
end
nwi = indnw_linear[CartesianIndex(a,t)]
nw_old = nw[nwi] # copy old for debug
if verbose
println("\n \n")
println("sampling chunk token $i of ",length(indt)," from doc $d")
println("nji=$nji,nwi=$nwi")
println("nj[nji]=",nj[nji])
println("nw[nwi]=",nw[nwi])
println("nj[:,d]=",nj[:,d])
println("nw[:,t]=",nw[:,t])
println("indnw:\n",indnw)
println("size(nw[indnw...]):",size(nw[indnw...]))
println("size(nw):",size(nw))
println("beta[:,t]:",beta[:,t])
end
@assert nj_old > 0 "nji must be positive"
@assert nw_old > 0 "nwi must be positive"
nj[nji] -= 1
nw[nwi] -= 1
# sample the new assignment
@assert philatent in [2,1,0] "latent phi needs to be specified"
if philatent == 0
wprob = beta[:,t]
elseif philatent == 1
wprob = beta[:,t] ./ (beta[:,t] .* nlex)
elseif philatent == 2
wprob = (nw[indnw...][:,t] .+ beta[:,t]) ./ sum(nw[indnw...] .+ beta[:,t],dims=2)
end
dprob = (nj[:,d] .+ vec(alpha)) ./ sum(nj[:,d] .+ vec(alpha))
z = dprob .* wprob
if verbose
println("\n \n")
println("z:",z)
println("wprob:",wprob)
println("dprob:",dprob)
end
samp = rand()*sum(z)
a_new = findfirst(x->x>0,vec(cumsum(z,dims=1)) .- samp)
if verbose
println("\n \n")
println("a_new:",a_new)
println("a:",a)
println("d:",d)
println("njI:",njI)
println("CartesianIndex(a_new,d):",CartesianIndex(a_new,d))
end
if isnothing(a_new)
a_new = a
end
# update nj, nw
nji_new = njI[CartesianIndex(a_new,d)]
nwi_new = indnw_linear[CartesianIndex(a_new,t)]
nj[nji_new] += 1
nw[nwi_new] += 1
A[indt[i]] = a_new
# record assignment nwinc
nwinc_old = nwinc[nwi_new]
nwinc[nwi_new] += 1
if verbose
println("\n \n")
println("A=$a->$a_new")
println("nwi=$nwi,size(nw[indnw...])=",size(nw[indnw...]))
println("nwinc update $nwi_new: $nwinc_old->",nwinc[nwi_new])
end
if verbose
println("nj update: doc=$d, topic $nj_old -> ",nj[nji])
println("nw update: term=$t, topic $nw_old -> ",nw[nwi])
# sleep(0.2)
end
end
end
"""
    sampleSharedSV!(philatent,T,A,D,nj,nwinc,nw,alpha,beta,indt,indnw,k,nlex,wids; verbose=false)
Parallel loop over SharedArray data, iterating locally on each worker in `wids`.
See `initShared` for parameter definitions: `T`,`A`,`D`,`nj`,`nwinc`,`nw`,`alpha`,`beta`,`indt`,`indnw`.
Additional parameters: number of topics `k`, number of lexicon terms `nlex`.
The integer flag `philatent` selects how the word/topic prior `beta` enters the sampler (see `chunkSharedSV!`).
After the chunk updates, the per-worker increments in `nwinc` are summed and broadcast back to every worker's copy of `nw`.
# Examples
```
sampleSharedSV!(philatent,T,A,D,nj,nwinc,nw,alpha,beta,indt,indnw,k,nlex,wids)
```
"""
function sampleSharedSV!(philatent,T,A,D,nj,nwinc,nw,alpha,beta,indt,indnw,k,nlex,wids; verbose=false)
nw_global = zeros(k,nlex)
# sample the assignments
# test
if verbose
for i in 1:length(wids)
println("i:",i)
println("indt[i]:",indt[i])
println("indnw[i]:",indnw[i])
chunkSharedSV!(philatent,T,A,D,nj,nwinc,nw,alpha,beta,indt[i],indnw[i],nlex,verbose=verbose)
end
else
@sync begin
for i in 1:length(wids)
@async remotecall_wait(chunkSharedSV!,wids[i],philatent,T,A,D,nj,nwinc,nw,alpha,beta,indt[i],indnw[i],nlex,verbose=verbose)
end
end
end
# synchronize global nw,n
for i in 1:length(wids)
nw_global += nwinc[indnw[i]...]
end
for i in 1:length(wids)
nw[indnw[i]...] .= nw_global
end
end
"""
    runChainSharedSV(corpus0,partition,partmap,betalocal,alphalocal,k,nlex,philatent,stateint,statefloat,nworkers_local,chainblockn,n_iter,chain_i,drnm,initcorp,Thetas,indthin; verbose=false)
Run one MCMC chain: re-initialize the assignments according to `initcorp` ("beta", "chain", or "unif"), build the shared state with `initShared`, then run `n_iter` sweeps of `sampleSharedSV!`, storing the thinned theta samples (iterations in `indthin`) into `Thetas`.
See `initShared` for parameter definitions: `T`,`A`,`D`,`nj`,`nwinc`,`nw`,`alpha`,`beta`,`indt`,`indnw`.
# Examples
```
runChainSharedSV(c_perm,partition,partmap,betalocal,fill(alpha,k),k,nlex,philatent,Int64,Float64,nparts,blocksize,niter,chain_i,outputdir,"unif",Thetas,indthin)
```
"""
function runChainSharedSV(corpus0,partition,partmap,betalocal,alphalocal,
k,nlex,philatent,
stateint,statefloat,nworkers_local,chainblockn,n_iter,
chain_i,drnm,initcorp,Thetas,indthin; verbose=false)
ccopy = deepcopy(corpus0)
if initcorp == "beta"
corpus = initcorpBeta(corpus0,betalocal)
elseif initcorp == "chain"
corpus = initcorpChain(corpus0,chain_i,k)
elseif initcorp == "unif"
corpus = initcorpUnif(corpus0,k)
end
@assert ccopy != corpus "re-initialization failed"
println("starting chain $chain_i")
println("begin initializing state arrays: T,A,D,Z,J,W")
T,A,D,Z,J,W,nj,nwinc,nw,alpha,beta,indt,indnw = initShared(corpus,partition,k,nlex,alphalocal,betalocal,sstatint=Int64)
@assert size(nw) == (k,nlex*nworkers_local) "nw: incorrect shape or number of distributed copies, topics"
@assert size(nj) == (k,length(corpus)) "nj: incorrect shape or number of documents, topics"
wids = workers()[1:nworkers_local]
thin_ind = 1 # the index of the saved thinned samples
@showprogress 1 "Running $n_iter samples..." for i in 1:Int(n_iter)
# println("iteration $i")
sampleSharedSV!(philatent,T,A,D,nj,nwinc,nw,alpha,beta,indt,indnw,k,nlex,wids; verbose=verbose)
if i in indthin
@sync @distributed for doc_id = 1:length(partmap)
thetaIter!(Thetas,A,J,doc_id,alpha,thin_ind,chain_i,k)
end
thin_ind+=1
end
end
println("sampling: chain $chain_i mcmc has finished")
end | DistributedStwdLDA | https://github.com/mkarikom/DistributedStwdLDA.jl.git |
|
[
"MIT"
] | 0.2.1 | 3ee7c987e2fbe00cabe6ef0463d7cb9d91135df1 | code | 2670 | using DistributedStwdLDA
using Test, Statistics, StatsBase, Random, Distributions, CSV, DataFrames
using Distributed, SharedArrays
nwork = 4
while length(workers()) < nwork
addprocs(1)
end
@everywhere using DistributedStwdLDA
function fillpb(scMeta,scReads,trueP;ncells=100,celltypes=["alpha","beta","gamma","delta"])
pbulk = zeros(Int,size(trueP)[1],size(scReads)[2]-1)
for i in 1:size(trueP)[1]
for j in 1:size(trueP)[2]
# @infiltrate
allids = scMeta[scMeta.cellType.==celltypes[j],:].cellID
inds = StatsBase.sample(1:length(allids),Int(ncells*trueP[i,j]),replace=true)
ids = allids[inds]
            pbulk[i,:] .+= vec(sum(Matrix(filter(x -> x.cellID in ids,scReads)[:,2:end]),dims=1)) # accumulate reads across cell types
end
end
pbulk
end
@testset "deconvolution: baron" begin
data_dir = joinpath(@__DIR__,"data","baron")
C_fn = joinpath(data_dir,"BaronSC.DM.isletVST_C.csv")
pDataC_fn = joinpath(data_dir,"BaronSC.DM.isletVST_pDataC.csv")
thetabenchmark=nothing
# sc metadata
scMeta = DataFrame(CSV.File(pDataC_fn))[:,["cellID","cellType","sampleID"]]
scMeta = filter(x -> x.cellType in ["alpha","beta","gamma","delta"],scMeta)
# sc reads
scReads = DataFrame(CSV.File(C_fn,transpose=true)) # this is an R data frame with "row names"
rename!(scReads,Symbol.(["cellID",names(scReads)[2:end]...]))
scReads = filter(x -> x.cellID in scMeta.cellID,scReads)
# true proportions
trueP = [
0.1 0.3 0.5 0.1;
0.5 0.1 0.3 0.1;
0.3 0.1 0.1 0.5]
# bulk reads
pbulk = fillpb(scMeta,scReads,trueP,ncells=50,celltypes=["alpha","beta","gamma","delta"])
bulkReads = DataFrame(pbulk,:auto)
rename!(bulkReads,names(scReads[:,2:end]))
insertcols!(bulkReads, 1, :bulkID => ["Bulk1","Bulk2","Bulk3"] )
trueP = DataFrame(trueP,:auto)
rename!(trueP,["alpha","beta","gamma","delta"])
insertcols!(trueP, 1, :bulkID => ["Bulk1","Bulk2","Bulk3"] )
jl_output = dsLDA_E_step(
Matrix(scReads[:,2:end]),
names(scReads[:,2:end]),
scReads.cellID,
scMeta,
Matrix(bulkReads[:,2:end]),
names(bulkReads[:,2:end]),
bulkReads.bulkID,
"",
nparts=3,
runqc=true,
ldagenethresh=0.1,
minCellsTopicCorp=1,
scalebulk="log1p",
bulkfactor=1000,
scalesc="ident",
betapseudo=0.0,
scfactor=1.0,
betaeps=0.01,
nchains=2,
alpha=0.1,
philatent=2,
blocksize=5,
niter=1000,
initflavor="unif",
verbose=false,
burn=0.5,
thinning=5,
rmchains=true
)
print(sqrt(mean((jl_output[1].-Matrix(trueP[:,2:end])).^2)))
@test size(jl_output[1]) == size(trueP[:,2:end])
end
| DistributedStwdLDA | https://github.com/mkarikom/DistributedStwdLDA.jl.git |
|
[
"MIT"
] | 0.2.1 | 3ee7c987e2fbe00cabe6ef0463d7cb9d91135df1 | docs | 56 | # DistributedStwdLDA
distributed, static topic/word LDA
| DistributedStwdLDA | https://github.com/mkarikom/DistributedStwdLDA.jl.git |
|
[
"MIT"
] | 0.1.5 | 40a0f73be804e174efa838cdc60d86b3165f37a1 | code | 258 | module MathieuF
using LinearAlgebra
using MathieuFunctions
export MathieuCharA,MathieuCharB,MathieuCharλ,MathieuCharVecλ,MathieuCharVecWronλ,MathieuWron,MathieuFunc,MathieuFuncPrime,MathieuExponent
include("char-coeff-new-indexing.jl")
end | MathieuF | https://github.com/Lightup1/MathieuF.jl.git |
|
[
"MIT"
] | 0.1.5 | 40a0f73be804e174efa838cdc60d86b3165f37a1 | code | 8236 |
"""
MathieuCharA(ν,q)
char value A_ν for Mathieu's equation
y'' + (A_ν - 2 q cos( 2z )) y = 0
where
q ∈ ℝ - parameter
ν ∈ ℝ - characteristic exponent
"""
function MathieuCharA(ν::Real,q::Real)
if isinteger(ν)
return MathieuCharA(Int(ν),q)
else
return MathieuCharλ(ν,q)
end
end
function MathieuCharA(ν::Int,q::Real)
if ν==0
return charλ(abs(q), 0.0; k=1:1)[1]
else
return charA(q; k=abs(ν):abs(ν))[1]
end
end
"""
MathieuCharB(ν,q)
char value B_ν for Mathieu's equation
y'' + (B_ν - 2 q cos( 2z )) y = 0
where
q ∈ ℝ - parameter
ν ∈ ℝ - fractional part of the non-integer order
"""
function MathieuCharB(ν::Real,q::Real)
if isinteger(ν)
return MathieuCharB(Int(ν),q)
else
return MathieuCharλ(ν,q)
end
end
function MathieuCharB(ν::Int,q::Real)
return charB(q; k=abs(ν):abs(ν))[1]
end
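"""
    MathieuCharλ(ν,q)

char value λ_ν for Mathieu's equation

y'' + (λ_ν - 2 q cos( 2z )) y = 0

for real (typically non-integer) order ν, computed as an eigenvalue of the
symmetric tridiagonal matrix with diagonal (2k + ν_)^2 (k = -N:N) and
off-diagonal q, where ν_ is the remainder of |ν| modulo 2 rounded to nearest;
the matrix size N follows the empirical formula from Shirts (1993).
"""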
function MathieuCharλ(ν::Real,q::Real) # reduced = true
#nu = reduced ? rem(nu_+1,2)-1 : nu_;
ν_abs=abs(ν)
(k,ν_)=divrem(ν_abs,2,RoundNearest)
# Set matrix size using formula from Shirts paper (1993), Eqs. (2.1)-(2.2).
# nu0 = nu + maximum(k)
C = (8.46 + 0.444*ν)/(1 + 0.085*ν)
D = (0.24 + 0.0214*ν)/(1 + 0.059*ν)
N = ceil(Int, (ν + 2 + C*abs(q)^D)/2) # matrix size is 2N+1
(two, q2, nu2) = float.(promote(2, q, ν_)) # d0 and d1 must be of the same type
d0 = (two .* (-N:N) .+ nu2).^2
d1 = fill(q2, 2 * N)
A = SymTridiagonal(d0, d1)
a = eigvals!(A, trunc(Int,ν)+1:trunc(Int,ν)+1)[1]
return a
end
"""
Return eigenvalue, eigenvector (Fourier coefficient) of the Mathieu characteristic value problem and the index of the k=0 Fourier coefficient.
"""
function MathieuCharVecλ(ν::Real,q::Real) # reduced = true
#nu = reduced ? rem(nu_+1,2)-1 : nu_;
ν_abs=abs(ν)
(k,ν_)=divrem(ν_abs,2,RoundNearest)
# Set matrix size using formula from Shirts paper (1993), Eqs. (2.1)-(2.2).
# nu0 = nu + maximum(k)
C = (8.46 + 0.444*ν)/(1 + 0.085*ν)
D = (0.24 + 0.0214*ν)/(1 + 0.059*ν)
N = ceil(Int, (ν + 2 + C*abs(q)^D)/2) # matrix size is 2N+1
(two, q2, nu2) = float.(promote(2, q, ν_)) # d0 and d1 must be of the same type
d0 = (two .* (-N:N) .+ nu2).^2
d1 = fill(q2, 2 * N)
A = SymTridiagonal(d0, d1)
vals,vecs = eigen!(A)
center_index=Int(N+1+k)
return vals[trunc(Int,ν)+1],vecs[:,trunc(Int,ν)+1],center_index
end
"""
Return eigenvalue, eigenvector (Fourier coefficient) of the Mathieu characteristic value problem, the index of the k=0 Fourier coefficient and the Wronskian.
For Mathieu's equation
y'' + (B_ν - 2 q cos( 2z )) y = 0,
the Wronskian is defined by
```math
\\frac{\\dot{f} f^*-f \\dot{f^*}}{2i}
```
where ``f`` is the solution of the Mathieu equation and ``f^*`` is its complex conjugate.
"""
function MathieuCharVecWronλ(ν::Real,q::Real)
a,C_2k,index=MathieuCharVecλ(ν,q)
W=MathieuWron(ν,C_2k,index)
return a,C_2k,index,W
end
"""
`W=MathieuWron(ν,q)` or `W=MathieuWron(ν,C_k,index)`.
Return the Wronskian.
For Mathieu's equation
y'' + (a- 2 q cos( 2z )) y = 0,
the Wronskian is defined by
```math
\\frac{\\dot{f} f^*-f \\dot{f^*}}{2i}
```
where ``f=e^{i\\nu z}\\sum_k{C_{k}e^{i2kz}}`` with ``\\sum_k{C_{k}^2}=1`` is the solution of the Mathieu equation and ``f^*`` is its complex conjugate.
"""
function MathieuWron(ν,q)
_,C_2k,index=MathieuCharVecλ(ν,q)
return MathieuWron(ν,C_2k,index)
end
function MathieuWron(ν,C_2k::Vector,index::Int)
W=0.0
for i in eachindex(C_2k),j in eachindex(C_2k)
W+=C_2k[i]*C_2k[j]*(ν+(i-index+j-index))
end
return W # For use in real physical motion, W should be multiplied by ω_d/2 with normalization satisfying f(0)=1.
end
"""
``f=e^{i\\nu z}\\sum_k{C_{2k}e^{i2kz}}`` with ``\\sum_k{C_{2k}^2}=1`` is the solution of the Mathieu equation.
"""
function MathieuFunc(ν,q,z)
_,C_2k,index=MathieuCharVecλ(ν,q)
f=sum(C_2k.*exp.(im*(2*collect(1-index:length(C_2k)-index).+ν)*z))
    # TODO: use an @inbounds loop to speed this up
return f
end
"""
``\\partial f/\\partial z``
"""
function MathieuFuncPrime(ν,q,z)
_,C_2k,index=MathieuCharVecλ(ν,q)
f=sum(im*(2*collect(1-index:length(C_2k)-index).+ν).*C_2k.*exp.(im*(2*collect(1-index:length(C_2k)-index).+ν)*z))
    # TODO: use an @inbounds loop to speed this up
return f
end
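# Consistency sketch: for any z the Wronskian definition above implies
#   imag(MathieuFuncPrime(ν,q,z) * conj(MathieuFunc(ν,q,z))) ≈ MathieuWron(ν,q),
# e.g. (ν,q,z) = (0.3, 1.0, 0.7); the two sides agree to within eigensolver tolerance.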
"""
[ν,c]=mathieu_mathieuexp(a,q;ndet::Int=20)
This program evaluates the characteristic exponent ν,
corresponding to solutions of Mathieu Equation
y''(t)+(a-2q cos(2t)) y(t)=0;
where a and q are fixed real variables.
ndet is a positive integer number: it is the matrix dimension used
in the algorithm. Precision increases by increasing ndet.
Default value is ndet=20
The alghoritm consider two different cases:
a=(2k)^2 or not (k integer).
ν is such that its real part belongs to the interval [0,2]
Of course, every other solutions are obtained by the formula
±ν+2k, with k integer.
"""
function MathieuExponent(a,q;ndet::Int=20,has_img::Bool=true,max_ndet::Int=1000)
    x = (a >= 0) && (sqrt(abs(a))/2 % 1 == 0) # true when a = (2k)^2 with k integer
N=2*ndet+1 #matrix dimension
a,q=float.(promote(a,q))
d=q./((2*(-ndet:ndet) .+x).^2 .-a)
m=Tridiagonal(d[2:N], ones(N), d[1:N-1])
delta=det(m)
if x
if 0<=delta<=1
alpha=acos(2*delta-1)/pi
ν=mod(alpha,2) #modular reduction to the solution [0,2]
H_nu=SymTridiagonal((ν .+ 2*(-ndet:ndet)).^2 .- a,q*ones(N-1))
ck=eigvecs(H_nu,[0.0])[:,1]
return ν,ck
elseif has_img==true
alpha=acos(2*Complex(delta)-1)/pi
ν=alpha*(2*(imag(alpha)>=0)-1) #change an overall sign so that the imaginary part is always positive.
ν=mod(real(ν),2)+im*imag(ν) #modular reduction to the solution [0,2]
q=Complex(q)
H_nu=Matrix(SymTridiagonal((ν .+ 2*(-ndet:ndet)).^2 .- a,q*ones(N-1)))
vals,vecs=eigen(H_nu)
_,idx=findmin(abs,vals)
return ν,vecs[:,idx]
elseif ndet<max_ndet
MathieuExponent(a,q;ndet=min(2*ndet,max_ndet),has_img=false,max_ndet=max_ndet)
else
@warn "Expect real output for a=$a and q=$q, but the result is complex even for ndet=$ndet."
alpha=acos(2*Complex(delta)-1)/pi
ν=alpha*(2*(imag(alpha)>=0)-1) #change an overall sign so that the imaginary part is always positive.
ν=mod(real(ν),2)+im*imag(ν) #modular reduction to the solution [0,2]
q=Complex(q)
H_nu=Matrix(SymTridiagonal((ν .+ 2*(-ndet:ndet)).^2 .- a,q*ones(N-1)))
vals,vecs=eigen(H_nu)
_,idx=findmin(abs,vals)
return ν,vecs[:,idx]
end
else
beta=delta*sin(pi*sqrt(Complex(a))/2)^2
beta=real(beta) # beta should be real.
if 0<=beta<=1
alpha=2*asin(sqrt(beta))/pi
ν=mod(alpha,2) #modular reduction to the solution [0,2]
H_nu=SymTridiagonal((ν .+ 2*(-ndet:ndet)).^2 .- a,q*ones(N-1))
ck=eigvecs(H_nu,[0.0])[:,1]
return ν,ck
elseif has_img==true
alpha=2*asin(sqrt(Complex(beta)))/pi
ν=alpha*(2*(imag(alpha)>=0)-1) #change an overall sign so that the imaginary part is always positive.
ν=mod(real(ν),2)+im*imag(ν) #modular reduction to the solution [0,2]
q=Complex(q)
H_nu=Matrix(SymTridiagonal((ν .+ 2*(-ndet:ndet)).^2 .- a,q*ones(N-1)))
vals,vecs=eigen(H_nu)
_,idx=findmin(abs,vals)
return ν,vecs[:,idx]
elseif ndet<max_ndet
MathieuExponent(a,q;ndet=min(2*ndet,max_ndet),has_img=false,max_ndet=max_ndet)
else
@warn "Expect real output for a=$a and q=$q, but the result is complex even for ndet=$ndet."
alpha=2*asin(sqrt(Complex(beta)))/pi
ν=alpha*(2*(imag(alpha)>=0)-1) #change an overall sign so that the imaginary part is always positive.
ν=mod(real(ν),2)+im*imag(ν) #modular reduction to the solution [0,2]
q=Complex(q)
H_nu=Matrix(SymTridiagonal((ν .+ 2*(-ndet:ndet)).^2 .- a,q*ones(N-1)))
vals,vecs=eigen(H_nu)
_,idx=findmin(abs,vals)
return ν,vecs[:,idx]
end
end
end
| MathieuF | https://github.com/Lightup1/MathieuF.jl.git |
|
[
"MIT"
] | 0.1.5 | 40a0f73be804e174efa838cdc60d86b3165f37a1 | code | 5124 | using Test, MathieuFunctions,MathieuF
using LinearAlgebra
using DelimitedFiles
readcsv(f) = DelimitedFiles.readdlm(f, ',')
function tapprox(a, b; atol=1e-15)
normval = norm(a - b, Inf)
@info "normval = $normval"
isapprox(a, b; norm= x -> norm(x, Inf), atol = atol)
end
@testset "basic" begin
@test maximum([MathieuCharA(ν,0) for ν in 0:100] - [0:100;].^2) == 0
@test norm([MathieuCharB(ν,0) for ν in 1:100] - [1:100;].^2) == 0
end
filename = "MathieuCharacteristicA-1.csv"
@testset "$filename" begin
test1 = readcsv(filename)
r=[MathieuCharA(ν,q) for ν in 0:10, q in -10:.01:10]
@test tapprox(test1, r; atol=7.5e-13)
end
filename = "MathieuCharacteristicA-2.csv"
@testset "$filename" begin
test1 = readcsv(filename)
r = [MathieuCharA(ν,q) for ν in 0:3, q in 30:.01:50]
@test tapprox(test1, r; atol=7.6e-13) # NOTE: was 7.5e-13
end
filename = "MathieuCharacteristicB-1.csv"
@testset "$filename" begin
test1 = readcsv(filename)
r = [MathieuCharB(ν,q) for ν in 1:10, q in -10:.01:10]
@test tapprox(test1, r; atol=7.5e-13)
end
filename = "MathieuCharacteristicB-2.csv"
@testset "$filename" begin
test1 = readcsv(filename)
r = [MathieuCharB(ν,q) for ν in 1:3, q in 30:.01:50]
@test tapprox(test1, r; atol=2.8e-11)
end
filename = "MathieuCharacteristicL-1.csv"
@testset "$filename" begin
test1 = readcsv(filename)[1:100,:]
test2 = Float64[MathieuCharA(ν,q) for ν in [0:.01:0.99;], q in -5:.01:5]
@test tapprox(test1, test2, atol=7.5e-15)
end
filename = "MathieuCharacteristicL-2.csv"
@testset "$filename" begin
test1 = readcsv(filename)[1:100,:]
test2 = Float64[MathieuCharA(ν,q) for ν in [0:.01:0.99;], q in 30:.01:50]
@test tapprox(test1, test2, atol=4.5e-14)
end
filename = "MathieuCharacteristicL-3.csv"
@testset "$filename" begin
test1 = readcsv(filename)
test2 = Float64[MathieuCharA(ν,q) for ν in [1.01:.01:1.99;], q in -5:.01:5]
test3 = Float64[MathieuCharB(ν,q) for ν in [1.01:.01:1.99;], q in -5:.01:5]
@test tapprox(test1, test2, atol=6e-14)
@test tapprox(test1, test3, atol=6e-14)
end
filename = "MathieuCharacteristicL-4.csv"
@testset "$filename" begin
test1 = readcsv(filename)
test2 = Float64[MathieuCharA(ν,q) for ν in [1.01:.01:1.99;], q in 30:.01:50]
test3 = Float64[MathieuCharB(ν,q) for ν in [1.01:.01:1.99;], q in 30:.01:50]
@test tapprox(test1, test2, atol=1e-12)
@test tapprox(test1, test3, atol=1e-12)
end
filename = "MathieuCharacteristicL-5.csv"
@testset "$filename" begin
test1 = readcsv(filename)
test2 = Float64[MathieuCharA(ν,q) for ν in [20.01:.01:20.99;], q in -5:.01:5]
test3 = Float64[MathieuCharB(ν,q) for ν in [20.01:.01:20.99;], q in -5:.01:5]
@test tapprox(test1, test2, atol=1e-12)
@test tapprox(test1, test3, atol=1e-12)
end
filename = "MathieuCharacteristicL-6.csv"
@testset "$filename" begin
test1 = readcsv(filename)
test2 = Float64[MathieuCharA(ν,q) for ν in [20.01:.01:20.99;], q in 30:.01:50]
test3 = Float64[MathieuCharB(ν,q) for ν in [20.01:.01:20.99;], q in 30:.01:50]
@test tapprox(test1, test2, atol=1e-12)
@test tapprox(test1, test3, atol=1e-12)
end
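# coefR_10: ratio c_1/c_0 of neighboring Fourier coefficients, taken from the
# eigenvector returned by MathieuCharVecλ (center_index points at c_0).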
function coefR_10(ν,q)
_,vec,center_index=MathieuCharVecλ(ν,q)
vec[center_index+1]/vec[center_index]
end
filename = "MathieuFouriercoef-1.csv"
@testset "$filename" begin
test1 = readcsv(filename)
test2 = Float64[coefR_10(ν,q) for ν in 0.1:.1:0.9, q in -5:.1:5]
@test tapprox(test1, test2, atol=1e-7)
end
filename = "MathieuFouriercoef-2.csv"
@testset "$filename" begin
test1 = readcsv(filename)
test2 = Float64[coefR_10(ν,q) for ν in 0.1:.1:0.9, q in 30:.1:50]
@test tapprox(test1, test2, atol=1e-7)
end
filename = "MathieuFouriercoef-3.csv"
@testset "$filename" begin
test1 = readcsv(filename)
test2 = Float64[coefR_10(ν,q) for ν in 1.1:.1:1.9, q in -5:.1:5]
@test tapprox(test1, test2, atol=1e-6)
end
filename = "MathieuFouriercoef-4.csv"
@testset "$filename" begin
test1 = readcsv(filename)
test2 = Float64[coefR_10(ν,q) for ν in 1.1:.1:1.9, q in 30:.1:50]
@test tapprox(test1, test2, atol=1e-6)
end
filename = "MathieuFouriercoef-5.csv"
@testset "$filename" begin
test1 = readcsv(filename)
test2 = Float64[coefR_10(ν,q) for ν in 20.1:.1:20.9, q in 30:.1:50]
@test tapprox(test1, test2, atol=1e-6)
end
let
ν=[0.1:.01:0.99;4.01:.01:4.99;]
q=[0:.01:1;]
@test ≈([MathieuExponent(a,q,ndet=200)[1] for (a,q) in zip(Float64[MathieuCharA(ν,q) for ν in ν, q in q],[q for ν in ν, q in q])],
[mod(ν,2) for ν in ν, q in q],
atol=1e-5,
norm= x -> norm(x, Inf)
)
end
let
ν=[0.1:.01:0.99;4.01:.01:4.99;]
q=[10:.1:20;]
@test ≈(
[MathieuExponent(a,q,ndet=round(Int,q^2*2))[1] for (a,q) in zip(Float64[MathieuCharA(ν,q) for ν in ν, q in q],[q for ν in ν, q in q])],
[mod(ν,2) for ν in ν, q in q],
atol=1e-4,
norm= x -> norm(x, Inf)
)
end
# Here, the large atol required may be caused by MathieuCharA, which uses an empirical formula to calculate the corresponding ndet.
# Larger q needs larger ndet.
|
[
"MIT"
] | 0.1.5 | 40a0f73be804e174efa838cdc60d86b3165f37a1 | docs | 1764 | # MathieuF.jl
Julia package for Mathieu functions, with function forms similar to the Mathieu-related functions in Mathematica.
Mathieu functions are the eigenvalues and eigenfunctions of *Mathieu's
equation* (for more details, see [NIST's Digital Library of
Mathematical Functions](http://dlmf.nist.gov/28)).
Related Package: [MathieuFunctions.jl](https://github.com/BBN-Q/MathieuFunctions.jl)
## Highlights
- Support Fourier coefficients of non-integer order eigenfunctions
- Support a,q as input, see [Mathieu functions toolbox](https://atoms.scilab.org/toolboxes/Mathieu/4.0.61)
- Support output of the related Wronskian.
## Examples
```julia
nu,ck=MathieuExponent(a,q)
```
where `nu` is the characteristic exponent and the vector `ck` contains the Fourier coefficients of the eigenfunction, normalized so that `norm(ck) ≈ 1`.
Note that `nu` is reduced to the interval `[0,2]` and `c0` corresponds to `ck[(length(ck)-1)÷2+1]` with the reduced `nu`. (When `nu` is real, the procedure actually reduces `nu` into `[0,1]`.)
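A minimal sketch (the input values here are arbitrary):

```julia
using MathieuF, LinearAlgebra

nu, ck = MathieuExponent(0.1, 0.5)
norm(ck) ≈ 1                  # the coefficients are normalized
c0 = ck[(length(ck)-1)÷2+1]   # center coefficient for the reduced nu
```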
```julia
W=MathieuWron(nu,ck::Vector,index::Int)
```
where `W` is the Wronskian of the eigenfunction and `index` refers to the index of `c0` in `ck`.
For example,
```julia
a=0.1;q=0.5;
nu,ck=MathieuExponent(a,q)
idx=(length(ck)-1)÷2+1
W=MathieuWron(nu,ck,idx)
```
In some cases, `W` may be negative. One can replace `nu` with `-nu` and reverse `ck`, i.e., `reverse!(ck)`, to get a positive `W`.
If one prefers a positive `nu`, one can further shift `nu` by 2 (`nu += 2`) and `idx` by 1 (`idx += 1`).
Code example:
```julia
nu=2-nu
reverse!(ck)
idx+=1
W_new=MathieuWron(nu,ck,idx)
```
It can be verified that `W_new==-W`.
If one knows `nu` (not reduced) and `q`, one can use
```julia
W=MathieuWron(nu,q)
```
In my tests, the result is positive with this method.
| MathieuF | https://github.com/Lightup1/MathieuF.jl.git |
|
[
"Apache-2.0"
] | 1.2.0 | d7eb94c71ddcf3ff4e96c2ef15c6db2c81fe60a4 | code | 610 | using Documenter, SpinGlassNetworks
_pages = [
"User guide" => "userguide.md",
"Ising graph" => "ising.md",
"Lattice geometries" => "lattice.md",
"Clustered hamiltonian" => "clh.md",
"Local dimensional reduction" => "bp.md",
"API Reference for auxiliary functions" => "api.md",
]
# ============================
format =
Documenter.HTML(edit_link = "master", prettyurls = get(ENV, "CI", nothing) == "true")
# format = Documenter.LaTeX(platform="none")
makedocs(
sitename = "SpinGlassNetworks.jl",
modules = [SpinGlassNetworks],
pages = _pages,
format = format,
)
| SpinGlassNetworks | https://github.com/euro-hpc-pl/SpinGlassNetworks.jl.git |
|
[
"Apache-2.0"
] | 1.2.0 | d7eb94c71ddcf3ff4e96c2ef15c6db2c81fe60a4 | code | 1427 | using SpinGlassNetworks
using LabelledGraphs
using Graphs
using MetaGraphs
using Logging
"""
Instance below looks like this:
1 -- 2 -- 3
|
4 -- 5 -- 6
|
7 -- 8 -- 9
"""
function create_larger_example_potts_hamiltonian_tree()
instance = Dict(
(1, 1) => 0.5,
(2, 2) => 0.25,
(3, 3) => 0.3,
(4, 4) => 0.1,
(5, 5) => -0.1,
(6, 6) => 0.1,
(7, 7) => 0.0,
(8, 8) => 0.1,
(9, 9) => 0.01,
(1, 2) => -1.0,
(2, 3) => 1.0,
(1, 4) => 1.0,
(4, 5) => 1.0,
(5, 6) => 1.0,
(4, 7) => 1.0,
(7, 8) => 1.0,
(8, 9) => 1.0,
)
ig = ising_graph(instance)
assignment_rule = Dict(
1 => (1, 1, 1),
2 => (1, 2, 1),
3 => (1, 3, 1),
4 => (2, 1, 1),
5 => (2, 2, 1),
6 => (2, 3, 1),
7 => (3, 1, 1),
8 => (3, 2, 1),
9 => (3, 3, 1),
)
potts_h = potts_hamiltonian(
ig,
Dict{NTuple{3,Int},Int}(),
spectrum = full_spectrum,
cluster_assignment_rule = assignment_rule,
)
ig, potts_h
end
ig, potts_h = create_larger_example_potts_hamiltonian_tree()
beta = 0.1
iter = 0
beliefs = belief_propagation(potts_h, beta; iter = iter)
for v in vertices(potts_h)
en = get_prop(potts_h, v, :spectrum).energies
println("vertex ", v, " energy = ", en .- minimum(en), " bp = ", beliefs[v])
end
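# Note: with iter = 0 the message-passing loop in belief_propagation never runs,
# so the printed beliefs coincide with the (shifted) local energies; increasing
# `iter` propagates information along the tree, where belief propagation is exact.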
| SpinGlassNetworks | https://github.com/euro-hpc-pl/SpinGlassNetworks.jl.git |
|
[
"Apache-2.0"
] | 1.2.0 | d7eb94c71ddcf3ff4e96c2ef15c6db2c81fe60a4 | code | 3472 | using HDF5
using Graphs
using LinearAlgebra
using LabelledGraphs
using MetaGraphs
using SpinGlassNetworks
function load_openGM(fname::String, Nx::Integer, Ny::Integer)
file = h5open(fname, "r")
file_keys = collect(keys(read(file)))
data = read(file[file_keys[1]])
H = collect(Int64, data["header"])
F = Array{Int64}(data["factors"])
J = Array{Int64}(data["function-id-16000"]["indices"])
V = Array{Real}(data["function-id-16000"]["values"])
N = Array{Int64}(data["numbers-of-states"])
F = reverse(F)
factors = Dict()
while length(F) > 0
f1 = pop!(F)
z1 = pop!(F)
nn = pop!(F)
n = []
for _ = 1:nn
tt = pop!(F)
ny, nx = divrem(tt, Nx)
push!(n, ny, nx)
end
if length(n) == 4
if abs(n[1] - n[3]) + abs(n[2] - n[4]) != 1
throw(Exception("Not nearest neighbour"))
end
end
if length(n) == 2
if (n[1] >= Ny) || (n[2] >= Nx)
throw(Exception("Wrong size"))
end
end
factors[tuple(n...)] = f1
if z1 != 0
throw(Exception("Something wrong with the expected convention."))
end
end
J = reverse(J)
functions = Dict()
ii = -1
lower = 0
while length(J) > 0
ii += 1
nn = pop!(J)
n = []
for _ = 1:nn
push!(n, pop!(J))
end
upper = lower + prod(n)
functions[ii] = reshape(V[lower+1:upper], reverse(n)...)'
lower = upper
end
result = Dict(
"fun" => functions,
"fac" => factors,
"N" => reshape(N, (Ny, Nx)),
"Nx" => Nx,
"Ny" => Ny,
)
result
end
function potts_hamiltonian(fname::String, Nx::Integer = 240, Ny::Integer = 320)
loaded_rmf = load_openGM(fname, Nx, Ny)
functions = loaded_rmf["fun"]
factors = loaded_rmf["fac"]
N = loaded_rmf["N"]
clusters = super_square_lattice((Nx, Ny, 1))
potts_h = LabelledGraph{MetaDiGraph}(sort(collect(values(clusters))))
for v ∈ potts_h.labels
x, y = v
sp = Spectrum(
Vector{Real}(undef, 1),
Array{Vector{Int}}(undef, 1, 1),
Vector{Int}(undef, 1),
)
set_props!(potts_h, v, Dict(:cluster => v, :spectrum => sp))
end
for (index, value) in factors
if length(index) == 2
y, x = index
Eng = sum(functions[value])
set_props!(potts_h, (x + 1, y + 1), Dict(:eng => Eng))
elseif length(index) == 4
y1, x1, y2, x2 = index
add_edge!(potts_h, (x1 + 1, y1 + 1), (x2 + 1, y2 + 1))
Eng = sum(functions[value], dims = 2)
set_props!(
potts_h,
(x1 + 1, y1 + 1),
(x2 + 1, y2 + 1),
Dict(
:outer_edges => ((x1 + 1, y1 + 1), (x2 + 1, y2 + 1)),
:eng => Eng,
:pl => I,
:pr => I,
),
)
else
throw(
ErrorException(
"Something is wrong with factor index, it has length $(length(index))",
),
)
end
end
potts_h
end
x, y = 240, 320
filename = "/home/tsmierzchalski/.julia/dev/SpinGlassNetworks/examples/penguin-small.h5"
cf = potts_hamiltonian(filename, x, y)
| SpinGlassNetworks | https://github.com/euro-hpc-pl/SpinGlassNetworks.jl.git |
|
[
"Apache-2.0"
] | 1.2.0 | d7eb94c71ddcf3ff4e96c2ef15c6db2c81fe60a4 | code | 461 | module SpinGlassNetworks
using LabelledGraphs
using Graphs
using MetaGraphs # TODO: to be replaced by MetaGraphsNext
using CSV
using DocStringExtensions
using LinearAlgebra, MKL
using Base.Cartesian
using SparseArrays
using CUDA, CUDA.CUSPARSE
using SpinGlassTensors
import Base.Prehashed
include("ising.jl")
include("spectrum.jl")
include("lattice.jl")
include("potts_hamiltonian.jl")
include("bp.jl")
include("truncate.jl")
include("utils.jl")
end # module
| SpinGlassNetworks | https://github.com/euro-hpc-pl/SpinGlassNetworks.jl.git |
|
[
"Apache-2.0"
] | 1.2.0 | d7eb94c71ddcf3ff4e96c2ef15c6db2c81fe60a4 | code | 20779 |
export belief_propagation,
potts_hamiltonian_2site,
projector,
get_neighbors,
MergedEnergy,
update_message,
merge_vertices_potts_h,
local_energy,
interaction_energy,
SparseCSC
"""
$(TYPEDSIGNATURES)
Perform loopy belief propagation on a given Potts Hamiltonian.
# Arguments:
- `potts_h::LabelledGraph{S, T}`: The Potts Hamiltonian represented as a labelled graph.
- `beta::Real`: The inverse temperature parameter for the belief propagation algorithm.
- `tol::Real (optional, default=1e-6)`: The convergence tolerance. The algorithm stops when the message updates between iterations are smaller than this value.
- `iter::Int (optional, default=1)`: The maximum number of iterations to perform.
# Returns:
- `beliefs::Dict`: A dictionary where keys are vertices of Potts Hamiltonian, and values are the
resulting beliefs after belief propagation.
The function implements loopy belief propagation on the given Potts Hamiltonian `potts_h` to calculate beliefs for each vertex.
Belief propagation is an iterative algorithm that computes beliefs by passing messages between vertices and edges of the Potts Hamiltonian.
The algorithm continues until convergence or until the specified maximum number of iterations is reached.
The beliefs are computed based on the inverse temperature parameter `beta`, which controls the influence of energy values on the beliefs.
"""
function belief_propagation(
potts_h::LabelledGraph{S,T},
beta::Real;
tol = 1e-6,
iter = 1,
) where {S,T}
messages_ve = Dict()
messages_ev = Dict()
# Initialize messages with uniform probabilities
for v in vertices(potts_h)
for (n, pv, _) in get_neighbors(potts_h, v)
push!(messages_ev, (n, v) => ones(maximum(pv)))
end
end
# Perform message passing until convergence
converged = false
iteration = 0
while !converged && iteration < iter # Set an appropriate number of iterations and convergence threshold
iteration += 1
old_messages_ev = deepcopy(messages_ev)
for v in vertices(potts_h)
#update messages from vertex to edge
node_messages = Dict()
for (n1, pv1, _) ∈ get_neighbors(potts_h, v)
node_messages[n1, v] = messages_ev[n1, v][pv1]
end
for (n1, pv1, _) ∈ get_neighbors(potts_h, v)
E_local = get_prop(potts_h, v, :spectrum).energies
temp = exp.(-(E_local .- minimum(E_local)) * beta)
for (n2, pv2, _) in get_neighbors(potts_h, v)
if n1 == n2
continue
end
temp .*= node_messages[n2, v] # messages_ev[n2, v][pv2]
end
temp ./= sum(temp)
messages_ve[v, n1] = SparseCSC(eltype(temp), pv1) * temp
end
end
#update messages from edge to vertex
for v in vertices(potts_h)
for (n, _, en) ∈ get_neighbors(potts_h, v)
messages_ev[n, v] = update_message(en, messages_ve[n, v], beta)
end
end
# Check convergence
converged = all([
all(abs.(old_messages_ev[v] .- messages_ev[v]) .< tol) for
v in keys(messages_ev)
])
end
beliefs = Dict()
for v in vertices(potts_h)
E_local = get_prop(potts_h, v, :spectrum).energies
beliefs[v] = exp.(-E_local * beta)
for (n, pv, _) ∈ get_neighbors(potts_h, v)
beliefs[v] .*= messages_ev[n, v][pv]
end
beliefs[v] = -log.(beliefs[v]) ./ beta
beliefs[v] = beliefs[v] .- minimum(beliefs[v])
end
beliefs
end
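# Usage sketch (assumes `potts_h` was built with `potts_hamiltonian`):
# beliefs = belief_propagation(potts_h, 1.0; tol = 1e-6, iter = 10)
# beliefs[v] then holds energies shifted so that the most probable state sits at 0.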
"""
$(TYPEDSIGNATURES)
Returns the neighbors of a given vertex in a Potts Hamiltonian.
# Arguments:
- `potts_h::LabelledGraph{S, T}`: The Potts Hamiltonian represented as a labeled graph.
- `vertex::NTuple`: The vertex for which neighbors are to be retrieved.
# Returns:
- `neighbors::Vector{Tuple}`: A vector of tuples representing the neighbors of the specified vertex.
Each tuple contains the following information:
- `dst_node::T`: The neighboring vertex.
- `pv::Matrix`: The projector associated with the edge connecting the vertex and its neighbor.
- `en::Real`: The energy associated with the edge connecting the vertex and its neighbor.
This function retrieves the neighbors of a given vertex in a Potts Hamiltonian graph.
It iterates through the edges of the graph and identifies edges connected to the specified vertex.
For each neighboring edge, it extracts and returns the neighboring vertex, the associated projector, and the energy.
"""
function get_neighbors(potts_h::LabelledGraph{S,T}, vertex::NTuple) where {S,T}
neighbors = []
for edge in edges(potts_h)
src_node, dst_node = src(edge), dst(edge)
if src_node == vertex
en = get_prop(potts_h, src_node, dst_node, :en)
idx_pv = get_prop(potts_h, src_node, dst_node, :ipl)
pv = get_projector!(get_prop(potts_h, :pool_of_projectors), idx_pv, :CPU)
push!(neighbors, (dst_node, pv, en))
elseif dst_node == vertex
en = get_prop(potts_h, src_node, dst_node, :en)'
idx_pv = get_prop(potts_h, src_node, dst_node, :ipr)
pv = get_projector!(get_prop(potts_h, :pool_of_projectors), idx_pv, :CPU)
push!(neighbors, (src_node, pv, en))
end
end
return neighbors
end
"""
$(TYPEDSIGNATURES)
A custom Julia struct representing energy values in a merged format for use in specific calculations.
# Fields:
- `e11::AbstractMatrix{T}`
- `e12::AbstractMatrix{T}`
- `e21::AbstractMatrix{T}`
- `e22::AbstractMatrix{T}`
The `MergedEnergy` struct is used to represent energy values that are organized in a merged format.
This format is often utilized in certain computational tasks, where energy values are categorized based on combinations of left and right factors.
Each field of the `MergedEnergy` struct stores energy values as an `AbstractMatrix{T}` of type `T`,
where `T` is a subtype of the `Real` abstract type.
The specific organization and interpretation of these energy values depend on the context in which this struct is used.
"""
struct MergedEnergy{T<:Real}
e11::AbstractMatrix{T}
e12::AbstractMatrix{T}
e21::AbstractMatrix{T}
e22::AbstractMatrix{T}
end
Base.adjoint(s::MergedEnergy) = MergedEnergy(s.e11', s.e21', s.e12', s.e22')
"""
$(TYPEDSIGNATURES)
Update a message using energy values and temperature.
# Arguments:
- `E_bond::AbstractArray`: An array of energy values associated with a bond or interaction.
- `message::Vector`: The input message vector to be updated.
- `beta::Real`: The inverse temperature parameter controlling the influence of energy values.
# Returns:
- `updated_message::Vector`: The updated message vector after applying the energy-based update.
This function takes energy values `E_bond` associated with a bond or interaction, an input message vector `message`,
and an inverse temperature parameter `beta`. It updates the message by first adjusting the energy values relative to their minimum value,
exponentiating them with a negative sign and scaling by `beta`, and then multiplying them element-wise with the input message.
The result is an updated message that reflects the influence of energy values and temperature.
"""
function update_message(E_bond::AbstractArray, message::Vector, beta::Real)
E_bond = E_bond .- minimum(E_bond)
exp.(-beta * E_bond) * message
end
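# For example, for E_bond = [0.0 1.0; 1.0 0.0], message = [1.0, 0.0] and beta = 1.0,
# the update returns [1.0, exp(-1.0)] ≈ [1.0, 0.368].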
"""
$(TYPEDSIGNATURES)
Update a message using energy values and temperature in a merged energy format.
# Arguments:
- `E_bond::MergedEnergy`: An instance of the `MergedEnergy` type representing energy values for the bond or interaction.
- `message::Vector`: The input message vector to be updated.
- `beta::Real`: The inverse temperature parameter controlling the influence of energy values.
# Returns:
- `updated_message::Vector`: The updated message vector after applying the energy-based update.
This function takes energy values `E_bond` in a merged energy format, an input message vector `message`,
and an inverse temperature parameter `beta`. It updates the message based on the energy values and inverse temperature using a specified algorithm.
The `MergedEnergy` type represents energy values in a merged format, and the function processes these values
accordingly to update the message vector.
"""
function update_message(E_bond::MergedEnergy, message::Vector, beta::Real)
e11, e12, e21, e22 = E_bond.e11, E_bond.e12, E_bond.e21, E_bond.e22
# equivalent to
# @cast E[(l1, l2), (r1, r2)] := e11[l1, r1] + e21[l2, r1] + e12[l1, r2] + e22[l2, r2]
# exp.(-beta * E) * message
e11 = exp.(-beta .* (e11 .- minimum(e11)))
e12 = exp.(-beta .* (e12 .- minimum(e12)))
e21 = exp.(-beta .* (e21 .- minimum(e21)))
e22 = exp.(-beta .* (e22 .- minimum(e22)))
sl1, sl2, sr1, sr2 = size(e11, 1), size(e21, 1), size(e21, 2), size(e22, 2)
if sl1 * sl2 * sr1 * sr2 < max(sr1 * sr2 * min(sl1, sl2), sl1 * sl2 * min(sr1, sr2))
R = reshape(e11, sl1, 1, sr1, 1) .* reshape(e21, 1, sl2, sr1, 1)
R = R .* reshape(e12, sl1, 1, 1, sr2)
R = R .* reshape(e22, 1, sl2, 1, sr2)
R = reshape(R, sl1 * sl2, sr1 * sr2) * message
elseif sl1 <= sl2 && sr1 <= sr2
R = reshape(e12, sl1, 1, sr2) .* reshape(message, 1, sr1, sr2)
R = reshape(reshape(R, sl1 * sr1, sr2) * e22', sl1, sr1, sl2) # [l1, r1, l2]
R .*= reshape(e11, sl1, sr1, 1) # [l1, r1, l2] .* [l1, r1, :]
R .*= reshape(e21', 1, sr1, sl2) # [l1, r1, l2] .* [:, r1, l2]
R = reshape(sum(R, dims = 2), sl1 * sl2)
elseif sl1 <= sl2 && sr2 <= sr1
R = reshape(e11', sr1, sl1, 1) .* reshape(message, sr1, 1, sr2)
R = reshape(e21 * reshape(R, sr1, sl1 * sr2), sl2, sl1, sr2)
R .*= reshape(e12, 1, sl1, sr2) # [l2, l1, r2] .* [:, l1, r2]
R .*= reshape(e22, sl2, 1, sr2) # [l2, l1, r2] .* [l2, :, r2]
R = reshape(reshape(sum(R, dims = 3), sl2, sl1)', sl1 * sl2)
elseif sl2 <= sl1 && sr1 <= sr2
R = reshape(e22, sl2, 1, sr2) .* reshape(message, 1, sr1, sr2)
R = reshape(reshape(R, sl2 * sr1, sr2) * e12', sl2, sr1, sl1) # [l2, r1, l1]
R .*= reshape(e11', 1, sr1, sl1) # [l2, r1, l1] .* [:, r1, l1]
R .*= reshape(e21, sl2, sr1, 1) # [l2, r1, l1] .* [l2, r1, :]
R = reshape(reshape(sum(R, dims = 2), sl2, sl1)', sl1 * sl2)
else # sl2 <= sl1 && sr2 <= sr1
R = reshape(e21', sr1, sl2, 1) .* reshape(message, sr1, 1, sr2)
R = reshape(e11 * reshape(R, sr1, sl2 * sr2), sl1, sl2, sr2)
R .*= reshape(e12, sl1, 1, sr2) # [l1, l2, r2] .* [l1, :, r2]
R .*= reshape(e22, 1, sl2, sr2) # [l1, l2, r2] .* [:, l2, r2]
R = reshape(sum(R, dims = 3), sl1 * sl2)
end
R
end
"""
$(TYPEDSIGNATURES)
Constructs a Potts Hamiltonian for a given Potts Hamiltonian with a 2-site cluster approximation used in Pegasus graph.
# Arguments:
- `potts_h::LabelledGraph{S, T}`: The Potts Hamiltonian represented as a labelled graph.
- `beta::Real`: The inverse temperature parameter for the 2-site cluster Hamiltonian construction.
# Returns:
- `new_potts_h::LabelledGraph{MetaDiGraph}`: A new labelled graph representing the 2-site cluster Hamiltonian.
This function constructs a Potts Hamiltonian `potts_h` by applying a 2-site cluster approximation.
It combines and merges vertices and edges of the original graph to create a simplified representation of the Hamiltonian.
The resulting `new_potts_h` graph represents the 2-site cluster Hamiltonian with simplified interactions between clusters.
The energy values, projectors, and spectra associated with the new vertices and edges are computed based on
the provided temperature parameter `beta`.
"""
function potts_hamiltonian_2site(potts_h::LabelledGraph{S,T}, beta::Real) where {S,T}
unified_vertices = unique([vertex[1:2] for vertex in vertices(potts_h)])
new_potts_h = LabelledGraph{MetaDiGraph}(unified_vertices)
new_lp = PoolOfProjectors{Int}()
vertx = Set()
for v in vertices(potts_h)
i, j, _ = v
if (i, j) ∈ vertx
continue
end
E1 = local_energy(potts_h, (i, j, 1))
E2 = local_energy(potts_h, (i, j, 2))
E = energy_2site(potts_h, i, j) .+ reshape(E1, :, 1) .+ reshape(E2, 1, :)
sp = Spectrum(reshape(E, :), [], [])
set_props!(new_potts_h, (i, j), Dict(:spectrum => sp))
push!(vertx, (i, j))
end
edge_states = Set()
for e ∈ edges(potts_h)
if e in edge_states
continue
end
v, w = src(e), dst(e)
v1, v2, _ = v
w1, w2, _ = w
if (v1, v2) == (w1, w2)
continue
end
add_edge!(new_potts_h, (v1, v2), (w1, w2))
E, pl, pr = merge_vertices_potts_h(potts_h, beta, v, w)
ipl = add_projector!(new_lp, pl)
ipr = add_projector!(new_lp, pr)
set_props!(new_potts_h, (v1, v2), (w1, w2), Dict(:ipl => ipl, :en => E, :ipr => ipr))
push!(edge_states, sort([(v1, v2), (w1, w2)]))
end
set_props!(new_potts_h, Dict(:pool_of_projectors => new_lp))
new_potts_h
end
"""
$(TYPEDSIGNATURES)
Merge two vertices in a Potts Hamiltonian to create a single merged vertex.
# Arguments:
- `potts_h::LabelledGraph{S, T}`: The Potts Hamiltonian represented as a labeled graph.
- `β::Real`: The inverse temperature parameter controlling the influence of energy values.
- `node1::NTuple{3, Int64}`: The coordinates of the first vertex to merge.
- `node2::NTuple{3, Int64}`: The coordinates of the second vertex to merge.
# Returns:
- `merged_energy::MergedEnergy`: An instance of the `MergedEnergy` type representing the merged energy values.
- `pl::AbstractVector`: The merged left projector.
- `pr::AbstractVector`: The merged right projector.
This function merges two vertices in a Potts Hamiltonian graph `potts_h` to create a single merged vertex.
The merging process combines projectors and energy values associated with the original vertices based on
the provided inverse temperature parameter `β`.
The merged energy values, left projector `pl`, and right projector `pr` are computed based on the interactions
between the original vertices and their respective projectors.
"""
function merge_vertices_potts_h(
potts_h::LabelledGraph{S,T},
β::Real,
node1::NTuple{3,Int64},
node2::NTuple{3,Int64},
) where {S,T}
i1, j1, _ = node1
i2, j2, _ = node2
p21l = projector(potts_h, (i1, j1, 2), (i2, j2, 1))
p22l = projector(potts_h, (i1, j1, 2), (i2, j2, 2))
p12l = projector(potts_h, (i1, j1, 1), (i2, j2, 2))
p11l = projector(potts_h, (i1, j1, 1), (i2, j2, 1))
p1l, (p11l, p12l) = fuse_projectors((p11l, p12l))
p2l, (p21l, p22l) = fuse_projectors((p21l, p22l))
p11r = projector(potts_h, (i2, j2, 1), (i1, j1, 1))
p21r = projector(potts_h, (i2, j2, 1), (i1, j1, 2))
p12r = projector(potts_h, (i2, j2, 2), (i1, j1, 1))
p22r = projector(potts_h, (i2, j2, 2), (i1, j1, 2))
p1r, (p11r, p21r) = fuse_projectors((p11r, p21r))
p2r, (p12r, p22r) = fuse_projectors((p12r, p22r))
pl = outer_projector(p1l, p2l)
pr = outer_projector(p1r, p2r)
e11 = interaction_energy(potts_h, (i1, j1, 1), (i2, j2, 1))
e12 = interaction_energy(potts_h, (i1, j1, 1), (i2, j2, 2))
e21 = interaction_energy(potts_h, (i1, j1, 2), (i2, j2, 1))
e22 = interaction_energy(potts_h, (i1, j1, 2), (i2, j2, 2))
e11 = e11[p11l, p11r]
e21 = e21[p21l, p21r]
e12 = e12[p12l, p12r]
e22 = e22[p22l, p22r]
MergedEnergy(e11, e12, e21, e22), pl, pr
end
"""
$(TYPEDSIGNATURES)
Get the local energy associated with a vertex in a Potts Hamiltonian.
# Arguments:
- `potts_h::LabelledGraph{S, T}`: The Potts Hamiltonian represented as a labeled graph.
- `v::NTuple{3, Int64}`: The coordinates of the vertex for which the local energy is requested.
# Returns:
- `local_energy::AbstractVector`: An abstract vector containing the local energy values associated with the specified vertex.
This function retrieves the local energy values associated with a given vertex `v` in a Potts Hamiltonian graph `potts_h`.
If the vertex exists in the graph and has associated energy values, it returns those values; otherwise, it returns a vector of zeros.
The local energy values are typically obtained from the spectrum associated with the vertex.
"""
function local_energy(potts_h::LabelledGraph{S,T}, v::NTuple{3,Int64}) where {S,T}
has_vertex(potts_h, v) ? get_prop(potts_h, v, :spectrum).energies : zeros(1)
end
"""
$(TYPEDSIGNATURES)
Get the interaction energy between two vertices in a Potts Hamiltonian.
# Arguments:
- `potts_h::LabelledGraph{S, T}`: The Potts Hamiltonian represented as a labeled graph.
- `v::NTuple{3, Int64}`: The coordinates of the first vertex.
- `w::NTuple{3, Int64}`: The coordinates of the second vertex.
# Returns:
- `interaction_energy::AbstractMatrix`: An abstract matrix containing the interaction energy values between the specified vertices.
This function retrieves the interaction energy values between two vertices, `v` and `w`, in a Potts Hamiltonian graph `potts_h`.
If there is a directed edge from `w` to `v`, it returns the corresponding energy values;
if there is a directed edge from `v` to `w`, it returns the transpose of the energy values;
otherwise, it returns a matrix of zeros.
The interaction energy values represent the energy associated with the interaction or connection between the two vertices.
"""
function interaction_energy(
potts_h::LabelledGraph{S,T},
v::NTuple{3,Int64},
w::NTuple{3,Int64},
) where {S,T}
if has_edge(potts_h, w, v)
get_prop(potts_h, w, v, :en)'
elseif has_edge(potts_h, v, w)
get_prop(potts_h, v, w, :en)
else
zeros(1, 1)
end
end
"""
$(TYPEDSIGNATURES)
Get the projector associated with an edge between two vertices in a Potts Hamiltonian.
# Arguments:
- `potts_h::LabelledGraph{S, T}`: The Potts Hamiltonian represented as a labeled graph.
- `v::NTuple{N, Int64}`: The coordinates of one of the two vertices connected by the edge.
- `w::NTuple{N, Int64}`: The coordinates of the other vertex connected by the edge.
# Returns:
- `p::AbstractVector`: An abstract vector representing the projector associated with the specified edge.
This function retrieves the projector associated with an edge between two vertices, `v` and `w`,
in a Potts Hamiltonian graph `potts_h`.
If there is a directed edge from `w` to `v`, it returns the index of right projector (`:ipr`);
if there is a directed edge from `v` to `w`, it returns the index of left projector (`:ipl`).
If no edge exists between the vertices, it returns a vector of ones.
"""
function projector(
potts_h::LabelledGraph{S,T},
v::NTuple{N,Int64},
w::NTuple{N,Int64},
) where {S,T,N}
if has_edge(potts_h, w, v)
idx_p = get_prop(potts_h, w, v, :ipr)
p = get_projector!(get_prop(potts_h, :pool_of_projectors), idx_p, :CPU)
elseif has_edge(potts_h, v, w)
idx_p = get_prop(potts_h, v, w, :ipl)
p = get_projector!(get_prop(potts_h, :pool_of_projectors), idx_p, :CPU)
else
p = ones(
Int,
v ∈ vertices(potts_h) ? length(get_prop(potts_h, v, :spectrum).energies) : 1,
)
end
end
function fuse_projectors(projectors::NTuple{N,K}) where {N,K}
fused, transitions_matrix = rank_reveal(hcat(projectors...), :PE)
transitions = Tuple(Array(t) for t ∈ eachcol(transitions_matrix))
fused, transitions
end
function outer_projector(p1::Array{T,1}, p2::Array{T,1}) where {T<:Number}
reshape(reshape(p1, :, 1) .+ maximum(p1) .* reshape(p2 .- 1, 1, :), :)
end
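# For example, outer_projector([1, 2], [1, 2]) == [1, 2, 3, 4]: the fused projector
# enumerates all pairs (p1[i], p2[j]) in column-major order.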
"""
$(TYPEDSIGNATURES)
Create a sparse column-compressed (CSC) matrix with specified column indices and values.
# Arguments:
- `::Type{R}`: The element type of the sparse matrix (e.g., `Float64`, `Int64`).
- `p::Vector{Int64}`: A vector of column indices for the non-zero values.
# Returns:
- `sparse_matrix::SparseMatrixCSC{R}`: A sparse column-compressed matrix with non-zero values at specified columns.
This constructor function creates a sparse column-compressed (CSC) matrix of element type `R` based on the provided
column indices `p` and values. The resulting matrix has non-zero values at the specified column indices, while all other elements are zero.
The `SparseCSC` constructor is useful for creating sparse matrices with specific column indices and values efficiently.
"""
function SparseCSC(::Type{R}, p::Vector{Int64}) where {R<:Real}
n = length(p)
mp = maximum(p)
cn = collect(1:n)
co = ones(R, n)
sparse(p, cn, co, mp, n)
end
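# For example, SparseCSC(Float64, [1, 3, 1]) is the 3×3 sparse matrix with ones at
# (1, 1), (3, 2) and (1, 3), i.e. a one-hot column encoding of the projector p.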
| SpinGlassNetworks | https://github.com/euro-hpc-pl/SpinGlassNetworks.jl.git |
|
[
"Apache-2.0"
] | 1.2.0 | d7eb94c71ddcf3ff4e96c2ef15c6db2c81fe60a4 | code | 7401 | using LabelledGraphs
export ising_graph,
rank_vec,
cluster,
rank,
nodes,
basis_size,
biases,
couplings,
IsingGraph,
prune,
inter_cluster_edges
const Instance = Union{String,Dict}
const IsingGraph{T} = LabelledGraph{MetaGraph{Int,T}}
function unique_nodes(ising_tuples)
sort(collect(Set(Iterators.flatten((i, j) for (i, j, _) ∈ ising_tuples))))
end
"""
ising_graph(::Type{T}, inst::Instance; scale::Real = 1, rank_override::Dict = Dict{Int,Int}())
Create an Ising graph from interaction data.
This function creates an Ising graph (LabelledGraph) from interaction data provided in the form of an `inst` argument.
The Ising graph represents a system of spins, where each spin is associated with a vertex,
and interactions between spins are represented as edges with corresponding weights.
# Arguments:
- `::Type{T}`: The type of the edge weights, typically `Float64` or `Float32`.
- `inst::Instance`: Interaction data, which can be either a file path to a CSV file or a collection of triples `(i, j, J)` representing interactions between spins, where `i` and `j` are spin indices, and `J` is the interaction strength.
- `scale::Real`: The scale factor establishes the convention in the Hamiltonian (default is 1).
- `rank_override::Dict`: A dictionary specifying the rank (number of states) for each vertex. If not provided, a default rank of 2 is used for all vertices.
# Returns:
- `ig::IsingGraph{T}`: The Ising graph (LabelledGraph) representing the spin system.
The function reads interaction data and constructs an Ising graph `ig`.
It assigns interaction strengths to edges between spins and optionally scales them by the `scale` factor; the `scale` option allows changing the convention of the Hamiltonian.
The `rank_override` dictionary can be used to specify the rank (number of states) for individual vertices, allowing customization of the Ising model.
Convention: H = scale * sum_{i, j} (J_{ij} * s_i * s_j + J_{ii} * s_i)
"""
function ising_graph(
::Type{T},
inst::Instance;
scale::Real = 1,
rank_override::Dict = Dict{Int,Int}(),
) where {T}
if inst isa String
ising = CSV.File(inst, types = [Int, Int, T], header = 0, comment = "#")
else
ising = [(i, j, T(J)) for ((i, j), J) ∈ inst]
end
ig = IsingGraph{T}(unique_nodes(ising))
set_prop!.(Ref(ig), vertices(ig), :h, zero(T))
foreach(v -> set_prop!(ig, v, :rank, get(rank_override, v, 2)), vertices(ig))
for (i, j, v) ∈ ising
v *= T(scale)
if i == j
set_prop!(ig, i, :h, v)
else
add_edge!(ig, i, j) || throw(ArgumentError("Duplicate edge ($i, $j)"))
set_prop!(ig, i, j, :J, v)
end
end
set_prop!(ig, :rank, Dict(v => get(rank_override, v, 2) for v in vertices(ig)))
ig
end
function ising_graph(inst::Instance; scale::Real = 1, rank_override::Dict = Dict{Int,Int}())
ising_graph(Float64, inst; scale = scale, rank_override = rank_override)
end
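# Usage sketch (hypothetical two-spin instance):
# ig = ising_graph(Dict((1, 1) => 0.5, (2, 2) => -0.25, (1, 2) => -1.0))
# encodes h_1 = 0.5, h_2 = -0.25 and J_12 = -1.0 (with the default scale = 1).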
Base.eltype(ig::IsingGraph{T}) where {T} = T
rank_vec(ig::IsingGraph) = Int[get_prop((ig), v, :rank) for v ∈ vertices(ig)]
basis_size(ig::IsingGraph) = prod(rank_vec(ig))
biases(ig::IsingGraph) = get_prop.(Ref(ig), vertices(ig), :h)
"""
$(TYPEDSIGNATURES)
Return the coupling strengths between vertices of an Ising graph.
This function computes and returns the coupling strengths (interaction energies) between pairs of vertices in an Ising graph `ig`.
The coupling strengths are represented as a matrix, where each element `(i, j)` corresponds to the interaction energy between vertex `i` and vertex `j`.
# Arguments:
- `ig::IsingGraph{T}`: The Ising graph representing a system of spins with associated interaction strengths.
# Returns:
- `J::Matrix{T}`: A matrix of coupling strengths between vertices of the Ising graph.
The function iterates over the edges of the Ising graph and extracts the interaction strengths associated with each edge, populating the `J` matrix accordingly.
"""
function couplings(ig::IsingGraph{T}) where {T}
J = zeros(T, nv(ig), nv(ig))
for edge ∈ edges(ig)
i = ig.reverse_label_map[src(edge)]
j = ig.reverse_label_map[dst(edge)]
@inbounds J[i, j] = get_prop(ig, edge, :J)
end
J
end
cluster(ig::IsingGraph, verts) = induced_subgraph(ig, collect(verts))
"""
$(TYPEDSIGNATURES)
Return the dense adjacency matrix between clusters of vertices in an Ising graph.
This function computes and returns the dense adjacency matrix `J` between clusters of vertices represented by two
Ising graphs, `cl1` and `cl2`, within the context of the larger Ising graph `ig`.
The adjacency matrix represents the interaction strengths between clusters of vertices,
where each element `(i, j)` corresponds to the interaction strength between cluster `i` in `cl1` and cluster `j` in `cl2`.
# Arguments:
- `ig::IsingGraph{T}`: The Ising graph representing a system of spins with associated interaction strengths.
- `cl1::IsingGraph{T}`: The first Ising graph representing one cluster of vertices.
- `cl2::IsingGraph{T}`: The second Ising graph representing another cluster of vertices.
# Returns:
- `outer_edges::Vector{LabelledEdge}`: A vector of labeled edges representing the interactions between clusters.
- `J::Matrix{T}`: A dense adjacency matrix representing interaction strengths between clusters.
The function first identifies the outer edges that connect vertices between the two clusters in the context of the larger Ising graph `ig`.
It then computes the interaction strengths associated with these outer edges and populates the dense adjacency matrix `J` accordingly.
"""
function inter_cluster_edges(
ig::IsingGraph{T},
cl1::IsingGraph{T},
cl2::IsingGraph{T},
) where {T}
outer_edges =
[LabelledEdge(i, j) for i ∈ vertices(cl1), j ∈ vertices(cl2) if has_edge(ig, i, j)]
J = zeros(T, nv(cl1), nv(cl2))
for e ∈ outer_edges
i, j = cl1.reverse_label_map[src(e)], cl2.reverse_label_map[dst(e)]
@inbounds J[i, j] = get_prop(ig, e, :J)
end
outer_edges, J
end
"""
$(TYPEDSIGNATURES)
Used only in MPS_search, would be obsolete if MPS_search uses QMps.
Remove non-existing spins from an Ising graph.
This function removes non-existing spins from the given Ising graph `ig`.
Non-existing spins are those that have zero degree (no connections to other spins) and also have an external
magnetic field (`h`) that is not approximately equal to zero within the specified tolerance `atol`.
# Arguments:
- `ig::IsingGraph`: The Ising graph to be pruned.
- `atol::Real`: The tolerance for considering the external magnetic field as zero. The default value is `1e-14`.
# Returns:
- `pruned_graph::IsingGraph`: A new Ising graph with non-existing spins removed.
The function returns a pruned version of the input Ising graph, where non-existing spins and their associated properties are removed.
"""
function prune(ig::IsingGraph; atol::Real = 1e-14)
to_keep = vcat(
findall(!iszero, degree(ig)),
findall(
x ->
iszero(degree(ig, x)) && !isapprox(get_prop(ig, x, :h), 0, atol = atol),
vertices(ig),
),
)
gg = ig[ig.labels[to_keep]]
labels = collect(vertices(gg.inner_graph))
reverse_label_map = Dict(i => i for i = 1:nv(gg.inner_graph))
LabelledGraph(labels, gg.inner_graph, reverse_label_map)
end
| SpinGlassNetworks | https://github.com/euro-hpc-pl/SpinGlassNetworks.jl.git |
|
[
"Apache-2.0"
] | 1.2.0 | d7eb94c71ddcf3ff4e96c2ef15c6db2c81fe60a4 | code | 10416 | export super_square_lattice,
pegasus_lattice,
pegasus_lattice_alternative,
pegasus_lattice_old_numbering,
j_function,
zephyr_lattice,
zephyr_lattice_5tuple,
zephyr_lattice_5tuple_rotated,
periodic_lattice
"""
$(TYPEDSIGNATURES)
Create a mapping from Ising graph coordinates to a super square lattice arrangement.
Variable number of Ising graph -> cluster hamiltonian coordinate system
This function generates a mapping that relates Ising graph coordinates to a super square lattice arrangement.
The super square lattice is defined by the size of five dimensions: `(m, um, n, un, t)`,
where m is the number of columns, n is the number of rows and t denotes the number of spins stored in the cluster.
# Arguments:
- `size::NTuple{5, Int}`: A tuple specifying the size of the super square lattice in five dimensions: `(m, um, n, un, t)`.
# Returns:
- `coord_map::Dict`: A dictionary that maps Ising graph coordinates to the corresponding lattice coordinates.
The `size` tuple represents the dimensions of the super square lattice. The function creates a dictionary where
ising graph coordinates are associated with their corresponding lattice coordinates.
"""
function super_square_lattice(size::NTuple{5,Int})
m, um, n, un, t = size
old = LinearIndices((1:t, 1:un, 1:n, 1:um, 1:m))
Dict(old[k, uj, j, ui, i] => (i, j) for i = 1:m, ui = 1:um, j = 1:n, uj = 1:un, k = 1:t)
end
"""
$(TYPEDSIGNATURES)
Create a mapping from Ising graph coordinates to a simplified super square lattice arrangement.
This function generates a mapping that relates Ising graph coordinates to a simplified super square lattice arrangement.
The simplified super square lattice is defined by the size of three dimensions: `(m, n, t)`, where m is the number of columns,
n is the number of rows and t denotes the number of spins stored in the cluster.
# Arguments:
- `size::NTuple{3, Int}`: A tuple specifying the size of the simplified super square lattice in three dimensions: `(m, n, t)`, where `m` is the number of columns, `n` the number of rows, and `t` denotes the number of spins in a cluster.
# Returns:
- `coord_map::Dict`: A dictionary that maps Ising graph coordinates to the corresponding lattice coordinates.
The `size` tuple represents the dimensions of the simplified super square lattice.
The function internally adds the required dimensions `(1, 1)` to make it compatible with the `super_square_lattice` function, which deals with five dimensions.
"""
function super_square_lattice(size::NTuple{3,Int})
m, n, t = size
super_square_lattice((m, 1, n, 1, t))
end
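# For example, super_square_lattice((2, 2, 4)) maps 16 linear Ising indices onto
# a 2×2 grid of clusters with 4 spins each.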
pegasus_lattice(size::NTuple{2,Int}) = pegasus_lattice((size[1], size[2], 3))
"""
$(TYPEDSIGNATURES)
Create a mapping from Ising graph coordinates to Pegasus lattice coordinates.
This function generates a mapping that relates Ising graph coordinates to Pegasus lattice coordinates
based on the specified size of the Pegasus lattice in three dimensions: `(m, n, t)`.
# Arguments:
- `size::NTuple{3, Int}`: A tuple specifying the size of the Pegasus lattice in three dimensions: `(m, n, t)`, where `m` is the number of columns, `n` the number of rows, and `t` denotes the number of spins in a cluster. Convention: `t` is already divided by 8, so `t`=3 for the Pegasus lattice.
# Returns:
- `coord_map::Dict`: A dictionary that maps Ising graph coordinates to the corresponding Pegasus lattice coordinates.
The `pegasus_lattice` allows you to build the graph relevant for D-Wave Pegasus architecture.
"""
function pegasus_lattice(size::NTuple{3,Int})
m, n, t = size
old = LinearIndices((1:8*t, 1:n, 1:m))
map = Dict(
old[k, j, i] => (i, j, 1) for i = 1:m, j = 1:n,
k ∈ (p * 8 + q for p ∈ 0:t-1, q ∈ 1:4)
)
for i = 1:m, j = 1:n, k ∈ (p * 8 + q for p ∈ 0:t-1, q ∈ 5:8)
push!(map, old[k, j, i] => (i, j, 2))
end
map
end
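# For example, pegasus_lattice((3, 3, 3)) maps 3 * 3 * 24 = 216 linear indices onto
# nodes (i, j, k) with k ∈ (1, 2), i.e. two clusters of 12 spins per unit cell.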
function pegasus_lattice_alternative(size::NTuple{3,Int})
m, n, t = size
old = LinearIndices((1:8*t, 1:n, 1:m))
map = Dict(
old[k, j, i] => (i, j, 2) for i = 1:m, j = 1:n,
k ∈ (p * 8 + q for p ∈ 0:t-1, q ∈ 1:4)
)
for i = 1:m, j = 1:n, k ∈ (p * 8 + q for p ∈ 0:t-1, q ∈ 5:8)
push!(map, old[k, j, i] => (i, j, 1))
end
map
end
function pegasus_lattice_old_numbering(size::NTuple{3,Int})
m, n, t = size
old = LinearIndices((1:8*t, 1:n, 1:m))
map = Dict(
old[k, j, i] => (i, n - j + 1, 2) for i = 1:m, j = 1:n,
k ∈ (p * 8 + q for p ∈ 0:t-1, q ∈ 1:4)
)
for i = 1:m, j = 1:n, k ∈ (p * 8 + q for p ∈ 0:t-1, q ∈ 5:8)
push!(map, old[k, j, i] => (i, n - j + 1, 1))
end
map
end
function zephyr_lattice_z1(size::NTuple{3,Int})
m, n, t = size # t is identical to dwave (Tile parameter for the Zephyr lattice)
map = Dict{Int,NTuple{3,Int}}()
for i = 1:2*n, j ∈ 1:2*m
for p in p_func(i, j, t, n, m)
push!(
map,
(i - 1) * (2 * n * t) +
(j - 1) * (2 * m * t) +
p * n +
(i - 1) * (j % 2) +
1 => (i, j, 1),
)
end
for q ∈ q_func(i, j, t, n, m)
push!(
map,
2 * t * (2 * n + 1) +
(i - 1) * (2 * n * t) +
(j % 2) * (2 * m * t) +
q * m +
(j - 1) * (i - 1) +
1 => (i, j, 2),
)
end
end
map
end
function j_function(i::Int, n::Int)
i ∈ collect(1:n) && return collect((n+1-i):(n+i))
collect((i-n):(3*n+1-i))
end
zephyr_lattice(size::NTuple{2,Int}) = zephyr_lattice((size[1], size[2], 4))
"""
$(TYPEDSIGNATURES)
Create a mapping from Ising graph coordinates to Zephyr lattice coordinates.
This function generates a mapping that relates Ising graph coordinates to Zephyr lattice
coordinates based on the specified size of the Zephyr lattice in three dimensions: `(m, n, t)`.
# Arguments:
- `size::NTuple{3, Int}`: A tuple specifying the size of the Zephyr lattice in three dimensions: `(m, n, t)`, where `m` is twice the number of columns, `n` is twice the number of rows, and `t` denotes the number of spins in a cluster. Convention: `t` is already divided by 4, so `t`=4 for the Zephyr lattice. E.g., to create a 3x3x16 Zephyr lattice, use `m`=6, `n`=6, `t`=4.
# Returns:
- `coord_map::Dict`: A dictionary that maps Ising graph coordinates to the corresponding Zephyr lattice coordinates.
The `zephyr_lattice` allows you to build the graph relevant for D-Wave Zephyr architecture.
"""
function zephyr_lattice(size::NTuple{3,Int})
m, n, t = size
zephyr_lattice_5tuple_rotated(
m + 1,
n + 1,
zephyr_lattice_5tuple((Int(m / 2), Int(n / 2), t)),
)
end
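# For example, zephyr_lattice((6, 6, 4)) builds the coordinate map for a 3x3x16
# Zephyr graph, following the convention described in the docstring above.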
function zephyr_lattice_5tuple(size::NTuple{3,Int})
m, n, t = size # t is identical to dwave (Tile parameter for the Zephyr lattice)
map = Dict{Int,NTuple{3,Int}}()
# ( u, w, k, ζ, z)
for u in 0
for w ∈ 0:2:2*m, k ∈ 0:t-1, ζ ∈ 0:1, (i, z) ∈ enumerate(0:n-1)
push!(map, zephyr_to_linear(m, t, (u, w, k, ζ, z)) => (2 * i, w + 1, 1))
end
for w ∈ 1:2:2*m, k ∈ 0:t-1, ζ ∈ 0:1, z ∈ 0:n-1
push!(
map,
zephyr_to_linear(m, t, (u, w, k, ζ, z)) => (2 * z + 2 * ζ + 1, w + 1, 1),
)
end
end
for u in 1
for w ∈ 0:2:2*m, k ∈ 0:t-1, ζ ∈ 0:1, (i, z) ∈ enumerate(0:n-1)
push!(map, zephyr_to_linear(m, t, (u, w, k, ζ, z)) => (w + 1, 2 * i, 2))
end
for w ∈ 1:2:2*m, k ∈ 0:t-1, ζ ∈ 0:1, z ∈ 0:n-1
push!(
map,
zephyr_to_linear(m, t, (u, w, k, ζ, z)) => (w + 1, 2 * z + 2 * ζ + u, 2),
)
end
end
map
end
function rotate(m::Int, n::Int)
new_dict = Dict{NTuple{3,Int},NTuple{3,Int}}()
for (k, j) ∈ enumerate(1:2:m)
for (l, i) ∈ enumerate(n-1:-2:1)
push!(new_dict, (i, j, 1) => (i / 2 + k - 1, l + k - 1, 1))
push!(new_dict, (i, j, 2) => (i / 2 + k - 1, l + k - 1, 2))
end
end
for (k, j) ∈ enumerate(2:2:m)
for (l, i) ∈ enumerate(n:-2:1)
push!(new_dict, (i, j, 1) => ((i - 1) / 2 + k, l + k - 1, 1))
push!(new_dict, (i, j, 2) => ((i - 1) / 2 + k, l + k - 1, 2))
end
end
new_dict
end
function empty_clusters(m::Int, n::Int)
p = (m - 1) / 2
count, ii = 0, []
for (i, j) ∈ enumerate(1:p-1)
count += i
push!(ii, i)
end
(count, reverse(ii))
end
function zephyr_lattice_5tuple_rotated(m::Int, n::Int, map::Dict{Int,NTuple{3,Int}})
rotated_map = rotate(m, n)
new_map = Dict{Int,NTuple{3,Int}}()
(empty, ii) = empty_clusters(m, n)
for k in keys(map)
push!(new_map, k => rotated_map[map[k]])
end
empty_vertices = empty_indexing(m, n)
for (k, l) ∈ enumerate(empty_vertices)
push!(new_map, -k => l)
end
new_map
end
function empty_indexing(m::Int, n::Int)
(empty, ii) = empty_clusters(m, n)
p = Int((m - 1) / 2)
empty_vertices = []
for (k, l) ∈ enumerate(ii)
for i ∈ 1:l
push!(empty_vertices, (k, i, 1))
push!(empty_vertices, (k, i, 2))
push!(empty_vertices, (k, i + m - p + k - 1, 1))
push!(empty_vertices, (k, i + m - p + k - 1, 2))
end
end
for (k, l) ∈ enumerate(reverse(ii))
for i ∈ 1:l
push!(empty_vertices, (k + m - p, i, 1))
push!(empty_vertices, (k + m - p, i, 2))
push!(empty_vertices, (k + m - p, ii[k] + m - p + k - i, 1))
push!(empty_vertices, (k + m - p, ii[k] + m - p + k - i, 2))
end
end
empty_vertices
end
function periodic_lattice(size::NTuple{3,Int})
mm, nn, tt = size
m, n = 2 * mm, 2 * nn
map = super_square_lattice((m, n, 1))
new_map = Dict{Int,NTuple{2,Int}}()
for (key, val) ∈ map
i, j = val
if i <= m / 2
if j <= m / 2
push!(new_map, key => (i, j))
elseif j > m / 2
push!(new_map, key => (i, m - j + 1))
end
elseif i > m / 2
if j <= m / 2
push!(new_map, key => (m - i + 1, j))
elseif j > m / 2
push!(new_map, key => (m - i + 1, m - j + 1))
end
end
end
new_map
end
| SpinGlassNetworks | https://github.com/euro-hpc-pl/SpinGlassNetworks.jl.git |
|
[
"Apache-2.0"
] | 1.2.0 | d7eb94c71ddcf3ff4e96c2ef15c6db2c81fe60a4 | code | 22182 | export potts_hamiltonian,
rank_reveal,
split_into_clusters,
decode_potts_hamiltonian_state,
energy,
energy_2site,
truncate_potts_hamiltonian,
exact_cond_prob,
bond_energy,
cluster_size
"""
$(TYPEDSIGNATURES)
Group spins into clusters based on an assignment rule, mapping Potts Hamiltonian coordinates to groups of spins in the Ising graph.
Dict(Potts Hamiltonian coordinates -> group of spins in Ising graph)
# Arguments:
- `ig::LabelledGraph{G, L}`: The Ising graph represented as a labeled graph.
- `assignment_rule`: A mapping that assigns Ising graph vertices to clusters based on Potts Hamiltonian coordinates.
# Returns:
- `clusters::Dict{L, Vertex}`: A dictionary mapping cluster identifiers to representative vertices in the Ising graph.
This function groups spins in the Ising graph into clusters based on an assignment rule.
The assignment rule defines how Potts Hamiltonian coordinates correspond to clusters of spins in the Ising graph.
Each cluster is represented by a vertex from the Ising graph.
The `split_into_clusters` function is useful for organizing and analyzing spins in complex spin systems, particularly in the context of Potts Hamiltonian.
"""
function split_into_clusters(ig::LabelledGraph{G,L}, assignment_rule) where {G,L}
cluster_id_to_verts = Dict(i => L[] for i in values(assignment_rule))
for v in vertices(ig)
push!(cluster_id_to_verts[assignment_rule[v]], v)
end
Dict(i => first(cluster(ig, verts)) for (i, verts) ∈ cluster_id_to_verts)
end
"""
$(TYPEDSIGNATURES)
Create a Potts Hamiltonian.
This function constructs a Potts Hamiltonian from an Ising graph by introducing a natural order in Potts Hamiltonian coordinates.
# Arguments:
- `ig::IsingGraph`: The Ising graph representing the spin system.
- `num_states_cl::Int`: The number of states per cluster taken into account when calculating the spectrum. In every cluster the number of states is constant.
- `spectrum::Function`: A function for calculating the spectrum of the Potts Hamiltonian. It can be `full_spectrum` or `brute_force`.
- `cluster_assignment_rule::Dict{Int, L}`: A dictionary specifying the assignment rule that maps Ising graph vertices to clusters. It can be `super_square_lattice`, `pegasus_lattice` or `zephyr_lattice`.
# Returns:
- `potts_h::LabelledGraph{S, T}`: The Potts Hamiltonian represented as a labelled graph.
The `potts_hamiltonian` function takes an Ising graph (`ig`) as input and constructs a Potts Hamiltonian by
introducing a natural order in Potts Hamiltonian coordinates.
It allows you to specify the number of states per cluster, a spectrum calculation function,
and a cluster assignment rule, which maps Ising graph vertices to clusters.
"""
function potts_hamiltonian(
ig::IsingGraph,
num_states_cl::Int;
spectrum::Function = full_spectrum,
cluster_assignment_rule::Dict{Int,L}, # e.g. square lattice
) where {L}
ns = Dict(i => num_states_cl for i ∈ Set(values(cluster_assignment_rule)))
potts_hamiltonian(
ig,
ns,
spectrum = spectrum,
cluster_assignment_rule = cluster_assignment_rule,
)
end
"""
$(TYPEDSIGNATURES)
Create a Potts Hamiltonian.
This function constructs a Potts Hamiltonian from an Ising graph by introducing a natural order in Potts Hamiltonian coordinates.
# Arguments:
- `ig::IsingGraph`: The Ising graph representing the spin system.
- `num_states_cl::Dict{T, Int}`: A dictionary specifying the number of states per cluster for different clusters. Number of states are considered when calculating the spectrum.
- `spectrum::Function`: A function for calculating the spectrum of the Potts Hamiltonian. It can be `full_spectrum` or `brute_force`.
- `cluster_assignment_rule::Dict{Int, T}`: A dictionary specifying the assignment rule that maps Ising graph vertices to clusters. It can be `super_square_lattice`, `pegasus_lattice` or `zephyr_lattice`.
# Returns:
- `potts_h::LabelledGraph{MetaDiGraph}`: The Potts Hamiltonian represented as a labelled graph.
The `potts_hamiltonian` function takes an Ising graph (`ig`) as input and constructs a Potts Hamiltonian
by introducing a natural order in Potts Hamiltonian coordinates. It allows you to specify the number of
states per cluster which can vary for different clusters, a spectrum calculation function,
and a cluster assignment rule, which maps Ising graph vertices to clusters.
"""
function potts_hamiltonian(
ig::IsingGraph,
num_states_cl::Dict{T,Int};
spectrum::Function = full_spectrum,
cluster_assignment_rule::Dict{Int,T},
) where {T}
potts_h = LabelledGraph{MetaDiGraph}(sort(unique(values(cluster_assignment_rule))))
lp = PoolOfProjectors{Int}()
for (v, cl) ∈ split_into_clusters(ig, cluster_assignment_rule)
sp = spectrum(cl, num_states = get(num_states_cl, v, basis_size(cl)))
set_props!(potts_h, v, Dict(:cluster => cl, :spectrum => sp))
end
for (i, v) ∈ enumerate(vertices(potts_h)), w ∈ vertices(potts_h)[i+1:end]
cl1, cl2 = get_prop(potts_h, v, :cluster), get_prop(potts_h, w, :cluster)
outer_edges, J = inter_cluster_edges(ig, cl1, cl2)
if !isempty(outer_edges)
ind1 = any(i -> i != 0, J, dims = 2)
ind2 = any(i -> i != 0, J, dims = 1)
ind1 = reshape(ind1, length(ind1))
ind2 = reshape(ind2, length(ind2))
JJ = J[ind1, ind2]
states_v = get_prop(potts_h, v, :spectrum).states
states_w = get_prop(potts_h, w, :spectrum).states
pl, unique_states_v = rank_reveal([s[ind1] for s ∈ states_v], :PE)
pr, unique_states_w = rank_reveal([s[ind2] for s ∈ states_w], :PE)
en = inter_cluster_energy(unique_states_v, JJ, unique_states_w)
ipl = add_projector!(lp, pl)
ipr = add_projector!(lp, pr)
add_edge!(potts_h, v, w)
set_props!(
potts_h,
v,
w,
Dict(:outer_edges => outer_edges, :ipl => ipl, :en => en, :ipr => ipr),
)
end
end
set_props!(potts_h, Dict(:pool_of_projectors => lp))
potts_h
end
"""
$(TYPEDSIGNATURES)
Create a Potts Hamiltonian with optional cluster sizes.
This function constructs a Potts Hamiltonian from an Ising graph by introducing a natural order in Potts Hamiltonian coordinates.
# Arguments:
- `ig::IsingGraph`: The Ising graph representing the spin system.
- `spectrum::Function`: A function for calculating the spectrum of the Potts Hamiltonian. It can be `full_spectrum` or `brute_force`. Default is `full_spectrum`.
- `cluster_assignment_rule::Dict{Int, T}`: A dictionary specifying the assignment rule that maps Ising graph vertices to clusters. It can be `super_square_lattice`, `pegasus_lattice` or `zephyr_lattice`.
# Returns:
- `potts_h::LabelledGraph{MetaDiGraph}`: The Potts Hamiltonian represented as a labelled graph.
The `potts_hamiltonian` function takes an Ising graph (`ig`) as input and constructs a Potts Hamiltonian
by introducing a natural order in Potts Hamiltonian coordinates.
You can optionally specify a spectrum calculation function and a cluster assignment rule, which maps Ising graph vertices to clusters.
This version of `potts_hamiltonian` function does not truncate states in the cluster while calculating the spectrum.
If you want to specify custom cluster sizes, use the alternative version of this function by
passing a `Dict{T, Int}` containing the number of states per cluster as `num_states_cl`.
"""
function potts_hamiltonian(
ig::IsingGraph;
spectrum::Function = full_spectrum,
cluster_assignment_rule::Dict{Int,T},
) where {T}
potts_hamiltonian(
ig,
Dict{T,Int}(),
spectrum = spectrum,
cluster_assignment_rule = cluster_assignment_rule,
)
end
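# Usage sketch (assumes `ig` covers a 2x2 grid of clusters with 4 spins each):
# potts_h = potts_hamiltonian(
#     ig;
#     spectrum = full_spectrum,
#     cluster_assignment_rule = super_square_lattice((2, 2, 4)),
# )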
# """
# $(TYPEDSIGNATURES)
# Reveal ranks and energies in a specified order.
# This function calculates and reveals the ranks and energies of a set of states in either the
# 'PE' (Projector Energy) or 'EP' (Energy Projector) order.
# # Arguments:
# - `energy`: The energy values of states.
# - `order::Symbol`: The order in which to reveal the ranks and energies.
# It can be either `:PE` for 'Projector Energy)' order (default) or `:EP` for 'Energy Projector' order.
# # Returns:
# - If `order` is `:PE`, the function returns a tuple `(P, E)` where:
# - `P`: A permutation matrix representing projectors.
# - `E`: An array of energy values.
# - If `order` is `:EP`, the function returns a tuple `(E, P)` where:
# - `E`: An array of energy values.
# - `P`: A permutation matrix representing projectors.
# """
# function rank_reveal(energy, order=:PE) #TODO: add type
# @assert order ∈ (:PE, :EP)
# dim = order == :PE ? 1 : 2
# E, idx = unique_dims(energy, dim)
# P = identity.(idx)
# order == :PE ? (P, E) : (E, P)
# end
"""
$(TYPEDSIGNATURES)
TODO: check the order consistency over external packages.
Decode a Potts Hamiltonian state into Ising graph spin values.
This function decodes a state from a Potts Hamiltonian into Ising graph spin values and
returns a dictionary mapping each Ising graph vertex to its corresponding spin value.
# Arguments:
- `potts_h::LabelledGraph{S, T}`: The Potts Hamiltonian represented as a labeled graph.
- `state::Vector{Int}`: The state to be decoded, represented as an array of state indices for each vertex in the Potts Hamiltonian.
# Returns:
- `spin_values::Dict{Int, Int}`: A dictionary mapping each Ising graph vertex to its corresponding spin value.
This function assumes that the state has the same order as the vertices in the Potts Hamiltonian.
It decodes the state consistently based on the cluster assignments and spectra of the Potts Hamiltonian.
"""
function decode_potts_hamiltonian_state(
potts_h::LabelledGraph{S,T},
state::Vector{Int},
) where {S,T}
ret = Dict{Int,Int}()
for (i, vert) ∈ zip(state, vertices(potts_h))
spins = get_prop(potts_h, vert, :cluster).labels
states = get_prop(potts_h, vert, :spectrum).states
if length(states) > 0
curr_state = states[i]
merge!(ret, Dict(k => v for (k, v) ∈ zip(spins, curr_state)))
end
end
ret
end
"""
$(TYPEDSIGNATURES)
Calculate the energy of a Potts Hamiltonian state.
This function calculates the energy of a given state in a Potts Hamiltonian.
The state is represented as a dictionary mapping each Ising graph vertex to its corresponding spin value.
# Arguments:
- `potts_h::LabelledGraph{S, T}`: The Potts Hamiltonian represented as a labeled graph.
- `σ::Dict{T, Int}`: A dictionary mapping Ising graph vertices to their spin values.
# Returns:
- `en_potts_h::Float64`: The energy of the state in the Potts Hamiltonian.
This function computes the energy by summing the energies associated with individual
clusters and the interaction energies between clusters.
It takes into account the cluster spectra and projectors stored in the Potts Hamiltonian.
"""
function energy(potts_h::LabelledGraph{S,T}, σ::Dict{T,Int}) where {S,T}
en_potts_h = 0.0
for v ∈ vertices(potts_h)
en_potts_h += get_prop(potts_h, v, :spectrum).energies[σ[v]]
end
for edge ∈ edges(potts_h)
idx_pl = get_prop(potts_h, edge, :ipl)
pl = get_projector!(get_prop(potts_h, :pool_of_projectors), idx_pl, :CPU)
idx_pr = get_prop(potts_h, edge, :ipr)
pr = get_projector!(get_prop(potts_h, :pool_of_projectors), idx_pr, :CPU)
en = get_prop(potts_h, edge, :en)
en_potts_h += en[pl[σ[src(edge)]], pr[σ[dst(edge)]]]
end
en_potts_h
end
"""
$(TYPEDSIGNATURES)
Calculate the interaction energy between two nodes in a Potts Hamiltonian.
This function computes the interaction energy between two specified nodes in a Potts Hamiltonian, represented as a labeled graph.
# Arguments:
- `potts_h::LabelledGraph{S, T}`: The Potts Hamiltonian represented as a labeled graph.
- `i::Int`: The index of the first site.
- `j::Int`: The index of the second site.
# Returns:
- `int_eng::AbstractMatrix{T}`: The interaction energy matrix between the specified sites.
The function checks if there is an interaction edge between the two sites (i, j) in both directions (i -> j and j -> i).
If such edges exist, it retrieves the interaction energy matrix, projectors, and calculates the interaction energy.
If no interaction edge is found, it returns a zero matrix.
"""
function energy_2site(potts_h::LabelledGraph{S,T}, i::Int, j::Int) where {S,T}
# matrix of interaction energies between two nodes
if has_edge(potts_h, (i, j, 1), (i, j, 2))
en12 = copy(get_prop(potts_h, (i, j, 1), (i, j, 2), :en))
idx_pl = get_prop(potts_h, (i, j, 1), (i, j, 2), :ipl)
pl = copy(get_projector!(get_prop(potts_h, :pool_of_projectors), idx_pl, :CPU))
idx_pr = get_prop(potts_h, (i, j, 1), (i, j, 2), :ipr)
pr = copy(get_projector!(get_prop(potts_h, :pool_of_projectors), idx_pr, :CPU))
int_eng = en12[pl, pr]
elseif has_edge(potts_h, (i, j, 2), (i, j, 1))
en21 = copy(get_prop(potts_h, (i, j, 2), (i, j, 1), :en))
idx_pl = get_prop(potts_h, (i, j, 2), (i, j, 1), :ipl)
pl = copy(get_projector!(get_prop(potts_h, :pool_of_projectors), idx_pl, :CPU))
idx_pr = get_prop(potts_h, (i, j, 2), (i, j, 1), :ipr)
pr = copy(get_projector!(get_prop(potts_h, :pool_of_projectors), idx_pr, :CPU))
int_eng = en21[pl, pr]'
else
int_eng = zeros(1, 1)
end
int_eng
end
"""
$(TYPEDSIGNATURES)
Calculate the bond energy between two clusters in a Potts Hamiltonian.
This function computes the bond energy between two specified clusters (cluster nodes) in a Potts Hamiltonian, represented as a labeled graph.
# Arguments:
- `potts_h::LabelledGraph{S, T}`: The Potts Hamiltonian represented as a labeled graph.
- `potts_h_u::NTuple{N, Int64}`: The coordinates of the first cluster.
- `potts_h_v::NTuple{N, Int64}`: The coordinates of the second cluster.
- `σ::Int`: Index for which the bond energy is calculated.
# Returns:
- `energies::AbstractVector{T}`: The bond energy vector between the two clusters for the specified index.
The function checks if there is an edge between the two clusters (u -> v and v -> u).
If such edges exist, it retrieves the bond energy matrix and projectors and calculates the bond energy.
If no bond edge is found, it returns a zero vector.
"""
function bond_energy(
potts_h::LabelledGraph{S,T},
potts_h_u::NTuple{N,Int64},
potts_h_v::NTuple{N,Int64},
σ::Int,
) where {S,T,N}
if has_edge(potts_h, potts_h_u, potts_h_v)
ipu, en, ipv = get_prop.(Ref(potts_h), Ref(potts_h_u), Ref(potts_h_v), (:ipl, :en, :ipr))
pu = get_projector!(get_prop(potts_h, :pool_of_projectors), ipu, :CPU)
pv = get_projector!(get_prop(potts_h, :pool_of_projectors), ipv, :CPU)
@inbounds energies = en[pu, pv[σ]]
elseif has_edge(potts_h, potts_h_v, potts_h_u)
ipv, en, ipu = get_prop.(Ref(potts_h), Ref(potts_h_v), Ref(potts_h_u), (:ipl, :en, :ipr))
pu = get_projector!(get_prop(potts_h, :pool_of_projectors), ipu, :CPU)
pv = get_projector!(get_prop(potts_h, :pool_of_projectors), ipv, :CPU)
@inbounds energies = en[pv[σ], pu]
else
energies = zeros(cluster_size(potts_h, potts_h_u))
end
end
"""
$(TYPEDSIGNATURES)
Get the size of a cluster in a Potts Hamiltonian.
This function returns the size (number of states) of a cluster in a Potts Hamiltonian, represented as a labeled graph.
# Arguments:
- `potts_hamiltonian::LabelledGraph{S, T}`: The Potts Hamiltonian represented as a labeled graph.
- `vertex::T`: The vertex (cluster) for which the size is to be determined.
# Returns:
- `size::Int`: The number of states in the specified cluster.
The function retrieves the spectrum associated with the specified cluster and returns the length of the energy vector in that spectrum.
"""
function cluster_size(potts_hamiltonian::LabelledGraph{S,T}, vertex::T) where {S,T}
length(get_prop(potts_hamiltonian, vertex, :spectrum).energies)
end
"""
$(TYPEDSIGNATURES)
Calculate the exact conditional probability of a target state in a Potts Hamiltonian.
This function computes the exact conditional probability of a specified target state in a Potts Hamiltonian, represented as a labelled graph.
# Arguments:
- `potts_hamiltonian::LabelledGraph{S, T}`: The Potts Hamiltonian represented as a labeled graph.
- `beta`: The inverse temperature parameter.
- `target_state::Dict`: A dictionary specifying the target state as a mapping of cluster vertices to Ising spin values.
# Returns:
- `prob::Float64`: The exact conditional probability of the target state.
The function generates all possible states for the clusters in the Potts Hamiltonian,
calculates their energies, and computes the probability distribution based on the given inverse temperature parameter.
It then calculates the conditional probability of the specified target state by summing the probabilities of states that match the target state.
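# Example:
A sketch (feasible only for small Hamiltonians, since all joint states are enumerated):
```julia
p = exact_cond_prob(potts_h, 1.0, Dict((1, 1) => 2))  # probability that cluster (1, 1) is in state 2
```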
"""
function exact_cond_prob(
potts_hamiltonian::LabelledGraph{S,T},
beta,
target_state::Dict,
) where {S,T}
# TODO: Not going to work without PoolOfProjectors
ver = vertices(potts_hamiltonian)
rank = cluster_size.(Ref(potts_hamiltonian), ver)
states = [Dict(ver .=> σ) for σ ∈ Iterators.product([1:r for r ∈ rank]...)]
energies = SpinGlassNetworks.energy.(Ref(potts_hamiltonian), states)
prob = exp.(-beta .* energies)
prob ./= sum(prob)
sum(prob[findall([all(s[k] == v for (k, v) ∈ target_state) for s ∈ states])])
end
"""
$(TYPEDSIGNATURES)
Truncate a Potts Hamiltonian based on specified states.
This function truncates a given Potts Hamiltonian by selecting a subset of states for each cluster based on the provided `states` dictionary.
The resulting truncated Hamiltonian contains only the selected states for each cluster.
# Arguments:
- `potts_h::LabelledGraph{S, T}`: The Potts Hamiltonian represented as a labeled graph.
- `states::Dict`: A dictionary specifying the states to be retained for each cluster.
# Returns:
- `new_potts_h::LabelledGraph{MetaDiGraph}`: The truncated Potts Hamiltonian with reduced states.
The function creates a new Potts Hamiltonian `new_potts_h` with the same structure as the input `potts_h`.
It then updates the spectrum of each cluster in `new_potts_h` by selecting the specified states from the original spectrum.
Additionally, it updates the interactions and projectors between clusters based on the retained states.
The resulting `new_potts_h` represents a truncated version of the original Hamiltonian.
"""
function truncate_potts_hamiltonian(potts_h::LabelledGraph{S,T}, states::Dict) where {S,T}
new_potts_h = LabelledGraph{MetaDiGraph}(vertices(potts_h))
new_lp = PoolOfProjectors{Int}()
for v ∈ vertices(new_potts_h)
cl = get_prop(potts_h, v, :cluster)
sp = get_prop(potts_h, v, :spectrum)
if sp.states == Vector{Int64}[]
sp = Spectrum(sp.energies[states[v]], sp.states, [1])
else
sp = Spectrum(sp.energies[states[v]], sp.states[states[v]])
end
set_props!(new_potts_h, v, Dict(:cluster => cl, :spectrum => sp))
end
for e ∈ edges(potts_h)
v, w = src(e), dst(e)
add_edge!(new_potts_h, v, w)
outer_edges = get_prop(potts_h, v, w, :outer_edges)
ipl = get_prop(potts_h, v, w, :ipl)
pl = get_projector!(get_prop(potts_h, :pool_of_projectors), ipl, :CPU)
ipr = get_prop(potts_h, v, w, :ipr)
pr = get_projector!(get_prop(potts_h, :pool_of_projectors), ipr, :CPU)
en = get_prop(potts_h, v, w, :en)
pl = pl[states[v]]
pr = pr[states[w]]
pl_transition, pl_unique = rank_reveal(pl, :PE)
pr_transition, pr_unique = rank_reveal(pr, :PE)
en = en[pl_unique, pr_unique]
ipl = add_projector!(new_lp, pl_transition)
ipr = add_projector!(new_lp, pr_transition)
set_props!(
new_potts_h,
v,
w,
Dict(:outer_edges => outer_edges, :ipl => ipl, :en => en, :ipr => ipr),
)
end
set_props!(new_potts_h, Dict(:pool_of_projectors => new_lp))
new_potts_h
end
function potts_hamiltonian(
fname::String,
Nx::Union{Integer,Nothing} = nothing,
Ny::Union{Integer,Nothing} = nothing,
)
loaded_rmf = load_openGM(fname, Nx, Ny)
functions = loaded_rmf["fun"]
factors = loaded_rmf["fac"]
N = loaded_rmf["N"]
X, Y = loaded_rmf["Nx"], loaded_rmf["Ny"]
clusters = super_square_lattice((X, Y, 1))
potts_h = LabelledGraph{MetaDiGraph}(sort(collect(values(clusters))))
lp = PoolOfProjectors{Int}()
for v ∈ potts_h.labels
set_props!(potts_h, v, Dict(:cluster => v))
end
for (index, value) in factors
if length(index) == 2
y, x = index
Eng = functions[value]'
sp = Spectrum(collect(Eng), Vector{Vector{Int}}[], zeros(Int, N[y+1, x+1]))
set_props!(potts_h, (x + 1, y + 1), Dict(:spectrum => sp))
elseif length(index) == 4
y1, x1, y2, x2 = index
add_edge!(potts_h, (x1 + 1, y1 + 1), (x2 + 1, y2 + 1))
Eng = functions[value]
ipl = add_projector!(lp, collect(1:N[y1+1, x1+1]))
ipr = add_projector!(lp, collect(1:N[y2+1, x2+1]))
set_props!(
potts_h,
(x1 + 1, y1 + 1),
(x2 + 1, y2 + 1),
Dict(
:outer_edges => ((x1 + 1, y1 + 1), (x2 + 1, y2 + 1)),
:en => Eng,
:ipl => ipl,
:ipr => ipr,
),
)
else
throw(
ErrorException(
"Something is wrong with factor index, it has length $(length(index))",
),
)
end
end
set_props!(potts_h, Dict(:pool_of_projectors => lp, :Nx => X, :Ny => Y))
potts_h
end
| SpinGlassNetworks | https://github.com/euro-hpc-pl/SpinGlassNetworks.jl.git |
|
[
"Apache-2.0"
] | 1.2.0 | d7eb94c71ddcf3ff4e96c2ef15c6db2c81fe60a4 | code | 7521 | export all_states,
local_basis,
gibbs_tensor,
brute_force,
full_spectrum,
Spectrum,
idx,
energy,
matrix_to_integers
@inline idx(σ::Int) = (σ == -1) ? 1 : σ + 1
@inline local_basis(d::Int) = union(-1, 1:d-1)
all_states(rank::Union{Vector,NTuple}) = Iterators.product(local_basis.(rank)...)
const State = Vector{<:Integer}
"""
$(TYPEDSIGNATURES)
A `Spectrum` represents the energy spectrum of a system.
A `Spectrum` consists of energy levels, their corresponding states, and integer representations of the states.
# Fields:
- `energies::Vector{<:Real}`: An array of energy levels.
- `states::AbstractArray{State}`: An array of states.
- `states_int::Vector{Int}`: An array of integer representations of states.
# Constructors:
- `Spectrum(energies, states, states_int)`: Creates a `Spectrum` object with the specified energy levels, states, and integer representations.
- `Spectrum(energies, states)`: Creates a `Spectrum` object with the specified energy levels and states, automatically generating integer representations.
"""
struct Spectrum
energies::Vector{<:Real}
states::AbstractArray{State}
states_int::Vector{Int}
function Spectrum(energies, states, states_int)
new(energies, states, states_int)
end
function Spectrum(energies, states::Matrix)
states_int = matrix_to_integers(states)
new(energies, Vector{eltype(states)}[eachcol(states)...], states_int)
end
function Spectrum(energies, states::Vector{<:State})
states_int = matrix_to_integers(states)
new(energies, states, states_int)
end
end
"""
$(TYPEDSIGNATURES)
Converts a collection of ±1 spin vectors to their integer representations.
This function takes a collection of spin vectors and converts each of them into an integer: every spin is mapped to a bit via `(σ + 1) / 2`, with the first entry interpreted as the least-significant bit.
# Arguments:
- `matrix::Vector{Vector{T}}`: A vector of ±1 spin vectors.
# Returns:
- `Vector{Int}`: An array of integer representations of the spin vectors.
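# Example:
Spins are encoded with the first entry as the least-significant bit:
```julia
matrix_to_integers([[-1, -1], [1, -1]])  # == [0, 1]
```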
"""
function matrix_to_integers(matrix::Vector{<:Vector{<:Integer}})
nrows = length(matrix[1])
multipliers = 2 .^ collect(0:nrows-1)
div.((hcat(matrix...)' .+ 1), 2) * multipliers
end
function matrix_to_integers(matrix::Matrix)
nrows = size(matrix, 1)
multipliers = 2 .^ collect(0:nrows-1)
div.(matrix' .+ 1, 2) * multipliers
end
"""
energy(σ::AbstractArray{<:State}, ig::IsingGraph)
Calculates the energy of a state in an Ising graph.
This function calculates the energy of a given state in the context of an Ising graph.
The energy is computed based on the interactions between spins and their associated biases.
# Arguments:
- `σ::AbstractArray{State}`: An array representing the state of spins in the Ising graph.
- `ig::IsingGraph`: The Ising graph defining the interactions and biases.
# Returns:
- `Vector{Float64}`: An array of energy values for each state.
"""
function energy(σ::AbstractArray{<:State}, ig::IsingGraph)
J, h = couplings(ig), biases(ig)
dot.(σ, Ref(J), σ) + dot.(Ref(h), σ)
end
"""
$(TYPEDSIGNATURES)
Calculates the energy of a state in an Ising graph.
This function computes the energy of a given state in the context of an Ising graph.
The energy is calculated based on the interactions between spins and their associated biases.
# Arguments:
- `ig::IsingGraph{T}`: The Ising graph defining the interactions and biases.
- `ig_state::Dict{Int, Int}`: A dictionary mapping spin indices to their corresponding states.
# Returns:
- `T`: The energy of the state in the Ising graph.
"""
function energy(ig::IsingGraph{T}, ig_state::Dict{Int,Int}) where {T}
en = zero(T)
for (i, σ) ∈ ig_state
en += get_prop(ig, i, :h) * σ
for (j, η) ∈ ig_state
if has_edge(ig, i, j)
en += T(1 / 2) * σ * get_prop(ig, i, j, :J) * η
elseif has_edge(ig, j, i)
en += T(1 / 2) * σ * get_prop(ig, j, i, :J) * η
end
end
end
en
end
"""
$(TYPEDSIGNATURES)
Generates the energy spectrum for an Ising graph.
This function computes the energy spectrum (energies and corresponding states) for a given Ising graph.
The energy spectrum represents all possible energy levels and their associated states in the Ising graph.
# Arguments:
- `ig::IsingGraph{T}`: The Ising graph for which the energy spectrum is generated.
# Returns:
- `Spectrum`: An instance of the `Spectrum` type containing the energy levels and states.
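# Example:
A minimal sketch for a two-spin system (the instance is illustrative):
```julia
ig = ising_graph(Dict((1, 1) => 0.1, (2, 2) => -0.5, (1, 2) => -1.0))
sp = Spectrum(ig)  # 2^2 = 4 energies, states, and integer state labels
```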
"""
function Spectrum(ig::IsingGraph{T}) where {T}
L = nv(ig)
N = 2^L
energies = zeros(T, N)
states = Vector{State}(undef, N)
J, h = couplings(ig), biases(ig)
Threads.@threads for i = 0:N-1
σ = 2 .* digits(i, base = 2, pad = L) .- 1
@inbounds energies[i+1] = dot(σ, J, σ) + dot(h, σ)
@inbounds states[i+1] = σ
end
Spectrum(energies, states)
end
"""
$(TYPEDSIGNATURES)
Computes the Gibbs tensor for an Ising graph at a given inverse temperature.
This function calculates the Gibbs tensor for an Ising graph at a specified inverse temperature (β).
The Gibbs tensor encodes the Boltzmann distribution over all states at the given inverse temperature.
# Arguments:
- `ig::IsingGraph{T}`: The Ising graph for which the Gibbs tensor is computed.
- `β::T (optional)`: The inverse temperature parameter. Default is 1.
# Returns:
- `Array{T}`: An array of Boltzmann probabilities with one dimension per spin; its entries sum to 1.
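# Example:
A sketch for a small Ising graph (the instance is illustrative):
```julia
ig = ising_graph(Dict((1, 1) => 0.1, (2, 2) => -0.5, (1, 2) => -1.0))
ρ = gibbs_tensor(ig, 1.0)  # 2×2 array of state probabilities; sum(ρ) ≈ 1
```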
"""
function gibbs_tensor(ig::IsingGraph{T}, β::T = 1) where {T}
σ = collect.(all_states(rank_vec(ig)))
ρ = exp.(-β .* energy(σ, ig))
ρ ./ sum(ρ)
end
function brute_force(ig::IsingGraph, s::Symbol = :CPU; num_states::Int = 1)
brute_force(ig, Val(s); num_states)
end
"""
$(TYPEDSIGNATURES)
TODO only one of brute_force and full_spectrum should remain
Performs brute-force calculation of the lowest-energy states and their energies for an Ising graph.
This function exhaustively computes the lowest-energy states and their corresponding energies for an Ising graph.
The calculation is done using brute-force enumeration, making it feasible only for small Ising graphs.
# Arguments:
- `ig::IsingGraph{T}`: The Ising graph for which the lowest-energy states are computed.
- `::Val{:CPU}`: A value indicating that the computation is performed on the CPU.
- `num_states::Int (optional)`: The maximum number of lowest-energy states to calculate. Default is 1.
# Returns:
- `Spectrum`: A `Spectrum` object containing the lowest-energy states and their energies.
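# Example:
A sketch (feasible only for graphs small enough for exhaustive enumeration):
```julia
ig = ising_graph(Dict((1, 1) => 0.1, (2, 2) => -0.5, (1, 2) => -1.0))
sp = brute_force(ig, num_states = 2)  # two lowest-energy states and their energies
```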
"""
function brute_force(ig::IsingGraph{T}, ::Val{:CPU}; num_states::Int = 1) where {T}
L = nv(ig)
L == 0 && return Spectrum(zeros(T, 1), Vector{Vector{Int}}[], zeros(T, 1))
sp = Spectrum(ig)
num_states = min(num_states, prod(rank_vec(ig)))
idx = partialsortperm(vec(sp.energies), 1:num_states)
Spectrum(sp.energies[idx], sp.states[idx])
end
function full_spectrum(ig::IsingGraph{T}; num_states::Int = 1) where {T}
nv(ig) == 0 && return Spectrum(zeros(T, 1), Vector{Vector{Int}}[], zeros(T, 1))
ig_rank = rank_vec(ig)
num_states = min(num_states, prod(ig_rank))
σ = collect.(all_states(ig_rank))
energies = energy(σ, ig)
Spectrum(energies[begin:num_states], σ[begin:num_states])
end
function inter_cluster_energy(
cl1_states::Vector{<:State},
J::Matrix{<:Real},
cl2_states::Vector{<:State},
)
hcat(collect.(cl1_states)...)' * J * hcat(collect.(cl2_states)...)
end
| SpinGlassNetworks | https://github.com/euro-hpc-pl/SpinGlassNetworks.jl.git |
|
[
"Apache-2.0"
] | 1.2.0 | d7eb94c71ddcf3ff4e96c2ef15c6db2c81fe60a4 | code | 6668 | export truncate_potts_hamiltonian_2site_energy,
truncate_potts_hamiltonian_1site_BP,
truncate_potts_hamiltonian_2site_BP,
select_numstate_best
"""
$(TYPEDSIGNATURES)
Truncates a Potts Hamiltonian using belief propagation (BP) for a single site cluster.
This function employs belief propagation (BP) to approximate the most probable states and energies for a Potts Hamiltonian
associated with a single-site cluster. It then truncates the Potts Hamiltonian based on the most probable states.
# Arguments:
- `potts_h::LabelledGraph{S, T}`: The Potts Hamiltonian represented as a labeled graph.
- `num_states::Int`: The maximum number of most probable states to keep.
- `beta::Real (optional)`: The inverse temperature parameter for the BP algorithm. Default is 1.0.
- `tol::Real (optional)`: The tolerance value for convergence in BP. Default is 1e-10.
- `iter::Int (optional)`: The maximum number of BP iterations. Default is 1.
# Returns:
- `LabelledGraph{S, T}`: A truncated Potts Hamiltonian.
"""
function truncate_potts_hamiltonian_1site_BP(
potts_h::LabelledGraph{S,T},
num_states::Int;
beta = 1.0,
tol = 1e-10,
iter = 1,
) where {S,T}
states = Dict()
beliefs = belief_propagation(potts_h, beta; tol = tol, iter = iter)
for node in vertices(potts_h)
indices = partialsortperm(beliefs[node], 1:min(num_states, length(beliefs[node])))
push!(states, node => indices)
end
truncate_potts_hamiltonian(potts_h, states)
end
"""
$(TYPEDSIGNATURES)
Truncate a Potts Hamiltonian based on 2-site energy states.
This function truncates a Potts Hamiltonian by considering 2-site energy states and selecting the most probable states
to keep. It computes the energies for all 2-site combinations and selects the states that maximize the probability.
# Arguments:
- `potts_h::LabelledGraph{S, T}`: The Potts Hamiltonian represented as a labeled graph.
- `num_states::Int`: The maximum number of most probable states to keep.
# Returns:
- `LabelledGraph{S, T}`: A truncated Potts Hamiltonian.
"""
function truncate_potts_hamiltonian_2site_energy(
potts_h::LabelledGraph{S,T},
num_states::Int,
) where {S,T}
# TODO: name to be cleaned up to make it consistent with square2 and squarestar2
states = Dict()
for node in vertices(potts_h)
if node in keys(states)
continue
end
i, j, _ = node
E1 = copy(get_prop(potts_h, (i, j, 1), :spectrum).energies)
E2 = copy(get_prop(potts_h, (i, j, 2), :spectrum).energies)
E = energy_2site(potts_h, i, j) .+ reshape(E1, :, 1) .+ reshape(E2, 1, :)
sx, sy = size(E)
E = reshape(E, sx * sy)
ind1, ind2 = select_numstate_best(E, sx, num_states)
push!(states, (i, j, 1) => ind1)
push!(states, (i, j, 2) => ind2)
end
truncate_potts_hamiltonian(potts_h, states)
end
# Load a previously saved object from `filename`; returns `nothing` if the file is missing or cannot be read.
function load_file(filename)
    if isfile(filename)
        try
            load_object(filename)
        catch
            return nothing
        end
    else
        return nothing
    end
end
"""
$(TYPEDSIGNATURES)
Truncate a Potts Hamiltonian based on 2-site belief propagation states.
This function truncates a Potts Hamiltonian by considering 2-site belief propagation states and selecting the most probable states
to keep. It computes the beliefs for all 2-site combinations and selects the states that maximize the probability.
# Arguments:
- `potts_h::LabelledGraph{S, T}`: The Potts Hamiltonian represented as a labelled graph.
- `beliefs::Dict`: A dictionary containing belief values for 2-site interactions.
- `num_states::Int`: The maximum number of most probable states to keep.
- `beta::Real (optional)`: The inverse temperature parameter (default is 1.0).
# Returns:
- `LabelledGraph{S, T}`: A truncated Potts Hamiltonian.
"""
function truncate_potts_hamiltonian_2site_BP(
potts_h::LabelledGraph{S,T},
beliefs::Dict,
num_states::Int,
result_folder::String = "results_folder",
inst::String = "inst";
beta = 1.0,
) where {S,T}
states = Dict()
saved_states = load_file(joinpath(result_folder, "$(inst).jld2"))
for node in vertices(potts_h)
if node in keys(states)
continue
end
i, j, _ = node
sx =
has_vertex(potts_h, (i, j, 1)) ?
length(get_prop(potts_h, (i, j, 1), :spectrum).energies) : 1
E = beliefs[(i, j)]
ind1, ind2 = select_numstate_best(E, sx, num_states)
push!(states, (i, j, 1) => ind1)
push!(states, (i, j, 2) => ind2)
end
path = joinpath(result_folder, "$(inst).jld2")
save_object(string(path), states)
truncate_potts_hamiltonian(potts_h, states)
end
"""
$(TYPEDSIGNATURES)
Select a specified number of best states based on energy.
This function selects a specified number of best states from a list of energies based on energy values in two nodes of Potts Hamiltonian.
It fine-tunes the selection to ensure that the resulting states have the expected number.
# Arguments:
- `E::Vector{Real}`: A vector of energy values.
- `sx::Int`: The size of the Potts Hamiltonian for one of the nodes.
- `num_states::Int`: The desired number of states to select.
# Returns:
- `Tuple{Vector{Int}, Vector{Int}}`: A tuple containing two vectors of indices, `ind1` and `ind2`,
which represent the selected states for two nodes of a Potts Hamiltonian.
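# Example:
A sketch for a 2 × 3 grid of joint energies flattened column-wise (values are illustrative):
```julia
E = [0.1, 0.5, 0.2, 0.9, 0.3, 0.8]
ind1, ind2 = select_numstate_best(E, 2, 4)  # index sets whose product is close to 4
```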
"""
function select_numstate_best(E, sx, num_states)
low, high = 1, min(num_states, length(E))
while true
guess = div(low + high, 2)
ind = partialsortperm(E, 1:guess)
ind1 = mod.(ind .- 1, sx) .+ 1
ind2 = div.(ind .- 1, sx) .+ 1
ind1 = sort([Set(ind1)...])
ind2 = sort([Set(ind2)...])
if high - low <= 1
return ind1, ind2
end
if length(ind1) * length(ind2) > num_states
high = guess
else
low = guess
end
end
end
function truncate_potts_hamiltonian(
potts_h,
β,
cs,
result_folder,
inst;
tol = 1e-6,
iter,
)
states = Dict()
saved_states = load_file(joinpath(result_folder, "$(inst).jld2"))
if isnothing(saved_states)
new_potts_h = potts_hamiltonian_2site(potts_h, β)
beliefs = belief_propagation(new_potts_h, β; tol = tol, iter = iter)
potts_h = truncate_potts_hamiltonian_2site_BP(
potts_h,
beliefs,
cs,
result_folder,
inst;
beta = β,
)
else
states = saved_states
potts_h = truncate_potts_hamiltonian(potts_h, states)
end
potts_h
end
| SpinGlassNetworks | https://github.com/euro-hpc-pl/SpinGlassNetworks.jl.git |
|
[
"Apache-2.0"
] | 1.2.0 | d7eb94c71ddcf3ff4e96c2ef15c6db2c81fe60a4 | code | 5722 | export zephyr_to_linear, unique_neighbors, load_openGM
import Base.Prehashed
using HDF5
"""
$(TYPEDSIGNATURES)
Rewritten from dwave-networkx.
m - Grid parameter for the Zephyr lattice.
t - Tile parameter for the Zephyr lattice; must be even.
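Example:
```julia
zephyr_to_linear(2, 4, (0, 0, 0, 0, 0))  # == 1, the first linear index
```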
"""
function zephyr_to_linear(m::Int, t::Int, q::NTuple{5,Int})
M = 2 * m + 1
u, w, k, j, z = q
(((u * M + w) * t + k) * 2 + j) * m + z + 1
end
unique_neighbors(ig::LabelledGraph, i::Int) = filter(j -> j > i, neighbors(ig, i))
# @generated function unique_dims(A::AbstractArray{T,N}, dim::Integer) where {T,N}
# quote
# 1 <= dim <= $N || return copy(A)
# hashes = zeros(UInt, axes(A, dim))
# # Compute hash for each row
# k = 0
# @nloops $N i A d->(if d == dim; k = i_d; end) begin
# @inbounds hashes[k] = hash(hashes[k], hash((@nref $N A i)))
# end
# # Collect index of first row for each hash
# uniquerow = similar(Array{Int}, axes(A, dim))
# firstrow = Dict{Prehashed,Int}()
# for k = axes(A, dim)
# uniquerow[k] = get!(firstrow, Prehashed(hashes[k]), k)
# end
# uniquerows = collect(values(firstrow))
# # Check for collisions
# collided = falses(axes(A, dim))
# @inbounds begin
# @nloops $N i A d->(if d == dim
# k = i_d
# j_d = uniquerow[k]
# else
# j_d = i_d
# end) begin
# if (@nref $N A j) != (@nref $N A i)
# collided[k] = true
# end
# end
# end
# if any(collided)
# nowcollided = similar(BitArray, axes(A, dim))
# while any(collided)
# # Collect index of first row for each collided hash
# empty!(firstrow)
# for j = axes(A, dim)
# collided[j] || continue
# uniquerow[j] = get!(firstrow, Prehashed(hashes[j]), j)
# end
# for v ∈ values(firstrow)
# push!(uniquerows, v)
# end
# # Check for collisions
# fill!(nowcollided, false)
# @nloops $N i A d->begin
# if d == dim
# k = i_d
# j_d = uniquerow[k]
# (!collided[k] || j_d == k) && continue
# else
# j_d = i_d
# end
# end begin
# if (@nref $N A j) != (@nref $N A i)
# nowcollided[k] = true
# end
# end
# (collided, nowcollided) = (nowcollided, collided)
# end
# end
# (@nref $N A d->d == dim ? sort!(uniquerows) : (axes(A, d))), indexin(uniquerow, uniquerows)
# end
# end
"""
$(TYPEDSIGNATURES)
Loads some factored graphs written in openGM format. Assumes rectangular lattice.
Args:
file_name (str): a path to a file with a factor graph in openGM format.
ints Nx, Ny: it is assumed that the graph forms an ``N_x × N_y`` lattice with
nearest-neighbour (and diagonal) interactions only.
Returns:
dictionary with factors and functions defining the energy functional.
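Example (a sketch; the file name and shape follow the bundled benchmark list):
```julia
rmf = load_openGM("penguin-small.h5", 240, 320)
rmf["N"]  # matrix of per-variable state counts, reshaped to (Ny, Nx)
```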
"""
function load_openGM(
fname::String,
Nx::Union{Integer,Nothing} = nothing,
Ny::Union{Integer,Nothing} = nothing,
)
file = h5open(fname, "r")
file_keys = collect(keys(read(file)))
data = read(file[file_keys[1]])
H = collect(Int64, data["header"])
F = Array{Int64}(data["factors"])
J = Array{Int64}(data["function-id-16000"]["indices"])
V = Array{Real}(data["function-id-16000"]["values"])
N = Array{Int64}(data["numbers-of-states"])
if isnothing(Nx) || isnothing(Ny)
filename, _ = splitext(basename(fname))
Nx, Ny = benchmark_names[filename]
end
F = reverse(F)
factors = Dict()
while length(F) > 0
f1 = pop!(F)
z1 = pop!(F)
nn = pop!(F)
n = []
for _ = 1:nn
tt = pop!(F)
ny, nx = divrem(tt, Nx)
push!(n, ny, nx)
end
if length(n) == 4
if abs(n[1] - n[3]) + abs(n[2] - n[4]) ∉ [1, 2] || (
abs(n[1] - n[3]) + abs(n[2] - n[4]) == 2 &&
(abs(n[1] - n[3]) == 2 || abs(n[2] - n[4]) == 2)
)
throw(ErrorException("Not nearest neighbour or diagonal neighbors"))
end
end
if length(n) == 2
if (n[1] >= Ny) || (n[2] >= Nx)
throw(ErrorException("Wrong size"))
end
end
factors[tuple(n...)] = f1
if z1 != 0
throw(ErrorException("Something wrong with the expected convention."))
end
end
J = reverse(J)
functions = Dict()
ii = -1
lower = 0
while length(J) > 0
ii += 1
nn = pop!(J)
n = []
for _ = 1:nn
push!(n, pop!(J))
end
upper = lower + prod(n)
functions[ii] = reshape(V[lower+1:upper], reverse(n)...)'
lower = upper
end
result = Dict(
"fun" => functions,
"fac" => factors,
"N" => reshape(N, (Ny, Nx)),
"Nx" => Nx,
"Ny" => Ny,
)
result
end
benchmark_names = Dict(
"penguin-small" => (240, 320),
"palm-small" => (240, 360),
"clownfish-small" => (240, 360),
"crops-small" => (240, 360),
"pfau-small" => (240, 320),
"lake-small" => (240, 360),
"snail" => (240, 320),
"fourcolors" => (240, 320),
"strawberry-glass-2-small" => (320, 240),
)
| SpinGlassNetworks | https://github.com/euro-hpc-pl/SpinGlassNetworks.jl.git |
|
[
"Apache-2.0"
] | 1.2.0 | d7eb94c71ddcf3ff4e96c2ef15c6db2c81fe60a4 | code | 3555 | """
Instance below looks like this:
1 -- 2
|
3 -- 4
"""
function create_larger_example_potts_hamiltonian_tree_basic()
instance = Dict(
(1, 1) => -0.50,
(2, 2) => 0.25,
(3, 3) => -0.30,
(4, 4) => 0.10,
(1, 2) => -0.23,
(1, 3) => 1.10,
(3, 4) => 0.71,
)
ig = ising_graph(instance)
assignment_rule = Dict(1 => (1, 1, 1), 2 => (1, 2, 1), 3 => (2, 1, 1), 4 => (2, 2, 2))
potts_h = potts_hamiltonian(
ig,
Dict{NTuple{3,Int},Int}(),
spectrum = full_spectrum,
cluster_assignment_rule = assignment_rule,
)
ig, potts_h
end
"""
Instance below looks like this:
1 -- 2 -- 3
|
4 -- 5 -- 6
|
7 -- 8 -- 9
"""
function create_larger_example_potts_hamiltonian_tree()
instance = Dict(
(1, 1) => 0.53,
(2, 2) => -0.25,
(3, 3) => 0.30,
(4, 4) => -0.10,
(5, 5) => -0.10,
(6, 6) => 0.10,
(8, 8) => 0.10,
(9, 9) => 0.01,
(1, 2) => -1.00,
(2, 3) => 1.00,
(1, 4) => 0.33,
(4, 5) => 0.76,
(5, 6) => -0.45,
(4, 7) => -0.28,
(7, 8) => 0.36,
(8, 9) => -1.07,
)
ig = ising_graph(instance)
assignment_rule = Dict(
1 => (1, 1, 1),
2 => (1, 2, 1),
3 => (1, 3, 1),
4 => (2, 1, 1),
5 => (2, 2, 1),
6 => (2, 3, 1),
7 => (3, 1, 1),
8 => (3, 2, 1),
9 => (3, 3, 1),
)
potts_h = potts_hamiltonian(
ig,
Dict{NTuple{3,Int},Int}(),
spectrum = full_spectrum,
cluster_assignment_rule = assignment_rule,
)
ig, potts_h
end
"""
Instance below looks like this:
123 -- 45 -- 6
| |
7 8
"""
function create_larger_example_potts_hamiltonian_tree_pathological()
instance = Dict(
(1, 1) => 0.52,
(2, 2) => 0.25,
(3, 3) => -0.31,
(4, 4) => 0.17,
(5, 5) => -0.12,
(6, 6) => 0.13,
(7, 7) => 0.00,
(8, 8) => 0.43,
(1, 2) => -1.01,
(1, 3) => 1.00,
(3, 4) => 0.97,
(3, 5) => -0.98,
(5, 6) => 1.00,
(2, 7) => 0.53,
(3, 7) => 1.06,
(5, 8) => -0.64,
)
ig = ising_graph(instance)
assignment_rule = Dict(
1 => (1, 1),
2 => (1, 1),
3 => (1, 1),
4 => (1, 2),
5 => (1, 2),
6 => (1, 3),
7 => (2, 1),
8 => (2, 2),
)
potts_h = potts_hamiltonian(
ig,
Dict{NTuple{2,Int},Int}(),
spectrum = full_spectrum,
cluster_assignment_rule = assignment_rule,
)
ig, potts_h
end
@testset "Belief propagation" begin
for (ig, potts_h) ∈ [
create_larger_example_potts_hamiltonian_tree_basic(),
create_larger_example_potts_hamiltonian_tree(),
create_larger_example_potts_hamiltonian_tree_pathological(),
]
for beta ∈ [0.5, 1]
iter = 16
beliefs = belief_propagation(potts_h, beta; iter = iter)
exact_marginal = Dict()
for k in keys(beliefs)
push!(
exact_marginal,
k => [
exact_cond_prob(potts_h, beta, Dict(k => a)) for
a = 1:length(beliefs[k])
],
)
end
for v in keys(beliefs)
temp = -log.(exact_marginal[v]) ./ beta
@test beliefs[v] ≈ temp .- minimum(temp)
end
end
end
end
| SpinGlassNetworks | https://github.com/euro-hpc-pl/SpinGlassNetworks.jl.git |
|
[
"Apache-2.0"
] | 1.2.0 | d7eb94c71ddcf3ff4e96c2ef15c6db2c81fe60a4 | code | 5854 | """
Instance below looks like this:
12 -- 34
|
56 -- 78
"""
function create_larger_example_potts_hamiltonian_tree_2site()
instance = Dict(
(1, 1) => 0.50,
(2, 2) => -0.25,
(3, 3) => 0.30,
(4, 4) => -0.10,
(5, 5) => 0.10,
(6, 6) => 0.20,
(7, 7) => -0.40,
(8, 8) => 0.50,
(1, 2) => -0.01,
(1, 3) => 1.00,
(3, 4) => 1.10,
(2, 5) => 0.77,
(6, 7) => -1.43,
(6, 8) => 0.21,
(6, 2) => 1.00,
)
ig = ising_graph(instance)
assignment_rule1 = Dict(
1 => (1, 1),
2 => (1, 1),
3 => (1, 2),
4 => (1, 2),
5 => (2, 1),
6 => (2, 1),
7 => (2, 2),
8 => (2, 2),
)
assignment_rule2 = Dict(
1 => (1, 1, 1),
2 => (1, 1, 2),
3 => (1, 2, 1),
4 => (1, 2, 2),
5 => (2, 1, 1),
6 => (2, 1, 2),
7 => (2, 2, 1),
8 => (2, 2, 2),
)
potts_h1 = potts_hamiltonian(
ig,
Dict{NTuple{2,Int},Int}(),
spectrum = full_spectrum,
cluster_assignment_rule = assignment_rule1,
)
potts_h2 = potts_hamiltonian(
ig,
Dict{NTuple{3,Int},Int}(),
spectrum = full_spectrum,
cluster_assignment_rule = assignment_rule2,
)
ig, potts_h1, potts_h2
end
"""
Instance below looks like this:
12 -- 345 -- 6
|
789
|
10
"""
function create_larger_example_potts_hamiltonian_tree_2site_pathological()
instance = Dict(
(1, 1) => -0.50,
(2, 2) => 0.25,
(3, 3) => 0.31,
(4, 4) => 0.77,
(5, 5) => -0.15,
(6, 6) => 0.21,
(7, 7) => -0.10,
(8, 8) => 0.13,
(9, 9) => 0.01,
(10, 10) => 0.5,
(1, 2) => -0.01,
(1, 3) => 0.79,
(2, 5) => -0.93,
(5, 6) => 0.81,
(5, 9) => -0.17,
(3, 4) => 0.71,
(4, 6) => -0.43,
(5, 8) => 0.75,
(7, 10) => -1.03,
(8, 9) => 0.65,
(9, 10) => -0.57,
)
ig = ising_graph(instance)
assignment_rule1 = Dict(
1 => (1, 1),
2 => (1, 1),
3 => (1, 2),
4 => (1, 2),
5 => (1, 2),
6 => (1, 3),
7 => (2, 2),
8 => (2, 2),
9 => (2, 2),
10 => (3, 2),
)
assignment_rule2 = Dict(
1 => (1, 1, 1),
2 => (1, 1, 2),
3 => (1, 2, 1),
4 => (1, 2, 1),
5 => (1, 2, 2),
6 => (1, 3, 2),
7 => (2, 2, 1),
8 => (2, 2, 1),
9 => (2, 2, 2),
10 => (3, 2, 2),
)
potts_h1 = potts_hamiltonian(
ig,
Dict{NTuple{2,Int},Int}(),
spectrum = full_spectrum,
cluster_assignment_rule = assignment_rule1,
)
potts_h2 = potts_hamiltonian(
ig,
Dict{NTuple{3,Int},Int}(),
spectrum = full_spectrum,
cluster_assignment_rule = assignment_rule2,
)
ig, potts_h1, potts_h2
end
@testset "Belief propagation 2site" begin
for (ig, potts_h1, potts_h2) ∈ [
create_larger_example_potts_hamiltonian_tree_2site(),
create_larger_example_potts_hamiltonian_tree_2site_pathological(),
]
for beta ∈ [0.6, 1.1]
tol = 1e-12
iter = 16
num_states = 10
new_potts_h1 = potts_hamiltonian_2site(potts_h2, beta)
@test vertices(new_potts_h1) == vertices(potts_h1)
@test edges(new_potts_h1) == edges(potts_h1)
for e ∈ vertices(new_potts_h1)
@test get_prop(new_potts_h1, e, :spectrum).energies ≈
get_prop(potts_h1, e, :spectrum).energies
end
for e ∈ edges(new_potts_h1)
E = get_prop(new_potts_h1, src(e), dst(e), :en)
# @cast E[(l1, l2), (r1, r2)] :=
# E.e11[l1, r1] + E.e21[l2, r1] + E.e12[l1, r2] + E.e22[l2, r2]
a11 = reshape(CuArray(E.e11), size(E.e11, 1), :, size(E.e11, 2))
a21 = reshape(CuArray(E.e21), :, size(E.e21, 1), size(E.e21, 2))
a12 = reshape(CuArray(E.e12), size(E.e12, 1), 1, 1, size(E.e12, 2))
a22 = reshape(CuArray(E.e22), 1, size(E.e22, 1), 1, size(E.e22, 2))
E = @__dot__(a11 + a21 + a12 + a22)
E = reshape(E, size(E, 1) * size(E, 2), size(E, 3) * size(E, 4))
@test Array(E) == get_prop(potts_h1, src(e), dst(e), :en)
end
for e ∈ edges(new_potts_h1)
il1 = get_prop(new_potts_h1, src(e), dst(e), :ipl)
il2 = get_prop(potts_h1, src(e), dst(e), :ipl)
ir1 = get_prop(new_potts_h1, src(e), dst(e), :ipr)
ir2 = get_prop(potts_h1, src(e), dst(e), :ipr)
pl1 = get_projector!(get_prop(new_potts_h1, :pool_of_projectors), il1, :CPU)
pl2 = get_projector!(get_prop(potts_h1, :pool_of_projectors), il2, :CPU)
pr1 = get_projector!(get_prop(new_potts_h1, :pool_of_projectors), ir1, :CPU)
pr2 = get_projector!(get_prop(potts_h1, :pool_of_projectors), ir2, :CPU)
@test pl1 == pl2
@test pr1 == pr2
end
beliefs = belief_propagation(new_potts_h1, beta; iter = iter, tol = tol)
exact_marginal = Dict()
for k in keys(beliefs)
temp =
-1 / beta .*
log.([
exact_cond_prob(potts_h1, beta, Dict(k => a)) for
a = 1:length(beliefs[k])
])
push!(exact_marginal, k => temp .- minimum(temp))
end
for v in keys(beliefs)
@test beliefs[v] ≈ exact_marginal[v]
end
end
end
end
| SpinGlassNetworks | https://github.com/euro-hpc-pl/SpinGlassNetworks.jl.git |
|
[
"Apache-2.0"
] | 1.2.0 | d7eb94c71ddcf3ff4e96c2ef15c6db2c81fe60a4 | code | 9672 | using CSV
using LinearAlgebra
using LabelledGraphs
@testset "Ising graph cannot be created" begin
@testset "if input instance contains duplicate edges" begin
for T ∈ [Float16, Float32, Float64]
@test_throws ArgumentError ising_graph(
T,
Dict((1, 1) => 2.0, (1, 2) => 0.5, (2, 1) => -1.0),
)
end
end
end
for T ∈ [Float16, Float32, Float64]
for (instance, source) ∈ (
("$(@__DIR__)/instances/example.txt", "file"),
(
Dict{Tuple{Int,Int},T}(
(1, 1) => 0.1,
(2, 2) => 0.5,
(1, 4) => -2.0,
(4, 2) => 1.0,
(1, 2) => -0.3,
),
"array",
),
)
@testset "Ising graph created from $(source)" begin
expected_num_vertices = 3
expected_biases = [T(1 / 10), T(1 / 2), T(0)]
expected_couplings = Dict(
LabelledEdge(1, 2) => -T(3 / 10),
LabelledEdge(1, 4) => -T(2),
LabelledEdge(2, 4) => T(1),
)
expected_J_matrix =
[[T(0) -T(3 / 10) -T(2)]; [T(0) T(0) T(1)]; [T(0) T(0) T(0)]]
ig = ising_graph(T, instance)
@test eltype(ig) == T
@testset "contains the same number vertices as original instance" begin
@test nv(ig) == expected_num_vertices
end
@testset "has collection of edges comprising all interactions from instance" begin
# This test uses the fact that edges iterates in the lex ordering.
@test collect(edges(ig)) ==
[LabelledEdge(e...) for e ∈ [(1, 2), (1, 4), (2, 4)]]
end
@testset "stores biases as property of vertices" begin
@test biases(ig) == expected_biases
end
@testset "stores couplings both as property of edges and its own property" begin
@test couplings(ig) == expected_J_matrix
end
@testset "has default rank stored for each active vertex" begin
@test get_prop(ig, :rank) == Dict(1 => 2, 2 => 2, 4 => 2)
end
end
end
end
@testset "Ising graph created with additional parameters" begin
expected_biases = [-0.1, -0.5, 0.0]
expected_couplings = Dict(Edge(1, 2) => 0.3, Edge(1, 3) => 2.0, Edge(2, 3) => -1.0)
expected_J_matrix = [
[0 0.3 2.0]
[0 0 -1.0]
[0 0 0]
]
ig = ising_graph(
"$(@__DIR__)/instances/example.txt",
scale = -1,
rank_override = Dict(1 => 3, 4 => 4),
)
@testset "has rank overriden by rank_override dict" begin
# TODO: update default value of 2 once original implementation
# is also updated.
@test get_prop(ig, :rank) == Dict(1 => 3, 2 => 2, 4 => 4)
end
@testset "has coefficients multiplied by given sign" begin
@test biases(ig) == expected_biases
@test couplings(ig) == expected_J_matrix
end
end
@testset "Ising model is correct" begin
L = 4
N = L^2
instance = "$(@__DIR__)/instances/$(N)_001.txt"
ig = ising_graph(instance)
@test nv(ig) == N
for i ∈ 1:N
@test has_vertex(ig, i)
end
A = adjacency_matrix(ig)
B = zeros(Int, N, N)
for i ∈ 1:N
nbrs = SpinGlassNetworks.unique_neighbors(ig, i)
for j ∈ nbrs
B[i, j] = 1
end
end
@test B + B' == A
@testset "Reading from Dict" begin
instance_dict = Dict()
ising = CSV.File(instance, types = [Int, Int, Float64], header = 0, comment = "#")
for (i, j, v) ∈ ising
push!(instance_dict, (i, j) => v)
end
ig = ising_graph(instance)
ig_dict = ising_graph(instance_dict)
@test nv(ig_dict) == nv(ig)
@test collect(edges(ig)) == collect(edges(ig_dict))
end
@testset "Ground state energy for pathological instance " begin
m = 3
n = 4
t = 3
β = 1
instance = "$(@__DIR__)/instances/pathological/test_$(m)_$(n)_$(t).txt"
ising = CSV.File(instance, types = [Int, Int, Float64], header = 0, comment = "#")
ig = ising_graph(instance)
conf = [
[-1, 1, 1, -1, -1, -1, 1, 1, 1, -1, 1, 1, -1, 1, -1, 1],
[-1, 1, 1, -1, -1, -1, 1, 1, 1, -1, 1, 1, -1, 1, -1, -1],
[-1, 1, 1, -1, -1, 1, 1, 1, 1, -1, 1, 1, -1, 1, -1, 1],
[-1, 1, 1, -1, -1, 1, 1, 1, 1, -1, 1, 1, -1, 1, -1, -1],
]
eng = energy(conf, ig)
couplings = Dict()
for (i, j, v) ∈ ising
push!(couplings, (i, j) => v)
end
cedges = Dict()
push!(cedges, (1, 2) => [(1, 4), (1, 5), (1, 6)])
push!(cedges, (1, 5) => [(1, 13)])
push!(cedges, (2, 3) => [(4, 7), (5, 7), (6, 8), (6, 9)])
push!(cedges, (2, 6) => [(6, 16), (6, 18), (5, 16)])
push!(cedges, (5, 6) => [(13, 16), (13, 18)])
push!(cedges, (6, 10) => [(18, 28)])
push!(
cedges,
(10, 11) => [
(28, 31),
(28, 32),
(28, 33),
(29, 31),
(29, 32),
(29, 33),
(30, 31),
(30, 32),
(30, 33),
],
)
push!(cedges, (2, 2) => [(4, 5), (4, 6), (5, 6), (6, 6)])
push!(cedges, (3, 3) => [(7, 8), (7, 9)])
push!(cedges, (6, 6) => [(16, 18), (16, 16)])
push!(cedges, (10, 10) => [(28, 29), (28, 30), (29, 30)])
config = Dict()
push!(config, 1 => [-1, -1, -1, -1])
push!(config, 2 => [0, 0, 0, 0])
push!(config, 3 => [0, 0, 0, 0])
push!(config, 4 => [1, 1, 1, 1])
push!(config, 5 => [1, 1, 1, 1])
push!(config, 6 => [-1, -1, -1, -1])
push!(config, 7 => [-1, -1, -1, -1])
push!(config, 8 => [-1, -1, 1, 1])
push!(config, 9 => [1, 1, 1, 1])
push!(config, 10 => [0, 0, 0, 0])
push!(config, 11 => [0, 0, 0, 0])
push!(config, 12 => [0, 0, 0, 0])
push!(config, 13 => [1, 1, 1, 1])
push!(config, 14 => [0, 0, 0, 0])
push!(config, 15 => [0, 0, 0, 0])
push!(config, 16 => [1, 1, 1, 1])
push!(config, 17 => [0, 0, 0, 0])
push!(config, 18 => [-1, -1, -1, -1])
push!(config, 19 => [0, 0, 0, 0])
push!(config, 20 => [0, 0, 0, 0])
push!(config, 21 => [0, 0, 0, 0])
push!(config, 22 => [0, 0, 0, 0])
push!(config, 23 => [0, 0, 0, 0])
push!(config, 24 => [0, 0, 0, 0])
push!(config, 25 => [0, 0, 0, 0])
push!(config, 26 => [0, 0, 0, 0])
push!(config, 27 => [0, 0, 0, 0])
push!(config, 28 => [1, 1, 1, 1])
push!(config, 29 => [1, 1, 1, 1])
push!(config, 30 => [-1, -1, -1, -1])
push!(config, 31 => [1, 1, 1, 1])
push!(config, 32 => [-1, -1, -1, -1])
push!(config, 33 => [1, -1, 1, -1])
push!(config, 34 => [0, 0, 0, 0])
push!(config, 35 => [0, 0, 0, 0])
push!(config, 36 => [0, 0, 0, 0])
num_config = length(config[1])
exact_energy = _energy(config, couplings, cedges, num_config)
low_energies = [
-16.4,
-16.4,
-16.4,
-16.4,
-16.1,
-16.1,
-16.1,
-16.1,
-15.9,
-15.9,
-15.9,
-15.9,
-15.9,
-15.9,
-15.6,
-15.6,
-15.6,
-15.6,
-15.6,
-15.6,
-15.4,
-15.4,
]
for i ∈ 1:num_config
@test exact_energy[i] == low_energies[i] == eng[i]
end
end
end
@testset "Pruning" begin
@testset "No vertices of degree zero" begin
instance = Dict(
(1, 1) => 0.1,
(2, 2) => 0.5,
(1, 4) => -2.0,
(4, 2) => 1.0,
(1, 2) => -0.3,
)
ig = ising_graph(instance)
ng = prune(ig)
@test nv(ig) == nv(ng)
end
@testset "All vertices of degree zero with no local fields" begin
instance = Dict((1, 1) => 0.0, (2, 2) => 0.0)
ig = ising_graph(instance)
ng = prune(ig)
@test nv(ng) == 0
end
@testset "All vertices of degree zero, but with local fields" begin
instance = Dict((1, 1) => 0.1, (2, 2) => 0.5)
ig = ising_graph(instance)
ng = prune(ig)
@test nv(ng) == 2
end
@testset "Some vertices of degree zero, but nonzero field" begin
instance = Dict(
(1, 1) => 0.1,
(2, 2) => 0.5,
(1, 4) => -2.0,
(4, 2) => 1.0,
(1, 2) => -0.3,
(5, 5) => 0.1,
)
ig = ising_graph(instance)
ng = prune(ig)
@test nv(ng) == nv(ig)
@test vertices(ng) == collect(1:nv(ng))
end
@testset "Some vertices of degree zero and zero field" begin
instance = Dict(
(1, 1) => 0.1,
(2, 2) => 0.5,
(1, 4) => -2.0,
(4, 2) => 1.0,
(1, 2) => -0.3,
(5, 5) => 0.0,
)
ig = ising_graph(instance)
ng = prune(ig)
@test nv(ng) == nv(ig) - 1
@test vertices(ng) == collect(1:nv(ng))
end
end
| SpinGlassNetworks | https://github.com/euro-hpc-pl/SpinGlassNetworks.jl.git |
|
[
"Apache-2.0"
] | 1.2.0 | d7eb94c71ddcf3ff4e96c2ef15c6db2c81fe60a4 | code | 8691 | using MetaGraphs
using Graphs
using CSV
enum(vec) = Dict(v => i for (i, v) ∈ enumerate(vec))
@testset "Lattice graph" begin
m = 4
n = 4
t = 4
L = 128
instance = "$(@__DIR__)/instances/chimera_droplets/$(L)power/001.txt"
for T ∈ [Float16, Float32, Float64]
ig = ising_graph(T, instance)
potts_h = potts_hamiltonian(
ig,
2,
cluster_assignment_rule = super_square_lattice((m, n, 2 * t)),
)
@test collect(vertices(potts_h)) == [(i, j) for i ∈ 1:m for j ∈ 1:n]
clv = []
cle = []
rank = rank_vec(ig)
for v ∈ vertices(potts_h)
cl = get_prop(potts_h, v, :cluster)
push!(clv, vertices(cl))
push!(cle, collect(edges(cl)))
@test rank_vec(cl) == get_prop.(Ref(ig), vertices(cl), :rank)
end
# Check if graph is factored correctly
@test isempty(intersect(clv...))
@test isempty(intersect(cle...))
end
end
@testset "Factor graph builds on pathological instance" begin
m = 3
n = 4
t = 3
L = n * m * t
instance = "$(@__DIR__)/instances/pathological/test_$(m)_$(n)_$(t).txt"
ising = CSV.File(instance, types = [Int, Int, Float64], header = 0, comment = "#")
couplings = Dict((i, j) => v for (i, j, v) ∈ ising)
cedges = Dict(
((1, 1), (1, 2)) => [(1, 4), (1, 5), (1, 6)],
((1, 1), (2, 1)) => [(1, 13)],
((1, 2), (1, 3)) => [(4, 7), (5, 7), (6, 8), (6, 9)],
((1, 2), (2, 2)) => [(6, 16), (6, 18), (5, 16)],
((2, 1), (2, 2)) => [(13, 16), (13, 18)],
((2, 2), (3, 2)) => [(18, 28)],
((3, 2), (3, 3)) => [
(28, 31),
(28, 32),
(28, 33),
(29, 31),
(29, 32),
(29, 33),
(30, 31),
(30, 32),
(30, 33),
],
)
cells = Dict(
(1, 1) => [1],
(1, 2) => [4, 5, 6],
(1, 3) => [7, 8, 9],
(1, 4) => [],
(2, 1) => [13],
(2, 2) => [16, 18],
(2, 3) => [],
(2, 4) => [],
(3, 1) => [],
(3, 2) => [28, 29, 30],
(3, 3) => [31, 32, 33],
(3, 4) => [],
)
d = 2
rank = Dict(c => fill(d, length(idx)) for (c, idx) ∈ cells if !isempty(idx))
bond_dimensions = [2, 2, 8, 4, 2, 2, 8]
for T ∈ [Float16, Float32, Float64]
ig = ising_graph(T, instance)
@test eltype(ig) == T
potts_h = potts_hamiltonian(
ig,
spectrum = full_spectrum,
cluster_assignment_rule = super_square_lattice((m, n, t)),
)
for (bd, e) in zip(bond_dimensions, edges(potts_h))
ipl, en, ipr =
get_prop(potts_h, e, :ipl), get_prop(potts_h, e, :en), get_prop(potts_h, e, :ipr)
pl = get_projector!(get_prop(potts_h, :pool_of_projectors), ipl, :CPU)
pr = get_projector!(get_prop(potts_h, :pool_of_projectors), ipr, :CPU)
@test minimum(size(en)) == bd
@test maximum(pl) == size(en, 1)
@test maximum(pr) == size(en, 2)
end
for ((i, j), cedge) ∈ cedges
ipl, en, ipr = get_prop(potts_h, i, j, :ipl),
get_prop(potts_h, i, j, :en),
get_prop(potts_h, i, j, :ipr)
pl = get_projector!(get_prop(potts_h, :pool_of_projectors), ipl, :CPU)
pr = get_projector!(get_prop(potts_h, :pool_of_projectors), ipr, :CPU)
base_i = all_states(rank[i])
base_j = all_states(rank[j])
idx_i = enum(cells[i])
idx_j = enum(cells[j])
# Change it to test if energy is calculated using passed 'energy' function
energy = zeros(T, prod(rank[i]), prod(rank[j]))
for (ii, σ) ∈ enumerate(base_i), (jj, η) ∈ enumerate(base_j)
eij = zero(T)
for (k, l) ∈ values(cedge)
kk = enum(cells[i])[k]
ll = enum(cells[j])[l]
s = σ[idx_i[k]]
r = η[idx_j[l]]
J = couplings[k, l]
eij += s * J * r
end
energy[ii, jj] = eij
end
@test eltype(energy) == T == eltype(en)
@test energy ≈ en[pl, pr]
end
@testset "each cluster comprises expected cells" begin
for v ∈ vertices(potts_h)
cl = get_prop(potts_h, v, :cluster)
@test issetequal(vertices(cl), cells[v])
end
end
@testset "each edge comprises expected bunch of edges from source Ising graph" begin
for e ∈ edges(potts_h)
outer_edges = get_prop(potts_h, e, :outer_edges)
@test issetequal(
cedges[(src(e), dst(e))],
[(src(oe), dst(oe)) for oe ∈ outer_edges],
)
end
end
end
end
function create_example_potts_hamiltonian(::Type{T}) where {T}
J12 = -1
h1 = 1 / 2
h2 = 0.75
D = Dict((1, 2) => J12, (1, 1) => h1, (2, 2) => h2)
ig = ising_graph(T, D)
potts_hamiltonian(
ig,
Dict((1, 1) => 2, (1, 2) => 2),
spectrum = full_spectrum,
cluster_assignment_rule = Dict(1 => (1, 1), 2 => (1, 2)),
)
end
potts_h_state_to_spin =
[([1, 1], [-1, -1]), ([1, 2], [-1, 1]), ([2, 1], [1, -1]), ([2, 2], [1, 1])]
@testset "Decoding solution gives correct spin assignment" begin
for T ∈ [Float16, Float32, Float64]
potts_h = create_example_potts_hamiltonian(T)
@test all(eltype(get_prop(potts_h, e, :en)) == T for e ∈ edges(potts_h))
for (state, spin_values) ∈ potts_h_state_to_spin
d = decode_potts_hamiltonian_state(potts_h, state)
states = collect(values(d))[collect(keys(d))]
@test states == spin_values
end
end
end
"""
Instance below looks like this:
1 -- 2 -- 3
| | |
4 -- 5 -- 6
| | |
7 -- 8 -- 9
And we group the following spins together: [1, 2, 4, 5], [3, 6], [7, 8], [9].
"""
function create_larger_example_potts_hamiltonian()
instance = Dict(
(1, 1) => 0.5,
(2, 2) => 0.25,
(3, 3) => 0.3,
(4, 4) => 0.1,
(5, 5) => 0.0,
(6, 6) => -2.0,
(7, 7) => -1.0,
(8, 8) => 2.0,
(9, 9) => 3.1,
(1, 2) => -1.0,
(2, 3) => 1.0,
(4, 5) => 0.5,
(5, 6) => -0.3,
(7, 8) => 0.1,
(8, 9) => 2.2,
(1, 4) => -1.7,
(4, 7) => 1.2,
(2, 5) => 0.2,
(5, 8) => 0.3,
(3, 6) => 1.1,
(6, 9) => 0.7,
)
ig = ising_graph(instance)
assignment_rule = Dict(
1 => (1, 1),
2 => (1, 1),
4 => (1, 1),
5 => (1, 1),
3 => (1, 2),
6 => (1, 2),
7 => (2, 1),
8 => (2, 1),
9 => (2, 2),
)
potts_h = potts_hamiltonian(
ig,
Dict{NTuple{2,Int},Int}(),
spectrum = full_spectrum,
cluster_assignment_rule = assignment_rule,
)
ig, potts_h
end
function potts_hamiltonian_energy(potts_h, state)
# This is highly inefficient, but simple, which makes it suitable for testing.
# If such a function is needed elsewhere, we need to implement it properly.
total_en = 0
# Collect local terms from each cluster
for (s, v) ∈ zip(state, vertices(potts_h))
total_en += get_prop(potts_h, v, :spectrum).energies[s]
end
# Collect inter-cluster terms
for edge ∈ edges(potts_h)
i, j = potts_h.reverse_label_map[src(edge)], potts_h.reverse_label_map[dst(edge)]
ipl, en, ipr = get_prop(potts_h, edge, :ipl),
get_prop(potts_h, edge, :en),
get_prop(potts_h, edge, :ipr)
pl = get_projector!(get_prop(potts_h, :pool_of_projectors), ipl, :CPU)
pr = get_projector!(get_prop(potts_h, :pool_of_projectors), ipr, :CPU)
edge_energy = en[pl, pr]
total_en += edge_energy[state[i], state[j]]
end
total_en
end
@testset "Decoding solution gives spins configuration with corresponding energies" begin
ig, potts_h = create_larger_example_potts_hamiltonian()
# Corresponding basis sizes for each cluster are 16, 4, 4, 2.
all_states = [[i, j, k, l] for i ∈ 1:16 for j ∈ 1:4 for k ∈ 1:4 for l ∈ 1:2]
for state ∈ all_states
d = decode_potts_hamiltonian_state(potts_h, state)
spins = zeros(length(d))
for (k, v) ∈ d
spins[k] = v
end
σ = [Int.(spins)]
@test potts_hamiltonian_energy(potts_h, state) ≈ energy(σ, ig)[]
end
end
| SpinGlassNetworks | https://github.com/euro-hpc-pl/SpinGlassNetworks.jl.git |
|
[
"Apache-2.0"
] | 1.2.0 | d7eb94c71ddcf3ff4e96c2ef15c6db2c81fe60a4 | code | 822 | using SpinGlassNetworks
using SpinGlassTensors
using LabelledGraphs
using Graphs
using MetaGraphs
using Logging
using Test
using CUDA
function _energy(config::Dict, couplings::Dict, cedges::Dict, n::Int)
eng = zeros(1, n)
for (i, j) ∈ keys(cedges)
for (k, l) ∈ values(cedges[i, j])
for m ∈ 1:length(config[k])
s = config[k][m]
r = config[l][m]
J = couplings[k, l]
if k == l
eng[m] += dot(s, J)
else
eng[m] += dot(s, J, r)
end
end
end
end
eng
end
my_tests = [
"ising.jl",
"potts_hamiltonian.jl",
"bp_1site.jl",
"bp_2site.jl",
"utils.jl",
"spectrum.jl",
]
for my_test ∈ my_tests
include(my_test)
end
| SpinGlassNetworks | https://github.com/euro-hpc-pl/SpinGlassNetworks.jl.git |
|
[
"Apache-2.0"
] | 1.2.0 | d7eb94c71ddcf3ff4e96c2ef15c6db2c81fe60a4 | code | 1311 | using CSV
using LinearAlgebra
using LabelledGraphs
@testset "Spectrum and related properties of the Ising model are correct" begin
L = 4
N = L^2
instance = "$(@__DIR__)/instances/$(N)_001.txt"
ig = ising_graph(instance)
@testset "Naive brute force for +/-1" begin
k = 2^N
sp = brute_force(ig, num_states = k)
β = rand(Float64)
ρ = gibbs_tensor(ig, β)
r = exp.(-β .* sp.energies)
R = r ./ sum(r)
@test size(ρ) == Tuple(fill(2, N))
@test sum(R) ≈ sum(ρ) ≈ 1
@test sp.energies ≈ energy(sp.states, ig)
@test [ρ[idx.(σ)...] for σ ∈ sp.states] ≈ R
for (i, state) in enumerate(sp.states)
state_dict = Dict(i => s for (i, s) ∈ enumerate(state))
@test energy(ig, state_dict) ≈ sp.energies[i]
end
end
@testset "Naive brute force for general spins" begin
L = 4
ig = ising_graph("$(@__DIR__)/instances/$(L)_001.txt")
set_prop!(ig, :rank, [3, 2, 5, 4])
rank = get_prop(ig, :rank)
all = prod(rank)
sp = full_spectrum(ig, num_states = all)
β = rand(Float64)
ρ = exp.(-β .* sp.energies)
ϱ = ρ ./ sum(ρ)
ϱ̃ = gibbs_tensor(ig, β)
@test [ϱ̃[idx.(σ)...] for σ ∈ sp.states] ≈ ϱ
end
end
| SpinGlassNetworks | https://github.com/euro-hpc-pl/SpinGlassNetworks.jl.git |
|
[
"Apache-2.0"
] | 1.2.0 | d7eb94c71ddcf3ff4e96c2ef15c6db2c81fe60a4 | code | 805 | instance_dir = "$(@__DIR__)/instances/pegasus/"
instances = ["P2"] #, "P4", "P8", "P16"]
@testset verbose = true "Renumbered instances generate correct Potts Hamiltonian" begin
size = [2, 4, 8, 16]
@testset "$instance" for (i, instance) ∈ enumerate(instances)
instance = instance * ".txt"
s = size[i] - 1
m, n, t = s, s, 24
max_cl_states = 2
ig = ising_graph(joinpath(instance_dir, instance))
potts_h = potts_hamiltonian(
ig,
max_cl_states,
spectrum = brute_force,
cluster_assignment_rule = super_square_lattice((m, n, t)),
)
@test nv(potts_h) == s^2
if s > 1
@test all(has_edge(potts_h, (l, k), (l + 1, k - 1)) for l ∈ 1:s-1, k ∈ 2:s)
end
end
end
| SpinGlassNetworks | https://github.com/euro-hpc-pl/SpinGlassNetworks.jl.git |
|
[
"Apache-2.0"
] | 1.2.0 | d7eb94c71ddcf3ff4e96c2ef15c6db2c81fe60a4 | docs | 466 | [](https://coveralls.io/github/iitis/SpinGlassNetworks.jl?branch=master)
# SpinGlassNetworks.jl
Part of SpinGlassPEPS package. It constitutes the basis for the preparation of tensors and operations on them.
We don't expect the user to interact with this package, as it is more of a "back-end" type. Nevertheless, we provide API references should the need arise. | SpinGlassNetworks | https://github.com/euro-hpc-pl/SpinGlassNetworks.jl.git |
|
[
"Apache-2.0"
] | 1.2.0 | d7eb94c71ddcf3ff4e96c2ef15c6db2c81fe60a4 | docs | 816 | # Library
```@meta
CurrentModule = SpinGlassNetworks
```
## Ising Graphs
```@docs
inter_cluster_edges
prune
couplings
```
## Potts Hamiltonian
```@docs
split_into_clusters
decode_potts_hamiltonian_state
rank_reveal
energy
energy_2site
cluster_size
bond_energy
exact_cond_prob
truncate_potts_hamiltonian
```
## Belief propagation
```@docs
local_energy
interaction_energy
get_neighbors
MergedEnergy
update_message
merge_vertices_potts_h
projector
SparseCSC
```
## Projectors
```@docs
PoolOfProjectors
get_projector!
add_projector!
empty!
```
## Spectrum
```@docs
Spectrum
matrix_to_integers
gibbs_tensor
brute_force
```
## Truncate
```@docs
truncate_potts_hamiltonian_1site_BP
truncate_potts_hamiltonian_2site_energy
select_numstate_best
```
## Auxiliary Functions
```@docs
zephyr_to_linear
load_openGM
``` | SpinGlassNetworks | https://github.com/euro-hpc-pl/SpinGlassNetworks.jl.git |
|
[
"Apache-2.0"
] | 1.2.0 | d7eb94c71ddcf3ff4e96c2ef15c6db2c81fe60a4 | docs | 491 | ## Belief propagation
Local dimensional reduction can be achieved by selectively choosing states in the Potts Hamiltonian that have the lowest local energy in the cluster. This approach aims to reduce the dimensionality of the problem by focusing on the most relevant and energetically favorable states. It can be done by truncation based on energy or truncation based on Loopy Belief Propagation.
```@docs
potts_hamiltonian_2site
belief_propagation
truncate_potts_hamiltonian_2site_BP
``` | SpinGlassNetworks | https://github.com/euro-hpc-pl/SpinGlassNetworks.jl.git |
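
## Simple example

The sketch below truncates each cluster of a Potts Hamiltonian to its locally most probable states, as estimated with loopy belief propagation. The instance path mirrors the other examples in these docs; the values of `beta`, `iter`, and the number of retained states are illustrative.

```julia
using SpinGlassNetworks

instance = "$(@__DIR__)/../../src/instances/square_diagonal/5x5/diagonal.txt"
ig = ising_graph(instance)
potts_h = potts_hamiltonian(
    ig,
    cluster_assignment_rule = super_square_lattice((5, 5, 4))
)

# Keep at most 8 most probable states per cluster
potts_h_truncated = truncate_potts_hamiltonian_1site_BP(potts_h, 8; beta = 1.0, iter = 16)
```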
|
[
"Apache-2.0"
] | 1.2.0 | d7eb94c71ddcf3ff4e96c2ef15c6db2c81fe60a4 | docs | 980 | # Introduction
A Potts Hamiltonian is a graphical representation that allows for a convenient and intuitive way to describe the structure of a network.
The concept of a Potts Hamiltonian within `SpinGlassNetworks.jl` introduces a mechanism for organizing spins into desired geometries, facilitating a structured approach to modeling complex spin systems. Analogous to a standard factor graph, the Potts Hamiltonian involves nodes that represent tensors within the underlying network. The edges connecting these nodes in the Potts Hamiltonian correspond to the indices shared between the respective tensors in the tensor network.
```@docs
potts_hamiltonian
```
## Simple example
```@example
using SpinGlassNetworks
# Prepare simple instance
instance = "$(@__DIR__)/../../src/instances/square_diagonal/5x5/diagonal.txt"
ig = ising_graph(instance)
# Create Potts Hamiltonian
potts_h = potts_hamiltonian(
ig,
cluster_assignment_rule = super_square_lattice((5,5,4))
)
``` | SpinGlassNetworks | https://github.com/euro-hpc-pl/SpinGlassNetworks.jl.git |
|
[
"Apache-2.0"
] | 1.2.0 | d7eb94c71ddcf3ff4e96c2ef15c6db2c81fe60a4 | docs | 235 | # SpinGlassNetworks
A [Julia](http://julialang.org) package for building and interacting with Ising spin glass models in the context of tensor networks. Part of the [SpinGlassPEPS](https://github.com/euro-hpc-pl/SpinGlassPEPS.jl) package.
| SpinGlassNetworks | https://github.com/euro-hpc-pl/SpinGlassNetworks.jl.git |
|
[
"Apache-2.0"
] | 1.2.0 | d7eb94c71ddcf3ff4e96c2ef15c6db2c81fe60a4 | docs | 1323 | # Ising model
The Ising model is a mathematical model used to describe the behavior of interacting particles, such as atoms or molecules, in a magnetic field. In the Ising model, each particle is represented as a binary variable $s_i$ that can take on the values of either +1 or -1. The total energy of the system is given by the Hamiltonian:
$H = \sum_{(i,j) \in \mathcal{E}} J_{ij} s_i s_j + \sum_{i} h_i s_i$
where $J_{ij}$ is the coupling constant between particles $i$ and $j$, $h_i$ is the external magnetic field at particle $i$, and the sums run over the set of interacting pairs $\mathcal{E}$ and over all particles in the system, respectively.
In `SpinGlassPEPS.jl` package, an Ising graph can be created using the command `ising_graph`.
```@docs
ising_graph
```
## Simple example
In the simple example below, we show how to create an Ising graph from an instance given as a txt file in the format (i, j, Jij). The resulting graph has vertices corresponding to the positions of spins in the system and edges defining the coupling strength between spins. Each vertex contains information about its local field.
```@example
using SpinGlassNetworks
# Create Ising instance
instance = "$(@__DIR__)/../../src/instances/square_diagonal/5x5/diagonal.txt"
ig = ising_graph(instance)
# View graph properties
@show biases(ig), couplings(ig)
``` | SpinGlassNetworks | https://github.com/euro-hpc-pl/SpinGlassNetworks.jl.git |
|
[
"Apache-2.0"
] | 1.2.0 | d7eb94c71ddcf3ff4e96c2ef15c6db2c81fe60a4 | docs | 3916 | # Lattice geometries
The Ising graph allows an instance to be loaded directly from a file and translated into a graph. The next step towards constructing the tensor network is to build a lattice, based on which the Ising graph is transformed into a Potts Hamiltonian.
Within the `SpinGlassNetworks.jl` package, users have the flexibility to construct three types of lattice geometries, each tailored to specific needs.
## Super square lattice
The `super_square_lattice` geometry represents a square lattice with nearest-neighbor interactions (horizontal and vertical interactions between unit cells) and next-nearest-neighbor (diagonal) interactions. Unit cells, depicted as red ellipses in the schematic below, can consist of multiple spins.
This geometry allows for an exploration of spin interactions beyond the traditional square lattice framework.
```@raw html
<img src="../images/sd.png" width="200%" class="center"/>
```
In the `SpinGlassPEPS.jl` solver, a grid of this type can be loaded using the command `super_square_lattice`.
```@docs
super_square_lattice
```
Below you will find a simple example of using the `super_square_lattice` function.
```@example
using SpinGlassEngine, SpinGlassNetworks, LabelledGraphs
instance = "$(@__DIR__)/../../src/instances/square_diagonal/5x5/diagonal.txt"
ig = ising_graph(instance)
m = 5
n = 5
t = 4
potts_h = potts_hamiltonian(
ig,
cluster_assignment_rule = super_square_lattice((m, n, t))
)
println("Number of nodes in oryginal instance: ", length(LabelledGraphs.vertices(ig)), "\n", " Number of nodes in Potts Hamiltonian: ", length(LabelledGraphs.vertices(potts_h)))
```
## Pegasus graphs
The Pegasus graph is a type of graph architecture used in quantum computing systems, particularly in the quantum annealing machines developed by D-Wave Systems. It is designed to provide a grid of qubits with specific connectivity patterns suited to certain optimization problems. Further details can be found [here](https://docs.dwavesys.com/docs/latest/c_gs_4.html#pegasus-graph).
```@raw html
<img src="../images/peg.png" width="200%" class="center"/>
```
In the `SpinGlassPEPS.jl` solver, a grid of this type can be loaded using the command `pegasus_lattice`.
```@docs
pegasus_lattice
```
Below you will find a simple example of using the `pegasus_lattice` function.
```@example
using SpinGlassEngine, SpinGlassNetworks, LabelledGraphs
# Load Pegasus instance and create Ising graph
instance = "$(@__DIR__)/../../src/instances/pegasus_random/P4/RAU/001_sg.txt"
ig = ising_graph(instance)
# Loaded instance is a Pegasus graph
m = 3
n = 3
t = 3
potts_h = potts_hamiltonian(
ig,
cluster_assignment_rule = pegasus_lattice((m, n, t))
)
println("Number of nodes in original instance: ", length(LabelledGraphs.vertices(ig)), "\n", " Number of nodes in Potts Hamiltonian: ", length(LabelledGraphs.vertices(potts_h))/2)
```
## Zephyr graphs
The Zephyr graph is a type of graph architecture used in quantum computing systems, particularly in the quantum annealing machines developed by D-Wave Systems. Further details can be found [here](https://docs.dwavesys.com/docs/latest/c_gs_4.html#zephyr-graph).
```@raw html
<img src="../images/zep.png" width="200%" class="center"/>
```
In the `SpinGlassPEPS.jl` solver, a grid of this type can be loaded using the command `zephyr_lattice`.
```@docs
zephyr_lattice
```
Below you will find a simple example of using the `zephyr_lattice` function.
```@example
using SpinGlassEngine, SpinGlassNetworks, LabelledGraphs
# Load Zephyr instance and create Ising graph
instance = "$(@__DIR__)/../../src/instances/zephyr_random/Z3/RAU/001_sg.txt"
ig = ising_graph(instance)
# Loaded instance is a Zephyr graph
m = 6
n = 6
t = 4
potts_h = potts_hamiltonian(
ig,
cluster_assignment_rule = zephyr_lattice((m, n, t))
)
println("Number of nodes in oryginal instance: ", length(LabelledGraphs.vertices(ig)))
``` | SpinGlassNetworks | https://github.com/euro-hpc-pl/SpinGlassNetworks.jl.git |
|
[
"Apache-2.0"
] | 1.2.0 | d7eb94c71ddcf3ff4e96c2ef15c6db2c81fe60a4 | docs | 1018 | # Introduction
A [Julia](http://julialang.org) package for building and interacting with Ising spin glass models in context of tensor networks. Part of [SpinGlassPEPS](https://github.com/euro-hpc-pl/SpinGlassPEPS.jl) package.
The contents of our package are illustrated through comprehensive examples, showcasing the practical utilization of key functions. Specifically, the `ising_graph` function is highlighted, demonstrating its capacity to generate Ising model graphs — a fundamental step in modeling spin systems. Additionally, the `potts_hamiltonian` function is presented as a tool for converting Ising graphs into Potts Hamiltonians. The package delves into various lattice geometries, providing insights into constructing diverse structures such as the `super_square_lattice`, `pegasus_lattice`, and `zephyr_lattice`. Moreover, the documentation outlines methods for local dimensional reduction, shedding light on techniques to streamline computations and enhance the efficiency of spin system simulations.
| SpinGlassNetworks | https://github.com/euro-hpc-pl/SpinGlassNetworks.jl.git |
|
[
"MIT"
] | 1.0.0 | f9458a3b5fa9cc7aef3e6f105695db69964c4b72 | code | 4252 | module Vision
using HTTP
using JSON
using URIs
export makeRequestBody
export visionFeature
export getResponse
export parseFeatures
"""
makeRequestBody(image, features)
Create the request body for the Google Vision API.
# Arguments
- `image` base64 encoded image of type `String` or URI of type `URI`
- `features::Array{Dict{String, Any}}` list of features to request from the API
# Returns
- `String` JSON-encoded request body
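# Example
A hypothetical call with a base64-encoded local image (`"photo.jpg"` is a placeholder path):
```julia
using Base64
img = base64encode(read("photo.jpg"))
requestBody = makeRequestBody(img, [visionFeature("LABEL_DETECTION", 5)])
```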
"""
function makeRequestBody end
function makeRequestBody(imageString::String, features)
Dict("requests" =>
[
Dict(
"image" => Dict("content" => imageString),
"features" => features
)
]
) |> JSON.json
end
function makeRequestBody(imageURI::URI, features)
Dict("requests" =>
[
Dict(
"image" => Dict("source" => Dict("imageUri" => URIs.uristring(imageURI))),
"features" => features
)
]
) |> JSON.json
end
"""
    visionFeature(featureType::String, maxResults::Int=10)
Create a dictionary containing the feature type and max results.
# Arguments
- `featureType::String` type of feature to request from the API
- `maxResults::Int=10` maximum number of results to return
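# Returns
- `Dict` with the feature type and max results, e.g. `Dict("type" => "LABEL_DETECTION", "maxResults" => 10)`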
"""
function visionFeature(featureType::String, maxResults::Int=10)
if featureType ∉ [
"TEXT_DETECTION",
"DOCUMENT_TEXT_DETECTION",
"LABEL_DETECTION",
"FACE_DETECTION",
"LANDMARK_DETECTION",
"LOGO_DETECTION",
"SAFE_SEARCH_DETECTION",
"IMAGE_PROPERTIES",
"CROP_HINTS",
"WEB_DETECTION",
"OBJECT_LOCALIZATION"
]
throw(ArgumentError("Invalid feature type, see https://cloud.google.com/vision/docs/features-list for valid feature types"))
else
Dict("type" => featureType, "maxResults" => maxResults)
end
end
"""
getResponse(requestBody, URL, headers)
Make a request to the Google Vision API and return as a dictionary.
# Arguments
- `requestBody` JSON request body
- `URL` URL of the API; defaults to `https://vision.googleapis.com/v1/images:annotate` with the API key read from `ENV["JULIA_VISION_API_KEY"]`. This can be changed to use a different API or if it isn't possible to load environment variables.
- `headers` headers for the request, defaults to []
# Returns
- `Dict{String, Any}` response from the API
"""
function getResponse(requestBody::String,
URL::String="https://vision.googleapis.com/v1/images:annotate?key=$(ENV["JULIA_VISION_API_KEY"])",
headers = [])
    # Send the request body to the Google Vision API and parse the JSON response
response = HTTP.post(URL, headers, requestBody)
return JSON.parse(String(response.body))
end
"""
parseFeatures(responseBody)
Parse the response body from the Google Vision API. Returns formatted output for supported feature types; feature types whose parsing has not been implemented are passed through as raw dictionaries.
# Arguments
- `responseBody` response body from the API
# Returns
- Dictionary containing raw features or formatted output
"""
function parseFeatures(responseBody)
    function getBBox(boundingPoly)
        vertices = boundingPoly["vertices"]
        # Dict iteration order is undefined, so read the coordinates by key;
        # the API omits coordinates whose value is zero.
        map(v -> (get(v, "x", 0), get(v, "y", 0)), vertices)
    end
responses = responseBody["responses"][1]
parsedResponse = Dict()
for (key, value) in responses
if key == "textAnnotations"
annotationsDict = Dict("combined" => Dict(), "annotations" => [])
# Get combined text since this is different from the other annotations
annotationsDict["combined"]["locale"] = value[1]["locale"]
annotationsDict["combined"]["text"] = value[1]["description"]
annotationsDict["combined"]["boundingPoly"] = getBBox(value[1]["boundingPoly"])
for annotation in value[2:end]
push!(annotationsDict["annotations"], Dict(
"text" => annotation["description"],
"boundingPoly" => getBBox(annotation["boundingPoly"])
))
end
parsedResponse[key] = annotationsDict
else
@warn "Method not implemented for $key, returning raw response"
parsedResponse[key] = value
end
end
parsedResponse
end
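# Example end-to-end usage (commented out; requires a valid API key in
# ENV["JULIA_VISION_API_KEY"], network access, and a real image URL;
# the URL below is a placeholder):
#
#   using URIs
#   features = [visionFeature("TEXT_DETECTION")]
#   requestBody = makeRequestBody(URI("https://example.com/sign.png"), features)
#   response = getResponse(requestBody)
#   parsed = parseFeatures(response)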
end | Vision | https://github.com/joshniemela/Vision.jl.git |