licenses
sequencelengths 1
3
| version
stringclasses 677
values | tree_hash
stringlengths 40
40
| path
stringclasses 1
value | type
stringclasses 2
values | size
stringlengths 2
8
| text
stringlengths 25
67.1M
| package_name
stringlengths 2
41
| repo
stringlengths 33
86
|
---|---|---|---|---|---|---|---|---|
[
"MIT"
] | 1.3.1 | c0f87a4a459e6ac5eb4fa8eea7b1357a4b5fc10b | docs | 1881 | # Julia/Python Performance Test Result
## Summary
Julia is ~20.7x faster than Python/Pandas
## Test File
Iterations: 100
Filename|Size|Rows|Columns|Numeric Columns|String Columns
--------|----|----|-------|---------------|--------------
productsales.sas7bdat|148.5 kB|1440|10|5|5
## Python
```
$ python -V
Python 3.6.3 :: Anaconda, Inc.
$ python perf_test1.py data_pandas/productsales.sas7bdat 100
Minimum: 0.0280 seconds
Median: 0.0318 seconds
Mean: 0.0328 seconds
Maximum: 0.0491 seconds
```
## Julia (ObjectPool)
```
Julia Version 0.6.2
Commit d386e40c17 (2017-12-13 18:08 UTC)
Platform Info:
OS: macOS (x86_64-apple-darwin14.5.0)
CPU: Intel(R) Core(TM) i5-4258U CPU @ 2.40GHz
WORD_SIZE: 64
BLAS: libopenblas (USE64BITINT DYNAMIC_ARCH NO_AFFINITY Haswell)
LAPACK: libopenblas64_
LIBM: libopenlibm
LLVM: libLLVM-3.9.1 (ORCJIT, haswell)
BenchmarkTools.Trial:
memory estimate: 1.07 MiB
allocs estimate: 18468
--------------
minimum time: 2.022 ms (0.00% GC)
median time: 2.213 ms (0.00% GC)
mean time: 2.383 ms (4.38% GC)
maximum time: 5.835 ms (51.17% GC)
--------------
samples: 100
evals/sample: 1
```
## Julia (Regular String Array)
```
Julia Version 0.6.2
Commit d386e40c17 (2017-12-13 18:08 UTC)
Platform Info:
OS: macOS (x86_64-apple-darwin14.5.0)
CPU: Intel(R) Core(TM) i5-4258U CPU @ 2.40GHz
WORD_SIZE: 64
BLAS: libopenblas (USE64BITINT DYNAMIC_ARCH NO_AFFINITY Haswell)
LAPACK: libopenblas64_
LIBM: libopenlibm
LLVM: libLLVM-3.9.1 (ORCJIT, haswell)
BenchmarkTools.Trial:
memory estimate: 1.06 MiB
allocs estimate: 18646
--------------
minimum time: 1.352 ms (0.00% GC)
median time: 1.441 ms (0.00% GC)
mean time: 1.896 ms (7.26% GC)
maximum time: 7.269 ms (0.00% GC)
--------------
samples: 100
evals/sample: 1
```
| SASLib | https://github.com/tk3369/SASLib.jl.git |
|
[
"MIT"
] | 1.3.1 | c0f87a4a459e6ac5eb4fa8eea7b1357a4b5fc10b | docs | 1874 | # Julia/Python Performance Test Result
## Summary
Julia is ~96.3x faster than Python/Pandas
## Test File
Iterations: 100
Filename|Size|Rows|Columns|Numeric Columns|String Columns
--------|----|----|-------|---------------|--------------
test1.sas7bdat|131.1 kB|10|100|75|25
## Python
```
$ python -V
Python 3.6.3 :: Anaconda, Inc.
$ python perf_test1.py data_pandas/test1.sas7bdat 100
Minimum: 0.0827 seconds
Median: 0.0869 seconds
Mean: 0.0882 seconds
Maximum: 0.1081 seconds
```
## Julia (ObjectPool)
```
Julia Version 0.6.2
Commit d386e40c17 (2017-12-13 18:08 UTC)
Platform Info:
OS: macOS (x86_64-apple-darwin14.5.0)
CPU: Intel(R) Core(TM) i5-4258U CPU @ 2.40GHz
WORD_SIZE: 64
BLAS: libopenblas (USE64BITINT DYNAMIC_ARCH NO_AFFINITY Haswell)
LAPACK: libopenblas64_
LIBM: libopenlibm
LLVM: libLLVM-3.9.1 (ORCJIT, haswell)
BenchmarkTools.Trial:
memory estimate: 920.72 KiB
allocs estimate: 7946
--------------
minimum time: 858.533 μs (0.00% GC)
median time: 914.394 μs (0.00% GC)
mean time: 1.063 ms (8.94% GC)
maximum time: 3.900 ms (60.65% GC)
--------------
samples: 100
evals/sample: 1
```
## Julia (Regular String Array)
```
Julia Version 0.6.2
Commit d386e40c17 (2017-12-13 18:08 UTC)
Platform Info:
OS: macOS (x86_64-apple-darwin14.5.0)
CPU: Intel(R) Core(TM) i5-4258U CPU @ 2.40GHz
WORD_SIZE: 64
BLAS: libopenblas (USE64BITINT DYNAMIC_ARCH NO_AFFINITY Haswell)
LAPACK: libopenblas64_
LIBM: libopenlibm
LLVM: libLLVM-3.9.1 (ORCJIT, haswell)
BenchmarkTools.Trial:
memory estimate: 1.04 MiB
allocs estimate: 10880
--------------
minimum time: 1.569 ms (0.00% GC)
median time: 1.714 ms (0.00% GC)
mean time: 1.941 ms (5.08% GC)
maximum time: 4.866 ms (54.98% GC)
--------------
samples: 100
evals/sample: 1
```
| SASLib | https://github.com/tk3369/SASLib.jl.git |
|
[
"MIT"
] | 1.3.1 | c0f87a4a459e6ac5eb4fa8eea7b1357a4b5fc10b | docs | 1880 | # Julia/Python Performance Test Result
## Summary
Julia is ~10.7x faster than Python/Pandas
## Test File
Iterations: 30
Filename|Size|Rows|Columns|Numeric Columns|String Columns
--------|----|----|-------|---------------|--------------
topical.sas7bdat|13.6 MB|84355|114|8|106
## Python
```
$ python -V
Python 3.6.3 :: Anaconda, Inc.
$ python perf_test1.py data_AHS2013/topical.sas7bdat 30
Minimum: 18.1673 seconds
Median: 20.0589 seconds
Mean: 20.0653 seconds
Maximum: 23.6490 seconds
```
## Julia (ObjectPool)
```
Julia Version 0.6.2
Commit d386e40c17 (2017-12-13 18:08 UTC)
Platform Info:
OS: macOS (x86_64-apple-darwin14.5.0)
CPU: Intel(R) Core(TM) i5-4258U CPU @ 2.40GHz
WORD_SIZE: 64
BLAS: libopenblas (USE64BITINT DYNAMIC_ARCH NO_AFFINITY Haswell)
LAPACK: libopenblas64_
LIBM: libopenlibm
LLVM: libLLVM-3.9.1 (ORCJIT, haswell)
BenchmarkTools.Trial:
memory estimate: 632.36 MiB
allocs estimate: 18653372
--------------
minimum time: 2.183 s (9.05% GC)
median time: 2.306 s (10.79% GC)
mean time: 2.282 s (10.57% GC)
maximum time: 2.357 s (11.76% GC)
--------------
samples: 3
evals/sample: 1
```
## Julia (Regular String Array)
```
Julia Version 0.6.2
Commit d386e40c17 (2017-12-13 18:08 UTC)
Platform Info:
OS: macOS (x86_64-apple-darwin14.5.0)
CPU: Intel(R) Core(TM) i5-4258U CPU @ 2.40GHz
WORD_SIZE: 64
BLAS: libopenblas (USE64BITINT DYNAMIC_ARCH NO_AFFINITY Haswell)
LAPACK: libopenblas64_
LIBM: libopenlibm
LLVM: libLLVM-3.9.1 (ORCJIT, haswell)
BenchmarkTools.Trial:
memory estimate: 596.44 MiB
allocs estimate: 18612061
--------------
minimum time: 1.699 s (0.00% GC)
median time: 2.076 s (48.51% GC)
mean time: 2.071 s (37.46% GC)
maximum time: 2.440 s (54.15% GC)
--------------
samples: 3
evals/sample: 1
```
| SASLib | https://github.com/tk3369/SASLib.jl.git |
|
[
"MIT"
] | 1.3.1 | c0f87a4a459e6ac5eb4fa8eea7b1357a4b5fc10b | docs | 1917 | # Performance Comparison (0.7.0-beta vs 0.6.3)
## Small Data Set
Reading data_pandas/productsales.sas7bdat (140K) is 12% faster in v0.7.
v0.7
```
julia> @benchmark readsas("data_pandas/productsales.sas7bdat", verbose_level = 0)
BenchmarkTools.Trial:
memory estimate: 1.01 MiB
allocs estimate: 14727
--------------
minimum time: 1.748 ms (0.00% GC)
median time: 1.843 ms (0.00% GC)
mean time: 2.027 ms (5.61% GC)
maximum time: 58.967 ms (96.56% GC)
--------------
samples: 2458
evals/sample: 1
```
v0.6.3
```
julia> @benchmark readsas("data_pandas/productsales.sas7bdat", verbose_level = 0)
BenchmarkTools.Trial:
memory estimate: 1.07 MiB
allocs estimate: 18505
--------------
minimum time: 1.987 ms (0.00% GC)
median time: 2.150 ms (0.00% GC)
mean time: 2.367 ms (5.56% GC)
maximum time: 10.130 ms (70.77% GC)
--------------
samples: 2108
evals/sample: 1
```
## Larger Data Set
Reading data_AHS2013/topical.sas7bdat (14 MB) is 18% faster in v0.7.
v0.7
```
julia> @benchmark readsas("data_AHS2013/topical.sas7bdat", verbose_level = 0) seconds=60
BenchmarkTools.Trial:
memory estimate: 649.63 MiB
allocs estimate: 19011924
--------------
minimum time: 1.959 s (10.46% GC)
median time: 2.042 s (12.78% GC)
mean time: 2.061 s (12.59% GC)
maximum time: 2.348 s (12.17% GC)
--------------
samples: 30
evals/sample: 1
```
v0.6.3
```
julia> @benchmark readsas("data_AHS2013/topical.sas7bdat", verbose_level = 0) seconds=60
BenchmarkTools.Trial:
memory estimate: 632.36 MiB
allocs estimate: 18653427
--------------
minimum time: 2.391 s (10.82% GC)
median time: 2.520 s (13.07% GC)
mean time: 2.524 s (12.84% GC)
maximum time: 2.638 s (12.87% GC)
--------------
samples: 24
evals/sample: 1
``` | SASLib | https://github.com/tk3369/SASLib.jl.git |
|
[
"MIT"
] | 1.3.1 | c0f87a4a459e6ac5eb4fa8eea7b1357a4b5fc10b | docs | 1676 | # Julia/Python Performance Test Result
## Summary
Julia is ~27.9x faster than Python/Pandas
## Test File
Iterations: 50
Filename|Size|Rows|Columns|Numeric Columns|String Columns
--------|----|----|-------|---------------|--------------
homimp.sas7bdat|1.2 MB|46641|6|1|5
## Python
```
$ python -V
Python 3.7.1
$ python perf_test1.py data_AHS2013/homimp.sas7bdat 50
Minimum: 0.5793 seconds
```
## Julia (ObjectPool)
```
Julia Version 1.3.0
Commit 46ce4d7933 (2019-11-26 06:09 UTC)
Platform Info:
OS: macOS (x86_64-apple-darwin19.0.0)
CPU: Intel(R) Core(TM) i5-4258U CPU @ 2.40GHz
WORD_SIZE: 64
LIBM: libopenlibm
LLVM: libLLVM-6.0.1 (ORCJIT, haswell)
Environment:
JULIA_NUM_THREADS = 4
BenchmarkTools.Trial:
memory estimate: 20.20 MiB
allocs estimate: 494963
--------------
minimum time: 39.500 ms (0.00% GC)
median time: 44.556 ms (0.00% GC)
mean time: 44.054 ms (4.70% GC)
maximum time: 63.587 ms (7.46% GC)
--------------
samples: 50
evals/sample: 1
```
## Julia (Regular String Array)
```
Julia Version 1.3.0
Commit 46ce4d7933 (2019-11-26 06:09 UTC)
Platform Info:
OS: macOS (x86_64-apple-darwin19.0.0)
CPU: Intel(R) Core(TM) i5-4258U CPU @ 2.40GHz
WORD_SIZE: 64
LIBM: libopenlibm
LLVM: libLLVM-6.0.1 (ORCJIT, haswell)
Environment:
JULIA_NUM_THREADS = 4
BenchmarkTools.Trial:
memory estimate: 18.02 MiB
allocs estimate: 428420
--------------
minimum time: 20.776 ms (0.00% GC)
median time: 25.170 ms (0.00% GC)
mean time: 29.005 ms (18.45% GC)
maximum time: 109.289 ms (73.77% GC)
--------------
samples: 50
evals/sample: 1
```
| SASLib | https://github.com/tk3369/SASLib.jl.git |
|
[
"MIT"
] | 1.3.1 | c0f87a4a459e6ac5eb4fa8eea7b1357a4b5fc10b | docs | 1051 | # Julia/Python Performance Test Result
## Summary
Julia is ~10.2x faster than Python/Pandas
## Test File
Iterations: 100
Filename|Size|Rows|Columns|Numeric Columns|String Columns
--------|----|----|-------|---------------|--------------
numeric_1000000_2.sas7bdat|16.3 MB|1000000|2|2|0
## Python
```
$ python -V
Python 3.7.1
$ python perf_test1.py data_misc/numeric_1000000_2.sas7bdat 100
Minimum: 1.8784 seconds
```
## Julia
```
Julia Version 1.3.0
Commit 46ce4d7933 (2019-11-26 06:09 UTC)
Platform Info:
OS: macOS (x86_64-apple-darwin19.0.0)
CPU: Intel(R) Core(TM) i5-4258U CPU @ 2.40GHz
WORD_SIZE: 64
LIBM: libopenlibm
LLVM: libLLVM-6.0.1 (ORCJIT, haswell)
Environment:
JULIA_NUM_THREADS = 4
BenchmarkTools.Trial:
memory estimate: 168.83 MiB
allocs estimate: 1004863
--------------
minimum time: 183.319 ms (6.02% GC)
median time: 208.804 ms (14.80% GC)
mean time: 235.003 ms (25.50% GC)
maximum time: 383.528 ms (54.19% GC)
--------------
samples: 22
evals/sample: 1
```
| SASLib | https://github.com/tk3369/SASLib.jl.git |
|
[
"MIT"
] | 1.3.1 | c0f87a4a459e6ac5eb4fa8eea7b1357a4b5fc10b | docs | 1680 | # Julia/Python Performance Test Result
## Summary
Julia is ~46.9x faster than Python/Pandas
## Test File
Iterations: 100
Filename|Size|Rows|Columns|Numeric Columns|String Columns
--------|----|----|-------|---------------|--------------
productsales.sas7bdat|148.5 kB|1440|10|4|6
## Python
```
$ python -V
Python 3.7.1
$ python perf_test1.py data_pandas/productsales.sas7bdat 100
Minimum: 0.0505 seconds
```
## Julia (ObjectPool)
```
Julia Version 1.3.0
Commit 46ce4d7933 (2019-11-26 06:09 UTC)
Platform Info:
OS: macOS (x86_64-apple-darwin19.0.0)
CPU: Intel(R) Core(TM) i5-4258U CPU @ 2.40GHz
WORD_SIZE: 64
LIBM: libopenlibm
LLVM: libLLVM-6.0.1 (ORCJIT, haswell)
Environment:
JULIA_NUM_THREADS = 4
BenchmarkTools.Trial:
memory estimate: 1.17 MiB
allocs estimate: 14693
--------------
minimum time: 1.745 ms (0.00% GC)
median time: 2.431 ms (0.00% GC)
mean time: 2.679 ms (2.39% GC)
maximum time: 5.482 ms (60.67% GC)
--------------
samples: 100
evals/sample: 1
```
## Julia (Regular String Array)
```
Julia Version 1.3.0
Commit 46ce4d7933 (2019-11-26 06:09 UTC)
Platform Info:
OS: macOS (x86_64-apple-darwin19.0.0)
CPU: Intel(R) Core(TM) i5-4258U CPU @ 2.40GHz
WORD_SIZE: 64
LIBM: libopenlibm
LLVM: libLLVM-6.0.1 (ORCJIT, haswell)
Environment:
JULIA_NUM_THREADS = 4
BenchmarkTools.Trial:
memory estimate: 1.15 MiB
allocs estimate: 14638
--------------
minimum time: 1.078 ms (0.00% GC)
median time: 3.277 ms (0.00% GC)
mean time: 6.618 ms (3.48% GC)
maximum time: 83.970 ms (0.00% GC)
--------------
samples: 100
evals/sample: 1
```
| SASLib | https://github.com/tk3369/SASLib.jl.git |
|
[
"MIT"
] | 1.3.1 | c0f87a4a459e6ac5eb4fa8eea7b1357a4b5fc10b | docs | 1671 | # Julia/Python Performance Test Result
## Summary
Julia is ~118.8x faster than Python/Pandas
## Test File
Iterations: 100
Filename|Size|Rows|Columns|Numeric Columns|String Columns
--------|----|----|-------|---------------|--------------
test1.sas7bdat|131.1 kB|10|100|73|27
## Python
```
$ python -V
Python 3.7.1
$ python perf_test1.py data_pandas/test1.sas7bdat 100
Minimum: 0.1036 seconds
```
## Julia (ObjectPool)
```
Julia Version 1.3.0
Commit 46ce4d7933 (2019-11-26 06:09 UTC)
Platform Info:
OS: macOS (x86_64-apple-darwin19.0.0)
CPU: Intel(R) Core(TM) i5-4258U CPU @ 2.40GHz
WORD_SIZE: 64
LIBM: libopenlibm
LLVM: libLLVM-6.0.1 (ORCJIT, haswell)
Environment:
JULIA_NUM_THREADS = 4
BenchmarkTools.Trial:
memory estimate: 1.00 MiB
allocs estimate: 7132
--------------
minimum time: 871.807 μs (0.00% GC)
median time: 1.254 ms (0.00% GC)
mean time: 1.470 ms (6.75% GC)
maximum time: 6.470 ms (78.01% GC)
--------------
samples: 100
evals/sample: 1
```
## Julia (Regular String Array)
```
Julia Version 1.3.0
Commit 46ce4d7933 (2019-11-26 06:09 UTC)
Platform Info:
OS: macOS (x86_64-apple-darwin19.0.0)
CPU: Intel(R) Core(TM) i5-4258U CPU @ 2.40GHz
WORD_SIZE: 64
LIBM: libopenlibm
LLVM: libLLVM-6.0.1 (ORCJIT, haswell)
Environment:
JULIA_NUM_THREADS = 4
BenchmarkTools.Trial:
memory estimate: 990.86 KiB
allocs estimate: 6819
--------------
minimum time: 1.119 ms (0.00% GC)
median time: 2.666 ms (0.00% GC)
mean time: 9.009 ms (6.71% GC)
maximum time: 161.985 ms (0.00% GC)
--------------
samples: 100
evals/sample: 1
```
| SASLib | https://github.com/tk3369/SASLib.jl.git |
|
[
"MIT"
] | 1.3.1 | c0f87a4a459e6ac5eb4fa8eea7b1357a4b5fc10b | docs | 1676 | # Julia/Python Performance Test Result
## Summary
Julia is ~27.3x faster than Python/Pandas
## Test File
Iterations: 30
Filename|Size|Rows|Columns|Numeric Columns|String Columns
--------|----|----|-------|---------------|--------------
topical.sas7bdat|13.6 MB|84355|114|8|106
## Python
```
$ python -V
Python 3.7.1
$ python perf_test1.py data_AHS2013/topical.sas7bdat 30
Minimum: 46.9720 seconds
```
## Julia (ObjectPool)
```
Julia Version 1.3.0
Commit 46ce4d7933 (2019-11-26 06:09 UTC)
Platform Info:
OS: macOS (x86_64-apple-darwin19.0.0)
CPU: Intel(R) Core(TM) i5-4258U CPU @ 2.40GHz
WORD_SIZE: 64
LIBM: libopenlibm
LLVM: libLLVM-6.0.1 (ORCJIT, haswell)
Environment:
JULIA_NUM_THREADS = 4
BenchmarkTools.Trial:
memory estimate: 685.66 MiB
allocs estimate: 19193161
--------------
minimum time: 1.720 s (6.37% GC)
median time: 1.806 s (11.83% GC)
mean time: 1.796 s (10.69% GC)
maximum time: 1.863 s (13.57% GC)
--------------
samples: 3
evals/sample: 1
```
## Julia (Regular String Array)
```
Julia Version 1.3.0
Commit 46ce4d7933 (2019-11-26 06:09 UTC)
Platform Info:
OS: macOS (x86_64-apple-darwin19.0.0)
CPU: Intel(R) Core(TM) i5-4258U CPU @ 2.40GHz
WORD_SIZE: 64
LIBM: libopenlibm
LLVM: libLLVM-6.0.1 (ORCJIT, haswell)
Environment:
JULIA_NUM_THREADS = 4
BenchmarkTools.Trial:
memory estimate: 648.04 MiB
allocs estimate: 19048983
--------------
minimum time: 1.994 s (46.01% GC)
median time: 2.559 s (51.16% GC)
mean time: 2.559 s (51.16% GC)
maximum time: 3.123 s (54.45% GC)
--------------
samples: 2
evals/sample: 1
```
| SASLib | https://github.com/tk3369/SASLib.jl.git |
|
[
"MIT"
] | 1.3.1 | c0f87a4a459e6ac5eb4fa8eea7b1357a4b5fc10b | docs | 3311 | # SASLib vs ReadStat test
Key | Description |
--------|-------------------------|
F64 | number of Float64 columns|
STR | number of String columns|
DT | number of date/time coumns|
COMP | compression method|
S/R | SASLib time divided by ReadStat time|
SA/R | SASLib time (regular string arrays) divided by ReadStat time|
SASLibA | SASLib (regular string arrays)|
```
Filename : ReadStat SASLib S/R SASLibA SA/R F64 STR DT COMP
data_misc/numeric_1000000_2.sas7bdat : 367.403 ms 164.249 ms ( 45%) 165.407 ms ( 45%) 2 0 0 none
data_misc/types.sas7bdat : 0.067 ms 0.132 ms (196%) 0.132 ms (196%) 5 1 0 none
data_AHS2013/homimp.sas7bdat : 54.358 ms 39.673 ms ( 73%) 21.815 ms ( 40%) 1 5 0 none
data_AHS2013/omov.sas7bdat : 3.644 ms 6.631 ms (182%) 5.451 ms (150%) 3 5 0 none
data_AHS2013/owner.sas7bdat : 18.117 ms 13.985 ms ( 77%) 8.112 ms ( 45%) 0 3 0 none
data_AHS2013/ratiov.sas7bdat : 6.723 ms 6.038 ms ( 90%) 3.197 ms ( 48%) 0 9 0 none
data_AHS2013/rmov.sas7bdat : 72.551 ms 90.487 ms (125%) 63.868 ms ( 88%) 2 21 0 none
data_AHS2013/topical.sas7bdat : 3394.267 ms 1755.026 ms ( 52%) 1153.360 ms ( 34%) 8 106 0 none
data_pandas/airline.sas7bdat : 0.093 ms 0.114 ms (122%) 0.117 ms (125%) 6 0 0 none
data_pandas/datetime.sas7bdat : 0.061 ms 0.133 ms (219%) 0.132 ms (217%) 1 1 2 none
data_pandas/productsales.sas7bdat : 2.812 ms 1.726 ms ( 61%) 1.075 ms ( 38%) 4 5 1 none
data_pandas/test1.sas7bdat : 0.606 ms 0.900 ms (148%) 0.836 ms (138%) 73 25 2 none
data_pandas/test2.sas7bdat : 0.624 ms 0.693 ms (111%) 0.690 ms (111%) 73 25 2 RLE
data_pandas/test4.sas7bdat : 0.607 ms 0.885 ms (146%) 0.849 ms (140%) 73 25 2 none
data_pandas/test5.sas7bdat : 0.625 ms 0.721 ms (115%) 0.693 ms (111%) 73 25 2 RLE
data_pandas/test7.sas7bdat : 0.606 ms 0.912 ms (151%) 0.855 ms (141%) 73 25 2 none
data_pandas/test9.sas7bdat : 0.622 ms 0.701 ms (113%) 0.705 ms (113%) 73 25 2 RLE
data_pandas/test10.sas7bdat : 0.606 ms 0.955 ms (158%) 0.844 ms (139%) 73 25 2 none
data_pandas/test12.sas7bdat : 0.625 ms 0.702 ms (112%) 0.683 ms (109%) 73 25 2 RLE
data_pandas/test13.sas7bdat : 0.606 ms 0.924 ms (152%) 0.860 ms (142%) 73 25 2 none
data_pandas/test15.sas7bdat : 0.623 ms 0.725 ms (116%) 0.698 ms (112%) 73 25 2 RLE
data_pandas/test16.sas7bdat : 0.614 ms 1.572 ms (256%) 1.626 ms (265%) 73 25 2 none
data_reikoch/barrows.sas7bdat : 11.242 ms 6.438 ms ( 57%) 6.513 ms ( 58%) 72 0 0 RLE
data_reikoch/extr.sas7bdat : 0.077 ms 0.310 ms (400%) 0.303 ms (391%) 0 1 0 none
data_reikoch/ietest2.sas7bdat : 0.048 ms 0.106 ms (221%) 0.106 ms (221%) 0 1 0 RLE
```
| SASLib | https://github.com/tk3369/SASLib.jl.git |
|
[
"MIT"
] | 0.1.8 | 195af880fc6d55313200fadf36251406905fc4b5 | code | 1232 | using BenchmarkTools
using ItemResponseFunctions
const SUITE = BenchmarkGroup()
models = [OnePL, TwoPL, ThreePL, FourPL, FivePL, PCM, GPCM, RSM, GRSM]
function make_pars(; nthresholds = 3)
a = rand() * 2
b = randn()
c = rand() * 0.5
d = rand() * 0.5 + 0.5
e = 1 + rand() * 0.5
t = randn(nthresholds)
return (; a, b, c, d, e, t)
end
for model in models
m = string(model)
SUITE[m] = BenchmarkGroup()
SUITE[m]["irf"] = @benchmarkable(
irf($model, theta, beta, $1),
evals = 10,
samples = 1000,
setup = (theta = randn(); beta = make_pars())
)
SUITE[m]["iif"] = @benchmarkable(
iif($model, theta, beta, $1),
evals = 10,
samples = 1000,
setup = (theta = randn(); beta = make_pars())
)
SUITE[m]["expected_score"] = @benchmarkable(
expected_score($model, theta, betas),
evals = 10,
samples = 1000,
setup = (theta = randn(); betas = [make_pars() for _ in 1:20])
)
SUITE[m]["information"] = @benchmarkable(
information($model, theta, betas),
evals = 10,
samples = 1000,
setup = (theta = randn(); betas = [make_pars() for _ in 1:20])
)
end
| ItemResponseFunctions | https://github.com/JuliaPsychometrics/ItemResponseFunctions.jl.git |
|
[
"MIT"
] | 0.1.8 | 195af880fc6d55313200fadf36251406905fc4b5 | code | 859 | using ItemResponseFunctions
using Documenter
DocMeta.setdocmeta!(
ItemResponseFunctions,
:DocTestSetup,
:(using ItemResponseFunctions);
recursive = true,
)
makedocs(;
checkdocs = :exported,
modules = [ItemResponseFunctions],
authors = "Philipp Gewessler",
sitename = "ItemResponseFunctions.jl",
format = Documenter.HTML(;
prettyurls = get(ENV, "CI", "false") == "true",
canonical = "https://juliapsychometrics.github.io/ItemResponseFunctions.jl",
edit_link = "main",
repolink = "https://github.com/JuliaPsychometrics/ItemResponseFunctions.jl/blob/{commit}{path}#{line}",
assets = String[],
),
pages = ["Home" => "index.md", "API" => "api.md"],
plugins = [],
)
deploydocs(;
repo = "github.com/JuliaPsychometrics/ItemResponseFunctions.jl",
devbranch = "main",
)
| ItemResponseFunctions | https://github.com/JuliaPsychometrics/ItemResponseFunctions.jl.git |
|
[
"MIT"
] | 0.1.8 | 195af880fc6d55313200fadf36251406905fc4b5 | code | 1502 | module ItemResponseFunctions
using AbstractItemResponseModels: Dichotomous, Nominal, checkresponsetype
using DocStringExtensions: SIGNATURES, TYPEDEF, METHODLIST, FIELDS
using ForwardDiff: derivative, derivative!
using LogExpFunctions: logistic, cumsum!, softmax!
using Reexport: @reexport
using SimpleUnPack: @unpack
# AbstractItemResponseModels interface extensions
@reexport import AbstractItemResponseModels:
ItemResponseModel, irf, iif, expected_score, information
import AbstractItemResponseModels: response_type
export DichotomousItemResponseModel,
FivePL,
FiveParameterLogisticModel,
FourPL,
FourParameterLogisticModel,
GPCM,
GRSM,
GeneralizedPartialCreditModel,
GeneralizedRatingScaleModel,
ItemParameters,
OnePL,
OnePLG,
OneParameterLogisticModel,
OneParameterLogisticPlusGuessingModel,
PCM,
PartialCreditModel,
PolytomousItemResponseModel,
RSM,
RatingScaleModel,
ThreePL,
ThreeParameterLogisticModel,
TwoPL,
TwoParameterLogisticModel,
irf!,
iif!,
partial_credit,
derivative_theta,
derivative_theta!,
second_derivative_theta,
second_derivative_theta!,
likelihood,
loglikelihood
include("model_types.jl")
include("item_parameters.jl")
include("utils.jl")
include("irf.jl")
include("iif.jl")
include("expected_score.jl")
include("information.jl")
include("scoring_functions.jl")
include("derivatives.jl")
include("likelihood.jl")
include("precompile.jl")
end
| ItemResponseFunctions | https://github.com/JuliaPsychometrics/ItemResponseFunctions.jl.git |
|
[
"MIT"
] | 0.1.8 | 195af880fc6d55313200fadf36251406905fc4b5 | code | 8646 | function second_derivative(f, x)
return derivative(x -> derivative(f, x), x)
end
"""
$(SIGNATURES)
Calculate the derivative of the item (category) response function with respect to `theta` of
model `M` given item parameters `beta` for all possible responses. This function overwrites
`probs` and `derivs` with the item category response probabilities and derivatives respectively.
"""
function derivative_theta!(
M::Type{<:ItemResponseModel},
probs,
derivs,
theta,
beta;
scoring_function::F = one,
) where {F}
pars = ItemParameters(M, beta)
return _derivative_theta!(M, probs, derivs, theta, pars; scoring_function)
end
function _derivative_theta!(
M::Type{<:ItemResponseModel},
probs,
derivs,
theta,
beta;
scoring_function::F,
) where {F}
f = (y, x) -> _irf!(M, y, x, beta; scoring_function)
derivative!(derivs, f, probs, theta)
return probs, derivs
end
# this is implemented for all except 5PL
const DichModelsWithDeriv = Union{OnePL,TwoPL,ThreePL,FourPL}
function _derivative_theta!(
M::Type{<:DichModelsWithDeriv},
probs,
derivs,
theta,
beta;
scoring_function::F,
) where {F}
probs[1], derivs[1] = _derivative_theta(M, theta, beta, 0; scoring_function)
probs[2], derivs[2] = _derivative_theta(M, theta, beta, 1; scoring_function)
return probs, derivs
end
const PolyModelsWithDeriv = Union{GPCM,PCM,GRSM,RSM}
function _derivative_theta!(
M::Type{<:PolyModelsWithDeriv},
probs,
derivs,
theta,
beta;
scoring_function::F,
) where {F}
@unpack a, b, t = beta
score = _expected_score!(M, probs, theta, beta)
_irf!(M, probs, theta, beta; scoring_function)
for c in eachindex(probs)
derivs[c] = a * probs[c] * (c - score)
end
return probs, derivs
end
"""
$(SIGNATURES)
Calculate the derivative of the item (category) response function with respect to `theta` of
model `M` given item parameters `beta` for response `y`.
Returns the primal value and the first derivative.
If `y` is omitted, returns probabilities and derivatives for all possible responses (see
also [`derivative_theta!`](@ref)).
"""
function derivative_theta(
M::Type{<:ItemResponseModel},
theta,
beta;
scoring_function::F = one,
) where {F}
ncat = M <: DichotomousItemResponseModel ? 2 : length(beta.t) + 1
probs = zeros(ncat)
derivs = similar(probs)
return derivative_theta!(M, probs, derivs, theta, beta; scoring_function)
end
function derivative_theta(
M::Type{OnePL},
theta,
beta::Real;
scoring_function::F = one,
) where {F}
pars = ItemParameters(M, beta)
return derivative_theta(M, theta, pars; scoring_function)
end
function derivative_theta(
M::Type{<:PolytomousItemResponseModel},
theta,
beta,
y;
scoring_function::F = one,
) where {F}
probs, derivs = derivative_theta(M, theta, beta; scoring_function)
return probs[y], derivs[y]
end
function derivative_theta(
M::Type{<:DichotomousItemResponseModel},
theta,
beta,
y;
scoring_function::F = one,
) where {F}
pars = ItemParameters(M, beta)
f = x -> irf(M, x, pars, y) * scoring_function(y)
prob = f(theta)
deriv = derivative(f, theta)
return prob, deriv
end
function derivative_theta(
M::Type{<:DichModelsWithDeriv},
theta,
beta,
y;
scoring_function::F = one,
) where {F}
pars = ItemParameters(M, beta)
return _derivative_theta(M, theta, pars, y; scoring_function)
end
# analytic first derivative implementations
function _derivative_theta(
M::Type{<:DichModelsWithDeriv},
theta,
beta,
y;
scoring_function::F,
) where {F}
@unpack a, c, d = beta
score = scoring_function(y)
prob = irf(M, theta, beta, y) * score
# unconstrained response probabilities
pu = irf(TwoPL, theta, beta, 1)
qu = 1 - pu
deriv = score * (d - c) * a * (pu * qu) * ifelse(y == 1, 1, -1)
return prob, deriv
end
"""
$(SIGNATURES)
Calculate the second derivative of the item (category) response function with respect to
`theta` of model `M` given item parameters `beta` for response `y`.
Returns the primal value, the first derivative, and the second derivative
If `y` is omitted, returns values and derivatives for all possible responses.
This function overwrites `probs`, `derivs` and `derivs2` with the respective values.
"""
function second_derivative_theta!(
M::Type{<:ItemResponseModel},
probs,
derivs,
derivs2,
theta,
beta;
scoring_function::F = one,
) where {F}
pars = ItemParameters(M, beta)
return _second_derivative_theta!(
M,
probs,
derivs,
derivs2,
theta,
pars;
scoring_function,
)
end
function _second_derivative_theta!(
M::Type{<:DichotomousItemResponseModel},
probs,
derivs,
derivs2,
theta,
beta;
scoring_function::F,
) where {F}
_derivative_theta!(M, probs, derivs, theta, beta; scoring_function)
f0 = x -> irf(M, x, beta, 0) * scoring_function(0)
f1 = x -> irf(M, x, beta, 1) * scoring_function(1)
derivs2[1] = second_derivative(f0, theta)
derivs2[2] = second_derivative(f1, theta)
return probs, derivs, derivs2
end
function _second_derivative_theta!(
M::Type{<:DichModelsWithDeriv},
probs,
derivs,
derivs2,
theta,
beta;
scoring_function::F,
) where {F}
probs[1], derivs[1], derivs2[1] =
_second_derivative_theta(FourPL, theta, beta, 0; scoring_function)
probs[2], derivs[2], derivs2[2] =
_second_derivative_theta(FourPL, theta, beta, 1; scoring_function)
return probs, derivs, derivs2
end
function _second_derivative_theta!(
M::Type{<:PolyModelsWithDeriv},
probs,
derivs,
derivs2,
theta,
beta;
scoring_function::F,
) where {F}
@unpack a, b, t = beta
score = _expected_score!(M, probs, theta, beta)
score2 = sum(c^2 * probs[c] for c in eachindex(probs))
_irf!(M, probs, theta, beta; scoring_function)
for c in eachindex(probs)
derivs[c] = a * probs[c] * (c - score)
derivs2[c] = a^2 * probs[c] * (c^2 - 2 * c * score + 2 * score^2 - score2)
end
return probs, derivs, derivs2
end
"""
$(SIGNATURES)
Calculate the second derivative of the item (category) response function with respect to
`theta` of model `M` given item parameters `beta` for response `y`.
Returns the primal value, the first derivative and the second derivative.
If `y` is omitted, returns primals and derivatives for all possible responses (see also
[`second_derivative_theta!`](@ref)).
"""
function second_derivative_theta(
M::Type{<:ItemResponseModel},
theta,
beta;
scoring_function::F = one,
) where {F}
ncat = M <: DichotomousItemResponseModel ? 2 : length(beta.t) + 1
probs = zeros(ncat)
derivs = similar(probs)
derivs2 = similar(probs)
return second_derivative_theta!(
M,
probs,
derivs,
derivs2,
theta,
beta;
scoring_function,
)
end
function second_derivative_theta(
M::Type{OnePL},
theta,
beta::Real;
scoring_function::F = one,
) where {F}
pars = ItemParameters(M, beta)
return second_derivative_theta(M, theta, pars; scoring_function)
end
function second_derivative_theta(M::Type{<:PolytomousItemResponseModel}, theta, beta, y)
probs, derivs, derivs2 = second_derivative_theta(M, theta, beta)
return probs[y], derivs[y], derivs2[y]
end
function second_derivative_theta(
M::Type{<:DichotomousItemResponseModel},
theta,
beta,
y;
scoring_function::F = one,
) where {F}
f = x -> irf(M, x, beta, y) * scoring_function(y)
prob = f(theta)
deriv = derivative(f, theta)
deriv2 = second_derivative(f, theta)
return prob, deriv, deriv2
end
function second_derivative_theta(
M::Type{<:DichModelsWithDeriv},
theta,
beta,
y;
scoring_function::F = one,
) where {F}
pars = ItemParameters(M, beta)
return _second_derivative_theta(M, theta, pars, y; scoring_function)
end
# analytic implementations of second derivatives
function _second_derivative_theta(
M::Type{<:DichModelsWithDeriv},
theta,
beta,
y;
scoring_function::F,
) where {F}
@unpack a, c, d = beta
prob, deriv = derivative_theta(M, theta, beta, y; scoring_function)
score = scoring_function(y)
pu = irf(TwoPL, theta, beta, 1) # unconstrained probability
qu = 1 - pu
deriv2 = a^2 * score * (d - c) * (2 * (qu^2 * pu) - pu * qu) * ifelse(y == 1, 1, -1)
return prob, deriv, deriv2
end
| ItemResponseFunctions | https://github.com/JuliaPsychometrics/ItemResponseFunctions.jl.git |
|
[
"MIT"
] | 0.1.8 | 195af880fc6d55313200fadf36251406905fc4b5 | code | 3238 | """
$(SIGNATURES)
Calculate the expected score of an item response model `M` at the ability value `theta`
given a vector of item parameters `betas`. The values of betas are considered item
parameters for different items.
Expected scores are calculated from the models [`irf`](@ref) function. For details on how
to pass item parameters to [`irf`](@ref), see the respective function documentation.
## Response scoring
The expected score is defined as the expected value of an observed response pattern `x`.
Thus, the expected value for an arbitrary function `f(x)` can also be defined.
We call the function `f` the `scoring_function` that maps responses to arbitrary values.
## Examples
### 1 Parameter Logistic Model
```jldoctest
julia> betas = fill(0.0, 10);
julia> expected_score(OnePL, 0.0, betas)
5.0
julia> expected_score(OnePL, 0.0, betas; scoring_function = x -> 2x)
10.0
```
### 2 Parameter Logistic Model
```jldoctest
julia> betas = fill((a = 1.5, b = 0.0), 5);
julia> expected_score(TwoPL, 0.0, betas)
2.5
julia> expected_score(TwoPL, 0.0, betas; scoring_function = x -> x + 1)
7.5
```
### 3 Parameter Logistic Model
```jldoctest
julia> betas = fill((a = 0.4, b = 0.5, c = 0.1), 6);
julia> expected_score(ThreePL, 0.0, betas)
3.030896414512619
```
### 4 Parameter Logistic Model
```jldoctest
julia> betas = fill((a = 1.4, b = 1.0, c = 0.15, d = 0.9), 7);
julia> expected_score(FourPL, 0.0, betas)
2.0885345850674453
```
"""
function expected_score(
M::Type{<:ItemResponseModel},
theta,
betas::AbstractVector;
scoring_function::F = identity,
) where {F}
score = zero(theta)
for beta in betas
score += expected_score(M, theta, beta; scoring_function)
end
return score
end
function expected_score(
M::Type{<:DichotomousItemResponseModel},
theta,
beta::Union{<:Real,NamedTuple,ItemParameters};
scoring_function::F = identity,
) where {F}
score = zero(theta)
for y in 0:1
score += irf(M, theta, beta, y) * scoring_function(y)
end
return score
end
# for models with non-item specific thresholds vector holding category probabilities can be
# pre-allocated
function expected_score(
M::Type{<:Union{RSM,GRSM}},
theta::T,
betas::AbstractVector;
scoring_function::F = identity,
) where {T<:Real,F}
score = zero(T)
probs = zeros(T, length(first(betas).t) + 1)
for beta in betas
pars = ItemParameters(M, beta)
score += _expected_score!(M, probs, theta, pars; scoring_function)
end
return score
end
function expected_score(
    M::Type{<:PolytomousItemResponseModel},
    theta::T,
    beta::Union{NamedTuple,ItemParameters};
    scoring_function::F = identity,
) where {T<:Real,F}
    # One probability slot per response category (number of thresholds + 1).
    buffer = zeros(T, length(beta.t) + 1)
    pars = ItemParameters(M, beta)
    return _expected_score!(M, buffer, theta, pars; scoring_function)
end
function _expected_score!(
    M::Type{<:PolytomousItemResponseModel},
    probs,
    theta,
    beta::ItemParameters;
    scoring_function::F = identity,
) where {F}
    # Fill `probs` with the category probabilities in-place, then accumulate
    # the expectation E[f(Y)] = Σ_k P(Y = k) · f(k).
    irf!(M, probs, theta, beta)
    expectation = zero(theta)
    for (category, probability) in enumerate(probs)
        expectation += probability * scoring_function(category)
    end
    return expectation
end
| ItemResponseFunctions | https://github.com/JuliaPsychometrics/ItemResponseFunctions.jl.git |
|
[
"MIT"
] | 0.1.8 | 195af880fc6d55313200fadf36251406905fc4b5 | code | 3506 | """
$(SIGNATURES)
Evaluate the item information function of an item response model `M` for response `y` at
the ability value `theta` given item parameters `beta`.
If `y` is omitted, the item category information functions for all categories are returned.
## Examples
### 1 Parameter Logistic Model
```jldoctest
julia> iif(OnePL, 0.0, 0.0, 1)
0.125
julia> iif(OnePL, 0.0, (; b = 0.0))
2-element Vector{Float64}:
0.125
0.125
```
### 2 Parameter Logistic Model
```jldoctest
julia> iif(TwoPL, 0.0, (a = 1.3, b = 0.2))
2-element Vector{Float64}:
0.2345721809921237
0.1808672521393781
```
### 3 Parameter Logistic Model
```jldoctest
julia> iif(ThreePL, 0.0, (a = 1.5, b = 0.5, c = 0.15))
2-element Vector{Float64}:
0.2830301834782102
0.033256997107963204
```
### 4 Parameter Logistic Model
```jldoctest
julia> iif(FourPL, 0.0, (a = 2.1, b = -0.2, c = 0.15, d = 0.9))
2-element Vector{Float64}:
0.1936328888005068
0.3995140205278245
```
"""
function iif(M::Type{<:DichotomousItemResponseModel}, theta, beta, y)
    # Validate the response against the model's response type, then delegate
    # to the unscored information kernel.
    checkresponsetype(response_type(M), y)
    info = _iif(M, theta, beta, y; scoring_function = one)
    return info
end
function iif(M::Type{<:DichotomousItemResponseModel}, theta, beta)
    # Category information for both dichotomous responses (y = 0 and y = 1).
    category_infos = zeros(2)
    return _iif!(M, category_infos, theta, beta; scoring_function = one)
end
function _iif(
    M::Type{<:DichotomousItemResponseModel},
    theta,
    beta,
    y;
    scoring_function::F,
) where {F}
    # Observed category information: I(θ) = (P')² / P - P''.
    p, dp, d2p = second_derivative_theta(M, theta, beta, y; scoring_function)
    # A zero probability would yield NaN in the ratio below; the information
    # contribution of an impossible response is zero.
    if p == 0.0
        return 0.0
    end
    return dp^2 / p - d2p
end
function _iif!(
    M::Type{<:DichotomousItemResponseModel},
    info,
    theta,
    beta;
    scoring_function::F,
) where {F}
    # Slot 1 holds the information for y = 0, slot 2 for y = 1.
    for y in 0:1
        info[y+1] = _iif(M, theta, beta, y; scoring_function)
    end
    return info
end
# polytomous models
function iif(M::Type{<:PolytomousItemResponseModel}, theta, beta, y)
    # Compute all category informations, then select the requested category.
    category_infos = iif(M, theta, ItemParameters(M, beta))
    return category_infos[y]
end
function iif(M::Type{<:PolytomousItemResponseModel}, theta, beta)
    # One information value per response category (number of thresholds + 1).
    pars = ItemParameters(M, beta)
    category_infos = zeros(length(beta.t) + 1)
    return _iif!(M, category_infos, theta, pars; scoring_function = one)
end
function _iif!(
    M::Type{<:PolytomousItemResponseModel},
    infos,
    theta,
    beta;
    scoring_function::F,
) where {F}
    # Evaluate category probabilities plus their first and second derivatives
    # with respect to theta; `infos` temporarily holds the probabilities.
    # (Removed an unused `@unpack a = beta` left over from a previous version.)
    derivs = similar(infos)
    derivs2 = similar(infos)
    second_derivative_theta!(M, infos, derivs, derivs2, theta, beta; scoring_function)
    # Convert (P, P', P'') into observed category information per category:
    # I_c(θ) = (P_c')² / P_c - P_c''. A zero slope yields zero information,
    # which also guards against dividing by a zero probability.
    for c in eachindex(infos)
        if iszero(derivs[c])
            infos[c] = 0.0
        else
            infos[c] = derivs[c]^2 / infos[c] - derivs2[c]
        end
    end
    return infos
end
"""
$(SIGNATURES)
An in-place version of [`iif`](@ref).
Provides efficient computation of the item category information functions by mutating `infos`
in-place, thus avoiding allocation of an intermediate arrays and output vector.
## Examples
```jldoctest
julia> beta = (a = 0.3, b = 0.1, t = (0.2, -0.5));
julia> infos = zeros(length(beta.t) + 1);
julia> iif!(GPCM, infos, 0.0, beta)
3-element Vector{Float64}:
0.019962114838441732
0.020570051742573044
0.0171815514677598
julia> infos
3-element Vector{Float64}:
0.019962114838441732
0.020570051742573044
0.0171815514677598
```
"""
function iif!(M::Type{<:ItemResponseModel}, infos, theta, beta)
    # Normalize `beta` to ItemParameters, then delegate to the in-place kernel.
    return _iif!(M, infos, theta, ItemParameters(M, beta); scoring_function = one)
end
| ItemResponseFunctions | https://github.com/JuliaPsychometrics/ItemResponseFunctions.jl.git |
|
[
"MIT"
] | 0.1.8 | 195af880fc6d55313200fadf36251406905fc4b5 | code | 3031 | """
$(SIGNATURES)
Calculate the test information of an item response model `M` at the ability value `theta`
given a vector of item parameters `betas`. The values of betas are considered item
parameters for different items.
The test information is calculated from the models [`iif`](@ref) function. For details on
how to pass item parameters to [`iif`](@ref), see the respective function documentation.
## Examples
### 1 Parameter Logistic Model
```jldoctest
julia> information(OnePL, 0.0, zeros(6))
1.5
julia> betas = fill((; b = 0.0), 6);
julia> information(OnePL, 0.0, betas)
1.5
```
### 2 Parameter Logistic Model
```jldoctest
julia> betas = fill((; a = 1.2, b = 0.4), 4);
julia> information(TwoPL, 0.0, betas)
1.3601401228069936
```
### 3 Parameter Logistic Model
```jldoctest
julia> betas = fill((; a = 1.5, b = 0.5, c = 0.2), 4);
julia> information(ThreePL, 0.0, betas)
1.1021806599852655
```
### 4 Parameter Logistic Model
```jldoctest
julia> betas = fill((; a = 0.5, b = 1.4, c = 0.13, d = 0.94), 6);
julia> information(FourPL, 0.0, betas)
0.20178122985757524
```
"""
function information(
    M::Type{<:DichotomousItemResponseModel},
    theta,
    betas::AbstractVector;
    scoring_function::F = one,
) where {F}
    # Test information is the sum of the independent item informations.
    total = zero(theta)
    for item_pars in betas
        total += information(M, theta, item_pars; scoring_function)
    end
    return total
end
function information(
    M::Type{<:DichotomousItemResponseModel},
    theta,
    beta::Union{<:Real,NamedTuple,ItemParameters};
    scoring_function::F = one,
) where {F}
    # Item information sums the category informations of both responses.
    pars = ItemParameters(M, beta)
    total = zero(theta)
    total += _iif(M, theta, pars, 0; scoring_function)
    total += _iif(M, theta, pars, 1; scoring_function)
    return total
end
# polytomous models
function information(
    M::Type{<:PolytomousItemResponseModel},
    theta,
    betas::AbstractVector;
    scoring_function::F = one,
) where {F}
    # Sum item informations; each item may carry its own threshold vector.
    total = zero(theta)
    for item_pars in betas
        total += information(M, theta, item_pars; scoring_function)
    end
    return total
end
# for models with non-item specific thresholds vector holding category probabilities can be
# pre-allocated
function information(
    M::Type{<:Union{RSM,GRSM}},
    theta::T,
    betas::AbstractVector;
    scoring_function::F = one,
) where {T<:Real,F}
    # Rating scale models share thresholds across items, so one scratch
    # buffer for the category informations can be reused for every item.
    buffer = zeros(T, length(first(betas).t) + 1)
    total = zero(T)
    for item_pars in betas
        total += _information!(M, buffer, theta, item_pars; scoring_function)
    end
    return total
end
function information(
    M::Type{<:PolytomousItemResponseModel},
    theta::T,
    beta::Union{NamedTuple,ItemParameters};
    scoring_function::F = one,
) where {T<:Real,F}
    # One scratch slot per response category (number of thresholds + 1).
    buffer = zeros(T, length(beta.t) + 1)
    return _information!(M, buffer, theta, beta; scoring_function)
end
function _information!(
    M::Type{<:PolytomousItemResponseModel},
    infos,
    theta,
    beta::Union{NamedTuple,ItemParameters};
    scoring_function::F,
) where {F}
    # Item information is the sum of the in-place category informations.
    _iif!(M, infos, theta, ItemParameters(M, beta); scoring_function)
    return sum(infos)
end
| ItemResponseFunctions | https://github.com/JuliaPsychometrics/ItemResponseFunctions.jl.git |
|
[
"MIT"
] | 0.1.8 | 195af880fc6d55313200fadf36251406905fc4b5 | code | 4326 | """
$(SIGNATURES)
Evaluate the item response function of an item response model `M` for response `y` at the
ability value `theta` given item parameters `beta`.
If `y` is omitted, then the item response function is evaluated for all possible responses.
## Examples
### 1 Parameter Logistic Model
```jldoctest
julia> irf(OnePL, 0.0, 0.0, 1)
0.5
julia> irf(OnePL, 0.0, (; b = 0.5), 1)
0.37754066879814546
julia> irf(OnePL, 0.0, 0.5)
2-element Vector{Float64}:
0.6224593312018545
0.37754066879814546
```
### 2 Parameter Logistic Model
```jldoctest
julia> beta = (a = 1.5, b = 0.5);
julia> irf(TwoPL, 0.0, beta, 1)
0.32082130082460697
```
### 3 Parameter Logistic Model
```jldoctest
julia> beta = (a = 1.5, b = 0.5, c = 0.2);
julia> irf(ThreePL, 0.0, beta, 1)
0.4566570406596856
```
### 4 Parameter Logistic Model
```jldoctest
julia> beta = (a = 1.5, b = 0.5, c = 0.2, d = 0.8);
julia> irf(FourPL, 0.0, beta, 1)
0.3924927804947642
```
### Partial Credit Model
```jldoctest
julia> beta = (b = -0.3, t = [-0.5, 1.3, -0.2]);
julia> irf(PCM, 0.0, beta)
4-element Vector{Float64}:
0.09656592461423529
0.07906149218108449
0.3915941342939724
0.4327784489107078
julia> irf(PCM, 0.0, beta, 3)
0.3915941342939724
```
### Generalized Partial Credit Model
```jldoctest
julia> beta = (a = 1.3, b = 0.25, t = [0.0, 1.0]);
julia> irf(GPCM, 0.0, beta)
3-element Vector{Float64}:
0.27487115408319557
0.1986019275522736
0.5265269183645309
julia> irf(GPCM, 0.0, beta, 1)
0.27487115408319557
```
### Rating Scale Model
```jldoctest
julia> beta = (b = 0.0, t = zeros(2));
julia> irf(RSM, 0.0, beta)
3-element Vector{Float64}:
0.3333333333333333
0.3333333333333333
0.3333333333333333
julia> irf(RSM, 0.0, beta, 3)
0.3333333333333333
```
"""
function irf(M::Type{<:DichotomousItemResponseModel}, theta, beta, y)
    # Validate the response, normalize the parameters, then evaluate.
    checkresponsetype(response_type(M), y)
    return _irf(M, theta, ItemParameters(M, beta), y; scoring_function = one)
end
function irf(M::Type{<:DichotomousItemResponseModel}, theta, beta)
    # Probabilities for both response categories (y = 0 and y = 1).
    probs = zeros(2)
    return _irf!(M, probs, theta, ItemParameters(M, beta); scoring_function = one)
end
function _irf(
    M::Type{<:DichotomousItemResponseModel},
    theta::Real,
    beta,
    y;
    scoring_function::F,
) where {F}
    @unpack a, b, c, d, e = beta
    # 5PL item response function; restricted models arrive here with neutral
    # values for the parameters they lack (a = e = 1, c = 0, d = 1).
    p_one = c + (d - c) * logistic(a * (theta - b))^e
    p_observed = y == 1 ? p_one : 1 - p_one
    return p_observed * scoring_function(y)
end
function _irf!(
    M::Type{<:DichotomousItemResponseModel},
    probs,
    theta,
    beta;
    scoring_function::F,
) where {F}
    # Slot 1 holds the (scored) probability of y = 0, slot 2 of y = 1.
    for y in 0:1
        probs[y+1] = _irf(M, theta, beta, y; scoring_function)
    end
    return probs
end
# polytomous models
function irf(M::Type{<:PolytomousItemResponseModel}, theta, beta, y)
    checkresponsetype(response_type(M), y)
    # Evaluate all category probabilities, then select the requested one.
    probs = irf(M, theta, beta)
    return probs[y]
end
function irf(M::Type{<:PolytomousItemResponseModel}, theta::T, beta) where {T}
    # One probability per response category (number of thresholds + 1).
    probs = zeros(T, length(beta.t) + 1)
    pars = ItemParameters(M, beta)
    return _irf!(M, probs, theta, pars; scoring_function = one)
end
function _irf!(
    M::Type{<:PolytomousItemResponseModel},
    probs,
    theta,
    beta;
    scoring_function::F,
) where {F}
    @unpack a, b, t = beta
    # GPCM-style category response function: the per-step terms
    # a * (theta - b + t_s) are accumulated with cumsum! and normalized via
    # softmax!. The first category's cumulative term is pinned to zero so it
    # acts as the reference category. All work happens in-place in `probs`.
    probs[1] = zero(eltype(probs))
    @. probs[2:end] = a * (theta - b + t)
    cumsum!(probs, probs)
    softmax!(probs, probs)
    # Apply the response scoring: each category probability is weighted by
    # scoring_function(category). With `one` this leaves plain probabilities.
    for c in eachindex(probs)
        probs[c] *= scoring_function(c)
    end
    return probs
end
"""
$(SIGNATURES)
An in-place version of [`irf`](@ref).
Provides efficient computation by mutating `probs` in-place, thus avoiding allocation of an
output vector.
## Examples
```jldoctest
julia> beta = (a = 1.2, b = 0.3, t = zeros(3));
julia> probs = zeros(length(beta.t) + 1);
julia> irf!(GPCM, probs, 0.0, beta)
4-element Vector{Float64}:
0.3961927292844976
0.2764142877832629
0.19284770477416754
0.13454527815807202
julia> probs
4-element Vector{Float64}:
0.3961927292844976
0.2764142877832629
0.19284770477416754
0.13454527815807202
```
"""
function irf!(
    M::Type{<:ItemResponseModel},
    probs,
    theta,
    beta;
    scoring_function::F = one,
) where {F}
    # Normalize `beta` to ItemParameters, then delegate to the in-place kernel.
    return _irf!(M, probs, theta, ItemParameters(M, beta); scoring_function)
end
| ItemResponseFunctions | https://github.com/JuliaPsychometrics/ItemResponseFunctions.jl.git |
|
[
"MIT"
] | 0.1.8 | 195af880fc6d55313200fadf36251406905fc4b5 | code | 3879 | """
$(TYPEDEF)
A struct representing item parameters for an item response model.
## Fields
$(FIELDS)
## Examples
```jldoctest
julia> pars = ItemParameters(TwoPL, a = 1.5, b = 0.0)
ItemParameters{TwoParameterLogisticModel, 0, Float64}(1.5, 0.0, 0.0, 1.0, 1.0, ())
julia> ItemParameters(OnePL, pars)
ItemParameters{OneParameterLogisticModel, 0, Float64}(1.0, 0.0, 0.0, 1.0, 1.0, ())
```
"""
struct ItemParameters{M<:ItemResponseModel,N,T<:Real}
    "the item discrimination"
    a::T
    "the item difficulty (location)"
    b::T
    "the lower asymptote"
    c::T
    "the upper asymptote"
    d::T
    "the item stiffness"
    e::T
    "a tuple of threshold parameters"
    t::NTuple{N,T}
    function ItemParameters(
        model::Type{<:ItemResponseModel},
        a,
        b,
        c,
        d,
        e,
        t::NTuple{N,T},
    ) where {N,T}
        # Parameters the model does not support are reset to their neutral
        # values. All neutral values are derived from `b` so that an ignored
        # argument's type cannot leak into the promoted element type.
        # (Previously `d` used `one(d)`, inconsistent with a, c, and e.)
        a = has_discrimination(model) ? a : one(b)
        c = has_lower_asymptote(model) ? c : zero(b)
        d = has_upper_asymptote(model) ? d : one(b)
        e = has_stiffness(model) ? e : one(b)
        # Each check throws an ArgumentError for parameters invalid under `model`.
        check_discrimination(model, a)
        check_lower_asymptote(model, c)
        check_upper_asymptote(model, c, d)
        check_stiffness(model, e)
        beta = promote(a, b, c, d, e)
        return new{model,N,eltype(beta)}(beta..., t)
    end
end
function ItemParameters(
    M::Type{<:ItemResponseModel};
    b,
    a = one(b),
    c = zero(b),
    d = one(b),
    e = one(b),
    t = (),
)
    # Promote the scalar parameters to a common type, then dispatch to the
    # inner constructor; thresholds are always stored as a tuple.
    a, b, c, d, e = promote(a, b, c, d, e)
    return ItemParameters(M, a, b, c, d, e, Tuple(t))
end
function ItemParameters(M::Type{<:ItemResponseModel}, beta::ItemParameters)
    # Re-interpret existing parameters under (possibly) another model `M`;
    # the inner constructor resets fields `M` does not support.
    return ItemParameters(M, beta.a, beta.b, beta.c, beta.d, beta.e, beta.t)
end
# Convenience conversions: a named tuple forwards its fields as keyword
# arguments; a bare number is interpreted as the difficulty `b`.
function ItemParameters(M::Type{<:ItemResponseModel}, beta::NamedTuple)
    return ItemParameters(M; beta...)
end
function ItemParameters(M::Type{<:ItemResponseModel}, beta::Real)
    return ItemParameters(M; b = beta)
end
"""
$(SIGNATURES)
Check the validity of the item parameters `beta`.
"""
function check_pars(M::Type{<:DichotomousItemResponseModel}, beta)
    # Run every parameter check; each throws an ArgumentError on failure.
    check_discrimination(M, beta.a)
    check_lower_asymptote(M, beta.c)
    check_upper_asymptote(M, beta.c, beta.d)
    check_stiffness(M, beta.e)
    return true
end
function check_pars(M::Type{<:PolytomousItemResponseModel}, beta)
    # Polytomous models only constrain the discrimination parameter.
    check_discrimination(M, beta.a)
    return true
end
function check_discrimination(M::Type{<:ItemResponseModel}, a)
    # Models without a discrimination parameter require a == 1; models with
    # one accept any value. Always return `true` on success — the original
    # returned `nothing` on the (valid, a == 1) path, inconsistent with the
    # other check functions.
    if !has_discrimination(M) && a != 1
        err = "discrimination parameter a != 1"
        throw(ArgumentError(err))
    end
    return true
end
function check_lower_asymptote(M::Type{<:ItemResponseModel}, c)
    if has_lower_asymptote(M)
        # NOTE(review): the bounds check is inclusive of 0 and 1 even though
        # the message reads like an open interval; the test suite matches
        # this exact message text, so it is preserved verbatim.
        if c < 0 || c > 1
            throw(ArgumentError("lower asymptote c must be in interval (0, 1)"))
        end
    else
        # Models without a lower asymptote require c == 0.
        if c != 0
            throw(ArgumentError("lower asymptote c != 0"))
        end
    end
    return true
end
function check_upper_asymptote(M::Type{<:ItemResponseModel}, c, d)
    # Validate the upper asymptote: for models with one, d must lie in [0, 1]
    # and not fall below the lower asymptote c; otherwise d must equal 1.
    if has_upper_asymptote(M)
        if d < 0 || d > 1
            err = "upper asymptote d must be in interval (0, 1)"
            throw(ArgumentError(err))
        end
        if c > d
            err = "lower asymptote c must be smaller than upper asymptote d"
            throw(ArgumentError(err))
        end
    else
        if d != 1
            err = "upper asymptote d != 1"
            throw(ArgumentError(err))
        end
    end
    # Return `true` on success for consistency with the sibling checks
    # (the original fell off the end and returned `nothing`).
    return true
end
function check_stiffness(M::Type{<:ItemResponseModel}, e)
    # Models with a stiffness parameter reject e < 0; all others fix e == 1.
    if has_stiffness(M)
        e < 0 && throw(ArgumentError("stiffness parameter e must be positive"))
    else
        e != 1 && throw(ArgumentError("stiffness parameter e != 1"))
    end
    return true
end
| ItemResponseFunctions | https://github.com/JuliaPsychometrics/ItemResponseFunctions.jl.git |
|
[
"MIT"
] | 0.1.8 | 195af880fc6d55313200fadf36251406905fc4b5 | code | 926 | """
$(SIGNATURES)
Evaluate the likelihood of an item response model `M` at `theta`, given item parameters
`betas` and response pattern `responses`.
Items are assumed to be independent. Then, the likelihood function is defined as,
``
L(\\boldsymbol{u} | \\theta, \\boldsymbol{\\beta}) = \\prod_{j=1}^J P(Y_j = y | \\theta, \\boldsymbol{\\beta}_j)
``
See also [`loglikelihood`](@ref).
"""
function likelihood(M::Type{<:ItemResponseModel}, theta, betas, responses)
    # Product of the per-item response probabilities (local independence).
    result = one(theta)
    for (item_pars, response) in zip(betas, responses)
        result *= irf(M, theta, item_pars, response)
    end
    return result
end
"""
$(SIGNATURES)
Evaluate the log-likelihood of an item response model `M` at `theta`, given item parameters
`betas` and response pattern `responses`,
See also [`likelihood`](@ref).
"""
function loglikelihood(M::Type{<:ItemResponseModel}, theta, betas, responses)
    # Computed as the log of the full likelihood product.
    L = likelihood(M, theta, betas, responses)
    return log(L)
end
| ItemResponseFunctions | https://github.com/JuliaPsychometrics/ItemResponseFunctions.jl.git |
|
[
"MIT"
] | 0.1.8 | 195af880fc6d55313200fadf36251406905fc4b5 | code | 7731 | """
$(TYPEDEF)
An abstract type representing an item response model with dichotomous responses.
"""
abstract type DichotomousItemResponseModel <: ItemResponseModel end
# Dichotomous models produce binary (0/1) responses.
response_type(::Type{<:DichotomousItemResponseModel}) = Dichotomous
"""
$(TYPEDEF)
An abstract type representing an item response model with polytomous responses.
"""
abstract type PolytomousItemResponseModel <: ItemResponseModel end
# Polytomous models yield nominal category responses; among the parameter
# traits, only discrimination varies between the concrete polytomous models.
response_type(::Type{<:PolytomousItemResponseModel}) = Nominal
has_stiffness(::Type{<:PolytomousItemResponseModel}) = false
has_lower_asymptote(::Type{<:PolytomousItemResponseModel}) = false
has_upper_asymptote(::Type{<:PolytomousItemResponseModel}) = false
"""
$(TYPEDEF)
An abstract representation of a 1 Parameter Logistic Model with an item response function
given by
``P(Y_{ij}=1|\\theta_i,\\boldsymbol{\\beta}_j) = \\mathrm{logistic}(\\theta_i - b_j)``
The item parameter `beta` can be passed as a number or a destructurable object with the
following fields:
- `b`: the item difficulty (location)
**Alias:** `OnePL`
"""
abstract type OneParameterLogisticModel <: DichotomousItemResponseModel end
const OnePL = OneParameterLogisticModel
# Trait table: the 1PL only has a difficulty parameter.
has_discrimination(::Type{OnePL}) = false
has_lower_asymptote(::Type{OnePL}) = false
has_upper_asymptote(::Type{OnePL}) = false
has_stiffness(::Type{OnePL}) = false
"""
$(TYPEDEF)
An abstract representation of the 1 Parameter Logistic + Guessing Model with an item
response function given by
``P(Y_{ij}=1|\\theta_i,\\boldsymbol{\\beta}_j) = c + (1 - c) \\cdot \\mathrm{logistic}(\\theta_i - b_j)``
The item parameters `beta` must be a destructurable object with the following fields:
- `b`: the item difficulty (location)
- `c`: the lower asymptote
**Alias:** `OnePLG`
"""
abstract type OneParameterLogisticPlusGuessingModel <: DichotomousItemResponseModel end
const OnePLG = OneParameterLogisticPlusGuessingModel
# Trait table: difficulty plus a lower (guessing) asymptote.
has_discrimination(::Type{OnePLG}) = false
has_lower_asymptote(::Type{OnePLG}) = true
has_upper_asymptote(::Type{OnePLG}) = false
has_stiffness(::Type{OnePLG}) = false
"""
$(TYPEDEF)
An abstract representation of a 2 Parameter Logistic Model with an item response function
given by
``P(Y_{ij}=1|\\theta_i,\\boldsymbol{\\beta}_j) = \\mathrm{logistic}(a_j(\\theta_i - b_j))``
The item parameters `beta` must be a destructurable object with the following fields:
- `a`: the item discrimination
- `b`: the item difficulty (location)
**Alias:** `TwoPL`
"""
abstract type TwoParameterLogisticModel <: DichotomousItemResponseModel end
const TwoPL = TwoParameterLogisticModel
# Trait table: discrimination and difficulty.
has_discrimination(::Type{TwoPL}) = true
has_lower_asymptote(::Type{TwoPL}) = false
has_upper_asymptote(::Type{TwoPL}) = false
has_stiffness(::Type{TwoPL}) = false
"""
$(TYPEDEF)
An abstract representation of a 3 Parameter Logistic Model with an item response function
given by
``P(Y_{ij}=1|\\theta_i,\\boldsymbol{\\beta}_j) = c_j + (1 - c_j)\\cdot\\mathrm{logistic}(a_j(\\theta_i - b_j))``
The item parameters `beta` must be a destructurable object with the following fields:
- `a`: the item discrimination
- `b`: the item difficulty (location)
- `c`: the lower asymptote
**Alias:** `ThreePL`
"""
abstract type ThreeParameterLogisticModel <: DichotomousItemResponseModel end
const ThreePL = ThreeParameterLogisticModel
# Trait table: discrimination, difficulty, and a lower asymptote.
has_discrimination(::Type{ThreePL}) = true
has_lower_asymptote(::Type{ThreePL}) = true
has_upper_asymptote(::Type{ThreePL}) = false
has_stiffness(::Type{ThreePL}) = false
"""
$(TYPEDEF)
An abstract representation of a 4 Parameter Logistic Model with an item response function
given by
``P(Y_{ij}=1|\\theta_i,\\boldsymbol{\\beta}_j) = c_j + (d_j - c_j)\\cdot\\mathrm{logistic}(a_j(\\theta_i - b_j))``
The item parameters `beta` must be a destructurable object with the following fields:
- `a`: the item discrimination
- `b`: the item difficulty (location)
- `c`: the lower asymptote
- `d`: the upper asymptote
**Alias:** `FourPL`
"""
abstract type FourParameterLogisticModel <: DichotomousItemResponseModel end
const FourPL = FourParameterLogisticModel
# Trait table: discrimination, difficulty, lower and upper asymptotes.
has_discrimination(::Type{FourPL}) = true
has_lower_asymptote(::Type{FourPL}) = true
has_upper_asymptote(::Type{FourPL}) = true
has_stiffness(::Type{FourPL}) = false
"""
$(TYPEDEF)
An abstract representation of a 5 Parameter Logistic Model with an item response function
given by
``P(Y_{ij}=1|\\theta_i,\\boldsymbol{\\beta}_j) = c_j + (d_j - c_j)\\cdot\\mathrm{logistic}(a_j(\\theta_i - b_j))^{e_j}``
The item parameters `beta` must be a destructurable object with the following fields:
- `a`: the item discrimination
- `b`: the item difficulty (location)
- `c`: the lower asymptote
- `d`: the upper asymptote
- `e`: the item stiffness
**Alias:** `FivePL`
"""
abstract type FiveParameterLogisticModel <: DichotomousItemResponseModel end
const FivePL = FiveParameterLogisticModel
# Trait table: all five parameters; the most general dichotomous model here.
has_discrimination(::Type{FivePL}) = true
has_lower_asymptote(::Type{FivePL}) = true
has_upper_asymptote(::Type{FivePL}) = true
has_stiffness(::Type{FivePL}) = true
"""
$(TYPEDEF)
An abstract type representing a Generalized Partial Credit Model with an item category
response function given by
``P(Y_{ij} = y,| \\theta_i, \\boldsymbol{\\beta}_j) =
\\frac{\\exp \\sum_{s=1}^y (a_j (\\theta_i - b_j + t_{js}))}
{1 + \\sum_{k=1}^{K_j} \\exp \\sum_{s=1}^k (a_j (\\theta_i - b_j + t_{js}))}``
The item parameters `beta` must be a destructurable object with the following fields:
- `a`: the item discrimination
- `b`: the item difficulty (location)
- `t`: a vector of threshold parameters
**Alias:** `GPCM`
"""
abstract type GeneralizedPartialCreditModel <: PolytomousItemResponseModel end
const GPCM = GeneralizedPartialCreditModel
# GPCM extends the PCM with an item discrimination parameter.
has_discrimination(::Type{GPCM}) = true
"""
$(TYPEDEF)
An abstract type representing a Partial Credit Model with an item category response function
given by
``P(Y_{ij} = y,| \\theta_i, \\boldsymbol{\\beta}_j) =
\\frac{\\exp \\sum_{s=1}^y (\\theta_i - b_j + t_{js})}
{1 + \\sum_{k=1}^{K_j} \\exp \\sum_{s=1}^k (\\theta_i - b_j + t_{js})}``
The item parameters `beta` must be a destructurable object with the following fields:
- `b`: the item difficulty (location)
- `t`: a vector of threshold parameters
**Alias:** `PCM`
"""
abstract type PartialCreditModel <: PolytomousItemResponseModel end
const PCM = PartialCreditModel
# PCM has item-specific thresholds but no discrimination parameter.
has_discrimination(::Type{PCM}) = false
"""
$(TYPEDEF)
An abstract type representing a Rating Scale Model with an item category response function
given by
``P(Y_{ij} = y,| \\theta_i, \\boldsymbol{\\beta}_j) =
\\frac{\\exp \\sum_{s=1}^y (\\theta_i - b_j + t_{s})}
{1 + \\sum_{k=1}^{K_j} \\exp \\sum_{s=1}^k (\\theta_i - b_j + t_{s})}``
The item parameters `beta` must be a destructurable object with the following fields:
- `b`: the item difficulty (location)
- `t`: a vector of threshold parameters
**Alias:** `RSM`
"""
abstract type RatingScaleModel <: PolytomousItemResponseModel end
const RSM = RatingScaleModel
# RSM shares thresholds across items and has no discrimination parameter.
has_discrimination(::Type{RSM}) = false
"""
$(TYPEDEF)
An abstract type representing a Generalized Rating ScaleModel with an item category
response function given by
``P(Y_{ij} = y,| \\theta_i, \\boldsymbol{\\beta}_j) =
\\frac{\\exp \\sum_{s=1}^y (a_j (\\theta_i - b_j + t_{s}))}
{1 + \\sum_{k=1}^{K_j} \\exp \\sum_{s=1}^k (a_j (\\theta_i - b_j + t_{s}))}``
The item parameters `beta` must be a destructurable object with the following fields:
- `a`: the item discrimination
- `b`: the item difficulty (location)
- `t`: a vector of threshold parameters
**Alias:** `GRSM`
"""
abstract type GeneralizedRatingScaleModel <: PolytomousItemResponseModel end
const GRSM = GeneralizedRatingScaleModel
# GRSM extends the RSM with an item discrimination parameter.
has_discrimination(::Type{GRSM}) = true
| ItemResponseFunctions | https://github.com/JuliaPsychometrics/ItemResponseFunctions.jl.git |
|
[
"MIT"
] | 0.1.8 | 195af880fc6d55313200fadf36251406905fc4b5 | code | 573 | using PrecompileTools: @setup_workload, @compile_workload
@setup_workload begin
models = [OnePL, OnePLG, TwoPL, ThreePL, FourPL, FivePL, PCM, GPCM, RSM, GRSM]
beta = (a = 1.0, b = 0.0, c = 0.0, d = 1.0, e = 1.0, t = zeros(3))
betas = fill(beta, 3)
@compile_workload begin
for model in models
irf(model, 0.0, beta, 1)
irf(model, 0.0, beta)
iif(model, 0.0, beta, 1)
iif(model, 0.0, beta)
expected_score(model, 0.0, betas)
information(model, 0.0, betas)
end
end
end
| ItemResponseFunctions | https://github.com/JuliaPsychometrics/ItemResponseFunctions.jl.git |
|
[
"MIT"
] | 0.1.8 | 195af880fc6d55313200fadf36251406905fc4b5 | code | 558 | """
$(SIGNATURES)
Return a scoring function that maps `n` response categories to a score (0, `max_score`).
## Examples
```jldoctest
julia> my_partial_credit = partial_credit(4);
julia> my_partial_credit.(1:4)
4-element Vector{Float64}:
0.0
0.3333333333333333
0.6666666666666666
1.0
```
```jldoctest
julia> my_partial_credit = partial_credit(5, max_score = 3);
julia> my_partial_credit.(1:5)
5-element Vector{Float64}:
0.0
0.75
1.5
2.25
3.0
```
"""
function partial_credit(n; max_score = 1)
    # Linearly map category k ∈ 1:n onto [0, max_score]; the arithmetic order
    # matches the documented examples exactly.
    denominator = n - 1
    return k -> (k - 1) / denominator * max_score
end
| ItemResponseFunctions | https://github.com/JuliaPsychometrics/ItemResponseFunctions.jl.git |
|
[
"MIT"
] | 0.1.8 | 195af880fc6d55313200fadf36251406905fc4b5 | code | 5136 | import ForwardDiff
# Scoring functions used to exercise the weighted-derivative code paths.
scoring_functions =
    [one, zero, identity, x -> 1 + x, x -> 1 + 10x, x -> ifelse(x == 0, -1, 1)]
# Compare a dichotomous model's derivatives against the 5PL reference
# (every restricted model is a special case of the 5PL) and check that the
# per-response and all-responses methods agree.
function test_derivatives(M::Type{<:DichotomousItemResponseModel}, beta)
    theta = rand()
    for scoring_function in scoring_functions
        for y in 0:1
            # equivalency to 5PL
            @test derivative_theta(M, theta, beta, y; scoring_function)[1] ≈
                  derivative_theta(FivePL, theta, beta, y; scoring_function)[1]
            @test derivative_theta(M, theta, beta, y; scoring_function)[2] ≈
                  derivative_theta(FivePL, theta, beta, y; scoring_function)[2]
            @test second_derivative_theta(M, theta, beta, y)[1] ≈
                  second_derivative_theta(FivePL, theta, beta, y)[1]
            @test second_derivative_theta(M, theta, beta, y)[2] ≈
                  second_derivative_theta(FivePL, theta, beta, y)[2]
            @test second_derivative_theta(M, theta, beta, y)[3] ≈
                  second_derivative_theta(FivePL, theta, beta, y)[3]
            # equivalency of methods
            @test derivative_theta(M, theta, beta, y; scoring_function)[1] ≈
                  derivative_theta(M, theta, beta; scoring_function)[1][y+1]
            @test derivative_theta(M, theta, beta, y; scoring_function)[2] ≈
                  derivative_theta(M, theta, beta; scoring_function)[2][y+1]
            @test second_derivative_theta(M, theta, beta, y)[1] ≈
                  second_derivative_theta(M, theta, beta)[1][y+1]
            @test second_derivative_theta(M, theta, beta, y)[2] ≈
                  second_derivative_theta(M, theta, beta)[2][y+1]
            @test second_derivative_theta(M, theta, beta, y)[3] ≈
                  second_derivative_theta(M, theta, beta)[3][y+1]
        end
    end
end
# A GPCM stand-in used as a reference in the polytomous derivative tests.
# NOTE(review): presumably derivative_theta falls back to automatic
# differentiation for this type since no specialized methods are defined —
# confirm against the derivative_theta implementation.
abstract type GPCMAutodiff <: PolytomousItemResponseModel end
export GPCMAutodiff
ItemResponseFunctions.has_discrimination(::Type{GPCMAutodiff}) = true
# Compare a polytomous model's derivatives against the GPCMAutodiff reference
# and check that the per-category and all-categories methods agree.
function test_derivatives(M::Type{<:PolytomousItemResponseModel}, beta)
    theta = rand()
    categories = 1:(length(beta.t)+1)
    for c in categories
        # equivalent to autodiff
        @test derivative_theta(M, theta, beta, c)[1] ≈
              derivative_theta(GPCMAutodiff, theta, beta, c)[1]
        @test derivative_theta(M, theta, beta, c)[2] ≈
              derivative_theta(GPCMAutodiff, theta, beta, c)[2]
        # equivalency of methods
        @test derivative_theta(M, theta, beta, c)[1] ≈
              derivative_theta(M, theta, beta)[1][c]
        @test derivative_theta(M, theta, beta, c)[2] ≈
              derivative_theta(M, theta, beta)[2][c]
        @test second_derivative_theta(M, theta, beta, c)[1] ≈
              second_derivative_theta(M, theta, beta)[1][c]
        @test second_derivative_theta(M, theta, beta, c)[2] ≈
              second_derivative_theta(M, theta, beta)[2][c]
        @test second_derivative_theta(M, theta, beta, c)[3] ≈
              second_derivative_theta(M, theta, beta)[3][c]
    end
end
@testset "derivatives" begin
@testset "FivePL" begin
beta = (a = 2.3, b = 0.1, c = 0.1, d = 0.95, e = 0.88)
test_derivatives(FivePL, beta)
end
@testset "FourPL" begin
beta = (a = 2.3, b = 0.1, c = 0.1, d = 0.95, e = 1)
test_derivatives(FourPL, beta)
end
@testset "ThreePL" begin
beta = (a = 2.3, b = 0.1, c = 0.1, d = 1, e = 1)
test_derivatives(ThreePL, beta)
end
@testset "TwoPL" begin
beta = (a = 2.3, b = 0.1, c = 0, d = 1, e = 1)
test_derivatives(TwoPL, beta)
end
@testset "OnePLG" begin
beta = (a = 1, b = 0.1, c = 0.15, d = 1, e = 1)
test_derivatives(OnePLG, beta)
end
@testset "OnePL" begin
beta = (a = 1, b = 0.1, c = 0, d = 1, e = 1)
test_derivatives(OnePL, beta)
@test all(
derivative_theta(OnePL, 0.0, 0.1, 1) .≈ derivative_theta(OnePL, 0.0, beta, 1),
)
@test all(
second_derivative_theta(OnePL, 0.0, 0.1, 1) .≈
second_derivative_theta(OnePL, 0.0, beta, 1),
)
@test derivative_theta(OnePL, 0.0, 0.1)[1] == derivative_theta(OnePL, 0.0, beta)[1]
@test derivative_theta(OnePL, 0.0, 0.1)[2] == derivative_theta(OnePL, 0.0, beta)[2]
@test second_derivative_theta(OnePL, 0.0, 0.1)[1] ==
second_derivative_theta(OnePL, 0.0, beta)[1]
@test second_derivative_theta(OnePL, 0.0, 0.1)[2] ==
second_derivative_theta(OnePL, 0.0, beta)[2]
@test second_derivative_theta(OnePL, 0.0, 0.1)[3] ==
second_derivative_theta(OnePL, 0.0, beta)[3]
end
@testset "GPCM" begin
beta = (a = 1.3, b = 0.0, t = (0.2, -0.2))
test_derivatives(GPCM, beta)
end
@testset "PCM" begin
beta = (a = 1.0, b = 1.48, t = randn(3))
test_derivatives(PCM, beta)
end
@testset "GRSM" begin
beta = (a = 0.23, b = 1.48, t = randn(3))
test_derivatives(GRSM, beta)
end
@testset "RSM" begin
beta = (a = 1.0, b = 1.48, t = randn(3))
test_derivatives(RSM, beta)
end
end
| ItemResponseFunctions | https://github.com/JuliaPsychometrics/ItemResponseFunctions.jl.git |
|
[
"MIT"
] | 0.1.8 | 195af880fc6d55313200fadf36251406905fc4b5 | code | 971 | @testset "ItemParameters" begin
# default construction
pars = ItemParameters(OnePL, b = 0.0)
@test pars.a == 1.0
@test pars.b == 0.0
@test pars.c == 0.0
@test pars.d == 1.0
@test pars.e == 1.0
@test pars.t == ()
# default values should match typeof(beta.b)
for T in [Float16, Float32, Float64]
pars = ItemParameters(OnePL, zero(T))
@test pars.a isa T
@test pars.b isa T
@test pars.c isa T
@test pars.d isa T
@test pars.e isa T
end
# construction from tuple
beta_tuple = (a = 1.2, b = 0.2, c = 0.4)
pars = ItemParameters(OnePL, beta_tuple)
@test pars.a == 1.0
@test pars.b == beta_tuple.b
@test pars.c == 0.0
pars = ItemParameters(ThreePL, beta_tuple)
@test pars.a == beta_tuple.a
@test pars.b == beta_tuple.b
@test pars.c == beta_tuple.c
# construction from Real
pars = ItemParameters(FourPL, 1.3)
@test pars.b == 1.3
end
| ItemResponseFunctions | https://github.com/JuliaPsychometrics/ItemResponseFunctions.jl.git |
|
[
"MIT"
] | 0.1.8 | 195af880fc6d55313200fadf36251406905fc4b5 | code | 493 | @testset "likelihood" begin
betas = [(; b = 0.0) for _ in 1:4]
@test likelihood(OnePL, Inf, betas, ones(length(betas))) == 1.0
@test likelihood(OnePL, -Inf, betas, ones(length(betas))) == 0.0
@test likelihood(OnePL, Inf, betas, zeros(length(betas))) == 0.0
@test likelihood(OnePL, -Inf, betas, zeros(length(betas))) == 1.0
@test loglikelihood(OnePL, Inf, betas, ones(length(betas))) == 0.0
@test loglikelihood(OnePL, -Inf, betas, ones(length(betas))) == -Inf
end
| ItemResponseFunctions | https://github.com/JuliaPsychometrics/ItemResponseFunctions.jl.git |
|
[
"MIT"
] | 0.1.8 | 195af880fc6d55313200fadf36251406905fc4b5 | code | 932 | using Test
using ItemResponseFunctions
using ItemResponseFunctions:
has_discrimination,
has_upper_asymptote,
has_lower_asymptote,
has_stiffness,
check_pars,
ItemParameters
using ForwardDiff: Dual
@testset "ItemResponseFunctions.jl" begin
include("utils.jl")
include("scoring_functions.jl")
include("item_parameters.jl")
include("models/one_parameter_logistic.jl")
include("models/one_parameter_logistic_plus_guessing.jl")
include("models/two_parameter_logistic.jl")
include("models/three_parameter_logistic.jl")
include("models/four_parameter_logistic.jl")
include("models/five_parameter_logistic.jl")
include("models/generalized_partial_credit_model.jl")
include("models/partial_credit_model.jl")
include("models/rating_scale_model.jl")
include("models/generalized_rating_scale_model.jl")
include("derivatives.jl")
include("likelihood.jl")
end
| ItemResponseFunctions | https://github.com/JuliaPsychometrics/ItemResponseFunctions.jl.git |
|
[
"MIT"
] | 0.1.8 | 195af880fc6d55313200fadf36251406905fc4b5 | code | 323 | @testset "scoring functions" begin
@testset "partial_credit" begin
    # partial_credit(n) maps category k in 1:n onto equally spaced scores in [0, 1].
    f = partial_credit(3)
    @test f(1) == 0.0
    @test f(2) == 0.5
    @test f(3) == 1.0
    # With max_score, scores are rescaled to the interval [0, max_score].
    f = partial_credit(3; max_score = 2)
    @test f(1) == 0.0
    @test f(2) == 0.5 * 2
    @test f(3) == 1.0 * 2
end
end
| ItemResponseFunctions | https://github.com/JuliaPsychometrics/ItemResponseFunctions.jl.git |
|
[
"MIT"
] | 0.1.8 | 195af880fc6d55313200fadf36251406905fc4b5 | code | 1702 | @testset "utils" begin
@testset "check_pars" begin
    # all merged pars should pass
    # ItemParameters normalises the raw named tuple for each model type,
    # so the full parameter set is accepted by every model's check.
    beta = (a = 1.2, b = 0.2, c = 0.1, d = 0.8, e = 1.4, t = zeros(3))
    @test check_pars(OnePL, ItemParameters(OnePL, beta))
    @test check_pars(TwoPL, ItemParameters(TwoPL, beta))
    @test check_pars(ThreePL, ItemParameters(ThreePL, beta))
    @test check_pars(FourPL, ItemParameters(FourPL, beta))
    @test check_pars(FivePL, ItemParameters(FivePL, beta))
    @test check_pars(PCM, ItemParameters(PCM, beta))
    @test check_pars(GPCM, ItemParameters(GPCM, beta))
    @test check_pars(RSM, ItemParameters(RSM, beta))
    @test check_pars(GRSM, ItemParameters(GRSM, beta))
    # Passing the raw tuple (without normalisation) must fail the model-specific
    # restrictions (e.g. OnePL fixes a = 1, TwoPL fixes c = 0, ...).
    @test_throws "a != 1" check_pars(OnePL, beta)
    @test_throws "c != 0" check_pars(TwoPL, beta)
    @test_throws "d != 1" check_pars(ThreePL, beta)
    @test_throws "e != 1" check_pars(FourPL, beta)
    # Range checks on the asymptote and stiffness parameters.
    @test_throws "c must be in interval (0, 1)" check_pars(
        ThreePL,
        (; a = 1.0, c = -0.2, d = 1.0, e = 1.0),
    )
    @test_throws "d must be in interval (0, 1)" check_pars(
        FourPL,
        (; a = 1.0, c = 0.2, d = 1.2, e = 1.0),
    )
    @test_throws "smaller than upper asymptote" check_pars(
        FourPL,
        (; a = 1.0, c = 0.8, d = 0.2, e = 1.0),
    )
    @test_throws "e must be positive" check_pars(
        FivePL,
        (; a = 1.0, c = 0.0, d = 1.0, e = -1.0),
    )
end
@testset "irf accepts dual numbers" begin
    # ForwardDiff.Dual must flow through irf so automatic differentiation works.
    @test irf(OnePL, Dual(0.0), 0.0, 1) isa Real
    @test irf(PCM, Dual(0.0), (; b = 0.0, t = 0.0), 1) isa Real
end
end
| ItemResponseFunctions | https://github.com/JuliaPsychometrics/ItemResponseFunctions.jl.git |
|
[
"MIT"
] | 0.1.8 | 195af880fc6d55313200fadf36251406905fc4b5 | code | 221 | @testset "FiveParameterLogisticModel" begin
# FivePL models discrimination, both asymptotes, and stiffness.
T = FivePL
@test has_discrimination(T) == true
@test has_lower_asymptote(T) == true
@test has_upper_asymptote(T) == true
@test has_stiffness(T) == true
end
| ItemResponseFunctions | https://github.com/JuliaPsychometrics/ItemResponseFunctions.jl.git |
|
[
"MIT"
] | 0.1.8 | 195af880fc6d55313200fadf36251406905fc4b5 | code | 2201 | @testset "FourParameterLogisticModel" begin
# FourPL: discrimination plus lower (c) and upper (d) asymptotes.
T = FourParameterLogisticModel
@test has_discrimination(T) == true
@test has_lower_asymptote(T) == true
@test has_upper_asymptote(T) == true
@testset "irf" begin
    beta = (a = 1.0, b = 0.0, c = 0.0, d = 1.0)
    @test irf(T, 0.0, beta, 1) == 0.5
    @test irf(T, Inf, beta, 1) == 1.0
    @test irf(T, -Inf, beta, 1) == 0.0
    @test irf(T, 0.0, beta) == [0.5, 0.5]
    # Asymptote parameters bound the response probabilities at +-Inf.
    beta = (a = 1.5, b = 0.0, c = 0.2, d = 0.8)
    @test irf(T, 0.0, beta, 1) == 0.5
    @test irf(T, Inf, beta, 1) == 0.8
    @test irf(T, -Inf, beta, 1) == 0.2
    @test irf(T, 0.0, beta) == [0.5, 0.5]
end
@testset "iif" begin
    beta = (a = 1.0, b = 0.0, c = 0.0, d = 1.0)
    @test iif(T, 0.0, beta) == [0.125, 0.125]
    @test iif(T, Inf, beta) == [0.0, 0.0]
    @test iif(T, -Inf, beta) == [0.0, 0.0]
    beta = (a = 2.1, b = 0.2, c = 0.2, d = 0.95)
    @test iif(T, Inf, beta) == [0.0, 0.0]
    @test iif(T, -Inf, beta) == [0.0, 0.0]
end
@testset "expected_score" begin
    betas = fill((a = 1.0, b = 0.0, c = 0.0, d = 1.0), 6)
    @test expected_score(T, 0.0, betas) == 3.0
    @test expected_score(T, Inf, betas) == 6.0
    @test expected_score(T, -Inf, betas) == 0.0
    beta = (a = 1.0, b = 0.0, c = 0.1, d = 0.6)
    betas = fill(beta, 6)
    @test expected_score(T, 0.0, betas) ≈ (beta.c + (beta.d - beta.c) / 2) * 6
    @test expected_score(T, Inf, betas) ≈ betas[1].d * 6
    @test expected_score(T, -Inf, betas) ≈ betas[1].c * 6
    @test expected_score(T, 0.0, beta) == irf(T, 0.0, beta, 1)
end
@testset "information" begin
    betas = fill((a = 1.0, b = 0.0, c = 0.0, d = 1.0), 3)
    @test information(T, 0.0, betas) == 0.25 * 3
    @test information(T, Inf, betas) == 0.0
    @test information(T, -Inf, betas) == 0.0
    beta = (a = 1.2, b = 0.3, c = 0.1, d = 0.88)
    betas = fill(beta, 3)
    @test information(T, Inf, betas) == 0.0
    @test information(T, -Inf, betas) == 0.0
    @test information(T, 0.0, beta) == sum(iif(T, 0.0, beta, y) for y in 0:1)
end
end
| ItemResponseFunctions | https://github.com/JuliaPsychometrics/ItemResponseFunctions.jl.git |
|
[
"MIT"
] | 0.1.8 | 195af880fc6d55313200fadf36251406905fc4b5 | code | 2803 | @testset "GeneralizedPartialCreditModel" begin
# GPCM: discrimination, no asymptotes; polytomous categories via thresholds t.
@test has_discrimination(GPCM) == true
@test has_lower_asymptote(GPCM) == false
@test has_upper_asymptote(GPCM) == false
@testset "irf" begin
    beta = (a = 1.0, b = 0.0, t = zeros(3))
    @test length(irf(GPCM, 0.0, beta)) == length(beta.t) + 1
    @test sum(irf(GPCM, 0.0, beta)) ≈ 1.0
    @test irf(GPCM, 0.0, beta) == fill(0.25, 4)
    @test irf(GPCM, -Inf, beta) == [1.0, 0.0, 0.0, 0.0]
    @test_broken irf(GPCM, Inf, beta) == [0.0, 0.0, 0.0, 1.0] # issues with Inf in softmax!
    @test irf(GPCM, 1e16, beta) == [0.0, 0.0, 0.0, 1.0]
    @test irf(GPCM, 0.0, beta, 1) == 0.25
    @test irf(GPCM, -Inf, beta, 1) == 1.0
    @test irf(GPCM, -Inf, beta, 4) == 0.0
    # equivalent to 2PL for dichotomous items
    beta = (a = 0.87, b = -0.25, t = 0.0)
    @test irf(GPCM, 0.0, beta, 1) ≈ irf(TwoPL, 0.0, beta, 0)
    @test irf(GPCM, 0.0, beta, 2) ≈ irf(TwoPL, 0.0, beta, 1)
end
@testset "iif" begin
    beta = (a = 1.3, b = 0.4, t = zeros(2))
    @test_broken iif(GPCM, Inf, beta) == 0.0 # produces NaN
    @test iif(GPCM, 1e16, beta) == [0.0, 0.0, 0.0]
    @test iif(GPCM, -Inf, beta) == [0.0, 0.0, 0.0]
    @test iif(GPCM, 1e16, beta, 1) == 0.0
    @test iif(GPCM, -Inf, beta, 1) == 0.0
    # equivalent to 2PL for dichotomous items
    beta = (a = 0.87, b = 0.22, t = 0.0)
    @test iif(GPCM, 0.0, beta, 1) ≈ iif(TwoPL, 0.0, beta, 0)
    @test iif(GPCM, 0.0, beta, 2) ≈ iif(TwoPL, 0.0, beta, 1)
end
@testset "expected_score" begin
    beta = (a = 1.0, b = 0.0, t = zeros(2))
    betas = fill(beta, 6)
    @test expected_score(GPCM, 0.0, betas) == 2.0 * 6
    @test expected_score(GPCM, -Inf, betas) == 1.0 * 6
    @test_broken expected_score(GPCM, Inf, betas) == 3.0 * 6 # produces NaN
    @test expected_score(GPCM, 1e16, betas) == 3.0 * 6
    # equivalent to 2PL for dichotomous items
    beta = (a = 1.3, b = 0.4, t = 0.0)
    betas = fill(beta, 3)
    @test expected_score(GPCM, 0.0, betas, scoring_function = partial_credit(2)) ≈
          expected_score(TwoPL, 0.0, betas)
end
@testset "information" begin
    beta = (a = 1.0, b = 0.0, t = zeros(2))
    betas = fill(beta, 6)
    @test information(GPCM, -Inf, betas) == 0.0
    @test_broken information(GPCM, Inf, betas) == 0.0 # produces NaN
    @test information(GPCM, 1e16, betas) == 0.0
    # equivalent to 2PL for dichotomous items
    beta = (a = 0.3, b = 0.4, t = 0.0)
    @test information(GPCM, 0.0, beta) ≈ information(TwoPL, 0.0, beta)
    betas = fill(beta, 4)
    @test information(GPCM, 0.0, betas) ≈ information(TwoPL, 0.0, betas)
end
end
| ItemResponseFunctions | https://github.com/JuliaPsychometrics/ItemResponseFunctions.jl.git |
|
[
"MIT"
] | 0.1.8 | 195af880fc6d55313200fadf36251406905fc4b5 | code | 639 | @testset "GeneralizedRatingScaleModel" begin
# GRSM is implemented in terms of GPCM, so all functions must agree exactly.
@test has_discrimination(GRSM) == true
@test has_lower_asymptote(GRSM) == false
@test has_upper_asymptote(GRSM) == false
beta = (a = 1.3, b = 0.2, t = randn(4))
@test irf(GRSM, 0.0, beta) == irf(GPCM, 0.0, beta)
@test irf(GRSM, 0.0, beta, 1) == irf(GPCM, 0.0, beta, 1)
@test iif(GRSM, 0.0, beta) == iif(GPCM, 0.0, beta)
@test iif(GRSM, 0.0, beta, 1) == iif(GPCM, 0.0, beta, 1)
betas = fill(beta, 4)
@test expected_score(GRSM, 0.0, betas) == expected_score(GPCM, 0.0, betas)
@test information(GRSM, 0.0, betas) == information(GPCM, 0.0, betas)
end
| ItemResponseFunctions | https://github.com/JuliaPsychometrics/ItemResponseFunctions.jl.git |
|
[
"MIT"
] | 0.1.8 | 195af880fc6d55313200fadf36251406905fc4b5 | code | 2384 | @testset "OneParameterLogisticModel" begin
# OnePL: difficulty only; beta may be a bare Real or a named tuple (; b).
T = OneParameterLogisticModel
@test has_discrimination(T) == false
@test has_lower_asymptote(T) == false
@test has_upper_asymptote(T) == false
@testset "irf" begin
    # Scalar beta: the second positional argument is the item difficulty b.
    @test irf(T, 0.0, 0.0, 1) == 0.5
    @test irf(T, 0.0, 0.0, 0) == 0.5
    @test irf(T, 0.0, -Inf, 1) == 1.0
    @test irf(T, 0.0, Inf, 1) == 0.0
    @test irf(T, 0.0, 0.0) == [0.5, 0.5]
    @test irf(T, -Inf, 0.0) == [1.0, 0.0]
    @test irf(T, Inf, 0.0) == [0.0, 1.0]
    # Named-tuple beta must behave identically.
    beta = (; b = 0.0)
    @test irf(T, 0.0, beta, 1) == 0.5
    @test irf(T, 0.0, beta, 0) == 0.5
    @test irf(T, -Inf, beta, 1) == 0.0
    @test irf(T, Inf, beta, 1) == 1.0
    @test irf(T, 0.0, beta) == [0.5, 0.5]
    @test irf(T, -Inf, beta) == [1.0, 0.0]
    @test irf(T, Inf, beta) == [0.0, 1.0]
end
@testset "iif" begin
    @test iif(T, 0.0, 0.0) == [0.125, 0.125]
    @test iif(T, 0.0, Inf) == [0.0, 0.0]
    @test iif(T, 0.0, -Inf) == [0.0, 0.0]
    for y in 0:1
        @test iif(T, 0.0, 0.0, y) == 0.125
        @test iif(T, 0.0, Inf, y) == 0.0
        @test iif(T, 0.0, -Inf, y) == 0.0
    end
    beta = (; b = 0.0)
    @test iif(T, 0.0, beta) == [0.125, 0.125]
    @test iif(T, Inf, beta) == [0.0, 0.0]
    @test iif(T, -Inf, beta) == [0.0, 0.0]
    for y in 0:1
        @test iif(T, 0.0, beta, y) == 0.125
        @test iif(T, Inf, beta, y) == 0.0
        @test iif(T, -Inf, beta, y) == 0.0
    end
end
@testset "expected_score" begin
    @test expected_score(T, 0.0, zeros(3)) == 1.5
    @test expected_score(T, 0.0, fill(-Inf, 3)) == 3.0
    betas = fill((; b = 0.0), 3)
    @test expected_score(T, 0.0, betas) == 1.5
    @test expected_score(T, Inf, betas) == 3.0
    @test expected_score(T, 0.0, betas; scoring_function = x -> 2x) == 3.0
    @test expected_score(T, 0.0, 0.0) == irf(T, 0.0, 0.0, 1)
end
@testset "information" begin
    @test information(T, 0.0, zeros(3)) == 0.75
    @test information(T, 0.0, fill(Inf, 3)) == 0.0
    betas = fill((; b = 0.0), 3)
    @test information(T, 0.0, betas) == 0.25 * 3
    @test information(T, Inf, betas) == 0.0
    @test information(T, 0.0, 0.0) == sum(iif(T, 0.0, 0.0, y) for y in 0:1)
end
end
| ItemResponseFunctions | https://github.com/JuliaPsychometrics/ItemResponseFunctions.jl.git |
|
[
"MIT"
] | 0.1.8 | 195af880fc6d55313200fadf36251406905fc4b5 | code | 1862 | @testset "OneParameterLogisticPlusGuessingModel" begin
# OnePLG: difficulty plus lower asymptote (guessing parameter c).
T = OneParameterLogisticPlusGuessingModel
@test has_discrimination(T) == false
@test has_lower_asymptote(T) == true
@test has_upper_asymptote(T) == false
@testset "irf" begin
    @test irf(T, 0.0, (; b = 0.0, c = 0.0), 1) == 0.5
    @test irf(T, 0.0, (; b = 0.0, c = 0.0), 0) == 0.5
    @test irf(T, -Inf, (; b = 0.0, c = 0.0), 1) == 0.0
    @test irf(T, Inf, (; b = 0.0, c = 0.0), 1) == 1.0
    # With guessing, the lower asymptote is c instead of 0.
    @test irf(T, -Inf, (; b = 0.0, c = 0.2), 1) == 0.2
    @test irf(T, Inf, (; b = 0.0, c = 0.2), 1) == 1.0
    @test irf(T, 0.0, (; b = 0.0, c = 0.2)) ≈ [0.4, 0.6]
end
@testset "iif" begin
    beta = (b = 0.0, c = 0.0)
    @test iif(T, 0.0, beta) == [0.125, 0.125]
    @test iif(T, -Inf, beta) == [0.0, 0.0]
    @test iif(T, Inf, beta) == [0.0, 0.0]
    @test iif(T, 0.0, beta, 1) == 0.125
    @test iif(T, -Inf, beta, 1) == 0.0
    @test iif(T, Inf, beta, 1) == 0.0
    beta = (b = 0.0, c = 0.1)
    @test iif(T, -Inf, beta) == [0.0, 0.0]
    @test iif(T, Inf, beta) == [0.0, 0.0]
    @test iif(T, -Inf, beta, 1) == 0.0
    @test iif(T, Inf, beta, 1) == 0.0
end
@testset "expected_score" begin
    beta = (b = 0.0, c = 0.2)
    betas = fill(beta, 10)
    @test expected_score(T, 0.0, betas) ≈ 0.6 * 10
    @test expected_score(T, -Inf, betas) ≈ 0.2 * 10
    @test expected_score(T, Inf, betas) ≈ 1.0 * 10
    @test expected_score(T, 0.0, beta) == irf(T, 0.0, beta, 1)
end
@testset "information" begin
    beta = (b = 0.0, c = 0.25)
    betas = fill(beta, 5)
    @test information(T, -Inf, betas) == 0.0
    @test information(T, Inf, betas) == 0.0
    @test information(T, 0.0, beta) == sum(iif(T, 0.0, beta, y) for y in 0:1)
end
end
| ItemResponseFunctions | https://github.com/JuliaPsychometrics/ItemResponseFunctions.jl.git |
|
[
"MIT"
] | 0.1.8 | 195af880fc6d55313200fadf36251406905fc4b5 | code | 2158 | @testset "PartialCreditModel" begin
# PCM: polytomous model without discrimination; tested by equivalence to
# GPCM (with a = 1) and to OnePL (for dichotomous items).
@test has_discrimination(PCM) == false
@test has_lower_asymptote(PCM) == false
@test has_upper_asymptote(PCM) == false
@testset "irf" begin
    # equivalent to GPCM for a = 1
    beta = (a = 1.0, b = 0.0, t = randn(3))
    @test irf(PCM, 0.0, beta) == irf(GPCM, 0.0, beta)
    @test irf(PCM, 0.0, beta, 1) == irf(GPCM, 0.0, beta, 1)
    # equivalent to 1PL for dichotomous items
    beta = (a = 1.12, b = 0.0, t = 0.0)
    for theta in rand(10)
        @test irf(PCM, theta, beta, 1) ≈ irf(OnePL, theta, beta, 0)
        @test irf(PCM, theta, beta, 2) ≈ irf(OnePL, theta, beta, 1)
    end
end
@testset "iif" begin
    # equivalent to GPCM for a = 1
    beta = (a = 1.0, b = 0.0, t = randn(3))
    @test iif(PCM, 0.0, beta) == iif(GPCM, 0.0, beta)
    @test iif(PCM, 0.0, beta, 1) == iif(GPCM, 0.0, beta, 1)
    # equivalent to 1PL for dichotomous items
    beta = (a = 1.12, b = 0.0, t = 0.0)
    for theta in rand(10)
        @test iif(PCM, theta, beta, 1) ≈ iif(OnePL, theta, beta, 0)
        @test iif(PCM, theta, beta, 2) ≈ iif(OnePL, theta, beta, 1)
    end
end
@testset "expected_score" begin
    # equivalent to GPCM for a = 1
    beta = (a = 1.0, b = 0.0, t = randn(3))
    betas = fill(beta, 4)
    @test expected_score(PCM, 0.0, betas) == expected_score(GPCM, 0.0, betas)
    # equivalent to 1PL for dichotomous items
    beta = (a = 1.4, b = 0.3, t = 0.0)
    betas = fill(beta, 4)
    @test expected_score(PCM, 0.0, betas, scoring_function = partial_credit(2)) ≈
          expected_score(OnePL, 0.0, betas)
end
@testset "information" begin
    # equivalent to GPCM for a = 1
    beta = (a = 1.0, b = 0.0, t = randn(3))
    betas = fill(beta, 4)
    @test information(PCM, 0.0, betas) == information(GPCM, 0.0, betas)
    # equivalent to 1PL for dichotomous items
    beta = (a = 1.4, b = 0.3, t = 0.0)
    betas = fill(beta, 4)
    @test information(PCM, 0.0, betas) ≈ information(OnePL, 0.0, betas)
end
end
| ItemResponseFunctions | https://github.com/JuliaPsychometrics/ItemResponseFunctions.jl.git |
|
[
"MIT"
] | 0.1.8 | 195af880fc6d55313200fadf36251406905fc4b5 | code | 620 | @testset "RatingScaleModel" begin
# RSM is implemented in terms of GPCM, so all functions must agree exactly.
@test has_discrimination(RSM) == false
@test has_lower_asymptote(RSM) == false
@test has_upper_asymptote(RSM) == false
beta = (a = 1.0, b = 0.0, t = randn(2))
@test irf(RSM, 0.0, beta) == irf(GPCM, 0.0, beta)
@test irf(RSM, 0.0, beta, 1) == irf(GPCM, 0.0, beta, 1)
@test iif(RSM, 0.0, beta) == iif(GPCM, 0.0, beta)
@test iif(RSM, 0.0, beta, 1) == iif(GPCM, 0.0, beta, 1)
betas = fill(beta, 4)
@test expected_score(RSM, 0.0, betas) == expected_score(GPCM, 0.0, betas)
@test information(RSM, 0.0, betas) == information(GPCM, 0.0, betas)
end
| ItemResponseFunctions | https://github.com/JuliaPsychometrics/ItemResponseFunctions.jl.git |
|
[
"MIT"
] | 0.1.8 | 195af880fc6d55313200fadf36251406905fc4b5 | code | 1948 | @testset "ThreeParameterLogisticModel" begin
# ThreePL: discrimination plus lower asymptote (guessing), no upper asymptote.
T = ThreeParameterLogisticModel
@test has_discrimination(T) == true
@test has_lower_asymptote(T) == true
@test has_upper_asymptote(T) == false
@testset "irf" begin
    beta = (a = 1.5, b = 0.0, c = 0.2)
    @test irf(T, 0.0, beta, 1) ≈ 0.5 + beta.c / 2
    @test irf(T, Inf, beta, 1) == 1.0
    @test irf(T, -Inf, beta, 1) == beta.c
    @test irf(T, 0.0, beta) ≈ [0.5 - beta.c / 2, 0.5 + beta.c / 2]
end
@testset "iif" begin
    beta = (a = 1.0, b = 0.0, c = 0.0)
    @test iif(T, 0.0, beta) == [0.125, 0.125]
    @test iif(T, Inf, beta) == [0.0, 0.0]
    @test iif(T, -Inf, beta) == [0.0, 0.0]
    beta = (a = 1.5, b = 0.0, c = 0.25)
    @test iif(T, Inf, beta) == [0.0, 0.0]
    @test iif(T, -Inf, beta) == [0.0, 0.0]
end
@testset "expected_score" begin
    beta = (a = 1.0, b = 0.0, c = 0.0)
    betas = fill(beta, 4)
    @test expected_score(T, 0.0, betas) == 2.0
    @test expected_score(T, Inf, betas) == 4.0
    @test expected_score(T, -Inf, betas) == 0.0
    beta = (a = 1.5, b = 0.0, c = 0.3)
    betas = fill(beta, 4)
    @test expected_score(T, 0.0, betas) ≈ (0.5 + betas[1].c / 2) * 4
    @test expected_score(T, Inf, betas) == 4.0
    @test expected_score(T, -Inf, betas) == 0.3 * 4
    @test expected_score(T, 0.0, beta) == irf(T, 0.0, beta, 1)
end
@testset "information" begin
    betas = fill((a = 1.0, b = 0.0, c = 0.0), 3)
    @test information(T, 0.0, betas) == 0.25 * 3
    @test information(T, Inf, betas) == 0.0
    @test information(T, -Inf, betas) == 0.0
    beta = (a = 1.3, b = 0.0, c = 0.2)
    betas = fill(beta, 3)
    @test information(T, Inf, betas) == 0.0
    @test information(T, -Inf, betas) == 0.0
    @test information(T, 0.0, beta) == sum(iif(T, 0.0, beta, y) for y in 0:1)
end
end
| ItemResponseFunctions | https://github.com/JuliaPsychometrics/ItemResponseFunctions.jl.git |
|
[
"MIT"
] | 0.1.8 | 195af880fc6d55313200fadf36251406905fc4b5 | code | 1430 | @testset "TwoParameterLogisticModel" begin
# TwoPL: discrimination a and difficulty b; no asymptote parameters.
T = TwoParameterLogisticModel
@test has_discrimination(T) == true
@test has_lower_asymptote(T) == false
@test has_upper_asymptote(T) == false
@testset "irf" begin
    @test irf(T, 0.0, (a = 1.5, b = 0.0), 1) == 0.5
    @test irf(T, Inf, (a = 1.5, b = 0.0), 1) == 1.0
    @test irf(T, -Inf, (a = 1.5, b = 0.0), 1) == 0.0
    @test irf(T, 0.0, (a = 1.5, b = 0.0)) == [0.5, 0.5]
end
@testset "iif" begin
    @test iif(T, 0.0, (a = 1.0, b = 0.0)) == [0.125, 0.125]
    @test iif(T, Inf, (a = 1.0, b = 0.0)) == [0.0, 0.0]
    @test iif(T, -Inf, (a = 1.0, b = 0.0)) == [0.0, 0.0]
    # Information scales with the square of the discrimination.
    @test iif(T, 0.0, (a = 1.5, b = 0.0), 1) == 1.5^2 * 0.125
end
@testset "expected_score" begin
    beta = (a = 2.0, b = 0.0)
    betas = fill(beta, 5)
    @test expected_score(T, 0.0, betas) == 2.5
    @test expected_score(T, Inf, betas) == 5.0
    @test expected_score(T, -Inf, betas) == 0.0
    @test expected_score(T, 0.0, beta) == irf(T, 0.0, beta, 1)
end
@testset "information" begin
    beta = (a = 1.5, b = 0.0)
    betas = fill(beta, 5)
    @test information(T, 0.0, betas) == 1.5^2 * 0.25 * 5
    @test information(T, Inf, betas) == 0.0
    @test information(T, -Inf, betas) == 0.0
    @test information(T, 0.0, beta) == sum(iif(T, 0.0, beta, y) for y in 0:1)
end
end
| ItemResponseFunctions | https://github.com/JuliaPsychometrics/ItemResponseFunctions.jl.git |
|
[
"MIT"
] | 0.1.8 | 195af880fc6d55313200fadf36251406905fc4b5 | docs | 2109 | # ItemResponseFunctions.jl
[](https://juliapsychometrics.github.io/ItemResponseFunctions.jl/stable/)
[](https://juliapsychometrics.github.io/ItemResponseFunctions.jl/dev/)
[](https://github.com/juliapsychometrics/ItemResponseFunctions.jl/actions/workflows/CI.yml?query=branch%3Amain)
[](https://codecov.io/gh/juliapsychometrics/ItemResponseFunctions.jl)
[ItemResponseFunctions.jl](https://github.com/juliapsychometrics/ItemResponseFunctions.jl) implements basic functions for Item Response Theory models. It is built based on the interface designed in [AbstractItemResponseModels.jl](https://github.com/JuliaPsychometrics/AbstractItemResponseModels.jl).
## Installation
You can install ItemResponseFunctions.jl from the General package registry:
```julia
] add ItemResponseFunctions
```
## Usage
ItemResponseFunctions.jl exports the following functions for Item Response Theory models:
- `irf`: The item response function
- `iif`: The item information function
- `expected_score`: The expected score / test response function
- `information`: The test information function
Calling the function requires a model type `M`, a person ability `theta` and item parameters `beta`.
For a simple 1-Parameter Logistic model,
```julia
using ItemResponseFunctions
beta = (; b = 0.5)
irf(OnePL, 0.0, beta, 1)
iif(OnePL, 0.0, beta, 1)
```
evaluates the item response function and item information function for a correct response (`y = 1`) at ability value `0.0` for an item with difficulty `0.5`.
Given an array of item parameters (a test) and an ability value, the test response function and test information can be calculated by
```julia
betas = [
(; b = -0.3),
(; b = 0.25),
(; b = 1.0),
]
expected_score(OnePL, 0.0, betas)
information(OnePL, 0.0, betas)
```
| ItemResponseFunctions | https://github.com/JuliaPsychometrics/ItemResponseFunctions.jl.git |
|
[
"MIT"
] | 0.1.8 | 195af880fc6d55313200fadf36251406905fc4b5 | docs | 686 | ```@meta
CurrentModule = ItemResponseFunctions
```
# API
## Models
### Dichotomous response models
```@docs
OneParameterLogisticModel
OneParameterLogisticPlusGuessingModel
TwoParameterLogisticModel
ThreeParameterLogisticModel
FourParameterLogisticModel
FiveParameterLogisticModel
```
### Polytomous response models
```@docs
PartialCreditModel
GeneralizedPartialCreditModel
RatingScaleModel
GeneralizedRatingScaleModel
```
## Functions
### Item Response Functions
```@docs
irf
irf!
iif
expected_score
information
```
### Utilities
```@docs
ItemParameters
derivative_theta
derivative_theta!
likelihood
loglikelihood
partial_credit
second_derivative_theta
second_derivative_theta!
```
| ItemResponseFunctions | https://github.com/JuliaPsychometrics/ItemResponseFunctions.jl.git |
|
[
"MIT"
] | 0.1.8 | 195af880fc6d55313200fadf36251406905fc4b5 | docs | 2109 | # ItemResponseFunctions.jl
[](https://juliapsychometrics.github.io/ItemResponseFunctions.jl/stable/)
[](https://juliapsychometrics.github.io/ItemResponseFunctions.jl/dev/)
[](https://github.com/juliapsychometrics/ItemResponseFunctions.jl/actions/workflows/CI.yml?query=branch%3Amain)
[](https://codecov.io/gh/juliapsychometrics/ItemResponseFunctions.jl)
[ItemResponseFunctions.jl](https://github.com/juliapsychometrics/ItemResponseFunctions.jl) implements basic functions for Item Response Theory models. It is built based on the interface designed in [AbstractItemResponseModels.jl](https://github.com/JuliaPsychometrics/AbstractItemResponseModels.jl).
## Installation
You can install ItemResponseFunctions.jl from the General package registry:
```julia
] add ItemResponseFunctions
```
## Usage
ItemResponseFunctions.jl exports the following functions for Item Response Theory models:
- `irf`: The item response function
- `iif`: The item information function
- `expected_score`: The expected score / test response function
- `information`: The test information function
Calling the function requires a model type `M`, a person ability `theta` and item parameters `beta`.
For a simple 1-Parameter Logistic model,
```julia
using ItemResponseFunctions
beta = (; b = 0.5)
irf(OnePL, 0.0, beta, 1)
iif(OnePL, 0.0, beta, 1)
```
evaluates the item response function and item information function for a correct response (`y = 1`) at ability value `0.0` for an item with difficulty `0.5`.
Given an array of item parameters (a test) and an ability value, the test response function and test information can be calculated by
```julia
betas = [
(; b = -0.3),
(; b = 0.25),
(; b = 1.0),
]
expected_score(OnePL, 0.0, betas)
information(OnePL, 0.0, betas)
```
| ItemResponseFunctions | https://github.com/JuliaPsychometrics/ItemResponseFunctions.jl.git |
|
[
"MIT"
] | 0.4.0 | e402710abf3f0bbed192896851bef8e483cf7952 | code | 623 | using Pkg
using Documenter, XAM
# Build the documentation site for XAM.jl with Documenter.
makedocs(
    checkdocs = :all,
    linkcheck = true,
    format = Documenter.HTML(
        edit_link = "develop"
    ),
    modules = [XAM, XAM.SAM, XAM.BAM],
    sitename = "XAM.jl",
    pages = [
        "Home" => "index.md",
        "SAM and BAM" => "man/hts-files.md",
        "API Reference" => "man/api.md"
    ],
    # Author list is read from Project.toml; email addresses are stripped.
    authors = replace(join(Pkg.TOML.parsefile("Project.toml")["authors"], ", "), r" <.*?>" => "" ) * ", The BioJulia Organisation, and other contributors."
)
# Deploy the built docs to the gh-pages branch of the repository.
deploydocs(
    repo = "github.com/BioJulia/XAM.jl.git",
    devbranch = "develop",
    push_preview = true
)
| XAM | https://github.com/BioJulia/XAM.jl.git |
|
[
"MIT"
] | 0.4.0 | e402710abf3f0bbed192896851bef8e483cf7952 | code | 365 | module XAM
using BioGenerics
import BioGenerics: isfilled #Note: used by `ismapped`.
export
SAM,
BAM
abstract type XAMRecord end
abstract type XAMReader <: BioGenerics.IO.AbstractReader end
abstract type XAMWriter <: BioGenerics.IO.AbstractWriter end
include("flags.jl")
include("sam/sam.jl")
include("bam/bam.jl")
using .SAM
using .BAM
end # module
| XAM | https://github.com/BioJulia/XAM.jl.git |
|
[
"MIT"
] | 0.4.0 | e402710abf3f0bbed192896851bef8e483cf7952 | code | 7352 | # Flags
# =========
#
"""
flags(record::XAMRecord})::UInt16
Get the bitwise flags of `record`.
The returned value is a `UInt16` of each flag being OR'd together.
The possible flags are:
0x0001 template having multiple segments in sequencing
0x0002 each segment properly aligned according to the aligner
0x0004 segment unmapped
0x0008 next segment in the template unmapped
0x0010 SEQ being reverse complemented
0x0020 SEQ of the next segment in the template being reverse complemented
0x0040 the first segment in the template
0x0080 the last segment in the template
0x0100 secondary alignment
0x0200 not passing filters, such as platform/vendor quality controls
0x0400 PCR or optical duplicate
0x0800 supplementary alignment
"""
function flags end
# Bitwise flag (or FLAG).
# Generate one documented `FLAG_*` constant per entry of the table below,
# e.g. `FLAG_PAIRED = 0x001`. The docstrings are attached via `@doc`.
for (name, bits, doc) in [
    (:PAIRED, UInt16(0x001), "the segment is paired with other segments"),
    (:PROPER_PAIR, UInt16(0x002), "the segment is in a template where all segments are properly aligned according to the aligner"),
    (:UNMAPPED, UInt16(0x004), "the segment itself is unmapped; conflictive with FLAG_PROPER_PAIR"),
    (:NEXT_UNMAPPED, UInt16(0x008), "the next segment in the template is unmapped"),
    (:REVERSE, UInt16(0x010), "the *SEQ*uence is reverse complemented"),
    (:NEXT_REVERSE, UInt16(0x020), "the *SEQ*uence of the next segment in the template is reverse complemented" ),
    (:FIRST_SEGMENT, UInt16(0x040), "the segment is the first in the template"),
    (:LAST_SEGMENT, UInt16(0x080), "the segment is last in the template"),
    (:SECONDARY, UInt16(0x100), "not primary alignment"),
    (:QCFAIL, UInt16(0x200), "QC failure"),
    (:DUPLICATE, UInt16(0x400), "optical or PCR duplicate"),
    (:SUPPLEMENTARY, UInt16(0x800), "supplementary alignment"),
]
    @assert bits isa UInt16 "The bits must be of type UInt16."
    sym = Symbol("FLAG_", name)
    docstring = """ $sym
    SAM/BAM flags: $doc
    See also: [`flags`](@ref)
    """
    @eval begin
        @doc $(docstring) const $(sym) = $(bits)
    end
end
"""
ispaired(record::XAMRecord)::Bool
Query whether the segment is in a template having multiple segments in sequencing.
"""
function ispaired(record::XAMRecord)::Bool
return flags(record) & FLAG_PAIRED == FLAG_PAIRED
end
"""
isproperpair(record::XAMRecord)::Bool
Query whether the segment is in a template where all segments are properly aligned according to the aligner.
"""
function isproperpair(record::XAMRecord)::Bool
return flags(record) & PROPER_PAIR == PROPER_PAIR
end
"""
isunmapped(record::XAMRecord)::Bool
Query whether the segment is unmapped.
"""
function isunmapped(record::XAMRecord)::Bool
return flags(record) & FLAG_UNMAPPED == FLAG_UNMAPPED
end
"""
ismapped(record::XAMRecord)::Bool
Query whether the segment is mapped.
"""
function ismapped(record::XAMRecord)::Bool
# return flags(record) & FLAG_UNMAPPED == 0
return isfilled(record) && (flags(record) & FLAG_UNMAPPED == 0)
end
"""
isnextunmapped(record::XAMRecord)::Bool
Query whether the next segment in the template is unmapped.
"""
function isnextunmapped(record::XAMRecord)::Bool
return flags(record) & FLAG_NEXT_UNMAPPED == FLAG_NEXT_UNMAPPED
end
"""
isnextmapped(record::XAMRecord)::Bool
Query whether the next segment in the template is mapped.
"""
function isnextmapped(record::XAMRecord)::Bool
return flags(record) & FLAG_NEXT_UNMAPPED == 0
end
"""
isreverse(record::XAMRecord)::Bool
Query whether the `record.SEQ`uence is reverse complemented.
"""
function isreversecomplemented(record::XAMRecord)::Bool
return flags(record) & FLAG_REVERSE == FLAG_REVERSE
end
"""
isforward(record::XAMRecord)::Bool
Query whether the `record.SEQ`uence is mapped to the forward strand.
"""
function isforwardstrand(record::XAMRecord)::Bool
# return flags(record) & FLAG_REVERSE == 0
return !isreversecomplemented(record) # Note: this is an interpretation of FLAG_REVERSE.
end
"""
ispositivestrand(record::XAMRecord)::Bool
Query whether the `record.SEQ`uence is aligned to the positive strand.
"""
function ispositivestrand(record::XAMRecord)::Bool
return isforwardstrand(record)
end
"""
isreversestrand(record::XAMRecord)::Bool
Query whether the `record.SEQ`uence is aligned to the reverse strand.
"""
function isreversestrand(record::XAMRecord)::Bool
return isreversecomplemented(record) # Note: this is an interpretation of FLAG_REVERSE.
end
"""
ispositivestrand(record::XAMRecord)::Bool
Query whether the `record.SEQ`uence is aligned to the negative strand.
"""
function isnegativestrand(record::XAMRecord)::Bool
return isreversestrand(record)
end
"""
isnextreversecomplemented(record::XAMRecord)::Bool
Query whether the next segment in the template is reverse complemented.
"""
function isnextreversecomplemented(record::XAMRecord)::Bool
return flags(record) & FLAG_NEXT_REVERSE == FLAG_NEXT_REVERSE
end
"""
isfirstsegment(record::XAMRecord)::Bool
Query whether the segemnt is first in the template.
"""
function isfirstsegment(record::XAMRecord)::Bool
return flags(record) & FLAG_FIRST_SEGMENT == FLAG_FIRST_SEGMENT
end
"""
isread1(record::XAMRecord)::Bool
From a paired-end sequencing point of view, query whether the read is read1.
"""
function isread1(record::XAMRecord)::Bool
return isfirstsegment(record)
end
"""
islastsegment(record::XAMRecord)::Bool
Query whether the segemnt is last in the template.
"""
function islastsegment(record::XAMRecord)::Bool
return flags(record) & FLAG_LAST_SEGMENT == FLAG_LAST_SEGMENT
end
"""
isread2(record::XAMRecord)::Bool
From a paired-end sequencing point of view, query whether the read is read2.
"""
function isread2(record::XAMRecord)::Bool
return islastsegment(record)
end
"""
issecondaryalignment(record::XAMRecord)::Bool
Query whether the read is considered to be the secondary alignment.
"""
function issecondaryalignment(record::XAMRecord)::Bool
return flags(record) & FLAG_SECONDARY == FLAG_SECONDARY
end
"""
isqcfail(record::XAMRecord)::Bool
Query whether the read failed filters, such as platform/vendor quality controls.
"""
function isqcfail(record::XAMRecord)::Bool
return flags(record) & FLAG_QCFAIL == FLAG_QCFAIL
end
"""
isduplicate(record::XAMRecord)::Bool
Query whether the read is a PCR or optical duplicate.
"""
function isduplicate(record::XAMRecord)::Bool
return flags(record) & FLAG_DUPLICATE == FLAG_DUPLICATE
end
"""
issupplementaryalignment(record::XAMRecord)::Bool
Query whether the read alignment is considered to be a supplementary alignment.
"""
function issupplementaryalignment(record::XAMRecord)::Bool
return flags(record) & FLAG_SUPPLEMENTARY == FLAG_SUPPLEMENTARY
end
"""
isprimaryalignment(record::XAMRecord)::Bool
Query whether the read alignment is considered to be the primary alignment.
This is primary line of the read and is equivalent to `flags(record) & 0x900 == 0`.
"""
function isprimaryalignment(record::XAMRecord)::Bool
# return !issecondaryalignment(record) && !issupplementaryalignment(record)
return flags(record) & 0x900 == 0
end
| XAM | https://github.com/BioJulia/XAM.jl.git |
|
[
"MIT"
] | 0.4.0 | e402710abf3f0bbed192896851bef8e483cf7952 | code | 4230 | # BAM Auxiliary Data
# ==================
"""
    AuxData(data::Vector{UInt8})

Dictionary-like read-only view over the raw auxiliary-field bytes of a BAM
record. Keys are two-character tag strings; values are decoded lazily on access.
"""
struct AuxData <: AbstractDict{String,Any}
    # Raw bytes of the record's auxiliary data section.
    data::Vector{UInt8}
end
# Look up the value stored under the two-character `tag`; throws KeyError
# (via getauxvalue) when the tag is absent, ArgumentError when it is not
# exactly two characters.
function Base.getindex(aux::AuxData, tag::AbstractString)
    checkauxtag(tag)
    return getauxvalue(aux.data, 1, length(aux.data), UInt8(tag[1]), UInt8(tag[2]))
end
# Number of auxiliary tags: walk the byte buffer tag by tag and count.
function Base.length(aux::AuxData)
    buffer = aux.data
    count = 0
    pos = 1
    while pos ≤ length(buffer)
        pos = next_tag_position(buffer, pos)
        count += 1
    end
    return count
end
# Iterate (tag => value) pairs over the raw auxiliary bytes.
function Base.iterate(aux::AuxData, pos=1)
    if pos > length(aux.data)
        return nothing
    end
    data = aux.data
    @label doit
    # The two tag bytes precede the type byte and the encoded value.
    t1 = data[pos]
    t2 = data[pos+1]
    pos, typ = loadauxtype(data, pos + 2)
    pos, value = loadauxvalue(data, pos, typ)
    # A tag of 0xff,0xff marks an entry to be skipped; continue with the next one.
    if t1 == t2 == 0xff
        @goto doit
    end
    return Pair{String,Any}(String([t1, t2]), value), pos
end
# Internals
# ---------
# Validate that an auxiliary tag name is exactly two characters long;
# throws ArgumentError otherwise.
function checkauxtag(tag::AbstractString)
    sizeof(tag) == 2 || throw(ArgumentError("tag length must be 2"))
    return nothing
end
# Decode the value for tag (t1, t2) within data[start:stop].
# Throws KeyError when the tag is not present.
function getauxvalue(data::Vector{UInt8}, start::Int, stop::Int, t1::UInt8, t2::UInt8)
    pos = findauxtag(data, start, stop, t1, t2)
    if pos == 0
        throw(KeyError(String([t1, t2])))
    end
    # Skip the two tag bytes, read the type byte, then decode the value.
    pos, T = loadauxtype(data, pos + 2)
    _, val = loadauxvalue(data, pos, T)
    return val
end
# Read the type byte at `data[p]` and return `(next_position, julia_type)`.
# A 'B' type byte denotes an array whose element type follows at `data[p+1]`.
function loadauxtype(data::Vector{UInt8}, p::Int)
    function auxtype(b)
        b == UInt8('A') && return Char
        b == UInt8('c') && return Int8
        b == UInt8('C') && return UInt8
        b == UInt8('s') && return Int16
        b == UInt8('S') && return UInt16
        b == UInt8('i') && return Int32
        b == UInt8('I') && return UInt32
        b == UInt8('f') && return Float32
        b == UInt8('Z') && return String
        error("invalid type tag: '$(Char(b))'")
    end
    typebyte = data[p]
    if typebyte == UInt8('B')
        return p + 2, Vector{auxtype(data[p+1])}
    end
    return p + 1, auxtype(typebyte)
end
# Decode a fixed-size scalar of type `T` from the bytes at `data[p]`
# (native byte order, as in the BAM on-disk encoding).
function loadauxvalue(data::Vector{UInt8}, p::Int, ::Type{T}) where T
    raw = data[p:p+sizeof(T)-1]
    return p + sizeof(T), first(reinterpret(T, raw))
end
# Decode a single printable character ('A' type) stored as one byte.
function loadauxvalue(data::Vector{UInt8}, p::Int, ::Type{Char})
    byte = data[p]
    return p + 1, Char(byte)
end
function loadauxvalue(data::Vector{UInt8}, p::Int, ::Type{Vector{T}}) where T
n = unsafe_load(Ptr{Int32}(pointer(data, p)))
p += 4
xs = Vector{T}(undef, n)
unsafe_copyto!(pointer(xs), Ptr{T}(pointer(data, p)), n)
return p + n * sizeof(T), xs
end
function loadauxvalue(data::Vector{UInt8}, p::Int, ::Type{String})
dataptr = pointer(data, p)
endptr = ccall(:memchr, Ptr{Cvoid}, (Ptr{Cvoid}, Cint, Csize_t), dataptr, '\0', length(data) - p + 1)
q::Int = p + (endptr - dataptr) - 1
return q + 2, String(data[p:q])
end
# Scan the aux region `data[start:stop]` for the two-byte tag `(t1, t2)`.
# Returns the 1-based index of the tag's first byte, or 0 when it is absent.
function findauxtag(data::Vector{UInt8}, start::Int, stop::Int, t1::UInt8, t2::UInt8)
    p = start
    while p ≤ stop
        if data[p] == t1 && data[p+1] == t2
            return p
        end
        p = next_tag_position(data, p)
    end
    return 0
end
# Find the starting position of the next tag in `data` after `p`.
# `(data[p], data[p+1])` is supposed to be the current tag.
# Skips 2 tag bytes + 1 type byte + the size of the value, which depends on
# the type code (see the SAMv1 spec, "optional fields" type table).
function next_tag_position(data::Vector{UInt8}, p::Int)
    typ = Char(data[p+2])  # type code of the current tag's value
    p += 3                 # skip tag bytes and type byte
    if typ == 'A'
        return p += 1
    end
    if typ == 'c' || typ == 'C'
        return p += 1
    end
    if typ == 's' || typ == 'S'
        return p += 2
    end
    if typ == 'i' || typ == 'I'
        return p += 4
    end
    if typ == 'f'
        return p += 4
    end
    if typ == 'd'
        return p += 8
    end
    if typ == 'Z' || typ == 'H'
        while data[p] != 0x00 # NUL-terminated string
            p += 1
        end
        return p += 1
    end
    if typ == 'B'
        eltyp = Char(data[p])
        # Fixed: the third comparison previously read `eltye` (an undefined
        # variable), raising UndefVarError for arrays of 'i'/'I'/'f' elements.
        elsize = eltyp == 'c' || eltyp == 'C' ? 1 :
                 eltyp == 's' || eltyp == 'S' ? 2 :
                 eltyp == 'i' || eltyp == 'I' || eltyp == 'f' ? 4 :
                 error("invalid type tag: '$(Char(eltyp))'")
        p += 1
        # Int32 element count precedes the packed elements.
        n = unsafe_load(Ptr{Int32}(pointer(data, p)))
        return p += 4 + elsize * n
    end
    error("invalid type tag: '$(Char(typ))'")
end
| XAM | https://github.com/BioJulia/XAM.jl.git |
|
[
"MIT"
] | 0.4.0 | e402710abf3f0bbed192896851bef8e483cf7952 | code | 1059 | # BAI
# ===
#
# Index for BAM files.
# An index type for the BAM file format.
struct BAI
    # BGZF file index
    index::Indexes.BGZFIndex
    # number of unmapped reads (optional trailing field of the BAI file)
    n_no_coors::Union{Nothing, Int}
end
"""
    BAI(filename::AbstractString)
Load a BAI index from `filename`.
"""
function BAI(filename::AbstractString)
    return open(read_bai, filename)
end
"""
    BAI(input::IO)
Load a BAI index from `input`.
"""
function BAI(input::IO)
    return read_bai(input)
end
# Read a BAI object from `input`.
function read_bai(input::IO)
    # check magic bytes ("BAI\1")
    B = read(input, UInt8)
    A = read(input, UInt8)
    I = read(input, UInt8)
    x = read(input, UInt8)
    if B != UInt8('B') || A != UInt8('A') || I != UInt8('I') || x != 0x01
        error("input is not a valid BAI file")
    end
    # read contents: per-reference binning index, then the optional
    # count of reads without coordinates
    n_refs = read(input, Int32)
    index = Indexes.read_bgzfindex(input, n_refs)
    if !eof(input)
        n_no_coors = read(input, UInt64)
    else
        n_no_coors = nothing
    end
    return BAI(index, n_no_coors)
end
| XAM | https://github.com/BioJulia/XAM.jl.git |
|
[
"MIT"
] | 0.4.0 | e402710abf3f0bbed192896851bef8e483cf7952 | code | 587 | # BAM File Format
# ===============
# Module implementing reading and writing of the BAM (binary SAM) format.
module BAM

using BioGenerics
using GenomicFeatures
using XAM.SAM

import ..XAM: flags, XAMRecord, XAMReader, XAMWriter,
    ismapped, isprimaryalignment, ispositivestrand, isnextmapped #TODO: Deprecate import of flag queries. These were imported to preserve existing API.

import BGZFStreams
import BioAlignments
import Indexes
import BioSequences
import BioGenerics: isfilled, header
import GenomicFeatures: eachoverlap

include("bai.jl")      # BAI index type
include("auxdata.jl")  # auxiliary-data dictionary
include("reader.jl")   # BAM.Reader
include("record.jl")   # BAM.Record
include("writer.jl")   # BAM.Writer
include("overlap.jl")  # indexed overlap iteration

end
| XAM | https://github.com/BioJulia/XAM.jl.git |
|
[
"MIT"
] | 0.4.0 | e402710abf3f0bbed192896851bef8e483cf7952 | code | 2998 | # BAM Overlap
# ===========
# Lazy iterator over records in `reader` that overlap `interval` on `refname`.
struct OverlapIterator{T}
    reader::Reader{T}
    refname::String
    interval::UnitRange{Int}
end

# The number of overlapping records cannot be known in advance.
function Base.IteratorSize(::Type{OverlapIterator{T}}) where T
    return Base.SizeUnknown()
end

function Base.eltype(::Type{OverlapIterator{T}}) where T
    return Record
end

# Dispatch an `Interval` query to the (refname, range) method.
function GenomicFeatures.eachoverlap(reader::Reader, interval::Interval)
    return GenomicFeatures.eachoverlap(reader, GenomicFeatures.seqname(interval), GenomicFeatures.leftposition(interval):GenomicFeatures.rightposition(interval))
end

# Fallback: anything convertible to an `Interval` is accepted.
function GenomicFeatures.eachoverlap(reader::Reader, interval)
    return GenomicFeatures.eachoverlap(reader, convert(Interval, interval))
end

function GenomicFeatures.eachoverlap(reader::Reader, refname::AbstractString, interval::UnitRange)
    return OverlapIterator(reader, String(refname), interval)
end
# Iterator
# --------

# Mutable iteration state for `OverlapIterator`.
mutable struct OverlapIteratorState
    # reference index
    refindex::Int
    # possibly overlapping chunks
    chunks::Vector{Indexes.Chunk}
    # current chunk index
    chunkid::Int
    # pre-allocated record (reused as a read buffer)
    record::Record
end

# Start iteration: resolve the reference name against the header, collect
# candidate chunks from the BAI index, and seek to the first chunk.
function Base.iterate(iter::OverlapIterator)
    refindex = findfirst(isequal(iter.refname), iter.reader.refseqnames)
    if refindex === nothing
        throw(ArgumentError("sequence name $(iter.refname) is not found in the header"))
    end
    @assert iter.reader.index !== nothing "Reader index cannot be nothing."
    chunks = Indexes.overlapchunks(iter.reader.index.index, refindex, iter.interval)
    if isempty(chunks)
        return nothing
    end
    state = OverlapIteratorState(refindex, chunks, 1, Record())
    seek(iter.reader, state.chunks[state.chunkid].start)
    return iterate(iter, state)
end
# Continue iteration: scan records chunk by chunk, yielding each record that
# overlaps the query interval.
function Base.iterate(iter::OverlapIterator, state)
    while state.chunkid ≤ lastindex(state.chunks)
        chunk = state.chunks[state.chunkid]
        while BGZFStreams.virtualoffset(iter.reader.stream) < chunk.stop
            read!(iter.reader, state.record)
            c = compare_intervals(state.record, (state.refindex, iter.interval))
            if c == 0 # overlapping
                # copy: `state.record` is reused as a read buffer
                return copy(state.record), state
            end
            if c > 0
                # no more overlapping records in this chunk since records are sorted
                break
            end
        end
        state.chunkid += 1
        if state.chunkid ≤ lastindex(state.chunks)
            seek(iter.reader, state.chunks[state.chunkid].start)
        end
    end
    # no more overlapping records
    return nothing
end
# Classify `record` against `interval = (refindex, range)`:
# returns -1 when the record lies strictly left of the interval,
# +1 when strictly right, and 0 when the two overlap.
function compare_intervals(record::Record, interval::Tuple{Int,UnitRange{Int}})
    refindex, range = interval
    id = refid(record)
    if id < refindex || (id == refindex && rightposition(record) < first(range))
        return -1  # strictly left
    elseif id > refindex || (id == refindex && leftposition(record) > last(range))
        return +1  # strictly right
    else
        return 0   # overlapping
    end
end
| XAM | https://github.com/BioJulia/XAM.jl.git |
|
[
"MIT"
] | 0.4.0 | e402710abf3f0bbed192896851bef8e483cf7952 | code | 3785 | # BAM Reader
# ==========
"""
    BAM.Reader(input::IO; index=nothing)
Create a data reader of the BAM file format.
# Arguments
* `input`: data source
* `index=nothing`: filepath to a random access index (currently *bai* is supported) or BAI object
"""
mutable struct Reader{T} <: XAMReader
    stream::BGZFStreams.BGZFStream{T}        # BGZF-compressed input
    header::SAM.Header                       # SAM header parsed from the BAM header text
    start_offset::BGZFStreams.VirtualOffset  # virtual offset of the first record
    refseqnames::Vector{String}              # reference sequence names, by id
    refseqlens::Vector{Int}                  # reference sequence lengths, by id
    index::Union{Nothing, BAI}               # optional BAI index for random access
end

function Base.eltype(::Type{Reader{T}}) where T
    return Record
end

function BioGenerics.IO.stream(reader::Reader)
    return reader.stream
end

# Public constructor: parse the BAM header, then attach the optional index.
function Reader(input::IO; index=nothing)
    reader = init_bam_reader(input)
    reader.index = init_bam_index(index)
    return reader
end

function Base.show(io::IO, reader::Reader)
    println(io, summary(reader), ":")
    print(io, " number of contigs: ", length(reader.refseqnames))
end
"""
    header(reader::Reader; fillSQ::Bool=false)::SAM.Header
Get the header of `reader`.
If `fillSQ` is `true`, this function fills missing "SQ" metainfo in the header.
"""
function header(reader::Reader; fillSQ::Bool=false)::SAM.Header
    header = reader.header
    if fillSQ
        if !isempty(findall(reader.header, "SQ"))
            throw(ArgumentError("SAM header already has SQ records"))
        end
        # synthesize SQ records from the binary reference list
        header = copy(header)
        for (name, len) in zip(reader.refseqnames, reader.refseqlens)
            push!(header, SAM.MetaInfo("SQ", ["SN" => name, "LN" => len]))
        end
    end
    return header
end

# Seek to the BGZF virtual offset `voffset`.
function Base.seek(reader::Reader, voffset::BGZFStreams.VirtualOffset)
    seek(reader.stream, voffset)
end

# Rewind to the first record (just after the header section).
function Base.seekstart(reader::Reader)
    seek(reader.stream, reader.start_offset)
end

# Iterate records; each yielded record is an independent copy, and the
# buffer record is emptied for reuse as the next iteration state.
function Base.iterate(reader::Reader, nextone = Record())
    if BioGenerics.IO.tryread!(reader, nextone) === nothing
        return nothing
    end
    return copy(nextone), empty!(nextone)
end
# Initialize a BAM reader by reading the header section.
function init_bam_reader(input::BGZFStreams.BGZFStream)
    # magic bytes ("BAM\1")
    B = read(input, UInt8)
    A = read(input, UInt8)
    M = read(input, UInt8)
    x = read(input, UInt8)
    if B != UInt8('B') || A != UInt8('A') || M != UInt8('M') || x != 0x01
        error("input was not a valid BAM file")
    end
    # SAM header: plain SAM text of `textlen` bytes
    textlen = read(input, Int32)
    samreader = SAM.Reader(IOBuffer(read(input, textlen)))
    # reference sequences: (name, length) pairs
    n_refs = read(input, Int32)
    refseqnames = Vector{String}(undef, n_refs)
    refseqlens = Vector{Int}(undef, n_refs)
    @inbounds for i in 1:n_refs
        namelen = read(input, Int32)
        data = read(input, namelen)
        seqname = unsafe_string(pointer(data))  # drops the trailing NUL
        seqlen = read(input, Int32)
        refseqnames[i] = seqname
        refseqlens[i] = seqlen
    end
    # a pipe has no meaningful virtual offset, so fall back to zero
    voffset = isa(input.io, Base.AbstractPipe) ?
        BGZFStreams.VirtualOffset(0, 0) :
        BGZFStreams.virtualoffset(input)
    return Reader(
        input,
        samreader.header,
        voffset,
        refseqnames,
        refseqlens,
        nothing)
end

function init_bam_reader(input::IO)
    return init_bam_reader(BGZFStreams.BGZFStream(input))
end

# Normalize the `index` keyword of `Reader` to a `BAI` or `nothing`.
init_bam_index(index::AbstractString) = BAI(index)
init_bam_index(index::BAI) = index
init_bam_index(index::Nothing) = nothing
init_bam_index(index) = error("unrecognizable index argument")
# Read one record from `reader` into `record` in place.
function _read!(reader::Reader, record)
    # read the fixed-length fields directly into the record's memory
    unsafe_read(
        reader.stream,
        pointer_from_objref(record),
        FIXED_FIELDS_BYTES)
    dsize = data_size(record)
    # grow the data buffer only when needed; it is never shrunk
    if length(record.data) < dsize
        resize!(record.data, dsize)
    end
    # read the variable-length portion
    unsafe_read(reader.stream, pointer(record.data), dsize)
    record.reader = reader
    return record
end
| XAM | https://github.com/BioJulia/XAM.jl.git |
|
[
"MIT"
] | 0.4.0 | e402710abf3f0bbed192896851bef8e483cf7952 | code | 17060 | # BAM Record
# ==========
"""
    BAM.Record()
Create an unfilled BAM record.
"""
mutable struct Record <: XAMRecord
    # fixed-length fields (see BAM specs for the details)
    block_size::Int32
    refid::Int32
    pos::Int32
    l_read_name::UInt8
    mapq::UInt8
    bin::UInt16
    n_cigar_op::UInt16
    flags::UInt16
    l_seq::Int32
    next_refid::Int32
    next_pos::Int32
    tlen::Int32
    # variable length data
    data::Vector{UInt8}
    reader::Union{Reader, Nothing}
    function Record()
        # `reader` is intentionally left undefined until the record is filled
        return new(0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, UInt8[])
    end
end

# the data size of fixed-length fields (block_size-tlen)
const FIXED_FIELDS_BYTES = 36

function Record(data::Vector{UInt8})
    return convert(Record, data)
end

# Build a record from its serialized byte representation.
function Base.convert(::Type{Record}, data::Vector{UInt8})
    length(data) < FIXED_FIELDS_BYTES && throw(ArgumentError("data too short"))
    record = Record()
    # copy the fixed fields straight into the record's memory
    dst_pointer = Ptr{UInt8}(pointer_from_objref(record))
    unsafe_copyto!(dst_pointer, pointer(data), FIXED_FIELDS_BYTES)
    dsize = data_size(record)
    resize!(record.data, dsize)
    length(data) < dsize + FIXED_FIELDS_BYTES && throw(ArgumentError("data too short"))
    unsafe_copyto!(record.data, 1, data, FIXED_FIELDS_BYTES + 1, dsize)
    return record
end
function Base.:(==)(a::Record, b::Record)
    return a.block_size == b.block_size &&
        a.refid == b.refid &&
        a.pos == b.pos &&
        a.l_read_name == b.l_read_name &&
        a.mapq == b.mapq &&
        a.bin == b.bin &&
        a.n_cigar_op == b.n_cigar_op &&
        a.flags == b.flags &&
        a.l_seq == b.l_seq &&
        a.next_refid == b.next_refid &&
        a.next_pos == b.next_pos &&
        a.tlen == b.tlen &&
        a.data[1:data_size(a)] == b.data[1:data_size(b)] # compare only the filled part of the buffers
end

function Base.copy(record::Record)
    copy = Record()
    GC.@preserve copy record begin
        # copy the fixed-length fields bytewise
        dst_pointer = Ptr{UInt8}(pointer_from_objref(copy))
        src_pointer = Ptr{UInt8}(pointer_from_objref(record))
        unsafe_copyto!(dst_pointer, src_pointer, FIXED_FIELDS_BYTES)
    end
    copy.data = record.data[1:data_size(record)]
    # NOTE(review): a freshly-constructed Record leaves `reader` undefined, so
    # copying one would throw UndefRefError here — confirm callers only copy
    # filled records.
    copy.reader = record.reader
    return copy
end

# Reset all fixed-length fields to zero, marking the record unfilled.
function Base.empty!(record::Record)
    record.block_size = 0
    record.refid = 0
    record.pos = 0
    record.l_read_name = 0
    record.mapq = 0
    record.bin = 0
    record.flags = 0
    record.n_cigar_op = 0
    record.l_seq = 0
    record.next_refid = 0
    record.next_pos = 0
    record.tlen = 0
    #Note: data will be overwritten and indexed using data_size.
    return record
end
# Multi-line display of a record's fields; unfilled records show a placeholder.
function Base.show(io::IO, record::Record)
    print(io, summary(record), ':')
    if isfilled(record)
        println(io)
        println(io, " template name: ", tempname(record))
        println(io, " flags: ", flags(record))
        println(io, " reference ID: ", refid(record))
        println(io, " position: ", position(record))
        println(io, " mapping quality: ", mappingquality(record))
        println(io, " CIGAR: ", cigar(record))
        println(io, " next reference ID: ", nextrefid(record))
        println(io, " next position: ", nextposition(record))
        println(io, " template length: ", templength(record))
        println(io, " sequence: ", sequence(record))
        # TODO: pretty print base quality
        println(io, " base quality: ", quality(record))
        print(io, " auxiliary data:")
        for field in keys(auxdata(record))
            print(io, ' ', field, '=', record[field])
        end
    else
        print(io, " <not filled>")
    end
end

# Fill `record` from the reader's stream (delegates to `_read!`).
function Base.read!(reader::Reader, record::Record)
    return _read!(reader, record)
end
# Accessor Functions
# -----------------

# Raw FLAG field of the record.
function flags(record::Record)::UInt16
    checkfilled(record)
    return record.flags
end

function hasflags(record::Record)
    return isfilled(record)
end

"""
    refid(record::Record)::Int
Get the reference sequence ID of `record`.
The ID is 1-based (i.e. the first sequence is 1) and is 0 for a record without a mapping position.
See also: `BAM.rname`
"""
function refid(record::Record)::Int
    checkfilled(record)
    return record.refid + 1
end

function hasrefid(record::Record)
    return isfilled(record)
end

# Like `refid`, but throws when the record is unmapped or has no reader.
function checked_refid(record::Record)
    id = refid(record)
    if id == 0
        throw(ArgumentError("record is not mapped"))
    end
    if !isdefined(record, :reader)
        throw(ArgumentError("reader is not defined"))
    end
    return id
end

"""
    refname(record::Record)::String
Get the reference sequence name of `record`.
See also: `BAM.refid`
"""
function refname(record::Record)::String
    checkfilled(record)
    id = checked_refid(record)
    return record.reader.refseqnames[id]
end

"""
    reflen(record::Record)::Int
Get the length of the reference sequence this record applies to.
"""
function reflen(record::Record)::Int
    checkfilled(record)
    id = checked_refid(record)
    return record.reader.refseqlens[id]
end

function hasrefname(record::Record)
    return hasrefid(record)
end

"""
    position(record::Record)::Int
Get the 1-based leftmost mapping position of `record`.
"""
function position(record::Record)::Int
    checkfilled(record)
    return record.pos + 1
end

function hasposition(record::Record)
    return isfilled(record)
end

"""
    rightposition(record::Record)::Int
Get the 1-based rightmost mapping position of `record`.
"""
function rightposition(record::Record)::Int
    checkfilled(record)
    return Int32(position(record) + alignlength(record) - 1)
end

function hasrightposition(record::Record)
    return isfilled(record) && ismapped(record)
end

"""
    nextrefid(record::Record)::Int
Get the next/mate reference sequence ID of `record`.
"""
function nextrefid(record::Record)::Int
    checkfilled(record)
    return record.next_refid + 1
end

function hasnextrefid(record::Record)
    return isfilled(record)
end

"""
    nextrefname(record::Record)::String
Get the reference name of the mate/next read of `record`.
"""
function nextrefname(record::Record)::String
    checkfilled(record)
    id = nextrefid(record)
    if id == 0
        throw(ArgumentError("next record is not mapped"))
    elseif !isdefined(record, :reader)
        throw(ArgumentError("reader is not defined"))
    end
    return record.reader.refseqnames[id]
end

function hasnextrefname(record::Record)
    return isfilled(record) && isnextmapped(record)
end

"""
    nextposition(record::Record)::Int
Get the 1-based leftmost mapping position of the next/mate read of `record`.
"""
function nextposition(record::Record)::Int
    checkfilled(record)
    return record.next_pos + 1
end

function hasnextposition(record::Record)
    return isfilled(record)
end

"""
    mappingquality(record::Record)::UInt8
Get the mapping quality of `record`.
"""
function mappingquality(record::Record)::UInt8
    # NOTE(review): unlike the other accessors this does not call
    # `checkfilled` — confirm whether that is intentional.
    return record.mapq
end

function hasmappingquality(record::Record)
    return isfilled(record)
end
"""
    n_cigar_op(record::Record, checkCG::Bool = true)
Return the number of operations in the CIGAR string of `record`.
Note that in the BAM specification, the field called `cigar` typically stores the cigar string of the record.
However, this is not always true, sometimes the true cigar is very long, and due to some constraints of the BAM format, the actual cigar string is stored in an extra tag: `CG:B,I`, and the `cigar` field stores a pseudo-cigar string.
Calling this method with `checkCG` set to `true` (default) this method will always yield the number of operations in the true cigar string, because this is probably what you want, the vast majority of the time.
If you have a record that stores the true cigar in a `CG:B,I` tag, but you still want to get the number of operations in the `cigar` field of the BAM record, then set `checkCG` to `false`.
"""
function n_cigar_op(record::Record, checkCG::Bool = true)
    return cigar_position(record, checkCG)[2]
end

"""
    cigar(record::Record)::String
Get the CIGAR string of `record`.
Note that in the BAM specification, the field called `cigar` typically stores the cigar string of the record.
However, this is not always true, sometimes the true cigar is very long, and due to some constraints of the BAM format, the actual cigar string is stored in an extra tag: `CG:B,I`, and the `cigar` field stores a pseudo-cigar string.
Calling this method with `checkCG` set to `true` (default) this method will always yield the true cigar string, because this is probably what you want the vast majority of the time.
If you have a record that stores the true cigar in a `CG:B,I` tag, but you still want to access the pseudo-cigar that is stored in the `cigar` field of the BAM record, then you can set checkCG to `false`.
See also `BAM.cigar_rle`.
"""
function cigar(record::Record, checkCG::Bool = true)::String
    buf = IOBuffer()
    for (op, len) in zip(cigar_rle(record, checkCG)...)
        print(buf, len, convert(Char, op))
    end
    return String(take!(buf))
end

"""
    cigar_rle(record::Record, checkCG::Bool = true)::Tuple{Vector{BioAlignments.Operation},Vector{Int}}
Get a run-length encoded tuple `(ops, lens)` of the CIGAR string in `record`.
Note that in the BAM specification, the field called `cigar` typically stores the cigar string of the record.
However, this is not always true, sometimes the true cigar is very long, and due to some constraints of the BAM format, the actual cigar string is stored in an extra tag: `CG:B,I`, and the `cigar` field stores a pseudo-cigar string.
Calling this method with `checkCG` set to `true` (default) this method will always yield the true cigar string, because this is probably what you want the vast majority of the time.
If you have a record that stores the true cigar in a `CG:B,I` tag, but you still want to access the pseudo-cigar that is stored in the `cigar` field of the BAM record, then you can set checkCG to `false`.
See also `BAM.cigar`.
"""
function cigar_rle(record::Record, checkCG::Bool = true)::Tuple{Vector{BioAlignments.Operation},Vector{Int}}
    checkfilled(record)
    idx, nops = cigar_position(record, checkCG)
    ops, lens = extract_cigar_rle(record.data, idx, nops)
    return ops, lens
end

# Decode `n` packed 32-bit CIGAR operations starting at `data[offset]`.
# Each op packs length<<4 | opcode (low nybble).
function extract_cigar_rle(data::Vector{UInt8}, offset, n)
    ops = Vector{BioAlignments.Operation}()
    lens = Vector{Int}()
    for i in offset:4:offset + (n - 1) * 4
        x = unsafe_load(Ptr{UInt32}(pointer(data, i)))
        op = BioAlignments.Operation(x & 0x0F)
        push!(ops, op)
        push!(lens, x >> 4)
    end
    return ops, lens
end

# Return `(index into record.data of the first CIGAR op, number of ops)`.
# When `checkCG` is true and the cigar field holds the `<seqlen>S...` pseudo
# cigar, look up the true cigar stored in the `CG:B,I` aux tag instead.
function cigar_position(record::Record, checkCG::Bool = true)::Tuple{Int, Int}
    cigaridx, nops = seqname_length(record) + 1, record.n_cigar_op
    if !checkCG
        return cigaridx, nops
    end
    if nops != 2
        return cigaridx, nops
    end
    # `seqlength << 4 | 4` is the first op of the pseudo-cigar pattern
    x = unsafe_load(Ptr{UInt32}(pointer(record.data, cigaridx)))
    if x != UInt32(seqlength(record) << 4 | 4)
        return cigaridx, nops
    end
    start = auxdata_position(record)
    stop = data_size(record)
    tagidx = findauxtag(record.data, start, stop, UInt8('C'), UInt8('G'))
    if tagidx == 0
        return cigaridx, nops
    end
    # Tag exists, validate type is BI.
    typ = unsafe_load(Ptr{UInt16}(pointer(record.data, tagidx += 2)))
    if typ != (UInt16('I') << 8 | UInt16('B'))
        return cigaridx, nops
    end
    # If got this far, the CG tag is valid and contains the cigar.
    # Get the true n_cigar_ops, and return it and the idx of the first
    nops = UInt32(unsafe_load(Ptr{Int32}(pointer(record.data, tagidx += 2))))
    tagidx += 4
    return tagidx, nops
end
"""
    alignment(record::Record)::BioAlignments.Alignment
Get the alignment of `record`.
"""
function alignment(record::Record)::BioAlignments.Alignment
    if ismapped(record)
        return BioAlignments.Alignment(cigar(record), 1, position(record))
    end
    # unmapped records yield an empty alignment
    return BioAlignments.Alignment(BioAlignments.AlignmentAnchor[])
end

function hasalignment(record::Record)
    return ismapped(record)
end

"""
    alignlength(record::Record)::Int
Get the alignment length of `record`.
"""
function alignlength(record::Record)::Int
    offset = seqname_length(record)
    length::Int = 0
    # sum reference-consuming (match/delete) op lengths from the raw cigar
    for i in offset + 1:4:offset + n_cigar_op(record, false) * 4
        x = unsafe_load(Ptr{UInt32}(pointer(record.data, i)))
        op = BioAlignments.Operation(x & 0x0F)
        if BioAlignments.ismatchop(op) || BioAlignments.isdeleteop(op)
            length += x >> 4
        end
    end
    return length
end
"""
    tempname(record::Record)::String
Get the query template name of `record`.
"""
function tempname(record::Record)::String
    checkfilled(record)
    # drop the last NUL character
    return unsafe_string(pointer(record.data), max(seqname_length(record) - 1, 0))
end

function hastempname(record::Record)
    return isfilled(record)
end

"""
    templength(record::Record)::Int
Get the template length of `record`.
"""
function templength(record::Record)::Int
    checkfilled(record)
    return record.tlen
end

function hastemplength(record::Record)
    return isfilled(record)
end

"""
    sequence(record::Record)::BioSequences.LongDNA{4}
Get the segment sequence of `record`.
"""
function sequence(record::Record)
    checkfilled(record)
    seqlen = seqlength(record)
    if seqlen == 0
        return nothing
    end
    # 4-bit encoded bases, 16 per UInt64 word
    data = Vector{UInt64}(undef, cld(seqlen, 16))
    src::Ptr{UInt64} = pointer(record.data, seqname_length(record) + n_cigar_op(record, false) * 4 + 1)
    for i in 1:lastindex(data)
        # copy data flipping high and low nybble
        x = unsafe_load(src, i)
        data[i] = (x & 0x0f0f0f0f0f0f0f0f) << 4 | (x & 0xf0f0f0f0f0f0f0f0) >> 4
    end
    return BioSequences.LongDNA{4}(data, UInt(seqlen))
end

function hassequence(record::Record)
    return isfilled(record)
end

"""
    seqlength(record::Record)::Int
Get the sequence length of `record`.
"""
function seqlength(record::Record)::Int
    checkfilled(record)
    return record.l_seq % Int
end

function hasseqlength(record::Record)
    return isfilled(record)
end

"""
    quality(record::Record)
Get the base quality of `record`.
"""
function quality(record::Record)
    checkfilled(record)
    seqlen = seqlength(record)
    # qualities follow the name, cigar, and packed sequence bytes
    offset = seqname_length(record) + n_cigar_op(record, false) * 4 + cld(seqlen, 2)
    return record.data[(1+offset):(seqlen+offset)]
end

function hasquality(record::Record)
    return isfilled(record)
end

"""
    auxdata(record::Record)::BAM.AuxData
Get the auxiliary data of `record`.
"""
function auxdata(record::Record)
    checkfilled(record)
    return AuxData(record.data[auxdata_position(record):data_size(record)])
end

function hasauxdata(record::Record)
    return isfilled(record)
end

# Aux tag lookup, e.g. `record["NM"]`.
function Base.getindex(record::Record, tag::AbstractString)
    checkauxtag(tag)
    start = auxdata_position(record)
    stop = data_size(record)
    return getauxvalue(record.data, start, stop, UInt8(tag[1]), UInt8(tag[2]))
end

function Base.haskey(record::Record, tag::AbstractString)
    checkauxtag(tag)
    start = auxdata_position(record)
    stop = data_size(record)
    return findauxtag(record.data, start, stop, UInt8(tag[1]), UInt8(tag[2])) > 0
end

function Base.keys(record::Record)
    return collect(keys(auxdata(record)))
end

function Base.values(record::Record)
    return [record[key] for key in keys(record)]
end
# BioGenerics Methods
# -------------------
# Implement the generic BioGenerics record interface by delegating to the
# BAM-specific accessors defined above.

# A record counts as filled once its block size has been read.
BioGenerics.isfilled(record::Record) = record.block_size != 0

BioGenerics.seqname(record::Record) = tempname(record)
BioGenerics.hasseqname(record::Record) = hastempname(record)

BioGenerics.sequence(record::Record) = sequence(record)
BioGenerics.hassequence(record::Record) = hassequence(record)

BioGenerics.leftposition(record::Record) = position(record)
BioGenerics.hasleftposition(record::Record) = hasposition(record)

BioGenerics.rightposition(record::Record) = rightposition(record)
BioGenerics.hasrightposition(record::Record) = hasrightposition(record)
# Helper Functions
# ----------------

# Return the size of the `.data` field (number of meaningful bytes).
function data_size(record::Record)
    if isfilled(record)
        # block_size counts everything after itself, so add its own size back
        return record.block_size - FIXED_FIELDS_BYTES + sizeof(record.block_size)
    end
    return 0
end

function checkfilled(record::Record)
    if !isfilled(record)
        throw(ArgumentError("unfilled BAM record"))
    end
end

# Offset of the first auxiliary-data byte within `record.data`:
# past the read name, the cigar ops, the packed sequence, and the qualities.
function auxdata_position(record::Record)
    seqlen = seqlength(record)
    return seqname_length(record) + n_cigar_op(record, false) * 4 + cld(seqlen, 2) + seqlen + 1
end

# Return the length of the read name.
# Note: `l_read_name` includes the trailing NUL byte (see `tempname`).
function seqname_length(record::Record)
    return record.l_read_name
end
| XAM | https://github.com/BioJulia/XAM.jl.git |
|
[
"MIT"
] | 0.4.0 | e402710abf3f0bbed192896851bef8e483cf7952 | code | 1652 | # BAM Writer
# ==========
"""
    BAM.Writer(output::BGZFStream, header::SAM.Header)
Create a data writer of the BAM file format.
# Arguments
* `output`: data sink
* `header`: SAM header object
"""
mutable struct Writer <: XAMWriter
    stream::BGZFStreams.BGZFStream
end

function Writer(stream::BGZFStreams.BGZFStream, header::SAM.Header)
    # collect the reference dictionary from the header's SQ records
    refseqnames = String[]
    refseqlens = Int[]
    for metainfo in findall(header, "SQ")
        push!(refseqnames, metainfo["SN"])
        push!(refseqlens, parse(Int, metainfo["LN"]))
    end
    write_header(stream, header, refseqnames, refseqlens)
    return Writer(stream)
end

function BioGenerics.IO.stream(writer::Writer)
    return writer.stream
end

# Write one record: fixed-length fields followed by the variable-length data.
# Returns the number of bytes written.
function Base.write(writer::Writer, record::Record)
    n = 0
    n += unsafe_write(writer.stream, pointer_from_objref(record), FIXED_FIELDS_BYTES)
    n += unsafe_write(writer.stream, pointer(record.data), data_size(record))
    return n
end
# Write the BAM header section: magic bytes, SAM-formatted header text, and
# the reference sequence dictionary (name/length pairs).
# Returns the total number of bytes written.
function write_header(stream, header, refseqnames, refseqlens)
    # validate with a real exception instead of @assert, which may be disabled
    if length(refseqnames) != length(refseqlens)
        throw(DimensionMismatch("Lengths of refseq names and lengths must match."))
    end
    n = 0
    # magic bytes
    n += write(stream, "BAM\1")
    # SAM header
    buf = IOBuffer()
    l = write(SAM.Writer(buf), header)
    n += write(stream, Int32(l))
    n += write(stream, take!(buf))
    # reference sequences
    n += write(stream, Int32(length(refseqnames)))
    for (seqname, seqlen) in zip(refseqnames, refseqlens)
        # Use sizeof, not length: the BAM format stores the byte count of the
        # NUL-terminated name, and `length` counts characters, which differs
        # from the byte count for non-ASCII names.
        namelen = sizeof(seqname)
        n += write(stream, Int32(namelen + 1))
        n += write(stream, seqname, '\0')
        n += write(stream, Int32(seqlen))
    end
    return n
end
| XAM | https://github.com/BioJulia/XAM.jl.git |
|
[
"MIT"
] | 0.4.0 | e402710abf3f0bbed192896851bef8e483cf7952 | code | 1148 | # SAM Header
# ==========
# A SAM header: an ordered collection of metainfo records.
struct Header
    metainfo::Vector{MetaInfo}
end
"""
    SAM.Header()
Create an empty header.
"""
function Header()
    return Header(MetaInfo[])
end
function Base.:(==)(a::Header, b::Header)
    return a.metainfo == b.metainfo
end
function Base.copy(header::Header)
    # Copy the metainfo vector as well: returning `Header(header.metainfo)`
    # would alias the original vector, so mutating the "copy" (e.g. the
    # `push!` in `BAM.header(reader; fillSQ=true)`) would silently mutate
    # the original header too.
    return Header(copy(header.metainfo))
end
# Iterating a header yields its `MetaInfo` records in order.
Base.eltype(::Type{Header}) = MetaInfo

Base.length(header::Header) = length(header.metainfo)

function Base.iterate(header::Header, i=1)
    i > length(header.metainfo) && return nothing
    return header.metainfo[i], i + 1
end
"""
    findall(header::Header, key::AbstractString)::Vector{MetaInfo}
Find metainfo objects satisfying `SAM.tag(metainfo) == key`.
"""
function Base.findall(header::Header, key::AbstractString)::Vector{MetaInfo}
    return [m for m in header.metainfo if isequalkey(m, key)]
end

# Prepend `metainfo` to the header, returning the header.
Base.pushfirst!(header::Header, metainfo::MetaInfo) =
    (pushfirst!(header.metainfo, metainfo); header)

# Append `metainfo` to the header, returning the header.
Base.push!(header::Header, metainfo::MetaInfo) =
    (push!(header.metainfo, metainfo); header)
| XAM | https://github.com/BioJulia/XAM.jl.git |
|
[
"MIT"
] | 0.4.0 | e402710abf3f0bbed192896851bef8e483cf7952 | code | 5935 | # SAM Meta-Information
# ====================
# A single SAM header line ("@XX\t...") stored as raw bytes plus index ranges
# into those bytes.
mutable struct MetaInfo
    # data and filled range
    data::Vector{UInt8}
    filled::UnitRange{Int}
    # indexes
    tag::UnitRange{Int}              # range of the two-byte record tag
    val::UnitRange{Int}              # range of the whole value portion
    dictkey::Vector{UnitRange{Int}}  # ranges of the dictionary keys
    dictval::Vector{UnitRange{Int}}  # ranges of the dictionary values
end
function Base.:(==)(a::MetaInfo, b::MetaInfo)
    return a.data == b.data &&
        a.filled == b.filled &&
        a.tag == b.tag &&
        a.val == b.val &&
        a.dictkey == b.dictkey &&
        a.dictval == b.dictval
end
# Construct from raw bytes; non-empty data is indexed immediately.
function MetaInfo(data::Vector{UInt8}=UInt8[])
    metainfo = MetaInfo(data, 1:0, 1:0, 1:0, UnitRange{Int}[], UnitRange{Int}[])
    if !isempty(data)
        index!(metainfo)
    end
    return metainfo
end
"""
    MetaInfo(str::AbstractString)
Create a SAM metainfo from `str`.
# Examples
julia> SAM.MetaInfo("@CO\tsome comment")
BioAlignments.SAM.MetaInfo:
tag: CO
value: some comment
julia> SAM.MetaInfo("@SQ\tSN:chr1\tLN:12345")
BioAlignments.SAM.MetaInfo:
tag: SQ
value: SN=chr1 LN=12345
"""
function MetaInfo(str::AbstractString)
    return MetaInfo(collect(UInt8, str))
end
"""
    MetaInfo(tag::AbstractString, value)
Create a SAM metainfo with `tag` and `value`.
`tag` is a two-byte ASCII string. If `tag` is `"CO"`, `value` must be a string; otherwise, `value` is an iterable object with key and value pairs.
# Examples
julia> SAM.MetaInfo("CO", "some comment")
BioAlignments.SAM.MetaInfo:
tag: CO
value: some comment
julia> string(ans)
"@CO\tsome comment"
julia> SAM.MetaInfo("SQ", ["SN" => "chr1", "LN" => 12345])
BioAlignments.SAM.MetaInfo:
tag: SQ
value: SN=chr1 LN=12345
julia> string(ans)
"@SQ\tSN:chr1\tLN:12345"
"""
function MetaInfo(tag::AbstractString, value)
    buf = IOBuffer()
    if tag == "CO" # comment
        if !isa(value, AbstractString)
            throw(ArgumentError("value must be a string"))
        end
        write(buf, "@CO\t", value)
    elseif occursin(r"^[A-Z][A-Z]$", tag)
        # The pattern is anchored: an unanchored `r"[A-Z][A-Z]"` would accept
        # invalid tags such as "xABy" or "ABC" whose substrings match and emit
        # a malformed header line.
        print(buf, '@', tag)
        for (key, val) in value
            print(buf, '\t', key, ':', val)
        end
    else
        throw(ArgumentError("tag must match r\"^[A-Z][A-Z]\$\""))
    end
    return MetaInfo(take!(buf))
end
# Reset all index fields, marking the metainfo unfilled.
function initialize!(metainfo::MetaInfo)
    metainfo.filled = 1:0
    metainfo.tag = 1:0
    metainfo.val = 1:0
    empty!(metainfo.dictkey)
    empty!(metainfo.dictval)
    return metainfo
end
function isfilled(metainfo::MetaInfo)
    return !isempty(metainfo.filled)
end
# Range of `data` holding the record's bytes.
function datarange(metainfo::MetaInfo)
    return metainfo.filled
end
function checkfilled(metainfo::MetaInfo)
    if !isfilled(metainfo)
        throw(ArgumentError("unfilled SAM metainfo"))
    end
end
# Test whether the record tag equals the two-character `key`.
# Unfilled metainfo and keys that are not exactly two bytes never match.
function isequalkey(metainfo::MetaInfo, key::AbstractString)
    if !isfilled(metainfo) || sizeof(key) != 2
        return false
    end
    k1, k2 = UInt8(key[1]), UInt8(key[2])
    return metainfo.data[metainfo.tag[1]] == k1 && metainfo.data[metainfo.tag[2]] == k2
end
# Multi-line display: tag plus either key=value pairs or the comment text.
function Base.show(io::IO, metainfo::MetaInfo)
    print(io, summary(metainfo), ':')
    if isfilled(metainfo)
        println(io)
        println(io, " tag: ", tag(metainfo))
        print(io, " value:")
        if !iscomment(metainfo)
            for (key, val) in zip(keys(metainfo), values(metainfo))
                print(io, ' ', key, '=', val)
            end
        else
            print(io, ' ', value(metainfo))
        end
    else
        print(io, " <not filled>")
    end
end
# `print` emits the raw header-line text (no trailing newline).
function Base.print(io::IO, metainfo::MetaInfo)
    write(io, metainfo)
    return nothing
end
# Write the raw bytes of the record; returns the number of bytes written.
function Base.write(io::IO, metainfo::MetaInfo)
    checkfilled(metainfo)
    r = datarange(metainfo)
    return unsafe_write(io, pointer(metainfo.data, first(r)), length(r))
end
# Accessor Functions
# ------------------
"""
iscomment(metainfo::MetaInfo)::Bool
Test if `metainfo` is a comment (i.e. its tag is "CO").
"""
function iscomment(metainfo::MetaInfo)::Bool
return isequalkey(metainfo, "CO")
end
"""
tag(metainfo::MetaInfo)::String
Get the tag of `metainfo`.
"""
function tag(metainfo::MetaInfo)::String
checkfilled(metainfo)
return String(metainfo.data[metainfo.tag])
end
"""
value(metainfo::MetaInfo)::String
Get the value of `metainfo` as a string.
"""
function value(metainfo::MetaInfo)::String
checkfilled(metainfo)
return String(metainfo.data[metainfo.val])
end
"""
    keyvalues(metainfo::MetaInfo)::Vector{Pair{String,String}}

Get the values of `metainfo` as string pairs.
Throws `ArgumentError` for comment ("CO") lines, which are not dictionaries.
"""
function keyvalues(metainfo::MetaInfo)::Vector{Pair{String,String}}
    checkfilled(metainfo)
    iscomment(metainfo) && throw(ArgumentError("not a dictionary"))
    return [k => v for (k, v) in zip(keys(metainfo), values(metainfo))]
end
# Return the dictionary keys of `metainfo` as strings.
# Comment ("CO") lines have no keys and raise `ArgumentError`.
function Base.keys(metainfo::MetaInfo)
    checkfilled(metainfo)
    iscomment(metainfo) && throw(ArgumentError("not a dictionary"))
    return map(r -> String(metainfo.data[r]), metainfo.dictkey)
end
# Return the dictionary values of `metainfo` as strings, parallel to
# `keys(metainfo)`. Comment lines raise `ArgumentError`.
function Base.values(metainfo::MetaInfo)
    checkfilled(metainfo)
    iscomment(metainfo) && throw(ArgumentError("not a dictionary"))
    return map(r -> String(metainfo.data[r]), metainfo.dictval)
end
# True when `metainfo` contains a dictionary entry with the given two-byte key.
Base.haskey(metainfo::MetaInfo, key::AbstractString) = findkey(metainfo, key) > 0
# Look up the value for `key` in `metainfo`'s dictionary.
# Throws `KeyError` when the key is absent.
function Base.getindex(metainfo::MetaInfo, key::AbstractString)
    i = findkey(metainfo, key)
    i > 0 || throw(KeyError(key))
    return String(metainfo.data[metainfo.dictval[i]])
end
# Return the 1-based index of the dictionary entry whose two-byte key equals
# `key`, or 0 when absent (or when `key` is not exactly two bytes long).
function findkey(metainfo::MetaInfo, key::AbstractString)
    checkfilled(metainfo)
    sizeof(key) == 2 || return 0
    b1, b2 = UInt8(key[1]), UInt8(key[2])
    for (i, r) in enumerate(metainfo.dictkey)
        p = first(r)
        if metainfo.data[p] == b1 && metainfo.data[p+1] == b2
            return i
        end
    end
    return 0
end
| XAM | https://github.com/BioJulia/XAM.jl.git |
|
[
"MIT"
] | 0.4.0 | e402710abf3f0bbed192896851bef8e483cf7952 | code | 2878 | # SAM Reader
# =========
# SAM reader: wraps a transcoding stream together with the Automa parser
# state and the header, which is parsed eagerly by the constructor.
mutable struct Reader{S <: TranscodingStream} <: XAMReader
    state::State{S}
    header::Header
end

# Construct a reader from an existing parser `state`: parse the SAM header
# up-front, then leave the state machine positioned at the record body.
function Reader(state::State{S}) where {S <: TranscodingStream}
    rdr = Reader(state, Header())
    # Start the header machine at state 1 and the current line number.
    cs, ln = readheader!(rdr.state.stream, rdr.header, (1, rdr.state.linenum))
    rdr.state.state = 1 # Get the reader ready to read the body.
    rdr.state.linenum = ln
    rdr.state.filled = false
    if cs != -1 && cs != 0 #Note: the header is finished when the state machine fails to transition after a new line (state 1).
        throw(ArgumentError("Malformed SAM file header at line $(ln). Machine failed to transition from state $(cs)."))
    end
    return rdr
end
"""
    SAM.Reader(input::IO)

Create a data reader of the SAM file format.

# Arguments
* `input`: data source
"""
function Reader(input::IO)
    # Wrap plain IO in a no-op transcoding stream so the parser always sees
    # a `TranscodingStream`.
    stream = input isa TranscodingStream ? input : TranscodingStreams.NoopStream(input)
    return Reader(State(stream, 1, 1, false))
end
# Iterating a SAM reader yields `Record`s.
Base.eltype(::Type{<:Reader}) = Record
# Expose the underlying stream for the BioGenerics IO interface.
BioGenerics.IO.stream(reader::Reader) = reader.state.stream
"""
    header(reader::Reader)::Header

Get the header of `reader` (parsed when the reader was constructed).
"""
header(reader::Reader)::Header = reader.header
# Close the underlying stream when it is an `IO` object.
function Base.close(reader::Reader)
    stream = reader.state.stream
    stream isa IO && close(stream)
    return nothing
end
# Re-run the metainfo state machine over `record.data` to (re)compute the
# field index ranges. Throws `ArgumentError` on malformed input.
function index!(record::MetaInfo)
    wrapped = TranscodingStreams.NoopStream(IOBuffer(record.data))
    cs = index!(wrapped, record)
    cs == 0 || throw(ArgumentError("Invalid SAM metadata. Machine failed to transition from state $(cs)."))
    return record
end
# Re-run the record state machine over `record.data` to (re)compute the
# field index ranges. Throws `ArgumentError` on malformed input.
function index!(record::Record)
    wrapped = TranscodingStreams.NoopStream(IOBuffer(record.data))
    cs = index!(wrapped, record)
    cs == 0 || throw(ArgumentError("Invalid SAM record. Machine failed to transition from state $(cs)."))
    return record
end
# Iteration protocol: a single scratch `Record` serves as the iteration
# state; each yielded value is an independent copy so callers may keep it.
function Base.iterate(reader::Reader, nextone::Record = Record())
    if BioGenerics.IO.tryread!(reader, nextone) === nothing
        return nothing
    end
    # Yield a copy and reset the scratch record for the next iteration.
    return copy(nextone), empty!(nextone)
end

"""
    read!(rdr::Reader, rec::Record)

Read a `Record` into `rec`; overwriting or adding to existing field values.
It is assumed that `rec` is already initialized or empty.
"""
function Base.read!(rdr::Reader, record::Record)
    # Resume the body machine from the saved (state, line-number) pair.
    cs, ln, found = readrecord!(rdr.state.stream, record, (rdr.state.state, rdr.state.linenum))
    rdr.state.state = cs
    rdr.state.linenum = ln
    rdr.state.filled = found
    if found
        return record
    end
    # cs == 0 means the machine finished cleanly; otherwise it stalled.
    if cs == 0 || eof(rdr.state.stream)
        throw(EOFError())
    end
    throw(ArgumentError("Malformed SAM file record at line $(ln). Machine failed to transition from state $(cs)."))
end
| XAM | https://github.com/BioJulia/XAM.jl.git |
|
[
"MIT"
] | 0.4.0 | e402710abf3f0bbed192896851bef8e483cf7952 | code | 7824 | # Automa.jl generated readrecord! and readmetainfo! functions
# ========================================
import Automa: @re_str, rep, onexit!, onenter!, CodeGenContext, generate_reader, compile
# SAM grammar (SAMv1 spec):
#   file   = header . body
#   header = metainfo*
#   body   = record*
# The five Automa machines below are compiled from shared sub-expressions so
# the stand-alone metainfo/record machines stay in sync with the full grammar.
const sam_machine_metainfo, sam_machine_record, sam_machine_header, sam_machine_body, sam_machine = let
    metainfo = let
        # Any two-letter uppercase tag except the comment tag "CO".
        tag = onexit!(onenter!(re"[A-Z][A-Z]" \ re"CO", :pos1), :metainfo_tag)
        dict = let
            key = onexit!(onenter!(re"[A-Za-z][A-Za-z0-9]", :pos2), :metainfo_dict_key)
            val = onexit!(onenter!(re"[ -~]+", :pos2), :metainfo_dict_val)
            keyval = key * ':' * val
            keyval * rep('\t' * keyval)
        end
        onexit!(onenter!(dict, :pos1), :metainfo_val)
        co = onexit!(onenter!(re"CO", :pos1), :metainfo_tag)
        comment = onexit!(onenter!(re"[^\r\n]*", :pos1), :metainfo_val) # Note: Only single line comments are allowed.
        '@' * ((tag * '\t' * dict) | (co * '\t' * comment))
    end
    onexit!(onenter!(metainfo, :mark), :metainfo)
    record = let
        # The eleven mandatory tab-separated SAM fields.
        qname = onexit!(onenter!(re"[!-?A-~]+", :pos), :record_qname)
        flags = onexit!(onenter!(re"[0-9]+", :pos), :record_flags)
        rname = onexit!(onenter!(re"\*|[!-()+-<>-~][!-~]*", :pos), :record_rname)
        pos = onexit!(onenter!(re"[0-9]+", :pos), :record_pos)
        mapq = onexit!(onenter!(re"[0-9]+", :pos), :record_mapq)
        cigar = onexit!(onenter!(re"\*|([0-9]+[MIDNSHPX=])+", :pos), :record_cigar)
        rnext = onexit!(onenter!(re"\*|=|[!-()+-<>-~][!-~]*", :pos), :record_rnext)
        pnext = onexit!(onenter!(re"[0-9]+", :pos), :record_pnext)
        tlen = onexit!(onenter!(re"[-+]?[0-9]+", :pos), :record_tlen)
        seq = onexit!(onenter!(re"\*|[A-Za-z=.]+", :pos), :record_seq)
        qual = onexit!(onenter!(re"[!-~]+", :pos), :record_qual)
        field = let
            # Optional auxiliary fields: TAG:TYPE:VALUE.
            tag = re"[A-Za-z][A-Za-z0-9]"
            val = (
                re"A:[!-~]" |
                re"i:[-+]?[0-9]+" |
                re"f:[-+]?[0-9]*\.?[0-9]+([eE][-+]?[0-9]+)?" |
                re"Z:[ !-~]*" |
                re"H:([0-9A-F][0-9A-F])*" |
                re"B:[cCsSiIf](,[-+]?[0-9]*\.?[0-9]+([eE][-+]?[0-9]+)?)+"
            )
            tag * ':' * val
        end
        onexit!(onenter!(field, :pos), :record_field)
        qname * '\t' *
        flags * '\t' *
        rname * '\t' *
        pos * '\t' *
        mapq * '\t' *
        cigar * '\t' *
        rnext * '\t' *
        pnext * '\t' *
        tlen * '\t' *
        seq * '\t' *
        qual *
        rep('\t' * field)
    end
    onexit!(onenter!(record, :mark), :record)
    newline = "\r?" * onenter!(re"\n", :countline)
    header = onexit!(rep(metainfo * newline), :header)
    body = onexit!(rep(record * newline), :body)
    sam = header * body
    map(compile, (metainfo, record, header, body, sam))
end
# write("sam_machine_metainfo.dot", Automa.machine2dot(sam_machine_metainfo))
# run(`dot -Tsvg -o sam_machine_metainfo.svg sam_machine_metainfo.dot`)
#
# write("sam_machine_record.dot", Automa.machine2dot(sam_machine_record))
# run(`dot -Tsvg -o sam_machine_record.svg sam_machine_record.dot`)
#
# write("sam_machine_header.dot", Automa.machine2dot(sam_machine_header))
# run(`dot -Tsvg -o sam_machine_header.svg sam_machine_header.dot`)
#
# write("sam_machine_body.dot", Automa.machine2dot(sam_machine_body))
# run(`dot -Tsvg -o sam_machine_body.svg sam_machine_body.dot`)
#
# write("sam_machine.dot", Automa.machine2dot(sam_machine))
# run(`dot -Tsvg -o sam_machine.svg sam_machine.dot`)
# Copy `n` elements from `src` (starting at `spos`) into `dst` (starting at
# `dpos`), growing `dst` when it is too short. Returns `dst`.
function appendfrom!(dst, dpos, src, spos, n)
    needed = dpos + n - 1
    length(dst) < needed && resize!(dst, needed)
    unsafe_copyto!(dst, dpos, src, spos, n)
    return dst
end
# Action tables mapping machine action names to code snippets spliced into
# the generated readers. `@mark`/`@markpos`/`@relpos` are Automa-provided
# macros that record and translate absolute buffer positions into
# record-relative offsets.
const sam_actions_metainfo = Dict(
    :mark => :(@mark),
    :pos1 => :(pos1 = @relpos(p)),
    :pos2 => :(pos2 = @relpos(p)),
    :metainfo_tag => :(metainfo.tag = pos1:@relpos(p-1)),
    :metainfo_val => :(metainfo.val = pos1:@relpos(p-1)),
    :metainfo_dict_key => :(push!(metainfo.dictkey, pos2:@relpos(p-1))),
    :metainfo_dict_val => :(push!(metainfo.dictval, pos2:@relpos(p-1))),
    :metainfo => quote
        # Copy the matched bytes into the metainfo's own buffer.
        appendfrom!(metainfo.data, 1, data, @markpos, p-@markpos)
        metainfo.filled = 1:(p-@markpos)
    end
)
# Header actions: metainfo actions plus line counting and header assembly.
const sam_actions_header = merge(
    sam_actions_metainfo,
    Dict(
        :countline => :(linenum += 1),
        :metainfo => quote
            $(sam_actions_metainfo[:metainfo])
            push!(header, metainfo)
            metainfo = MetaInfo()
        end,
        :header => :(@escape)
    )
)
const sam_actions_record = Dict(
    :mark => :(@mark),
    :pos => :(pos = @relpos(p)),
    :record_qname => :(record.qname = pos:@relpos(p-1)),
    :record_flags => :(record.flags = pos:@relpos(p-1)),
    :record_rname => :(record.rname = pos:@relpos(p-1)),
    :record_pos => :(record.pos = pos:@relpos(p-1)),
    :record_mapq => :(record.mapq = pos:@relpos(p-1)),
    :record_cigar => :(record.cigar = pos:@relpos(p-1)),
    :record_rnext => :(record.rnext = pos:@relpos(p-1)),
    :record_pnext => :(record.pnext = pos:@relpos(p-1)),
    :record_tlen => :(record.tlen = pos:@relpos(p-1)),
    :record_seq => :(record.seq = pos:@relpos(p-1)),
    :record_qual => :(record.qual = pos:@relpos(p-1)),
    :record_field => :(push!(record.fields, pos:@relpos(p-1))),
    :record => quote
        # Copy the matched bytes into the record's own buffer.
        appendfrom!(record.data, 1, data, @markpos, p-@markpos)
        record.filled = 1:(p-@markpos)
    end
)
# Body actions: record actions plus line counting; `@escape` pauses the
# machine after each record so the reader can hand it to the caller.
const sam_actions_body = merge(
    sam_actions_record,
    Dict(
        :countline => :(linenum += 1),
        :record => quote
            found_record = true
            $(sam_actions_record[:record])
            @escape
        end,
        :body => :(@escape)
    )
)
# Use the goto-based code generator (fastest Automa backend).
const sam_context = CodeGenContext(generator = :goto)
# Initialization code blocks injected at the top of each generated reader.
const sam_initcode_metainfo = quote
    pos1 = 0
    pos2 = 0
end
const sam_initcode_header = quote
    $(sam_initcode_metainfo)
    metainfo = MetaInfo()
    cs, linenum = state
end
const sam_initcode_record = quote
    pos = 0
end
const sam_initcode_body = quote
    $(sam_initcode_record)
    found_record = false
    cs, linenum = state
end
# Generated entry points. Each `generate_reader` call emits (via `eval`) a
# function that drives the corresponding machine over a TranscodingStream:
#   index!(::MetaInfo), readheader!, index!(::Record), readrecord!.
generate_reader(
    :index!,
    sam_machine_metainfo,
    arguments = (:(metainfo::MetaInfo),),
    actions = sam_actions_metainfo,
    context = sam_context,
    initcode = sam_initcode_metainfo,
) |> eval
const sam_returncode_header = quote
    return cs, linenum
end
generate_reader(
    :readheader!,
    sam_machine_header,
    arguments = (:(header::SAM.Header), :(state::Tuple{Int,Int})),
    actions = sam_actions_header,
    context = sam_context,
    initcode = sam_initcode_header,
    returncode = sam_returncode_header,
    errorcode = quote
        # We expect the SAM header machine to error, as it finds the first non-header byte.
        # This happens at state 1 (hence error state -1), and before reaching EOF.
        if cs == -1 && !(is_eof && p < p_end)
            @goto __return__
        else
            error("Expected input byte after SAM header.")
        end
    end
) |> eval
# Escape the read loop as soon as a complete record has been captured.
const sam_loopcode_body = quote
    if found_record
        @goto __return__
    end
end
generate_reader(
    :index!,
    sam_machine_record,
    arguments = (:(record::Record),),
    actions = sam_actions_record,
    context = sam_context,
    initcode = sam_initcode_record,
) |> eval
const sam_returncode_body = quote
    return cs, linenum, found_record
end
generate_reader(
    :readrecord!,
    sam_machine_body,
    arguments = (:(record::Record), :(state::Tuple{Int,Int})),
    actions = sam_actions_body,
    context = sam_context,
    initcode = sam_initcode_body,
    loopcode = sam_loopcode_body,
    returncode = sam_returncode_body
) |> eval
| XAM | https://github.com/BioJulia/XAM.jl.git |
|
[
"MIT"
] | 0.4.0 | e402710abf3f0bbed192896851bef8e483cf7952 | code | 16057 | # SAM Record
# ==========
# A SAM record: one alignment line. The raw line bytes are kept in `data`
# and every field is stored as an index range into that buffer, so accessors
# materialize strings/numbers lazily.
mutable struct Record <: XAMRecord
    # Data and filled range.
    data::Vector{UInt8}
    filled::UnitRange{Int} # Note: Specifies the data in use.
    # Mandatory fields (ranges into `data`).
    qname::UnitRange{Int}
    flags::UnitRange{Int}
    rname::UnitRange{Int}
    pos::UnitRange{Int}
    mapq::UnitRange{Int}
    cigar::UnitRange{Int}
    rnext::UnitRange{Int}
    pnext::UnitRange{Int}
    tlen::UnitRange{Int}
    seq::UnitRange{Int}
    qual::UnitRange{Int}
    # Auxiliary fields (one range per TAG:TYPE:VALUE entry).
    fields::Vector{UnitRange{Int}}
end
"""
    SAM.Record()

Create an unfilled SAM record.
"""
function Record()
    unfilled = 1:0
    return Record(
        UInt8[], unfilled,
        # qname-mapq
        unfilled, unfilled, unfilled, unfilled, unfilled,
        # cigar-seq
        unfilled, unfilled, unfilled, unfilled, unfilled,
        # qual and fields
        unfilled, UnitRange{Int}[])
end
"""
    SAM.Record(data::Vector{UInt8})

Create a SAM record from `data`.
This function verifies the format and indexes fields for accessors.
Note that the ownership of `data` is transferred to a new record object.
"""
Record(data::Vector{UInt8}) = convert(Record, data)
# Build a record around `data` and run the parser to index its fields.
function Base.convert(::Type{Record}, data::Vector{UInt8})
    unfilled = 1:0
    record = Record(
        data, unfilled,
        # qname-mapq
        unfilled, unfilled, unfilled, unfilled, unfilled,
        # cigar-seq
        unfilled, unfilled, unfilled, unfilled, unfilled,
        # qual and fields
        unfilled, UnitRange{Int}[])
    index!(record)
    return record
end
"""
    SAM.Record(str::AbstractString)

Create a SAM record from `str`.
This function verifies the format and indexes fields for accessors.
"""
Record(str::AbstractString) = convert(Record, str)
# Convert a string to a record by copying its bytes and indexing them.
Base.convert(::Type{Record}, str::AbstractString) = Record(collect(UInt8, str))
# Two records are equal when every indexed field range matches and the bytes
# actually in use are identical (unused buffer capacity is ignored).
function Base.:(==)(a::Record, b::Record)
    return (a.filled, a.qname, a.flags, a.rname, a.pos, a.mapq,
            a.cigar, a.rnext, a.pnext, a.tlen, a.seq, a.qual) ==
           (b.filled, b.qname, b.flags, b.rname, b.pos, b.mapq,
            b.cigar, b.rnext, b.pnext, b.tlen, b.seq, b.qual) &&
           a.fields == b.fields &&
           a.data[a.filled] == b.data[b.filled]
end
# Multi-line pretty-printer: one labelled line per mandatory field, with
# "<missing>" for absent values, followed by the raw auxiliary entries.
function Base.show(io::IO, record::Record)
    print(io, summary(record), ':')
    if isfilled(record)
        println(io)
        println(io, "      template name: ", hastempname(record) ? tempname(record) : "<missing>")
        println(io, "              flags: ", hasflags(record) ? flags(record) : "<missing>")
        println(io, "          reference: ", hasrefname(record) ? refname(record) : "<missing>")
        println(io, "           position: ", hasposition(record) ? position(record) : "<missing>")
        println(io, "    mapping quality: ", hasmappingquality(record) ? mappingquality(record) : "<missing>")
        println(io, "              CIGAR: ", hascigar(record) ? cigar(record) : "<missing>")
        println(io, "     next reference: ", hasnextrefname(record) ? nextrefname(record) : "<missing>")
        println(io, "      next position: ", hasnextposition(record) ? nextposition(record) : "<missing>")
        println(io, "    template length: ", hastemplength(record) ? templength(record) : "<missing>")
        println(io, "           sequence: ", hassequence(record) ? sequence(String, record) : "<missing>")
        println(io, "       base quality: ", hasquality(record) ? quality(String, record) : "<missing>")
        print(io, "     auxiliary data:")
        for field in record.fields
            print(io, ' ', String(record.data[field]))
        end
    else
        print(io, " <not filled>")
    end
end

# `print` emits the raw SAM line (see `Base.write` below).
function Base.print(io::IO, record::Record)
    write(io, record)
    return nothing
end
# Write the filled bytes of `record` to `io`; returns the byte count.
function Base.write(io::IO, record::Record)
    checkfilled(record)
    used = record.filled
    return unsafe_write(io, pointer(record.data, first(used)), length(used))
end
# Duplicate a record. The data buffer and the auxiliary-field index are
# copied so the new record is fully independent of the original; the range
# fields are immutable and shared by value.
function Base.copy(record::Record)
    return Record(
        copy(record.data), record.filled,
        record.qname, record.flags, record.rname, record.pos, record.mapq,
        record.cigar, record.rnext, record.pnext, record.tlen,
        record.seq, record.qual,
        copy(record.fields))
end
# Accessor Functions
# ------------------

"""
    flags(record::Record)::UInt16

Get the bitwise flags of `record` (SAM FLAG column).
"""
function flags(record::Record)::UInt16
    checkfilled(record)
    return unsafe_parse_decimal(UInt16, record.data, record.flags)
end

# Flags are mandatory, so any filled record has them.
function hasflags(record::Record)
    return isfilled(record)
end

"""
    refname(record::Record)::String

Get the reference sequence name of `record`.
"""
function refname(record::Record)
    checkfilled(record)
    if ismissing(record, record.rname)
        missingerror(:refname)
    end
    return String(record.data[record.rname])
end

# True when the RNAME column is present and not the '*' placeholder.
function hasrefname(record::Record)
    return isfilled(record) && !ismissing(record, record.rname)
end

"""
    position(record::Record)::Int

Get the 1-based leftmost mapping position of `record`.
"""
function position(record::Record)::Int
    checkfilled(record)
    pos = unsafe_parse_decimal(Int, record.data, record.pos)
    # if pos == 0
    #     missingerror(:position)
    # end
    return pos
end

# POS of "0" means unmapped/unavailable in SAM.
function hasposition(record::Record)
    return isfilled(record) && (length(record.pos) != 1 || record.data[first(record.pos)] != UInt8('0'))
end

"""
    rightposition(record::Record)::Int

Get the 1-based rightmost mapping position of `record`.
"""
function rightposition(record::Record)
    return position(record) + alignlength(record) - 1
end

function hasrightposition(record::Record)
    return hasposition(record) && hasalignment(record)
end
"""
    nextrefname(record::Record)::String

Get the reference name of the mate/next read of `record`.
"""
function nextrefname(record::Record)::String
    checkfilled(record)
    if ismissing(record, record.rnext)
        missingerror(:nextrefname)
    end
    return String(record.data[record.rnext])
end

# True when the RNEXT column is present and not the '*' placeholder.
function hasnextrefname(record::Record)
    return isfilled(record) && !ismissing(record, record.rnext)
end

"""
    nextposition(record::Record)::Int

Get the position of the mate/next read of `record`.
"""
function nextposition(record::Record)::Int
    checkfilled(record)
    pos = unsafe_parse_decimal(Int, record.data, record.pnext)
    # if pos == 0
    #     missingerror(:nextposition)
    # end
    return pos
end

# PNEXT of "0" means unavailable in SAM.
function hasnextposition(record::Record)
    return isfilled(record) && (length(record.pnext) != 1 || record.data[first(record.pnext)] != UInt8('0'))
end

"""
    mappingquality(record::Record)::UInt8

Get the mapping quality of `record`. A stored value of 255 (0xff) means
the mapping quality is unavailable and raises a missing-field error.
"""
function mappingquality(record::Record)::UInt8
    checkfilled(record)
    qual = unsafe_parse_decimal(UInt8, record.data, record.mapq)
    if qual == 0xff
        missingerror(:mappingquality)
    end
    return qual
end

function hasmappingquality(record::Record)
    return isfilled(record) && unsafe_parse_decimal(UInt8, record.data, record.mapq) != 0xff
end
"""
    cigar(record::Record)::String

Get the CIGAR string of `record`. Returns an empty string when the CIGAR
column is the '*' placeholder.
"""
function cigar(record::Record)::String
    checkfilled(record)
    if ismissing(record, record.cigar)
        # missingerror(:cigar)
        return ""
    end
    return String(record.data[record.cigar])
end

function hascigar(record::Record)
    return isfilled(record) && !ismissing(record, record.cigar)
end

"""
    alignment(record::Record)::BioAlignments.Alignment

Get the alignment of `record`. Unmapped records yield an empty alignment.
"""
function alignment(record::Record)::BioAlignments.Alignment
    if ismapped(record)
        return BioAlignments.Alignment(cigar(record), 1, position(record))
    end
    return BioAlignments.Alignment(BioAlignments.AlignmentAnchor[])
end

function hasalignment(record::Record)
    return isfilled(record) && hascigar(record)
end

"""
    alignlength(record::Record)::Int

Get the alignment length of `record`: the number of reference positions
consumed by its CIGAR operations.
"""
function alignlength(record::Record)::Int
    # A lone '*' means no CIGAR, hence zero aligned length.
    if length(record.cigar) == 1 && record.data[first(record.cigar)] == UInt8('*')
        return 0
    end
    ret::Int = 0
    len = 0 # operation length
    # Scan the CIGAR bytes: accumulate digits into `len`, then classify the
    # operation character that follows.
    for i in record.cigar
        c = record.data[i]
        if in(c, UInt8('0'):UInt8('9'))
            len = len * 10 + (c - UInt8('0'))
            continue
        end
        op = convert(BioAlignments.Operation, Char(c))
        if BioAlignments.ismatchop(op) || BioAlignments.isdeleteop(op) #Note: reference consuming ops ('M', 'D', 'N', '=', 'X').
            ret += len
        end
        len = 0
    end
    return ret
end
"""
    tempname(record::Record)::String

Get the query template name of `record` (SAM QNAME column).
"""
function tempname(record::Record)::String
    checkfilled(record)
    if ismissing(record, record.qname)
        missingerror(:tempname)
    end
    return String(record.data[record.qname])
end

function hastempname(record::Record)
    return isfilled(record) && !ismissing(record, record.qname)
end

"""
    templength(record::Record)::Int

Get the template length of `record` (SAM TLEN column).
"""
function templength(record::Record)::Int
    checkfilled(record)
    len = unsafe_parse_decimal(Int, record.data, record.tlen)
    # if len == 0
    #     missingerror(:tlen)
    # end
    return len
end

# TLEN of "0" means unavailable in SAM.
function hastemplength(record::Record)
    return isfilled(record) && (length(record.tlen) != 1 || record.data[first(record.tlen)] != UInt8('0'))
end

"""
    sequence(record::Record)::BioSequences.LongDNA{4}

Get the segment sequence of `record`. Returns `nothing` when the SEQ
column is the '*' placeholder.
"""
function sequence(record::Record)
    checkfilled(record)
    if ismissing(record, record.seq)
        # missingerror(:sequence)
        return nothing
    end
    seqlen = length(record.seq)
    ret = BioSequences.LongDNA{4}(undef, seqlen)
    # Decode the ASCII bases directly into the 4-bit encoded sequence.
    copyto!(ret, 1, record.data, first(record.seq), seqlen)
    return ret
end

function hassequence(record::Record)
    return isfilled(record) && !ismissing(record, record.seq)
end

"""
    sequence(::Type{String}, record::Record)::String

Get the segment sequence of `record` as `String`.
"""
function sequence(::Type{String}, record::Record)::String
    checkfilled(record)
    return String(record.data[record.seq])
end
"""
    seqlength(record::Record)::Int

Get the sequence length of `record`.
"""
function seqlength(record::Record)::Int
    checkfilled(record)
    if ismissing(record, record.seq)
        missingerror(:seq)
    end
    return length(record.seq)
end

function hasseqlength(record::Record)
    return isfilled(record) && !ismissing(record, record.seq)
end

"""
    quality(record::Record)::Vector{UInt8}

Get the Phred-scaled base quality of `record` (ASCII qualities minus the
33 offset).
"""
function quality(record::Record)::Vector{UInt8}
    checkfilled(record)
    if ismissing(record, record.qual)
        missingerror(:quality)
    end
    qual = record.data[record.qual]
    # Convert in place from ASCII (Phred+33) to raw Phred scores.
    for i in 1:lastindex(qual)
        @inbounds qual[i] -= 33
    end
    return qual
end

function hasquality(record::Record)
    return isfilled(record) && !ismissing(record, record.qual)
end

"""
    quality(::Type{String}, record::Record)::String

Get the ASCII-encoded base quality of `record`.
"""
function quality(::Type{String}, record::Record)::String
    checkfilled(record)
    return String(record.data[record.qual])
end
"""
    auxdata(record::Record)::Dict{String,Any}

Get the auxiliary data (optional fields) of `record` as a tag-to-value map.
"""
function auxdata(record::Record)::Dict{String,Any}
    checkfilled(record)
    entries = [auxtag => record[auxtag] for auxtag in keys(record)]
    return Dict{String,Any}(entries)
end
# True when `record` carries an auxiliary field with the given two-byte tag.
Base.haskey(record::Record, tag::AbstractString) = findauxtag(record, tag) > 0
# Look up an auxiliary field by tag and decode its value according to the
# one-byte SAM type code (A/i/f/Z/H/B). Throws `KeyError` for unknown tags.
function Base.getindex(record::Record, tag::AbstractString)
    i = findauxtag(record, tag)
    if i == 0
        throw(KeyError(tag))
    end
    field = record.fields[i]
    # data type: the byte after "TG:".
    typ = record.data[first(field)+3]
    # value starts after "TG:t:".
    lo = first(field) + 5
    if i == lastindex(record.fields)
        hi = last(field)
    else
        # Stop one byte before the tab separating the next field.
        hi = first(record.fields[i+1]) - 2
    end
    if typ == UInt8('A')
        @assert lo == hi "Values lo and hi must be equivalent."
        return Char(record.data[lo])
    end
    if typ == UInt8('i')
        return unsafe_parse_decimal(Int, record.data, lo:hi)
    end
    if typ == UInt8('f')
        # TODO: Call a C function directly for speed?
        return parse(Float32, SubString(record.data[lo:hi]))
    end
    if typ == UInt8('Z')
        return String(record.data[lo:hi])
    end
    if typ == UInt8('H')
        return parse_hexarray(record.data, lo:hi)
    end
    if typ == UInt8('B')
        return parse_typedarray(record.data, lo:hi)
    end
    throw(ArgumentError("type code '$(Char(typ))' is not defined"))
end
# Return the two-letter tags of all auxiliary fields, in file order.
function Base.keys(record::Record)
    checkfilled(record)
    return map(f -> String(record.data[first(f):first(f)+1]), record.fields)
end
# Return the decoded values of all auxiliary fields, parallel to `keys`.
Base.values(record::Record) = [record[auxtag] for auxtag in keys(record)]
# Bio Methods
# -----------
# Thin adapters wiring `Record` into the generic BioGenerics interfaces;
# each simply forwards to the SAM-specific accessor of the same meaning.

function BioGenerics.isfilled(record::Record)
    return !isempty(record.filled)
end

function BioGenerics.seqname(record::Record)
    return tempname(record)
end

function BioGenerics.hasseqname(record::Record)
    return hastempname(record)
end

function BioGenerics.sequence(record::Record)
    return sequence(record)
end

function BioGenerics.hassequence(record::Record)
    return hassequence(record)
end

function BioGenerics.rightposition(record::Record)
    return rightposition(record)
end

function BioGenerics.hasrightposition(record::Record)
    return hasrightposition(record)
end

function BioGenerics.leftposition(record::Record)
    return position(record)
end

function BioGenerics.hasleftposition(record::Record)
    return hasposition(record)
end
# Helper Functions
# ----------------
# Clear every field index, returning `record` to the unfilled state.
# The data buffer itself is kept so it can be reused by the parser.
function Base.empty!(record::Record)
    unfilled = 1:0
    record.filled = unfilled
    record.qname = unfilled
    record.flags = unfilled
    record.rname = unfilled
    record.pos = unfilled
    record.mapq = unfilled
    record.cigar = unfilled
    record.rnext = unfilled
    record.pnext = unfilled
    record.tlen = unfilled
    record.seq = unfilled
    record.qual = unfilled
    empty!(record.fields)
    return record
end
# Legacy alias for `empty!`.
function initialize!(record::Record) #TODO: deprecate.
    return empty!(record)
end

# Throw `ArgumentError` unless `record` has been filled by the parser.
function checkfilled(record::Record)
    if !isfilled(record)
        throw(ArgumentError("unfilled SAM record"))
    end
end
# Return the 1-based index of the auxiliary field whose two-byte tag equals
# `tag`, or 0 when absent (or when `tag` is not exactly two bytes long).
function findauxtag(record::Record, tag::AbstractString)
    checkfilled(record)
    sizeof(tag) == 2 || return 0
    b1, b2 = UInt8(tag[1]), UInt8(tag[2])
    for (i, field) in enumerate(record.fields)
        p = first(field)
        if record.data[p] == b1 && record.data[p+1] == b2
            return i
        end
    end
    return 0
end
"""
    parse_hexarray(data::Vector{UInt8}, range::UnitRange{Int})

Decode the hex-encoded byte array (SAM auxiliary type 'H') stored at
`data[range]` into a `Vector{UInt8}`.

`range` must cover an even number of hexadecimal characters `[0-9A-F]`;
each pair of characters becomes one output byte. Throws an error when a
non-hex character is encountered.
"""
function parse_hexarray(data::Vector{UInt8}, range::UnitRange{Int})
    @assert iseven(length(range))
    # Fix: allocate with `undef`; `Vector{UInt8}(n)` is not a valid
    # constructor in Julia >= 1.0.
    ret = Vector{UInt8}(undef, length(range) >> 1)
    byte2hex(b) = b ∈ 0x30:0x39 ? (b - 0x30) : b ∈ 0x41:0x46 ? (b - 0x41 + 0x0A) : error("not in [0-9A-F]")
    j = 1
    # `i` already walks absolute indices into `data` two hex digits at a
    # time. Fix: index `data[i]` directly — the previous `data[range[i]]`
    # double-applied the offset and failed for any field not starting at 1.
    for i in first(range):2:last(range)-1
        ret[j] = (byte2hex(data[i]) << 4) | byte2hex(data[i+1])
        j += 1
    end
    return ret
end
"""
    parse_typedarray(data::Vector{UInt8}, range::UnitRange{Int})

Parse the typed numeric array (SAM auxiliary type 'B') stored at
`data[range]`.

The stored format is `[cCsSiIf](,value)+`: a one-letter element type code
followed by comma-separated values. Returns a vector of the matching Julia
type (`Int8`, `UInt8`, `Int16`, `UInt16`, `Int32`, `UInt32` or `Float32`).
Throws `ArgumentError` on an unknown type code.
"""
function parse_typedarray(data::Vector{UInt8}, range::UnitRange{Int})
    # format: [cCsSiIf](,[-+]?[0-9]*\.?[0-9]+([eE][-+]?[0-9]+)?)+
    t = data[first(range)]
    # Fix: values are comma-separated, so split on ',' explicitly; the
    # default whitespace split returned the whole list as a single token and
    # broke every multi-element array.
    xs = split(String(data[first(range)+2:last(range)]), ',')
    if t == UInt8('c')
        return [parse(Int8, x) for x in xs]
    end
    if t == UInt8('C')
        return [parse(UInt8, x) for x in xs]
    end
    if t == UInt8('s')
        return [parse(Int16, x) for x in xs]
    end
    if t == UInt8('S')
        return [parse(UInt16, x) for x in xs]
    end
    if t == UInt8('i')
        return [parse(Int32, x) for x in xs]
    end
    if t == UInt8('I')
        return [parse(UInt32, x) for x in xs]
    end
    if t == UInt8('f')
        return [parse(Float32, x) for x in xs]
    end
    throw(ArgumentError("type code '$(Char(t))' is not defined"))
end
# A SAM field is "missing" when it is the single placeholder byte '*'.
function ismissing(record::Record, range::UnitRange{Int})
    return length(range) == 1 && record.data[first(range)] == UInt8('*')
end
| XAM | https://github.com/BioJulia/XAM.jl.git |
|
[
"MIT"
] | 0.4.0 | e402710abf3f0bbed192896851bef8e483cf7952 | code | 1611 | # SAM File Format
# ===============
module SAM
using BioGenerics
import BioAlignments
import BioGenerics: BioGenerics, isfilled, header
import BioGenerics.Exceptions: missingerror
import BioGenerics.Automa: State
import BioSequences
import TranscodingStreams: TranscodingStreams, TranscodingStream
import ..XAM: flags, XAMRecord, XAMReader, XAMWriter,
ismapped, isprimaryalignment, ispositivestrand, isnextmapped #TODO: Deprecate import of flag queries. These were imported to preseve existing API.
using Printf: @sprintf
#TODO: update import BioCore.RecordHelper: unsafe_parse_decimal
# r"[0-9]+" must match `data[range]`.
# Parse an unsigned decimal integer from the ASCII digits in `data[range]`.
# Checked arithmetic makes overflow raise instead of wrapping silently.
function unsafe_parse_decimal(::Type{T}, data::Vector{UInt8}, range::UnitRange{Int}) where {T<:Unsigned}
    x = zero(T)
    @inbounds for i in range
        digit = (data[i] - UInt8('0')) % T
        x = Base.Checked.checked_add(Base.Checked.checked_mul(x, 10 % T), digit)
    end
    return x
end
# r"[-+]?[0-9]+" must match `data[range]`.
# Parse a signed decimal integer with an optional leading '+' or '-'.
# Checked arithmetic makes overflow raise instead of wrapping silently.
function unsafe_parse_decimal(::Type{T}, data::Vector{UInt8}, range::UnitRange{Int}) where {T<:Signed}
    lo = first(range)
    sign = one(T)
    lead = data[lo]
    if lead == UInt8('-')
        sign = -one(T)
        lo += 1
    elseif lead == UInt8('+')
        lo += 1
    end
    x = zero(T)
    @inbounds for i in lo:last(range)
        digit = (data[i] - UInt8('0')) % T
        x = Base.Checked.checked_add(Base.Checked.checked_mul(x, 10 % T), digit)
    end
    return sign * x
end
include("metainfo.jl")
include("record.jl")
include("header.jl")
include("reader.jl")
include("readrecord.jl")
include("writer.jl")
end
| XAM | https://github.com/BioJulia/XAM.jl.git |
|
[
"MIT"
] | 0.4.0 | e402710abf3f0bbed192896851bef8e483cf7952 | code | 895 | # SAM Writer
# ==========
"""
    Writer(output::IO, header::Header=Header())

Create a data writer of the SAM file format.

# Arguments
* `output`: data sink
* `header=Header()`: SAM header object
"""
mutable struct Writer <: XAMWriter
    stream::IO
    function Writer(output::IO, header::Header=Header())
        # Emit the header immediately so records can follow directly.
        w = new(output)
        write(w, header)
        return w
    end
end
# Expose the underlying stream for the BioGenerics IO interface.
BioGenerics.IO.stream(writer::Writer) = writer.stream
# Write every metainfo line of `header`; returns the total bytes written.
function Base.write(writer::Writer, header::Header)
    nbytes = 0
    for metainfo in header
        nbytes += write(writer, metainfo)
    end
    return nbytes
end
# Write one header line followed by a newline; returns bytes written.
function Base.write(writer::Writer, metainfo::MetaInfo)
    checkfilled(metainfo)
    return write(writer.stream, metainfo, '\n')
end

# Write one record line followed by a newline; returns bytes written.
function Base.write(writer::Writer, record::Record)
    checkfilled(record)
    return write(writer.stream, record, '\n')
end
| XAM | https://github.com/BioJulia/XAM.jl.git |
|
[
"MIT"
] | 0.4.0 | e402710abf3f0bbed192896851bef8e483cf7952 | code | 764 | using Test
using Documenter
using BioGenerics
using FormatSpecimens
using GenomicFeatures
using XAM
import BioAlignments: Alignment, AlignmentAnchor, OP_START, OP_MATCH, OP_DELETE
import BGZFStreams: BGZFStream
import BioGenerics.Exceptions: MissingFieldException
import BioSequences: @dna_str, @aa_str
# Generate a random subrange of `range`: draw two endpoints and return them
# in ascending order.
function randrange(range)
    lo = rand(range)
    hi = rand(range)
    return min(lo, hi):max(lo, hi)
end
# Top-level test entry point: each included file contributes its own
# @testset; doctests run last against the package docstrings.
@testset "XAM" begin
    include("test_sam.jl")
    include("test_bam.jl")
    include("test_issues.jl")
    include("test_crosscheck.jl")
    # Include doctests.
    DocMeta.setdocmeta!(XAM, :DocTestSetup, :(using XAM); recursive=true)
    doctest(XAM; manual = false)
end
| XAM | https://github.com/BioJulia/XAM.jl.git |
|
[
"MIT"
] | 0.4.0 | e402710abf3f0bbed192896851bef8e483cf7952 | code | 11510 | @testset "BAM" begin
bamdir = path_of_format("BAM")
@testset "AuxData" begin
auxdata = BAM.AuxData(UInt8[])
@test isempty(auxdata)
buf = IOBuffer()
write(buf, "NM", UInt8('s'), Int16(1))
auxdata = BAM.AuxData(take!(buf))
@test length(auxdata) == 1
@test auxdata["NM"] === Int16(1)
@test collect(auxdata) == ["NM" => Int16(1)]
buf = IOBuffer()
write(buf, "AS", UInt8('c'), Int8(-18))
write(buf, "NM", UInt8('s'), Int16(1))
write(buf, "XA", UInt8('f'), Float32(3.14))
write(buf, "XB", UInt8('Z'), "some text\0")
write(buf, "XC", UInt8('B'), UInt8('i'), Int32(3), Int32[10, -5, 8])
auxdata = BAM.AuxData(take!(buf))
@test length(auxdata) == 5
@test auxdata["AS"] === Int8(-18)
@test auxdata["NM"] === Int16(1)
@test auxdata["XA"] === Float32(3.14)
@test auxdata["XB"] == "some text"
@test auxdata["XC"] == Int32[10, -5, 8]
@test convert(Dict{String,Any}, auxdata) == Dict(
"AS" => Int8(-18),
"NM" => Int16(1),
"XA" => Float32(3.14),
"XB" => "some text",
"XC" => Int32[10, -5, 8])
end
@testset "Record" begin
record = BAM.Record()
@test !isfilled(record)
@test repr(record) == "XAM.BAM.Record: <not filled>"
@test_throws ArgumentError BAM.flags(record)
end
@testset "Reader" begin
reader = open(BAM.Reader, joinpath(bamdir, "ce#1.bam"))
@test isa(reader, BAM.Reader)
@test eltype(reader) === BAM.Record
@test startswith(repr(reader), "XAM.BAM.Reader{IOStream}:")
# header
h = header(reader)
@test isa(h, SAM.Header)
# first record
record = BAM.Record()
read!(reader, record)
@test BAM.ismapped(record)
@test BAM.isprimaryalignment(record)
@test !BAM.ispositivestrand(record)
@test BAM.refname(record) == "CHROMOSOME_I"
@test BAM.refid(record) === 1
@test BAM.hasnextrefid(record)
@test BAM.nextrefid(record) === 0
@test BAM.hasposition(record) === hasleftposition(record) === true
@test BAM.position(record) === leftposition(record) === 2
@test BAM.hasnextposition(record)
@test BAM.nextposition(record) === 0
@test rightposition(record) == 102
@test BAM.hastempname(record) === hasseqname(record) === true
@test BAM.tempname(record) == seqname(record) == "SRR065390.14978392"
@test BAM.hassequence(record) === hassequence(record) === true
@test BAM.sequence(record) == sequence(record) == dna"""
CCTAGCCCTAACCCTAACCCTAACCCTAGCCTAAGCCTAAGCCTAAGCCT
AAGCCTAAGCCTAAGCCTAAGCCTAAGCCTAAGCCTAAGCCTAAGCCTAA
"""
@test BAM.seqlength(record) === 100
@test BAM.hasquality(record)
@test eltype(BAM.quality(record)) == UInt8
@test BAM.quality(record) == [Int(x) - 33 for x in "#############################@B?8B?BA@@DDBCDDCBC@CDCDCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCC"]
@test BAM.flags(record) === UInt16(16)
@test BAM.cigar(record) == "27M1D73M"
@test BAM.alignment(record) == Alignment([
AlignmentAnchor( 0, 1, 0, OP_START),
AlignmentAnchor( 27, 28, 27, OP_MATCH),
AlignmentAnchor( 27, 29, 28, OP_DELETE),
AlignmentAnchor(100, 102, 101, OP_MATCH)])
@test record["XG"] == 1
@test record["XM"] == 5
@test record["XN"] == 0
@test record["XO"] == 1
@test record["AS"] == -18
@test record["XS"] == -18
@test record["YT"] == "UU"
@test keys(record) == ["XG","XM","XN","XO","AS","XS","YT"]
@test values(record) == [1, 5, 0, 1, -18, -18, "UU"]
@test eof(reader)
close(reader)
# Test conversion from byte array to record
dsize = BAM.data_size(record)
array = Vector{UInt8}(undef, BAM.FIXED_FIELDS_BYTES + dsize)
GC.@preserve array record begin
ptr = Ptr{UInt8}(pointer_from_objref(record))
unsafe_copyto!(pointer(array), ptr, BAM.FIXED_FIELDS_BYTES)
unsafe_copyto!(array, BAM.FIXED_FIELDS_BYTES + 1, record.data, 1, dsize)
end
new_record = convert(BAM.Record, array)
@test record.l_read_name == new_record.l_read_name
@test record.mapq == new_record.mapq
@test record.bin == new_record.bin
@test record.block_size == new_record.block_size
@test record.flags == new_record.flags
@test record.n_cigar_op == new_record.n_cigar_op
@test record.l_seq == new_record.l_seq
@test record.next_refid == new_record.next_refid
@test record.next_pos == new_record.next_pos
@test record.refid == new_record.refid
@test record.pos == new_record.pos
@test record.tlen == new_record.tlen
@test record.data == new_record.data
# rightposition (also implicitly alignlength)
records = collect(open(BAM.Reader, joinpath(bamdir, "ce#5b.bam")))
@test BAM.rightposition(records[6]) == rightposition(records[6]) == 83
# iterator
@test length(collect(open(BAM.Reader, joinpath(bamdir, "ce#1.bam")))) == 1
@test length(collect(open(BAM.Reader, joinpath(bamdir, "ce#2.bam")))) == 2
# IOStream
@test length(collect(BAM.Reader(open(joinpath(bamdir, "ce#1.bam"))))) == 1
@test length(collect(BAM.Reader(open(joinpath(bamdir, "ce#2.bam"))))) == 2
end
# Regression tests for alignments whose CIGAR has more operations than the
# BAM fixed-size field can hold (test file cigar-64k.bam).
# `BAM.cigar`/`BAM.cigar_rle` take a second boolean argument selecting which
# storage location is read (presumably the `CG` auxiliary tag vs. the fixed
# CIGAR field — TODO confirm against the accessors' docstrings); for long
# alignments the two locations are expected to disagree.
@testset "Read long CIGARs" begin
# Number of run-length-encoded CIGAR operations reported by each code path.
function get_cigar_lens(rec::BAM.Record)
cigar_ops, cigar_n = BAM.cigar_rle(rec)
field_ops, field_n = BAM.cigar_rle(rec, false)
cigar_l = length(cigar_ops)
field_l = length(field_ops)
return cigar_l, field_l
end
# True when BOTH the CIGAR strings and their operation counts disagree
# between the two code paths, i.e. the record carries a long CIGAR.
function check_cigar_vs_field(rec::BAM.Record)
cigar = BAM.cigar(rec)
field = BAM.cigar(rec, false)
cigar_l, field_l = get_cigar_lens(rec)
return cigar != field && cigar_l != field_l
end
# True when the operation counts from both code paths match the expected values.
function check_cigar_lens(rec::BAM.Record, field_len, cigar_len)
cigar_l, field_l = get_cigar_lens(rec)
return cigar_l == cigar_len && field_l == field_len
end
reader = open(BAM.Reader, joinpath(bamdir, "cigar-64k.bam"))
rec = BAM.Record()
read!(reader, rec)
# First record: a short CIGAR, so both code paths agree.
@test !check_cigar_vs_field(rec)
read!(reader, rec)
# Second record: a long CIGAR (72091 operations), so the fixed field differs.
@test check_cigar_vs_field(rec)
@test check_cigar_lens(rec, 2, 72091)
end
# Pairwise comparison of two collections of BAM records.
# Returns `true` only when the collections have equal length and every
# corresponding pair agrees on all fixed-length header fields as well as on
# the used portion of the variable-length `data` payload (`BAM.data_size`).
function compare_records(xs, ys)
    length(xs) == length(ys) || return false
    for (a, b) in zip(xs, ys)
        same =
            a.block_size == b.block_size &&
            a.refid == b.refid &&
            a.pos == b.pos &&
            a.l_read_name == b.l_read_name &&
            a.mapq == b.mapq &&
            a.bin == b.bin &&
            a.n_cigar_op == b.n_cigar_op &&
            a.flags == b.flags &&
            a.l_seq == b.l_seq &&
            a.next_refid == b.next_refid &&
            a.next_pos == b.next_pos &&
            a.tlen == b.tlen &&
            # Only the filled prefix of `data` is meaningful; trailing bytes
            # are scratch space and are deliberately ignored.
            a.data[1:BAM.data_size(a)] == b.data[1:BAM.data_size(b)]
        same || return false
    end
    return true
end
# Round-trip test: every valid BAM specimen is read, written back out through
# BAM.Writer, and the copy must reproduce the original header and records.
@testset "Round trip" begin
for specimen in list_valid_specimens("BAM")
filepath = joinpath(bamdir, filename(specimen))
mktemp() do path, _
# copy
# Open with the accompanying .bai index when the specimen ships one.
if hastags(specimen) && in("bai", tags(specimen))
reader = open(BAM.Reader, filepath, index=filepath * ".bai")
else
reader = open(BAM.Reader, filepath)
end
header_original = header(reader)
# fillSQ=true asks the writer to synthesize @SQ lines when the source
# header has none, so the output header stays valid.
writer = BAM.Writer(BGZFStream(path, "w"), BAM.header(reader, fillSQ=isempty(findall(header(reader), "SQ"))))
records = BAM.Record[]
for record in reader
push!(records, record)
write(writer, record)
end
close(reader)
close(writer)
# Check that EOF_BLOCK gets written.
# The BAM spec requires a special empty BGZF block as an end-of-file
# marker; verify the file's final bytes are exactly that block.
nbytes = filesize(path)
@test BAM.BGZFStreams.EOF_BLOCK == open(path) do io
seek(io, nbytes - length(BAM.BGZFStreams.EOF_BLOCK))
read(io)
end
# Re-read the copy and compare against what was written.
reader = open(BAM.Reader, path)
@test header(reader) == header_original
@test compare_records(collect(reader), records)
close(reader)
end
end
end
# Reading into a single preallocated record via `read!` must yield exactly
# the same records, in order, as the allocating iterator interface.
@testset "In-Place-Reading Pattern" begin
file_bam = joinpath(bamdir, "ce#5b.bam")
# Reference: collect all records with the allocating iterator.
records = open(collect, BAM.Reader, file_bam)
reader = open(BAM.Reader, file_bam)
record = BAM.Record()
i = 0
while !eof(reader)
empty!(record) # Reset the record.
read!(reader, record)
i = i + 1
@test records[i] == record
end
close(reader)
end
# A pre-constructed BAM.BAI index object must be accepted by BAM.Reader's
# `index` keyword; an unsupported index value must raise an error.
@testset "BAI" begin
filepath = joinpath(bamdir, "GSE25840_GSM424320_GM06985_gencode_spliced.head.bam")
index = BAM.BAI(filepath * ".bai")
reader = open(BAM.Reader, filepath, index=index)
@test isa(eachoverlap(reader, "chr1", 1:100), BAM.OverlapIterator)
close(reader)
# An integer is not a valid index argument.
@test_throws ErrorException open(BAM.Reader, filepath, index=1234)
end
# Indexed (BAI-based) random access: eachoverlap must return the same records
# as a full linear scan, and overlap counts must match samtools' answers.
@testset "Random access" begin
filepath = joinpath(bamdir, "GSE25840_GSM424320_GM06985_gencode_spliced.head.bam")
reader = open(BAM.Reader, filepath, index=filepath * ".bai")
# Both the (refname, range) and the Interval forms construct an overlap iterator.
@test isa(eachoverlap(reader, "chr1", 1:100), BAM.OverlapIterator)
@test isa(eachoverlap(reader, GenomicFeatures.Interval("chr1", 1, 100)), BAM.OverlapIterator)
# expected values are counted using samtools
for (refname, interval, expected) in [
("chr1", 1_000:10000, 21),
("chr1", 8_000:10000, 20),
("chr1", 766_000:800_000, 142),
("chr1", 786_000:800_000, 1),
("chr1", 796_000:800_000, 0)]
intsect = eachoverlap(reader, refname, interval)
@test eltype(intsect) == BAM.Record
@test count(_ -> true, intsect) == expected
# check that the intersection iterator is stateless
@test count(_ -> true, intsect) == expected
end
# randomized tests
# `randrange` is presumably a helper defined elsewhere in the test suite —
# it yields a random sub-range of the given range (TODO confirm).
for n in 1:50
refindex = 1
refname = "chr1"
range = randrange(1:1_000_000)
seekstart(reader)
# linear scan
expected = filter(collect(reader)) do record
BAM.compare_intervals(record, (refindex, range)) == 0
end
# indexed scan
actual = collect(eachoverlap(reader, refname, range))
@test compare_records(actual, expected)
end
close(reader)
# Regression: a region with no overlapping records must yield an empty result.
filepath = joinpath(bamdir, "R_12h_D06.uniq.q40.bam")
reader = open(BAM.Reader, filepath, index=filepath * ".bai")
@test isempty(collect(eachoverlap(reader, "chr19", 5823708:5846478)))
close(reader)
end
end
| XAM | https://github.com/BioJulia/XAM.jl.git |
|
[
"MIT"
] | 0.4.0 | e402710abf3f0bbed192896851bef8e483cf7952 | code | 1237 | @testset "Cross Check Properties" begin
Broadcast.broadcastable(x::XAM.BAM.Record) = Ref(x) #TODO: consider moving to XAM.jl.
Broadcast.broadcastable(x::XAM.SAM.Record) = Ref(x) #TODO: consider moving to XAM.jl
# Compare one accessor (e.g. `:position`) between a BAM record and the
# corresponding SAM record. Returns `true` when the two modules' accessors
# agree; otherwise logs a warning carrying both values and returns `false`.
function crosscheck(bam::BAM.Record, sam::SAM.Record, property::Symbol)
    # Look up the accessor of the same name in each submodule.
    bam_property = getproperty(XAM.BAM, property)
    sam_property = getproperty(XAM.SAM, property)
    if bam_property(bam) == sam_property(sam)
        return true
    end
    @warn "$property result is not the same" bam_property(bam) sam_property(sam)
    return false
end
samdir = path_of_format("SAM")
bamdir = path_of_format("BAM")
filenames = [
"ce#1",
"ce#2",
"ce#5",
"ce#5b",
"ce#unmap",
"ce#unmap1",
"ce#unmap2",
]
properties = [
:position,# POS
:tempname,# QNAME
:mappingquality,# MAPQ
:cigar, # CIGAR
:flags, # FLAG
:sequence, # SEQ
:nextposition, # PNEXT
:templength, # TLEN
]
for filename in filenames
records_bam = collect(open(BAM.Reader, joinpath(bamdir, filename * ".bam")))
records_sam = collect(open(SAM.Reader, joinpath(samdir, filename * ".sam")))
for (bam, sam) in zip(records_bam, records_sam)
@test all(crosscheck.(bam, sam, properties)) == true
end
end
end # testset Crosscheck
| XAM | https://github.com/BioJulia/XAM.jl.git |
|
[
"MIT"
] | 0.4.0 | e402710abf3f0bbed192896851bef8e483cf7952 | code | 420 | @testset "Issues" begin
# https://github.com/BioJulia/XAM.jl/issues/31
path_bam = joinpath(path_of_format("BAM"), "SRR7993829_1.100K.forward.bam")
open(BAM.Reader, path_bam, index = path_bam * ".bai") do reader
@test count(overlap -> true, eachoverlap(reader, "JH584304.1", 51000:51200)) == 0
@test count(overlap -> true, eachoverlap(reader, "JH584304.1", 51000:51715)) == 1
end
end
| XAM | https://github.com/BioJulia/XAM.jl.git |
|
[
"MIT"
] | 0.4.0 | e402710abf3f0bbed192896851bef8e483cf7952 | code | 7609 | @testset "SAM" begin
samdir = path_of_format("SAM")
# SAM.MetaInfo: construction, string rendering, and key/value access for
# header lines (comment @CO lines vs. structured @HD lines).
@testset "MetaInfo" begin
# A default-constructed MetaInfo is unfilled.
metainfo = SAM.MetaInfo()
@test !isfilled(metainfo)
@test occursin("not filled", repr(metainfo))
# Comment (@CO) lines carry free text only, so keyed access must fail.
metainfo = SAM.MetaInfo("CO", "some comment (parens)")
@test isfilled(metainfo)
@test string(metainfo) == "@CO\tsome comment (parens)"
@test occursin("CO", repr(metainfo))
@test SAM.tag(metainfo) == "CO"
@test SAM.value(metainfo) == "some comment (parens)"
@test_throws ArgumentError keys(metainfo)
@test_throws ArgumentError values(metainfo)
# Structured (@HD) lines expose tag, raw value, and key/value access.
metainfo = SAM.MetaInfo("HD", ["VN" => "1.0", "SO" => "coordinate"])
@test isfilled(metainfo)
@test string(metainfo) == "@HD\tVN:1.0\tSO:coordinate"
@test occursin("HD", repr(metainfo))
@test SAM.tag(metainfo) == "HD"
@test SAM.value(metainfo) == "VN:1.0\tSO:coordinate"
@test keys(metainfo) == ["VN", "SO"]
@test values(metainfo) == ["1.0", "coordinate"]
@test SAM.keyvalues(metainfo) == ["VN" => "1.0", "SO" => "coordinate"]
@test haskey(metainfo, "VN")
@test haskey(metainfo, "SO")
@test !haskey(metainfo, "GO")
@test metainfo["VN"] == "1.0"
@test metainfo["SO"] == "coordinate"
@test_throws KeyError metainfo["GO"]
end
# SAM.Header behaves as a growable collection of SAM.MetaInfo lines.
@testset "Header" begin
header = SAM.Header()
@test isempty(header)
push!(header, SAM.MetaInfo("@HD\tVN:1.0\tSO:coordinate"))
@test !isempty(header)
@test length(header) == 1
push!(header, SAM.MetaInfo("@CO\tsome comment"))
@test length(header) == 2
@test isa(collect(header), Vector{SAM.MetaInfo})
end
# SAM.Record: unfilled state, and field accessors on a record parsed from a
# single SAM line (the example alignment from the SAM specification).
@testset "Record" begin
# A default-constructed record is unfilled and has no accessible fields.
record = SAM.Record()
@test !isfilled(record)
@test !SAM.ismapped(record)
@test repr(record) == "XAM.SAM.Record: <not filled>"
@test_throws ArgumentError SAM.flags(record)
# Parse one SAM line and verify every mandatory field accessor.
record = SAM.Record("r001\t99\tchr1\t7\t30\t8M2I4M1D3M\t=\t37\t39\tTTAGATAAAGGATACTG\t*")
@test isfilled(record)
@test occursin(r"^XAM.SAM.Record:\n", repr(record))
@test SAM.ismapped(record)
@test SAM.isprimaryalignment(record)
@test SAM.hastempname(record)
@test SAM.tempname(record) == "r001"
@test SAM.hasflags(record)
@test SAM.flags(record) === UInt16(99)
@test SAM.hasrefname(record)
@test SAM.refname(record) == "chr1"
@test SAM.hasposition(record)
@test SAM.position(record) === 7
@test SAM.hasmappingquality(record)
@test SAM.mappingquality(record) === UInt8(30)
@test SAM.hascigar(record)
@test SAM.cigar(record) == "8M2I4M1D3M"
@test SAM.hasnextrefname(record)
@test SAM.nextrefname(record) == "="
@test SAM.hasnextposition(record)
@test SAM.nextposition(record) === 37
@test SAM.hastemplength(record)
@test SAM.templength(record) === 39
@test SAM.hassequence(record)
@test SAM.sequence(record) == dna"TTAGATAAAGGATACTG"
# QUAL is "*" in the input line, i.e. quality is absent.
@test !SAM.hasquality(record)
@test_throws MissingFieldException SAM.quality(record)
end
# SAM.Reader: header access, full accessor coverage on the first record of
# ce#1.sam, and the iterator / IOStream construction interfaces.
@testset "Reader" begin
reader = open(SAM.Reader, joinpath(samdir, "ce#1.sam"))
@test isa(reader, SAM.Reader)
@test eltype(reader) === SAM.Record
# header
h = header(reader)
@test string.(findall(header(reader), "SQ")) == ["@SQ\tSN:CHROMOSOME_I\tLN:1009800"]
# first record
record = SAM.Record()
read!(reader, record)
@test SAM.ismapped(record)
@test SAM.refname(record) == "CHROMOSOME_I"
# SAM-prefixed accessors and the generic BioCore-style accessors
# (leftposition, seqname, sequence, ...) must agree.
@test SAM.position(record) == leftposition(record) == 2
@test SAM.rightposition(record) == rightposition(record) == 102
@test SAM.tempname(record) == seqname(record) == "SRR065390.14978392"
@test SAM.sequence(record) == sequence(record) == dna"CCTAGCCCTAACCCTAACCCTAACCCTAGCCTAAGCCTAAGCCTAAGCCTAAGCCTAAGCCTAAGCCTAAGCCTAAGCCTAAGCCTAAGCCTAAGCCTAA"
@test SAM.sequence(String, record) == "CCTAGCCCTAACCCTAACCCTAACCCTAGCCTAAGCCTAAGCCTAAGCCTAAGCCTAAGCCTAAGCCTAAGCCTAAGCCTAAGCCTAAGCCTAAGCCTAA"
@test SAM.seqlength(record) == 100
# Base qualities are PHRED scores, i.e. ASCII values minus 33.
@test SAM.quality(record) == (b"#############################@B?8B?BA@@DDBCDDCBC@CDCDCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCC" .- 33)
@test SAM.quality(String, record) == "#############################@B?8B?BA@@DDBCDDCBC@CDCDCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCC"
@test SAM.flags(record) == 16
@test SAM.cigar(record) == "27M1D73M"
@test SAM.alignment(record) == Alignment([
AlignmentAnchor( 0, 1, 0, OP_START),
AlignmentAnchor( 27, 28, 27, OP_MATCH),
AlignmentAnchor( 27, 29, 28, OP_DELETE),
AlignmentAnchor(100, 102, 101, OP_MATCH)])
# Optional (auxiliary) fields are available via indexing by tag.
@test record["XG"] == 1
@test record["XM"] == 5
@test record["XN"] == 0
@test record["XO"] == 1
@test record["AS"] == -18
@test record["XS"] == -18
@test record["YT"] == "UU"
@test eof(reader)
close(reader)
# rightposition (also implicitly alignlength)
records = collect(open(SAM.Reader, joinpath(samdir, "ce#5b.sam")))
@test SAM.rightposition(records[6]) == rightposition(records[6]) == 83
# iterator
@test length(collect(open(SAM.Reader, joinpath(samdir, "ce#1.sam")))) == 1
@test length(collect(open(SAM.Reader, joinpath(samdir, "ce#2.sam")))) == 2
# IOStream
@test length(collect(SAM.Reader(open(joinpath(samdir, "ce#1.sam"))))) == 1
@test length(collect(SAM.Reader(open(joinpath(samdir, "ce#2.sam"))))) == 2
end
# Round-trip test: every valid SAM specimen is read, written back out through
# SAM.Writer, and the copy must reproduce the original header and records.
@testset "Round trip" begin
# Records compare equal when the filled portions of their raw data agree
# (`x.filled` is presumably the index range of used bytes — TODO confirm).
function compare_records(xs, ys)
if length(xs) != length(ys)
return false
end
for (x, y) in zip(xs, ys)
if x.data[x.filled] != y.data[y.filled]
return false
end
end
return true
end
for specimen in list_valid_specimens("SAM")
filepath = joinpath(samdir, filename(specimen))
mktemp() do path, io
# copy
reader = open(SAM.Reader, filepath)
header_original = header(reader)
writer = SAM.Writer(io, header_original)
records = SAM.Record[]
for record in reader
push!(records, record)
write(writer, record)
end
close(reader)
close(writer)
# Re-read the copy and compare against what was written.
reader = open(SAM.Reader, path)
@test header(reader) == header_original
@test compare_records(collect(reader), records)
close(reader)
end
end
end
# Reading into a single preallocated record via `read!` must yield exactly
# the same records, in order, as the allocating iterator interface.
@testset "In-Place-Reading Pattern" begin
file_sam = joinpath(samdir, "ce#5b.sam")
# Reference: collect all records with the allocating iterator.
records = open(collect, SAM.Reader, file_sam)
reader = open(SAM.Reader, file_sam)
record = SAM.Record()
i = 0
while !eof(reader)
empty!(record) # Reset the record.
read!(reader, record)
i = i + 1
@test records[i] == record
end
close(reader)
# Test blank file.
# A file with no records must simply produce an empty collection.
file_sam = joinpath(samdir, "xx#blank.sam")
records = open(collect, SAM.Reader, file_sam)
@test records == []
end
end
| XAM | https://github.com/BioJulia/XAM.jl.git |
|
[
"MIT"
] | 0.4.0 | e402710abf3f0bbed192896851bef8e483cf7952 | docs | 2408 | # Changelog
All notable changes to XAM.jl will be documented in this file.
The format is based on [Keep a Changelog](https://keepachangelog.com/en/1.0.0/),
and this project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0.html).
## [Unreleased]
## [0.4.0]
### Added
- Added BAM.Reader index support for BAI object ([#56](https://github.com/BioJulia/XAM.jl/pull/56)).
- Added doi badge.
- Added test to ensure EOF_BLOCK gets written.
- Added `isreversestrand`.
- Added `isfirstsegment`.
- Added `islastsegment`.
### Changed
- Subtype from XAMReader and XAMWriter from common abstract types.
- Subtype from XAMRecord.
- Unified flag queries.
- Improved Slack link.
- Updated to use [Automa](https://github.com/BioJulia/Automa.jl) v1 ([#65](https://github.com/BioJulia/XAM.jl/pull/65)).
- Pointed the Unit Tests badge at the develop branch.
- Pluralised flag.
- Renamed `ismateunmapped` to `isnextunmapped`.
- Renamed `isreverse` to `isreversecomplemented`.
- Renamed `isforward` to `isforwardstrand`.
- `ispositivestrand` aliases `isforwardstrand`.
- `isnegativestrand` aliases `isreversestrand`.
- Renamed `ismatereverse` to `isnextreversecomplemented`.
- `isread1` aliases `isfirstsegment`.
- `isread2` aliases `islastsegment`.
### Fixed
- Updated hts-files.md ([#62](https://github.com/BioJulia/XAM.jl/pull/62)).
- Corrected the behaviour of `isprimaryalignment` with `isprimary`.
### Removed
- Moved the functionality of `isprimary` into `isprimaryalignment`.
## [0.3.1]
### Changed
- Upgraded to BioAlignments v3 ([#55](https://github.com/BioJulia/XAM.jl/pull/55)).
## [0.3.0] - 2022-10-10
## Added
- Crosschecks for SAM and BAM ([#29](https://github.com/BioJulia/XAM.jl/pull/29)).
- Improved documentation for flags ([#43](https://github.com/BioJulia/XAM.jl/pull/43)).
### Changed
- `BAM.quality` performance improved ([#21](https://github.com/BioJulia/XAM.jl/issues/21)).
- Updated BioAlignments to v2.2 and BioSequences to v3 ([#48](https://github.com/BioJulia/XAM.jl/pull/48)).
### Fixed
- `BAM.Record` layout now matches the BAM specs ([#26](https://github.com/BioJulia/XAM.jl/pull/26)).
[Unreleased]: https://github.com/BioJulia/XAM.jl/compare/v0.4.0...HEAD
[0.4.0]: https://github.com/BioJulia/XAM.jl/compare/v0.3.1...v0.4.0
[0.3.1]: https://github.com/BioJulia/XAM.jl/compare/v0.3.0...v0.3.1
[0.3.0]: https://github.com/BioJulia/XAM.jl/compare/v0.2.8...v0.3.0
| XAM | https://github.com/BioJulia/XAM.jl.git |
|
[
"MIT"
] | 0.4.0 | e402710abf3f0bbed192896851bef8e483cf7952 | docs | 4846 | # <img src="./docs/src/assets/logo.svg" width="30%" align="right" /> XAM.jl
[](https://www.repostatus.org/#active)
[](https://github.com/BioJulia/XAM.jl/releases/latest)
[](https://zenodo.org/badge/latestdoi/201858041)
[](https://github.com/BioJulia/XAM.jl/blob/master/LICENSE)
[](https://biojulia.github.io/XAM.jl/stable)
[](https://biojulia.github.io/XAM.jl/dev/)
> This project follows the [semver](http://semver.org) pro forma and uses the [git-flow branching model](https://nvie.com/posts/a-successful-git-branching-model/ "original
blog post").
## Description
The XAM package provides I/O and utilities for manipulating SAM and BAM formatted alignment map files.
## Installation
You can install the XAM package from the [Julia REPL](https://docs.julialang.org/en/v1/manual/getting-started/).
Press `]` to enter [pkg mode](https://docs.julialang.org/en/v1/stdlib/Pkg/), then enter the following command:
```julia
add XAM
```
If you are interested in the cutting edge of the development, please check out the [develop branch](https://github.com/BioJulia/XAM.jl/tree/develop) to try new features before release.
## Testing
XAM is tested against Julia `1.X` on Linux, OS X, and Windows.
**Latest build status:**
[](https://github.com/BioJulia/XAM.jl/actions/workflows/UnitTests.yml)
[](https://github.com/BioJulia/XAM.jl/actions?query=workflow%3ADocumentation+branch%3Amaster)
[](https://codecov.io/gh/BioJulia/XAM.jl)
## Contributing
We appreciate [contributions](https://github.com/BioJulia/XAM.jl/graphs/contributors) from users including reporting bugs, fixing issues, improving performance and adding new features.
Take a look at the [contributing files](https://github.com/BioJulia/Contributing) detailed contributor and maintainer guidelines, and code of conduct.
### Financial contributions
We also welcome financial contributions in full transparency on our [open collective](https://opencollective.com/biojulia).
Anyone can file an expense.
If the expense makes sense for the development the core contributors and the person who filed the expense will be reimbursed.
## Backers & Sponsors
Thank you to all our backers and sponsors!
Love our work and community? [Become a backer](https://opencollective.com/biojulia#backer).
[](https://opencollective.com/biojulia#backers)
Does your company use BioJulia?
Help keep BioJulia feature rich and healthy by [sponsoring the project](https://opencollective.com/biojulia#sponsor).
Your logo will show up here with a link to your website.
[](https://opencollective.com/biojulia/sponsor/0/website)
[](https://opencollective.com/biojulia/sponsor/1/website)
[](https://opencollective.com/biojulia/sponsor/2/website)
[](https://opencollective.com/biojulia/sponsor/3/website)
[](https://opencollective.com/biojulia/sponsor/4/website)
[](https://opencollective.com/biojulia/sponsor/5/website)
[](https://opencollective.com/biojulia/sponsor/6/website)
[](https://opencollective.com/biojulia/sponsor/7/website)
[](https://opencollective.com/biojulia/sponsor/8/website)
[](https://opencollective.com/biojulia/sponsor/9/website)
## Questions?
If you have a question about contributing or using BioJulia software, come on over and chat to us on [the Julia Slack workspace](https://julialang.slack.com/channels/biology), or you can try the [Bio category of the Julia discourse site](https://discourse.julialang.org/c/domain/bio).
| XAM | https://github.com/BioJulia/XAM.jl.git |
|
[
"MIT"
] | 0.4.0 | e402710abf3f0bbed192896851bef8e483cf7952 | docs | 1874 | <!--- Provide a general summary of the issue in the Title above -->
> _This template is rather extensive. Fill out all that you can, if are a new contributor or you're unsure about any section, leave it unchanged and a reviewer will help you_ :smile:. _This template is simply a tool to help everyone remember the BioJulia guidelines, if you feel anything in this template is not relevant, simply delete it._
## Expected Behavior
<!--- If you're describing a bug, tell us what you expect to happen -->
<!--- If you're suggesting a change/improvement, tell us how it should work -->
## Current Behavior
<!--- If describing a bug, tell us what happens instead of the expected behavior -->
<!--- If suggesting a change/improvement, explain the difference from current behavior -->
## Possible Solution / Implementation
<!--- If describing a bug, suggest a fix/reason for the bug (optional) -->
<!--- If you're suggesting a change/improvement, suggest ideas how to implement the addition or change -->
## Steps to Reproduce (for bugs)
<!--- You may include copy/pasteable snippets or a list of steps to reproduce the bug -->
1.
2.
3.
4.
<!--- Optionally, provide a link to a live example -->
<!--- You can use [this tool](https://www.cockos.com/licecap/) -->
<!--- ...Or [this tool](https://github.com/colinkeenan/silentcast) -->
<!--- ...Or [this tool](https://github.com/GNOME/byzanz) on Linux -->
## Context
<!--- How has this issue affected you? What are you trying to accomplish? -->
<!--- Providing context helps us come up with a solution that is most useful in the real world -->
## Your Environment
<!--- Include as many relevant details about the environment you experienced the bug in -->
- Package Version used:
- Julia Version used:
- Operating System and version (desktop or mobile):
- Link to your project:
<!-- Can you list installed packages here? -->
| XAM | https://github.com/BioJulia/XAM.jl.git |
|
[
"MIT"
] | 0.4.0 | e402710abf3f0bbed192896851bef8e483cf7952 | docs | 2791 | # A clear and descriptive title (No issue numbers please)
> _This template is rather extensive. Fill out all that you can, if are a new contributor or you're unsure about any section, leave it unchanged and a reviewer will help you_ :smile:. _This template is simply a tool to help everyone remember the BioJulia guidelines, if you feel anything in this template is not relevant, simply delete it._
## Types of changes
This PR implements the following changes:
_(Please tick any or all of the following that are applicable)_
* [ ] :sparkles: New feature (A non-breaking change which adds functionality).
* [ ] :bug: Bug fix (A non-breaking change, which fixes an issue).
* [ ] :boom: Breaking change (fix or feature that would cause existing functionality to change).
## :clipboard: Additional detail
- If you have implemented new features or behaviour
- **Provide a description of the addition** in as many details as possible.
- **Provide justification of the addition**.
- **Provide a runnable example of use of your addition**. This lets reviewers
and others try out the feature before it is merged or makes it's way to release.
- If you have changed current behaviour...
- **Describe the behaviour prior to you changes**
- **Describe the behaviour after your changes** and justify why you have made the changes,
Please describe any breakages you anticipate as a result of these changes.
- **Does your change alter APIs or existing exposed methods/types?**
If so, this may cause dependency issues and breakages, so the maintainer
will need to consider this when versioning the next release.
- If you are implementing changes that are intended to increase performance, you
should provide the results of a simple performance benchmark exercise
demonstrating the improvement. Especially if the changes make code less legible.
## :ballot_box_with_check: Checklist
- [ ] :art: The changes implemented is consistent with the [julia style guide](https://docs.julialang.org/en/stable/manual/style-guide/).
- [ ] :blue_book: I have updated and added relevant docstrings, in a manner consistent with the [documentation styleguide](https://docs.julialang.org/en/stable/manual/documentation/).
- [ ] :blue_book: I have added or updated relevant user and developer manuals/documentation in `docs/src/`.
- [ ] :ok: There are unit tests that cover the code changes I have made.
- [ ] :ok: The unit tests cover my code changes AND they pass.
- [ ] :pencil: I have added an entry to the `[UNRELEASED]` section of the manually curated `CHANGELOG.md` file for this repository.
- [ ] :ok: All changes should be compatible with the latest stable version of Julia.
- [ ] :thought_balloon: I have commented liberally for any complex pieces of internal code.
| XAM | https://github.com/BioJulia/XAM.jl.git |
|
[
"MIT"
] | 0.4.0 | e402710abf3f0bbed192896851bef8e483cf7952 | docs | 4663 | # XAM.jl
[](https://www.repostatus.org/#active)
[](https://github.com/BioJulia/XAM.jl/releases/latest)
[](https://github.com/BioJulia/XAM.jl/blob/master/LICENSE)
[](https://biojulia.github.io/XAM.jl/stable)
[](https://biojulia.github.io/XAM.jl/dev/)
> This project follows the [semver](http://semver.org) pro forma and uses the [git-flow branching model](https://nvie.com/posts/a-successful-git-branching-model/).
## Description
The XAM package provides I/O and utilities for manipulating SAM and BAM formatted alignment map files.
## Installation
You can install the XAM package from the [Julia REPL](https://docs.julialang.org/en/v1/manual/getting-started/).
Press `]` to enter [pkg mode](https://docs.julialang.org/en/v1/stdlib/Pkg/), then enter the following command:
```julia
add XAM
```
If you are interested in the cutting edge of the development, please check out the [develop branch](https://github.com/BioJulia/XAM.jl/tree/develop) to try new features before release.
## Testing
XAM is tested against Julia `1.X` on Linux, OS X, and Windows.
**Latest build status:**
[](https://github.com/BioJulia/XAM.jl/actions/workflows/UnitTests.yml)
[](https://github.com/BioJulia/XAM.jl/actions?query=workflow%3ADocumentation+branch%3Amaster)
[](https://codecov.io/gh/BioJulia/XAM.jl)
## Contributing
We appreciate [contributions](https://github.com/BioJulia/XAM.jl/graphs/contributors) from users including reporting bugs, fixing issues, improving performance and adding new features.
Take a look at the [contributing files](https://github.com/BioJulia/Contributing) detailed contributor and maintainer guidelines, and code of conduct.
### Financial contributions
We also welcome financial contributions in full transparency on our [open collective](https://opencollective.com/biojulia).
Anyone can file an expense.
If the expense makes sense for the development the core contributors and the person who filed the expense will be reimbursed.
## Backers & Sponsors
Thank you to all our backers and sponsors!
Love our work and community? [Become a backer](https://opencollective.com/biojulia#backer).
[](https://opencollective.com/biojulia#backers)
Does your company use BioJulia?
Help keep BioJulia feature rich and healthy by [sponsoring the project](https://opencollective.com/biojulia#sponsor).
Your logo will show up here with a link to your website.
[](https://opencollective.com/biojulia/sponsor/0/website)
[](https://opencollective.com/biojulia/sponsor/1/website)
[](https://opencollective.com/biojulia/sponsor/2/website)
[](https://opencollective.com/biojulia/sponsor/3/website)
[](https://opencollective.com/biojulia/sponsor/4/website)
[](https://opencollective.com/biojulia/sponsor/5/website)
[](https://opencollective.com/biojulia/sponsor/6/website)
[](https://opencollective.com/biojulia/sponsor/7/website)
[](https://opencollective.com/biojulia/sponsor/8/website)
[](https://opencollective.com/biojulia/sponsor/9/website)
## Questions?
If you have a question about contributing or using BioJulia software, come on over and chat to us on [the Julia Slack workspace](https://julialang.slack.com/channels/biology), or you can try the [Bio category of the Julia discourse site](https://discourse.julialang.org/c/domain/bio).
| XAM | https://github.com/BioJulia/XAM.jl.git |
|
[
"MIT"
] | 0.4.0 | e402710abf3f0bbed192896851bef8e483cf7952 | docs | 377 | ```@meta
CurrentModule = XAM
DocTestSetup = quote
using XAM
end
```
# API Reference
## SAM API
### Public
```@autodocs
Modules = [XAM.SAM]
private = false
```
### Internal
```@autodocs
Modules = [XAM.SAM]
public = false
```
## BAM API
### Public
```@autodocs
Modules = [XAM.BAM]
private = false
```
### Internal
```@autodocs
Modules = [XAM.BAM]
public = false
```
| XAM | https://github.com/BioJulia/XAM.jl.git |
|
[
"MIT"
] | 0.4.0 | e402710abf3f0bbed192896851bef8e483cf7952 | docs | 9828 | # SAM and BAM
## Introduction
The `XAM` package offers high-performance tools for SAM and BAM file formats, which are the most popular file formats.
If you have questions about the SAM and BAM formats or any of the terminology used when discussing these formats, see the published [specification](https://samtools.github.io/hts-specs/SAMv1.pdf), which is maintained by the [samtools group](https://samtools.github.io/).
A very very simple SAM file looks like the following:
```
@HD VN:1.6 SO:coordinate
@SQ SN:ref LN:45
r001 99 ref 7 30 8M2I4M1D3M = 37 39 TTAGATAAAGGATACTG *
r002 0 ref 9 30 3S6M1P1I4M * 0 0 AAAAGATAAGGATA *
r003 0 ref 9 30 5S6M * 0 0 GCCTAAGCTAA * SA:Z:ref,29,-,6H5M,17,0;
r004 0 ref 16 30 6M14N5M * 0 0 ATAGCTTCAGC *
r003 2064 ref 29 17 6H5M * 0 0 TAGGC * SA:Z:ref,9,+,5S6M,30,1;
r001 147 ref 37 30 9M = 7 -39 CAGCGGCAT * NM:i:1
```
Where the first two lines are part of the "header", and the following lines are "records".
Each record describes how a read aligns to some reference sequence.
Sometimes one record describes one read, but there are other cases like chimeric reads and split alignments, where multiple records apply to one read.
In the example above, `r003` is a chimeric read, `r004` is a split alignment, and the two `r001` records form a mate pair.
Again, we refer you to the official [specification](https://samtools.github.io/hts-specs/SAMv1.pdf) for more details.
A BAM file stores this same information but in a binary and compressible format that does not make for pretty printing here!
## Reading SAM and BAM files
A typical script iterating over all records in a file looks like below:
```julia
using XAM
# Open a BAM file.
reader = open(BAM.Reader, "data.bam")
# Iterate over BAM records.
for record in reader
# `record` is a BAM.Record object.
if BAM.ismapped(record)
# Print the mapped position.
println(BAM.refname(record), ':', BAM.position(record))
end
end
# Close the BAM file.
close(reader)
```
The size of a BAM file is often extremely large.
The iterator interface demonstrated above allocates an object for each record and that may be a bottleneck of reading data from a BAM file.
In-place reading reuses a pre-allocated object for every record and less memory allocation happens in reading:
```julia
reader = open(BAM.Reader, "data.bam")
record = BAM.Record()
while !eof(reader)
empty!(record)
read!(reader, record)
# do something
end
```
## SAM and BAM Headers
Both `SAM.Reader` and `BAM.Reader` implement the `header` function, which returns a `SAM.Header` object.
To extract certain information out of the headers, you can use the `findall` method on the header to extract information according to SAM/BAM tag.
Again we refer you to the [specification](https://samtools.github.io/hts-specs/SAMv1.pdf) for full details of all the different tags that can occur in headers, and what they mean.
Below is an example of extracting all the info about the reference sequences from the BAM header.
In SAM/BAM, any description of a reference sequence is stored in the header, under a tag denoted `SQ` (think `reference SeQuence`!).
```jlcon
julia> reader = open(SAM.Reader, "data.sam");
julia> findall(SAM.header(reader), "SQ")
7-element Array{Bio.Align.SAM.MetaInfo,1}:
Bio.Align.SAM.MetaInfo:
tag: SQ
value: SN=Chr1 LN=30427671
Bio.Align.SAM.MetaInfo:
tag: SQ
value: SN=Chr2 LN=19698289
Bio.Align.SAM.MetaInfo:
tag: SQ
value: SN=Chr3 LN=23459830
Bio.Align.SAM.MetaInfo:
tag: SQ
value: SN=Chr4 LN=18585056
Bio.Align.SAM.MetaInfo:
tag: SQ
value: SN=Chr5 LN=26975502
Bio.Align.SAM.MetaInfo:
tag: SQ
value: SN=chloroplast LN=154478
Bio.Align.SAM.MetaInfo:
tag: SQ
value: SN=mitochondria LN=366924
```
In the above we can see there were 7 sequences in the reference: 5 chromosomes, one chloroplast sequence, and one mitochondrial sequence.
## SAM and BAM Records
### SAM.Record
The `XAM` package supports the following accessors for `SAM.Record` types.
```@docs
XAM.SAM.flags
XAM.SAM.ismapped
XAM.SAM.isprimaryalignment
XAM.SAM.refname
XAM.SAM.position
XAM.SAM.rightposition
XAM.SAM.isnextmapped
XAM.SAM.nextrefname
XAM.SAM.nextposition
XAM.SAM.mappingquality
XAM.SAM.cigar
XAM.SAM.alignment
XAM.SAM.alignlength
XAM.SAM.tempname
XAM.SAM.templength
XAM.SAM.sequence
XAM.SAM.seqlength
XAM.SAM.quality
XAM.SAM.auxdata
```
### BAM.Record
The `XAM` package supports the following accessors for `BAM.Record` types.
```@docs
XAM.BAM.flags
XAM.BAM.ismapped
XAM.BAM.isprimaryalignment
XAM.BAM.refid
XAM.BAM.refname
XAM.BAM.reflen
XAM.BAM.position
XAM.BAM.rightposition
XAM.BAM.isnextmapped
XAM.BAM.nextrefid
XAM.BAM.nextrefname
XAM.BAM.nextposition
XAM.BAM.mappingquality
XAM.BAM.cigar
XAM.BAM.alignment
XAM.BAM.alignlength
XAM.BAM.tempname
XAM.BAM.templength
XAM.BAM.sequence
XAM.BAM.seqlength
XAM.BAM.quality
XAM.BAM.auxdata
```
## Accessing auxiliary data
SAM and BAM records support the storing of optional data fields associated with tags.
Tagged auxiliary data follows a format of `TAG:TYPE:VALUE`.
`TAG` is a two-letter string, and each tag can only appear once per record.
`TYPE` is a single case-sensitive letter which defines the format of `VALUE`.
| Type | Description |
|------|-----------------------------------|
| 'A' | Printable character |
| 'i' | Signed integer |
| 'f' | Single-precision floating number |
| 'Z' | Printable string, including space |
| 'H' | Byte array in Hex format |
| 'B'  | Integer or numeric array          |
For more information about these tags and their types we refer you to the [SAM/BAM specification](https://samtools.github.io/hts-specs/SAMv1.pdf) and the additional [optional fields specification](https://samtools.github.io/hts-specs/SAMtags.pdf) document.
There are some tags that are reserved, predefined standard tags, for specific uses.
To access optional fields stored in tags, you use `getindex` indexing syntax on the record object.
Note that accessing optional tag fields will result in type instability in Julia.
This is because the type of the optional data is not known until run-time, as the tag is being read.
This can have a significant impact on performance.
To limit this, if the user knows the type of a value in advance, specifying it as a type annotation will alleviate the problem:
Below is an example of looping over records in a bam file and using indexing syntax to get the data stored in the "NM" tag.
Note the `UInt8` type assertion to alleviate type instability.
```julia
for record in open(BAM.Reader, "data.bam")
nm = record["NM"]::UInt8
# do something
end
```
## Getting records in a range
The `XAM` package supports the BAI index to fetch records in a specific range from a BAM file.
[Samtools](https://samtools.github.io/) provides `index` subcommand to create an index file (.bai) from a sorted BAM file.
```console
$ samtools index -b SRR1238088.sort.bam
$ ls SRR1238088.sort.bam*
SRR1238088.sort.bam SRR1238088.sort.bam.bai
```
The method `eachoverlap(reader, chrom, range)` returns an iterator of BAM records overlapping the query interval:
```julia
reader = open(BAM.Reader, "SRR1238088.sort.bam", index="SRR1238088.sort.bam.bai")
for record in eachoverlap(reader, "Chr2", 10000:11000)
# `record` is a BAM.Record object
# ...
end
close(reader)
```
## Getting records overlapping genomic features
The `eachoverlap` method also accepts the `Interval` type defined in [GenomicFeatures.jl](https://github.com/BioJulia/GenomicFeatures.jl).
This allows you to do things like first read in the genomic features from a GFF3 file, and then for each feature, iterate over all the BAM records that overlap with that feature.
```julia
using GenomicFeatures
using GFF3
using XAM
# Load genomic features from a GFF3 file.
features = open(collect, GFF3.Reader, "TAIR10_GFF3_genes.gff")
# Keep mRNA features.
filter!(x -> GFF3.featuretype(x) == "mRNA", features)
# Open a BAM file and iterate over records overlapping mRNA transcripts.
reader = open(BAM.Reader, "SRR1238088.sort.bam", index = "SRR1238088.sort.bam.bai")
for feature in features
for record in eachoverlap(reader, feature)
# `record` overlaps `feature`.
# ...
end
end
close(reader)
```
## Writing files
In order to write a BAM or SAM file, you must first create a `SAM.Header`.
A `SAM.Header` is constructed from a vector of `SAM.MetaInfo` objects.
For example, to create the following simple header:
```
@HD VN:1.6 SO:coordinate
@SQ SN:ref LN:45
```
```julia
julia> a = SAM.MetaInfo("HD", ["VN" => 1.6, "SO" => "coordinate"])
SAM.MetaInfo:
tag: HD
value: VN=1.6 SO=coordinate
julia> b = SAM.MetaInfo("SQ", ["SN" => "ref", "LN" => 45])
SAM.MetaInfo:
tag: SQ
value: SN=ref LN=45
julia> h = SAM.Header([a, b])
SAM.Header(SAM.MetaInfo[SAM.MetaInfo:
tag: HD
value: VN=1.6 SO=coordinate, SAM.MetaInfo:
tag: SQ
value: SN=ref LN=45])
```
Then to create the writer for a SAM file, construct a `SAM.Writer` using the header and an `IO` type:
```julia
julia> samw = SAM.Writer(open("my-data.sam", "w"), h)
SAM.Writer(IOStream(<file my-data.sam>))
```
To make a BAM Writer is slightly different, as you need to use a specific stream type from the [https://github.com/BioJulia/BGZFStreams.jl](https://github.com/BioJulia/BGZFStreams.jl) package:
```julia
julia> using BGZFStreams
julia> bamw = BAM.Writer(BGZFStream(open("my-data.bam", "w"), "w"))
BAM.Writer(BGZFStreams.BGZFStream{IOStream}(<mode=write>))
```
Once you have a BAM or SAM writer, you can use the `write` method to write `BAM.Record`s or `SAM.Record`s to file:
```julia
julia> write(bamw, rec) # Here rec is a `BAM.Record`
330780
```
| XAM | https://github.com/BioJulia/XAM.jl.git |
|
[
"MIT"
] | 0.5.13 | 04c968137612c4a5629fa531334bb81ad5680f00 | code | 730 | using RuntimeGeneratedFunctions
using Documenter
cp("./docs/Manifest.toml", "./docs/src/assets/Manifest.toml", force = true)
cp("./docs/Project.toml", "./docs/src/assets/Project.toml", force = true)
makedocs(sitename = "RuntimeGeneratedFunctions.jl",
authors = "Chris Rackauckas",
modules = [RuntimeGeneratedFunctions],
clean = true, doctest = false, linkcheck = true,
format = Documenter.HTML(assets = ["assets/favicon.ico"],
canonical = "https://docs.sciml.ai/RuntimeGeneratedFunctions/stable/"),
pages = [
"RuntimeGeneratedFunctions.jl: Efficient Staged Compilation" => "index.md",
"API" => "api.md"
])
deploydocs(;
repo = "github.com/SciML/RuntimeGeneratedFunctions.jl")
| RuntimeGeneratedFunctions | https://github.com/SciML/RuntimeGeneratedFunctions.jl.git |
|
[
"MIT"
] | 0.5.13 | 04c968137612c4a5629fa531334bb81ad5680f00 | code | 13505 | module RuntimeGeneratedFunctions
using ExprTools, Serialization, SHA
export RuntimeGeneratedFunction, @RuntimeGeneratedFunction, drop_expr
const _rgf_docs = """
@RuntimeGeneratedFunction(function_expression)
@RuntimeGeneratedFunction(context_module, function_expression, opaque_closures=true)
RuntimeGeneratedFunction(cache_module, context_module, function_expression; opaque_closures=true)
Construct a function from `function_expression` which can be called immediately
without world age problems. Somewhat like using `eval(function_expression)` and
then calling the resulting function. The differences are:
* The result can be called immediately (immune to world age errors)
* The result is not a named generic function, and doesn't participate in
generic function dispatch; it's more like a callable method.
You need to use `RuntimeGeneratedFunctions.init(your_module)` a single time at
the top level of `your_module` before any other uses of the macro.
If provided, `context_module` is the module in which symbols within
`function_expression` will be looked up. By default, this is the module in which
`@RuntimeGeneratedFunction` is expanded.
`cache_module` is the module where the expression `code` will be cached. If
`RuntimeGeneratedFunction` is used during precompilation, this must be a module
which is currently being precompiled. Normally this would be set to
`@__MODULE__` using one of the macro constructors.
If `opaque_closures` is `true`, all closures in `function_expression` are
converted to
[opaque closures](https://github.com/JuliaLang/julia/pull/37849#issue-496641229).
This allows for the use of closures and generators inside the generated function,
but may not work in all cases due to slightly different semantics. This feature
requires Julia 1.7.
# Examples
```
RuntimeGeneratedFunctions.init(@__MODULE__) # Required at module top-level
function foo()
expression = :((x,y)->x+y+1) # May be generated dynamically
f = @RuntimeGeneratedFunction(expression)
f(1,2) # May be called immediately
end
```
"""
"$_rgf_docs"
struct RuntimeGeneratedFunction{argnames, cache_tag, context_tag, id, B} <: Function
body::B
function RuntimeGeneratedFunction(cache_tag, context_tag, ex; opaque_closures = true)
def = splitdef(ex)
args = normalize_args(get(def, :args, Symbol[]))
body = def[:body]
if opaque_closures && isdefined(Base, :Experimental) &&
isdefined(Base.Experimental, Symbol("@opaque"))
body = closures_to_opaque(body)
end
id = expr_to_id(body)
cached_body = _cache_body(cache_tag, id, body)
new{Tuple(args), cache_tag, context_tag, id, typeof(cached_body)}(cached_body)
end
# For internal use in deserialize() - doesen't check whether the body is in the cache!
function RuntimeGeneratedFunction{
argnames,
cache_tag,
context_tag,
id
}(body) where {
argnames,
cache_tag,
context_tag,
id
}
new{argnames, cache_tag, context_tag, id, typeof(body)}(body)
end
end
function drop_expr(::RuntimeGeneratedFunction{
a,
cache_tag,
c,
id
}) where {a, cache_tag, c,
id}
# When dropping the reference to the body from an RGF, we need to upgrade
# from a weak to a strong reference in the cache to prevent the body being
# GC'd.
lock(_cache_lock) do
cache = getfield(parentmodule(cache_tag), _cachename)
body = cache[id]
if body isa WeakRef
cache[id] = body.value
end
end
RuntimeGeneratedFunction{a, cache_tag, c, id}(nothing)
end
function _check_rgf_initialized(mods...)
for mod in mods
if !isdefined(mod, _tagname)
error("""You must use `RuntimeGeneratedFunctions.init(@__MODULE__)` at module
top level before using runtime generated functions in $mod""")
end
end
end
function RuntimeGeneratedFunction(cache_module::Module, context_module::Module, code;
opaque_closures = true)
_check_rgf_initialized(cache_module, context_module)
RuntimeGeneratedFunction(getfield(cache_module, _tagname),
getfield(context_module, _tagname),
code;
opaque_closures = opaque_closures)
end
"$_rgf_docs"
macro RuntimeGeneratedFunction(code)
quote
RuntimeGeneratedFunction(@__MODULE__, @__MODULE__, $(esc(code)))
end
end
macro RuntimeGeneratedFunction(context_module, code, opaque_closures = true)
quote
RuntimeGeneratedFunction(@__MODULE__, $(esc(context_module)), $(esc(code));
opaque_closures = $(esc(opaque_closures)))
end
end
function Base.show(io::IO, ::MIME"text/plain",
f::RuntimeGeneratedFunction{argnames, cache_tag, context_tag, id}) where {
argnames,
cache_tag,
context_tag,
id
}
cache_mod = parentmodule(cache_tag)
context_mod = parentmodule(context_tag)
func_expr = Expr(:->, Expr(:tuple, argnames...), _lookup_body(cache_tag, id))
print(io, "RuntimeGeneratedFunction(#=in $cache_mod=#, #=using $context_mod=#, ",
repr(func_expr), ")")
end
function (f::RuntimeGeneratedFunction)(args::Vararg{Any, N}) where {N}
generated_callfunc(f, args...)
end
# We'll generate a method of this function in every module which wants to use
# @RuntimeGeneratedFunction
function generated_callfunc end
function generated_callfunc_body(argnames, cache_tag, id, __args)
setup = (:($(argnames[i]) = @inbounds __args[$i]) for i in 1:length(argnames))
body = _lookup_body(cache_tag, id)
@assert body !== nothing
quote
$(setup...)
$(body)
end
end
### Body caching and lookup
#
# Looking up the body of a RuntimeGeneratedFunction based on the id is a little
# complicated because we want the `id=>body` mapping to survive precompilation.
# This means we need to store the mapping created by a module in that module
# itself.
#
# For that, we need a way to lookup the correct module from an instance of
# RuntimeGeneratedFunction. Modules can't be type parameters, but we can use
# any type which belongs to the module as a proxy "tag" for the module.
#
# (We could even abuse `typeof(__module__.eval)` for the tag, though this is a
# little non-robust to weird special cases like Main.eval being
# Base.MainInclude.eval.)
# It appears we can't use a ReentrantLock here, as contention seems to lead to
# deadlock. Perhaps because it triggers a task switch while compiling the
# @generated function.
_cache_lock = Threads.SpinLock()
_cachename = Symbol("#_RuntimeGeneratedFunctions_cache")
_tagname = Symbol("#_RGF_ModTag")
function _cache_body(cache_tag, id, body)
lock(_cache_lock) do
cache = getfield(parentmodule(cache_tag), _cachename)
# Caching is tricky when `id` is the same for different AST instances:
#
# 1. If a function body with the same `id` was cached previously, we need
# to use that older instance of the body AST as the canonical one
# rather than `body`. This ensures the lifetime of the body in the
# cache will always cover the lifetime of all RGFs which share the same
# `id`.
#
# 2. Unless we hold a separate reference to `cache[id].value`, the GC
# can collect it (causing it to become `nothing`). So root it in a
# local variable first.
#
cached_body = get(cache, id, nothing)
if !isnothing(cached_body)
if cached_body isa WeakRef
# `value` may be nothing here if it was previously cached but GC'd
cached_body = cached_body.value
end
end
if isnothing(cached_body)
cached_body = body
# Use a WeakRef to allow `body` to be garbage collected. (After GC, the
# cache will still contain an empty entry with key `id`.)
cache[id] = WeakRef(cached_body)
end
return cached_body
end
end
function _lookup_body(cache_tag, id)
lock(_cache_lock) do
cache = getfield(parentmodule(cache_tag), _cachename)
body = cache[id]
body isa WeakRef ? body.value : body
end
end
"""
RuntimeGeneratedFunctions.init(mod)
Use this at top level to set up your module `mod` before using
`@RuntimeGeneratedFunction`.
"""
function init(mod)
lock(_cache_lock) do
if !isdefined(mod, _cachename)
mod.eval(quote
const $_cachename = Dict()
struct $_tagname end
# We create method of `generated_callfunc` in the user's module
# so that any global symbols within the body will be looked up
# in the user's module scope.
#
# This is straightforward but clunky. A neater solution should
# be to explicitly expand in the user's module and return a
# CodeInfo from `generated_callfunc`, but it seems we'd need
# `jl_expand_and_resolve` which doesn't exist until Julia 1.3
# or so. See:
# https://github.com/JuliaLang/julia/pull/32902
# https://github.com/NHDaly/StagedFunctions.jl/blob/master/src/StagedFunctions.jl#L30
@inline @generated function $RuntimeGeneratedFunctions.generated_callfunc(
f::$RuntimeGeneratedFunctions.RuntimeGeneratedFunction{
argnames,
cache_tag,
$_tagname,
id
},
__args...) where {
argnames,
cache_tag,
id
}
$RuntimeGeneratedFunctions.generated_callfunc_body(argnames,
cache_tag,
id, __args)
end
end)
end
end
end
###
### Utilities
###
normalize_args(args::Vector) = map(normalize_args, args)
normalize_args(arg::Symbol) = arg
function normalize_args(arg::Expr)
arg.head === :(::) || error("argument malformed. Got $arg")
arg.args[1]
end
function expr_to_id(ex)
io = IOBuffer()
Serialization.serialize(io, ex)
return Tuple(reinterpret(UInt32, sha1(take!(io))))
end
@nospecialize
closures_to_opaque(x, _ = nothing) = x
_tconvert(T, x) = Expr(:(::), Expr(:call, GlobalRef(Base, :convert), T, x), T)
function closures_to_opaque(ex::Expr, return_type = nothing)
head, args = ex.head, ex.args
fdef = splitdef(ex; throw = false)
if fdef !== nothing
body = get(fdef, :body, nothing)
if haskey(fdef, :rtype)
body = _tconvert(fdef[:rtype], closures_to_opaque(body, fdef[:rtype]))
delete!(fdef, :rtype)
else
body = closures_to_opaque(body)
end
fdef[:head] = :(->)
fdef[:body] = body
name = get(fdef, :name, nothing)
name !== nothing && delete!(fdef, :name)
_ex = Expr(:opaque_closure, combinedef(fdef))
# TODO: emit named opaque closure for better stacktraces
# (ref https://github.com/JuliaLang/julia/pull/40242)
if name !== nothing
name isa Symbol ||
error("Unsupported function definition `$ex` in RuntimeGeneratedFunction.")
_ex = Expr(:(=), name, _ex)
end
return _ex
elseif head === :generator
f_args = Expr(:tuple, Any[x.args[1] for x in args[2:end]]...)
iters = Any[x.args[2] for x in args[2:end]]
return Expr(:call,
GlobalRef(Base, :Generator),
closures_to_opaque(Expr(:(->), f_args, args[1])),
iters...)
elseif head === :opaque_closure
return closures_to_opaque(args[1])
elseif head === :return && return_type !== nothing
return Expr(:return,
_tconvert(return_type, closures_to_opaque(args[1], return_type)))
end
return Expr(head, Any[closures_to_opaque(x, return_type) for x in args]...)
end
function get_expression(rgf::RuntimeGeneratedFunction{argnames, cache_tag,
context_tag, id, B}) where {
argnames,
cache_tag,
context_tag,
id,
B
}
func_expr = Expr(:->, Expr(:tuple, argnames...), _lookup_body(cache_tag, id))
end
# We write an explicit serialize() and deserialize() here to manage caching of
# the body on a remote node when using Serialization.jl (in Distributed.jl
# and elsewhere)
function Serialization.serialize(s::AbstractSerializer,
rgf::RuntimeGeneratedFunction{argnames, cache_tag,
context_tag, id, B}) where {
argnames,
cache_tag,
context_tag,
id,
B
}
body = _lookup_body(cache_tag, id)
Serialization.serialize_type(s,
RuntimeGeneratedFunction{argnames, cache_tag, context_tag,
id, B})
serialize(s, body)
end
function Serialization.deserialize(s::AbstractSerializer,
::Type{
<:RuntimeGeneratedFunction{argnames, cache_tag,
context_tag, id, B}}) where {
argnames,
cache_tag,
context_tag,
id,
B
}
body = deserialize(s)
cached_body = _cache_body(cache_tag, id, body)
f = RuntimeGeneratedFunction{argnames, cache_tag, context_tag, id}(cached_body)
B === Nothing ? drop_expr(f) : f
end
@specialize
end
| RuntimeGeneratedFunctions | https://github.com/SciML/RuntimeGeneratedFunctions.jl.git |
|
[
"MIT"
] | 0.5.13 | 04c968137612c4a5629fa531334bb81ad5680f00 | code | 523 | using RuntimeGeneratedFunctions, Aqua
@testset "Aqua" begin
Aqua.find_persistent_tasks_deps(RuntimeGeneratedFunctions)
Aqua.test_ambiguities(RuntimeGeneratedFunctions, recursive = false)
Aqua.test_deps_compat(RuntimeGeneratedFunctions)
Aqua.test_piracies(RuntimeGeneratedFunctions)
Aqua.test_project_extras(RuntimeGeneratedFunctions)
Aqua.test_stale_deps(RuntimeGeneratedFunctions)
Aqua.test_unbound_args(RuntimeGeneratedFunctions)
Aqua.test_undefined_exports(RuntimeGeneratedFunctions)
end
| RuntimeGeneratedFunctions | https://github.com/SciML/RuntimeGeneratedFunctions.jl.git |
|
[
"MIT"
] | 0.5.13 | 04c968137612c4a5629fa531334bb81ad5680f00 | code | 4740 | using RuntimeGeneratedFunctions, BenchmarkTools
using Serialization
using Test
include("qa.jl")
RuntimeGeneratedFunctions.init(@__MODULE__)
function f(_du, _u, _p, _t)
@inbounds _du[1] = _u[1]
@inbounds _du[2] = _u[2]
nothing
end
ex1 = :((_du, _u, _p, _t) -> begin
@inbounds _du[1] = _u[1]
@inbounds _du[2] = _u[2]
nothing
end)
ex2 = :(function f(_du, _u, _p, _t)
@inbounds _du[1] = _u[1]
@inbounds _du[2] = _u[2]
nothing
end)
ex3 = :(function (_du::T, _u::Vector{E}, _p::P, _t::Any) where {T <: Vector, E, P}
@inbounds _du[1] = _u[1]
@inbounds _du[2] = _u[2]
nothing
end)
f0 = @RuntimeGeneratedFunction(:(() -> 42))
f1 = @RuntimeGeneratedFunction(ex1)
f2 = @RuntimeGeneratedFunction(ex2)
f3 = @RuntimeGeneratedFunction(ex3)
@test f0() === 42
@test f1 isa Function
du = rand(2)
u = rand(2)
p = nothing
t = nothing
@test f1(du, u, p, t) === nothing
du == u
du = rand(2)
f2(du, u, p, t)
@test du == u
du = rand(2)
@test f3(du, u, p, t) === nothing
du == u
t1 = @belapsed $f($du, $u, $p, $t)
t2 = @belapsed $f1($du, $u, $p, $t)
t3 = @belapsed $f2($du, $u, $p, $t)
t4 = @belapsed $f3($du, $u, $p, $t)
@test t1≈t2 atol=3e-8
@test t1≈t3 atol=3e-8
@test t1≈t4 atol=3e-8
function no_worldage()
ex = :(function f(_du, _u, _p, _t)
@inbounds _du[1] = _u[1]
@inbounds _du[2] = _u[2]
nothing
end)
f1 = @RuntimeGeneratedFunction(ex)
du = rand(2)
u = rand(2)
p = nothing
t = nothing
f1(du, u, p, t)
end
@test no_worldage() === nothing
# Test show()
@test sprint(show, MIME"text/plain"(),
@RuntimeGeneratedFunction(Base.remove_linenums!(:((x, y) -> x + y + 1)))) ==
"""
RuntimeGeneratedFunction(#=in $(@__MODULE__)=#, #=using $(@__MODULE__)=#, :((x, y)->begin
x + y + 1
end))"""
# Test with precompilation
push!(LOAD_PATH, joinpath(@__DIR__, "precomp"))
using RGFPrecompTest
@test RGFPrecompTest.f(1, 2) == 3
@test RGFPrecompTest.g(40) == 42
# Test that RuntimeGeneratedFunction with identical body expressions (but
# allocated separately) don't clobber each other when one is GC'd.
f_gc = @RuntimeGeneratedFunction(Base.remove_linenums!(:((x, y) -> x + y + 100001)))
let
@RuntimeGeneratedFunction(Base.remove_linenums!(:((x, y) -> x + y + 100001)))
end
GC.gc()
@test f_gc(1, -1) == 100001
# Test that drop_expr works
f_drop1, f_drop2 = let
ex = Base.remove_linenums!(:(x -> x - 1))
# Construct two identical RGFs here to test the cache deduplication code
(drop_expr(@RuntimeGeneratedFunction(ex)),
drop_expr(@RuntimeGeneratedFunction(ex)))
end
GC.gc()
@test f_drop1(1) == 0
@test f_drop2(1) == 0
# Test that threaded use works
tasks = []
for k in 1:4
let k = k
t = Threads.@spawn begin
r = Bool[]
for i in 1:100
f = @RuntimeGeneratedFunction(Base.remove_linenums!(:((x, y) -> x + y +
$i * $k)))
x = 1
y = 2
push!(r, f(x, y) == x + y + i * k)
end
r
end
push!(tasks, t)
end
end
@test all(all.(fetch.(tasks)))
# Test that globals are resolved within the correct scope
module GlobalsTest
using RuntimeGeneratedFunctions
RuntimeGeneratedFunctions.init(@__MODULE__)
y_in_GlobalsTest = 40
f = @RuntimeGeneratedFunction(:(x -> x + y_in_GlobalsTest))
end
@test GlobalsTest.f(2) == 42
f_outside = @RuntimeGeneratedFunction(GlobalsTest, :(x -> x + y_in_GlobalsTest))
@test f_outside(2) == 42
@test_throws ErrorException @eval(module NotInitTest
using RuntimeGeneratedFunctions
# RuntimeGeneratedFunctions.init(@__MODULE__) # <-- missing
f = @RuntimeGeneratedFunction(:(x -> x + y))
end)
ex = :(x -> (y -> x + y))
@test @RuntimeGeneratedFunction(ex)(2)(3) === 5
ex = :(x -> (f(y::Int)::Float64 = x + y; f))
@test @RuntimeGeneratedFunction(ex)(2)(3) === 5.0
ex = :(x -> function (y::Int)
return x + y
end)
@test @RuntimeGeneratedFunction(ex)(2)(3) === 5
ex = :(x -> function f(y::Int)::UInt8
return x + y
end)
@test @RuntimeGeneratedFunction(ex)(2)(3) === 0x05
ex = :(x -> sum(i^2 for i in 1:x))
@test @RuntimeGeneratedFunction(ex)(3) === 14
ex = :(x -> [2i for i in 1:x])
@test @RuntimeGeneratedFunction(ex)(3) == [2, 4, 6]
# Serialization
proj = dirname(Base.active_project())
buf = IOBuffer(read(`$(Base.julia_cmd()) --startup-file=no --project=$proj "serialize_rgf.jl"`))
deserialized_f, deserialized_g = deserialize(buf)
@test deserialized_f(11) == "Hi from a separate process. x=11"
@test deserialized_f.body isa Expr
@test deserialized_g(12) == "Serialization with dropped body. y=12"
@test deserialized_g.body isa Nothing
| RuntimeGeneratedFunctions | https://github.com/SciML/RuntimeGeneratedFunctions.jl.git |
|
[
"MIT"
] | 0.5.13 | 04c968137612c4a5629fa531334bb81ad5680f00 | code | 354 | # Must be run in a separate process from the rest of the tests!
using RuntimeGeneratedFunctions
using Serialization
RuntimeGeneratedFunctions.init(@__MODULE__)
f = @RuntimeGeneratedFunction(:(x -> "Hi from a separate process. x=$x"))
g = drop_expr(@RuntimeGeneratedFunction(:(y -> "Serialization with dropped body. y=$y")))
serialize(stdout, (f, g))
| RuntimeGeneratedFunctions | https://github.com/SciML/RuntimeGeneratedFunctions.jl.git |
|
[
"MIT"
] | 0.5.13 | 04c968137612c4a5629fa531334bb81ad5680f00 | code | 222 | module RGFPrecompTest
using RuntimeGeneratedFunctions
using RGFPrecompTest2
RuntimeGeneratedFunctions.init(@__MODULE__)
f = @RuntimeGeneratedFunction(:((x, y) -> x + y))
g = RGFPrecompTest2.generate_rgf(@__MODULE__)
end
| RuntimeGeneratedFunctions | https://github.com/SciML/RuntimeGeneratedFunctions.jl.git |
|
[
"MIT"
] | 0.5.13 | 04c968137612c4a5629fa531334bb81ad5680f00 | code | 388 | module RGFPrecompTest2
using RuntimeGeneratedFunctions
RuntimeGeneratedFunctions.init(@__MODULE__)
y_in_RGFPrecompTest2 = 2
# Simulates a helper function which generates an RGF, but caches it in a
# different module.
function generate_rgf(cache_module)
context_module = @__MODULE__
RuntimeGeneratedFunction(cache_module, @__MODULE__, :((x) -> y_in_RGFPrecompTest2 + x))
end
end
| RuntimeGeneratedFunctions | https://github.com/SciML/RuntimeGeneratedFunctions.jl.git |
|
[
"MIT"
] | 0.5.13 | 04c968137612c4a5629fa531334bb81ad5680f00 | docs | 8411 | # RuntimeGeneratedFunctions.jl
[](https://julialang.zulipchat.com/#narrow/stream/279055-sciml-bridged)
[](https://docs.sciml.ai/RuntimeGeneratedFunctions/stable/)
[](https://codecov.io/gh/SciML/RuntimeGeneratedFunctions.jl)
[](https://github.com/SciML/RuntimeGeneratedFunctions.jl/actions?query=workflow%3ACI)
[](https://github.com/SciML/ColPrac)
[](https://github.com/SciML/SciMLStyle)
`RuntimeGeneratedFunctions` are functions generated at runtime without world-age
issues and with the full performance of a standard Julia anonymous function. This
builds functions in a way that avoids `eval`.
Note that `RuntimeGeneratedFunction` does not handle closures. Please use the
[GeneralizedGenerated.jl](https://github.com/JuliaStaging/GeneralizedGenerated.jl)
package for more flexible staged programming. While `GeneralizedGenerated.jl` is
more powerful, `RuntimeGeneratedFunctions.jl` handles large expressions better.
## Tutorials and Documentation
For information on using the package,
[see the stable documentation](https://docs.sciml.ai/RuntimeGeneratedFunctions/stable/). Use the
[in-development documentation](https://docs.sciml.ai/RuntimeGeneratedFunctions/dev/) for the version of
the documentation, which contains the unreleased features.
## Simple Example
Here's an example showing how to construct and immediately call a runtime
generated function:
```julia
using RuntimeGeneratedFunctions
RuntimeGeneratedFunctions.init(@__MODULE__)
function no_worldage()
ex = :(function f(_du, _u, _p, _t)
@inbounds _du[1] = _u[1]
@inbounds _du[2] = _u[2]
nothing
end)
f1 = @RuntimeGeneratedFunction(ex)
du = rand(2)
u = rand(2)
p = nothing
t = nothing
f1(du, u, p, t)
end
no_worldage()
```
## Changing how global symbols are looked up
If you want to use helper functions or global variables from a different
module within your function expression you'll need to pass a `context_module`
to the `@RuntimeGeneratedFunction` constructor. For example
```julia
RuntimeGeneratedFunctions.init(@__MODULE__)
module A
using RuntimeGeneratedFunctions
RuntimeGeneratedFunctions.init(A)
helper_function(x) = x + 1
end
function g()
expression = :(f(x) = helper_function(x))
# context module is `A` so that `helper_function` can be found.
f = @RuntimeGeneratedFunction(A, expression)
@show f(1)
end
```
## Precompilation and setting the function expression cache
For technical reasons RuntimeGeneratedFunctions needs to cache the function
expression in a global variable within some module. This is normally
transparent to the user, but if the `RuntimeGeneratedFunction` is evaluated
during module precompilation, the cache module must be explicitly set to the
module currently being precompiled. This is relevant for helper functions in
some module which construct a RuntimeGeneratedFunction on behalf of the user.
For example, in the following code, any third party user of
`HelperModule.construct_rgf()` user needs to pass their own module as the
`cache_module` if they want the returned function to work after precompilation:
```julia
RuntimeGeneratedFunctions.init(@__MODULE__)
# Imagine HelperModule is in a separate package and will be precompiled
# separately.
module HelperModule
using RuntimeGeneratedFunctions
RuntimeGeneratedFunctions.init(HelperModule)
function construct_rgf(cache_module, context_module, ex)
ex = :((x) -> $ex^2 + x)
RuntimeGeneratedFunction(cache_module, context_module, ex)
end
end
function g()
ex = :(x + 1)
# Here cache_module is set to the module currently being compiled so that
# the returned RGF works with Julia's module precompilation system.
HelperModule.construct_rgf(@__MODULE__, @__MODULE__, ex)
end
f = g()
@show f(1)
```
## Retrieving Expressions
From a constructed RuntimeGeneratedFunction, you can retrieve the expressions using the
`RuntimeGeneratedFunctions.get_expression` command. For example:
```julia
ex = :((x) -> x^2)
rgf = @RuntimeGeneratedFunction(ex)
julia> RuntimeGeneratedFunctions.get_expression(rgf)
#=
quote
#= c:\Users\accou\OneDrive\Computer\Desktop\test.jl:39 =#
x ^ 2
end
=#
```
This can be used to get the expression even if `drop_expr` has been performed.
### Example: Retrieving Expressions from ModelingToolkit.jl
[ModelingToolkit.jl](https://github.com/SciML/ModelingToolkit.jl) uses
RuntimeGeneratedFunctions.jl for the construction of its functions to avoid issues of
world-age. Take for example its tutorial:
```julia
using ModelingToolkit, RuntimeGeneratedFunctions
using ModelingToolkit: t_nounits as t, D_nounits as D
@mtkmodel FOL begin
@parameters begin
τ # parameters
end
@variables begin
x(t) # dependent variables
end
@equations begin
D(x) ~ (1 - x) / τ
end
end
using DifferentialEquations: solve
@mtkbuild fol = FOL()
prob = ODEProblem(fol, [fol.x => 0.0], (0.0, 10.0), [fol.τ => 3.0])
```
If we check the function:
```julia
julia> prob.f
(::ODEFunction{true, SciMLBase.AutoSpecialize, ModelingToolkit.var"#f#697"{RuntimeGeneratedFunction{(:ˍ₋arg1, :ˍ₋arg2, :t), ModelingToolkit.var"#_RGF_ModTag", ModelingToolkit.var"#_RGF_ModTag", (0x2cce5cf2, 0xd20b0d73, 0xd14ed8a6, 0xa4d56c4f, 0x72958ea1), Nothing}, RuntimeGeneratedFunction{(:ˍ₋out, :ˍ₋arg1, :ˍ₋arg2, :t), ModelingToolkit.var"#_RGF_ModTag", ModelingToolkit.var"#_RGF_ModTag", (0x7f3c227e, 0x8f116bb1, 0xb3528ad5, 0x9c57c605, 0x60f580c3), Nothing}}, UniformScaling{Bool}, Nothing, Nothing, Nothing, Nothing, Nothing, Nothing, Nothing, Nothing, Nothing, Nothing, Nothing, ModelingToolkit.var"#852#generated_observed#706"{Bool, ODESystem, Dict{Any, Any}, Vector{Any}}, Nothing, ODESystem, Nothing, Nothing}) (generic function with 1 method)
```
It's a RuntimeGeneratedFunction. We can find the code for this system using the retrieval
command on the function we want. For example, for the in-place function:
```julia
julia> RuntimeGeneratedFunctions.get_expression(prob.f.f.f_iip)
:((ˍ₋out, ˍ₋arg1, ˍ₋arg2, t)->begin
#= C:\Users\accou\.julia\packages\SymbolicUtils\c0xQb\src\code.jl:373 =#
#= C:\Users\accou\.julia\packages\SymbolicUtils\c0xQb\src\code.jl:374 =#
#= C:\Users\accou\.julia\packages\SymbolicUtils\c0xQb\src\code.jl:375 =#
begin
begin
begin
#= C:\Users\accou\.julia\packages\Symbolics\HIg7O\src\build_function.jl:546 =#
#= C:\Users\accou\.julia\packages\SymbolicUtils\c0xQb\src\code.jl:422 =# @inbounds begin
#= C:\Users\accou\.julia\packages\SymbolicUtils\c0xQb\src\code.jl:418 =#
ˍ₋out[1] = (/)((+)(1, (*)(-1, ˍ₋arg1[1])), ˍ₋arg2[1])
#= C:\Users\accou\.julia\packages\SymbolicUtils\c0xQb\src\code.jl:420 =#
nothing
end
end
end
end
end)
```
or the out-of-place function:
```julia
julia> RuntimeGeneratedFunctions.get_expression(prob.f.f.f_oop)
:((ˍ₋arg1, ˍ₋arg2, t)->begin
#= C:\Users\accou\.julia\packages\SymbolicUtils\c0xQb\src\code.jl:373 =#
#= C:\Users\accou\.julia\packages\SymbolicUtils\c0xQb\src\code.jl:374 =#
#= C:\Users\accou\.julia\packages\SymbolicUtils\c0xQb\src\code.jl:375 =#
begin
begin
begin
#= C:\Users\accou\.julia\packages\SymbolicUtils\c0xQb\src\code.jl:468 =#
(SymbolicUtils.Code.create_array)(typeof(ˍ₋arg1), nothing, Val{1}(), Val{(1,)}(), (/)((+)(1, (*)(-1, ˍ₋arg1[1])), ˍ₋arg2[1]))
end
end
end
end)
```
| RuntimeGeneratedFunctions | https://github.com/SciML/RuntimeGeneratedFunctions.jl.git |
|
[
"MIT"
] | 0.5.13 | 04c968137612c4a5629fa531334bb81ad5680f00 | docs | 62 | # API
```@autodocs
Modules = [RuntimeGeneratedFunctions]
```
| RuntimeGeneratedFunctions | https://github.com/SciML/RuntimeGeneratedFunctions.jl.git |
|
[
"MIT"
] | 0.5.13 | 04c968137612c4a5629fa531334bb81ad5680f00 | docs | 8579 | # RuntimeGeneratedFunctions.jl: Generate functions at runtime
`RuntimeGeneratedFunctions` are functions generated at runtime without world-age
issues and with the full performance of a standard Julia anonymous function. This
builds functions in a way that avoids `eval`.
Note that `RuntimeGeneratedFunction` does not handle closures. Please use the
[GeneralizedGenerated.jl](https://github.com/JuliaStaging/GeneralizedGenerated.jl)
package for more flexible staged programming. While `GeneralizedGenerated.jl` is
more powerful, `RuntimeGeneratedFunctions.jl` handles large expressions better.
## Tutorials and Documentation
For information on using the package,
[see the stable documentation](https://docs.sciml.ai/RuntimeGeneratedFunctions/stable/). Use the
[in-development documentation](https://docs.sciml.ai/RuntimeGeneratedFunctions/dev/) for the version of
the documentation, which contains the unreleased features.
## Simple Example
Here's an example showing how to construct and immediately call a runtime
generated function:
```julia
using RuntimeGeneratedFunctions
RuntimeGeneratedFunctions.init(@__MODULE__)
function no_worldage()
ex = :(function f(_du, _u, _p, _t)
@inbounds _du[1] = _u[1]
@inbounds _du[2] = _u[2]
nothing
end)
f1 = @RuntimeGeneratedFunction(ex)
du = rand(2)
u = rand(2)
p = nothing
t = nothing
f1(du, u, p, t)
end
no_worldage()
```
## Changing how global symbols are looked up
If you want to use helper functions or global variables from a different
module within your function expression, you'll need to pass a `context_module`
to the `@RuntimeGeneratedFunction` constructor. For example
```julia
RuntimeGeneratedFunctions.init(@__MODULE__)
module A
using RuntimeGeneratedFunctions
RuntimeGeneratedFunctions.init(A)
helper_function(x) = x + 1
end
function g()
expression = :(f(x) = helper_function(x))
# context module is `A` so that `helper_function` can be found.
f = @RuntimeGeneratedFunction(A, expression)
@show f(1)
end
```
## Precompilation and setting the function expression cache
For technical reasons, RuntimeGeneratedFunctions needs to cache the function
expression in a global variable within some module. This is normally
transparent to the user, but if the `RuntimeGeneratedFunction` is evaluated
during module precompilation, the cache module must be explicitly set to the
module currently being precompiled. This is relevant for helper functions in
some module, which construct a RuntimeGeneratedFunction on behalf of the user.
For example, in the following code, any third party user of
`HelperModule.construct_rgf()` needs to pass their own module as the
`cache_module` if they want the returned function to work after precompilation:
```julia
RuntimeGeneratedFunctions.init(@__MODULE__)
# Imagine HelperModule is in a separate package and will be precompiled
# separately.
module HelperModule
using RuntimeGeneratedFunctions
RuntimeGeneratedFunctions.init(HelperModule)
function construct_rgf(cache_module, context_module, ex)
ex = :((x) -> $ex^2 + x)
RuntimeGeneratedFunction(cache_module, context_module, ex)
end
end
function g()
ex = :(x + 1)
# Here cache_module is set to the module currently being compiled so that
# the returned RGF works with Julia's module precompilation system.
HelperModule.construct_rgf(@__MODULE__, @__MODULE__, ex)
end
f = g()
@show f(1)
```
## Retrieving Expressions
From a constructed RuntimeGeneratedFunction, you can retrieve the expressions using the
`RuntimeGeneratedFunctions.get_expression` command. For example:
```julia
ex = :((x) -> x^2)
rgf = @RuntimeGeneratedFunction(ex)
julia> RuntimeGeneratedFunctions.get_expression(rgf)
:((x,)->begin
#= REPL[14]:1 =#
x ^ 2
end)
```
This can be used to get the expression even if `drop_expr` has been performed.
### Example: Retrieving Expressions from ModelingToolkit.jl
[ModelingToolkit.jl](https://github.com/SciML/ModelingToolkit.jl) uses
RuntimeGeneratedFunctions.jl for the construction of its functions to avoid issues of
world-age. Take for example its tutorial:
```julia
using ModelingToolkit, RuntimeGeneratedFunctions
using ModelingToolkit: t_nounits as t, D_nounits as D
@mtkmodel FOL begin
@parameters begin
τ # parameters
end
@variables begin
x(t) # dependent variables
end
@equations begin
D(x) ~ (1 - x) / τ
end
end
using DifferentialEquations: solve
@mtkbuild fol = FOL()
prob = ODEProblem(fol, [fol.x => 0.0], (0.0, 10.0), [fol.τ => 3.0])
```
If we check the function:
```julia
julia> prob.f
(::ODEFunction{true, SciMLBase.AutoSpecialize, ModelingToolkit.var"#f#697"{RuntimeGeneratedFunction{(:ˍ₋arg1, :ˍ₋arg2, :t), ModelingToolkit.var"#_RGF_ModTag", ModelingToolkit.var"#_RGF_ModTag", (0x2cce5cf2, 0xd20b0d73, 0xd14ed8a6, 0xa4d56c4f, 0x72958ea1), Nothing}, RuntimeGeneratedFunction{(:ˍ₋out, :ˍ₋arg1, :ˍ₋arg2, :t), ModelingToolkit.var"#_RGF_ModTag", ModelingToolkit.var"#_RGF_ModTag", (0x7f3c227e, 0x8f116bb1, 0xb3528ad5, 0x9c57c605, 0x60f580c3), Nothing}}, UniformScaling{Bool}, Nothing, Nothing, Nothing, Nothing, Nothing, Nothing, Nothing, Nothing, Nothing, Nothing, Nothing, ModelingToolkit.var"#852#generated_observed#706"{Bool, ODESystem, Dict{Any, Any}, Vector{Any}}, Nothing, ODESystem, Nothing, Nothing}) (generic function with 1 method)
```
It's a RuntimeGeneratedFunction. We can find the code for this system using the retrieval
command on the function we want. For example, for the in-place function:
```julia
julia> RuntimeGeneratedFunctions.get_expression(prob.f.f.f_iip)
:((ˍ₋out, ˍ₋arg1, ˍ₋arg2, t)->begin
#= C:\Users\accou\.julia\packages\SymbolicUtils\c0xQb\src\code.jl:373 =#
#= C:\Users\accou\.julia\packages\SymbolicUtils\c0xQb\src\code.jl:374 =#
#= C:\Users\accou\.julia\packages\SymbolicUtils\c0xQb\src\code.jl:375 =#
begin
begin
begin
#= C:\Users\accou\.julia\packages\Symbolics\HIg7O\src\build_function.jl:546 =#
#= C:\Users\accou\.julia\packages\SymbolicUtils\c0xQb\src\code.jl:422 =# @inbounds begin
#= C:\Users\accou\.julia\packages\SymbolicUtils\c0xQb\src\code.jl:418 =#
ˍ₋out[1] = (/)((+)(1, (*)(-1, ˍ₋arg1[1])), ˍ₋arg2[1])
#= C:\Users\accou\.julia\packages\SymbolicUtils\c0xQb\src\code.jl:420 =#
nothing
end
end
end
end
end)
```
or the out-of-place function:
```julia
julia> RuntimeGeneratedFunctions.get_expression(prob.f.f.f_oop)
:((ˍ₋arg1, ˍ₋arg2, t)->begin
#= C:\Users\accou\.julia\packages\SymbolicUtils\c0xQb\src\code.jl:373 =#
#= C:\Users\accou\.julia\packages\SymbolicUtils\c0xQb\src\code.jl:374 =#
#= C:\Users\accou\.julia\packages\SymbolicUtils\c0xQb\src\code.jl:375 =#
begin
begin
begin
#= C:\Users\accou\.julia\packages\SymbolicUtils\c0xQb\src\code.jl:468 =#
(SymbolicUtils.Code.create_array)(typeof(ˍ₋arg1), nothing, Val{1}(), Val{(1,)}(), (/)((+)(1, (*)(-1, ˍ₋arg1[1])), ˍ₋arg2[1]))
end
end
end
end)
```
## Reproducibility
```@raw html
<details><summary>The documentation of this SciML package was built using these direct dependencies,</summary>
```
```@example
using Pkg # hide
Pkg.status() # hide
```
```@raw html
</details>
```
```@raw html
<details><summary>and using this machine and Julia version.</summary>
```
```@example
using InteractiveUtils # hide
versioninfo() # hide
```
```@raw html
</details>
```
```@raw html
<details><summary>A more complete overview of all dependencies and their versions is also provided.</summary>
```
```@example
using Pkg # hide
Pkg.status(; mode = PKGMODE_MANIFEST) # hide
```
```@raw html
</details>
```
```@eval
using TOML
using Markdown
version = TOML.parse(read("../../Project.toml", String))["version"]
name = TOML.parse(read("../../Project.toml", String))["name"]
link_manifest = "https://github.com/SciML/" * name * ".jl/tree/gh-pages/v" * version *
"/assets/Manifest.toml"
link_project = "https://github.com/SciML/" * name * ".jl/tree/gh-pages/v" * version *
"/assets/Project.toml"
Markdown.parse("""You can also download the
[manifest]($link_manifest)
file and the
[project]($link_project)
file.
""")
```
| RuntimeGeneratedFunctions | https://github.com/SciML/RuntimeGeneratedFunctions.jl.git |
|
[
"MIT"
] | 1.1.7 | 33197e1a5f9369429d71684e67541bce35b6b82e | code | 16571 | module AlphaStableDistributions
using LinearAlgebra, Statistics, Random
using StatsBase, Distributions, StaticArrays
using FileIO, JLD2, SpecialFunctions, ToeplitzMatrices
using Interpolations
export AlphaStable, SymmetricAlphaStable, AlphaSubGaussian, fit
"""
    AlphaStable(; α=1.5, β=0.0, scale=1.0, location=0.0)

An α-stable distribution S(α, β, c, δ): `α` is the characteristic exponent
(tail heaviness; α = 2 is Gaussian), `β` ∈ [-1, 1] the skewness, `scale` the
scale parameter c (dispersion^(1/α)) and `location` the shift δ.
"""
Base.@kwdef struct AlphaStable{T} <: Distributions.ContinuousUnivariateDistribution
    α::T = 1.5             # characteristic exponent
    β::T = zero(α)         # skewness
    scale::T = one(α)      # scale c
    location::T = zero(α)  # location δ (the mean when α > 1)
end
# All-integer positional arguments are lifted to floating point so the field
# type T is always a float.
AlphaStable(α::Integer, β::Integer, scale::Integer, location::Integer) =
    AlphaStable(float(α), float(β), float(scale), float(location))

# Mixed-type positional arguments: promote to a common element type first; the
# auto-generated four-arg constructor (all the same T) then takes over.
AlphaStable(α, β, scale, location) = AlphaStable(promote(α, β, scale, location)...)
# Parameter tuple (α, β, scale, location) in the S(α, β, c, δ) parameterization.
Distributions.params(d::AlphaStable) = (d.α, d.β, d.scale, d.location)

# NOTE(review): the remaining Distributions interface methods (sampler, pdf,
# logpdf, cdf, quantile, minimum, maximum, insupport, modes, skewness,
# kurtosis, entropy, mgf, ...) are intentionally not implemented — stable
# densities generally have no closed form.

# The mean exists only for α > 1, where it equals the location parameter δ.
function Statistics.mean(d::AlphaStable)
    d.α > 1 || error("Not defined")
    return d.location
end

# The variance is finite only in the Gaussian case α == 2, where it is 2c².
function Statistics.var(d::AlphaStable)
    return d.α == 2 ? 2 * d.scale^2 : Inf
end
"""
    cf(d::AlphaStable, t::Real)

Characteristic function φ(t) = E[exp(itX)] of the stable law `d` in the
S(α, β, c, δ) parameterization. The α == 1 family uses the logarithmic
correction Φ = -(2/π)·log|t| instead of tan(πα/2).
"""
function Distributions.cf(d::AlphaStable{S}, t::Real) where {S}
    T = float(promote_type(S, typeof(t)))
    α, β, c, δ = params(d)
    # φ(0) = 1 for every characteristic function. Returning early also fixes a
    # bug: for α == 1 and t == 0 the original evaluated 0 · log(0) → 0 · Inf,
    # propagating NaN instead of 1.
    iszero(t) && return one(Complex{T})
    Φ = if α == one(α)
        T(-2/π * log(abs(t)))
    else
        T(tan(π*α/2))
    end
    exp(im*t*δ - abs(c*t)^α * (1 - im*β*sign(t)*Φ))
end
# lookup tables from McCulloch (1986)

# Tail statistic values for the symmetric estimator: _ena[j] corresponds to
# α = (22 - j)/10, so _ena[1] = 2.4388 is the Gaussian (α = 2) entry.
const _ena = [
    2.4388
    2.5120
    2.6080
    2.7369
    2.9115
    3.1480
    3.4635
    3.8824
    4.4468
    5.2172
    6.3140
    7.9098
    10.4480
    14.8378
    23.4831
    44.2813
]

# Grid of the tail-weight statistic ν_α (rows of ψ₁/ψ₂).
const _να = [
    2.439
    2.500
    2.600
    2.700
    2.800
    3.000
    3.200
    3.500
    4.000
    5.000
    6.000
    8.000
    10.000
    15.000
    25.000
]

# Grid of the skewness statistic |ν_β| (columns of ψ₁/ψ₂).
const _νβ = [
    0.0
    0.1
    0.2
    0.3
    0.5
    0.7
    1.0
]

# ψ₁(ν_α, |ν_β|): tabulated α estimate (McCulloch 1986, Table III).
const ψ₁ = [
    2.000 2.000 2.000 2.000 2.000 2.000 2.000
    1.916 1.924 1.924 1.924 1.924 1.924 1.924
    1.808 1.813 1.829 1.829 1.829 1.829 1.829
    1.729 1.730 1.737 1.745 1.745 1.745 1.745
    1.664 1.663 1.663 1.668 1.676 1.676 1.676
    1.563 1.560 1.553 1.548 1.547 1.547 1.547
    1.484 1.480 1.471 1.460 1.448 1.438 1.438
    1.391 1.386 1.378 1.364 1.337 1.318 1.318
    1.279 1.273 1.266 1.250 1.210 1.184 1.150
    1.128 1.121 1.114 1.101 1.067 1.027 0.973
    1.029 1.021 1.014 1.004 0.974 0.935 0.874
    0.896 0.892 0.887 0.883 0.855 0.823 0.769
    0.818 0.812 0.806 0.801 0.780 0.756 0.691
    0.698 0.695 0.692 0.689 0.676 0.656 0.595
    0.593 0.590 0.588 0.586 0.579 0.563 0.513
]

# ψ₂(ν_α, |ν_β|): tabulated |β| estimate (McCulloch 1986, Table IV).
const ψ₂ = [
    0.0 2.160 1.000 1.000 1.000 1.000 1.000
    0.0 1.592 3.390 1.000 1.000 1.000 1.000
    0.0 0.759 1.800 1.000 1.000 1.000 1.000
    0.0 0.482 1.048 1.694 2.229 1.000 1.000
    0.0 0.360 0.760 1.232 2.229 1.000 1.000
    0.0 0.253 0.518 0.823 1.575 1.000 1.000
    0.0 0.203 0.410 0.632 1.244 1.906 1.000
    0.0 0.165 0.332 0.499 0.943 1.560 1.000
    0.0 0.136 0.271 0.404 0.689 1.230 2.195
    0.0 0.109 0.216 0.323 0.539 0.827 1.917
    0.0 0.096 0.190 0.284 0.472 0.693 1.759
    0.0 0.082 0.163 0.243 0.412 0.601 1.596
    0.0 0.074 0.147 0.220 0.377 0.546 1.482
    0.0 0.064 0.128 0.191 0.330 0.478 1.362
    0.0 0.056 0.112 0.167 0.285 0.428 1.274
]

# Grid of α values (rows of ϕ₃/ϕ₅).
const _α = [
    0.5
    0.6
    0.7
    0.8
    0.9
    1.0
    1.1
    1.2
    1.3
    1.4
    1.5
    1.6
    1.7
    1.8
    1.9
    2.0
]

# Grid of |β| values (columns of ϕ₃/ϕ₅).
const _β = [
    0.0
    0.25
    0.50
    0.75
    1.00
]

# ϕ₃(α, |β|): standardized 0.75/0.25 interfractile range, used to recover the
# scale parameter c.
const ϕ₃ = [
    2.588 3.073 4.534 6.636 9.144
    2.337 2.635 3.542 4.808 6.247
    2.189 2.392 3.004 3.844 4.775
    2.098 2.244 2.676 3.265 3.912
    2.040 2.149 2.461 2.886 3.356
    2.000 2.085 2.311 2.624 2.973
    1.980 2.040 2.205 2.435 2.696
    1.965 2.007 2.125 2.294 2.491
    1.955 1.984 2.067 2.188 2.333
    1.946 1.967 2.022 2.106 2.211
    1.939 1.952 1.988 2.045 2.116
    1.933 1.940 1.962 1.997 2.043
    1.927 1.930 1.943 1.961 1.987
    1.921 1.922 1.927 1.936 1.947
    1.914 1.915 1.916 1.918 1.921
    1.908 1.908 1.908 1.908 1.908
]

# ϕ₅(α, |β|): standardized median shift, used to recover the location δ.
const ϕ₅ = [
    0.0 -0.061 -0.279 -0.659 -1.198
    0.0 -0.078 -0.272 -0.581 -0.997
    0.0 -0.089 -0.262 -0.52 -0.853
    0.0 -0.096 -0.25 -0.469 -0.742
    0.0 -0.099 -0.237 -0.424 -0.652
    0.0 -0.098 -0.223 -0.383 -0.576
    0.0 -0.095 -0.208 -0.346 -0.508
    0.0 -0.09 -0.192 -0.31 -0.447
    0.0 -0.084 -0.173 -0.276 -0.39
    0.0 -0.075 -0.154 -0.241 -0.335
    0.0 -0.066 -0.134 -0.206 -0.283
    0.0 -0.056 -0.111 -0.17 -0.232
    0.0 -0.043 -0.088 -0.132 -0.179
    0.0 -0.03 -0.061 -0.092 -0.123
    0.0 -0.017 -0.032 -0.049 -0.064
    0.0 0.0 0.0 0.0 0.0
]
"""
    fit(d::Type{<:AlphaStable}, x; alg=QuickSort)

Fit an α stable distribution to data.

returns `AlphaStable`

α∈[0.6,2.0], β∈[-1,1] , c∈[0,∞] and δ∈[-∞,∞] are the characteristic exponent,
skewness parameter, scale parameter (dispersion^1/α) and location parameter respectively.

α, β, c and δ are computed based on McCulloch (1986) fractile.
"""
function Distributions.fit(::Type{<:AlphaStable}, x::AbstractArray{T}, alg=QuickSort) where {T<:AbstractFloat}
    sx = sort(x, alg=alg)
    # Sample fractiles at the probabilities McCulloch's estimator requires.
    p = quantile.(Ref(sx), (0.05, 0.25, 0.28, 0.5, 0.72, 0.75, 0.95), sorted=true)
    # Tail-weight and skewness statistics ν_α, ν_β built from the fractiles.
    να = (p[7]-p[1]) / (p[6]-p[2])
    νβ = (p[7]+p[1]-2p[4]) / (p[7]-p[1])
    # Clamp ν_α into the tabulated range before interpolating.
    (να < _να[1]) && (να = _να[1])
    (να > _να[end]) && (να = _να[end])
    # α from table ψ₁ via bilinear interpolation on (ν_α, |ν_β|).
    itp₁ = interpolate((_να, _νβ), ψ₁, Gridded(Linear()))
    α = itp₁(να, abs(νβ))
    # |β| from table ψ₂; the sign is taken from ν_β.
    itp₂ = interpolate((_να, _νβ), ψ₂, Gridded(Linear()))
    β = sign(νβ) * itp₂(να, abs(νβ))
    (β > 1.0) && (β = 1.0)
    (β < -1.0) && (β = -1.0)
    # scale c from the 0.75/0.25 interfractile range and table ϕ₃.
    itp₃ = interpolate((_α, _β), ϕ₃, Gridded(Linear()))
    c = (p[6]-p[2]) / itp₃(α, abs(β))
    # ζ (shifted location) from the median and table ϕ₅.
    itp₄ = interpolate((_α, _β), ϕ₅, Gridded(Linear()))
    ζ = p[4] + c * sign(β) * itp₄(α, abs(β))
    # Near α == 1 the tan(πα/2) correction blows up, so use ζ directly.
    if abs(α - 1.0) < 0.05
        δ = ζ
    else
        δ = ζ - β * c * tan(π*α/2)
    end
    return AlphaStable(α=T(α), β=T(β), scale=T(c), location=T(δ))
end
"""
    SymmetricAlphaStable(; α=1.5, scale=1.0, location=0.0)

A symmetric (β = 0) α-stable distribution S(α, 0, c, δ).
"""
Base.@kwdef struct SymmetricAlphaStable{T} <: Distributions.ContinuousUnivariateDistribution
    α::T = 1.5           # characteristic exponent
    scale::T = one(α)    # scale c
    location::T = zero(α)  # location δ
end
# Parameter tuple (α, scale, location).
Distributions.params(d::SymmetricAlphaStable) = (d.α, d.scale, d.location)

# Characteristic function and sampling delegate to the equivalent general
# AlphaStable with β = 0.
Distributions.cf(d::SymmetricAlphaStable, t::Real) = cf(AlphaStable(d), t)

Random.rand(rng::AbstractRNG, d::SymmetricAlphaStable) = rand(rng, AlphaStable(d))

Base.eltype(::Type{<:SymmetricAlphaStable{T}}) where {T<:AbstractFloat} = T

# Conversion to the four-parameter family (β defaults to zero).
function AlphaStable(d::SymmetricAlphaStable)
    AlphaStable(α=d.α,scale=d.scale,location=d.location)
end
"""
    fit(d::Type{<:SymmetricAlphaStable}, x; alg=QuickSort)

Fit a symmetric α stable distribution to data.

returns `SymmetricAlphaStable`

α∈[0.5,2], c∈[0,∞] and δ∈[-∞,∞] are the characteristic exponent, scale parameter
(dispersion^1/α) and location parameter respectively.

α is computed based on McCulloch (1986) fractile.
scale is computed based on Fama & Roll (1971) fractile.
location is the 50% trimmed mean of the sample.
"""
function Distributions.fit(::Type{<:SymmetricAlphaStable}, x::AbstractArray{T}, alg=QuickSort) where {T<:AbstractFloat}
    sx = sort(x, alg=alg)
    # location: 50% trimmed mean (mean of the central half of the sorted sample)
    δ = mean(@view(sx[end÷4:(3*end)÷4]))
    p = quantile.(Ref(sx), (0.05, 0.25, 0.28, 0.72, 0.75, 0.95), sorted=true)
    # Fama & Roll (1971): scale from the 0.72/0.28 interfractile range.
    c = (p[4]-p[3]) / 1.654
    # McCulloch (1986): tail-weight statistic; larger values ⇒ heavier tails.
    an = (p[6]-p[1]) / (p[5]-p[2])
    if an <= _ena[1]
        # At (or below) the Gaussian table entry ⇒ α = 2. Using `<=` here fixes
        # a BoundsError in the original (`an < 2.4388`): for an == _ena[1]
        # exactly, findfirst returned j == 1 and the interpolation indexed
        # _ena[0].
        α = 2.0
    else
        j = findfirst(>=(an), _ena)          # first table entry ≥ an; j ≥ 2 here
        j === nothing && (j = length(_ena))  # beyond the table: extrapolate last segment
        t = (an - _ena[j-1]) / (_ena[j] - _ena[j-1])
        α = (22 - j - t) / 10                # _ena[j] corresponds to α = (22 - j)/10
    end
    α < 0.5 && (α = 0.5)                     # clamp to the table's lower limit
    return SymmetricAlphaStable(α=T(α), scale=T(c), location=T(δ))
end
"""
    rand(rng::AbstractRNG, d::AlphaStable)

Generate one independent stable random number.

:param α: characteristic exponent (0.1 to 2.0)
:param β: skew (-1 to +1)
:param scale: scale parameter
:param loc: location parameter (mean for α > 1, median/mode when β=0)

This implementation is based on the method in J.M. Chambers, C.L. Mallows
and B.W. Stuck, "A Method for Simulating Stable Random Variables," JASA 71 (1976): 340-4.
McCulloch's MATLAB implementation (1996) served as a reference in developing this code.
"""
function Base.rand(rng::AbstractRNG, d::AlphaStable{T}) where {T<:AbstractFloat}
    α=d.α; β=d.β; sc=d.scale; loc=d.location
    (α < 0.1 || α > 2) && throw(DomainError(α, "α must be in the range 0.1 to 2"))
    abs(β) > 1 && throw(DomainError(β, "β must be in the range -1 to 1"))
    # ϕ ~ Uniform(-π/2, π/2), the CMS angular variate.
    # added eps(T) to prevent DomainError: x ^ y where x < 0
    ϕ = (rand(rng, T) - T(0.5)) * π * (one(T) - eps(T))
    # Cauchy special case: α == 1, β == 0.
    if α == one(T) && β == zero(T)
        return loc + sc * tan(ϕ)
    end
    # w ~ Exp(1), the CMS exponential variate.
    w = -log(rand(rng, T))
    # Gaussian special case (α == 2): scale is c, so the std dev is c·√2.
    α == 2 && (return loc + 2*sc*sqrt(w)*sin(ϕ))
    # Symmetric case (β == 0): simplified CMS formula.
    β == zero(T) && (return loc + sc * ((cos((one(T)-α)*ϕ) / w)^(one(T)/α - one(T)) * sin(α * ϕ) / cos(ϕ)^(one(T)/α)))
    cosϕ = cos(ϕ)
    # General skewed case, away from α == 1 where tan(πα/2) is well behaved.
    if abs(α - one(T)) > 1e-8
        ζ = β * tan(π * α / 2)
        aϕ = α * ϕ
        a1ϕ = (one(T) - α) * ϕ
        return loc + sc * ((((sin(aϕ) + ζ * cos(aϕ))/cosϕ) * ((cos(a1ϕ) + ζ*sin(a1ϕ)) / (w*cosϕ))^((one(T)-α)/α)))
    end
    # Skewed case near α == 1: logarithmic CMS branch.
    bϕ = π/2 + β*ϕ
    x = 2/π * (bϕ * tan(ϕ) - β * log(π/2*w*cosϕ/bϕ))
    α == one(T) || (x += β * tan(π*α/2))
    return loc + sc * x
end

Base.eltype(::Type{<:AlphaStable{T}}) where {T<:AbstractFloat} = T
"""
    AlphaSubGaussian(; α=1.5, R=..., n)

Generate alpha-sub-Gaussian (aSG) random numbers.

The implementation is based on https://github.com/ahmd-mahm/alpha-SGNm/blob/master/asgn.m

Reference:
A. Mahmood and M. Chitre, "Generating random variates for stable sub-Gaussian processes
with memory", Signal Processing, Volume 131, Pages 271-279, 2017.
(https://doi.org/10.1016/j.sigpro.2016.08.016.)

# Arguments
- `α`: characteristic exponent associated with the aSGN(m) process. This is
  a scalar input and should lie within `collect(1.1:0.01:1.98)`.
- `R`: covariance matrix of any adjacent `m+1` samples in an aSGN(m) process.
  The dimension of `R` is equal to `m+1`. It should be a symmetric toeplitz matrix.
  The maximum acceptable size of `R` is `10x10`
- `n`: number of samples required

# Examples
```jldoctest
julia> x = rand(AlphaSubGaussian(n=1000))
```
"""
Base.@kwdef struct AlphaSubGaussian{T<:AbstractFloat,M<:AbstractMatrix} <: Distributions.ContinuousUnivariateDistribution
    α::T = 1.50  # characteristic exponent
    # Default (m+1)×(m+1) = 5×5 Toeplitz covariance of adjacent samples.
    R::M = SMatrix{5,5}(collect(SymmetricToeplitz([1.0000, 0.5804, 0.2140, 0.1444, -0.0135])))
    n::Int       # number of samples to generate
end
"""
    subgausscondprobtabulate(α, x1, x2_ind, invRx1, invR, vjoint, nmin, nmax, step, rind, kappa, k1, k2, kmarg)

Generates the conditional probability f(X2|X1) if [X1, X2] is a sub-Gaussian
stable random vector such that X1(i)~X2~S(alpha,delta) and rho is the correlation
coefficient of the underlying Gaussian vector. We assume the joint-probability is given by f(X1,X2).

The radial densities are read from the tabulated `vjoint` (log-spaced grid
`rind`), with a linear head approximation below `nmin` and the analytic
power-law tail above `nmax`.
"""
function subgausscondprobtabulate(α, x1, x2_ind, invRx1, invR, vjoint, nmin, nmax, step, rind, kappa, k1, k2, kmarg)::Float64
    m = length(x1)
    # Mahalanobis radii of the conditioning block x1 and of the full vector [x1; x2].
    r1 = sqrt(x1'*invRx1*x1)
    x = SVector{length(x1)+1, Float64}(x1..., x2_ind)
    r = sqrt(x'*invR*x)
    # m- and (m+1)-dimensional radial densities (row m / m+1 of the table).
    vjointR1 = _radial_density(α, r1, m, vjoint, nmin, nmax, step, rind, k1[1], k2[1])
    vjointR = _radial_density(α, r, m+1, vjoint, nmin, nmax, step, rind, k1[2], k2[2])
    (1/sqrt(kappa))*kmarg*vjointR/vjointR1
end

# Evaluate the m-dimensional radial density v_m(r) from the lookup table:
# linear interpolation from k2m at r=0 below `nmin`, interpolation on the
# log-spaced grid in between, and the asymptotic tail α·k1m·r^(-α-m) above
# `nmax`. Extracted helper: the original duplicated this logic for r1 and r.
function _radial_density(α, r, m, vjoint, nmin, nmax, step, rind, k1m, k2m)
    if r < nmin
        grad = (vjoint[m, 1] - k2m) / nmin
        return grad*r + k2m
    elseif r > nmax
        return α*k1m*(r^(-α-m))
    else
        ti = (log10(r) - log10(nmin))/step + 1
        lo = floor(Int, ti)
        hi = ceil(Int, ti)
        # When ti lands exactly on a grid node, lo == hi and the original
        # two-point formula evaluated 0/0 → NaN; return the tabulated value.
        lo == hi && return vjoint[m, lo]
        grad = (vjoint[m, lo] - vjoint[m, hi]) / (rind[lo] - rind[hi])
        cons = vjoint[m, lo] - grad*rind[lo]
        return grad*r + cons
    end
end
"""
    rand!(rng, d::AlphaSubGaussian, x)

Fill the first `d.n` entries of `x` with one sample path of the aSGN(m)
process. The first `m` samples are drawn jointly; subsequent samples are drawn
from the tabulated conditional density via rejection sampling with a Student-t
proposal. Requires the precomputed table file `vr_repo/vr_alpha=<α>.jld2`.
"""
function Random.rand!(rng::AbstractRNG, d::AlphaSubGaussian{T}, x::AbstractArray{T}) where {T<:AbstractFloat}
    α=d.α; R=d.R; n=d.n
    length(x) >= n || throw(ArgumentError("length of x must be at least n"))
    # Only these α values have a tabulated radial density on disk.
    α ∈ 1.10:0.01:1.98 || throw(DomainError(α, "α must lie within `1.10:0.01:1.98`"))
    m = size(R, 1)-1
    # Normalization constants of the radial densities (dimensions m and m+1).
    funk1 = x -> (2^α)*sin(π*α/2)*gamma((α+2)/2)*gamma((α+x)/2)/(gamma(x/2)*π*α/2)
    funk2 = x -> 4*gamma(x/α)/((α*2^2)*gamma(x/2)^2)
    funkmarg = x -> gamma(x/2)/(gamma((x-1)/2)*sqrt(π))
    c = 1.2  # rejection-sampling envelope constant
    k1 = (funk1(m), funk1(m+1))
    k2 = (funk2(m), funk2(m+1))
    kmarg = funkmarg(m+1)
    onetom = StaticArrays.SOneTo(m)
    # Conditional variance ratio and precision matrices of the m-block / full block.
    kappa = det(R)/det(R[onetom, onetom])
    invR = inv(R)
    invRx1 = inv(R[onetom, onetom])
    sigrootx1 = cholesky(R[onetom, onetom]).L
    # Linear predictor of x[i] from the previous m samples.
    modefactor = R[end, onetom]'/R[onetom, onetom]
    # Load the tabulated joint radial density for this α.
    matdict = load(joinpath(@__DIR__(),"vr_repo/vr_alpha=$(α).jld2"))
    nmax, nmin, res, rind, vjoint = matdict["Nmax"]::Float64, matdict["Nmin"]::Float64, matdict["res"]::Float64, vec(matdict["rind"])::Vector{Float64}, matdict["vJoint"]::Matrix{Float64}
    step = (log10(nmax)-log10(nmin))/res
    m>size(vjoint, 1)-1 && throw(DomainError(R, "The dimensions of `R` exceed the maximum possible 10x10"))
    # Sub-Gaussian representation: x = √(A·CT)·(L·S) with A a positive
    # (α/2)-stable variate, CT ~ χ²(m) and S uniform on the unit sphere.
    A = rand(rng,AlphaStable(T(α/2), one(T), T(2*cos(π*α/4)^(2.0/α)), zero(T)))
    CT = rand(rng,Chisq(m))
    S = randn(rng,m)
    S = S/sqrt(sum(abs2,S))
    xtmp = ((sigrootx1*sqrt(A*CT))*S)'
    if n<=m
        copyto!(x, @view(xtmp[1:n]))
    else
        # x = zeros(n)
        x[onetom] = xtmp
        vstud = α+m  # degrees of freedom of the Student-t proposal
        norms = pdf(TDist(vstud), 0.0)
        @inbounds for i = m+1:n
            x1 = SVector{m,Float64}(view(x,i-m:i-1))
            mode = modefactor*x1
            # Conditional density height at the mode, used to match the
            # proposal scale to the target.
            norm1 = subgausscondprobtabulate(α, x1, mode, invRx1, invR, vjoint, nmin, nmax, step, rind, kappa, k1, k2, kmarg)
            notaccept = true
            # Accept/reject: propose v from a scaled/shifted Student-t,
            # accept when c·u ≤ f(v)/g(v).
            while notaccept
                u = rand(rng)
                v = (norms/norm1)*rand(rng,TDist(vstud)) + mode
                gv = (norm1/norms)*pdf(TDist(vstud), (v-mode)*(norm1/norms))
                fv = subgausscondprobtabulate(α, x1, v, invRx1, invR, vjoint, nmin, nmax, step, rind, kappa, k1, k2, kmarg)
                if c*u <= fv/gv
                    x[i] = v
                    notaccept = false
                end
            end
        end
    end
    x
end
# Draw a fresh length-n aSGN(m) sample path by filling a new buffer in place.
function Base.rand(rng::AbstractRNG, d::AlphaSubGaussian)
    buf = zeros(eltype(d), d.n)
    return rand!(rng, d, buf)
end

# aSGN(m) sample paths are always generated in double precision.
Base.eltype(::Type{<:AlphaSubGaussian}) = Float64
"""
    fit(d::Type{<:AlphaSubGaussian}, x, m; p=1.0)

Fit an aSGN(m) model to data via the covariation method.

The covariation method requires an additional parameter `p`. Ideally, 1 < p < α. In most practical impulsive scenarios p=1.0 is sufficient.

`m` is the number of lags in the covariance matrix.

The implementation is based on https://github.com/ahmd-mahm/alpha-SGNm/blob/master/param_est/asgnfit.m

Reference:
A. Mahmood and M. Chitre, "Generating random variates for stable sub-Gaussian processes
with memory", Signal Processing, Volume 131, Pages 271-279, 2017.
(https://doi.org/10.1016/j.sigpro.2016.08.016.)
"""
function Distributions.fit(d::Type{<:AlphaSubGaussian}, x::AbstractVector{T}, m::Integer; p=one(T)) where T
    # Marginal fit gives α and the scale used to normalize the covariations.
    d1 = fit(AlphaStable, x)
    α = d1.α; sc=d1.scale
    cov = zeros(T, m+1, m+1)
    xlen = length(x)
    # Empirical p-th moment normalization constant.
    c = ((sum(x->abs(x)^p, x)/xlen)^(1/p))/sc
    for i in 1:m
        # Reshape the series into non-overlapping pairs at lag i, trimming the
        # tail so the pairing comes out even.
        tempxlen = xlen-mod(xlen, i)
        xtemp = reshape(x[1:end-mod(xlen, i)], i, tempxlen÷i)
        if mod(tempxlen÷i, 2) != 0
            xtemp = xtemp[:, 1:end-1]
            tempxlen = size(xtemp, 1)*size(xtemp, 2)
        end
        xtemp = reshape(xtemp', 2, tempxlen÷2)
        # Sample covariation at lag i (signed-power estimator of order p).
        @views r = (2/(c^p))*(sc^(2-p))*(xtemp[1, :]'*((sign.(xtemp[2, :]).*(abs.(xtemp[2, :]).^(p-1)))))/(tempxlen/2)
        cov[diagind(cov, i)] .+= r
    end
    # Symmetrize, put 2·sc² on the diagonal, then normalize so R[1,1] == 1.
    cov = (cov+cov')+2*(sc^2)*I(m+1)
    cov ./= 2*sc^2
    AlphaSubGaussian(α=α, R=cov, n=length(x))
end
end # module
| AlphaStableDistributions | https://github.com/org-arl/AlphaStableDistributions.jl.git |
|
[
"MIT"
] | 1.1.7 | 33197e1a5f9369429d71684e67541bce35b6b82e | code | 6979 | using AlphaStableDistributions
using Test, Random, Distributions
# Seeding the RNG identically must reproduce identical samples.
@testset "Reproducibility" begin
    @test rand(MersenneTwister(0), AlphaStable() ) == rand(MersenneTwister(0), AlphaStable() )
    @test rand(MersenneTwister(0), SymmetricAlphaStable()) == rand(MersenneTwister(0), SymmetricAlphaStable())
    @test rand(MersenneTwister(0), AlphaSubGaussian(n=10)) == rand(MersenneTwister(0), AlphaSubGaussian(n=10))
end

# Characteristic-function properties: φ(0) = 1, |φ(t)| ≤ 1, Float32/Float64
# agreement, stability under convolution, and agreement with the closed-form
# Normal/Cauchy/Levy special cases.
@testset "cf" begin
    rng = MersenneTwister(1)
    for _ in 1:100
        d = AlphaStable(
            α=rand(rng,Uniform(0,2)),
            β=rand(rng,Uniform(-1,1)),
            scale=rand(rng,Uniform(0,10)),
            location=rand(rng,Uniform(0,10)),
        )
        @test cf(d,0) ≈ 1
        x = rand(rng,Uniform(-10,10))
        @test abs(cf(d, x)) <= 1
        d32 = AlphaStable(Float32.(Distributions.params(d))...)
        @test cf(d32, Float32(x)) isa Complex{Float32}
        @test cf(d32, Float32(x)) ≈ cf(d,x) atol=100*eps(Float32)
    end
    for _ in 1:100
        # test stability under convolution
        d = AlphaStable(
            α=rand(rng,Uniform(0.1,2)),
            scale=1.0,
            location=0.0,
        )
        x = rand(rng,Uniform(-1,1))
        n = rand(rng,1:10^6)
        s = n^inv(d.α)
        @test cf(d, x) ≈ cf(d, x/s)^n
    end
    # α == 2 with scale 1/√2 is the standard Normal.
    xs = range(-1,1,length=100)
    d1 = SymmetricAlphaStable(α=2.0, scale=1/sqrt(2), location=0.0)
    d2 = AlphaStable(d1)
    d_ref = Normal(0.0,1.0)
    @test cf.(Ref(d1),xs) ≈ cf.(Ref(d_ref),xs)
    @test cf.(Ref(d2),xs) ≈ cf.(Ref(d_ref),xs)
    # α == 1, β == 0 is the Cauchy distribution.
    xs = range(-10,10,length=100)
    d1 = SymmetricAlphaStable(α=1.0, scale=1.0, location=0.0)
    d2 = AlphaStable(d1)
    d_ref = Cauchy(0.0,1.0)
    @test cf.(Ref(d1),xs) ≈ cf.(Ref(d_ref),xs)
    @test cf.(Ref(d2),xs) ≈ cf.(Ref(d_ref),xs)
    d1 = SymmetricAlphaStable(α=1.0, scale=17.9, location=42.0)
    d2 = AlphaStable(d1)
    d_ref = Cauchy(42.0,17.9)
    @test cf.(Ref(d1),xs) ≈ cf.(Ref(d_ref),xs)
    @test cf.(Ref(d2),xs) ≈ cf.(Ref(d_ref),xs)
    # α == 1/2, β == 1 is the Lévy distribution.
    d1 = AlphaStable(α=1/2, β=1.0, scale=12.0, location=-7.2)
    d_ref = Levy(-7.2, 12.0)
    @test cf.(Ref(d1),xs) ≈ cf.(Ref(d_ref),xs)
    # cf must be type-stable with the documented complex result types.
    @test @inferred(cf(AlphaStable(α = 1.0) , 1.0)) isa Complex{Float64}
    @test @inferred(cf(AlphaStable(α = 1 ) , 1 )) isa Complex{Float64}
    @test @inferred(cf(AlphaStable(α = 1.0) , 1f0)) isa Complex{Float64}
    @test @inferred(cf(AlphaStable(α = 1f0) , 1 )) isa Complex{Float32}
    @test @inferred(cf(AlphaStable(α = 1f0) , 1f0)) isa Complex{Float32}
end

# Round-trip tests: sample from a known distribution, fit, and check the
# recovered parameters (loose tolerances — these are fractile estimators).
@testset "AlphaStableDistributions.jl" begin
    @test AlphaStable(α=1, scale=1.5) === AlphaStable(α=1.0, scale=1.5)
    @test Distributions.params(AlphaStable()) === (1.5, 0.0, 1.0, 0.0)
    @test Distributions.params(SymmetricAlphaStable()) === (1.5, 1.0, 0.0)
    rng = MersenneTwister(0)
    sampletypes = [Float32,Float64]
    stabletypes = [AlphaStable,SymmetricAlphaStable]
    αs = [0.6:0.1:2,1:0.1:2]
    betas = [-1:0.5:1,0.0]
    sc = 2.0
    for sampletype ∈ sampletypes
        for (i, stabletype) in enumerate(stabletypes)
            for α in αs[i]
                for β in betas[i]
                    d1 = if stabletype == AlphaStable
                        stabletype(α=sampletype(α), β=sampletype(β), scale=sampletype(sc))
                    else
                        stabletype(α=sampletype(α), scale=sampletype(sc))
                    end
                    s = rand(rng, d1, 10^6)
                    @test eltype(s) == sampletype
                    d2 = fit(stabletype, s)
                    @test typeof(d2.α) == sampletype
                    @test d1.α ≈ d2.α rtol=0.1
                    if (stabletype != SymmetricAlphaStable) && (α != 2)
                        @test d1.β ≈ d2.β atol=0.2
                    end
                    # the quantile method is less accurate
                    @test d1.scale ≈ d2.scale rtol=0.2 * sc
                    @test d1.location ≈ d2.location atol=0.9 * sc
                end
            end
            # Gaussian data should fit as α ≈ 2 with scale σ/√2.
            xnormal = rand(rng,Normal(3.0, 4.0), 96000)
            d = fit(stabletype, xnormal)
            @test d.α ≈ 2 rtol=0.2
            stabletype != SymmetricAlphaStable && @test d.β ≈ 0 atol=0.2
            @test d.scale ≈ 4/√2 rtol=0.2
            @test d.location ≈ 3 rtol=0.1
            # Cauchy data should fit as α ≈ 1.
            xcauchy = rand(rng,Cauchy(3.0, 4.0), 96000)
            d = fit(stabletype, xcauchy)
            @test d.α ≈ 1 rtol=0.2
            stabletype != SymmetricAlphaStable && @test d.β ≈ 0 atol=0.2
            @test d.scale ≈ 4 rtol=0.2
            @test d.location ≈ 3 rtol=0.1
        end
    end
    # aSGN(m) samples should have stable marginals with the requested α.
    for α in 1.1:0.1:1.9
        d = AlphaSubGaussian(α=α, n=96000)
        x = rand(rng,d)
        x2 = copy(x)
        rand!(rng,d, x2)
        @test x != x2
        d3 = fit(AlphaStable, x)
        @test d3.α ≈ α rtol=0.2
        @test d3.β ≈ 0 atol=0.2
        @test d3.scale ≈ 1 rtol=0.2
        @test d3.location ≈ 0 atol=0.2
    end
    # Round-trip the aSGN(m) covariation fit.
    d4 = AlphaSubGaussian(α=1.5, n=96000)
    m = size(d4.R, 1) - 1
    x = rand(rng,d4)
    d5 = fit(AlphaSubGaussian, x, m, p=1.0)
    @test d4.α ≈ d5.α rtol=0.1
    @test d4.R ≈ d5.R rtol=0.1
end
# 362.499 ms (4620903 allocations: 227.64 MiB)
# 346.520 ms (4621052 allocations: 209.62 MiB) # StaticArrays in outer fun
# 345.925 ms (4225524 allocations: 167.66 MiB) # tempind to tuple
# 395.606 ms (3637770 allocations: 164.76 MiB) # x1 to SVector
# 336.877 ms (10125987 allocations: 236.71 MiB) # typeassert on subgprt
# 328.315 ms (3636312 allocations: 164.69 MiB) # revert x1 svector
# 320.845 ms (3440006 allocations: 161.71 MiB)
# 210.449 ms (3438629 allocations: 86.64 MiB) # full typeinfo in x creation
#
#
# @code_warntype rand(Random.GLOBAL_RNG, AlphaSubGaussian(n=96000))
#
#
#
#
# d = AlphaSubGaussian(n=96)
# using SpecialFunctions
# rand(d)
# rng = Random.GLOBAL_RNG
#
# α=d.α; R=d.R; n=d.n
# α ∈ 1.10:0.01:1.98 || throw(DomainError(α, "α must lie within `1.10:0.01:1.98`"))
# m = size(R, 1)-1
# funk1 = x -> (2^α)*sin(π*α/2)*gamma((α+2)/2)*gamma((α+x)/2)/(gamma(x/2)*π*α/2)
# funk2 = x -> 4*gamma(x/α)/((α*2^2)*gamma(x/2)^2)
# funkmarg = x -> gamma(x/2)/(gamma((x-1)/2)*sqrt(π))
# c = 1.2
# k1 = (funk1(m), funk1(m+1))
# k2 = (funk2(m), funk2(m+1))
#
# kmarg = funkmarg(m+1)
# onetoendm1 = StaticArrays.SOneTo(size(R,1)-1)
# kappa = det(R)/det(R[onetoendm1, onetoendm1])
# invR = inv(R)
# invRx1 = inv(R[onetoendm1, onetoendm1])
# sigrootx1 = cholesky(R[onetoendm1, onetoendm1]).L
# modefactor = R[end, onetoendm1]'*inv(R[onetoendm1, onetoendm1])
#
# @code_warntype AlphaStableDistributions.subgausscondprobtabulate(α, SVector(1.,2,3,4), 3., invRx1, invR, randn(2,2), 0.1, 0.1, 0.1, [1,2], 0.1, k1, k2, kmarg)
#
#
# @MVector zeros(n)
#
#
#
# using Cthulhu
# d = AlphaSubGaussian(n=96)
# @descend_code_warntype rand(Random.GLOBAL_RNG, d)
#
# using AlphaStableDistributions
# d1 = AlphaStable(α=1.5)
# s = rand(d1, 100000)
# using ThreadsX
# @btime fit($AlphaStable, $s, $ThreadsX.MergeSort)
| AlphaStableDistributions | https://github.com/org-arl/AlphaStableDistributions.jl.git |
|
[
"MIT"
] | 1.1.7 | 33197e1a5f9369429d71684e67541bce35b6b82e | docs | 1529 | # AlphaStableDistributions
[](https://github.com/org-arl/AlphaStableDistributions.jl/actions/workflows/CI.yml)
[](https://codecov.io/gh/org-arl/AlphaStableDistributions.jl)
This library is a port of functionality from [arlpy](https://github.com/org-arl/arlpy/blob/master/arlpy/stable.py). The two distributions supported are
- [alpha-stable distribution](https://en.wikipedia.org/wiki/Stable_distribution) (`rand` and `fit`)
- [alpha sub-Gaussian distribution with memory](https://arl.nus.edu.sg/twiki6/pub/ARL/BibEntries/SigProc2016RandomVariate.pdf) (`rand` and `fit`)
## Installation
```julia
using Pkg; pkg"add https://github.com/org-arl/AlphaStableDistributions.jl"
```
## Usage
```julia
julia> using AlphaStableDistributions
julia> d1 = AlphaStable()
AlphaStable{Float64}(α=1.5, β=0.0, scale=1.0, location=0.0)
julia> s = rand(d1, 100000);
julia> d2 = fit(AlphaStable, s, alg=QuickSort) # See ThreadsX.QuickSort for a threaded algorithm
AlphaStable{Float64}(α=1.4748701622930906, β=0.0, scale=1.006340087707924, location=-0.0036724481641865715)
julia> x = rand(AlphaSubGaussian(n=9600));
julia> plot(x)
```

### Credits
Julia code by [@ymtoo](https://github.com/ymtoo) and [@baggepinnen](https://github.com/baggepinnen), original implementation by [@mchitre](https://github.com/mchitre) and others.
| AlphaStableDistributions | https://github.com/org-arl/AlphaStableDistributions.jl.git |
|
[
"MIT"
] | 0.27.8 | 1d6f290a5eb1201cd63574fbc4440c788d5cb38f | code | 9302 | # EXCLUDE FROM TESTING
#
# example how to integrate GPUCompiler.jl with an LLVM Orc-based JIT.
# as it heavily relies on Julia's JIT internals, it breaks easily and is thus not tested.
# the main focus of GPUCompiler is currently not to provide integration with Julia's JIT,
# but only with its code generator.
# TODO: do provide and test this kind of integration as part of GPUCompiler
using GPUCompiler
# Minimal GPU runtime stub: GPUCompiler requires a runtime module providing these
# entry points, but for this host-side JIT example they can all be no-ops.
module TestRuntime
    # dummy methods
    signal_exception() = return          # invoked when compiled code throws
    malloc(sz) = C_NULL                  # device-side allocation: always "fails"
    report_oom(sz) = return
    report_exception(ex) = return
    report_exception_name(ex) = return
    report_exception_frame(idx, func, file, line) = return
end
# Stateless compiler parameters whose only purpose is to select TestRuntime as
# the runtime module for jobs compiled with them.
struct TestCompilerParams <: AbstractCompilerParams end
GPUCompiler.runtime_module(::CompilerJob{<:Any,TestCompilerParams}) = TestRuntime
## JIT integration
using LLVM, LLVM.Interop
# Build a materialization unit that binds `name` to the fixed address `ptr`,
# so the JIT can resolve the symbol to an existing process-local address.
function absolute_symbol_materialization(name, ptr)
    address = LLVM.API.LLVMOrcJITTargetAddress(reinterpret(UInt, ptr))
    flags = LLVM.API.LLVMJITSymbolFlags(LLVM.API.LLVMJITSymbolGenericFlagsExported, 0)
    symbol = LLVM.API.LLVMJITEvaluatedSymbol(address, flags)
    # the C struct name differs between LLVM versions
    gv = if LLVM.version() >= v"15"
        LLVM.API.LLVMOrcCSymbolMapPair(name, symbol)
    else
        LLVM.API.LLVMJITCSymbolMapPair(name, symbol)
    end

    return LLVM.absolute_symbols(Ref(gv))
end
# Register `name` in JITDylib `jd` as an absolute symbol resolved from the
# running process, if the process actually exports it.
#
# Returns `true` when the symbol was found and defined, `false` otherwise.
function define_absolute_symbol(jd, name)
    addr = LLVM.find_symbol(name)
    addr === C_NULL && return false
    LLVM.define(jd, absolute_symbol_materialization(name, addr))
    return true
end
# Bundle of global JIT state: the LLJIT instance plus the two managers needed
# for lazy (trampoline-based) compilation.
struct CompilerInstance
    jit::LLVM.LLJIT
    lctm::LLVM.LazyCallThroughManager
    ism::LLVM.IndirectStubsManager
end
# populated by the set-up code at the bottom of this file
const jit = Ref{CompilerInstance}()
# Return the address of a lazy-compilation trampoline for `job`.
#
# Installs an alias from a fresh entry symbol to a not-yet-materialized target
# symbol; the first call through the trampoline triggers `materialize`, which
# compiles the job to LLVM IR and hands the module to the IR transform layer.
function get_trampoline(job)
    compiler = jit[]
    lljit = compiler.jit
    lctm = compiler.lctm
    ism = compiler.ism

    # We could also use one dylib per job
    jd = JITDylib(lljit)

    entry_sym = String(gensym(:entry))
    target_sym = String(gensym(:target))
    flags = LLVM.API.LLVMJITSymbolFlags(
        LLVM.API.LLVMJITSymbolGenericFlagsCallable |
        LLVM.API.LLVMJITSymbolGenericFlagsExported, 0)
    entry = LLVM.API.LLVMOrcCSymbolAliasMapPair(
        mangle(lljit, entry_sym),
        LLVM.API.LLVMOrcCSymbolAliasMapEntry(
            mangle(lljit, target_sym), flags))

    # 1. re-export the (lazy) target symbol under the entry name
    mu = LLVM.reexports(lctm, ism, jd, Ref(entry))
    LLVM.define(jd, mu)

    # 2. Lookup address of entry symbol
    addr = lookup(lljit, entry_sym)

    # 3. add MU that will call back into the compiler
    sym = LLVM.API.LLVMOrcCSymbolFlagsMapPair(mangle(lljit, target_sym), flags)

    function materialize(mr)
        buf = JuliaContext() do ctx
            ir, meta = GPUCompiler.compile(:llvm, job; validate=false)

            # Rename entry to match target_sym
            LLVM.name!(meta.entry, target_sym)

            # So 1. serialize the module
            buf = convert(MemoryBuffer, ir)

            # 2. deserialize and wrap by a ThreadSafeModule
            ThreadSafeContext() do ts_ctx
                tsm = context!(context(ts_ctx)) do
                    mod = parse(LLVM.Module, buf)
                    ThreadSafeModule(mod)
                end

                il = LLVM.IRTransformLayer(lljit)
                LLVM.emit(il, mr, tsm)
            end
        end

        return nothing
    end

    # nothing to clean up when a symbol is discarded
    function discard(jd, sym)
    end

    mu = LLVM.CustomMaterializationUnit(entry_sym, Ref(sym), materialize, discard)
    LLVM.define(jd, mu)
    return addr
end
import GPUCompiler: deferred_codegen_jobs
# Generated function that, at (host) compile time, builds a CompilerJob for
# `f(tt...)`, installs a JIT trampoline for it, and emits a call retrieving the
# function pointer through the `deferred_codegen` ccallable.
@generated function deferred_codegen(f::F, ::Val{tt}, ::Val{world}) where {F,tt,world}
    # manual version of native_job because we have a function type
    source = methodinstance(F, Base.to_tuple_type(tt), world)
    target = NativeCompilerTarget(; jlruntime=true, llvm_always_inline=true)
    # XXX: do we actually require the Julia runtime?
    #      with jlruntime=false, we reach an unreachable.
    params = TestCompilerParams()
    config = CompilerConfig(target, params; kernel=false)
    job = CompilerJob(source, config, world)

    # XXX: invoking GPUCompiler from a generated function is not allowed!
    #      for things to work, we need to forward the correct world, at least.
    addr = get_trampoline(job)
    trampoline = pointer(addr)
    id = Base.reinterpret(Int, trampoline)

    # register the job so the compiler can look it up by pointer value
    deferred_codegen_jobs[id] = job

    quote
        # TODO: add an edge to this method instance to support method redefinitions
        ptr = ccall("extern deferred_codegen", llvmcall, Ptr{Cvoid}, (Ptr{Cvoid},), $trampoline)
        assume(ptr != C_NULL)
        return ptr
    end
end
# Call the raw function pointer `f` with Julia-level argument types `tt` and
# return type `rt`, emitting a ccall whose argument passing mimics Julia's
# native specsig ABI: ghost types are dropped, boxed values pass as `Any`,
# aggregates pass by reference, and large returns use an sret-style out-ref.
@generated function abi_call(f::Ptr{Cvoid}, rt::Type{RT}, tt::Type{T}, func::F, args::Vararg{Any, N}) where {T, RT, F, N}
    argtt = tt.parameters[1]
    rettype = rt.parameters[1]
    argtypes = DataType[argtt.parameters...]

    argexprs = Union{Expr, Symbol}[]
    ccall_types = DataType[]

    before = :()
    after = :(ret)

    # Note this follows: emit_call_specfun_other
    JuliaContext() do ctx
        # the function object itself is passed as the first argument, unless ghost
        if !isghosttype(F) && !Core.Compiler.isconstType(F)
            isboxed = GPUCompiler.deserves_argbox(F)
            argexpr = :(func)
            if isboxed
                push!(ccall_types, Any)
            else
                et = convert(LLVMType, func)
                if isa(et, LLVM.SequentialType) # et->isAggregateType
                    push!(ccall_types, Ptr{F})
                    argexpr = Expr(:call, GlobalRef(Base, :Ref), argexpr)
                else
                    push!(ccall_types, F)
                end
            end
            push!(argexprs, argexpr)
        end

        T_jlvalue = LLVM.StructType(LLVMType[])
        T_prjlvalue = LLVM.PointerType(T_jlvalue, #= AddressSpace::Tracked =# 10)

        for (source_i, source_typ) in enumerate(argtypes)
            # ghost/const arguments don't occupy an ABI slot
            if GPUCompiler.isghosttype(source_typ) || Core.Compiler.isconstType(source_typ)
                continue
            end

            argexpr = :(args[$source_i])

            isboxed = GPUCompiler.deserves_argbox(source_typ)
            et = isboxed ? T_prjlvalue : convert(LLVMType, source_typ)

            if isboxed
                push!(ccall_types, Any)
            elseif isa(et, LLVM.SequentialType) # et->isAggregateType
                push!(ccall_types, Ptr{source_typ})
                argexpr = Expr(:call, GlobalRef(Base, :Ref), argexpr)
            else
                push!(ccall_types, source_typ)
            end
            push!(argexprs, argexpr)
        end

        if GPUCompiler.isghosttype(rettype) || Core.Compiler.isconstType(rettype)
            # Do nothing...
            # In theory we could set `rettype` to `T_void`, but ccall will do that for us
        # elseif jl_is_uniontype?
        elseif !GPUCompiler.deserves_retbox(rettype)
            rt = convert(LLVMType, rettype)
            if !isa(rt, LLVM.VoidType) && GPUCompiler.deserves_sret(rettype, rt)
                # structure return: pass a Ref as hidden first argument,
                # and read the result out of it after the call
                before = :(sret = Ref{$rettype}())
                pushfirst!(argexprs, :(sret))
                pushfirst!(ccall_types, Ptr{rettype})
                rettype = Nothing
                after = :(sret[])
            end
        else
            # rt = T_prjlvalue
        end
    end

    quote
        $before
        ret = ccall(f, $rettype, ($(ccall_types...),), $(argexprs...))
        $after
    end
end
# Lazily-compiled invocation: compiles `f(args...)` through the Orc JIT on
# first use and calls the resulting native function through `abi_call`.
@inline function call_delayed(f::F, args...) where F
    tt = Tuple{map(Core.Typeof, args)...}
    rt = Core.Compiler.return_type(f, tt)
    world = GPUCompiler.tls_world_age()
    ptr = deferred_codegen(f, Val(tt), Val(world))
    abi_call(ptr, rt, tt, f, args...)
end
# set up a process-wide Orc JIT: target machine, a main dylib that resolves
# against the running process, and managers for lazy call-through stubs
optlevel = LLVM.API.LLVMCodeGenLevelDefault
tm = GPUCompiler.JITTargetMachine(optlevel=optlevel)
LLVM.asm_verbosity!(tm, true)

lljit = LLJIT(;tm)

jd_main = JITDylib(lljit)

# resolve undefined symbols against the host process' exported symbols
prefix = LLVM.get_prefix(lljit)
dg = LLVM.CreateDynamicLibrarySearchGeneratorForProcess(prefix)
add!(jd_main, dg)
if Sys.iswindows() && Int === Int64
    # TODO can we check isGNU?
    define_absolute_symbol(jd_main, mangle(lljit, "___chkstk_ms"))
end

es = ExecutionSession(lljit)

lctm = LLVM.LocalLazyCallThroughManager(triple(lljit), es)
ism = LLVM.LocalIndirectStubsManager(triple(lljit))

jit[] = CompilerInstance(lljit, lctm, ism)
# tear the JIT down in reverse construction order at process exit
atexit() do
    ci = jit[]
    dispose(ci.ism)
    dispose(ci.lctm)
    dispose(ci.jit)
end
## demo
using Test
# smoke test
f(A) = (A[] += 42; nothing)
global flag = [0]
function caller()
call_delayed(f, flag::Vector{Int})
end
@test caller() === nothing
@test flag[] == 42
# test that we can call a function with a return value
add(x, y) = x+y
function call_add(x, y)
call_delayed(add, x, y)
end
@test call_add(1, 3) == 4
incr(r) = r[] += 1
function call_incr(r)
call_delayed(incr, r)
end
r = Ref{Int}(0)
@test call_incr(r) == 1
@test r[] == 1
function call_real(c)
call_delayed(real, c)
end
@test call_real(1.0+im) == 1.0
# tests struct return
if Sys.ARCH != :aarch64
@test call_delayed(complex, 1.0, 2.0) == 1.0+2.0im
else
@test_broken call_delayed(complex, 1.0, 2.0) == 1.0+2.0im
end
throws(arr, i) = arr[i]
@test call_delayed(throws, [1], 1) == 1
@test_throws BoundsError call_delayed(throws, [1], 0)
struct Closure
x::Int64
end
(c::Closure)(b) = c.x+b
@test call_delayed(Closure(3), 5) == 8
struct Closure2
x::Integer
end
(c::Closure2)(b) = c.x+b
@test call_delayed(Closure2(3), 5) == 8
| GPUCompiler | https://github.com/JuliaGPU/GPUCompiler.jl.git |
|
[
"MIT"
] | 0.27.8 | 1d6f290a5eb1201cd63574fbc4440c788d5cb38f | code | 797 | using GPUCompiler
# Minimal no-op GPU runtime module required by GPUCompiler; see the runtime
# interface for the meaning of each entry point.
module TestRuntime
    # dummy methods
    signal_exception() = return
    malloc(sz) = C_NULL
    report_oom(sz) = return
    report_exception(ex) = return
    report_exception_name(ex) = return
    report_exception_frame(idx, func, file, line) = return
end

# Stateless parameters selecting TestRuntime as the job's runtime module.
struct TestCompilerParams <: AbstractCompilerParams end
GPUCompiler.runtime_module(::CompilerJob{<:Any,TestCompilerParams}) = TestRuntime
# trivial kernel: compiling it exercises the whole native pipeline
kernel() = nothing

function main()
    # build a native compilation job for `kernel()`
    source = methodinstance(typeof(kernel), Tuple{})
    target = NativeCompilerTarget()
    params = TestCompilerParams()
    config = CompilerConfig(target, params)
    job = CompilerJob(source, config)

    # an LLVM context must be active while compiling; compile to assembly
    output = JuliaContext() do ctx
        GPUCompiler.compile(:asm, job)
    end
    println(output[1])   # first element of the (code, metadata) tuple
end

isinteractive() || main()
| GPUCompiler | https://github.com/JuliaGPU/GPUCompiler.jl.git |
|
[
"MIT"
] | 0.27.8 | 1d6f290a5eb1201cd63574fbc4440c788d5cb38f | code | 1493 | module GPUCompiler
using LLVM
using LLVM.Interop
using TimerOutputs
using ExprTools: splitdef, combinedef
using Libdl
using Serialization
using Scratch: @get_scratch!
using Preferences
const CC = Core.Compiler
using Core: MethodInstance, CodeInstance, CodeInfo
compile_cache = nothing # set during __init__()
const pkgver = Base.pkgversion(GPUCompiler)
include("utils.jl")
include("mangling.jl")
# compiler interface and implementations
include("interface.jl")
include("error.jl")
include("native.jl")
include("ptx.jl")
include("gcn.jl")
include("spirv.jl")
include("bpf.jl")
include("metal.jl")
include("runtime.jl")
# compiler implementation
include("jlgen.jl")
include("irgen.jl")
include("optim.jl")
include("validation.jl")
include("rtlib.jl")
include("mcgen.jl")
include("debug.jl")
include("driver.jl")
# other reusable functionality
include("execution.jl")
include("reflection.jl")
include("precompile.jl")
function __init__()
    STDERR_HAS_COLOR[] = get(stderr, :color, false)

    # set up the scratch directory backing the on-disk compilation cache, keyed
    # by Julia and package versions so incompatible artifacts don't mix
    dir = @get_scratch!("compiled")
    ## add the Julia version
    dir = joinpath(dir, "v$(VERSION.major).$(VERSION.minor)")
    ## also add the package version
    if pkgver !== nothing
        # XXX: `Base.pkgversion` is buggy and sometimes returns `nothing`, see e.g.
        #      JuliaLang/PackageCompiler.jl#896 and JuliaGPU/GPUCompiler.jl#593
        dir = joinpath(dir, "v$(pkgver.major).$(pkgver.minor)")
    end
    mkpath(dir)
    global compile_cache = dir
end
end # module
| GPUCompiler | https://github.com/JuliaGPU/GPUCompiler.jl.git |
|
[
"MIT"
] | 0.27.8 | 1d6f290a5eb1201cd63574fbc4440c788d5cb38f | code | 947 | # implementation of the GPUCompiler interfaces for generating eBPF code
## target
export BPFCompilerTarget
# Compiler target for the eBPF back-end.
Base.@kwdef struct BPFCompilerTarget <: AbstractCompilerTarget
    function_pointers::UnitRange{Int}=1:1000 # set of valid function "pointers"
end

llvm_triple(::BPFCompilerTarget) = "bpf-bpf-bpf"
llvm_datalayout(::BPFCompilerTarget) = "e-m:e-p:64:64-i64:64-n32:64-S128"

# Construct a TargetMachine for eBPF (generic CPU, no extra features).
function llvm_machine(target::BPFCompilerTarget)
    triple = llvm_triple(target)

    t = Target(;triple=triple)

    cpu = ""
    feat = ""
    tm = TargetMachine(t, triple, cpu, feat)
    asm_verbosity!(tm, true)

    return tm
end
## job
# identifier under which the runtime library for this target is cached
runtime_slug(job::CompilerJob{BPFCompilerTarget}) = "bpf"

const bpf_intrinsics = () # TODO
isintrinsic(::CompilerJob{BPFCompilerTarget}, fn::String) = in(fn, bpf_intrinsics)

# a "function pointer" is valid when it falls in the configured index range
valid_function_pointer(job::CompilerJob{BPFCompilerTarget}, ptr::Ptr{Cvoid}) =
    reinterpret(UInt, ptr) in job.config.target.function_pointers
| GPUCompiler | https://github.com/JuliaGPU/GPUCompiler.jl.git |
|
[
"MIT"
] | 0.27.8 | 1d6f290a5eb1201cd63574fbc4440c788d5cb38f | code | 2333 | # tools for dealing with compiler debug information
# generate a pseudo-backtrace from LLVM IR instruction debug information
#
# this works by looking up the debug information of the instruction, and inspecting the call
# sites of the containing function. if there's only one, repeat the process from that call.
# finally, the debug information is converted to a Julia stack trace.
# Collect StackFrames for `inst` (appending to `bt`) by walking its debug
# locations, including inlined-at chains, then repeating from the unique call
# site of the containing function, if any.
function backtrace(inst::LLVM.Instruction, bt = StackTraces.StackFrame[])
    done = Set{LLVM.Instruction}()   # guards against cycles in the call graph
    while true
        if in(inst, done)
            break
        end
        push!(done, inst)

        # look up the debug information from the current instruction
        if haskey(metadata(inst), LLVM.MD_dbg)
            loc = metadata(inst)[LLVM.MD_dbg]
            while loc !== nothing
                scope = LLVM.scope(loc)
                if scope !== nothing
                    # strip any trailing semicolon from the scope name
                    name = replace(LLVM.name(scope), r";$"=>"")
                    file = LLVM.file(scope)
                    path = joinpath(LLVM.directory(file), LLVM.filename(file))
                    line = LLVM.line(loc)
                    push!(bt, StackTraces.StackFrame(name, path, line))
                end
                # follow the chain of inlined locations
                loc = LLVM.inlined_at(loc)
            end
        end

        # move up the call chain
        f = LLVM.parent(LLVM.parent(inst))
        ## functions can be used as a *value* in eg. constant expressions, so filter those out
        callers = filter(val -> isa(user(val), LLVM.CallInst), collect(uses(f)))
        ## get rid of calls without debug info
        filter!(callers) do call
            md = metadata(user(call))
            haskey(md, LLVM.MD_dbg)
        end
        if !isempty(callers)
            # figure out the call sites of this instruction
            call_sites = unique(callers) do call
                # there could be multiple calls, originating from the same source location
                md = metadata(user(call))
                md[LLVM.MD_dbg]
            end

            if length(call_sites) > 1
                # ambiguous: record the fact instead of picking one arbitrarily
                frame = StackTraces.StackFrame("multiple call sites", "unknown", 0)
                push!(bt, frame)
            elseif length(call_sites) == 1
                inst = user(first(call_sites))
                continue
            end
        end
        break
    end

    return bt
end
| GPUCompiler | https://github.com/JuliaGPU/GPUCompiler.jl.git |
|
[
"MIT"
] | 0.27.8 | 1d6f290a5eb1201cd63574fbc4440c788d5cb38f | code | 15444 | # compiler driver and main interface
## LLVM context handling
export JuliaContext
# transitionary feature to deal versions of Julia that rely on a global context
#
# Julia 1.9 removed the global LLVM context, requiring to pass a context to codegen APIs,
# so the GPUCompiler APIs have been adapted to require passing a Context object as well.
# however, on older versions of Julia we cannot make codegen emit into that context. we
# could use a hack (serialize + deserialize) to move code into the correct context, however
# as it turns out some of our optimization passes erroneously rely on the context being
# global and unique, resulting in segfaults when we use a local context instead.
#
# to work around this mess, and still present a reasonably unified API, we introduce the
# JuliaContext helper below, which returns a local context on Julia 1.9, and the global
# unique context on all other versions. Once we only support Julia 1.9, we'll deprecate
# this helper to a regular `Context()` call.
"""
    JuliaContext(; opaque_pointers=nothing)
    JuliaContext(f; kwargs...)

Create a thread-safe LLVM context to compile in. The do-block form activates the
underlying context for the duration of `f` and disposes of it afterwards; see
the notes above on why this wrapper exists.
"""
function JuliaContext(; opaque_pointers=nothing)
    # XXX: remove
    ThreadSafeContext(; opaque_pointers)
end
function JuliaContext(f; kwargs...)
    ts_ctx = JuliaContext(; kwargs...)
    # for now, also activate the underlying context
    # XXX: this is wrong; we can't expose the underlying LLVM context, but should
    #      instead always go through the callback in order to unlock it properly.
    #      rework this once we depend on Julia 1.9 or later.
    ctx = context(ts_ctx)
    activate(ctx)
    try
        f(ctx)
    finally
        deactivate(ctx)
        dispose(ts_ctx)
    end
end
## compiler entrypoint
export compile
# NOTE: the keyword arguments to compile/codegen control those aspects of compilation that
# might have to be changed (e.g. set libraries=false when recursing, or set
# strip=true for reflection). What remains defines the compilation job itself,
# and those values are contained in the CompilerJob struct.
# optional hook, invoked with the CompilerJob before each compilation
# (::CompilerJob)
const compile_hook = Ref{Union{Nothing,Function}}(nothing)

"""
    compile(target::Symbol, job::CompilerJob; kwargs...)

Compile the method instance described by `job` to one of the following formats as
specified by the `target` argument: `:llvm` for LLVM IR, `:asm` for assembly and
`:obj` for object code.

The following keyword arguments are supported:

- `toplevel`: indicates that this compilation is the outermost invocation of the compiler
  (default: true)
- `libraries`: link the GPU runtime and `libdevice` libraries (default: true, if toplevel)
- `optimize`: optimize the code (default: true, if toplevel)
- `cleanup`: run cleanup passes on the code (default: true, if toplevel)
- `validate`: enable optional validation of input and outputs (default: true, if toplevel)
- `strip`: strip non-functional metadata and debug information (default: false)
- `only_entry`: only keep the entry function, remove all others (default: false).
  This option is only for internal use, to implement reflection's `dump_module`.

All keyword arguments are forwarded to [`codegen`](@ref).
"""
function compile(target::Symbol, @nospecialize(job::CompilerJob); kwargs...)
    # give clients a chance to observe the compilation before it happens
    if compile_hook[] !== nothing
        compile_hook[](job)
    end

    return codegen(target, job; kwargs...)
end
# Internal driver behind `compile`: generates LLVM IR for `job` and, depending
# on `output` (`:llvm`, `:asm` or `:obj`), lowers it further to machine code.
# Keyword arguments mirror those documented on `compile`; `parent_job` is set
# for recursive invocations from deferred code generation.
function codegen(output::Symbol, @nospecialize(job::CompilerJob); toplevel::Bool=true,
                 libraries::Bool=toplevel, optimize::Bool=toplevel, cleanup::Bool=toplevel,
                 validate::Bool=toplevel, strip::Bool=false, only_entry::Bool=false,
                 parent_job::Union{Nothing, CompilerJob}=nothing)
    if context(; throw_error=false) === nothing
        error("No active LLVM context. Use `JuliaContext()` do-block syntax to create one.")
    end

    @timeit_debug to "Validation" begin
        check_method(job)   # not optional
        validate && check_invocation(job)
    end

    prepare_job!(job)

    ## LLVM IR
    ir, ir_meta = emit_llvm(job; libraries, toplevel, optimize, cleanup, only_entry, validate)
    if output == :llvm
        if strip
            @timeit_debug to "strip debug info" strip_debuginfo!(ir)
        end

        return ir, ir_meta
    end

    ## machine code
    format = if output == :asm
        LLVM.API.LLVMAssemblyFile
    elseif output == :obj
        LLVM.API.LLVMObjectFile
    else
        error("Unknown assembly format $output")
    end
    asm, asm_meta = emit_asm(job, ir; strip, validate, format)
    if output == :asm || output == :obj
        return asm, (; asm_meta..., ir_meta..., ir)
    end

    error("Unknown compilation output $output")
end
# primitive mechanism for deferred compilation, for implementing CUDA dynamic parallelism.
# this could both be generalized (e.g. supporting actual function calls, instead of
# returning a function pointer), and be integrated with the nonrecursive codegen.
# registry of pending deferred compilations, indexed by the id embedded in the
# emitted `deferred_codegen` call (consumed again by emit_llvm)
const deferred_codegen_jobs = Dict{Int, Any}()

# We make this function explicitly callable so that we can drive OrcJIT's
# lazy compilation from, while also enabling recursive compilation.
Base.@ccallable Ptr{Cvoid} function deferred_codegen(ptr::Ptr{Cvoid})
    ptr
end

@generated function deferred_codegen(::Val{ft}, ::Val{tt}) where {ft,tt}
    id = length(deferred_codegen_jobs) + 1
    deferred_codegen_jobs[id] = (; ft, tt)
    # don't bother looking up the method instance, as we'll do so again during codegen
    # using the world age of the parent.
    #
    # this also works around an issue on <1.10, where we don't know the world age of
    # generated functions so use the current world counter, which may be too new
    # for the world we're compiling for.
    quote
        # TODO: add an edge to this method instance to support method redefinitions
        ccall("extern deferred_codegen", llvmcall, Ptr{Cvoid}, (Int,), $id)
    end
end
const __llvm_initialized = Ref(false)
@locked function emit_llvm(@nospecialize(job::CompilerJob); toplevel::Bool,
libraries::Bool, optimize::Bool, cleanup::Bool,
validate::Bool, only_entry::Bool)
if !__llvm_initialized[]
InitializeAllTargets()
InitializeAllTargetInfos()
InitializeAllAsmPrinters()
InitializeAllAsmParsers()
InitializeAllTargetMCs()
__llvm_initialized[] = true
end
@timeit_debug to "IR generation" begin
ir, compiled = irgen(job)
if job.config.entry_abi === :specfunc
entry_fn = compiled[job.source].specfunc
else
entry_fn = compiled[job.source].func
end
entry = functions(ir)[entry_fn]
end
# finalize the current module. this needs to happen before linking deferred modules,
# since those modules have been finalized themselves, and we don't want to re-finalize.
entry = finish_module!(job, ir, entry)
# deferred code generation
has_deferred_jobs = toplevel && !only_entry && haskey(functions(ir), "deferred_codegen")
jobs = Dict{CompilerJob, String}(job => entry_fn)
if has_deferred_jobs
dyn_marker = functions(ir)["deferred_codegen"]
# iterative compilation (non-recursive)
changed = true
while changed
changed = false
# find deferred compiler
# TODO: recover this information earlier, from the Julia IR
worklist = Dict{CompilerJob, Vector{LLVM.CallInst}}()
for use in uses(dyn_marker)
# decode the call
call = user(use)::LLVM.CallInst
id = convert(Int, first(operands(call)))
global deferred_codegen_jobs
dyn_val = deferred_codegen_jobs[id]
# get a job in the appopriate world
dyn_job = if dyn_val isa CompilerJob
# trust that the user knows what they're doing
dyn_val
else
ft, tt = dyn_val
dyn_src = methodinstance(ft, tt, tls_world_age())
CompilerJob(dyn_src, job.config)
end
push!(get!(worklist, dyn_job, LLVM.CallInst[]), call)
end
# compile and link
for dyn_job in keys(worklist)
# cached compilation
dyn_entry_fn = get!(jobs, dyn_job) do
dyn_ir, dyn_meta = codegen(:llvm, dyn_job; toplevel=false,
parent_job=job)
dyn_entry_fn = LLVM.name(dyn_meta.entry)
merge!(compiled, dyn_meta.compiled)
@assert context(dyn_ir) == context(ir)
link!(ir, dyn_ir)
changed = true
dyn_entry_fn
end
dyn_entry = functions(ir)[dyn_entry_fn]
# insert a pointer to the function everywhere the entry is used
T_ptr = convert(LLVMType, Ptr{Cvoid})
for call in worklist[dyn_job]
@dispose builder=IRBuilder() begin
position!(builder, call)
fptr = if LLVM.version() >= v"17"
T_ptr = LLVM.PointerType()
bitcast!(builder, dyn_entry, T_ptr)
elseif VERSION >= v"1.12.0-DEV.225"
T_ptr = LLVM.PointerType(LLVM.Int8Type())
bitcast!(builder, dyn_entry, T_ptr)
else
ptrtoint!(builder, dyn_entry, T_ptr)
end
replace_uses!(call, fptr)
end
erase!(call)
end
end
end
# all deferred compilations should have been resolved
@compiler_assert isempty(uses(dyn_marker)) job
erase!(dyn_marker)
end
if libraries
# load the runtime outside of a timing block (because it recurses into the compiler)
if !uses_julia_runtime(job)
runtime = load_runtime(job)
runtime_fns = LLVM.name.(defs(runtime))
runtime_intrinsics = ["julia.gc_alloc_obj"]
end
@timeit_debug to "Library linking" begin
# target-specific libraries
undefined_fns = LLVM.name.(decls(ir))
@timeit_debug to "target libraries" link_libraries!(job, ir, undefined_fns)
# GPU run-time library
if !uses_julia_runtime(job) && any(fn -> fn in runtime_fns ||
fn in runtime_intrinsics,
undefined_fns)
@timeit_debug to "runtime library" link_library!(ir, runtime)
end
end
end
@timeit_debug to "IR post-processing" begin
# mark everything internal except for entrypoints and any exported
# global variables. this makes sure that the optimizer can, e.g.,
# rewrite function signatures.
if toplevel
preserved_gvs = collect(values(jobs))
for gvar in globals(ir)
if linkage(gvar) == LLVM.API.LLVMExternalLinkage
push!(preserved_gvs, LLVM.name(gvar))
end
end
if LLVM.version() >= v"17"
run!(InternalizePass(; preserved_gvs), ir,
llvm_machine(job.config.target))
else
@dispose pm=ModulePassManager() begin
internalize!(pm, preserved_gvs)
run!(pm, ir)
end
end
end
# mark the kernel entry-point functions (optimization may need it)
if job.config.kernel
push!(metadata(ir)["julia.kernel"], MDNode([entry]))
# IDEA: save all jobs, not only kernels, and save other attributes
# so that we can reconstruct the CompileJob instead of setting it globally
end
if optimize
@timeit_debug to "optimization" begin
optimize!(job, ir; job.config.opt_level)
# deferred codegen has some special optimization requirements,
# which also need to happen _after_ regular optimization.
# XXX: make these part of the optimizer pipeline?
if has_deferred_jobs
@dispose pb=NewPMPassBuilder() begin
add!(pb, NewPMFunctionPassManager()) do fpm
add!(fpm, InstCombinePass())
end
add!(pb, AlwaysInlinerPass())
add!(pb, NewPMFunctionPassManager()) do fpm
add!(fpm, SROAPass())
add!(fpm, GVNPass())
end
add!(pb, MergeFunctionsPass())
run!(pb, ir, llvm_machine(job.config.target))
end
end
end
# optimization may have replaced functions, so look the entry point up again
entry = functions(ir)[entry_fn]
end
if cleanup
@timeit_debug to "clean-up" begin
@dispose pb=NewPMPassBuilder() begin
add!(pb, RecomputeGlobalsAAPass())
add!(pb, GlobalOptPass())
add!(pb, GlobalDCEPass())
add!(pb, StripDeadPrototypesPass())
add!(pb, ConstantMergePass())
run!(pb, ir, llvm_machine(job.config.target))
end
end
end
# finish the module
#
# we want to finish the module after optimization, so we cannot do so
# during deferred code generation. instead, process the deferred jobs
# here.
if toplevel
entry = finish_ir!(job, ir, entry)
for (job′, fn′) in jobs
job′ == job && continue
finish_ir!(job′, ir, functions(ir)[fn′])
end
end
# replace non-entry function definitions with a declaration
# NOTE: we can't do this before optimization, because the definitions of called
# functions may affect optimization.
if only_entry
for f in functions(ir)
f == entry && continue
isdeclaration(f) && continue
LLVM.isintrinsic(f) && continue
empty!(f)
end
end
end
if validate
@timeit_debug to "Validation" begin
check_ir(job, ir)
end
end
if should_verify()
@timeit_debug to "verification" verify(ir)
end
return ir, (; entry, compiled)
end
# Lower the LLVM module `ir` to machine code (assembly or object file, per
# `format`) for the job's target. Returns the code and an empty metadata tuple,
# mirroring emit_llvm's return shape. `validate` is accepted for symmetry with
# emit_llvm but not used here.
@locked function emit_asm(@nospecialize(job::CompilerJob), ir::LLVM.Module;
                          strip::Bool, validate::Bool, format::LLVM.API.LLVMCodeGenFileType)
    # NOTE: strip after validation to get better errors
    if strip
        @timeit_debug to "Debug info removal" strip_debuginfo!(ir)
    end

    @timeit_debug to "LLVM back-end" begin
        @timeit_debug to "preparation" prepare_execution!(job, ir)

        code = @timeit_debug to "machine-code generation" mcgen(job, ir, format)
    end

    return code, ()
end
| GPUCompiler | https://github.com/JuliaGPU/GPUCompiler.jl.git |
|
[
"MIT"
] | 0.27.8 | 1d6f290a5eb1201cd63574fbc4440c788d5cb38f | code | 2263 | # error handling
export KernelError, InternalCompilerError
# User-facing compilation failure, e.g. an unsupported construct in kernel code.
struct KernelError <: Exception
    job::CompilerJob
    message::String
    help::Union{Nothing,String}      # optional suggestion shown to the user
    bt::StackTraces.StackTrace       # pseudo-backtrace into the offending code

    KernelError(@nospecialize(job::CompilerJob), message::String, help=nothing;
                bt=StackTraces.StackTrace()) =
        new(job, message, help, bt)
end
# Render a KernelError: the failing source, the message, and either the provided
# help text or a generic pointer to the @device_code_... reflection macros.
function Base.showerror(io::IO, err::KernelError)
    println(io, "GPU compilation of ", err.job.source, " failed")
    println(io, "KernelError: $(err.message)")
    println(io)
    println(io, something(err.help, "Try inspecting the generated code with any of the @device_code_... macros."))
    Base.show_backtrace(io, err.bt)
end
# Unexpected compiler failure; carries the offending job plus arbitrary keyword
# metadata, rendered into a bug-report template by showerror.
struct InternalCompilerError <: Exception
    job::CompilerJob
    message::String
    meta::Dict
    InternalCompilerError(job, message; kwargs...) = new(job, message, kwargs)
end
# Render an InternalCompilerError as a bug-report template: message, job,
# metadata, installed packages and full version info.
function Base.showerror(io::IO, err::InternalCompilerError)
    println(io, """GPUCompiler.jl encountered an unexpected internal error.
                   Please file an issue attaching the following information, including the backtrace,
                   as well as a reproducible example (if possible).""")

    println(io, "\nInternalCompilerError: $(err.message)")

    println(io, "\nCompiler invocation: ", err.job)

    if !isempty(err.meta)
        println(io, "\nAdditional information:")
        for (key,val) in err.meta
            println(io, " - $key = $(repr(val))")
        end
    end

    # load Pkg/InteractiveUtils lazily via Base.require to avoid hard dependencies
    let Pkg = Base.require(Base.PkgId(Base.UUID("44cfe95a-1eb2-52ea-b672-e2afdf69b78f"), "Pkg"))
        println(io, "\nInstalled packages:")
        for (uuid, pkg) in Pkg.dependencies()
            println(io, " - $(pkg.name) = $(repr(pkg.version))")
        end
    end

    println(io)
    let InteractiveUtils = Base.require(Base.PkgId(Base.UUID("b77e0a4c-d291-57a0-90e8-8db25a27a240"), "InteractiveUtils"))
        InteractiveUtils.versioninfo(io)
    end
end
# Like @assert, but throws an InternalCompilerError carrying the job and any
# additional keyword metadata; the message records the failing expression and
# its source location.
macro compiler_assert(ex, job, kwargs...)
    msg = "$ex, at $(__source__.file):$(__source__.line)"
    return :($(esc(ex)) ? $(nothing)
                        : throw(InternalCompilerError($(esc(job)), $msg;
                                                      $(map(esc, kwargs)...)))
            )
end
| GPUCompiler | https://github.com/JuliaGPU/GPUCompiler.jl.git |
|
[
"MIT"
] | 0.27.8 | 1d6f290a5eb1201cd63574fbc4440c788d5cb38f | code | 10421 | # reusable functionality to implement code execution
export split_kwargs, assign_args!
## macro tools
# split keyword arguments expressions into groups. returns vectors of keyword argument
# values, one more than the number of groups (unmatched keywords in the last vector).
# intended for use in macros; the resulting groups can be used in expressions.
# Partition keyword-argument expressions into the given groups.
#
# Each `kw_groups` entry is a collection of keyword names; every `key = value`
# expression in `kwargs` is appended to the first group containing its name, or
# to a final catch-all group when no group matches. Returns a tuple of vectors,
# one more than the number of groups. Throws `ArgumentError` for expressions
# that are not well-formed keyword arguments.
function split_kwargs(kwargs, kw_groups...)
    ngroups = length(kw_groups) + 1
    groups = ntuple(_->[], ngroups)
    for kwarg in kwargs
        # decode the `key = value` expression
        Meta.isexpr(kwarg, :(=)) ||
            throw(ArgumentError("non-keyword argument like option '$kwarg'"))
        key, val = kwarg.args
        isa(key, Symbol) || throw(ArgumentError("non-symbolic keyword '$key'"))

        # first matching group wins; unmatched keywords go to the last group
        idx = findfirst(kws -> key in kws, kw_groups)
        push!(groups[something(idx, ngroups)], kwarg)
    end
    return groups
end
# assign arguments to variables, handle splatting
# Bind each argument expression to a fresh gensym'd variable, appending the
# assignments to `code`, while handling splatted arguments (`xs...`).
#
# Returns `(vars, var_exprs)`: the generated variable names, and expressions
# that pass them on with the splat re-applied where necessary. Keeping the
# originals alive in variables lets callers safely take pointers etc.
function assign_args!(code, _args)
    # detect splatted arguments and strip the splat for assignment
    splats = Bool[Meta.isexpr(arg, :(...)) for arg in _args]
    args = Any[splat ? arg.args[1] : arg
               for (splat, arg) in zip(splats, _args)]

    # bind every argument to a fresh variable
    vars = Symbol[gensym() for _ in args]
    for (var, arg) in zip(vars, args)
        push!(code.args, :($var = $arg))
    end

    # expressions forwarding the variables, re-applying the splat where needed
    var_exprs = Any[splat ? Expr(:(...), var) : var
                    for (splat, var) in zip(splats, vars)]

    return vars, var_exprs
end
## cached compilation
### Notes on interactions with package images and disk cache.
# Julia uses package images (pkgimg) to cache both the result of inference,
# and the result of native code emissions. Up until Julia v1.11 neither the
# inferred nor the native code of foreign abstract interpreters was cached
# across sessions. Julia v1.11 allows for caching of inference results across
# sessions as long as those inference results are created during precompilation.
#
# Julia cache hierarchy is roughly as follows:
# Function (name of a thing)
# -> Method (particular piece of code to dispatch to with a signature)
# -> MethodInstance (A particular Method + particular signature)
# -> CodeInstance (A MethodInstance compiled for a world)
#
# In order to cache code across sessions we need to insert CodeInstance(owner=GPUCompilerCacheToken)
# into the internal cache. Once we have done so we know that a particular CodeInstance is unique in
# the system. (During pkgimg loading conflicts will be resolved).
#
# When a pkgimg is loaded we check its validity; this means checking that all dependencies are the same,
# the pkgimg was created for the right set of compiler flags, and that all source code that was used
# to create this pkgimg is the same. When a CodeInstance is inside a pkgimg we can extend the chain of
# validity even for GPU code, we cannot verify a "runtime" CodeInstance in the same way.
#
# Therefore when we see a compilation request for a CodeInstance that is originating from a pkgimg
# we can use it as part of the hash for the on-disk cache. (see `cache_file`)
"""
disk_cache_enabled()
Query if caching to disk is enabled.
"""
disk_cache_enabled() = parse(Bool, @load_preference("disk_cache", "false"))
"""
enable_disk_cache!(state::Bool=true)
Activate the GPUCompiler disk cache in the current environment.
You will need to restart your Julia environment for it to take effect.
!!! note
The cache functionality requires Julia 1.11
"""
function enable_disk_cache!(state::Bool=true)
@set_preferences!("disk_cache"=>string(state))
end
# path of the on-disk cache directory (a scratch space keyed by "disk_cache")
disk_cache_path() = @get_scratch!("disk_cache")
# remove all on-disk cache entries
clear_disk_cache!() = rm(disk_cache_path(); recursive=true, force=true)
# guards access to the in-memory compilation cache passed to `cached_compilation`
const cache_lock = ReentrantLock()
"""
cached_compilation(cache::Dict{Any}, src::MethodInstance, cfg::CompilerConfig,
compiler, linker)
Compile a method instance `src` with configuration `cfg`, by invoking `compiler` and
`linker` and storing the result in `cache`.
The `cache` argument should be a dictionary that can be indexed using any value and store
whatever the `linker` function returns. The `compiler` function should take a `CompilerJob`
and return data that can be cached across sessions (e.g., LLVM IR). This data is then
forwarded, along with the `CompilerJob`, to the `linker` function which is allowed to create
session-dependent objects (e.g., a `CuModule`).
"""
function cached_compilation(cache::AbstractDict{<:Any,V},
src::MethodInstance, cfg::CompilerConfig,
compiler::Function, linker::Function) where {V}
# NOTE: we index the cach both using (mi, world, cfg) keys, for the fast look-up,
# and using CodeInfo keys for the slow look-up. we need to cache both for
# performance, but cannot use a separate private cache for the ci->obj lookup
# (e.g. putting it next to the CodeInfo's in the CodeCache) because some clients
# expect to be able to wipe the cache (e.g. CUDA.jl's `device_reset!`)
# fast path: index the cache directly for the *current* world + compiler config
world = tls_world_age()
key = (objectid(src), world, cfg)
# NOTE: we store the MethodInstance's objectid to avoid an expensive allocation.
# Base does this with a multi-level lookup, first keyed on the mi,
# then a linear scan over the (typically few) entries.
# NOTE: no use of lock(::Function)/@lock/get! to avoid try/catch and closure overhead
lock(cache_lock)
obj = get(cache, key, nothing)
unlock(cache_lock)
if obj === nothing || compile_hook[] !== nothing
obj = actual_compilation(cache, src, world, cfg, compiler, linker)::V
lock(cache_lock)
cache[key] = obj
unlock(cache_lock)
end
return obj::V
end
# Compute the on-disk cache file path for a compiled CodeInstance, or return `nothing`
# when the result should not be cached (runtime-compiled CI, or while precompiling
# GPUCompiler itself). The hash combines the CI identity, the compiler config, and
# GPUCompiler's own build id.
@noinline function cache_file(ci::CodeInstance, cfg::CompilerConfig)
    h = hash(Base.objectid(ci))
    @static if isdefined(Base, :object_build_id)
        bid = Base.object_build_id(ci)
        if bid === nothing # CI is from a runtime compilation, not worth caching on disk
            return nothing
        else
            bid = bid % UInt64 # The upper 64bit are a checksum, unavailable during precompilation
        end
        h = hash(bid, h)
    end
    h = hash(cfg, h)
    gpucompiler_buildid = Base.module_build_id(@__MODULE__)
    # an all-ones checksum indicates we are currently being precompiled
    if (gpucompiler_buildid >> 64) % UInt64 == 0xffffffffffffffff
        return nothing # Don't cache during precompilation of GPUCompiler
    end
    return joinpath(
        disk_cache_path(),
        # bifurcate the cache by build id of GPUCompiler
        string(gpucompiler_buildid),
        string(h, ".jls"))
end
# On-disk cache record, written to/read from a `.jls` file by `actual_compilation`.
struct DiskCacheEntry
    src::Type # Originally MethodInstance, but upon deserialize they were not uniqued...
    cfg::CompilerConfig
    asm       # the session-independent compiler output, as returned by the `compiler` function
end
# Slow path of `cached_compilation`: look up (or infer) the CodeInstance for `src`,
# consult the in-memory ci->obj cache and (on Julia 1.11+) the on-disk cache, and
# otherwise invoke `compiler` and `linker`, storing the linked object back into `cache`.
@noinline function actual_compilation(cache::AbstractDict, src::MethodInstance, world::UInt,
                                      cfg::CompilerConfig, compiler::Function, linker::Function)
    job = CompilerJob(src, cfg, world)
    obj = nothing
    # fast path: find an applicable CodeInstance and see if we have compiled it before
    ci = ci_cache_lookup(ci_cache(job), src, world, world)::Union{Nothing,CodeInstance}
    if ci !== nothing
        key = (ci, cfg)
        obj = get(cache, key, nothing)
    end
    # slow path: compile and link
    if obj === nothing || compile_hook[] !== nothing
        asm = nothing
        path = nothing
        ondisk_hit = false
        @static if VERSION >= v"1.11.0-"
            # Don't try to hit the disk cache if we are for a *compile* hook
            # TODO:
            #  - Should we hit disk cache if Base.generating_output()
            #  - Should we allow backend to opt out?
            if ci !== nothing && obj === nothing && disk_cache_enabled()
                path = cache_file(ci, cfg)
                @debug "Looking for on-disk cache" job path
                if path !== nothing && isfile(path)
                    ondisk_hit = true
                    try
                        @debug "Loading compiled kernel" job path
                        # The MI we deserialize here didn't get uniqued...
                        entry = deserialize(path)::DiskCacheEntry
                        # compare by signature type + config, since MIs don't round-trip
                        if entry.src == src.specTypes && entry.cfg == cfg
                            asm = entry.asm
                        else
                            @show entry.src == src.specTypes
                            @show entry.cfg == cfg
                            @warn "Cache missmatch" src.specTypes cfg entry.src entry.cfg
                        end
                    catch ex
                        @warn "Failed to load compiled kernel" job path exception=(ex, catch_backtrace())
                    end
                end
            end
        end
        if asm === nothing || compile_hook[] !== nothing
            # Run the compiler in-case we need to hook it.
            asm = compiler(job)
        end
        if obj !== nothing
            # we got here because of a *compile* hook; don't bother linking
            return obj
        end
        @static if VERSION >= v"1.11.0-"
            if !ondisk_hit && path !== nothing && disk_cache_enabled()
                @debug "Writing out on-disk cache" job path
                mkpath(dirname(path))
                entry = DiskCacheEntry(src.specTypes, cfg, asm)
                # atomic write to disk
                tmppath, io = mktemp(dirname(path); cleanup=false)
                serialize(io, entry)
                close(io)
                @static if VERSION >= v"1.12.0-DEV.1023"
                    mv(tmppath, path; force=true)
                else
                    Base.rename(tmppath, path, force=true)
                end
            end
        end
        obj = linker(job, asm)
        if ci === nothing
            # inference ran during compilation, so the lookup must now succeed
            ci = ci_cache_lookup(ci_cache(job), src, world, world)::CodeInstance
            key = (ci, cfg)
        end
        cache[key] = obj
    end
    return obj
end
| GPUCompiler | https://github.com/JuliaGPU/GPUCompiler.jl.git |
|
[
"MIT"
] | 0.27.8 | 1d6f290a5eb1201cd63574fbc4440c788d5cb38f | code | 3618 | # implementation of the GPUCompiler interfaces for generating GCN code
## target
export GCNCompilerTarget
"""
    GCNCompilerTarget(dev_isa; features="")

AMD GCN compiler target, identified by a device ISA string (`dev_isa`) and an
optional LLVM target-feature string (`features`).
"""
Base.@kwdef struct GCNCompilerTarget <: AbstractCompilerTarget
    dev_isa::String
    features::String=""
end
# convenience constructor taking the device ISA positionally
GCNCompilerTarget(dev_isa; features="") = GCNCompilerTarget(dev_isa, features)
llvm_triple(::GCNCompilerTarget) = "amdgcn-amd-amdhsa"
# Construct an LLVM TargetMachine for this GCN target: CPU set to the device ISA,
# the target's feature string, a PIC relocation model, and verbose assembly output.
function llvm_machine(target::GCNCompilerTarget)
    tgt_triple = llvm_triple(target)
    llvm_target = Target(triple=tgt_triple)
    machine = TargetMachine(llvm_target, tgt_triple, target.dev_isa, target.features;
                            reloc=LLVM.API.LLVMRelocPIC)
    asm_verbosity!(machine, true)
    return machine
end
## job
# TODO: encode debug build or not in the compiler job
# https://github.com/JuliaGPU/CUDAnative.jl/issues/368
# unique string identifying this target configuration, used to select a matching
# build of the device runtime library
runtime_slug(job::CompilerJob{GCNCompilerTarget}) = "gcn-$(job.config.target.dev_isa)$(job.config.target.features)"
# functions assumed to be provided by the GCN environment (none yet)
const gcn_intrinsics = () # TODO: ("vprintf", "__assertfail", "malloc", "free")
isintrinsic(::CompilerJob{GCNCompilerTarget}, fn::String) = in(fn, gcn_intrinsics)
# GCN-specific module finalization: lower throw calls the back-end cannot support,
# and for kernels set the AMDGPU kernel calling convention and eagerly lower byval.
function finish_module!(@nospecialize(job::CompilerJob{GCNCompilerTarget}),
                        mod::LLVM.Module, entry::LLVM.Function)
    lower_throw_extra!(mod)
    if job.config.kernel
        # calling convention
        callconv!(entry, LLVM.API.LLVMAMDGPUKERNELCallConv)
        # work around bad byval codegen (JuliaGPU/GPUCompiler.jl#92)
        entry = lower_byval(job, mod, entry)
    end
    return entry
end
## LLVM passes
# Lower calls to Julia-generated throw helpers (bounds errors etc.) that the GCN
# back-end cannot support, replacing each call with a GPU-compatible exception report
# followed by a trap, and erasing the now-unused exception arguments.
# Returns `true` if the module was modified.
#
# Fix: removed the unused local `nargs = length(parameters(f))`, which was computed
# per-call but never read.
function lower_throw_extra!(mod::LLVM.Module)
    job = current_job::CompilerJob
    changed = false
    @timeit_debug to "lower throw (extra)" begin
        # name patterns of generated throw functions to intercept
        throw_functions = [
            r"julia_bounds_error.*",
            r"julia_throw_boundserror.*",
            r"julia_error_if_canonical_getindex.*",
            r"julia_error_if_canonical_setindex.*",
            r"julia___subarray_throw_boundserror.*",
        ]
        for f in functions(mod)
            f_name = LLVM.name(f)
            for fn in throw_functions
                if occursin(fn, f_name)
                    for use in uses(f)
                        call = user(use)::LLVM.CallInst
                        # replace the throw with a trap
                        @dispose builder=IRBuilder() begin
                            position!(builder, call)
                            emit_exception!(builder, f_name, call)
                        end
                        # remove the call
                        call_args = arguments(call)
                        erase!(LLVM.parent(call), call)
                        # HACK: kill the exceptions' unused arguments
                        for arg in call_args
                            # peek through casts
                            if isa(arg, LLVM.AddrSpaceCastInst)
                                cast = arg
                                arg = first(operands(cast))
                                isempty(uses(cast)) && erase!(cast)
                            end
                            if isa(arg, LLVM.Instruction) && isempty(uses(arg))
                                erase!(arg)
                            end
                        end
                        changed = true
                    end
                    @compiler_assert isempty(uses(f)) job
                end
            end
        end
    end
    return changed
end
# Emit a call to the `llvm.trap` intrinsic, declaring it in the module if absent.
function emit_trap!(job::CompilerJob{GCNCompilerTarget}, builder, mod, inst)
    void_ft = LLVM.FunctionType(LLVM.VoidType())
    fns = functions(mod)
    trap_intr = haskey(fns, "llvm.trap") ? fns["llvm.trap"] :
                LLVM.Function(mod, "llvm.trap", void_ft)
    call!(builder, void_ft, trap_intr)
end
| GPUCompiler | https://github.com/JuliaGPU/GPUCompiler.jl.git |
|
[
"MIT"
] | 0.27.8 | 1d6f290a5eb1201cd63574fbc4440c788d5cb38f | code | 11490 | # interfaces for defining new compilers
# the definition of a new GPU compiler is typically split in two:
# - a generic compiler that lives in GPUCompiler.jl (e.g., emitting PTX, SPIR-V, etc)
# - a more specific version in a package that targets an environment (e.g. CUDA, ROCm, etc)
#
# the first level of customizability is found in the AbstractCompilerTarget hierarchy,
# with methods and interfaces that can only be implemented within GPUCompiler.jl.
#
# further customization should be put in a concrete instance of the AbstractCompilerParams
# type, and can be used to customize interfaces defined on CompilerJob.
## target
export AbstractCompilerTarget
# container for state handled by targets defined in GPUCompiler.jl
abstract type AbstractCompilerTarget end
# the kind of code this target emits (overridable; defaults to plain text)
source_code(@nospecialize(target::AbstractCompilerTarget)) = "text"
# the LLVM target triple; must be implemented by every concrete target
llvm_triple(@nospecialize(target::AbstractCompilerTarget)) = error("Not implemented")
# Fallback TargetMachine constructor: build one from the target's triple alone,
# with verbose assembly output enabled. Targets may override for more control.
function llvm_machine(@nospecialize(target::AbstractCompilerTarget))
    tgt_triple = llvm_triple(target)
    machine = TargetMachine(Target(triple=tgt_triple), tgt_triple)
    asm_verbosity!(machine, true)
    return machine
end
# the target's plain LLVM data layout
llvm_datalayout(target::AbstractCompilerTarget) = DataLayout(llvm_machine(target))
# the target's datalayout, with Julia's non-integral address spaces added to it
function julia_datalayout(@nospecialize(target::AbstractCompilerTarget))
    dl = llvm_datalayout(target)
    dl === nothing && return nothing
    # address spaces 10-13 are marked non-integral for Julia's GC-tracked pointers
    DataLayout(string(dl) * "-ni:10:11:12:13")
end
# whether fused multiply-add is available for element type `T` (default: no)
have_fma(@nospecialize(target::AbstractCompilerTarget), T::Type) = false
# DWARF debug-info version to emit
dwarf_version(target::AbstractCompilerTarget) = Int32(4) # It seems every target supports v4 bar cuda
## params
export AbstractCompilerParams
# container for state handled by external users of GPUCompiler.jl
# (subtype this to customize the CompilerJob interfaces defined below)
abstract type AbstractCompilerParams end
## config
export CompilerConfig
# the configuration of the compiler
"""
    CompilerConfig(target, params; kernel=true, entry_abi=:specfunc, name=nothing,
                   always_inline=false, opt_level=2)

Construct a `CompilerConfig` that will be used to drive compilation for the given `target`
and `params`.

Several keyword arguments can be used to customize the compilation process:

- `kernel`: specifies if the function should be compiled as a kernel, or as a regular
  function. This is used to determine the calling convention and for validation purposes.
- `entry_abi`: can be either `:specfunc` the default, or `:func`. `:specfunc` expects the
  arguments to be passed in registers, simple return values are returned in registers as
  well, and complex return values are returned on the stack using `sret`, the calling
  convention is `fastcc`. The `:func` abi is simpler with a calling convention of the first
  argument being the function itself (to support closures), the second argument being a
  pointer to a vector of boxed Julia values and the third argument being the number of
  values, the return value will also be boxed. The `:func` abi will internally call the
  `:specfunc` abi, but is generally easier to invoke directly.
- `name`: the name that will be used for the entrypoint function. If `nothing` (the
  default), the name will be generated automatically.
- `always_inline` specifies if the Julia front-end should inline all functions into one if
  possible.
- `opt_level`: the optimization level to use (default: `2`).
"""
struct CompilerConfig{T,P}
    target::T
    params::P
    kernel::Bool
    name::Union{Nothing,String}
    entry_abi::Symbol
    always_inline::Bool
    opt_level::Int
    function CompilerConfig(target::AbstractCompilerTarget,
                            params::AbstractCompilerParams;
                            kernel=true,
                            name=nothing,
                            entry_abi=:specfunc,
                            always_inline=false,
                            opt_level=2)
        # only two entry ABIs are supported
        if entry_abi ∉ (:specfunc, :func)
            error("Unknown entry_abi=$entry_abi")
        end
        new{typeof(target), typeof(params)}(target, params, kernel, name, entry_abi,
                                            always_inline, opt_level)
    end
end
# copy constructor: derive a new config from an existing one, overriding selected fields
CompilerConfig(cfg::CompilerConfig; target=cfg.target, params=cfg.params,
               kernel=cfg.kernel, name=cfg.name, entry_abi=cfg.entry_abi,
               always_inline=cfg.always_inline, opt_level=cfg.opt_level) =
    CompilerConfig(target, params; kernel, entry_abi, name, always_inline, opt_level)
function Base.show(io::IO, @nospecialize(cfg::CompilerConfig{T})) where {T}
    print(io, "CompilerConfig for ", T)
end
# hash all fields so configs can be used as (part of) compilation-cache keys
function Base.hash(cfg::CompilerConfig, h::UInt)
    h = hash(cfg.target, h)
    h = hash(cfg.params, h)
    h = hash(cfg.kernel, h)
    h = hash(cfg.name, h)
    h = hash(cfg.entry_abi, h)
    h = hash(cfg.always_inline, h)
    h = hash(cfg.opt_level, h)
    return h
end
## job
export CompilerJob
using Core: MethodInstance
# a specific invocation of the compiler, bundling everything needed to generate code
struct CompilerJob{T,P}
    source::MethodInstance      # the method instance to compile
    config::CompilerConfig{T,P}
    world::UInt                 # world age the compilation is valid in
    CompilerJob(src::MethodInstance, cfg::CompilerConfig{T,P},
                world=tls_world_age()) where {T,P} =
        new{T,P}(src, cfg, world)
end
# hash all fields so jobs can be used as cache keys
function Base.hash(job::CompilerJob, h::UInt)
    h = hash(job.source, h)
    h = hash(job.config, h)
    h = hash(job.world, h)
    return h
end
## default definitions that can be overridden to influence GPUCompiler's behavior
# Has the runtime available and does not require special handling
uses_julia_runtime(@nospecialize(job::CompilerJob)) = false
# Should emit PTLS lookup that can be relocated
dump_native(@nospecialize(job::CompilerJob)) = false
# the Julia module to look up target-specific runtime functions in (this includes both
# target-specific functions from the GPU runtime library, like `malloc`, but also
# replacements functions for operations like `Base.sin`)
runtime_module(@nospecialize(job::CompilerJob)) = error("Not implemented")
# check if a function is an intrinsic that can be assumed to be always available
isintrinsic(@nospecialize(job::CompilerJob), fn::String) = false
# provide a specific interpreter to use.
# on Julia 1.11+ the interpreter identifies its code cache through an owner token;
# on older versions it is handed the code cache object directly.
if VERSION >= v"1.11.0-DEV.1552"
get_interpreter(@nospecialize(job::CompilerJob)) =
    GPUInterpreter(job.world; method_table=method_table(job),
                   token=ci_cache_token(job), inf_params=inference_params(job),
                   opt_params=optimization_params(job))
else
get_interpreter(@nospecialize(job::CompilerJob)) =
    GPUInterpreter(job.world; method_table=method_table(job),
                   code_cache=ci_cache(job), inf_params=inference_params(job),
                   opt_params=optimization_params(job))
end
# does this target support throwing Julia exceptions with jl_throw?
# if not, calls to throw will be replaced with calls to the GPU runtime
can_throw(@nospecialize(job::CompilerJob)) = uses_julia_runtime(job)
# does this target support loading from Julia safepoints?
# if not, safepoints at function entry will not be emitted
can_safepoint(@nospecialize(job::CompilerJob)) = uses_julia_runtime(job)
# generate a string that represents the type of compilation, for selecting a compiled
# instance of the runtime library. this slug should encode everything that affects
# the generated code of this compiler job (with exception of the function source)
runtime_slug(@nospecialize(job::CompilerJob)) = error("Not implemented")
# the type of the kernel state object, or Nothing if this back-end doesn't need one.
#
# the generated code will be rewritten to include an object of this type as the first
# argument to each kernel, and pass that object to every function that accesses the kernel
# state (possibly indirectly) via the `kernel_state_pointer` function.
kernel_state_type(@nospecialize(job::CompilerJob)) = Nothing
# Does the target need to pass kernel arguments by value?
needs_byval(@nospecialize(job::CompilerJob)) = true
# whether `ptr` is a valid call target for this back-end
valid_function_pointer(@nospecialize(job::CompilerJob), ptr::Ptr{Cvoid}) = false
# Care is required for anything that impacts:
# - method_table
# - inference_params
# - optimization_params
# By default that is just always_inline
# the cache token is compared with jl_egal
struct GPUCompilerCacheToken
    target_type::Type
    always_inline::Bool
    method_table::Core.MethodTable
end
# token identifying this compiler configuration in Julia's code-instance cache
ci_cache_token(@nospecialize(job::CompilerJob)) =
    GPUCompilerCacheToken(typeof(job.config.target), job.config.always_inline, method_table(job))
# the codeinstance cache to use -- should only be used for the constructor
if VERSION >= v"1.11.0-DEV.1552"
# Soft deprecated user should use `CC.code_cache(get_interpreter(job))`
ci_cache(@nospecialize(job::CompilerJob)) = CC.code_cache(get_interpreter(job))
else
function ci_cache(@nospecialize(job::CompilerJob))
    lock(GLOBAL_CI_CACHES_LOCK) do
        # one private code cache per compiler configuration
        cache = get!(GLOBAL_CI_CACHES, job.config) do
            CodeCache()
        end
        return cache
    end
end
end
# the method table to use
method_table(@nospecialize(job::CompilerJob)) = GLOBAL_METHOD_TABLE
# the inference parameters to use when constructing the GPUInterpreter
function inference_params(@nospecialize(job::CompilerJob))
    if VERSION >= v"1.12.0-DEV.1017"
        # NOTE(review): the `unoptimize_throw_blocks` kwarg is only passed on older
        # versions — presumably it no longer exists on 1.12; confirm upstream.
        CC.InferenceParams()
    else
        CC.InferenceParams(; unoptimize_throw_blocks=false)
    end
end
# the optimization parameters to use when constructing the GPUInterpreter
function optimization_params(@nospecialize(job::CompilerJob))
    kwargs = NamedTuple()
    if job.config.always_inline
        # effectively disable the inlining cost model
        kwargs = (kwargs..., inline_cost_threshold=typemax(Int))
    end
    return CC.OptimizationParams(;kwargs...)
end
# Map Julia's `-g` debug level to an LLVM debug-info emission kind.
function llvm_debug_info(@nospecialize(job::CompilerJob))
    level = Base.JLOptions().debug_level
    return level == 0 ? LLVM.API.LLVMDebugEmissionKindNoDebug :
           level == 1 ? LLVM.API.LLVMDebugEmissionKindLineTablesOnly :
           level >= 2 ? LLVM.API.LLVMDebugEmissionKindFullDebug :
           nothing
end
## extension points at important stages of compilation
# prepare the environment for compilation of a job. this can involve, e.g.,
# priming the cache with entries that cannot be easily inferred.
prepare_job!(@nospecialize(job::CompilerJob)) = return
# early extension point used to link-in external bitcode files.
# this is typically used by downstream packages to link vendor libraries.
link_libraries!(@nospecialize(job::CompilerJob), mod::LLVM.Module,
                undefined_fns::Vector{String}) = return
# finalization of the module, before deferred codegen and optimization
finish_module!(@nospecialize(job::CompilerJob), mod::LLVM.Module, entry::LLVM.Function) =
    entry
# post-Julia optimization processing of the module
optimize_module!(@nospecialize(job::CompilerJob), mod::LLVM.Module) = return
# final processing of the IR, right before validation and machine-code generation
finish_ir!(@nospecialize(job::CompilerJob), mod::LLVM.Module, entry::LLVM.Function) =
    entry
# whether an LLVM function is valid for this back-end
validate_ir(@nospecialize(job::CompilerJob), mod::LLVM.Module) = IRError[]
# deprecated hooks; the sentinel default return value lets callers detect whether a
# back-end still overrides the old-style entry points
struct DeprecationMarker end
process_module!(@nospecialize(job::CompilerJob), mod::LLVM.Module) = DeprecationMarker()
process_entry!(@nospecialize(job::CompilerJob), mod::LLVM.Module, entry::LLVM.Function) =
    DeprecationMarker()
|
[
"MIT"
] | 0.27.8 | 1d6f290a5eb1201cd63574fbc4440c788d5cb38f | code | 31045 | # LLVM IR generation
# Generate LLVM IR for `job`: emit code for the method instance, clean up
# incompatibilities, rename the entry point, and run minimal required optimization.
# Returns the module together with the table of compiled method instances
# (each entry carrying its code instance and `func`/`specfunc` names).
function irgen(@nospecialize(job::CompilerJob))
    mod, compiled = @timeit_debug to "emission" compile_method_instance(job)
    if job.config.entry_abi === :specfunc
        entry_fn = compiled[job.source].specfunc
    else
        entry_fn = compiled[job.source].func
    end
    @assert entry_fn !== nothing
    entry = functions(mod)[entry_fn]
    # clean up incompatibilities
    @timeit_debug to "clean-up" begin
        for llvmf in functions(mod)
            if Base.isdebugbuild()
                # only occurs in debug builds
                delete!(function_attributes(llvmf),
                        EnumAttribute("sspstrong", 0))
            end
            delete!(function_attributes(llvmf),
                    StringAttribute("probe-stack", "inline-asm"))
            if Sys.iswindows()
                personality!(llvmf, nothing)
            end
            # remove the non-specialized jfptr functions
            # TODO: Do we need to remove these?
            if job.config.entry_abi === :specfunc
                if startswith(LLVM.name(llvmf), "jfptr_")
                    erase!(llvmf)
                end
            end
        end
        # remove the exception-handling personality function
        if Sys.iswindows() && "__julia_personality" in functions(mod)
            llvmf = functions(mod)["__julia_personality"]
            @compiler_assert isempty(uses(llvmf)) job
            erase!(llvmf)
        end
    end
    deprecation_marker = process_module!(job, mod)
    if deprecation_marker != DeprecationMarker()
        Base.depwarn("GPUCompiler.process_module! is deprecated; implement GPUCompiler.finish_module! instead", :process_module)
    end
    # sanitize global values (Julia doesn't when using the external codegen policy)
    for val in [collect(globals(mod)); collect(functions(mod))]
        isdeclaration(val) && continue
        old_name = LLVM.name(val)
        new_name = safe_name(old_name)
        if old_name != new_name
            LLVM.name!(val, new_name)
        end
    end
    # rename and process the entry point
    if job.config.name !== nothing
        LLVM.name!(entry, safe_name(job.config.name))
    elseif job.config.kernel
        LLVM.name!(entry, mangle_sig(job.source.specTypes))
    end
    deprecation_marker = process_entry!(job, mod, entry)
    if deprecation_marker != DeprecationMarker()
        Base.depwarn("GPUCompiler.process_entry! is deprecated; implement GPUCompiler.finish_module! instead", :process_entry)
        # the deprecated hook returns the (possibly replaced) entry function
        entry = deprecation_marker
    end
    # record the (possibly renamed) entry-point names
    if job.config.entry_abi === :specfunc
        func = compiled[job.source].func
        specfunc = LLVM.name(entry)
    else
        func = LLVM.name(entry)
        specfunc = compiled[job.source].specfunc
    end
    compiled[job.source] =
        (; compiled[job.source].ci, func, specfunc)
    # minimal required optimization
    @timeit_debug to "rewrite" begin
        if job.config.kernel && needs_byval(job)
            # pass all bitstypes by value; by default Julia passes aggregates by reference
            # (this improves performance, and is mandated by certain back-ends like SPIR-V).
            args = classify_arguments(job, function_type(entry))
            for arg in args
                if arg.cc == BITS_REF
                    llvm_typ = convert(LLVMType, arg.typ)
                    attr = TypeAttribute("byval", llvm_typ)
                    push!(parameter_attributes(entry, arg.idx), attr)
                end
            end
        end
        # internalize all functions, but keep exported global variables.
        linkage!(entry, LLVM.API.LLVMExternalLinkage)
        preserved_gvs = String[LLVM.name(entry)]
        for gvar in globals(mod)
            push!(preserved_gvs, LLVM.name(gvar))
        end
        if LLVM.version() >= v"17"
            @dispose pb=NewPMPassBuilder() begin
                add!(pb, InternalizePass(; preserved_gvs))
                add!(pb, AlwaysInlinerPass())
                run!(pb, mod, llvm_machine(job.config.target))
            end
        else
            @dispose pm=ModulePassManager() begin
                internalize!(pm, preserved_gvs)
                always_inliner!(pm)
                run!(pm, mod)
            end
        end
        global current_job
        current_job = job
        # lower exception throws when the target cannot use jl_throw
        can_throw(job) || lower_throw!(mod)
    end
    return mod, compiled
end
## exception handling
# this pass lowers `jl_throw` and friends to GPU-compatible exceptions.
# this isn't strictly necessary, but has a couple of advantages:
# - we can kill off unused exception arguments that otherwise would allocate or invoke
# - we can fake debug information (lacking a stack unwinder)
#
# once we have thorough inference (ie. discarding `@nospecialize` and thus supporting
# exception arguments) and proper debug info to unwind the stack, this pass can go.
# Returns `true` if any throw call was rewritten.
function lower_throw!(mod::LLVM.Module)
    job = current_job::CompilerJob
    changed = false
    @timeit_debug to "lower throw" begin
        throw_functions = [
            # unsupported runtime functions that are used to throw specific exceptions
            "jl_throw"                      => "exception",
            "jl_error"                      => "error",
            "jl_too_few_args"               => "too few arguments exception",
            "jl_too_many_args"              => "too many arguments exception",
            "jl_type_error"                 => "type error",
            "jl_type_error_rt"              => "type error",
            "jl_undefined_var_error"        => "undefined variable error",
            "jl_bounds_error"               => "bounds error",
            "jl_bounds_error_v"             => "bounds error",
            "jl_bounds_error_int"           => "bounds error",
            "jl_bounds_error_tuple_int"     => "bounds error",
            "jl_bounds_error_unboxed_int"   => "bounds error",
            "jl_bounds_error_ints"          => "bounds error",
            "jl_eof_error"                  => "EOF error",
        ]
        for f in functions(mod)
            fn = LLVM.name(f)
            for (throw_fn, name) in throw_functions
                occursin(throw_fn, fn) || continue
                for use in uses(f)
                    call = user(use)::LLVM.CallInst
                    # replace the throw with a PTX-compatible exception
                    @dispose builder=IRBuilder() begin
                        position!(builder, call)
                        emit_exception!(builder, name, call)
                    end
                    # remove the call
                    call_args = arguments(call)
                    erase!(call)
                    # HACK: kill the exceptions' unused arguments
                    # this is needed for throwing objects with @nospecialize constructors.
                    for arg in call_args
                        # peek through casts
                        if isa(arg, LLVM.AddrSpaceCastInst)
                            cast = arg
                            arg = first(operands(cast))
                            isempty(uses(cast)) && erase!(cast)
                        end
                        if isa(arg, LLVM.Instruction) && isempty(uses(arg))
                            erase!(arg)
                        end
                    end
                    changed = true
                end
                @compiler_assert isempty(uses(f)) job
                # a function matches at most one pattern
                break
            end
        end
    end
    return changed
end
# report an exception in a GPU-compatible manner
#
# the exact behavior depends on the debug level. in all cases, a `trap` will be emitted. On
# debug level 1, the exception name will be printed, and on debug level 2 the individual
# stack frames (as recovered from the LLVM debug information) will be printed as well.
function emit_exception!(builder, name, inst)
    job = current_job::CompilerJob
    bb = position(builder)
    fun = LLVM.parent(bb)
    mod = LLVM.parent(fun)
    # report the exception
    if Base.JLOptions().debug_level >= 1
        name = globalstring_ptr!(builder, name, "exception")
        if Base.JLOptions().debug_level == 1
            call!(builder, Runtime.get(:report_exception), [name])
        else
            # at higher debug levels only the name is reported here; the frames follow
            call!(builder, Runtime.get(:report_exception_name), [name])
        end
    end
    # report each frame
    if Base.JLOptions().debug_level >= 2
        rt = Runtime.get(:report_exception_frame)
        ft = convert(LLVM.FunctionType, rt)
        bt = backtrace(inst)
        for (i,frame) in enumerate(bt)
            idx = ConstantInt(parameters(ft)[1], i)
            func = globalstring_ptr!(builder, String(frame.func), "di_func")
            file = globalstring_ptr!(builder, String(frame.file), "di_file")
            line = ConstantInt(parameters(ft)[4], frame.line)
            call!(builder, rt, [idx, func, file, line])
        end
    end
    # signal the exception
    call!(builder, Runtime.get(:signal_exception))
    emit_trap!(job, builder, mod, inst)
end
# Emit a call to the `llvm.trap` intrinsic, declaring it in the module if absent.
# Back-ends (e.g. GCN) can override this with target-specific behavior.
function emit_trap!(@nospecialize(job::CompilerJob), builder, mod, inst)
    void_ft = LLVM.FunctionType(LLVM.VoidType())
    fns = functions(mod)
    trap_intr = haskey(fns, "llvm.trap") ? fns["llvm.trap"] :
                LLVM.Function(mod, "llvm.trap", void_ft)
    call!(builder, void_ft, trap_intr)
end
## kernel promotion
# How a Julia-level argument is passed at the LLVM boundary.
@enum ArgumentCC begin
    BITS_VALUE  # bitstype, passed as value
    BITS_REF    # bitstype, passed as pointer
    MUT_REF     # jl_value_t*, or the anonymous equivalent
    GHOST       # not passed
end
# Determine the calling convention of the arguments of a Julia function, given the
# LLVM function type as generated by the Julia code generator. Returns a vector with one
# element for each Julia-level argument, containing a named tuple with the following fields:
# - `cc`: the calling convention of the argument
# - `typ`: the Julia type of the argument
# - `name`: the name of the argument
# - `idx`: the index of the argument in the LLVM function type, or `nothing` if the argument
#   is not passed at the LLVM level.
function classify_arguments(@nospecialize(job::CompilerJob), codegen_ft::LLVM.FunctionType)
    source_sig = job.source.specTypes
    source_types = [source_sig.parameters...]
    source_argnames = Base.method_argnames(job.source.def)
    while length(source_argnames) < length(source_types)
        # this is probably due to a trailing vararg; repeat its name
        push!(source_argnames, source_argnames[end])
    end
    codegen_types = parameters(codegen_ft)
    args = []
    codegen_i = 1
    for (source_i, (source_typ, source_name)) in enumerate(zip(source_types, source_argnames))
        # ghost types and compile-time constants are not passed at all
        if isghosttype(source_typ) || Core.Compiler.isconstType(source_typ)
            push!(args, (cc=GHOST, typ=source_typ, name=source_name, idx=nothing))
            continue
        end
        codegen_typ = codegen_types[codegen_i]
        if codegen_typ isa LLVM.PointerType
            llvm_source_typ = convert(LLVMType, source_typ; allow_boxed=true)
            # pointers are used for multiple kinds of arguments
            # - literal pointer values
            if source_typ <: Ptr || source_typ <: Core.LLVMPtr
                @assert llvm_source_typ == codegen_typ
                push!(args, (cc=BITS_VALUE, typ=source_typ, name=source_name, idx=codegen_i))
            # - boxed values
            #   XXX: use `deserves_retbox` instead?
            elseif llvm_source_typ isa LLVM.PointerType
                @assert llvm_source_typ == codegen_typ
                push!(args, (cc=MUT_REF, typ=source_typ, name=source_name, idx=codegen_i))
            # - references to aggregates
            else
                @assert llvm_source_typ != codegen_typ
                push!(args, (cc=BITS_REF, typ=source_typ, name=source_name, idx=codegen_i))
            end
        else
            # everything else is passed directly by value
            push!(args, (cc=BITS_VALUE, typ=source_typ, name=source_name, idx=codegen_i))
        end
        codegen_i += 1
    end
    return args
end
# Predicates mirroring the Julia compiler's argument-passing decisions.
# `true` iff `T` is a (possibly abstract) DataType that is not mutable.
is_immutable_datatype(T::Type) = isa(T, DataType) && !Base.ismutabletype(T)
# `true` iff the compiler may allocate values of `T` inline (unboxed).
# FIXME: too simple
function is_inlinealloc(T::Type)
    # bit 2 of the TypeName flags is `mayinlinealloc`
    if (T.name.flags >> 2) & 1 == true
        if !Base.datatype_pointerfree(T)
            typename(dt::DataType) = dt.name
            typename(T).n_uninitialized == 0 || return false
        end
        return true
    else
        return false
    end
end
# `true` iff `T` is an immutable DataType with a defined layout.
is_concrete_immutable(T::Type) = is_immutable_datatype(T) && T.layout !== C_NULL
# `true` iff `T` is immutable and contains no GC-tracked pointers.
function is_pointerfree(T::Type)
    is_immutable_datatype(T) || return false
    return Base.datatype_pointerfree(T)
end
# Whether values of `T` can live on the stack (i.e. be passed unboxed).
function deserves_stack(@nospecialize(T))
    is_concrete_immutable(T) || return false
    return is_inlinealloc(T)
end
# Arguments/returns that cannot live on the stack must be boxed.
deserves_argbox(T) = !deserves_stack(T)
deserves_retbox(T) = deserves_argbox(T)
# Whether a value of Julia type `T` (lowered to LLVM type `llvmT`) is returned via sret.
function deserves_sret(T, llvmT)
    @assert isa(T, DataType)
    return sizeof(T) > sizeof(Ptr{Cvoid}) &&
           !isa(llvmT, LLVM.FloatingPointType) &&
           !isa(llvmT, LLVM.VectorType)
end
# byval lowering
#
# some back-ends don't support byval, or support it badly, so lower it eagerly ourselves
# https://reviews.llvm.org/D79744
# Rewrite `f` so that byval pointer parameters are passed by value instead: a new
# function takes each aggregate directly and spills it to a stack slot, after which
# the old body is cloned in. Returns the replacement function.
function lower_byval(@nospecialize(job::CompilerJob), mod::LLVM.Module, f::LLVM.Function)
    ft = function_type(f)
    @timeit_debug to "lower byval" begin
        # classify the arguments
        args = classify_arguments(job, ft)
        filter!(args) do arg
            arg.cc != GHOST
        end
        # find the byval parameters
        byval = BitVector(undef, length(parameters(ft)))
        for i in 1:length(byval)
            attrs = collect(parameter_attributes(f, i))
            byval[i] = any(attrs) do attr
                kind(attr) == kind(TypeAttribute("byval", LLVM.VoidType()))
            end
        end
        # fixup metadata
        #
        # Julia emits invariant.load and const TBAA metadata on loads from pointer args,
        # which is invalid now that we have materialized the byval.
        for (i, param) in enumerate(parameters(f))
            if byval[i]
                # collect all uses of the argument
                worklist = Vector{LLVM.Instruction}(user.(collect(uses(param))))
                while !isempty(worklist)
                    value = popfirst!(worklist)
                    # remove the invariant.load attribute
                    md = metadata(value)
                    if haskey(md, LLVM.MD_invariant_load)
                        delete!(md, LLVM.MD_invariant_load)
                    end
                    if haskey(md, LLVM.MD_tbaa)
                        delete!(md, LLVM.MD_tbaa)
                    end
                    # recurse on the output of some instructions
                    if isa(value, LLVM.BitCastInst) ||
                       isa(value, LLVM.GetElementPtrInst) ||
                       isa(value, LLVM.AddrSpaceCastInst)
                        append!(worklist, user.(collect(uses(value))))
                    end
                end
            end
        end
        # generate the new function type & definition
        new_types = LLVM.LLVMType[]
        for (i, param) in enumerate(parameters(ft))
            if byval[i]
                # the aggregate itself replaces the pointer-to-aggregate
                llvm_typ = convert(LLVMType, args[i].typ)
                push!(new_types, llvm_typ)
            else
                push!(new_types, param)
            end
        end
        new_ft = LLVM.FunctionType(return_type(ft), new_types)
        new_f = LLVM.Function(mod, "", new_ft)
        linkage!(new_f, linkage(f))
        for (arg, new_arg) in zip(parameters(f), parameters(new_f))
            LLVM.name!(new_arg, LLVM.name(arg))
        end
        # emit IR performing the "conversions"
        new_args = LLVM.Value[]
        @dispose builder=IRBuilder() begin
            entry = BasicBlock(new_f, "conversion")
            position!(builder, entry)
            # perform argument conversions
            for (i, param) in enumerate(parameters(ft))
                if byval[i]
                    # copy the argument value to a stack slot, and reference it.
                    llvm_typ = convert(LLVMType, args[i].typ)
                    ptr = alloca!(builder, llvm_typ)
                    if LLVM.addrspace(param) != 0
                        ptr = addrspacecast!(builder, ptr, param)
                    end
                    store!(builder, parameters(new_f)[i], ptr)
                    push!(new_args, ptr)
                else
                    push!(new_args, parameters(new_f)[i])
                    for attr in collect(parameter_attributes(f, i))
                        push!(parameter_attributes(new_f, i), attr)
                    end
                end
            end
            # map the arguments
            value_map = Dict{LLVM.Value, LLVM.Value}(
                param => new_args[i] for (i,param) in enumerate(parameters(f))
            )
            value_map[f] = new_f
            clone_into!(new_f, f; value_map,
                        changes=LLVM.API.LLVMCloneFunctionChangeTypeGlobalChanges)
            # fall through
            br!(builder, blocks(new_f)[2])
        end
        # remove the old function
        # NOTE: if we ever have legitimate uses of the old function, create a shim instead
        fn = LLVM.name(f)
        @assert isempty(uses(f))
        replace_metadata_uses!(f, new_f)
        erase!(f)
        LLVM.name!(new_f, fn)
        return new_f
    end
end
# kernel state arguments
#
# to facilitate passing stateful information to kernels without having to recompile, e.g.,
# the storage location for exception flags, or the location of a I/O buffer, we enable the
# back-end to specify a Julia object that will be passed to the kernel by-value, and to
# every called function by-reference. Access to this object is done using the
# `julia.gpu.state_getter` intrinsic. after optimization, these intrinsics will be lowered
# to refer to the state argument.
#
# note that we deviate from the typical Julia calling convention, by always passing the
# state objects by value instead of by reference, this to ensure that the state object
# is not copied to the stack (because LLVM doesn't see that all uses are read-only).
# in principle, `readonly byval` should be equivalent, but LLVM doesn't realize that.
# also see https://github.com/JuliaGPU/CUDA.jl/pull/1167 and the comments in that PR.
# once LLVM supports this pattern, consider going back to passing the state by reference,
# so that the `julia.gpu.state_getter` can be simplified to return an opaque pointer.
# add a state argument to every function in the module, starting from the kernel entry point
"""
    add_kernel_state!(mod::LLVM.Module)

Add a kernel-state argument to every function in `mod` that (transitively) uses the
`julia.gpu.state_getter` intrinsic, starting from the kernel entry points recorded in the
`julia.kernel` module metadata. Old, stateless functions are cloned into new definitions
taking the state as their first parameter, all call sites are rewritten to forward the
state, and the stateless originals are erased. Returns `false` when no state type is
needed, `true` after rewriting the module.
"""
function add_kernel_state!(mod::LLVM.Module)
    job = current_job::CompilerJob

    # check if we even need a kernel state argument
    state = kernel_state_type(job)
    @assert job.config.kernel
    if state === Nothing
        return false
    end
    T_state = convert(LLVMType, state)

    # intrinsic returning an opaque pointer to the kernel state.
    # this is both for extern uses, and to make this transformation a two-step process.
    state_intr = kernel_state_intr(mod, T_state)
    state_intr_ft = LLVM.FunctionType(T_state)

    # collect the kernel entry points from module metadata
    kernels = []
    kernels_md = metadata(mod)["julia.kernel"]
    for kernel_md in operands(kernels_md)
        push!(kernels, Value(operands(kernel_md)[1]))
    end

    # determine which functions need a kernel state argument
    #
    # previously, we add the argument to every function and relied on unused arg elim to
    # clean-up the IR. however, some libraries do Funny Stuff, e.g., libdevice bitcasting
    # function pointers. such IR is hard to rewrite, so instead be more conservative.
    worklist = Set{LLVM.Function}([state_intr, kernels...])
    worklist_length = 0
    # fixed-point iteration: keep going until no new functions are discovered
    while worklist_length != length(worklist)
        # iteratively discover functions that use the intrinsic or any function calling it
        worklist_length = length(worklist)
        additions = LLVM.Function[]
        function check_user(val)
            if val isa Instruction
                bb = LLVM.parent(val)
                new_f = LLVM.parent(bb)
                in(new_f, worklist) || push!(additions, new_f)
            elseif val isa ConstantExpr
                # constant expressions don't have a parent; we need to look up their uses
                for use in uses(val)
                    check_user(user(use))
                end
            else
                error("Don't know how to check uses of $val. Please file an issue.")
            end
        end
        for f in worklist, use in uses(f)
            check_user(user(use))
        end
        for f in additions
            push!(worklist, f)
        end
    end
    # the intrinsic itself must not be rewritten
    delete!(worklist, state_intr)

    # add a state argument
    workmap = Dict{LLVM.Function, LLVM.Function}()
    for f in worklist
        fn = LLVM.name(f)
        ft = function_type(f)
        LLVM.name!(f, fn * ".stateless")

        # create a new function
        new_param_types = [T_state, parameters(ft)...]
        new_ft = LLVM.FunctionType(return_type(ft), new_param_types)
        new_f = LLVM.Function(mod, fn, new_ft)
        LLVM.name!(parameters(new_f)[1], "state")
        linkage!(new_f, linkage(f))
        for (arg, new_arg) in zip(parameters(f), parameters(new_f)[2:end])
            LLVM.name!(new_arg, LLVM.name(arg))
        end

        workmap[f] = new_f
    end

    # clone and rewrite the function bodies, replacing uses of the old stateless function
    # with the newly created definition that includes the state argument.
    #
    # most uses are rewritten by LLVM by putting the functions in the value map.
    # a separate value materializer is used to recreate constant expressions.
    #
    # note that this only _replaces_ the uses of these functions, we'll still need to
    # _correct_ the uses (i.e. actually add the state argument) afterwards.
    function materializer(val)
        if val isa ConstantExpr
            if opcode(val) == LLVM.API.LLVMBitCast
                target = operands(val)[1]
                if target isa LLVM.Function && haskey(workmap, target)
                    # the function is being bitcasted to a different function type.
                    # we need to mutate that function type to include the state argument,
                    # or we'd be invoking the original function in an invalid way.
                    #
                    # XXX: ptrtoint/inttoptr pairs can also lose the state argument...
                    #      is all this even sound?
                    typ = value_type(val)::LLVM.PointerType
                    ft = eltype(typ)::LLVM.FunctionType
                    new_ft = LLVM.FunctionType(return_type(ft), [T_state, parameters(ft)...])
                    return const_bitcast(workmap[target], LLVM.PointerType(new_ft, addrspace(typ)))
                end
            elseif opcode(val) == LLVM.API.LLVMPtrToInt
                target = operands(val)[1]
                if target isa LLVM.Function && haskey(workmap, target)
                    return const_ptrtoint(workmap[target], value_type(val))
                end
            end
        end
        return nothing # do not claim responsibility
    end
    for (f, new_f) in workmap
        # use a value mapper for rewriting function arguments
        value_map = Dict{LLVM.Value, LLVM.Value}()
        for (param, new_param) in zip(parameters(f), parameters(new_f)[2:end])
            LLVM.name!(new_param, LLVM.name(param))
            value_map[param] = new_param
        end

        # rewrite references to the old function
        merge!(value_map, workmap)

        clone_into!(new_f, f; value_map, materializer,
                    changes=LLVM.API.LLVMCloneFunctionChangeTypeGlobalChanges)

        # remove the function IR so that we won't have any uses left after this pass.
        empty!(f)
    end

    # ensure the old (stateless) functions don't have uses anymore, and remove them
    for f in keys(workmap)
        for use in uses(f)
            val = user(use)
            if val isa ConstantExpr
                # XXX: shouldn't clone_into! remove unused CEs?
                isempty(uses(val)) || error("old function still has uses (via a constant expr)")
                LLVM.unsafe_destroy!(val)
            else
                error("old function still has uses")
            end
        end
        replace_metadata_uses!(f, workmap[f])
        erase!(f)
    end

    # update uses of the new function, modifying call sites to include the kernel state
    function rewrite_uses!(f, ft)
        # update uses
        @dispose builder=IRBuilder() begin
            for use in uses(f)
                val = user(use)
                if val isa LLVM.CallBase && called_operand(val) == f
                    # NOTE: we don't rewrite calls using Julia's jlcall calling convention,
                    #       as those have a fixed argument list, passing actual arguments
                    #       in an array of objects. that doesn't matter, for now, since
                    #       GPU back-ends don't support such calls anyhow. but if we ever
                    #       want to support kernel state passing on more capable back-ends,
                    #       we'll need to update the argument array instead.
                    if callconv(val) == 37 || callconv(val) == 38
                        # TODO: update for LLVM 15 when JuliaLang/julia#45088 is merged.
                        continue
                    end

                    # forward the state argument
                    position!(builder, val)
                    state = call!(builder, state_intr_ft, state_intr, Value[], "state")
                    new_val = if val isa LLVM.CallInst
                        call!(builder, ft, f, [state, arguments(val)...], operand_bundles(val))
                    else
                        # TODO: invoke and callbr
                        error("Rewrite of $(typeof(val))-based calls is not implemented: $val")
                    end
                    callconv!(new_val, callconv(val))

                    replace_uses!(val, new_val)
                    @assert isempty(uses(val))
                    erase!(val)
                elseif val isa LLVM.CallBase
                    # the function is being passed as an argument. to avoid having to
                    # rewrite the target function, instead cast the rewritten function to
                    # the old stateless type.
                    # XXX: we won't have to do this with opaque pointers.
                    position!(builder, val)
                    target_ft = called_type(val)
                    new_args = map(zip(parameters(target_ft),
                                       arguments(val))) do (param_typ, arg)
                        if value_type(arg) != param_typ
                            const_bitcast(arg, param_typ)
                        else
                            arg
                        end
                    end
                    new_val = call!(builder, called_type(val), called_operand(val), new_args,
                                    operand_bundles(val))
                    callconv!(new_val, callconv(val))

                    replace_uses!(val, new_val)
                    @assert isempty(uses(val))
                    erase!(val)
                elseif val isa LLVM.StoreInst
                    # the function is being stored, which again we'll permit like before.
                elseif val isa ConstantExpr
                    rewrite_uses!(val, ft)
                else
                    error("Cannot rewrite $(typeof(val)) use of function: $val")
                end
            end
        end
    end
    for f in values(workmap)
        ft = function_type(f)
        rewrite_uses!(f, ft)
    end

    return true
end
AddKernelStatePass() = NewPMModulePass("AddKernelStatePass", add_kernel_state!)
# lower calls to the state getter intrinsic. this is a two-step process, so that the state
# argument can be added before optimization, and that optimization can introduce new uses
# before the intrinsic getting lowered late during optimization.
#
# returns `true` if any getter call in `fun` was rewritten to use the state argument.
function lower_kernel_state!(fun::LLVM.Function)
    job = current_job::CompilerJob
    mod = LLVM.parent(fun)
    changed = false

    # check if we even need a kernel state argument
    state = kernel_state_type(job)
    if state === Nothing
        return false
    end

    # fixup all uses of the state getter to use the newly introduced function state argument
    if haskey(functions(mod), "julia.gpu.state_getter")
        state_intr = functions(mod)["julia.gpu.state_getter"]

        state_arg = nothing # only look-up when needed
        @dispose builder=IRBuilder() begin
            for use in uses(state_intr)
                inst = user(use)
                @assert inst isa LLVM.CallInst

                # this is a function pass: only rewrite calls inside `fun`
                bb = LLVM.parent(inst)
                LLVM.parent(bb) == fun || continue

                position!(builder, inst)
                # NOTE(review): `bb` and `f` below appear unused — candidates for removal
                bb = LLVM.parent(inst)
                f = LLVM.parent(bb)

                if state_arg === nothing
                    # find the kernel state argument. this should be the first argument of
                    # the function, but only when this function needs the state!
                    state_arg = parameters(fun)[1]
                    T_state = convert(LLVMType, state)
                    @assert value_type(state_arg) == T_state
                end

                replace_uses!(inst, state_arg)

                @assert isempty(uses(inst))
                erase!(inst)
                changed = true
            end
        end
    end
    return changed
end
LowerKernelStatePass() = NewPMFunctionPass("LowerKernelStatePass", lower_kernel_state!)
# remove the state-getter intrinsic when nothing refers to it anymore. returns whether
# the module was changed.
function cleanup_kernel_state!(mod::LLVM.Module)
    job = current_job::CompilerJob

    fns = functions(mod)
    haskey(fns, "julia.gpu.state_getter") || return false

    # if we're not emitting a kernel, we can't resolve the intrinsic to an argument,
    # so only drop it once it has become unused.
    getter = fns["julia.gpu.state_getter"]
    isempty(uses(getter)) || return false

    erase!(getter)
    return true
end
CleanupKernelStatePass() = NewPMModulePass("CleanupKernelStatePass", cleanup_kernel_state!)
# get or create the `julia.gpu.state_getter` intrinsic declaration in `mod`,
# an argument-less function returning a value of type `T_state`.
function kernel_state_intr(mod::LLVM.Module, T_state)
    fns = functions(mod)
    intr = if haskey(fns, "julia.gpu.state_getter")
        fns[" julia.gpu.state_getter"[2:end]]  # same key; avoid retyping the literal twice
    else
        LLVM.Function(mod, "julia.gpu.state_getter", LLVM.FunctionType(T_state))
    end
    # the getter does not access memory
    push!(function_attributes(intr), EnumAttribute("readnone", 0))
    return intr
end
# run-time equivalent
#
# build (and immediately call) a small LLVM function that invokes the
# `julia.gpu.state_getter` intrinsic and returns its value, yielding the current
# kernel state object of type `state`.
function kernel_state_value(state)
    @dispose ctx=Context() begin
        T_state = convert(LLVMType, state)

        # create function
        llvm_f, _ = create_function(T_state)
        mod = LLVM.parent(llvm_f)

        # get intrinsic
        state_intr = kernel_state_intr(mod, T_state)
        state_intr_ft = function_type(state_intr)

        # generate IR
        @dispose builder=IRBuilder() begin
            entry = BasicBlock(llvm_f, "entry")
            position!(builder, entry)

            val = call!(builder, state_intr_ft, state_intr, Value[], "state")

            ret!(builder, val)
        end

        call_function(llvm_f, state)
    end
end
| GPUCompiler | https://github.com/JuliaGPU/GPUCompiler.jl.git |
|
[
"MIT"
] | 0.27.8 | 1d6f290a5eb1201cd63574fbc4440c788d5cb38f | code | 25763 | # Julia compiler integration
## world age lookups
# `tls_world_age` should be used to look up the current world age. in most cases, this is
# what you should use to invoke the compiler with.
@static if isdefined(Base, :tls_world_age)
    # recent Julia exposes the task-local world age directly
    import Base: tls_world_age
else
    # older Julia: query the runtime for the task-local world age
    tls_world_age() = ccall(:jl_get_tls_world_age, UInt, ())
end
## looking up method instances
export methodinstance, generic_methodinstance
# build the call signature `Tuple{ft, argtypes...}` from a function type and an
# argument tuple type, preserving any type variables of a `UnionAll` `tt`
@inline function signature_type_by_tt(ft::Type, tt::Type)
    body = Base.unwrap_unionall(tt)::DataType
    sig = Tuple{ft, body.parameters...}
    return Base.rewrap_unionall(sig, tt)
end
# create a MethodError from a function type
# TODO: fix upstream
# obtain a function value from its type. singleton function types carry their
# instance directly; closures do not, so we conjure up an invalid value instead.
function unsafe_function_from_type(ft::Type)
    isdefined(ft, :instance) && return ft.instance
    # HACK: this works because MethodError doesn't actually use the function,
    #       it only stores it for display purposes.
    return Ref{ft}()[]
end
# construct a `Base.MethodError` from a function *type*: materialize a function value
# first, since `MethodError` stores a value rather than a type
function MethodError(ft::Type{<:Function}, tt::Type, world::Integer=typemax(UInt))
    Base.MethodError(unsafe_function_from_type(ft), tt, world)
end
# fallback for non-function callables: pass the value through unchanged
MethodError(ft, tt, world=typemax(UInt)) = Base.MethodError(ft, tt, world)
# generate a LineInfoNode for the current source code location
# (uses the expanding module and the macro call site's file/line)
macro LineInfoNode(method)
    Core.LineInfoNode(__module__, method, __source__.file, Int32(__source__.line), Int32(0))
end
"""
methodinstance(ft::Type, tt::Type, [world::UInt])
Look up the method instance that corresponds to invoking the function with type `ft` with
argument typed `tt`. If the `world` argument is specified, the look-up is static and will
always return the same result. If the `world` argument is not specified, the look-up is
dynamic and the returned method instance will depende on the current world age. If no method
is found, a `MethodError` is thrown.
This function is highly optimized, and results do not need to be cached additionally.
Only use this function with concrete signatures, i.e., using the types of values you would
pass at run time. For non-concrete signatures, use `generic_methodinstance` instead.
"""
methodinstance
# slow-path method-instance look-up that also supports non-concrete signatures,
# going through the compiler's method matching (`CC._findsup`)
function generic_methodinstance(@nospecialize(ft::Type), @nospecialize(tt::Type),
                                world::Integer=tls_world_age())
    sig = signature_type_by_tt(ft, tt)

    match, _ = CC._findsup(sig, nothing, world)
    match === nothing && throw(MethodError(ft, tt, world))

    mi = CC.specialize_method(match)

    return mi::MethodInstance
end
# on 1.11 (JuliaLang/julia#52572, merged as part of JuliaLang/julia#52233) we can use
# Julia's cached method lookup to simply look up method instances at run time.
if VERSION >= v"1.11.0-DEV.1552"

# XXX: version of Base.method_instance that uses a function type
@inline function methodinstance(@nospecialize(ft::Type), @nospecialize(tt::Type),
                                world::Integer=tls_world_age())
    sig = signature_type_by_tt(ft, tt)
    @assert Base.isdispatchtuple(sig)   # JuliaLang/julia#52233

    mi = ccall(:jl_method_lookup_by_tt, Any,
               (Any, Csize_t, Any),
               sig, world, #=method_table=# nothing)
    mi === nothing && throw(MethodError(ft, tt, world))
    mi = mi::MethodInstance

    # `jl_method_lookup_by_tt` and `jl_method_lookup` can return a unspecialized mi
    if !Base.isdispatchtuple(mi.specTypes)
        mi = CC.specialize_method(mi.def, sig, mi.sparam_vals)::MethodInstance
    end

    return mi
end

# on older versions of Julia, we always need to use the generic lookup
else

const methodinstance = generic_methodinstance

# generator that looks up the method match at *generation* time (for the given world),
# and emits a CodeInfo that simply returns the cached MethodInstance, with proper
# world bounds and backedges so invalidation works
function methodinstance_generator(world::UInt, source, self, ft::Type, tt::Type)
    @nospecialize
    @assert CC.isType(ft) && CC.isType(tt)
    ft = ft.parameters[1]
    tt = tt.parameters[1]

    stub = Core.GeneratedFunctionStub(identity, Core.svec(:methodinstance, :ft, :tt), Core.svec())

    # look up the method match
    method_error = :(throw(MethodError(ft, tt, $world)))
    sig = Tuple{ft, tt.parameters...}
    min_world = Ref{UInt}(typemin(UInt))
    max_world = Ref{UInt}(typemax(UInt))
    match = ccall(:jl_gf_invoke_lookup_worlds, Any,
                  (Any, Any, Csize_t, Ref{Csize_t}, Ref{Csize_t}),
                  sig, #=mt=# nothing, world, min_world, max_world)
    match === nothing && return stub(world, source, method_error)

    # look up the method and code instance
    mi = ccall(:jl_specializations_get_linfo, Ref{MethodInstance},
               (Any, Any, Any), match.method, match.spec_types, match.sparams)
    ci = CC.retrieve_code_info(mi, world)

    # prepare a new code info
    new_ci = copy(ci)
    empty!(new_ci.code)
    empty!(new_ci.codelocs)
    empty!(new_ci.linetable)
    empty!(new_ci.ssaflags)
    new_ci.ssavaluetypes = 0

    # propagate edge metadata
    new_ci.min_world = min_world[]
    new_ci.max_world = max_world[]
    new_ci.edges = MethodInstance[mi]

    # prepare the slots
    new_ci.slotnames = Symbol[Symbol("#self#"), :ft, :tt]
    new_ci.slotflags = UInt8[0x00 for i = 1:3]

    # return the method instance
    push!(new_ci.code, CC.ReturnNode(mi))
    push!(new_ci.ssaflags, 0x00)
    push!(new_ci.linetable, @LineInfoNode(methodinstance))
    push!(new_ci.codelocs, 1)
    new_ci.ssavaluetypes += 1

    return new_ci
end

@eval function methodinstance(ft, tt)
    $(Expr(:meta, :generated_only))
    $(Expr(:meta, :generated, methodinstance_generator))
end

end
## code instance cache

# Julia 1.11+ ships an integrated, owner-token-keyed compiler cache, making our own
# `CodeCache` (below) unnecessary on those versions.
const HAS_INTEGRATED_CACHE = VERSION >= v"1.11.0-DEV.1552"
if !HAS_INTEGRATED_CACHE

# a simple code cache mapping method instances to their (possibly multiple,
# world-bounded) code instances
struct CodeCache
    dict::IdDict{MethodInstance,Vector{CodeInstance}}

    CodeCache() = new(IdDict{MethodInstance,Vector{CodeInstance}}())
end

function Base.show(io::IO, ::MIME"text/plain", cc::CodeCache)
    print(io, "CodeCache with $(mapreduce(length, +, values(cc.dict); init=0)) entries")
    if !isempty(cc.dict)
        print(io, ": ")
        for (mi, cis) in cc.dict
            println(io)
            print(io, " ")
            show(io, mi)

            # render a code instance's world bounds in a human-readable way
            function worldstr(min_world, max_world)
                if min_world == typemax(UInt)
                    "empty world range"
                elseif max_world == typemax(UInt)
                    "worlds $(Int(min_world))+"
                else
                    "worlds $(Int(min_world)) to $(Int(max_world))"
                end
            end

            for (i,ci) in enumerate(cis)
                println(io)
                print(io, "  CodeInstance for ", worldstr(ci.min_world, ci.max_world))
            end
        end
    end
end

Base.empty!(cc::CodeCache) = empty!(cc.dict)

const GLOBAL_CI_CACHES = Dict{CompilerConfig, CodeCache}()
const GLOBAL_CI_CACHES_LOCK = ReentrantLock()


## method invalidations

function CC.setindex!(cache::CodeCache, ci::CodeInstance, mi::MethodInstance)
    # make sure the invalidation callback is attached to the method instance
    add_codecache_callback!(cache, mi)

    cis = get!(cache.dict, mi, CodeInstance[])
    push!(cis, ci)
end

# invalidation (like invalidate_method_instance, but for our cache)
struct CodeCacheCallback
    cache::CodeCache
end

@static if VERSION ≥ v"1.11.0-DEV.798"

function add_codecache_callback!(cache::CodeCache, mi::MethodInstance)
    callback = CodeCacheCallback(cache)
    CC.add_invalidation_callback!(callback, mi)
end
function (callback::CodeCacheCallback)(replaced::MethodInstance, max_world::UInt32)
    cis = get(callback.cache.dict, replaced, nothing)
    if cis === nothing
        return
    end
    for ci in cis
        # `~0 % Csize_t` (i.e. typemax) marks a still-valid code instance
        if ci.max_world == ~0 % Csize_t
            @assert ci.min_world - 1 <= max_world "attempting to set illogical constraints"
            @static if VERSION >= v"1.11.0-DEV.1390"
                @atomic ci.max_world = max_world
            else
                ci.max_world = max_world
            end
        end
        @assert ci.max_world <= max_world
    end
end

else

function add_codecache_callback!(cache::CodeCache, mi::MethodInstance)
    callback = CodeCacheCallback(cache)
    if !isdefined(mi, :callbacks)
        mi.callbacks = Any[callback]
    elseif !in(callback, mi.callbacks)
        push!(mi.callbacks, callback)
    end
end
# `seen` guards against cycles in the backedge graph
function (callback::CodeCacheCallback)(replaced::MethodInstance, max_world::UInt32,
                                       seen::Set{MethodInstance}=Set{MethodInstance}())
    push!(seen, replaced)

    cis = get(callback.cache.dict, replaced, nothing)
    if cis === nothing
        return
    end
    for ci in cis
        if ci.max_world == ~0 % Csize_t
            @assert ci.min_world - 1 <= max_world "attempting to set illogical constraints"
            ci.max_world = max_world
        end
        @assert ci.max_world <= max_world
    end

    # recurse to all backedges to update their valid range also
    if isdefined(replaced, :backedges)
        backedges = filter(replaced.backedges) do @nospecialize(mi)
            if mi isa MethodInstance
                mi ∉ seen
            elseif mi isa Type
                # an `invoke` call, which is a `(sig, MethodInstance)` pair.
                # let's ignore the `sig` and process the `MethodInstance` next.
                false
            else
                error("invalid backedge")
            end
        end

        # Don't touch/empty backedges `invalidate_method_instance` in C will do that later
        # replaced.backedges = Any[]

        for mi in backedges
            callback(mi::MethodInstance, max_world, seen)
        end
    end
end

end

end # !HAS_INTEGRATED_CACHE
## method overrides

# overlay method table used by back-ends to override Base methods with GPU-compatible ones
Base.Experimental.@MethodTable(GLOBAL_METHOD_TABLE)
## interpreter

# `get_inference_world` was renamed/moved across Julia versions; normalize the import
@static if VERSION >= v"1.11.0-DEV.1498"
    import Core.Compiler: get_inference_world
    using Base: get_world_counter
else
    import Core.Compiler: get_world_counter, get_world_counter as get_inference_world
end

using Core.Compiler: OverlayMethodTable
const MTType = Core.MethodTable
# wrap the overlay method table in a caching layer when the compiler supports it
if isdefined(Core.Compiler, :CachedMethodTable)
    using Core.Compiler: CachedMethodTable
    const GPUMethodTableView = CachedMethodTable{OverlayMethodTable}
    get_method_table_view(world::UInt, mt::MTType) =
        CachedMethodTable(OverlayMethodTable(world, mt))
else
    const GPUMethodTableView = OverlayMethodTable
    get_method_table_view(world::UInt, mt::MTType) = OverlayMethodTable(world, mt)
end
# abstract interpreter driving GPU-targeted inference, using an overlay method table
# and either Julia's integrated cache (via a token) or our own `CodeCache`
struct GPUInterpreter <: CC.AbstractInterpreter
    world::UInt                             # world age inference runs in
    method_table::GPUMethodTableView        # overlay method table view

    @static if HAS_INTEGRATED_CACHE
        token::Any                          # owner token for the integrated cache
    else
        code_cache::CodeCache               # our own global code cache
    end
    inf_cache::Vector{CC.InferenceResult}   # local inference result cache

    inf_params::CC.InferenceParams
    opt_params::CC.OptimizationParams
end
@static if HAS_INTEGRATED_CACHE

# primary constructor: wrap the raw method table and create a fresh inference cache
function GPUInterpreter(world::UInt=Base.get_world_counter();
                        method_table::MTType,
                        token::Any,
                        inf_params::CC.InferenceParams,
                        opt_params::CC.OptimizationParams)
    @assert world <= Base.get_world_counter()

    method_table = get_method_table_view(world, method_table)
    inf_cache = Vector{CC.InferenceResult}()

    return GPUInterpreter(world, method_table,
                          token, inf_cache,
                          inf_params, opt_params)
end

# copy constructor: derive a new interpreter, overriding selected fields
function GPUInterpreter(interp::GPUInterpreter;
                        world::UInt=interp.world,
                        method_table::GPUMethodTableView=interp.method_table,
                        token::Any=interp.token,
                        inf_cache::Vector{CC.InferenceResult}=interp.inf_cache,
                        inf_params::CC.InferenceParams=interp.inf_params,
                        opt_params::CC.OptimizationParams=interp.opt_params)
    return GPUInterpreter(world, method_table,
                          token, inf_cache,
                          inf_params, opt_params)
end

else

# primary constructor: wrap the raw method table and create a fresh inference cache
function GPUInterpreter(world::UInt=Base.get_world_counter();
                        method_table::MTType,
                        code_cache::CodeCache,
                        inf_params::CC.InferenceParams,
                        opt_params::CC.OptimizationParams)
    @assert world <= Base.get_world_counter()

    method_table = get_method_table_view(world, method_table)
    inf_cache = Vector{CC.InferenceResult}()

    return GPUInterpreter(world, method_table,
                          code_cache, inf_cache,
                          inf_params, opt_params)
end

# copy constructor: derive a new interpreter, overriding selected fields
function GPUInterpreter(interp::GPUInterpreter;
                        world::UInt=interp.world,
                        method_table::GPUMethodTableView=interp.method_table,
                        code_cache::CodeCache=interp.code_cache,
                        inf_cache::Vector{CC.InferenceResult}=interp.inf_cache,
                        inf_params::CC.InferenceParams=interp.inf_params,
                        opt_params::CC.OptimizationParams=interp.opt_params)
    return GPUInterpreter(world, method_table,
                          code_cache, inf_cache,
                          inf_params, opt_params)
end

end # HAS_INTEGRATED_CACHE
# AbstractInterpreter interface: forward the stored parameters and caches
CC.InferenceParams(interp::GPUInterpreter) = interp.inf_params
CC.OptimizationParams(interp::GPUInterpreter) = interp.opt_params
#=CC.=#get_inference_world(interp::GPUInterpreter) = interp.world
CC.get_inference_cache(interp::GPUInterpreter) = interp.inf_cache
if HAS_INTEGRATED_CACHE
    CC.cache_owner(interp::GPUInterpreter) = interp.token
else
    CC.code_cache(interp::GPUInterpreter) = WorldView(interp.code_cache, interp.world)
end

# No need to do any locking since we're not putting our results into the runtime cache
CC.lock_mi_inference(interp::GPUInterpreter, mi::MethodInstance) = nothing
CC.unlock_mi_inference(interp::GPUInterpreter, mi::MethodInstance) = nothing

function CC.add_remark!(interp::GPUInterpreter, sv::CC.InferenceState, msg)
    @safe_debug "Inference remark during GPU compilation of $(sv.linfo): $msg"
end

CC.may_optimize(interp::GPUInterpreter) = true
CC.may_compress(interp::GPUInterpreter) = true
CC.may_discard_trees(interp::GPUInterpreter) = true
CC.verbose_stmt_info(interp::GPUInterpreter) = false
CC.method_table(interp::GPUInterpreter) = interp.method_table
# semi-concrete interpretation is broken with overlays (JuliaLang/julia#47349)
function CC.concrete_eval_eligible(interp::GPUInterpreter,
    @nospecialize(f), result::CC.MethodCallResult, arginfo::CC.ArgInfo, sv::CC.InferenceState)
    # NOTE it's fine to skip overloading with `sv::IRInterpretationState` since we disable
    #      semi-concrete interpretation anyway.
    ret = @invoke CC.concrete_eval_eligible(interp::CC.AbstractInterpreter,
        f::Any, result::CC.MethodCallResult, arginfo::CC.ArgInfo, sv::CC.InferenceState)
    if ret === :semi_concrete_eval
        # disable semi-concrete interpretation
        return :none
    end
    return ret
end

# legacy signature without an inference state argument
function CC.concrete_eval_eligible(interp::GPUInterpreter,
    @nospecialize(f), result::CC.MethodCallResult, arginfo::CC.ArgInfo)
    ret = @invoke CC.concrete_eval_eligible(interp::CC.AbstractInterpreter,
        f::Any, result::CC.MethodCallResult, arginfo::CC.ArgInfo)
    # normalize the old `false` return value to `nothing`
    ret === false && return nothing
    return ret
end
## world view of the cache

using Core.Compiler: WorldView

if !HAS_INTEGRATED_CACHE

function CC.haskey(wvc::WorldView{CodeCache}, mi::MethodInstance)
    CC.get(wvc, mi, nothing) !== nothing
end

# return the first code instance for `mi` whose world bounds cover the view's worlds,
# or `default` if none matches
function CC.get(wvc::WorldView{CodeCache}, mi::MethodInstance, default)
    # check the cache
    for ci in get!(wvc.cache.dict, mi, CodeInstance[])
        if ci.min_world <= wvc.worlds.min_world && wvc.worlds.max_world <= ci.max_world
            # TODO: if (code && (code == jl_nothing || jl_ir_flag_inferred((jl_array_t*)code)))
            # NOTE(review): `src` is computed but never used; likely leftover from an
            #               earlier implementation — candidate for removal
            src = if ci.inferred isa Vector{UInt8}
                ccall(:jl_uncompress_ir, Any, (Any, Ptr{Cvoid}, Any),
                       mi.def, C_NULL, ci.inferred)
            else
                ci.inferred
            end
            return ci
        end
    end

    return default
end

function CC.getindex(wvc::WorldView{CodeCache}, mi::MethodInstance)
    r = CC.get(wvc, mi, nothing)
    r === nothing && throw(KeyError(mi))
    return r::CodeInstance
end

function CC.setindex!(wvc::WorldView{CodeCache}, ci::CodeInstance, mi::MethodInstance)
    CC.setindex!(wvc.cache, ci, mi)
end

end # HAS_INTEGRATED_CACHE
## codegen/inference integration

# run inference for `mi` with `interp`, making sure `cache` ends up holding a
# `CodeInstance` with inferred source for the given world range; returns it
function ci_cache_populate(interp, cache, mi, min_world, max_world)
    if VERSION >= v"1.12.0-DEV.15"
        inferred_ci = CC.typeinf_ext_toplevel(interp, mi, CC.SOURCE_MODE_FORCE_SOURCE) # or SOURCE_MODE_FORCE_SOURCE_UNCACHED?
        @assert inferred_ci !== nothing "Inference of $mi failed"

        # inference should have populated our cache
        wvc = WorldView(cache, min_world, max_world)
        @assert CC.haskey(wvc, mi)
        ci = CC.getindex(wvc, mi)

        # if ci is rettype_const, the inference result won't have been cached
        # (because it is normally not supposed to be used ever again).
        # to avoid the need to re-infer, set that field here.
        if ci.inferred === nothing
            CC.setindex!(wvc, inferred_ci, mi)
            ci = CC.getindex(wvc, mi)
        end
    else
        src = CC.typeinf_ext_toplevel(interp, mi)

        # inference should have populated our cache
        wvc = WorldView(cache, min_world, max_world)
        @assert CC.haskey(wvc, mi)
        ci = CC.getindex(wvc, mi)

        # if ci is rettype_const, the inference result won't have been cached
        # (because it is normally not supposed to be used ever again).
        # to avoid the need to re-infer, set that field here.
        if ci.inferred === nothing
            @atomic ci.inferred = src
        end
    end

    return ci::CodeInstance
end
# look up a `CodeInstance` with inferred source for `mi` in `cache`, or return `nothing`
function ci_cache_lookup(cache, mi, min_world, max_world)
    wvc = WorldView(cache, min_world, max_world)
    ci = CC.get(wvc, mi, nothing)
    if ci !== nothing && ci.inferred === nothing
        # if for some reason we did end up with a codeinfo without inferred source, e.g.,
        # because of calling `Base.return_types` which only sets rettype, pretend we didn't
        # run inference so that we re-infer now and not during codegen (which is disallowed)
        return nothing
    end
    return ci
end
## interface

# for platforms without @cfunction-with-closure support: module-level state standing in
# for the closure captures (set up by `compile_method_instance` before code generation)
const _method_instances = Ref{Any}()
const _cache = Ref{Any}()
function _lookup_fun(mi, min_world, max_world)
    push!(_method_instances[], mi)
    ci_cache_lookup(_cache[], mi, min_world, max_world)
end
# compilation policy passed to `jl_create_native`
@enum CompilationPolicy::Cint begin
    CompilationPolicyDefault = 0
    CompilationPolicyExtern = 1
end
# HACK: in older versions of Julia, `jl_create_native` doesn't take a world argument
#       but instead always generates code for the current world. note that this doesn't
#       actually change the world age, but just spoofs the counter `jl_create_native` reads.
# XXX: Base.get_world_counter is supposed to be monotonically increasing and is runtime global.
macro in_world(world, ex)
    quote
        actual_world = Base.get_world_counter()
        world_counter = cglobal(:jl_world_counter, Csize_t)
        unsafe_store!(world_counter, $(esc(world)))
        try
            $(esc(ex))
        finally
            # always restore the real counter, even if `ex` throws
            unsafe_store!(world_counter, actual_world)
        end
    end
end
"""
    precompile(job::CompilerJob)

Compile the GPUCompiler job. In particular this will run inference using the foreign
abstract interpreter, populating the code cache. Returns whether a code instance with
inferred source is available for the job afterwards.
"""
function Base.precompile(@nospecialize(job::CompilerJob))
    # the requested world must fall within the method's validity range
    if job.source.def.primary_world > job.world || job.world > job.source.def.deleted_world
        error("Cannot compile $(job.source) for world $(job.world); method is only valid in worlds $(job.source.def.primary_world) to $(job.source.def.deleted_world)")
    end

    # populate the cache
    interp = get_interpreter(job)
    cache = CC.code_cache(interp)
    if ci_cache_lookup(cache, job.source, job.world, job.world) === nothing
        ci_cache_populate(interp, cache, job.source, job.world, job.world)
        return ci_cache_lookup(cache, job.source, job.world, job.world) !== nothing
    end
    return true
end
"""
    compile_method_instance(job::CompilerJob) -> (llvm_mod, compiled)

Generate LLVM IR for `job`'s method instance (and everything it calls) by invoking the
Julia code generator with a custom code-instance lookup hook. Returns the generated
LLVM module together with a `Dict` mapping each compiled `MethodInstance` to a named
tuple `(; ci, func, specfunc)` holding its `CodeInstance` and the *names* of the
generated LLVM functions (names, not function objects, since later optimization may
rename or remove those functions).
"""
function compile_method_instance(@nospecialize(job::CompilerJob))
    # the requested world must fall within the method's validity range
    if job.source.def.primary_world > job.world || job.world > job.source.def.deleted_world
        error("Cannot compile $(job.source) for world $(job.world); method is only valid in worlds $(job.source.def.primary_world) to $(job.source.def.deleted_world)")
    end

    # populate the cache
    interp = get_interpreter(job)
    cache = CC.code_cache(interp)
    if ci_cache_lookup(cache, job.source, job.world, job.world) === nothing
        ci_cache_populate(interp, cache, job.source, job.world, job.world)
        @assert ci_cache_lookup(cache, job.source, job.world, job.world) !== nothing
    end

    # create a callback to look-up function in our cache,
    # and keep track of the method instances we needed.
    method_instances = []
    if Sys.ARCH == :x86 || Sys.ARCH == :x86_64
        function lookup_fun(mi, min_world, max_world)
            push!(method_instances, mi)
            ci_cache_lookup(cache, mi, min_world, max_world)
        end
        lookup_cb = @cfunction($lookup_fun, Any, (Any, UInt, UInt))
    else
        # no @cfunction-with-closure support; pass state through globals instead
        _cache[] = cache
        _method_instances[] = method_instances
        lookup_cb = @cfunction(_lookup_fun, Any, (Any, UInt, UInt))
    end

    # set-up the compiler interface
    debug_info_kind = llvm_debug_info(job)
    cgparams = (;
        track_allocations  = false,
        code_coverage      = false,
        prefer_specsig     = true,
        gnu_pubnames       = false,
        debug_info_kind    = Cint(debug_info_kind),
        lookup             = Base.unsafe_convert(Ptr{Nothing}, lookup_cb),
        safepoint_on_entry = can_safepoint(job),
        gcstack_arg        = false)
    params = Base.CodegenParams(; cgparams...)

    # generate IR
    GC.@preserve lookup_cb begin
        # create and configure the module
        ts_mod = ThreadSafeModule("start")
        ts_mod() do mod
            triple!(mod, llvm_triple(job.config.target))
            if julia_datalayout(job.config.target) !== nothing
                datalayout!(mod, julia_datalayout(job.config.target))
            end
            flags(mod)["Dwarf Version", LLVM.API.LLVMModuleFlagBehaviorWarning] =
                Metadata(ConstantInt(dwarf_version(job.config.target)))
            flags(mod)["Debug Info Version", LLVM.API.LLVMModuleFlagBehaviorWarning] =
                Metadata(ConstantInt(DEBUG_METADATA_VERSION()))
        end

        native_code = ccall(:jl_create_native, Ptr{Cvoid},
            (Vector{MethodInstance}, LLVM.API.LLVMOrcThreadSafeModuleRef, Ptr{Base.CodegenParams}, Cint, Cint, Cint, Csize_t),
            [job.source], ts_mod, Ref(params), CompilationPolicyExtern, #=imaging mode=# 0, #=external linkage=# 0, job.world)
        @assert native_code != C_NULL

        llvm_mod_ref =
            ccall(:jl_get_llvm_module, LLVM.API.LLVMOrcThreadSafeModuleRef,
                  (Ptr{Cvoid},), native_code)
        @assert llvm_mod_ref != C_NULL

        # XXX: this is wrong; we can't expose the underlying LLVM module, but should
        #      instead always go through the callback in order to unlock it properly.
        #      rework this once we depend on Julia 1.9 or later.
        llvm_ts_mod = LLVM.ThreadSafeModule(llvm_mod_ref)
        llvm_mod = nothing
        llvm_ts_mod() do mod
            llvm_mod = mod
        end
    end

    # process all compiled method instances
    compiled = Dict()
    for mi in method_instances
        ci = ci_cache_lookup(cache, mi, job.world, job.world)
        ci === nothing && continue

        # get the function index
        llvm_func_idx = Ref{Int32}(-1)
        llvm_specfunc_idx = Ref{Int32}(-1)
        ccall(:jl_get_function_id, Nothing,
              (Ptr{Cvoid}, Any, Ptr{Int32}, Ptr{Int32}),
              native_code, ci, llvm_func_idx, llvm_specfunc_idx)
        @assert llvm_func_idx[] != -1 || llvm_specfunc_idx[] != -1 "Static compilation failed"

        # get the function
        llvm_func = if llvm_func_idx[] >= 1
            llvm_func_ref = ccall(:jl_get_llvm_function, LLVM.API.LLVMValueRef,
                                  (Ptr{Cvoid}, UInt32), native_code, llvm_func_idx[]-1)
            @assert llvm_func_ref != C_NULL
            LLVM.name(LLVM.Function(llvm_func_ref))
        else
            nothing
        end

        llvm_specfunc = if llvm_specfunc_idx[] >= 1
            llvm_specfunc_ref = ccall(:jl_get_llvm_function, LLVM.API.LLVMValueRef,
                                      (Ptr{Cvoid}, UInt32), native_code, llvm_specfunc_idx[]-1)
            @assert llvm_specfunc_ref != C_NULL
            LLVM.name(LLVM.Function(llvm_specfunc_ref))
        else
            nothing
        end

        # NOTE: it's not safe to store raw LLVM functions here, since those may get
        #       removed or renamed during optimization, so we store their name instead.
        compiled[mi] = (; ci, func=llvm_func, specfunc=llvm_specfunc)
    end

    # ensure that the requested method instance was compiled
    @assert haskey(compiled, job.source)

    return llvm_mod, compiled
end
# partially revert JuliaLangjulia#49391
#
# override `CC.typeinf` for the GPU interpreter on the Julia versions where the upstream
# change applies; the version bounds mirror where it was reverted upstream (on master,
# and on 1.11-beta2). NOTE(review): body appears copied from the pre-#49391 upstream
# definition — keep in sync with Base if these bounds ever change.
@static if v"1.11.0-DEV.1603" <= VERSION < v"1.12.0-DEV.347" && # reverted on master
           !(v"1.11-beta2" <= VERSION < v"1.12") # reverted on 1.11-beta2
function CC.typeinf(interp::GPUInterpreter, frame::CC.InferenceState)
    if CC.__measure_typeinf__[]
        # timing measurement enabled: wrap inference in a timer for this frame
        CC.Timings.enter_new_timer(frame)
        v = CC._typeinf(interp, frame)
        CC.Timings.exit_current_timer(frame)
        return v
    else
        return CC._typeinf(interp, frame)
    end
end
end
| GPUCompiler | https://github.com/JuliaGPU/GPUCompiler.jl.git |
|
[
"MIT"
] | 0.27.8 | 1d6f290a5eb1201cd63574fbc4440c788d5cb38f | code | 3787 | # name mangling
# safe name generation
#
# LLVM is picky about special characters in identifiers, and `ptxas` even more so;
# sanitize names by replacing every non-alphanumeric character with an underscore
# (stricter than what LLVM itself would require).
function safe_name(fn::String)
    return replace(fn, r"[^A-Za-z0-9]" => "_")
end
function safe_name(t::DataType)
    return safe_name(String(nameof(t)))
end
function safe_name(t::Type{<:Function})
    # counterpart of Base.nameof that also works on function types
    tbl = t.name.mt
    if tbl === Symbol.name.mt
        # a shared method table means the name is not unique to this function type
        fname = nameof(t)
    else
        fname = tbl.name
    end
    return safe_name(string(fname))
end
safe_name(::Type{Union{}}) = "Bottom"
safe_name(x) = safe_name(repr(x))
# C++ mangling

# we generate function names that look like C++ functions, because many tools, like NVIDIA's
# profilers, support them (grouping different instantiations of the same kernel together).

# mangle a single type (or type-parameter value) `t` following the Itanium C++ ABI.
# `substitutions` is the shared substitution table: the first repeated name is encoded
# as `S_`, the second as `S0_`, etc., compressing repeated occurrences.
function mangle_param(t, substitutions=String[])
    t == Nothing && return "v"

    if isa(t, DataType) && t <: Ptr
        # pointers mangle as P<pointee>
        tn = mangle_param(eltype(t), substitutions)
        "P$tn"
    elseif isa(t, DataType)
        tn = safe_name(t)

        # handle substitutions (Itanium: S_ for the first table entry, S<n-2>_ after)
        sub = findfirst(isequal(tn), substitutions)
        if sub === nothing
            # first occurrence: emit as <length><name> and remember it
            str = "$(length(tn))$tn"
            push!(substitutions, tn)
        elseif sub == 1
            str = "S_"
        else
            str = "S$(sub-2)_"
        end

        # encode typevars as template parameters
        if !isempty(t.parameters)
            str *= "I"
            for t in t.parameters
                str *= mangle_param(t, substitutions)
            end
            str *= "E"
        end

        str
    elseif isa(t, Union)
        # encode unions as a pseudo-template `Union<...>`
        tn = "Union"

        # handle substitutions
        sub = findfirst(isequal(tn), substitutions)
        if sub === nothing
            str = "$(length(tn))$tn"
            push!(substitutions, tn)
        elseif sub == 1
            str = "S_"
        else
            str = "S$(sub-2)_"
        end

        # encode union types as template parameters
        if !isempty(Base.uniontypes(t))
            str *= "I"
            for t in Base.uniontypes(t)
                str *= mangle_param(t, substitutions)
            end
            str *= "E"
        end

        str
    elseif isa(t, Union{Bool, Cchar, Cuchar, Cshort, Cushort, Cint, Cuint, Clong, Culong, Clonglong, Culonglong, Int128, UInt128})
        # literal integer values (e.g. the N in Val{N}) encode as L<type-code><value>E
        ts = t isa Bool       ? 'b' : # bool
             t isa Cchar      ? 'a' : # signed char
             t isa Cuchar     ? 'h' : # unsigned char
             t isa Cshort     ? 's' : # short
             t isa Cushort    ? 't' : # unsigned short
             t isa Cint       ? 'i' : # int
             t isa Cuint      ? 'j' : # unsigned int
             t isa Clong      ? 'l' : # long
             t isa Culong     ? 'm' : # unsigned long
             t isa Clonglong  ? 'x' : # long long, __int64
             t isa Culonglong ? 'y' : # unsigned long long, __int64
             t isa Int128     ? 'n' : # __int128
             t isa UInt128    ? 'o' : # unsigned __int128
             error("Invalid type")
        tn = string(abs(t), base=10)
        if t < 0
            # negative literals carry an 'n' prefix in the mangling
            tn = 'n'*tn
        end
        "L$(ts)$(tn)E"
    else
        # fallback: anything else (symbols, other literal values, ...) by its safe name
        tn = safe_name(t) # TODO: actually does support digits...
        if startswith(tn, r"\d")
            # C++ classes cannot start with a digit, so mangling doesn't support it
            tn = "_$(tn)"
        end
        "$(length(tn))$tn"
    end
end
# mangle a full signature (a Tuple type whose first parameter is the function type)
# into a C++-style `_Z`-prefixed name, sharing one substitution table across arguments.
function mangle_sig(sig)
    params = sig.parameters

    # encode the function name itself
    fn = safe_name(params[1])
    buf = IOBuffer()
    print(buf, "_Z", length(fn), fn)

    # encode every argument type
    substitutions = String[]
    for t in Iterators.drop(params, 1)
        print(buf, mangle_param(t, substitutions))
    end

    return String(take!(buf))
end
| GPUCompiler | https://github.com/JuliaGPU/GPUCompiler.jl.git |
|
[
"MIT"
] | 0.27.8 | 1d6f290a5eb1201cd63574fbc4440c788d5cb38f | code | 2595 | # machine code generation
# final preparations for the module to be compiled to machine code
# these passes should not be run when e.g. compiling to write to disk.
function prepare_execution!(@nospecialize(job::CompilerJob), mod::LLVM.Module)
    # the resolution pass reads the active job through a global, since the
    # pass callback cannot take extra arguments (see `resolve_cpu_references!`)
    global current_job
    current_job = job

    @dispose pb=NewPMPassBuilder() begin
        register!(pb, ResolveCPUReferencesPass())

        # resolve references to CPU run-time objects, interleaved with global
        # optimization so that newly-exposed opportunities are picked up, and
        # followed by DCE to drop globals/declarations that became dead
        add!(pb, RecomputeGlobalsAAPass())
        add!(pb, GlobalOptPass())
        add!(pb, ResolveCPUReferencesPass())
        add!(pb, GlobalDCEPass())
        add!(pb, StripDeadPrototypesPass())

        run!(pb, mod, llvm_machine(job.config.target))
    end

    return
end
# some Julia code contains references to objects in the CPU run-time,
# without actually using the contents or functionality of those objects.
#
# prime example are type tags, which reference the address of the allocated type.
# since those references are ephemeral, we can't eagerly resolve and emit them in the IR,
# but at the same time the GPU can't resolve them at run-time.
#
# this pass performs that resolution at link time.
function resolve_cpu_references!(mod::LLVM.Module)
    # the pass callback cannot take arguments, so the active job is communicated
    # via a global; the type assertion also guards against running without a job set
    job = current_job::CompilerJob

    changed = false
    for f in functions(mod)
        fn = LLVM.name(f)
        if isdeclaration(f) && !LLVM.isintrinsic(f) && startswith(fn, "jl_")
            # eagerly resolve the address of the binding
            address = ccall(:jl_cglobal, Any, (Any, Any), fn, UInt)
            dereferenced = unsafe_load(address)
            dereferenced = LLVM.ConstantInt(dereferenced)

            # replace loads of `value` (or of constant expressions derived from it)
            # with the resolved constant, returning whether anything was rewritten
            function replace_bindings!(value)
                # NOTE: use a dedicated local accumulator here. assigning to `changed`
                #       from this nested function would rebind the *outer* variable
                #       (nested functions share the enclosing scope), resetting changes
                #       recorded for previously-processed functions.
                local_changed = false
                for use in uses(value)
                    val = user(use)
                    if isa(val, LLVM.ConstantExpr)
                        # recurse
                        local_changed |= replace_bindings!(val)
                    elseif isa(val, LLVM.LoadInst)
                        # resolve
                        replace_uses!(val, dereferenced)
                        erase!(val)
                        # FIXME: iterator invalidation?
                        local_changed = true
                    end
                end
                return local_changed
            end

            changed |= replace_bindings!(f)
        end
    end

    return changed
end
# wrap the resolution logic as a module pass for the new pass manager
function ResolveCPUReferencesPass()
    return NewPMModulePass("ResolveCPUReferences", resolve_cpu_references!)
end
# lower the module to machine code for the job's target, returning it as a string
# (textual assembly by default; pass an LLVM file-type constant to change the format)
function mcgen(@nospecialize(job::CompilerJob), mod::LLVM.Module, format=LLVM.API.LLVMAssemblyFile)
    machine = llvm_machine(job.config.target)
    code = emit(machine, mod, format)
    return String(code)
end
| GPUCompiler | https://github.com/JuliaGPU/GPUCompiler.jl.git |
|
[
"MIT"
] | 0.27.8 | 1d6f290a5eb1201cd63574fbc4440c788d5cb38f | code | 45362 | # implementation of the GPUCompiler interfaces for generating Metal code
## target

export MetalCompilerTarget

"""
    MetalCompilerTarget(; macos, air, metal)

Compiler target for generating Metal (AIR) code, identified by the macOS version being
targeted as well as the AIR and Metal language versions to emit.
"""
Base.@kwdef struct MetalCompilerTarget <: AbstractCompilerTarget
    # version numbers
    macos::VersionNumber
    air::VersionNumber
    metal::VersionNumber
end

# for backwards compatibility
MetalCompilerTarget(macos::VersionNumber) =
    MetalCompilerTarget(; macos, air=v"2.4", metal=v"2.4")

function Base.hash(target::MetalCompilerTarget, h::UInt)
    # fold in all fields: targets differing only in AIR/Metal versions generate
    # different code, so they should not collide when used as (cache) keys
    h = hash(target.macos, h)
    h = hash(target.air, h)
    h = hash(target.metal, h)
    return h
end
# this target reports its generated code as textual
source_code(target::MetalCompilerTarget) = "text"

# Metal is not supported by our LLVM builds, so we can't get a target machine
llvm_machine(::MetalCompilerTarget) = nothing

# target triple for AIR, versioned by the targeted macOS release
llvm_triple(target::MetalCompilerTarget) = "air64-apple-macosx$(target.macos)"

# data layout for the AIR target: little-endian, 64-bit pointers, naturally-aligned
# scalar and vector types, native integer widths of 8/16/32 bits
llvm_datalayout(target::MetalCompilerTarget) =
    "e-p:64:64:64"*
    "-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:64:64"*
    "-f32:32:32-f64:64:64"*
    "-v16:16:16-v24:32:32-v32:32:32-v48:64:64-v64:64:64-v96:128:128-v128:128:128-v192:256:256-v256:256:256-v512:512:512-v1024:1024:1024"*
    "-n8:16:32"

# no `byval` wrapping of arguments for this target
# (arguments are rewritten explicitly; see `pass_by_reference!` below)
needs_byval(job::CompilerJob{MetalCompilerTarget}) = false
## job

# TODO: encode debug build or not in the compiler job
#       https://github.com/JuliaGPU/CUDAnative.jl/issues/368

# identifier for the device runtime matching this job's target
function runtime_slug(job::CompilerJob{MetalCompilerTarget})
    return "metal-macos$(job.config.target.macos)"
end

# treat any function in the `air.` namespace as a target intrinsic
function isintrinsic(@nospecialize(job::CompilerJob{MetalCompilerTarget}), fn::String)
    return startswith(fn, "air.")
end
# target-specific module finalization: rewrite the kernel's calling convention and
# materialize version constants, returning the (possibly replaced) entry function
function finish_module!(@nospecialize(job::CompilerJob{MetalCompilerTarget}), mod::LLVM.Module, entry::LLVM.Function)
    entry_fn = LLVM.name(entry)

    # update calling conventions
    if job.config.kernel
        entry = pass_by_reference!(job, mod, entry)

        add_input_arguments!(job, mod, entry)
        # the rewrites above replace the function object; re-fetch it by name
        entry = LLVM.functions(mod)[entry_fn]
    end

    # emit the AIR and Metal version numbers as constants in the module. this makes it
    # possible to 'query' these in device code, relying on LLVM to optimize the checks away
    # and generate static code. note that we only do so if there's actual uses of these
    # variables; unconditionally creating a gvar would result in duplicate declarations.
    for (name, value) in ["air_major"   => job.config.target.air.major,
                          "air_minor"   => job.config.target.air.minor,
                          "metal_major" => job.config.target.metal.major,
                          "metal_minor" => job.config.target.metal.minor]
        if haskey(globals(mod), name)
            gv = globals(mod)[name]
            initializer!(gv, ConstantInt(LLVM.Int32Type(), value))
            # change the linkage so that we can inline the value
            linkage!(gv, LLVM.API.LLVMPrivateLinkage)
        end
    end

    # add metadata to AIR intrinsics LLVM doesn't know about
    annotate_air_intrinsics!(job, mod)

    # we emit properties (of the air and metal version) as private global constants,
    # so run the optimizer so that they are inlined before the rest of the optimizer runs.
    @dispose pb=NewPMPassBuilder() begin
        add!(pb, RecomputeGlobalsAAPass())
        add!(pb, GlobalOptPass())
        run!(pb, mod)
    end

    return functions(mod)[entry_fn]
end
# check the module for IR constructs the Metal back-end cannot handle,
# returning a list of errors (empty when the module is valid)
function validate_ir(job::CompilerJob{MetalCompilerTarget}, mod::LLVM.Module)
    errors = IRError[]

    # Metal does not support double precision, except for logging
    function is_illegal_double(val)
        T_bad = LLVM.DoubleType()
        if value_type(val) != T_bad
            return false
        end

        # doubles that only ever flow into `metal_os_log*` calls are permitted
        function used_for_logging(use::LLVM.Use)
            usr = user(use)
            if usr isa LLVM.CallInst
                callee = called_operand(usr)
                if callee isa LLVM.Function && startswith(name(callee), "metal_os_log")
                    return true
                end
            end
            return false
        end
        if all(used_for_logging, uses(val))
            return false
        end

        return true
    end
    append!(errors, check_ir_values(mod, is_illegal_double, "use of double value"))

    # Metal never supports 128-bit integers
    append!(errors, check_ir_values(mod, LLVM.IntType(128)))

    errors
end
# hide `noreturn` function attributes, which cause issues with the back-end compiler,
# probably because of thread-divergent control flow as we've encountered with CUDA.
# note that it isn't enough to remove the function attribute, because the Metal LLVM
# compiler re-optimizes and will rediscover the property. to avoid this, we inline
# all functions that are marked noreturn, i.e., until LLVM cannot rediscover it.
function hide_noreturn!(mod::LLVM.Module)
    noreturn_attr = EnumAttribute("noreturn", 0)
    noinline_attr = EnumAttribute("noinline", 0)
    alwaysinline_attr = EnumAttribute("alwaysinline", 0)

    any_noreturn = false
    for f in functions(mod)
        attrs = function_attributes(f)
        if noreturn_attr in collect(attrs)
            # strip the attribute and force the function to be inlined away
            delete!(attrs, noreturn_attr)
            delete!(attrs, noinline_attr)
            push!(attrs, alwaysinline_attr)
            any_noreturn = true
        end
    end
    # nothing to do when no function was marked noreturn
    any_noreturn || return false

    # inline the marked functions and clean up the resulting IR
    @dispose pb=NewPMPassBuilder() begin
        add!(pb, AlwaysInlinerPass())
        add!(pb, NewPMFunctionPassManager()) do fpm
            add!(fpm, SimplifyCFGPass())
            add!(fpm, InstCombinePass())
        end
        run!(pb, mod)
    end

    return true
end
# final IR-level lowering for the Metal target: add kernel/module metadata,
# work around back-end limitations, and lower unsupported LLVM intrinsics
function finish_ir!(@nospecialize(job::CompilerJob{MetalCompilerTarget}), mod::LLVM.Module,
                    entry::LLVM.Function)
    entry_fn = LLVM.name(entry)

    # add kernel metadata
    if job.config.kernel
        entry = add_address_spaces!(job, mod, entry)

        add_argument_metadata!(job, mod, entry)

        add_module_metadata!(job, mod)
    end

    # JuliaLang/Metal.jl#113
    hide_noreturn!(mod)

    # get rid of unreachable control flow (JuliaLang/Metal.jl#370).
    # note that this currently works in tandem with the `hide_noreturn!` pass above,
    # as `replace_unreachable!` doesn't handle functions that _only_ contain `unreachable`.
    if job.config.target.macos < v"15"
        for f in functions(mod)
            replace_unreachable!(job, f)
        end
    end

    # lower LLVM intrinsics that AIR doesn't support
    changed = false
    for f in functions(mod)
        changed |= lower_llvm_intrinsics!(job, f)
    end
    if changed
        # lowering may have introduced additional functions marked `alwaysinline`
        @dispose pb=NewPMPassBuilder() begin
            add!(pb, AlwaysInlinerPass())
            add!(pb, NewPMFunctionPassManager()) do fpm
                add!(fpm, SimplifyCFGPass())
                add!(fpm, InstCombinePass())
            end
            run!(pb, mod)
        end
    end

    # perform codegen passes that would normally run during machine code emission
    # XXX: codegen passes don't seem available in the new pass manager yet
    @dispose pm=ModulePassManager() begin
        expand_reductions!(pm)
        run!(pm, mod)
    end

    return functions(mod)[entry_fn]
end
@unlocked function mcgen(job::CompilerJob{MetalCompilerTarget}, mod::LLVM.Module,
                         format=LLVM.API.LLVMObjectFile)
    # our LLVM version does not support emitting Metal libraries,
    # so machine-code generation is a no-op for this target
    return nothing
end
# generic pointer removal
#
# every pointer argument (i.e. byref objs) to a kernel needs an address space attached.
# this pass rewrites pointers to reference arguments to be located in address space 1.
#
# NOTE: this pass only rewrites byref objs, not plain pointers being passed; the user is
# responsible for making sure these pointers have an address space attached (using LLVMPtr).
#
# NOTE: this pass also only rewrites pointers _without_ address spaces, which requires it to
# be executed after optimization (where Julia's address spaces are stripped). If we ever
# want to execute it earlier, adapt remapType to rewrite all pointer types.
function add_address_spaces!(@nospecialize(job::CompilerJob), mod::LLVM.Module, f::LLVM.Function)
    ft = function_type(f)

    # find the byref parameters
    byref = BitVector(undef, length(parameters(ft)))
    args = classify_arguments(job, ft)
    # ghost arguments don't occupy an LLVM parameter slot, so drop them up front
    filter!(args) do arg
        arg.cc != GHOST
    end
    for arg in args
        byref[arg.idx] = (arg.cc == BITS_REF)
    end

    # rewrite an addrspace-0 pointer type to the device address space (1)
    function remapType(src)
        # TODO: shouldn't we recurse into structs here, making sure the parent object's
        #       address space matches the contained one? doesn't matter right now as we
        #       only use LLVMPtr (i.e. no rewriting of contained pointers needed) in the
        #       device addrss space (i.e. no mismatch between parent and field possible)
        dst = if src isa LLVM.PointerType && addrspace(src) == 0
            if supports_typed_pointers(context())
                LLVM.PointerType(remapType(eltype(src)), #=device=# 1)
            else
                LLVM.PointerType(#=device=# 1)
            end
        else
            src
        end
        return dst
    end

    # generate the new function type & definition
    new_types = LLVMType[]
    for (i, param) in enumerate(parameters(ft))
        if byref[i]
            push!(new_types, remapType(param::LLVM.PointerType))
        else
            push!(new_types, param)
        end
    end
    new_ft = LLVM.FunctionType(return_type(ft), new_types)
    new_f = LLVM.Function(mod, "", new_ft)
    linkage!(new_f, linkage(f))
    for (arg, new_arg) in zip(parameters(f), parameters(new_f))
        LLVM.name!(new_arg, LLVM.name(arg))
    end

    # we cannot simply remap the function arguments, because that will not propagate the
    # address space changes across, e.g, bitcasts (the dest would still be in AS 0).
    # using a type remapper on the other hand changes too much, including unrelated insts.
    # so instead, we load the arguments in stack slots and dereference them so that we can
    # keep on using the original IR that assumed pointers without address spaces
    new_args = LLVM.Value[]
    @dispose builder=IRBuilder() begin
        entry = BasicBlock(new_f, "conversion")
        position!(builder, entry)

        # perform argument conversions
        for (i, param) in enumerate(parameters(ft))
            if byref[i]
                # load the argument in a stack slot
                llvm_typ = convert(LLVMType, args[i].typ)
                val = load!(builder, llvm_typ, parameters(new_f)[i])
                ptr = alloca!(builder, llvm_typ)
                store!(builder, val, ptr)
                push!(new_args, ptr)
            else
                push!(new_args, parameters(new_f)[i])
            end
            # carry over any parameter attributes from the old function
            for attr in collect(parameter_attributes(f, i))
                push!(parameter_attributes(new_f, i), attr)
            end
        end

        # map the arguments
        value_map = Dict{LLVM.Value, LLVM.Value}(
            param => new_args[i] for (i,param) in enumerate(parameters(f))
        )

        value_map[f] = new_f
        clone_into!(new_f, f; value_map,
                    changes=LLVM.API.LLVMCloneFunctionChangeTypeGlobalChanges)

        # fall through
        br!(builder, blocks(new_f)[2])
    end

    # remove the old function
    fn = LLVM.name(f)
    @assert isempty(uses(f))
    replace_metadata_uses!(f, new_f)
    erase!(f)
    LLVM.name!(new_f, fn)

    # clean-up after this pass (which runs after optimization)
    @dispose pb=NewPMPassBuilder() begin
        add!(pb, SimplifyCFGPass())
        add!(pb, SROAPass())
        add!(pb, EarlyCSEPass())
        add!(pb, InstCombinePass())
        run!(pb, mod)
    end

    return new_f
end
# value-to-reference conversion
#
# Metal doesn't support passing values, so we need to convert those to references instead
function pass_by_reference!(@nospecialize(job::CompilerJob), mod::LLVM.Module, f::LLVM.Function)
    ft = function_type(f)

    # generate the new function type & definition
    args = classify_arguments(job, ft)
    new_types = LLVM.LLVMType[]
    # track which parameter slots were converted from by-value to by-reference
    bits_as_reference = BitVector(undef, length(parameters(ft)))
    for arg in args
        if arg.cc == BITS_VALUE && !(arg.typ <: Ptr || arg.typ <: Core.LLVMPtr)
            # pass the value as a reference instead
            push!(new_types, LLVM.PointerType(parameters(ft)[arg.idx], #=Constant=# 1))
            bits_as_reference[arg.idx] = true
        elseif arg.cc != GHOST
            push!(new_types, parameters(ft)[arg.idx])
            bits_as_reference[arg.idx] = false
        end
    end
    new_ft = LLVM.FunctionType(return_type(ft), new_types)
    new_f = LLVM.Function(mod, "", new_ft)
    linkage!(new_f, linkage(f))
    for (i, (arg, new_arg)) in enumerate(zip(parameters(f), parameters(new_f)))
        LLVM.name!(new_arg, LLVM.name(arg))
    end

    # emit IR performing the "conversions"
    new_args = LLVM.Value[]
    @dispose builder=IRBuilder() begin
        entry = BasicBlock(new_f, "entry")
        position!(builder, entry)

        # perform argument conversions
        for arg in args
            if arg.cc != GHOST
                if bits_as_reference[arg.idx]
                    # load the reference to get a value back
                    val = load!(builder, parameters(ft)[arg.idx], parameters(new_f)[arg.idx])
                    push!(new_args, val)
                else
                    push!(new_args, parameters(new_f)[arg.idx])
                end
            end
        end

        # map the arguments
        value_map = Dict{LLVM.Value, LLVM.Value}(
            param => new_args[i] for (i,param) in enumerate(parameters(f))
        )

        value_map[f] = new_f
        clone_into!(new_f, f; value_map,
                    changes=LLVM.API.LLVMCloneFunctionChangeTypeLocalChangesOnly)

        # fall through
        br!(builder, blocks(new_f)[2])
    end

    # set the attributes (needs to happen _after_ cloning)
    # TODO: verify that clone copies other attributes,
    #       and that other uses of clone don't set parameters before cloning
    for i in 1:length(parameters(new_f))
        if bits_as_reference[i]
            # add appropriate attributes
            # TODO: other attributes (nonnull, readonly, align, dereferenceable)?
            ## we've just emitted a load, so the pointer itself cannot be captured
            push!(parameter_attributes(new_f, i), EnumAttribute("nocapture", 0))
            ## Metal.jl emits separate buffers for each scalar argument
            push!(parameter_attributes(new_f, i), EnumAttribute("noalias", 0))
        end
    end

    # remove the old function
    # NOTE: if we ever have legitimate uses of the old function, create a shim instead
    fn = LLVM.name(f)
    @assert isempty(uses(f))
    erase!(f)
    LLVM.name!(new_f, fn)

    return new_f
end
# kernel input arguments
#
# hardware index counters (thread id, group id, etc) aren't accessed via intrinsics,
# but using special arguments to the kernel function.

# map from placeholder intrinsic name ("julia.air.<name>.<llvm type>") to the AIR
# argument name and the Julia type of the value it provides
const kernel_intrinsics = Dict()

# scalar counters, available at 32-bit and 16-bit precision
for intr in [
        "dispatch_quadgroups_per_threadgroup", "dispatch_simdgroups_per_threadgroup",
        "quadgroup_index_in_threadgroup", "quadgroups_per_threadgroup",
        "simdgroup_index_in_threadgroup", "simdgroups_per_threadgroup",
        "thread_index_in_quadgroup", "thread_index_in_simdgroup",
        "thread_index_in_threadgroup", "thread_execution_width", "threads_per_simdgroup"],
    (llvm_typ, julia_typ) in [
        ("i32", UInt32),
        ("i16", UInt16),
    ]
    push!(kernel_intrinsics, "julia.air.$intr.$llvm_typ" => (name=intr, typ=julia_typ))
end

# multi-dimensional counters, additionally available as 2- and 3-element vectors
for intr in [
        "dispatch_threads_per_threadgroup",
        "grid_origin", "grid_size",
        "thread_position_in_grid", "thread_position_in_threadgroup",
        "threadgroup_position_in_grid", "threadgroups_per_grid",
        "threads_per_grid", "threads_per_threadgroup"],
    (llvm_typ, julia_typ) in [
        ("i32", UInt32),
        ("v2i32", NTuple{2, VecElement{UInt32}}),
        ("v3i32", NTuple{3, VecElement{UInt32}}),
        ("i16", UInt16),
        ("v2i16", NTuple{2, VecElement{UInt16}}),
        ("v3i16", NTuple{3, VecElement{UInt16}}),
    ]
    push!(kernel_intrinsics, "julia.air.$intr.$llvm_typ" => (name=intr, typ=julia_typ))
end
# render an LLVM type as the AIR argument-type name used in kernel metadata
# (16/32-bit integers and short vectors thereof are the only supported types)
function argument_type_name(typ)
    if typ isa LLVM.VectorType
        # e.g. <3 x i32> becomes "uint3"
        return argument_type_name(eltype(typ)) * string(Int(length(typ)))
    elseif typ isa LLVM.IntegerType
        bits = width(typ)
        bits == 16 && return "ushort"
        bits == 32 && return "uint"
    end
    error("Cannot encode unknown type `$typ`")
end
# rewrite the kernel (and every function transitively using a hardware-index
# placeholder intrinsic) to take the corresponding values as trailing function
# arguments, threading them through call sites, and erase the placeholder intrinsics
function add_input_arguments!(@nospecialize(job::CompilerJob), mod::LLVM.Module,
                              entry::LLVM.Function)
    entry_fn = LLVM.name(entry)

    # figure out which intrinsics are used and need to be added as arguments
    used_intrinsics = filter(keys(kernel_intrinsics)) do intr_fn
        haskey(functions(mod), intr_fn)
    end |> collect
    nargs = length(used_intrinsics)

    # determine which functions need these arguments
    worklist = Set{LLVM.Function}([entry])
    for intr_fn in used_intrinsics
        push!(worklist, functions(mod)[intr_fn])
    end
    worklist_length = 0
    # fixed-point iteration: stop once a pass adds no new functions
    while worklist_length != length(worklist)
        # iteratively discover functions that use an intrinsic or any function calling it
        worklist_length = length(worklist)
        additions = LLVM.Function[]

        for f in worklist, use in uses(f)
            inst = user(use)::Instruction
            bb = LLVM.parent(inst)
            new_f = LLVM.parent(bb)
            in(new_f, worklist) || push!(additions, new_f)
        end

        for f in additions
            push!(worklist, f)
        end
    end
    # the intrinsic declarations themselves don't need rewriting
    for intr_fn in used_intrinsics
        delete!(worklist, functions(mod)[intr_fn])
    end

    # add the arguments
    # NOTE: we don't need to be fine-grained here, as unused args will be removed during opt
    workmap = Dict{LLVM.Function, LLVM.Function}()
    for f in worklist
        fn = LLVM.name(f)
        ft = function_type(f)
        # free up the original name so the replacement can take it
        LLVM.name!(f, fn * ".orig")
        # create a new function
        new_param_types = LLVMType[parameters(ft)...]

        for intr_fn in used_intrinsics
            llvm_typ = convert(LLVMType, kernel_intrinsics[intr_fn].typ)
            push!(new_param_types, llvm_typ)
        end
        new_ft = LLVM.FunctionType(return_type(ft), new_param_types)
        new_f = LLVM.Function(mod, fn, new_ft)
        linkage!(new_f, linkage(f))
        for (arg, new_arg) in zip(parameters(f), parameters(new_f))
            LLVM.name!(new_arg, LLVM.name(arg))
        end
        # name the trailing parameters after the intrinsic values they carry
        for (intr_fn, new_arg) in zip(used_intrinsics, parameters(new_f)[end-nargs+1:end])
            LLVM.name!(new_arg, kernel_intrinsics[intr_fn].name)
        end

        workmap[f] = new_f
    end

    # clone and rewrite the function bodies.
    # we don't need to rewrite much as the arguments are added last.
    for (f, new_f) in workmap
        # map the arguments
        value_map = Dict{LLVM.Value, LLVM.Value}()
        for (param, new_param) in zip(parameters(f), parameters(new_f))
            LLVM.name!(new_param, LLVM.name(param))
            value_map[param] = new_param
        end

        value_map[f] = new_f
        clone_into!(new_f, f; value_map,
                    changes=LLVM.API.LLVMCloneFunctionChangeTypeLocalChangesOnly)

        # we can't remove this function yet, as we might still need to rewrite any called,
        # but remove the IR already
        empty!(f)
    end

    # drop unused constants that may be referring to the old functions
    # XXX: can we do this differently?
    for f in worklist
        for use in uses(f)
            val = user(use)
            if val isa LLVM.ConstantExpr && isempty(uses(val))
                LLVM.unsafe_destroy!(val)
            end
        end
    end

    # update other uses of the old function, modifying call sites to pass the arguments
    function rewrite_uses!(f, new_f)
        # update uses
        @dispose builder=IRBuilder() begin
            for use in uses(f)
                val = user(use)
                if val isa LLVM.CallInst || val isa LLVM.InvokeInst || val isa LLVM.CallBrInst
                    callee_f = LLVM.parent(LLVM.parent(val))
                    # forward the arguments: the caller's trailing parameters carry
                    # the intrinsic values, so pass them along to the callee
                    position!(builder, val)
                    new_val = if val isa LLVM.CallInst
                        call!(builder, function_type(new_f), new_f,
                              [arguments(val)..., parameters(callee_f)[end-nargs+1:end]...],
                              operand_bundles(val))
                    else
                        # TODO: invoke and callbr
                        error("Rewrite of $(typeof(val))-based calls is not implemented: $val")
                    end
                    callconv!(new_val, callconv(val))

                    replace_uses!(val, new_val)
                    @assert isempty(uses(val))
                    erase!(val)
                elseif val isa LLVM.ConstantExpr && opcode(val) == LLVM.API.LLVMBitCast
                    # XXX: why isn't this caught by the value materializer above?
                    target = operands(val)[1]
                    @assert target == f
                    new_val = LLVM.const_bitcast(new_f, value_type(val))
                    rewrite_uses!(val, new_val)
                    # we can't simply replace this constant expression, as it may be used
                    # as a call, taking arguments (so we need to rewrite it to pass the input arguments)

                    # drop the old constant if it is unused
                    # XXX: can we do this differently?
                    if isempty(uses(val))
                        LLVM.unsafe_destroy!(val)
                    end
                else
                    error("Cannot rewrite unknown use of function: $val")
                end
            end
        end
    end
    for (f, new_f) in workmap
        rewrite_uses!(f, new_f)
        @assert isempty(uses(f))
        erase!(f)
    end

    # replace uses of the intrinsics with references to the input arguments
    for (i, intr_fn) in enumerate(used_intrinsics)
        intr = functions(mod)[intr_fn]
        for use in uses(intr)
            val = user(use)
            callee_f = LLVM.parent(LLVM.parent(val))
            if val isa LLVM.CallInst || val isa LLVM.InvokeInst || val isa LLVM.CallBrInst
                # the i-th intrinsic value lives in the i-th trailing parameter
                replace_uses!(val, parameters(callee_f)[end-nargs+i])
            else
                error("Cannot rewrite unknown use of function: $val")
            end

            @assert isempty(uses(val))
            erase!(val)
        end
        @assert isempty(uses(intr))
        erase!(intr)
    end

    return
end
# argument metadata generation
#
# module metadata is used to identify buffers that are passed as kernel arguments.
function add_argument_metadata!(@nospecialize(job::CompilerJob), mod::LLVM.Module,
                                entry::LLVM.Function)
    entry_ft = function_type(entry)

    ## argument info
    arg_infos = Metadata[]

    # Iterate through arguments and create metadata for them
    args = classify_arguments(job, entry_ft)
    i = 1
    for arg in args
        # skip ghost arguments, which don't occupy a parameter slot
        arg.idx === nothing && continue
        @assert parameters(entry_ft)[arg.idx] isa LLVM.PointerType

        # NOTE: we emit the bare minimum of argument metadata to support
        #       bindless argument encoding. Actually using the argument encoder
        #       APIs (deprecated in Metal 3) turned out too difficult, given the
        #       undocumented nature of the argument metadata, and the complex
        #       arguments we encounter with typical Julia kernels.
        md = Metadata[]

        # argument index
        @assert arg.idx == i
        push!(md, Metadata(ConstantInt(Int32(i-1))))

        push!(md, MDString("air.buffer"))

        push!(md, MDString("air.location_index"))
        push!(md, Metadata(ConstantInt(Int32(i-1))))

        # XXX: unknown
        push!(md, Metadata(ConstantInt(Int32(1))))

        push!(md, MDString("air.read_write")) # TODO: Check for const array

        push!(md, MDString("air.address_space"))
        push!(md, Metadata(ConstantInt(Int32(addrspace(parameters(entry_ft)[arg.idx])))))

        # for LLVMPtr arguments, size/alignment describe the pointee type
        arg_type = if arg.typ <: Core.LLVMPtr
            arg.typ.parameters[1]
        else
            arg.typ
        end
        push!(md, MDString("air.arg_type_size"))
        push!(md, Metadata(ConstantInt(Int32(sizeof(arg_type)))))

        push!(md, MDString("air.arg_type_align_size"))
        push!(md, Metadata(ConstantInt(Int32(Base.datatype_alignment(arg_type)))))

        push!(md, MDString("air.arg_type_name"))
        push!(md, MDString(repr(arg.typ)))

        push!(md, MDString("air.arg_name"))
        push!(md, MDString(String(arg.name)))

        push!(arg_infos, MDNode(md))

        i += 1
    end

    # Create metadata for argument intrinsics last
    # (the trailing parameters added by `add_input_arguments!`)
    for intr_arg in parameters(entry)[i:end]
        intr_fn = LLVM.name(intr_arg)

        arg_info = Metadata[]
        push!(arg_info, Metadata(ConstantInt(Int32(i-1))))
        push!(arg_info, MDString("air.$intr_fn"))

        push!(arg_info, MDString("air.arg_type_name"))
        push!(arg_info, MDString(argument_type_name(value_type(intr_arg))))

        arg_info = MDNode(arg_info)
        push!(arg_infos, arg_info)

        i += 1
    end
    arg_infos = MDNode(arg_infos)

    ## stage info
    stage_infos = Metadata[]
    stage_infos = MDNode(stage_infos)

    # register the entry point as an AIR kernel
    kernel_md = MDNode([entry, stage_infos, arg_infos])
    push!(metadata(mod)["air.kernel"], kernel_md)

    return
end
# module-level metadata

# TODO: determine limits being set dynamically
function add_module_metadata!(@nospecialize(job::CompilerJob), mod::LLVM.Module)
    # helper to register an i32-valued entry in `llvm.module.flags`.
    # NOTE(review): the leading 7 is the LLVM module-flag behavior ("max" per the
    # LLVM LangRef) — confirm that is the behavior AIR expects for these flags.
    function add_module_flag!(name::String, value::Integer)
        md = Metadata[Metadata(ConstantInt(Int32(7))),
                      MDString(name),
                      Metadata(ConstantInt(Int32(value)))]
        push!(metadata(mod)["llvm.module.flags"], MDNode(md))
        return
    end

    # register resource limits
    add_module_flag!("air.max_device_buffers", 31)
    add_module_flag!("air.max_constant_buffers", 31)
    add_module_flag!("air.max_threadgroup_buffers", 31)
    add_module_flag!("air.max_textures", 128)
    add_module_flag!("air.max_read_write_textures", 8)
    add_module_flag!("air.max_samplers", 16)

    # add compiler identification
    push!(metadata(mod)["llvm.ident"],
          MDNode(Metadata[MDString("Julia $(VERSION) with Metal.jl")]))

    # add AIR version
    air = job.config.target.air
    push!(metadata(mod)["air.version"],
          MDNode(Metadata[Metadata(ConstantInt(Int32(air.major))),
                          Metadata(ConstantInt(Int32(air.minor))),
                          Metadata(ConstantInt(Int32(air.patch)))]))

    # add Metal language version
    metal = job.config.target.metal
    push!(metadata(mod)["air.language_version"],
          MDNode(Metadata[MDString("Metal"),
                          Metadata(ConstantInt(Int32(metal.major))),
                          Metadata(ConstantInt(Int32(metal.minor))),
                          Metadata(ConstantInt(Int32(metal.patch)))]))

    # embed the targeted macOS SDK version
    sdk_version!(mod, job.config.target.macos)

    return
end
# intrinsics handling
#
# we don't have a proper back-end, so we're missing out on intrinsics-related functionality.

# Replace calls to LLVM intrinsics with their AIR equivalents.
#
# Three classes of intrinsics are handled:
#  - intrinsics that are unsupported but safe to drop (lifetime markers, assumes);
#  - intrinsics that map 1:1 onto an AIR intrinsic (abs/min/max families);
#  - intrinsics that need open-coded lowering (copysign, IEEE minimum/maximum).
#
# Returns `true` if any call was rewritten or removed.
function lower_llvm_intrinsics!(@nospecialize(job::CompilerJob), fun::LLVM.Function)
    isdeclaration(fun) && return false

    # TODO: fastmath

    mod = LLVM.parent(fun)
    changed = false

    # determine worklist: every call to an LLVM intrinsic in this function.
    # collected up front so that erasing calls doesn't invalidate iteration.
    worklist = LLVM.CallBase[]
    for bb in blocks(fun), inst in instructions(bb)
        isa(inst, LLVM.CallBase) || continue
        call_fun = called_operand(inst)
        isa(call_fun, LLVM.Function) || continue
        LLVM.isintrinsic(call_fun) || continue
        push!(worklist, inst)
    end

    # lookup tables and helpers, hoisted out of the per-call loop
    # (they were previously reconstructed for every call in the worklist)

    # unsupported, but safe to remove
    unsupported_intrinsics = LLVM.Intrinsic.([
        "llvm.experimental.noalias.scope.decl",
        "llvm.lifetime.start",
        "llvm.lifetime.end",
        "llvm.assume"
    ])

    # intrinsics that map straight to AIR. the second tuple element indicates
    # signedness for integer intrinsics, and is `missing` for floating-point ones.
    mappable_intrinsics = Dict(
        # one argument
        LLVM.Intrinsic("llvm.abs")    => ("air.abs",  true),
        LLVM.Intrinsic("llvm.fabs")   => ("air.fabs", missing),
        # two arguments
        LLVM.Intrinsic("llvm.umin")   => ("air.min",  false),
        LLVM.Intrinsic("llvm.smin")   => ("air.min",  true),
        LLVM.Intrinsic("llvm.umax")   => ("air.max",  false),
        LLVM.Intrinsic("llvm.smax")   => ("air.max",  true),
        LLVM.Intrinsic("llvm.minnum") => ("air.fmin", missing),
        LLVM.Intrinsic("llvm.maxnum") => ("air.fmax", missing),
    )

    # intrinsics that require open-coded lowering
    copysign_intr = LLVM.Intrinsic("llvm.copysign")
    minimum_intr = LLVM.Intrinsic("llvm.minimum")
    maximum_intr = LLVM.Intrinsic("llvm.maximum")

    # compute the AIR name suffix for a value type (e.g. `i32`, `f16`, `v4f32`)
    # XXX: can't we use LLVM to do this kind of mangling?
    function type_suffix(typ)
        if typ isa LLVM.IntegerType
            "i$(width(typ))"
        elseif typ == LLVM.HalfType()
            "f16"
        elseif typ == LLVM.FloatType()
            "f32"
        elseif typ == LLVM.DoubleType()
            "f64"
        elseif typ isa LLVM.VectorType
            "v$(length(typ))$(type_suffix(eltype(typ)))"
        else
            error("Unsupported intrinsic type: $typ")
        end
    end

    # lower intrinsics
    for call in worklist
        call_fun = called_operand(call)
        call_ft = function_type(call_fun)
        intr = LLVM.Intrinsic(call_fun)

        # unsupported, but safe to remove
        if intr in unsupported_intrinsics
            erase!(call)
            changed = true
            # `call` has been erased; don't let later branches touch it
            continue
        end

        # intrinsics that map straight to AIR
        if haskey(mappable_intrinsics, intr)
            fn, signed = mappable_intrinsics[intr]

            # determine type of the intrinsic, and mangle the AIR name accordingly
            typ = value_type(call)
            if typ isa LLVM.IntegerType || (typ isa LLVM.VectorType && eltype(typ) isa LLVM.IntegerType)
                # integer variants encode signedness in the name
                fn *= "." * (signed::Bool ? "s" : "u") * "." * type_suffix(typ)
            else
                fn *= "." * type_suffix(typ)
            end

            # re-use an existing declaration of the AIR intrinsic, if any
            new_intr = if haskey(functions(mod), fn)
                functions(mod)[fn]
            else
                LLVM.Function(mod, fn, call_ft)
            end
            @dispose builder=IRBuilder() begin
                position!(builder, call)
                debuglocation!(builder, call)

                new_value = call!(builder, call_ft, new_intr, arguments(call))
                replace_uses!(call, new_value)
                erase!(call)
                changed = true
            end
            continue
        end

        # copysign: no AIR equivalent, so lower to bit twiddling
        if intr == copysign_intr
            arg0, arg1 = operands(call)
            @assert value_type(arg0) == value_type(arg1)
            typ = value_type(call)

            # XXX: LLVM C API doesn't have getPrimitiveSizeInBits
            jltyp = if typ == LLVM.HalfType()
                Float16
            elseif typ == LLVM.FloatType()
                Float32
            elseif typ == LLVM.DoubleType()
                Float64
            else
                error("Unsupported copysign type: $typ")
            end

            @dispose builder=IRBuilder() begin
                position!(builder, call)
                debuglocation!(builder, call)

                # get bits
                typ′ = LLVM.IntType(8*sizeof(jltyp))
                arg0′ = bitcast!(builder, arg0, typ′)
                arg1′ = bitcast!(builder, arg1, typ′)

                # twiddle bits: sign from arg1, magnitude from arg0
                sign = and!(builder, arg1′, LLVM.ConstantInt(typ′, Base.sign_mask(jltyp)))
                mantissa = and!(builder, arg0′, LLVM.ConstantInt(typ′, ~Base.sign_mask(jltyp)))
                new_value = or!(builder, sign, mantissa)
                new_value = bitcast!(builder, new_value, typ)

                replace_uses!(call, new_value)
                erase!(call)
                changed = true
            end
            continue
        end

        # IEEE 754-2018 compliant maximum/minimum, propagating NaNs and treating -0 as less than +0
        if intr == minimum_intr || intr == maximum_intr
            typ = value_type(call)
            is_minimum = intr == minimum_intr

            # XXX: LLVM C API doesn't have getPrimitiveSizeInBits
            jltyp = if typ == LLVM.HalfType()
                Float16
            elseif typ == LLVM.FloatType()
                Float32
            elseif typ == LLVM.DoubleType()
                Float64
            else
                error("Unsupported maximum/minimum type: $typ")
            end

            # create a function that performs the IEEE-compliant operation.
            # normally we'd do this inline, but LLVM.jl doesn't have BB split functionality.
            new_intr_fn = if is_minimum
                "air.minimum.f$(8*sizeof(jltyp))"
            else
                "air.maximum.f$(8*sizeof(jltyp))"
            end
            if haskey(functions(mod), new_intr_fn)
                # already emitted the helper for an earlier call; re-use it
                new_intr = functions(mod)[new_intr_fn]
            else
                new_intr = LLVM.Function(mod, new_intr_fn, call_ft)
                push!(function_attributes(new_intr), EnumAttribute("alwaysinline"))

                arg0, arg1 = parameters(new_intr)
                @assert value_type(arg0) == value_type(arg1)

                bb_check_arg0 = BasicBlock(new_intr, "check_arg0")
                bb_nan_arg0 = BasicBlock(new_intr, "nan_arg0")
                bb_check_arg1 = BasicBlock(new_intr, "check_arg1")
                bb_nan_arg1 = BasicBlock(new_intr, "nan_arg1")
                bb_check_zero = BasicBlock(new_intr, "check_zero")
                bb_compare_zero = BasicBlock(new_intr, "compare_zero")
                bb_fallback = BasicBlock(new_intr, "fallback")

                @dispose builder=IRBuilder() begin
                    # first, check if either argument is NaN, and return it if so
                    position!(builder, bb_check_arg0)
                    arg0_nan = fcmp!(builder, LLVM.API.LLVMRealUNO, arg0, arg0)
                    br!(builder, arg0_nan, bb_nan_arg0, bb_check_arg1)
                    position!(builder, bb_nan_arg0)
                    ret!(builder, arg0)
                    position!(builder, bb_check_arg1)
                    arg1_nan = fcmp!(builder, LLVM.API.LLVMRealUNO, arg1, arg1)
                    br!(builder, arg1_nan, bb_nan_arg1, bb_check_zero)
                    position!(builder, bb_nan_arg1)
                    ret!(builder, arg1)

                    # then, check if both arguments are zero and have a mismatching sign.
                    # if so, return in accordance to the intrinsic (minimum or maximum)
                    position!(builder, bb_check_zero)
                    typ′ = LLVM.IntType(8*sizeof(jltyp))
                    arg0′ = bitcast!(builder, arg0, typ′)
                    arg1′ = bitcast!(builder, arg1, typ′)
                    arg0_zero = fcmp!(builder, LLVM.API.LLVMRealUEQ, arg0,
                                      LLVM.ConstantFP(typ, zero(jltyp)))
                    arg1_zero = fcmp!(builder, LLVM.API.LLVMRealUEQ, arg1,
                                      LLVM.ConstantFP(typ, zero(jltyp)))
                    args_zero = and!(builder, arg0_zero, arg1_zero)
                    arg0_sign = and!(builder, arg0′, LLVM.ConstantInt(typ′, Base.sign_mask(jltyp)))
                    arg1_sign = and!(builder, arg1′, LLVM.ConstantInt(typ′, Base.sign_mask(jltyp)))
                    sign_mismatch = icmp!(builder, LLVM.API.LLVMIntNE, arg0_sign, arg1_sign)
                    relevant_zero = and!(builder, args_zero, sign_mismatch)
                    br!(builder, relevant_zero, bb_compare_zero, bb_fallback)
                    position!(builder, bb_compare_zero)
                    arg0_negative = icmp!(builder, LLVM.API.LLVMIntNE, arg0_sign,
                                          LLVM.ConstantInt(typ′, 0))
                    val = if is_minimum
                        select!(builder, arg0_negative, arg0, arg1)
                    else
                        select!(builder, arg0_negative, arg1, arg0)
                    end
                    ret!(builder, val)

                    # finally, it's safe to use the existing minnum/maxnum intrinsics
                    position!(builder, bb_fallback)
                    fallback_intr_fn = if is_minimum
                        "air.fmin.f$(8*sizeof(jltyp))"
                    else
                        "air.fmax.f$(8*sizeof(jltyp))"
                    end
                    fallback_intr = if haskey(functions(mod), fallback_intr_fn)
                        functions(mod)[fallback_intr_fn]
                    else
                        LLVM.Function(mod, fallback_intr_fn, call_ft)
                    end
                    val = call!(builder, call_ft, fallback_intr, collect(parameters(new_intr)))
                    ret!(builder, val)
                end
            end

            # replace the original call with a call to the helper
            @dispose builder=IRBuilder() begin
                position!(builder, call)
                debuglocation!(builder, call)

                new_value = call!(builder, call_ft, new_intr, arguments(call))
                replace_uses!(call, new_value)
                erase!(call)
                changed = true
            end
        end
    end

    return changed
end
# annotate AIR intrinsics with optimization-related metadata
#
# returns `true` if any declaration was annotated.
function annotate_air_intrinsics!(@nospecialize(job::CompilerJob), mod::LLVM.Module)
    changed = false
    for fun in functions(mod)
        # only annotate declarations, i.e. the AIR intrinsics themselves
        isdeclaration(fun) || continue
        fname = LLVM.name(fun)
        attrs = function_attributes(fun)

        # attach the given enum attributes to the current function
        function mark!(names...)
            for attr_name in names
                # XXX: workaround for changes from https://reviews.llvm.org/D135780
                skip = LLVM.version() >= v"16" &&
                       attr_name in ["argmemonly", "inaccessiblememonly",
                                     "inaccessiblemem_or_argmemonly",
                                     "readnone", "readonly", "writeonly"]
                skip && continue
                push!(attrs, EnumAttribute(attr_name, 0))
            end
            changed = true
        end

        # synchronization
        if fname in ("air.wg.barrier", "air.simdgroup.barrier")
            mark!("nounwind", "convergent")

        # atomics
        elseif occursin(r"air.atomic.(local|global).load", fname)
            # TODO: "memory(argmem: read)" on LLVM 16+
            mark!("argmemonly", "readonly", "nounwind")
        elseif occursin(r"air.atomic.(local|global).store", fname)
            # TODO: "memory(argmem: write)" on LLVM 16+
            mark!("argmemonly", "writeonly", "nounwind")
        elseif occursin(r"air.atomic.(local|global).(xchg|cmpxchg)", fname)
            # TODO: "memory(argmem: readwrite)" on LLVM 16+
            mark!("argmemonly", "nounwind")
        elseif occursin(r"^air.atomic.(local|global).(add|sub|min|max|and|or|xor)", fname)
            # TODO: "memory(argmem: readwrite)" on LLVM 16+
            mark!("argmemonly", "nounwind")
        end
    end
    return changed
end
# replace unreachable control flow with branches to the exit block
#
# before macOS 15, code generated by Julia 1.11 causes compilation failures in the back-end.
# the reduced example contains unreachable control flow executed divergently, so this is a
# similar issue as encountered with NVIDIA, albeit causing crashes instead of miscompiles.
#
# the proposed solution is to avoid (divergent) unreachable control flow, instead replacing
# it by branches to the exit block. since `unreachable` doesn't lower to anything that
# aborts the kernel anyway (can we fix this?), this transformation should be safe.
#
# returns `true` if the function was modified, `false` if there was nothing to do.
function replace_unreachable!(@nospecialize(job::CompilerJob), f::LLVM.Function)
    # find unreachable instructions and exit blocks
    unreachables = Instruction[]
    exit_blocks = BasicBlock[]
    for bb in blocks(f), inst in instructions(bb)
        if isa(inst, LLVM.UnreachableInst)
            push!(unreachables, inst)
        end
        if isa(inst, LLVM.RetInst)
            push!(exit_blocks, bb)
        end
    end
    # nothing to rewrite
    isempty(unreachables) && return false

    # if we don't have an exit block, we can't do much. we could insert a return, but that
    # would probably keep the problematic control flow just as it is.
    isempty(exit_blocks) && return false

    @dispose builder=IRBuilder() begin
        # if we have multiple exit blocks, take the last one, which is hopefully the least
        # divergent (assuming divergent control flow is the root of the problem here).
        exit_block = last(exit_blocks)
        ret = terminator(exit_block)

        # create a return block with only the return instruction, so that we only have to
        # care about any values returned, and not about any other SSA value in the block.
        if first(instructions(exit_block)) == ret
            # we can reuse the exit block if it only contains the return
            return_block = exit_block
        else
            # split the exit block right before the ret
            return_block = BasicBlock(f, "ret")
            move_after(return_block, exit_block)

            # emit a branch
            position!(builder, ret)
            br!(builder, return_block)

            # move the return
            remove!(ret)
            position!(builder, return_block)
            insert!(builder, ret)
        end

        # when returning a value, add a phi node to the return block, so that we can later
        # add incoming undef values when branching from `unreachable` blocks
        if !isempty(operands(ret))
            position!(builder, ret)
            # XXX: support aggregate returns?
            val = only(operands(ret))
            phi = phi!(builder, value_type(val))
            # NOTE(review): the same `val` is registered for every existing predecessor;
            # this assumes `val` dominates all predecessors of `return_block` — confirm.
            for pred in predecessors(return_block)
                push!(incoming(phi), (val, pred))
            end
            # the ret now returns the phi instead of the original value
            operands(ret)[1] = phi
        end

        # replace the unreachable with a branch to the return block
        for unreachable in unreachables
            bb = LLVM.parent(unreachable)

            # remove preceding traps to avoid reconstructing unreachable control flow
            prev = previnst(unreachable)
            if isa(prev, LLVM.CallInst) && name(called_operand(prev)) == "llvm.trap"
                erase!(prev)
            end

            # replace the unreachable with a branch to the return block
            position!(builder, unreachable)
            br!(builder, return_block)
            erase!(unreachable)

            # patch up any phi nodes in the return block: the new edge contributes an
            # undef value, which is fine since that path was unreachable to begin with
            for inst in instructions(return_block)
                if isa(inst, LLVM.PHIInst)
                    undef = UndefValue(value_type(inst))
                    vals = incoming(inst)
                    push!(vals, (undef, bb))
                end
            end
        end
    end

    return true
end
| GPUCompiler | https://github.com/JuliaGPU/GPUCompiler.jl.git |
|
[
"MIT"
] | 0.27.8 | 1d6f290a5eb1201cd63574fbc4440c788d5cb38f | code | 1323 | # native target for CPU execution
## target
export NativeCompilerTarget
# Compiler target that lowers code for the host CPU.
Base.@kwdef struct NativeCompilerTarget <: AbstractCompilerTarget
    # host CPU name; the query API is only available on LLVM 8 and newer
    cpu::String = LLVM.version() >= v"8" ? unsafe_string(LLVM.API.LLVMGetHostCPUName()) : ""
    # host CPU feature string; same LLVM version caveat as `cpu`
    features::String = LLVM.version() >= v"8" ? unsafe_string(LLVM.API.LLVMGetHostCPUFeatures()) : ""
    # will mark the job function as always inline
    llvm_always_inline::Bool = false
    # Use Julia runtime for throwing errors, instead of the GPUCompiler support
    jlruntime::Bool = false
end
llvm_triple(::NativeCompilerTarget) = Sys.MACHINE
# Construct an LLVM TargetMachine for the host triple, using the CPU name and
# feature string carried by `target`.
function llvm_machine(target::NativeCompilerTarget)
    host_triple = llvm_triple(target)
    llvm_target = Target(triple=host_triple)

    machine = TargetMachine(llvm_target, host_triple, target.cpu, target.features)
    # emit verbose assembly, which eases inspection of the generated code
    asm_verbosity!(machine, true)

    return machine
end
# Finalize the module for the native target: optionally force-inline the entry
# point when the target requests it. Returns the (possibly annotated) entry.
function finish_module!(job::CompilerJob{NativeCompilerTarget}, mod::LLVM.Module, entry::LLVM.Function)
    job.config.target.llvm_always_inline &&
        push!(function_attributes(entry), EnumAttribute("alwaysinline", 0))
    return entry
end
## job
runtime_slug(job::CompilerJob{NativeCompilerTarget}) = "native_$(job.config.target.cpu)-$(hash(job.config.target.features))$(job.config.target.jlruntime ? "-jlrt" : "")"
uses_julia_runtime(job::CompilerJob{NativeCompilerTarget}) = job.config.target.jlruntime
| GPUCompiler | https://github.com/JuliaGPU/GPUCompiler.jl.git |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.