licenses (sequence, length 1-3) | version (string, 677 values) | tree_hash (string, length 40) | path (string, 1 value) | type (string, 2 values) | size (string, length 2-8) | text (string, length 25-67.1M) | package_name (string, length 2-41) | repo (string, length 33-86) |
---|---|---|---|---|---|---|---|---|
[
"BSD-3-Clause"
] | 0.1.6 | c7fbc3eecf2c0d9097ffdcd7b24f8dc712b741bb | code | 6051 | using Base: Float64
const Ar_H = 1.007975 # Atomic weight of hydrogen
const Ry = R_∞ * c_0 * h # Rydberg energy
const Ryh = Ry / (1 + m_e / (Ar_H * m_u)) # Hydrogen ionisation energy
"""
n_eff(energy_upper::Unitful.Energy, energy_lower::Unitful.Energy, Z::Integer)
Compute the effective principal quantum number for a given energy difference
and atomic charge, `Z` = atomic charge + 1 (i.e., 1 for neutral, 2 for singly ionised).
`energy_upper` is the ionisation energy for the given stage.
"""
function n_eff(energy_upper::Unitful.Energy, energy_lower::Unitful.Energy, Z::Integer)
return (Z * sqrt(Ryh / (energy_upper - energy_lower))) |> u"J/J"
end
function n_eff(energy_upper::PerLength, energy_lower::PerLength, Z::Integer)
e_u = wavenumber_to_energy(energy_upper)
e_l = wavenumber_to_energy(energy_lower)
return n_eff(e_u, e_l, Z)
end
"""
If the input is a wavenumber, convert it to energy. Otherwise, keep it as energy.
"""
function wavenumber_to_energy(a::Quantity{T}) where T <: AbstractFloat
if typeof(a) <: PerLength
a = convert(Unitful.Quantity{T, Unitful.𝐋^2 * Unitful.𝐓^-2 * Unitful.𝐌},
(h * c_0 * a) |> u"aJ")
end
@assert typeof(a) <: Unitful.Energy{T} "Input units must be either wavenumber or energy"
return a
end
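# Minimal usage sketch (illustrative values; E = h * c_0 * wavenumber):
#   wavenumber_to_energy(10_000.0u"cm^-1")  # ≈ 0.1986 aJ
#   wavenumber_to_energy(0.1986u"aJ")       # already an energy, returned unchanged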
const ABO_sp_σ =
[ 126 140 165 202 247 299 346 383 435 491 553 617 685 769 838 925 1011 1082
140 150 162 183 218 273 327 385 440 501 557 620 701 764 838 923 1025 1085
154 167 175 192 216 251 299 357 423 487 549 617 684 759 834 910 1014 1064
166 180 192 206 226 253 291 339 397 459 532 600 676 755 832 896 1002 1055
208 194 207 223 242 265 296 335 384 445 511 583 656 726 817 889 988 1044
262 254 220 239 261 283 310 344 388 442 496 568 635 725 791 890 970 1036
311 306 299 251 280 304 330 361 396 443 500 563 630 704 796 880 951 1033
358 359 350 338 293 323 352 381 416 455 511 566 635 706 780 859 946 1039
411 409 405 392 370 340 375 406 439 478 525 580 644 714 790 873 961 1050
462 463 459 450 443 400 394 432 467 501 546 595 650 711 786 873 963 1050
522 525 529 524 516 518 438 454 495 532 565 621 671 741 813 874 951 1034
589 593 590 583 579 568 565 483 517 560 600 644 691 752 821 904 978 1048
658 655 666 657 649 653 649 587 549 592 674 674 728 782 833 902 992 1084
738 742 747 725 721 729 699 730 626 622 668 721 765 809 887 938 1001 1109
838 838 810 809 790 800 769 815 757 679 704 755 806 854 901 974 1034 1105
942 946 925 901 918 895 919 897 933 890 785 797 859 908 976 1020 1115 1173
1059 1061 1056 1061 1074 1031 1036 1036 993 1038 932 852 878 943 1003 1074 1131 1200
1069 1076 1083 1095 1102 1091 1126 1156 1103 1149 1157 1036 972 1007 1064 1124 1209 1283
1338 1350 1356 1354 1324 1301 1312 1318 1257 1239 1297 1233 1089 1059 1106 1180 1218 1317
1409 1398 1367 1336 1313 1313 1409 1354 1317 1287 1353 1386 1279 1158 1141 1188 1260 1335
1328 1332 1342 1369 1405 1451 1502 1524 1506 1477 1522 1594 1572 1436 1328 1325 1382 1446]
const ABO_sp_α =
[.268 .269 .335 .377 .327 .286 .273 .270 .271 .268 .267 .264 .264 .264 .261 .256 .248 .245
.261 .256 .254 .282 .327 .355 .321 .293 .287 .271 .267 .273 .270 .270 .268 .268 .264 .263
.266 .264 .257 .252 .267 .289 .325 .339 .319 .301 .292 .284 .281 .281 .277 .282 .276 .274
.262 .274 .258 .251 .247 .254 .273 .291 .316 .322 .320 .302 .294 .290 .287 .292 .283 .277
.322 .275 .264 .259 .250 .245 .273 .255 .271 .284 .294 .308 .296 .299 .288 .289 .282 .278
.267 .300 .260 .268 .254 .242 .243 .242 .239 .246 .267 .277 .280 .290 .282 .281 .274 .271
.259 .274 .275 .252 .265 .248 .249 .237 .283 .236 .247 .254 .254 .271 .268 .267 .258 .262
.260 .255 .268 .268 .268 .264 .248 .239 .229 .240 .236 .234 .238 .244 .252 .251 .244 .255
.255 .255 .244 .247 .317 .246 .255 .244 .237 .231 .227 .231 .235 .232 .235 .241 .237 .245
.256 .254 .254 .249 .227 .319 .253 .253 .240 .237 .238 .233 .231 .230 .228 .234 .227 .241
.257 .254 .252 .235 .253 .240 .284 .251 .246 .241 .235 .228 .222 .225 .225 .219 .228 .233
.244 .240 .245 .238 .248 .230 .283 .252 .244 .244 .238 .235 .234 .236 .228 .224 .225 .231
.244 .241 .244 .237 .237 .249 .219 .324 .239 .245 .242 .242 .232 .233 .221 .227 .231 .218
.241 .245 .249 .239 .243 .250 .217 .254 .308 .237 .247 .244 .234 .228 .233 .224 .227 .226
.243 .243 .232 .227 .235 .253 .227 .220 .320 .270 .243 .252 .248 .238 .234 .241 .225 .227
.225 .226 .234 .230 .226 .233 .249 .225 .216 .300 .286 .237 .240 .247 .243 .234 .231 .238
.268 .260 .247 .238 .233 .241 .254 .248 .207 .227 .315 .260 .226 .237 .240 .239 .239 .240
.248 .246 .238 .226 .213 .221 .226 .226 .204 .194 .248 .316 .234 .216 .236 .233 .221 .230
.200 .202 .198 .194 .206 .207 .227 .224 .207 .185 .198 .275 .315 .233 .229 .231 .233 .236
.202 .209 .221 .226 .230 .245 .202 .257 .246 .225 .215 .246 .320 .321 .244 .239 .251 .253
.246 .248 .255 .265 .274 .285 .292 .284 .273 .250 .225 .239 .295 .352 .320 .258 .260 .269]
const ABO_sp_np = 1.3:0.1:3.0
const ABO_sp_ns = 1.0:0.1:3.0
const ABO_sp_interp_σ = cubic_spline_interpolation((ABO_sp_ns, ABO_sp_np), ABO_sp_σ,
extrapolation_bc=Line())
const ABO_sp_interp_α = cubic_spline_interpolation((ABO_sp_ns, ABO_sp_np), ABO_sp_α,
extrapolation_bc=Line())
"""
Compute Barklem σ and α for an sp transition.
# Arguments
- E_cont: continuum energy (upper continuum of the stage)
- E_lev_s: s level energy
- E_lev_p: p level energy
- Z: atomic charge + 1
"""
function ABO_factors_sp(E_cont, E_lev_s, E_lev_p, Z)
ns = n_eff(E_cont, E_lev_s, Z)
np = n_eff(E_cont, E_lev_p, Z)
α = ABO_sp_interp_α(ns, np)::Float64
σ = ABO_sp_interp_σ(ns, np)::Float64
return (σ, α)
end
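# Minimal usage sketch with hypothetical level energies (not taken from a real atom);
# the energies must be Unitful quantities so that the `n_eff` methods above apply:
#   E_cont  = 7.6u"eV"   # ionisation limit of the stage
#   E_lev_s = 0.0u"eV"   # lower s level
#   E_lev_p = 4.3u"eV"   # upper p level
#   σ, α = ABO_factors_sp(E_cont, E_lev_s, E_lev_p, 1)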
| AtomicData | https://github.com/tiagopereira/AtomicData.jl.git |
|
[
"BSD-3-Clause"
] | 0.1.6 | c7fbc3eecf2c0d9097ffdcd7b24f8dc712b741bb | code | 1918 | """
Tools to compute partition functions.
"""
"""
partition_function(
g::Array{<: Real}, χ::Array{<: Unitful.Energy, 1}, temp::Unitful.Temperature
)
partition_function(data::AtomicStage, temp::Unitful.Temperature)
Calculate partition function from a set of levels defined by arrays
of statistical weights and energies (or an AtomicStage struct),
for a given temperature.
"""
function partition_function(
g::Array{<: Real}, χ::Array{<: Unitful.Energy, 1}, temp::Unitful.Temperature,
)
return sum(g .* exp.(-χ ./ (k_B * temp)))
end
function partition_function(atom::AtomicStage, temp::Unitful.Temperature)
return partition_function(atom.g, atom.χ, temp)
end
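# Minimal usage sketch (hypothetical two-level system):
#   g = [2, 2]; χ = [0.0, 1.6e-19]u"J"
#   partition_function(g, χ, 5000u"K")   # = 2 + 2*exp(-1.6e-19 / (k_B * 5000u"K")) ≈ 2.2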
"""
partition_function_interpolator(
atom::AtomicStage, temperatures::Array{<: Unitful.Temperature},
)
partition_function_interpolator(
element::String, stage, temperatures::Array{<: Unitful.Temperature};
source="NIST"
)
Returns an interpolator for the partition function of a given atomic stage.
# Arguments
- `atom`: an `AtomicStage` structure with the data for the given stage
- `temperatures`: array with temperatures used to build the interpolation table
# Returns
- `interpolator`: linear interpolator object that takes temperature to give
partition function
"""
function partition_function_interpolator(
atom::AtomicStage, temperatures::Array{<: Unitful.Temperature},
)
pfunc = partition_function.(Ref(atom), temperatures)
return linear_interpolation(temperatures, pfunc, extrapolation_bc=Line())
end
function partition_function_interpolator(
element::String, stage, temperatures::Array{<: Unitful.Temperature};
source="NIST"
)
if source == "NIST"
atom = read_NIST(element, stage)
else
error("NotImplemented: Atomic data source $source not supported")
end
return partition_function_interpolator(atom, temperatures)
end
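# Minimal usage sketch (assumes the locally-saved NIST level data for the element exists):
#   temps = [1000, 2000, 5000, 10000]u"K"
#   U_FeI = partition_function_interpolator("Fe", "I", temps)
#   U_FeI(5500u"K")   # partition function of neutral Fe, linearly interpolated in temperature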
| AtomicData | https://github.com/tiagopereira/AtomicData.jl.git |
|
[
"BSD-3-Clause"
] | 0.1.6 | c7fbc3eecf2c0d9097ffdcd7b24f8dc712b741bb | code | 3548 | """
Functions to read atomic data.
"""
"""
get_atomic_stage(element::String, stage; source="NIST")
Returns the level structure for element `element` and ionisation stage `stage`.
Uses data from the given `source` and returns an `AtomicStage` struct.
Currently, the only supported source
is `"NIST"`, using locally-saved data obtained from the
[NIST Atomic Spectra Database Levels Form](https://physics.nist.gov/PhysRefData/ASD/levels_form.html).
# Examples
```julia-repl
julia> MgII = get_atomic_stage("Mg", "II")
AtomicStage("Mg", "Mg_II", 2, II, 137, (...))
julia> MgII = get_atomic_stage("Mg", 2)
AtomicStage("Mg", "Mg_II", 2, II, 137, (...))
```
"""
function get_atomic_stage(element::String, stage; source="NIST")
if element ∉ element_symbols
error("Invalid element $element")
end
if source == "NIST"
return read_NIST(element, stage)
else
error("NotImplemented: atomic data source $source not available.")
end
end
"""
Reads NIST atomic level data saved locally. The data were extracted from the
[NIST Atomic Spectra Database Levels Form](https://physics.nist.gov/PhysRefData/ASD/levels_form.html).
"""
function read_NIST(element::String, stage)
stage = RomanNumeral(stage)
file = string(element, "_", repr(stage), ".txt")
filepath = joinpath(@__DIR__, "..", "data", "NIST", file)
if isfile(filepath)
data = readdlm(filepath)
# Index entries that have statistical weights
index = typeof.(data[:, 4]) .== Int
index .*= data[:, 3] .!= "---" # some cases with mismatched wnum as Int
index .*= data[:, 3] .!= "" # cases with no J or g
g = convert.(Int, data[index, 4])
χ = NIST_wavenumber_to_energy.(data[index, 5])
# Find first ionisation edge
if sum(data[:, 2] .== "Limit") == 0
χ_ion = 0.0u"J"
else
wavenum_ion = data[data[:, 2] .== "Limit", 4][1]
χ_ion = NIST_wavenumber_to_energy(wavenum_ion)
end
return AtomicStage(element, stage, g, χ, χ_ion)
else
error("NIST data for $element $(repr(stage)) not found.")
end
end
"""
Parses level energy field from NIST tables. Brackets (round or square)
indicate interpolated or theoretical values. Converts from wavenumber
to energy.
"""
function NIST_wavenumber_to_energy(wavenum)
to_remove = ["[", "]", "(", ")", "?", "a", "l", "x", "y", "z",
"u", "+", "†", "&dgger;"]
if typeof(wavenum) in [String, SubString{String}]
for suffix in to_remove
wavenum = replace(wavenum, suffix => "")
end
wn = parse(Float64, wavenum)u"cm^-1"
elseif typeof(wavenum) <: Real
wn = convert(Float64, wavenum)u"cm^-1"
else
error("Invalid type $(typeof(wavenum)) for wave number")
end
return (h * c_0 * wn) |> u"J"
end
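# Minimal usage sketch (35669.31 cm^-1 is an arbitrary illustrative value):
#   NIST_wavenumber_to_energy("[35669.31]")   # markers stripped, result ≈ 7.09e-19 J
#   NIST_wavenumber_to_energy(35669.31)       # plain numbers give the same energy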
"""
Reads an abundance file and converts it to a dictionary keyed by element symbols.
Also converts abundances from the usual log scale (relative to hydrogen) to a linear scale.
The source file must be YAML formatted, following the format of the files under
`data/solar_abundances/*.yaml`.
"""
function read_abundances(abundance_file)
data = YAML.load_file(abundance_file)["abundances"]["data"]
result = Dict()
for (Z, el, abund, _) in data
linear_abundance = 10 ^ (abund - 12)
result[Symbol(el)] = linear_abundance
end
return result
end
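# Sketch of the expected YAML layout, inferred from the parsing code above
# (each data entry is [atomic number, symbol, log abundance, extra field]):
#   abundances:
#     data:
#       - [1, H, 12.00, ...]
#       - [8, O, 8.69, ...]
# The returned dictionary then maps, e.g., :O => 10^(8.69 - 12).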
"""
Returns directory with model atom files.
"""
get_atom_dir() = joinpath(@__DIR__, "..", "data", "atoms")
| AtomicData | https://github.com/tiagopereira/AtomicData.jl.git |
|
[
"BSD-3-Clause"
] | 0.1.6 | c7fbc3eecf2c0d9097ffdcd7b24f8dc712b741bb | code | 2489 | # Adapted from https://github.com/anthonyclays/RomanNumerals.jl
# Copyright (c) 2014: Anthony Clays.
# Licensed under a MIT "Expat" License, see licenses/RomanNumerals.md
struct RomanNumeral <: Integer
val::Int
str::String
RomanNumeral(int::Integer) = new(int, toroman(int))
function RomanNumeral(str::AbstractString)
num = parse(RomanNumeral, str)
new(num, toroman(num))
end
end
const RN = RomanNumeral
# Standard functions
# Conversion + promotion
Base.convert(::Type{Bool}, ::RN) = true
Base.convert(::Type{T}, x::RN) where {T<:Real} = T(x.val)
Base.promote_rule(::Type{RN}, ::Type{T}) where {T <: Integer} = T
# IO
Base.show(io::IO, num::RN) = write(io, num.str)
Base.length(num::RN) = length(num.str)
Base.hash(num::RN) = xor(hash(num.str), hash(num.val))
const VALID_ROMAN_PATTERN =
r"""
^\s* # Skip leading whitespace
(
M* # Thousands
(C{0,9}|CD|DC{0,4}|CM) # Hundreds
(X{0,9}|XL|LX{0,4}|XC) # Tens
(I{0,9}|IV|VI{0,4}|IX) # Ones
)
\s*$ # Skip trailing whitespace
"""ix # Be case-insensitive and verbose
const NUMERAL_MAP = [
(1000, "M")
(900, "CM")
(500, "D")
(400, "CD")
(100, "C")
(90, "XC")
(50, "L")
(40, "XL")
(10, "X")
(9, "IX")
(5, "V")
(4, "IV")
(1, "I")
]
import Base: parse
function parse(::Type{RomanNumeral}, str::AbstractString)
m = match(VALID_ROMAN_PATTERN, str)
m ≡ nothing && throw(Meta.ParseError(str * " is not a valid roman numeral"))
# Strip whitespace and make uppercase
str = uppercase(m.captures[1])
i = 1
val = 0
strlen = length(str)
for (num_val, numeral) in NUMERAL_MAP
numlen = length(numeral)
while i+numlen-1 <= strlen && str[i:i+numlen-1] == numeral
val += num_val
i += numlen
end
end
val
end
using Logging: @warn
function toroman(val::Integer)
val <= 0 && throw(DomainError(val, "in ancient Rome there were only strictly positive numbers"))
val > 5000 && @warn "Roman numerals do not handle large numbers well"
str = IOBuffer()
for (num_val, numeral) in NUMERAL_MAP
i = div(val, num_val)
# Never concatenate an empty string to `str`
i == 0 && continue
print(str, repeat(numeral,i))
val -= i*num_val
# Stop when ready
val == 0 && break
end
String(take!(str))
end
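# Minimal usage sketch:
#   RomanNumeral(14)                  # XIV
#   RomanNumeral("xiv")               # parsed case-insensitively, value 14
#   convert(Int, RomanNumeral("M"))   # 1000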
| AtomicData | https://github.com/tiagopereira/AtomicData.jl.git |
|
[
"BSD-3-Clause"
] | 0.1.6 | c7fbc3eecf2c0d9097ffdcd7b24f8dc712b741bb | code | 560 | include("roman_numerals.jl")
struct AtomicStage
element::String
name::String
stage::Int
stage_rn::RomanNumeral
n_levels::Int
g::Vector{Int}
χ::typeof([1.0, 2.0]u"J")
χ_ion::Unitful.Energy
function AtomicStage(element, stage, g, χ, χ_ion)
n_levels = length(χ)
stage_rn = RomanNumeral(stage)
stage = convert(Int, stage_rn) # Ensures it can be called with string or Int
name = string(element, "_", repr(stage_rn))
new(element, name, stage, stage_rn, n_levels, g, χ, χ_ion)
end
end
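# Minimal construction sketch (hypothetical two-level hydrogen-like stage):
#   stage = AtomicStage("H", 1, [2, 8], [0.0, 1.634e-18]u"J", 2.179e-18u"J")
#   stage.name       # "H_I"
#   stage.n_levels   # 2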
| AtomicData | https://github.com/tiagopereira/AtomicData.jl.git |
|
[
"BSD-3-Clause"
] | 0.1.6 | c7fbc3eecf2c0d9097ffdcd7b24f8dc712b741bb | code | 3282 | using AtomicData
using Test
using Unitful
import PhysicalConstants.CODATA2018: h, k_B, c_0
@testset "Read utils" begin
MgII = get_atomic_stage("Mg", "II")
Mg2 = get_atomic_stage("Mg", 2)
Mg2rm = get_atomic_stage("Mg", AtomicData.RomanNumeral(2))
@test MgII.g == Mg2.g == Mg2rm.g
@test MgII.χ == Mg2.χ == Mg2rm.χ
# Reading all neutral stages to check for parsing errors
function read_all_neutral()
elements = ["Ag", "Al", "Ar", "As", "Au", "B", "Ba", "Be", "Bi", "Br", "C", "Ca",
"Cd", "Ce", "Cl", "Co", "Cr", "Cs", "Cu", "Dy", "Er", "Eu", "F", "Fe",
"Ga", "Gd", "Ge", "H", "He", "Hf", "Hg", "Ho", "I", "In", "Ir", "K",
"Kr", "La", "Li", "Lu", "Mg", "Mn", "Mo", "N", "Na", "Nb", "Nd", "Ne",
"Ni", "O", "Os", "P", "Pb", "Pd", "Pr", "Pt", "Rb", "Re", "Rh", "Ru",
"S", "Sb", "Sc", "Se", "Si", "Sm", "Sn", "Sr", "Ta", "Tb", "Te", "Th",
"Ti", "Tl", "Tm", "U", "V", "W", "Xe", "Y", "Yb", "Zn", "Zr"]
for e in elements
get_atomic_stage(e, 1)
end
return true
end
@test read_all_neutral()
@test_throws ArgumentError AtomicData.NIST_wavenumber_to_energy("NOT_NUMBER_1")
@test_throws ErrorException AtomicData.NIST_wavenumber_to_energy([1, 2])
@test_throws ErrorException read_NIST("H", 3)
@test_throws ErrorException get_atomic_stage("Mm", 2)
@test_throws ErrorException get_atomic_stage("Mg", "II"; source="MY_SOURCE")
end
@testset "Partition function" begin
temp = 1u"K" / ustrip(k_B)
@test partition_function([1, 1, 1], [1, 1, 1]u"J", temp) ≈ exp(-1) * 3
@test partition_function([1, 1, 1], [1, 1, 1]u"J", 0u"K") ≈ 0
@test partition_function([0, 0, 0], [1, 1, 1]u"J", temp) ≈ 0
@test partition_function([1, 1, 1], [0, 0, 0]u"J", temp) ≈ 3
SchI = AtomicStage("Sch", 1, [1, 1, 1], [0, 0, 0]u"J", 0u"J")
@test partition_function(SchI, temp) ≈ 3
AlI = get_atomic_stage("Al", "I")
temp = [5000, 10000]u"K"
itp = partition_function_interpolator(AlI, temp)
@test itp == partition_function_interpolator("Al", "I", temp)
@test itp(5000u"K") == partition_function(AlI, 5000u"K")
@test itp(10000u"K") == partition_function(AlI, 10000u"K")
@test_throws ErrorException partition_function_interpolator("Al", "I", temp;
source="MY_SOURCE")
end
@testset "Roman Numerals" begin
@test convert(Bool, AtomicData.RomanNumeral(10)) == true
@test convert(Float64, AtomicData.RomanNumeral("II")) == 2.0
@test promote(AtomicData.RomanNumeral("V"), 1) == (5, 1)
@test length(AtomicData.RomanNumeral("X")) == 1
@test hash(AtomicData.RomanNumeral("L")) == xor(hash("L"), hash(50))
end
@testset "Abundances" begin
aag2021 = get_solar_abundances(source="AAG2021")
ags2009 = get_solar_abundances(source="Asplund2009")
gs1998 = get_solar_abundances(source="GS1998")
@test aag2021[:H] == 1
@test ags2009[:H] == 1
@test gs1998[:H] == 1
@test log10(aag2021[:O]) + 12 ≈ 8.69
@test log10(ags2009[:O]) + 12 ≈ 8.69
@test log10(gs1998[:O]) + 12 ≈ 8.83
@test_throws ErrorException get_solar_abundances(source="my_source")
end
| AtomicData | https://github.com/tiagopereira/AtomicData.jl.git |
|
[
"BSD-3-Clause"
] | 0.1.6 | c7fbc3eecf2c0d9097ffdcd7b24f8dc712b741bb | docs | 537 | [](https://github.com/tiagopereira/AtomicData.jl/actions)
[](https://codecov.io/gh/tiagopereira/AtomicData.jl)
# AtomicData.jl
A package to provide and manipulate atomic data in an astrophysical context. Currently includes atomic level data from the [NIST Atomic Spectra Database](https://www.nist.gov/pml/atomic-spectra-database), version 5.8 (October 2020).
| AtomicData | https://github.com/tiagopereira/AtomicData.jl.git |
|
[
"BSD-3-Clause"
] | 0.1.6 | c7fbc3eecf2c0d9097ffdcd7b24f8dc712b741bb | docs | 617 | ## Model atoms
This directory contains model atoms in YAML format to be used in radiative transfer calculations. The atom format is still subject to change, and is based on quantities required by existing codes such as MULTI or RH.
Each model atom is divided into the following main parts (a minimal sketch follows the list):
* element: data about element, optionally also including abundance.
* atomic_levels: properties of each atomic level.
* radiative_bound_bound: properties of spectral lines.
* radiative_bound_free: properties of bound-free radiative transitions.
* collisional: properties of collisional transitions, both excitation and ionisation.
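A hypothetical sketch of the overall layout (only the section names above come from this document; the comments are illustrative and not a specification):
```yaml
element:
  # data about the element, optionally including abundance
atomic_levels:
  # properties of each atomic level
radiative_bound_bound:
  # properties of spectral lines
radiative_bound_free:
  # properties of bound-free radiative transitions
collisional:
  # properties of collisional transitions (excitation and ionisation)
```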
| AtomicData | https://github.com/tiagopereira/AtomicData.jl.git |
|
[
"BSD-3-Clause"
] | 0.1.6 | c7fbc3eecf2c0d9097ffdcd7b24f8dc712b741bb | docs | 110 | ```@meta
CurrentModule = AtomicData
```
# AtomicData
```@index
```
```@autodocs
Modules = [AtomicData]
```
| AtomicData | https://github.com/tiagopereira/AtomicData.jl.git |
|
[
"MIT"
] | 0.1.0 | 2c528935e80f0acc67cdd3eb31acad26d2e1f941 | code | 4334 | module FuzzyEmbeddingMatch
# Exports
export EmbeddedString, MatchCandidate, bestmatch, allmatches, corpus
using PromptingTools
using LinearAlgebra
using Memoize
using ProgressMeter
"""
EmbeddedString{T,E}
A string `content` with an embedding `embedding`.
"""
struct EmbeddedString{T,E}
content::T
embedding::E
end
function EmbeddedString(content::AbstractString)
EmbeddedString(content, embed(content))
end
"""
Overrides the `show` function for `EmbeddedString` to only show the
`content` of the string.
"""
function Base.show(io::IO, x::EmbeddedString)
print(io, "EmbeddedString(\"", x.content, "\")")
end
"""
embed(x::AbstractString)
Return the embedding of `x` using `aiembed` from PromptingTools.jl.
Memoized to reduce the number of API calls to `aiembed`.
"""
@memoize embed(x::AbstractString) = aiembed(x; verbose=false).content
"""
MatchCandidate{T,E}
A candidate match between two strings `content_a` and `content_b` with
embeddings `embedding_a` and `embedding_b` and a score `score`.
"""
struct MatchCandidate{T,E}
content_a::T
content_b::T
embedding_a::E
embedding_b::E
score::Float64
end
function MatchCandidate(
a::EmbeddedString,
b::EmbeddedString,
)
MatchCandidate(
a.content,
b.content,
a.embedding,
b.embedding,
cosinesimilarity(a, b)
)
end
# Show method override for `MatchCandidate`.
function Base.show(io::IO, x::MatchCandidate)
print(io, "MatchCandidate(\"", x.content_a, "\", \"", x.content_b, "\", ", x.score, ")")
end
"""
corpus(things::AbstractVector{<:AbstractString})
Return the corpus of all the strings in `things`. Used to cache the
embeddings of all the strings in `things` to reduce the number of
API calls to `aiembed`.
"""
function corpus(things::AbstractVector{<:AbstractString})
# Get the embeddings of all the strings in `things`.
embeds = getembeddings(things)
# Return the corpus of all the embeddings.
return embeds
end
"""
getembeddings(things::AbstractVector{<:AbstractString})
Return the embeddings of each string in `things`.
"""
function getembeddings(things::AbstractVector{<:AbstractString})
embeds = @showprogress map(unique(things)) do x
EmbeddedString(x)
end
# Return the embeddings in their original order.
return map(x -> embeds[findfirst(y -> y.content == x, embeds)], things)
end
"""
bestmatch(
thing::AbstractString,
candidates::AbstractVector{<:AbstractString};
verbose=false
)
Return the best match for `thing` in `candidates` by comparing the embedding
of `thing` to the embeddings of `candidates`. The embeddings are computed
using `aiembed` from PromptingTools.jl.
"""
function bestmatch(
thing::AbstractString,
candidates::AbstractVector{<:AbstractString};
verbose=false
)
# Get all matches
matches = allmatches(thing, candidates)
# Sort the matches by score.
sort!(matches, by=x -> x.score, rev=true)
# Return the best match.
return matches[1]
end
"""
allmatches(
thing::AbstractString,
candidates::AbstractVector{<:AbstractString}
)
Return all the matches for `thing` in `candidates` by comparing the embedding
of `thing` to the embeddings of `candidates`. The embeddings are computed
using `aiembed` from PromptingTools.jl.
"""
function allmatches(
thing::AbstractString,
candidates::AbstractVector{<:AbstractString};
)
# Get the embeddings of `thing` and `candidates`.
thing_embed = EmbeddedString(thing)
candidate_embeds = getembeddings(candidates)
# Compare all the embeds
return map(x -> MatchCandidate(thing_embed, x), candidate_embeds)
end
"""
cosinesimilarity(a::AbstractVector, b::AbstractVector)
Compare the embeddings of `a` and `b` by computing the cosine similarity.
"""
cosinesimilarity(a::EmbeddedString, b::EmbeddedString) = cosinesimilarity(a.embedding, b.embedding)
function cosinesimilarity(a::AbstractVector, b::AbstractVector)
# Compute the dot product of `a` and `b`.
dot_product = dot(a, b)
# Compute the norm of `a`.
norm_a = norm(a)
# Compute the norm of `b`.
norm_b = norm(b)
# Compute the cosine similarity.
dot_product / (norm_a * norm_b)
end
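# Worked example (illustrative): cosinesimilarity([1.0, 0.0], [1.0, 1.0]) ≈ 1/sqrt(2) ≈ 0.707;
# identical non-zero vectors give 1.0.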
end # module FuzzyEmbeddingMatch
| FuzzyEmbeddingMatch | https://github.com/cpfiffer/FuzzyEmbeddingMatch.jl.git |
|
[
"MIT"
] | 0.1.0 | 2c528935e80f0acc67cdd3eb31acad26d2e1f941 | docs | 2506 | # FuzzyEmbeddingMatch.jl
## Overview
The `FuzzyEmbeddingMatch` module is designed to facilitate fuzzy string matching by leveraging embeddings. It primarily consists of structures and functions to embed strings, calculate similarities between these embeddings, and find the best or all matches within a set of candidates. Key components include `EmbeddedString`, `MatchCandidate`, `bestmatch`, and `allmatches`.
This module uses memoization for embedding strings to reduce API calls.
## Installation
You can install this package with
```julia
import Pkg
Pkg.add("FuzzyEmbeddingMatch")
```
or, from the REPL:
```julia
] add FuzzyEmbeddingMatch
```
## Usage
To begin, make sure that your environment variable `OPENAI_API_KEY` is set. If you do not have the environment variable set at the system level, you can add it with
```julia
ENV["OPENAI_API_KEY"] = "........" # Replace this with your key
```
### Structures
- `EmbeddedString`: Represents a string with its associated embedding.
- `MatchCandidate`: A candidate for matching, containing two strings, their embeddings, and a similarity score.
### Key Functions
- `embed`: Embeds a string using `aiembed` from `PromptingTools.jl`.
- `corpus`: Generates a corpus of embedded strings.
- `getembeddings`: Returns embeddings for a vector of strings.
- `cosinesimilarity`: Calculates cosine similarity between two embeddings.
### Matching Functions
- `allmatches`: Finds all matches for a given string in a list of candidates.
- `bestmatch`: Finds the best match for a given string in a list of candidates.
## Examples
### Using `allmatches`
```julia
# Example strings and candidates
thing = "Example string"
candidates = ["Sample text", "Example string", "Another example"]
# Finding all matches
matches = allmatches(thing, candidates)
# Output the matches
for match in matches
println(match)
end
```
Output:
```plaintext
MatchCandidate("Example string", "Sample text", 0.9022957888579418)
MatchCandidate("Example string", "Example string", 0.9999999999999998)
MatchCandidate("Example string", "Another example", 0.8847227646389876)
```
### Using `bestmatch`
```julia
# Example string and candidates
thing = "Example string"
candidates = ["Sample text", "Example string", "Another example"]
# Finding the best match
best_match = bestmatch(thing, candidates)
# Output the best match
println("Best match: ", best_match)
```
Output:
```plaintext
Best match: MatchCandidate("Example string", "Example string", 0.9999999999999998)
```
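### Pre-embedding candidates with `corpus`

If the same candidate list is reused across many queries, it can be embedded once up front. A minimal sketch (this calls the OpenAI embedding API, so `OPENAI_API_KEY` must be set; the strings are only examples):

```julia
candidates = ["Sample text", "Example string", "Another example"]

# Embed every candidate once; the underlying `embed` calls are memoized,
# so later `allmatches`/`bestmatch` calls on the same strings reuse the cache.
embedded = corpus(candidates)

println(embedded[1])   # EmbeddedString("Sample text")
```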
| FuzzyEmbeddingMatch | https://github.com/cpfiffer/FuzzyEmbeddingMatch.jl.git |
|
[
"MIT"
] | 0.7.3 | 4ecfc24dbdf551f92b5de7ea2d99da3f7fde73c9 | code | 8542 | if VERSION==v"1.4" && Sys.isapple() && !(haskey(ENV, "DOCUMENTER_KEY"))
error("""Your Julia version is =1.4, and your operation system is MacOSX.
Currently, there is a compatibility issue for this combination.
Please downgrade your Julia version.""")
end
if haskey(ENV, "MANUAL") && ENV["MANUAL"]=="1"
error("""****** You indicated you want to build ADCME package manually.
To this end, you need to create a dependency file
$(joinpath(@__DIR__, "deps.jl"))
and populate it with appropriate binary locations.
--------------------------------------------------------------------------------------------
BINDIR =
LIBDIR =
TF_INC =
TF_ABI =
PREFIXDIR =
CC =
CXX =
CMAKE =
MAKE =
GIT =
PYTHON =
TF_LIB_FILE =
LIBCUDA =
CUDA_INC =
NINJA =
INCDIR =
__STR__ = join([BINDIR,LIBDIR,TF_INC,TF_ABI,PREFIXDIR,CC,CXX,CMAKE,MAKE,GIT,PYTHON,TF_LIB_FILE,LIBCUDA,CUDA_INC,NINJA,INCDIR], ";")
--------------------------------------------------------------------------------------------
""")
end
JULIA_ADCME_DIR = homedir()
if haskey(ENV, "JULIA_ADCME_DIR")
JULIA_ADCME_DIR = abspath(ENV["JULIA_ADCME_DIR"])
@info "Found JULIA_ADCME_DIR=$JULIA_ADCME_DIR in environment variables. ADCME will install the dependencies to JULIA_ADCME_DIR."
if !ispath(joinpath(JULIA_ADCME_DIR, ".julia"))
mkpath(joinpath(JULIA_ADCME_DIR, ".julia"))
end
end
push!(LOAD_PATH, "@stdlib")
using Pkg
using CMake
using LibGit2
ENVDIR = joinpath(JULIA_ADCME_DIR, ".julia", "adcme")
VER = haskey(Pkg.installed(),"ADCME") ? Pkg.installed()["ADCME"] : "NOT_INSTALLED"
@info """Your Julia version is $VERSION, current ADCME version is $VER, ADCME dependencies installation path: $ENVDIR"""
@info " --------------- (1/7) Install Tensorflow Dependencies --------------- "
FORCE_REINSTALL_ADCME = haskey(ENV, "FORCE_REINSTALL_ADCME") && ENV["FORCE_REINSTALL_ADCME"] in [1, "1"]
include("install_adcme.jl")
BINDIR = Sys.iswindows() ? abspath("$ENVDIR/Scripts") : abspath("$ENVDIR/bin")
GIT = "LibGit2"
PYTHON = joinpath(BINDIR, "python")
if Sys.iswindows()
PYTHON = abspath(joinpath(ENVDIR, "python.exe"))
end
@info " --------------- (2/7) Check Python Version --------------- "
!haskey(Pkg.installed(), "PyCall") && Pkg.add("PyCall")
ENV["PYTHON"]=PYTHON
Pkg.build("PyCall")
using PyCall
@info """
PyCall Python version: $(PyCall.python)
Conda Python version: $PYTHON
"""
@info " --------------- (3/7) Looking for TensorFlow Dynamic Libraries --------------- "
tf = pyimport("tensorflow")
core_path = abspath(joinpath(tf.sysconfig.get_compile_flags()[1][3:end], ".."))
lib = readdir(core_path)
if Sys.iswindows()
global TF_LIB_FILE = abspath(joinpath(core_path, "python/_pywrap_tensorflow_internal.lib"))
else
global TF_LIB_FILE = joinpath(core_path,lib[findall(occursin.("libtensorflow_framework", lib))[end]])
end
TF_INC = tf.sysconfig.get_compile_flags()[1][3:end]
TF_ABI = tf.sysconfig.get_compile_flags()[2][end:end]
if Sys.iswindows()
if !isdir(joinpath(TF_INC, "tensorflow"))
@info " --------------- (Windows) Downloading Include Files for Custom Operators --------------- "
run(`cmd /c rmdir /s /q $TF_INC`)
LibGit2.clone("https://github.com/kailaix/tensorflow-1.15-include", TF_INC)
end
end
@info " --------------- (4/7) Preparing Custom Operator Environment --------------- "
LIBDIR = abspath("$ENVDIR/lib/Libraries")
if !isdir(LIBDIR)
@info "Downloading dependencies to $LIBDIR..."
mkdir(LIBDIR)
end
if !isfile("$LIBDIR/eigen.zip")
download("https://gitlab.com/libeigen/eigen/-/archive/3.3.7/eigen-3.3.7.zip","$LIBDIR/eigen.zip")
end
if !isdir("$LIBDIR/eigen3")
UNZIP = joinpath(BINDIR, "unzip")
if Sys.iswindows()
if !isfile("$LIBDIR/unzip.exe")
download("http://stahlworks.com/dev/unzip.exe", joinpath(LIBDIR, "unzip.exe"))
end
UNZIP = joinpath(LIBDIR, "unzip.exe")
end
run(`$UNZIP -qq $LIBDIR/eigen.zip -d $LIBDIR`)
mv("$LIBDIR/eigen-3.3.7", "$LIBDIR/eigen3", force=true)
end
CONDA = ""
if Sys.iswindows()
CONDA = "$(JULIA_ADCME_DIR)/.julia/adcme/Scripts/conda.exe"
else
CONDA = "$(JULIA_ADCME_DIR)/.julia/adcme/bin/conda"
end
PIP = ""
if Sys.iswindows()
PIP = "$(JULIA_ADCME_DIR)/.julia/adcme/Scripts/pip.exe"
else
PIP = "$(JULIA_ADCME_DIR)/.julia/adcme/bin/pip"
end
@info " --------------- (5/7) Install Python Dependencies --------------- "
# install matplotlib
pkgs = read(`$PIP list`, String)
if !occursin("matplotlib", pkgs)
run(`$PIP install matplotlib`)
end
# If the system has `nvcc` but "GPU" is not specified, warn the users to build with
# ENV["GPU"] = 1
if !haskey(ENV, "GPU")
try
if Sys.iswindows()
run(`cmd /c nvcc --version`)
else
run(`which nvcc`)
end
@warn("""We detected that you have `nvcc` installed but ENV[\"GPU\"] is not set.
>>> If you want to install ADCME with GPU capability enabled, please set `ENV[\"GPU\"]=1`.""")
catch
end
end
LIBCUDA = ""
CUDA_INC = ""
if Sys.islinux() && haskey(ENV, "GPU") && ENV["GPU"] in ["1", 1]
@info " --------------- (6/7) Installing GPU Dependencies --------------- "
NVCC = readlines(pipeline(`which nvcc`))[1]
s = join(readlines(pipeline(`nvcc --version`)), " ")
ver = match(r"V(\d+\.\d)", s)[1]
if ver[1:2]!="10"
error("TensorFlow backend of ADCME requires CUDA 10.0. But you have CUDA $ver")
end
if ver[1:4]!="10.0"
@warn("TensorFlow is compiled using CUDA 10.0, but you have CUDA $ver. This might cause some problems.")
end
pkg_dir = "$(JULIA_ADCME_DIR)/.julia/adcme/pkgs/"
files = readdir(pkg_dir)
libpath = filter(x->startswith(x, "cudatoolkit") && isdir(joinpath(pkg_dir,x)), files)
if length(libpath)==0
@warn "cudatoolkit* not found in $pkg_dir"
elseif length(libpath)>1
@warn "more than 1 cudatoolkit found, use $(libpath[1]) by default"
end
if length(libpath)>=1
LIBCUDA = abspath(joinpath(pkg_dir, libpath[1], "lib"))
end
libcudatoolkit_path = filter(x->startswith(x, "cudnn") && isdir(joinpath(pkg_dir,x)), files)
if length(libcudatoolkit_path)==0
@warn "cudnn* not found in $pkg_dir"
elseif length(libcudatoolkit_path)>1
@warn "more than 1 cudatoolkit found, use $(libpath[1]) by default"
end
if length(libcudatoolkit_path)>=1
LIBCUDA = LIBCUDA*":"*abspath(joinpath(pkg_dir, libcudatoolkit_path[1], "lib"))
@info " --------------- CUDA include headers --------------- "
cudnn = joinpath(pkg_dir, libcudatoolkit_path[1], "include", "cudnn.h")
cp(cudnn, joinpath(TF_INC, "cudnn.h"), force=true)
end
CUDA_INC = joinpath(splitdir(splitdir(NVCC)[1])[1], "include")
else
@info " --------------- (6/7) Skipped: Installing GPU Dependencies --------------- "
end
@info """ --------------- (7/7) Write Dependency Files --------------- """
s = ""
t = []
function adding(k, v)
global s
if Sys.iswindows()
v = replace(v, "\\"=>"\\\\")
end
s *= "$k = \"$v\"\n"
push!(t, "$k")
end
adding("BINDIR", BINDIR)
if Sys.iswindows()
D = abspath(joinpath(LIBDIR, "lib"))
!isdir(D) && mkdir(D)
adding("LIBDIR", D)
else
adding("LIBDIR", abspath(joinpath(ENVDIR, "lib")))
end
adding("TF_INC", TF_INC)
adding("TF_ABI", TF_ABI)
adding("PREFIXDIR", LIBDIR)
if Sys.isapple()
adding("CC", joinpath(BINDIR, "clang"))
adding("CXX", joinpath(BINDIR, "clang++"))
elseif Sys.islinux()
adding("CC", joinpath(BINDIR, "x86_64-conda_cos6-linux-gnu-gcc"))
adding("CXX", joinpath(BINDIR, "x86_64-conda_cos6-linux-gnu-g++"))
else
adding("CC", "")
adding("CXX", "")
end
if Sys.islinux()
adding("CMAKE", joinpath(BINDIR, "cmake"))
else
adding("CMAKE", cmake)
end
if Sys.iswindows()
adding("MAKE", "C:\\Program Files (x86)\\Microsoft Visual Studio\\2017\\Community\\MSBuild\\15.0\\Bin\\MSBuild.exe")
else
adding("MAKE", joinpath(BINDIR, "make"))
end
adding("GIT", GIT)
adding("PYTHON", PyCall.python)
adding("TF_LIB_FILE", TF_LIB_FILE)
adding("LIBCUDA", LIBCUDA)
adding("CUDA_INC", CUDA_INC)
if Sys.iswindows()
adding("NINJA", "")
else
adding("NINJA", joinpath(BINDIR, "ninja"))
end
adding("INCDIR", abspath(joinpath(BINDIR, "..", "include")))
t = "join(["*join(t, ",")*"], \";\")"
s *= "__STR__ = $t"
open("deps.jl", "w") do io
write(io, s)
end
@info """ --------------- Finished: $(abspath("deps.jl")) --------------- """
| ADCME | https://github.com/kailaix/ADCME.jl.git |
|
[
"MIT"
] | 0.7.3 | 4ecfc24dbdf551f92b5de7ea2d99da3f7fde73c9 | code | 2935 | ## define variables...
INSTALL_GPU = Sys.islinux() && haskey(ENV, "GPU") && ENV["GPU"] in [1, "1"]
CONDA = ""
if Sys.iswindows()
CONDA = "$(JULIA_ADCME_DIR)/.julia/adcme/Scripts/conda.exe"
else
CONDA = "$(JULIA_ADCME_DIR)/.julia/adcme/bin/conda"
end
INSTALLER = ""
if Sys.islinux()
INSTALLER = "Miniconda3-py37_4.8.3-Linux-x86_64.sh"
elseif Sys.isapple()
INSTALLER = "Miniconda3-py37_4.8.3-MacOSX-x86_64.sh"
else
INSTALLER = "Miniconda3-py37_4.8.3-Windows-x86_64.exe"
end
function check_install()
if Sys.iswindows()
else
for bin in ["unzip", "ninja"]
if !isfile(joinpath("$(JULIA_ADCME_DIR)/.julia/adcme/bin", bin))
return false
end
end
end
return true
end
function install_conda()
PWD = pwd()
cd("$(JULIA_ADCME_DIR)/.julia/")
if !(INSTALLER in readdir("."))
@info "Downloading miniconda installer..."
download("https://repo.anaconda.com/miniconda/"*INSTALLER, INSTALLER)
end
if isdir("adcme")
if FORCE_REINSTALL_ADCME
error("""ADCME dependencies directory already exist, and you indicate FORCE_REINSTALL_ADCME=true.
Please (1) quit Julia, (2) remove the path $(joinpath(pwd(), "adcme")) and (3) rebuild ADCME.""")
else
@info "ADCME dependencies have already been installed."
return
end
end
@info "Installing miniconda..."
if Sys.iswindows()
run(`cmd /c start /wait "" $INSTALLER /InstallationType=JustMe /RegisterPython=0 /S /D=$(JULIA_ADCME_DIR)\\.julia\\adcme`)
else
run(`bash $INSTALLER -f -b -p adcme`)
end
cd(PWD)
end
function install_conda_envs()
cd(@__DIR__)
ENV_ = copy(ENV)
if Sys.iswindows()
platform = "windows"
ENV_["PATH"] = "$(JULIA_ADCME_DIR)/.julia/adcme/Scripts;$(JULIA_ADCME_DIR)/.julia/adcme/Library/bin;$(JULIA_ADCME_DIR)/.julia/adcme/" * ENV_["PATH"]
elseif Sys.islinux()
platform = INSTALL_GPU ? "linux-gpu" : "linux"
else
platform = "osx"
end
if check_install()
if (platform in ["windows", "linux", "osx"] && occursin("tensorflow", read(`$CONDA list`, String)))
return
end
if platform == "linux-gpu"
if occursin("tensorflow-gpu", read(`$CONDA list`, String))
return
elseif occursin("tensorflow", read(`$CONDA list`, String))
@error """You have already installed tensorflow-cpu. In order to install tensorflow-gpu, try the following steps:
- Quit Julia and remove the directory
$(JULIA_ADCME_DIR)/.julia/adcme
- In Julia, run the following command
```
ENV["GPU"] = 1
using Pkg; Pkg.build("ADCME")
```
"""
end
end
end
@info "Installing conda dependencies..."
run(setenv(`$CONDA env update -n base --file $platform.yml`, ENV_))
end
install_conda()
install_conda_envs()
| ADCME | https://github.com/kailaix/ADCME.jl.git |
|
[
"MIT"
] | 0.7.3 | 4ecfc24dbdf551f92b5de7ea2d99da3f7fde73c9 | code | 715 | using ADCME
PWD = pwd()
change_directory()
http_file("https://github.com/kailaix/hypre/archive/v2.19.0.tar.gz", "v2.19.0.tar.gz")
uncompress("v2.19.0.tar.gz", "hypre-2.19.0")
change_directory("hypre-2.19.0/src/build")
ROOT = joinpath(ADCME.BINDIR, "..")
run_with_env(`$(ADCME.CMAKE) -G Ninja -DCMAKE_MAKE_PROGRAM=$(ADCME.NINJA)
-DHYPRE_SHARED:BOOL=ON -DHYPRE_INSTALL_PREFIX:PATH=$ROOT
-DCMAKE_C_COMPILER:FILEPATH=$(ADCME.CC) -DCMAKE_CXX_COMPILER:FILEPATH=$(ADCME.CXX) ..`)
run_with_env(`$(ADCME.CMAKE) -G Ninja -DCMAKE_MAKE_PROGRAM=$(ADCME.NINJA) -L ..`)
ADCME.make()
run_with_env(`$(ADCME.NINJA) install`)
# run_with_env(`mv $(ROOT)/lib64/libHYPRE.so $(ADCME.LIBDIR)/libHYPRE.so`)
cd(PWD) | ADCME | https://github.com/kailaix/ADCME.jl.git |
|
[
"MIT"
] | 0.7.3 | 4ecfc24dbdf551f92b5de7ea2d99da3f7fde73c9 | code | 749 | using ADCME
CONDA_ROOT = abspath(joinpath(ADCME.LIBDIR, ".."))
change_directory(ADCME.PREFIXDIR)
http_file("https://download.open-mpi.org/release/open-mpi/v4.0/openmpi-4.0.4.tar.gz", "openmpi.tar.gz")
uncompress("openmpi.tar.gz", "openmpi-4.0.4")
change_directory(joinpath("openmpi-4.0.4", "build"))
require_file("Makefile") do
run_with_env(`../configure CC=$(ADCME.CC) CXX=$(ADCME.CXX) --enable-mpi-thread-multiple --prefix=$(CONDA_ROOT)
--enable-mpirun-prefix-by-default --enable-mpi-fortran=no --with-mpi-param-check=always`, Dict("LDFLAGS"=>"-L"*ADCME.LIBDIR))
end
require_library("mpi") do
run_with_env(`make -j all`)
end
require_library(joinpath(ADCME.LIBDIR, "libmpi")) do
run_with_env(`make install`)
end
| ADCME | https://github.com/kailaix/ADCME.jl.git |
|
[
"MIT"
] | 0.7.3 | 4ecfc24dbdf551f92b5de7ea2d99da3f7fde73c9 | code | 1719 | using Revise
using ADCME
using PyCall
using LinearAlgebra
using PyPlot
using Random
Random.seed!(233)
# TODO: specify your input parameters
x = sort(rand(10))
y = @. x^2 + 1.0
z = [x[1]; x[2]; rand(5) * (x[end]-x[1]) .+ x[1]; x[end]]
u = interp1(x,y,z)
sess = Session(); init(sess)
@show run(sess, u)-[1.026422850882909
1.044414684090653
1.312604319732756
1.810845361128137
1.280789421523103
1.600084940795178
1.930560200260898
1.972130181835701]
# uncomment it for testing gradients
# error()
# TODO: change your test parameter to `m`
# in the case of `multiple=true`, you also need to specify which component you are testings
# gradient check -- v
function scalar_function(m)
return sum(interp1(x,m,z)^2)
end
# TODO: change `m_` and `v_` to appropriate values
m_ = constant(rand(10))
v_ = rand(10)
y_ = scalar_function(m_)
dy_ = gradients(y_, m_)
ms_ = Array{Any}(undef, 5)
ys_ = Array{Any}(undef, 5)
s_ = Array{Any}(undef, 5)
w_ = Array{Any}(undef, 5)
gs_ = @. 1 / 10^(1:5)
for i = 1:5
g_ = gs_[i]
ms_[i] = m_ + g_*v_
ys_[i] = scalar_function(ms_[i])
s_[i] = ys_[i] - y_
w_[i] = s_[i] - g_*sum(v_.*dy_)
end
sess = Session(); init(sess)
sval_ = run(sess, s_)
wval_ = run(sess, w_)
close("all")
loglog(gs_, abs.(sval_), "*-", label="finite difference")
loglog(gs_, abs.(wval_), "+-", label="automatic differentiation")
loglog(gs_, gs_.^2 * 0.5*abs(wval_[1])/gs_[1]^2, "--",label="\$\\mathcal{O}(\\gamma^2)\$")
loglog(gs_, gs_ * 0.5*abs(sval_[1])/gs_[1], "--",label="\$\\mathcal{O}(\\gamma)\$")
plt.gca().invert_xaxis()
legend()
xlabel("\$\\gamma\$")
ylabel("Error")
| ADCME | https://github.com/kailaix/ADCME.jl.git |
|
[
"MIT"
] | 0.7.3 | 4ecfc24dbdf551f92b5de7ea2d99da3f7fde73c9 | code | 387 | # mpiexec.exe -n 4 julia .\mpisum.jl
using ADCME
mpi_init()
r = mpi_rank()
a = constant(ones(10) * r)
b = mpi_bcast(a, 3)
L = sum(b^2)
L = mpi_sum(L)
g = gradients(L, a)
sess = Session(); init(sess)
v, G = run(sess, [b, g])
@info r, v
# gradient test
# expected [ Info: (3, [24.0, 24.0, 24.0, 24.0, 24.0, 24.0, 24.0, 24.0, 24.0, 24.0])
G = run(sess, g)
@info r, G
mpi_finalize() | ADCME | https://github.com/kailaix/ADCME.jl.git |
|
[
"MIT"
] | 0.7.3 | 4ecfc24dbdf551f92b5de7ea2d99da3f7fde73c9 | code | 564 | # mpiexec.exe -n 4 julia .\mpisum.jl
using ADCME
mpi_init()
r = mpi_rank()
a = constant(ones(10) * r)
a = mpi_sendrecv(a, 0, 2)
# Equivalently, we can use
# if r==2
# global a
# a = mpi_send(a, 0)
# end
# if r==0
# global a
# a = mpi_recv(a,2)
# end
L = sum(a^2)
g = gradients(L, a)
sess = Session(); init(sess)
v, G = run(sess, [a,g])
# processor 0 should have the same `v` as processor 2
@info r, v
G = run(sess, g)
# gradients on processor 0 should be the same as processor 2
# because `a` is received from 2
@info r, G
mpi_finalize() | ADCME | https://github.com/kailaix/ADCME.jl.git |
|
[
"MIT"
] | 0.7.3 | 4ecfc24dbdf551f92b5de7ea2d99da3f7fde73c9 | code | 377 | # mpiexec.exe -n 4 julia .\mpisum.jl
using ADCME
mpi_init()
r = mpi_rank()
a = constant(Float64.(Array(1:10) * r))
b = mpi_sum(a)
L = sum(b)
g = gradients(L, a)
sess = Session(); init(sess)
v, G = run(sess, [b,g])
@info r, G
if r==0
ref = zeros(10)
for k = 0:mpi_size()-1
global ref
ref += Array(1:10)*k
end
@info v, ref
end
mpi_finalize() | ADCME | https://github.com/kailaix/ADCME.jl.git |
|
[
"MIT"
] | 0.7.3 | 4ecfc24dbdf551f92b5de7ea2d99da3f7fde73c9 | code | 1554 | using ADCME
using PyCall
using LinearAlgebra
using PyPlot
using Random
Random.seed!(233)
sk = load_op_and_grad("./build/libSinkhornKnopp","sinkhorn_knopp", multiple=true)
# TODO: specify your input parameters
a = constant(ones(10)/10)
b = constant(ones(20)/20)
m = constant(rand(10,20))
reg = constant(1.0)
iter = constant(10000, dtype=Int64)
tol = constant(1e-10)
method = constant(1);
u = sk(a,b,m,reg,iter,tol,method)
sess = tf.Session()
init(sess)
M, _ = run(sess, u)
@show sum(M, dims=1), sum(M, dims=2)
# error()
# TODO: change your test parameter to `m`
# gradient check -- v
function scalar_function(m)
return sk(a,b,m,reg,iter,tol,method)[2]
end
# TODO: change `m_` and `v_` to appropriate values
G = rand(10,20)
m_ = constant(G)
v_ = rand(10,20)
y_ = scalar_function(m_)
dy_ = gradients(y_, m_)
ms_ = Array{Any}(undef, 5)
ys_ = Array{Any}(undef, 5)
s_ = Array{Any}(undef, 5)
w_ = Array{Any}(undef, 5)
gs_ = @. 1 / 10^(1:5)
for i = 1:5
g_ = gs_[i]
ms_[i] = m_ + g_*v_
ys_[i] = scalar_function(ms_[i])
s_[i] = ys_[i] - y_
w_[i] = s_[i] - g_*sum(v_.*dy_)
end
sess = Session()
init(sess)
sval_ = run(sess, s_)
wval_ = run(sess, w_)
close("all")
loglog(gs_, abs.(sval_), "*-", label="finite difference")
loglog(gs_, abs.(wval_), "+-", label="automatic differentiation")
loglog(gs_, gs_.^2 * 0.5*abs(wval_[1])/gs_[1]^2, "--",label="\$\\mathcal{O}(\\gamma^2)\$")
loglog(gs_, gs_ * 0.5*abs(sval_[1])/gs_[1], "--",label="\$\\mathcal{O}(\\gamma)\$")
plt.gca().invert_xaxis()
legend()
xlabel("\$\\gamma\$")
ylabel("Error")
| ADCME | https://github.com/kailaix/ADCME.jl.git |
|
[
"MIT"
] | 0.7.3 | 4ecfc24dbdf551f92b5de7ea2d99da3f7fde73c9 | code | 1570 | using ADCME
using PyCall
using LinearAlgebra
using PyPlot
using Random
Random.seed!(233)
function print_tensor(in)
print_tensor_ = load_op_and_grad("./build/libPrintTensor","print_tensor")
in = convert_to_tensor(Any[in], [Float64]); in = in[1]
print_tensor_(in)
end
in = rand(5,5)
# TODO: specify your input parameters
u = print_tensor(in)
sess = Session(); init(sess)
@show run(sess, u) - in
# uncomment it for testing gradients
# error()
# TODO: change your test parameter to `m`
# in the case of `multiple=true`, you also need to specify which component you are testings
# gradient check -- v
function scalar_function(x)
return sum(print_tensor(x)^2)
end
# TODO: change `m_` and `v_` to appropriate values
m_ = constant(rand(5,5))
v_ = rand(5,5)
y_ = scalar_function(m_)
dy_ = gradients(y_, m_)
ms_ = Array{Any}(undef, 5)
ys_ = Array{Any}(undef, 5)
s_ = Array{Any}(undef, 5)
w_ = Array{Any}(undef, 5)
gs_ = @. 1 / 10^(1:5)
for i = 1:5
g_ = gs_[i]
ms_[i] = m_ + g_*v_
ys_[i] = scalar_function(ms_[i])
s_[i] = ys_[i] - y_
w_[i] = s_[i] - g_*sum(v_.*dy_)
end
sess = Session(); init(sess)
sval_ = run(sess, s_)
wval_ = run(sess, w_)
close("all")
loglog(gs_, abs.(sval_), "*-", label="finite difference")
loglog(gs_, abs.(wval_), "+-", label="automatic differentiation")
loglog(gs_, gs_.^2 * 0.5*abs(wval_[1])/gs_[1]^2, "--",label="\$\\mathcal{O}(\\gamma^2)\$")
loglog(gs_, gs_ * 0.5*abs(sval_[1])/gs_[1], "--",label="\$\\mathcal{O}(\\gamma)\$")
plt.gca().invert_xaxis()
legend()
xlabel("\$\\gamma\$")
ylabel("Error")
| ADCME | https://github.com/kailaix/ADCME.jl.git |
|
[
"MIT"
] | 0.7.3 | 4ecfc24dbdf551f92b5de7ea2d99da3f7fde73c9 | code | 1482 | using ADCME
using LinearAlgebra
using PyPlot
using Random
Random.seed!(233)
kd = 3
xc = rand(10)
yc = rand(10)
e = rand(10)
c = rand(10)
d = rand(3)
r = RBF2D(xc, yc; c=c, eps=e, d=d, kind = kd)
x = rand(5)
y = rand(5)
o = r(x, y)
sess = Session(); init(sess)
@show run(sess, o)
# uncomment it for testing gradients
# error()
# TODO: change your test parameter to `m`
# in the case of `multiple=true`, you also need to specify which component you are testings
# gradient check -- v
function scalar_function(m)
r = RBF2D(xc, m; c=c, eps=e, d=d, kind = kd)
return sum(r(x,y)^2)
end
# TODO: change `m_` and `v_` to appropriate values
m_ = constant(rand(10))
v_ = rand(10)
y_ = scalar_function(m_)
dy_ = gradients(y_, m_)
ms_ = Array{Any}(undef, 5)
ys_ = Array{Any}(undef, 5)
s_ = Array{Any}(undef, 5)
w_ = Array{Any}(undef, 5)
gs_ = @. 1 / 10^(1:5)
for i = 1:5
g_ = gs_[i]
ms_[i] = m_ + g_*v_
ys_[i] = scalar_function(ms_[i])
s_[i] = ys_[i] - y_
w_[i] = s_[i] - g_*sum(v_.*dy_)
end
sess = Session(); init(sess)
sval_ = run(sess, s_)
wval_ = run(sess, w_)
close("all")
loglog(gs_, abs.(sval_), "*-", label="finite difference")
loglog(gs_, abs.(wval_), "+-", label="automatic differentiation")
loglog(gs_, gs_.^2 * 0.5*abs(wval_[1])/gs_[1]^2, "--",label="\$\\mathcal{O}(\\gamma^2)\$")
loglog(gs_, gs_ * 0.5*abs(sval_[1])/gs_[1], "--",label="\$\\mathcal{O}(\\gamma)\$")
plt.gca().invert_xaxis()
legend()
xlabel("\$\\gamma\$")
ylabel("Error")
| ADCME | https://github.com/kailaix/ADCME.jl.git |
|
[
"MIT"
] | 0.7.3 | 4ecfc24dbdf551f92b5de7ea2d99da3f7fde73c9 | code | 445 | using Revise
using ADCME
using LinearAlgebra
using Test
xc = rand(10)
yc = rand(10)
e = rand(10)
c = rand(10)
d = rand(3)
r = RBF2D(xc, yc; c=c, eps=e, d=d, kind = 0)
x = rand(5)
y = rand(5)
o = r(x, y)
O = zeros(5)
for i = 1:5
for j = 1:10
dist = sqrt((x[i]-xc[j])^2 + (y[i]-yc[j])^2)  # distinct name so the coefficient vector `d` is not shadowed
O[i] += c[j] * exp(-(e[j]*dist)^2)
end
O[i] += d[1] + d[2] * x[i] + d[3] * y[i]
end
sess = Session(); init(sess)
@test norm(run(sess, o)-O)<1e-5 | ADCME | https://github.com/kailaix/ADCME.jl.git |
|
[
"MIT"
] | 0.7.3 | 4ecfc24dbdf551f92b5de7ea2d99da3f7fde73c9 | code | 1698 | using ADCME
using PyCall
using LinearAlgebra
using PyPlot
using Random
Random.seed!(233)
sess = Session(); init(sess)
# TODO: specify your input parameters
nc = 100
n = 10
xc = rand(nc)
yc = rand(nc)
zc = rand(nc)
x = rand(n)
y = rand(n)
z = rand(n)
c = rand(nc)
d = rand(4)
e = rand(nc)
# problem with eps, xc
for kind in [0,1,2,3]
# TODO: change your test parameter to `m`
# in the case of `multiple=true`, you also need to specify which component you are testings
# gradient check -- v
function scalar_function(m)
rbf = RBF3D(xc, m, zc, c = c, d = d, eps = e, kind = kind)
return sum(rbf(x,y,z)^2)
end
# TODO: change `m_` and `v_` to appropriate values
m_ = constant(rand(size(xc)...))
v_ = rand(size(m_)...)
y_ = scalar_function(m_)
dy_ = gradients(y_, m_)
ms_ = Array{Any}(undef, 5)
ys_ = Array{Any}(undef, 5)
s_ = Array{Any}(undef, 5)
w_ = Array{Any}(undef, 5)
gs_ = @. 1 / 10^(1:5)
for i = 1:5
g_ = gs_[i]
ms_[i] = m_ + g_*v_
ys_[i] = scalar_function(ms_[i])
s_[i] = ys_[i] - y_
w_[i] = s_[i] - g_*sum(v_.*dy_)
end
sess = Session(); init(sess)
sval_ = run(sess, s_)
wval_ = run(sess, w_)
close("all")
loglog(gs_, abs.(sval_), "*-", label="finite difference")
loglog(gs_, abs.(wval_), "+-", label="automatic differentiation")
loglog(gs_, gs_.^2 * 0.5*abs(wval_[1])/gs_[1]^2, "--",label="\$\\mathcal{O}(\\gamma^2)\$")
loglog(gs_, gs_ * 0.5*abs(sval_[1])/gs_[1], "--",label="\$\\mathcal{O}(\\gamma)\$")
plt.gca().invert_xaxis()
legend()
xlabel("\$\\gamma\$")
ylabel("Error")
savefig("test$kind.png")
end | ADCME | https://github.com/kailaix/ADCME.jl.git |
|
[
"MIT"
] | 0.7.3 | 4ecfc24dbdf551f92b5de7ea2d99da3f7fde73c9 | code | 1697 | using ADCME
using PyCall
using LinearAlgebra
using PyPlot
using Random
using Test
using Statistics
Random.seed!(233)
sess = Session(); init(sess)
n = 10
window = 9
# TODO: specify your input parameters
inu = rand(n)
for ops in [(rollsum, sum), (rollmean, mean), (rollstd, std), (rollvar, var)]
@info ops
u = ops[1](inu,window)
out = zeros(n-window+1)
for i = window:n
out[i-window+1] = ops[2](inu[i-window+1:i])
end
@test maximum(abs.(run(sess, u)-out )) < 1e-8
end
# uncomment it for testing gradients
# error()
n = 10
window = 9
# TODO: change your test parameter to `m`
# in the case of `multiple=true`, you also need to specify which component you are testings
# gradient check -- v
function scalar_function(x)
return sum(rollstd(x,window)^2)
end
# TODO: change `m_` and `v_` to appropriate values
m_ = constant(rand(n))
v_ = rand(n)
y_ = scalar_function(m_)
dy_ = gradients(y_, m_)
ms_ = Array{Any}(undef, 5)
ys_ = Array{Any}(undef, 5)
s_ = Array{Any}(undef, 5)
w_ = Array{Any}(undef, 5)
gs_ = @. 1 / 10^(1:5)
for i = 1:5
g_ = gs_[i]
ms_[i] = m_ + g_*v_
ys_[i] = scalar_function(ms_[i])
s_[i] = ys_[i] - y_
w_[i] = s_[i] - g_*sum(v_.*dy_)
end
sess = Session(); init(sess)
sval_ = run(sess, s_)
wval_ = run(sess, w_)
close("all")
loglog(gs_, abs.(sval_), "*-", label="finite difference")
loglog(gs_, abs.(wval_), "+-", label="automatic differentiation")
loglog(gs_, gs_.^2 * 0.5*abs(wval_[1])/gs_[1]^2, "--",label="\$\\mathcal{O}(\\gamma^2)\$")
loglog(gs_, gs_ * 0.5*abs(sval_[1])/gs_[1], "--",label="\$\\mathcal{O}(\\gamma)\$")
plt.gca().invert_xaxis()
legend()
xlabel("\$\\gamma\$")
ylabel("Error")
savefig("test.png") | ADCME | https://github.com/kailaix/ADCME.jl.git |
|
[
"MIT"
] | 0.7.3 | 4ecfc24dbdf551f92b5de7ea2d99da3f7fde73c9 | code | 1699 | using ADCME
using PyCall
using LinearAlgebra
using PyPlot
using Random
Random.seed!(233)
function solve_batched_rhs(a,rhs)
solve_batched_rhs_ = load_op_and_grad("./build/libSolveBatchedRhs","solve_batched_rhs")
a,rhs = convert_to_tensor([a,rhs], [Float64,Float64])
solve_batched_rhs_(a,rhs)
end
a = rand(10,5)
rhs = rand(100, 10)
sol = (a\rhs')'
# TODO: specify your input parameters
u = solve_batched_rhs(a,rhs)
sess = Session(); init(sess)
@show run(sess, u) - sol
# uncomment it for testing gradients
# error()
# TODO: change your test parameter to `m`
# in the case of `multiple=true`, you also need to specify which component you are testings
# gradient check -- v
function scalar_function(m)
return sum(solve_batched_rhs(m,rhs)^2)
# return sum(solve_batched_rhs(a,m)^2)
end
# TODO: change `m_` and `v_` to appropriate values
m_ = constant(rand(10,10))
v_ = rand(10,10)
y_ = scalar_function(m_)
dy_ = gradients(y_, m_)
ms_ = Array{Any}(undef, 5)
ys_ = Array{Any}(undef, 5)
s_ = Array{Any}(undef, 5)
w_ = Array{Any}(undef, 5)
gs_ = @. 1 / 10^(1:5)
for i = 1:5
g_ = gs_[i]
ms_[i] = m_ + g_*v_
ys_[i] = scalar_function(ms_[i])
s_[i] = ys_[i] - y_
w_[i] = s_[i] - g_*sum(v_.*dy_)
end
sess = Session(); init(sess)
sval_ = run(sess, s_)
wval_ = run(sess, w_)
close("all")
loglog(gs_, abs.(sval_), "*-", label="finite difference")
loglog(gs_, abs.(wval_), "+-", label="automatic differentiation")
loglog(gs_, gs_.^2 * 0.5*abs(wval_[1])/gs_[1]^2, "--",label="\$\\mathcal{O}(\\gamma^2)\$")
loglog(gs_, gs_ * 0.5*abs(sval_[1])/gs_[1], "--",label="\$\\mathcal{O}(\\gamma)\$")
plt.gca().invert_xaxis()
legend()
xlabel("\$\\gamma\$")
ylabel("Error")
| ADCME | https://github.com/kailaix/ADCME.jl.git |
|
[
"MIT"
] | 0.7.3 | 4ecfc24dbdf551f92b5de7ea2d99da3f7fde73c9 | code | 4564 | using ADCME
using PyCall
using LinearAlgebra
using PyPlot
using Random
using Test
# Random.seed!(233)
import ADCME: SparseAssembler
sparse_accumulator = load_op_and_grad("./build/libSparseAccumulator","sparse_accumulator", multiple=false)
sparse_accumulator_add = load_op_and_grad("./build/libSparseAccumulator","sparse_accumulator_add", multiple=false)
sparse_accumulator_copy = load_op_and_grad("./build/libSparseAccumulator","sparse_accumulator_copy", multiple=true)
"""
SparseAssembler(handle::Union{PyObject, <:Integer}, n::Union{PyObject, <:Integer}, tol::Union{PyObject, <:Real}=0.0)
Creates a SparseAssembler for accumulating `row`, `col`, `val` for sparse matrices.
- `handle`: an integer handle for creating a sparse matrix. If the handle already exists, `SparseAssembler` return the existing sparse matrix handle. If you are creating different sparse matrices, the handles should be different.
- `n`: Number of rows of the sparse matrix.
- `tol` (optional): Tolerance. `SparseAssembler` will treats any values less than `tol` as zero.
# Example
```julia
handle = SparseAssembler(100, 5, 1e-8)
op1 = accumulate(handle, 1, [1;2;3], [1.0;2.0;3.0])
op2 = accumulate(handle, 2, [1;2;3], [1.0;2.0;3.0])
J = assemble(5, 5, [op1;op2])
```
`J` will be a [`SparseTensor`](@ref) object.
"""
function SparseAssembler(handle::Union{PyObject, <:Integer}, n::Union{PyObject, <:Integer}, tol::Union{PyObject, <:Real}=0.0)
n = convert_to_tensor(n, dtype=Int32)
tol = convert_to_tensor(tol, dtype=Float64)
handle = convert_to_tensor(handle, dtype=Int32)
sparse_accumulator(tol, n, handle)
end
"""
accumulate(handle::PyObject, row::Union{PyObject, <:Integer}, cols::Union{PyObject, Array{<:Integer}}, vals::Union{PyObject, Array{<:Real}})
Accumulates `row`-th row. It adds the value to the sparse matrix
```julia
for k = 1:length(cols)
A[row, cols[k]] += vals[k]
end
```
`handle` is the handle created by [`SparseAssembler`](@ref).
See [`SparseAssembler`](@ref) for an example.
!!! Note
`accumulate` returns a `op::PyObject`. Only when `op` is executed, the nonzero values are populated into the sparse matrix.
"""
function accumulate(handle::PyObject, row::Union{PyObject, <:Integer}, cols::Union{PyObject, Array{<:Integer}},
vals::Union{PyObject, Array{<:Real}})
row = convert_to_tensor(row, dtype=Int32)
cols = convert_to_tensor(cols, dtype=Int32)
vals = convert_to_tensor(vals, dtype=Float64)
return sparse_accumulator_add(handle, row, cols, vals)
end
"""
assemble(m::Union{PyObject, <:Integer}, n::Union{PyObject, <:Integer}, ops::PyObject)
Assembles the sparse matrix from the `ops` created by [`accumulate`](@ref). `ops` is either a single output from `accumulate`, or concated from several `ops`
```julia
op1 = accumulate(handle, 1, [1;2;3], [1.0;2.0;3.0])
op2 = accumulate(handle, 2, [1;2;3], [1.0;2.0;3.0])
op = [op1;op2] # equivalent to `vcat([op1, op2]...)`
```
`m` and `n` are rows and columns of the sparse matrix.
See [`SparseAssembler`](@ref) for an example.
"""
function assemble(m::Union{PyObject, <:Integer}, n::Union{PyObject, <:Integer}, ops::PyObject)
if length(size(ops))==0
ops = reshape(ops, 1)
end
ii, jj, vv = sparse_accumulator_copy(ops)
return SparseTensor(ii, jj, vv, m, n)
end
handle = SparseAssembler(100, 5, 1e-8)
op1 = accumulate(handle, 1, [1;2;3], [1.0;2.0;3.0])
op2 = accumulate(handle, 2, [1;2;3], [1.0;2.0;3.0])
J = assemble(5, 5, [op1;op2])
# J = assemble(acc, 5, 5)
sess = Session(); init(sess)
run(sess, J)
m = 20
n = 100
handle = SparseAssembler(100, m, 0.0)
op = PyObject[]
A = zeros(m, n)
for i = 1:1
ncol = rand(1:n, 10)
row = rand(1:m)
v = rand(10)
for (k,val) in enumerate(v)
@show k
A[row, ncol[k]] += val
end
@show v
push!(op, accumulate(handle, row, ncol, v))
end
op = vcat(op...)
J = assemble(m, n, op)
B = run(sess, J)
@test norm(A-B)<1e-8
handle = SparseAssembler(100, 5, 1.0)
op1 = accumulate(handle, 1, [1;2;3], [2.0;0.5;0.5])
J = assemble(5, 5, op1)
B = run(sess, J)
@test norm(B-[2.0 0.0 0.0 0.0 0.0
0.0 0.0 0.0 0.0 0.0
0.0 0.0 0.0 0.0 0.0
0.0 0.0 0.0 0.0 0.0
0.0 0.0 0.0 0.0 0.0])<1e-8
handle = SparseAssembler(100, 5, 0.0)
op1 = accumulate(handle, 1, [1;1], [1.0;1.0])
op2 = accumulate(handle, 1, [1;2], [1.0;1.0])
J = assemble(5, 5, [op1;op2])
B = run(sess, J)
@test norm(B-[3.0 1.0 0.0 0.0 0.0
0.0 0.0 0.0 0.0 0.0
0.0 0.0 0.0 0.0 0.0
0.0 0.0 0.0 0.0 0.0
0.0 0.0 0.0 0.0 0.0])<1e-8
| ADCME | https://github.com/kailaix/ADCME.jl.git |
|
[
"MIT"
] | 0.7.3 | 4ecfc24dbdf551f92b5de7ea2d99da3f7fde73c9 | code | 1514 | using ADCME
using PyCall
using LinearAlgebra
using PyPlot
using Random
using SparseArrays
Random.seed!(233)
A0 = sprand(10,10,0.3)
II, JJ, VV = findnz(A0)
A = SparseTensor(II, JJ, VV, 10, 10)
u = compress(A)
sess = Session(); init(sess)
@show run(sess, u)-run(sess, A)
# uncomment it for testing gradients
# error()
# TODO: change your test parameter to `m`
# in the case of `multiple=true`, you also need to specify which component you are testings
# gradient check -- v
function scalar_function(m)
A = SparseTensor(II, JJ, m, 10, 10)
return sum(compress(A).o.values^2)
end
# TODO: change `m_` and `v_` to appropriate values
m_ = constant(rand(length(VV)))
v_ = rand(length(VV))
y_ = scalar_function(m_)
dy_ = gradients(y_, m_)
ms_ = Array{Any}(undef, 5)
ys_ = Array{Any}(undef, 5)
s_ = Array{Any}(undef, 5)
w_ = Array{Any}(undef, 5)
gs_ = @. 1 / 10^(1:5)
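# The loop below is a Taylor-remainder convergence test for the gradient:
# s_[i] = f(m + γ v) - f(m) should decay as O(γ), while
# w_[i] = s_[i] - γ ⟨v, ∇f(m)⟩ should decay as O(γ²) when `gradients` is correct.
# The loglog plot at the end visualizes both rates (the same check is repeated
# in the other gradient-test scripts below).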
for i = 1:5
g_ = gs_[i]
ms_[i] = m_ + g_*v_
ys_[i] = scalar_function(ms_[i])
s_[i] = ys_[i] - y_
w_[i] = s_[i] - g_*sum(v_.*dy_)
end
sess = Session(); init(sess)
sval_ = run(sess, s_)
wval_ = run(sess, w_)
close("all")
loglog(gs_, abs.(sval_), "*-", label="finite difference")
loglog(gs_, abs.(wval_), "+-", label="automatic differentiation")
loglog(gs_, gs_.^2 * 0.5*abs(wval_[1])/gs_[1]^2, "--",label="\$\\mathcal{O}(\\gamma^2)\$")
loglog(gs_, gs_ * 0.5*abs(sval_[1])/gs_[1], "--",label="\$\\mathcal{O}(\\gamma)\$")
plt.gca().invert_xaxis()
legend()
xlabel("\$\\gamma\$")
ylabel("Error")
savefig("gradtest.png") | ADCME | https://github.com/kailaix/ADCME.jl.git |
|
[
"MIT"
] | 0.7.3 | 4ecfc24dbdf551f92b5de7ea2d99da3f7fde73c9 | code | 2467 | using ADCME
using PyCall
using LinearAlgebra
using PyPlot
using SparseArrays
using Random
Random.seed!(233)
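# sparse_concate concatenates two sparse matrices through the custom operator
# libSparseConcate: hcat_=true produces [A1 A2], hcat_=false produces [A1; A2].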
function sparse_concate(A1, A2,hcat_::Bool)
m1,n1 = size(A1)
m2,n2 = size(A2)
if !isa(A1, SparseTensor); A1 = SparseTensor(A1); end
if !isa(A2, SparseTensor); A2 = SparseTensor(A2); end
ii1,jj1,vv1 = find(A1)
ii2,jj2,vv2 = find(A2)
sparse_concate_ = load_op_and_grad("./build/libSparseConcate","sparse_concate", multiple=true)
ii1,jj1,vv1,m1_,n1_,ii2,jj2,vv2,m2_,n2_ = convert_to_tensor([ii1,jj1,vv1,m1,n1,ii2,jj2,vv2,m2,n2], [Int64,Int64,Float64,Int32,Int32,Int64,Int64,Float64,Int32,Int32])
ii,jj,vv = sparse_concate_(ii1,jj1,vv1,m1_,n1_,ii2,jj2,vv2,m2_,n2_,constant(hcat_))
if hcat_
SparseTensor(ii,jj,vv, m1, n1+n2)
else
SparseTensor(ii,jj,vv,m1+m2,n1)
end
end
# TODO: specify your input parameters
A1 = sprand(10,10,0.4)
A2 = sprand(10,10,0.3)
u = sparse_concate(A1, A2, true)
sess = Session(); init(sess)
@show run(sess, u)-[A1 A2]
u = sparse_concate(A1, A2, false)
sess = Session(); init(sess)
@show run(sess, u)-[A1;A2]
# uncomment it for testing gradients
# error()
# TODO: change your test parameter to `m`
# in the case of `multiple=true`, you also need to specify which component you are testing
# gradient check -- v
function scalar_function(m)
A1 = SparseTensor(ii1,jj1,m,10,10)
A2 = SparseTensor(ii2,jj2,m,10,10)
return sum(sparse_concate(A1, A2, false))^2
end
# TODO: change `m_` and `v_` to appropriate values
ii1 = rand(1:10,10)
jj1 = rand(1:10,10)
ii2 = rand(1:10,10)
jj2 = rand(1:10,10)
vv1 = rand(10)
vv2 = rand(10)
m_ = constant(rand(10))
v_ = rand(10)
y_ = scalar_function(m_)
dy_ = gradients(y_, m_)
ms_ = Array{Any}(undef, 5)
ys_ = Array{Any}(undef, 5)
s_ = Array{Any}(undef, 5)
w_ = Array{Any}(undef, 5)
gs_ = @. 1 / 10^(1:5)
for i = 1:5
g_ = gs_[i]
ms_[i] = m_ + g_*v_
ys_[i] = scalar_function(ms_[i])
s_[i] = ys_[i] - y_
w_[i] = s_[i] - g_*sum(v_.*dy_)
end
sess = Session(); init(sess)
sval_ = run(sess, s_)
wval_ = run(sess, w_)
close("all")
loglog(gs_, abs.(sval_), "*-", label="finite difference")
loglog(gs_, abs.(wval_), "+-", label="automatic differentiation")
loglog(gs_, gs_.^2 * 0.5*abs(wval_[1])/gs_[1]^2, "--",label="\$\\mathcal{O}(\\gamma^2)\$")
loglog(gs_, gs_ * 0.5*abs(sval_[1])/gs_[1], "--",label="\$\\mathcal{O}(\\gamma)\$")
plt.gca().invert_xaxis()
legend()
xlabel("\$\\gamma\$")
ylabel("Error")
| ADCME | https://github.com/kailaix/ADCME.jl.git |
|
[
"MIT"
] | 0.7.3 | 4ecfc24dbdf551f92b5de7ea2d99da3f7fde73c9 | code | 3304 | using ADCME
using PyCall
using LinearAlgebra
using PyPlot
using Random
using SparseArrays
# using ADCMEKit
Random.seed!(233)
function sparse_factorization(A, s=99)
ii, jj, vv = find(constant(A))
d = size(A, 1)
sparse_factorization_ = load_op_and_grad("./build/libfactorization","sparse_factorization")
ii,jj,vv,d,s = convert_to_tensor([ii,jj,vv,d,s], [Int64,Int64,Float64,Int64,Int64])
stop_gradient(sparse_factorization_(ii,jj,vv,d,s))
end
function sparse_solve(A,rhs,o)
ii, jj, vv = find(constant(A))
solve_ = load_op_and_grad("./build/libSolve","solve")
rhs,ii, jj, vv,o = convert_to_tensor([rhs,ii, jj, vv,o], [Float64,Int64, Int64, Float64,Int64])
solve_(rhs,ii, jj, vv,o)
end
function sparse_factorization_solve(A, rhs)
o = sparse_factorization(A)
sparse_solve(A, rhs, o)
end
# TODO: specify your input parameters
A = sprand(10,10,0.3)
rhs1 = rand(10)
rhs2 = rand(10)
u = sparse_factorization(A)
out1 = sparse_solve(A, rhs1, u)
out2 = sparse_solve(A, rhs2, u)
sess = Session(); init(sess)
u, o1, o2 = run(sess, [u, out1, out2])
@info u
@show o1 - A\rhs1
@show o2 - A\rhs2
# uncomment it for testing gradients
# error()
# TODO: change your test parameter to `m`
# in the case of `multiple=true`, you also need to specify which component you are testing
# gradient check -- v
ii, jj, vv = find(constant(A))
rhs = rand(10)
function scalar_function(m)
A = SparseTensor(ii, jj, m, 10, 10)
return sum(sparse_factorization_solve(A, rhs)^2)
end
# TODO: change `m_` and `v_` to appropriate values
k = length(vv)
m_ = constant(rand(k))
v_ = rand(k)
y_ = scalar_function(m_)
dy_ = gradients(y_, m_)
ms_ = Array{Any}(undef, 5)
ys_ = Array{Any}(undef, 5)
s_ = Array{Any}(undef, 5)
w_ = Array{Any}(undef, 5)
gs_ = @. 1 / 10^(1:5)
for i = 1:5
g_ = gs_[i]
ms_[i] = m_ + g_*v_
ys_[i] = scalar_function(ms_[i])
s_[i] = ys_[i] - y_
w_[i] = s_[i] - g_*sum(v_.*dy_)
end
sess = Session(); init(sess)
sval_ = run(sess, s_)
wval_ = run(sess, w_)
close("all")
loglog(gs_, abs.(sval_), "*-", label="finite difference")
loglog(gs_, abs.(wval_), "+-", label="automatic differentiation")
loglog(gs_, gs_.^2 * 0.5*abs(wval_[1])/gs_[1]^2, "--",label="\$\\mathcal{O}(\\gamma^2)\$")
loglog(gs_, gs_ * 0.5*abs(sval_[1])/gs_[1], "--",label="\$\\mathcal{O}(\\gamma)\$")
plt.gca().invert_xaxis()
legend()
xlabel("\$\\gamma\$")
ylabel("Error")
A = sprand(10, 10, 0.8)
ii, jj, vv = find(constant(A))
k = length(vv)
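# while_loop_simulation runs a simple fixed-point style iteration
# u_i = A \ (u_{i-1} + rhs) inside a while_loop: A is shifted by 100*I via
# spdiag to keep it well conditioned, factorized once, and the factorization
# handle `o` is reused in every iteration of the loop body.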
function while_loop_simulation(vv, rhs, ns = 10)
A = SparseTensor(ii, jj, vv, 10, 10) + spdiag(10)*100.
o = sparse_factorization(A)
ta = TensorArray(ns)
i = constant(2, dtype=Int32)
ta = write(ta, 1, ones(10))
function condition(i, ta)
i<= ns
end
function body(i, ta)
u = read(ta, i-1)
res = sparse_solve(A, u + rhs, o)
# res = u
ta = write(ta, i, res)
i+1, ta
end
_, out = while_loop(condition, body, [i, ta])
sum(stack(out)^2)
end
vv_ = run(sess, vv)
pl = placeholder(vv_)
# test for vv
pl = placeholder(rand(k))
res = while_loop_simulation(pl, rhs , 100)
gradview(sess, pl, res, rand(k))
# test for right hand side
pl = placeholder(rand(10))
res = while_loop_simulation(vv_, pl , 100)
gradview(sess, pl, res, rand(10))
| ADCME | https://github.com/kailaix/ADCME.jl.git |
|
[
"MIT"
] | 0.7.3 | 4ecfc24dbdf551f92b5de7ea2d99da3f7fde73c9 | code | 2166 | using Revise
using ADCME
using PyCall
using LinearAlgebra
using PyPlot
using Random
using Test
using SparseArrays
Random.seed!(233)
sparse_indexing = load_op_and_grad("build/libSparseIndexing", "sparse_indexing", multiple=true)
# function Base.:getindex(s::SparseTensor, i1::Union{PyObject,Array{S,1}},
# i2::Union{PyObject,Array{T,1}}) where {S<:Real,T<:Real}
# m_, n_ = length(i1), length(i2)
# i1 = convert_to_tensor(i1, dtype=Int64)
# i2 = convert_to_tensor(i2, dtype=Int64)
# ii1, jj1, vv1 = find(s)
# m = tf.convert_to_tensor(s.o.shape[1],dtype=tf.int64)
# n = tf.convert_to_tensor(s.o.shape[2],dtype=tf.int64)
# ii2, jj2, vv2 = sparse_indexing(ii1,jj1,vv1,m,n,i1,i2)
# SparseTensor(ii2, jj2, vv2, m_, n_)
# end
################## End Load Operator ##################
i1 = unique(rand(1:20,3))
j1 = unique(rand(1:30,3))
A = sprand(20,30,0.3)
@show i1, j1
Ad = Array(A[i1, j1])
B = SparseTensor(A)
Bd = Array(B[i1, j1])
sess = Session()
init(sess)
Bd_ = run(sess, Bd)
@test Ad≈Bd_
# error()
# TODO: change your test parameter to `m`
# gradient check -- v
function scalar_function(m_)
A = SparseTensor(ii1, jj1, m_, 20, 30)
B = A[i1, j1]
return sum(B)^2
end
# TODO: change `m_` and `v_` to appropriate values
m = 20
n = 30
ii1,jj1,vv1 = find(B)
m_ = constant(rand(length(B.o.values)))
v_ = rand(length(B.o.values))
y_ = scalar_function(m_)
dy_ = gradients(y_, m_)
ms_ = Array{Any}(undef, 5)
ys_ = Array{Any}(undef, 5)
s_ = Array{Any}(undef, 5)
w_ = Array{Any}(undef, 5)
gs_ = @. 1 / 10^(1:5)
for i = 1:5
g_ = gs_[i]
ms_[i] = m_ + g_*v_
ys_[i] = scalar_function(ms_[i])
s_[i] = ys_[i] - y_
w_[i] = s_[i] - g_*sum(v_.*dy_)
end
sess = Session()
init(sess)
sval_ = run(sess, s_)
wval_ = run(sess, w_)
close("all")
loglog(gs_, abs.(sval_), "*-", label="finite difference")
loglog(gs_, abs.(wval_), "+-", label="automatic differentiation")
loglog(gs_, gs_.^2 * 0.5*abs(wval_[1])/gs_[1]^2, "--",label="\$\\mathcal{O}(\\gamma^2)\$")
loglog(gs_, gs_ * 0.5*abs(sval_[1])/gs_[1], "--",label="\$\\mathcal{O}(\\gamma)\$")
plt.gca().invert_xaxis()
legend()
xlabel("\$\\gamma\$")
ylabel("Error")
| ADCME | https://github.com/kailaix/ADCME.jl.git |
|
[
"MIT"
] | 0.7.3 | 4ecfc24dbdf551f92b5de7ea2d99da3f7fde73c9 | code | 1474 | using Revise
using ADCME
using PyCall
using LinearAlgebra
using PyPlot
using SparseArrays
using Random
# Random.seed!(233)
# TODO: specify your input parameters
A = sprand(10,5,0.3)
f = rand(10)
sol = A\f
u = constant(A)\f
sess = Session()
init(sess)
@show run(sess, u)-sol
# error()
# TODO: change your test parameter to `m`
# gradient check -- v
function scalar_function(m)
return sum((constant(A)\m)^2)
B = SparseTensor(ii, jj, m, size(A)...)
return sum((B\Array([f f]'))^2)
end
ii, jj, vv = find(constant(A))
# TODO: change `m_` and `v_` to appropriate values
# m_ = constant(rand(length(vv)))
# v_ = rand(length(vv))
m_ = constant(rand(5,10))
v_ = rand(5,10)
y_ = scalar_function(m_)
dy_ = gradients(y_, m_)
ms_ = Array{Any}(undef, 5)
ys_ = Array{Any}(undef, 5)
s_ = Array{Any}(undef, 5)
w_ = Array{Any}(undef, 5)
gs_ = @. 1 / 10^(1:5)
for i = 1:5
g_ = gs_[i]
ms_[i] = m_ + g_*v_
ys_[i] = scalar_function(ms_[i])
s_[i] = ys_[i] - y_
w_[i] = s_[i] - g_*sum(v_.*dy_)
end
sess = Session()
init(sess)
sval_ = run(sess, s_)
wval_ = run(sess, w_)
close("all")
loglog(gs_, abs.(sval_), "*-", label="finite difference")
loglog(gs_, abs.(wval_), "+-", label="automatic differentiation")
loglog(gs_, gs_.^2 * 0.5*abs(wval_[1])/gs_[1]^2, "--",label="\$\\mathcal{O}(\\gamma^2)\$")
loglog(gs_, gs_ * 0.5*abs(sval_[1])/gs_[1], "--",label="\$\\mathcal{O}(\\gamma)\$")
plt.gca().invert_xaxis()
legend()
xlabel("\$\\gamma\$")
ylabel("Error")
| ADCME | https://github.com/kailaix/ADCME.jl.git |
|
[
"MIT"
] | 0.7.3 | 4ecfc24dbdf551f92b5de7ea2d99da3f7fde73c9 | code | 1529 | using ADCME
using PyCall
using LinearAlgebra
using SparseArrays
using PyPlot
using Random
using Test
Random.seed!(233)
sparse_mat_mul = load_op("build/libSparseMatMul", "sparse_sparse_mat_mul")
diag_sparse_mat_mul = load_op("build/libSparseMatMul", "diag_sparse_mat_mul")
sparse_diag_mat_mul = load_op("build/libSparseMatMul", "sparse_diag_mat_mul")
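# The overloaded * below dispatches to one of three custom kernels depending on
# whether either operand is stored as a diagonal SparseTensor (the _diag flag);
# row/column indices are shifted to 0-based before being passed to the C++ op.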
function Base.:*(s1::SparseTensor, s2::SparseTensor)
ii1, jj1, vv1 = find(s1)
ii2, jj2, vv2 = find(s2)
m, n = size(s1)
n_, k = size(s2)
if n!=n_
error("IGACS: matrix size mismatch: ($m, $n) vs ($n_, $k)")
end
mat_mul_fn = sparse_mat_mul
if s1._diag
mat_mul_fn = diag_sparse_mat_mul
elseif s2._diag
mat_mul_fn = sparse_diag_mat_mul
end
ii3, jj3, vv3 = mat_mul_fn(ii1-1,jj1-1,vv1,ii2-1,jj2-1,vv2,m,n,k)
SparseTensor(ii3, jj3, vv3, m, k)
end
# sparse_mat_mul = py"sparse_mat_mul"
################## End Load Operator ##################
A = sprand(10,5,0.3)
B = sprand(5,20,0.3)
C = A*B
CC = SparseTensor(A)*SparseTensor(B)
# TODO: specify your input parameters
sess = Session()
init(sess)
C_ = run(sess, CC)
@test C_≈C
A = spdiagm(0=>[1.;2.;3;4;5])
B = sprand(5,20,0.3)
C = A*B
CC = SparseTensor(A)*SparseTensor(B)
# TODO: specify your input parameters
sess = Session()
init(sess)
C_ = run(sess, CC)
@test C_≈C
A = sprand(10,5,0.5)
B = spdiagm(0=>[1.;2.;3;4;5])
C = A*B
CC = SparseTensor(A)*SparseTensor(B)
# TODO: specify your input parameters
sess = Session()
init(sess)
C_ = run(sess, CC)
@test C_≈C
| ADCME | https://github.com/kailaix/ADCME.jl.git |
|
[
"MIT"
] | 0.7.3 | 4ecfc24dbdf551f92b5de7ea2d99da3f7fde73c9 | code | 2596 | using ADCME
using PyCall
using LinearAlgebra
using PyPlot
using SparseArrays
using Random
Random.seed!(233)
function scatter_update(A::Union{SparseTensor, SparseMatrixCSC{Float64,Int64}},
ii,jj,B::Union{SparseTensor, SparseMatrixCSC{Float64,Int64}})
!isa(A, SparseTensor) && (A=SparseTensor(A))
!isa(B, SparseTensor) && (B=SparseTensor(B))
ii1, jj1, vv1 = find(A)
m1_, n1_ = size(A)
ii2, jj2, vv2 = find(B)
sparse_scatter_update_ = load_op_and_grad("./build/libSparseScatterUpdate","sparse_scatter_update", multiple=true)
ii1,jj1,vv1,m1,n1,ii2,jj2,vv2,ii,jj = convert_to_tensor([ii1,jj1,vv1,m1_,n1_,ii2,jj2,vv2,ii,jj], [Int64,Int64,Float64,Int64,Int64,Int64,Int64,Float64,Int64,Int64])
ii, jj, vv = sparse_scatter_update_(ii1,jj1,vv1,m1,n1,ii2,jj2,vv2,ii,jj)
SparseTensor(ii, jj, vv, m1_, n1_)
end
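# scatter_update(A, ii, jj, B) replaces the block A[ii, jj] by B, which is the
# same operation as the SparseMatrixCSC assignment C[ii, jj] = B used as the
# reference below.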
# TODO: specify your input parameters
A = sprand(10,10,0.3)
B = sprand(3,3,0.6)
ii = [1;4;5]
jj = [2;4;6]
u = scatter_update(A, ii, jj, B)
C = copy(A)
C[ii,jj] = B
sess = Session(); init(sess)
@show run(sess, u)-C
# uncomment it for testing gradients
# error()
# TODO: change your test parameter to `m`
# in the case of `multiple=true`, you also need to specify which component you are testing
# gradient check -- v
function scalar_function(A, B, m)
ii1, jj1, vv1 = find(A)
ii2, jj2, vv2 = find(B)
if length(m)==length(vv1)
A = SparseTensor(ii1, jj1, m, size(A)...)
else
B = SparseTensor(ii2, jj2, m, size(B)...)
end
C = scatter_update(A, ii, jj, B)
return sum(C)^2
end
# TODO: change `m_` and `v_` to appropriate values
A = sprand(10,10,0.3)|>SparseTensor
B = sprand(3,3,0.6)|>SparseTensor
ii = [1;4;5]
jj = [2;4;6]
m_ = constant(rand(length(values(A))))
v_ = rand(length(values(A)))
y_ = scalar_function(A, B, m_)
dy_ = gradients(y_, m_)
ms_ = Array{Any}(undef, 5)
ys_ = Array{Any}(undef, 5)
s_ = Array{Any}(undef, 5)
w_ = Array{Any}(undef, 5)
gs_ = @. 1 / 10^(1:5)
for i = 1:5
g_ = gs_[i]
ms_[i] = m_ + g_*v_
ys_[i] = scalar_function(A, B, ms_[i])
s_[i] = ys_[i] - y_
w_[i] = s_[i] - g_*sum(v_.*dy_)
end
sess = Session(); init(sess)
sval_ = run(sess, s_)
wval_ = run(sess, w_)
close("all")
loglog(gs_, abs.(sval_), "*-", label="finite difference")
loglog(gs_, abs.(wval_), "+-", label="automatic differentiation")
loglog(gs_, gs_.^2 * 0.5*abs(wval_[1])/gs_[1]^2, "--",label="\$\\mathcal{O}(\\gamma^2)\$")
loglog(gs_, gs_ * 0.5*abs(sval_[1])/gs_[1], "--",label="\$\\mathcal{O}(\\gamma)\$")
plt.gca().invert_xaxis()
legend()
xlabel("\$\\gamma\$")
ylabel("Error")
| ADCME | https://github.com/kailaix/ADCME.jl.git |
|
[
"MIT"
] | 0.7.3 | 4ecfc24dbdf551f92b5de7ea2d99da3f7fde73c9 | code | 2033 | using ADCME
using PyCall
using LinearAlgebra
using PyPlot
using Random
using SparseArrays
Random.seed!(233)
function sparse_solver(ii,jj,vv,f,method="SparseLU")
sparse_solver_ = load_op_and_grad("./build/libSparseSolver","sparse_solver")
ii,jj,vv,f = convert_to_tensor([ii,jj,vv,f], [Int64,Int64,Float64,Float64])
sparse_solver_(ii,jj,vv,f,method)
end
# TODO: specify your input parameters
sess = Session(); init(sess)
for method in ["SparseLU", "SparseQR", "SimplicialLDLT", "SimplicialLLT"]
A = sprand(10,10,0.6)
global A = A'*A
global f = rand(10)
A_ = SparseTensor(A)
global ii, jj, vv = find(A_)
u = sparse_solver(ii,jj,vv,f,method)
@show norm(run(sess, u)-A\f)
end
# uncomment it for testing gradients
# error()
# py"""
# import traceback
# try:
# $run($sess, $dy_)
# except Exception:
# print(traceback.format_exc())
# """
# TODO: change your test parameter to `m`
# in the case of `multiple=true`, you also need to specify which component you are testing
# gradient check -- v
function scalar_function(m)
return sum(sparse_solver(ii,jj,vv,m)^2)
end
# TODO: change `m_` and `v_` to appropriate values
# m_ = constant(rand(length(vv)))
# v_ = rand(length(vv))
m_ = constant(f)
v_ = rand(length(f))
y_ = scalar_function(m_)
dy_ = gradients(y_, m_)
ms_ = Array{Any}(undef, 5)
ys_ = Array{Any}(undef, 5)
s_ = Array{Any}(undef, 5)
w_ = Array{Any}(undef, 5)
gs_ = @. 1 / 10^(1:5)
for i = 1:5
g_ = gs_[i]
ms_[i] = m_ + g_*v_
ys_[i] = scalar_function(ms_[i])
s_[i] = ys_[i] - y_
w_[i] = s_[i] - g_*sum(v_.*dy_)
end
sess = Session(); init(sess)
sval_ = run(sess, s_)
wval_ = run(sess, w_)
close("all")
loglog(gs_, abs.(sval_), "*-", label="finite difference")
loglog(gs_, abs.(wval_), "+-", label="automatic differentiation")
loglog(gs_, gs_.^2 * 0.5*abs(wval_[1])/gs_[1]^2, "--",label="\$\\mathcal{O}(\\gamma^2)\$")
loglog(gs_, gs_ * 0.5*abs(sval_[1])/gs_[1], "--",label="\$\\mathcal{O}(\\gamma)\$")
plt.gca().invert_xaxis()
legend()
xlabel("\$\\gamma\$")
ylabel("Error")
| ADCME | https://github.com/kailaix/ADCME.jl.git |
|
[
"MIT"
] | 0.7.3 | 4ecfc24dbdf551f92b5de7ea2d99da3f7fde73c9 | code | 1860 | using ADCME
using PyCall
using LinearAlgebra
using PyPlot
using Random
using SparseArrays
Random.seed!(233)
function sparse_to_dense(A)
A = constant(A)
ij = A.o.indices
vv = values(A)
m, n = size(A)
sparse_to_dense_ = load_op_and_grad("./build/libSparseToDense","sparse_to_dense_ad")
m_, n_ = convert_to_tensor(Any[m,n], [Int64,Int64])
out = sparse_to_dense_(ij, vv, m_,n_)
set_shape(out, (m, n))
end
A = sprand(10,10,0.3)
# TODO: specify your input parameters
u = sparse_to_dense(A)
sess = Session(); init(sess)
@show run(sess, u) - Array(A)
# uncomment it for testing gradients
# error()
# TODO: change your test parameter to `m`
# in the case of `multiple=true`, you also need to specify which component you are testing
# gradient check -- v
function scalar_function(x)
B = SparseTensor(ij[:,1]+1, ij[:,2]+1, x, m, n)
return sum(sparse_to_dense(B)^2)
end
# TODO: change `m_` and `v_` to appropriate values
A = constant(A)
m, n = size(A)
K = length(values(A))
ij = A.o.indices
m_ = constant(rand(K))
v_ = rand(K)
y_ = scalar_function(m_)
dy_ = gradients(y_, m_)
ms_ = Array{Any}(undef, 5)
ys_ = Array{Any}(undef, 5)
s_ = Array{Any}(undef, 5)
w_ = Array{Any}(undef, 5)
gs_ = @. 1 / 10^(1:5)
for i = 1:5
g_ = gs_[i]
ms_[i] = m_ + g_*v_
ys_[i] = scalar_function(ms_[i])
s_[i] = ys_[i] - y_
w_[i] = s_[i] - g_*sum(v_.*dy_)
end
sess = Session(); init(sess)
sval_ = run(sess, s_)
wval_ = run(sess, w_)
close("all")
loglog(gs_, abs.(sval_), "*-", label="finite difference")
loglog(gs_, abs.(wval_), "+-", label="automatic differentiation")
loglog(gs_, gs_.^2 * 0.5*abs(wval_[1])/gs_[1]^2, "--",label="\$\\mathcal{O}(\\gamma^2)\$")
loglog(gs_, gs_ * 0.5*abs(sval_[1])/gs_[1], "--",label="\$\\mathcal{O}(\\gamma)\$")
plt.gca().invert_xaxis()
legend()
xlabel("\$\\gamma\$")
ylabel("Error")
| ADCME | https://github.com/kailaix/ADCME.jl.git |
|
[
"MIT"
] | 0.7.3 | 4ecfc24dbdf551f92b5de7ea2d99da3f7fde73c9 | code | 1734 | using ADCME
using PyCall
using LinearAlgebra
using PyPlot
using Random
Random.seed!(233)
function tri_lu(u,num,lu)
tri_lu_ = load_op_and_grad("./build/libTriLu","tri_lu")
u,num,lu = convert_to_tensor([u,num,lu], [Float64,Int64,Int64])
tri_lu_(u,num,lu)
end
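# tri_lu extracts the lower (lu = 1) or upper (lu = 0) triangular part with
# offset `num` from every matrix in a batch, mirroring Julia's tril/triu used
# as the reference below.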
lu = 1
num = 15
m = 5
n = 10
u = rand(1,m, n)
ref = zeros(size(u,1), m, n)
for i = 1:size(u,1)
if lu==0
ref[i,:,:] = triu(u[i,:,:], num)
else
ref[i,:,:] = tril(u[i,:,:], num)
end
end
out = tri_lu(u,num,lu)
sess = Session(); init(sess)
@show run(sess, out)- ref
# uncomment it for testing gradients
# error()
# TODO: change your test parameter to `m`
# in the case of `multiple=true`, you also need to specify which component you are testing
# gradient check -- v
function scalar_function(m)
return sum(tri_lu(m,num,lu)^2)
end
# TODO: change `m_` and `v_` to appropriate values
m_ = constant(rand(10, m, n))
v_ = rand(10, m, n)
y_ = scalar_function(m_)
dy_ = gradients(y_, m_)
ms_ = Array{Any}(undef, 5)
ys_ = Array{Any}(undef, 5)
s_ = Array{Any}(undef, 5)
w_ = Array{Any}(undef, 5)
gs_ = @. 1 / 10^(1:5)
for i = 1:5
g_ = gs_[i]
ms_[i] = m_ + g_*v_
ys_[i] = scalar_function(ms_[i])
s_[i] = ys_[i] - y_
w_[i] = s_[i] - g_*sum(v_.*dy_)
end
sess = Session(); init(sess)
sval_ = run(sess, s_)
wval_ = run(sess, w_)
close("all")
loglog(gs_, abs.(sval_), "*-", label="finite difference")
loglog(gs_, abs.(wval_), "+-", label="automatic differentiation")
loglog(gs_, gs_.^2 * 0.5*abs(wval_[1])/gs_[1]^2, "--",label="\$\\mathcal{O}(\\gamma^2)\$")
loglog(gs_, gs_ * 0.5*abs(sval_[1])/gs_[1], "--",label="\$\\mathcal{O}(\\gamma)\$")
plt.gca().invert_xaxis()
legend()
xlabel("\$\\gamma\$")
ylabel("Error")
| ADCME | https://github.com/kailaix/ADCME.jl.git |
|
[
"MIT"
] | 0.7.3 | 4ecfc24dbdf551f92b5de7ea2d99da3f7fde73c9 | code | 4864 | using ADCME
using PyPlot
using ForwardDiff
using NiLang, NiLang.AD
# ADCME
function myloss_adcme(b; n=101)
h = 1/(n-1)
x = LinRange(0,1,n)[2:end-1]
f = @. (4*(2 + x - x^2))
u = trisolve(-b/h^2 * ones(n-3), 2b/h^2+1 * ones(n-2), -b/h^2 * ones(n-3), f)
ue = u[div(n+1,2)] # extract values at x=0.5
return (ue-1.0)^2
end
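# myloss_adcme discretizes the two-point boundary value problem
#   -b u''(x) + u(x) = f(x) = 4(2 + x - x^2),  u(0) = u(1) = 0,
# with central differences on a grid of spacing h (hence the tridiagonal system
# with diagonal 2b/h^2 + 1 and off-diagonals -b/h^2), and penalizes the
# mismatch between the solution at (approximately) x = 0.5 and the target value 1.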
function benchmark_adcme(n)
b = Variable(1.0)
loss = myloss_adcme(b; n = n)
g = gradients(loss, b)
sess = Session(); init(sess)
run(sess, g)
ts_fwd = 0.0
ts_bwd = 0.0
for i = 1:11
init(sess)
d1 = @timed run(sess, loss)
(i>1) && (ts_fwd += d1[2])
init(sess)
d2 = @timed run(sess, g)
(i>1) && (ts_bwd += d2[2])
@info "adcme", n, d1[2], d2[2]
end
ts_fwd /= 10
ts_bwd /= 10
return ts_fwd, ts_bwd
end
# ForwardDiff
function trisolve!(a::T, b::T, c::T, D::AbstractVector, X::AbstractVector) where T
N = length(X)
D = copy(D)
B = zeros(T, N)
@inbounds B[1] = b
@inbounds for i = 2:N
w = a / B[i-1]
B[i] = b - w * c
D[i] = D[i] - w * D[i - 1]
end
@inbounds X[N] = D[N] / B[N]
@inbounds for i = N-1:-1:1
X[i] = (D[i] - c * X[i + 1]) / B[i]
end
return X
end
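# trisolve! is the Thomas algorithm for a tridiagonal system with constant
# coefficients: a on the sub-diagonal, b on the diagonal, c on the super-diagonal.
# It performs one forward-elimination sweep followed by back substitution, O(N).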
function myloss(b::T; n=101) where T
h = 1/(n-1)
x = LinRange(0,1,n)[2:end-1]
f = @. T(4*(2 + x - x^2))
u = trisolve!(-b/h^2, 2b/h^2+1, -b/h^2, f, zeros(T, n-2))
ue = u[div(n+1,2)] # extract values at x=0.5
return (ue-1.0)^2
end
function benchmark_forwarddiff(n)
ts_fwd = 0.0
ts_bwd = 0.0
for i = 1:11
d1 = @timed myloss(10.0, n = n)
(i>1) && (ts_fwd += d1[2])
d2 = @timed myloss(ForwardDiff.Dual(10.0, 1.0), n = n)
(i>1) && (ts_bwd += d2[2])
@info "forwarddiff", n, d1[2], d2[2]
end
ts_fwd /= 10
ts_bwd /= 10
ts_fwd, ts_bwd
end
# NiLang
@i function i_trisolve!(a::T, b::T, c::T, D!::AbstractVector, X!::AbstractVector, B!::AbstractVector) where T
@invcheckoff @inbounds begin
B![1] += b
for i = 2:length(X!)
@routine begin
w ← zero(T)
w += a / B![i-1]
end
B![i] += b
B![i] -= w * c
D![i] -= w * D![i - 1]
~@routine
end
X![end] += D![end] / B![end]
for i = length(X!)-1:-1:1
@routine begin
anc ← zero(T)
anc += D![i]
anc -= c * X![i + 1]
end
X![i] += anc / B![i]
~@routine
end
end
end
@i function i_myloss!(loss::T, f!::AbstractVector{T}, u!::AbstractVector{T},
b_cache!::AbstractVector{T}, b::T) where T
@invcheckoff @inbounds begin
n ← length(f!) + 2
h ← zero(T)
h += 1 / (n-1)
for i=1:length(f!)
@routine begin
@zeros T xi anc
xi += i * h
anc += 2 + xi
anc -= xi ^ 2
end
f![i] += 4 * anc
~@routine
end
@routine begin
@zeros T h2 factor_a factor_b factor_c
h2 += h^2
factor_a -= b / h2
factor_c += factor_a
factor_b -= 2 * factor_a
factor_b += 1
end
i_trisolve!(factor_a, factor_b, factor_c, f!, u!, b_cache!)
~@routine
@routine begin
ue ← zero(T)
ue += u![div(n+1,2)] # extract values at x=0.5
ue -= 1
end
loss += ue^2
~@routine
h -= 1 / (n-1)
end
end
function benchmark_nilang(n)
ts_fwd = 0.0
ts_bwd = 0.0
for i = 1:11
d1 = @timed i_myloss!(0.0, zeros(n-2), zeros(n-2), zeros(n-2), 10.0)
(i>1) && (ts_fwd += d1[2])
d2 = @timed Grad(i_myloss!)(Val(1), 0.0, zeros(n-2), zeros(n-2), zeros(n-2), 10.0)
(i>1) && (ts_bwd += d2[2])
@info "nilang", n, d1[2], d2[2]
end
ts_fwd /= 10
ts_bwd /= 10
ts_fwd, ts_bwd
end
tsf, tsb = zeros(3,6), zeros(3,6)
for (k,n) in enumerate([101, 1001,10001, 100001, 1000001, 10000001])
tsf[1,k], tsb[1,k] = benchmark_adcme(n)
tsf[2,k], tsb[2,k] = benchmark_forwarddiff(n)
tsf[3,k], tsb[3,k] = benchmark_nilang(n)
end
figure(figsize=(10,4))
subplot(121)
plot([101, 1001,10001, 100001, 1000001, 10000001], tsf[1,:], "o-", label="ADCME")
plot([101, 1001,10001, 100001, 1000001, 10000001], tsf[2,:], "o-", label="Julia")
plot([101, 1001,10001, 100001, 1000001, 10000001], tsf[3,:], "o-", label="NiLang")
title("Forward Computation")
legend()
xlabel("\$n\$")
ylabel("Time (seconds)")
grid("on")
subplot(122)
plot([101, 1001,10001, 100001, 1000001, 10000001], tsb[1,:], "o-", label="ADCME")
plot([101, 1001,10001, 100001, 1000001, 10000001], tsb[2,:], "o-", label="ForwardDiff")
plot([101, 1001,10001, 100001, 1000001, 10000001], tsb[3,:], "o-", label="NiLang")
legend()
title("Gradient Back-Propagation")
xlabel("\$n\$")
ylabel("Time (seconds)")
grid("on")
savefig("benchmark.png") | ADCME | https://github.com/kailaix/ADCME.jl.git |
|
[
"MIT"
] | 0.7.3 | 4ecfc24dbdf551f92b5de7ea2d99da3f7fde73c9 | code | 1686 | using ADCME
using PyCall
using LinearAlgebra
using PyPlot
using Random
using Test
Random.seed!(233)
function tri_solve(a,b,c,d)
tri_solve_ = load_op_and_grad("./build/libTriSolve","tri_solve")
a,b,c,d = convert_to_tensor(Any[a,b,c,d], [Float64,Float64,Float64,Float64])
tri_solve_(a,b,c,d)
end
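# tri_solve solves a tridiagonal linear system with sub-diagonal a, diagonal b,
# super-diagonal c and right-hand side d via the custom operator; the dense
# reference solution below is built with diagm and `\`.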
n = 10
a = rand(n-1)
b = rand(n).+10
c = rand(n-1)
d = rand(n)
A = diagm(0=>b, -1=>a, 1=>c)
x = A\d
# TODO: specify your input parameters
u = tri_solve(a,b,c,d)
sess = Session(); init(sess)
@test run(sess, u)≈x
# uncomment it for testing gradients
# error()
# TODO: change your test parameter to `m`
# in the case of `multiple=true`, you also need to specify which component you are testing
# gradient check -- v
function scalar_function(x)
return sum(tri_solve(a,b,x,d)^2)
end
# TODO: change `m_` and `v_` to appropriate values
m_ = constant(rand(n-1))
v_ = rand(n-1)
y_ = scalar_function(m_)
dy_ = gradients(y_, m_)
ms_ = Array{Any}(undef, 5)
ys_ = Array{Any}(undef, 5)
s_ = Array{Any}(undef, 5)
w_ = Array{Any}(undef, 5)
gs_ = @. 1 / 10^(1:5)
for i = 1:5
g_ = gs_[i]
ms_[i] = m_ + g_*v_
ys_[i] = scalar_function(ms_[i])
s_[i] = ys_[i] - y_
w_[i] = s_[i] - g_*sum(v_.*dy_)
end
sess = Session(); init(sess)
sval_ = run(sess, s_)
wval_ = run(sess, w_)
close("all")
loglog(gs_, abs.(sval_), "*-", label="finite difference")
loglog(gs_, abs.(wval_), "+-", label="automatic differentiation")
loglog(gs_, gs_.^2 * 0.5*abs(wval_[1])/gs_[1]^2, "--",label="\$\\mathcal{O}(\\gamma^2)\$")
loglog(gs_, gs_ * 0.5*abs(sval_[1])/gs_[1], "--",label="\$\\mathcal{O}(\\gamma)\$")
plt.gca().invert_xaxis()
legend()
xlabel("\$\\gamma\$")
ylabel("Error")
| ADCME | https://github.com/kailaix/ADCME.jl.git |
|
[
"MIT"
] | 0.7.3 | 4ecfc24dbdf551f92b5de7ea2d99da3f7fde73c9 | code | 2356 | using ADCME
using PyCall
using LinearAlgebra
using PyPlot
using Random
Random.seed!(233)
relu(x) = max(x, 0)
function extended_nn(x,config,theta,activation)
extended_nn_ = load_op_and_grad("./build/libExtendedNn","extended_nn", multiple=true)
x,config_,theta = convert_to_tensor([x,config,theta], [Float64,Int64,Float64])
u, du = extended_nn_(x,config_,theta,activation)
n = length(x)÷config[1]
reshape(u, (n, config[end])), du
end
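# extended_nn evaluates a fully connected network whose layer widths are given
# by `config` and whose weights and biases are packed layer by layer into the
# flat vector θ (as in the reference θ = [W1'[:]; b1; W2'[:]; b2; ...] below);
# it returns the outputs reshaped to (n_samples, config[end]) together with a
# second tensor `du` (derivative information produced by the custom op).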
# TODO: specify your input parameters
x = rand(20)
config = [2,20,50,8]
activation = "tanh"
W1 = rand(2,20); b1 = rand(20)
W2 = rand(20,50); b2 = rand(50);
W3 = rand(50,8); b3 = rand(8)
X = reshape(x, 2, 10)'|>Array
y1 = tanh.(X*W1 .+ b1')
y2 = tanh.(y1*W2 .+ b2')
y3 = y2*W3 .+ b3'
# x = rand(10)
# config = [1,2]
# activation = "tanh"
# W1 = ones(1,2); b1 = ones(2)
# X = reshape(x, 1, 10)'|>Array
# y1 = X*W1 .+ b1'
θ = [W1'[:];b1[:];W2'[:];b2[:];W3'[:];b3[:]]
# θ = [W1'[:];b1[:]]
tfx = constant(x)
u = extended_nn(x,config,θ,activation)
sess = Session(); init(sess)
@show run(sess, u)[1]-y3
# S = gradients(u[1][:,1], tfx)
# run(sess, S)
# uncomment it for testing gradients
# error()
# TODO: change your test parameter to `m`
# in the case of `multiple=true`, you also need to specify which component you are testing
# gradient check -- v
function scalar_function(m)
return sum(extended_nn(m,config,θ,activation)[1]^2)
end
# TODO: change `m_` and `v_` to appropriate values
m_ = constant(rand(length(θ)))
v_ = rand(length(θ))
m_ = constant(rand(length(x)))
v_ = rand(length(x))
y_ = scalar_function(m_)
dy_ = gradients(y_, m_)
ms_ = Array{Any}(undef, 5)
ys_ = Array{Any}(undef, 5)
s_ = Array{Any}(undef, 5)
w_ = Array{Any}(undef, 5)
gs_ = @. 1 / 10^(1:5)
for i = 1:5
g_ = gs_[i]
ms_[i] = m_ + g_*v_
ys_[i] = scalar_function(ms_[i])
s_[i] = ys_[i] - y_
w_[i] = s_[i] - g_*sum(v_.*dy_)
end
sess = Session(); init(sess)
sval_ = run(sess, s_)
wval_ = run(sess, w_)
close("all")
loglog(gs_, abs.(sval_), "*-", label="finite difference")
loglog(gs_, abs.(wval_), "+-", label="automatic differentiation")
loglog(gs_, gs_.^2 * 0.5*abs(wval_[1])/gs_[1]^2, "--",label="\$\\mathcal{O}(\\gamma^2)\$")
loglog(gs_, gs_ * 0.5*abs(sval_[1])/gs_[1], "--",label="\$\\mathcal{O}(\\gamma)\$")
plt.gca().invert_xaxis()
legend()
xlabel("\$\\gamma\$")
ylabel("Error")
| ADCME | https://github.com/kailaix/ADCME.jl.git |
|
[
"MIT"
] | 0.7.3 | 4ecfc24dbdf551f92b5de7ea2d99da3f7fde73c9 | code | 1853 | using ADCME
using PyCall
using LinearAlgebra
using PyPlot
using Random
Random.seed!(233)
function halo_exchange_two_d(u,fill_value,m,n)
halo_exchange_two_d_ = load_op_and_grad("./build/libHaloExchangeTwoD","halo_exchange_two_d")
u,fill_value,m,n = convert_to_tensor(Any[u,fill_value,m,n], [Float64,Float64,Int64,Int64])
halo_exchange_two_d_(u,fill_value,m,n)
end
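# halo_exchange_two_d exchanges the boundary (halo/ghost) values of each rank's
# local block with its neighbors, with the ranks arranged on an m x n Cartesian
# grid (rank r owns block (r ÷ n + 1, r % n + 1)); fill_value appears to pad the
# sides of a block that touch the physical boundary of the global domain.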
mpi_init()
U = reshape(1:24, 4, 6)'|>Array
m = 3
n = 2
fill_value = 1.0
M = mpi_rank()÷n+1
N = mpi_rank()%n+1
ulocal = U[(M-1)*2 + 1: M * 2, (N-1)*2+1:N*2]
sess = Session(); init(sess)
# TODO: change your test parameter to `m`
# in the case of `multiple=true`, you also need to specify which component you are testing
# gradient check -- v
function scalar_function(x)
x = mpi_bcast(x)
u = x .* ulocal
return mpi_sum(sum(halo_exchange_two_d(u,fill_value,m,n)^2))
end
# TODO: change `m_` and `v_` to appropriate values
m_ = constant(rand(2,2))
v_ = rand(2,2)
y_ = scalar_function(m_)
dy_ = gradients(y_, m_)
ms_ = Array{Any}(undef, 5)
ys_ = Array{Any}(undef, 5)
s_ = Array{Any}(undef, 5)
w_ = Array{Any}(undef, 5)
gs_ = @. 1 / 10^(1:5)
for i = 1:5
g_ = gs_[i]
ms_[i] = m_ + g_*v_
ys_[i] = scalar_function(ms_[i])
s_[i] = ys_[i] - y_
w_[i] = s_[i] - g_*sum(v_.*dy_)
end
sess = Session(); init(sess)
sval_ = run(sess, s_)
wval_ = run(sess, w_)
if mpi_rank()==0
close("all")
loglog(gs_, abs.(sval_), "*-", label="finite difference")
loglog(gs_, abs.(wval_), "+-", label="automatic differentiation")
loglog(gs_, gs_.^2 * 0.5*abs(wval_[1])/gs_[1]^2, "--",label="\$\\mathcal{O}(\\gamma^2)\$")
loglog(gs_, gs_ * 0.5*abs(sval_[1])/gs_[1], "--",label="\$\\mathcal{O}(\\gamma)\$")
plt.gca().invert_xaxis()
legend()
xlabel("\$\\gamma\$")
ylabel("Error")
savefig("gradtest.png")
end
mpi_finalize()
| ADCME | https://github.com/kailaix/ADCME.jl.git |
|
[
"MIT"
] | 0.7.3 | 4ecfc24dbdf551f92b5de7ea2d99da3f7fde73c9 | code | 832 | using ADCME
using PyCall
using LinearAlgebra
using PyPlot
using Random
using DelimitedFiles
Random.seed!(233)
function halo_exchange_two_d(u,fill_value,m,n)
halo_exchange_two_d_ = load_op_and_grad("./build/libHaloExchangeTwoD","halo_exchange_two_d")
u,fill_value,m,n = convert_to_tensor(Any[u,fill_value,m,n], [Float64,Float64,Int64,Int64])
halo_exchange_two_d_(u,fill_value,m,n)
end
mpi_init()
U = reshape(1:24, 4, 6)'|>Array
m = 3
n = 2
fill_value = 1.0
M = mpi_rank()÷n+1
N = mpi_rank()%n+1
ulocal = U[(M-1)*2 + 1: M * 2, (N-1)*2+1:N*2]
# TODO: specify your input parameters
u = halo_exchange_two_d(ulocal,fill_value,m,n)
sess = Session(); init(sess)
Uval = run(sess, u)
sleep(mpi_rank())
println("=========================== rank = $(mpi_rank()) ======================")
writedlm(stdout, Uval)
mpi_finalize()
| ADCME | https://github.com/kailaix/ADCME.jl.git |
|
[
"MIT"
] | 0.7.3 | 4ecfc24dbdf551f92b5de7ea2d99da3f7fde73c9 | code | 1951 | using ADCME
using PyCall
using LinearAlgebra
using PyPlot
using DelimitedFiles
using Random
Random.seed!(233)
function halo_exchange_neighbor_two(u,fill_value,m,n,tag,w)
halo_exchange_neighbor_two_ = load_op_and_grad("./build/libHaloExchangeNeighborTwo","halo_exchange_neighbor_two")
u,fill_value,m,n,tag,w = convert_to_tensor(Any[u,fill_value,m,n,tag,w], [Float64,Float64,Int64,Int64,Int64,Float64])
halo_exchange_neighbor_two_(u,fill_value,m,n,tag,w)
end
mpi_init()
u = rand(5, 5)
fill_value = 10.0
m = 1
n = 1
tag = 1
w = 1.0
# TODO: specify your input parameters
uext = halo_exchange_neighbor_two(u,fill_value,m,n,tag,w)
sess = Session(); init(sess)
uval = run(sess, uext)
writedlm(stdout, round.(uval, digits=3))
# uncomment it for testing gradients
# error()
# TODO: change your test parameter to `m`
# in the case of `multiple=true`, you also need to specify which component you are testing
# gradient check -- v
function scalar_function(x)
return sum(halo_exchange_neighbor_two(x,fill_value,m,n,tag,w)^2)
end
# TODO: change `m_` and `v_` to appropriate values
m_ = constant(rand(5, 5))
v_ = rand(5, 5)
y_ = scalar_function(m_)
dy_ = gradients(y_, m_)
ms_ = Array{Any}(undef, 5)
ys_ = Array{Any}(undef, 5)
s_ = Array{Any}(undef, 5)
w_ = Array{Any}(undef, 5)
gs_ = @. 1 / 10^(1:5)
for i = 1:5
g_ = gs_[i]
ms_[i] = m_ + g_*v_
ys_[i] = scalar_function(ms_[i])
s_[i] = ys_[i] - y_
w_[i] = s_[i] - g_*sum(v_.*dy_)
end
sess = Session(); init(sess)
sval_ = run(sess, s_)
wval_ = run(sess, w_)
close("all")
loglog(gs_, abs.(sval_), "*-", label="finite difference")
loglog(gs_, abs.(wval_), "+-", label="automatic differentiation")
loglog(gs_, gs_.^2 * 0.5*abs(wval_[1])/gs_[1]^2, "--",label="\$\\mathcal{O}(\\gamma^2)\$")
loglog(gs_, gs_ * 0.5*abs(sval_[1])/gs_[1], "--",label="\$\\mathcal{O}(\\gamma)\$")
plt.gca().invert_xaxis()
legend()
xlabel("\$\\gamma\$")
ylabel("Error")
savefig("test.png")
| ADCME | https://github.com/kailaix/ADCME.jl.git |
|
[
"MIT"
] | 0.7.3 | 4ecfc24dbdf551f92b5de7ea2d99da3f7fde73c9 | code | 1079 | using ADCME
using PyCall
using LinearAlgebra
using PyPlot
using DelimitedFiles
using Random
Random.seed!(233)
function halo_exchange_neighbor_two(u,fill_value,m,n,tag,w)
halo_exchange_neighbor_two_ = load_op_and_grad("./build/libHaloExchangeNeighborTwo","halo_exchange_neighbor_two")
u,fill_value,m,n,tag,w = convert_to_tensor(Any[u,fill_value,m,n,tag,w], [Float64,Float64,Int64,Int64,Int64,Float64])
halo_exchange_neighbor_two_(u,fill_value,m,n,tag,w)
end
mpi_init()
u = reshape(Array(1:36), 6, 6)|>Array
if mpi_rank()==0
u = u[1:3,1:3]
end
if mpi_rank()==1
u = u[1:3,4:end]
end
if mpi_rank()==2
u = u[4:end,1:3]
end
if mpi_rank()==3
u = u[4:end,4:end]
end
fill_value = 10.0
m = 2
n = 2
tag = 1
w = 1.0
# TODO: specify your input parameters
uext = halo_exchange_neighbor_two(u,fill_value,m,n,tag,w)
sess = Session(); init(sess)
uval = run(sess, uext)
sleep(mpi_rank())
@info "rank = $(mpi_rank())===================================================="
writedlm(stdout, round.(uval, digits=3))
# uncomment it for testing gradients
mpi_finalize() | ADCME | https://github.com/kailaix/ADCME.jl.git |
|
[
"MIT"
] | 0.7.3 | 4ecfc24dbdf551f92b5de7ea2d99da3f7fde73c9 | code | 1371 | using ADCME
using PyCall
using LinearAlgebra
using PyPlot
using Random
using SparseArrays
Random.seed!(233)
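# Wrappers around the custom operators in libMPITensor:
#  - mpi_create_matrix converts COO data (indices, values) for the local row
#    range [ilower, iupper] into row-wise CSR-style arrays (rows, ncols, cols, values);
#  - mpi_get_matrix reassembles those arrays into a SparseTensor with N columns;
#  - mpi_tensor_solve solves the distributed linear system with the selected
#    solver (e.g. "BoomerAMG" or "GMRES").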
function mpi_create_matrix(indices,values,ilower,iupper)
mpi_create_matrix_ = load_op_and_grad("./build/libMPITensor","mpi_create_matrix", multiple=true)
indices,values,ilower,iupper = convert_to_tensor(Any[indices,values,ilower,iupper], [Int64,Float64,Int64,Int64])
mpi_create_matrix_(indices,values,ilower,iupper)
end
function mpi_get_matrix(rows,ncols,cols,ilower,iupper,values, N)
mpi_get_matrix_ = load_op_and_grad("./build/libMPITensor","mpi_get_matrix", multiple=true)
rows,ncols,cols,ilower_,iupper_,values = convert_to_tensor(Any[rows,ncols,cols,ilower,iupper,values], [Int32,Int32,Int32,Int64,Int64,Float64])
indices, vals = mpi_get_matrix_(rows,ncols,cols,ilower_,iupper_,values)
SparseTensor(tf.SparseTensor(indices, vals, (iupper-ilower+1, N)), false)
end
function mpi_tensor_solve(rows,ncols,cols,values,rhs,ilower,iupper,solver = "BoomerAMG",printlevel = 2)
mpi_tensor_solve_ = load_op_and_grad("./build/libMPITensor","mpi_tensor_solve")
rows,ncols,cols,values,rhs,ilower,iupper,printlevel = convert_to_tensor(Any[rows,ncols,cols,values,rhs,ilower,iupper,printlevel], [Int32,Int32,Int32,Float64,Float64,Int64,Int64,Int64])
mpi_tensor_solve_(rows,ncols,cols,values,rhs,ilower,iupper,solver,printlevel)
end | ADCME | https://github.com/kailaix/ADCME.jl.git |
|
[
"MIT"
] | 0.7.3 | 4ecfc24dbdf551f92b5de7ea2d99da3f7fde73c9 | code | 1719 | include("ops.jl")
using Test
# TODO: specify your input parameters
indices = [
0 0
0 2
0 4
1 3
1 4
4 2
]
values = [1.0;2.0;3.0;4.0;5.0;6.0]
ilower = 10
iupper = 15
rows, ncols, cols, out = mpi_create_matrix(indices,values,ilower,iupper)
u = mpi_get_matrix(rows,ncols,cols,ilower,iupper,out, 5)
sess = Session(); init(sess)
@test run(sess, Array(u)) ≈ [1.0 0.0 2.0 0.0 3.0; 0.0 0.0 0.0 4.0 5.0; 0.0 0.0 0.0 0.0 0.0; 0.0 0.0 0.0 0.0 0.0; 0.0 0.0 6.0 0.0 0.0; 0.0 0.0 0.0 0.0 0.0]
# uncomment it for testing gradients
# error()
# TODO: change your test parameter to `m`
# in the case of `multiple=true`, you also need to specify which component you are testing
# gradient check -- v
function scalar_function(x)
return sum(mpi_get_matrix(rows,ncols,cols,ilower,iupper,x, 5).o.values^2)
end
# TODO: change `m_` and `v_` to appropriate values
m_ = constant(rand(6))
v_ = rand(6)
y_ = scalar_function(m_)
dy_ = gradients(y_, m_)
ms_ = Array{Any}(undef, 5)
ys_ = Array{Any}(undef, 5)
s_ = Array{Any}(undef, 5)
w_ = Array{Any}(undef, 5)
gs_ = @. 1 / 10^(1:5)
for i = 1:5
g_ = gs_[i]
ms_[i] = m_ + g_*v_
ys_[i] = scalar_function(ms_[i])
s_[i] = ys_[i] - y_
w_[i] = s_[i] - g_*sum(v_.*dy_)
end
sess = Session(); init(sess)
sval_ = run(sess, s_)
wval_ = run(sess, w_)
close("all")
loglog(gs_, abs.(sval_), "*-", label="finite difference")
loglog(gs_, abs.(wval_), "+-", label="automatic differentiation")
loglog(gs_, gs_.^2 * 0.5*abs(wval_[1])/gs_[1]^2, "--",label="\$\\mathcal{O}(\\gamma^2)\$")
loglog(gs_, gs_ * 0.5*abs(sval_[1])/gs_[1], "--",label="\$\\mathcal{O}(\\gamma)\$")
plt.gca().invert_xaxis()
legend()
xlabel("\$\\gamma\$")
ylabel("Error")
savefig("test.png") | ADCME | https://github.com/kailaix/ADCME.jl.git |
|
[
"MIT"
] | 0.7.3 | 4ecfc24dbdf551f92b5de7ea2d99da3f7fde73c9 | code | 1696 | include("ops.jl")
using Test
# TODO: specify your input parameters
indices = [
0 0
0 2
0 4
1 3
1 4
4 2
]
values = [1.0;2.0;3.0;4.0;5.0;6.0]
ilower = 10
iupper = 15
rows, ncols, cols, out = mpi_create_matrix(indices,values,ilower,iupper)
sess = Session(); init(sess)
@test run(sess, rows) == Int32[0, 1, 4] .+ ilower
@test run(sess, cols) == Int32[0, 2, 4, 3, 4, 2]
@test run(sess, ncols) == Int32[3, 2, 1]
@test run(sess, out) == [1.0, 2.0, 3.0, 4.0, 5.0, 6.0]
# uncomment it for testing gradients
# error()
# TODO: change your test parameter to `m`
# in the case of `multiple=true`, you also need to specify which component you are testing
# gradient check -- v
function scalar_function(x)
return sum(mpi_create_matrix(indices,x,ilower,iupper)[end]^2)
end
# TODO: change `m_` and `v_` to appropriate values
m_ = constant(rand(6))
v_ = rand(6)
y_ = scalar_function(m_)
dy_ = gradients(y_, m_)
ms_ = Array{Any}(undef, 5)
ys_ = Array{Any}(undef, 5)
s_ = Array{Any}(undef, 5)
w_ = Array{Any}(undef, 5)
gs_ = @. 1 / 10^(1:5)
for i = 1:5
g_ = gs_[i]
ms_[i] = m_ + g_*v_
ys_[i] = scalar_function(ms_[i])
s_[i] = ys_[i] - y_
w_[i] = s_[i] - g_*sum(v_.*dy_)
end
sess = Session(); init(sess)
sval_ = run(sess, s_)
wval_ = run(sess, w_)
close("all")
loglog(gs_, abs.(sval_), "*-", label="finite difference")
loglog(gs_, abs.(wval_), "+-", label="automatic differentiation")
loglog(gs_, gs_.^2 * 0.5*abs(wval_[1])/gs_[1]^2, "--",label="\$\\mathcal{O}(\\gamma^2)\$")
loglog(gs_, gs_ * 0.5*abs(sval_[1])/gs_[1], "--",label="\$\\mathcal{O}(\\gamma)\$")
plt.gca().invert_xaxis()
legend()
xlabel("\$\\gamma\$")
ylabel("Error")
savefig("test.png") | ADCME | https://github.com/kailaix/ADCME.jl.git |
|
[
"MIT"
] | 0.7.3 | 4ecfc24dbdf551f92b5de7ea2d99da3f7fde73c9 | code | 1742 | include("ops.jl")
using Test
mpi_init()
A = Float64[1 0 2 0 3
0 0 1 2 3
0 3 4 1 0
0 4 2 1 0
1 1 2 0 1]
A = A * A'
B = SparseTensor(A)
rhs = rand(5)
ilower = 0
iupper = 4
solver = "GMRES"
printlevel = 2
rows, ncols, cols, out = mpi_create_matrix(B.o.indices,B.o.values,ilower,iupper)
# TODO: specify your input parameters
# error()
u = mpi_tensor_solve(rows,ncols,cols,out,rhs,ilower,iupper,solver,printlevel)
sess = Session(); init(sess)
u_out = run(sess, u)
u_ref = A\rhs
@show u_out - u_ref
# uncomment it for testing gradients
error()
# TODO: change your test parameter to `m`
# in the case of `multiple=true`, you also need to specify which component you are testing
# gradient check -- v
function scalar_function(m)
return sum(mpi_tensor_solve(rows,ncols,cols,values,rhs,ilower,iupper,solver,printlevel)^2)
end
# TODO: change `m_` and `v_` to appropriate values
m_ = constant(rand(10,20))
v_ = rand(10,20)
y_ = scalar_function(m_)
dy_ = gradients(y_, m_)
ms_ = Array{Any}(undef, 5)
ys_ = Array{Any}(undef, 5)
s_ = Array{Any}(undef, 5)
w_ = Array{Any}(undef, 5)
gs_ = @. 1 / 10^(1:5)
for i = 1:5
g_ = gs_[i]
ms_[i] = m_ + g_*v_
ys_[i] = scalar_function(ms_[i])
s_[i] = ys_[i] - y_
w_[i] = s_[i] - g_*sum(v_.*dy_)
end
sess = Session(); init(sess)
sval_ = run(sess, s_)
wval_ = run(sess, w_)
close("all")
loglog(gs_, abs.(sval_), "*-", label="finite difference")
loglog(gs_, abs.(wval_), "+-", label="automatic differentiation")
loglog(gs_, gs_.^2 * 0.5*abs(wval_[1])/gs_[1]^2, "--",label="\$\\mathcal{O}(\\gamma^2)\$")
loglog(gs_, gs_ * 0.5*abs(sval_[1])/gs_[1], "--",label="\$\\mathcal{O}(\\gamma)\$")
plt.gca().invert_xaxis()
legend()
xlabel("\$\\gamma\$")
ylabel("Error")
| ADCME | https://github.com/kailaix/ADCME.jl.git |
|
[
"MIT"
] | 0.7.3 | 4ecfc24dbdf551f92b5de7ea2d99da3f7fde73c9 | code | 1919 | using ADCME
using PyCall
using LinearAlgebra
using PyPlot
using Random
Random.seed!(233)
function mpi_tensor_transpose(row,col,ncol,val,n,rank,nt)
require_mpi()
mpi_tensor_transpose_ = load_op_and_grad("./build/libMPITensor.so","mpi_tensor_transpose", multiple=true)
row,col,ncol,val,n,rank,nt = convert_to_tensor(Any[row,col,ncol,val,n,rank,nt], [Int32,Int32,Int32,Float64,Int64,Int64,Int64])
indices, vals = mpi_tensor_transpose_(row,col,ncol,val,n,rank,nt)
end
mpi_init()
row = [0;1]
col = [0;1;0;1]
ncol = [2;2]
val = [1.0;2.0;5.0;6.0]
n = 2
rank = 0
nt = 2
# TODO: specify your input parameters
u = mpi_tensor_transpose(row,col,ncol,val,n,rank,nt)
sess = Session(); init(sess)
@show run(sess, u)
# uncomment it for testing gradients
error()
# TODO: change your test parameter to `m`
# in the case of `multiple=true`, you also need to specify which component you are testing
# gradient check -- v
function scalar_function(m)
return sum(mpi_tensor_transpose(row,col,ncol,val,n,rank,nt)^2)
end
# TODO: change `m_` and `v_` to appropriate values
m_ = constant(rand(10,20))
v_ = rand(10,20)
y_ = scalar_function(m_)
dy_ = gradients(y_, m_)
ms_ = Array{Any}(undef, 5)
ys_ = Array{Any}(undef, 5)
s_ = Array{Any}(undef, 5)
w_ = Array{Any}(undef, 5)
gs_ = @. 1 / 10^(1:5)
for i = 1:5
g_ = gs_[i]
ms_[i] = m_ + g_*v_
ys_[i] = scalar_function(ms_[i])
s_[i] = ys_[i] - y_
w_[i] = s_[i] - g_*sum(v_.*dy_)
end
sess = Session(); init(sess)
sval_ = run(sess, s_)
wval_ = run(sess, w_)
close("all")
loglog(gs_, abs.(sval_), "*-", label="finite difference")
loglog(gs_, abs.(wval_), "+-", label="automatic differentiation")
loglog(gs_, gs_.^2 * 0.5*abs(wval_[1])/gs_[1]^2, "--",label="\$\\mathcal{O}(\\gamma^2)\$")
loglog(gs_, gs_ * 0.5*abs(sval_[1])/gs_[1], "--",label="\$\\mathcal{O}(\\gamma)\$")
plt.gca().invert_xaxis()
legend()
xlabel("\$\\gamma\$")
ylabel("Error")
| ADCME | https://github.com/kailaix/ADCME.jl.git |
|
[
"MIT"
] | 0.7.3 | 4ecfc24dbdf551f92b5de7ea2d99da3f7fde73c9 | code | 1071 | using ADCME
using PyCall
using LinearAlgebra
using PyPlot
using Random
using DelimitedFiles
Random.seed!(233)
function mpi_tensor_transpose(row,col,ncol,val,n,rank,nt)
require_mpi()
mpi_tensor_transpose_ = load_op_and_grad("./build/libMPITensor.so","mpi_tensor_transpose", multiple=true)
row,col,ncol,val,n,rank,nt = convert_to_tensor(Any[row,col,ncol,val,n,rank,nt], [Int32,Int32,Int32,Float64,Int64,Int64,Int64])
indices, vals = mpi_tensor_transpose_(row,col,ncol,val,n,rank,nt)
end
mpi_init()
if mpi_rank()==0
row = [0;1]
col = [0;1;2;3;0;1;2;3]
ncol = [4;4]
val = Float64[1;2;3;4;5;6;7;8]
else
row = [2,3]
col = [0;1;2;3;0;1;2;3]
ncol = [4;4]
val = Float64[1;2;3;4;5;6;7;8] .+ 8.0
end
n = 2
rank = mpi_rank()
nt = 4
# TODO: specify your input parameters
u = mpi_tensor_transpose(row,col,ncol,val,n,rank,nt)
sess = Session(); init(sess)
indices, vals = run(sess, u)
sleep(mpi_rank())
println("===================================================")
writedlm(stdout, indices)
writedlm(stdout, vals)
mpi_finalize() | ADCME | https://github.com/kailaix/ADCME.jl.git |
|
[
"MIT"
] | 0.7.3 | 4ecfc24dbdf551f92b5de7ea2d99da3f7fde73c9 | code | 954 | using ADCME
using PyCall
using LinearAlgebra
using PyPlot
using Random
using DelimitedFiles
Random.seed!(233)
function mpi_tensor_transpose(row,col,ncol,val,n,rank,nt)
require_mpi()
mpi_tensor_transpose_ = load_op_and_grad("./build/libMPITensor.so","mpi_tensor_transpose", multiple=true)
row,col,ncol,val,n,rank,nt = convert_to_tensor(Any[row,col,ncol,val,n,rank,nt], [Int32,Int32,Int32,Float64,Int64,Int64,Int64])
indices, vals = mpi_tensor_transpose_(row,col,ncol,val,n,rank,nt)
end
mpi_init()
row = [0;1] .+ mpi_rank()*2
col = [0;1;2;3;4;5;0;1;2;3;4;5]
ncol = [6;6]
val = Array(1:12) .+ 12 * mpi_rank()
n = 2
rank = mpi_rank()
nt = 6
# TODO: specify your input parameters
u = mpi_tensor_transpose(row,col,ncol,val,n,rank,nt)
sess = Session(); init(sess)
indices, vals = run(sess, u)
sleep(mpi_rank())
println("===================================================")
writedlm(stdout, indices)
writedlm(stdout, vals)
mpi_finalize() | ADCME | https://github.com/kailaix/ADCME.jl.git |
|
[
"MIT"
] | 0.7.3 | 4ecfc24dbdf551f92b5de7ea2d99da3f7fde73c9 | code | 273 | using ADCME
using PyCall
using LinearAlgebra
using PyPlot
using Random
using DelimitedFiles
using SparseArrays
Random.seed!(233)
mpi_init()
sp = sprand(10,10,0.3)
SP = mpi_SparseTensor(sp)
SPt = SP'
sess = Session(); init(sess)
Array(run(sess, SP))-Array(run(sess, SPt))' | ADCME | https://github.com/kailaix/ADCME.jl.git |
|
[
"MIT"
] | 0.7.3 | 4ecfc24dbdf551f92b5de7ea2d99da3f7fde73c9 | code | 1386 | cd(@__DIR__)
using Pkg
Pkg.activate(@__DIR__)
Pkg.instantiate()
using Documenter, ADCME
makedocs(sitename="ADCME", modules=[ADCME],
pages = Any[
"index.md",
"Tutorial"=>["tutorial.md","videos_and_slides.md", "tu_whatis.md", "tu_basic.md", "tu_optimization.md", "tu_sparse.md", "tu_fd.md", "tu_fem.md",
"tu_inv.md", "tu_recipe.md", "tu_nn.md", "tu_implicit.md", "tu_customop.md","tu_debug.md", "exercise.md"],
"Resources" => ["newton_raphson.md", "parallel.md", "optimizers.md", "optim.md", "ode.md", "global.md",
"julia_customop.md", "nn.md", "ot.md", "resource_manager.md", "alphascheme.md", "factorization.md", "customopt.md",
"options.md", "mcmc.md", "mpi.md", "mpi_benchmark.md", "multithreading.md", "rbf.md", "topopt.md", "quadrature.md",
"sqlite3.md", "hessian.md", "plotly.md"],
"Physics Informed Machine Learning" => ["fdtd.md"],
"Deep Learning Schemes" => ["vae.md", "flow.md", "convnet.md", "bnn.md", "reinforcement_learning.md"],
"Developer Guide" => ["designpattern.md", "toolchain.md", "installmpi.md", "windows_installation.md", "docker.md"],
"Applications" => ["apps.md", "apps_ana.md", "apps_levy.md",
"apps_constitutive_law.md", "apps_ad.md", "apps_adseismic.md", "apps_nnfem.md"],
"api.md"
],
authors = "Kailai Xu")
deploydocs(
repo = "github.com/kailaix/ADCME.jl.git",
) | ADCME | https://github.com/kailaix/ADCME.jl.git |
|
[
"MIT"
] | 0.7.3 | 4ecfc24dbdf551f92b5de7ea2d99da3f7fde73c9 | code | 2769 | using ADCME
using PyPlot
using ProgressMeter
using Statistics
function f(x, σ)
ε = randn(size(x)...) * σ
return 10 * sin.(2π*x) + ε
end
batch_size = 32
noise = 1.0
X = reshape(LinRange(-0.5, 0.5, batch_size)|>Array, :, 1)
y = f(X, noise)
y_true = f(X, 0.0)
close("all")
scatter(X, y, marker="+", label="Training Data")
plot(X, y_true, label="Truth")
legend()
savefig("bnn_training_data.png")
mutable struct VariationalLayer
units
activation
prior_σ1
prior_σ2
prior_π1
prior_π2
Wμ
bμ
Wρ
bρ
init_σ
end
function VariationalLayer(units; activation=relu, prior_σ1=1.5, prior_σ2=0.1,
prior_π1=0.5)
init_σ = sqrt(
prior_π1 * prior_σ1^2 + (1-prior_π1)*prior_σ2^2
)
VariationalLayer(units, activation, prior_σ1, prior_σ2, prior_π1, 1-prior_π1,
missing, missing, missing, missing, init_σ)
end
function kl_loss(vl, w, μ, σ)
dist = ADCME.Normal(μ,σ)
return sum(logpdf(dist, w)-logprior(vl, w))
end
function logprior(vl, w)
dist1 = ADCME.Normal(constant(0.0), vl.prior_σ1)
dist2 = ADCME.Normal(constant(0.0), vl.prior_σ2)
log(vl.prior_π1*exp(logpdf(dist1, w)) + vl.prior_π2*exp(logpdf(dist2, w)))
end
function (vl::VariationalLayer)(x)
x = constant(x)
if ismissing(vl.bμ)
vl.Wμ = get_variable(vl.init_σ*randn(size(x,2), vl.units))
vl.Wρ = get_variable(zeros(size(x,2), vl.units))
vl.bμ = get_variable(vl.init_σ*randn(1, vl.units))
vl.bρ = get_variable(zeros(1, vl.units))
end
Wσ = softplus(vl.Wρ)
W = vl.Wμ + Wσ.*normal(size(vl.Wμ)...)
bσ = softplus(vl.bρ)
b = vl.bμ + bσ.*normal(size(vl.bμ)...)
loss = kl_loss(vl, W, vl.Wμ, Wσ) + kl_loss(vl, b, vl.bμ, bσ)
out = vl.activation(x * W + b)
return out, loss
end
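# The layer lazily creates its variational parameters on the first call
# (means Wμ, bμ and unconstrained scales Wρ, bρ with σ = softplus(ρ)),
# samples weights with the reparameterization trick W = μ + σ .* ε, and returns
# both the activated output and a Monte Carlo KL-style penalty
# log q(w | μ, σ) - log p(w) under the two-component Gaussian mixture prior,
# in the spirit of Bayes-by-Backprop.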
function neg_log_likelihood(y_obs, y_pred, σ)
y_obs = constant(y_obs)
dist = ADCME.Normal(y_pred, σ)
sum(-logpdf(dist, y_obs))
end
ipt = placeholder(X)
x, loss1 = VariationalLayer(20, activation=relu)(ipt)
x, loss2 = VariationalLayer(20, activation=relu)(x)
x, loss3 = VariationalLayer(1, activation=x->x)(x)
loss_lf = neg_log_likelihood(y, x, noise)
loss = loss1 + loss2 + loss3 + loss_lf
opt = AdamOptimizer(0.08).minimize(loss)
sess = Session(); init(sess)
@showprogress for i = 1:5000
run(sess, opt)
end
X_test = reshape(LinRange(-1.5,1.5,32)|>Array, :, 1)
y_pred_list = []
@showprogress for i = 1:10000
y_pred = run(sess, x, ipt=>X_test)
push!(y_pred_list, y_pred)
end
y_preds = hcat(y_pred_list...)
y_mean = mean(y_preds, dims=2)[:]
y_std = std(y_preds, dims=2)[:]
close("all")
plot(X_test, y_mean)
scatter(X[:], y[:], marker="+")
fill_between(X_test[:], y_mean-2y_std, y_mean+2y_std, alpha=0.5)
savefig("bnn_prediction.png")
| ADCME | https://github.com/kailaix/ADCME.jl.git |
|
[
"MIT"
] | 0.7.3 | 4ecfc24dbdf551f92b5de7ea2d99da3f7fde73c9 | code | 6031 | # Adapted from https://github.com/karpathy/pytorch-normalizing-flows
using Revise
using ADCME
using PyCall
using PyPlot
using Random
# `nmoons` is adapted from https://github.com/wildart/nmoons
function nmoons(::Type{T}, n::Int=100, c::Int=2;
shuffle::Bool=false, ε::Real=0.1, d::Int = 2,
translation::Vector{T}=zeros(T, d),
rotations::Dict{Pair{Int,Int},T} = Dict{Pair{Int,Int},T}(),
seed::Union{Int,Nothing}=nothing) where {T <: Real}
rng = seed === nothing ? Random.GLOBAL_RNG : MersenneTwister(Int(seed))
ssize = floor(Int, n/c)
ssizes = fill(ssize, c)
ssizes[end] += n - ssize*c
@assert sum(ssizes) == n "Incorrect partitioning"
pi = convert(T, π)
R(θ) = [cos(θ) -sin(θ); sin(θ) cos(θ)]
X = zeros(d,0)
for (i, s) in enumerate(ssizes)
circ_x = cos.(range(zero(T), pi, length=s)).-1.0
circ_y = sin.(range(zero(T), pi, length=s))
C = R(-(i-1)*(2*pi/c)) * hcat(circ_x, circ_y)'
C = vcat(C, zeros(d-2, s))
dir = zeros(d)-C[:,end] # translation direction
X = hcat(X, C .+ dir.*translation)
end
y = vcat([fill(i,s) for (i,s) in enumerate(ssizes)]...)
if shuffle
idx = randperm(rng, n)
X, y = X[:, idx], y[idx]
end
# Add noise to the dataset
if ε > 0.0
X += randn(rng, size(X)).*convert(T,ε/d)
end
# Rotate dataset
for ((i,j),θ) in rotations
X[[i,j],:] .= R(θ)*view(X,[i,j],:)
end
return X, y
end
function sample_moons(n)
X, _ = nmoons(Float64, n, 2, ε=0.05, d=2, translation=[0.25, -0.25])
return Array(X')
end
#------------------------------------------------------------------------------------------
# RealNVP
function mlp(x, k, id)
x = constant(x)
variable_scope("layer$k$id") do
x = dense(x, 24, activation="leaky_relu")
x = dense(x, 24, activation="leaky_relu")
x = dense(x, 24, activation="leaky_relu")
x = dense(x, 1)
end
return x
end
flows = [AffineHalfFlow(2, mod(i,2)==1, x->mlp(x, i, 0), x->mlp(x, i, 1)) for i = 0:8]
#------------------------------------------------------------------------------------------
# NICE
# function mlp(x, k, id)
# x = constant(x)
# variable_scope("layer$k$id") do
# x = dense(x, 24, activation="leaky_relu")
# x = dense(x, 24, activation="leaky_relu")
# x = dense(x, 24, activation="leaky_relu")
# x = dense(x, 1)
# end
# return x
# end
# flow1 = [AffineHalfFlow(2, mod(i,2)==1, missing, x->mlp(x, i, 1)) for i = 0:4]
# flow2 = [AffineConstantFlow(2, shift=false)]
# flows = [flow1;flow2]
# SlowMAF
#------------------------------------------------------------------------------------------
# function mlp(x, k, id)
# x = constant(x)
# variable_scope("layer$k$id") do
# x = dense(x, 24, activation="leaky_relu")
# x = dense(x, 24, activation="leaky_relu")
# x = dense(x, 24, activation="leaky_relu")
# x = dense(x, 2)
# end
# return x
# end
# flows = [SlowMAF(2, mod(i,2)==1, [x->mlp(x, i, 0)]) for i = 0:3]
# MAF
#------------------------------------------------------------------------------------------
# flows = [MAF(2, mod(i,2)==1, [24, 24, 24], name="layer$i") for i = 0:3]
# IAF
#------------------------------------------------------------------------------------------
# flows = [IAF(2, mod(i,2)==1, [24, 24, 24], name="layer$i") for i = 0:3]
# prior = ADCME.MultivariateNormalDiag(loc=zeros(2))
# model = NormalizingFlowModel(prior, flows)
# Insert ActNorm to any of the flows
#------------------------------------------------------------------------------------------
# flow2 = [ActNorm(2, "ActNorm$i") for i = 1:length(flows)]
# flows = permutedims(hcat(flow2, flows))[:]
# # error()
# # msample = rand(model,1)
# # zs, prior_logprob, log_det = model([0.0040 0.4426])
# # sess = Session(); init(sess)
# # run(sess, msample)
# # run(sess,zs)
# GLOW
#------------------------------------------------------------------------------------------
# function mlp(x, k, id)
# x = constant(x)
# variable_scope("layer$k$id") do
# x = dense(x, 24, activation="leaky_relu")
# x = dense(x, 24, activation="leaky_relu")
# x = dense(x, 24, activation="leaky_relu")
# x = dense(x, 1)
# end
# return x
# end
# flows = [Invertible1x1Conv(2, "conv$i") for i = 0:2]
# norms = [ActNorm(2, "ActNorm$i") for i = 0:2]
# couplings = [AffineHalfFlow(2, mod(i, 2)==1, x->mlp(x, i, 0), x->mlp(x, i, 1)) for i = 0:length(flows)-1]
# flows = permutedims(hcat(norms, flows, couplings))[:]
#------------------------------------------------------------------------------------------
# Neural Splines Coupling
# function mlp(x, k, id)
# x = constant(x)
# variable_scope("fc$k$id") do
# x = dense(x, 16, activation="leaky_relu")
# x = dense(x, 16, activation="leaky_relu")
# x = dense(x, 16, activation="leaky_relu")
# x = dense(x, 3K-1)
# end
# return x
# end
# K = 8
# flows = [NeuralCouplingFlow(2, x->mlp(x, i, 0), x->mlp(x, i, 1), K) for i = 0:2]
# convs = [Invertible1x1Conv(2, "conv$i") for i = 0:2]
# norms = [ActNorm(2, "ActNorm$i") for i = 0:2]
# flows = permutedims(hcat(norms, convs, flows))[:]
#------------------------------------------------------------------------------------------
prior = ADCME.MultivariateNormalDiag(loc=zeros(2))
model = NormalizingFlowModel(prior, flows)
x = placeholder(rand(128,2))
zs, prior_logpdf, logdet = model(x)
log_pdf = prior_logpdf + logdet
loss = -sum(log_pdf)
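# Maximum-likelihood training via the change-of-variables formula:
# log p(x) = log p_Z(z) + log|det ∂z/∂x|, so the loss is the negative sum of the
# prior log-density of the transformed samples plus the accumulated log-determinant.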
model_samples = rand(model, 128*8)
sess = Session(); init(sess)
opt = AdamOptimizer(1e-4).minimize(loss)
sess = Session(); init(sess)
for i = 1:10000
_, l = run(sess, [opt, loss], x=>sample_moons(128))
if mod(i,100)==0
@info i, l
end
end
z = run(sess, model_samples[end])
x = sample_moons(128*8)
scatter(x[:,1], x[:,2], c="b", s=5, label="data")
scatter(z[:,1], z[:,2], c="r", s=5, label="prior --> posterior")
axis("scaled"); xlabel("x"); ylabel("y")# | ADCME | https://github.com/kailaix/ADCME.jl.git |
|
[
"MIT"
] | 0.7.3 | 4ecfc24dbdf551f92b5de7ea2d99da3f7fde73c9 | code | 2088 | using ADCME
using PyPlot
using ADCMEKit
n = 200
pml = 30
C = 1000.0
NT = 1000
Δt = 1.0/NT
x0 = LinRange(0, 1, n+1)
h = 1/n
xE = Array((0:n+2pml)*h .- pml*h)
xH = (xE[2:end]+xE[1:end-1])/2
N = n + 2pml + 1
σE = zeros(N)
for i = 1:pml
d = i*h
σE[pml + n + 1 + i] = C* (d/(pml*h))^3
σE[pml+1-i] = C* (d/(pml*h))^3
end
σH = zeros(N-1)
for i = 1:pml
d = (i-1/2)*h
σH[pml + n + i] = C* (d/(pml*h))^3
σH[pml+1-i] = C* (d/(pml*h))^3
end
function ricker(dt = 0.002, f0 = 5.0)
nw = 2/(f0*dt)
nc = floor(Int, nw/2)
t = dt*collect(-nc:1:nc)
b = (π*f0*t).^2
w = @. (1 - 2b)*exp(-b)
end
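# ricker returns a Ricker wavelet w(t) = (1 - 2 (π f0 t)^2) exp(-(π f0 t)^2)
# sampled at spacing dt and centered at t = 0; it is used below as the source
# time function injected at the location marked by Z.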
R = ricker()
if length(R)<NT+1
R = [R;zeros(NT+1-length(R))]
end
R = R[1:NT+1]
# tn = ( 0:NT ) *Δt
# R = @. exp( -20(tn-0.3)^2)
# error()
R_ = constant(R)
cH = ones(length(xH)) * 2.
cH[pml + Int(round(0.5÷h)): pml + Int(round(0.75÷h))] .= 1.0
cH[pml + Int(round(0.75÷h)): end] .= 1.
cE = (cH[1:end-1]+cH[2:end])/2
Z = zeros(N)
Z[pml + pml÷2] = 1.0
Z = Z[2:end-1]
function condition(i, E_arr, H_arr)
i<=NT+1
end
function body(i, E_arr, H_arr)
E = read(E_arr, i-1)
H = read(H_arr, i-1)
ΔH = cH * (E[2:end]-E[1:end-1])/h - σH*H
H += ΔH * Δt
ΔE = cE * (H[2:end]-H[1:end-1])/h - σE[2:end-1]*E[2:end-1] + R_[i] * Z
E = scatter_add(E, 2:N-1, ΔE * Δt)
# E = scatter_update(E, N÷2, R_[i])
i+1, write(E_arr, i, E), write(H_arr, i, H)
end
E_arr = TensorArray(NT+1)
H_arr = TensorArray(NT+1)
E_arr = write(E_arr, 1, zeros(N))
H_arr = write(H_arr, 1, zeros(N-1))
i = constant(2, dtype = Int32)
_, E, H = while_loop(condition, body, [i, E_arr, H_arr])
E = stack(E)
H = stack(H)
sess = Session(); init(sess)
E_, H_ = run(sess, [E, H])
pl, = plot([], [], ".-")
xlim(-0.5,1.5)
ylim(minimum(E_), maximum(E_))
xlabel("x")
ylabel("y")
t = title("time = 0.0000")
function update(i)
t.set_text("time = $(round(i*Δt, digits=4))")
pl.set_data([xE E_[i,:]]'|>Array)
end
p = animate(update, 1:10:NT+1)
# saveanim(p, "fdtd.gif") | ADCME | https://github.com/kailaix/ADCME.jl.git |
|
[
"MIT"
] | 0.7.3 | 4ecfc24dbdf551f92b5de7ea2d99da3f7fde73c9 | code | 2125 | using ADCME
using PyPlot
using ADCMEKit
n = 200
pml = 30
C = 1000.0
NT = 1000
Δt = 1.0/NT
x0 = LinRange(0, 1, n+1)
h = 1/n
xE = Array((0:n+2pml)*h .- pml*h)
xH = (xE[2:end]+xE[1:end-1])/2
N = n + 2pml + 1
σE = zeros(N)
for i = 1:pml
d = i*h
σE[pml + n + 1 + i] = C* (d/(pml*h))^3
σE[pml+1-i] = C* (d/(pml*h))^3
end
σH = zeros(N-1)
for i = 1:pml
d = (i-1/2)*h
σH[pml + n + i] = C* (d/(pml*h))^3
σH[pml+1-i] = C* (d/(pml*h))^3
end
function ricker(dt = 0.002, f0 = 5.0)
nw = 2/(f0*dt)
nc = floor(Int, nw/2)
t = dt*collect(-nc:1:nc)
b = (π*f0*t).^2
w = @. (1 - 2b)*exp(-b)
end
R = ricker()
if length(R)<NT+1
R = [R;zeros(NT+1-length(R))]
end
R = R[1:NT+1]
# tn = ( 0:NT ) *Δt
# R = @. exp( -20(tn-0.3)^2)
# error()
R_ = constant(R)
cH = ones(length(xH)) * 2.
cH[pml + Int(round(0.5÷h)): pml + Int(round(0.75÷h))] .= 1.0
cH[pml + Int(round(0.75÷h)): end] .= 1.
mask = ones(length(xH))
mask[pml+1:end-pml] .= 0.0
c_ = Variable(ones(N-2pml))
x0 = collect(((pml:N-pml -1) .- pml)*h)
# c_ = squeeze(fc(x0, [20,20,1])) + 1.0
# c_ = 2tanh(c_) + 2.5
cH = scatter_update(constant(cH.*mask), pml+1:N-pml, c_)
# cH = constant(cH)
cE = (cH[1:end-1]+cH[2:end])/2
Z = zeros(N)
Z[pml + pml÷2] = 1.0
Z = Z[2:end-1]
function condition(i, E_arr, H_arr)
i<=NT+1
end
function body(i, E_arr, H_arr)
E = read(E_arr, i-1)
H = read(H_arr, i-1)
ΔH = cH * (E[2:end]-E[1:end-1])/h - σH*H
H += ΔH * Δt
ΔE = cE * (H[2:end]-H[1:end-1])/h - σE[2:end-1]*E[2:end-1] + R_[i] * Z
E = scatter_add(E, 2:N-1, ΔE * Δt)
# E = scatter_update(E, N÷2, R_[i])
i+1, write(E_arr, i, E), write(H_arr, i, H)
end
E_arr = TensorArray(NT+1)
H_arr = TensorArray(NT+1)
E_arr = write(E_arr, 1, zeros(N))
H_arr = write(H_arr, 1, zeros(N-1))
i = constant(2, dtype = Int32)
_, E, H = while_loop(condition, body, [i, E_arr, H_arr])
E = stack(E); E = set_shape(E, (NT+1, N))
H = stack(H)
# `E_` is the observed electric field at the receiver location (e.g. produced by the
# companion forward FDTD script); it must be available before building this loss.
loss = sum((E[:,pml+pml÷2] - E_[:, pml+pml÷2])^2)
sess = Session(); init(sess)
BFGS!(sess, loss) | ADCME | https://github.com/kailaix/ADCME.jl.git |
|
[
"MIT"
] | 0.7.3 | 4ecfc24dbdf551f92b5de7ea2d99da3f7fde73c9 | code | 513 | using Revise
using ADCME
using PyPlot
A = rand(10,10)
function hfunc(x)
A*x
end
θ = ones(10)
obs = hfunc(θ) + randn(10)
θ_est = A\obs
mc = MCMCSimple(obs, hfunc, 1.0, θ, 0., 2., 0.1)
sim = simulate(mc, 20000)
diagnose(mc)
plt.suptitle("Step size = 0.1")
mc = MCMCSimple(obs, hfunc, 1.0, θ, 0., 2., 0.01)
sim = simulate(mc, 20000)
diagnose(mc)
plt.suptitle("Step size = 0.01")
mc = MCMCSimple(obs, hfunc, 1.0, θ, 0., 2., 1.0)
sim = simulate(mc, 20000)
diagnose(mc)
plt.suptitle("Step size = 1.0")
| ADCME | https://github.com/kailaix/ADCME.jl.git |
|
[
"MIT"
] | 0.7.3 | 4ecfc24dbdf551f92b5de7ea2d99da3f7fde73c9 | code | 1209 | using LinearAlgebra
using PyPlot
using Statistics
using Distributions
using ProgressMeter
σ = 1
τ = sqrt(10)
μ = 5
n = 5
X = [9.37;10.18;9.16;11.60;10.33]
μn = mean(X)*n/σ^2/(n/σ^2 + 1/τ^2) + μ*(1/τ^2)/(n/σ^2 + 1/τ^2)
τn2 = 1/(n/σ^2+1/τ^2)
# Hyperparameters
# proposal step size
δ = 3/100
burnin = 2000
N = 10000
θ0 = 0.0
function logf(θ)
-sum((X.-θ).^2)/2σ^2 - (θ-μ)^2/2τ^2
end
function proposal(x)
x + (rand()-0.5)*2 * δ
end
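# Sketch of the Metropolis acceptance rule used in the loop below: a proposal x* is
# accepted with probability min(1, f(x*)/f(x)); working with log densities, this is
# the test  log(rand()) < logf(x*) - logf(x), i.e. the comparison on Δ.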
sim = zeros(N)
sim[1] = θ0
@showprogress for i = 1:N-1
x = sim[i]
x_star = proposal(x)
Δ = logf(x_star) - logf(x)
if log(rand())<Δ
sim[i+1] = x_star
else
sim[i+1] = x
end
end
L = logf.(sim)
# sim = sim[burnin+1:end]
figure(figsize=(12,4))
subplot(131)
plot(sim)
plot(1:length(sim), ones(length(sim))*μn, "--")
title("\$\\theta\$ Value")
xlabel("Iteration")
ylabel("\$\\theta\$")
subplot(132)
plot(L)
title("Log likelihood")
xlabel("Iteration")
ylabel("Log likelihood")
subplot(133)
hist(sim[burnin+1:end], density=true, bins=50)
y = pdf.(Normal(μn, sqrt(τn2)), LinRange(8.5,11.5,100))
plot(LinRange(8.5,11.5,100), y, label="Exact")
title("Distribution")
xlabel("\$\\theta\$")
ylabel("Density")
legend()
tight_layout()
| ADCME | https://github.com/kailaix/ADCME.jl.git |
|
[
"MIT"
] | 0.7.3 | 4ecfc24dbdf551f92b5de7ea2d99da3f7fde73c9 | code | 2712 | using ADCME
using PyPlot
using MLDatasets
using ProgressMeter
function encoder(x, n_hidden, n_output, rate)
local μ, σ
variable_scope("encoder") do
y = dense(x, n_hidden, activation = "elu")
y = dropout(y, rate, ADCME.options.training.training)
y = dense(y, n_hidden, activation = "tanh")
y = dropout(y, rate, ADCME.options.training.training)
y = dense(y, 2n_output)
μ = y[:, 1:n_output]
σ = 1e-6 + softplus(y[:,n_output+1:end])
end
return μ, σ
end
function decoder(z, n_hidden, n_output, rate)
local y
variable_scope("decoder") do
y = dense(z, n_hidden, activation="tanh")
y = dropout(y, rate, ADCME.options.training.training)
y = dense(y, n_hidden, activation="elu")
y = dropout(y, rate, ADCME.options.training.training)
y = dense(y, n_output, activation="sigmoid")
end
return y
end
function autoencoder(xh, x, dim_img, dim_z, n_hidden, rate)
μ, σ = encoder(xh, n_hidden, dim_z, rate)
z = μ + σ .* tf.random_normal(size(μ), 0, 1, dtype=tf.float64)
y = decoder(z, n_hidden, dim_img, rate)
y = clip(y, 1e-8, 1-1e-8)
marginal_likelihood = sum(x .* log(y) + (1-x).*log(1-y), dims=2)
KL_divergence = 0.5 * sum(μ^2 + σ^2 - log(1e-8 + σ^2) - 1, dims=2)
marginal_likelihood = mean(marginal_likelihood)
KL_divergence = mean(KL_divergence)
ELBO = marginal_likelihood - KL_divergence
loss = -ELBO
return y, loss, -marginal_likelihood, KL_divergence
end
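# Note (illustrative): `z = μ + σ .* ε` with ε ~ N(0, I) above is the reparameterization
# trick, so gradients flow to μ and σ through the sampled latent code. The returned
# `loss` is the negative ELBO: -E[log p(x|z)] + KL(q(z|x) ‖ N(0, I)).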
function step(epoch)
tx = train_x[1:batch_size,:]
@showprogress for i = 1:div(60000, batch_size)
idx = Array((i-1)*batch_size+1:i*batch_size)
run(sess, opt, x=>train_x[idx,:])
end
y_, loss_, ml_, kl_ = run(sess, [y, loss, ml, KL_divergence],
feed_dict = Dict(
ADCME.options.training.training=>false,
x => tx
))
println("epoch $epoch: L_tot = $(loss_), L_likelihood = $(ml_), L_KL = $(kl_)")
close("all")
for i = 1:3
for j = 1:3
k = (i-1)*3 + j
img = reshape(y_[k,:], 28, 28)'|>Array
subplot(3,3,k)
imshow(img)
end
end
savefig("result$epoch.png")
end
n_hidden = 500
rate = 0.1
dim_z = 20
dim_img = 28^2
batch_size = 128
ADCME.options.training.training = placeholder(true)
x = placeholder(Float64, shape = [128, 28^2])
xh = x
y, loss, ml, KL_divergence = autoencoder(xh, x, dim_img, dim_z, n_hidden, rate)
opt = AdamOptimizer(1e-3).minimize(loss)
# prepare data
train_x = MNIST.traintensor(Float64);
train_x = Array(reshape(train_x, :, 60000)');
sess = Session(); init(sess)
for i = 1:100
step(i)
end | ADCME | https://github.com/kailaix/ADCME.jl.git |
|
[
"MIT"
] | 0.7.3 | 4ecfc24dbdf551f92b5de7ea2d99da3f7fde73c9 | code | 1842 | using ADCME
using JLD2
@load "data.jld2" z
n = 64
noise = parse(Float64, ARGS[1])
τ = Variable(1.0)
x = placeholder(Float64, shape = [n,])
y = placeholder(Float64, shape = [n,])
w = placeholder(Float64, shape = [n,])
σ, κ = 0.08, 0.5
α = 0.5
Δt = 0.01
yhat = (
x + κ * (τ - α*x)*Δt + σ * sqrt(x) * sqrt(Δt) * w + 1/4*σ^2*Δt*(w^2-1)
)/(1+(1-α)*κ*Δt)
θ = Variable(fc_init([2,20,20,20,1]))
hDξ = sigmoid(squeeze(fc([x yhat], [20,20,20,1], θ)))
Dξ = sigmoid(squeeze(fc([x y], [20,20,20,1], θ)))
# LF = mean(log((1-hDξ)/hDξ))
LF = -mean(log(hDξ))
LD = -mean(log(Dξ) + log(1-hDξ))
function generate_batch_data()
idx = rand(1:9999, n)
abs.(z[idx] + noise * randn(n)), abs.(z[idx.+1] + noise * randn(n)), randn(n)
end
optF = RMSPropOptimizer(1e-3).minimize(LF, var_list=[τ])
optD = RMSPropOptimizer(1e-3).minimize(LD, var_list=[θ])
sess = Session(); init(sess)
x0, y0, w0 = generate_batch_data()
fd = Dict(
x => x0, y => y0, w => w0
)
@info run(sess, [LF, LD], feed_dict = fd)
fiter = 1
diter = 5
l1 = []
l2 = []
τs = []
for i = 1:50000
x0, y0, w0 = generate_batch_data()
fd = Dict(
x => x0, y => y0, w => w0
)
ld = 0
for k = 1:diter
_, ld = run(sess, [optD, LD], feed_dict = fd)
end
lf = 0
for k = 1:fiter
_, lf = run(sess, [optF, LF], feed_dict = fd)
end
τ0 = run(sess, τ)
# if length(loss)==0
# loss = [l1[end]]
# end
push!(l1, ld)
push!(l2, lf)
push!(τs, τ0)
if mod(i, 100)==0
@info i, ld, lf, τ0
end
end
kappa = join(string.(τs), ',')
db = Database("noise.db")
execute(db, """
CREATE TABLE IF NOT EXISTS tau (
noise real,
kappa text
)
""")
execute(db, """
INSERT INTO tau VALUES ($noise, \"$kappa\")
""")
close(db)
# close("all")
# plot(τs)
# savefig("test$noise.png") | ADCME | https://github.com/kailaix/ADCME.jl.git |
|
[
"MIT"
] | 0.7.3 | 4ecfc24dbdf551f92b5de7ea2d99da3f7fde73c9 | code | 604 | using ADCME
using PyPlot
using PyCall
mpl = pyimport("tikzplotlib")
db = Database("noise.db")
res = execute(db, """
select noise, kappa from tau
""")|>collect
noise = [x[1] for x in res]
noise = round.(noise, sigdigits = 2)
kappa = [map(z->parse(Float64, z), split(x[2],',')) for x in res]
close("all")
for k = 1:length(noise)
plot(1:100:40000, kappa[k][1:100:40000], label = "\$\\sigma_n = $(noise[k])\$")
end
hlines(0.06, 0, 40000, linestyle = "--", color = "k")
legend()
xlabel("Iterations")
ylabel("\$\\tau\$")
grid("on")
minorticks_on()
mpl.save("with_noise.tex")
savefig("with_noise.png")
| ADCME | https://github.com/kailaix/ADCME.jl.git |
|
[
"MIT"
] | 0.7.3 | 4ecfc24dbdf551f92b5de7ea2d99da3f7fde73c9 | code | 409 | using PyPlot
using JLD2
function simulate(σ, κ, τ)
z = zeros(10000)
α = 0.5
Δt = 0.01
z[1] = τ
for i = 2:length(z)
x = z[i-1]
W = randn()
z[i] = (
x + κ * (τ - α*x)*Δt + σ * sqrt(x) * sqrt(Δt) * W + 1/4*σ^2*Δt*(W^2-1)
)/(1+(1-α)*κ*Δt)
end
z
end
close("all")
z = simulate(0.08, 0.5, 0.06)
@save "data.jld2" z
plot(z)
savefig("test.png") | ADCME | https://github.com/kailaix/ADCME.jl.git |
|
[
"MIT"
] | 0.7.3 | 4ecfc24dbdf551f92b5de7ea2d99da3f7fde73c9 | code | 536 | using ADCME
using PyPlot
db = Database("hessian.db")
close("all")
for seed in [2, 23, 233]
c = execute(db, """
SELECT nlambda, npoints from eigvals where seed=$seed order by npoints
""")
vals = collect(c)
global npoints = [x[2] for x in vals]
vals = [x[1] for x in vals]
plot(npoints, vals, ".-", label = "Seed = $seed")
end
plot(npoints, npoints, "k--")
legend()
xlabel("\$||\\mathcal{I}||\$")
ylabel("Positive Eigenvalues")
ylim(0, 15)
grid("on")
minorticks_on()
savefig("hessian_eigenvalue_rank.png") | ADCME | https://github.com/kailaix/ADCME.jl.git |
|
[
"MIT"
] | 0.7.3 | 4ecfc24dbdf551f92b5de7ea2d99da3f7fde73c9 | code | 787 | using ADCME
using LinearAlgebra
using Random
npoints = parse(Int64, ARGS[1])
SEED = parse(Int64, ARGS[2])
Random.seed!(SEED)
x0 = LinRange(0, 1, 100)|>collect
y0 = sin.(π*x0)
θ = Variable(fc_init([1,20,20,20,1]))
y = fc(x0, [20,20,20,1], θ)
idx = Int64.(round.(collect(LinRange(1, 100, npoints+2))))[2:end-1]
loss = sum((y[idx] - y0[idx])^2)
H = hessian(loss, θ)
sess = Session(); init(sess)
BFGS!(sess, loss)
H0 = run(sess, H)
λ = real.(eigvals(H0))
# count eigenvalues that are positive relative to the largest one
nlambda = length(findall(λ .> maximum(λ) * 1e-6))
db = Database("hessian.db")
execute(db, """
CREATE TABLE IF NOT EXISTS eigvals (
seed integer,
npoints integer,
nlambda integer,
PRIMARY KEY (seed, npoints)
)""")
execute(db, """
INSERT OR REPLACE INTO eigvals VALUES ($SEED, $npoints, $nlambda)
""")
close(db)
| ADCME | https://github.com/kailaix/ADCME.jl.git |
|
[
"MIT"
] | 0.7.3 | 4ecfc24dbdf551f92b5de7ea2d99da3f7fde73c9 | code | 1745 | # reference: https://www.learndatasci.com/tutorials/reinforcement-q-learning-scratch-python-openai-gym/
using PyCall
gym = pyimport("gym")
env = gym.make("Taxi-v3").env
# Training
Q = zeros(env.observation_space.n, env.action_space.n)
alpha = 0.1
gamma = 0.6
epsilon = 0.1
all_epochs = []
all_penalties = []
for i = 1:10000
state = env.reset()
epochs, penalties, reward, = 0, 0, 0
done = false
while !done
if rand() < epsilon
action = env.action_space.sample() # Explore action space
else
action = argmax(Q[state+1,:])-1 # Exploit learned values
end
next_state, reward, done, info = env.step(action)
next_max = maximum(Q[next_state+1, :])
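        # Tabular Q-learning update: Q(s,a) <- (1-α)·Q(s,a) + α·(r + γ·max_a' Q(s',a'))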
Q[state+1, action+1] = (1-alpha) * Q[state+1, action+1] + alpha * (reward + gamma * next_max)
if reward == -10
penalties += 1
end
state = next_state
epochs += 1
end
if mod(i, 100)==0
println("Episode = $i")
end
end
# Testing the learned Q function on a new environment
state = env.reset()
epochs, penalties, reward = 0, 0, 0
done = false
frames = []
while !done
action = argmax(Q[state+1,:]) - 1
state, reward, done, info = env.step(action)
if reward == -10
penalties += 1
end
push!(frames, Dict(
"frame"=> env.render(mode="ansi"),
"state"=> state,
"action"=> action,
"reward"=> reward
)
)
epochs += 1
end
# Visualize the result
for (k,f) in enumerate(frames)
for _ = 1:9
print("\033[F\033[K")
end
print(f["frame"])
print("Time = $k\n")
sleep(0.1)
end | ADCME | https://github.com/kailaix/ADCME.jl.git |
|
[
"MIT"
] | 0.7.3 | 4ecfc24dbdf551f92b5de7ea2d99da3f7fde73c9 | code | 1915 | # reference: https://www.learndatasci.com/tutorials/reinforcement-q-learning-scratch-python-openai-gym/
using PyCall
gym = pyimport("gym")
env = gym.make("Taxi-v3").env
# Training
Q = zeros(env.observation_space.n, env.action_space.n)
alpha = 0.1
gamma = 0.6
epsilon = 0.1
all_epochs = []
all_penalties = []
for i = 1:10000
state = env.reset()
epochs, penalties, reward, = 0, 0, 0
done = false
while !done
if rand() < epsilon
action = env.action_space.sample() # Explore action space
else
action = argmax(Q[state+1,:])-1 # Exploit learned values
end
next_state, reward, done, info = env.step(action)
if rand() < epsilon
action2 = env.action_space.sample() # Explore action space
else
action2 = argmax(Q[next_state+1,:])-1 # Exploit learned values
end
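        # SARSA-style (on-policy) update: bootstrap with the ε-greedy action sampled
        # for the next state, rather than the greedy max used in Q-learning.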
Q[state+1, action+1] = (1-alpha) * Q[state+1, action+1] + alpha * (reward + gamma * Q[next_state+1, action2+1])
if reward == -10
penalties += 1
end
state = next_state
epochs += 1
end
if mod(i, 100)==0
println("Episode = $i")
end
end
# Testing the learned Q function on a new environment
state = env.reset()
epochs, penalties, reward = 0, 0, 0
done = false
frames = []
while !done
action = argmax(Q[state+1,:]) - 1
state, reward, done, info = env.step(action)
if reward == -10
penalties += 1
end
push!(frames, Dict(
"frame"=> env.render(mode="ansi"),
"state"=> state,
"action"=> action,
"reward"=> reward
)
)
epochs += 1
end
# Visualize the result
for (k,f) in enumerate(frames)
for _ = 1:9
print("\033[F\033[K")
end
print(f["frame"])
print("Time = $k\n")
sleep(0.1)
end | ADCME | https://github.com/kailaix/ADCME.jl.git |
|
[
"MIT"
] | 0.7.3 | 4ecfc24dbdf551f92b5de7ea2d99da3f7fde73c9 | code | 888 | include("../../ccode/mpiops.jl")
mpi_init()
function kappa(x, y)
return 1+x^2+y^2
end
function rhs(x, y)
-2*x*(1 - x)*(x^2 + y^2 + 1) + 2*x*(-x*y*(1 - y) + y*(1 - x)*(1 - y)) -
2*y*(1 - y)*(x^2 + y^2 + 1) + 2*y*(-x*y*(1 - x) + x*(1 - x)*(1 - y))
end
function ufunc(x, y)
x * (1-x) * y * (1-y)
end
mc = MPIConfig(300)
global_to_local, local_to_global = dofmap(mc)
X, Y = get_xy(mc)
f_local = rhs.(X, Y)
# using Random; Random.seed!(233)
# θ = Variable(fc_init([2,20,20,20,1]))
# θ_shared = mpi_bcast(θ)
# κ_local = (fc([X'[:] Y'[:]], [20,20,20,1], θ_shared) + 5.0)|>squeeze
# κ_local = reshape(κ_local, (mc.n, mc.n))
κ_local = kappa.(X, Y)
u_local = poisson_solver(κ_local, f_local, mc)
sess = Session(); init(sess)
U = run(sess, u_local)
# change_directory("data")
@save "data/$(mpi_size())_$(mpi_rank()).jld2" U
if mpi_size()>1
mpi_finalize()
end | ADCME | https://github.com/kailaix/ADCME.jl.git |
|
[
"MIT"
] | 0.7.3 | 4ecfc24dbdf551f92b5de7ea2d99da3f7fde73c9 | code | 1454 | include("../../ccode/mpiops.jl")
using ADOPT
mpi_init()
function kappa(x, y)
return 1+x^2+y^2
end
function rhs(x, y)
-2*x*(1 - x)*(x^2 + y^2 + 1) + 2*x*(-x*y*(1 - y) + y*(1 - x)*(1 - y)) -
2*y*(1 - y)*(x^2 + y^2 + 1) + 2*y*(-x*y*(1 - x) + x*(1 - x)*(1 - y))
end
function ufunc(x, y)
x * (1-x) * y * (1-y)
end
mc = MPIConfig(300)
global_to_local, local_to_global = dofmap(mc)
X, Y = get_xy(mc)
f_local = rhs.(X, Y)
using Random; Random.seed!(233)
θ = Variable(fc_init([2,20,20,20,1]))
θ_shared = mpi_bcast(θ)
κ_local = abs(fc([X'[:] Y'[:]], [20,20,20,1], θ_shared) + 5.0)|>squeeze
κ_local = reshape(κ_local, (mc.n, mc.n))
u_local = poisson_solver(κ_local, f_local, mc)
@load "data/$(mpi_size())_$(mpi_rank()).jld2" U
loss = sum(mpi_sum((u_local - U)^2))
g = gradients(loss, θ)
sess = Session(); init(sess)
L = run(sess, loss)
if mpi_rank()==0
@info "Initial loss = $L"
end
function calculate_loss(x)
L = run(sess, loss, θ=>x)
L
end
function calculate_gradients(G, x)
G[:] = run(sess, g, θ=>x)
end
losses = Float64[]
function step_callback(x)
@info "Loss = $x"
push!(losses, x)
end
initial_x = run(sess, θ)
options = Options()
result = ADOPT.mpi_optimize(calculate_loss, calculate_gradients, initial_x, LBFGS(), options; step_callback = step_callback)
if mpi_rank()==0
minimizer = result.minimizer
@save "result.jld2" result losses
end
if mpi_size()>1
mpi_finalize()
end | ADCME | https://github.com/kailaix/ADCME.jl.git |
|
[
"MIT"
] | 0.7.3 | 4ecfc24dbdf551f92b5de7ea2d99da3f7fde73c9 | code | 1925 | include("../../ccode/mpiops.jl")
using ADOPT
mpi_init()
function kappa(x, y)
return 1+x^2+y^2
end
function rhs(x, y)
-2*x*(1 - x)*(x^2 + y^2 + 1) + 2*x*(-x*y*(1 - y) + y*(1 - x)*(1 - y)) -
2*y*(1 - y)*(x^2 + y^2 + 1) + 2*y*(-x*y*(1 - x) + x*(1 - x)*(1 - y))
end
function ufunc(x, y)
x * (1-x) * y * (1-y)
end
n = Int64(round(sqrt(parse(Int64, ARGS[1]))))
mc = MPIConfig(1800÷n)
global_to_local, local_to_global = dofmap(mc)
X, Y = get_xy(mc)
f_local = rhs.(X, Y)
using Random; Random.seed!(233)
θ = Variable(fc_init([2,20,20,20,1]))
θ_shared = mpi_bcast(θ)
κ_local = abs(fc([X'[:] Y'[:]], [20,20,20,1], θ_shared) + 5.0)|>squeeze
κ_local = reshape(κ_local, (mc.n, mc.n))
u_local = poisson_solver(κ_local, f_local, mc)
loss = sum(mpi_sum((u_local)^2))
g = gradients(loss, θ)
sess = Session(); init(sess)
function calculate_loss(x)
L = run(sess, loss, θ=>x)
L
end
function calculate_gradients(x)
run(sess, g, θ=>x)
end
x = run(sess, θ)
calculate_loss(x)
@info "HERE"
ccall((:MPITensor_Solve_Timer_SetZero, "../../../../../../../deps/Plugin/MPITensor/build/libMPITensor.so"), Cvoid, ())
stats = @timed begin
for i = 1:3
calculate_loss(x)
end
end
t0 = stats[2]/3
s0 = ccall((:MPITensor_Solve_Timer_Get, "../../../../../../../deps/Plugin/MPITensor/build/libMPITensor.so"), Cdouble, ())/3
calculate_gradients(x)
ccall((:MPITensor_Solve_Timer_SetZero, "../../../../../../../deps/Plugin/MPITensor/build/libMPITensor.so"), Cvoid, ())
stats = @timed begin
for i = 1:3
calculate_gradients(x)
end
end
t1 = stats[2]/3
s1 = ccall((:MPITensor_Solve_Timer_Get, "../../../../../../../deps/Plugin/MPITensor/build/libMPITensor.so"), Cdouble, ())/3
using DelimitedFiles
if mpi_rank()==0
i = parse(Int64, ARGS[2])
open("result.txt", "a") do io
writedlm(io, [mpi_size() i t0 t1 s0 s1])
end
end
if mpi_size()>=1
mpi_finalize()
end | ADCME | https://github.com/kailaix/ADCME.jl.git |
|
[
"MIT"
] | 0.7.3 | 4ecfc24dbdf551f92b5de7ea2d99da3f7fde73c9 | code | 1131 | using PyPlot
using DelimitedFiles
R = readdlm("result.txt")
n = [1, 4, 9, 16, 25]
close("all")
loglog(n[end:-1:1], R[1:5, 3], "or-", label = "1 Core")
loglog(n[4:-1:1], R[6:9, 3], "xg-", label = "2 Cores")
loglog(n[3:-1:1], R[10:12, 3], "^b-", label = "3 Cores")
loglog(n[end:-1:1], R[1:5, 5], "or--")
loglog(n[4:-1:1], R[6:9, 5], "xg--")
loglog(n[3:-1:1], R[10:12, 5], "^b--")
loglog(n[end-2:-1:1], 8 ./n[end-2:-1:1], "k--", label = "\$\\mathcal{O}(1/n_{{proc}})\$")
legend()
title("Strong Scaling for Forward Computation")
xlabel("MPI Processors")
ylabel("Time (sec)")
savefig("forward_strong.png")
close("all")
loglog(n[end:-1:1], R[1:5, 4], "or-", label = "1 Core")
loglog(n[4:-1:1], R[6:9, 4], "xg-", label = "2 Cores")
loglog(n[3:-1:1], R[10:12, 4], "^b-", label = "3 Cores")
loglog(n[end:-1:1], R[1:5, 6], "or--")
loglog(n[4:-1:1], R[6:9, 6], "xg--")
loglog(n[3:-1:1], R[10:12, 6], "^b--")
loglog(n[end-2:-1:1], 12 ./n[end-2:-1:1], "k--", label = "\$\\mathcal{O}(1/n_{{proc}})\$")
legend()
title("Strong Scaling for Gradient Backpropagation")
xlabel("MPI Processors")
ylabel("Time (sec)")
savefig("backward_strong.png")
| ADCME | https://github.com/kailaix/ADCME.jl.git |
|
[
"MIT"
] | 0.7.3 | 4ecfc24dbdf551f92b5de7ea2d99da3f7fde73c9 | code | 1869 | include("../../ccode/mpiops.jl")
using ADOPT
mpi_init()
function kappa(x, y)
return 1+x^2+y^2
end
function rhs(x, y)
-2*x*(1 - x)*(x^2 + y^2 + 1) + 2*x*(-x*y*(1 - y) + y*(1 - x)*(1 - y)) -
2*y*(1 - y)*(x^2 + y^2 + 1) + 2*y*(-x*y*(1 - x) + x*(1 - x)*(1 - y))
end
function ufunc(x, y)
x * (1-x) * y * (1-y)
end
mc = MPIConfig(300)
global_to_local, local_to_global = dofmap(mc)
X, Y = get_xy(mc)
f_local = rhs.(X, Y)
using Random; Random.seed!(233)
θ = Variable(fc_init([2,20,20,20,1]))
θ_shared = mpi_bcast(θ)
κ_local = abs(fc([X'[:] Y'[:]], [20,20,20,1], θ_shared) + 5.0)|>squeeze
κ_local = reshape(κ_local, (mc.n, mc.n))
u_local = poisson_solver(κ_local, f_local, mc)
loss = sum(mpi_sum((u_local)^2))
g = gradients(loss, θ)
sess = Session(); init(sess)
function calculate_loss(x)
L = run(sess, loss, θ=>x)
L
end
function calculate_gradients(x)
run(sess, g, θ=>x)
end
x = run(sess, θ)
calculate_loss(x)
ccall((:MPITensor_Solve_Timer_SetZero, "../../../../../../../deps/Plugin/MPITensor/build/libMPITensor.so"), Cvoid, ())
stats = @timed begin
for i = 1:10
calculate_loss(x)
end
end
t0 = stats[2]/10
s0 = ccall((:MPITensor_Solve_Timer_Get, "../../../../../../../deps/Plugin/MPITensor/build/libMPITensor.so"), Cdouble, ())/10
calculate_gradients(x)
ccall((:MPITensor_Solve_Timer_SetZero, "../../../../../../../deps/Plugin/MPITensor/build/libMPITensor.so"), Cvoid, ())
stats = @timed begin
for i = 1:10
calculate_gradients(x)
end
end
t1 = stats[2]/10
s1 = ccall((:MPITensor_Solve_Timer_Get, "../../../../../../../deps/Plugin/MPITensor/build/libMPITensor.so"), Cdouble, ())/10
using DelimitedFiles
if mpi_rank()==0
i = parse(Int64, ARGS[1])
open("result.txt", "a") do io
writedlm(io, [mpi_size() i t0 t1 s0 s1])
end
end
if mpi_size()>=1
mpi_finalize()
end | ADCME | https://github.com/kailaix/ADCME.jl.git |
|
[
"MIT"
] | 0.7.3 | 4ecfc24dbdf551f92b5de7ea2d99da3f7fde73c9 | code | 844 | using PyPlot
using DelimitedFiles
R = readdlm("result.txt")
n = sqrt.([1, 4, 9, 16, 25])
close("all")
plot(n, R[1:5, 3], "or-", label = "1 Core")
plot(n[1:4], R[6:9, 3], "xg-", label = "2 Cores")
plot(n[1:3], R[10:12, 3], "^b-", label = "3 Cores")
plot(n, R[1:5, 5], "or--")
plot(n[1:4], R[6:9, 5], "xg--")
plot(n[1:3], R[10:12, 5], "^b--")
legend()
title("Weak Scaling for Forward Computation")
xlabel("\$N\$")
ylabel("Time (sec)")
savefig("forward_weak.png")
close("all")
plot(n, R[1:5, 4], "or-", label = "1 Core")
plot(n[1:4], R[6:9, 4], "xg-", label = "2 Cores")
plot(n[1:3], R[10:12, 4], "^b-", label = "3 Cores")
plot(n, R[1:5, 6], "or--")
plot(n[1:4], R[6:9, 6], "xg--")
plot(n[1:3], R[10:12, 6], "^b--")
legend()
title("Strong Scaling for Gradient Backpropagation")
xlabel("\$N\$")
ylabel("Time (sec)")
savefig("backward_weak.png") | ADCME | https://github.com/kailaix/ADCME.jl.git |
|
[
"MIT"
] | 0.7.3 | 4ecfc24dbdf551f92b5de7ea2d99da3f7fde73c9 | code | 4231 | using ADCME
using PyCall
using LinearAlgebra
using PyPlot
using Random
using DelimitedFiles
using JLD2
Random.seed!(233)
ADCME.options.mpi.printlevel = 0
mutable struct MPIConfig
n::Int64
N::Int64
h::Float64
function MPIConfig(n::Int64)
r = mpi_size()
m = Int64(round(sqrt(r)))
if m^2 != r
error("$r must be a squared number")
end
h = 1/(n*m+1)
new(n, m, h)
end
end
function getIJ(mc::MPIConfig)
r = mpi_rank()
N = mc.N
I = r÷N + 1
J = r%N + 1
return I, J
end
function get_xy(mc::MPIConfig)
n, N, h = mc.n, mc.N, mc.h
x = Float64[]
y = Float64[]
I, J = getIJ(mc)
x0 = (I-1)*n*h
y0 = (J-1)*n *h
for i = 1:n
for j = 1:n
push!(x, i*h+x0)
push!(y, j*h+y0)
end
end
X, Y = Array(reshape(x, (mc.n, mc.n))'), Array(reshape(y, (mc.n, mc.n))')
end
"""
Given an extended `κ_ext` (size = `(n+2)×(n+2)`), solves the forward computation problem
"""
function poisson_solver(κ_local, f_local, mc::MPIConfig)
@assert size(κ_local) == (mc.n, mc.n)
@assert size(f_local) == (mc.n, mc.n)
global κ_ext = mpi_halo_exchange(κ_local, mc.N, mc.N; fill_value = 1.0)
h = mc.h
f_local = constant(f_local)
rhs = 2*h^2*reshape(f_local, (-1,))
poisson_linear_solver(κ_ext, rhs, mc)
end
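# Illustrative usage sketch (assumes `mpi_init()` has been called and the number of
# ranks is a perfect square, matching the example scripts in this repository):
#   mc = MPIConfig(50)
#   X, Y = get_xy(mc)
#   u_local = poisson_solver(ones(mc.n, mc.n), ones(mc.n, mc.n), mc)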
function poisson_linear_solver(κ_ext, rhs, mc)
function forward(κ_ext)
A = get_poisson_matrix(κ_ext, mc)
A\rhs
end
function backward(du, u, κ_ext)
A = get_poisson_matrix(κ_ext, mc)
B = A'
x = -(B\du)
x = reshape(x, (mc.n, mc.n))
u = reshape(u, (mc.n, mc.n))
uext = mpi_halo_exchange(u, mc.N, mc.N; fill_value = 0.0)
out = get_poisson_grad(x, uext, mc.N)
set_shape(out, (mc.n+2, mc.n+2))
end
register(forward, backward)(κ_ext)
end
function get_poisson_grad(x,uext,N)
get_poisson_grad_ = load_op_and_grad("$(@__DIR__)/build/libGetPoissonMatrix","get_poisson_grad")
x,uext,cn = convert_to_tensor(Any[x,uext,N], [Float64,Float64,Int64])
get_poisson_grad_(x,uext,cn)
end
function dofmap(mc::MPIConfig)
n, N = mc.n, mc.N
global_to_local = zeros(Int64, (n*N)^2)
local_to_global = zeros(Int64, (n*N)^2)
for I = 1:N
for J = 1:N
for i = 1:n
for j = 1:n
ii = (I-1)*n + i
jj = (J-1)*n + j
global_to_local[ (ii-1)*n*N + jj ] = ((I-1)*N+J-1)*n^2 + (i-1)*n+j
local_to_global[ ((I-1)*N+J-1)*n^2 + (i-1)*n+j ] = (ii-1)*n*N + jj
end
end
end
end
return global_to_local, local_to_global
end
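# Sketch of the index convention above: a node with global row-major index g on the
# (n·N)×(n·N) grid satisfies global_to_local[g] = its position in the concatenation of
# the per-rank n×n blocks (rank-major, row-major within a block); local_to_global is
# the inverse permutation.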
function get_colext(mc::MPIConfig)
I, J = getIJ(mc)
n, N = mc.n, mc.N
NDOF = n*N
DMAP, _ = dofmap(mc)
colext = zeros(Int64, n+2, n+2)
for i = 1:n+2
for j = 1:n+2
ii = (I-1)*n+(i-1)
jj = (J-1)*n+(j-1)
if 1<=ii<=NDOF && 1<=jj<=NDOF
idx = (ii-1)*NDOF + jj
colext[i,j] = DMAP[idx]
end
end
end
colext = colext .- 1
ncolsize = Int64[]
for i = 2:n+1
for j = 2:n+1
ns = (colext[i-1,j]>=0) + (colext[i+1,j]>=0) +
(colext[i,j-1]>=0) + (colext[i,j+1]>=0) + (colext[i,j]>=0)
push!(ncolsize, ns)
end
end
ilower = ((I-1)*N + J-1)*n^2
iupper = ((I-1)*N + J)*n^2-1
rows = Int32.(Array(0:n^2 .- 1))
rows = rows .+ ilower
return colext, sum(ncolsize), Int32.(rows), Int32.(ncolsize), ilower, iupper
end
function get_poisson_matrix(kext,mc; deps = missing)
colext, colssize, rows, ncols, ilower, iupper = get_colext(mc)
if ismissing(deps)
deps = kext[1,1]
end
get_poisson_matrix_ = load_op_and_grad("$(@__DIR__)/build/libGetPoissonMatrix","get_poisson_matrix", multiple=true)
kext,colext,colssize,deps = convert_to_tensor(Any[kext,colext,colssize,deps], [Float64,Int64,Int64,Float64])
global cols, vals = get_poisson_matrix_(kext,colext,colssize,deps)
mpi_SparseTensor(rows, ncols, cols, vals, ilower, iupper, (mc.n*mc.N)^2)
end
| ADCME | https://github.com/kailaix/ADCME.jl.git |
|
[
"MIT"
] | 0.7.3 | 4ecfc24dbdf551f92b5de7ea2d99da3f7fde73c9 | code | 203 | using SymPy
x,y = @vars x y
u = x*(1-x)*y*(1-y)
ux = diff(u, x)
uy = diff(u, y)
κ = 1+x^2+y^2
f = diff(ux*κ, x) + diff(uy*κ, y)
s = replace(replace(sympy.julia_code(f), ".*"=>"*"), ".^"=>"^")
print(s) | ADCME | https://github.com/kailaix/ADCME.jl.git |
|
[
"MIT"
] | 0.7.3 | 4ecfc24dbdf551f92b5de7ea2d99da3f7fde73c9 | code | 1306 | include("../mpiops.jl")
mpi_init()
function kappa(x, y)
return 1+x^2+y^2
end
function rhs(x, y)
-2*x*(1 - x)*(x^2 + y^2 + 1) + 2*x*(-x*y*(1 - y) + y*(1 - x)*(1 - y)) -
2*y*(1 - y)*(x^2 + y^2 + 1) + 2*y*(-x*y*(1 - x) + x*(1 - x)*(1 - y))
end
function ufunc(x, y)
x * (1-x) * y * (1-y)
end
mc = MPIConfig(50)
global_to_local, local_to_global = dofmap(mc)
X, Y = get_xy(mc)
f_local = rhs.(X, Y)
using Random; Random.seed!(233)
θ = Variable(fc_init([2,20,20,20,1]))
θ = mpi_bcast(θ)
κ_local = (fc([X'[:] Y'[:]], [20,20,20,1], θ) + 5.0)|>squeeze
κ_local = reshape(κ_local, (mc.n, mc.n))
u_local = poisson_solver(κ_local, f_local, mc)
u = mpi_gather(u_local)[global_to_local]
Uexact = reshape(constant(ufunc.(X, Y)), (-1,))
Uexact = mpi_gather(Uexact)[global_to_local]
sess = Session(); init(sess)
U = run(sess, u)
Uexact = run(sess, Uexact)
if mpi_rank()==0
close("all")
figure(figsize = (15,4))
subplot(131)
pcolormesh(reshape(U, mc.n * mc.N, mc.n * mc.N))
colorbar()
subplot(132)
pcolormesh(reshape(Uexact, mc.n * mc.N, mc.n * mc.N))
colorbar()
subplot(133)
pcolormesh(reshape(abs.(U - Uexact), mc.n * mc.N, mc.n * mc.N))
colorbar()
savefig("poisson_test_$(mpi_size())_$(mc.n).png")
end
if mpi_size()>1
mpi_finalize()
end | ADCME | https://github.com/kailaix/ADCME.jl.git |
|
[
"MIT"
] | 0.7.3 | 4ecfc24dbdf551f92b5de7ea2d99da3f7fde73c9 | code | 1036 | include("../mpiops.jl")
mpi_init()
function kappa(x, y)
return 1+x^2+y^2
end
function rhs(x, y)
-2*x*(1 - x)*(x^2 + y^2 + 1) + 2*x*(-x*y*(1 - y) + y*(1 - x)*(1 - y)) -
2*y*(1 - y)*(x^2 + y^2 + 1) + 2*y*(-x*y*(1 - x) + x*(1 - x)*(1 - y))
end
function ufunc(x, y)
x * (1-x) * y * (1-y)
end
mc = MPIConfig(50)
global_to_local, local_to_global = dofmap(mc)
X, Y = get_xy(mc)
f_local = rhs.(X, Y)
using Random; Random.seed!(233)
θ = Variable(fc_init([2,20,20,20,1]))
θ_shared = mpi_bcast(θ)
κ_local = (fc([X'[:] Y'[:]], [20,20,20,1], θ_shared) + 5.0)|>squeeze
κ_local = reshape(κ_local, (mc.n, mc.n))
u_local = poisson_solver(κ_local, f_local, mc)
u = mpi_gather(u_local)[global_to_local]
loss = sum(u^2)
g = gradients(loss, θ)
sess = Session(); init(sess)
function calculate_loss(x)
run(sess, loss, θ=>x)
end
function calculate_gradients(x)
run(sess, g, θ=>x)
end
f(x) = (calculate_loss(x), calculate_gradients(x))
test_gradients(f, run(sess, θ), mpi=true)
if mpi_size()>1
mpi_finalize()
end | ADCME | https://github.com/kailaix/ADCME.jl.git |
|
[
"MIT"
] | 0.7.3 | 4ecfc24dbdf551f92b5de7ea2d99da3f7fde73c9 | code | 1140 | include("../mpiops.jl")
mpi_init()
function kappa(x, y)
return 1+x^2+y^2
end
function rhs(x, y)
-2*x*(1 - x)*(x^2 + y^2 + 1) + 2*x*(-x*y*(1 - y) + y*(1 - x)*(1 - y)) -
2*y*(1 - y)*(x^2 + y^2 + 1) + 2*y*(-x*y*(1 - x) + x*(1 - x)*(1 - y))
end
function ufunc(x, y)
x * (1-x) * y * (1-y)
end
mc = MPIConfig(50)
global_to_local, local_to_global = dofmap(mc)
X, Y = get_xy(mc)
f_local = rhs.(X, Y)
κ_local = kappa.(X, Y)
u_local = poisson_solver(κ_local, f_local, mc)
u = mpi_gather(u_local)[global_to_local]
Uexact = reshape(constant(ufunc.(X, Y)), (-1,))
Uexact = mpi_gather(Uexact)[global_to_local]
sess = Session(); init(sess)
U = run(sess, u)
Uexact = run(sess, Uexact)
if mpi_rank()==0
close("all")
figure(figsize = (15,4))
subplot(131)
pcolormesh(reshape(U, mc.n * mc.N, mc.n * mc.N))
colorbar()
subplot(132)
pcolormesh(reshape(Uexact, mc.n * mc.N, mc.n * mc.N))
colorbar()
subplot(133)
pcolormesh(reshape(abs.(U - Uexact), mc.n * mc.N, mc.n * mc.N))
colorbar()
savefig("poisson_test_$(mpi_size())_$(mc.n).png")
end
if mpi_size()>1
mpi_finalize()
end | ADCME | https://github.com/kailaix/ADCME.jl.git |
|
[
"MIT"
] | 0.7.3 | 4ecfc24dbdf551f92b5de7ea2d99da3f7fde73c9 | code | 383 | using ADCME
using PyCall
using LinearAlgebra
using PyPlot
using Random
Random.seed!(233)
my_assign = load_op("./build/libMyAssign","my_assign")
# TODO: specify your input parameters
u = Variable([0.1,0.2,0.3])
v = constant(Array{Float64}(1:3))
u2 = u^2
w = my_assign(u,v)
sess = tf.Session()
init(sess)
@show run(sess, u)
@show run(sess, u2)
@show run(sess, w)
@show run(sess, u2)
| ADCME | https://github.com/kailaix/ADCME.jl.git |
|
[
"MIT"
] | 0.7.3 | 4ecfc24dbdf551f92b5de7ea2d99da3f7fde73c9 | code | 759 |
using AdFem
using ADCME
using PyPlot
using JLD2
function kappa(x, y)
return 2 + exp(10x) - (10y)^2
end
function f(x, y)
return sin(2π*10y+π/8)
end
mmesh = Mesh(joinpath(PDATA, "twoholes_large.stl"))
Kappa = eval_f_on_gauss_pts(kappa, mmesh)
F = eval_f_on_gauss_pts(f, mmesh)
L = compute_fem_laplace_matrix1(Kappa, mmesh)
RHS = compute_fem_source_term1(F, mmesh)
bd = bcnode(mmesh)
L, RHS = impose_Dirichlet_boundary_conditions(L, RHS, bd, zeros(length(bd)))
SOL = L\RHS
close("all")
figure(figsize = (10, 4))
subplot(121)
visualize_scalar_on_gauss_points(Kappa, mmesh)
title("\$\\kappa\$")
subplot(122)
visualize_scalar_on_fem_points(SOL, mmesh)
title("Solution")
make_directory("data")
savefig("data/fwd_ps.png")
@save "data/fwd.jld2" SOL | ADCME | https://github.com/kailaix/ADCME.jl.git |
|
[
"MIT"
] | 0.7.3 | 4ecfc24dbdf551f92b5de7ea2d99da3f7fde73c9 | code | 804 | using AdFem
using ADCME
using PyPlot
using JLD2
using Revise
using ADCME
using PyCall
DIR = abspath(joinpath(@__DIR__, ".."))
py"""
import sys
sys.path.insert(0, $DIR)
"""
opt = pyimport("optim.optim")
@load "data/fwd.jld2" SOL
function f(x, y)
return sin(2π*10y+π/8)
end
mmesh = Mesh(joinpath(PDATA, "twoholes_large.stl"))
using Random
SEED = 233   # assumed value: `SEED` is not defined elsewhere in this script; 233 matches the seed used by the other examples
Random.seed!(SEED)
idx = rand(1:length(SOL), length(SOL)÷5)
xy = gauss_nodes(mmesh)
θ = Variable(fc_init([2,20,20,20,1]))
Kappa = squeeze(fc(xy, [20, 20, 20,1], θ, activation="tanh")) + 2.0
F = eval_f_on_gauss_pts(f, mmesh)
L = compute_fem_laplace_matrix1(Kappa, mmesh)
RHS = compute_fem_source_term1(F, mmesh)
bd = bcnode(mmesh)
L, RHS = impose_Dirichlet_boundary_conditions(L, RHS, bd, zeros(length(bd)))
sol = L\RHS
loss = sum((sol - SOL)^2)*1e10 | ADCME | https://github.com/kailaix/ADCME.jl.git |
|
[
"MIT"
] | 0.7.3 | 4ecfc24dbdf551f92b5de7ea2d99da3f7fde73c9 | code | 647 | using ADCME
using PyCall
using LinearAlgebra
using PyPlot
using Random
Random.seed!(233)
test_resource_manager = load_op_and_grad("./build/libTestResourceManager","test_resource_manager")
# TODO: specify your input parameters
k = constant(0, dtype=Int32)
u1 = test_resource_manager(k)
u2 = test_resource_manager(k)
u3 = test_resource_manager(k)
control_dependencies([u1, u2, u3]) do
global z = test_resource_manager(k) # use z to read the summation
end
# z = test_resource_manager(k)
sess = Session(); init(sess)
run(sess, z)
# output
# Create a new container
# Current Value=1
# Current Value=2
# Current Value=3
# Current Value=4
# 4 | ADCME | https://github.com/kailaix/ADCME.jl.git |
|
[
"MIT"
] | 0.7.3 | 4ecfc24dbdf551f92b5de7ea2d99da3f7fde73c9 | code | 426 | using ADCME
using PyCall
using LinearAlgebra
using PyPlot
using Random
Random.seed!(233)
function compute_sin(input)
compute_sin_ = load_op_and_grad("./build/libComputeSin","compute_sin")
input = convert_to_tensor(Any[input], [Float64]); input = input[1]
compute_sin_(input)
end
# TODO: specify your input parameters
input = rand(10)
u = compute_sin(input)
sess = Session(CPU=1); init(sess)
@show run(sess, u)
| ADCME | https://github.com/kailaix/ADCME.jl.git |
|
[
"MIT"
] | 0.7.3 | 4ecfc24dbdf551f92b5de7ea2d99da3f7fde73c9 | code | 449 | ENV["OMP_NUM_THREADS"] = 5
using ADCME
using PyCall
using LinearAlgebra
using PyPlot
using Random
Random.seed!(233)
function compute_sin(input)
compute_sin_ = load_op_and_grad("./build/libComputeSin","compute_sin")
input = convert_to_tensor(Any[input], [Float64]); input = input[1]
compute_sin_(input)
end
# TODO: specify your input parameters
input = rand(10)
u = compute_sin(input)
sess = Session(); init(sess)
@show run(sess, u)
| ADCME | https://github.com/kailaix/ADCME.jl.git |
|
[
"MIT"
] | 0.7.3 | 4ecfc24dbdf551f92b5de7ea2d99da3f7fde73c9 | code | 1020 | using ADCME
if Sys.iswindows()
global save_tensor = load_op_and_grad("./build/Release/libSaver","save_tensor")
global get_tensor = load_op_and_grad("./build/Release/libSaver","get_tensor")
global delete_tensor = load_op_and_grad("./build/Release/libSaver","delete_tensor")
else
global save_tensor = load_op_and_grad("./build/libSaveTensor","save_tensor")
global get_tensor = load_op_and_grad("./build/libGetTensor","get_tensor")
global delete_tensor = load_op_and_grad("./build/libDeleteTensor","delete_tensor")
end
val = constant(rand(10))
t1 = constant("tensor1")
t2 = constant("tensor2")
t3 = constant("tensor3")
u1 = save_tensor(t1,val)
u2 = save_tensor(t2,2*val)
u3 = save_tensor(t3,3*val)
z1 = get_tensor(t1);
z2 = get_tensor(t2);
z3 = get_tensor(t3);
d1 = delete_tensor(t1);
d2 = delete_tensor(t2);
d3 = delete_tensor(t3);
sess = Session();
run(sess, [u1,u2,u3]) # add all the keys
# get the keys one by one
run(sess, z1)
run(sess, z2)
run(sess, z3)
# delete 2nd key
run(sess, d2) | ADCME | https://github.com/kailaix/ADCME.jl.git |
|
[
"MIT"
] | 0.7.3 | 4ecfc24dbdf551f92b5de7ea2d99da3f7fde73c9 | code | 466 | using ADCME
function test_thread_pool(a)
test_thread_pool_ = load_op_and_grad("./build/libTestThreadPool","test_thread_pool")
a = convert_to_tensor(Any[a], [Float64]); a = a[1]
test_thread_pool_(a)
end
# TODO: specify your input parameters
a = 1.0
u = test_thread_pool(a)
config = tf.ConfigProto(inter_op_parallelism_threads=2, intra_op_parallelism_threads=3, device_count=Dict("CPU"=>1))
sess = Session(config = config); init(sess)
@show run(sess, u)
| ADCME | https://github.com/kailaix/ADCME.jl.git |
|
[
"MIT"
] | 0.7.3 | 4ecfc24dbdf551f92b5de7ea2d99da3f7fde73c9 | code | 1853 | using ADCME
using PyPlot
x0 = rand(100)
x0 = @. x0*0.4 + 0.3
x1 = collect(LinRange(0, 1, 100))
y0 = sin.(2π*x0)
w = Variable(fc_init([1, 20, 20, 20, 1]))
y = squeeze(fc(x0, [20, 20, 20, 1], w))
loss = sum((y - y0)^2)
sess = Session(); init(sess)
BFGS!(sess, loss)
y1 = run(sess, y)
plot(x0, y0, ".", label="Data")
x_dnn = run(sess, squeeze(fc(x1, [20, 20, 20, 1], w)))
plot(x1, x_dnn, "--", label="DNN Estimation")
legend()
w1 = run(sess, w)
##############################
μ = Variable(w1)
ρ = Variable(zeros(length(μ)))
σ = log(1+exp(ρ))
function likelihood(z)
w = μ + σ * z
y = squeeze(fc(x0, [20, 20, 20, 1], w))
sum((y - y0)^2) - sum((w-μ)^2/(2σ^2)) + sum((w-w1)^2)
end
function inference(x)
z = tf.random_normal((length(σ),), dtype=tf.float64)
w = μ + σ * z
y = squeeze(fc(x, [20, 20, 20, 1], w))|>squeeze
end
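# Note (illustrative): both `likelihood` and `inference` draw weights as w = μ + σ·z
# with z ~ N(0, I), the reparameterization trick, so the variational parameters (μ, ρ)
# receive gradients through the sampled weights.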
W = tf.random_normal((10, length(w)), dtype=tf.float64)
L = constant(0.0)
for i = 1:10
global L += likelihood(W[i])
end
y2 = inference(x1)
opt = AdamOptimizer(0.01).minimize(L)
init(sess)
# run(sess, L)
losses = []
for i = 1:2000
_, l = run(sess, [opt, L])
push!(losses, l)
@info i, l
end
Y = zeros(100, 1000)
for i = 1:1000
Y[:,i] = run(sess, y2)
end
for i = 1:1000
plot(x1, Y[:,i], "--", color="gray", alpha=0.5)
end
plot(x1, x_dnn, label="DNN Estimation")
plot(x0, y1, ".", label="Data")
legend()
##############################
# Naive Uncertainty Quantification
function inference_naive(x)
z = tf.random_normal((length(w1),), dtype=tf.float64)
w = w1 + log(2)*z
y = squeeze(fc(x, [20, 20, 20, 1], w))|>squeeze
end
y3 = inference_naive(x1)
Y = zeros(100, 1000)
for i = 1:1000
Y[:,i] = run(sess, y3)
end
for i = 1:1000
plot(x1, Y[:,i], "--", color="gray", alpha=0.5)
end
plot(x1, x_dnn, label="DNN Estimation")
plot(x0, y1, ".", label="Data")
legend()
| ADCME | https://github.com/kailaix/ADCME.jl.git |
|
[
"MIT"
] | 0.7.3 | 4ecfc24dbdf551f92b5de7ea2d99da3f7fde73c9 | code | 343 | using ADCME
do_it = load_op("./build/libDoItOp", "do_it_op")
function myfun(y, x)
@. y = 2x
end
x = constant(rand(100))
y = 2x # or `y = Variable(rand(100))`
u = do_it(y)
config = tf.ConfigProto(inter_op_parallelism_threads=1)
sess = tf.Session()
init(sess)
ccall((:get_id, "./build/libDoItOp.so"), Cvoid, ())
run(sess, u, x=>rand(100))
| ADCME | https://github.com/kailaix/ADCME.jl.git |
|
[
"MIT"
] | 0.7.3 | 4ecfc24dbdf551f92b5de7ea2d99da3f7fde73c9 | code | 347 | using ADCME
function gpu_test(a)
gpu_test_ = load_op_and_grad("$(@__DIR__)/build/libGpuTest","gpu_test")
a = convert_to_tensor([a], [Float64]); a = a[1]
gpu_test_(a)
end
# TODO: specify your input parameters
a = rand(3)
u = gpu_test(a)
sess = Session(); init(sess)
v1 = run(sess, u)
v2 = 2a
println("Computed: $v1; expected: $v2")
| ADCME | https://github.com/kailaix/ADCME.jl.git |
|
[
"MIT"
] | 0.7.3 | 4ecfc24dbdf551f92b5de7ea2d99da3f7fde73c9 | code | 1964 | using ADCME
using PyPlot
encoding_dim = 32
x = placeholder(Float64, shape=[nothing, 784])
variable_scope("encoder", reuse=AUTO_REUSE) do
net = x
net = dense(net, 200, activation="relu")
net = dense(net, 200, activation="relu")
global encoded = dense(net, encoding_dim, activation="relu")
end
variable_scope("decoder", reuse=AUTO_REUSE) do
net = dense(encoded, 200, activation="relu")
net = dense(net, 200, activation="relu")
global decoded = dense(net, 784, activation="sigmoid")
end
x_train, _, x_test, = mnist(Float64)
x_train /= 255
x_test /= 255
x_train = reshape(x_train, size(x_train,1), length(x_train[1,:,:]))
x_test = reshape(x_test, size(x_test,1), length(x_test[1,:,:]))
loss = sum(tf.keras.backend.binary_crossentropy(x, decoded))
variable_scope("nn2", reuse=AUTO_REUSE) do
opt = AdamOptimizer()
global train_op = minimize(opt, loss)
end
sess = Session()
init(sess)
batchsize=256
for i = 1:10000
II = (1+(i-1)*batchsize):i*batchsize
II = mod.(II, size(x_train,1)).+1
data = x_train[II, :]
_, los = run(sess, [train_op, loss], feed_dict=Dict(x=>data))
if mod(i,100)==0
println("#iter=$i, loss=$los")
end
end
p = rand(1:size(x_train,1),6)
V = run(sess, decoded, feed_dict=Dict(x=>x_train[p,:]))
# V = Array{Int64}(V .> 0.5)
figure()
for i = 1:5
subplot(2,5,i)
imshow(reshape(V[i,:],28,28))
subplot(2,5,i+5)
imshow(reshape(x_train[p[i],:],28,28))
end
savefig("tmp.png")
ec = run(sess, encoded, feed_dict=Dict(x=>x_train[p,:]))
ec2 = (ec[1:3,:]+ec[4:6,:])/2
y = placeholder(Float64, shape=[nothing, 32])
variable_scope("decoder", reuse=AUTO_REUSE) do
net = dense(y, 200, activation="relu")
net = dense(net, 200, activation="relu")
global imgs = dense(net, 784, activation="sigmoid")
end
IMGS = run(sess, imgs, feed_dict=Dict(y=>ec2))
# IMGS = IMGS .> 0.5
figure()
for i = 1:3
subplot(1,3,i)
imshow(reshape(IMGS[i,:],28,28))
end
savefig("tmp2.png")
| ADCME | https://github.com/kailaix/ADCME.jl.git |
|
[
"MIT"
] | 0.7.3 | 4ecfc24dbdf551f92b5de7ea2d99da3f7fde73c9 | code | 2930 | # rewrite of [TensorFlow Probability Example](https://medium.com/tensorflow/an-introduction-to-probabilistic-programming-now-available-in-tensorflow-probability-6dcc003ca29e) in `ADCME`
using Statistics
using ADCME
using PyCall
using PyPlot
using DelimitedFiles
if !("challenger_data.csv" in readdir("."))
download("https://raw.githubusercontent.com/CamDavidsonPilon/Probabilistic-Programming-and-Bayesian-Methods-for-Hackers/master/Chapter2_MorePyMC/data/challenger_data.csv", "challenger_data.csv")
end
F = readdlm("challenger_data.csv",',', header=true)[1][:,2:3]
I = ones(Bool, size(F,1))
for i = 1:size(F,1)
if F[i,2]=="NA" || F[i,2] == "Challenger Accident"
I[i] = false
end
end
F = Array{Float32}(F[I,:]);
sess = Session()
temperature = constant(F[:,1], dtype=Float32)
D = constant(F[:,2], dtype=Float32)
function challenger_joint_log_prob(D, temperature, α, β)
rv_alpha = Normal(loc=0., scale=1000.)
rv_beta = Normal(loc=0., scale=1000.)
logistic_p = 1.0/(1+exp(β*temperature+α ))
rv_observed = Bernoulli(probs = logistic_p)
return log_prob(rv_alpha, α) + log_prob(rv_beta, β) +
sum(log_prob(rv_observed, D))
end
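# Sketch of the model above: α and β carry zero-mean Normal priors with scale 1000,
# and each observation is Bernoulli with success probability p(t) = 1/(1 + exp(β·t + α));
# the function returns log p(α) + log p(β) + Σᵢ log p(Dᵢ | α, β).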
number_of_steps = 40000
burnin = 38000
initial_chain_state = [
constant(0.0, dtype=Float32),
constant(0.0, dtype=Float32)
]
function unnormalized_posterior_log_prob(args...)
challenger_joint_log_prob(D, temperature,args...)
end
unconstraining_bijectors = [Identity(), Identity()]
variable_scope("mcmc", reuse=AUTO_REUSE) do
global step_size = get_variable(
"step_size",
initializer=constant(0.5, dtype=Float32),
trainable=false
)
end
ham=TransformedTransitionKernel(HamiltonianMonteCarlo(
target_log_prob_fn=unnormalized_posterior_log_prob,
num_leapfrog_steps=2,
step_size_update_fn=make_simple_step_size_update_policy(),
state_gradients_are_stopped=true,
step_size=step_size),
bijector=unconstraining_bijectors)
states,is_accepted_, kernel_results = sample_chain(
num_results = number_of_steps,
num_burnin_steps = burnin,
current_state=initial_chain_state,
kernel=ham
)
init(sess)
posterior_α, posterior_β, is_accepted = run(sess, [states[1], states[2], is_accepted_])
println("Accepted rate=", sum(is_accepted)/length(is_accepted))
function logistic(x, beta, alpha)
return 1.0 ./ (1.0 .+ exp.(beta*x .+ alpha) )
end
figure()
xval = LinRange(minimum(F[:,1])-1.0, maximum(F[:,1])+1.0, 150)
y1 = logistic(xval, posterior_β[end-2000], posterior_α[end-2000])
y2 = logistic(xval, posterior_β[end-8], posterior_α[end-8])
y3 = logistic(xval, mean(posterior_β), mean(posterior_α))
scatter(F[:,1], F[:,2])
plot(xval, y1, label="posterior1")
plot(xval, y2, label="posterior2")
plot(xval, y3, label="mean")
ylim(-0.1,1.1)
legend()
# challenger
figure()
plt.hist(logistic(31, posterior_β[end-2000:end], posterior_α[end-2000:end]), 50)
| ADCME | https://github.com/kailaix/ADCME.jl.git |
|
[
"MIT"
] | 0.7.3 | 4ecfc24dbdf551f92b5de7ea2d99da3f7fde73c9 | code | 2352 | using ADCME, LinearAlgebra, PyCall
using DelimitedFiles
using PyPlot
# read data
elem = readdlm("meshdata/elem.txt", Int64)
node = readdlm("meshdata/nodes.txt")
dof = readdlm("meshdata/dof.txt", Int64)[:]
elem_ = constant(elem)
ne = size(elem,1)
nv = size(node, 1)
# precompute
localcoef = zeros(ne, 3, 3)
areas = zeros(ne)
for e = 1:ne
el = elem[e,:]
x1, y1 = node[el[1],:]
x2, y2 = node[el[2],:]
x3, y3 = node[el[3],:]
A = [x1 y1 1.0; x2 y2 1.0; x3 y3 1.0]
localcoef[e,:,:] = inv(A)
areas[e] = 0.5*abs(det(A))
end
# compute right hand side using midpoint rule
rhs = zeros(nv)
for i = 1:ne
el = elem[i,:]
rhs[el] .+= 4*areas[i]/3
end
areas = constant(areas)
localcoef = constant(localcoef)
D = constant(diagm(0=>ones(2)))
# D = Variable(2.0) .* [1.0 0.0;0.0 1.0]
function body(i, tai, taj, tav)
el = elem_[i-1]
a = areas[i-1]
L = localcoef[i-1]
LocalStiff = Array{PyObject}(undef, 3, 3)
for i = 1:3
for j = 1:3
LocalStiff[i,j] = a*[L[1,i] L[2,i]]*D*[L[1,j];L[2,j]]|>squeeze
end
end
ii = reshape([el el el], (-1,))
jj = reshape([el;el;el], (-1,))
tai = write(tai, i, ii)
taj = write(taj, i, jj)
# op = tf.print(el)
# i = bind(i, op)
tav = write(tav, i, vcat(LocalStiff[:]...))
return i+1, tai, taj, tav
end
i = constant(2, dtype=Int32)
tai = TensorArray(ne+1, dtype=Int64)
taj = TensorArray(ne+1, dtype=Int64)
tav = TensorArray(ne+1)
tai = write(tai, 1, constant(ones(Int64,9)))
taj = write(taj, 1, constant(ones(Int64,9)))
tav = write(tav, 1, constant(zeros(9)))
_, ii, jj, vv = while_loop((i, tas...)->i<=ne+1, body, [i, tai, taj, tav])
ii = reshape(stack(ii),(-1,)); jj = reshape(stack(jj),(-1,)); vv = reshape(stack(vv),(-1,))
A = SparseTensor(ii, jj, vv, nv, nv)
ndof = [x for x in setdiff(Set(1:nv), Set(dof))]
A = scatter_update(A, dof, ndof, spzero(length(dof), length(ndof)))
A = scatter_update(A, ndof, dof, spzero(length(ndof), length(dof)))
A = scatter_update(A, dof, dof, spdiag(length(dof)))
rhs[dof] .= 0.0
sol = A\rhs
# loss = sum((sol - (@. 1-node[:,1]^2-node[:,2]^2))^2)
sess = Session(); init(sess)
S = run(sess, sol)
close("all")
scatter3D(node[:,1], node[:,2], S, marker="^", label = "FEM")
scatter3D(node[:,1], node[:,2], (@. 1-node[:,1]^2-node[:,2]^2), marker = "+", label = "Exact")
legend()
| ADCME | https://github.com/kailaix/ADCME.jl.git |
|
[
"MIT"
] | 0.7.3 | 4ecfc24dbdf551f92b5de7ea2d99da3f7fde73c9 | code | 1651 | using ADCME
using PyPlot
function residual_and_jacobian(θ, u)
X = ae(u, config, θ) + 1.0 # (1)
Xp = tf.gradients(X, u)[1]
Xpp = tf.gradients(Xp, u)[1]
up = [u[2:end];constant(zeros(1))]
un = [constant(zeros(1)); u[1:end-1]]
R = Xp .* ((up-un)/2h)^2 + X .* (up+un-2u)/h^2 - φ
dRdu = Xpp .* ((up-un)/2h)^2 + Xp.*(up+un-2u)/h^2 - 2/h^2*X
dRdun = -Xp[2:end]/h .* (up-un)[2:end]/2h + X[2:end]/h^2
dRdup = Xp[1:end-1]/h .* (up-un)[1:end-1]/2h + X[1:end-1]/h^2
J = spdiag(n-1,
-1=>dRdun,
0=>dRdu,
1=>dRdup) # (2)
return R, J
end
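# Sketch of the residual above: it is a central-difference discretization of
#   d/dx( X(u) du/dx ) - φ = X'(u)·(u')² + X(u)·u'' - φ = 0
# on the n-1 interior grid points with spacing h, where X(u) = ae(u, config, θ) + 1 is
# the network-parameterized coefficient; J is the tridiagonal Jacobian ∂R/∂u.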
config = [20,20,20,1]
n = 100
h = 1/n
x = collect(LinRange(0, 1.0, n+1))
φ = @. (1 - 2*x)*(-100*x^2*(2*x - 2) - 200*x*(1 - x)^2)/(100*x^2*(1 - x)^2 + 1)^2 - 2 - 2/(100*x^2*(1 - x)^2 + 1)
φ = φ[2:end-1]
θ = Variable(ae_init([1,config...]))
u0 = constant(zeros(n-1))
function L(u) # (3)
u_obs = (@. x * (1-x))[2:end-1]
loss = mean((u - u_obs)^2)
end
loss, solution, grad = NonlinearConstrainedProblem(residual_and_jacobian, L, θ, u0)
X_pred = ae(collect(LinRange(0.0,0.25,100)), config, θ) + 1.0
sess = Session(); init(sess)
BFGS!(sess, loss, grad, θ)
x_pred, sol = run(sess, [X_pred, solution])
figure(figsize=(10,4))
subplot(121)
s = LinRange(0.0,0.25,100)
x_exact = @. 1/(1+100*s^2) + 1
plot(s, x_exact, "-", linewidth=3, label="Exact")
plot(s, x_pred, "o", markersize=2, label="Estimated")
legend()
xlabel("u")
ylabel("X(u)")
subplot(122)
s = LinRange(0.0,1.0,101)[2:end-1]
plot(s, (@. s * (1-s)), "-", linewidth=3, label="Exact")
plot(s, sol, "o", markersize=2, label="Estimated")
legend()
xlabel("x")
ylabel("u")
savefig("nn.png") | ADCME | https://github.com/kailaix/ADCME.jl.git |
|
[
"MIT"
] | 0.7.3 | 4ecfc24dbdf551f92b5de7ea2d99da3f7fde73c9 | code | 629 | using LinearAlgebra
using ADCME
n = 101 # number of grid nodes in [0,1]
h = 1/(n-1)
x = LinRange(0,1,n)[2:end-1]
b = Variable(10.0) # create a Variable for `b`
A = diagm(0=>2/h^2*ones(n-2), -1=>-1/h^2*ones(n-3), 1=>-1/h^2*ones(n-3)) # discrete Laplacian matrix
B = b*A + diagm(0=>ones(n-2)) # coefficient matrix
f = @. 4*(2 + x - x^2) # right hand side
u = B\f # solve the equation
ue = u[div(n+1,2)] # extract values at x=0.5
loss = (ue-1.0)^2 # form the loss function
# Optimization
opt = ScipyOptimizerInterface(loss)
sess = Session(); init(sess)
ScipyOptimizerMinimize(sess, opt)
println("Estimated b = ", run(sess, b)) | ADCME | https://github.com/kailaix/ADCME.jl.git |
|
[
"MIT"
] | 0.7.3 | 4ecfc24dbdf551f92b5de7ea2d99da3f7fde73c9 | code | 5064 | # This is an example of WGAN_GP using ADCME.jl
# Reference: https://github.com/eriklindernoren/Keras-GAN/blob/master/wgan_gp/wgan_gp.py#L147
using ADCME
using PyPlot
# step 0: parameters
img_rows = 28
img_cols = 28
channels = 1
img_shape = (img_rows, img_cols, channels)
latent_dim = 100
variable_scope("rms", reuse=AUTO_REUSE) do
global optimizer1 = RMSPropOptimizer(0.00005)
global optimizer2 = RMSPropOptimizer(0.00005)
end
n_critic = 5
batch_size = 32
Xtrain,_,_,_ = mnist(Float32)
Xtrain = (Xtrain.-127.5f0)/127.5f0
Xtrain = reshape(Xtrain, size(Xtrain)...,1)
function generator(x)
scope = "generator"
variable_scope(scope, reuse=AUTO_REUSE) do
x = dense(x, 128*7*7, activation="relu")
x = Reshape(x, (7,7,128),name="2")
x = UpSampling2D(x,name="3")
x = conv2d(x, 128, kernel_size=4, padding="same")
# x = BatchNormalization(x, momentum=0.8)
x = relu(x)
x = UpSampling2D(x,name="5")
x = conv2d(x, 64, kernel_size=4, padding="same")
# x = BatchNormalization(x,momentum=0.8)
x = relu(x)
x = conv2d(x, channels, kernel_size=4, padding="same")
x = tanh(x)
end
return x
end
function critic(x)
variable_scope("critic", reuse=AUTO_REUSE) do
x = conv2d(x, 16, kernel_size=3, strides=2, padding="same")
x = leaky_relu(x, alpha=0.2)
x = dropout(x, 0.25)
x = conv2d(x, 32, kernel_size=3, strides=2, padding="same")
x = ZeroPadding2D(x, padding=((0,1),(0,1)))
# x = BatchNormalization(x, momentum=0.8)
x = leaky_relu(x, alpha=0.2)
x = dropout(x, 0.25)
x = conv2d(x, 64, kernel_size=3, strides=2, padding="same")
# x = BatchNormalization(x, momentum=0.8)
x = leaky_relu(x, alpha=0.2)
x = dropout(x, 0.25)
x = conv2d(x, 128, kernel_size=3, strides=1, padding="same")
# x = BatchNormalization(x, momentum=0.8)
x = leaky_relu(x, alpha=0.2)
x = dropout(x, 0.25)
x = flatten(x)
x = dense(x, 1)
x = sigmoid(x)
end
return x
end
function wasserstein_loss(ypred, ytrue)
return mean(ytrue.*ypred)
end
function gradient_penalty_loss(y_pred, averaged_samples)
grad = tf.gradients(y_pred, averaged_samples)[1]
grd_sqr_sum = sum(grad^2, dims=[2;3;4])
grd_l2_norm = sqrt(grd_sqr_sum)
gradient_penalty = (1 - grd_l2_norm)^2
return mean(gradient_penalty)
end
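# Note (illustrative): this is the WGAN-GP penalty; it drives the critic's gradient norm
# toward 1 at points interpolated between real and fake images. The critic loss assembled
# below is  E[D(fake)] - E[D(real)] + 10·E[(‖∇D(x̂)‖₂ - 1)²].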
# step 1: create generator neural network
real_img = placeholder(Float32, shape=[batch_size, img_shape...])
z_disc = placeholder(Float32, shape=[batch_size, latent_dim])
fake_img = generator(z_disc)
# error("inspection")
fake = critic(fake_img)
valid = critic(real_img)
α = random_uniform((32,1,1,1), dtype=Float32)
interpolated_img = α * real_img + (1 - α) * fake_img
validity_interpolated = critic(interpolated_img)
l1 = wasserstein_loss(valid, -ones(Float32,batch_size,1))
l2 = wasserstein_loss(fake, ones(Float32,batch_size,1))
l3 = gradient_penalty_loss(validity_interpolated, interpolated_img)
l_critic = l1 + l2 + 10*l3
z_gen = placeholder(Float32, shape=[batch_size, latent_dim])
img = generator(z_gen)
valid = critic(img)
l_generator = wasserstein_loss(valid, -ones(Float32,batch_size,1))
# error("inspection")
critic_var = get_collection("critic")
generator_var = get_collection("generator")
variable_scope("rms", reuse=AUTO_REUSE) do
global train_critic = minimize(optimizer1, l_critic, var_list=critic_var)
global train_generator = minimize(optimizer2, l_generator, var_list=generator_var)
end
si_z_gen = placeholder(Float32, shape=[25, 100])
si_img = generator(si_z_gen)
function sample_images(epoch)
noise = randn(25, latent_dim)
gen_images = run(sess, si_img, feed_dict=Dict(si_z_gen=>noise))
gen_images = 0.5*gen_images .+ 1
close("all")
figure()
cnt = 1
for i = 1:5
for j = 1:5
subplot(5,5,i+(j-1)*5)
imshow(gen_images[cnt,:,:,1], cmap="gray")
axis("off")
cnt+=1
end
end
savefig("images/mnist_$epoch.png")
close("all")
end
# # step 2: training
sess = Session()
init(sess)
epochs = 600000
d_loss = nothing
noise = nothing
_l1 = nothing
_l2 = nothing
_l3 = nothing
noise = nothing
for epoch = 1:epochs
for _ = 1:n_critic
global d_loss, _l1, _l2, _l3, noise
idx = rand(1:size(Xtrain,1), batch_size)
imgs = Xtrain[idx,:,:,:]
noise = randn(Float32, batch_size, latent_dim)
_l1,_l2,_l3,d_loss,_ = run(sess, [l1,l2,l3,l_critic, train_critic],
feed_dict=Dict(real_img=>imgs, z_disc=>noise))
end
# noise = randn(batch_size, latent_dim)
#k1,k2 = run(sess, [l2, l_generator], feed_dict=Dict(z_disc=>noise, z_gen=>noise))
#@show k1, k2
g_loss,_ = run(sess, [l_generator, train_generator],
feed_dict=Dict(z_gen=>noise))
if mod(epoch, 100)==1
sample_images(epoch)
end
println("$epoch [D loss: $(d_loss)] [G loss: $(g_loss)]")
println("Diagnose: $_l1, $_l2, $_l3")
end
| ADCME | https://github.com/kailaix/ADCME.jl.git |
|
[
"MIT"
] | 0.7.3 | 4ecfc24dbdf551f92b5de7ea2d99da3f7fde73c9 | code | 2277 | using ADCME
using PyCall
using LinearAlgebra
using PyPlot
using Random
Random.seed!(233)
if Sys.islinux()
py"""
import tensorflow as tf
libDirichletBD = tf.load_op_library('build/libDirichletBD.so')
@tf.custom_gradient
def dirichlet_bd(ii,jj,dof,vv):
uu = libDirichletBD.dirichlet_bd(ii,jj,dof,vv)
def grad(dy):
return libDirichletBD.dirichlet_bd_grad(dy, uu, ii,jj,dof,vv)
return uu, grad
"""
elseif Sys.isapple()
py"""
import tensorflow as tf
libDirichletBD = tf.load_op_library('build/libDirichletBD.dylib')
@tf.custom_gradient
def dirichlet_bd(ii,jj,dof,vv):
uu = libDirichletBD.dirichlet_bd(ii,jj,dof,vv)
def grad(dy):
return libDirichletBD.dirichlet_bd_grad(dy, uu, ii,jj,dof,vv)
return uu, grad
"""
elseif Sys.iswindows()
py"""
import tensorflow as tf
libDirichletBD = tf.load_op_library('build/libDirichletBD.dll')
@tf.custom_gradient
def dirichlet_bd(ii,jj,dof,vv):
uu = libDirichletBD.dirichlet_bd(ii,jj,dof,vv)
def grad(dy):
return libDirichletBD.dirichlet_bd_grad(dy, uu, ii,jj,dof,vv)
return uu, grad
"""
end
dirichlet_bd = py"dirichlet_bd"
# TODO:
ii = constant([1;2;3;2], dtype=Int32)
jj = constant([1;2;3;3], dtype=Int32)
vv = constant([2.0;3.0;4.0;4.0])
dof = constant([3], dtype=Int32)
u = dirichlet_bd(ii,jj,dof,vv)
sess = Session()
init(sess)
run(sess, u)
# error("")
# TODO:
# gradient check -- v
function scalar_function(m)
return sum(tanh(dirichlet_bd(ii,jj,dof,m)))
end
m_ = vv
v_ = rand(4)
y_ = scalar_function(m_)
dy_ = gradients(y_, m_)
ms_ = Array{Any}(undef, 5)
ys_ = Array{Any}(undef, 5)
s_ = Array{Any}(undef, 5)
w_ = Array{Any}(undef, 5)
gs_ = @. 1 / 10^(1:5)
for i = 1:5
g_ = gs_[i]
ms_[i] = m_ + g_*v_
ys_[i] = scalar_function(ms_[i])
s_[i] = ys_[i] - y_
w_[i] = s_[i] - g_*sum(v_.*dy_)
end
sess = Session()
init(sess)
sval_ = run(sess, s_)
wval_ = run(sess, w_)
close("all")
loglog(gs_, abs.(sval_), "*-", label="finite difference")
loglog(gs_, abs.(wval_), "+-", label="automatic differentiation")
loglog(gs_, gs_.^2 * 0.5*abs(wval_[1])/gs_[1]^2, "--",label="\$\\mathcal{O}(\\gamma^2)\$")
loglog(gs_, gs_ * 0.5*abs(sval_[1])/gs_[1], "--",label="\$\\mathcal{O}(\\gamma)\$")
plt.gca().invert_xaxis()
legend()
xlabel("\$\\gamma\$")
ylabel("Error")
| ADCME | https://github.com/kailaix/ADCME.jl.git |
|
[
"MIT"
] | 0.7.3 | 4ecfc24dbdf551f92b5de7ea2d99da3f7fde73c9 | code | 3061 | using ADCME
using PyCall
using LinearAlgebra
using PyPlot
using Random
using SparseArrays
Random.seed!(233)
################## Load Operator ##################
if Sys.islinux()
py"""
import tensorflow as tf
libSparseSolver = tf.load_op_library('build/libSparseSolver.so')
@tf.custom_gradient
def sparse_solver(ii,jj,vv,kk,ff,d):
u = libSparseSolver.sparse_solver(ii,jj,vv,kk,ff,d)
def grad(dy):
return libSparseSolver.sparse_solver_grad(dy, u, ii,jj,vv,kk,ff,d)
return u, grad
"""
elseif Sys.isapple()
py"""
import tensorflow as tf
libSparseSolver = tf.load_op_library('build/libSparseSolver.dylib')
@tf.custom_gradient
def sparse_solver(ii,jj,vv,kk,ff,d):
u = libSparseSolver.sparse_solver(ii,jj,vv,kk,ff,d)
def grad(dy):
return libSparseSolver.sparse_solver_grad(dy, u, ii,jj,vv,kk,ff,d)
return u, grad
"""
elseif Sys.iswindows()
py"""
import tensorflow as tf
libSparseSolver = tf.load_op_library('build/libSparseSolver.dll')
@tf.custom_gradient
def sparse_solver(ii,jj,vv,kk,ff,d):
u = libSparseSolver.sparse_solver(ii,jj,vv,kk,ff,d)
def grad(dy):
return libSparseSolver.sparse_solver_grad(dy, u, ii,jj,vv,kk,ff,d)
return u, grad
"""
end
sparse_solver = py"sparse_solver"
################## End Load Operator ##################
# Construct a random sparse linear system for testing
d0 = 30
nv = 100
nf = 100
aug = Array(1:d0)
ii = [rand(1:d0,nv);aug...]
jj = [rand(1:d0,nv);aug...]
vv = [rand(nv);rand(d0)]
A = sparse(ii,jj,vv,d0,d0)
kk = rand(1:d0,nf)
ff = rand(nf)
rhs = (sparse(kk,ones(Int64,nf),ff,d0,1)|>Array)[:,1]
u_ = A\rhs
# @show Array(A), rhs
ii = constant(ii,dtype=Int32)
jj = constant(jj,dtype=Int32)
vv = constant(vv)
kk = constant(kk,dtype=Int32)
ff = constant(ff)
d = constant(d0,dtype=Int32)
# ii = constant([1;2;3;2],dtype=Int32)
# jj = constant([1;2;3;3],dtype=Int32)
# vv = constant([1.0;2.0;2.0;1.0])
# kk = constant([1;2;3],dtype=Int32)
# ff = constant([1.0;2.0;3.0])
# d = constant(3,dtype=Int32)
u = sparse_solver(ii,jj,vv,kk,ff,d)
sess = Session()
init(sess)
@show norm(run(sess, u)-u_)
# error("")
# Gradient check with respect to the matrix values (passed as `m` below)
function scalar_function(m)
return sum(tanh(sparse_solver(ii,jj,m,kk,ff,d)))
end
m_ = vv
v_ = rand(nv+d0)
y_ = scalar_function(m_)
dy_ = gradients(y_, m_)
ms_ = Array{Any}(undef, 5)
ys_ = Array{Any}(undef, 5)
s_ = Array{Any}(undef, 5)
w_ = Array{Any}(undef, 5)
gs_ = @. 1 / 10^(1:5)
for i = 1:5
g_ = gs_[i]
ms_[i] = m_ + g_*v_
ys_[i] = scalar_function(ms_[i])
s_[i] = ys_[i] - y_
w_[i] = s_[i] - g_*sum(v_.*dy_)
end
sess = Session()
init(sess)
sval_ = run(sess, s_)
wval_ = run(sess, w_)
close("all")
loglog(gs_, abs.(sval_), "*-", label="finite difference")
loglog(gs_, abs.(wval_), "+-", label="automatic differentiation")
loglog(gs_, gs_.^2 * 0.5*abs(wval_[1])/gs_[1]^2, "--",label="\$\\mathcal{O}(\\gamma^2)\$")
loglog(gs_, gs_ * 0.5*abs(sval_[1])/gs_[1], "--",label="\$\\mathcal{O}(\\gamma)\$")
plt.gca().invert_xaxis()
legend()
xlabel("\$\\gamma\$")
ylabel("Error")
sess = Session()
@show run(sess, dy_)
| ADCME | https://github.com/kailaix/ADCME.jl.git |
|
[
"MIT"
] | 0.7.3 | 4ecfc24dbdf551f92b5de7ea2d99da3f7fde73c9 | code | 3074 |
__precompile__(true)
module ADCME
export tf,
tfp,
AUTO_REUSE,
GLOBAL_VARIABLES,
TRAINABLE_VARIABLES,
UPDATE_OPS
using PyCall
using Random
using LinearAlgebra
using SparseArrays
using LibGit2
using Libdl
tf = PyNULL()
tfp = PyNULL()
plotly = PyNULL()
libadcme = missing
#------------------------------------------------------------------------------------------
# Global Storage
DTYPE = Dict{Type, PyObject}()
LIBADCME = abspath(joinpath("$(@__DIR__)", "../deps/CustomOps/build", "libadcme.$dlext"))
if Sys.iswindows()
LIBADCME = abspath(joinpath("$(@__DIR__)", "../deps/CustomOps/build", "adcme.dll"))
end
LIBPLUGIN = abspath(joinpath("$(@__DIR__)", "../deps/Plugin"))
if isfile("$(@__DIR__)/../deps/deps.jl")
include("$(@__DIR__)/../deps/deps.jl")
if Sys.iswindows()
ENV["PATH"] = LIBDIR*";"*ENV["PATH"]
else
if haskey(ENV, "LD_LIBRARY_PATH")
ENV["LD_LIBRARY_PATH"] = LIBDIR*":"*ENV["LD_LIBRARY_PATH"]
else
ENV["LD_LIBRARY_PATH"] = LIBDIR
end
end
else
error("ADCME is not properly built; run `Pkg.build(\"ADCME\")` to fix the problem.")
end
run_metadata = nothing
STORAGE = Dict{String, Any}()
function __init__()
global AUTO_REUSE, GLOBAL_VARIABLES, TRAINABLE_VARIABLES, UPDATE_OPS, DTYPE, libadcme
copy!(tf, pyimport("tensorflow"))
tf.compat.v1.logging.set_verbosity(tf.compat.v1.logging.ERROR)
try
copy!(tfp, pyimport("tensorflow_probability"))
catch
end
DTYPE = Dict(Float64=>tf.float64,
Float32=>tf.float32,
Int64=>tf.int64,
Int32=>tf.int32,
Bool=>tf.bool,
ComplexF64=>tf.complex128,
ComplexF32=>tf.complex64,
String=>tf.string,
Char=>tf.string)
AUTO_REUSE = tf.compat.v1.AUTO_REUSE
GLOBAL_VARIABLES = tf.compat.v1.GraphKeys.GLOBAL_VARIABLES
TRAINABLE_VARIABLES = tf.compat.v1.GraphKeys.TRAINABLE_VARIABLES
UPDATE_OPS = tf.compat.v1.GraphKeys.UPDATE_OPS
global options = Options()
try
libadcme = tf.load_op_library(LIBADCME)
catch
@warn "Cannot load $LIBADCME. Please recompile the shared library by `ADCME.precompile()` for using custom operators."
end
end
include("core.jl")
include("io.jl")
include("optim.jl")
include("run.jl")
include("variable.jl")
include("ops.jl")
include("layers.jl")
include("extra.jl")
include("install.jl")
include("sparse.jl")
include("random.jl")
include("gan.jl")
include("ot.jl")
include("ode.jl")
include("flow.jl")
include("options.jl")
include("mpi.jl")
include("toolchain.jl")
include("kit.jl")
include("rbf.jl")
include("pcl.jl")
include("sqlite.jl")
include("gpu.jl")
include("plotly.jl")
end
| ADCME | https://github.com/kailaix/ADCME.jl.git |
|
[
"MIT"
] | 0.7.3 | 4ecfc24dbdf551f92b5de7ea2d99da3f7fde73c9 | code | 10467 | export
reset_default_graph,
get_collection,
enable_eager_execution,
control_dependencies,
while_loop,
if_else,
stop_gradient,
independent,
tensor,
tensorname,
has_mpi,
get_mpi,
get_mpirun,
@cpu,
@gpu
# only for eager execution
enable_eager_execution() = tf.enable_eager_execution()
Base.:values(o::PyObject) = o.numpy()
"""
reset_default_graph()
Resets the graph by removing all the operators.
"""
function reset_default_graph()
global STORAGE
tf.compat.v1.reset_default_graph()
STORAGE = Dict{String, Any}()
nothing
end
"""
get_collection(name::Union{String, Missing})
Returns the collection with name `name`. If `name` is `missing`, returns all the trainable variables.
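# Example
A minimal sketch, assuming the variables are created with explicit names:
```julia
w = Variable(rand(10, 5), name="mynet_w")
b = Variable(rand(5), name="mynet_b")
get_collection("mynet")   # trainable variables whose names contain "mynet"
get_collection()          # all trainable variables
```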
"""
function get_collection(name::Union{String, Missing}=missing)
if !ismissing(name) && (name in [TRAINABLE_VARIABLES, UPDATE_OPS])
return tf.compat.v1.get_collection(name)
end
if ismissing(name)
res = tf.compat.v1.get_collection(TRAINABLE_VARIABLES)
else
res = []
vs = tf.compat.v1.get_collection(TRAINABLE_VARIABLES)
rname = @eval @r_str $name
for v in vs
if occursin(rname, v.name)
push!(res, v)
end
end
end
return unique(res)
end
"""
tensor(s::String)
Returns the tensor with name `s`. See [`tensorname`](@ref).
"""
function tensor(s::String)
tf.get_default_graph().get_tensor_by_name(s)
end
"""
tensorname(o::PyObject)
Returns the name of the tensor. See [`tensor`](@ref).
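# Example
A minimal sketch of the round trip between a tensor and its name (the exact name string depends on the graph):
```julia
a = constant(1.0)
n = tensorname(a)   # e.g. "Const:0"
b = tensor(n)       # retrieves the same tensor from the default graph
```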
"""
function tensorname(o::PyObject)
o.name
end
function jlargs(kwargs)
kwargs = Dict{Any, Any}(kwargs)
if :axis in keys(kwargs)
@error("axis is not a valid keyword; use dims instead (1-based)")
end
if :dtype in keys(kwargs)
kwargs[:dtype] = DTYPE[kwargs[:dtype]]
end
if :dims in keys(kwargs)
kwargs[:axis] = kwargs[:dims] .- 1
if isa(kwargs[:axis], Array)
kwargs[:axis] = Tuple(kwargs[:axis])
end
delete!(kwargs, :dims)
end
if :colocate in keys(kwargs)
kwargs[:colocate_gradients_with_ops] = kwargs[:colocate]
delete!(kwargs, :colocate)
end
kwargs
end
# control_dependencies can be used to fix the memory problem
# https://stackoverflow.com/questions/39350164/tensorflow-parallel-for-loop-results-in-out-of-memory
"""
control_dependencies(f, ops::Union{Array{PyObject}, PyObject})
Executes all operations in `ops` before any operations _created_ inside the block.
```julia
op1 = tf.print("print op1")
op3 = tf.print("print op3")
control_dependencies(op1) do
global op2 = tf.print("print op2")
end
run(sess, [op2,op3])
```
In this example, `op1` must be executed before `op2`. But there is no guarantee when `op3` will be executed.
There are several possible outputs of the program such as
```julia-repl
print op3
print op1
print op2
```
or
```
print op1
print op3
print op2
```
"""
function control_dependencies(f, ops::Union{Tuple, Array{PyObject}, PyObject})
if isa(ops, PyObject)
ops = [ops]
end
@pywith tf.control_dependencies(ops) begin
f()
end
end
"""
bind(op::PyObject, ops...)
Adds the operations `ops` to the dependencies of `op`. `ops` are guaranteed to be executed **before** `op`.
The function is useful when we want to execute `ops` but `ops` is not
in the dependency of the final output. For example, if we want to print `i` each time `i` is evaluated
```julia
i = constant(1.0)
op = tf.print(i)
i = bind(i, op)
```
"""
function Base.:bind(op::PyObject, ops...)
local op1
control_dependencies(ops) do
op1 = tf.identity(op)
end
return op1
end
@doc raw"""
while_loop(condition::Union{PyObject,Function}, body::Function, loop_vars::Union{PyObject, Array{Any}, Array{PyObject}};
parallel_iterations::Int64=10, kwargs...)
Loops over `loop_vars` while `condition` is true. This operator only creates one extra node to mark the loops in the computational graph.
# Example
The following script computes
```math
\sum_{i=1}^{10} i
```
```julia
function condition(i, ta)
i <= 10
end
function body(i, ta)
u = read(ta, i-1)
ta = write(ta, i, u+1)
i+1, ta
end
ta = TensorArray(10)
ta = write(ta, 1, constant(1.0))
i = constant(2, dtype=Int32)
_, out = while_loop(condition, body, [i, ta])
summation = stack(out)[10]
```
"""
function while_loop(condition::Union{PyObject,Function}, body::Function, loop_vars::Union{PyObject, Array{Any}, Array{PyObject}};
parallel_iterations::Int64=10, kwargs...)
# @warn "TensorArray must be initialized (writedown at index 1) outside" maxlog=1
if isa(loop_vars, PyObject)
lv = [loop_vars]
else
lv = loop_vars
end
if get_dtype(loop_vars[1])!=Int32
error("Loop index must be Int32, got $(get_dtype(loop_vars[1]))")
end
res = tf.while_loop(condition, body, loop_vars=lv; parallel_iterations=parallel_iterations, kwargs...)
if isa(loop_vars, PyObject)
return res[1]
else
return res
end
end
function if_else_v1(condition::Union{PyObject}, fn1, fn2, args...;kwargs...)
fn1_ = ifelse(isa(fn1, Function), fn1, ()->fn1)
fn2_ = ifelse(isa(fn2, Function), fn2, ()->fn2)
tf.cond(condition, fn1_, fn2_, args...;kwargs...)
end
function if_else_v2(condition::PyObject, fn1::Union{Nothing, PyObject, Array},
fn2::Union{Nothing, PyObject, Array})
fn1 = convert_to_tensor(fn1)
fn2 = convert_to_tensor(fn2)
tf.compat.v2.where(condition, fn1, fn2)
end
"""
if_else(condition::Union{PyObject,Array,Bool}, fn1, fn2, args...;kwargs...)
- If `condition` is a scalar boolean, it outputs `fn1` or `fn2` (a function with no input argument or a tensor) based on whether `condition` is true or false.
- If `condition` is a boolean array, it returns `condition .* fn1 + (1 - condition) .* fn2`
!!! info
If you encounter an error like this:
```
tensorflow.python.framework.errors_impl.InvalidArgumentError: Retval[0] does not have value
```
It probably means that the code inside `if_else` is not valid.
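# Example
A minimal sketch covering both the scalar and the elementwise case:
```julia
a = constant(2.0)
b = constant(3.0)
m = if_else(a > b, a, b)                                                  # scalar condition
v = if_else([true, false], constant([1.0, 2.0]), constant([3.0, 4.0]))   # elementwise selection
```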
"""
function if_else(condition::Union{PyObject,Array,Bool}, fn1, fn2, args...;kwargs...)
if isa(condition, Array) || isa(condition, Bool)
condition = convert_to_tensor(condition)
end
if isa(condition, Function) || (eltype(condition)<:Bool && length(size(condition))==0)
if_else_v1(condition, fn1, fn2, args...;kwargs...)
else
if_else_v2(condition, fn1, fn2)
end
end
"""
has_mpi(verbose::Bool = true)
Determines whether MPI is installed.
"""
function has_mpi(verbose::Bool = true)
if Sys.iswindows()
if haskey(ENV, "MSMPI_INC") && haskey(ENV, "MSMPI_LIB64")
return true
else
return false
end
end
if haskey(ENV, "MPI_INCLUDE_PATH") && haskey(ENV, "MPI_C_LIBRARIES")
if !(isdir(ENV["MPI_INCLUDE_PATH"]) && "mpi.h" in readdir(ENV["MPI_INCLUDE_PATH"]))
error("mpi.h is not found in ENV[\"MPI_INCLUDE_PATH\"] = $(ENV["MPI_INCLUDE_PATH"])")
end
if !isfile(ENV["MPI_C_LIBRARIES"])
error("ENV[\"MPI_C_LIBRARIES\"]=$(ENV["MPI_C_LIBRARIES"]) does not exists.")
end
verbose && (@info "Use MPI libraries: $(ENV["MPI_C_LIBRARIES"])")
return true
end
if isfile(get_library(joinpath(ADCME.LIBDIR, "mpi"))) && isfile(joinpath(ADCME.INCDIR, "mpi.h"))
verbose && (@info "Use default MPI library (OpenMPI)")
return true
end
return false
end
"""
get_mpi()
Returns the MPI include directory and shared library.
"""
function get_mpi()
if Sys.iswindows()
if haskey(ENV, "MSMPI_INC") && haskey(ENV, "MSMPI_LIB64")
return ENV["MSMPI_INC"], joinpath(ENV["MSMPI_LIB64"], "msmpi.lib")
else
return false
end
end
if haskey(ENV, "MPI_INCLUDE_PATH") && haskey(ENV, "MPI_C_LIBRARIES")
return ENV["MPI_INCLUDE_PATH"], ENV["MPI_C_LIBRARIES"]
end
if isfile(get_library(joinpath(ADCME.LIBDIR, "mpi"))) && isfile(joinpath(ADCME.INCDIR, "mpi.h"))
return ADCME.INCDIR, get_library(joinpath(ADCME.LIBDIR, "mpi"))
end
error("""MPI Library is not found.
- On Windows, you can download Microsoft MPI (https://docs.microsoft.com/en-us/message-passing-interface/microsoft-mpi)
- On Unix, you can install OpenMPI via `install_openmpi()`
""")
end
"""
get_mpirun()
Returns the **default** mpirun executable.
"""
function get_mpirun()
if !has_mpi(false)
error("""MPI Library is not found.
- On Windows, you can download Microsoft MPI (https://docs.microsoft.com/en-us/message-passing-interface/microsoft-mpi)
- On Unix, you can install OpenMPI via `install_openmpi()`
""")
end
if Sys.iswindows()
return joinpath(ENV["MPI_BIN"], "mpiexec.exe")
else
if haskey(ENV, "MPI_INCLUDE_PATH") && haskey(ENV, "MPI_C_LIBRARIES")
@warn("You are not using the default MPI. Trying to detect the executable...")
mpirun = abspath(joinpath(ENV["MPI_INCLUDE_PATH"], "..", "bin", "mpirun"))
if !isfile(mpirun)
error("Failed.")
else
return mpirun
end
end
mpirun = joinpath(ADCME.BINDIR, "mpirun")
if !isfile(mpirun)
error("Failed.")
end
return mpirun
end
end
"""
independent(o::PyObject, args...; kwargs...)
Returns `o`, but gradients are not back-propagated through `o` into the variables it depends on.
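# Example
A minimal sketch (the expressions are illustrative):
```julia
x = Variable(2.0)
y = independent(x^2)   # same value as x^2, but treated as a constant by the gradient
z = x * y
g = gradients(z, x)    # equals y, since no gradient flows through the `independent` branch
```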
"""
independent(o::PyObject, args...; kwargs...) = stop_gradient(o, args...; kwargs...)
@deprecate stop_gradient independent
function stop_gradient(o::PyObject, args...;kwargs...)
tf.stop_gradient(o, args...;kwargs...)
end
macro cpu(device_id, expr)
device = "/cpu:"*string(device_id)
quote
@pywith tf.device($device) begin
$(esc(expr))
end
end
end
macro cpu(expr)
device = "/cpu:0"
quote
@pywith tf.device($device) begin
$(esc(expr))
end
end
end
macro gpu(device_id, expr)
device = "/gpu:"*string(device_id)
quote
@pywith tf.device($device) begin
$(esc(expr))
end
end
end
macro gpu(expr)
device = "/gpu:0"
quote
@pywith tf.device($device) begin
$(esc(expr))
end
end
end | ADCME | https://github.com/kailaix/ADCME.jl.git |
|
[
"MIT"
] | 0.7.3 | 4ecfc24dbdf551f92b5de7ea2d99da3f7fde73c9 | code | 31973 | using Random
export customop,
xavier_init,
load_op_and_grad,
load_op,
use_gpu,
install,
load_system_op,
register,
debug,
doctor,
nnuq,
compile,
list_physical_devices,
MCMCSimple,
simulate,
diagnose,
get_placement,
timestamp,
load_library,
sleep_for,
get_library_symbols
"""
xavier_init(size, dtype=Float64)
Returns a matrix of size `size` whose entries are drawn from the Xavier initialization.
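# Example
A minimal sketch:
```julia
W = Variable(xavier_init([784, 100]))   # weights for a 784 → 100 dense layer
```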
"""
function xavier_init(size, dtype=Float64)
in_dim = size[1]
xavier_stddev = 1. / sqrt(in_dim / 2.)
return randn(dtype, size...)*xavier_stddev
end
############### custom operators ##################
"""
cmake(DIR::String=".."; CMAKE_ARGS::Union{Array{String}, String} = "")
The built-in CMake command for building C/C++ libraries. If extra CMake arguments are needed, specify them through `CMAKE_ARGS`.
# Example
```
ADCME.cmake(CMAKE_ARGS=["SHARED=YES", "STATIC=NO"])
```
The executed command might be:
```
/home/darve/kailaix/.julia/adcme/bin/cmake -G Ninja -DCMAKE_MAKE_PROGRAM=/home/darve/kailaix/.julia/adcme/bin/ninja -DJULIA=/home/darve/kailaix/julia-1.3.1/bin/julia -DCMAKE_C_COMPILER=/home/darve/kailaix/.julia/adcme/bin/x86_64-conda_cos6-linux-gnu-gcc -DCMAKE_CXX_COMPILER=/home/darve/kailaix/.julia/adcme/bin/x86_64-conda_cos6-linux-gnu-g++ SHARED=YES STATIC=NO ..
```
"""
function cmake(DIR::String=".."; CMAKE_ARGS::Union{Array{String}, String} = "")
DIR = abspath(DIR)
ENV_ = copy(ENV)
LD_PATH = Sys.iswindows() ? "PATH" : "LD_LIBRARY_PATH"
if haskey(ENV_, LD_PATH)
ENV_[LD_PATH] = ENV[LD_PATH]*":$LIBDIR"
else
ENV_[LD_PATH] = LIBDIR
end
if has_mpi(false)
ENV_["MPI_INCLUDE_PATH"], ENV_["MPI_C_LIBRARIES"] = get_mpi()
end
if Sys.iswindows()
if !haskey(ENV_, "VS150COMNTOOLS")
# @warn "VS150COMNTOOLS is not set, default to /c/Program Files (x86)/Microsoft Visual Studio/2017/Community/Common7/Tools" maxlog=1
ENV_["VS150COMNTOOLS"] = "/c/Program Files (x86)/Microsoft Visual Studio/2017/Community/Common7/Tools"
end
# @info "Do remember to add ADD_DEFINITIONS(-DNOMINMAX) to your CMakeLists.txt" maxlog=1
run(setenv(`$CMAKE -G"Visual Studio 15" -DJULIA="$(joinpath(Sys.BINDIR, "julia"))" -A x64 $CMAKE_ARGS $DIR`, ENV_)) # very important, x64
else
run(setenv(`$CMAKE -G Ninja -DCMAKE_MAKE_PROGRAM=$NINJA -DJULIA="$(joinpath(Sys.BINDIR, "julia"))" -DCMAKE_C_COMPILER=$CC -DCMAKE_CXX_COMPILER=$CXX $CMAKE_ARGS $DIR`, ENV_))
end
end
function make()
ENV_ = copy(ENV)
LD_PATH = Sys.iswindows() ? "PATH" : "LD_LIBRARY_PATH"
if haskey(ENV_, LD_PATH)
ENV_[LD_PATH] = ENV[LD_PATH]*":$LIBDIR"
else
ENV_[LD_PATH] = LIBDIR
end
if Sys.iswindows()
sln_file = filter(x->endswith(x, ".sln"), readdir())
if length(sln_file)==0
error("No .sln file found. Did you run `ADCME.cmake()`?")
elseif length(sln_file)>1
error("More than 1 .sln file found. Check your program.")
else
sln_file = sln_file[1]
end
run(`cmd /c $CMAKE --build . -j --target ALL_BUILD --config Release`)
else
if haskey(ENV, "TRAVIS_BRANCH")
run(setenv(`$NINJA -j1`, ENV_))
else
run(setenv(`$NINJA -j20`, ENV_))
end
end
end
"""
make_library(Libdir::String)
Builds the shared library in `Libdir`. The expected layout of the source files is
```
- Libdir
- *.cpp
- *.h
- CMakeLists.txt
- build (Optional)
```
"""
function make_library(Libdir::String)
if !isdir(Libdir)
error("$Libdir is not a valid directory.")
end
PWD = pwd()
cd(Libdir)
if !isdir("build")
mkdir("build")
end
cd("build")
if !isfile("Makefile")
ADCME.cmake()
end
ADCME.make()
cd(PWD)
end
load_op_dict = Dict{Tuple{String, String}, PyObject}()
load_op_grad_dict = Dict{Tuple{String, String}, PyObject}()
@doc """
load_op(oplibpath::Union{PyObject, String}, opname::String; verbose::Union{Missing, Bool} = missing)
Loads the operator `opname` from library `oplibpath`.
"""
function load_op(oplibpath::Union{PyObject, String}, opname::String; verbose::Union{Missing, Bool} = missing)
verbose = coalesce(verbose, options.customop.verbose)
oplibpath = get_library(oplibpath)
if haskey(load_op_dict, (oplibpath,opname))
return load_op_dict[(oplibpath,opname)]
end
s = getproperty(load_library(oplibpath), opname)
load_op_dict[(oplibpath,opname)] = s
verbose && printstyled("Load library operator: $oplibpath ==> $opname\n", color=:green)
return s
end
@doc """
load_op_and_grad(oplibpath::Union{PyObject, String}, opname::String; multiple::Bool=false)
Loads the operator `opname` from library `oplibpath`; gradients are also imported.
If `multiple` is true, the operator is assumed to have multiple outputs.
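# Example
A minimal sketch; the library path and kernel names below are placeholders for a library built with [`customop`](@ref):
```julia
my_op = load_op_and_grad("./build/libMyOp", "my_op")
y = my_op(x1, x2)    # gradients of `y` use the registered `my_op_grad` kernel
```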
"""
function load_op_and_grad(oplibpath::Union{PyObject, String}, opname::String;
multiple::Bool=false, verbose::Union{Missing, Bool} = missing)
verbose = coalesce(verbose, options.customop.verbose)
is_system_op = oplibpath == LIBADCME
if isa(oplibpath, String)
if Sys.iswindows()
a, b = splitdir(oplibpath)
if length(b) >=3 && b[1:3]=="lib"
b = b[4:end]
end
oplibpath = joinpath(a, b)
end
if splitext(oplibpath)[2]==""
oplibpath = oplibpath * (Sys.islinux() ?
".so" : Sys.isapple() ? ".dylib" : ".dll")
end
oplibpath = abspath(oplibpath)
if haskey(load_op_grad_dict, (oplibpath,opname))
return load_op_grad_dict[(oplibpath,opname)]
end
if !isfile(oplibpath)
error("Library $oplibpath does not exist.")
end
end
opname_grad = opname*"_grad"
fn_name = opname*randstring(8)
try
if is_system_op
py"""
import tensorflow as tf
lib$$fn_name = $libadcme
"""
elseif isa(oplibpath, String)
py"""
import tensorflow as tf
lib$$fn_name = tf.load_op_library($oplibpath)
"""
else
py"""
import tensorflow as tf
lib$$fn_name = $oplibpath
"""
end
if !multiple
py"""
@tf.custom_gradient
def $$fn_name(*args):
u = lib$$fn_name.$$opname(*args)
def grad(dy):
return lib$$fn_name.$$opname_grad(dy, u, *args)
return u, grad
"""
else
py"""
@tf.custom_gradient
def $$fn_name(*args):
u = lib$$fn_name.$$opname(*args)
def grad(*dy):
dy = [y for y in dy if y is not None and y.dtype in [tf.float64, tf.float32]] # only float64 and float32 can backpropagate gradients
return lib$$fn_name.$$opname_grad(*dy, *u, *args)
return u, grad
"""
end
catch(e)
printstyled("Failed load $oplibpath or its symbols. Error Message from the TensorFlow backend\n$(string(e))\n", color=:red)
Libdl.dlopen(oplibpath)
end
s = py"$$fn_name"
if isa(oplibpath, String)
load_op_grad_dict[(oplibpath,opname)] = s
verbose && printstyled("Load library operator (with gradient, multiple outputs = $multiple): $oplibpath ==> $opname\n", color=:green)
end
return s
end
"""
load_system_op(opname::String, grad::Bool=true; multiple::Bool=false)
Loads a custom operator from the CustomOps directory (shipped with ADCME rather than TensorFlow).
For example
```
s = "SparseOperator"
oplib = "libSO"
grad = true
```
this will direct Julia to find library `CustomOps/SparseOperator/libSO.dylib` on MACOSX
"""
function load_system_op(opname::String, grad::Bool=true; multiple::Bool=false)
if !isfile(LIBADCME)
@info "$LIBADCME does not exist. Precompiling..."
ADCME.precompile()
end
if grad
load_op_and_grad(LIBADCME, opname; multiple=multiple, verbose=false)
else
load_op(LIBADCME, opname, verbose=false)
end
end
"""
compile(s::String; force::Bool=false)
Compiles the library given by path `deps/s`. If `force` is false, `compile` first checks whether
the binary product exists. If it does, `compile` returns 2. Otherwise, `compile` tries to
build the binary product, returning 0 on success and 1 on failure.
"""
function compile(s::String; force::Bool=false, customdir::Bool = false)
PWD = pwd()
dir = s
if !customdir
dir = joinpath(joinpath("$(@__DIR__)", "../deps/CustomOps"), s)
end
if !isdir(dir)
@warn("Folder for the operator $s does not exist: $dir")
return 1
end
cd(dir)
local surfix
if Sys.isapple()
surfix = ".dylib"
elseif Sys.islinux()
surfix = ".so"
elseif Sys.iswindows()
surfix = ".dll"
end
if !force && isdir("build") # check if product exists
files = readdir("build")
if any([endswith(x, surfix) for x in files])
@warn("The binary product exists.")
cd(PWD)
return 2
end
end
rm("build",force=true,recursive=true)
mkdir("build")
cd("build")
try
cmake()
make()
cd(PWD)
return 0
catch e
error("Compilation error: $e")
cd(PWD)
return 1
end
end
"""
precompile(force::Bool=false)
Precompile the built-in custom operators.
"""
function Base.:precompile(force::Bool=false)
PWD = pwd()
cd("$(@__DIR__)/../deps/CustomOps")
if force
try
rm("build", force=true, recursive=true)
catch
error("""Failed to remove build directory. Follow the following steps and try again:
1. Quit ALL julia processes that use ADCME;
2. Remove $(joinpath(pwd(), "build")) manually.""")
end
end
change_directory("build")
require_cmakecache() do
if Sys.isapple()
try
ADCME.cmake()
catch
@info "Use system clang..."
mv(joinpath(BINDIR, "clang"), joinpath(BINDIR, "clang_original"))
mv(joinpath(BINDIR, "clang++"), joinpath(BINDIR, "clang++_original"))
symlink("/usr/bin/clang",joinpath(BINDIR, "clang"))
symlink("/usr/bin/clang++",joinpath(BINDIR, "clang++"))
ADCME.cmake()
end
else
ADCME.cmake()
end
end
require_library("adcme") do
ADCME.make()
end
cd(PWD)
global libadcme = tf.load_op_library(LIBADCME)
end
"""
compile()
Compile a custom operator in the current directory. A `CMakeLists.txt` must be present.
"""
function compile()
PWD = pwd()
if !isfile("CMakeLists.txt")
error(SystemError("No CMakeLists.txt in the current directory found."))
end
if !isdir("build")
mkdir("build")
end
cd("build")
try
cmake()
make()
catch e
@warn "Compiling failed: $e"
finally
cd(PWD)
end
end
"""
customop(;with_mpi::Bool = false)
Create a new custom operator. Typically users call `customop` twice: the first call generates a `custom_op.txt` file,
the user edits its content, and the second call generates the C++ source code, CMakeLists.txt, and gradtest.jl from `custom_op.txt`.
# Example
```julia-repl
julia> customop() # create an editable `customop.txt` file
[ Info: Edit custom_op.txt for custom operators
julia> customop() # after editing `customop.txt`, call it again to generate interface files.
```
# Options
- `with_mpi`: Whether the custom operator uses MPI
"""
function customop(;with_mpi::Bool = false)
# install_custom_op_dependency()
py_dir = "$(@__DIR__)/../deps/CustomOpsTemplate"
if !("custom_op.txt" in readdir("."))
cp("$(py_dir)/custom_op.example", "custom_op.txt")
@info "Edit custom_op.txt for custom operators"
return
else
python = PyCall.python
with_mpi = with_mpi ? 1 : 0
run(`$python $(py_dir)/customop.py custom_op.txt $py_dir $with_mpi`)
end
nothing
end
function use_gpu(i::Union{Nothing,Int64}=nothing)
if length(CUDA_INC)==0
error("""ADCME is not built against GPU. Set ENV["GPU"]=1 and rebuild GPU.""")
end
dl = pyimport("tensorflow.python.client.device_lib")
if !isnothing(i) && i>=1
i = join(collect(0:i-1),',')
ENV["CUDA_VISIBLE_DEVICES"] = i
elseif !isnothing(i) && i==0
ENV["CUDA_VISIBLE_DEVICES"] = ""
end
local_device_protos = dl.list_local_devices()
return [x.name for x in local_device_protos if x.device_type == "GPU"]
end
function list_physical_devices(cpu_or_gpu::String = "all")
dl = pyimport("tensorflow.python.client.device_lib")
local_device_protos = dl.list_local_devices()
CPU = [x.name for x in local_device_protos if x.device_type == "CPU"]
GPU = [x.name for x in local_device_protos if x.device_type == "GPU"]
if cpu_or_gpu == "all"
return [CPU;GPU]
elseif cpu_or_gpu == "GPU"
return GPU
elseif cpu_or_gpu == "CPU"
return CPU
else
error(ArgumentError("$cpu_or_gpu is not a valid input. Expected: all, CPU, or GPU"))
end
end
"""
install(s::String; force::Bool = false, islocal::Bool = false)
Install a custom operator from a URL, a local directory (when `islocal` is true), or a repository name. In any of the three cases,
`install` copies the folder to $(abspath(joinpath(LIBADCME, "../../Plugin"))).
When `s` is a plain name, it is converted to
https://github.com/ADCMEMarket/<s>
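# Example
A minimal sketch; "MyOperator" is a placeholder for a repository under https://github.com/ADCMEMarket:
```julia
libfile = install("MyOperator")   # clones, builds, and returns the path of the shared library
```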
"""
function install(s::String; force::Bool = false, islocal::Bool = false)
if !islocal && !startswith(s, "http")
s = "https://github.com/ADCMEMarket/"*s
end
_, name = splitdir(s)
if force
rm(joinpath(LIBPLUGIN, name), force=true, recursive=true)
elseif isdir(joinpath(LIBPLUGIN, name)) && ("build" in readdir(joinpath(LIBPLUGIN, name)))
return _plugin_lib(joinpath(LIBPLUGIN, name))
end
try
LibGit2.clone(s, joinpath(LIBPLUGIN, name))
catch
LibGit2.clone("git://$(s[9:end]).git", joinpath(LIBPLUGIN, name))
end
PWD = pwd()
cd(joinpath(LIBPLUGIN, name))
if isfile(joinpath(LIBPLUGIN, name, "build.jl"))
include(joinpath(LIBPLUGIN, name, "build.jl"))
end
cmakelists = String(read("CMakeLists.txt"))
if !occursin("cmake_minimum_required", cmakelists)
cmakelists = replace(String(read(joinpath(LIBPLUGIN, "CMakeLists.txt"))), "[INSTRUCTION]"=>cmakelists)
end
open("CMakeLists.txt", "w") do io
write(io, cmakelists)
end
mkdir("build")
cd("build")
ADCME.cmake()
ADCME.make()
cd(PWD)
return _plugin_lib(joinpath(LIBPLUGIN, name))
end
function _plugin_lib(D)
files = readdir(joinpath(D, "build"))
dylib = filter(x->endswith(x, ".$dlext"), files)
if length(dylib)==0
error(SystemError("No dynamic library found."))
elseif length(dylib)>1
error(SystemError("More then one dynamic libraries found."))
else
return joinpath(D, "build", dylib[1])
end
end
@doc raw"""
register(forward::Function, backward::Function; multiple::Bool=false)
Register a function `forward` with back-propagated gradients rule `backward` to the backward.
∘ `forward`: it takes $n$ inputs and outputs $m$ tensors. When $m>1$, the keyword `multiple` must be true.
∘ `backward`: it takes $\tilde m$ top gradients from float/double output tensors of `forward`, $m$ outputs of the `forward`,
and $n$ inputs of the `forward`. `backward` outputs $n$ gradients for each input of `forward`. When input $i$ of
`forward` is not float/double, `backward` should return `nothing` for the corresponding gradients.
# Example
```julia
forward = x->log(1+exp(x))
backward = (dy, y, x)->dy*(1-exp(-y))
f = register(forward, backward)
```
"""
function register(forward::Function, backward::Function; multiple::Bool=false)
fn_name = "customgrad_"*randstring(8)
if !multiple
py"""
import tensorflow as tf
@tf.custom_gradient
def $$fn_name(*args):
u = $forward(*args)
def grad(dy):
return $backward(dy, u, *args)
return u, grad
"""
else
py"""
import tensorflow as tf
@tf.custom_gradient
def $$fn_name(*args):
u = $forward(*args)
def grad(*dy):
dy = [y for y in dy if y is not None and y.dtype in [tf.float64, tf.float32]] # only float64 and float32 can backpropagate gradients
return $backward(*dy, *u, *args)
return u, grad
"""
end
return py"$$fn_name"
end
"""
debug(sess::PyObject, o::PyObject)
In the case a session run yields an error from the TensorFlow backend, this function can help print the exact error.
For example, you might encounter `InvalidArgumentError()` with no detailed error information, and this function can be useful for debugging.
"""
function debug(sess::PyObject, o::PyObject)
py"""
import tensorflow as tf
import traceback
try:
$sess.run($o)
except Exception:
print(traceback.format_exc())
"""
end
"""
debug(libfile::String = "")
Loads a custom operator shared library. If loading fails, a detailed error message is printed.
"""
function debug(libfile::String = "")
if libfile==""
libfile = LIBADCME
end
py"""
import tensorflow as tf
import traceback
try:
tf.load_op_library($libfile)
except Exception:
print(traceback.format_exc())
"""
end
"""
doctor()
Reports the health of the currently installed ADCME package. If some components are broken, a possible fix is proposed.
"""
function doctor()
function yes(name)
printstyled("[✔️] $name\n", color=:green, bold=true)
end
function no(name, diagnose, instruction)
printstyled("[✘] $name\n", color=:red, bold=true)
printstyled("\n[Reason]\n", color=:magenta)
printstyled("$diagnose\n\n", color=:blue)
printstyled("\n[Instruction]\n", color=:magenta)
printstyled("$instruction\n\n", color=:blue)
end
c = true
if VERSION>=v"1.4" && VERSION<v"1.5" && Sys.isapple()
c = false
end
if c
yes("Julia version")
else
no("Julia version",
"""Your Julia version is $VERSION, and your system is MACOSX. This combination may have a compatibility issue.""",
"""If `using ADCME` freezes, consider install another version of Julia.""")
end
c = (length(tf.__version__)>=6) && (tf.__version__[1:6]=="1.15.0")
if c
yes("TensorFlow version")
else
no("TensorFlow version",
"""Your TensorFlow version is $(tf.__version__). The TensorFlow version shipped with ADCME is 1.15.0.""",
"""Set ENV["FORCE_REINSTALL_ADCME"] = 1 and rebuild ADCME
julia> ENV["FORCE_REINSTALL_ADCME"] = 1
julia> ]
pkg> build ADCME""")
end
c = (PyCall.python==ADCME.PYTHON)
if c
yes("Python executable file")
else
no("Python executable file",
"""PyCall Python path $(PyCall.python) does not match the ADCME-compatible Python $(ADCME.PYTHON)""",
"""Rebuild PyCall with a compatible Python version:
using Pkg
ENV["PYTHON"] = "$(ADCME.PYTHON)"
Pkg.build("PyCall")
""")
end
c = true
try
if Sys.iswindows()
run(`cmd /c where julia`)
else
read(`which julia`)
end
catch
c = false
end
if c
yes("Julia path")
else
no("Julia path (Optional)",
"""`julia` outputs nothing. This will break custom operator compilation.""",
"""Add your julia binary path to your environment path, e.g. (Unix systems)
export PATH=$(Sys.BINDIR):\$PATH
For convenience, you can add the above line to your `~/.bashrc` (Linux) or `~/.bash_profile` (Apple).
For Windows, you need to add it to system environment.""")
end
c = Sys.iswindows() ?
haskey(ENV, "PATH") && occursin(ADCME.LIBDIR, ENV["PATH"]) :
haskey(ENV, "LD_LIBRARY_PATH") && occursin(ADCME.LIBDIR, ENV["LD_LIBRARY_PATH"])
if c
yes("Dynamic library path")
else
no("Dynamic library path (Optional)",
"""$(ADCME.LIBDIR) is not in LD_LIBRARY_PATH. This MAY break custom operator compilation. However, in most cases, ADCME automatic fixes this problem for you.""",
"""Add your dynamic library path path to your environment path, e.g. (Unix systems)
export LD_LIBRARY_PATH=$(ADCME.LIBDIR):\$LD_LIBRARY_PATH
For convenience, you can add the above line to your `~/.bashrc` (Linux or Apple).
For Windows, you need to add it to PATH instead of LD_LIBRARY_PATH.""")
end
c = Sys.WORD_SIZE==64
if c
yes("Memory Address Length = 64")
else
no("Memory Address Length",
"""Your memory address length is $(Sys.WORD_SIZE). ADCME is only tested against 64-bit machine.""",
"""If you do not need custom operators, then it's fine. Otherwise you need to switch to a 64-bit machine""")
end
if Sys.iswindows()
c = isfile(ADCME.MAKE*".exe") && occursin("15", (ADCME.MAKE)) && occursin("2017", ADCME.MAKE)
if c
yes("C Compiler")
else
no("C Compiler",
"""You specified that the C compiler for custom operators is
$(ADCME.MAKE)
However, one of the following requirements is not met:
1*. The file you specified $(ADCME.MAKE*".exe") does not exist.
2**. (Optional) For compatibility, we suggest you use Microsoft Visual Studio 2017 (Version number: 15).
* The path is actually not needed in compilation, but we raise such an issue here in case you obtain some compilation errors in the future.
* We check the version by looking for "15" and "2017" in the path specification. If you are sure your compiler is correct, you can ignore this message. """,
"""Manually edit $(abspath(joinpath(splitdir(pathof(ADCME))[1], "../deps/deps.jl"))) and modify `MAKE` to be the correct compiler.""")
end
end
c = haskey(ENV, "PATH") && occursin(ADCME.BINDIR, ENV["PATH"])
if c
yes("Binaries path")
else
no("Binaries path",
"""$(ADCME.BINDIR) is not in PATH. This path contains compatible tools such as a GCC compiler, `cmake`, `make`, or any other tools you want to use directly from terminal.
However, setting the path is NOT a requirement, and ADCME works totally fine without any action.""",
"""(Optional) Add your binary path to your environment path, e.g. (Unix systems)
export PATH=$(ADCME.BINDIR):\$PATH
For convenience, you can add the above line to your `~/.bashrc` (Linux) or `~/.bash_profile` (Apple).
For Windows, you need to add it to system environment.""")
end
if length(ADCME.CUDA_INC)>0
c = Sys.iswindows() ?
haskey(ENV, "PATH") && occursin(ADCME.LIBCUDA, ENV["PATH"]) :
haskey(ENV, "LD_LIBRARY_PATH") && occursin(ADCME.LIBCUDA, ENV["LD_LIBRARY_PATH"])
if c
yes("CUDA LD_LIBRARY_PATH")
else
no("CUDA LD_LIBRARY_PATH",
"""$(ADCME.LIBCUDA) is not in LD_LIBRARY_PATH. This path contains compatible tools such as a GCC compiler, `cmake`, `make`, etc.""",
"""The fix is OPTIONAL.
Add your binary path to your environment path, e.g. (Unix systems)
export LD_LIBRARY_PATH=$(ADCME.LIBCUDA):\$LD_LIBRARY_PATH
For convenience, you can add the above line to your `~/.bashrc` (Linux or Apple).
For Windows, you need to add it to PATH instead of LD_LIBRARY_PATH.""")
end
try
if !Sys.iswindows()
Libdl.dlpath("libcuda")
Libdl.dlpath("libcudnn")
Libdl.dlpath("libcublas")
else
Libdl.dlpath("cudart64_100")
Libdl.dlpath("cudnn64_7")
Libdl.dlpath("cublas64_100")
end
yes("CUDA Shared Library")
catch
no("CUDA Shared Library",
"""libcuda, libcudnn, and (or) libcublas can not be loaded.""",
"""If you intend to use GPU, this fix is mandatory. Make sure cudatoolkit and cudnn libraries can be found in
$(ADCME.LIBCUDA)
and `nvcc` is in your path.""")
end
c = isdir(ADCME.CUDA_INC) && "cuda.h" in readdir(ADCME.CUDA_INC)
if c
yes("CUDA Header Files")
if !isfile(joinpath(ADCME.TF_INC, "third_party/gpus/cuda/include/cuda_fp16.h"))
println("Fixing third_party/gpus/cuda/include...")
if !ispath(joinpath(ADCME.TF_INC, "third_party/gpus/cuda/"))
mkpath(joinpath(ADCME.TF_INC, "third_party/gpus/cuda/"))
end
rm(joinpath(ADCME.TF_INC, "third_party/gpus/cuda/include/"), force=true, recursive=true)
symlink(ADCME.CUDA_INC, joinpath(ADCME.TF_INC, "third_party/gpus/cuda/include"))
end
else
no("CUDA Header Files",
"""Cuda include library does not exist or `cuda.h` is missing.""",
"""It might be possible that your cuda include library is located somewhere else other than $(ADCME.CUDA_INC). Fix the dependency file.""")
end
else
no("GPU Support (Optional)",
"""ADCME is not compiled against GPU.""",
"""If you intend to use GPU, set ENV["GPU"] = 1 and then rebuild ADCME.""")
end
depsfile = abspath(joinpath(@__DIR__, "../deps/deps.jl"))
println("============================================================\n$(depsfile)\n============================================================")
println(read(depsfile, String))
@assert isdir(ADCME.BINDIR)
@assert isdir(ADCME.LIBDIR)
@assert isdir(ADCME.TF_INC)
@assert isdir(ADCME.PREFIXDIR)
if !Sys.iswindows()
@assert isfile(ADCME.CC) || islink(ADCME.CC)
@assert isfile(ADCME.CXX) || islink(ADCME.CXX)
@assert isfile(ADCME.NINJA)
else
@assert isfile(ADCME.MAKE)
end
@assert isfile(ADCME.CMAKE)
@assert isfile(ADCME.TF_LIB_FILE) || islink(ADCME.TF_LIB_FILE)
@assert isdir(ADCME.INCDIR)
end
"""
test_gpu()
Tests the GPU utilities.
"""
function test_gpu()
PWD = pwd()
run(`which nvcc`)
cd("$(@__DIR__)/../examples/gpu_custom_op")
mkdir("build")
cd("build")
ADCME.cmake()
ADCME.make()
cd("..")
include("gputest.jl")
cd(PWD)
end
@doc raw"""
nnuq(H::Array{Float64,2}, invR::Union{Float64, Array{Float64,2}}, invQ::Union{Float64, Array{Float64,2}})
Returns the covariance matrix for the Bayesian inversion.
The negative log likelihood function is
$$l(s) =\frac{1}{2} (y-h(s))^T R^{-1} (y-h(s)) + \frac{1}{2} s^T Q^{-1} s$$
The covariance matrix is computed by first linearizing $h(s)$
$$h(s)\approx h(s_0) + \nabla h(s_0) (s-s_0)$$
and then computing the second order derivative
$$V = \left(\frac{\partial^2 l}{\partial s^T\partial s}\right)^{-1} = (H^T R^{-1} H + Q^{-1})^{-1}$$
Note the result is independent of $s_0$, $y_0$, and only depends on $\nabla h(s_0)$
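# Example
A small sketch with synthetic dimensions (the numbers are illustrative):
```julia
H = rand(20, 3)          # linearized forward operator ∇h(s0): 20 observations, 3 parameters
invR = 1/0.1^2           # observation precision, i.e. noise standard deviation 0.1
invQ = 1.0               # prior precision
V = nnuq(H, invR, invQ)  # 3×3 covariance matrix
```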
"""
function nnuq(H::Array{Float64,2}, invR::Union{Float64, Array{Float64,2}}, invQ::Union{Float64, Array{Float64,2}})
if isa(invQ, Float64)
invQ = invQ * I
end
Σ = inv(H' * invR * H + invQ)
(Σ + Σ')/2
end
mutable struct MCMCSimple
logf::Function
proposal::Function
θ0::Array{Float64, 1}
ub::Float64
lb::Float64
L::Array{Float64, 1}
AC::Array{Float64, 1}
end
@doc raw"""
MCMCSimple(obs::Array{Float64, 1}, h::Function,
σ::Float64, θ0::Array{Float64,1}, lb::Float64, ub::Float64)
A very simple yet useful interface for MCMC simulation in many scientific computing problems.
- `obs`: Observations
- `h`: Forward computation function
- `σ`: Noise standard deviation for the observed data
- `ub`, `lb`: upper and lower bound
- `θ0`: Initial guess
The mathematical model is
```math
y_{obs} = h(\theta)
```
and we have a hard constraint $lb \leq \theta \leq ub$.
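# Example
A minimal sketch with an illustrative forward model:
```julia
h = θ -> θ.^2
obs = h([1.0, 2.0]) + 0.01*randn(2)
ms = MCMCSimple(obs, h, 0.01, [1.5, 1.5], 0.0, 3.0)
sim = simulate(ms, 5000)   # one sample per row
diagnose(ms)               # acceptance rate and log-likelihood traces (requires PyPlot)
```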
"""
function MCMCSimple(obs::Array{Float64, 1}, h::Function,
σ::Float64, θ0::Array{Float64,1}, lb::Float64, ub::Float64,
δ::Union{Missing, Float64}=missing)
τ = (ub-lb)/6
δ = coalesce(δ, (ub-lb)/100)
function logf(x)
-sum((obs - h(x)).^2/2σ^2) - sum((x-θ0).^2)/2τ^2
end
function proposal(x)
x + (rand(length(x)) .- 0.5)*2 * δ
end
MCMCSimple(logf, proposal, θ0, ub, lb, zeros(0), zeros(0))
end
function simulate(ms::MCMCSimple, N::Int64, burnin::Union{Int64, Missing} = missing)
burnin = coalesce(burnin, Int64(round(N*0.2)))
sim = zeros(N, length(ms.θ0))
sim[1,:] = ms.θ0
L = zeros(N)
AC = ones(N)
AC[1] = NaN
L[1] = ms.logf(ms.θ0)
k = 1
for i = 2:N
sim[i,:], L[i], k_ = _MCMCSimple_simulate(ms, sim[i-1,:])
k += k_
AC[i] = k/i
end
ms.AC = AC
ms.L = L
return sim
end
function diagnose(ms::MCMCSimple)
if !isdefined(Main, :PyPlot)
error("Package PyPlot.jl must be imported in the main module using `import PyPlot` or `using PyPlot`")
end
Main.PyPlot.figure(figsize = (10, 4))
Main.PyPlot.subplot(121)
Main.PyPlot.title("Acceptance Rate")
Main.PyPlot.plot(ms.AC)
Main.PyPlot.ylim(0,1.05)
Main.PyPlot.subplot(122)
Main.PyPlot.title("Log Likelihood")
Main.PyPlot.plot(ms.L)
end
function _MCMCSimple_simulate(ms::MCMCSimple, x::Array{Float64})
local x_star
while true
x_star = ms.proposal(x)
if all(x_star.<=ms.ub) && all(x_star.>=ms.lb)
break
end
end
Δ = ms.logf(x_star) - ms.logf(x)
if log(rand())<Δ
return x_star, ms.logf(x_star), 1
else
return x, ms.logf(x), 0
end
end
"""
get_placement()
Returns the operation placements.
"""
function get_placement()
sess = Session(config=tf.ConfigProto(log_device_placement=true))
originalSTDOUT = stdout
(outRead, outWrite) = redirect_stdout()
init(sess)
close(outWrite)
data = readavailable(outRead)
close(outRead)
redirect_stdout(originalSTDOUT)
lines = split(String(data), '\n')[1:end-1]
end
"""
sleep_for(t::Union{PyObject, <:Real})
Sleeps for `t` seconds.
"""
function sleep_for(t::Union{PyObject, <:Real})
sleep_for_ = load_system_op("sleep_for", false)
sleep_for_(convert_to_tensor(t, dtype=Float64))
end
"""
timestamp(deps::Union{PyObject, <:Real, Missing}=missing)
This function is usually used with [`bind`](@ref) for profiling.
Note the timing is not very accurate in a multithreaded environment.
- `deps`: `deps` is always executed before returning the timestamp.
# Example
```julia
a = constant(3.0)
t0 = timestamp(a)
sleep_time = sleep_for(a)
t1 = timestamp(sleep_time)
sess = Session(); init(sess)
t0_, t1_ = run(sess, [t0, t1])
time = t1_ - t0_
```
"""
function timestamp(deps::Union{PyObject, <:Real, Missing}=missing)
deps = coalesce(deps, 0.0)
deps = convert_to_tensor(deps, dtype = Float64)
timer_ = load_system_op("timer", false)
timer_(deps)
end
"""
load_library(filename::String)
Loads a custom operator library from `filename`. The loaded library object is cached, so repeated calls with the same `filename` return the cached object.
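# Example
A minimal sketch; the path below is a placeholder, and the platform-specific extension is added when omitted:
```julia
lib = load_library("./build/libmyops")
ops = get_library_symbols(lib)   # inspect which kernels the library exposes
```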
"""
function load_library(filename::String)
filename = get_library(filename)
keyname = "custom_op_library_"*filename
if haskey(STORAGE, keyname)
return STORAGE[keyname]
end
if !isfile(filename)
error("File $filename not found. If you intend to load a library by absolute path, try `tf.load_op_library(filename)`.")
end
try
STORAGE[keyname] = tf.load_op_library(filename)
catch e
error("Failed to load library: $filename. Original error message:\n$e")
end
return STORAGE[keyname]
end
"""
get_library_symbols(file::Union{String, PyObject})
Returns the symbols in the custom op library `file`.
"""
function get_library_symbols(files::Union{String, PyObject})
if isa(files, String)
files = load_library(files)
end
files = keys(files)
filter(x->islowercase(String(x)[1]) && String(x)!="tf_export" && !(occursin("fallback", String(x))) && String(x)!="deprecated_endpoints", files)
end
function Base.:NamedTuple(df::PyObject)
names = tuple(Symbol.(df.index.tolist())...)
vals = tuple(df.values...)
NamedTuple{names}(vals)
end | ADCME | https://github.com/kailaix/ADCME.jl.git |