| licenses (sequencelengths 1–3) | version (stringclasses 677 values) | tree_hash (stringlengths 40–40) | path (stringclasses 1 value) | type (stringclasses 2 values) | size (stringlengths 2–8) | text (stringlengths 25–67.1M) | package_name (stringlengths 2–41) | repo (stringlengths 33–86) |
|---|---|---|---|---|---|---|---|---|
[
"MIT"
] | 0.2.0 | b887fff9ad1cadd1e324fd0cf0ad502cd45501c0 | code | 3137 | import QPDAS
#import OSQP
#Type to store n linear equalities + 1 pair of linear equalities
mutable struct SavedPlanes{T}
A::Array{T,2}
b::Array{T,1}
n::Int64
neq::Int64
nineq::Int64
end
function projectonnormals!(s::SavedPlanes{T},x,y) where T
n = length(x)
m = length(s.b)
eqidx = 1:(s.neq*(s.n+1))
ineqidx = (s.neq*(s.n+1)+1):(s.neq+s.nineq)*(s.n+1)
A = s.A[ eqidx,:]
b = s.b[eqidx]
C = s.A[ineqidx,:]
d = s.b[ineqidx]
z = -x
# objective ½yᵀy + zᵀy with z = -x equals ½‖y - x‖² up to a constant,
# so the QP below computes the projection of x onto the saved planes
qp = QPDAS.QuadraticProgram(BigFloat.(A), BigFloat.(b), BigFloat.(-C), BigFloat.(-d), BigFloat.(z), I, scaling=true, ϵ=1e-12)
sol, val = QPDAS.solve!(qp)
y .= sol
# model = OSQP.Model()
# M = [A;-C]
# u = [b;-d]
# l = [b;fill(-Inf, length(d))]
# OSQP.setup!(model; P=SparseMatrixCSC{Float64}(I, n, n), l=l, A=sparse(M), u=u, verbose=false,
# eps_abs=0.001*eps(), eps_rel=0.001*eps(),
# eps_prim_inf=0.001*eps(), eps_dual_inf=0.001*eps(), max_iter=10000)
#
# OSQP.update!(model; q=z)
# results = OSQP.solve!(model)
# y .= results.x
# println(size(s.A))
# println("A:")
# println(s.A[ eqidx,:])
# println("b:")
# println(s.b[ eqidx])
# println("C:")
# println(s.A[ineqidx,:])
# println("d:")
# println(s.b[ineqidx])
# println("x:")
# println(x)
return false
end
function SavedPlanes(x::AbstractVector{T}, n::Int, neq, nineq) where T
total = (n+1)*neq+(n+1)*nineq #Save all eq and last ineq
SavedPlanes{T}(similar(x,total, length(x)), similar(x,total), n, neq, nineq)
end
#
# #Type to store n linear equalities + 1 pair of linear equalities
# mutable struct SavedPlanes{T}
# A::Array{T,2}
# b::Array{T,1}
# n::Int64
# neq::Int64
# nineq::Int64
# end
#
# function projectonnormals!(s::SavedPlanes{T},x,y) where T
# n = length(x)
# m = length(s.b)
# #println(s.A)
# #println(s.b)
# AAt = s.A*transpose(s.A)
# AAtF = try
# bkfact!(AAt)
# catch
# y .= x
# #println("Non symmetric")
# return true
# end
# tmp = copy(s.b)
# # tmp = -Ax+b
# LinearAlgebra.gemv!('N', -one(T), s.A, x, one(T), tmp)
# # tmp = (AA')⁻¹*(-Ax+b)
# try
# ldiv!(AAtF,tmp)
# catch
# y .= x
# println("Noninvertable")
# return true
# end
# # y = A'(AA')⁻¹*(-Ax+b)
# mul!(y, transpose(s.A), tmp)
# # y = x + A'(AA')⁻¹*(-Ax+b) = (I-A'(AA')⁻¹*A)x + A'(AA')⁻¹b
# y .= y .+ x
# #println("in 2")
# return false
# end
#
# function SavedPlanes(x::AbstractVector{T}, n::Int, neq, nineq) where T
# total = (n+1)*neq+nineq #Save all eq and last ineq
# SavedPlanes{T}(similar(x,total, length(x)), similar(x,total), n, neq, nineq)
# end
#Add planes to a specific location
function addplanesat(v1,b1,i,s::SavedPlanes)
s.A[i,:] .= v1
s.b[i] = b1
end
#Add planes to a random location in 1:n with probability p=0.05
function addplanesrand(v1,b1,s::SavedPlanes, p = 0.05)
if s.n > 0 && rand() < p
i = rand(1:s.n)
addplanesat(v1,b1,i,s)
end
end
| FirstOrderSolvers | https://github.com/mfalt/FirstOrderSolvers.jl.git |
|
[
"MIT"
] | 0.2.0 | b887fff9ad1cadd1e324fd0cf0ad502cd45501c0 | code | 1781 | using FirstOrderSolvers: HSDEMatrixQ, HSDEMatrix
using SparseArrays
using ProximalOperators: IndAffine, prox!
import LinearAlgebra: mul!
function getQ1Q2(A)
m, n = size(A)
b = randn(m)
c = randn(n)
Q1 = [zeros(n,n) A' c;
-A zeros(m,m) b;
-c' -b' 0]
Q2 = HSDEMatrixQ(A, b, c)
return Q1, Q2
end
function getHSDEMatrix(Q1,Q2)
M1 = [I sparse(Q1') ;
sparse(Q1) -I ]
M2 = HSDEMatrix(Q2)
return M1, M2
end
function testHSDEQ_A_mul_B(Q1, Q2, m, n)
rhs1 = randn(m+n+1)
rhs2 = copy(rhs1)
y1, y2 = randn(m+n+1), randn(m+n+1)
mul!(y1, Q1, rhs1)
mul!(y2, Q2, rhs2)
@test rhs1 == rhs2
@test y1 ≈ y2
mul!(y1, transpose(Q1), rhs1)
mul!(y2, transpose(Q2), rhs2)
@test rhs1 == rhs2
@test y1 ≈ y2
end
function testHSDEMatrix_A_mul_B(M1, M2, m, n)
rhs1 = randn(2m+2n+2)
rhs2 = copy(rhs1)
y1, y2 = randn(2m+2n+2), randn(2m+2n+2)
mul!(y1, M1, rhs1)
mul!(y2, M2, rhs2)
@test rhs1 == rhs2
@test y1 ≈ y2
mul!(y1, transpose(M1), rhs1)
mul!(y2, transpose(M2), rhs2)
@test rhs1 == rhs2
@test y1 ≈ y2
end
function testHSDE(A, m, n)
Q1, Q2 = getQ1Q2(A)
testHSDEQ_A_mul_B(Q1, Q2, m, n)
M1, M2 = getHSDEMatrix(Q1, Q2)
testHSDEMatrix_A_mul_B(M1, M2, m, n)
b = randn(size(M2)[1])
S1 = IndAffine([sparse(Q1) -I], zeros(size(Q1,1)))
y1 = similar(b)
y2 = similar(b)
FirstOrderSolvers.prox!(y2, M2, b)
prox!(y1, S1, b)
y3 = M1\b
y3[(size(Q1,1)+1):end] .= Q1*y3[1:size(Q1,1)]
@test y1 ≈ y2
@test y2 ≈ y3
end
Random.seed!(1)
ma,na = 10,20
A = randn(10*ma,10*na)
testHSDE(A, size(A)...)
A = sprandn(100*ma,100*na,0.001)
testHSDE(A, size(A)...)
| FirstOrderSolvers | https://github.com/mfalt/FirstOrderSolvers.jl.git |
|
[
"MIT"
] | 0.2.0 | b887fff9ad1cadd1e324fd0cf0ad502cd45501c0 | code | 1309 | using FirstOrderSolvers: KKTMatrix, AffinePlusLinear
using Convex
Random.seed!(10)
#Test KKTMatrix
A = randn(10,20)
M1 = [I A';A -I]
M2 = KKTMatrix(A)
x = randn(30)
y1 = randn(30)
y2 = randn(30)
mul!(y1, M1, x)
mul!(y2, M2, x)
@test y1 ≈ y2
mul!(y1, transpose(M1), x)
mul!(y2, transpose(M2), x)
@test y1 ≈ y2
#Test AffinePlusLinear
x0 = randn(20)
z0 = randn(10)
q = randn(20)
b = randn(10)
# β = 1
β = 1
S2 = AffinePlusLinear(A, b, q, β)
y2 = Array{Float64,1}(undef, 30)
FirstOrderSolvers.prox!(y2, S2, [x0;z0])
x2 = view(y2,1:20)
z2 = view(y2,21:30)
# #This test is equivalent to y3
# x = Variable(20)
# z = Variable(10)
# p = minimize(1/2*norm(x-x0)^2+1/2*norm(z-z0)^2+dot(q,x), [A*x-z==b])
# solve!(p, FirstOrderSolvers.DR(eps=1e-9))
# x1 = x.value
# z1 = z.value
# y1 = [x1; z1]
# @test y1 ≈ y2
y3 = [I A'; A -I]\[x0-q+A'z0; b]
@test y3 ≈ y2
β = -1
S2 = AffinePlusLinear(A, b, q, β)
y2 = Array{Float64,1}(undef, 30)
FirstOrderSolvers.prox!(y2, S2, [x0;z0])
x2 = view(y2,1:20)
z2 = view(y2,21:30)
# #This test is equivalent to y3
# x = Variable(20)
# z = Variable(10)
# p = minimize(1/2*norm(x-x0)^2+1/2*norm(z-z0)^2+dot(q,x), [A*x-β*z==b])
# solve!(p, FirstOrderSolvers.DR(eps=1e-9))
# x1 = x.value
# z1 = z.value
# y1 = [x1; z1]
# @test y1 ≈ y2
y3 = [I -A'; A I]\[x0-q-A'z0; b]
@test y3 ≈ y2
| FirstOrderSolvers | https://github.com/mfalt/FirstOrderSolvers.jl.git |
|
[
"MIT"
] | 0.2.0 | b887fff9ad1cadd1e324fd0cf0ad502cd45501c0 | code | 808 | using FirstOrderSolvers: conjugategradient!, CGdata
Random.seed!(2)
# a = randn(1000,1000)
# A = a'a
A = rand(1000,1000)
A = A'*A
b = randn(1000)
x0 = randn(1000)
x = copy(x0)
#anrm = sqrt(sum(abs2, A, 2))[:]
#A = A./anrm
#b = b./anrm
function test(A, b, x0, x, cgdata)
x .= x0
conjugategradient!(x, A, b, cgdata.r, cgdata.p, cgdata.z)
end
cgdata = CGdata(similar(b), similar(b), similar(b))
conjugategradient!(x, A, b, cgdata.r, cgdata.p, cgdata.z, max_iters = 100)
n0 = norm(A*x-b) # 48
conjugategradient!(x, A, b, cgdata.r, cgdata.p, cgdata.z, max_iters = 5000)
n1 = norm(A*x-b) # 3e-6
@test n1 < 1e-5
xcopy = x .+ 1e-5.*randn(size(x))
n2 = norm(A*xcopy-b) #0.9
conjugategradient!(xcopy, A, b, cgdata.r, cgdata.p, cgdata.z, max_iters = 100)
n3 = norm(A*xcopy-b) #3e-5
@test n3 < 10*n2
| FirstOrderSolvers | https://github.com/mfalt/FirstOrderSolvers.jl.git |
|
[
"MIT"
] | 0.2.0 | b887fff9ad1cadd1e324fd0cf0ad502cd45501c0 | code | 314 | using Convex
Random.seed!(2)
m = 40; n = 50
A = randn(m, n); b = randn(m, 1)
x = Variable(n)
problem = minimize(sumsquares(A * x - b), [x >= 0])
ϵ = 1e-8
opt = 12.38418747141913
solver = LineSearchWrapper(GAP(0.5, 1.0, 1.0, eps=ϵ, verbose=1, checki=1, max_iters=10000), lsinterval=100)
solve!(problem, solver)
| FirstOrderSolvers | https://github.com/mfalt/FirstOrderSolvers.jl.git |
|
[
"MIT"
] | 0.2.0 | b887fff9ad1cadd1e324fd0cf0ad502cd45501c0 | code | 558 | using FirstOrderSolvers
using LinearAlgebra
using Test
using Random
println("Test: conjugateGradient.jl")
include("conjugateGradient.jl")
println("Test: HSDEAffine.jl")
include("HSDEAffine.jl")
println("Test: affinepluslinear.jl")
include("affinepluslinear.jl")
println("Test: testDRandGAPA.jl")
include("testDRandGAPA.jl")
println("Test: testPSD.jl")
include("testPSD.jl")
println("Test: testprint.jl")
include("testprint.jl")
#println("Test: testspecific.jl")
#include("testspecific.jl")
# println("Test: testconvex.jl")
# include("testconvex.jl")
| FirstOrderSolvers | https://github.com/mfalt/FirstOrderSolvers.jl.git |
|
[
"MIT"
] | 0.2.0 | b887fff9ad1cadd1e324fd0cf0ad502cd45501c0 | code | 1059 | using Convex
Random.seed!(2)
m = 40; n = 50
A = randn(m, n); b = randn(m, 1)
x = Variable(n)
problem = minimize(sumsquares(A * x - b), [x >= 0])
ϵ = 1e-8
opt = 12.38418747141913
solve!(problem, DR(eps=ϵ, verbose=0))
@test problem.status == :Optimal
@test problem.optval ≈ opt
@test abs(minimum(x.value)) < 10*ϵ
xsave = copy(x.value)
# Test indirect
problem = minimize(sumsquares(A * x - b), [x >= 0])
solve!(problem, GAPA(eps=1e-4, verbose=0))
@test problem.status == :Optimal
@test abs((problem.optval - opt)/opt) < 2e-3
@test maximum(abs.(x.value-xsave)) < 1e-3
#Test direct
problem = minimize(sumsquares(A * x - b), [x >= 0])
solve!(problem, GAPA(direct=true, eps=1e-4, verbose=0))
@test problem.status == :Optimal
@test abs((problem.optval - opt)/opt) < 2e-3
@test maximum(abs.(x.value-xsave)) < 1e-3
# Test β in GAPA
problem = minimize(sumsquares(A * x - b), [x >= 0])
solve!(problem, GAPA(0.5, 0.9, eps=1e-9, verbose=0))
@test problem.status == :Optimal
@test abs((problem.optval - opt)/opt) < 1e-10
@test maximum(abs.(x.value-xsave)) < 1e-7
| FirstOrderSolvers | https://github.com/mfalt/FirstOrderSolvers.jl.git |
|
[
"MIT"
] | 0.2.0 | b887fff9ad1cadd1e324fd0cf0ad502cd45501c0 | code | 533 | using Convex, FirstOrderSolvers, SCS, Test, ProximalOperators
ys = [-0.0064709 -0.22443;
-0.22443 -1.02411]
y = Variable((2, 2))
#Solve with SCS
p = minimize(norm(vec(y-ys)), isposdef(y))
solve!(p, SCSSolver(eps=1e-8, verbose=0))
ysol = copy(y.value)
#Solve with projection
Y = Symmetric(ys)
X = similar(Y)
prox!(X, IndPSD(), Y)
#Testing SCS and projection equal
@test X.data ≈ ysol atol=1e-8
#Solve with DR
p = minimize(norm(vec(y-ys)), isposdef(y))
solve!(p, DR(eps=1e-8, verbose=0))
@test y.value ≈ ysol atol=1e-8
| FirstOrderSolvers | https://github.com/mfalt/FirstOrderSolvers.jl.git |
|
[
"MIT"
] | 0.2.0 | b887fff9ad1cadd1e324fd0cf0ad502cd45501c0 | code | 377 | #Test against Convex.jl
using Convex
pkgdir(pkg::String) = abspath(joinpath(dirname(Base.find_package(pkg)), ".."))
single_solver_test_file = joinpath(pkgdir("Convex"),"test/runtests_single_solver.jl")
set_default_solver(DR(eps=1e-8, verbose=0, debug=0))
include(single_solver_test_file)
# TODO TODO
# #TODO Throws error, integrate with Base.Test
# FactCheck.exitstatus()
| FirstOrderSolvers | https://github.com/mfalt/FirstOrderSolvers.jl.git |
|
[
"MIT"
] | 0.2.0 | b887fff9ad1cadd1e324fd0cf0ad502cd45501c0 | code | 1379 | using Convex
function readmylines(rd)
readline(rd) #Initialize time
readline(rd) #Dashes
l1 = readline(rd)
readline(rd) #Dashes
l2 = readline(rd)[1:7]
l3 = readline(rd)[1:7]
l4 = readline(rd)
return l1, l2, l3, l4
end
#Expected outputs
o11 = " Iter | pri res | dua res | rel gap | pri obj | dua obj | kap/tau | cg | time"
o12 = " Iter | pri res | dua res | rel gap | pri obj | dua obj | kap/tau | time"
o2 = " 100|"
o3 = " 200|"
o4 = "Found solution i=200"
Random.seed!(10)
n = 500
A = sprandn(n, 2n, 0.1)
x̄ = randn(2n)
b = A*x̄
x = Variable(2n)
p = minimize(norm(A*x-b), sum(x) == sum(x̄))
origstdout = stdout
#With cg output
s = GAPA(0.8, 0.9, direct=false, verbose=2, debug=0, eps=1e-8, checki=100)
rd,wr = redirect_stdout()
solve!(p,s)
l1, l2, l3, l4 = readmylines(rd)
redirect_stdout(origstdout)
@test l1 == o11
@test l2 == o2
@test l3 == o3
@test l4 == o4
@test evaluate(sum(x)-sum(x̄)) ≈ 0.0 atol=1e-8
@test evaluate(maximum(abs(A*x-b))) ≈ 0.0 atol=1e-8
#Without cg output
s = GAPA(0.8, 0.9, direct=true, verbose=2, debug=0, eps=1e-8, checki=100)
rd,wr = redirect_stdout()
solve!(p,s)
l1, l2, l3, l4 = readmylines(rd)
redirect_stdout(origstdout)
@test l1 == o12
@test l2 == o2
@test l3 == o3
@test l4 == o4
@test evaluate(sum(x)-sum(x̄)) ≈ 0.0 atol=1e-8
@test evaluate(maximum(abs(A*x-b))) ≈ 0.0 atol=1e-8
| FirstOrderSolvers | https://github.com/mfalt/FirstOrderSolvers.jl.git |
|
[
"MIT"
] | 0.2.0 | b887fff9ad1cadd1e324fd0cf0ad502cd45501c0 | code | 1077 | # Problematic problems from Convex.jl test files
using Convex
### Test 1
solver = DR(eps=1e-6, checki=100, max_iters=100000)
solver = GAPA(eps=1e-8, max_iters=10000)
solver = GAPA(eps=1e-6, max_iters=100000, checki=100, direct=true)
solver = LongstepWrapper(GAPA(eps=1e-6, max_iters=100000, checki=100, direct=true), longinterval=900, nsave=4)
solver = AP(eps=1e-8, max_iters=10000, checki=10, direct=true)
x = Variable(Positive())
y = Variable((3, 3))
p = minimize(x + y[1, 1], isposdef(y), x >= 1, y[2, 1] == 1)
x = Variable(Positive())
solve!(p, solver)
p.optval
solver = GAPA(eps=1e-8, max_iters=100000, checki=100, direct=true)
solver = DR(eps=1e-8, max_iters=100000, checki=10, direct=true)
solver = LongstepWrapper(GAPA(eps=1e-8, max_iters=10000, checki=10, direct=true), longinterval=50, nsave=2)
solver = LongstepWrapper(GAPA(eps=1e-8, max_iters=10000, checki=10, direct=true), longinterval=50, nsave=5)
using Random
x = Variable(200, 1)
Random.seed!(1)
A = randn(500,200)
b = randn(500)
p = minimize(norm2(A * x + b))
solve!(p, solver)
p.optval
v0 = p.optval
| FirstOrderSolvers | https://github.com/mfalt/FirstOrderSolvers.jl.git |
|
[
"MIT"
] | 0.2.0 | b887fff9ad1cadd1e324fd0cf0ad502cd45501c0 | docs | 3692 | # FirstOrderSolvers
[](https://travis-ci.org/mfalt/FirstOrderSolvers.jl)
[](https://coveralls.io/github/mfalt/FirstOrderSolvers.jl?branch=master)
[](http://codecov.io/github/mfalt/FirstOrderSolvers.jl?branch=master)
Package for large-scale convex optimization solvers in Julia. This package is intended to allow for easy **implementation**, **testing**, and **running** of solvers through the [Convex.jl](https://github.com/JuliaOpt/Convex.jl) interface.
The package is currently under **active development** and uses the [ProximalOperators.jl](https://github.com/kul-forbes/ProximalOperators.jl) package to do the low-level projections.
## Installation
To run the solvers, you need to have the following packages:
```julia
Pkg.add("Convex")
Pkg.clone("https://github.com/mfalt/FirstOrderSolvers.jl.git")
```
## Usage
Define an optimization problem in the format supported by `Convex.jl`, and supply the desired solver to the `solve!` function. Example using Douglas-Rachford (expressed through the `GAP` solver):
```julia
using Convex, FirstOrderSolvers
m = 40; n = 50
A = randn(m, n); b = randn(m, 1)
x = Variable(n)
problem = minimize(sumsquares(A * x - b), [x >= 0])
solve!(problem, GAP(0.5, 2.0, 2.0, max_iters=2000))
```
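After `solve!` returns, the result can be inspected through the `Convex.jl` problem object, as done in the package's tests:
```julia
problem.status   # :Optimal if the solver converged
problem.optval   # optimal objective value
x.value          # optimizer for the variable x
```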
## Solvers
Currently, the available solvers are
| Solver | Description | Reference |
| --- | --- | --- |
| `GAP(α=0.8, α1=1.8, α2=1.8; kwargs...)` | Generalized Alternating Projections | |
| `DR(α=0.5; kwargs...)` | Douglas-Rachford (`GAP(α, 2.0, 2.0)`) | Douglas, Rachford (1956) |
| `AP(α=0.5; kwargs...)` | Alternating Projections (`GAP(α, 1.0, 1.0)`) | Agmon (1954), Bregman (1967) |
| `GAPA(α=1.0; kwargs...)` | GAP Adaptive | [Fält, Giselsson (2017)](https://arxiv.org/abs/1703.10547) |
| `FISTA(α=1.0; kwargs...)` | FISTA | Beck, Teboulle (2009) |
| `Dykstra(; kwargs...)` | Dykstra | Boyle, Dykstra (1986) |
| `GAPP(α=0.8, α1=1.8, α2=1.8; iproj=100; kwargs...)` | Projected GAP | [Fält, Giselsson (2016)](https://arxiv.org/abs/1609.05920) |
## Keyword Arguments
All solvers accept the following keyword arguments (an example follows the table):
| Argument | Default | Description (Values) |
| --- | --- | --- |
| `max_iters` | `10000` | Maximum number of iterations |
| `verbose` | `1` | Print verbosity level `0,1` |
| `debug` | `1` | Level of debug data to save `0,1,2` |
| `eps` | `1e-5` | Accuracy of solution |
| `checki` | `100` | Interval for checking convergence |
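For example, a tighter tolerance with more frequent convergence checks can be requested as follows (the values are only illustrative):
```julia
solver = GAP(0.5, 2.0, 2.0, eps=1e-8, max_iters=20000, checki=50, verbose=1, debug=1)
solve!(problem, solver)
```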
## Debugging
If the keyword argument `debug` is set to `1` or `2`, the following values will be stored in a [`ValueHistories.MVHistory`](https://github.com/JuliaPackageMirrors/ValueHistories.jl) in `problem.model.history` for each iteration at which the convergence check is run:
| Name | Debug Level Required | Description |
| --- | --- | --- |
| `:p` | 1 | Relative Primal Residual |
| `:d` | 1 | Relative Dual Residual |
| `:g` | 1 | Relative Duality Gap |
| `:ctx` | 1 | `cᵀx` |
| `:bty` | 1 | `bᵀy` |
| `:κ` | 1 | `κ` |
| `:τ` | 1 | `τ` |
| `:x` | 2 | `x` |
| `:y` | 2 | `y` |
| `:s` | 2 | `s` |
These values correspond to the values in the paper [Conic Optimization via Operator Splitting and Homogeneous Self-Dual Embedding (O'Donoghue et al.)](https://arxiv.org/abs/1312.3039).
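A minimal sketch of retrieving the logged values after a solve (assuming the standard `ValueHistories` accessors):
```julia
# `problem` as defined in the Usage section above
solve!(problem, DR(eps=1e-6, debug=1))
history = problem.model.history
iters, primal_res = get(history, :p)   # relative primal residual at each convergence check
```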
| FirstOrderSolvers | https://github.com/mfalt/FirstOrderSolvers.jl.git |
|
[
"MIT"
] | 0.1.7 | 3874bd47a47cda82504d3f205a2912d756af94e4 | code | 558 | using GraphMLDatasets
using Documenter
makedocs(;
modules=[GraphMLDatasets],
authors="Yueh-Hua Tu",
repo="https://github.com/yuehhua/GraphMLDatasets.jl/blob/{commit}{path}#L{line}",
sitename="GraphMLDatasets.jl",
format=Documenter.HTML(;
prettyurls=get(ENV, "CI", "false") == "true",
canonical="https://yuehhua.github.io/GraphMLDatasets.jl/stable/",
assets=String[],
),
pages=[
"Home" => "index.md",
],
)
deploydocs(;
repo="github.com/yuehhua/GraphMLDatasets.jl",
target = "build",
)
| GraphMLDatasets | https://github.com/yuehhua/GraphMLDatasets.jl.git |
|
[
"MIT"
] | 0.1.7 | 3874bd47a47cda82504d3f205a2912d756af94e4 | code | 694 | module GraphMLDatasets
using InteractiveUtils: subtypes
using SparseArrays: SparseMatrixCSC, sparse, findnz
using DelimitedFiles
using CSV, DataFrames
using CodecZlib
using DataDeps: DataDep, register, @datadep_str
using FileIO
using HTTP
using JLD2
using JSON
using Graphs
using MAT
using NPZ
using Pickle
using PyCall
using ZipFile
include("dataset.jl")
include("ogb.jl")
include("preprocess.jl")
include("interfaces.jl")
include("utils.jl")
include("register.jl")
function __init__()
init_dataset(Dataset)
end
# precompile(read_heterogeneous_graph, (ZipFile.Reader, String))
end
| GraphMLDatasets | https://github.com/yuehhua/GraphMLDatasets.jl.git |
|
[
"MIT"
] | 0.1.7 | 3874bd47a47cda82504d3f205a2912d756af94e4 | code | 17603 | export
Dataset,
Planetoid,
Cora,
PPI,
Reddit,
QM7b,
Entities,
OGBNProteins,
OGBNProducts,
OGBNArxiv,
# OGBNMag,
OGBNPapers100M
abstract type Dataset end
abstract type OGBDataset <: Dataset end
abstract type NodePropPredDataset <: OGBDataset end
abstract type EdgePropPredDataset <: OGBDataset end
abstract type GraphPropPredDataset <: OGBDataset end
"""
Planetoid()
Planetoid dataset contains three citation networks: Cora, CiteSeer and PubMed.
Nodes represent documents and edges represent citation links.
Implements: [`graphdata`](@ref), [`traindata`](@ref), [`testdata`](@ref), [`alldata`](@ref), [`rawdata`](@ref),
[`metadata`](@ref)
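# Usage
A minimal sketch (the underlying files are fetched via DataDeps on first access):
```julia
using GraphMLDatasets
pla = Planetoid()
g = graphdata(pla, :cora)                  # citation graph as a SimpleGraph
train_X, train_y = traindata(pla, :cora)   # sparse feature and label matrices
idx = train_indices(pla, :cora)            # indices of the training nodes
```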
"""
struct Planetoid <: Dataset end
"""
Cora()
Cora dataset contains full Cora citation networks.
Nodes represent documents and edges represent citation links.
Implements: [`graphdata`](@ref), [`alldata`](@ref), [`rawdata`](@ref), [`metadata`](@ref)
"""
struct Cora <: Dataset end
"""
PPI()
PPI dataset contains the protein-protein interaction networks.
Nodes represent proteins and edges indicate whether two proteins interact with each other.
Positional gene sets, motif gene sets and immunological signatures serve as features (50 in total),
and gene ontology sets serve as labels (121 in total).
Implements: [`traindata`](@ref), [`validdata`](@ref), [`testdata`](@ref)
"""
struct PPI <: Dataset end
"""
Reddit()
Reddit dataset contains Reddit post networks. Reddit is a large online discussion forum where
users post and comment in 50 communities, and posts belong to different communities.
Nodes represent posts and edges indicate whether the same user commented on both posts.
The task is to predict the community category of each post.
Implements: [`graphdata`](@ref), [`alldata`](@ref), [`rawdata`](@ref), [`metadata`](@ref)
"""
struct Reddit <: Dataset end
"""
QM7b()
QM7b dataset contains molecular structure graphs and is a subset of the GDB-13 database.
It contains stable and synthetically accessible organic molecular structures.
Nodes represent atoms in a molecule and edges indicate a chemical bond between two atoms.
The 3D Cartesian coordinates of the stable conformation are given as features.
The task is to predict the electronic properties. It contains 7,211 molecules with 14 regression
targets.
Implements: [`rawdata`](@ref)
"""
struct QM7b <: Dataset end
"""
Entities()
Entities dataset contains relational entities networks "AIFB", "MUTAG", "BGS" and "AM".
Nodes represent entities and directed edges represent subject-object relations.
The task is to predict properties of a group of entities in a knowledge graph.
Implements: [`graphdata`](@ref), [`traindata`](@ref), [`testdata`](@ref)
"""
struct Entities <: Dataset end
"""
OGBNProteins()
`OGBNProteins` dataset contains protein-protein interaction network.
The task to predict the presence of protein functions in a multi-label binary classification.
Training/validation/test splits are given by node indices.
# Description
- Graph: undirected, weighted, and typed (according to species) graph.
- Node: proteins.
- Edge: different types of biologically meaningful associations
between proteins, e.g., physical interactions, co-expression or homology.
# References
1. Damian Szklarczyk, Annika L Gable, David Lyon, Alexander Junge, Stefan Wyder,
Jaime Huerta- Cepas, Milan Simonovic, Nadezhda T Doncheva, John H Morris, Peer Bork, et al.
STRING v11: protein–protein association networks with increased coverage, supporting functional
discovery in genome-wide experimental datasets. Nucleic Acids Research, 47(D1):D607–D613, 2019.
2. Gene Ontology Consortium. The gene ontology resource: 20 years and still going strong.
Nucleic Acids Research, 47(D1):D330–D338, 2018.
Implements: [`graphdata`](@ref), [`train_indices`](@ref), [`valid_indices`](@ref), [`test_indices`](@ref), [`edge_features`](@ref),
[`node_labels`](@ref)
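# Usage
A minimal sketch (data is downloaded via DataDeps on first access):
```julia
using GraphMLDatasets
ds = OGBNProteins()
g = graphdata(ds)              # protein-protein interaction graph
train_idx = train_indices(ds)  # node indices of the training split
E = edge_features(ds)          # 8-dimensional edge features
y = node_labels(ds)            # 112 binary protein-function labels
```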
"""
struct OGBNProteins <: NodePropPredDataset end
"""
OGBNProducts()
`OGBNProducts` dataset contains an Amazon product co-purchasing network.
The task is to predict the category of a product in a multi-class classification setting.
Training/validation/test splits are given by node indices.
# Description
- Graph: undirected and unweighted graph.
- Node: products sold in Amazon.
- Edge: the products are purchased together.
# References
1. http://manikvarma.org/downloads/XC/XMLRepository.html
Implements: [`graphdata`](@ref), [`train_indices`](@ref), [`valid_indices`](@ref), [`test_indices`](@ref), [`node_features`](@ref),
[`node_labels`](@ref)
"""
struct OGBNProducts <: NodePropPredDataset end
"""
OGBNArxiv()
`OGBNArxiv` dataset contains the citation network between all Computer Science (CS) arXiv papers
indexed by MAG.
The task is to predict the primary categories of the arXiv papers from 40 subject areas
in a multi-class classification.
Training/validation/test splits are given by node indices.
# Description
- Graph: directed graph.
- Node: arXiv paper.
- Edge: each directed edge indicates that one paper cites another one.
# References
1. Kuansan Wang, Zhihong Shen, Chiyuan Huang, Chieh-Han Wu, Yuxiao Dong, and Anshul Kanakia.
Microsoft academic graph: When experts are not enough. Quantitative Science Studies,
1(1):396–413, 2020.
2. Tomas Mikolov, Ilya Sutskever, Kai Chen, Greg S Corrado, and Jeff Dean.
Distributed representationsof words and phrases and their compositionality.
In Advances in Neural Information Processing Systems (NeurIPS), pp. 3111–3119, 2013.
Implements: [`graphdata`](@ref), [`train_indices`](@ref), [`valid_indices`](@ref), [`test_indices`](@ref), [`node_features`](@ref),
[`node_labels`](@ref)
"""
struct OGBNArxiv <: NodePropPredDataset end
# """
# OGBNMag()
# `OGBNMag` dataset contains a heterogeneous network composed of a subset of the Microsoft Academic Graph (MAG).
# The task to predict the venue (conference or journal) of each paper, given its content, references,
# authors, and authors’ affiliations, in a multi-class classification setting.
# Training/validation/test splits are given by node indices. Only paper nodes have features and labels.
# # Description
# - Graph: directed heterogeneous graph.
# - Node: four types of entities.
# - papers (736,389 nodes)
# - authors (1,134,649 nodes)
# - institutions (8,740 nodes)
# - fields of study (59,965 nodes)
# - Edge: four types of directed relations.
# - an author is "affiliated with" an institution
# - an author "writes" a paper
# - a paper "cites" a paper
# - a paper "has a topic of" a field of study
# # References
# 1. Kuansan Wang, Zhihong Shen, Chiyuan Huang, Chieh-Han Wu, Yuxiao Dong, and Anshul Kanakia.
# Microsoft academic graph: When experts are not enough. Quantitative Science Studies, 1(1):396–413, 2020.
# Implements: [`graphdata`](@ref), [`train_indices`](@ref), [`valid_indices`](@ref), [`test_indices`](@ref), [`node_features`](@ref),
# [`node_labels`](@ref)
# """
# struct OGBNMag <: NodePropPredDataset end
"""
OGBNPapers100M()
`OGBNPapers100M` dataset contains a citation graph of 111 million papers indexed by MAG.
The task is to predict the subject areas of the subset of papers that are published in arXiv
in a multi-class classification setting.
Training/validation/test splits are given by node indices.
# Description
- Graph: directed graph.
- Node: arXiv paper.
- Edge: each directed edge indicates that one paper cites another one.
# References
1. Kuansan Wang, Zhihong Shen, Chiyuan Huang, Chieh-Han Wu, Yuxiao Dong, and Anshul Kanakia.
Microsoft academic graph: When experts are not enough. Quantitative Science Studies, 1(1):396–413, 2020.
Implements: [`graphdata`](@ref), [`train_indices`](@ref), [`valid_indices`](@ref), [`test_indices`](@ref), [`node_features`](@ref),
[`node_labels`](@ref)
"""
struct OGBNPapers100M <: NodePropPredDataset end
dataset_url(::Type{Planetoid}) = "https://github.com/kimiyoung/planetoid/raw/master/data"
dataset_url(::Type{Entities}) = "https://s3.us-east-2.amazonaws.com/dgl.ai/dataset/"
subdatasets(::Type{Planetoid}) = [:citeseer, :cora, :pubmed]
subdatasets(::Type{Entities}) = [:aifb, :am, :mutag, :bgs]
dataset_exts(::Type{Planetoid}) = ["allx", "ally", "graph", "test.index", "tx", "ty", "x", "y"]
dataset_name(::Type{Planetoid}) = "Planetoid"
dataset_name(::Type{Cora}) = "Cora"
dataset_name(::Type{PPI}) = "PPI"
dataset_name(::Type{Reddit}) = "Reddit"
dataset_name(::Type{QM7b}) = "QM7b"
dataset_name(::Type{Entities}) = "Entities"
dataset_name(::Type{OGBNProteins}) = "OGBN-Proteins"
dataset_name(::Type{OGBNProducts}) = "OGBN-Products"
dataset_name(::Type{OGBNArxiv}) = "OGBN-Arxiv"
# dataset_name(::Type{OGBNMag}) = "OGBN-Mag"
dataset_name(::Type{OGBNPapers100M}) = "OGBN-Papers100M"
function dataset_message(::Type{Planetoid})
"""
The citation network datasets "Cora", "CiteSeer", "PubMed" from
"Revisiting Semi-Supervised Learning with Graph Embeddings"
<https://arxiv.org/abs/1603.08861> paper.
Nodes represent documents and edges represent citation links.
"""
end
function dataset_message(::Type{Cora})
"""
The full Cora citation network dataset from the
`"Deep Gaussian Embedding of Graphs: Unsupervised Inductive Learning via
Ranking" <https://arxiv.org/abs/1707.03815>`_ paper.
Nodes represent documents and edges represent citation links.
"""
end
function dataset_message(::Type{PPI})
"""
The protein-protein interaction networks from the `"Predicting
Multicellular Function through Multi-layer Tissue Networks"
<https://arxiv.org/abs/1707.04638>`_ paper, containing positional gene
sets, motif gene sets and immunological signatures as features (50 in
total) and gene ontology sets as labels (121 in total).
"""
end
function dataset_message(::Type{Reddit})
"""
The Reddit dataset from the `"Inductive Representation Learning on
Large Graphs" <https://arxiv.org/abs/1706.02216>`_ paper, containing
Reddit posts belonging to different communities.
"""
end
function dataset_message(::Type{QM7b})
"""
The QM7b dataset from the `"MoleculeNet: A Benchmark for Molecular
Machine Learning" <https://arxiv.org/abs/1703.00564>`_ paper, consisting of
7,211 molecules with 14 regression targets.
"""
end
function dataset_message(::Type{Entities})
"""
The relational entities networks "AIFB", "MUTAG", "BGS" and "AM" from
the `"Modeling Relational Data with Graph Convolutional Networks"
<https://arxiv.org/abs/1703.06103>`_ paper.
Training and test splits are given by node indices.
"""
end
function dataset_message(::Type{OGBNProteins})
"""
The dataset contains a protein-protein interaction network.
The task is to predict the presence of protein functions in a multi-label binary classification setting.
Training/validation/test splits are given by node indices.
# Description
- Graph: undirected, weighted, and typed (according to species) graph.
- Node: proteins.
- Edge: different types of biologically meaningful associations
between proteins, e.g., physical interactions, co-expression or homology.
# References
1. Damian Szklarczyk, Annika L Gable, David Lyon, Alexander Junge, Stefan Wyder,
Jaime Huerta- Cepas, Milan Simonovic, Nadezhda T Doncheva, John H Morris, Peer Bork, et al.
STRING v11: protein–protein association networks with increased coverage, supporting functional
discovery in genome-wide experimental datasets. Nucleic Acids Research, 47(D1):D607–D613, 2019.
2. Gene Ontology Consortium. The gene ontology resource: 20 years and still going strong.
Nucleic Acids Research, 47(D1):D330–D338, 2018.
"""
end
function dataset_message(::Type{OGBNProducts})
"""
The dataset contains an Amazon product co-purchasing network.
The task is to predict the category of a product in a multi-class classification setting.
Training/validation/test splits are given by node indices.
# Description
- Graph: undirected and unweighted graph.
- Node: products sold in Amazon.
- Edge: the products are purchased together.
# References
1. http://manikvarma.org/downloads/XC/XMLRepository.html
"""
end
function dataset_message(::Type{OGBNArxiv})
"""
The dataset contains the citation network between all Computer Science (CS) arXiv papers
indexed by MAG.
The task is to predict the primary categories of the arXiv papers from 40 subject areas
in a multi-class classification.
Training/validation/test splits are given by node indices.
# Description
- Graph: directed graph.
- Node: arXiv paper.
- Edge: each directed edge indicates that one paper cites another one.
# References
1. Kuansan Wang, Zhihong Shen, Chiyuan Huang, Chieh-Han Wu, Yuxiao Dong, and Anshul Kanakia.
Microsoft academic graph: When experts are not enough. Quantitative Science Studies,
1(1):396–413, 2020.
2. Tomas Mikolov, Ilya Sutskever, Kai Chen, Greg S Corrado, and Jeff Dean.
Distributed representationsof words and phrases and their compositionality.
In Advances in Neural Information Processing Systems (NeurIPS), pp. 3111–3119, 2013.
"""
end
# function dataset_message(::Type{OGBNMag})
# """
# The dataset contains a heterogeneous network composed of a subset of the Microsoft Academic Graph (MAG).
# The task to predict the venue (conference or journal) of each paper, given its content, references,
# authors, and authors’ affiliations, in a multi-class classification setting.
# Training/validation/test splits are given by node indices.
# # Description
# - Graph: directed heterogeneous graph.
# - Node: four types of entities.
# - papers (736,389 nodes)
# - authors (1,134,649 nodes)
# - institutions (8,740 nodes)
# - fields of study (59,965 nodes)
# - Edge: four types of directed relations.
# - an author is "affiliated with" an institution
# - an author "writes" a paper
# - a paper "cites" a paper
# - a paper "has a topic of" a field of study
# # References
# 1. Kuansan Wang, Zhihong Shen, Chiyuan Huang, Chieh-Han Wu, Yuxiao Dong, and Anshul Kanakia.
# Microsoft academic graph: When experts are not enough. Quantitative Science Studies, 1(1):396–413, 2020.
# """
# end
function dataset_message(::Type{OGBNPapers100M})
"""
The dataset contains a citation graph of 111 million papers indexed by MAG.
The task is to predict the subject areas of the subset of papers that are published in arXiv
in a multi-class classification setting.
Training/validation/test splits are given by node indices.
# Description
- Graph: directed graph.
- Node: arXiv paper.
- Edge: each directed edge indicates that one paper cites another one.
# References
1. Kuansan Wang, Zhihong Shen, Chiyuan Huang, Chieh-Han Wu, Yuxiao Dong, and Anshul Kanakia.
Microsoft academic graph: When experts are not enough. Quantitative Science Studies, 1(1):396–413, 2020.
"""
end
function dataset_remote_path(dataset::Type{Planetoid})
url = dataset_url(dataset)
subds = subdatasets(dataset)
exts = dataset_exts(dataset)
dataurls = [joinpath(url, "ind.$(d).$(ext)") for d in subds, ext in exts]
return reshape(dataurls, :)
end
dataset_remote_path(::Type{Cora}) = "https://github.com/abojchevski/graph2gauss/raw/master/data/cora.npz"
dataset_remote_path(::Type{PPI}) = "https://data.dgl.ai/dataset/ppi.zip"
dataset_remote_path(::Type{Reddit}) = "https://data.dgl.ai/dataset/reddit.zip"
dataset_remote_path(::Type{QM7b}) = "http://deepchem.io.s3-website-us-west-1.amazonaws.com/datasets/qm7b.mat"
dataset_remote_path(dataset::Type{Entities}) = [joinpath(dataset_url(dataset), "$(d).tgz") for d in subdatasets(dataset)]
dataset_remote_path(::Type{OGBNProteins}) = "http://snap.stanford.edu/ogb/data/nodeproppred/proteins.zip"
dataset_remote_path(::Type{OGBNProducts}) = "http://snap.stanford.edu/ogb/data/nodeproppred/products.zip"
dataset_remote_path(::Type{OGBNArxiv}) = "http://snap.stanford.edu/ogb/data/nodeproppred/arxiv.zip"
# dataset_remote_path(::Type{OGBNMag}) = "http://snap.stanford.edu/ogb/data/nodeproppred/mag.zip"
dataset_remote_path(::Type{OGBNPapers100M}) = "http://snap.stanford.edu/ogb/data/nodeproppred/papers100M-bin.zip"
dataset_checksum(::Type{Planetoid}) = "f52b3d47f5993912d7509b51e8090b6807228c4ba8c7d906f946868005c61c18"
dataset_checksum(::Type{Cora}) = "62e054f93be00a3dedb15b7ac15a2a07168ceab68b40bf95f54d2289d024c6bc"
dataset_checksum(::Type{PPI}) = "1f5b2b09ac0f897fa6aa1338c64ab75a5473674cbba89380120bede8cddb2a6a"
dataset_checksum(::Type{Reddit}) = "9a16353c28f8ddd07148fc5ac9b57b818d7911ea0fbe9052d66d49fc32b372bf"
dataset_checksum(::Type{QM7b}) = "e2a9d670d86eba769fa7b5eadeb592184067d2ec12468b1a220bfc38502dda61"
dataset_checksum(::Type{Entities}) = "e58bcfddd240d9bbc830bcae74e9854f1f778a96d3072930395f47d3c8e6f342"
dataset_checksum(::Type{OGBNProteins}) = "1cd3113dc2a6f0c87a549332b77d78be45cf99804c254c18d9c72029164a0859"
dataset_checksum(::Type{OGBNProducts}) = "5ea0a112edaec2141c0a2a612dd4aed58df97ff3e1ab1a0ca8238f43cbbb50a8"
dataset_checksum(::Type{OGBNArxiv}) = "49f85c801589ecdcc52cfaca99693aaea7b8af16a9ac3f41dd85a5f3193fe276"
# dataset_checksum(::Type{OGBNMag}) = "2afe62ead87f2c301a7398796991d347db85b2d01c5442c95169372bf5a9fca4"
dataset_checksum(::Type{OGBNPapers100M}) = "xxx"
| GraphMLDatasets | https://github.com/yuehhua/GraphMLDatasets.jl.git |
|
[
"MIT"
] | 0.1.7 | 3874bd47a47cda82504d3f205a2912d756af94e4 | code | 8946 | export
traindata,
train_indices,
validdata,
valid_indices,
testdata,
test_indices,
graphdata,
rawdata,
alldata,
metadata,
node_features,
edge_features,
node_labels
function check_precondition(::Planetoid, dataset::Symbol)
@assert dataset in subdatasets(Planetoid) "`dataset` should be one of citeseer, cora, pubmed."
end
function check_precondition(::Entities, dataset::Symbol)
@assert dataset in subdatasets(Entities) "`dataset` should be one of :aifb, :am, :mutag, :bgs."
end
"""
traindata(dataset)
Returns training data for `dataset`.
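For `Planetoid`, a `padding` keyword is available; with `padding=true` the returned feature and label
matrices are zero-padded to cover every node of the graph (a small sketch):
```julia
train_X, train_y = traindata(Planetoid(), :cora; padding=true)
size(train_X, 2)  # 2708, the number of nodes in Cora
```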
"""
traindata(::Dataset) = throw(ArgumentError("Training set not defined."))
function traindata(pla::Planetoid, dataset::Symbol; padding::Bool=false)
check_precondition(pla, dataset)
filename = @datadep_str "Planetoid/$(dataset).train.jld2"
X, y = JLD2.load(filename, "train_X", "train_y")
if padding
T = eltype(X)
idx = train_indices(pla, dataset)
padded_X = zeros(T, size(X, 1), nv(pla, dataset))
padded_y = zeros(T, size(y, 1), nv(pla, dataset))
padded_X[:, idx] .= X
padded_y[:, idx] .= y
return padded_X, padded_y
else
return X, y
end
end
# traindata(cora::Cora) = JLD2.load(datadep"Cora/cora.train.jld2", "graph", "train_X", "train_y")
traindata(::PPI) = JLD2.load(datadep"PPI/ppi.train.jld2", "graph", "X", "y", "ids")
# function traindata(ent::Entities, dataset::Symbol)
# check_precondition(ent, dataset)
# return JLD2.load(@datadep_str "Entities/$(dataset).train.jld2", "graph", "train_X", "train_y")
# end
"""
train_indices(dataset)
Returns indices of training data for `dataset`.
"""
function train_indices(pla::Planetoid, dataset::Symbol)
check_precondition(pla, dataset)
if dataset == :cora
return 1:140
elseif dataset == :citeseer
return 1:120
else # pubmed
return 1:60
end
end
train_indices(::OGBNProteins) = JLD2.load(datadep"OGBN-Proteins/indices.jld2", "train_indices")
train_indices(::OGBNProducts) = JLD2.load(datadep"OGBN-Products/indices.jld2", "train_indices")
train_indices(::OGBNArxiv) = JLD2.load(datadep"OGBN-Arxiv/indices.jld2", "train_indices")
# train_indices(::OGBNMag) = JLD2.load(datadep"OGBN-Mag/indices.jld2", "train_indices")
"""
validdata(dataset)
Returns validation data for `dataset`.
"""
validdata(::Dataset) = throw(ArgumentError("Validation set not defined."))
validdata(::PPI) = JLD2.load(datadep"PPI/ppi.valid.jld2", "graph", "X", "y", "ids")
"""
valid_indices(dataset)
Returns indices of validation data for `dataset`.
"""
function valid_indices(pla::Planetoid, dataset::Symbol)
check_precondition(pla, dataset)
if dataset == :cora
return 141:640
elseif dataset == :citeseer
return 121:520
else # pubmed
return 61:560
end
end
valid_indices(::OGBNProteins) = JLD2.load(datadep"OGBN-Proteins/indices.jld2", "valid_indices")
valid_indices(::OGBNProducts) = JLD2.load(datadep"OGBN-Products/indices.jld2", "valid_indices")
valid_indices(::OGBNArxiv) = JLD2.load(datadep"OGBN-Arxiv/indices.jld2", "valid_indices")
# valid_indices(::OGBNMag) = JLD2.load(datadep"OGBN-Mag/indices.jld2", "train_indices")
"""
testdata(dataset)
Returns testing data for `dataset`.
"""
testdata(::Dataset) = throw(ArgumentError("Testing set not defined."))
function testdata(pla::Planetoid, dataset::Symbol; padding::Bool=false)
check_precondition(pla, dataset)
filename = @datadep_str "Planetoid/$(dataset).test.jld2"
X, y = JLD2.load(filename, "test_X", "test_y")
if padding
T = eltype(X)
idx = test_indices(pla, dataset)
padded_X = zeros(T, size(X, 1), nv(pla, dataset))
padded_y = zeros(T, size(y, 1), nv(pla, dataset))
padded_X[:, idx] .= X
padded_y[:, idx] .= y
return padded_X, padded_y
else
return X, y
end
end
# testdata(::Cora) = load(datadep"Cora/cora.test.jld2", :graph, "test_X", "test_y")
testdata(::PPI) = JLD2.load(datadep"PPI/ppi.test.jld2", "graph", "X", "y", "ids")
# function testdata(ent::Entities, dataset::Symbol)
# check_precondition(ent, dataset)
# return JLD2.load(@datadep_str "Entities/$(dataset).test.jld2", "graph", "test_X", "test_y")
# end
"""
test_indices(dataset)
Returns indices of testing data for `dataset`.
"""
function test_indices(pla::Planetoid, dataset::Symbol)
check_precondition(pla, dataset)
filename = @datadep_str "Planetoid/$(dataset).indices.jld2"
return JLD2.load(filename, "test_indices")
end
test_indices(::OGBNProteins) = JLD2.load(datadep"OGBN-Proteins/indices.jld2", "test_indices")
test_indices(::OGBNProducts) = JLD2.load(datadep"OGBN-Products/indices.jld2", "test_indices")
test_indices(::OGBNArxiv) = JLD2.load(datadep"OGBN-Arxiv/indices.jld2", "test_indices")
# test_indices(::OGBNMag) = JLD2.load(datadep"OGBN-Mag/indices.jld2", "train_indices")
"""
graphdata(dataset)
Returns the graph for `dataset` in the form of JuliaGraphs objects.
"""
function graphdata(pla::Planetoid, dataset::Symbol)
check_precondition(pla, dataset)
filename = @datadep_str "Planetoid/$(dataset).graph.jld2"
return JLD2.load(filename, "sg")
end
graphdata(::Cora) = JLD2.load(datadep"Cora/cora.graph.jld2", "sg")
graphdata(::Reddit) = JLD2.load(datadep"Reddit/reddit.graph.jld2", "sg")
graphdata(::OGBNProteins) = JLD2.load(datadep"OGBN-Proteins/graph.jld2", "sg")
graphdata(::OGBNProducts) = JLD2.load(datadep"OGBN-Products/graph.jld2", "sg")
graphdata(::OGBNArxiv) = JLD2.load(datadep"OGBN-Arxiv/graph.jld2", "sg")
# graphdata(::OGBNMag) = JLD2.load(datadep"OGBN-Mag/graph.jld2", "g")
function Graphs.nv(pla::Planetoid, dataset::Symbol)
check_precondition(pla, dataset)
if dataset == :cora
return 2708
elseif dataset == :citeseer
return 3312
else # pubmed
return 19717
end
end
"""
alldata(dataset)
Returns the whole dataset for `dataset`.
"""
function alldata(pla::Planetoid, dataset::Symbol; padding::Bool=false)
check_precondition(pla, dataset)
filename = @datadep_str "Planetoid/$(dataset).all.jld2"
X, y = JLD2.load(filename, "all_X", "all_y")
if padding
T = eltype(X)
idx = 1:size(X, 2)
padded_X = zeros(T, size(X, 1), nv(pla, dataset))
padded_y = zeros(T, size(y, 1), nv(pla, dataset))
padded_X[:, idx] .= X
padded_y[:, idx] .= y
return padded_X, padded_y
else
return X, y
end
end
alldata(::Cora) = JLD2.load(datadep"Cora/cora.all.jld2", "all_X", "all_y")
alldata(::Reddit) = JLD2.load(datadep"Reddit/reddit.all.jld2", "all_X", "all_y")
"""
rawdata(dataset)
Returns the raw data for `dataset`.
"""
function rawdata(pla::Planetoid, dataset::Symbol)
check_precondition(pla, dataset)
filename = @datadep_str "Planetoid/$(dataset).raw.jld2"
return JLD2.load(filename,
"graph", "train_X", "train_y", "test_X", "test_y", "all_X", "all_y")
end
rawdata(::Cora) = JLD2.load(datadep"Cora/cora.raw.jld2", "graph", "all_X", "all_y")
rawdata(::Reddit) = JLD2.load(datadep"Reddit/reddit.raw.jld2", "graph", "X", "y", "ids", "types")
rawdata(::QM7b) = JLD2.load(datadep"QM7b/qm7b.raw.jld2", "names", "X", "T")
"""
metadata(dataset)
Returns the auxiliary data about `dataset`.
"""
function metadata(pla::Planetoid, dataset::Symbol)
check_precondition(pla, dataset)
filename = @datadep_str "Planetoid/$(dataset).metadata.jld2"
return JLD2.load(filename, "meta")
end
metadata(::Cora) = JLD2.load(datadep"Cora/cora.metadata.jld2", "meta")
metadata(::Reddit) = JLD2.load(datadep"Reddit/reddit.metadata.jld2", "meta")
"""
edge_features(dataset)
Returns all the edge features for `dataset`.
"""
edge_features(d::Dataset) = throw(ArgumentError("No existing edge features for $d."))
edge_features(::OGBNProteins) = JLD2.load(datadep"OGBN-Proteins/edge_feat.jld2", "edge_feat")
"""
node_features(dataset)
Returns all the node features for `dataset`.
"""
node_features(d::Dataset) = throw(ArgumentError("No existing node features for $d."))
node_features(::OGBNProducts) = JLD2.load(datadep"OGBN-Products/node_feat.jld2", "node_feat")
node_features(::OGBNArxiv) = JLD2.load(datadep"OGBN-Arxiv/node_feat.jld2", "node_feat")
# node_features(::OGBNMag) = JLD2.load(datadep"OGBN-Mag/node_feat.jld2", "node_feat")
"""
node_labels(dataset)
Returns all the node labels for `dataset`.
"""
node_labels(d::Dataset) = throw(ArgumentError("No existing node labels for $d."))
node_labels(::OGBNProteins) = JLD2.load(datadep"OGBN-Proteins/node_label.jld2", "node_label")
node_labels(::OGBNProducts) = JLD2.load(datadep"OGBN-Products/node_label.jld2", "node_label")
node_labels(::OGBNArxiv) = JLD2.load(datadep"OGBN-Arxiv/node_label.jld2", "node_label")
# node_labels(::OGBNMag) = JLD2.load(datadep"OGBN-Mag/node_label.jld2", "node_label")
| GraphMLDatasets | https://github.com/yuehhua/GraphMLDatasets.jl.git |
|
[
"MIT"
] | 0.1.7 | 3874bd47a47cda82504d3f205a2912d756af94e4 | code | 6099 | # Dataset metadata
num_tasks(::Type{OGBNProteins}) = 112
num_tasks(::Type{OGBNProducts}) = 1
num_tasks(::Type{OGBNArxiv}) = 1
# num_tasks(::Type{OGBNMag}) = 1
num_tasks(::Type{OGBNPapers100M}) = 1
num_classes(::Type{OGBNProteins}) = 2
num_classes(::Type{OGBNProducts}) = 47
num_classes(::Type{OGBNArxiv}) = 40
# num_classes(::Type{OGBNMag}) = 349
num_classes(::Type{OGBNPapers100M}) = 172
eval_metric(::Type{OGBNProteins}) = "ROCAUC"
eval_metric(::Type{OGBNProducts}) = "Accuracy"
eval_metric(::Type{OGBNArxiv}) = "Accuracy"
# eval_metric(::Type{OGBNMag}) = "Accuracy"
eval_metric(::Type{OGBNPapers100M}) = "Accuracy"
task_type(::Type{OGBNProteins}) = "binary classification"
task_type(::Type{OGBNProducts}) = "multiclass classification"
task_type(::Type{OGBNArxiv}) = "multiclass classification"
# task_type(::Type{OGBNMag}) = "multiclass classification"
task_type(::Type{OGBNPapers100M}) = "multiclass classification"
feature_dim(dataset::Type{<:OGBDataset}, kind::Symbol) = feature_dim(dataset, Val(kind))
feature_dim(dataset::Type{<:OGBDataset}, ::Val{<:Any}) = throw(ArgumentError("no such kind of feature dimension exists."))
feature_dim(::Type{OGBNProteins}, ::Val{:edge}) = 8
feature_dim(::Type{OGBNProducts}, ::Val{:node}) = 100
feature_dim(::Type{OGBNArxiv}, ::Val{:node}) = 128
# feature_dim(::Type{OGBNMag}, ::Val{:node}) = 128
split_prefix(::Type{OGBNProteins}) = "species"
split_prefix(::Type{OGBNProducts}) = "sales_ranking"
split_prefix(::Type{OGBNArxiv}) = "time"
# split_prefix(::Type{OGBNMag}) = "time/paper"
split_prefix(::Type{OGBNPapers100M}) = "time"
# read indices
function read_indices(obg::Type{<:OGBDataset}, reader, dir::String)
prefix = joinpath(dir, "split", split_prefix(obg))
train_indices = read_train_indices(reader, prefix)
valid_indices = read_valid_indices(reader, prefix)
test_indices = read_test_indices(reader, prefix)
return train_indices, valid_indices, test_indices
end
function read_train_indices(reader, dir::String)
filename = joinpath(dir, "train.csv.gz")
header = [:index]
df = read_zipfile(reader, filename, header)
df.index .+= 1
return df.index
end
function read_valid_indices(reader, dir::String)
filename = joinpath(dir, "valid.csv.gz")
header = [:index]
df = read_zipfile(reader, filename, header)
df.index .+= 1
return df.index
end
function read_test_indices(reader, dir::String)
filename = joinpath(dir, "test.csv.gz")
header = [:index]
df = read_zipfile(reader, filename, header)
df.index .+= 1
return df.index
end
# read graph and metadata
function read_graph(reader, dir::String)
V = read_num_node(reader, dir)
E = read_num_edge(reader, dir)
edges = read_edges(reader, dir)
return V, E, edges
end
function read_num_node(reader, dir::String)
filename = joinpath(dir, "num-node-list.csv.gz")
header = [:number]
df = read_zipfile(reader, filename, header)
return df.number[1]
end
function read_num_edge(reader, dir::String)
filename = joinpath(dir, "num-edge-list.csv.gz")
header = [:number]
df = read_zipfile(reader, filename, header)
return df.number[1]
end
function read_edges(reader, dir::String)
filename = joinpath(dir, "edge.csv.gz")
header = [:node1, :node2]
df = read_zipfile(reader, filename, header)
df.node1 .+= 1
df.node2 .+= 1
return df
end
# function read_heterogeneous_graph(reader, dir::String)
# Ns = read_num_node_dict(reader, dir)
# g = MetaDiGraph{Int32,Float32}(sum(values(Ns)))
# offset = calculate_offset(Ns)
# set_node_prop!(g, Ns, offset)
# # add edges and props for heterogeneous graphs
# triplets = read_triplets(reader, dir)
# for trip in triplets
# relation = triplet_to_dir(trip)
# dir2 = joinpath(dir, "relations", relation)
# E = read_num_edge(reader, dir2)
# reltype = read_edge_reltype(reader, dir2)
# edges = read_edges(reader, dir2)
# for i in 1:E
# src = edges.node1[i] + offset[trip.src]
# dst = edges.node2[i] + offset[trip.dst]
# props = Dict(:reltype => reltype[i], :relation => trip.relation)
# add_edge!(g, src, dst, props)
# end
# end
# return g
# end
# function read_triplets(reader, dir::String)
# filename = joinpath(dir, "triplet-type-list.csv.gz")
# header = [:src, :relation, :dst]
# df = read_zipfile(reader, filename, header)
# triplets = [(src=r.src, relation=r.relation, dst=r.dst) for r in eachrow(df)]
# return triplets
# end
# triplet_to_dir(trip::NamedTuple) = "$(trip.src)___$(trip.relation)___$(trip.dst)"
# function read_num_node_dict(reader, dir::String)
# filename = joinpath(dir, "num-node-dict.csv.gz")
# header = 1
# df = read_zipfile(reader, filename, header)
# return OrderedDict(n => df[1,n] for n in names(df))
# end
# function read_edge_reltype(reader, dir::String)
# filename = joinpath(dir, "edge_reltype.csv.gz")
# header = [:reltype]
# df = read_zipfile(reader, filename, header)
# return df.reltype
# end
# function calculate_offset(d::OrderedDict)
# acc = 0
# offset = OrderedDict{String,Int32}()
# for (ent, n) in d
# offset[ent] = acc
# acc += n
# end
# return offset
# end
# function set_node_prop!(g::MetaDiGraph, Ns, offset)
# for (ent, n) in Ns
# ofs = offset[ent]
# for i in 1:n
# node = i + ofs
# set_prop!(g, node, :entity, ent)
# end
# end
# return g
# end
# read features and labels
function read_labels(dataset::Type{<:OGBDataset}, reader, dir::String, csv_prefix::String)
filename = joinpath(dir, csv_prefix * "-label.csv.gz")
header = false
df = read_zipfile(reader, filename, header)
return Matrix{UInt16}(df)
end
function read_features(dataset::Type{<:OGBDataset}, reader, dir::String, csv_prefix::String)
filename = joinpath(dir, csv_prefix * "-feat.csv.gz")
header = false
df = read_zipfile(reader, filename, header)
return Matrix{Float32}(df)
end
| GraphMLDatasets | https://github.com/yuehhua/GraphMLDatasets.jl.git |
|
[
"MIT"
] | 0.1.7 | 3874bd47a47cda82504d3f205a2912d756af94e4 | code | 15366 | function dataset_preprocess(dataset::Type{Planetoid})
return function preprocess(local_path)
for subds in subdatasets(dataset)
graph_file = @datadep_str "Planetoid/ind.$(subds).graph"
trainX_file = @datadep_str "Planetoid/ind.$(subds).x"
trainy_file = @datadep_str "Planetoid/ind.$(subds).y"
testX_file = @datadep_str "Planetoid/ind.$(subds).tx"
testy_file = @datadep_str "Planetoid/ind.$(subds).ty"
allX_file = @datadep_str "Planetoid/ind.$(subds).allx"
ally_file = @datadep_str "Planetoid/ind.$(subds).ally"
test_idx_file = @datadep_str "Planetoid/ind.$(subds).test.index"
train_X = Pickle.npyload(trainX_file)
train_y = Pickle.npyload(trainy_file)
test_X = Pickle.npyload(testX_file)
test_y = Pickle.npyload(testy_file)
all_X = Pickle.npyload(allX_file)
all_y = Pickle.npyload(ally_file)
graph = Pickle.npyload(graph_file)
test_idx = vec(readdlm(test_idx_file, ' ', Int64))
num_V = length(graph)
sg = to_simplegraph(graph, num_V)
num_E = ne(sg)
feat_dim = size(all_X, 2)
label_dim = size(all_y, 2)
train_X, train_y = sparse(train_X'), sparse(train_y')
test_X, test_y = sparse(test_X'), sparse(test_y')
all_X, all_y = sparse(all_X'), sparse(all_y')
raw = Dict("graph"=>graph, "train_X"=>train_X, "train_y"=>train_y,
"test_X"=>test_X, "test_y"=>test_y, "all_X"=>all_X, "all_y"=>all_y)
meta = (graph=(num_V=num_V, num_E=num_E),
train=(features_dim=(feat_dim, size(train_X, 2)), labels_dim=(label_dim, size(train_y, 2))),
test=(features_dim=(feat_dim, size(test_X, 2)), labels_dim=(label_dim, size(test_y, 2))),
all=(features_dim=(feat_dim, size(all_X, 2)), labels_dim=(label_dim, size(all_y, 2)))
)
graphfile = replace(graph_file, "ind.$(subds).graph"=>"$(subds).graph.jld2")
trainfile = replace(graph_file, "ind.$(subds).graph"=>"$(subds).train.jld2")
testfile = replace(graph_file, "ind.$(subds).graph"=>"$(subds).test.jld2")
allfile = replace(graph_file, "ind.$(subds).graph"=>"$(subds).all.jld2")
test_idxfile = replace(graph_file, "ind.$(subds).graph"=>"$(subds).indices.jld2")
rawfile = replace(graph_file, "ind.$(subds).graph"=>"$(subds).raw.jld2")
metadatafile = replace(graph_file, "ind.$(subds).graph"=>"$(subds).metadata.jld2")
JLD2.save(graphfile, "sg", sg)
JLD2.save(trainfile, "train_X", train_X, "train_y", train_y)
JLD2.save(testfile, "test_X", test_X, "test_y", test_y)
JLD2.save(allfile, "all_X", all_X, "all_y", all_y)
JLD2.save(test_idxfile, "test_indices", test_idx)
JLD2.save(rawfile, raw)
JLD2.save(metadatafile, "meta", meta)
end
end
end
read_index(filename) = map(x -> parse(Int64, x), readlines(filename))
## Cora dataset
function dataset_preprocess(dataset::Type{Cora})
return function preprocess(local_path)
reader = ZipFile.Reader(local_path)
mA, nA = read_npyarray(reader, "adj_shape")
adj_data = read_npyarray(reader, "adj_data")
adj_indptr = read_npyarray(reader, "adj_indptr") .+ 1
adj_indices = read_npyarray(reader, "adj_indices") .+ 1
mX, nX = read_npyarray(reader, "attr_shape")
attr_data = read_npyarray(reader, "attr_data")
attr_indptr = read_npyarray(reader, "attr_indptr") .+ 1
attr_indices = read_npyarray(reader, "attr_indices") .+ 1
nzA, colptrA, rowvalA = Pickle.csr_to_csc(mA, nA, adj_data, adj_indptr, adj_indices)
nzX, colptrX, rowvalX = Pickle.csr_to_csc(mX, nX, attr_data, attr_indptr, attr_indices)
graph = SparseMatrixCSC(mA, nA, colptrA, rowvalA, nzA)
X = SparseMatrixCSC(mX, nX, colptrX, rowvalX, nzX)
y = read_npyarray(reader, "labels")
all_X = sparse(X')
all_y = Matrix{UInt16}(y')
sg = to_simpledigraph(graph)
meta = (graph=(num_V=nv(sg), num_E=ne(sg)),
all=(features_dim=size(all_X), labels_dim=size(all_y))
)
graphfile = replace(local_path, "cora.npz"=>"cora.graph.jld2")
# trainfile = replace(local_path, "cora.npz"=>"cora.train.jld2")
# testfile = replace(local_path, "cora.npz"=>"cora.test.jld2")
allfile = replace(local_path, "cora.npz"=>"cora.all.jld2")
rawfile = replace(local_path, "cora.npz"=>"cora.raw.jld2")
metadatafile = replace(local_path, "cora.npz"=>"cora.metadata.jld2")
JLD2.save(graphfile, "sg", sg)
# JLD2.save(trainfile, "train_X", train_X, "train_y", train_y)
# JLD2.save(testfile, "test_X", test_X, "test_y", test_y)
JLD2.save(allfile, Dict("all_X"=>all_X, "all_y"=>all_y))
JLD2.save(rawfile, Dict("graph"=>graph, "all_X"=>X, "all_y"=>y))
JLD2.save(metadatafile, "meta", meta)
end
end
## PPI dataset
function dataset_preprocess(dataset::Type{PPI})
return function preprocess(local_path)
reader = ZipFile.Reader(local_path)
for phase in ["train", "test", "valid"]
ids = read_npyarray(reader, "$(phase)_graph_id")
X = Matrix{Float32}(read_npyarray(reader, "$(phase)_feats"))
y = SparseMatrixCSC{Int32,Int64}(read_npyarray(reader, "$(phase)_labels"))
i = findfirst(x -> x.name == "$(phase)_graph.json", reader.files)
graph = read_ppi_graph(reader.files[i])
jld2file = replace(local_path, "ppi.zip"=>"ppi.$(phase).jld2")
JLD2.save(jld2file, Dict("graph"=>graph, "X"=>X, "y"=>y, "ids"=>ids))
end
end
end
function read_ppi_graph(io::IO)
d = JSON.parse(io)
g = SimpleDiGraph{Int32}(length(d["nodes"]))
for pair in d["links"]
add_edge!(g, pair["source"], pair["target"])
end
g
end
## Reddit dataset
function dataset_preprocess(dataset::Type{Reddit})
return function preprocess(local_path)
reader = ZipFile.Reader(local_path)
graph_file = IOBuffer(read(reader.files[1])) # reddit_graph.npz
data_file = IOBuffer(read(reader.files[2])) # reddit_data.npz
rawfile = replace(local_path, "reddit.zip"=>"reddit.raw.jld2")
graph, X, y = to_reddit_rawfile(graph_file, data_file, rawfile)
sg = to_simplegraph(graph)
all_X = Matrix(X')
all_y = Matrix{UInt16}(y')
meta = (graph=(num_V=nv(sg), num_E=ne(sg)),
all=(features_dim=size(all_X), labels_dim=size(all_y)))
graphfile = replace(local_path, "reddit.zip"=>"reddit.graph.jld2")
allfile = replace(local_path, "reddit.zip"=>"reddit.all.jld2")
metadatafile = replace(local_path, "reddit.zip"=>"reddit.metadata.jld2")
JLD2.save(graphfile, "sg", sg)
JLD2.save(allfile, "all_X", all_X, "all_y", all_y)
JLD2.save(metadatafile, "meta", meta)
end
end
function to_reddit_rawfile(graph_file, data_file, rawfile)
reader = ZipFile.Reader(data_file)
graph = read_reddit_graph(graph_file)
X = Matrix{Float32}(read_npyarray(reader, "feature"))
y = Vector{Int32}(read_npyarray(reader, "label"))
ids = Vector{Int32}(read_npyarray(reader, "node_ids"))
types = Vector{UInt8}(read_npyarray(reader, "node_types"))
JLD2.save(rawfile, Dict("graph"=>graph, "X"=>X, "y"=>y, "ids"=>ids, "types"=>types))
graph, X, y
end
function read_reddit_graph(graph_file)
reader = ZipFile.Reader(graph_file)
row = read_npyarray(reader, "row") .+ 1
col = read_npyarray(reader, "col") .+ 1
data = read_npyarray(reader, "data")
return sparse(row, col, data)
end
## QM7b dataset
function dataset_preprocess(dataset::Type{QM7b})
return function preprocess(local_path)
vars = matread(local_path)
names = vars["names"]
X = vars["X"]
T = Matrix{Float32}(vars["T"])
rawfile = replace(local_path, "qm7b.mat"=>"qm7b.raw.jld2")
JLD2.save(rawfile, Dict("names"=>names, "X"=>X, "T"=>T))
end
end
## Entities dataset
function dataset_preprocess(dataset::Type{Entities})
return function preprocess(local_path)
for subds in subdatasets(dataset)
tgz_file = @datadep_str "Entities/$(subds).tgz"
dir = joinpath(dirname(tgz_file), "$(subds)")
unzip_tgz(tgz_file, dir)
# nt_file = joinpath(dir, "$(dataset)_stripped.nt.gz")
# train_file = joinpath(dir, "trainingSet.tsv")
# test_file = joinpath(dir, "testSet.tsv")
# py"""
# import rdflib as rdf
# import gzip
# g = rdf.Graph()
# with gzip.open($nt_file, 'rb') as file:
# g.parse(file, format='nt')
# """
# train = CSV.read(train_file, delim='\t')
# test = CSV.read(test_file, delim='\t')
# uri = HTTP.URI("http://data.bgs.ac.uk/id/Geochronology/Division/CN")
end
end
end
## OGBNProteins dataset
function dataset_preprocess(dataset::Type{OGBNProteins})
return function preprocess(local_path)
reader = ZipFile.Reader(local_path)
train_indices, valid_indices, test_indices = read_indices(dataset, reader, "proteins")
V, E, edges = read_graph(reader, "proteins/raw")
edge_feat = read_features(dataset, reader, "proteins/raw", "edge")
node_label = read_labels(dataset, reader, "proteins/raw", "node")
graph = to_simplegraph(edges, V)
indices = Dict("train_indices"=>train_indices, "valid_indices"=>valid_indices, "test_indices"=>test_indices)
indicesfile = replace(local_path, "proteins.zip"=>"indices.jld2")
graphfile = replace(local_path, "proteins.zip"=>"graph.jld2")
featfile = replace(local_path, "proteins.zip"=>"edge_feat.jld2")
labelfile = replace(local_path, "proteins.zip"=>"node_label.jld2")
JLD2.save(indicesfile, indices)
JLD2.save(graphfile, "sg", graph)
JLD2.save(featfile, "edge_feat", edge_feat)
JLD2.save(labelfile, "node_label", node_label)
end
end
function read_node_species(reader, dir::String)
filename = joinpath(dir, "node_species.csv.gz")
header = [:species]
df = read_zipfile(reader, filename, header)
return df
end
## OGBNProducts dataset
function dataset_preprocess(dataset::Type{OGBNProducts})
return function preprocess(local_path)
reader = ZipFile.Reader(local_path)
train_indices, valid_indices, test_indices = read_indices(dataset, reader, "products")
V, E, edges = read_graph(reader, "products/raw")
node_feat = read_features(dataset, reader, "products/raw", "node")
node_label = read_labels(dataset, reader, "products/raw", "node")
graph = to_simplegraph(edges, V)
indices = Dict("train_indices"=>train_indices, "valid_indices"=>valid_indices, "test_indices"=>test_indices)
indicesfile = replace(local_path, "products.zip"=>"indices.jld2")
graphfile = replace(local_path, "products.zip"=>"graph.jld2")
featfile = replace(local_path, "products.zip"=>"node_feat.jld2")
labelfile = replace(local_path, "products.zip"=>"node_label.jld2")
JLD2.save(indicesfile, indices)
JLD2.save(graphfile, "sg", graph)
JLD2.save(featfile, "node_feat", node_feat)
JLD2.save(labelfile, "node_label", node_label)
end
end
## OGBNArxiv dataset
function dataset_preprocess(dataset::Type{OGBNArxiv})
return function preprocess(local_path)
reader = ZipFile.Reader(local_path)
train_indices, valid_indices, test_indices = read_indices(dataset, reader, "arxiv")
V, E, edges = read_graph(reader, "arxiv/raw")
node_feat = read_features(dataset, reader, "arxiv/raw", "node")
node_label = read_labels(dataset, reader, "arxiv/raw", "node")
graph = to_simpledigraph(edges, V)
indices = Dict("train_indices"=>train_indices, "valid_indices"=>valid_indices, "test_indices"=>test_indices)
indicesfile = replace(local_path, "arxiv.zip"=>"indices.jld2")
graphfile = replace(local_path, "arxiv.zip"=>"graph.jld2")
featfile = replace(local_path, "arxiv.zip"=>"node_feat.jld2")
labelfile = replace(local_path, "arxiv.zip"=>"node_label.jld2")
JLD2.save(indicesfile, indices)
JLD2.save(graphfile, "sg", graph)
JLD2.save(featfile, "node_feat", node_feat)
JLD2.save(labelfile, "node_label", node_label)
end
end
## OGBNMag dataset
# function dataset_preprocess(dataset::Type{OGBNMag})
# return function preprocess(local_path)
# reader = ZipFile.Reader(local_path)
# train_indices, valid_indices, test_indices = read_indices(dataset, reader, "mag")
# graph = read_heterogeneous_graph(reader, "mag/raw")
# node_year = read_mag_year(reader, "mag/raw/node-feat/paper")
# node_feat = read_features(dataset, reader, "mag/raw/node-feat/paper", "node")
# node_label = read_labels(dataset, reader, "mag/raw/node-label/paper", "node")
# indices = Dict("train_indices"=>train_indices, "valid_indices"=>valid_indices, "test_indices"=>test_indices)
# indicesfile = replace(local_path, "mag.zip"=>"indices.jld2")
# graphfile = replace(local_path, "mag.zip"=>"graph.jld2")
# featfile = replace(local_path, "mag.zip"=>"node_feat.jld2")
# labelfile = replace(local_path, "mag.zip"=>"node_label.jld2")
# JLD2.save(indicesfile, indices)
# JLD2.save(graphfile, "g", graph)
# JLD2.save(featfile, "node_feat", node_feat, "node_year", node_year)
# JLD2.save(labelfile, "node_label", node_label)
# end
# end
# function read_mag_year(reader, dir::String)
# filename = joinpath(dir, "node_year.csv.gz")
# header = [:year]
# df = read_zipfile(reader, filename, header)
# return df
# end
## OGBNPapers100M dataset
function dataset_preprocess(dataset::Type{OGBNPapers100M})
return function preprocess(local_path)
reader = ZipFile.Reader(local_path)
# train_indices, valid_indices, test_indices = read_indices(dataset, reader, "products")
# V, E, edges = read_graph(reader, "products/raw")
# edge_feat = read_edge_feat(dataset, reader, "products/raw")
# node_label = read_node_label(dataset, reader, "products/raw")
# graph = to_simplegraph(edges, V)
# indices = Dict("train_indices"=>train_indices, "valid_indices"=>valid_indices, "test_indices"=>test_indices)
# indicesfile = replace(local_path, "proteins.zip"=>"indices.jld2")
# graphfile = replace(local_path, "proteins.zip"=>"graph.jld2")
# featfile = replace(local_path, "proteins.zip"=>"edge_feat.jld2")
# labelfile = replace(local_path, "proteins.zip"=>"node_label.jld2")
# JLD2.save(indicesfile, indices)
# JLD2.save(graphfile, "sg", graph)
# JLD2.save(featfile, "edge_feat", edge_feat)
# JLD2.save(labelfile, "node_label", node_label)
end
end
| GraphMLDatasets | https://github.com/yuehhua/GraphMLDatasets.jl.git |
|
[
"MIT"
] | 0.1.7 | 3874bd47a47cda82504d3f205a2912d756af94e4 | code | 641 | datasets() = datasets(Dataset)
function datasets(dt::Type{<:Dataset})
if isconcretetype(dt)
return [dt]
elseif isabstracttype(dt)
ds = [datasets(s) for s in subtypes(dt)]
return collect(Iterators.flatten(ds))
end
end
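# Register a DataDeps.jl `DataDep` for `dataset`: the remote file is downloaded on
# first access and `dataset_preprocess(dataset)` is run as the post-fetch step.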
function init_dataset(dataset::Type{<:Dataset})
register(DataDep(
dataset_name(dataset),
dataset_message(dataset),
dataset_remote_path(dataset),
dataset_checksum(dataset);
post_fetch_method=dataset_preprocess(dataset),
))
end
function init_dataset(::Type{Dataset})
for dataset in datasets()
init_dataset(dataset)
end
end
| GraphMLDatasets | https://github.com/yuehhua/GraphMLDatasets.jl.git |
|
[
"MIT"
] | 0.1.7 | 3874bd47a47cda82504d3f205a2912d756af94e4 | code | 1812 | function unzip_zip(src::String, dest::String=dirname(src))
run(`unzip $src -d $dest`)
end
function unzip_tgz(src::String, dest::String)
isdir(dest) || mkdir("$dest")
run(`tar zxf $src -C $dest`)
end
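# Graph construction helpers: build Graphs.jl `SimpleGraph`/`SimpleDiGraph` objects
# from adjacency dictionaries, sparse adjacency matrices or edge-list `DataFrame`s.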
function to_simplegraph(data::AbstractDict, num_V::Int)
g = SimpleGraph{UInt32}(num_V)
for (i, js) in data
for j in Set(js)
add_edge!(g, i, j)
end
end
g
end
function to_simplegraph(data::SparseMatrixCSC)
num_V = size(data, 1)
g = SimpleGraph{UInt32}(num_V)
for (i, j, nz) in zip(findnz(data)...)
if nz == 1
add_edge!(g, i, j)
end
end
g
end
function to_simplegraph(edges::DataFrame, num_V::Integer)
g = SimpleGraph{Int32}(num_V)
for row in eachrow(edges)
add_edge!(g, row.node1, row.node2)
end
return g
end
function to_simpledigraph(data::SparseMatrixCSC)
num_V = size(data, 1)
g = SimpleDiGraph{UInt32}(num_V)
for (i, j, nz) in zip(findnz(data)...)
if nz == 1
add_edge!(g, i, j)
end
end
g
end
function to_simpledigraph(edges::DataFrame, num_V::Integer)
g = SimpleDiGraph{Int32}(num_V)
for row in eachrow(edges)
add_edge!(g, row.node1, row.node2)
end
return g
end
function read_npyarray(reader, index::String)
i = findfirst(x -> x.name == (index * ".npy"), reader.files)
return NPZ.npzreadarray(reader.files[i])
end
function read_npzarray(reader, index::String)
i = findfirst(x -> x.name == (index * ".npz"), reader.files)
return NPZ.npzreadarray(reader.files[i])
end
function read_zipfile(reader, filename::String, header)
file = filter(x -> x.name == filename, reader.files)[1]
df = CSV.File(transcode(GzipDecompressor, read(file)); header=header) |> DataFrame
return df
end
| GraphMLDatasets | https://github.com/yuehhua/GraphMLDatasets.jl.git |
|
[
"MIT"
] | 0.1.7 | 3874bd47a47cda82504d3f205a2912d756af94e4 | code | 654 | @testset "cora" begin
graph = graphdata(Cora())
@test typeof(graph) == SimpleDiGraph{UInt32}
@test nv(graph) == 19793
@test ne(graph) == 65311
all_X, all_y = alldata(Cora())
@test typeof(all_X) == SparseMatrixCSC{Float32,Int64}
@test size(all_X) == (8710, 19793)
@test typeof(all_y) == Matrix{UInt16}
@test size(all_y) == (1, 19793)
g, all_X, all_y = rawdata(Cora())
@test g isa SparseMatrixCSC{Float32,Int64}
@test all_X isa SparseMatrixCSC{Float32,Int64}
@test all_y isa Vector{Int64}
meta = metadata(Cora())
@test meta.graph.num_V == nv(graph)
@test meta.graph.num_E == ne(graph)
end
| GraphMLDatasets | https://github.com/yuehhua/GraphMLDatasets.jl.git |
|
[
"MIT"
] | 0.1.7 | 3874bd47a47cda82504d3f205a2912d756af94e4 | code | 387 | tests = [
"planetoid",
"cora",
"ppi",
"reddit",
"qm7b",
"entities"
]
@testset "datasets" begin
@test_throws ArgumentError traindata(Reddit())
@test_throws ArgumentError validdata(Reddit())
@test_throws ArgumentError testdata(Reddit())
@test_throws AssertionError traindata(Planetoid(), :abc)
for t in tests
include("$(t).jl")
end
end | GraphMLDatasets | https://github.com/yuehhua/GraphMLDatasets.jl.git |
|
[
"MIT"
] | 0.1.7 | 3874bd47a47cda82504d3f205a2912d756af94e4 | code | 617 | @testset "entities" begin
# g, train_X, train_y = traindata(Entities(), :bgs)
# @test typeof(g) == Dict{Any,Any}
# @test typeof(train_X) == SparseMatrixCSC{Float32,Int64}
# @test size(train_X) == (140, 1433)
# @test typeof(train_y) == SparseMatrixCSC{Int32,Int64}
# @test size(train_y) == (140, 7)
# g, test_X, test_y = testdata(Entities(), :bgs)
# @test typeof(g) == Dict{Any,Any}
# @test typeof(test_X) == SparseMatrixCSC{Float32,Int64}
# @test size(test_X) == (1000, 1433)
# @test typeof(test_y) == SparseMatrixCSC{Int32,Int64}
# @test size(test_y) == (1000, 7)
end | GraphMLDatasets | https://github.com/yuehhua/GraphMLDatasets.jl.git |
|
[
"MIT"
] | 0.1.7 | 3874bd47a47cda82504d3f205a2912d756af94e4 | code | 4194 | @testset "ogb" begin
@test GraphMLDatasets.num_tasks(OGBNProteins) == 112
@test GraphMLDatasets.num_tasks(OGBNProducts) == 1
@test GraphMLDatasets.num_tasks(OGBNArxiv) == 1
@test_skip GraphMLDatasets.num_tasks(OGBNMag) == 1
@test GraphMLDatasets.num_tasks(OGBNPapers100M) == 1
@test GraphMLDatasets.num_classes(OGBNProteins) == 2
@test GraphMLDatasets.num_classes(OGBNProducts) == 47
@test GraphMLDatasets.num_classes(OGBNArxiv) == 40
@test_skip GraphMLDatasets.num_classes(OGBNMag) == 349
@test GraphMLDatasets.num_classes(OGBNPapers100M) == 172
@test GraphMLDatasets.eval_metric(OGBNProteins) == "ROCAUC"
@test GraphMLDatasets.eval_metric(OGBNProducts) == "Accuracy"
@test GraphMLDatasets.eval_metric(OGBNArxiv) == "Accuracy"
@test_skip GraphMLDatasets.eval_metric(OGBNMag) == "Accuracy"
@test GraphMLDatasets.eval_metric(OGBNPapers100M) == "Accuracy"
@test GraphMLDatasets.task_type(OGBNProteins) == "binary classification"
@test GraphMLDatasets.task_type(OGBNProducts) == "multiclass classification"
@test GraphMLDatasets.task_type(OGBNArxiv) == "multiclass classification"
@test_skip GraphMLDatasets.task_type(OGBNMag) == "multiclass classification"
@test GraphMLDatasets.task_type(OGBNPapers100M) == "multiclass classification"
@testset "OGBNProteins" begin
@test length(train_indices(OGBNProteins())) == 86619
@test length(valid_indices(OGBNProteins())) == 21236
@test length(test_indices(OGBNProteins())) == 24679
graph = graphdata(OGBNProteins())
@test graph isa SimpleGraph{Int32}
@test nv(graph) == 132534
@test ne(graph) == 39561252
ef = edge_features(OGBNProteins())
@test ef isa Matrix{Float32}
@test size(ef) == (ne(graph), GraphMLDatasets.feature_dim(OGBNProteins, :edge))
nl = node_labels(OGBNProteins())
@test nl isa Matrix{UInt16}
@test size(nl) == (nv(graph), GraphMLDatasets.num_tasks(OGBNProteins))
end
@testset "OGBNProducts" begin
@test length(train_indices(OGBNProducts())) == 196615
@test length(valid_indices(OGBNProducts())) == 39323
@test length(test_indices(OGBNProducts())) == 2213091
graph = graphdata(OGBNProducts())
@test graph isa SimpleGraph{Int32}
@test nv(graph) == 2449029
@test ne(graph) == 61859140
nf = node_features(OGBNProducts())
@test nf isa Matrix{Float32}
@test size(nf) == (nv(graph), GraphMLDatasets.feature_dim(OGBNProducts, :node))
nl = node_labels(OGBNProducts())
@test nl isa Matrix{UInt16}
@test size(nl) == (nv(graph), GraphMLDatasets.num_tasks(OGBNProducts))
end
@testset "OGBNArxiv" begin
@test length(train_indices(OGBNArxiv())) == 90941
@test length(valid_indices(OGBNArxiv())) == 29799
@test length(test_indices(OGBNArxiv())) == 48603
graph = graphdata(OGBNArxiv())
@test graph isa SimpleDiGraph{Int32}
@test nv(graph) == 169343
@test ne(graph) == 1166243
nf = node_features(OGBNArxiv())
@test nf isa Matrix{Float32}
@test size(nf) == (nv(graph), GraphMLDatasets.feature_dim(OGBNArxiv, :node))
nl = node_labels(OGBNArxiv())
@test nl isa Matrix{UInt16}
@test size(nl) == (nv(graph), GraphMLDatasets.num_tasks(OGBNArxiv))
end
@testset "OGBNMag" begin
@test_skip length(train_indices(OGBNMag())) == 629571
@test_skip length(valid_indices(OGBNMag())) == 64879
@test_skip length(test_indices(OGBNMag())) == 41939
# graph = graphdata(OGBNMag())
@test_skip graph isa MetaDiGraph{Int32}
@test_skip nv(graph) == 1939743
@test_skip ne(graph) == 21111007
# nf = node_features(OGBNMag())
@test_skip nf isa Matrix{Float32}
@test_skip size(nf) == (nv(graph), GraphMLDatasets.feature_dim(OGBNMag, :node))
# nl = node_labels(OGBNMag())
@test_skip nl isa Matrix{UInt16}
@test_skip size(nl) == (nv(graph), GraphMLDatasets.num_tasks(OGBNMag))
end
@testset "OGBNPapers100M" begin
end
end
| GraphMLDatasets | https://github.com/yuehhua/GraphMLDatasets.jl.git |
|
[
"MIT"
] | 0.1.7 | 3874bd47a47cda82504d3f205a2912d756af94e4 | code | 1975 | @testset "planetoid" begin
graph = graphdata(Planetoid(), :cora)
@test typeof(graph) == SimpleGraph{UInt32}
@test nv(graph) == 2708
@test ne(graph) == 5275
train_X, train_y = traindata(Planetoid(), :cora)
@test typeof(train_X) == SparseMatrixCSC{Float32,Int64}
@test size(train_X) == (1433, 140)
@test typeof(train_y) == SparseMatrixCSC{Int32,Int64}
@test size(train_y) == (7, 140)
@test train_indices(Planetoid(), :cora) == 1:140
@test train_indices(Planetoid(), :citeseer) == 1:120
@test train_indices(Planetoid(), :pubmed) == 1:60
@test valid_indices(Planetoid(), :cora) == 141:640
@test valid_indices(Planetoid(), :citeseer) == 121:520
@test valid_indices(Planetoid(), :pubmed) == 61:560
test_X, test_y = testdata(Planetoid(), :cora)
@test typeof(test_X) == SparseMatrixCSC{Float32,Int64}
@test size(test_X) == (1433, 1000)
@test typeof(test_y) == SparseMatrixCSC{Int32,Int64}
@test size(test_y) == (7, 1000)
@test length(test_indices(Planetoid(), :cora)) == 1000
@test length(test_indices(Planetoid(), :citeseer)) == 1000
@test length(test_indices(Planetoid(), :pubmed)) == 1000
all_X, all_y = alldata(Planetoid(), :cora)
@test typeof(all_X) == SparseMatrixCSC{Float32,Int64}
@test size(all_X) == (1433, 1708)
@test typeof(all_y) == SparseMatrixCSC{Int32,Int64}
@test size(all_y) == (7, 1708)
g, train_X, train_y, test_X, test_y, all_X, all_y = rawdata(Planetoid(), :cora)
# @test g isa DataStructures.DefaultDict
@test train_X isa SparseMatrixCSC{Float32,Int64}
@test train_y isa SparseMatrixCSC{Int32,Int64}
@test test_X isa SparseMatrixCSC{Float32,Int64}
@test test_y isa SparseMatrixCSC{Int32,Int64}
@test all_X isa SparseMatrixCSC{Float32,Int64}
@test all_y isa SparseMatrixCSC{Int32,Int64}
meta = metadata(Planetoid(), :cora)
@test meta.graph.num_V == nv(graph)
@test meta.graph.num_E == ne(graph)
end
| GraphMLDatasets | https://github.com/yuehhua/GraphMLDatasets.jl.git |
|
[
"MIT"
] | 0.1.7 | 3874bd47a47cda82504d3f205a2912d756af94e4 | code | 1009 | @testset "ppi" begin
g, train_X, train_y, train_ids = traindata(PPI())
@test typeof(g) == SimpleDiGraph{Int32}
@test nv(g) == 44906
@test ne(g) == 1271267
@test typeof(train_X) == Array{Float32,2}
@test size(train_X) == (44906, 50)
@test typeof(train_y) == SparseMatrixCSC{Int32,Int64}
@test size(train_y) == (44906, 121)
g, valid_X, valid_y, valid_ids = validdata(PPI())
@test typeof(g) == SimpleDiGraph{Int32}
@test nv(g) == 6514
@test ne(g) == 205395
@test typeof(valid_X) == Array{Float32,2}
@test size(valid_X) == (6514, 50)
@test typeof(valid_y) == SparseMatrixCSC{Int32,Int64}
@test size(valid_y) == (6514, 121)
g, test_X, test_y, test_ids = testdata(PPI())
@test typeof(g) == SimpleDiGraph{Int32}
@test nv(g) == 5524
@test ne(g) == 167461
@test typeof(test_X) == Array{Float32,2}
@test size(test_X) == (5524, 50)
@test typeof(test_y) == SparseMatrixCSC{Int32,Int64}
@test size(test_y) == (5524, 121)
end | GraphMLDatasets | https://github.com/yuehhua/GraphMLDatasets.jl.git |
|
[
"MIT"
] | 0.1.7 | 3874bd47a47cda82504d3f205a2912d756af94e4 | code | 259 | @testset "qm7b" begin
names, X, T = rawdata(QM7b())
@test names isa Vector{String}
@test size(names) == (14,)
@test X isa Array{Float32,3}
@test size(X) == (7211, 23, 23)
@test T isa Matrix{Float32}
@test size(T) == (7211, 14)
end
| GraphMLDatasets | https://github.com/yuehhua/GraphMLDatasets.jl.git |
|
[
"MIT"
] | 0.1.7 | 3874bd47a47cda82504d3f205a2912d756af94e4 | code | 697 | @testset "reddit" begin
graph = graphdata(Reddit())
@test typeof(graph) == SimpleGraph{UInt32}
@test nv(graph) == 232965
@test ne(graph) == 57307946
all_X, all_y = alldata(Reddit())
@test typeof(all_X) == Matrix{Float32}
@test size(all_X) == (602, 232965)
@test typeof(all_y) == Matrix{UInt16}
@test size(all_y) == (1, 232965)
g, X, y, ids, types = rawdata(Reddit())
@test g isa SparseMatrixCSC{Int64,Int64}
@test X isa Matrix{Float32}
@test y isa Vector{Int32}
@test ids isa Vector{Int32}
@test types isa Vector{UInt8}
meta = metadata(Reddit())
@test meta.graph.num_V == nv(graph)
@test meta.graph.num_E == ne(graph)
end
| GraphMLDatasets | https://github.com/yuehhua/GraphMLDatasets.jl.git |
|
[
"MIT"
] | 0.1.7 | 3874bd47a47cda82504d3f205a2912d756af94e4 | code | 317 | using GraphMLDatasets
using Graphs
using SparseArrays
using Test
ENV["DATADEPS_ALWAYS_ACCEPT"] = true
tests = [
"planetoid",
"cora",
"ppi",
"reddit",
"qm7b",
"entities",
"ogb",
"utils",
]
@testset "GraphMLDatasets.jl" begin
for t in tests
include("$(t).jl")
end
end
| GraphMLDatasets | https://github.com/yuehhua/GraphMLDatasets.jl.git |
|
[
"MIT"
] | 0.1.7 | 3874bd47a47cda82504d3f205a2912d756af94e4 | code | 437 | @testset "utils" begin
source = joinpath(pkgdir(GraphMLDatasets), "test", "data")
zipfile = joinpath(source, "test.zip")
tgzfile = joinpath(source, "test.tgz")
target_file = joinpath(source, "random.file")
GraphMLDatasets.unzip_zip(zipfile)
@test isfile(target_file)
rm(target_file, force=true)
GraphMLDatasets.unzip_tgz(tgzfile, source)
@test isfile(target_file)
rm(target_file, force=true)
end | GraphMLDatasets | https://github.com/yuehhua/GraphMLDatasets.jl.git |
|
[
"MIT"
] | 0.1.7 | 3874bd47a47cda82504d3f205a2912d756af94e4 | docs | 570 | # Changelog
All notable changes to this project will be documented in this file.
## [0.1.7]
- bug fix
## [0.1.6]
- padding features for all nodes (only for `Planetoid` dataset)
## [0.1.5]
- add OGBNProteins, OGBNProducts, OGBNArxiv dataset
- add Planetoid indices
- improve Reddit preprocessing
- replace part of PyCall with Pickle
## [0.1.4]
- Refactor project
## [0.1.3]
- Support JLD2 up to v0.4 and MAT up to v0.10
## [0.1.2]
- Support Julia v1.6
## [0.1.1]
- Refactor API for datasets
## [0.1.0]
- Add Planetoid, Cora, PPI, Reddit and QM7b dataset
| GraphMLDatasets | https://github.com/yuehhua/GraphMLDatasets.jl.git |
|
[
"MIT"
] | 0.1.7 | 3874bd47a47cda82504d3f205a2912d756af94e4 | docs | 574 | # GraphMLDatasets.jl
[](https://yuehhua.github.io/GraphMLDatasets.jl/stable)
[](https://yuehhua.github.io/GraphMLDatasets.jl/dev)
[](https://travis-ci.com/yuehhua/GraphMLDatasets.jl)
[](https://codecov.io/gh/yuehhua/GraphMLDatasets.jl)
A library for machine learning datasets on graph | GraphMLDatasets | https://github.com/yuehhua/GraphMLDatasets.jl.git |
|
[
"MIT"
] | 0.1.7 | 3874bd47a47cda82504d3f205a2912d756af94e4 | docs | 1001 | ```@meta
CurrentModule = GraphMLDatasets
```
# GraphMLDatasets
```@index
```
## Usage
```julia
graph = graphdata(Planetoid(), :cora)
train_X, train_y = traindata(Planetoid(), :cora)
test_X, test_y = testdata(Planetoid(), :cora)
# OGB datasets
graph = graphdata(OGBNProteins())
ef = edge_features(OGBNProteins())
nl = node_labels(OGBNProteins())
```
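The OGB node-property datasets additionally expose their predefined data splits,
feature matrices and labels. A brief sketch (see the API reference below):

```julia
train_idx = train_indices(OGBNProteins())
valid_idx = valid_indices(OGBNProteins())
test_idx = test_indices(OGBNProteins())

graph = graphdata(OGBNArxiv())
nf = node_features(OGBNArxiv())
nl = node_labels(OGBNArxiv())
```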
## APIs
```@docs
traindata
validdata
testdata
train_indices
valid_indices
test_indices
graphdata
rawdata
alldata
metadata
node_features
edge_features
node_labels
```
## Available datasets
### Planetoid dataset
```@docs
Planetoid
```
### Cora dataset
```@docs
Cora
```
### PPI dataset
```@docs
PPI
```
### Reddit dataset
```@docs
Reddit
```
### QM7b dataset
```@docs
QM7b
```
### OGB Node Property Prediction
#### OGBNProteins dataset
```@docs
OGBNProteins
```
#### OGBNProducts dataset
```@docs
OGBNProducts
```
#### OGBNArxiv dataset
```@docs
OGBNArxiv
```
### OGB Link Property Prediction
### OGB Graph Property Prediction
| GraphMLDatasets | https://github.com/yuehhua/GraphMLDatasets.jl.git |
|
[
"MIT"
] | 2.0.2 | 2cdcf1d7c4ae2d81cc015fae603df5c1b4b8f422 | code | 169 | using JuliaFormatter
format(file; kwargs...) = JuliaFormatter.format(joinpath(@__DIR__, file); kwargs...)
format("src"; verbose = true)
format("test"; verbose = true)
| Metatheory | https://github.com/JuliaSymbolics/Metatheory.jl.git |
|
[
"MIT"
] | 2.0.2 | 2cdcf1d7c4ae2d81cc015fae603df5c1b4b8f422 | code | 1020 | using Documenter
using Metatheory
using Literate
using Metatheory.EGraphs
using Metatheory.Library
TUTORIALSDIR = joinpath(dirname(pathof(Metatheory)), "../test/tutorials/")
OUTDIR = abspath(joinpath(@__DIR__, "src", "tutorials"))
# Generate markdown document using Literate.jl for each file in the tutorials directory.
for f in readdir(TUTORIALSDIR)
if endswith(f, ".jl")
input = abspath(joinpath(TUTORIALSDIR, f))
name = basename(input)
Literate.markdown(input, OUTDIR)
elseif f != "README.md"
@info "Copying $f"
cp(joinpath(TUTORIALSDIR, f), joinpath(OUTDIR, f); force = true)
end
end
tutorials = [joinpath("tutorials", f[1:(end - 3)]) * ".md" for f in readdir(TUTORIALSDIR) if endswith(f, ".jl")]
makedocs(
modules = [Metatheory, Metatheory.EGraphs],
sitename = "Metatheory.jl",
pages = [
"index.md"
"rewrite.md"
"egraphs.md"
"visualizing.md"
"api.md"
"Tutorials" => tutorials
],
)
deploydocs(repo = "github.com/JuliaSymbolics/Metatheory.jl.git")
| Metatheory | https://github.com/JuliaSymbolics/Metatheory.jl.git |
|
[
"MIT"
] | 2.0.2 | 2cdcf1d7c4ae2d81cc015fae603df5c1b4b8f422 | code | 173 | include("prop_logic_theory.jl")
include("prover.jl")
ex = rewrite(:(((p => q) && (r => s) && (p || r)) => (q || s)), impl)
prove(t, ex, 1, 25)
@profview prove(t, ex, 2, 7)
| Metatheory | https://github.com/JuliaSymbolics/Metatheory.jl.git |
|
[
"MIT"
] | 2.0.2 | 2cdcf1d7c4ae2d81cc015fae603df5c1b4b8f422 | code | 1870 | include("eggify.jl")
using Metatheory.Library
using Metatheory.EGraphs.Schedulers
or_alg = @theory p q r begin
((p || q) || r) == (p || (q || r))
(p || q) == (q || p)
(p || p) --> p
(p || true) --> true
(p || false) --> p
end
and_alg = @theory p q r begin
((p && q) && r) == (p && (q && r))
(p && q) == (q && p)
(p && p) --> p
(p && true) --> p
(p && false) --> false
end
comb = @theory p q r begin
# DeMorgan
!(p || q) == (!p && !q)
!(p && q) == (!p || !q)
# distrib
(p && (q || r)) == ((p && q) || (p && r))
(p || (q && r)) == ((p || q) && (p || r))
# absorb
(p && (p || q)) --> p
(p || (p && q)) --> p
# complement
(p && (!p || q)) --> p && q
(p || (!p && q)) --> p || q
end
negt = @theory p begin
(p && !p) --> false
(p || !(p)) --> true
!(!p) == p
end
impl = @theory p q begin
(p == !p) --> false
(p == p) --> true
(p == q) --> (!p || q) && (!q || p)
(p => q) --> (!p || q)
end
fold = @theory begin
(true == false) --> false
(false == true) --> false
(true == true) --> true
(false == false) --> true
(true || false) --> true
(false || true) --> true
(true || true) --> true
(false || false) --> false
(true && true) --> true
(false && true) --> false
(true && false) --> false
(false && false) --> false
!(true) --> false
!(false) --> true
end
theory = or_alg ∪ and_alg ∪ comb ∪ negt ∪ impl ∪ fold
query = :(!(((!p || q) && (!r || s)) && (p || r)) || (q || s))
###########################################
params = SaturationParams(timeout = 22, eclasslimit = 3051, scheduler = ScoredScheduler)#, schedulerparams=(1000,5, Schedulers.exprsize))
for i in 1:2
G = EGraph(query)
report = saturate!(G, theory, params)
ex = extract!(G, astsize)
println("Best found: $ex")
println(report)
end
open("src/main.rs", "w") do f
write(f, rust_code(theory, query, params))
end
| Metatheory | https://github.com/JuliaSymbolics/Metatheory.jl.git |
|
[
"MIT"
] | 2.0.2 | 2cdcf1d7c4ae2d81cc015fae603df5c1b4b8f422 | code | 1831 | include("eggify.jl")
using Metatheory.Library
using Metatheory.EGraphs.Schedulers
mult_t = @commutative_monoid (*) 1
plus_t = @commutative_monoid (+) 0
minus_t = @theory a b begin
a - a --> 0
a + (-b) --> a - b
end
mulplus_t = @theory a b c begin
0 * a --> 0
a * 0 --> 0
a * (b + c) == ((a * b) + (a * c))
a + (b * a) --> ((b + 1) * a)
end
pow_t = @theory x y z n m p q begin
(y^n) * y --> y^(n + 1)
x^n * x^m == x^(n + m)
(x * y)^z == x^z * y^z
(x^p)^q == x^(p * q)
x^0 --> 1
0^x --> 0
1^x --> 1
x^1 --> x
inv(x) == x^(-1)
end
function customlt(x, y)
if typeof(x) == Expr && Expr == typeof(y)
false
elseif typeof(x) == typeof(y)
isless(x, y)
elseif x isa Symbol && y isa Number
false
else
true
end
end
canonical_t = @theory x y xs ys begin
# restore n-arity
(x + (+)(ys...)) --> +(x, ys...)
((+)(xs...) + y) --> +(xs..., y)
(x * (*)(ys...)) --> *(x, ys...)
((*)(xs...) * y) --> *(xs..., y)
(*)(xs...) => Expr(:call, :*, sort(xs; lt = customlt)...)
(+)(xs...) => Expr(:call, :+, sort(xs; lt = customlt)...)
end
cas = mult_t ∪ plus_t ∪ minus_t ∪ mulplus_t ∪ pow_t
theory = cas
query = cleanast(:(a + b + (0 * c) + d))
function simplify(ex)
g = EGraph(ex)
params = SaturationParams(
scheduler = BackoffScheduler,
timeout = 20,
schedulerparams = (1000, 5), # fuel and bantime
)
report = saturate!(g, cas, params)
println(report)
res = extract!(g, astsize)
res = rewrite(res, canonical_t) # this just orders symbols and restores n-ary plus and mult
res
end
###########################################
params = SaturationParams(timeout = 20, schedulerparams = (1000, 5))
for i in 1:2
ex = simplify(:(a + b + (0 * c) + d))
println("Best found: $ex")
end
open("src/main.rs", "w") do f
write(f, rust_code(theory, query))
end
| Metatheory | https://github.com/JuliaSymbolics/Metatheory.jl.git |
|
[
"MIT"
] | 2.0.2 | 2cdcf1d7c4ae2d81cc015fae603df5c1b4b8f422 | code | 1654 | using Metatheory
using Metatheory.EGraphs
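# Helpers that translate Metatheory patterns, expressions and rules into egg's
# s-expression syntax, and emit a self-contained Rust `main.rs` that runs the
# same theory with the egg library (https://docs.rs/egg).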
to_sexpr_pattern(x) = "$x" # literals
to_sexpr_pattern(p::PatVar) = "?$(p.name)"
function to_sexpr_pattern(p::PatTerm)
e1 = join([p.operation; to_sexpr_pattern.(p.args)], ' ')
"($e1)"
end
to_sexpr(e::Symbol) = e
to_sexpr(e::Int64) = e
to_sexpr(e::Expr) = "($(join(to_sexpr.(e.args),' ')))"
function eggify(rules)
egg_rules = []
for rule in rules
l = to_sexpr_pattern(rule.left)
r = to_sexpr_pattern(rule.right)
if rule isa RewriteRule
push!(egg_rules, "\tvec![rw!( \"$(rule.left) => $(rule.right)\" ; \"$l\" => \"$r\" )]")
elseif rule isa EqualityRule
push!(egg_rules, "\trw!( \"$(rule.left) == $(rule.right)\" ; \"$l\" <=> \"$r\" )")
else
println("Unsupported Rewrite Mode")
@assert false
end
end
return join(egg_rules, ",\n")
end
function rust_code(theory, query, params = SaturationParams())
"""
use egg::{*, rewrite as rw};
//use std::time::Duration;
fn main() {
let rules : &[Rewrite<SymbolLang, ()>] = &vec![
$(eggify(theory))
].concat();
let start = "$(to_sexpr(cleanast(query)))".parse().unwrap();
let runner = Runner::default().with_expr(&start)
// More options here https://docs.rs/egg/0.6.0/egg/struct.Runner.html
.with_iter_limit($(params.timeout))
.with_node_limit($(params.enodelimit))
.run(rules);
runner.print_report();
let mut extractor = Extractor::new(&runner.egraph, AstSize);
let (best_cost, best_expr) = extractor.find_best(runner.roots[0]);
println!("best cost: {}, best expr {}", best_cost, best_expr);
}
"""
end
| Metatheory | https://github.com/JuliaSymbolics/Metatheory.jl.git |
|
[
"MIT"
] | 2.0.2 | 2cdcf1d7c4ae2d81cc015fae603df5c1b4b8f422 | code | 3018 | # constructs a semantic theory about a commutative monoid
# A monoid whose operation is commutative is called a
# commutative monoid (or, less commonly, an abelian monoid).
include("docstrings.jl")
module Library
using Metatheory.Patterns
using Metatheory.Rules
macro commutativity(op)
RewriteRule(PatTerm(:call, op, [PatVar(:a), PatVar(:b)]), PatTerm(:call, op, [PatVar(:b), PatVar(:a)]))
end
macro right_associative(op)
RewriteRule(
PatTerm(:call, op, [PatVar(:a), PatTerm(:call, op, [PatVar(:b), PatVar(:c)])]),
PatTerm(:call, op, [PatTerm(:call, op, [PatVar(:a), PatVar(:b)]), PatVar(:c)]),
)
end
macro left_associative(op)
RewriteRule(
PatTerm(:call, op, [PatTerm(:call, op, [PatVar(:a), PatVar(:b)]), PatVar(:c)]),
PatTerm(:call, op, [PatVar(:a), PatTerm(:call, op, [PatVar(:b), PatVar(:c)])]),
)
end
macro identity_left(op, id)
RewriteRule(PatTerm(:call, op, [id, PatVar(:a)]), PatVar(:a))
end
macro identity_right(op, id)
RewriteRule(PatTerm(:call, op, [PatVar(:a), id]), PatVar(:a))
end
macro inverse_left(op, id, invop)
RewriteRule(PatTerm(:call, op, [PatTerm(:call, invop, [PatVar(:a)]), PatVar(:a)]), id)
end
macro inverse_right(op, id, invop)
RewriteRule(PatTerm(:call, op, [PatVar(:a), PatTerm(:call, invop, [PatVar(:a)])]), id)
end
macro associativity(op)
esc(quote
[(@left_associative $op), (@right_associative $op)]
end)
end
macro monoid(op, id)
esc(quote
[(@left_associative($op)), (@right_associative($op)), (@identity_left($op, $id)), (@identity_right($op, $id))]
end)
end
macro commutative_monoid(op, id)
esc(quote
[(@commutativity $op), (@left_associative $op), (@right_associative $op), (@identity_left $op $id)]
end)
end
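# Example (sketch): `@commutative_monoid (*) 1` expands to a vector of rules encoding
# commutativity, left/right associativity and the left identity of `*`.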
# constructs a semantic theory about an abelian group
# The definition of a group does not require that a ⋅ b = b ⋅ a
# for all elements a and b in G. If this additional condition holds,
# then the operation is said to be commutative, and the group is called an abelian group.
macro commutative_group(op, id, invop)
# @assert Base.isbinaryoperator(op)
# @assert Base.isunaryoperator(invop)
esc(quote
(@commutative_monoid $op $id) ∪ [@inverse_right $op $id $invop]
end)
end
macro distrib(outop, inop)
esc(quote
[(@distrib_left $outop $inop), (@distrib_right $outop $inop)]
end)
end
# distributivity of two operations
# example: `@distrib (⋅) (⊕)`
macro distrib_left(outop, inop)
esc(quote
@rule a b c ($outop)(a, $(inop)(b, c)) == $(inop)($(outop)(a, b), $(outop)(a, c))
end)
end
macro distrib_right(outop, inop)
esc(quote
@rule a b c ($outop)($(inop)(a, b), c) == $(inop)($(outop)(a, c), $(outop)(b, c))
end)
end
# theory generation macros
export @commutativity
export @associativity
export @identity_left
export @identity_right
export @distrib_left
export @distrib_right
export @distrib
export @monoid
export @commutative_monoid
export @commutative_group
export @left_associative
export @right_associative
export @inverse_left
export @inverse_right
end
| Metatheory | https://github.com/JuliaSymbolics/Metatheory.jl.git |
|
[
"MIT"
] | 2.0.2 | 2cdcf1d7c4ae2d81cc015fae603df5c1b4b8f422 | code | 904 | module Metatheory
using DataStructures
using Base.Meta
using Reexport
using TermInterface
@inline alwaystrue(x) = true
function lookup_pat end
function maybelock! end
include("docstrings.jl")
include("utils.jl")
export @timer
export @iftimer
export @timerewrite
export @matchable
include("Patterns.jl")
@reexport using .Patterns
include("ematch_compiler.jl")
@reexport using .EMatchCompiler
include("matchers.jl")
include("Rules.jl")
@reexport using .Rules
include("Syntax.jl")
@reexport using .Syntax
include("EGraphs/EGraphs.jl")
@reexport using .EGraphs
include("Library.jl")
export Library
include("Rewriters.jl")
using .Rewriters
export Rewriters
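"""
    rewrite(expr, theory; order=:outer)

Rewrite `expr` by applying the rules in `theory` with the classical rewriting
backend, until a fixed point is reached. `order=:outer` (the default) traverses
the expression with `Postwalk`; `order=:inner` uses `Prewalk`.
"""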
function rewrite(expr, theory; order = :outer)
if order == :inner
Fixpoint(Prewalk(Fixpoint(Chain(theory))))(expr)
elseif order == :outer
Fixpoint(Postwalk(Fixpoint(Chain(theory))))(expr)
end
end
export rewrite
end # module
| Metatheory | https://github.com/JuliaSymbolics/Metatheory.jl.git |
|
[
"MIT"
] | 2.0.2 | 2cdcf1d7c4ae2d81cc015fae603df5c1b4b8f422 | code | 4560 | module Patterns
using Metatheory: binarize, cleanast, alwaystrue
using AutoHashEquals
using TermInterface
"""
Abstract type representing a pattern used in all the various pattern matching backends.
"""
abstract type AbstractPat end
struct UnsupportedPatternException <: Exception
p::AbstractPat
end
Base.showerror(io::IO, e::UnsupportedPatternException) = print(io, "Pattern ", e.p, " is unsupported in this context")
Base.:(==)(a::AbstractPat, b::AbstractPat) = false
TermInterface.arity(p::AbstractPat) = 0
"""
A ground pattern contains no pattern variables and
only literal values to match.
"""
isground(p::AbstractPat) = false
isground(x) = true # literals
# PatVar is equivalent to SymbolicUtils's Slot
"""
PatVar{P}(name, debrujin_index, predicate::P)
Pattern variables will first match on one subterm
and instantiate the substitution to that subterm.
Matcher pattern may contain pattern variables with attached predicates,
where `predicate` is a function that takes a matched expression and returns a
boolean value. Such a slot will be considered a match only if `predicate` returns true.
`predicate` can also be a `Type{<:t}`, this predicate is called a
type assertion. Type assertions on a `PatVar`, will match if and only if
the type of the matched term for the pattern variable is a subtype of `T`.
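For example (a sketch of what the `@rule` front-end generates), the pattern
`~x::Number` corresponds to:
```julia
PatVar(:x, -1, Number, :Number)
```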
"""
mutable struct PatVar{P} <: AbstractPat
name::Symbol
idx::Int
predicate::P
predicate_code
end
Base.:(==)(a::PatVar, b::PatVar) = a.idx == b.idx
PatVar(var) = PatVar(var, -1, alwaystrue, nothing)
PatVar(var, i) = PatVar(var, i, alwaystrue, nothing)
"""
If you want to match a variable number of subexpressions at once, you will need
a **segment pattern**.
A segment pattern represents a vector of subexpressions matched.
You can attach a predicate `g` to a segment variable. In the case of segment variables `g` gets a vector of 0 or more
expressions and must return a boolean value.
"""
mutable struct PatSegment{P} <: AbstractPat
name::Symbol
idx::Int
predicate::P
predicate_code
end
PatSegment(v) = PatSegment(v, -1, alwaystrue, nothing)
PatSegment(v, i) = PatSegment(v, i, alwaystrue, nothing)
"""
Term patterns will match
on terms of the same `arity` and with the same
function symbol `operation` and expression head `exprhead`.
"""
struct PatTerm <: AbstractPat
exprhead::Any
operation::Any
args::Vector
PatTerm(eh, op, args) = new(eh, op, args) #Ref{UInt}(0))
end
TermInterface.istree(::PatTerm) = true
TermInterface.exprhead(e::PatTerm) = e.exprhead
TermInterface.operation(p::PatTerm) = p.operation
TermInterface.arguments(p::PatTerm) = p.args
TermInterface.arity(p::PatTerm) = length(arguments(p))
TermInterface.metadata(p::PatTerm) = nothing
function TermInterface.similarterm(x::PatTerm, head, args, symtype = nothing; metadata = nothing, exprhead = :call)
PatTerm(exprhead, head, args)
end
isground(p::PatTerm) = all(isground, p.args)
# ==============================================
# ================== PATTERN VARIABLES =========
# ==============================================
"""
Collects pattern variables appearing in a pattern into a vector of symbols
"""
patvars(p::PatVar, s) = push!(s, p.name)
patvars(p::PatSegment, s) = push!(s, p.name)
patvars(p::PatTerm, s) = (patvars(operation(p), s); foreach(x -> patvars(x, s), arguments(p)); s)
patvars(x, s) = s
patvars(p) = unique!(patvars(p, Symbol[]))
# ==============================================
# ================== DEBRUJIN INDEXING =========
# ==============================================
function setdebrujin!(p::Union{PatVar,PatSegment}, pvars)
p.idx = findfirst((==)(p.name), pvars)
end
# literal case
setdebrujin!(p, pvars) = nothing
function setdebrujin!(p::PatTerm, pvars)
setdebrujin!(operation(p), pvars)
foreach(x -> setdebrujin!(x, pvars), p.args)
end
to_expr(x) = x
to_expr(x::PatVar{T}) where {T} = Expr(:call, :~, Expr(:(::), x.name, x.predicate_code))
to_expr(x::PatSegment{T}) where {T<:Function} = Expr(:..., Expr(:call, :~, Expr(:(::), x.name, x.predicate_code)))
to_expr(x::PatVar{typeof(alwaystrue)}) = Expr(:call, :~, x.name)
to_expr(x::PatSegment{typeof(alwaystrue)}) = Expr(:..., Expr(:call, :~, x.name))
to_expr(x::PatTerm) = similarterm(Expr(:call, :x), operation(x), map(to_expr, arguments(x)); exprhead = exprhead(x))
Base.show(io::IO, pat::AbstractPat) = print(io, to_expr(pat))
# include("rules/patterns.jl")
export AbstractPat
export PatVar
export PatTerm
export PatSegment
export patvars
export setdebrujin!
export isground
export UnsupportedPatternException
end
| Metatheory | https://github.com/JuliaSymbolics/Metatheory.jl.git |
|
[
"MIT"
] | 2.0.2 | 2cdcf1d7c4ae2d81cc015fae603df5c1b4b8f422 | code | 6490 | """
A rewriter is any function which takes an expression and returns an expression
or `nothing`. If `nothing` is returned that means there was no changes applicable
to the input expression.
The `Rewriters` module contains some types which create and transform
rewriters.
- `Empty()` is a rewriter which always returns `nothing`
- `Chain(itr)` chain an iterator of rewriters into a single rewriter which applies
each chained rewriter in the given order.
If a rewriter returns `nothing` this is treated as a no-change.
- `RestartedChain(itr)` like `Chain(itr)` but restarts from the first rewriter once on the
first successful application of one of the chained rewriters.
- `IfElse(cond, rw1, rw2)` runs the `cond` function on the input, applies `rw1` if cond
returns true, `rw2` if it returns false
- `If(cond, rw)` is the same as `IfElse(cond, rw, Empty())`
- `Prewalk(rw; threaded=false, thread_cutoff=100)` returns a rewriter which does a pre-order
traversal of a given expression and applies the rewriter `rw`. Note that if
`rw` returns `nothing` when a match is not found, then `Prewalk(rw)` will
also return nothing unless a match is found at every level of the walk.
`threaded=true` will use multi threading for traversal. `thread_cutoff` is
the minimum number of nodes in a subtree which should be walked in a
threaded spawn.
- `Postwalk(rw; threaded=false, thread_cutoff=100)` similarly does post-order traversal.
- `Fixpoint(rw)` returns a rewriter which applies `rw` repeatedly until there are no changes to be made.
- `FixpointNoCycle` behaves like [`Fixpoint`](@ref) but instead it applies `rw` repeatedly only while it is returning new results.
- `PassThrough(rw)` returns a rewriter which if `rw(x)` returns `nothing` will instead
return `x` otherwise will return `rw(x)`.
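For example, a rewriter that applies a rule everywhere in an expression until no
further change occurs can be composed as follows (a small sketch):
```julia
using Metatheory, Metatheory.Rewriters

r = @rule sin(~x) --> cos(~x)
rw = Fixpoint(Postwalk(PassThrough(r)))
rw(:(sin(sin(a)) + sin(b)))  # returns :(cos(cos(a)) + cos(b))
```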
"""
module Rewriters
using TermInterface
using Metatheory: @timer
export Empty, IfElse, If, Chain, RestartedChain, Fixpoint, Postwalk, Prewalk, PassThrough
# Cache of printed rules to speed up @timer
const repr_cache = IdDict()
cached_repr(x) = Base.get!(() -> repr(x), repr_cache, x)
struct Empty end
(rw::Empty)(x) = nothing
instrument(x, f) = f(x)
instrument(x::Empty, f) = x
struct IfElse{F,A,B}
cond::F
yes::A
no::B
end
instrument(x::IfElse, f) = IfElse(x.cond, instrument(x.yes, f), instrument(x.no, f))
function (rw::IfElse)(x)
rw.cond(x) ? rw.yes(x) : rw.no(x)
end
If(f, x) = IfElse(f, x, Empty())
struct Chain
rws
end
function (rw::Chain)(x)
for f in rw.rws
y = @timer cached_repr(f) f(x)
if y !== nothing
x = y
end
end
return x
end
instrument(c::Chain, f) = Chain(map(x -> instrument(x, f), c.rws))
struct RestartedChain{Cs}
rws::Cs
end
instrument(c::RestartedChain, f) = RestartedChain(map(x -> instrument(x, f), c.rws))
function (rw::RestartedChain)(x)
for f in rw.rws
y = @timer cached_repr(f) f(x)
if y !== nothing
return Chain(rw.rws)(y)
end
end
return x
end
@generated function (rw::RestartedChain{<:NTuple{N,Any}})(x) where {N}
quote
Base.@nexprs $N i -> begin
let f = rw.rws[i]
y = @timer cached_repr(f) f(x)
if y !== nothing
return Chain(rw.rws)(y)
end
end
end
return x
end
end
struct Fixpoint{C}
rw::C
end
instrument(x::Fixpoint, f) = Fixpoint(instrument(x.rw, f))
function (rw::Fixpoint)(x)
f = rw.rw
y = @timer cached_repr(f) f(x)
while x !== y && !isequal(x, y)
y === nothing && return x
x = y
y = @timer cached_repr(f) f(x)
end
return x
end
"""
FixpointNoCycle(rw)
`FixpointNoCycle` behaves like [`Fixpoint`](@ref),
but returns a rewriter which applies `rw` repeatedly until
it produces a result that was already produced before, for example,
if the repeated application of `rw` produces results `a, b, c, d, b` in order,
`FixpointNoCycle` stops because `b` has been already produced.
"""
struct FixpointNoCycle{C}
rw::C
hist::Vector{UInt64} # vector of hashes for history
end
instrument(x::FixpointNoCycle, f) = Fixpoint(instrument(x.rw, f))
function (rw::FixpointNoCycle)(x)
f = rw.rw
push!(rw.hist, hash(x))
y = @timer cached_repr(f) f(x)
while x !== y && hash(y) ∉ rw.hist
if y === nothing
empty!(rw.hist)
return x
end
push!(rw.hist, hash(y))
x = y
y = @timer cached_repr(f) f(x)
end
empty!(rw.hist)
return x
end
struct Walk{ord,C,F,threaded}
rw::C
thread_cutoff::Int
similarterm::F
end
function instrument(x::Walk{ord,C,F,threaded}, f) where {ord,C,F,threaded}
irw = instrument(x.rw, f)
Walk{ord,typeof(irw),typeof(x.similarterm),threaded}(irw, x.thread_cutoff, x.similarterm)
end
using .Threads
function Postwalk(rw; threaded::Bool = false, thread_cutoff = 100, similarterm = similarterm)
Walk{:post,typeof(rw),typeof(similarterm),threaded}(rw, thread_cutoff, similarterm)
end
function Prewalk(rw; threaded::Bool = false, thread_cutoff = 100, similarterm = similarterm)
Walk{:pre,typeof(rw),typeof(similarterm),threaded}(rw, thread_cutoff, similarterm)
end
struct PassThrough{C}
rw::C
end
instrument(x::PassThrough, f) = PassThrough(instrument(x.rw, f))
(p::PassThrough)(x) = (y = p.rw(x); y === nothing ? x : y)
passthrough(x, default) = x === nothing ? default : x
function (p::Walk{ord,C,F,false})(x) where {ord,C,F}
@assert ord === :pre || ord === :post
if istree(x)
if ord === :pre
x = p.rw(x)
end
if istree(x)
x = p.similarterm(x, operation(x), map(PassThrough(p), unsorted_arguments(x)); exprhead = exprhead(x))
end
return ord === :post ? p.rw(x) : x
else
return p.rw(x)
end
end
function (p::Walk{ord,C,F,true})(x) where {ord,C,F}
@assert ord === :pre || ord === :post
if istree(x)
if ord === :pre
x = p.rw(x)
end
if istree(x)
_args = map(arguments(x)) do arg
if node_count(arg) > p.thread_cutoff
Threads.@spawn p(arg)
else
p(arg)
end
end
args = map((t, a) -> passthrough(t isa Task ? fetch(t) : t, a), _args, arguments(x))
x = p.similarterm(x, operation(x), args; exprhead = exprhead(x))
end
return ord === :post ? p.rw(x) : x
else
return p.rw(x)
end
end
function instrument_io(x)
function io_instrumenter(r)
function (args...)
println("Rule: ", r)
println("Input: ", args)
res = r(args...)
println("Output: ", res)
res
end
end
instrument(x, io_instrumenter)
end
end # end module
| Metatheory | https://github.com/JuliaSymbolics/Metatheory.jl.git |
|
[
"MIT"
] | 2.0.2 | 2cdcf1d7c4ae2d81cc015fae603df5c1b4b8f422 | code | 5528 | module Rules
using TermInterface
using AutoHashEquals
using Metatheory.EMatchCompiler
using Metatheory.Patterns
using Metatheory.Patterns: to_expr
using Metatheory: cleanast, binarize, matcher, instantiate
const EMPTY_DICT = Base.ImmutableDict{Int,Any}()
abstract type AbstractRule end
# Must override
Base.:(==)(a::AbstractRule, b::AbstractRule) = false
abstract type SymbolicRule <: AbstractRule end
abstract type BidirRule <: SymbolicRule end
struct RuleRewriteError
rule
expr
end
getdepth(::Any) = typemax(Int)
showraw(io, t) = Base.show(IOContext(io, :simplify => false), t)
showraw(t) = showraw(stdout, t)
@noinline function Base.showerror(io::IO, err::RuleRewriteError)
msg = "Failed to apply rule $(err.rule) on expression "
msg *= sprint(io -> showraw(io, err.expr))
print(io, msg)
end
"""
Rules defined as `left_hand --> right_hand` are
called *symbolic rewrite* rules. Application of a *rewrite* Rule
is a replacement of the `left_hand` pattern with
the `right_hand` substitution, with the correct instantiation
of pattern variables. Function call symbols are not treated as pattern
variables, all other identifiers are treated as pattern variables.
Literals such as `5, :e, "hello"` are not treated as pattern
variables.
```julia
@rule ~a * ~b --> ~b * ~a
```
"""
@auto_hash_equals fields = (left, right) struct RewriteRule <: SymbolicRule
left
right
matcher
patvars::Vector{Symbol}
ematcher!
end
function RewriteRule(l, r)
pvars = patvars(l) ∪ patvars(r)
# sort!(pvars)
setdebrujin!(l, pvars)
setdebrujin!(r, pvars)
RewriteRule(l, r, matcher(l), pvars, ematcher_yield(l, length(pvars)))
end
Base.show(io::IO, r::RewriteRule) = print(io, :($(r.left) --> $(r.right)))
function (r::RewriteRule)(term)
# n == 1 means that exactly one term of the input (term,) was matched
success(bindings, n) = n == 1 ? instantiate(term, r.right, bindings) : nothing
try
r.matcher(success, (term,), EMPTY_DICT)
catch err
throw(RuleRewriteError(r, term))
end
end
# ============================================================
# EqualityRule
# ============================================================
"""
An `EqualityRule` is a symbolic substitution rule that
can be applied in both directions. Therefore, it should only be used
with the EGraphs backend.
```julia
@rule ~a * ~b == ~b * ~a
```
"""
@auto_hash_equals struct EqualityRule <: BidirRule
left
right
patvars::Vector{Symbol}
ematcher!
end
function EqualityRule(l, r)
pvars = patvars(l) ∪ patvars(r)
extravars = setdiff(pvars, patvars(l) ∩ patvars(r))
if !isempty(extravars)
error("unbound pattern variables $extravars when creating bidirectional rule")
end
setdebrujin!(l, pvars)
setdebrujin!(r, pvars)
EqualityRule(l, r, pvars, ematcher_yield_bidir(l, r, length(pvars)))
end
Base.show(io::IO, r::EqualityRule) = print(io, :($(r.left) == $(r.right)))
function (r::EqualityRule)(x)
throw(RuleRewriteError(r, x))
end
# ============================================================
# UnequalRule
# ============================================================
"""
This type of *anti*-rule is used for checking contradictions in the EGraph
backend. If two terms, corresponding to the left and right hand sides of an
*anti*-rule, are found in an [`EGraph`], saturation is halted immediately.
```julia
!a ≠ a
```
"""
@auto_hash_equals struct UnequalRule <: BidirRule
left
right
patvars::Vector{Symbol}
ematcher!
end
function UnequalRule(l, r)
pvars = patvars(l) ∪ patvars(r)
extravars = setdiff(pvars, patvars(l) ∩ patvars(r))
if !isempty(extravars)
error("unbound pattern variables $extravars when creating bidirectional rule")
end
# sort!(pvars)
setdebrujin!(l, pvars)
setdebrujin!(r, pvars)
UnequalRule(l, r, pvars, ematcher_yield_bidir(l, r, length(pvars)))
end
Base.show(io::IO, r::UnequalRule) = print(io, :($(r.left) ≠ $(r.right)))
# ============================================================
# DynamicRule
# ============================================================
"""
Rules defined as `left_hand => right_hand` are
called `dynamic` rules. Dynamic rules behave like anonymous functions.
Instead of a symbolic substitution, the right hand of
a dynamic `=>` rule is evaluated during rewriting:
matched values are bound to pattern variables as in a
regular function call. This allows for dynamic computation
of right hand sides.
Dynamic rule
```julia
@rule ~a::Number * ~b::Number => ~a*~b
```
"""
@auto_hash_equals struct DynamicRule <: AbstractRule
left
rhs_fun::Function
rhs_code
matcher
patvars::Vector{Symbol} # useful set of pattern variables
ematcher!
end
function DynamicRule(l, r::Function, rhs_code = nothing)
pvars = patvars(l)
setdebrujin!(l, pvars)
isnothing(rhs_code) && (rhs_code = repr(r))
DynamicRule(l, r, rhs_code, matcher(l), pvars, ematcher_yield(l, length(pvars)))
end
Base.show(io::IO, r::DynamicRule) = print(io, :($(r.left) => $(r.rhs_code)))
function (r::DynamicRule)(term)
# n == 1 means that exactly one term of the input (term,) was matched
success(bindings, n) =
if n == 1
bvals = [bindings[i] for i in 1:length(r.patvars)]
return r.rhs_fun(term, nothing, bvals...)
end
try
return r.matcher(success, (term,), EMPTY_DICT)
catch err
rethrow(err)
throw(RuleRewriteError(r, term))
end
end
export SymbolicRule
export RewriteRule
export BidirRule
export EqualityRule
export UnequalRule
export DynamicRule
export AbstractRule
end
| Metatheory | https://github.com/JuliaSymbolics/Metatheory.jl.git |
|
[
"MIT"
] | 2.0.2 | 2cdcf1d7c4ae2d81cc015fae603df5c1b4b8f422 | code | 12481 | module Syntax
using Metatheory.Patterns
using Metatheory.Rules
using TermInterface
using Metatheory: alwaystrue, cleanast, binarize
export @rule
export @theory
export @slots
export @capture
# FIXME this thing eats up macro calls!
"""
Remove LineNumberNode from quoted blocks of code
"""
rmlines(e::Expr) = Expr(e.head, map(rmlines, filter(x -> !(x isa LineNumberNode), e.args))...)
rmlines(a) = a
function_object_or_quote(op::Symbol, mod)::Expr = :(isdefined($mod, $(QuoteNode(op))) ? $op : $(QuoteNode(op)))
function_object_or_quote(op, mod) = op
function makesegment(s::Expr, pvars)
if !(exprhead(s) == :(::))
error("Syntax for specifying a segment is ~~x::\$predicate, where predicate is a boolean function or a type")
end
name, predicate = arguments(s)
name ∉ pvars && push!(pvars, name)
return :($PatSegment($(QuoteNode(name)), -1, $predicate, $(QuoteNode(predicate))))
end
function makesegment(name::Symbol, pvars)
name ∉ pvars && push!(pvars, name)
PatSegment(name)
end
function makevar(s::Expr, pvars)
if !(exprhead(s) == :(::))
error("Syntax for specifying a slot is ~x::\$predicate, where predicate is a boolean function or a type")
end
name, predicate = arguments(s)
name ∉ pvars && push!(pvars, name)
return :($PatVar($(QuoteNode(name)), -1, $predicate, $(QuoteNode(predicate))))
end
function makevar(name::Symbol, pvars)
name ∉ pvars && push!(pvars, name)
PatVar(name)
end
# Make a dynamic rule right hand side
function makeconsequent(expr::Expr)
head = exprhead(expr)
args = arguments(expr)
op = operation(expr)
if head === :call
if op === :(~)
if args[1] isa Symbol
return args[1]
elseif args[1] isa Expr && operation(args[1]) == :(~)
n = arguments(args[1])[1]
@assert n isa Symbol
return n
else
error("Error when parsing right hand side")
end
else
return Expr(head, makeconsequent(op), map(makeconsequent, args)...)
end
else
return Expr(head, map(makeconsequent, args)...)
end
end
makeconsequent(x) = x
# treat as a literal
function makepattern(x, pvars, slots, mod = @__MODULE__, splat = false)
x in slots ? (splat ? makesegment(x, pvars) : makevar(x, pvars)) : x
end
function makepattern(ex::Expr, pvars, slots, mod = @__MODULE__, splat = false)
head = exprhead(ex)
op = operation(ex)
# Retrieve the function object if available
# Optionally quote function objects
args = arguments(ex)
istree(op) && (op = makepattern(op, pvars, slots, mod))
if head === :call
if operation(ex) === :(~) # is a variable or segment
let v = args[1]
if v isa Expr && operation(v) == :(~)
# matches ~~x::predicate or ~~x::predicate...
makesegment(arguments(v)[1], pvars)
elseif splat
# matches ~x::predicate...
makesegment(v, pvars)
else
makevar(v, pvars)
end
end
else # Matches a term
patargs = map(i -> makepattern(i, pvars, slots, mod), args) # recurse
:($PatTerm(:call, $(function_object_or_quote(op, mod)), [$(patargs...)]))
end
elseif head === :...
makepattern(args[1], pvars, slots, mod, true)
elseif head == :(::) && args[1] in slots
splat ? makesegment(ex, pvars) : makevar(ex, pvars)
elseif head === :ref
# getindex
patargs = map(i -> makepattern(i, pvars, slots, mod), args) # recurse
:($PatTerm(:ref, getindex, [$(patargs...)]))
elseif head === :$
args[1]
else
patargs = map(i -> makepattern(i, pvars, slots, mod), args) # recurse
:($PatTerm($(QuoteNode(head)), $(function_object_or_quote(op, mod)), [$(patargs...)]))
end
end
function rule_sym_map(ex::Expr)
h = operation(ex)
if h == :(-->) || h == :(→)
RewriteRule
elseif h == :(=>)
DynamicRule
elseif h == :(==)
EqualityRule
elseif h == :(!=) || h == :(≠)
UnequalRule
else
error("Cannot parse rule with operator '$h'")
end
end
rule_sym_map(ex) = error("Cannot parse rule from $ex")
"""
rewrite_rhs(expr::Expr)
Rewrite the `expr` by dealing with `:where` if necessary.
The `:where` is rewritten from, for example, `~x where f(~x)` to `f(~x) ? ~x : nothing`.
"""
function rewrite_rhs(ex::Expr)
if exprhead(ex) == :where
rhs, predicate = arguments(ex)
return :($predicate ? $rhs : nothing)
end
ex
end
rewrite_rhs(x) = x
function addslots(expr, slots)
if expr isa Expr
if expr.head === :macrocall &&
expr.args[1] in [Symbol("@rule"), Symbol("@capture"), Symbol("@slots"), Symbol("@theory")]
Expr(:macrocall, expr.args[1:2]..., slots..., expr.args[3:end]...)
else
Expr(expr.head, addslots.(expr.args, (slots,))...)
end
else
expr
end
end
"""
@slots [SLOTS...] ex
Declare SLOTS as slot variables for all `@rule` or `@capture` invocations in the expression `ex`.
_Example:_
```julia
julia> @slots x y z a b c Chain([
(@rule x^2 + 2x*y + y^2 => (x + y)^2),
(@rule x^a * y^b => (x*y)^a * y^(b-a)),
(@rule +(x...) => sum(x)),
])
```
See also: [`@rule`](@ref), [`@capture`](@ref)
"""
macro slots(args...)
length(args) >= 1 || throw(ArgumentError("@slots requires at least one argument"))
slots = args[1:(end - 1)]
expr = args[end]
return esc(addslots(expr, slots))
end
"""
@rule [SLOTS...] LHS operator RHS
Creates an `AbstractRule` object. A rule object is callable, and takes an
expression and rewrites it if it matches the LHS pattern to the RHS pattern,
returns `nothing` otherwise. The rule language is described below.
LHS can be any possibly nested function call expression where any of the arugments can
optionally be a Slot (`~x`) or a Segment (`~x...`) (described below).
SLOTS is an optional list of symbols to be interpeted as slots or segments
directly (without using `~`). To declare slots for several rules at once, see
the `@slots` macro.
If an expression matches LHS entirely, then it is rewritten to the pattern in
the RHS , whose local scope includes the slot matches as variables. Segment
(`~x`) and slot variables (`~~x`) on the RHS will substitute the result of the
matches found for these variables in the LHS.
**Rule operators**:
- `LHS => RHS`: create a `DynamicRule`. The RHS is *evaluated* on rewrite.
- `LHS --> RHS`: create a `RewriteRule`. The RHS is **not** evaluated but *symbolically substituted* on rewrite.
- `LHS == RHS`: create a `EqualityRule`. In e-graph rewriting, this rule behaves like `RewriteRule` but can go in both directions. Doesn't work in classical rewriting
- `LHS ≠ RHS`: create a `UnequalRule`. Can only be used in e-graphs, and is used to eagerly stop the process of rewriting if LHS is found to be equal to RHS.
**Slot**:
A Slot variable is written as `~x` and matches a single expression. `x` is the name of the variable. If a slot appears more than once in an LHS expression then the expression matched at every such location must be equal (as shown by `isequal`).
_Example:_
Simple rule to turn any `sin` into `cos`:
```julia
julia> r = @rule sin(~x) --> cos(~x)
sin(~x) --> cos(~x)
julia> r(:(sin(1+a)))
:(cos((1 + a)))
```
A rule with 2 slot variables
```julia
julia> r = @rule sin(~x + ~y) --> sin(~x)*cos(~y) + cos(~x)*sin(~y)
sin(~x + ~y) --> sin(~x) * cos(~y) + cos(~x) * sin(~y)
julia> r(:(sin(a + b)))
:(sin(a) * cos(b) + cos(a) * sin(b))
```
A rule that matches two of the same expressions:
```julia
julia> r = @rule sin(~x)^2 + cos(~x)^2 --> 1
sin(~x) ^ 2 + cos(~x) ^ 2 --> 1
julia> r(:(sin(2a)^2 + cos(2a)^2))
1
julia> r(:(sin(2a)^2 + cos(a)^2))
# nothing
```
A rule without `~`
```julia
julia> r = @slots x y z @rule x(y + z) --> x*y + x*z
x(y + z) --> x*y + x*z
```
**Segment**:
A Segment variable matches zero or more expressions in the function call.
Segments may be written by splatting slot variables (`~x...`).
_Example:_
```julia
julia> r = @rule f(~xs...) --> g(~xs...);
julia> r(:(f(1, 2, 3)))
:(g(1,2,3))
```
**Predicates**:
There are two kinds of predicates, namely over slot variables and over the whole rule.
For the former, predicates can be used on both `~x` and `~~x` with the `~x::f` or `~~x::f` syntax.
Here `f` can be any Julia function. In the case of a slot the function gets a single
matched subexpression, in the case of segment, it gets an array of matched expressions.
The predicate should return `true` if the current match is acceptable, and `false`
otherwise.
```julia
julia> two_πs(x::Number) = abs(round(x/(2π)) - x/(2π)) < 10^-9
two_πs (generic function with 1 method)
julia> two_πs(x) = false
two_πs (generic function with 2 methods)
julia> r = @rule sin(~~x + ~y::two_πs + ~~z) => :(sin(\$(Expr(:call, :+, ~~x..., ~~z...))))
sin(~(~x) + ~(y::two_πs) + ~(~z)) --> sin(+(~(~x)..., ~(~z)...))
julia> r(:(sin(a+\$(3π))))
julia> r(:(sin(a+\$(6π))))
:(sin(+a))
julia> r(:(sin(a + 6π + c)))
:(sin(a + c))
```
Predicate function gets an array of values if attached to a segment variable (`~x...`).
For the predicate over the whole rule, use `@rule <LHS> => <RHS> where <predicate>`:
```
julia> predicate(x) = x === a;
julia> r = @rule ~x => ~x where predicate(~x);
julia> r(a)
a
julia> r(b) === nothing
true
```
Note that this is syntactic sugar and that it is the same as
`@rule ~x => predicate(~x) ? ~x : nothing`.
**Compatibility**:
Segment variables may still be written as (`~~x`), and slot (`~x`) and segment (`~x...` or `~~x`) syntaxes on the RHS will still substitute the result of the matches.
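A sketch of the legacy spelling (assuming it behaves like the splatted segment example above):
```julia
julia> r = @rule f(~~xs) --> g(~~xs);
julia> r(:(f(1, 2, 3)))
:(g(1,2,3))
```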
See also: [`@capture`](@ref), [`@slots`](@ref)
"""
macro rule(args...)
length(args) >= 1 || throw(ArgumentError("@rule requires at least one argument"))
slots = args[1:(end - 1)]
expr = args[end]
e = macroexpand(__module__, expr)
e = rmlines(e)
RuleType = rule_sym_map(e)
l, r = arguments(e)
pvars = Symbol[]
lhs = makepattern(l, pvars, slots, __module__)
rhs = RuleType <: SymbolicRule ? esc(makepattern(r, [], slots, __module__)) : r
if RuleType == DynamicRule
rhs_rewritten = rewrite_rhs(r)
rhs_consequent = makeconsequent(rhs_rewritten)
params = Expr(:tuple, :_lhs_expr, :_egraph, pvars...)
rhs = :($(esc(params)) -> $(esc(rhs_consequent)))
return quote
$(__source__)
DynamicRule($(esc(lhs)), $rhs, $(QuoteNode(rhs_consequent)))
end
end
quote
$(__source__)
($RuleType)($(esc(lhs)), $rhs)
end
end
# Theories can just be vectors of rules!
"""
@theory [SLOTS...] begin (LHS operator RHS)... end
Syntax sugar to define a vector of rules in a nice and readable way. Can use `@slots` or have the slots
as the first arguments:
```
julia> t = @theory x y z begin
x * (y + z) --> (x * y) + (x * z)
x + y == (y + x)
#...
end;
```
Is the same thing as writing
```
julia> v = [
@rule x y z x * (y + z) --> (x * y) + (x * z)
@rule x y x + y == (y + x)
#...
];
```
"""
macro theory(args...)
length(args) >= 1 || throw(ArgumentError("@theory requires at least one argument"))
slots = args[1:(end - 1)]
expr = args[end]
e = macroexpand(__module__, expr)
e = rmlines(e)
# e = interp_dollar(e, __module__)
if exprhead(e) == :block
ee = Expr(:vect, map(x -> addslots(:(@rule($x)), slots), arguments(e))...)
esc(ee)
else
error("theory is not in form begin a => b; ... end")
end
end
"""
@capture ex pattern
Uses a `Rule` object to capture an expression if it matches the `pattern`. Returns `true` and injects
slot variable match results into the calling scope when the `pattern` matches, otherwise returns `false`. The
rule language for specifying the `pattern` is the same in `@capture` as it is in `@rule`. Contextual matching
is not yet supported.
```julia
julia> @syms a; ex = a^a;
julia> if @capture ex (~x)^(~x)
@show x
elseif @capture ex 2(~y)
@show y
end;
x = a
```
See also: [`@rule`](@ref)
"""
macro capture(args...)
length(args) >= 2 || throw(ArgumentError("@capture requires at least two arguments"))
slots = args[1:(end - 2)]
ex = args[end - 1]
lhs = args[end]
lhs = macroexpand(__module__, lhs)
lhs = rmlines(lhs)
pvars = Symbol[]
lhs_term = makepattern(lhs, pvars, slots, __module__)
bind = Expr(
:block,
map(key -> :($(esc(key)) = getindex(__MATCHES__, findfirst((==)($(QuoteNode(key))), $pvars))), pvars)...,
)
quote
$(__source__)
lhs_pattern = $(esc(lhs_term))
__MATCHES__ = DynamicRule(lhs_pattern, (_lhs_expr, _egraph, pvars...) -> pvars, nothing)($(esc(ex)))
if __MATCHES__ !== nothing
$bind
true
else
false
end
end
end
end
| Metatheory | https://github.com/JuliaSymbolics/Metatheory.jl.git |
|
[
"MIT"
] | 2.0.2 | 2cdcf1d7c4ae2d81cc015fae603df5c1b4b8f422 | code | 879 | ## Docstring Templates
using DocStringExtensions
@template (FUNCTIONS, METHODS, MACROS) = """
$(DOCSTRING)
---
# Signatures
$(TYPEDSIGNATURES)
---
## Methods
$(METHODLIST)
"""
@template (TYPES) = """
$(TYPEDEF)
$(DOCSTRING)
---
## Fields
$(TYPEDFIELDS)
"""
@template MODULES = """
$(DOCSTRING)
---
## Imports
$(IMPORTS)
"""
| Metatheory | https://github.com/JuliaSymbolics/Metatheory.jl.git |
|
[
"MIT"
] | 2.0.2 | 2cdcf1d7c4ae2d81cc015fae603df5c1b4b8f422 | code | 4747 | module EMatchCompiler
using TermInterface
using ..Patterns
using Metatheory: islist, car, cdr, assoc, drop_n, lookup_pat, LL, maybelock!
function ematcher(p::Any)
function literal_ematcher(next, g, data, bindings)
!islist(data) && return
ecid = lookup_pat(g, p)
if ecid > 0 && ecid == car(data)
next(bindings, 1)
end
end
end
checktype(n, T) = istree(n) ? symtype(n) <: T : false
function predicate_ematcher(p::PatVar, pred::Type)
function type_ematcher(next, g, data, bindings)
!islist(data) && return
id = car(data)
eclass = g[id]
for (enode_idx, n) in enumerate(eclass)
if !istree(n) && operation(n) isa pred
next(assoc(bindings, p.idx, (id, enode_idx)), 1)
end
end
end
end
function predicate_ematcher(p::PatVar, pred)
function predicate_ematcher(next, g, data, bindings)
!islist(data) && return
id::Int = car(data)
eclass = g[id]
if pred(eclass)
enode_idx = 0
# Is this for loop needed?
for (j, n) in enumerate(eclass)
# Find first literal if available
if !istree(n)
enode_idx = j
break
end
end
next(assoc(bindings, p.idx, (id, enode_idx)), 1)
end
end
end
function ematcher(p::PatVar)
pred_matcher = predicate_ematcher(p, p.predicate)
function var_ematcher(next, g, data, bindings)
id = car(data)
ecid = get(bindings, p.idx, 0)[1]
if ecid > 0
ecid == id ? next(bindings, 1) : nothing
else
# Variable is not bound, check predicate and bind
pred_matcher(next, g, data, bindings)
end
end
end
Base.@pure @inline checkop(x::Union{Function,DataType}, op) = isequal(x, op) || isequal(nameof(x), op)
Base.@pure @inline checkop(x, op) = isequal(x, op)
function canbind(p::PatTerm)
eh = exprhead(p)
op = operation(p)
ar = arity(p)
function canbind(n)
istree(n) && exprhead(n) == eh && checkop(op, operation(n)) && arity(n) == ar
end
end
function ematcher(p::PatTerm)
ematchers = map(ematcher, arguments(p))
if isground(p)
return function ground_term_ematcher(next, g, data, bindings)
!islist(data) && return
ecid = lookup_pat(g, p)
if ecid > 0 && ecid == car(data)
next(bindings, 1)
end
end
end
canbindtop = canbind(p)
function term_ematcher(success, g, data, bindings)
!islist(data) && return nothing
function loop(children_eclass_ids, bindings′, ematchers′)
if !islist(ematchers′)
# term is empty
if !islist(children_eclass_ids)
# we have correctly matched the term
return success(bindings′, 1)
end
return nothing
end
car(ematchers′)(g, children_eclass_ids, bindings′) do b, n_of_matched # next
# recursion case:
# take the first matcher, on success,
# keep looping by matching the rest
# by removing the first n matched elements
# from the term, with the bindings,
loop(drop_n(children_eclass_ids, n_of_matched), b, cdr(ematchers′))
end
end
for n in g[car(data)]
if canbindtop(n)
loop(LL(arguments(n), 1), bindings, ematchers)
end
end
end
end
const EMPTY_ECLASS_DICT = Base.ImmutableDict{Int,Tuple{Int,Int}}()
"""
Substitutions are efficiently represented in memory as a vector of tuples of two integers.
This should allow for static allocation of matches and use of LoopVectorization.jl.
The buffer has to be fairly big when e-matching.
The size of the buffer should double when there are too many matches.
The format is as follows (see the sketch below):
* The first pair denotes the index of the rule in the theory and the e-class id
of the e-graph node that is being substituted. The rule index should be negative
if it is a bidirectional rule being applied right-to-left.
* From the second pair on, it represents (e-class id, literal position) at the position of the pattern variable
* The end of a substitution is delimited by (0,0)
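As a purely illustrative sketch, a hypothetical match of rule `3`, applied left-to-right on
e-class `12` and binding pattern variable `1` to e-class `7` with no literal position, would be
pushed onto the buffer as bindings equivalent to:
```julia
b = Base.ImmutableDict{Int,Tuple{Int,Int}}()
b = Base.ImmutableDict(b, 1 => (7, 0))   # pattern variable 1 -> (e-class id, literal position)
b = Base.ImmutableDict(b, 0 => (3, 12))  # key 0 -> (rule index * direction, matched e-class id)
```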
"""
function ematcher_yield(p, npvars::Int, direction::Int)
em = ematcher(p)
function ematcher_yield(g, rule_idx, id)::Int
n_matches = 0
em(g, (id,), EMPTY_ECLASS_DICT) do b, n
maybelock!(g) do
push!(g.buffer, assoc(b, 0, (rule_idx * direction, id)))
n_matches += 1
end
end
n_matches
end
end
ematcher_yield(p, npvars) = ematcher_yield(p, npvars, 1)
function ematcher_yield_bidir(l, r, npvars::Int)
eml, emr = ematcher_yield(l, npvars, 1), ematcher_yield(r, npvars, -1)
function ematcher_yield_bidir(g, rule_idx, id)::Int
eml(g, rule_idx, id) + emr(g, rule_idx, id)
end
end
ematcher(p::AbstractPattern) = error("Unsupported pattern in e-matching $p")
export ematcher_yield, ematcher_yield_bidir
end
| Metatheory | https://github.com/JuliaSymbolics/Metatheory.jl.git |
|
[
"MIT"
] | 2.0.2 | 2cdcf1d7c4ae2d81cc015fae603df5c1b4b8f422 | code | 4531 | #### Pattern matching
### Matching procedures
# A matcher is a function which takes 3 arguments
# 1. Callback: takes arguments Dictionary × Number of elements matched
# 2. Expression
# 3. Vector of matches de Bruijn-indexed by pattern variables
#
using Metatheory: islist, car, cdr, assoc, drop_n, take_n
function matcher(val::Any)
function literal_matcher(next, data, bindings)
islist(data) && isequal(car(data), val) ? next(bindings, 1) : nothing
end
end
function matcher(slot::PatVar)
pred = slot.predicate
if slot.predicate isa Type
pred = x -> typeof(x) <: slot.predicate
end
function slot_matcher(next, data, bindings)
!islist(data) && return
val = get(bindings, slot.idx, nothing)
if val !== nothing
if isequal(val, car(data))
return next(bindings, 1)
end
else
# Variable is not bound, first time it is found
# check the predicate
if pred(car(data))
next(assoc(bindings, slot.idx, car(data)), 1)
end
end
end
end
# returns the new offset n, or nothing if the match failed
function trymatchexpr(data, value, n)
if !islist(value)
return n
elseif islist(value) && islist(data)
if !islist(data)
# didn't fully match
return nothing
end
while isequal(car(value), car(data))
n += 1
value = cdr(value)
data = cdr(data)
if !islist(value)
return n
elseif !islist(data)
return nothing
end
end
return !islist(value) ? n : nothing
elseif isequal(value, data)
return n + 1
end
end
function matcher(segment::PatSegment)
function segment_matcher(success, data, bindings)
val = get(bindings, segment.idx, nothing)
if val !== nothing
n = trymatchexpr(data, val, 0)
if !isnothing(n)
success(bindings, n)
end
else
res = nothing
for i in length(data):-1:0
subexpr = take_n(data, i)
if segment.predicate(subexpr)
res = success(assoc(bindings, segment.idx, subexpr), i)
!isnothing(res) && break
end
end
return res
end
end
end
# Try to match against both a function symbol and a function object at the same time.
# Slows compile time down a bit, but lets this matcher work at the same time on purely symbolic Expr-like objects
# and SymbolicUtils-like objects that store function references as operations.
# Execution time should not be affected.
function head_matcher(f::Union{Function,DataType,UnionAll})
checkhead(x) = isequal(x, f) || isequal(x, nameof(f))
function head_matcher(next, data, bindings)
h = car(data)
if islist(data) && checkhead(h)
next(bindings, 1)
else
nothing
end
end
end
head_matcher(x) = matcher(x)
function matcher(term::PatTerm)
op = operation(term)
matchers = (head_matcher(op), map(matcher, arguments(term))...)
function term_matcher(success, data, bindings)
!islist(data) && return nothing
!istree(car(data)) && return nothing
function loop(term, bindings′, matchers′) # Get it to compile faster
# Base case, no more matchers
if !islist(matchers′)
# term is empty
if !islist(term)
# we have correctly matched the term
return success(bindings′, 1)
end
return nothing
end
car(matchers′)(term, bindings′) do b, n
# recursion case:
# take the first matcher, on success,
# keep looping by matching the rest
# by removing the first n matched elements
# from the term, with the bindings,
loop(drop_n(term, n), b, cdr(matchers′))
end
end
loop(car(data), bindings, matchers) # Try to eat exactly one term
end
end
function TermInterface.similarterm(
x::Expr,
head::Union{Function,DataType},
args,
symtype = nothing;
metadata = nothing,
exprhead = exprhead(x),
)
similarterm(x, nameof(head), args, symtype; metadata, exprhead)
end
function instantiate(left, pat::PatTerm, mem)
args = []
for parg in arguments(pat)
enqueue = parg isa PatSegment ? append! : push!
enqueue(args, instantiate(left, parg, mem))
end
reference = istree(left) ? left : Expr(:call, :_)
similarterm(reference, operation(pat), args; exprhead = exprhead(pat))
end
instantiate(left, pat::Any, mem) = pat
instantiate(left, pat::AbstractPat, mem) = error("Unsupported pattern ", pat)
function instantiate(left, pat::PatVar, mem)
mem[pat.idx]
end
function instantiate(left, pat::PatSegment, mem)
mem[pat.idx]
end
| Metatheory | https://github.com/JuliaSymbolics/Metatheory.jl.git |
|
[
"MIT"
] | 2.0.2 | 2cdcf1d7c4ae2d81cc015fae603df5c1b4b8f422 | code | 6090 | using Base: ImmutableDict
function binarize(e::T) where {T}
!istree(e) && return e
head = exprhead(e)
if head == :call
op = operation(e)
args = arguments(e)
meta = metadata(e)
if op ∈ binarize_ops && arity(e) > 2
return foldl((x, y) -> similarterm(e, op, [x, y], symtype(e); metadata = meta, exprhead = head), args)
end
end
return e
end
"""
Recursive version of binarize
"""
function binarize_rec(e::T) where {T}
!istree(e) && return e
head = exprhead(e)
op = operation(e)
args = map(binarize_rec, arguments(e))
meta = metadata(e)
if head == :call
if op ∈ binarize_ops && arity(e) > 2
return foldl((x, y) -> similarterm(e, op, [x, y], symtype(e); metadata = meta, exprhead = head), args)
end
end
return similarterm(e, op, args, symtype(e); metadata = meta, exprhead = head)
end
const binarize_ops = [:(+), :(*), (+), (*)]
function cleanast(e::Expr)
# TODO better line removal
if isexpr(e, :block)
return Expr(e.head, filter(x -> !(x isa LineNumberNode), e.args)...)
end
# Binarize
if isexpr(e, :call)
op = e.args[1]
if op ∈ binarize_ops && length(e.args) > 3
return foldl((x, y) -> Expr(:call, op, x, y), @view e.args[2:end])
end
end
return e
end
# Linked List interface
@inline assoc(d::ImmutableDict, k, v) = ImmutableDict(d, k => v)
struct LL{V}
v::V
i::Int
end
islist(x) = istree(x) || !isempty(x)
Base.empty(l::LL) = empty(l.v)
Base.isempty(l::LL) = l.i > length(l.v)
Base.length(l::LL) = length(l.v) - l.i + 1
@inline car(l::LL) = l.v[l.i]
@inline cdr(l::LL) = isempty(l) ? empty(l) : LL(l.v, l.i + 1)
# Base.length(t::Term) = length(arguments(t)) + 1 # PIRACY
# Base.isempty(t::Term) = false
# @inline car(t::Term) = operation(t)
# @inline cdr(t::Term) = arguments(t)
@inline car(v) = istree(v) ? operation(v) : first(v)
@inline function cdr(v)
if istree(v)
arguments(v)
else
islist(v) ? LL(v, 2) : error("asked cdr of empty")
end
end
@inline take_n(ll::LL, n) = isempty(ll) || n == 0 ? empty(ll) : @views ll.v[(ll.i):(n + ll.i - 1)] # @views handles Tuple
@inline take_n(ll, n) = @views ll[1:n]
@inline function drop_n(ll, n)
if n === 0
return ll
else
istree(ll) ? drop_n(arguments(ll), n - 1) : drop_n(cdr(ll), n - 1)
end
end
@inline drop_n(ll::Union{Tuple,AbstractArray}, n) = drop_n(LL(ll, 1), n)
@inline drop_n(ll::LL, n) = LL(ll.v, ll.i + n)
isliteral(::Type{T}) where {T} = x -> x isa T
is_literal_number(x) = isliteral(Number)(x)
# are there nested ⋆ terms?
function isnotflat(⋆)
function (x)
args = arguments(x)
for t in args
if istree(t) && operation(t) === (⋆)
return true
end
end
return false
end
end
function hasrepeats(x)
length(x) <= 1 && return false
for i in 1:(length(x) - 1)
if isequal(x[i], x[i + 1])
return true
end
end
return false
end
function merge_repeats(merge, xs)
length(xs) <= 1 && return false
merged = Any[]
i = 1
while i <= length(xs)
l = 1
for j in (i + 1):length(xs)
if isequal(xs[i], xs[j])
l += 1
else
break
end
end
if l > 1
push!(merged, merge(xs[i], l))
else
push!(merged, xs[i])
end
i += l
end
return merged
end
# Take a struct definition and make it be able to match in `@rule`
macro matchable(expr)
@assert expr.head == :struct
name = expr.args[2]
if name isa Expr
name.head === :(<:) && (name = name.args[1])
name isa Expr && name.head === :curly && (name = name.args[1])
end
fields = filter(x -> !(x isa LineNumberNode), expr.args[3].args)
get_name(s::Symbol) = s
get_name(e::Expr) = (@assert(e.head == :(::)); e.args[1])
fields = map(get_name, fields)
quote
$expr
TermInterface.istree(::$name) = true
TermInterface.operation(::$name) = $name
TermInterface.arguments(x::$name) = getfield.((x,), ($(QuoteNode.(fields)...),))
TermInterface.arity(x::$name) = $(length(fields))
Base.length(x::$name) = $(length(fields) + 1)
end |> esc
end
using TimerOutputs
const being_timed = Ref{Bool}(false)
macro timer(name, expr)
:(
if being_timed[]
@timeit $(esc(name)) $(esc(expr))
else
$(esc(expr))
end
)
end
macro iftimer(expr)
esc(expr)
end
function timerewrite(f)
reset_timer!()
being_timed[] = true
x = f()
being_timed[] = false
print_timer()
println()
x
end
"""
@timerewrite expr
If `expr` calls `simplify` or a `RuleSet` object, track the amount of time
it spent on applying each rule and pretty print the timing.
This uses [TimerOutputs.jl](https://github.com/KristofferC/TimerOutputs.jl).
## Example:
```julia
julia> expr = foldr(*, rand([a,b,c,d], 100))
(a ^ 26) * (b ^ 30) * (c ^ 16) * (d ^ 28)
julia> @timerewrite simplify(expr)
────────────────────────────────────────────────────────────────────────────────────────────────
Time Allocations
────────────────────── ───────────────────────
Tot / % measured: 340ms / 15.3% 92.2MiB / 10.8%
Section ncalls time %tot avg alloc %tot avg
────────────────────────────────────────────────────────────────────────────────────────────────
Rule((~y) ^ ~n * ~y => (~y) ^ (~n ... 667 11.1ms 21.3% 16.7μs 2.66MiB 26.8% 4.08KiB
RHS 92 277μs 0.53% 3.01μs 14.4KiB 0.14% 160B
Rule((~x) ^ ~n * (~x) ^ ~m => (~x)... 575 7.63ms 14.6% 13.3μs 1.83MiB 18.4% 3.26KiB
(*)(~(~(x::!issortedₑ))) => sort_arg... 831 6.31ms 12.1% 7.59μs 738KiB 7.26% 910B
RHS 164 3.03ms 5.81% 18.5μs 250KiB 2.46% 1.52KiB
...
...
────────────────────────────────────────────────────────────────────────────────────────────────
(a ^ 26) * (b ^ 30) * (c ^ 16) * (d ^ 28)
```
"""
macro timerewrite(expr)
:(timerewrite(() -> $(esc(expr))))
end
| Metatheory | https://github.com/JuliaSymbolics/Metatheory.jl.git |
|
[
"MIT"
] | 2.0.2 | 2cdcf1d7c4ae2d81cc015fae603df5c1b4b8f422 | code | 981 | module EGraphs
include("../docstrings.jl")
using DataStructures
using TermInterface
using TimerOutputs
using Metatheory: alwaystrue, cleanast, binarize
using Metatheory.Patterns
using Metatheory.Rules
using Metatheory.EMatchCompiler
include("intdisjointmap.jl")
export IntDisjointSet
export in_same_set
include("egraph.jl")
export AbstractENode
export ENodeLiteral
export ENodeTerm
export EClassId
export EClass
export hasdata
export getdata
export setdata!
export find
export lookup
export arity
export EGraph
export merge!
export in_same_class
export addexpr!
export rebuild!
export settermtype!
export gettermtype
include("analysis.jl")
export analyze!
export extract!
export astsize
export astsize_inv
export getcost!
export Sub
include("Schedulers.jl")
export Schedulers
using .Schedulers
include("saturation.jl")
export SaturationGoal
export EqualityGoal
export reached
export SaturationParams
export saturate!
export areequal
export @areequal
export @areequalg
end
| Metatheory | https://github.com/JuliaSymbolics/Metatheory.jl.git |
|
[
"MIT"
] | 2.0.2 | 2cdcf1d7c4ae2d81cc015fae603df5c1b4b8f422 | code | 6413 | module Schedulers
include("../docstrings.jl")
using Metatheory.Rules
using Metatheory.EGraphs
using Metatheory.Patterns
using DocStringExtensions
export AbstractScheduler
export SimpleScheduler
export BackoffScheduler
export ScoredScheduler
export cansaturate
export cansearch
export inform!
export setiter!
"""
Represents a rule scheduler for the equality saturation process
"""
abstract type AbstractScheduler end
"""
Should return `true` if the e-graph can be said to be saturated
```
cansaturate(s::AbstractScheduler)
```
"""
function cansaturate end
"""
Should return `false` if the rule `r` should be skipped
```
cansearch(s::AbstractScheduler, r::Rule)
```
"""
function cansearch end
"""
This function is called **after** pattern matching on the e-graph,
informs the scheduler about the yielded matches.
Returns `false` if the matches should be ignored and not yielded.
```
inform!(s::AbstractScheduler, r::AbstractRule, n_matches)
```
"""
function inform! end
function setiter! end
# ===========================================================================
# SimpleScheduler
# ===========================================================================
"""
A simple Rewrite Scheduler that applies every rule every time
"""
struct SimpleScheduler <: AbstractScheduler end
cansaturate(s::SimpleScheduler) = true
cansearch(s::SimpleScheduler, r::AbstractRule) = true
function SimpleScheduler(G::EGraph, theory::Vector{<:AbstractRule})
SimpleScheduler()
end
inform!(s::SimpleScheduler, r, n_matches) = true
setiter!(s::SimpleScheduler, iteration) = nothing
# ===========================================================================
# BackoffScheduler
# ===========================================================================
mutable struct BackoffSchedulerEntry
match_limit::Int
ban_length::Int
times_banned::Int
banned_until::Int
end
"""
A Rewrite Scheduler that implements exponential rule backoff.
For each rewrite, there exists a configurable initial match limit.
If a rewrite search yields more than this limit, then we ban this rule
for a number of iterations, double its limit, and double the time it
will be banned next time.
This seems effective at preventing explosive rules like
associativity from taking an unfair amount of resources.
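A rough usage sketch, assuming `g::EGraph` and `theory::Vector{<:AbstractRule}` already exist:
```julia
# With match_limit = 1000 and ban_length = 5, the first time a rule yields more than
# 1000 matches it is banned for 5 iterations; the next threshold is 2000 matches and
# the next ban lasts 10 iterations, and so on.
sched = BackoffScheduler(g, theory, 1000, 5)
```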
"""
mutable struct BackoffScheduler <: AbstractScheduler
data::IdDict{AbstractRule,BackoffSchedulerEntry}
G::EGraph
theory::Vector{<:AbstractRule}
curr_iter::Int
end
cansearch(s::BackoffScheduler, r::AbstractRule)::Bool = s.curr_iter > s.data[r].banned_until
function BackoffScheduler(g::EGraph, theory::Vector{<:AbstractRule})
# BackoffScheduler(g, theory, 128, 4)
BackoffScheduler(g, theory, 1000, 5)
end
function BackoffScheduler(G::EGraph, theory::Vector{<:AbstractRule}, match_limit::Int, ban_length::Int)
gsize = length(G.uf)
data = IdDict{AbstractRule,BackoffSchedulerEntry}()
for rule in theory
data[rule] = BackoffSchedulerEntry(match_limit, ban_length, 0, 0)
end
return BackoffScheduler(data, G, theory, 1)
end
# can saturate if there's no banned rule
cansaturate(s::BackoffScheduler)::Bool = all(kv -> s.curr_iter > last(kv).banned_until, s.data)
function inform!(s::BackoffScheduler, rule::AbstractRule, n_matches)
rd = s.data[rule]
threshold = rd.match_limit << rd.times_banned
if n_matches > threshold
ban_length = rd.ban_length << rd.times_banned
rd.times_banned += 1
rd.banned_until = s.curr_iter + ban_length
return false
end
return true
end
function setiter!(s::BackoffScheduler, curr_iter)
s.curr_iter = curr_iter
end
# ===========================================================================
# ScoredScheduler
# ===========================================================================
mutable struct ScoredSchedulerEntry
match_limit::Int
ban_length::Int
times_banned::Int
banned_until::Int
weight::Int
end
"""
A Rewrite Scheduler that implements exponential rule backoff, weighted by a per-rule score.
For each rewrite, there exists a configurable initial match limit.
If a rewrite search yields more than this limit, then we ban this rule
for a number of iterations, scale its limit by the rule's weight, and scale the time it
will be banned next time.
This seems effective at preventing explosive rules like
associativity from taking an unfair amount of resources.
"""
mutable struct ScoredScheduler <: AbstractScheduler
data::IdDict{AbstractRule,ScoredSchedulerEntry}
G::EGraph
theory::Vector{<:AbstractRule}
curr_iter::Int
end
cansearch(s::ScoredScheduler, r::AbstractRule)::Bool = s.curr_iter > s.data[r].banned_until
exprsize(a) = 1
function exprsize(e::PatTerm)
c = 1 + length(e.args)
for a in e.args
c += exprsize(a)
end
return c
end
function exprsize(e::Expr)
start = Meta.isexpr(e, :call) ? 2 : 1
c = 1 + length(e.args[start:end])
for a in e.args[start:end]
c += exprsize(a)
end
return c
end
function ScoredScheduler(g::EGraph, theory::Vector{<:AbstractRule})
# BackoffScheduler(g, theory, 128, 4)
ScoredScheduler(g, theory, 1000, 5, exprsize)
end
function ScoredScheduler(
G::EGraph,
theory::Vector{<:AbstractRule},
match_limit::Int,
ban_length::Int,
complexity::Function,
)
gsize = length(G.uf)
data = IdDict{AbstractRule,ScoredSchedulerEntry}()
for rule in theory
if rule isa DynamicRule
w = 2
data[rule] = ScoredSchedulerEntry(match_limit, ban_length, 0, 0, w)
continue
end
(l, r) = rule.left, rule.right
cl = complexity(l)
cr = complexity(r)
if cl > cr
w = 1 # reduces complexity
elseif cr > cl
w = 3 # augments complexity
else
w = 2 # complexity is equal
end
data[rule] = ScoredSchedulerEntry(match_limit, ban_length, 0, 0, w)
end
return ScoredScheduler(data, G, theory, 1)
end
# can saturate if there's no banned rule
cansaturate(s::ScoredScheduler)::Bool = all(kv -> s.curr_iter > last(kv).banned_until, s.data)
function inform!(s::ScoredScheduler, rule::AbstractRule, n_matches)
rd = s.data[rule]
threshold = rd.match_limit * (rd.weight^rd.times_banned)
if n_matches > threshold
ban_length = rd.ban_length * (rd.weight^rd.times_banned)
rd.times_banned += 1
rd.banned_until = s.curr_iter + ban_length
# @info "banning rule $rule until $(rd.banned_until)!"
return false
end
return true
end
function setiter!(s::ScoredScheduler, curr_iter)
s.curr_iter = curr_iter
end
end
| Metatheory | https://github.com/JuliaSymbolics/Metatheory.jl.git |
|
[
"MIT"
] | 2.0.2 | 2cdcf1d7c4ae2d81cc015fae603df5c1b4b8f422 | code | 6557 | analysis_reference(x::Symbol) = Val(x)
analysis_reference(x::Function) = x
analysis_reference(x) = error("$x is not a valid analysis reference")
"""
islazy(::Val{analysis_name})
Should return `true` if the EGraph Analysis identified by `analysis_name` is lazy
and `false` otherwise. A *lazy* EGraph Analysis is computed
only when [analyze!](@ref) is called. *Non-lazy*
analyses are instead computed on-the-fly every time ENodes are added to the EGraph or
EClasses are merged.
"""
islazy(::Val{analysis_name}) where {analysis_name} = false
islazy(analysis_name) = islazy(analysis_reference(analysis_name))
"""
modify!(::Val{analysis_name}, g, id)
The `modify!` function for EGraph Analysis can optionally modify the eclass
`g[id]` after it has been analyzed, typically by adding an ENode.
It should be **idempotent** if no other changes occur to the EClass.
(See the [egg paper](https://dl.acm.org/doi/pdf/10.1145/3434304)).
"""
modify!(::Val{analysis_name}, g, id) where {analysis_name} = nothing
modify!(an, g, id) = modify!(analysis_reference(an), g, id)
"""
join(::Val{analysis_name}, a, b)
Joins two analysis values into a single one, used by [analyze!](@ref)
when two eclasses are being merged or the analysis is being constructed.
"""
join(analysis::Val{analysis_name}, a, b) where {analysis_name} =
error("Analysis $analysis_name does not implement join")
join(an, a, b) = join(analysis_reference(an), a, b)
"""
make(::Val{analysis_name}, g, n)
Given an ENode `n`, `make` should return the corresponding analysis value.
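A hypothetical sketch of an analysis called `:positive_analysis` (modeled after the
`:sign_analysis` example in the test suite) that marks e-classes holding positive numeric literals:
```julia
EGraphs.make(::Val{:positive_analysis}, g::EGraph, n::ENodeLiteral) = n.value isa Number ? n.value > 0 : nothing
EGraphs.make(::Val{:positive_analysis}, g::EGraph, n::ENodeTerm) = nothing
EGraphs.join(::Val{:positive_analysis}, a, b) = a == b ? a : nothing
```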
"""
make(::Val{analysis_name}, g, n) where {analysis_name} = error("Analysis $analysis_name does not implement make")
make(an, g, n) = make(analysis_reference(an), g, n)
analyze!(g::EGraph, analysis_ref, id::EClassId) = analyze!(g, analysis_ref, reachable(g, id))
analyze!(g::EGraph, analysis_ref) = analyze!(g, analysis_ref, collect(keys(g.classes)))
"""
analyze!(egraph, analysis_name, [ECLASS_IDS])
Given an [EGraph](@ref) and an `analysis` identified by name `analysis_name`,
do an automated bottom-up traversal of the EGraph, associating a value from the
domain of analysis to each ENode in the egraph by the [make](@ref) function.
Then, for each [EClass](@ref), compute the [join](@ref) of the children ENodes analyses values.
After `analyze!` is called, an analysis value will be associated to each EClass in the EGraph.
One can inspect and retrieve analysis values by using [hasdata](@ref) and [getdata](@ref).
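A minimal sketch using the built-in `astsize` cost function as the analysis:
```julia
g = EGraph(:(f(a, b)))
analyze!(g, astsize)
getdata(g[g.root], astsize, nothing)  # a (best enode, cost) pair, if computed
```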
"""
function analyze!(g::EGraph, analysis_ref, ids::Vector{EClassId})
addanalysis!(g, analysis_ref)
ids = sort(ids)
# @assert isempty(g.dirty)
did_something = true
while did_something
did_something = false
for id in ids
eclass = g[id]
id = eclass.id
pass = mapreduce(x -> make(analysis_ref, g, x), (x, y) -> join(analysis_ref, x, y), eclass)
if !isequal(pass, getdata(eclass, analysis_ref, missing))
setdata!(eclass, analysis_ref, pass)
did_something = true
push!(g.dirty, id)
end
end
end
for id in ids
eclass = g[id]
id = eclass.id
if !hasdata(eclass, analysis_ref)
error("failed to compute analysis for eclass ", id)
end
end
return true
end
"""
A basic cost function, where the computed cost is the size
(number of children) of the current expression.
"""
function astsize(n::ENodeTerm, g::EGraph)
cost = 1 + arity(n)
for id in arguments(n)
eclass = g[id]
!hasdata(eclass, astsize) && (cost += Inf; break)
cost += last(getdata(eclass, astsize))
end
return cost
end
astsize(n::ENodeLiteral, g::EGraph) = 1
"""
A basic cost function, where the computed cost is the size
(number of children) of the current expression, times -1.
Strives to get the largest expression
"""
function astsize_inv(n::ENodeTerm, g::EGraph)
cost = -(1 + arity(n)) # minus sign here is the only difference vs astsize
for id in arguments(n)
eclass = g[id]
!hasdata(eclass, astsize_inv) && (cost += Inf; break)
cost += last(getdata(eclass, astsize_inv))
end
return cost
end
astsize_inv(n::ENodeLiteral, g::EGraph) = -1
"""
When passing a function to analysis functions it is considered as a cost function
"""
make(f::Function, g::EGraph, n::AbstractENode) = (n, f(n, g))
join(f::Function, from, to) = last(from) <= last(to) ? from : to
islazy(::Function) = true
modify!(::Function, g, id) = nothing
function rec_extract(g::EGraph, costfun, id::EClassId; cse_env = nothing)
eclass = g[id]
if !isnothing(cse_env) && haskey(cse_env, id)
(sym, _) = cse_env[id]
return sym
end
(n, ck) = getdata(eclass, costfun, (nothing, Inf))
ck == Inf && error("Infinite cost when extracting enode")
if n isa ENodeLiteral
return n.value
elseif n isa ENodeTerm
children = map(arg -> rec_extract(g, costfun, arg; cse_env = cse_env), n.args)
meta = getdata(eclass, :metadata_analysis, nothing)
T = symtype(n)
egraph_reconstruct_expression(T, operation(n), collect(children); metadata = meta, exprhead = exprhead(n))
else
error("Unknown ENode Type $(typeof(n))")
end
end
"""
Given a cost function, extract the expression
with the smallest computed cost from an [`EGraph`](@ref)
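A rough sketch, where `theory` is assumed to be a vector of simplification rules:
```julia
g = EGraph(:((a * 1) + 0))
saturate!(g, theory)
extract!(g, astsize)  # would hopefully return :a
```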
"""
function extract!(g::EGraph, costfun::Function; root = -1, cse = false)
if root == -1
root = g.root
end
analyze!(g, costfun, root)
if cse
# TODO make sure there is no assignments/stateful code!!
cse_env = OrderedDict{EClassId,Tuple{Symbol,Any}}() #
collect_cse!(g, costfun, root, cse_env, Set{EClassId}())
body = rec_extract(g, costfun, root; cse_env = cse_env)
assignments = [Expr(:(=), name, val) for (id, (name, val)) in cse_env]
# return body
Expr(:let, Expr(:block, assignments...), body)
else
return rec_extract(g, costfun, root)
end
end
# Builds a dict e-class id => (symbol, extracted term) of common subexpressions in an e-graph
function collect_cse!(g::EGraph, costfun, id, cse_env, seen)
eclass = g[id]
(cn, ck) = getdata(eclass, costfun, (nothing, Inf))
ck == Inf && error("Error when computing CSE")
if cn isa ENodeTerm
if id in seen
cse_env[id] = (gensym(), rec_extract(g, costfun, id))#, cse_env=cse_env)) # todo generalize symbol?
return
end
for child_id in arguments(cn)
collect_cse!(g, costfun, child_id, cse_env, seen)
end
push!(seen, id)
end
end
function getcost!(g::EGraph, costfun; root = -1)
if root == -1
root = g.root
end
analyze!(g, costfun, root)
bestnode, cost = getdata(g[root], costfun)
return cost
end
| Metatheory | https://github.com/JuliaSymbolics/Metatheory.jl.git |
|
[
"MIT"
] | 2.0.2 | 2cdcf1d7c4ae2d81cc015fae603df5c1b4b8f422 | code | 14704 | # Functional implementation of https://egraphs-good.github.io/
# https://dl.acm.org/doi/10.1145/3434304
abstract type AbstractENode end
import Metatheory: maybelock!
const AnalysisData = NamedTuple{N,T} where {N,T<:Tuple}
const EClassId = Int64
const TermTypes = Dict{Tuple{Any,Int},Type}
# TODO document bindings
const Bindings = Base.ImmutableDict{Int,Tuple{Int,Int}}
const DEFAULT_BUFFER_SIZE = 1048576
struct ENodeLiteral <: AbstractENode
value
hash::Ref{UInt}
ENodeLiteral(a) = new(a, Ref{UInt}(0))
end
Base.:(==)(a::ENodeLiteral, b::ENodeLiteral) = hash(a) == hash(b)
TermInterface.istree(n::ENodeLiteral) = false
TermInterface.exprhead(n::ENodeLiteral) = nothing
TermInterface.operation(n::ENodeLiteral) = n.value
TermInterface.arity(n::ENodeLiteral) = 0
function Base.hash(t::ENodeLiteral, salt::UInt)
!iszero(salt) && return hash(hash(t, zero(UInt)), salt)
h = t.hash[]
!iszero(h) && return h
h′ = hash(t.value, salt)
t.hash[] = h′
return h′
end
mutable struct ENodeTerm <: AbstractENode
exprhead::Union{Symbol,Nothing}
operation::Any
symtype::Type
args::Vector{EClassId}
hash::Ref{UInt} # hash cache
ENodeTerm(exprhead, operation, symtype, c_ids) = new(exprhead, operation, symtype, c_ids, Ref{UInt}(0))
end
function Base.:(==)(a::ENodeTerm, b::ENodeTerm)
hash(a) == hash(b) && a.operation == b.operation
end
TermInterface.istree(n::ENodeTerm) = true
TermInterface.symtype(n::ENodeTerm) = n.symtype
TermInterface.exprhead(n::ENodeTerm) = n.exprhead
TermInterface.operation(n::ENodeTerm) = n.operation
TermInterface.arguments(n::ENodeTerm) = n.args
TermInterface.arity(n::ENodeTerm) = length(n.args)
# This optimization comes from SymbolicUtils
# The hash of an enode is cached to avoid recomputing it.
# Shaves off a lot of time in accessing dictionaries with ENodes as keys.
function Base.hash(t::ENodeTerm, salt::UInt)
!iszero(salt) && return hash(hash(t, zero(UInt)), salt)
h = t.hash[]
!iszero(h) && return h
h′ = hash(t.args, hash(t.exprhead, hash(t.operation, salt)))
t.hash[] = h′
return h′
end
# parametrize metadata by M
mutable struct EClass
g # EGraph
id::EClassId
nodes::Vector{AbstractENode}
parents::Vector{Pair{AbstractENode,EClassId}}
data::AnalysisData
end
function toexpr(n::ENodeTerm)
Expr(:call, :ENode, exprhead(n), operation(n), symtype(n), arguments(n))
end
function Base.show(io::IO, x::ENodeTerm)
print(io, toexpr(x))
end
toexpr(n::ENodeLiteral) = operation(n)
Base.show(io::IO, x::ENodeLiteral) = print(io, toexpr(x))
EClass(g, id) = EClass(g, id, AbstractENode[], Pair{AbstractENode,EClassId}[], NamedTuple())
EClass(g, id, nodes, parents) = EClass(g, id, nodes, parents, NamedTuple())
# Interface for indexing EClass
Base.getindex(a::EClass, i) = a.nodes[i]
Base.setindex!(a::EClass, v, i) = setindex!(a.nodes, v, i)
Base.firstindex(a::EClass) = firstindex(a.nodes)
Base.lastindex(a::EClass) = lastindex(a.nodes)
Base.length(a::EClass) = length(a.nodes)
# Interface for iterating EClass
Base.iterate(a::EClass) = iterate(a.nodes)
Base.iterate(a::EClass, state) = iterate(a.nodes, state)
# Showing
function Base.show(io::IO, a::EClass)
print(io, "EClass $(a.id) (")
print(io, "[", Base.join(a.nodes, ", "), "], ")
print(io, a.data)
print(io, ")")
end
function addparent!(a::EClass, n::AbstractENode, id::EClassId)
push!(a.parents, (n => id))
end
function Base.union!(to::EClass, from::EClass)
# TODO revisit
append!(to.nodes, from.nodes)
append!(to.parents, from.parents)
if !isnothing(to.data) && !isnothing(from.data)
to.data = join_analysis_data!(to.g, something(to.data), something(from.data))
elseif to.data === nothing
to.data = from.data
end
return to
end
function join_analysis_data!(g, dst::AnalysisData, src::AnalysisData)
new_dst = merge(dst, src)
for analysis_name in keys(src)
analysis_ref = g.analyses[analysis_name]
if hasproperty(dst, analysis_name)
ref = getproperty(new_dst, analysis_name)
ref[] = join(analysis_ref, ref[], getproperty(src, analysis_name)[])
end
end
new_dst
end
# Thanks to Shashi Gowda
hasdata(a::EClass, analysis_name::Symbol) = hasproperty(a.data, analysis_name)
hasdata(a::EClass, f::Function) = hasproperty(a.data, nameof(f))
getdata(a::EClass, analysis_name::Symbol) = getproperty(a.data, analysis_name)[]
getdata(a::EClass, f::Function) = getproperty(a.data, nameof(f))[]
getdata(a::EClass, analysis_ref::Union{Symbol,Function}, default) =
hasdata(a, analysis_ref) ? getdata(a, analysis_ref) : default
setdata!(a::EClass, f::Function, value) = setdata!(a, nameof(f), value)
function setdata!(a::EClass, analysis_name::Symbol, value)
if hasdata(a, analysis_name)
ref = getproperty(a.data, analysis_name)
ref[] = value
else
a.data = merge(a.data, NamedTuple{(analysis_name,)}((Ref{Any}(value),)))
end
end
function funs(a::EClass)
map(operation, a.nodes)
end
function funs_arity(a::EClass)
map(a.nodes) do x
(operation(x), arity(x))
end
end
"""
A concrete type representing an [`EGraph`].
See the [egg paper](https://dl.acm.org/doi/pdf/10.1145/3434304)
for implementation details.
"""
mutable struct EGraph
"stores the equality relations over e-class ids"
uf::IntDisjointSet
"map from eclass id to eclasses"
classes::Dict{EClassId,EClass}
"hashcons"
memo::Dict{AbstractENode,EClassId} # memo
"worklist for ammortized upwards merging"
dirty::Vector{EClassId}
root::EClassId
"A vector of analyses associated to the EGraph"
analyses::Dict{Union{Symbol,Function},Union{Symbol,Function}}
"a cache mapping function symbols to e-classes that contain e-nodes with that function symbol."
symcache::Dict{Any,Vector{EClassId}}
default_termtype::Type
termtypes::TermTypes
numclasses::Int
numnodes::Int
"If we use global buffers we may need to lock. Defaults to true."
needslock::Bool
"Buffer for e-matching which defaults to a global. Use a local buffer for generated functions."
buffer::Vector{Bindings}
"Buffer for rule application which defaults to a global. Use a local buffer for generated functions."
merges_buffer::Vector{Tuple{Int,Int}}
lock::ReentrantLock
end
"""
EGraph(expr)
Construct an EGraph from a starting symbolic expression `expr`.
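For example:
```julia
g = EGraph(:(f(a, b)))  # e-graph rooted at the given expression
g2 = EGraph()           # empty e-graph; add expressions later with `addexpr!`
```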
"""
function EGraph(; needslock::Bool = false, buffer_size = DEFAULT_BUFFER_SIZE)
EGraph(
IntDisjointSet(),
Dict{EClassId,EClass}(),
Dict{AbstractENode,EClassId}(),
EClassId[],
-1,
Dict{Union{Symbol,Function},Union{Symbol,Function}}(),
Dict{Any,Vector{EClassId}}(),
Expr,
TermTypes(),
0,
0,
needslock,
Bindings[],
Tuple{Int,Int}[],
ReentrantLock(),
)
end
function maybelock!(f::Function, g::EGraph)
g.needslock ? lock(f, g.lock) : f()
end
function EGraph(e; keepmeta = false, kwargs...)
g = EGraph(; kwargs...)
keepmeta && addanalysis!(g, :metadata_analysis)
g.root = addexpr!(g, e; keepmeta = keepmeta)
g
end
function addanalysis!(g::EGraph, costfun::Function)
g.analyses[nameof(costfun)] = costfun
g.analyses[costfun] = costfun
end
function addanalysis!(g::EGraph, analysis_name::Symbol)
g.analyses[analysis_name] = analysis_name
end
function settermtype!(g::EGraph, f, ar, T)
g.termtypes[(f, ar)] = T
end
function settermtype!(g::EGraph, T)
g.default_termtype = T
end
function gettermtype(g::EGraph, f, ar)
if haskey(g.termtypes, (f, ar))
g.termtypes[(f, ar)]
else
g.default_termtype
end
end
"""
Returns the canonical e-class id for a given e-class.
"""
find(g::EGraph, a::EClassId)::EClassId = find_root(g.uf, a)
find(g::EGraph, a::EClass)::EClassId = find(g, a.id)
Base.getindex(g::EGraph, i::EClassId) = g.classes[find(g, i)]
### Definition 2.3: canonicalization
iscanonical(g::EGraph, n::ENodeTerm) = n == canonicalize(g, n)
iscanonical(g::EGraph, n::ENodeLiteral) = true
iscanonical(g::EGraph, e::EClass) = find(g, e.id) == e.id
canonicalize(g::EGraph, n::ENodeLiteral) = n
function canonicalize(g::EGraph, n::ENodeTerm)
if arity(n) > 0
new_args = map(x -> find(g, x), n.args)
return ENodeTerm(exprhead(n), operation(n), symtype(n), new_args)
end
return n
end
function canonicalize!(g::EGraph, n::ENodeTerm)
for (i, arg) in enumerate(n.args)
n.args[i] = find(g, arg)
end
n.hash[] = UInt(0)
return n
end
canonicalize!(g::EGraph, n::ENodeLiteral) = n
function canonicalize!(g::EGraph, e::EClass)
e.id = find(g, e.id)
end
function lookup(g::EGraph, n::AbstractENode)::EClassId
cc = canonicalize(g, n)
haskey(g.memo, cc) ? find(g, g.memo[cc]) : -1
end
"""
Inserts an e-node in an [`EGraph`](@ref)
"""
function add!(g::EGraph, n::AbstractENode)::EClassId
n = canonicalize(g, n)
haskey(g.memo, n) && return g.memo[n]
id = push!(g.uf) # create new singleton eclass
if n isa ENodeTerm
for c_id in arguments(n)
addparent!(g.classes[c_id], n, id)
end
end
g.memo[n] = id
if haskey(g.symcache, operation(n))
push!(g.symcache[operation(n)], id)
else
g.symcache[operation(n)] = [id]
end
classdata = EClass(g, id, AbstractENode[n], Pair{AbstractENode,EClassId}[])
g.classes[id] = classdata
g.numclasses += 1
for an in values(g.analyses)
if !islazy(an) && an !== :metadata_analysis
setdata!(classdata, an, make(an, g, n))
modify!(an, g, id)
end
end
return id
end
"""
Extend this function on your types to do preliminary
preprocessing of a symbolic term before adding it to
an EGraph. Most common preprocessing techniques are binarization
of n-ary terms and metadata stripping.
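A hypothetical sketch for a user-defined term type, where `MyTerm` and `strip_metadata` are
assumptions and not part of Metatheory:
```julia
EGraphs.preprocess(t::MyTerm) = strip_metadata(t)
```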
"""
function preprocess(e::Expr)
cleanast(e)
end
preprocess(x) = x
"""
Recursively traverse an expression of a type satisfying the `TermInterface` and insert terms into an
[`EGraph`](@ref). If `e` has no children (has an arity of 0) then directly
insert the literal into the [`EGraph`](@ref).
"""
function addexpr!(g::EGraph, se; keepmeta = false)::EClassId
e = preprocess(se)
id = add!(g, if istree(se)
class_ids::Vector{EClassId} = [addexpr!(g, arg; keepmeta = keepmeta) for arg in arguments(e)]
ENodeTerm(exprhead(e), operation(e), symtype(e), class_ids)
else
# constant enode
ENodeLiteral(e)
end)
if keepmeta
meta = TermInterface.metadata(e)
!isnothing(meta) && setdata!(g.classes[id], :metadata_analysis, meta)
end
return id
end
function addexpr!(g::EGraph, ec::EClass; keepmeta = false)
@assert g == ec.g
find(g, ec.id)
end
"""
Given an [`EGraph`](@ref) and two e-class ids, set
the two e-classes as equal.
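A small sketch:
```julia
g = EGraph()
a = addexpr!(g, :a)
b = addexpr!(g, :b)
merge!(g, a, b)
in_same_class(g, a, b)  # true
```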
"""
function Base.merge!(g::EGraph, a::EClassId, b::EClassId)::EClassId
id_a = find(g, a)
id_b = find(g, b)
id_a == id_b && return id_a
to = union!(g.uf, id_a, id_b)
from = (to == id_a) ? id_b : id_a
push!(g.dirty, to)
from_class = g.classes[from]
to_class = g.classes[to]
to_class.id = to
# I (was) the troublesome line!
g.classes[to] = union!(to_class, from_class)
delete!(g.classes, from)
g.numclasses -= 1
return to
end
function in_same_class(g::EGraph, a, b)
find(g, a) == find(g, b)
end
# TODO new rebuilding from egg
"""
This function restores invariants and executes
upwards merging in an [`EGraph`](@ref). See
the [egg paper](https://dl.acm.org/doi/pdf/10.1145/3434304)
for more details.
"""
function rebuild!(g::EGraph)
# normalize!(g.uf)
while !isempty(g.dirty)
# todo = unique([find(egraph, id) for id ∈ egraph.dirty])
todo = unique(g.dirty)
empty!(g.dirty)
for x in todo
repair!(g, x)
end
end
if g.root != -1
g.root = find(g, g.root)
end
normalize!(g.uf)
end
function repair!(g::EGraph, id::EClassId)
id = find(g, id)
ecdata = g[id]
ecdata.id = id
new_parents = (length(ecdata.parents) > 30 ? OrderedDict : LittleDict){AbstractENode,EClassId}()
for (p_enode, p_eclass) in ecdata.parents
p_enode = canonicalize!(g, p_enode)
# deduplicate parents
if haskey(new_parents, p_enode)
merge!(g, p_eclass, new_parents[p_enode])
end
n_id = find(g, p_eclass)
g.memo[p_enode] = n_id
new_parents[p_enode] = n_id
end
ecdata.parents = collect(new_parents)
# ecdata.nodes = map(n -> canonicalize(g.uf, n), ecdata.nodes)
# Analysis invariant maintenance
for an in values(g.analyses)
hasdata(ecdata, an) && modify!(an, g, id)
for (p_enode, p_id) in ecdata.parents
# p_eclass = find(g, p_eclass)
p_eclass = g[p_id]
if !islazy(an) && !hasdata(p_eclass, an)
setdata!(p_eclass, an, make(an, g, p_enode))
end
if hasdata(p_eclass, an)
p_data = getdata(p_eclass, an)
if an !== :metadata_analysis
new_data = join(an, p_data, make(an, g, p_enode))
if new_data != p_data
setdata!(p_eclass, an, new_data)
push!(g.dirty, p_id)
end
end
end
end
end
unique!(ecdata.nodes)
# ecdata.nodes = map(n -> canonicalize(g.uf, n), ecdata.nodes)
end
"""
Recursive function that traverses an [`EGraph`](@ref) and
returns a vector of all reachable e-classes from a given e-class id.
"""
function reachable(g::EGraph, id::EClassId)
id = find(g, id)
hist = EClassId[id]
todo = EClassId[id]
function reachable_node(xn::ENodeTerm)
x = canonicalize(g, xn)
for c_id in arguments(x)
if c_id ∉ hist
push!(hist, c_id)
push!(todo, c_id)
end
end
end
function reachable_node(x::ENodeLiteral) end
while !isempty(todo)
curr = find(g, pop!(todo))
for n in g.classes[curr]
reachable_node(n)
end
end
return hist
end
"""
When extracting symbolic expressions from an e-graph, we need
to instruct the e-graph how to rebuild expressions of a certain type.
This function must be extended by the user to add new types of expressions that can be manipulated by e-graphs.
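A hypothetical sketch for a user-defined expression type, where `MyExpr` is an assumption:
```julia
function EGraphs.egraph_reconstruct_expression(::Type{MyExpr}, op, args; metadata = nothing, exprhead = :call)
  MyExpr(op, args, metadata)
end
```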
"""
function egraph_reconstruct_expression(T::Type{Expr}, op, args; metadata = nothing, exprhead = :call)
similarterm(Expr(:call, :_), op, args; metadata = metadata, exprhead = exprhead)
end
# Thanks to Max Willsey and Yihong Zhang
import Metatheory: lookup_pat
function lookup_pat(g::EGraph, p::PatTerm)::EClassId
@assert isground(p)
eh = exprhead(p)
op = operation(p)
args = arguments(p)
ar = arity(p)
T = gettermtype(g, op, ar)
ids = map(x -> lookup_pat(g, x), args)
!all((>)(0), ids) && return -1
if T == Expr && op isa Union{Function,DataType}
id = lookup(g, ENodeTerm(eh, op, T, ids))
id < 0 && return lookup(g, ENodeTerm(eh, nameof(op), T, ids))
return id
else
return lookup(g, ENodeTerm(eh, op, T, ids))
end
end
lookup_pat(g::EGraph, p::Any) = lookup(g, ENodeLiteral(p))
lookup_pat(g::EGraph, p::AbstractPat) = throw(UnsupportedPatternException(p))
| Metatheory | https://github.com/JuliaSymbolics/Metatheory.jl.git |
|
[
"MIT"
] | 2.0.2 | 2cdcf1d7c4ae2d81cc015fae603df5c1b4b8f422 | code | 1577 | struct IntDisjointSet
parents::Vector{Int}
normalized::Ref{Bool}
end
IntDisjointSet() = IntDisjointSet(Int[], Ref(true))
Base.length(x::IntDisjointSet) = length(x.parents)
function Base.push!(x::IntDisjointSet)::Int
push!(x.parents, -1)
length(x)
end
function find_root(x::IntDisjointSet, i::Int)::Int
while x.parents[i] >= 0
i = x.parents[i]
end
return i
end
function in_same_set(x::IntDisjointSet, a::Int, b::Int)
find_root(x, a) == find_root(x, b)
end
function Base.union!(x::IntDisjointSet, i::Int, j::Int)
pi = find_root(x, i)
pj = find_root(x, j)
if pi != pj
x.normalized[] = false
isize = -x.parents[pi]
jsize = -x.parents[pj]
if isize > jsize # swap to make size of i less than j
pi, pj = pj, pi
isize, jsize = jsize, isize
end
x.parents[pj] -= isize # increase new size of pj
x.parents[pi] = pj # set parent of pi to pj
end
return pj
end
function normalize!(x::IntDisjointSet)
for i in 1:length(x)
p_i = find_root(x, i)
if p_i != i
x.parents[i] = p_i
end
end
x.normalized[] = true
end
# If normalized we don't even need a loop here.
function _find_root_normal(x::IntDisjointSet, i::Int)
p_i = x.parents[i]
if p_i < 0 # Is `i` a root?
return i
else
return p_i
end
# return pi
end
function _in_same_set_normal(x::IntDisjointSet, a::Int64, b::Int64)
_find_root_normal(x, a) == _find_root_normal(x, b)
end
function find_root_if_normal(x::IntDisjointSet, i::Int64)
if x.normalized[]
_find_root_normal(x, i)
else
find_root(x, i)
end
end
| Metatheory | https://github.com/JuliaSymbolics/Metatheory.jl.git |
|
[
"MIT"
] | 2.0.2 | 2cdcf1d7c4ae2d81cc015fae603df5c1b4b8f422 | code | 9322 | abstract type SaturationGoal end
reached(g::EGraph, goal::Nothing) = false
reached(g::EGraph, goal::SaturationGoal) = false
"""
This goal is reached when the `exprs` list of expressions are in the
same equivalence class.
"""
struct EqualityGoal <: SaturationGoal
exprs::Vector{Any}
ids::Vector{EClassId}
function EqualityGoal(exprs, eclasses)
@assert length(exprs) == length(eclasses) && length(exprs) != 0
new(exprs, eclasses)
end
end
function reached(g::EGraph, goal::EqualityGoal)
all(x -> in_same_class(g, goal.ids[1], x), @view goal.ids[2:end])
end
"""
Boolean valued function as an arbitrary saturation goal.
User supplied function must take an [`EGraph`](@ref) as the only parameter.
"""
struct FunctionGoal <: SaturationGoal
fun::Function
end
function reached(g::EGraph, goal::FunctionGoal)::Bool
goal.fun(g)
end
mutable struct SaturationReport
reason::Union{Symbol,Nothing}
egraph::EGraph
iterations::Int
to::TimerOutput
end
SaturationReport() = SaturationReport(nothing, EGraph(), 0, TimerOutput())
SaturationReport(g::EGraph) = SaturationReport(nothing, g, 0, TimerOutput())
# string representation of timedata
function Base.show(io::IO, x::SaturationReport)
g = x.egraph
println(io, "SaturationReport")
println(io, "=================")
println(io, "\tStop Reason: $(x.reason)")
println(io, "\tIterations: $(x.iterations)")
println(io, "\tEGraph Size: $(g.numclasses) eclasses, $(length(g.memo)) nodes")
print_timer(io, x.to)
end
"""
Configurable Parameters for the equality saturation process.
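For example, a run capped at 10 iterations and 5000 e-classes, using the simple scheduler:
```julia
params = SaturationParams(timeout = 10, eclasslimit = 5000, scheduler = SimpleScheduler)
```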
"""
Base.@kwdef mutable struct SaturationParams
timeout::Int = 8
"Timeout in nanoseconds"
timelimit::UInt64 = 0
"Maximum number of eclasses allowed"
eclasslimit::Int = 5000
enodelimit::Int = 15000
goal::Union{Nothing,SaturationGoal} = nothing
stopwhen::Function = () -> false
scheduler::Type{<:AbstractScheduler} = BackoffScheduler
schedulerparams::Tuple = ()
threaded::Bool = false
timer::Bool = true
end
# function cached_ids(g::EGraph, p::PatTerm)# ::Vector{Int64}
# if isground(p)
# id = lookup_pat(g, p)
# !isnothing(id) && return [id]
# else
# return keys(g.classes)
# end
# return []
# end
function cached_ids(g::EGraph, p::AbstractPattern) # p is a literal
@warn "Pattern matching against the whole e-graph"
return keys(g.classes)
end
function cached_ids(g::EGraph, p) # p is a literal
id = lookup(g, ENodeLiteral(p))
id > 0 && return [id]
return []
end
# function cached_ids(g::EGraph, p::PatTerm)
# arr = get(g.symcache, operation(p), EClassId[])
# if operation(p) isa Union{Function,DataType}
# append!(arr, get(g.symcache, nameof(operation(p)), EClassId[]))
# end
# arr
# end
function cached_ids(g::EGraph, p::PatTerm)
keys(g.classes)
end
"""
Searches the e-graph for matches of the rules in `theory` and returns the total number of matches found.
"""
function eqsat_search!(
g::EGraph,
theory::Vector{<:AbstractRule},
scheduler::AbstractScheduler,
report::SaturationReport,
)::Int
n_matches = 0
maybelock!(g) do
empty!(g.buffer)
end
@debug "SEARCHING"
for (rule_idx, rule) in enumerate(theory)
@timeit report.to string(rule_idx) begin
# don't apply banned rules
if !cansearch(scheduler, rule)
@debug "$rule is banned"
continue
end
ids = cached_ids(g, rule.left)
rule isa BidirRule && (ids = ids ∪ cached_ids(g, rule.right))
for i in ids
n_matches += rule.ematcher!(g, rule_idx, i)
end
n_matches > 0 && @debug "Rule $rule_idx: $rule produced $n_matches matches"
inform!(scheduler, rule, n_matches)
end
end
return n_matches
end
function drop_n!(D::CircularDeque, nn)
D.n -= nn
tmp = D.first + nn
D.first = tmp > D.capacity ? 1 : tmp
end
instantiate_enode!(bindings::Bindings, g::EGraph, p::Any)::EClassId = add!(g, ENodeLiteral(p))
instantiate_enode!(bindings::Bindings, g::EGraph, p::PatVar)::EClassId = bindings[p.idx][1]
function instantiate_enode!(bindings::Bindings, g::EGraph, p::PatTerm)::EClassId
eh = exprhead(p)
op = operation(p)
ar = arity(p)
args = arguments(p)
T = gettermtype(g, op, ar)
# TODO add predicate check `quotes_operation`
new_op = T == Expr && op isa Union{Function,DataType} ? nameof(op) : op
add!(g, ENodeTerm(eh, new_op, T, map(arg -> instantiate_enode!(bindings, g, arg), args)))
end
function apply_rule!(buf, g::EGraph, rule::RewriteRule, id, direction)
push!(g.merges_buffer, (id, instantiate_enode!(buf, g, rule.right)))
nothing
end
function apply_rule!(bindings::Bindings, g::EGraph, rule::EqualityRule, id::EClassId, direction::Int)
pat_to_inst = direction == 1 ? rule.right : rule.left
push!(g.merges_buffer, (id, instantiate_enode!(bindings, g, pat_to_inst)))
nothing
end
function apply_rule!(bindings::Bindings, g::EGraph, rule::UnequalRule, id::EClassId, direction::Int)
pat_to_inst = direction == 1 ? rule.right : rule.left
other_id = instantiate_enode!(bindings, g, pat_to_inst)
if find(g, id) == find(g, other_id)
@debug "$rule produced a contradiction!"
return :contradiction
end
nothing
end
"""
Instantiate argument for dynamic rule application in e-graph
"""
function instantiate_actual_param!(bindings::Bindings, g::EGraph, i)
ecid, literal_position = bindings[i]
ecid <= 0 && error("unbound pattern variable")
eclass = g[ecid]
if literal_position > 0
@assert eclass[literal_position] isa ENodeLiteral
return eclass[literal_position].value
end
return eclass
end
function apply_rule!(bindings::Bindings, g::EGraph, rule::DynamicRule, id::EClassId, direction::Int)
f = rule.rhs_fun
r = f(id, g, (instantiate_actual_param!(bindings, g, i) for i in 1:length(rule.patvars))...)
isnothing(r) && return nothing
rcid = addexpr!(g, r)
push!(g.merges_buffer, (id, rcid))
return nothing
end
function eqsat_apply!(g::EGraph, theory::Vector{<:AbstractRule}, rep::SaturationReport, params::SaturationParams)
i = 0
@assert isempty(g.merges_buffer)
@debug "APPLYING $(length(g.buffer)) matches"
maybelock!(g) do
while !isempty(g.buffer)
if reached(g, params.goal)
@debug "Goal reached"
rep.reason = :goalreached
return
end
bindings = pop!(g.buffer)
rule_idx, id = bindings[0]
direction = sign(rule_idx)
rule_idx = abs(rule_idx)
rule = theory[rule_idx]
halt_reason = apply_rule!(bindings, g, rule, id, direction)
if !isnothing(halt_reason)
rep.reason = halt_reason
return
end
end
end
maybelock!(g) do
while !isempty(g.merges_buffer)
(l, r) = pop!(g.merges_buffer)
merge!(g, l, r)
end
end
end
"""
Core algorithm of the library: the equality saturation step.
"""
function eqsat_step!(
g::EGraph,
theory::Vector{<:AbstractRule},
curr_iter,
scheduler::AbstractScheduler,
params::SaturationParams,
report,
)
setiter!(scheduler, curr_iter)
@timeit report.to "Search" eqsat_search!(g, theory, scheduler, report)
@timeit report.to "Apply" eqsat_apply!(g, theory, report, params)
if report.reason === nothing && cansaturate(scheduler) && isempty(g.dirty)
report.reason = :saturated
end
@timeit report.to "Rebuild" rebuild!(g)
@debug smallest_expr = extract!(g, astsize)
return report
end
"""
Given an [`EGraph`](@ref) and a collection of rewrite rules,
execute the equality saturation algorithm.
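A rough sketch, where `theory` is assumed to be a vector of rewrite rules (e.g. built with `@theory`):
```julia
g = EGraph(:(a + 0))
report = saturate!(g, theory)
report.reason  # e.g. :saturated or :timeout
```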
"""
function saturate!(g::EGraph, theory::Vector{<:AbstractRule}, params = SaturationParams())
curr_iter = 0
sched = params.scheduler(g, theory, params.schedulerparams...)
report = SaturationReport(g)
start_time = time_ns()
!params.timer && disable_timer!(report.to)
timelimit = params.timelimit > 0
while true
curr_iter += 1
@debug "================ EQSAT ITERATION $curr_iter ================"
report = eqsat_step!(g, theory, curr_iter, sched, params, report)
elapsed = time_ns() - start_time
if timelimit && params.timelimit <= elapsed
report.reason = :timelimit
break
end
if !(report.reason isa Nothing)
break
end
if curr_iter >= params.timeout
report.reason = :timeout
break
end
if params.eclasslimit > 0 && g.numclasses > params.eclasslimit
report.reason = :eclasslimit
break
end
if reached(g, params.goal)
report.reason = :goalreached
break
end
end
report.iterations = curr_iter
return report
end
function areequal(theory::Vector, exprs...; params = SaturationParams())
g = EGraph(exprs[1])
areequal(g, theory, exprs...; params = params)
end
function areequal(g::EGraph, t::Vector{<:AbstractRule}, exprs...; params = SaturationParams())
if length(exprs) == 1
return true
end
n = length(exprs)
ids = map(Base.Fix1(addexpr!, g), collect(exprs))
goal = EqualityGoal(collect(exprs), ids)
params.goal = goal
report = saturate!(g, t, params)
if !(report.reason === :saturated) && !reached(g, goal)
return missing # failed to prove
end
return reached(g, goal)
end
macro areequal(theory, exprs...)
esc(:(areequal($theory, $exprs...)))
end
macro areequalg(G, theory, exprs...)
esc(:(areequal($G, $theory, $exprs...)))
end
| Metatheory | https://github.com/JuliaSymbolics/Metatheory.jl.git |
|
[
"MIT"
] | 2.0.2 | 2cdcf1d7c4ae2d81cc015fae603df5c1b4b8f422 | code | 2750 | using GraphViz
using Metatheory
using TermInterface
function render_egraph!(io::IO, g::EGraph)
print(
io,
"""digraph {
compound=true
clusterrank=local
remincross=false
ranksep=0.9
""",
)
for (_, eclass) in g.classes
render_eclass!(io, g, eclass)
end
println(io, "\n}\n")
end
function render_eclass!(io::IO, g::EGraph, eclass::EClass)
print(
io,
""" subgraph cluster_$(eclass.id) {
style="dotted,rounded";
rank=same;
label="#$(eclass.id). Smallest: $(extract!(g, astsize; root=eclass.id))"
fontcolor = gray
fontsize = 8
""",
)
# if g.root == find(g, eclass.id)
# println(io, " penwidth=2")
# end
for (i, node) in enumerate(eclass.nodes)
render_enode_node!(io, g, eclass.id, i, node)
end
print(io, "\n }\n")
for (i, node) in enumerate(eclass.nodes)
render_enode_edges!(io, g, eclass.id, i, node)
end
println(io)
end
function render_enode_node!(io::IO, g::EGraph, eclass_id, i::Int, node::AbstractENode)
label = operation(node)
# (mr, style) = if node in diff && get(report.cause, node, missing) !== missing
# pair = get(report.cause, node, missing)
# split(split("$(pair[1].rule) ", "=>")[1], "-->")[1], " color=\"red\""
# else
# " ", ""
# end
# sg *= " $id.$os [label=<$label<br /><font point-size=\"8\" color=\"gray\">$mr</font>> $style];"
println(io, " $eclass_id.$i [label=<$label> shape=box style=rounded]")
end
render_enode_edges!(::IO, ::EGraph, eclass_id, i, ::ENodeLiteral) = nothing
function render_enode_edges!(io::IO, g::EGraph, eclass_id, i, node::ENodeTerm)
len = length(arguments(node))
for (ite, child) in enumerate(arguments(node))
cluster_id = find(g, child)
# The limitation of graphviz is that it cannot point to the eclass outer frame,
# so when pointing to the same e-class, the next best thing is to point to the same e-node.
target_id = "$cluster_id" * (cluster_id == eclass_id ? ".$i" : ".1")
# In order from left to right, if there are more than 3 children, label the order.
dir = if len == 2
ite == 1 ? ":sw" : ":se"
elseif len == 3
ite == 1 ? ":sw" : (ite == 2 ? ":s" : ":se")
else
""
end
linelabel = len > 3 ? " label=$ite" : " "
println(io, " $eclass_id.$i$dir -> $target_id [arrowsize=0.5 lhead=cluster_$cluster_id $linelabel]")
end
end
function Base.convert(::Type{GraphViz.Graph}, g::EGraph)::GraphViz.Graph
io = IOBuffer()
render_egraph!(io, g)
gs = String(take!(io))
g = GraphViz.Graph(gs)
GraphViz.layout!(g; engine = "dot")
g
end
function Base.show(io::IO, mime::MIME"image/svg+xml", g::EGraph)
show(io, mime, convert(GraphViz.Graph, g))
end | Metatheory | https://github.com/JuliaSymbolics/Metatheory.jl.git |
|
[
"MIT"
] | 2.0.2 | 2cdcf1d7c4ae2d81cc015fae603df5c1b4b8f422 | code | 676 | using SafeTestsets
using Documenter
using Metatheory
using Test
doctest(Metatheory)
function test(file::String)
@info file
@eval @time @safetestset $file begin
include(joinpath(@__DIR__, $file))
end
end
allscripts(dir) = [joinpath(@__DIR__, dir, x) for x in readdir(dir) if endswith(x, ".jl")]
const TEST_FILES = [
allscripts("classic")
allscripts("egraphs")
allscripts("integration")
allscripts("tutorials")
]
@timev map(test, TEST_FILES)
# exported consistency test
for m in [Metatheory, Metatheory.EGraphs, Metatheory.EGraphs.Schedulers]
for i in propertynames(m)
!hasproperty(m, i) && error("Module $m exports undefined symbol $i")
end
end
| Metatheory | https://github.com/JuliaSymbolics/Metatheory.jl.git |
|
[
"MIT"
] | 2.0.2 | 2cdcf1d7c4ae2d81cc015fae603df5c1b4b8f422 | code | 3331 | using Metatheory
using Metatheory.EGraphs
using TermInterface
using Test
# TODO update
function EGraphs.make(::Val{:sign_analysis}, g::EGraph, n::ENodeLiteral)
if n.value isa Real
if n.value == Inf
Inf
elseif n.value == -Inf
-Inf
elseif n.value isa Real # in Julia NaN is a Real
sign(n.value)
else
nothing
end
elseif n.value isa Symbol
s = n.value
s == :x && return 1
s == :y && return -1
s == :z && return 0
s == :k && return Inf
return nothing
end
end
function EGraphs.make(::Val{:sign_analysis}, g::EGraph, n::ENodeTerm)
# Let's consider only binary function call terms.
if exprhead(n) == :call && arity(n) == 2
# get the symbol name of the operation
op = operation(n)
op = op isa Function ? nameof(op) : op
# Get the left and right child eclasses
child_eclasses = arguments(n)
l = g[child_eclasses[1]]
r = g[child_eclasses[2]]
# Get the corresponding SignAnalysis value of the children
# defaulting to nothing
lsign = getdata(l, :sign_analysis, nothing)
rsign = getdata(r, :sign_analysis, nothing)
(lsign == nothing || rsign == nothing) && return nothing
if op == :*
return lsign * rsign
elseif op == :/
return lsign / rsign
elseif op == :+
s = lsign + rsign
iszero(s) && return nothing
(isinf(s) || isnan(s)) && return s
return sign(s)
elseif op == :-
s = lsign - rsign
iszero(s) && return nothing
(isinf(s) || isnan(s)) && return s
return sign(s)
end
end
return nothing
end
function EGraphs.join(::Val{:sign_analysis}, a, b)
return a == b ? a : nothing
end
# we are cautious, so we return false by default
isnotzero(x::EClass) = getdata(x, :sign_analysis, false)
# t = @theory a b c begin
# a * (b * c) == (a * b) * c
# a + (b + c) == (a + b) + c
# a * b == b * a
# a + b == b + a
# a * (b + c) == (a * b) + (a * c)
# a::isnotzero / a::isnotzero --> 1
# end
function custom_analysis(expr)
g = EGraph(expr)
# saturate!(g, t)
analyze!(g, :sign_analysis)
return getdata(g[g.root], :sign_analysis)
end
custom_analysis(:(3 * x)) # 1 (both factors are positive)
custom_analysis(:(3 * (2 + a) * 2)) # nothing (n-ary call, and `a` has no known sign)
custom_analysis(:(-3y * (2x * y))) # -1 (an odd number of negative factors)
custom_analysis(:(k / k)) # NaN (Inf / Inf)
#===========================================================================================#
# pattern variables can be specified before the block of rules
comm_monoid = @theory a b c begin
a * b == b * a # commutativity
a * 1 --> a # identity
a * (b * c) == (a * b) * c # associativity
end;
# theories are just vectors of rules
comm_group = [
@rule a b (a + b == b + a) # commutativity
# pattern variables can also be written with the prefix ~ notation
@rule ~a + 0 --> ~a # identity
@rule a b c (a + (b + c) == (a + b) + c) # associativity
@rule a (a + (-a) => 0) # inverse
];
# dynamic rules are defined with the `=>` operator
folder = @theory a b begin
a::Real + b::Real => a + b
a::Real * b::Real => a * b
a::Real / b::Real => a / b
end;
div_sim = @theory a b c begin
(a * b) / c == a * (b / c)
a::isnotzero / a::isnotzero --> 1
end;
t = vcat(comm_monoid, comm_group, folder, div_sim);
g = EGraph(:(a * (2 * 3) / 6));
saturate!(g, t)
@test :a == extract!(g, astsize)
# :a
| Metatheory | https://github.com/JuliaSymbolics/Metatheory.jl.git |
|
[
"MIT"
] | 2.0.2 | 2cdcf1d7c4ae2d81cc015fae603df5c1b4b8f422 | code | 4683 | using Metatheory
@testset "Reduction Basics" begin
t = @theory begin
~a + ~a --> 2 * (~a)
~x / ~x --> 1
~x * 1 --> ~x
end
# basic theory to check that everything works
@test rewrite(:(a + a), t) == :(2a)
@test rewrite(:(a + (x * 1)), t) == :(a + x)
@test rewrite(:(a + (a * 1)), t; order = :inner) == :(2a)
end
## Free Monoid
@testset "Free Monoid - Overriding identity" begin
# support symbol literals
function ⋅ end
symbol_monoid = @theory begin
~a ⋅ :ε --> ~a
:ε ⋅ ~a --> ~a
~a::Symbol --> ~a
~a::Symbol ⋅ ~b::Symbol => Symbol(String(a) * String(b))
# i |> error("unsupported ", i)
end
@test rewrite(:(ε ⋅ a ⋅ ε ⋅ b ⋅ c ⋅ (ε ⋅ ε ⋅ d) ⋅ e), symbol_monoid; order = :inner) == :abcde
end
## Interpolation should be possible at runtime
@testset "Calculator" begin
function ⊗ end
function ⊕ end
function ⊖ end
calculator = @theory begin
~x::Number ⊕ ~y::Number => ~x + ~y
~x::Number ⊗ ~y::Number => ~x * ~y
~x::Number ⊖ ~y::Number => ~x ÷ ~y
~x::Symbol --> ~x
~x::Number --> ~x
end
a = 10
@test rewrite(:(3 ⊕ 1 ⊕ $a), calculator; order = :inner) == 14
end
## Direct rules
@testset "Direct Rules" begin
t = @theory begin
# maps
~a * ~b => ((~a isa Number && ~b isa Number) ? ~a * ~b : _lhs_expr)
end
@test rewrite(:(3 * 1), t) == 3
t = @theory begin
# maps
~a::Number * ~b::Number => ~a * ~b
end
@test rewrite(:(3 * 1), t) == 3
end
## Take advantage of subtyping.
# Subtyping in Julia has been formalized in this paper
# [Julia Subtyping: A Rational Reconstruction](https://benchung.github.io/papers/jlsub.pdf)
abstract type Vehicle end
abstract type GroundVehicle <: Vehicle end
abstract type AirVehicle <: Vehicle end
struct Airplane <: AirVehicle end
struct Car <: GroundVehicle end
airpl = Airplane()
car = Car()
t = @theory begin
~a::AirVehicle * ~b => "flies"
~a::GroundVehicle * ~b => "doesnt_fly"
end
@testset "Subtyping" begin
sf = rewrite(:($airpl * c), t)
df = rewrite(:($car * c), t)
@test sf == "flies"
@test df == "doesnt_fly"
end
@testset "Interpolation" begin
airpl = Airplane()
car = Car()
t = @theory begin
airpl * ~b => "flies"
car * ~b => "doesnt_fly"
end
sf = rewrite(:($airpl * c), t)
df = rewrite(:($car * c), t)
@test sf == "flies"
@test df == "doesnt_fly"
end
@testset "Segment Variables" begin
t = @theory begin
f(~x, ~~y) => Expr(:call, :ok, (~~y)...)
end
sf = rewrite(:(f(1, 2, 3, 4)), t)
@test sf == :(ok(2, 3, 4))
t = @theory x y begin
f(x, y...) => Expr(:call, :ok, y...)
end
sf = rewrite(:(f(1, 2, 3, 4)), t)
@test sf == :(ok(2, 3, 4))
t = @theory x y begin
f(x, y...) --> ok(y...)
end
sf = rewrite(:(f(1, 2, 3, 4)), t)
@test sf == :(ok(2, 3, 4))
end
module NonCall
using Metatheory
t = [@rule a b (a, b) --> ok(a, b)]
test() = rewrite(:(x, y), t)
end
@testset "Non-Call expressions" begin
@test NonCall.test() == :(ok(x, y))
end
@testset "Pattern matcher can match on both function object references and name symbols" begin
ex = :($(+)($(sin)(x)^2, $(cos)(x)^2))
r = @rule(sin(~x)^2 + cos(~x)^2 --> 1)
@test r(ex) == 1
end
@testset "Pattern variable as pattern term head" begin
foo(x) = x + 2
ex = :(($foo)(bar, 2, pazz))
r = @rule ((~f)(~x, 2, ~y) => (~f)(2))
@test r(ex) == 4
end
using TermInterface
using Metatheory.Syntax: @capture
@testset "Capture form" begin
ex = :(a^a)
#note that @test inserts a soft local scope (try-catch) that would gobble
#the matches from assignment statements in @capture macro, so we call it
#outside the test macro
ret = @capture ex (~x)^(~x)
@test ret
@test @isdefined x
@test x === :a
ex = :(b^a)
ret = @capture ex (~y)^(~y)
@test !ret
@test !(@isdefined y)
ret = @capture :(a + b) (+)(~~z)
@test ret
@test @isdefined z
@test all(z .=== arguments(:(a + b)))
#a more typical way to use the @capture macro
f(x) =
if @capture x (~w)^(~w)
w
end
@test f(:(b^b)) == :b
@test isnothing(f(:(b + b)))
x = 1
r = (@capture x x)
@test r == true
end
using TermInterface
@testset "Matchable struct" begin
struct qux
args
qux(args...) = new(args)
end
TermInterface.operation(::qux) = qux
TermInterface.istree(::qux) = true
TermInterface.arguments(x::qux) = [x.args...]
@capture qux(1, 2) qux(1, 2)
@test (@rule qux(1, 2) => "hello")(qux(1, 2)) == "hello"
@test (@rule qux(1, 2) => "hello")(1) === nothing
@test (@rule 1 => "hello")(1) == "hello"
@test (@rule 1 => "hello")(qux(1, 2)) === nothing
@test (@capture qux(1, 2) qux(1, 2))
@test false == (@capture qux(1, 2) qux(3, 4))
end
| Metatheory | https://github.com/JuliaSymbolics/Metatheory.jl.git |
|
[
"MIT"
] | 2.0.2 | 2cdcf1d7c4ae2d81cc015fae603df5c1b4b8f422 | code | 8139 | # example assuming * operation is always binary
# ENV["JULIA_DEBUG"] = Metatheory
using Metatheory
using Metatheory.Library
using TermInterface
EGraphs.make(::Val{:numberfold}, g::EGraph, n::ENodeLiteral) = n.value
# This should be auto-generated by a macro
function EGraphs.make(::Val{:numberfold}, g::EGraph, n::ENodeTerm)
if exprhead(n) == :call && arity(n) == 2
op = operation(n)
args = arguments(n)
l = g[args[1]]
r = g[args[2]]
ldata = getdata(l, :numberfold, nothing)
rdata = getdata(r, :numberfold, nothing)
if ldata isa Number && rdata isa Number
if op == :*
return ldata * rdata
elseif op == :+
return ldata + rdata
end
end
end
return nothing
end
function EGraphs.join(an::Val{:numberfold}, from, to)
if from isa Number
if to isa Number
@assert from == to
else
return from
end
end
return to
end
function EGraphs.modify!(::Val{:numberfold}, g::EGraph, id::Int64)
eclass = g.classes[id]
d = getdata(eclass, :numberfold, nothing)
if d isa Number
merge!(g, addexpr!(g, d), id)
end
end
EGraphs.islazy(::Val{:numberfold}) = false
comm_monoid = @theory begin
~a * ~b --> ~b * ~a
~a * 1 --> ~a
~a * (~b * ~c) --> (~a * ~b) * ~c
end
G = EGraph(:(3 * 4))
analyze!(G, :numberfold)
# exit(0)
@testset "Basic Constant Folding Example - Commutative Monoid" begin
@test (true == @areequalg G comm_monoid 3 * 4 12)
@test (true == @areequalg G comm_monoid 3 * 4 12 4 * 3 6 * 2)
end
@testset "Basic Constant Folding Example 2 - Commutative Monoid" begin
ex = :(a * 3 * b * 4)
G = EGraph(ex)
analyze!(G, :numberfold)
addexpr!(G, :(12 * a))
@test (true == @areequalg G comm_monoid (12 * a) * b ((6 * 2) * b) * a)
@test (true == @areequalg G comm_monoid (3 * a) * (4 * b) (12 * a) * b ((6 * 2) * b) * a)
end
@testset "Basic Constant Folding Example - Adding analysis after saturation" begin
G = EGraph(:(3 * 4))
# addexpr!(G, 12)
saturate!(G, comm_monoid)
addexpr!(G, :(a * 2))
analyze!(G, :numberfold)
saturate!(G, comm_monoid)
@test (true == areequal(G, comm_monoid, :(3 * 4), 12, :(4 * 3), :(6 * 2)))
ex = :(a * 3 * b * 4)
G = EGraph(ex)
analyze!(G, :numberfold)
params = SaturationParams(timeout = 15)
@test areequal(G, comm_monoid, :((3 * a) * (4 * b)), :((12 * a) * b), :(((6 * 2) * b) * a); params = params)
end
@testset "Infinite Loops analysis" begin
boson = @theory begin
1 * ~x --> ~x
end
G = EGraph(:(1 * x))
params = SaturationParams(timeout = 100)
saturate!(G, boson, params)
ex = extract!(G, astsize)
boson = @theory begin
(:c * :cdag) --> :cdag * :c + 1
~a * (~b + ~c) --> (~a * ~b) + (~a * ~c)
(~b + ~c) * ~a --> (~b * ~a) + (~c * ~a)
# 1 * x => x
(~a * ~b) * ~c --> ~a * (~b * ~c)
~a * (~b * ~c) --> (~a * ~b) * ~c
end
g = EGraph(:(c * c * cdag * cdag))
saturate!(g, boson)
ex = extract!(g, astsize_inv)
end
@testset "Extraction" begin
comm_monoid = @commutative_monoid (*) 1
fold_mul = @theory begin
~a::Number * ~b::Number => ~a * ~b
end
t = comm_monoid ∪ fold_mul
@testset "Extraction 1 - Commutative Monoid" begin
G = EGraph(:(3 * 4))
saturate!(G, t)
@test (12 == extract!(G, astsize))
ex = :(a * 3 * b * 4)
G = EGraph(ex)
params = SaturationParams(timeout = 15)
saturate!(G, t, params)
extr = extract!(G, astsize)
@test extr == :((12 * a) * b) ||
extr == :(12 * (a * b)) ||
extr == :(a * (b * 12)) ||
extr == :((a * b) * 12) ||
extr == :((12a) * b) ||
extr == :(a * (12b)) ||
extr == :((b * (12a))) ||
extr == :((b * 12) * a) ||
extr == :((b * a) * 12) ||
extr == :(b * (a * 12)) ||
extr == :((12b) * a)
end
fold_add = @theory begin
~a::Number + ~b::Number => ~a + ~b
end
@testset "Extraction 2" begin
comm_group = @commutative_group (+) 0 inv
t = comm_monoid ∪ comm_group ∪ (@distrib (*) (+)) ∪ fold_mul ∪ fold_add
# for i ∈ 1:20
# sleep(0.3)
ex = :((x * (a + b)) + (y * (a + b)))
G = EGraph(ex)
saturate!(G, t)
# end
extract!(G, astsize) == :((y + x) * (b + a))
end
@testset "Extraction - Adding analysis after saturation" begin
G = EGraph(:(3 * 4))
addexpr!(G, 12)
saturate!(G, t)
addexpr!(G, :(a * 2))
saturate!(G, t)
saturate!(G, t)
@test (12 == extract!(G, astsize))
# for i ∈ 1:100
ex = :(a * 3 * b * 4)
G = EGraph(ex)
analyze!(G, :numberfold)
params = SaturationParams(timeout = 15)
saturate!(G, comm_monoid, params)
extr = extract!(G, astsize)
@test extr == :((12 * a) * b) ||
extr == :(12 * (a * b)) ||
extr == :(a * (b * 12)) ||
extr == :((a * b) * 12) ||
extr == :((12a) * b) ||
extr == :(a * (12b)) ||
extr == :((b * (12a))) ||
extr == :((b * 12) * a) ||
extr == :((b * a) * 12) ||
extr == :(b * (a * 12))
end
comm_monoid = @commutative_monoid (*) 1
comm_group = @commutative_group (+) 0 inv
powers = @theory begin
~a * ~a → (~a)^2
~a → (~a)^1
(~a)^~n * (~a)^~m → (~a)^(~n + ~m)
end
logids = @theory begin
log((~a)^~n) --> ~n * log(~a)
log(~x * ~y) --> log(~x) + log(~y)
log(1) --> 0
log(:e) --> 1
:e^(log(~x)) --> ~x
end
G = EGraph(:(log(e)))
params = SaturationParams(timeout = 9)
saturate!(G, logids, params)
@test extract!(G, astsize) == 1
t = comm_monoid ∪ comm_group ∪ (@distrib (*) (+)) ∪ powers ∪ logids ∪ fold_mul ∪ fold_add
@testset "Complex Extraction" begin
G = EGraph(:(log(e) * log(e)))
params = SaturationParams(timeout = 9)
saturate!(G, t, params)
@test extract!(G, astsize) == 1
G = EGraph(:(log(e) * (log(e) * e^(log(3)))))
params = SaturationParams(timeout = 7)
saturate!(G, t, params)
@test extract!(G, astsize) == 3
G = EGraph(:(a^3 * a^2))
saturate!(G, t)
ex = extract!(G, astsize)
@test ex == :(a^5)
G = EGraph(:(a^3 * a^2))
saturate!(G, t)
ex = extract!(G, astsize)
@test ex == :(a^5)
function cust_astsize(n::ENodeTerm, g::EGraph)
cost = 1 + arity(n)
if operation(n) == :^
cost += 2
end
for id in arguments(n)
eclass = g[id]
!hasdata(eclass, cust_astsize) && (cost += Inf; break)
cost += last(getdata(eclass, cust_astsize))
end
return cost
end
cust_astsize(n::ENodeLiteral, g::EGraph) = 1
G = EGraph(:((log(e) * log(e)) * (log(a^3 * a^2))))
saturate!(G, t)
ex = extract!(G, cust_astsize)
@test ex == :(5 * log(a)) || ex == :(log(a) * 5)
end
function costfun(n::ENodeTerm, g::EGraph)
arity(n) != 2 && (return 1)
left = arguments(n)[1]
left_class = g[left]
ENodeLiteral(:a) ∈ left_class.nodes ? 1 : 100
end
costfun(n::ENodeLiteral, g::EGraph) = 1
moveright = @theory begin
(:b * (:a * ~c)) --> (:a * (:b * ~c))
end
expr = :(a * (a * (b * (a * b))))
res = rewrite(expr, moveright)
g = EGraph(expr)
saturate!(g, moveright)
resg = extract!(g, costfun)
@testset "Symbols in Right hand" begin
@test resg == res == :(a * (a * (a * (b * b))))
end
function ⋅ end
co = @theory begin
sum(~x ⋅ :bazoo ⋅ :woo) --> sum(:n * ~x)
end
@testset "Consistency with classical backend" begin
ex = :(sum(wa(rio) ⋅ bazoo ⋅ woo))
g = EGraph(ex)
saturate!(g, co)
res = extract!(g, astsize)
resclassic = rewrite(ex, co)
@test res == resclassic
end
@testset "No arguments" begin
ex = :(f())
g = EGraph(ex)
@test :(f()) == extract!(g, astsize)
ex = :(sin() + cos())
t = @theory begin
sin() + cos() --> tan()
end
gg = EGraph(ex)
saturate!(gg, t)
res = extract!(gg, astsize)
@test res == :(tan())
end
@testset "Symbol or function object operators in expressions in EGraphs" begin
ex = :(($+)(x, y))
t = [@rule a b a + b => 2]
g = EGraph(ex)
saturate!(g, t)
@test extract!(g, astsize) == 2
end
end
| Metatheory | https://github.com/JuliaSymbolics/Metatheory.jl.git |
|
[
"MIT"
] | 2.0.2 | 2cdcf1d7c4ae2d81cc015fae603df5c1b4b8f422 | code | 1778 |
# ENV["JULIA_DEBUG"] = Metatheory
using Metatheory
using Metatheory.EGraphs
using Metatheory.EGraphs: in_same_set, find_root
@testset "Merging" begin
testexpr = :((a * 2) / 2)
testmatch = :(a << 1)
G = EGraph(testexpr)
t2 = addexpr!(G, testmatch)
merge!(G, t2, EClassId(3))
@test in_same_set(G.uf, t2, EClassId(3)) == true
# DOES NOT UPWARD MERGE
end
# testexpr = :(42a + b * (foo($(Dict(:x => 2)), 42)))
@testset "Simple congruence - rebuilding" begin
G = EGraph()
ec1 = addexpr!(G, :(f(a, b)))
ec2 = addexpr!(G, :(f(a, c)))
testexpr = :(f(a, b) + f(a, c))
testec = addexpr!(G, testexpr)
t1 = addexpr!(G, :b)
t2 = addexpr!(G, :c)
c_id = merge!(G, t2, t1)
@test in_same_set(G.uf, c_id, t1)
@test in_same_set(G.uf, t2, t1)
rebuild!(G)
@test in_same_set(G.uf, ec1, ec2)
end
@testset "Simple nested congruence" begin
apply(n, f, x) = n == 0 ? x : apply(n - 1, f, f(x))
f(x) = Expr(:call, :f, x)
G = EGraph(:a)
t1 = addexpr!(G, apply(6, f, :a))
t2 = addexpr!(G, apply(9, f, :a))
c_id = merge!(G, t1, EClassId(1)) # a == apply(6,f,a)
c2_id = merge!(G, t2, EClassId(1)) # a == apply(9,f,a)
rebuild!(G)
t3 = addexpr!(G, apply(3, f, :a))
t4 = addexpr!(G, apply(7, f, :a))
# f^m(a) = a = f^n(a) ⟹ f^(gcd(m,n))(a) = a
@test in_same_set(G.uf, t1, EClassId(1)) == true
@test in_same_set(G.uf, t2, EClassId(1)) == true
@test in_same_set(G.uf, t3, EClassId(1)) == true
@test in_same_set(G.uf, t4, EClassId(1)) == false
# if m or n is prime, f(a) = a
t5 = addexpr!(G, apply(11, f, :a))
t6 = addexpr!(G, apply(1, f, :a))
c5_id = merge!(G, t5, EClassId(1)) # a == apply(11,f,a)
rebuild!(G)
@test in_same_set(G.uf, t5, EClassId(1)) == true
@test in_same_set(G.uf, t6, EClassId(1)) == true
end
| Metatheory | https://github.com/JuliaSymbolics/Metatheory.jl.git |
|
[
"MIT"
] | 2.0.2 | 2cdcf1d7c4ae2d81cc015fae603df5c1b4b8f422 | code | 4543 | using Metatheory
using Test
using Metatheory.Library
falseormissing(x) = x === missing || !x
r = @theory begin
max(~x, ~y) → 2 * ~x % ~y
max(~x, ~y) → sin(~x)
sin(~x) → max(~x, ~x)
end
@testset "Basic Equalities 1" begin
@test (@areequal r max(b, c) max(d, d)) == false
end
r = @theory begin
~a * 1 → :foo
~a * 2 → :bar
1 * ~a → :baz
2 * ~a → :mag
end
@testset "Matching Literals" begin
g = EGraph(:(a * 1))
addexpr!(g, :foo)
saturate!(g, r)
@test (@areequal r a * 1 foo) == true
@test (@areequal r a * 2 foo) == false
@test (@areequal r a * 1 bar) == false
@test (@areequal r a * 2 bar) == true
@test (@areequal r 1 * a baz) == true
@test (@areequal r 2 * a baz) == false
@test (@areequal r 1 * a mag) == false
@test (@areequal r 2 * a mag) == true
end
comm_monoid = @commutative_monoid (*) 1
@testset "Basic Equalities - Commutative Monoid" begin
@test true == (@areequal comm_monoid a * (c * (1 * d)) c * (1 * (d * a)))
@test true == (@areequal comm_monoid x * y y * x)
@test true == (@areequal comm_monoid (x * x) * (x * 1) x * (x * x))
end
comm_group = @commutative_group (+) 0 inv
t = comm_monoid ∪ comm_group ∪ (@distrib (*) (+))
@testset "Basic Equalities - Comm. Monoid, Abelian Group, Distributivity" begin
@test true == (@areequal t (a * b) + (a * c) a * (b + c))
@test true == (@areequal t a * (c * (1 * d)) c * (1 * (d * a)))
@test true == (@areequal t a + (b * (c * d)) ((d * c) * b) + a)
@test true == (@areequal t (x + y) * (a + b) ((a * (x + y)) + b * (x + y)) ((x * (a + b)) + y * (a + b)))
@test true == (@areequal t (((x * a + x * b) + y * a) + y * b) (x + y) * (a + b))
@test true == (@areequal t a + (b * (c * d)) ((d * c) * b) + a)
@test true == (@areequal t a + inv(a) 0 (x * y) + inv(x * y) 1 * 0)
end
@testset "Basic Equalities - False statements" begin
@test falseormissing(@areequal t (a * b) + (a * c) a * (b + a))
@test falseormissing(@areequal t (a * c) + (a * c) a * (b + c))
@test falseormissing(@areequal t a * (c * c) c * (1 * (d * a)))
@test falseormissing(@areequal t c + (b * (c * d)) ((d * c) * b) + a)
@test falseormissing(@areequal t (x + y) * (a + c) ((a * (x + y)) + b * (x + y)))
@test falseormissing(@areequal t ((x * (a + b)) + y * (a + b)) (x + y) * (a + c))
@test falseormissing(@areequal t (((x * a + x * b) + y * a) + y * b) (x + y) * (a + x))
@test falseormissing(@areequal t a + (b * (c * a)) ((d * c) * b) + a)
@test falseormissing(@areequal t a + inv(a) a)
@test falseormissing(@areequal t (x * y) + inv(x * y) 1)
end
# Issue 21
simp_theory = @theory begin
sin() => :foo
end
g = EGraph(:(sin()))
saturate!(g, simp_theory)
@test extract!(g, astsize) == :foo
module Bar
foo = 42
export foo
using Metatheory
t = @theory begin
:woo => foo
end
export t
end
module Foo
foo = 12
using Metatheory
t = @theory begin
:woo => foo
end
export t
end
g = EGraph(:woo);
saturate!(g, Bar.t);
saturate!(g, Foo.t);
foo = 12
@testset "Different modules" begin
@test @areequalg g t 42 12
end
comm_monoid = @theory begin
~a * ~b --> ~b * ~a
~a * 1 --> ~a
~a * (~b * ~c) --> (~a * ~b) * ~c
~a::Number * ~b::Number => ~a * ~b
end
G = EGraph(:(3 * 4))
@testset "Basic Constant Folding Example - Commutative Monoid" begin
@test (true == @areequalg G comm_monoid 3 * 4 12)
@test (true == @areequalg G comm_monoid 3 * 4 12 4 * 3 6 * 2)
end
@testset "Basic Constant Folding Example 2 - Commutative Monoid" begin
ex = :(a * 3 * b * 4)
G = EGraph(ex)
@test (true == @areequalg G comm_monoid (3 * a) * (4 * b) (12 * a) * b ((6 * 2) * b) * a)
end
@testset "Type Assertions in Ematcher" begin
some_theory = @theory begin
~a * ~b --> ~b * ~a
~a::Number * ~b::Number --> sin(~a, ~b)
~a::Int64 * ~b::Int64 --> cos(~a, ~b)
~a * (~b * ~c) --> (~a * ~b) * ~c
end
g = EGraph(:(2 * 3))
saturate!(g, some_theory)
@test true == areequal(g, some_theory, :(2 * 3), :(sin(2, 3)))
@test true == areequal(g, some_theory, :(sin(2, 3)), :(cos(3, 2)))
end
Base.iszero(ec::EClass) = ENodeLiteral(0) ∈ ec
@testset "Predicates in Ematcher" begin
some_theory = @theory begin
~a::iszero * ~b --> 0
~a * ~b --> ~b * ~a
end
g = EGraph(:(2 * 3))
saturate!(g, some_theory)
@test true == areequal(g, some_theory, :(a * b * 0), 0)
end
@testset "Inequalities" begin
failme = @theory p begin
p ≠ !p
:foo == !:foo
:foo --> :bazoo
:bazoo --> :wazoo
end
g = EGraph(:foo)
report = saturate!(g, failme)
@test report.reason === :contradiction
end
| Metatheory | https://github.com/JuliaSymbolics/Metatheory.jl.git |
|
[
"MIT"
] | 2.0.2 | 2cdcf1d7c4ae2d81cc015fae603df5c1b4b8f422 | code | 1638 | using Test
using Metatheory
using Metatheory.Library
using Metatheory.EGraphs
using Metatheory.Rules
using Metatheory.EGraphs.Schedulers
function rep(x, op, n::Int)
foldl((x, y) -> :(($op)($x, $y)), repeat([x], n))
end
macro rep(x, op, n::Int)
expr = rep(x, op, n)
esc(expr)
end
rep(:a, :*, 3)
@rule (@rep :a (*) 3) => :b
Mid = @theory a begin
a * :ε --> a
:ε * a --> a
end
Massoc = @theory a b c begin
a * (b * c) --> (a * b) * c
(a * b) * c --> a * (b * c)
end
T = [
@rule :b * :B --> :ε
@rule :a * :a --> :ε
@rule :b * :b * :b --> :ε
@rule :B * :B --> :B
@rule (@rep (:a * :b) (*) 7) --> :ε
@rule (@rep (:a * :b * :a * :B) (*) 7) --> :ε
]
G = Mid ∪ Massoc ∪ T
another_expr = :(b * B)
g = EGraph(another_expr)
saturate!(g, G)
ex = extract!(g, astsize)
@test ex == :ε
another_expr = :(a * a * a * a)
g = EGraph(another_expr)
some_eclass = addexpr!(g, another_expr)
saturate!(g, G)
ex = extract!(g, astsize; root = some_eclass)
@test ex == :ε
another_expr = :(((((((a * b) * (a * b)) * (a * b)) * (a * b)) * (a * b)) * (a * b)) * (a * b))
g = EGraph(another_expr)
some_eclass = addexpr!(g, another_expr)
saturate!(g, G)
ex = extract!(g, astsize; root = some_eclass)
@test ex == :ε
expr = :(a * b * a * a * a * b * b * b * a * B * B * B * B * a)
g = EGraph(expr)
params = SaturationParams(timeout = 9, scheduler = BackoffScheduler)# , schedulerparams=(128,4))#, scheduler=SimpleScheduler)
# params = SaturationParams(timeout = 9, scheduler = SimpleScheduler)# , schedulerparams=(128,4))#, scheduler=SimpleScheduler)
report = saturate!(g, G, params)
ex = extract!(g, astsize)
@test_broken ex == :ε
| Metatheory | https://github.com/JuliaSymbolics/Metatheory.jl.git |
|
[
"MIT"
] | 2.0.2 | 2cdcf1d7c4ae2d81cc015fae603df5c1b4b8f422 | code | 3363 | using Metatheory
using Metatheory.EGraphs
using Metatheory.Library
using TermInterface
using Test
abstract type LambdaExpr end
@matchable struct IfThenElse <: LambdaExpr
guard
then
otherwise
end
@matchable struct Variable <: LambdaExpr
x::Symbol
end
@matchable struct Fix <: LambdaExpr
variable
expression
end
@matchable struct Let <: LambdaExpr
variable
value
body
end
@matchable struct λ <: LambdaExpr
x::Symbol
body
end
@matchable struct Apply <: LambdaExpr
lambda
value
end
@matchable struct Add <: LambdaExpr
x
y
end
TermInterface.exprhead(::LambdaExpr) = :call
function EGraphs.egraph_reconstruct_expression(::Type{<:LambdaExpr}, op, args; metadata = nothing, exprhead = :call)
op(args...)
end
#%%
EGraphs.make(::Val{:freevar}, ::EGraph, n::ENodeLiteral) = Set{Int64}()
function EGraphs.make(::Val{:freevar}, g::EGraph, n::ENodeTerm)
free = Set{Int64}()
if exprhead(n) == :call
op = operation(n)
args = arguments(n)
if op == Variable
push!(free, args[1])
elseif op == Let
v, a, b = args[1:3]
      adata = getdata(g[a], :freevar, Set{Int64}())
      bdata = getdata(g[b], :freevar, Set{Int64}())
      # `v` is bound only in the body `b`, not in the bound value `a`
      union!(free, bdata)
      delete!(free, v)
      union!(free, adata)
elseif op == λ
v, b = args[1:2]
bdata = getdata(g[b], :freevar, Set{Int64}())
union!(free, bdata)
delete!(free, v)
end
end
return free
end
EGraphs.join(::Val{:freevar}, from, to) = union(from, to)
islazy(::Val{:freevar}) = false
open_term = @theory x e then alt a b c begin
# if-true
IfThenElse(true, then, alt) --> then
IfThenElse(false, then, alt) --> alt
# if-elim
IfThenElse(Variable(x) == e, then, alt) =>
if addexpr!(_egraph, Let(x, e, then)) == addexpr!(_egraph, Let(x, e, alt))
alt
else
_lhs_expr
end
Add(a, b) == Add(b, a)
Add(a, Add(b, c)) == Add(Add(a, b), c)
# (a == b) == (b == a)
end
subst_intro = @theory v body e begin
Fix(v, e) --> Let(v, Fix(v, e), e)
# beta reduction
Apply(λ(v, body), e) --> Let(v, e, body)
end
subst_prop = @theory v e a b then alt guard begin
# let-Apply
Let(v, e, Apply(a, b)) --> Apply(Let(v, e, a), Let(v, e, b))
# let-add
Let(v, e, a + b) --> Let(v, e, a) + Let(v, e, b)
# let-eq
# Let(v, e, a == b) --> Let(v, e, a) == Let(v, e, b)
# let-IfThenElse (let-if)
Let(v, e, IfThenElse(guard, then, alt)) --> IfThenElse(Let(v, e, guard), Let(v, e, then), Let(v, e, alt))
end
subst_elim = @theory v e c v1 v2 body begin
# let-const
Let(v, e, c::Any) --> c
# let-Variable-same
Let(v1, e, Variable(v1)) --> e
# TODO fancy let-Variable-diff
Let(v1, e, Variable(v2)) => if addexpr!(_egraph, v1) != addexpr!(_egraph, v2)
:(Variable($v2))
else
_lhs_expr
end
# let-lam-same
Let(v1, e, λ(v1, body)) --> λ(v1, body)
# let-lam-diff #TODO captureavoid
Let(v1, e, λ(v2, body)) => if v2.id ∈ getdata(e, :freevar, Set()) # is free
:(λ($fresh, Let($v1, $e, Let($v2, Variable($fresh), $body))))
else
:(λ($v2, Let($v1, $e, $body)))
end
end
λT = open_term ∪ subst_intro ∪ subst_prop ∪ subst_elim
ex = λ(:x, Add(4, Apply(λ(:y, Variable(:y)), 4)))
g = EGraph(ex)
settermtype!(g, LambdaExpr)
saturate!(g, λT)
@test λ(:x, Add(4, 4)) == extract!(g, astsize) # expected: :(λ(x, 4 + 4))
#%%
@test @areequal λT 2 Apply(λ(x, Variable(x)), 2) | Metatheory | https://github.com/JuliaSymbolics/Metatheory.jl.git |
|
[
"MIT"
] | 2.0.2 | 2cdcf1d7c4ae2d81cc015fae603df5c1b4b8f422 | code | 4956 | using Test
using Metatheory
using TermInterface
function prove(t, ex, steps = 1, timeout = 10, eclasslimit = 5000)
params = SaturationParams(
timeout = timeout,
eclasslimit = eclasslimit,
# scheduler=Schedulers.ScoredScheduler, schedulerparams=(1000,5, Schedulers.exprsize))
scheduler = Schedulers.BackoffScheduler,
schedulerparams = (6000, 5),
)
hist = UInt64[]
push!(hist, hash(ex))
for i in 1:steps
g = EGraph(ex)
exprs = [true, g[g.root]]
ids = [addexpr!(g, e) for e in exprs]
goal = EqualityGoal(exprs, ids)
params.goal = goal
saturate!(g, t, params)
ex = extract!(g, astsize)
if !TermInterface.istree(ex)
return ex
end
if hash(ex) ∈ hist
return ex
end
push!(hist, hash(ex))
end
return ex
end
function ⟹ end
fold = @theory p q begin
(p::Bool == q::Bool) => (p == q)
(p::Bool || q::Bool) => (p || q)
(p::Bool ⟹ q::Bool) => ((p || q) == q)
(p::Bool && q::Bool) => (p && q)
!(p::Bool) => (!p)
end
@testset "Prop logic" begin
or_alg = @theory p q r begin
((p || q) || r) == (p || (q || r))
(p || q) == (q || p)
(p || p) --> p
(p || true) --> true
(p || false) --> p
end
and_alg = @theory p q r begin
((p && q) && r) == (p && (q && r))
(p && q) == (q && p)
(p && p) --> p
(p && true) --> p
(p && false) --> false
end
comb = @theory p q r begin
# DeMorgan
!(p || q) == (!p && !q)
!(p && q) == (!p || !q)
# distrib
(p && (q || r)) == ((p && q) || (p && r))
(p || (q && r)) == ((p || q) && (p || r))
# absorb
(p && (p || q)) --> p
(p || (p && q)) --> p
# complement
(p && (!p || q)) --> p && q
(p || (!p && q)) --> p || q
end
negt = @theory p begin
(p && !p) --> false
(p || !(p)) --> true
!(!p) == p
end
impl = @theory p q begin
(p == !p) --> false
(p == p) --> true
(p == q) --> (!p || q) && (!q || p)
(p ⟹ q) --> (!p || q)
end
t = or_alg ∪ and_alg ∪ comb ∪ negt ∪ impl ∪ fold
ex = rewrite(:(((p ⟹ q) && (r ⟹ s) && (p || r)) ⟹ (q || s)), impl)
@test prove(t, ex, 5, 10, 5000)
@test @areequal t true ((!p == p) == false)
@test @areequal t true ((!p == !p) == true)
@test @areequal t true ((!p || !p) == !p) (!p || p) !(!p && p)
@test @areequal t p (p || p)
@test @areequal t true ((p ⟹ (p || p)))
@test @areequal t true ((p ⟹ (p || p)) == ((!(p) && q) ⟹ q)) == true
# Frege's theorem
@test @areequal t true (p ⟹ (q ⟹ r)) ⟹ ((p ⟹ q) ⟹ (p ⟹ r))
# Demorgan's
@test @areequal t true (!(p || q) == (!p && !q))
# Consensus theorem
# @test_broken @areequal t true ((x && y) || (!x && z) || (y && z)) ((x && y) || (!x && z))
end
# https://www.cs.cornell.edu/gries/Logic/Axioms.html
# The axioms of calculational propositional logic C are listed in the order in
# which they are usually presented and taught. Note that equivalence comes
# first. Note also that, after the first axiom, we take advantage of
# associativity of equivalence and write sequences of equivalences without
# parentheses. We use == for equivalence, | for disjunction, & for conjunction,
# Golden rule: p & q == p == q == p | q
#
# Implication: p ⟹ q == p | q == q
# Consequence: p ⟸ q == q ⟹ p
# Definition of false: false == !true
@testset "Calculational Logic" begin
calc = @theory p q r begin
# Associativity of ==:
((p == q) == r) == (p == (q == r))
# Symmetry of ==:
(p == q) == (q == p)
# Identity of ==:
(q == q) --> true
# Excluded middle
# Distributivity of !:
!(p == q) == (!(p) == q)
# Definition of !=:
(p != q) == !(p == q)
#Associativity of ||:
((p || q) || r) == (p || (q || r))
# Symmetry of ||:
(p || q) == (q || p)
# Idempotency of ||:
(p || p) --> p
# Distributivity of ||:
(p || (q == r)) == (p || q == p || r)
# Excluded Middle:
(p || !(p)) --> true
# DeMorgan
!(p || q) == (!p && !q)
!(p && q) == (!p || !q)
(p && q) == ((p == q) == p || q)
(p ⟹ q) == ((p || q) == q)
end
# t = or_alg ∪ and_alg ∪ neg_alg ∪ demorgan ∪ and_or_distrib ∪
# absorption ∪ calc
t = calc ∪ fold
g = EGraph(:(((!p == p) == false)))
saturate!(g, t)
extract!(g, astsize)
@test @areequal t true ((!p == p) == false)
@test @areequal t true ((!p == !p) == true)
@test @areequal t true ((!p || !p) == !p) (!p || p) !(!p && p)
@test @areequal t true ((p ⟹ (p || p)) == true)
params = SaturationParams(timeout = 12, eclasslimit = 10000, schedulerparams = (1000, 5))
@test areequal(t, true, :(((p ⟹ (p || p)) == ((!(p) && q) ⟹ q)) == true); params = params)
# Frege's theorem
@test areequal(t, true, :((p ⟹ (q ⟹ r)) ⟹ ((p ⟹ q) ⟹ (p ⟹ r))); params = params)
# Demorgan's
@test @areequal t true (!(p || q) == (!p && !q))
# Consensus theorem
areequal(t, :((x && y) || (!x && z) || (y && z)), :((x && y) || (!x && z)); params = params)
end
| Metatheory | https://github.com/JuliaSymbolics/Metatheory.jl.git |
|
[
"MIT"
] | 2.0.2 | 2cdcf1d7c4ae2d81cc015fae603df5c1b4b8f422 | code | 3002 | using Metatheory
using Metatheory.Rewriters
using Test
using TermInterface
# using SymbolicUtils
apply(f, x) = f(x)
fand(f, g) = x -> f(x) && g(x)
array_theory = @theory x y f g M N begin
#map(f,x)[n:m] = map(f,x[n:m]) # but does NOT commute with filter
map(f, fill(x, N)) == fill(apply(f, x), N) # hmm
# cumsum(fill(x,N)) == collect(x:x:(N*x))
fill(x, N)[y] --> x
length(fill(x, N)) --> N
reverse(reverse(x)) --> x
sum(fill(x, N)) --> x * N
map(f, reverse(x)) == reverse(map(f, x))
filter(f, reverse(x)) == reverse(filter(f, x))
reverse(fill(x, N)) == fill(x, N)
filter(f, fill(x, N)) == (
if apply(f, x)
fill(x, N)
else
fill(x, 0)
end
)
filter(f, filter(g, x)) == filter(fand(f, g), x) # using functional &&
cat(fill(x, N), fill(x, M)) == fill(x, N + M)
cat(map(f, x), map(f, y)) == map(f, cat(x, y))
map(f, cat(x, y)) == cat(map(f, x), map(f, y))
map(f, map(g, x)) == map(f ∘ g, x)
reverse(cat(x, y)) == cat(reverse(y), reverse(x))
map(f, x)[y] == apply(f, x[y])
apply(f ∘ g, x) == apply(f, apply(g, x))
reduce(g, map(f, x)) == mapreduce(f, g, x)
foldl(g, map(f, x)) == mapfoldl(f, g, x)
foldr(g, map(f, x)) == mapfoldr(f, g, x)
end
asymptot_t = @theory x y z n m f g begin
(length(filter(f, x)) <= length(x)) => true
length(cat(x, y)) --> length(x) + length(y)
  length(map(f, x)) --> length(x)
length(x::UnitRange) => length(x)
end
fold_theory = @theory x y z begin
x::Number * y::Number => x * y
x::Number + y::Number => x + y
x::Number / y::Number => x / y
  x::Number - y::Number => x - y
# etc...
end
# Simplify expressions like :(d->3:size(A,d)-3) given an explicit value for d
import Base.Cartesian: inlineanonymous
tryinlineanonymous(x) = nothing
function tryinlineanonymous(ex::Expr)
exprhead(ex) != :call && return nothing
f = operation(ex)
(!(f isa Expr) || exprhead(f) !== :->) && return nothing
arg = arguments(ex)[1]
try
return inlineanonymous(f, arg)
catch e
return nothing
end
end
normalize_theory = @theory x y z f g begin
fand(f, g) => Expr(:->, :x, :(($f)(x) && ($g)(x)))
apply(f, x) => Expr(:call, f, x)
end
params = SaturationParams()
function stream_optimize(ex)
g = EGraph(ex)
saturate!(g, array_theory, params)
ex = extract!(g, astsize) # TODO cost fun with asymptotic complexity
ex = Fixpoint(Postwalk(Chain([tryinlineanonymous, normalize_theory..., fold_theory...])))(ex)
return ex
end
build_fun(ex) = eval(:(() -> $ex))
@testset "Stream Fusion" begin
ex = :(map(x -> 7 * x, fill(3, 4)))
opt = stream_optimize(ex)
@test opt == :(fill(21, 4))
ex = :(map(x -> 7 * x, fill(3, 4))[1])
opt = stream_optimize(ex)
@test opt == 21
end
# ['a','1','2','3','4']
ex = :(filter(ispow2, filter(iseven, reverse(reverse(fill(4, 100))))))
opt = stream_optimize(ex)
ex = :(map(x -> 7 * x, reverse(reverse(fill(13, 40)))))
opt = stream_optimize(ex)
opt = stream_optimize(opt)
macro stream_optimize(ex)
stream_optimize(ex)
end
| Metatheory | https://github.com/JuliaSymbolics/Metatheory.jl.git |
|
[
"MIT"
] | 2.0.2 | 2cdcf1d7c4ae2d81cc015fae603df5c1b4b8f422 | code | 753 | using Metatheory
struct Σ end
taylor = @theory x a b begin
exp(x) --> Σ(x^:n / factorial(big(:n)))
cos(x) --> Σ((-1)^:n * x^2(:n) / factorial(big(2 * :n)))
Σ(a) + Σ(b) --> Σ(a + b)
end
macro expand(iters)
quote
@rule a Σ(a) --> sum((:n -> a), $(0:iters))
end
end
a = rewrite(:(exp(x) + cos(x)), taylor)
r = @expand(5000)
# r = expand(5000)
bexpr = rewrite(a, [r])
# you may want to do algebraic simplification
# with egraphs here
x = big(42)
b = eval(bexpr)
# 1.739274941520501044994695988622883932193276720547806372656638132701531037200611e+18
exp(x) + cos(x)
# 1.739274941520501046994695988622883932193276720547806372656638132701531037200651e+18
@testset "Infinite Series Approximation" begin
@test b ≈ (exp(x) + cos(x))
end
| Metatheory | https://github.com/JuliaSymbolics/Metatheory.jl.git |
|
[
"MIT"
] | 2.0.2 | 2cdcf1d7c4ae2d81cc015fae603df5c1b4b8f422 | code | 3967 |
## Turing Complete Interpreter
### A Very Tiny Turing Complete Programming Language defined with denotational semantics
# semantica dalle dispense degano
using Metatheory, Test
import Base.ImmutableDict
Mem = Dict{Symbol,Union{Bool,Int}}
read_mem = @theory v σ begin
(v::Symbol, σ::Mem) => if v == :skip
σ
else
σ[v]
end
end
@testset "Reading Memory" begin
ex = :((x), $(Mem(:x => 2)))
@test true == areequal(read_mem, ex, 2)
end
arithm_rules = @theory a b σ begin
(a + b, σ::Mem) --> (a, σ) + (b, σ)
(a * b, σ::Mem) --> (a, σ) * (b, σ)
(a - b, σ::Mem) --> (a, σ) - (b, σ)
(a::Int, σ::Mem) --> a
(a::Int + b::Int) => a + b
(a::Int * b::Int) => a * b
(a::Int - b::Int) => a - b
end
@testset "Arithmetic" begin
@test areequal(read_mem ∪ arithm_rules, :((2 + 3), $(Mem())), 5)
end
# don't need to access memory
bool_rules = @theory a b σ begin
(a < b, σ::Mem) --> (a, σ) < (b, σ)
(a || b, σ::Mem) --> (a, σ) || (b, σ)
(a && b, σ::Mem) --> (a, σ) && (b, σ)
(!(a), σ::Mem) --> !((a, σ))
(a::Bool, σ::Mem) => a
(!a::Bool) => !a
(a::Bool || b::Bool) => (a || b)
(a::Bool && b::Bool) => (a && b)
(a::Int < b::Int) => (a < b)
end
t = read_mem ∪ arithm_rules ∪ bool_rules
@testset "Booleans" begin
@test areequal(t, :((false || false), $(Mem())), false)
exx = :((false || false) || !(false || false), $(Mem(:x => 2)))
g = EGraph(exx)
saturate!(g, t)
ex = extract!(g, astsize)
@test ex == true
params = SaturationParams(timeout = 12)
@test areequal(t, exx, true; params = params)
@test areequal(t, :((2 < 3) && (3 < 4), $(Mem(:x => 2))), true)
@test areequal(t, :((2 < x) || !(3 < 4), $(Mem(:x => 2))), false)
@test areequal(t, :((2 < x) || !(3 < 4), $(Mem(:x => 4))), true)
end
if_rules = @theory guard t f σ begin
(
if guard
t
end
) --> (
if guard
t
else
:skip
end
)
(if guard
t
else
f
end, σ::Mem) --> (if (guard, σ)
t
else
f
end, σ)
(if true
t
else
f
end, σ::Mem) --> (t, σ)
(if false
t
else
f
end, σ::Mem) --> (f, σ)
end
if_language = read_mem ∪ arithm_rules ∪ bool_rules ∪ if_rules
@testset "If Semantics" begin
@test areequal(if_language, 2, :(if true
x
else
0
end, $(Mem(:x => 2))))
@test areequal(if_language, 0, :(if false
x
else
0
end, $(Mem(:x => 2))))
@test areequal(if_language, 2, :(if !(false)
x
else
0
end, $(Mem(:x => 2))))
params = SaturationParams(timeout = 10)
@test areequal(if_language, 0, :(if !(2 < x)
x
else
0
end, $(Mem(:x => 3))); params = params)
end
while_rules = @theory a b σ begin
(:skip, σ::Mem) --> σ
((a; b), σ::Mem) --> ((a, σ); b)
(a::Int; b) --> b
(a::Bool; b) --> b
(σ::Mem; b) --> (b, σ)
(while a
b
end, σ::Mem) --> (if a
(b;
while a
b
end)
else
:skip
end, σ)
end
write_mem = @theory sym val σ begin
(sym::Symbol = val, σ::Mem) --> (sym = (val, σ), σ)
(sym::Symbol = val::Int, σ::Mem) => merge(σ, Dict(sym => val))
end
while_language = if_language ∪ write_mem ∪ while_rules;
@testset "While Semantics" begin
exx = :((x = 3), $(Mem(:x => 2)))
g = EGraph(exx)
saturate!(g, while_language)
ex = extract!(g, astsize)
@test areequal(while_language, Mem(:x => 3), exx)
exx = :((x = 4; x = x + 1), $(Mem(:x => 3)))
g = EGraph(exx)
saturate!(g, while_language)
ex = extract!(g, astsize)
params = SaturationParams(timeout = 10)
@test areequal(while_language, Mem(:x => 5), exx; params = params)
params = SaturationParams(timeout = 14, timer=false)
exx = :((
if x < 10
x = x + 1
else
skip
end
), $(Mem(:x => 3)))
@test areequal(while_language, Mem(:x => 4), exx; params = params)
exx = :((while x < 10
x = x + 1
end;
x), $(Mem(:x => 3)))
g = EGraph(exx)
params = SaturationParams(timeout = 100)
saturate!(g, while_language, params)
@test 10 == extract!(g, astsize)
end
| Metatheory | https://github.com/JuliaSymbolics/Metatheory.jl.git |
|
[
"MIT"
] | 2.0.2 | 2cdcf1d7c4ae2d81cc015fae603df5c1b4b8f422 | code | 6416 | using Test
using Metatheory
using Metatheory.Library
using Metatheory.Schedulers
using TermInterface
mult_t = @commutative_monoid (*) 1
plus_t = @commutative_monoid (+) 0
minus_t = @theory a b begin
# TODO Jacques Carette's post in zulip chat
a - a --> 0
a - b --> a + (-1 * b)
-a --> -1 * a
a + (-b) --> a + (-1 * b)
end
mulplus_t = @theory a b c begin
# TODO FIXME these rules improves performance and avoids commutative
# explosion of the egraph
a + a --> 2 * a
0 * a --> 0
a * 0 --> 0
a * (b + c) == ((a * b) + (a * c))
a + (b * a) --> ((b + 1) * a)
end
pow_t = @theory x y z n m p q begin
(y^n) * y --> y^(n + 1)
x^n * x^m == x^(n + m)
(x * y)^z == x^z * y^z
(x^p)^q == x^(p * q)
x^0 --> 1
0^x --> 0
1^x --> 1
x^1 --> x
x * x --> x^2
inv(x) == x^(-1)
end
div_t = @theory x y z begin
x / 1 --> x
# x / x => 1 TODO SIGN ANALYSIS
x / (x / y) --> y
x * (y / x) --> y
x * (y / z) == (x * y) / z
x^(-1) == 1 / x
end
trig_t = @theory θ begin
sin(θ)^2 + cos(θ)^2 --> 1
sin(θ)^2 - 1 --> cos(θ)^2
cos(θ)^2 - 1 --> sin(θ)^2
tan(θ)^2 - sec(θ)^2 --> 1
tan(θ)^2 + 1 --> sec(θ)^2
sec(θ)^2 - 1 --> tan(θ)^2
cot(θ)^2 - csc(θ)^2 --> 1
cot(θ)^2 + 1 --> csc(θ)^2
csc(θ)^2 - 1 --> cot(θ)^2
end
# Dynamic rules
fold_t = @theory a b begin
-(a::Number) => -a
a::Number + b::Number => a + b
a::Number * b::Number => a * b
a::Number^b::Number => begin
b < 0 && a isa Int && (a = float(a))
a^b
end
a::Number / b::Number => a / b
end
using Calculus: differentiate
function ∂ end
diff_t = @theory x y begin
∂(y, x::Symbol) => begin
z = extract!(_egraph, simplcost; root = y.id)
differentiate(z, x)
end
end
cas = fold_t ∪ mult_t ∪ plus_t ∪ minus_t ∪ mulplus_t ∪ pow_t ∪ div_t ∪ trig_t ∪ diff_t
function customlt(x, y)
if typeof(x) == Expr && typeof(y) == Expr
false
elseif typeof(x) == typeof(y)
isless(x, y)
elseif x isa Symbol && y isa Number
false
elseif x isa Expr && y isa Number
false
elseif x isa Expr && y isa Symbol
false
else
true
end
end
canonical_t = @theory x y n xs ys begin
# restore n-arity
(x * x) --> x^2
(x^n::Number * x) --> x^(n + 1)
(x * x^n::Number) --> x^(n + 1)
(x + (+)(ys...)) --> +(x, ys...)
((+)(xs...) + y) --> +(xs..., y)
(x * (*)(ys...)) --> *(x, ys...)
((*)(xs...) * y) --> *(xs..., y)
(*)(xs...) => Expr(:call, :*, sort!(xs; lt = customlt)...)
(+)(xs...) => Expr(:call, :+, sort!(xs; lt = customlt)...)
end
function simplcost(n::ENodeTerm, g::EGraph)
cost = 0 + arity(n)
if operation(n) == :∂
cost += 20
end
for id in arguments(n)
eclass = g[id]
!hasdata(eclass, simplcost) && (cost += Inf; break)
cost += last(getdata(eclass, simplcost))
end
return cost
end
simplcost(n::ENodeLiteral, g::EGraph) = 0
function simplify(ex; steps = 4)
params = SaturationParams(
scheduler = ScoredScheduler,
eclasslimit = 5000,
timeout = 7,
schedulerparams = (1000, 5, Schedulers.exprsize),
#stopwhen=stopwhen,
)
hist = UInt64[]
push!(hist, hash(ex))
for i in 1:steps
g = EGraph(ex)
    saturate!(g, cas, params)
ex = extract!(g, simplcost)
ex = rewrite(ex, canonical_t)
if !TermInterface.istree(ex)
return ex
end
if hash(ex) ∈ hist
return ex
end
push!(hist, hash(ex))
end
end
@test :(4a) == simplify(:(2a + a + a))
@test :(a * b * c) == simplify(:(a * c * b))
@test :(2x) == simplify(:(1 * x * 2))
@test :((a * b)^2) == simplify(:((a * b)^2))
@test :((a * b)^6) == simplify(:((a^2 * b^2)^3))
@test :(a + b + d) == simplify(:(a + b + (0 * c) + d))
@test :(a + b) == simplify(:(a + b + (c * 0) + d - d))
@test :(a) == simplify(:((a + d) - d))
@test :(a + b + d) == simplify(:(a + b * c^0 + d))
@test :(a * b * x^(d + y)) == simplify(:(a * x^y * b * x^d))
@test :(a * b * x^74103) == simplify(:(a * x^(12 + 3) * b * x^(42^3)))
@test 1 == simplify(:((x + y)^(a * 0) / (y + x)^0))
@test 2 == simplify(:(cos(x)^2 + 1 + sin(x)^2))
@test 2 == simplify(:(cos(y)^2 + 1 + sin(y)^2))
@test 2 == simplify(:(sin(y)^2 + cos(y)^2 + 1))
@test :(y + sec(x)^2) == simplify(:(1 + y + tan(x)^2))
@test :(y + csc(x)^2) == simplify(:(1 + y + cot(x)^2))
# simplify(:( ∂(x^2, x)))
simplify(:(∂(x^(cos(x)), x)))
@test :(2x^3) == simplify(:(x * ∂(x^2, x) * x))
# @simplify ∂(y^3, y) * ∂(x^2 + 2, x) / y * x
# @simplify (6 * x * x * y)
# @simplify ∂(y^3, y) / y
# # ex = :( ∂(x^(cos(x)), x) )
# ex = :( (6 * x * x * y) )
# g = EGraph(ex)
# saturate!(g, cas)
# g.classes
# extract!(g, simplcost; root=g.root)
# params = SaturationParams(
# scheduler=BackoffScheduler,
# eclasslimit=5000,
# timeout=7,
# schedulerparams=(1000,5),
# #stopwhen=stopwhen,
# )
# ex = :((x+y)^(a*0) / (y+x)^0)
# g = EGraph(ex)
# @profview println(saturate!(g, cas, params))
# ex = extract!(g, simplcost)
# ex = rewrite(ex, canonical_t; clean=false)
# FIXME this is a hack to get the test to work.
if VERSION < v"1.9.0-DEV"
function EGraphs.make(::Val{:type_analysis}, g::EGraph, n::ENodeLiteral)
v = n.value
if v == :im
typeof(im)
else
typeof(v)
end
end
function EGraphs.make(::Val{:type_analysis}, g::EGraph, n::ENodeTerm)
symtype(n) !== Expr && return Any
if exprhead(n) != :call
# println("$n is not a call")
t = Any
# println("analyzed type of $n is $t")
return t
end
sym = operation(n)
if !(sym isa Symbol)
# println("head $sym is not a symbol")
t = Any
# println("analyzed type of $n is $t")
return t
end
symval = getfield(@__MODULE__, sym)
child_classes = map(x -> g[x], arguments(n))
child_types = Tuple(map(x -> getdata(x, :type_analysis, Any), child_classes))
# t = t_arr[1]
t = Core.Compiler.return_type(symval, child_types)
if t == Union{}
throw(MethodError(symval, child_types))
end
# println("analyzed type of $n is $t")
return t
end
EGraphs.join(::Val{:type_analysis}, from, to) = typejoin(from, to)
EGraphs.islazy(::Val{:type_analysis}) = true
function infer(e)
g = EGraph(e)
analyze!(g, :type_analysis)
getdata(g[g.root], :type_analysis)
end
ex1 = :(cos(1 + 3.0) + 4 + (4 - 4im))
ex2 = :("ciao" * 2)
ex3 = :("ciao" * " mondo")
@test ComplexF64 == infer(ex1)
@test_throws MethodError infer(ex2)
@test String == infer(ex3)
end
| Metatheory | https://github.com/JuliaSymbolics/Metatheory.jl.git |
|
[
"MIT"
] | 2.0.2 | 2cdcf1d7c4ae2d81cc015fae603df5c1b4b8f422 | code | 4731 | # # Interfacing with Metatheory.jl
# This section is for Julia package developers who may want to use the rule
# rewriting systems on their own expression types.
# ## Defining the interface
#
# Metatheory.jl matchers can match any Julia object that implements an interface
# to traverse it as a tree. The interface in question, is defined in the
# [TermInterface.jl](https://github.com/JuliaSymbolics/TermInterface.jl) package.
# Its purpose is to provide a shared interface between various symbolic
# programming Julia packages.
# In particular, you should define methods from TermInterface.jl for an expression
# tree type `T` with symbol types `S` to work with SymbolicUtils.jl
# You can read the documentation of
# [TermInterface.jl](https://github.com/JuliaSymbolics/TermInterface.jl) on the
# [Github repository](https://github.com/JuliaSymbolics/TermInterface.jl).
# ## Concrete example
using Metatheory, TermInterface, Test
using Metatheory.EGraphs
# We first define our custom expression type in `MyExpr`:
# It behaves like `Expr`, but it adds some extra fields.
struct MyExpr
head::Any
args::Vector{Any}
foo::String # additional metadata
end
MyExpr(head, args) = MyExpr(head, args, "")
MyExpr(head) = MyExpr(head, [])
# We also need to define equality for our expression.
function Base.:(==)(a::MyExpr, b::MyExpr)
a.head == b.head && a.args == b.args && a.foo == b.foo
end
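# (Structural equality is what the final test of this tutorial relies on when
# comparing the extracted expression with the expected one.)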
# ## Overriding `TermInterface`` methods
# First, we need to discern when an expression is a leaf or a tree node.
# We can do it by overriding `istree`.
TermInterface.istree(::MyExpr) = true
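# Leaf values such as numbers and symbols need nothing extra here: TermInterface's
# generic fallback already reports plain values as non-tree nodes.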
# The `operation` function tells us which operation the node represents.
TermInterface.operation(e::MyExpr) = e.head
# `arguments` tells the system how to extract the child nodes.
TermInterface.arguments(e::MyExpr) = e.args
# A particular function is `exprhead`. It is used to bridge our custom `MyExpr`
# type with the `Expr` patterns used in Metatheory rule syntax.
# In this example we state that all expressions of type `MyExpr` can be represented (and matched against) by
# patterns whose head is a `:call` `Expr`.
TermInterface.exprhead(::MyExpr) = :call
# While for common usage you will almost always define `exprhead` to be `:call`,
# there are some cases where you would like to match your expression types
# against more complex patterns. For example, to match an expression `x` against an `a[b]` kind of pattern,
# you would need to inform the system that `exprhead(x)` is `:ref`, because
ex = :(a[b])
(ex.head, ex.args)
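# which evaluates to `(:ref, Any[:a, :b])`: the head of an indexing expression is `:ref`, not `:call`.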
# `metadata` should return the extra metadata. If you have many fields, I suggest using a `NamedTuple`.
TermInterface.metadata(e::MyExpr) = e.foo
# Additionally, you can override `EGraphs.preprocess` on your custom expression
# to pre-process any expression before insertion in the E-Graph.
# In this example, we always `uppercase` the `foo::String` field of `MyExpr`.
EGraphs.preprocess(e::MyExpr) = MyExpr(e.head, e.args, uppercase(e.foo))
# `TermInterface` provides a very important function called `similarterm`.
# It is used to create a term within the same closure of types as `x`.
# Given an existing term `x`, it instructs Metatheory how to recompose
# a similar expression, given a `head` (the result of `operation`), some children (given by `arguments`)
# and, additionally, `metadata` and `exprhead`, in case you are recomposing an `Expr`.
function TermInterface.similarterm(x::MyExpr, head, args; metadata = nothing, exprhead = :call)
MyExpr(head, args, isnothing(metadata) ? "" : metadata)
end
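# A quick usage sketch (the concrete values below are made up for illustration):
#
#   x = MyExpr(:f, [1, 2], "meta")
#   TermInterface.similarterm(x, :g, [3, 4])   # MyExpr(:g, [3, 4], "")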
# `similarterm` works by making a new term similar to an existing reference term `x`,
# but in the e-graph system no such reference object is available:
# only the type of the expression being rebuilt is known. This extra function adds a bit of verbosity,
# and exists for compatibility with SymbolicUtils.jl.
function EGraphs.egraph_reconstruct_expression(::Type{MyExpr}, op, args; metadata = nothing, exprhead = nothing)
MyExpr(op, args, (isnothing(metadata) ? () : metadata))
end
# ## Theory Example
# Note that terms in the RHS will inherit the type of terms in the LHS.
t = @theory a begin
f(z(2), a) --> f(a)
end
# Let's create an example expression and e-graph
hcall = MyExpr(:h, [4], "hello")
ex = MyExpr(:f, [MyExpr(:z, [2]), hcall])
g = EGraph(ex; keepmeta = true)
# We use `settermtype!` on an existing e-graph to inform the system about
# the *default* type that we want newly added expressions to have.
settermtype!(g, MyExpr)
# Now let's test that it works.
saturate!(g, t)
expected = MyExpr(:f, [MyExpr(:h, [4], "HELLO")], "")
extracted = extract!(g, astsize)
@test expected == extracted
| Metatheory | https://github.com/JuliaSymbolics/Metatheory.jl.git |
|
[
"MIT"
] | 2.0.2 | 2cdcf1d7c4ae2d81cc015fae603df5c1b4b8f422 | code | 581 | # # Benchmarking Fibonacci. E-Graphs memoize computation.
using Metatheory
using Test
function fib end
fibo = @theory x y n begin
x::Int + y::Int => x + y
fib(n::Int) => (n < 2 ? n : :(fib($(n - 1)) + fib($(n - 2))))
end
params = SaturationParams(timeout = 60)
# We run the saturation twice to see a result that does not include compilation time.
g = EGraph(:(fib(10)))
saturate!(g, fibo, params)
# That's fast!
z = EGraph(:(fib(10)))
saturate!(z, fibo, params)
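# To see the effect yourself, a rough (purely illustrative) way is to time both runs:
#
#   @time saturate!(EGraph(:(fib(10))), fibo, params)  # first run pays compilation
#   @time saturate!(EGraph(:(fib(10))), fibo, params)  # warm run is much faster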
# We can test that the result is correct.
@testset "Fibonacci" begin
@test 55 == extract!(g, astsize)
end
| Metatheory | https://github.com/JuliaSymbolics/Metatheory.jl.git |
|
[
"MIT"
] | 2.0.2 | 2cdcf1d7c4ae2d81cc015fae603df5c1b4b8f422 | code | 1045 | # # The MU Puzzle
# The puzzle cannot be solved: it is impossible to change the string MI into MU
# by repeatedly applying the given rules. In other words, MU is not a theorem of
# the MIU formal system. To prove this, one must step "outside" the formal system
# itself. [Wikipedia](https://en.wikipedia.org/wiki/MU_puzzle#Solution)
using Metatheory, Test
# Here are the axioms of MU:
# * Composition of the string monoid is associative
# * Add a U to the end of any string ending in I
# * Double the string after the M
# * Replace any III with a U
# * Remove any UU
function ⋅ end
miu = @theory x y z begin
x ⋅ (y ⋅ z) --> (x ⋅ y) ⋅ z
x ⋅ :I ⋅ :END --> x ⋅ :I ⋅ :U ⋅ :END
:M ⋅ x ⋅ :END --> :M ⋅ x ⋅ x ⋅ :END
:I ⋅ :I ⋅ :I --> :U
x ⋅ :U ⋅ :U ⋅ y --> x ⋅ y
end
# No matter the timeout we set here,
# MU is not a theorem of the MIU system
params = SaturationParams(timeout = 12, eclasslimit = 8000)
start = :(M ⋅ I ⋅ END)
g = EGraph(start)
saturate!(g, miu)
@test false == areequal(g, miu, start, :(M ⋅ U ⋅ END); params = params)
| Metatheory | https://github.com/JuliaSymbolics/Metatheory.jl.git |
|
[
"MIT"
] | 2.0.2 | 2cdcf1d7c4ae2d81cc015fae603df5c1b4b8f422 | code | 11386 |
#=
# Write a very tiny Turing Complete language in Julia.
WHILE is a very tiny Turing-complete programming language defined by denotational semantics.
Semantics come from the excellent
[course notes](http://pages.di.unipi.it/degano/ECC-uno.pdf) in *"Elements of computability and
complexity"* by prof. [Pierpaolo Degano](http://pages.di.unipi.it/degano/).
It is a toy C-like language used to explain the core concepts of computability and Turing-completeness.
The name WHILE comes from the fact that the most complicated construct in the language is a WHILE loop.
The language supports:
* A variable-value memory that can be pre-defined for program input.
* Integer arithmetics.
* Boolean logic.
* Conditional if-then-else statement called `cond`.
* Running a command after another with `seq(c1,c2)`.
* Repeatedly applying a command `c` while a condition `g` holds with `loop(g,c)`.
This is enough to be Turing-complete!
We are going to implement this tiny imperative language with classical rewriting rules in [Metatheory.jl](https://github.com/JuliaSymbolics/Metatheory.jl/).
WHILE is implemented in around 55 readable lines of code, and reaches around 80 lines with tests.
The goal of this tutorial is to show an implementation of a programming language interpreter that stays very close to the
simple theory used to describe it in a textbook. Each denotational semantics rule in the course notes is a Metatheory.jl rewrite rule, with a few extras and minor naming changes.
The idea is that Julia is a genuinely good didactic programming language!
=#
# Let's load the Metatheory and Test packages.
using Test, Metatheory
# ## Memory
# The first thing that our programming language needs is a model of the *computer memory*,
# which is going to hold the state of the programs. We define the type of
# WHILE's memory as a map from variables (Julia `Symbol`s) to actual values.
# We want to keep things simple, so in our toy programming language we are just going to use boolean or integer values. Surprisingly, we can still achieve Turing completeness without having to introduce strings or any other complex data type.
# We are going to use the letter `σ` (sigma) to denote an actual value of type `Mem`, that is, the state of a program at a given moment.
# For example, if a `σ::Mem` holds the value `σ[:a] = 2`, this means that at that given moment, in our program
# the variable `a` holds the value 2.
Mem = Dict{Symbol,Union{Bool,Int}}
# We are now ready to define our first rewrite rule.
# In WHILE, un-evaluated expressions are represented by a tuple of `(program, state)`.
# This simple rule tells us that, if at a given memory state `σ` we want to know the value of a variable `v`, we
# can simply read it from the memory and return the value.
read_mem = @theory v σ begin
(v::Symbol, σ::Mem) => σ[v]
end
# Let's test this behavior. We first create a `Mem`, holding the variable `x` with value 2.
σ₁ = Mem(:x => 2)
# Then, we define a program. Julia helps us avoid unneeded complications.
# Generally, to create an interpreted programming language, one would have to design a syntax for it, and then engineer components such as
# a lexer or a [parser](https://en.wikipedia.org/wiki/Parsing) in order to turn the input string into a manipulable, structured program.
# The Julia developers were really smart: we can directly re-use the whole Julia syntax, because Julia
# allows us to treat programs as values. You can try this by wrapping any expression you type in the REPL in `:( ... )` or `quote ... end`.
# If you type this in the Julia REPL:
2 + 2
# You get the obvious result out, but if you wrap it in `quote` or `:(...)`, you can see that the program will not be executed, but instead stored as an `Expr`.
some_expr = :(2 + 2)
# We can use the `$` unary operator to interpolate and insert values inside of quoted code.
:(2 + $(1 + 1))
# These code-manipulation utilities can be very useful, because we can completely skip the burden of having to write a new syntax for our educational programming language, and just
# re-use Julia's syntax. This hints that Julia is very powerful, because you can define new semantics and customize the language's behaviour without
# having to leave the comfort of the Julia terminal. This is also how Julia macros (`@macro`) work.
# The practice of manipulating programs in the language itself is called **Metaprogramming**,
# and you can read more about metaprogramming in Julia [in the official docs](https://docs.julialang.org/en/v1/manual/metaprogramming/).
# Let's test that our first, simple rule is working.
program = :(x, $σ₁)
@test rewrite(program, read_mem) == 2
# ## Arithmetics
# How can our programming language be Turing complete if we do not include basic arithmetic?
# If we have an integer and a memory state, we can just keep the integer.
# The following rules are the first cases of recursion.
# Given two expressions `a, b`, to know what `a + b` is in state `σ`,
# we first need to know what `a` and `b` are in state `σ`.
# The last three dynamic rules let us directly evaluate arithmetic operations.
arithm_rules = @theory a b n σ begin
(n::Int, σ::Mem) --> n
(a + b, σ::Mem) --> (a, σ) + (b, σ)
(a * b, σ::Mem) --> (a, σ) * (b, σ)
(a - b, σ::Mem) --> (a, σ) - (b, σ)
(a::Int + b::Int) => a + b
(a::Int * b::Int) => a * b
(a::Int - b::Int) => a - b
end
# ## Evaluation strategy
# We now have some nice denotational semantic rules for arithmetics, but in what order should we apply them?
# Metatheory.jl provides a flexible rewriter combinator library. You can read more in the [Rewriters](@ref) module docs.
#
# Given a set of rules, we can define a rewriter strategy by functionally composing rewriters.
# First, we want to use `Chain` to combine together the many rules in the theory, and to try to apply them one-by-one on our expressions.
#
# But should we first evaluate the outermost operations in the expression, or the innermost?
# Intuitively, if we have the program `(1 + 2) - 3`, we want to evaluate the innermost expression `(1 + 2)` first.
# To do so, we then pass the result to the [Postwalk](@ref) rewriter, which recursively walks the input expression tree, and applies the rewriter first on
# the inner expressions, and then, on the outer, rewritten expression. (Hence the name `Post`-walk. Can you guess what [Prewalk](@ref) does?).
#
# The last component of our strategy is the [Fixpoint](@ref) combinator. This combinator repeatedly applies the rewriter on the input expression,
# and it stops looping only when the output expression is unchanged from the input.
using Metatheory.Rewriters
strategy = (Fixpoint ∘ Postwalk ∘ Chain)
# In Metatheory.jl, rewrite theories are just vectors of [Rules](@ref). It means we can compose them by concatenating the vectors, or elegantly using the
# built-in set operations provided by the Julia language.
arithm_lang = read_mem ∪ arithm_rules
# We can define a convenience function that takes an expression, a memory state and calls our strategy.
eval_arithm(ex, mem) = strategy(arithm_lang)(:($ex, $mem))
# Does it work?
@test eval_arithm(:(2 + 3), Mem()) == 5
# Yay! Let's say that before the program started, the computer memory already held a variable `x` with value 2.
@test eval_arithm(:(2 + x), Mem(:x => 2)) == 4
# ## Boolean Logic
# To be Turing-complete, our tiny WHILE language requires boolean logic support.
# There's nothing special or different from other programming languages. These rules
# define boolean operations to work just as you would expect, and in the same way we defined arithmetic rules for integers.
#
# We also need to bridge together the world of integer arithmetic and boolean logic to achieve something useful;
# the last two rules in the theory do exactly that.
bool_rules = @theory a b σ begin
(a::Bool || b::Bool) => (a || b)
(a::Bool && b::Bool) => (a && b)
!a::Bool => !a
(a::Bool, σ::Mem) => a
(!b, σ::Mem) => !eval_bool(b, σ)
(a || b, σ::Mem) --> (a, σ) || (b, σ)
(a && b, σ::Mem) --> (a, σ) && (b, σ)
(a < b, σ::Mem) => (eval_arithm(a, σ) < eval_arithm(b, σ)) # This rule bridges together ints and bools
(a::Int < b::Int) => (a < b)
end
eval_bool(ex, mem) = strategy(bool_rules)(:($ex, $mem))
# Let's run a few tests.
@test all(
[
eval_bool(:(false || false), Mem()) == false
eval_bool(:((false || false) || !(false || false)), Mem(:x => 2)) == true
eval_bool(:((2 < 3) && (3 < 4)), Mem(:x => 2)) == true
eval_bool(:((2 < x) || !(3 < 4)), Mem(:x => 2)) == false
eval_bool(:((2 < x) || !(3 < 4)), Mem(:x => 4)) == true
],
)
# ## Conditionals: If-then-else
# Conditional expressions in our language take the form of
# `cond(guard, thenbranch)` or `cond(guard, thenbranch, elsebranch)`.
# It means that our program at this point will:
# 1. Evaluate the `guard` expressions
# 2. If `guard` evaluates to `true`, then evaluate `thenbranch`
# 3. If `guard` evaluates to `false`, then evaluate `elsebranch`
# The first rule here is simple. If there's no `elsebranch` in the
# `cond` statement, we add an empty one with the `skip` command.
# Otherwise, we piggyback on the existing Julia if-then-else ternary operator.
# To do so, we need to evaluate the boolean expression in the guard by
# using the `eval_bool` function we defined above.
if_rules = @theory guard t f σ begin
(cond(guard, t), σ::Mem) --> (cond(guard, t, :skip), σ)
(cond(guard, t, f), σ::Mem) => (eval_bool(guard, σ) ? :($t, $σ) : :($f, $σ))
end
eval_if(ex, mem::Mem) = strategy(read_mem ∪ arithm_rules ∪ if_rules)(:($ex, $mem))
# And here is our working conditional
@testset "If Semantics" begin
@test 2 == eval_if(:(cond(true, x, 0)), Mem(:x => 2))
@test 0 == eval_if(:(cond(false, x, 0)), Mem(:x => 2))
@test 2 == eval_if(:(cond(!(false), x, 0)), Mem(:x => 2))
@test 0 == eval_if(:(cond(!(2 < x), x, 0)), Mem(:x => 3))
end
# ## Writing memory
# Our language then needs a mechanism to write in memory.
# We define the behavior of the `store` construct, which
# behaves like the `=` assignment operator in other programming languages.
# `store(a, 5)` will store the value 5 in the `a` variable inside the program's memory.
write_mem = @theory sym val σ begin
(store(sym::Symbol, val), σ) => (σ[sym] = eval_if(val, σ);
σ)
end
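# Let's quickly check that `store` works. This little check is our own addition (it is
# not part of the original course notes): applying `write_mem` alone is enough here,
# because the rule itself reuses `eval_if` to evaluate the value being stored.
σ₂ = Mem(:x => 2)
strategy(write_mem)(:(store(x, x + 1), $σ₂))
@test σ₂[:x] == 3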
# ## While loops and sequential computation.
while_rules = @theory guard a b σ begin
(:skip, σ::Mem) --> σ
((:skip; b), σ::Mem) --> (b, σ)
(seq(a, b), σ::Mem) --> (b, merge((a, σ), σ))
merge(a::Mem, σ::Mem) => merge(σ, a)
merge(a::Union{Bool,Int}, σ::Mem) --> σ
(loop(guard, a), σ::Mem) --> (cond(guard, seq(a, loop(guard, a)), :skip), σ)
end
# ## Completing the language.
while_language = write_mem ∪ read_mem ∪ arithm_rules ∪ if_rules ∪ while_rules;
using Metatheory.Syntax: rmlines
eval_while(ex, mem) = strategy(while_language)(:($(rmlines(ex)), $mem))
# Final steps
@testset "While Semantics" begin
@test Mem(:x => 3) == eval_while(:((store(x, 3))), Mem(:x => 2))
@test Mem(:x => 5) == eval_while(:(seq(store(x, 4), store(x, x + 1))), Mem(:x => 3))
@test Mem(:x => 4) == eval_while(:(cond(x < 10, store(x, x + 1))), Mem(:x => 3))
@test 10 == eval_while(:(seq(loop(x < 10, store(x, x + 1)), x)), Mem(:x => 3))
@test 50 == eval_while(:(seq(loop(x < y, seq(store(x, x + 1), store(y, y - 1))), x)), Mem(:x => 0, :y => 100))
end
| Metatheory | https://github.com/JuliaSymbolics/Metatheory.jl.git |
|
[
"MIT"
] | 2.0.2 | 2cdcf1d7c4ae2d81cc015fae603df5c1b4b8f422 | docs | 1793 | # Code Structure in `src/`
## Patterns Module
The `Patterns.jl` file contains type definitions for pattern matching building blocks
called `AbstractPat`s, shared between pattern matching backends.
This module provides the type hierarchy required to build patterns, the
left hand side of rules.
## Rules
- `Rules.jl`: definitions for rule types used in various rewriting backends.
- `matchers.jl`: Classical rewriting pattern matcher.
## `Syntax.jl`
Contains the frontend to Rules and Patterns (`@rule` macro and `Pattern` function), using the compatible SymbolicUtils.jl syntax.
## EGraphs Module
Contains code for the e-graphs rewriting backend. See the [egg paper](https://dl.acm.org/doi/pdf/10.1145/3434304) for a high-level overview.
- `egraph.jl`: Definition of `ENode`, `EClass` and `EGraph` types, EClass unioning, metadata access, definition of EGraphs, adding, merging, rebuilding.
- `analysis.jl`: Core algorithms for analyzing egraphs and extracting terms from egraphs.
- `saturation.jl`: Core algorithm for equality saturation and rewriting on e-graphs. Contains the search phase of equality saturation (e-graph search, using multiple dispatch on rules) and the write phase (application and instantiation of `Patterns` from the matching/search results). Also defines the `SaturationParams` type holding the parameters for equality saturation, the equality saturation execution reports, and utility functions and macros to check equality of terms in e-graphs.
- `Schedulers.jl`: Module containing definition of Schedulers for equality saturation.
## `Library.jl`
Contains utility functions and examples of ready-to-use theories of rules: macros that generate single rules corresponding to common algebraic properties, and macros for generating theories from common algebraic structures.
| Metatheory | https://github.com/JuliaSymbolics/Metatheory.jl.git |
|
[
"MIT"
] | 2.0.2 | 2cdcf1d7c4ae2d81cc015fae603df5c1b4b8f422 | docs | 7341 | # Notes for Metatheory.jl Contributors
Welcome, and thanks for considering Metatheory.jl! Please be sure to respect our [community standards](https://julialang.org/community/standards) in all interactions.
We gratefully acknowledge the general [Julia CONTRIBUTING.md document](https://github.com/JuliaLang/julia/blob/master/CONTRIBUTING.md), from which much of this was adapted.
## Learning Julia
A pre-requisite for using Metatheory.jl is to know at least a little about Julia itself. [The learning page](https://julialang.org/learning) has a great list of resources for new and experienced users alike. [This tutorial video](https://www.youtube.com/watch?v=vWkgEddb4-A) is one recommended starting point, as is the "[Invitation to Julia](https://www.youtube.com/watch?v=gQ1y5NUD_RI)" workshop video from JuliaCon 2015 ([slide materials here](https://github.com/dpsanders/invitation_to_julia)). The [Julia documentation](https://docs.julialang.org) covers the language and core library features, and is searchable.
## Learning Metatheory.jl
Our [main documentation](https://github.com/JuliaSymbolics/Metatheory.jl/) provides an overview and some examples of using Metatheory.jl.
The core package is hosted at [Metatheory.jl](https://github.com/JuliaSymbolics/Metatheory.jl/).
## Before filing an issue
Julia's own "[How to file a bug report](https://github.com/JuliaLang/julia/blob/master/CONTRIBUTING.md#how-to-file-a-bug-report)" has many useful tips to help make sure that all necessary information is included.
Try to report the issue in the package responsible for the error.
You can often make good guesses by examining the backtrace (in cases where an
error is thrown), using `@which`, stepping in with the debugger, or just
using the search bar at the top left of [Metatheory.jl](https://github.com/JuliaSymbolics/Metatheory.jl/).
## Contributing documentation
*By contributing you agree to be bound by Metatheory.jl's MIT license*
Many documentation issues are easy! For small changes, you can just click on one of the files in the `docs/src` directory, click on the "pencil icon," and [edit it in your browser](https://help.github.com/en/github/managing-files-in-a-repository/editing-files-in-another-users-repository). Any changes you suggest will first be vetted by an experienced developer, so there is no need to worry that you'll mess something up.
Changes to the "docstrings" (the string preceding a method in source code) should be made in the package in which they appear.
For bigger documentation changes, it is probably best to clone the package and submit the changes as an ordinary pull request, as described below under "Contributing code." You can build the documentation locally if you install [Documenter.jl](https://github.com/JuliaDocs/Documenter.jl), and run `include("make.jl")` in the `docs/` folder. To see the completed documentation, open the `build/index.html` file in your browser.
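A typical local build looks like this (the exact commands are our suggestion, not prescribed by the repository):
```julia
# Run from the docs/ folder of your local clone.
using Pkg
Pkg.activate(".")
Pkg.instantiate()
include("make.jl")
```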
## Contributing code
*By contributing you agree to be bound by Metatheory.jl's MIT license*
If you've never submitted a pull request before, it can take a little while to become familiar with the process. In addition to the steps below, [GitHub has a tutorial and exercises](https://try.github.io/). See also the excellent [Git book](https://git-scm.com/book/en/v2). There are also many good external tutorials on this subject, like [this one](https://yangsu.github.io/pull-request-tutorial/).
### Contributor Checklist
* Create a [GitHub account](https://github.com/signup/free).
* If you plan to fix a bug, feel free to first report the bug as an issue on its own.
In the text, you can mention whether you're planning on addressing it yourself.
*Pro tip*: if you do submit a pull request to fix it, put "Fixes #<issue number>" in the commit message and it will close automatically when your pull request is merged.
If you're concerned your change might be controversial, you can also use an issue to propose your change in general terms and discuss it before implementation.
* Fork whatever repository you plan to commit to by clicking on the "Fork" button at the upper-right of the home page.
* If you haven't already implemented your changes, check the package out for development: hit `]` in the Julia REPL and then type (for example) `dev Metatheory`.
You'll get a copy of the full repository in your `~/.julia/dev` folder. See the [package manager documentation](https://julialang.github.io/Pkg.jl/v1/) for further details.
* Make your changes. Generally you should be working on a branch, so your work doesn't conflict with ongoing development in the `master` branch. Ensure you follow the [Julia style guide](https://docs.julialang.org/en/v1/manual/style-guide/index.html) for your contribution.
* Test your changes. We aspire to have test coverage for every bit of "user visible" functionality. Tests are stored, appropriately, in the `test/` folder of each package. You can run existing tests yourself and add new ones. Sometimes testing is more work than the actual change itself, but having tests ensures that no well-meaning future developer will accidentally mess up your functionality---it's worth it! *"A fix is for today. A test is forever."*
* Push your changes to your fork and then submit a pull request!
* See what happens to the automated tests that run on Github Actions. If there are errors, check the logs and see whether they look like they are related to your changes; if so, try to fix the problem by adding new commits to your pull request. Once the tests pass, hooray! :tada:
* Relax and wait for feedback. We try to review contributions quickly and courteously. But we are human, and sometimes we get busy with other things or fail to notice an email; if it's been a while since you submitted your pull request, try posting a polite reminder about the existence of your pull request.
* Discuss any feedback you receive as necessary. It's fine to defend your approach, but also be open to making changes based on suggestions you receive.
* Sooner or later, the fate of your pull request will become clear. If it gets approved, an established contributor will merge it. It's not officially released into the wild until a contributor releases a new version of the package; if that doesn't happen quickly, don't hesitate to make an inquiry in case it's simply been overlooked.
From the whole team, thanks in advance for your contribution!
### Contribution tips
* [Revise](https://github.com/timholy/Revise.jl) is a package that
tracks changes in source files and automatically updates function
definitions in your running Julia session. Using it, you can make
extensive changes without needing to rebuild the package in order to test
your changes.
* Debuggers can help you get to the root of a problem. There are many choices and interfaces:
+ [Juno](https://github.com/JunoLab/Juno.jl) has a polished GUI for debugging
+ [Debugger](https://github.com/JuliaDebug/Debugger.jl) has a polished command-line interface
+ [Rebugger](https://github.com/timholy/Rebugger.jl) has an innovative but somewhat less-polished command-line interface
+ [Infiltrator](https://github.com/JuliaDebug/Infiltrator.jl) offers more limited debugging, but often it's precisely what you need while avoiding the performance penalties that some of the other options suffer from.
| Metatheory | https://github.com/JuliaSymbolics/Metatheory.jl.git |
|
[
"MIT"
] | 2.0.2 | 2cdcf1d7c4ae2d81cc015fae603df5c1b4b8f422 | docs | 1444 | # 2.0
- No longer dispatch against types, but instead dispatch against objects.
- Faster E-Graph Analysis
- Better library macros
- Updated TermInterface to 0.3.3
- New interface for e-graph extraction using `EGraphs.egraph_reconstruct_expression`
- Simplify E-Graph Analysis Interface. Use Symbols or functions for identifying Analyses.
- Remove duplicates in E-Graph analyses data.
## 1.2
- Fixes when printing patterns
- Can pass custom `similarterm` to `SaturationParams` by using `SaturationParams.simterm`.
## 1.1
- EGraph pattern matcher can now match against both symbols and function objects
- Fixes for Symbolics.jl integration
## 1.0
Metatheory.jl + SymbolicUtils.jl = ❤️
- Metatheory.jl now supports the same syntax as [SymbolicUtils.jl](https://github.com/JuliaSymbolics/SymbolicUtils.jl/) for the rule definition DSL!
- The classical pattern matcher has been redesigned, and it is a port of [SymbolicUtils.jl](https://github.com/JuliaSymbolics/SymbolicUtils.jl/)'s pattern matcher. Now Metatheory.jl can be used in place of SU's rewriting backend.
- Performance improvements: caching of ground terms when doing e-matching in equality saturation.
- Dynamic Rules do not use RuntimeGeneratedFunctions when not needed.
- Removed `@metatheory_init`
- Rules now support type and function predicates as in SymbolicUtils.jl
- Redesigned the library
- Introduced `@timerewrite` to time the execution of classical rewriting systems. | Metatheory | https://github.com/JuliaSymbolics/Metatheory.jl.git |
|
[
"MIT"
] | 2.0.2 | 2cdcf1d7c4ae2d81cc015fae603df5c1b4b8f422 | docs | 7309 | <p align="center">
<img width="400px" src="https://raw.githubusercontent.com/juliasymbolics/Metatheory.jl/master/docs/src/assets/dragon.jpg"/>
</p>
# Metatheory.jl
[](https://juliasymbolics.github.io/Metatheory.jl/dev/)
[](https://juliasymbolics.github.io/Metatheory.jl/stable/)

[](https://codecov.io/gh/juliasymbolics/Metatheory.jl)
[](https://arxiv.org/abs/2102.07888)
[](https://joss.theoj.org/papers/3266e8a08a75b9be2f194126a9c6f0e9)
[](https://julialang.zulipchat.com/#narrow/stream/277860-metatheory.2Ejl)
**Metatheory.jl** is a general purpose term rewriting, metaprogramming and algebraic computation library for the Julia programming language, designed to take advantage of the powerful reflection capabilities to bridge the gap between symbolic mathematics, abstract interpretation, equational reasoning, optimization, composable compiler transforms, and advanced
homoiconic pattern matching features. The core features of Metatheory.jl are a powerful rewrite rule definition language, a vast library of functional combinators for classical term rewriting and *e-graph rewriting*, a fresh approach to term rewriting achieved through an equality saturation algorithm. Metatheory.jl can manipulate any kind of
Julia symbolic expression type, as long as it satisfies the [TermInterface.jl](https://github.com/JuliaSymbolics/TermInterface.jl) interface.
Metatheory.jl provides:
- An eDSL (domain specific language) to define different kinds of symbolic rewrite rules.
- A classical rewriting backend, derived from the [SymbolicUtils.jl](https://github.com/JuliaSymbolics/SymbolicUtils.jl) pattern matcher, supporting associative-commutative rules. It is based on the pattern matcher in the [SICM book](https://mitpress.mit.edu/sites/default/files/titles/content/sicm_edition_2/book.html).
- A flexible library of rewriter combinators.
- An e-graph rewriting (equality saturation) backend and pattern matcher, based on the [egg](https://egraphs-good.github.io/) library, supporting backtracking and non-deterministic term rewriting by using a data structure called *e-graph*, efficiently incorporating the notion of equivalence in order to reduce the amount of user effort required to achieve optimization tasks and equational reasoning.
- `@capture` macro for flexible metaprogramming.
Intuitively, Metatheory.jl transforms Julia expressions
into other Julia expressions and can do so at both compile and run time. This allows Metatheory.jl users to perform customized and composable compiler optimizations specifically tailored to single, arbitrary Julia packages.
Our library provides a simple, algebraically composable interface to help scientists in implementing and reasoning about semantics and all kinds of formal systems, by defining concise rewriting rules in pure, syntactically valid Julia on a high level of abstraction. Our implementation of equality saturation on e-graphs is based on the excellent, state-of-the-art technique implemented in the [egg](https://egraphs-good.github.io/) library, reimplemented in pure Julia.
## 2.0 is out!
Second stable version is out:
- New e-graph pattern matching system, relies on functional programming and closures, and is much more extensible than 1.0's virtual machine.
- No longer dispatch against types, but instead dispatch against objects.
- Faster E-Graph Analysis
- Better library macros
- Updated TermInterface to 0.3.3
- New interface for e-graph extraction using `EGraphs.egraph_reconstruct_expression`
- Simplify E-Graph Analysis Interface. Use Symbols or functions for identifying Analyses.
- Remove duplicates in E-Graph analyses data.
Many features have been ported from SymbolicUtils.jl. Metatheory.jl can be used in place of SymbolicUtils.jl when you do not need to manipulate mathematical expressions. The introduction of [TermInterface.jl](https://github.com/JuliaSymbolics/TermInterface.jl) has allowed for large potential in the generalization of term rewriting and symbolic analysis and manipulation features. Integration between Metatheory.jl and Symbolics.jl has been shown in the ["High-performance symbolic-numerics via multiple dispatch"](https://arxiv.org/abs/2105.03949) paper.
## Recommended Readings - Selected Publications
- The [Metatheory.jl manual](https://juliasymbolics.github.io/Metatheory.jl/stable/)
- **OUT OF DATE**: The [Metatheory.jl introductory paper](https://joss.theoj.org/papers/10.21105/joss.03078#) gives a brief high level overview on the library and its functionalities.
- The Julia Manual [metaprogramming section](https://docs.julialang.org/en/v1/manual/metaprogramming/) is fundamental to understand what homoiconic expression manipulation is and how it happens in Julia.
- An [introductory blog post on SIGPLAN](https://blog.sigplan.org/2021/04/06/equality-saturation-with-egg/) about `egg` and e-graphs rewriting.
- [egg: Fast and Extensible Equality Saturation](https://dl.acm.org/doi/pdf/10.1145/3434304) contains the definition of *E-Graphs* on which Metatheory.jl's equality saturation rewriting backend is based. This is a strongly recommended reading.
- [High-performance symbolic-numerics via multiple dispatch](https://arxiv.org/abs/2105.03949): a paper about how we used Metatheory.jl to optimize code generation in [Symbolics.jl](https://github.com/JuliaSymbolics/Symbolics.jl)
- [Automated Code Optimization with E-Graphs](https://arxiv.org/abs/2112.14714). Alessandro Cheli's Thesis on Metatheory.jl
## Contributing
If you'd like to give us a hand and contribute to this repository you can:
- Find a high level description of the project architecture in [ARCHITECTURE.md](https://github.com/juliasymbolics/Metatheory.jl/blob/master/ARCHITECTURE.md)
- Read the contribution guidelines in [CONTRIBUTING.md](https://github.com/juliasymbolics/Metatheory.jl/blob/master/CONTRIBUTING.md)
## Installation
You can install the stable version:
```julia
julia> using Pkg; Pkg.add("Metatheory")
```
Or you can install the developer version (recommended by now for latest bugfixes)
```julia
julia> using Pkg; Pkg.add(url="https://github.com/JuliaSymbolics/Metatheory.jl")
```
## Documentation
Extensive Metatheory.jl documentation is available [here](https://juliasymbolics.github.io/Metatheory.jl/dev)
## Citing
If you use Metatheory.jl in your research, please [cite](https://github.com/juliasymbolics/Metatheory.jl/blob/master/CITATION.bib) our works.
---
# Sponsors
If you enjoyed Metatheory.jl and would like to help, you can donate a coffee or choose to place your logo and name on this page. [See 0x0f0f0f's Github Sponsors page](https://github.com/sponsors/0x0f0f0f/)!
<p align="center">
<a href="https://planting.space">
<img width="300px" src="https://raw.githubusercontent.com/juliasymbolics/Metatheory.jl/master/.github/plantingspace.png"/>
</a>
</p>
| Metatheory | https://github.com/JuliaSymbolics/Metatheory.jl.git |
|
[
"MIT"
] | 2.0.2 | 2cdcf1d7c4ae2d81cc015fae603df5c1b4b8f422 | docs | 7690 | # Style guide
### IDE
It is recommended to use VSCode when programming in Julia. Its Julia extension
exclusively has shortcuts for evaluating Julia code, can display results inline
and has some support for working with system images, among others, which
typically make it better suited than other editors (unless you spend some effort
customizing another editor to your workflow). For autocompletions, linting and
navigation, it uses the Language Server Protocol (LSP) which you can reuse in
other text editors that support it.
#### Recommended VSCode extensions
- Julia: the official Julia extension.
- GitLens: lets you see inline which
commit recently affected the selected line. It is excellent to know who was
working on a piece of code, such that you can easily ask for explanations or
help in case of trouble.
### Reduce latency with system images
We can put package dependencies into a system image (kind of like a snapshot of
a Julia session, abbreviated as sysimage) to speed up their loading.
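For example, a minimal sketch using [PackageCompiler.jl](https://github.com/JuliaLang/PackageCompiler.jl) might look like this (the package list and the sysimage file name are purely illustrative, not project conventions):
```julia
using PackageCompiler

# Bake the dependencies that dominate your load time into a sysimage once;
# replace the package list with whatever is heavy in your environment.
create_sysimage([:DataStructures]; sysimage_path = "JuliaSysimage.so")
```
Afterwards, start Julia with `julia --sysimage JuliaSysimage.so`; the VSCode Julia extension can also be configured to use a custom sysimage.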
### Logging
To turn on debug logging for a given module, set the environment variable
`JULIA_DEBUG` to the name of the module. For example, to enable debugging from
module Foo, just do
```bash
JULIA_DEBUG=Foo julia --project test/runtests.jl
```
Or from REPL
```julia
ENV["JULIA_DEBUG"] = Foo
```
## Collaboration
Once you have developed a piece of code and want to share it with the team, you
can create a merge request. If the changes are not final and will require
further work before considering a merge, then please mark the merge request as a
draft.
Merge requests marked as drafts may not be reviewed. If you seek a review from
someone, you should explicitly state it in the merge request and tag the person
in question.
When you are confident in your changes and want to consider a merge, you can
mark the merge request as ready. It will then be reviewed, and when review
comments are addressed, an automatic merge will be issued.
## Style
Code style is different from [Formatting](#formatting). While the latter can easily be
assisted by automatic tools, the former cannot.
### Comments
Comments and error messages should form proper sentences unless they are titles.
Use a `TODO` comment to mark something to get done later, but only if someone will look at this code again. For
larger things, make an issue.
```
# TODO: ...
```
Sometimes a piece of code is written in a certain way to work around an existing
issue in a dependency. If this code should be cleaned up after that issue is
fixed, then the following line, with a link to the issue, should be added.
```
# ISSUE: https://
```
Probabilistic tests can sometimes fail in CI. If that is the case they should be marked with [`@test_skip`](https://docs.julialang.org/en/v1/stdlib/Test/#Test.@test_skip), which indicates that the test may intermittently fail (it will be reported in the test summary as `Broken`). This is equivalent to `@test (...) skip=true` but requires at least Julia v1.7. A comment before the relevant line is useful so that they can be debugged and made more reliable.
```
# FLAKY
@test_skip some_probabilistic_test()
```
For packages that do not have to be used as libraries, it is sometimes
convenient to extend external methods on external types - this is referred to as
"type piracy" in Julia style guide. Generally it should be avoided, but for the
cases where it is very convenient it should be tagged.
```
# PIRACY
```
### Code
Generally follow the [Julia Style Guide](https://docs.julialang.org/en/v1/manual/style-guide/) with some caveats:
- [Avoid elaborate container types](https://docs.julialang.org/en/v1/manual/style-guide/#Avoid-elaborate-container-types): if explicitly typing a complex container helps with safety then you should do it. But, if a container type is not concrete (abstract type or unparametrized parametric type), nesting it inside another container probably won't do what you intend (Julia types are invariant). For example:
```julia
# Don't
const NestedContainer = AbstractDict{Symbol,Vector{Array}}
Dict{Float64, AbstractDict{Symbol,NestedContainer}}
# Do
Dict{Float64, <:AbstractDict}
Dict{Float64, Vector{Int}}
const Bytes = Vector{UInt8}
struct BytesCollections
collections::Vector{Bytes}
end
AbstractDict{Symbol, BytesCollections}
```
- [Avoid type piracy](https://docs.julialang.org/en/v1/manual/style-guide/#Avoid-type-piracy): this is more important for libraries, but in a self-contained project this may be a nice feature.
- Prefer `Foo[]` and `Pair{Symbol,Foo}[]` over `Vector{Foo}()` and `Vector{Pair{Symbol,Foo}}()` for better readability.
- Avoid explicit use of the `return` keyword if it is pointless, e.g. when a function has a unique point of return.
Otherwise follow this:
```julia
"Module definition first."
module ExampleModule
# `using` of external modules.
using Distributions: Normal
# `using` of symbols from internal modules, always explicitly name them.
using ..SomeNeighbourModule: nicefn
# `import` of symbols, usually to be extended, with the exception of those from `Base` (see below).
import StatsBase: mean
# ---------------------
# # First main section.
# Above begins a section of code which is readable with [Literate.jl](https://fredrikekre.github.io/Literate.jl/v2/fileformat/).
"Function docs as usual. Write proper sentences."
f(x) = x^2
# -----------------------
# ## Title of subsection.
"Some code in subsection."
g(x) = log(x)
# ----------------------
# # Second main section.
struct A
id::Int64
end
"Keep constructors close to datastructure definitions."
A() = A(rand(1:10))
"""
Do not use explicit type parameters if not needed.
Use multi-line strings for longer docstrings.
"""
h(x::Vector{<:Real})::String = "Real vector."
h(x::Vector) = nothing
"""
Use output type annotations when the return type is not clear from context.
This facilitates readability by not requiring the reader to look for the lastly executed statement(s).
"""
function h(x)::Float64
compute_something(x)
end
h(::Nothing) = 2
"Here the type parameter is used twice - it was needed."
i(x::Vector{T})::T where T<:Real = sum(x)
# Extend symbols defined in `Base` prepending the module's name.
Base.convert(::Type{Expr}, ::Type{Union{}}) = :(Union{})
end
```
Concerning unit testing, it is a good practice to use [SafeTestsets.jl](https://github.com/YingboMa/SafeTestsets.jl), since it makes every single test script an independently runnable file. In turn, this implies that imports need to be manually added in each file. Moreover, we prefer to use explicit imports since that helps to keep tests targeted at what they should be testing. Hence, we suggest the following guidelines in test scripts (which should be included using `@safetestset`):
```julia
# load modules (eventually, also package itself)
using Test, MacroTools
# load specific names from external dependencies
using MeasureTheory: Dirac
# load specific names from MyPackage submodules (sorted alphabetically)
using MyPackage.SomeModule: Foo, bar, Baz, ⊕
@testset "Descriptive name" begin
# ...
end
```
## Formatting
Use [JuliaFormatter.jl](https://github.com/domluna/JuliaFormatter.jl) to ensure that all code is formatted consistently. There should be a CI job that automatically checks for formatting. However, everyone is encouraged to use the formatter locally before pushing, see usage details below.
Notable settings:
- Use two spaces for indentation: by default the Julia guide recommends four, but that tends to push code too much to the right.
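For example, a typical local invocation looks like this (run from the project root; the formatter picks up a `.JuliaFormatter.toml` with `indent = 2` if one is present):
```julia
using JuliaFormatter

# Format the whole project in-place according to the project's settings.
format(".")
```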
### VS Code
If you are using VS code and the Julia Extension, you can also trigger the formatter via [various shortcuts](https://www.julia-vscode.org/docs/stable/userguide/formatter/).
| Metatheory | https://github.com/JuliaSymbolics/Metatheory.jl.git |
|
[
"MIT"
] | 2.0.2 | 2cdcf1d7c4ae2d81cc015fae603df5c1b4b8f422 | docs | 496 | # API Documentation
## Syntax
```@autodocs
Modules = [Metatheory.Syntax]
```
---
## Patterns
```@autodocs
Modules = [Metatheory.Patterns]
```
---
## Rules
```@autodocs
Modules = [Metatheory.Rules]
```
---
## Rewriters
```@autodocs
Modules = [Metatheory.Rewriters]
```
---
## EGraphs
```@autodocs
Modules = [Metatheory.EGraphs]
```
---
## EGraph Schedulers
```@autodocs
Modules = [Metatheory.EGraphs.Schedulers]
``` | Metatheory | https://github.com/JuliaSymbolics/Metatheory.jl.git |
|
[
"MIT"
] | 2.0.2 | 2cdcf1d7c4ae2d81cc015fae603df5c1b4b8f422 | docs | 16874 | # EGraphs and Equality Saturation
An *EGraph* is an efficient data structure for representing congruence relations.
EGraphs are data structures originating from theorem provers. Several projects
have very recently repurposed EGraphs to implement state-of-the-art,
rewrite-driven compiler optimizations and program synthesizers using a technique
known as equality saturation. Metatheory.jl provides a general purpose,
customizable implementation of EGraphs and equality saturation, inspired from
the [egg](https://egraphs-good.github.io/) library for Rust. You can read more
about the design of the EGraph data structure and equality saturation algorithm
in the [egg paper](https://dl.acm.org/doi/pdf/10.1145/3434304).
Let's load Metatheory and the rule library
```julia
using Metatheory
using Metatheory.Library
```
```@meta
DocTestSetup = quote
using Metatheory
using Metatheory.Library
end
```
## What can I do with EGraphs in Metatheory.jl?
In classical term rewriting, rewrites are typically destructive and forget the
matched left-hand side. Therefore, rules are applied in an arbitrary or
controlled order - this often results in local minima and looping. For decades,
programmers and scientists using term rewriting systems have spent their time
trying to find confluent and terminating systems of rules. This requires a lot
of effort and time. When studying any computational, mathematical or scientific
system governed by equational rules, what should we do about equations that have no obvious orientation, such as `(a + b) + c = a + (b + c)`?
E-Graphs come to our help.
EGraphs are bipartite graphs of [ENode](@ref)s and [EClass](@ref)es:
a data structure for efficiently representing and rewriting many equivalent expressions at the same time. It is a sort of fast data structure for sets of trees: subtrees and parents are shared if possible, which makes EGraphs similar to DAGs.
Most importantly, with EGraph rewriting you can use **bidirectional rewrite rules**, such as **equalities** without worrying about
the ordering and confluence of your rewrite system!
Therefore, rule application in EGraphs is non-destructive - everything is
copied! This allows users to run non-deterministic rewrite systems. Many rules
can match at the same time and the previous state of expressions will not be
lost.
The EGraph backend for Metatheory.jl allows you to create an
EGraph from a starting expression, to add more expressions to the EGraph with
`addexpr!`, and then to effectively fill the EGraph with all possible equivalent
expressions resulting from applying rewrite rules from a [theory](../rewrite#Theories), by using the
`saturate!` function. You can then easily extract expressions from an e-graph by calling `extract!` with a cost
function.
A killer feature of [egg](https://egraphs-good.github.io/) and Metatheory.jl
is **EGraph Analyses**. They allow you to annotate expressions and equivalence classes in an EGraph with values from a semilattice domain, and then to:
* Automatically extract optimal expressions from an EGraph deciding from analysis data.
* Have conditional rules that are executed only if some criterion on the analysis data is met
* Have dynamic rules that compute the right hand side based on analysis data.
## Library
The `Metatheory.Library` module contains utility functions and macros for creating
rules and theories from commonly used algebraic structures and
properties, to be used with the e-graph backend.
```jldoctest
comm_monoid = @commutative_monoid (*) 1
# output
4-element Vector{RewriteRule}:
~a * ~b --> ~b * ~a
(~a * ~b) * ~c --> ~a * (~b * ~c)
~a * (~b * ~c) --> (~a * ~b) * ~c
1 * ~a --> ~a
```
#### Theories and Algebraic Structures
**The e-graph backend can directly handle associativity, commutativity and distributivity**:
equalities that are otherwise known to cause loops and require extensive user reasoning
in classical rewriting.
```jldoctest
t = @theory a b c begin
a * b == b * a
a * 1 == a
a * (b * c) == (a * b) * c
end
# output
3-element Vector{EqualityRule}:
~a * ~b == ~b * ~a
~a * 1 == ~a
~a * (~b * ~c) == (~a * ~b) * ~c
```
## Equality Saturation
We can programmatically build and saturate an EGraph. The function `saturate!`
takes an `EGraph` and a theory, executes equality saturation and returns a
report of the equality saturation process. `saturate!` is configurable;
customizable parameters include a `timeout` on the number of iterations, an
`eclasslimit` on the number of e-classes in the EGraph, and a `stopwhen` function
that stops saturation when it evaluates to true.
```@example
g = EGraph(:((a * b) * (1 * (b + c))));
report = saturate!(g, t);
```
With the EGraph equality saturation backend, Metatheory.jl can prove **simple**
equalities very efficiently. The `@areequal` macro takes a theory and some
expressions and returns true iff the expressions are equal according to the
theory. The following example returns `true`, provided `some_theory` is an appropriate theory (containing, for instance, commutativity and distributivity rules for `+` and `*`).
```julia
julia> @areequal some_theory (x+y)*(a+b) ((a*(x+y))+b*(x+y)) ((x*(a+b))+y*(a+b))
```
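Here is a small self-contained variant; the two-rule theory below is ours, defined just for this example:
```julia
comm_assoc = @theory a b c begin
  a + b == b + a
  (a + b) + c == a + (b + c)
end

@areequal comm_assoc (x+y)+z (z+y)+x # returns true
```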
## Configurable Parameters
[`EGraphs.saturate!`](@ref) can accept an additional parameter of type
[`EGraphs.SaturationParams`](@ref) to configure the equality saturation algorithm.
Extensive documentation for the configurable parameters is available in the [`EGraphs.SaturationParams`](@ref) API docstring.
```julia
# create the saturation params
params = SaturationParams(timeout=10, eclasslimit=4000)
saturate!(egraph, theory, params)
```
```@meta
CurrentModule = Base
```
## Outline of the Equality Saturation Algorithm
The `saturate!` function behaves as following.
Given a starting e-graph `g`, a set of rewrite rules `t` and some parameters `p` (including an iteration limit `n`):
* For each rule in `t`, search through the e-graph for matches of the rule's l.h.s.
* For each match produced, apply the rewrite
* Do a bottom-up traversal of the e-graph to rebuild the congruence closure
* If the e-graph hasn't changed from the last iteration, it has saturated; if so, halt saturation.
* Loop at most n times.
Note that knowing whether an expression, together with a set of rules, will saturate an e-graph or never terminate
is still an open research problem.
## Extracting from an EGraph
Since e-graphs non-deterministically represent many equivalent symbolic terms,
extracting an expression from an EGraph is the process of selecting and
extracting a single symbolic expression from the set of all the possible
expressions contained in the EGraph. Extraction is done through the `extract!`
function, and the theoretical background behind this procedure is an [EGraph
Analysis](https://dl.acm.org/doi/pdf/10.1145/3434304); A cost function is
provided as a parameter to the `extract!` function. This cost function will
examine mostly every e-node in the e-graph and will determine which e-nodes will
be chosen from each e-class through an automated, recursive algorithm.
Metatheory.jl already provides some simple cost functions, such as `astsize`,
which expresses preference for the smallest expressions contained in equivalence
classes.
Here's an example.
Given the theory:
```@example extraction
using Metatheory
using Metatheory.Library
comm_monoid = @commutative_monoid (*) 1;
t = @theory a b c begin
a + 0 --> a
a + b --> b + a
a + inv(a) --> 0 # inverse
a + (b + c) --> (a + b) + c
a * (b + c) --> (a * b) + (a * c)
(a * b) + (a * c) --> a * (b + c)
a * a --> a^2
a --> a^1
a^b * a^c --> a^(b+c)
log(a^b) --> b * log(a)
log(a * b) --> log(a) + log(b)
log(1) --> 0
log(:e) --> 1
:e^(log(a)) --> a
a::Number + b::Number => a + b
a::Number * b::Number => a * b
end
t = comm_monoid ∪ t ;
nothing # hide
```
We can extract an expression by using
```@example extraction
expr = :((log(e) * log(e)) * (log(a^3 * a^2)))
g = EGraph(expr)
saturate!(g, t)
ex = extract!(g, astsize)
```
The second argument to `extract!` is a **cost function**. [astsize](@ref) is
a cost function provided by default, which computes the size of expressions.
## Defining custom cost functions for extraction.
A *cost function* for *EGraph extraction* is a function used to determine
which *e-node* will be extracted from an *e-class*.
It must return a positive, real number value, and must accept 3 arguments.
1) The current [ENode](@ref) `n` that is being inspected.
2) The current [EGraph](@ref) `g`.
3) The current analysis name `an::Symbol`.
From those 3 parameters, one can access all the data needed to compute
the cost of an e-node recursively.
* One can use [TermInterface.jl](https://github.com/JuliaSymbolics/TermInterface.jl) methods to access the operation and child arguments of an e-node: `operation(n)`, `arity(n)` and `arguments(n)`
* Since e-node children always point to e-classes in the same e-graph, one can retrieve the [EClass](@ref) object for each child of the currently visited enode with `g[id] for id in arguments(n)`
* One can inspect the analysis data for a given eclass and a given analysis name `an`, by using [hasdata](@ref) and [getdata](@ref).
* Extraction analyses always associate a tuple of 2 values to a single e-class: which e-node is the one that minimizes the cost
and its cost. More details can be found in the [egg paper](https://dl.acm.org/doi/pdf/10.1145/3434304) in the *Analyses* section.
Here's an example:
```julia
# This is a cost function that behaves like `astsize` but increments the cost
# of nodes containing the `^` operation. This results in a tendency to avoid
# extraction of expressions containing '^'.
function cost_function(n::ENodeTerm, g::EGraph)
cost = 1 + arity(n)
operation(n) == :^ && (cost += 2)
for id in arguments(n)
eclass = g[id]
# if the child e-class has not yet been analyzed, return +Inf
!hasdata(eclass, cost_function) && (cost += Inf; break)
cost += last(getdata(eclass, cost_function))
end
return cost
end
# All literal expressions (e.g `a`, 123, 0.42, "hello") have cost 1
cost_function(n::ENodeLiteral, g::EGraph) = 1
```
## EGraph Analyses
An *EGraph Analysis* is an efficient and automated way of analyzing all the possible
terms contained in an e-graph. Metatheory.jl provides a toolkit to ease and
automate the process of EGraph Analysis.
An *EGraph Analysis* defines a domain of values and associates a value from the domain to each [EClass](@ref) in the graph. Theoretically, the domain should form a [join semilattice](https://en.wikipedia.org/wiki/Semilattice). Rewrites can cooperate with e-class analyses by depending on analysis facts and adding equivalences that in turn establish additional facts.
In Metatheory.jl, **EGraph Analyses are uniquely identified** by either
* A unique name of type `Symbol`.
* A function object `f`, used for cost function analysis. This will use built-in definitions of `make` and `join`.
If you are specifying a custom analysis by its `Symbol` name,
the following functions define an interface for analyses based on multiple dispatch
on `Val{analysis_name::Symbol}`:
* [islazy(an)](@ref) should return true if the analysis name `an` should NOT be computed on-the-fly during egraphs operation, but only when inspected.
* [make(an, egraph, n)](@ref) should take an ENode `n` and return a value from the analysis domain.
* [join(an, x,y)](@ref) should return the semilattice join of `x` and `y` in the analysis domain (e.g. *given two analyses value from ENodes in the same EClass, which one should I choose?*). If `an` is a `Function`, it is treated as a cost function analysis, it is automatically defined to be the minimum analysis value between `x` and `y`. Typically, the domain value of cost functions are real numbers, but if you really do want to have your own cost type, make sure that `Base.isless` is defined.
* [modify!(an, egraph, eclassid)](@ref) Can be optionally implemented. This can be used modify an EClass `egraph[eclassid]` on-the-fly during an e-graph saturation iteration, given its analysis value.
### Defining a custom analysis
In this example, we will provide a custom analysis that tags each EClass in an EGraph
with `:even` if it contains an even number or with `:odd` if it represents an odd number,
or `nothing` if it does not contain a number at all. Let's suppose that the language of the symbolic expressions
that we are considering will contain *only integer numbers, variable symbols and the `*` and `+` operations.*
Since we are in a symbolic computation context, we are not interested in
the actual numeric result of the expressions in the EGraph, but we only care to analyze and identify
the symbolic expressions that will result in an even or an odd number.
Defining an EGraph Analysis is similar to the process of [Mathematical Induction](https://en.wikipedia.org/wiki/Mathematical_induction).
To define a custom EGraph Analysis, one should start by defining a name of type `Symbol` that will be used to identify this specific analysis and to dispatch against the required methods.
```julia
using Metatheory
using Metatheory.EGraphs
```
The next step, the base case of induction, is to define a method for
[make](@ref) dispatching against our `OddEvenAnalysis`. First, we want to
associate an analysis value only to the *literals* contained in the EGraph. To do this we
take advantage of multiple dispatch against `ENodeLiteral`.
```julia
function EGraphs.make(::Val{:OddEvenAnalysis}, g::EGraph, n::ENodeLiteral)
if n.value isa Integer
return iseven(n.value) ? :even : :odd
else
return nothing
end
end
```
Now we have to consider the *induction step*.
Knowing that our language contains only `*` and `+` operations, and knowing that:
* odd * odd = odd
* odd * even = even
* even * even = even
And we know that
* odd + odd = even
* odd + even = odd
* even + even = even
We can now define a method for `make` dispatching against
`OddEvenAnalysis` and `ENodeTerm`s to compute the analysis value for *nested* symbolic terms.
We take advantage of the methods in [TermInterface](https://github.com/JuliaSymbolics/TermInterface.jl)
to inspect the content of an `ENodeTerm`.
From the definition of an [ENode](@ref), we know that children of ENodes are always IDs pointing
to EClasses in the EGraph.
```julia
function EGraphs.make(::Val{:OddEvenAnalysis}, g::EGraph, n::ENodeTerm)
# Let's consider only binary function call terms.
if exprhead(n) == :call && arity(n) == 2
op = operation(n)
# Get the left and right child eclasses
child_eclasses = arguments(n)
l = g[child_eclasses[1]]
r = g[child_eclasses[2]]
# Get the corresponding OddEvenAnalysis value of the children
# defaulting to nothing
ldata = getdata(l, :OddEvenAnalysis, nothing)
rdata = getdata(r, :OddEvenAnalysis, nothing)
if ldata isa Symbol && rdata isa Symbol
if op == :*
if ldata == rdata
ldata
elseif (ldata == :even || rdata == :even)
:even
else
nothing
end
elseif op == :+
(ldata == rdata) ? :even : :odd
end
elseif isnothing(ldata) && rdata isa Symbol && op == :*
rdata
elseif ldata isa Symbol && isnothing(rdata) && op == :*
ldata
end
end
return nothing
end
```
We have now defined a way of tagging each ENode in the EGraph with `:odd` or `:even`, reasoning
inductively on the analysis values. The [analyze!](@ref) function will do the dirty job of doing
a recursive walk over the EGraph. The missing piece is now telling Metatheory.jl how to merge together
analysis values. Since EClasses represent many equal ENodes, we have to inform the automated analysis
how to extract a single value out of the many analysis values contained in an EGraph.
We do this by defining a method for [join](@ref).
```julia
function EGraphs.join(::Val{:OddEvenAnalysis}, a, b)
if a == b
return a
else
# an expression cannot be odd and even at the same time!
# this is contradictory, so we ignore the analysis value
return nothing
end
end
```
We do not need to modify the content of EClasses as a consequence of our analysis.
Therefore, we can skip the definition of [modify!](@ref).
We are now ready to test our analysis.
```julia
t = @theory a b c begin
a * (b * c) == (a * b) * c
a + (b + c) == (a + b) + c
a * b == b * a
a + b == b + a
a * (b + c) == (a * b) + (a * c)
end
function custom_analysis(expr)
g = EGraph(expr)
saturate!(g, t)
  analyze!(g, :OddEvenAnalysis)
  return getdata(g[g.root], :OddEvenAnalysis)
end
custom_analysis(:(2*a)) # :even
custom_analysis(:(3*3)) # :odd
custom_analysis(:(3*(2+a)*2)) # :even
custom_analysis(:(3y * (2x*y))) # :even
```
| Metatheory | https://github.com/JuliaSymbolics/Metatheory.jl.git |
|
[
"MIT"
] | 2.0.2 | 2cdcf1d7c4ae2d81cc015fae603df5c1b4b8f422 | docs | 7122 | # Metatheory.jl 2.0
```@raw html
<p align="center">
<img width="400px" src="https://raw.githubusercontent.com/juliasymbolics/Metatheory.jl/master/docs/src/assets/dragon.jpg"/>
</p>
```
[](https://juliasymbolics.github.io/Metatheory.jl/dev/)
[](https://juliasymbolics.github.io/Metatheory.jl/stable/)

[](https://codecov.io/gh/juliasymbolics/Metatheory.jl)
[](https://arxiv.org/abs/2102.07888)
[](https://joss.theoj.org/papers/3266e8a08a75b9be2f194126a9c6f0e9)
[](https://julialang.zulipchat.com/#narrow/stream/277860-metatheory.2Ejl)
**Metatheory.jl** is a general purpose term rewriting, metaprogramming and algebraic computation library for the Julia programming language, designed to take advantage of the powerful reflection capabilities to bridge the gap between symbolic mathematics, abstract interpretation, equational reasoning, optimization, composable compiler transforms, and advanced
homoiconic pattern matching features. The core features of Metatheory.jl are a powerful rewrite rule definition language, a vast library of functional combinators for classical term rewriting and *e-graph rewriting*, a fresh approach to term rewriting achieved through an equality saturation algorithm. Metatheory.jl can manipulate any kind of
Julia symbolic expression type, as long as it satisfies the [TermInterface.jl](https://github.com/JuliaSymbolics/TermInterface.jl) interface.
Metatheory.jl provides:
- An eDSL (domain specific language) to define different kinds of symbolic rewrite rules.
- A classical rewriting backend, derived from the [SymbolicUtils.jl](https://github.com/JuliaSymbolics/SymbolicUtils.jl) pattern matcher, supporting associative-commutative rules. It is based on the pattern matcher in the [SICM book](https://mitpress.mit.edu/sites/default/files/titles/content/sicm_edition_2/book.html).
- A flexible library of rewriter combinators.
- An e-graph rewriting (equality saturation) backend and pattern matcher, based on the [egg](https://egraphs-good.github.io/) library, supporting backtracking and non-deterministic term rewriting by using a data structure called *e-graph*, efficiently incorporating the notion of equivalence in order to reduce the amount of user effort required to achieve optimization tasks and equational reasoning.
- `@capture` macro for flexible metaprogramming.
Intuitively, Metatheory.jl transforms Julia expressions
into other Julia expressions and can do so at both compile and run time. This allows Metatheory.jl users to perform customized and composable compiler optimizations specifically tailored to single, arbitrary Julia packages.
Our library provides a simple, algebraically composable interface to help scientists in implementing and reasoning about semantics and all kinds of formal systems, by defining concise rewriting rules in pure, syntactically valid Julia on a high level of abstraction. Our implementation of equality saturation on e-graphs is based on the excellent, state-of-the-art technique implemented in the [egg](https://egraphs-good.github.io/) library, reimplemented in pure Julia.
## 2.0 is out!
Second stable version is out:
- New e-graph pattern matching system, relies on functional programming and closures, and is much more extensible than 1.0's virtual machine.
- No longer dispatch against types, but instead dispatch against objects.
- Faster E-Graph Analysis
- Better library macros
- Updated TermInterface to 0.3.3
- New interface for e-graph extraction using `EGraphs.egraph_reconstruct_expression`
- Simplify E-Graph Analysis Interface. Use Symbols or functions for identifying Analyses.
- Remove duplicates in E-Graph analyses data.
Many features have been ported from SymbolicUtils.jl. Metatheory.jl can be used in place of SymbolicUtils.jl when you do not need to manipulate mathematical expressions. The introduction of [TermInterface.jl](https://github.com/JuliaSymbolics/TermInterface.jl) has allowed for large potential in the generalization of term rewriting and symbolic analysis and manipulation features. Integration between Metatheory.jl and Symbolics.jl has been shown in the ["High-performance symbolic-numerics via multiple dispatch"](https://arxiv.org/abs/2105.03949) paper.
## Recommended Readings - Selected Publications
- The [Metatheory.jl manual](https://juliasymbolics.github.io/Metatheory.jl/stable/)
- The [Metatheory.jl introductory paper](https://joss.theoj.org/papers/10.21105/joss.03078#) gives a brief high level overview on the library and its functionalities.
- The Julia Manual [metaprogramming section](https://docs.julialang.org/en/v1/manual/metaprogramming/) is fundamental to understand what homoiconic expression manipulation is and how it happens in Julia.
- An [introductory blog post on SIGPLAN](https://blog.sigplan.org/2021/04/06/equality-saturation-with-egg/) about `egg` and e-graphs rewriting.
- [egg: Fast and Extensible Equality Saturation](https://dl.acm.org/doi/pdf/10.1145/3434304) contains the definition of *E-Graphs* on which Metatheory.jl's equality saturation rewriting backend is based. This is a strongly recommended reading.
- [High-performance symbolic-numerics via multiple dispatch](https://arxiv.org/abs/2105.03949): a paper about how we used Metatheory.jl to optimize code generation in [Symbolics.jl](https://github.com/JuliaSymbolics/Symbolics.jl)
## Contributing
If you'd like to give us a hand and contribute to this repository you can:
- Find a high level description of the project architecture in [ARCHITECTURE.md](https://github.com/juliasymbolics/Metatheory.jl/blob/master/ARCHITECTURE.md)
- Read the contribution guidelines in [CONTRIBUTING.md](https://github.com/juliasymbolics/Metatheory.jl/blob/master/CONTRIBUTING.md)
If you enjoyed Metatheory.jl and would like to help, please also consider a [tiny donation 💕](https://github.com/sponsors/0x0f0f0f/)!
## Installation
You can install the stable version:
```julia
julia> using Pkg; Pkg.add("Metatheory")
```
Or you can install the developer version (recommended by now for latest bugfixes)
```julia
julia> using Pkg; Pkg.add(url="https://github.com/JuliaSymbolics/Metatheory.jl")
```
## Documentation
Extensive Metatheory.jl documentation is available [here](https://juliasymbolics.github.io/Metatheory.jl/dev)
## Citing
If you use Metatheory.jl in your research, please [cite](https://github.com/juliasymbolics/Metatheory.jl/blob/master/CITATION.bib) our works.
---
```@raw html
<p align="center">
<a href="https://planting.space">
<img width="300px" src="https://raw.githubusercontent.com/juliasymbolics/Metatheory.jl/master/.github/plantingspace.png"/>
</a>
</p>
```
# Classical Term Rewriting
## Rule-based rewriting
Rewrite rules match and transform an expression. A rule is written using either
the `@rule` or `@theory` macros. It creates a callable `Rule` object.
### Basics of rule-based term rewriting in Metatheory.jl
**NOTE:** for a real world use case using mathematical constructs, please refer
to [SymbolicUtils.jl](https://github.com/JuliaSymbolics/SymbolicUtils.jl). SU
provides optimized types for mathematical expressions, code generation and a
polished set of rules for simplification.
Here is a simple symbolic rewrite rule, that uses formula for the double angle of the sine function:
```julia:rewrite1
using Metatheory
r1 = @rule sin(2(~x)) --> 2sin(~x)*cos(~x)
expr = :(sin(2z))
r1(expr)
```
The `@rule` macro takes a pair of patterns -- the _matcher_ and the _consequent_ (`@rule matcher OPERATOR consequent`). If an expression matches the matcher pattern, it is rewritten to the consequent pattern. `@rule` returns a callable object that applies the rule to an expression. There are different kinds of rule in Metatheory.jl:
**Rule operators**:
- `LHS => RHS`: create a `DynamicRule`. The RHS is *evaluated* on rewrite.
- `LHS --> RHS`: create a `RewriteRule`. The RHS is **not** evaluated but *symbolically substituted* on rewrite.
- `LHS == RHS`: create an `EqualityRule`. In e-graph rewriting, this rule behaves like `RewriteRule` but can go in both directions. Doesn't work in classical rewriting.
- `LHS ≠ RHS`: create an `UnequalRule`. Can only be used in e-graphs, and is used to eagerly stop the process of rewriting if LHS is found to be equal to RHS.
You can use **dynamic rules**, defined with the `=>` operator, to dynamically compute values on the right-hand side of expressions. This is the default behaviour of rules in [SymbolicUtils.jl](https://github.com/JuliaSymbolics/SymbolicUtils.jl).
Dynamic rules are similar to anonymous functions. Instead of a symbolic substitution, the right-hand side of a dynamic `=>` rule is evaluated during rewriting: the values that produced a match are bound to the pattern variables.
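For instance, a minimal dynamic rule (my own sketch, not part of the original tutorial) that folds numeric constants looks like this:
```julia
fold_add = @rule ~a::Number + ~b::Number => a + b
fold_add(:(2 + 3))   # returns 5, because the right-hand side is evaluated
fold_add(:(x + 3))   # returns nothing, since the predicate on `~a` does not hold
```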
`~x` in the rules above is a **slot variable** (or *pattern* variable) named `x`. In a matcher pattern, slot variables are placeholders that match exactly one expression. When used on the consequent side, they stand in for the matched expression. If a slot variable appears twice in a matcher pattern, **in classical rewriting** all corresponding matches must be equal (as tested by the `Base.isequal` function). For example, a rule such as `@rule ~x + ~x --> 2 * ~x` only fires when the same expression appears on both sides of the `+`.
If you try to apply the sine rule `r1` to an expression with a triple angle, it will return `nothing`; this is the way a rule signifies failure to match.
```julia:rewrite2
r1(:(sin(3z))) === nothing
```
The expression matched by a slot variable is not necessarily a single variable
```julia:rewrite3
r1(:(sin(2*(w-z))))
```
but it must be a single expression
```julia:rewrite4
r1(:(sin(2*(w+z)*(α+β)))) === nothing
```
Rules are of course not limited to a single slot variable
```julia:rewrite5
r2 = @rule sin(~x + ~y) --> sin(~x)*cos(~y) + cos(~x)*sin(~y);
r2(:(sin(α+β)))
```
If you want to match a variable number of subexpressions at once, you will need a **segment variable**. `~xs...` in the following example is a segment variable:
```julia:rewrite6
@rule(+(~xs...) => xs)(:(x + y + z))
```
`~xs` is a vector of subexpressions matched. You can use it to construct something more useful:
```julia:rewrite7
r3 = @rule *(~ys...)^~x => :((*)($(map(y-> :($y^$x), ys)...)));
r3(:((w*w*α*β)^2))
```
### Predicates for matching
Matcher pattern may contain slot variables with attached predicates, written as `~x::p` where `p` is either
- A function that takes a matched expression and returns a boolean value. Such a slot will be considered a match only if `p` returns true.
- A Julia type. Will be considered a match if and only if the value matching against `x` has a type that is a subtype of `p` (`typeof(x) <: p`)
Similarly `~x::g...` is a way of attaching a predicate `g` to a segment variable. In the case of segment variables `g` gets a vector of 0 or more expressions and must return a boolean value. If the same slot or segment variable appears twice in the matcher pattern, then at most one of the occurrences should have a predicate.
For example,
```julia:pred1
r = @rule +(~x, ~y::(ys->iseven(length(ys)))...) => "odd terms";
@show r(:(a + b + c + d))
@show r(:(b + c + d))
@show r(:(b + c + b))
@show r(:(a + b))
```
### Declaring Slots
Slot variables can be declared without the `~` using the `@slots` macro
```julia:slots1
@slots x y @rule sin(x + y) => sin(x)*cos(y) + cos(x)*sin(y);
```
This works for segments as well:
```julia:slots2
@slots xs @rule(+(~xs...) => xs);
```
The `@slots` macro is superfluous for the `@rule`, `@capture` and `@theory` macros.
Slot variables may be declared directly as the first arguments to those macros:
```julia:slots3
@rule x y sin(x + y) => sin(x)*cos(y) + cos(x)*sin(y);
```
### Theories
In almost all use cases, it is practical to define many rules grouped together.
A set of rewrite rules and equalities is called a *theory*, and can be defined with the
`@theory` macro. This macro is just syntax sugar to define vectors of rules in a nice and readable way.
```julia
t = @theory x y z begin
x * (y + z) --> (x * y) + (x * z)
x + y == (y + x)
#...
end;
```
Is the same thing as writing
```julia
v = [
@rule x y z x * (y + z) --> (x * y) + (x * z)
@rule x y x + y == (y + x)
#...
];
```
Theories are just collections and can be composed as regular Julia collections. The most useful way of composing theories is unioning them with the '∪' operator.
You are not limited to composing theories: you can manipulate and create them at both runtime and compile time, as regular vectors.
```julia
using Metatheory
using Metatheory.Library
comm_monoid = @commutative_monoid (*) 1
comm_group = @theory a b c begin
a + 0 --> a
a + b --> b + a
a + inv(a) --> 0 # inverse
a + (b + c) --> (a + b) + c
end
distrib = @theory a b c begin
a * (b + c) => (a * b) + (a * c)
end
t = comm_monoid ∪ comm_group ∪ distrib
```
## Composing rewriters
Rules may be *chained together* into more
sophisticated rewriters to avoid manual application of the rules. A rewriter is
any callable object which takes an expression and returns an expression or
`nothing`. If `nothing` is returned, that means there were no changes applicable
to the input expression. The Rules we created above are rewriters.
The `Metatheory.Rewriters` module contains some types which create and transform
rewriters.
- `Empty()` is a rewriter which always returns `nothing`
- `Chain(itr)` chain an iterator of rewriters into a single rewriter which applies
each chained rewriter in the given order.
If a rewriter returns `nothing` this is treated as a no-change.
- `RestartedChain(itr)` like `Chain(itr)` but restarts from the first rewriter once on the
first successful application of one of the chained rewriters.
- `IfElse(cond, rw1, rw2)` runs the `cond` function on the input, applies `rw1` if cond
returns true, `rw2` if it returns false
- `If(cond, rw)` is the same as `IfElse(cond, rw, Empty())`
- `Prewalk(rw; threaded=false, thread_cutoff=100)` returns a rewriter which does a pre-order
(*from top to bottom and from left to right*) traversal of a given expression and applies
the rewriter `rw`. `threaded=true` will use multi threading for traversal.
Note that if `rw` returns `nothing` when a match is not found, then `Prewalk(rw)` will
also return nothing unless a match is found at every level of the walk. If you are
applying multiple rules, then `Chain` already has the appropriate passthrough behavior.
If you only want to apply one rule, then consider using `PassThrough`.
`thread_cutoff`
is the minimum number of nodes in a subtree which should be walked in a threaded spawn.
- `Postwalk(rw; threaded=false, thread_cutoff=100)` similarly does post-order
(*from left to right and from bottom to top*) traversal.
- `Fixpoint(rw)` returns a rewriter which applies `rw` repeatedly until there are no changes to be made.
- `FixpointNoCycle` behaves like `Fixpoint` but instead it applies `rw` repeatedly only while it is returning new results.
- `PassThrough(rw)` returns a rewriter which if `rw(x)` returns `nothing` will instead
return `x` otherwise will return `rw(x)`.
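As a quick, self-contained sketch (my own example, using only the combinators above), a small theory can be turned into a rewriter that is applied bottom-up until nothing changes anymore:
```julia
using Metatheory
using Metatheory.Rewriters

simplify_t = @theory a begin
    a + 0 --> a
    a * 1 --> a
end

simplify = Fixpoint(Postwalk(Chain(simplify_t)))
simplify(:((x * 1) + 0))    # should return :x
```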
### Chaining rewriters
Several rules may be chained to give a chain of rules. A `Chain` holds an array of rules which are applied to the expression one after the other.
An important feature of `Chain` is that it returns the expression (instead of `nothing`) if it doesn't change the expression.
Note that a chain is ordered: listing the same rules in a different order can produce a different result.
One way to reduce this sensitivity to rule order is `RestartedChain`: it restarts the chain after a successful application of a rule, so once a rule is hit the chain starts again and can apply all the other rules to the resulting expression. You can also use `Fixpoint` to apply the rules until there are no changes.
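A small sketch of these behaviours (my own example, not from the original manual):
```julia
using Metatheory.Rewriters

rpow = @rule (~x)^2 --> ~x * ~x
rmul = @rule ~x * ~x --> square(~x)

chain = Chain([rpow, rmul])
chain(:(a^2))      # :(square(a)): rpow fires first, then rmul sees its result
chain(:(b + c))    # :(b + c): nothing matches, so the expression (not `nothing`) is returned

reversed = Chain([rmul, rpow])
reversed(:(a^2))   # :(a * a): rmul ran before rpow had produced anything it could match

RestartedChain([rmul, rpow])(:(a^2))   # :(square(a)): restarts from the first rule after a hit
```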
# Visualizing E-Graphs
You can visualize e-graphs in VSCode by using GraphViz.jl.
All you need to do is install GraphViz.jl and evaluate an e-graph after including the extra script:
```julia
using GraphViz
include(dirname(pathof(Metatheory)) * "/extras/graphviz.jl")
algebra_rules = @theory a b c begin
a * (b * c) == (a * b) * c
a + (b + c) == (a + b) + c
a + b == b + a
a * (b + c) == (a * b) + (a * c)
(a + b) * c == (a * c) + (b * c)
-a == -1 * a
a - b == a + -b
1 * a == a
0 * a --> 0
a + 0 --> a
a::Number * b == b * a::Number
a::Number * b::Number => a * b
a::Number + b::Number => a + b
end;
ex = :(a - a)
g = EGraph(ex)
params = SaturationParams(; timeout = 2)
saturate!(g, algebra_rules, params)
g
```
And you will see a nice e-graph drawing in the Julia Plots VSCode panel:
 | Metatheory | https://github.com/JuliaSymbolics/Metatheory.jl.git |
|
[
"MIT"
] | 2.0.2 | 2cdcf1d7c4ae2d81cc015fae603df5c1b4b8f422 | docs | 13486 | ---
title: 'Metatheory.jl: Fast and Elegant Algebraic Computation in Julia with Extensible Equality Saturation'
tags:
- Julia
- compiler
- symbolic
- algebra
- rewriting
- optimization
authors:
- name: Alessandro Cheli #^[Custom footnotes for e.g. denoting who the corresponding author is can be included like this.]
orcid: 0000-0002-8122-9469
affiliation: 1 # (Multiple affiliations must be quoted)
affiliations:
- name: University of Pisa, Pisa, Italy
index: 1
date: 11 February 2021
bibliography: paper.bib
---
# Statement of Need
<!-- The Julia programming language is a fresh approach to technical computing [@bezanson2017julia], disrupting the popular conviction that a programming language cannot be very high level, easy to learn, and performant at the same time. One of the most practical features of Julia is the excellent metaprogramming and macro system, allowing for programmatic generation and manipulation of Julia expressions as first-class values in the core language, with a well-known paradigm similar to LISP idioms such as Scheme,
a programming language property colloquially referred to as *homoiconicity*. -->
The Julia programming language is a fresh approach to technical computing [@bezanson2017julia], disrupting the popular conviction that a programming language cannot be high-level, easy to learn, and performant at the same time. One of the most practical features of Julia is the excellent metaprogramming and macro system, allowing for *homoiconicity*: programmatic generation and manipulation of expressions as first-class values, a well-known paradigm found in LISP dialects such as Scheme.
Metatheory.jl is a general-purpose metaprogramming and algebraic computation library for the Julia programming language, designed to take advantage of its powerful reflection capabilities to bridge the gap between symbolic mathematics,
abstract interpretation, equational reasoning, optimization, composable compiler transforms, and advanced homoiconic pattern-matching features. Intuitively, Metatheory.jl transforms Julia expressions into other Julia expressions at both compile time and run time. This allows users to perform customized and composable compiler optimizations that are specifically tailored to single, arbitrary Julia packages. The library provides a simple, algebraically composable interface to help scientists to implement and reason about all kinds of formal systems, by defining concise rewriting rules as syntactically-valid Julia code. The primary benefit of using Metatheory.jl is the algebraic nature of the specification of the rewriting system. Composable blocks of rewrite rules bear a strong resemblance to algebraic
structures encountered in everyday scientific literature.
<!-- Rewrite rules are defined as regular Julia expressions, manipulating other syntactically valid Julia expressions: since Julia supports LaTeX-like abbreviations of UTF8 mathematical symbols as valid operators and symbols,
rewrite theories in Metatheory.jl can bear a strong structural and visual resemblance to mathematical formalisms encountered in paper literature. -->
<!-- Theories can then be executed through two, highly composable, rewriting backends. The first backend relies on a *classic* fixed-point recursive iteration of AST, with a match-and-replace algorithm built on top of the [@matchcore] pattern matcher. This backend is suitable for deterministic recursive algorithms that intensively use pattern matching on syntax trees, for example, defining an interpreter from operational or denotational semantics. Nevertheless, when using this classical approach, even trivial equational rules such as commutativity and associativity may cause the rewriting algorithm to loop indefinitely, or to return unexpected results. This is known as *rewrite order* and is notoriously recognized for requiring extensive user reasoning about the ordering and structuring of rules to ensure termination. -->
# Summary
Metatheory.jl offers a concise macro system to define *theories*: composable blocks of rewriting rules that can be executed through two, highly composable, rewriting backends. The first is based on standard rewriting, built on top of the pattern matcher developed in @matchcore.
This approach, however, suffers from the usual problems of rewriting systems. For example, even trivial equational rules such as commutativity may lead to non-terminating systems and thus need to be adjusted by some sort of structuring or rewriting order, which is known to require extensive user reasoning.
The other back-end for Metatheory.jl, the core of our contribution, is designed so that it does not require the user to reason about rewriting order. To do so it relies on equality saturation on *e-graphs*, the state-of-the-art technique adapted from the `egg` Rust library [@egg].
*E-graphs* can compactly represent many equivalent expressions and programs. Provided with a theory of rewriting rules, defined in pure Julia, the *equality saturation* process iteratively executes an e-graph-specific pattern matcher and inserts the matched substitutions. Since e-graphs can contain loops, infinite derivations can be represented compactly and it is not required that the described rewrite system be terminating or confluent.
The saturation process relies on the definition of e-graphs to include *rebuilding*, i.e. the automatic process of propagation and maintenance of congruence closures.
One of the core contributions of @egg is a delayed e-graph rebuilding process that is executed at the end of each saturation step, whereas previous definitions of e-graphs in the literature included rebuilding after every rewrite operation.
Provided with *equality saturation*, users can efficiently derive (and analyze) all possible equivalent expressions contained in an e-graph. The saturation process can be required to stop prematurely as soon as chosen properties about the e-graph and its expressions are proved. This latter back-end based on *e-graphs* is suitable for partial evaluators, symbolic mathematics, static analysis, theorem proving and superoptimizers.
<!-- The other back-end for Metatheory.jl, the core of our contribution, is designed to not require the user to reason about rewriting order by employing equality saturation on e-graphs. This backend allows programmers to define equational theories in pure Julia without worrying about rule ordering and structuring, by relying on state-of-the-art techniques for equality saturation over *e-graphs* adapted from the `egg` Rust library [@egg].
Provided with a theory of equational rewriting rules, *e-graphs* compactly represent many equivalent programs. Saturation iteratively executes an e-graph specific pattern matcher to efficiently compute (and analyze) all possible equivalent expressions contained in the e-graph congruence closure. This latter back-end is suitable for partial evaluators, symbolic mathematics, static analysis, theorem proving and superoptimizers. -->

The original `egg` library [@egg] is
the first implementation of generic and extensible e-graphs [@nelson1980fast]; the contributions of `egg` include novel amortized algorithms for fast and efficient equivalence saturation and analysis.
Differently from the original Rust implementation of `egg`, which handles expressions defined as Rust strings and data structures, our system directly manipulates homoiconic Julia expressions, and can therefore fully leverage the Julia subtyping mechanism [@zappa2018julia], allowing programmers to build expressions containing not only symbols but all kinds of Julia values.
This permits rewriting and analyses to be efficiently based on runtime data contained in expressions. Most importantly, users can -- and are encouraged to -- include type assertions in the left-hand side of rewriting rules in theories.
One of the project goals of Metatheory.jl, beyond being easy to use and composable, is to be fast and efficient. Both the first-class pattern matching system and the generation of e-graph analyses from theories rely on RuntimeGeneratedFunctions.jl [@rgf], generating callable functions at runtime that efficiently bypass Julia's world age problem (explained and formalized in @belyakova2020world) with the full performance of a standard Julia anonymous function.
## Analyses and Extraction
With Metatheory.jl, modeling analyses and conditional/dynamic rewrites are straightforward. It is possible to check conditions on runtime values or to read and write from external data structures during rewriting. The analysis mechanism described in `egg` [@egg] and re-implemented in our contribution lets users define ways to compute additional analysis metadata from an arbitrary semi-lattice domain, such as costs of nodes or logical statements attached to terms. Other than for inspection, analysis data can be used to modify expressions in the e-graph both during rewriting steps and after e-graph saturation.
Therefore using the equality saturation (e-graph) backend, extraction can be performed as an on-the-fly e-graph analysis or after saturation. Users
can define their own cost function, or choose between a variety of predefined cost functions for automatically extracting the best-fitting expressions from an equivalence class represented in an e-graph.
# Example Usage
In this example, we build rewrite systems, called `theories` in Metatheory.jl, for simplifying expressions
in the usual commutative monoid of multiplication and the commutative group of addition, and we compose
the `theories` together with a *constant folding* theory. The pattern matcher for the e-graphs backend
allows us to use the existing Julia type hierarchy for integers and floating-point numbers with a high level
of abstraction. As a contribution over the original egg [@egg] implementation, left-hand sides of rules in Metatheory.jl can contain type assertions on pattern variables, to give rules that depend on consistent type hierarchies and to seamlessly access literal Julia values in the right-hand side of dynamic rules.
We finally introduce two simple rules for simplifying fractions, that
for the sake of simplicity, do not check any additional analysis data.
\autoref{fig:egraph} contains a friendly visualization of a consistent fragment of the equality saturation process in this example.
You can see how loops evidently appear in the definition of the rewriting rules.
While the classic rewriting backend would loop indefinitely or stop early when repeatedly matching these rules,
the e-graph backend natively supports this level of abstraction and allows the
programmer to completely forget about the ordering and looping of rules.
Efficient scheduling heuristics are applied automatically to prevent instantaneous
combinatorial explosion of the e-graph, thus preventing substantial slowdown of the equality saturation
process.
```julia
using Metatheory
using Metatheory.EGraphs
comm_monoid = @theory begin
# commutativity
a * b => b * a
# identity
a * 1 => a
# associativity
a * (b * c) => (a * b) * c
(a * b) * c => a * (b * c)
end;
comm_group = @theory begin
# commutativity
a + b => b + a
# identity
a + 0 => a
# associativity
a + (b + c) => (a + b) + c
(a + b) + c => a + (b + c)
# inverse
a + (-a) => 0
end;
# dynamic rules are defined with the `|>` operator
folder = @theory begin
a::Real + b::Real |> a+b
a::Real * b::Real |> a*b
end;
div_sim = @theory begin
(a * b) / c => a * (b / c)
a::Real / a::Real |> (a != 0 ? 1 : error("division by 0"))
end;
t = union(comm_monoid, comm_group, folder, div_sim) ;
g = EGraph(:(a * (2*3) / 6)) ;
saturate!(g, t) ;
ex = extract!(g, astsize)
# :a
```
# Conclusion
Many applications of equality saturation to advanced optimization tasks have been recently published. Herbie [@panchekha2015automatically]
is a tool for automatically improving the precision of floating point expressions, which recently switched to `egg` as the core rewriting backend. However, Herbie requires interoperation and conversion of expressions between different languages and libraries. In @yang2021equality, the authors used `egg` to superoptimize tensor signal flow graphs describing neural networks. Implementing similar case studies in pure Julia would make valid research contributions on their own. We are confident that a well-integrated and homoiconic equality saturation engine in pure Julia will permit exploration of many new metaprogramming applications, and allow them to be implemented in an elegant, performant and concise way. Code for Metatheory.jl is available in @metatheory, or at [https://github.com/0x0f0f0f/Metatheory.jl](https://github.com/0x0f0f0f/Metatheory.jl).
# Acknowledgements
We acknowledge Max Willsey and contributors for their work on the original `egg` library [@egg], Christopher Rackauckas and Christopher Foster for their efforts in developing RuntimeGeneratedFunctions [@rgf], Taine Zhao for developing MLStyle [@mlstyle] and MatchCore [@matchcore], and Philip Zucker for his original idea of implementing E-Graphs in Julia [@philzuck1; @philzuck2] and support during the development of the project. Special thanks to Filippo Bonchi for a friendly review of a preliminary version of this article.
# References
This is a simple script to convert Metatheory.jl <https://github.com/0x0f0f0f/Metatheory.jl> theories into an Egg <https://egraphs-good.github.io/> query for comparison.
Get a Rust toolchain from <https://rustup.rs/>.
Make a new project
```
cargo new my_project
cd my_project
```
Add egg as a dependency to the Cargo.toml. Add the last line shown here.
```
[package]
name = "autoegg"
version = "0.1.0"
authors = ["Philip Zucker <[email protected]>"]
edition = "2018"
# See more keys and their definitions at https://doc.rust-lang.org/cargo/reference/manifest.html
[dependencies]
egg = "0.6.0"
```
Copy and paste the Julia script into the project folder, and replace the example theory and query in the script with your own.
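For reference, the theory and query you substitute are ordinary Metatheory.jl definitions. The exact variable names expected depend on `gen_egg.jl` itself, so the following is only a sketch:
```julia
using Metatheory

t = @theory a b c begin
    a + b == b + a
    a + 0 == a
    a * (b + c) == (a * b) + (a * c)
end

query = :(x * (y + 0))
```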
Run it
```
julia gen_egg.jl
```
Now you can run it in Egg
```
cargo run --release
```
Profit.
# Literate tests
This folder contains Julia scripts in the [Literate.jl](https://fredrikekre.github.io/Literate.jl/v2/) format.
Such scripts are executed by tests, and are also included in the generated documentation.
module AxisAlgorithms
using WoodburyMatrices
using LinearAlgebra, SparseArrays
export A_ldiv_B_md!,
A_ldiv_B_md,
A_mul_B_md!,
A_mul_B_md,
A_mul_B_perm!,
A_mul_B_perm
"""
`A_ldiv_B_md(F, src, dim)` solves `F\b` for slices `b` of `src` along dimension `dim`,
storing the result along the same dimension of the output.
Currently, `F` must be an LU-factorized tridiagonal matrix or a Woodbury matrix.
"""
A_ldiv_B_md(F, src, dim::Integer) = A_ldiv_B_md!(similar(src), F, src, dim)
"""
`A_mul_B_md(M, src, dim)` computes `M*x` for slices `x` of `src` along dimension `dim`,
storing the resulting vector along the same dimension of the output.
`M` must be an `AbstractMatrix`. This uses an in-place naive algorithm.
"""
A_mul_B_md(M::AbstractMatrix, src, dim::Integer) = A_mul_B_md!(alloc_matmul(M,src,dim), M, src, dim)
"""
`A_mul_B_perm(M, src, dim)` computes `M*x` for slices `x` of `src` along dimension `dim`, storing the
resulting vector along the same dimension of the output.
`M` must be an `AbstractMatrix`. This uses `permutedims` to make dimension
`dim` into the first dimension, performs a standard matrix multiplication, and restores the original
dimension ordering. In many cases, this algorithm exhibits the best cache behavior.
"""
A_mul_B_perm(M::AbstractMatrix, src, dim::Integer) = A_mul_B_perm!(alloc_matmul(M,src,dim), M, src, dim)
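# Usage sketch (added comment, not part of the original source; it assumes the LU
# factorization does not pivot, e.g. a diagonally dominant tridiagonal matrix):
#
#     using AxisAlgorithms, LinearAlgebra
#     src = rand(4, 5, 6)
#     F = lu(Tridiagonal(rand(3), 2 .+ rand(4), rand(3)))
#     sol = A_ldiv_B_md(F, src, 1)    # solve along dimension 1 (length 4)
#     M = rand(3, 5)
#     dest = A_mul_B_md(M, src, 2)    # multiply along dimension 2, giving a 4x3x6 array
#     A_ldiv_B_md!(src, F, src, 1)    # the `!` variants write into a preallocated array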
function alloc_matmul(M,src::AbstractArray{S,N},dim) where {S,N}
sz = [size(src)...]
sz[dim] = size(M,1)
T = Base.promote_op(*, eltype(M), S)
Array{T,N}(undef, sz...)
end
include("tridiag.jl")
include("matmul.jl")
include("woodbury.jl")
end # module
# Consider permutedims as an alternative to direct multiplication.
# Multiplication is an O(m*N) cost compared to an O(N) cost for tridiagonal algorithms.
# However, when the multiplication is only a small fraction of the total time
# (for example, when m is small), then these can be convenient and avoid the need for calls to permutedims.
# Multiplication using permutedims
"""
`A_mul_B_perm!(dest, M, src, dim)` computes `M*x` for slices `x` of `src` along dimension `dim`,
storing the result in `dest`. `M` must be an `AbstractMatrix`. This uses `permutedims` to make dimension
`dim` into the first dimension, performs a standard matrix multiplication, and restores the original
dimension ordering. In many cases, this algorithm exhibits the best cache behavior.
"""
function A_mul_B_perm!(dest, M::AbstractMatrix, src, dim::Integer)
check_matmul_sizes(dest, M, src, dim)
order = [dim; setdiff(1:ndims(src), dim)]
srcp = permutedims(src, order)
tmp = Array{eltype(dest), 2}(undef, size(dest, dim), div(length(dest), size(dest, dim)))
mul!(tmp, M, reshape(srcp, (size(src,dim), div(length(srcp), size(src,dim)))))
iorder = [2:dim; 1; dim+1:ndims(src)]
permutedims!(dest, reshape(tmp, size(dest)[order]), iorder)
dest
end
# Direct (temporary-free) multiplication
"""
`A_mul_B_md!(dest, M, src, dim)` computes `M*x` for slices `x` of `src` along dimension `dim`,
storing the result in `dest`. `M` must be an `AbstractMatrix`. This uses an in-place naive algorithm.
"""
function A_mul_B_md!(dest, M::AbstractMatrix, src, dim::Integer)
check_matmul_sizes(dest, M, src, dim)
if size(M,1) == size(M,2) == 1
return mul!(dest, src, M[1,1])
end
R2 = CartesianIndices(size(dest)[dim+1:end])
if dim > 1
R1 = CartesianIndices(size(dest)[1:dim-1])
_A_mul_B_md!(dest, M, src, R1, R2)
else
_A_mul_B_md!(dest, M, src, R2)
end
end
# Multiplication along the first dimension
# Here we expect that M will typically be small and fit in cache, whereas src and dest do not
function _A_mul_B_md!(dest, M::AbstractMatrix, src, R2::CartesianIndices)
m, n = size(M, 1), size(M, 2)
if m == n == 2
return _A_mul_B_md_2x2!(dest, M, src, R2)
end
for I2 in R2
@inbounds for i = 1:m
dest[i,I2] = zero(eltype(dest))
end
@inbounds for j = 1:n
b = src[j,I2]
@simd for i = 1:m
dest[i,I2] += M[i,j]*b
end
end
end
dest
end
_A_mul_B_md(M::AbstractMatrix, src, R2::CartesianIndices) = _A_mul_B_md!(alloc_matmul(M, src, 1), M, src, R2)
function _A_mul_B_md_2x2!(dest, M::AbstractMatrix, src, R2::CartesianIndices)
a, b, c, d = M[1,1], M[1,2], M[2,1], M[2,2]
@simd for I2 in R2
@inbounds begin
s1, s2 = src[1,I2], src[2,I2]
dest[1,I2] = a*s1 + b*s2
dest[2,I2] = c*s1 + d*s2
end
end
dest
end
function _A_mul_B_md!(dest, M::SparseMatrixCSC, src, R2::CartesianIndices)
m, n = size(M,1), size(M,2)
nzv = M.nzval
rv = M.rowval
cp = M.colptr
for I2 in R2
@inbounds for i = 1:m
dest[i,I2] = zero(eltype(dest))
end
for j = 1:n
b = src[j,I2]
@inbounds for k = cp[j]:(cp[j+1]-1)
dest[rv[k],I2] += nzv[k]*b
end
end
end
dest
end
# Multiplication along any other dimension
function _A_mul_B_md!(dest, M::AbstractMatrix, src, R1::CartesianIndices, R2::CartesianIndices)
m, n = size(M, 1), size(M, 2)
if m == n == 2
return _A_mul_B_md_2x2!(dest, M, src, R1, R2)
end
fill!(dest, zero(eltype(dest)))
for I2 in R2
for j = 1:n
@inbounds for i = 1:m
Mij = M[i,j]
@simd for I1 in R1
dest[I1,i,I2] += Mij*src[I1,j,I2]
end
end
end
end
dest
end
_A_mul_B_md(M::AbstractMatrix, src, R1::CartesianIndices, R2::CartesianIndices) = _A_mul_B_md!(alloc_matmul(M, src, ndims(R1)+1), M, src, R1, R2)
function _A_mul_B_md_2x2!(dest, M::AbstractMatrix, src, R1::CartesianIndices, R2::CartesianIndices)
a, b, c, d = M[1,1], M[1,2], M[2,1], M[2,2]
for I2 in R2
@simd for I1 in R1
@inbounds begin
s1, s2 = src[I1,1,I2], src[I1,2,I2]
dest[I1,1,I2] = a*s1 + b*s2
dest[I1,2,I2] = c*s1 + d*s2
end
end
end
dest
end
function _A_mul_B_md!(dest, M::SparseMatrixCSC, src, R1::CartesianIndices, R2::CartesianIndices)
m, n = size(M,1), size(M,2)
nzv = M.nzval
rv = M.rowval
cp = M.colptr
fill!(dest, zero(eltype(dest)))
for I2 in R2
for j = 1:n
@inbounds for k = cp[j]:(cp[j+1]-1)
i, Mij = rv[k], nzv[k]
@simd for I1 in R1
dest[I1,i,I2] += Mij*src[I1,j,I2]
end
end
end
end
dest
end
function check_matmul_sizes(dest, M::AbstractMatrix, src, dim)
1 <= dim <= max(ndims(dest),ndims(src)) || throw(DimensionMismatch("The chosen dimension $dim is larger than $(ndims(src)) and $(ndims(dest))"))
m, n = size(M, 1), size(M, 2)
n == size(src, dim) && m == size(dest, dim) || throw(DimensionMismatch("Sizes $m, $n, $(size(src,dim)), and $(size(dest,dim)) do not match"))
for i = 1:max(ndims(src), ndims(dest))
i == dim && continue
if size(src,i) != size(dest,i)
throw(DimensionMismatch("Sizes $(size(dest)), $(size(src)) do not match"))
end
end
nothing
end
_axes(F::Factorization, dim::Int) = hasmethod(axes, Tuple{typeof(F), Int}) ? axes(F, dim) : Base.OneTo(size(F, dim))
_axes(A::AbstractArray, dim::Int) = axes(A, dim)
"""
`A_ldiv_B_md!(dest, F, src, dim)` solves a tridiagonal system along dimension `dim` of `src`,
storing the result in `dest`. Currently, `F` must be an LU-factorized tridiagonal matrix.
If desired, you may safely use the same array for both `src` and `dest`, so that this becomes an
in-place algorithm.
"""
function A_ldiv_B_md!(dest, F, src, dim::Integer)
1 <= dim <= max(ndims(dest),ndims(src)) || throw(DimensionMismatch("The chosen dimension $dim is larger than $(ndims(src)) and $(ndims(dest))"))
ax = _axes(F, 1)
ax == axes(src, dim) && ax == axes(dest, dim) || throw(DimensionMismatch("Axes $ax, $(axes(src,dim)), and $(axes(dest,dim)) do not match"))
axes(dest) == axes(src) || throw(DimensionMismatch("Axes $(axes(dest)), $(axes(src)) do not match"))
check_matrix(F)
R1 = CartesianIndices(axes(dest)[1:dim-1])
R2 = CartesianIndices(axes(dest)[dim+1:end])
_A_ldiv_B_md!(dest, F, src, R1, R2)
end
_A_ldiv_B_md(F, src, R1::CartesianIndices, R2::CartesianIndices) =
_A_ldiv_B_md!(similar(src, promote_type(eltype(F), eltype(src))), F, src, R1, R2)
# Solving along the first dimension
function _A_ldiv_B_md!(dest, F::LU{T,<:Tridiagonal{T}}, src, R1::CartesianIndices{0}, R2::CartesianIndices) where {T}
ax = _axes(F, 1)
axbegin, axend = first(ax), last(ax)
dl = F.factors.dl
d = F.factors.d
du = F.factors.du
# Forward substitution
@inbounds for I2 in R2
dest[axbegin, I2] = src[axbegin, I2]
for i = axbegin+1:axend # note: cannot use @simd here!
dest[i, I2] = src[i, I2] - dl[i-1]*dest[i-1, I2]
end
end
# Backward substitution
dinv = 1 ./ d
@inbounds for I2 in R2
dest[axend, I2] /= d[axend]
for i = axend-1:-1:axbegin # note: cannot use @simd here!
dest[i, I2] = (dest[i, I2] - du[i]*dest[i+1, I2])*dinv[i]
end
end
dest
end
# Solving along any other dimension
function _A_ldiv_B_md!(dest, F::LU{T,<:Tridiagonal{T}}, src, R1::CartesianIndices, R2::CartesianIndices) where {T}
ax = _axes(F, 1)
axbegin, axend = first(ax), last(ax)
dl = F.factors.dl
d = F.factors.d
du = F.factors.du
# Forward substitution
@inbounds for I2 in R2
@simd for I1 in R1
dest[I1, axbegin, I2] = src[I1, axbegin, I2]
end
for i = axbegin+1:axend
@simd for I1 in R1
dest[I1, i, I2] = src[I1, i, I2] - dl[i-1]*dest[I1, i-1, I2]
end
end
end
# Backward substitution
dinv = 1 ./ d
for I2 in R2
@simd for I1 in R1
dest[I1, axend, I2] *= dinv[axend]
end
for i = axend-1:-1:axbegin
@simd for I1 in R1
dest[I1, i, I2] = (dest[I1, i, I2] - du[i]*dest[I1, i+1, I2])*dinv[i]
end
end
end
dest
end
function check_matrix(F::LU{T,<:Tridiagonal{T}}) where {T}
ax = _axes(F, 1)
for i in ax
F.ipiv[i] == i || error("For efficiency, pivoting is not supported")
end
nothing
end
function _A_ldiv_B_md!(dest, W::Woodbury, src, R1, R2)
_A_ldiv_B_md!(dest, W.A, src, R1, R2)
tmp1 = _A_mul_B_md(W.V, dest, R1, R2)
tmp2 = _A_mul_B_md(W.Cp, tmp1, R1, R2)
tmp3 = _A_mul_B_md(W.U, tmp2, R1, R2)
# TODO?: would be nice to fuse the next two steps
tmp4 = _A_ldiv_B_md(W.A, tmp3, R1, R2)
sub!(dest, tmp4)
end
function sub!(A, B)
for I in eachindex(A, B)
A[I] -= B[I]
end
A
end
check_matrix(W::Woodbury) = check_matrix(W.A)
using AxisAlgorithms
using Test
@testset "matmul" begin
n = 5
m = 3
src = rand(n,n,n)
for M in (rand(m,n), sprand(m,n,0.2))
for dim = 1:3
dest1 = mapslices(b->M*b, src, dims=dim)
sz = fill(n,3)
sz[dim] = m
dest2 = rand(sz...)
AxisAlgorithms.A_mul_B_md!(dest2, M, src, dim)
@test dest1 ≈ dest2
if !issparse(M)
rand!(dest2)
AxisAlgorithms.A_mul_B_perm!(dest2, M, src, dim)
@test dest1 ≈ dest2
end
end
end
# Test size-checking
for dim = 1:3
M = sprand(m,n,0.2)
dest1 = mapslices(b->M*b, src, dims=dim)
sz = fill(n,3)
sz[dim] = m+1
dest2 = rand(sz...)
@test_throws DimensionMismatch AxisAlgorithms.A_mul_B_md!(dest2, M, src, dim)
@test_throws DimensionMismatch AxisAlgorithms.A_mul_B_perm!(dest2, M, src, dim)
sz[dim] = m
sz[mod1(dim+1,3)] = 1
dest2 = rand(sz...)
@test_throws DimensionMismatch AxisAlgorithms.A_mul_B_md!(dest2, M, src, dim)
@test_throws DimensionMismatch AxisAlgorithms.A_mul_B_perm!(dest2, M, src, dim)
end
# Test 1x1 and 2x2 cases
n = 5
for dim = 1:3
sz = fill(n,3)
sz[dim] = 1
src2 = rand(sz...)
M = fill(3.2,1,1)
dest1 = mapslices(b->M*b, src2, dims=dim)
dest2 = AxisAlgorithms.A_mul_B_md(M, src2, dim)
@test dest1 ≈ dest2
sz[dim] = 2
src2 = rand(sz...)
M = rand(2,2)
dest1 = mapslices(b->M*b, src2, dims=dim)
dest2 = AxisAlgorithms.A_mul_B_md(M, src2, dim)
@test dest1 ≈ dest2
end
end
using AxisAlgorithms
using Test, LinearAlgebra, SparseArrays, Random
include("tridiag.jl")
include("matmul.jl")
include("woodbury.jl")
using Test, OffsetArrays
@testset "Tridiag" begin
d = 2 .+ rand(5)
dl = rand(4)
du = rand(4)
M = Tridiagonal(dl, d, du)
F = lu(M)
src = rand(5,5,5)
for dim = 1:3
dest1 = mapslices(x->ldiv!(F, x), copy(src), dims=dim)
dest2 = similar(src)
AxisAlgorithms.A_ldiv_B_md!(dest2, F, src, dim)
@test dest1 ≈ dest2
end
src = OffsetArray(src, -2:2, 1:5, 0:4)
dest1 = mapslices(x->ldiv!(F, x), copy(src), dims=2)
dest2 = similar(src)
AxisAlgorithms.A_ldiv_B_md!(dest2, F, src, 2)
@test dest1 ≈ dest2
end
using WoodburyMatrices
using Test
@testset "Woodbury" begin
d = 2 .+ rand(5)
dl = -rand(4)
du = -rand(4)
M = Tridiagonal(dl, d, du)
F = lu(M)
U = sprand(5,2,0.2)
V = sprand(2,5,0.2)
C = rand(2,2)
W = Woodbury(F, U, C, V)
src = rand(5, 8)
@test W\src ≈ AxisAlgorithms.A_ldiv_B_md(W, src, 1)
src = rand(5, 5, 5, 5)
for dim = 1:4
dest1 = mapslices(x->W\x, copy(src), dims=dim)
dest2 = similar(src)
AxisAlgorithms.A_ldiv_B_md!(dest2, W, src, dim)
@test dest1 ≈ dest2
end
end
# AxisAlgorithms
[](https://github.com/timholy/AxisAlgorithms.jl/actions/workflows/ci.yml)
[](https://codecov.io/gh/timholy/AxisAlgorithms.jl)
AxisAlgorithms is a collection of filtering and linear algebra algorithms for multidimensional arrays.
For algorithms that would typically apply along the columns of a matrix, you can instead pick an arbitrary axis (dimension).
Note that all functions come in two variants, a `!` version that uses pre-allocated output (where the output is
the first argument) and a version that allocates the output. Below, the `!` versions will be described.
### Tridiagonal and Woodbury inversion
If `F` is an LU-factorization of a tridiagonal matrix, or a [Woodbury matrix](WoodburyMatrices.jl) created from such a factorization,
then `A_ldiv_B_md!(dest, F, src, axis)` will solve the equation `F\b` for 1-dimensional slices
along dimension `axis`.
Unlike many linear algebra algorithms, this one is safe to use as a mutating algorithm with `dest=src`.
The tridiagonal case does not create temporaries, and it has excellent cache behavior.
### Matrix multiplication
Multiply a matrix `M` to all 1-dimensional slices along a particular dimension.
Here you have two algorithms to choose from:
- `A_mul_B_perm!(dest, M, src, axis)` uses `permutedims` and standard BLAS-accelerated routines; it allocates temporary storage.
- `A_mul_B_md!(dest, M, src, axis)` is a non-allocating naive routine. This also has optimized implementations for sparse `M` and 2x2 matrices.
In general it is very difficult to get efficient cache behavior for multidimensional multiplication, and often using `A_mul_B_perm!` is the best strategy.
However, there are cases where `A_mul_B_md!` is faster.
It's a good idea to time both and see which works better for your case.
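A minimal usage sketch (this example is not from the package itself; it assumes the tridiagonal factorization does not pivot, which holds for diagonally dominant matrices):

```julia
using AxisAlgorithms, LinearAlgebra

src = rand(4, 5, 6)

# Solve a tridiagonal system along dimension 1 (length 4)
F = lu(Tridiagonal(rand(3), 2 .+ rand(4), rand(3)))
sol = A_ldiv_B_md(F, src, 1)

# Multiply a 3x5 matrix along dimension 2 (length 5), giving a 4x3x6 result
M = rand(3, 5)
dest1 = A_mul_B_md(M, src, 2)
dest2 = A_mul_B_perm(M, src, 2)   # same result, different cache strategy
```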
using Documenter, ModiaLang
makedocs(
#modules = [Modia],
sitename = "ModiaLang",
authors = "Hilding Elmqvist (Mogram) and Martin Otter (DLR-SR)",
format = Documenter.HTML(prettyurls = false),
pages = [
"Home" => "index.md",
]
)
include("Blocks.jl")
include("Electric.jl")
include("Rotational.jl") | ModiaLang | https://github.com/ModiaSim/ModiaLang.jl.git |
|
[
"MIT"
] | 0.11.3 | 2557b26a61b61ebcd34cbe0c36b160b707bbf5df | code | 3805 | """
Modia module with block component models (inspired from Modelica Standard Library).
* Developer: Hilding Elmqvist, Mogram AB, Martin Otter, DLR
* Copyright (c) 2016-2021: Hilding Elmqvist, Martin Otter
* License: MIT (expat)
"""
#module Blocks
using ModiaLang
#export Gain, FirstOrder, Feedback, PI, Step, Ramp # Sine, Switch, MIMO
# Single-Input-Single-Output continuous control block
SISO = Model(
u = input,
y = output
)
# Gain
Gain = SISO | Model(
k = 1, # (info = "Gain")
equations = :[
y = k*u ]
)
# First-order transfer function block (= 1 pole)
FirstOrder = SISO | Model(
k = 1.0,
T = 1.0*u"s",
x = Var(init=0.0),
equations = :[
der(x) = (k * u - x) / T
y = x ]
)
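# Usage sketch in comments (added; `@instantiateModel` and `simulate!` are the assumed
# ModiaLang API and are not defined in this file): a Step source driving a FirstOrder block.
#
#     TestFirstOrder = Model(
#         step = Step | Map(height=1.0, startTime=0.1u"s"),
#         fo = FirstOrder | Map(T=0.2u"s"),
#         equations = :[fo.u = step.y]
#     )
#     m = @instantiateModel(TestFirstOrder)
#     simulate!(m, stopTime=2.0)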
# Output difference between commanded and feedback input
Feedback = Model(
u1 = input | info"Input 1",
u2 = input | info"Input 2",
y = output | info"Output signal",
equations = :[
y = u1 - u2 ]
)
# Proportional-Integral controller
PI = SISO | Model(
k = 1.0, # (info = "Gain")
T = 1.0u"s", # (min = 1E-10, info = "Time Constant (T>0 required)")
x = Var(init=0.0),
equations = :[
der(x) = u / T
y = k * (x + u) ]
)
# Single-Output continuous control block
SO = Model(
y = output
)
# Base class for a continuous signal source
SignalSource = SO | Model(
offset = 0.0, # info = "Offset of output signal y"
startTime = 0.0*u"s" # info = "Output y = offset for time < startTime")
)
# Step signal
Step = SignalSource | Model(
height = 1.0,
equations = :[
y = offset + (time < startTime ? 0*height : height) ] # 0*height is needed, if height has a unit
)
# Ramp signal
Ramp = SignalSource | Model(
height = 1.0,
duration = 2.0u"s",
equations = :[
y = offset + (time < startTime ? 0.0*height : # 0*height is needed, if height has a unit
(time < startTime + duration ? (time - startTime)*height/duration :
height)) ]
)
# Linear state space system
StateSpace = Model(
A = parameter | fill(0.0,0,0),
B = parameter | fill(0.0,0,0),
C = parameter | fill(0.0,0,0),
D = parameter | fill(0.0,0,0),
u = input,
y = output,
x = Var(init = zeros(0)),
equations = :[
der(x) = A*x + B*u
y = C*x + D*u
]
)
# -------------------------------------------------------
#=
# Sinusoidal signal
@model Sine begin
amplitude = Parameter(1.0, info = "Amplitude of sine wave")
freqHz = Parameter(1.0,info = "Frequency of sine wave")
phase = Parameter(0.0, info = "Phase of sine wave")
offset = Parameter(0.0, info = "Offset of output signal y")
startTime = Parameter(0.0, info = "Output y = offset for time < startTime")
SO()
equations
y = offset + if time < startTime; 0 else amplitude*sin(2*pi*freqHz*(time - startTime) + phase) end
end
@model Sine2 begin
# Generate sine signal
amplitude = Parameter(1.0, info = "Amplitude of sine wave")
freqHz = Parameter(1.0,info = "Frequency of sine wave")
phase = Parameter(0.0, info = "Phase of sine wave")
SignalSource()
equations
y = offset + if time < startTime; 0 else amplitude * sin(2 * pi * freqHz * (time - startTime) + phase) end
end
# Switch
@model Switch begin
sw = Input(Boolean(info = "Switch position (if `true`, use `u1`, else use `u2`)"))
u1 = Input(info = "Input 1")
u2 = Input(info = "Input 2")
y = Output(info = "Output signal")
equations
y = if sw; u1 else u2 end
end
# ABCD model
@model ABCD(A = -1, B = 1, C = 1, D = 0) begin
u = Input(info = "Input signal"); y = Output(info = "Output signal")
x = Float(start = 0)
equations
der(x) = A * x + B * u
y = C * x + D * u
end
=#
#end
"""
Modia module with electric component models (inspired from Modelica Standard Library).
* Developer: Hilding Elmqvist, Mogram AB
* Copyright (c) 2016-2021: Hilding Elmqvist
* License: MIT (expat)
"""
#module Electric
using ModiaLang
Pin = Model( v = potential, i = flow )
OnePort = Model( p = Pin, n = Pin, equations = :[
v = p.v - n.v
0 = p.i + n.i
i = p.i ]
)
"""
Resistor(R=1.0u"Ω")
Electrical resistor
`R` - Resistance Ω
"""
Resistor = OnePort | Model( R = 1.0u"Ω", equations = :[ R*i = v ] )
# @showModel(Resistor)
Capacitor = OnePort | Model( C = 1.0u"F", v=Var(init=0.0u"V"), equations = :[ C*der(v) = i ] )
Inductor = OnePort | Model( L = 1.0u"H", i=Var(init=0.0u"A"), equations = :[ L*der(i) = v ] )
ConstantVoltage = OnePort | Model( V = 1.0u"V", equations = :[ v = V ] )
Ground = Model( p = Pin, equations = :[ p.v = 0.0u"V" ] )
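# Usage sketch in comments (added; `@instantiateModel` and `simulate!` are the assumed
# ModiaLang API): a simple series circuit built from the components above.
#
#     Filter = Model(
#         R = Resistor | Map(R=0.5u"Ω"),
#         C = Capacitor | Map(C=2.0u"F"),
#         V = ConstantVoltage | Map(V=10.0u"V"),
#         ground = Ground,
#         connect = :[
#             (V.p, R.p)
#             (R.n, C.p)
#             (C.n, V.n, ground.p)
#         ]
#     )
#     filter = @instantiateModel(Filter)
#     simulate!(filter, stopTime=10.0)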
# Ideal operational amplifier (norator-nullator pair), but 3 pins
IdealOpAmp3Pin = Model(
in_p = Pin,
in_n = Pin,
out = Pin,
equations = :[
in_p.v = in_n.v
in_p.i = 0u"A"
in_n.i = 0u"A" ]
)
# Partial generic voltage source using the input signal as source voltage
PartialSignalVoltage = Model(
v = input,
p = Pin,
n = Pin
)
# Generic voltage source using the input signal (without and with unit) as source voltage
SignalVoltage = PartialSignalVoltage | Model(
equations = :[
p.v - n.v = v
0 = p.i + n.i
i = p.i ]
)
UnitlessSignalVoltage = PartialSignalVoltage | Model(
equations = :[
p.v - n.v = v*u"V"
0 = p.i + n.i
i = p.i ]
)
# Partial sensor to measure the current in a branch
PartialCurrentSensor = Model(
i = output,
p = Pin, # (info = "Positive pin")
n = Pin, # (info = "Negative pin")
)
# Sensor to measure the current in a branch
CurrentSensor = PartialCurrentSensor | Model(
equations = :[
p.v = n.v
0 = p.i + n.i
i = p.i]
)
UnitlessCurrentSensor = PartialCurrentSensor | Model(
equations = :[
p.v = n.v
0 = p.i + n.i
i = p.i/u"A"]
)
#=
# Step voltage source
@model StepVoltage begin
V = Parameter(1.0, start = 1.0, info = "Voltage") #, T = Unitful.V)
startTime = Parameter(0.0, start = 0.0, info = "Start time") # , T = Unitful.s)
OnePort()
equations
v = if time < startTime; 0 else V end
end
@model VoltageSource begin
OnePort()
offset = Par(0.0) # Voltage offset
startTime = Par(0.0) # Time offset
signalSource = SignalSource(offset=offset, startTime=startTime)
equations
v = signalSource.y
end
#=
@model SineVoltage1 begin
# Sine voltage source
V = Parameter() # Amplitude of sine wave
phase = Par(0.0) # Phase of sine wave
freqHz = Parameter() # Frequency of sine wave
VoltageSource(signalSource=Sine(amplitude=V, freqHz=freqHz, phase=phase))
end
=#
# Sinusoidal voltage source
@model SineVoltage begin
V = Parameter() # Amplitude of sine wave
phase = Par(0.0) # Phase of sine wave
freqHz = Parameter() # Frequency of sine wave
VoltageSource()
equations
v = V*sin(10*time)
end
# Ideal diode
@model IdealDiode begin # Ideal diode
OnePort()
Ron = Par(1.0E-2) # Forward state-on differential resistance (closed diode resistance)
Goff = Par(1.0E-2) # Backward state-off conductance (opened diode conductance)
Vknee = Par(0) # Forward threshold voltage
# off = Variable(start=true) # Switching state
s = Float(start=0.0) # Auxiliary variable for actual position on the ideal diode characteristic
#=
s = 0: knee point
s < 0: below knee point, diode conducting
s > 0: above knee point, diode locking
=#
equations
# off := s < 0
# v = s * if !positive(s); 1 else Ron end + Vknee
# i = s * if !positive(s); Goff else 1 end + Goff * Vknee
v = s * if !(s>0); 1 else Ron end + Vknee
i = s * if !(s>0); Goff else 1 end + Goff * Vknee
end
@model Diode begin
OnePort()
Ids=Par(1.e-6) # "Saturation current";
Vt=Par(0.04) # "Voltage equivalent of temperature (kT/qn)";
Maxexp = Par(15) # "Max. exponent for linear continuation";
R=Par(1.e8) # "Parallel ohmic resistance";
equations
i = if v/Vt > Maxexp; Ids*(exp(Maxexp)*(1 + v/Vt - Maxexp) - 1) else Ids*(exp(v/Vt) - 1) end + v/R
end
=#
#end
"""
Modia module with electric component models (inspired from Modelica Standard Library).
* Developer: Hilding Elmqvist, Mogram AB
* Copyright (c) 2016-2021: Hilding Elmqvist
* License: MIT (expat)
"""
#module Electric
using ModiaLang
Var(args...; kwargs...) = (;args..., kwargs...)
Var(value::Union{Float64, Int64, Bool, String, Expr}, args...; kwargs...) = (;value = value, args..., kwargs...)
parameter = :parameter => true
input = :input => true
output = :output => true
potential = :potential => true
flow = :flow => true
v = Var(potential, nominal=10)
@show v
v = Var(5, parameter, min=0)
@show v
v = Var(potential, min=0, flow, nominal=10)
@show v
Pin = Model( v = Var(potential, nominal=10), i = Var(flow) )
@show Pin
Input(; kwargs...) = (;input=true, kwargs...)
Output(; kwargs...) = (;output=true, kwargs...)
Potential(; kwargs...) = (;potential=true, kwargs...)
Flow(; kwargs...) = (;flow=true, kwargs...)
Pin = Model( v = Potential(nominal=10), i = Flow() )
@show Pin
#Pin = Model( potentials = :[v], flows = :[i] )
Pin = Model( v = Var(potential), i = Var(flow) )
OnePort = Model( p = Pin, n = Pin, equations = :[
v = p.v - n.v
0 = p.i + n.i
i = p.i ]
)
"""
Resistor(R=1.0u"Ω")
Electrical resistor
`R` - Resistance Ω
"""
Resistor = OnePort | Model( R = 1.0u"Ω", equations = :[ R*i = v ] )
Capacitor = OnePort | Model( C = 1.0u"F", v = Map(init=0.0u"V"), equations = :[ C*der(v) = i ] )
Inductor = OnePort | Model( L = 1.0u"H", init=Map(i=0.0u"A"), equations = :[ L*der(i) = v ] )
ConstantVoltage = OnePort | Model( V = 1.0u"V", equations = :[ v = V ] )
Ground = Model( p = Pin, equations = :[ p.v = 0.0u"V" ] )
# Ideal operational amplifier (norator-nullator pair), but 3 pins
IdealOpAmp3Pin = Model(
in_p = Pin,
in_n = Pin,
out = Pin,
equations = :[
in_p.v = in_n.v
in_p.i = 0u"A"
in_n.i = 0u"A" ]
)
# Partial generic voltage source using the input signal as source voltage
PartialSignalVoltage = Model(
inputs = :[v],
p = Pin,
n = Pin
)
# Generic voltage source using the input signal (without and with unit) as source voltage
SignalVoltage = PartialSignalVoltage | Model(
equations = :[
p.v - n.v = v
0 = p.i + n.i
i = p.i ]
)
UnitlessSignalVoltage = PartialSignalVoltage | Model(
equations = :[
p.v - n.v = v*u"V"
0 = p.i + n.i
i = p.i ]
)
# Partial sensor to measure the current in a branch
PartialCurrentSensor = Model(
outputs = :[i],
p = Pin, # (info = "Positive pin")
n = Pin, # (info = "Negative pin")
)
# Sensor to measure the current in a branch
CurrentSensor = PartialCurrentSensor | Model(
equations = :[
p.v = n.v
0 = p.i + n.i
i = p.i]
)
UnitlessCurrentSensor = PartialCurrentSensor | Model(
equations = :[
p.v = n.v
0 = p.i + n.i
i = p.i/u"A"]
)
#=
# Step voltage source
@model StepVoltage begin
V = Parameter(1.0, start = 1.0, info = "Voltage") #, T = Unitful.V)
startTime = Parameter(0.0, start = 0.0, info = "Start time") # , T = Unitful.s)
OnePort()
equations
v = if time < startTime; 0 else V end
end
@model VoltageSource begin
OnePort()
offset = Par(0.0) # Voltage offset
startTime = Par(0.0) # Time offset
signalSource = SignalSource(offset=offset, startTime=startTime)
equations
v = signalSource.y
end
#=
@model SineVoltage1 begin
# Sine voltage source
V = Parameter() # Amplitude of sine wave
phase = Par(0.0) # Phase of sine wave
freqHz = Parameter() # Frequency of sine wave
VoltageSource(signalSource=Sine(amplitude=V, freqHz=freqHz, phase=phase))
end
=#
# Sinusoidal voltage source
@model SineVoltage begin
V = Parameter() # Amplitude of sine wave
phase = Par(0.0) # Phase of sine wave
freqHz = Parameter() # Frequency of sine wave
VoltageSource()
equations
v = V*sin(10*time)
end
# Ideal diode
@model IdealDiode begin # Ideal diode
OnePort()
Ron = Par(1.0E-2) # Forward state-on differential resistance (closed diode resistance)
Goff = Par(1.0E-2) # Backward state-off conductance (opened diode conductance)
Vknee = Par(0) # Forward threshold voltage
# off = Variable(start=true) # Switching state
s = Float(start=0.0) # Auxiliary variable for actual position on the ideal diode characteristic
#=
s = 0: knee point
s < 0: below knee point, diode conducting
s > 0: above knee point, diode locking
=#
equations
# off := s < 0
# v = s * if !positive(s); 1 else Ron end + Vknee
# i = s * if !positive(s); Goff else 1 end + Goff * Vknee
v = s * if !(s>0); 1 else Ron end + Vknee
i = s * if !(s>0); Goff else 1 end + Goff * Vknee
end
@model Diode begin
OnePort()
Ids=Par(1.e-6) # "Saturation current";
Vt=Par(0.04) # "Voltage equivalent of temperature (kT/qn)";
Maxexp = Par(15) # "Max. exponent for linear continuation";
R=Par(1.e8) # "Parallel ohmic resistance";
equations
i = if v/Vt > Maxexp; Ids*(exp(Maxexp)*(1 + v/Vt - Maxexp) - 1) else Ids*(exp(v/Vt) - 1) end + v/R
end
=#
#end