licenses (sequencelengths 1-3) | version (stringclasses, 677 values) | tree_hash (stringlengths 40-40) | path (stringclasses, 1 value) | type (stringclasses, 2 values) | size (stringlengths 2-8) | text (stringlengths 25-67.1M) | package_name (stringlengths 2-41) | repo (stringlengths 33-86) |
---|---|---|---|---|---|---|---|---|
[
"MIT"
] | 1.3.1 | bc1b8bfb168b8850d28c935b20a545882a9a078e | code | 7733 |
struct HashedIndex
value::UInt128
value2::UInt64
index::Int64
end
@inline HashedIndex() = HashedIndex(0, 0, 0)
# Definition of the mutable IndexHashTable structure for HashedIndex
mutable struct IndexHashTable{V<:AbstractVector{HashedIndex}, R}
table::V
mylength::MVector{1, UInt64}
occupied::BitVector
deleted::BitVector
lock::R
function IndexHashTable(v::A, len::Int64, lock=SingleThread()) where A
len2 = next_power_of_two(len)
resize!(v, len2)
l = locktype(lock)
new{A, typeof(l)}(v, MVector{1, UInt64}([len2 - 1]), falses(len2),falses(len2), l)
end
IndexHashTable(len::Int64, lock=SingleThread()) = IndexHashTable(Vector{HashedIndex}(undef, len), len, lock)
end
@inline function pushqueue!(ht::Q, key::K, index, write::Bool=true) where {Q<:IndexHashTable,K}
return _pushqueue!(ht, key, index, write) >0
end
@inline has_index(ht::Q, key::K) where {Q<:IndexHashTable,K} = _pushqueue!(ht, key, 0, false)
# Method for inserting into the IndexHashTable
function _pushqueue!(ht::Q, key::K, index, write::Bool=true) where {Q<:IndexHashTable,K}
value = fnv1a_hash(key, UInt128)
value_ = UInt64(value & UInt128(0x7FFFFFFFFFFFFFFF))
index1 = value_ & ht.mylength[1]
index2 = fnv1a_hash(key, UInt64)
i = UInt64(0)
ret = -1
lock(ht.lock)
while true
idx = reinterpret(Int64, (index1 + i * index2) & ht.mylength[1] + 1) # try bitcast( ) instead
@inbounds data = ht.occupied[idx] ? ht.table[idx] : HashedIndex()
if (ht.deleted[idx] && !write)
i += 1
i >= ht.mylength[1] && break # avoid cycling forever over deleted slots
continue
end
ht.deleted[idx] &= !write
ht.occupied[idx] |= write
if data.value == 0
if write
ht.table[idx] = HashedIndex(value, index2, index)
end
break # return false
elseif data.value == value && data.value2 == index2
ret = data.index
break # return false
end
i += 1
if i >= ht.mylength[1]
if write
extend(ht)
ret = _pushqueue!(ht, key,index,true)
end
break
end
end
unlock(ht.lock)
return ret
end
@inline Base.haskey(ht::Q, key::K) where {Q<:IndexHashTable,K} = pushqueue!(ht,key,0,false)
function Base.delete!(ht::Q, key::K) where {Q<:IndexHashTable,K}
value = fnv1a_hash(key, UInt128)
value_ = UInt64(value & UInt128(0x7FFFFFFFFFFFFFFF))
index1 = value_ & ht.mylength[1]
index2 = fnv1a_hash(key, UInt64)
i = UInt64(0)
lock(ht.lock)
try
while true
idx = reinterpret(Int64, (index1 + i * index2) & ht.mylength[1] + 1) # try bitcast( ) instead
if ht.deleted[idx]
i+=1
i >= ht.mylength[1] && break # avoid cycling forever over deleted slots
continue
end
!ht.occupied[idx] && break
data = ht.table[idx]
if data.value == value && data.value2 == index2
ht.occupied[idx] = false
ht.deleted[idx] = true
break # return false
end
i += 1
if i >= ht.mylength[1]
break
end
end
finally
unlock(ht.lock)
end
end
@inline clearhashvector(::Vector{HashedIndex}) = nothing
@inline similarqueuehash(::Type{HashedIndex}, len2::Int64) = Vector{HashedIndex}(undef, len2)
# Function to extend the IndexHashTable
function extend(ht::IndexHashTable)
len2 = 2 * (ht.mylength[1] + 1)
V2 = similarqueuehash(HashedIndex, reinterpret(Int64, len2))
new_occupied = falses(len2)
len2 -= 1
pos = 0
for data in ht.table
pos += 1
!ht.occupied[pos] && continue
value_ = UInt64(data.value & UInt128(0x7FFFFFFFFFFFFFFF))
index1 = value_ & len2
index2 = data.value2
i = 0
while true
idx = reinterpret(Int64, (index1 + i * index2) & len2 + 1) # try bitcast( ) instead
i += 1
if !new_occupied[idx] # V2[idx].value==0
V2[idx] = data
new_occupied[idx] = true
break
end
end
end
clearhashvector(ht.table)
ht.table = V2
ht.occupied = new_occupied
ht.deleted = falses(len2+1)
ht.mylength[1] = len2
end
#=
function Base.resize!(ht::IndexHashTable,len::Int64)
len2 = next_power_of_two(len)
len2<=(ht.mylength[1]+1) && return
V2 = similarqueuehash(HashedIndex, reinterpret(Int64, len2))
new_occupied = falses(len2)
len2 -= 1
pos = 0
for data in ht.table
pos += 1
!ht.occupied[pos] && continue
value_ = UInt64(data.value & UInt128(0x7FFFFFFFFFFFFFFF))
index1 = value_ & len2
index2 = data.value2
i = 0
while true
idx = reinterpret(Int64, (index1 + i * index2) & len2 + 1) # try bitcast( ) instead
i += 1
if !new_occupied[idx] # V2[idx].value==0
V2[idx] = data
new_occupied[idx] = true
break
end
end
end
clearhashvector(ht.table)
ht.table = V2
ht.occupied = new_occupied
ht.deleted = falses(len2+1)
ht.mylength[1] = len2
end
=#
# Method to empty the IndexHashTable
function Base.empty!(ht::IndexHashTable)
lock(ht.lock)
fill!(ht.occupied, false)
fill!(ht.deleted, false)
unlock(ht.lock)
end
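# Usage sketch (not part of the exported API; illustrates the open-addressing table
# defined above, assuming fnv1a_hash is defined for the chosen key type):
#=
ht = IndexHashTable(64)            # single-threaded table with 64 slots
pushqueue!(ht, [1, 2, 3], 17)      # store index 17 under the key [1,2,3]
haskey(ht, [1, 2, 3])              # true
has_index(ht, [1, 2, 3])           # 17, or -1 if the key is absent
delete!(ht, [1, 2, 3])
empty!(ht)                         # clears the occupied/deleted flags
=#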
struct KeyDict{K,V,HT<:IndexHashTable}
data::Vector{V}
hashes::HT
lock::ReadWriteLock
function KeyDict{K1,V}() where {K1,V}
ht = IndexHashTable(256,SingleThread())
return new{K1,V,typeof(ht)}(Vector{V}(),ht,ReadWriteLock())
end
end
Base.length(kd::KD) where {KD<:KeyDict} = length(kd.data)
function Base.push!(kd::KD,p::P) where {KD<:KeyDict,P<:Pair}
writelock(kd.lock)
push!(kd.data,p[2])
i = length(kd.data)
pushqueue!(kd.hashes, p[1], i)
writeunlock(kd.lock)
end
function Base.haskey(kd::KD,key::K) where {KD<:KeyDict,K}
readlock(kd.lock)
h = haskey(kd.hashes,key)
readunlock(kd.lock)
return h
end
function Base.getindex(kd::KD,key::K) where {KD<:KeyDict,K}
readlock(kd.lock)
i = has_index(kd.hashes,key)
v = kd.data[i]
readunlock(kd.lock)
return v
end
struct MultiKeyDict{K,V,D<:KeyDict{K,V}}<:AbstractDict{K, V}
data::Vector{D}
lock::ReadWriteLock
function MultiKeyDict{K,V}() where {K,V}
data = [KeyDict{K,V}()]
return new{K,V,eltype(data)}(data,ReadWriteLock())
end
end
function Base.push!(kd::KD,p::P) where {K,V,KD<:MultiKeyDict{K,V},P<:Pair}
writelock(kd.lock)
pp = p[1][1]
ll = length(kd.data)
if pp>ll
resize!(kd.data,pp)
for i in (ll+1):pp
kd.data[i] = KeyDict{K,V}()
end
end
dd = kd.data[pp]
writeunlock(kd.lock)
push!(dd,p)
end
function Base.haskey(kd::KD,key::K) where {KD<:MultiKeyDict,K}
readlock(kd.lock)
pp = key[1]
ll = length(kd.data)
if pp<=ll
h = haskey(kd.data[pp],key)
else
h = false
end
readunlock(kd.lock)
return h
end
function Base.getindex(kd::KD,key::K) where {KD<:MultiKeyDict,K}
readlock(kd.lock)
v = getindex(kd.data[key[1]],key)
readunlock(kd.lock)
return v
end
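# Usage sketch for the two dictionary layers above (assumption: keys are tuples whose
# first entry is a positive integer selecting the inner KeyDict, and fnv1a_hash is
# defined for that key type):
#=
md = MultiKeyDict{Tuple{Int64,Int64},Float64}()
push!(md, (3, 7) => 1.25)    # grows the internal vector of KeyDicts to length 3 on demand
haskey(md, (3, 7))           # true
md[(3, 7)]                   # 1.25
=#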
| HighVoronoi | https://github.com/martinheida/HighVoronoi.jl.git |
|
[
"MIT"
] | 1.3.1 | bc1b8bfb168b8850d28c935b20a545882a9a078e | code | 19773 | ####################################################################################################################################
## Managing the integral values, volumes, interface area
####################################################################################################################################
@doc raw"""
struct Voronoi_Integral{P<:Point, T<:Voronoi_MESH{P}}
Stores calculated volumes, interface areas, bulk integrals and interface integrals as well as a list of neighbors for each cell.
"""
struct Voronoi_Integral{P<:Point, T<:Voronoi_MESH{P}} <: HVIntegral{P}
neighbors::Vector{Vector{Int64}}
volumes::Vector{Float64}
area::Vector{Vector{Float64}}
bulk_integral::Vector{Vector{Float64}}
interface_integral::Vector{Vector{Vector{Float64}}}
MESH::T
vol_buffer::Vector{Float64}
Voronoi_Integral{P,T}(a,b,c,d,e,f) where {P,T} = new{P,T}(a,b,c,d,e,f,[0.0])
Voronoi_Integral(a,b,c,d,e,f::T) where {P<:Point, T<:Voronoi_MESH{P}} = new{P,T}(a,b,c,d,e,f,[0.0])
end
struct Voronoi_Integral_Store_Container_1
neighbors::Vector{Vector{Int64}}
volumes::Vector{Float64}
area::Vector{Vector{Float64}}
bulk_integral::Vector{Vector{Float64}}
interface_integral::Vector{Vector{Vector{Float64}}}
end
Voronoi_Integral_Store_Container_1(vi::Voronoi_Integral) = Voronoi_Integral_Store_Container_1(vi.neighbors,vi.volumes,vi.area,vi.bulk_integral,vi.interface_integral)
Voronoi_Integral(visc::Voronoi_Integral_Store_Container_1,mesh::T) where {P<:Point, T<:Voronoi_MESH{P}} =
Voronoi_Integral{P,T}(visc.neighbors,visc.volumes,visc.area,visc.bulk_integral,visc.interface_integral,mesh)
pack_integral(I::VI) where VI<:Voronoi_Integral = Voronoi_Integral_Store_Container_1(I)
unpack_integral(I::VI,m) where VI<:Voronoi_Integral_Store_Container_1 = Voronoi_Integral(I,m)
function Voronoi_Integral(mesh; get_volume=true, get_area=true, integrate_bulk=false, integrate_interface=false,get_neighbors=true)
l=length(nodes(mesh))
l_volume=get_volume*l
l_area=get_area*l
l_bulk=integrate_bulk*l
l_int=integrate_interface*l
VI=Voronoi_Integral(Vector{Vector{Int64}}(undef,l*get_neighbors),
Vector{Float64}(undef,l_volume),
Vector{Vector{Float64}}(undef, l_area),
Vector{Vector{Float64}}(undef, l_bulk),
Vector{Vector{Vector{Float64}}}(undef, l_int),
mesh)
# emptyint=Int64[]
# for i in 1:l VI.neighbors[i]=copy(emptyint) end
return VI
end
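# Construction sketch for the fields documented above (assumes a Voronoi_MESH `mesh`
# built elsewhere):
#=
I = Voronoi_Integral(mesh; get_volume=true, get_area=true,
                     integrate_bulk=false, integrate_interface=false)
enable_integral_data(I)   # later switch on storage for bulk/interface integrals
=#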
function Voronoi_Integral(mesh::T, neigh::Vector{Vector{Int64}}) where T
# Initialize the other vectors with zero length
volumes = Float64[]
area = Vector{Float64}[]
bulk_integral = Vector{Float64}[]
interface_integral = Vector{Vector{Float64}}[]
# Return the new instance of Voronoi_Integral
return Voronoi_Integral(neigh, volumes, area, bulk_integral, interface_integral, mesh)
end
function EmptyVoronoi_Integral(mesh::AM;parameters=nothing) where AM<:AbstractMesh
VI=Voronoi_Integral(Vector{Vector{Int64}}(undef,0),
Vector{Float64}(undef,0),
Vector{Vector{Float64}}(undef, 0),
Vector{Vector{Float64}}(undef, 0),
Vector{Vector{Vector{Float64}}}(undef, 0),
mesh)
return VI
end
function enable_geo_data(int::Voronoi_Integral)
l = internal_length(int.MESH)
resize!(int.volumes,l)
resize!(int.area,l)
resize!(int.neighbors,l)
end
function enable_neighbor_data(int::Voronoi_Integral)
l = internal_length(int.MESH)
resize!(int.neighbors,l)
end
function enable_integral_data(int::Voronoi_Integral)
l = internal_length(int.MESH)
resize!(int.bulk_integral,l)
resize!(int.interface_integral,l)
resize!(int.neighbors,l)
end
mesh(Integral::Voronoi_Integral) = Integral.MESH
"""
returns a function x->(r,R) where `r` and `R` are the inner and outer radius of the cell in which `x` lies.
"""
function DiameterFunction(Integral;tree = KDTree(nodes(mesh(Integral))))
nodes = Integral.nodes
_boundary = Integral.boundary
#_av = Integral.MESH.All_Verteces
#_bv = Integral.MESH.Buffer_Verteces
_neigh = Integral.neighbors
lref = length(Integral.references)
function dists(index,vertices,boundary,neigh)
R = 0.0
for (sig,r) in vertices[index]
nn = norm(r-nodes[index])
R = max(R,nn)
end
r = 2*R
ln = length(neigh)
for n in neigh[index]
if n<=ln
nn = 0.5*norm(nodes[n]-nodes[index])
r = min(r,nn)
else
bi = n-ln
nn = abs(dot(nodes[index]-boundary.planes[bi].base,boundary.planes[bi].normal))
r = min(r,nn)
end
end
return [r,R]
end
return x->dists(nn_id(tree,x)+lref,Integral.vertices,_boundary,_neigh)
end
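# Usage sketch (hypothetical `Integral` object exposing the fields accessed above):
#=
diam = DiameterFunction(Integral)
r, R = diam(x)   # inner and outer radius of the Voronoi cell containing x
=#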
# For developing and testing only:
#=
function show_integral(I::Voronoi_Integral;volume=true,bulk=true,area=true,interface=true)
show_vol=volume && length(I.volumes)>0
show_bulk=bulk && length(I.bulk_integral)>0
if show_vol || show_bulk
println("properties of nodes:")
for i in 1:length(I.MESH)
print("$i(")
show_vol && print("$(I.volumes[i])")
show_vol && show_bulk && print(",")
show_bulk && print("$(I.bulk_integral[i])")
print(") -- ")
end
println("")
end
show_ar=area && length(I.area)>0
show_i=interface && length(I.interface_integral)>0
println("properties of interfaces:")
for i in 1:length(I.MESH)
print("$i: ")
nei=I.neighbors[i]
for k in 1:length(nei)
print("$(nei[k])(")
show_ar && print("$(I.area[i][k])")
show_ar && show_i && print(",")
show_i && print("$(I.interface_integral[i][k])")
print(") -- ")
end
println("")
end
end
=#
# For developing and testing only:
#=
function print_integral(I::Voronoi_Integral;volume=false,bulk=false,area=true,interface=true)
vol=(length(I.volumes)!=0)
ar=(length(I.area)!=0)
bulk=(length(I.bulk_integral)!=0)
inter=(length(I.interface_integral)!=0)
mesh=I.MESH
for i in 1:(length(I.neighbors))
print("$i: ")
vol && (print("vol=$(I.volumes[i]) , "))
bulk && (print("bulk_I=$(I.bulk_integral[i]) "))
print(" Neigh's: ")
neigh=I.neighbors[i]
for k in 1:(length(I.neighbors[i]))
print("$(neigh[k])(")
ar && (print("a=$(I.area[i][k]); "))
inter && (print("i=$(I.interface_integral[i][k]) "))
print(") ; ")
end
println("")
end
end
=#
# the following function is for internal use inside modify_integral(...) only
#=function modify_Integral_entry!(b::Bool,field,data)
if b
if length(field)==0
append!(field,data)
end
else
empty!(field)
end
end
@doc raw"""
modify_Integral!(modify_Integral!(I::Voronoi_Integral;get_volume=(length(I.volumes)>0), get_area=(length(I.area)>0), integrate_bulk=(length(I.bulk_integral)>0), integrate_interface=(length(I.interface_integral)>0)))
modifies the integral I in the prescribed manner.
Caution!: Data will be lost forever if a previously "true" value is set to "false"
"""
function modify_Integral!(I::Voronoi_Integral;get_volume=(length(I.volumes)>0), get_area=(length(I.area)>0), integrate_bulk=(length(I.bulk_integral)>0), integrate_interface=(length(I.interface_integral)>0))
l=length(I.MESH.nodes)
modify_Integral_entry!(get_volume,I.volumes,Vector{Float64}(undef,l))
modify_Integral_entry!(get_area,I.area,Vector{Vector{Float64}}(undef,l))
modify_Integral_entry!(integrate_bulk,I.bulk_integral,Vector{Vector{Float64}}(undef, l))
modify_Integral_entry!(integrate_interface,I.interface_integral,Vector{Vector{Vector{Float64}}}(undef, l_int))
return I
end
=#
@doc raw"""
length(Integral::Voronoi_Integral)
returns the length of the underlying mesh
"""
function length(Integral::Voronoi_Integral)
return length(Integral.MESH)
end
function dimension(Integral::VI) where {P,VI<:Voronoi_Integral{P}}
return length(zeros(P))
end
add_virtual_points(Integral::Voronoi_Integral, xs) = prepend!(Integral,xs)
@doc raw"""
prepend!(Integral::Voronoi_Integral, xs)
adds the points 'xs' to the beginning of the mesh and correspondingly shifts the indices in the fields of the integral, including 'neighbors'
"""
prepend!(Integral::Voronoi_Integral, xs::HVNodes) = prepend!(Integral,length(xs))
function prepend!(Integral::Voronoi_Integral, len::Int64)
for i in 1:(length(Integral.neighbors)) # have in mind that the nodes are renumbered, so we have to update the neighbors indeces
try
isassigned(Integral.neighbors,i) && ((Integral.neighbors[i]).+=len)
catch
println(Integral.neighbors[1:10],i)
rethrow()
end
end
if length(Integral.neighbors)>0
prepend!(Integral.neighbors,Vector{Vector{Int64}}(undef,len))
for i in 1:len Integral.neighbors[i]=Int64[] end
end
if length(Integral.volumes)>0
prepend!(Integral.volumes,Vector{Float64}(undef,len))
end
if length(Integral.area)>0
prepend!(Integral.area,Vector{Vector{Float64}}(undef, len))
end
if length(Integral.bulk_integral)>0
prepend!(Integral.bulk_integral,Vector{Vector{Float64}}(undef, len))
end
if length(Integral.interface_integral)>0
prepend!(Integral.interface_integral, Vector{Vector{Vector{Float64}}}(undef, len))
end
return Integral
end
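# Sketch of the index shift performed above: if a cell previously stored the neighbor
# list [2, 5, 7] and len == 3 points are prepended, the stored list becomes [5, 8, 10],
# and three fresh (empty) entries are inserted at the front of all enabled fields.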
@inline enabled_volumes(Integral::Voronoi_Integral) = length(Integral.volumes)>0
@inline enabled_area(Integral::Voronoi_Integral) = length(Integral.area)>0
@inline enabled_bulk(Integral::Voronoi_Integral) = length(Integral.bulk_integral)>0
@inline enabled_interface(Integral::Voronoi_Integral) = length(Integral.interface_integral)>0
@inline enabled_neighbors(Integral::Voronoi_Integral) = length(Integral.neighbors)>0
@doc raw"""
append!(Integral::Voronoi_Integral, xs)
appends the points 'xs' to the end of the mesh and correspondingly extends the fields of the integral, including 'neighbors'
"""
append!(Integral::Voronoi_Integral, xs::HVNodes) = append!(Integral,length(xs))
function append!(Integral::Voronoi_Integral, len::Int64)
len_I=length(Integral)
if length(Integral.neighbors)>0
append!(Integral.neighbors,Vector{Vector{Int64}}(undef,len))
for i in (len_I+1):(len_I+len) Integral.neighbors[i]=Int64[] end
end
if length(Integral.volumes)>0
append!(Integral.volumes,Vector{Float64}(undef,len))
end
if length(Integral.area)>0
append!(Integral.area,Vector{Vector{Float64}}(undef, len))
end
if length(Integral.bulk_integral)>0
append!(Integral.bulk_integral,Vector{Vector{Float64}}(undef, len))
end
if length(Integral.interface_integral)>0
append!(Integral.interface_integral, Vector{Vector{Vector{Float64}}}(undef, len))
end
return Integral
end
function keepat!(Integral::Voronoi_Integral,entries)
for I in 1:length(Integral)
if !isassigned(Integral.area,I) && length(Integral.area)>=I
Integral.area[I] = Float64[]
end
if !isassigned(Integral.bulk_integral,I) && length(Integral.bulk_integral)>=I
Integral.bulk_integral[I] = Float64[]
end
if !isassigned(Integral.neighbors,I) && length(Integral.neighbors)>=I
Integral.neighbors[I] = Int64[]
end
if !isassigned(Integral.interface_integral,I) && length(Integral.interface_integral)>=I
Integral.interface_integral[I] = [Float64[]]
end
end
if length(Integral.volumes)>0 keepat!(Integral.volumes,entries) end
if length(Integral.area)>0 keepat!(Integral.area,entries) end
if length(Integral.bulk_integral)>0 keepat!(Integral.bulk_integral,entries) end
if length(Integral.interface_integral)>0 keepat!(Integral.interface_integral,entries) end
if length(Integral.neighbors)>0 keepat!(Integral.neighbors,entries) end
keepat!(Integral.MESH,entries)
end
@inline _has_cell_data(I::Voronoi_Integral,_Cell) = isassigned(I.area,_Cell)#_Cell<=length(I.volumes)
@inline function cell_data_writable(I::Voronoi_Integral,_Cell,vec,vecvec,::StaticFalse;get_integrals=statictrue)
inter = get_integrals==true ? enabled_interface(I) : false
if _has_cell_data(I,_Cell)
return (volumes = view(I.volumes,_Cell:_Cell),area = length(I.area)>0 ? I.area[_Cell] : Float64[], bulk_integral = inter ? I.bulk_integral[_Cell] : vec, interface_integral = inter ? I.interface_integral[_Cell] : vecvec, neighbors = I.neighbors[_Cell])
else
resize!(I.vol_buffer,max(1,length(I.neighbors[_Cell])))
return (volumes = view(I.vol_buffer,1:1),area = I.vol_buffer, bulk_integral = inter ? I.bulk_integral[_Cell] : vec, interface_integral = inter ? I.interface_integral[_Cell] : vecvec, neighbors = I.neighbors[_Cell])
end
end
@inline cell_data(I::Voronoi_Integral,_Cell,vec,vecvec;get_integrals=statictrue) = cell_data_writable(I,_Cell,vec,vecvec,get_integrals=get_integrals)
@doc raw"""
copy(Integral::Voronoi_Integral)
returns an autonomous copy of the 'Integral'
"""
function copy(Integral::Voronoi_Integral,new_mesh = copy(Integral.MESH);volumes=true,area=true,bulk_integral=true,interface_integral=true,neighbors=true,kwargs...)
g_v=volumes && length(Integral.volumes)>0
g_a=neighbors && area && length(Integral.area)>0
i_b=bulk_integral && length(Integral.bulk_integral)>0
i_i=neighbors && interface_integral && length(Integral.interface_integral)>0
n_n=neighbors && length(Integral.neighbors)>0
new_Integral = Voronoi_Integral(new_mesh,get_volume=g_v,get_area=g_a,integrate_bulk=i_b,integrate_interface=i_i,get_neighbors=n_n)
for i in 1:(length(Integral))
if n_n && isassigned(Integral.neighbors,i)
new_Integral.neighbors[i]=copy(Integral.neighbors[i])
end
if g_v new_Integral.volumes[i]=Integral.volumes[i] end
if g_a && isassigned(Integral.area,i)
new_Integral.area[i]=copy(Integral.area[i])
end
if i_b && isassigned(Integral.bulk_integral,i)
new_Integral.bulk_integral[i]=copy(Integral.bulk_integral[i])
end
if i_i && isassigned(Integral.interface_integral,i)
new_Integral.interface_integral[i]=Vector{Vector{Float64}}(undef,length(Integral.interface_integral[i]))
new_ii=new_Integral.interface_integral[i]
old_ii=Integral.interface_integral[i]
for j in 1:(length(old_ii))
new_ii[j]=copy(old_ii[j])
end
end
end
return new_Integral
end
@inline get_neighbors(I::Voronoi_Integral,_Cell,::StaticFalse) = I.neighbors[_Cell]
function set_neighbors(I::Voronoi_Integral,_Cell,new_neighbors,proto_bulk,proto_interface,::StaticFalse)
old_neighbors = isassigned(I.neighbors,_Cell) ? I.neighbors[_Cell] : Int64[]
bulk = enabled_bulk(I) && proto_bulk!=nothing
ar = enabled_area(I)
inter = enabled_interface(I) && proto_interface!=nothing
vol = enabled_volumes(I)
if ar && !isassigned(I.area,_Cell)
I.area[_Cell]=zeros(Float64,length(old_neighbors))
end
#if ar && !isdefined(I.area,_Cell)
# I.area[_Cell]=zeros(Float64,length(old_neighbors))
#end
if (length(old_neighbors)>0)
#print(" ho ")
if bulk && (!(isassigned(I.bulk_integral,_Cell)) || length(I.bulk_integral[_Cell])!=length(proto_bulk))
I.bulk_integral[_Cell]=copy(proto_bulk)
end
if inter && !(isassigned(I.interface_integral,_Cell))
I.interface_integral[_Cell]=Vector{Vector{Float64}}(undef,length(old_neighbors))
for i in 1:(length(old_neighbors))
(I.interface_integral[_Cell])[i]=copy(proto_interface)
end
end
knn = 0
for n in new_neighbors
knn += (n in old_neighbors) ? 0 : 1
end
if (knn>0) && ar
a_neighbors = zeros(Int64,knn)
a_areas = zeros(Float64,knn)
n_interface = Vector{Vector{Float64}}(undef,inter ? knn : 0)
for i in 1:(inter ? knn : 0)
n_interface[i]=copy(proto_interface)
end
knn2 = 1
for n in new_neighbors
if !(n in old_neighbors)
a_neighbors[knn2] = n
knn2 += 1
end
end
areas = I.area[_Cell]
append!(old_neighbors,a_neighbors)
append!(areas,a_areas)
inter && append!(I.interface_integral[_Cell],n_interface)
for k in 1:length(old_neighbors)
if !(old_neighbors[k] in new_neighbors)
old_neighbors[k] = maxInt # length(data.extended_xs)+data.size
end
end
quicksort!(old_neighbors, ar ? areas : old_neighbors, inter ? I.interface_integral[_Cell] : old_neighbors)
lnn = length(new_neighbors)
resize!(old_neighbors,lnn)
resize!(areas,lnn)
inter && resize!(I.interface_integral[_Cell],lnn)
end
else
old_neighbors=new_neighbors
I.neighbors[_Cell]=new_neighbors
vol && (I.volumes[_Cell]=0)
ar && (I.area[_Cell]=zeros(Float64,length(old_neighbors)))
bulk && (I.bulk_integral[_Cell]=copy(proto_bulk))
inter && (I.interface_integral[_Cell]=Vector{Vector{Float64}}(undef,length(old_neighbors)))
inter && (for i in 1:(length(old_neighbors))
(I.interface_integral[_Cell])[i]=copy(proto_interface)
end)
end
end
function get_integral(Integral::Voronoi_Integral,_Cell,Neigh,::StaticTrue)
k=1
neighbors=Integral.neighbors[_Cell]
if length(Integral.interface_integral)==0 return Float64[] end
while k<=length(neighbors)
if Neigh==neighbors[k] break end
k+=1
end
if k<=length(neighbors) && isassigned(Integral.interface_integral,_Cell) && isassigned(Integral.interface_integral[_Cell],k)
return (Integral.interface_integral[_Cell])[k]
else
y=copy((Integral.interface_integral[_Cell])[1])
y.*=0.0
return y
end
end
function isassigned_integral(Integral::Voronoi_Integral,_Cell,Neigh)
k=1
!isassigned(Integral.neighbors,_Cell) && return false
neighbors=Integral.neighbors[_Cell]
if length(Integral.interface_integral)==0 return false end
while k<=length(neighbors)
if Neigh==neighbors[k] break end
k+=1
end
return isassigned(Integral.interface_integral,_Cell) && isassigned(Integral.interface_integral[_Cell],k)
end
function get_area(Integral::Voronoi_Integral,_Cell,Neigh,::StaticTrue)
k=1
if isassigned(Integral.neighbors,_Cell)
neighbors=Integral.neighbors[_Cell]
while k<=length(neighbors)
if Neigh==neighbors[k] break end
k+=1
end
if k<=length(neighbors) && isassigned(Integral.area,_Cell) && isassigned(Integral.area[_Cell],k)
return (Integral.area[_Cell])[k]
else
return 0.0
end
else
return 0.0
end
end
| HighVoronoi | https://github.com/martinheida/HighVoronoi.jl.git |
|
[
"MIT"
] | 1.3.1 | bc1b8bfb168b8850d28c935b20a545882a9a078e | code | 15086 |
struct Geometry_Integrator{VI<:HVIntegral}
Integral::VI
function Geometry_Integrator(mesh::Voronoi_MESH,neigh=false)
N=(Vector{Int64})[]
if neigh
l=length(mesh)
emptyint=Int64[]
N=Vector{Vector{Int64}}(undef,l)
for i in 1:l N[i]=copy(emptyint) end
end
vi = Voronoi_Integral(mesh,N)
return new{typeof(vi)}(vi)
end
function Geometry_Integrator(points::HVNodes,neigh=false)
return Geometry_Integrator(ClassicMesh(points),neigh)
end
function Geometry_Integrator(Inte::HV,neigh=false) where HV<:HVIntegral
enable(Inte,neighbors=neigh)
return new{typeof(Inte)}(Inte)
end
end
function copy(I::Geometry_Integrator)
return Geometry_Integrator(copy(I.Integral))
end
function integrate(xs,c,a,b,s,I::Geometry_Integrator,_)
end
decreases_neigh = 0
function integrate_cell(vol::Bool,ar::Bool,bulk::Bool,inter::Bool, _Cell::Int, iterate, calculate, data, Integrator::Geometry_Integrator)
I = Integrator.Integral
#print("$_Cell : $(length(I.MESH.All_Verteces[_Cell])+length(I.MESH.Buffer_Verteces[_Cell])), ")
adj = neighbors_of_cell(_Cell,mesh(I),adjacents=true)
activate_data_cell(data,_Cell,adj)
# lneigh1 = length(adj)
#=xs = data.extended_xs
extended_xs=xs
NFnew = NewNeighborFinder(length(xs[1]),xs[1])
mesh = I.MESH
reset(NFnew,adj,Iterators.flatten((mesh.All_Verteces[_Cell],mesh.Buffer_Verteces[_Cell])),length(mesh.All_Verteces[_Cell])+length(mesh.Buffer_Verteces[_Cell]),data.extended_xs[_Cell])
neigh2 = correct_neighbors(NFnew,copy(adj),xs=extended_xs,_Cell=_Cell)
reset(NFnew,adj,Iterators.flatten((mesh.All_Verteces[_Cell],mesh.Buffer_Verteces[_Cell])),length(mesh.All_Verteces[_Cell])+length(mesh.Buffer_Verteces[_Cell]),data.extended_xs[_Cell],false)
neigh3 = correct_neighbors(NFnew,copy(adj),xs=extended_xs,_Cell=_Cell)
=#
neigh = neighbors_of_cell(_Cell,mesh(I),extended_xs=data.extended_xs,edgeiterator=data.NFfind, neighbors=adj)
set_neighbors(I,_Cell,neigh,nothing,nothing)
# I.neighbors[_Cell] = neighbors_of_cell(_Cell,I.MESH,extended_xs=data.extended_xs,edgeiterator=data.NFfind, neighbors=adj)
#println("$(length(neigh2)), $(length(neigh3)), $(length(adj))")
#_Cell == 10 && error("")
# lneigh2 = length(I.neighbors[_Cell])
# HighVoronoi.decreases_neigh += lneigh2<lneigh1 ? 1 : 0
end
function prototype_bulk(Integrator::Geometry_Integrator)
return Float64[]
end
function prototype_interface(Integrator::Geometry_Integrator)
return Float64[]
end
###############################################################################################################
## stores geometric data for integration
###############################################################################################################
_NeighborFinder(dim,x) = NeighborFinder(dim,x)#dim>=UseNeighborFinderDimension ? NeighborFinder(dim,x) : nothing
struct IntegrateData{T,VP,TT}
extended_xs::VP
domain::Boundary
size::Int64
active::BitVector
float_vec_buffer::Vector{Float64}
float_vec_vec_buffer::Vector{Vector{Float64}}
dimension::Int64
# NF::FastEdgeIterator
NFfind::T
counts::Vector{Int64}
accepted::Vector{Bool}
deprecated::Vector{Bool}
buffer_data::TT
end
function IntegrateData(xs::HV,dom,tt::TT) where {P,HV<:HVNodes{P},TT}
return _IntegrateData(xs,dom,0)
end
function _IntegrateData(xs::HV,dom,tt) where {P,HV<:HVNodes{P}}
dim = size(P)[1]#length(xs[1])
l=length(dom)
m=append!(copy(xs),Vector{P}(undef,l))
active = falses(l) # BitVector flags for the boundary planes currently mirrored
nf = NeighborFinder(dim,zeros(P))
c = Vector{Int64}(undef,length(m))
acc = Vector{Bool}(undef,length(m))
d = Vector{Bool}(undef,length(m))
return IntegrateData{typeof(nf),typeof(m),typeof(tt)}(m,dom,length(xs),active,Float64[],(Vector{Float64})[],length(zeros(P)),nf,c,acc,d,tt)
end
function activate_data_cell(tree,_Cell,neigh)
tree.active .= false
lxs=tree.size
for n in neigh
if n>lxs
plane=n-lxs
tree.active[plane] && continue
tree.active[plane]=true
tree.extended_xs[lxs+plane]=reflect(tree.extended_xs[_Cell],tree.domain,plane)
end
end
end
function neighbors_of_cell(_Cell::Int,mesh::Voronoi_MESH,data::IntegrateData, condition = r->true)
adj = neighbors_of_cell(_Cell,mesh,adjacents=true)
activate_data_cell(data,_Cell,adj)
return neighbors_of_cell(_Cell,mesh,extended_xs=data.extended_xs,edgeiterator=data.NFfind, neighbors=adj)
end
###############################################################################################################
## actual integration method
###############################################################################################################
"""
For each implemented Integrator type this method shall be overwritten.
In particular, the way `calculate` and `iterate` are passed on may be modified according to the needs of the respective integrator type.
See also Polygon_Integrator and Montecarlo_Integrator for reference.
"""
function integrate(Integrator; progress=ThreadsafeProgressMeter(0,true,""), domain=Boundary(), relevant=1:(length(Integrator.Integral)+length(domain)), modified=1:(length(Integrator.Integral)))
_integrate(Integrator; progress=progress,domain=domain, calculate=modified, iterate=relevant)
end
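# Sketch of the interface a custom Integrator is expected to provide (modeled on the
# commented TestIntegrator examples at the end of this file; all names below are
# illustrative only):
#=
struct MyIntegrator{I<:HVIntegral}
    Integral::I
end
prototype_bulk(::MyIntegrator) = Float64[]
prototype_interface(::MyIntegrator) = Float64[]
function integrate(neighbors, _Cell, iterate, calculate, data, Integrator::MyIntegrator,
                   ar, bulk_inte, inter_inte, _)
    return 0.0   # return the volume computed for cell _Cell
end
=#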
@inline function __integrate_getdata(I_data::Nothing,Integral,domain,Integrator)
nn = nodes(mesh(Integral))
IntegrateData(nn,domain,Integrator)
end
__integrate_getdata(I_data,Integral,domain,Integrator) = I_data
"""
Iterates `integrate_cell` over all elements of `iterate`.
It thereby passes the information on whether volumes, areas, bulk integrals or surface integrals shall be calculated.
"""
function _integrate(Integrator; domain=Boundary(), calculate, iterate, # =1:(length(Integrator.Integral)+length(domain)), iterate=1:(length(Integrator.Integral)),
I_data=nothing, compact=false, intro="$(Integrator_Name(Integrator))-integration over $(length(collect(iterate))) cells:",progress=ThreadsafeProgressMeter(2*length(iterate),false,intro))
TODO=collect(iterate)
try
#vp_print(0,intro)
position_0 = length(intro)+5
#vp_print(position_0-5," \u1b[0K")
Integral=Integrator.Integral
data = __integrate_getdata(I_data,Integral,domain,Integrator)
if length(TODO)==0
vp_print(position_0,"nothing to integrate")
return Integrator, data
end
vol=enabled_volumes(Integral)
ar=enabled_area(Integral)
bulk=enabled_bulk(Integral)
inter=enabled_interface(Integral)
TODO_count=length(TODO)
max_string_i = length(string(iterate[end], base=10))
max_string_todo = length(string(TODO_count, base=10))
vol_sum = 0.0
count=0
bb = typeof(Integrator.Integral)<:ThreadsafeIntegral
# println("Hallo")
#println(Integrator.Integral.area)
for k in 1:TODO_count # initialize an array of length "length(xs)" to locally store vertices of cells
#vp_print(position_0,"Cell $(string(TODO[k], base=10, pad=max_string_i)) (in cycle: $(string(k, base=10, pad=max_string_todo)) of $TODO_count)")
#@descend integrate_cell(vol,ar,bulk,inter,TODO[k],iterate, calculate, data,Integrator)
#error("")
V=integrate_cell(vol,ar,bulk,inter,TODO[k],iterate, calculate, data,Integrator)
#print(Threads.threadid())
if vol
vol_sum+=V #Integral.volumes[TODO[k]]
count += V<1E-10
end
#k<5 && println(k)
next!(progress)
#print(" vol = $(vol ? V : 0.0), s=$(round(vol_sum,digits=6)), $count")
end
# println("Hallo")
V1 = vol_sum
#println()
#println("reached end in thread ",Threads.threadid())
s = synchronizer(Integrator.Integral)
#println("Thread $(Threads.threadid()), $(sync(s))",)
sync(s) # wait until all threads arrive at this point
for k in 1:TODO_count # initialize an array of length "length(xs)" to locally store vertices of cells
#vp_print(position_0,"Cell $(string(TODO[k], base=10, pad=max_string_i)) (in cycle: $(string(k, base=10, pad=max_string_todo)) of $TODO_count)")
#@descend integrate_cell(vol,ar,bulk,inter,TODO[k],iterate, calculate, data,Integrator)
#error("")
V=cleanup_cell(vol,ar,bulk,inter,TODO[k],iterate, calculate, data,Integrator)
if vol
vol_sum+=V #Integral.volumes[TODO[k]]
count += V<1E-10
end
#print(" vol = $(vol ? V : 0.0), s=$(round(vol_sum,digits=6)), $count")
next!(progress)
end
# println("Hallo")
#vp_line_up(1)
#if (!compact) vp_line() end
#println("Differenz: $(vol_sum-V1)")
return Integrator,data
catch e
open("error_log$(Threads.threadid()).txt", "w") do f
# Stacktrace speichern
Base.showerror(f, e, catch_backtrace())
end
#sync(s)
#sync(s)
end
end
function integrate_cell(vol::Bool,ar::Bool,bulk::Bool,inter::Bool, _Cell, iterate, calculate, data, Integrator::Nothing)
end
"""
adjusts the entries of the Integrator.Integral variable:
It sorts the entries according to the modified order of neighbors, fills up gaps, and deletes entries for neighbors that are gone.
Afterwards it calls the actual integration function that is provided by the Integrator.
"""
function integrate_cell(vol::Bool,ar::Bool,bulk::Bool,inter::Bool, _Cell, iterate, calculate, data, Integrator)
I=Integrator.Integral
adj = neighbors_of_cell(_Cell,mesh(I),adjacents=true)
activate_data_cell(data,_Cell,adj)
new_neighbors = neighbors_of_cell(_Cell,mesh(I),extended_xs=data.extended_xs,edgeiterator=data.NFfind, neighbors=adj)
# println("$_Cell : $new_neighbors")
#activate_data_cell(data,_Cell,neighbors_of_cell(_Cell,I.MESH,adjacents=true))
#new_neighbors = neighbors_of_cell(_Cell,I.MESH,extended_xs=data.extended_xs,edgeiterator=data.NFfind)
proto_bulk=prototype_bulk(Integrator)
proto_interface=prototype_interface(Integrator)
set_neighbors(I,_Cell,new_neighbors,proto_bulk,proto_interface)
old_neighbors = get_neighbors(I,_Cell)
activate_data_cell(data,_Cell,old_neighbors)
dfvb=data.float_vec_buffer
dfvvb=data.float_vec_vec_buffer
# println(I.area[_Cell])
#@descend integrate(old_neighbors,_Cell,iterate, calculate, data,Integrator, ar ? I.area[_Cell] : dfvb , bulk ? I.bulk_integral[_Cell] : dfvb , inter ? I.interface_integral[_Cell] : dfvvb)
#error("")
cdw = cell_data_writable(I,_Cell,dfvb,dfvvb)
#println(old_neighbors)
#integrate(cdw.neighbors,_Cell,iterate, calculate, data,Integrator, cdw.area , cdw.bulk_integral , cdw.interface_integral)
#@descend integrate(cdw.neighbors,_Cell,iterate, calculate, data,Integrator, cdw.area , cdw.bulk_integral , cdw.interface_integral)
#error("")
V=integrate(cdw.neighbors,_Cell,iterate, calculate, data,Integrator, cdw.area , cdw.bulk_integral , cdw.interface_integral,cdw.volumes)
#println("-")
#V=integrate(old_neighbors,_Cell,iterate, calculate, data,Integrator, ar ? I.area[_Cell] : dfvb , bulk ? I.bulk_integral[_Cell] : dfvb , inter ? I.interface_integral[_Cell] : dfvvb)
# println(I.area[_Cell])
if (vol)
cdw.volumes[1]=V
end
return V
# error("")
end
####################################################################################################################
## Merge two Integrators.
####################################################################################################################
merge_integrate_data(Integral,domain,I_data::Nothing,Integrator) = IntegrateData(Integral.MESH.nodes,domain,Integrator)
merge_integrate_data(Integral,domain,I_data,Integrator) = I_data
function merge_integrate(Integrator,Integrator2; domain=Boundary(), calculate=1:(length(Integrator.Integral)+length(domain)), iterate=1:(length(Integrator.Integral)),
I_data=nothing, use1=x->true, compact=false, intro="")
TODO=collect(iterate)
#use1=x->true
position_0 = length(intro)+5
Integral=Integrator.Integral
data = merge_integrate_data(Integral,domain,I_data,Integrator)
if length(TODO)==0
return Integrator, data
end
vol=enabled_volumes(Integral)
ar=enabled_area(Integral)
bulk=enabled_bulk(Integral)
inter=enabled_interface(Integral)
TODO_count=length(TODO)
max_string_i = length(string(iterate[end], base=10))
max_string_todo = length(string(TODO_count, base=10))
for k in 1:TODO_count # initialize an array of length "length(xs)" to locally store vertices of cells
if typeof(Integrator)==Geometry_Integrator
_Cell = TODO[k]
I = Integrator.Integral
activate_data_cell(data,_Cell,neighbors_of_cell(_Cell,I.MESH,adjacents=true))
Integrator.Integral.neighbors[TODO[k]] = neighbors_of_cell(_Cell,I.MESH,extended_xs=data.extended_xs)
else
integrate_cell(vol,ar,bulk,inter,TODO[k],iterate, calculate, data,use1(TODO[k]) ? Integrator : Integrator2)
end
end
#vp_line_up(1)
return Integrator,data
end
####################################################################################################################
## Two fully implemented types of Test Integrators.
####################################################################################################################
struct TestIntegrator
Integral::Voronoi_Integral
end
#=function copy(I::TestIntegrator)
return TestIntegrator(copy(I.Integral))
end
function prototype_interface(Integrator::TestIntegrator)
return [0.0]
end
function prototype_bulk(Integrator::TestIntegrator)
return [0.0]
end
function integrate(neighbors,_Cell,iterate, calculate, data,Integrator::TestIntegrator,ar,bulk_inte,inter_inte)
for i in 1:(length(neighbors))
ar[i]=1.0*_Cell+0.01*neighbors[i]
inter_inte[i][1]=trunc(0.1*neighbors[i],digits=3)
end
bulk_inte.+=100+_Cell
return 1.0*_Cell
end
=#
struct TestIntegrator2
Integral::Voronoi_Integral
end
#=
function copy(I::TestIntegrator2)
return TestIntegrator2(copy(I.Integral))
end
function prototype_interface(Integrator::TestIntegrator2)
return [0.0]
end
function prototype_bulk(Integrator::TestIntegrator2)
return [0.0]
end
function integrate(neighbors,_Cell,iterate, calculate, data,Integrator::TestIntegrator2,ar,bulk_inte,inter_inte)
for i in 1:(length(neighbors))
ar[i]=10.0*_Cell+0.01*neighbors[i]
#inter_inte[i][1]=trunc(0.1*neighbors[i],digits=3)
end
return 1.0*_Cell
end
=# | HighVoronoi | https://github.com/martinheida/HighVoronoi.jl.git |
|
[
"MIT"
] | 1.3.1 | bc1b8bfb168b8850d28c935b20a545882a9a078e | code | 3892 |
@inline _integrator_function(integrand) = integrand, integrand, integrand
function Integrator(integral::HVIntegral,type::Call_HEURISTIC_MC;mc_accurate=(1000,100,20),integrand=nothing)
fb, fi, f = _integrator_function(integrand)
return HeuristicMCIntegrator(integral,f, mc_accurate)
end
function Integrator(integral::HVIntegral,type::Call_HEURISTIC_INTERNAL;mc_accurate=(1000,100,20),integrand=nothing)
fb, fi, f = _integrator_function(integrand)
return Heuristic_Integrator{typeof(f),typeof(integral)}(f,true,integral)
end
function Integrator(integral::HVIntegral,type::Call_HEURISTIC;mc_accurate=(1000,100,20),integrand=nothing)
fb, fi, f = _integrator_function(integrand)
return Heuristic_Integrator(integral,f,true)
end
Integrator(integral::HVIntegral,type::Call_GEO;mc_accurate=(1000,100,20),integrand=nothing) = Geometry_Integrator(integral,true)
function Integrator(integral::HVIntegral,type::Call_POLYGON;mc_accurate=(1000,100,20),integrand=nothing)
fb, fi, f = _integrator_function(integrand)
return Polygon_Integrator(integral,f,true)
end
function Integrator(integral::HVIntegral,type::Call_FAST_POLYGON;mc_accurate=(1000,100,20),integrand=nothing)
fb, fi, f = _integrator_function(integrand)
return Fast_Polygon_Integrator(integral,f,true)
end
function Integrator(integral::HVIntegral,type::Call_MC;mc_accurate=(1000,100,20),integrand=nothing,heuristic=false)
fb, fi, f = _integrator_function(integrand)
i,b,r=mc_accurate
return Montecarlo_Integrator(integral, b=fb, i=fi, nmc_bulk=b, nmc_interface=i, recycle=r,heuristic=heuristic)
end
Integrator(mesh::Voronoi_MESH,type::Call_NO;mc_accurate=(1000,100,20),integral=nothing,integrand=nothing) = VI_NOTHING
const _VI__TEST=0
const _VI__TEST_2=1
const _VI__MIN=2
const _VI__POLYGON=2
const _VI__MONTECARLO=3
const _VI__GEOMETRY=4
const _VI__HEURISTIC=5
const _VI__HEURISTIC_INTERNAL=6
const _VI__HEURISTIC_CUBE=7
const _VI__HEURISTIC_MC=8
const _VI__FAST_POLYGON=9
const _VI__MAX=_VI__FAST_POLYGON
@inline Integrator_Number(I::Call_POLYGON) = _VI__POLYGON
@inline Integrator_Number(I::Call_FAST_POLYGON) = _VI__FAST_POLYGON
@inline Integrator_Number(I::Call_MC) = _VI__MONTECARLO
@inline Integrator_Number(I::Call_GEO) = _VI__GEOMETRY
@inline Integrator_Number(I::Call_HEURISTIC) = _VI__HEURISTIC
@inline Integrator_Number(I::Call_HEURISTIC_MC) = _VI__HEURISTIC_MC
@inline IntegratorType(I) = I
function IntegratorType(I::Int64)
if I == _VI__POLYGON
return VI_POLYGON
elseif I == _VI__FAST_POLYGON
return VI_FAST_POLYGON
elseif I == _VI__MONTECARLO
return VI_MONTECARLO
elseif I == _VI__GEOMETRY
return VI_GEOMETRY
elseif I == _VI__HEURISTIC
return VI_HEURISTIC
elseif I == _VI__HEURISTIC_MC
return VI_HEURISTIC_MC
else
error("Invalid integrator value")
end
end
function backup_Integrator(I,b)
return I
end
Integrator_Name(I::Int) = Integrator_Name(IntegratorType(I))
function Integrator_Name(I)
if (typeof(I)<:Polygon_Integrator)
return "POLYGON"
elseif (typeof(I)<:Fast_Polygon_Integrator)
return "FAST_POLYGON"
elseif (typeof(I)<:Montecarlo_Integrator)
return "MONTECARLO"
elseif (typeof(I)<:Geometry_Integrator)
return "GEOMETRY"
elseif (typeof(I)<:Heuristic_Integrator)
return "HEURISTIC"
elseif (typeof(I)<:HeuristicMCIntegrator)
return "HEURISTIC_MC"
else
return "$(typeof(I)): Unknown"
end
end
function replace_integrator(integrator::Int)
return integrator in [_VI__HEURISTIC_INTERNAL,_VI__HEURISTIC_CUBE] ? _VI__POLYGON : integrator
end
replace_integrator(I::Call_HEURISTIC_INTERNAL) = VI_POLYGON
replace_integrator(I) = I
| HighVoronoi | https://github.com/martinheida/HighVoronoi.jl.git |
|
[
"MIT"
] | 1.3.1 | bc1b8bfb168b8850d28c935b20a545882a9a078e | code | 4233 | struct ProjRow
cols::Vector{Int64}
vals::Vector{Float64}
end
function ProjRow()
return ProjRow(zeros(Int64,10),zeros(Float64,10))
end
function increase(rows::Vector{ProjRow},r,c)
row = rows[r]
lc = length(row.cols)
i = 1
while row.cols[i]!=c && row.cols[i]!=0
i += 1
end
if row.cols[i]==0
if i==lc
resize!(row.cols,lc+10)
resize!(row.vals,lc+10)
row.cols[(lc+1):(lc+10)] .= 0    # resize! leaves the new entries undefined
row.vals[(lc+1):(lc+10)] .= 0.0
end
row.cols[i] = c
row.vals[i] = 0
end
row.vals[i] += 1
end
function get_matrix(rows::Vector{ProjRow})
lr = length(rows)
mysize = 0
for i in 1:lr
fi = findfirst(x->x==0.0,rows[i].cols)
resize!(rows[i].cols,fi-1)
mysize += fi-1
end
rs = Vector{Int64}(undef,mysize)
cs = Vector{Int64}(undef,mysize)
vals = Vector{Float64}(undef,mysize)
count = 0
for i in 1:lr
lc = length(rows[i].cols)
all_c = sum(rows[i].vals)
for k in 1:lc
count += 1
rs[count] = i
cs[count] = rows[i].cols[k]
vals[count] = rows[i].vals[k]/all_c
end
end
return rs, cs, vals
end
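# The triplets returned above can be assembled into a row-stochastic sparse matrix,
# e.g. with the standard library (sketch, given a Vector{ProjRow} filled via increase):
#=
using SparseArrays
rs, cs, vals = get_matrix(rows)
P = sparse(rs, cs, vals)   # P[i,j] ≈ fraction of samples of row-cell i landing in column-cell j
=#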
function interactionmatrix(vg_r::VoronoiGeometry,vg_c::VoronoiGeometry; check_compatibility=true, tolerance = 1.0E-12, hits_per_cell = 1000, bounding_box=Boundary() )
if check_compatibility && !compare(vg_r.domain.boundary,vg_c.domain.boundary,true)
@warn "The domains "*boundaryToString(vg_r.domain.boundary)*" and "*boundaryToString(vg_c.domain.boundary)*" are not identical!!"
end
domain = vg_r.domain.boundary
nodes_r = nodes(mesh(integral(vg_r.domain)))#Integrator.Integral.MESH.nodes
lm_r = length(nodes_r)
tree_r = NearestNeighbors.KDTree(nodes_r)
nodes_c = nodes(mesh(integral(vg_c.domain)))
lm_c = length(nodes_c)
tree_c = NearestNeighbors.KDTree(nodes_c)
reference_r = zeros(Int64, lm_r)
referenc_c = zeros(Int64, lm_c)
for i in 1:lm_r
ref = NearestNeighbors.nn(tree_c,nodes_r[i])[1]
if norm(nodes_r[i]-nodes_c[ref])<tolerance
reference_r[i] = ref
referenc_c[ref] = i
end
end
dim = length(nodes_r[1])
number_of_all_nodes = lm_r+lm_c-sum(map(k->k!=0,reference_r))
#println(referenc_c)
#println(reference_r)
# set up range
left, right, poly_vol = poly_box(domain,bounding_box)
min_number_of_hits = hits_per_cell*number_of_all_nodes # minimal number of samples in domain
box_dimensions = map(k->right[k]-left[k],1:dim) # dimensions of future range
box_vol = prod(box_dimensions) # volume of range
number_of_hits_in_range = (box_vol/poly_vol)*min_number_of_hits # number of samples in range in order to have required number of hits in domain
noh_in_cube = number_of_hits_in_range /box_vol# rescale this number to a unit cube
number_of_hits_per_dim = (noh_in_cube*1.0)^(1/dim) # take the number of samples per dimension
box_dimensions .*= number_of_hits_per_dim # rescale this number to the dimensions of the range
range = DensityRange(map(k->unsafe_trunc(Int64,box_dimensions[k])+1,1:dim),map(k->(left[k],right[k]),1:dim))
ref_r = references(vg_r.domain)#.references
off_r = length(ref_r)
ref_c = references(vg_c.domain)#.references
off_c = length(ref_c)
my_increase(rows,r,c) = increase(rows, r>off_r ? r-off_r : ref_r[r]-off_r, c>off_c ? c-off_c : ref_c[c]-off_c)
rows = Vector{ProjRow}(undef,lm_r-off_r)
map!(k->ProjRow(),rows,1:lm_r-off_r)
iterate_interactions(tree_r,tree_c,rows,range,1,copy(range.x),my_increase)
return get_matrix(rows)
end
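# Usage sketch (hypothetical geometries vg1 and vg2 defined on the same domain):
#=
rs, cs, vals = interactionmatrix(vg1, vg2; hits_per_cell=500)
# rs/cs/vals are COO triplets; every row of the resulting matrix sums to one.
=#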
function iterate_interactions(tree_r,tree_c,rows,range::DensityRange,level,x,increase)
x0 = x[level]
for k in 1:range.number_of_cells[level]
if level<DIMENSION(range)
iterate_interactions(tree_r,tree_c,rows,range,level+1,x,increase)
else
r = NearestNeighbors.nn(tree_r,x)[1]
c = NearestNeighbors.nn(tree_c,x)[1]
increase(rows,r,c)
end
x[level] += range.dimensions[level]
end
x[level] = x0
end
| HighVoronoi | https://github.com/martinheida/HighVoronoi.jl.git |
|
[
"MIT"
] | 1.3.1 | bc1b8bfb168b8850d28c935b20a545882a9a078e | code | 8487 |
# We need to write a dispatch of Base.zero in order to deal with the problem Vector{Float64}==zero
#=import Base.zero
function zero(Base::Type{Vector{Float64}})
return Vector{Float64}[]
end
=#
#######################################################################################################################################
# Monte-Carlo integration of two vector-valued functions on the bulk and the interfaces respectively. Each of the functions may be
# disabled by setting bulk=nothing or interface=nothing
#######################################################################################################################################
@doc raw"""
Montecarlo_Integrator{II,A,T,TT}
Integrator for a Monte-Carlo integration over the interfaces and the bulk of each cell.
If area==false any interface information is dropped.
`bulk` and `interface` are vector(!) valued functions evaluated on the bulk and interfaces respectively.
Set either of them to `nothing` to avoid the respective evaluation. In this case, the Integrator will only calculate
the volume (if bulk==nothing) and the (d-1)-dimensional interface area (if interface==nothing and area==true).
"""
struct Montecarlo_Integrator{II,A,T,TT}
Integral::II
bulk::T
interface::TT
NMC_bulk::Int64
NMC_interface::Int64
_recycle::Int64
area::Bool
directions::A
gamma::Float64
cycle::Vector{Int64}
heuristic::Bool
# If i!=nothing, then area has to be true. Otherwise values are taken as given
end
function Montecarlo_Integrator(I::HVI, b, i; nmc_bulk=100, nmc_interface=1000, cal_area=true, recycle=20,heuristic=false) where HVI<:HVIntegral
enable(I,volume=true,integral=typeof(b)!=Nothing)
_nodes = nodes(mesh(I))
return Montecarlo_Integrator(I, b, i, nmc_bulk, nmc_interface, recycle, typeof(i)!=Nothing ? true : cal_area, new_directions!(Vector{typeof(_nodes[1])}(undef,nmc_interface),length(_nodes[1])),gamma(1+length(_nodes[1])/2),[1],heuristic)
end
function Montecarlo_Integrator(mesh::Voronoi_MESH; b=nothing, i=nothing, nmc_bulk=100, nmc_interface=1000, cal_area=true, recycle=20)
Inte=Voronoi_Integral(mesh,integrate_bulk=(typeof(b)!=Nothing),integrate_interface=(typeof(i)!=Nothing))
return Montecarlo_Integrator(Inte, b, i, nmc_bulk=nmc_bulk, nmc_interface=nmc_interface, recycle=recycle, cal_area=cal_area)
end
function Montecarlo_Integrator(Inte::HVIntegral; b=nothing, i=nothing, nmc_bulk=100, nmc_interface=1000, cal_area=true, recycle=20,heuristic=false)
enable(Inte,volume=true,integral=typeof(b)!=Nothing)
return Montecarlo_Integrator(Inte, b, i, nmc_bulk=nmc_bulk, nmc_interface=nmc_interface, recycle=recycle, cal_area=cal_area,heuristic=heuristic)
end
function copy(I::Montecarlo_Integrator)
return Montecarlo_Integrator(copy(I.Integral),I.bulk,I.interface,nmc_bulk=I.NMC_bulk,nmc_interface=I.NMC_interface,recycle=I._recycle,cal_area=I.area)
end
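# Construction sketch for the Monte-Carlo integrator documented above (assumes an
# HVIntegral `integral`; `f` is a vector-valued integrand):
#=
f = x -> [1.0, x[1]^2]
mc = Montecarlo_Integrator(integral, f, f; nmc_bulk=1000, nmc_interface=10000, recycle=20)
=#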
function integrate(Integrator::Montecarlo_Integrator; progress=ThreadsafeProgressMeter(0,true,""), domain=Boundary(), relevant=1:(length(mesh(Integrator.Integral))+length(domain)), modified=1:(length(mesh(Integrator.Integral))))
_integrate(Integrator; domain=domain, calculate=relevant, progress=progress, iterate=Base.intersect(relevant,1:(length(mesh(Integrator.Integral)))))
end
function prototype_interface(Integrator::Montecarlo_Integrator)
y = (typeof(Integrator.interface)!=Nothing) ? Integrator.interface(nodes(mesh(Integrator.Integral))[1]) : Float64[]
y.*=0
return y
end
function prototype_bulk(Integrator::Montecarlo_Integrator)
y = (typeof(Integrator.bulk)!=Nothing) ? Integrator.bulk(nodes(mesh(Integrator.Integral))[1]) : Float64[]
y.*=0
return y
end
"""calculates NMC_interface new i.i.d. random directions and stores them to the dirvec variabel"""
function new_directions!(dirvec,dim)
for i in 1:(length(dirvec))
v=randn(dim)
abs=sum(abs2,v)
while abs>1
v=randn(dim)
abs=sum(abs2,v)
end
dirvec[i]=mystaticversion(v/sqrt(abs),dirvec[i])
end
return dirvec
end
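# Sketch of the estimators used in the integrate method below: with u uniform on the
# unit sphere and t(u) the distance from the cell node to the cell boundary along u,
#   volume  ≈ c_vol * mean(t^d),                       c_vol  = pi^(d/2) / gamma(1 + d/2)  (unit-ball volume)
#   area_j  ≈ c_area * mean(t^(d-1) / |<n_j, u>| ; ray hits face j),   c_area = d * c_vol  (unit-sphere area)
# Bulk integrals are estimated analogously with an additional uniform sample of r in [0, t].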
#function integrate(domain,_Cell,iter, calcul,searcher,integrator::Montecarlo_Integrator)
function integrate(neighbors,_Cell,iterate, calculate, data,Integrator::Montecarlo_Integrator,ar,bulk_inte,inter_inte,_)
vec = Float64[]
vecvec = [vec]
I=Integrator
xs=data.extended_xs
if mod(I.cycle[1], I._recycle)==0
new_directions!(I.directions,length(xs[1]))
I.cycle[1]=0
end
I.cycle[1] += 1
directions=I.directions
x = xs[_Cell]
d = data.dimension
lneigh = length(neighbors)
# Bulk computations: V stores volumes y stores function values in a vector format
V = 0.0
ar.*= 0.0
bulk_inte.*=0.0
for i in 1:(length(inter_inte))
inter_inte[i] .= 0.0
end
normals=Vector{typeof(xs[1])}(undef,length(neighbors))
for j in 1:lneigh
normals[j]=normalize(xs[neighbors[j]] - xs[_Cell])
end
for K in 1:(I.NMC_interface)
u = directions[K]
(j, t) = mc_raycast(_Cell, neighbors, x, u, xs)
V += t^d
if typeof(I.bulk)!=Nothing && !I.heuristic
for _ in 1:(I.NMC_bulk)
r = t * rand()
x′ = x + u * r
bulk_inte .+= I.bulk(x′) * r^(d-1) * t
end
end
if I.area && t < Inf
normal = normals[j]
dA = t ^ (d-1) / abs(dot(normal, u))
ar[j] += dA # be aware that j=1 refers to xs[1], i.e. the CENTER of the cell 'i'
if typeof(I.interface)!=Nothing && !I.heuristic
inter_inte[j] .+= dA * I.interface(x + t*u)
end
end
end
c_vol = pi^(d/2) / I.gamma
c_area = d * c_vol
V *= c_vol / I.NMC_interface
ar .*= (c_area / I.NMC_interface)
bulk_inte .*= (c_area / I.NMC_interface / I.NMC_bulk)
inter_inte .*= (c_area / I.NMC_interface)
#return V
#=V_0 = 0.0
for k in 1:lneigh
n = neighbors[k]
dist = 0.5*norm(xs[n] - xs[_Cell])
factor = dist/d
V_0 += ar[k]*factor
end=#
#println(abs(V_0-V))
if I.area
lmesh = length(mesh(I.Integral))
for k in 1:lneigh
n = neighbors[k]
n>lmesh && continue
if n in calculate && n<_Cell #: true
neigh_data = cell_data_writable(Integrator.Integral,n,vec,vecvec)
_Cell_index = findfirstassured(_Cell,neigh_data.neighbors)
neigh_area = 0.0
neigh_area = neigh_data.area[_Cell_index]
#println("$neigh_area, $n, $_Cell")
old_area = ar[k]
new_area = abs(neigh_area)<old_area*1.0E-10 ? old_area : 0.5*(old_area+neigh_area)
dist = 0.5*norm(xs[n] - xs[_Cell])
factor = dist/d
#V1 = Integrator.Integral.volumes[n] +V
neigh_data.volumes[1] += (new_area-neigh_area)*factor
V += (new_area-old_area)*factor
#println((Integrator.Integral.volumes[n] + V-V1)/V1)
ar[k] = new_area
neigh_data.area[_Cell_index] = new_area
#set_area(Integrator.Integral,n,_Cell,new_area)
(typeof(I.interface)==Nothing || length(neigh_data.interface_integral)<length(neigh_data.neighbors) || I.heuristic) && continue
old_int = inter_inte[k]
new_int = !isassigned(neigh_data.interface_integral,_Cell_index) ? old_int : 0.5*(old_int+neigh_data.interface_integral[_Cell_index])
inter_inte[k] = new_int
neigh_data.interface_integral[_Cell_index] = copy(new_int)
end
end
end
return V
end
function mc_raycast(_Cell, neighbors, r, u, xs)
ts = Inf # no hit found yet
x0 = xs[_Cell]
c = dot(xs[_Cell], u)
skip(n) = (dot(xs[n], u) <= c)
result_i=0
for i in 1:length(neighbors)
skip(neighbors[i]) && continue
x = xs[neighbors[i]]
t = (sum(abs2, r .- x) - sum(abs2, r .- x0)) / (2 * u' * (x-x0))
if 0 < t < ts
ts, result_i = t, i
end
end
return result_i, ts
end
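# Note on the formula above: since r coincides with xs[_Cell], the computed t is the
# distance along u from the cell node to the bisector hyperplane between xs[_Cell] and
# xs[neighbors[i]], i.e. t = |x - x0|^2 / (2 u·(x - x0)); the smallest positive t over
# all neighbors marks where the ray leaves the cell.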
| HighVoronoi | https://github.com/martinheida/HighVoronoi.jl.git |
|
[
"MIT"
] | 1.3.1 | bc1b8bfb168b8850d28c935b20a545882a9a078e | code | 11174 |
# Mutable struct RefineMesh
struct RefineMesh{P <: Point, VDB <: VertexDB{P}, T <: AbstractMesh{P}} <: AbstractMesh{P,VDB}
data::T
affected::Vector{Bool} # or BitVector
int_data::MVector{1, Int64} # A mutable vector of length 1 for integer data
length::Int64 # Field to store the length of the mesh
buffer_sig::Vector{Int64}
# Constructor
function RefineMesh(mesh::AbstractMesh{P}) where P
new{P, ptype(mesh), typeof(mesh)}(mesh, zeros(Bool, length(mesh)), MVector{1, Int64}([0]), length(mesh),Int64[])
end
end
const TrackAffectedMesh = RefineMesh
filter!( condition, mesh::RefineMesh,_depsig=StaticBool{true}(),_depr=StaticBool{true}(); affected = nodes_iterator(mesh) ) = filter!(condition,mesh.data,_depsig,_depr,affected=affected)
#=
# Overloading getproperty and setproperty! for RefineMesh
function Base.getproperty(m::RefineMesh, sym::Symbol)
if sym === :_count
return m.int_data[1]
else
return getfield(m, sym)
end
end
function Base.setproperty!(m::RefineMesh, sym::Symbol, value)
if sym === :_count
m.int_data[1] = value
else
setfield!(m, sym, value)
end
end
=#
@inline Base.getproperty(m::RefineMesh, prop::Symbol) = dyncast_get(m,Val(prop))
@inline @generated dyncast_get(m::RefineMesh, ::Val{:_count}) = :(getfield(m,:int_data)[1])
@inline @generated dyncast_get(m::RefineMesh, ::Val{:boundary_Vertices}) = :(getfield(m,:data).boundary_Vertices)
@inline @generated dyncast_get(m::RefineMesh, d::Val{S}) where S = :( getfield(m, S))
@inline Base.setproperty!(m::RefineMesh, prop::Symbol, val) = dyncast_set(m,Val(prop),val)
@inline @generated dyncast_set(m::RefineMesh, ::Val{:_count},val) = :(getfield(m,:int_data)[1]=val)
@inline @generated dyncast_set(m::RefineMesh, d::Val{S},val) where S = :( setfield!(m, S,val))
# Implementing methods for RefineMesh by forwarding to data
PointType(m::RefineMesh) = PointType(m.data)
Base.length(m::RefineMesh) = length(m.data)
dimension(m::RefineMesh) = dimension(m.data)
nodes(m::RefineMesh) = nodes(m.data)
@inline number_of_vertices(m::RM,i::Int64,static::StaticFalse) where RM<:RefineMesh = number_of_vertices(m.data,i,static)
@inline number_of_vertices(m::RM,i::Int64,static::StaticTrue) where RM<:RefineMesh = number_of_vertices(m.data,i,static)
@inline vertices_iterator(m::RefineMesh, i::Int64) = vertices_iterator(m.data, i)
@inline vertices_iterator(m::TM, index::Int64, internal::StaticTrue) where TM<:RefineMesh = vertices_iterator(m.data, index, internal)
@inline get_vertex_index(m::RefineMesh, v::AbstractVector{Int64}) = get_vertex_index(m.data, v)
@inline get_vertex(m::RM,ref::VertexRef) where {RM<:RefineMesh} = get_vertex(m.data,ref)
@inline remove_vertex(m::RefineMesh, ref::VertexRef) = remove_vertex(m.data, ref)
@inline remove_vertex(m::RefineMesh, v::AbstractVector{Int64}) = remove_vertex(m.data, v)
@inline haskey(m::RefineMesh, v::AbstractVector{Int64}, i) = haskey(m.data, v, i)
@inline internal_index(m::TM, index::Int64) where TM<:RefineMesh = internal_index(m.data, index)
@inline external_index(m::TM, index::Int64) where TM<:RefineMesh = external_index(m.data, index)
@inline external_index(m::TM, inds::AVI) where {TM<:RefineMesh, AVI<:AbstractVector{Int64}} = _external_indeces(m.data, inds, m.buffer_sig)
@inline internal_index(m::TM, inds::AVI) where {TM<:RefineMesh, AVI<:AbstractVector{Int64}} = _internal_indeces(m.data, inds, m.buffer_sig)
@inline search_data(m::RefineMesh) = search_data(m.data)
@inline haskey(m::RefineMesh, v::AbstractVector{Int64}) = haskey(m.data,v)
function push!(m::RefineMesh{T, VDB, T1}, vertex::Pair{Vector{Int64}, T}) where {T<:Point, VDB<:HighVoronoi.VertexDB{T}, T1<:AbstractMesh{T, VDB} }
ret = push!(m.data,vertex)
sig, _ = vertex
sort!(sig)
_push_affected!(m,sig)
return ret
end
function _push_affected!(m::RefineMesh{T, VDB, T1}, sig) where {T<:Point, VDB<:HighVoronoi.VertexDB{T}, T1<:AbstractMesh{T, VDB} }
for j in sig
j>m.length && break
if !(m.affected[j]) m._count+=1 end
m.affected[j]=true
end
end
function push!(m::RefineMesh{T, VDB, T1}, vertex::Pair{Vector{Int64}, T},i) where {T<:Point, VDB<:HighVoronoi.VertexDB{T}, T1<:AbstractMesh{T, VDB} }
ret = push!(m.data,vertex,i)
sig, _ = vertex
sig = external_index(m,sig)
_push_affected!(m,sig)
return ret
end
@inline push_ref!(mesh::TM, ref, index) where {T<:Point, TM<:RefineMesh{T}} = push_ref!(mesh.data, ref, index)
#=function push!(m::RefineMesh{T}, vertex::Pair{Vector{Int64},T}, i) where T
sig, _ = vertex
sort!(sig)
for j in sig
j>m.length && break
if !(m.affected[j]) m._count+=1 end
m.affected[j]=true
end
push!(m.data,vertex,i)
end=#
function retrieve(m::RefineMesh, lnxs)
iter = vcat(collect(1:lnxs), zeros(Int64, m._count - lnxs))
m._count = lnxs + 1
for i in (lnxs + 1):m.length
if m.affected[i]
iter[m._count] = i
m._count += 1
end
end
return iter
end
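# Usage sketch of the affected-cell tracking above (hypothetical mesh with lnxs
# freshly inserted nodes at the front):
#=
rm = RefineMesh(mesh)
# ... push! new vertices into rm while recomputing cells ...
iter = retrieve(rm, lnxs)   # 1:lnxs plus all previously existing cells that were touched
=#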
function clean_affected!(mesh::AbstractMesh,nxs,affected; clean_neighbors=StaticBool{false}())
# If a vertex is solely composed of affected nodes, it has to be removed.
# if a vertex contains only one NONaffected vertex, it has to stay.
# Hence the following needs to be iterated only over affected nodes.
tree = SearchTree(nxs)#nodes(mesh))
#mynodes = nodes(mesh)
numberOfNodes = length(mesh)
function my_filter(sig,r)
if first_is_subset(sig,affected,numberOfNodes)
i,dist = nn(tree,r)
return norm(nodes(mesh)[sig[1]]-r)<=(1.0+1.0E-7)*dist
end
return true
end
#open("eliminate.txt", "w") do file
filter!((sig,r)->my_filter(sig,r),mesh,affected=affected)
#end
if clean_neighbors==true
for i in affected
empty!(neighbors[i])
end
end
end
#=function systematic_refine!( Integral::Voronoi_Integral, new_xs::HVNodes, domain=Boundary(); settings=DefaultRaycastSetting, obligatories=Int64[], kwargs...)
#return mesh(Integral)
length(new_xs)!=0 && prepend!(Integral,new_xs)
return systematic_refine!( mesh(Integral),new_xs,domain; obligatories=obligatories, settings=settings, kwargs...)
end=#
"""
    systematic_refine!(mesh::AbstractMesh, new_xs::HVNodes, domain=Boundary(); settings=DefaultRaycastSetting, subroutine_offset=0, intro="Refine with ... points: ", pdomain=StaticBool{false}(), obligatories=Int64[])
Refines the given `mesh` by incorporating `new_xs` nodes into its structure, ensuring that the refinement process adheres to the constraints specified by `domain`, `settings`, and additional parameters. This function is designed to be called after `new_xs` has been prepended to `mesh` externally, and it focuses on integrating these new points systematically into the mesh's topology.
# Arguments
- `mesh::AbstractMesh`: The mesh to be refined, adhering to the `AbstractMesh` interface.
- `new_xs::HVNodes`: The set of new nodes to be integrated into `mesh`. These nodes are assumed to have been already added to `mesh` before calling this function.
- `domain=Boundary()`: Specifies the domain within which the refinement is to occur. Proper specification of `domain` is crucial to prevent conflicts at the mesh's boundary vertices.
- `settings=DefaultRaycastSetting`: Raycasting settings to be used during the refinement process. These settings dictate how rays are cast within the mesh to facilitate the integration of new nodes.
- `subroutine_offset=0`: An offset for console output in subroutine calls.
- `intro="Refine with \$(length(new_xs)) points: "`: A descriptive message or introduction that is displayed or logged at the beginning of the refinement process.
- `pdomain=StaticBool{false}()`: Whether or not to print out the domain specifics.
- `obligatories=Int64[]`: An array of indices representing obligatory cells over which the Voronoi algorithm is iterated.
# Usage
This function is intended to be used in scenarios where the mesh needs to be refined by adding a predefined set of nodes (`new_xs`) to it. The caller is responsible for ensuring that these nodes are appropriately added to `mesh` before invoking `systematic_refine!`. The function then systematically integrates these nodes into the mesh's structure, taking into consideration the provided `domain`, raycasting `settings`, and other parameters to ensure a seamless and conflict-free refinement process.
# Notes
- It is assumed that `new_xs` has been properly prepended to `mesh` prior to calling this function. Failure to do so results in incorrect refinement and crashes.
- Proper specification of the `domain` parameter is crucial to avoid conflicts at the boundary vertices of `mesh`.
- The `settings`, `subroutine_offset`, `intro`, `pdomain`, and `obligatories` parameters provide flexibility in tailoring the refinement process to specific requirements or constraints.
# Returns
Internal indices of modified cells
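
# Example

A minimal usage sketch. The construction of `mesh` and the prepending step are only indicated
schematically here; the exact calls (e.g. `prepend!`) depend on the chosen mesh type and vertex storage:

```julia
new_xs = VoronoiNodes(rand(2, 10))      # nodes that shall be inserted
prepend!(mesh, new_xs)                  # the caller is responsible for adding the new nodes to the mesh
modified = systematic_refine!(mesh, new_xs, cuboid(2, periodic = Int64[]))
```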
"""
function systematic_refine!( mesh::AbstractMesh, new_xs::HVNodes, domain=Boundary(); settings=NamedTuple(), subroutine_offset=0, intro="Refine with $(length(new_xs)) points: ", pdomain=StaticBool{false}(), obligatories=Int64[])
s_offset = subroutine_offset+sys_refine_offset
iter = obligatories # array to store all old cells that are affected
lxs = length(mesh)
lnxs = length(new_xs)
search_ = RaycastParameter(eltype(eltype(new_xs)),settings)
#println(search_)
#vp_print(subroutine_offset,intro)
s_offset += length(intro)
searcher = Raycast(nodes(mesh),domain=domain,options=search_)
if pdomain==true
vp_print(searcher.domain)
end
#plausible(Integral.MESH,searcher,report_number=0)
if length(new_xs)!=0
ref_mesh = RefineMesh(mesh)
voronoi(ref_mesh,Iter=1:lnxs,searcher=searcher,subroutine_offset=s_offset,intro=intro*"1st Voronoi: ",iteration_reset=true,compact=true)
#return mesh
#println("Total length= $(length(Integrator.Integral)), new points: $lnxs")
println("Identify affected old cells and clear broken vertices ")
#identify all affected cells
iter = retrieve(ref_mesh,lnxs)
#println(iter)
# get a list of all "old" cells that are possibly affected
short_iter = view(iter,lnxs+1:length(iter))
#println("short_iter: $short_iter")
#println(short_iter)
# erase all data that needs to be recalculated
clean_affected!(ref_mesh,new_xs,short_iter)
obligatories .+= lnxs
voronoi(ref_mesh,Iter=obligatories,searcher=searcher,subroutine_offset=s_offset,intro=intro*"2nd Voronoi: ",iteration_reset=true,compact=true)
end
!isempty(obligatories) && voronoi( mesh, Iter=obligatories, searcher=searcher ,subroutine_offset=s_offset,intro=intro*"3rd Voronoi: ",compact=true)
return _internal_indeces(mesh,iter)
end
###########################################################################################################
## MeshView...
###########################################################################################################
struct MeshView{P<:Point, VDB <: VertexDB{P}, AM<:AbstractMesh{P,VDB}, V<:HVView} <: AbstractMesh{P,VDB}
sigma::Vector{Int64}
data::AM
view::V
int_data::MVector{3, Int64} # Adjusted to 3 for _Cell
# Internal constructor
function MeshView{P, VDB, AM, V}(data::AM, view::V) where {P<:Point, VDB <: VertexDB{P}, AM<:AbstractMesh{P,VDB}, V<:HVView}
new{P, VDB, AM, V}(
Vector{Int64}(), # sigma
data, # data
view, # view
MVector{3, Int64}(zeros(Int64, 3)) # int_data initialized with zeros
)
end
end
# External constructor
function MeshView(data::AM, view::HV) where {P,VDB,AM<:AbstractMesh{P,VDB},HV<:HVView}
MeshView{P, ptype(data), AM, HV}(data, view)
end
#@inline copy_sig(mesh::LM,sig) where {LM<:LockMesh} = _copy_indeces(mesh,sig,mesh.sigma)
const SortedMesh{P<:Point, VDB <: VertexDB{P}, AM<:AbstractMesh{P,VDB}} = MeshView{P, VDB , AM, V} where V<:SortedView
@inline Base.getproperty(cd::MeshView, prop::Symbol) = dyncast_get(cd,Val(prop))
@inline @generated dyncast_get(cd::MeshView, ::Val{:length_sigma}) = :(getfield(cd,:int_data)[1])
@inline @generated dyncast_get(cd::MeshView, ::Val{:internal_length_sigma}) = :(getfield(cd,:int_data)[2])
@inline @generated dyncast_get(cd::MeshView, ::Val{:_Cell}) = :(getfield(cd,:int_data)[3])
@inline @generated dyncast_get(cd::MeshView, ::Val{:boundary_Vertices}) = :(getfield(cd,:data).boundary_Vertices)
@inline @generated dyncast_get(cd::MeshView, d::Val{S}) where S = :( getfield(cd, S))
@inline Base.setproperty!(cd::MeshView, prop::Symbol, val) = dyncast_set(cd,Val(prop),val)
@inline @generated dyncast_set(cd::MeshView, ::Val{:length_sigma},val) = :(getfield(cd,:int_data)[1]=val)
@inline @generated dyncast_set(cd::MeshView, ::Val{:internal_length_sigma},val) = :(getfield(cd,:int_data)[2]=val)
@inline @generated dyncast_set(cd::MeshView, ::Val{:_Cell},val) = :(getfield(cd,:int_data)[3]=val)
@inline @generated dyncast_set(cd::MeshView, d::Val{S},val) where S = :( setfield(cd, S,val))
@inline Base.length(mv::MeshView) = length(mv.data)
@inline nodes(mv::MeshView)= NodesView(nodes(mv.data), mv.view)
@inline internal_length(mv::MeshView) = internal_length(mv.data)
@inline internal_index(m::MV,index::Int64) where MV<:MeshView = internal_index(m.data,m.view / index)
@inline external_index(m::MV,index::Int64) where MV<:MeshView = m.view * external_index(m.data,index)
@inline function external_index(m::MV,inds::AVI) where {MV<:MeshView,AVI<:AbstractVector{Int64}}
#a = _copy_indeces(m.data,external_index(m.data,inds),m.sigma)
a = _external_indeces(m.data,inds,m.sigma)
a .= m.view .* a
#for i in 1:length(a)
# a[i] = m.view * a[i]
#end
return a
end
@inline function internal_index(m::MV,inds::AVI) where {MV<:MeshView,AVI<:AbstractVector{Int64}}
a = _copy_indeces(m.data,inds,m.sigma)
a .= m.view ./ a
#for i in 1:length(a)
# a[i] = m.view / a[i]
#end
return internal_index(m.data,a)
end
@inline internal_sig(mesh::MV,sig::AVI,static::StaticTrue) where {MV<:MeshView,AVI<:AbstractVector{Int64}} = sort!(internal_index(mesh,sig))
@inline function internal_sig(mesh::MV,sig::AVI,static::StaticFalse) where {MV<:MeshView,AVI<:AbstractVector{Int64}}
sig .= internal_sig(mesh,sig,statictrue)
return sig
end
@inline external_sig(mesh::MV,sig::AVI,static::StaticTrue) where {MV<:MeshView,AVI<:AbstractVector{Int64}} = sort!(external_index(mesh,sig))
@inline vertices_iterator(m::MV, index::Int64, internal::StaticTrue) where MV<:MeshView = vertices_iterator(m.data,index,statictrue)
@inline all_vertices_iterator(m::MV, index::Int64, internal::StaticTrue) where MV<:MeshView = all_vertices_iterator(m.data,index,statictrue)
@inline number_of_vertices(m::MV, index::Int64, internal::StaticTrue) where MV<:MeshView = number_of_vertices(m.data,index,statictrue)
@inline push!(mesh::MV, p::Pair{Vector{Int64},T},index) where {T<:Point,MV<:MeshView{T}} = push!(mesh.data,p,index)
@inline push_ref!(mesh::MV, ref,index) where {T<:Point,MV<:MeshView{T}} = push_ref!(mesh.data,ref,index)
@inline haskey(mesh::MV,sig::AbstractVector{Int64},index::Int) where MV<:MeshView = haskey(mesh.data,sig,index)
@inline delete_reference(mesh::MV,s,ref) where MV<:MeshView = delete_reference(mesh.data,s,ref)
@inline cleanupfilter!(mesh::MV,i) where MV<:MeshView = cleanupfilter!(mesh.data,i)
@inline mark_delete_vertex!(mesh::MV,sig,i,ii) where MV<:MeshView = mark_delete_vertex!(mesh.data,sig,i,ii)
@inline get_vertex(m::MV,i) where MV<:MeshView = get_vertex(m.data,i)
struct NeighborFinder{M,VM,T}
dimension::Int64
candidates::Vector{Int64}
broken::Vector{Bool}
verteces::T
transformed::M
local_basis::VM
local_origin::M
cell_center::M
length_verts::Vector{Int64}
each_neighbor_verts::Vector{Vector{Bool}}
sure_neighbors::Vector{Bool}
end
empty_local_Base(dim::Int) = Vector{MVector{dim,Float64}}([MVector{dim}(zeros(Float64,dim)) for _ in 1:dim])
empty_local_Base(x::StaticVector) = [MVector(0*x) for _ in 1:length(x)]
empty_local_Base(vec::AbstractVector{Float64}) = Vector{MVector{length(vec),Float64}}([MVector{length(vec)}(zeros(Float64,length(vec))) for _ in 1:length(vec)])
function NeighborFinder(dim,x::P) where P<:Point
mv = MVector(x)
return NeighborFinder{typeof(mv),Vector{typeof(mv)},Vector{P}}(
dim,[1],[false],
Vector{P}(undef,1),
MVector(0*x),
empty_local_Base(x),
MVector(0*x),
MVector(0*x),
Int64[1],[[true]],[false])
end
@Base.propagate_inbounds function reset(NF::NeighborFinder,neighbors,iterator,li,center)
dim = NF.dimension
ln = length(neighbors)+1
lc = length(NF.candidates)
#li = length(iterator)
lv = length(NF.verteces)
NF.length_verts[1] = li
if lc<ln
resize!(NF.candidates,ln)
resize!(NF.broken,ln)
# resize!(NF.transformed,ln)
resize!(NF.each_neighbor_verts,ln)
for i in (lc+1):ln
# NF.transformed[i] = MVector{dim}(zeros(Float64,dim))
NF.each_neighbor_verts[i] = zeros(Bool,lv)
end
end
if lv<li
resize!(NF.verteces,li)
for i in 1:max(ln,lc)
resize!(NF.each_neighbor_verts[i],li)
end
end
ln -= 1
NF.candidates[1:ln] .= neighbors
NF.candidates[ln+1] = 0
NF.broken[1:ln] .= true
NF.cell_center .= center
for i in 1:max(ln,lc)
NF.each_neighbor_verts[i] .= 0
end
count = 0
for (sig,r) in iterator
count += 1
NF.verteces[count] = r
b = length(sig)<=dim+1
for s in sig
pos = searchsortedlast(neighbors,s)#findfirst(x->x==s,NF.candidates)
if pos!=0 && neighbors[pos]==s #typeof(pos)!=Nothing
#println("$s, $pos")
NF.each_neighbor_verts[pos][count] = true
b && (NF.broken[pos] = false)
end
end
end
NF.length_verts[1] = count
end
@Base.propagate_inbounds function correct_neighbors(nf::NeighborFinder,neigh;xs=nothing,_Cell=0)
number_of_neighbors = findfirst(x->x==0, nf.candidates)-1
base = nf.local_basis
origin = nf.local_origin
number_of_verteces = nf.length_verts[1]
allverteces = nf.verteces
buffer = nf.transformed
dim = nf.dimension
# println(nf.candidates)
for n in 1:number_of_neighbors
(!nf.broken[n]) && continue
# print("$n: ")
active_verteces = nf.each_neighbor_verts[n]
origin .= 0
count = 0
# center of potential interface
for i in 1:number_of_verteces
!active_verteces[i] && continue
origin .+= allverteces[i]
count += 1
end
origin ./= count
# TODO: get candidate for normal vector
#=base[dim] .= xs[_Cell] .- xs[neigh[n]]
base[1] .= rand(dim)
normalize!(base[1])
normalize!(base[dim])
base[1] .-= dot(base[1],base[dim]) .* base[dim]
normalize!(base[1])
base[dim] .+= base[1]=#
base[dim] .= rand(dim)
normalize!(base[dim])
# 1st is longest distance ray:
max_dist = 0.0
max_ind = 0
for i in 1:number_of_verteces
!active_verteces[i] && continue
base[2] .= allverteces[i] .- origin
dist = norm(base[2])
if dist > max_dist
max_dist = dist
base[1] .= base[2]
max_ind = i
end
end
if max_ind==0
nf.broken[n]=true
continue
end
active_verteces[max_ind] = false
base[1] ./= max_dist
rotate(base,1,dim)
rotate(base,1,dim)
nf.broken[n] = false
for k in 2:(dim-1)
max_angle = 0.0
max_ind = 0
for i in 1:number_of_verteces
!active_verteces[i] && continue
buffer .= origin .- allverteces[i]
normalize!(buffer)
angle = abs(dot(buffer,base[dim]))
if angle>max_angle && angle>1.0E-8
max_angle = angle
max_ind = i
base[k] .= buffer
end
end
# println(" $k: $max_angle, $max_ind, ")#$(base[k]) --> $(base[dim])")
if max_angle>1.0E-8
rotate(base,k,dim)
rotate(base,k,dim)
else
nf.broken[n] = true
break
end
end
end
return deleteat!(neigh,view(nf.broken,1:length(neigh)))
end
####################################################################################################################
####################################################################################################################
####################################################################################################################
## Neighbor Search....
####################################################################################################################
####################################################################################################################
####################################################################################################################
global NeighborFinders = Vector{Any}(undef,5)
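# lazily filled cache: _NeighborFinder(dim) creates one NeighborFinder per space dimension and stores it in the global vector above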
function _NeighborFinder(dim)
lnf=length(HighVoronoi.NeighborFinders)
dim>lnf && resize!(HighVoronoi.NeighborFinders,dim)
if !isassigned(HighVoronoi.NeighborFinders,dim)
HighVoronoi.NeighborFinders[dim] = NeighborFinder(dim,VoronoiNode(zeros(Float64,dim)))
end
#return reinterpret(DimNeighborFinder{S},HighVoronoi.NeighborFinders[dim])
return HighVoronoi.NeighborFinders[dim]
end
"""
neighbors_of_cell(_Cell,mesh,condition = r->true)
This function takes the vertices of a cell (calculated e.g. by systematic_voronoi) and returns
an array containing the index numbers of all neighbors. A `neighbor` here is a cell that shares a full interface;
a lower-dimensional edge or vertex is not sufficient as a criterion. This is equivalent to `_Cell` and
`neighbor` sharing at least `dimension` different vertices.
`condition` can be any condition on the coordinates of a vertex.
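
A hedged sketch (assumes `mesh` is an `AbstractMesh` whose vertices have already been computed):

```julia
neigh     = neighbors_of_cell(5, mesh)                    # all neighbors of cell 5
neigh_top = neighbors_of_cell(5, mesh, r -> r[2] > 0.0)   # only vertices with positive second coordinate count
```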
"""
@inline function neighbors_of_cell(_Cells,mesh::AbstractMesh,condition = r->true; adjacents=false, extended_xs::Points = nodes(mesh), edgeiterator = nothing, neighbors = zeros(Int64,10))
return neighbors_of_cell_new(_Cells,mesh,condition,adjacents=adjacents,extended_xs=extended_xs,edgeiterator=edgeiterator,neighbors=neighbors)
end
function neighbors_of_cell_new(_Cells,mesh,condition = r->true; adjacents=true, extended_xs = nodes(mesh), edgeiterator = nothing, neighbors = zeros(Int64,10))
    dim = dimension(mesh)
    if neighbors[1]==0
        position = 1
        __max = 10
adjacents = adjacents || length(_Cells)>1
for _Cell in _Cells
for (sigma,r) in vertices_iterator(mesh,_Cell)
#ls_dim = length(sigma)<=dim+1
for i in sigma
if i!=_Cell && (condition(r))
#f = searchsortedlast(neighbors,i)#findfirstassured(i,neighbors)
f = findfirstassured(i,neighbors)
if f==0
f = position
neighbors[position] = i
position += 1
if position>__max
__max += 10
append!(neighbors,zeros(Int64,10))
end
end
end
end
end
end
for i in position:__max
neighbors[i] = typemax(Int64)
end
sort!(neighbors)
resize!(neighbors, findfirst(x->(x>typemax(Int64)-1),neighbors)-1)
end
adjacents && (return neighbors)
_Cell = _Cells
nf = typeof(edgeiterator)!=Nothing ? edgeiterator : _NeighborFinder(dim)
reset(nf,neighbors,vertices_iterator(mesh,_Cell),number_of_vertices(mesh,_Cell),extended_xs[_Cell])
correct_neighbors(nf,neighbors,xs=extended_xs,_Cell=_Cell)
return neighbors
end
####################################################################################################################
####################################################################################################################
####################################################################################################################
## IterativeDimensionChecker
####################################################################################################################
####################################################################################################################
####################################################################################################################
struct IterativeDimensionChecker{S}
dimension::Int64
local_basis::Vector{MVector{S,Float64}}
neighbors::Vector{Int64}
local_cone::Vector{MVector{S,Float64}}
valid_neighbors::Vector{Vector{Bool}}
current_path::Vector{Int64}
local_neighbors::Vector{Int64}
trivial::Vector{Bool}
random::MVector{S,Float64}
buffer::MVector{S,Float64}
edge_iterator::FastEdgeIterator{Vector{DimFEIData{S,Float64}},Float64}
edge_buffer::Vector{Int64}
end
function IterativeDimensionChecker(dim::Int)
return IterativeDimensionChecker{dim}(dim,empty_local_Base(dim),zeros(Int64,dim),empty_local_Base(dim),map!(k->zeros(Bool,dim),Vector{Vector{Bool}}(undef,dim),1:dim),
Vector{Int64}(zeros(Int64,dim)),Vector{Int64}(zeros(Int64,dim)),[true],MVector{dim}(rand(dim)),MVector{dim}(zeros(Float64,dim)),
FastEdgeIterator(zeros(SVector{dim,Float64})),zeros(Int64,dim))
end
function IterativeDimensionChecker(m::AM) where {P,AM<:AbstractMesh{P}}
dim = size(P)[1]
return IterativeDimensionChecker{size(P)[1]}(dim,empty_local_Base(dim),zeros(Int64,dim),empty_local_Base(dim),map!(k->zeros(Bool,dim),Vector{Vector{Bool}}(undef,dim),1:dim),
Vector{Int64}(zeros(Int64,dim)),Vector{Int64}(zeros(Int64,dim)),[true],MVector{dim}(rand(dim)),MVector{dim}(zeros(Float64,dim)),
FastEdgeIterator(zeros(P)),zeros(Int64,dim))
end
@Base.propagate_inbounds function reset(idc::IterativeDimensionChecker, neighbors,xs,_Cell,verteces,anyway=true)
idc.trivial[1] = true
length(xs[1])==2 && return 3
dim = idc.dimension
mlsig = dim+1
for (sig,r) in verteces
lsig = length(sig)
if lsig>dim+1
mlsig = max(mlsig,lsig)
idc.trivial[1] = false
end
end
idc.trivial[1] &= anyway
if idc.trivial[1]
return dim+1
end
#idc.trivial[1] = false
ln = length(neighbors)
lidc = length(idc.neighbors)
if ln>lidc
resize!(idc.neighbors,ln)
resize!(idc.local_cone,ln)
for i in (lidc+1):ln
idc.local_cone[i] = MVector{dim}(zeros(Float64,dim))
end
for i in 1:dim
resize!(idc.valid_neighbors[i],ln)
end
lidc = ln
end
view(idc.neighbors,1:ln) .= neighbors
view(idc.neighbors,(ln+1):lidc) .= 0
for i in 1:ln
idc.local_cone[i] .= xs[neighbors[i]] .- xs[_Cell]
normalize!(idc.local_cone[i])
end
idc.valid_neighbors[1][1:ln] .= 1
idc.valid_neighbors[2][1:ln] .= 1
return mlsig
end
function set_dimension(idc::IterativeDimensionChecker,entry,_Cell,neighbor)
neighbor==_Cell && (return false)
idc.trivial[1] && (return true)
index = findfirst(x->x==neighbor,idc.neighbors)
if entry==1
idc.local_basis[1] .= idc.local_cone[index]
else
if !idc.valid_neighbors[entry][index]
return false
end
if entry<idc.dimension
idc.valid_neighbors[entry+1] .= idc.valid_neighbors[entry]
end
idc.local_basis[entry] .= idc.local_cone[index]
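        # the projections are subtracted twice (classical re-orthogonalization) to keep the basis numerically orthogonal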
for _ in 1:2
for i in 1:(entry-1)
idc.local_basis[entry] .-= dot(idc.local_basis[entry],idc.local_basis[i]) .* idc.local_basis[i]
end
end
value = norm(idc.local_basis[entry])
if value<1.0E-5
return false
end
idc.local_basis[entry] ./= value
end
idc.current_path[entry] = neighbor
return true
end
function get_sup_edge(dc::IterativeDimensionChecker,edges,xs)
sig,pe = pop!(edges)
r = pe.r1
r2 = pe.r2
val = pe.value
nu = r2-r
#nu = normalize(nu)
#bb = length(edges)>0
#tree = bb ? KDTree(xs) : 1
#dim =length(xs[1])
#=if bb
println(sig)
ir = sort!(inrange(tree,pe.r1,norm(xs[1]-pe.r1)*(1.0+1.0E-7)))
println(" ",round.(pe.r1,digits=5),", ",ir)
print(" ")
for it in ir
print("$it: $(norm(xs[it]-pe.r1)), ")
end
println()
ir = sort!(inrange(tree,pe.r2,norm(xs[1]-pe.r2)*(1.0+1.0E-7)))
println(" ",round.(pe.r2,digits=5),", ",ir)
print(" ")
for it in ir
print("$it: $(norm(xs[it]-pe.r2)), ")
end
ir1 =ir
println()
u = rand(length(xs[1]))
dc.local_basis[dim] .= u
for i in 1:(dim-1)
dc.local_basis[dim] .-= dot(dc.local_basis[dim],dc.local_basis[i]) .* dc.local_basis[i]
end
normalize!(dc.local_basis[dim])
println(" - $(dot(r-r2,nu)/norm(r-r2)) - ")
for k in sig
println("$(round(dot((xs[k]-xs[sig[1]]),nu)))")
end
append!(sig,ir1)
append!(sig,ir)
end=#
while length(edges)>0
sig2,pe = pop!(edges)
#=println(sig2)#,round.(pe.r1,digits=5),round.(pe.r2,digits=5))
ir = sort!(inrange(tree,pe.r1,norm(xs[1]-pe.r1)*(1.0+1.0E-7)))
println(" ",round.(pe.r1,digits=5),", ",ir)
print(" ")
for it in ir
print("$it: $(norm(xs[it]-pe.r1)), ")
end
println()
append!(sig,ir)
ir = sort!(inrange(tree,pe.r2,norm(xs[1]-pe.r2)*(1.0+1.0E-7)))
println(" ",round.(pe.r2,digits=5),", ",ir)
print(" ")
for it in ir
print("$it: $(norm(xs[it]-pe.r2)), ")
end
println()
append!(sig,ir)
append!(sig,sig2)=#
#=if pe.r1!=pe.r2
differ = pe.r1-pe.r2
ndiffer = norm(differ)
if abs(abs(dot(differ,nu))-ndiffer)>ndiffer*1.0E-4
print("+")
end
end=#
rr = pe.r1
if dot(rr-r,nu)<0
r=rr
elseif dot(rr-r2,nu)>0
r2 = rr
end
rr = pe.r2
if dot(rr-r,nu)<0
r=rr
elseif dot(rr-r2,nu)>0
r2 = rr
end
#=differ = r-r2
differ = differ - nu * dot(nu,differ)
ndiffer = norm(differ)
if ndiffer>1.0E-4
print("+")
end=#
#=print(" ")
for k in sig2
print("$(round(dot((xs[k]-xs[sig2[1]]),nu))) ")
end
println()=#
end
#=if bb
println(sort!(unique!(sig)))
for k in sig
println("$k: $(round.(xs[k],digits=5)) ")
end
error("")
end=#
return r,r2,val
end
#=
function joint_neighbors(idc::IterativeDimensionChecker,sig,sig2)
count = 0
lln = length(idc.local_neighbors)
for s in sig
if s in sig2 && s in idc.neighbors
count += 1
if count>lln
lln += idc.dimension
resize!(idc.local_neighbors,lln)
end
idc.local_neighbors[count] = s
end
end
return view(idc.local_neighbors,1:count)
end
=#
#const HighVoronoiNodes{S} = AbstractVector{AbstractVector{S}} where S<:Real
StaticArrays.MVector(v::Vector{D}) where {D<:Real} = MVector{length(v),D}(v)
const HVNodes{P} = AbstractVector{P} where P<:Point
SearchTree(nodes::HVN,type=HVKDTree) where HVN<:HVNodes = error("SearchTree needs to be implemented for $(typeof(nodes))")
Base.eltype(::Type{<:HVNodes{P}}) where {P<:Point} = P
@inline references(n::HV) where HV<:HVNodes = Int64[]
@inline reference_shifts(n::HV) where HV<:HVNodes = BitVector[]
@inline dimension(m::HVNodes{P}) where P = size(P)[1]
function copynodes(nodes::HVN) where {P<:Point,HVN<:HVNodes{P}}
l = length(nodes)
result = Vector{P}(undef,l)
@inbounds for i in 1:l
result[i] = nodes[i]
end
return result
end
abstract type AbstractCombinedNodes{P<:Point} <: AbstractVector{P} end
#abstract type AbstractCombinedNodes{P} <: AbstractVector{P} where P <: Point end
#Base.eltype(nodes::PrependedNodes) = eltype(nodes.first)
SearchTree(nodes::ACN) where ACN<:AbstractCombinedNodes = KDTree(nodes)
@inline function Base.setindex!(nodes::AbstractCombinedNodes, value, i)
if i <= nodes.length1
error("PrependedNodes currently not meant to be modified inside domain.")
nodes.first[i] = value
else
nodes.second[i - nodes.length1] = value
end
end
@inline function Base.getindex(nodes::AbstractCombinedNodes, i::Int)
if i <= nodes.length1
return nodes.first[i]
else
return nodes.second[i - nodes.length1]
end
end
function Base.getindex(nodes::AbstractCombinedNodes, i_vec::AbstractVector{Int})
_result = Vector{eltype(nodes)}(undef, length(i_vec))
for i in 1:length(i_vec)
_result[i] = nodes[i_vec[i]]
end
return _result
end
# Iterate over elements
function Base.iterate(nodes::AbstractCombinedNodes, state=1)
if state <= length(nodes)
return getindex(nodes, state), state + 1
else
return nothing
end
end
SearchTree(nodes::Vector{<:Point},type=HVKDTree()) = HVTree(nodes,type)
#####################################################################################################################
## DoubleVector
#####################################################################################################################
#=
# Definition of the DoubleVector structure
struct DoubleVector{V, AV1<:AbstractVector{V}, AV2<:AbstractVector{V}} <: HVNodes{V}
d1::AV1
d2::AV2
l1::Int64
l2::Int64
    # Constructor that takes d1 and d2 as arguments and sets l1 and l2
function DoubleVector(d1::AV1, d2::AV2) where {V, AV1<:AbstractVector{V}, AV2<:AbstractVector{V}}
new{V, AV1, AV2}(d1, d2, length(d1), length(d2))
end
end
# Implementation of the size() method
Base.size(v::DoubleVector) = (length(v),)
# Implementation of the length() method
Base.length(v::DoubleVector) = v.l1 + v.l2
# Implementation of the getindex method
function Base.getindex(v::DoubleVector, i::Int)
return i <= v.l1 ? v.d1[i] : v.d2[i - v.l1]
end
# Implementation of the setindex! method
function Base.setindex!(v::DoubleVector, value, i::Int)
if i <= v.l1
v.d1[i] = value
else
v.d2[i - v.l1] = value
end
end
=#
#####################################################################################################################
## NodesContainer{S}
#####################################################################################################################
#=
struct NodesContainer{S,P<:Point{S},N<:HVNodes{P}} <: HVNodes{P} #AbstractVector{AbstractVector{S}}
data::N
function NodesContainer(d)
return new{eltype(eltype(d)),eltype(d),typeof(d)}(d)
end
end
Base.size(nc::NodesContainer) = size(nc.data)
Base.length(nc::NodesContainer) = length(nc.data)
Base.getindex(nc::NodesContainer, idx) = getindex(nc.data, idx)
Base.setindex!(nc::NodesContainer, val, idx) = setindex!(nc.data, val, idx)
#Base.eltype(nc::NodesContainer) = eltype(nc.data)
Base.iterate(nc::NodesContainer, state...) = iterate(nc.data, state...)
SearchTree(nodes::NodesContainer,type=HVKDTree) = SearchTree(nodes.data)
=#
#####################################################################################################################
## SortedNodes
#####################################################################################################################
#=
struct SortedNodes{S,P<:Point{S},T<:HVNodes{P}} <: HVNodes{P} #AbstractVector{AbstractVector{S}}
data::T
indices::Vector{Int64}
function SortedNodes(d)
new{eltype(eltype(d)),eltype(d),typeof(d)}(d, collect(1:length(d)))
end
end
Base.size(nc::SortedNodes) = size(nc.data)
Base.length(nc::SortedNodes) = length(nc.data)
Base.getindex(nc::SortedNodes, idx) = getindex(nc.data, nc.indices[idx])
Base.setindex!(nc::SortedNodes, val, idx) = setindex!(nc.data, val, nc.indices[idx])
#Base.eltype(nc::SortedNodes) = eltype(nc.data)
Base.iterate(nc::SortedNodes, state=1) = state<=length(nc.data) ? (nc.data[nc.indices[state]], state+1) : nothing
SearchTree(nodes::SortedNodes) = error("SearchTree(SortedNodes) needing proper implementation")
=#
#####################################################################################################################
## generate_CombinedNodes_struct
#####################################################################################################################
#=
macro generate_CombinedNodes_struct(struct_name)
return quote
struct $(esc(struct_name)){S, P<:Point{S}, T<:HVNodes{P}, TT<:HVNodes{P}} <: AbstractCombinedNodes{P}
first::T
second::TT
length1::Int64
length2::Int64
length::Int64
function $(esc(struct_name))(d1, d2)
return new{eltype(eltype(d1)),eltype(d1), typeof(d1), typeof(d2)}(d1, d2, length(d1), length(d2), length(d1) + length(d2))
end
function $(esc(struct_name))(CNS::AbstractCombinedNodes)
return $(esc(struct_name))(CNS.first, CNS.second)
end
end
Base.size(nodes::$(esc(struct_name))) = (nodes.length,)
Base.length(nodes::$(esc(struct_name))) = nodes.length
end
end
@generate_CombinedNodes_struct PrependedNodes
@generate_CombinedNodes_struct BoundaryNodes
@generate_CombinedNodes_struct RefinedNodes
=#
#####################################################################################################################
## NodesView
#####################################################################################################################
struct NodesView{P<:Point, HVN<:HVNodes{P},HVV<:HVView} <: HVNodes{P}
data::HVN
view::HVV
NodesView{P, HVN}(data::HVN, view::HVV) where {P<:Point, HVN<:HVNodes{P},HVV<:HVView} = new{P, HVN,HVV}(data, view)
end
NodesView(data::HVN, view::HVV) where {P<:Point, HVN<:HVNodes{P},HVV<:HVView} = NodesView{P, typeof(data)}(data, view)
@inline length(data::NV) where NV<:NodesView =length(data.data)
@inline Base.size(data::NV) where NV<:NodesView = (length(data.data),)
@inline Base.getindex(nv::NV, i::Int) where NV<:NodesView = getindex(nv.data, nv.view / i)
@inline Base.setindex!(nv::NV, value, i::Int) where NV<:NodesView = setindex!(nv.data, value, nv.view / i)
@inline Base.iterate(nv::NV, state=1) where NV<:NodesView = state > length(nv.data) ? nothing : (getindex(nv.data, nv.view / state), state + 1)
#=
struct NodesViewTree{P <: Point,T<:AbstractTree{P},NV<:NodesView{P}} <: AbstractTree{P}
nodes::NV
tree::T
function NodesViewTree(n::NV,type) where {P<:Point,NV<:NodesView{P}}
t = SearchTree(n.data,type)
new{P,typeof(t),NV}(n,t)
end
end
@inline Base.getproperty(cd::NodesViewTree, prop::Symbol) = dyncast_get(cd,Val(prop))
@inline @generated dyncast_get(cd::NodesViewTree, ::Val{:nodes}) = :(getfield(cd,:nodes))
@inline @generated dyncast_get(cd::NodesViewTree, ::Val{:tree}) = :(getfield(cd,:tree))
@inline @generated dyncast_get(cd::NodesViewTree, d::Val{S}) where S = :( getfield(cd.tree, S))
@inline search_vertex(tree::NodesViewTree,r,idx,dist,data) = search_vertex(tree.tree,r,idx,dist,data)
#@inline SearchTree(n::NV,type) where {NV<:NodesView} = NodesViewTree(n,type)
@inline SearchTree(n::NV,type) where {NV<:NodesView} = NodesViewTree(n,type)
@inline nodes(tree::NodesViewTree) = tree.nodes
@inline function nn(tree::NodesViewTree,x,skip=(y->false))
tnv = tree.nodes.view
idx , dists = nn(tree.tree,x,y->skip(tnv*y))
b=length(idx)>0
return tnv*idx, dists
# return knn(tree.tree2,x,1,false,skip)[1][1],knn(tree.tree2,x,1,false,skip)[2][1]
return b ? (tree.nodes.view*idx[1], dists[1]) : (0,Inf64)
end
@inline function knn(tree::NodesViewTree,x,i,b,skip=(y->false))
tnv = tree.nodes.view
ids,dists = knn(tree.tree,x,i,b,y->skip(tnv*y))
tnv*(ids,ids)
# return knn(tree.tree2,x,i,b,skip)
return ids,dists
end
@inline function inrange(tree::NodesViewTree,x,r)
tnv = tree.nodes.view
ids = inrange(tree.tree,x,r)
tnv*(ids,ids)
# return inrange(tree.tree2,x,r)
return ids
end
=#
SearchTree(nodes::ACN) where ACN<:NodesView = KDTree(nodes)
SearchTree(nodes::ACN,type=HVKDTree) where {P<:Point,S,ACN<:SubArray{P,S,NodesView{P}}} = KDTree(nodes)
#####################################################################################################################
## ReflectedNodes
#####################################################################################################################
struct ReflectedNodes{P<:Point} <: HVNodes{P}
data::Vector{P}
references::Vector{Int64}
reference_shifts::Vector{BitVector}
end
@inline Base.getindex(rn::ReflectedNodes, i) = getindex(rn.data, i)
@inline Base.setindex!(rn::ReflectedNodes, v, i) = setindex!(rn.data, v, i)
@inline Base.iterate(rn::ReflectedNodes, state...) = iterate(rn.data, state...)
@inline Base.length(rn::ReflectedNodes) = length(rn.data)
@inline Base.size(rn::ReflectedNodes) = size(rn.data)
@inline references(n::HV) where HV<:ReflectedNodes = n.references
@inline reference_shifts(n::HV) where HV<:ReflectedNodes = n.reference_shifts
struct ThreadsafeVector{T, AV<:AbstractVector{T},RWL<:Union{ReadWriteLock,Nothing}} <: AbstractVector{T}
data::AV
lock::RWL
end
ThreadsafeVector(::Nothing,::RWL) where {RWL<:Union{ReadWriteLock,Nothing}} = nothing
@inline function Base.size(tv::ThreadsafeVector)
readlock(tv.lock)
s = size(tv.data)
readunlock(tv.lock)
return s
end
@inline function Base.length(tv::ThreadsafeVector)
readlock(tv.lock)
l = length(tv.data)
readunlock(tv.lock)
return l
end
@inline function Base.setindex!(tv::ThreadsafeVector, value, idx::Int)
writelock(tv.lock)
tv.data[idx] = value
writeunlock(tv.lock)
end
@inline function Base.getindex(tv::ThreadsafeVector, idx::Int)
readlock(tv.lock)
d = tv.data[idx]
readunlock(tv.lock)
return d
end
@inline function Base.in(value, tv::ThreadsafeVector)
readlock(tv.lock)
b = value in tv.data
readunlock(tv.lock)
return b
end
function Base.iterate(tv::ThreadsafeVector, state...)
readlock(tv.lock)
ii = iterate(tv.data, state...)
readunlock(tv.lock)
return ii
end
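# Usage sketch (assumes a plain Vector wrapped together with one of the package's ReadWriteLocks):
#   tv = ThreadsafeVector(zeros(Float64, 3), ReadWriteLock())
#   tv[1] = 1.0     # write-locked assignment
#   x  = tv[1]      # read-locked access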
struct IntegralLocks{RWL}
neighbors::RWL
volume::RWL
area::RWL
bulk_integral::RWL
interface_integral::RWL
sync::SyncLock
IntegralLocks{ReadWriteLock}() = new{ReadWriteLock}(ReadWriteLock(),ReadWriteLock(),ReadWriteLock(),ReadWriteLock(),ReadWriteLock(),SyncLock())
IntegralLocks{Nothing}() = new{Nothing}(nothing,nothing,nothing,nothing,nothing,SyncLock())
end
struct ThreadsafeIntegral{P<:Point, HVI<:HVIntegral{P}, AM<:AbstractMesh{P},IL<:IntegralLocks} <: HVIntegral{P}
data::HVI
mesh::AM
locks::IL
end
function ThreadsafeIntegral(d::HVI, locks::IntegralLocks) where {P<:Point, HVI<:HVIntegral{P}}
ThreadsafeIntegral( d,ThreadMesh(mesh(d)),locks)
end
synchronizer(ti::TI) where {TI<:ThreadsafeIntegral} = ti.locks.sync
synchronizer(::HI) where {HI<:HVIntegral} = nothing
mesh(iv::ThreadsafeIntegral) = iv.mesh #MeshView(mesh(iv.data), iv.view)
@inline _has_cell_data(I::ThreadsafeIntegral,_Cell) = _has_cell_data(I.data,_Cell)
cell_data_writable(I::ThreadsafeIntegral,_Cell,vec,vecvec,::StaticFalse;get_integrals=statictrue) = begin
cdw = cell_data_writable(I.data,_Cell,vec,vecvec,staticfalse,get_integrals=get_integrals)
return (volumes = ThreadsafeVector(cdw.volumes,I.locks.volume), area = ThreadsafeVector(cdw.area,I.locks.area), bulk_integral = ThreadsafeVector(cdw.bulk_integral,I.locks.bulk_integral), interface_integral = ThreadsafeVector(cdw.interface_integral,I.locks.interface_integral), neighbors = cdw.neighbors)
end
@inline function get_neighbors(I::ThreadsafeIntegral,_Cell,::StaticFalse)
readlock(I.locks.neighbors)
readlock(I.locks.volume)
readlock(I.locks.area)
readlock(I.locks.bulk_integral)
readlock(I.locks.interface_integral)
gn = get_neighbors(I.data,_Cell,staticfalse)
readunlock(I.locks.neighbors)
readunlock(I.locks.volume)
readunlock(I.locks.area)
readunlock(I.locks.bulk_integral)
readunlock(I.locks.interface_integral)
return gn
end
@inline function set_neighbors(I::ThreadsafeIntegral,_Cell,new_neighbors,proto_bulk,proto_interface,::StaticFalse)
writelock(I.locks.neighbors)
writelock(I.locks.volume)
writelock(I.locks.area)
writelock(I.locks.bulk_integral)
writelock(I.locks.interface_integral)
set_neighbors(I.data,_Cell,new_neighbors,proto_bulk,proto_interface,staticfalse)
writeunlock(I.locks.neighbors)
writeunlock(I.locks.volume)
writeunlock(I.locks.area)
writeunlock(I.locks.bulk_integral)
writeunlock(I.locks.interface_integral)
end
@inline enable(iv::IV;kwargs...) where IV<:ThreadsafeIntegral = enable(iv.data;kwargs...)
@inline enabled_volumes(Integral::ThreadsafeIntegral) = enabled_volumes(Integral.data)
@inline enabled_area(Integral::ThreadsafeIntegral) = enabled_area(Integral.data)
@inline enabled_bulk(Integral::ThreadsafeIntegral) = enabled_bulk(Integral.data)
@inline enabled_interface(Integral::ThreadsafeIntegral) = enabled_interface(Integral.data)
@inline get_area(iv::ThreadsafeIntegral,c,n,::StaticTrue) = begin
readlock(iv.locks.area)
a = get_area(iv.data,c,n,statictrue)
readunlock(iv.locks.area)
return a
end
@inline get_integral(iv::ThreadsafeIntegral,c,n,::StaticTrue) = begin
readlock(iv.locks.interface_integral)
ii = get_integral(iv.data,c,n,statictrue)
readunlock(iv.locks.interface_integral)
return ii
end
@inline function Parallel_Integrals(integrals,::III) where {III<:Union{Call_FAST_POLYGON,Call_GEO,Call_HEURISTIC,Call_POLYGON}}
locks = IntegralLocks{Nothing}()
return map(i->ThreadsafeIntegral(i,locks),integrals)
end
@inline function Parallel_Integrals(integrals,::III) where {III<:Union{Call_HEURISTIC_MC,Call_MC}}
locks = IntegralLocks{ReadWriteLock}()
return map(i->ThreadsafeIntegral(i,locks),integrals)
end
###########################################################################################################################################################
###########################################################################################################################################################
## Locks for parallel computation
###########################################################################################################################################################
###########################################################################################################################################################
struct HasKeyLock
lock::BusyFIFOLock
hashes::Vector{HashedQueue}
blocked::BitVector
function HasKeyLock(nthreads::Int64)
lock = BusyFIFOLock()
hashes = Vector{HashedQueue}(undef, nthreads)
        blocked = falses(nthreads) # initialized with `false` for all threads
return new(lock, hashes, blocked)
end
end
@inline Base.lock(rwl::HasKeyLock) = lock(rwl.lock)
@inline Base.unlock(rwl::HasKeyLock) = unlock(rwl.lock)
@inline blocked(rwl::HasKeyLock, i::Int)::Bool = rwl.blocked[i]
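# block(rwl, key) returns true iff no other thread has currently registered the same (hashed) key;
# in that case the key is registered for the calling thread, which may then safely process it.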
@inline function block(rwl::HasKeyLock, key)::Bool
i = Threads.threadid()
value = fnv1a_hash(key, UInt128)
index2 = fnv1a_hash(key, UInt64)
hq = HashedQueue(value, index2)
exists = false
rwl.blocked[i] = false
lock(rwl.lock)
for j in 1:length(rwl.blocked)
j == i && continue
exists |= (hq == rwl.hashes[j])
end
if !exists
rwl.blocked[i] = true
rwl.hashes[i] = hq
end
unlock(rwl.lock)
return !exists
end
struct ParallelLocks
#general::BusyFIFOLock
rwl::ReadWriteLock
hkl::HasKeyLock
function ParallelLocks(nthreads::Int64)
#general = BusyFIFOLock()
rwl = ReadWriteLock()
hkl = HasKeyLock(nthreads)
return new( rwl, hkl)
end
end
#@inline Base.lock(pl::ParallelLocks) = lock(pl.general)
#@inline Base.unlock(pl::ParallelLocks) = unlock(pl.general)
@inline readlock(pl::ParallelLocks) = readlock(pl.rwl)
@inline writelock(pl::ParallelLocks) = writelock(pl.rwl)
@inline readunlock(pl::ParallelLocks) = readunlock(pl.rwl)
@inline writeunlock(pl::ParallelLocks) = writeunlock(pl.rwl)
##############################################################################################################################
##############################################################################################################################
## Split Threads properly...
##############################################################################################################################
##############################################################################################################################
function create_multithreads(threading::MultiThread,singular=false)
    # number of available threads
NThreads = Threads.nthreads()
    # determine the number of MultiThread objects to create
node_threads = min(NThreads, threading.node_threads)
    # compute an almost even distribution of sub_threads
max_sub_threads = singular ? 1 : threading.sub_threads
ideal_sub_threads = min(max_sub_threads, cld(NThreads, node_threads))
    # create the MultiThread objects
threads_array = [MultiThread(1, ideal_sub_threads) for _ in 1:node_threads]
    # correct a possible overshoot of the total number of threads
total_sub_threads = sum(getfield(t, :sub_threads) for t in threads_array)
    # iterate backwards over the array to reduce the sub_threads
while total_sub_threads > NThreads
for i in length(threads_array):-1:1
if threads_array[i].sub_threads > 1
threads_array[i] = MultiThread(1, threads_array[i].sub_threads - 1)
total_sub_threads -= 1
if total_sub_threads <= NThreads
break
end
end
end
end
return threads_array
end
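# Illustrative sketch (hypothetical thread counts): with Threads.nthreads() == 8 and
# threading = MultiThread(2, 4), create_multithreads(threading) yields two MultiThread(1, 4)
# objects, i.e. two node threads with four sub-threads each.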
function speedup(i)
i==1 && return 1.0
i==2 && return 0.9
i==3 && return 0.8
i==4 && return 0.7
i==5 && return 0.75
return 1.0
end
function partition_indices(todo_length::Int, mt::Vector{MultiThread})
    # compute the total weight
weights = [speedup(t.sub_threads) for t in mt]
total_weight = sum(weights)
    # compute each thread's share according to its weight
parts = [Int(round(todo_length * weight / total_weight)) for weight in weights]
    # adjust the last parts to guarantee the full length is covered
sum_parts = sum(parts)
while sum_parts < todo_length
parts[end] += 1
sum_parts += 1
end
i = 0
ll = length(parts)
while sum_parts > todo_length
parts[ll-i] -= 1
i += 1
sum_parts -= 1
i>=ll && (i=0)
end
    # compute the start and end indices for each part
start_indices = [1]
end_indices = Int64[]
for part_len in parts
push!(end_indices, start_indices[end] + part_len - 1)
push!(start_indices, end_indices[end] + 1)
end
    # remove the last, superfluous start index
pop!(start_indices)
return start_indices, end_indices
end
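# Worked example: partition_indices(10, [MultiThread(1,1), MultiThread(1,1)]) gives equal weights,
# hence parts == [5, 5] and the function returns start_indices == [1, 6], end_indices == [5, 10].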
##############################################################################################################################
##############################################################################################################################
struct ThreadMesh{P <: Point, VDB <: VertexDB{P}, T <: AbstractMesh{P,VDB}} <: AbstractMesh{P,VDB}
mesh::T
buffer_sig::Vector{Int64}
nthreads::Int64
function ThreadMesh(mesh::T, nthreads::Int64) where {P <: Point, VDB <: VertexDB{P}, T <: AbstractMesh{P,VDB}}
new{P,VDB,T}(mesh, Int64[], nthreads)
end
function ThreadMesh(mesh::T) where {P <: Point, VDB <: VertexDB{P}, T <: AbstractMesh{P,VDB}}
ThreadMesh(mesh,0)
end
end
meshthreads(m::AM) where AM<:AbstractMesh = Threads.nthreads()
meshthreads(m::TM) where TM<:ThreadMesh = m.nthreads
@inline Base.getproperty(cd::TM, prop::Symbol) where {TM<:ThreadMesh} = dyncast_get(cd,Val(prop))
@inline @generated dyncast_get(cd::TM, ::Val{:boundary_Vertices}) where {TM<:ThreadMesh} = :(getfield(cd,:mesh).boundary_Vertices)
@inline @generated dyncast_get(cd::TM, ::Val{S}) where {TM<:ThreadMesh,S} = :( getfield(cd, S))
@inline length(m::TM) where TM<:ThreadMesh = length(m.mesh)
@inline internal_index(m::TM, index::Int64) where TM<:ThreadMesh = internal_index(m.mesh, index)
@inline external_index(m::TM, index::Int64) where TM<:ThreadMesh = external_index(m.mesh, index)
@inline external_index(m::TM, inds::AVI) where {TM<:ThreadMesh, AVI<:AbstractVector{Int64}} = _external_indeces(m.mesh, inds, m.buffer_sig)
@inline internal_index(m::TM, inds::AVI) where {TM<:ThreadMesh, AVI<:AbstractVector{Int64}} = _internal_indeces(m.mesh, inds, m.buffer_sig)
@inline internal_sig(mesh::TM, sig::AVI, static::StaticTrue) where {TM<:ThreadMesh, AVI<:AbstractVector{Int64}} = sort!(_internal_indeces(mesh.mesh, sig, mesh.buffer_sig))
@inline function internal_sig(mesh::TM, sig::AVI, static::StaticFalse) where {TM<:ThreadMesh, AVI<:AbstractVector{Int64}}
sig .= _internal_indeces(mesh.mesh, sig, mesh.buffer_sig)
return sort!(sig)
end
@inline external_sig(mesh::TM, sig::AVI, static::StaticTrue) where {TM<:ThreadMesh, AVI<:AbstractVector{Int64}} = sort!(_external_indeces(mesh.mesh, sig, mesh.buffer_sig))
@inline function external_sig(mesh::TM, sig::AVI, static::StaticFalse) where {TM<:ThreadMesh, AVI<:AbstractVector{Int64}}
sig .= _external_indeces(mesh.mesh, sig, mesh.buffer_sig)
return sig
end
@inline get_vertex(m::TM, ref::VertexRef) where TM<:ThreadMesh = get_vertex(m.mesh, ref)
@inline nodes(m::TM) where TM<:ThreadMesh = nodes(m.mesh)
@inline vertices_iterator(m::TM, index::Int64, internal::StaticTrue) where TM<:ThreadMesh = vertices_iterator(m.mesh, index, internal)
@inline all_vertices_iterator(m::TM, index::Int64, internal::StaticTrue) where TM<:ThreadMesh = all_vertices_iterator(m.mesh, index, internal)
@inline number_of_vertices(m::TM, index::Int64, internal::StaticTrue) where TM<:ThreadMesh = number_of_vertices(m.mesh, index, internal)
@inline push!(mesh::TM, p::Pair{Vector{Int64},T}, index) where {T<:Point, TM<:ThreadMesh{T}} = push!(mesh.mesh, p, index)
@inline push_ref!(mesh::TM, ref, index) where {T<:Point, TM<:ThreadMesh{T}} = push_ref!(mesh.mesh, ref, index)
@inline haskey(mesh::TM, sig::AbstractVector{Int64}, index::Int) where TM<:ThreadMesh = haskey(mesh.mesh, sig, index)
struct LockMesh{P <: Point, VDB <: VertexDB{P}, T <: AbstractMesh{P,VDB}} <: AbstractMesh{P,VDB}
mesh::T
global_lock::ParallelLocks
nthreads::Int64
# following lines can be erased after debugging
meshes::Vector{T}
buffer::Vector{Int64}
LockMesh(a::T,b,c) where {P <: Point, VDB <: VertexDB{P}, T <: AbstractMesh{P,VDB}} = new{P,VDB,T}(a,b,c,Vector{T}(undef,c),Int64[])
end
@inline ReadWriteLock(m::LM) where {LM<:LockMesh} = m.global_lock
meshthreads(m::TM) where TM<:LockMesh = m.nthreads
@inline Base.getproperty(cd::TM, prop::Symbol) where {TM<:LockMesh} = dyncast_get(cd,Val(prop))
@inline @generated dyncast_get(cd::TM, ::Val{:boundary_Vertices}) where {TM<:LockMesh} = :(getfield(cd,:mesh).boundary_Vertices)
@inline @generated dyncast_get(cd::TM, ::Val{S}) where {TM<:LockMesh,S} = :( getfield(cd, S))
@inline internal_index(m::LM, index::Int64) where LM<:LockMesh = internal_index(m.mesh, index)
@inline external_index(m::LM, index::Int64) where LM<:LockMesh = external_index(m.mesh, index)
@inline external_index(m::LM, inds::AVI) where {LM<:LockMesh, AVI<:AbstractVector{Int64}} = external_index(m.mesh, inds)
@inline internal_index(m::LM, inds::AVI) where {LM<:LockMesh, AVI<:AbstractVector{Int64}} = internal_index(m.mesh, inds)
@inline internal_sig(mesh::TM, sig::AVI, static::StaticTrue) where {TM<:LockMesh, AVI<:AbstractVector{Int64}} = internal_sig(mesh.mesh, sig, static)
@inline internal_sig(mesh::TM, sig::AVI, static::StaticFalse) where {TM<:LockMesh, AVI<:AbstractVector{Int64}} = internal_sig(mesh.mesh, sig, static)
@inline external_sig(mesh::TM, sig::AVI, static::StaticTrue) where {TM<:LockMesh, AVI<:AbstractVector{Int64}} = external_sig(mesh.mesh, sig, static)
@inline external_sig(mesh::TM, sig::AVI, static::StaticFalse) where {TM<:LockMesh, AVI<:AbstractVector{Int64}} = external_sig(mesh.mesh, sig, static)
@inline get_vertex(m::LM, ref::VertexRef) where LM<:LockMesh = get_vertex(m.mesh, ref)
@inline nodes(m::LM) where LM<:LockMesh = nodes(m.mesh)
#=@inline vertices_iterator(m::LM, index::Int64, internal::StaticTrue) where LM<:LockMesh = begin
readlock(m.global_lock)
vi = ThreadsafeHeapVertexIterator(vertices_iterator(m.mesh, index, internal),m.global_lock.rwl)
readunlock(m.global_lock)
return vi
end
@inline all_vertices_iterator(m::LM, index::Int64, internal::StaticTrue) where LM<:LockMesh = begin
readlock(m.global_lock)
vi = ThreadsafeHeapVertexIterator(all_vertices_iterator(m.mesh, index, internal),m.global_lock.rwl)
readunlock(m.global_lock)
return vi
end=#
@inline vertices_iterator(m::LM, index::Int64, internal::StaticTrue) where LM<:LockMesh = vertices_iterator(m.mesh, index, internal)
@inline all_vertices_iterator(m::LM, index::Int64, internal::StaticTrue) where LM<:LockMesh = all_vertices_iterator(m.mesh, index, internal)
@inline number_of_vertices(m::LM, index::Int64, internal::StaticTrue) where LM<:LockMesh = number_of_vertices(m.mesh, index, internal)
@inline push_ref!(mesh::LM, ref, index) where {T<:Point, LM<:LockMesh{T}} = push_ref!(mesh.mesh, ref, index)
@inline haskey(mesh::LM, sig::AbstractVector{Int64}, index::Int) where LM<:LockMesh = haskey(mesh.mesh, sig, index)
function haskey(mesh::LM, sig::AbstractVector{Int64}) where LM<:LockMesh
readlock(mesh.global_lock)
b = haskey(mesh.mesh, sig)
readunlock(mesh.global_lock)
if !b
newsig = sort!(_internal_indeces(mesh,sig,mesh.buffer))
block(mesh.global_lock.hkl,newsig)
end
return b
end
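# Note: haskey above registers a claim on a missing sig via the HasKeyLock, and the specialized
# push! below only inserts a vertex if the calling thread still holds that claim.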
@inline function copy_sig(mesh::LM,sig) where {LM<:LockMesh}
s =_copy_indeces(mesh,sig,mesh.buffer)
resize!(mesh.buffer,length(s))
return mesh.buffer
end
# Specialized `push!` function with locking mechanism
@inline function push!(mesh::LM, p::Pair{Vector{Int64},T}) where {T<:Point, LM<:LockMesh{T}}
if !blocked(mesh.global_lock.hkl,Threads.threadid())
return
end
sig = internal_sig(mesh.mesh, copy(p[1])) #copy_sig(mesh,p[1]))
writelock(mesh.global_lock)
ref = push!(mesh.mesh, sig => p[2], sig[1])
i = 2
lsig = length(sig)
while i <= lsig
push_ref!(mesh.mesh, ref, sig[i])
i += 1
end
writeunlock(mesh.global_lock)
end
@inline function pushray!(mesh::LM,full_edge,r,u,_Cell) where LM<:LockMesh
writelock(mesh.global_lock)
push!(mesh.boundary_Vertices,_internal_indeces(mesh,full_edge)=>boundary_vertex(r,u,internal_index(mesh,_Cell)))
writeunlock(mesh.global_lock)
end
struct SeparatedMesh{T}
mesh::T
buffer::SVector{8,Int64} # 64 Byte buffer to avoid cache contention
SeparatedMesh(m::T) where T = new{T}(m,zeros(SVector{8,Int64}))
end
struct ParallelMesh{T}
meshes::Vector{SeparatedMesh{T}}
global_lock::ParallelLocks
end
getParallelMesh(m,lock,threads,a,b) = LockMesh(MeshView(ThreadMesh(m,threads), SwitchView(a,b) ),lock,threads)
function ParallelMesh(m::AM,_threads::Vector{MultiThread},TODO,start_indices) where {P <: Point, VDB <: VertexDB{P}, AM<:AbstractMesh{P,VDB}}
threads = length(_threads)
global_lock = ParallelLocks(Threads.nthreads())
total_length = length(m)
#push!(start_indices,total_length+1)
mesh1 = getParallelMesh(m,global_lock, threads,1,threads>1 ? TODO[start_indices[2]]-1 : total_length )
#mesh1 = MeshView(ThreadMesh(m,threads), SwitchView(1,threads>1 ? TODO[start_indices[2]]-1 : total_length) )
meshes = Vector{SeparatedMesh{typeof(mesh1)}}(undef,threads)
meshes[1] = SeparatedMesh(mesh1)
for i in 2:(threads-1)
meshes[i] = SeparatedMesh(getParallelMesh(m,global_lock,threads,TODO[start_indices[i]],TODO[start_indices[i+1]]-1))
#meshes[i] = SeparatedMesh(MeshView(ThreadMesh(m,threads), SwitchView(TODO[start_indices[i]],TODO[start_indices[i+1]]-1) ))
end
if threads>1
meshes[threads] = SeparatedMesh(getParallelMesh(m,global_lock,threads,TODO[start_indices[end]],total_length))
#meshes[threads] = SeparatedMesh(MeshView(ThreadMesh(m,threads), SwitchView(TODO[start_indices[end]],total_length)))
end
# following lines can be erased after debugging:
for i in 1:threads
for j in 1:threads
meshes[i].mesh.meshes[j] = meshes[j].mesh.mesh
end
end
return ParallelMesh{typeof(mesh1)}(meshes,global_lock)
end
function redundancy(data)
p0 = 0.0
for i in 1:length(data)
if (data[i]>3)
d = data[i]
p0 = 3*p0/d +(d-3)/d
end
end
return p0
end
struct PeriodicData
repeat::Vector{Int64}
dim::Int64
width::Vector{Float64}
factorials::Vector{Int64}
number_of_nodes::Int64
offset::Vector{Float64}
end
function PeriodicData(rep,width,nodes,_offset)
d = length(rep)
fac = ones(Int64,d+1)
for i in 2:(d+1)
fac[i] = rep[i-1]*fac[i-1]
end
return PeriodicData(rep,d,width,fac,nodes,_offset)
end
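# Worked example: PeriodicData([2,3], [1.0,1.0], 5, [0.0,0.0]) stores factorials == [1, 2, 6],
# so cell indices 1..6 enumerate the 2x3 grid of unit cells.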
#=function PeriodicData(rep)
d = length(rep)
fac = ones(Int64,d+1)
for i in 2:(d+1)
fac[i] = rep[i-1]*fac[i-1]
end
return PeriodicData(rep,d,zeros(Float64,d),fac,1,zeros(Float64,d))
end=#
function array_from_index(index,p::PeriodicData,arr=zeros(Int64,p.dim))
ind = index
arr .= 0
for i in p.dim:-1:2
for k in 1:p.repeat[i]
if k*p.factorials[i] >= ind
ind = ind - (k-1)*p.factorials[i]
arr[i] = k
break
end
end
end
arr[1]=ind
return arr
end
function index_from_array(arr,p::PeriodicData)
index=arr[1]
for i in 2:(p.dim)
index += p.factorials[i]*(arr[i]-1)
end
return index
end
function offset(arr::Vector{Int},p::PeriodicData,off=zeros(Float64,p.dim))
off .= 0.0
off .+= p.offset .+ p.width .* (arr .- ones(Int64,p.dim))
return off
end
function offset(index::Int,p::PeriodicData,off=zeros(Float64,p.dim))
return offset(array_from_index(index,p),p,off)
end
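# Worked example (continuing PeriodicData([2,3], [1.0,1.0], 5, [0.0,0.0])):
#   index_from_array([2,3], p) == 2 + 2*(3-1) == 6 and array_from_index(6, p) == [2, 3],
#   while offset([2,1], p) == [1.0, 0.0], i.e. the spatial offset of the unit cell at grid position [2,1].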
mutable struct Periodic_Counter
cell_index::Int64
cell_array::Vector{Int64}
cell_offset::Vector{Float64}
maxindex::Int64
data::PeriodicData
end
function Periodic_Counter(data::PeriodicData)
return Periodic_Counter(1,ones(Int64,data.dim),copy(data.offset),data.factorials[data.dim+1],data)
end
#=function reset_Periodic_Counter(counter::Periodic_Counter)
counter.cell_array .= 1
counter.cell_offset .= counter.data.offset
counter.cell_index = 1
end=#
function increase(counter::Periodic_Counter)
counter.cell_index += 1
array_from_index(counter.cell_index,counter.data,counter.cell_array)
offset(counter.cell_array,counter.data,counter.cell_offset)
end
function eol(counter::Periodic_Counter)
return counter.cell_index>counter.maxindex
end
#=function periodicgeodata(data,periodicity)#,dispatch_resolve)
pc = Periodic_Counter(periodicity)
DATA = Vector{typeof(data)}(undef,pc.maxindex)
while !eol(pc)
DATA[pc.cell_index] = data .+ pc.cell_offset #round.(data .+ pc.cell_offset; digits=2)
increase(pc)
end
return VoronoiNodes(hcat(DATA...))#,length(dispatch_resolve))
end=#
function periodicgeodata(data::HN,periodicity) where {P,HN<:HVNodes{P}} #,dispatch_resolve)
pc = Periodic_Counter(periodicity)
NON = length(data)
DATA = Vector{P}(undef,pc.maxindex*NON)
while !eol(pc)
for i in 1:NON
DATA[NON*(pc.cell_index-1)+i] = data[i] .+ pc.cell_offset #round.(data .+ pc.cell_offset; digits=2)
end
increase(pc)
end
return DATA
end
###############################################################################################################################
## Periodic Grids: Sort Boundary cells ...
###############################################################################################################################
function periodic_cells(periodic,periodicity,dim)
lb = 2*dim #length(boundary)
NON = periodicity.number_of_nodes
all_cells = 1
inner_cells = 1
for k in 1:dim
all_cells *= periodicity.repeat[k]
inner_cells *= k in periodic ? periodicity.repeat[k]-2 : periodicity.repeat[k]
end
boundary_nodes = (all_cells-inner_cells)*NON
new_positions = Vector{Int64}(undef,all_cells*NON)
reference = Vector{Int64}(undef,all_cells*NON)
reference_shifts = Vector{BitVector}(undef,all_cells*NON)
count_boundary = 0
count_inner = 0
#mirrors = EmptyDictOfType(0=>[1])
this_boundary = BitVector(zeros(Int8,lb))
no_shift = BitVector(zeros(Int8,lb))
pc = Periodic_Counter(periodicity)
buffer = copy(pc.cell_array)
while !eol(pc)
index = pc.cell_index
this_boundary .= false
sig = pc.cell_array
buffer .= pc.cell_array
for k in 1:length(sig)
!(k in periodic) && continue
if sig[k]==1 # we are at 'left' boundary
this_boundary[2*k-1]=true # so 'origin' is at 'right' boundary
buffer[k] = pc.data.repeat[k]-1
elseif sig[k]==pc.data.repeat[k]
this_boundary[2*k]=true
buffer[k] = 2
end
end
if sum(this_boundary)>0
old_index = index_from_array(buffer,pc.data)
shifts = copy(this_boundary)
for k in 1:NON #((index-1)*NON+1):(index*NON)
node = ((index-1)*NON+k)
reference[node] = (old_index-1)*NON + k
reference_shifts[node] = shifts
new_positions[node] = count_boundary*NON + k
end
count_boundary += 1
else
for k in 1:NON #((index-1)*NON+1):(index*NON)
node = ((index-1)*NON+k)
reference[node] = 0
reference_shifts[node] = no_shift
new_positions[node] = count_inner*NON + k + boundary_nodes
end
count_inner += 1
end
increase(pc)
end
return reference, reference_shifts, new_positions
end
#=function test_periodic_cells()
dim = 2
periodic =[1,2]
repeat = 2*ones(Int64,dim)
my_repeat = copy(repeat)
for k in 1:dim
if k in periodic my_repeat[k]+=2 end
end
periodicity = PeriodicData(my_repeat,ones(Float64,dim),1,zeros(Float64,dim))
return periodic_cells(periodic,periodicity,dim)
end=#
function switch_ints!(list,new_positions)
for i in 1:length(list)
list[i]==0 && continue
list[i] = new_positions[list[i]]
end
end
###############################################################################################################################
## Periodic Grids: Periodic Voronoi Geometry ...
###############################################################################################################################
#=function _Matrix_from_Points(xs::Points)
data = zeros(Float64,length(xs[1]),length(xs))
for i in 1:(length(xs))
data[:,i] .= xs[i]
end
return data
end=#
function periodic_geo_data(periodic,scale,dimensions,repeat,matrix_data,dim,_print=statictrue)
number_of_nodes = length(matrix_data)
_scale=diagm(scale)
data = Vector{eltype(matrix_data)}(undef,length(matrix_data))
for i in 1:length(matrix_data)
data[i] = _scale * matrix_data[i]
end
# println(data)
# error("")
offsetvector = zeros(Float64,dim)
my_repeat = copy(repeat)
for i in 1:dim
if i in periodic
my_repeat[i]+=2
offsetvector[i] = (-1.0)* dimensions[i]
end
end
offsetvector=_scale*offsetvector
_print==true && println(Crayon(foreground=:red,underline=true), "Periodicity: $periodic, Unit cell size: $(_scale*dimensions), repeat=$repeat, i.e. $(prod(repeat)) unit cells",Crayon(reset=true))
# dimensions of the actual cube
cubedimensions = _scale*copy(dimensions)
cubedimensions .*= repeat
cube = cuboid( dim, periodic = periodic, dimensions = cubedimensions )
# dimensions of the extended cube
extended_cubedimensions = _scale*copy(dimensions)
extended_cubedimensions .*= my_repeat
extended_cube = cuboid( dim, periodic = periodic, dimensions = extended_cubedimensions, offset = offsetvector )
periodicity = PeriodicData(my_repeat,_scale*dimensions,number_of_nodes,offsetvector)
return cube, extended_cube, periodicity, data
end
function PeriodicDomain(periodic,scale,dimensions,repeat,matrix_data,dim,vertex_storage)
cube, extended_cube, periodicity, data = periodic_geo_data(periodic,scale,dimensions,repeat,matrix_data,dim)
xs = periodicgeodata(data,periodicity)
#=println("xs2 = $xs")
println(boundaryToString(cube))
println(boundaryToString(extended_cube))
println(periodicity)
println("data = $data")
error("")
=#
lmesh = length(xs)
# step 3: create mesh
reference, reference_shifts, new_positions = periodic_cells(periodic,periodicity,dim)
internal_positions = copy(new_positions)
external_positions = collect(1:length(internal_positions))
my_zeros=0
for i in 1:length(reference) my_zeros += reference[i]==0 ? 1 : 0 end
new_xs = Vector{eltype(xs)}(undef,0)
lnxs = 0
if my_zeros<length(reference)
resize!(new_xs,length(xs))
for i in 1:length(reference) new_xs[new_positions[i]] = xs[i] end
switch_ints!(reference,new_positions)
parallelquicksort!(new_positions,reference,reference_shifts,external_positions)
resize!(reference,length(reference)-my_zeros)
resize!(reference_shifts,length(reference_shifts)-my_zeros)
lref = length(reference)
for i in 1:(length(xs)-lref)
xs[i]=new_xs[i+lref]
end
resize!(xs,length(xs)-lref)
resize!(new_xs,lref)
lnxs = length(new_xs)
reference .-= lnxs
else
resize!(reference,0)
resize!(reference_shifts,0)
end
pc = Periodic_Counter(periodicity)
mesh = cast_mesh(vertex_storage,xs)
_domain = Domain(mesh,cube)
set_internal_boundary(_domain,extended_cube)
add_virtual_points(_domain,ReflectedNodes(new_xs,reference,reference_shifts),do_refine=staticfalse)
return _domain, pc, ShuffleView(external_positions,internal_positions), periodicity
end
function PeriodicVoronoiGeometry(matrix_data; vertex_storage=false, silence=false, search_settings=NamedTuple(), fast=true, periodic=[], scale=ones(Float64,size(matrix_data,1)), repeat = 2*ones(Int64,size(matrix_data,1)), dimensions=ones(Float64,size(matrix_data,1)), integrator=VI_POLYGON, integrand=nothing, mc_accurate=(1000,100,20))
# step 1: Prepare data
dim = size(eltype(matrix_data),1)
search_ = RaycastParameter(eltype(eltype(matrix_data)),search_settings)
search = RaycastParameter(search_,(threading = fast ? SingleThread() : search_.threading,))
#=cancel = false
try
check_boundary(matrix_data,cuboid(dim,periodic=[],dimensions=0.999*dimensions,offset=0.0005*dimensions))
catch
rethrow()
cancel = true
end
cancel && error(("The nodes $(matrix_data)) have to lie inside the following domain with 0.5% distance to the boundary: \n"*boundaryToString(cuboid(dim,periodic=[],dimensions=dimensions),offset=4)))
=#
number_of_nodes = length(matrix_data)#,2)
println(Crayon(foreground=:red,underline=true), "Create periodic mesh in $dim-D from $number_of_nodes points",Crayon(reset=true))
# step 2: Create periodic nodes and offset
domain, pc, periodicview, periodicity = PeriodicDomain(periodic,scale,dimensions,repeat,matrix_data,dim,vertex_storage)
oldstd = stdout
redirect_stdout(silence ? devnull : oldstd)
___redir(x) = redirect_stdout(x ? devnull : oldstd)
result_integrator=integrator
try
if number_of_nodes==1
first_node = matrix_data[1] - 0.5*dimensions
deviation = first_node .* scale
cell_size = (first_node*0) + scale.*dimensions
cubic_voronoi(domain,periodicity,deviation,cell_size,search,m->HighVoronoi.Integrator(m,integrator,integrand=integrand,mc_accurate=mc_accurate),integrand,periodicview)
elseif fast
extended_cube = internal_boundary(domain)
lboundary = length(extended_cube)
get_mi(::Call_GEO) = VI_GEOMETRY
get_mi(::Call_FAST_POLYGON) = VI_FAST_POLYGON
get_mi(::Call_MC) = VI_MONTECARLO
get_mi(::Call_HEURISTIC) = VI_HEURISTIC
get_mi(i) = VI_POLYGON
my_integrator = get_mi(integrator)
result_integrator = my_integrator
if integrator!=my_integrator
println(Integrator_Name(integrator),"-method makes no sense. I use ",Integrator_Name(VI_POLYGON)," instead...")
end
#standardize(domain)
#MESH1 = mesh(domain)
Integral = IntegralView(HighVoronoi.integral(domain),periodicview)
MESH = mesh(Integral)
#println(sort!(_external_indeces(MESH,_internal_indeces(MESH1,[13, 14, 62, 194]))))
#println(sort!(_external_indeces(MESH,_internal_indeces(MESH1,[13, 61, 62, 194]))))
#error("")
lmesh=length(MESH)
Integrator = HighVoronoi.Integrator(Integral,my_integrator,integrand=integrand,mc_accurate=mc_accurate)
Integrator2 = integrand!=nothing && my_integrator!=VI_GEOMETRY ? HighVoronoi.Integrator(Integral,VI_HEURISTIC_INTERNAL,integrand=integrand,mc_accurate=mc_accurate) : nothing
enable(Integral,enforced=true)
xs = copy(nodes(MESH))
searcher = Raycast(xs; domain = internal_boundary(domain), options=search)
I_data = IntegrateData(xs, internal_boundary(domain),Integrator)
affected = BitVector(zeros(Int8,length(xs)+length(extended_cube)))
#affected[1:number_of_nodes] .= 1
affected[(length(xs)+1):((length(xs)+length(extended_cube)))] .= true
max_string_len = length(string(pc.maxindex, base=10))
liste = EmptyDictOfType([1]=>xs[1])
modified = BitVector(zeros(Int8,length(xs)))
lengths = zeros(Int64,number_of_nodes)
use_Integrator1 = x->modified[x]
#try
progress = ThreadsafeProgressMeter(2*pc.maxindex,silence,"")
no_trusted = 0
while !eol(pc)
i = pc.cell_index
#(i>1) && vp_line_up()
b = number_of_nodes*pc.cell_index
a = b-number_of_nodes+1
i_nodes = (a:b) # nodes to iterate in this step
#vp_print(0,"Block $(string(i, base = 10, pad = max_string_len)), copy data : ")
neighbors1 = neighbors_of_cell(i_nodes,MESH,adjacents=true)
nodeshift, trust = periodic_copy_data(pc, MESH, extended_cube, affected, Integral,searcher,modified,I_data)
#println("bla")
if trust
#___redir(true)
integrand!=nothing && merge_integrate( Integrator,Integrator2, use1=x->false, intro="Block $(string(i, base = 10, pad = max_string_len)), Integrate : ",
calculate = 1:((length(xs)+lboundary)), iterate=i_nodes, I_data=I_data,compact=true)
increase(pc)
no_trusted += 1
#___redir(false)
next!(progress)
next!(progress)
continue
end
neighbors2 = neighbors_of_cell(i_nodes,MESH,adjacents=true)
for n in neighbors2
if n<=lmesh && !(n in neighbors1)
modified[n]=true
end
end
if i != 1
for k in i_nodes
lengths[k-a+1] = number_of_vertices(MESH,k)
modified[k] = lengths[k-a+1]!=number_of_vertices(MESH,k-nodeshift)
end
end
voronoi( Integrator, Iter=i_nodes, searcher=searcher, intro="Block $(string(i, base = 10, pad = max_string_len)), Voronoi cells: ",compact=true,silence=true,printsearcher=false)
next!(progress)
for k in i_nodes
modified[k] = modified[k] || (lengths[k-a+1]!=number_of_vertices(MESH,k))
end
merge_integrate( Integrator,Integrator2, use1=x->true, intro="Block $(string(i, base = 10, pad = max_string_len)), Integrate : ",
calculate = 1:((length(xs)+lboundary)), iterate=i_nodes, I_data=I_data,compact=true)
# old version calculate = Iterators.flatten((modified_i_nodes,(b+1):((length(xs)+length(cube))))), iterate=modified_i_nodes, I_data=I_data,compact=true)
next!(progress)
# finally increase to next cell
increase(pc)
# vp_line_up()
end
#println(modified)
print("modified cells: ",sum(modified))
println(", trusted blocks: ",no_trusted)
else
myintegrator = replace_integrator(integrator)
result_integrator = myintegrator
fast_MESH = mesh(domain)
xs = copynodes(nodes(fast_MESH))
println(Crayon(foreground=:red,underline=true), "Slow Track....",Crayon(reset=true))
println(Crayon(foreground=:red,underline=true), "Initialize bulk mesh with $(length(xs)) points",Crayon(reset=true))
fast_Integral = integrate_view(domain).integral
#@descend periodic_final_integration(fast_Integral,fast_MESH,xs,search,domain,myintegrator,integrand,mc_accurate)
#error("")
periodic_final_integration(fast_Integral,fast_MESH,xs,search,domain,myintegrator,integrand,mc_accurate)
end
catch
redirect_stdout(oldstd)
rethrow()
end
redirect_stdout(oldstd)
standardize(domain)
result = VoronoiGeometry(result_integrator,domain,integrand,search,mc_accurate,nothing)#NoFile())
#= integral = HighVoronoi.integral(domain)
v = 0.0
inte = [0.0,0.0]
for i in (length(references(domain))+1):length(mesh(domain))
cdw = cell_data_writable(integral,i,Float64[],[Float64[]])
v += cdw.volumes[1]
inte .+= cdw.bulk_integral
end
println(v)
println(inte)=#
# return domain
return result
end
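# Hedged usage sketch (commented out): only the keyword names are taken from the signature above;
# the concrete values, and the assumption that random nodes in the unit cube are admissible input,
# are illustrative.
#=function example_periodic_voronoi_geometry()
    data = VoronoiNodes(rand(2,10))            # 10 nodes inside the 2D unit cell
    return PeriodicVoronoiGeometry(data;
                periodic   = [1,2],            # periodic in both coordinate directions
                dimensions = ones(Float64,2),  # unit cell [0,1]^2
                repeat     = [2,2],            # 2x2 copies of the unit cell
                integrator = VI_POLYGON)
end=#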
function periodic_final_integration(Integral,MESH,xs,search,domain,myintegrator,integrand,mc_accurate)
voronoi(MESH,searcher=Raycast(xs;domain=internal_boundary(domain), options =search),intro="")
println(Crayon(foreground=:red,underline=true), "Initialize mesh on boundary based on boundary conditions",Crayon(reset=true))
#### _domain,_Inte,search = Create_Discrete_Domain(I.Integral,b,intro="",search_settings=search) # periodized version including all boundary data
#shifts = periodic_shifts(cube,length(xs[1]))
#_domain = Discrete_Domain(cube,shifts,reference_shifts, reference,extended_cube)
d2 = domain
II2=HighVoronoi.Integrator(Integral,myintegrator,integrand=integrand,mc_accurate=mc_accurate)
l = length(mesh(d2))
lboundary = length(boundary(d2))
#HighVoronoi.integrate(backup_Integrator(II2,true),domain,relevant,modified)
bI = backup_Integrator(II2,true)
l1 = public_length(d2)
l_2 = (length(mesh(d2))+lboundary)
HighVoronoi.integrate(bI,domain=internal_boundary(domain),relevant=1:l1,modified=1:l_2,progress = ThreadsafeProgressMeter(l1,false,"$(Integrator_Name(bI))-integration over $(l1) cells:"))
end
###################################################################################################################################
## copy non-broken vertices
##################################################################################################################################
function right_indeces(indexarray,data,current_dim,dim,indeces=zeros(Int64,3^(dim-1)*data.number_of_nodes),running_dim=1,_NON=data.number_of_nodes,count=0)
if running_dim>dim
index = index_from_array(indexarray,data)
offset = (index-1)*_NON
for i in 1:_NON
indeces[i+count] = offset + i
end
return indeces, count+_NON
elseif running_dim!=current_dim
i = indexarray[running_dim]
if (i<data.repeat[running_dim])
indexarray[running_dim] = i+1
_, count = right_indeces(indexarray,data,current_dim,dim,indeces,running_dim+1,_NON,count)
end
if (i>1)
indexarray[running_dim] = i-1
_, count = right_indeces(indexarray,data,current_dim,dim,indeces,running_dim+1,_NON,count)
end
indexarray[running_dim]=i
_, count = right_indeces(indexarray,data,current_dim,dim,indeces,running_dim+1,_NON,count)
else
_, count = right_indeces(indexarray,data,current_dim,dim,indeces,running_dim+1,_NON,count)
end
return indeces,count
end
#=function block_neighbors(counter::Periodic_Counter,lmesh)
lrp = length(counter.data.repeat)
boundaries = collect((1+lmesh):(lmesh+2*lrp))
for i in 1:lrp
if counter.cell_array[i]!=counter.data.repeat[i]
boundaries[2*i-1] = 0
end
if counter.cell_array[i]!=1
boundaries[2*i] = 0
end
end
return filter!(x->(x!=0),boundaries)
end=#
function mark_modified(sig2,modified,lmesh)
for i in 1:length(sig2)
s = sig2[i]
s>lmesh && return
modified[s] = true
end
end
function periodic_copy_data(counter::Periodic_Counter, mesh::AM, domain::Boundary, affected::BitVector, Integral::HI,searcher,modified,I_data) where {AM<:AbstractMesh, HI<:HVIntegral}
dim = length(nodes(mesh)[1])
_NON = counter.data.number_of_nodes
lmesh = length(mesh)
lboundary = length(domain)
new_index = counter.cell_index
current_dim = 0
# dimensional direction of the old block from which we copy....
for i in 1:length(counter.data.repeat)
if counter.cell_array[i]>2
current_dim = i
if counter.cell_array[i]<counter.data.repeat[i]
break
end
end
if counter.cell_array[i]>1 && current_dim==0
current_dim = i
end
end
if current_dim==0
return 0, false
end
old_array = copy(counter.cell_array)
old_array[current_dim] -= 1
old_index = index_from_array(old_array,counter.data)
#print("$new_index, $(counter.cell_array) <-- $old_index, $old_array via: $current_dim ")
old_range = (1+(old_index-1)*_NON):(old_index*_NON)
right_frame = counter.cell_array[current_dim]==counter.data.repeat[current_dim]
left_frame = old_array[current_dim]==1
trust_all = !(left_frame || right_frame)
nodeshift = ( new_index - old_index )*_NON
coordinateshift = counter.cell_offset - offset(old_array,counter.data)
# now transfer non-affected nodes
emptysig = Int64[]
for i in old_range
k = i + nodeshift
activate_cell( searcher, k, (lmesh+1):(lmesh+lboundary) )
sig2 = emptysig
for (sig,r) in vertices_iterator(mesh,i)
mark_modified(sig2,modified,lmesh)
if sig[1]!=i
sig2 = emptysig
continue
end
sig2 = copy(sig)
r2 = adjust_boundary_vertex(r + coordinateshift,domain,sig,lmesh,length(sig))
for ikk in 1:length(sig)
sig2[ikk]>lmesh && break
sig2[ikk] += nodeshift
if sig2[ikk]>lmesh
resize!(sig2,ikk-1)
break
end
end
length(sig2)<length(sig) && continue
if !(trust_all)
vv = vertex_variance(sig2,r2,searcher)
vv > 100 * searcher.variance_tol && continue
i, _ =_nn(searcher.tree,r2,x->(x in sig2))
!(i in sig2) && continue
end
push!(mesh, sig2=>r2)
sig2 = emptysig
end
mark_modified(sig2,modified,lmesh)
end
# copy integral content
if trust_all
vec = Float64[]
vecvec = [vec]
for i in ((old_index-1)*_NON+1):(old_index*_NON)
k = i + nodeshift
old_neighbors = get_neighbors(Integral,i)
neigh = copy(old_neighbors)
for ii in 1:length(neigh)
if neigh[ii]<=lmesh
neigh[ii] += nodeshift
end
end
set_neighbors(Integral,k,neigh,nothing,nothing)
if enabled_volumes(Integral)
data_i = cell_data_writable(Integral,i,vec,vecvec,get_integrals=staticfalse)
data_k = cell_data_writable(Integral,k,vec,vecvec,get_integrals=staticfalse)
for j in 1:length(old_neighbors)
data_k.area[j] = data_i.area[j]
end
data_k.volumes[1] = data_i.volumes[1]
end
end
end
return nodeshift, trust_all
end
| HighVoronoi | https://github.com/martinheida/HighVoronoi.jl.git |
|
[
"MIT"
] | 1.3.1 | bc1b8bfb168b8850d28c935b20a545882a9a078e | code | 22608 | # We provide the Polygon_Integrator. It is defined and initialized similar to
# the MC integrator.
struct Polygon_Integrator{T<:Union{Nothing,Function},TT,IDC<:IterativeDimensionChecker}
_function::T
bulk::Bool
    # If the integrand is not `nothing`, then area integration has to be active. Otherwise values are taken as given
Integral::TT
iterative_checker::IDC
end
function Polygon_Integrator(f,b,I)
return Polygon_Integrator(f,b,I,IterativeDimensionChecker(mesh(I)))
end
#=function Polygon_Integrator(mesh::VM,integrand, bulk_integral=false) where VM<:Voronoi_MESH
b_int=(typeof(integrand)!=Nothing) ? bulk_integral : false
i_int=(typeof(integrand)!=Nothing) ? true : false
Integ=Voronoi_Integral(mesh,integrate_bulk=b_int, integrate_interface=i_int)
PI=Polygon_Integrator( integrand, b_int, Integ, IterativeDimensionChecker(mesh) )
return PI
end=#
function Polygon_Integrator(Integ::HVIntegral,integrand, bulk_integral=false)
b_int=(typeof(integrand)!=Nothing) ? bulk_integral : false
enable(Integ,volume=true,integral=b_int)
return Polygon_Integrator( integrand, b_int, Integ, IterativeDimensionChecker(mesh(Integ)) )
end
function copy(I::Polygon_Integrator)
Inte = copy(I.Integral)
    return Polygon_Integrator(I._function, I.bulk, Inte, IterativeDimensionChecker(mesh(Inte)))
end
@inline function integrate(Integrator::Polygon_Integrator; progress=ThreadsafeProgressMeter(0,true,""),domain=Boundary(), relevant=1:(length(Integrator.Integral)+length(domain)), modified=1:(length(Integrator.Integral)))
_integrate(Integrator; domain=domain, calculate=modified, iterate=relevant,progress=progress)
end
function prototype_bulk(Integrator::Polygon_Integrator)
y = (typeof(Integrator._function)!=Nothing && Integrator.bulk) ? Integrator._function(nodes(mesh(Integrator.Integral))[1]) : Float64[]
y.*= 0.0
return y
end
function prototype_interface(Integrator::Polygon_Integrator)
return 0.0*(typeof(Integrator._function)!=Nothing ? Integrator._function(nodes(mesh(Integrator.Integral))[1]) : Float64[])
end
struct PolyEdge{T}
r1::T
r2::T
value::Vector{Float64}
function PolyEdge(r,lproto)
return new{typeof(r)}(r,r,Vector{Float64}(undef,lproto))
end
function PolyEdge(pe::PolyEdge,rr)
r = pe.r1
r2 = pe.r2
nu = r2-r
if dot(rr-r,nu)<=0
r=rr
elseif dot(rr-r2,nu)>0
r2 = rr
end
return new{typeof(pe.r1)}(r,r2,pe.value)
end
end
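# Sketch of the intended `PolyEdge` behavior, deduced from the two constructors above (commented
# out, for illustration): `PolyEdge(r,lproto)` starts a degenerate edge with r1 == r2 == r, and
# `PolyEdge(pe,rr)` widens the stored segment so that r1 and r2 always remain the two extremal
# points along the edge direction.
#=function example_polyedge()
    pe = PolyEdge(SVector(0.0,0.0), 0)      # degenerate edge, no integrand values
    pe = PolyEdge(pe, SVector(1.0,0.0))     # segment now spans (0,0) -- (1,0)
    pe = PolyEdge(pe, SVector(0.5,0.0))     # interior point: endpoints stay unchanged
    pe = PolyEdge(pe, SVector(2.0,0.0))     # extends the segment to (2,0) -- (0,0)
    return pe.r1, pe.r2
end=#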
"""
initialize_integrator(xs,_Cell,verteces,edges,integrator::Polygon_Integrator)
    The integrator is initialized at the beginning of systematic_voronoi(....) if an MC_Function_Integrator is passed as the last argument.
This buffer version of the function does nothing but return two empty arrays:
- The first for Volume integrals. The first coordinate of the vector in each node
corresponds to the volume of the Voronoi cell
- The second for Area integrals. The first coordinate of the vector on each interface
corresponds to the d-1 dimensional area of the Voronoi interface
"""
#function integrate(domain,_Cell,iter,calcul,searcher,Integrator::Polygon_Integrator)
function integrate(neighbors,_Cell,iterate, calculate, data,Integrator::Polygon_Integrator,ar,bulk_inte,inter_inte,_)
Integral = Integrator.Integral
#verteces2 = Integral.MESH.Buffer_Verteces[_Cell]
verteces = vertices_iterator(mesh(Integral),_Cell)
xs=data.extended_xs
dim = data.dimension # (full) Spatial dimension
# get all neighbors of this current cell
neigh=neighbors
_length=length(neigh)
# flexible data structure to store the sublists of verteces at each iteration step 1...dim-1
emptydict=EmptyDictOfType([0]=>PolyEdge(xs[1],0)) # empty buffer-list to create copies from
listarray=(typeof(emptydict))[] # will store to each remaining neighbor N a sublist of verteces
# which are shared with N
all_dd=Vector{typeof(listarray)}(undef,dim-1)
map!(k->copy(listarray), all_dd, 1:dim-1)
# create a data structure to store the minors (i.e. sub-determinants)
all_determinants=Minors(dim)
# empty_vector will be used to locally store the center at each level of iteration. This saves
# a lot of "memory allocation time"
empty_vector=zeros(Float64,dim)
# Bulk computations: V stores volumes y stores function values in a vector format
V = Vector([0.0])
# do the integration
I=Integrator
taboo = zeros(Int64,dim)
    #typeof(data.buffer_data)==Int64 && error("unexpected buffer_data type")
#inter_inte .*= 0
iterative_volume(I._function, I.bulk, _Cell, V, bulk_inte, ar, inter_inte, dim, neigh,
_length,verteces,emptydict,emptydict,xs[_Cell],empty_vector,all_dd,all_determinants,calculate,Integral,xs,taboo,I.iterative_checker)
#println()
try
return V[1]
catch
return 0.0
end
end
function _neigh_index(_my_neigh,n)
for i in 1:(length(_my_neigh))
if _my_neigh[i]==n return i end
end
return 0
end
function first_relevant_edge_index(edge,_Cell,neigh)
le = length(edge)
for i in 1:le
ei = edge[i]
if ei!=_Cell
return _neigh_index(neigh,ei)
end
end
return 0
end
function queue_integral_edge(dd,edge,r,_Cell,neigh,lproto,le)
first_index = first_relevant_edge_index(edge,_Cell,neigh)
first_index==0 && return
nfirst = neigh[first_index]
# set edge
if haskey(dd[first_index],edge)
pe = dd[first_index][edge]
pe.r1==r && return
coords = PolyEdge(pe,r)
for _i in 1:le
ee = edge[_i]
(ee<=_Cell || !(ee in neigh)) && continue
index = _neigh_index(neigh,ee)
dd[index][edge] = coords # bad performance
end
else
coords = PolyEdge(r,lproto)
edge = copy(edge)
for _i in 1:le
ee = edge[_i]
((ee<=_Cell && ee!=nfirst) || !(ee in neigh)) && continue
push!(dd[_neigh_index(neigh,ee)], edge => coords)
end
end
end
function iterative_volume(_function, _bulk, _Cell::Int64, V, y, A, Ay, dim,neigh,_length,verteces,verteces2,
emptylist,vector,empty_vector,all_dd,all_determinants,calculate,Full_Matrix,xs,taboo,dc)
space_dim=length(vector)
if (dim==1) # this is the case if and only if we arrived at an edge
#print(length(verteces)," ")
#for (ed,pe) in verteces # = first(verteces)
r, r2,val = get_sup_edge(dc,verteces,xs)
k_minor(all_determinants,space_dim-1,r-vector)
k_minor(all_determinants,space_dim, r2-vector)
vol=(all_determinants.data[space_dim])[1] #pop!(all_determinants[space_dim])
vol=abs(vol)
#println(_Cell," vol ",vol)
A[1]+=vol
#if (typeof(_function)!=Nothing)
#if abs(val[1]-1.0)>0.00000001
# error("$val")
#end
Ay .+= vol .* val
#println("a :$(Ay) ")
#end
#end
return
elseif dim==space_dim
        # get the center of the current dim-dimensional face. This center is taken
        # as the new coordinate to construct the current triangle. The minors are stored in place space_dim-dim+1
#_Center=midpoint(verteces,verteces2,empty_vector,vector)
# dd will store to each remaining neighbor N a sublist of verteces which are shared with N
dd=Vector{typeof(emptylist)}(undef,_length)
for i in 1:_length dd[i]=copy(emptylist) end
mlsig = reset(dc, neigh, xs, _Cell, verteces)
NF = dc.edge_iterator
#println(neigh,"*************************************************************************")
resize!(dc.edge_buffer,mlsig)
dc.edge_buffer .= 0.0
lproto = typeof(_function)!=Nothing ? length(_function(xs[1])) : 0
searcher = (ray_tol = 1.0E-12,)
for (sig,r) in verteces # repeat in case verteces2 is not empty
lsig=length(sig)
if lsig>space_dim+1
b = reset(NF,sig,r,xs,_Cell,searcher,allrays=true,_Cell_first=true)
lsig = length(sig)
while b
b, edge = update_edge(NF,searcher,[])
(!b) && break
# get edge
le = length(edge)
dc.edge_buffer[1:le] .= NF.iterators[1].sig[edge]
count = 0
for i in 1:le
if dc.edge_buffer[i] in neigh
count += 1
dc.edge_buffer[count] = dc.edge_buffer[i]
end
end
count += 1
dc.edge_buffer[count] = _Cell
le = count
edge = view(dc.edge_buffer,1:le)
sort!(edge)
edge[end]<=_Cell && continue
queue_integral_edge(dd,edge,r,_Cell,neigh,lproto,le)
end
elseif lsig==space_dim+1
edgeview = view(dc.edge_buffer,1:space_dim)
start = 1 #findfirst(x->(x==_Cell),sig)
for i in 1:space_dim edgeview[i]=start+i-1 end
b = true
while b
edge = view(sig,edgeview)
if edge[end]<=_Cell
b,_ = increase_edgeview( edgeview, space_dim+1, space_dim)
continue
end
(_Cell in edge) && queue_integral_edge(dd,edge,r,_Cell,neigh,lproto,space_dim)
b,_ = increase_edgeview( edgeview, space_dim+1, space_dim)
end
start = 2
for i in 1:space_dim edgeview[i]=start+i-1 end
edge = view(sig,edgeview)
edge[end]<=_Cell && continue
(_Cell in edge) && queue_integral_edge(dd,edge,r,_Cell,neigh,lproto,space_dim)
end
end
if (typeof(_function)!=Nothing)
for k in 1:_length
for (edge,pe) in dd[k]
#edge[end]<=_Cell && continue
#resize!(pe.value,lproto)
pe.value .= _function(pe.r1)
pe.value .+= _function(pe.r2)
pe.value .*= 0.5
end
end
end
taboo[dim]=_Cell
AREA=zeros(Float64,1)
_Center = MVector{dim}(zeros(Float64,dim))
# println(y)
for k in 1:_length
buffer=neigh[k] # this is the (further) common node of all verteces of the next iteration
# in case dim==space_dim the dictionary "bufferlist" below will contain all
# verteces that define the interface between "_Cell" and "buffer"
# However, when it comes to A and Ay, the entry "buffer" is stored in place "k".
if !(buffer in calculate)
empty!(dd[k])
continue
end
bufferlist=dd[k]
buffer>_Cell && isempty(bufferlist) && continue
taboo[dim-1] = buffer
neigh[k]=0
AREA[1]=0.0
AREA_Int=(typeof(_function)!=Nothing) ? Ay[k] : Float64[]
# now get area and area integral either from calculation or from stack
if buffer>_Cell && (buffer in calculate)# && (buffer in calculate) # in this case the interface (_Cell,buffer) has not yet been investigated
set_dimension(dc,1,_Cell,buffer)
#test_idc(dc,_Cell,buffer,1)
AREA_Int.*=0
_Center2=midpoint(bufferlist,emptylist,empty_vector,vector)
_Center2.+=vector # midpoint shifts the result by -vector, so we have to correct that ....
_Center .= _Center2
iterative_volume(_function, _bulk, _Cell, V, y, AREA, AREA_Int, dim-1, neigh, _length, bufferlist, emptylist, emptylist,vector,empty_vector,all_dd,all_determinants,calculate,Full_Matrix,xs,taboo,dc)
neigh[k]=buffer
#if abs(AREA_Int[1]/AREA[1]-1.0)>0.00000001
# error("")
#end
# Account for dimension (i.e. (d-1)! to get the true surface volume and also consider the distance="height of cone")
empty!(bufferlist) # the bufferlist is empty
distance= 0.5*norm(vector-xs[buffer]) #abs(dot(normalize(vector-xs[buffer]),vert))
FACTOR=1.0
for _k in 1:(dim-1) FACTOR*=1/_k end
thisvolume = AREA[1]*FACTOR/dim
V[1] += thisvolume
FACTOR*=1/distance
AREA.*=FACTOR
AREA_Int.*=FACTOR
A[k]=AREA[1] # return value of area
if typeof(_function)!=Nothing
# adjust the "area integral" by interpolation with the value at the center of the surface
_y=_function(_Center)
AREA_Int.*=((dim-1)/(dim)) # "convex interpolation" of the (d-2)-dimensional boundary-boundary and the center of the surface
_y.*=(1/(dim))*AREA[1]
AREA_Int.+=_y
                if _bulk # and finally the bulk integral, if wished
_y=_function(vector)
_y.*=(thisvolume/(dim+1))
_y.+=(AREA_Int*(distance/(dim+1))) # there is hidden a factor dim/dim which cancels out
y.+=_y
end
# println("$(V[1]) vs. $(y[1])")
#if abs(y[1]/V[1]-1.0)>0.00000001
# error("hier: $(y[1]/V[1])")
#end
end
else # the interface (buffer,_Cell) has been calculated in the systematic_voronoi - cycle for the cell "buffer"
            # fall back on the values stored in Full_Matrix
empty!(bufferlist)
#error("sollte nicht sein...")
distance=0.5*norm(vector-xs[buffer])#abs(dot(normalize(vector-xs[buffer]),vert))
AREA[1]= buffer>_Cell ? AREA[1] : get_area(Full_Matrix,buffer,_Cell)
# !!!!! if you get an error at this place, it means you probably forgot to include the boundary planes into "calculate"
thisvolume = AREA[1]*distance/dim
V[1] += thisvolume
A[k]=AREA[1]
if typeof(_function)!=Nothing
# AREA_Int.*=0
AREA_Int .= buffer>_Cell ? AREA_Int : get_integral(Full_Matrix,buffer,_Cell)
end
            if _bulk # and finally the bulk integral, if wished
_y=_function(vector)
_y.*=(thisvolume/(dim+1))
_y.+=(AREA_Int .*(distance/(dim+1))) # there is hidden a factor dim/dim which cancels out
y.+=_y #(distance/dim).*AREA_Int
end
end
neigh[k]=buffer
end
else
        # the next three lines get the center of the current dim-dimensional face. This center is taken
        # as the new coordinate to construct the current triangle. The minors are stored in place space_dim-dim+1
_Center=midpoint(verteces,verteces2,empty_vector,vector)
k_minor(all_determinants,space_dim-dim,_Center)
dd=all_dd[dim-1] # dd will store to each remaining neighbor N a sublist of verteces which are shared with N
_count=1
for k in 1:_length
_count+=neigh[k]!=0 ? 1 : 0 # only if neigh[k] has not been treated earlier in the loop
end
_my_neigh=Vector{Int64}(undef,_count-1)
while length(dd)<_count push!(dd,copy(emptylist)) end
_count=1
for k in 1:_length
if (neigh[k]!=0) # only if neigh[k] has not been treated earlier in the loop
_my_neigh[_count]=neigh[k]
_count+=1
end
end
ll=(length(verteces))
count = 0
for _ii in 1:(ll) # iterate over all verteces
(sig,r) = pop!(verteces)
if (r.r1==r.r2)
#=if length(sig)==space_dim
print("-")
else
print("+")
end=#
continue
#elseif length(sig)!=space_dim
# print("0")
end
#=δr =r.r1-r.r2
dist = norm(δr)
(dot(δr,dc.local_basis[space_dim-dim])/dist>1.0E-12) && continue=#
for _neigh in sig # iterate over neighbors in vertex
(_neigh in taboo) && continue # if _N is a valid neighbor (i.e. has not been treated in earlier recursion)
index = _neigh_index(_my_neigh,_neigh)
(index==0 || count==dim) && continue
#count+=1
push!( dd[index] , sig =>r) # push vertex to the corresponding list
end
end
if dim==2
l_mn = length(_my_neigh)
for k in 1:(l_mn-1)
length(dd[k])==0 && continue
keys_1 = keys(dd[k])
for i in (k+1):l_mn
keys_2 = keys(dd[i])
linear = false
for s1 in keys_1
for s2 in keys_2
if s1==s2
linear=true
break
end
end
linear && break
end
if linear
merge!(dd[k],dd[i])
empty!(dd[i])
end
end
end
end
# Base.rehash!(verteces)
_count=1
# dim==2 && println()
for k in 1:_length
buffer=neigh[k] # this is the (further) common node of all verteces of the next iteration
# in case dim==space_dim the dictionary "bufferlist" below will contain all
# verteces that define the interface between "_Cell" and "buffer"
# However, when it comes to A and Ay, the entry "buffer" is stored in place "k".
buffer==0 && continue
# bufferlist=dd[_neigh_index(_my_neigh,buffer)] # this one can be replaced by a simple counting of neigh!=0
bufferlist=dd[_count]
valid = set_dimension(dc,space_dim-dim+1,_Cell,buffer)
if !valid
empty!(bufferlist)
end
_count+=1
isempty(bufferlist) && continue
# test_idc(dc,_Cell,buffer,space_dim-dim+1)
            if (A[1]==Inf || isnan(A[1])) # if A[1] (the current cell interface (d-1) dimensional volume) is already "at least" infinite, we can interrupt
# the current branch at all levels, except the level dim=space_dim: "it won't get any better"
empty!(bufferlist)
for k in 1:length(dd)
empty!(dd[k])
end
return
end
neigh[k]=0
taboo[dim-1]=buffer
iterative_volume(_function, _bulk, _Cell, V, y, A, Ay , dim-1, neigh, _length, bufferlist, emptylist, emptylist,vector,empty_vector,all_dd,all_determinants,calculate,Full_Matrix,xs,taboo,dc)
neigh[k]=buffer
taboo[dim-1]=0
if !isempty(bufferlist) pop!(bufferlist) end
end
end
end
function midpoint_points(vertslist,vertslist2,empty_vector,cell_center=Float64[])
empty_vector.*=0.0
for (_,r) in vertslist
empty_vector.+=r
end
for (_,r) in vertslist2
empty_vector.+=r
end
empty_vector.*= 1/(length(vertslist)+length(vertslist2))
if length(cell_center)>0 empty_vector.-= cell_center end
return empty_vector
end
function midpoint_points_fast(vertslist,data,empty_vector,cell_center=Float64[])
empty_vector.*=0.0
for v in vertslist
r = data[v][2]
empty_vector.+=r
end
empty_vector.*= 1/(length(vertslist))
if length(cell_center)>0 empty_vector.-= cell_center end
return empty_vector
end
function midpoint(vertslist,vertslist2,empty_vector,cell_center=Float64[])
empty_vector.*=0.0
for (_,ee) in vertslist
empty_vector.+=ee.r1
empty_vector.+=ee.r2
end
for (_,ee) in vertslist2
empty_vector.+=ee.r1
empty_vector.+=ee.r2
end
empty_vector.*= 0.5/(length(vertslist)+length(vertslist2))
if length(cell_center)>0 empty_vector.-= cell_center end
return empty_vector
end
#=function midpoint(vertslist,vertslist2,dim::Int)
empty_vector=zeros(Float64,dim)
for (_,r) in vertslist
empty_vector.+=r
end
for (_,r) in vertslist2
empty_vector.+=r
end
empty_vector.*= 1/(length(vertslist)+length(vertslist2))
return empty_vector
end=#
#=function dist_to_facett(Center,Midpoint,base)
difference=Center-Midpoint
dist=(-1)*sum(x->x^2,difference)
for i in 1:length(base)
dist+=dot(base[i],difference)^2
end
return sqrt(abs(dist))
end=#
| HighVoronoi | https://github.com/martinheida/HighVoronoi.jl.git |
|
[
"MIT"
] | 1.3.1 | bc1b8bfb168b8850d28c935b20a545882a9a078e | code | 2593 |
struct ThreadsafeProgressMeter{RWL<:Union{BusyFIFOLock,Nothing}}
p::Progress
silence::Bool
lock::RWL
end
function ThreadsafeProgressMeter(tpm::TPM) where {TPM<:ThreadsafeProgressMeter}
return tpm
end
function ThreadsafeProgressMeter(n::Int,silence::Bool,intro)
return ThreadsafeProgressMeter(Progress(n,intro),silence,nothing)
end
function ThreadsafeProgressMeter(n::Int,silence::Bool,intro,::MultiThread)
return ThreadsafeProgressMeter(Progress(n,intro),silence,BusyFIFOLock())
end
#import Progress
@inline function next!(tpm::TPM) where {TPM<:ThreadsafeProgressMeter}
lock(tpm.lock)
(!tpm.silence) && (ProgressMeter.next!(tpm.p))
unlock(tpm.lock)
end
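# Commented-out usage sketch: the three-argument constructor above wraps a ProgressMeter.Progress
# and prints nothing when `silence == true`; the four-argument variant additionally takes a
# `MultiThread` marker and guards `next!` with a lock (how `MultiThread` is constructed is assumed
# to be configured elsewhere).
#=function example_progress(silence=false)
    p = ThreadsafeProgressMeter(100, silence, "integrating cells: ")
    for _ in 1:100
        # ... work on one cell ...
        next!(p)          # advances the meter unless `silence` was set
    end
end=#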
#ProgressMeter.lock_if_threading(f::Function, p::ProgressMeter.Progress) = f()
###################################################################################################
## provides a bunch of tools to display progress of voronoi algorithms properly
###################################################################################################
### some offset functions
const voronoi_offset = 4
const integral_offset = 4
const sys_refine_offset = 4
const BC_offset = 4
#### Main functions
function vp_line()
print("\n")
end
#=function vp_column(i)
print("\u1b[0E\u1b[$(i)C")
end
function vp_delete_from_here()
end
function vp_delete_line_content()
print("\u1b[2K") # delete entire line
end=#
function vp_line_up()
print("\u1b[2K\u1b[1A\u1b[200D")
end
#=
function vp_line_up(K)
for i in 1:K
print("\u1b[2K\u1b[1A\u1b[200D")
end
end
function vp_blocks(content,offsets)
for i in 1:(length(content))
print("\u1b[0E\u1b[$(offset[i])C")
print(content[i])
end
end
=#
function vp_print(o1::Int,c;crayon=nothing)
# if typeof(crayon)==Nothing
# print("\u1b[0E\u1b[$(o1)C")
print("\u1b[200D\u1b[$(o1)C")
print(c)
# else
#= print(crayon)
print("\u1b[0E\u1b[$(o1)C")
print(c)
print(Crayon(reset=true))=#
# end
end
function vp_print(o1::Int,c1,o2::Int,c2;crayon=nothing)
# if typeof(crayon)==Nothing
print("\u1b[0E\u1b[$(o1)C")
print(c1)
print("\u1b[0E\u1b[$(o2)C")
print(c2)
# else
#= print(crayon)
print("\u1b[0E\u1b[$(o1)C")
print(c1)
print("\u1b[0E\u1b[$(o2)C")
print(c2)
print(Crayon(reset=true))=#
# end
end
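# Commented-out illustration of the ANSI based helpers above: `vp_print(offset, c)` moves the
# cursor to column `offset` of the current terminal line and prints `c` there, so several status
# fields can be updated in place without starting a new line.
#=function example_vp_print()
    vp_print(voronoi_offset, "Voronoi cells:", 30, "0 %")
    for k in 1:10
        sleep(0.1)
        vp_print(30, "$(10*k) %")   # overwrite only the percentage field
    end
    vp_line()
end=#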
| HighVoronoi | https://github.com/martinheida/HighVoronoi.jl.git |
|
[
"MIT"
] | 1.3.1 | bc1b8bfb168b8850d28c935b20a545882a9a078e | code | 7637 | struct HashedQueue
value::UInt128
value2::UInt64
end
@inline HashedQueue() = HashedQueue(0, 0)
# Definition of the mutable QueueHashTable structure for HashedQueue
mutable struct QueueHashTable{V<:AbstractVector{HashedQueue}, R}
table::V
mylength::MVector{1, UInt64}
occupied::BitVector
deleted::BitVector
lock::R
function QueueHashTable(v::A, len::Int64, lock=SingleThread()) where A
len2 = next_power_of_two(len)
resize!(v, len2)
l = locktype(lock)
#typeof(l)!=Nothing && error("")
new{A, typeof(l)}(v, MVector{1, UInt64}([len2 - 1]), falses(len2),falses(len2), l)
end
QueueHashTable(len::Int64, lock=SingleThread()) = QueueHashTable(Vector{HashedQueue}(undef, len), len, lock)
end
# Method for inserting into the QueueHashTable
function pushqueue!(ht::Q, key::K, write::Bool=true) where {Q<:QueueHashTable,K}
value = fnv1a_hash(key, UInt128)
value_ = UInt64(value & UInt128(0x7FFFFFFFFFFFFFFF))
index1 = value_ & ht.mylength[1]
index2 = fnv1a_hash(key, UInt64)
i = UInt64(0)
ret = false
lock(ht.lock)
try
while true
idx = reinterpret(Int64, (index1 + i * index2) & ht.mylength[1] + 1) # try bitcast( ) instead
@inbounds data = ht.occupied[idx] ? ht.table[idx] : HashedQueue()
if (ht.deleted[idx] && !write)
i += 1
continue
end
ht.deleted[idx] &= !write
ht.occupied[idx] |= write
if data.value == 0
if write
ht.table[idx] = HashedQueue(value, index2)
end
break # return false
elseif data.value == value && data.value2 == index2
ret = true
break # return false
end
i += 1
if i >= ht.mylength[1]
if write
extend(ht)
ret = pushqueue!(ht, key)
end
break
end
end
finally
unlock(ht.lock)
end
return ret
end
@inline Base.haskey(ht::Q, key::K) where {Q<:QueueHashTable,K} = pushqueue!(ht,key,false)
function Base.delete!(ht::Q, key::K) where {Q<:QueueHashTable,K}
value = fnv1a_hash(key, UInt128)
value_ = UInt64(value & UInt128(0x7FFFFFFFFFFFFFFF))
index1 = value_ & ht.mylength[1]
index2 = fnv1a_hash(key, UInt64)
i = UInt64(0)
lock(ht.lock)
try
while true
idx = reinterpret(Int64, (index1 + i * index2) & ht.mylength[1] + 1) # try bitcast( ) instead
if ht.deleted[idx]
i+=1
continue
end
!ht.occupied[idx] && break
data = ht.table[idx]
if data.value == value && data.value2 == index2
ht.occupied[idx] = false
ht.deleted[idx] = true
break # return false
end
i += 1
if i >= ht.mylength[1]
break
end
end
finally
unlock(ht.lock)
end
end
@inline clearhashvector(::Vector{HashedQueue}) = nothing
@inline similarqueuehash(::Type{HashedQueue}, len2::Int64) = Vector{HashedQueue}(undef, len2)
# Function to extend the QueueHashTable
function extend(ht::QueueHashTable)
len2 = 2 * (ht.mylength[1] + 1)
V2 = similarqueuehash(HashedQueue, reinterpret(Int64, len2))
new_occupied = falses(len2)
len2 -= 1
pos = 0
for data in ht.table
pos += 1
!ht.occupied[pos] && continue
value_ = UInt64(data.value & UInt128(0x7FFFFFFFFFFFFFFF))
index1 = value_ & len2
index2 = data.value2
i = 0
while true
if data.value==0 && data.value2==0
end
idx = reinterpret(Int64, (index1 + i * index2) & len2 + 1) # try bitcast( ) instead
i += 1
if !new_occupied[idx] # V2[idx].value==0
V2[idx] = data
new_occupied[idx] = true
break
end
end
end
clearhashvector(ht.table)
ht.table = V2
ht.occupied = new_occupied
ht.deleted = falses(len2+1)
ht.mylength[1] = len2
end
function Base.resize!(ht::QueueHashTable,len::Int64)
len2 = next_power_of_two(len)
len2<=(ht.mylength[1]+1) && return
V2 = similarqueuehash(HashedQueue, reinterpret(Int64, len2))
new_occupied = falses(len2)
len2 -= 1
pos = 0
for data in ht.table
pos += 1
!ht.occupied[pos] && continue
value_ = UInt64(data.value & UInt128(0x7FFFFFFFFFFFFFFF))
index1 = value_ & len2
index2 = data.value2
i = 0
while true
idx = reinterpret(Int64, (index1 + i * index2) & len2 + 1) # try bitcast( ) instead
i += 1
if !new_occupied[idx] # V2[idx].value==0
V2[idx] = data
new_occupied[idx] = true
break
end
end
end
clearhashvector(ht.table)
ht.table = V2
ht.occupied = new_occupied
ht.deleted = falses(len2+1)
ht.mylength[1] = len2
end
# Method to empty the QueueHashTable
function Base.empty!(ht::QueueHashTable)
lock(ht.lock)
fill!(ht.occupied, false)
fill!(ht.deleted, false)
unlock(ht.lock)
end
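# Commented-out usage sketch complementing `test_queuehashing` below: `pushqueue!(ht, key)`
# returns `true` exactly when `key` was already present (and stores it otherwise), which is what
# `Base.haskey` relies on by calling it with `write=false`.
#=function example_queuehashtable()
    ht = QueueHashTable(8)
    pushqueue!(ht, [1,2,3])      # false: key was new and is now stored
    pushqueue!(ht, [1,2,3])      # true:  key already present
    haskey(ht, [1,2,3])          # true, without modifying the table
    delete!(ht, [1,2,3])
    return haskey(ht, [1,2,3])   # false again
end=#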
struct EmptyQueueHashTable
end
@inline function pushqueue!(ht::EmptyQueueHashTable, key)
return false
end
@inline function extend(ht::EmptyQueueHashTable)
return nothing
end
@inline function Base.resize!(ht::EmptyQueueHashTable, len::Int64)
return nothing
end
@inline function Base.empty!(ht::EmptyQueueHashTable)
return nothing
end
## USE FOLLOWING CODE FOR TESTING
function test_queuehashing()
# Create a QueueHashTable with an initial size
ht = HighVoronoi.QueueHashTable(8)
# Define some Vector{Int64} values to be used as keys
keys = [
[1, 2, 3],
[4, 5, 6],
[7, 8, 9],
[10, 11, 12],
[13, 14, 15],
[16, 17, 18],
[19, 20, 21],
[22, 23, 24]
]
# Insert keys into the QueueHashTable
for key in keys
HighVoronoi.pushqueue!(ht, key)
end
# Test extending the QueueHashTable by adding more keys
more_keys = [
[25, 26, 27],
[28, 29, 30],
[31, 32, 33],
[34, 35, 36]
]
for key in more_keys
HighVoronoi.pushqueue!(ht, key)
end
resize!(ht,20)
for key in more_keys
print(HighVoronoi.pushqueue!(ht, key),", ")
end
print(HighVoronoi.pushqueue!(ht, [1,8,7]),", ")
println()
# Check the status of the table after all insertions
println("QueueHashTable after insertions:")
for i in 1:length(ht.table)
if ht.occupied[i]
println("Index $i: ", ht.table[i])
else
println("Index $i: empty")
end
end
# Test the empty! function
HighVoronoi.empty!(ht)
println("\nQueueHashTable after calling empty!:")
for i in 1:length(ht.table)
if ht.occupied[i]
println("Index $i: ", ht.table[i])
else
println("Index $i: empty")
end
end
# Test extending the QueueHashTable again by adding keys
for key in keys
HighVoronoi.pushqueue!(ht, key)
end
println("\nQueueHashTable after re-inserting keys:")
for i in 1:length(ht.table)
if ht.occupied[i]
println("Index $i: ", ht.table[i])
else
println("Index $i: empty")
end
end
return true
end | HighVoronoi | https://github.com/martinheida/HighVoronoi.jl.git |
|
[
"MIT"
] | 1.3.1 | bc1b8bfb168b8850d28c935b20a545882a9a078e | code | 7736 |
###########################################################################################################################################################
###########################################################################################################################################################
## Threadsafe Queues
###########################################################################################################################################################
###########################################################################################################################################################
struct ThreadsafeQueue{K, VK<:AbstractVector{K},REL,Q<:Union{QueueHashTable,EmptyQueueHashTable}}
data::VK
positions::MVector{1, Int64}
initsize::Int64
lock::REL
empty::K
queuehash::Q
end
function ThreadsafeQueue(d::VK,empty::K,mode = SingleThread(),is::Int64=20) where {K, VK<:AbstractVector{K}}
lock = locktype(mode)
myqht(::SingleThread) = EmptyQueueHashTable()
myqht(::MultiThread) = QueueHashTable(1000,SingleThread())
q = myqht(mode)
ThreadsafeQueue{K, VK, typeof(lock),typeof(q)}(d, MVector{1, Int64}([0]),is,lock,empty,q)
end
ThreadsafeQueue{K}(empty::K,mode=SingleThread(),is::Int64=20) where {K} = ThreadsafeQueue(Vector{K}(undef, 0),empty,mode,is)
@inline Base.resize!(queue::ThreadsafeQueue{K, VK,R}, newsize::Int) where {K, VK<:AbstractVector{K},R} = begin
    #islocked(queue.lock) && println("unexpected: lock already held")
lock(queue.lock)
resize!(queue.data, newsize)
resize!(queue.queuehash, 2*newsize)
unlock(queue.lock)
end
@inline Base.size(queue::ThreadsafeQueue{K, VK,R}) where {K, VK<:AbstractVector{K},R} = (queue.positions[1], )
@inline Base.length(queue::ThreadsafeQueue{K, VK,R}) where {K, VK<:AbstractVector{K},R} = size(queue)[1]
@inline Base.sizehint!(queue::ThreadsafeQueue{K, VK,R}, newsize::Int) where {K, VK<:AbstractVector{K}, R} = resize!(queue, newsize)
@inline function Base.push!(queue::ThreadsafeQueue{K, VK,R}, k::K) where {K, VK<:AbstractVector{K},R}
    #islocked(queue.lock) && println("unexpected: lock already held")
lock(queue.lock)
ret = false
if !(pushqueue!(queue.queuehash,k[1])) # only if entry is not yet present in queue
ret = true
queue.positions[1] += 1
if queue.positions[1] > length(queue.data)
resize!(queue, queue.positions[1] + queue.initsize)
end
queue.data[queue.positions[1]] = k
end
unlock(queue.lock)
return ret
end
@inline function Base.pop!(queue::ThreadsafeQueue{K, VK,R}) where {K, VK<:AbstractVector{K},R}
ret = queue.empty
    #islocked(queue.lock) && println("unexpected: lock already held")
lock(queue.lock)
if queue.positions[1]>0
ret = queue.data[queue.positions[1]]
queue.positions[1] -= 1
end
unlock(queue.lock)
return ret
end
@inline function Base.empty!(queue::ThreadsafeQueue{K, VK,R}) where {K, VK<:AbstractVector{K},R}
    #islocked(queue.lock) && println("unexpected: lock already held")
lock(queue.lock)
queue.positions[1] = 0
empty!(queue.queuehash)
unlock(queue.lock)
end
@inline function Base.isempty(queue::ThreadsafeQueue{K, VK,R}) where {K, VK<:AbstractVector{K},R}
return queue.positions[1] == 0
end
isempty_entry(queue::ThreadsafeQueue{K, VK,R},item::K) where {K, VK<:AbstractVector{K},R} = (item === queue.empty)
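# Commented-out usage sketch: the queue stores `sig => r` entries; `pop!` on an empty queue
# returns the `empty` sentinel, which `isempty_entry` detects via `===`. With the default
# `SingleThread()` mode the duplicate check is a no-op (EmptyQueueHashTable), while a
# `MultiThread` mode installs a real QueueHashTable so that a `sig` can only be queued once.
#=function example_threadsafe_queue()
    empty_entry = Int64[] => SVector(0.0,0.0)
    q = ThreadsafeQueue{typeof(empty_entry)}(empty_entry)       # default: SingleThread()
    push!(q, [1,2,3] => SVector(0.5,0.5))
    item = pop!(q)
    isempty_entry(q, item)           # false: a genuine entry was popped
    return isempty_entry(q, pop!(q)) # true: queue exhausted, sentinel returned
end=#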
struct ParallelQueueData{TQ<:ThreadsafeQueue,M,R}
queue::TQ
mesh::M
_cell::MVector{1,Int64}
buffer_sig::Vector{Int64}
buffer::SVector{10,Int64}
master::R
ParallelQueueData(q::TQ_,m::M_,master::R_) where {TQ_<:ThreadsafeQueue,M_,R_} = new{TQ_,M_,R_}(q,m,MVector{1,Int64}([0]),Int64[],zeros(SVector{10,Int64}),master)
ParallelQueueData(q::TQ_,m::M_,cell,master::R_) where {TQ_<:ThreadsafeQueue,M_,R_} = new{TQ_,M_,R_}(q,m,cell,Int64[],zeros(SVector{10,Int64}),master)
end
@inline Base.getproperty(cd::PQD, prop::Symbol) where {PQD<:ParallelQueueData} = dyncast_get(cd,Val(prop))
@inline @generated dyncast_get(cd::PQD, ::Val{:cell}) where {PQD<:ParallelQueueData} = :(getfield(cd,:_cell)[1])
@inline @generated dyncast_get(cd::PQD, ::Val{:lock}) where {PQD<:ParallelQueueData} = :(getfield(cd,:queue).lock)
@inline @generated dyncast_get(cd::PQD, d::Val{S}) where {PQD<:ParallelQueueData,S} = :( getfield(cd, S))
@inline Base.setproperty!(cd::PQD, prop::Symbol, val) where {PQD<:ParallelQueueData} = dyncast_set(cd,Val(prop),val)
@inline @generated dyncast_set(cd::PQD, ::Val{:cell},val) where {PQD<:ParallelQueueData} = :(getfield(cd,:_cell)[1]=val)
@inline @generated dyncast_set(cd::PQD, ::Val{S},val) where {PQD<:ParallelQueueData,S} = :(setfield(cd,S,val))
@inline Base.resize!(queue::PQD, newsize::Int) where {PQD<:ParallelQueueData} = resize!(queue.queue,newsize)
@inline Base.size(queue::PQD) where {PQD<:ParallelQueueData} = size(queue.queue)
@inline Base.length(queue::PQD) where {PQD<:ParallelQueueData} = size(queue)[1]
@inline Base.sizehint!(queue::PQD, newsize::Int) where {PQD<:ParallelQueueData} = sizehint!(queue.queue,newsize)
@inline function Base.push!(queue::PQD, k::K) where {K, PQD<:ParallelQueueData}
lock(queue.lock)
_Cell = queue.cell
ret = _push!(queue,k) # set true if something actually pushed
sig = k[1]
r = k[2]
sig2 = _internal_indeces(queue.mesh,sig,queue.buffer_sig)
for q in queue.master.queues
_c = q.cell
_c==_Cell && continue
!(_c in sig2) && continue
sig3 = copy(sig2)
_external_indeces(q.mesh,sig3)
_push!(q,sig3=>r)
end
unlock(queue.lock)
#sig2 = internal_sig(queue.mesh,copy(k[1]),staticfalse)
#push!(queue.master,sig2,queue.cell)
return ret
end
@inline _push!(queue::PQD, k::K) where {K, PQD<:ParallelQueueData} = push!(queue.queue,k)
@inline Base.pop!(queue::PQD) where {PQD<:ParallelQueueData} = pop!(queue.queue)
@inline Base.empty!(queue::PQD) where {PQD<:ParallelQueueData} = begin
queue.cell = 0
empty!(queue.queue)
end
@inline activate_queue_cell(queue::PQD,cell) where {PQD<:ParallelQueueData} = begin
queue.cell = internal_index(queue.mesh,cell)
end
#@inline activate_queue_cell(queue::PQD,cell) where PQD = nothing
@inline Base.isempty(queue::PQD) where {PQD<:ParallelQueueData} = isempty(queue.queue)
@inline isempty_entry(queue::PQD,item::K) where {PQD<:ParallelQueueData,K} = isempty_entry(queue.queue,item)
struct ParallelQueues{PQD<:ParallelQueueData}
queues::Vector{PQD}
buffer::SVector{10,Int64} # separate variables in cache
lock::ReentrantLock
function ParallelQueues(threads::Int64,generator)
q,m = generator(1)
firstentry = ParallelQueueData(q,m,nothing)
queues = Vector{typeof(firstentry)}(undef,threads)
queues[1] = firstentry
for i in 2:threads
q_i,m_i = generator(i)
queues[i] = ParallelQueueData(q_i,m_i,nothing)
end
p = new{typeof(firstentry)}(queues,zeros(SVector{10,Int64}),ReentrantLock())
secondentry = ParallelQueueData(q,m,firstentry._cell,p)
_queues = Vector{typeof(secondentry)}(undef,threads)
_queues[1] = secondentry
for i in 2:threads
q_i,m_i = generator(i)
_queues[i] = ParallelQueueData(q_i,m_i,queues[i]._cell,p)
end
return new{typeof(secondentry)}(_queues,zeros(SVector{10,Int64}),p.lock)
end
end
function Base.push!(pq::PQ,k,skip::Int64) where {PQ<:ParallelQueues}
end
| HighVoronoi | https://github.com/martinheida/HighVoronoi.jl.git |
|
[
"MIT"
] | 1.3.1 | bc1b8bfb168b8850d28c935b20a545882a9a078e | code | 4490 | struct qs_step
left::Int64
right::Int64
end
mutable struct qs_data
data::Vector{qs_step}
counter::Int64
lsteps::Int64
end
function qs_data(len::Int64)
    return qs_data(Vector{qs_step}(undef,len),0,len)
end
function add_qs(left,right,data::qs_data)
if left<right
data.counter += 1
if data.counter>data.lsteps
            data.lsteps += max(10,round(Int64,data.lsteps/10))
resize!(data.data,data.lsteps)
end
data.data[data.counter] = qs_step(left,right)
end
end
function pop_qs(data)
if data.counter>0
data.counter -= 1
return data.data[data.counter+1].left,data.data[data.counter+1].right
else
return 100,0
end
end
function quicksort!(neigh,area,inter)
lsteps = round(Int64,length(neigh)/2)
left=1
right=length(neigh)
data = qs_data(lsteps)
while (left<right)
split = split!(neigh,area,inter,left,right)
add_qs(left, split - 1,data)
add_qs(split + 1, right,data)
left,right=pop_qs(data)
#println(left,right)
end
end
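# Commented-out illustration: `quicksort!` sorts `neigh` in place and applies the same
# permutation to `area` and `inter`, keeping the three parallel arrays aligned.
#=function example_quicksort()
    neigh = [3, 1, 2]
    area  = [0.3, 0.1, 0.2]
    inter = [[3.0], [1.0], [2.0]]
    quicksort!(neigh, area, inter)
    # now neigh == [1,2,3], area == [0.1,0.2,0.3], inter == [[1.0],[2.0],[3.0]]
end=#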
function parallelquicksort!(x...)
x2=(x[1],)
le=length(x[1])
for i in 2:length(x)
if typeof(x[i])!=Nothing && length(x[i])>=le
x2=(x2...,x[i])
end
end
_parallelquicksort!(1,length(first(x2)),x2)
end
function _parallelquicksort!(left,right,x::Tuple)
lsteps = round(Int64,right/2)
data = qs_data(lsteps)
while (left<right)
split = _parallelsplit!(left,right,x)
add_qs(left, split - 1,data)
add_qs(split + 1, right,data)
left,right=pop_qs(data)
end
end
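# `parallelquicksort!` generalizes this to any number of arrays: it sorts by the first argument
# and co-permutes every further argument that is neither `nothing` nor shorter than the first one.
# Commented-out sketch:
#=function example_parallelquicksort()
    keys   = [2, 3, 1]
    values = ["b", "c", "a"]
    parallelquicksort!(keys, values, nothing)   # `nothing` entries are simply ignored
    # now keys == [1,2,3] and values == ["a","b","c"]
end=#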
#=@generated function switchdata(x::T, i::Int, j::Int) where T <: Tuple{Vararg{AbstractVector}}
N = length(T.parameters)
swaps = [:(x[$k][i], x[$k][j] = x[$k][j], x[$k][i]) for k in 1:N]
quote
@inline
Expr(:block, $swaps...)
end
end=#
function _parallelsplit!(left,right,x::T) where T<:Tuple
i = left
    # start with j just to the left of the pivot element
j = right - 1
neigh=x[1]
pivot = neigh[right]
while i < j
    # scan from the left for an element larger than the pivot element
while i < j && neigh[i] <= pivot
i = i + 1
end
    # scan from the right for an element not larger than the pivot element
while j > i && neigh[j] > pivot
j = j - 1
end
if neigh[i] > neigh[j]
for k in 1:length(x)
x[k][i], x[k][j] = x[k][j], x[k][i]
end
end
end
    # swap the pivot element (neigh[right]) into its final position (neigh[i]),
    # return the new position of the pivot element and stop this iteration
if neigh[i] > pivot
#switch data[i] with data[right] :
for k in 1:length(x)
buffer=x[k][i]
x[k][i]=x[k][right]
x[k][right]=buffer
end
else
i = right
end
return i
end
function split!(neigh,area,inter,left,right)
i = left
    # start with j just to the left of the pivot element
j = right - 1
pivot = neigh[right]
while i < j
    # scan from the left for an element larger than the pivot element
while i < j && neigh[i] <= pivot
i = i + 1
end
    # scan from the right for an element not larger than the pivot element
while j > i && neigh[j] > pivot
j = j - 1
end
if neigh[i] > neigh[j]
#switch data[i] with data[j] :
N=neigh[i]
A=area[i]
I=inter[i]
neigh[i]=neigh[j]
area[i]=area[j]
inter[i]=inter[j]
neigh[j]=N
area[j]=A
inter[j]=I
end
end
    # swap the pivot element (neigh[right]) into its final position (neigh[i]),
    # return the new position of the pivot element and stop this iteration
if neigh[i] > pivot
#switch data[i] with data[right] :
N=neigh[i]
A=area[i]
I=inter[i]
neigh[i]=neigh[right]
area[i]=area[right]
inter[i]=inter[right]
neigh[right]=N
area[right]=A
inter[right]=I
else
i = right
end
return i
end
| HighVoronoi | https://github.com/martinheida/HighVoronoi.jl.git |
|
[
"MIT"
] | 1.3.1 | bc1b8bfb168b8850d28c935b20a545882a9a078e | code | 17070 | ########################################################################################################################################
########################################################################################################################################
########################################################################################################################################
## MyTree
########################################################################################################################################
########################################################################################################################################
########################################################################################################################################
#=
struct MyTree{T,TT,TTT}
tree::T
extended_xs::TTT
active::BitVector
size::Int64
mirrors::Int64
function MyTree(xs,b::Boundary;perturbed_nodes=false)
#println(typeof(xs))
t = SearchTree(xs)
l = length(b)
#t=BallTree(xs)#VoronoiNodes(xs,perturbation=perturbed_nodes ? 1.0E-10 : 0.0))
exs = ExtendedNodes(xs,b)
#exs = append!(copy(xs),Vector{typeof(xs[1])}(undef,l))
a=BitVector(zeros(Int8,l))
return new{typeof(t),eltype(xs),typeof(exs)}(t,exs,a,length(xs),l)
end
function MyTree(exs::ExtendedNodes)
t = SearchTree(exs.data)
l = length(exs.boundary)
a = BitVector(zeros(Int8,l))
return new{typeof(t),eltype(xs),typeof(exs)}(t,exs,a,length(xs),l)
end
end
function _nn(tree::MyTree,x::Point,skip=(x->false))::Tuple{Int64,Float64}
idx , dists=knn(tree.tree,x,1,false,skip)
b=length(idx)>0
index::Int64 = b ? idx[1] : 0
dist::Float64 = b ? dists[1] : Inf64::Float64
lm=tree.mirrors
if lm==0 return index, dist end
for i in 1:lm
( !tree.active[i] || skip(tree.size+i) ) && continue
d=norm(x-tree.extended_xs[i+tree.size])
if d<dist
index=i+tree.size
dist=d
end
end
return index, dist
end
function _inrange(tree::MyTree,x,r)
idx = inrange(tree.tree,x,r)
lm=tree.mirrors
if lm==0 return idx end
for i in 1:lm
( !tree.active[i] ) && continue
d=norm(x-tree.extended_xs[i+tree.size])
if d<r
append!(idx,i+tree.size)
end
end
return idx
end
=#
########################################################################################################################################
########################################################################################################################################
########################################################################################################################################
## MyBruteTree
########################################################################################################################################
########################################################################################################################################
########################################################################################################################################
#=struct MyBruteTree{TT}
extended_xs::Vector{TT}
ib::Vector{Int64}
valid_indeces::Vector{Int64}
end
function MyBruteTree(xs)
return MyBruteTree{typeof(xs[1])}(xs,zeros(Int64,10),collect(1:length(xs)))
end
function _nn(searcher,x,skip=x->false)::Tuple{Int64,Float64}
return _nn(searcher.tree,x,skip)
end
function _nn(tree::MyBruteTree,x::Point,skip=x->false)::Tuple{Int64,Float64}
index = 0
dist = Inf64
lxs = length(tree.extended_xs)
for i in tree.valid_indeces
skip(i) && continue
d=norm(x-tree.extended_xs[i])
if d<dist
index=i
dist=d
end
end
return index, dist
end
function _inrange(tree::MyBruteTree,x,r)
lib = length(tree.ib)
idx = tree.ib
count = 0
for i in 1:lib
d = norm(x-tree.extended_xs[i])
if d<r
count += 1
if count>lib
lib += 10
resize!(idx,lib)
end
idx[count] = i
end
end
return copy(view(idx,1:count))
end
=#
########################################################################################################################################
########################################################################################################################################
########################################################################################################################################
## RAYCAST
########################################################################################################################################
########################################################################################################################################
########################################################################################################################################
struct RaycastParameter{FLOAT,TREE,R,T,C}
variance_tol::FLOAT
break_tol::FLOAT
b_nodes_tol::FLOAT
plane_tolerance::FLOAT
ray_tol::FLOAT
nntree::TREE
method::R
threading::T
copynodes::C
end
@inline variance_tol(::Type{Float64}) = 1.0E-15
@inline break_tol(::Type{Float64}) = 1.0E-5
@inline b_nodes_tol(::Type{Float64}) = 1.0E-7
@inline plane_tolerance(::Type{Float64}) = 1.0E-12
@inline ray_tol(::Type{Float64}) = 1.0E-12
@inline variance_tol(::Type{Float32}) = 5.0E-10
@inline break_tol(::Type{Float32}) = 1.0E-5
@inline b_nodes_tol(::Type{Float32}) = 1.0E-7
@inline plane_tolerance(::Type{Float32}) = 1.0E-5
@inline ray_tol(::Type{Float32}) = 1.0E-12
#@inline variance_tol(::Type{Float32}) = 3.5E-10
#@inline break_tol(::Type{Float32}) = 3.0E-4
#@inline b_nodes_tol(::Type{Float32}) = 1.0E-5
#@inline plane_tolerance(::Type{Float32}) = 3.0E-6
#@inline ray_tol(::Type{Float32}) = 3.0E-6
struct Raycast_Original end
const RCOriginal = Raycast_Original()
struct Raycast_Non_General end
const RCNonGeneral = Raycast_Non_General()
struct Raycast_Combined end
const RCCombined = Raycast_Combined()
const RCCopyNodes = statictrue
const RCNoCopyNodes = staticfalse
locktype(::MultiThread) = Base.ReentrantLock()
locktype(::SingleThread) = nothing
Base.lock(::Nothing) = nothing
Base.unlock(::Nothing) = nothing
Base.notify(::Nothing) = nothing
copynodes(xs,::StaticFalse) = xs
copynodes(xs,::StaticTrue) = copy(xs)
MultyRaycast(r,::SingleThread) = r
function MultyRaycast(r,mt::MultiThread)
nthreads = mt.sub_threads
ret = Vector{typeof(r)}(undef,nthreads)
ret[1]=r
for i in 2:nthreads
ret[i] = copy_RaycastIncircleSkip(r)
end
return ret
end
const RCStandard = RCNonGeneral
RaycastParameter{FLOAT}(;variance_tol = variance_tol(FLOAT),
break_tol = break_tol(FLOAT),
b_nodes_tol = b_nodes_tol(FLOAT),
plane_tolerance = plane_tolerance(FLOAT),
ray_tol = ray_tol(FLOAT),
nntree = VI_KD, method= RCStandard,
threading = SingleThread(),
copynodes = RCCopyNodes, kwargs... ) where {FLOAT<:Real} = RaycastParameter{FLOAT,typeof(nntree),typeof(method),typeof(threading),typeof(copynodes)}(FLOAT(variance_tol),
FLOAT(break_tol),
FLOAT(b_nodes_tol),
FLOAT(plane_tolerance),
FLOAT(ray_tol), nntree, method, threading, copynodes )
RaycastParameter{FLOAT}(RP::RaycastParameter;variance_tol = FLOAT(RP.variance_tol),
break_tol = FLOAT(RP.break_tol),
b_nodes_tol = FLOAT(RP.b_nodes_tol),
plane_tolerance = FLOAT(RP.plane_tolerance),
ray_tol = FLOAT(RP.ray_tol),
nntree = RP.nntree, method=RP.method, threading = RP.threading,
copynodes = RP.copynodes, kwargs... ) where {FLOAT<:Real} = RaycastParameter{FLOAT,typeof(nntree),typeof(method),typeof(threading),typeof(copynodes)}(FLOAT(variance_tol),
FLOAT(break_tol),
FLOAT(b_nodes_tol),
FLOAT(plane_tolerance),
FLOAT(ray_tol), nntree, method, threading, copynodes )
RaycastParameter(::Type{T};kwargs...) where T = RaycastParameter{T}(;kwargs...)
RaycastParameter(::Type{T},RP::RaycastParameter;kwargs...) where T = RaycastParameter{T}(RP;kwargs...)
RaycastParameter(::Type{T},NT::NamedTuple;kwargs...) where T = RaycastParameter{T}(;NT...,kwargs...)
RaycastParameter(r1::RaycastParameter{FLOAT},RP::RaycastParameter) where FLOAT = RaycastParameter(FLOAT,r1,variance_tol = FLOAT(RP.variance_tol),
break_tol = FLOAT(RP.break_tol),
b_nodes_tol = FLOAT(RP.b_nodes_tol),
plane_tolerance = FLOAT(RP.plane_tolerance),
ray_tol = FLOAT(RP.ray_tol),
nntree = RP.nntree, method=RP.method, threading=RP.threading, copynodes = RP.copynodes)
RaycastParameter(r1::RaycastParameter{FLOAT},kwargs::NamedTuple) where FLOAT = RaycastParameter(FLOAT,r1;kwargs...)
Base.merge(r1::RaycastParameter{FLOAT},RP::RaycastParameter) where FLOAT = RaycastParameter(r1,RP)
Base.merge(r1::RaycastParameter{FLOAT},tup::NamedTuple) where FLOAT = RaycastParameter(FLOAT,r1;tup...)
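# Usage sketch (illustration only, not executed; it relies solely on the constructors and
# `Base.merge` methods defined above):
#=
p64 = RaycastParameter(Float64)                        # all defaults for Float64
p32 = RaycastParameter(Float32; method = RCOriginal)   # override the raycast method
p   = merge(p64, (variance_tol = 1.0E-14,))            # NamedTuple override via Base.merge
=#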
struct MiniRaycast{T,B}
tree::T
domain::B
end
struct MultiThreadRaycaster{R}
raycaster::R
buffer::MVector{10,Int64}
MultiThreadRaycaster(r::RR) where RR = new{RR}(r,zeros(MVector{10,Int64}))
end
function getMultiThreadRaycasters(rc::RR,meshes::PP) where {RR,PP}
nthreads = length(meshes.meshes)
MTR(r)=MultiThreadRaycaster(r)
proto = MTR(RaycastIncircleSkip(nodes(meshes.meshes[1].mesh),rc.domain,rc.parameters) )
rcs = Vector{typeof(proto)}(undef,nthreads)
rcs[1] = proto
for i in 2:nthreads
rcs[i] = MTR(RaycastIncircleSkip(nodes(meshes.meshes[i].mesh),rc.domain,rc.parameters) )
end
return rcs
end
mutable struct RaycastIncircleSkip{T,TTT,TTTTT,TTTTTT,FEI,PA,FLOAT}
tree::T
lmesh::Int64
lboundary::Int64
visited::Vector{Int64}
ts::Vector{FLOAT}
positions::BitVector
vectors::Matrix{Float64}
symmetric::Matrix{FLOAT}
rhs::Vector{Float64}
rhs_cg::Vector{Float64}
ddd::Vector{FLOAT}
domain::Boundary
rare_events::Vector{Int64}
dimension::Int64
edgeiterator::TTT
edgeiterator2::TTT
xs::TTTTT
general_edgeiterator::TTTTTT
find_general_edgeiterator::TTTTTT
FEIStorage_global::FEI
parameters::PA
end
function RaycastIncircleSkip(xs_::HN,dom,parameters::RaycastParameter{FLOAT,TREE}) where {P,HN<:HVNodes{P},FLOAT,TREE}
xs = copynodes(xs_,parameters.copynodes)
lxs=length(xs)
dim=size(eltype(xs))[1]#length(xs[1])
z1d_1=zeros(FLOAT,lxs+length(dom))
z1d_2=zeros(Float64,dim)
z1d_3=zeros(Float64,dim)
z1d_4=zeros(FLOAT,dim+1)
z2d_1=zeros(Float64,dim,dim)
z2d_2=zeros(FLOAT,dim,dim)
tree = ExtendedTree(xs,dom,parameters.nntree)
EI = FastEdgeIterator(zeros(P),1E-8)
EI2 = FastEdgeIterator(zeros(P),1E-8)
FEIStorage_global = ThreadSafeDict(Dict{Vector{Int64},DimFEIStorage{length(xs[1])}}(),parameters.threading)
#sizehint!(FEIStorage_global,length(xs)*2^(length(xs[1])-1))
return RaycastIncircleSkip( tree, lxs, length(dom), zeros(Int64,lxs+length(dom)+3), z1d_1,
BitVector(zeros(Int8,length(xs))), z2d_1, z2d_2, z1d_2, z1d_3, z1d_4, dom,
zeros(Int64,SRI_max),dim,EI,EI2,xs,General_EdgeIterator(size(eltype(xs))[1]),General_EdgeIterator(size(eltype(xs))[1]),FEIStorage_global,parameters)
end
function copy_RaycastIncircleSkip(original::RaycastIncircleSkip{T, TTT, TTTTT, TTTTTT, FEI, PA, FLOAT}) where {T, TTT, TTTTT, TTTTTT, FEI, PA, FLOAT}
# Create a new tree using the copy constructor of ExtendedTree
new_tree = ExtendedTree(original.tree) # ExtendedTree(original.tree)
P = eltype(original.xs)
# Create new edge iterators
new_EI = FastEdgeIterator(zeros(P), original.edgeiterator.ray_tol)
new_EI2 = FastEdgeIterator(zeros(P), original.edgeiterator2.ray_tol)
# Create new general edge iterators
new_general_EI = General_EdgeIterator(size(P)[1])
new_find_general_EI = General_EdgeIterator(size(P)[1])
# Create a new RaycastIncircleSkip object with copied fields and newly created iterators and tree
return RaycastIncircleSkip(
new_tree,
original.lmesh,
original.lboundary,
copy(original.visited),
copy(original.ts),
copy(original.positions),
copy(original.vectors),
copy(original.symmetric),
copy(original.rhs),
copy(original.rhs_cg),
copy(original.ddd),
original.domain,
copy(original.rare_events),
original.dimension,
new_EI,
new_EI2,
original.xs,
new_general_EI,
new_find_general_EI,
original.FEIStorage_global,
original.parameters
)
end
@inline Base.getproperty(cd::RaycastIncircleSkip, prop::Symbol) = dyncast_get(cd,Val(prop))
@inline @generated dyncast_get(cd::RaycastIncircleSkip, ::Val{:variance_tol}) = :(getfield(cd,:parameters).variance_tol)
@inline @generated dyncast_get(cd::RaycastIncircleSkip, ::Val{:break_tol}) = :(getfield(cd,:parameters).break_tol)
@inline @generated dyncast_get(cd::RaycastIncircleSkip, ::Val{:b_nodes_tol}) = :(getfield(cd,:parameters).b_nodes_tol)
@inline @generated dyncast_get(cd::RaycastIncircleSkip, ::Val{:plane_tolerance}) = :(getfield(cd,:parameters).plane_tolerance)
@inline @generated dyncast_get(cd::RaycastIncircleSkip, ::Val{:ray_tol}) = :(getfield(cd,:parameters).ray_tol)
@inline @generated dyncast_get(cd::RaycastIncircleSkip, ::Val{S}) where S = :( getfield(cd, S))
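#= Illustration (sketch) of the property forwarding defined above: for a searcher
   rc::RaycastIncircleSkip,
       rc.variance_tol    # expands to getfield(rc, :parameters).variance_tol
       rc.tree            # any other symbol falls back to the plain field access getfield(rc, :tree)
=#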
#=function Base.getproperty(rics::RaycastIncircleSkip, sym::Symbol)
if sym == :variance_tol
return rics.parameters.variance_tol
elseif sym == :break_tol
return rics.parameters.break_tol
elseif sym == :b_nodes_tol
return rics.parameters.b_nodes_tol
elseif sym == :plane_tolerance
return rics.parameters.plane_tolerance
elseif sym == :ray_tol
return rics.parameters.ray_tol
else
return getfield(rics, sym)
end
end
=#
# SRI = search rare index
const SRI_vertex_tolerance_breach = 1 #
const SRI_vertex_suboptimal_correction = 2 #
const SRI_vertex_irreparable = 3 #
const SRI_raycast = 4
const SRI_deactivate_boundary = 5 #
const SRI_activate_mirror = 6 #
const SRI_irregular_node = 7
const SRI_irregular_node_calculated = 8
const SRI_out_of_line_vertex = 9
const SRI_out_of_line_is_multi = 10
const SRI_out_of_line_is_severe_multi = 11
const SRI_descent_out_of_vertex_line = 12
const SRI_fake_vertex = 13
const SRI_check_fake_vertex = 14
const SRI_walkray = 15 #
const SRI_descent = 16 #
const SRI_vertex = 17
const SRI_boundary_vertex = 18
const SRI_nn = 19
const SRI_max = 20
function vp_print(searcher::RaycastIncircleSkip; rare_events=true,mirrors=false)
if rare_events
println("$(searcher.rare_events), that means: ")
if searcher.rare_events[SRI_vertex_tolerance_breach]>0
println("$(searcher.rare_events[SRI_vertex_tolerance_breach]) Tolerance breaches in vertex calculations. Among them: ")
println(" $(searcher.rare_events[SRI_vertex_suboptimal_correction]) Tolerance breaches with non-optimal corrections")
(searcher.rare_events[SRI_vertex_irreparable]>0) && println(" $(searcher.rare_events[SRI_vertex_irreparable]) Tolerance breaches were irreparable")
end
if mirrors
println("$(searcher.rare_events[SRI_activate_mirror]) cases a mirror was activated")
println(" $(searcher.rare_events[SRI_deactivate_boundary]) cases it was temporarily deactivated")
end
# somehow a suspicion of irregular node within raycast(...)
# println("$(searcher.rare_events[SRI_irregular_node]) suspicions of irregular verteces")
println("$(searcher.rare_events[SRI_irregular_node_calculated]) irregular vertices calculated")
if (searcher.rare_events[SRI_out_of_line_vertex]>0)
println("$(searcher.rare_events[SRI_out_of_line_vertex]) verteces were out of line in appearance")
println(" $(searcher.rare_events[SRI_out_of_line_is_multi]) of them were multi-verteces")
println(" $(searcher.rare_events[SRI_descent_out_of_vertex_line]) appeared in descent algorithm")
end
end
end
RaycastData = Vector{Any}(undef,2)
######## default raycast
"""
Raycast(xs;variance_tol=1.0E-20,break_tol=1.0E-5,domain=Boundary(),bruteforce=false)
Initializes the standard searcher for computation of Voronoi meshes.
# Arguments
- `xs::Points`: An array of points, preferably of SVector-type to speed up the algorithm
- `variance_tol`: when the variance of (distance of a vertex to its nodes)^2 is larger than this value, the vertex candidate will be corrected
- `break_tol` : when the aforementioned variance is even larger than this (which happens only on quasi-periodic grids), it is a sign that something is going badly
        wrong. The vertex is therefore skipped. This typically happens "outside" the quasi-periodic domain
- `b_nodes_tol`: when a vertex evidently should lie on the boundary but is slightly apart from it by a distance less than `b_nodes_tol`, it will be corrected.
        Otherwise it will be dumped.
- `nodes_tol=1.0E-5`: the threshold below which a node is considered to be part of
        an irregular vertex.
- `domain` : when a vertex is found that lies outside of `domain`, the algorithm will not look for further neighbor vertices. However,
        the vertex itself will be stored.
- `bruteforce`: when set to `true` the algorithm will use a BruteTree instead of a KDTree
- `fastiterator`: when set to `true` this will choose an iterator with slightly higher speed on quasi-periodic meshes at the cost of much higher memory usage
- `periodic_searcher`: when `0` this will use an early algorithm to handle periodic grids. This is still the only available version for 2d, but is replaced with the
default `1` in higher dimensions
"""
function Raycast(xs;domain=Boundary(), options=RaycastParameter(eltype(eltype(xs))))
return RaycastIncircleSkip(xs,domain,options)
end
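# Usage sketch (illustration only; `SVector` comes from StaticArrays, which this package already uses):
#=
xs = [rand(SVector{3,Float64}) for _ in 1:100]                       # random generators in 3D
rc = Raycast(xs)                                                     # default domain and parameters
rc2 = Raycast(xs; options = RaycastParameter(Float64; method = RCOriginal))
=#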
####################################################################################################################################
####################################################################################################################################
####################################################################################################################################
####################################################################################################################################
## Steepest Descent to find a vertex from thin air
####################################################################################################################################
####################################################################################################################################
####################################################################################################################################
####################################################################################################################################
""" starting at given points, run the ray shooting descent to find vertices """
function descent(xs::Points, searcher::RaycastIncircleSkip, start)
searcher.rare_events[SRI_descent] += 1
dim = searcher.dimension
sig = [start]
r = xs[start]
minimal_edge = zeros(Int64,dim+1)
keep_searching = true
count = 0
while keep_searching
count += 1
if count == 10
println("Stupid Error should never happen")
error("descent failed at node $start: $(searcher.tree.active), $xs")
end
sig = [start]
r = xs[start]
minimal_edge .= 0
minimal_edge[1] = start
my_vv = 1.0
try
for k in 1:dim # find an additional generator for each dimension
#println("$k ---------------------------------------------------- ")
u = 0*r
u += randray(xs[minimal_edge[1:k]],map(i->view(searcher.vectors,:,i),1:(dim-1)),count,xs,start)
#=if k==1
u = -r
u = normalize(u)
end=#
generator, t, r2 = raycast_des(sig, r, u, xs, searcher,0,sig,sig,Raycast_By_Descend())
b = false
if t == Inf
u = -u
generator, t, r2 = raycast_des(sig, r, u, xs, searcher,0,sig,sig,Raycast_By_Descend())
end
if t == Inf
error("Could not find a vertex in both directions of current point." *
"Consider increasing search range (tmax)")
end
r = r2
minimal_edge[k+1] = generator
my_vv = vertex_variance(view(minimal_edge,1:(k+1)),r,xs,k,view(searcher.ddd,1:(k+1)))
my_vv>searcher.variance_tol && error("$my_vv, $minimal_edge")
#identify_multivertex(searcher, sig, r, vertex_variance(view(minimal_edge,1:(k+1)),r,xs,k,view(searcher.ddd,1:(k+1))))
end
catch
rethrow()
            my_vv=1.0 # unreachable: the rethrow() above already exits this catch block
end
my_vv>searcher.variance_tol && continue
keep_searching = vertex_variance(view(minimal_edge,1:(dim+1)),r,xs,dim,view(searcher.ddd,1:(dim+1)))>searcher.variance_tol
end
sort!(sig)
r = project(r,searcher.domain)
r,_ = walkray_correct_vertex(r, sig, searcher, minimal_edge,minimal_edge[dim+1])
return (sig, r)
end
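# Overview (sketch) of `descent`: starting from the single generator `start`, each of the `dim`
# iterations (i) builds a direction u orthogonal to the generators found so far via `randray`,
# (ii) raycasts along u (and along -u if the first cast escapes to infinity) to pick up one
# further generator, and (iii) checks the vertex variance. After `dim` successful casts, the
# dim+1 generators in `minimal_edge` define a Voronoi vertex, which is finally projected onto
# the domain and corrected by `walkray_correct_vertex`.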
####################################################################################################################################
####################################################################################################################################
####################################################################################################################################
####################################################################################################################################
## WALKRAY
####################################################################################################################################
####################################################################################################################################
####################################################################################################################################
####################################################################################################################################
""" find the vertex connected to `v` by moving away from its `i`-th generator """
function walkray(full_edge::Sigma, r::Point, xs::Points, searcher, sig, u, edge)
searcher.rare_events[SRI_walkray] += 1
success = true
k=0
lsig = length(sig)
while true
k+=1
!(sig[k] in full_edge) && break
if k==lsig
println("There is an odd situation")
error("")
end
end
Rest = sig[k]
generator, t, r2 = raycast_des(full_edge, r, u, xs, searcher, Rest,edge,sig,Raycast_By_Walkray())
exception_raycast(t,r,sig,edge,u,searcher)
if t==0.0
return sig, r, false
end
if t < Inf
sig2 = full_edge
r2,success,vv = walkray_correct_vertex(r2, sig2, searcher, edge, generator)
return sig2, r2, success
else
return sig, r, false # if the vertex has an unbounded ray, return the same vertex
end
end
#=""" generate a random ray orthogonal to the subspace spanned by the given points """
function randray(xs::Points)
k = length(xs)
d = length(xs[1])
v = similar(xs, k-1)
# Gram Schmidt
for i in 1:k-1
v[i] = xs[i] .- xs[k]
for j in 1:(i-1)
v[i] = v[i] .- dot(v[i], v[j]) .* v[j]
end
v[i] = normalize(v[i])
for j in 1:(i-1)
v[i] = v[i] .- dot(v[i], v[j]) .* v[j]
end
v[i] = normalize(v[i])
end
u = randn(d)
for i in 1:k-1
u = u - dot(u, v[i]) * v[i]
end
u = normalize(u)
for i in 1:k-1
u = u - dot(u, v[i]) * v[i]
end
u = normalize(u)
return u
end=#
#=function rand_oriented(dim,base,start)
ret = zeros(Float64,dim)
elements = min(50,length(base))
for i in 1:elements
ret .+= base[i]
end
ret ./= elements
ret .-= base[start]
normalize!(ret)
ret .+= 0.1 .* normalize!(randn(dim))
return ret
end=#
function randray(xs::Points,v,count::Int64=0,base=xs,start=1)
k = length(xs)
d = length(xs[1])
# println(typeof(xs),xs)
# println(typeof(v),v)
# Gram Schmidt
for i in 1:k-1
map!(j->xs[i][j] - xs[k][j],v[i],1:d)
for j in 1:(i-1)
v[i] .-= dot(v[i], v[j]) .* v[j]
end
normalize!(v[i])
for j in 1:(i-1)
v[i] .-= dot(v[i], v[j]) .* v[j]
end
normalize!(v[i])
end
    u = count<8 ? randn(d) : rand_oriented(d,base,start) # `d` is the dimension variable in this scope
for i in 1:k-1
u .-= dot(u, v[i]) .* v[i]
end
normalize!(u)
for i in 1:k-1
u .-= dot(u, v[i]) .* v[i]
end
normalize!(u)
return u
end
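#= Contract sketch for `randray` (illustration only): the returned u is numerically orthogonal
   to the differences xs[i]-xs[k] of the given generators, e.g.
       k = length(xs); d = length(xs[1])
       v = [zeros(d) for _ in 1:k-1]        # scratch space, one vector per Gram-Schmidt step
       u = randray(xs, v)
       all(i -> abs(dot(u, xs[i] - xs[k])) < 1e-10, 1:k-1)   # expected to hold up to rounding
=#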
function walkray_correct_vertex(_r, _sig, searcher, minimal_edge, new_generator)
dim = searcher.dimension
r=_r
vv = searcher.variance_tol
#println("hier mit $sig, $_r, $correct_bulk")
sig = _sig
# correct_bulk=false
sig = view(searcher.visited,1:(dim+1))
for i in 1:dim
searcher.visited[i] = minimal_edge[i]
end
searcher.visited[dim+1] = new_generator
vv = vertex_variance(sig,r,searcher.tree.extended_xs,dim,searcher.ddd)
b = vv>searcher.break_tol
i = 0
while i<3 && vv>0.0001*searcher.variance_tol
i += 1
r = _correct_vertex(sig,searcher.tree.extended_xs,searcher,r)
vv = vertex_variance(sig,r,searcher.tree.extended_xs,dim,searcher.ddd)
end
exception_walray_correct_vertex(b,vv,searcher,sig,r)
#r2 = _correct_vertex(sig,searcher.tree.extended_xs,searcher,_r)
if vv>searcher.variance_tol && vv<searcher.break_tol
r = _correct_vertex(sig,searcher.tree.extended_xs,searcher,r)
vv = vertex_variance(sig,r,searcher.tree.extended_xs,dim,searcher.ddd)
searcher.rare_events[SRI_vertex_tolerance_breach] += 1
end
if vv>searcher.break_tol
searcher.rare_events[SRI_vertex_irreparable] += 1
return r, false, vv
else
if vv>searcher.variance_tol
searcher.rare_events[SRI_vertex_suboptimal_correction] += 1
end
return adjust_boundary_vertex(r, searcher.domain, sig, searcher.lmesh, length(sig), searcher.b_nodes_tol), true, vv
end
end
####################################################################################################################################
## Check precision of calculated vertex and correct if precision is too low
####################################################################################################################################
function _correct_vertex(sig,xs,searcher,x)
dim=length(xs[1])
#print(typeof(x),"->")
diff=sum(abs2,xs[sig[dim+1]])
searcher.rhs_cg.*=0
searcher.vectors.*=0
searcher.rhs .= x
for i in 1:dim
searcher.vectors[:,i]=xs[sig[i]] - xs[sig[dim+1]]
searcher.rhs_cg.+=(0.5*(sum(abs2,xs[sig[i]])-diff)).*searcher.vectors[:,i]
end
searcher.symmetric.*=0
for i in 1:dim
for j in i:dim
for k in 1:dim
searcher.symmetric[i,j]+=searcher.vectors[i,k]*searcher.vectors[j,k]
end
end
end
for i in 2:dim
for j in 1:(i-1)
searcher.symmetric[i,j]=searcher.symmetric[j,i]
end
end
solution1 = 0*x
cg!(searcher.rhs,searcher.symmetric,searcher.rhs_cg,log=false)
solution2 = typeof(solution1)( searcher.rhs)
#println(typeof(solution2))
return solution2
end
function vertex_variance(sig,r,xs::Points,dimension=length(xs[1]),distances=zeros(Float64,dimension+1))
for kk in 1:(dimension+1)
#sig[kk]== 0 && error("$sig")
distances[kk]=sum(abs2,xs[sig[kk]]-r)
# print("s: $(sig[kk]), dist: $(distances[kk])")
end
d=(-1)*sum(view(distances,1:(dimension+1)))/(dimension+1)
distances.+=d
return sum(abs2,view(distances,1:(dimension+1)))/(d^2)
end
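# What vertex_variance returns (sketch): with d_k = |x_{sig[k]} - r|^2 the squared distances of r
# to its generators, the value is sum_k (d_k - mean(d))^2 / mean(d)^2, i.e. the summed squared
# deviation of the squared distances relative to their mean. For an exact vertex all d_k coincide
# and the result is 0.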
function vertex_variance(sig,r,searcher)
lsig = length(sig)
if sig[lsig]>searcher.lmesh
i = lsig
while sig[i]>searcher.lmesh
i-=1
            i==0 && println("i=0 should not actually happen")
end
activate_cell( searcher, sig[1], view(sig,(i+1):lsig) )
end
return vertex_variance(sig,r,searcher.tree.extended_xs,lsig-1,view(searcher.ts,1:lsig))
end
########################################################################################################################################
########################################################################################################################################
########################################################################################################################################
## Handling MIRROR NODES
########################################################################################################################################
########################################################################################################################################
########################################################################################################################################
@Base.propagate_inbounds function activate_cell(searcher,_Cell,neigh)
lxs=searcher.tree.size
ln = length(neigh)
i = ln
while i>0
n = neigh[i]
i -= 1
n<=lxs && break
activate_mirror(searcher,_Cell,n-lxs)
end
end
@inline activate_cell(::Nothing,_,_) = nothing
@Base.propagate_inbounds function activate_mirror(searcher,i,plane)
if searcher.tree.active[plane]
return false
end
searcher.tree.active[plane]=true
searcher.tree.extended_xs[searcher.tree.size+plane]=reflect(searcher.tree.extended_xs[i],searcher.domain,plane)
return true
# println("node $i : activate $plane <-> $(searcher.tree.size+plane) ; $(searcher.tree.extended_xs[i]) <-> $(searcher.tree.extended_xs[searcher.tree.size+plane])")
end
########################################################################################################################################
## raycast-method
########################################################################################################################################
function myskips(xs::Points,i::Int64,c::Float64,u::Point,_Cell::Int64,sig::Sigma)#,ts,r)
#b = dot(xs[i], u) <= c
#= if !b
new_t = get_t(r,u,xs[_Cell],xs[i])
if new_t<ts[1]
ts[1] = new_t
else
b=true
end
end=#
return dot(xs[i], u) <= c
end
function get_t(r,u,x0,x_new)
return (sum(abs2, r - x_new) - sum(abs2, r - x0)) / (2 * u' * (x_new-x0))
end
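# Derivation of get_t (sketch): t is chosen so that r + t*u is equidistant from x0 and x_new,
# i.e. |r + t*u - x0|^2 = |r + t*u - x_new|^2. The t^2 terms cancel, leaving
#     |r - x0|^2 + 2t*u'(r - x0) = |r - x_new|^2 + 2t*u'(r - x_new)
#  => 2t*u'(x_new - x0) = |r - x_new|^2 - |r - x0|^2
#  => t = (|r - x_new|^2 - |r - x0|^2) / (2*u'(x_new - x0)),
# which is exactly the expression returned above.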
struct Raycast_By_Descend
end
struct Raycast_By_Walkray
end
function correct_cast(r,r2,u,edge,generator,origin,searcher,cast_type::Raycast_By_Descend)
return r2
end
function correct_cast(r,r2,u,edge,generator,origin,searcher,cast_type::Raycast_By_Walkray)
r3,success,vv = walkray_correct_vertex(r2, origin, searcher, edge, generator)
!success && (r2=r)
return r3
end
#=function verify_edge(sig,r,u,edge,searcher,origin,xs)
ortho = sum(s->abs(dot(u,xs[s]-xs[edge[1]])),sig) # should be almost zero
check = sum(s->max(0.0,dot(u,xs[s]-xs[edge[1]])),origin) # should be zero
check2 = sum(s->dot(u,xs[s]-xs[edge[1]])<-1E-10 ? 1 : 0 , origin)+length(sig)-length(origin) # should be zero
b=true
if ortho>1E-10 || abs(check)>1E-10 || check2!=0
b=false
println("Broken edge of vertex: $origin, $edge, $sig")
println("$ortho, $check, $check2 ")
for s in eachindex(origin)
println(xs[s]-r)
end
end
return b
end
function display_ortho_process(sig,r,xs,searcher)
end
=#
function verify_vertex(sig,r,xs,searcher,output=StaticBool{false})
idx = sort!(_inrange(searcher.tree,r,norm(r-xs[sig[1]])*(1+1E-8)))
b = true
for i in eachindex(sig)
b &= sig[i] in idx
end
for i in eachindex(idx)
b &= idx[i] in sig
end
output==true && !b && println(" $sig and $idx not identical in list with $(length(xs)) entries!")
b &= vertex_variance(sig,r,xs,length(sig)-1)<1E-20
output==true && !b && println(" var_sig = $(vertex_variance(sig,r,xs,length(sig)-1)), var_idx = $(vertex_variance(idx,r,xs,length(idx)-1))")
dim = length(xs[1])
AA = zeros(Float64,length(sig),dim)
for i in eachindex(sig)
AA[i,:] .= xs[sig[i]]
end
Q,R = qr(AA)
b&=abs(R[end,end])>1E-8
#=if output==true && !b
println(my_dim,base)
end=#
#output==true && !b && println("orthogonality & dimensionality: $u")
return b
end
function raycast_des2(sig::Sigma, old_r, u, xs, searcher::RaycastIncircleSkip, old ,edge,origin,cast_type,::Raycast_Combined)
data = searcher.tree.tree.data
plane_tolerance = searcher.plane_tolerance
x0 = xs[edge[1]]
old_r_ = old_r + u * dot(u , (x0-old_r))
r = old_r_ + u * dot(u , (x0-old_r_))
searcher.rare_events[SRI_raycast] += 1
reset!(data,origin,r,x0,u,plane_tolerance,xs)
search_vertex2(searcher.tree,data.r,data.bestnode,data.bestdist)
generator = data.bestnode[1]
t = generator!=0 ? 1.0 : Inf64 #get_t(r,u,x0,xs[generator]) : Inf64
generator == 0 && (return generator, t, old_r)
ll = length(sig)+length(data.sigma)
unique!(sort!(append!(sig,data.sigma)))
#if ll>length(sig)
# error("")
#end
new_r = data.new_r
r2 = correct_cast(r,new_r,u,edge,generator,origin,searcher,cast_type)
return generator, t, r2
end
function raycast_des2(sig::Sigma, r, u, xs, searcher::RaycastIncircleSkip, old ,edge,origin,cast_type,::Raycast_Non_General)
max_int = typemax(Int64)
x0 = xs[edge[1]]
c1 = maximum(dot(xs[g], u) for g in sig)
c2 = abs(c1)
c = c1 + c2*searcher.plane_tolerance
skip = _i->myskips(xs,_i,c,u,edge[1],sig)
vvv = r + u * dot(u , (x0-r))
i, t = _nn(searcher.tree, vvv, skip)
t == Inf && return 0, Inf, r
x = xs[i]
t = get_t(r,u,x0,x) #(sum(abs2, r - x) - sum(abs2, r - x0)) / (2 * u' * (x-x0))
vvv = r+t*u
i, t = _nn(searcher.tree, vvv, skip)
if i!=0
x = xs[i]
t = get_t(r,u,x0,x) #(sum(abs2, r - x) - sum(abs2, r - x0)) / (2 * u' * (x-x0))
end
_r = r+t*u
measure = maximum(norm(xs[s]-_r) for s in sig)
old_measure = measure
upper_t = t+2*measure
idss = _inrange(searcher.tree,_r,(1+searcher.b_nodes_tol*100)*measure)
lidss = length(idss)
# println("1: ",map(k->k<max_int ? k : 0,idss))
lidss==0 && (return 0, Inf64, r)
ts = view(searcher.ts,1:lidss)
map!(i-> i∈origin ? 0.0 : get_t(r,u,x0,xs[i]) ,ts, idss)
k=0
while k<lidss
k += 1
if ts[k]<searcher.plane_tolerance
idss[k]=max_int
ts[k] = 0.0
elseif ts[k]<upper_t
upper_t = ts[k]
end
end
max_dist = 0.0
generator = 0
upper_t += 10E-8#searcher.plane_tolerance
k=0
while k<lidss
k += 1
if ts[k]>upper_t
ts[k] = 0.0
elseif idss[k]<max_int
ts[k] = dot(u,xs[idss[k]]-x0)
if ts[k]>max_dist
max_dist = ts[k]
generator = idss[k]
end
end
end
#println("3: ",map(k->k<max_int ? k : 0,idss))
#end
generator==0 && (return 0, Inf64, r)
# println(" -> -> -> -> -> ",dot(u,xs[generator]-xs[edge[1]]))
t = get_t(r,u,x0,xs[generator])# (sum(abs2, r - xs[generator]) - sum(abs2, r - x0)) / (2 * u' * (xs[generator]-x0))
#println(generator,", ",r2)
r2 = correct_cast(r,r+t*u,u,edge,generator,origin,searcher,cast_type)
#if typeof(cast_type)==Raycast_By_Walkray
measure2 = maximum(norm(xs[s]-r2) for s in sig)
measure2 = max(measure2, norm(xs[generator]-r2)) * (1+0.1*searcher.b_nodes_tol)
k=0
while k<lidss
k += 1
if idss[k]<max_int && norm(xs[idss[k]]-r2)>measure2
idss[k] = max_int
end
end
append!(sig,filter!(i->i<max_int,idss))
sort!(sig)
return generator, t, r2
end
function raycast_des2(sig::Sigma, r, u, xs, searcher::RaycastIncircleSkip, old ,edge,origin,cast_type,::Raycast_Original)
x0 = xs[edge[1]]
c1 = maximum(dot(xs[g], u) for g in sig)
c2 = abs(c1)
c = c1 + c2*searcher.plane_tolerance
skip = _i->myskips(xs,_i,c,u,edge[1],sig)
vvv = r + u * dot(u , (x0-r))
i, t = _nn(searcher.tree, vvv, skip)
t == Inf && return 0, Inf, r
i2 = i
while i!=0
x = xs[i]
t = get_t(r,u,x0,x) #(sum(abs2, r - x) - sum(abs2, r - x0)) / (2 * u' * (x-x0))
vvv = r+t*u
i, t = _nn(searcher.tree, vvv)
(i==i2 || (i in sig)) && (i=0)
i!=0 && (i2 = i)
end
x2 = xs[i2]
t = get_t(r,u,x0,x2) #(sum(abs2, r - x) - sum(abs2, r - x0)) / (2 * u' * (x-x0))
_r = r+t*u
append!(sig,i2)
sort!(sig)
return i2, t, _r
end
@inline raycast_des(sig, r, u, xs, searcher, old ,edge,origin,cast_type) = raycast_des(sig, r, u, xs, searcher, old ,edge,origin,cast_type,searcher.parameters.method)
@inline raycast_des(sig, r, u, xs, searcher, old ,edge,origin,cast_type,method) = raycast_des2(sig, r, u, xs, searcher, old ,edge,origin,cast_type,method)
########################################################################################################################################
########################################################################################################################################
########################################################################################################################################
## Raycast several nodes case
########################################################################################################################################
########################################################################################################################################
########################################################################################################################################
#=function add_multi_vert_inds(r,sig,idx,searcher,measure,vv)
xs = searcher.tree.extended_xs
buffer = searcher.visited
count = 1
#searcher.rare_events[9]+=1
for i in idx
i in sig && continue
if (abs2(sum(abs2,r-xs[i])-measure^2)<vv)
buffer[count] = i
count += 1
end
end
if count>1
newidx = view(buffer,1:(count-1))
sort!(append!(sig,newidx))
searcher.rare_events[SRI_irregular_node_calculated] += length(sig)>searcher.dimension+1
length(sig)>searcher.dimension+1 && println(sig)
end
end
=#
# helping data structure:
struct IncrementalInt64Vector <: AbstractVector{Int64}
data::Vector{Int64}
positions::MVector{2, Int64}
IncrementalInt64Vector(i::Int64) = new(Vector{Int64}(undef, i), MVector{2, Int64}((0, i)))
end
Base.size(dw::IncrementalInt64Vector) = (dw.positions[1],)
@inline Base.getindex(dw::IncrementalInt64Vector, i::Int64) = dw.data[i]
@inline Base.setindex!(dw::IncrementalInt64Vector, value::Int64, i::Int64) = (dw.data[i] = value)
# Definition of the push! method
function Base.push!(dw::IncrementalInt64Vector, value::Int64)
dw.positions[1] += 1
if dw.positions[1] > dw.positions[2]
dw.positions[2] *= 2
resize!(dw.data, dw.positions[2])
end
dw.data[dw.positions[1]] = value
return dw
end
reset!(dw::IncrementalInt64Vector) = (dw.positions[1]=0)
function set!(dw::IncrementalInt64Vector,v::AVI) where {AVI<:AbstractVector{Int64}}
l = length(v)
if l > dw.positions[2]
dw.positions[2] = l
resize!(dw.data, l)
end
dw.positions[1] = l
@inbounds dw.data[1:l] .= v
end
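#= Usage sketch for IncrementalInt64Vector (illustration only):
   v = IncrementalInt64Vector(4)   # capacity 4, length 0
   push!(v, 7); push!(v, 9)        # length(v) == 2; capacity doubles automatically when exceeded
   set!(v, [1, 2, 3])              # overwrite the contents, length(v) == 3
   reset!(v)                       # length(v) == 0, capacity is kept
=#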
abstract type HVUnstructuredTree end
abstract type AbstractTree{P <: Point} end # Abstract type HVTree
# Abstract function nodes for HVTree
nodes(tree::AbstractTree) = error("method not implemented")
# Descendants of HVUnstructuredTree
struct HVKDTree <: HVUnstructuredTree end
struct HVHVKDTree <: HVUnstructuredTree end
struct HVBallTree <: HVUnstructuredTree end
struct HVBruteTree <: HVUnstructuredTree end
const VI_KD = HVKDTree()
const VI_BALL = HVBallTree()
const VI_BRUTE = HVBruteTree()
mutable struct NNSearchData{T,T2}
sigma::IncrementalInt64Vector # current container of all generators
t::Float64 # current best guess for t
bestdist::MVector{1,Float64} # current best distance + minor error to be shown to tree algorithm
bestnode::MVector{1,Int64} # current best node shown to tree algorithm
#taboo::Vector{Int64} # nodes that are to be skipped (i.e. all members of edge)
taboo::IncrementalInt64Vector # nodes that are to be skipped (i.e. all members of edge)
    taboo_visited::Vector{Bool}          # the ones that were already visited
c::Float64 # c-offset for comparison
main_c::Float64 # x_0 ⋅ u
current_c::Float64 # current c of current best node
u::T # edge-vector
lt::Int64 # length of taboo vector
visited::Int64 # number of visited elements of taboo
dist_r_x0_2::Float64 # (r-x_0)²
dist_new_r_x0_2::Float64 # (new_r-x_0)²
plane_tolerance::Float64 # tolerance in c
r::T # initial vertex candidate, reference for all nn searches in nn algorithm
x0::T # x0 in classical raycast algorithm
new_r::T # current vertex candidate
point::T2
mins::T2
maxs::T2
lmesh::Int64
no_box_tolerance::Float64
visited_leafs::BitVector
function NNSearchData(p::P,nodes::Int,lmesh) where P
len_p = 2^length(p)
point = MVector(0*p)
#new{P,typeof(point)}(IncrementalInt64Vector(len_p), 0.0, MVector{1, Float64}([0.0]), MVector{1, Int64}([0]),Int64[],Bool[],0.0,0.0,0.0,0*p,0,0,0.0,0.0,0.0, 0*p,0*p,0*p,point,MVector(0*p),MVector(0*p),lmesh,0.0,falses(2))
new{P,typeof(point)}(IncrementalInt64Vector(len_p), 0.0, MVector{1, Float64}([0.0]), MVector{1, Int64}([0]),IncrementalInt64Vector(length(p)+1),Bool[],0.0,0.0,0.0,0*p,0,0,0.0,0.0,0.0, 0*p,0*p,0*p,point,MVector(0*p),MVector(0*p),lmesh,0.0,falses(2))
end
end
function reset!(nn_data::NNSD,taboo,r,x0,u,plane_tolerance,xs) where {T,T2,NNSD<:NNSearchData{T,T2}}
reset!(nn_data.sigma)
nn_data.t = 0#typemax(Float64)
nn_data.bestdist[1] = typemax(Float64)
nn_data.bestnode[1] = 0
#resize!(nn_data.taboo,length(taboo)) #length(taboo)>length(data.)
#nn_data.taboo .= taboo
#nn_data.taboo = taboo
set!(nn_data.taboo, taboo)
nn_data.u = u
nn_data.r = r
nn_data.new_r = r
nn_data.x0 = x0
nn_data.lt = length(taboo)
nn_data.visited = 0
nn_data.lt>length(nn_data.taboo_visited) && resize!(nn_data.taboo_visited,nn_data.lt)
nn_data.taboo_visited .= false
nn_data.plane_tolerance = plane_tolerance
c1 = maximum(dot(xs[g], u) for g in taboo)
c2 = abs(c1)
nn_data.c = c1 + c2 * plane_tolerance
nn_data.main_c = dot(x0,u)
nn_data.current_c = nn_data.main_c
nn_data.dist_r_x0_2 = sum(abs2, r - x0)
nn_data.dist_new_r_x0_2 = typemax(Float64)
nn_data.point .= nn_data.r
fill!(nn_data.visited_leafs,false)
end
@inline function check_point_in_box(point, dmins, dmaxs, d2)
if all(point .>= dmins) && all(point .<= dmaxs)
return true
else
distance = 0.0
@inbounds for i in eachindex(point)
distance += max(dmins[i] - point[i], 0, point[i] - dmaxs[i])^2
end
return distance < d2
end
end
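# Behavior sketch: check_point_in_box returns true if `point` lies inside the axis-aligned box
# [dmins, dmaxs] or if its squared distance to that box is below d2. For example, in 1D,
# check_point_in_box([1.5], [0.0], [1.0], 0.5) is true, since the squared distance 0.25 < 0.5.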
function skip_nodes_on_search(data::NNSearchData{T},x_new,i,dist,boundarymode::S) where {T,S<:StaticBool}
if data.visited<data.lt
id = findfirstassured_sorted(i,data.taboo)
if id>0
data.visited += 1
return true
end
end
# check distance to current candidate
new_dist = dist
_dnrx02 = data.dist_new_r_x0_2
correction = _dnrx02 * 10 * data.plane_tolerance
if new_dist > _dnrx02 + correction # by no means a better candidate
return true
end
# x_new = xs[i]
r = getfield(data,:r)
u = getfield(data,:u)
x0= getfield(data,:x0)
c_new = dot(x_new,u)
c_new<=data.c && (return true) # original raycast exclusion principle
δx = x_new-x0
(typeof(x_new)!=typeof(x0)) && println(typeof(x_new),typeof(x0))
abs_δx = dot(δx,δx)
if abs(new_dist - _dnrx02) < correction # as good as current candidate => degenerate vertex?
abs_δx/new_dist<100*correction && (return true)
push!(data.sigma,i)
if c_new>data.current_c
data.bestnode[1] = eltype(data.bestnode)(i)
data.current_c = c_new
end
return true
end
# if we reach this point, we have a better candidate since new_dist < data.dist_new_r_x0_2 - correction
# short version of get_t:
new_t = (sum(abs2, r - x_new) - data.dist_r_x0_2) / (2 * (c_new-data.main_c))
abs_δx/(new_t^2)<data.plane_tolerance && (return true)
new_r = r + new_t*u
# second order correction for t
t_order_2 = get_t(new_r,u,x0,x_new)
new_r += t_order_2*u
#data.t = new_t+t_order_2
data.new_r = new_r
# set new values for radii
dnrx02 = norm(new_r-x0)
data.dist_new_r_x0_2 = dnrx02^2
newtry = data.bestdist[1]==typemax(Int64)
data.bestdist[1] = (norm(r-new_r)+dnrx02)^2*(1+10000*data.plane_tolerance)
data.bestnode[1] = i
data.current_c = c_new
reset!(data.sigma) # delete all old candidates
push!(data.sigma,i)
if boundarymode==false
if newtry && !check_point_in_box(new_r, data.mins, data.maxs, data.no_box_tolerance)
#data.point = new_r
#reset!(data,data.taboo,new_r,x0,u,data.plane_tolerance,xs)
data.r = new_r
data.dist_r_x0_2 = sum(abs2, new_r - x0)
error("")
end
end
return true
end
# Modified UnstructuredTree
struct UnstructuredTree{P <: Point,T,NNSD<:NNSearchData{P}} <: AbstractTree{P} # Making UnstructuredTree a subtype of HVTree
tree::T # tree.data refers to nodes
data::NNSD
#=function UnstructuredTree(old::UnstructuredTree{P ,T,NNSD}, xs::AbstractVector{P}) where {P <: Point,T,NNSD<:NNSearchData{P}} # Constraining xs to AbstractVector{P <: Point}
xs = old.tree.data
_tree = old.tree
sd = NNSearchData(xs[1],length(_tree.nodes),length(xs))
sd.mins .= _tree.hyper_rec.mins
sd.maxs .= _tree.hyper_rec.maxes
differences = sd.maxs .- sd.mins
min_diff = minimum(differences)
sd.no_box_tolerance = min_diff^2
new{P,T,NNSD}(_tree,sd) # Passing P as an argument to new
end=#
function UnstructuredTree(t::HVUnstructuredTree, xs::AbstractVector{P}) where {P} # Constraining xs to AbstractVector{P <: Point}
_tree = getUnstructuredTree(t, xs)
sd = NNSearchData(xs[1],length(_tree.nodes),length(xs))
sd.mins .= _tree.hyper_rec.mins
sd.maxs .= _tree.hyper_rec.maxes
differences = sd.maxs .- sd.mins
min_diff = minimum(differences)
sd.no_box_tolerance = min_diff^2
new{P,typeof(_tree),typeof(sd)}(_tree,sd) # Passing P as an argument to new
end
end
# Placeholder implementations for getUnstructuredTree
getUnstructuredTree(::HVKDTree, xs) = HVNearestNeighbors.HVKDTree(xs,storedata=true)
#getUnstructuredTree(::HVKDTree, xs) = KDTree(xs,storedata=true)
getUnstructuredTree(::HVBallTree, xs) = BallTree(xs,storedata=true)
getUnstructuredTree(::HVBruteTree, xs) = BruteTree(xs,storedata=true)
# Implement HVTree for HVUnstructuredTree types
HVTree(xs,type::HVUnstructuredTree) = UnstructuredTree(type, xs)
HVTree(xs,type) = UnstructuredTree(HVKDTree(), xs)
# Implement nodes function for UnstructuredTree
@inline nodes(tree::UnstructuredTree) = tree.tree.data
@inline function nn(tree::UnstructuredTree,x,skip=(y->false))
idx , dists=HVNearestNeighbors.knn(tree.tree,x,1,false,skip)
b=length(idx)>0
return b ? (idx[1], dists[1]) : (0,Inf64)
end
@inline knn(tree::UnstructuredTree,x,i,b,skip=(y->false)) = NearestNeighbors.knn(tree.tree,x,i,b,skip)
@inline inrange(tree::UnstructuredTree,x,r) = HVNearestNeighbors.inrange(tree.tree,x,r)
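#= Usage sketch for the tree abstraction (illustration only):
   tree = HVTree(xs, VI_KD)     # wraps an HVKDTree over the nodes xs
   i, d = nn(tree, x)           # index and distance of the nearest node to x (0, Inf if none)
   ids  = inrange(tree, x, r)   # indices of all nodes within distance r of x
=#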
@inline _knn(tree::NearestNeighbors.KDTree, point, idx, dist, skip,bv) = NearestNeighbors._knn(tree, point, idx, dist, skip)
@inline _knn(tree::hVK, point, idx, dist, skip::F,bv) where {hVK<:HVNearestNeighbors.HVKDTree,F<:Function} = HVNearestNeighbors._knn_flex(tree, point, idx, dist, skip,bv)
function search_vertex(tree::UnstructuredTree, point::AbstractVector{T}, idx,dist,data) where {T <: Number}#<:Function}
_knn(tree.tree, point, idx, dist, x->false, data) # sortres=false
end
struct Serial_Domain{P<:Point,M<:AbstractMesh{P}, AM<:AbstractMesh{P}, SI<:HVIntegral{P}, TT<:HVIntegral{P}} <: AbstractDomain{P}
boundary::Boundary
shifts::Vector{Vector{Float64}}
references::SerialVector_Vector{Int64}
reference_shifts::SerialVector_Vector{BitVector}
internal_boundary::Boundary
_mesh::M
_internal_mesh::AM
_integral::SI
_internal_integral::TT
reflections::SerialVector_Vector{BitVector}
_lref::MVector{1,Int64}
end
struct Serial_Domain_Store_1{P<:Point,M<:AbstractMesh{P}, AM<:AbstractMesh{P}, SI<:HVIntegral{P}, TT<:HVIntegral{P}}
boundary::Boundary
shifts::Vector{Vector{Float64}}
references::SerialVector_Vector{Int64}
reference_shifts::SerialVector_Vector{BitVector}
internal_boundary::Boundary
#_mesh
_internal_mesh::AM
#_integral
_internal_integral
reflections::SerialVector_Vector{BitVector}
_lref::MVector{1,Int64}
end
JLD2.writeas(::Type{Serial_Domain{P,M, AM, SI, TT}}) where {P,M, AM, SI, TT} = Serial_Domain_Store_1{P,M, AM, SI, TT}
JLD2.wconvert(::Type{Serial_Domain_Store_1{P,M, AM, SI, TT}},domain::Serial_Domain{P,M, AM, SI, TT} ) where {P,M, AM, SI, TT} =
Serial_Domain_Store_1{P,M, AM, SI, TT}(
domain.boundary,
domain.shifts,
domain.references,
domain.reference_shifts,
domain.internal_boundary,
domain._internal_mesh,
pack_integral(domain._internal_integral.integral), # Initializing this field as per your requirement
domain.reflections,domain._lref
)
function JLD2.rconvert(::Type{Serial_Domain{P,M, AM, SI, TT}},store::Serial_Domain_Store_1{P,M, AM, SI, TT}) where {P,M, AM, SI, TT}
integral = unpack_integral(store._internal_integral,store._internal_mesh.data)
m1,m2,i1,i2 = Serial_Domain_Data(store._internal_mesh.data,integral)
sd = Serial_Domain{P,M, AM, SI, TT}(
store.boundary,
store.shifts,
store.references,
store.reference_shifts,
store.internal_boundary,
m2,m1,i2,i1,
store.reflections,store._lref
)
standardize(sd)
return sd
end
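#= Effect of the JLD2 hooks above (sketch; assumes the standard JLD2/FileIO API `jldsave`/`load`):
   jldsave("domain.jld2"; domain = my_serial_domain)   # written as a Serial_Domain_Store_1
   restored = load("domain.jld2", "domain")            # rconvert rebuilds a Serial_Domain
=#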
function Serial_Domain_Data(mesh::SerialMeshVector,inte = SerialIntegral(mesh.meshes[1].mesh,true, mesh.dimensions[1], mesh))
public_mesh = standardize_mesh(mesh)
public_integral = standardize_integral(inte,mesh)
return ExplicitMeshContainer(mesh), ExplicitMeshContainer(public_mesh), ExplicitIntegralContainer(inte), ExplicitIntegralContainer(public_integral)
end
function Serial_Domain_Data(mesh::SerialMeshTuple,inte = SerialIntegral(mesh.meshes[1].mesh,true, mesh.dimensions[1], mesh))
public_mesh = standardize_mesh(mesh)
public_integral = standardize_integral(inte,mesh)
return MeshContainer(mesh), MeshContainer(public_mesh), IntegralContainer(inte), IntegralContainer(public_integral)
end
function Serial_Domain(mesh,_boundary::Boundary, _shifts::Vector{Vector{Float64}}, _reference_shifts::Vector{BitVector}, _references::Vector{Int64},internal=boundary)
lref = MVector{1,Int64}([0])
#references = SerialVector_Vector(collect(1:10),mesh.dimensions[1])
references = SerialVector_Vector(_references,mesh.dimensions[1])
reference_shifts = SerialVector_Vector(_reference_shifts,mesh.dimensions[1])
reflections = SerialVector_Vector{BitVector}([falses(length(_boundary)) for i in 1:length(mesh)],mesh.dimensions[1])
#println("test: ",view(references,1:10))
m1,m2,i1,i2 = Serial_Domain_Data(mesh)
return Serial_Domain(_boundary,_shifts,references,reference_shifts,internal,m2,m1,i2,i1,reflections,lref)
end
function Serial_Domain(mesh,boundary)
shifts = periodic_shifts(boundary, size(eltype(nodes(mesh)))[1] )
return Serial_Domain(mesh,boundary,shifts,Vector{BitVector}(),Vector{Int64}(),copy(boundary))
end
@inline Base.getproperty(cd::Serial_Domain, prop::Symbol) = dyncast_get(cd,Val(prop))
@inline @generated dyncast_get(cd::Serial_Domain, ::Val{:mesh}) = :(getfield(cd,:_mesh).data)
@inline @generated dyncast_get(cd::Serial_Domain, ::Val{:internal_mesh}) = :(getfield(cd,:_internal_mesh).data)
@inline @generated dyncast_get(cd::Serial_Domain, ::Val{:integral}) = :(getfield(cd,:_integral).integral)
@inline @generated dyncast_get(cd::Serial_Domain, ::Val{:internal_integral}) = :(getfield(cd,:_internal_integral).integral)
@inline @generated dyncast_get(cd::Serial_Domain, ::Val{:lref}) = :(getfield(cd,:_lref)[1])
@inline @generated dyncast_get(cd::Serial_Domain, d::Val{S}) where S = :( getfield(cd, S))
@inline Base.setproperty!(cd::Serial_Domain, prop::Symbol, val) = dyncast_set(cd,Val(prop),val)
@inline @generated dyncast_set(cd::Serial_Domain, ::Val{:mesh},val) = :(getfield(cd,:_mesh).data=val)
@inline @generated dyncast_set(cd::Serial_Domain, ::Val{:internal_mesh},val) = :(getfield(cd,:_internal_mesh).data=val)
@inline @generated dyncast_set(cd::Serial_Domain, ::Val{:lref},val) = :(getfield(cd,:_lref)[1]=val)
@inline @generated dyncast_set(cd::Serial_Domain, ::Val{:integral},val) = :(getfield(cd,:_integral).integral=val)
@inline @generated dyncast_set(cd::Serial_Domain, ::Val{:internal_integral},val) = :(getfield(cd,:_internal_integral).integral=val)
@inline internaly_precise(sd::Serial_Domain) = true
@inline Domain(mesh::SerialMesh,boundary::Boundary) = Serial_Domain(mesh,boundary)
@inline boundary(d::Serial_Domain) = d.boundary
@inline mesh(d::SD) where SD<:Serial_Domain = d.mesh
@inline shifts(d::Serial_Domain) = d.shifts
@inline references(d::Serial_Domain) = HVViewVector(IntMeshViewOnVector(d.references,d.mesh),1,d.lref)
@inline reference_shifts(d::Serial_Domain) = HVViewVector(MeshViewOnVector(d.reference_shifts,d.mesh),1,d.lref)
@inline reflections(d::Serial_Domain) = HVViewVector(MeshViewOnVector(d.reflections,d.mesh),(1+d.lref),length(d.mesh))
@inline expand_internal_boundary(domain::Serial_Domain,new_xs) = extend_periodic_part(domain.internal_boundary,new_xs) # shifts the periodic part of the boundary such that new_xs lies completely inside the
# the newly constructed domain
@inline internal_boundary(d::Serial_Domain) = d.internal_boundary
@inline integral(d::Serial_Domain) = d.integral
@inline function set_internal_boundary(d::Serial_Domain,b::Boundary)
empty!(d.internal_boundary.planes)
append!(d.internal_boundary.planes,b.planes)
end
@inline function integrate_view(vd::Serial_Domain)
sv = CombinedView(SwitchView(length(references(vd))+1,length(vd.internal_mesh)),SortedView(vd.internal_mesh.meshes))
return (mesh = MeshView(vd.internal_mesh,sv), integral = IntegralView(vd.internal_integral,sv))
end
@inline standardize_mesh(i_mesh::SM) where {SM<:SerialMesh} = MeshView(i_mesh,SortedView(i_mesh.meshes))
@inline standardize_integral(integral::SI,i_mesh) where {SI<:SerialIntegral} = IntegralView(integral,SortedView(i_mesh.meshes))
@inline function standardize(domain::Serial_Domain)
i_mesh = domain.internal_mesh
domain.mesh = standardize_mesh(i_mesh)
domain.integral = standardize_integral(domain.internal_integral,i_mesh)
end
function prepend!(domain::SD,new_xs::ReflectedNodes;kwargs...) where SD<:Serial_Domain
lnxs = length(new_xs)
l1 = length(references(domain))
_internal_indeces(domain.mesh,new_xs.references) # transform `new_xs.references` to internal references first
domain.internal_mesh = append(domain.internal_mesh,new_xs.data,false)
append!(domain.references,new_xs.references,domain.internal_mesh.dimensions[end])
append!(domain.reference_shifts,new_xs.reference_shifts,domain.internal_mesh.dimensions[end])
append!(domain.reflections,BitVector[],domain.internal_mesh.dimensions[end])
i_mesh = domain.internal_mesh
domain.internal_integral = append(domain.internal_integral, i_mesh, false)
domain.lref = domain.lref + lnxs
standardize(domain)
#domain.mesh = MeshView(domain.internal_mesh,CombinedView(SwitchView(l1+1,l1+lnxs),SortedView(domain.internal_mesh.meshes)))
#sv2 = SortedView2(domain.internal_mesh.meshes)
#println(sv2.data)
#println(sv2 / collect(1:50))
end
function append!(domain::SD,new_xs::HV;kwargs...) where {SD<:Serial_Domain, HV<:HVNodes}
lnxs = length(new_xs)
l1 = length(mesh(domain))
lb = length(domain.boundary)
domain.internal_mesh = append(domain.internal_mesh,new_xs,true)
append!(domain.references,Int64[],domain.internal_mesh.dimensions[end])
append!(domain.reference_shifts,BitVector[],domain.internal_mesh.dimensions[end])
append!(domain.reflections,[falses(lb) for _ in 1:length(new_xs)],domain.internal_mesh.dimensions[end])
i_mesh = domain.internal_mesh
domain.internal_integral = append(domain.internal_integral,i_mesh,true)
standardize(domain)
m = MeshView(domain.internal_mesh,CombinedView(SwitchView(l1+1,l1+lnxs),SortedView(domain.internal_mesh.meshes)))
#i = IntegralView(domain.integral,CombinedView(SwitchView(l1+1,l1+lnxs),SortedView(domain.internal_mesh.meshes)))
return m#,i
end
function Base.copy(sd::SD;resize = 0, kwargs...) where SD<:Serial_Domain
newmesh = copy(sd.internal_mesh;kwargs...)
newintegral = copy(sd.internal_integral,newmesh;kwargs...)
if resize>0
resize_integrals(newintegral,resize)
end
public_mesh = standardize_mesh(newmesh)
public_integral = standardize_integral(newintegral,newmesh)
I = typeof(sd._integral)
II = typeof(sd._internal_integral)
M = typeof(sd._mesh)
IM = typeof(sd._internal_mesh)
return SD(copy(sd.boundary),deepcopy(sd.shifts),copy(sd.references),copy(sd.reference_shifts),copy(sd.internal_boundary),
M(public_mesh),IM(newmesh),I(public_integral),II(newintegral),copy(sd.reflections),sd._lref)
end
###########################################################################################################
## Copy data for SerialIntegrals...
###########################################################################################################
mutable struct ___SI_copy_data{I<:HVIntegral,M<:AbstractMesh,B}
integral::I
visible::B
data::CompoundData
mesh::M
end
###########################################################################################################
## Compound Integrals...
###########################################################################################################
# Modified CompoundIntegral struct
struct CompoundIntegral{P <: Point, T<: HVIntegral{P}, V <: Union{StaticTrue,StaticFalse,Bool}} <: HVIntegral{P}
integral::T
visible::V
data::CompoundData
# Modified constructor for CompoundIntegral
function CompoundIntegral(integral::HVIntegral{P}, visible=true, s=1, _s=1) where {P}
lm = length(integral)
new{P,typeof(integral), typeof(visible)}(integral, visible, CompoundData(s, _s, lm, lm))
end
CompoundIntegral(integral::HVI, visible::V, data::CompoundData) where {P,V, HVI<:HVIntegral{P}} = new{P,HVI, V}(integral, visible, data)
end
#=@inline Base.length(m::CompoundIntegral) = length(mesh(m.integral))
@inline nodes(m::CompoundIntegral) = nodes(mesh(m.integral))
@inline internal_index(m::CM,index::Int64) where CM<:CompoundIntegral = internal_index(mesh(m.integral),index)
@inline external_index(m::CM,index::Int64) where CM<:CompoundIntegral = external_index(mesh(m.integral),index)
@inline external_index(m::CM,inds::AVI) where {CM<:CompoundIntegral,AVI<:AbstractVector{Int64}} = external_index(mesh(m.integral),inds)
@inline internal_index(m::CM,inds::AVI) where {CM<:CompoundIntegral,AVI<:AbstractVector{Int64}} = internal_index(mesh(m.integral),inds)
@inline internal_sig(m::CM,sig::AVI,static::StaticTrue) where {CM<:CompoundIntegral,AVI<:AbstractVector{Int64}} = internal_sig(mesh(m.integral),sig,static)
@inline internal_sig(m::CM,sig::AVI,static::StaticFalse) where {CM<:CompoundIntegral,AVI<:AbstractVector{Int64}} = internal_sig(mesh(m.integral),sig,static)
=#
###########################################################################################################
## Serial Meshes...
###########################################################################################################
# Define the SerialIntegral struct CompoundData
struct SerialIntegral{P <: Point, AM <: SerialMesh{P}, T , D , PARAMS,SVN,SVVol,SVar,SVBI,SVII} <: HVIntegral{P}
mesh::AM
integrals::T
dimensions::D
data::MVector{1,Int64}
parameters::PARAMS
neighbors::SVN
volumes::SVVol
area::SVar
interface_integral::SVII
bulk_integral::SVBI
buffer_sig::Vector{Int64}
enable::MVector{3,Bool}
end
mutable struct Store_compound_integral
integral
visible
data
mesh
end
struct SerialIntegral_Store_Container_1{P, AM, T , D , PARAMS,SVN,SVVol,SVar,SVBI,SVII}
#meshes
integrals
#dimensions::D
data::MVector{1,Int64}
parameters::PARAMS
#neighbors::SVN
#volumes::SVVol
#area::SVar
#interface_integral::SVII
#bulk_integral::SVBI
#buffer_sig::Vector{Int64}
enable::MVector{3,Bool}
end
SerialIntegral_Store_Container_1(si::SI) where {P, AM, T , D , PARAMS,SVN,SVVol,SVar,SVBI,SVII,SI<:SerialIntegral{P, AM, T , D , PARAMS,SVN,SVVol,SVar,SVBI,SVII}} =
SerialIntegral_Store_Container_1{P, AM, T , D , PARAMS,SVN,SVVol,SVar,SVBI,SVII}(map(x->Store_compound_integral(pack_integral(x.integral),x.visible,0,0),si.integrals),si.data,si.parameters,si.enable)
function SerialIntegral(si::SI,new_mesh) where {P, AM, T , D , PARAMS,SVN,SVVol,SVar,SVBI,SVII,SI<:SerialIntegral_Store_Container_1{P, AM, T , D , PARAMS,SVN,SVVol,SVar,SVBI,SVII}}
meshes = new_mesh.meshes
for k in 1:length(si.integrals)
si.integrals[k].data = meshes[k].data
si.integrals[k].mesh = meshes[k].mesh
end
integrals = map(i->CompoundIntegral(unpack_integral(i.integral,i.mesh),i.visible,i.data),si.integrals)
dimensions = map(x->x.data,si.integrals)
inte = integrals[1].integral
_neighbors = SerialVector_Vector(inte.neighbors, dimensions[1])
_volumes = SerialVector_Vector(inte.volumes, dimensions[1])
_area = SerialVector_Vector(inte.area, dimensions[1])
_bi = SerialVector_Vector(inte.bulk_integral, dimensions[1])
_ii = SerialVector_Vector(inte.interface_integral, dimensions[1])
for i in 2:length(integrals)
inte = integrals[i].integral
cdata = dimensions[i]
append!(_neighbors,inte.neighbors,cdata)
append!(_volumes,inte.volumes,cdata)
append!(_area,inte.area,cdata)
append!(_bi,inte.bulk_integral,cdata)
append!(_ii,inte.interface_integral,cdata)
end
return SerialIntegral(new_mesh,integrals,dimensions,si.data,si.parameters,_neighbors,_volumes,_area,
_ii,_bi,Int64[],si.enable)
end
pack_integral(I::SI) where SI<:SerialIntegral = SerialIntegral_Store_Container_1(I)
unpack_integral(I::SI,mesh) where SI<:SerialIntegral_Store_Container_1 = SerialIntegral(I,mesh)
function copy(si::SI,new_mesh=copy(si.mesh)) where {SI<:SerialIntegral}
data = map(Inte->___SI_copy_data(Inte.integral,Inte.visible,Inte.data,mesh(Inte.integral)),si.integrals)
for i in 1:length(si.integrals)
m = new_mesh.meshes[i].mesh
data[i].integral = copy(si.integrals[i].integral,m)
data[i].data = new_mesh.meshes[i].data
data[i].mesh = m
end
integrals = map(x->CompoundIntegral(x.integral,x.visible,x.data),data)
dimensions = map(x->x.data,data)
inte = integrals[1].integral
_neighbors = SerialVector_Vector(inte.neighbors, dimensions[1])
_volumes = SerialVector_Vector(inte.volumes, dimensions[1])
_area = SerialVector_Vector(inte.area, dimensions[1])
_bi = SerialVector_Vector(inte.bulk_integral, dimensions[1])
_ii = SerialVector_Vector(inte.interface_integral, dimensions[1])
for i in 2:length(integrals)
inte = integrals[i].integral
cdata = dimensions[i]
append!(_neighbors,inte.neighbors,cdata)
append!(_volumes,inte.volumes,cdata)
append!(_area,inte.area,cdata)
append!(_bi,inte.bulk_integral,cdata)
append!(_ii,inte.interface_integral,cdata)
end
return SerialIntegral(new_mesh,integrals,dimensions,copy(si.data),si.parameters,_neighbors,_volumes,_area,
_ii,_bi,copy(si.buffer_sig),copy(si.enable))
end
const SerialIntegralTuple{P <: Point, AM <: SerialMesh{P},PARAMS} = SerialIntegral{P, AM , T , D , PARAMS} where {T <: Tuple{Vararg{HVIntegral{P}}}, D <: Tuple{Vararg{CompoundData}}}
const SerialIntegralVector{P <: Point, AM <: SerialMesh{P},MType,PARAMS} = SerialIntegral{P, AM , Vector{MType} , Vector{CompoundData} , PARAMS} where {MType <: HVIntegral{P}}
@inline mesh(i::SerialIntegral) = i.mesh
SerialIntegral(m::VM, visible, newdimensions, meshes::SM;parameters = nothing) where {VM<:Voronoi_MESH, SM<:SerialMeshVector } = SerialIntegralVector(m, visible, newdimensions, meshes, parameters = parameters)
function SerialIntegralVector(m::VM, visible, newdimensions, meshes::SerialMesh;parameters = nothing) where {VM<:Voronoi_MESH }
inte = EmptyVoronoi_Integral(m,parameters=parameters)
newcompound = CompoundIntegral(inte, visible, newdimensions)
m2 = [newcompound,]
dims = [newdimensions,]
_neighbors = SerialVector_Vector(inte.neighbors, newdimensions)
_volumes = SerialVector_Vector(inte.volumes, newdimensions)
_area = SerialVector_Vector(inte.area, newdimensions)
_bi = SerialVector_Vector(inte.bulk_integral, newdimensions)
_ii = SerialVector_Vector(inte.interface_integral, newdimensions)
SerialIntegral(meshes,m2, dims, MVector{1,Int64}([length(m)]),parameters,_neighbors,_volumes,_area,_ii,_bi,Int64[],MVector{3,Bool}([false,false,false]))
end
## everything about SerialIntegralTuple => Needed for later
#=
SerialIntegral(m::VM, visible, newdimensions, meshes::SM;parameters = nothing) where {VM<:Voronoi_MESH, SM<:SerialMeshTuple } = SerialIntegralTuple(m, visible, newdimensions, meshes, parameters = parameters)
function SerialIntegralTuple(m::VM, visible, newdimensions, meshes::SerialMesh;parameters = nothing) where {VM<:Voronoi_MESH }
inte = EmptyVoronoi_Integral(m,parameters=parameters)
newcompound = CompoundIntegral(inte, visible, newdimensions)
m2 = (newcompound,)
dims = (newdimensions,)
_neighbors = SerialVector_Vector(inte.neighbors, newdimensions)
_volumes = SerialVector_Vector(inte.volumes, newdimensions)
_area = SerialVector_Vector(inte.area, newdimensions)
_bi = SerialVector_Vector(inte.bulk_integral, newdimensions)
_ii = SerialVector_Vector(inte.interface_integral, newdimensions)
SerialIntegral(meshes,m2, dims, MVector{1,Int64}([length(m)]),parameters,_neighbors,_volumes,_area,_ii,_bi,Int64[],MVector{3,Bool}([false,false,false]))
end
function SerialIntegralTuple(old_inte::SI, m::VM, visible, newdimensions, meshes::SerialMesh) where {SI<:SerialIntegralTuple,VM<:Voronoi_MESH }
parameters=old_inte.parameters
inte = EmptyVoronoi_Integral(m,parameters=parameters)
newcompound = CompoundIntegral(inte, visible, newdimensions)
m2 = (old_inte.integrals...,newcompound,)
dims = (old_inte.dimensions...,newdimensions,)
cdata = newdimensions
_neighbors = append(old_inte.neighbors,inte.neighbors,cdata)#SerialVector_Vector(inte.neighbors, newdimensions)
_volumes = append(old_inte.volumes,inte.volumes,cdata)
_area = append(old_inte.area,inte.area,cdata)
_bi = append(old_inte.bulk_integral,inte.bulk_integral,cdata)
_ii = append(old_inte.interface_integral,inte.interface_integral,cdata)
SerialIntegral(meshes,m2, dims, MVector{1,Int64}([length(m)]),parameters,_neighbors,_volumes,_area,_ii,_bi,Int64[],MVector{3,Bool}([false,false,false]))
end
@inline append(m::SerialIntegralTuple,i_mesh,visible=true) = SerialIntegralTuple(m, i_mesh.meshes[end].mesh, visible, i_mesh.dimensions[end], i_mesh)
=#
function append!(m::SM,n::MType,cdata::CompoundData,visible=true) where {SM<:SerialIntegralVector,MType<:AbstractMesh}#P <: Point, VDB <: VertexDB{P},MType<:HVIntegral{P,VDB},PARAMS,RT, SM<:SerialIntegralVector{P,VDB,MType,PARAMS,RT}}
inte = EmptyVoronoi_Integral(n,parameters=m.parameters)
newcompound = CompoundIntegral(inte, visible, cdata)
push!(m.integrals, newcompound)
push!(m.dimensions,newcompound.data)
append!(m.neighbors,inte.neighbors,cdata)
append!(m.volumes,inte.volumes,cdata)
append!(m.area,inte.area,cdata)
append!(m.bulk_integral,inte.bulk_integral,cdata)
append!(m.interface_integral,inte.interface_integral,cdata)
enable_block(m,length(m.integrals),false)
m.data[1] += length(n)
return m
end
@inline append(m::SerialIntegralVector,i_mesh,visible=true) = append!(m,i_mesh.meshes[end].mesh, i_mesh.dimensions[end],visible)
#@inline append(m::SerialIntegralTuple,d,visible=true) = SerialIntegralTuple(m,d,visible)
@inline add_virtual_points(I::SI,m::M) where {SI<:SerialIntegral,M<:AbstractMesh} = append(I,m,false)
@inline Base.getproperty(cd::SerialIntegral, prop::Symbol) = dyncast_get(cd,Val(prop))
@inline @generated dyncast_get(cd::SerialIntegral, ::Val{:length}) = :(getfield(cd,:data)[1])
@inline @generated dyncast_get(cd::SerialIntegral, ::Val{:enable_neighbors}) = :(getfield(cd,:enable)[1])
@inline @generated dyncast_get(cd::SerialIntegral, ::Val{:enable_volume}) = :(getfield(cd,:enable)[2])
@inline @generated dyncast_get(cd::SerialIntegral, ::Val{:enable_integral}) = :(getfield(cd,:enable)[3])
@inline @generated dyncast_get(cd::SerialIntegral, d::Val{S}) where S = :( getfield(cd, S))
@inline Base.setproperty!(cd::SerialIntegral, prop::Symbol, val) = dyncast_set(cd,Val(prop),val)
@inline @generated dyncast_set(cd::SerialIntegral, ::Val{:length},val) = :(getfield(cd,:data)[1]=val)
@inline @generated dyncast_set(cd::SerialIntegral, ::Val{:enable_neighbors},val) = :(getfield(cd,:enable)[1]=val)
@inline @generated dyncast_set(cd::SerialIntegral, ::Val{:enable_volume},val) = :(getfield(cd,:enable)[2]=val)
@inline @generated dyncast_set(cd::SerialIntegral, ::Val{:enable_integral},val) = :(getfield(cd,:enable)[3]=val)
@inline @generated dyncast_set(cd::SerialIntegral, d::Val{S},val) where S = :( setfield!(cd, S,val))
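# The `getproperty`/`setproperty!` overloads above reroute property access through
# `Val`-typed dispatch, so e.g. `I.length` compiles down to `getfield(I,:data)[1]`
# without any runtime symbol comparison. A minimal, self-contained sketch of the same
# pattern (hypothetical `Demo` type, not part of the package):
#=
struct Demo
    data::Vector{Int}
    name::String
end
Base.getproperty(d::Demo, prop::Symbol) = demo_get(d, Val(prop))
@generated demo_get(d::Demo, ::Val{:length}) = :(getfield(d, :data)[1])   # virtual property
@generated demo_get(d::Demo, ::Val{S}) where S = :(getfield(d, S))        # fall through to real fields
d = Demo([7, 8, 9], "cells")
d.length   # 7  (first entry of `data`)
d.name     # "cells"
=#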
@inline length(m::SM) where SM<:SerialIntegral = m.length
@inline internal_length(m::SM) where SM<:SerialIntegral = sum(cd->cd._length,m.dimensions)
"""takes an internal index and returns the meshindex it belongs to"""
@inline meshindex_from_internal(m::SerialIntegral,index) = meshindex_from_internal(m.mesh,index)
"""takes an official index and returns the meshindex it belongs to"""
@inline meshindex_from_external(m::SerialIntegral,index) = meshindex_from_internal(m.mesh,index)
@inline internal_index(m::SM,index::Int64) where SM<:SerialIntegral = internal_index(m.mesh,index)
@inline external_index(m::SM,index::Int64) where SM<:SerialIntegral = external_index(m.mesh,index)
@inline external_index(m::SM,inds::AVI) where {SM<:SerialIntegral,AVI<:AbstractVector{Int64}} = _external_indeces(m.mesh,inds,m.buffer_sig)
@inline internal_index(m::SM,inds::AVI) where {SM<:SerialIntegral,AVI<:AbstractVector{Int64}} = _internal_indeces(m.mesh,inds,m.buffer_sig)
#=@inline function cleanupfilter!(m::S,i) where S<:SerialIntegral
found = meshindex_from_internal(m,i)
cleanupfilter!(m.meshes[found].mesh,i-m.meshes[found].data._start+1)
end
=#
@inline function enable_block(inte::III,i,enforced) where III<:SerialIntegral
!inte.integrals[i].visible && !enforced && return
inte.enable_neighbors && enable_neighbor_data(inte.integrals[i].integral)
inte.enable_volume && enable_geo_data(inte.integrals[i].integral)
inte.enable_integral && enable_integral_data(inte.integrals[i].integral)
end
@inline function enable(inte::III; neighbors=false,volume=false,integral=false,enforced=false) where III<:SerialIntegral
volume |= integral
neighbors |= volume
inte.enable_volume|=volume
inte.enable_neighbors|=neighbors
inte.enable_integral|=integral
for i in 1:length(inte.integrals)
enable_block(inte,i,enforced)
end
end
@inline enabled_volumes(Integral::SerialIntegral) = Integral.enable_volume
@inline enabled_area(Integral::SerialIntegral) = Integral.enable_volume
@inline enabled_bulk(Integral::SerialIntegral) = Integral.enable_integral
@inline enabled_interface(Integral::SerialIntegral) = Integral.enable_integral
@inline enabled_neighbors(Integral::SerialIntegral) = Integral.enable_neighbors
@inline function _has_cell_data(I::SerialIntegral,_Cell)
found = meshindex_from_internal(mesh(I),_Cell)
return _has_cell_data(I.integrals[found].integral,_Cell-I.integrals[found].data._start+1)
end
@inline function cell_data_writable(I::SerialIntegral,_Cell,vec,vecvec,::StaticFalse;get_integrals=statictrue)
found = meshindex_from_internal(mesh(I),_Cell)
return cell_data_writable(I.integrals[found].integral,_Cell-I.integrals[found].data._start+1,vec,vecvec,staticfalse,get_integrals=get_integrals)
end
@inline function get_neighbors(I::SerialIntegral,_Cell,::StaticFalse)
found = meshindex_from_internal(mesh(I),_Cell)
return get_neighbors(I.integrals[found].integral,_Cell-I.integrals[found].data._start+1,staticfalse)
end
function set_neighbors(I::SerialIntegral,_Cell,new_neighbors,proto_bulk,proto_interface,::StaticFalse)
found = meshindex_from_internal(mesh(I),_Cell)
set_neighbors(I.integrals[found].integral,_Cell-I.integrals[found].data._start+1,new_neighbors,proto_bulk,proto_interface,staticfalse)
end
@inline function get_area(I::SerialIntegral,c,n,::StaticTrue)
found = meshindex_from_internal(mesh(I),c)
c2 = c-I.integrals[found].data._start+1
return get_area(I.integrals[found].integral,c2,n,statictrue)
end
@inline function get_integral(I::SerialIntegral,c,n,::StaticTrue)
found = meshindex_from_internal(mesh(I),c)
c2 = c-I.integrals[found].data._start+1
try
return get_integral(I.integrals[found].integral,c2,n,statictrue)
catch
error("$found, \n $c2, \n $c")
end
end
"""
`SparseVectorWrapper{T}`: A wrapper around a `Vector` of `Pair{Int64, T}` that provides an efficient dictionary-like interface
for data assumed to be sorted by the first coordinate (the `Int64` key).
# Fields:
- `data::Vector{Pair{Int64, T}}`: The underlying vector of pairs where each pair consists of a key of type `Int64`
and a value of type `T`.
# Constructor:
- `SparseVectorWrapper(data::Vector{Pair{Int64, T}})`: Creates an instance of `SparseVectorWrapper` from the given vector of pairs.
# Methods:
- `Base.size(wrapper::SparseVectorWrapper)`: Returns the size of the underlying data vector.
- `Base.eltype(::Type{SparseVectorWrapper{T}})`: Returns the type of the values stored in the dictionary.
- `Base.eltype(wrapper::SparseVectorWrapper)`: Returns the type of the values stored in the dictionary.
- `Base.iterate(wrapper::SparseVectorWrapper, state=1)`: Iterates over the key-value pairs in the wrapper.
- `Base.keys(wrapper::SparseVectorWrapper)`: Returns an iterator over the keys in the wrapper.
- `Base.values(wrapper::SparseVectorWrapper)`: Returns an iterator over the values in the wrapper.
- `Base.haskey(wrapper::SparseVectorWrapper, key::Int64)`: Checks if the specified key exists in the wrapper using binary search.
- `Base.getindex(wrapper::SparseVectorWrapper, key::Int64)`: Retrieves the value associated with the given key using binary search, or throws a `KeyError` if the key is not found.
- `Base.get(wrapper::SparseVectorWrapper, key::Int64, default=nothing)`: Retrieves the value associated with the given key using binary search, or returns the specified default value if the key is not found.
"""
struct SparseVectorWrapper{T} <: AbstractDict{Int64, T}
data::Vector{Pair{Int64, T}}
end
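# Short usage sketch (the pairs must already be sorted by key, as required by the
# binary-search based methods below; values are illustrative):
#=
wrapper = SparseVectorWrapper([2 => 0.5, 7 => 1.25, 11 => 3.0])
haskey(wrapper, 7)       # true
wrapper[11]              # 3.0
get(wrapper, 5, 0.0)     # 0.0 (default, key 5 is absent)
collect(keys(wrapper))   # [2, 7, 11]
=#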
# Constructor
#function SparseVectorWrapper(data::Vector{Pair{Int64, T}}) where T
# return SparseVectorWrapper{T}(data)
#end
# size implementation
Base.size(wrapper::SparseVectorWrapper) = size(wrapper.data)
# eltype implementation
Base.eltype(::Type{SparseVectorWrapper{T}}) where T = T
Base.eltype(::SparseVectorWrapper{T}) where T = T
# Iterator implementation
Base.iterate(wrapper::SparseVectorWrapper, state=1) =
state > length(wrapper.data) ? nothing : ((wrapper.data[state][1], wrapper.data[state][2]), state + 1)
# keys implementation
function Base.keys(wrapper::SparseVectorWrapper)
return (pair[1] for pair in wrapper.data)
end
# values implementation
function Base.values(wrapper::SparseVectorWrapper)
return (pair[2] for pair in wrapper.data)
end
# haskey implementation using binary search
function Base.haskey(wrapper::SparseVectorWrapper, key::Int64)
lo, hi = 1, length(wrapper.data)
while lo <= hi
mid = div(lo + hi, 2)
mid_key = wrapper.data[mid][1]
if mid_key == key
return true
elseif mid_key < key
lo = mid + 1
else
hi = mid - 1
end
end
return false
end
# getindex implementation using binary search
function Base.getindex(wrapper::SparseVectorWrapper, key::Int64)
lo, hi = 1, length(wrapper.data)
while lo <= hi
mid = div(lo + hi, 2)
mid_key = wrapper.data[mid][1]
if mid_key == key
return wrapper.data[mid][2]
elseif mid_key < key
lo = mid + 1
else
hi = mid - 1
end
end
error("KeyError: key $key not found")
end
# Optional method: get, returning a default value when the key does not exist
function Base.get(wrapper::SparseVectorWrapper, key::Int64, default=nothing)
lo, hi = 1, length(wrapper.data)
while lo <= hi
mid = div(lo + hi, 2)
mid_key = wrapper.data[mid][1]
if mid_key == key
return wrapper.data[mid][2]
elseif mid_key < key
lo = mid + 1
else
hi = mid - 1
end
end
return default
end
# Test code for SparseVectorWrapper
#=
# Helper function for testing
function run_tests()
    # Sample data: a Vector of (key => value) pairs
    data = [1 => "one", 3 => "three", 5 => "five", 7 => "seven"]
    # Create the SparseVectorWrapper
    wrapper = SparseVectorWrapper(data)
    # Test: size
    println("Test: size")
    @assert size(wrapper) == size(data)
    println("✔ size is correct")
    # Test: element type
    println("Test: eltype")
    println(eltype(wrapper))
    println(typeof(wrapper))
    @assert eltype(wrapper) == String
    println("✔ eltype is correct")
    # Test: iteration
    println("Test: iterate")
    for (key, value) in wrapper
        println("Key: $key, Value: $value")
    end
    println("✔ iteration is correct")
    # Test: keys
    println("Test: keys")
    @assert collect(keys(wrapper)) == [1, 3, 5, 7]
    println("✔ keys is correct")
    # Test: values
    println("Test: values")
    @assert collect(values(wrapper)) == ["one", "three", "five", "seven"]
    println("✔ values is correct")
    # Test: haskey
    println("Test: haskey")
    @assert haskey(wrapper, 3) == true
    @assert haskey(wrapper, 4) == false
    println("✔ haskey is correct")
    # Test: getindex
    println("Test: getindex")
    @assert wrapper[1] == "one"
    @assert wrapper[5] == "five"
    try
        wrapper[2]
    catch e
        println("✔ KeyError was raised as expected")
    end
    # Test: get with a default value
    println("Test: get with default")
    @assert get(wrapper, 7, "not found") == "seven"
    @assert get(wrapper, 4, "not found") == "not found"
    println("✔ get with default is correct")
end
# Run the tests
run_tests()=#
function sample_data_periodic(dim,_NON)
periodicity = PeriodicData(3*ones(Int64,dim),(1.0/3)*ones(Int64,dim),_NON,zeros(Float64,dim))
xs = periodicgeodata(VoronoiNodes(rand(dim,_NON)),periodicity)#,SVector{dim,Float64}(zeros(Float64,dim)))
offset = _NON*(index_from_array(2*ones(Int64,dim),periodicity)-1)
println(length(xs)," ",offset)
for i in 1:_NON
a = xs[i]
xs[i] = xs[i+offset]
xs[i+offset] = a
end
return xs
end
function sample_data_random(dim,_NON)
xs = VoronoiNodes(rand(dim,6^dim))
tree = NearestNeighbors.KDTree(xs)
idxs,_ = NearestNeighbors.knn(tree,0.5*ones(Float64,dim),_NON)
for i in 1:_NON
a = xs[i]
xs[i] = xs[idxs[i]]
xs[idxs[i]] = a
end
return xs
end
function VoronoiStatistics(dim,samples;periodic=nothing,points=1,my_generator=nothing,geodata=true)
println("VoronoiStatistics in dim = $dim with $samples samples and generation method: ",(periodic!=nothing) ? "periodic($periodic) " : (my_generator!=nothing ? "own method($points)" : "random generator($points)"))
println()
my_generator!=nothing && (periodic=nothing)
data_size = (periodic!=nothing) ? samples*periodic : samples*points
volumes = Vector{Float64}(undef,data_size)
verteces = Vector{Float64}(undef,data_size)
interfaces = Vector{Float64}(undef,data_size)
areas = Vector{Vector{Float64}}(undef,data_size)
min_verts = typemax(Int64)
for S in 1:samples
xs, number = (periodic!=nothing) ? (sample_data_periodic(dim,periodic),periodic) : ( my_generator==nothing ? (sample_data_random(dim,points),points) : my_generator(dim,points) )
#voronoi( Integrator, Iter=i_nodes, searcher=searcher, intro="Block $(string(i, base = 10, pad = max_string_len)), Voronoi cells: ",compact=true,printsearcher=false)
I,_=voronoi(xs,searcher=Raycast(xs),intro="Run number: $S ",compact=true, Iter=1:number)
vp_line_up()
I2=Integrator(I.Integral,geodata ? VI_POLYGON : VI_GEOMETRY,integrand=nothing)
_integrate( I2, intro="Run number: $S ", calculate = 1:length(xs), iterate=1:number, compact=true)
mesh = I.Integral.MESH
for i in 1:number
verteces[(S-1)*number+i] = length(I2.Integral.MESH.All_Vertices[i])+length(I2.Integral.MESH.Buffer_Vertices[i])
interfaces[(S-1)*number+i] = length(I2.Integral.neighbors[i])
neigh = neighbors_of_cell(i,mesh)#,adjacents=true)
vn_count = zeros(Int64,length(neigh))
if geodata
volumes[(S-1)*number+i] = I2.Integral.volumes[i]
areas[(S-1)*number+i] = I2.Integral.area[i]
end
for (sig,_) in Iterators.flatten((mesh.All_Vertices[i],mesh.Buffer_Vertices[i]))
# print(" $n: $sig --> ")
for s in sig
if s in neigh
vn_count[findfirst(x->x==s,neigh)] += 1
end
end
end
my_min = minimum(vn_count)
min_verts = minimum([my_min,min_verts])
end
end
# println(data_size)
# println(length(volumes))
# println("At least $min_verts verteces per interface")
# means...
VOLUMES = geodata ? sum(volumes)/data_size : 0.0
VERTECES = sum(verteces)/data_size
all_neighbors = sum(interfaces)
INTERFACES = all_neighbors/data_size
AREAS = geodata ? sum(x->sum(x),areas)/all_neighbors : 0
# variance
_volumes = geodata ? sqrt(sum(x->(x-VOLUMES)^2,volumes)/data_size ) : 0.0
_verteces = sqrt(sum(x->(x-VERTECES)^2,verteces)/data_size )
_interfaces = sqrt( sum(x->(x-INTERFACES)^2,interfaces)/data_size )
_areas = geodata ? sqrt( sum(x->sum(a->(a-AREAS)^2,x),areas)/all_neighbors ) : 0.0
return (sample_size=data_size ,volume=(VOLUMES,_volumes), verteces=(VERTECES,_verteces), neighbors=(INTERFACES,_interfaces), area=(AREAS,_areas))
end
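# Hedged example (parameter values are illustrative): statistics over 5 independent
# samples of 100 random nodes in 3D. The returned named tuple has the fields
# `sample_size`, `volume`, `verteces`, `neighbors` and `area`, where each statistical
# field is a (mean, standard deviation) pair.
#=
stats = VoronoiStatistics(3, 5; points=100)
stats.neighbors   # (mean, deviation) of the number of neighbors per cell
stats.volume      # (mean, deviation) of the cell volumes (only meaningful for geodata=true)
=#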
##################################################################################################################################################################
##################################################################################################################################################################
##################################################################################################################################################################
##################################################################################################################################################################
##################################################################################################################################################################
# nodeslist = [200,500,1000,1500,2000,3000,4000,6000,8000,10000,12500,15000,17500,20000,22500,25000,27500,30000]
# dim = 5
# collect_statistics(statistic_samples(dim,nodeslist,4),text="results$(dim)D-30000-new.txt")
function vor_calc_statistics_1(dim,NN,searchdata,cycle=1,silence=false,counts=[0])
oldstd = stdout
count = cycle
for i in 1:cycle
xs = VoronoiNodes(rand(dim,NN))
redirect_stdout(oldstd)
print(" - $i")
redirect_stdout(silence ? devnull : oldstd)
try
I,searcher = HighVoronoi.voronoi(xs,searcher=HighVoronoi.Raycast(xs,domain=cuboid(dim,periodic=[])))
searchdata .+= searcher.rare_events
catch err
redirect_stdout(oldstd)
println("Error: $err")
count -= 1
end
end
counts[1]=count
if count==0
searchdata .= 0
end
redirect_stdout(oldstd)
end
function vor_calc_statistics(dim,NN,cycle,eval_data=nothing,entry=0, silence=false)
searchdata = zeros(Int64,20)
print("--- Voronoi in dim $dim: $NN nodes")
counts = [0]
t = @elapsed vor_calc_statistics_1(dim,NN,searchdata,cycle,silence,counts)
    counts[1] = max(counts[1],1) # clamp to at least 1 so the averages below never divide by zero
println(" -- $t secs.")
data = Vector{Float64}(undef,20)
data .= searchdata./cycle
print("$NN nodes in R^$dim: $(t/cycle) secs, ")
print("$(data[HighVoronoi.SRI_vertex]) verteces, ")
print("$(data[HighVoronoi.SRI_boundary_vertex]) B-verteces, ")
print("$(data[HighVoronoi.SRI_walkray]) walks, ")
println("$(searchdata[HighVoronoi.SRI_nn]/searchdata[HighVoronoi.SRI_walkray]) nn-searches")
if (eval_data!=nothing)
eval_data[1,entry] = NN
eval_data[2,entry] = dim
eval_data[3,entry] = t/counts[1]
eval_data[4,entry] = data[HighVoronoi.SRI_vertex]
eval_data[5,entry] = data[HighVoronoi.SRI_boundary_vertex]
eval_data[6,entry] = data[HighVoronoi.SRI_walkray]
eval_data[7,entry] = data[HighVoronoi.SRI_walkray]!=0 ? searchdata[HighVoronoi.SRI_nn]/searchdata[HighVoronoi.SRI_walkray] : 0
end
end
function statistic_samples(dim,samps,cycles)
lsamps=length(samps)
data = zeros(Int64,3,lsamps)
for i in 1:lsamps
data[1,i] = dim
data[2,i] = samps[i]
data[3,i] = cycles
end
vor_calc_statistics(dim,100,1)
return data
end
function collect_statistics(samples;jld="",txt="",silence=true)
lsamples = size(samples,2)
data=zeros(Float64,7,lsamples)
b = size(samples,1)==3
try
for i in 1:lsamples
vor_calc_statistics(samples[1,i],samples[2,i], b ? samples[3,i] : 3,data,i,silence)
end
catch
end
println(data)
if jld!=""
jldopen(jld,"w") do file
write(file,"data",data)
end
end
if txt!=""
open(txt,"w") do file
print(file,data)
end
end
return data
end
##################################################################################################################################################################
##################################################################################################################################################################
##################################################################################################################################################################
##################################################################################################################################################################
##################################################################################################################################################################
# nodeslist = [200,500,1000,1500,2000,3000,4000,6000,8000,10000,12500,15000,17500,20000,22500,25000,27500,30000]
# dim = 5
# collect_statistics(statistic_samples(dim,nodeslist,4),text="results$(dim)D-30000-new.txt")
function vor_calc_statistics_1(unit_nodes::Matrix,iterator,searchdata,cycle,silence,counts)
oldstd = stdout
count = cycle
matrix_data = unit_nodes
dim = size(matrix_data,1)
offsetvector = zeros(Float64,dim)
dimensions = ones(Float64,dim)
for i in 1:cycle
data = unit_nodes
number_of_nodes = size(matrix_data,2)
cubedimensions = ones(Float64,dim)
cubedimensions .*= iterator
extended_cube = cuboid( dim, periodic = [], dimensions = cubedimensions )
periodicity = PeriodicData(iterator,dimensions,number_of_nodes,offsetvector)
xs = periodicgeodata(VoronoiNodes(data),periodicity)#,SVector{dim,Float64}(zeros(Float64,dim)))
lmesh = length(xs)
redirect_stdout(oldstd)
print(" - $i")
redirect_stdout(silence ? devnull : oldstd)
try
for x in xs
if !(x in extended_cube)
println(iterator)
error("$x not in domain:"*boundaryToString(extended_cube))
end
end
# try
I,searcher=voronoi(xs,searcher=Raycast(xs;domain=extended_cube),intro="")
searchdata .+= searcher.rare_events
# catch err
# redirect_stdout(oldstd)
# println("Error: $err")
# count -= 1
# end
catch
redirect_stdout(oldstd)
rethrow()
end
end
counts[1]=count
if count==0
searchdata .= 0
end
redirect_stdout(oldstd)
end
function vor_calc_statistics(unit_nodes::Matrix,dim,iterator,eval_data,entry,silence=true,cycle=1)
searchdata = zeros(Int64,20)
matrix_data = unit_nodes
NN = prod(iterator)*size(matrix_data,2)
print("--- Voronoi in dim $dim: $NN nodes")
counts = [0]
t = @elapsed vor_calc_statistics_1(unit_nodes,iterator,searchdata,cycle,silence,counts)
    counts[1] = max(counts[1],1) # clamp to at least 1 so the averages below never divide by zero
println(" -- $t secs.")
data = Vector{Float64}(undef,20)
data .= searchdata./cycle
print("$NN nodes in R^$dim: $(t/cycle) secs, ")
print("$(data[HighVoronoi.SRI_vertex]) verteces, ")
print("$(data[HighVoronoi.SRI_boundary_vertex]) B-verteces, ")
print("$(data[HighVoronoi.SRI_walkray]) walks, ")
println("$(searchdata[HighVoronoi.SRI_nn]/searchdata[HighVoronoi.SRI_walkray]) nn-searches")
if (eval_data!=nothing)
eval_data[1,entry] = NN
eval_data[2,entry] = dim
eval_data[3,entry] = t/counts[1]
eval_data[4,entry] = data[HighVoronoi.SRI_vertex]
eval_data[5,entry] = data[HighVoronoi.SRI_boundary_vertex]
eval_data[6,entry] = data[HighVoronoi.SRI_walkray]
eval_data[7,entry] = data[HighVoronoi.SRI_walkray]!=0 ? searchdata[HighVoronoi.SRI_nn]/searchdata[HighVoronoi.SRI_walkray] : 0
end
end
function collect_statistics(unit_nodes::Points,dim, min_size=2*ones(Int64,dim),max_size=4*ones(Int64,dim);jld="",txt="",silence=true)
return collect_statistics(_Matrix_from_Points(unit_nodes),dim, min_size,max_size;jld=jld,txt=txt,silence=silence)
end
function collect_statistics(unit_nodes::Matrix,dim, min_size=2*ones(Int64,dim),max_size=4*ones(Int64,dim);jld="",txt="",silence=true,fast=false)
iterator = copy(min_size)
iterator[1] -= 1
b = true
lsamples = 0
while b
b = false
for i in 1:dim
iterator[i]==max_size[i] && continue
iterator[i] += 1
b = true
lsamples += 1
i==1 && iterator[i]==min_size[i] && break
end
end
data=zeros(Float64,7,lsamples)
lsamples = 0
iterator .= min_size
iterator[1] -= 1
b=true
while b
b = false
for i in 1:dim
iterator[i]==max_size[i] && continue
iterator[i] += 1
b = true
lsamples += 1
fast ? vor_calc_statistics_fast(unit_nodes,dim,iterator,data,lsamples,silence) : vor_calc_statistics(unit_nodes,dim,iterator,data,lsamples,silence)
i==1 && iterator[i]==min_size[i] && break
end
end
println(data)
if jld!=""
jldopen(jld,"w") do file
write(file,"data",data)
end
end
if txt!=""
open(txt,"w") do file
print(file,data)
end
end
return data
end
##################################################################################################################################################################
##################################################################################################################################################################
##################################################################################################################################################################
##################################################################################################################################################################
##################################################################################################################################################################
function vor_calc_statistics_fast(unit_nodes::Matrix,dim,iterator,eval_data,entry,silence=true,cycle=1)
searchdata = zeros(Int64,20)
matrix_data = unit_nodes
matrix_data .*= 0.99
matrix_data .+= 0.005*ones(Float64,dim)
NN = prod(iterator)*size(matrix_data,2)
print("--- Fast periodic Voronoi in dim $dim: $NN nodes")
counts = [1]
t = @elapsed VoronoiGeometry(VoronoiNodes(matrix_data),periodic_grid=(periodic=[],dimensions=ones(Float64,dim),
scale=ones(Float64,dim), repeat=iterator), integrator=VI_GEOMETRY,silence=true)
println(" -- $t secs.")
data = Vector{Float64}(undef,20)
data .= searchdata./cycle
print("$NN nodes in R^$dim: $(t/cycle) secs, ")
print("$(data[HighVoronoi.SRI_vertex]) verteces, ")
print("$(data[HighVoronoi.SRI_boundary_vertex]) B-verteces, ")
print("$(data[HighVoronoi.SRI_walkray]) walks, ")
println("$(searchdata[HighVoronoi.SRI_nn]/searchdata[HighVoronoi.SRI_walkray]) nn-searches")
if (eval_data!=nothing)
eval_data[1,entry] = NN
eval_data[2,entry] = dim
eval_data[3,entry] = t/counts[1]
eval_data[4,entry] = data[HighVoronoi.SRI_vertex]
eval_data[5,entry] = data[HighVoronoi.SRI_boundary_vertex]
eval_data[6,entry] = data[HighVoronoi.SRI_walkray]
eval_data[7,entry] = data[HighVoronoi.SRI_walkray]!=0 ? searchdata[HighVoronoi.SRI_nn]/searchdata[HighVoronoi.SRI_walkray] : 0
end
end
"""
substitute!(VG::VoronoiGeometry,VG2::VoronoiGeometry,indeces)
Takes the nodes `indeces` from `VG2`, erases all nodes of `VG` that lie within the Voronoi cells of `indeces`, and then plugs
the nodes `indeces` into `VG` and generates the full mesh for this new configuration.
"""
function substitute!(VG::VoronoiGeometry,VG2::VoronoiGeometry,indeces,update=true;silence=false, search_settings=[])
println("This method is not available in current version. Keep on Track with new releases!")
end
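# Intended call shapes according to the docstring and the archived implementation below
# (the method is disabled in this version, so these calls currently only print a notice;
# `VG`, `VG2` and the index choices are placeholders):
#=
substitute!(VG, VG2, [12, 13, 17])           # plug cells 12, 13, 17 of VG2 into VG
substitute!(VG, VG2, mesh -> collect(1:10))  # or pass a function generating indices from the mesh of VG2
=#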
#=
if (!(typeof(VG.domain)<:Voronoi_Domain && typeof(VG2.domain)<:Voronoi_Domain))
error("Both geometries must have classical storage modes")
end
oldstd = stdout
redirect_stdout(silence ? devnull : oldstd)
try
if !(typeof(indeces)<:AbstractVector{Int64})
if typeof(indeces)<:Function
indeces = indeces(VG2.Integrator.Integral.MESH)
else
error("you need to provide either a vector of indeces or a function generating indeces from a Voronoi_MESH in the third argument")
end
end
# find all internally mirrored points:
indeces = copy(indeces)
indeces .+= length(VG2.domain.references)
unique!(sort!(prepend!(indeces,filter!(x->x!=0,map!(i->VG2.domain.references[i] in indeces ? i : 0, Vector{Int64}(undef,length(VG2.domain.references)),1:length(VG2.domain.references))))))
Integral1 = integral(VG.domain)
Integral2 = copy(integral(VG.domain))
nodes1 = Integral1.MESH._nodes
nodes2 = Integral2.MESH._nodes
references1 = VG.domain.references
references2 = copy(VG2.domain.references)
reference_shifts1 = VG.domain.reference_shifts
reference_shifts2 = copy(VG2.domain.reference_shifts)
# extract periodic boundaries:
periodic = filter!(n->VG.domain.boundary.planes[n].BC>0,collect(1:(length(VG.domain.boundary))))
ln1 = length(nodes1)
ln2 = length(nodes2)
keeps1 = BitVector(undef,ln1)
modified1 = BitVector(undef,ln1)
keeps2 = BitVector(undef,ln2)
modified2 = BitVector(undef,ln2)
tree2 = NearestNeighbors.KDTree(nodes2)
not_in_grid_2(x) = !(NearestNeighbors.nn(tree2,x)[1] in indeces)
#println(indeces)
#draw2D(VG,"test-reduce-1.mp")
filter!(n->not_in_grid_2(nodes1[n]), (sig,r,m)->_not_in_grid(sig,r,tree2,nodes1,n->!(n in indeces)), Integral1, references1, reference_shifts1, length(VG.domain.boundary), keeps1, modified1 )
#draw2D(VG,"test-reduce-2.mp")
tree1 = KDTree(nodes1)
# not_in_new_grid1(x) = !keeps1[nn(tree1,x)[1]]
periodic .+= ln2
filter!(n->n in indeces, (sig,r,m)->periodic_bc_filterfunction(sig,periodic) && _not_in_grid(sig,r,tree1,nodes2), Integral2, references2, reference_shifts2, length(VG2.domain.boundary), keeps2, modified2 )
ln1 = length(nodes1)
ln2 = length(nodes2)
for i in 1:length(Integral1)
Integral1.neighbors[i] .+= ln2
for (sig,_) in Integral1.MESH.All_Vertices[i]
sig.+=ln2
end
end
# println("$ln2, $ln1, $(length(Integral2)) : ")
for i in 1:length(Integral2)
shift_boundarynodes(Integral2.neighbors[i],ln2,ln1)
for (sig,_) in Integral2.MESH.All_Vertices[i]
# print("$i: $sig -> ")
shift_boundarynodes(sig,ln2,ln1)
# print("$sig ; ")
end
end
prepend!(nodes1,nodes2)
prepend!(Integral1.volumes,Integral2.volumes)
prepend!(Integral1.area,Integral2.area)
prepend!(Integral1.bulk_integral,Integral2.bulk_integral)
prepend!(Integral1.interface_integral,Integral2.interface_integral)
prepend!(Integral1.neighbors,Integral2.neighbors)
prepend!(Integral1.MESH.All_Vertices,Integral2.MESH.All_Vertices)
prepend!(Integral1.MESH.Buffer_Vertices,Integral2.MESH.Buffer_Vertices)
for i in 1:length(Integral1)
Base.rehash!(Integral1.MESH.All_Vertices[i])
Base.rehash!(Integral1.MESH.Buffer_Vertices[i])
end
append!(modified2,modified1)
#plausible(Integral1.MESH,Raycast(nodes1,domain=VG.domain.internal_boundary),report=true)
num_of_vert = map!(n->length(Integral1.MESH.All_Vertices[n])+length(Integral1.MESH.All_Vertices[n]),Vector{Int64}(undef,length(Integral1)),1:length(Integral1))
voronoi(VG.Integrator,searcher=Raycast(nodes1;RaycastParameter(VG.searcher,(;search_settings...,domain=VG.domain.internal_boundary))...))
map!(n->modified2[n] || (num_of_vert[n]!=length(Integral1.MESH.All_Vertices[n])+length(Integral1.MESH.All_Vertices[n])),modified2,1:length(Integral1))
shift_block!(Integral1,length(references2)+1,ln2,ln1) # shift new nodes to their proper position
shift_block!(modified2,length(references2)+1,ln2,ln1)
references2 .+= ln1
references1 .+= length(references2)
prepend!(references1,references2)
prepend!(reference_shifts1,reference_shifts2)
iter = keepat!(collect(1:length(nodes1)),modified2)
repair_periodic_structure!(VG.domain,Integral1,iter)
# periodize!()
#draw2D(VG,"test-reduce-3.mp")
VG.refined[1]=true
if (update)
println("updating...")
println(Crayon(foreground=:red,underline=true), "Start integration on refined cells:",Crayon(reset=true))
_relevant=Base.intersect(iter,collect((1+length(VG.domain.references)):(length(VG.Integrator.Integral.MESH))))
append!(_relevant,collect((length(Integral1.MESH)+1):(length(Integral1.MESH)+length(VG.domain.boundary))))
integrate(backup_Integrator(VG.Integrator,VG.refined[1]),domain=VG.domain.internal_boundary, modified=iter ,relevant=_relevant)
VG.refined[1]=false
end
catch
redirect_stdout(oldstd)
rethrow()
end
redirect_stdout(oldstd)
return VG
end
function contains_only(sig,keeps,lmax)
for s in sig
s>lmax && break
( !keeps[s] ) && (return false)
end
return true
end
function _not_in_grid(sig,r,tree,nodes,skip=x->false)
dist2 = NearestNeighbors.nn(tree,r,skip)[2]
dist1 = sum(abs2,r-nodes[sig[1]])
return (dist2^2-dist1)/dist1>1.0E-10
end
function filter!(filter_nodes,filter_verteces,Integral::Voronoi_Integral,references,reference_shifts,lb,keeps=BitVector(undef,length(Integral.MESH.nodes)),modified=BitVector(undef,length(Integral.MESH.nodes)),rehash=false;valid_vertex_checker=nothing,modified_tracker=nothing)
nodes = Integral.MESH._nodes
mesh = Integral.MESH
ln1 = length(nodes)
lref = length(references)
keeps = map!(n->filter_nodes(n<=lref ? references[n] : n),keeps,1:ln1)
old_node_indeces = view(1:(length(nodes)),keeps)
#for n in 1:ln1
# println(n," ",Integral.neighbors[n])
#end
#modified = map!(n->(!keeps[n]) || (!first_is_subset(Integral.neighbors[n],old_node_indeces,ln1)),modified,1:ln1)
vertex_check = typeof(valid_vertex_checker)!=Nothing
mycondition(sig,r) = valid_vertex_checker==nothing ? contains_only(sig,keeps,ln1) : check(valid_vertex_checker,sig,r,keeps,ln1,modified_tracker)
num_verteces_old = map!(n->length(mesh.All_Vertices[n])+length(mesh.Buffer_Vertices[n]),Vector{Int64}(undef,ln1),1:ln1)
filter!((sig,r)->mycondition(sig,r) && filter_verteces(sig,r,modified),Integral.MESH)#,affected=keeps)
map!(n->!keeps[n] || ((length(mesh.All_Vertices[n])+length(mesh.Buffer_Vertices[n])-num_verteces_old[n])!=0),modified,1:ln1)
keepat!(modified,keeps)
keepnodes = BitVector(undef,ln1)
tracker = typeof(modified_tracker)==ModifiedTracker
for n in 1:ln1
(n<=length(references) || !keeps[n]) && continue
neigh = Integral.neighbors[n]
lneigh = length(neigh)
keepmynodes = view(keepnodes,1:lneigh)
map!(k->k>ln1 || keeps[k],keepnodes,neigh)
if length(Integral.area)>0 && (isassigned(Integral.area,n)) keepat!(Integral.area[n],keepmynodes) end
if length(Integral.interface_integral)>0 && (isassigned(Integral.interface_integral,n)) keepat!(Integral.interface_integral[n],keepmynodes) end
tracker && keepat!(modified_tracker.data[n],keepmynodes)
keepat!(Integral.neighbors[n],keepmynodes)
end
keepat!(Integral,keeps)
keepat!(references,view(keeps,1:lref))
keepat!(reference_shifts,view(keeps,1:lref))
# find new indeces for all remaining nodes
newindeces = map!(n->sum(i->keeps[i], 1:n),Vector{Int64}(undef,ln1+lb),1:ln1)
# find new indeces for all boundaries
sk = sum(keeps)
map!(n->sk+n,view(newindeces,(ln1+1):(ln1+lb)),1:lb)
#println(newindeces)
switch_indeces(arr)=map!(s->newindeces[s],arr,arr)
for n in 1:length(mesh)
for (sig,_) in mesh.All_Vertices[n]
# print(sig)
vertex_check && reduce_sig(sig,keeps,ln1)
switch_indeces(sig)
# print(" -> $sig ; ")
#for i in 1:length(sig)
# sig[i] = newindeces[sig[i]]
#end
end
# println()
switch_indeces(Integral.neighbors[n])
#for i in 1:length(Integral.neighbors[n])
# Integral.neighbors[n][i]=newindeces[Integral.neighbors[n][i]]
#end
end
switch_indeces(references)
return Integral
end
=#
#############################################################################################
# Here follows everything special about systematic Voronoi search
#############################################################################################
# needed in some statistics functions
function voronoi(xs::Points; searcher::RaycastIncircleSkip=Raycast(xs),initialize::Int64=0,Iter::UnitRange=1:length(xs),intro::String="Calculating Voronoi cells:",compact::Bool=false,printsearcher::Bool=false)
return voronoi(Geometry_Integrator(xs),searcher=searcher,initialize=initialize,Iter=Iter,intro=intro,compact=compact,printsearcher=printsearcher)
end
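# Hedged example, mirroring the calls used further below in this file: compute the Voronoi
# diagram of 100 random nodes in the 3D unit cube. `VoronoiNodes`, `Raycast` and `cuboid`
# are provided elsewhere in the package; `I` is the resulting Geometry_Integrator and
# `searcher` the raycast data structure.
#=
xs = VoronoiNodes(rand(3, 100))
I, searcher = voronoi(xs, searcher = Raycast(xs, domain = cuboid(3, periodic = [])))
=#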
# needed in periodic_mesh
function voronoi(Integrator; Iter=1:(length(nodes(mesh(Integrator.Integral)))), searcher::RaycastIncircleSkip=Raycast(nodes(mesh(Integrator.Integral))), kwargs...)
m = mesh(Integrator.Integral)
_,s = voronoi(m;Iter=Iter,searcher=searcher, kwargs...)
return Integrator, s
end
function voronoi(mesh::AM; Iter=1:(length(nodes(mesh))), searcher::RaycastIncircleSkip=Raycast(nodes(mesh)),initialize::Int64=0, subroutine_offset::Int64=0,intro::String="Calculating Voronoi cells:",iteration_reset::Bool=true,compact::Bool=false, printsearcher::Bool=false, silence = false) where {P,AM<:AbstractMesh{P}}
nm = nodes(mesh)
l=length(nm)
dimension = size(P)[1]#length(nm[1])
if l==0 || l<=dimension
error("There are not enough points to create a Voronoi tessellation")
end
v_offset=subroutine_offset
# !silence && vp_print(v_offset,intro)
if (!compact)
# vp_line()
else
v_offset += length(intro)
end
TODO=collect(Iter)
_voronoi(mesh,TODO,compact,v_offset,silence,iteration_reset, printsearcher,searcher,intro, searcher.parameters.threading)
end
@inline function _voronoi(mesh::AM,TODO,compact,v_offset,silence,iteration_reset, printsearcher,searcher,intro,threading::SingleThread) where {P,AM<:AbstractMesh{P}}
dimension = size(P)[1]
queue = ThreadsafeQueue{Pair{Vector{Int64},P}}(Int64[]=>zeros(P),threading,0) #Dict{Vector{Int64},typeof(xs[1])}()
lower_b = round(Int,lowerbound(dimension,dimension))
sizehint!(queue,2*lower_b)
__voronoi(mesh,TODO,compact,v_offset,silence,iteration_reset, printsearcher,searcher, threading,queue,intro)
end
@inline function _voronoi(mesh::AM,TODO,compact,v_offset,silence,iteration_reset, printsearcher,searcher,intro,threading::MultiThread) where {P,AM<:AbstractMesh{P}}
dimension = size(P)[1]
#mesh2 = cast_mesh(ExternalMemory(6),copy(nodes(mesh)))
#println(threading)
#_queue = ThreadsafeQueue{Pair{Vector{Int64},P}}(Int64[]=>zeros(P),threading,0) #Dict{Vector{Int64},typeof(xs[1])}()
_threads = create_multithreads(threading)
#println(_threads)
node_threads = length(_threads)
list , tops = partition_indices(length(TODO),_threads)
pms = ParallelMesh(mesh,_threads,TODO,list)
TODOS = map(i->view(TODO,list[i]:tops[i]),1:length(list))
map(i->_transform_indeces(mesh,pms.meshes[i].mesh,TODOS[i]),1:length(list))
generator(i) = (ThreadsafeQueue{Pair{Vector{Int64},P}}(Int64[]=>zeros(P),threading,0),pms.meshes[i].mesh)
pq = ParallelQueues(node_threads,generator)
#println(length(nodes(pms.meshes[1].mesh)))
Raycasts = getMultiThreadRaycasters(searcher,pms)
new_vertices = Atomic{Int64}(0)
#println("Use Multithreading with $(node_threads) threads.")
#println(typeof(pms.meshes[1].mesh))
prog = ThreadsafeProgressMeter(length(TODO),silence,intro)
#globallock = PLock()
#println("Hier")
#Threads.@spawn print_plock(globallock)
#println("Hier2")
Threads.@threads for i in 1:node_threads
__voronoi(pms.meshes[i].mesh,copy(TODOS[i]),compact,v_offset,silence,iteration_reset, printsearcher,Raycasts[i].raycaster, _threads[i],pq.queues[i],intro,new_vertices,prog,nothing)
end
#Threads.atomic_and!(globallock.running,false)
if (!compact)
println()
end
println("Total number of vertices: $(new_vertices[])")
end
#=function __voronoi(mesh::AM,TODO,compact,v_offset,silence,iteration_reset, printsearcher,searcher,threading,queue, new_vertices_atomic = Atomic{Int64}(0),progress=ThreadsafeProgressMeter(1,silence)) where {P,AM<:AbstractMesh{P}}
dimension = size(P)[1]
lower_b = round(Int,lowerbound(dimension,dimension))
sizehint!(queue,2*lower_b)
edgecount_global = EdgeHashTable(8*lower_b*dimension^2,threading)
_searchers = MultyRaycast(searcher,threading)
xs = searcher.tree.extended_xs
repeat=true
iteration_count=1
TODO_count=length(TODO)
new_verteces=0
b_index = collect((searcher.lmesh+1):(searcher.lmesh+searcher.lboundary))
while repeat
if iteration_count>4
@warn "There is some serious problem with the mesh: $iteration_count iterations are not a good sign"
end
if iteration_count>=6
error("you should check that all nodes lie within the domain and restart. If problem persists, contact the developer with a sample of your points and domain")
end
!iteration_reset && !silence && vp_line()
!silence && vp_print(v_offset+4,"Iteration:",v_offset+21,"Cell:")
repeat=false
!silence && vp_print(v_offset+16,iteration_count)
!silence && vp_print(v_offset+30, " ")
k=1
while k<=length(TODO) # iterate:
i=TODO[k]
TODO[k]=0
k+=1
i==0 && continue
!silence && vp_print(v_offset+30, i,v_offset+40,"($(k-1) of $TODO_count)")
new_verteces += systematic_explore_cell(xs,i,mesh,edgecount_global,_searchers,queue,b_index,new_vertices_atomic)
end
!silence && vp_print(v_offset+21,"Cells:\u1b[0K")
!silence && vp_print(v_offset+35,TODO_count)
if (!iteration_reset) && (!compact) && !(typeof(mesh)<:LockMesh)
!silence && vp_line()
!silence && vp_print(v_offset+21,"New verteces:",v_offset+35,new_verteces)
new_verteces=0
end
if true==true #searcher.recursive
count=1
for j in 1:searcher.tree.size
if searcher.positions[j] && (j in Iter)
TODO[count]=j
count+=1
repeat=true
end
end
TODO_count=count-1
searcher.positions .= false
end
iteration_count+=1
#break
end
if iteration_reset && !(typeof(mesh)<:LockMesh)
#println("")
!silence && vp_print(v_offset+4,"Iterations:")
!silence && vp_print(v_offset+21,"New verteces:",v_offset+35,new_verteces)
new_verteces=0
end
if !compact && !(typeof(mesh)<:LockMesh)
!silence && vp_line()
end
printsearcher && (vp_print(searcher))
return mesh, searcher
end=#
function __voronoi(mesh::AM,TODO,compact,v_offset,silence,iteration_reset, printsearcher,searcher,threading,queue,intro, new_vertices_atomic = Atomic{Int64}(0),progress=ThreadsafeProgressMeter(length(TODO),silence,intro),globallock=nothing) where {P,AM<:AbstractMesh{P}}
dimension = size(P)[1]
lower_b = round(Int,lowerbound(dimension,dimension))
sizehint!(queue,2*lower_b)
edgecount_global = EdgeHashTable(8*lower_b*dimension^2,threading)
_searchers = MultyRaycast(searcher,threading)
xs = searcher.tree.extended_xs
repeat=true
iteration_count=1
TODO_count=length(TODO)
new_verteces=0
b_index = collect((searcher.lmesh+1):(searcher.lmesh+searcher.lboundary))
while repeat
if iteration_count>4
@warn "There is some serious problem with the mesh: $iteration_count iterations are not a good sign"
end
if iteration_count>=6
error("you should check that all nodes lie within the domain and restart. If problem persists, contact the developer with a sample of your points and domain")
end
!iteration_reset && !silence && vp_line()
repeat=false
k=1
while k<=length(TODO) # iterate:
i=TODO[k]
TODO[k]=0
k+=1
i==0 && continue
#@descend systematic_explore_cell(xs,i,mesh,edgecount_global,_searchers,queue,b_index,new_vertices_atomic,globallock)
#error("")
new_verteces += systematic_explore_cell(xs,i,mesh,edgecount_global,_searchers,queue,b_index,new_vertices_atomic,globallock)
next!(progress)
end
if (!iteration_reset) && (!compact) && !(typeof(mesh)<:LockMesh)
!silence && vp_line()
!silence && println("New verteces:",new_verteces)
new_verteces=0
end
if true==true #searcher.recursive
count=1
for j in 1:searcher.tree.size
if searcher.positions[j] && (j in Iter)
TODO[count]=j
count+=1
repeat=true
end
end
TODO_count=count-1
searcher.positions .= false
end
iteration_count+=1
#break
end
if iteration_reset && !(typeof(mesh)<:LockMesh)
#println("")
# !silence && vp_print(v_offset+4,"Iterations:")
!silence && println("New verteces: ",new_verteces)
new_verteces=0
end
if !compact && !(typeof(mesh)<:LockMesh)
!silence && vp_line()
end
printsearcher && (vp_print(searcher))
return mesh, searcher
end
##############################################################################################################################
## Core functions of the geometry part
##############################################################################################################################
#=global NO_VERTEX=0::Int64
global VER_VAR=0.0::Float64
global CORRECTIONS=0::Int64
global SUCCESSFUL=0::Int64
global FIRSTCORRECTIONS=0::Int64
global SECONDCORRECTIONS=0::Int64=#
function systematic_explore_cell(xs::Points,_Cell,mesh::AM,edgecount,searcher_vec::RC,queue,b_index,new_vertices,globallock) where {AM<:AbstractMesh, RI<:RaycastIncircleSkip,RC<:AbstractVector{RI}}
try
nthreads = length(searcher_vec)
#print(Threads.threadid())
activate_queue_cell(queue,_Cell)
boundary = searcher_vec[1].domain
searcher_vec[1].tree.active .= false
activate_cell( searcher_vec[1], _Cell, b_index )
#=println(xs[1])
for i in 1:10
println(i,":",xs[100+i])
end=#
empty!(queue)
empty!(edgecount)
mm = number_of_vertices(mesh,_Cell)
#plock(globallock,"$(Threads.threadid())B")
#lock(mesh.global_lock)
if mm>0
i=0
vi = vertices_iterator(mesh,_Cell)
for (sig,r) in vi
push!(queue, copy(sig)=>r)
queue_edges_OnCell(sig,r,searcher_vec[1],_Cell,xs,edgecount)
i += 1
i==mm && break
end
end
#plock(globallock,"$(Threads.threadid())C")
# print("$_Cell : ")
#unlock(mesh.global_lock)
if isempty(queue) # the following makes no sense for parallelization
sig2, r2 = descent(xs, searcher_vec[1],_Cell)
push!(queue, copy(sig2)=>r2)
if !haskey(mesh,sig2)
Threads.atomic_add!(new_vertices,1)
push!(mesh, sig2 => r2)
end
queue_edges_OnFind(sig2,r2,searcher_vec[1],_Cell,xs,edgecount)
end
#plock(globallock,"$(Threads.threadid())D")
#while !isempty(queue)
#print("-")
#Threads.@threads
#for i in 1:nthreads
while true
#mod(i,10)==0 && print("-")
#plock(globallock,"$(Threads.threadid())E")
#print("+")
(sig,r) = pop!(queue)
if isempty_entry(queue,sig=>r)
break
else
ei = get_EdgeIterator(sig,r,searcher_vec[1],_Cell,xs,OnSysVoronoi())
nv = systematic_explore_vertex_multithread(xs,sig,r,_Cell,edgecount,mesh,queue,boundary,searcher_vec[1],ei,globallock)
#print(nv)
atomic_add!(new_vertices,nv)
end
#plock(globallock,"$(Threads.threadid())F")
end
#end
#end
#println(" ",number_of_vertices(mesh,1))
catch e
open("error_log_voronoi_$(Threads.threadid()).txt", "w") do f
            # save the stack trace to a per-thread error log
Base.showerror(f, e, catch_backtrace())
end
rethrow()
#=println("hallo")
println("julia")
println("ist")
println("doof")
Base.showerror(stdout, e, catch_backtrace())
println("very")
println(catch_backtrace())
#sync(s)
#sync(s)
=#
end
return new_vertices[]
end
#=
function systematic_work_queue2(xs,_Cell,mesh,edgecount,searcher,queue,vi_lock,new_vertices)
while true
(sig,r) = pop!(queue)
if isempty_entry(queue,sig=>r)
return
else
ei = get_EdgeIterator(sig,r,searcher,_Cell,xs,OnSysVoronoi())
nv = systematic_explore_vertex_multithread(xs,sig,r,_Cell,edgecount,mesh,queue,boundary,searcher,ei,vi_lock)
atomic_add!(new_vertices,nv)
end
end
end
function systematic_work_queue(xs,_Cell,mesh,edgecount,searcher,queue,vi_lock,notify_found,sleeping,nthreads,new_vertices,threadmanager)
b = true
while b
(sig,r) = pop!(queue)
if isempty_entry(queue,sig=>r)
b = suspendthread(threadmanager,Threads.threadid())
else
ei = get_EdgeIterator(sig,r,searcher,_Cell,xs,OnSysVoronoi())
nv = systematic_explore_vertex_multithread(xs,sig,r,_Cell,edgecount,mesh,queue,boundary,searcher,ei,vi_lock)
atomic_add!(new_vertices,nv)
end
end
end=#
function systematic_explore_vertex_multithread(xs::Points,sig,r,_Cell,edgecount,mesh,queue,boundary,searcher,edgeIterator,globallock)
k=0
dim = size(eltype(xs))[1]
typeof(edgeIterator)<:FastEdgeIterator && println("$(Threads.threadid())z")
for (edge,skip) in edgeIterator
# print("1")
b = pushedge!(edgecount,edge,_Cell)
(edge[1]!=_Cell || b ) && continue
full_edge, u = get_full_edge(sig,r,edge,edgeIterator,xs)
sig2, r2, success = walkray(full_edge, r, xs, searcher, sig, u, edge ) # provide missing node "j" of new vertex and its coordinate "r"
#
if sig2 == sig
pushray!(mesh,full_edge,r,u,_Cell)
continue
end
lsig2 = length(sig2)
lsig2<=length(r) && continue
# print("2")
if !haskey(mesh, sig2)
fraud_vertex(dim,sig2,r2,lsig2,searcher,xs) && continue
k+=1
push!(mesh, sig2 => r2)
end
if push!(queue, sig2 => r2)
# print("5")
queue_edges_OnFind(sig2,r2,searcher,_Cell,xs,edgecount) && continue
end
end
return k
end
function systematic_explore_cell(xs::Points,_Cell,mesh::AM,edgecount,searcher::RC,queue,b_index,_,_) where {AM<:AbstractMesh,RC<:RaycastIncircleSkip}
new_vertices=0
boundary = searcher.domain
searcher.tree.active .= false
activate_cell( searcher, _Cell, b_index )
empty!(queue)
empty!(edgecount)
mm = number_of_vertices(mesh,_Cell)
# mm = false
# for (sig,r) in vertices_iterator(mesh,_Cell)
if mm>0
vi = vertices_iterator(mesh,_Cell)
# mm=false
i=0
for (sig,r) in vi
try
queue_edges_OnCell(sig,r,searcher,_Cell,xs,edgecount)
catch
println(sig,r)
rethrow()
end
i += 1
i==mm && break
end
i=0
for (sig,r) in vi
ei = get_EdgeIterator(sig,r,searcher,_Cell,xs,OnSysVoronoi())
new_vertices += systematic_explore_vertex(xs,sig,r,_Cell,edgecount,mesh,queue,boundary,searcher,ei)
i += 1
i==mm && break
end
end
if isempty(queue) && mm==0
sig2, r2 = descent(xs, searcher,_Cell)
#=if !verify_vertex(sig2,r2,xs,searcher)
error("totaler schrott")
end=#
new_vertices += 1
push!(queue, sig2=>r2)
push!(mesh, sig2 => r2)
queue_edges_OnFind(sig2,r2,searcher,_Cell,xs,edgecount)
end
while length(queue) > 0
(sig,r) = pop!(queue)
ei = get_EdgeIterator(sig,r,searcher,_Cell,xs,OnSysVoronoi())
new_vertices += systematic_explore_vertex(xs,sig,r,_Cell,edgecount,mesh,queue,boundary,searcher,ei)
end
#=print(length(all_vertices_iterator(mesh,_Cell)))
_c = 0
for (sig,r) in all_vertices_iterator(mesh,_Cell)
_c+= length(sig)>7 ? 1 : 0
end
println(" $_c")=#
return new_vertices
end
function get_full_edge(sig,r,edge,::General_EdgeIterator,xs)
i = 0
while true
i+=1
!(sig[i] in edge) && break
end
u = u_default(sig, xs, i)
return Vector{Int64}(edge), u
end
@inline get_full_edge_indexing(sig,r,edge,::General_EdgeIterator,xs) = edge
function systematic_explore_vertex(xs::Points,sig,r,_Cell,edgecount,mesh,queue,boundary,searcher,edgeIterator)
k=0
dim = size(eltype(xs))[1]
for (edge,skip) in edgeIterator
b = pushedge!(edgecount,edge,_Cell)
(edge[1]!=_Cell || b ) && continue
full_edge, u = get_full_edge(sig,r,edge,edgeIterator,xs)
sig2, r2, success = walkray(full_edge, r, xs, searcher, sig, u, edge ) # provide missing node "j" of new vertex and its coordinate "r"
if sig2 == sig
try
pushray!(mesh,full_edge,r,u,_Cell)
catch
rethrow()
end
continue
end
lsig2 = length(sig2)
lsig2<=length(r) && continue
#(length(sig2)<=length(r)+1 && haskey(mesh, sig2)) && error("")
if (length(sig2)<=length(r)+1) || !haskey(mesh, sig2)
fraud_vertex(dim,sig2,r2,lsig2,searcher,xs) && continue
k+=1
push!(mesh, sig2 => r2)
queue_edges_OnFind(sig2,r2,searcher,_Cell,xs,edgecount) && continue
push!(queue, sig2 => r2)
end
end
return k
end
function fraud_vertex(dim,sig,r,lsig2,searcher,xs)
if lsig2>2^dim
if !verify_vertex(sig,r,xs,searcher)
return true
end
        max_dist = maximum(s->norm(r-xs[s]),sig)
distance = 0.0
max_distance = 0.0
lsig = length(sig)
for k in 1:(lsig-1)
for i in (k+1):lsig
dd = norm(xs[sig[i]]-xs[sig[k]])
distance += dd
max_distance = max(max_distance,dd)
end
end
distance /= lsig*(lsig-1)/2
if max_distance/max_dist<0.001*lsig/2^dim || distance/max_dist<1000*searcher.plane_tolerance
return true
end
end
return false
end
function increase_edgeview( edgeview, lsig, dim)
i = dim
while ( i>1 && edgeview[i]==(lsig-(dim-i)) ) i=i-1 end
i==1 && return false, 1
ret = i
edgeview[i] += 1
i += 1
while i<=dim
edgeview[i]=edgeview[i-1]+1
i += 1
end
return true, ret
end
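# `increase_edgeview` advances a strictly increasing index vector of length `dim`
# (with entries in 1:lsig) to the next combination in lexicographic order, never touching
# the first entry; it returns `false, 1` once all combinations are exhausted.
# Illustrative loop (hypothetical values):
#=
edgeview, lsig, dim = [1, 2, 3], 5, 3
while true
    println(edgeview)   # [1,2,3], [1,2,4], [1,2,5], [1,3,4], [1,3,5], [1,4,5]
    ok, _ = increase_edgeview(edgeview, lsig, dim)
    ok || break
end
=#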
"""
`ThreadSafeDict{K, V, ADKV<:AbstractDict{K, V}}`: A thread-safe dictionary that allows multiple concurrent reads or ONE single write operation at a time.
# Fields:
- `dict::ADKV`: The underlying dictionary that stores key-value pairs.
- `lock::ReadWriteLock`: A read-write lock that manages concurrent access to the dictionary.
# Constructor:
- `ThreadSafeDict(dict::ADKV)`: Creates a thread-safe wrapper around the given dictionary `dict`. Uses a `ReadWriteLock` to ensure thread safety.
# Methods:
- `Base.getindex(tsd::ThreadSafeDict{K, V, ADKV}, key::K)`: Retrieves the value associated with `key` in a thread-safe manner, allowing concurrent reads.
- `Base.setindex!(tsd::ThreadSafeDict{K, V, ADKV}, value::V, key::K)`: Sets the value for `key` in a thread-safe manner, allowing only one write operation at a time.
- `Base.delete!(tsd::ThreadSafeDict{K, V, ADKV}, key::K)`: Deletes the entry associated with `key` in a thread-safe manner.
- `Base.size(tsd::ThreadSafeDict)`: Returns the size of the underlying dictionary in a thread-safe manner.
- `Base.iterate(tsd::ThreadSafeDict{K, V, ADKV}, state...)`: Iterates over the dictionary in a thread-safe manner, allowing concurrent reads.
- `Base.get!(tsd::ThreadSafeDict{K, V, ADKV}, key::K2, default::V)`: Retrieves the value associated with `key`, or inserts and returns `default` if `key` is not found, in a thread-safe manner.
- `Base.push!(tsd::ThreadSafeDict{K, V, ADKV}, pairs::Pair{K, V}...)`: Inserts the given pairs into the dictionary in a thread-safe manner.
# Usage:
- This type is useful in scenarios where multiple threads need to access a dictionary concurrently.
- It ensures that multiple read operations can occur simultaneously, while write operations are isolated to prevent data races.
"""
struct ThreadSafeDict{K, V, ADKV<:AbstractDict{K, V}} <: AbstractDict{K, V}
dict::ADKV
lock::ReadWriteLock
end
# Constructor for ThreadSafeDict
function ThreadSafeDict{K, V, ADKV}(dict::ADKV) where {K, V, ADKV<:AbstractDict{K, V}}
return ThreadSafeDict(dict, ReadWriteLock())
end
function ThreadSafeDict(dict::ADKV) where {K, V, ADKV<:AbstractDict{K, V}}
return ThreadSafeDict(dict, ReadWriteLock())
end
ThreadSafeDict(dict::ADKV,::MultiThread) where {K, V, ADKV<:AbstractDict{K, V}} = ThreadSafeDict(dict)
ThreadSafeDict(dict::ADKV,::SingleThread) where {K, V, ADKV<:AbstractDict{K, V}} = dict
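# Brief usage sketch. The `MultiThread`/`SingleThread` selectors above either wrap the
# dictionary (thread-safe case) or return it unchanged; a plain call always wraps:
#=
tsd = ThreadSafeDict(Dict{Int,String}())
tsd[1] = "one"     # exclusive write
val = tsd[1]       # shared read; returns `nothing` if the key is missing
delete!(tsd, 1)
=#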
# Thread-safe getindex operation
function Base.getindex(tsd::ThreadSafeDict{K, V, ADKV}, key::K) where {K, V, ADKV<:AbstractDict{K, V}}
readlock(tsd.lock)
ret = get(tsd.dict, key, nothing)
readunlock(tsd.lock)
return ret
end
# Thread-safe setindex! operation
function Base.setindex!(tsd::ThreadSafeDict{K, V, ADKV}, value::V, key::K) where {K, V, ADKV<:AbstractDict{K, V}}
writelock(tsd.lock)
tsd.dict[key] = value
writeunlock(tsd.lock)
end
# Thread-safe delete! operation
function Base.delete!(tsd::ThreadSafeDict{K, V, ADKV}, key::K) where {K, V, ADKV<:AbstractDict{K, V}}
writelock(tsd.lock)
delete!(tsd.dict, key)
writeunlock(tsd.lock)
end
# Define the size method for the thread-safe dictionary
@inline Base.size(tsd::ThreadSafeDict) = begin
readlock(tsd.lock)
s = size(tsd.dict)
    readunlock(tsd.lock)
    s   # return the computed size rather than the result of readunlock
end
# Define the iterate method for the thread-safe dictionary
function Base.iterate(tsd::ThreadSafeDict{K, V, ADKV}, state...) where {K, V, ADKV<:AbstractDict{K, V}}
readlock(tsd.lock)
ret = nothing
ret = iterate(tsd.dict, state...)
readunlock(tsd.lock)
return ret
end
function Base.get!(tsd::ThreadSafeDict{K, V, ADKV}, key::K2, default::V) where {K,K2, V, ADKV<:AbstractDict{K, V}}
    # get! may insert `default`, i.e. it can mutate the dict, so take the write lock
    writelock(tsd.lock)
    g = get!(tsd.dict, key, default)
    writeunlock(tsd.lock)
return g
end
#function Base.haskey(tsd::ThreadSafeDict{K, V, ADKV}, key::K2) where {K,K2, V, ADKV<:AbstractDict{K, V}}
# readlock(tsd.lock)
# g = haskey(tsd.dict, key)
# readunlock(tsd.lock)
# return g
#end
function Base.push!(tsd::ThreadSafeDict{K, V, ADKV}, pairs::Pair{K, V}...) where {K, V, ADKV<:AbstractDict{K, V}}
writelock(tsd.lock)
    push!(tsd.dict, pairs...)  # splat the pairs into the underlying dict
writeunlock(tsd.lock)
return tsd
end
#=
# Example usage of the ThreadSafeDict
function example_usage()
tsd = ThreadSafeDict(Dict{Int, String}())
# Function to perform concurrent operations on the dictionary
function thread_work(id)
for i in 1:10
tsd[i] = "Value $i from thread $id"
println("Thread $id set key $i")
sleep(0.01)
end
for i in 1:10
val = tsd[i]
println("Thread $id got key $i with value $val")
sleep(0.01)
end
end
# Launch nthreads() threads to run the thread_work function
tasks = []
for id in 1:nthreads()
push!(tasks, Threads.@spawn thread_work(id))
end
# Wait for all threads to complete
for t in tasks
wait(t)
end
println("Final state of the dictionary: ", tsd.dict)
end
# Run the example usage
example_usage()
=#
| HighVoronoi | https://github.com/martinheida/HighVoronoi.jl.git |
|
[
"MIT"
] | 1.3.1 | bc1b8bfb168b8850d28c935b20a545882a9a078e | code | 27244 |
#const FNV_prime = UInt64(0x100000001b3)
@inline FNV_OFFSET_BASIS(::Type{UInt64}) = UInt64(14695981039346656037)
@inline FNV_PRIME(::Type{UInt64}) = UInt64(1099511628211)
@inline FNV_OFFSET_BASIS(::Type{UInt128}) = UInt128(144066263297769815596495629667062367629)
@inline FNV_PRIME(::Type{UInt128}) = UInt128(309485009821345068724781371)
# FNV-1a hash function
function fnv1a_hash(vec::VI,::Type{T},bias=0) where {T,VI<:AbstractVector{Int}}
hash = FNV_OFFSET_BASIS(T)
for value in vec
        # XOR the hash with the current entry (FNV-1a order: xor first, then multiply)
        hash = xor(hash, UInt64(value))
        # Multiply by the FNV prime; unsigned arithmetic wraps modulo 2^nbits
        hash *= FNV_PRIME(T) #& 0xFFFFFFFFFFFFFFFF
end
if bias!=0
hash = xor(hash, UInt64(bias))
hash *= FNV_PRIME(T) #& 0xFFFFFFFFFFFFFFFF
end
hash==0 && (return 1)
while (hash & 1) == 0
hash >>= 1
end
return hash
end
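# Example: hashing a (sorted) vertex signature at the two supported widths; the optional
# `bias` argument mixes one additional integer into the hash. Values are illustrative.
#=
sig  = [3, 17, 42, 108]
h128 = fnv1a_hash(sig, UInt128)
h64  = fnv1a_hash(sig, UInt64)
h64b = fnv1a_hash(sig, UInt64, 7)   # differs from h64 because of the bias
=#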
#=
@inline doublehash(vec::VI,::Type{T}) where {T,VI<:AbstractVector{Int}} = fnv1a_hash(vec,UInt128), fnv1a_hash(vec,UInt64)
# Definition of the HashedDict
struct HashedDict{VI<:AbstractVector{Int}, B, T<:Union{UInt64, UInt128}} <: AbstractDict{VI, B}
data::Dict{T, B}
function HashedDict{VI, B, T}() where {VI<:AbstractVector{Int}, B, T<:Union{UInt64, UInt128}}
new(Dict{T, B}())
end
end
# Add an element
@inline Base.setindex!(dict::HashedDict{VI, B, T}, value::B, key::VI2) where {VI, B, T, VI2} = dict.data[fnv1a_hash(key, T)] = value
# Retrieve an element
@inline Base.get(dict::HashedDict{VI, B, T}, key::VI2, default=nothing) where {VI, B, T, VI2} = get(dict.data, fnv1a_hash(key, T), default)
# Check whether a key exists
@inline Base.haskey(dict::HashedDict{VI, B, T}, key::VI2) where {VI, B, T,VI2} = haskey(dict.data, fnv1a_hash(key, T))
# Delete an element
@inline Base.delete!(dict::HashedDict{VI, B, T}, key::VI2) where {VI, B, T, VI2} = delete!(dict.data, fnv1a_hash(key, T))
# Get all keys (the original keys cannot be reconstructed from the hashes)
@inline Base.keys(dict::HashedDict) = keys(dict.data)
# Get all values
@inline Base.values(dict::HashedDict) = values(dict.data)
# Get the length of the dictionary
@inline Base.length(dict::HashedDict) = length(dict.data)
@inline Base.sizehint!(dict::HashedDict,l::Int64) = sizehint!(dict.data,l)
=#
################################################################################################################
## DoubleDict
################################################################################################################
#=struct DoubleDict{A,B} <: AbstractDict{A,B}
d1::Dict{A,B}
d2::Dict{A,B}
l1::Int64
l2::Int64
mystate::MVector{1,Int64}
function DoubleDict(d1::Dict{A,B}, d2::Dict{A,B}) where {A,B}
new{A,B}(d1, d2, length(d1), length(d2), MVector{1,Int64}([0]))
end
end
function Base.iterate(dd::DoubleDict{A,B}, state...) where {A,B}
dd.mystate[1] += 1
if dd.mystate[1] <= dd.l1
return iterate(dd.d1, state...)
elseif dd.mystate[1] <= dd.l1 + dd.l2
if dd.mystate[1] == dd.l1 + 1
return iterate(dd.d2)
else
return iterate(dd.d2, state...)
end
else
dd.mystate[1] = 0
return ((Int64[],zeros(B)),-1)
end
end=#
################################################################################################################
## Search Algorithms
################################################################################################################
@inline function findfirstassured(key,vec)
for i in eachindex(vec)
@inbounds vec[i] == key && (return i)
end
return 0
end
@Base.propagate_inbounds function findfirstassured_sorted(key,vec)
left, right = 1, length(vec)
while left <= right
mid = left + (right - left) ÷ 2
if vec[mid] == key
return mid # Key found at position mid
elseif vec[mid] < key
left = mid + 1 # Search in the right half
else
right = mid - 1 # Search in the left half
end
end
return -1 # Key not found in the vector
end
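#= Example (hedged sketch): the two search helpers above. `findfirstassured`
   scans linearly and returns 0 if the key is missing; `findfirstassured_sorted`
   performs a binary search on an ascendingly sorted vector and returns -1 if
   the key is missing.
findfirstassured(7, [5, 7, 9])           # -> 2
findfirstassured(8, [5, 7, 9])           # -> 0
findfirstassured_sorted(9, [5, 7, 9])    # -> 3
findfirstassured_sorted(8, [5, 7, 9])    # -> -1
=#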
function findfirstassured(key,vec,range)
for i in range
if vec[i] == key
return i
end
end
return 0
end
function transfer_values!(destination,origin,len,offset::Int=0)
for i in 1:len
destination[i] = origin[i+offset]
end
end
first_is_subset(sig,iter) = first_is_subset(sig,iter,typemax(eltype(sig)))
function first_is_subset(sig,iter,top)
k=1
i=1
len=length(sig)
while sig[len]>top
len-=1
end
len_k=length(iter)
while i<=len
while k<=len_k && sig[i]>iter[k]
k+=1
end
if k>len_k || iter[k]>sig[i]
break
end
i+=1
end
return i>len
end
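#= Example (hedged sketch): `first_is_subset` assumes both vectors are sorted in
   ascending order and checks whether every entry of `sig` (ignoring entries
   larger than `top`) also occurs in `iter`.
first_is_subset([1, 3, 5], [1, 2, 3, 4, 5, 6])   # -> true
first_is_subset([1, 3, 7], [1, 2, 3, 4, 5, 6])   # -> false, 7 is missing
first_is_subset([1, 3, 7], [1, 3], 5)            # -> true, 7 > top is ignored
=#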
################################################################################################################
## MapIterator
################################################################################################################
struct MapIterator{C, F}
collection::C
func::F
end
# Iterator interface
function Base.iterate(m::MI, state...) where {MI<:MapIterator{C, F} where {C,F}}
modify(::Nothing) = nothing
modify(data) = m.func(data[1]), data[2]
return modify(iterate(m.collection, state...))
end
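#= Example (hedged sketch): MapIterator applies `func` lazily during iteration.
   Only `iterate` is implemented, so use it in a loop rather than calling
   `length` or `collect` on it. The variable names are illustrative only.
squares = Int[]
for y in MapIterator([1, 2, 3], x -> x^2)
    push!(squares, y)        # collects 1, 4, 9
end
=#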
################################################################################################################
## ReadOnlyView
################################################################################################################
#=struct ReadOnlyView{T,N,SA<:AbstractArray{T,N}} <: AbstractArray{T,N}
data::SA
end
@inline Base.size(v::ReadOnlyView) = size(v.data)
@inline Base.getindex(v::ReadOnlyView, inds...) = getindex(v.data, inds...)
=#
@inline ReadOnlyView(a) = a
################################################################################################################
## StaticBool
################################################################################################################
struct StaticBool{S}
function StaticBool{S}() where {S}
new{S::Bool}()
end
end
Base.@pure StaticBool(S::Bool) = StaticBool{S}()
Base.@pure StaticBool(S::StaticBool{true}) = StaticBool{true}()
Base.@pure StaticBool(S::StaticBool{false}) = StaticBool{false}()
const StaticTrue = StaticBool{true}
const StaticFalse = StaticBool{false}
const statictrue = StaticTrue()
const staticfalse = StaticFalse()
#Base.@pure StaticBool(S) = StaticBool{false}()
@inline Base.:(==)(x::StaticBool{true}, y::Bool) = y == true
@inline Base.:(==)(x::StaticBool{false}, y::Bool) = y == false
@inline @generated Base.:(==)(y::Bool, x::SB ) where SB<:StaticBool = :(x==y)
@inline Base.:(==)(x::StaticBool{false}, y::StaticBool{false}) = true
@inline Base.:(==)(x::StaticBool{false}, y::StaticBool{true}) = false
@inline Base.:(==)(x::StaticBool{true}, y::StaticBool{false}) = false
@inline Base.:(==)(x::StaticBool{true}, y::StaticBool{true}) = true
@inline Base.:(!)(x::StaticBool{true}) = staticfalse
@inline Base.:(!)(x::StaticBool{false}) = statictrue
@inline Base.Bool(x::StaticBool{A}) where A = A
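#= Example (hedged sketch): StaticBool lifts a Bool into the type domain so that
   branches can be resolved by dispatch at compile time. The function `report`
   below is illustrative only.
report(::StaticTrue)  = "fast path"
report(::StaticFalse) = "generic path"
report(statictrue)     # -> "fast path"
Bool(staticfalse)      # -> false
statictrue == true     # -> true
=#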
################################################################################################################
## CompoundData
################################################################################################################
struct CompoundData
_start::Int64
_length::Int64
mutables::MVector{2,Int64}
function CompoundData(start::Int64, _start::Int64, length::Int64, _length::Int64)
m = MVector{2,Int64}([start, length])
return new(_start, _length, m)
end
end
@inline Base.getproperty(cd::CompoundData, prop::Symbol) = dyncast_get(cd,Val(prop))
@inline @generated dyncast_get(cd::CompoundData, ::Val{:start}) = :(getfield(cd,:mutables)[1])
@inline @generated dyncast_get(cd::CompoundData, ::Val{:length}) = :(getfield(cd,:mutables)[2])
@inline @generated dyncast_get(cd::CompoundData, d::Val{S}) where S = :( getfield(cd, S))
@inline Base.setproperty!(cd::CompoundData, prop::Symbol, val) = dyncast_set(cd,Val(prop),val)
@inline @generated dyncast_set(cd::CompoundData, ::Val{:start},val) = :(getfield(cd,:mutables)[1]=val)
@inline @generated dyncast_set(cd::CompoundData, ::Val{:length},val) = :(getfield(cd,:mutables)[2]=val)
@inline @generated dyncast_set(cd::CompoundData, d::Val{S},val) where S = :( setfield!(cd, S,val))
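#= Example (hedged sketch): `start` and `length` are redirected into the MVector,
   so they remain mutable although CompoundData itself is immutable, while the
   underscored fields keep their original values.
cd = CompoundData(1, 1, 10, 10)
cd.start          # -> 1  (read from cd.mutables[1])
cd.length = 12    # updates cd.mutables[2]
cd._length        # -> 10 (plain immutable field, unchanged)
=#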
################################################################################################################
## CompoundVector
################################################################################################################
struct CompoundVector{P, T <: Union{AbstractVector{P}, Nothing}} <: AbstractVector{P}
data::T
start::Int64
length::Int64
end
################################################################################################################
## SerialVector
################################################################################################################
struct SerialVector{P , T } <: AbstractVector{P}
vectors::T
end
const SerialVector_Vector{P} = SerialVector{P,Vector{CompoundVector{P,Vector{P}}}} where {P}
@inline Base.size(sv::SerialVector) = (sum(d->d.length,sv.vectors),)
# Constructor for SerialVector with a single CompoundVector
function SerialVector{P}(d::DD, c::CompoundData) where {P,DD<:Union{AbstractVector{P}, Nothing}}
cv = CompoundVector{P, typeof(d)}(d, c._start, c._length)
SerialVector{P,typeof((cv,))}((cv,))
end
@inline function copy(sv::SV) where {P,T,SV<:SerialVector{P,T}}
return SerialVector{P,T}(map(v->CompoundVector(deepcopy(v.data),v.start,v.length),sv.vectors))
end
# Constructor for SerialVector with an additional SerialVector
function SerialVector{P}(m::SerialVector{P}, d::Union{AbstractVector{P}, Nothing}, c::CompoundData) where P
cv = CompoundVector{P, typeof(d)}(d, c._start, c._length)
SerialVector{P,typeof((m.vectors..., cv))}((m.vectors..., cv))
end
@inline SerialVector(m::SerialVector{P}, d::Union{AbstractVector{P}, Nothing}, c::CompoundData) where P = SerialVector{P}(m, d, c)
function SerialVector_Vector{P}(d::Vector{P},c::CompoundData) where P
cv = CompoundVector{P, Vector{P}}(d, c._start, c._length)
SerialVector_Vector{P}([cv])
end
@inline SerialVector_Vector(d::Vector{P},c::CompoundData) where P = SerialVector_Vector{P}(d,c)
@inline function append!(V::SV,d::Vector{P},c::CompoundData) where {P, SV<:SerialVector_Vector{P}}
push!(V.vectors,CompoundVector{P, Vector{P}}(d, c._start, c._length))
# println("call this!")
return V
end
@inline append(V::SV,d::Vec,c::CompoundData) where {P, Vec, SV<:SerialVector{P}} = SerialVector(V,d,c)
@inline append(V::SV,d::Vector{P},c::CompoundData) where {P, SV<:SerialVector_Vector{P}} = append!(V,d,c)
@inline function Base.isassigned(sv::SerialVector{P}, index::Int) where P
for vec in sv.vectors
if vec.start <= index < vec.start + vec.length
return isassigned(vec.data,index - vec.start + 1)
end
end
return false
end
# getindex for SerialVector
@inline function Base.getindex(sv::SerialVector{P}, index::Int) where P
for vec in sv.vectors
if vec.start <= index < vec.start + vec.length
return vec.data[index - vec.start + 1]
end
end
throw(BoundsError(sv, index))
end
# setindex! for SerialVector
@inline function Base.setindex!(sv::SerialVector{P}, value, index::Int) where P
for vec in sv.vectors
if vec.start <= index < vec.start + vec.length
vec.data[index - vec.start + 1] = value
return value
end
end
error("Index out of bounds")
end
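#= Example (hedged sketch, using only the definitions above): a SerialVector_Vector
   glues several plain vectors together; each block's index range is described by a
   CompoundData record (here the blocks cover the indices 1:3 and 4:5). The call to
   append! refers to the three-argument method defined in this file.
c1 = CompoundData(1, 1, 3, 3)
sv = SerialVector_Vector([10.0, 20.0, 30.0], c1)
c2 = CompoundData(4, 4, 2, 2)
append!(sv, [40.0, 50.0], c2)
sv[4]          # -> 40.0, served by the second block
sv[2] = 21.0   # writes through to the first block
=#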
################################################################################################################
## MeshViewVector
################################################################################################################
struct MeshViewVector{T, AV <: AbstractVector{T}, M} <: AbstractVector{T}
data::AV
mesh::M
end
const MeshViewOnVector = MeshViewVector
# Forward getindex to the internal index determined by mesh
@inline function Base.getindex(v::MeshViewVector{T, AV, M}, index) where {T, AV <: AbstractVector{T}, M}
internal_idx = internal_index(v.mesh, index)
return getindex(v.data, internal_idx)
end
@inline function Base.isassigned(v::MeshViewVector{T, AV, M}, index::Integer) where {T, AV <: AbstractVector{T}, M}
internal_idx = internal_index(v.mesh, index)
return isassigned(v.data, internal_idx)
end
# Forward setindex! to the internal index determined by mesh
@inline function Base.setindex!(v::MeshViewVector{T, AV, M}, value, index) where {T, AV <: AbstractVector{T}, M}
internal_idx = internal_index(v.mesh, index)
setindex!(v.data, value, internal_idx)
end
@inline Base.size(mvv::MeshViewVector) = (length(mvv.mesh),)
################################################################################################################
## IntMeshViewOnVector
################################################################################################################
struct IntMeshViewOnVector{MV<:MeshViewVector{Int64}} <: AbstractVector{Int64}
data::MV
function IntMeshViewOnVector(data::AV,mesh::M) where {T, AV <: AbstractVector{T}, M}
mydata = MeshViewVector(data,mesh)
return new{typeof(mydata)}(mydata)
end
end
@inline function Base.getindex(v::IMV, index) where {IMV<:IntMeshViewOnVector}
return external_index(v.data.mesh,getindex(v.data, index))
end
@inline Base.size(imvv::IntMeshViewOnVector) = size(imvv.data)
################################################################################################################
## HVViewVector
################################################################################################################
# Poor man's version of a Julia view
struct HVViewVector{P, D<:AbstractVector{P}} <: AbstractVector{P}
start::Int64
length::Int64
data::D
function HVViewVector{P, D}(d::D, s::Int64, e::Int64) where {P, D<:AbstractVector{P}}
e2 = min(e,length(d))
s2 = s>e2 ? e2+1 : s
new{P, D}(s2-1, e2-s2+1, d)
end
end
HVViewVector(d::AbstractVector{P}, s::Int64, e::Int64) where P = HVViewVector{P, typeof(d)}(d, s, e)
@inline function Base.setindex!(v::HVViewVector, val, i::Int)
v.data[i + v.start] = val
end
@inline function Base.getindex(v::HVViewVector, i::Int)
v.data[i + v.start]
end
@inline Base.size(v::HVViewVector) = (v.length,)
function Base.show(io::IO, v::HVViewVector{P}) where P
    # print the elements v[1] to v[v.length] as a comma-separated list
    print(io,"[")
    for i in 1:v.length
        print(io,v[i], i<v.length ? "," : "")
    end
    print(io,"]")
end
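#= Example (hedged sketch): HVViewVector(data, s, e) exposes data[s:e] with
   1-based indexing into the window, reading and writing through to `data`.
v = HVViewVector([1.0, 2.0, 3.0, 4.0], 2, 3)
length(v)     # -> 2
v[1]          # -> 2.0
v[2] = 5.0    # writes the underlying vector at position 3
=#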
################################################################################################################
## ShuffleViewVector
################################################################################################################
using Base: getindex, setindex!, size, eltype, iterate
struct ShuffleViewVector{T, IV <: AbstractVector{Int64}, DV <: AbstractVector{T}} <: AbstractVector{T}
index::IV
data::DV
ShuffleViewVector(i::AbstractVector{Int64}, d::AbstractVector{T}) where T = new{T, typeof(i), typeof(d)}(i, d)
end
# Element access and mutation are redirected through the index vector
@inline Base.getindex(sv::ShuffleViewVector, i::Int) = sv.data[sv.index[i]]
@inline Base.isassigned(sv::ShuffleViewVector, i::Int) = isassigned(sv.data,sv.index[i])
@inline Base.setindex!(sv::ShuffleViewVector, value, i::Int) = sv.data[sv.index[i]] = value
@inline Base.size(sv::ShuffleViewVector) = size(sv.data)
@inline Base.eltype(::ShuffleViewVector{T}) where {T} = T
@inline function Base.iterate(sv::ShuffleViewVector, state=1)
state>length(sv.data) && return nothing
return sv.data[sv.index[state]], state+1
end
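#= Example (hedged sketch): ShuffleViewVector reads and writes through an index
   (e.g. permutation) vector.
sv = ShuffleViewVector([3, 1, 2], [10.0, 20.0, 30.0])
sv[1]          # -> 30.0, i.e. data[index[1]]
sv[2] = 5.0    # writes data[index[2]], i.e. data[1]
=#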
################################################################################################################
## ShortVector
################################################################################################################
mutable struct ShortVector{T} <: AbstractVector{T}
data::T
end
ShortVector{T}() where {T<:Real} = ShortVector{T}(0)
ShortVector{T}() where {T} = ShortVector{T}(T())
Base.length(::ShortVector{T}) where {T} = 1
Base.size(::ShortVector{T}) where {T} = (1,)
Base.getindex(v::ShortVector{T}, i::Int) where {T} = (i == 1) ? v.data : throw(BoundsError(v, i))
Base.setindex!(v::ShortVector{T}, value::T, i::Int) where {T} = (i == 1) ? (v.data = value) : throw(BoundsError(v, i))
Base.iterate(v::ShortVector{T}, state=1) where {T} = state == 1 ? (v.data, 2) : nothing
Base.show(io::IO, v::ShortVector{T}) where {T} = print(io, "ShortVector(", v.data, ")")
################################################################################################################
## CombinedSortedVector
################################################################################################################
struct CombinedSortedVector{T, V1<:AbstractVector{T}, V2<:AbstractVector{T}} <: AbstractVector{T}
first::V1
second::V2
end
# Implement the length function
function Base.length(v::CombinedSortedVector)
return length(v.first) + length(v.second)
end
# Implement the size function
function Base.size(v::CombinedSortedVector)
return (length(v),)
end
# Implement the getindex function
function Base.getindex(v::CombinedSortedVector, i::Int)
if i <= length(v.first)
return v.first[i]
else
return v.second[i - length(v.first)]
end
end
# Implement the "in" function for optimized search
function Base.:(in)(element, v::CombinedSortedVector)
i1 = searchsortedfirst(v.first, element)
if i1 <= length(v.first) && v.first[i1] == element
return true
end
i2 = searchsortedfirst(v.second, element)
if i2 <= length(v.second) && v.second[i2] == element
return true
end
return false
end
# Optional: Implement the iterate function to allow iteration over the combined vector
function Base.iterate(v::CombinedSortedVector, state=1)
if state <= length(v)
return (v[state], state + 1)
else
return nothing
end
end
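#= Example (hedged sketch): both halves must be individually sorted; `in` runs one
   binary search per half, while indexing simply concatenates the two halves.
v = CombinedSortedVector([1, 3, 5], [2, 4, 6])
4 in v     # -> true, found in the second half
7 in v     # -> false
v[4]       # -> 2, the first element of the second half
=#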
################################################################################################################
## FUN with TUPLES
################################################################################################################
@generated function remove_first_entry(t::Tuple)
if length(t.parameters) >= 2
new_tuple_type = Tuple{ t.parameters[2:end]...}
return :(
begin
($(Expr(:tuple, (:(t[$i]) for i in 2:length(t.parameters))...))::$(new_tuple_type))
end
)
end
return :(nothing)
end
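#= Example (hedged sketch): the generated function drops the first tuple entry in a
   type-stable way; a tuple with fewer than two entries collapses to `nothing`.
remove_first_entry((1, 2.0, "x"))   # -> (2.0, "x")
remove_first_entry((1,))            # -> nothing
=#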
@generated function cut_off_first(t::Tuple, ::Type{A}, command::Function) where {A}
if length(t.parameters) >= 2 && t.parameters[1]<:A
k = 2
while k <= length(t.parameters) && t.parameters[k]<:A
k += 1
end
if k > 2
new_tuple_type = Tuple{ t.parameters[k:end]...}
return :(
begin
new_head = command(t,$(k-1))
(new_head,), ($(Expr(:tuple, (:(t[$i]) for i in k:length(t.parameters))...))::$(new_tuple_type))
end
)
end
end
return :(tuple(), t)
end
@generated function cut_off_last(t::Tuple, ::Type{A}, command::Function, delta_max::Size{s} = Size(2)) where {A,s}
if length(t.parameters) >= 2 && t.parameters[end]<:A
k = length(t.parameters) - 1
while k >= 1 && t.parameters[k]<:A && k > length(t.parameters) - s[1]
k -= 1
end
if k < length(t.parameters) - 1
new_tuple_type = Tuple{t.parameters[1:k]...}
return :(
begin
println(s[1]," ", $k)
new_tail = command(t, $(k+1))
($(Expr(:tuple, (:(t[$i]) for i in 1:k)...))::$(new_tuple_type)), (new_tail,)
end
)
end
end
return :(t, tuple())
end
@inline function group_last(t::Tuple, ::Type{A}, command::Function, delta_max::Size{s} = Size(2)) where {A,s}
t1, t2 = cut_off_last(t,A,command,delta_max)
return (t1...,t2...)
end
#= # Following code for inspiration
@generated function transform_tuple2(t::Tuple, ::Type{A}, command::Function) where {A}
if length(t.parameters) >= 2 && t.parameters[1]<:A
k = 2
while k <= length(t.parameters) && t.parameters[k]<:A
k += 1
end
if k > 2
#new_tuple_type = Tuple{???, t.parameters[k:end]...}
return :(
begin
new_head = command(t,$(k-1))#B(undef, $(k - 1))
#for i in 1:$(k - 1)
# new_head[i] = t[i]
#end
(new_head,t[$k:end]...)
#($(Expr(:tuple, :new_head, (:(t[$i]) for i in k:length(t.parameters))...))::$(new_tuple_type))
end
)
end
end
return :(t)
end
=#
@generated function split_tuple_at_A_sequence(t::Tuple, ::Type{A}) where {A}
k = 1
while k < length(t.parameters) && !(t.parameters[k]<:A && t.parameters[k+1]<:A)
k += 1
end
if k < length(t.parameters)
return :(
(t[1:$(k - 1)], t[$k:end])
)
else
return :(
(t[1:end], tuple())
)
end
end
#= The following are examples for what one could look at when grouping
function mycollect(t,i)
ret = Vector{typeof(t[1])}(undef,i)
for k in 1:i
ret[k] = t[k]
end
return ret
end
function mycollectlast(t,i)
ret = Vector{typeof(t[1])}(undef,length(t)-i+1)
for k in i:length(t)
ret[k-i+1] = t[k]
end
return ret
end
=#
#=fulltransform_sequences(t::Tuple{}) = t
function fulltransform_sequences(t::Tuple,command::Function)
t1,t2 = split_tuple_at_A_sequence(t, Int64)
tv, t3 = cut_off_first(t2, Int, (t,i)->command(t,i))
t4 = fulltransform_sequences(t3,command)
return (t1...,tv...,t4...,)
end
=#
################################################################################################################
## FUN with StaticArrays
################################################################################################################
@StaticArrays.propagate_inbounds _mydeleteat(vec::StaticVector, index,proto) = __mydeleteat(Size(vec), vec, index,proto)
@StaticArrays.generated function __mydeleteat(::Size{s}, vec::StaticVector, index, proto) where {s}
newlen = s[1] - 1
exprs = [:(ifelse($i < index, vec[$i], vec[$i+1])) for i = 1:newlen]
return quote
@StaticArrays._propagate_inbounds_meta
@StaticArrays.boundscheck if (index < 1 || index > $(s[1]))
throw(BoundsError(vec, index))
end
@StaticArrays.inbounds return similar_type(proto, Size($newlen))(tuple($(exprs...)))
end
end
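#= Example (hedged sketch): _mydeleteat removes one entry from a static vector
   without heap allocation; `proto` only determines the static output type.
using StaticArrays
_mydeleteat(SVector(1.0, 2.0, 3.0), 2, SVector(0.0, 0.0))   # -> SVector(1.0, 3.0)
=#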
@StaticArrays.propagate_inbounds mystaticview(vec, indexset,proto::StaticVector) = _mystaticview(Size(proto), vec, indexset,proto)
@StaticArrays.generated function _mystaticview(::Size{s}, vec, index, proto::StaticVector) where {s}
newlen = s[1]
exprs = [:(vec[index[$i]] ) for i = 1:newlen]
return quote
@StaticArrays._propagate_inbounds_meta
@StaticArrays.boundscheck if (length(index)!=s[1])
throw(BoundsError(index, s[1]))
end
@StaticArrays.inbounds return similar_type(proto, Size($newlen))(tuple($(exprs...)))
end
end
@StaticArrays.propagate_inbounds mystaticversion(vec, proto::StaticVector) = _mystaticversion(Size(proto), vec, proto)
@StaticArrays.generated function _mystaticversion(::Size{s}, vec, proto::StaticVector) where {s}
newlen = s[1]
exprs = [:(vec[$i] ) for i = 1:newlen]
return quote
@StaticArrays._propagate_inbounds_meta
@StaticArrays.boundscheck if (length(vec)!=s[1])
throw(BoundsError(index, s[1]))
end
@StaticArrays.inbounds return similar_type(proto, Size($newlen))(tuple($(exprs...)))
end
end
@StaticArrays.propagate_inbounds convert_SVector(vec::StaticVector) = _convert_S(Size(vec), vec)
@StaticArrays.generated function _convert_S(::Size{s}, vec) where {s}
newlen = s[1]
exprs = [:(vec[$i] ) for i = 1:newlen]
return quote
@StaticArrays._propagate_inbounds_meta
@StaticArrays.boundscheck if (s[1]==0)
throw(BoundsError(1, s[1]))
end
@StaticArrays.inbounds return SVector{s[1],eltype(vec)}(tuple($(exprs...)))
end
end
#=@StaticArrays.propagate_inbounds intersects_cuboid_ball(c::StaticVector, mins::StaticVector, maxs::StaticVector, r_squared::Float64) = _intersects_cuboid_ball(Size(c),c, mins, maxs, r_squared)
@StaticArrays.generated function _intersects_cuboid_ball(::Size{s},c, mins, maxs, r_squared) where {s}
return quote
dists_squared = 0.0
δ = 0.0
@StaticArrays._propagate_inbounds_meta
@StaticArrays.boundscheck if (s[1]==0)
throw(BoundsError(1, s[1]))
end
for i in 1:s[1]
if c[i] < mins[i]
@StaticArrays.inbounds δ = mins[i] - c[i]
elseif c[i] > maxs[i]
@StaticArrays.inbounds δ = c[i] - maxs[i]
else
δ = 0.0
end
dists_squared += δ^2
end
return dists_squared <= r_squared
end
end
=#
u_default(a,b,c) = u_qr(a,b,c)
# From VoronoiGraph.jl
function u_qr(sig, xs::HN, i) where {P, HN<:AbstractVector{P}}
n = length(sig)
dimension = size(P)[1]
X = MMatrix{dimension, dimension, Float64}(undef)
for j in 1:i-1
X[:, j] = xs[sig[j]]
end
for j in i:n-1
X[:, j] = xs[sig[j+1]]
end
origin = X[:, end]
X[:, end] = xs[sig[i]]
X .-= origin
X = SMatrix(X)
Q, R = qr(X)
u = -Q[:,end] * sign(R[end,end])
return eltype(xs)(u)
end
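#= Example (hedged sketch): `u_qr` returns a unit vector orthogonal to the affine hull
   of the generators `xs[sig]` without the i-th one, oriented away from the removed
   generator via the sign of R[end,end]. Requires StaticArrays and LinearAlgebra
   (for `qr`, `dot`, `norm`).
using StaticArrays, LinearAlgebra
xs  = [SVector(0.0, 0.0), SVector(1.0, 0.0), SVector(0.0, 1.0)]
sig = [1, 2, 3]
u   = u_qr(sig, xs, 3)
abs(dot(u, xs[1] - xs[2])) < 1.0e-12   # orthogonal to the remaining difference vector
norm(u) ≈ 1.0                          # unit length; here u ≈ (0, -1), pointing away from xs[3]
=#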
| HighVoronoi | https://github.com/martinheida/HighVoronoi.jl.git |
|
[
"MIT"
] | 1.3.1 | bc1b8bfb168b8850d28c935b20a545882a9a078e | code | 8760 |
##############################################################################################################################################################
##############################################################################################################################################################
## DatabaseIndexIterator
##############################################################################################################################################################
##############################################################################################################################################################
struct DatabaseIndexIteratorData{IV<:AbstractVector{Int64},D}
indices::IV
database::D
return_all::Bool
current_index::Int64
DatabaseIndexIteratorData(i::II,d::DD,return_all::Bool) where {II,DD} = new{II,DD}(i,d,return_all,0)
end
mutable struct DatabaseIndexIterator{IV<:AbstractVector{Int64},D,R}
indices::IV
database::D
return_all::Bool
current_index::Int64
lock::R
DatabaseIndexIterator(data::DatabaseIndexIteratorData{II,DD},lock::R) where {II,DD,R} = new{II,DD,R}(data.indices,data.database,data.return_all,data.current_index,lock)
end
@inline function VertexIterator(m::AM,i::II,index::Int64, s::S=statictrue) where {T,AM<:AbstractMesh{T},II<:DatabaseIndexIteratorData,S<:StaticBool}
return VertexIterator(m,DatabaseIndexIterator(i,ReadWriteLock(m)),index,s)
end
# Define the iterate function
function Base.iterate(iter::DII, state=1) where {DII<:DatabaseIndexIterator}
len = length(iter.indices)
while state <= len
readlock(iter.lock)
index = iter.indices[state]
readunlock(iter.lock)
#if index==0
# open("protokol.txt","a") do f
# write(f,"index is 0: $state, $(length(iter.indices)) \n $(iter.indices)")
# end
#end
# Check if we should return this entry
if iter.return_all || index < 0
readlock(iter.lock)
entry = get_entry(iter.database, abs(index))
readunlock(iter.lock)
if length(entry[1])==0
state += 1
continue
end
iter.current_index = abs(index)
return (entry, state + 1)
end
# Move to the next state if we skip this index
state += 1
end
return nothing # End of iteration
end
@inline IterationIndex(dii::DII) where {DII<:DatabaseIndexIterator} = IterationIndex(dii.current_index)
# Define the length of the iterator
@inline Base.length(iter::DII) where {DII<:DatabaseIndexIterator} = length(iter.indices)
# Define the iterator traits for better integration
Base.IteratorSize(::Type{DII}) where {DII<:DatabaseIndexIterator} = Base.HasLength()
Base.IteratorEltype(::Type{DII}) where {DII<:DatabaseIndexIterator} = Base.HasEltype()
# Define the element type of the iterator
Base.eltype(::Type{<:DatabaseIndexIterator}) = SigmaView
##############################################################################################################################################################
##############################################################################################################################################################
## VDBVertexCentral
##############################################################################################################################################################
##############################################################################################################################################################
struct VDBVertexCentral{P<:Point,VI<:AbstractVector{Int64},D} <: VertexDBCentral{P}
indices::Vector{VI}
database::D
_offset::MVector{1,Int64}
data::Vector{MVector{2,Int64}}
end
function VDBVertexCentral(xs::HVNodes{P},database::D) where {P<:Point,D}
ind = [indexvector(database) for _ in 1:length(xs)]
d = [zeros(MVector{2,Int64}) for _ in 1:length(xs)]
return VDBVertexCentral{P,VectorType(D),D}(ind,database,MVector{1,Int64}([0]),d)
end
function VDBVertexCentral(vdb::VDBVertexCentral{P,VI,D}) where {P,VI,D}
ind = deepcopy(vdb.indices)
d = [copy(vdb.data[i]) for i in 1:length(vdb.data)]
return VDBVertexCentral{P,VI,D}(ind,vdb.database,copy(vdb._offset),d)
end
@inline copy(vdb::VDB;kwargs...) where VDB<:VDBVertexCentral = VDBVertexCentral(vdb;kwargs...)
@inline Base.getproperty(cd::VDB, prop::Symbol) where {VDB<:VDBVertexCentral} = dyncast_get(cd,Val(prop))
@inline @generated dyncast_get(cd::VDB, ::Val{:offset}) where {VDB<:VDBVertexCentral} = :(getfield(cd,:_offset)[1])
@inline @generated dyncast_get(cd::VDB, d::Val{S}) where {VDB<:VDBVertexCentral,S} = :( getfield(cd, S))
@inline Base.setproperty!(cd::VDB, prop::Symbol, val) where {VDB<:VDBVertexCentral} = dyncast_set(cd,Val(prop),val)
@inline @generated dyncast_set(cd::VDB, ::Val{:offset},val) where {VDB<:VDBVertexCentral} = :(getfield(cd,:_offset)[1]=val)
@inline set_offset(vdb::VDB,i) where {VDB<:VDBVertexCentral} = (vdb.offset = i)
@inline vertices_iterator(m::VDB, i::Int64,::StaticTrue) where {VDB<:VDBVertexCentral} = DatabaseIndexIteratorData(m.indices[i],m.database,true) #BufferVertexData(VDBVR_vertices_iterator(m,i),VDBVR_references_iterator(m,i))
@inline all_vertices_iterator(m::VDB, i::Int64,::StaticTrue) where {VDB<:VDBVertexCentral} = DatabaseIndexIterator(DatabaseIndexIteratorData(m.indices[i],m.database,false),nothing) #BufferVertexData(VDBVR_vertices_iterator(m,i),VDBVR_references_iterator(m,i))
@inline number_of_vertices(vdb::VDB, i::Int64,::StaticTrue) where {VDB<:VDBVertexCentral} = vdb.data[i][2]#length(m.indices[i])
@inline function Base.push!(vdb::VDB, p::Pair{Vector{Int64},T},i) where {T<:Point,VDB<:VDBVertexCentral{T} }
vdb.data[i][1] += 1 # this is the actual final index
vdb.data[i][2] += 1
ref = push!(vdb.database,p) #VertexRef(i+vdb.offset,vdb.data[i][1]) # get reference for other lists
if ref==0
open("protokol.txt","a") do f
write(f,"$ref from $p\n")
end
end
push!(vdb.indices[i],-ref) # push index of new vertex to this index list as member of "all_vertices" i.e. negative sign
return ref # return reference
end
@inline Base.haskey(vdb::VDB,sig::AVI,i::Int) where {VDB<:VDBVertexCentral, AVI<:AbstractVector{Int64}} = haskey(vdb.database,sig)
@inline function push_ref!(vdb::VDB, ref,i) where {VDB<:VDBVertexCentral}
vdb.data[i][1] += 1
vdb.data[i][2] += 1
if ref==0
open("protokol.txt","a") do f
write(f,"pushing 0 \n")
end
end
push!(vdb.indices[i],ref) # push index of new vertex to this index list as member of "vertices", i.e. positive sign
end
@inline function cleanupfilter!(vdb::VDB,i) where {VDB<:VDBVertexCentral}
end
@inline function mark_delete_vertex!(vdb::VDB,sig,i,ind) where {VDB<:VDBVertexCentral}
index = ind.index
vdb.data[i][2] -= 1
delete!(vdb.database,index,sig)
return index
end
@inline delete_reference(vdb::VDB,i,_) where {VDB<:VDBVertexCentral} = (vdb.data[i][2] -= 1)
##############################################################################################################################################################
##############################################################################################################################################################
## VDBVertexCentral_Store_1
##############################################################################################################################################################
##############################################################################################################################################################
struct VDBVertexCentral_Store_1{P<:Point,VI<:AbstractVector{Int64},D} <: VertexDBCentral{P}
indices::Vector{VI}
database::D
_offset::MVector{1,Int64}
data::Vector{MVector{2,Int64}}
end
JLD2.writeas(::Type{VDBVertexCentral{P,VI,D}}) where {P,VI,D} = VDBVertexCentral_Store_1{P,VI,D}
JLD2.wconvert(::Type{VDBVertexCentral_Store_1{P,VI,D}},m::VDBVertexCentral{P,VI,D}) where {P,VI,D} =
VDBVertexCentral_Store_1{P,VI,D}(m.indices,m.database,m._offset,m.data)
function JLD2.rconvert(::Type{VDBVertexCentral{P,VI,D}},m::VDBVertexCentral_Store_1{P,VI,D}) where {P,VI,D}
VDBVertexCentral{P,VI,D}(m.indices,m.database,m._offset,m.data)
end
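#= Example (hedged sketch): the same JLD2 custom-serialization hooks used above,
   shown on a toy pair of types. `writeas` declares the on-disk surrogate type,
   `wconvert` maps to it when saving and `rconvert` restores the runtime type when
   loading. The names `MyRuntime`/`MyStored` are illustrative only.
import JLD2
struct MyRuntime
    data::Vector{Int64}
    cache::Vector{Int64}    # recomputable, not worth storing
end
struct MyStored
    data::Vector{Int64}
end
JLD2.writeas(::Type{MyRuntime}) = MyStored
JLD2.wconvert(::Type{MyStored}, r::MyRuntime) = MyStored(r.data)
JLD2.rconvert(::Type{MyRuntime}, s::MyStored) = MyRuntime(s.data, cumsum(s.data))
=#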
| HighVoronoi | https://github.com/martinheida/HighVoronoi.jl.git |
|
[
"MIT"
] | 1.3.1 | bc1b8bfb168b8850d28c935b20a545882a9a078e | code | 5040 | struct VDBExplicitHeap{P<:Point,RT} <: VertexDBExplicit{P}
All_Vertices::Vector{Dict{Vector{Int64},P}}
Buffer_Vertices::Vector{Dict{Vector{Int64},P}}
references::RT # stores at position `external_index` the `internal_index`
end
function VDBExplicitHeap(xs::HVNodes{P},refs) where P
vert=Dict{Vector{Int64},eltype(xs)}()
vertlist1=Vector{typeof(vert)}(undef,length(xs))
vertlist2=Vector{typeof(vert)}(undef,length(xs))
for i in 1:length(xs)
vertlist1[i]=Dict{Vector{Int64},P}()
vertlist2[i]=Dict{Vector{Int64},P}()
end
getmyrefs(r::Vector{Int}) = copy(r)
getmyrefs(r) = nothing
return VDBExplicitHeap{P,typeof(refs)}(vertlist1,vertlist2,getmyrefs(refs))
end
function VDBExplicitHeap(vdb::VDBExplicitHeap{P},refs::RT) where {P,RT}
lxs = length(vdb.All_Vertices)
vert=Dict{Vector{Int64},P}()
vertlist1=Vector{typeof(vert)}(undef,lxs)
vertlist2=Vector{typeof(vert)}(undef,lxs)
for i in 1:lxs
vertlist1[i]=Dict{Vector{Int64},P}()
vertlist2[i]=Dict{Vector{Int64},P}()
end
getmyrefs(r::Vector{Int}) = copy(r)
getmyrefs(r) = nothing
for i in 1:lxs
for (sig,r) in vdb.All_Vertices[i]
sig2 = copy(sig)
push!(vertlist1[i],sig2=>r)
for k in 2:length(sig)
sig[k]>lxs && break
push!(vertlist2[sig[k]],sig2=>r)
end
end
end
return VDBExplicitHeap{P,typeof(refs)}(vertlist1,vertlist2,getmyrefs(refs))
end
struct VDBExplicitHeapStorage_1{P<:Point,RT}
All_Vertices::Vector{Dict{Vector{Int64},P}}
references::RT # stores at position `external_index` the `internal_index`
end
JLD2.writeas(::Type{VDBExplicitHeap{P,RT}}) where {P<:Point,RT} = VDBExplicitHeapStorage_1{P,RT}
JLD2.wconvert(::Type{VDBExplicitHeapStorage_1{P,RT}},vdb::VDBExplicitHeap{P,RT}) where {P<:Point,RT} = VDBExplicitHeapStorage_1{P,RT}(vdb.All_Vertices,vdb.references)
function JLD2.rconvert(::Type{VDBExplicitHeap{P,RT}},data::VDBExplicitHeapStorage_1{P,RT}) where {P<:Point,RT}
bv = [Dict{Vector{Int64},P}() for _ in 1:length(data.All_Vertices)]
re = VDBExplicitHeap{P,RT}(data.All_Vertices,bv,data.references)
new_Buffer_vertices!(re)
return re
end
@inline dimension(::VDBExplicitHeap{P}) where P = size(P)[1]
#@inline vertices_iterator(m::VDB, i::Int64,static::StaticTrue) where {T,VDB<:VDBExplicitHeap{T}}= MyFlatten(m.All_Vertices[i],m.Buffer_Vertices[i],T)
@inline vertices_iterator(m::VDBExplicitHeap, i::Int64,static::StaticTrue) = Iterators.Flatten((m.All_Vertices[i],m.Buffer_Vertices[i]))
#@inline vertices_iterator(m::VDBExplicitHeap, i::Int64,static::StaticTrue) = DoubleDict(m.All_Vertices[i],m.Buffer_Vertices[i])
@inline all_vertices_iterator(m::VDBExplicitHeap, i::Int64, static::StaticTrue) = m.All_Vertices[i]
@inline number_of_vertices(m::VDBExplicitHeap, i::Int64,static::StaticTrue) = length(m.All_Vertices[i]) + length(m.Buffer_Vertices[i])
@inline set_offset(vdb::VDBExplicitHeap,i) = nothing
@inline haskey(mesh::VDBExplicitHeap,sig::AbstractVector{Int64},i::Int) = Base.haskey(mesh.All_Vertices[i],sig)
@inline push_ref!(vdb::VDBExplicitHeap{T}, p::Pair{Vector{Int64},T},i) where {T<:Point} = push!(vdb.Buffer_Vertices[i],p)
@inline function push!(vdb::VDBExplicitHeap{T}, p::Pair{Vector{Int64},T},i) where {T<:Point}
push!(vdb.All_Vertices[i],p)
return p
end
@inline function mark_delete_vertex!(vdb::VDB,sig,_,_) where {VDB<:VDBExplicitHeap}
empty!(sig)
return nothing
end
@inline function cleanupfilter!(vdb::VDBExplicitHeap,i) # assume i in internal representation
filter!( x->( length(x.first)!=0 ), vdb.All_Vertices[i] )
filter!( x->( length(x.first)!=0 ), vdb.Buffer_Vertices[i] )
end
@inline copy(vdb::VDB;kwargs...) where VDB<:VDBExplicitHeap = VDBExplicitHeap(vdb,vdb.references)
#=@inline internal_index(m::VDBExplicitHeap{P,RT},i::Int64) where {P<:Point,RT<:AbstractVector{Int}} = m.length_ref[1]!=0 ? m.references[i] : i
@inline external_index(m::VDBExplicitHeap{P,RT},i::Int64) where {P<:Point,RT<:AbstractVector{Int}} = m.length_ref[1]!=0 ? findfirstassured_sorted(i,m.references) : i
@inline internal_index(m::VDBExplicitHeap{P,Nothing},i::Int64) where {P<:Point} = i
@inline external_index(m::VDBExplicitHeap{P,Nothing},i::Int64) where {P<:Point} = i
=#
@doc raw"""
    new_Buffer_vertices!(mesh::VDBExplicitHeap)
computes the lists of Buffer_Vertices from the already determined lists All_Vertices by registering each vertex with every further node of its signature
"""
function new_Buffer_vertices!(mesh::VDB) where VDB<:VDBExplicitHeap
lmesh = length(mesh.All_Vertices)
for i in 1:lmesh
for (sigma,r) in mesh.All_Vertices[i]
lsigma = length(sigma)
for k in 2:lsigma
Index=sigma[k]
if (Index<=lmesh) push!(mesh.Buffer_Vertices[Index],sigma=>r) end
end
end
end
return mesh
end
| HighVoronoi | https://github.com/martinheida/HighVoronoi.jl.git |
|
[
"MIT"
] | 1.3.1 | bc1b8bfb168b8850d28c935b20a545882a9a078e | code | 12359 | const ___VDBVertices{P} = Vector{Vector{Pair{Vector{Int64}, P}}} where P<:Point
const ___VDBIndex = Vector{Dict{Vector{Int64},Int64}}
#const ___VDBIndexCompact{T} = Vector{HashedDict{Vector{Int64},Int64,T}}
const ___VDBVertexRefs = Vector{Vector{VertexRef}}
const empty_sigma = Int64[]
struct VDBVertexRef{P<:Point,VT,II,R} <: VertexDBReference{P}
vertices::VT # ...[i] = (sig,r)
indices::II # [i] = sig=>index
refs::R # ...[i] = VertexRef(...)
filename::String
_offset::MVector{1,Int64}
data::Vector{MVector{4,Int64}}
end
function VDBVertexRef(xs::HVNodes{P},filename::String, vertices::StaticBool, indices::StaticBool, refs::StaticBool) where P<:Point
v = VDBVertexRef_Vertices(xs,vertices)
i = VDBVertexRef_Indices(xs,staticfalse)
r = VDBVertexRef_Refs(xs,refs)
d = [MVector{4,Int64}(zeros(Int64,4)) for _ in 1:length(xs)]
return VDBVertexRef{P,typeof(v),typeof(i),typeof(r)}(v,i,r,filename,MVector{1,Int64}([0]),d)
end
function VDBVertexRef(vdb::VDBVertexRef{P};filename=vdb.filename, kwargs...) where P
i = copy_indices(vdb;kwargs...)
v = copy_vertices(vdb,i;kwargs...)
r = copy_refs(vdb;kwargs...)
d = [copy(vdb.data[i]) for i in 1:length(vdb.data)]
return VDBVertexRef{P,typeof(v),typeof(i),typeof(r)}(v,i,r,filename,copy(vdb._offset),d)
end
@inline copy(vdb::VDB;kwargs...) where VDB<:VDBVertexRef = VDBVertexRef(vdb;kwargs...)
@inline VDBVertexRef_Vertices(xs::HVNodes{P},vertices::StaticTrue) where P<:Point = nothing
@inline VDBVertexRef_Vertices(xs::HVNodes{P},vertices::StaticFalse) where P<:Point = [Vector{Pair{Vector{Int64}, P}}() for _ in 1:length(xs)]
@inline VDBVertexRef_Indices(xs,indices::StaticFalse) = [Dict{Vector{Int64}, Int64}() for _ in 1:length(xs)]
#@inline VDBVertexRef_Indices(xs,indices::StaticTrue) = nothing
@inline VDBVertexRef_Refs(xs,refs::StaticTrue) = nothing
@inline VDBVertexRef_Refs(xs,refs::StaticFalse) = [Vector{VertexRef}() for _ in 1:length(xs)]
@inline Base.getproperty(cd::VDB, prop::Symbol) where {VDB<:VDBVertexRef} = dyncast_get(cd,Val(prop))
@inline @generated dyncast_get(cd::VDB, ::Val{:offset}) where {VDB<:VDBVertexRef} = :(getfield(cd,:_offset)[1])
@inline @generated dyncast_get(cd::VDB, d::Val{S}) where {VDB<:VDBVertexRef,S} = :( getfield(cd, S))
@inline Base.setproperty!(cd::VDB, prop::Symbol, val) where {VDB<:VDBVertexRef} = dyncast_set(cd,Val(prop),val)
@inline @generated dyncast_set(cd::VDB, ::Val{:offset},val) where {VDB<:VDBVertexRef} = :(getfield(cd,:_offset)[1]=val)
@inline set_offset(vdb::VDB,i) where {VDB<:VDBVertexRef} = (vdb.offset = i)
@inline vertices_iterator(m::VDB, i::Int64,::StaticTrue) where {VDB<:VDBVertexRef} = BufferVertexData(VDBVR_vertices_iterator(m,i),VDBVR_references_iterator(m,i))
@inline number_of_vertices(m::VDB, i::Int64,::StaticTrue) where {VDB<:VDBVertexRef} = number_own_vertices(m,i) + number_other_vertices(m,i)
@inline number_own_vertices(vdb::VDB,i::Int64) where VDB<:VDBVertexRef = vdb.data[i][2]
@inline number_other_vertices(vdb::VDB,i::Int64) where VDB<:VDBVertexRef = vdb.data[i][4]
@inline function push!(vdb::VDB, p::Pair{Vector{Int64},T},i) where {T<:Point,VDB<:VDBVertexRef{T} }
vdb.data[i][1] += 1
vdb.data[i][2] += 1
ref = VertexRef(i+vdb.offset,vdb.data[i][1]) # get reference for other lists
push_index!(vdb,p,i) # push sig=>vdb.data[i][1] to index list
push_data!(vdb,p,i) # actually push vertex
return ref # return reference
end
@inline function cleanupfilter!(vdb::VDB,i) where {VDB<:VDBVertexRef}
cleanupfilter_refs!(vdb,i)
cleanupfilter_vertices!(vdb,i)
cleanupfilter_indeces!(vdb,i)
end
####################################################################################################################################
const VDBVertexRefHeapVertices{P,II,R} = VDBVertexRef{P,___VDBVertices{P},II,R} where {P,II,R}
####################################################################################################################################
@inline VDBVR_vertices_iterator(m::VDBVertexRefHeapVertices,i) = view(m.vertices[i],1:m.data[i][1])
@inline all_vertices_iterator(m::VDBVertexRefHeapVertices, i::Int64, static::StaticTrue) = VertexDictIterator(m,m.vertices[i],m.data[i][1]) # view(m.vertices[i],1:m.data[i][1])
function push_data!(vdb::VDBVertexRefHeapVertices{T},p::Pair{Vector{Int64},T},i::Int64) where T
if length(vdb.vertices[i])<vdb.data[i][1]
new_l = length(vdb.vertices[i]) + 10
resize!(vdb.vertices[i],new_l)
end
vdb.vertices[i][vdb.data[i][1]] = p
end
@inline function delete_sigma(m::VDBVertexRefHeapVertices,i,index)
m.vertices[i][index] = empty_sigma => m.vertices[i][index].second
end
@inline get_vertex(vdb::VDB,p::VertexRef) where {VDB<:VDBVertexRefHeapVertices} = begin
ver = vdb.vertices[p.cell]
return ver[p.index]
end
@inline cleanupfilter_vertices!(vdb::VDBVertexRefHeapVertices,i) = nothing
function copy_vertices(vdb::VDBVertexRefHeapVertices{P},indices::Vector{Dict{Vector{Int64}, Int64}};kwargs...) where P
lvdb = length(vdb.vertices)
vertices = Vector{Vector{Pair{Vector{Int64}, P}}}(undef,lvdb)
origin = zeros(P)
for i in 1:lvdb
list = vdb.vertices[i]
vertices[i] = Vector{Pair{Vector{Int64}, P}}(undef,length(list))
for (sig,k) in indices[i]
vertices[i][k] = sig=>list[k][2]
end
for k in 1:vdb.data[i][1]
if !isassigned(vertices[i],k)
vertices[i][k] = empty_sigma => origin
end
end
end
return vertices
end
function copy_vertices(vdb::VDBVertexRefHeapVertices{P},indices;kwargs...) where P
lvdb = length(vdb.vertices)
vertices = Vector{Vector{Pair{Vector{Int64}, P}}}(undef,lvdb)
for i in 1:lvdb
list = vdb.vertices[i]
vertices[i] = [Pair(copy(list[k][1]),list[k][2]) for k in 1:vdb.data[i][1]]
end
return vertices
end
####################################################################################################################################
#const VDBVertexRefHeapIndices{P,VT,R} = VDBVertexRef{P,VT,T,R} where {P,VT,R,T}
# Everything around `indices`
####################################################################################################################################
@inline haskey(vdb::VDB,sig::AbstractVector{Int64},i::Int) where {VDB<:VDBVertexRef} = Base.haskey(vdb.indices[i],sig)
@inline push_index!(vdb::VDB,p::Pair{Vector{Int64},T},i::Int64) where {T<:Point,VDB<:VDBVertexRef{T}} = push!(vdb.indices[i],p[1]=>vdb.data[i][1])
@inline function mark_delete_vertex!(vdb::VDB,sig,i,_) where {VDB<:VDBVertexRef}
index = vdb.indices[i][sig]
vr = VertexRef(sig[1],index)
delete!(vdb.indices[i],sig)
delete_sigma(vdb,i,index)
vdb.data[i][2] -= 1
return vr
end
@inline cleanupfilter_indeces!(vdb::VDB,i) where {VDB<:VDBVertexRef}= nothing
function copy_indices(vdb::VDB;kwargs...) where {VDB<:VDBVertexRef}
li = length(vdb.indices)
indices = Vector{Dict{Vector{Int64}, Int64}}(undef,li)
for i in 1:li
indices[i] = Dict{Vector{Int64}, Int64}()
sizehint!(indices[i],length(vdb.indices[i]))
for (sig,k) in vdb.indices[i]
push!(indices[i],copy(sig)=>k)
end
end
return indices
end
####################################################################################################################################
const VDBVertexRefHeapRefs{P,VT,II} = VDBVertexRef{P,VT,II,___VDBVertexRefs} where {P,VT,II}
####################################################################################################################################
@inline VDBVR_references_iterator(m::VDBVertexRefHeapRefs,i) = view(m.refs[i],1:m.data[i][3])
function push_ref!(vdb::VDBVertexRefHeapRefs, p::VertexRef,i)
vdb.data[i][3] += 1
vdb.data[i][4] += 1
vdbri = vdb.refs[i]
vdbdi3 = vdb.data[i][3]
if length(vdbri)<vdbdi3
new_l = length(vdbri) + 100
resize!(vdbri,new_l)
end
vdbri[vdbdi3] = p
end
@inline function delete_reference(vdb::VDBVertexRefHeapRefs{T},s,ref) where {T<:Point}
for i in 1:vdb.data[s][3]
r = vdb.refs[s][i]
if r==ref
vdb.refs[s][i] = VertexRef(0,0)
vdb.data[s][4] -= 1
return
end
end
end
@inline function cleanupfilter_refs!(vdb::VDBVertexRefHeapRefs,i)
end
function copy_refs(vdb::VDBVertexRefHeapVertices{P};kwargs...) where P
lvdb = length(vdb.refs)
refs = Vector{Vector{VertexRef}}(undef,lvdb)
for i in 1:lvdb
list = vdb.refs[i]
refs[i] = [list[k] for k in 1:vdb.data[i][3]]
end
return refs
end
####################################################################################################################################
const VDBVertexRefStoreVertices{P,II,R} = VDBVertexRef{P,Nothing,II,R} where {P,II,R}
####################################################################################################################################
@inline function all_vertices_iterator(m::VDBVertexRefStoreVertices, i::Int64, static::StaticTrue)
println("here")
collect(values(m.indices))
end
function VDBVR_vertices_iterator(m::VDBVertexRefStoreVertices,i)
end
####################################################################################################################################
const VDBVertexRefStoreRefs{P,VT,II} = VDBVertexRef{P,VT,II,Nothing} where {P,VT,II}
####################################################################################################################################
function VDBVR_references_iterator(m::VDBVertexRefStoreRefs,i)
end
#=
@inline function mark_delete_vertex!(vdb::VDB,sig) where {VDB<:VDBVertexRef}
empty!(sig)
return nothing
end
@inline function cleanupfilter!(vdb::VDBVertexRef,i) # assume i in internal representation
filter!( x->( length(x.first)!=0 ), vdb.All_Vertices[i] )
filter!( x->( length(x.first)!=0 ), vdb.Buffer_Vertices[i] )
end
=#
struct VDBVertexRef_Store_1{P,VT,II,R}
vertices::VT # ...[i] = (sig,r)
indices::II # [i] = sig=>index
refs::R # ...[i] = VertexRef(...)
filename::String
_offset::MVector{1,Int64}
data::Vector{MVector{4,Int64}}
end
JLD2.writeas(::Type{VDBVertexRef{P,VT,II,R} }) where {P,VT,II,R} = VDBVertexRef_Store_1{P,VT,II,R}
JLD2.wconvert(::Type{VDBVertexRef_Store_1{P,VT,II,R} },m::VDBVertexRef{P,VT,II,R} ) where {P,VT,II,R} =
VDBVertexRef_Store_1{P,VT,II,R}(store_vertices(m.vertices),store_indices(m.indices),store_refs(m.refs),m.filename,m._offset, m.data)
function JLD2.rconvert(::Type{VDBVertexRef{P,VT,II,R} },m::VDBVertexRef_Store_1{P,VT,II,R}) where {P,VT,II,R}
verts = load_vertices(m.vertices)
inds = load_indices(m.indices,m.vertices)
refs = load_refs(m.refs)
VDBVertexRef{P,VT,II,R}(verts,inds,refs,m.filename,m._offset, m.data)
end
@inline store_vertices(v::V) where V = v
@inline load_vertices(v) = v
@inline store_indices(i::___VDBIndex) = [Dict{Vector{Int64},Int64}() for _ in 1:length(i)]
#@inline store_indices(i::___VDBIndexCompact{T}) where T = [HashedDict{Vector{Int64},Int64,T}() for _ in 1:length(i)]
function load_indices(indices::Union{___VDBIndex},verts) #where T #Union{___VDBIndex,___VDBIndexCompact{T}}
for i in 1:length(indices)
for (sig,k) in safe_sig_index_iterator(verts[i])
push!(indices[i],sig=>k)
end
end
return indices
end
store_refs(r) = r
load_refs(r) = r
struct SafeSigIndexIteratorHeap{P}
verts::Vector{Pair{Vector{Int64}, P}}
end
safe_sig_index_iterator(v::Vector{Pair{Vector{Int64}, P}}) where P = SafeSigIndexIteratorHeap(v)
function Base.iterate(ssii::SSII,start=1) where SSII<:SafeSigIndexIteratorHeap
ll = length(ssii.verts)
while start<=ll
if isassigned(ssii.verts,start) && length(ssii.verts[start][1])>0
break
end
start+=1
end
if start<=ll
return (ssii.verts[start][1],start), start+1
else
return nothing
end
end
| HighVoronoi | https://github.com/martinheida/HighVoronoi.jl.git |
|
[
"MIT"
] | 1.3.1 | bc1b8bfb168b8850d28c935b20a545882a9a078e | code | 2850 | struct Valid_Vertex_Checker{S,T}
sig::Vector{Int64}
r::MVector{S,Float64}
xs::Vector{T}
boundary::Boundary
localbase::Vector{MVector{S,Float64}}
localxs::Vector{MVector{S,Float64}}
function Valid_Vertex_Checker(xs,boundary::Boundary)
dim = length(xs[1])
        return new{dim,eltype(xs)}(zeros(Int64,2^dim),MVector{dim,Float64}(zeros(Float64,dim)),xs,boundary,empty_local_Base(dim),empty_local_Base(dim))
end
end
function check(VVC::Valid_Vertex_Checker,sig,r,keeps,lmax,modified_tracker)
println("this is currently disabled.")
#= lsig = length(sig)
localdim = 1
dim = length(r)
lsig<=dim && (return false)
for i in 1:dim
VVC.localbase[dim][i] = randn()
end
normalize!(VVC.localbase[dim])
sig_end = lsig
mydim = 1
for i in lsig:-1:1
sig[i]<=lmax && break
VVC.boundary.planes[sig[i]-lmax].BC>1 && continue
sig_end -= 1
mydim += 1
VVC.localbase[i] .= VVC.boundary.planes[sig[i]-lmax].normal
rotate(VVC.localbase,i,dim)
rotate(VVC.localbase,i,dim)
end
sigpos = 2
while mydim<=dim
sigpos>sig_end && (return false)
while sigpos<=sig_end
VVC.localbase[mydim] .= xs[sig[sigpos]]
VVC.localbase[mydim] .-= xs[sig[1]]
normalize!(VVC.localbase[mydim])
sigpos += 1
( !(sig[sigpos] in keeps) ) && continue
if abs(dot(VVC.localbase[mydim],VVC.localbase[dim]))>1.0E-10
rotate(VVC.localbase,mydim,dim)
rotate(VVC.localbase,mydim,dim)
break
end
end
mydim += 1
end=#
return true
end
struct ModifiedTracker
data::Vector{BitVector}
neighbors::Vector{Vector{Int64}}
function ModifiedTracker(neighbors)
ln = length(neighbors)
data = Vector{BitVector}(undef,ln)
for i in 1:ln
data[i] = BitVector(undef,length(neighbors[i]))
data[i] .= false
end
return new(data,neighbors)
end
end
#=
function set_index(mt::ModifiedTracker,node,neigh,val)
if neigh in mt.neighbors[node]
f = findfirst(n->n==neigh,mt.neighbors[node])
mt.data[node][f] = val
end
end
=#
function check(VVC::Valid_Vertex_Checker,sig,r,keeps,lmax,modified_tracker::ModifiedTracker)
c = check(VVC,sig,r,keeps,lmax,1)
#= if c==false
lsig = length(sig)
for i in 1:lsig
s = sig[i]
s>lmax && break
(!(s in keeps)) && continue
for j in 1:lsig
j==i && continue
set_index(modified_tracker,s,sig[j],true)
end
end
end=#
return c
end
| HighVoronoi | https://github.com/martinheida/HighVoronoi.jl.git |
|
[
"MIT"
] | 1.3.1 | bc1b8bfb168b8850d28c935b20a545882a9a078e | code | 18365 |
function EmptyDictOfType(x)
d=Dict(x)
pop!(d)
return d
end
function VectorOfDict(x,len)
proto = EmptyDictOfType( x )
ret = Vector{typeof(proto)}(undef,len)
len==0 && return
for i in 1:len
ret[i]=copy(proto)
end
return ret
end
####################################################################################################################################
## Mesh-related content
####################################################################################################################################
"""
struct boundary_vertex{T}
base::T
direction::T
node::Int64
end
typically provided in a dictionary `boundary_Verteces::Dict{Vector{Int64},boundary_vertex{T}}` in the format
`sig=>bv`. Here `sig=[s1,...,sd]` is a d-dimensional vector of `Int` whose `d` nodes define a direction,
stored in `bv.direction`. The ray starts at `bv.base`, which is the vertex that was originally created by
the signature `[sig;bv.node]`.
"""
struct boundary_vertex{T}
base::T
direction::T
node::Int64
function boundary_vertex{T}(b::T,dir::T,n) where {T}
return new(b,dir,n)
end
function boundary_vertex(b,dir,n)
bv=boundary_vertex{typeof(b)}(b,dir,n)
return bv
end
end
@doc raw"""
intersect(B::Boundary,v::boundary_vertex)
returns the pair `i`,`t` such that the point `v.base + t*v.direction` lies in `B.planes[i]`.
`i` is chosen such that `t` is the minimal positive value, i.e. `B.planes[i]` is the part of
the boundary that is actually hit by the ray `v`.
"""
function intersect(B::Boundary,v::boundary_vertex,condition=(x->true))
return intersect(B,v.base,v.direction,condition)
end
struct __vstore{S1,S2,S3}
filename::String
vertices::S1
indices::S2
refs::S3
function __vstore(f,v,i,r)
_v = StaticBool(v)
_i = StaticBool(i)
_r = StaticBool(r)
return new{typeof(_v),typeof(_i),typeof(_r)}(f,_v,_i,_r)
end
end
struct DatabaseVertexStorage{F}
file::F
DatabaseVertexStorage() = new{Nothing}(nothing)
DatabaseVertexStorage(d::D) where D = new{D}(d)
end
ClassicVertexStorage() = nothing
ReferencedVertexStorage() = ExternalMemory()
#ExternalMemory(::Nothing) = nothing
ExternalMemory(;filename="",vertices=false,indices=false,refs=false) = __vstore(filename,vertices,indices,refs)
#ExternalMemory(i::Int) = i<5 ? ExternalMemory(nothing) : ExternalMemory(filename="",vertices=false,indices=false,refs=false)
change_db_type(_,::MT) where {MT<:MultiThread} = DatabaseVertexStorage()
change_db_type(type,::ST) where {ST<:SingleThread} = type
"""
Voronoi_MESH{T}
Provides the infrastructure for storing a Voronoi mesh with nodes in R^d of type T
(i.e. T is supposed to be a vector of reals)
Fields:
nodes: array of the nodes of the Grid
    All_Verteces: an array storing for each node `i` those vertices whose smallest node index is `i`
    Buffer_Verteces: an array storing the remaining vertices of node `i`, i.e. those whose smallest
    node index is smaller than `i`
"""
struct Voronoi_MESH{T<:Point, VDB <: VertexDB{T}, DB<:VDB, VT<:HVNodes{T},RT,PARAMS} <:AbstractMesh{T,VDB}
_nodes::VT
vertices::DB
boundary_Vertices::Dict{Vector{Int64},boundary_vertex{T}}
references::RT
length_ref::MVector{1,Int64}
initial_length::Int64
buffer_sig::Vector{Int64}
parameters::PARAMS
# neighbors::Vector{Vector{Int64}}
end
function _ClassicMesh(n,av::Vector{Dict{Vector{Int64},T}},bv,bounV) where {T}
if !(typeof(bounV)<:Dict{Vector{Int64},boundary_vertex{T}})
bounV = Dict{Vector{Int64},boundary_vertex{T}}()
end
Heap = VDBExplicitHeap{T,Nothing}(av,bv,nothing)
new_Buffer_vertices!(Heap)
return Voronoi_MESH{T, VertexDBExplicit{T}, VDBExplicitHeap{T,Nothing}, typeof(n), Nothing, Nothing}(n,Heap,bounV,nothing,MVector{1,Int64}(0),length(n),Int64[],nothing)
end
Voronoi_MESH(xs::Points,refs) = Voronoi_MESH(xs,refs,nothing)
function Voronoi_MESH(xs::Points,refs,vertex_storage::DatabaseVertexStorage) #where {T}
P = eltype(xs)
dimension = size(P)[1]
hdb = HeapDataBase(P,round(Int,max(2^dimension+2*dimension,lowerbound(dimension,dimension))))
return Voronoi_MESH(xs,refs,hdb)
end
function Voronoi_MESH(xs::Points,refs,vertex_storage) #where {T}
bound=Dict{Vector{Int64},boundary_vertex{eltype(xs)}}()
verts = VoronoiVDB(vertex_storage,xs,refs)
if (typeof(refs)<:AbstractVector)
resize!(refs,length(xs))
refs .= collect(1:length(xs))
end
tt=Voronoi_MESH{eltype(xs),ptype(verts),typeof(verts),typeof(xs),typeof(refs),typeof(vertex_storage)}(xs,verts,bound,refs,MVector{1,Int64}([0]),length(xs),Int64[],vertex_storage)#,nn)
return tt
end
VoronoiVDB(vertex_storage::Nothing,xs,refs) = VDBExplicitHeap(xs,refs)
VoronoiVDB(vs::Union{NamedTuple,__vstore},xs,refs) = VDBVertexRef(xs,vs.filename,vs.vertices,vs.indices,vs.refs)
VoronoiVDB(vertex_storage::HDB,xs,refs) where {HDB<:HeapDataBase} = VDBVertexCentral(xs,vertex_storage)
const ClassicMesh{T<:Point, VT<:HVNodes{T}} = Voronoi_MESH{T, VertexDBExplicit{T}, VDBExplicitHeap{T,Nothing}, VT, Nothing, Nothing}
const AppendableVoronoiMesh{T, VDB, DB, VT, RT} = Voronoi_MESH{T, VDB , DB, VT, RT, Nothing} where {T<:Point, VDB <: VertexDB{T}, DB<:VDB, VT<:HVNodes{T}, RT}
const NoRefVoronoiMesh{T, VDB, DB, VT, P} = Voronoi_MESH{T, VDB , DB, VT, Nothing, P} where {T<:Point, VDB <: VertexDB{T}, DB<:VDB, VT<:HVNodes{T}, P}
const IntRefVoronoiMesh{T, VDB, DB, VT, P} = Voronoi_MESH{T, VDB , DB, VT, Vector{Int64}, P} where {T<:Point, VDB <: VertexDB{T}, DB<:VDB, VT<:HVNodes{T}, P}
ClassicMesh(xs::Points) = Voronoi_MESH(xs,nothing)
@inline set_offset(vdb::VM,i) where {VM<:Voronoi_MESH} = set_offset(vdb.vertices,i)
@inline vertices_iterator(m::VM, i::Int64, ::StaticTrue) where VM<:Voronoi_MESH = vertices_iterator(m.vertices,i,statictrue)
#@inline vertices_iterator(m::VM, i::Int64, internal::StaticFalse) where VM<:Voronoi_MESH = VertexIterator(m,vertices_iterator(m.vertices,i,staticfalse))
@inline all_vertices_iterator(m::VM,i::Int64,static::StaticTrue) where VM<:Voronoi_MESH = all_vertices_iterator(m.vertices,i,static)
@inline number_of_vertices(m::VM,i::Int64,static::StaticFalse) where VM<:Voronoi_MESH = number_of_vertices(m.vertices,internal_index(m,i),statictrue)
@inline number_of_vertices(m::VM,i::Int64,static::StaticTrue) where VM<:Voronoi_MESH = number_of_vertices(m.vertices,i,statictrue)
@inline internal_length(m::M) where M<:Voronoi_MESH = m.initial_length
@inline internal_index(m::VM,i::Int64) where VM<:Voronoi_MESH = m.length_ref[1]!=0 ? m.references[i] : i
@inline external_index(m::VM,i::Int64) where VM<:Voronoi_MESH = @inbounds m.length_ref[1]!=0 ? findfirstassured_sorted(i,m.references) : i
@inline external_index(m::VM,inds::AVI) where {VM<:Voronoi_MESH,AVI<:AbstractVector{Int64}}= _external_index(m,inds)
@inline _external_index( ::VM,inds::AVI) where {VM<:NoRefVoronoiMesh,AVI<:AbstractVector{Int64}} = inds
@inline _external_index(m::VM,inds::AVI) where {VM<:IntRefVoronoiMesh,AVI<:AbstractVector{Int64}} = length(m.references)>0 ? _external_indeces(m,inds,m.buffer_sig) : _copy_indeces(m,inds,m.buffer_sig)
@inline internal_index(m::VM,inds::AVI) where {VM<:Voronoi_MESH,AVI<:AbstractVector{Int64} }= _internal_index(m,inds,m.references)
@inline _internal_index(m::VM,inds::AVI,rt::Nothing) where {VM<:Voronoi_MESH,AVI<:AbstractVector{Int64}} = inds
@inline _internal_index(m::VM,inds::AVI,buffer) where {VM<:Voronoi_MESH,AVI<:AbstractVector{Int64}} = length(m.references)>0 ? _internal_indeces(m,inds,m.buffer_sig) : _copy_indeces(m,inds,m.buffer_sig)
@inline external_index(m::NoRefVoronoiMesh,i::Int64, state=statictrue) = i
@inline internal_index(m::NoRefVoronoiMesh,i::Int64, state=statictrue) = i
@inline nodes(m::Voronoi_MESH) = m._nodes
@inline nodes_iterator(m::Voronoi_MESH) = 1:length(m)
@inline get_vertex(m::Voronoi_MESH,vr::VertexRef) = get_vertex(m.vertices,vr)
# For developing and testing only:
#=
function show_mesh(mesh::Voronoi_MESH; nodes=false,verteces=true,vertex_coordinates=false)
if nodes
for i in 1:length(mesh)
print("$i:$(mesh.nodes[i]), ")
end
println("")
end
if verteces
for i in 1:length(mesh)
print("$i: ")
for (sig,r) in Iterators.flatten((mesh.All_Verteces[i],mesh.Buffer_Verteces[i]))
print(sig)
if vertex_coordinates
print("/$r")
end
print(", ")
end
println("")
end
end
end
=#
@doc raw"""
length(mesh::Voronoi_MESH)
returns the length of the nodes vector
"""
@inline Base.length(mesh::Voronoi_MESH) = length(mesh._nodes)
###############################################################################################################
## COLLECTION-Type functionalities of Voronoi_MESH
###############################################################################################################
@inline filtermesh!(mesh::M,affected,_filter) where M<:Voronoi_MESH = filter!(_filter,mesh.vertices,affected)
@inline cleanupfilter!(mesh::M,i) where M<:Voronoi_MESH = cleanupfilter!(mesh.vertices,i)
@inline haskey(mesh::M,sig::AbstractVector{Int64},i::Int) where M<:Voronoi_MESH = haskey(mesh.vertices,sig,i)
@inline push!(mesh::VM, p::Pair{Vector{Int64},T},i) where {T<:Point,VM<:Voronoi_MESH{T}} = push!(mesh.vertices,p,i)
@inline push_ref!(mesh::VM, ref,i) where {T<:Point,VM<:Voronoi_MESH{T}} = i<=length(mesh) && push_ref!(mesh.vertices,ref,i)
@inline function internal_sig(mesh::VM,sig::AVI,static::StaticFalse) where {VM<:Voronoi_MESH,AVI<:AbstractVector{Int64}}
sig .= internal_index(mesh,sig)
return sig
end
@inline internal_sig(mesh::VM,sig::AVI,static::StaticTrue) where {VM<:Voronoi_MESH,AVI<:AbstractVector{Int64}} = internal_index(mesh,sig)
@inline mark_delete_vertex!(m::VM,sig,i,ii) where {VM<:Voronoi_MESH} = mark_delete_vertex!(m.vertices,sig,i,ii)
@inline delete_reference(mesh::VM,s,ref) where VM<:Voronoi_MESH = delete_reference(mesh.vertices,s,ref)
@inline function external_sig(mesh::VM,sig::AVI,static::StaticFalse) where {VM<:Voronoi_MESH,AVI<:AbstractVector{Int64}}
sig .= external_index(mesh,sig)
return sort!(sig)
end
@inline external_sig(mesh::VM,sig::AVI,static::StaticTrue) where {VM<:Voronoi_MESH,AVI<:AbstractVector{Int64}} = sort!(external_index(mesh,sig))
## the following is not used or will be restricted to ClassicMesh
#=
function pop!(mesh::Voronoi_MESH{T}, key) where {T}
if length(key)>0
eI = mesh.nodes[1]
Base.pop!(mesh.All_Verteces[key[1]],key,eI)
for i in 2:length(key)
Base.pop!(mesh.Buffer_Verteces[key[i]],key,eI)
end
end
end
=#
#################################################################################################################
#################################################################################################################
#################################################################################################################
## ClassicMesh
#################################################################################################################
#################################################################################################################
#################################################################################################################
@inline internal_sig(mesh::ClassicMesh,sig::AVI,static::StaticFalse) where {AVI<:AbstractVector{Int64}} = sig
@inline internal_sig(mesh::ClassicMesh,sig::AVI,static::StaticTrue) where {AVI<:AbstractVector{Int64}} = sig
@inline internal_length(m::M) where M<:AppendableVoronoiMesh = length(m._nodes)
@inline internal_length(m::M) where M<:ClassicMesh = length(m._nodes)
@inline Base.getproperty(cd::ClassicMesh, prop::Symbol) = dyncast_get(cd,Val(prop))
@inline @generated dyncast_get(cd::ClassicMesh, ::Val{:All_Vertices}) = :(getfield(getfield(cd,:vertices),:All_Vertices))
@inline @generated dyncast_get(cd::ClassicMesh, ::Val{:Buffer_Vertices}) = :(getfield(getfield(cd,:vertices),:Buffer_Vertices))
@inline @generated dyncast_get(cd::ClassicMesh, d::Val{S}) where S = :( getfield(cd, S))
@inline haskey(mesh::ClassicMesh,sig::AbstractVector{Int64},i::Int) = haskey(mesh.vertices,sig,i)
function rehash!(mesh::ClassicMesh{T}) where {T}
for i in 1:length(mesh)
# Rehashing All_Vertices
buffer = Dict{Vector{Int64}, T}()
sizehint!(buffer, length(mesh.All_Vertices[i]))
while length(mesh.All_Vertices[i]) > 0
push!(buffer, pop!(mesh.All_Vertices[i]))
end
mesh.All_Vertices[i] = buffer
# Rehashing Buffer_Vertices
buffer = Dict{Vector{Int64}, T}()
sizehint!(buffer, length(mesh.Buffer_Vertices[i]))
while length(mesh.Buffer_Vertices[i]) > 0
push!(buffer, pop!(mesh.Buffer_Vertices[i]))
end
mesh.Buffer_Vertices[i], buffer = buffer, mesh.Buffer_Vertices[i]
end
end
@doc raw"""
append!(mesh::ClassicMesh, xs)
appends the points `xs` to the end of the mesh and allocates empty vertex containers for them in the fields `All_Vertices` and `Buffer_Vertices`
"""
function append!(mesh::ClassicMesh,xs)
append!(mesh._nodes,xs)
vert=Dict([0]=>xs[1])
pop!(vert)
vertlist1=Vector{typeof(vert)}(undef,length(xs))
vertlist2=Vector{typeof(vert)}(undef,length(xs))
for i in 1:length(xs)
vertlist1[i]=copy(vert)
vertlist2[i]=copy(vert)
end
append!(mesh.All_Vertices,vertlist1)
append!(mesh.Buffer_Vertices,vertlist2)
end
@doc raw"""
prepend!(mesh::ClassicMesh, xs)
adds the points `xs` to the beginning of the mesh and correspondingly shifts the indices in the fields `All_Vertices` and `Buffer_Vertices`
"""
prepend!(mesh::VM,xs) where VM<:Voronoi_MESH = error("prepend! not implemented for the Voronoi_MESH type $(typeof(mesh))")
function prepend!(mesh::AppendableVoronoiMesh,xs)
allverts=mesh.All_Vertices
lnxs=length(xs)
for i in 1:length(mesh) # if properly initiated (via distribute_verteces) the list Buffer_Vertices is updated on the fly via array pointers
for (sig,_) in allverts[i]
sig.+=lnxs
end
end
for (sig,r) in mesh.boundary_Vertices
sig.+=lnxs
end
#mesh._nodes.data = PrependedNodes(xs,mesh._nodes.data)
#println(typeof(nodes(mesh)))
prepend!(nodes(mesh),xs)
_prepend(::Nothing,_) = nothing
_prepend(refs::Vector{Int64},xs) = begin
if length(refs)!=0
lxs = length(xs)
refs .+= lxs
prepend!(refs,collect(1:lxs))
end
return nothing
end
_prepend(mesh.references,xs)
vert=EmptyDictOfType([0]=>xs[1])
vertlist1=Vector{typeof(vert)}(undef,length(xs))
vertlist2=Vector{typeof(vert)}(undef,length(xs))
for i in 1:length(xs)
vertlist1[i]=copy(vert)
vertlist2[i]=copy(vert)
end
prepend!(mesh.All_Vertices,vertlist1)
prepend!(mesh.Buffer_Vertices,vertlist2)
rehash!(mesh)
end
@doc raw"""
copy(mesh::Voronoi_MESH)
provides an independent copy `new_mesh = copy(mesh)` of the Voronoi mesh `mesh`. In particular,
changes in `mesh` will not affect `new_mesh` and vice versa.
"""
function copy(mesh::VM;kwargs...) where VM<:Voronoi_MESH
bv_type = typeof(mesh.boundary_Vertices)
bv = bv_type()
xs = copy(mesh._nodes)
for (sig,b) in mesh.boundary_Vertices
push!(bv,copy(sig)=>b)
end
c_rt(::Nothing) = nothing
c_rt(i) = copy(i)
if typeof(bv)!=typeof(mesh.boundary_Vertices)
error("1")
end
if typeof(xs)!=typeof(mesh._nodes)
error("2")
end
if typeof(copy(mesh.vertices;kwargs...))!=typeof(mesh.vertices)
println(typeof(copy(mesh.vertices;kwargs...)))
println(typeof(mesh.vertices))
error("3")
end
return VM(xs,copy(mesh.vertices;kwargs...),bv,c_rt(mesh.references),copy(mesh.length_ref),mesh.initial_length,Int64[],mesh.parameters)
end
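# Usage sketch (developer example, not executed): `copy` detaches all internal containers,
# so modifications of the copy leave the original mesh untouched.
#=
mesh2 = copy(mesh)
# pushing vertices into mesh2 does not alter mesh, and vice versa
=#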
function keepat!(mesh::ClassicMesh,entries)
keepat!(mesh._nodes,entries)
keepat!(mesh.All_Vertices,entries)
keepat!(mesh.Buffer_Vertices,entries)
end
struct Voronoi_MESH_Store_1{T<:Point, VDB <: VertexDB{T}, DB<:VDB, VT<:HVNodes{T},RT,PARAMS}
_nodes::VT
vertices::DB
boundary_Vertices::Dict{Vector{Int64},boundary_vertex{T}}
references::RT
length_ref::MVector{1,Int64}
initial_length::Int64
buffer_sig::Vector{Int64}
parameters::PARAMS
# neighbors::Vector{Vector{Int64}}
end
JLD2.writeas(::Type{Voronoi_MESH{T, VDB, DB, VT,RT,PARAMS}}) where {T, VDB, DB, VT,RT,PARAMS} = Voronoi_MESH_Store_1{T, VDB, DB, VT,RT,PARAMS}
JLD2.wconvert(::Type{Voronoi_MESH_Store_1{T, VDB, DB, VT,RT,PARAMS}},m::Voronoi_MESH{T, VDB, DB, VT,RT,PARAMS}) where {T, VDB, DB, VT,RT,PARAMS} = Voronoi_MESH_Store_1{T, VDB, DB, VT,RT,PARAMS}(m._nodes,m.vertices,m.boundary_Vertices,m.references,m.length_ref,m.initial_length,m.buffer_sig, m.parameters)
JLD2.rconvert(::Type{Voronoi_MESH{T, VDB, DB, VT,RT,PARAMS}},m::Voronoi_MESH_Store_1{T, VDB, DB, VT,RT,PARAMS}) where {T, VDB, DB, VT,RT,PARAMS} = Voronoi_MESH{T, VDB, DB, VT,RT,PARAMS}(m._nodes,m.vertices,m.boundary_Vertices,m.references,m.length_ref,m.initial_length,m.buffer_sig, m.parameters)
#JLD2.rconvert(::Type{Voronoi_MESH{T, VDB, DB, VT,RT,PARAMS}},m::Voronoi_MESH_Store_1{T, VDB, DB, VT,RT,PARAMS}) where {T, VDB, DB, VT,RT,PARAMS} = Voronoi_MESH{T, VDB, DB, VT,RT,PARAMS}(m._nodes,m.vertices,m.boundary_Vertices,m.references,m.length_ref,m.initial_length,m.buffer_sig, m.parameters)
| HighVoronoi | https://github.com/martinheida/HighVoronoi.jl.git |
|
[
"MIT"
] | 1.3.1 | bc1b8bfb168b8850d28c935b20a545882a9a078e | code | 28146 | struct DeepNeighborData{REF,M<:AbstractMesh}
references::REF
buffer::Vector{Int64}
mesh::M
end
@inline DeepNeighborData(r::REF,m::M) where {REF,M} = DeepNeighborData(r,Int64[],m)
@inline function VoronoiDataArray(sigma,offset,references;lsigma=length(sigma),lreferences=length(references))
for k in 1:lsigma
s=sigma[k]
sigma[k]= s<=lreferences ? references[s]-offset : s-offset
end
return sigma # sort!(sigma)
end
@inline function transform(bonus::D,neighs,offset,simple=staticfalse) where {D<:DeepNeighborData}
lneigh = length(neighs)
vbuffer = simple==false ? _external_indeces(bonus.mesh,neighs,bonus.buffer) : _copy_indeces(bonus.mesh,neighs,bonus.buffer)
VoronoiDataArray(vbuffer,offset,bonus.references;lsigma=lneigh,lreferences=offset)
return vbuffer
end
struct SortingMatrix <: AbstractVector{Vector{Int64}}
data::Vector{Vector{Int64}}
offset::Int64
function SortingMatrix(data,offset=0)
l = length(data)
matrix = Vector{Vector{Int64}}(undef,l)
buffer = Int64[]
for i in 1:l
!isassigned(data,i) && continue
di = data[i]
ln = length(di)
ln>length(buffer) && resize!(buffer,ln)
vbuffer = view(buffer,1:ln)
vbuffer .= di
newdata = collect(1:ln)
quicksort!(vbuffer,newdata,newdata)
matrix[i] = newdata
end
return new(matrix,offset)
end
SortingMatrix(sm::SortingMatrix,offset=0) = new(sm.data,offset)
end
@inline Base.getindex(m::SortingMatrix,i::Int64) = m.data[i-m.offset]
@inline Base.isassigned(m::SortingMatrix,i::Int64) = i<m.offset ? false : isassigned(m.data,i-m.offset)
@inline Base.size(m::SortingMatrix) = (length(m.data)+m.offset,)
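# Conceptual sketch (illustration only, not part of the API): each entry of a SortingMatrix
# stores the permutation that sorts the corresponding row of `data`, analogous to Base.sortperm.
#=
row = [30, 10, 20]
perm = sortperm(row)     # == [2, 3, 1]
row[perm]                # == [10, 20, 30]; SortingMatrix stores such a `perm` per cell
=#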
###############################################################################################################################
## DeepVector -- Iteratively yielding views on data
## DeepVectorNeighbor - specialized on neighbor vectors
## DeepVectorFloat64 - vector of floats (volume)
## DeepVectorFloat64Vector - vector of vector of floats (b. integral / area)
## DeepVectorFloat64VectorVector - vector of vector of vector of floats (i.integral)
###############################################################################################################################
struct ReadOnlyVector{T,AV<:AbstractVector{T},BONUS} <: AbstractVector{T}
data::AV
bonus::BONUS
end
@inline Base.size(A::ReadOnlyVector) = size(A.data)
@inline Base.getindex(A::ReadOnlyVector, index) = getindex(A.data, shiftbonus(A.bonus,index))
struct DeepVector{P,T<:AbstractVector{P},WRITE,SUB,BONUS} <: AbstractVector{P}
data::T
offset::Int64
bonus::BONUS
function DeepVector(data::T, offset::Int64 = 0, w::WRITE = staticfalse, s::SUB = Val(:deep),bonus::B=nothing) where {P, T<:AbstractVector{P},WRITE,SUB,B}
new{P, T, WRITE, SUB,B}(data, offset,bonus)
end
end
const DeepVectorNeighbor{P,T<:AbstractVector{P},WRITE,BONUS} = DeepVector{P,T,WRITE,Val{:neighbors},BONUS}
#const DeepVectorFloat64{T<:AbstractVector{Float64},WRITE} = DeepVector{Float64,T,WRITE,Val{:deep},Nothing}
@inline DeepVectorFloat64(data,offset=0,write=staticfalse;sorting=nothing) =
DeepVector(data,offset,write,Val(:deep),sorting)
#const DeepVectorFloat64Vector{T<:AbstractVector{Vector{Float64}},WRITE} = DeepVector{Vector{Float64},T,WRITE,Val{:deep},Nothing}
@inline DeepVectorFloat64Vector(data,offset=0,write=staticfalse;sorting=nothing) =
DeepVector(data,offset,write,Val(:deep),sorting)
#const DeepVectorFloat64VectorVector{V<:AbstractVector{Vector{Float64}},T<:AbstractVector{V},WRITE,BONUS} = DeepVector{V,T,WRITE,Val{:deep},BONUS}
@inline DeepVectorFloat64VectorVector(data,offset=0, A::StaticBool{write} = staticfalse;sorting=nothing) where {write} =
DeepVector(data,offset,StaticBool{write}(),Val(:deep),sorting)
function DeepVectorNeighbor(d2::AD,publicview::Bool) where {AD<:AbstractDomain}
ref = references(d2)
i = integral(d2)
return DeepVector(i.neighbors,publicview*length(ref),staticfalse,Val(:neighbors),DeepNeighborData(ref,mesh(i)))
end
@inline subbonus(_,i) = nothing
@inline subbonus(m::SortingMatrix,i) = begin
#println("bonus $i: $(m[i])")
m[i]
end
#@inline subbonus(m::Vector{Int64},i) = nothing
@inline shiftbonus(_,i) = i
@inline shiftbonus(m::Vector{Int64},i) = m[i]
#@inline subbonus(m::Vector{Int64},i) = nothing
# Define the size method
@inline Base.size(v::DeepVector) = (length(v.data) - v.offset,)
@inline Base.length(v::DeepVector) = size(v)[1]
@inline Base.isassigned(v::DeepVector,key::Int64) = isassigned(v.data,key+v.offset)
# Define the getindex method
#@inline Base.getindex(v::DeepVector{P,T,W,SUB,BONUS}, i::Int) where {P,T,W,SUB,BONUS} = getsubvector(v.data,i + v.offset,v.bonus,v.offset,Val(SUB),EmptyDeepVector(v))
@inline Base.getindex(v::DeepVector{P,T,W,SUB,BONUS}, i::Int) where {P<:AbstractVector,T,W,SUB,BONUS} = getsubvector(v,shiftbonus(v.bonus,i + v.offset),SUB(),subbonus(v.bonus,i))
@inline @generated getsubvector(v::DeepVector{P,T,WRITE,SUB,BONUS},i,::Val{:deep},subbonus) where {P,T,WRITE,SUB,BONUS}= :(DeepVector(v.data[i],0,WRITE(),Val(:deep),subbonus))
@inline @generated getsubvector(v::DeepVector{P,T,WRITE,SUB,BONUS},i,::Val{:neighbors},_) where {P<:AbstractVector{Int},T,WRITE,SUB,BONUS}= :(get_neighbors(v,i))
@inline Base.getindex(v::DeepVector{P,T,W,SUB,BONUS}, i::Int) where {P,T,W,SUB,BONUS} = v.data[shiftbonus(v.bonus,i + v.offset)]
# Define the setindex! method, ensuring it checks the WRITE parameter
@inline Base.setindex!(v::DeepVector{P,T,true,SUB,BONUS}, val, i::Int) where {P,T,SUB,BONUS} = v.data[shiftbonus(v.bonus,i + v.offset)] = val
# Attempting to write to a DeepVector with WRITE == false
@inline Base.setindex!(v::DeepVector{P,T,false,SUB,BONUS}, val, i::Int) where {P,T,SUB,BONUS} = @warn "Cannot write to a DeepVector with WRITE == false"
function get_neighbors(dv::DeepVector{P,T,W,SUB,BONUS}, i::Int) where {P,T,W,SUB,BONUS}
neighs = dv.data[i]
#println(neighs)
return ReadOnlyVector(transform(dv.bonus,dv.data[i],dv.offset),subbonus(dv.bonus,i))
end
Base.deepcopy(v::DeepVector{P,T,W,Union{Val{:deep},Val{:neighbors}}}) where {P<:AbstractVector,T,W} = [deepcopy(v[i]) for i in 1:length(v.data)]
Base.deepcopy(v::DeepVector{P,T,W,Val{:deep}}) where {P,T,W} = deepcopy(v.data)
function Base.deepcopy(v::DeepVector{P,T,W,Val{:neighbors}}) where {P<:AbstractVector{Int},T,W}
l = length(v)
neighs = Vector{Vector{Int64}}(undef,l)
for i in 1:l
_n = transform(v.bonus,v.data[i+v.offset],v.offset)
neighs[i] = [_n[j] for j in 1:length(_n)]
end
return neighs
end
#=function test_deep()
t = [[[1,2,3],[4,5,6]],[[1]]]
dv = DeepVector(t)
println(dv[2])
println(typeof(dv[2]))
println(dv[2][1])
println(typeof(dv[2][1]))
t = [[[1.0,2.0,3.0],[4.0,5.0,6.0]],[[1.0]]]
dv = DeepVector(t)
println(typeof(dv)<:DeepVectorFloat64VectorVector)
dv2 = DeepVector(t[1])
println(typeof(dv2))
println(typeof(dv2)<:DeepVectorFloat64Vector)
end
=#
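# Quick sketch of the indexing semantics above (developer example, not executed):
#=
t = [[1.0, 2.0, 3.0], [4.0, 5.0, 6.0]]
dv = DeepVector(t)   # read-only (WRITE == staticfalse) deep view with offset 0
dv[1]                # a DeepVector view of t[1]
dv[1][2]             # == 2.0
=#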
###############################################################################################################################
## Vertices_Vector
###############################################################################################################################
"takes a common VertexIterator and modifies its output to account for a view on public nodes only, i.e. internal nodes will be replaced by their public equivalents"
struct PublicIterator{I,B}
iterator::I
offset::Int64
bonus::B
end
function Base.iterate(pi::PI,state...) where {PI<:PublicIterator}
modify(n::Nothing) = nothing
function modify(data)
(sig,r) = data[1]
sig2 = transform(pi.bonus,sig,pi.offset,statictrue)
return (sig2,r),data[2]
end
return modify(iterate(pi.iterator, state...))
end
function Base.deepcopy(pi::PublicIterator,dict)
for (sig,r) in pi
push!(dict,copy(sig)=>r)
end
return dict
end
convert_to_vector(data::PI,::Type{R},n) where {PI<:PublicIterator,R} = begin
ret = Vector{Pair{Vector{Int64},R}}(undef,n)
count = 1
for (sig,r) in data
ret[count] = copy(sig)=>r
count += 1
end
return ret
end
######################################################################################
struct Vertices_Vector{M,B} #<: AbstractArray{PublicIterator}
mesh::M
offset::Int64
bonus::B
function Vertices_Vector(d::D,publicview=false) where {D<:AbstractDomain}
m = mesh(d)
ref = references(d)
i = integral(d)
b = DeepNeighborData(ref,mesh(i))
return new{typeof(m),typeof(b)}(m,publicview*length(ref),b)
end
end
@inline Base.getindex(v::Vertices_Vector, i::Int) = PublicIterator(vertices_iterator(v.mesh,i + v.offset),v.offset,v.bonus)
@inline convert_to_vector(v::Vertices_Vector) = [convert_to_vector(v[i],PointType(v.mesh),number_of_vertices(v.mesh,i+ v.offset)) for i in 1:(length(v.mesh)-v.offset)]
@inline function Base.deepcopy(vv::VV) where {P,M<:AbstractMesh{P},VV<:Vertices_Vector{M}}
l = length(vv.mesh)-vv.offset
result = Vector{Dict{Vector{Int64},P}}(undef,l)
for i in 1:l
result[i] = deepcopy(vv[i],Dict{Vector{Int64},P}())
end
return result
end
@inline Base.size(v::VV) where {VV<:Vertices_Vector} = (length(v.mesh),)
###############################################################################################################################
## OrientationsVector
###############################################################################################################################
struct Orientations{P,N<:HVNodes{P},S<:StaticBool,VI<:AbstractVector{Int64}} <: AbstractVector{P}
x::P
neighs::VI
nodes::N
boundary::Boundary
onboudary::S
lmesh::Int64
end
@inline function Base.getindex(o::Orientations,i::Int64)
n=o.neighs[i]
if n<=o.lmesh
return o.nodes[n]-o.x
elseif o.onboudary==true
return 0.5 * (reflect(o.x,o.boundary,n-o.lmesh)-o.x)
else
return reflect(o.x,o.boundary,n-o.lmesh)-o.x
end
end
@inline Base.size(o::Orientations) = (length(o.neighs),)
@inline Base.deepcopy(o::Orientations{P}) where P = [o[i] for i in 1:length(o.neighs)]
struct OrientationsVector{P,N<:HVNodes{P},AV,S<:StaticBool} <: AbstractVector{Orientations{P,N,S}}
nodes::N
neighbors::AV
offset::Int64
boundary::Boundary
onboundary::S
lmesh::Int64
function OrientationsVector(d::AD,onboundary,publicview=staticfalse;sorting=nothing) where {P,AD<:AbstractDomain{P}}
m = mesh(d)
n = nodes(m)
_nei = DeepVectorNeighbor(d,false) # gets external representation of full neighbor matrix
o = publicview==true ? length(references(d)) : 0
shift_sorting(a,o) = a
shift_sorting(a::SortingMatrix,o) = SortingMatrix(a,o)
nei = DeepVector(_nei,0,staticfalse,Val(:deep),shift_sorting(sorting,o)) # gets a sorted view on the external full representation
b = boundary(d)
on = StaticBool(onboundary)
l = length(m)
return new{P,typeof(n),typeof(nei),typeof(on)}(n,nei,o,b,on,l)
end
end
@inline Base.size(o::OrientationsVector{P}) where P = (o.lmesh-o.offset,)
@inline Base.isassigned(o::OrientationsVector,i::Int64) = isassigned(o.neighbors,i+o.offset)
@inline Base.getindex(o::OrientationsVector,i::Int64) = Orientations(o.nodes[i+o.offset],o.neighbors[i+o.offset],o.nodes,o.boundary,o.onboundary,o.lmesh)
function Base.deepcopy(ov::OrientationsVector{P}) where P
result = Vector{Vector{P}}(undef,ov.lmesh-ov.offset)
for i in 1:length(result)
if isassigned(ov,i)
result[i] = deepcopy(ov[i])
end
end
return result
end
###############################################################################################################################
## BoundaryNodes
###############################################################################################################################
# Dict project.....
struct BNodesDict{P, S, onBoundary} <: AbstractDict{Int64, P}
active::SVector{S, Bool}
x::P
boundary::Boundary
offset::Int64 # = lmesh
end
Base.keys(d::BNodesDict) = view((d.offset + 1):(d.offset + length(d.active)), d.active)
function Base.values(d::BNodesDict)
keys_iter = keys(d)
return MapIterator(keys_iter, k -> reflect(d.x, d.boundary, k - d.offset))
end
function Base.iterate(d::BNodesDict{P, S, onBoundary}, state=1) where {P, S, onBoundary}
while state <= S && !d.active[state]
state += 1
end
if state <= S
if onBoundary==StaticFalse
return (Pair(state + d.offset, reflect(d.x, d.boundary, state)), state + 1)
else
return (Pair(state + d.offset, 0.5*(d.x+reflect(d.x, d.boundary, state))), state + 1)
end
else
return nothing
end
end
function Base.deepcopy(pi::BNodesDict,dict)
for (k,p) in pi
push!(dict,k=>p)
end
return dict
end
Base.eltype(::Type{BNodesDict{P, S, onBoundary}}) where {P, S, onBoundary} = Pair{Int64, P}
Base.length(d::BNodesDict) = count(d.active)
struct BNodesDictDict{P, S, onBoundary<:StaticBool, N,N2,M<:AbstractMesh{P}} <: AbstractDict{Int64, BNodesDict{P, S, onBoundary}}
neighbors::N
nodes::N2
boundary::Boundary
buffer::MVector{S, Bool}
lmesh::Int64
offset::Int64
mesh::M
function BNodesDictDict(d::AD,onboundary=false,publicview=false) where {P,AD<:AbstractDomain{P}}
x0 = zeros(P)
n = DeepVectorNeighbor(d,false)
_nodes = nodes(mesh(d))
b = boundary(d)
buf = MVector{length(b),Bool}(falses(length(b)))
m = mesh(d)
lmesh = length(m)
myob = StaticBool(onboundary)
o = publicview==false ? 0 : length(references(d))
return new{P,length(b),typeof(myob),typeof(n),typeof(_nodes),typeof(m)}(n,_nodes,b,buf,lmesh,o,m)
end
end
Base.haskey(d::BNodesDictDict, key::Int) = isassigned(d.neighbors,key+d.offset) ? d.neighbors[key+d.offset][end] > d.lmesh : false
function Base.get(d::BNodesDictDict{P, S, onBoundary, N}, key::Int, default=nothing) where {P, S, onBoundary, N}
if haskey(d, key)
neigh = d.neighbors[key+d.offset]
n = length(neigh)
d.buffer .= false
#lb = length(d.boundary)
while neigh[n]>d.lmesh
d.buffer[neigh[n] - d.lmesh] = true
n -= 1
end
return BNodesDict{P, S, onBoundary}(SVector(d.buffer),d.nodes[key+d.offset], d.boundary, d.lmesh-d.offset)
else
return default
end
end
Base.getindex(d::BNodesDictDict{P, S, onBoundary, N}, key::Int, default=nothing) where {P, S, onBoundary, N} = get(d,key)
function Base.iterate(d::BNodesDictDict{P, S, onBoundary, N}, state=1) where {P, S, onBoundary, N}
while state <= length(d.neighbors) && !haskey(d, state)
state += 1
end
if state <= length(d.neighbors)
return ((state,get(d, state)), state + 1)
else
return nothing
end
end
function Base.deepcopy(vv::VV) where {P,VV<:BNodesDictDict{P}}
result = Dict{Int64,Dict{Int64,P}}()
for (i,bni) in vv
push!(result, i=>deepcopy(bni,Dict{Int64,P}()))
end
return result
end
function Base.length(d::BNodesDictDict)
state = 1
count = 0
while state <= length(d.neighbors)
while state <= length(d.neighbors) && !haskey(d, state)
state += 1
end
state <= length(d.neighbors) && (count+=1)
state += 1
end
return count
end
Base.keys(d::BNodesDictDict) = filter(k -> haskey(d, k), 1:length(d.neighbors))
Base.values(d::BNodesDictDict) = MapIterator(keys(d),k -> get(d, k)) #map(k -> get(d, k), Base.keys(d))
Base.eltype(::Type{BNodesDictDict{P, S, onBoundary, N}}) where {P, S, onBoundary, N} = Tuple{Int64, BNodesDict{P, S, onBoundary}}
convert_to_vector(d::BNDD) where BNDD<:BNodesDictDict = begin
SVV = SparseVectorWrapper{PointType(d.mesh)}
VV = Vector{Pair{Int64,PointType(d.mesh)}}
ld = length(d)
ret = Vector{SVV}(undef, ld)
inds = Vector{Int64}(undef,ld)
state = 1
count = 0
while state <= length(d.neighbors)
if haskey(d, state)
count += 1
vec = get(d,state)
lvec = length(vec)
nvec = VV(undef,lvec)
ret[count] = SVV(nvec)
inds[count] = state
count2 = 1
for (i,r) in vec
nvec[count2] = Pair(i,r)
count2 += 1
end
end
state += 1
end
return sparsevec(inds,ret)
end
###############################################################################################################################
## ShiftVector
###############################################################################################################################
struct ShiftVector{P}<:AbstractVector{P}
reference_shifts
shifts
end
@inline Base.getindex(s::SV,index::Int64) where {P<:Point,SV<:ShiftVector{P}} = P(periodic_shift(s.reference_shifts[index],s.shifts))
@inline Base.size(s::SV) where {P<:Point,SV<:ShiftVector{P}} = size(s.reference_shifts)
###############################################################################################################################
## VoronoiData
###############################################################################################################################
function VoronoiDataShift(s,offset,references)
return s<=offset ? references[s]-offset : s-offset # this would be a problem in C++ ;-)
end
struct VoronoiData
nodes
vertices
boundary_vertices # referred to by boundary_verteces
boundary_nodes
boundary_nodes_on_boundary::Bool
neighbors
orientations
volume
area
bulk_integral
interface_integral
offset::Int64
references
reference_shifts
geometry
boundary
end
"""
Using the call
data=VoronoiData(VG)
some data of the Voronoi geometry `VG` is extracted and presented to the user in a convenient way that requires no knowledge of the complicated multilevel data structures of VoronoiGeometry. Once applied, the data set contains at least the following information:
- `nodes::Vector{T}`: The original nodes
- `vertices`: For each `i` this is an iterator over the vertices of cell `i`
- `boundary_vertices`: This is an iterator of the form `edge => (base,direction,node)` where `edge` is a list of generators
of an infinite edge, `base` the start of the edge, `direction` the orientation and `node` is one additional generator that
defines `base` together with `edge`.
# Additional Fields in `VoronoiData`
The set `data` contains the following additional information, which is `READ_ONLY` in the standard setting. The standard read-only datastructures are highly involved as the output values are generated on-the-fly from internal data in order to save memory. See below to extract easier editable data structures
- `neighbors`: For each node `nodes[i]` the field `neighbors[i]` contains a sorted list of indices of all neighboring cells.
Multiple appearance of the same node is possible on a periodic grid.
- `volume`: the volume for each node
- `area`: stores for each neighbor `neighbors[i][k]` of node `i` in `area[i][k]` the area of the interface.
- `bulk_integral`: the integral over the bulk of each cell. `bulk_integral[i]` is of type `AbstractVector{Float64}`
- `interface_integral`: same as for `area` but with the integral values of the interface function. In particular
`interface_integral[i][k]` is of type `AbstractVector{Float64}`
- `orientations`: If the neighbors have been calculated by the integral algorithm, then for each `neighbor[i][k]` there is the
matched orientation from `i` to `k`. This is particularly useful in periodic geometries, where manual calculation of this vector is tricky.
- `boundary_nodes`: A collection iterating as `Tuple(generator_i,collection(boundary_index=>mirrored_generator))`. In particular, if the cell of generator `i` touches
the boundary then `boundary_nodes` has a key `i`. The value is a dictionary that has for every boundary plane `k` that is touched
the mirrored version of generator `i` (if `onboundary=false`) or its projection onto plane `k` (if `onboundary=true`).
- `offset`: If `reduce_to_periodic=false`, this field will contain the number of internal nodes. The official nodes start from `offset+1`.
- `references`: If `offset>0` then there exist vectors `references` and `reference_shifts` of length `offset` stating
that `node[i]=node[references[i]]+reference_shifts[i]` for `i in 1:offset`.
- `reference_shifts`: See the previous entry
- `boundary`: If `reduce_to_periodic=false` this contains the internal boundary that is used to compute the periodic structure.
Otherwise this contains the official boundary of the domain.
- `geometry`: For internal use, this is a reference to `VG`.
!!! warning "No request implies empty data field"
If the above data fields were calculated by the integration algorithm, they have no values assigned for `1:offset`.
On the other hand, you may check this with `isassigned`. Also if `reduce_to_periodic=false`, the values for indices <= offset are not assigned.
# Named Arguments
The call of `VoronoiData(VG)` provides the following options:
- `getFIELD`: replace `FIELD` with any of the above names except `geometry` to obtain a hard copy of the respective data that is detached from the internal data structure and can be modified or stored separately.
- `copyall=true`: corresponds to setting `getFIELD=true` for every `FIELD`.
- `reduce_to_periodic=true`: This hides all internal data generated from the periodization. It is highly advised to set this option to `true`
as the user will then only see the periodic mesh with no information overhead.
- `onboundary=false`: refer to `boundary_nodes` above
- `sorted=true`: During the reduction of the internal pseudo-periodic mesh to the fully periodic output, the neighbors (jointly with their respective properties) get sorted by their numbers. This is only possible if `getarea`, `getneighbors` and `getinterface_integral` are `true`. Otherwise it will be ignored.
"""
function VoronoiData(VG::PGeometry{P};reduce_to_periodic=true,view_only=true,copyall=!view_only,getboundary=copyall,getbulk_integral=copyall,getreferences=copyall,getreference_shifts=copyall,getinterface_integral=copyall,getvolume=copyall,getarea=copyall,getneighbors=copyall,getnodes=copyall,getboundary_vertices=copyall,getorientations=copyall,getvertices=copyall,getboundary_nodes=copyall,onboundary=false,sorted=false) where P
domain = VG.domain
offset = reduce_to_periodic * length(references(domain))
deepversion(::StaticTrue,a) = convert_to_vector(a)
deepversion(::StaticFalse,a) = a
___nodes = nodes(mesh(domain))
_nodes = deepversion(StaticBool(getnodes),view(___nodes,(offset+1):length(___nodes)))
_vertices = deepversion(StaticBool(getvertices),Vertices_Vector(domain,reduce_to_periodic))
_bv = deepversion(StaticBool(getboundary_vertices),Public_BV_Iterator(mesh(domain))) # referred to by boundary_verteces
_bn = deepversion(StaticBool(getboundary_nodes),BNodesDictDict(domain,onboundary,reduce_to_periodic))
#boundary_nodes_on_boundary = onboundary
__neighbors = deepversion(StaticBool(getneighbors),DeepVectorNeighbor(domain,reduce_to_periodic))
_neighbors, bonus = VoronoiData_neighbors(StaticBool(sorted),StaticBool(getneighbors),__neighbors)
ori = deepversion(StaticBool(getorientations),OrientationsVector(domain,onboundary,StaticBool(reduce_to_periodic),sorting=bonus))
_volume = deepversion(StaticBool(getvolume),DeepVectorFloat64(integral(domain).volumes,offset))
_area = deepversion(StaticBool(getarea),DeepVector(integral(domain).area,offset,staticfalse,Val(:deep),bonus))
_bi = deepversion(StaticBool(getbulk_integral),DeepVectorFloat64Vector(integral(domain).bulk_integral,offset))
_ii = deepversion(StaticBool(getinterface_integral),DeepVector(integral(domain).interface_integral,offset,staticfalse,Val(:deep),bonus))
_boun = reduce_to_periodic ? boundary(VG.domain) : internal_boundary(VG.domain)
boun = getboundary ? deepcopy(_boun) : _boun
_references = deepversion(StaticBool(getreferences),references(VG.domain))
_reference_shifts = deepversion(StaticBool(getreference_shifts),ShiftVector{P}(reference_shifts(VG.domain),shifts(VG.domain)))
return VoronoiData(_nodes,_vertices,_bv,_bn,onboundary,_neighbors,ori,_volume,_area,_bi,_ii,length(references(domain)),_references,_reference_shifts,VG,boun)
end
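# Usage sketch (developer example, not executed): `VG` denotes a previously computed
# VoronoiGeometry; see the docstring above for the meaning of the keyword arguments.
#=
data = VoronoiData(VG)                               # read-only views into internal data
data.volume[1]                                       # volume of the first public cell
data.neighbors[1]                                    # sorted neighbor indices of cell 1
vd = VoronoiData(VG; copyall = true, sorted = true)  # detached hard copies, safe to modify
=#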
function VoronoiData_neighbors(::StaticTrue,getneighbors::StaticBool,__neighbors)
deepversion(::StaticTrue,a) = convert_to_vector(a)
deepversion(::StaticFalse,a) = a
sm = SortingMatrix(__neighbors)
neigh = DeepVector(__neighbors,0,staticfalse,Val(:deep),sm)
return deepversion(getneighbors,neigh), sm
end
@inline VoronoiData_neighbors(::StaticFalse,::S,__neighbors) where S<:StaticBool = __neighbors, nothing
function first_assigned(v)
for i in 1:length(v)
isassigned(v, i) && return i
end
return length(v)+1
end
@inline convert_to_vector(v::AVR) where {R<:Real, AVR<:AbstractVector{R}} = begin
lv = length(v)
fa = first_assigned(v)
r = Vector{R}(undef,lv)
r[1:(fa-1)] .= R(0)
r[fa:lv] .= view(v,fa:lv)
return r
end
@inline convert_to_vector(v::AVR) where {R<:Real,II, AVR<:SVector{II,R}} = v
#@inline convert_to_vector(v::R) where {R<:Real} = v #, AVR<:AbstractVector{R}} = copy(v)
convert_to_vector(v::AVR) where {R, AVR<:AbstractVector{R}} = begin
lv = length(v)
fa = first_assigned(v)
r = [convert_to_vector(subv) for subv in view(v,fa:lv)]
(fa>1) && prepend!(r,Vector{eltype(r)}(undef,fa-1))
return r
end
@inline convert_to_vector(v) = v
function __simplify_discrete(F)
return length(F)==1 ? F[1] : F
end
function extract_discretefunctions(VD::VoronoiData, FC)#::FunctionComposer)
vol = i->map(__simplify_discrete,decompose(FC,length(VD.bulk_integral)>0 ? VD.bulk_integral[i] : FC.reference_value))
#inter = length(VD.interface_integral)>0 ? (i,j)->map(__simplify_discrete,decompose(FC,VD.interface_integral[i][j])) : (i,j)->map(__simplify_discrete,decompose(FC,FC.reference_value))
#inter = (i,j)->map(__simplify_discrete,decompose(FC,length(VD.interface_integral)>0 && length(VD.interface_integral[i])>0 ? VD.interface_integral[i][j] : FC.reference_value))
#vol = length(VD.bulk_integral)>0 ? i->map(__simplify_discrete,decompose(FC,VD.bulk_integral[i])) : nothing
inter = length(VD.interface_integral)>0 ? (i,j)->map(__simplify_discrete,decompose(FC,VD.interface_integral[i][j])) : nothing
return (bulk=vol,interface=inter)
end
function _get_midpoint_for_discrete_functions(data::VoronoiData,i,j,l)
n=data.neighbors[i][j]
return n>l ? data.boundary_nodes[i][n] : data.nodes[i] + 0.5*data.orientations[i][j]
end
function extract_discretefunctions(data::VoronoiData;functions...)
vol = i->map(f->f(data.nodes[i]),values(functions))
l=length(data.nodes)
inter = (i,j)->map(f->f(_get_midpoint_for_discrete_functions(data,i,j,l)),values(functions))
return (bulk=vol,interface=inter)
end
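# Usage sketch (developer example, not executed): evaluate user-supplied functions on cell
# centers and on interface midpoints of a VoronoiData object `data`.
#=
fcts = extract_discretefunctions(data; density = x -> sin(x[1]))
fcts.bulk(1)           # density evaluated at the first node
fcts.interface(1, 2)   # density evaluated at the midpoint towards the second neighbor of cell 1
=#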
| HighVoronoi | https://github.com/martinheida/HighVoronoi.jl.git |
|
[
"MIT"
] | 1.3.1 | bc1b8bfb168b8850d28c935b20a545882a9a078e | code | 6921 | #####################################################################################################################################
## create a Discrete Domain that stores a mesh adjusted to boundary conditions
#####################################################################################################################################
abstract type AbstractDomain{P<:Point} end
@inline dimension(::AbstractDomain{P}) where P = size(P)[1]
@inline public_length(d::AbstractDomain) = length(mesh(d))-length(references(d))
@inline nodes(d::VD) where VD<:AbstractDomain = nodes(mesh(d))
"""
Voronoi_Domain
Philosophy: nodes[i] = nodes[references[i]] + periodic_shift( reference_shifts[i], shifts )
"""
struct Voronoi_Domain{P<:Point,T<:AbstractMesh{P},TT<:HVIntegral{P}} <: AbstractDomain{P}
# MESH::Voronoi_MESH
boundary::Boundary
shifts::Vector{Vector{Float64}}
references::Vector{Int64}
reference_shifts::Vector{BitVector}
internal_boundary::Boundary
_mesh::T
internaly_precise::Bool
_integral::TT
reflections::Vector{BitVector}
end
function Voronoi_Domain(mesh,_boundary::Boundary, _shifts::Vector{Vector{Float64}}, _reference_shifts::Vector{BitVector}, _references::Vector{Int64},internal=_boundary,internaly_precise=true,refl=BitVector[])
return Voronoi_Domain(_boundary,_shifts,_references,_reference_shifts,internal,ExplicitMeshContainer(mesh),internaly_precise,EmptyVoronoi_Integral(mesh),refl)
end
function Voronoi_Domain(mesh,boundary,internaly_precise=true)
shifts = periodic_shifts(boundary, size(eltype(nodes(mesh)))[1] )
lref = 0
bv = [falses(length(boundary)) for i in 1:(length(mesh)-lref)]
return Voronoi_Domain(mesh,boundary,shifts,Vector{BitVector}(),Vector{Int64}(),copy(boundary),internaly_precise,bv)
end
@inline function integrate_view(vd::Voronoi_Domain)
sv = SwitchView(length(vd.references)+1,length(mesh(vd)))
return (mesh = MeshView(vd._mesh.data,sv), integral = IntegralView(vd._integral,sv))
end
@inline Base.getproperty(cd::Voronoi_Domain, prop::Symbol) = dyncast_get(cd,Val(prop))
@inline @generated dyncast_get(cd::Voronoi_Domain, ::Val{:mesh}) = :(getfield(cd,:_mesh).data)
#@inline @generated dyncast_get(cd::Voronoi_Domain, ::Val{:integral}) = :(getfield(cd,:_integral).integral)
@inline @generated dyncast_get(cd::Voronoi_Domain, d::Val{S}) where S = :( getfield(cd, S))
@inline Base.setproperty!(cd::Voronoi_Domain, prop::Symbol, val) = dyncast_set(cd,Val(prop),val)
@inline @generated dyncast_set(cd::Voronoi_Domain, ::Val{:mesh},val) = :(getfield(cd,:_mesh).data=val)
#@inline @generated dyncast_set(cd::Voronoi_Domain, ::Val{:integral},val) = :(getfield(cd,:_integral).integral=val)
#@inline @generated dyncast_set(cd::CompoundData, d::Val{S},val) where S = :( setfield(cd, S,val))
@inline internaly_precise(d::Voronoi_Domain) = d.internaly_precise
@inline boundary(d::Voronoi_Domain) = d.boundary
@inline mesh(d::VD) where VD<:Voronoi_Domain = d.mesh
@inline shifts(d::Voronoi_Domain) = d.shifts
@inline Domain(mesh::Voronoi_MESH,boundary::Boundary) = Voronoi_Domain(mesh,boundary)
@inline references(d::Voronoi_Domain) = d.references
@inline reflections(d::Voronoi_Domain) = d.reflections
@inline reference_shifts(d::Voronoi_Domain) = d.reference_shifts
@inline expand_internal_boundary(domain::Voronoi_Domain,new_xs) = extend_periodic_part(domain.internal_boundary,new_xs) # shifts the periodic part of the boundary such that new_xs lies completely inside
# the newly constructed domain
@inline internal_boundary(d::Voronoi_Domain) = d.internal_boundary
@inline integral(d::Voronoi_Domain) = d._integral
@inline standardize(d::Voronoi_Domain) = nothing
@inline function set_internal_boundary(d::Voronoi_Domain,b::Boundary)
empty!(d.internal_boundary.planes)
append!(d.internal_boundary.planes,b.planes)
end
"""
copy(domain::Voronoi_Domain)
provides a (deep)copy of the discrete domain. However, the boundary object is taken as it is, i.e. this particular object is NOT a copy but identical to the original
"""
function copy(domain::Voronoi_Domain;resize=0,kwargs...)
newmesh = copy(domain.mesh;kwargs...)
newintegral = copy(domain._integral,newmesh;kwargs...)
if resize>0
resize_integrals(newintegral,resize)
end
return Voronoi_Domain(copy(domain.boundary),deepcopy(domain.shifts),copy(domain.references),deepcopy(domain.reference_shifts),copy(domain.internal_boundary),ExplicitMeshContainer(newmesh),domain.internaly_precise,newintegral,deepcopy(domain.reflections))
end
function append!(domain::VD,new_xs) where VD<:Voronoi_Domain
l_old = length(mesh(domain))
lnxs = length(new_xs)
lb = length(boundary(domain))
append!(domain._integral,new_xs)
append!(domain.mesh,new_xs)
append!(domain.reflections,[falses(lb) for _ in 1:lnxs])
return MeshView(domain.mesh,SwitchView(l_old+1,l_old+lnxs))
end
function prepend!(domain::Voronoi_Domain,new_xs::ReflectedNodes;kwargs...)
lnxs=length(new_xs.data)
prepend!(domain.references,new_xs.references)
prepend!(domain.reference_shifts,new_xs.reference_shifts)
domain.references .+= lnxs # note that reference hereafter will refer to the original node in the new full list.
# Makes it compatible with refinements
#add_virtual_points(domain.integral,new_xs.data)
prepend!(domain.mesh,new_xs.data)
prepend!(domain._integral,new_xs)
end
struct Voronoi_Domain_Store_1{P<:Point,T<:AbstractMesh{P},TT<:HVIntegral{P}}
boundary::Boundary
shifts::Vector{Vector{Float64}}
references::Vector{Int64}
reference_shifts::Vector{BitVector}
internal_boundary::Boundary
_mesh::T
internaly_precise::Bool
integral_store::Voronoi_Integral_Store_Container_1
reflections::Vector{BitVector}
end
JLD2.writeas(::Type{Voronoi_Domain{P, T , TT}}) where {P, T, TT} = Voronoi_Domain_Store_1{P, T , TT}
JLD2.wconvert(::Type{Voronoi_Domain_Store_1{P, T , TT}},domain::Voronoi_Domain{P, T , TT} ) where {P, T, TT} =
Voronoi_Domain_Store_1{P, T, TT}(
domain.boundary,
domain.shifts,
domain.references,
domain.reference_shifts,
domain.internal_boundary,
domain._mesh,
domain.internaly_precise,
Voronoi_Integral_Store_Container_1(domain._integral), # Initializing this field as per your requirement
domain.reflections
)
JLD2.rconvert(::Type{Voronoi_Domain{P, T , TT}},store::Voronoi_Domain_Store_1{P, T , TT}) where {P, T, TT} =
Voronoi_Domain{P, T, TT}(
store.boundary,
store.shifts,
store.references,
store.reference_shifts,
store.internal_boundary,
store._mesh,
store.internaly_precise,
Voronoi_Integral(store.integral_store, store._mesh.data),
store.reflections
)
| HighVoronoi | https://github.com/martinheida/HighVoronoi.jl.git |
|
[
"MIT"
] | 1.3.1 | bc1b8bfb168b8850d28c935b20a545882a9a078e | code | 3712 | """
VoronoiNodes(x::Matrix)
also available in the forms
VoronoiNodes(x::Vector{<:Vector})
VoronoiNodes(x::Vector{<:SVector})
creates a list of points (as static vectors) from a matrix.
# Example: 100 Points in ``(0,1)^3``
data = rand(3,100)
points = VoronoiNodes(data)
"""
VoronoiNodes(x::Matrix) = map(SVector{size(x,1),eltype(x)}, eachcol(x))
VoronoiNodes(x::Matrix,hint::Int64) = map(SVector{hint,eltype(x)}, eachcol(x))
VoronoiNodes(x::Vector{<:Vector}) = map(SVector{length(x[1])}, x)
VoronoiNodes(x::Vector{<:SVector};perturbation=0.0) = perturbation==0.0 ? x : perturbNodes(x,perturbation)
VoronoiNodes(p::AbstractVector{Float64}) = VoronoiNodes([p])
VoronoiNodes(ini,dim::Int,l::Int) = Vector{SVector{dim,Float64}}(ini,l)
VoronoiNode(v) = SVector{length(v)}(v)
function perturbNodes(x::Vector{<:SVector},perturbation)
lx2=length(x)
x2 = Vector{typeof(x[1])}(undef,lx2)
dim=length(x2[1])
for i in 1:lx2
x2[i] = x[i] + perturbation*randn(dim)
end
return x2
end
function poly_box(domain::Boundary, bounding_box::Boundary)
dimension = 0
total_length = length(domain) + length(bounding_box)
if total_length==0
error("There is not enough data to create a distribution of points: Provide either :range or :domain !")
end
halfspaces = []
for p in Iterators.flatten((domain.planes,bounding_box.planes))
dimension = length(p.normal)
push!(halfspaces,HalfSpace(p.normal, dot(p.normal,p.base)))
end
halfspaces = [h for h in halfspaces]
left = zeros(Float64,dimension)
right = zeros(Float64,dimension)
left .= Inf64
right .= -Inf64
poly_vol = 0.0
try
poly = polyhedron(hrep(halfspaces))
my_points = Polyhedra.points(vrep(poly))
for p in my_points
for k in 1:dimension
left[k] = min(left[k],p[k])
right[k] = max(right[k],p[k])
end
end
poly_vol = Polyhedra.volume(poly)
catch
error("It is not possible to create a polyhedron from :domain and :bounding_box")
end
return left, right, poly_vol
end
function VoronoiNodes(nodes::Real;density = x->1.0, range=nothing, domain::Boundary=Boundary(),bounding_box::Boundary=Boundary(),resolution=nothing,criterium=x->true,silence = true,factor=100)
if range==nothing
left, right, poly_vol = poly_box(domain,bounding_box)
dimension = length(left)
box_vol = prod(k->right[k]-left[k],1:dimension)
if typeof(nodes)<:Integer && resolution==nothing
resolution = unsafe_trunc(Int64,((box_vol/poly_vol)*nodes*factor)^(1/dimension))*(dimension==2 ? 10 : 1) + 1
end
range = DensityRange(resolution*ones(Int64,dimension),map(k->(left[k],right[k]),1:dimension))
end
println("total max resolution: $(prod(range.number_of_cells))")
if !(typeof(nodes)<:Integer)
nodes = round(Int64,prod(range.number_of_cells)*nodes)
else
nodes = min(nodes,prod(range.number_of_cells))
end
_criterium = x->(criterium(x) && (x in domain))
density = get_density(density,_criterium,range)
cell_vol = prod(range.dimensions)
ρ = x->1.0-(1.0-density(x)*cell_vol)^nodes+nodes*(nodes-1)*0.5*(density(x)*cell_vol)^2
oldstd = stdout
try
redirect_stdout(silence ? devnull : oldstd)
print("Calculated nodes so far: ")
res = get_nodes(x::Point->(_criterium(x) && rand()<ρ(x)),range)
println()
redirect_stdout(oldstd)
return res
catch
redirect_stdout(oldstd)
rethrow()
end
end
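# Usage sketch (hypothetical parameters, not executed): draw roughly 1000 nodes inside a
# Boundary object `dom` (its construction is not shown here), distributed according to the
# unnormalized density exp(-5|x|^2).
#=
nodes = VoronoiNodes(1000; density = x -> exp(-5 * sum(abs2, x)), domain = dom)
=#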
| HighVoronoi | https://github.com/martinheida/HighVoronoi.jl.git |
|
[
"MIT"
] | 1.3.1 | bc1b8bfb168b8850d28c935b20a545882a9a078e | code | 1527 | module HVNearestNeighbors
#####################################################################################
## The code of this submodule is taken from NearestNeighbors.jl
## This is done in order to prevent problems in case the structure
## of said package is modified at some point in future.
## However, we need to hack the algorithms of this package in order to obtain
## faster performance fitted to our problem
######################################################################################
using Distances
using ..HighVoronoi
import Distances: Metric, result_type, eval_reduce, eval_end, eval_op, eval_start, evaluate, parameters
using StaticArrays
using LinearAlgebra
import Base.show
export HVNNTree, HVKDTree, HVBallTree, skip_nodes_on_search
#=export knn, nn, inrange, inrangecount # TODOs? , allpairs, distmat, npairs
export injectdata
export Euclidean,
Cityblock,
Minkowski,
Chebyshev,
Hamming,
WeightedEuclidean,
WeightedCityblock,
WeightedMinkowski
=#
abstract type HVNNTree{V <: AbstractVector,P <: Metric} end
const MinkowskiMetric = Union{Euclidean,Chebyshev,Cityblock,Minkowski,WeightedEuclidean,WeightedCityblock,WeightedMinkowski}
get_T(::Type{T}) where {T <: AbstractFloat} = T
get_T(::T) where {T} = Float64
include("evaluation.jl")
include("tree_data.jl")
include("hyperspheres.jl")
include("hyperrectangles.jl")
include("utilities.jl")
include("kd_tree.jl")
include("ball_tree.jl")
include("tree_ops.jl")
end # module
| HighVoronoi | https://github.com/martinheida/HighVoronoi.jl.git |
|
[
"MIT"
] | 1.3.1 | bc1b8bfb168b8850d28c935b20a545882a9a078e | code | 9352 | # A HVBallTree (also called Metric tree) is a tree that is created
# from successively splitting points into surrounding hyper spheres
# whose radii are determined by the given metric.
# The tree uses the triangle inequality to prune the search space
# when finding the neighbors of a point.
struct HVBallTree{V <: AbstractVector,N,T,M <: Metric} <: HVNNTree{V,M}
data::Vector{V}
hyper_spheres::Vector{HyperSphere{N,T}} # Each hyper sphere bounds its children
indices::Vector{Int} # Translates from tree index -> point index
metric::M # Metric used for tree
tree_data::TreeData # Some constants needed
reordered::Bool # If the data has been reordered
end
# When we create the bounding spheres we need some temporary arrays.
# We create a type to hold them to not allocate these arrays at every
# function call and to reduce the number of parameters in the tree builder.
struct ArrayBuffers{N,T <: AbstractFloat}
center::MVector{N,T}
end
function ArrayBuffers(::Type{Val{N}}, ::Type{T}) where {N, T}
ArrayBuffers(zeros(MVector{N,T}))
end
"""
HVBallTree(data [, metric = Euclidean(); leafsize = 10, reorder = true]) -> balltree
Creates a `HVBallTree` from the data using the given `metric` and `leafsize`.
"""
function HVBallTree(data::AbstractVector{V},
metric::M = Euclidean();
leafsize::Int = 10,
reorder::Bool = true,
storedata::Bool = true,
reorderbuffer::Vector{V} = Vector{V}()) where {V <: AbstractArray, M <: Metric}
reorder = !isempty(reorderbuffer) || (storedata ? reorder : false)
tree_data = TreeData(data, leafsize)
n_d = length(V)
n_p = length(data)
array_buffs = ArrayBuffers(Val{length(V)}, get_T(eltype(V)))
indices = collect(1:n_p)
# Bottom-up creation of hyper spheres, so we need spheres even for leaf nodes
hyper_spheres = Vector{HyperSphere{length(V),eltype(V)}}(undef, tree_data.n_internal_nodes + tree_data.n_leafs)
if reorder
indices_reordered = Vector{Int}(undef, n_p)
if isempty(reorderbuffer)
data_reordered = Vector{V}(undef, n_p)
else
data_reordered = reorderbuffer
end
else
# Dummy variables
indices_reordered = Vector{Int}()
data_reordered = Vector{V}()
end
if metric isa Distances.UnionMetrics
p = parameters(metric)
if p !== nothing && length(p) != length(V)
throw(ArgumentError(
"dimension of input points:$(length(V)) and metric parameter:$(length(p)) must agree"))
end
end
if n_p > 0
# Call the recursive HVBallTree builder
build_HVBallTree(1, data, data_reordered, hyper_spheres, metric, indices, indices_reordered,
1, length(data), tree_data, array_buffs, reorder)
end
if reorder
data = data_reordered
indices = indices_reordered
end
HVBallTree(storedata ? data : similar(data, 0), hyper_spheres, indices, metric, tree_data, reorder)
end
function HVBallTree(data::AbstractVecOrMat{T},
metric::M = Euclidean();
leafsize::Int = 10,
storedata::Bool = true,
reorder::Bool = true,
reorderbuffer::Matrix{T} = Matrix{T}(undef, 0, 0)) where {T <: AbstractFloat, M <: Metric}
dim = size(data, 1)
npoints = size(data, 2)
points = copy_svec(T, data, Val(dim))
if isempty(reorderbuffer)
reorderbuffer_points = Vector{SVector{dim,T}}()
else
reorderbuffer_points = copy_svec(T, reorderbuffer, Val(dim))
end
HVBallTree(points, metric, leafsize = leafsize, storedata = storedata, reorder = reorder,
reorderbuffer = reorderbuffer_points)
end
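# Usage sketch (developer example, not executed), mirroring the conventions of NearestNeighbors.jl:
#=
pts = [rand(SVector{3,Float64}) for _ in 1:100]
tree = HVBallTree(pts, Euclidean(); leafsize = 10)
=#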
# Recursive function to build the tree.
function build_HVBallTree(index::Int,
data::Vector{V},
data_reordered::Vector{V},
hyper_spheres::Vector{HyperSphere{N,T}},
metric::Metric,
indices::Vector{Int},
indices_reordered::Vector{Int},
low::Int,
high::Int,
tree_data::TreeData,
array_buffs::ArrayBuffers{N,T},
reorder::Bool) where {V <: AbstractVector, N, T}
n_points = high - low + 1 # Points left
if n_points <= tree_data.leafsize
if reorder
reorder_data!(data_reordered, data, index, indices, indices_reordered, tree_data)
end
# Create bounding sphere of points in leaf node by brute force
hyper_spheres[index] = create_bsphere(data, metric, indices, low, high, array_buffs)
return
end
# Find split such that one of the sub trees has 2^p points
# and the left sub tree has more points
mid_idx = find_split(low, tree_data.leafsize, n_points)
# Brute force to find the dimension with the largest spread
split_dim = find_largest_spread(data, indices, low, high)
# Sort the data at the mid_idx boundary using the split_dim
# to compare
select_spec!(indices, mid_idx, low, high, data, split_dim)
build_HVBallTree(getleft(index), data, data_reordered, hyper_spheres, metric,
indices, indices_reordered, low, mid_idx - 1,
tree_data, array_buffs, reorder)
build_HVBallTree(getright(index), data, data_reordered, hyper_spheres, metric,
indices, indices_reordered, mid_idx, high,
tree_data, array_buffs, reorder)
# Finally create bounding hyper sphere from the two children's hyper spheres
hyper_spheres[index] = create_bsphere(metric, hyper_spheres[getleft(index)],
hyper_spheres[getright(index)],
array_buffs)
end
function _knn(tree::HVBallTree,
point::AbstractVector,
best_idxs::AbstractVector{Int},
best_dists::AbstractVector,
skip::F) where {F}
knn_kernel!(tree, 1, point, best_idxs, best_dists, skip)
return
end
function knn_kernel!(tree::HVBallTree{V},
index::Int,
point::AbstractArray,
best_idxs::AbstractVector{Int},
best_dists::AbstractVector,
skip::F) where {V, F}
if isleaf(tree.tree_data.n_internal_nodes, index)
add_points_knn!(best_dists, best_idxs, tree, index, point, true, skip)
return
end
left_sphere = tree.hyper_spheres[getleft(index)]
right_sphere = tree.hyper_spheres[getright(index)]
left_dist = max(zero(eltype(V)), evaluate(tree.metric, point, left_sphere.center) - left_sphere.r)
right_dist = max(zero(eltype(V)), evaluate(tree.metric, point, right_sphere.center) - right_sphere.r)
if left_dist <= best_dists[1] || right_dist <= best_dists[1]
if left_dist < right_dist
knn_kernel!(tree, getleft(index), point, best_idxs, best_dists, skip)
if right_dist <= best_dists[1]
knn_kernel!(tree, getright(index), point, best_idxs, best_dists, skip)
end
else
knn_kernel!(tree, getright(index), point, best_idxs, best_dists, skip)
if left_dist <= best_dists[1]
knn_kernel!(tree, getleft(index), point, best_idxs, best_dists, skip)
end
end
end
return
end
function _inrange(tree::HVBallTree{V},
point::AbstractVector,
radius::Number,
idx_in_ball::Union{Nothing, Vector{Int}}) where {V}
ball = HyperSphere(convert(V, point), convert(eltype(V), radius)) # The "query ball"
return inrange_kernel!(tree, 1, point, ball, idx_in_ball) # Call the recursive range finder
end
function inrange_kernel!(tree::HVBallTree,
index::Int,
point::AbstractVector,
query_ball::HyperSphere,
idx_in_ball::Union{Nothing, Vector{Int}})
if index > length(tree.hyper_spheres)
return 0
end
sphere = tree.hyper_spheres[index]
# If the query ball and the bounding sphere for the current sub tree
# do not intersect we can discard the whole subtree
if !intersects(tree.metric, sphere, query_ball)
return 0
end
# At a leaf node, check all points in the leaf node
if isleaf(tree.tree_data.n_internal_nodes, index)
return add_points_inrange!(idx_in_ball, tree, index, point, query_ball.r, true)
end
count = 0
# The query ball encloses the sub tree bounding sphere. Add all points in the
# sub tree without checking the distance function.
if encloses(tree.metric, sphere, query_ball)
count += addall(tree, index, idx_in_ball)
else
# Recursively call the left and right sub tree.
count += inrange_kernel!(tree, getleft(index), point, query_ball, idx_in_ball)
count += inrange_kernel!(tree, getright(index), point, query_ball, idx_in_ball)
end
return count
end
| HighVoronoi | https://github.com/martinheida/HighVoronoi.jl.git |
|
[
"MIT"
] | 1.3.1 | bc1b8bfb168b8850d28c935b20a545882a9a078e | code | 1107 | @inline eval_pow(::MinkowskiMetric, s) = abs(s)
@inline eval_pow(::Euclidean, s) = abs2(s)
@inline eval_pow(::WeightedEuclidean, s) = abs2(s)
@inline eval_pow(d::Minkowski, s) = abs(s)^d.p
@inline eval_diff(::MinkowskiMetric, a, b) = a - b
@inline eval_diff(::Chebyshev, ::Any, b) = b
function myevaluate(d::Distances.UnionMetrics, a::AbstractVector,
b::AbstractVector, do_end::Bool)
p = Distances.parameters(d)
s = eval_start(d, a, b)
if p === nothing
@simd for i in eachindex(b)
@inbounds ai = a[i]
@inbounds bi = b[i]
s = eval_reduce(d, s, eval_op(d, ai, bi))
end
else
@simd for i in eachindex(b)
@inbounds ai = a[i]
@inbounds bi = b[i]
@inbounds pi = p[i]
s = eval_reduce(d, s, eval_op(d, ai, bi, pi))
end
end
if do_end
return eval_end(d, s)
else
return s
end
end
function myevaluate(d::Distances.PreMetric, a::AbstractVector,
b::AbstractVector, ::Bool)
evaluate(d, a, b)
end
| HighVoronoi | https://github.com/martinheida/HighVoronoi.jl.git |
|
[
"MIT"
] | 1.3.1 | bc1b8bfb168b8850d28c935b20a545882a9a078e | code | 1451 | # abstract HyperRectangle{N, T}
struct HyperRectangle{T}
mins::Vector{T}
maxes::Vector{T}
end
# Computes a bounding box around a point cloud
function compute_bbox(data::AbstractVector{V}) where {V <: AbstractVector}
T = eltype(V)
n_dim = length(V)
maxes = Vector{T}(undef, n_dim)
mins = Vector{T}(undef, n_dim)
@inbounds for j in 1:length(V)
dim_max = typemin(T)
dim_min = typemax(T)
for k in 1:length(data)
dim_max = max(data[k][j], dim_max)
dim_min = min(data[k][j], dim_min)
end
maxes[j] = dim_max
mins[j] = dim_min
end
return HyperRectangle(mins, maxes)
end
############################################
# Rectangle - Point functions
############################################
# Max distance between rectangle and point
@inline function get_max_distance(rec::HyperRectangle, point::AbstractVector{T}) where {T}
max_dist = zero(T)
@inbounds @simd for dim in eachindex(point)
max_dist += abs2(max(rec.maxes[dim] - point[dim], point[dim] - rec.mins[dim]))
end
return max_dist
end
# Min distance between rectangle and point
@inline function get_min_distance(rec::HyperRectangle, point::AbstractVector{T}) where {T}
min_dist = zero(T)
@inbounds @simd for dim in eachindex(point)
min_dist += abs2(max(0, max(rec.mins[dim] - point[dim], point[dim] - rec.maxes[dim])))
end
return min_dist
end
| HighVoronoi | https://github.com/martinheida/HighVoronoi.jl.git |
|
[
"MIT"
] | 1.3.1 | bc1b8bfb168b8850d28c935b20a545882a9a078e | code | 3187 | const NormMetric = Union{Euclidean,Chebyshev,Cityblock,Minkowski,WeightedEuclidean,WeightedCityblock,WeightedMinkowski,Mahalanobis}
struct HyperSphere{N,T <: AbstractFloat}
center::SVector{N,T}
r::T
end
HyperSphere(center::SVector{N,T1}, r::T2) where {N, T1, T2} = HyperSphere(center, convert(T1, r))
HyperSphere(center::AbstractVector{T}, r) where {T} = HyperSphere{length(center),T}(center, r)
@inline function intersects(m::M,
s1::HyperSphere{N,T},
s2::HyperSphere{N,T}) where {T <: AbstractFloat, N, M <: Metric}
evaluate(m, s1.center, s2.center) <= s1.r + s2.r
end
@inline function encloses(m::M,
s1::HyperSphere{N,T},
s2::HyperSphere{N,T}) where {T <: AbstractFloat, N, M <: Metric}
evaluate(m, s1.center, s2.center) + s1.r <= s2.r
end
@inline function interpolate(::M,
c1::V,
c2::V,
x,
d,
ab) where {V <: AbstractVector, M <: NormMetric}
alpha = x / d
@assert length(c1) == length(c2)
@inbounds for i in eachindex(ab.center)
ab.center[i] = (1 - alpha) .* c1[i] + alpha .* c2[i]
end
return ab.center, true
end
@inline function interpolate(::M,
c1::V,
::V,
::Any,
::Any,
::Any) where {V <: AbstractVector, M <: Metric}
return c1, false
end
function create_bsphere(data::AbstractVector{V}, metric::Metric, indices::Vector{Int}, low, high, ab) where {V}
n_points = high - low + 1
# First find center of all points
fill!(ab.center, 0.0)
for i in low:high
for j in 1:length(ab.center)
ab.center[j] += data[indices[i]][j]
end
end
ab.center .*= 1 / n_points
# Then find r
r = zero(get_T(eltype(V)))
for i in low:high
r = max(r, evaluate(metric, data[indices[i]], ab.center))
end
r += eps(get_T(eltype(V)))
return HyperSphere(SVector{length(V),eltype(V)}(ab.center), r)
end
# Creates a bounding sphere from two other spheres
function create_bsphere(m::Metric,
s1::HyperSphere{N,T},
s2::HyperSphere{N,T},
ab) where {N, T <: AbstractFloat}
if encloses(m, s1, s2)
return HyperSphere(s2.center, s2.r)
elseif encloses(m, s2, s1)
return HyperSphere(s1.center, s1.r)
end
# Compute the distance x along a geodesic from s1.center to s2.center
# where the new center should be placed (note that 0 <= x <= d because
# neither s1 nor s2 contains the other)
dist = evaluate(m, s1.center, s2.center)
x = 0.5 * (s2.r - s1.r + dist)
center, is_exact_center = interpolate(m, s1.center, s2.center, x, dist, ab)
if is_exact_center
rad = 0.5 * (s2.r + s1.r + dist)
else
rad = max(s1.r + evaluate(m, s1.center, center), s2.r + evaluate(m, s2.center, center))
end
return HyperSphere(SVector{N,T}(center), rad)
end
| HighVoronoi | https://github.com/martinheida/HighVoronoi.jl.git |
|
[
"MIT"
] | 1.3.1 | bc1b8bfb168b8850d28c935b20a545882a9a078e | code | 18205 | # A KDNode stores the information needed in each non leaf node
# to make the needed distance computations
struct KDNode{T}
lo::T # The low boundary for the hyper rect in this dimension
hi::T # The high boundary for the hyper rect in this dimension
split_val::T # The value the hyper rectangle was split at
split_dim::Int # The dimension the hyper rectangle was split at
end
struct HVKDTree{V <: AbstractVector,M <: MinkowskiMetric,T} <: HVNNTree{V,M}
data::Vector{V}
hyper_rec::HyperRectangle{T}
indices::Vector{Int}
metric::M
nodes::Vector{KDNode{T}}
tree_data::TreeData
reordered::Bool
end
"""
HVKDTree(data [, metric = Euclidean(); leafsize = 10, reorder = true]) -> kdtree
Creates a `HVKDTree` from the data using the given `metric` and `leafsize`.
The `metric` must be a `MinkowskiMetric`.
"""
function HVKDTree(data::AbstractVector{V},
metric::M = Euclidean();
leafsize::Int = 10,
storedata::Bool = true,
reorder::Bool = true,
reorderbuffer::Vector{V} = Vector{V}()) where {V <: AbstractArray, M <: MinkowskiMetric}
reorder = !isempty(reorderbuffer) || (storedata ? reorder : false)
tree_data = TreeData(data, leafsize)
n_d = length(V)
n_p = length(data)
indices = collect(1:n_p)
nodes = Vector{KDNode{eltype(V)}}(undef, tree_data.n_internal_nodes)
if reorder
indices_reordered = Vector{Int}(undef, n_p)
if isempty(reorderbuffer)
data_reordered = Vector{V}(undef, n_p)
else
data_reordered = reorderbuffer
end
else
# Dummy variables
indices_reordered = Vector{Int}()
data_reordered = Vector{V}()
end
if metric isa Distances.UnionMetrics
p = parameters(metric)
if p !== nothing && length(p) != length(V)
throw(ArgumentError(
"dimension of input points:$(length(V)) and metric parameter:$(length(p)) must agree"))
end
end
# Create first bounding hyper rectangle that bounds all the input points
hyper_rec = compute_bbox(data)
# Call the recursive HVKDTree builder
build_HVKDTree(1, data, data_reordered, hyper_rec, nodes, indices, indices_reordered,
1, length(data), tree_data, reorder)
if reorder
data = data_reordered
indices = indices_reordered
end
HVKDTree(storedata ? data : similar(data, 0), hyper_rec, indices, metric, nodes, tree_data, reorder)
end
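# Hedged usage sketch for the constructor above (commented out so it is not run at load
# time; `leafsize` and `reorder` are shown with their documented default values).
#=
    using StaticArrays
    data = [@SVector rand(3) for _ in 1:1000]
    tree = HVKDTree(data, Euclidean(); leafsize = 10, reorder = true)
    idxs, dists = knn(tree, data[1], 5, true)   # the 5 nearest neighbours of the first point
=#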
#=
function HVKDTree(data::AbstractVecOrMat{T},
metric::M = Euclidean();
leafsize::Int = 10,
storedata::Bool = true,
reorder::Bool = true,
reorderbuffer::Matrix{T} = Matrix{T}(undef, 0, 0)) where {T <: AbstractFloat, M <: MinkowskiMetric}
dim = size(data, 1)
npoints = size(data, 2)
points = copy_svec(T, data, Val(dim))
if isempty(reorderbuffer)
reorderbuffer_points = Vector{SVector{dim,T}}()
else
reorderbuffer_points = copy_svec(T, reorderbuffer, Val(dim))
end
HVKDTree(points, metric, leafsize = leafsize, storedata = storedata, reorder = reorder,
reorderbuffer = reorderbuffer_points)
end
=#
function build_HVKDTree(index::Int,
data::AbstractVector{V},
data_reordered::Vector{V},
hyper_rec::HyperRectangle,
nodes::Vector{KDNode{T}},
indices::Vector{Int},
indices_reordered::Vector{Int},
low::Int,
high::Int,
tree_data::TreeData,
reorder::Bool) where {V <: AbstractVector, T}
n_p = high - low + 1 # Points left
if n_p <= tree_data.leafsize
if reorder
reorder_data!(data_reordered, data, index, indices, indices_reordered, tree_data)
end
return
end
mid_idx = find_split(low, tree_data.leafsize, n_p)
split_dim = 1
max_spread = zero(T)
# Find dimension and spread where the spread is maximal
for d in 1:length(V)
spread = hyper_rec.maxes[d] - hyper_rec.mins[d]
if spread > max_spread
max_spread = spread
split_dim = d
end
end
select_spec!(indices, mid_idx, low, high, data, split_dim)
split_val = data[indices[mid_idx]][split_dim]
lo = hyper_rec.mins[split_dim]
hi = hyper_rec.maxes[split_dim]
nodes[index] = KDNode{T}(lo, hi, split_val, split_dim)
# Call the left sub tree with an updated hyper rectangle
hyper_rec.maxes[split_dim] = split_val
build_HVKDTree(getleft(index), data, data_reordered, hyper_rec, nodes,
indices, indices_reordered, low, mid_idx - 1, tree_data, reorder)
hyper_rec.maxes[split_dim] = hi # Restore the hyper rectangle
# Call the right sub tree with an updated hyper rectangle
hyper_rec.mins[split_dim] = split_val
build_HVKDTree(getright(index), data, data_reordered, hyper_rec, nodes,
indices, indices_reordered, mid_idx, high, tree_data, reorder)
# Restore the hyper rectangle
hyper_rec.mins[split_dim] = lo
end
"""
knn(tree::HVNNTree, points, k [, sortres=false]) -> indices, distances
    nn(tree::HVNNTree, points) -> indices, distances
Performs a lookup of the `k` nearest neighbours to the `points` from the data
in the `tree`. If `sortres = true` the result is sorted such that the results are
in the order of increasing distance to the point. `skip` is an optional predicate
to determine if a point that would be returned should be skipped based on its
index.
"""
function knn(tree::HVNNTree{V}, points::Vector{T}, k::Int, sortres=false, skip::F=always_false) where {V, T <: AbstractVector, F<:Function}
check_input(tree, points)
check_k(tree, k)
n_points = length(points)
dists = [Vector{get_T(eltype(V))}(undef, k) for _ in 1:n_points]
idxs = [Vector{Int}(undef, k) for _ in 1:n_points]
for i in 1:n_points
knn_point!(tree, points[i], sortres, dists[i], idxs[i], skip)
end
return idxs, dists
end
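# Hedged example of the batched interface above (commented out; `tree` refers to the
# sketch next to the constructor). The optional `skip` predicate receives the index of
# a candidate point and returns `true` if that point should be excluded from the result.
#=
    queries = [@SVector rand(3) for _ in 1:10]
    idxs, dists = knn(tree, queries, 3, true)              # 3 neighbours per query point
    idxs, dists = knn(tree, queries, 3, true, i -> i == 7) # never return point number 7
=#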
function knn_point!(tree::HVNNTree{V}, point::AbstractVector{T}, sortres, dist, idx, skip::F) where {V, T <: Number, F}
fill!(idx, -1)
fill!(dist, typemax(get_T(eltype(V))))
_knn(tree, point, idx, dist, skip)
if skip !== always_false
skipped_idxs = findall(==(-1), idx)
deleteat!(idx, skipped_idxs)
deleteat!(dist, skipped_idxs)
end
sortres && heap_sort_inplace!(dist, idx)
if tree.reordered
for j in eachindex(idx)
@inbounds idx[j] = tree.indices[idx[j]]
end
end
return
end
function knn(tree::HVNNTree{V}, point::AbstractVector{T}, k::Int, sortres=false, skip::F=always_false) where {V, T <: Number, F<:Function}
#check_k(tree, k)
idx = Vector{Int}(undef, k)
dist = Vector{get_T(eltype(V))}(undef, k)
knn_point!(tree, point, sortres, dist, idx, skip)
return idx, dist
end
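# `_knn_flex` below restarts the traversal whenever `knn_kernel_flex!` returns `false`,
# which happens as soon as a leaf updates the search sphere `data.new_r`; the box bounds
# stored in `data` are then reset to the full hyper rectangle and the search is repeated
# with the updated sphere.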
function _knn_flex(tree::HVKDTree,
point::AbstractVector,
best_idxs::AbstractVector{Int},
best_dists::AbstractVector,
skip::F,d::D) where {F,D}
init_min = get_min_distance(tree.hyper_rec, point)
while !knn_kernel_flex!(tree, 1, d.r, best_idxs, best_dists, init_min, skip,d)
d.maxs .= tree.hyper_rec.maxes
d.mins .= tree.hyper_rec.mins
end
d.maxs .= tree.hyper_rec.maxes
d.mins .= tree.hyper_rec.mins
end
@inline function safe_expandable_bitvector(bv::BitVector, index::Int )
lbv = length(bv)
if index > lbv
        resize!(bv, index) # extend the BitVector up to the requested index
bv[(lbv+1):end] .= false
end
return @inbounds bv[index]
end
@inline function safe_expandable_bitvector!(bv::BitVector, index::Int, value::Bool )
safe_expandable_bitvector(bv,index)
return bv[index] = value
end
@inline function switch_leaf(data,split_val,split_dim,right)
old_val = right ? data.mins[split_dim] : data.maxs[split_dim]
if right
@inbounds data.mins[split_dim] = split_val
else
@inbounds data.maxs[split_dim] = split_val
end
return old_val, split_dim, right
end
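# `valid_leaf` below first restricts the current box via `switch_leaf` and then checks
# whether the restricted box still reaches into the half-space {y : dot(u,y) > c}:
# `max_dot` is the maximum of dot(data.u, y) over the box (its support function in
# direction `u`), so the corresponding sub tree can be skipped whenever max_dot <= c.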
@inline function valid_leaf(data,split_val,split_dim,right)
ov, sd, r = switch_leaf(data,split_val,split_dim,right)
#return true, ov, sd, r
u = data.u
maxs = data.maxs
mins = data.mins
max_dot = 0.0
for i in eachindex(u)
if u[i] > 0
max_dot += maxs[i] * u[i]
else
max_dot += mins[i] * u[i]
end
end
#return max_dot>data.c && intersects_cuboid_ball(data.new_r,data.mins,data.maxs,data.dist_new_r_x0_2*(1+1E-10)), ov, sd, r
return max_dot>data.c , ov, sd, r
end
function knn_kernel_flex!(tree::HVKDTree{V},
index::Int,
point::AV1,
best_idxs::BI,
best_dists::AV2,
min_dist,
skip::F,data::D) where {V, AV1<:AbstractVector, BI<:AbstractVector{Int}, AV2<:AbstractVector, F, D}
# At a leaf node. Go through all points in node and add those in range
if isleaf(tree.tree_data.n_internal_nodes, index)
old_r = data.new_r
safe_expandable_bitvector!(data.visited_leafs,index, true)
#if HighVoronoi.intersects_cuboid_ball(data.new_r,data.mins,data.maxs,data.dist_new_r_x0_2*(1+1E-10))
#if !valid_end_leaf(data)
# return true
#end
add_points_knn_flex!(best_dists, best_idxs, tree, index, point, false, skip,data)
if old_r!=data.new_r
#data.dist_new_r_x0_2 = norm(r-x0)^2 needs no change
data.r = data.new_r
data.bestdist[1] = myevaluate(tree.metric, data.x0, data.new_r, false)*(1+1000*data.plane_tolerance)
data.dist_r_x0_2 = data.bestdist[1]
return false
end
return true
end
node = tree.nodes[index]
p_dim = point[node.split_dim]
split_val = node.split_val
split_diff = p_dim - split_val
# Point is to the right of the split value
hi = node.hi
lo = node.lo
M = tree.metric
if split_diff > 0
close = getright(index)
far = getleft(index)
ddiff = max(zero(eltype(V)), p_dim - hi)
right = true
else
close = getleft(index)
far = getright(index)
ddiff = max(zero(eltype(V)), lo - p_dim)
right = false
end
# Always call closer sub tree
success = true
valid, p1,p2,p3 = valid_leaf(data,split_val, node.split_dim,right)
!valid && (safe_expandable_bitvector!(data.visited_leafs,close,true))
if (!safe_expandable_bitvector(data.visited_leafs,close))
success &= knn_kernel_flex!(tree, close, point, best_idxs, best_dists, min_dist, skip, data)
(!success) && (return false)
end
switch_leaf(data,p1,p2,p3)
split_diff_pow = eval_pow(M, split_diff)
ddiff_pow = eval_pow(M, ddiff)
diff_tot = eval_diff(M, split_diff_pow, ddiff_pow)
new_min = eval_reduce(M, min_dist, diff_tot)
valid, p1,p2,p3 = valid_leaf(data,split_val, node.split_dim,!right)
!valid && (safe_expandable_bitvector!(data.visited_leafs,far,true))
if new_min < best_dists[1] && !safe_expandable_bitvector(data.visited_leafs,far)
success &= knn_kernel_flex!(tree, far, point, best_idxs, best_dists, new_min, skip,data)
(!success) && (return false)
end
switch_leaf(data,p1,p2,p3)
safe_expandable_bitvector!(data.visited_leafs,index, true)
return true
end
function _knn(tree::HVKDTree,
point::AbstractVector,
best_idxs::AbstractVector{Int},
best_dists::AbstractVector,
skip::F) where {F}
init_min = get_min_distance(tree.hyper_rec, point)
knn_kernel!(tree, 1, point, best_idxs, best_dists, init_min, skip)
@simd for i in eachindex(best_dists)
@inbounds best_dists[i] = eval_end(tree.metric, best_dists[i])
end
end
function knn_kernel!(tree::HVKDTree{V},
index::Int,
point::AbstractVector,
best_idxs::AbstractVector{Int},
best_dists::AbstractVector,
min_dist,
skip::F) where {V, F}
# At a leaf node. Go through all points in node and add those in range
if isleaf(tree.tree_data.n_internal_nodes, index)
add_points_knn!(best_dists, best_idxs, tree, index, point, false, skip)
return
end
node = tree.nodes[index]
p_dim = point[node.split_dim]
split_val = node.split_val
lo = node.lo
hi = node.hi
split_diff = p_dim - split_val
M = tree.metric
# Point is to the right of the split value
if split_diff > 0
close = getright(index)
far = getleft(index)
ddiff = max(zero(eltype(V)), p_dim - hi)
else
close = getleft(index)
far = getright(index)
ddiff = max(zero(eltype(V)), lo - p_dim)
end
# Always call closer sub tree
knn_kernel!(tree, close, point, best_idxs, best_dists, min_dist, skip)
split_diff_pow = eval_pow(M, split_diff)
ddiff_pow = eval_pow(M, ddiff)
diff_tot = eval_diff(M, split_diff_pow, ddiff_pow)
new_min = eval_reduce(M, min_dist, diff_tot)
if new_min < best_dists[1]
knn_kernel!(tree, far, point, best_idxs, best_dists, new_min, skip)
end
return
end
function _inrange(tree::HVKDTree,
point::AbstractVector,
radius::Number,
idx_in_ball::Union{Nothing, Vector{Int}} = Int[])
init_min = get_min_distance(tree.hyper_rec, point)
return inrange_kernel!(tree, 1, point, eval_op(tree.metric, radius, zero(init_min)), idx_in_ball,
init_min)
end
# Explicitly check the distance between leaf node and point while traversing
function inrange_kernel!(tree::HVKDTree,
index::Int,
point::AbstractVector,
r::Number,
idx_in_ball::Union{Nothing, Vector{Int}},
min_dist)
# Point is outside hyper rectangle, skip the whole sub tree
if min_dist > r
return 0
end
# At a leaf node. Go through all points in node and add those in range
if isleaf(tree.tree_data.n_internal_nodes, index)
return add_points_inrange!(idx_in_ball, tree, index, point, r, false)
end
node = tree.nodes[index]
split_val = node.split_val
lo = node.lo
hi = node.hi
p_dim = point[node.split_dim]
split_diff = p_dim - split_val
M = tree.metric
count = 0
if split_diff > 0 # Point is to the right of the split value
close = getright(index)
far = getleft(index)
ddiff = max(zero(p_dim - hi), p_dim - hi)
else # Point is to the left of the split value
close = getleft(index)
far = getright(index)
ddiff = max(zero(lo - p_dim), lo - p_dim)
end
# Call closer sub tree
count += inrange_kernel!(tree, close, point, r, idx_in_ball, min_dist)
# TODO: We could potentially also keep track of the max distance
# between the point and the hyper rectangle and add the whole sub tree
# in case of the max distance being <= r similarly to the BallTree inrange method.
# It would be interesting to benchmark this on some different data sets.
# Call further sub tree with the new min distance
split_diff_pow = eval_pow(M, split_diff)
ddiff_pow = eval_pow(M, ddiff)
diff_tot = eval_diff(M, split_diff_pow, ddiff_pow)
new_min = eval_reduce(M, min_dist, diff_tot)
count += inrange_kernel!(tree, far, point, r, idx_in_ball, new_min)
return count
end
check_radius(r) = r < 0 && throw(ArgumentError("the query radius r must be ≧ 0"))
#=
"""
inrange(tree::HVNNTree, points, radius [, sortres=false]) -> indices
Find all the points in the tree which is closer than `radius` to `points`. If
`sortres = true` the resulting indices are sorted.
"""
function inrange(tree::HVNNTree,
points::Vector{T},
radius::Number,
sortres=false) where {T <: AbstractVector}
check_input(tree, points)
check_radius(radius)
idxs = [Vector{Int}() for _ in 1:length(points)]
for i in 1:length(points)
inrange_point!(tree, points[i], radius, sortres, idxs[i])
end
return idxs
end
=#
function inrange_point!(tree, point, radius, sortres, idx)
count = _inrange(tree, point, radius, idx)
if idx !== nothing
if tree.reordered
@inbounds for j in 1:length(idx)
idx[j] = tree.indices[idx[j]]
end
end
sortres && sort!(idx)
end
return count
end
function inrange(tree::HVNNTree{V}, point::AbstractVector{T}, radius::Number, sortres=false) where {V, T <: Number}
#check_input(tree, point)
#check_radius(radius)
idx = Int[]
inrange_point!(tree, point, radius, sortres, idx)
return idx
end
#=
function inrange(tree::HVNNTree{V}, point::AbstractMatrix{T}, radius::Number, sortres=false) where {V, T <: Number}
dim = size(point, 1)
npoints = size(point, 2)
if isbitstype(T)
new_data = copy_svec(T, point, Val(dim))
else
new_data = SVector{dim,T}[SVector{dim,T}(point[:, i]) for i in 1:npoints]
end
inrange(tree, new_data, radius, sortres)
end
"""
inrangecount(tree::HVNNTree, points, radius) -> count
Count all the points in the tree which are closer than `radius` to `points`.
"""
function inrangecount(tree::HVNNTree{V}, point::AbstractVector{T}, radius::Number) where {V, T <: Number}
check_input(tree, point)
check_radius(radius)
return inrange_point!(tree, point, radius, false, nothing)
end
function inrangecount(tree::HVNNTree,
points::Vector{T},
radius::Number) where {T <: AbstractVector}
check_input(tree, points)
check_radius(radius)
return inrange_point!.(Ref(tree), points, radius, false, nothing)
end
function inrangecount(tree::HVNNTree{V}, point::AbstractMatrix{T}, radius::Number) where {V, T <: Number}
dim = size(point, 1)
npoints = size(point, 2)
if isbitstype(T)
new_data = copy_svec(T, point, Val(dim))
else
new_data = SVector{dim,T}[SVector{dim,T}(point[:, i]) for i in 1:npoints]
end
return inrangecount(tree, new_data, radius)
end
=# | HighVoronoi | https://github.com/martinheida/HighVoronoi.jl.git |
|
[
"MIT"
] | 1.3.1 | bc1b8bfb168b8850d28c935b20a545882a9a078e | code | 1267 | struct TreeData
last_node_size::Int # Number of points in the last node
leafsize::Int # Number of points in each leaf node (except last)
n_leafs::Int # Number of leafs
n_internal_nodes::Int # Number of non leaf nodes
cross_node::Int
offset::Int
offset_cross::Int
last_full_node::Int
end
function TreeData(data::AbstractVector{V}, leafsize) where V
n_dim, n_p = length(V), length(data)
# If number of points is zero
n_p == 0 && return TreeData(0, 0, 0, 0, 0, 0, 0, 0)
n_leafs = ceil(Integer, n_p / leafsize)
n_internal_nodes = n_leafs - 1
leafrow = floor(Integer, log2(n_leafs))
cross_node = 2^(leafrow + 1)
last_node_size = n_p % leafsize
if last_node_size == 0
last_node_size = leafsize
end
# This only happens when n_p / leafsize is a power of 2?
if cross_node >= n_internal_nodes + n_leafs
cross_node = div(cross_node, 2)
end
offset = 2(n_leafs - 2^leafrow) - 1
k1 = (offset - n_internal_nodes - 1) * leafsize + last_node_size + 1
k2 = -cross_node * leafsize + 1
last_full_node = n_leafs + n_internal_nodes
TreeData(last_node_size, leafsize, n_leafs,
n_internal_nodes, cross_node, k1, k2, last_full_node)
end
| HighVoronoi | https://github.com/martinheida/HighVoronoi.jl.git |
|
[
"MIT"
] | 1.3.1 | bc1b8bfb168b8850d28c935b20a545882a9a078e | code | 6802 | # Helper functions to get node numbers and points
@inline getleft(i::Int) = 2i
@inline getright(i::Int) = 2i + 1
@inline getparent(i::Int) = div(i, 2)
@inline isleaf(n_internal_nodes::Int, idx::Int) = idx > n_internal_nodes
# We split the tree such that one of the sub trees has exactly 2^p points
# and such that the left sub tree always has more points.
# This means that we can deterministically (with just some comparisons)
# find if we are at a leaf node and how many points it contains.
function find_split(low, leafsize, n_p)
    # The number of leaf nodes left in the tree,
# use `ceil` to count a partially filled node as 1.
n_leafs = ceil(Int, n_p / leafsize)
# Number of leftover nodes needed
k = floor(Integer, log2(n_leafs))
rest = n_leafs - 2^k
# The conditionals here fulfill the desired splitting procedure but
# can probably be written in a nicer way
# Can fill less than two nodes -> leafsize to left node.
if n_p <= 2 * leafsize
mid_idx = leafsize
# The last leaf node will be in the right sub tree -> fill the left
# sub tree with
elseif rest > 2^(k - 1) # Last node over the "half line" in the row
mid_idx = 2^k * leafsize
# Perfectly filling both sub trees -> half to left and right sub tree
elseif rest == 0
mid_idx = 2^(k - 1) * leafsize
# Else we fill the right sub tree -> send the rest to the left sub tree
else
mid_idx = n_p - 2^(k - 1) * leafsize
end
return mid_idx + low
end
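# Worked example (illustrative only): for leafsize = 10 and n_p = 23 points starting at
# `low` we get n_leafs = 3, k = 1 and rest = 1, so the last branch gives
# mid_idx = 23 - 2^0 * 10 = 13 and the function returns 13 + low; the left sub tree then
# receives 13 points and the right one 10, consistent with the rule described above.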
# Gets the number of points in a leaf node; this is equal to `leafsize` for every node
# except the last one.
@inline function n_ps(idx::Int, td::TreeData)
if idx != td.last_full_node
return td.leafsize
else
return td.last_node_size
end
end
# Returns the index for the first point for a given leaf node.
@inline function point_index(idx::Int, td::TreeData)
if idx >= td.cross_node
return td.offset_cross + idx * td.leafsize
else
return td.offset + idx * td.leafsize
end
end
# Returns a range over the points in a leaf node with a given index
@inline function get_leaf_range(td::TreeData, index)
p_index = point_index(index, td)
n_p = n_ps(index, td)
return p_index:p_index + n_p - 1
end
# Store all the points in a leaf node continuously in memory in data_reordered to improve cache locality.
# Also stores the mapping to get the index into the original data from the reordered data.
function reorder_data!(data_reordered::Vector{V}, data::AbstractVector{V}, index::Int,
indices::Vector{Int}, indices_reordered::Vector{Int}, tree_data::TreeData) where {V}
for i in get_leaf_range(tree_data, index)
idx = indices[i]
data_reordered[i] = data[idx]
        # Saves the inverse mapping from the reordered position back to the original data index
indices_reordered[i] = idx
end
end
# Checks the distance function and adds those points that are among the k best.
# Uses a heap for fast insertion.
#=
@inline function add_points_knn_old!(best_dists::AbstractVector, best_idxs::AbstractVector{Int},
tree::HVNNTree, index::Int, point::AbstractVector,
do_end::Bool, skip2::F,offset::Vector{Float64},leftright) where {F}
result = true
skip = skip2[1]
u = skip2[2]
c = skip2[3]
!(typeof(point)<:MVector) && error("")
bb = true
i=0
for z in get_leaf_range(tree.tree_data, index)
idx = tree.reordered ? z : tree.indices[z]
dist_d = myevaluate(tree.metric, tree.data[idx], point, do_end)
# dot(tree.data[idx]-offset,leftright)>0.0 && error("")
bb &= dot(u,tree.data[idx])<=c
i+=1
if dist_d <= best_dists[1]
result &= skip(tree.indices[z],dist_d)
end
end
bb && i>1 && println(" + $i")
!result && skip(0,0.0)
return !result
end
=#
# Checks each point in the leaf against the current search sphere (`data.new_r`) and
# forwards admissible candidates to `skip_nodes_on_search`; unlike `add_points_knn!` below,
# no heap of the k best points is maintained here.
@inline function add_points_knn_flex!(best_dists::AbstractVector, best_idxs::AbstractVector{Int},
tree::HVNNTree, index::Int, point::AbstractVector, do_end::Bool, skip::F,data::D) where {F,D}
#result=true
for z in get_leaf_range(tree.tree_data, index)
@inbounds tiz = tree.indices[z]
idx = tree.reordered ? z : tiz
x_new = tree.data[idx]
dist_d = myevaluate(tree.metric, x_new, data.new_r, do_end)
correction = data.dist_new_r_x0_2 * 1000 * data.plane_tolerance
if dist_d <= data.dist_new_r_x0_2 + correction
HighVoronoi.skip_nodes_on_search(data,x_new,tiz,dist_d,HighVoronoi.staticfalse)
#skip(tree.indices[z],dist_d)
end
end
return #!result # return true iff new_r has not changed
end
# Checks the distance function and adds those points that are among the k best.
# Uses a heap for fast insertion.
@inline function add_points_knn!(best_dists::AbstractVector, best_idxs::AbstractVector{Int},
tree::HVNNTree, index::Int, point::AbstractVector,
do_end::Bool, skip::F) where {F}
for z in get_leaf_range(tree.tree_data, index)
idx = tree.reordered ? z : tree.indices[z]
dist_d = myevaluate(tree.metric, tree.data[idx], point, do_end)
if dist_d <= best_dists[1]
if skip(tree.indices[z])
continue
end
best_dists[1] = dist_d
best_idxs[1] = idx
percolate_down!(best_dists, best_idxs, dist_d, idx)
end
end
end
# Add those points in the leaf node that are within range.
# TODO: If we have a distance function that is incrementally increased
# as we sum over the dimensions (like the Minkowski norms) then we could
# stop computing the distance function as soon as we reach the desired radius.
# This will probably prevent SIMD and other optimizations so some care is needed
# to evaluate if it is worth it.
@inline function add_points_inrange!(idx_in_ball::Union{Nothing, AbstractVector{Int}}, tree::HVNNTree,
index::Int, point::AbstractVector, r::Number, do_end::Bool)
count = 0
for z in get_leaf_range(tree.tree_data, index)
idx = tree.reordered ? z : tree.indices[z]
dist_d = myevaluate(tree.metric, tree.data[idx], point, do_end)
if dist_d <= r
count += 1
idx_in_ball !== nothing && push!(idx_in_ball, idx)
end
end
return count
end
# Add all points in this subtree since we have determined
# they are all within the desired range
#=
function addall(tree::HVNNTree, index::Int, idx_in_ball::Union{Nothing, Vector{Int}})
tree_data = tree.tree_data
count = 0
if isleaf(tree_data.n_internal_nodes, index)
for z in get_leaf_range(tree_data, index)
idx = tree.reordered ? z : tree.indices[z]
count += 1
idx_in_ball !== nothing && push!(idx_in_ball, idx)
end
else
count += addall(tree, getleft(index), idx_in_ball)
count += addall(tree, getright(index), idx_in_ball)
end
return count
end
=# | HighVoronoi | https://github.com/martinheida/HighVoronoi.jl.git |
|
[
"MIT"
] | 1.3.1 | bc1b8bfb168b8850d28c935b20a545882a9a078e | code | 4012 | # Find the dimension with the largest spread.
#=
function find_largest_spread(data::AbstractVector{V}, indices, low, high) where {V}
T = eltype(V)
n_points = high - low + 1
n_dim = length(V)
split_dim = 1
max_spread = zero(T)
for dim in 1:n_dim
xmin = typemax(T)
xmax = typemin(T)
# Find max and min in this dim
for coordinate in 1:n_points
xmin = min(xmin, data[indices[coordinate + low - 1]][dim])
xmax = max(xmax, data[indices[coordinate + low - 1]][dim])
end
if xmax - xmin > max_spread # Found new max_spread, update split dimension
max_spread = xmax - xmin
split_dim = dim
end
end
return split_dim
end
=#
# Taken from https://github.com/JuliaLang/julia/blob/v0.3.5/base/sort.jl
# and modified to compare against a matrix
@inline function select_spec!(v::Vector{Int}, k::Int, lo::Int,
hi::Int, data::AbstractVector, dim::Int)
lo <= k <= hi || error("select index $k is out of range $lo:$hi")
@inbounds while lo < hi
if hi - lo == 1
if data[v[hi]][dim] < data[v[lo]][dim]
v[lo], v[hi] = v[hi], v[lo]
end
return
end
pivot = v[(lo + hi) >>> 1]
i, j = lo, hi
while true
while data[v[i]][dim] < data[pivot][dim]; i += 1; end
while data[pivot][dim] < data[v[j]][dim]; j -= 1; end
i <= j || break
v[i], v[j] = v[j], v[i]
i += 1; j -= 1
end
if k <= j
hi = j
elseif i <= k
lo = i
else
return
end
end
return
end
# In place heap sort
@inline function heap_sort_inplace!(xs, xis)
@inbounds for i in length(xs):-1:2
xs[i], xs[1] = xs[1], xs[i]
xis[i], xis[1] = xis[1], xis[i]
percolate_down!(xs, xis, xs[1], xis[1], i - 1)
end
return
end
# Binary max-heap percolate down.
@inline function percolate_down!(xs::AbstractArray,
xis::AbstractArray,
dist::Number,
index::Int,
len::Int=length(xs))
i = 1
@inbounds while (l = getleft(i)) <= len
r = getright(i)
j = ifelse(r > len || (xs[l] > xs[r]), l, r)
if xs[j] > dist
xs[i] = xs[j]
xis[i] = xis[j]
i = j
else
break
end
end
xs[i] = dist
xis[i] = index
return
end
# Default skip function, always false
@inline function always_false(::Int)
false
end
# Instead of ReinterpretArray wrapper, copy an array, interpreting it as a vector of SVectors
copy_svec(::Type{T}, data, ::Val{dim}) where {T, dim} =
[SVector{dim,T}(ntuple(i -> data[n+i], Val(dim))) for n in 0:dim:(length(data)-1)]
#=
"""
intersects_cuboid_ball(c::Vector{Float64}, mins::Vector{Float64}, maxs::Vector{Float64}, r_squared::Float64)
Checks if there is an intersection between an axis-aligned box and a sphere in R^d.
# Arguments
- `c::Vector{Float64}`: Coordinates of the sphere's center.
- `mins::Vector{Float64}`: Lower bounds of the box.
- `maxs::Vector{Float64}`: Upper bounds of the box.
- `r_squared::Float64`: Squared radius of the sphere.
# Return value
- `Bool`: `true` if there is an intersection, otherwise `false`.
# Example
```julia
c = [1.0, 2.0, 3.0]
mins = [0.0, 0.0, 0.0]
maxs = [2.0, 2.0, 2.0]
r_squared = 1.0
intersects_cuboid_ball(c, mins, maxs, r_squared)
# Return value: true
"""
function intersects_cuboid_ball(c::T, mins::T2, maxs::T2, r_squared::Float64) where {T,T2}
d = length(c)
dists_squared = 0.0
δ = 0.0
for i in 1:d
if c[i] < mins[i]
δ = mins[i] - c[i]
elseif c[i] > maxs[i]
δ = c[i] - maxs[i]
else
δ = 0.0
end
dists_squared += δ^2
end
return dists_squared <= r_squared
end
=# | HighVoronoi | https://github.com/martinheida/HighVoronoi.jl.git |
|
[
"MIT"
] | 1.3.1 | bc1b8bfb168b8850d28c935b20a545882a9a078e | code | 4521 |
@testset "VoronoiGeometry" begin
function boundary_tests()
b = Boundary(BC_Dirichlet([0,1],[0,1]),BC_Neumann([0,0],[0,-1]),BC_Periodic([0,0],[1,0],[-1,0]))
println(HighVoronoi.boundaryToString(b))
HighVoronoi.intersections!(b,[0.5,0.5],[1.0,0])
println(HighVoronoi.boundaryToString(HighVoronoi.reduce_periodic_part(b)[1]))
println(HighVoronoi.boundaryToString(HighVoronoi.reduce_to_periodic(b)))
HighVoronoi.show_in([2.0,0.0],b)
HighVoronoi.show_in([0.5,0.50],b)
push!(b,BC_Dirichlet([0,2],[0,1]))
push!(b,BC_Periodic([0,1],[2,0],[-2,0]))
return true
end
function test_number_and_names(i)
VG = VoronoiGeometry(VoronoiNodes(rand(3,100)),cuboid(3,periodic=[1],neumann=[2,-3]),integrator=i,integrand=x->[x[1]^2],silence=true)
println("$i: $(HighVoronoi.Integrator_Name(i)) vs. $(HighVoronoi.Integrator_Name(VG.Integrator)) vs. $(HighVoronoi.Integrator_Name(HighVoronoi.IntegratorType(VG.Integrator))) ")
return 5<i<8 ? true : HighVoronoi.Integrator_Number(VG.Integrator)==i
end
# @test boundary_tests()
# Test all Integrators
println("-----------------------------------------------------------------")
println("testing integrators")
println("-----------------------------------------------------------------")
for i in [HighVoronoi._VI__POLYGON, HighVoronoi._VI__MONTECARLO, HighVoronoi._VI__GEOMETRY, HighVoronoi._VI__HEURISTIC, HighVoronoi._VI__HEURISTIC_MC, HighVoronoi._VI__FAST_POLYGON]
@test test_number_and_names(i)
end
# Test full space, so bad cases will happen and will be corrected
println("-----------------------------------------------------------------")
println("testing Voronoi Data and related stuff")
println("-----------------------------------------------------------------")
function test_fast_poly()
VG = VoronoiGeometry(VoronoiNodes(rand(4,500)),cuboid(4,periodic=[]),integrator=HighVoronoi.VI_FAST_POLYGON,silence=true,integrate=true,integrand=x->[x[1],x[2]^2])
return true#abs(0.5-sum(VG.Integrator.Integral.bulk_integral)[1])<0.05
end
println("-----------------------------------------------------------------")
println("testing Heuristic integrator in high dimensions")
println("-----------------------------------------------------------------")
vg2 = VoronoiGeometry(VoronoiNodes(rand(4,500)),cuboid(4,periodic=[1]),integrator=HighVoronoi.VI_POLYGON,integrand = x->[1.0,x[1],x[2]],silence=global_silence)
#for i in 100:110
# @test length(HighVoronoi.adjacents_of_cell(i, vg2.Integrator.Integral.MESH))>0
#end
@test abs(sum( x->x[1], VoronoiData(vg2).bulk_integral)-1.0)<1.0E-2
vg2b = VoronoiGeometry( vg2, integrator=HighVoronoi.VI_HEURISTIC, integrand = x->[1.0] ,silence=global_silence)
@test abs(sum( abs, map(x->x[1],VoronoiData(vg2b).bulk_integral))-1.0)<1.0E-1
vg2c = VoronoiGeometry( vg2b, integrate=false ,silence=global_silence)
vd2d = VoronoiData(vg2c)
@test abs(sum( abs, vd2d.volume .- map(x->x[1],vd2d.bulk_integral)))<1.0E-1
HighVoronoi.vp_print(HighVoronoi.Raycast(VoronoiNodes(rand(2,10))),mirrors=true)
end
@testset "improving" begin
VG = VoronoiGeometry(VoronoiNodes(rand(2,20)),cuboid(2,periodic=[]),improving=(max_iterations=5,))
@test true
end
@testset "Substitute and refine" begin
function test_substitute(dim,NN,NN2=100)
VG = VoronoiGeometry(VoronoiNodes(rand(dim,NN)),cuboid(dim,periodic=[1,2]),integrator=HighVoronoi.VI_POLYGON,silence=global_silence)
VG2 = VoronoiGeometry(VoronoiNodes(rand(dim,NN2)),cuboid(dim,periodic=[1,2]),integrator=HighVoronoi.VI_POLYGON,silence=global_silence)
indeces = HighVoronoi.indeces_in_subset(VG2,cuboid(dim,periodic=[],dimensions=0.3*ones(Float64,dim),offset=0.7*ones(Float64,dim)))
HighVoronoi.substitute!(VG,VG2,indeces,silence=true)
VD=VoronoiData(VG)
return abs(sum(VD.volume)-1)<1.0E-1
#draw2D(VG,"2dsample.mp",drawVerteces=false)
end
println("-----------------------------------------------------------------")
println("testing substitute")
println("-----------------------------------------------------------------")
#@test test_substitute(2,60,600)
#refine to be tested in next step implicitly. For now:
@test abs(HighVoronoi.redundancy([3,5,2,6,8])-0.8875)<0.0001
end
| HighVoronoi | https://github.com/martinheida/HighVoronoi.jl.git |
|
[
"MIT"
] | 1.3.1 | bc1b8bfb168b8850d28c935b20a545882a9a078e | code | 549 |
@testset "database" begin
function test(db)
vg1 = VoronoiGeometry(VoronoiNodes(rand(4,1000)),cuboid(4,periodic=[]),vertex_storage=db,integrate=true,integrand=x->[1.0],integrator=VI_FAST_POLYGON,silence=false,search_settings=(threading=MultiThread(1,1),))
vd1 = VoronoiData(vg1)
v = sum(vd1.bulk_integral)[1]
println("Integral: $v")
return abs(v-1.0)<0.001
end
@test test(DatabaseVertexStorage())
@test test(ClassicVertexStorage())
@test test(ReferencedVertexStorage())
end
| HighVoronoi | https://github.com/martinheida/HighVoronoi.jl.git |
|
[
"MIT"
] | 1.3.1 | bc1b8bfb168b8850d28c935b20a545882a9a078e | code | 1378 | @testset "Discrete Functions" begin
function discrete_function_test()
mycube = cuboid(2,periodic=[1,2])
f = FunctionComposer(reference_argument = [0.0,0.0], super_type = Float64, alpha = x->norm(x)*x, beta = x->sum(abs,x) )
VG = VoronoiGeometry(VoronoiNodes(40,density=x->sin(x[1]*π), domain=mycube), mycube, integrator=HighVoronoi.VI_MONTECARLO, integrand=f.functions)
# make a step function from integrated values:
println("Hallo")
f_all = StepFunction(VG)
# retrieve the alpha-component as a single real valued function
alpha_step = x-> HighVoronoi.decompose(f, f_all(x),scalar=true)[:alpha]
beta_step = x-> HighVoronoi.decompose(f, f_all(x),scalar=true)[:beta]
# generate some sample output
println(alpha_step([0.5,0.5]))
println(beta_step([0.5,0.5]))
println("Hallo")
kappa = StepFunction(VG,HighVoronoi.PeriodicFunction(x->sin(x[2]*π),VG))
println(kappa([0.5,0.25]))
println("Hallo")
dia = DiameterFunction(VG)
println(dia([0.5,0.25]))
f_all_int = HighVoronoi.InterfaceFunction(VG)
fungen(;kwargs...) = 0.0
fff = FunctionFromData(VG,function_generator=fungen)
println(f_all_int([0.5,0.25]))
return true
end
@test discrete_function_test()
end
| HighVoronoi | https://github.com/martinheida/HighVoronoi.jl.git |
|
[
"MIT"
] | 1.3.1 | bc1b8bfb168b8850d28c935b20a545882a9a078e | code | 500 | @testset "Draw" begin
function draw_test()
VG = VoronoiGeometry(VoronoiNodes(rand(2,20)),cuboid(2,periodic=[1,2]),integrator=HighVoronoi.VI_GEOMETRY,silence=true)
draw2D(VG,"testoutput.png")
draw2D(VG,"testoutput.mp",board=MetaPostBoard())
VG2 = VoronoiGeometry(VoronoiNodes(rand(3,20)),cuboid(3,periodic=[]),integrator=HighVoronoi.VI_GEOMETRY,silence=true)
draw3D(VG2,"testoutput3d.png")
return true
end
@test draw_test()
end
| HighVoronoi | https://github.com/martinheida/HighVoronoi.jl.git |
|
[
"MIT"
] | 1.3.1 | bc1b8bfb168b8850d28c935b20a545882a9a078e | code | 1901 |
@testset "Handling Frauds" begin
function test_fraud()
# make sure fraud vertex routine is called
println("testing fraud")
VoronoiGeometry(VoronoiNodes(rand(2, 300000)), Boundary(), silence=true,integrate = false)
dim = 2
println("testing periodic/cubic 2D edge iterator")
VG = VoronoiGeometry( [VoronoiNode([0.5,0.5])],
periodic_grid = ( dimensions=ones(Float64,dim),
scale=0.25*ones(Float64,dim), repeat=4*ones(Int64,dim),
periodic=[], fast=true ) )
VG2 = VoronoiGeometry(HighVoronoi.nodes(HighVoronoi.mesh(VG.domain)),cuboid(2,periodic=[]),silence=true,integrate = false)
return true
end
function test_2000()
# the following is necessary since unbounded domains can lead to a crash in very rare events
#try
#println("-------- 1 ---------------------------------------------------")
xs=VoronoiNodes(1000,density=x->x[1]*sin(x[2]*π),domain=cuboid(5,periodic=[]))
#xs2 = HighVoronoi.perturbNodes(xs,0.0001)
#println("-------- 2 ---------------------------------------------------")
#btree = HighVoronoi.MyBruteTree(xs2)
#HighVoronoi._nn(btree,zeros(Float64,5))
#HighVoronoi._inrange(btree,zeros(Float64,5),0.1)
#println("-------- 3 ---------------------------------------------------")
vg = VoronoiGeometry(xs,cuboid(5,periodic=[]),integrate=false,silence=global_silence)
vd = VoronoiData(vg)
values(vd.boundary_nodes)
vd2 = VoronoiData(vg,copyall=true)
#catch
# b = i<=3
#end
return true
end
@test test_2000()
@test test_fraud()
end
| HighVoronoi | https://github.com/martinheida/HighVoronoi.jl.git |
|
[
"MIT"
] | 1.3.1 | bc1b8bfb168b8850d28c935b20a545882a9a078e | code | 2351 | @testset "Finite Volume" begin
function myflux(;para_i,para_j,mass_ij,normal,kwargs...)
# kwargs... collects all additional parameters which are not used in the current function.
weight = norm(normal)^(-1) * mass_ij * sqrt(para_i[:kappa]*para_j[:kappa])
return weight, weight
end
myRHS__2(;para_i,mass_i,kwargs...) = mass_i * para_i[:f]
function surface_int(;para_i,para_j,mass_ij,normal,kwargs...)
# kwargs... collects all additional parameters which are not used in the current function.
weight = mass_ij * sqrt(para_i[:kappa]*para_j[:kappa])
return weight
end
b_int(;para_i,mass_i,kwargs...) = mass_i * para_i[:f] #* para_i[:κ]^2
function test_FV_3D(nop)
vfvp = VoronoiFVProblem( VoronoiNodes( rand(3,nop) ), cuboid(3,periodic=[]),
discretefunctions = (f=x->sin(2*pi*x[1]),), # evaluate f pointwise
integralfunctions = (kappa=x->1.0+norm(x)^2,), # calculate averages of kappa over cells and interfaces
fluxes = ( j1 = myflux, ),
rhs_functions = (F = myRHS__2,),
flux_integrals = ( fi = surface_int, ),
bulk_integrals = (bi = b_int,) )
println( get_Fluxintegral(vfvp,:fi) )
println( get_Bulkintegral(vfvp,:bi))
# turn functions that depend on x into the format HighVoronoi needs:
homogeneous = FVevaluate_boundary(x->0.0)
one = FVevaluate_boundary(x->1.0)
non_hom = FVevaluate_boundary(x->sin(pi*x[2])*sin(pi*x[3]))
r,c,v,f = linearVoronoiFVProblem( vfvp, flux = :j1, rhs = :F,
Neumann = ([5,6],one),
Dirichlet = (([3,4],homogeneous), ([1,2],non_hom),), )
        A = sparse(r,c,v) # a sparse matrix with rows `r`, columns `c` and values `v`
# solution_u = somelinearsolver(A,f)
return length(f)==nop
end
@test test_FV_3D(100)
end
| HighVoronoi | https://github.com/martinheida/HighVoronoi.jl.git |
|
[
"MIT"
] | 1.3.1 | bc1b8bfb168b8850d28c935b20a545882a9a078e | code | 765 | @testset "volume matrix" begin
function test_interactionmatrix2(db)
VG = VoronoiGeometry(VoronoiNodes(rand(2,20)),cuboid(2,periodic = [1,2]),vertex_storage=db,integrator=HighVoronoi.VI_POLYGON,silence=global_silence,)
VG2 = copy(VG)
VG2 = refine(VG,VoronoiNodes(0.2*rand(2,4)),silence=global_silence)
r,c,vals=interactionmatrix(VG2,VG)
A = sparse(r,c,vals)
u = A*ones(Float64,20)
return abs(sum(u)-24)<0.01
end
# @test test_interactionmatrix2()
@test test_interactionmatrix2(DatabaseVertexStorage())
@test test_interactionmatrix2(ClassicVertexStorage())
@test test_interactionmatrix2(ReferencedVertexStorage())
end
| HighVoronoi | https://github.com/martinheida/HighVoronoi.jl.git |
|
[
"MIT"
] | 1.3.1 | bc1b8bfb168b8850d28c935b20a545882a9a078e | code | 1872 |
@testset "JLD" begin
function test_write()
VG = VoronoiGeometry(VoronoiNodes(rand(2,10)),cuboid(2,periodic = [1,2]),vertex_storage=ClassicVertexStorage(),integrator=HighVoronoi.VI_POLYGON,integrand=x->[sin(x[1])],silence=global_silence)
write_jld(VG,"test.jld")
VG2 = VoronoiGeometry("test.jld",bulk=true,interface=true,silence=global_silence)
println(HighVoronoi.compare(HighVoronoi.mesh(VG.domain),HighVoronoi.mesh(VG2.domain)))
vd1 = VoronoiData(VG)
load_Voronoi_info("test.jld")
vd2 = VoronoiData(VG2)
mysum = abs.(vd1.volume-vd2.volume)
return sum(mysum)<0.00001
end
function test_jld(db)
xs = VoronoiNodes(rand(5,100))
vg = VoronoiGeometry(xs,cuboid(5,periodic=[1]),vertex_storage=db,search_settings=(method=RCOriginal,),integrate=true,integrator=VI_FAST_POLYGON,integrand=x->[x[1]],silence=false)
println("Step 1")
jldopen("geometry5d.jld2","w") do file
file["geo"] = vg
end
println("Step 2")
b = false
jldopen("geometry5d.jld2","r") do file
try
vg2 = file["geo"]
b = HighVoronoi.compare(HighVoronoi.mesh(vg.domain),HighVoronoi.mesh(vg.domain))
catch e
open("error_open_log.txt", "w") do f
                    # save the stack trace
Base.showerror(f, e, catch_backtrace())
end
end
end
return b
end
@test test_write()
@test test_jld(DatabaseVertexStorage())
@test test_jld(ReferencedVertexStorage())
@test test_jld(ClassicVertexStorage())
end
| HighVoronoi | https://github.com/martinheida/HighVoronoi.jl.git |
|
[
"MIT"
] | 1.3.1 | bc1b8bfb168b8850d28c935b20a545882a9a078e | code | 477 |
@testset "Multithread" begin
function test()
vg1 = VoronoiGeometry(VoronoiNodes(rand(4,1000)),cuboid(4,periodic=[]),vertex_storage=DatabaseVertexStorage(),integrate=MultiThread(1,1),integrand=x->[1.0],integrator=VI_FAST_POLYGON,silence=false,search_settings=(threading=MultiThread(1,1),))
vd1 = VoronoiData(vg1)
v = sum(vd1.bulk_integral)[1]
println("Integral: $v")
return abs(v-1.0)<0.001
end
@test test()
end
| HighVoronoi | https://github.com/martinheida/HighVoronoi.jl.git |
|
[
"MIT"
] | 1.3.1 | bc1b8bfb168b8850d28c935b20a545882a9a078e | code | 451 |
@testset "Multithread" begin
function test()
vg1 = VoronoiGeometry(rand(4,1000),cuboid(4,periodic=[]),vertex_storage=DatabaseVertexStorage(),integrate=true,integrand=x->[1.0],integrator=VI_FAST_POLYGON,silence=false,search_settings=(threading=MultiThread(1,1),))
vd1 = VoronoiData(vg1)
v = sum(vd1.bulk_integral)[1]
println("Integral: $v")
return abs(v-1.0)<0.001
end
@test test()
end
| HighVoronoi | https://github.com/martinheida/HighVoronoi.jl.git |
|
[
"MIT"
] | 1.3.1 | bc1b8bfb168b8850d28c935b20a545882a9a078e | code | 1098 | @testset "Periodic Grids" begin
function test_periodic_mesh_integration(dim,nn,f=true,peri=[],integrator=HighVoronoi.VI_POLYGON)
#try
VG = VoronoiGeometry( VoronoiNodes(rand(dim,nn)),periodic_grid=(periodic=peri, dimensions=ones(Float64,dim),
scale=0.25*ones(Float64,dim), repeat=4*ones(Int64,dim),fast=f),integrator=integrator,integrand=x->[1.0,x[1]^2,x[2]^2],silence=global_silence)
# VG = VoronoiGeometry( VoronoiNodes(rand(dim,1000)),cuboid(dim,periodic=[]), integrator=HighVoronoi.VI_POLYGON,integrand=x->[1.0,x[1]^2,x[2]^2])
vd = VoronoiData(VG)
return integrator!=VI_GEOMETRY ? abs(sum(vd.volume)-sum(x->x[1],vd.bulk_integral))<0.1 && (dim<5 || abs(0.33-sum(x->x[2],vd.bulk_integral))<0.1) : true
end
@test test_periodic_mesh_integration(5,1)
@test test_periodic_mesh_integration(5,2)
# @test test_periodic_mesh_integration(3,1,false,[1])
@test test_periodic_mesh_integration(3,2,false,[1])
@test test_periodic_mesh_integration(3,2,false,[1],HighVoronoi.VI_GEOMETRY)
end
| HighVoronoi | https://github.com/martinheida/HighVoronoi.jl.git |
|
[
"MIT"
] | 1.3.1 | bc1b8bfb168b8850d28c935b20a545882a9a078e | code | 519 |
@testset "RaycastMethods" begin
function test(MM)
vg1 = VoronoiGeometry(VoronoiNodes(rand(4,1000)),cuboid(4,periodic=[]),vertex_storage=DatabaseVertexStorage(),integrate=true,integrand=x->[1.0],integrator=VI_FAST_POLYGON,silence=false,search_settings=(method=MM,))
vd1 = VoronoiData(vg1)
v = sum(vd1.bulk_integral)[1]
println("Integral: $v")
return abs(v-1.0)<0.001
end
@test test(RCCombined)
@test test(RCOriginal)
@test test(RCCombined)
end
| HighVoronoi | https://github.com/martinheida/HighVoronoi.jl.git |
|
[
"MIT"
] | 1.3.1 | bc1b8bfb168b8850d28c935b20a545882a9a078e | code | 837 | using Test
#using Revise
using HighVoronoi
using SpecialFunctions
using LinearAlgebra
using SparseArrays
using StaticArrays
using JLD2
const global_silence = false
@testset "HighVoronoi.jl" begin
@testset "various" begin
@test HighVoronoi.testboundary()
@test HighVoronoi.test_EdgeHashTable()
@test HighVoronoi.test_VertexHashTable()
@test HighVoronoi.test_queuehashing()
end
include("tools.jl")
include("basics.jl")
include("voronoidata.jl")
include("statistics.jl")
include("fraud.jl")
include("periodicgrids.jl")
include("draw.jl")
include("rcmethods.jl")
include("multithread.jl")
include("database.jl")
include("jld.jl")
include("interaction.jl")
include("discrete.jl")
include("fv.jl")
end
| HighVoronoi | https://github.com/martinheida/HighVoronoi.jl.git |
|
[
"MIT"
] | 1.3.1 | bc1b8bfb168b8850d28c935b20a545882a9a078e | code | 786 |
@testset "Statistics" begin
function statistics()
HighVoronoi.VoronoiStatistics(3,10;periodic=nothing,points=100)
HighVoronoi.VoronoiStatistics(3,10;periodic=3,points=100)
nodeslist = [200,500]#,10000,12500,15000,17500,20000,22500,25000,27500,30000]
dim = 3
A = HighVoronoi.collect_statistics(HighVoronoi.statistic_samples(dim,nodeslist,2),txt="test.txt",silence=true)
A2 = HighVoronoi.collect_statistics(rand(dim,2),dim,2*ones(Int64,dim),3*ones(Int64,dim),txt="test2.txt",fast=false,silence=true)
A3 = HighVoronoi.collect_statistics(rand(dim,2),dim,2*ones(Int64,dim),3*ones(Int64,dim),txt="test3.txt",fast=true,silence=true)
return true
end
@test statistics()
end
| HighVoronoi | https://github.com/martinheida/HighVoronoi.jl.git |
|
[
"MIT"
] | 1.3.1 | bc1b8bfb168b8850d28c935b20a545882a9a078e | code | 1292 |
@testset "Tools" begin
function mycollect(t,i)
ret = Vector{typeof(t[1])}(undef,i)
for k in 1:i
ret[k] = t[k]
end
return ret
end
function mycollectlast(t,i)
ret = Vector{typeof(t[1])}(undef,length(t)-i+1)
for k in i:length(t)
ret[k-i+1] = t[k]
end
return ret
end
function test_function2(test_tuple)
# Call transform_tuple2 with the test_tuple, A=Int, B=Vector{Int64}
#transformed_tuple = HighVoronoi.fulltransform_sequences(test_tuple,mycollect)
#println("Transformed Tuple: ", transformed_tuple)
tft2 = HighVoronoi.group_last(test_tuple,Int,mycollectlast,StaticArrays.Size(2))
tft4 = HighVoronoi.cut_off_last(test_tuple,Int,mycollectlast)
tft4 = HighVoronoi.cut_off_first(test_tuple,Int,mycollectlast)
HighVoronoi.remove_first_entry(test_tuple)
HighVoronoi.split_tuple_at_A_sequence(("a",1,2,3,"b"),Int64)
# Return the value of the last entry of the new tuple
#return transformed_tuple[end]==[4,6,8] && tft2[end]==[6,8]
return tft2[end]==[6,8]
end
# Example usage
@test test_function2((1,'c',3,4,5,rand(),5,"hallo",4,6,8))
end
| HighVoronoi | https://github.com/martinheida/HighVoronoi.jl.git |
|
[
"MIT"
] | 1.3.1 | bc1b8bfb168b8850d28c935b20a545882a9a078e | code | 1136 |
@testset "VoronoiData" begin
function test_VoronoiData()
vg = VoronoiGeometry(VoronoiNodes(rand(3,100)),cuboid(3,periodic=[1]),integrate=true,silence=global_silence,integrand=x->[x[1]],integrator=VI_POLYGON)
vd = VoronoiData(vg)
values(vd.boundary_nodes)
for a in vd.boundary_vertices
break
end
vg2 = VoronoiGeometry(VoronoiNodes(rand(3,100)),cuboid(3,periodic=[1]),integrate=true,silence=global_silence,integrand=x->[x[1]],integrator=VI_POLYGON)
vd2 = VoronoiData(vg2,copyall=true)
vd3 = VoronoiData(vg2,copyall=true,sorted=true)
deepcopy(vd.nodes)
deepcopy(vd.vertices)
deepcopy(vd.boundary_nodes)
deepcopy(vd.boundary_vertices)
deepcopy(vd.neighbors)
deepcopy(vd.orientations)
deepcopy(vd.volume)
deepcopy(vd.area)
deepcopy(vd.bulk_integral)
deepcopy(vd.interface_integral)
deepcopy(vd.references)
deepcopy(vd.reference_shifts)
return true
end
# @test test_fast_poly()
@test test_VoronoiData()
end
| HighVoronoi | https://github.com/martinheida/HighVoronoi.jl.git |
|
[
"MIT"
] | 1.3.1 | bc1b8bfb168b8850d28c935b20a545882a9a078e | docs | 1092 | # HighVoronoi.jl
A Julia package for the parallelized computation of high-dimensional (i.e. any dimension >= 2) Voronoi meshes and for setting up Finite Volume computations on these meshes
[](https://martinheida.github.io/HighVoronoi.jl/stable/)
[](https://martinheida.github.io/HighVoronoi.jl/dev/)
[](https://github.com/martinheida/HighVoronoi.jl/actions/workflows/CI.yml?query=branch%3Amain)
[](https://codecov.io/gh/martinheida/HighVoronoi.jl)
[](https://coveralls.io/github/martinheida/HighVoronoi.jl?branch=main)
Refer to the manual for detailed information on how to use the package.
The new version 1.3.0 has improved performance due to a new internal data structure and parallelization.
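A minimal usage sketch (see the documentation for the full set of options; the calls below follow the examples from the manual and the test suite):

```julia
using HighVoronoi

xs = VoronoiNodes(rand(3, 100))                      # 100 random nodes in 3D
vg = VoronoiGeometry(xs, cuboid(3, periodic = []),   # Voronoi diagram on the unit cube
                     integrator = VI_POLYGON)        # exact (triangulation-based) volumes and areas
vd = VoronoiData(vg)                                 # nodes, neighbors, volumes, areas, ...
sum(vd.volume)                                       # ≈ 1.0, the volume of the unit cube
```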
| HighVoronoi | https://github.com/martinheida/HighVoronoi.jl.git |
|
[
"MIT"
] | 1.3.1 | bc1b8bfb168b8850d28c935b20a545882a9a078e | docs | 11202 | ```@meta
CurrentModule = HighVoronoi
```
# HighVoronoi 1.3.0: $N\log N$ complexity Parallel Computed Voronoi Grids in $\mathbb{R}^d$
Documentation for [HighVoronoi](https://github.com/martinheida/HighVoronoi.jl). Voronoi mesh generation in arbitrary dimensions + Finite Volume setup, also for vertices with $d+k$, $k>1$ generators.
- [QUICK START on VORONOI generation: Click here](@ref quickVG) / [The ABSTRACT WORKFLOW is here](@ref workflowgeometry)
- [QUICK START on FINITE VOLUME methods: Click here](@ref QuickFV) / [The ABSTRACT WORKFLOW is here](@ref workflowfv)
- [Toy file for testing numerical solver](@ref toyfile)
### News to version 1.3.0:
- Parallelized computation of Voronoi Diagrams, volumes and integrals
- Compressed internal database with accelerated access, free choice of database
- Improved, accelerated Raycast routine (the heart of the Voronoi computations), free choice of Raycast method
- Storage is directly possible via JLD2.
- `substitute` is currently disabled as it has to be rewritten for the new database structure.
### News to version 1.2.0:
- further improved algorithms for faster calculation of all features
- `VI_POLYGON` has been modified. It uses more memory but is more than twice as fast in higher dimensions.
- A new `Integrator` has been implemented: `VI_FAST_POLYGON`, see [here](@ref integratoroverview). Even more precise than `VI_POLYGON` and much faster (a factor of 15-20 for 500 nodes in 6D) but using a lot of memory. Competitive with `VI_MONTECARLO` with `mc_accurate=(10_000,2,2)` in 6D (recall that a cell has on average `9_000` vertices in 6D).
- Bug fixes for unbounded domains in the far field.
### News to version 1.1.0:
- improved algorithms for faster calculation of all features
- 3D output
- automatically improving the geometric quality of meshes if desired by the user: nodes will be locally modified until the Voronoi nodes almost coincide with the centers of gravity of their cells.
### Preprints
There is a recent [PREPRINT](http://www.wias-berlin.de/preprint/3041/wias_preprints_3041.pdf) where I outline the algorithm and provide a mathematical proof that it works.
## Index
```@index
```
## Functionality of the HighVoronoi Package
`HighVoronoi` is intended as an effective Voronoi mesh generator in any ARBITRARY DIMENSION greater than or equal to 2. It can work on polygonal domains and also on (partially or fully) periodic domains. It also provides methods to implement Finite Volume problems on these high dimensional meshes.
The underlying Raycast-Method as well as the Monte-Carlo integration method were reimplemented from the `VoronoiGraph` package by Alexander Sikorski in its version of June 2022. In the course of this, the code was fully restructured and in wide parts rewritten to adapt it to mesh-refinement and to vertices that are formed by more than $d+1$ cells, e.g. cubic grids (with $2^d$ cells generating each vertex). Furthermore, boundaries, periodic grids and internal correction algorithms are implemented to stabilize the algorithm and to increase numerical accuracy.
## Performance and advantages over the classical algorithms
The classical approach is to use quickhull in $d+1$ dimensions to get the Delaunay grid and calculate the Voronoi grid from there. Starting with $n$ nodes that will have $K$ vertices in total, the amount of calculations is at least $n^2$ for the quickhull algorithm (with a lot of linear equations to be solved), followed by the solution of $K$ linear equations. In general, it is not fully understood how Quickhull scales with time, but it seems to be polynomial, see [http://www.qhull.org/html/qh-code.htm#performance](http://www.qhull.org/html/qh-code.htm#performance)
Compared to that, the computational cost of the `HighVoronoi` algorithm scales with $K*\ln n$ on regular grids and comes with almost no linear equations to be solved, except for the few occasions (like 0.01%) when a vertex needs to be corrected to compensate for accumulated machine inaccuracy. A paper on the underlying Raycast-Algorithm is in preparation.
### Performance in 4D

### Performance in 5D

### Code for performance check
To verify the claimed scaling, one may use the following:
```julia
nodeslist = [200,500,1000,1500,2000,3000,4000,6000,8000,10000,12500,15000,17500,20000,22500,25000,27500,30000]
dim = 5
# look up statistics.jl in the src/ folder to see how collect_statistics and statistic_samples work
A = HighVoronoi.collect_statistics(HighVoronoi.statistic_samples(dim,nodeslist,4),txt="results$(dim)D-30000-new.txt")
```
The above calculates for each number of nodes `n` in `nodeslist` in dimension `dim`
a Voronoi grid in the unit cube. It does this 4 times and returns averaged information about:
- `A[1,entry]` = number of nodes
- `A[2,entry]` = `dim`
- `A[3,entry]` = average time for one calculation
- `A[4,entry]` = number of vertices
- `A[5,entry]` = number of vertices at the boundary of the unit cube
- `A[6,entry]` = number of raycasts
- `A[7,entry]` = average number of nn-searches per raycast
The code below plots sample results from a 4D or a 5D simulation:
```julia
# 4D:
A = [200.0 500.0 1000.0 1500.0 2000.0 3000.0 4000.0 6000.0 8000.0 10000.0 12500.0 15000.0 17500.0 20000.0 22500.0 25000.0 27500.0 30000.0; 4.0 4.0 4.0 4.0 4.0 4.0 4.0 4.0 4.0 4.0 4.0 4.0 4.0 4.0 4.0 4.0 4.0 4.0; 0.050457975 0.131645475 0.295149075 0.47852225 0.6634493 1.166146925 1.5823699 2.430149 3.32245565 4.32058905 5.644007625 6.986462575 8.052074575 9.1541459 10.400109675 11.842956775 12.998982725 14.368660125; 4026.0 11125.5 23466.5 36429.5 49401.0 76030.0 103180.0 158009.5 213505.0 268840.75 339912.0 411174.0 482256.0 553107.5 625884.5 697474.5 769424.0 841395.0; 1633.75 3630.25 6507.25 9074.5 11473.75 15744.0 20051.75 27695.25 34437.25 41655.25 49458.0 56950.75 64301.75 71822.25 78083.25 85112.25 91864.0 98515.75; 4017.75 11104.25 23427.0 36371.25 49328.0 75921.5 103041.5 157807.75 213230.5 268510.5 339506.0 410691.0 481688.0 552451.5 625152.0 696653.25 768551.25 840449.0; 2.611411859871819 2.6134813247180135 2.605658001451317 2.6184486373165616 2.6229018001946156 2.6242599263713178 2.6269051789812843 2.621926046090892 2.626835748169235 2.627970786989708 2.628140592507938 2.6301477266363276 2.631068762352394 2.6324491833219748 2.6321470618345617 2.633201309259664 2.6337345752804384 2.6314196340289535]
# 5D:
#A = [200.0 500.0 1000.0 1500.0 2000.0 3000.0 4000.0 6000.0 8000.0 10000.0 12500.0 15000.0 17500.0 20000.0 22500.0 25000.0 27500.0 30000.0; 5.0 5.0 5.0 5.0 5.0 5.0 5.0 5.0 5.0 5.0 5.0 5.0 5.0 5.0 5.0 5.0 5.0 5.0; 0.211763325 0.653174575 1.577807425 2.689303325 3.98041055 6.6097507 9.7188122 15.90510205 24.3054906 33.8117523 42.40471245 53.050212825 65.127449625 76.881652375 88.523779375 100.420322575 116.35595995 130.32798235; 15143.0 43881.0 98554.25 157179.25 215857.75 340909.25 468655.0 730134.0 1.0014475e6 1.2742545e6 1.6244175e6 1.97465225e6 2.324588e6 2.68794375e6 3.04505275e6 3.40743625e6 3.776617e6 4.1409315e6; 7809.75 19189.75 37197.0 53944.75 70912.5 101793.5 132341.75 190513.75 244890.5 299057.25 361937.25 425335.5 487805.25 545611.75 606317.75 664678.5 720208.75 776399.25; 15137.0 43869.75 98534.0 157151.0 215819.75 340856.5 468575.5 730022.75 1.00130575e6 1.27407825e6 1.6242075e6 1.97439425e6 2.32430825e6 2.68760575e6 3.0446865e6 3.40703625e6 3.77616375e6 4.140452e6; 2.5825130474995044 2.5891018298485857 2.593754440091745 2.598367175519087 2.6079552496933203 2.608840523798138 2.6137458104403666 2.6110275330460593 2.612983846342638 2.6159984679119983 2.6151359970939674 2.617593851886471 2.6170647761543675 2.616155661967906 2.6180548473545633 2.6180065592199084 2.6191039782106906 2.619351220591375]
# This can be plotted using the following
using Plots
using DataFitting
using SpecialFunctions
output_round(x) = round(x, digits = 3 - floor(Int64,log10(abs(x))))
f(x, p1, p2, p3 ) = @. (p1 + p2 * x + p3 * x * log(x) )
params = [1.0,1.0,1.0]
dom = Domain(A[1,:])
data = Measures(A[3,:],1.0)
model1 = Model(:comp1 => FuncWrap(f, params...))
prepare!(model1, dom, :comp1)
result1 = fit!(model1, data)
plot(A[1,:], A[3,:], color=:blue, label="nodes vs time")
my_p1 = result1.param[:comp1__p1].val
my_p2 = result1.param[:comp1__p2].val
my_p3 = result1.param[:comp1__p3].val
plot!(x->my_p1 + my_p2 * x + my_p3 * x * log(x), color=:red, label="f(x)=$(output_round(my_p1)) + $(output_round(my_p2)) * x + $(output_round(my_p3)) * x * log(x)")
savefig("plot.pdf")
```
## The `HighVoronoi` package provides
- a series of data sets that allow the user to set up a Voronoi mesh in arbitrary dimension on a convex domain with plane boundaries or even without boundaries.
- works for nodes in general and non-general position. In particular, vertices may be generated by more than $d+1$ generating nodes.
- 2 different methods to calculate the volumes and interface areas of cells: an exact triangulation method and a Monte Carlo method
- 3 different methods to integrate functions:
    * two on the fly for both triangulation and Monte Carlo
* one heuristic method based on given volume and surface data
- Refinement of Voronoi tessellations: Add points to your grid and the algorithm will locally recalculate the mesh, including integration of volume, area and functions.
- Fast calculation of periodic grids using the `periodic_grid` keyword.
- Set up the linear equation for a finite volume Voronoi discretization of a given elliptic PDE with Neumann, Dirichlet or periodic boundary conditions
- other functionalities like 2D data export in MetaPost, storing and loading data.
## Important data structures and methods
### Data structures
- `VoronoiGeometry`: Creating, loading, updating, refining and managing the mesh
- `VoronoiNodes`: Nodes for mesh generation
- `Boundary`: Boundary of the mesh
- `VoronoiData`: Providing the data of the mesh for further use outside of `HighVoronoi.jl`
- `VoronoiFVProblem`: Calculating internal data for setting up linear matrix equations for Finite Volume discretizations on a `VoronoiGeometry`
- `StepFunction`: Generates a piecewise constant function
- `InterfaceFunction`: Generates a function living on interfaces.
- `FunctionComposer`: Glues together several functions and returns a new vector valued function.
### Methods
- `write_jld`: Store a `VoronoiGeometry`
- `refine!`: refine a `VoronoiGeometry` by new nodes
- `substitute!`: refine a `VoronoiGeometry` by erasing the points in a given subdomain and replacing them by a finer precalculated grid. Automatically fills out all the gaps.
- `interactionmatrix`: constructs a projection from a function on one geometry to a function on a second geometry.
- `linearVoronoiFVProblem`: Extracts the matrix and right-hand side from a given `VoronoiFVProblem` for given boundary conditions.
### To be implemented in a forthcoming version
- refine a `VoronoiFVProblem`. Project a given "rough" FV solution of a `linearVoronoiFVProblem` onto the refined solution space.
| HighVoronoi | https://github.com/martinheida/HighVoronoi.jl.git |
|
[
"MIT"
] | 1.3.1 | bc1b8bfb168b8850d28c935b20a545882a9a078e | docs | 13360 | # [Using the HighVoronoi Library](@id intentions)
We collect some examples of how the package is meant to be applied.
!!! tip "SKIP ''Mesh generation'' and study the ''Finite Volume methods'' section first"
    If you are interested in Finite Volume methods but do not want to go too much into the details of mesh generation, you may skip this first part. However, for setting up several different problems in large dimensions, recycling mesh data and using mesh refinement techniques, it is strongly advised to study the capabilities of the `VoronoiGeometry` data structure in a second pass.
## Mesh generation and integration
Mesh generation in form of a `VoronoiGeometry` relies on the following data: A set of points (`VoronoiNodes`), a boundary (`Boundary`, `cuboid`), the choice of an `integrator` method and the optional choice of a function to be integrated (`integrand = x->...`). Points and boundaries can also be retrieved from a formerly calculated `VoronoiGeometry`.
The intentions how this is done are demonstrated in the following examples:
1. [Example 1: Basics](@ref Mgi1)
2. [Example 2: Integration on fully periodic grid](@ref Mgi2)
3. [Example 3: Non-periodic bounded domain with data storage](@ref Mgi3)
4. [Example 4: Load and integrate new function](@ref Mgi4)
5. [Example 5: Copy and integrate new function](@ref Mgi5)
6. [Example 6: Mesh-Refinement](@ref Mgi6)
7. [Example 7: Mesh-Refinement with locally new integrand](@ref Mgi7)
Future extensions will include the fast, efficient generation of large quasi-periodic meshes in high dimensions. These meshes shall then be locally refined according to the user's needs.
### [Example 1: Basics](@id Mgi1)
Generate a 3D mesh of 100 points with no boundary. This calculates only vertices and neighbors.
```julia
xs = VoronoiNodes( rand(3,100) )
vg = VoronoiGeometry(xs, integrator=HighVoronoi.VI_GEOMETRY)
vd = VoronoiData(vg, getverteces=true)
# vd.neighbors contains for each node `i` a list of all neighbors
# vd.verteces contains for each node `i` a list of all verteces that define the cell.
```
### [Example 2: Integration on fully periodic grid](@id Mgi2)
Generate a 5D mesh of 1000 points with periodic boundary conditions on the unit cube $(0,1)^5$ and use triangulation-based integration to integrate the function
$$x\mapsto\left(\begin{array}{c}\|x\| \\ x_1x_2\end{array}\right)$$
For general polygon domains [see here](@ref createboundary).
```julia
xs2 = VoronoiNodes( rand(5,1000) )
vg2 = VoronoiGeometry(xs2, cuboid(5), integrator=HighVoronoi.VI_POLYGON, integrand = x->[norm(x),x[1]*x[2]])
vd2 = VoronoiData(vg2)
```
- `vd2.volume[i]` and `vd2.bulk_integral[i]` contain the volume of cell $i$ and the integral of `integrand` over cell $i$
- `vd2.neighbors[i]` contains an array of all neighbors of $i$.
- for each $j$ the field `vd2.area[i][j]` contains the interface area between $i$ and `vd2.neighbors[i][j]`.
- for each $j$ the field `vd2.interface_integral[i][j]` contains the integral of `integrand` over
the interface area between $i$ and `vd2.neighbors[i][j]`. A short sketch of how to access these fields is given below the following note.
!!! note ""
    If $j\not=k$ but `vd2.neighbors[i][j]==vd2.neighbors[i][k]`
    this means that $i$ shares two different interfaces with `n=vd2.neighbors[i][j]`.
    This happens due to periodicity and a low number of nodes in relation to the dimension.
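The following sketch only reads the fields listed above for the first node; it assumes the names `vg2` and `vd2` from the code block of this example.

```julia
println("total volume: ", sum(vd2.volume))   # should be close to 1.0 on the periodic unit cube
i = 1
for (j, n) in enumerate(vd2.neighbors[i])
    println("neighbor ", n, ": area = ", vd2.area[i][j],
            ", interface integral = ", vd2.interface_integral[i][j])
end
println("cell ", i, ": volume = ", vd2.volume[i], ", bulk integral = ", vd2.bulk_integral[i])
```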
### [Example 3: Non-periodic bounded domain with data storage](@id Mgi3)
Like Example 2 but we store and load the data:
```julia
xs3 = VoronoiNodes( rand(5,1000) )
vg3 = VoronoiGeometry(xs3, cuboid(5,periodic=[2]), integrator=HighVoronoi.VI_POLYGON, integrand = x->[norm(x),x[1]*x[2]])
write_jld(vg3, "my5Dexample.jld")
vg3_reload_vol = VoronoiGeometry("my5Dexample.jld")
```
The mesh `vg3` is periodic only in the direction of $e_2=(0,1,0,0,0)$. The variable `vg3_reload_vol` now contains a copy of the nodes, vertices, volumes and areas of `vg3`. It does NOT contain the integral values.
```julia
vg3_a = VoronoiGeometry("my5Dexample.jld", bulk=true, interface=true)
```
The variable `vg3_a` also contains the integrated values. However, the method will prompt a warning because no integrand is provided. Hence try the following:
```julia
vg3_modified = VoronoiGeometry("my5Dexample.jld", bulk=true, interface=true, integrand = x->[x[5],sqrt(abs(x[3]))])
vg3_full = VoronoiGeometry("my5Dexample.jld", bulk=true, interface=true, integrand = x->[norm(x),x[1]*x[2]])
```
!!! warning ""
    The method `VoronoiGeometry(filename)` DOES compare the dimensions of the integrand with the stored data. However, it DOES NOT compare whether the original and the newly provided function are the same.
### [Example 4: Load and integrate new function](@id Mgi4)
We can also efficiently recycle the stored geometry by reusing its volumes and interfaces and integrating another function with the `VI_HEURISTIC` integrator.
```julia
vg4 = VoronoiGeometry("my5Dexample.jld", integrand = x->[x[1]*x[5],sqrt(abs(x[3])),sum(abs2,x)],integrator=HighVoronoi.VI_HEURISTIC)
```
This will cause a warning stating that the new integrator `VI_HEURISTIC` does not match the original integrator. Just ignore it. You can also use `VI_POLYGON` or `VI_MONTECARLO` but this will take much more time for the integration.
### [Example 5: Copy and integrate new function](@id Mgi5)
Similar to the last example, we may also directly copy `vg3`
```julia
vg5 = VoronoiGeometry(vg3, integrator=HighVoronoi.VI_HEURISTIC, integrand = x->[sum(abs2,x)])
```
### [Example 6: Mesh-Refinement](@id Mgi6)
Say the user has created or loaded a `VoronoiGeometry` and wants to add some more points. In our case, we create a partially periodic mesh in $3D$ with 1000 points in $(0,1)^3$ and afterwards add 100 points in $(0,0.1)^3$ for higher resolution in this region.
```julia
vg6 = VoronoiGeometry( VoronoiNodes(rand(3,1000)), cuboid(3,periodic=[2]),
integrand=x->[sum(abs,x)], integrator=HighVoronoi.VI_POLYGON)
refine!(vg6, VoronoiNodes(0.1.*rand(3,100)))
```
If, for whatever reason, the user does not want the algorithm to update the volumes, areas, integrals, etc., they may add the keyword `update=false`.
### [Example 7: Mesh-Refinement with locally new integrand](@id Mgi7)
We modify Example 6:
```julia
vg7 = VoronoiGeometry( VoronoiNodes(rand(3,1000)), cuboid(3,periodic=[2]),
integrand=x->[sum(abs,x)], integrator=HighVoronoi.VI_POLYGON)
vg7b = VoronoiGeometry( vg7, bulk=true, interface=true, integrand=x->[sqrt(sum(abs2,x))])
refine!(vg7b, VoronoiNodes(0.1.*rand(3,100)))
```
Because `bulk=true` and `interface=true`, `vg7b` simply copies all data from `vg7`, including the integrated values of `f(x)=[sum(abs,x)]`. However, when the `refine!` function is called, the local integral on every modified interface and cell will be recalculated using the new function `f2(x)=[sqrt(sum(abs2,x))]`. This means that in the new cells we have integrated values of `f2`, while on old, non-modified cells we still have integrated values of `f`. On cells that have been partially modified, the new integral is an interpolation between the old and the new function.
## Finite Volume problems: Generating the matrix and the right hand side from data
The most simple way to implement a Finite Volume discretization within `HighVoronoi` is to provide
- a list of nodes
- a domain
- a list of parameter functions to be evaluated pointwise or in an averaged sense
- a description of the flux in terms of the Voronoi mesh and the pointwise/averaged data
- a description of the right-hand side in terms of the Voronoi mesh and the pointwise/averaged data
- a description of the boundary conditions (note that periodic boundary conditions are in fact implemented as a part of the MESH and cannot be modified at this stage)
We provide the following two examples covering both intentions of use
1. [Example 1: Most simple way from scratch](@ref FVex1)
2. [Example 2: Relying on preexisting `VoronoiGeometry`](@ref FVex2)
### [Example 1: Most simple way from scratch](@id FVex1)
We create `nop` points within $(0,1)^3$ and prescribe $(0,1)^3$ as our domain for the mesh generation. We define functions $\kappa(x)=1+\|x\|^2$ and $f(x)=\sin(2\pi x_1)$. Then we make use of `VoronoiFVProblem` to set up the discrete equation
$\forall i: \qquad \sum_{j\sim i}\left(p_{ij}u_i-p_{ji}u_j\right)=F_i$
where
$\left(p_{ij},p_{ji}\right)=\mathrm{myflux}=\left(\frac{m_{ij}}{|x_i-x_j|}\sqrt{\kappa_i\kappa_j}\,,\;\frac{m_{ij}}{|x_i-x_j|}\sqrt{\kappa_i\kappa_j}\right)\,,\qquad F_i=m_i\,f(x_i)\,.$
[This is a discretization of](@ref examplefluxes)
$-\nabla\cdot(\kappa\nabla u)=f\qquad\mathrm{on}\,(0,1)^3\,.$
As boundary conditions we implement for $J=-\kappa\nabla u$ and outer normal $\nu$:
```math
\begin{align*}
1.& & u(x) & =\sin(\pi x_2)\sin(\pi x_3) & \quad\text{on } & \{0,1\}\times(0,1)^2\,,\\
2.& & u(x) & =0 & \quad\text{on } & (0,1)\times\{0,1\}\times(0,1)\,,\\
3.& & J\cdot\nu & =1 & \quad\text{on } & (0,1)^2\times\{0,1\}\,,\\
\end{align*}
```
According to the internal structure of the cube, BC 1. corresponds to the surface planes `[1,2]`, BC 2. corresponds to the surface planes `[3,4]` and BC 3. corresponds to the surface planes `[5,6]`. [More information on boundaries is given here.](@ref allonboundaries)
```julia
using LinearAlgebra
using SpecialFunctions
using SparseArrays
function myflux(;para_i,para_j,mass_ij,normal,kwargs...)
# kwargs... collects all additional parameters which are not used in the current function.
weight = norm(normal)^(-1) * mass_ij * sqrt(para_i[:kappa]*para_j[:kappa])
return weight, weight
end
myRHS(;para_i,mass_i,kwargs...) = mass_i * para_i[:f]
function test_FV_3D(nop)
vfvp = VoronoiFVProblem( VoronoiNodes( rand(3,nop) ), cuboid(3,periodic=[]),
discretefunctions = (f=x->sin(2*pi*x[1]),), # evaluate f pointwise
integralfunctions = (kappa=x->1.0+norm(x)^2,), # calculate averages of kappa over cells and interfaces
fluxes = ( j1 = myflux, ),
rhs_functions = (F = myRHS,) )
# turn functions that depend on x into the required HighVoronoi-format:
homogeneous = FVevaluate_boundary(x->0.0)
one = FVevaluate_boundary(x->1.0)
non_hom = FVevaluate_boundary(x->sin(pi*x[2])*sin(pi*x[3]))
r,c,v,f = linearVoronoiFVProblem( vfvp, flux = :j1, rhs = :F,
Neumann = ([5,6],one),
Dirichlet = (([3,4],homogeneous), ([1,2],non_hom),), )
    A = sparse(r,c,v) # a sparse matrix with rows `r`, columns `c` and values `v`
# solution_u = somelinearsolver(A,f)
end
test_FV_3D(100)
```
### [Example 2: Relying on preexisting `VoronoiGeometry`](@id FVex2)
We build a 5D mesh of 5000 points in the unit cube using `VoronoiGeometry` and store it for later use. Since we have plenty of time, we do it using the exact `VI_POLYGON` integrator.
```julia
write_jld( VoronoiGeometry( VoronoiNodes(rand(5,5000)), cuboid(5,periodic=[]), integrator=HighVoronoi.VI_POLYGON ), "my5Dmesh.jld" )
```
Next, we want to use this stored grid to implement [the above example](@ref FVex1) in 5D, adding homogeneous Dirichlet conditions in the remaining dimensions. However, we also want `:f` to be evaluated in an averaged sense, not pointwise. Since we will need their specification in two places, we fix the functions once and for all:
```julia
my_functions = (f=x->sin(2*pi*x[1]), kappa=x->1.0+norm(x)^2,)
```
We need to integrate $\kappa$ and $f$ the moment we load the geometry from file. To make sure the integrated data will match the needs of the Finite Volume algorithm, we use [`FunctionComposer`](@ref The-FunctionComposer-struct):
```julia
composed_function = FunctionComposer(reference_argument=zeros(Float64,5), super_type=Float64; my_functions...).functions
```
The definitions of `myflux` and `myRHS` are independent from the dimension and can just be taken from above.
```julia
function test_FV_5D_from_file()
my_functions = (f=x->sin(2*pi*x[1]), kappa=x->1.0+norm(x)^2,)
composed_function = FunctionComposer( reference_argument=zeros(Float64,5),
super_type=Float64; my_functions...).functions
vg = VoronoiGeometry( "my5Dmesh.jld", integrator = HighVoronoi.VI_HEURISTIC,
integrand = composed_function)
vfvp = VoronoiFVProblem( vg, integralfunctions = my_functions,
fluxes = ( j1 = myflux, ),
rhs_functions = (F = myRHS,) )
homogeneous = FVevaluate_boundary(x->0.0)
one = FVevaluate_boundary(x->1.0)
non_hom = FVevaluate_boundary(x->sin(pi*x[2])*sin(pi*x[3]))
r,c,v,f = linearVoronoiFVProblem( vfvp, flux = :j1, rhs = :F, Neumann = ([5,6],one),
Dirichlet = (([3,4,7,8,9,10],homogeneous), ([1,2],non_hom),), )
A = sparse(r,c,v) # a sparse matrix with rows `r`, coloumns `c` and values `v`
# solution_u = somelinearsolver(A,f)
end
```
| HighVoronoi | https://github.com/martinheida/HighVoronoi.jl.git |
|
[
"MIT"
] | 1.3.1 | bc1b8bfb168b8850d28c935b20a545882a9a078e | docs | 1728 | # Advanced Options for controlling the Algorithm and output
## Output options
- `silence=true`: will suppress output of the Voronoi algorithm and the integration, thereby speeding up the routine a little bit.
- `printevents=true`: callable only in `VoronoiGeometry(points,...)`, this will generate output at the end of the Voronoi algorithm with some information on the results and on whether there were unexpected events due to non-regular data structures.
## Controlling the Voronoi-Algorithm
The optional argument `search_settings::NamedTuple` allows influencing the Voronoi routine. However, if you are not sure what you are doing, you should not touch these settings; otherwise don't blame the package if you get strange results or crash the algorithm. Also be aware that parameters set once at the generation of a `VoronoiGeometry` are kept for refining, substituting, etc. You can, however, locally modify these parameters via `search_settings` in `refine!` or `substitute!`.
The following parameters are available (a usage sketch follows after the list):
- `variance_tol=1E-20`: when the variance of (distance of a vertex to its nodes)^2 is larger than this value, the vertex candidate will be corrected.
- `break_tol=1E-5`: when the aforementioned variance is even larger than this (which actually did not occur in tests on bounded domains so far), it is a sign that something goes terribly wrong, and the vertex is skipped. A case when this happens is a geometry that is quasi-periodic in at least one dimension and unbounded in the others; it typically happens "far away", i.e. at coordinates around 1E200, from the origin.
- `b_nodes_tol=1E-10`: When a vertex has a distance smaller than that to the boundary, it is considered a boundary vertex.
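A minimal usage sketch, assuming the parameters above are passed as entries of the `search_settings` named tuple (the keyword names are the ones quoted in the list, and the values shown are the quoted defaults):

```julia
xs = VoronoiNodes(rand(3, 100))
# pass the tolerance parameters as a named tuple via search_settings
vg = VoronoiGeometry(xs, cuboid(3, periodic = []),
                     search_settings = (variance_tol = 1E-20, break_tol = 1E-5, b_nodes_tol = 1E-10))
```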
| HighVoronoi | https://github.com/martinheida/HighVoronoi.jl.git |
|
[
"MIT"
] | 1.3.1 | bc1b8bfb168b8850d28c935b20a545882a9a078e | docs | 1734 |
# [Boundaries](@id allonboundaries)
## The Boundary struct
In what follows we describe how boundaries are implemented in the calculation of Voronoi meshes. Handling boundaries within `HighVoronoi` is done using the following struct.
```@docs
Boundary
```
## [Creating Boundaries](@id createboundary)
Apart from cuboids, `Boundary` should always be generated using the following method:
```@docs
Boundary(planes...)
```
## [Rectangular domains](@id rectangulardomains)
For simplicity of application, the following methods are provided for boundaries of rectangular domains. They return an object of type `b::Boundary` with the following structure:
For every $i\in 1,...\mathrm{dim}$
- the plane `b.plane[2*i-1]` has base $\mathrm{offset}[i]+e_i*\mathrm{dimensions}[i]$ and normal $e_i$
- the plane `b.plane[2*i]` has base $\mathrm{offset}[i]$ and normal $-e_i$
```@docs
cuboid(dim;dimensions=ones(Float64,dim),periodic=collect(1:dim),neumann=Int64[],offset=zeros(Float64,dim))
```
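For illustration, a small sketch based purely on the signature above: a 2D box with side lengths $2\times 1$, shifted by an offset and periodic in the first direction.

```julia
# box (0,2) x (0.5,1.5): side lengths 2 and 1, lower corner at (0.0,0.5), periodic in direction 1
b = cuboid(2, dimensions = [2.0, 1.0], periodic = [1], offset = [0.0, 0.5])
```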
## Warnings
!!! warning "Using no boundaries in high dimensions"
    When using no boundary planes, the result "at infinity", i.e. for far-out vertex points, can be corrupted in high dimensions. This is because virtually every boundary point (a point with an infinite cell) becomes a neighbor of almost all other boundary points, and the vertices reach out to very large coordinates compared to the coordinates of the original nodes. The library provides internal algorithms to identify and correct miscalculations, but this functionality is limited by the precision of `Float64`. We advise implementing a far-out boundary (e.g. at distance `1.0E6`) compared to a cube of diameter `1`.
## Some more tools
| HighVoronoi | https://github.com/martinheida/HighVoronoi.jl.git |
|
[
"MIT"
] | 1.3.1 | bc1b8bfb168b8850d28c935b20a545882a9a078e | docs | 1557 | # Voronoi: Database Structure
There are currently three types of internal data storage implemented. They may be selected using the keyword `vertex_storage` of the `VoronoiGeometry` constructor.
## Standard solution
The most recent and most efficient method is `DatabaseVertexStorage()`. It stores all information in one centralized database and uses a sophisticated indexing system for fast access to any information from all points in the code. It might be slightly slower than the other two methods for large grids in high dimension, but it requires much less memory and for smaller grids it is even faster than the other two methods.
This is the only database that is reliably compatible with multithreading, and as such it will be automatically enforced once `threading=MultiThread(...)` is provided as an option!
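A usage sketch (keyword and constructor names as quoted above; the remaining arguments are just a minimal example):

```julia
xs = VoronoiNodes(rand(3, 1000))
# explicitly select the centralized database storage
vg = VoronoiGeometry(xs, cuboid(3, periodic = []),
                     integrator = HighVoronoi.VI_GEOMETRY,
                     vertex_storage = DatabaseVertexStorage())
```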
## Deprecated solution
Another option is the `ReferencedVertexStorage()` which is slower but may be useful in low dimensions. It has a decentralized dictionary-based data structure with additional references. It was first implemented to save memory compared to the very initial solution.
## Initial solution
The `ClassicVertexStorage()` is fast for integration algorithms in low dimensions and was the first database structure underlying the computations. It builds solely on a system of dictionaries. For some applications it is efficient, but it requires a lot of memory, which becomes troublesome in high dimensions. However, as documentation of the development of the library, it is still usable.
| HighVoronoi | https://github.com/martinheida/HighVoronoi.jl.git |
|
[
"MIT"
] | 1.3.1 | bc1b8bfb168b8850d28c935b20a545882a9a078e | docs | 735 | # Known sources of errors
We summarize the major sources of errors so the user is aware of them:
- inserting points into the grid that are outside of the prescribed domain. In the best case this will cause weird vertices that should not exist, or - in most cases - it will crash the algorithm. However, since the algorithm allows for unbounded domains, this is easy to prevent.
- The calculation of vertices and volumes is precise and reproducible for regular grids. However, for nodes in non-general position, `VI_POLYGON` volume calculations tend to have a worst-case error in the range of 2.0% in 5D and 6D, and this value may increase in higher dimensions. You can test this on a unit cube to get an idea for a specific dimension (see the sketch below).
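A sketch of such a test (the node count is arbitrary): the exact volume of the unit cube is $1$, so the deviation of the summed cell volumes gives a rough idea of the accuracy.

```julia
vg = VoronoiGeometry(VoronoiNodes(rand(5, 200)), cuboid(5, periodic = []),
                     integrator = HighVoronoi.VI_POLYGON)
vd = VoronoiData(vg)
println("absolute volume error: ", abs(sum(vd.volume) - 1.0))
```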
| HighVoronoi | https://github.com/martinheida/HighVoronoi.jl.git |
|
[
"MIT"
] | 1.3.1 | bc1b8bfb168b8850d28c935b20a545882a9a078e | docs | 13408 | # The Finite Volume functionality
HighVoronoi's most important feature for the user is the automatic generation of a linear system
$$\mathbb A\,\mathbf u = \mathbf b$$
from the PDE-Problem
$-\nabla\cdot\left(\kappa\nabla u + \kappa u\nabla V\right) = f\,.$
More abstract, the class `VoronoiFVProblem` discretizes the problem
$\nabla\cdot J(u) = f\,,\tag{Flux-Form}$
in the bulk (in the domain) where $J(u)$ is a linear differential operator in $u$ and $f$ is a given right hand side. Furthermore, the method can account for periodic, Dirichlet and Neumann boundary conditions, also all at once (on different parts of the boundary).
The function `linearVoronoiFVProblem` then adds particular boundary conditions to the abstract discretization in `VoronoiFVProblem` and returns a linear equation to be solved.
## The `VoronoiFVProblem` dataset
!!! note "Summary"
The `VoronoiFVProblem` is conceptually a black box into which the user throws a list of nodes and a boundary (or a ready-to-use `VoronoiGeometry`) as well as a description of $J$ and $f$. Internally, the black box computes the discrete coefficients of $J$ and $f$ and stores them in a way that allows efficient computation of the matrix and right-hand side for given boundary conditions.
We advise the user to first jump to the [examples for calculations of fluxes](@ref myVoronoiFVProblem) below and afterwards study the following abstract description of `VoronoiFVProblem`.
```@docs
VoronoiFVProblem()
```
```@docs
VoronoiFVProblem
```
## [Examples for the `VoronoiFVProblem`](@id myVoronoiFVProblem)
### Content
1. [Creating a `VoronoiFVProblem`](@ref examplecreating)
2. [Calculating fluxes and right-hand side](@ref examplefluxes)
3. [Creating a VoronoiFVProblem from a VoronoiGeometry](@ref FVfromGeo)
4. [Internal storage of data (For very deep coding only)](@ref examplestoragedata)
### [Creating a `VoronoiFVProblem` and calculating some integrals...](@id examplecreating)
We create a first instance of `VoronoiFVProblem`. The following code calculates a `VoronoiGeometry` for the given `data` of random points. It furthermore calculates the integral of `integralfunctions` and pointwise evaluations of `discretefunctions`. The latter are not stored but will be internally used for calculations of fluxes or right hand sides (in a later example). To get familiar with the data structure try out the following:
```julia
using LinearAlgebra
function myrhs(;para_i,mass_i,kwargs...)
return para_i[:alpha]*mass_i
end
function test_FV(dim,nop)
data = rand(dim,nop)
xs = VoronoiNodes(data)
cube = cuboid(dim,periodic=[1])
VoronoiFVProblem(xs,cube, discretefunctions = (alpha=x->sum(abs,x),),
rhs_functions = (F=myrhs,) )
end
vfvp = test_FV(2,4)
println(vfvp.Coefficients.functions)
```
The algorithm internally calculates for each of the four random cells the quantity $\alpha(x_i)*m_i$, where $m_i$ is the mass of cell $i$. The output hence looks like the following:
```
(F = [0.8899968951003052, 1.6176576528551534, 1.2484331005796414, 0.9868594550457225],)
```
- At a later stage, we will of course not directly work with `vfvp.Coefficients.functions`...
- `myrhs` could additionally work with `x_i`, the coordinates of $x_i$
### [Calculating fluxes and right-hand side](@id examplefluxes)
We write $i\sim j$ if the Voronoi cells of the nodes $x_i$ and $x_j$ are neighbored. Then the discrete version of
$\nabla\cdot J(u) = f\,,\tag{Flux-Form}$
in the node $x_i$ is
$\sum_{j\sim i} J_{i,j}(u) = F_i\,.\tag{Flux-Form-discrete}$
!!! note "Indeces $i$ and $j$"
In the text and in the code hereafter $i$ is the current cell and $j$ is either a neighbor or an index of a part of the boundary.
More precisely, let $f$ and $\kappa$ be scalar functions. If $m_i$ is the mass of cell $i$, $m_{ij}$ is the mass of the interface between cells $i$ and $j$, $h_{ij}=|x_i-x_j|$ denotes the distance of the two nodes, and $f_i=f(x_i)$ or $f_i=m_i^{-1}\int_{cell_i}f$ (and similarly for $\kappa$), we find the following possible discretization of Fick's law:
$J(u)=-\kappa \nabla u \qquad\leftrightarrow\qquad J_{ij}(u)\,=\,-\frac{m_{ij}}{h_{ij}}\sqrt{\kappa_i\kappa_j}(u_j-u_i)\,=\,+\frac{m_{ij}}{h_{ij}}\sqrt{\kappa_i\kappa_j}(u_i-u_j)\,,$
with the right hand side
$F_i=m_i f_i\,.$
We can rewrite $J_{ij}(u)$ in the following form:
$J_{ij}(u)=\frac{m_{ij}}{h_{ij}}\sqrt{\kappa_i\kappa_j}u_i-\frac{m_{ij}}{h_{ij}}\sqrt{\kappa_i\kappa_j}u_j=p_{ij,i}u_i - p_{ij,j}u_j$
!!! note "purpose of `VoronoiFVProblem`"
The purpose of `VoronoiFVProblem` is to calculate $p_{ij,i}$ and $p_{ij,j}$ using `fluxes=...` as well as $F_i$ using `rhs_functions`.
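For orientation, and only as a reformulation of the two displays above (boundary contributions are ignored and each neighbor is assumed to appear once): inserting the representation of $J_{ij}(u)$ into the discrete flux form shows how these coefficients enter the linear system $\mathbb A\,\mathbf u=\mathbf b$,

```math
\sum_{j\sim i}\left(p_{ij,i}\,u_i-p_{ij,j}\,u_j\right)=F_i
\qquad\Longleftrightarrow\qquad
\mathbb{A}_{ii}=\sum_{j\sim i}p_{ij,i}\,,\quad \mathbb{A}_{ij}=-p_{ij,j}\ (j\sim i)\,,\quad \mathbf{b}_i=F_i\,.
```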
We implement the above discretization in `myflux_1`, and in `myflux_2` an alternative that replaces $\sqrt{\kappa_i\kappa_j}$ by an average of $\kappa$ over the joint interface of cells $i,j$. Here, `f` is evaluated pointwise (in the middle of each cell / interface), while `kappa` is averaged over cells and interfaces.
```julia
using LinearAlgebra
using SpecialFunctions
function myflux_1(;para_i,para_j,mass_ij,normal,kwargs...)
# kwargs... collects all additional parameters which are not used in the current function.
weight = norm(normal)^(-1) * mass_ij * sqrt(para_i[:kappa]*para_j[:kappa])
return weight, weight
end
function myflux_2(;para_ij,mass_ij,normal,kwargs...)
# kwargs... collects all additional parameters which are not used in the current function.
weight = norm(normal)^(-1) * mass_ij * para_ij[:kappa]
return weight, weight
end
myRHS(;para_i,mass_i,kwargs...) = mass_i * para_i[:f]
function test_FV(dim,nop)
data = rand(dim,nop)
xs = VoronoiNodes(data)
cube = cuboid(dim,periodic=[],neumann=[1,-1]) # cube with preset Neumann BC in dimension 1 and Dirichlet BC all other dimensions
VoronoiFVProblem(xs,cube, discretefunctions = (f=x->sin(2*pi*x[1]),), # evaluate f pointwise
integralfunctions = (kappa=x->1.0+norm(x)^2,), # calculate averages of kappa over cells and interfaces
fluxes = ( j1 = myflux_1, j2 = myflux_2, ),
rhs_functions = (F = myRHS,) )
end
test_FV(2,10)
```
### [Creating a VoronoiFVProblem from a VoronoiGeometry](@id FVfromGeo)
It is also possible to write the following, less compact, code for `test_FV(dim,nop)`. Though it may seem weird to make the extra effort, remember that mesh generation in high dimensions is very time consuming. Hence this approach can be useful to set up a high-dimensional problem from a formerly calculated grid.
```julia
function test_FV(dim,nop)
data = rand(dim,nop)
xs = VoronoiNodes(data)
cube = cuboid(dim,periodic=[],neumann=[1,-1])
vg = VoronoiGeometry(xs, cube, integrator=HighVoronoi.VI_POLYGON,
integrand=x->1.0+norm(x)^2)
vfvp = VoronoiFVProblem(vg, discretefunctions = (f=x->sin(2*pi*x[1]),),
integralfunctions = (kappa=x->0.0,),
fluxes = ( j1 = myflux_1, j2 = myflux_2, ),
rhs_functions = (F = myRHS,) )
end
```
The instantiation of `vg` calculates all integrals of `x->1.0+norm(x)^2`. The instantiation of `vfvp` simply uses the values stored in `vg` and "rebrands" them as `:kappa`.
!!! tip "Compatibility of dimension"
    The dimension of `integrand` in the instantiation of `vg` can be greater than or equal to the summed-up dimension of all `integralfunctions`, but not less!! The definition of `:kappa` in `VoronoiFVProblem(...)` in the above example does not matter, as all values have been calculated before. We strongly advise having a look at the "intentions of use" section.
### [Internal storage of data](@id examplestoragedata)
In the [second example](@ref examplefluxes), try out the following code:
```julia
vfvp = test_FV(2,4)
println(vfvp.Coefficients.functions)
println(vfvp.Coefficients.fluxes)
println(vfvp.Coefficients.rows)
println(vfvp.Coefficients.cols)
```
The fields `rows` and `cols` of `vfvp` store the row and column coordinates of potentially non-zero entries of a sparse flux matrix. The arrays stored in `fluxes` correspondingly store the non-zero values. It is thus possible to directly create sparse matrix instances from this data. However, this would not yet properly account for boundary conditions.
## [Full list of LOCAL PARAMETER names](@id parameter_names)
Functions like `myflux_1` and `myflux_2` in [the example above](@ref examplefluxes) are evaluated on interfaces between neighboring cells or on the boundary and can take the following arguments (a sketch using several of them follows after the list):
- `x_i`: coordinates of the current node $i$
- `x_j`: coordinates of the current neighbor $j$ (in case this is an actually existing cell) or the coordinates of a point on the boundary (if this is part of the boundary, see `onboundary`)
- `para_i` and `para_j`: a named tuple container of all pointwise evaluated (`discretefunctions`) or averaged (`integralfunctions`) functions for either cell $i$ and $j$ respectively.
- `para_ij`: same for the interface
- `mass_i` and `mass_j`: the masses of cells $i$ and $j$
- `mass_ij`: the mass of the interface
- `normal`: Something like $x_j-x_i$. However, in case of periodic nodes with cells "crossing the periodic boundary", it typically holds $x_i+\mathrm{normal}\not=x_j$ but $(x_i+\mathrm{normal})$ is a periodic shift of $x_j$. In any case, it is the correct outer normal vector with length of the "periodized distance".
- `onboundary`: is true if and only if `x_j` is a point on the boundary.
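To illustrate the calling convention, here is a hypothetical flux that uses several of the arguments listed above. It is only a sketch: the physical model behind it is made up, and it assumes that a function named `:kappa` was provided via `integralfunctions` or `discretefunctions` (as in the examples further up).

```julia
using LinearAlgebra

function my_illustrative_flux(; x_i, x_j, para_i, para_j, mass_ij, normal, onboundary, kwargs...)
    h = norm(normal)                  # distance used in the difference quotient
    midpoint = 0.5 .* (x_i .+ x_j)    # x_j is a point on the boundary if onboundary == true
    c = 1.0 + sum(abs2, midpoint)     # made-up space-dependent factor
    # on the boundary we only rely on the data of cell i; otherwise take a geometric mean
    kappa_eff = onboundary ? para_i[:kappa] : sqrt(para_i[:kappa] * para_j[:kappa])
    weight = c * mass_ij / h * kappa_eff
    return weight, weight             # corresponds to (p_{ij,i}, p_{ij,j}) from the discretization above
end
```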
Right-hand side functions (bulk functions) like `myRHS` are evaluated on nodes and have access only to
- `x_i`
- `para_i`
- `mass_i`
!!! danger ""
    - If a function `f` is not provided to either `discretefunctions` or `integralfunctions`, the call `para_i[:f]` and the like will cause an error message.
    - Every name can be used only ONCE. In particular, a name `f` CANNOT be used both inside `discretefunctions` AND `integralfunctions`.
## Extracting the full FV linear equations including BOUNDARY CONDITIONS
1. [Theoretical background](@ref lin_eq_background)
2. [`linearVoronoiFVProblem`](@ref linear_vor_prob)
3. [No Dirichlet condition: Ambiguity](@ref no_dirichlet)
4. [Examples](@ref lin_vor_prob_ex)
### [Background](@id lin_eq_background)
To understand how boundary conditions are implemented in the `HighVoronoi` package, multiply equation (Flux-Form) with some function $\varphi$ and use integration by parts to obtain
$$-\int_{domain}J\cdot\nabla\varphi=\int_{domain}f\,\varphi-\int_{boundary}\varphi\,J\cdot \nu$$
where $\nu$ is the outer normal vector.
Furthermore, assume we want to prescribe $u=u_0$ on some part of the boundary. We can write $u=\tilde u +u_0$ where $\tilde u$ has boundary value $0$. Then (Flux-Form-discrete) reads
$\sum_{j\sim i} J_{i,j}(\tilde u + u_0) = F_i\,.$
However, since we work in a discrete setting, we can make the following assumptions:
!!! note "Assumptions on boundary data"
- The function $u_0$ is a discrete function taking value $0$ on every node inside the domain, but might be non-zero on the boundary. $\tilde u$ is a discrete function which is zero on all Dirichlet-parts of the boundary.
- The function `J_0` is a discrete function on the boundary which mimics $J_0=J\cdot\nu$. In particular, we think of `J_0(i,j)=m_ij*J_0(x_ij)`.
### [`linearVoronoiFVProblem`](@id linear_vor_prob)
```@docs
linearVoronoiFVProblem(vd::VoronoiFVProblem;flux)
```
### [No Dirichlet condition: Ambiguity](@id no_dirichlet)
In case the boundary conditions consist only of periodic and/or Neumann conditions, the solution is unique only up to a constant. This is taken into account by providing `linearVoronoiFVProblem` with the parameter
- `enforcement_node=1`: This picks out a node where the solution is forced to be $0$. If the user wants another condition, such as average value $0$, this can be achieved after solving the linear problem, as the library provides enough tools to calculate the respective integrals afterwards (see the sketch below).
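As an illustration of the last remark: once a solution `solution_u` of the linear system has been computed, one can shift it to have volume-weighted average $0$ using the cell volumes from `VoronoiData`. This is only a sketch; `vg` stands for the underlying `VoronoiGeometry` and `solution_u` for the solution vector ordered like the nodes.

```julia
vd = VoronoiData(vg)
mean_u = sum(solution_u .* vd.volume) / sum(vd.volume)  # volume-weighted mean of u
solution_u .-= mean_u                                    # now the mean over the domain is 0
```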
### [Examples](@id lin_vor_prob_ex)
Let us look at the following example:
```julia
using SparseArrays
myrhs(;para_i,mass_i,kwargs...) = mass_i*para_i[:alpha]
function myflux_2(;para_ij,mass_ij,normal,kwargs...)
weight = norm(normal)^(-1) * mass_ij * para_ij[:alpha]
return weight, weight
end
xs = VoronoiNodes(rand(2,6))
cube = cuboid(2,periodic=[1])
vfvp = VoronoiFVProblem(xs, cube, discretefunctions = (alpha=x->sum(abs,x),),
rhs_functions=(F=myrhs,),
fluxes=(j1=myflux_2,) )
har = FVevaluate_boundary(x->0.0) # turn a function into the format HighVoronoi needs
one = FVevaluate_boundary(x->1.0)
r,c,v,f = linearVoronoiFVProblem(vfvp, flux = :j1, Neumann = (3,har), Dirichlet = (4,one))
A = sparse(r,c,v) # a sparse matrix with rows `r`, columns `c` and values `v`
# solution_u = somelinearsolver(A,f)
```
As we can see, the output of the algorithm is a matrix `A` and a right-hand side `f`, which can be plugged into a linear solver from some suitable package.
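For example, one may hand `A` and `f` to the conjugate gradient solver of the `IterativeSolvers` package, as is also done in the Finite Volume introduction of this documentation; any other solver for sparse linear systems works just as well.

```julia
using IterativeSolvers
solution_u = cg(A, f)   # conjugate gradients, as in the Finite Volume introduction; use e.g. gmres for non-symmetric systems
```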
|
[
"MIT"
] | 1.3.1 | bc1b8bfb168b8850d28c935b20a545882a9a078e | docs | 7501 |
# [Quick Introduction to Finite Volume Problems](@id QuickFV)
In what follows we give a short introduction on how to use the Finite Volume functionality of `HighVoronoi.jl` in 2d.
## Setting up a FV Problem
### parameters
Since we want to reuse our code below, we define a set of parameters of the following form that will be passed to the FV code:
```julia
set = ( # only dirichlet boundary
u_exact = u,# expected exact solution
κ = k,# parameter field
RHS = rhs, # expected -[∇⋅(κ∇u)](x),
domain = d, # of form cuboid(2,periodic=[???]),
    dirichlet_boundary = db, # indices of Dirichlet boundaries of `domain`
neumann_boundary = nb, # indeces of Neumann boundaries of `domain`
neumann = n, # The Neumann condition for -(κ∇u)⋅ν = n on the neumann boundary
)
```
a simple example is the following one:
```julia
dimension = 2 # the examples in this section are two-dimensional
set1 = ( # only dirichlet boundary
u_exact = x->sin(x[1]*π) * sin(x[2]*π)+1,
κ = x->1.0,
RHS = x->2*π^2 * sin(x[1]*π) * sin(x[2]*π),
domain = cuboid(dimension,periodic=[]), # no periodic boundaries
dirichlet_boundary = collect(1:4), # all four boundaries are Dirichlet
neumann_boundary = nothing # no Neumann condition
)
```
### simulation code
The numerical calculations are done by the following code. It does the following:
- generate the Voronoi geometry
- integrate $\kappa$ and $RHS$ over cells and interfaces and calculates the interface areas and cell volumes
- it uses the subsequently defined `SQRA_flux` and `myRHS` to set up the structure of the flux $j=-\kappa\nabla u$ and the right-hand side from `RHS`.
- it defines the dirichlet boundary condition `compatibility` from the expected exact solution
- it defines the Neumann boundary condition `neumann`
- it calculates the matrix `A` and the right hand side `f` that describe the problem $-\nabla\cdot(\kappa\nabla u)=RHS$ as a finite dimensional linear problem
- it uses the `IterativeSolvers` library to solve `A*solution_u = f`
- it calculates the $L^2$-error between the exact and the numerical solution
- it returns the nodes and values of the discrete solution.
```julia
function SQRA_flux(;para_i,para_j,mass_ij,normal,kwargs...)
# kwargs... collects all additional parameters which are not used in the current function.
weight = norm(normal)^(-1) * mass_ij * sqrt(para_i[:κ]*para_j[:κ])
return weight, weight
end
myRHS(;para_i,mass_i,kwargs...) = mass_i * para_i[:f]
function simulation(set)
# account for periodicity in the right hand side
new_RHS = HighVoronoi.PeriodicFunction(set.RHS,set.domain)
# modify original parameter set
# replace RHS by periodic version,
# set density distribution of points to x->1.0 in case nothing else is provided by user
# set neumann condition to zero if nothing else is provided by user
set = (density=x->1.0, neumann = x->0.0, dirichlet_boundary=nothing, neumann_boundary=nothing, set..., RHS=new_RHS)
    # generate approximately 1000 points distributed according to set.density
nodes = VoronoiNodes(1000;density=set.density,domain=cuboid(2,periodic=[]))
# calculate Voronoi tessellation
VG_basis = VoronoiGeometry(nodes,set.domain,integrator=HighVoronoi.VI_GEOMETRY)
# integrate parameters
VG_κ = VoronoiGeometry(VG_basis, integrator=HighVoronoi.VI_POLYGON, integrand=x->[set.κ(x),set.RHS(x)])
#retrieve total volume of domain to verify volume integration works properly
vd = VoronoiData(VG_κ)
println("vol: $(sum(vd.volume))")
# set up fluxes and RHS
vfvp = VoronoiFVProblem(VG_κ,
integralfunctions = (κ = set.κ, f = set.RHS, ),
fluxes = ( j1 = SQRA_flux, ),
rhs_functions = (F = myRHS,) )
# define functions that can be applied as boundary conditions
compatibility = FVevaluate_boundary(x->set.u_exact(x))
neumann = FVevaluate_boundary(x->set.neumann(x))
# construct linear system from fluxes, RHS and boundary conditions
r,c,v,f = linearVoronoiFVProblem(vfvp, flux = :j1, rhs = :F,
Dirichlet = set.dirichlet_boundary!=nothing ? (set.dirichlet_boundary,compatibility) : nothing,
Neumann = set.neumann_boundary!=nothing ? (set.neumann_boundary,neumann) : nothing)
A = sparse(r,c,v) # a sparse matrix with rows `r`, coloumns `c` and values `v`
# solve linear system using IterativeSolvers-Package
solution_u = cg(A,f) # conjugate gradients
# print out approximate L²-error between exact and numerical solutions
println("Approximate L²-error: ",sqrt(sum(map(k->abs2(solution_u[k]-set.u_exact(nodes[k]))*VG_κ.Integrator.Integral.volumes[k],1:length(nodes)))))
return nodes, solution_u # return nodes and values for plotting...
end
nodes, values = simulation(set1)
```
## Plotting the Result
We may use the data obtained above for a plot of `values` against `nodes`:
```julia
using Plots
function plot_2d_surface(nodes, values)
# The following two lines are necessary in order for the plot to look nicely
func = StepFunction(nodes,values) # some minor HighVoronoi tool
new_nodes = vcat([VoronoiNode([k/10,j*1.0]) for k in 0:10, j in 0:1], [VoronoiNode([j*1.0,k/10]) for k in 1:9, j in 0:1])
append!(nodes,new_nodes)
append!(values,[func(n) for n in new_nodes])
x = [node[1] for node in nodes]
y = [node[2] for node in nodes]
p = surface(x, y, values, legend=false)
xlabel!("X")
ylabel!("Y")
zlabel!("Values")
title!("2D Surface Graph")
display(p)
end
plot_2d_surface(nodes, values)
```
## Other Simulation Examples
Instead of `set1` from above, try out the following examples.
!!! warning "Mind the regularity"
    If you want to verify the algorithm with known examples, keep in mind that the expected solution should be $C^2$ across the periodic boundary, or you may find unexpected behavior...
```julia
set2 = ( # dirichlet boundary and periodic in 1st dim
u_exact = x->sin(x[1]*2*π) * sin(x[2]*π),
κ = x->1.0,
RHS = x->5*π^2 * sin(x[1]*2*π) * sin(x[2]*π),
domain = cuboid(dimension,periodic=[1]), # periodic in x[1]
dirichlet_boundary = collect(3:4),
neumann_boundary = nothing, # no neumann
neumann = x-> π* sin(x[2]*π)# 0#-π*cos(x[1]*π) * sin(x[2]*π)
)
set3 = ( # only dirichlet boundary
u_exact = x->x[1]^2,
κ = x->1.0,
RHS = x->-2,
domain = cuboid(dimension,periodic=[]),
dirichlet_boundary = collect(1:4),
neumann_boundary = nothing,
neumann = x->0.0
)
set4 = ( # Neumann on 1 and Dirichlet on 2-4 boundary
u_exact = x->x[1]^2,
κ = x->1.0,
RHS = x->-2.0,
domain = cuboid(dimension,periodic=[]),
dirichlet_boundary = collect(2:4),
neumann_boundary = 1,
neumann = x->-2.0
)
set5 = ( # periodic in x[1] and dirichlet in x[2] boundary
u_exact = x->sin(x[1]*π)^2 * sin(2*x[2]*π),
κ = x->1.0,
RHS = x->2*π^2 * (1-2*cos(2*π*x[1]))*sin(2*π*x[2]),
domain = cuboid(dimension,periodic=[1]),
dirichlet_boundary = collect(3:4),
)
set6 = ( # only dirichlet boundary
u_exact = x->sin(x[1]*2*π)^2 * sin(2*x[2]*π),
κ = x->1.0,
RHS = x->π^2 * (1-5*cos(4*π*x[1]))*sin(2*π*x[2]),
domain = cuboid(dimension,periodic=[]),
dirichlet_boundary = collect(1:4)
)
```
| HighVoronoi | https://github.com/martinheida/HighVoronoi.jl.git |
|
[
"MIT"
] | 1.3.1 | bc1b8bfb168b8850d28c935b20a545882a9a078e | docs | 8132 | # [Periodic functions](@id createalltypesoffunctions)
To make a function `f` periodic with respect to a (partially) periodic boundary `b::Boundary` or geometry `VG::VoronoiGeometry` use the following
```julia
f2 = PeriodicFunction(f::Function,b::Boundary)
f2 = PeriodicFunction(f::Function,VG::VoronoiGeometry)
```
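A small sketch of the intended use. It assumes (as the name suggests) that the returned function repeats the values of `f` periodically in the periodic directions of the boundary; the concrete function is arbitrary.

```julia
b  = cuboid(2, periodic = [1])   # unit square, periodic in the first direction
f  = x -> x[1] + x[2]            # not periodic by itself
f2 = PeriodicFunction(f, b)
# under the above assumption, both values agree:
println(f2([1.25, 0.5]), " vs. ", f2([0.25, 0.5]))
```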
# Step functions
Use one of the following methods to create a step function on the Voronoi grid:
```julia
f = StepFunction(VG::VoronoiGeometry, u<:AbstractVector; tree::Union{VoronoiKDTree,KDTree})
f = StepFunction(VG::VoronoiGeometry, u::Function; tree::Union{VoronoiKDTree,KDTree})
f = StepFunction(VG::VoronoiGeometry; tree::Union{VoronoiKDTree,KDTree})
f = StepFunction(nodes::VoronoiNodes, u<:AbstractVector; tree::Union{VoronoiKDTree,KDTree}=KDTree(nodes))
```
This yields a step function `f` that is constant on every cell of the `VoronoiGeometry` `VG` or on the Voronoi tessellation given by `nodes`.
If `u` is an abstract vector, the value `f(x)=u[i]` is assigned if - according to `tree` - the nearest neighbor of `x` is the i-th node of `VG` or `nodes`. If no value for `u` is provided, `StepFunction` will retrieve the bulk-integral data stored in `VG`. If `VG` has no bulk data, the step function will return `nothing`.
`tree` can be a `KDTree` from `NearestNeighbors.jl` or a `VoronoiKDTree`. It is highly recommended to use the latter, as it accounts for periodicity.
Finally, consider the following advanced code:
```julia
# create a composed function for integration
f = FunctionComposer(reference_argument = [0.0,0.0], super_type = Float64, alpha = x->norm(x)*x, beta = x->sum(abs,x) )
# create a VoronoiGeometry and integrate :alpha, :beta
VG = VoronoiGeometry(VoronoiNodes(rand(2,40)), cuboid(2,periodic=[1]), integrator=HighVoronoi.VI_MONTECARLO, integrand=f.functions)
# make a step function from integrated values:
f_all = StepFunction(VG)
# retrieve the alpha and beta- components as a single (real) valued stepfunctions
alpha_step = x-> HighVoronoi.decompose(f, f_all(x),scalar=true)[:alpha]
beta_step = x-> HighVoronoi.decompose(f, f_all(x),scalar=true)[:beta]
# generate some sample output
println(alpha_step([0.5,0.5]))
println(beta_step([0.5,0.5]))
```
## VoronoiKDTree
```julia
vt = VoronoiKDTree(VG::VoronoiGeometry; restrict_to_periodic=true)
```
This will create a `KDTree`-based search structure that accounts for the periodicity of `VG`.
`restrict_to_periodic=true` implies that only the "official" nodes are used. It is highly recommended not to change this option unless you know what you are doing.
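A combined sketch with `StepFunction` (see the previous section); the data vector `u` is arbitrary and only needs one entry per node.

```julia
VG = VoronoiGeometry(VoronoiNodes(rand(2, 50)), cuboid(2, periodic = [1]),
                     integrator = HighVoronoi.VI_GEOMETRY)
vt = VoronoiKDTree(VG)               # periodicity-aware nearest-neighbor structure
u  = collect(1.0:50.0)               # one (arbitrary) value per node
f  = StepFunction(VG, u, tree = vt)  # piecewise constant function based on vt
println(f([0.5, 0.5]))
```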
# Diameters of cells
```julia
f = DiameterFunction(VG::VoronoiGeometry; tree = VoronoiKDTree(VG))
```
This yields $f(x):=(r,R)$ where `r` is the inner and `R` the outer radius of the Voronoi cell that contains `x`. This is by its nature a step function.
# Functions on interfaces
It can be useful to consider the integrated values over the interfaces of the Voronoi tessellation as a function. This is achieved by `InterfaceFunction`:
```julia
f = InterfaceFunction(VD::VoronoiData,range,symbol=nothing;scalar=true)
```
This takes the `VD::VoronoiData` and creates a function that locally takes the value `VD.interface_integral[i][k]` over the respective interface. The value $f(x)$ of the function is chosen according to the two nearest neighbors, hence there is ambiguity in points with more than 2 nearest neighbors.
- `range`: This can be a `FunctionComposer` object, in which case `symbol` has to be provided. It can also be `a:b` or `[a1,a2,...,aN]` to take a subarray of the values. It can also be `:all`, in which case the full vector of values is taken.
- `scalar`: If true, then vectors with only one index will be returned as scalar values.
```julia
f = InterfaceFunction(VG::VoronoiGeometry,range,symbol=nothing;scalar=true)
```
Calculates the `VoronoiData` and calls the first instance of the method.
```julia
f = InterfaceFunction(VG::VoronoiGeometry)
```
Sets `range` to the full data range. Similar to the above example for `StepFunction`, one may consider the following setting:
```julia
# create a composed function for integration
f = FunctionComposer(reference_argument = [0.0,0.0], super_type = Float64, alpha = x->norm(x)*x, beta = x->sum(abs,x) )
# create a VoronoiGeometry and integrate :alpha, :beta
VG = VoronoiGeometry(VoronoiNodes(rand(2,40)), cuboid(2,periodic=[1]), integrator=HighVoronoi.VI_MONTECARLO, integrand=f.functions)
# make a step function from integrated values:
f_all = InterfaceFunction(VG)
# retrieve the alpha and beta- components as a single (real) valued stepfunctions
alpha_i = x-> HighVoronoi.decompose(f, f_all(x),scalar=true)[:alpha]
beta_i = x-> HighVoronoi.decompose(f, f_all(x),scalar=true)[:beta]
# generate some sample output
println(alpha_i([0.5,0.5]))
println(beta_i([0.5,0.5]))
```
# Functions from Data
If you want to generate a function from various integrated data in your own way, you can call
```@docs
FunctionFromData(vg::VoronoiGeometry,tree=VoronoiKDTree(vg),composer=nothing; function_generator)
```
# The FunctionComposer: Passing function arguments
!!! info "Always glue functions with a FunctionComposer"
    The `FunctionComposer` is internally used to glue together real-valued functions. Therefore, if a user wants to glue together functions and afterwards work with "glued" information generated from `HighVoronoi`, using `FunctionComposer` is the way to unify internal and external calculations.
The `FunctionComposer` is the element implemented in `HighVoronoi` to concatenate several `Float` or `Vector{Float}` valued functions into one single `Vector{Float}`-valued function using `vcat(...)`. It is built using a call of the following method.
```@docs
FunctionComposer(;reference_argument, super_type, _functions...)
```
A typical example would be
```julia
f = FunctionComposer(reference_argument = [0.0,0.0,0.0], super_type = Float64, alpha = x->norm(x)*x, beta = x->sum(abs,x) )
```
or:
```julia
myfunctions=(alpha = x->norm(x)*x, beta = x->sum(abs,x))
f = FunctionComposer(reference_argument = [0.0,0.0,0.0], super_type = Float64; myfunctions... )
```
The latter has the advantage that you can define your set of functions once and for all and use it again and again ensuring you always have the same order in the arguments. This brings us to an important point:
!!! warning "Don't mess with the order of arguments"
FunctionComposer takes the order of functions as given in the argument. That is if you make function calls
```julia
f1 = FunctionComposer(reference_argument = [0.0,0.0,0.0], super_type = Float64, alpha = exp, beta = sin )
f2 = FunctionComposer(reference_argument = [0.0,0.0,0.0], super_type = Float64; beta = sin, alpha = exp )
```
the algorithm will create two different functions `x->[exp(x),sin(x)]` and `x->[sin(x),exp(x)]` and it will NOT be able to clear up the mess this creates....
## Retrieving the full (combined) function
The full function is stored in the variable `FunctionComposer.functions`.
```julia
myfunctions=(alpha = x->norm(x)*x, beta = x->sum(abs,x))
f = FunctionComposer(reference_argument = [0.0,0.0,0.0], super_type = Float64; myfunctions... )
myvalue = f.functions([1.2,3.4,5.6])
```
## Decomposing the Composer
To retrieve single information from an array like `myvalue` in the last example, you can simply use the internal function `HighVoronoi.decompose(...)`:
```julia
myfunctions=(alpha = x->norm(x)*x, beta = x->sum(abs,x))
f = FunctionComposer(reference_argument = [0.0,0.0,0.0], super_type = Float64; myfunctions... )
myvalue = f.functions([1.2,3.4,5.6])
values = HighVoronoi.decompose(f, myvalue)
println(values[:alpha], values[:beta])
```
If you wish $1d$-vectors to be returned as scalars, try out this one:
```julia
values2 = HighVoronoi.decompose(f, myvalue, scalar=true)
println(values2[:alpha], values2[:beta])
```
| HighVoronoi | https://github.com/martinheida/HighVoronoi.jl.git |
|
[
"MIT"
] | 1.3.1 | bc1b8bfb168b8850d28c935b20a545882a9a078e | docs | 6192 | # Voronoi: Nodes and Geometry, Integrators
## [Nodes](@id differentnodegenerators)
The most basic thing is the creation of a list of Points. We advise to use the following:
```@docs
VoronoiNodes(x::Matrix)
```
An advanced method is given by the following
```julia
VoronoiNodes(number_of_nodes::Int;density ,
domain::Boundary=Boundary(), bounding_box::Boundary=Boundary(),
criterium=x->true)
```
When `density = x->f(x)`, this will create a cloud of approximately `number_of_nodes` points inside the intersection of `domain` and `bounding_box` with spatial distribution $f(x)$. Note that both the exact number and the positions of the points are random. The variable `bounding_box` also allows handling the case when `domain` is unbounded. The intersection of `domain` and `bounding_box` HAS TO BE bounded!
The following two pictures show first a distribution `density = x->sin(pi*2*x[1])^2*sin(pi*2*x[2])^2` and the second takes the same density squared.


### Single Nodes
To instantiate a single node (e.g. if you want to add a specific node to an existing list of nodes) use
```julia
# make [1.0, 0.0, 0.5] a valid Voronoi node
VoronoiNode([1.0, 0.0, 0.5])
```
### Example
```julia
# This is an example to illustrate VoronoiNodes(number_of_nodes::Int;density)
## First some plot routine ############################
using Plots
function plot_2d_surface(nodes, values)
# The following two lines are necessary in order for the plot to look nicely
func = StepFunction(nodes,values)
new_nodes = vcat([VoronoiNode([k/10,j*1.0]) for k in 0:10, j in 0:1], [VoronoiNode([j*1.0,k/10]) for k in 1:9, j in 0:1])
append!(nodes,new_nodes)
append!(values,[func(n) for n in new_nodes])
x = [node[1] for node in nodes]
y = [node[2] for node in nodes]
p = Plots.surface(x, y, values, legend=false)
xlabel!("X")
ylabel!("Y")
zlabel!("Values")
title!("2D Surface Graph")
display(p)
end
########################################################
## Now for the main part ################################
my_distribution = x->(sin(x[1]*π)*sin(x[2]*π))^4
my_nodes = VoronoiNodes(100,density = my_distribution, domain=cuboid(2,periodic=[]))
# you may compare the output to the following:
# my_nodes = VoronoiNodes(100,density = x->1.0, domain=cuboid(2,periodic=[]))
println("This generated $(length(my_nodes)) nodes.")
my_vals = map(x->sin(x[1]*π)^2*sin(x[2]*π),my_nodes)
plot_2d_surface(my_nodes,my_vals)
```
### DensityRange
```@docs
DensityRange{S}
```
## Geometry
The creation and storage of Voronoi geometry data is handled by the following class.
```@docs
VoronoiGeometry{T}
```
To create a Voronoi mesh it is most convenient to call either of the following methods
```@docs
VoronoiGeometry()
```
## [Integrators (overview)](@id integratoroverview)
As discussed above there is a variety of integrators available to the user, plus some internal integrators that we will not discuss in this manual. The important integrators for the user are:
* `VI_GEOMETRY`: Only the basic properties of the mesh are provided: the verteces and an implicit list of neighbors of each node. This is the fastes way to generate a `VoronoiGeometry`
* `VI_MONTECARLO`: Volumes, interface areas and integrals are calculated using a montecarlo algorithm introduced by A. Sikorski in `VoronoiGraph.jl` and discussed in a forthcoming article by Heida, Sikorski, Weber. This particular integrator comes up with the following additional paramters:
    + `mc_accurate=(int1,int2,int3)`: Montecarlo integration takes place in `int1` directions, over `int2` volumetric samples (for volume integrals only). It reuses the same set of directions `int3` times to save memory allocation time. The standard setting is `(1000,100,20)`.
* `VI_POLYGON`: We use the polygon structure of the mesh to calculate the exact values of interface area and volume. The integral over functions is calculated using the values at the center and the vertices, with linear interpolation in between. Also this method is to be discussed in the announced article by Heida, Sikorski, Weber.
* `VI_FAST_POLYGON`: Even more precise than `VI_POLYGON` and very fast (50 secs for 500 nodes in 6D), but it uses a lot of memory. It is advised to use this integrator if you insist on accuracy over performance and if you have a large amount of free RAM (>=4GB advised). On my personal machine with 16GB total RAM, `VI_FAST_POLYGON` is faster than `VI_POLYGON` by a factor of 15 for 500 nodes in 6 dimensions when integrating $x\rightarrow(x_1,x_2^2)$.
* `VI_HEURISTIC`: When this integrator is chosen, you need to provide a fully computed Geometry including volumes and interface areas.
`VI_HEURISTIC` will then use this information to derive the integral values.
* `VI_HEURISTIC_MC`: This combines directly `VI_MONTECARLO` calculations of volumes and interfaces and calculates integral values
of functions based on those volumes and areas. In particular, it also relies on `mc_accurate`!
It is important to keep in mind that the polygon integrator will be faster in low dimensions, whereas the Montecarlo integrator will outperform it from 5 dimensions upwards. However, when volumes and integrals are to be calculated in high dimensions, `VI_HEURISTIC_MC` is highly recommended, as it works with far fewer function evaluations than `VI_MONTECARLO`. A short example of both main choices follows.
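As an illustration of the two main choices discussed above (the node numbers are arbitrary; `mc_accurate` is only meaningful for the Montecarlo-based integrators):

```julia
xs = VoronoiNodes(rand(5, 1000))
# exact polygon-based volumes, areas and integrals
vg_poly = VoronoiGeometry(copy(xs), cuboid(5, periodic = []),
                          integrator = HighVoronoi.VI_POLYGON,
                          integrand  = x -> [sum(abs2, x)])
# Montecarlo-based computation with explicit accuracy parameters
vg_mc = VoronoiGeometry(copy(xs), cuboid(5, periodic = []),
                        integrator  = HighVoronoi.VI_MONTECARLO,
                        integrand   = x -> [sum(abs2, x)],
                        mc_accurate = (1000, 100, 20))
```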
## Storage: JLD2
You may use JLD2 to directly write a `VoronoiGeometry` or `VoronoiData` object to a file. It will be made sure that storing and reading data remains downward compatible in the future.
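A minimal sketch using the plain `JLD2` interface (file and variable names are arbitrary; `vg` denotes a previously computed `VoronoiGeometry`):

```julia
using JLD2
jldsave("my_geometry.jld2"; geometry = vg)          # store the object
vg_reloaded = load("my_geometry.jld2", "geometry")  # load it back
```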
## Storage: deprecated solution
The following solution is still available for grids that have been created with `ClassicVertexStorage()`. However, it is not advised to use it.
```@docs
write_jld()
```
```@docs
load_Voronoi_info()
```
## Extraction of `VoronoiData` data for further processing
```@docs
VoronoiData
```
| HighVoronoi | https://github.com/martinheida/HighVoronoi.jl.git |
|
[
"MIT"
] | 1.3.1 | bc1b8bfb168b8850d28c935b20a545882a9a078e | docs | 1549 | # [Improving Voronoi meshes for FV ](@id toyfile)
[It has been shown](https://wias-berlin.de/publications/wias-publ/run.jsp?template=abstract&type=Preprint&year=&number=2913) that finite volume methods for elliptic PDE should be more accurate if for each generator the distance to its vertices is approximately equal. This can be achieved as follows:
```julia
mynodes = VoronoiNodes(rand(2,200))
VG1 = VoronoiGeometry(copy(mynodes),cuboid(2,periodic=[]),integrator=VI_GEOMETRY)
draw2D(VG1)
VG2 = VoronoiGeometry(copy(mynodes),cuboid(2,periodic=[]),integrator=VI_GEOMETRY,improving=(max_iterations=5,))
draw2D(VG2)
```
The above example generates two Voronoi grids: one where the mesh is generated directly from the given nodes, and one using the `improving` keyword, where the nodes are modified so that they lie closer to the centers of mass of their respective Voronoi cells. This is an iterative process and takes the following parameters:
- `max_iterations::Int = 1`: The process will stop after this amount of iterations even if the wanted accuracy is not achieved.
- `tolerance::Float64 = 1.0`: if the distance between a node and the center of mass `D` and the minimal distance of the node to the boundary `r` satisfy `D/r < tolerance` the node will not be modified.
The following pictures illustrate the improvement of the mesh for standard setting and 200 Points in $\mathbb R^2$:
### Original Mesh

### Modified Mesh

| HighVoronoi | https://github.com/martinheida/HighVoronoi.jl.git |
|
[
"MIT"
] | 1.3.1 | bc1b8bfb168b8850d28c935b20a545882a9a078e | docs | 1939 | # [(More) Integrals](@id evenmoreintegrals)
There are two more options that can be passed to `VoronoiFVProblem()`:
- `bulk_integrals`: A list of functions following the pattern of `rhs_functions`
- `flux_integrals`: A list of functions following the pattern of `fluxes` but returning only one single value instead of two.
These options are thought to provide the user with the ability to calculate complex integrals even after the `VoronoiGeometry` has been calculated. The algorithm will sum every member of `flux_integrals` over all interfaces and sum every member of `bulk_integrals` over all cells.
To illustrate this, consider the following example:
```julia
function surface_int(;para_i,para_j,mass_ij,normal,kwargs...)
# kwargs... collects all additional parameters which are not used in the current function.
weight = mass_ij * sqrt(para_i[:κ]*para_j[:κ])
return weight
end
b_int(;para_i,mass_i,kwargs...) = mass_i * para_i[:f] * para_i[:κ]^2
function test_integrals()
nodes = VoronoiNodes(rand(2,40))
# calculate Voronoi tessellation and integrate κ(x)=sin(pi*x[1]) and f(x)=x[2]^2 over individual cells and interfaces
VG_basis = VoronoiGeometry(nodes,cuboid(2,periodic=[]),integrator=HighVoronoi.VI_POLYGON,integrand=x->[sin(pi*x[1]),x[2]^2])
# set up fluxes and RHS
vfvp = VoronoiFVProblem(VG_basis,
# note that the exact form of κ and f does not matter since data will be retrieved from VG_basis:
integralfunctions = (κ = x->1.0, f = x->1.0, ),
flux_integrals = ( fi = surface_int, ),
bulk_integrals = (bi = b_int,) )
# print the integral of sqrt(κ_i*κ_j) over the interfaces
println( get_Fluxintegral(vfvp,:fi) )
# print the integral of f*κ^2 over the bulk
println( get_Bulkintegral(vfvp,:bi))
end
```
| HighVoronoi | https://github.com/martinheida/HighVoronoi.jl.git |
|
[
"MIT"
] | 1.3.1 | bc1b8bfb168b8850d28c935b20a545882a9a078e | docs | 1610 | # Graphical Output in 2D and 3D
Graphical output can be produced using `Plots.jl`. This is achieved as follows:
## `draw2D`
Example:
```julia
VG = VoronoiGeometry(VoronoiNodes(rand(2,5)),cuboid(2,periodic=[]))
HighVoronoi.draw2D(VG)
```
Optionally, the output can be stored in any format supported by Plots:
```julia
HighVoronoi.draw2D(VG,"nice_plot.png")
```
The package provides the following output functions:
```@docs
draw2D
```
You may need the following:
```@docs
PlotBoard
```
## `draw3D`

The same works with 3 dimensions (you may also pass a customized `PlotBoard`):
```julia
VG = VoronoiGeometry(VoronoiNodes(rand(3,5)),cuboid(3,periodic=[]))
draw3D(VG,"nice_plot_3D.pdf")
```
## Using `PlotlyJS`
```julia
using PlotlyJS
plotly()
VG = VoronoiGeometry(VoronoiNodes(rand(3,5)),cuboid(3,periodic=[]))
draw3D(VG)
```
!!! warning " "
When you use `plotly()` you will not be able to write the output to a file.
## 2D-Output using MetaPost
Similar to LaTeX, MetaPost is an elegant way to create EPS, PDF, etc. from vector graphics code written in a programming language. If you do not have it installed on your PC, you may use the
MetaPost generator by Troy Henderson: [www.tlhiv.org/mppreview/](http://www.tlhiv.org/mppreview/). However, this link sometimes did not work in the past.
## The MetaPostBoard
These methods are based on the `MetaPostBoard` structure:
```@docs
MetaPostBoard
```
```@docs
MetaPostBoard()
```
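A usage sketch (this assumes that `draw2D` accepts a `board` keyword taking a `MetaPostBoard`; check the docstrings above for the exact signature):
```julia
VG = VoronoiGeometry(VoronoiNodes(rand(2,5)), cuboid(2, periodic=[]))
draw2D(VG, "nice_plot.mp", board = MetaPostBoard())
```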
| HighVoronoi | https://github.com/martinheida/HighVoronoi.jl.git |
|
[
"MIT"
] | 1.3.1 | bc1b8bfb168b8850d28c935b20a545882a9a078e | docs | 1610 | # [Multithreading](@id multithreading)
You can use the following keywords for `threading` in the `search_settings` to use multithreading in the Voronoi computations, or you can set them for `integrate=...`. Note that `integrate=true` is equivalent to `integrate=SingleThread()`.
## Periodic Meshes
Multithreading is currently not available for `fast=true` generation of periodic meshes!
## Automatic Inference of Threads
`threading=AutoThread()` in the `search_settings` will automatically infer the maximal number of available threads and correspondingly use `MultiThread` or `SingleThread` from below.
## Single Threaded Computations (Even if Julia is started with more threads)
Single-threaded calculations can be enforced with `threading=SingleThread()` in the `search_settings`. Even if you start Julia with several threads but want to do a single-threaded computation, it is strongly advised to use this option, as it will call a specialized version of the code.
## Multi Threaded Computations
Parallelized computations can be enforced with `threading=MultiThread(a,b)` in the `search_settings`. `a` and `b` provide parallelization information for an "outer" and an "inner" parallelization. While the use of `a` is safe, it is currently not advised to use `b>1`. If `a>Threads.nthreads()`, it will be internally reduced to the maximally available number of threads.
Currently, `b` is implemented as a parameter because it is technically doable. However, more tests are needed before it can be said whether it is advantageous compared to the sole usage of `a`.
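As a sketch (the nodes and the domain are placeholders), the options above are passed like this:
```julia
xs = VoronoiNodes(rand(3, 1000))
# enforce a single-threaded computation:
VG1 = VoronoiGeometry(xs, cuboid(3, periodic=[]), search_settings = (threading = SingleThread(),))
# "outer" parallelization over 4 threads (inner parallelization b=1, as advised above),
# plus multithreaded integration:
VG2 = VoronoiGeometry(xs, cuboid(3, periodic=[]), search_settings = (threading = MultiThread(4, 1),),
                      integrate = MultiThread(4, 1))
```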
| HighVoronoi | https://github.com/martinheida/HighVoronoi.jl.git |
|
[
"MIT"
] | 1.3.1 | bc1b8bfb168b8850d28c935b20a545882a9a078e | docs | 7338 |
# [Highspeed periodic geometries](@id periodicgeometrysection)
A fast and efficient way to generate meshes in high dimensions is to use quasi-periodic meshes. That is:
- Take $N$ points $(x_j)_{j\in\{1,\dots,N\}}$ within a unit cube $[0,1]^{dim}$
- for $i=1,\dots,dim$ and natural numbers $(n_i)_{i\in\{1,\dots,dim\}}$ make $\prod_i n_i$ "shifted" copies of $(x_j)_{j\in\{1,\dots,N\}}$, which yields a periodic set of points with $n_i$ repetitions of $(x_j)_{j\in\{1,\dots,N\}}$ in direction $i$.
- calculate the mesh geometry, interface areas and volumes with the Polygon method
- if desired: calculate the integral of given functions using the `Heuristic` method
This is automated in the following call:
```julia
dim = 3
N = 100 # number of points per unit cell (example value)
VG = HighVoronoi.VoronoiGeometry( VoronoiNodes(rand(dim,N)),
periodic_grid = ( dimensions=ones(Float64,dim),
scale=0.25*ones(Float64,dim), repeat=4*ones(Int64,dim),
periodic=[], fast=true ) )
```
Here, `VoronoiNodes(rand(dim,N))` corresponds to the above $X=(x_j)_{j\in\{1,\dots,N\}}$.
A new feature is the field `periodic_grid` as a keyword that signals to `HighVoronoi.jl` what we intend to do. `periodic_grid` is a `NamedTuple` which can take the following fields:
- `dimensions`: The box that contains the data $X$. Its default is `ones(Float64,dim)`.
- `scale`: a diagonal matrix to scale $(x_j)_{j\in\{1,\dots,N\}}$ before repeating. Its default is `ones(Float64,dim)`.
- `repeat`: corresponds to $(n_i)_{i\in\{1,\dots,dim\}}$, i.e. tells how often the data shall be repeated in each dimension. Its default is `2*ones(Int64,dim)`.
- `fast`: `true` uses internal copy-and-paste algorithms to speed up the calculation significantly in high dimensions. Integration of functions falls back to `Heuristic`. `false` uses classical computations. Integration of functions using `Polygon` and `MonteCarlo` is then possible. Default: `true`. Note that `fast=true` will disable multithreading. It will have to be reactivated in subsequent calls of `refine!(...)`.
The resulting domain will be a `cuboid(dim,periodic=periodic,dimensions=(scale.*dimensions.*repeat))` of dimensions `scale.*dimensions.*repeat`. The second (unnamed) argument, which usually indicates the domain, becomes meaningless.
In view of this fact, periodic boundary conditions on the resulting domain are implemented using:
- `periodic`: Tells which dimensions shall have periodic boundary conditions. Default: `[]`
## Intention of use
!!! note "Intentions of use"
    Using this feature makes sense only if either $n_i>1$ for some $i$ OR if `periodic != []`. In particular, `periodic=[i]` internally increases $n_i$ by $2$.
## Gain in performance compared to non-periodic grid
As mentioned above, the algorithm automatically tracks data that can be "copy-pasted" and as such can increase performance even for $(n_i)_i = (2,2,\dots,2)$. However, let us assume for simplicity that the first $k$ dimensions have $n_i>3$ (the actual order of the $n_i$ does not matter to the implemented algorithm). We partition the full domain into cubes indexed by $(m_i)_{i\in\mathbb N}$ with $1\leq m_i\leq n_i$. One can observe that $n_i>m_i\geq3$ for $i\leq k$ implies that the volume and area data as well as all vertices of cell $(m_i)_i$ can be obtained as a copy of the respective data from cell $(m_1,\dots,m_{i-1},m_i-1,m_{i+1},\dots,m_{dim})$.
The percentage of small cubes that can be copied from previously existing cubes can then be calculated as
```math
P_0=0\,,\qquad P_k = P_{k-1}*\frac{3}{n_k}+\frac{n_k-3}{n_k}\,.
```
The value $P_{dim}$ can be calculated using the non-exported method `HighVoronoi.redundancy`, e.g.
```julia
HighVoronoi.redundancy([3,5,2,6,8]) # returns 0.8875
```
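The recursion can be reproduced in a few lines (a sketch; following the assumption above, only directions with $n_k>3$ contribute copies):
```julia
function redundancy_sketch(repeat::Vector{Int})
    P = 0.0
    for n in repeat
        n > 3 || continue          # directions with n ≤ 3 contribute no copied cubes
        P = P * 3 / n + (n - 3) / n
    end
    return P
end

redundancy_sketch([3, 5, 2, 6, 8]) # ≈ 0.8875, matching the value above
```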
!!! note "Example"
Assume `repeat = 4*ones(Int64,dim)` then the percentage of copied data increases according to:
```math
    \begin{array}{rccccccc}
\mathrm{dim} & 2 & 3 & 4 & 5 & 6 & \dots & \infty\\
\% & \frac{7}{16} & \frac{17}{32} & \frac{37}{64} & \frac{177}{256} & \frac{357}{512} & \dots & 1
\end{array}
```
However, since other cells can be partially recycled, numerical experiments show an even higher gain in performance.
## Memory usage
The lower geometric complexity of periodic meshes leads to less memory being used. This can be checked by applying the following command to a geometry with nodes in general position and to a periodic geometry with a comparable number of generators.
```julia
size = memory_allocations(vg::VoronoiGeometry;verbose=false)
```
which returns the memory allocated by `vg` in Bytes. `verbose=true` prints to the shell which internal part of the geometry data structure occupies how much memory.
## Advantage for numerics and some statistics
Another advantage of periodic meshes with a low number of generating nodes is the following: In a cubic grid every node has $2d$ neighbors, while in a regular grid the number of neighbors grows super-linearly. E.g. in 5 dimensions, tests suggest around $90\pm 10$ neighbors compared to $10$ neighbors in a cubic grid. At the same time, a periodic grid generated from 3 points shows an average of $20$ neighbors. Thus, matrices generated in this way will be much sparser than matrices for regular grids.
The user can play around a bit with
```julia
HighVoronoi.VoronoiStatistics(dim,samples;periodic=nothing,points=1,my_generator=nothing,geodata=true)
```
- `dim::Int`: The dimension
- `samples::Int`: How many samples shall be considered
- `periodic`: If `periodic` is an integer, it will calculate the statistics for periodic meshes from `periodic` nodes. Otherwise, it will calculate the statistics for the point closest to the center of a cube, where the cube is filled with `points` random points.
- `my_generator`: if this is a function `my_generator(dim,points)` which returns some `xs::VoronoiNodes, number::Int` the algorithm will do the Voronoi statistics for the first `number` points of `xs`.
- `geodata`: if true calculates volumes and areas. costly in high dimensions.
The `VoronoiStatistics` returns a named tuple with the following entries:
- `data_size`: Number of sample nodes calculated
- `volume=(V,_v)`: Average volume `V` of a cell with standard deviation `_v`
- `verteces=(V,_v)`: Average number of vertices `V` of a cell with standard deviation `_v`
- `neighbors=(N,_n)`: Average number of neighbors `N` of a cell with standard deviation `_n`
- `area=(A,_a)`: Average area `A` of an interface with standard deviation `_a`
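A hypothetical call following the signature above could look like this:
```julia
stats = HighVoronoi.VoronoiStatistics(3, 100; periodic=4, geodata=true)
println(stats.neighbors) # (N, _n): average number of neighbors and its standard deviation
println(stats.volume)    # (V, _v): average cell volume and its standard deviation
```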
## Cubic grids: Ultra high speed mesh generation
When the call of `VoronoiGeometry` looks like the following:
```julia
VG = HighVoronoi.VoronoiGeometry( VoronoiNodes(rand(dim,1)),
                periodic_grid = ( dimensions=ones(Float64,dim),
scale=0.25*ones(Float64,dim), repeat=4*ones(Int64,dim),
periodic=[], fast=true ) )
```
i.e. if only one single node is passed, the resulting grid will be cubic. This a priori knowledge is used by a specialized internal fast computation algorithm.
!!! hint "Fast generation of complex grids"
    The generation of coarse grids with local refinements in large dimensions can be achieved by calculating one coarse and one fine cubic grid and using `substitute!`. | HighVoronoi | https://github.com/martinheida/HighVoronoi.jl.git |
|
[
"MIT"
] | 1.3.1 | bc1b8bfb168b8850d28c935b20a545882a9a078e | docs | 3857 |
# Volume Projection Matrix
The idea of "projection" stems from finite volume application. Assume the user has solved a FV problem on a coarse geometry `VG` and wants to project this solution onto the refined geometry `VG2` as an initial guess for a solver of the FV problem on `VG2`. Have a look at the following code.
```julia
using SparseArrays
VG = VoronoiGeometry(VoronoiNodes(rand(2,10)),cuboid(2,periodic = [1]),integrator=HighVoronoi.VI_POLYGON)
VG2 = refine(VG,VoronoiNodes(0.2*rand(2,4)))
rows, cols, vals = interactionmatrix(VG2,VG)
A = sparse(rows,cols,vals)
println(A*ones(Float64,10)) # this will print out a vector of 14 values `1.0` (mass conservation)
```
Then `A` is the matrix that projects vectors on `VG` to vectors on `VG2`. More precisely, let `VG` be a partition in cells with masses $m_i$ and let `VG2` a partition in cells with masses $\tilde m_j$. Then, if $u$ is the vector of data on `VG` and $\tilde u$ is the data on `VG2` with $\tilde u = A \cdot u$ then
```math
\sum_j \tilde m_j \tilde u_j = \sum_i m_i u_i\,.
```
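This conservation property can be checked numerically (a small sketch building on the code above; it uses the `volume` field of `VoronoiData` as in other examples of this documentation):
```julia
vd  = VoronoiData(VG)   # 10 coarse cells
vd2 = VoronoiData(VG2)  # 14 refined cells
u  = rand(10)           # data on VG
u2 = A * u              # projected data on VG2
# both sides below should agree up to numerical error
sum(vd2.volume .* u2) ≈ sum(vd.volume .* u)
```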
### Optional Parameters
- `check_compatibility=true`: This enforces that the geometries are verified for their compatibility.
- `tolerance = 1.0E-12`: If two points in `VG` and `VG2` have a distance of less than `tolerance`, they are considered identical.
- `hits_per_cell = 1000`: The method is based on a sampling procedure. This parameter controls how many samples are taken from each cell on average.
- `bounding_box=Boundary()`: It is mathematically not reasonable to apply the method to unbounded domains. However, if you wish to do so anyway, you have to provide a bounded `bounding_box`.
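For example (a sketch; this assumes the parameters above are keyword arguments of `interactionmatrix`, as `bounding_box` is in Example 3 below):
```julia
rows, cols, vals = interactionmatrix(VG2, VG; tolerance = 1.0E-10, hits_per_cell = 2000)
```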
## Conditions on `VG` and `VG2`
`VG` and `VG2` may be completely unrelated; the method works anyway as long as the domains are bounded (or an additional bound is provided) and identical, including periodicity.
### This works
#### Example 1
```julia
VG = VoronoiGeometry(VoronoiNodes(rand(2,10)),cuboid(2,periodic = [1]),integrator=HighVoronoi.VI_GEOMETRY)
VG2 = VoronoiGeometry(VoronoiNodes(rand(2,20)),cuboid(2,periodic = [1]),integrator=HighVoronoi.VI_POLYGON)
rows, cols, vals = interactionmatrix(VG2,VG)
rows2, cols2, vals2 = interactionmatrix(VG,VG2) # the inverse projection
```
#### Example 2
```julia
VG = VoronoiGeometry(VoronoiNodes(rand(2,10)),cuboid(2,periodic = []))
VG2 = VoronoiGeometry(VoronoiNodes(rand(2,20)),cuboid(2,periodic = []))
rows, cols, vals = interactionmatrix(VG2,VG)
rows2, cols2, vals2 = interactionmatrix(VG,VG2) # the inverse projection
```
#### Example 3
```julia
VG = VoronoiGeometry(VoronoiNodes(rand(2,10)))
VG2 = VoronoiGeometry(VoronoiNodes(2*rand(2,20)))
rows, cols, vals = interactionmatrix(VG2,VG,bounding_box=cuboid(2))
```
### This does not work
#### Example 4
The following breaks because the periodicities are not compatible
```julia
VG = VoronoiGeometry(VoronoiNodes(rand(2,10)),cuboid(2,periodic = [1]),integrator=HighVoronoi.VI_GEOMETRY)
VG2 = VoronoiGeometry(VoronoiNodes(rand(2,20)),cuboid(2,periodic = []),integrator=HighVoronoi.VI_POLYGON)
rows, cols, vals = interactionmatrix(VG2,VG)
```
#### Example 5
The following breaks because the dimensions of the domain are different.
```julia
VG = VoronoiGeometry(VoronoiNodes(rand(2,10)),cuboid(2,periodic = []))
VG2 = VoronoiGeometry(VoronoiNodes(2*rand(2,20)),cuboid(2,periodic = [],dimensions=2*ones(Float64,2)))
rows, cols, vals = interactionmatrix(VG2,VG)
```
#### Example 6
The following breaks because the domain is unbounded.
```julia
VG = VoronoiGeometry(VoronoiNodes(rand(2,10)))
VG2 = VoronoiGeometry(VoronoiNodes(2*rand(2,20)))
rows, cols, vals = interactionmatrix(VG2,VG)
```
| HighVoronoi | https://github.com/martinheida/HighVoronoi.jl.git |
|
[
"MIT"
] | 1.3.1 | bc1b8bfb168b8850d28c935b20a545882a9a078e | docs | 2242 | # Voronoi: Raycast methods
During the development of `HighVoronoi.jl` I developed several `Raycast` methods. These methods are central to the Voronoi algorithm.
!!! note "The choice of the Raycast method may be crucial for performance or overall success!"
    The classical Raycast method `method=RCOriginal`, e.g., is only for generators in general position, but is very good for the far field computation on unbounded domains. On the other hand, as soon as you have a bounded domain, `method=RCCombined` may be very fast compared to all other methods, while it gets very, very slow on unbounded domains in high dimensions. It's a classical tradeoff.
## [Classical Method](@id classicraycast)
This is the most classical method introduced in [PREPRINT](http://www.wias-berlin.de/preprint/3041/wias_preprints_3041.pdf). It basically works only on generators in general position (when a vertex is created by exactly $d+1$ generators) and can be called using `method=RCOriginal` in the `search_settings`.
## [Statistically Boosted Method](@id boostedraycast)
This method is called with `method=RCNonGeneral` and speeds up the classical method in the following way: It performs two classical steps as in [PREPRINT](http://www.wias-berlin.de/preprint/3041/wias_preprints_3041.pdf) and then performs an `inrange` search and finally sorts out all non-generators. This provides a significant boost when the generators are in non-general position (more than $d+1$ generators for one vertex)
## [Nested Method](@id nestedraycast)
This method is called with `method=RCCombined` and performs a deep hacking of the nearest-neighbor search, i.e. it made it necessary to include a modified version of `NearestNeighbors.jl` into the source code of `HighVoronoi.jl`. The result is a nearest neighbor search that basically converges the raycast method within one single search. It is very fast on bounded and periodic domains but may take much longer on unbounded domains at the periphery of the point set.
## Suggested usage
It is highly recommended to use `method=RCCombined` (the standard setting) and to switch to `method=RCNonGeneral` in pathological situations, respectively to `method=RCOriginal` if the generators are "nicely" distributed (in general position).
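The method is selected via the `search_settings` keyword, e.g. (a sketch in the style of the refinement example elsewhere in this documentation):
```julia
# default is RCCombined; switch explicitly if the generators are in non-general position
VG = VoronoiGeometry(VoronoiNodes(rand(5,1000)), cuboid(5, periodic=[]),
                     search_settings = (method = RCNonGeneral,))
```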
| HighVoronoi | https://github.com/martinheida/HighVoronoi.jl.git |
|
[
"MIT"
] | 1.3.1 | bc1b8bfb168b8850d28c935b20a545882a9a078e | docs | 3137 | # [Refinement and Substitution of Subdomains](@id refinementsection)
`HighVoronoi.jl` allows you to refine a mesh in two different ways:
- refinement by locally adding additional points
- substitution: in a given domain the existing mesh is replaced by another geometry.
## Refinement using `refine!`
The intention of `refine!` is to refine a given mesh in a region where the user wants a more detailed view at a later stage, either within the same code or building upon calculated and stored data. It can use its own `search_settings`, which are known from `VoronoiGeometry(...)`. In particular, the Voronoi computation can be parallelized.
```julia
VG = VoronoiGeometry(VoronoiNodes(rand(5,1000)),cuboid(5))
## Do some fancy stuff with `VG`
## ...
## Now we want to refine the geometry with additional VoronoiNodes xs
refine!(VG,xs,update=true, search_settings=(method=RCNonGeneral,)) # the original 'search_settings' of VG, like `method` or `threading`, can be temporarily overwritten
```
!!! hint "On `update`..."
    The parameter `update::Bool` tells whether or not the volumes and integrals of new or modified cells shall be recalculated / updated. Its default value is `update=true`.
One could also use `update=false` and call `integrate!(VG)` instead. This will have (much) higher computational costs.
## Refinement using `substitute!`
The intention of `substitute!` is fast mesh generation in high dimensions, particularly in combination with `periodic_grid`. In this way the algorithm will have to calculate far fewer vertices explicitly. As an additional benefit, using `periodic_grid` we can achieve a grid with rather few neighbors, resulting in a much sparser matrix than with fully random nodes.
```@docs
substitute!(VG::VoronoiGeometry,VG2::VoronoiGeometry,indeces)
```
```@docs
indeces_in_subset(VG::VoronoiGeometry,B::Boundary)
```
!!! warning "Domain and Boundary condition matching"
    The domains of the original and the substitute geometry MUST match. `HighVoronoi` will not control this, but you may get strange results or even a crash. Furthermore, both domains should have the same periodic boundary conditions.
The following code will create a cubic grid with poor resolution in $\mathbb R^8$ and then refine it by a cubic grid with high resolution in the cube $(0.7,1)^8$. Furthermore, the grid will have periodic boundaries in dimensions $1$ and $5$.
```julia
dim = 8
VG = VoronoiGeometry(VoronoiNodes(rand(8,1)),periodic_grid = ( dimensions=ones(Float64,dim),
scale=0.2*ones(Float64,dim), repeat=5*ones(Int64,dim),
periodic=[1,5], fast=true ))
substitute_VG = VoronoiGeometry(VoronoiNodes(rand(8,1)),periodic_grid = ( dimensions=ones(Float64,dim),
scale=0.05*ones(Float64,dim), repeat=20*ones(Int64,dim),
periodic=[1,5], fast=true ))
## now put all that stuff together
substitute_indeces = indeces_in_subset(substitute_VG,cuboid(8,periodic=[],dimensions=0.3*ones(Float64,8),offset=0.7*ones(Float64,8)))
substitute!(VG, substitute_VG, substitute_indeces)
```
| HighVoronoi | https://github.com/martinheida/HighVoronoi.jl.git |
|
[
"MIT"
] | 1.3.1 | bc1b8bfb168b8850d28c935b20a545882a9a078e | docs | 5910 | # Voronoi generation using the HighVoronoi Library
The following are examples to give a first impression of the functionalities of `HighVoronoi`. They do not represent the actual intended use. For this we refer [here](@ref intentions).
## [Getting Started...](@id quickVG)
You can write your first HighVoronoi code e.g. as follows:
```julia
function VoronoiTest(dim,nop)
# create a random matrix of dim x nop entries
data=rand(dim,nop)
# transform these into `nop` different static vectors of dimension `dim`
xs=VoronoiNodes(data)
return VoronoiGeometry(xs,Boundary())
end
```
The command ```Boundary()``` creates an unbounded version of $\mathbb R^{dim}$. This VoronoiGeometry only contains the nodes, vertices and neighbor relations, as well as information on vertices "going to infinity".
## Bounded domains, volumes and interface areas
So let us make the following modification:
```julia
function VoronoiTest_cube(dim,nop,integrator=HighVoronoi.VI_POLYGON)
# create a random matrix of dim x nop entries
data=rand(dim,nop)
# transform these into `nop` different static vectors of dimension `dim`
xs=VoronoiNodes(data)
return VoronoiGeometry(xs,cuboid(dim,periodic=Int64[]),integrator=integrator)
end
vd=VoronoiData(VoronoiTest_cube(2,10))
println(vd.volume)
println(vd.neighbors)
println(vd.area)
```
- The parameter `integrator` tells Julia whether and how to compute volumes of cells and areas of interfaces. `VI_POLYGON` refers to an exact triangulation of the polytopes.
- `Boundary()` has been exchanged for `cuboid(dim,periodic=Int64[])`, which in this case returns the simple cube $[0,1]^{dim}$. Remark: Unlike `VI_MONTECARLO` the algorithm `VI_POLYGON` will return finite volumes also for the infinite cells that are automatically created on unbounded domains like `Boundary()`.
- The last three lines cause julia to print the volumes of the 10 cells, the neighbors of each cell and the area of the respective interfaces. Note that some points have neighbors with values from `11` to `14`.
!!! warning "Boundary planes can be neighbors"
The numbers `11` to `14` represent an internal numbering of the 4 hyperplanes (e.g. lines) that define the cube $[0,1]^2$. In general, given a domain with $N$ nodes and $P$ planes, whenever a Voronoi cell corresponding to node $n$ touches a boundary plane $p$ this will cause a neighbor entry $N+p$.
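A small sketch of how this numbering can be used with the `VoronoiData` from above (10 nodes, 4 boundary planes of the unit square):
```julia
N = 10 # number of nodes in this example
for (n, neigh) in enumerate(vd.neighbors)
    planes = [k - N for k in neigh if k > N]
    isempty(planes) || println("cell ", n, " touches boundary plane(s) ", planes)
end
```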
## Periodic Boundaries
The `HighVoronoi` package provides several possibilities to define boundaries of bounded or (partially) unbounded domains. It also provides the possibility to study periodic boundary conditions:
```julia
function VoronoiTest_cube_periodic(dim,nop,integrator=HighVoronoi.VI_POLYGON)
# create a random matrix of dim x nop entries
data=rand(dim,nop)
# transform these into `nop` different static vectors of dimension `dim`
xs=VoronoiNodes(data)
return VoronoiGeometry(xs,cuboid(dim,periodic=[1]),integrator=integrator)
end
vd=VoronoiData(VoronoiTest_cube_periodic(2,10))
println(vd.volume)
println(vd.neighbors)
println(vd.area)
```
- `periodic=[1]` in the `cuboid(...)` command forces the Voronoi mesh to be periodic in space dimension $1$. Note that the internal default is `periodic = collect(1:dim)`, i.e. the grid is periodic in all space dimensions. Our current choice has the following essential consequences.
* No cell will have $11$ or $12$ as a neighbor, since these boundaries are now periodic.
    * Some cells `n` will have "doubled" neighbors, i.e. the same neighbor node appears twice (or even more often for e.g. `periodic=[1,2]`) in the array `vd.neighbors[n]`. This is because, with only a few cells, it is highly probable that a cell has the same neighbor both "on the left" and "on the right".
## Recycle Voronoi data for new integrations
The following code first generates a Voronoi grid, simultaneously integrating the function `x->[norm(x),1]`. Afterwards, the volume and area information is used to integrate the function `x->[x[1]]`.
```julia
data = rand(4,20)#round.(rand(dim,nop),digits=4)
xs = HighVoronoi.VoronoiNodes(data)
VG = VoronoiGeometry(xs,cuboid(4,periodic=Int64[]),integrator=HighVoronoi.VI_POLYGON,integrand = x->[norm(x),1])
VG2 = VoronoiGeometry(VG,integrand = x->[x[1]],integrate=true,integrator=HighVoronoi.VI_HEURISTIC)
vd = VoronoiData(VG)
println(vd.bulk_integral)
vd2 = VoronoiData(VG2)
println(vd2.bulk_integral)
```
## Recycle Voronoi data for refined geometries
The following creates a `VoronoiGeometry` `VG`, then makes a copy `VG2` and refines the original `VG` with 20 points inside the region $(0,\,0.2)^4$.
```julia
xs = HighVoronoi.VoronoiNodes(rand(4,20))
VG = VoronoiGeometry(xs,cuboid(4,periodic=Int64[]),integrator=HighVoronoi.VI_POLYGON,integrand = x->[norm(x),1])
VG2 = copy(VG)
refine!(VG,VoronoiNodes(0.2*rand(4,20)))
vd = VoronoiData(VG)
println(vd.bulk_integral)
vd2 = VoronoiData(VG2)
println(vd2.bulk_integral)
```
## Store and load data
Data can easily be stored using the following
```julia
Geo = VoronoiGeometry(HighVoronoi.VoronoiNodes(rand(4,20)), cuboid(4,periodic=Int64[]), integrator=HighVoronoi.VI_POLYGON, integrand = x->[norm(x),1])
write_jld(Geo,"example.jld")
```
the ending ".jld" is important as it indicates julia which data format to use. Retrieve this data later using
```julia
Geo = VoronoiGeometry("example.jld", bulk=true, interface=true, integrand = x->[norm(x),1])
```
!!! warning "integrands can easily be messed up..."
The method does not store the `integrand` parameter to the file. However, due to `bulk=true, interface=true` the integral data is loaded from the file and must be properly interpreted by a potential user. This has drawbacks and advantages, as will be discussed in the [Intentions of use](showcase/).
| HighVoronoi | https://github.com/martinheida/HighVoronoi.jl.git |
|
[
"MIT"
] | 1.3.1 | bc1b8bfb168b8850d28c935b20a545882a9a078e | docs | 8144 | # [Some code to test and play around](@id toyfile)
The following code works as it is. It takes as parameters a function $u$, a field $\kappa$ and the dimension `dim`. It then calculates $-\nabla(\kappa\nabla u)=:f$ and creates discrete versions of $\kappa$ and $f$ to generate a numerical solution $u$. Finally it compares the numerical and the exact solution in $L^2$ and plots the numerical result in case `dim==2`.
The boundary conditions below are set to be periodic in dimension 1, Dirichlet at $x_2=1$ and Neumann at $x_2=0$, but you can change this according to your wishes.
The nodes are distributed i.i.d. by default, but you can provide a density. The code is set up to generate 1000 nodes, but you can change this as well.
```julia
using HighVoronoi
using SparseArrays
using IterativeSolvers
using NearestNeighbors
using LinearAlgebra
using StaticArrays
##########################################################################################
## Derivatives
##########################################################################################
function ∂_k(f,x,k;dim=length(x),vec2=MVector{dim}(zeros(Float64,dim)))
h = 0.00245
vec2 .= x
vec2[k] += h
f1 = f(vec2)
vec2[k] += h
f2 = f(vec2)
vec2 .= x
vec2[k] -= h
f3 = f(vec2)
vec2[k] -= h
f4 = f(vec2)
return ( 8*(f1-f3) + (f4-f2) ) / ( 12*h ) # five-point stencil
end
function ∇(f::Function,dim)
vec2=MVector{dim}(zeros(Float64,dim))
return x->map(k->∂_k(f,x,k,dim=dim,vec2=vec2),1:dim)
end
function ∇_buffered(f::Function,dim)
    vec = MVector{dim}(zeros(Float64,dim))
vec2 = MVector{dim}(zeros(Float64,dim))
return x->map!(k->∂_k(f,x,k,dim=dim,vec2=vec2),vec,1:dim)
end
function ∇cdot(f::Function,dim)
function sum_partials(f,x,dim,vec2)
f_sum = 0.0
for k in 1:dim
f_sum += ∂_k(y->f(y)[k],x,k,dim=dim,vec2=vec2)
end
return f_sum
end
vec = MVector{dim}(zeros(Float64,dim))
return x->sum_partials(f,x,dim,vec)
end
function neumann_bc(flux,domain,x)
function plane_of_x(domain,x)
k = 0
dist = 10.0
ldp = length(domain.planes)
for i in 1:ldp
d = dot(domain.planes[i].base-x,domain.planes[i].normal)
            if d<dist
                k=i
                dist=d
            end
end
return domain.planes[k].normal
end
normal = plane_of_x(domain,x)
return dot(normal,flux(x))
end
##########################################################################################
## Solving -∇⋅(κ∇u) = RHS on 'domain'
## keep track of signs!!!
##########################################################################################
# returns all necessary data to perform a numerical calculation to solve
# -∇⋅(κ∇u) = RHS using HighVoronoi tools
# calculates RHS := -∇⋅(κ∇u) and if needed Neumann condition
# provides u_exact:=u and κ
function make_set(u::Function,κ::Function,dim;periodic=[],
dirichlet_boundary=collect(1:(2*dim)),
neumann_boundary=nothing, density=nothing,
number_of_nodes=1000)
my_domain = cuboid(dim,periodic=periodic)
∇u = ∇(u,dim)
rhs = ∇cdot(x->-1.0*κ(x)*∇u(x),dim)
flux(x) = -κ(x)*∇u(x)
    ∇_buff_u = ∇_buffered(u,dim) # buffered gradient: faster, but writes into an internal buffer
    neumann_flux(x) = -κ(x)*∇_buff_u(x)
    if density === nothing
return (u_exact = u, κ=κ, RHS = rhs,
domain=my_domain, dim=dim,
dirichlet_boundary=dirichlet_boundary,
neumann_boundary=neumann_boundary,
number_of_nodes = number_of_nodes,
neumann = x->neumann_bc(flux,my_domain,x))
else
return (u_exact = u, κ=κ, RHS = rhs,
domain=my_domain, dim=dim,
density=density,
dirichlet_boundary=dirichlet_boundary,
neumann_boundary=neumann_boundary,
number_of_nodes = number_of_nodes,
neumann = x->neumann_bc(flux,my_domain,x))
end
end
using Plots
# plotting the results if dimension is 2
function plot_2d_surface(nodes, values)
# The following two lines are necessary in order for the plot to look nicely
func = StepFunction(nodes,values)
new_nodes = vcat([VoronoiNode([k/10,j*1.0]) for k in 0:10, j in 0:1], [VoronoiNode([j*1.0,k/10]) for k in 1:9, j in 0:1])
append!(nodes,new_nodes)
append!(values,[func(n) for n in new_nodes])
x = [node[1] for node in nodes]
y = [node[2] for node in nodes]
p = Plots.surface(x, y, values, legend=false)
xlabel!("X")
ylabel!("Y")
zlabel!("Values")
title!("2D Surface Graph")
display(p)
end
# Flux function passed as a parameter to HighVoronoi
function SQRA_flux(;para_i,para_j,mass_ij,normal,kwargs...)
# kwargs... collects all additional parameters which are not used in the current function.
weight = norm(normal)^(-1) * mass_ij * sqrt(para_i[:κ]*para_j[:κ])
return weight, weight
end
# RHS passed to HighVoronoi
myRHS(;para_i,mass_i,kwargs...) = mass_i * para_i[:f]
# performs numerical calculations to solve -∇⋅(κ∇u) = RHS
function simulation(set)
# adjust RHS for periodic domain
new_RHS = HighVoronoi.PeriodicFunction(set.RHS,set.domain)
set = (neumann = x->0.0, set..., RHS=new_RHS)
# get nodes
nodes = nothing
if isdefined(set,:density)
nodes = VoronoiNodes( set.number_of_nodes;density=set.density,
domain=cuboid(set.dim,periodic=[]), silence=false)
else
nodes = VoronoiNodes(rand(set.dim,set.number_of_nodes))
end
# Voronoi Geometry and integration. We could also set up VG_κ directly...
VG_basis = VoronoiGeometry(nodes,set.domain,integrator=HighVoronoi.VI_GEOMETRY)
VG_κ = VoronoiGeometry(VG_basis, integrator=HighVoronoi.VI_POLYGON, integrand=x->[set.κ(x),set.RHS(x)])
vd = VoronoiData(VG_κ) # needed for volumes in the final calculations
# set up fluxes and RHS
vfvp = VoronoiFVProblem(VG_κ,
integralfunctions = (κ = set.κ, f = set.RHS, ),
fluxes = ( j1 = SQRA_flux, ),
rhs_functions = (F = myRHS,) )
# define functions that can be applied as boundary conditions
harmonic = FVevaluate_boundary(x->0.0) # turn a function into the format HighVoronoi needs
compatibility = FVevaluate_boundary(x->set.u_exact(x))
neumann = FVevaluate_boundary(x->set.neumann(x))
# construct linear system from fluxes, RHS and boundary conditions
r,c,v,f = linearVoronoiFVProblem(vfvp, flux = :j1, rhs = :F,
Dirichlet = set.dirichlet_boundary!=nothing ? (set.dirichlet_boundary,compatibility) : nothing,
Neumann = set.neumann_boundary!=nothing ? (set.neumann_boundary,neumann) : nothing)
    A = sparse(r,c,v) # a sparse matrix with rows `r`, columns `c` and values `v`
# solve linear system using IterativeSolvers-Package
solution_u = cg(A,f) # conjugate gradients
# print out approximate L²-error between exact and numerical solutions
println("Approximate L²-error: ",sqrt(sum(map(k->abs2(solution_u[k]-set.u_exact(nodes[k]))*vd.volume[k],1:length(nodes)))))
return nodes, solution_u # return nodes and values for plotting...
end
###################################################################################
## Putting together the above pieces
###################################################################################
# create parameters
my_set = make_set(x->sin(x[1]*2*π)^2 * sin(x[2]*2*π)^2, x->1.0, 2,
periodic=[1], dirichlet_boundary=3, neumann_boundary=4)
# reminder: periodic=[1] identifies boundary 1 with boundary 2
# perform simulation
nodes, values = simulation(my_set)
# plot results
my_set.dim==2 && plot_2d_surface(nodes, values)
``` | HighVoronoi | https://github.com/martinheida/HighVoronoi.jl.git |
|
[
"MIT"
] | 1.3.1 | bc1b8bfb168b8850d28c935b20a545882a9a078e | docs | 2230 | # [Workflow in FV](@id workflowfv)
In order to create a Finite Volume problem you have several options you should think through and decide:
- `Geometry`, i.e. the cells and interfaces on which your finite volume problem is defined can be generated in two ways:
* prior using [this guide](@ref workflowgeometry)
* on the fly: passing arguments to `VoronoiFVProblem` in the third step below as if it was a `VoronoiGeometry`.
- You may wish to define some step functions, interface functions or any other type of customized functions from integrated data using [this guide](@ref createalltypesoffunctions)
- Create a `VoronoiFVProblem` (if not done in first step)
* provide the `VoronoiGeometry`
    * optionally provide `integralfunctions` whose values are inferred on cells and interfaces using the chosen integration method
    * optionally provide `discretefunctions` whose values are inferred by pointwise evaluation
* optionally provide `fluxes` as a named tuple of description how fluxes should be calculated using [this guide](@ref examplefluxes)
* optionally provide `rhs_functions` a named tuple of descriptions how to compute a potential right hand side in the FV problem using [this guide](@ref examplefluxes)
* optionally provide `bulk_integrals` as a way to integrate a function over the tessellation using [this guide](@ref evenmoreintegrals)
    * optionally provide `flux_integrals` as a way to integrate a function over the interfaces of the tessellation using [this guide](@ref evenmoreintegrals)
- You may wish to define some more step functions, interface functions or any other type of customized functions from integrated data using [this guide](@ref createalltypesoffunctions) and integrate the information in `VoronoiFVProblem` using [this guide](@ref evenmoreintegrals)
- Call `linearVoronoiFVProblem` with a given description of fluxes and right hand sides provided by `VoronoiFVProblem` and your favorite boundary conditions using [this guide](@ref linear_vor_prob). (caution: since boundary conditions rely on a given boundary, the periodic boundary conditions are subject of `VoronoiGeometry`, resp. `Boundary`, so this has to be implemented in the very first step.)
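A minimal skeleton of this workflow could look as follows (a sketch; `SQRA_flux`, `myRHS` and the boundary data are placeholders in the style of the FV examples elsewhere in this documentation):
```julia
VG   = VoronoiGeometry(VoronoiNodes(rand(2,100)), cuboid(2, periodic=[1]),
                       integrator = HighVoronoi.VI_POLYGON)
vfvp = VoronoiFVProblem(VG, integralfunctions = (κ = x -> 1.0,),
                        fluxes = (j1 = SQRA_flux,), rhs_functions = (F = myRHS,))
r, c, v, f = linearVoronoiFVProblem(vfvp, flux = :j1, rhs = :F,
                                    Dirichlet = (3, FVevaluate_boundary(x -> 0.0)))
```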
| HighVoronoi | https://github.com/martinheida/HighVoronoi.jl.git |
|
[
"MIT"
] | 1.3.1 | bc1b8bfb168b8850d28c935b20a545882a9a078e | docs | 993 | # [Workflow](@id workflowgeometry)
In order to create a `VoronoiGeometry` you have several options you should think through and decide:
- `VoronoiNodes`, i.e. the generators of your mesh can be:
* periodic, so you may look [here](@ref periodicgeometrysection)
* non-periodic
+ fully customized by the user, [here](@ref quickVG)
      + distributed according to a density, [here](@ref differentnodegenerators)
- The domain, i.e. a `Boundary` object. Those could be
* [rectangular](@ref rectangulardomains)
* [customized](@ref createboundary), including (partially) unbounded domains
- The `integrator` argument. This basically choses between sole geometry calculation and various integration techniques, see [here](@ref integratoroverview)
- The `integrand`, which is optionally a function whose integrals over the cells and interfaces you wish to calculate.
Built on top are [methods for refinement and replacement](@ref refinementsection)
| HighVoronoi | https://github.com/martinheida/HighVoronoi.jl.git |
|
[
"MIT"
] | 0.1.4 | 43b678a00a1f9a096104f462d52412d2b59c1b68 | code | 3866 | using ObjectOriented
@oodef struct IVehicle
function get_speed end
function info end
end
@oodef mutable struct Vehicle <: IVehicle
m_speed :: Float64
function new(speed)
@mk begin
m_speed = speed
end
end
end
@oodef mutable struct Bus <: Vehicle
function new(speed::Float64)
@mk begin
Vehicle(speed)
end
end
# override
function info(self)
""
end
# override
function get_speed(self)
self.m_speed
end
end
@oodef struct IHouse
function get_rooms end
function set_rooms end
function can_cook end
end
@oodef mutable struct House <: IHouse
m_rooms :: Int
function new(rooms::Int)
@mk begin
# IHouse()
m_rooms = rooms
end
end
# class methods
# virtual methods
# override
function get_rooms(self)
self.m_rooms
end
# override
function set_rooms(self, value)
self.m_rooms = value
end
# override
function can_cook(self)
true
end
end
@oodef mutable struct HouseBus <: {Bus, House}
m_power :: String
function new(speed::Float64, rooms::Int, power :: String = "oil")
@mk begin
Bus(speed), House(rooms)
m_power = power
end
end
function get_power(self)
self.m_power
end
function set_power(self, v)
self.m_power = v
end
# override
function info(self)
"power = $(self.m_power), " *
"speed = $(self.get_speed()), " *
"rooms=$(self.get_rooms())"
end
end
@oodef mutable struct RailBus <: {Bus}
m_power :: String
function new(speed::Float64)
@mk begin
Bus(speed)
m_power="electricity"
end
end
function get_power(self)
self.m_power
end
# override
function info(self)
"power = $(self.m_power), " *
"speed = $(self.get_speed())"
end
end
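# Four containers with the same kind of contents but different element types
# (interface-typed via @like, Any, a small Union, and a concrete type);
# they are used below to compare field access and dispatch costs.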
housebuses = @like(IVehicle)[
[HouseBus(rand(Float64), 2) for i in 1:5000]...,
[RailBus(rand(Float64)) for i in 1:5000]...
]
any_buses = Any[
[HouseBus(rand(Float64), 2) for i in 1:5000]...,
[RailBus(rand(Float64)) for i in 1:5000]...
]
union_buses = Union{HouseBus, RailBus}[
[HouseBus(rand(Float64), 2) for i in 1:5000]...,
[RailBus(rand(Float64)) for i in 1:5000]...
]
monotype_buses = HouseBus[
[HouseBus(rand(Float64), 2) for i in 1:10000]...,
]
function f(buses::Vector)
for bus in buses
info = bus.info()
if bus isa HouseBus
cook = bus.can_cook()
@info typeof(bus) info cook
else
@info typeof(bus) info
end
end
end
function sum_speeds(buses::Vector)
sum(buses; init=0.0) do bus
bus.get_speed()
end
end
function sum_speeds_forloop(buses::Vector)
s = 0.0
@inbounds for bus in buses
s += bus.get_speed() :: Float64
end
s
end
function g(o::@like(IVehicle))
o.get_speed()
end
function g(o::HouseBus)
x = get_base(o, Bus)
@typed_access x.get_speed()
end
hb = HouseBus(80.0, 2)
rb = RailBus(80.0)
using InteractiveUtils
@info :housebus code_typed(g, (HouseBus, ))
@info :housebus @code_typed g(hb)
@info :housebus @code_llvm g(hb)
# @info :railbus @code_llvm g(rb)
using BenchmarkTools
# @btime sum_speeds(housebuses)
# @btime sum_speeds(any_buses)
# @btime sum_speeds(union_buses)
# @btime sum_speeds(monotype_buses)
@btime sum_speeds_forloop(housebuses)
@btime sum_speeds_forloop(any_buses)
@btime sum_speeds_forloop(union_buses)
@btime sum_speeds_forloop(monotype_buses)
| ObjectOriented | https://github.com/Suzhou-Tongyuan/ObjectOriented.jl.git |
|
[
"MIT"
] | 0.1.4 | 43b678a00a1f9a096104f462d52412d2b59c1b68 | code | 3499 | module A
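# Module A: field access and method dispatch through a five-level chain of immutable
# OO structs; module B below repeats the same benchmark with mutable structs.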
using ObjectOriented
using BenchmarkTools
const T = Any
@oodef struct Base1
a :: T
function new(a::T)
@mk begin
a = a
end
end
function identity_a(self)
self
end
end
@oodef struct Base2 <: Base1
b :: T
function new(a::T, b::T)
@mk begin
@base(Base1) = Base1(a)
b = b
end
end
function identity_b(self)
self
end
end
@oodef struct Base3 <: Base2
c :: T
function new(a::T, b::T, c::T)
@mk begin
@base(Base2) = Base2(a, b)
c = c
end
end
function identity_c(self)
self
end
end
@oodef struct Base4 <: Base3
d :: T
function new(a::T, b::T, c::T, d::T)
@mk begin
@base(Base3) = Base3(a, b, c)
d = d
end
end
function identity_d(self)
self
end
end
@oodef struct Base5 <: Base4
e :: T
function new(a::T, b::T, c::T, d::T, e::T)
@mk begin
@base(Base4) = Base4(a, b, c, d)
e = e
end
end
function identity_e(self)
self
end
end
inst_o = Base5(1, 2, 3, 4, 5)
@info :struct
@btime inst_o.a
@btime inst_o.b
@btime inst_o.c
@btime inst_o.d
@btime inst_o.e
@btime inst_o.identity_a()
@btime inst_o.identity_b()
@btime inst_o.identity_c()
@btime inst_o.identity_d()
@btime inst_o.identity_e()
bases = [Base5(1, 2, 3, 4, 5) for i in 1:10000]
function sum_all(bases)
s = 0
for each in bases
s += each.a :: Int
end
return s
end
using BenchmarkTools
@btime sum_all(bases)
end
module B
using ObjectOriented
using BenchmarkTools
const T = Any
@oodef mutable struct Base1
a :: T
function new(a::T)
@mk begin
a = a
end
end
function identity_a(self)
self
end
end
@oodef mutable struct Base2 <: Base1
b :: T
function new(a::T, b::T)
@mk begin
@base(Base1) = Base1(a)
b = b
end
end
function identity_b(self)
self
end
end
@oodef mutable struct Base3 <: Base2
c :: T
function new(a::T, b::T, c::T)
@mk begin
@base(Base2) = Base2(a, b)
c = c
end
end
function identity_c(self)
self
end
end
@oodef mutable struct Base4 <: Base3
d :: T
function new(a::T, b::T, c::T, d::T)
@mk begin
@base(Base3) = Base3(a, b, c)
d = d
end
end
function identity_d(self)
self
end
end
@oodef mutable struct Base5 <: Base4
e :: T
function new(a::T, b::T, c::T, d::T, e::T)
@mk begin
@base(Base4) = Base4(a, b, c, d)
e = e
end
end
function identity_e(self)
self
end
end
inst_o = Base5(1, 2, 3, 4, 5)
@info :class
@btime inst_o.a
@btime inst_o.b
@btime inst_o.c
@btime inst_o.d
@btime inst_o.e
@btime inst_o.identity_a()
@btime inst_o.identity_b()
@btime inst_o.identity_c()
@btime inst_o.identity_d()
@btime inst_o.identity_e()
bases = [Base5(1, 2, 3, 4, 5) for i in 1:10000]
function sum_all(bases)
s = 0
for each in bases
s += each.a :: Int
end
return s
end
using BenchmarkTools
@btime sum_all(bases)
end
| ObjectOriented | https://github.com/Suzhou-Tongyuan/ObjectOriented.jl.git |
|
[
"MIT"
] | 0.1.4 | 43b678a00a1f9a096104f462d52412d2b59c1b68 | code | 3205 | ## Type instability 1: fields
mutable struct X
a::Any
end
xs = [X(1) for i = 1:10000]
function sum1(xs::AbstractVector{X})
s = 0
for x in xs
s += x.a
end
return s
end
function sum2(xs::AbstractVector{X})
s = 0
for x in xs
s += x.a :: Int
end
return s
end
using BenchmarkTools
@btime sum1(xs)
# 147.800 μs (9489 allocations: 148.27 KiB)
10000
@btime sum2(xs)
# 5.567 μs (1 allocation: 16 bytes)
10000
# To detect performance problems, you can use `@code_warntype` to check type stability,
# and `@code_llvm` to check whether `jl_apply_generic` is called.
#
# `@code_llvm sum1(xs)` or `code_llvm(sum1, (typeof(xs), ))`:
# you will find `jl_apply_generic`, which means dynamic dispatch.
#
# Julia's dynamic dispatch performs poorly, even worse than Python.
## Type instability 2: array element types
using BenchmarkTools
function fslow(n)
xs = [] # equals to 'Any[]'
push!(xs, Ref(0))
s = 0
for i = 1:n
xs[end][] = i
s += xs[end][]
end
return s
end
function ffast(n)
xs = Base.RefValue{Int}[]
push!(xs, Ref(0))
s = 0
for i = 1:n
xs[end][] = i
s += xs[end][]
end
return s
end
@btime fslow(10000)
# 432.200 μs (28950 allocations: 452.44 KiB)
50005000
@btime ffast(10000)
# 4.371 μs (3 allocations: 144 bytes)
50005000
"""
class Ref:
def __init__(self, x):
self.x = x
def fpython(n):
xs = []
xs.append(Ref(0))
s = 0
for i in range(n):
xs[-1].v = i
s += xs[-1].v
return s
%timeit fpython(10000)
# 1.03 ms ± 13.3 µs per loop (mean ± std. dev. of 7 runs, 1,000 loops each)
"""
# `InteractiveUtils.@code_warntype` can reveal type-instability problems. Yellow code indicates a potential problem, red code indicates a definite one.
@code_warntype fslow(10000)
@code_warntype ffast(10000)
## Type instability 3: type-unstable global variables
int64_t = Int
float64_t = Float64
scalar = 3
function sum_ints1(xs::Vector)
s = 0
for x in xs
if x isa int64_t
s += x * scalar
end
end
return s
end
# const Int = Int
const const_scalar = 3
function sum_ints2(xs::Vector)
s = 0
for x in xs
if x isa Int
s += x * const_scalar
end
end
return s
end
"""
scalar = 3
def sum_ints(xs):
s = 0
for x in xs:
if isinstance(x, int):
s += x
return s
data = [1 if i % 2 == 0 else "2" for i in range(1, 1000001)]
%timeit sum_ints(data)
# 59.2 ms ± 2 ms per loop (mean ± std. dev. of 7 runs, 10 loops each)
"""
data = [i % 2 == 0 ? 1 : "2" for i = 1:1000000]
@btime sum_ints1(data)
# 18.509 ms (499830 allocations: 7.63 MiB)
1500000
@btime sum_ints2(data)
# 476.600 μs (1 allocation: 16 bytes)
1500000
## The performance problem can be seen with `@code_warntype`:
@code_warntype sum_ints1(data)
@code_warntype sum_ints2(data)
## Performance problems in the top-level (global) scope
xs = ones(Int, 1000000)
t0 = time_ns()
s = 0
for each in xs
s += each
end
s
println("time elapsed: ", time_ns() - t0, "ns")
# time elapsed: 115_459_800ns
@noinline test_loop(xs) = begin
t0 = time_ns()
s = 0
for each in xs
s += each
end
println("time elapsed: ", time_ns() - t0, "ns")
return s
end
test_loop(xs) === 1000000
# time elapsed: 1_095_200ns
| ObjectOriented | https://github.com/Suzhou-Tongyuan/ObjectOriented.jl.git |
|
[
"MIT"
] | 0.1.4 | 43b678a00a1f9a096104f462d52412d2b59c1b68 | code | 912 | using ObjectOriented
using Documenter
DocMeta.setdocmeta!(ObjectOriented, :DocTestSetup, :(using ObjectOriented); recursive=true)
makedocs(;
modules=[ObjectOriented],
authors="thautwarm <[email protected]> and contributors",
repo="https://github.com/Suzhou-Tongyuan/ObjectOriented.jl/blob/{commit}{path}#{line}",
sitename="ObjectOriented.jl",
format=Documenter.HTML(;
prettyurls=get(ENV, "CI", "false") == "true",
canonical="https://Suzhou-Tongyuan.github.io/ObjectOriented.jl",
edit_link="main",
assets=String[],
),
pages=[
"Home" => "index.md",
"Cheat Sheet" => "cheat-sheet-en.md",
# "index-cn.md",
# "cheat-sheet-cn.md",
"Translating OOP into Idiomatic Julia" => "how-to-translate-oop-into-julia.md"
],
)
deploydocs(;
repo="github.com/Suzhou-Tongyuan/ObjectOriented.jl",
devbranch="main",
)
| ObjectOriented | https://github.com/Suzhou-Tongyuan/ObjectOriented.jl.git |
|
[
"MIT"
] | 0.1.4 | 43b678a00a1f9a096104f462d52412d2b59c1b68 | code | 3645 | using ObjectOriented
## Define a class
@oodef mutable struct MyClass
attr::Int
function new(arg::Int)
        # the @mk macro: set fields and base classes
@mk begin
attr = arg
end
end
function f(self)
return self.attr
end
end
c = MyClass(1)
c.attr
c.f()
@code_typed c.f()
## Inheritance
@oodef mutable struct MySubclass <: MyClass
attr::Int # shadowing
function new(attr_base, attr)
a = attr + 1
@mk begin
@base(MyClass) = MyClass(attr_base)
attr = a
end
end
function base_f(self)
get_base(self, MyClass).f()
end
end
sc = MySubclass(100, 200)
sc.attr
sc.f()
sc.base_f()
@code_typed sc.base_f()
## Multiple inheritance and MRO
@oodef struct Base1
function func(self)
return "call base1"
end
end
@oodef struct Base2 <: Base1
function func(self)
return "call base2"
end
end
@oodef struct Base3 <: Base1
function func(self)
return "call base3"
end
function base3_func(self)
return "special: call base3"
end
end
@oodef struct Sub1 <: {Base2, Base3} end
Sub1().func()
Sub1().base3_func()
get_base(Sub1(), Base3).func()
## properties
@oodef mutable struct Square
area::Float64
function new(area::Number)
@mk begin
area = convert(Float64, area)
end
end
#= setter, getter
function get_side(self)
return sqrt(self.area)
end
function set_side(self, value::Number)
self.area = convert(Float64, value)^2
end
=#
# explicit property
@property(side) do
get = self -> sqrt(self.area)
set = (self, value) -> self.area = convert(Float64, value)^2
end
end
sq = Square(25)
sq.side
sq.side = 4
sq.area
sq.area = 36
sq.area = 36.0
sq.side
## Generics and interfaces
using ObjectOriented
@oodef struct AbstractMLModel{X, Y}
function fit! end
function predict end
end
using LsqFit
@oodef mutable struct LsqModel{M<:Function} <: AbstractMLModel{Vector{Float64},Vector{Float64}}
model::M
param::Vector{Float64}
function new(m::M, init_param::Vector{Float64})
@mk begin
model = m
param = init_param
end
end
function fit!(self, X::Vector{Float64}, y::Vector{Float64})
fit = curve_fit(self.model, X, y, self.param)
self.param = fit.param
self
end
function predict(self, x::Float64)
self.predict([x])
end
function predict(self, X::Vector{Float64})
return self.model(X, self.param)
end
end
# The example is from https://github.com/JuliaNLSolvers/LsqFit.jl
@. model(x, p) = p[1] * exp(-x * p[2])
clf = LsqModel(model, [0.5, 0.5])
ptrue = [1.0, 2.0]
xdata = collect(range(0, stop = 10, length = 20))
ydata = collect(model(xdata, ptrue) + 0.01 * randn(length(xdata)))
clf.fit!(xdata, ydata)
clf.predict(xdata)
clf.param
# For example, ScikitLearnBase provides the fit! and predict functions
using ScikitLearnBase
ScikitLearnBase.is_classifier(::@like(AbstractMLModel)) = true
ScikitLearnBase.fit!(clf::@like(AbstractMLModel{X, Y}), x::X, y::Y) where {X, Y} = clf.fit!(x, y)
ScikitLearnBase.predict(clf::@like(AbstractMLModel{X}), x::X) where X = clf.predict(x)
ScikitLearnBase.fit!(clf, xdata, ydata)
ScikitLearnBase.predict(clf, xdata)
# Some helper functions
isinstance(clf, LsqModel)
isinstance(clf, AbstractMLModel)
issubclass(LsqModel, AbstractMLModel)
function f2(_::@like(AbstractMLModel))
end
## Julia type annotations do not support subclass/superclass conversion (Python has no type assertions, so no annotations are needed there); use the @like macro to accept interface-typed parameters
| ObjectOriented | https://github.com/Suzhou-Tongyuan/ObjectOriented.jl.git |
|
[
"MIT"
] | 0.1.4 | 43b678a00a1f9a096104f462d52412d2b59c1b68 | code | 170 | module ObjectOriented
export @oodef
using Reexport
include("runtime.jl")
@reexport using .RunTime
include("compile-time.jl")
@reexport using .CompileTime
end # module
| ObjectOriented | https://github.com/Suzhou-Tongyuan/ObjectOriented.jl.git |
|
[
"MIT"
] | 0.1.4 | 43b678a00a1f9a096104f462d52412d2b59c1b68 | code | 611 | import MacroTools
rmlines(x) = x
function rmlines(x::Expr)
if x.head === :macrocall && length(x.args) >= 2
Expr(x.head, x.args[1], x.args[2], filter(x->!MacroTools.isline(x), x.args[3:end])...)
else
Expr(x.head, filter(x->!MacroTools.isline(x), x.args)...)
end
end
_striplines(ex) = MacroTools.prewalk(rmlines, ex)
# this is a duplicate of MacroTools.@q but avoid removing line numbers
# from macrocalls:
# see: https://github.com/FluxML/MacroTools.jl/blob/d1937f95a7e5c82f9cc3b5a4f8a2b33fdb32f884/src/utils.jl#L33
macro q(ex)
# esc(Expr(:quote, ex))
esc(Expr(:quote, _striplines(ex)))
end
| ObjectOriented | https://github.com/Suzhou-Tongyuan/ObjectOriented.jl.git |
|
[
"MIT"
] | 0.1.4 | 43b678a00a1f9a096104f462d52412d2b59c1b68 | code | 12443 | import ObjectOriented.RunTime: Object
using DataStructures
Base.@enum PropertyKind MethodKind GetterPropertyKind SetterPropertyKind
MLStyle.is_enum(::PropertyKind) = true
MLStyle.pattern_uncall(e::PropertyKind, _, _, _, _) = MLStyle.AbstractPatterns.literal(e)
struct PropertyDefinition
name::Symbol
def::Any # nothing for abstract methods
from_type::Any
kind :: PropertyKind
end
lift_to_quot(x::PropertyDefinition) = Expr(:call, PropertyDefinition, lift_to_quot(x.name), x.def, x.from_type, x.kind)
lift_to_quot(x::Symbol) = QuoteNode(x)
lift_to_quot(x) = x
function lift_to_quot(x::Dict)
pair_exprs = Expr[:($(lift_to_quot(k)) => $(lift_to_quot(v))) for (k, v) in x]
:($Dict($(pair_exprs...)))
end
struct PropertyName
is_setter :: Bool
name :: Symbol
end
macro getter_prop(name)
esc(:($PropertyName(false, $name)))
end
macro setter_prop(name)
esc(:($PropertyName(true, $name)))
end
Base.show(io::IO, prop_id::PropertyName) = print(io, string(prop_id.name) * " " * (prop_id.is_setter ? "(setter)" : "(getter)"))
const sym_generic_type = Symbol("generic::Self")
Base.@inline function _union(::Type{<:Object{I}}, ::Type{<:Object{J}}) where {I, J}
Object{Union{I, J}}
end
Base.@pure function _merge_shape_types(@nospecialize(t), @nospecialize(xs...))
ts = Any[t]
for x in xs
x isa DataType || error("invalid base type $x")
append!(ts, _shape_type(x).parameters)
end
Object{Union{ts...}}
end
"""map a Julia type to its **shape type**.
Given an OO type `Cls` whose MRO is `[Cls, Base1, Base2]`,
the corresponding shape type is `_shape_type(Cls) == Object{Union{Cls, Base1, Base2}}`.
"""
_shape_type(x) = x
Base.@pure function like(@nospecialize(t))
if t <: Object
x = _shape_type(t)
if x === t
error("invalid base type $t: no an OO type")
end
if x isa DataType
v = gensym(:t)
vt = TypeVar(v, x.parameters[1], Any)
UnionAll(vt, Core.apply_type(Object, vt))
else
ts = Any[]
while x isa UnionAll
push!(ts, x.var)
x = x.body
end
v = gensym(:t)
vt = TypeVar(v, x.parameters[1], Any)
x = UnionAll(vt, Core.apply_type(Object, vt))
while !isempty(ts)
x = UnionAll(pop!(ts), x)
end
x
end
else
error("$t is not an object type")
end
end
"""`@like(type)` convert a Julia type to its covariant shape type.
Given an OO type `Cls` whose MRO is `[Cls, Base1, Base2]`,
the corresponding covariant shape type is `@like(Cls) == Object{U} where U >: Union{Cls, Base1, Base2}`.
"""
macro like(t)
esc(:($like($t)))
end
"""
define (abstract) properties such as getters and setters:
```
## Abstract
@oodef struct IXXX
@property(field) do
get
set
end
end
## Concrete
@oodef struct XXX1 <: IXXX
@property(field) do
get = self -> 1
set = (self, value) -> ()
end
end
```
"""
macro property(f, ex)
esc(Expr(:do, :(define_property($ex)), f))
end
macro base(X)
esc(:($ObjectOriented.RunTime.InitField{$ObjectOriented.base_field($sym_generic_type, $X), $X}()))
end
@inline function _base_initfield(generic_type, :: Type{X}) where X
ObjectOriented.RunTime.InitField{ObjectOriented.base_field(generic_type, X), X}()
end
macro mk()
esc(@q begin
$__source__
$ObjectOriented.construct(ObjectOriented.RunTime.default_initializers($sym_generic_type), $sym_generic_type)
end)
end
function _mk_arguments!(__module__::Module, __source__::LineNumberNode, arguments::Vector{Any}, ln::LineNumberNode, arg)
@switch arg begin
@case ::LineNumberNode
ln = arg
return ln
@case Expr(:tuple, args...)
for arg in args
ln = _mk_arguments!(__module__, __source__, arguments, ln, arg)
end
return ln
@case Expr(:call, _...)
sym_basecall = gensym("basecall")
push!(arguments, Expr(
:block,
ln,
:($sym_basecall = $arg),
:($_base_initfield($sym_generic_type, typeof($sym_basecall)))
))
push!(arguments, sym_basecall)
return ln
@case :($a = $b)
a = __module__.macroexpand(__module__, a)
if a isa Symbol
a = :($ObjectOriented.RunTime.InitField{$(QuoteNode(a)), nothing}())
end
push!(arguments, Expr(:block, ln, a))
push!(arguments, Expr(:block, ln, b))
return ln
@case _
error("invalid construction statement $arg")
end
end
macro mk(ex)
arguments = []
ln = __source__
@switch ex begin
@case Expr(:block, args...)
for arg in args
ln = _mk_arguments!(__module__, __source__, arguments, ln, arg)
end
@case _
ln = _mk_arguments!(__module__, __source__, arguments, ln, ex)
end
esc(@q begin
$__source__
# $ObjectOriented.check_abstract($sym_generic_type) # TODO: a better mechanism to warn abstract classes
$ObjectOriented.construct(ObjectOriented.RunTime.default_initializers($sym_generic_type), $sym_generic_type, $(arguments...))
end)
end
mutable struct CodeGenInfo
cur_mod :: Module
base_dict :: OrderedDict{Type, Symbol}
fieldnames :: Vector{Symbol}
typename :: Symbol
class_where :: Vector
class_ann :: Any
methods :: Vector{PropertyDefinition}
method_dict :: OrderedDict{PropertyName, PropertyDefinition}
outblock :: Vector
end
function codegen(cur_line :: LineNumberNode, cur_mod::Module, type_def::TypeDef)
base_dict = OrderedDict{Type, Symbol}()
struct_block = []
outer_block = []
methods = PropertyDefinition[]
fieldnames = Symbol[]
typename = type_def.name
default_inits = Expr[]
traitname = Symbol(typename, "::", :trait)
traithead = apply_curly(traitname, Symbol[p.name for p in type_def.typePars])
custom_init :: Bool = false
class_where = type_def_create_where(type_def)
class_ann = type_def_create_ann(type_def)
for each::FieldInfo in type_def.fields
push!(struct_block, each.ln)
if each.name in fieldnames
throw(create_exception(each.ln, "duplicate field name $(each.name)"))
end
push!(fieldnames, each.name)
type_expr = each.type
if each.defaultVal isa Undefined
else
fname = gensym("$typename::create_default_$(each.name)")
fun = Expr(:function, :($fname()), Expr(:block, each.ln, each.ln, each.defaultVal))
push!(outer_block, fun)
push!(default_inits, :($(each.name) = $fname))
end
push!(struct_block, :($(each.name) :: $(type_expr)))
end
for each::FuncInfo in type_def.methods
if each.name === typename
throw(create_exception(each.ln, "methods having the same name as the class is not allowed"))
end
if each.name === :new
# automatically create type parameters if 'self' parameter is not annotated
each.typePars = TypeParamInfo[type_def.typePars..., each.typePars...]
insert!(each.body.args, 1, :($sym_generic_type = $class_ann))
insert!(each.body.args, 1, each.ln)
each.name = typename
push!(struct_block, each.ln)
push!(struct_block, to_expr(each))
if !isempty(type_def.typePars)
each.name = class_ann
push!(struct_block, to_expr(each))
end
custom_init = true
continue
end
push!(outer_block, each.ln)
name = each.name
meth_name = Symbol(typename, "::", "method", "::", name)
each.name = meth_name
if length(each.pars) >= 1 && each.pars[1].type isa Undefined
each.pars[1].type = :($like($class_ann))
each.typePars = TypeParamInfo[type_def.typePars..., each.typePars...]
end
push!(outer_block, try_pushmeta!(to_expr(each), :inline))
push!(methods,
PropertyDefinition(
name,
each.isAbstract ? missing : :($cur_mod.$meth_name),
:($cur_mod.$typename),
MethodKind))
end
for each::PropertyInfo in type_def.properties
push!(outer_block, each.ln)
name = each.name
if !(each.set isa Undefined)
prop = each.set
if length(prop.pars) >= 1 && prop.pars[1].type isa Undefined
prop.pars[1].type = :($like($class_ann))
prop.typePars = TypeParamInfo[type_def.typePars..., prop.typePars...]
end
meth_name = Symbol(typename, "::", "setter", "::", name)
prop.name = meth_name
key = @setter_prop(name)
push!(outer_block, try_pushmeta!(to_expr(prop), :inline))
push!(methods,
PropertyDefinition(
name,
prop.isAbstract ? missing : :($cur_mod.$meth_name),
:($cur_mod.$typename),
SetterPropertyKind))
end
if !(each.get isa Undefined)
prop = each.get
if length(prop.pars) >= 1 && prop.pars[1].type isa Undefined
prop.pars[1].type = :($like($class_ann))
prop.typePars = TypeParamInfo[type_def.typePars..., prop.typePars...]
end
meth_name = Symbol(typename, "::", "getter", "::", name)
prop.name = meth_name
key = @getter_prop(name)
push!(outer_block, to_expr(prop))
push!(methods,
PropertyDefinition(
name,
prop.isAbstract ? missing : :($cur_mod.$meth_name),
:($cur_mod.$typename),
GetterPropertyKind))
end
end
for (idx, each::TypeRepr) in enumerate(type_def.bases)
base_name_sym = Symbol(typename, "::layout$idx::", string(each.base))
base_dict[Base.eval(cur_mod, each.base)] = base_name_sym
type_expr = to_expr(each)
push!(struct_block, :($base_name_sym :: $type_expr))
push!(outer_block,
:(Base.@inline $ObjectOriented.base_field(::Type{$class_ann}, ::Type{$type_expr}) where {$(class_where...)} = $(QuoteNode(base_name_sym))))
end
if !custom_init
push!(
outer_block,
let generic_type = class_ann
@q if $(type_def.isMutable) || sizeof($typename) == 0
@eval function $typename()
$cur_line
$sym_generic_type = $class_ann
$(Expr(:macrocall, getfield(ObjectOriented, Symbol("@mk")), cur_line))
end
end
end
)
end
defhead = apply_curly(typename, class_where)
expr_default_initializers =
isempty(default_inits) ? :(NamedTuple()) : Expr(:tuple, default_inits...)
outer_block = [
[
:(struct $traithead end),
Expr(:struct,
type_def.isMutable,
:($defhead <: $_merge_shape_types($traithead, $(to_expr.(type_def.bases)...))),
Expr(:block, struct_block...)),
:($ObjectOriented.CompileTime._shape_type(t::$Type{<:$typename}) = $supertype(t)),
];
[
:($ObjectOriented.RunTime.default_initializers(t::$Type{<:$typename}) = $expr_default_initializers)
];
outer_block
]
cgi = CodeGenInfo(
cur_mod,
base_dict,
fieldnames,
typename,
class_where,
class_ann,
methods,
OrderedDict{PropertyName, PropertyDefinition}(),
outer_block
)
build_multiple_dispatch!(cur_line, cgi)
Expr(:block, cgi.outblock...)
end
| ObjectOriented | https://github.com/Suzhou-Tongyuan/ObjectOriented.jl.git |
|
[
"MIT"
] | 0.1.4 | 43b678a00a1f9a096104f462d52412d2b59c1b68 | code | 1715 | function fix_path(base, t, path)
(t, (base, path...))
end
function cls_linearize(::Type{root}) where root
bases = ObjectOriented.ootype_bases(root)
chains = [[fix_path(base, t, path) for (t, path) in ObjectOriented.ootype_mro(base)] for base in bases]
resolved = linearize(Tuple{Type, Tuple}, chains) do l, r
l[1] === r[1]
end
insert!(resolved, 1, (root, ()))
resolved
end
function cls_linearize(bases::Vector)::Vector{Tuple{Type, Tuple}}
chains = [[fix_path(base, t, path) for (t, path) in ObjectOriented.ootype_mro(base)] for base in bases]
linearize(Tuple{Type, Tuple}, chains) do l, r
l[1] === r[1]
end
end
# C3-style merge: repeatedly pick a candidate that appears only at the head of the
# remaining chains, append it to the MRO, and strip it from every chain.
function linearize(eq, ::Type{T}, xs::Vector) where T
mro = T[]
bases = T[K[1] for K in xs]
xs = reverse!([reverse(K) for K in xs])
while !isempty(xs)
for i in length(xs):-1:1
top = xs[i][end]
for j in eachindex(xs)
j === i && continue
K = xs[j]
for k = 1:length(K)-1
if eq(K[k], top)
@goto not_top
end
end
end
push!(mro, top)
for j in length(xs):-1:1
K = xs[j]
if eq(K[end], top)
pop!(K)
if isempty(K)
deleteat!(xs, j)
end
end
end
@goto find_top
@label not_top
end
error("Cannot create a consistent method resolution order (MRO) for $bases")
@label find_top
end
return mro
end
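# Illustrative sketch (not part of the original source): how `linearize` performs the
# C3-style merge. The chains below model a diamond D <: B <: A, D <: C <: A and are
# purely hypothetical; plain symbols stand in for the (Type, path) tuples used above.
function _linearize_demo()
    chains = [[:B, :A], [:C, :A]]
    # With `===` as the equality test, the merged order is [:B, :C, :A].
    linearize(===, Symbol, chains)
end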
| ObjectOriented | https://github.com/Suzhou-Tongyuan/ObjectOriented.jl.git |
|
[
"MIT"
] | 0.1.4 | 43b678a00a1f9a096104f462d52412d2b59c1b68 | code | 789 | module CompileTime
export @oodef, @mk, @base, @property
export PropertyName, like, @like
if isdefined(Base, :Experimental) && isdefined(Base.Experimental, Symbol("@compiler_options"))
@eval Base.Experimental.@compiler_options compile=min infer=no optimize=0
end
import ObjectOriented
using MLStyle
using DataStructures
include("compat.q.jl")
include("compile-time.utils.jl")
include("compile-time.reflection.jl")
include("compile-time.c3_linearize.jl")
include("compile-time.buildclass.jl")
include("compile-time.static_dispatch.jl")
macro oodef(ex)
preprocess(x) = Base.macroexpand(__module__, x)
type_def = parse_class(__source__, ex, preprocess=preprocess)
esc(canonicalize_where(codegen(__source__, __module__, type_def)))
end
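    # Hedged usage sketch (illustration only; `Point` and its fields are hypothetical).
    # A typical `@oodef` class definition looks roughly like:
    #
    #     @oodef mutable struct Point
    #         x :: Float64
    #         y :: Float64
    #         function new(x, y)
    #             @mk begin
    #                 x = x
    #                 y = y
    #             end
    #         end
    #     end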
end # module
| ObjectOriented | https://github.com/Suzhou-Tongyuan/ObjectOriented.jl.git |
|
[
"MIT"
] | 0.1.4 | 43b678a00a1f9a096104f462d52412d2b59c1b68 | code | 15398 | using MLStyle
struct Undefined end
const NullSymbol = Union{Symbol, Undefined}
const _undefined = Undefined()
const PVec{T, N} = NTuple{N, T}
const _pseudo_line = LineNumberNode(1, Symbol("<PSEUDO>"))
Base.@kwdef struct TypeRepr
base :: Any = _undefined
typePars :: PVec{TypeRepr} = ()
end
to_expr(t::TypeRepr) =
if isempty(t.typePars)
t.base
else
:($(t.base){$(to_expr.(t.typePars)...)})
end
Base.@kwdef mutable struct ParamInfo
name :: Any = _undefined
type :: Any = _undefined
defaultVal :: Any = _undefined
meta :: Vector{Any} = []
isVariadic :: Bool = false
end
function to_expr(p::ParamInfo)
res = if p.name isa Undefined
@assert !(p.type isa Undefined)
:(::$(p.type))
else
if p.type isa Undefined
p.name
else
:($(p.name)::$(p.type))
end
end
if p.isVariadic
res = Expr(:..., res)
end
if !(p.defaultVal isa Undefined)
res = Expr(:kw, res, p.defaultVal)
end
if !isempty(p.meta)
res = Expr(:meta, p.meta..., res)
end
return res
end
Base.@kwdef struct TypeParamInfo
name :: Symbol
lb :: Union{TypeRepr, Undefined} = _undefined
ub :: Union{TypeRepr, Undefined} = _undefined
end
function to_expr(tp::TypeParamInfo)
if tp.lb isa Undefined
if tp.ub isa Undefined
tp.name
else
:($(tp.name) <: $(to_expr(tp.ub)))
end
else
if tp.ub isa Undefined
:($(tp.name) >: $(to_expr(tp.lb)))
else
:($(to_expr(tp.lb)) <: $(tp.name) <: $(to_expr(tp.ub)))
end
end
end
Base.@kwdef mutable struct FuncInfo
ln :: LineNumberNode = _pseudo_line
name :: Any = _undefined
pars :: Vector{ParamInfo} = ParamInfo[]
kwPars :: Vector{ParamInfo} = ParamInfo[]
typePars :: Vector{TypeParamInfo} = TypeParamInfo[]
returnType :: Any = _undefined # can be _undefined
body :: Any = _undefined # can be _undefined
isAbstract :: Bool = false
end
function to_expr(f::FuncInfo)
if f.isAbstract
return :nothing
else
args = []
if !isempty(f.kwPars)
kwargs = Expr(:parameters)
push!(args, kwargs)
for each in f.kwPars
push!(kwargs.args, to_expr(each))
end
end
for each in f.pars
push!(args, to_expr(each))
end
header = if f.name isa Undefined
Expr(:tuple, args...)
else
Expr(:call, f.name, args...)
end
if !(f.returnType isa Undefined)
header = :($header :: $(f.returnType))
end
if !isempty(f.typePars)
header = :($header where {$(to_expr.(f.typePars)...)})
end
return Expr(:function, header, f.body)
end
end
Base.@kwdef struct FieldInfo
ln :: LineNumberNode
name :: Symbol
type :: Any = :Any
defaultVal :: Any = _undefined
end
Base.@kwdef struct PropertyInfo
ln :: LineNumberNode
name :: Symbol
get :: Union{Undefined, FuncInfo} = _undefined
set :: Union{Undefined, FuncInfo} = _undefined
end
Base.@kwdef mutable struct TypeDef
ln :: LineNumberNode = _pseudo_line
name :: Symbol = :_
typePars :: Vector{TypeParamInfo} = TypeParamInfo[]
bases :: Vector{TypeRepr} = TypeRepr[]
fields :: Vector{FieldInfo} = FieldInfo[]
properties :: Vector{PropertyInfo} = PropertyInfo[]
methods :: Vector{FuncInfo} = FuncInfo[]
isMutable :: Bool = false
end
function type_def_create_ann(t::TypeDef)
if isempty(t.typePars)
t.name
else
:($(t.name){$([p.name for p in t.typePars]...)})
end
end
function type_def_create_where(t::TypeDef)
res = []
for each in t.typePars
push!(res, to_expr(each))
end
res
end
function parse_class(ln::LineNumberNode, def; preprocess::T=nothing) where T
@switch def begin
@case :(mutable struct $defhead; $(body...) end)
type_def = TypeDef()
type_def.ln = ln
type_def.isMutable = true
parse_class_header!(ln, type_def, defhead, preprocess = preprocess)
parse_class_body!(ln, type_def, body, preprocess = preprocess)
return type_def
@case :(struct $defhead; $(body...) end)
type_def = TypeDef()
type_def.ln = ln
type_def.isMutable = false
parse_class_header!(ln, type_def, defhead, preprocess = preprocess)
parse_class_body!(ln, type_def, body, preprocess = preprocess)
return type_def
@case _
throw(create_exception(ln, "invalid type definition: $(string(def))"))
end
end
function parse_type_repr(ln::LineNumberNode, repr)
@switch repr begin
@case :($typename{$(generic_params...)})
return TypeRepr(typename, Tuple(parse_type_repr(ln, x) for x in generic_params))
@case typename
return TypeRepr(typename, ())
@case _
throw(create_exception(ln, "invalid type representation: $repr"))
end
end
function parse_class_header!(ln::LineNumberNode, type_def::TypeDef, defhead; preprocess::T=nothing) where T
if preprocess !== nothing
defhead = preprocess(defhead)
end
@switch defhead begin
@case :($defhead <: {$(t_bases...)})
for x in t_bases
push!(type_def.bases, parse_type_repr(ln, x))
end
@case :($defhead <: $t_base)
push!(type_def.bases, parse_type_repr(ln, t_base))
@case _
end
local typename
@switch defhead begin
@case :($typename{$(generic_params...)})
for p in generic_params
push!(type_def.typePars, parse_type_parameter(ln, p))
end
@case typename
end
if typename isa Symbol
type_def.name = typename
else
throw(create_exception(ln, "typename is invalid: $typename"))
end
end
function parse_class_body!(ln::LineNumberNode, self::TypeDef, body; preprocess::T=nothing) where T
for x in body
if preprocess !== nothing
x = preprocess(x)
end
if x isa LineNumberNode
ln = x
continue
end
if Meta.isexpr(x, :block)
parse_class_body!(ln, self, x.args; preprocess = preprocess)
continue
end
if (field_info = parse_field_def(ln, x, fallback = nothing)) isa FieldInfo
push!(self.fields, field_info)
continue
end
if (prop_info = parse_property_def(ln, x, fallback = nothing)) isa PropertyInfo
push!(self.properties, prop_info)
continue
end
if (func_info = parse_function(ln, x, fallback = nothing, allow_lambda = false, allow_short_func = false)) isa FuncInfo
push!(self.methods, func_info)
continue
end
throw(create_exception(ln, "unrecognised statement in $(self.name) definition: $(x)"))
end
end
function parse_field_def(ln :: LineNumberNode, f; fallback :: T = _undefined) where T
@match f begin
:($n :: $t) => FieldInfo(ln = ln, name = n, type = t)
:($n :: $t = $v) => FieldInfo(ln = ln, name = n, type = t, defaultVal = v)
:($n = $v) => FieldInfo(ln = ln, name = n, defaultVal = v)
n :: Symbol => FieldInfo(ln = ln, name = n)
_ =>
if fallback isa Undefined
throw(create_exception(ln, "invalid field declaration: $(string(f))"))
else
fallback
end
end
end
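# Hedged illustration (not in the original source): field forms accepted above include
# `a`, `a :: Int`, `a :: Int = 0`, and `a = 0` (names are hypothetical).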
function parse_property_def(ln::LineNumberNode, p; fallback :: T = _undefined) where T
@when Expr(:do, :(define_property($name)), Expr(:->, Expr(:tuple), Expr(:block, inner_body...))) = p begin
name isa Symbol || throw(create_exception(ln, "invalid property name: $name"))
setter :: Union{Undefined, FuncInfo} = _undefined
getter :: Union{Undefined, FuncInfo} = _undefined
for decl in inner_body
@switch decl begin
@case ln::LineNumberNode
@case :set
setter = FuncInfo(ln = ln, isAbstract=true)
@case :get
getter = FuncInfo(ln = ln, isAbstract=true)
@case :(set = $f)
if setter isa Undefined
setter = parse_function(ln, f, allow_lambda = true, allow_short_func = false)
else
throw(create_exception(ln, "multiple setters for property $name"))
end
@case :(get = $f)
if getter isa Undefined
getter = parse_function(ln, f, allow_lambda = true, allow_short_func = false)
else
throw(create_exception(ln, "multiple getters for property $name"))
end
@case _
throw(create_exception(ln, "invalid property declaration: $(string(decl))"))
end
end
return PropertyInfo(ln = ln, name = name, get = getter, set = setter)
@otherwise
if fallback isa Undefined
throw(create_exception(ln, "invalid property declaration: $(string(p))"))
else
return fallback
end
end
end
function parse_parameter(ln :: LineNumberNode, p; support_tuple_parameters=true)
self = ParamInfo()
parse_parameter!(ln, self, p, support_tuple_parameters)
return self
end
function parse_parameter!(ln :: LineNumberNode, self::ParamInfo, p, support_tuple_parameters)
@switch p begin
@case Expr(:meta, x, p)
push!(self.meta, x)
parse_parameter!(ln, self, p, support_tuple_parameters)
@case Expr(:..., p)
self.isVariadic = true
parse_parameter!(ln, self, p, support_tuple_parameters)
@case Expr(:kw, p, b)
self.defaultVal = b
parse_parameter!(ln, self, p, support_tuple_parameters)
@case :(:: $t)
self.type = t
nothing
@case :($p :: $t)
self.type = t
parse_parameter!(ln, self, p, support_tuple_parameters)
@case p::Symbol
self.name = p
nothing
@case Expr(:tuple, _...)
if support_tuple_parameters
self.name = p
else
throw(create_exception(ln, "tuple parameters are not supported"))
end
nothing
@case _
throw(create_exception(ln, "invalid parameter $p"))
end
end
function parse_type_parameter(ln :: LineNumberNode, t)
@switch t begin
@case :($lb <: $(t::Symbol) <: $ub) || :($ub >: $(t::Symbol) >: $lb)
TypeParamInfo(t, parse_type_repr(ln, lb), parse_type_repr(ln, ub))
@case :($(t::Symbol) >: $lb)
TypeParamInfo(t, parse_type_repr(ln, lb), _undefined)
@case :($(t::Symbol) <: $ub)
TypeParamInfo(t, _undefined, parse_type_repr(ln, ub))
@case t::Symbol
TypeParamInfo(t, _undefined, _undefined)
@case _
throw(create_exception(ln, "invalid type parameter $t"))
end
end
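# Hedged illustration (not in the original source): type-parameter forms accepted above
# include `T`, `T <: Real`, `T >: Int`, and `Int <: T <: Real` (names are hypothetical).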
function parse_function(ln :: LineNumberNode, ex; fallback :: T = _undefined, allow_short_func :: Bool = false, allow_lambda :: Bool = false) where T
self :: FuncInfo = FuncInfo()
self.ln = ln
@switch ex begin
@case Expr(:function, header, body)
self.body = body
self.isAbstract = false # unnecessary but clarified
parse_function_header!(ln, self, header; is_lambda = false, allow_lambda = allow_lambda)
return self
@case Expr(:function, header)
self.isAbstract = true
parse_function_header!(ln, self, header; is_lambda = false, allow_lambda = allow_lambda)
return self
@case Expr(:(->), header, body)
if !allow_lambda
throw(create_exception(ln, "lambda functions are not allowed here: $ex"))
end
self.body = body
self.isAbstract = false
parse_function_header!(ln, self, header; is_lambda = true, allow_lambda = true)
return self
@case Expr(:(=), Expr(:call, _...) && header, rhs)
if !allow_short_func
throw(create_exception(ln, "short functions are not allowed here: $ex"))
end
self.body = rhs
self.isAbstract = false
parse_function_header!(ln, self, header; is_lambda = false, allow_lambda = false)
return self
@case _
if fallback isa Undefined
throw(create_exception(ln, "invalid function expression: $ex"))
else
fallback
end
end
end
function parse_function_header!(ln::LineNumberNode, self::FuncInfo, header; is_lambda :: Bool = false, allow_lambda :: Bool = false)
typePars = self.typePars
@switch header begin
@case Expr(:where, header, tyPar_exprs...)
for tyPar_expr in tyPar_exprs
push!(typePars, parse_type_parameter(ln, tyPar_expr))
end
@case _
end
@switch header begin
@case Expr(:(::), header, returnType)
self.returnType = returnType
@case _
end
if is_lambda && !Meta.isexpr(header, :tuple)
header = Expr(:tuple, header)
end
@switch header begin
@case Expr(:call, f, Expr(:parameters, kwargs...), args...)
for x in kwargs
push!(self.kwPars, parse_parameter(ln, x))
end
for x in args
push!(self.pars, parse_parameter(ln, x))
end
self.name = f
@case Expr(:call, f, args...)
for x in args
push!(self.pars, parse_parameter(ln, x))
end
self.name = f
@case Expr(:tuple, Expr(:parameters, kwargs...), args...)
if !allow_lambda
            throw(create_exception(ln, "tuple function signatures are not allowed here."))
end
for x in kwargs
push!(self.kwPars, parse_parameter(ln, x))
end
for x in args
push!(self.pars, parse_parameter(ln, x))
end
@case Expr(:tuple, args...)
if !allow_lambda
            throw(create_exception(ln, "tuple function signatures are not allowed here."))
end
for x in args
push!(self.pars, parse_parameter(ln, x))
end
@case _
if !self.isAbstract
throw(create_exception(ln, "unrecognised function signature $header."))
else
self.name = header
end
end
end
| ObjectOriented | https://github.com/Suzhou-Tongyuan/ObjectOriented.jl.git |
|
[
"MIT"
] | 0.1.4 | 43b678a00a1f9a096104f462d52412d2b59c1b68 | code | 10050 | function _is_abstract(def::PropertyDefinition)
def.def === missing
end
function try_remove_prefix(f, prefix::Symbol, name::Symbol)
prefix_s = string(prefix)
name_s = string(name)
if startswith(name_s, prefix_s)
return f(Symbol(SubString(name_s, ncodeunits(prefix_s) + 1)))
end
end
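# Hedged illustration (not in the original source): `try_remove_prefix` calls the
# callback with the suffix only when `name` starts with `prefix`; otherwise it
# returns `nothing`. For example (hypothetical symbols):
#     try_remove_prefix(identity, :get_, :get_area)  # => :area
#     try_remove_prefix(identity, :get_, :area)      # => nothing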
const _has_warn_setter_dep = Ref(false)
const _has_warn_getter_dep = Ref(false)
function build_multiple_dispatch!(
ln::LineNumberNode,
cgi :: CodeGenInfo)
t = cgi.typename
rt_methods = cgi.methods
method_dict = cgi.method_dict
for proper_def in rt_methods
name :: PropertyName =
if proper_def.kind === SetterPropertyKind
@setter_prop(proper_def.name)
else
@getter_prop(proper_def.name)
end
if haskey(method_dict, name)
continue
end
method_dict[name] = proper_def
end
# use 'get_xxx' to generate a getter for 'xxx' & use 'set_xxx' to generate a setter for 'xxx'
for key in collect(keys(method_dict))
desc :: PropertyDefinition = method_dict[key]
if desc.kind === MethodKind
try_remove_prefix(:set_, desc.name) do name
if !_has_warn_setter_dep[]
@warn "properties using `set_xxx` are deprecated, use '@property($name) do; set = (self, value) -> statement end' instead."
_has_warn_setter_dep[] = true
end
local key = @setter_prop(name)
if !haskey(method_dict, key)
method_dict[key] = PropertyDefinition(name, desc.def, desc.from_type, SetterPropertyKind)
end
end
try_remove_prefix(:get_, desc.name) do name
if !_has_warn_getter_dep[]
@warn "properties using `get_xxx` are deprecated, use '@property($name) do; get = (self) -> value end' instead."
_has_warn_getter_dep[] = true
end
local key = @getter_prop(name)
if !haskey(method_dict, key)
method_dict[key] = PropertyDefinition(name, desc.def, desc.from_type, GetterPropertyKind)
end
end
end
end
cgi.method_dict = method_dict
out = cgi.outblock
base_dict = cgi.base_dict
push!(out,
:($ObjectOriented.ootype_bases(::$Type{<:$t}) = $(Set{Type}(keys(base_dict)))))
for (k, v) in cgi.base_dict
push!(out,
:($Base.@inline $ObjectOriented.base_field(::$Type{<:$t}, ::Type{<:$k}) = $(QuoteNode(v))))
end
push!(out,
:($ObjectOriented.direct_methods(::$Type{<:$t}) = $(QuoteNode(method_dict))))
build_multiple_dispatch2!(ln, cgi)
end
function build_multiple_dispatch2!(ln::LineNumberNode, cgi::CodeGenInfo)
t = cgi.typename
out = cgi.outblock
valid_fieldnames = cgi.fieldnames
push!(out, ln)
push!(out,
:($ObjectOriented.direct_fields(::Type{<:$t}) = $(Expr(:tuple, QuoteNode.(valid_fieldnames)...))))
build_multiple_dispatch3!(ln, cgi)
end
# Recursively follows `path` (a chain of base classes) so that the generated getter and
# setter read or write the field on the correct, possibly inherited, layout object.
function _build_field_getter_setter_for_pathed_base(push_getter!, push_setter!, this, path, sym)
@switch path begin
@case []
push_getter!(
QuoteNode(sym) =>
:($Base.getfield($this, $(QuoteNode(sym)))))
push_setter!(
QuoteNode(sym) =>
@q let this = $this
$Base.setfield!(
this,
$(QuoteNode(sym)),
$convert($fieldtype(typeof(this), $(QuoteNode(sym))), value))
end)
@case [head, path...]
_build_field_getter_setter_for_pathed_base(push_getter!, push_setter!, :($ObjectOriented.get_base($this, $head)), path, sym)
end
end
function _build_method_get(push_getter!, this, path, sym, funcval)
push_getter!(
QuoteNode(sym) =>
:($ObjectOriented.BoundMethod($this, $funcval)))
end
function _build_getter_property(push_getter!, this, path, sym, getter_func)
push_getter!(
QuoteNode(sym) =>
:($getter_func($this)))
end
function _build_setter_property(push_setter!, this, path, sym, setter_func)
push_setter!(
QuoteNode(sym) =>
:($setter_func($this, value)))
end
function build_if(pairs, else_block)
foldr(pairs; init=else_block) do (cond, then), r
Expr(:if, :(prop === $cond), then, r)
end
end
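# Hedged illustration (not in the original source): `build_if` folds `cond => expr`
# pairs into a nested `prop === cond` chain, e.g.
#     build_if([QuoteNode(:x) => :(getfield(this, :x))], :(fallback(this, prop)))
# produces, roughly:
#     if prop === :x
#         getfield(this, :x)
#     else
#         fallback(this, prop)
#     end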
function build_val_getters(t, pairs)
result = []
for (k, v) in pairs
exp = @q function $ObjectOriented.getproperty_typed(this::$t, ::Val{$(k)})
return $v
end
push!(result, exp)
end
result
end
function build_val_setters(t, pairs)
result = []
for (k, v) in pairs
exp = @q function $ObjectOriented.setproperty_typed!(this::$t, value, ::Val{$(k)})
return $v
end
push!(result, exp)
end
result
end
# Generates the per-class property resolution: getters/setters for all fields, methods,
# and properties along the MRO, plus the `getproperty`/`setproperty!`/`propertynames` overloads.
function build_multiple_dispatch3!(ln::LineNumberNode, cgi::CodeGenInfo)
t = cgi.typename
defined = OrderedSet{PropertyName}()
cur_mod = cgi.cur_mod
get_block = []
set_block = []
abstract_methods = Dict{PropertyName, PropertyDefinition}()
push_getter!(x) = push!(get_block, x)
push_setter!(x) = push!(set_block, x)
subclass_block = []
mro = cls_linearize(collect(keys(cgi.base_dict)))
mro_expr = let res = :([])
push!(res.args, :($(cgi.cur_mod).$(cgi.typename), ()))
append!(res.args, [Expr(:tuple, k, v) for (k, v) in mro])
res
end
function process_each!(
base::Union{Expr, Type},
path_tuple::(NTuple{N, Type} where N),
_direct_fields::(NTuple{N, Symbol} where N),
_direct_methods :: AbstractDict{PropertyName, PropertyDefinition}
)
path = Any[path_tuple...]
push!(
subclass_block,
:($Base.@inline $ObjectOriented.issubclass(::$Type{<:$cur_mod.$t}, ::$Type{<:$base}) = true))
for fieldname :: Symbol in _direct_fields
if @getter_prop(fieldname) in defined
continue
end
push!(defined, @getter_prop(fieldname), @setter_prop(fieldname))
_build_field_getter_setter_for_pathed_base(push_getter!, push_setter!, :this, path, fieldname)
end
for (methodname :: PropertyName, desc :: PropertyDefinition) in _direct_methods
def = desc.def
@switch (desc.kind, _is_abstract(desc)) begin
@case (MethodKind, true)
haskey(abstract_methods, methodname) && continue
abstract_methods[methodname] = desc
@case (MethodKind, false)
methodname in defined && continue
push!(defined, methodname)
_build_method_get(push_getter!, :this, path, methodname.name, def)
@case (GetterPropertyKind, true)
haskey(abstract_methods, methodname) && continue
abstract_methods[methodname] = desc
@case (GetterPropertyKind, false)
methodname in defined && continue
push!(defined, methodname)
_build_getter_property(push_getter!, :this, path, methodname.name, def)
@case (SetterPropertyKind, true)
haskey(abstract_methods, methodname) && continue
abstract_methods[methodname] = desc
@case (SetterPropertyKind, false)
methodname in defined && continue
push!(defined, methodname)
_build_setter_property(push_setter!, :this, path, methodname.name, def)
end
end
end
# build resolution table (specifically, the bodies of getter and setter)
process_each!(:($(cgi.cur_mod).$(cgi.typename)), (), Tuple(cgi.fieldnames), cgi.method_dict)
for (base, path_tuple) in mro
process_each!(base, path_tuple, ObjectOriented.direct_fields(base), ObjectOriented.direct_methods(base))
end
# detect all unimplemented abstract methods
for implemented in intersect(Set{PropertyName}(keys(abstract_methods)), defined)
delete!(abstract_methods, implemented)
end
check_abstract_def = @q function $ObjectOriented.check_abstract(::$Type{<:$t})
$(lift_to_quot(abstract_methods))
end
getter_body = build_if(get_block, :($ObjectOriented.getproperty_fallback(this, prop)))
setter_body = build_if(set_block, :($ObjectOriented.setproperty_fallback!(this, prop, value)))
expr_propernames = Expr(:tuple, QuoteNode.(unique!(Symbol[k.name for k in defined]))...)
out = cgi.outblock
# codegen
push!(out, ln)
append!(out, subclass_block)
## for '@typed_access'
append!(out, build_val_getters(t, get_block))
append!(out, build_val_setters(t, set_block))
push!(out, check_abstract_def)
## mro
push!(out, :($ObjectOriented.ootype_mro(::$Type{<:$t}) = $mro_expr))
## codegen getter and setter
push!(out, @q begin
function $Base.getproperty(this::$cur_mod.$t, prop::$Symbol)
$(Expr(:meta, :aggressive_constprop, :inline))
$ln
$getter_body
end
function $Base.setproperty!(this::$cur_mod.$t, prop::$Symbol, value)
$(Expr(:meta, :aggressive_constprop, :inline))
$ln
$setter_body
end
function $Base.propertynames(::$Type{<:$cur_mod.$t})
$(Expr(:meta, :inline))
$ln
$(expr_propernames)
end
end)
end
| ObjectOriented | https://github.com/Suzhou-Tongyuan/ObjectOriented.jl.git |
|
[
"MIT"
] | 0.1.4 | 43b678a00a1f9a096104f462d52412d2b59c1b68 | code | 781 | function canonicalize_where(ex::Expr)
@switch ex begin
@case Expr(:where, a)
return canonicalize_where(a)
@case Expr(head, args...)
res = Expr(head)
for arg in args
push!(res.args, canonicalize_where(arg))
end
return res
end
end
canonicalize_where(ex) = ex
function apply_curly(t_base, t_args::AbstractVector)
if isempty(t_args)
t_base
else
:($t_base{$(t_args...)})
end
end
function create_exception(ln::LineNumberNode, reason::String)
LoadError(string(ln.file), ln.line, ErrorException(reason))
end
function try_pushmeta!(ex::Expr, sym::Symbol)
Base.pushmeta!(ex, sym)
end
try_pushmeta!(a, sym::Symbol) = a
| ObjectOriented | https://github.com/Suzhou-Tongyuan/ObjectOriented.jl.git |
|
[
"MIT"
] | 0.1.4 | 43b678a00a1f9a096104f462d52412d2b59c1b68 | code | 5920 | ## RTS functions and types
module RunTime
using MLStyle
export Object, BoundMethod, Property
export construct, shape_type
export ootype_bases, ootype_mro
export direct_fields, direct_methods, base_field, getproperty_fallback, setproperty_fallback!
export get_base, set_base!, check_abstract, issubclass, isinstance
export getproperty_typed, setproperty_typed!
export @typed_access
"""
用来实现`@mk`宏。
该单例类型传递给generated function `construct`,
用来对任意结构体构造零开销、参数无序的构造器。
用法:
ObjectOriented.construct(目标类型, InitField{field符号, nothing或基类对象}())
"""
struct InitField{Sym, Base} end
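# Hedged sketch (illustration only; `Point` is hypothetical): a lowered `@mk` block
# ends up as a `construct` call whose trailing arguments alternate between `InitField`
# markers and the corresponding values, roughly:
#     construct(default_initializers(Point), Point,
#               InitField{:x, nothing}(), 1.0,
#               InitField{:y, nothing}(), 2.0)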
abstract type Object{U} end
"""`BoundMethod(this, func)(arg1, arg2) == func(this, arg1, arg2)`
"""
struct BoundMethod{This, Func}
this:: This
func:: Func
end
@inline (m::BoundMethod{This, Func})(args...; kwargs...) where {This, Func} = m.func(m.this, args...; kwargs...)
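# Hedged sketch (not in the original source): `BoundMethod` simply forwards the captured
# receiver as the first argument, so `m(args...)` behaves like `func(this, args...)`.
function _bound_method_demo()
    scale = BoundMethod(10, *)   # captures this = 10 and func = *
    scale(4) == 40               # equivalent to *(10, 4)
end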
@inline function getproperty_typed(x, ::Val{T}) where T
getproperty(x, T)
end
@inline function setproperty_typed!(x, value, ::Val{T}) where T
setproperty!(x, T, value)
end
typed_access(x) = x
function typed_access(ex::Expr)
@match ex begin
:($a.$(b::Symbol) = $c) => :($setproperty_typed!($(typed_access(a)), $(typed_access(c)), $(QuoteNode(Val(b)))))
:($a.$(b::Symbol)) => :($getproperty_typed($(typed_access(a)), $(QuoteNode(Val(b)))))
Expr(head, args...) => Expr(head, typed_access.(args)...)
end
end
macro typed_access(ex)
esc(typed_access(ex))
end
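# Hedged illustration (not in the original source): `@typed_access` rewrites property
# syntax into the `Val`-keyed accessors above, e.g. (hypothetical object `obj`):
#     @typed_access obj.x        # expands to roughly getproperty_typed(obj, Val(:x))
#     @typed_access obj.x = 1    # expands to roughly setproperty_typed!(obj, 1, Val(:x))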
function ootype_mro end
function ootype_bases(x)
Type[]
end
function direct_methods end
function direct_fields end
"""用户可自定义的默认成员访问方法。
如果为类型A定义重载此方法,则当类型A无法根据名字`name`找到成员时,该方法被调用。
"""
function getproperty_fallback(_::T, name) where T
error("unknown property '$name' for object of type '$T'")
end
"""用户可自定义的默认成员赋值方法。
如果为类型A定义重载此方法,则当类型A无法根据名字`name`找到可以赋值的成员时,该方法被调用。
"""
function setproperty_fallback!(_::T, name, value) where T
error("unknown property '$name' for object of type '$T'")
end
"""获取实例的基类实例。
```
@oodef mutable struct A
a :: Int
function new(a::Int)
@mk begin
a = 1
end
end
end
@oodef mutable struct B <: A
b :: Int
function new(a::Int, b::Int)
@mk begin
@base(A) = A(a)
b = 1
end
end
end
b_inst = B(1, 2)
a_inst = get_base(b_inst, A) :: A
```
"""
@inline function get_base(x::T, t) where T
Base.getfield(x, base_field(T, t))
end
@inline function set_base!(x::T, base::BaseType) where {T, BaseType}
Base.setfield!(x, base_field(T, BaseType), base)
end
Base.@pure function base_field(T, t)
error("type $T has no base type $t")
end
@inline _object_init_impl(self, args...; kwargs...) = nothing
@inline function object_init(self, args...; kwargs...)
_object_init_impl(self, args...; kwargs...)
return self
end
"""查询类型没有实现的抽象方法,用以检查目的。
用`check_abstract(Class)::Dict`查询是否存在未实现的抽象方法。
"""
function check_abstract end
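# Hedged illustration (not in the original source): for a fully concrete class the
# generated `check_abstract` method returns an empty Dict, so a completeness check
# could look like (hypothetical class name):
#     isempty(ObjectOriented.check_abstract(MyClass))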
@inline function issubclass(a :: Type, b :: Type)
false
end
@inline function isinstance(:: T, cls) where T <: Object
issubclass(T, cls)
end
@inline isinstance(jl_val, cls) = jl_val isa cls
## END
_unwrap(::Type{InitField{Sym, Base}}) where {Sym, Base} = (Sym, Base)
_unwrap(x) = nothing
function _find_index(@nospecialize(arr), @nospecialize(x))
for i in 1:length(arr)
e = arr[i]
if e == x
return i
end
end
return -1
end
function mk_init_singleton(@nospecialize(t))
Expr(:new, t, map(mk_init_singleton, fieldtypes(t))...)
end
function default_initializers(t)
NamedTuple()
end
# Builds the `Expr(:new, ...)` used by `construct`: arguments come in (InitField marker,
# value) pairs that fill the matching struct fields; unassigned fields fall back to the
# class's default initializers (or a zero-sized singleton) and otherwise raise an error.
@noinline function _construct(type_default_initializers, T, args)
n = div(length(args), 2)
names = fieldnames(T)
types = fieldtypes(T)
arguments = Vector{Any}(undef, length(names))
for i = 1:n
kw = args[2i-1]
bare = _unwrap(kw)
if bare === nothing
return :(error($("unknown base type or property '$kw' for class $T")))
end
(sym, base) = bare
if base === nothing
fieldname = sym
indice = _find_index(names, fieldname)
if indice == -1
return :(error($("unknown fieldname '$fieldname' for class '$T'")))
end
if isassigned(arguments, indice)
return :(error($("resetting property '$fieldname' for class '$T'")))
end
t_field = types[indice]
arguments[indice] = :($Base.convert($t_field, args[$(2i)]))
else
t = base
fieldname = sym
indice = _find_index(names, fieldname)
if isassigned(arguments, indice)
return :(error($("resetting base type '$t' for class '$T'")))
end
t_field = types[indice]
arguments[indice] = :($Base.convert($t_field, args[$(2i)]))
end
end
default_support_symbols = type_default_initializers.parameters[1]
for i = eachindex(arguments)
if !isassigned(arguments, i)
name = fieldname(T, i)
if name in default_support_symbols
t_field = fieldtype(T, i)
arguments[i] = :($Base.convert($t_field, default_initializers.$name()))
continue
elseif ismutable(T) || isbitstype(types[i]) && sizeof(types[i]) === 0
arguments[i] = mk_init_singleton(types[i])
continue
end
return :(error($("uninitialized field '$(names[i])' for class '$T'")))
end
end
Expr(:new, T, arguments...)
end
@generated function construct(default_initializers::NamedTuple, ::Type{T}, args...) where T
_construct(default_initializers, T, args)
end
end # module
| ObjectOriented | https://github.com/Suzhou-Tongyuan/ObjectOriented.jl.git |