licenses
sequencelengths 1
3
| version
stringclasses 677
values | tree_hash
stringlengths 40
40
| path
stringclasses 1
value | type
stringclasses 2
values | size
stringlengths 2
8
| text
stringlengths 25
67.1M
| package_name
stringlengths 2
41
| repo
stringlengths 33
86
|
---|---|---|---|---|---|---|---|---|
[
"MIT"
] | 0.6.21 | 14499ab752f08ebfc0e162a718c127b29997883c | code | 1438 | @testset "Deconstruct" begin
let p = deconstruct(f)
@test p[:category] == :function
@test p[:name] == :f
@test p[:args] == [:x, Expr(:kw, :y, 3)]
@test p[:kwargs] == [:a, Expr(:kw, :b, 5)]
@test p[:short]
@test issubset([50], p[:body].args)
end
@test deconstruct(doc_f)[:docstring] == "I document function f"
let p = deconstruct(f2)
@test p[:category] == :function
@test p[:name] == :f
@test p[:args] == [:x]
@test issubset([:(println(2)), :(return 5)], p[:body].args)
@test !p[:short]
end
let p = deconstruct(s)
@test p[:category] == :struct
@test p[:name] == :HandleType
@test p[:fields] == [:(handle::Ptr{Nothing})]
@test p[:is_mutable] == true
@test p[:constructors] == []
end
@test deconstruct(doc_s)[:docstring] == "I document structure s"
let p = deconstruct(c)
@test p[:category] == :const
@test p[:value] == :val
end
let p = deconstruct(e)
@test p[:category] == :enum
@test p[:name] == :myenum
@test p[:macro] == Symbol("@enum")
@test p[:values] == [:a, :b, :c, :d]
end
let p = deconstruct(e2)
@test p[:category] == :enum
@test p[:name] == :myotherenum
@test p[:macro] == Symbol("@cenum")
@test p[:values] == [:(a = 1), :(b = 2), :(c = 3), :(d = 4)]
end
end
| Vulkan | https://github.com/JuliaGPU/Vulkan.jl.git |
|
[
"MIT"
] | 0.6.21 | 14499ab752f08ebfc0e162a718c127b29997883c | code | 366 | @testset "Reconstruct" begin
@test prettify(reconstruct(deconstruct(f))) == prettify(f)
@test prettify(reconstruct(deconstruct(doc_f))) == prettify(doc_f)
@test prettify(reconstruct(deconstruct(e2))) == prettify(e2)
@test prettify(reconstruct(deconstruct(s))) == prettify(s)
@test prettify(reconstruct(deconstruct(doc_s))) == prettify(doc_s)
end
| Vulkan | https://github.com/JuliaGPU/Vulkan.jl.git |
|
[
"MIT"
] | 0.6.21 | 14499ab752f08ebfc0e162a718c127b29997883c | code | 102 | @testset "Unblock" begin
@test string(unblock(striplines(f))) == "f(x, y = 3; a, b = 5) = 50"
end
| Vulkan | https://github.com/JuliaGPU/Vulkan.jl.git |
|
[
"MIT"
] | 0.6.21 | 14499ab752f08ebfc0e162a718c127b29997883c | code | 1260 | @testset "Library" begin
@testset "Camel Case Splitting" begin
@test split(camel_split_l_c1) == ["my", "Camel", "2"]
@test split(camel_split_l_c2) == ["my", "Camel", "2", "KHR", "Ext"]
@test split(camel_split_l_c3) == ["my", "Camel", "2", "Ext", "4"]
@test split(camel_split_u_c1) == ["My", "Camel", "2"]
@test split(camel_split_u_c2) == ["My", "Camel", "2", "KHR", "Ext"]
@test split(camel_split_u_c3) == ["My", "Camel", "2", "Ext", "4"]
end
@testset "Conversions" begin
@test Base.convert(CamelCaseLower, const_s1) == const_c1
@test Base.convert(CamelCaseLower, const_s2) == const_c1
@test Base.convert(CamelCaseUpper, const_s1) == const_c2
@test Base.convert(CamelCaseUpper, const_s2) == const_c2
@test Base.convert(SnakeCaseLower, const_c1) == const_s1
@test Base.convert(SnakeCaseLower, const_c2) == const_s1
@test Base.convert(SnakeCaseUpper, const_c1) == const_s2
@test Base.convert(SnakeCaseUpper, const_c2) == const_s2
end
@testset "Remove parts" begin
@test remove_parts(longstr, [1, 3]).value == "snake_with_many_words"
@test remove_parts(longstr, 1).value == "snake_case_with_many_words"
end
end
| Vulkan | https://github.com/JuliaGPU/Vulkan.jl.git |
|
[
"MIT"
] | 0.6.21 | 14499ab752f08ebfc0e162a718c127b29997883c | code | 161 | @testset "Remove Vulkan prefixes" begin
@test remove_prefix(s2) == s2_novk
@test remove_prefix(c1) == c1_novk
@test remove_prefix(c2) == c2_novk
end
| Vulkan | https://github.com/JuliaGPU/Vulkan.jl.git |
|
[
"MIT"
] | 0.6.21 | 14499ab752f08ebfc0e162a718c127b29997883c | code | 2846 | """
$(README)
Depends on:
$(IMPORTS)
"""
module Vulkan
using Reexport
using DocStringExtensions
using StructEquality: @struct_hash_equal
using Accessors: @set, setproperties
using PrecompileTools
using Libdl: Libdl
using BitMasks
using VulkanCore: VulkanCore, vk
using .vk
const Vk = Vulkan
const VkCore = vk
export VkCore, Vk
using Base: RefArray
import Base: convert, unsafe_convert, &, |, xor, isless, ==, typemax, in, parent
using MLStyle
const Optional{T} = Union{T, Nothing}
@reexport using ResultTypes: unwrap, unwrap_error, iserror
using ResultTypes: ResultTypes
@template FUNCTIONS =
"""
$(DOCSTRING)
$(METHODLIST)
"""
@template (METHODS, MACROS) =
"""
$(DOCSTRING)
$(TYPEDSIGNATURES)
"""
@template TYPES =
"""
$(DOCSTRING)
$(TYPEDEF)
$(TYPEDFIELDS)
"""
include("preferences.jl")
# generated wrapper
include("prewrap.jl")
include("CEnum/CEnum.jl")
using .CEnum
@static if Sys.islinux()
include("../generated/linux.jl")
elseif Sys.isapple()
include("../generated/macos.jl")
elseif Sys.isbsd()
include("../generated/bsd.jl")
elseif Sys.iswindows()
include("../generated/windows.jl")
end
include("utils.jl")
include("debug.jl")
include("driver.jl")
include("validation.jl")
include("instance.jl")
include("device.jl")
include("dispatch.jl")
include("formats.jl")
include("show.jl")
const global_dispatcher = Ref{APIDispatcher}()
include("precompile_workload.jl")
include("precompile.jl")
function __init__()
global_dispatcher[] = APIDispatcher()
fill_dispatch_table()
end
# Extensions.
# Turn these into extensions when precompilation issues raised in
# in https://github.com/JuliaLang/julia/issues/52511 are addressed.
include("../ext/VulkanFixedPointNumbersExt.jl")
include("../ext/VulkanColorTypesExt.jl")
include("../ext/VulkanFixedPointNumbersColorTypesExt.jl")
include("../ext/VulkanStaticArraysCoreExt.jl")
include("../ext/VulkanFixedPointNumbersStaticArraysCoreExt.jl")
export
# Wrapper
VulkanStruct,
ReturnedOnly,
Handle,
to_vk,
from_vk,
structure_type,
SpecExtensionSPIRV, SpecCapabilitySPIRV,
PropertyCondition, FeatureCondition,
# Driver
set_driver,
@set_driver,
# Printing
print_app_info,
print_available_devices,
print_device_info,
print_devices,
# Device
find_queue_family,
# Debugging
default_debug_callback,
set_debug_name,
# Pointer utilities
function_pointer,
pointer_length,
chain, unchain,
# Bitmask manipulation utilities
BitMask,
@bitmask_flag,
# Error handling
VulkanError,
@check,
iserror,
# Formats
format_type
end # module Vulkan
| Vulkan | https://github.com/JuliaGPU/Vulkan.jl.git |
|
[
"MIT"
] | 0.6.21 | 14499ab752f08ebfc0e162a718c127b29997883c | code | 3334 | function set_debug_name(handle::Handle, name)
device = device_or_nothing(handle)
isnothing(device) && error("No parent device found for $handle; you must provide the device explicitly as first argument")
set_debug_name(device::Device, handle, name)
end
set_debug_name(device::Device, handle::Handle, name::Symbol) = set_debug_name(device, handle, string(name))
function set_debug_name(device::Device, handle::Handle, name::String)
object_type = ObjectType(handle)
address = UInt64(convert(Ptr{Cvoid}, handle))
info = DebugUtilsObjectNameInfoEXT(object_type, address; object_name = name)
set_debug_utils_object_name_ext(device, info)
end
ObjectType(::Handle) = OBJECT_TYPE_UNKNOWN
ObjectType(::Instance) = OBJECT_TYPE_INSTANCE
ObjectType(::PhysicalDevice) = OBJECT_TYPE_PHYSICAL_DEVICE
ObjectType(::Device) = OBJECT_TYPE_DEVICE
ObjectType(::Queue) = OBJECT_TYPE_QUEUE
ObjectType(::Semaphore) = OBJECT_TYPE_SEMAPHORE
ObjectType(::CommandBuffer) = OBJECT_TYPE_COMMAND_BUFFER
ObjectType(::Fence) = OBJECT_TYPE_FENCE
ObjectType(::DeviceMemory) = OBJECT_TYPE_DEVICE_MEMORY
ObjectType(::Buffer) = OBJECT_TYPE_BUFFER
ObjectType(::Image) = OBJECT_TYPE_IMAGE
ObjectType(::Event) = OBJECT_TYPE_EVENT
ObjectType(::QueryPool) = OBJECT_TYPE_QUERY_POOL
ObjectType(::BufferView) = OBJECT_TYPE_BUFFER_VIEW
ObjectType(::ImageView) = OBJECT_TYPE_IMAGE_VIEW
ObjectType(::ShaderModule) = OBJECT_TYPE_SHADER_MODULE
ObjectType(::PipelineCache) = OBJECT_TYPE_PIPELINE_CACHE
ObjectType(::PipelineLayout) = OBJECT_TYPE_PIPELINE_LAYOUT
ObjectType(::RenderPass) = OBJECT_TYPE_RENDER_PASS
ObjectType(::Pipeline) = OBJECT_TYPE_PIPELINE
ObjectType(::DescriptorSetLayout) = OBJECT_TYPE_DESCRIPTOR_SET_LAYOUT
ObjectType(::Sampler) = OBJECT_TYPE_SAMPLER
ObjectType(::DescriptorPool) = OBJECT_TYPE_DESCRIPTOR_POOL
ObjectType(::DescriptorSet) = OBJECT_TYPE_DESCRIPTOR_SET
ObjectType(::Framebuffer) = OBJECT_TYPE_FRAMEBUFFER
ObjectType(::CommandPool) = OBJECT_TYPE_COMMAND_POOL
ObjectType(::SamplerYcbcrConversion) = OBJECT_TYPE_SAMPLER_YCBCR_CONVERSION
ObjectType(::DescriptorUpdateTemplate) = OBJECT_TYPE_DESCRIPTOR_UPDATE_TEMPLATE
ObjectType(::PrivateDataSlot) = OBJECT_TYPE_PRIVATE_DATA_SLOT
ObjectType(::SurfaceKHR) = OBJECT_TYPE_SURFACE_KHR
ObjectType(::SwapchainKHR) = OBJECT_TYPE_SWAPCHAIN_KHR
ObjectType(::DisplayKHR) = OBJECT_TYPE_DISPLAY_KHR
ObjectType(::DisplayModeKHR) = OBJECT_TYPE_DISPLAY_MODE_KHR
ObjectType(::DebugReportCallbackEXT) = OBJECT_TYPE_DEBUG_REPORT_CALLBACK_EXT
ObjectType(::CuModuleNVX) = OBJECT_TYPE_CU_MODULE_NVX
ObjectType(::CuFunctionNVX) = OBJECT_TYPE_CU_FUNCTION_NVX
ObjectType(::DebugUtilsMessengerEXT) = OBJECT_TYPE_DEBUG_UTILS_MESSENGER_EXT
ObjectType(::AccelerationStructureKHR) = OBJECT_TYPE_ACCELERATION_STRUCTURE_KHR
ObjectType(::ValidationCacheEXT) = OBJECT_TYPE_VALIDATION_CACHE_EXT
ObjectType(::AccelerationStructureNV) = OBJECT_TYPE_ACCELERATION_STRUCTURE_NV
ObjectType(::PerformanceConfigurationINTEL) = OBJECT_TYPE_PERFORMANCE_CONFIGURATION_INTEL
ObjectType(::DeferredOperationKHR) = OBJECT_TYPE_DEFERRED_OPERATION_KHR
ObjectType(::IndirectCommandsLayoutNV) = OBJECT_TYPE_INDIRECT_COMMANDS_LAYOUT_NV
# These don't have wrappers defined yet.
# ObjectType(::) = OBJECT_TYPE_VIDEO_SESSION_KHR
# ObjectType(::) = OBJECT_TYPE_VIDEO_SESSION_PARAMETERS_KHR
# ObjectType(::) = OBJECT_TYPE_BUFFER_COLLECTION_FUCHSIA
| Vulkan | https://github.com/JuliaGPU/Vulkan.jl.git |
|
[
"MIT"
] | 0.6.21 | 14499ab752f08ebfc0e162a718c127b29997883c | code | 3680 | for (T, msg) in zip([:PhysicalDeviceFeatures, :PhysicalDeviceVulkan11Features, :PhysicalDeviceVulkan12Features, :PhysicalDeviceVulkan13Features], ["", " Vulkan 1.1", " Vulkan 1.2", " Vulkan 1.3"])
hasnext = T ≠ :PhysicalDeviceFeatures
fdecl = hasnext ? :($T(features::AbstractArray; next=C_NULL)) : :($T(features::AbstractArray))
call = hasnext ? :($T(args...; next)) : :($T(args...))
@eval $fdecl = begin
names = filter(!in((:s_type, :next)), fieldnames($T))
diff = setdiff(features, names)
if length(diff) ≠ 0
error(string("Invalid physical device", $msg, " features: ", join(diff, ", ")))
end
args = map(in(features), names)
$call
end
end
"""
Return a `PhysicalDeviceFeatures` object with the provided `features` set to true.
```jldoctest
julia> PhysicalDeviceFeatures()
PhysicalDeviceFeatures()
julia> PhysicalDeviceFeatures(:wide_lines, :sparse_binding)
PhysicalDeviceFeatures(wide_lines, sparse_binding)
```
"""
PhysicalDeviceFeatures(features::Symbol...) = PhysicalDeviceFeatures(collect(features))
"""
Return a `PhysicalDeviceVulkan11Features` object with the provided `features` set to true.
```jldoctest
julia> PhysicalDeviceVulkan11Features(; next = C_NULL)
PhysicalDeviceVulkan11Features(next=Ptr{Nothing}(0x0000000000000000))
julia> PhysicalDeviceVulkan11Features(:multiview, :variable_pointers, next = C_NULL)
PhysicalDeviceVulkan11Features(next=Ptr{Nothing}(0x0000000000000000), multiview, variable_pointers)
```
"""
PhysicalDeviceVulkan11Features(features::Symbol...; next = C_NULL) = PhysicalDeviceVulkan11Features(collect(features); next)
"""
Return a `PhysicalDeviceVulkan12Features` object with the provided `features` set to true.
```jldoctest
julia> PhysicalDeviceVulkan12Features(; next = C_NULL)
PhysicalDeviceVulkan12Features(next=Ptr{Nothing}(0x0000000000000000))
julia> PhysicalDeviceVulkan12Features(:draw_indirect_count, :descriptor_binding_variable_descriptor_count)
PhysicalDeviceVulkan12Features(next=Ptr{Nothing}(0x0000000000000000), draw_indirect_count, descriptor_binding_variable_descriptor_count)
```
"""
PhysicalDeviceVulkan12Features(features::Symbol...; next = C_NULL) = PhysicalDeviceVulkan12Features(collect(features); next)
"""
Return a `PhysicalDeviceVulkan13Features` object with the provided `features` set to true.
```jldoctest
julia> PhysicalDeviceVulkan13Features(; next = C_NULL)
PhysicalDeviceVulkan13Features(next=Ptr{Nothing}(0x0000000000000000))
julia> PhysicalDeviceVulkan13Features(:dynamic_rendering)
PhysicalDeviceVulkan13Features(next=Ptr{Nothing}(0x0000000000000000), dynamic_rendering)
```
"""
PhysicalDeviceVulkan13Features(features::Symbol...; next = C_NULL) = PhysicalDeviceVulkan13Features(collect(features); next)
"""
Find a queue index (starting at 0) from `physical_device` which matches the provided `queue_capabilities`.
```jldoctest
julia> find_queue_family(physical_device, QUEUE_COMPUTE_BIT & QUEUE_GRAPHICS_BIT)
0
```
"""
function find_queue_family(physical_device::PhysicalDevice, queue_capabilities::QueueFlag)
qf_props = get_physical_device_queue_family_properties(physical_device)
index = findfirst(x -> queue_capabilities in x.queue_flags, qf_props)
if isnothing(index)
error("No queue with the desired capabilities could be found.")
end
index - 1
end
device(device::Device) = device
function device(handle::Handle)
next = parent(handle)
isnothing(next) && error("No parent device found")
device(next)::Device
end
device_or_nothing(device::Device) = device
device_or_nothing(handle::Handle) = device_or_nothing(parent(handle))
device_or_nothing(::Nothing) = nothing
| Vulkan | https://github.com/JuliaGPU/Vulkan.jl.git |
|
[
"MIT"
] | 0.6.21 | 14499ab752f08ebfc0e162a718c127b29997883c | code | 2193 | struct DispatchTable
pointers::Dict{Symbol,Ptr{Cvoid}}
end
DispatchTable() = DispatchTable(Dict())
function add_fptr!(dtable::DispatchTable, handle, f::Symbol)
fptr = function_pointer(handle, string(f))
dtable.pointers[f] = fptr
end
struct APIDispatcher
loader_table::DispatchTable
instance_tables::Dict{Instance,DispatchTable}
device_tables::Dict{Device,DispatchTable}
end
APIDispatcher() = APIDispatcher(DispatchTable(), Dict(), Dict())
table(disp::APIDispatcher, ::Nothing) = disp.loader_table
function table(disp::APIDispatcher, x)
h = handle(x)
h isa Instance || h isa Device || error("Expected instance or device handle, got $h")
table(disp, h)
end
function table(disp::APIDispatcher, instance::Instance)
get!(DispatchTable, disp.instance_tables, instance)
end
function table(disp::APIDispatcher, device::Device)
get!(DispatchTable, disp.device_tables, device)
end
"""
Query a function pointer for an API function.
"""
function function_pointer end
function function_pointer(disp::APIDispatcher, handle, key::Symbol)::Ptr{Cvoid}
t = table(disp, handle)
fptr = t.pointers[key]
if fptr == C_NULL
error(
"Could not retrieve function pointer for '$key'. This can be caused by an extension not being enabled for a function that needs it; see the help with `?` or the documentation for more information.",
)
end
fptr
end
function_pointer(name::AbstractString) = get_instance_proc_addr(name)
function_pointer(::Nothing, name::AbstractString) = function_pointer(name)
function_pointer(instance::Instance, name::AbstractString) = get_instance_proc_addr(name; instance)
function_pointer(device::Device, name::AbstractString) = get_device_proc_addr(device, name)
function_pointer(x, name::AbstractString) = function_pointer(handle(x), name)
dispatchable_functions(::Nothing) = CORE_FUNCTIONS
dispatchable_functions(::Instance) = INSTANCE_FUNCTIONS
dispatchable_functions(::Device) = DEVICE_FUNCTIONS
function fill_dispatch_table(handle = nothing)
disp = global_dispatcher[]
t = table(disp, handle)
for f in dispatchable_functions(handle)
add_fptr!(t, handle, f)
end
end
| Vulkan | https://github.com/JuliaGPU/Vulkan.jl.git |
|
[
"MIT"
] | 0.6.21 | 14499ab752f08ebfc0e162a718c127b29997883c | code | 2673 | """
Convenience function for setting a specific driver used by Vulkan.
Only SwiftShader is currently supported. To add another driver, you must specify it by hand. You can achieve that by setting the environment variable `VK_DRIVER_FILES` (formerly `VK_ICD_FILENAMES`) to point to your own driver JSON manifest file, as described in https://github.com/KhronosGroup/Vulkan-Loader/blob/main/docs/LoaderDriverInterface.md#driver-discovery.
Available drivers:
- SwiftShader: a CPU implementation of Vulkan. Requires `SwiftShader_jll` to be imported in `mod`.
"""
function set_driver(backend::Symbol)
@match backend begin
:SwiftShader => begin
pkg = Base.PkgId(Base.UUID("c404ff5a-e271-5628-8eaa-423d39d64c15"), "SwiftShader_jll")
mod = get(Base.loaded_modules, pkg, nothing)
if isnothing(mod)
try
@eval @__MODULE__() import SwiftShader_jll
mod = get(Base.loaded_modules, pkg, nothing)
isnothing(mod) && error("Another module named `SwiftShader_jll` seems to exist, but is not $pkg")
catch
@error "$pkg could not be loaded"
rethrow()
end
end
(; libvulkan) = mod
ENV["JULIA_VULKAN_LIBNAME"] = basename(libvulkan)
libdir = dirname(libvulkan)
sep = Sys.iswindows() ? ';' : ':'
# Read/set both `VK_ICD_FILENAMES` and `VK_DRIVER_FILES` for compatibility,
# even though `VK_ICD_FILENAMES` has been obsoleted by `VK_DRIVER_FILES`.
icd_filenames = split(get(ENV, "VK_ICD_FILENAMES", ""), sep)
driver_files = split(get(ENV, "VK_DRIVER_FILES", ""), sep)
drivers = [icd_filenames; driver_files]
swiftshader_icd = joinpath(libdir, "vk_swiftshader_icd.json")
!in(swiftshader_icd, drivers) && push!(drivers, swiftshader_icd)
ENV["VK_ICD_FILENAMES"] = join(drivers, sep)
ENV["VK_DRIVER_FILES"] = join(drivers, sep)
end
_ => error("Backend `$backend` not available. Only 'SwiftShader' is currently supported.")
end
nothing
end
macro set_driver(backend) :(set_driver($(esc(backend)))) end
| Vulkan | https://github.com/JuliaGPU/Vulkan.jl.git |
|
[
"MIT"
] | 0.6.21 | 14499ab752f08ebfc0e162a718c127b29997883c | code | 7536 | """
format_type(Vk.FORMAT_R4G4_UNORM_PACK8) # UInt8
format_type(Vk.FORMAT_R32_SFLOAT) # Float32
format_type(Vk.FORMAT_R32G32_SFLOAT) # NTuple{2,Float32}
format_type(Vk.FORMAT_R32G32B32_SFLOAT) # RGB{Float32} with the extension for ColorTypes.jl
format_type(Vk.FORMAT_R16G16B16A16_SFLOAT) # RGBA{Float16} with the extension for ColorTypes.jl
Retrieve a canonical type associated with an uncompressed Vulkan image format.
Note from the spec:
> Depth/stencil formats are considered opaque and need not be stored in the exact number of bits per texel or component ordering indicated by the format enum.
This means we can't reliably interpret an image with a depth/stencil format. The image needs to be transitioned
to a color format first (e.g. `D16` to `R16`), and when both depth and stencil aspects are present,
a view must be formed for the transfer (e.g. `D16S8` must be viewed with the depth aspect for transfer from `D16` into `R16`, or with the stencil aspect for transfer from `S8` into `R8`).
The exact byte representation is available at https://registry.khronos.org/vulkan/specs/1.3-extensions/html/chap49.html#texel-block-size.
Note from the spec for packed representations:
> Packed formats store multiple components within one underlying type. The bit representation is that the first component specified in the name of the format is in the most-significant bits and the last component specified is in the least-significant bits of the underlying type. The in-memory ordering of bytes comprising the underlying type is determined by the host endianness.
One must therefore be careful about endianness for packed representations when reading from an image.
# Extended help
Here is an informative list of most mappings (the element type, where relevant, is omitted and represented as `T`):
`PACK8`: -> `UInt8`
- `RG`
`PACK16`: -> `UInt16`
- `RGBA`
- `BGRA`
- `RGB`
- `BGR`
- `RGBA1`
- `BGRA1`
- `A1RGB`
- `A4RGB`
- `A4BGR`
- `R12X4`
`PACK32`: -> `UInt32`
- `ARGB`
- `A2RGB`
- `A2BGR`
- `BGR`
- `EBGR`
- `X8D24`
- `GBGR_422`
- `BGRG_422`
8-bit per component:
- `R` -> `T`
- `RG` -> `NTuple{2,T}`
- `RGB` -> `RGB{T}`
- `BGR` -> `BGR{T}`
- `RGBA` -> `RGBA{T}`
- `BGRA` -> `BGRA{T}`
- `ABGR` -> `ABGR{T}`
- `GBGR` -> `NTuple{4,T}`
- `BGRG` -> `NTuple{4,T}`
- `S` -> undefined, transition to `R8`
16-bit per component:
- `R` -> `T`
- `RG` -> `NTuple{2,T}`
- `RGB` -> `RGB{T}`
- `RGBA` -> `RGBA{T}`
- `D` -> undefined, transition to `R16`
32-bit per component:
- `R` -> `T`
- `RG` -> `NTuple{2,T}`
- `RGB` -> `RGB{T}`
- `RGBA` -> `RGBA{T}`
- `D` -> undefined, transition to `R32`
64-bit per component:
- `R` -> `T`
- `RG` -> `NTuple{2,T}`
- `RGB` -> `RGB{T}`
- `RGBA` -> `RGBA{T}`
Depth/stencil:
- `D16S8` -> undefined, transition to `R16`/`R8`
- `D24S8` -> undefined, transition to ?/`R8`
- `D32S8` -> undefined, transition to `R32`/`R8`
Compressed formats: -> undefined byte representation, transition to other format
- `BC`
- `ETC2`
- `EAC`
- `ASTC`
- `PVRTC`
"""
function format_type end
@inline format_type(x) = format_type(Val(x))
format_type(::Val{x}) where {x} = error("No type is known which corresponds to format $x")
# generated by `ext/generate_formats.jl`.
Vk.format_type(::Val{Vk.FORMAT_R4G4_UNORM_PACK8}) = UInt8
Vk.format_type(::Val{Vk.FORMAT_R4G4B4A4_UNORM_PACK16}) = UInt16
Vk.format_type(::Val{Vk.FORMAT_B4G4R4A4_UNORM_PACK16}) = UInt16
Vk.format_type(::Val{Vk.FORMAT_R5G6B5_UNORM_PACK16}) = UInt16
Vk.format_type(::Val{Vk.FORMAT_B5G6R5_UNORM_PACK16}) = UInt16
Vk.format_type(::Val{Vk.FORMAT_R5G5B5A1_UNORM_PACK16}) = UInt16
Vk.format_type(::Val{Vk.FORMAT_B5G5R5A1_UNORM_PACK16}) = UInt16
Vk.format_type(::Val{Vk.FORMAT_A1R5G5B5_UNORM_PACK16}) = UInt16
Vk.format_type(::Val{Vk.FORMAT_A8B8G8R8_UNORM_PACK32}) = UInt32
Vk.format_type(::Val{Vk.FORMAT_A8B8G8R8_SNORM_PACK32}) = UInt32
Vk.format_type(::Val{Vk.FORMAT_A8B8G8R8_USCALED_PACK32}) = UInt32
Vk.format_type(::Val{Vk.FORMAT_A8B8G8R8_SSCALED_PACK32}) = UInt32
Vk.format_type(::Val{Vk.FORMAT_A8B8G8R8_UINT_PACK32}) = UInt32
Vk.format_type(::Val{Vk.FORMAT_A8B8G8R8_SINT_PACK32}) = UInt32
Vk.format_type(::Val{Vk.FORMAT_A8B8G8R8_SRGB_PACK32}) = UInt32
Vk.format_type(::Val{Vk.FORMAT_A2R10G10B10_UNORM_PACK32}) = UInt32
Vk.format_type(::Val{Vk.FORMAT_A2R10G10B10_SNORM_PACK32}) = UInt32
Vk.format_type(::Val{Vk.FORMAT_A2R10G10B10_USCALED_PACK32}) = UInt32
Vk.format_type(::Val{Vk.FORMAT_A2R10G10B10_SSCALED_PACK32}) = UInt32
Vk.format_type(::Val{Vk.FORMAT_A2R10G10B10_UINT_PACK32}) = UInt32
Vk.format_type(::Val{Vk.FORMAT_A2R10G10B10_SINT_PACK32}) = UInt32
Vk.format_type(::Val{Vk.FORMAT_A2B10G10R10_UNORM_PACK32}) = UInt32
Vk.format_type(::Val{Vk.FORMAT_A2B10G10R10_SNORM_PACK32}) = UInt32
Vk.format_type(::Val{Vk.FORMAT_A2B10G10R10_USCALED_PACK32}) = UInt32
Vk.format_type(::Val{Vk.FORMAT_A2B10G10R10_SSCALED_PACK32}) = UInt32
Vk.format_type(::Val{Vk.FORMAT_A2B10G10R10_UINT_PACK32}) = UInt32
Vk.format_type(::Val{Vk.FORMAT_A2B10G10R10_SINT_PACK32}) = UInt32
Vk.format_type(::Val{Vk.FORMAT_B10G11R11_UFLOAT_PACK32}) = UInt32
Vk.format_type(::Val{Vk.FORMAT_E5B9G9R9_UFLOAT_PACK32}) = UInt32
Vk.format_type(::Val{Vk.FORMAT_X8_D24_UNORM_PACK32}) = UInt32
Vk.format_type(::Val{Vk.FORMAT_R10X6_UNORM_PACK16}) = UInt16
Vk.format_type(::Val{Vk.FORMAT_R10X6G10X6_UNORM_2PACK16}) = UInt16
Vk.format_type(::Val{Vk.FORMAT_R10X6G10X6B10X6A10X6_UNORM_4PACK16}) = UInt16
Vk.format_type(::Val{Vk.FORMAT_G10X6B10X6G10X6R10X6_422_UNORM_4PACK16}) = UInt16
Vk.format_type(::Val{Vk.FORMAT_B10X6G10X6R10X6G10X6_422_UNORM_4PACK16}) = UInt16
Vk.format_type(::Val{Vk.FORMAT_G10X6_B10X6_R10X6_3PLANE_420_UNORM_3PACK16}) = UInt16
Vk.format_type(::Val{Vk.FORMAT_G10X6_B10X6R10X6_2PLANE_420_UNORM_3PACK16}) = UInt16
Vk.format_type(::Val{Vk.FORMAT_G10X6_B10X6_R10X6_3PLANE_422_UNORM_3PACK16}) = UInt16
Vk.format_type(::Val{Vk.FORMAT_G10X6_B10X6R10X6_2PLANE_422_UNORM_3PACK16}) = UInt16
Vk.format_type(::Val{Vk.FORMAT_G10X6_B10X6_R10X6_3PLANE_444_UNORM_3PACK16}) = UInt16
Vk.format_type(::Val{Vk.FORMAT_R12X4_UNORM_PACK16}) = UInt16
Vk.format_type(::Val{Vk.FORMAT_R12X4G12X4_UNORM_2PACK16}) = UInt16
Vk.format_type(::Val{Vk.FORMAT_R12X4G12X4B12X4A12X4_UNORM_4PACK16}) = UInt16
Vk.format_type(::Val{Vk.FORMAT_G12X4B12X4G12X4R12X4_422_UNORM_4PACK16}) = UInt16
Vk.format_type(::Val{Vk.FORMAT_B12X4G12X4R12X4G12X4_422_UNORM_4PACK16}) = UInt16
Vk.format_type(::Val{Vk.FORMAT_G12X4_B12X4_R12X4_3PLANE_420_UNORM_3PACK16}) = UInt16
Vk.format_type(::Val{Vk.FORMAT_G12X4_B12X4R12X4_2PLANE_420_UNORM_3PACK16}) = UInt16
Vk.format_type(::Val{Vk.FORMAT_G12X4_B12X4_R12X4_3PLANE_422_UNORM_3PACK16}) = UInt16
Vk.format_type(::Val{Vk.FORMAT_G12X4_B12X4R12X4_2PLANE_422_UNORM_3PACK16}) = UInt16
Vk.format_type(::Val{Vk.FORMAT_G12X4_B12X4_R12X4_3PLANE_444_UNORM_3PACK16}) = UInt16
Vk.format_type(::Val{Vk.FORMAT_G10X6_B10X6R10X6_2PLANE_444_UNORM_3PACK16}) = UInt16
Vk.format_type(::Val{Vk.FORMAT_G12X4_B12X4R12X4_2PLANE_444_UNORM_3PACK16}) = UInt16
Vk.format_type(::Val{Vk.FORMAT_A4R4G4B4_UNORM_PACK16}) = UInt16
Vk.format_type(::Val{Vk.FORMAT_A4B4G4R4_UNORM_PACK16}) = UInt16
Vk.Format(::Type{Float16}) = FORMAT_R16_SFLOAT
Vk.Format(::Type{Tuple{Float16, Float16}}) = FORMAT_R16G16_SFLOAT
Vk.Format(::Type{Float32}) = FORMAT_R32_SFLOAT
Vk.Format(::Type{Tuple{Float32, Float32}}) = FORMAT_R32G32_SFLOAT
Vk.Format(::Type{Float64}) = FORMAT_R64_SFLOAT
Vk.Format(::Type{Tuple{Float64, Float64}}) = FORMAT_R64G64_SFLOAT
Vk.format_type(::Val{FORMAT_R16_SFLOAT}) = Float16
Vk.format_type(::Val{FORMAT_R32_SFLOAT}) = Float32
Vk.format_type(::Val{FORMAT_R64_SFLOAT}) = Float64
| Vulkan | https://github.com/JuliaGPU/Vulkan.jl.git |
|
[
"MIT"
] | 0.6.21 | 14499ab752f08ebfc0e162a718c127b29997883c | code | 112 | instance(instance::Instance)::Instance = instance
instance(handle::Handle)::Instance = instance(parent(handle))
| Vulkan | https://github.com/JuliaGPU/Vulkan.jl.git |
|
[
"MIT"
] | 0.6.21 | 14499ab752f08ebfc0e162a718c127b29997883c | code | 1906 | @compile_workload begin
initialize(_PhysicalDeviceFeatures2, _PhysicalDeviceVulkan12Features, _PhysicalDeviceVulkanMemoryModelFeatures)
f1 = initialize(PhysicalDeviceFeatures2, PhysicalDeviceVulkan12Features, PhysicalDeviceVulkanMemoryModelFeatures)
chain_args = [PhysicalDeviceFeatures2(PhysicalDeviceFeatures()), PhysicalDeviceVulkan12Features(), PhysicalDeviceVulkanMemoryModelFeatures(false, false, false)]
chained = chain(chain_args...)
unchain(chained)
f2 = _PhysicalDeviceFeatures2(f1)
from_vk(PhysicalDeviceFeatures2, f2.vks, PhysicalDeviceVulkan12Features, PhysicalDeviceVulkanMemoryModelFeatures)
f3 = Base.unsafe_convert(VkCore.VkPhysicalDeviceFeatures2, f2)
f4 = PhysicalDeviceFeatures2(f3, PhysicalDeviceVulkan12Features, PhysicalDeviceVulkanMemoryModelFeatures)
if in(@load_preference("PRECOMPILE_DEVICE_FUNCTIONS", "auto"), ("true", "auto"))
@debug "Running device-dependent precompilation workload"
try
precompile_workload()
catch e
if @load_preference("PRECOMPILE_DEVICE_FUNCTIONS", "auto") == "true"
if isa(e, PrecompilationError)
@error "The precompilation of device functions failed, likely because of an unsupported configuration on the host machine. You may disable this error by setting the preference \"PRECOMPILE_DEVICE_FUNCTIONS\" to \"auto\" or \"false\"."
else
@error "The precompilation of device functions failed unexpectedly. We encourage you to file an issue on https://github.com/JuliaGPU/Vulkan.jl including this logged message."
end
rethrow()
else
@debug "The precompilation of device functions failed, likely because of an unsupported configuration on the host machine.\nException: $(sprint(showerror, e))"
end
end
end
end
| Vulkan | https://github.com/JuliaGPU/Vulkan.jl.git |
|
[
"MIT"
] | 0.6.21 | 14499ab752f08ebfc0e162a718c127b29997883c | code | 7606 | struct PrecompilationError
msg::String
end
Base.showerror(io::IO, err::PrecompilationError) = print(io, "PrecompilationError: ", err.msg)
function precompile_workload()
# Only run Vulkan commands if a Vulkan library is found.
libname = Libdl.find_library(VkCore.libvulkan)
!isempty(libname) || throw(PrecompilationError("No Vulkan library named $(VkCore.libvulkan) in the library path."))
# This is run before initializing Vulkan.
# VkCore.libvulkan_handle[] = Libdl.dlopen(libname)
!isdefined(global_dispatcher, 1) && (global_dispatcher[] = APIDispatcher())
fill_dispatch_table()
loader_version = unwrap(enumerate_instance_version())
loader_version ≥ v"1.1" || throw(PrecompilationError("The Vulkan loader version is unsupported ($loader_version): a version of 1.1 or higher is required."))
layers = []
exts = []
for layer in unwrap(enumerate_instance_layer_properties())
layer.layer_name == "VK_LAYER_KHRONOS_validation" && layer.spec_version ≥ v"1.3" && push!(layers, layer.layer_name)
end
ext_props = unwrap(enumerate_instance_extension_properties())
next = C_NULL
if !isnothing(findfirst(x -> x.extension_name == "VK_EXT_debug_utils", ext_props))
push!(exts, "VK_EXT_debug_utils")
debug_callback = @cfunction(
Vk.default_debug_callback,
UInt32,
(DebugUtilsMessageSeverityFlagEXT, DebugUtilsMessageTypeFlagEXT, Ptr{VkCore.VkDebugUtilsMessengerCallbackDataEXT}, Ptr{Cvoid})
)
messenger_create_info = DebugUtilsMessengerCreateInfoEXT(
DEBUG_UTILS_MESSAGE_SEVERITY_ERROR_BIT_EXT,
DEBUG_UTILS_MESSAGE_TYPE_GENERAL_BIT_EXT | DEBUG_UTILS_MESSAGE_TYPE_VALIDATION_BIT_EXT,
debug_callback,
)
next = messenger_create_info
end
instance = Instance(layers, exts; application_info = ApplicationInfo(v"0.1", v"0.1", v"1.3"), next)
messenger = DebugUtilsMessengerEXT(instance, messenger_create_info)
pdevices = unwrap(enumerate_physical_devices(instance))
physical_device = nothing
isempty(pdevices) && throw(PrecompilationError("No physical devices found which support Vulkan."))
@debug "Physical devices: "
for pdevice in pdevices
props = unwrap(get_physical_device_properties(pdevice))
@debug sprint(show, MIME"text/plain"(), props)
props.api_version ≥ v"1.3" && (physical_device = pdevice)
end
!isnothing(physical_device) || throw(PrecompilationError("No physical device was found which supports Vulkan 1.3 or higher."))
queue_props = get_physical_device_queue_family_properties(physical_device)
queue_family_index = findfirst(x -> (QUEUE_COMPUTE_BIT | QUEUE_GRAPHICS_BIT) in x.queue_flags, queue_props)
!isnothing(queue_family_index) || throw(PrecompilationError("No queue found which supports graphics and compute operations."))
queue_family_index -= 1 # 0-based indexing
device = Device(physical_device, [DeviceQueueCreateInfo(queue_family_index, [1.0])], [], [])
queue = get_device_queue(device, queue_family_index, 0)
command_pool = CommandPool(device, queue_family_index)
compute_workload(device, queue, command_pool)
true
end
"""
    compute_workload(device, queue, command_pool)

Run a small compute workload exercising buffer allocation, memory mapping,
descriptor updates, pipeline creation, command recording and submission.

Used as a precompilation workload: zeros out a host-visible buffer of 100
`Float32` items, dispatches the bundled `precompile_compute.spv` shader over it,
then waits for the queue to go idle and frees the transient objects.
Throws `PrecompilationError` when no suitable memory type is available.
"""
function compute_workload(device, queue, command_pool)
  data_items = 100
  data_size = sizeof(Float32) * data_items
  buffer = Buffer(
    device,
    data_size,
    BUFFER_USAGE_STORAGE_BUFFER_BIT,
    SHARING_MODE_EXCLUSIVE,
    [0], # queue family index, ignored because sharing mode is not concurrent
  )
  # Pick a memory type that is compatible with the buffer (bit i of
  # `memory_type_bits` set) and both host-visible and host-coherent.
  buffer_reqs = get_buffer_memory_requirements(device, buffer)
  memory_props = get_physical_device_memory_properties(device.physical_device)
  candidate_indices = findall(i -> buffer_reqs.memory_type_bits & (1 << (i - 1)) ≠ 0, 1:memory_props.memory_type_count)
  isempty(candidate_indices) && throw(PrecompilationError("No buffer memory available."))
  memory_type = findfirst(i -> (MEMORY_PROPERTY_HOST_VISIBLE_BIT | MEMORY_PROPERTY_HOST_COHERENT_BIT) in memory_props.memory_types[i].property_flags, candidate_indices)
  isnothing(memory_type) && throw(PrecompilationError("No host-visible and host-coherent buffer memory available."))
  memory_type = candidate_indices[memory_type] - 1
  mem = DeviceMemory(device, buffer_reqs.size, memory_type)
  unwrap(bind_buffer_memory(device, buffer, mem, 0 #= offset =#))
  # Map the memory and zero it out from the host side.
  memptr = unwrap(map_memory(device, mem, 0 #= offset =#, data_size))
  data = unsafe_wrap(Vector{Float32}, convert(Ptr{Float32}, memptr), data_items, own = false)
  data .= 0
  unwrap(flush_mapped_memory_ranges(device, [MappedMemoryRange(mem, 0 #= offset =#, data_size)]))
  shader_bcode = reinterpret(UInt32, read(joinpath(pkgdir(Vulkan, "assets", "precompile_compute.spv"))))
  shader = ShaderModule(device, sizeof(UInt32) * length(shader_bcode), shader_bcode)
  descriptor_set_layout = DescriptorSetLayout(
    device,
    [
      DescriptorSetLayoutBinding(
        0, # binding
        DESCRIPTOR_TYPE_STORAGE_BUFFER,
        SHADER_STAGE_COMPUTE_BIT;
        descriptor_count = 1,
      ),
    ],
  )
  pipeline_layout = PipelineLayout(
    device,
    [descriptor_set_layout],
    [PushConstantRange(SHADER_STAGE_COMPUTE_BIT, 0 #= offset =#, 8 #= size =#)],
  )
  # The workgroup size is provided to the shader as a specialization constant.
  local_size_x = UInt32(4)
  spec_consts = [local_size_x]
  pipeline_info = ComputePipelineCreateInfo(
    PipelineShaderStageCreateInfo(
      SHADER_STAGE_COMPUTE_BIT,
      shader,
      "main", # this needs to match the function name in the shader
      specialization_info = SpecializationInfo(
        [SpecializationMapEntry(0 #= id =#, 0 #= offset =#, 4 #= size =#)],
        UInt(sizeof(spec_consts)),
        Ptr{Nothing}(pointer(spec_consts)),
      ),
    ),
    pipeline_layout,
    -1,
  )
  (pipeline, _...), _ = unwrap(create_compute_pipelines(device, [pipeline_info]))
  descriptor_pool = DescriptorPool(device, 1 #= pool size =#, [DescriptorPoolSize(DESCRIPTOR_TYPE_STORAGE_BUFFER, 1)]; flags = DESCRIPTOR_POOL_CREATE_FREE_DESCRIPTOR_SET_BIT)
  dset, _... = unwrap(allocate_descriptor_sets(device, DescriptorSetAllocateInfo(descriptor_pool, [descriptor_set_layout])))
  # Point the shader's storage-buffer binding at our buffer.
  update_descriptor_sets(
    device,
    [
      WriteDescriptorSet(
        dset,
        0, # binding
        0, # array element
        DESCRIPTOR_TYPE_STORAGE_BUFFER,
        [],
        [DescriptorBufferInfo(buffer, 0 #= offset =#, data_size)],
        [],
      ),
    ],
    [],
  )
  command_buffer, _... = unwrap(
    allocate_command_buffers(
      device,
      CommandBufferAllocateInfo(command_pool, COMMAND_BUFFER_LEVEL_PRIMARY, 1),
    ),
  )
  # Record and submit the dispatch.
  begin_command_buffer(
    command_buffer,
    CommandBufferBeginInfo(flags = COMMAND_BUFFER_USAGE_ONE_TIME_SUBMIT_BIT),
  )
  cmd_bind_pipeline(command_buffer, PIPELINE_BIND_POINT_COMPUTE, pipeline)
  push_constants = [(Float32(1.234), UInt32(data_items))]
  cmd_push_constants(
    command_buffer,
    pipeline_layout,
    SHADER_STAGE_COMPUTE_BIT,
    0, # offset
    sizeof(push_constants), # size
    Ptr{Nothing}(pointer(push_constants)),
  )
  cmd_bind_descriptor_sets(command_buffer, PIPELINE_BIND_POINT_COMPUTE, pipeline_layout, 0 #= first set =#, [dset], [])
  cmd_dispatch(command_buffer, div(data_items, local_size_x, RoundUp), 1 #= group count y =#, 1 #= group count z =#)
  end_command_buffer(command_buffer)
  unwrap(queue_submit(queue, [SubmitInfo([], [], [command_buffer], [])]))
  # Keep alive every object the device may still be using until the queue is idle.
  GC.@preserve buffer descriptor_set_layout pipeline_layout spec_consts pipeline push_constants begin
    unwrap(queue_wait_idle(queue))
  end
  free_command_buffers(device, command_pool, [command_buffer])
  free_descriptor_sets(device, descriptor_pool, [dset])
  unwrap(invalidate_mapped_memory_ranges(device, [MappedMemoryRange(mem, 0 #= offset =#, data_size)]))
end
| Vulkan | https://github.com/JuliaGPU/Vulkan.jl.git |
|
[
"MIT"
] | 0.6.21 | 14499ab752f08ebfc0e162a718c127b29997883c | code | 1385 | using Preferences: Preferences, @load_preference
# Forward to `Preferences.set_preferences!`, targeting this module's preferences.
function set_preferences!(args...; kwargs...)
  return Preferences.set_preferences!(@__MODULE__, args...; kwargs...)
end
# Forward to `Preferences.load_preference`, reading this module's preferences.
function load_preference(args...; kwargs...)
  return Preferences.load_preference(@__MODULE__, args...; kwargs...)
end
"""
    @pref_log_destruction handle ex

Evaluate `ex` (an expression yielding whether `handle` was actually destroyed),
logging the outcome when the "LOG_DESTRUCTION" preference is set to "true".

The preference is read at macro expansion time, so toggling it only takes
effect after the package is recompiled. Output goes through `jl_safe_printf`
rather than Julia I/O, presumably so it remains usable inside finalizers —
TODO confirm.
"""
macro pref_log_destruction(handle, ex)
  if @load_preference("LOG_DESTRUCTION", "false") == "true"
    quote
      premsg = string("(thread ", Threads.threadid(), ") Finalizing ", $(esc(handle)))
      @ccall jl_safe_printf(premsg::Cstring)::Cvoid
      was_destroyed = $(esc(ex))
      # Green "destroyed" if the expression reported destruction, yellow otherwise.
      msg =
        was_destroyed ? ":\e[32m destroyed\e[m\n" :
        ":\e[33m nothing to do\e[m\n"
      @ccall jl_safe_printf(msg::Cstring)::Cvoid
    end
  else
    # Logging disabled: expand to the bare expression, with no overhead.
    esc(ex)
  end
end
"""
    @pref_log_refcount ex

Evaluate `ex`, printing the reference count of `handle` before and after when
the "LOG_REFCOUNT" preference is set to "true" (read at macro expansion time).

NOTE(review): the generated code reads a variable literally named `handle`; the
whole expansion is escaped into the caller's scope, so this macro must be used
where a binding named `handle` exists (as in `increment_refcount!` et al.).
"""
macro pref_log_refcount(ex)
  if @load_preference("LOG_REFCOUNT", "false") == "true"
    ex = quote
      val = Int(handle.refcount.value)
      @ccall jl_safe_printf("Refcount: $handle: $val "::Cstring)::Cvoid
      $ex
      val_after = Int(handle.refcount.value)
      # Red down-arrow for a decrement, green up-arrow otherwise.
      if val_after < val
        @ccall jl_safe_printf("\e[31m↓\e[m"::Cstring)::Cvoid
      else
        @ccall jl_safe_printf("\e[32m↑\e[m"::Cstring)::Cvoid
      end
      @ccall jl_safe_printf(" $val_after\n"::Cstring)::Cvoid
    end
  end
  esc(ex)
end
| Vulkan | https://github.com/JuliaGPU/Vulkan.jl.git |
|
[
"MIT"
] | 0.6.21 | 14499ab752f08ebfc0e162a718c127b29997883c | code | 180 | include("prewrap/types.jl")
include("prewrap/handles.jl")
include("prewrap/pointers.jl")
include("prewrap/conversions.jl")
include("prewrap/errors.jl")
include("prewrap/spirv.jl")
| Vulkan | https://github.com/JuliaGPU/Vulkan.jl.git |
|
[
"MIT"
] | 0.6.21 | 14499ab752f08ebfc0e162a718c127b29997883c | code | 1042 | function Base.show(io::IO, pdp::PhysicalDeviceProperties)
print(io, pdp.device_name, " - ", pdp.device_type, " (driver: ", pdp.driver_version, ", supported Vulkan API: ", pdp.api_version, ")")
end
Base.show(io::IO, x::PhysicalDeviceMemoryProperties) = print(io, "PhysicalDeviceMemoryProperties($(x.memory_types[1:x.memory_type_count]), $(x.memory_heaps[1:x.memory_heap_count]))")
const PhysicalDeviceVulkanFeatures_T = Union{PhysicalDeviceVulkan11Features,PhysicalDeviceVulkan12Features,PhysicalDeviceVulkan13Features}
# Print only the feature flags that are enabled, e.g. `PhysicalDeviceFeatures(wideLines)`.
# For the versioned feature structs, the `next` chain member is printed first.
function Base.show(io::IO, features::T) where {T<:Union{PhysicalDeviceFeatures,PhysicalDeviceVulkanFeatures_T}}
  members = filter(name -> name ∉ (:s_type, :next), collect(fieldnames(T)))
  enabled = [name for name in members if Bool(getproperty(features, name))]
  print(io, nameof(T), '(')
  if features isa PhysicalDeviceVulkanFeatures_T
    print(io, "next=", features.next)
    !isempty(enabled) && print(io, ", ")
  end
  print(io, join(enabled, ", "), ')')
end
| Vulkan | https://github.com/JuliaGPU/Vulkan.jl.git |
|
[
"MIT"
] | 0.6.21 | 14499ab752f08ebfc0e162a718c127b29997883c | code | 580 | """
Chain all arguments together in a `next` chain. to form a new structure `next` chain.
If `nexts` is empty, `C_NULL` is returned.
"""
function chain(nexts::HighLevelStruct...)
isempty(nexts) && return C_NULL
foldr((a, b) -> (@set a.next = b), nexts)
end
"""
Break a `next` chain into its constituents, with all `next` members set to `C_NULL`.
"""
function unchain(x::HighLevelStruct)
unchained = Any[@set x.next = C_NULL]
(; next) = x
while next ≠ C_NULL
push!(unchained, @set next.next = C_NULL)
(; next) = next
end
unchained
end
| Vulkan | https://github.com/JuliaGPU/Vulkan.jl.git |
|
[
"MIT"
] | 0.6.21 | 14499ab752f08ebfc0e162a718c127b29997883c | code | 3498 | using Logging
# From https://discourse.julialang.org/t/task-switch-not-allowed-from-inside-staged-nor-pure-functions/20488/9
# Define safe loggers for use in finalizers (where task switches are not allowed).
# Generates `@safe_debug`, `@safe_info`, `@safe_warn` and `@safe_error`, which
# forward to the corresponding standard logging macros through a plain
# `ConsoleLogger` writing straight to `stderr`, keeping the minimum level of the
# current global logger.
for level in [:debug, :info, :warn, :error]
  @eval macro $(Symbol("safe_$level"))(ex...)
    macrocall = Expr(:macrocall, Symbol($"@$level"), __source__, ex...)
    quote
      # Log to `Base.stderr`
      with_logger(Logging.ConsoleLogger(stderr, Logging.min_enabled_level(global_logger()))) do
        $(esc(macrocall))
      end
    end
  end
end
# All severity bits, ordered from least to most severe; used to select every
# severity at or above a given minimum when creating a messenger.
const message_severities = [
  DEBUG_UTILS_MESSAGE_SEVERITY_VERBOSE_BIT_EXT,
  DEBUG_UTILS_MESSAGE_SEVERITY_INFO_BIT_EXT,
  DEBUG_UTILS_MESSAGE_SEVERITY_WARNING_BIT_EXT,
  DEBUG_UTILS_MESSAGE_SEVERITY_ERROR_BIT_EXT,
]
"""
Default callback for debugging with [`DebugUtilsMessengerEXT`](@ref).
"""
function default_debug_callback(message_severity, message_type, callback_data_ptr, user_data_ptr)
callback_data_ptr == C_NULL && return UInt32(0)
callback_data = unsafe_load(callback_data_ptr)
message = unsafe_string(callback_data.pMessage)
# Ignore messages about available device extensions.
if !startswith(message, "Device Extension: VK")
id_name = unsafe_string(callback_data.pMessageIdName)
msg_type = @match message_type begin
&DEBUG_UTILS_MESSAGE_TYPE_GENERAL_BIT_EXT => "General"
&DEBUG_UTILS_MESSAGE_TYPE_VALIDATION_BIT_EXT => "Validation"
&DEBUG_UTILS_MESSAGE_TYPE_PERFORMANCE_BIT_EXT => "Performance"
_ => error("Unknown message type $message_type")
end
log = string("$msg_type ($id_name): $message")
@match message_severity begin
&DEBUG_UTILS_MESSAGE_SEVERITY_VERBOSE_BIT_EXT => @safe_debug log
&DEBUG_UTILS_MESSAGE_SEVERITY_INFO_BIT_EXT => @safe_info log
&DEBUG_UTILS_MESSAGE_SEVERITY_WARNING_BIT_EXT => @safe_warn log
&DEBUG_UTILS_MESSAGE_SEVERITY_ERROR_BIT_EXT => @safe_error log
_ => error("Unknown message severity $message_severity")
end
end
UInt32(0)
end
"""
Register a user-defined callback and return the corresponding messenger.
All the levels from `min_severity` will be included. Note that this controls only what messages are sent to the callback. The logging function may use logging macros such as `@info` or `@error` to easily filter logs through the Julia logging system.
A default function [`default_debug_callback`](@ref) can be converted to a function pointer to use as a callback.
!!! warning
`callback` must be a function pointer of type `Ptr{Nothing}` obtained from a `callback_f` function as follows:
`callback = @cfunction(callback_f, UInt32, (DebugUtilsMessageSeverityFlagEXT, DebugUtilsMessageTypeFlagEXT, Ptr{VkCore.VkDebugUtilsMessengerCallbackDataEXT}, Ptr{Cvoid}))`
with `callback_f` a Julia function with a signature matching the `@cfunction` call.
"""
function DebugUtilsMessengerEXT(
instance::Instance,
callback::Ptr{Nothing};
min_severity = DEBUG_UTILS_MESSAGE_SEVERITY_VERBOSE_BIT_EXT,
types = DEBUG_UTILS_MESSAGE_TYPE_GENERAL_BIT_EXT | DEBUG_UTILS_MESSAGE_TYPE_VALIDATION_BIT_EXT | DEBUG_UTILS_MESSAGE_TYPE_PERFORMANCE_BIT_EXT
)
index = findfirst(==(min_severity), message_severities)
severity_bits = message_severities[index:end]
DebugUtilsMessengerEXT(instance, |(severity_bits...), types, callback)
end
| Vulkan | https://github.com/JuliaGPU/Vulkan.jl.git |
|
[
"MIT"
] | 0.6.21 | 14499ab752f08ebfc0e162a718c127b29997883c | code | 6387 | # Copyright (c) 2019 JuliaInterop
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
# copied from https://github.com/JuliaLang/julia/pull/30924
# modified to be in compliance with C99: http://port70.net/~nsz/c/c99/n1256.html#6.7.2.2
module CEnum
import Core.Intrinsics.bitcast
export @cenum
# Stub functions: `@cenum` generates one method of each per defined enum type.
function namemap end
function name_value_pairs end
"""
    Cenum{T<:Integer}
The abstract supertype of all enumerated types defined with [`@cenum`](@ref).
"""
abstract type Cenum{T<:Integer} end
# Return the underlying integer type of a `Cenum` subtype.
basetype(::Type{<:Cenum{T}}) where {T<:Integer} = T
# Conversion to integers goes through a bitcast of the raw value.
(::Type{T})(x::Cenum{T2}) where {T<:Integer,T2<:Integer} = T(bitcast(T2, x))::T
Base.cconvert(::Type{T}, x::Cenum{T2}) where {T<:Integer,T2<:Integer} = T(x)
# Enums are (de)serialized and ordered through their integer value.
Base.write(io::IO, x::Cenum{T}) where {T<:Integer} = write(io, T(x))
Base.read(io::IO, ::Type{T}) where {T<:Cenum} = T(read(io, basetype(T)))
Base.isless(x::T, y::T) where {T<:Cenum} = isless(basetype(T)(x), basetype(T)(y))
# Value-to-name lookup; values without a registered name print as `:UnknownMember`.
Base.Symbol(x::Cenum)::Symbol = get(namemap(typeof(x)), Integer(x), :UnknownMember)
Base.print(io::IO, x::Cenum) = print(io, Symbol(x))
function Base.show(io::IO, x::Cenum)
  sym = Symbol(x)
  if !get(io, :compact, false)
    # Qualify the member name with its defining module when it is not visible
    # from the display context.
    from = get(io, :module, Main)
    def = typeof(x).name.module
    if from === nothing || !Base.isvisible(sym, def, from)
      show(io, def)
      print(io, ".")
    end
  end
  print(io, sym)
end
function Base.show(io::IO, ::MIME"text/plain", x::Cenum)
  print(io, x, "::")
  show(IOContext(io, :compact => true), typeof(x))
  print(io, " = ")
  show(io, Integer(x))
end
function Base.show(io::IO, ::MIME"text/plain", t::Type{<:Cenum})
  print(io, "Cenum ")
  Base.show_datatype(io, t)
  print(io, ":")
  for (s, i) in name_value_pairs(t)
    print(io, "\n", Symbol(s), " = ")
    show(io, Integer(i))
  end
end
# give Cenum types scalar behavior in broadcasting
Base.broadcastable(x::Cenum) = Ref(x)
@noinline enum_argument_error(typename, x) = throw(ArgumentError(string("input value out of range for Cenum $(typename): $x")))
"""
    @cenum EnumName[::BaseType] name[=value] ...

Define a C99-compliant enum type `EnumName <: Cenum{BaseType}` (base type
`Int32` by default), along with a constant for each member. Unlike
`Base.@enum`, duplicate values are allowed (the first name registered for a
value wins for value-to-name lookup), and constructing an instance from any
value representable in the base type succeeds.
"""
macro cenum(T, syms...)
  if isempty(syms)
    throw(ArgumentError("no arguments given for Cenum $T"))
  end
  basetype = Int32
  typename = T
  # `T` may be a bare name or a `Name::BaseType` annotation.
  if isa(T, Expr) && T.head == :(::) && length(T.args) == 2 && isa(T.args[1], Symbol)
    typename = T.args[1]
    basetype = Core.eval(__module__, T.args[2])
    if !isa(basetype, DataType) || !(basetype <: Integer) || !isbitstype(basetype)
      throw(ArgumentError("invalid base type for Cenum $typename, $T=::$basetype; base type must be an integer primitive type"))
    end
  elseif !isa(T, Symbol)
    throw(ArgumentError("invalid type expression for Cenum $T"))
  end
  seen = Set{Symbol}()
  name_values = Tuple{Symbol,basetype}[]
  namemap = Dict{basetype,Symbol}()
  lo = hi = 0
  i = zero(basetype)
  # Accept a `begin ... end` block of members as a single argument.
  if length(syms) == 1 && syms[1] isa Expr && syms[1].head == :block
    syms = syms[1].args
  end
  for s in syms
    s isa LineNumberNode && continue
    if isa(s, Symbol)
      if i == typemin(basetype) && !isempty(name_values)
        throw(ArgumentError("overflow in value \"$s\" of Cenum $typename"))
      end
    elseif isa(s, Expr) &&
       (s.head == :(=) || s.head == :kw) &&
       length(s.args) == 2 && isa(s.args[1], Symbol)
      i = Core.eval(__module__, s.args[2]) # allow exprs, e.g. uint128"1"
      if !isa(i, Integer)
        throw(ArgumentError("invalid value for Cenum $typename, $s; values must be integers"))
      end
      i = convert(basetype, i)
      s = s.args[1]
    else
      throw(ArgumentError(string("invalid argument for Cenum ", typename, ": ", s)))
    end
    if !Base.isidentifier(s)
      throw(ArgumentError("invalid name for Cenum $typename; \"$s\" is not a valid identifier"))
    end
    haskey(namemap, i) || (namemap[i] = s;)
    if s in seen
      throw(ArgumentError("name \"$s\" in Cenum $typename is not unique"))
    end
    push!(seen, s)
    push!(name_values, (s,i))
    if length(name_values) == 1
      lo = hi = i
    else
      lo = min(lo, i)
      hi = max(hi, i)
    end
    i += oneunit(i)
  end
  blk = quote
    # enum definition
    Base.@__doc__(primitive type $(esc(typename)) <: Cenum{$(basetype)} $(sizeof(basetype) * 8) end)
    function $(esc(typename))(x::Integer)
      # NOTE(review): `x ≤ typemax(x)` compares `x` against its own type's
      # maximum and thus always holds, so `enum_argument_error` is never
      # reached here; out-of-range values are instead caught by the `convert`
      # below (consistent with the C99-compliance note at the top of the file,
      # where any base-type value is a valid enum value) — confirm intent.
      x ≤ typemax(x) || enum_argument_error($(Expr(:quote, typename)), x)
      return bitcast($(esc(typename)), convert($(basetype), x))
    end
    CEnum.namemap(::Type{$(esc(typename))}) = $(esc(namemap))
    CEnum.name_value_pairs(::Type{$(esc(typename))}) = $(esc(name_values))
    Base.typemin(x::Type{$(esc(typename))}) = $(esc(typename))($lo)
    Base.typemax(x::Type{$(esc(typename))}) = $(esc(typename))($hi)
    let insts = (Any[ $(esc(typename))(v[2]) for v in $name_values ]...,)
      Base.instances(::Type{$(esc(typename))}) = insts
    end
  end
  # Member constants can only be defined for an unqualified type name.
  if isa(typename, Symbol)
    for (sym, i) in name_values
      push!(blk.args, :(const $(esc(sym)) = $(esc(typename))($i)))
    end
  end
  push!(blk.args, :nothing)
  blk.head = :toplevel
  return blk
end
include("operators.jl")
end # module
| Vulkan | https://github.com/JuliaGPU/Vulkan.jl.git |
|
[
"MIT"
] | 0.6.21 | 14499ab752f08ebfc0e162a718c127b29997883c | code | 715 | import Base: +, -, *, &, |, xor, ==, ~
# Arithmetic, bitwise and comparison operators between `Cenum`s and integers:
# both operands are converted to their promoted integer type, so the result is
# an integer (not a `Cenum`).
for op in (:+, :-, :&, :|, :xor, :(==))
  @eval begin
    function ($op)(a::Cenum{T}, b::Cenum{S}) where {T<:Integer,S<:Integer}
      N = promote_type(T, S)
      ($op)(N(a), N(b))
    end
    function ($op)(a::Cenum{T}, b::S) where {T<:Integer,S<:Integer}
      N = promote_type(T, S)
      ($op)(N(a), N(b))
    end
    function ($op)(a::T, b::Cenum{S}) where {T<:Integer,S<:Integer}
      N = promote_type(T, S)
      ($op)(N(a), N(b))
    end
  end
end
# Bitwise complement, computed on the underlying integer value.
~(a::Cenum{T}) where {T<:Integer} = ~(T(a))
Base.convert(::Type{T1}, x::Cenum{T2}) where {T1<:Integer,T2<:Integer} = convert(T1, T2(x))
| Vulkan | https://github.com/JuliaGPU/Vulkan.jl.git |
|
[
"MIT"
] | 0.6.21 | 14499ab752f08ebfc0e162a718c127b29997883c | code | 1578 | """
Convert a type into its corresponding Vulkan type.
### Examples
```jldoctest
julia> to_vk(UInt32, v"1")
0x00400000
julia> to_vk(NTuple{6, UInt8}, "hello")
(0x68, 0x65, 0x6c, 0x6c, 0x6f, 0x00)
```
"""
function to_vk end
to_vk(T, x) = T(x)
to_vk(::Type{T}, x::T) where {T} = x
to_vk(::Type, x::VulkanStruct) = x.vks
to_vk(T::Type{<:NTuple}, x) = to_vk.(eltype(T), x)
to_vk(T::Type{UInt32}, version::VersionNumber) = VK_MAKE_VERSION(version.major, version.minor, version.patch)
to_vk(T::Type{NTuple{N,UInt8}}, s::AbstractString) where {N} = T(s * '\0' ^ (N - length(s)))
"""
Convert a Vulkan type into its corresponding Julia type.
### Examples
```jldoctest
julia> from_vk(VersionNumber, UInt32(VkCore.VK_MAKE_VERSION(1, 2, 3)))
v"1.2.3"
julia> from_vk(String, (0x68, 0x65, 0x6c, 0x6c, 0x6f, 0x00))
"hello"
julia> from_vk(Bool, UInt32(1))
true
```
"""
function from_vk end
from_vk(T::Type{<:VulkanStruct{false}}, x) = T(x)
from_vk(T::Type{<:VulkanStruct{true}}, x) = T(x, [])
from_vk(T::Type{<:HighLevelStruct}, x, next_types...) = T(x, next_types...)
from_vk(T, x) = convert(T, x)
from_vk(T::Type{<:NTuple}, x) = from_vk.(eltype(T), x)
from_vk(T::Type{VersionNumber}, version::UInt32) = T(VK_VERSION_MAJOR(version), VK_VERSION_MINOR(version), VK_VERSION_PATCH(version))
# Decode a NUL-terminated fixed-size C character array into a string, dropping
# the terminator and everything after it (the whole tuple is used when no
# terminator is present).
function from_vk(T::Type{S}, @nospecialize(str::NTuple{N})) where {N,S <: AbstractString}
  bytes = collect(str)
  terminator = findfirst(iszero, bytes)
  terminator == 1 && return ""
  isnothing(terminator) || (bytes = bytes[1:terminator - 1])
  T(reinterpret(UInt8, bytes))
end
| Vulkan | https://github.com/JuliaGPU/Vulkan.jl.git |
|
[
"MIT"
] | 0.6.21 | 14499ab752f08ebfc0e162a718c127b29997883c | code | 1217 | """
Exception type indicating that an API function
returned a non-success code.
"""
struct VulkanError <: Exception
msg::String
code
end
Base.showerror(io::IO, e::VulkanError) = print(io, e.code, ": ", e.msg)
"""
@check vkCreateInstance(args...)
Assign the expression to a variable named `_return_code`. Then, if the value is not a success code, return a [`VulkanError`](@ref) holding the return code.
"""
macro check(expr)
msg = string("failed to execute ", expr)
esc(:(@check $expr $msg))
end
# Two-argument form: `msg` becomes the `VulkanError` message. Negative result
# codes are treated as errors and cause an early `return`; zero and positive
# codes (e.g. incomplete results) fall through as the value of the expansion.
macro check(expr, msg)
  esc(quote
    _return_code = $expr
    _code_int = Int32(_return_code)
    if _code_int < 0
      return VulkanError($msg, Result(_code_int))
    end
    _return_code
  end)
end
"""
Evaluate the given expression, and reexecute it while the local variable `_return_code` equals to `VK_INCOMPLETE`.
"""
macro repeat_while_incomplete(expr)
esc(quote
$expr
while _return_code == VK_INCOMPLETE
$expr
end
end)
end
"""
    @propagate_errors ex

Evaluate `ex` (a result value understood by `iserror`/`unwrap`): if it holds an
error, `return` the unwrapped error from the enclosing function; otherwise
yield the unwrapped value.
"""
macro propagate_errors(expr)
  quote
    ret = $(esc(expr))
    if iserror(ret)
      return unwrap_error(ret)
    else
      unwrap(ret)
    end
  end
end
| Vulkan | https://github.com/JuliaGPU/Vulkan.jl.git |
|
[
"MIT"
] | 0.6.21 | 14499ab752f08ebfc0e162a718c127b29997883c | code | 4734 | """
Opaque handle referring to internal Vulkan data.
Finalizer registration is taken care of by constructors.
"""
abstract type Handle <: VulkanStruct{false} end
Base.:(==)(x::H, y::H) where {H<:Handle} = x.vks == y.vks
Base.hash(handle::Handle, h::UInt) = hash(handle.vks, h)
Base.show(io::IO, h::Handle) = print(io, typeof(h), '(', h.vks, ')')
"""
handle(x)::Handle
Return a [`Handle`](@ref) for use with Vulkan API functions.
This function should be implemented for types that wrap handles,
if these handles are to be passed on to API functions directly.
"""
function handle end
handle(h::Handle) = h
# Thread-safe reference count deciding when a handle may actually be destroyed.
const RefCounter = Threads.Atomic{UInt}
function increment_refcount!(handle::Handle)
  @pref_log_refcount Threads.atomic_add!(handle.refcount, UInt(1))
end
function decrement_refcount!(handle::Handle)
  @pref_log_refcount Threads.atomic_sub!(handle.refcount, UInt(1))
end
"""
    try_destroy(f, handle, parent)

Release one reference to `handle`; when the count reaches zero, destroy it by
calling `f(handle)` and trigger `parent`'s destructor (releasing the reference
taken on the parent at construction time).

Returns `true` when destruction was carried out, and the remaining reference
count otherwise.
"""
function try_destroy(f, handle::Handle, parent)
  decrement_refcount!(handle)
  if iszero(handle.refcount[])
    # `f(handle) ≠ handle` is forwarded to the logging macro as the
    # "was destroyed" flag — presumably destruction functions return their
    # input unchanged when there was nothing to do; TODO confirm.
    @pref_log_destruction handle f(handle) ≠ handle
    if !isnothing(parent) && !isa(parent.destructor, UndefInitializer)
      parent.destructor()
    end
    return true
  end
  handle.refcount[]
end
# Wire `handle.destructor` to the reference-counted destruction logic and
# register a finalizer that invokes it; returns the handle (value of `finalizer`).
function init_handle!(handle::Handle, destructor, parent = nothing)
  handle.destructor = () -> try_destroy(destructor, handle, parent)
  finalizer(x -> handle.destructor(), handle)
end
# Construct a handle from a raw pointer with an initial reference count of 1.
function (T::Type{<:Handle})(ptr::Ptr{Cvoid}, destructor)
  init_handle!(T(ptr, RefCounter(UInt(1))), destructor)
end
# Same, with a parent handle: the parent's count is incremented so it outlives
# the child (the reference is released in `try_destroy`).
function (T::Type{<:Handle})(ptr::Ptr{Cvoid}, destructor, parent)
  parent = handle(parent)
  increment_refcount!(parent)
  init_handle!(T(ptr, parent, RefCounter(UInt(1))), destructor, parent)
end
"""
depends_on(x, handle::Handle)
Make reference counting aware that `x` depends on `handle`.
This ensures that `handle` is destroyed *after* `x`, and not the other way around.
This may notably be used to encode dependencies that fall out of Vulkan's handle hierarchy,
such as between a `SurfaceKHR` and a `SwapchainKHR`.
If `x` is not a `Handle`, it must be a mutable object; in this case, a finalizer will be added
which decrements the `handle`'s reference count (and destroys them if it reaches zero).
`depends_on(x, handle)` is idempotent: multiple calls to it will simply incur needless incrementing/decrementing and finalizer registrations, possibly harming performance, but will not cause bugs.
If one is a parent handle of the other (i.e. `Vk.parent(x) === handle`), `depends_on(x, handle)` is already implicit, and needs not be used.
!!! warning
`depends_on` must not be used in a circular manner: using both `depends_on(x, y)` and `depends_on(y, x)` will prevent both `x` and `y` from ever being destroyed. Same for `depends_on(x, y)`, `depends_on(y, z)`, `depends_on(z, x)` and so on.
"""
function depends_on end
function depends_on(x::Vk.Handle, handle::Vk.Handle)
Vk.increment_refcount!(handle)
prev_destructor = x.destructor
x.destructor = () -> begin
prev_destructor()
iszero(x.refcount[]) && handle.destructor()
end
nothing
end
# Generic fallback for non-handle (mutable) objects.
function depends_on(x, handle::Vk.Handle)
  T = typeof(x)
  ismutabletype(T) || error("`x` must be a mutable object or a `Vk.Handle`")
  # Take a reference on `handle` for the lifetime of `x`, released by the
  # finalizer below. Without this increment the finalizer would release a
  # reference that was never taken (stealing one from another owner), causing
  # `handle` to be destroyed too early — the opposite of what `depends_on`
  # promises. This mirrors the `Handle`-to-`Handle` method above, which
  # increments before chaining the destructor.
  Vk.increment_refcount!(handle)
  finalizer(_ -> handle.destructor(), x)
  nothing
end
"""
    @dispatch handle f(args...)

Rewrite the function call to append a function pointer retrieved from the
global dispatcher for `handle`, when the "USE_DISPATCH_TABLE" preference is
enabled (the default); otherwise leave the call untouched. The preference is
read at macro expansion time.
"""
macro dispatch(handle, expr)
  if @load_preference("USE_DISPATCH_TABLE", "true") == "true"
    @match expr begin
      :($f($(args...))) => begin
        quote
          fptr = function_pointer(global_dispatcher[], $(esc(handle)), $(QuoteNode(f)))
          $f($(esc.(args)...), fptr)
        end
      end
      _ => error("Expected a function call, got $expr")
    end
  else
    esc(expr)
  end
end
"""
Obtain a function pointer from `source` and `handle`, and append the retrieved pointer to the function call arguments of `expr`.
No effect if the preference "USE_DISPATCH_TABLE" is not enabled.
"""
macro dispatch(source, handle, expr)
handle = esc(handle)
if @load_preference("USE_DISPATCH_TABLE", "true") == "true"
@match expr begin
:($f($(args...); $(kwargs...))) => quote
fptr = function_pointer(global_dispatcher[], $handle, $(QuoteNode(source)))
$f($(esc.(args)...), fptr; $(esc.(kwargs)...))
end
_ => error("Expected a function call, got $expr")
end
else
esc(expr)
end
end
"""
    @fill_dispatch_table handle

Evaluate `handle`, fill the dispatch table with its function pointers, and
return it. Expands to just `handle` when the "USE_DISPATCH_TABLE" preference
is disabled.
"""
macro fill_dispatch_table(handle)
  handle = esc(handle)
  @load_preference("USE_DISPATCH_TABLE", "true") ≠ "true" && return handle
  quote
    handle = $handle
    fill_dispatch_table(handle)
    handle
  end
end
| Vulkan | https://github.com/JuliaGPU/Vulkan.jl.git |
|
[
"MIT"
] | 0.6.21 | 14499ab752f08ebfc0e162a718c127b29997883c | code | 5642 | # Make sure our dispatches for vectors are hit before any other method.
# Unfortunately, we'll still need to add dispatches from `Base.cconvert` to this `cconvert`
# because `Base.cconvert` is what will be called during `ccall`s, not this function.
cconvert(T, x) = Base.cconvert(T, x)
Base.cconvert(T::Type{Ptr{Cvoid}}, x::Handle) = x
Base.cconvert(T::Type{<:Ptr}, x::VulkanStruct{false}) = Ref(x.vks)
Base.cconvert(T::Type{<:Ptr}, x::VulkanStruct{true}) = (x, Ref(x.vks))
Base.cconvert(T::Type{<:Ptr}, x::HighLevelStruct) = Base.cconvert(T, convert(getproperty(@__MODULE__, Symbol(:_, nameof(typeof(x)))), x))
cconvert(T::Type{<:Ptr}, x::AbstractVector{<:VulkanStruct{false}}) = Base.cconvert(T, getproperty.(x, :vks))
cconvert(T::Type{<:Ptr}, x::AbstractVector{<:VulkanStruct{true}}) = (x, Base.cconvert(T, getproperty.(x, :vks)))
cconvert(T::Type{<:Ptr}, x::AbstractVector{<:HighLevelStruct}) = Base.cconvert(T, convert.(getproperty(@__MODULE__, Symbol(:_, nameof(eltype(x)))), x))
Base.cconvert(T::Type{<:Ptr}, x::AbstractVector{<:VulkanStruct{false}}) = cconvert(T, x)
Base.cconvert(T::Type{<:Ptr}, x::AbstractVector{<:VulkanStruct{true}}) = cconvert(T, x)
Base.cconvert(T::Type{<:Ptr}, x::AbstractVector{<:HighLevelStruct}) = cconvert(T, x)
# Shadow the otherwise more specific Base method
# `cconvert(::Type{<:Ptr}, ::Array)`.
Base.cconvert(T::Type{<:Ptr}, x::Vector{<:VulkanStruct{false}}) = cconvert(T, x)
Base.cconvert(T::Type{<:Ptr}, x::Vector{<:VulkanStruct{true}}) = cconvert(T, x)
Base.cconvert(T::Type{<:Ptr}, x::Vector{<:HighLevelStruct}) = cconvert(T, x)
# Shadow the otherwise more specific Base method
# `cconvert(::Type{Ptr{P<:Union{Cstring,Cwstring,Ptr}}}, ::Array)`.
Base.cconvert(T::Type{Ptr{P}}, x::Vector{<:VulkanStruct{false}}) where {P<:Ptr} = cconvert(T, x)
Base.cconvert(T::Type{Ptr{P}}, x::Vector{<:VulkanStruct{true}}) where {P<:Ptr} = cconvert(T, x)
Base.cconvert(T::Type{Ptr{P}}, x::Vector{<:HighLevelStruct}) where {P<:Ptr} = cconvert(T, x)
convert(T::Type{Ptr{Cvoid}}, x::Handle) = x.vks
unsafe_convert(T::Type, x::VulkanStruct) = x.vks
unsafe_convert(T::Type, x::Tuple{<:VulkanStruct{true}, <:Ref}) = unsafe_convert(T, last(x))
unsafe_convert(T::Type, x::Tuple{<:AbstractVector{<:VulkanStruct{true}}, <:Any}) = unsafe_convert(T, last(x))
"""
`pointer_length(val)`
Return the length `val` considering it as an array.
Differ from `Base.length` in that `pointer_length(C_NULL) == 0`.
"""
function pointer_length end
pointer_length(arr::Ptr{Nothing}) = 0
pointer_length(arr::AbstractArray) = length(arr)
# Like `convert`, but passes `C_NULL` through untouched instead of converting it.
convert_nonnull(T, val) = convert(T, val)
convert_nonnull(T, val::Ptr{Cvoid}) = val == C_NULL ? val : convert(T, val)
# # Initialization.
@nospecialize
# Zero-like default values used to pre-fill structures the API will overwrite.
initialize(::Type{T}) where {T<:VkCore.CEnum.Cenum} = typemin(T)
initialize(::Type{T}) where {T} = zero(T)
initialize(::Type{NTuple{N,T}}) where {N,T} = ntuple(Returns(zero(T)), N)
initialize(::Type{T}) where {T<:AbstractArray} = T()
initialize(::Type{<:Ptr}) = C_NULL
# Produce a zero-filled instance of the isbits type `T` by loading it from
# freshly `calloc`ed (zero-initialized) memory.
function init_empty(T)
  @assert isbitstype(T)
  buffer = Libc.calloc(1, sizeof(T))
  value = unsafe_load(convert(Ptr{T}, buffer))
  Libc.free(buffer)
  return value
end
# Build a zero-initialized core struct of type `T` with `sType` set and a
# `pNext` chain threaded through `refs` (see `initialize_core` below).
function _initialize_core(T, refs)
  res = init_empty(T)
  sType = structure_type(T)
  if isempty(refs)
    pNext = C_NULL
  else
    # Recursively initialize the next element into the first ref, and point
    # `pNext` at it.
    ref = popfirst!(refs)
    ref[] = initialize_core(typeof(ref[]), refs)
    pNext = Base.unsafe_convert(Ptr{Cvoid}, ref)
  end
  setproperties(res, (; sType, pNext))
end
"""
    initialize_core(T, next_refs)

Initialize a core Vulkan structure, with `next` chain types specified in `refs`.
Every ref in `refs` will be used to construct an initialized `pNext` element, and will be filled with the *value* of the initialized type, acting as the pointer. Note that these references will have to be preserved for the initialized Vulkan structure to remain valid.
"""
initialize_core(T, refs) = _initialize_core(Base.inferencebarrier(T), Base.inferencebarrier(refs))
"""
Initialize an intermediate structure, with a `next` chain built from initialized `Tnext` structs.
"""
function _initialize(T::Type{<:VulkanStruct}, Tnext)
refs = Any[]
for t in Tnext
push!(refs, Ref{core_type(t)}())
end
vks = initialize_core(core_type(T), refs)
T(vks, refs)
end
"""
Initialize a high-level structure, with a `next` chain built from initialized `Tnext` structs.
"""
function _initialize(T::Type{<:HighLevelStruct}, Tnext)
args = []
for (name, t) in zip(fieldnames(T), fieldtypes(T))
if t === StructureType
push!(args, structure_type(T))
elseif name == :next
push!(args, isempty(Tnext) ? C_NULL : initialize(Tnext...))
else
push!(args, initialize(t))
end
end
T(args...)
end
"""
initialize(T, next_Ts...)
Initialize a value or Vulkan structure with the purpose of being
filled in by the API. The types can be either high-level or intermediate
wrapper types.
If `next_Ts` is not empty and `T` designates
a Vulkan structure which can hold `next` chains, then the corresponding
types will be initialized and added to the `next`/`pNext` member.
"""
initialize(T::Union{Type{<:HighLevelStruct}, Type{<:VulkanStruct}}, args...) = _initialize(Base.inferencebarrier(T), Base.inferencebarrier(collect(args)))
# Load a `next` chain element of type `T` from `ptr`, recursively passing the
# remaining chain types to its constructor. Returns `C_NULL` for an empty chain.
@noinline function _load_next_chain(ptr, next_types)
  isempty(next_types) && return C_NULL
  T, Ts... = next_types
  @assert ptr ≠ C_NULL
  vks = Base.unsafe_load(Ptr{core_type(T)}(ptr))
  T(vks, Ts...)
end
load_next_chain(ptr, args...) = _load_next_chain(Base.inferencebarrier(ptr), Base.inferencebarrier(collect(args)))
@specialize
| Vulkan | https://github.com/JuliaGPU/Vulkan.jl.git |
|
[
"MIT"
] | 0.6.21 | 14499ab752f08ebfc0e162a718c127b29997883c | code | 2254 | """
SPIR-V extension which may have been promoted to a core version or be enabled implicitly by enabled Vulkan extensions.
"""
struct SpecExtensionSPIRV
"Name of the SPIR-V extension."
name::String
"Core version of the Vulkan API in which the extension was promoted, if promoted."
promoted_to::Optional{VersionNumber}
"Vulkan extensions that implicitly enable the SPIR-V extension."
enabling_extensions::Vector{String}
end
"""
Condition that a feature needs to satisfy to be considered enabled.
"""
struct FeatureCondition
"Name of the feature structure relevant to the condition."
type::Symbol
"Member of the structure which must be set to true to enable the feature."
member::Symbol
"Core version corresponding to the structure, if any."
core_version::Optional{VersionNumber}
"Extension required for the corresponding structure, if any."
extension::Optional{String}
end
"""
Device property that enables a SPIR-V capability when supported.
"""
struct PropertyCondition
"Name of the property structure relevant to the condition."
type::Symbol
"Member of the property structure to be tested."
member::Symbol
"Required core version of the Vulkan API, if any."
core_version::Optional{VersionNumber}
"Required extension, if any."
extension::Optional{String}
"Whether the property to test is a boolean. If not, then it will be a bit from a bitmask."
is_bool::Bool
"Name of the bit enum that must be included in the property, if the property is not a boolean."
bit::Optional{Symbol}
end
"""
SPIR-V capability with information regarding various requirements to consider it enabled.

A capability may be implicitly enabled through a core version promotion, an
extension, a feature (`FeatureCondition`) or a property (`PropertyCondition`).
"""
struct SpecCapabilitySPIRV
    "Name of the SPIR-V capability."
    name::Symbol
    "Core version of the Vulkan API in which the SPIR-V capability was promoted, if promoted."
    promoted_to::Optional{VersionNumber}
    "Vulkan extensions that implicitly enable the SPIR-V capability."
    enabling_extensions::Vector{String}
    "Vulkan features that implicitly enable the SPIR-V capability."
    enabling_features::Vector{FeatureCondition}
    "Vulkan properties that implicitly enable the SPIR-V capability."
    enabling_properties::Vector{PropertyCondition}
end
| Vulkan | https://github.com/JuliaGPU/Vulkan.jl.git |
|
[
"MIT"
] | 0.6.21 | 14499ab752f08ebfc0e162a718c127b29997883c | code | 594 | """
Represents any kind of wrapper structure that was generated from a Vulkan structure.
`D` is a `Bool` parameter indicating whether the structure has specific dependencies or not.
"""
abstract type VulkanStruct{D} end
# Treat wrapper structs as scalars under broadcasting (`Ref` opts out of iteration).
Base.broadcastable(x::VulkanStruct) = Ref(x)
# A C-compatible function pointer: either a raw pointer or a `@cfunction` closure object.
const FunctionPtr = Union{Ptr{Cvoid}, Base.CFunction}
# Either a value of type `T`, or a raw pointer (e.g. `C_NULL`) standing in for it.
const OptionalPtr{T} = Union{T, Ptr{Cvoid}}
"""
High-level structure with idiomatic Julia types.
"""
abstract type HighLevelStruct end
# Treat high-level structs as scalars under broadcasting.
Base.broadcastable(x::HighLevelStruct) = Ref(x)
# Identity fallbacks for type introspection: a type maps to itself unless a more
# specific method overrides it (specific mappings are presumably generated
# elsewhere — not visible in this chunk).
hl_type(T::Type{<:HighLevelStruct}) = T
intermediate_type(T::Type{<:VulkanStruct}) = T
| Vulkan | https://github.com/JuliaGPU/Vulkan.jl.git |
|
[
"MIT"
] | 0.6.21 | 14499ab752f08ebfc0e162a718c127b29997883c | code | 6840 | const CALLBACK_DEBUG_SIGNAL = Ref(false)
# Test wrapper around the library's default debug callback: record that the
# callback fired (via `CALLBACK_DEBUG_SIGNAL`) before delegating.
function debug_callback(args...)
    CALLBACK_DEBUG_SIGNAL[] = true
    default_debug_callback(args...)
end
# C-callable pointer to `debug_callback`, with the signature expected of a
# debug utils messenger callback.
const debug_callback_c = @cfunction(debug_callback, UInt32, (DebugUtilsMessageSeverityFlagEXT, DebugUtilsMessageTypeFlagEXT, Ptr{VkCore.VkDebugUtilsMessengerCallbackDataEXT}, Ptr{Cvoid}))
# Vulkan API version targeted by the test suite.
const API_VERSION = v"1.3"
const VALIDATION_LAYER = "VK_LAYER_KHRONOS_validation"
# Layers/extensions/features to enable; filled in below based on availability.
const INSTANCE_LAYERS = String[
]
const INSTANCE_EXTENSIONS = String[
]
const DEVICE_EXTENSIONS = String[
]
const ENABLED_FEATURES = PhysicalDeviceFeatures(
)
# Enable the Khronos validation layer when it is available on this system,
# warning otherwise. This `let` block is executed purely for its side effect on
# INSTANCE_LAYERS; the availability flag itself is not needed afterwards.
let available_layers = unwrap(enumerate_instance_layer_properties())
    if VALIDATION_LAYER ∈ getproperty.(available_layers, :layer_name)
        push!(INSTANCE_LAYERS, VALIDATION_LAYER)
    else
        @warn "Validation layer not found."
    end
end
# Enable the debug utils extension when available; `WITH_DEBUG` gates all
# debug-related tests below.
const WITH_DEBUG = let available_extensions = unwrap(enumerate_instance_extension_properties())
    if "VK_EXT_debug_utils" ∈ getproperty.(available_extensions, :extension_name)
        push!(INSTANCE_EXTENSIONS, "VK_EXT_debug_utils")
        true
    else
        @warn "VK_EXT_debug_utils not supported"
        false
    end
end
# End-to-end tests against a live Vulkan implementation. `init.jl` creates the
# `instance` and `device` globals used throughout.
@testset "Vulkan API usage" begin
    include("init.jl")
    @testset "Utilities" begin
        @testset "Function pointers" begin
            # `function_pointer` should agree with the raw loader queries.
            @test function_pointer(instance, "vkEnumeratePhysicalDevices") == get_instance_proc_addr("vkEnumeratePhysicalDevices"; instance)
            @test function_pointer(device, "vkQueueWaitIdle") == get_device_proc_addr(device, "vkQueueWaitIdle")
        end
    end
    @testset "Debugging" begin
        @testset "Validation" begin
            # Creating the instance/device above should have triggered the
            # custom debug callback at least once.
            if WITH_DEBUG
                @test CALLBACK_DEBUG_SIGNAL[]
            end
        end
        @testset "Setting object names" begin
            # Debug names may be given as strings or symbols.
            if WITH_DEBUG
                buffer = Buffer(device, 30, BUFFER_USAGE_TRANSFER_DST_BIT, SHARING_MODE_EXCLUSIVE, UInt32[0])
                ret = set_debug_name(buffer, "Test buffer")
                @test unwrap(ret) == SUCCESS
                ret = set_debug_name(buffer, :buffer_1)
                @test unwrap(ret) == SUCCESS
            end
        end
    end
    @testset "Bitmask flags" begin
        # NOTE(review): `&` of these two disjoint bits is zero, so `flags` ends
        # up empty — presumably this exercises bitwise ops on flag types rather
        # than a specific flag combination; confirm intent.
        buffer = Buffer(device, 24, BUFFER_USAGE_INDIRECT_BUFFER_BIT, SHARING_MODE_EXCLUSIVE, [0]; flags=BUFFER_CREATE_PROTECTED_BIT & BUFFER_CREATE_SPARSE_ALIASED_BIT)
        buffer_2 = Buffer(device, 24, BUFFER_USAGE_SHADER_DEVICE_ADDRESS_BIT, SHARING_MODE_EXCLUSIVE, [0]; flags=0)
        @test buffer isa Buffer
        @test buffer_2 isa Buffer
    end
    @testset "Buffers" begin
        command_pool = CommandPool(device, 0)
        cbuffer = first(unwrap(allocate_command_buffers(device, CommandBufferAllocateInfo(command_pool, COMMAND_BUFFER_LEVEL_PRIMARY, 1))))
        @test cbuffer isa CommandBuffer
        buffer = Buffer(device, 100, BUFFER_USAGE_TRANSFER_DST_BIT, SHARING_MODE_EXCLUSIVE, [])
        @test buffer isa Buffer
        # KHR alias and the promoted core function are distinct bindings.
        @test get_buffer_memory_requirements_2_khr ≠ get_buffer_memory_requirements_2
    end
    @testset "Introspection" begin
        # Mappings between core (`Vk...`), intermediate (`_...`) and high-level types.
        @test Vk.hl_type(VkCore.VkPhysicalDeviceFeatures) == PhysicalDeviceFeatures
        @test Vk.intermediate_type(VkCore.VkPhysicalDeviceFeatures) == Vk.intermediate_type(PhysicalDeviceFeatures) == _PhysicalDeviceFeatures
        @test Vk.core_type(PhysicalDeviceFeatures) == VkCore.VkPhysicalDeviceFeatures
        @test Vk.structure_type(PhysicalDeviceFeatures2) ==
              Vk.structure_type(_PhysicalDeviceFeatures2) ==
              Vk.structure_type(VkCore.VkPhysicalDeviceFeatures2) ==
              VkCore.VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_FEATURES_2
    end
    @testset "Next chains" begin
        # Initialization of chained empty structs.
        f1 = Vk.initialize(_PhysicalDeviceFeatures2, _PhysicalDeviceVulkan12Features, _PhysicalDeviceVulkanMemoryModelFeatures)
        @test f1.vks.pNext ≠ C_NULL
        @test Base.unsafe_load(Base.unsafe_convert(Ptr{VkCore.VkPhysicalDeviceVulkan12Features}, f1.vks.pNext)).pNext ≠ C_NULL
        f1 = Vk.initialize(PhysicalDeviceFeatures2, PhysicalDeviceVulkan12Features, PhysicalDeviceVulkanMemoryModelFeatures)
        @test f1.next isa PhysicalDeviceVulkan12Features
        @test f1.next.next isa PhysicalDeviceVulkanMemoryModelFeatures
        # Chaining/unchaining of structs.
        chain_args = [PhysicalDeviceFeatures2(PhysicalDeviceFeatures()), PhysicalDeviceVulkan12Features(), PhysicalDeviceVulkanMemoryModelFeatures(false, false, false)]
        chained = Vk.chain(chain_args...)
        @test Vk.unchain(chained) == chain_args
        @test f1 == chained
        # Conversion to core data structures and back.
        f2 = _PhysicalDeviceFeatures2(f1)
        f3 = Base.unsafe_convert(VkCore.VkPhysicalDeviceFeatures2, f2)
        f4 = PhysicalDeviceFeatures2(f3, PhysicalDeviceVulkan12Features, PhysicalDeviceVulkanMemoryModelFeatures)
        @test f1 == f4
        # Queries with optional chain members.
        feats = get_physical_device_features_2(device.physical_device, PhysicalDeviceVulkan12Features, PhysicalDeviceVulkanMemoryModelFeatures)
        @test feats isa PhysicalDeviceFeatures2
        @test feats.next isa PhysicalDeviceVulkan12Features
        @test feats.next.next isa PhysicalDeviceVulkanMemoryModelFeatures
        feats2 = get_physical_device_features_2(device.physical_device, PhysicalDeviceVulkan12Features)
        @test feats2 == @set(feats2.next.next = C_NULL)
        props = get_physical_device_properties_2(device.physical_device, PhysicalDeviceProtectedMemoryProperties, PhysicalDeviceShaderIntegerDotProductProperties)
        @test props isa PhysicalDeviceProperties2
        @test props.next isa PhysicalDeviceProtectedMemoryProperties
        @test props.next.next isa PhysicalDeviceShaderIntegerDotProductProperties
        props = get_physical_device_properties_2(device.physical_device, PhysicalDeviceVulkan11Properties, PhysicalDeviceVulkan12Properties)
        @test props.properties.api_version ≥ v"1.1"
    end
    @testset "Handle constructors" begin
        # High-level create info.
        info = FenceCreateInfo()
        fence_1 = Fence(device, info)
        @test fence_1 isa Fence
        fence_2 = Fence(device)
        @test fence_2 isa Fence
        fence_3 = unwrap(create_fence(device, info))
        @test fence_3 isa Fence
        fence_4 = unwrap(create_fence(device))
        @test fence_4 isa Fence
        # Intermediate create info: only the underscore-prefixed creation
        # function accepts it; the high-level one must throw a MethodError.
        info = _FenceCreateInfo()
        fence_1 = Fence(device, info)
        @test fence_1 isa Fence
        fence_2 = Fence(device)
        @test fence_2 isa Fence
        fence_3 = unwrap(_create_fence(device, info))
        @test_throws MethodError unwrap(create_fence(device, info))
        @test fence_3 isa Fence
        fence_4 = unwrap(_create_fence(device))
        @test fence_4 isa Fence
    end
end;
GC.gc()
| Vulkan | https://github.com/JuliaGPU/Vulkan.jl.git |
|
[
"MIT"
] | 0.6.21 | 14499ab752f08ebfc0e162a718c127b29997883c | code | 985 | using Preferences: load_preference
# Check that the automatic dispatch tables were populated for the test
# `instance` and `device` when the dispatch-table preference is enabled.
@testset "Dispatcher" begin
    # Hoisted out of the branch: `disp` is needed in both the `if` and the
    # `else` paths (the original only defined it in the `if` path).
    disp = Vk.global_dispatcher[]
    if load_preference(Vk, "USE_DISPATCH_TABLE", "true") == "true"
        @test haskey(disp.instance_tables, instance)
        @test haskey(disp.instance_tables[instance].pointers, :vkCreateDevice)
        # Compare against the null pointer `C_NULL`, not the symbol `:C_NULL`
        # (a `Ptr` is never equal to a `Symbol`, so that test was vacuous).
        @test disp.instance_tables[instance].pointers[:vkCreateDevice] ≠ C_NULL
        @test haskey(disp.device_tables, device)
        @test haskey(disp.device_tables[device].pointers, :vkCreateGraphicsPipelines)
        @test disp.device_tables[device].pointers[:vkCreateGraphicsPipelines] ≠ C_NULL
        # The swapchain extension was not enabled, so its function pointer must
        # be unavailable with an explanatory error.
        @test_throws ErrorException("Could not retrieve function pointer for 'vkCreateSwapchainKHR'. This can be caused by an extension not being enabled for a function that needs it; see the help with `?` or the documentation for more information.") function_pointer(disp, device, :vkCreateSwapchainKHR)
    else
        @test !haskey(disp.instance_tables, instance)
    end
end;
| Vulkan | https://github.com/JuliaGPU/Vulkan.jl.git |
|
[
"MIT"
] | 0.6.21 | 14499ab752f08ebfc0e162a718c127b29997883c | code | 212 | using Documenter
DocMeta.setdocmeta!(Vulkan, :DocTestSetup, quote
using Vulkan
instance = Instance([], [])
physical_device = first(unwrap(enumerate_physical_devices(instance)))
end)
doctest(Vulkan)
| Vulkan | https://github.com/JuliaGPU/Vulkan.jl.git |
|
[
"MIT"
] | 0.6.21 | 14499ab752f08ebfc0e162a718c127b29997883c | code | 964 | using FixedPointNumbers
using ColorTypes
using StaticArraysCore: SVector
# Assert that `format` and `T` form a round-trip pair under the
# `Vk.Format` / `format_type` mappings.
function test_format_equivalent_to(format, T)
    @test format === Vk.Format(T)
    @test T === format_type(format)
end
# Format mappings contributed by the package extensions, grouped by the
# packages involved.
@testset "Extensions" begin
    # FixedPointNumbers.
    test_format_equivalent_to(Vk.FORMAT_R8_UNORM, N0f8)
    # ColorTypes.
    test_format_equivalent_to(Vk.FORMAT_R16G16B16_SFLOAT, RGB{Float16})
    test_format_equivalent_to(Vk.FORMAT_R16G16B16A16_SFLOAT, RGBA{Float16})
    # FixedPointNumbers + ColorTypes.
    test_format_equivalent_to(Vk.FORMAT_R8G8B8_UNORM, RGB{N0f8})
    test_format_equivalent_to(Vk.FORMAT_B8G8R8_UNORM, BGR{N0f8})
    # StaticArraysCore.
    test_format_equivalent_to(Vk.FORMAT_R16G16_SFLOAT, SVector{2,Float16})
    # FixedPointNumbers + StaticArraysCore.
    test_format_equivalent_to(Vk.FORMAT_R8G8_UNORM, SVector{2, N0f8})
    # One-way mapping only: `format_type(FORMAT_R8G8B8_UNORM)` round-trips to
    # `RGB{N0f8}` (tested above), not to `SVector{3,N0f8}`.
    @test Vk.Format(SVector{3,N0f8}) === Vk.FORMAT_R8G8B8_UNORM
    # One-way mapping only: no `Vk.Format(SVector{4,N0f8})` assertion here.
    @test format_type(Vk.FORMAT_G8B8G8R8_422_UNORM) == SVector{4, N0f8}
end;
| Vulkan | https://github.com/JuliaGPU/Vulkan.jl.git |
|
[
"MIT"
] | 0.6.21 | 14499ab752f08ebfc0e162a718c127b29997883c | code | 170 | using Vulkan: Vk
using Test
# Basic round-trip between a Julia element type and a Vulkan format. The format
# constant is qualified with `Vk.` for consistency with `Vk.format_type`, since
# this file only imports the `Vk` binding (`using Vulkan: Vk`) and should not
# depend on an external `using Vulkan` for the constant.
@testset "Format utilities" begin
    @test Vk.format_type(Vk.FORMAT_R32_SFLOAT) === Float32
    @test Vk.Format(Float32) == Vk.FORMAT_R32_SFLOAT
end;
| Vulkan | https://github.com/JuliaGPU/Vulkan.jl.git |
|
[
"MIT"
] | 0.6.21 | 14499ab752f08ebfc0e162a718c127b29997883c | code | 3071 | using Vulkan, Test
using Vulkan: depends_on
# Minimal concrete `Handle` with no parent, used to exercise the reference
# counting and destruction-order machinery without a real Vulkan object.
mutable struct TestHandleNoParent <: Handle
    vks::Ptr{Cvoid} # underlying (fake) API handle pointer
    refcount::Vk.RefCounter
    destructor
end
# Convenience constructor: leave `destructor` as a placeholder (`undef`).
TestHandleNoParent(vks::Ptr{Cvoid}, refcount::Vk.RefCounter) = TestHandleNoParent(vks, refcount, undef)
# Construct with a random fake pointer. NOTE(review): `signal_destroyed` is a
# function, not a `RefCounter` — this presumably relies on a generic `Handle`
# constructor (not visible in this chunk) that wraps the destructor; confirm.
TestHandleNoParent() = TestHandleNoParent(Ptr{Cvoid}(rand(UInt)), signal_destroyed)
# Test `Handle` that declares a parent handle, mirroring the parent/child
# relationships of real Vulkan handles.
mutable struct TestHandleWithParent <: Handle
    vks::Ptr{Cvoid} # underlying (fake) API handle pointer
    parent::Handle
    refcount::Vk.RefCounter
    destructor
end
# Convenience constructor: leave `destructor` as a placeholder (`undef`).
TestHandleWithParent(vks::Ptr{Cvoid}, parent::Handle, refcount::Vk.RefCounter) = TestHandleWithParent(vks, parent, refcount, undef)
# Construct with a random fake pointer. NOTE(review): argument order here is
# (ptr, destructor, parent) — presumably matched by a generic `Handle`
# constructor outside this chunk; confirm.
TestHandleWithParent(parent) = TestHandleWithParent(Ptr{Cvoid}(rand(UInt)), signal_destroyed, parent)
# Registry of handles whose destructor has run; identity-keyed so distinct
# handles are tracked separately regardless of field equality.
destroyed = IdDict{Union{TestHandleNoParent,TestHandleWithParent}, Nothing}()
# Destructor callback used by the test handles: mark `x` as destroyed.
signal_destroyed(x) = setindex!(destroyed, nothing, x)
# Destruction-order semantics: a handle must outlive anything that depends on
# it, whether the dependency comes from a `parent` field or from `depends_on`.
@testset "Handles" begin
    # Test that finalizing `x` then `handle` destroys each one independently.
    function test_no_dependency(x, handle)
        @test !haskey(destroyed, x)
        @test !haskey(destroyed, handle)
        finalize(x)
        @test haskey(destroyed, x)
        @test !haskey(destroyed, handle)
        finalize(handle)
        @test haskey(destroyed, handle)
    end
    # Test that `handle` being finalized before `x` doesn't destroy `handle`.
    function test_dependency_respected(x, handle)
        @test !haskey(destroyed, x)
        @test !haskey(destroyed, handle)
        finalize(handle)
        @test !haskey(destroyed, x)
        @test !haskey(destroyed, handle)
        finalize(x)
        @test haskey(destroyed, x)
        @test haskey(destroyed, handle)
    end
    # Test that `x` being finalized acts as if there were no dependency.
    function test_dependency_nonintrusive(x, handle)
        @test !haskey(destroyed, x)
        @test !haskey(destroyed, handle)
        finalize(x)
        @test haskey(destroyed, x)
        @test !haskey(destroyed, handle)
        finalize(handle)
        @test haskey(destroyed, handle)
    end
    # Unrelated handles, finalized in both orders.
    handle = TestHandleNoParent()
    x = TestHandleNoParent()
    test_no_dependency(x, handle)
    handle = TestHandleNoParent()
    x = TestHandleNoParent()
    test_no_dependency(handle, x)
    # Implicit dependency through a parent handle.
    handle = TestHandleNoParent()
    x = TestHandleWithParent(handle)
    test_dependency_respected(x, handle)
    handle = TestHandleNoParent()
    x = TestHandleWithParent(handle)
    test_dependency_nonintrusive(x, handle)
    # Explicit dependency registered with `depends_on`.
    handle = TestHandleNoParent()
    x = TestHandleNoParent()
    depends_on(x, handle)
    test_dependency_respected(x, handle)
    handle = TestHandleNoParent()
    x = TestHandleNoParent()
    depends_on(x, handle)
    test_dependency_nonintrusive(x, handle)
    # Registering the same dependency twice must not change the semantics.
    handle = TestHandleNoParent()
    x = TestHandleNoParent()
    depends_on(x, handle)
    depends_on(x, handle)
    test_dependency_respected(x, handle)
    handle = TestHandleNoParent()
    x = TestHandleNoParent()
    depends_on(x, handle)
    depends_on(x, handle)
    test_dependency_nonintrusive(x, handle)
    # Circular dependency: no handle in a given dependency chain will ever be destroyed.
    handle = TestHandleNoParent()
    x = TestHandleNoParent()
    depends_on(handle, x)
    depends_on(x, handle)
    finalize(x)
    finalize(handle)
    @test !haskey(destroyed, x)
    @test !haskey(destroyed, handle)
end;
| Vulkan | https://github.com/JuliaGPU/Vulkan.jl.git |
|
[
"MIT"
] | 0.6.21 | 14499ab752f08ebfc0e162a718c127b29997883c | code | 1391 | @test_throws VulkanError Instance(["nonexistent_layer"], [])
# Requesting an unknown extension must fail, both as an exception from the
# handle constructor and as an error code from the result-returning API.
@test_throws VulkanError Instance([], ["nonexistent_extension"])
@test unwrap_error(create_instance([], ["nonexistent_extension"])).code == ERROR_EXTENSION_NOT_PRESENT
# Create the instance shared by the whole test suite, using the layers and
# extensions gathered in `api.jl`.
const instance = Instance(INSTANCE_LAYERS, INSTANCE_EXTENSIONS;
    application_info = ApplicationInfo(v"0.0.1", v"0.0.1", API_VERSION; application_name="Test", engine_name="Experimental engine"))
@info "Instance created: $instance."
# `WITH_DEBUG` is a Bool; `x[]` on a scalar returns `x` itself.
if WITH_DEBUG[]
    debug_messenger = DebugUtilsMessengerEXT(instance, debug_callback_c)
end
# Pick the first available physical device and create a logical device with a
# single graphics + compute queue.
const device = let pdevices = unwrap(enumerate_physical_devices(instance))
    if isempty(pdevices)
        error("No physical devices available for testing.")
    end
    @info "$(length(pdevices)) physical device(s) found."
    pdevice = first(pdevices)
    @info "Selected $(get_physical_device_properties(pdevice))"
    # Error-path checks piggybacking on the selected device.
    @test_throws ErrorException("No queue with the desired capabilities could be found.") find_queue_family(pdevice, typemax(QueueFlag))
    @test_throws ErrorException("Invalid physical device features: no_feature") PhysicalDeviceFeatures(:no_feature)
    Device(
        pdevice,
        [DeviceQueueCreateInfo(find_queue_family(pdevice, QUEUE_GRAPHICS_BIT | QUEUE_COMPUTE_BIT), [1.0])],
        [], DEVICE_EXTENSIONS; enabled_features = ENABLED_FEATURES
    )
end
@info "Logical device created: $device"
|
[
"MIT"
] | 0.6.21 | 14499ab752f08ebfc0e162a718c127b29997883c | code | 296 | using Test
using Vulkan
using Accessors: @set
# Top-level test entry point. Ordering matters: `handles.jl` is standalone,
# while `api.jl` (via `init.jl`) defines the `instance`/`device` globals used
# by `dispatch.jl` and later files.
@testset "Vulkan.jl" begin
    include("handles.jl")
    include("api.jl")
    include("dispatch.jl")
    include("formats.jl")
    @test Vulkan.precompile_workload()
    include("extensions.jl")
    # Doctests only run on Julia ≥ 1.12, presumably to pin output to one version.
    VERSION ≥ v"1.12" && include("doctests.jl")
end;
| Vulkan | https://github.com/JuliaGPU/Vulkan.jl.git |
|
[
"MIT"
] | 0.6.21 | 14499ab752f08ebfc0e162a718c127b29997883c | docs | 11864 | # Changelog for Vulkan.jl
## Version `v0.6.19`
- ![Feature][badge-feature] Extensions were added related to StaticArraysCore, FixedPointNumbers and ColorTypes to define additional `Vk.Format`/`Vk.format_type` mappings. However, due to https://github.com/JuliaLang/julia/issues/52511, these are not yet defined as package extensions, and the core packages were made direct dependencies instead of weak dependencies.
## Version `v0.6.16`
- ![Feature][badge-feature] Dependencies between handles may be specified via `Vk.depends_on(x, handle)`, to ensure that a given handle is not destroyed before anything that depends on it. This leverages the reference counting system already implemented, which itself encodes such dependencies from a given parent handle and its children. See the docstring of `Vk.depends_on` for more details.
## Version `v0.6.14`
- ![Feature][badge-feature] New mappings between Julia types and Vulkan formats are available, via `Vk.Format` constructors and `Vk.format_type` functions.
## Version `v0.6.9`
- ![Feature][badge-feature] The Vulkan specification used for the wrapping process has been updated from 1.3.207 to 1.3.240, along with [VulkanCore.jl](https://github.com/JuliaGPU/VulkanCore.jl). New types, functions and extensions are now available!
- ![Enhancement][badge-enhancement] The analysis of the Vulkan specification has been split into a separate package, [VulkanSpec.jl](https://github.com/serenity4/VulkanSpec.jl) to allow for reuse in other contexts. This code has also seen a major refactor that enabled its split and allowed more advanced functionality.
- ![Enhancement][badge-enhancement] Part of the advanced functionality provided in the refactor just mentioned allowed for improvements to the wrapper process that make sure no breaking change is introduced. The potential breakages that required attention were notably:
- The promotion of positional arguments into keyword arguments, reflecting a change from required to optional in the Vulkan specification; exposing optional arguments as keyword arguments was disabled in such cases, such that only arguments that are originally (or manually annotated as) optional are exposed as keyword arguments.
- More aliases had to be defined for enumeration values which were later promoted (and thus had their name changed).
- The generation of a diff with proper tests in VulkanSpec.jl to ensure that only symbols belonging to provisional extensions are dropped between versions (keeping backward compatibility for others).
- ![Enhancement][badge-enhancement] Functions that could only return a success code (such as `vkFreeDescriptorSets`) return `nothing` instead. Although breaking in theory, this should not be breaking in practice, as it is most likely that such results would be ignored or `unwrap`ed (and `unwrap` works on any value, including `nothing`, defined as a no-op).
## Version `v0.6.8`
- ![Enhancement][badge-enhancement] The default hash and equality methods defined for structures is now provided by StructEquals.jl instead of AutoHashEquals.jl.
## Version `v0.6`
- ![BREAKING][badge-breaking] The version of the Vulkan API has been updated to 1.3.207. This is breaking because certain function arguments can be annotated as optional in the specification, turning them into keyword arguments in generated code.
- ![Feature][badge-feature] All aliases are now generated to ensure backwards compatibility with all [promotions](https://www.khronos.org/registry/vulkan/specs/1.3/html/vkspec.html#extendingvulkan-compatibility-promotion) of extensions or extension features.
- ![Enhancement][badge-enhancement] The automatic command dispatch functionality has been made thread-safe by retrieving all pointers eagerly, right after instance or device creation.
## Version `v0.5`
Changelog:
- ![BREAKING][badge-breaking] ![Enhancement][badge-enhancement] Functions that take arguments in the form of intermediate structures were renamed for consistency. They now start with an underscore, like the intermediate structures. As a result, the dispatch constraints for such functions could be relaxed, and there is no longer a need to explicitly specify array types for intermediate structures.
- ![Feature][badge-feature] Additional arguments were exposed for the functions that wrap API commands that may take empty structures as `next` chain members to be filled for queries (such as `get_physical_device_features_2` which wraps [vkGetPhysicalDeviceFeatures2](https://www.khronos.org/registry/vulkan/specs/1.2/html/vkspec.html#vkGetPhysicalDeviceFeatures2) and can have chains of empty structures as part of its [VkPhysicalDeviceFeatures2](https://www.khronos.org/registry/vulkan/specs/1.2/html/vkspec.html#VkPhysicalDeviceFeatures2) argument). You can do `get_physical_device_properties_2(physical_device, PhysicalDeviceProtectedMemoryProperties, PhysicalDeviceProvokingVertexPropertiesEXT, ...)` to have structures such as `PhysicalDeviceProtectedMemoryProperties` filled in by the API.
- ![Feature][badge-feature] A new function `initialize` was defined to allow the user to build empty structures that are meant to be filled by Vulkan implementations. This utility is used internally by the feature described above, but should the user need to initialize an empty structure himself, it is available.
- Utility functions `chain(nexts::HighLevelStruct...)` and `unchain(x::HighLevelStruct)` were added to aid in manipulating `next` chains via nesting and flattening chain members, respectively.
- ![BREAKING][badge-breaking] ![Enhancement][badge-enhancement] `ReturnedOnly` structures are no longer special-cased. Indeed, these structures may still be required as input to Vulkan functions, notably as part of `next` chains when querying for properties.
- ![Feature][badge-feature] Information for SPIR-V capabilities and extensions are available via `SPIRV_CAPABILITIES` and `SPIRV_EXTENSIONS` respectively. These structures describe how SPIR-V functionalities are implicitly enabled from Vulkan features, extensions and properties.
- ![BREAKING][badge-breaking] ![Enhancement][badge-enhancement] If a function returns a struct, and the level of wrapping can't be inferred from the arguments, then the returned type uses a high-level structure instead of an intermediate one.
- ![BREAKING][badge-breaking] ![Enhancement][badge-enhancement] The symbol `core` has been removed and the symbol `vk` is no longer exported, in favor of the new exported symbol `VkCore`.
- ![Enhancement][badge-enhancement] An alias named `Vk` for `Vulkan` is now exported.
- ![Feature][badge-feature] Introspection functions `hl_type`, `intermediate_type` and `core_type` were defined to link Vulkan structures between wrapping levels, should the user ever need it. This is mostly used internally.
- ![Enhancement][badge-enhancement] Handles now have additional constructors that accept create info structures, to be more in line with the creation functions `create_...`.
- ![Enhancement][badge-enhancement] Improvements made to generated docstrings and to the package documentation.
TL;DR: here is what you will likely need to do:
- Use `Vulkan.VkCore` instead of `Vulkan.core` where necessary (typically for C-compatible callback functions, such as a validation callback)
- If you used intermediate structures (which start with an underscore; typical applications should not use them except for identified performance reasons):
- Use underscores in functions that use them (e.g. `get_physical_device_features_2` -> `_get_physical_device_features_2`)
## Version `v0.4`
* ![BREAKING][badge-breaking] ![Feature][badge-feature] Hashing is now defined recursively on high-level structures and equality of high-level structures is defined in terms of equality of hashes. This uses [AutoHashEquals.jl](https://github.com/andrewcooke/AutoHashEquals.jl), see the package documentation for more information.
## Version `v0.3`
* ![BREAKING][badge-breaking] ![Enhancement][badge-enhancement] The beta extensions are not wrapped anymore, following their removal from VulkanCore (see [this issue](https://github.com/JuliaGPU/VulkanCore.jl/issues/43) to know why they were removed). This will allow for a more stable library.
* ![BREAKING][badge-breaking] ![Feature][badge-feature] High-level structs were defined that allow for the introspection over their fields. Minimalistic structs (with no introspection capabilities, but slightly more efficient) are still present, but were prefixed with '_'. For example, `InstanceCreateInfo` becomes `_InstanceCreateInfo`, while the new `InstanceCreateInfo` allows for querying its parameters such as extensions, layers, etc. without loading pointers from the API struct. These high-level structs should be a drop-down replacement for API calls and should have the same (or very similar) constructors than the old (minimalistic) ones. Note that this only concerns API data structures, therefore excluding handles which stay the same. Structs that were only returned (and never requested by) the API are unchanged, as they already had the same logic as these high-level structs.
* ![BREAKING][badge-breaking] ![Feature][badge-feature] All enumeration and constant types are now wrapped, with their prefix removed. Bitmasks stay unchanged. As an example, `VK_SHARING_MODE_EXCLUSIVE` must now be replaced by `SHARING_MODE_EXCLUSIVE`. This also holds for constants, e.g. `SUBPASS_EXTERNAL` instead of `VK_SUBPASS_EXTERNAL`.
* ![BREAKING][badge-breaking] ![Feature][badge-feature] The `code` field of `VulkanError` generated with the `@check` macro (including its uses in wrappers) is now a `Result` (instead of a `VkResult`), adapting to the change above.
* ![BREAKING][badge-breaking] ![Enhancement][badge-enhancement] Convenience constructors around device features (like `PhysicalDeviceFeatures`) now accept feature symbols in `snake_case` convention, instead of lower `camelCase`, for consistency with the rest of the library.
* ![BREAKING][badge-breaking] ![Enhancement][badge-enhancement] The convenience function for setting up a debug messenger replaced its `severity` keyword argument with `min_severity` and all keyword arguments now accept API enums instead of strings. The default debug callback has also been slightly changed to use Julia's logging system in a finalizer-safe manner.
* ![BREAKING][badge-breaking] ![Feature][badge-feature] Wrappers were added for union structs (e.g. VkClearColorValue), which are now used in wrapped structs instead of their lower-level counterparts. For example, instead of using `vk.VkClearColorValue`, you must now use `ClearColorValue` directly.
* ![Feature][badge-feature] An automatic dispatch logic was added to automate the retrieval and use of API function pointers. This means that you don't need to retrieve function pointers manually, including for extension-related functionality.
* ![Enhancement][badge-enhancement] Documentation was improved, and docstrings now list extension requirements.
[badge-breaking]: https://img.shields.io/badge/BREAKING-red.svg
[badge-deprecation]: https://img.shields.io/badge/deprecation-orange.svg
[badge-feature]: https://img.shields.io/badge/feature-green.svg
[badge-enhancement]: https://img.shields.io/badge/enhancement-blue.svg
[badge-bugfix]: https://img.shields.io/badge/bugfix-purple.svg
[badge-security]: https://img.shields.io/badge/security-black.svg
[badge-experimental]: https://img.shields.io/badge/experimental-lightgrey.svg
[badge-maintenance]: https://img.shields.io/badge/maintenance-gray.svg
<!--
# Badges (reused from the CHANGELOG.md of Documenter.jl)
![BREAKING][badge-breaking]
![Deprecation][badge-deprecation]
![Feature][badge-feature]
![Enhancement][badge-enhancement]
![Bugfix][badge-bugfix]
![Security][badge-security]
![Experimental][badge-experimental]
![Maintenance][badge-maintenance]
-->
| Vulkan | https://github.com/JuliaGPU/Vulkan.jl.git |
|
[
"MIT"
] | 0.6.21 | 14499ab752f08ebfc0e162a718c127b29997883c | docs | 2968 | # Vulkan
 [](https://juliagpu.github.io/Vulkan.jl/stable) [](https://juliagpu.github.io/Vulkan.jl/dev)
Vulkan.jl is a lightweight wrapper around the [Vulkan](https://www.vulkan.org/) graphics and compute library. It exposes abstractions over the underlying C interface, primarily geared towards developers looking for a more natural way to work with Vulkan with minimal overhead.
It builds upon the core API provided by [VulkanCore.jl](https://github.com/JuliaGPU/VulkanCore.jl/). Because Vulkan is originally a C specification, interfacing with it requires some knowledge before correctly being used from Julia. This package acts as an abstraction layer, so that you don't need to know how to properly call a C library, while still retaining full functionality. The wrapper is generated directly from the [Vulkan Specification](https://www.khronos.org/registry/vulkan/).
This is a very similar approach to that taken by [VulkanHpp](https://github.com/KhronosGroup/Vulkan-Hpp), except that the target language is Julia and not C++.
If you have questions, want to brainstorm ideas or simply want to share cool things you do with Vulkan don't hesitate to create a thread in our [Zulip channel](https://julialang.zulipchat.com/#narrow/stream/287693-vulkan).
## Status
This package is a work in progress and has not reached its 1.0 version yet. As such, documentation may not be complete and functionality may change without warning. If it happens, make sure to check out the [changelog](https://github.com/JuliaGPU/Vulkan.jl/blob/main/CHANGELOG.md). At this stage, you should not use this library in production; however, you are encouraged to push its boundaries through non-critical projects. If you find limitations, bugs or want to suggest potential improvements, do not hesitate to submit issues or pull requests. The goal is definitely to be production-ready as soon as possible.
In particular, because the library relies on automatic code generation, there may be portions of the Vulkan API that are not wrapped correctly. While you should not have trouble in most cases, there are always edge cases which were not accounted for during generation. Please open an issue whenever you encounter such a case, so that we can reliably fix those wrapping issues for future use.
## Testing
Currently, continuous integration runs only on Ubuntu 32/64 bits, for lack of a functional CI setup with Vulkan for MacOS and Windows. Because public CI services lack proper driver support, the CPU Vulkan implementation [Lavapipe](https://docs.mesa3d.org/drivers/llvmpipe.html) is used.
If you are not on Linux, we cannot guarantee that this library will work for you, although so far nothing is platform-dependent. If that is the case, we recommend that you test this package with your own setup.
| Vulkan | https://github.com/JuliaGPU/Vulkan.jl.git |
|
[
"MIT"
] | 0.6.21 | 14499ab752f08ebfc0e162a718c127b29997883c | docs | 103 | # Vulkan.jl API
```@index
Modules = [Vulkan]
```
```@autodocs
Modules = [Vulkan]
Private = false
```
| Vulkan | https://github.com/JuliaGPU/Vulkan.jl.git |
|
[
"MIT"
] | 0.6.21 | 14499ab752f08ebfc0e162a718c127b29997883c | docs | 1632 | # Glossary
*Core handle*: Opaque pointer (`void*`) extensively used by the Vulkan API. See the [Object model](https://registry.khronos.org/vulkan/specs/1.3-extensions/html/chap3.html#fundamentals-objectmodel-overview) section of the Vulkan documentation for more details.
*Handle*: Mutable type which wraps a core handle, allowing the use of finalizers to call API destructors with a reference counting mechanism to ensure no handle is destroyed before its children. Read more about them [here](@ref Handles).
*Core structure*: Structure defined in [VulkanCore.jl](https://github.com/JuliaGPU/VulkanCore.jl) with a 1:1 correspondence with C.
*Intermediate structure*: Minimal wrapper around a core structure which allows the interaction with pointer fields in a safe manner.
*High-level structure*: Structure meant to be interacted with like any other Julia structure, hiding C-specific details by providing e.g. arrays instead of a pointer and a count, strings instead of character pointers, version numbers instead of integers with a specific encoding.
*Core function*: Julia function defined in [VulkanCore.jl](https://github.com/JuliaGPU/VulkanCore.jl) which [forwards a call](https://docs.julialang.org/en/v1/base/c/#ccall) to the Vulkan API function of the same name.
*Intermediate function*: Wrapper around a core function meant to automate certain C and Vulkan patterns. May return handles and intermediate structures, wrapped in a `ResultTypes.Result` if the core function may fail.
*High-level function*: Almost identical to an intermediate function, except that all returned structures are high-level structures.
| Vulkan | https://github.com/JuliaGPU/Vulkan.jl.git |
|
[
"MIT"
] | 0.6.21 | 14499ab752f08ebfc0e162a718c127b29997883c | docs | 917 | # Vulkan.jl
Vulkan.jl is a lightweight wrapper around the [Vulkan](https://www.vulkan.org) graphics and compute library. It exposes abstractions over the underlying C interface, primarily geared towards developers looking for a more natural way to work with Vulkan with minimal overhead.
It builds upon the core API provided by [VulkanCore.jl](https://github.com/JuliaGPU/VulkanCore.jl/). Because Vulkan is originally a C specification, interfacing with it requires some knowledge before correctly being used from Julia. This package acts as an abstraction layer, so that you don't need to know how to properly call a C library, while still retaining full functionality. The wrapper is generated directly from the [Vulkan Specification](https://www.khronos.org/registry/vulkan/).
The approach is similar to [VulkanHpp](https://github.com/KhronosGroup/Vulkan-Hpp) for C++, except that the target language is Julia.
| Vulkan | https://github.com/JuliaGPU/Vulkan.jl.git |
|
[
"MIT"
] | 0.6.21 | 14499ab752f08ebfc0e162a718c127b29997883c | docs | 3230 | # Introduction
## What is Vulkan?
[Vulkan](https://www.vulkan.org/) is a graphics and compute specification, targeting a broad range of GPUs [and even CPUs](https://github.com/google/swiftshader). It aims to provide a cross-platform API that can be used from PCs, consoles, mobile phones and embedded platforms. It can be thought of as the new generation of [OpenGL](https://www.opengl.org//) with the compute capabilities of [OpenCL](https://www.khronos.org/opencl/). It should be noted that Vulkan is merely a specification and therefore, there does not exist only one Vulkan library but rather multiple device-dependent implementations conforming to a unique standard. The version of the Vulkan implementation you may be using thus depends on the graphics drivers installed on your system.
The power of this standard lies in the genericity it guarantees to anything that builds from it. This is a direct consequence of a thorough testing of vendor implementations, which must be compatible with the specification in every detail. Therefore, tools that are developed for Vulkan can be used throughout the entire ecosystem, available for [all devices that support Vulkan](https://vulkan.gpuinfo.org/).
## Compute and graphics interface
### SPIR-V
To describe how graphics and compute programs should be executed by devices, Vulkan relies on the [Standard Portable Intermediate Representation](https://www.khronos.org/registry/spir-v/) (SPIR-V) format. This is another specification, whose aim is to free hardware vendors from having to build their own compiler for every shading/compute language, whose implementations were not always coherent with one another. It is a binary format, making it easier to generate assembly code from than text-based formats (such as GLSL and HLSL).
SPIR-V is not a language, but rather a binary format that higher level languages can compile to. It can be targeted from shading languages; for example, see Khronos' [glslang](https://github.com/KhronosGroup/glslang) and Google's [shaderc](https://github.com/google/shaderc) for GLSL/HLSL. SPIR-V features a large [suite of tools](https://github.com/KhronosGroup/SPIRV-Tools), designed to ease the manipulation of SPIR-V programs. It includes an optimizer, spirv-opt, alleviating the need for hardware vendors to have their own SPIR-V optimizer.
SPIR-V is notably suited to cross-compilation among shading languages (see [SPIR-V Cross](https://github.com/KhronosGroup/SPIRV-Cross)).
### SPIR-V and LLVM
SPIR-V is similar to LLVM IR, for which there exists a [bi-directional translator](https://github.com/KhronosGroup/SPIRV-LLVM-Translator). However, not all SPIR-V concepts are mappable to LLVM IR, so not all of SPIR-V can be translated. Currently, only the OpenCL part of SPIR-V is supported by this translator (see [this issue](https://github.com/KhronosGroup/SPIRV-LLVM-Translator/issues/369)), missing essential features required by Vulkan. If (or when) Vulkan is supported, Julia code could be compiled to LLVM, translated to SPIR-V and executed from any supported Vulkan device, be it for graphics or compute jobs. For the moment, SPIR-V modules to be consumed by Vulkan are usually compiled from other shading languages.
| Vulkan | https://github.com/JuliaGPU/Vulkan.jl.git |
|
[
"MIT"
] | 0.6.21 | 14499ab752f08ebfc0e162a718c127b29997883c | docs | 4813 | # Troubleshooting
There can be many kinds of errors when developing a Vulkan application, which can sometimes be difficult to troubleshoot.
If you identified an error happening in the Vulkan driver, or in any other C library, you can troubleshoot whether it has anything to do with Julia by doing the following:
- Executing a system utility that uses the library (driver, loader, extension dependency...) in question. If no errors happen, you can try the next step.
- If you have the courage, you can write a MWE in Julia and then translate that to C or any other low-level language.
Here we list common errors and potential solutions. If you encounter a new error, or found a new solution to an error already listed, feel free to submit a pull request to improve this section.
## ERROR\_LAYER\_NOT\_PRESENT
Most layers are not bundled with the default Vulkan driver. For example, the validation layer `VK_LAYER_KHRONOS_validation` should be installed from [their Github Page](https://github.com/KhronosGroup/Vulkan-ValidationLayers) or via your package manager. This specific layer should hopefully be integrated in the artifact system in the future, but other layers may be vendor-dependent, and therefore it is the responsibility of the user to install them before hand.
Note that any layer that fails to load is simply ignored, and may be reported as not present (in particular this can be triggered by ['GLIBCXX\_X.X.XX' not found](@ref libstdc)).
You can troubleshoot this further with instance creation debugging enabled; for this, simply pass in a [`DebugUtilsMessengerCreateInfoEXT`](@ref) as a `next` member of your [`InstanceCreateInfo`](@ref) (make sure to enable all message types and severities for complete information).
## ['GLIBCXX\_X.X.XX' not found](@id libstdc)
The interfaced C++ code may require a `libstdc++` more recent than the one found on some Julia binaries. Notably, the loader, drivers and validation layers might all have different C++ version requirements. The more recent any of these components are, the more likely they are to require a recent `libstdc++` version.
The solution is to make sure Julia uses an up-to-date `libstdc++` library (e.g. the one installed on your system), as indicated in [this issue](https://github.com/JuliaGL/GLFW.jl/issues/198). On Linux, this can be achieved by setting the `LD_PRELOAD` environment variable when launching Julia (make sure to point to your `libstdc++` on your system):
```bash
LD_PRELOAD=/usr/lib/libstdc++.so.6 julia
```
If using VSCode, you can set it for the integrated terminal (e.g. `terminal.integrated.env.linux` on Linux) such that the REPL always starts with this environment variable.
## Internal API errors
If you encounter the error `INITIALIZATION_FAILED` or similar errors with Julia, which you do not encounter with other languages (e.g. C/C++) or with your system Vulkan utilities, then it may be due to `libstdc++` version requirements (see [this tip](@ref libstdc)) or [incompatibilities in library loading](@ref Library-loading).
If the bug is encountered in a function from the loader (e.g. via a function that operates on an `Instance`, and not a `Device`), and if you are using [Vulkan-Loader](https://github.com/KhronosGroup/Vulkan-Loader) (which is most likely the case), it is recommended to enable additional logging by setting the environment variable `VK_LOADER_DEBUG=all`. See [the loader's debug environment variables](https://github.com/KhronosGroup/Vulkan-Loader/blob/master/docs/LoaderInterfaceArchitecture.md#table-of-debug-environment-variables) for more options.
## 0-based vs 1-based indexing
Vulkan uses a 0-based indexing system, so be careful whenever an index is returned from or requested for a Vulkan function.
## Crash in extension depending on an external C library
There have been cases where the relevant C libraries must be loaded (`dlopen`ed) before the instance or device with the relevant extension is used. For example, [XCB.jl](https://github.com/JuliaGL/XCB.jl) uses its own `libxcb` via `Xorg_libxcb_jll`, and this library is automatically `dlopen`ed when loading XCB (because in turn it loads `Xorg_libxcb_jll` which does the `dlopen` during `__init__()`). When loading XCB only after an instance with the extension `VK_KHR_xcb_surface` was created, trying to retrieve basic information (e.g. via `get_physical_device_surface_capabilities_khr`) caused a segmentation fault.
It may be expected that this happens with any package that relies on a C library using the [artifact system](https://pkgdocs.julialang.org/v1/artifacts/), and that is required by a Vulkan extension. In this case, always make sure you load the package before setting up your instance(s) and device(s).
In general, make sure you know where any relevant external libraries come from.
| Vulkan | https://github.com/JuliaGPU/Vulkan.jl.git |
|
[
"MIT"
] | 0.6.21 | 14499ab752f08ebfc0e162a718c127b29997883c | docs | 2620 | Good for a tutorial about the different levels of wrapping:
---
There is a final family of Vulkan types that you may encounter. Those are the barebones VulkanCore.jl types, which you won't have to worry about in all cases *except when you need to pass functions to the API*. In this case, inputs will not be automatically converted for you, and you will have to define the appropriate signature before obtaining function pointers with `Base.@cfunction`. You can access these types from the (exported) module `Vulkan.VkCore`.
To summarize:
- High-level structs:
- should be used most of the time.
- store values in a way that makes it easy to retrieve them later.
- introduce a small overhead, which may be a concern in some performance-critical sections.
- Low-level structs:
- offer performance advantages over high-level structs.
- may be preferred in performance-critical sections.
- are not meant for introspection capabilities.
- are not defined for structures not needed by the API.
- VulkanCore structs:
- should never be used directly, except as argument types for functions intended to be passed to the API.
In general, high-level and low-level structs can be used interchangeably as function arguments to constructors or API functions, at the condition that they are not mixed together.
Using either high-level or low-level structs should be a performance matter, and as such it is encouraged to profile applications before using low-level structs at all: they are faster, but can require additional bookkeeping due to a lack of introspection.
Typically, it is easier to use high-level types for create info arguments to handles that are created at a low frequency; this includes `Instance`, `Device` or `SwapchainKHR` handles for example. Their create info structures may contain precious information that needs to be accessed by the application, e.g. to make sure that image formats in a render pass comply with the swapchain image format, or to check instance or device extensions before using extension functionality.
API functions and structures accept either low-level structs or high-level structs. For commands with low-level structs, you currently need to provide typed arrays (i.e. not `[]` which are of type `Vector{Any}`).
In general:
- High-level structs are returned from functions with high-level arguments.
- Low-level structs are returned from functions with low-level arguments.
The only exception currently is for functions that would have the same low-level/high-level argument types, for which only one version is available that returns values in low-level types.
| Vulkan | https://github.com/JuliaGPU/Vulkan.jl.git |
|
[
"MIT"
] | 0.6.21 | 14499ab752f08ebfc0e162a718c127b29997883c | docs | 3117 | # Utility
Here we describe some tools that can assist the development of Vulkan applications.
Feel free to check out the official [Vulkan website](https://www.vulkan.org/) for a more complete list of resources.
## External tools
### NVIDIA Nsight Systems
[NVIDIA Nsight Systems](https://developer.nvidia.com/nsight-systems) is a tool developed by NVIDIA to profile applications, showing both CPU and GPU usage. It can be very useful for analyzing the balance between CPU and GPU usage, as well as troubleshoot general performance bottlenecks. However, it only outputs high-level information regarding GPU tasks. Therefore, to catch GPU bottlenecks in a fine-grained manner (such as inside shaders) one should instead use a dedicated profiler such as [Nsight Graphics](@ref nsight-graphics).
### [NVIDIA Nsight Graphics](@id nsight-graphics)
[Nsight Graphics](https://developer.nvidia.com/nsight-graphics) dives deeper into the execution details of an application and provides detailed information regarding graphics pipelines, shaders and so on. This is a tool of choice to consider for NVIDIA GPUs once the GPU is identified as a bottleneck with Nsight Systems.
### RenderDoc
[RenderDoc](https://renderdoc.org/) plays a similar role to Nsight Graphics for a wider range of GPUs. It is open-source and community-maintained.
*RenderDoc is not supported with Vulkan.jl; see [this issue](https://github.com/JuliaGPU/Vulkan.jl/issues/53) for more details on the matter.*
### CPU implementation of Vulkan
#### SwiftShader
[SwiftShader](https://github.com/google/swiftshader) is a CPU implementation of Vulkan primarily designed to extend the portability of Vulkan applications. It can be used wherever there is a lack of proper driver support, including public continuous integration services. This allows for example to evaluate code when generating a documentation in CI with [Documenter.jl](https://github.com/JuliaDocs/Documenter.jl), like this one.
SwiftShader is available as a JLL package. You can add it with
```julia-repl
julia> ]add SwiftShader_jll
```
A convenience macro is implemented in Vulkan, so you can quickly use SwiftShader with
```@example
using SwiftShader_jll
using Vulkan
set_driver(:SwiftShader)
```
which will tell the Vulkan Loader to use the SwiftShader Installable Client Driver.
#### Lavapipe
[Lavapipe](https://docs.mesa3d.org/drivers/llvmpipe.html) is another CPU implementation of Vulkan, developed by Mesa as part of its Gallium stack.
This one was deemed to be too much of a hassle to setup with the Artifact system; instead, the [julia-lavapipe](https://github.com/marketplace/actions/julia-lavapipe) action was added for GitHub Actions for use in CI using `apt` to install the driver. At the time of writing, this action only supports Linux runners with the latest Ubuntu version, but contributions are encouraged to provide support for other platforms and setups.
If you want to take on the task of adding Lavapipe to Yggdrasil, that would be greatly appreciated and would result in a more convenient setup than a GitHub Action, but do expect a big rabbit hole.
| Vulkan | https://github.com/JuliaGPU/Vulkan.jl.git |
|
[
"MIT"
] | 0.6.21 | 14499ab752f08ebfc0e162a718c127b29997883c | docs | 2131 | # Optional functionality
Vulkan uses a particular functionality mechanism based on [features](https://registry.khronos.org/vulkan/specs/1.3-extensions/html/chap47.html), [extensions](https://registry.khronos.org/vulkan/specs/1.3-extensions/html/chap46.html) and properties.
Properties are per-device, and are not specified by the user; instead, they are returned by the Vulkan corresponding driver. Features may be very similar to properties semantically: they may specify whether some functionality is available or not on the device, such as atomic operations. However, features are usually more complex than that: the presence or absence of specific features will cause the driver to behave differently. Therefore, the difference with properties is that enabling a feature may dynamically change the logic of the driver, while properties are static and can only tell whether some functionality is supported or not.
SPIR-V uses a similar mechanism, with capabilities (analogous to features) and extensions. However, one should note that SPIR-V is a format for GPU programs, and not an API in itself; there is no SPIR-V driver of any kind. Therefore, any configuration for SPIR-V will be specified through its execution environment, e.g. OpenCL or Vulkan. As a result, certain Vulkan features and extensions are directly related to SPIR-V capabilities and extensions.
As a client API for SPIR-V, Vulkan [establishes](https://registry.khronos.org/vulkan/specs/1.3-extensions/html/chap52.html) what SPIR-V capabilities and extensions are enabled given the level of functionality requested from or provided by the driver. Notably, no SPIR-V capability or extension can be enabled without a corresponding requirement for a Vulkan core version or the presence of a Vulkan feature or extension.
Optional SPIR-V functionality is therefore fully implicit, based on the Vulkan API configuration. To help automate this mapping (and alleviate or even remove the burden forced on the developer), `SPIRV_CAPABILITIES` and `SPIRV_EXTENSIONS` are exported which contain information about capability and extension requirements, respectively.
| Vulkan | https://github.com/JuliaGPU/Vulkan.jl.git |
|
[
"MIT"
] | 0.6.21 | 14499ab752f08ebfc0e162a718c127b29997883c | docs | 4733 | # Library loading
Owing to its extensible architecture, Vulkan may require additional libraries to be available during runtime. That will be
notably the case of every layer, and of most [WSI (Window System Integration)](https://registry.khronos.org/vulkan/specs/1.3-extensions/html/chap34.html)
instance extensions which require hooking into the OS' windowing system.
It is important to know where these libraries come from, to avoid crashes and ensure correct behavior.
A notable case for failure is when some code uses a new function exposed in a recent library release, but the loaded library is too old.
In particular, this may occur after updating Vulkan drivers, or upgrading the OS (which in turn updates OS-specific libraries and possibly the Vulkan loader which may then rely on these updates).
Other than that, libraries are generally backward compatible, and compatibility issues are fairly rare.
In Julia, there are two notable systems that may provide them:
- Your operating system, using whatever is available, as matched first by the linker depending on configuration. Version suffixes (e.g. `libvulkan.so.1`) may be used to provide weak compatibility guarantees.
- Pkg's [artifact system](https://pkgdocs.julialang.org/v1/artifacts/), providing libraries and binaries with set versions and stronger compatibility guarantees with semantic versioning. The artifact system explicitly uses libraries from other artifacts, *and not from the system*. Keep that in mind especially if you rely on artifacts for application-level functionality (e.g. GLFW).
When a library is required by a Vulkan feature, extension or layer, it will most likely use the first one already loaded.
That may be an artifact, or a system library. Relying on either comes with caveats:
- Relying on an artifact may incorrectly interface with OS-specific functionality, which requires to match system libraries.
- Relying on system libraries may cause compatibility issues when using artifacts that require specific versions.
A reasonable recommendation would be to go for system libraries for anything that Vulkan heavily relies on (such as WSI functionality), and use artifact libraries for the rest.
It may however happen that you depend on the same library for both Vulkan and artifact functionality: for example, let's say you use [GLFW.jl](https://github.com/JuliaGL/GLFW.jl), which depends on the artifact `GLFW_jll`, and you are using it with Vulkan. The Vulkan loader (usually a system library itself, `libvulkan.so`) will expect a system `libxcb.so`; and `GLFW_jll` will be designed to work with the artifact `libxcb.so`. In theory, it is possible to use different versions of the same library at the same time (see [Overriding libraries](@ref)); if it works, it's probably alright to stick with that. Otherwise, should any issue occur by this mismatch, it might be preferable to use the newest library among both, or decide on a case-by-case basis. Any battle-tested guideline for this would be very welcome!
If you stumble upon an error during instance creation and wonder if it's related to library compatibility issues, these tend to show up when the `VK_LOADER_DEBUG=all` option is set; see [Internal API errors](@ref).
## Overriding libraries
**Vulkan** may be redirected to use a specific system or artifact library. It can be attempted by:
- Forcing the system linker to preload a specific library (e.g. `LD_PRELOAD` for `ld` on linux).
- Emulating such preload using `Libdl.dlopen` before the corresponding library is loaded; that is, before `using Package` where `Package` depends on artifacts (artifacts tend to `dlopen` their library dependencies during [module initialization](https://docs.julialang.org/en/v1/manual/modules/#Module-initialization-and-precompilation)).
- Loading an artifact (either directly or indirectly), triggering the loading of its dependent libraries (which may be redirected too, see below).
**Artifacts** always use artifact libraries by default, but may be redirected toward other libraries via the preferences mechanism:
```julia-repl
julia> using Xorg_libxcb_jll
julia> Xorg_libxcb_jll.set_preferences!(Xorg_libxcb_jll, "libxcb_path" => "/usr/lib/libxcb.so")
# Restart Julia to trigger precompilation, updating artifact settings.
julia> using Xorg_libxcb_jll
```
Note that every artifact may provide many library products, and each one of them will require an explicit preference to opt out of the artifact system. For instance, `Xorg_libxcb_jll` provides `libxcb.so`, but also `libxcb-render.so`, `libxcb-xkb.so`, and many more; `libxcb_path` only affects `libxcb.so`, and to affect these other libraries there exist similar preferences `libxcb_render_path`, `libxcb_xkb_path`, etc.
| Vulkan | https://github.com/JuliaGPU/Vulkan.jl.git |
|
[
"MIT"
] | 0.6.21 | 14499ab752f08ebfc0e162a718c127b29997883c | docs | 738 | ## VulkanGen
VulkanGen, the generator project, converts the XML specification into a custom IR, and then generates wrapper code.
### Platform-specific wrapping
Some parts of the Vulkan API depend on system headers that are platform-specific; these notably include WSI (Window System Integration) extensions, which allow the developer to attach Vulkan devices to surfaces like windows.
These platform-specific dependencies can be grouped into operating systems, notably Windows, MacOS, Linux and BSD. Each of these systems is associated with a set of WSI extensions and has a separate wrapper file with extensions specific to other operating systems removed.
```@index
Modules = [VulkanGen]
```
```@autodocs
Modules = [VulkanGen]
```
| Vulkan | https://github.com/JuliaGPU/Vulkan.jl.git |
|
[
"MIT"
] | 0.6.21 | 14499ab752f08ebfc0e162a718c127b29997883c | docs | 2566 | # Next chains
Vulkan has a concept of `next` chains, where API structures can be chained together by filling an optional `next` field in a nested manner (hence the name).
This poses a few challenges on the Julia side, because in Vulkan such chains are linked lists which use raw pointers. Raw pointers are the reason why we required so-called intermediate structures that wrap core structures. As a reminder, these intermediate structures are nothing more than a core structure put alongside a vector of dependencies which holds Julia objects (arrays or references). These dependencies are what guarantees the validity of pointers present in the core structure, by making the garbage collector aware that these references must not be freed as long as the core structure is used (i.e., as long as the intermediate wrapper is used).
Having linked lists with opaque pointers complicate the matter. First, one must be able to build such chains to pass in the data structures to Vulkan in the format the API expects. That is the easiest part, since we can have arbitrary objects in the `next` field of high-level wrappers. From there, we can build a reference (`Ref`) to these (immutable) objects and then turn these references into pointers.
The Vulkan API sometimes makes use of a pattern where a `next` chain gets filled by an API command, such as `vkGetPhysicalDeviceProperties2`. The challenge then lies in initializing an empty intermediate object for Vulkan to fill in. We must construct core objects recursively with the right dependencies; care must be taken because every core object that is used in the chain must be saved as a dependency, but must also contain `next` members recursively. Therefore, in the initialization logic (implemented in [`initialize`](@ref Vk.initialize)), core objects are initialized via [`initialize_core`](@ref Vk.initialize_core) and their corresponding reference (if the object is not the root object) is filled with the result to be retained in the only intermediate structure that will contain the whole chain.
Reconstructing the original object is fairly straightforward. If the result is meant to be an intermediate structure, we can simply wrap the core object, the dependency being the original intermediate object that was used to initialize the object and its chain. If we want a high-level structure instead, then we need to chase pointers iteratively from the `next` chain of the core object, reconstructing the `next` objects by loading the pointers as we go along.
```@docs
Vk.initialize
Vk.initialize_core
```
| Vulkan | https://github.com/JuliaGPU/Vulkan.jl.git |
|
[
"MIT"
] | 0.6.21 | 14499ab752f08ebfc0e162a718c127b29997883c | docs | 820 | # Developer Documentation
## Generating the wrapper
A large portion of this package relies on static code generation. To re-generate the main wrapper (`generated/vulkan_wrapper.jl`), execute `generator/scripts/generate_wrapper.jl` in the `generator` environment:
```
julia --color=yes --project=generator -e 'using Pkg; Pkg.instantiate(); include("generator/scripts/generate_wrapper.jl")'
```
Note that VulkanGen, the generator module, contains tests which should be run first to ensure the correctness of the wrapping process. Therefore, it is recommended to use this command instead to also test both VulkanGen and Vulkan.jl:
```
julia --color=yes --project=generator -e 'include("generator/test/runtests.jl"); include("generator/scripts/generate_wrapper.jl"); using Pkg; Pkg.activate("."); Pkg.test()'
```
| Vulkan | https://github.com/JuliaGPU/Vulkan.jl.git |
|
[
"MIT"
] | 0.6.21 | 14499ab752f08ebfc0e162a718c127b29997883c | docs | 254 | ### VulkanSpec
Going from the XML to the IR is fairly independent of the code generation process, and has been isolated into its own module (VulkanSpec).
```@index
Modules = [VulkanGen.VulkanSpec]
```
```@autodocs
Modules = [VulkanGen.VulkanSpec]
```
| Vulkan | https://github.com/JuliaGPU/Vulkan.jl.git |
|
[
"MIT"
] | 0.6.21 | 14499ab752f08ebfc0e162a718c127b29997883c | docs | 1184 | # Getting started
## Overview
This library offers [wrapper types](@ref Wrapper-types) and [wrapper functions](@ref Wrapper-functions) that are intended as a replacement for their C-like counterparts. There are two levels of wrapping, but you should focus on high-level wrappers and only drop down to intermediate wrappers if you find it necessary for performance. [Error handling](@ref Error-handling) is exposed through [ResultTypes.jl](https://github.com/iamed2/ResultTypes.jl) to provide a more robust and user-friendly way of managing error return codes.
Finally, functions and structures have docstrings with information extracted from the XML specification, with links to the original Vulkan documentation, information on required extensions, return codes and more. You can access them easily through the built-in help in the REPL: for example, `?InstanceCreateInfo` will print you information regarding the `InstanceCreateInfo` structure. See the full documentation [here](@ref Vulkan.jl-API).
## Installation
This package can be installed through the registry with
```julia-repl
julia> ]add Vulkan
```
Make sure that you have a decently recent Vulkan driver installed.
| Vulkan | https://github.com/JuliaGPU/Vulkan.jl.git |
|
[
"MIT"
] | 0.4.3 | 76013640549b84a874be5aad5ff6afef9b00afc8 | code | 640 | using Documenter
using DateSelectors
using Dates
using Intervals
# Build the HTML documentation for DateSelectors.jl.
# `strict=false` and `checkdocs=:none` keep the build from erroring on
# missing docstrings or doctest failures.
makedocs(;
    modules=[DateSelectors],
    format=Documenter.HTML(
        # Emit `page.html` style links, friendlier for browsing a local build.
        prettyurls=false,
        assets=[
            "assets/invenia.css",
        ],
    ),
    # Sidebar navigation: page title => markdown source under docs/src.
    pages=[
        "Home" => "index.md",
        "API" => "api.md",
        "Examples" => "examples.md"
    ],
    # Template used for "source" links on docstrings.
    repo="https://github.com/invenia/DateSelectors.jl/blob/{commit}{path}#L{line}",
    sitename="DateSelectors.jl",
    authors="Invenia Technical Computing Corporation",
    strict=false,
    checkdocs=:none,
)
# Publish the generated docs (typically to the gh-pages branch); development
# docs are built from the `main` branch.
deploydocs(;
    repo="github.com/invenia/DateSelectors.jl",
    devbranch="main",
)
| DateSelectors | https://github.com/invenia/DateSelectors.jl.git |
|
[
"MIT"
] | 0.4.3 | 76013640549b84a874be5aad5ff6afef9b00afc8 | code | 277 | module DateSelectors
using Base.Iterators
using Dates
using Intervals
using Random
export DateSelector, NoneSelector, PeriodicSelector, RandomSelector, partition
# Shared definitions are loaded first, then each concrete selector file, which
# implements `Iterators.partition` for its selector type.
include("common.jl")
include("NoneSelector.jl")
include("PeriodicSelector.jl")
include("RandomSelector.jl")
end
| DateSelectors | https://github.com/invenia/DateSelectors.jl.git |
|
[
"MIT"
] | 0.4.3 | 76013640549b84a874be5aad5ff6afef9b00afc8 | code | 450 | """
NoneSelector()
Assign all dates to the validation set, select no holdout dates.
"""
struct NoneSelector <: DateSelector end
function Iterators.partition(dates::AbstractVector{Date}, ::NoneSelector)
    # The step check exists purely for consistency with the other selectors:
    # only day-by-day ranges are accepted.
    dates isa StepRange && step(dates) != Day(1) &&
        throw(ArgumentError("Expected step range over days, not ($(step(dates)))."))
    # No holdout dates: everything goes to the validation set.
    return _getdatesets(dates, Date[])
end
| DateSelectors | https://github.com/invenia/DateSelectors.jl.git |
|
[
"MIT"
] | 0.4.3 | 76013640549b84a874be5aad5ff6afef9b00afc8 | code | 2305 |
"""
    PeriodicSelector(period::DatePeriod, stride::DatePeriod=Day(1), offset::DatePeriod=Day(0))

Assign holdout dates by taking a block of `stride` days once per `period`.
The `offset` controls where each selected block starts and is measured relative
to _Monday 1st Jan 1900_.

For example, `PeriodicSelector(Week(1), Day(2), Day(1))` holds out 2 days per
week, with the selected blocks shifted by 1 day from 1st Jan 1900. Applied to
the first two weeks of the year 1900, it would select the 2nd, 3rd, 9th and
10th of Jan 1900.

Note: this cannot actually be used to select days earlier than `offset` after
1st Jan 1900.
"""
struct PeriodicSelector <: DateSelector
    period::DatePeriod
    stride::DatePeriod
    offset::DatePeriod

    function PeriodicSelector(period, stride=Day(1), offset=Day(0))
        # Validate the configuration up front so misuse fails at construction
        # time rather than when partitioning.
        if period < Day(2)
            throw(DomainError(period, "period must be at least 2 Days."))
        end
        if stride < Day(1)
            throw(DomainError(stride, "stride must be at least 1 Day."))
        end
        if Day(stride) > Day(period)
            throw(ArgumentError(
                "Cannot take a $stride stride within a $period period."
            ))
        end
        return new(period, stride, offset)
    end
end
function Iterators.partition(dates::AbstractVector{Date}, s::PeriodicSelector)
    # Only daily step ranges are supported, consistent with the other selectors.
    dates isa StepRange && step(dates) != Day(1) &&
        throw(ArgumentError("Expected step range over days, not ($(step(dates)))."))

    anchor = _initial_date(s, dates)
    first_date, last_date = extrema(dates)

    # NOTE: walking windows forward from the 1900-based anchor date, rather
    # than doing something clever with modulo arithmetic, is plenty fast in
    # practice: thousandths of a second for current decades and still well
    # under 1/4 second even for year 9000 — so we keep it simple.
    holdout = Date[]
    window = anchor:Day(1):(anchor + s.stride - Day(1))
    while first(window) <= last_date
        # Optimization: only intersect once the window reaches the date range.
        if last(window) >= first_date
            append!(holdout, window ∩ dates)
        end
        window = window .+ s.period
    end

    return _getdatesets(dates, holdout)
end
| DateSelectors | https://github.com/invenia/DateSelectors.jl.git |
|
[
"MIT"
] | 0.4.3 | 76013640549b84a874be5aad5ff6afef9b00afc8 | code | 2586 | """
RandomSelector(seed, holdout_fraction=1//2, block_size=Day(1), offset=Day(0))
Determine holdout set by randomly subsampling contiguous blocks of size `block_size`
without replacement using a `MersenneTwister` seeded with `seed`.
The probability of any given block being in the holdout set is given by `holdout_fraction`.
The `offset` is rarely needed, but is used to control block boundaries.
It is given as an offset relative to _Monday 1st Jan 1900_.
For example, with the default offset of `Day(0)`, and if using a `Week(1)` `block_size`,
then every block will start on a Monday, and will go for 1 or more weeks from there.
Note that at the boundaries of the partitioned dates the blocks may not be of size
`block_size` if they go over the edge -- this is in fact the common case.
"""
# Selects holdout blocks by a seeded pseudo-random draw per block, making the
# result reproducible and independent of the particular date range partitioned.
struct RandomSelector <: DateSelector
    seed::Int
    # NOTE(review): abstract field types (`Real`, `DatePeriod`) prevent full
    # specialization; acceptable here since selectors are not performance critical.
    holdout_fraction::Real
    block_size::DatePeriod
    offset::DatePeriod
    function RandomSelector(seed, holdout_fraction=1//2, block_size=Day(1), offset=Day(0))
        if !(0 <= holdout_fraction <= 1)
            throw(DomainError(
                holdout_fraction,
                "holdout fraction must be between 0 and 1 (inclusive)"
            ))
        end
        if block_size < Day(1)
            throw(DomainError(block_size, "block_size must be at least 1 day."))
        end
        return new(seed, holdout_fraction, block_size, offset)
    end
end
function Iterators.partition(dates::AbstractVector{Date}, s::RandomSelector)
    if dates isa StepRange && step(dates) != Day(1)
        throw(ArgumentError("Expected step range over days, not ($(step(dates)))."))
    end
    sd, ed = extrema(dates)
    # Fixed seed makes the draw fully deterministic for a given selector.
    rng = MersenneTwister(s.seed)
    holdout_dates = Date[]
    initial_time = _initial_date(s, dates)
    # First candidate block: `block_size` consecutive days from the anchor.
    curr_window = initial_time:Day(1):(initial_time + s.block_size - Day(1))
    while first(curr_window) <= ed
        # Important: we must generate a random number for every block even before the start
        # so that the `rng` state is updated consistently no matter when the start is
        # and thus `partition` is invariant on the start date
        r = rand(rng)
        # optimization: only creating holdout window if intersect not empty
        if last(curr_window) >= sd
            if r < s.holdout_fraction
                curr_active_window = curr_window ∩ dates # handle being near boundaries
                append!(holdout_dates, curr_active_window)
            end
        end
        curr_window = curr_window .+ s.block_size
    end
    return _getdatesets(dates, holdout_dates)
end
| DateSelectors | https://github.com/invenia/DateSelectors.jl.git |
|
[
"MIT"
] | 0.4.3 | 76013640549b84a874be5aad5ff6afef9b00afc8 | code | 3159 |
"""
DateSelector
Determines how to [`partition`](@ref) a date set into disjoint validation and holdout sets.
"""
abstract type DateSelector end
"""
partition(dates::AbstractInterval{Date}, s::DateSelector)
partition(dates::StepRange{Date, Day}, selector::DateSelector)
partition(dates::AbstractVector{Date}, s::DateSelector)
Partition the set of `dates` into disjoint `validation` and `holdout` sets according to the
`selector` and return a `NamedTuple({:validation, :holdout})` of iterators.
"""
function Iterators.partition(dates::AbstractInterval{Date}, s::DateSelector)
_dates = _interval2daterange(dates)
return partition(_dates, s)
end
"""
_getdatesets(st, ed, dates) -> NamedTuple{(:validation, :holdout)}
Construct the NamedTuple of iterators for the validation and holdout date sets.
"""
function _getdatesets(all_dates, holdout_dates)
return (
validation=sort(setdiff(all_dates, holdout_dates)),
holdout=sort(holdout_dates)
)
end
_getdatesets(all_dates, date::Date) = _getdatesets(all_dates, [date])
"""
_interval2daterange(dates::AbstractInterval{Day}) -> StepRange{Date, Day}
Helper function to turn an AbstractInterval into a StepRange taking the inclusivity into
account.
"""
function _interval2daterange(dates::AbstractInterval{Date})
fd = _firstdate(dates)
ld =_lastdate(dates)
return fd:Day(1):ld
end
# TODO: remove this once https://github.com/invenia/Intervals.jl/issues/137
# is addressed.
_firstdate(dates::AbstractInterval{Date,Closed}) = first(dates)
_firstdate(dates::AbstractInterval{Date,Open}) = first(dates) + Day(1)
_lastdate(dates::AbstractInterval{Date,<:Bound,Closed}) = last(dates)
_lastdate(dates::AbstractInterval{Date,<:Bound,Open}) = last(dates) - Day(1)
"""
_initial_date(s::S, dates) where S<: DateSelector
Determines when we start counting from when breaking `dates` up into blocks.
Checks that that initial time is valid for the given `dates`.
"""
function _initial_date(s::S, dates) where S<: DateSelector
sd, ed = extrema(dates)
# We would like to start from over 100 years ago
# 1900, which has the convient feature of starting on a Monday
beginning_of_time = Date(1900)
initial_time = beginning_of_time + s.offset
sd < initial_time && throw(DomainError(
sd,
"$S with offset $(s.offset) cannot be used before $(initial_time)",
))
return initial_time
end
"""
    parse(DT::Type{<:DateSelector}, s::AbstractString)

Parse a string of the form `"SelectorName(args...)"` back into a selector of
type `DT`, throwing an `ArgumentError` if `s` does not describe one.
"""
function Base.parse(DT::Type{<:DateSelector}, s::AbstractString)
    expr = Meta.parse(s)
    # Only evaluate call expressions `Name(args...)` where `Name` is a plain
    # symbol resolving, in this module, to a subtype of the requested type.
    # The `isa Symbol` guard keeps qualified/nested call heads (e.g. "Base.f()")
    # from reaching `isdefined` (which would raise a MethodError rather than
    # the documented ArgumentError), and restricts what `eval` can run.
    if expr isa Expr && expr.head == :call && expr.args[1] isa Symbol &&
       isdefined(@__MODULE__, expr.args[1])
        T = getfield(@__MODULE__, expr.args[1])
        T isa Type && T <: DT && return eval(expr)
    end
    throw(ArgumentError("Could not parse \"$s\" as a `$DT`"))
end
if VERSION < v"1.5"
function Base.show(io::IO, s::S) where S<:DateSelector
args = _stringarg.(getfield.(Ref(s), fieldnames(S)))
print(io, "$S($(join(args, ", ")))")
end
# Periods print in a format that breaks our parsing
# So we undo their formatting
_stringarg(p::Period) = "$(typeof(p))($(p.value))"
_stringarg(arg) = repr(arg)
end
| DateSelectors | https://github.com/invenia/DateSelectors.jl.git |
|
[
"MIT"
] | 0.4.3 | 76013640549b84a874be5aad5ff6afef9b00afc8 | code | 432 | @testset "NoneSelector" begin
    # `NoneSelector` must place every date in validation and none in holdout.
    st = Date(2019, 1, 1)
    ed = Date(2019, 2, 1)
    date_range = st:Day(1):ed
    selector = NoneSelector()
    @test selector isa DateSelector
    validation, holdout = partition(date_range, selector)
    @test validation == date_range
    @test isempty(holdout)
    # Test that we can also handle any abstract vector
    @test first(partition(collect(date_range), selector)) == validation
end
| DateSelectors | https://github.com/invenia/DateSelectors.jl.git |
|
[
"MIT"
] | 0.4.3 | 76013640549b84a874be5aad5ff6afef9b00afc8 | code | 4434 | @testset "PeriodicSelector" begin
st = Date(2019, 1, 1)
ed = Date(2019, 2, 1)
date_range = st:Day(1):ed
@testset "1 week period, 1 day stride" begin
selector = PeriodicSelector(Week(1))
@test selector isa DateSelector
result = partition(date_range, selector)
@test sort(vcat(result...)) == date_range
@test isempty(intersect(result...))
@test all(isequal(Week(1)), diff(result.holdout))
# Test that we can also handle any abstract vector
@test partition(collect(date_range), selector) == result
end
@testset "2 week period, 5 day stride" begin
selector = PeriodicSelector(Week(2), Day(5))
result = partition(date_range, selector)
@test sort(vcat(result...)) == date_range
@test isempty(intersect(result...))
expected_holdout = [
st:Day(1):st + Day(4)...,
st + Week(2):Day(1):st + Week(2) + Day(4)...,
st + Week(4):Day(1):st + Week(4) + Day(3)...,
]
@test diff(result.holdout) == Day.([1, 1, 1, 1, 10, 1, 1, 1, 1])
end
@testset "1 week period, 1 day stride, 2 day offset" begin
selector = PeriodicSelector(Week(1), Day(1), Day(2))
result = partition(date_range, selector)
@test sort(vcat(result...)) == date_range
@test isempty(intersect(result...))
expected_holdout = [st+Day(1):Week(1):ed...]
@test result.holdout == expected_holdout
end
@testset "BiWeekly" begin
st1 = Date(1900, 1, 1)
ed1 = Date(1900, 2, 1)
date_range1 = st1:Day(1):ed1
selector = PeriodicSelector(Week(2), Week(1))
result = partition(date_range1, selector)
@test isempty(intersect(result...))
expected_holdout = [
st1:Day(1):st1+Day(6)...,
st1+Week(2):Day(1):st1+Week(3)-Day(1)...,
st1+Week(4):Day(1):ed1...,
]
expected_validation = [
st1+Week(1):Day(1):st1+Week(2)-Day(1)...,
st1+Week(3):Day(1):st1+Week(4)-Day(1)...,
]
@test result.holdout == expected_holdout
@test result.validation == expected_validation
end
@testset "Day of week" begin
@testset "Weekends as holdout" begin
selector = PeriodicSelector(Week(1), Day(2), Day(5))
validation, holdout = partition(date_range, selector)
@test isequal(
sort(unique(dayname.(validation))),
["Friday", "Monday", "Thursday", "Tuesday", "Wednesday"]
)
@test sort(unique(dayname.(holdout))) == ["Saturday", "Sunday"]
end
@testset "Week days as holdout" begin
selector = PeriodicSelector(Week(1), Day(5))
validation, holdout = partition(date_range, selector)
@test sort(unique(dayname.(validation))) == ["Saturday", "Sunday"]
@test isequal(
sort(unique(dayname.(holdout))),
["Friday", "Monday", "Thursday", "Tuesday", "Wednesday"]
)
end
end
@testset "stride, offset, period wrong domain" begin
@test_throws DomainError PeriodicSelector(Day(0))
@test_throws DomainError PeriodicSelector(Day(7), Day(0))
@test_throws DomainError PeriodicSelector(Hour(24))
@test_throws MethodError PeriodicSelector(Day(7), Hour(48))
@test_throws MethodError PeriodicSelector(Day(7), Day(3), Hour(72))
end
@testset "errors if stride > period" begin
@test_throws ArgumentError PeriodicSelector(Day(2), Day(3))
@test_throws ArgumentError PeriodicSelector(Week(1), Day(8))
end
@testset "Invarient to date_range (period $period, stride $stride)" for
(period, stride) in ((Week(2), Week(1)), (Day(30), Day(10)), (Day(5), Day(2)))
@testset "offset $offset" for offset in Day.((0, 1, 2, 3))
selector = PeriodicSelector(period, stride, offset)
initial_range = Date(2029, 1, 1):Day(1):Date(2030, 1, 31)
initial_sets = map(Set, partition(initial_range, selector))
later_range = initial_range + Day(20) # 20 days later.
later_sets = map(Set, partition(later_range, selector))
@test initial_sets.holdout ∩ later_sets.validation == Set()
@test initial_sets.validation ∩ later_sets.holdout == Set()
end
end
end
| DateSelectors | https://github.com/invenia/DateSelectors.jl.git |
|
[
"MIT"
] | 0.4.3 | 76013640549b84a874be5aad5ff6afef9b00afc8 | code | 5215 | @testset "RandomSelector" begin
    st = Date(2019, 1, 1)
    ed = Date(2019, 2, 1)
    date_range = st:Day(1):ed

    @testset "construction" begin
        selector = RandomSelector(2, 1//3)
        @test selector.seed == 2
        @test selector.holdout_fraction == 1//3
        @test selector.block_size == Day(1)
        @test selector.offset == Day(0)
        selector = RandomSelector(2, 1//3, Day(15))
        @test selector.seed == 2
        @test selector.holdout_fraction == 1//3
        @test selector.block_size == Day(15)
        @test selector.offset == Day(0)
        selector = RandomSelector(2, 1//3, Day(15), Day(5))
        @test selector.seed == 2
        @test selector.holdout_fraction == 1//3
        @test selector.block_size == Day(15)
        @test selector.offset == Day(5)
        # Trailing arguments default as documented.
        @test (
            RandomSelector(99, 1//2, Day(1), Day(0)) ==
            RandomSelector(99, 1//2, Day(1)) ==
            RandomSelector(99, 1//2) ==
            RandomSelector(99)
        )
    end

    @testset "basic" begin
        selector = RandomSelector(42)
        @test selector isa DateSelector
        result = partition(date_range, selector)
        @test result isa NamedTuple{(:validation, :holdout)}
        @test sort(union(result...)) == date_range
        @test isempty(intersect(result...))
        # Running partition twice should return the same result
        result2 = partition(date_range, selector)
        @test result.validation == result2.validation
        @test result.holdout == result2.holdout
        # Test that we can also handle any abstract vector
        @test partition(collect(date_range), selector) == result

        @testset "holdout fraction" begin
            # Setting holdout_fraction 1 all days leaves the validation set empty
            validation, holdout = partition(date_range, RandomSelector(42, 1))
            @test isempty(validation)
            @test collect(holdout) == date_range
            # Setting holdout_fraction 0 all days leaves the holdout set empty
            validation, holdout = partition(date_range, RandomSelector(42, 0))
            @test isempty(holdout)
            @test collect(validation) == date_range
        end

        @testset "block size" begin
            selector = RandomSelector(42, 1//2, Day(3))
            result = partition(date_range, selector)
            @test sort(union(result...)) == date_range
            @test isempty(intersect(result...))
            for subset in result
                # at the very least the first 3 items must be from same block
                @test subset[2] == subset[1] + Day(1)
                @test subset[3] == subset[1] + Day(2)
                # no gaps of 2 are possible
                @test !any(isequal(2), diff(subset))
            end
        end
    end

    @testset "Right holdout fraction RandomSelector($seed, $holdout_fraction, $block_size)" for
        seed in (1, 42),
        holdout_fraction in (1//2, 0.1, 0.9, 0.7),
        block_size in (Day(1), Day(2), Week(1), Week(2))

        selector = RandomSelector(seed, holdout_fraction, block_size)
        range = Date(2000, 1, 1):Day(1):Date(2010, 1, 31)  # 10 year total range
        r = partition(range, selector)
        # Realised fraction approaches the nominal one over a long range.
        @test length(r.holdout)/length(range) ≈ holdout_fraction atol=0.05
    end

    @testset "Set Seed (protects against julia RNG changing)" begin
        r1 = partition(date_range, RandomSelector(42, 1//5))
        r2 = partition(date_range, RandomSelector(42, 1//5))
        r3 = partition(date_range, RandomSelector(42, 1//5))
        # cannot directly equate NamedTuples of iterators
        @test collect(r1.validation) == collect(r2.validation) == collect(r3.validation)
        @test collect(r1.holdout) == collect(r2.holdout) == collect(r3.holdout)
        # Hard-coded expectation pins the MersenneTwister(42) draw sequence.
        @test isequal(r1.holdout, [Date(2019, 1, 1), Date(2019, 1, 5), Date(2019, 1, 30)])
    end

    @testset "Different date inputs" begin
        exp = partition(date_range, RandomSelector(42))
        @testset "$(typeof(d))" for d in (
            # Interval
            st..ed,
            # AnchoredInterval should include both start and dates
            AnchoredInterval{Day(31), Date, Closed, Closed}(st)
        )
            result = partition(d, RandomSelector(42))
            @test result.validation == exp.validation
            @test result.holdout == exp.holdout
        end
    end

    @testset "Invarient to date_range RandomSelector($seed, $holdout_fraction, $block_size, $offset))" for
        seed in (1, 42),
        holdout_fraction in (1//2, 1/20),
        block_size in (Day(1), Day(2), Week(1), Week(2)),
        offset in Day.(-3:3)

        selector = RandomSelector(seed, holdout_fraction, block_size, offset) # only the selector varies
        initial_range = Date(2019, 1, 1):Day(1):Date(2020, 1, 31)  # 1 year total range
        initial_sets = map(Set, partition(initial_range, selector))
        later_range = initial_range + Day(20)  # 20 days later.
        later_sets = map(Set, partition(later_range, selector))
        # Shared dates must land in the same set regardless of range bounds.
        @test initial_sets.holdout ∩ later_sets.validation == Set()
        @test initial_sets.validation ∩ later_sets.holdout == Set()
    end
end
| DateSelectors | https://github.com/invenia/DateSelectors.jl.git |
|
[
"MIT"
] | 0.4.3 | 76013640549b84a874be5aad5ff6afef9b00afc8 | code | 3652 | @testset "_getdatesets" begin
    st = Date(2019, 1, 1)
    ed = Date(2019, 2, 1)
    date_range = st:Day(1):ed

    @testset "easy partition" begin
        holdout_dates = date_range[1:10]
        val_dates = date_range[11:end]
        result = DateSelectors._getdatesets(date_range, holdout_dates)
        @test result isa NamedTuple{(:validation, :holdout)}
        @test result.validation == first(result)
        @test result.holdout == last(result)
        @test Base.isiterable(typeof(result.validation))
        @test Base.isiterable(typeof(result.holdout))
        @test result.holdout == holdout_dates
        @test result.validation == val_dates
    end

    @testset "single date" begin
        # A lone Date is accepted and wrapped into a one-element holdout.
        date = rand(date_range)
        result = DateSelectors._getdatesets(date_range, date)
        @test result.holdout == [date]
        @test isempty(intersect(result...))
        @test sort(union(result...)) == date_range
    end

    @testset "random partition" begin
        dates = rand(date_range, 10)
        result = DateSelectors._getdatesets(date_range, dates)
        # The holdout is returned sorted, so (for this seeded draw) it differs
        # from the raw unsorted sample.
        @test result.holdout != dates
        @test result.holdout == sort(dates)
        @test isempty(intersect(result...))
        @test sort(union(result...)) == date_range
    end

    @testset "parsing" begin
        # Just the type
        @test_throws ArgumentError parse(DateSelector, "NoneSelector")
        # Misspelling
        @test_throws ArgumentError parse(DateSelector, "NonSelector()")
        # Wrong expression
        @test_throws ArgumentError parse(DateSelector, "NoneSelector() isa NoneSelector")
        # Extra stuff in the string
        @test_throws Base.Meta.ParseError parse(DateSelector, "NoneSelector() 1")
        # Wrong args
        @test_throws MethodError parse(DateSelector, "PeriodicSelector()")
        @test_throws MethodError parse(DateSelector, "PeriodicSelector(14)")
        # Spacing
        @test parse(DateSelector, " NoneSelector( ) ") isa NoneSelector
        s = parse(DateSelector, "NoneSelector()")
        @test s == NoneSelector()
        # Check specific type works
        s = parse(NoneSelector, "NoneSelector()")
        @test s == NoneSelector()
        s = parse(DateSelector, "PeriodicSelector(Day(2))")
        @test s == PeriodicSelector(Day(2))
        s = parse(DateSelector, "PeriodicSelector(Day(6), Day(4), Day(1))")
        @test s == PeriodicSelector(Day(6), Day(4), Day(1))
        # Check specific type works
        s = parse(PeriodicSelector, "PeriodicSelector(Day(2))")
        @test s == PeriodicSelector(Day(2))
        s = parse(DateSelector, "RandomSelector(1)")
        @test s == RandomSelector(1, 1//2, Day(1), Day(0))
        s = parse(DateSelector, "RandomSelector(123, 1//4, Day(7), Day(1))")
        @test s == RandomSelector(123, 1//4, Day(7), Day(1))
        # Check specific type works
        s = parse(RandomSelector, "RandomSelector(1)")
        @test s == RandomSelector(1, 1//2, Day(1), Day(0))
        # Round-trip: `show` output must be parseable back to an equal selector.
        s = PeriodicSelector(Day(2))
        @test s == parse(DateSelector, string(s))
    end

    @testset "show" begin
        if VERSION < v"1.5"
            # Check we have undone the period printing for DateSelectors alone
            @test repr(PeriodicSelector(Day(6), Day(4), Day(1))) != "PeriodicSelector(6 days, 4 days, 1 day)"
            @test repr((Day(6), Day(4), Day(1))) == "(6 days, 4 days, 1 day)"
        end
        @test repr(PeriodicSelector(Day(2))) == "PeriodicSelector(Day(2), Day(1), Day(0))"
        @test repr(RandomSelector(1)) == "RandomSelector(1, 1//2, Day(1), Day(0))"
        @test repr(NoneSelector()) == "NoneSelector()"
    end
end
| DateSelectors | https://github.com/invenia/DateSelectors.jl.git |
|
[
"MIT"
] | 0.4.3 | 76013640549b84a874be5aad5ff6afef9b00afc8 | code | 290 | using DateSelectors
using Dates
using Intervals
using Random
using Test

# Seed the global RNG: some tests draw from it (e.g. `rand(date_range)` in
# common.jl), so the seed plus include order makes results reproducible.
Random.seed!(1)

@testset "DateSelectors.jl" begin
    include("common.jl")
    include("NoneSelector.jl")
    include("PeriodicSelector.jl")
    include("RandomSelector.jl")
    include("sensibility_checks.jl")
end
| DateSelectors | https://github.com/invenia/DateSelectors.jl.git |
|
[
"MIT"
] | 0.4.3 | 76013640549b84a874be5aad5ff6afef9b00afc8 | code | 1554 | @testset "Sensibility checks" begin
@testset "Sanity checks on partitioning" begin
st = Date(2018, 1, 1)
ed = Date(2019, 1, 1)
datesets = (
st:Day(1):ed,
st..ed,
AnchoredInterval{Day(365), Date}(st),
)
selectors = (
NoneSelector(),
PeriodicSelector(Week(2), Week(1)),
RandomSelector(42),
)
@testset "$(repr(selector))" for selector in selectors, dateset in datesets
a, b = partition(dateset, selector)
@test all(in(dateset), a)
@test all(in(dateset), b)
@test isempty(intersect(a, b))
if dateset isa AbstractInterval
_dateset = DateSelectors._interval2daterange(dateset)
@test sort(union(a, b)) == collect(_dateset)
else
@test sort(union(a, b)) == collect(dateset)
end
end
end
@testset "Vector of days is allowed" begin
date_range = collect(Date(2019, 1, 1):Day(1):Date(2019, 2, 1))
@test first(partition(date_range, NoneSelector())) == date_range
end
@testset "Weekly intervals are not allowed" begin
st = Date(2019, 1, 1)
ed = Date(2019, 2, 1)
weekly_dates = st:Week(1):ed
for selector in (
NoneSelector(),
PeriodicSelector(Week(2), Week(1)),
RandomSelector(42),
)
@test_throws ArgumentError partition(weekly_dates, selector)
end
end
end
| DateSelectors | https://github.com/invenia/DateSelectors.jl.git |
|
[
"MIT"
] | 0.4.3 | 76013640549b84a874be5aad5ff6afef9b00afc8 | docs | 1821 | # DateSelectors
[](https://invenia.github.io/DateSelectors.jl/stable)
[](https://invenia.github.io/DateSelectors.jl/dev)
[](https://github.com/Invenia/DateSelectors.jl/actions?query=workflow%3ACI)
[](https://github.com/invenia/BlueStyle)
[](https://github.com/SciML/ColPrac)
# Usage
`DateSelectors.jl` simplifies the partitioning of a collection of dates into non-contiguous validation and holdout sets, in line with best practices for tuning hyper-parameters in time-series machine learning.
The package exports the `partition` function, which assigns dates to the validation and holdout sets according to the `DateSelector`.
The available `DateSelector`s are:
1. `NoneSelector`: assigns all dates to the validation set.
1. `RandomSelector`: randomly draws a subset of dates _without_ replacement.
1. `PeriodicSelector`: draws contiguous subsets of days periodically from the collection.
A notable trait of the `DateSelector`s is that the selection is invariant to the start and end-dates of collection itself.
Thus you can shift the start and end dates, e.g. by a week, and the days in the overlapping period will consistently still be placed into holdout or validation as before.
The only thing that controls if a date is selected or not is the parameters of the `DateSelector` itself.
See the [examples](https://invenia.github.io/DateSelectors.jl/stable/examples.html) in the docs for more info.
| DateSelectors | https://github.com/invenia/DateSelectors.jl.git |
|
[
"MIT"
] | 0.4.3 | 76013640549b84a874be5aad5ff6afef9b00afc8 | docs | 164 | # API
```@docs
partition
```
# Selectors
```@docs
DateSelector
NoneSelector
PeriodicSelector
RandomSelector
```
# Index
```@index
Modules = [DateSelectors]
```
| DateSelectors | https://github.com/invenia/DateSelectors.jl.git |
|
[
"MIT"
] | 0.4.3 | 76013640549b84a874be5aad5ff6afef9b00afc8 | docs | 3121 |
# Examples
## NoneSelector
The `NoneSelector` simply assigns all days to the validation set and none to the holdout set.
```@example dateselectors
using DateSelectors
using Dates
date_range = Date(2019, 1, 1):Day(1):Date(2019, 3, 31)
selector = NoneSelector()
validation, holdout = partition(date_range, selector)
validation
```
## RandomSelector
The `RandomSelector` uniformly subsamples the collection of dates and assigns them to the holdout set.
Here we use a seed of `42` to uniformly sample from the date range with probability 10% into the holdout set,
in 3-day blocks, some of which may be contiguous.
Note that for a given seed and date range the portion in the holdout set may not be exactly 10% as it is a random sample.
The selection, while random, is fully determined by the `RandomSelector` object and is invariant on the date range.
That is to say if one has two distinct but overlapping date ranges, and uses the same `RandomSelector` object, then the overlapping days will consistently be placed into either holdout or validation in both.
```@example dateselectors
selector = RandomSelector(42, 0.10, Day(3))
validation, holdout = partition(date_range, selector)
validation
```
## PeriodicSelector
The `PeriodicSelector` assigns holdout dates by taking a `stride` once per `period`.
Where in the period the holdout `stride` is taken from is determined by the `offset`.
The offset is relative to _Monday 1st Jan 1900_.
As the stride start location is relative to a fixed point rather than to the date range, this means that the selection, is fully determined by the `PeriodicSelector` object and is invariant on the date range.
That is to say if one has two distinct but overlapping date ranges, and uses the same `PeriodicSelector` object, then the overlapping days will consistently be placed into either holdout or validation in both.
In this example - for whatever reason - we want to assign weekdays as validation days and weekends as holdout days.
Therefore, our `period` is `Week(1)` and `stride` is `Day(2)`, because out of every week we want to keep 2 days in the holdout.
Now, since we need to start selecting on the Saturday, we must first `offset` by `Day(5)` because zero offset corresponds to a Monday.
```@example dateselectors
selector = PeriodicSelector(Week(1), Day(2), Day(5))
validation, holdout = partition(date_range, selector)
validation
```
We can verify that it returned what we expected:
```@example dateselectors
unique(dayname.(validation))
```
```@example dateselectors
unique(dayname.(holdout))
```
## Using AbstractIntervals
You can also specify the date range as an `Interval`:
```@example dateselectors
using Intervals
selector = PeriodicSelector(Week(1), Day(2), Day(4))
date_range = Date(2018, 1, 1)..Date(2019, 3, 31)
validation, holdout = partition(date_range, selector)
validation
```
as well as an `AbstractInterval`:
```@example dateselectors
selector = PeriodicSelector(Week(1), Day(2), Day(4))
date_range = AnchoredInterval{Day(90), Date}(Date(2019, 1, 1))
validation, holdout = partition(date_range, selector)
validation
```
| DateSelectors | https://github.com/invenia/DateSelectors.jl.git |
|
[
"MIT"
] | 0.4.3 | 76013640549b84a874be5aad5ff6afef9b00afc8 | docs | 538 | # DateSelectors.jl
`DateSelectors.jl` simplifies the partitioning of a collection of dates into non-contiguous validation and holdout sets.
The package exports the `partition` function, which assigns dates to the validation and holdout sets according to the `DateSelector`.
The available `DateSelector`s are:
1. `NoneSelector`: assigns all dates to the validation set.
1. `RandomSelector`: randomly draws a subset of dates _without_ replacement.
1. `PeriodicSelector`: draws contiguous subsets of days periodically from the collection.
| DateSelectors | https://github.com/invenia/DateSelectors.jl.git |
|
[
"MIT"
] | 0.8.1 | 3c2a016489c38f35160a246c91a3f3353c47bb68 | code | 657 | using Documenter
using KrylovKit

# Build the documentation site; the `pages` list defines the sidebar order.
makedocs(; modules=[KrylovKit],
         sitename="KrylovKit.jl",
         authors="Jutho Haegeman and collaborators",
         pages=["Home" => "index.md",
                "Manual" => ["man/intro.md",
                             "man/linear.md",
                             "man/eig.md",
                             "man/svd.md",
                             "man/matfun.md",
                             "man/algorithms.md",
                             "man/implementation.md"]],
         # Pretty URLs only on CI; locally they break file:// browsing.
         format=Documenter.HTML(; prettyurls=get(ENV, "CI", nothing) == "true"))

deploydocs(; repo="github.com/Jutho/KrylovKit.jl.git")
| KrylovKit | https://github.com/Jutho/KrylovKit.jl.git |
|
[
"MIT"
] | 0.8.1 | 3c2a016489c38f35160a246c91a3f3353c47bb68 | code | 268 | module KrylovKitChainRulesCoreExt
using KrylovKit
using ChainRulesCore
using LinearAlgebra
using VectorInterface
using KrylovKit: apply_normal, apply_adjoint
include("utilities.jl")
include("linsolve.jl")
include("eigsolve.jl")
include("svdsolve.jl")
end # module
| KrylovKit | https://github.com/Jutho/KrylovKit.jl.git |
|
[
"MIT"
] | 0.8.1 | 3c2a016489c38f35160a246c91a3f3353c47bb68 | code | 16213 | function ChainRulesCore.rrule(config::RuleConfig,
                                 ::typeof(eigsolve),
                                 f,
                                 x₀,
                                 howmany,
                                 which,
                                 alg_primal;
                                 # By default the adjoint problem reuses the primal
                                 # algorithm's settings (via an Arnoldi solver).
                                 alg_rrule=Arnoldi(; tol=alg_primal.tol,
                                                   krylovdim=alg_primal.krylovdim,
                                                   maxiter=alg_primal.maxiter,
                                                   eager=alg_primal.eager,
                                                   orth=alg_primal.orth,
                                                   verbosity=alg_primal.verbosity))
    # Run the primal solve; its outputs are also captured by the pullback closure.
    (vals, vecs, info) = eigsolve(f, x₀, howmany, which, alg_primal)
    # Determine how to apply the adjoint operator fᴴ in the pullback:
    if alg_primal isa Lanczos
        # Lanczos is used for Hermitian problems, so fᴴ coincides with f.
        fᴴ = f
    elseif f isa AbstractMatrix
        fᴴ = adjoint(f)
    else
        # Generic function: obtain the adjoint action from reverse-mode AD of `f`
        # evaluated at a zero (complexified) vector.
        fᴴ = let pb = rrule_via_ad(config, f, zerovector(x₀, complex(scalartype(x₀))))[2]
            v -> pb(v)[2]
        end
    end
    eigsolve_pullback = make_eigsolve_pullback(config, f, fᴴ, x₀, howmany, which,
                                               alg_primal, alg_rrule, vals, vecs, info)
    return (vals, vecs, info), eigsolve_pullback
end
# Build the pullback closure for `eigsolve`: it maps the output cotangents
# (Δvals, Δvecs, info-cotangent is ignored) to input cotangents, delegating the
# actual linear-algebra work to `compute_eigsolve_pullback_data`.
function make_eigsolve_pullback(config, f, fᴴ, x₀, howmany, which, alg_primal, alg_rrule,
                                vals, vecs, info)
    function eigsolve_pullback(ΔX)
        # Only `f` receives a nontrivial cotangent; all other inputs are inert.
        ∂self = NoTangent()
        ∂x₀ = ZeroTangent()
        ∂howmany = NoTangent()
        ∂which = NoTangent()
        ∂alg = NoTangent()

        # Prepare inputs:
        #----------------
        _Δvals = unthunk(ΔX[1])
        _Δvecs = unthunk(ΔX[2])
        # special case: propagate zero tangent
        if _Δvals isa AbstractZero && _Δvecs isa AbstractZero
            ∂f = ZeroTangent()
            return ∂self, ∂f, ∂x₀, ∂howmany, ∂which, ∂alg
        end
        # discard vals/vecs from n + 1 onwards if contribution is zero
        _n_vals = _Δvals isa AbstractZero ? nothing : findlast(!iszero, _Δvals)
        _n_vecs = _Δvecs isa AbstractZero ? nothing :
                  findlast(!Base.Fix2(isa, AbstractZero), _Δvecs)
        n_vals = isnothing(_n_vals) ? 0 : _n_vals
        n_vecs = isnothing(_n_vecs) ? 0 : _n_vecs
        n = max(n_vals, n_vecs)
        if n < length(vals) && vals[n + 1] == conj(vals[n])
            # this can probably only happen for real problems, where it would be problematic
            # to split complex conjugate pairs in solving the tangent problem
            n += 1
        end
        # special case (can this happen?): try to maintain type stability
        if n == 0
            if howmany == 0
                T = (alg_primal isa Lanczos) ? scalartype(x₀) : complex(scalartype(x₀))
                _vecs = [zerovector(x₀, T)]
                ws = [_vecs[1]]
                ∂f = construct∂f_eig(config, f, _vecs, ws)
                return ∂self, ∂f, ∂x₀, ∂howmany, ∂which, ∂alg
            else
                ws = [vecs[1]]
                ∂f = construct∂f_eig(config, f, vecs, ws)
                return ∂self, ∂f, ∂x₀, ∂howmany, ∂which, ∂alg
            end
        end
        # Pad both cotangent collections to a common length `n` with zeros.
        Δvals = fill(zero(vals[1]), n)
        if n_vals > 0
            Δvals[1:n_vals] .= view(_Δvals, 1:n_vals)
        end
        if _Δvecs isa AbstractZero
            # case of no contribution of singular vectors
            Δvecs = fill(ZeroTangent(), n)
        else
            Δvecs = fill(zerovector(vecs[1]), n)
            if n_vecs > 0
                Δvecs[1:n_vecs] .= view(_Δvecs, 1:n_vecs)
            end
        end

        # Compute actual pullback data:
        #------------------------------
        ws = compute_eigsolve_pullback_data(Δvals, Δvecs, view(vals, 1:n), view(vecs, 1:n),
                                            info, which, fᴴ, alg_primal, alg_rrule)

        # Return pullback in correct form:
        #---------------------------------
        ∂f = construct∂f_eig(config, f, vecs, ws)
        return ∂self, ∂f, ∂x₀, ∂howmany, ∂which, ∂alg
    end
    return eigsolve_pullback
end
# For each requested eigenpair (λᵢ, vᵢ), solve the cotangent linear problem with
# an iterative linear solver (GMRES/BiCGStab); the extra scalar unknown enforces
# the gauge constraint ⟨v, w⟩-component handling.
function compute_eigsolve_pullback_data(Δvals, Δvecs, vals, vecs, info, which, fᴴ,
                                        alg_primal, alg_rrule::Union{GMRES,BiCGStab})
    ws = similar(vecs, length(Δvecs))
    T = scalartype(vecs[1])
    @inbounds for i in 1:length(Δvecs)
        Δλ = Δvals[i]
        Δv = Δvecs[i]
        λ = vals[i]
        v = vecs[i]

        # First treat special cases
        if isa(Δv, AbstractZero) && iszero(Δλ) # no contribution
            ws[i] = zerovector(v)
            continue
        end
        if isa(Δv, AbstractZero) && isa(alg_primal, Lanczos) # simple contribution
            ws[i] = scale(v, Δλ)
            continue
        end

        # General case :

        # for the case where `f` is a real matrix, we can expect the following simplication
        # TODO: can we implement this within our general approach, or generalise this to also
        # cover the case where `f` is a function?
        # if i > 1 && eltype(A) <: Real &&
        #    vals[i] == conj(vals[i - 1]) && Δvals[i] == conj(Δvals[i - 1]) &&
        #    vecs[i] == conj(vecs[i - 1]) && Δvecs[i] == conj(Δvecs[i - 1])
        #     ws[i] = conj(ws[i - 1])
        #     continue
        # end

        if isa(Δv, AbstractZero)
            b = (zerovector(v), convert(T, Δλ))
        else
            # Project out the component of Δv along v; its imaginary part signals
            # a gauge-dependent cotangent, which we can only warn about.
            vdΔv = inner(v, Δv)
            if alg_rrule.verbosity >= 0
                gauge = abs(imag(vdΔv))
                gauge > alg_primal.tol &&
                    @warn "`eigsolve` cotangent for eigenvector $i is sensitive to gauge choice: (|gauge| = $gauge)"
            end
            Δv = add(Δv, v, -vdΔv)
            b = (Δv, convert(T, Δλ))
        end
        # Solve the bordered linear system: (fᴴ - conj(λ)) w + v * x₂ = b₁,
        # ⟨v, w⟩ = b₂, using the user-selected iterative solver.
        w, reverse_info = let λ = λ, v = v
            linsolve(b, zerovector(b), alg_rrule) do (x1, x2)
                y1 = VectorInterface.add!!(VectorInterface.add!!(KrylovKit.apply(fᴴ, x1),
                                                                 x1, conj(λ), -1),
                                           v, x2)
                y2 = inner(v, x1)
                return (y1, y2)
            end
        end
        if info.converged >= i && reverse_info.converged == 0 && alg_rrule.verbosity >= 0
            @warn "`eigsolve` cotangent linear problem ($i) did not converge, whereas the primal eigenvalue problem did: normres = $(reverse_info.normres)"
        elseif abs(w[2]) > alg_rrule.tol && alg_rrule.verbosity >= 0
            @warn "`eigsolve` cotangent linear problem ($i) returns unexpected result: error = $(w[2])"
        end
        ws[i] = w[1]
    end
    return ws
end
# Pullback data for a general (non-hermitian, Arnoldi-based) eigenvalue problem.
# Given cotangents `Δvals`/`Δvecs` of the first `n` eigenvalues/eigenvectors, compute
# the vectors `ws` such that the cotangent of the linear map is assembled (by
# `construct∂f_eig`) as `∑ᵢ wᵢ ⋅ vᵢ'`. `fᴴ` applies the adjoint of the primal linear
# map; `alg_rrule` is the Arnoldi algorithm used for the cotangent problems.
function compute_eigsolve_pullback_data(Δvals, Δvecs, vals, vecs, info, which, fᴴ,
                                        alg_primal::Arnoldi, alg_rrule::Arnoldi)
    n = length(Δvecs)
    T = scalartype(vecs[1])
    G = zeros(T, n, n)     # Gram matrix of the eigenvectors (not assumed orthonormal)
    VdΔV = zeros(T, n, n)  # overlap matrix ⟨vᵢ, Δvⱼ⟩
    for j in 1:n
        for i in 1:n
            if i < j
                # upper triangle: reuse the conjugate of the already computed lower triangle
                G[i, j] = conj(G[j, i])
            elseif i == j
                G[i, i] = norm(vecs[i])^2
            else
                G[i, j] = inner(vecs[i], vecs[j])
            end
            if !(Δvecs[j] isa AbstractZero)
                VdΔV[i, j] = inner(vecs[i], Δvecs[j])
            end
        end
    end
    # components along subspace spanned by current eigenvectors
    tol = alg_primal.tol
    if alg_rrule.verbosity >= 0
        # degenerate eigenvalues: the cotangent should not depend on the arbitrary
        # gauge (phase/mixing) choice within a degenerate subspace; warn if it does
        mask = abs.(transpose(vals) .- vals) .< tol
        gaugepart = VdΔV[mask] - Diagonal(real(diag(VdΔV)))[mask]
        Δgauge = norm(gaugepart, Inf)
        Δgauge > tol &&
            @warn "`eigsolve` cotangents sensitive to gauge choice: (|Δgauge| = $Δgauge)"
    end
    VdΔV′ = VdΔV - G * Diagonal(diag(VdΔV) ./ diag(G))
    # divided differences 1/(λⱼ - λᵢ), with `safe_inv` regularising (near-)degeneracies
    aVdΔV = VdΔV′ .* conj.(safe_inv.(transpose(vals) .- vals, tol))
    for i in 1:n
        aVdΔV[i, i] += Δvals[i]
    end
    Gc = cholesky!(G)
    iGaVdΔV = Gc \ aVdΔV
    iGVdΔV = Gc \ VdΔV
    # zs[i] = ∑ⱼ vⱼ * iGaVdΔV[j, i]: cotangent components within the eigenvector subspace
    zs = similar(vecs)
    for i in 1:n
        z = scale(vecs[1], iGaVdΔV[1, i])
        for j in 2:n
            z = VectorInterface.add!!(z, vecs[j], iGaVdΔV[j, i])
        end
        zs[i] = z
    end
    # components in orthogonal subspace:
    # solve Sylvester problem (A * (1-P) + shift * P) * W - W * Λ = ΔV as eigenvalue problem
    # with ΔVᵢ = fᴴ(zᵢ) + (1 - P) * Δvᵢ
    # where we can recycle information in the computation of P * Δvᵢ
    sylvesterarg = similar(vecs)
    for i in 1:n
        y = KrylovKit.apply(fᴴ, zs[i])
        if !(Δvecs[i] isa AbstractZero)
            y = VectorInterface.add!!(y, Δvecs[i])
            for j in 1:n
                y = VectorInterface.add!!(y, vecs[j], -iGVdΔV[j, i])
            end
        end
        sylvesterarg[i] = y
    end
    # To solve Sylvester problem as eigenvalue problem, we potentially need to shift the
    # eigenvalues zero that originate from the projection onto the orthogonal complement of
    # original subspace, namely whenever zero is more extremal than the actual eigenvalues.
    # Hereto, we shift the zero eigenvalues in the original subspace to the value 2 * vals[n],
    # where we expect that if `by(vals[n]) > by(0)`, then `by(2*vals[n]) > by(vals[n])`
    # (whenever `rev = false`, and with opposite inequality whenever `rev = true`)
    by, rev = KrylovKit.eigsort(which)
    if (rev ? (by(vals[n]) < by(zero(vals[n]))) : (by(vals[n]) > by(zero(vals[n]))))
        shift = 2 * conj(vals[n])
    else
        shift = zero(vals[n])
    end
    # The ith column wᵢ of the solution to the Sylvester equation is contained in the
    # eigenvector (wᵢ, eᵢ) corresponding to eigenvalue λᵢ of the block matrix
    # [(A * (1-P) + shift * P) -ΔV; 0 Λ], where eᵢ is the ith unit vector. We will need
    # to renormalise the eigenvectors to have exactly eᵢ as second component. We use
    # (0, e₁ + e₂ + ... + eₙ) as the initial guess for the eigenvalue problem.
    W₀ = (zerovector(vecs[1]), one.(vals))
    P = orthogonalprojector(vecs, n, Gc)
    solver = (T <: Real) ? KrylovKit.realeigsolve : KrylovKit.eigsolve # for `eigsolve`, `T` will always be a Complex subtype`
    rvals, Ws, reverse_info = let P = P, ΔV = sylvesterarg, shift = shift
        solver(W₀, n, reverse_which(which), alg_rrule) do (w, x)
            w₀ = P(w)
            w′ = KrylovKit.apply(fᴴ, add(w, w₀, -1))
            if !iszero(shift)
                w′ = VectorInterface.add!!(w′, w₀, shift)
            end
            @inbounds for i in eachindex(x) # length(x) = n but let us not use outer variables
                w′ = VectorInterface.add!!(w′, ΔV[i], -x[i])
            end
            return (w′, conj.(vals) .* x)
        end
    end
    if info.converged >= n && reverse_info.converged < n && alg_rrule.verbosity >= 0
        @warn "`eigsolve` cotangent problem did not converge, whereas the primal eigenvalue problem did"
    end
    # cleanup and construct final result by renormalising the eigenvectors and explicitly
    # checking that they have the expected form and reproduce the expected eigenvalue
    ws = zs
    tol = alg_rrule.tol
    Q = orthogonalcomplementprojector(vecs, n, Gc)
    for i in 1:n
        w, x = Ws[i]
        # the dominant entry of `x` identifies which cotangent column this solution belongs to
        _, ic = findmax(abs, x)
        factor = 1 / x[ic]
        x[ic] = zero(x[ic])
        if alg_rrule.verbosity >= 0
            error = max(norm(x, Inf), abs(rvals[i] - conj(vals[ic])))
            error > 5 * tol &&
                @warn "`eigsolve` cotangent linear problem ($ic) returns unexpected result: error = $error"
        end
        ws[ic] = VectorInterface.add!!(zs[ic], Q(w), -factor)
    end
    return ws
end
# several simplifications happen in the case of a Hermitian eigenvalue problem:
# eigenvalues are real, eigenvectors are orthonormal (no Gram matrix / Cholesky needed)
function compute_eigsolve_pullback_data(Δvals, Δvecs, vals, vecs, info, which, fᴴ,
                                        alg_primal::Lanczos, alg_rrule::Arnoldi)
    n = length(Δvecs)
    T = scalartype(vecs[1])
    VdΔV = zeros(T, n, n)  # overlap matrix ⟨vᵢ, Δvⱼ⟩
    for j in 1:n
        for i in 1:n
            if !(Δvecs[j] isa AbstractZero)
                VdΔV[i, j] = inner(vecs[i], Δvecs[j])
            end
        end
    end
    # components along subspace spanned by current eigenvectors
    tol = alg_primal.tol
    # anti-hermitian part of the overlaps; the hermitian part is gauge freedom
    aVdΔV = rmul!(VdΔV - VdΔV', 1 / 2)
    if alg_rrule.verbosity >= 0
        # warn if the cotangents depend on the arbitrary gauge within degenerate subspaces
        mask = abs.(transpose(vals) .- vals) .< tol
        gaugepart = view(aVdΔV, mask)
        gauge = norm(gaugepart, Inf)
        gauge > tol &&
            @warn "`eigsolve` cotangents sensitive to gauge choice: (|gauge| = $gauge)"
    end
    # divided differences 1/(λⱼ - λᵢ), regularised by `safe_inv` near degeneracies
    aVdΔV .= aVdΔV .* safe_inv.(transpose(vals) .- vals, tol)
    for i in 1:n
        aVdΔV[i, i] += real(Δvals[i])
    end
    # zs[i] = ∑ⱼ vⱼ * aVdΔV[j, i]: cotangent components within the eigenvector subspace
    zs = similar(vecs)
    for i in 1:n
        z = scale(vecs[1], aVdΔV[1, i])
        for j in 2:n
            z = VectorInterface.add!!(z, vecs[j], aVdΔV[j, i])
        end
        zs[i] = z
    end
    # components in orthogonal subspace
    sylvesterarg = similar(vecs)
    for i in 1:n
        y = zerovector(vecs[1])
        if !(Δvecs[i] isa AbstractZero)
            # (1 - P) * Δvᵢ with P the projector onto the eigenvector subspace
            y = VectorInterface.add!!(y, Δvecs[i], +1)
            for j in 1:n
                y = VectorInterface.add!!(y, vecs[j], -VdΔV[j, i])
            end
        end
        sylvesterarg[i] = y
    end
    # shift the zero eigenvalues of the projected-out subspace when zero would be more
    # extremal (according to `which`) than the actual eigenvalues; see Arnoldi method above
    by, rev = KrylovKit.eigsort(which)
    if (rev ? (by(vals[n]) < by(zero(vals[n]))) : (by(vals[n]) > by(zero(vals[n]))))
        shift = 2 * conj(vals[n])
    else
        shift = zero(vals[n])
    end
    W₀ = (zerovector(vecs[1]), one.(vals))
    P = orthogonalprojector(vecs, n)
    solver = (T <: Real) ? KrylovKit.realeigsolve : KrylovKit.eigsolve
    # solve the Sylvester problem as an extended eigenvalue problem, as in the Arnoldi case
    rvals, Ws, reverse_info = let P = P, ΔV = sylvesterarg, shift = shift
        solver(W₀, n, reverse_which(which), alg_rrule) do (w, x)
            w₀ = P(w)
            w′ = KrylovKit.apply(fᴴ, add(w, w₀, -1))
            if !iszero(shift)
                w′ = VectorInterface.add!!(w′, w₀, shift)
            end
            @inbounds for i in 1:length(x) # length(x) = n but let us not use outer variables
                w′ = VectorInterface.add!!(w′, ΔV[i], -x[i])
            end
            return (w′, vals .* x)
        end
    end
    if info.converged >= n && reverse_info.converged < n && alg_rrule.verbosity >= 0
        @warn "`eigsolve` cotangent problem did not converge, whereas the primal eigenvalue problem did"
    end
    # cleanup and construct final result
    ws = zs
    tol = alg_rrule.tol
    Q = orthogonalcomplementprojector(vecs, n)
    for i in 1:n
        w, x = Ws[i]
        # the dominant entry of `x` identifies which cotangent column this solution belongs to
        _, ic = findmax(abs, x)
        factor = 1 / x[ic]
        x[ic] = zero(x[ic])
        error = max(norm(x, Inf), abs(rvals[i] - conj(vals[ic])))
        if error > 5 * tol && alg_rrule.verbosity >= 0
            @warn "`eigsolve` cotangent linear problem ($ic) returns unexpected result: error = $error"
        end
        ws[ic] = VectorInterface.add!!(zs[ic], Q(w), -factor)
    end
    return ws
end
# Assemble the cotangent of the function `f` in `eigsolve` by pulling each cotangent
# vector `ws[i]` back through the AD engine at the corresponding eigenvector `vecs[i]`
# and accumulating the contributions.
function construct∂f_eig(config, f, vecs, ws)
    config isa RuleConfig{>:HasReverseMode} ||
        throw(ArgumentError("`eigsolve` reverse-mode AD requires AD engine that supports calling back into AD"))
    pullback_at(v, w) = rrule_via_ad(config, f, v)[2](w)[1]
    ∂f = pullback_at(vecs[1], ws[1])
    for i in 2:length(ws)
        ∂f = ChainRulesCore.add!!(∂f, pullback_at(vecs[i], ws[i]))
    end
    return ∂f
end
# Matrix version: build the cotangent Ā lazily as a thunk; for strided matrices an
# `InplaceableThunk` additionally allows in-place accumulation into an existing cotangent.
function construct∂f_eig(config, A::AbstractMatrix, vecs, ws)
    A isa StridedMatrix || return @thunk(ProjectTo(A)(_buildĀ_eig!(zero(A), vecs, ws)))
    return InplaceableThunk(Ā -> _buildĀ_eig!(Ā, vecs, ws),
                            @thunk(_buildĀ_eig!(zero(A), vecs, ws)))
end
# Accumulate the matrix cotangent Ā += ∑ᵢ wᵢ * vᵢ' from the eigenvectors `vs` and the
# pullback vectors `ws`; `AbstractZero` entries in `ws` contribute nothing.
# When Ā is real but the vectors are complex, only the real part of w * v' is
# accumulated, split as real(w)*real(v)' + imag(w)*imag(v)'. Mutates and returns `Ā`.
function _buildĀ_eig!(Ā, vs, ws)
    for i in eachindex(ws)  # idiomatic iteration instead of 1:length(ws)
        w = ws[i]
        v = vs[i]
        if !(w isa AbstractZero)
            if eltype(Ā) <: Real && eltype(w) <: Complex
                mul!(Ā, _realview(w), _realview(v)', +1, +1)
                mul!(Ā, _imagview(w), _imagview(v)', +1, +1)
            else
                mul!(Ā, w, v', +1, +1)  # consistent (α, β) = (+1, +1) notation
            end
        end
    end
    return Ā
end
# In the cotangent (adjoint) eigenvalue problem the eigenvalues appear as `conj(λ)`,
# so the primal sorting criterion must be composed with complex conjugation.
function reverse_which(which)
    sortby, rev = KrylovKit.eigsort(which)
    return EigSorter(λ -> sortby(conj(λ)), rev)
end
| KrylovKit | https://github.com/Jutho/KrylovKit.jl.git |
|
[
"MIT"
# ChainRules reverse rule for `linsolve(f, b, x₀, alg, a₀, a₁)`, which solves
# (a₀ + a₁ * f) * x = b. The pullback solves the adjoint linear problem with the
# same (or a user-provided `alg_rrule`) algorithm; see `make_linsolve_pullback`.
] | 0.8.1 | 3c2a016489c38f35160a246c91a3f3353c47bb68 | code | 5357 | function ChainRulesCore.rrule(config::RuleConfig,
                              ::typeof(linsolve),
                              f,
                              b,
                              x₀,
                              alg_primal,
                              a₀,
                              a₁; alg_rrule=alg_primal)
    (x, info) = linsolve(f, b, x₀, alg_primal, a₀, a₁)
    # `fᴴ` applies the adjoint of `f`; `construct∂f` maps a cotangent vector to ∂f
    fᴴ, construct∂f = lin_preprocess(config, f, x)
    linsolve_pullback = make_linsolve_pullback(fᴴ, b, a₀, a₁, alg_rrule, construct∂f, x,
                                               info)
    return (x, info), linsolve_pullback
end
# Construct the pullback closure for `linsolve`. The cotangent `x̄` of the solution
# is mapped onto cotangents of `f`, `b`, `a₀` and `a₁` by solving the adjoint linear
# problem (conj(a₀) + conj(a₁) * fᴴ) * ∂b = x̄ with the `alg_rrule` algorithm.
function make_linsolve_pullback(fᴴ, b, a₀, a₁, alg_rrule, construct∂f, x, info)
    function linsolve_pullback(X̄)
        x̄ = unthunk(X̄[1])
        @assert X̄[2] isa AbstractZero "No cotangent of the `info` output is supported."
        ∂self = NoTangent()
        ∂x₀ = ZeroTangent()
        ∂algorithm = NoTangent()
        # zero cotangent propagates to zero cotangents without any linear solve
        if x̄ isa AbstractZero
            ∂f = ZeroTangent()
            ∂b = ZeroTangent()
            ∂a₀ = ZeroTangent()
            ∂a₁ = ZeroTangent()
            return ∂self, ∂f, ∂x₀, ∂x₀, ∂algorithm, ∂a₀, ∂a₁
        end
        # starting guess with a scalar type wide enough for x̄, a₀ and a₁
        x̄₀ = zerovector(x̄,
                         VectorInterface.promote_scale(scalartype(x̄),
                                                       VectorInterface.promote_scale(a₀,
                                                                                     a₁)))
        # ∂b solves the adjoint problem (conj(a₀) + conj(a₁) * fᴴ) ∂b = x̄
        ∂b, reverse_info = linsolve(fᴴ, x̄, x̄₀, alg_rrule, conj(a₀),
                                    conj(a₁))
        if info.converged > 0 && reverse_info.converged == 0 && alg_rrule.verbosity >= 0
            @warn "`linsolve` cotangent problem did not converge, whereas the primal linear problem did: normres = $(reverse_info.normres)"
        end
        x∂b = inner(x, ∂b)
        b∂b = inner(b, ∂b)
        # ∂f is assembled from -conj(a₁) * ∂b ⊗ x' by the caller-provided constructor
        ∂f = construct∂f(scale(∂b, -conj(a₁)))
        ∂a₀ = -x∂b
        ∂a₁ = (x∂b * conj(a₀) - b∂b) / conj(a₁)
        return ∂self, ∂f, ∂b, ∂x₀, ∂algorithm, ∂a₀, ∂a₁
    end
end
# Prepare the AD machinery for the `linsolve` pullback when the linear map is given as
# a function `f`: returns `fᴴ`, a function applying the adjoint of `f` (via the AD
# pullback at `x`), and `construct∂f_lin`, which maps a cotangent vector to ∂f.
# Fix: the original computed `rrule_via_ad(config, f, x)[2]` twice (the first binding
# was shadowed by the `let` block and never used), tracing the AD pullback twice.
function lin_preprocess(config, f, x)
    config isa RuleConfig{>:HasReverseMode} ||
        throw(ArgumentError("`linsolve` reverse-mode AD requires AD engine that supports calling back into AD"))
    fᴴ, construct∂f_lin = let pb = rrule_via_ad(config, f, x)[2]
        # pb(w) == (∂f, ∂x); component 2 is the adjoint applied to w, component 1 is ∂f
        v -> pb(v)[2], w -> pb(w)[1]
    end
    return fᴴ, construct∂f_lin
end
# Matrix version of `lin_preprocess`: the adjoint map is simply `adjoint(A)`, and the
# cotangent of `A` is built lazily as a thunk (in-place accumulable when A is strided).
function lin_preprocess(config, A::AbstractMatrix, x)
    fᴴ = adjoint(A)
    construct∂f_lin = if A isa StridedMatrix
        w -> InplaceableThunk(Ā -> _buildĀ_lin!(Ā, x, w),
                              @thunk(_buildĀ_lin!(zero(A), x, w)))
    else
        let project_A = ProjectTo(A)
            w -> @thunk(project_A(_buildĀ_lin!(zero(A), x, w)))
        end
    end
    return fᴴ, construct∂f_lin
end
# Accumulate the `linsolve` matrix cotangent Ā += w * v'; an `AbstractZero` cotangent
# contributes nothing. When Ā is real but the vectors are complex, only the real part
# is accumulated, split as real(w)*real(v)' + imag(w)*imag(v)'. Mutates and returns Ā.
function _buildĀ_lin!(Ā, v, w)
    w isa AbstractZero && return Ā
    if eltype(Ā) <: Real && eltype(w) <: Complex
        mul!(Ā, _realview(w), _realview(v)', +1, +1)
        mul!(Ā, _imagview(w), _imagview(v)', +1, +1)
    else
        mul!(Ā, w, v', +1, 1)
    end
    return Ā
end
# frule - currently untested - commented out while untested and unused
# function ChainRulesCore.frule((_, ΔA, Δb, Δx₀, _, Δa₀, Δa₁)::Tuple, ::typeof(linsolve),
# A::AbstractMatrix, b::AbstractVector, x₀, algorithm, a₀, a₁)
# (x, info) = linsolve(A, b, x₀, algorithm, a₀, a₁)
# if Δb isa ChainRulesCore.AbstractZero
# rhs = zerovector(b)
# else
# rhs = scale(Δb, (1 - Δa₁))
# end
# if !iszero(Δa₀)
# rhs = add!!(rhs, x, -Δa₀)
# end
# if !iszero(ΔA)
# rhs = mul!(rhs, ΔA, x, -a₁, true)
# end
# (Δx, forward_info) = linsolve(A, rhs, zerovector(rhs), algorithm, a₀, a₁)
# if info.converged > 0 && forward_info.converged == 0 && alg_rrule.verbosity >= 0
# @warn "The tangent linear problem did not converge, whereas the primal linear problem did."
# end
# return (x, info), (Δx, NoTangent())
# end
# function ChainRulesCore.frule(config::RuleConfig{>:HasForwardsMode}, tangents,
# ::typeof(linsolve),
# A::AbstractMatrix, b::AbstractVector, x₀, algorithm, a₀, a₁)
# return frule(tangents, linsolve, A, b, x₀, algorithm, a₀, a₁)
# end
# function ChainRulesCore.frule(config::RuleConfig{>:HasForwardsMode},
# (_, Δf, Δb, Δx₀, _, Δa₀, Δa₁),
# ::typeof(linsolve),
# f, b, x₀, algorithm, a₀, a₁)
# (x, info) = linsolve(f, b, x₀, algorithm, a₀, a₁)
# if Δb isa AbstractZero
# rhs = zerovector(b)
# else
# rhs = scale(Δb, (1 - Δa₁))
# end
# if !iszero(Δa₀)
# rhs = add!!(rhs, x, -Δa₀)
# end
# if !(Δf isa AbstractZero)
# rhs = add!!(rhs, frule_via_ad(config, (Δf, ZeroTangent()), f, x), -a₀)
# end
# (Δx, forward_info) = linsolve(f, rhs, zerovector(rhs), algorithm, a₀, a₁)
# if info.converged > 0 && forward_info.converged == 0 && alg_rrule.verbosity >= 0
# @warn "The tangent linear problem did not converge, whereas the primal linear problem did."
# end
# return (x, info), (Δx, NoTangent())
# end
| KrylovKit | https://github.com/Jutho/KrylovKit.jl.git |
|
[
"MIT"
] | 0.8.1 | 3c2a016489c38f35160a246c91a3f3353c47bb68 | code | 12219 | # Reverse rule adopted from tsvd! rrule as found in TensorKit.jl
# ChainRules reverse rule for `svdsolve` with the GKL (Golub-Kahan-Lanczos) algorithm.
# By default the cotangent problems are solved with an Arnoldi method inheriting the
# primal algorithm's parameters; see `make_svdsolve_pullback`.
function ChainRulesCore.rrule(config::RuleConfig, ::typeof(svdsolve), f, x₀, howmany, which,
                              alg_primal::GKL;
                              alg_rrule=Arnoldi(; tol=alg_primal.tol,
                                                krylovdim=alg_primal.krylovdim,
                                                maxiter=alg_primal.maxiter,
                                                eager=alg_primal.eager,
                                                orth=alg_primal.orth,
                                                verbosity=alg_primal.verbosity))
    vals, lvecs, rvecs, info = svdsolve(f, x₀, howmany, which, alg_primal)
    svdsolve_pullback = make_svdsolve_pullback(config, f, x₀, howmany, which, alg_primal,
                                               alg_rrule, vals, lvecs, rvecs, info)
    return (vals, lvecs, rvecs, info), svdsolve_pullback
end
# Construct the pullback closure for `svdsolve`. It normalises the incoming cotangents
# (unthunking, truncating trailing zero contributions), delegates the actual adjoint
# computation to `compute_svdsolve_pullback_data`, and assembles ∂f via `construct∂f_svd`.
function make_svdsolve_pullback(config, f, x₀, howmany, which, alg_primal, alg_rrule, vals,
                                lvecs, rvecs, info)
    function svdsolve_pullback(ΔX)
        ∂self = NoTangent()
        ∂x₀ = ZeroTangent()
        ∂howmany = NoTangent()
        ∂which = NoTangent()
        ∂alg = NoTangent()
        # Prepare inputs:
        #----------------
        _Δvals = unthunk(ΔX[1])
        _Δlvecs = unthunk(ΔX[2])
        _Δrvecs = unthunk(ΔX[3])
        # special case: propagate zero tangent
        if _Δvals isa AbstractZero && _Δlvecs isa AbstractZero && _Δrvecs isa AbstractZero
            ∂f = ZeroTangent()
            return ∂self, ∂f, ∂x₀, ∂howmany, ∂which, ∂alg
        end
        # discard vals/vecs from n + 1 onwards if contribution is zero
        _n_vals = _Δvals isa AbstractZero ? nothing : findlast(!iszero, _Δvals)
        _n_lvecs = _Δlvecs isa AbstractZero ? nothing :
                   findlast(!Base.Fix2(isa, AbstractZero), _Δlvecs)
        _n_rvecs = _Δrvecs isa AbstractZero ? nothing :
                   findlast(!Base.Fix2(isa, AbstractZero), _Δrvecs)
        n_vals = isnothing(_n_vals) ? 0 : _n_vals
        n_lvecs = isnothing(_n_lvecs) ? 0 : _n_lvecs
        n_rvecs = isnothing(_n_rvecs) ? 0 : _n_rvecs
        n = max(n_vals, n_lvecs, n_rvecs)
        # special case (can this happen?): try to maintain type stability
        if n == 0
            if howmany == 0
                # no primal output at all: build zero-valued pullback data from x₀
                _lvecs = [zerovector(x₀)]
                _rvecs = [apply_adjoint(f, x₀)]
                xs = [_lvecs[1]]
                ys = [_rvecs[1]]
                ∂f = construct∂f_svd(config, f, _lvecs, _rvecs, xs, ys)
                return ∂self, ∂f, ∂x₀, ∂howmany, ∂which, ∂alg
            else
                xs = [zerovector(lvecs[1])]
                ys = [zerovector(rvecs[1])]
                ∂f = construct∂f_svd(config, f, lvecs, rvecs, xs, ys)
                return ∂self, ∂f, ∂x₀, ∂howmany, ∂which, ∂alg
            end
        end
        # pad the value cotangents with zeros up to length n
        Δvals = fill(zero(vals[1]), n)
        if n_vals > 0
            Δvals[1:n_vals] .= view(_Δvals, 1:n_vals)
        end
        if _Δlvecs isa AbstractZero && _Δrvecs isa AbstractZero
            # case of no contribution of singular vectors
            Δlvecs = fill(ZeroTangent(), n)
            Δrvecs = fill(ZeroTangent(), n)
        else
            Δlvecs = fill(zerovector(lvecs[1]), n)
            Δrvecs = fill(zerovector(rvecs[1]), n)
            if n_lvecs > 0
                Δlvecs[1:n_lvecs] .= view(_Δlvecs, 1:n_lvecs)
            end
            if n_rvecs > 0
                Δrvecs[1:n_rvecs] .= view(_Δrvecs, 1:n_rvecs)
            end
        end
        # Compute actual pullback data:
        #------------------------------
        xs, ys = compute_svdsolve_pullback_data(Δvals, Δlvecs, Δrvecs, view(vals, 1:n),
                                                view(lvecs, 1:n), view(rvecs, 1:n),
                                                info, f, which, alg_primal, alg_rrule)
        # Return pullback in correct form:
        #---------------------------------
        ∂f = construct∂f_svd(config, f, lvecs, rvecs, xs, ys)
        return ∂self, ∂f, ∂x₀, ∂howmany, ∂which, ∂alg
    end
    return svdsolve_pullback
end
# Pullback data for `svdsolve` using an iterative linear solver (GMRES/BiCGStab):
# for each singular triplet (σᵢ, uᵢ, vᵢ) a 2x2-block linear problem is solved
# separately. Returns vectors `xs`, `ys` such that Ā = ∑ᵢ (xᵢ vᵢ' + uᵢ yᵢ').
function compute_svdsolve_pullback_data(Δvals, Δlvecs, Δrvecs, vals, lvecs, rvecs, info, f,
                                        which, alg_primal, alg_rrule::Union{GMRES,BiCGStab})
    xs = similar(lvecs, length(Δvals))
    ys = similar(rvecs, length(Δvals))
    for i in 1:length(vals)
        Δσ = Δvals[i]
        Δu = Δlvecs[i]
        Δv = Δrvecs[i]
        σ = vals[i]
        u = lvecs[i]
        v = rvecs[i]
        # First treat special cases
        if isa(Δv, AbstractZero) && isa(Δu, AbstractZero) # no contribution
            # only the singular value cotangent contributes: Ā += real(Δσ) * u * v'
            xs[i] = scale(u, real(Δσ) / 2)
            ys[i] = scale(v, real(Δσ) / 2)
            continue
        end
        udΔu = inner(u, Δu)
        vdΔv = inner(v, Δv)
        if (udΔu isa Complex) || (vdΔv isa Complex)
            if alg_rrule.verbosity >= 0
                # phases of complex singular vectors are gauge freedom; warn if the
                # cotangent depends on that choice
                gauge = abs(imag(udΔu + vdΔv))
                gauge > alg_primal.tol &&
                    @warn "`svdsolve` cotangents for singular vectors $i are sensitive to gauge choice: (|gauge| = $gauge)"
            end
            Δs = real(Δσ) + im * imag(udΔu - vdΔv) / (2 * σ)
        else
            Δs = real(Δσ)
        end
        # right-hand side with the components along (u, v) projected out
        b = (add(Δu, u, -udΔu), add(Δv, v, -vdΔv))
        (x, y), reverse_info = let σ = σ, u = u, v = v
            linsolve(b, zerovector(b), alg_rrule) do (x, y)
                # block operator [(f - σ) ; (fᴴ - σ)] restricted to the orthogonal
                # complement of (u, v)
                x′ = VectorInterface.add!!(apply_normal(f, y), x, σ, -1)
                y′ = VectorInterface.add!!(apply_adjoint(f, x), y, σ, -1)
                x′ = VectorInterface.add!!(x′, u, -inner(u, x′))
                y′ = VectorInterface.add!!(y′, v, -inner(v, y′))
                return (x′, y′)
            end
        end
        if info.converged >= i && reverse_info.converged == 0 && alg_rrule.verbosity >= 0
            @warn "`svdsolve` cotangent linear problem ($i) did not converge, whereas the primal eigenvalue problem did: normres = $(reverse_info.normres)"
        end
        x = VectorInterface.add!!(x, u, Δs / 2)
        y = VectorInterface.add!!(y, v, conj(Δs) / 2)
        xs[i] = x
        ys[i] = y
    end
    return xs, ys
end
# Pullback data for `svdsolve` using an Arnoldi solver for the cotangent problem:
# all singular triplets are treated at once, with the orthogonal-complement component
# obtained by recasting a Sylvester equation as an extended eigenvalue problem.
function compute_svdsolve_pullback_data(Δvals, Δlvecs, Δrvecs, vals, lvecs, rvecs, info, f,
                                        which, alg_primal, alg_rrule::Arnoldi)
    @assert which == :LR "pullback currently only implemented for `which == :LR`"
    T = scalartype(lvecs)
    n = length(Δvals)
    UdΔU = zeros(T, n, n)  # overlaps ⟨uᵢ, Δuⱼ⟩
    VdΔV = zeros(T, n, n)  # overlaps ⟨vᵢ, Δvⱼ⟩
    for j in 1:n
        for i in 1:n
            if !(Δlvecs[j] isa AbstractZero)
                UdΔU[i, j] = inner(lvecs[i], Δlvecs[j])
            end
            if !(Δrvecs[j] isa AbstractZero)
                VdΔV[i, j] = inner(rvecs[i], Δrvecs[j])
            end
        end
    end
    # anti-hermitian parts; the hermitian parts correspond to gauge freedom
    aUdΔU = rmul!(UdΔU - UdΔU', 1 / 2)
    aVdΔV = rmul!(VdΔV - VdΔV', 1 / 2)
    tol = alg_primal.tol
    if alg_rrule.verbosity >= 0
        mask = abs.(vals' .- vals) .< tol
        gaugepart = view(aUdΔU, mask) + view(aVdΔV, mask)
        gauge = norm(gaugepart, Inf)
        gauge > alg_primal.tol &&
            @warn "`svdsolve` cotangents for singular vectors are sensitive to gauge choice: (|gauge| = $gauge)"
    end
    # divided differences over σⱼ - σᵢ and σⱼ + σᵢ, regularised by `safe_inv`
    UdΔAV = (aUdΔU .+ aVdΔV) .* safe_inv.(vals' .- vals, tol) .+
            (aUdΔU .- aVdΔV) .* safe_inv.(vals' .+ vals, tol)
    if !(Δvals isa ZeroTangent)
        UdΔAV[diagind(UdΔAV)] .+= real.(Δvals)
    end
    # components of xs/ys within the singular vector subspaces
    xs = similar(lvecs, n)
    ys = similar(rvecs, n)
    for i in 1:n
        x = scale(lvecs[1], UdΔAV[1, i] / 2)
        y = scale(rvecs[1], conj(UdΔAV[i, 1]) / 2)
        for j in 2:n
            x = VectorInterface.add!!(x, lvecs[j], UdΔAV[j, i] / 2)
            y = VectorInterface.add!!(y, rvecs[j], conj(UdΔAV[i, j]) / 2)
        end
        xs[i] = x
        ys[i] = y
    end
    # right-hand sides (1 - P) Δuᵢ and (1 - P) Δvᵢ for the Sylvester problem
    sylvesterargx = similar(lvecs)
    for i in 1:n
        x = zerovector(lvecs[1])
        if !(Δlvecs[i] isa AbstractZero)
            x = VectorInterface.add!!(x, Δlvecs[i], +1)
            for j in 1:n
                x = VectorInterface.add!!(x, lvecs[j], -UdΔU[j, i])
            end
        end
        sylvesterargx[i] = x
    end
    sylvesterargy = similar(rvecs)
    for i in 1:n
        y = zerovector(rvecs[1])
        if !(Δrvecs[i] isa AbstractZero)
            y = VectorInterface.add!!(y, Δrvecs[i], +1)
            for j in 1:n
                y = VectorInterface.add!!(y, rvecs[j], -VdΔV[j, i])
            end
        end
        sylvesterargy[i] = y
    end
    # solve the Sylvester problem as an extended eigenvalue problem on (x, y, z) triples
    W₀ = (zerovector(lvecs[1]), zerovector(rvecs[1]), fill(one(T), n))
    QU = orthogonalcomplementprojector(lvecs, n)
    QV = orthogonalcomplementprojector(rvecs, n)
    solver = (T <: Real) ? KrylovKit.realeigsolve : KrylovKit.eigsolve
    rvals, Ws, reverse_info = let QU = QU, QV = QV, ΔU = sylvesterargx, ΔV = sylvesterargy
        solver(W₀, n, :LR, alg_rrule) do w
            x, y, z = w
            x′ = QU(apply_normal(f, y))
            y′ = QV(apply_adjoint(f, x))
            @inbounds for i in 1:length(z)
                x′ = VectorInterface.add!!(x′, ΔU[i], -z[i])
                y′ = VectorInterface.add!!(y′, ΔV[i], -z[i])
            end
            return (x′, y′, vals .* z)
        end
    end
    if info.converged >= n && reverse_info.converged < n && alg_rrule.verbosity >= 0
        @warn "`svdsolve` cotangent problem did not converge, whereas the primal singular value problem did"
    end
    # cleanup and construct final result
    tol = alg_rrule.tol
    for i in 1:n
        x, y, z = Ws[i]
        # the dominant entry of z identifies which cotangent column this solution belongs to
        _, ic = findmax(abs, z)
        if ic != i
            # NOTE(review): this warning is not gated by `alg_rrule.verbosity`, unlike
            # all other warnings in this file — confirm whether that is intentional.
            @warn "`svdsolve` cotangent linear problem ($ic) returns unexpected result"
        end
        factor = 1 / z[ic]
        z[ic] = zero(z[ic])
        error = max(norm(z, Inf), abs(rvals[i] - vals[ic]))
        if error > 5 * tol && alg_rrule.verbosity >= 0
            @warn "`svdsolve` cotangent linear problem ($ic) returns unexpected result: error = $error vs tol = $tol"
        end
        xs[ic] = VectorInterface.add!!(xs[ic], x, -factor)
        ys[ic] = VectorInterface.add!!(ys[ic], y, -factor)
    end
    return xs, ys
end
# Assemble the cotangent of the function `f` in `svdsolve`: each xᵢ is pulled back
# through `f` (forward direction, `Val(false)`) at rᵢ, and each yᵢ through the adjoint
# direction (`Val(true)`) at uᵢ; the contributions are accumulated.
function construct∂f_svd(config, f, lvecs, rvecs, xs, ys)
    config isa RuleConfig{>:HasReverseMode} ||
        throw(ArgumentError("`svdsolve` reverse-mode AD requires AD engine that supports calling back into AD"))
    pb_fwd(v, x) = rrule_via_ad(config, f, v, Val(false))[2](x)[1]
    pb_adj(u, y) = rrule_via_ad(config, f, u, Val(true))[2](y)[1]
    ∂f = pb_fwd(rvecs[1], xs[1])
    ∂f = ChainRulesCore.add!!(∂f, pb_adj(lvecs[1], ys[1]))
    for i in 2:length(xs)
        ∂f = ChainRulesCore.add!!(∂f, pb_fwd(rvecs[i], xs[i]))
        ∂f = ChainRulesCore.add!!(∂f, pb_adj(lvecs[i], ys[i]))
    end
    return ∂f
end
# Variant for a linear map given as a pair `(f, fᴴ)`: the xᵢ contributions are pulled
# back through `f` and the yᵢ contributions through `fᴴ`, yielding separate cotangents.
function construct∂f_svd(config, (f, fᴴ)::Tuple{Any,Any}, lvecs, rvecs, xs, ys)
    config isa RuleConfig{>:HasReverseMode} ||
        throw(ArgumentError("`svdsolve` reverse-mode AD requires AD engine that supports calling back into AD"))
    pb_f(v, x) = rrule_via_ad(config, f, v)[2](x)[1]
    pb_fᴴ(u, y) = rrule_via_ad(config, fᴴ, u)[2](y)[1]
    ∂f = pb_f(rvecs[1], xs[1])
    ∂fᴴ = pb_fᴴ(lvecs[1], ys[1])
    for i in 2:length(xs)
        ∂f = ChainRulesCore.add!!(∂f, pb_f(rvecs[i], xs[i]))
        ∂fᴴ = ChainRulesCore.add!!(∂fᴴ, pb_fᴴ(lvecs[i], ys[i]))
    end
    return (∂f, ∂fᴴ)
end
# Matrix version: build the cotangent Ā lazily as a thunk; for strided matrices an
# `InplaceableThunk` additionally allows in-place accumulation into an existing cotangent.
function construct∂f_svd(config, A::AbstractMatrix, lvecs, rvecs, xs, ys)
    A isa StridedMatrix ||
        return @thunk(ProjectTo(A)(_buildĀ_svd!(zero(A), lvecs, rvecs, xs, ys)))
    return InplaceableThunk(Ā -> _buildĀ_svd!(Ā, lvecs, rvecs, xs, ys),
                            @thunk(_buildĀ_svd!(zero(A), lvecs, rvecs, xs, ys)))
end
# Accumulate the matrix cotangent Ā += ∑ᵢ (xᵢ * vᵢ' + uᵢ * yᵢ') from the left/right
# singular vectors `lvecs`/`rvecs` and the pullback vectors `xs`/`ys` produced by
# `compute_svdsolve_pullback_data`. Mutates and returns `Ā`.
function _buildĀ_svd!(Ā, lvecs, rvecs, xs, ys)
    for i in eachindex(xs)  # idiomatic iteration instead of 1:length(xs)
        u, v = lvecs[i], rvecs[i]
        x, y = xs[i], ys[i]
        mul!(Ā, x, v', +1, +1)  # Ā += x * v'
        mul!(Ā, u, y', +1, +1)  # Ā += u * y'
    end
    return Ā
end
| KrylovKit | https://github.com/Jutho/KrylovKit.jl.git |
|
[
"MIT"
] | 0.8.1 | 3c2a016489c38f35160a246c91a3f3353c47bb68 | code | 1856 | safe_inv(a, tol) = abs(a) < tol ? zero(a) : inv(a)
# vecs are assumed orthonormal
# Return a closure projecting a vector onto the span of `vecs[1:n]`: w ↦ ∑ᵢ vᵢ⟨vᵢ, w⟩.
function orthogonalprojector(vecs, n)
    function projector(w)
        out = zerovector(w)
        @inbounds for i in 1:n
            out = VectorInterface.add!!(out, vecs[i], inner(vecs[i], w))
        end
        return out
    end
    return projector
end
# Return a closure projecting a vector onto the orthogonal complement of the span of
# `vecs[1:n]` (vecs assumed orthonormal): w ↦ w - ∑ᵢ vᵢ⟨vᵢ, w⟩.
function orthogonalcomplementprojector(vecs, n)
    function projector(w)
        out = scale(w, 1)  # independent copy of w
        @inbounds for i in 1:n
            out = VectorInterface.add!!(out, vecs[i], -inner(vecs[i], w))
        end
        return out
    end
    return projector
end
# vecs are not assumed orthonormal, G is the Cholesky factorisation of the overlap matrix
# Return a closure projecting onto the span of `vecs[1:n]`: w ↦ ∑ᵢ vᵢ (G⁻¹ ⟨v, w⟩)ᵢ.
# Fix: `ldiv!` already works in place, so the captured `overlaps` buffer is no longer
# rebound inside the closure (rebinding a captured variable forces Julia to box it,
# making the closure type-unstable).
# NOTE(review): `overlaps` is shared between calls of the returned projector, so the
# closure is not safe for concurrent use — presumably fine at the current call sites;
# confirm before calling from multiple tasks.
function orthogonalprojector(vecs, n, G::Cholesky)
    overlaps = zeros(eltype(G), n)
    function projector(w)
        @inbounds for i in 1:n
            overlaps[i] = inner(vecs[i], w)
        end
        ldiv!(G, overlaps)
        w′ = zerovector(w)
        @inbounds for i in 1:n
            w′ = VectorInterface.add!!(w′, vecs[i], +overlaps[i])
        end
        return w′
    end
    return projector
end
# Return a closure projecting onto the orthogonal complement of the span of `vecs[1:n]`
# (vecs not assumed orthonormal, `G` the Cholesky factorisation of their overlap matrix):
# w ↦ w - ∑ᵢ vᵢ (G⁻¹ ⟨v, w⟩)ᵢ.
# Fix: `ldiv!` already works in place, so the captured `overlaps` buffer is no longer
# rebound inside the closure (rebinding a captured variable forces Julia to box it,
# making the closure type-unstable).
# NOTE(review): `overlaps` is shared between calls of the returned projector, so the
# closure is not safe for concurrent use — confirm before calling from multiple tasks.
function orthogonalcomplementprojector(vecs, n, G::Cholesky)
    overlaps = zeros(eltype(G), n)
    function projector(w)
        @inbounds for i in 1:n
            overlaps[i] = inner(vecs[i], w)
        end
        ldiv!(G, overlaps)
        w′ = scale(w, 1)
        @inbounds for i in 1:n
            w′ = VectorInterface.add!!(w′, vecs[i], -overlaps[i])
        end
        return w′
    end
    return projector
end
# Reinterpret a complex vector as real storage and return a view onto the real parts
# of its entries; mutating the view mutates `v`.
function _realview(v::AbstractVector{Complex{T}}) where {T}
    reals = reinterpret(T, v)
    return view(reals, axes(reals, 1)[begin:2:end])
end
# Reinterpret a complex vector as real storage and return a view onto the imaginary
# parts of its entries; mutating the view mutates `v`.
function _imagview(v::AbstractVector{Complex{T}}) where {T}
    reals = reinterpret(T, v)
    return view(reals, axes(reals, 1)[(begin + 1):2:end])
end
| KrylovKit | https://github.com/Jutho/KrylovKit.jl.git |
|
[
"MIT"
] | 0.8.1 | 3c2a016489c38f35160a246c91a3f3353c47bb68 | code | 8521 | """
KrylovKit
A Julia package collecting a number of Krylov-based algorithms for linear problems,
singular value and eigenvalue problems and the application of functions of linear maps or
operators to vectors.
KrylovKit accepts general functions or callable objects as linear maps, and general Julia
objects with vector like behavior as vectors.
The high level interface of KrylovKit is provided by the following functions:
- [`linsolve`](@ref): solve linear systems
- [`eigsolve`](@ref): find a few eigenvalues and corresponding eigenvectors
- [`geneigsolve`](@ref): find a few generalized eigenvalues and corresponding vectors
- [`svdsolve`](@ref): find a few singular values and corresponding left and right
singular vectors
- [`exponentiate`](@ref): apply the exponential of a linear map to a vector
- [`expintegrator`](@ref): exponential integrator for a linear non-homogeneous ODE,
computes a linear combination of the `ϕⱼ` functions which generalize `ϕ₀(z) = exp(z)`.
"""
module KrylovKit
using VectorInterface
using VectorInterface: add!!
using LinearAlgebra
using Printf
using GPUArraysCore
using PackageExtensionCompat
const IndexRange = AbstractRange{Int}
export linsolve, eigsolve, geneigsolve, realeigsolve, schursolve, svdsolve
export exponentiate, expintegrator
export orthogonalize, orthogonalize!!, orthonormalize, orthonormalize!!
export basis, rayleighquotient, residual, normres, rayleighextension
export initialize, initialize!, expand!, shrink!
export ClassicalGramSchmidt, ClassicalGramSchmidt2, ClassicalGramSchmidtIR
export ModifiedGramSchmidt, ModifiedGramSchmidt2, ModifiedGramSchmidtIR
export LanczosIterator, ArnoldiIterator, GKLIterator
export CG, GMRES, BiCGStab, Lanczos, Arnoldi, GKL, GolubYe
export KrylovDefaults, EigSorter
export RecursiveVec, InnerProductVec
# Multithreading
# Number of threads KrylovKit is allowed to use (never more than Threads.nthreads()).
const _NTHREADS = Ref(1)
# Return the number of threads KrylovKit currently uses.
get_num_threads() = _NTHREADS[]
# Set the number of threads KrylovKit uses, capped at the number of available Julia
# threads (a warning is emitted when the requested value exceeds the cap).
function set_num_threads(n::Int)
    maxthreads = Base.Threads.nthreads()
    if n > maxthreads
        _set_num_threads_warn(maxthreads)
        n = maxthreads
    end
    return _NTHREADS[] = n
end
# Out-of-line warning (hence `@noinline`) so the fast path of `set_num_threads`
# stays small and inlineable.
@noinline function _set_num_threads_warn(n)
    @warn "Maximal number of threads limited by number of Julia threads,
    setting number of threads equal to Threads.nthreads() = $n"
end
# Convenience switches: use all available Julia threads, or run single-threaded.
enable_threads() = set_num_threads(Base.Threads.nthreads())
disable_threads() = set_num_threads(1)
# Module initialisation: load package extensions (via PackageExtensionCompat on older
# Julia versions) and default the thread count to the number of Julia threads.
function __init__()
    @require_extensions
    set_num_threads(Base.Threads.nthreads())
    return nothing
end
# Description of an ordinal range split into `outerlength` contiguous chunks;
# produced by `splitrange` and iterated as a sequence of `StepRange`s.
struct SplitRange
    start::Int        # first element of the underlying range
    step::Int         # step of the underlying range
    stop::Int         # last element of the underlying range
    innerlength::Int  # base number of elements per chunk
    outerlength1::Int # number of leading chunks carrying one extra element
    outerlength::Int  # total number of chunks
end
# Partition the ordinal range `r` into `n` contiguous chunks (described by a
# `SplitRange`); when `length(r)` is not divisible by `n`, the leading chunks
# receive one extra element each.
function splitrange(r::OrdinalRange, n::Integer)
    total = length(r)
    innerlength = div(total, n)
    remainder = total - n * innerlength
    return SplitRange(first(r), step(r), last(r), innerlength, remainder, n)
end
# Iterate over the chunks of a `SplitRange` as `StepRange`s: the first `outerlength1`
# chunks contain `innerlength + 1` elements, the remaining ones `innerlength`.
function Base.iterate(r::SplitRange, i=1)
    step = r.step
    if i <= r.outerlength1
        # chunk of innerlength + 1 elements; offset counts the elements already emitted
        offset = (i - 1) * (r.innerlength + 1) * step
        start = r.start + offset
        stop = start + step * r.innerlength
    elseif i <= r.outerlength
        # chunk of innerlength elements: elements already emitted equal
        # outerlength1 * (innerlength + 1) + (i - 1 - outerlength1) * innerlength
        # = outerlength1 + (i - 1) * innerlength
        offset = (r.outerlength1 + (i - 1) * r.innerlength) * step
        start = r.start + offset
        stop = start + step * (r.innerlength - 1)
    else
        return nothing
    end
    return StepRange(start, step, stop), i + 1
end
# The number of chunks the range was split into.
Base.length(r::SplitRange) = r.outerlength
# Algorithm types
include("algorithms.jl")
# Structures to store a list of basis vectors
"""
abstract type Basis{T} end
An abstract type to collect specific types for representing a basis of vectors of type `T`.
Implementations of `Basis{T}` behave in many ways like `Vector{T}` and should have a
`length`, can be indexed (`getindex` and `setindex!`), iterated over (`iterate`), and
support resizing (`resize!`, `pop!`, `push!`, `empty!`, `sizehint!`).
The type `T` denotes the type of the elements stored in an `Basis{T}` and can be any custom
type that has vector like behavior (as defined in the docs of KrylovKit).
See [`OrthonormalBasis`](@ref) for a specific implementation.
"""
abstract type Basis{T} end
include("orthonormal.jl")
# Dense linear algebra structures and functions used in the algorithms below
include("dense/givens.jl")
include("dense/linalg.jl")
include("dense/packedhessenberg.jl")
include("dense/reflector.jl")
# Simple coordinate basis vector, i.e. a vector of all zeros and a single one on position `k`:
"""
    SimpleBasisVector(m, k)

A lightweight `AbstractVector{Bool}` of length `m` representing the `k`th coordinate
basis vector: for `e = SimpleBasisVector(m, k)`, `length(e) == m` and `e[i] == (i == k)`.
"""
struct SimpleBasisVector <: AbstractVector{Bool}
    m::Int
    k::Int
end
Base.size(e::SimpleBasisVector) = (e.m,)
Base.axes(e::SimpleBasisVector) = (Base.OneTo(e.m),)
@inline function Base.getindex(e::SimpleBasisVector, i)
    @boundscheck Base.checkbounds(e, i)
    return i == e.k
end
# some often used tools
# Check that a diagonal element `z` produced by a supposedly hermitian, positive
# definite operator is numerically real and strictly positive; returns the real part.
function checkposdef(z)
    r = checkhermitian(z)
    r > 0 || error("operator does not appear to be positive definite: diagonal element $z")
    return r
end
# Check that a diagonal element `z` produced by a supposedly hermitian operator is
# numerically real (its imaginary part small relative to the scale `n`, which defaults
# to `abs(z)`); returns the real part.
# Fix: compare `abs(imag(z))` to the tolerance — the previous `imag(z) <= tol` silently
# accepted arbitrarily large *negative* imaginary parts.
function checkhermitian(z, n=abs(z))
    abs(imag(z)) <= sqrt(max(eps(n), eps(one(n)))) ||
        error("operator does not appear to be hermitian: diagonal element $z")
    return real(z)
end
# apply operators
include("apply.jl")
# Krylov and related factorizations and their iterators
include("factorizations/krylov.jl")
include("factorizations/lanczos.jl")
include("factorizations/arnoldi.jl")
include("factorizations/gkl.jl")
# A general structure to pass on convergence information
"""
struct ConvergenceInfo{S,T}
converged::Int
residual::T
normres::S
numiter::Int
numops::Int
end
Used to return information about the solution found by the iterative method.
- `converged`: the number of solutions that have converged according to an appropriate
error measure and requested tolerance for the problem. Its value can be zero or one for
[`linsolve`](@ref), [`exponentiate`](@ref) and [`expintegrator`](@ref), or any integer
`>= 0` for [`eigsolve`](@ref), [`schursolve`](@ref) or [`svdsolve`](@ref).
- `residual:` the (list of) residual(s) for the problem, or `nothing` for problems without
the concept of a residual (i.e. `exponentiate`, `expintegrator`). This is a single
vector (of the same type as the type of vectors used in the problem) for `linsolve`, or
a `Vector` of such vectors for `eigsolve`, `schursolve` or `svdsolve`.
- `normres`: the norm of the residual(s) (in the previous field) or the value of any other
error measure that is appropriate for the problem. This is a `Real` for `linsolve` and
`exponentiate`, and a `Vector{<:Real}` for `eigsolve`, `schursolve` and `svdsolve`. The
number of values in `normres` that are smaller than a predefined tolerance corresponds
to the number `converged` of solutions that have converged.
- `numiter`: the number of iterations (sometimes called restarts) used by the algorithm.
- `numops`: the number of times the linear map or operator was applied
"""
struct ConvergenceInfo{S,T}
    converged::Int # how many vectors have converged, 0 or 1 for linear systems, exponentiate, any integer for eigenvalue problems
    residual::T    # (list of) residual vector(s), or `nothing` when not applicable
    normres::S     # norm(s) of the residual(s), compared against the requested tolerance
    numiter::Int   # number of iterations (restarts) used by the algorithm
    numops::Int    # number of applications of the linear map or operator
end
# Human-readable two-line summary of a `ConvergenceInfo`: number of converged values,
# iteration/operator-application counts, and the residual norms.
function Base.show(io::IO, info::ConvergenceInfo)
    print(io, "ConvergenceInfo: ")
    k = info.converged
    if k == 0
        print(io, "no converged values ")
    elseif k == 1
        print(io, "one converged value ")
    elseif k > 1
        print(io, "$k converged values ")
    end
    println(io,
            "after ",
            info.numiter,
            " iterations and ",
            info.numops,
            " applications of the linear map;")
    return println(io, "norms of residuals are given by $((info.normres...,)).")
end
# eigsolve en schursolve
include("eigsolve/eigsolve.jl")
include("eigsolve/lanczos.jl")
include("eigsolve/arnoldi.jl")
include("eigsolve/geneigsolve.jl")
include("eigsolve/golubye.jl")
include("eigsolve/svdsolve.jl")
# linsolve
include("linsolve/linsolve.jl")
include("linsolve/cg.jl")
include("linsolve/gmres.jl")
include("linsolve/bicgstab.jl")
# exponentiate
include("matrixfun/exponentiate.jl")
include("matrixfun/expintegrator.jl")
# custom vector types
include("recursivevec.jl")
include("innerproductvec.jl")
end
| KrylovKit | https://github.com/Jutho/KrylovKit.jl.git |
|
[
"MIT"
] | 0.8.1 | 3c2a016489c38f35160a246c91a3f3353c47bb68 | code | 15883 | # In the various algorithms we store the tolerance as a generic Real in order to
# construct these objects using keyword arguments in a type-stable manner. At
# the beginning of the corresponding algorithm, the actual tolerance will be
# converted to the concrete numeric type appropriate for the problem at hand
# Orthogonalization and orthonormalization
"""
abstract type Orthogonalizer
Supertype of a hierarchy for representing different orthogonalization strategies or
algorithms.
See also: [`ClassicalGramSchmidt`](@ref), [`ModifiedGramSchmidt`](@ref),
[`ClassicalGramSchmidt2`](@ref), [`ModifiedGramSchmidt2`](@ref),
[`ClassicalGramSchmidtIR`](@ref), [`ModifiedGramSchmidtIR`](@ref).
"""
abstract type Orthogonalizer end
abstract type Reorthogonalizer <: Orthogonalizer end
# Simple: a single orthogonalization pass, no reorthogonalization.
"""
    ClassicalGramSchmidt()

Represents the classical Gram Schmidt algorithm for orthogonalizing different vectors,
typically not an optimal choice.
"""
struct ClassicalGramSchmidt <: Orthogonalizer end
"""
    ModifiedGramSchmidt()

Represents the modified Gram Schmidt algorithm for orthogonalizing different vectors,
typically a reasonable choice for linear systems but not for eigenvalue solvers with a
large Krylov dimension.
"""
struct ModifiedGramSchmidt <: Orthogonalizer end
# A single reorthogonalization always
"""
    ClassicalGramSchmidt2()

Represents the classical Gram Schmidt algorithm with a second reorthogonalization step
always taking place.
"""
struct ClassicalGramSchmidt2 <: Reorthogonalizer end
"""
    ModifiedGramSchmidt2()

Represents the modified Gram Schmidt algorithm with a second reorthogonalization step
always taking place.
"""
struct ModifiedGramSchmidt2 <: Reorthogonalizer end
# Iterative reorthogonalization: repeat until the norm no longer drops significantly.
"""
    ClassicalGramSchmidtIR(η::Real = 1/sqrt(2))

Represents the classical Gram Schmidt algorithm with iterative (i.e. zero or more)
reorthogonalization until the norm of the vector after an orthogonalization step has not
decreased by a factor smaller than `η` with respect to the norm before the step. The
default value corresponds to the Daniel-Gragg-Kaufman-Stewart condition.
"""
struct ClassicalGramSchmidtIR{S<:Real} <: Reorthogonalizer
    η::S   # norm-decrease threshold triggering another reorthogonalization pass
end
ClassicalGramSchmidtIR() = ClassicalGramSchmidtIR(1 / sqrt(2)) # Daniel-Gragg-Kaufman-Stewart
"""
    ModifiedGramSchmidtIR(η::Real = 1/sqrt(2))

Represents the modified Gram Schmidt algorithm with iterative (i.e. zero or more)
reorthogonalization until the norm of the vector after an orthogonalization step has not
decreased by a factor smaller than `η` with respect to the norm before the step. The
default value corresponds to the Daniel-Gragg-Kaufman-Stewart condition.
"""
struct ModifiedGramSchmidtIR{S<:Real} <: Reorthogonalizer
    η::S   # norm-decrease threshold triggering another reorthogonalization pass
end
ModifiedGramSchmidtIR() = ModifiedGramSchmidtIR(1 / sqrt(2)) # Daniel-Gragg-Kaufman-Stewart
# Solving eigenvalue problems
# Common supertype for all Krylov-subspace-based algorithm specifications.
abstract type KrylovAlgorithm end
# General purpose; good for linear systems, eigensystems and matrix functions
"""
    Lanczos(; krylovdim = KrylovDefaults.krylovdim, maxiter = KrylovDefaults.maxiter,
        tol = KrylovDefaults.tol, orth = KrylovDefaults.orth, eager = false, verbosity = 0)

Represents the Lanczos algorithm for building the Krylov subspace; assumes the linear
operator is real symmetric or complex Hermitian. Can be used in `eigsolve` and
`exponentiate`. The corresponding algorithms will build a Krylov subspace of size at most
`krylovdim`, which will be repeated at most `maxiter` times and will stop when the norm of
the residual of the Lanczos factorization is smaller than `tol`. The orthogonalizer `orth`
will be used to orthogonalize the different Krylov vectors. Eager mode, as selected by
`eager = true`, means that the algorithm that uses this Lanczos process (e.g. `eigsolve`)
can try to finish its computation before the total Krylov subspace of dimension `krylovdim`
is constructed. Default verbosity level `verbosity` is zero, meaning that no output will be
printed.

Use `Arnoldi` for non-symmetric or non-Hermitian linear operators.

See also: `factorize`, `eigsolve`, `exponentiate`, `Arnoldi`, `Orthogonalizer`
"""
struct Lanczos{O<:Orthogonalizer,S<:Real} <: KrylovAlgorithm
    orth::O
    krylovdim::Int
    maxiter::Int
    tol::S          # stored as a generic Real; converted to the problem's type at run time
    eager::Bool
    verbosity::Int
end
# Keyword constructor with package-wide defaults (see KrylovDefaults).
function Lanczos(;
                 krylovdim::Int=KrylovDefaults.krylovdim,
                 maxiter::Int=KrylovDefaults.maxiter,
                 tol::Real=KrylovDefaults.tol,
                 orth::Orthogonalizer=KrylovDefaults.orth,
                 eager::Bool=false,
                 verbosity::Int=0)
    return Lanczos(orth, krylovdim, maxiter, tol, eager, verbosity)
end
"""
GKL(; krylovdim = KrylovDefaults.krylovdim, maxiter = KrylovDefaults.maxiter,
tol = KrylovDefaults.tol, orth = KrylovDefaults.orth, verbosity = 0)
Represents the Golub-Kahan-Lanczos bidiagonalization algorithm for sequentially building a
Krylov-like factorization of a general matrix or linear operator with a bidiagonal reduced
matrix. Can be used in `svdsolve`. The corresponding algorithm builds a Krylov subspace of
size at most `krylovdim`, which will be repeated at most `maxiter` times and will stop when
the norm of the residual of the Arnoldi factorization is smaller than `tol`. The
orthogonalizer `orth` will be used to orthogonalize the different Krylov vectors. Default
verbosity level `verbosity` is zero, meaning that no output will be printed.
See also: [`svdsolve`](@ref), [`Orthogonalizer`](@ref)
"""
struct GKL{O<:Orthogonalizer,S<:Real} <: KrylovAlgorithm
orth::O
krylovdim::Int
maxiter::Int
tol::S
eager::Bool
verbosity::Int
end
function GKL(;
krylovdim::Int=KrylovDefaults.krylovdim,
maxiter::Int=KrylovDefaults.maxiter,
tol::Real=KrylovDefaults.tol,
orth::Orthogonalizer=KrylovDefaults.orth,
eager::Bool=false,
verbosity::Int=0)
return GKL(orth, krylovdim, maxiter, tol, eager, verbosity)
end
"""
Arnoldi(; krylovdim = KrylovDefaults.krylovdim, maxiter = KrylovDefaults.maxiter,
tol = KrylovDefaults.tol, orth = KrylovDefaults.orth, eager = false, verbosity = 0)
Represents the Arnoldi algorithm for building the Krylov subspace for a general matrix or
linear operator. Can be used in `eigsolve` and `exponentiate`. The corresponding algorithms
will build a Krylov subspace of size at most `krylovdim`, which will be repeated at most
`maxiter` times and will stop when the norm of the residual of the Arnoldi factorization is
smaller than `tol`. The orthogonalizer `orth` will be used to orthogonalize the different
Krylov vectors. Eager mode, as selected by `eager = true`, means that the algorithm that
uses this Arnoldi process (e.g. `eigsolve`) can try to finish its computation before the
total Krylov subspace of dimension `krylovdim` is constructed. Default verbosity level
`verbosity` is zero, meaning that no output will be printed.
Use `Lanczos` for real symmetric or complex Hermitian linear operators.
See also: [`eigsolve`](@ref), [`exponentiate`](@ref), [`Lanczos`](@ref),
[`Orthogonalizer`](@ref)
"""
struct Arnoldi{O<:Orthogonalizer,S<:Real} <: KrylovAlgorithm
orth::O
krylovdim::Int
maxiter::Int
tol::S
eager::Bool
verbosity::Int
end
function Arnoldi(;
krylovdim::Int=KrylovDefaults.krylovdim,
maxiter::Int=KrylovDefaults.maxiter,
tol::Real=KrylovDefaults.tol,
orth::Orthogonalizer=KrylovDefaults.orth,
eager::Bool=false,
verbosity::Int=0)
return Arnoldi(orth, krylovdim, maxiter, tol, eager, verbosity)
end
"""
GolubYe(; krylovdim = KrylovDefaults.krylovdim, maxiter = KrylovDefaults.maxiter,
tol = KrylovDefaults.tol, orth = KrylovDefaults.orth, verbosity = 0)
Represents the Golub-Ye algorithm for solving hermitian (symmetric) generalized eigenvalue
problems `A x = λ B x` with positive definite `B`, without the need for inverting `B`.
Builds a Krylov subspace of size `krylovdim` starting from an estimate `x` by acting with
`(A - ρ(x) B)`, where `ρ(x) = dot(x, A*x)/dot(x, B*x)`, and employing the Lanczos
algorithm. This process is repeated at most `maxiter` times. In every iteration `k>1`, the
subspace will also be expanded to size `krylovdim+1` by adding ``x_k - x_{k-1}``, which is
known as the LOPCG correction and was suggested by Money and Ye. With `krylovdim = 2`, this
algorithm becomes equivalent to `LOPCG`.
"""
struct GolubYe{O<:Orthogonalizer,S<:Real} <: KrylovAlgorithm
orth::O
krylovdim::Int
maxiter::Int
tol::S
verbosity::Int
end
function GolubYe(;
krylovdim::Int=KrylovDefaults.krylovdim,
maxiter::Int=KrylovDefaults.maxiter,
tol::Real=KrylovDefaults.tol,
orth::Orthogonalizer=KrylovDefaults.orth,
verbosity::Int=0)
return GolubYe(orth, krylovdim, maxiter, tol, verbosity)
end
# Solving linear systems specifically
# Common supertype for all linear-system solver specifications.
abstract type LinearSolver <: KrylovAlgorithm end
"""
    CG(; maxiter = KrylovDefaults.maxiter, tol = KrylovDefaults.tol, verbosity = 0)

Construct an instance of the conjugate gradient algorithm with specified parameters, which
can be passed to `linsolve` in order to iteratively solve a linear system with a positive
definite (and thus symmetric or hermitian) coefficient matrix or operator. The `CG` method
will search for the optimal `x` in a Krylov subspace of maximal size `maxiter`, or stop when
`norm(A*x - b) < tol`. Default verbosity level `verbosity` is zero, meaning that no output
will be printed.

See also: [`linsolve`](@ref), [`MINRES`](@ref), [`GMRES`](@ref), [`BiCG`](@ref),
[`BiCGStab`](@ref)
"""
struct CG{S<:Real} <: LinearSolver
    maxiter::Int
    tol::S          # stored as a generic Real; converted to the problem's type at run time
    verbosity::Int
end
# Keyword constructor with package-wide defaults (see KrylovDefaults).
function CG(;
            maxiter::Integer=KrylovDefaults.maxiter,
            tol::Real=KrylovDefaults.tol,
            verbosity::Int=0)
    return CG(maxiter, tol, verbosity)
end
"""
GMRES(; krylovdim = KrylovDefaults.krylovdim, maxiter = KrylovDefaults.maxiter,
tol = KrylovDefaults.tol, orth::Orthogonalizer = KrylovDefaults.orth)
Construct an instance of the GMRES algorithm with specified parameters, which can be passed
to `linsolve` in order to iteratively solve a linear system. The `GMRES` method will search
for the optimal `x` in a Krylov subspace of maximal size `krylovdim`, and repeat this
process for at most `maxiter` times, or stop when `norm(A*x - b) < tol`. In building the
Krylov subspace, `GMRES` will use the orthogonalizer `orth`. Default verbosity level
`verbosity` is zero, meaning that no output will be printed.
Note that in the traditional nomenclature of `GMRES`, the parameter `krylovdim` is referred
to as the restart parameter, and `maxiter` is the number of outer iterations, i.e. restart
cycles. The total iteration count, i.e. the number of expansion steps, is roughly
`krylovdim` times the number of iterations.
See also: [`linsolve`](@ref), [`BiCG`](@ref), [`BiCGStab`](@ref), [`CG`](@ref),
[`MINRES`](@ref)
"""
struct GMRES{O<:Orthogonalizer,S<:Real} <: LinearSolver
orth::O
maxiter::Int
krylovdim::Int
tol::S
verbosity::Int
end
function GMRES(;
krylovdim::Integer=KrylovDefaults.krylovdim,
maxiter::Integer=KrylovDefaults.maxiter,
tol::Real=KrylovDefaults.tol,
orth::Orthogonalizer=KrylovDefaults.orth,
verbosity::Int=0)
return GMRES(orth, maxiter, krylovdim, tol, verbosity)
end
# TODO: MINRES is not implemented yet; only the algorithm specification exists.
"""
    MINRES(; maxiter = KrylovDefaults.maxiter, tol = KrylovDefaults.tol, verbosity = 0)

!!! warning "Not implemented yet"

Construct an instance of the conjugate gradient algorithm with specified parameters,
which can be passed to `linsolve` in order to iteratively solve a linear system with a
real symmetric or complex hermitian coefficient matrix or operator. The `MINRES` method
will search for the optimal `x` in a Krylov subspace of maximal size `maxiter`, or stop
when `norm(A*x - b) < tol`. In building the Krylov subspace, `MINRES` will use the
orthogonalizer `orth`. Default verbosity level `verbosity` is zero, meaning that no
output will be printed.

See also: [`linsolve`](@ref), [`CG`](@ref), [`GMRES`](@ref), [`BiCG`](@ref),
[`BiCGStab`](@ref)
"""
struct MINRES{S<:Real} <: LinearSolver
    maxiter::Int
    tol::S          # stored as a generic Real; converted to the problem's type at run time
    verbosity::Int
end
# Keyword constructor with package-wide defaults (see KrylovDefaults).
function MINRES(;
                maxiter::Integer=KrylovDefaults.maxiter,
                tol::Real=KrylovDefaults.tol,
                verbosity::Int=0)
    return MINRES(maxiter, tol, verbosity)
end
"""
BiCG(; maxiter = KrylovDefaults.maxiter, tol = KrylovDefaults.tol)
!!! warning "Not implemented yet"
Construct an instance of the Biconjugate gradient algorithm with specified parameters,
which can be passed to `linsolve` in order to iteratively solve a linear system general
linear map, of which the adjoint can also be applied. The `BiCG` method will search for
the optimal `x` in a Krylov subspace of maximal size `maxiter`, or stop when `norm(A*x -
b) < tol`. Default verbosity level `verbosity` is zero, meaning that no output will be
printed.
See also: [`linsolve`](@ref), [`GMRES`](@ref), [`CG`](@ref), [`BiCGStab`](@ref),
[`MINRES`](@ref)
"""
struct BiCG{S<:Real} <: LinearSolver
maxiter::Int
tol::S
verbosity::Int
end
function BiCG(;
maxiter::Integer=KrylovDefaults.maxiter,
tol::Real=KrylovDefaults.tol,
verbosity::Int=0)
return BiCG(maxiter, tol, verbosity)
end
"""
BiCGStab(; maxiter = KrylovDefaults.maxiter, tol = KrylovDefaults.tol)
Construct an instance of the Biconjugate gradient algorithm with specified parameters,
which can be passed to `linsolve` in order to iteratively solve a linear system general
linear map. The `BiCGStab` method will search for the optimal `x` in a Krylov subspace
of maximal size `maxiter`, or stop when `norm(A*x - b) < tol`. Default verbosity level
`verbosity` is zero, meaning that no output will be printed.
See also: [`linsolve`](@ref), [`GMRES`](@ref), [`CG`](@ref), [`BiCG`](@ref),
[`MINRES`](@ref)
"""
struct BiCGStab{S<:Real} <: LinearSolver
maxiter::Int
tol::S
verbosity::Int
end
function BiCGStab(;
maxiter::Integer=KrylovDefaults.maxiter,
tol::Real=KrylovDefaults.tol,
verbosity::Int=0)
return BiCGStab(maxiter, tol, verbosity)
end
# Solving eigenvalue systems specifically
abstract type EigenSolver <: KrylovAlgorithm end
# Placeholder specification; no solver implementation visible here.
struct JacobiDavidson <: EigenSolver end
# Default values
"""
    module KrylovDefaults
        const orth = KrylovKit.ModifiedGramSchmidt2()
        const krylovdim = 30
        const maxiter = 100
        const tol = 1e-12
    end

A module listing the default values for the typical parameters in Krylov based algorithms:

  - `orth = ModifiedGramSchmidt2()`: the orthogonalization routine used to orthogonalize
    the Krylov basis in the `Lanczos` or `Arnoldi` iteration
  - `krylovdim = 30`: the maximal dimension of the Krylov subspace that will be constructed
  - `maxiter = 100`: the maximal number of outer iterations, i.e. the maximum number of
    times the Krylov subspace may be rebuilt
  - `tol = 1e-12`: the tolerance to which the problem must be solved, based on a suitable
    error measure, e.g. the norm of some residual.

!!! warning

    The default value of `tol` is a `Float64` value, if you solve problems in `Float32` or
    `ComplexF32` arithmetic, you should always specify a new `tol` as the default value
    will not be attainable.
"""
module KrylovDefaults
using ..KrylovKit
const orth = KrylovKit.ModifiedGramSchmidt2() # conservative choice
const krylovdim = 30
const maxiter = 100
const tol = 1e-12
end
| KrylovKit | https://github.com/Jutho/KrylovKit.jl.git |
|
[
"MIT"
] | 0.8.1 | 3c2a016489c38f35160a246c91a3f3353c47bb68 | code | 683 | apply(A::AbstractMatrix, x::AbstractVector) = A * x
apply(f, x) = f(x)
function apply(operator, x, α₀, α₁)
y = apply(operator, x)
if α₀ != zero(α₀) || α₁ != one(α₁)
y = add!!(y, x, α₀, α₁)
end
return y
end
# GKL, SVD, LSMR: the operator and its adjoint can be given as a matrix,
# a tuple of two functions `(f, fadjoint)`, or a single function taking a
# `Val{false}`/`Val{true}` flag to select normal/adjoint application.
apply_normal(A::AbstractMatrix, x::AbstractVector) = A * x
apply_adjoint(A::AbstractMatrix, x::AbstractVector) = A' * x
apply_normal((f, fadjoint)::Tuple{Any,Any}, x) = f(x)
apply_adjoint((f, fadjoint)::Tuple{Any,Any}, x) = fadjoint(x)
apply_normal(f, x) = f(x, Val(false))
apply_adjoint(f, x) = f(x, Val(true))
# generalized eigenvalue problem: `(A, B)` as a tuple of two maps, or a single
# function returning the pair `(A*x, B*x)`.
genapply((A, B)::Tuple{Any,Any}, x) = (apply(A, x), apply(B, x))
genapply(f, x) = f(x)
| KrylovKit | https://github.com/Jutho/KrylovKit.jl.git |
|
[
"MIT"
] | 0.8.1 | 3c2a016489c38f35160a246c91a3f3353c47bb68 | code | 4028 | """
v = InnerProductVec(vec, dotf)
Create a new vector `v` from an existing vector `dotf` with a modified inner product given
by `inner`. The vector `vec`, which can be any type (not necessarily `Vector`) that supports
the basic vector interface required by KrylovKit, is wrapped in a custom struct
`v::InnerProductVec`. All vector space functionality such as addition and multiplication
with scalars (both out of place and in place using `mul!`, `rmul!`, `axpy!` and `axpby!`)
applied to `v` is simply forwarded to `v.vec`. The inner product between vectors
`v1 = InnerProductVec(vec1, dotf)` and `v2 = InnerProductVec(vec2, dotf)` is computed as
`dot(v1, v2) = dotf(v1.vec, v2.vec) = dotf(vec1, vec2)`. The inner product between vectors
with different `dotf` functions is not defined. Similarly, The norm of `v::InnerProductVec`
is defined as `v = sqrt(real(dot(v, v))) = sqrt(real(dotf(vec, vec)))`.
In a (linear) map applied to `v`, the original vector can be obtained as `v.vec` or simply
as `v[]`.
"""
struct InnerProductVec{F,T}
vec::T
dotf::F
end
Base.:-(v::InnerProductVec) = InnerProductVec(-v.vec, v.dotf)
function Base.:+(v::InnerProductVec{F}, w::InnerProductVec{F}) where {F}
return InnerProductVec(v.vec + w.vec, v.dotf)
end
function Base.:-(v::InnerProductVec{F}, w::InnerProductVec{F}) where {F}
return InnerProductVec(v.vec - w.vec, v.dotf)
end
Base.:*(v::InnerProductVec, a::Number) = InnerProductVec(v.vec * a, v.dotf)
Base.:*(a::Number, v::InnerProductVec) = InnerProductVec(a * v.vec, v.dotf)
Base.:/(v::InnerProductVec, a::Number) = InnerProductVec(v.vec / a, v.dotf)
Base.:\(a::Number, v::InnerProductVec) = InnerProductVec(a \ v.vec, v.dotf)
function Base.similar(v::InnerProductVec, ::Type{T}=scalartype(v)) where {T}
return InnerProductVec(similar(v.vec), v.dotf)
end
Base.getindex(v::InnerProductVec) = v.vec
function Base.copy!(w::InnerProductVec{F}, v::InnerProductVec{F}) where {F}
copy!(w.vec, v.vec)
return w
end
# LinearAlgebra mutating operations: all forwarded to the wrapped vector `vec`.
function LinearAlgebra.mul!(w::InnerProductVec{F},
                            a::Number,
                            v::InnerProductVec{F}) where {F}
    mul!(w.vec, a, v.vec)
    return w
end
function LinearAlgebra.mul!(w::InnerProductVec{F},
                            v::InnerProductVec{F},
                            a::Number) where {F}
    mul!(w.vec, v.vec, a)
    return w
end
function LinearAlgebra.rmul!(v::InnerProductVec, a::Number)
    rmul!(v.vec, a)
    return v
end
function LinearAlgebra.axpy!(a::Number,
                             v::InnerProductVec{F},
                             w::InnerProductVec{F}) where {F}
    axpy!(a, v.vec, w.vec)
    return w
end
function LinearAlgebra.axpby!(a::Number,
                              v::InnerProductVec{F},
                              b,
                              w::InnerProductVec{F}) where {F}
    axpby!(a, v.vec, b, w.vec)
    return w
end
# The inner product is the whole point of the wrapper: delegate to `dotf`.
function LinearAlgebra.dot(v::InnerProductVec{F}, w::InnerProductVec{F}) where {F}
    return v.dotf(v.vec, w.vec)
end
# VectorInterface implementation: again simple forwarding to the wrapped vector,
# except `inner`, which uses the custom inner product `dotf`.
VectorInterface.scalartype(::Type{<:InnerProductVec{F,T}}) where {F,T} = scalartype(T)
function VectorInterface.zerovector(v::InnerProductVec, T::Type{<:Number})
    return InnerProductVec(zerovector(v.vec, T), v.dotf)
end
function VectorInterface.scale(v::InnerProductVec, a::Number)
    return InnerProductVec(scale(v.vec, a), v.dotf)
end
function VectorInterface.scale!(v::InnerProductVec, a::Number)
    scale!(v.vec, a)
    return v
end
function VectorInterface.scale!(w::InnerProductVec{F}, v::InnerProductVec{F},
                                a::Number) where {F}
    scale!(w.vec, v.vec, a)
    return w
end
function VectorInterface.add!(v::InnerProductVec{F}, w::InnerProductVec{F}, a::Number,
                              b::Number) where {F}
    add!(v.vec, w.vec, a, b)
    return v
end
function VectorInterface.inner(v::InnerProductVec{F}, w::InnerProductVec{F}) where {F}
    return v.dotf(v.vec, w.vec)
end
# The norm induced by the custom inner product.
VectorInterface.norm(v::InnerProductVec) = sqrt(real(inner(v, v)))
|
[
"MIT"
] | 0.8.1 | 3c2a016489c38f35160a246c91a3f3353c47bb68 | code | 21159 | # Definition of an orthonormal basis
"""
OrthonormalBasis{T} <: Basis{T}
A list of vector like objects of type `T` that are mutually orthogonal and normalized to
one, representing an orthonormal basis for some subspace (typically a Krylov subspace). See
also [`Basis`](@ref)
Orthonormality of the vectors contained in an instance `b` of `OrthonormalBasis`
(i.e. `all(inner(b[i],b[j]) == I[i,j] for i=1:length(b), j=1:length(b))`) is not checked when
elements are added; it is up to the algorithm that constructs `b` to guarantee
orthonormality.
One can easily orthogonalize or orthonormalize a given vector `v` with respect to a
`b::OrthonormalBasis` using the functions
[`w, = orthogonalize(v,b,...)`](@ref orthogonalize) or
[`w, = orthonormalize(v,b,...)`](@ref orthonormalize). The resulting vector `w` of the
latter can then be added to `b` using `push!(b, w)`. Note that in place versions
[`orthogonalize!(v, b, ...)`](@ref orthogonalize) or
[`orthonormalize!(v, b, ...)`](@ref orthonormalize) are also available.
Finally, a linear combination of the vectors in `b::OrthonormalBasis` can be obtained by
multiplying `b` with a `Vector{<:Number}` using `*` or `mul!` (if the output vector is
already allocated).
"""
struct OrthonormalBasis{T} <: Basis{T}
basis::Vector{T}
end
OrthonormalBasis{T}() where {T} = OrthonormalBasis{T}(Vector{T}(undef, 0))
# Iterator methods for OrthonormalBasis: forward the iteration/indexing/container
# protocol to the wrapped `basis::Vector`.
Base.IteratorSize(::Type{<:OrthonormalBasis}) = Base.HasLength()
Base.IteratorEltype(::Type{<:OrthonormalBasis}) = Base.HasEltype()
Base.length(b::OrthonormalBasis) = length(b.basis)
Base.eltype(b::OrthonormalBasis{T}) where {T} = T
Base.iterate(b::OrthonormalBasis) = Base.iterate(b.basis)
Base.iterate(b::OrthonormalBasis, state) = Base.iterate(b.basis, state)
Base.getindex(b::OrthonormalBasis, i) = getindex(b.basis, i)
# NOTE: arguments are forwarded positionally, so as with `Base.setindex!` the second
# argument is the new value and the third the index, despite the parameter names.
Base.setindex!(b::OrthonormalBasis, i, q) = setindex!(b.basis, i, q)
Base.firstindex(b::OrthonormalBasis) = firstindex(b.basis)
Base.lastindex(b::OrthonormalBasis) = lastindex(b.basis)
Base.first(b::OrthonormalBasis) = first(b.basis)
Base.last(b::OrthonormalBasis) = last(b.basis)
Base.popfirst!(b::OrthonormalBasis) = popfirst!(b.basis)
Base.pop!(b::OrthonormalBasis) = pop!(b.basis)
Base.push!(b::OrthonormalBasis{T}, q::T) where {T} = (push!(b.basis, q); return b)
Base.empty!(b::OrthonormalBasis) = (empty!(b.basis); return b)
Base.sizehint!(b::OrthonormalBasis, k::Int) = (sizehint!(b.basis, k); return b)
Base.resize!(b::OrthonormalBasis, k::Int) = (resize!(b.basis, k); return b)
# Multiplication methods with OrthonormalBasis
# `b * x` forms the linear combination `sum(b[i] * x[i])`, allocating the result
# with the promoted scalar type.
function Base.:*(b::OrthonormalBasis, x::AbstractVector)
    y = zerovector(first(b), promote_type(scalartype(x), scalartype(first(b))))
    return mul!(y, b, x)
end
# In-place variant: overwrite `y` with `b * x` (α = 1, β = 0).
LinearAlgebra.mul!(y, b::OrthonormalBasis, x::AbstractVector) = unproject!!(y, b, x, 1, 0)
# Number of scalar elements processed per task chunk in the multithreaded kernels below.
const BLOCKSIZE = 4096
"""
project!!(y::AbstractVector, b::OrthonormalBasis, x,
[α::Number = 1, β::Number = 0, r = Base.OneTo(length(b))])
For a given orthonormal basis `b`, compute the expansion coefficients `y` resulting from
projecting the vector `x` onto the subspace spanned by `b`; more specifically this computes
```
y[j] = β*y[j] + α * inner(b[r[j]], x)
```
for all ``j ∈ r``.
"""
function project!!(y::AbstractVector,
b::OrthonormalBasis,
x,
α::Number=true,
β::Number=false,
r=Base.OneTo(length(b)))
# no specialized routine for IndexLinear x because reduction dimension is large dimension
length(y) == length(r) || throw(DimensionMismatch())
if get_num_threads() > 1
@sync for J in splitrange(1:length(r), get_num_threads())
Threads.@spawn for j in $J
@inbounds begin
if β == 0
y[j] = α * inner(b[r[j]], x)
else
y[j] = β * y[j] + α * inner(b[r[j]], x)
end
end
end
end
else
for j in 1:length(r)
@inbounds begin
if β == 0
y[j] = α * inner(b[r[j]], x)
else
y[j] = β * y[j] + α * inner(b[r[j]], x)
end
end
end
end
return y
end
"""
unproject!!(y, b::OrthonormalBasis, x::AbstractVector,
[α::Number = 1, β::Number = 0, r = Base.OneTo(length(b))])
For a given orthonormal basis `b`, reconstruct the vector-like object `y` that is defined by
expansion coefficients with respect to the basis vectors in `b` in `x`; more specifically
this computes
```
y = β*y + α * sum(b[r[i]]*x[i] for i = 1:length(r))
```
"""
function unproject!!(y,
b::OrthonormalBasis,
x::AbstractVector,
α::Number=true,
β::Number=false,
r=Base.OneTo(length(b)))
if y isa AbstractArray && !(y isa AbstractGPUArray) && IndexStyle(y) isa IndexLinear &&
get_num_threads() > 1
return unproject_linear_multithreaded!(y, b, x, α, β, r)
end
# general case: using only vector operations, i.e. axpy! (similar to BLAS level 1)
length(x) == length(r) || throw(DimensionMismatch())
if β == 0
y = scale!!(y, false) # should be hard zero
elseif β != 1
y = scale!!(y, β)
end
@inbounds for (i, ri) in enumerate(r)
y = add!!(y, b[ri], α * x[i])
end
return y
end
# Multithreaded unproject for linearly-indexed arrays: threads split the output
# range into contiguous chunks of `blocksize` elements (sized so a chunk of every
# basis vector fits in roughly BLOCKSIZE scalars), each chunk processed by
# `unproject_linear_kernel!`.
function unproject_linear_multithreaded!(y::AbstractArray,
                                         b::OrthonormalBasis{<:AbstractArray},
                                         x::AbstractVector,
                                         α::Number=true,
                                         β::Number=false,
                                         r=Base.OneTo(length(b)))
    # multi-threaded implementation, similar to BLAS level 2 matrix vector multiplication
    m = length(y)
    n = length(r)
    length(x) == n || throw(DimensionMismatch())
    for rj in r
        length(b[rj]) == m || throw(DimensionMismatch())
    end
    if n == 0
        # empty sum: only the β*y term remains
        return β == 1 ? y : β == 0 ? fill!(y, 0) : rmul!(y, β)
    end
    # `let` rebinds captured variables to avoid closure boxing in the spawned tasks.
    let m = m, n = n, y = y, x = x, b = b, blocksize = prevpow(2, div(BLOCKSIZE, n))
        @sync for II in splitrange(1:blocksize:m, get_num_threads())
            Threads.@spawn for I in $II
                unproject_linear_kernel!(y, b, x, I:min(I + blocksize - 1, m), α, β, r)
            end
        end
    end
    return y
end
# Serial kernel for one output chunk `I`: scale the existing entries by β
# (or zero them), then accumulate α * x[j] * b[rj] over all selected basis vectors.
# Mutates `y` in place; the return value is not used.
function unproject_linear_kernel!(y::AbstractArray,
                                  b::OrthonormalBasis{<:AbstractArray},
                                  x::AbstractVector,
                                  I,
                                  α::Number,
                                  β::Number,
                                  r)
    @inbounds begin
        if β == 0
            # hard zero, avoids reading possibly uninitialized y[i]
            @simd for i in I
                y[i] = zero(y[i])
            end
        elseif β != 1
            @simd for i in I
                y[i] *= β
            end
        end
        for (j, rj) in enumerate(r)
            xj = x[j] * α
            Vj = b[rj]
            @simd for i in I
                y[i] += Vj[i] * xj
            end
        end
    end
end
"""
rank1update!(b::OrthonormalBasis, y, x::AbstractVector,
[α::Number = 1, β::Number = 1, r = Base.OneTo(length(b))])
Perform a rank 1 update of a basis `b`, i.e. update the basis vectors as
```
b[r[i]] = β*b[r[i]] + α * y * conj(x[i])
```
It is the user's responsibility to make sure that the result is still an orthonormal basis.
"""
@fastmath function rank1update!(b::OrthonormalBasis,
y,
x::AbstractVector,
α::Number=true,
β::Number=true,
r=Base.OneTo(length(b)))
if y isa AbstractArray && !(y isa AbstractGPUArray) && IndexStyle(y) isa IndexLinear &&
Threads.nthreads() > 1
return rank1update_linear_multithreaded!(b, y, x, α, β, r)
end
# general case: using only vector operations, i.e. axpy! (similar to BLAS level 1)
length(x) == length(r) || throw(DimensionMismatch())
@inbounds for (i, ri) in enumerate(r)
if β == 1
b[ri] = add!!(b[ri], y, α * conj(x[i]))
elseif β == 0
b[ri] = scale!!(b[ri], y, α * conj(x[i]))
else
b[ri] = add!!(b[ri], y, α * conj(x[i]), β)
end
end
return b
end
# Multithreaded rank-1 update for linearly-indexed arrays: threads split the
# element range into contiguous chunks; within a chunk, every selected basis
# vector is first scaled by β (or zeroed) and then incremented by y * (α*conj(x[j])).
@fastmath function rank1update_linear_multithreaded!(b::OrthonormalBasis{<:AbstractArray},
                                                     y::AbstractArray,
                                                     x::AbstractVector,
                                                     α::Number,
                                                     β::Number,
                                                     r)
    # multi-threaded implementation, similar to BLAS level 2 matrix vector multiplication
    m = length(y)
    n = length(r)
    length(x) == n || throw(DimensionMismatch())
    for rj in r
        length(b[rj]) == m || throw(DimensionMismatch())
    end
    if n == 0
        return b
    end
    # `let` rebinds captured variables to avoid closure boxing in the spawned tasks.
    let m = m, n = n, y = y, x = x, b = b, blocksize = prevpow(2, div(BLOCKSIZE, n))
        @sync for II in splitrange(1:blocksize:m, get_num_threads())
            Threads.@spawn for I in $II
                @inbounds begin
                    for (j, rj) in enumerate(r)
                        xj = α * conj(x[j])
                        Vj = b[rj]
                        if β == 0
                            # hard zero, avoids reading possibly uninitialized entries
                            @simd for i in I:min(I + blocksize - 1, m)
                                Vj[i] = zero(Vj[i])
                            end
                        elseif β != 1
                            @simd for i in I:min(I + blocksize - 1, m)
                                Vj[i] *= β
                            end
                        end
                        if I + blocksize - 1 <= m
                            # full chunk: fixed-length loop aids SIMD
                            @simd for i in Base.OneTo(blocksize)
                                Vj[I - 1 + i] += y[I - 1 + i] * xj
                            end
                        else
                            # trailing partial chunk
                            @simd for i in I:m
                                Vj[i] += y[i] * xj
                            end
                        end
                    end
                end
            end
        end
    end
    return b
end
"""
basistransform!(b::OrthonormalBasis, U::AbstractMatrix)
Transform the orthonormal basis `b` by the matrix `U`. For `b` an orthonormal basis,
the matrix `U` should be real orthogonal or complex unitary; it is up to the user to ensure
this condition is satisfied. The new basis vectors are given by
```
b[j] ← b[i] * U[i,j]
```
and are stored in `b`, so the old basis vectors are thrown away. Note that, by definition,
the subspace spanned by these basis vectors is exactly the same.
"""
function basistransform!(b::OrthonormalBasis{T}, U::AbstractMatrix) where {T} # U should be unitary or isometric
if T <: AbstractArray && !(T <: AbstractGPUArray) && IndexStyle(T) isa IndexLinear &&
get_num_threads() > 1
return basistransform_linear_multithreaded!(b, U)
end
m, n = size(U)
m == length(b) || throw(DimensionMismatch())
let b2 = [zerovector(b[1]) for j in 1:n]
if get_num_threads() > 1
@sync for J in splitrange(1:n, get_num_threads())
Threads.@spawn for j in $J
b2[j] = scale!!(b2[j], b[1], U[1, j])
for i in 2:m
b2[j] = add!!(b2[j], b[i], U[i, j])
end
end
end
else
for j in 1:n
b2[j] = scale!!(b2[j], b[1], U[1, j])
for i in 2:m
b2[j] = add!!(b2[j], b[i], U[i, j])
end
end
end
for j in 1:n
b[j] = b2[j]
end
end
return b
end
# Multithreaded basis transform for linearly-indexed arrays: threads split the
# element range into chunks; within a chunk, every new basis vector b2[j] is
# accumulated as sum over k of b[k] * U[k, j], then b2 replaces b.
function basistransform_linear_multithreaded!(b::OrthonormalBasis{<:AbstractArray},
                                              U::AbstractMatrix) # U should be unitary or isometric
    m, n = size(U)
    m == length(b) || throw(DimensionMismatch())
    K = length(b[1])
    blocksize = prevpow(2, div(BLOCKSIZE, m))
    # `let` rebinds captured variables to avoid closure boxing in the spawned tasks.
    let b2 = [similar(b[1]) for j in 1:n], K = K, m = m, n = n
        @sync for II in splitrange(1:blocksize:K, get_num_threads())
            Threads.@spawn for I in $II
                @inbounds for j in 1:n
                    b2j = b2[j]
                    # b2 comes from `similar` (uninitialized), so zero the chunk first
                    @simd for i in I:min(I + blocksize - 1, K)
                        b2j[i] = zero(b2j[i])
                    end
                    for k in 1:m
                        bk = b[k]
                        Ukj = U[k, j]
                        @simd for i in I:min(I + blocksize - 1, K)
                            b2j[i] += bk[i] * Ukj
                        end
                    end
                end
            end
        end
        for j in 1:n
            b[j] = b2[j]
        end
    end
    return b
end
# function basistransform2!(b::OrthonormalBasis, U::AbstractMatrix) # U should be unitary or isometric
# m, n = size(U)
# m == length(b) || throw(DimensionMismatch())
#
# # apply basis transform via householder reflections
# for j = 1:size(U,2)
# h, ν = householder(U, j:m, j)
# lmul!(h, view(U, :, j+1:n))
# rmul!(b, h')
# end
# return b
# end
# Orthogonalization of a vector against a given OrthonormalBasis
# Out-of-place form: make a working copy of `v` (scaling by One() produces a new
# vector with unchanged values) and delegate to the in-place variant. The mutating
# API in this file is spelled `orthogonalize!!` (the previous single-bang
# `orthogonalize!` is not defined here); this mirrors `orthonormalize` below.
orthogonalize(v, args...) = orthogonalize!!(scale(v, VectorInterface.One()), args...)
# Allocate coefficient storage with the promoted scalar type, then dispatch to
# the algorithm-specific method below.
function orthogonalize!!(v::T, b::OrthonormalBasis{T}, alg::Orthogonalizer) where {T}
    S = promote_type(eltype(v), eltype(T))
    c = Vector{S}(undef, length(b))
    return orthogonalize!!(v, b, c, alg)
end
# Classical Gram-Schmidt: project onto the whole basis at once, then subtract
# the projection in a single pass (two BLAS-2-like operations).
function orthogonalize!!(v::T,
                         b::OrthonormalBasis{T},
                         x::AbstractVector,
                         ::ClassicalGramSchmidt) where {T}
    x = project!!(x, b, v)
    v = unproject!!(v, b, x, -1, 1)
    return (v, x)
end
# Second CGS pass: accumulate the correction coefficients into `x`.
function reorthogonalize!!(v::T,
                           b::OrthonormalBasis{T},
                           x::AbstractVector,
                           ::ClassicalGramSchmidt) where {T}
    s = similar(x) ## EXTRA ALLOCATION
    s = project!!(s, b, v)
    v = unproject!!(v, b, s, -1, 1)
    x .+= s
    return (v, x)
end
# CGS with exactly one reorthogonalization pass.
function orthogonalize!!(v::T,
                         b::OrthonormalBasis{T},
                         x::AbstractVector,
                         ::ClassicalGramSchmidt2) where {T}
    (v, x) = orthogonalize!!(v, b, x, ClassicalGramSchmidt())
    return reorthogonalize!!(v, b, x, ClassicalGramSchmidt())
end
# CGS with iterative reorthogonalization: repeat while the norm keeps shrinking
# by more than the factor η (DGKS criterion); stop once v is numerically zero.
function orthogonalize!!(v::T,
                         b::OrthonormalBasis{T},
                         x::AbstractVector,
                         alg::ClassicalGramSchmidtIR) where {T}
    nold = norm(v)
    (v, x) = orthogonalize!!(v, b, x, ClassicalGramSchmidt())
    nnew = norm(v)
    while eps(one(nnew)) < nnew < alg.η * nold
        nold = nnew
        (v, x) = reorthogonalize!!(v, b, x, ClassicalGramSchmidt())
        nnew = norm(v)
    end
    return (v, x)
end
# Modified Gram-Schmidt: subtract the projection onto each basis vector one at a
# time, so later projections see the partially orthogonalized vector.
function orthogonalize!!(v::T,
                         b::OrthonormalBasis{T},
                         x::AbstractVector,
                         ::ModifiedGramSchmidt) where {T}
    for (i, q) in enumerate(b)
        s = inner(q, v)
        v = add!!(v, q, -s)
        x[i] = s
    end
    return (v, x)
end
# Second MGS pass: accumulate the correction coefficients into `x`.
function reorthogonalize!!(v::T,
                           b::OrthonormalBasis{T},
                           x::AbstractVector,
                           ::ModifiedGramSchmidt) where {T}
    for (i, q) in enumerate(b)
        s = inner(q, v)
        v = add!!(v, q, -s)
        x[i] += s
    end
    return (v, x)
end
# MGS with exactly one reorthogonalization pass.
function orthogonalize!!(v::T,
                         b::OrthonormalBasis{T},
                         x::AbstractVector,
                         ::ModifiedGramSchmidt2) where {T}
    (v, x) = orthogonalize!!(v, b, x, ModifiedGramSchmidt())
    return reorthogonalize!!(v, b, x, ModifiedGramSchmidt())
end
# MGS with iterative reorthogonalization: repeat while the norm keeps shrinking
# by more than the factor η (DGKS criterion); stop once v is numerically zero.
function orthogonalize!!(v::T,
                         b::OrthonormalBasis{T},
                         x::AbstractVector,
                         alg::ModifiedGramSchmidtIR) where {T}
    nold = norm(v)
    (v, x) = orthogonalize!!(v, b, x, ModifiedGramSchmidt())
    nnew = norm(v)
    while eps(one(nnew)) < nnew < alg.η * nold
        nold = nnew
        (v, x) = reorthogonalize!!(v, b, x, ModifiedGramSchmidt())
        nnew = norm(v)
    end
    return (v, x)
end
# Orthogonalization of a vector against a given normalized vector
orthogonalize!!(v::T, q::T, alg::Orthogonalizer) where {T} = _orthogonalize!!(v, q, alg)
# avoid method ambiguity on Julia 1.0 according to Aqua.jl
# Single projection pass: classical and modified GS coincide for a single vector.
function _orthogonalize!!(v::T,
                          q::T,
                          alg::Union{ClassicalGramSchmidt,ModifiedGramSchmidt}) where {T}
    s = inner(q, v)
    v = add!!(v, q, -s)
    return (v, s)
end
# One mandatory reorthogonalization pass; total coefficient is s + ds.
function _orthogonalize!!(v::T,
                          q::T,
                          alg::Union{ClassicalGramSchmidt2,ModifiedGramSchmidt2}) where {T}
    s = inner(q, v)
    v = add!!(v, q, -s)
    ds = inner(q, v)
    v = add!!(v, q, -ds)
    return (v, s + ds)
end
# Iterative reorthogonalization (DGKS criterion), accumulating the coefficient in s.
function _orthogonalize!!(v::T,
                          q::T,
                          alg::Union{ClassicalGramSchmidtIR,ModifiedGramSchmidtIR}) where {T}
    nold = norm(v)
    s = inner(q, v)
    v = add!!(v, q, -s)
    nnew = norm(v)
    while eps(one(nnew)) < nnew < alg.η * nold
        nold = nnew
        ds = inner(q, v)
        v = add!!(v, q, -ds)
        s += ds
        nnew = norm(v)
    end
    return (v, s)
end
"""
orthogonalize(v, b::OrthonormalBasis, [x::AbstractVector,] alg::Orthogonalizer]) -> w, x
orthogonalize!!(v, b::OrthonormalBasis, [x::AbstractVector,] alg::Orthogonalizer]) -> w, x
orthogonalize(v, q, algorithm::Orthogonalizer]) -> w, s
orthogonalize!!(v, q, algorithm::Orthogonalizer]) -> w, s
Orthogonalize vector `v` against all the vectors in the orthonormal basis `b` using the
orthogonalization algorithm `alg` of type [`Orthogonalizer`](@ref), and return the resulting
vector `w` and the overlap coefficients `x` of `v` with the basis vectors in `b`.
In case of `orthogonalize!`, the vector `v` is mutated in place. In both functions, storage
for the overlap coefficients `x` can be provided as optional argument `x::AbstractVector`
with `length(x) >= length(b)`.
One can also orthogonalize `v` against a given vector `q` (assumed to be normalized), in
which case the orthogonal vector `w` and the inner product `s` between `v` and `q` are
returned.
Note that `w` is not normalized, see also [`orthonormalize`](@ref).
For more information on possible orthogonalization algorithms, see [`Orthogonalizer`](@ref)
and its concrete subtypes [`ClassicalGramSchmidt`](@ref), [`ModifiedGramSchmidt`](@ref),
[`ClassicalGramSchmidt2`](@ref), [`ModifiedGramSchmidt2`](@ref),
[`ClassicalGramSchmidtIR`](@ref) and [`ModifiedGramSchmidtIR`](@ref).
"""
orthogonalize, orthogonalize!!
# Orthonormalization: orthogonalization and normalization
# Out-of-place form: work on a copy of `v` (scaling by One() allocates a new
# vector with unchanged values) and delegate to the in-place variant.
orthonormalize(v, args...) = orthonormalize!!(scale(v, VectorInterface.One()), args...)
function orthonormalize!!(v, args...)
    out = orthogonalize!!(v, args...)
    # Use the vector returned by orthogonalize!! rather than the input binding:
    # for immutable vector types, add!!/scale!! may return a fresh object, in
    # which case `v` would still hold the unorthogonalized data.
    w = first(out)
    β = norm(w)
    w = scale!!(w, inv(β))
    return tuple(w, β, Base.tail(out)...)
end
"""
orthonormalize(v, b::OrthonormalBasis, [x::AbstractVector,] alg::Orthogonalizer]) -> w, β, x
orthonormalize!!(v, b::OrthonormalBasis, [x::AbstractVector,] alg::Orthogonalizer]) -> w, β, x
orthonormalize(v, q, algorithm::Orthogonalizer]) -> w, β, s
orthonormalize!!(v, q, algorithm::Orthogonalizer]) -> w, β, s
Orthonormalize vector `v` against all the vectors in the orthonormal basis `b` using the
orthogonalization algorithm `alg` of type [`Orthogonalizer`](@ref), and return the resulting
vector `w` (of norm 1), its norm `β` after orthogonalizing and the overlap coefficients `x`
of `v` with the basis vectors in `b`, such that `v = β * w + b * x`.
In case of `orthogonalize!`, the vector `v` is mutated in place. In both functions, storage
for the overlap coefficients `x` can be provided as optional argument `x::AbstractVector`
with `length(x) >= length(b)`.
One can also orthonormalize `v` against a given vector `q` (assumed to be normalized), in
which case the orthonormal vector `w`, its norm `β` before normalizing and the inner product
`s` between `v` and `q` are returned.
See [`orthogonalize`](@ref) if `w` does not need to be normalized.
For more information on possible orthogonalization algorithms, see [`Orthogonalizer`](@ref)
and its concrete subtypes [`ClassicalGramSchmidt`](@ref), [`ModifiedGramSchmidt`](@ref),
[`ClassicalGramSchmidt2`](@ref), [`ModifiedGramSchmidt2`](@ref),
[`ClassicalGramSchmidtIR`](@ref) and [`ModifiedGramSchmidtIR`](@ref).
"""
orthonormalize, orthonormalize!!
| KrylovKit | https://github.com/Jutho/KrylovKit.jl.git |
|
[
"MIT"
] | 0.8.1 | 3c2a016489c38f35160a246c91a3f3353c47bb68 | code | 4304 | """
v = RecursiveVec(vecs)
Create a new vector `v` from an existing (homogeneous or heterogeneous) list of vectors
`vecs` with one or more elements, represented as a `Tuple` or `AbstractVector`. The elements
of `vecs` can be any type of vectors that are supported by KrylovKit. For a heterogeneous
list, it is best to use a tuple for reasons of type stability, while for a homogeneous list,
either a `Tuple` or a `Vector` can be used. From a mathematical perspective, `v` represents
the direct sum of the vectors in `vecs`. Scalar multiplication and addition of vectors `v`
acts simultaneously on all elements of `v.vecs`. The inner product corresponds to the sum
of the inner products of the individual vectors in the list `v.vecs`.
The vector `v` also adheres to the iteration syntax, but where it will just produce the
individual vectors in `v.vecs`. Hence, `length(v) = length(v.vecs)`. It can also be indexed,
so that `v[i] = v.vecs[i]`, which can be useful in writing a linear map that acts on `v`.
"""
struct RecursiveVec{T<:Union{Tuple,AbstractVector}}
    # the list of component vectors whose direct sum this object represents
    vecs::T
end
function RecursiveVec(arg1::AbstractVector{T}) where {T}
    # A vector with bits-type entries is a plain data vector, i.e. a single
    # component, so wrap it in a one-element tuple; any other vector is
    # interpreted as the list of components itself.
    return isbitstype(T) ? RecursiveVec((arg1,)) : RecursiveVec{typeof(arg1)}(arg1)
end
# Several components given as separate arguments: collect them in a tuple.
RecursiveVec(arg1, args...) = RecursiveVec((arg1, args...))
# Container interface: a RecursiveVec iterates over (and indexes into) its
# component vectors, so length/size refer to the number of components.
Base.getindex(v::RecursiveVec, i) = v.vecs[i]
Base.iterate(v::RecursiveVec, args...) = iterate(v.vecs, args...)
Base.IteratorEltype(::Type{RecursiveVec{T}}) where {T} = Base.IteratorEltype(T)
Base.IteratorSize(::Type{RecursiveVec{T}}) where {T} = Base.IteratorSize(T)
Base.eltype(v::RecursiveVec) = eltype(v.vecs)
Base.size(v::RecursiveVec) = size(v.vecs)
Base.length(v::RecursiveVec) = length(v.vecs)
Base.first(v::RecursiveVec) = first(v.vecs)
Base.last(v::RecursiveVec) = last(v.vecs)
# Vector arithmetic acts simultaneously on all components.
Base.:-(v::RecursiveVec) = RecursiveVec(map(-, v.vecs))
Base.:+(v::RecursiveVec, w::RecursiveVec) = RecursiveVec(map(+, v.vecs, w.vecs))
Base.:-(v::RecursiveVec, w::RecursiveVec) = RecursiveVec(map(-, v.vecs, w.vecs))
Base.:*(v::RecursiveVec, a::Number) = RecursiveVec(map(x -> x * a, v.vecs))
Base.:*(a::Number, v::RecursiveVec) = RecursiveVec(map(x -> a * x, v.vecs))
Base.:/(v::RecursiveVec, a::Number) = RecursiveVec(map(x -> x / a, v.vecs))
Base.:\(a::Number, v::RecursiveVec) = RecursiveVec(map(x -> a \ x, v.vecs))
# Allocate a new RecursiveVec with uninitialized components of matching shapes.
Base.similar(v::RecursiveVec) = RecursiveVec(map(similar, v.vecs))
# Copy the contents of `v` into `w` component by component and return `w`.
# Both RecursiveVecs must have the same number of components; an explicit
# DimensionMismatch is thrown instead of the previous `@assert`, which is
# not a reliable input check (assertions may be disabled at higher
# optimization levels).
function Base.copy!(w::RecursiveVec, v::RecursiveVec)
    length(w) == length(v) ||
        throw(DimensionMismatch("number of components does not match: $(length(w)) vs $(length(v))"))
    @inbounds for i in 1:length(w)
        copyto!(w[i], v[i])
    end
    return w
end
# Inner product of two RecursiveVecs: sum of the componentwise inner products.
# `mapreduce` folds the partial results directly instead of first materializing
# the intermediate collection that `sum(dot.(v.vecs, w.vecs))` would create.
function LinearAlgebra.dot(v::RecursiveVec{T}, w::RecursiveVec{T}) where {T}
    return mapreduce(dot, +, v.vecs, w.vecs)
end
# VectorInterface implementation: each primitive delegates to the
# corresponding operation on the wrapped list `vecs`, so a RecursiveVec
# behaves as the direct sum of its components.
VectorInterface.scalartype(::Type{RecursiveVec{T}}) where {T} = scalartype(T)
function VectorInterface.zerovector(v::RecursiveVec, T::Type{<:Number})
    return RecursiveVec(zerovector(v.vecs, T))
end
function VectorInterface.scale(v::RecursiveVec, a::Number)
    return RecursiveVec(scale(v.vecs, a))
end
function VectorInterface.scale!(v::RecursiveVec, a::Number)
    scale!(v.vecs, a)
    return v
end
function VectorInterface.scale!(w::RecursiveVec, v::RecursiveVec, a::Number)
    scale!(w.vecs, v.vecs, a)
    return w
end
# The !!-variants may or may not act in place (depending on what the component
# vectors support), so they always rewrap whatever the delegated call returns.
function VectorInterface.scale!!(x::RecursiveVec, a::Number)
    return RecursiveVec(scale!!(x.vecs, a))
end
function VectorInterface.scale!!(w::RecursiveVec,
                                 v::RecursiveVec, a::Number)
    return RecursiveVec(scale!!(w.vecs, v.vecs, a))
end
# add-family (w ← b*w + a*v), again delegating to the wrapped lists. The
# !-variants mutate `w`; the !!-variants may fall back to out-of-place
# operation and therefore rewrap the returned list.
function VectorInterface.add(w::RecursiveVec, v::RecursiveVec, a::Number=One(),
                             b::Number=One())
    return RecursiveVec(add(w.vecs, v.vecs, a, b))
end
function VectorInterface.add!(w::RecursiveVec, v::RecursiveVec, a::Number=One(),
                              b::Number=One())
    add!(w.vecs, v.vecs, a, b)
    return w
end
function VectorInterface.add!!(w::RecursiveVec, v::RecursiveVec,
                               a::Number=One(),
                               b::Number=One())
    return RecursiveVec(add!!(w.vecs, v.vecs, a, b))
end
# Inner product and norm are those of the direct sum, i.e. accumulated over
# all components.
function VectorInterface.inner(v::RecursiveVec{T}, w::RecursiveVec{T}) where {T}
    return inner(v.vecs, w.vecs)
end
VectorInterface.norm(v::RecursiveVec) = VectorInterface.norm(v.vecs)
| KrylovKit | https://github.com/Jutho/KrylovKit.jl.git |
|
[
"MIT"
] | 0.8.1 | 3c2a016489c38f35160a246c91a3f3353c47bb68 | code | 1053 | # Elementary Givens rotation
using LinearAlgebra: Givens
# for reference: Julia LinearAlgebra.Givens
# immutable Givens{T} <: AbstractRotation{T}
# i1::Int
# i2::Int
# c::T
# s::T
# end
# Apply a Givens rotation to (columns G.i1, G.i2 of) the orthonormal basis.
# Dispatch between a fused fast path for linearly-indexed array storage and
# the generic VectorInterface fallback.
function LinearAlgebra.rmul!(b::OrthonormalBasis{T}, G::Givens) where {T}
    # `T` is a *type*, so the subtype test `T <: AbstractArray` is required;
    # the previous `T isa AbstractArray` is always false for a type, which
    # silently sent every call through the slow generic fallback.
    if T <: AbstractArray && IndexStyle(T) isa IndexLinear
        return _rmul_linear!(b, G)
    else
        return _rmul!(b, G)
    end
end
# Fast path: the basis vectors are arrays with linear indexing, so the Givens
# rotation can be fused into a single elementwise loop over both columns.
@fastmath function _rmul_linear!(b::OrthonormalBasis{<:AbstractArray}, G::Givens)
    q1, q2 = b[G.i1], b[G.i2]
    c = G.c
    s = G.s
    @inbounds @simd for i in 1:length(q1)
        # simultaneous update: (q1, q2) ← (c*q1 - conj(s)*q2, s*q1 + c*q2)
        q1[i], q2[i] = c * q1[i] - conj(s) * q2[i], s * q1[i] + c * q2[i]
    end
    return b
end
# Generic fallback using the VectorInterface primitives.
function _rmul!(b::OrthonormalBasis, G::Givens)
    q1, q2 = b[G.i1], b[G.i2]
    # q1′ must be computed out of place: the in-place update of q2 below still
    # needs the original q1, and the original q2 is consumed here first.
    q1′ = add(q1, q2, -conj(G.s), G.c)
    q2′ = add!!(q2, q1, G.s, G.c)
    b[G.i1], b[G.i2] = q1′, q2′
    return b
end
# New types for discarding or for storing successive Givens transformations
# `NoVecs` is a sentinel used when the caller does not need the
# transformations to be accumulated: applying a rotation to it is a no-op.
struct NoVecs end
const novecs = NoVecs()
rmulc!(::NoVecs, ::Any) = novecs
| KrylovKit | https://github.com/Jutho/KrylovKit.jl.git |
|
[
"MIT"
] | 0.8.1 | 3c2a016489c38f35160a246c91a3f3353c47bb68 | code | 27967 | # Some modified wrappers for Lapack
import LinearAlgebra:
BlasFloat,
BlasInt,
LAPACKException,
DimensionMismatch,
SingularException,
PosDefException,
chkstride1,
checksquare
import LinearAlgebra.BLAS: @blasfunc, BlasReal, BlasComplex
import LinearAlgebra.LAPACK: chklapackerror
# Locate the LAPACK library: from Julia 1.7 on, BLAS/LAPACK calls go through
# libblastrampoline; older versions expose liblapack directly.
@static if VERSION >= v"1.7"
    const liblapack = LinearAlgebra.BLAS.libblastrampoline
else
    const liblapack = LinearAlgebra.LAPACK.liblapack
end
# Compatibility shim: provide a local `require_one_based_indexing` on Julia
# versions where Base does not define it.
@static if isdefined(Base, :require_one_based_indexing)
    import Base: require_one_based_indexing
else
    function require_one_based_indexing(A...)
        return !Base.has_offset_axes(A...) ||
               throw(ArgumentError("offset arrays are not supported but got an array with index other than 1"))
    end
end
# Lazy iterator over (a selection `r` of) the rows of a matrix, yielding views.
struct RowIterator{A<:AbstractMatrix,R<:IndexRange}
    a::A
    r::R
end
rows(a::AbstractMatrix, r::IndexRange=axes(a, 1)) = RowIterator(a, r)
function Base.iterate(iter::RowIterator)
    next = iterate(iter.r)
    if next === nothing
        return nothing
    else
        i, s = next
        return view(iter.a, i, :), s
    end
end
function Base.iterate(iter::RowIterator, s)
    next = iterate(iter.r, s)
    if next === nothing
        return nothing
    else
        i, s = next
        return view(iter.a, i, :), s
    end
end
Base.IteratorSize(::Type{<:RowIterator}) = Base.HasLength()
Base.IteratorEltype(::Type{<:RowIterator}) = Base.HasEltype()
Base.length(iter::RowIterator) = length(iter.r)
# For dense storage the element type of the produced row views is concrete,
# allowing collect-like code to infer precisely.
function Base.eltype(iter::RowIterator{A}) where {T,A<:DenseArray{T}}
    return SubArray{T,1,A,Tuple{Int,Base.Slice{Base.OneTo{Int}}},true}
end
# Lazy iterator over (a selection `r` of) the columns of a matrix, yielding views.
struct ColumnIterator{A<:AbstractMatrix,R<:IndexRange}
    a::A
    r::R
end
cols(a::AbstractMatrix, r::IndexRange=axes(a, 2)) = ColumnIterator(a, r)
function Base.iterate(iter::ColumnIterator)
    next = iterate(iter.r)
    if next === nothing
        return nothing
    else
        i, s = next
        return view(iter.a, :, i), s
    end
end
function Base.iterate(iter::ColumnIterator, s)
    next = iterate(iter.r, s)
    if next === nothing
        return nothing
    else
        i, s = next
        return view(iter.a, :, i), s
    end
end
Base.IteratorSize(::Type{<:ColumnIterator}) = Base.HasLength()
Base.IteratorEltype(::Type{<:ColumnIterator}) = Base.HasEltype()
Base.length(iter::ColumnIterator) = length(iter.r)
# For dense storage the element type of the produced column views is concrete.
function Base.eltype(iter::ColumnIterator{A}) where {T,A<:DenseArray{T}}
    return SubArray{T,1,A,Tuple{Base.Slice{Base.OneTo{Int}},Int},true}
end
# # QR decomposition
# function qr!(A::StridedMatrix{<:BlasFloat})
# m, n = size(A)
# A, T = LAPACK.geqrt!(A, min(minimum(size(A)), 36))
# Q = LAPACK.gemqrt!('L', 'N', A, T, eye(eltype(A), m, min(m,n)))
# R = triu!(A[1:min(m,n), :])
# return Q, R
# end
# Triangular division (backward substitution), restricted to the index range
# `r`: solves A[r, r] * y[r] = y[r] in place and returns `y`.
# For some reason this is faster than LAPACK's trsv.
function ldiv!(A::UpperTriangular, y::AbstractVector, r::UnitRange{Int}=1:length(y))
    R = A.data
    @inbounds for j in reverse(r)
        Rjj = R[j, j]
        iszero(Rjj) && throw(SingularException(j))
        yj = Rjj \ y[j]
        y[j] = yj
        # eliminate the contribution of y[j] from all rows above it
        @simd for i in first(r):(j - 1)
            y[i] -= R[i, j] * yj
        end
    end
    return y
end
# Eigenvalue decomposition of SymTridiagonal matrix
# Thin wrapper over the custom `stegr!` defined further below in this file
# (hence the "redefined" marker); `Z` defaults to the identity.
function tridiageigh!(A::SymTridiagonal{T}, Z::StridedMatrix{T}=one(A)) where {T<:BlasFloat}
    return stegr!(A.dv, A.ev, Z)
end # redefined
# Generalized eigenvalue decomposition of symmetric / Hermitian problem
# sygvd! with itype = 1 solves A*x = λ*B*x; 'V' requests eigenvectors and
# 'U' reads the upper triangles of A and B.
function geneigh!(A::StridedMatrix{T}, B::StridedMatrix{T}) where {T<:BlasFloat}
    return LAPACK.sygvd!(1, 'V', 'U', A, B)
end
# Singular value decomposition of a Bidiagonal matrix
function bidiagsvd!(B::Bidiagonal{T},
                    U::AbstractMatrix{T}=one(B),
                    VT::AbstractMatrix{T}=one(B)) where {T<:BlasReal}
    # bdsqr! returns (d, Vt, U, C); the empty last argument means no extra
    # right-hand side C has to be updated. Reorder to conventional (U, s, Vt).
    s, Vt, U, = LAPACK.bdsqr!(B.uplo, B.dv, B.ev, VT, U, similar(U, (size(B, 1), 0)))
    return U, s, Vt
end
# Reverse the order of the columns of `U` in place and return `U`.
function reversecols!(U::AbstractMatrix)
    ncols = size(U, 2)
    @inbounds for j in 1:div(ncols, 2)
        jr = ncols + 1 - j
        @simd for i in 1:size(U, 1)
            U[i, j], U[i, jr] = U[i, jr], U[i, j]
        end
    end
    return U
end
# Reverse the order of the rows of `V` in place and return `V`.
function reverserows!(V::AbstractVecOrMat)
    nrows = size(V, 1)
    half = div(nrows, 2)
    @inbounds for col in 1:size(V, 2)
        @simd for row in 1:half
            V[row, col], V[nrows + 1 - row, col] = V[nrows + 1 - row, col], V[row, col]
        end
    end
    return V
end
# Schur factorization of a Hessenberg matrix
# (thin wrapper around the custom `hseqr!` defined further below)
function hschur!(H::AbstractMatrix{T}, Z::AbstractMatrix{T}=one(H)) where {T<:BlasFloat}
    return hseqr!(H, Z)
end
# Extract (a selection of) eigenvalues from a Schur factor `T`.
schur2eigvals(T::AbstractMatrix{<:BlasFloat}) = schur2eigvals(T, 1:size(T, 1))
function schur2eigvals(T::AbstractMatrix{<:BlasComplex}, which::AbstractVector{Int})
    checksquare(T)  # validate squareness, mirroring the real method
    selected = unique(which)
    length(selected) == length(which) ||
        throw(ArgumentError("which should contain unique values"))
    # a complex Schur factor is upper triangular: eigenvalues on the diagonal
    return [T[i, i] for i in selected]
end
# Eigenvalues of a real (quasi-triangular) Schur factor for the selection
# `which`: 1x1 diagonal blocks give real eigenvalues, while a 2x2 block at
# (i, i+1) encodes a complex conjugate pair.
#
# Bug fix: the results must be stored at `D[k]` (the output slot for the k-th
# requested eigenvalue), not `D[i]` (the position inside T). The old code
# only worked by accident for `which == 1:n`; any other selection either
# filled wrong slots or threw a BoundsError (e.g. `which = [2]` wrote `D[2]`
# into a length-1 array). The loop now also consistently iterates `which2`.
function schur2eigvals(T::AbstractMatrix{<:BlasReal}, which::AbstractVector{Int})
    n = checksquare(T)
    which2 = unique(which)
    length(which2) == length(which) ||
        throw(ArgumentError("which should contain unique values"))
    D = zeros(Complex{eltype(T)}, length(which2))
    for k in 1:length(which2)
        i = which2[k]
        if i < n && !iszero(T[i + 1, i])
            # first position of a 2x2 block: take the + imaginary root
            halftr = (T[i, i] + T[i + 1, i + 1]) / 2
            diff = (T[i, i] - T[i + 1, i + 1]) / 2
            d = diff * diff + T[i, i + 1] * T[i + 1, i] # = halftr*halftr - det
            D[k] = halftr + im * sqrt(-d)
        elseif i > 1 && !iszero(T[i, i - 1])
            # second position of a 2x2 block: the conjugate (- imaginary) root
            halftr = (T[i, i] + T[i - 1, i - 1]) / 2
            diff = -(T[i, i] - T[i - 1, i - 1]) / 2
            d = diff * diff + T[i, i - 1] * T[i - 1, i] # = halftr*halftr - det
            D[k] = halftr - im * sqrt(-d)
        else
            # 1x1 block: real eigenvalue on the diagonal
            D[k] = T[i, i]
        end
    end
    return D
end
# Rescale every column of V to unit 2-norm, in place.
function _normalizevecs!(V)
    @inbounds for col in 1:size(V, 2)
        normalize!(view(V, :, col))
    end
    return V
end
# All right eigenvectors of a complex (upper triangular) Schur factor.
function schur2eigvecs(T::AbstractMatrix{<:BlasComplex})
    n = checksquare(T)
    VR = similar(T, n, n)
    VL = similar(T, n, 0)  # left eigenvectors are not requested
    select = Vector{BlasInt}(undef, 0)  # unused when howmny == 'A'
    trevc!('R', 'A', select, T, VL, VR)
    # rescale the returned columns to unit 2-norm
    return _normalizevecs!(VR)
end
# Right eigenvectors of a complex Schur factor for the selection `which`.
function schur2eigvecs(T::AbstractMatrix{<:BlasComplex}, which::AbstractVector{Int})
    n = checksquare(T)
    which2 = unique(which)
    length(which2) == length(which) ||
        throw(ArgumentError("which should contain unique values"))
    m = BlasInt(length(which2))
    VR = similar(T, n, m)
    VL = similar(T, n, 0)
    select = zeros(BlasInt, n)
    # request the eigenvectors one at a time so that column k of VR always
    # corresponds to which2[k], independent of the ordering within T
    for k in 1:length(which2)
        i = which2[k]
        select[i] = one(BlasInt)
        trevc!('R', 'S', select, T, VL, view(VR, :, k:k))
        select[i] = zero(BlasInt)
    end
    return _normalizevecs!(VR)
end
# All right eigenvectors of a real (quasi-triangular) Schur factor. A 2x2
# diagonal block encodes a complex conjugate pair, for which trevc! returns
# two real columns holding the real and imaginary parts of the eigenvector.
function schur2eigvecs(T::StridedMatrix{<:BlasReal})
    n = checksquare(T)
    VR = similar(T, Complex{eltype(T)}, n, n)
    VR′ = similar(T, n, n)  # real output of trevc!, recombined below
    VL′ = similar(T, n, 0)
    select = Vector{BlasInt}(undef, 0)
    trevc!('R', 'A', select, T, VL′, VR′)
    i = 1
    while i <= n
        if i == n || iszero(T[i + 1, i])
            # 1x1 block: real eigenvalue, single real column
            @inbounds @simd for k in 1:n
                VR[k, i] = VR′[k, i]
            end
            i += 1
        else
            # 2x2 block: columns i, i+1 are re/im parts of the conjugate pair
            @inbounds @simd for k in 1:n
                VR[k, i] = VR′[k, i] + im * VR′[k, i + 1]
                VR[k, i + 1] = VR′[k, i] - im * VR′[k, i + 1]
            end
            i += 2
        end
    end
    return _normalizevecs!(VR)
end
# Right eigenvectors of a real Schur factor that is strictly triangular (no
# 2x2 blocks): all eigenvalues are real, so the eigenvectors stay real.
function schur2realeigvecs(T::StridedMatrix{<:BlasReal})
    n = checksquare(T)
    # reject quasi-triangular input with nonzero subdiagonal entries
    for i in 1:(n - 1)
        iszero(T[i + 1, i]) || throw(ArgumentError("T must be upper triangular"))
    end
    VR = similar(T, n, n)
    VL = similar(T, n, 0)
    select = Vector{BlasInt}(undef, 0)
    trevc!('R', 'A', select, T, VL, VR)
    return _normalizevecs!(VR)
end
# Right eigenvectors of a real Schur factor for the selection `which`.
# Columns of VR follow the order of `which2`; 2x2 diagonal blocks are handled
# by requesting both real columns and recombining them into the complex
# conjugate pair.
function schur2eigvecs(T::AbstractMatrix{<:BlasReal}, which::AbstractVector{Int})
    n = checksquare(T)
    which2 = unique(which)
    length(which2) == length(which) ||
        throw(ArgumentError("which should contain unique values"))
    m = length(which2)
    VR = similar(T, Complex{eltype(T)}, n, m)
    VR′ = similar(T, n, 2)  # scratch: at most one (possibly 2x2) block at a time
    VL′ = similar(T, n, 0)
    select = zeros(BlasInt, n)
    i = 1
    while i <= n
        if i == n || iszero(T[i + 1, i])
            # 1x1 block at position i: fetch its eigenvector only if requested
            j = findfirst(isequal(i), which2)
            if j !== nothing
                select[i] = one(BlasInt)
                trevc!('R', 'S', select, T, VL′, VR′)
                @inbounds @simd for k in 1:n
                    VR[k, j] = VR′[k, 1]
                end
                select[i] = zero(BlasInt)
            end
            i += 1
        else
            # 2x2 block at (i, i+1): either member of the pair may be requested
            j1 = findfirst(isequal(i), which2)
            j2 = findfirst(isequal(i + 1), which2)
            if j1 !== nothing || j2 !== nothing
                select[i] = one(BlasInt)
                select[i + 1] = one(BlasInt)
                trevc!('R', 'S', select, T, VL′, VR′)
                @inbounds @simd for k in 1:n
                    if j1 !== nothing
                        VR[k, j1] = VR′[k, 1] + im * VR′[k, 2]
                    end
                    if j2 !== nothing
                        VR[k, j2] = VR′[k, 1] - im * VR′[k, 2]
                    end
                end
                select[i] = zero(BlasInt)
                select[i + 1] = zero(BlasInt)
            end
            i += 2
        end
    end
    return _normalizevecs!(VR)
end
# Permute the eigenvalues D and the columns of V in place according to `perm`,
# following the permutation cycles so only pairwise swaps are needed.
function permuteeig!(D::AbstractVector{S},
                     V::AbstractMatrix{S},
                     perm::AbstractVector{Int}) where {S}
    n = checksquare(V)
    p = collect(perm) # makes copy cause will be overwritten
    isperm(p) && length(p) == n ||
        throw(ArgumentError("not a valid permutation of length $n"))
    i = 1
    @inbounds while true
        if p[i] == i
            # current cycle finished: scan for the next unfinished position
            i = 1
            while i <= n && p[i] == i
                i += 1
            end
            i > n && break  # all positions settled
        else
            # swap position i with its target and continue along the cycle
            iprev = findfirst(isequal(i), p)
            inext = p[i]
            p[iprev] = inext
            p[i] = i
            D[i], D[inext] = D[inext], D[i]
            @simd for j in 1:n
                V[j, i], V[j, inext] = V[j, inext], V[j, i]
            end
            i = inext
        end
    end
    return D, V
end
# Convenience method: reorder T starting from Q = identity.
function permuteschur!(T::AbstractMatrix{<:BlasFloat}, p::AbstractVector{Int})
    return permuteschur!(T, one(T), p)
end
# Reorder the diagonal of a complex Schur factor T (rotating Q along) so that
# the eigenvalues appear in the order requested by `order`.
function permuteschur!(T::AbstractMatrix{S},
                       Q::AbstractMatrix{S},
                       order::AbstractVector{Int}) where {S<:BlasComplex}
    n = checksquare(T)
    p = collect(order) # makes copy cause will be overwritten
    @inbounds for i in 1:length(p)
        ifirst::BlasInt = p[i]
        ilast::BlasInt = i
        T, Q = LAPACK.trexc!(ifirst, ilast, T, Q)
        # moving row/column `ifirst` up to position i shifts every pending
        # target index in between down by one
        for k in (i + 1):length(p)
            if p[k] < p[i]
                p[k] += 1
            end
        end
    end
    return T, Q, schur2eigvals(T)
end
# Real (quasi-triangular) variant: 1x1 and 2x2 diagonal blocks must be moved
# as units, so a 2x2 block can only be permuted when both of its positions
# are requested adjacently in `order`.
function permuteschur!(T::AbstractMatrix{S},
                       Q::AbstractMatrix{S},
                       order::AbstractVector{Int}) where {S<:BlasReal}
    n = checksquare(T)
    p = collect(order) # makes copy cause will be overwritten
    i = 1
    @inbounds while i <= length(p)
        ifirst::BlasInt = p[i]
        ilast::BlasInt = i
        if ifirst == n || iszero(T[ifirst + 1, ifirst])
            # 1x1 block: move it up and shift pending indices by one
            T, Q = LAPACK.trexc!(ifirst, ilast, T, Q)
            @inbounds for k in (i + 1):length(p)
                if p[k] < p[i]
                    p[k] += 1
                end
            end
            i += 1
        else
            # 2x2 block: the next requested position must be its partner
            p[i + 1] == ifirst + 1 ||
                error("cannot split 2x2 blocks when permuting schur decomposition")
            T, Q = LAPACK.trexc!(ifirst, ilast, T, Q)
            @inbounds for k in (i + 2):length(p)
                if p[k] < p[i]
                    p[k] += 2
                end
            end
            i += 2
        end
    end
    return T, Q, schur2eigvals(T)
end
# Reorder a Schur decomposition so that the eigenvalue cluster marked by
# `select` occupies the leading block; 'N' skips condition estimation and
# 'V' updates Q alongside T.
function partitionschur!(T::AbstractMatrix{S},
                         Q::AbstractMatrix{S},
                         select::AbstractVector{Bool}) where {S<:BlasFloat}
    T, Q, vals, = trsen!('N', 'V', convert(Vector{BlasInt}, select), T, Q)
    return T, Q, vals
end
# redefine LAPACK interface to tridiagonal eigenvalue problem
# Wrapper around LAPACK ?stegr: eigenvalues `w` and eigenvectors `Z` of a
# real symmetric tridiagonal matrix with diagonal `dv` and off-diagonal `ev`.
# Uses the standard LAPACK two-pass protocol: a first call with lwork = -1
# queries the optimal workspace sizes, the second call does the work.
for (stegr, elty) in ((:dstegr_, :Float64), (:sstegr_, :Float32))
    @eval begin
        function stegr!(dv::AbstractVector{$elty},
                        ev::AbstractVector{$elty},
                        Z::AbstractMatrix{$elty})
            require_one_based_indexing(dv, ev, Z)
            chkstride1(dv, ev, Z)
            n = length(dv)
            # ?stegr expects the off-diagonal array to have length n: pad
            # with one trailing zero if only n - 1 entries were given
            if length(ev) == n - 1
                eev = [ev; zero($elty)]
            elseif length(ev) == n
                eev = ev
            else
                throw(DimensionMismatch("ev has length $(length(ev)) but needs one less than dv's length, $n)"))
            end
            checksquare(Z) == n || throw(DimensionMismatch())
            ldz = max(1, stride(Z, 2))
            jobz = 'V'   # compute eigenvectors as well
            range = 'A'  # all eigenvalues; il/iu/vl/vu below are then ignored
            abstol = Vector{$elty}(undef, 1)
            il = 1
            iu = n
            vl = zero($elty)
            vu = zero($elty)
            m = Ref{BlasInt}()
            w = similar(dv, $elty, n)
            isuppz = similar(dv, BlasInt, 2 * size(Z, 2))
            work = Vector{$elty}(undef, 1)
            lwork = BlasInt(-1)  # -1 triggers the workspace-size query
            iwork = Vector{BlasInt}(undef, 1)
            liwork = BlasInt(-1)
            info = Ref{BlasInt}()
            for i in 1:2 # first call returns lwork as work[1] and liwork as iwork[1]
                ccall((@blasfunc($stegr), liblapack),
                      Cvoid,
                      (Ref{UInt8},
                       Ref{UInt8},
                       Ref{BlasInt},
                       Ptr{$elty},
                       Ptr{$elty},
                       Ref{$elty},
                       Ref{$elty},
                       Ref{BlasInt},
                       Ref{BlasInt},
                       Ptr{$elty},
                       Ptr{BlasInt},
                       Ptr{$elty},
                       Ptr{$elty},
                       Ref{BlasInt},
                       Ptr{BlasInt},
                       Ptr{$elty},
                       Ref{BlasInt},
                       Ptr{BlasInt},
                       Ref{BlasInt},
                       Ptr{BlasInt},
                       Clong,
                       Clong),
                      jobz,
                      range,
                      n,
                      dv,
                      eev,
                      vl,
                      vu,
                      il,
                      iu,
                      abstol,
                      m,
                      w,
                      Z,
                      ldz,
                      isuppz,
                      work,
                      lwork,
                      iwork,
                      liwork,
                      info,
                      1,
                      1)
                chklapackerror(info[])
                if i == 1
                    # adopt the optimal workspace sizes reported by LAPACK
                    lwork = BlasInt(work[1])
                    resize!(work, lwork)
                    liwork = iwork[1]
                    resize!(iwork, liwork)
                end
            end
            return w, Z
        end
    end
end
# redefine LAPACK interface to schur
# Real versions of hseqr! (Hessenberg → quasi-triangular Schur form),
# trevc! (eigenvectors of a Schur factor) and trsen! (reorder a Schur
# decomposition). All follow the LAPACK two-pass workspace-query protocol
# where applicable.
for (hseqr, trevc, trsen, elty) in
    ((:dhseqr_, :dtrevc_, :dtrsen_, :Float64), (:shseqr_, :strevc_, :strsen_, :Float32))
    @eval begin
        # Schur factorization of Hessenberg H, accumulating into Z;
        # returns (H in Schur form, Z, complex eigenvalues).
        function hseqr!(H::StridedMatrix{$elty}, Z::StridedMatrix{$elty}=one(H))
            require_one_based_indexing(H, Z)
            chkstride1(H, Z)
            n = checksquare(H)
            checksquare(Z) == n || throw(DimensionMismatch())
            job = 'S'    # compute the full Schur form, not just eigenvalues
            compz = 'V'  # accumulate the transformation into Z
            ilo = 1
            ihi = n
            ldh = stride(H, 2)
            ldz = stride(Z, 2)
            wr = similar(H, $elty, n)  # real parts of the eigenvalues
            wi = similar(H, $elty, n)  # imaginary parts of the eigenvalues
            work = Vector{$elty}(undef, 1)
            lwork = BlasInt(-1)
            info = Ref{BlasInt}()
            for i in 1:2 # first call returns lwork as work[1]
                ccall((@blasfunc($hseqr), liblapack),
                      Cvoid,
                      (Ref{UInt8},
                       Ref{UInt8},
                       Ref{BlasInt},
                       Ref{BlasInt},
                       Ref{BlasInt},
                       Ptr{$elty},
                       Ref{BlasInt},
                       Ptr{$elty},
                       Ptr{$elty},
                       Ptr{$elty},
                       Ref{BlasInt},
                       Ptr{$elty},
                       Ref{BlasInt},
                       Ptr{BlasInt},
                       Clong,
                       Clong),
                      job,
                      compz,
                      n,
                      ilo,
                      ihi,
                      H,
                      ldh,
                      wr,
                      wi,
                      Z,
                      ldz,
                      work,
                      lwork,
                      info,
                      1,
                      1)
                chklapackerror(info[])
                if i == 1
                    lwork = BlasInt(real(work[1]))
                    resize!(work, lwork)
                end
            end
            return H, Z, complex.(wr, wi)
        end
        # Eigenvectors of a quasi-triangular Schur factor T; `side` selects
        # left/right/both, `howmny` all ('A') or selected ('S') vectors.
        function trevc!(side::Char,
                        howmny::Char,
                        select::StridedVector{BlasInt},
                        T::AbstractMatrix{$elty},
                        VL::AbstractMatrix{$elty},
                        VR::AbstractMatrix{$elty})
            # Extract
            if side ∉ ['L', 'R', 'B']
                throw(ArgumentError("side argument must be 'L' (left eigenvectors), 'R' (right eigenvectors), or 'B' (both), got $side"))
            end
            n = checksquare(T)
            # mm = number of columns available for storing eigenvectors
            mm = side == 'L' ? size(VL, 2) :
                 (side == 'R' ? size(VR, 2) : min(size(VL, 2), size(VR, 2)))
            ldt, ldvl, ldvr = stride(T, 2), stride(VL, 2), stride(VR, 2)
            # Check
            chkstride1(T, select, VL, VR)
            # Allocate
            m = Ref{BlasInt}()  # number of columns actually used (output)
            work = Vector{$elty}(undef, 3n)
            info = Ref{BlasInt}()
            ccall((@blasfunc($trevc), liblapack),
                  Cvoid,
                  (Ref{UInt8},
                   Ref{UInt8},
                   Ptr{BlasInt},
                   Ref{BlasInt},
                   Ptr{$elty},
                   Ref{BlasInt},
                   Ptr{$elty},
                   Ref{BlasInt},
                   Ptr{$elty},
                   Ref{BlasInt},
                   Ref{BlasInt},
                   Ptr{BlasInt},
                   Ptr{$elty},
                   Ptr{BlasInt},
                   Clong,
                   Clong),
                  side,
                  howmny,
                  select,
                  n,
                  T,
                  ldt,
                  VL,
                  ldvl,
                  VR,
                  ldvr,
                  mm,
                  m,
                  work,
                  info,
                  1,
                  1)
            chklapackerror(info[])
            return VL, VR, m
        end
        # Reorder a real Schur decomposition so that the eigenvalues marked
        # by `select` come first; also returns the reordered eigenvalues and
        # the condition estimates s and sep (when requested via `job`).
        function trsen!(job::AbstractChar, compq::AbstractChar,
                        select::AbstractVector{BlasInt},
                        T::AbstractMatrix{$elty}, Q::AbstractMatrix{$elty})
            chkstride1(T, Q, select)
            n = checksquare(T)
            checksquare(Q) == n || throw(DimensionMismatch())
            length(select) == n || throw(DimensionMismatch())
            ldt = max(1, stride(T, 2))
            ldq = max(1, stride(Q, 2))
            wr = similar(T, $elty, n)
            wi = similar(T, $elty, n)
            m = sum(select)  # size of the selected cluster
            work = Vector{$elty}(undef, 1)
            lwork = BlasInt(-1)
            iwork = Vector{BlasInt}(undef, 1)
            liwork = BlasInt(-1)
            info = Ref{BlasInt}()
            select = convert(Array{BlasInt}, select)
            s = Ref{$elty}(zero($elty))
            sep = Ref{$elty}(zero($elty))
            for i in 1:2 # first call returns lwork as work[1] and liwork as iwork[1]
                ccall((@blasfunc($trsen), liblapack), Cvoid,
                      (Ref{UInt8}, Ref{UInt8}, Ptr{BlasInt}, Ref{BlasInt},
                       Ptr{$elty}, Ref{BlasInt}, Ptr{$elty}, Ref{BlasInt},
                       Ptr{$elty}, Ptr{$elty}, Ref{BlasInt}, Ref{$elty}, Ref{$elty},
                       Ptr{$elty}, Ref{BlasInt}, Ptr{BlasInt}, Ref{BlasInt},
                       Ptr{BlasInt}, Clong, Clong),
                      job, compq, select, n,
                      T, ldt, Q, ldq,
                      wr, wi, m, s, sep,
                      work, lwork, iwork, liwork,
                      info, 1, 1)
                chklapackerror(info[])
                if i == 1 # only estimated optimal lwork, liwork
                    lwork = BlasInt(real(work[1]))
                    resize!(work, lwork)
                    liwork = BlasInt(real(iwork[1]))
                    resize!(iwork, liwork)
                end
            end
            return T, Q, complex.(wr, wi), s[], sep[]
        end
    end
end
# Complex versions of hseqr!, trevc! and trsen!: the Schur factor is truly
# upper triangular, eigenvalues are a single complex vector `w`, and trevc!
# needs an additional real workspace `rwork`.
for (hseqr, trevc, trsen, elty, relty) in
    ((:zhseqr_, :ztrevc_, :ztrsen_, :ComplexF64, :Float64),
     (:chseqr_, :ctrevc_, :ctrsen_, :ComplexF32, :Float32))
    @eval begin
        # Schur factorization of a complex Hessenberg matrix H.
        function hseqr!(H::AbstractMatrix{$elty}, Z::AbstractMatrix{$elty}=one(H))
            require_one_based_indexing(H, Z)
            chkstride1(H, Z)
            n = checksquare(H)
            checksquare(Z) == n || throw(DimensionMismatch())
            job = 'S'    # compute the full Schur form
            compz = 'V'  # accumulate the transformation into Z
            ilo = 1
            ihi = n
            ldh = stride(H, 2)
            ldz = stride(Z, 2)
            w = similar(H, $elty, n)  # complex eigenvalues
            work = Vector{$elty}(undef, 1)
            lwork = BlasInt(-1)
            info = Ref{BlasInt}()
            for i in 1:2 # first call returns lwork as work[1]
                ccall((@blasfunc($hseqr), liblapack),
                      Cvoid,
                      (Ref{UInt8},
                       Ref{UInt8},
                       Ref{BlasInt},
                       Ref{BlasInt},
                       Ref{BlasInt},
                       Ptr{$elty},
                       Ref{BlasInt},
                       Ptr{$elty},
                       Ptr{$elty},
                       Ref{BlasInt},
                       Ptr{$elty},
                       Ref{BlasInt},
                       Ptr{BlasInt},
                       Clong,
                       Clong),
                      job,
                      compz,
                      n,
                      ilo,
                      ihi,
                      H,
                      ldh,
                      w,
                      Z,
                      ldz,
                      work,
                      lwork,
                      info,
                      1,
                      1)
                chklapackerror(info[])
                if i == 1
                    lwork = BlasInt(real(work[1]))
                    resize!(work, lwork)
                end
            end
            return H, Z, w
        end
        # Eigenvectors of a complex (upper triangular) Schur factor T.
        function trevc!(side::Char,
                        howmny::Char,
                        select::AbstractVector{BlasInt},
                        T::AbstractMatrix{$elty},
                        VL::AbstractMatrix{$elty}=similar(T),
                        VR::AbstractMatrix{$elty}=similar(T))
            # Check
            require_one_based_indexing(select, T, VL, VR)
            chkstride1(T, select, VL, VR)
            # Extract
            if side ∉ ['L', 'R', 'B']
                throw(ArgumentError("side argument must be 'L' (left eigenvectors), 'R' (right eigenvectors), or 'B' (both), got $side"))
            end
            n = checksquare(T)
            # mm = number of columns available for storing eigenvectors
            mm = side == 'L' ? size(VL, 2) :
                 (side == 'R' ? size(VR, 2) : min(size(VL, 2), size(VR, 2)))
            ldt, ldvl, ldvr = stride(T, 2), stride(VL, 2), stride(VR, 2)
            # Allocate
            m = Ref{BlasInt}()  # number of columns actually used (output)
            work = Vector{$elty}(undef, 2n)
            rwork = Vector{$relty}(undef, n)  # real workspace (complex variant only)
            info = Ref{BlasInt}()
            ccall((@blasfunc($trevc), liblapack),
                  Cvoid,
                  (Ref{UInt8},
                   Ref{UInt8},
                   Ptr{BlasInt},
                   Ref{BlasInt},
                   Ptr{$elty},
                   Ref{BlasInt},
                   Ptr{$elty},
                   Ref{BlasInt},
                   Ptr{$elty},
                   Ref{BlasInt},
                   Ref{BlasInt},
                   Ptr{BlasInt},
                   Ptr{$elty},
                   Ptr{$relty},
                   Ptr{BlasInt},
                   Clong,
                   Clong),
                  side,
                  howmny,
                  select,
                  n,
                  T,
                  ldt,
                  VL,
                  ldvl,
                  VR,
                  ldvr,
                  mm,
                  m,
                  work,
                  rwork,
                  info,
                  1,
                  1)
            chklapackerror(info[])
            return VL, VR, m
        end
        # Reorder a complex Schur decomposition so that the eigenvalues marked
        # by `select` come first.
        # NOTE(review): unlike the real variant above, this ccall passes no
        # trailing Clong character-length arguments for `job`/`compq` — confirm
        # this is intentional for the targeted LAPACK calling convention.
        function trsen!(job::Char,
                        compq::Char,
                        select::AbstractVector{BlasInt},
                        T::AbstractMatrix{$elty},
                        Q::AbstractMatrix{$elty})
            chkstride1(select, T, Q)
            n = checksquare(T)
            ldt = max(1, stride(T, 2))
            ldq = max(1, stride(Q, 2))
            w = similar(T, $elty, n)
            m = sum(select)  # size of the selected cluster
            work = Vector{$elty}(undef, 1)
            lwork = BlasInt(-1)
            info = Ref{BlasInt}()
            select = convert(Array{BlasInt}, select)
            s = Ref{$relty}(zero($relty))
            sep = Ref{$relty}(zero($relty))
            for i in 1:2 # first call returns lwork as work[1]
                ccall((@blasfunc($trsen), liblapack),
                      Nothing,
                      (Ref{UInt8},
                       Ref{UInt8},
                       Ptr{BlasInt},
                       Ref{BlasInt},
                       Ptr{$elty},
                       Ref{BlasInt},
                       Ptr{$elty},
                       Ref{BlasInt},
                       Ptr{$elty},
                       Ref{BlasInt},
                       Ref{$relty},
                       Ref{$relty},
                       Ptr{$elty},
                       Ref{BlasInt},
                       Ptr{BlasInt}),
                      job,
                      compq,
                      select,
                      n,
                      T,
                      ldt,
                      Q,
                      ldq,
                      w,
                      m,
                      s,
                      sep,
                      work,
                      lwork,
                      info)
                chklapackerror(info[])
                if i == 1 # only estimated optimal lwork, liwork
                    lwork = BlasInt(real(work[1]))
                    resize!(work, lwork)
                end
            end
            return T, Q, w, s[], sep[]
        end
    end
end
| KrylovKit | https://github.com/Jutho/KrylovKit.jl.git |
|
[
"MIT"
] | 0.8.1 | 3c2a016489c38f35160a246c91a3f3353c47bb68 | code | 1656 | """
struct PackedHessenberg{T,V<:AbstractVector{T}} <: AbstractMatrix{T}
data::V
n::Int
end
A custom struct to store a Hessenberg matrix in a packed format (without zeros). Hereto, the
non-zero entries are stored sequentially in vector `data` of length `n(n+1)/2`.
"""
# Packed storage for an n×n Hessenberg matrix: column j stores rows 1:(j+1)
# (capped at n) consecutively in `data`, for a total of (n² + 3n - 2)/2
# entries. Entries with i > j + 1 are structurally zero.
struct PackedHessenberg{T,V<:AbstractVector{T}} <: AbstractMatrix{T}
    data::V
    n::Int
    function PackedHessenberg{T,V}(data::V, n::Int) where {T,V<:AbstractVector{T}}
        # explicit check instead of `@assert`: assertions are not a reliable
        # input validation mechanism (they may be disabled)
        length(data) >= ((n * n + 3 * n - 2) >> 1) ||
            throw(ArgumentError("data too short for a packed $(n)×$(n) Hessenberg matrix"))
        return new{T,V}(data, n)
    end
end
function PackedHessenberg(data::AbstractVector, n::Int)
    return PackedHessenberg{eltype(data),typeof(data)}(data, n)
end
Base.size(A::PackedHessenberg) = (A.n, A.n)
function Base.replace_in_print_matrix(A::PackedHessenberg,
                                      i::Integer,
                                      j::Integer,
                                      s::AbstractString)
    # display structural zeros as a centered dot
    return i <= j + 1 ? s : Base.replace_with_centered_mark(s)
end
# position of entry (i, j) within `data`; only valid for i <= j + 1
@inline _packedindex(i::Integer, j::Integer) = ((j * j + j - 2) >> 1) + i
function Base.getindex(A::PackedHessenberg{T}, i::Integer, j::Integer) where {T}
    @boundscheck checkbounds(A, i, j)
    if i > j + 1
        return zero(T)  # structurally zero below the first subdiagonal
    else
        return A.data[_packedindex(i, j)]
    end
end
function Base.setindex!(A::PackedHessenberg{T}, v, i::Integer, j::Integer) where {T}
    @boundscheck checkbounds(A, i, j)
    if i > j + 1
        # Structurally-zero region: storing zero is a no-op, anything else is
        # an error. (Bug fix: previously a zero written here fell through to
        # the packed-index formula and silently corrupted an unrelated
        # stored entry, e.g. `A[3, 1] = 0` overwrote the slot of A[1, 2].)
        iszero(v) || throw(ReadOnlyMemoryError())
    else
        A.data[_packedindex(i, j)] = v
    end
    return v
end
Base.IndexStyle(::Type{<:PackedHessenberg}) = Base.IndexCartesian()
# TODO: add more methods from the AbstractArray interface
| KrylovKit | https://github.com/Jutho/KrylovKit.jl.git |
|
[
"MIT"
] | 0.8.1 | 3c2a016489c38f35160a246c91a3f3353c47bb68 | code | 4123 | # Elementary Householder reflection
# Elementary Householder reflector 1 - β * v * v', acting only on the index
# range `r` of whatever vector or matrix it is applied to.
struct Householder{T,V<:AbstractVector,R<:IndexRange}
    β::T
    v::V
    r::R
end
Base.adjoint(H::Householder) = Householder(conj(H.β), H.v, H.r)
# Construct the Householder reflector that maps x[r] onto a positive multiple
# of the unit vector at position k; also returns that multiple ν.
function householder(x::AbstractVector, r::IndexRange=axes(x, 1), k=first(r))
    i = findfirst(isequal(k), r)
    i isa Nothing && error("k = $k should be in the range r = $r")
    β, v, ν = _householder!(x[r], i)
    return Householder(β, v, r), ν
end
# Householder reflector h that zeros the elements A[r,col] (except for A[k,col]) upon lmul!(A,h)
function householder(A::AbstractMatrix, r::IndexRange, col::Int, k=first(r))
    i = findfirst(isequal(k), r)
    i isa Nothing && error("k = $k should be in the range r = $r")
    β, v, ν = _householder!(A[r, col], i)
    return Householder(β, v, r), ν
end
# Householder reflector that zeros the elements A[row,r] (except for A[row,k]) upon rmulc!(A,h)
function householder(A::AbstractMatrix, row::Int, r::IndexRange, k=first(r))
    i = findfirst(isequal(k), r)
    i isa Nothing && error("k = $k should be in the range r = $r")
    # the row is conjugated because the reflector acts from the right here
    β, v, ν = _householder!(conj!(A[row, r]), i)
    return Householder(β, v, r), ν
end
# generate Householder vector based on vector v, such that applying the reflection
# to v yields a vector with single non-zero element on position i, whose value is
# positive and thus equal to norm(v)
function _householder!(v::AbstractVector{T}, i::Int) where {T}
    β::T = zero(T)
    @inbounds begin
        # σ = squared norm of v excluding entry i
        σ = abs2(zero(T))
        @simd for k in 1:(i - 1)
            σ += abs2(v[k])
        end
        @simd for k in (i + 1):length(v)
            σ += abs2(v[k])
        end
        vi = v[i]
        ν = sqrt(abs2(vi) + σ)  # ν = norm(v): target value at position i
        if iszero(σ) && vi == ν
            # v is already a positive multiple of e_i: identity reflector (β = 0)
            β = zero(vi)
        else
            # compute vi - ν; the second branch is the algebraically
            # equivalent form that avoids cancellation when real(vi) >= 0
            if real(vi) < 0
                vi = vi - ν
            else
                vi = ((vi - conj(vi)) * ν - σ) / (conj(vi) + ν)
            end
            # rescale so that the Householder vector has v[i] == 1
            @simd for k in 1:(i - 1)
                v[k] /= vi
            end
            v[i] = 1
            @simd for k in (i + 1):length(v)
                v[k] /= vi
            end
            β = -conj(vi) / (ν)
        end
    end
    return β, v, ν
end
# Apply the reflector to a vector in place: x[r] ← x[r] - β * v * (v' * x[r]).
function LinearAlgebra.lmul!(H::Householder, x::AbstractVector)
    v = H.v
    r = H.r
    β = H.β
    iszero(β) && return x  # β == 0 encodes the identity reflector
    @inbounds begin
        # μ = β * (v' * x[r])
        μ::eltype(x) = zero(eltype(x))
        i = 1
        @simd for j in r
            μ += conj(v[i]) * x[j]
            i += 1
        end
        μ *= β
        # x[r] -= μ * v
        i = 1
        @simd for j in H.r
            x[j] -= μ * v[i]
            i += 1
        end
    end
    return x
end
# Apply the reflector to selected columns of A in place:
# A[r, k] ← A[r, k] - β * v * (v' * A[r, k]) for each k in cols.
function LinearAlgebra.lmul!(H::Householder, A::AbstractMatrix, cols=axes(A, 2))
    v = H.v
    r = H.r
    β = H.β
    iszero(β) && return A  # β == 0 encodes the identity reflector
    @inbounds begin
        for k in cols
            # μ = β * (v' * A[r, k])
            μ::eltype(A) = zero(eltype(A))
            i = 1
            @simd for j in r
                μ += conj(v[i]) * A[j, k]
                i += 1
            end
            μ *= β
            i = 1
            @simd for j in H.r
                A[j, k] -= μ * v[i]
                i += 1
            end
        end
    end
    return A
end
# Apply the reflector from the right to selected rows of A in place:
# A[rows, r] ← A[rows, r] - β * (A[rows, r] * v) * v'.
function LinearAlgebra.rmul!(A::AbstractMatrix, H::Householder, rows=axes(A, 1))
    v = H.v
    r = H.r
    β = H.β
    iszero(β) && return A  # β == 0 encodes the identity reflector
    w = similar(A, length(rows))  # w = A[rows, r] * v
    fill!(w, zero(eltype(w)))
    @inbounds begin
        l = 1
        for k in r
            j = 1
            vl = v[l]
            @simd for i in rows
                w[j] += A[i, k] * vl
                j += 1
            end
            l += 1
        end
        # rank-1 update: A[rows, r] -= β * w * v'
        l = 1
        for k in r
            j = 1
            vl = β * conj(v[l])
            @simd for i in rows
                A[i, k] -= w[j] * vl
                j += 1
            end
            l += 1
        end
    end
    return A
end
# Apply the reflector from the right to an orthonormal basis:
# b[r] ← b[r] - β * w * v', with w the linear combination of basis vectors.
function LinearAlgebra.rmul!(b::OrthonormalBasis, H::Householder)
    v = H.v
    r = H.r
    β = H.β
    iszero(β) && return b  # β == 0 encodes the identity reflector
    w = zerovector(b[first(r)])
    @inbounds begin
        # presumably w accumulates the combination of b[r] with coefficients v,
        # and rank1update! then subtracts β * w * v' — see unproject!! / rank1update!
        w = unproject!!(w, b, v, 1, 0, r)
        b = rank1update!(b, w, v, -β, 1, r)
    end
    return b
end
|
[
"MIT"
] | 0.8.1 | 3c2a016489c38f35160a246c91a3f3353c47bb68 | code | 19834 | """
"""
    # expert version:
    schursolve(f, x₀, howmany, which, algorithm)

Compute a partial Schur decomposition containing `howmany` eigenvalues from the linear map
encoded in the matrix or function `A`. Return the reduced Schur matrix, the basis of Schur
vectors, the extracted eigenvalues and a `ConvergenceInfo` structure.

See also [`eigsolve`](@ref) to obtain the eigenvectors instead. For real symmetric or
complex hermitian problems, the (partial) Schur decomposition is identical to the (partial)
eigenvalue decomposition, and `eigsolve` should always be used.

### Arguments:
The linear map can be an `AbstractMatrix` (dense or sparse) or a general function or
callable object, that acts on vector like objects similar to `x₀`, which is the starting
guess from which a Krylov subspace will be built. `howmany` specifies how many Schur vectors
should be converged before the algorithm terminates; `which` specifies which eigenvalues
should be targeted. Valid specifications of `which` are

- `LM`: eigenvalues of largest magnitude
- `LR`: eigenvalues with largest (most positive) real part
- `SR`: eigenvalues with smallest (most negative) real part
- `LI`: eigenvalues with largest (most positive) imaginary part, only if `T <: Complex`
- `SI`: eigenvalues with smallest (most negative) imaginary part, only if `T <: Complex`
- [`EigSorter(f; rev = false)`](@ref): eigenvalues `λ` that appear first (or last if
  `rev == true`) when sorted by `f(λ)`

!!! note "Note about selecting `which` eigenvalues"

    Krylov methods work well for extremal eigenvalues, i.e. eigenvalues on the periphery of
    the spectrum of the linear map. All of the valid `Symbol`s for `which` have this
    property, but could also be specified using `EigSorter`, e.g. `:LM` is equivalent to
    `EigSorter(abs; rev = true)`. Note that smallest magnitude sorting is obtained using
    e.g. `EigSorter(abs; rev = false)`, but since no (shift-and)-invert is used, this will
    only be successful if you somehow know that eigenvalues close to zero are also close
    to the periphery of the spectrum.

!!! warning "Degenerate eigenvalues"

    From a theoretical point of view, Krylov methods can at most find a single eigenvector
    associated with a targeted eigenvalue, even if the latter is degenerate. In the case of
    a degenerate eigenvalue, the specific eigenvector that is returned is determined by the
    starting vector `x₀`. For large problems, this turns out to be less of an issue in
    practice, as often a second linearly independent eigenvector is generated out of the
    numerical noise resulting from the orthogonalisation steps in the Lanczos or Arnoldi
    iteration. Nonetheless, it is important to take this into account and to try not to
    depend on this potentially fragile behaviour, especially for smaller problems.

The `algorithm` argument currently only supports an instance of [`Arnoldi`](@ref), which
is where the parameters of the Krylov method (such as Krylov dimension and maximum number
of iterations) can be specified. Since `schursolve` is less commonly used than `eigsolve`,
it only supports this expert mode call syntax and no convenient keyword interface is
currently available.

### Return values:
The return value is always of the form `T, vecs, vals, info = schursolve(...)` with

- `T`: a `Matrix` containing the partial Schur decomposition of the linear map, i.e. its
  elements are given by `T[i,j] = dot(vecs[i], f(vecs[j]))`. It is of Schur form, i.e.
  upper triangular in case of complex arithmetic, and block upper triangular (with at most
  2x2 blocks) in case of real arithmetic.
- `vecs`: a `Vector` of corresponding Schur vectors, of the same length as `vals`. Note
  that Schur vectors are not returned as a matrix, as the linear map could act on any
  custom Julia type with vector like behavior, i.e. the elements of the list `vecs` are
  objects that are typically similar to the starting guess `x₀`, up to a possibly
  different `eltype`. When the linear map is a simple `AbstractMatrix`, `vecs` will be
  `Vector{Vector{<:Number}}`. Schur vectors are by definition orthogonal, i.e.
  `dot(vecs[i],vecs[j]) = I[i,j]`. Note that Schur vectors are real if the problem (i.e.
  the linear map and the initial guess) are real.
- `vals`: a `Vector` of eigenvalues, i.e. the diagonal elements of `T` in case of complex
  arithmetic, or extracted from the diagonal blocks in case of real arithmetic. Note that
  `vals` will always be complex, independent of the underlying arithmetic.
- `info`: an object of type [`ConvergenceInfo`], which has the following fields
  + `info.converged::Int`: indicates how many eigenvalues and Schur vectors were
    actually converged to the specified tolerance (see below under keyword arguments)
  + `info.residuals::Vector`: a list of the same length as `vals` containing the actual
    residuals

    ```julia
    info.residuals[i] = f(vecs[i]) - sum(vecs[j] * T[j, i] for j in 1:i+1)
    ```

    where `T[i+1,i]` is definitely zero in case of complex arithmetic and possibly zero
    in case of real arithmetic
  + `info.normres::Vector{<:Real}`: list of the same length as `vals` containing the
    norm of the residual for every Schur vector, i.e.
    `info.normres[i] = norm(info.residual[i])`
  + `info.numops::Int`: number of times the linear map was applied, i.e. number of times
    `f` was called, or a vector was multiplied with `A`
  + `info.numiter::Int`: number of times the Krylov subspace was restarted (see below)

!!! warning "Check for convergence"

    No warning is printed if not all requested eigenvalues were converged, so always check
    if `info.converged >= howmany`.

### Algorithm
The actual algorithm is an implementation of the Krylov-Schur algorithm, where the
[`Arnoldi`](@ref) algorithm is used to generate the Krylov subspace. During the algorithm,
the Krylov subspace is dynamically grown and shrunk, i.e. the restarts are so-called thick
restarts where a part of the current Krylov subspace is kept.
"""
function schursolve(A, x₀, howmany::Int, which::Selector, alg::Arnoldi)
    T, U, fact, converged, numiter, numops = _schursolve(A, x₀, howmany, which, alg)
    # In real arithmetic, a complex conjugate eigenvalue pair appears as a 2x2
    # block on the diagonal of `T`; never split such a block at position
    # `howmany`, but include one extra Schur vector instead.
    if eltype(T) <: Real && howmany < length(fact) && T[howmany + 1, howmany] != 0
        howmany += 1
    end
    # If more Schur vectors happened to converge than requested, return all of them.
    if converged > howmany
        howmany = converged
    end
    TT = view(T, 1:howmany, 1:howmany)
    values = schur2eigvals(TT)
    # Schur vectors in the original vector space: linear combinations of the
    # Krylov basis with coefficients given by the columns of `U`.
    vectors = let B = basis(fact)
        [B * u for u in cols(U, 1:howmany)]
    end
    # Residual of Schur vector i is the Krylov residual scaled by the last
    # coefficient of the corresponding column of `U`.
    residuals = let r = residual(fact)
        [scale(r, last(u)) for u in cols(U, 1:howmany)]
    end
    normresiduals = [normres(fact) * abs(last(u)) for u in cols(U, 1:howmany)]
    if alg.verbosity > 0
        if converged < howmany
            @warn """Arnoldi schursolve finished without convergence after $numiter iterations:
            * $converged eigenvalues converged
            * norm of residuals = $((normresiduals...,))"""
        else
            @info """Arnoldi schursolve finished after $numiter iterations:
            * $converged eigenvalues converged
            * norm of residuals = $((normresiduals...,))"""
        end
    end
    return TT,
           vectors,
           values,
           ConvergenceInfo(converged, residuals, normresiduals, numiter, numops)
end
# Expert `eigsolve` method for the `Arnoldi` algorithm: computes a partial
# Schur decomposition via `_schursolve` and transforms it into eigenvalues and
# eigenvectors. The `alg_rrule` keyword is only relevant for reverse-mode AD.
function eigsolve(A, x₀, howmany::Int, which::Selector, alg::Arnoldi; alg_rrule=alg)
    T, U, fact, converged, numiter, numops = _schursolve(A, x₀, howmany, which, alg)
    # In real arithmetic, do not split a 2x2 block (complex conjugate pair) of
    # the real Schur factorization at position `howmany`; include one extra.
    if eltype(T) <: Real && howmany < length(fact) && T[howmany + 1, howmany] != 0
        howmany += 1
    end
    # Return all converged eigenpairs, even if more than requested.
    if converged > howmany
        howmany = converged
    end
    d = min(howmany, size(T, 2))
    TT = view(T, 1:d, 1:d)
    values = schur2eigvals(TT)
    # Compute eigenvectors: combine the Schur basis coefficients `U` with the
    # eigenvectors of the (quasi-)triangular matrix `TT`.
    V = view(U, :, 1:d) * schur2eigvecs(TT)
    vectors = let B = basis(fact)
        [B * v for v in cols(V)]
    end
    # Residual of eigenvector i is the Krylov residual scaled by the last
    # coefficient of the corresponding column of `V`.
    residuals = let r = residual(fact)
        [scale(r, last(v)) for v in cols(V)]
    end
    normresiduals = [normres(fact) * abs(last(v)) for v in cols(V)]
    if alg.verbosity > 0
        if converged < howmany
            @warn """Arnoldi eigsolve finished without convergence after $numiter iterations:
            * $converged eigenvalues converged
            * norm of residuals = $((normresiduals...,))
            * number of operations = $numops"""
        else
            @info """Arnoldi eigsolve finished after $numiter iterations:
            * $converged eigenvalues converged
            * norm of residuals = $((normresiduals...,))
            * number of operations = $numops"""
        end
    end
    return values,
           vectors,
           ConvergenceInfo(converged, residuals, normresiduals, numiter, numops)
end
"""
    # expert version:
    realeigsolve(f, x₀, howmany, which, algorithm; alg_rrule=algorithm)

Compute the first `howmany` eigenvalues (according to the order specified by `which`)
from the real linear map encoded in the matrix `A` or by the function `f`, with the guarantee
that these eigenvalues (and thus their associated eigenvectors) are real.
Return eigenvalues, eigenvectors and a `ConvergenceInfo` structure.

### Arguments:
The linear map can be an `AbstractMatrix` (dense or sparse) or a general function or
callable object. A starting vector `x₀` needs to be provided. Note that `x₀` does not need
to be of type `AbstractVector`; any type that behaves as a vector and supports the required
interface (see KrylovKit docs) is accepted.

The argument `howmany` specifies how many eigenvalues should be computed; `which` specifies
which eigenvalues should be targeted. Valid specifications of `which` for real
problems are given by

- `:LM`: eigenvalues of largest magnitude
- `:LR`: eigenvalues with largest (most positive) real part
- `:SR`: eigenvalues with smallest (most negative) real part
- [`EigSorter(f; rev = false)`](@ref): eigenvalues `λ` that appear first (or last if
  `rev == true`) when sorted by `f(λ)`

!!! note "Note about selecting `which` eigenvalues"

    Krylov methods work well for extremal eigenvalues, i.e. eigenvalues on the periphery of
    the spectrum of the linear map. All of the valid `Symbol`s for `which` have this
    property, but could also be specified using `EigSorter`, e.g. `:LM` is equivalent to
    `EigSorter(abs; rev = true)`. Note that smallest magnitude sorting is obtained using
    e.g. `EigSorter(abs; rev = false)`, but since no (shift-and)-invert is used, this will
    only be successful if you somehow know that eigenvalues close to zero are also close
    to the periphery of the spectrum.

!!! warning "Degenerate eigenvalues"

    From a theoretical point of view, Krylov methods can at most find a single eigenvector
    associated with a targeted eigenvalue, even if the latter is degenerate. In the case of
    a degenerate eigenvalue, the specific eigenvector that is returned is determined by the
    starting vector `x₀`. For large problems, this turns out to be less of an issue in
    practice, as often a second linearly independent eigenvector is generated out of the
    numerical noise resulting from the orthogonalisation steps in the Lanczos or Arnoldi
    iteration. Nonetheless, it is important to take this into account and to try not to
    depend on this potentially fragile behaviour, especially for smaller problems.

The `algorithm` argument currently only supports an instance of [`Arnoldi`](@ref), which
is where the parameters of the Krylov method (such as Krylov dimension and maximum number
of iterations) can be specified. Since `realeigsolve` is less commonly used than `eigsolve`,
it only supports this expert mode call syntax and no convenient keyword interface is
currently available.

The keyword argument `alg_rrule` can be used to specify an algorithm to be used for computing
the `pullback` of `realeigsolve` in the context of reverse-mode automatic differentiation.

### Return values:
The return value is always of the form `vals, vecs, info = eigsolve(...)` with

- `vals`: a `Vector` containing the eigenvalues, of length at least `howmany`, but could
  be longer if more eigenvalues were converged at the same cost. Eigenvalues will be real,
  an `ArgumentError` will be thrown if the first `howmany` eigenvalues ordered according
  to `which` of the linear map are not all real.
- `vecs`: a `Vector` of corresponding eigenvectors, of the same length as `vals`. Note
  that eigenvectors are not returned as a matrix, as the linear map could act on any
  custom Julia type with vector like behavior, i.e. the elements of the list `vecs` are
  objects that are typically similar to the starting guess `x₀`. For a real problem with
  real eigenvalues, also the eigenvectors will be real and no complex arithmetic is used
  anywhere.
- `info`: an object of type [`ConvergenceInfo`], which has the following fields
  + `info.converged::Int`: indicates how many eigenvalues and eigenvectors were actually
    converged to the specified tolerance `tol` (see below under keyword arguments)
  + `info.residual::Vector`: a list of the same length as `vals` containing the
    residuals `info.residual[i] = f(vecs[i]) - vals[i] * vecs[i]`
  + `info.normres::Vector{<:Real}`: list of the same length as `vals` containing the
    norm of the residual `info.normres[i] = norm(info.residual[i])`
  + `info.numops::Int`: number of times the linear map was applied, i.e. number of times
    `f` was called, or a vector was multiplied with `A`
  + `info.numiter::Int`: number of times the Krylov subspace was restarted (see below)

!!! warning "Check for convergence"

    No warning is printed if not all requested eigenvalues were converged, so always check
    if `info.converged >= howmany`.
"""
function realeigsolve(A, x₀, howmany::Int, which::Selector, alg::Arnoldi; alg_rrule=alg)
    T, U, fact, converged, numiter, numops = _schursolve(A, x₀, howmany, which, alg)
    # The Schur factorization is only real when the problem itself is real.
    if !(eltype(T) <: Real)
        throw(ArgumentError("realeigsolve can only be used for real eigenvalue problems"))
    else
        # Any 2x2 diagonal block among the first `howmany` positions of the real
        # Schur factor signals a complex conjugate pair, i.e. non-real eigenvalues.
        # (When howmany == length(fact), T[howmany + 1, howmany] does not exist.)
        allreal = true
        for i in 1:(howmany < length(fact) ? howmany : howmany - 1)
            if T[i + 1, i] != 0
                allreal = false
                break
            end
        end
        allreal || throw(ArgumentError("not all first `howmany` eigenvalues are real"))
    end
    # Also return additional converged eigenvalues, but only as long as they are
    # real, i.e. stop before the next 2x2 block.
    if converged > howmany
        while howmany < converged && T[howmany + 1, howmany] == 0
            howmany += 1
        end
    end
    TT = view(T, 1:howmany, 1:howmany)
    values = diag(TT)
    # Compute eigenvectors; everything stays in real arithmetic
    V = view(U, :, 1:howmany) * schur2realeigvecs(TT)
    vectors = let B = basis(fact)
        [B * v for v in cols(V)]
    end
    # Residual of eigenvector i is the Krylov residual scaled by the last
    # coefficient of the corresponding column of `V`.
    residuals = let r = residual(fact)
        [scale(r, last(v)) for v in cols(V)]
    end
    normresiduals = [normres(fact) * abs(last(v)) for v in cols(V)]
    if alg.verbosity > 0
        if converged < howmany
            @warn """Arnoldi realeigsolve finished without convergence after $numiter iterations:
            * $converged eigenvalues converged
            * norm of residuals = $((normresiduals...,))
            * number of operations = $numops"""
        else
            @info """Arnoldi realeigsolve finished after $numiter iterations:
            * $converged eigenvalues converged
            * norm of residuals = $((normresiduals...,))
            * number of operations = $numops"""
        end
    end
    return values,
           vectors,
           ConvergenceInfo(converged, residuals, normresiduals, numiter, numops)
end
# Core Krylov-Schur routine underlying `schursolve`, `eigsolve` and
# `realeigsolve` with the `Arnoldi` algorithm: builds an Arnoldi factorization,
# repeatedly computes a sorted dense Schur factorization of the Rayleigh
# quotient, and performs thick restarts until at least `howmany` Schur vectors
# have converged or `maxiter` restarts have been performed.
# Returns `(T, U, fact, converged, numiter, numops)`, with `T` the (quasi-)
# triangular Schur factor, `U` the orthogonal transformation from the Krylov
# basis to the Schur basis, and `fact` the final Arnoldi factorization.
function _schursolve(A, x₀, howmany::Int, which::Selector, alg::Arnoldi)
    krylovdim = alg.krylovdim
    maxiter = alg.maxiter
    howmany > krylovdim &&
        error("krylov dimension $(krylovdim) too small to compute $howmany eigenvalues")

    ## FIRST ITERATION: setting up
    numiter = 1
    # initialize arnoldi factorization
    iter = ArnoldiIterator(A, x₀, alg.orth)
    fact = initialize(iter; verbosity=alg.verbosity - 2)
    numops = 1
    sizehint!(fact, krylovdim)
    β = normres(fact)
    tol::eltype(β) = alg.tol

    # allocate storage: HH holds the extended Hessenberg/Schur matrix
    # (one extra row for the residual coefficients), UU the accumulated
    # orthogonal transformation
    HH = fill(zero(eltype(fact)), krylovdim + 1, krylovdim)
    UU = fill(zero(eltype(fact)), krylovdim, krylovdim)

    # initialize storage
    K = length(fact) # == 1
    converged = 0
    local T, U
    while true
        β = normres(fact)
        K = length(fact)

        # An (almost) invariant subspace was found before `howmany` vectors
        # converged: we cannot do better than returning this subspace.
        if β <= tol
            if K < howmany
                @warn "Invariant subspace of dimension $K (up to requested tolerance `tol = $tol`), which is smaller than the number of requested eigenvalues (i.e. `howmany == $howmany`); setting `howmany = $K`."
                howmany = K
            end
        end
        if K == krylovdim || β <= tol || (alg.eager && K >= howmany) # process
            H = view(HH, 1:K, 1:K)
            U = view(UU, 1:K, 1:K)
            f = view(HH, K + 1, 1:K)
            copyto!(U, I)
            copyto!(H, rayleighquotient(fact))

            # compute dense schur factorization, sorted so the targeted
            # eigenvalues come first
            T, U, values = hschur!(H, U)
            by, rev = eigsort(which)
            p = sortperm(values; by=by, rev=rev)
            T, U = permuteschur!(T, U, p)
            # residual norm of Schur vector i is |β * U[K, i]|
            f = mul!(f, view(U, K, :), β)
            converged = 0
            while converged < length(fact) && abs(f[converged + 1]) <= tol
                converged += 1
            end
            # in real arithmetic, do not count half of a 2x2 block (a complex
            # conjugate pair) as converged
            if eltype(T) <: Real &&
               0 < converged < length(fact) &&
               T[converged + 1, converged] != 0
                converged -= 1
            end

            if converged >= howmany
                break
            elseif alg.verbosity > 1
                msg = "Arnoldi schursolve in iter $numiter, krylovdim = $K: "
                msg *= "$converged values converged, normres = ("
                msg *= @sprintf("%.2e", abs(f[1]))
                for i in 2:howmany
                    msg *= ", "
                    msg *= @sprintf("%.2e", abs(f[i]))
                end
                msg *= ")"
                @info msg
            end
        end

        if K < krylovdim # expand
            fact = expand!(iter, fact; verbosity=alg.verbosity - 2)
            numops += 1
        else # shrink
            numiter == maxiter && break

            # Determine how many to keep
            keep = div(3 * krylovdim + 2 * converged, 5) # strictly smaller than krylovdim since converged < howmany <= krylovdim, at least equal to converged
            if eltype(H) <: Real && H[keep + 1, keep] != 0 # we are in the middle of a 2x2 block
                keep += 1 # conservative choice
                keep >= krylovdim &&
                    error("krylov dimension $(krylovdim) too small to compute $howmany eigenvalues")
            end

            # Restore Arnoldi form in the first keep columns, by chasing the
            # residual coefficients back into the subdiagonal with a sequence
            # of Householder reflections
            @inbounds for j in 1:keep
                H[keep + 1, j] = f[j]
            end
            @inbounds for j in keep:-1:1
                h, ν = householder(H, j + 1, 1:j, j)
                H[j + 1, j] = ν
                H[j + 1, 1:(j - 1)] .= 0
                lmul!(h, H)
                rmul!(view(H, 1:j, :), h')
                rmul!(U, h')
            end
            copyto!(rayleighquotient(fact), H) # copy back into fact
            # Update B by applying U
            B = basis(fact)
            basistransform!(B, view(U, :, 1:keep))
            r = residual(fact)
            B[keep + 1] = scale!!(r, 1 / normres(fact))

            # Shrink Arnoldi factorization
            fact = shrink!(fact, keep)
            numiter += 1
        end
    end
    return T, U, fact, converged, numiter, numops
end
| KrylovKit | https://github.com/Jutho/KrylovKit.jl.git |
|
[
"MIT"
] | 0.8.1 | 3c2a016489c38f35160a246c91a3f3353c47bb68 | code | 15089 | """
"""
    eigsolve(A::AbstractMatrix, [x₀, howmany = 1, which = :LM, T = eltype(A)]; kwargs...)
    eigsolve(f, n::Int, [howmany = 1, which = :LM, T = Float64]; kwargs...)
    eigsolve(f, x₀, [howmany = 1, which = :LM]; kwargs...)

    # expert version:
    eigsolve(f, x₀, howmany, which, algorithm; alg_rrule=...)

Compute at least `howmany` eigenvalues from the linear map encoded in the matrix `A` or by
the function `f`. Return eigenvalues, eigenvectors and a `ConvergenceInfo` structure.

### Arguments:
The linear map can be an `AbstractMatrix` (dense or sparse) or a general function or
callable object. If an `AbstractMatrix` is used, a starting vector `x₀` does not need to be
provided, it is then chosen as `rand(T, size(A,1))`. If the linear map is encoded more
generally as a a callable function or method, the best approach is to provide an explicit
starting guess `x₀`. Note that `x₀` does not need to be of type `AbstractVector`; any type
that behaves as a vector and supports the required methods (see KrylovKit docs) is accepted.
If instead of `x₀` an integer `n` is specified, it is assumed that `x₀` is a regular vector
and it is initialized to `rand(T,n)`, where the default value of `T` is `Float64`, unless
specified differently.

The next arguments are optional, but should typically be specified. `howmany` specifies how
many eigenvalues should be computed; `which` specifies which eigenvalues should be
targeted. Valid specifications of `which` are given by

- `:LM`: eigenvalues of largest magnitude
- `:LR`: eigenvalues with largest (most positive) real part
- `:SR`: eigenvalues with smallest (most negative) real part
- `:LI`: eigenvalues with largest (most positive) imaginary part, only if `T <: Complex`
- `:SI`: eigenvalues with smallest (most negative) imaginary part, only if `T <: Complex`
- [`EigSorter(f; rev = false)`](@ref): eigenvalues `λ` that appear first (or last if
  `rev == true`) when sorted by `f(λ)`

!!! note "Note about selecting `which` eigenvalues"

    Krylov methods work well for extremal eigenvalues, i.e. eigenvalues on the periphery of
    the spectrum of the linear map. All of the valid `Symbol`s for `which` have this
    property, but could also be specified using `EigSorter`, e.g. `:LM` is equivalent to
    `EigSorter(abs; rev = true)`. Note that smallest magnitude sorting is obtained using
    e.g. `EigSorter(abs; rev = false)`, but since no (shift-and)-invert is used, this will
    only be successful if you somehow know that eigenvalues close to zero are also close
    to the periphery of the spectrum.

!!! warning "Degenerate eigenvalues"

    From a theoretical point of view, Krylov methods can at most find a single eigenvector
    associated with a targeted eigenvalue, even if the latter is degenerate. In the case of
    a degenerate eigenvalue, the specific eigenvector that is returned is determined by the
    starting vector `x₀`. For large problems, this turns out to be less of an issue in
    practice, as often a second linearly independent eigenvector is generated out of the
    numerical noise resulting from the orthogonalisation steps in the Lanczos or Arnoldi
    iteration. Nonetheless, it is important to take this into account and to try not to
    depend on this potentially fragile behaviour, especially for smaller problems.

The argument `T` acts as a hint in which `Number` type the computation should be performed,
but is not restrictive. If the linear map automatically produces complex values, complex
arithmetic will be used even though `T<:Real` was specified. However, if the linear map and
initial guess are real, approximate eigenvalues will be searched for using a partial Schur
factorization, which implies that complex conjugate eigenvalues come in pairs and cannot
be split. It is then illegal to choose `which` in a way that would treat `λ` and `conj(λ)`
differently, i.e. `:LI` and `:SI` are invalid, as well as any `EigSorter` that would lead
to `by(λ) != by(conj(λ))`.

### Return values:
The return value is always of the form `vals, vecs, info = eigsolve(...)` with

- `vals`: a `Vector` containing the eigenvalues, of length at least `howmany`, but could
  be longer if more eigenvalues were converged at the same cost. Eigenvalues will be real
  if [`Lanczos`](@ref) was used and complex if [`Arnoldi`](@ref) was used (see below).
- `vecs`: a `Vector` of corresponding eigenvectors, of the same length as `vals`. Note
  that eigenvectors are not returned as a matrix, as the linear map could act on any
  custom Julia type with vector like behavior, i.e. the elements of the list `vecs` are
  objects that are typically similar to the starting guess `x₀`, up to a possibly
  different `eltype`. In particular for a general matrix (i.e. with `Arnoldi`) the
  eigenvectors are generally complex and are therefore always returned in a complex
  number format. When the linear map is a simple `AbstractMatrix`, `vecs` will be
  `Vector{Vector{<:Number}}`.
- `info`: an object of type [`ConvergenceInfo`], which has the following fields
  + `info.converged::Int`: indicates how many eigenvalues and eigenvectors were actually
    converged to the specified tolerance `tol` (see below under keyword arguments)
  + `info.residual::Vector`: a list of the same length as `vals` containing the
    residuals `info.residual[i] = f(vecs[i]) - vals[i] * vecs[i]`
  + `info.normres::Vector{<:Real}`: list of the same length as `vals` containing the
    norm of the residual `info.normres[i] = norm(info.residual[i])`
  + `info.numops::Int`: number of times the linear map was applied, i.e. number of times
    `f` was called, or a vector was multiplied with `A`
  + `info.numiter::Int`: number of times the Krylov subspace was restarted (see below)

!!! warning "Check for convergence"

    No warning is printed if not all requested eigenvalues were converged, so always check
    if `info.converged >= howmany`.

### Keyword arguments:
Keyword arguments and their default values are given by:

- `verbosity::Int = 0`: verbosity level, i.e. 0 (no messages), 1 (single message
  at the end), 2 (information after every iteration), 3 (information per Krylov step)
- `tol::Real`: the requested accuracy (corresponding to the 2-norm of the residual for
  Schur vectors, not the eigenvectors). If you work in e.g. single precision (`Float32`),
  you should definitely change the default value.
- `krylovdim::Integer`: the maximum dimension of the Krylov subspace that will be
  constructed. Note that the dimension of the vector space is not known or checked, e.g.
  `x₀` should not necessarily support the `Base.length` function. If you know the actual
  problem dimension is smaller than the default value, it is useful to reduce the value of
  `krylovdim`, though in principle this should be detected.
- `maxiter::Integer`: the number of times the Krylov subspace can be rebuilt; see below
  for further details on the algorithms.
- `orth::Orthogonalizer`: the orthogonalization method to be used, see
  [`Orthogonalizer`](@ref)
- `issymmetric::Bool`: if the linear map is symmetric, only meaningful if `T<:Real`
- `ishermitian::Bool`: if the linear map is hermitian
- `eager::Bool = false`: if true, eagerly compute the eigenvalue or Schur decomposition
  after every expansion of the Krylov subspace to test for convergence, otherwise wait
  until the Krylov subspace has dimension `krylovdim`. This can result in a faster return,
  for example if the initial guess is very good, but also has some overhead, as many more
  dense Schur factorizations need to be computed.

The default values are given by `tol = KrylovDefaults.tol`,
`krylovdim = KrylovDefaults.krylovdim`, `maxiter = KrylovDefaults.maxiter`,
`orth = KrylovDefaults.orth`; see [`KrylovDefaults`](@ref) for details.

The default value for the last two parameters depends on the method. If an `AbstractMatrix`
is used, `issymmetric` and `ishermitian` are checked for that matrix, otherwise the default
values are `issymmetric = false` and `ishermitian = T <: Real && issymmetric`. When values
for the keyword arguments are provided, no checks will be performed even in the matrix case.

The final keyword argument `alg_rrule` is relevant only when `eigsolve` is used in a setting
where reverse-mode automatic differentiation will be used. A custom `ChainRulesCore.rrule` is
defined for `eigsolve`, which can be evaluated using different algorithms that can be specified
via `alg_rrule`. A suitable default is chosen, so this keyword argument should only be used
when this default choice is failing or not performing efficiently. Check the documentation for
more information on the possible values for `alg_rrule` and their implications on the algorithm
being used.

### Algorithm
The final (expert) method, without default values and keyword arguments, is the one that is
finally called, and can also be used directly. Here, one specifies the algorithm explicitly
as either [`Lanczos`](@ref), for real symmetric or complex hermitian problems, or
[`Arnoldi`](@ref), for general problems. Note that these names refer to the process for
building the Krylov subspace, but the actual algorithm is an implementation of the
Krylov-Schur algorithm, which can dynamically shrink and grow the Krylov subspace, i.e. the
restarts are so-called thick restarts where a part of the current Krylov subspace is kept.

!!! note "Note about convergence"

    In case of a general problem, where the `Arnoldi` method is used, convergence of an
    eigenvalue is not based on the norm of the residual `norm(f(vecs[i]) - vals[i]*vecs[i])`
    for the eigenvector but rather on the norm of the residual for the corresponding Schur
    vectors.

    See also [`schursolve`](@ref) if you want to use the partial Schur decomposition
    directly, or if you are not interested in computing the eigenvectors, and want to work
    in real arithmetic all the way through (if the linear map and starting guess are real).
    If you have knowledge that all requested eigenvalues of a real problem will be real,
    and thus also their associated eigenvectors, you can also use [`realeigsolve`](@ref).
"""
function eigsolve end
"""
    EigSorter(by; rev = false)

A simple `struct` to be used in combination with [`eigsolve`](@ref) or [`schursolve`](@ref)
to indicate which eigenvalues need to be targeted, namely those that appear first when
sorted by `by` and possibly in reverse order if `rev == true`.
"""
struct EigSorter{F}
    by::F     # maps an eigenvalue to its sort key
    rev::Bool # if true, sort the keys in descending order
end
EigSorter(f::F; rev=false) where {F} = EigSorter{F}(f, rev)

# Union of all valid types for the `which` argument of `eigsolve` and friends.
const Selector = Union{Symbol,EigSorter}
# Convenience method: when the linear map is given as a matrix, generate a
# random starting vector of matching length with element type `T`.
function eigsolve(A::AbstractMatrix, howmany::Int=1, which::Selector=:LM,
                  T::Type=eltype(A); kwargs...)
    x₀ = rand(T, size(A, 1))
    return eigsolve(A, x₀, howmany, which; kwargs...)
end
# Convenience method: the linear map is a function acting on plain vectors of
# length `n`; generate a random starting vector of that length with eltype `T`.
function eigsolve(f, n::Int, howmany::Int=1, which::Selector=:LM, T::Type=Float64;
                  kwargs...)
    x₀ = rand(T, n)
    return eigsolve(f, x₀, howmany, which; kwargs...)
end
# Main dispatcher: infers the scalar type of the problem, selects a suitable
# algorithm (Lanczos or Arnoldi), validates the eigenvalue selector `which`,
# and forwards to the expert method.
function eigsolve(f, x₀, howmany::Int=1, which::Selector=:LM; kwargs...)
    Tx = typeof(x₀)
    # Infer the scalar type from applying the linear map to `x₀` and taking an
    # inner product with `x₀`.
    Tfx = Core.Compiler.return_type(apply, Tuple{typeof(f),Tx})
    T = Core.Compiler.return_type(dot, Tuple{Tx,Tfx})
    alg = eigselector(f, T; kwargs...)
    checkwhich(which) || error("Unknown eigenvalue selector: which = $which")
    if alg isa Lanczos
        # Hermitian problems have real eigenvalues: sorting by imaginary part
        # is meaningless.
        if which == :LI || which == :SI
            error("Eigenvalue selector which = $which invalid: real eigenvalues expected with Lanczos algorithm")
        end
    elseif T <: Real
        # A real partial Schur factorization cannot distinguish λ from conj(λ),
        # so the selector must treat complex conjugates identically.
        by, rev = eigsort(which)
        if by(+im) != by(-im)
            error("Eigenvalue selector which = $which invalid because it does not treat
            `λ` and `conj(λ)` equally: work in complex arithmetic by providing a complex starting vector `x₀`")
        end
    end
    # Unless explicitly overridden, reuse the algorithm parameters for the
    # reverse-mode AD rule, but always with Arnoldi.
    if haskey(kwargs, :alg_rrule)
        alg_rrule = kwargs[:alg_rrule]
    else
        alg_rrule = Arnoldi(; tol=alg.tol,
                            krylovdim=alg.krylovdim,
                            maxiter=alg.maxiter,
                            eager=alg.eager,
                            orth=alg.orth)
    end
    return eigsolve(f, x₀, howmany, which, alg; alg_rrule=alg_rrule)
end
# Select a Krylov algorithm for a generic linear map: Lanczos for real
# symmetric or (complex) hermitian problems, Arnoldi otherwise.
#
# Fix: accept (and ignore) the `alg_rrule` keyword, like the `AbstractMatrix`
# method below does. The dispatcher `eigsolve(f, x₀, ...)` forwards all of its
# `kwargs...` to `eigselector`, so without this keyword a user-supplied
# `alg_rrule` would raise a keyword-argument `MethodError` whenever the linear
# map is given as a function instead of a matrix.
function eigselector(f,
                     T::Type;
                     issymmetric::Bool=false,
                     ishermitian::Bool=issymmetric && (T <: Real),
                     krylovdim::Int=KrylovDefaults.krylovdim,
                     maxiter::Int=KrylovDefaults.maxiter,
                     tol::Real=KrylovDefaults.tol,
                     orth::Orthogonalizer=KrylovDefaults.orth,
                     eager::Bool=false,
                     verbosity::Int=0,
                     alg_rrule=nothing)
    if (T <: Real && issymmetric) || ishermitian
        return Lanczos(; krylovdim=krylovdim,
                       maxiter=maxiter,
                       tol=tol,
                       orth=orth,
                       eager=eager,
                       verbosity=verbosity)
    else
        return Arnoldi(; krylovdim=krylovdim,
                       maxiter=maxiter,
                       tol=tol,
                       orth=orth,
                       eager=eager,
                       verbosity=verbosity)
    end
end
# Select a Krylov algorithm for an `AbstractMatrix`: symmetry/hermiticity is
# detected from the matrix itself unless overridden by keyword arguments.
# The `alg_rrule` keyword is accepted (and ignored) so that the dispatcher can
# forward its full set of keyword arguments uniformly.
function eigselector(A::AbstractMatrix,
                     T::Type;
                     issymmetric::Bool=T <: Real && LinearAlgebra.issymmetric(A),
                     ishermitian::Bool=issymmetric || LinearAlgebra.ishermitian(A),
                     krylovdim::Int=KrylovDefaults.krylovdim,
                     maxiter::Int=KrylovDefaults.maxiter,
                     tol::Real=KrylovDefaults.tol,
                     orth::Orthogonalizer=KrylovDefaults.orth,
                     eager::Bool=false,
                     verbosity::Int=0,
                     alg_rrule=nothing)
    hermitian = ishermitian || (T <: Real && issymmetric)
    Alg = hermitian ? Lanczos : Arnoldi
    return Alg(; krylovdim=krylovdim,
               maxiter=maxiter,
               tol=tol,
               orth=orth,
               eager=eager,
               verbosity=verbosity)
end
# An `EigSorter` is always a valid eigenvalue selector; a `Symbol` must be one
# of the recognised shorthands.
checkwhich(::EigSorter) = true
checkwhich(s::Symbol) = s === :LM || s === :LR || s === :SR || s === :LI || s === :SI

# For an `EigSorter`, the sorting function and order are stored explicitly.
eigsort(s::EigSorter) = (s.by, s.rev)
# Translate a symbolic eigenvalue selector into a sorting criterion: a key
# function `by` and a reversal flag `rev`, such that sorting the eigenvalues by
# `by` (in reverse order when `rev` is true) puts the targeted values first.
# Throws an error for any unrecognized symbol.
function eigsort(which::Symbol)
    which === :LM && return abs, true    # largest magnitude
    which === :LR && return real, true   # largest real part
    which === :SR && return real, false  # smallest real part
    which === :LI && return imag, true   # largest imaginary part
    which === :SI && return imag, false  # smallest imaginary part
    error("invalid specification of which eigenvalues to target: which = $which")
end
| KrylovKit | https://github.com/Jutho/KrylovKit.jl.git |
|
[
"MIT"
] | 0.8.1 | 3c2a016489c38f35160a246c91a3f3353c47bb68 | code | 11985 | """
geneigsolve((A::AbstractMatrix, B::AbstractMatrix), [howmany = 1, which = :LM,
T = promote_type(eltype(A), eltype(B))]; kwargs...)
geneigsolve(f, n::Int, [howmany = 1, which = :LM, T = Float64]; kwargs...)
geneigsolve(f, x₀, [howmany = 1, which = :LM]; kwargs...)
# expert version:
geneigsolve(f, x₀, howmany, which, algorithm)
Compute at least `howmany` generalized eigenvalues ``λ`` and generalized eigenvectors
``x`` of the form ``(A - λB)x = 0``, where `A` and `B` are either instances of
`AbstractMatrix`, or some function that implements the matrix vector product. In case
functions are used, one could either specify the action of `A` and `B` via a tuple of two
functions (or a function and an `AbstractMatrix`), or one could use a single function that
takes a single argument `x` and returns two results, corresponding to `A*x` and `B*x`.
Return the computed eigenvalues, eigenvectors and a `ConvergenceInfo` structure.
### Arguments:
The first argument is either a tuple of two linear maps, so a function or an `AbstractMatrix`
for either of them, representing the action of `A` and `B`. Alternatively, a single function
can be used that takes a single argument `x` and returns the equivalent of `(A*x, B*x)` as
result. This latter form is compatible with the `do` block syntax of Julia. If an
`AbstractMatrix` is used for either `A` or `B`, a starting vector `x₀` does not need to be
provided, it is then chosen as `rand(T, size(A,1))` if `A` is an `AbstractMatrix` (or
similarly if only `B` is an `AbstractMatrix`). Here `T = promote_type(eltype(A), eltype(B))`
if both `A` and `B` are instances of `AbstractMatrix`, or just the `eltype` of whichever is
an `AbstractMatrix`. If both `A` and `B` are encoded more generally as a callable function
or method, the best approach is to provide an explicit starting guess `x₀`. Note that `x₀`
does not need to be of type `AbstractVector`, any type that behaves as a vector and supports
the required methods (see KrylovKit docs) is accepted. If instead of `x₀` an integer `n` is
specified, it is assumed that `x₀` is a regular vector and it is initialized to `rand(T,n)`,
where the default value of `T` is `Float64`, unless specified differently.
The next arguments are optional, but should typically be specified. `howmany` specifies how
many eigenvalues should be computed; `which` specifies which eigenvalues should be
targeted. Valid specifications of `which` are given by
- `:LM`: eigenvalues of largest magnitude
- `:LR`: eigenvalues with largest (most positive) real part
- `:SR`: eigenvalues with smallest (most negative) real part
- `:LI`: eigenvalues with largest (most positive) imaginary part, only if `T <: Complex`
- `:SI`: eigenvalues with smallest (most negative) imaginary part, only if `T <: Complex`
- [`EigSorter(f; rev = false)`](@ref): eigenvalues `λ` that appear first (or last if
`rev == true`) when sorted by `f(λ)`
!!! note "Note about selecting `which` eigenvalues"
Krylov methods work well for extremal eigenvalues, i.e. eigenvalues on the periphery of
the spectrum of the linear map. Even with `ClosestTo`, no shift and invert is performed.
This is useful if, e.g., you know the spectrum to be within the unit circle in the
complex plane, and want to target the eigenvalues closest to the value `λ = 1`.
The argument `T` acts as a hint in which `Number` type the computation should be performed,
but is not restrictive. If the linear map automatically produces complex values, complex
arithmetic will be used even though `T<:Real` was specified.
### Return values:
The return value is always of the form `vals, vecs, info = geneigsolve(...)` with
- `vals`: a `Vector` containing the eigenvalues, of length at least `howmany`, but could
be longer if more eigenvalues were converged at the same cost.
- `vecs`: a `Vector` of corresponding eigenvectors, of the same length as `vals`.
Note that eigenvectors are not returned as a matrix, as the linear map could act on any
custom Julia type with vector like behavior, i.e. the elements of the list `vecs` are
objects that are typically similar to the starting guess `x₀`, up to a possibly
different `eltype`. When the linear map is a simple `AbstractMatrix`, `vecs` will be
`Vector{Vector{<:Number}}`.
- `info`: an object of type [`ConvergenceInfo`], which has the following fields
+ `info.converged::Int`: indicates how many eigenvalues and eigenvectors were actually
converged to the specified tolerance `tol` (see below under keyword arguments)
+ `info.residual::Vector`: a list of the same length as `vals` containing the
residuals `info.residual[i] = f(vecs[i]) - vals[i] * vecs[i]`
+ `info.normres::Vector{<:Real}`: list of the same length as `vals` containing the
norm of the residual `info.normres[i] = norm(info.residual[i])`
+ `info.numops::Int`: number of times the linear map was applied, i.e. number of times
`f` was called, or a vector was multiplied with `A`
+ `info.numiter::Int`: number of times the Krylov subspace was restarted (see below)
!!! warning "Check for convergence"
No warning is printed if not all requested eigenvalues were converged, so always check
if `info.converged >= howmany`.
### Keyword arguments:
Keyword arguments and their default values are given by:
- `verbosity::Int = 0`: verbosity level, i.e. 0 (no messages), 1 (single message
at the end), 2 (information after every iteration), 3 (information per Krylov step)
- `tol::Real`: the requested accuracy, relative to the 2-norm of the corresponding
eigenvectors, i.e. convergence is achieved if `norm((A - λB)x) < tol * norm(x)`. Because
eigenvectors are now normalised such that `dot(x, B*x) = 1`, `norm(x)` is not
automatically one. If you work in e.g. single precision (`Float32`), you should
definitely change the default value.
- `krylovdim::Integer`: the maximum dimension of the Krylov subspace that will be
constructed. Note that the dimension of the vector space is not known or checked, e.g.
`x₀` should not necessarily support the `Base.length` function. If you know the actual
problem dimension is smaller than the default value, it is useful to reduce the value
of `krylovdim`, though in principle this should be detected.
- `maxiter::Integer`: the number of times the Krylov subspace can be rebuilt; see below
for further details on the algorithms.
- `orth::Orthogonalizer`: the orthogonalization method to be used, see
[`Orthogonalizer`](@ref)
- `issymmetric::Bool`: if both linear maps `A` and `B` are symmetric, only meaningful if
`T<:Real`
- `ishermitian::Bool`: if both linear maps `A` and `B` are hermitian
- `isposdef::Bool`: if the linear map `B` is positive definite
The default values are given by `tol = KrylovDefaults.tol`,
`krylovdim = KrylovDefaults.krylovdim`, `maxiter = KrylovDefaults.maxiter`,
`orth = KrylovDefaults.orth`; see [`KrylovDefaults`](@ref) for details.
The default value for the last three parameters depends on the method. If an
`AbstractMatrix` is used, `issymmetric`, `ishermitian` and `isposdef` are checked for that
matrix, otherwise the default values are `issymmetric = false` and
`ishermitian = T <: Real && issymmetric`. When values are provided, no checks will be
performed even in the matrix case.
### Algorithm
The last method, without default values and keyword arguments, is the one that is finally
called, and can also be used directly. Here the algorithm is specified, though currently
only [`GolubYe`](@ref) is available. The Golub-Ye algorithm is an algorithm for solving
hermitian (symmetric) generalized eigenvalue problems `A x = λ B x` with positive definite
`B`, without the need for inverting `B`. It builds a Krylov subspace of size `krylovdim`
starting from an estimate `x` by acting with `(A - ρ(x) B)`, where
`ρ(x) = dot(x, A*x)/ dot(x, B*x)`, and employing the Lanczos algorithm. This process is
repeated at most `maxiter` times. In every iteration `k>1`, the subspace will also be
expanded to size `krylovdim+1` by adding ``x_k - x_{k-1}``, which is known as the LOPCG
correction and was suggested by Money and Ye. With `krylovdim = 2`, this algorithm becomes
equivalent to `LOPCG`.
!!! warning "Restriction to symmetric definite generalized eigenvalue problems"
While the only algorithm so far is restricted to symmetric/hermitian generalized
eigenvalue problems with positive definite `B`, this is not reflected in the default
values for the keyword arguments `issymmetric` or `ishermitian` and `isposdef`. Make
sure to set these to true to understand the implications of using this algorithm.
"""
function geneigsolve end # method-less stub to which the docstring above is attached
# Matrix/matrix front end: both maps must be square and of equal size for the
# generalized problem (A - λB)x = 0 to make sense; a random starting vector of
# the promoted element type is then generated automatically.
function geneigsolve(AB::Tuple{AbstractMatrix,AbstractMatrix},
                     howmany::Int=1,
                     which::Selector=:LM,
                     T=promote_type(eltype.(AB)...);
                     kwargs...)
    A, B = AB
    n = size(A, 1)
    allsquare = size(A, 2) == n && size(B, 1) == n && size(B, 2) == n
    allsquare ||
        throw(DimensionMismatch("Matrices `A` and `B` should be square and have matching size"))
    return geneigsolve(AB, rand(T, n), howmany, which; kwargs...)
end
# Callable/matrix front end: `B` is a matrix, so its row count fixes the problem
# size for the random starting guess; `A` may be any callable linear map.
function geneigsolve(AB::Tuple{Any,AbstractMatrix},
                     howmany::Int=1,
                     which::Selector=:LM,
                     T=eltype(AB[2]);
                     kwargs...)
    x₀ = rand(T, size(AB[2], 1))
    return geneigsolve(AB, x₀, howmany, which; kwargs...)
end
# Matrix/callable front end: `A` is a matrix, so its row count fixes the problem
# size for the random starting guess; `B` may be any callable linear map.
function geneigsolve(AB::Tuple{AbstractMatrix,Any},
                     howmany::Int=1,
                     which::Selector=:LM,
                     T=eltype(AB[1]);
                     kwargs...)
    x₀ = rand(T, size(AB[1], 1))
    return geneigsolve(AB, x₀, howmany, which; kwargs...)
end
# Dimension-based front end: interpret `n` as the problem dimension and start
# from a random length-`n` vector of scalar type `T` (default `Float64`).
function geneigsolve(f,
                     n::Int,
                     howmany::Int=1,
                     which::Selector=:LM,
                     T::Type=Float64;
                     kwargs...)
    x₀ = rand(T, n)
    return geneigsolve(f, x₀, howmany, which; kwargs...)
end
# Generic front end: infer the scalar type of the problem from the inner products
# of `x₀` with `A*x₀` and `B*x₀` (without actually applying `f`), select an
# algorithm, and dispatch to the expert method.
function geneigsolve(f, x₀, howmany::Int=1, which::Selector=:LM; kwargs...)
    Tx = typeof(x₀)
    # infer the result type of `genapply(f, x₀)`, i.e. of the pair (A*x₀, B*x₀)
    Tfx = Core.Compiler.return_type(genapply, Tuple{typeof(f),Tx}) # should be a tuple type
    Tfx1 = Base.tuple_type_head(Tfx)
    Tfx2 = Base.tuple_type_head(Base.tuple_type_tail(Tfx))
    T1 = Core.Compiler.return_type(dot, Tuple{Tx,Tfx1})
    T2 = Core.Compiler.return_type(dot, Tuple{Tx,Tfx2})
    T = promote_type(T1, T2) # scalar type in which the computation will proceed
    alg = geneigselector(f, T; kwargs...)
    # the Golub-Ye algorithm expects real eigenvalues, so imaginary-part selectors are invalid
    if alg isa GolubYe && (which == :LI || which == :SI)
        error("Eigenvalue selector which = $which invalid: real eigenvalues expected with Lanczos algorithm")
    end
    return geneigsolve(f, x₀, howmany, which, alg)
end
# Select a generalized eigenvalue algorithm for a pair of concrete matrices.
# Golub-Ye is the only implemented algorithm; it requires a hermitian (or real
# symmetric) pencil with positive definite `B`, which is checked automatically
# unless the keyword arguments override the defaults.
function geneigselector(AB::Tuple{AbstractMatrix,AbstractMatrix},
                        T::Type;
                        issymmetric=T <: Real && all(LinearAlgebra.issymmetric, AB),
                        ishermitian=issymmetric || all(LinearAlgebra.ishermitian, AB),
                        isposdef=ishermitian && LinearAlgebra.isposdef(AB[2]),
                        kwargs...)
    supported = (issymmetric || ishermitian) && isposdef
    supported ||
        throw(ArgumentError("Only symmetric or hermitian generalized eigenvalue problems with positive definite `B` matrix are currently supported."))
    return GolubYe(; kwargs...)
end
# Select a generalized eigenvalue algorithm for a generic linear map `f`. Only
# Golub-Ye is implemented, so the caller must assert hermiticity/symmetry and
# positive definiteness of `B` via the keyword arguments; the properties cannot
# be checked automatically for a callable map.
function geneigselector(f,
                        T::Type;
                        issymmetric=false,
                        ishermitian=issymmetric,
                        isposdef=false,
                        kwargs...)
    supported = (issymmetric || ishermitian) && isposdef
    supported ||
        throw(ArgumentError("Only symmetric or hermitian generalized eigenvalue problems with positive definite `B` matrix are currently supported."))
    return GolubYe(; kwargs...)
end
| KrylovKit | https://github.com/Jutho/KrylovKit.jl.git |
|
[
"MIT"
] | 0.8.1 | 3c2a016489c38f35160a246c91a3f3353c47bb68 | code | 9649 | function geneigsolve(f, x₀, howmany::Int, which::Selector, alg::GolubYe)
krylovdim = alg.krylovdim
maxiter = alg.maxiter
howmany > krylovdim &&
error("krylov dimension $(krylovdim) too small to compute $howmany eigenvalues")
## FIRST ITERATION: setting up
numiter = 1
ax₀, bx₀ = genapply(f, x₀)
numops = 1
β₀ = norm(x₀)
iszero(β₀) && throw(ArgumentError("initial vector should not have norm zero"))
xax = inner(x₀, ax₀) / β₀^2
xbx = inner(x₀, bx₀) / β₀^2
T = promote_type(typeof(xax), typeof(xbx))
invβ₀ = one(T) / β₀
v = scale(x₀, invβ₀)
av = scale!!(zerovector(v), ax₀, invβ₀)
bv = scale!!(zerovector(v), bx₀, invβ₀)
ρ = checkhermitian(xax) / checkposdef(xbx)
r = add!!(av, bv, -ρ)
tol::typeof(ρ) = alg.tol
# allocate storage
HHA = fill(zero(T), krylovdim + 1, krylovdim + 1)
HHB = fill(zero(T), krylovdim + 1, krylovdim + 1)
# Start Lanczos iteration with A - ρ ⋅ B
numiter = 1
vold = v
V = OrthonormalBasis([v])
BV = [bv]
sizehint!(V, krylovdim + 1)
sizehint!(BV, krylovdim + 1)
r, α = orthogonalize!!(r, v, alg.orth) # α should be zero, otherwise ρ was miscalculated
β = norm(r)
converged = 0
values = resize!(Vector{typeof(ρ)}(undef, howmany), 0)
vectors = resize!(Vector{typeof(v)}(undef, howmany), 0)
residuals = resize!(Vector{typeof(r)}(undef, howmany), 0)
normresiduals = resize!(Vector{typeof(β)}(undef, howmany), 0)
K = 1
HHA[K, K] = real(α)
while true
β = norm(r)
if β <= tol && K < howmany
@warn "Invariant subspace of dimension $K (up to requested tolerance `tol = $tol`), which is smaller than the number of requested eigenvalues (i.e. `howmany == $howmany`); setting `howmany = $K`."
howmany = K
end
if K == krylovdim - converged || β <= tol # process
if numiter > 1
# add vold - v, or thus just vold as v is first vector in subspace
v, = orthonormalize!!(vold, V, alg.orth)
av, bv = genapply(f, v)
numops += 1
av = add!!(av, bv, -ρ)
for i in 1:K
HHA[i, K + 1] = inner(V[i], av)
HHA[K + 1, i] = conj(HHA[i, K + 1])
end
K += 1
HHA[K, K] = checkhermitian(inner(v, av))
push!(V, v)
push!(BV, bv)
end
for i in 1:converged
# add converged vectors
v, = orthonormalize(vectors[i], V, alg.orth)
av, bv = genapply(f, v)
numops += 1
av = add!!(av, bv, -ρ)
for j in 1:K
HHA[j, K + 1] = inner(V[j], av)
HHA[K + 1, j] = conj(HHA[j, K + 1])
end
K += 1
HHA[K, K] = checkhermitian(inner(v, av))
push!(V, v)
push!(BV, bv)
end
# Process
HA = view(HHA, 1:K, 1:K)
HB = view(HHB, 1:K, 1:K)
buildHB!(HB, V, BV)
HA .+= ρ .* HB
D, Z = geneigh!(HA, HB)
by, rev = eigsort(which)
p = sortperm(D; by, rev)
xold = V[1]
converged = 0
resize!(values, 0)
resize!(vectors, 0)
resize!(residuals, 0)
resize!(normresiduals, 0)
while converged < K
z = view(Z, :, p[converged + 1])
v = mul!(zerovector(vold), V, z)
av, bv = genapply(f, v)
numops += 1
ρ = checkhermitian(inner(v, av)) / checkposdef(inner(v, bv))
r = add!!(av, bv, -ρ)
β = norm(r)
if β > tol * norm(z)
break
end
push!(values, ρ)
push!(vectors, v)
push!(residuals, r)
push!(normresiduals, β)
converged += 1
end
if converged >= howmany
howmany = converged
break
elseif numiter == maxiter
for k in (converged + 1):howmany
z = view(Z, :, p[k])
v = mul!(zerovector(vold), V, z)
av, bv = genapply(f, v)
numops += 1
ρ = checkhermitian(inner(v, av)) / checkposdef(inner(v, bv))
r = add!!(av, bv, -ρ)
β = norm(r)
push!(values, ρ)
push!(vectors, v)
push!(residuals, r)
push!(normresiduals, β)
end
elseif alg.verbosity > 1
msg = "Golub-Ye geneigsolve in iter $numiter: "
msg *= "$converged values converged, normres = ("
for i in 1:converged
msg *= @sprintf("%.2e", normresiduals[i])
msg *= ", "
end
msg *= @sprintf("%.2e", β) * ")"
@info msg
end
end
if K < krylovdim - converged
# expand
v = scale!!(r, 1 / β)
push!(V, v)
HHA[K + 1, K] = β
HHA[K, K + 1] = β
βold = β
r, α, β, bv = golubyerecurrence(f, ρ, V, βold, alg.orth)
numops += 1
K += 1
n = hypot(α, β, βold)
HHA[K, K] = checkhermitian(α, n)
push!(BV, bv)
if alg.verbosity > 2
@info "Golub-Ye iteration $numiter, step $K: normres = $β"
end
else # restart
numiter == maxiter && break
resize!(V, 0)
resize!(BV, 0)
fill!(HHA, zero(T))
fill!(HHB, zero(T))
K = 1
invβ = 1 / norm(v)
v = scale!!(v, invβ)
bv = scale!!(bv, invβ)
r = scale!!(r, invβ)
r, α = orthogonalize!!(r, v, alg.orth) # α should be zero, otherwise ρ was miscalculated
β = norm(r)
push!(V, v)
HHA[K, K] = real(α)
push!(BV, bv)
numiter += 1
end
end
if alg.verbosity > 0
if converged < howmany
@warn """Golub-Ye geneigsolve finished without convergence after $numiter iterations:
* $converged eigenvalues converged
* norm of residuals = $((normresiduals...,))
* number of operations = $numops"""
else
@info """Golub-Ye geneigsolve finished after $numiter iterations:
* $converged eigenvalues converged
* norm of residuals = $((normresiduals...,))
* number of operations = $numops"""
end
end
return values,
vectors,
ConvergenceInfo(converged, residuals, normresiduals, numiter, numops)
end
# Single Golub-Ye expansion step with the shifted operator (A - ρ⋅B), using
# classical Gram-Schmidt against the two newest basis vectors only. Returns the
# next (unnormalized) vector `w`, the diagonal coefficient `α = ⟨v, (A - ρB)v⟩`,
# its norm `β`, and `bv = B*v` for later reuse.
function golubyerecurrence(f, ρ, V::OrthonormalBasis, β, orth::ClassicalGramSchmidt)
    v = V[end]
    av, bv = genapply(f, v) # av = A*v, bv = B*v
    w = add!!(av, bv, -ρ)   # w = (A - ρ*B) * v
    α = inner(v, w)
    w = add!!(w, V[end - 1], -β) # remove component along previous basis vector
    w = add!!(w, v, -α)          # remove component along current basis vector
    β = norm(w)
    return w, α, β, bv
end
# Golub-Ye expansion step with modified Gram-Schmidt: the coefficient `α` is
# produced by `orthogonalize!!` after the β-component along the previous basis
# vector has already been removed.
function golubyerecurrence(f, ρ, V::OrthonormalBasis, β, orth::ModifiedGramSchmidt)
    v = V[end]
    av, bv = genapply(f, v) # av = A*v, bv = B*v
    w = add!!(av, bv, -ρ)   # w = (A - ρ*B) * v
    w = add!!(w, V[end - 1], -β)
    w, α = orthogonalize!!(w, v, orth)
    β = norm(w)
    return w, α, β, bv
end
# Golub-Ye expansion step with classical Gram-Schmidt plus a second full
# reorthogonalization pass over the whole basis; the correction along `v`
# (the last entry of `s`) is folded back into `α`.
function golubyerecurrence(f, ρ, V::OrthonormalBasis, β, orth::ClassicalGramSchmidt2)
    v = V[end]
    av, bv = genapply(f, v) # av = A*v, bv = B*v
    w = add!!(av, bv, -ρ)   # w = (A - ρ*B) * v
    α = inner(v, w)
    w = add!!(w, V[end - 1], -β)
    w = add!!(w, v, -α)
    w, s = orthogonalize!!(w, V, ClassicalGramSchmidt()) # second pass over full basis
    α += s[end] # correction along v = V[end]
    β = norm(w)
    return w, α, β, bv
end
# Golub-Ye expansion step with modified Gram-Schmidt plus a second full
# reorthogonalization sweep over the basis, vector by vector.
function golubyerecurrence(f, ρ, V::OrthonormalBasis, β, orth::ModifiedGramSchmidt2)
    v = V[end]
    av, bv = genapply(f, v) # av = A*v, bv = B*v
    w = add!!(av, bv, -ρ)   # w = (A - ρ*B) * v
    w = add!!(w, V[end - 1], -β)
    w, α = orthogonalize!!(w, v, ModifiedGramSchmidt())
    s = α
    for q in V
        w, s = orthogonalize!!(w, q, ModifiedGramSchmidt())
    end
    α += s # `s` from the final iteration (q = V[end] = v) is the correction to α
    β = norm(w)
    return w, α, β, bv
end
# Golub-Ye expansion step with classical Gram-Schmidt and iterative refinement:
# keep reorthogonalizing against the full basis while the norm of `w` keeps
# dropping by more than a factor `orth.η` per pass (and stays above machine eps).
function golubyerecurrence(f, ρ, V::OrthonormalBasis, β, orth::ClassicalGramSchmidtIR)
    v = V[end]
    av, bv = genapply(f, v) # av = A*v, bv = B*v
    w = add!!(av, bv, -ρ)   # w = (A - ρ*B) * v
    α = inner(v, w)
    w = add!!(w, V[end - 1], -β)
    w = add!!(w, v, -α)
    ab2 = abs2(α) + abs2(β) # squared norm of the removed components
    β = norm(w)
    nold = sqrt(abs2(β) + ab2) # norm of w before orthogonalization
    while eps(one(β)) < β < orth.η * nold
        nold = β
        w, s = orthogonalize!!(w, V, ClassicalGramSchmidt())
        α += s[end] # accumulate the correction along v = V[end]
        β = norm(w)
    end
    return w, α, β, bv
end
# Golub-Ye expansion step with modified Gram-Schmidt and iterative refinement:
# sweep over the basis vector by vector, repeating while the norm of `w` keeps
# dropping by more than a factor `orth.η` per sweep (and stays above machine eps).
function golubyerecurrence(f, ρ, V::OrthonormalBasis, β, orth::ModifiedGramSchmidtIR)
    v = V[end]
    av, bv = genapply(f, v) # av = A*v, bv = B*v
    w = add!!(av, bv, -ρ)   # w = (A - ρ*B) * v
    w = add!!(w, V[end - 1], -β)
    w, α = orthogonalize!!(w, v, ModifiedGramSchmidt())
    ab2 = abs2(α) + abs2(β) # squared norm of the removed components
    β = norm(w)
    nold = sqrt(abs2(β) + ab2) # norm of w before orthogonalization
    while eps(one(β)) < β < orth.η * nold
        nold = β
        s = zero(α)
        for q in V
            w, s = orthogonalize!!(w, q, ModifiedGramSchmidt())
        end
        α += s # `s` from the final iteration (q = V[end] = v) corrects α
        β = norm(w)
    end
    return w, α, β, bv
end
# Fill the hermitian projected matrix `HB` with the Gram matrix of the basis `V`
# in the B inner product, i.e. HB[i, j] = ⟨V[i], BV[j]⟩, using the precomputed
# vectors `BV[j]` (the images of the basis vectors under `B`). The diagonal must
# be real and positive when `B` is positive definite; `checkposdef` verifies this.
function buildHB!(HB, V, BV)
    m = length(V)
    @inbounds for j in 1:m
        HB[j, j] = checkposdef(inner(V[j], BV[j]))
        # only the strict lower triangle is computed; the upper follows by symmetry
        for i in (j + 1):m
            HB[i, j] = inner(V[i], BV[j])
            HB[j, i] = conj(HB[i, j])
        end
    end
end
| KrylovKit | https://github.com/Jutho/KrylovKit.jl.git |
|
[
"MIT"
] | 0.8.1 | 3c2a016489c38f35160a246c91a3f3353c47bb68 | code | 5401 | function eigsolve(A, x₀, howmany::Int, which::Selector, alg::Lanczos;
alg_rrule=Arnoldi(; tol=alg.tol,
krylovdim=alg.krylovdim,
maxiter=alg.maxiter,
eager=alg.eager,
orth=alg.orth))
krylovdim = alg.krylovdim
maxiter = alg.maxiter
howmany > krylovdim &&
error("krylov dimension $(krylovdim) too small to compute $howmany eigenvalues")
## FIRST ITERATION: setting up
# Initialize Lanczos factorization
iter = LanczosIterator(A, x₀, alg.orth)
fact = initialize(iter; verbosity=alg.verbosity - 2)
numops = 1
numiter = 1
sizehint!(fact, krylovdim)
β = normres(fact)
tol::typeof(β) = alg.tol
# allocate storage
HH = fill(zero(eltype(fact)), krylovdim + 1, krylovdim)
UU = fill(zero(eltype(fact)), krylovdim, krylovdim)
converged = 0
local D, U, f
while true
β = normres(fact)
K = length(fact)
# diagonalize Krylov factorization
if β <= tol
if K < howmany
@warn "Invariant subspace of dimension $K (up to requested tolerance `tol = $tol`), which is smaller than the number of requested eigenvalues (i.e. `howmany == $howmany`); setting `howmany = $K`."
howmany = K
end
end
if K == krylovdim || β <= tol || (alg.eager && K >= howmany)
U = copyto!(view(UU, 1:K, 1:K), I)
f = view(HH, K + 1, 1:K)
T = rayleighquotient(fact) # symtridiagonal
# compute eigenvalues
if K == 1
D = [T[1, 1]]
f[1] = β
converged = Int(β <= tol)
else
if K < krylovdim
T = deepcopy(T)
end
D, U = tridiageigh!(T, U)
by, rev = eigsort(which)
p = sortperm(D; by=by, rev=rev)
D, U = permuteeig!(D, U, p)
mul!(f, view(U, K, :), β)
converged = 0
while converged < K && abs(f[converged + 1]) <= tol
converged += 1
end
end
if converged >= howmany
break
elseif alg.verbosity > 1
msg = "Lanczos eigsolve in iter $numiter, krylovdim = $K: "
msg *= "$converged values converged, normres = ("
msg *= @sprintf("%.2e", abs(f[1]))
for i in 2:howmany
msg *= ", "
msg *= @sprintf("%.2e", abs(f[i]))
end
msg *= ")"
@info msg
end
end
if K < krylovdim# expand Krylov factorization
fact = expand!(iter, fact; verbosity=alg.verbosity - 2)
numops += 1
else ## shrink and restart
if numiter == maxiter
break
end
# Determine how many to keep
keep = div(3 * krylovdim + 2 * converged, 5) # strictly smaller than krylovdim since converged < howmany <= krylovdim, at least equal to converged
# Restore Lanczos form in the first keep columns
H = fill!(view(HH, 1:(keep + 1), 1:keep), zero(eltype(HH)))
@inbounds for j in 1:keep
H[j, j] = D[j]
H[keep + 1, j] = f[j]
end
@inbounds for j in keep:-1:1
h, ν = householder(H, j + 1, 1:j, j)
H[j + 1, j] = ν
H[j + 1, 1:(j - 1)] .= zero(eltype(H))
lmul!(h, H)
rmul!(view(H, 1:j, :), h')
rmul!(U, h')
end
@inbounds for j in 1:keep
fact.αs[j] = H[j, j]
fact.βs[j] = H[j + 1, j]
end
# Update B by applying U using Householder reflections
B = basis(fact)
B = basistransform!(B, view(U, :, 1:keep))
r = residual(fact)
B[keep + 1] = scale!!(r, 1 / β)
# Shrink Lanczos factorization
fact = shrink!(fact, keep)
numiter += 1
end
end
if converged > howmany
howmany = converged
end
values = D[1:howmany]
# Compute eigenvectors
V = view(U, :, 1:howmany)
# Compute convergence information
vectors = let B = basis(fact)
[B * v for v in cols(V)]
end
residuals = let r = residual(fact)
[scale(r, last(v)) for v in cols(V)]
end
normresiduals = let f = f
map(i -> abs(f[i]), 1:howmany)
end
if alg.verbosity > 0
if converged < howmany
@warn """Lanczos eigsolve finished without convergence after $numiter iterations:
* $converged eigenvalues converged
* norm of residuals = $((normresiduals...,))
* number of operations = $numops"""
else
@info """Lanczos eigsolve finished after $numiter iterations:
* $converged eigenvalues converged
* norm of residuals = $((normresiduals...,))
* number of operations = $numops"""
end
end
return values,
vectors,
ConvergenceInfo(converged, residuals, normresiduals, numiter, numops)
end
| KrylovKit | https://github.com/Jutho/KrylovKit.jl.git |
|
[
"MIT"
] | 0.8.1 | 3c2a016489c38f35160a246c91a3f3353c47bb68 | code | 13650 | """
svdsolve(A::AbstractMatrix, [x₀, howmany = 1, which = :LR, T = eltype(A)]; kwargs...)
svdsolve(f, m::Int, [howmany = 1, which = :LR, T = Float64]; kwargs...)
svdsolve(f, x₀, [howmany = 1, which = :LR]; kwargs...)
# expert version:
svdsolve(f, x₀, howmany, which, algorithm; alg_rrule=...)
Compute `howmany` singular values from the linear map encoded in the matrix `A` or by the
function `f`. Return singular values, left and right singular vectors and a
`ConvergenceInfo` structure.
### Arguments:
The linear map can be an `AbstractMatrix` (dense or sparse) or a general function or
callable object. Since both the action of the linear map and its adjoint are required in
order to compute singular values, `f` can either be a tuple of two callable objects (each
accepting a single argument), representing the linear map and its adjoint respectively, or,
`f` can be a single callable object that accepts two input arguments, where the second
argument is a flag of type `Val{true}` or `Val{false}` that indicates whether the adjoint or
the normal action of the linear map needs to be computed. The latter form still combines
well with the `do` block syntax of Julia, as in
```julia
vals, lvecs, rvecs, info = svdsolve(x₀, y₀, howmany, which; kwargs...) do x, flag
if flag === Val(true)
# y = compute action of adjoint map on x
else
# y = compute action of linear map on x
end
return y
end
```
For a general linear map encoded using either the tuple or the two-argument form, the best
approach is to provide a start vector `x₀` (in the codomain, i.e. column space, of the
linear map). Alternatively, one can specify the number `m` of rows of the linear map, in
which case `x₀ = rand(T, m)` is used, where the default value of `T` is `Float64`, unless
specified differently. If an `AbstractMatrix` is used, a starting vector `x₀` does not need
to be provided; it is chosen as `rand(T, size(A,1))`.
The next arguments are optional, but should typically be specified. `howmany` specifies how
many singular values and vectors should be computed; `which` specifies which singular
values should be targeted. Valid specifications of `which` are
- `LR`: largest singular values
- `SR`: smallest singular values
However, the largest singular values tend to converge more rapidly.
### Return values:
The return value is always of the form `vals, lvecs, rvecs, info = svdsolve(...)` with
- `vals`: a `Vector{<:Real}` containing the singular values, of length at least `howmany`,
but could be longer if more singular values were converged at the same cost.
- `lvecs`: a `Vector` of corresponding left singular vectors, of the same length as
`vals`.
- `rvecs`: a `Vector` of corresponding right singular vectors, of the same length as
`vals`. Note that singular vectors are not returned as a matrix, as the linear map
could act on any custom Julia type with vector like behavior, i.e. the elements of the
lists `lvecs`(`rvecs`) are objects that are typically similar to the starting guess `y₀`
(`x₀`), up to a possibly different `eltype`. When the linear map is a simple
`AbstractMatrix`, `lvecs` and `rvecs` will be `Vector{Vector{<:Number}}`.
- `info`: an object of type [`ConvergenceInfo`], which has the following fields
+ `info.converged::Int`: indicates how many singular values and vectors were actually
converged to the specified tolerance `tol` (see below under keyword arguments)
+ `info.residual::Vector`: a list of the same length as `vals` containing the
residuals
`info.residual[i] = A * rvecs[i] - vals[i] * lvecs[i]`.
+ `info.normres::Vector{<:Real}`: list of the same length as `vals` containing the
norm of the residual `info.normres[i] = norm(info.residual[i])`
+ `info.numops::Int`: number of times the linear map was applied, i.e. number of times
`f` was called, or a vector was multiplied with `A` or `A'`.
+ `info.numiter::Int`: number of times the Krylov subspace was restarted (see below)
!!! warning "Check for convergence"
No warning is printed if not all requested singular values were converged, so always
check if `info.converged >= howmany`.
### Keyword arguments:
Keyword arguments and their default values are given by:
- `verbosity::Int = 0`: verbosity level, i.e. 0 (no messages), 1 (single message
at the end), 2 (information after every iteration), 3 (information per Krylov step)
- `krylovdim`: the maximum dimension of the Krylov subspace that will be constructed.
Note that the dimension of the vector space is not known or checked, e.g. `x₀` should
not necessarily support the `Base.length` function. If you know the actual problem
dimension is smaller than the default value, it is useful to reduce the value of
`krylovdim`, though in principle this should be detected.
- `tol`: the requested accuracy according to `normres` as defined above. If you work in
e.g. single precision (`Float32`), you should definitely change the default value.
- `maxiter`: the number of times the Krylov subspace can be rebuilt; see below for further
details on the algorithms.
- `orth`: the orthogonalization method to be used, see [`Orthogonalizer`](@ref)
- `eager::Bool = false`: if true, eagerly compute the SVD after every expansion of the
Krylov subspace to test for convergence, otherwise wait until the Krylov subspace has
dimension `krylovdim`
The final keyword argument `alg_rrule` is relevant only when `svdsolve` is used in a setting
where reverse-mode automatic differentation will be used. A custom `ChainRulesCore.rrule` is
defined for `svdsolve`, which can be evaluated using different algorithms that can be specified
via `alg_rrule`. A suitable default is chosen, so this keyword argument should only be used
when this default choice is failing or not performing efficiently. Check the documentation for
more information on the possible values for `alg_rrule` and their implications on the algorithm
being used.
### Algorithm
The last method, without default values and keyword arguments, is the one that is finally
called, and can also be used directly. Here the algorithm is specified, though currently
only [`GKL`](@ref) is available. `GKL` refers to the the partial Golub-Kahan-Lanczos
bidiagonalization which forms the basis for computing the approximation to the singular
values. This factorization is dynamically shrunk and expanded (i.e. thick restart) similar
to the Krylov-Schur factorization for eigenvalues.
"""
function svdsolve end # method-less stub to which the docstring above is attached
# Matrix front end: for a concrete matrix the row dimension is known, so a random
# starting vector in the codomain is generated automatically.
function svdsolve(A::AbstractMatrix,
                  howmany::Int=1,
                  which::Selector=:LR,
                  T::Type=eltype(A);
                  kwargs...)
    x₀ = rand(T, size(A, 1))
    return svdsolve(A, x₀, howmany, which; kwargs...)
end
# Dimension-based front end: `n` is the number of rows of the linear map; start
# from a random length-`n` vector of scalar type `T` (default `Float64`).
function svdsolve(f, n::Int, howmany::Int=1, which::Selector=:LR, T::Type=Float64;
                  kwargs...)
    x₀ = rand(T, n)
    return svdsolve(f, x₀, howmany, which; kwargs...)
end
# Generic front end: validate the target specification, construct a `GKL`
# algorithm from the keyword arguments, and dispatch to the expert method.
#
# The docstring above documents an `alg_rrule` keyword for configuring
# reverse-mode automatic differentiation, and the expert method accepts it; it
# must therefore be split off from `kwargs` here, since forwarding it to the
# `GKL` constructor would raise an unsupported-keyword error.
function svdsolve(f, x₀, howmany::Int=1, which::Selector=:LR; kwargs...)
    which == :LR ||
        which == :SR ||
        error("invalid specification of which singular values to target: which = $which")
    if haskey(kwargs, :alg_rrule)
        # forward `alg_rrule` to the expert method, everything else to `GKL`
        gklkwargs = (k => v for (k, v) in pairs(kwargs) if k !== :alg_rrule)
        alg = GKL(; gklkwargs...)
        return svdsolve(f, x₀, howmany, which, alg; alg_rrule=kwargs[:alg_rrule])
    else
        alg = GKL(; kwargs...)
        return svdsolve(f, x₀, howmany, which, alg)
    end
end
# Compute singular values and vectors of the linear map `A` using Golub-Kahan-Lanczos
# (GKL) bidiagonalization with implicit restarts once the Krylov dimension is full.
# `which` selects the largest (`:LR`) or smallest (`:SR`) singular values.
# NOTE(review): `alg_rrule` is not referenced in the body below; presumably it
# configures the algorithm used by AD (rrule) definitions — confirm against the
# ChainRules extension.
function svdsolve(A, x₀, howmany::Int, which::Symbol, alg::GKL;
                  alg_rrule=Arnoldi(; tol=alg.tol,
                                    krylovdim=alg.krylovdim,
                                    maxiter=alg.maxiter,
                                    eager=alg.eager,
                                    orth=alg.orth,
                                    verbosity=alg.verbosity))
    krylovdim = alg.krylovdim
    maxiter = alg.maxiter
    howmany > krylovdim &&
        error("krylov dimension $(krylovdim) too small to compute $howmany singular values")
    ## FIRST ITERATION: setting up
    numiter = 1
    # initialize GKL factorization
    iter = GKLIterator(A, x₀, alg.orth)
    fact = initialize(iter; verbosity=alg.verbosity - 2)
    numops = 2 # every GKL step applies both the map and its adjoint
    sizehint!(fact, krylovdim)
    β = normres(fact)
    tol::typeof(β) = alg.tol
    # allocate storage
    HH = fill(zero(eltype(fact)), krylovdim + 1, krylovdim)
    PP = fill(zero(eltype(fact)), krylovdim, krylovdim)
    QQ = fill(zero(eltype(fact)), krylovdim, krylovdim)
    # initialize storage
    local P, Q, f, S
    converged = 0
    # MAIN LOOP: expand the GKL factorization; when full (or eagerly, or upon an
    # invariant subspace), compute the SVD of the bidiagonal Rayleigh quotient,
    # count converged values, and either stop or shrink-and-restart.
    while true
        β = normres(fact)
        K = length(fact)
        if β < tol
            if K < howmany
                @warn "Invariant subspace of dimension $K (up to requested tolerance `tol = $tol`), which is smaller than the number of requested singular values (i.e. `howmany == $howmany`); setting `howmany = $K`."
                howmany = K
            end
        end
        if K == krylovdim || β <= tol || (alg.eager && K >= howmany)
            P = copyto!(view(PP, 1:K, 1:K), I)
            Q = copyto!(view(QQ, 1:K, 1:K), I)
            f = view(HH, K + 1, 1:K)
            B = rayleighquotient(fact) # Bidiagional (lower)
            if K < krylovdim
                B = deepcopy(B)
            end
            P, S, Q = bidiagsvd!(B, P, Q)
            if which == :SR
                # bidiagsvd! yields decreasing order; reverse everything for :SR
                reversecols!(P)
                reverserows!(S)
                reverserows!(Q)
            elseif which != :LR
                error("invalid specification of which singular values to target: which = $which")
            end
            # residual coefficients: |f[i]| estimates the error of singular value i
            mul!(f, view(Q', K, :), β)
            converged = 0
            while converged < K && abs(f[converged + 1]) < tol
                converged += 1
            end
            if converged >= howmany
                break
            elseif alg.verbosity > 1
                msg = "GKL svdsolve in iter $numiter, krylovdim $krylovdim: "
                msg *= "$converged values converged, normres = ("
                msg *= @sprintf("%.2e", abs(f[1]))
                for i in 2:howmany
                    msg *= ", "
                    msg *= @sprintf("%.2e", abs(f[i]))
                end
                msg *= ")"
                @info msg
            end
        end
        if K < krylovdim # expand
            fact = expand!(iter, fact; verbosity=alg.verbosity - 2)
            numops += 2
        else ## shrink and restart
            if numiter == maxiter
                break
            end
            # Determine how many to keep
            keep = div(3 * krylovdim + 2 * converged, 5) # strictly smaller than krylovdim since converged < howmany <= krylovdim, at least equal to converged
            # Update basis by applying P and Q using Householder reflections
            U = basis(fact, :U)
            basistransform!(U, view(P, :, 1:keep))
            # for j = 1:m
            #     h, ν = householder(P, j:m, j)
            #     lmul!(h, view(P, :, j+1:krylovdim))
            #     rmul!(U, h')
            # end
            V = basis(fact, :V)
            basistransform!(V, view(Q', :, 1:keep))
            # for j = 1:m
            #     h, ν = householder(Q, j, j:m)
            #     rmul!(view(Q, j+1:krylovdim, :), h)
            #     rmul!(V, h)
            # end
            # Shrink GKL factorization (no longer strictly GKL)
            r = residual(fact)
            U[keep + 1] = scale!!(r, 1 / normres(fact))
            H = fill!(view(HH, 1:(keep + 1), 1:keep), zero(eltype(HH)))
            @inbounds for j in 1:keep
                H[j, j] = S[j]
                H[keep + 1, j] = f[j]
            end
            # Restore bidiagonal form in the first keep columns
            @inbounds for j in keep:-1:1
                h, ν = householder(H, j + 1, 1:j, j)
                H[j + 1, j] = ν
                H[j + 1, 1:(j - 1)] .= zero(eltype(H))
                rmul!(view(H, 1:j, :), h')
                rmul!(V, h')
                h, ν = householder(H, 1:j, j, j)
                H[j, j] = ν
                @inbounds H[1:(j - 1), j] .= zero(eltype(H))
                lmul!(h, view(H, :, 1:(j - 1)))
                rmul!(U, h')
            end
            @inbounds for j in 1:keep
                fact.αs[j] = H[j, j]
                fact.βs[j] = H[j + 1, j]
            end
            # Shrink GKL factorization
            fact = shrink!(fact, keep)
            numiter += 1
        end
    end
    if converged > howmany
        howmany = converged
    end
    values = S[1:howmany]
    # Compute schur vectors
    Pv = view(P, :, 1:howmany)
    Qv = view(Q, 1:howmany, :)
    # Compute convergence information
    leftvectors = let U = basis(fact, :U)
        [U * v for v in cols(Pv)]
    end
    rightvectors = let V = basis(fact, :V)
        [V * v for v in cols(Qv')]
    end
    residuals = let r = residual(fact)
        [scale(r, last(v)) for v in cols(Qv')]
    end
    normresiduals = let f = f
        map(i -> abs(f[i]), 1:howmany)
    end
    if alg.verbosity > 0
        if converged < howmany
            @warn """GKL svdsolve finished without convergence after $numiter iterations:
            * $converged singular values converged
            * norm of residuals = $((normresiduals...,))
            * number of operations = $numops"""
        else
            @info """GKL svdsolve finished after $numiter iterations:
            * $converged singular values converged
            * norm of residuals = $((normresiduals...,))
            * number of operations = $numops"""
        end
    end
    return values,
           leftvectors,
           rightvectors,
           ConvergenceInfo(converged, residuals, normresiduals, numiter, numops)
end
| KrylovKit | https://github.com/Jutho/KrylovKit.jl.git |
|
[
"MIT"
] | 0.8.1 | 3c2a016489c38f35160a246c91a3f3353c47bb68 | code | 8660 | # arnoldi.jl
"""
    mutable struct ArnoldiFactorization{T,S} <: KrylovFactorization{T,S}

Structure to store an Arnoldi factorization of a linear map `A` of the form

```julia
A * V = V * B + r * b'
```

For a given Arnoldi factorization `fact` of length `k = length(fact)`, the basis `V` is
obtained via [`basis(fact)`](@ref basis) and is an instance of [`OrthonormalBasis{T}`](@ref
Basis), with also `length(V) == k` and where `T` denotes the type of vector like objects
used in the problem. The Rayleigh quotient `B` is obtained as
[`rayleighquotient(fact)`](@ref) and is of type [`B::PackedHessenberg{S<:Number}`](@ref
PackedHessenberg) with `size(B) == (k,k)`. The residual `r` is obtained as
[`residual(fact)`](@ref) and is of type `T`. One can also query [`normres(fact)`](@ref) to
obtain `norm(r)`, the norm of the residual. The vector `b` has no dedicated name but can be
obtained via [`rayleighextension(fact)`](@ref). It takes the default value ``e_k``, i.e. the
unit vector of all zeros and a one in the last entry, which is represented using
[`SimpleBasisVector`](@ref).

An Arnoldi factorization `fact` can be destructured as `V, B, r, nr, b = fact` with
`nr = norm(r)`.

`ArnoldiFactorization` is mutable because it can [`expand!`](@ref) or [`shrink!`](@ref).
See also [`ArnoldiIterator`](@ref) for an iterator that constructs a progressively expanding
Arnoldi factorizations of a given linear map and a starting vector. See
[`LanczosFactorization`](@ref) and [`LanczosIterator`](@ref) for a Krylov factorization that
is optimized for real symmetric or complex hermitian linear maps.
"""
mutable struct ArnoldiFactorization{T,S} <: KrylovFactorization{T,S}
    k::Int # current Krylov dimension
    V::OrthonormalBasis{T} # basis of length k
    H::Vector{S} # Hessenberg matrix in packed (column-wise) form: k*(k+3)/2 entries
    r::T # residual
end
# Accessors and Krylov-factorization interface methods for `ArnoldiFactorization`.

# The current length equals the Krylov dimension `k`.
function Base.length(F::ArnoldiFactorization)
    return F.k
end

# Reserve capacity for a factorization of length `n`; the packed Hessenberg
# storage requires `n * (n + 3) / 2` entries.
function Base.sizehint!(F::ArnoldiFactorization, n)
    sizehint!(F.V, n)
    sizehint!(F.H, (n * n + 3 * n) >> 1)
    return F
end

# The scalar type is the second type parameter `S`.
function Base.eltype(F::ArnoldiFactorization)
    return eltype(typeof(F))
end
Base.eltype(::Type{<:ArnoldiFactorization{<:Any,S}}) where {S} = S

# Basis, Rayleigh quotient, residual (and its norm) and the Rayleigh extension
# vector `b` (the last coordinate unit vector).
function basis(F::ArnoldiFactorization)
    return F.V
end
function rayleighquotient(F::ArnoldiFactorization)
    return PackedHessenberg(F.H, F.k)
end
function residual(F::ArnoldiFactorization)
    return F.r
end
function normres(F::ArnoldiFactorization)
    # the residual norm is always stored as the last entry of the packed storage
    return @inbounds abs(F.H[end])
end
function rayleighextension(F::ArnoldiFactorization)
    return SimpleBasisVector(F.k, F.k)
end
# Arnoldi iteration for constructing the orthonormal basis of a Krylov subspace.
"""
    struct ArnoldiIterator{F,T,O<:Orthogonalizer} <: KrylovIterator{F,T}
    ArnoldiIterator(f, v₀, [orth::Orthogonalizer = KrylovDefaults.orth])

Iterator that takes a general linear map `f::F` and an initial vector `v₀::T` and generates
an expanding `ArnoldiFactorization` thereof. In particular, `ArnoldiIterator` iterates over
progressively expanding Arnoldi factorizations using the
[Arnoldi iteration](https://en.wikipedia.org/wiki/Arnoldi_iteration).

The argument `f` can be a matrix, or a function accepting a single argument `v`, so that
`f(v)` implements the action of the linear map on the vector `v`.

The optional argument `orth` specifies which [`Orthogonalizer`](@ref) to be used. The
default value in [`KrylovDefaults`](@ref) is to use [`ModifiedGramSchmidtIR`](@ref), which
possibly uses reorthogonalization steps.

When iterating over an instance of `ArnoldiIterator`, the values being generated are
instances of [`ArnoldiFactorization`](@ref), which can be immediately destructured into a
[`basis`](@ref), [`rayleighquotient`](@ref), [`residual`](@ref), [`normres`](@ref) and
[`rayleighextension`](@ref), for example as

```julia
for (V, B, r, nr, b) in ArnoldiIterator(f, v₀)
    # do something
    nr < tol && break # a typical stopping criterion
end
```

Since the iterator does not know the dimension of the underlying vector space of
objects of type `T`, it keeps expanding the Krylov subspace until the residual norm `nr`
falls below machine precision `eps(typeof(nr))`.

The internal state of `ArnoldiIterator` is the same as the return value, i.e. the
corresponding `ArnoldiFactorization`. However, as Julia's Base iteration interface (using
`Base.iterate`) requires that the state is not mutated, a `deepcopy` is produced upon every
next iteration step.

Instead, you can also mutate the `ArnoldiFactorization` in place, using the following
interface, e.g. for the same example above

```julia
iterator = ArnoldiIterator(f, v₀)
factorization = initialize(iterator)
while normres(factorization) > tol
    expand!(iterator, factorization)
    V, B, r, nr, b = factorization
    # do something
end
```

Here, [`initialize(::KrylovIterator)`](@ref) produces the first Krylov factorization of
length 1, and `expand!(::KrylovIterator, ::KrylovFactorization)`(@ref) expands the
factorization in place. See also [`initialize!(::KrylovIterator,
::KrylovFactorization)`](@ref) to initialize in an already existing factorization (most
information will be discarded) and [`shrink!(::KrylovFactorization, k)`](@ref) to shrink an
existing factorization down to length `k`.
"""
struct ArnoldiIterator{F,T,O<:Orthogonalizer} <: KrylovIterator{F,T}
    operator::F # linear map: a matrix, or a callable implementing its action
    x₀::T # starting vector
    orth::O # orthogonalization algorithm
end
# Convenience constructor using the default orthogonalizer.
ArnoldiIterator(A, x₀) = ArnoldiIterator(A, x₀, KrylovDefaults.orth)

# Neither the length nor the element type of the iterator is known in advance.
Base.IteratorSize(::Type{<:ArnoldiIterator}) = Base.SizeUnknown()
Base.IteratorEltype(::Type{<:ArnoldiIterator}) = Base.EltypeUnknown()

# Start iteration by building the length-1 factorization; the factorization itself
# doubles as the iteration state.
function Base.iterate(it::ArnoldiIterator)
    fact = initialize(it)
    return fact, fact
end

# Keep expanding until the residual norm drops below machine precision. A deepcopy
# is taken so the state previously handed to the caller is never mutated in place.
function Base.iterate(it::ArnoldiIterator, fact)
    β = normres(fact)
    β < eps(typeof(β)) && return nothing
    newfact = expand!(it, deepcopy(fact))
    return newfact, newfact
end
# Build the initial length-1 Arnoldi factorization from `iter.x₀`: normalize x₀,
# apply the operator once, and orthogonalize the image against the first basis
# vector, with extra Gram-Schmidt passes depending on `iter.orth`.
function initialize(iter::ArnoldiIterator; verbosity::Int=0)
    # initialize without using eltype
    x₀ = iter.x₀
    β₀ = norm(x₀)
    iszero(β₀) && throw(ArgumentError("initial vector should not have norm zero"))
    Ax₀ = apply(iter.operator, x₀)
    α = inner(x₀, Ax₀) / (β₀ * β₀)
    # the scalar type is inferred from the Rayleigh quotient, not from eltype(x₀)
    T = typeof(α)
    # this line determines the vector type that we will henceforth use
    v = add!!(zerovector(Ax₀, T), x₀, 1 / β₀)
    if typeof(Ax₀) != typeof(v)
        r = add!!(zerovector(v), Ax₀, 1 / β₀)
    else
        r = scale!!(Ax₀, 1 / β₀)
    end
    βold = norm(r)
    r = add!!(r, v, -α)
    β = norm(r)
    # possibly reorthogonalize
    if iter.orth isa Union{ClassicalGramSchmidt2,ModifiedGramSchmidt2}
        # a single extra Gram-Schmidt pass, also correcting α
        dα = inner(v, r)
        α += dα
        r = add!!(r, v, -dα)
        β = norm(r)
    elseif iter.orth isa Union{ClassicalGramSchmidtIR,ModifiedGramSchmidtIR}
        # iterated refinement: repeat while the norm keeps dropping by a factor η
        while eps(one(β)) < β < iter.orth.η * βold
            βold = β
            dα = inner(v, r)
            α += dα
            r = add!!(r, v, -dα)
            β = norm(r)
        end
    end
    V = OrthonormalBasis([v])
    H = T[α, β] # first packed Hessenberg column: [α; β]
    if verbosity > 0
        @info "Arnoldi iteration step 1: normres = $β"
    end
    return state = ArnoldiFactorization(1, V, H, r)
end
# Re-initialize an existing Arnoldi factorization in place from `iter.x₀`,
# discarding all previously stored information (extra basis vectors, Hessenberg data).
function initialize!(iter::ArnoldiIterator, state::ArnoldiFactorization; verbosity::Int=0)
    x₀ = iter.x₀
    V = state.V
    while length(V) > 1
        pop!(V)
    end
    H = empty!(state.H)
    # recycle the first slot of V for the normalized starting vector
    V[1] = scale!!(V[1], x₀, 1 / norm(x₀))
    w = apply(iter.operator, V[1])
    r, α = orthogonalize!!(w, V[1], iter.orth)
    β = norm(r)
    state.k = 1
    push!(H, α, β)
    state.r = r
    if verbosity > 0
        @info "Arnoldi iteration step 1: normres = $β"
    end
    return state
end
# Grow the factorization by one step: append the normalized residual as the next
# basis vector and compute the next column of the packed Hessenberg matrix.
function expand!(iter::ArnoldiIterator, state::ArnoldiFactorization; verbosity::Int=0)
    state.k += 1
    k = state.k
    V = state.V
    H = state.H
    r = state.r
    β = normres(state)
    push!(V, scale(r, 1 / β))
    m = length(H)
    # reserve space for the new column (k entries) plus the new subdiagonal element
    resize!(H, m + k + 1)
    r, β = arnoldirecurrence!!(iter.operator, V, view(H, (m + 1):(m + k)), iter.orth)
    H[m + k + 1] = β
    state.r = r
    if verbosity > 0
        @info "Arnoldi iteration step $k: normres = $β"
    end
    return state
end
# Shrink the factorization down to length `k`; the discarded (k+1)-th basis vector
# is recycled as the new residual, rescaled to carry the stored residual norm.
# No-op when the factorization is already short enough.
function shrink!(state::ArnoldiFactorization, k)
    length(state) <= k && return state
    V = state.V
    H = state.H
    while length(V) > k + 1
        pop!(V)
    end
    r = pop!(V)
    # packed Hessenberg storage of size k needs k*(k+3)/2 entries
    resize!(H, (k * k + 3 * k) >> 1)
    state.k = k
    state.r = scale!!(r, normres(state))
    return state
end
# One step of the Arnoldi recurrence: apply the operator to the newest basis vector
# and orthogonalize the image against the whole basis, writing the projection
# coefficients into `h`. Returns the residual and its norm.
function arnoldirecurrence!!(operator,
                             V::OrthonormalBasis,
                             h::AbstractVector,
                             orth::Orthogonalizer)
    w = apply(operator, last(V))
    rnew, _ = orthogonalize!!(w, V, h, orth)
    return rnew, norm(rnew)
end
| KrylovKit | https://github.com/Jutho/KrylovKit.jl.git |
|
[
"MIT"
] | 0.8.1 | 3c2a016489c38f35160a246c91a3f3353c47bb68 | code | 14171 | # gkl.jl
"""
    mutable struct GKLFactorization{TU,TV,S<:Real}

Structure to store a Golub-Kahan-Lanczos (GKL) bidiagonal factorization of a linear map `A`
of the form

```julia
A * V = U * B + r * b'
A' * U = V * B'
```

For a given GKL factorization `fact` of length `k = length(fact)`, the two bases `U` and `V`
are obtained via [`basis(fact, :U)`](@ref basis) and `basis(fact, :V)`. Here, `U` and `V`
are instances of [`OrthonormalBasis{T}`](@ref Basis), with also
`length(U) == length(V) == k` and where `T` denotes the type of vector like objects used in
the problem. The Rayleigh quotient `B` is obtained as [`rayleighquotient(fact)`](@ref) and
is of type `Bidiagonal{S<:Number}` with `size(B) == (k,k)`. The residual `r` is
obtained as [`residual(fact)`](@ref) and is of type `T`. One can also query
[`normres(fact)`](@ref) to obtain `norm(r)`, the norm of the residual. The vector `b` has no
dedicated name but can be obtained via [`rayleighextension(fact)`](@ref). It takes the
default value ``e_k``, i.e. the unit vector of all zeros and a one in the last entry, which
is represented using [`SimpleBasisVector`](@ref).

A GKL factorization `fact` can be destructured as `U, V, B, r, nr, b = fact` with
`nr = norm(r)`.

`GKLFactorization` is mutable because it can [`expand!`](@ref) or [`shrink!`](@ref).
See also [`GKLIterator`](@ref) for an iterator that constructs a progressively expanding
GKL factorizations of a given linear map and a starting vector `u₀`.
"""
mutable struct GKLFactorization{TU,TV,S<:Real}
    k::Int # current Krylov dimension
    U::OrthonormalBasis{TU} # basis of length k
    V::OrthonormalBasis{TV} # basis of length k
    αs::Vector{S} # diagonal of the (lower) bidiagonal Rayleigh quotient
    βs::Vector{S} # subdiagonal; βs[k] stores the residual norm
    r::TU # residual
end
# The current length of the factorization equals the Krylov dimension `k`.
function Base.length(F::GKLFactorization)
    return F.k
end

# Reserve storage for a factorization of length `n` in both bases and in the
# bidiagonal coefficient vectors.
function Base.sizehint!(F::GKLFactorization, n)
    for storage in (F.U, F.V, F.αs, F.βs)
        sizehint!(storage, n)
    end
    return F
end

# The scalar type is the (real) type parameter `S`.
function Base.eltype(F::GKLFactorization)
    return eltype(typeof(F))
end
Base.eltype(::Type{<:GKLFactorization{<:Any,<:Any,S}}) where {S} = S
# Destructuring support, so that `U, V, B, r, nr, b = fact` works: iteration yields
# the two bases, the Rayleigh quotient, the residual, its norm, and the Rayleigh
# extension vector, in that order.
function Base.iterate(F::GKLFactorization)
    return (basis(F, :U), Val(:V))
end
function Base.iterate(F::GKLFactorization, ::Val{:V})
    return (basis(F, :V), Val(:rayleighquotient))
end
function Base.iterate(F::GKLFactorization, ::Val{:rayleighquotient})
    return (rayleighquotient(F), Val(:residual))
end
function Base.iterate(F::GKLFactorization, ::Val{:residual})
    return (residual(F), Val(:normres))
end
function Base.iterate(F::GKLFactorization, ::Val{:normres})
    return (normres(F), Val(:rayleighextension))
end
function Base.iterate(F::GKLFactorization, ::Val{:rayleighextension})
    return (rayleighextension(F), Val(:done))
end
Base.iterate(F::GKLFactorization, ::Val{:done}) = nothing
"""
    basis(fact::GKLFactorization, which::Symbol)

Return the list of basis vectors of a [`GKLFactorization`](@ref), where `which` should take
the value `:U` or `:V` and indicates which set of basis vectors (in the domain or in the
codomain of the corresponding linear map) should be returned. The return type is an
`OrthonormalBasis{T}`, where `T` represents the type of the vectors used by the problem.
"""
function basis(F::GKLFactorization, which::Symbol)
    if length(F.U) != F.k
        error("Not keeping vectors during GKL bidiagonalization")
    end
    if which != :U && which != :V
        error("invalid flag for specifying basis")
    end
    return which == :U ? F.U : F.V
end

# The Rayleigh quotient is the lower-bidiagonal matrix built from the first k
# α (diagonal) and β (subdiagonal) coefficients.
function rayleighquotient(F::GKLFactorization)
    return Bidiagonal(view(F.αs, 1:(F.k)), view(F.βs, 1:(F.k - 1)), :L)
end

# Residual, its (precomputed) norm, and the Rayleigh extension vector `b`.
residual(F::GKLFactorization) = F.r
function normres(F::GKLFactorization)
    return @inbounds F.βs[F.k]
end
rayleighextension(F::GKLFactorization) = SimpleBasisVector(F.k, F.k)
# GKL iteration for constructing the orthonormal basis of a Krylov subspace.
"""
    struct GKLIterator{F,TU,O<:Orthogonalizer}
    GKLIterator(f, u₀, [orth::Orthogonalizer = KrylovDefaults.orth, keepvecs::Bool = true])

Iterator that takes a general linear map `f::F` and an initial vector `u₀::TU` and generates
an expanding `GKLFactorization` thereof. In particular, `GKLIterator` implements the
[Golub-Kahan-Lanczos bidiagonalization procedure](http://www.netlib.org/utk/people/JackDongarra/etemplates/node198.html).
Note, however, that this implementation starts from a vector `u₀` in the codomain of the
linear map `f`, which will end up (after normalisation) as the first column of `U`.

The argument `f` can be a matrix, a tuple of two functions where the first represents the
normal action and the second the adjoint action, or a function accepting two arguments,
where the first argument is the vector to which the linear map needs to be applied, and the
second argument is either `Val(false)` for the normal action and `Val(true)` for the adjoint
action. Note that the flag is thus a `Val` type to allow for type stability in cases where
the vectors in the domain and the codomain of the linear map have a different type.

The optional argument `orth` specifies which [`Orthogonalizer`](@ref) to be used. The
default value in [`KrylovDefaults`](@ref) is to use [`ModifiedGramSchmidtIR`](@ref), which
possibly uses reorthogonalization steps.

When iterating over an instance of `GKLIterator`, the values being generated are
instances `fact` of [`GKLFactorization`](@ref), which can be immediately destructured into a
[`basis(fact, :U)`](@ref), [`basis(fact, :V)`](@ref), [`rayleighquotient`](@ref),
[`residual`](@ref), [`normres`](@ref) and [`rayleighextension`](@ref), for example as

```julia
for (U, V, B, r, nr, b) in GKLIterator(f, u₀)
    # do something
    nr < tol && break # a typical stopping criterion
end
```

Since the iterator does not know the dimension of the underlying vector space of
objects of type `T`, it keeps expanding the Krylov subspace until the residual norm `nr`
falls below machine precision `eps(typeof(nr))`.

The internal state of `GKLIterator` is the same as the return value, i.e. the corresponding
`GKLFactorization`. However, as Julia's Base iteration interface (using `Base.iterate`)
requires that the state is not mutated, a `deepcopy` is produced upon every next iteration
step.

Instead, you can also mutate the `GKLFactorization` in place, using the following
interface, e.g. for the same example above

```julia
iterator = GKLIterator(f, u₀)
factorization = initialize(iterator)
while normres(factorization) > tol
    expand!(iterator, factorization)
    U, V, B, r, nr, b = factorization
    # do something
end
```

Here, [`initialize(::GKLIterator)`](@ref) produces the first GKL factorization of length 1,
and `expand!(::GKLIterator, ::GKLFactorization)`(@ref) expands the factorization in place.
See also [`initialize!(::GKLIterator, ::GKLFactorization)`](@ref) to initialize in an
already existing factorization (most information will be discarded) and
[`shrink!(::GKLIterator, k)`](@ref) to shrink an existing factorization down to length `k`.
"""
struct GKLIterator{F,TU,O<:Orthogonalizer}
    operator::F # linear map: matrix, (normal, adjoint) function pair, or two-argument callable
    u₀::TU # starting vector, living in the codomain of the map
    orth::O # orthogonalization algorithm
    keepvecs::Bool # whether all Krylov vectors are kept (required for reorthogonalization)
    function GKLIterator{F,TU,O}(operator::F,
                                 u₀::TU,
                                 orth::O,
                                 keepvecs::Bool) where {F,TU,O<:Orthogonalizer}
        if !keepvecs && isa(orth, Reorthogonalizer)
            error("Cannot use reorthogonalization without keeping all Krylov vectors")
        end
        return new{F,TU,O}(operator, u₀, orth, keepvecs)
    end
end
function GKLIterator(operator::F,
                     u₀::TU,
                     orth::O=KrylovDefaults.orth,
                     keepvecs::Bool=true) where {F,TU,O<:Orthogonalizer}
    return GKLIterator{F,TU,O}(operator, u₀, orth, keepvecs)
end
# Neither the length nor the element type of the iterator is known in advance.
Base.IteratorSize(::Type{<:GKLIterator}) = Base.SizeUnknown()
Base.IteratorEltype(::Type{<:GKLIterator}) = Base.EltypeUnknown()

# Start iteration with the length-1 GKL factorization; the factorization itself
# doubles as the iteration state.
function Base.iterate(it::GKLIterator)
    fact = initialize(it)
    return fact, fact
end

# Keep expanding until the residual norm drops below machine precision. A deepcopy
# is taken so the state previously handed to the caller is never mutated in place.
function Base.iterate(it::GKLIterator, fact::GKLFactorization)
    β = normres(fact)
    β < eps(typeof(β)) && return nothing
    newfact = expand!(it, deepcopy(fact))
    return newfact, newfact
end
# Build the initial length-1 GKL factorization from `iter.u₀`: normalize u₀, apply
# the adjoint map to obtain the first right vector, and apply the normal map to
# obtain the first residual and coefficients α, β.
function initialize(iter::GKLIterator; verbosity::Int=0)
    # initialize without using eltype
    u₀ = iter.u₀
    β₀ = norm(u₀)
    iszero(β₀) && throw(ArgumentError("initial vector should not have norm zero"))
    v₀ = apply_adjoint(iter.operator, u₀)
    α = norm(v₀) / β₀
    Av₀ = apply_normal(iter.operator, v₀) # apply operator
    α² = inner(u₀, Av₀) / β₀^2
    # consistency check that the two callables really form a map and its adjoint
    α² ≈ α * α || throw(ArgumentError("operator and its adjoint are not compatible"))
    T = typeof(α²)
    # these lines determines the type that we will henceforth use
    u = scale!!(zerovector(u₀, T), u₀, 1 / β₀) # (one(T) / β₀) * u₀
    v = scale(v₀, one(T) / (α * β₀))
    if typeof(Av₀) == typeof(u)
        r = scale!!(Av₀, 1 / (α * β₀))
    else
        r = scale!!(zerovector(u), Av₀, 1 / (α * β₀))
    end
    r = add!!(r, u, -α)
    β = norm(r)
    U = OrthonormalBasis([u])
    V = OrthonormalBasis([v])
    S = real(T) # the bidiagonal coefficients are stored as real numbers
    αs = S[α]
    βs = S[β]
    if verbosity > 0
        @info "GKL iteration step 1: normres = $β"
    end
    return GKLFactorization(1, U, V, αs, βs, r)
end
# Re-initialize an existing GKL factorization in place from `iter.u₀`, discarding
# all previously stored information.
function initialize!(iter::GKLIterator, state::GKLFactorization; verbosity::Int=0)
    U = state.U
    while length(U) > 1
        pop!(U)
    end
    V = empty!(state.V)
    αs = empty!(state.αs)
    βs = empty!(state.βs)
    # recycle the first slot of U for the normalized starting vector
    u = scale!!(U[1], iter.u₀, 1 / norm(iter.u₀))
    v = apply_adjoint(iter.operator, u)
    α = norm(v)
    v = scale!!(v, inv(α))
    r = apply_normal(iter.operator, v)
    r = add!!(r, u, -α)
    β = norm(r)
    state.k = 1
    push!(V, v)
    push!(αs, α)
    push!(βs, β)
    state.r = r
    if verbosity > 0
        @info "GKL iteration step 1: normres = $β"
    end
    return state
end
# Grow the GKL factorization by one step: the normalized residual becomes the next
# left vector, and one GKL recurrence step produces the next right vector, residual
# and coefficients α, β.
function expand!(iter::GKLIterator, state::GKLFactorization; verbosity::Int=0)
    βold = normres(state)
    U = state.U
    V = state.V
    r = state.r
    U = push!(U, scale!!(r, 1 / βold))
    v, r, α, β = gklrecurrence(iter.operator, U, V, βold, iter.orth)
    push!(V, v)
    push!(state.αs, α)
    push!(state.βs, β)
    # NOTE(review): the line below would implement `keepvecs=false` but is commented
    # out, so V currently grows regardless of `iter.keepvecs` — confirm intended.
    #!iter.keepvecs && popfirst!(state.V) # remove oldest V if not keepvecs
    state.k += 1
    state.r = r
    if verbosity > 0
        @info "GKL iteration step $(state.k): normres = $β"
    end
    return state
end
# Shrink the GKL factorization down to length `k` (requires that all vectors were
# kept); the discarded (k+1)-th left vector is recycled as the new residual.
function shrink!(state::GKLFactorization, k)
    length(state) == length(state.V) ||
        error("we cannot shrink GKLFactorization without keeping vectors")
    length(state) <= k && return state
    U = state.U
    V = state.V
    while length(V) > k + 1
        pop!(U)
        pop!(V)
    end
    pop!(V)
    # at this point U holds one vector more than V; it becomes the residual
    r = pop!(U)
    resize!(state.αs, k)
    resize!(state.βs, k)
    state.k = k
    state.r = scale!!(r, normres(state))
    return state
end
# Golub-Kahan-Lanczos recurrence relation.
# Plain three-term GKL step (no reorthogonalization): from the newest left vector
# u = U[end] and the previous norm β, produce the next right vector v, the new
# residual r, and the new coefficients α and β.
function gklrecurrence(operator,
                       U::OrthonormalBasis,
                       V::OrthonormalBasis,
                       β,
                       orth::Union{ClassicalGramSchmidt,ModifiedGramSchmidt})
    u = U[end]
    # v ← A' u - β v_prev, then normalize to obtain α and the next right vector
    v = add!!(apply_adjoint(operator, u), V[end], -β)
    α = norm(v)
    v = scale!!(v, inv(α))
    # r ← A v - α u; its norm is the next β
    r = add!!(apply_normal(operator, v), u, -α)
    βnew = norm(r)
    return v, r, α, βnew
end
# GKL step with a second (classical Gram-Schmidt) orthogonalization pass on the
# left residual.
function gklrecurrence(operator,
                       U::OrthonormalBasis,
                       V::OrthonormalBasis,
                       β,
                       orth::ClassicalGramSchmidt2)
    u = U[end]
    w = apply_adjoint(operator, u)
    # subtracting β V[end] already orthogonalizes against the previous right vector;
    # a full pass over V is unnecessary given the reorthogonalization of r below
    w = add!!(w, V[end], -β)
    α = norm(w)
    v = scale!!(w, inv(α))
    # left residual A v - α u, followed by a full classical Gram-Schmidt pass
    r = add!!(apply_normal(operator, v), u, -α)
    r, = orthogonalize!!(r, U, ClassicalGramSchmidt())
    βnew = norm(r)
    return v, r, α, βnew
end
# GKL step with a second (modified Gram-Schmidt) orthogonalization pass on the
# left residual.
function gklrecurrence(operator,
                       U::OrthonormalBasis,
                       V::OrthonormalBasis,
                       β,
                       orth::ModifiedGramSchmidt2)
    u = U[end]
    # subtracting β V[end] suffices on the right side, given the full
    # reorthogonalization of the left residual below
    w = add!!(apply_adjoint(operator, u), V[end], -β)
    α = norm(w)
    v = scale!!(w, inv(α))
    r = add!!(apply_normal(operator, v), u, -α)
    # reorthogonalize the residual against every left vector, one at a time
    for q in U
        r, = orthogonalize!!(r, q, ModifiedGramSchmidt())
    end
    βnew = norm(r)
    return v, r, α, βnew
end
# GKL step with iteratively refined classical Gram-Schmidt: repeat the
# reorthogonalization pass while the norm keeps dropping by more than the factor
# `orth.η`, for both the right vector v and the left residual r.
function gklrecurrence(operator,
                       U::OrthonormalBasis,
                       V::OrthonormalBasis,
                       β,
                       orth::ClassicalGramSchmidtIR)
    u = U[end]
    v = apply_adjoint(operator, u)
    v = add!!(v, V[end], -β)
    α = norm(v)
    nold = sqrt(abs2(α) + abs2(β))
    # The lower bound `eps(one(α))` terminates the refinement when v is
    # (numerically) contained in the span of V; it mirrors the β loop below and the
    # ModifiedGramSchmidtIR variant, which both carry this guard.
    while eps(one(α)) < α < orth.η * nold
        nold = α
        v, = orthogonalize!!(v, V, ClassicalGramSchmidt())
        α = norm(v)
    end
    v = scale!!(v, inv(α))
    r = apply_normal(operator, v)
    r = add!!(r, u, -α)
    β = norm(r)
    nold = sqrt(abs2(α) + abs2(β))
    while eps(one(β)) < β < orth.η * nold
        nold = β
        r, = orthogonalize!!(r, U, ClassicalGramSchmidt())
        β = norm(r)
    end
    return v, r, α, β
end
# GKL step with iteratively refined modified Gram-Schmidt reorthogonalization.
function gklrecurrence(operator,
                       U::OrthonormalBasis,
                       V::OrthonormalBasis,
                       β,
                       orth::ModifiedGramSchmidtIR)
    u = U[end]
    v = add!!(apply_adjoint(operator, u), V[end], -β)
    α = norm(v)
    nold = sqrt(abs2(α) + abs2(β))
    # sweep over V while the norm keeps shrinking by more than the factor orth.η;
    # the eps lower bound guards against v lying (numerically) in the span of V
    while eps(one(α)) < α < orth.η * nold
        nold = α
        for q in V
            v, = orthogonalize!!(v, q, ModifiedGramSchmidt())
        end
        α = norm(v)
    end
    v = scale!!(v, inv(α))
    r = add!!(apply_normal(operator, v), u, -α)
    β = norm(r)
    nold = sqrt(abs2(α) + abs2(β))
    # same iterated sweep for the left residual
    while eps(one(β)) < β < orth.η * nold
        nold = β
        for q in U
            r, = orthogonalize!!(r, q, ModifiedGramSchmidt())
        end
        β = norm(r)
    end
    return v, r, α, β
end
| KrylovKit | https://github.com/Jutho/KrylovKit.jl.git |
|
[
"MIT"
] | 0.8.1 | 3c2a016489c38f35160a246c91a3f3353c47bb68 | code | 5004 | """
    abstract type KrylovFactorization{T,S<:Number}

Abstract type to store a Krylov factorization of a linear map `A` of the form

```julia
A * V = V * B + r * b'
```

For a given Krylov factorization `fact` of length `k = length(fact)`, the basis `V` is
obtained via [`basis(fact)`](@ref basis) and is an instance of some subtype of
[`Basis{T}`](@ref Basis), with also `length(V) == k` and where `T` denotes the type of
vector like objects used in the problem. The Rayleigh quotient `B` is obtained as
[`rayleighquotient(fact)`](@ref) and `typeof(B)` is some subtype of `AbstractMatrix{S}` with
`size(B) == (k,k)`, typically a structured matrix. The residual `r` is obtained as
[`residual(fact)`](@ref) and is of type `T`. One can also query [`normres(fact)`](@ref) to
obtain `norm(r)`, the norm of the residual. The vector `b` has no dedicated name and often
takes a default form (see below). It should be a subtype of `AbstractVector` of length `k`
and can be obtained as [`rayleighextension(fact)`](@ref) (by lack of a better dedicated
name).

A Krylov factorization `fact` can be destructured as `V, B, r, nr, b = fact` with
`nr = norm(r)`.

See also [`LanczosFactorization`](@ref) and [`ArnoldiFactorization`](@ref) for concrete
implementations, and [`KrylovIterator`](@ref) (with in particular [`LanczosIterator`](@ref)
and [`ArnoldiIterator`](@ref)) for iterators that construct progressively expanding Krylov
factorizations of a given linear map and a starting vector.
"""
abstract type KrylovFactorization{T,S} end

"""
    abstract type KrylovIterator{F,T}

Abstract type for iterators that take a linear map of type `F` and an initial vector of type
`T` and generate an expanding `KrylovFactorization` thereof.

When iterating over an instance of `KrylovIterator`, the values being generated are subtypes
of [`KrylovFactorization`](@ref), which can be immediately destructured into a
[`basis`](@ref), [`rayleighquotient`](@ref), [`residual`](@ref), [`normres`](@ref) and
[`rayleighextension`](@ref).

See [`LanczosIterator`](@ref) and [`ArnoldiIterator`](@ref) for concrete implementations and
more information.
"""
abstract type KrylovIterator{F,T} end

# The remainder of this section declares the generic interface functions of a
# Krylov factorization; concrete methods are defined per factorization type.

"""
    basis(fact::KrylovFactorization)

Return the list of basis vectors of a [`KrylovFactorization`](@ref), which span the Krylov
subspace. The return type is a subtype of `Basis{T}`, where `T` represents the type of the
vectors used by the problem.
"""
function basis end

"""
    rayleighquotient(fact::KrylovFactorization)

Return the Rayleigh quotient of a [`KrylovFactorization`](@ref), i.e. the reduced matrix
within the basis of the Krylov subspace. The return type is a subtype of
`AbstractMatrix{<:Number}`, typically some structured matrix type.
"""
function rayleighquotient end

"""
    residual(fact::KrylovFactorization)

Return the residual of a [`KrylovFactorization`](@ref). The return type is some vector of
the same type as used in the problem. See also [`normres(F)`](@ref) for its norm, which
typically has been computed already.
"""
function residual end

"""
    normres(fact::KrylovFactorization)

Return the norm of the residual of a [`KrylovFactorization`](@ref). As this has typically
already been computed, it is cheaper than (but otherwise equivalent to) `norm(residual(F))`.
"""
function normres end

"""
    rayleighextension(fact::KrylovFactorization)

Return the vector `b` appearing in the definition of a [`KrylovFactorization`](@ref); often
it is simply the last coordinate unit vector, which can be represented using
[`SimpleBasisVector`](@ref).
"""
function rayleighextension end

"""
    shrink!(fact::KrylovFactorization, k)

Shrink an existing Krylov factorization `fact` down to have length `k`. Does nothing if
`length(fact)<=k`.
"""
function shrink! end

"""
    expand!(iter::KrylovIterator, fact::KrylovFactorization)

Expand the Krylov factorization `fact` by one using the linear map and parameters in `iter`.
"""
function expand! end

"""
    initialize!(iter::KrylovIterator, fact::KrylovFactorization)

Initialize a length 1 Krylov factorization corresponding to `iter` in the already existing
factorization `fact`, thereby destroying all the information it currently holds.
"""
function initialize! end

"""
    initialize(iter::KrylovIterator)

Initialize a length 1 Krylov factorization corresponding to `iter`.
"""
function initialize end
# Destructuring support, so that `V, B, r, nr, b = fact` works for any concrete
# Krylov factorization: iteration successively yields the basis, the Rayleigh
# quotient, the residual, its norm, and the Rayleigh extension vector.
function Base.iterate(F::KrylovFactorization)
    return (basis(F), Val(:rayleighquotient))
end
function Base.iterate(F::KrylovFactorization, ::Val{:rayleighquotient})
    return (rayleighquotient(F), Val(:residual))
end
function Base.iterate(F::KrylovFactorization, ::Val{:residual})
    return (residual(F), Val(:normres))
end
function Base.iterate(F::KrylovFactorization, ::Val{:normres})
    return (normres(F), Val(:rayleighextension))
end
function Base.iterate(F::KrylovFactorization, ::Val{:rayleighextension})
    return (rayleighextension(F), Val(:done))
end
Base.iterate(F::KrylovFactorization, ::Val{:done}) = nothing
| KrylovKit | https://github.com/Jutho/KrylovKit.jl.git |
|
[
"MIT"
] | 0.8.1 | 3c2a016489c38f35160a246c91a3f3353c47bb68 | code | 12899 | # lanczos.jl
"""
    mutable struct LanczosFactorization{T,S<:Real} <: KrylovFactorization{T,S}

Structure to store a Lanczos factorization of a real symmetric or complex hermitian linear
map `A` of the form

```julia
A * V = V * B + r * b'
```

For a given Lanczos factorization `fact` of length `k = length(fact)`, the basis `V` is
obtained via [`basis(fact)`](@ref basis) and is an instance of [`OrthonormalBasis{T}`](@ref
Basis), with also `length(V) == k` and where `T` denotes the type of vector like objects
used in the problem. The Rayleigh quotient `B` is obtained as
[`rayleighquotient(fact)`](@ref) and is of type `SymTridiagonal{S<:Real}` with `size(B) ==
(k,k)`. The residual `r` is obtained as [`residual(fact)`](@ref) and is of type `T`. One can
also query [`normres(fact)`](@ref) to obtain `norm(r)`, the norm of the residual. The vector
`b` has no dedicated name but can be obtained via [`rayleighextension(fact)`](@ref). It
takes the default value ``e_k``, i.e. the unit vector of all zeros and a one in the last
entry, which is represented using [`SimpleBasisVector`](@ref).

A Lanczos factorization `fact` can be destructured as `V, B, r, nr, b = fact` with
`nr = norm(r)`.

`LanczosFactorization` is mutable because it can [`expand!`](@ref) or [`shrink!`](@ref).
See also [`LanczosIterator`](@ref) for an iterator that constructs a progressively expanding
Lanczos factorizations of a given linear map and a starting vector. See
[`ArnoldiFactorization`](@ref) and [`ArnoldiIterator`](@ref) for a Krylov factorization that
works for general (non-symmetric) linear maps.
"""
mutable struct LanczosFactorization{T,S<:Real} <: KrylovFactorization{T,S}
    k::Int # current Krylov dimension
    V::OrthonormalBasis{T} # basis of length k
    αs::Vector{S} # diagonal of the symmetric tridiagonal Rayleigh quotient
    βs::Vector{S} # subdiagonal; βs[k] stores the residual norm
    r::T # residual
end
# Accessors and Krylov-factorization interface methods for `LanczosFactorization`.

# The current length equals the Krylov dimension `k`.
function Base.length(F::LanczosFactorization)
    return F.k
end

# Reserve storage for a factorization of length `n`.
function Base.sizehint!(F::LanczosFactorization, n)
    for storage in (F.V, F.αs, F.βs)
        sizehint!(storage, n)
    end
    return F
end

# The scalar type is the (real) type parameter `S`.
function Base.eltype(F::LanczosFactorization)
    return eltype(typeof(F))
end
Base.eltype(::Type{<:LanczosFactorization{<:Any,S}}) where {S} = S

# The basis is only available when all Krylov vectors were kept.
function basis(F::LanczosFactorization)
    if length(F.V) != F.k
        error("Not keeping vectors during Lanczos factorization")
    end
    return F.V
end
function rayleighquotient(F::LanczosFactorization)
    return SymTridiagonal(F.αs, F.βs)
end
function residual(F::LanczosFactorization)
    return F.r
end
function normres(F::LanczosFactorization)
    return @inbounds F.βs[F.k]
end
function rayleighextension(F::LanczosFactorization)
    return SimpleBasisVector(F.k, F.k)
end
# Lanczos iteration for constructing the orthonormal basis of a Krylov subspace.
"""
struct LanczosIterator{F,T,O<:Orthogonalizer} <: KrylovIterator{F,T}
LanczosIterator(f, v₀, [orth::Orthogonalizer = KrylovDefaults.orth, keepvecs::Bool = true])
Iterator that takes a linear map `f::F` (supposed to be real symmetric or complex hermitian)
and an initial vector `v₀::T` and generates an expanding `LanczosFactorization` thereof. In
particular, `LanczosIterator` uses the
[Lanczos iteration](https://en.wikipedia.org/wiki/Lanczos_algorithm) scheme to build a
successively expanding Lanczos factorization. While `f` cannot be tested to be symmetric or
hermitian directly when the linear map is encoded as a general callable object or function,
it is tested whether the imaginary part of `inner(v, f(v))` is sufficiently small to be
neglected.
The argument `f` can be a matrix, or a function accepting a single argument `v`, so that
`f(v)` implements the action of the linear map on the vector `v`.
The optional argument `orth` specifies which [`Orthogonalizer`](@ref) to be used. The
default value in [`KrylovDefaults`](@ref) is to use [`ModifiedGramSchmidtIR`](@ref), which
possibly uses reorthogonalization steps. One can choose to discard the old vectors that span
the Krylov subspace by setting the final argument `keepvecs` to `false`. This, however, is
only possible if an `orth` algorithm is used that does not rely on reorthogonalization, such
as `ClassicalGramSchmidt()` or `ModifiedGramSchmidt()`. In that case, the iterator strictly
uses the Lanczos three-term recurrence relation.
When iterating over an instance of `LanczosIterator`, the values being generated are
instances of [`LanczosFactorization`](@ref), which can be immediately destructured into a
[`basis`](@ref), [`rayleighquotient`](@ref), [`residual`](@ref), [`normres`](@ref) and
[`rayleighextension`](@ref), for example as
```julia
for (V, B, r, nr, b) in LanczosIterator(f, v₀)
# do something
nr < tol && break # a typical stopping criterion
end
```
Note, however, that if `keepvecs=false` in `LanczosIterator`, the basis `V` cannot be
extracted.
Since the iterator does not know the dimension of the underlying vector space of
objects of type `T`, it keeps expanding the Krylov subspace until the residual norm `nr`
falls below machine precision `eps(typeof(nr))`.
The internal state of `LanczosIterator` is the same as the return value, i.e. the
corresponding `LanczosFactorization`. However, as Julia's Base iteration interface (using
`Base.iterate`) requires that the state is not mutated, a `deepcopy` is produced upon every
next iteration step.
Instead, you can also mutate the `KrylovFactorization` in place, using the following
interface, e.g. for the same example above
```julia
iterator = LanczosIterator(f, v₀)
factorization = initialize(iterator)
while normres(factorization) > tol
expand!(iterator, factorization)
V, B, r, nr, b = factorization
# do something
end
```
Here, [`initialize(::KrylovIterator)`](@ref) produces the first Krylov factorization of
length 1, and [`expand!(::KrylovIterator, ::KrylovFactorization)`](@ref) expands the
factorization in place. See also [`initialize!(::KrylovIterator,
::KrylovFactorization)`](@ref) to initialize in an already existing factorization (most
information will be discarded) and [`shrink!(::KrylovFactorization, k)`](@ref) to shrink an
existing factorization down to length `k`.
"""
struct LanczosIterator{F,T,O<:Orthogonalizer} <: KrylovIterator{F,T}
    operator::F # linear map, assumed real symmetric or complex hermitian
    x₀::T # starting vector
    orth::O # orthogonalization scheme used when expanding the Krylov subspace
    keepvecs::Bool # whether all Krylov basis vectors are retained
    # Inner constructor: reorthogonalization requires access to all previous
    # Krylov vectors, so it is incompatible with `keepvecs = false`.
    function LanczosIterator{F,T,O}(operator::F,
                                    x₀::T,
                                    orth::O,
                                    keepvecs::Bool) where {F,T,O<:Orthogonalizer}
        if !keepvecs && isa(orth, Reorthogonalizer)
            error("Cannot use reorthogonalization without keeping all Krylov vectors")
        end
        return new{F,T,O}(operator, x₀, orth, keepvecs)
    end
end
# Convenience outer constructor: infer the type parameters from the arguments and
# supply the default orthogonalizer (`KrylovDefaults.orth`) and `keepvecs = true`.
function LanczosIterator(operator::F,
                         x₀::T,
                         orth::O=KrylovDefaults.orth,
                         keepvecs::Bool=true) where {F,T,O<:Orthogonalizer}
    return LanczosIterator{F,T,O}(operator, x₀, orth, keepvecs)
end
# Iteration traits: the number of steps and the element type are not known in
# advance (iteration stops only when the residual norm underflows machine precision).
Base.IteratorSize(::Type{<:LanczosIterator}) = Base.SizeUnknown()
Base.IteratorEltype(::Type{<:LanczosIterator}) = Base.EltypeUnknown()
# Start iterating a `LanczosIterator`: the initial length-1 Krylov factorization
# serves both as the first produced value and as the iterator state.
function Base.iterate(it::LanczosIterator)
    fact = initialize(it)
    return fact, fact
end
# Continue iterating: stop once the residual norm has dropped below machine
# precision; otherwise expand the factorization by one Krylov vector. A `deepcopy`
# is taken because `Base.iterate` must not mutate its state argument.
function Base.iterate(it::LanczosIterator, fact::LanczosFactorization)
    nr = normres(fact)
    nr < eps(typeof(nr)) && return nothing
    newfact = expand!(it, deepcopy(fact))
    return newfact, newfact
end
# Emit a warning when the diagonal Lanczos coefficient `α` has a non-negligible
# imaginary part relative to the total weight `hypot(α, β₁, β₂)` of the current
# recurrence step, which suggests that the operator is not hermitian.
function warn_nonhermitian(α, β₁, β₂)
    n = hypot(α, β₁, β₂)
    relative_imag = abs(imag(α)) / n
    relative_imag > eps(one(n))^(2 / 5) &&
        @warn "ignoring imaginary component $(imag(α)) from total weight $n: operator might not be hermitian?" α β₁ β₂
    return nothing
end
# Build the initial length-1 Lanczos factorization from `iter.x₀`: normalize the
# starting vector, apply the operator once, orthogonalize the result against the
# first basis vector, and package the result as a `LanczosFactorization`.
function initialize(iter::LanczosIterator; verbosity::Int=0)
    # initialize without using eltype
    x₀ = iter.x₀
    β₀ = norm(x₀)
    iszero(β₀) && throw(ArgumentError("initial vector should not have norm zero"))
    Ax₀ = apply(iter.operator, x₀)
    # Rayleigh quotient of x₀; its type sets the scalar type T of the factorization
    α = inner(x₀, Ax₀) / (β₀ * β₀)
    T = typeof(α)
    # this line determines the vector type that we will henceforth use
    v = add!!(zerovector(Ax₀, T), x₀, 1 / β₀)
    # r = A v; branch avoids aliasing when the normalized type differs from typeof(Ax₀)
    if typeof(Ax₀) != typeof(v)
        r = add!!(zerovector(v), Ax₀, 1 / β₀)
    else
        r = scale!!(Ax₀, 1 / β₀)
    end
    βold = norm(r)
    r = add!!(r, v, -α) # should we use real(α) here?
    β = norm(r)
    warn_nonhermitian(α, zero(β), β)
    # possibly reorthogonalize
    if iter.orth isa Union{ClassicalGramSchmidt2,ModifiedGramSchmidt2}
        # always perform exactly one extra orthogonalization pass
        dα = inner(v, r)
        α += dα
        r = add!!(r, v, -dα) # should we use real(dα) here?
        β = norm(r)
        warn_nonhermitian(α, zero(β), β)
    elseif iter.orth isa Union{ClassicalGramSchmidtIR,ModifiedGramSchmidtIR}
        # iterative refinement: repeat while the norm drops by more than factor η
        while eps(one(β)) < β < iter.orth.η * βold
            βold = β
            dα = inner(v, r)
            α += dα
            r = add!!(r, v, -dα) # should we use real(dα) here?
            β = norm(r)
            warn_nonhermitian(α, zero(β), β)
        end
    end
    V = OrthonormalBasis([v])
    αs = [real(α)] # hermitian case: only the real part of α is meaningful
    βs = [β]
    if verbosity > 0
        @info "Lanczos iteration step 1: normres = $β"
    end
    return LanczosFactorization(1, V, αs, βs, r)
end
# Reinitialize an existing `LanczosFactorization` in place from `iter.x₀`,
# reusing its storage: shrink the basis back to one vector, empty the coefficient
# vectors, and perform the first Lanczos step.
function initialize!(iter::LanczosIterator, state::LanczosFactorization; verbosity::Int=0)
    x₀ = iter.x₀
    V = state.V
    # discard all but the first basis vector
    while length(V) > 1
        pop!(V)
    end
    αs = empty!(state.αs)
    βs = empty!(state.βs)
    # overwrite the first basis vector with the normalized starting vector
    V[1] = scale!!(V[1], x₀, 1 / norm(x₀))
    w = apply(iter.operator, V[1])
    r, α = orthogonalize!!(w, V[1], iter.orth)
    β = norm(r)
    warn_nonhermitian(α, zero(β), β)
    state.k = 1
    push!(αs, real(α))
    push!(βs, β)
    state.r = r
    if verbosity > 0
        @info "Lanczos iteration step 1: normres = $β"
    end
    return state
end
# Expand the factorization by one Krylov vector: normalize the current residual
# into a new basis vector and apply one step of the Lanczos recurrence.
function expand!(iter::LanczosIterator, state::LanczosFactorization; verbosity::Int=0)
    βold = normres(state)
    V = state.V
    r = state.r
    # the normalized residual becomes the next basis vector
    V = push!(V, scale!!(r, 1 / βold))
    r, α, β = lanczosrecurrence(iter.operator, V, βold, iter.orth)
    warn_nonhermitian(α, βold, β)
    αs = push!(state.αs, real(α))
    βs = push!(state.βs, β)
    !iter.keepvecs && popfirst!(state.V) # remove oldest V if not keepvecs
    state.k += 1
    state.r = r
    if verbosity > 0
        @info "Lanczos iteration step $(state.k): normres = $β"
    end
    return state
end
# Shrink the factorization down to length `k` in place, discarding the trailing
# basis vectors. Requires that all Krylov vectors were kept. The storage of the
# (k+1)-th basis vector is recycled as the new residual, rescaled to normres.
function shrink!(state::LanczosFactorization, k)
    length(state) == length(state.V) ||
        error("we cannot shrink LanczosFactorization without keeping Lanczos vectors")
    length(state) <= k && return state
    V = state.V
    while length(V) > k + 1
        pop!(V)
    end
    r = pop!(V)
    resize!(state.αs, k)
    resize!(state.βs, k)
    state.k = k
    # βs[k] (the new normres) was already computed; rescale the recycled vector
    state.r = scale!!(r, normres(state))
    return state
end
# Exploit hermiticity to "simplify" orthonormalization process:
# Lanczos three-term recurrence relation
# One step of the Lanczos three-term recurrence: given the basis V (with previous
# off-diagonal coefficient β), compute w = A*V[end] orthogonalized against the two
# most recent basis vectors, returning (w, α, β) with α the new diagonal and β the
# new off-diagonal coefficient. One method per orthogonalization scheme; they differ
# in the order of the projections and in whether/how reorthogonalization is applied.

# Classical Gram-Schmidt: compute α before any subtraction.
function lanczosrecurrence(operator, V::OrthonormalBasis, β, orth::ClassicalGramSchmidt)
    v = V[end]
    w = apply(operator, v)
    α = inner(v, w)
    w = add!!(w, V[end - 1], -β)
    w = add!!(w, v, -α)
    β = norm(w)
    return w, α, β
end
# Modified Gram-Schmidt: subtract the β-component first, then compute α.
function lanczosrecurrence(operator, V::OrthonormalBasis, β, orth::ModifiedGramSchmidt)
    v = V[end]
    w = apply(operator, v)
    w = add!!(w, V[end - 1], -β)
    α = inner(v, w)
    w = add!!(w, v, -α)
    β = norm(w)
    return w, α, β
end
# Classical Gram-Schmidt with a second full reorthogonalization pass against V;
# only the diagonal correction s[end] is folded back into α.
function lanczosrecurrence(operator, V::OrthonormalBasis, β, orth::ClassicalGramSchmidt2)
    v = V[end]
    w = apply(operator, v)
    α = inner(v, w)
    w = add!!(w, V[end - 1], -β)
    w = add!!(w, v, -α)
    w, s = orthogonalize!!(w, V, ClassicalGramSchmidt())
    α += s[end]
    β = norm(w)
    return w, α, β
end
# Modified Gram-Schmidt with a second reorthogonalization sweep over all of V;
# after the loop, s holds the correction with respect to v = V[end].
function lanczosrecurrence(operator, V::OrthonormalBasis, β, orth::ModifiedGramSchmidt2)
    v = V[end]
    w = apply(operator, v)
    w = add!!(w, V[end - 1], -β)
    w, α = orthogonalize!!(w, v, ModifiedGramSchmidt())
    s = α
    for q in V
        w, s = orthogonalize!!(w, q, ModifiedGramSchmidt())
    end
    α += s
    β = norm(w)
    return w, α, β
end
# Classical Gram-Schmidt with iterative refinement: reorthogonalize while the
# norm of w keeps dropping by more than factor η relative to the previous norm.
function lanczosrecurrence(operator, V::OrthonormalBasis, β, orth::ClassicalGramSchmidtIR)
    v = V[end]
    w = apply(operator, v)
    α = inner(v, w)
    w = add!!(w, V[end - 1], -β)
    w = add!!(w, v, -α)
    ab2 = abs2(α) + abs2(β)
    β = norm(w)
    nold = sqrt(abs2(β) + ab2)
    while eps(one(β)) < β < orth.η * nold
        nold = β
        w, s = orthogonalize!!(w, V, ClassicalGramSchmidt())
        α += s[end]
        β = norm(w)
    end
    return w, α, β
end
# Modified Gram-Schmidt with iterative refinement (same stopping criterion).
function lanczosrecurrence(operator, V::OrthonormalBasis, β, orth::ModifiedGramSchmidtIR)
    v = V[end]
    w = apply(operator, v)
    w = add!!(w, V[end - 1], -β)
    w, α = orthogonalize!!(w, v, ModifiedGramSchmidt())
    ab2 = abs2(α) + abs2(β)
    β = norm(w)
    nold = sqrt(abs2(β) + ab2)
    while eps(one(β)) < β < orth.η * nold
        nold = β
        s = zero(α)
        for q in V
            w, s = orthogonalize!!(w, q, ModifiedGramSchmidt())
        end
        α += s
        β = norm(w)
    end
    return w, α, β
end
| KrylovKit | https://github.com/Jutho/KrylovKit.jl.git |
|
[
"MIT"
# BiCGStab implementation of `linsolve`: approximately solve
# (a₀ + a₁ * operator) * x = b starting from the guess x₀, using the stabilized
# biconjugate gradient method. Each iteration consists of a BiCG half step
# followed by a GMRES(1) half step. Returns `(x, info::ConvergenceInfo)` where
# `info.converged` is 1 on success and 0 otherwise.
# (The stray dataset-metadata text that was fused onto this definition line has
# been removed; the format strings below use "%.12e" for consistency with the CG
# and GMRES implementations.)
function linsolve(operator, b, x₀, alg::BiCGStab, a₀::Number=0, a₁::Number=1; alg_rrule=alg)
    # Initial function operation and division defines number type
    y₀ = apply(operator, x₀)
    T = typeof(inner(b, y₀) / norm(b) * one(a₀) * one(a₁))
    α₀ = convert(T, a₀)
    α₁ = convert(T, a₁)
    # Continue computing r = b - a₀ * x₀ - a₁ * operator(x₀)
    r = scale(b, one(T))
    r = iszero(α₀) ? r : add!!(r, x₀, -α₀)
    r = add!!(r, y₀, -α₁)
    x = scale!!(zerovector(r), x₀, 1)
    normr = norm(r)
    S = typeof(normr)
    # Algorithm parameters
    maxiter = alg.maxiter
    tol::S = alg.tol
    numops = 1 # operator has been applied once to determine r
    numiter = 0
    # Check for early return
    if normr < tol
        if alg.verbosity > 0
            @info """BiCGStab linsolve converged without any iterations:
            *  norm of residual = $normr
            *  number of operations = 1"""
        end
        return (x, ConvergenceInfo(1, r, normr, numiter, numops))
    end
    # First iteration
    numiter += 1
    r_shadow = scale!!(zerovector(r), r, 1) # shadow residual
    ρ = inner(r_shadow, r)
    # Method fails if ρ is zero (note: `ρ ≈ 0.0` with default tolerances is an
    # exact-zero check, since `isapprox` against zero uses `atol = 0`).
    if ρ ≈ 0.0
        @warn """BiCGStab linsolve errored after $numiter iterations:
        *   norm of residual = $normr
        *   number of operations = $numops"""
        return (x, ConvergenceInfo(0, r, normr, numiter, numops))
    end
    ## BiCG part of the algorithm.
    p = scale!!(zerovector(r), r, 1)
    v = apply(operator, p, α₀, α₁)
    numops += 1
    σ = inner(r_shadow, v)
    α = ρ / σ
    s = scale!!(zerovector(r), r, 1)
    s = add!!(s, v, -α) # half step residual
    xhalf = scale!!(zerovector(x), x, 1)
    xhalf = add!!(xhalf, p, +α) # half step iteration
    normr = norm(s)
    # Check for early return at half step.
    if normr < tol
        # Replace approximate residual with the actual residual.
        s = scale!!(zerovector(b), b, 1)
        s = add!!(s, apply(operator, xhalf, α₀, α₁), -1)
        numops += 1
        normr_act = norm(s)
        if normr_act < tol
            if alg.verbosity > 0
                @info """BiCGStab linsolve converged at iteration $(numiter-1/2):
                *  norm of residual = $normr_act
                *  number of operations = $numops"""
            end
            return (xhalf, ConvergenceInfo(1, s, normr_act, numiter, numops))
        end
    end
    ## GMRES part of the algorithm.
    t = apply(operator, s, α₀, α₁)
    numops += 1
    ω = inner(t, s) / inner(t, t)
    x = scale!!(x, xhalf, 1)
    x = add!!(x, s, +ω) # full step iteration
    r = scale!!(r, s, 1)
    r = add!!(r, t, -ω) # full step residual
    # Check for early return at full step.
    normr = norm(r)
    if normr < tol
        # Replace approximate residual with the actual residual.
        r = scale!!(r, b, 1)
        r = add!!(r, apply(operator, x, α₀, α₁), -1)
        numops += 1
        normr_act = norm(r)
        if normr_act < tol
            if alg.verbosity > 0
                @info """BiCGStab linsolve converged at iteration $(numiter):
                *  norm of residual = $normr_act
                *  number of operations = $numops"""
            end
            return (x, ConvergenceInfo(1, r, normr_act, numiter, numops))
        end
    end
    while numiter < maxiter
        if alg.verbosity > 0
            msg = "BiCGStab linsolve in iter $numiter: "
            msg *= "normres = "
            msg *= @sprintf("%.12e", normr)
            @info msg
        end
        numiter += 1
        ρold = ρ
        ρ = inner(r_shadow, r)
        β = (ρ / ρold) * (α / ω)
        p = add!!(p, v, -ω)
        p = add!!(p, r, 1, β)
        v = apply(operator, p, α₀, α₁)
        numops += 1
        σ = inner(r_shadow, v)
        α = ρ / σ
        s = scale!!(s, r, 1)
        s = add!!(s, v, -α) # half step residual
        xhalf = scale!!(xhalf, x, 1)
        xhalf = add!!(xhalf, p, +α) # half step iteration
        normr = norm(s)
        if alg.verbosity > 0
            msg = "BiCGStab linsolve in iter $(numiter-1/2): "
            msg *= "normres = "
            msg *= @sprintf("%.12e", normr)
            @info msg
        end
        # Check for return at half step.
        if normr < tol
            # Compute non-approximate residual.
            s = scale!!(zerovector(b), b, 1)
            s = add!!(s, apply(operator, xhalf, α₀, α₁), -1)
            numops += 1
            normr_act = norm(s)
            if normr_act < tol
                if alg.verbosity > 0
                    @info """BiCGStab linsolve converged at iteration $(numiter-1/2):
                    *  norm of residual = $normr_act
                    *  number of operations = $numops"""
                end
                return (xhalf, ConvergenceInfo(1, s, normr_act, numiter, numops))
            end
        end
        ## GMRES part of the algorithm.
        t = apply(operator, s, α₀, α₁)
        numops += 1
        ω = inner(t, s) / inner(t, t)
        x = scale!!(x, xhalf, 1)
        x = add!!(x, s, +ω) # full step iteration
        r = scale!!(r, s, 1)
        r = add!!(r, t, -ω) # full step residual
        # Check for return at full step.
        normr = norm(r)
        if normr < tol
            # Replace approximate residual with the actual residual.
            r = scale!!(r, b, 1)
            r = add!!(r, apply(operator, x, α₀, α₁), -1)
            numops += 1
            normr_act = norm(r)
            if normr_act < tol
                if alg.verbosity > 0
                    @info """BiCGStab linsolve converged at iteration $(numiter):
                    *  norm of residual = $normr_act
                    *  number of operations = $numops"""
                end
                return (x, ConvergenceInfo(1, r, normr_act, numiter, numops))
            end
        end
    end
    if alg.verbosity > 0
        @warn """BiCGStab linsolve finished without converging after $numiter iterations:
        *   norm of residual = $normr
        *   number of operations = $numops"""
    end
    return (x, ConvergenceInfo(0, r, normr, numiter, numops))
end
| KrylovKit | https://github.com/Jutho/KrylovKit.jl.git |
|
[
"MIT"
# Conjugate-gradient implementation of `linsolve`: approximately solve
# (a₀ + a₁ * operator) * x = b starting from x₀, for a real symmetric / hermitian
# positive definite problem. Returns `(x, info::ConvergenceInfo)` with
# `info.converged == 1` on success. When the residual norm drops below `tol`, the
# residual is recomputed explicitly (and CG restarted) to guard against the
# accumulation of floating point errors in the recurrence.
# (The stray dataset-metadata text that was fused onto this definition line has
# been removed.)
function linsolve(operator, b, x₀, alg::CG, a₀::Real=0, a₁::Real=1; alg_rrule=alg)
    # Initial function operation and division defines number type
    y₀ = apply(operator, x₀)
    T = typeof(inner(b, y₀) / norm(b) * one(a₀) * one(a₁))
    α₀ = convert(T, a₀)
    α₁ = convert(T, a₁)
    # Continue computing r = b - a₀ * x₀ - a₁ * operator(x₀)
    r = scale(b, one(T))
    r = iszero(α₀) ? r : add!!(r, x₀, -α₀)
    r = add!!(r, y₀, -α₁)
    x = scale!!(zerovector(r), x₀, 1)
    normr = norm(r)
    S = typeof(normr)
    # Algorithm parameters
    maxiter = alg.maxiter
    tol::S = alg.tol
    numops = 1 # operator has been applied once to determine r
    numiter = 0
    # Check for early return
    normr < tol && return (x, ConvergenceInfo(1, r, normr, numiter, numops))
    # First iteration
    ρ = normr^2
    p = scale!!(zerovector(r), r, 1)
    q = apply(operator, p, α₀, α₁)
    α = ρ / inner(p, q)
    x = add!!(x, p, +α)
    r = add!!(r, q, -α)
    normr = norm(r)
    ρold = ρ
    ρ = normr^2
    β = ρ / ρold
    numops += 1
    numiter += 1
    if alg.verbosity > 1
        msg = "CG linsolve in iter $numiter: "
        msg *= "normres = "
        msg *= @sprintf("%.12e", normr)
        @info msg
    end
    # Check for early return
    normr < tol && return (x, ConvergenceInfo(1, r, normr, numiter, numops))
    while numiter < maxiter
        p = add!!(p, r, 1, β)
        q = apply(operator, p, α₀, α₁)
        α = ρ / inner(p, q)
        x = add!!(x, p, α)
        r = add!!(r, q, -α)
        normr = norm(r)
        if normr < tol # recompute to account for buildup of floating point errors
            r = scale!!(r, b, 1)
            r = add!!(r, apply(operator, x, α₀, α₁), -1)
            normr = norm(r)
            ρ = normr^2
            β = zero(β) # restart CG
        else
            ρold = ρ
            ρ = normr^2
            β = ρ / ρold
        end
        if normr < tol
            if alg.verbosity > 0
                @info """CG linsolve converged at iteration $numiter:
                *  norm of residual = $normr
                *  number of operations = $numops"""
            end
            return (x, ConvergenceInfo(1, r, normr, numiter, numops))
        end
        numops += 1
        numiter += 1
        if alg.verbosity > 1
            msg = "CG linsolve in iter $numiter: "
            msg *= "normres = "
            msg *= @sprintf("%.12e", normr)
            @info msg
        end
    end
    if alg.verbosity > 0
        @warn """CG linsolve finished without converging after $numiter iterations:
        *   norm of residual = $normr
        *   number of operations = $numops"""
    end
    return (x, ConvergenceInfo(0, r, normr, numiter, numops))
end
| KrylovKit | https://github.com/Jutho/KrylovKit.jl.git |
|
[
"MIT"
# Restarted GMRES implementation of `linsolve`: approximately solve
# (a₀ + a₁ * operator) * x = b starting from x₀. An Arnoldi factorization of the
# operator is built up to dimension `alg.krylovdim`; Givens rotations maintain a
# QR factorization of the (shifted, scaled) Hessenberg matrix so the residual
# norm is available at every step. The process is restarted up to `alg.maxiter`
# times. Returns `(x, info::ConvergenceInfo)` with `info.converged == 1` on
# success.
# (The stray dataset-metadata text that was fused onto this definition line has
# been removed, and an unused local `y2 = copy(y)` has been dropped.)
function linsolve(operator, b, x₀, alg::GMRES, a₀::Number=0, a₁::Number=1; alg_rrule=alg)
    # Initial function operation and division defines number type
    y₀ = apply(operator, x₀)
    T = typeof(inner(b, y₀) / norm(b) * one(a₀) * one(a₁))
    α₀ = convert(T, a₀)::T
    α₁ = convert(T, a₁)::T
    # Continue computing r = b - a₀ * x₀ - a₁ * operator(x₀)
    r = scale(b, one(T))
    r = iszero(α₀) ? r : add!!(r, x₀, -α₀)
    r = add!!(r, y₀, -α₁)
    x = scale!!(zerovector(r), x₀, 1)
    β = norm(r)
    S = typeof(β)
    # Algorithm parameters
    maxiter = alg.maxiter
    krylovdim = alg.krylovdim
    tol::S = alg.tol
    # Check for early return
    if β < tol
        if alg.verbosity > 0
            @info """GMRES linsolve converged without any iterations:
            *  norm of residual = $β
            *  number of operations = 1"""
        end
        return (x, ConvergenceInfo(1, r, β, 0, 1))
    end
    # Initialize data structures
    y = Vector{T}(undef, krylovdim + 1) # rotated right-hand side
    gs = Vector{Givens{T}}(undef, krylovdim) # Givens rotations
    R = fill(zero(T), (krylovdim, krylovdim)) # upper triangular factor
    numiter = 0
    numops = 1 # operator has been applied once to determine T
    iter = ArnoldiIterator(operator, r, alg.orth)
    fact = initialize(iter)
    numops += 1 # start applies operator once
    while numiter < maxiter # restart loop
        numiter += 1
        y[1] = β
        k = 1
        H = rayleighquotient(fact)
        # apply the implicit shift and scale: R = α₀*I + α₁*H
        R[1, 1] = α₀ + α₁ * H[1, 1]
        gs[1], R[1, 1] = givens(R[1, 1], α₁ * normres(fact), 1, 2)
        y[2] = zero(T)
        lmul!(gs[1], y)
        β = convert(S, abs(y[2]))
        if alg.verbosity > 2
            msg = "GMRES linsolve in iter $numiter; step $k: "
            msg *= "normres = "
            msg *= @sprintf("%.12e", β)
            @info msg
        end
        while (β > tol && length(fact) < krylovdim) # inner arnoldi loop
            fact = expand!(iter, fact)
            numops += 1 # expand! applies the operator once
            k = length(fact)
            H = rayleighquotient(fact)
            # copy Arnoldi Hessenberg matrix into R
            @inbounds begin
                for i in 1:(k - 1)
                    R[i, k] = α₁ * H[i, k]
                end
                R[k, k] = α₀ + α₁ * H[k, k]
            end
            # Apply Givens rotations
            Rk = view(R, :, k)
            @inbounds for i in 1:(k - 1)
                lmul!(gs[i], Rk)
            end
            gs[k], R[k, k] = givens(R[k, k], α₁ * normres(fact), k, k + 1)
            # Apply Givens rotations to right hand side
            y[k + 1] = zero(T)
            lmul!(gs[k], y)
            # New error
            β = convert(S, abs(y[k + 1]))
            if alg.verbosity > 2
                msg = "GMRES linsolve in iter $numiter; step $k: "
                msg *= "normres = "
                msg *= @sprintf("%.12e", β)
                @info msg
            end
        end
        if alg.verbosity > 1
            msg = "GMRES linsolve in iter $numiter; finished at step $k: "
            msg *= "normres = "
            msg *= @sprintf("%.12e", β)
            @info msg
        end
        # Solve upper triangular system
        # NOTE(review): the 3-argument `ldiv!` acting on an index range appears to
        # be a KrylovKit-internal method (solves only for y[1:k]) — confirm.
        ldiv!(UpperTriangular(R), y, 1:k)
        # Update x
        V = basis(fact)
        @inbounds for i in 1:k
            x = add!!(x, V[i], y[i])
        end
        if β > tol
            # Recompute residual without reevaluating operator
            w = residual(fact)
            push!(V, scale!!(w, 1 / normres(fact)))
            for i in 1:k
                rmul!(V, gs[i]')
            end
            r = scale!!(r, V[k + 1], y[k + 1])
        else
            # Recompute residual and its norm explicitly, to ensure that no
            # numerical errors have accumulated
            r = scale!!(r, b, 1)
            r = add!!(r, apply(operator, x, α₀, α₁), -1)
            numops += 1
            β = norm(r)
            if β < tol
                if alg.verbosity > 0
                    @info """GMRES linsolve converged at iteration $numiter, step $k:
                    *  norm of residual = $β
                    *  number of operations = $numops"""
                end
                return (x, ConvergenceInfo(1, r, β, numiter, numops))
            end
        end
        # Restart Arnoldi factorization with new r
        iter = ArnoldiIterator(operator, r, alg.orth)
        fact = initialize!(iter, fact)
    end
    if alg.verbosity > 0
        @warn """GMRES linsolve finished without converging after $numiter iterations:
        *   norm of residual = $β
        *   number of operations = $numops"""
    end
    return (x, ConvergenceInfo(0, r, β, numiter, numops))
end
| KrylovKit | https://github.com/Jutho/KrylovKit.jl.git |
|
[
"MIT"
"""
linsolve(A::AbstractMatrix, b::AbstractVector, [x₀, a₀::Number = 0, a₁::Number = 1]; kwargs...)
linsolve(f, b, [x₀, a₀::Number = 0, a₁::Number = 1]; kwargs...)
# expert version:
linsolve(f, b, x₀, algorithm, [a₀::Number = 0, a₁::Number = 1]; alg_rrule=algorithm)
Compute a solution `x` to the linear system `(a₀ + a₁ * A)*x = b` or
`a₀ * x + a₁ * f(x) = b`, possibly using a starting guess `x₀`. Return the approximate
solution `x` and a `ConvergenceInfo` structure.
### Arguments:
The linear map can be an `AbstractMatrix` (dense or sparse) or a general function or
callable object. If no initial guess is specified, it is chosen as `(zero(a₀)*zero(a₁))*b`
which should generate an object similar to `b` but initialized with zeros. The numbers `a₀`
and `a₁` are optional arguments; they are applied implicitly, i.e. they do not contribute
the computation time of applying the linear map or to the number of operations on vectors of
type `x` and `b`.
### Return values:
The return value is always of the form `x, info = linsolve(...)` with
- `x`: the approximate solution to the problem, similar type as the right hand side `b`
but possibly with a different `eltype`
- `info`: an object of type [`ConvergenceInfo`], which has the following fields
+ `info.converged::Int`: takes value 0 or 1 depending on whether the solution was
converged up to the requested tolerance
+ `info.residual`: residual `b - f(x)` of the approximate solution `x`
+ `info.normres::Real`: norm of the residual, i.e. `norm(info.residual)`
+ `info.numops::Int`: total number of times that the linear map was applied, i.e. the
number of times that `f` was called, or a vector was multiplied with `A`
+ `info.numiter::Int`: number of times the Krylov subspace was restarted (see below)
!!! warning "Check for convergence"
No warning is printed if no converged solution was found, so always check if
`info.converged == 1`.
### Keyword arguments:
Keyword arguments are given by:
- `verbosity::Int = 0`: verbosity level, i.e. 0 (no messages), 1 (single message
at the end), 2 (information after every iteration), 3 (information per Krylov step)
- `atol::Real`: the requested accuracy, i.e. absolute tolerance, on the norm of the
residual.
- `rtol::Real`: the requested accuracy on the norm of the residual, relative to the norm
of the right hand side `b`.
- `tol::Real`: the requested accuracy on the norm of the residual that is actually used by
the algorithm; it defaults to `max(atol, rtol*norm(b))`. So either use `atol` and `rtol`
or directly use `tol` (in which case the value of `atol` and `rtol` will be ignored).
- `krylovdim::Integer`: the maximum dimension of the Krylov subspace that will be
constructed.
- `maxiter::Integer`: the number of times the Krylov subspace can be rebuilt; see below for
further details on the algorithms.
- `orth::Orthogonalizer`: the orthogonalization method to be used, see
[`Orthogonalizer`](@ref)
- `issymmetric::Bool`: if the linear map is symmetric, only meaningful if `T<:Real`
- `ishermitian::Bool`: if the linear map is hermitian
- `isposdef::Bool`: if the linear map is positive definite
The default values are given by `atol = KrylovDefaults.tol`, `rtol = KrylovDefaults.tol`,
`tol = max(atol, rtol*norm(b))`, `krylovdim = KrylovDefaults.krylovdim`,
`maxiter = KrylovDefaults.maxiter`, `orth = KrylovDefaults.orth`;
see [`KrylovDefaults`](@ref) for details.
The default value for the last three parameters depends on the method. If an
`AbstractMatrix` is used, `issymmetric`, `ishermitian` and `isposdef` are checked for that
matrix, otherwise the default values are `issymmetric = false`,
`ishermitian = T <: Real && issymmetric` and `isposdef = false`.
The final keyword argument `alg_rrule` is relevant only when `linsolve` is used in a setting
where reverse-mode automatic differentation will be used. A custom `ChainRulesCore.rrule` is
defined for `linsolve`, which can be evaluated using different algorithms that can be specified
via `alg_rrule`. As the pullback of `linsolve` involves solving a linear system with the
(Hermitian) adjoint of the linear map, the default value is to use the same algorithm. This
keyword argument should only be used when this default choice is failing or not performing
efficiently. Check the documentation for more information on the possible values for
`alg_rrule` and their implications on the algorithm being used.
### Algorithms
The final (expert) method, without default values and keyword arguments, is the one that is
finally called, and can also be used directly. Here, one specifies the algorithm explicitly.
Currently, only [`CG`](@ref), [`GMRES`](@ref) and [`BiCGStab`](@ref) are implemented, where
`CG` is chosen if `isposdef == true` and `GMRES` is chosen otherwise. Note that in standard
`GMRES` terminology, our parameter `krylovdim` is referred to as the *restart* parameter,
and our `maxiter` parameter counts the number of outer iterations, i.e. restart cycles. In
`CG`, the Krylov subspace is only implicit because short recurrence relations are being
used, and therefore no restarts are required. Therefore, we pass `krylovdim*maxiter` as the
maximal number of CG iterations that can be used by the `CG` algorithm.
"""
# Generic function stub; the docstring above documents all methods.
function linsolve end
# Convenience method without an initial guess: start from a zero vector of the
# appropriate scalar type, derived from b and the coefficients a₀, a₁.
function linsolve(A::AbstractMatrix, b::AbstractVector, a₀::Number=0, a₁::Number=1;
                  kwargs...)
    return linsolve(A, b, (zero(a₀) * zero(a₁)) * b, a₀, a₁; kwargs...)
end
# Same as above for a general linear map `f`, using `scale` instead of `*`.
function linsolve(f, b, a₀::Number=0, a₁::Number=1; kwargs...)
    return linsolve(f, b, scale(b, zero(a₀) * zero(a₁)), a₀, a₁; kwargs...)
end
# Intermediate driver: determine the scalar type of the problem, select an
# algorithm via `linselector`, and dispatch to the expert method.
# `alg_rrule` is captured as an explicit keyword (defaulting to the selected
# forward algorithm) instead of being forwarded through `kwargs...`: `linselector`
# has no `alg_rrule` keyword, so splatting it would raise a MethodError.
function linsolve(f, b, x₀, a₀::Number=0, a₁::Number=1; alg_rrule=nothing, kwargs...)
    Tx = typeof(x₀) # (promote_type of a single type is the identity)
    Tb = typeof(b)
    Tfx = Core.Compiler.return_type(apply, Tuple{typeof(f),Tx})
    # scalar type: combine the inner-product scalar type with those of a₀ and a₁
    T = promote_type(Core.Compiler.return_type(inner, Tuple{Tb,Tfx}), typeof(a₀),
                     typeof(a₁))
    alg = linselector(f, b, T; kwargs...)
    return linsolve(f, b, x₀, alg, a₀, a₁;
                    alg_rrule=(alg_rrule === nothing ? alg : alg_rrule))
end
# Select a linear-solver algorithm for a general linear map `f` based on the
# declared structure: CG for (real symmetric / hermitian) positive definite
# problems, GMRES otherwise. For CG (which has no restarts), the maximal number
# of iterations is krylovdim * maxiter.
function linselector(f,
                     b,
                     T::Type;
                     issymmetric::Bool=false,
                     ishermitian::Bool=T <: Real && issymmetric,
                     isposdef::Bool=false,
                     krylovdim::Int=KrylovDefaults.krylovdim,
                     maxiter::Int=KrylovDefaults.maxiter,
                     rtol::Real=KrylovDefaults.tol,
                     atol::Real=KrylovDefaults.tol,
                     tol::Real=max(atol, rtol * norm(b)),
                     orth=KrylovDefaults.orth,
                     verbosity::Int=0)
    if (T <: Real && issymmetric) || ishermitian
        if isposdef
            return CG(; maxiter=krylovdim * maxiter, tol=tol, verbosity=verbosity)
        else
            # TODO: implement MINRES for symmetric but not posdef; for now use GMRES
            # return MINRES(krylovdim*maxiter, tol=tol)
        end
    end
    # fallthrough: not (hermitian and posdef), so use GMRES
    return GMRES(; krylovdim=krylovdim,
                 maxiter=maxiter,
                 tol=tol,
                 orth=orth,
                 verbosity=verbosity)
end
# Matrix specialization of `linselector`: the structural properties default to
# what can be checked on the matrix itself via LinearAlgebra.
function linselector(A::AbstractMatrix,
                     b,
                     T::Type;
                     issymmetric::Bool=T <: Real && LinearAlgebra.issymmetric(A),
                     ishermitian::Bool=issymmetric || LinearAlgebra.ishermitian(A),
                     isposdef::Bool=ishermitian ? LinearAlgebra.isposdef(A) : false,
                     krylovdim::Int=KrylovDefaults.krylovdim,
                     maxiter::Int=KrylovDefaults.maxiter,
                     rtol::Real=KrylovDefaults.tol,
                     atol::Real=KrylovDefaults.tol,
                     tol::Real=max(atol, rtol * norm(b)),
                     orth=KrylovDefaults.orth,
                     verbosity::Int=0)
    if (T <: Real && issymmetric) || ishermitian
        if isposdef
            return CG(; maxiter=krylovdim * maxiter, tol=tol, verbosity=verbosity)
        else
            # TODO: implement MINRES for symmetric but not posdef; for now use GMRES
            # return MINRES(krylovdim*maxiter, tol=tol)
        end
    end
    # fallthrough: not (hermitian and posdef), so use GMRES
    return GMRES(; krylovdim=krylovdim,
                 maxiter=maxiter,
                 tol=tol,
                 orth=orth,
                 verbosity=verbosity)
end
| KrylovKit | https://github.com/Jutho/KrylovKit.jl.git |
|
[
"MIT"
"""
function expintegrator(A, t::Number, u₀, u₁, …; kwargs...)
function expintegrator(A, t::Number, (u₀, u₁, …); kwargs...)
function expintegrator(A, t::Number, (u₀, u₁, …), algorithm)
Compute ``y = ϕ₀(t*A)*u₀ + t*ϕ₁(t*A)*u₁ + t^2*ϕ₂(t*A)*u₂ + …``, where `A` is a general
linear map, i.e. a `AbstractMatrix` or just a general function or callable object and `u₀`,
`u₁` are of any Julia type with vector like behavior. Here, ``ϕ₀(z) = exp(z)`` and
``ϕⱼ₊₁ = (ϕⱼ(z) - 1/j!)/z``. In particular, ``y = x(t)`` represents the solution of the ODE
``ẋ(t) = A*x(t) + ∑ⱼ t^j/j! uⱼ₊₁`` with ``x(0) = u₀``.
!!! note
When there are only input vectors `u₀` and `u₁`, `t` can equal `Inf`, in which the
algorithm tries to evolve all the way to the fixed point `y = - A \\ u₁ + P₀ u₀` with
`P₀` the projector onto the eigenspace of eigenvalue zero (if any) of `A`. If `A` has
any eigenvalues with real part larger than zero, however, the solution to the ODE will
diverge, i.e. the fixed point is not stable.
!!! warning
The returned solution might be the solution of the ODE integrated up to a smaller time
``t̃ = sign(t) * |t̃|`` with ``|t̃| < |t|``, when the required precision could not be
attained. Always check `info.converged > 0` or `info.residual == 0` (see below).
### Arguments:
The linear map `A` can be an `AbstractMatrix` (dense or sparse) or a general function or
callable object that implements the action of the linear map on a vector. If `A` is an
`AbstractMatrix`, `x` is expected to be an `AbstractVector`, otherwise `x` can be of any
type that behaves as a vector and supports the required methods (see KrylovKit docs).
The time parameter `t` can be real or complex, and it is better to choose `t` e.g. imaginary
and `A` hermitian, then to absorb the imaginary unit in an antihermitian `A`. For the
former, the Lanczos scheme is used to built a Krylov subspace, in which an approximation to
the exponential action of the linear map is obtained. The arguments `u₀`, `u₁`, … can be
of any type and should be in the domain of `A`.
### Return values:
The return value is always of the form `y, info = expintegrator(...)` with
- `y`: the result of the computation, i.e.
``y = ϕ₀(t̃*A)*u₀ + t̃*ϕ₁(t̃*A)*u₁ + t̃^2*ϕ₂(t̃*A)*u₂ + …``
with ``t̃ = sign(t) * |t̃|`` with ``|t̃| <= |t|``, such that the accumulated error in
`y` per unit time is at most equal to the keyword argument `tol`
- `info`: an object of type [`ConvergenceInfo`], which has the following fields
+ `info.converged::Int`: 0 or 1 if the solution `y` was evolved all the way up to the
requested time `t`.
+ `info.residual`: there is no residual in the conventional sense, however, this
value equals the residual time `t - t̃`, i.e. it is zero if `info.converged == 1`
+ `info.normres::Real`: a (rough) estimate of the total error accumulated in the
solution, should be smaller than `tol * |t̃|`
+ `info.numops::Int`: number of times the linear map was applied, i.e. number of times
`f` was called, or a vector was multiplied with `A`
+ `info.numiter::Int`: number of times the Krylov subspace was restarted (see below)
### Keyword arguments:
Keyword arguments and their default values are given by:
- `verbosity::Int = 0`: verbosity level, i.e. 0 (no messages), 1 (single message
at the end), 2 (information after every iteration), 3 (information per Krylov step)
- `krylovdim = 30`: the maximum dimension of the Krylov subspace that will be constructed.
Note that the dimension of the vector space is not known or checked, e.g. `x₀` should
not necessarily support the `Base.length` function. If you know the actual problem
dimension is smaller than the default value, it is useful to reduce the value of
`krylovdim`, though in principle this should be detected.
- `tol = 1e-12`: the requested accuracy per unit time, i.e. if you want a certain
precision `ϵ` on the final result, set `tol = ϵ/abs(t)`. If you work in e.g. single
precision (`Float32`), you should definitely change the default value.
- `maxiter::Int = 100`: the number of times the Krylov subspace can be rebuilt; see below
for further details on the algorithms.
- `issymmetric`: if the linear map is symmetric, only meaningful if `T<:Real`
- `ishermitian`: if the linear map is hermitian
The default value for the last two depends on the method. If an `AbstractMatrix` is
used, `issymmetric` and `ishermitian` are checked for that matrix, otherwise the default
values are `issymmetric = false` and `ishermitian = T <: Real && issymmetric`.
- `eager::Bool = false`: if true, eagerly try to compute the result after every expansion
of the Krylov subspace to test for convergence, otherwise wait until the Krylov subspace
as dimension `krylovdim`. This can result in a faster return, for example if the total
time for the evolution is quite small, but also has some overhead, as more computations
are performed after every expansion step.
### Algorithm
The last method, without keyword arguments and the different vectors `u₀`, `u₁`, … in a
tuple, is the one that is finally called, and can also be used directly. Here, one
specifies the algorithm explicitly as either [`Lanczos`](@ref), for real symmetric or
complex hermitian linear maps, or [`Arnoldi`](@ref), for general linear maps. Note that
these names refer to the process for building the Krylov subspace, and that one can still
use complex time steps in combination with e.g. a real symmetric map.
"""
function expintegrator end

# Keyword-based entry point: determine the working scalar type from the time
# step and the inner products of the input vectors with u₀, select a suitable
# Krylov algorithm (Lanczos/Arnoldi) via `eigselector`, and forward to the
# algorithm-specific method with the vectors collected in a tuple.
function expintegrator(A, t::Number, u₀, us...; kwargs...)
    vectors = (u₀, us...)
    scalartypes = typeof.(inner.(vectors, (u₀,)))
    S = promote_type(typeof(t), scalartypes...)
    algorithm = eigselector(A, S; kwargs...)
    return expintegrator(A, t, vectors, algorithm)
end
# Core driver: evolve w from τ = 0 to τ = |t| for the linear non-homogeneous
# ODE described in the docstring of `expintegrator`, using a Krylov subspace
# (Lanczos or Arnoldi, per `alg`) with adaptive sub-stepping. The step size Δτ
# is controlled by an a-posteriori local error estimate ϵ, requiring the error
# per unit time ϵ/Δτ to stay below η = alg.tol.
function expintegrator(A, t::Number, u::Tuple, alg::Union{Lanczos,Arnoldi})
    # pad with a zero inhomogeneity so that p >= 1 below
    length(u) == 1 && return expintegrator(A, t, (u[1], zerovector(u[1])), alg)
    p = length(u) - 1
    # process initial vector and determine result type
    u₀ = first(u)
    β₀ = norm(u₀) # NOTE(review): currently unused below
    Au₀ = apply(A, u₀) # used to determine return type
    numops = 1
    T = promote_type(typeof(t), (typeof.(inner.(u, (Au₀,))))...)
    S = real(T)
    w₀ = scale(u₀, one(T))
    # krylovdim and related allocations
    krylovdim = alg.krylovdim
    K = krylovdim
    # HH holds the augmented matrix of size (K + p + 1); views into it are
    # exponentiated below to evaluate the required φ-functions
    HH = zeros(T, (krylovdim + p + 1, krylovdim + p + 1))
    # time step parameters
    η::S = alg.tol # tol is per unit time
    totalerr = zero(η)
    sgn = sign(t)
    τ::S = abs(t)
    τ₀::S = zero(τ)
    Δτ::S = τ - τ₀ # don't try any clever initial guesses, rely on correction mechanism
    # safety factors
    δ::S = 1.2 # NOTE(review): δ is currently unused; γ damps step-size updates
    γ::S = 0.8
    # initial vectors
    w = Vector{typeof(w₀)}(undef, p + 1)
    w[1] = w₀
    # reuse the result of apply computed earlier:
    w[2] = scale!!(zerovector(w₀), Au₀, one(T))
    # build w[j+1] = A^j u₀ + Σ_l (sgn*τ₀)^l / l! * contributions of the
    # inhomogeneous terms, evaluated at the current time τ₀ (= 0 here)
    for j in 1:p
        if j > 1
            w[j + 1] = apply(A, w[j])
            numops += 1
        end
        lfac = 1 # running factorial l!
        for l in 0:(p - j)
            w[j + 1] = add!!(w[j + 1], u[j + l + 1], (sgn * τ₀)^l / lfac)
            lfac *= l + 1
        end
    end
    v = zerovector(w₀)
    β = norm(w[p + 1])
    # if the Krylov starting vector vanishes (and there is no genuine
    # inhomogeneity), w₀ is already a fixed point of the ODE
    if β < alg.tol && p == 1
        if alg.verbosity > 0
            @info """expintegrate finished after 0 iterations, converged to fixed point up to error = $β"""
        end
        return w₀, ConvergenceInfo(1, zero(τ), β, 0, numops)
    end
    v = scale!!(v, w[p + 1], 1 / β)
    # initialize iterator
    if alg isa Lanczos
        iter = LanczosIterator(A, w[p + 1], alg.orth)
    else
        iter = ArnoldiIterator(A, w[p + 1], alg.orth)
    end
    fact = initialize(iter; verbosity=alg.verbosity - 2)
    numops += 1
    sizehint!(fact, krylovdim)
    # start outer iteration loop
    maxiter = alg.maxiter
    numiter = 1
    while true
        K = length(fact)
        V = basis(fact) # NOTE(review): V is unused; basis(fact) is called directly below
        if K == krylovdim
            # Krylov space is full: take a (sub)step of size Δτ
            Δτ = min(Δτ, τ - τ₀)
            # Small matrix exponential and error estimation
            H = fill!(view(HH, 1:(K + p + 1), 1:(K + p + 1)), zero(T))
            mul!(view(H, 1:K, 1:K), rayleighquotient(fact), sgn * Δτ)
            # augment with a (p+1)-dimensional nilpotent block so that exp(H)
            # simultaneously yields the action of the required φ-functions
            H[1, K + 1] = 1
            for i in 1:p
                H[K + i, K + i + 1] = 1
            end
            expH = LinearAlgebra.exp!(H)
            ϵ = abs(Δτ^p * β * normres(fact) * expH[K, K + p + 1])
            ω = ϵ / (Δτ * η) # error per unit time relative to tolerance
            q = K / 2 # initial guess for the local order of the error
            # shrink Δτ until the local error per unit time is acceptable;
            # the observed order q is updated from successive error estimates
            while ω > one(ω)
                ϵ_prev = ϵ
                Δτ_prev = Δτ
                Δτ *= (γ / ω)^(1 / (q + 1))
                H = fill!(view(HH, 1:(K + p + 1), 1:(K + p + 1)), zero(T))
                mul!(view(H, 1:K, 1:K), rayleighquotient(fact), sgn * Δτ)
                H[1, K + 1] = 1
                for i in 1:p
                    H[K + i, K + i + 1] = 1
                end
                expH = LinearAlgebra.exp!(H)
                ϵ = abs(Δτ^p * β * normres(fact) * expH[K, K + p + 1])
                ω = ϵ / (Δτ * η)
                q = max(zero(q), log(ϵ / ϵ_prev) / log(Δτ / Δτ_prev) - 1)
            end
            # take time step
            totalerr += ϵ
            jfac = 1 # running factorial j!
            for j in 1:(p - 1)
                w₀ = add!!(w₀, w[j + 1], (sgn * Δτ)^j / jfac)
                jfac *= (j + 1)
            end
            w[p + 1] = mul!(w[p + 1], basis(fact), view(expH, 1:K, K + p))
            # add first correction
            w[p + 1] = add!!(w[p + 1], residual(fact), expH[K, K + p + 1])
            w₀ = add!!(w₀, w[p + 1], β * (sgn * Δτ)^p)
            τ₀ += Δτ
            # increase time step for next iteration:
            if ω < γ
                Δτ *= (γ / ω)^(1 / (q + 1))
            end
            if alg.verbosity > 1
                msg = "expintegrate in iteration $numiter: "
                msg *= "reached time " * @sprintf("%.2e", τ₀)
                msg *= ", total error = " * @sprintf("%.4e", totalerr)
                @info msg
            end
        elseif normres(fact) <= ((τ - τ₀) * η) || alg.eager
            # early convergence check: attempt the full remaining step τ - τ₀
            # before the Krylov subspace has reached its maximal dimension
            # Small matrix exponential and error estimation
            H = fill!(view(HH, 1:(K + p + 1), 1:(K + p + 1)), zero(T))
            mul!(view(H, 1:K, 1:K), rayleighquotient(fact), sgn * (τ - τ₀))
            H[1, K + 1] = 1
            for i in 1:p
                H[K + i, K + i + 1] = 1
            end
            expH = LinearAlgebra.exp!(H)
            ϵ = abs((τ - τ₀)^p * β * normres(fact) * expH[K, K + p + 1])
            ω = ϵ / ((τ - τ₀) * η)
            if ω < one(ω)
                # take time step
                totalerr += ϵ
                jfac = 1
                for j in 1:(p - 1)
                    w₀ = add!!(w₀, w[j + 1], (sgn * (τ - τ₀))^j / jfac)
                    jfac *= (j + 1)
                end
                w[p + 1] = mul!(w[p + 1], basis(fact), view(expH, 1:K, K + p))
                # add first correction
                w[p + 1] = add!!(w[p + 1], residual(fact), expH[K, K + p + 1])
                w₀ = add!!(w₀, w[p + 1], β * (sgn * (τ - τ₀))^p)
                τ₀ = τ
            end
        end
        # finished the full time interval: report success
        if τ₀ >= τ
            if alg.verbosity > 0
                @info """expintegrate finished after $numiter iterations: total error = $totalerr"""
            end
            return w₀, ConvergenceInfo(1, zero(τ), totalerr, numiter, numops)
        end
        if K < krylovdim
            fact = expand!(iter, fact; verbosity=alg.verbosity - 2)
            numops += 1
        else
            if numiter == maxiter
                if alg.verbosity > 0
                    @warn """expintegrate finished without convergence after $numiter iterations:
                     total error = $totalerr, residual time = $(τ - τ₀)"""
                end
                return w₀, ConvergenceInfo(0, τ - τ₀, totalerr, numiter, numops)
            else # reinitialize
                # rebuild the inhomogeneity vectors w[2:p+1] at the new time τ₀
                # and restart the Krylov factorization from w[p+1]
                for j in 1:p
                    w[j + 1] = apply(A, w[j])
                    numops += 1
                    lfac = 1
                    for l in 0:(p - j)
                        w[j + 1] = add!!(w[j + 1], u[j + l + 1], (sgn * τ₀)^l / lfac)
                        lfac *= l + 1
                    end
                end
                β = norm(w[p + 1])
                if β < alg.tol && p == 1 # w₀ is fixed point of ODE
                    if alg.verbosity > 0
                        @info """expintegrate finished after $numiter iterations, converged to fixed point up to error = $β"""
                    end
                    return w₀, ConvergenceInfo(1, zero(τ), β, numiter, numops)
                end
                v = scale!!(v, w[p + 1], 1 / β)
                if alg isa Lanczos
                    iter = LanczosIterator(A, w[p + 1], alg.orth)
                else
                    iter = ArnoldiIterator(A, w[p + 1], alg.orth)
                end
                fact = initialize!(iter, fact; verbosity=alg.verbosity - 2)
                numops += 1
                numiter += 1
            end
        end
    end
end
| KrylovKit | https://github.com/Jutho/KrylovKit.jl.git |
|
[
"MIT"
] | 0.8.1 | 3c2a016489c38f35160a246c91a3f3353c47bb68 | code | 4323 | """
function exponentiate(A, t::Number, x; kwargs...)
function exponentiate(A, t::Number, x, algorithm)
Compute ``y = exp(t*A) x``, where `A` is a general linear map, i.e. a `AbstractMatrix` or
just a general function or callable object and `x` is of any Julia type with vector like
behavior.
### Arguments:
The linear map `A` can be an `AbstractMatrix` (dense or sparse) or a general function or
callable object that implements the action of the linear map on a vector. If `A` is an
`AbstractMatrix`, `x` is expected to be an `AbstractVector`, otherwise `x` can be of any
type that behaves as a vector and supports the required methods (see KrylovKit docs).
The time parameter `t` can be real or complex, and it is better to choose `t` e.g. imaginary
and `A` hermitian, then to absorb the imaginary unit in an antihermitian `A`. For the
former, the Lanczos scheme is used to built a Krylov subspace, in which an approximation to
the exponential action of the linear map is obtained. The argument `x` can be of any type
and should be in the domain of `A`.
### Return values:
The return value is always of the form `y, info = exponentiate(...)` with
- `y`: the result of the computation, i.e. `y = exp(t*A)*x`
- `info`: an object of type [`ConvergenceInfo`], which has the following fields
+ `info.converged::Int`: 0 or 1 if the solution `y` was approximated up to the
requested tolerance `tol`.
+ `info.residual::Nothing`: value `nothing`, there is no concept of a residual in
this case
+ `info.normres::Real`: a (rough) estimate of the error between the approximate and
exact solution
+ `info.numops::Int`: number of times the linear map was applied, i.e. number of times
`f` was called, or a vector was multiplied with `A`
+ `info.numiter::Int`: number of times the Krylov subspace was restarted (see below)
!!! warning "Check for convergence"
By default (i.e. if `verbosity = 0`, see below), no warning is printed if the solution
was not found with the requested precision, so be sure to check `info.converged == 1`.
### Keyword arguments:
Keyword arguments and their default values are given by:
- `verbosity::Int = 0`: verbosity level, i.e. 0 (no messages), 1 (single message
at the end), 2 (information after every iteration), 3 (information per Krylov step)
- `krylovdim = 30`: the maximum dimension of the Krylov subspace that will be constructed.
Note that the dimension of the vector space is not known or checked, e.g. `x₀` should
not necessarily support the `Base.length` function. If you know the actual problem
dimension is smaller than the default value, it is useful to reduce the value of
`krylovdim`, though in principle this should be detected.
- `tol = 1e-12`: the requested accuracy per unit time, i.e. if you want a certain
precision `ϵ` on the final result, set `tol = ϵ/abs(t)`. If you work in e.g. single
precision (`Float32`), you should definitely change the default value.
- `maxiter::Int = 100`: the number of times the Krylov subspace can be rebuilt; see below
for further details on the algorithms.
- `issymmetric`: if the linear map is symmetric, only meaningful if `T<:Real`
- `ishermitian`: if the linear map is hermitian
The default value for the last two depends on the method. If an `AbstractMatrix` is
used, `issymmetric` and `ishermitian` are checked for that matrix, otherwise the default
values are `issymmetric = false` and `ishermitian = T <: Real && issymmetric`.
- `eager::Bool = false`: if true, eagerly try to compute the result after every expansion
of the Krylov subspace to test for convergence, otherwise wait until the Krylov subspace
as dimension `krylovdim`. This can result in a faster return, for example if the total
time for the evolution is quite small, but also has some overhead, as more computations
are performed after every expansion step.
### Algorithm
This is actually a simple wrapper over more general method [`expintegrator`](@ref) for
for integrating a linear non-homogeneous ODE.
"""
function exponentiate end

# `exponentiate` is a thin convenience layer: computing y = exp(t*A)*x is the
# special case of `expintegrator` with no inhomogeneous terms, so both methods
# simply forward their arguments.
function exponentiate(A, t::Number, v; kwargs...)
    return expintegrator(A, t, v; kwargs...)
end
function exponentiate(A, t::Number, v, alg::Union{Lanczos,Arnoldi})
    return expintegrator(A, t, (v,), alg)
end
| KrylovKit | https://github.com/Jutho/KrylovKit.jl.git |
|
[
"MIT"
] | 0.8.1 | 3c2a016489c38f35160a246c91a3f3353c47bb68 | code | 41339 | module LinsolveAD
using KrylovKit, LinearAlgebra
using Random, Test, TestExtras
using ChainRulesCore, ChainRulesTestUtils, Zygote, FiniteDifferences
fdm = ChainRulesTestUtils._fdm
n = 10
N = 30
# Build closures mapping flat parameter vectors (as produced by
# `FiniteDifferences.to_vec`) to the flattened `linsolve` solution, so that
# Zygote and FiniteDifferences jacobians can be compared. Returns a closure
# taking the matrix directly, a closure wrapping it as a function, and the
# flattened versions of `A`, `b` and `x`.
function build_mat_example(A, b, x, alg, alg_rrule)
    Avec, A_fromvec = to_vec(A)
    bvec, b_fromvec = to_vec(b)
    xvec, x_fromvec = to_vec(x)
    T = eltype(A)

    # shared kernel: solve with the given operator and flatten the result
    function solve_and_flatten(op, rhs, guess)
        sol, info = linsolve(op, rhs, guess, alg; alg_rrule=alg_rrule)
        if info.converged == 0
            @warn "linsolve did not converge:"
            println("normres = ", info.normres)
        end
        solvec, = to_vec(sol)
        return solvec
    end

    function mat_example(Av, bv, xv)
        return solve_and_flatten(A_fromvec(Av), b_fromvec(bv), x_fromvec(xv))
    end

    function mat_example_fun(Av, bv, xv)
        Ã = A_fromvec(Av)
        return solve_and_flatten(y -> Ã * y, b_fromvec(bv), x_fromvec(xv))
    end

    return mat_example, mat_example_fun, Avec, bvec, xvec
end
# Build a closure for a rank-one-perturbed linear problem
# y ↦ A*y + c*dot(d, y), solved via the generalized `linsolve` form with
# scalar coefficients `e` and `f`, mapping flat inputs to the flattened
# solution. Returns the closure together with all flattened inputs.
function build_fun_example(A, b, c, d, e, f, alg, alg_rrule)
    Avec, matfromvec = to_vec(A)
    bvec, vecfromvec = to_vec(b)
    cvec, = to_vec(c)
    dvec, = to_vec(d)
    evec, scalarfromvec = to_vec(e)
    fvec, = to_vec(f)
    function fun_example(Av, bv, cv, dv, ev, fv)
        M = matfromvec(Av)
        rhs = vecfromvec(bv)
        u = vecfromvec(cv)
        v = vecfromvec(dv)
        α₀ = scalarfromvec(ev)
        α₁ = scalarfromvec(fv)
        op = y -> M * y + u * dot(v, y)
        # convergence is deliberately not checked here
        sol, info = linsolve(op, rhs, zero(rhs), alg, α₀, α₁; alg_rrule=alg_rrule)
        solvec, = to_vec(sol)
        return solvec
    end
    return fun_example, Avec, bvec, cvec, dvec, evec, fvec
end
@testset "Small linsolve AD test with eltype=$T" for T in (Float32, Float64, ComplexF32,
ComplexF64)
A = 2 * (rand(T, (n, n)) .- one(T) / 2)
b = 2 * (rand(T, n) .- one(T) / 2)
b /= norm(b)
x = 2 * (rand(T, n) .- one(T) / 2)
condA = cond(A)
tol = condA * (T <: Real ? eps(T) : 4 * eps(real(T)))
alg = GMRES(; tol=tol, krylovdim=n, maxiter=1)
config = Zygote.ZygoteRuleConfig()
_, pb = ChainRulesCore.rrule(config, linsolve, A, b, x, alg, 0, 1; alg_rrule=alg)
@constinferred pb((ZeroTangent(), NoTangent()))
@constinferred pb((rand(T, n), NoTangent()))
mat_example, mat_example_fun, Avec, bvec, xvec = build_mat_example(A, b, x, alg, alg)
(JA, Jb, Jx) = FiniteDifferences.jacobian(fdm, mat_example, Avec, bvec, xvec)
(JA1, Jb1, Jx1) = Zygote.jacobian(mat_example, Avec, bvec, xvec)
(JA2, Jb2, Jx2) = Zygote.jacobian(mat_example_fun, Avec, bvec, xvec)
@test isapprox(JA, JA1; rtol=condA * sqrt(eps(real(T))))
@test all(isapprox.(JA1, JA2; atol=n * eps(real(T))))
# factor 2 is minimally necessary for complex case, but 3 is more robust
@test norm(Jx, Inf) < condA * sqrt(eps(real(T)))
@test all(iszero, Jx1)
end
@testset "Large linsolve AD test with eltype=$T" for T in (Float64, ComplexF64)
A = rand(T, (N, N)) .- one(T) / 2
A = I - (9 // 10) * A / maximum(abs, eigvals(A))
b = 2 * (rand(T, N) .- one(T) / 2)
c = 2 * (rand(T, N) .- one(T) / 2)
d = 2 * (rand(T, N) .- one(T) / 2)
e = rand(T)
f = rand(T)
# mix algorithms]
tol = N^2 * eps(real(T))
alg1 = GMRES(; tol=tol, krylovdim=20)
alg2 = BiCGStab(; tol=tol, maxiter=100) # BiCGStab seems to require slightly smaller tolerance for tests to work
for (alg, alg_rrule) in ((alg1, alg2), (alg2, alg1))
fun_example, Avec, bvec, cvec, dvec, evec, fvec = build_fun_example(A, b, c, d, e,
f, alg,
alg_rrule)
(JA, Jb, Jc, Jd, Je, Jf) = FiniteDifferences.jacobian(fdm, fun_example,
Avec, bvec, cvec, dvec, evec,
fvec)
(JA′, Jb′, Jc′, Jd′, Je′, Jf′) = Zygote.jacobian(fun_example, Avec, bvec, cvec,
dvec, evec, fvec)
@test JA ≈ JA′
@test Jb ≈ Jb′
@test Jc ≈ Jc′
@test Jd ≈ Jd′
@test Je ≈ Je′
@test Jf ≈ Jf′
end
end
end
module EigsolveAD
using KrylovKit, LinearAlgebra
using Random, Test, TestExtras
using ChainRulesCore, ChainRulesTestUtils, Zygote, FiniteDifferences
Random.seed!(987654321)
fdm = ChainRulesTestUtils._fdm
n = 10
N = 30
# Build flattened closures around `eigsolve` (matrix and function form) for
# jacobian comparison between Zygote and FiniteDifferences. The `_fd` variant
# additionally fixes the arbitrary eigenvector phase against the reference
# solution `vecs`, since finite differencing is sensitive to it.
function build_mat_example(A, x, howmany::Int, which, alg, alg_rrule)
    Avec, A_fromvec = to_vec(A)
    xvec, x_fromvec = to_vec(x)
    vals, vecs, info = eigsolve(A, x, howmany, which, alg)
    info.converged < howmany && @warn "eigsolve did not converge"
    # for real A, complex eigenvalues come in conjugate pairs: never split a
    # pair at the truncation boundary
    if eltype(A) <: Real && length(vals) > howmany &&
       vals[howmany] == conj(vals[howmany + 1])
        howmany += 1
    end
    function mat_example(Av, xv)
        Ã = A_fromvec(Av)
        x̃ = x_fromvec(xv)
        vals′, vecs′, info′ = eigsolve(Ã, x̃, howmany, which, alg; alg_rrule=alg_rrule)
        info′.converged < howmany && @warn "eigsolve did not converge"
        catresults = vcat(vals′[1:howmany], vecs′[1:howmany]...)
        # split complex results into real and imaginary parts for real-valued AD
        if eltype(catresults) <: Complex
            return vcat(real(catresults), imag(catresults))
        else
            return catresults
        end
    end
    function mat_example_fun(Av, xv)
        Ã = A_fromvec(Av)
        x̃ = x_fromvec(xv)
        f = x -> Ã * x
        vals′, vecs′, info′ = eigsolve(f, x̃, howmany, which, alg; alg_rrule=alg_rrule)
        info′.converged < howmany && @warn "eigsolve did not converge"
        catresults = vcat(vals′[1:howmany], vecs′[1:howmany]...)
        if eltype(catresults) <: Complex
            return vcat(real(catresults), imag(catresults))
        else
            return catresults
        end
    end
    function mat_example_fd(Av, xv)
        Ã = A_fromvec(Av)
        x̃ = x_fromvec(xv)
        vals′, vecs′, info′ = eigsolve(Ã, x̃, howmany, which, alg; alg_rrule=alg_rrule)
        info′.converged < howmany && @warn "eigsolve did not converge"
        # gauge-fix the eigenvector phase towards the reference vectors
        for i in 1:howmany
            d = dot(vecs[i], vecs′[i])
            @assert abs(d) > sqrt(eps(real(eltype(A))))
            phasefix = abs(d) / d
            vecs′[i] = vecs′[i] * phasefix
        end
        catresults = vcat(vals′[1:howmany], vecs′[1:howmany]...)
        if eltype(catresults) <: Complex
            return vcat(real(catresults), imag(catresults))
        else
            return catresults
        end
    end
    return mat_example, mat_example_fun, mat_example_fd, Avec, xvec, vals, vecs, howmany
end
# Build flattened closures around `eigsolve` for the rank-one-perturbed map
# y ↦ A*y + c*dot(d, y): an AD variant for Zygote and a phase-gauge-fixed
# variant for finite differences.
function build_fun_example(A, x, c, d, howmany::Int, which, alg, alg_rrule)
    Avec, matfromvec = to_vec(A)
    xvec, vecfromvec = to_vec(x)
    cvec, = to_vec(c)
    dvec, = to_vec(d)
    vals, vecs, info = eigsolve(x, howmany, which, alg) do y
        return A * y + c * dot(d, y)
    end
    info.converged < howmany && @warn "eigsolve did not converge"
    # avoid splitting a complex-conjugate eigenvalue pair (real A only)
    if eltype(A) <: Real && length(vals) > howmany &&
       vals[howmany] == conj(vals[howmany + 1])
        howmany += 1
    end
    # capture the (possibly incremented) howmany by value via a let block
    fun_example_ad = let howmany′ = howmany
        function (Av, xv, cv, dv)
            Ã = matfromvec(Av)
            x̃ = vecfromvec(xv)
            c̃ = vecfromvec(cv)
            d̃ = vecfromvec(dv)
            vals′, vecs′, info′ = eigsolve(x̃, howmany′, which, alg;
                                           alg_rrule=alg_rrule) do y
                return Ã * y + c̃ * dot(d̃, y)
            end
            info′.converged < howmany′ && @warn "eigsolve did not converge"
            catresults = vcat(vals′[1:howmany′], vecs′[1:howmany′]...)
            if eltype(catresults) <: Complex
                return vcat(real(catresults), imag(catresults))
            else
                return catresults
            end
        end
    end
    fun_example_fd = let howmany′ = howmany
        function (Av, xv, cv, dv)
            Ã = matfromvec(Av)
            x̃ = vecfromvec(xv)
            c̃ = vecfromvec(cv)
            d̃ = vecfromvec(dv)
            vals′, vecs′, info′ = eigsolve(x̃, howmany′, which, alg;
                                           alg_rrule=alg_rrule) do y
                return Ã * y + c̃ * dot(d̃, y)
            end
            info′.converged < howmany′ && @warn "eigsolve did not converge"
            # gauge-fix the eigenvector phase towards the reference vectors
            for i in 1:howmany′
                d = dot(vecs[i], vecs′[i])
                @assert abs(d) > sqrt(eps(real(eltype(A))))
                phasefix = abs(d) / d
                vecs′[i] = vecs′[i] * phasefix
            end
            catresults = vcat(vals′[1:howmany′], vecs′[1:howmany′]...)
            if eltype(catresults) <: Complex
                return vcat(real(catresults), imag(catresults))
            else
                return catresults
            end
        end
    end
    return fun_example_ad, fun_example_fd, Avec, xvec, cvec, dvec, vals, vecs, howmany
end
# Hermitian analogue of `build_fun_example`: the map is
# y ↦ Hermitian(A)*y + c*dot(c, y), which is hermitian by construction, so no
# conjugate-pair handling of `howmany` is needed.
function build_hermitianfun_example(A, x, c, howmany::Int, which, alg, alg_rrule)
    Avec, matfromvec = to_vec(A)
    xvec, xvecfromvec = to_vec(x)
    cvec, cvecfromvec = to_vec(c)
    vals, vecs, info = eigsolve(x, howmany, which, alg) do y
        return Hermitian(A) * y + c * dot(c, y)
    end
    info.converged < howmany && @warn "eigsolve did not converge"
    function fun_example(Av, xv, cv)
        Ã = matfromvec(Av)
        x̃ = xvecfromvec(xv)
        c̃ = cvecfromvec(cv)
        vals′, vecs′, info′ = eigsolve(x̃, howmany, which, alg;
                                       alg_rrule=alg_rrule) do y
            return Hermitian(Ã) * y + c̃ * dot(c̃, y)
        end
        info′.converged < howmany && @warn "eigsolve did not converge"
        catresults = vcat(vals′[1:howmany], vecs′[1:howmany]...)
        if eltype(catresults) <: Complex
            return vcat(real(catresults), imag(catresults))
        else
            return catresults
        end
    end
    function fun_example_fd(Av, xv, cv)
        Ã = matfromvec(Av)
        x̃ = xvecfromvec(xv)
        c̃ = cvecfromvec(cv)
        vals′, vecs′, info′ = eigsolve(x̃, howmany, which, alg;
                                       alg_rrule=alg_rrule) do y
            return Hermitian(Ã) * y + c̃ * dot(c̃, y)
        end
        info′.converged < howmany && @warn "eigsolve did not converge"
        # gauge-fix the eigenvector phase towards the reference vectors
        for i in 1:howmany
            d = dot(vecs[i], vecs′[i])
            @assert abs(d) > sqrt(eps(real(eltype(A))))
            phasefix = abs(d) / d
            vecs′[i] = vecs′[i] * phasefix
        end
        catresults = vcat(vals′[1:howmany], vecs′[1:howmany]...)
        if eltype(catresults) <: Complex
            return vcat(real(catresults), imag(catresults))
        else
            return catresults
        end
    end
    return fun_example, fun_example_fd, Avec, xvec, cvec, vals, vecs, howmany
end
@timedtestset "Small eigsolve AD test for eltype=$T" for T in
(Float32, Float64, ComplexF32,
ComplexF64)
if T <: Complex
whichlist = (:LM, :SR, :LR, :SI, :LI)
else
whichlist = (:LM, :SR, :LR)
end
A = 2 * (rand(T, (n, n)) .- one(T) / 2)
x = 2 * (rand(T, n) .- one(T) / 2)
x /= norm(x)
howmany = 3
condA = cond(A)
tol = n * condA * (T <: Real ? eps(T) : 4 * eps(real(T)))
alg = Arnoldi(; tol=tol, krylovdim=n)
alg_rrule1 = Arnoldi(; tol=tol, krylovdim=2n, verbosity=-1)
alg_rrule2 = GMRES(; tol=tol, krylovdim=n + 1, verbosity=-1)
config = Zygote.ZygoteRuleConfig()
@testset for which in whichlist
for alg_rrule in (alg_rrule1, alg_rrule2)
# unfortunately, rrule does not seem type stable for function arguments, because the
# `rrule_via_ad` call does not produce type stable `rrule`s for the function
(vals, vecs, info), pb = ChainRulesCore.rrule(config, eigsolve, A, x, howmany,
which, alg; alg_rrule=alg_rrule)
# NOTE: the following is not necessary here, as it is corrected for in the `eigsolve` rrule
# if length(vals) > howmany && vals[howmany] == conj(vals[howmany + 1])
# howmany += 1
# end
@constinferred pb((ZeroTangent(), ZeroTangent(), NoTangent()))
@constinferred pb((randn(T, howmany), ZeroTangent(), NoTangent()))
@constinferred pb((randn(T, howmany), [randn(T, n)], NoTangent()))
@constinferred pb((randn(T, howmany), [randn(T, n) for _ in 1:howmany],
NoTangent()))
end
for alg_rrule in (alg_rrule1, alg_rrule2)
mat_example, mat_example_fun, mat_example_fd, Avec, xvec, vals, vecs, howmany = build_mat_example(A,
x,
howmany,
which,
alg,
alg_rrule)
(JA, Jx) = FiniteDifferences.jacobian(fdm, mat_example_fd, Avec, xvec)
(JA1, Jx1) = Zygote.jacobian(mat_example, Avec, xvec)
(JA2, Jx2) = Zygote.jacobian(mat_example_fun, Avec, xvec)
# finite difference comparison using some kind of tolerance heuristic
@test isapprox(JA, JA1; rtol=condA * sqrt(eps(real(T))))
@test all(isapprox.(JA1, JA2; atol=n * eps(real(T))))
@test norm(Jx, Inf) < condA * sqrt(eps(real(T)))
@test all(iszero, Jx1)
@test all(iszero, Jx2)
# some analysis
∂vals = complex.(JA1[1:howmany, :], JA1[howmany * (n + 1) .+ (1:howmany), :])
∂vecs = map(1:howmany) do i
return complex.(JA1[(howmany + (i - 1) * n) .+ (1:n), :],
JA1[(howmany * (n + 2) + (i - 1) * n) .+ (1:n), :])
end
if eltype(A) <: Complex # test holomorphicity / Cauchy-Riemann equations
# for eigenvalues
@test real(∂vals[:, 1:2:(2n^2)]) ≈ +imag(∂vals[:, 2:2:(2n^2)])
@test imag(∂vals[:, 1:2:(2n^2)]) ≈ -real(∂vals[:, 2:2:(2n^2)])
# and for eigenvectors
for i in 1:howmany
@test real(∂vecs[i][:, 1:2:(2n^2)]) ≈ +imag(∂vecs[i][:, 2:2:(2n^2)])
@test imag(∂vecs[i][:, 1:2:(2n^2)]) ≈ -real(∂vecs[i][:, 2:2:(2n^2)])
end
end
# test orthogonality of vecs and ∂vecs
for i in 1:howmany
@test all(isapprox.(abs.(vecs[i]' * ∂vecs[i]), 0; atol=sqrt(eps(real(T)))))
end
end
end
if T <: Complex
@testset "test warnings and info" begin
alg_rrule = Arnoldi(; tol=tol, krylovdim=n, verbosity=-1)
(vals, vecs, info), pb = ChainRulesCore.rrule(config, eigsolve, A, x, howmany,
:LR, alg; alg_rrule=alg_rrule)
@test_logs pb((ZeroTangent(), im .* vecs[1:2] .+ vecs[2:-1:1], NoTangent()))
alg_rrule = Arnoldi(; tol=tol, krylovdim=n, verbosity=0)
(vals, vecs, info), pb = ChainRulesCore.rrule(config, eigsolve, A, x, howmany,
:LR, alg; alg_rrule=alg_rrule)
@test_logs (:warn,) pb((ZeroTangent(), im .* vecs[1:2] .+ vecs[2:-1:1],
NoTangent()))
pbs = @test_logs pb((ZeroTangent(), vecs[1:2], NoTangent()))
@test norm(unthunk(pbs[1]), Inf) < condA * sqrt(eps(real(T)))
alg_rrule = Arnoldi(; tol=tol, krylovdim=n, verbosity=1)
(vals, vecs, info), pb = ChainRulesCore.rrule(config, eigsolve, A, x, howmany,
:LR, alg; alg_rrule=alg_rrule)
@test_logs (:warn,) (:info,) pb((ZeroTangent(), im .* vecs[1:2] .+ vecs[2:-1:1],
NoTangent()))
pbs = @test_logs (:info,) pb((ZeroTangent(), vecs[1:2], NoTangent()))
@test norm(unthunk(pbs[1]), Inf) < condA * sqrt(eps(real(T)))
alg_rrule = GMRES(; tol=tol, krylovdim=n, verbosity=-1)
(vals, vecs, info), pb = ChainRulesCore.rrule(config, eigsolve, A, x, howmany,
:LR, alg; alg_rrule=alg_rrule)
@test_logs pb((ZeroTangent(), im .* vecs[1:2] .+ vecs[2:-1:1], NoTangent()))
alg_rrule = GMRES(; tol=tol, krylovdim=n, verbosity=0)
(vals, vecs, info), pb = ChainRulesCore.rrule(config, eigsolve, A, x, howmany,
:LR, alg; alg_rrule=alg_rrule)
@test_logs (:warn,) (:warn,) pb((ZeroTangent(),
im .* vecs[1:2] .+
vecs[2:-1:1],
NoTangent()))
pbs = @test_logs pb((ZeroTangent(), vecs[1:2], NoTangent()))
@test norm(unthunk(pbs[1]), Inf) < condA * sqrt(eps(real(T)))
alg_rrule = GMRES(; tol=tol, krylovdim=n, verbosity=1)
(vals, vecs, info), pb = ChainRulesCore.rrule(config, eigsolve, A, x, howmany,
:LR, alg; alg_rrule=alg_rrule)
@test_logs (:warn,) (:info,) (:warn,) (:info,) pb((ZeroTangent(),
im .* vecs[1:2] .+
vecs[2:-1:1],
NoTangent()))
pbs = @test_logs (:info,) (:info,) pb((ZeroTangent(), vecs[1:2], NoTangent()))
@test norm(unthunk(pbs[1]), Inf) < condA * sqrt(eps(real(T)))
end
end
end
@timedtestset "Large eigsolve AD test with eltype=$T" for T in (Float64, ComplexF64)
if T <: Complex
whichlist = (:LM, :SI)
else
whichlist = (:LM, :SR)
end
@testset for which in whichlist
A = rand(T, (N, N)) .- one(T) / 2
A = I - (9 // 10) * A / maximum(abs, eigvals(A))
x = 2 * (rand(T, N) .- one(T) / 2)
x /= norm(x)
c = 2 * (rand(T, N) .- one(T) / 2)
d = 2 * (rand(T, N) .- one(T) / 2)
howmany = 2
tol = 2 * N^2 * eps(real(T))
alg = Arnoldi(; tol=tol, krylovdim=2n)
alg_rrule1 = Arnoldi(; tol=tol, krylovdim=2n, verbosity=-1)
alg_rrule2 = GMRES(; tol=tol, krylovdim=2n, verbosity=-1)
@testset for alg_rrule in (alg_rrule1, alg_rrule2)
fun_example, fun_example_fd, Avec, xvec, cvec, dvec, vals, vecs, howmany = build_fun_example(A,
x,
c,
d,
howmany,
which,
alg,
alg_rrule)
(JA, Jx, Jc, Jd) = FiniteDifferences.jacobian(fdm, fun_example_fd, Avec, xvec,
cvec, dvec)
(JA′, Jx′, Jc′, Jd′) = Zygote.jacobian(fun_example, Avec, xvec, cvec, dvec)
@test JA ≈ JA′
@test Jc ≈ Jc′
@test Jd ≈ Jd′
end
end
end
@timedtestset "Large Hermitian eigsolve AD test with eltype=$T" for T in
(Float64, ComplexF64)
whichlist = (:LR, :SR)
@testset for which in whichlist
A = rand(T, (N, N)) .- one(T) / 2
A = I - (9 // 10) * A / maximum(abs, eigvals(A))
x = 2 * (rand(T, N) .- one(T) / 2)
x /= norm(x)
c = 2 * (rand(T, N) .- one(T) / 2)
howmany = 2
tol = 2 * N^2 * eps(real(T))
alg = Lanczos(; tol=tol, krylovdim=2n)
alg_rrule1 = Arnoldi(; tol=tol, krylovdim=2n, verbosity=-1)
alg_rrule2 = GMRES(; tol=tol, krylovdim=2n, verbosity=-1)
@testset for alg_rrule in (alg_rrule1, alg_rrule2)
fun_example, fun_example_fd, Avec, xvec, cvec, vals, vecs, howmany = build_hermitianfun_example(A,
x,
c,
howmany,
which,
alg,
alg_rrule)
(JA, Jx, Jc) = FiniteDifferences.jacobian(fdm, fun_example_fd, Avec, xvec,
cvec)
(JA′, Jx′, Jc′) = Zygote.jacobian(fun_example, Avec, xvec, cvec)
@test JA ≈ JA′
@test Jc ≈ Jc′
end
end
end
end
module SvdsolveAD
using KrylovKit, LinearAlgebra
using Random, Test, TestExtras
using ChainRulesCore, ChainRulesTestUtils, Zygote, FiniteDifferences
Random.seed!(123456789)
fdm = ChainRulesTestUtils._fdm
n = 10
N = 30
# Build flattened closures around `svdsolve` in its three calling forms
# (matrix, (f, fᴴ) tuple, and f(x, ::Val) flag form) for jacobian comparison,
# plus a finite-difference variant that fixes the joint left/right singular
# vector phase against the reference solution.
function build_mat_example(A, x, howmany::Int, alg, alg_rrule)
    Avec, A_fromvec = to_vec(A)
    xvec, x_fromvec = to_vec(x)
    vals, lvecs, rvecs, info = svdsolve(A, x, howmany, :LR, alg)
    info.converged < howmany && @warn "svdsolve did not converge"
    function mat_example_mat(Av, xv)
        Ã = A_fromvec(Av)
        x̃ = x_fromvec(xv)
        vals′, lvecs′, rvecs′, info′ = svdsolve(Ã, x̃, howmany, :LR, alg;
                                                alg_rrule=alg_rrule)
        info′.converged < howmany && @warn "svdsolve did not converge"
        catresults = vcat(vals′[1:howmany], lvecs′[1:howmany]..., rvecs′[1:howmany]...)
        # split complex results into real and imaginary parts for real-valued AD
        if eltype(catresults) <: Complex
            return vcat(real(catresults), imag(catresults))
        else
            return catresults
        end
    end
    function mat_example_fval(Av, xv)
        Ã = A_fromvec(Av)
        x̃ = x_fromvec(xv)
        # single function with a Val flag distinguishing A from adjoint(A)
        f = (x, adj::Val) -> (adj isa Val{true}) ? adjoint(Ã) * x : Ã * x
        vals′, lvecs′, rvecs′, info′ = svdsolve(f, x̃, howmany, :LR, alg;
                                                alg_rrule=alg_rrule)
        info′.converged < howmany && @warn "svdsolve did not converge"
        catresults = vcat(vals′[1:howmany], lvecs′[1:howmany]..., rvecs′[1:howmany]...)
        if eltype(catresults) <: Complex
            return vcat(real(catresults), imag(catresults))
        else
            return catresults
        end
    end
    function mat_example_ftuple(Av, xv)
        Ã = A_fromvec(Av)
        x̃ = x_fromvec(xv)
        (f, fᴴ) = (x -> Ã * x, x -> adjoint(Ã) * x)
        vals′, lvecs′, rvecs′, info′ = svdsolve((f, fᴴ), x̃, howmany, :LR, alg;
                                                alg_rrule=alg_rrule)
        info′.converged < howmany && @warn "svdsolve did not converge"
        catresults = vcat(vals′[1:howmany], lvecs′[1:howmany]..., rvecs′[1:howmany]...)
        if eltype(catresults) <: Complex
            return vcat(real(catresults), imag(catresults))
        else
            return catresults
        end
    end
    function mat_example_fd(Av, xv)
        Ã = A_fromvec(Av)
        x̃ = x_fromvec(xv)
        vals′, lvecs′, rvecs′, info′ = svdsolve(Ã, x̃, howmany, :LR, alg;
                                                alg_rrule=alg_rrule)
        info′.converged < howmany && @warn "svdsolve did not converge"
        # joint gauge fix: left and right vectors share a common phase
        for i in 1:howmany
            dl = dot(lvecs[i], lvecs′[i])
            dr = dot(rvecs[i], rvecs′[i])
            @assert abs(dl) > sqrt(eps(real(eltype(A))))
            @assert abs(dr) > sqrt(eps(real(eltype(A))))
            phasefix = sqrt(abs(dl * dr) / (dl * dr))
            lvecs′[i] = lvecs′[i] * phasefix
            rvecs′[i] = rvecs′[i] * phasefix
        end
        catresults = vcat(vals′[1:howmany], lvecs′[1:howmany]..., rvecs′[1:howmany]...)
        if eltype(catresults) <: Complex
            return vcat(real(catresults), imag(catresults))
        else
            return catresults
        end
    end
    return mat_example_mat, mat_example_ftuple, mat_example_fval, mat_example_fd, Avec,
           xvec, vals, lvecs, rvecs
end
# Build flattened closures around `svdsolve` for the rank-one-perturbed map
# y ↦ A*y + c*dot(d, y) (with adjoint fᴴ: y ↦ A'*y + d*dot(c, y)): an AD
# variant for Zygote and a phase-gauge-fixed variant for finite differences.
function build_fun_example(A, x, c, d, howmany::Int, alg, alg_rrule)
    Avec, matfromvec = to_vec(A)
    xvec, xvecfromvec = to_vec(x)
    cvec, cvecfromvec = to_vec(c)
    dvec, dvecfromvec = to_vec(d)

    f = y -> A * y + c * dot(d, y)
    fᴴ = y -> adjoint(A) * y + d * dot(c, y)
    vals, lvecs, rvecs, info = svdsolve((f, fᴴ), x, howmany, :LR, alg)
    info.converged < howmany && @warn "svdsolve did not converge"
    function fun_example_ad(Av, xv, cv, dv)
        Ã = matfromvec(Av)
        x̃ = xvecfromvec(xv)
        c̃ = cvecfromvec(cv)
        d̃ = dvecfromvec(dv)

        f = y -> Ã * y + c̃ * dot(d̃, y)
        fᴴ = y -> adjoint(Ã) * y + d̃ * dot(c̃, y)
        vals′, lvecs′, rvecs′, info′ = svdsolve((f, fᴴ), x̃, howmany, :LR, alg;
                                                alg_rrule=alg_rrule)
        info′.converged < howmany && @warn "svdsolve did not converge"
        catresults = vcat(vals′[1:howmany], lvecs′[1:howmany]..., rvecs′[1:howmany]...)
        if eltype(catresults) <: Complex
            return vcat(real(catresults), imag(catresults))
        else
            return catresults
        end
    end
    function fun_example_fd(Av, xv, cv, dv)
        Ã = matfromvec(Av)
        x̃ = xvecfromvec(xv)
        c̃ = cvecfromvec(cv)
        d̃ = dvecfromvec(dv)

        f = y -> Ã * y + c̃ * dot(d̃, y)
        fᴴ = y -> adjoint(Ã) * y + d̃ * dot(c̃, y)
        vals′, lvecs′, rvecs′, info′ = svdsolve((f, fᴴ), x̃, howmany, :LR, alg;
                                                alg_rrule=alg_rrule)
        info′.converged < howmany && @warn "svdsolve did not converge"
        # joint gauge fix: left and right vectors share a common phase
        for i in 1:howmany
            dl = dot(lvecs[i], lvecs′[i])
            dr = dot(rvecs[i], rvecs′[i])
            @assert abs(dl) > sqrt(eps(real(eltype(A))))
            @assert abs(dr) > sqrt(eps(real(eltype(A))))
            phasefix = sqrt(abs(dl * dr) / (dl * dr))
            lvecs′[i] = lvecs′[i] * phasefix
            rvecs′[i] = rvecs′[i] * phasefix
        end
        catresults = vcat(vals′[1:howmany], lvecs′[1:howmany]..., rvecs′[1:howmany]...)
        if eltype(catresults) <: Complex
            return vcat(real(catresults), imag(catresults))
        else
            return catresults
        end
    end
    return fun_example_ad, fun_example_fd, Avec, xvec, cvec, dvec, vals, lvecs, rvecs
end
# Compare the svdsolve pullback, as provided by `ChainRulesCore.rrule`, against
# finite differences on a dense n × 2n matrix, for both implementations of the
# adjoint linear problem (Arnoldi and GMRES), and check the warnings/info
# emitted for gauge-dependent tangents (complex eltypes only).
@timedtestset "Small svdsolve AD test with eltype=$T" for T in (Float32, Float64,
                                                                ComplexF32, ComplexF64)
    # Random rectangular test matrix and a normalised starting guess.
    A = 2 * (rand(T, (n, 2 * n)) .- one(T) / 2)
    x = 2 * (rand(T, n) .- one(T) / 2)
    x /= norm(x)
    condnum = cond(A)
    howmany = 3
    # Tolerance heuristic scaling with problem size and conditioning.
    tol = 3 * n * condnum * (T <: Real ? eps(T) : 4 * eps(real(T)))
    alg = GKL(; krylovdim=2n, tol=tol)
    # Two backends for the adjoint linear problem of the rrule.
    rrule_arnoldi = Arnoldi(; tol=tol, krylovdim=4n, verbosity=-1)
    rrule_gmres = GMRES(; tol=tol, krylovdim=3n, verbosity=-1)
    config = Zygote.ZygoteRuleConfig()
    for alg_rrule in (rrule_arnoldi, rrule_gmres)
        # unfortunately, rrule does not seem type stable for function arguments, because the
        # `rrule_via_ad` call does not produce type stable `rrule`s for the function
        _, pullback = ChainRulesCore.rrule(config, svdsolve, A, x, howmany, :LR, alg;
                                           alg_rrule=alg_rrule)
        # Type stability of the pullback for increasingly nonzero tangents.
        @constinferred pullback((ZeroTangent(), ZeroTangent(), ZeroTangent(), NoTangent()))
        @constinferred pullback((randn(real(T), howmany), ZeroTangent(), ZeroTangent(),
                                 NoTangent()))
        @constinferred pullback((randn(real(T), howmany), [randn(T, n)], ZeroTangent(),
                                 NoTangent()))
        @constinferred pullback((randn(real(T), howmany), [randn(T, n) for _ in 1:howmany],
                                 [randn(T, 2 * n) for _ in 1:howmany], NoTangent()))
    end
    for alg_rrule in (rrule_arnoldi, rrule_gmres)
        (f_mat, f_tup, f_val, f_fd, Avec, xvec,
         vals, lvecs, rvecs) = build_mat_example(A, x, howmany, alg, alg_rrule)
        JA_fd, Jx_fd = FiniteDifferences.jacobian(fdm, f_fd, Avec, xvec)
        JA1, Jx1 = Zygote.jacobian(f_mat, Avec, xvec)
        JA2, Jx2 = Zygote.jacobian(f_val, Avec, xvec)
        JA3, Jx3 = Zygote.jacobian(f_tup, Avec, xvec)
        # finite difference comparison using some kind of tolerance heuristic
        @test isapprox(JA_fd, JA1; rtol=3 * n * n * condnum * sqrt(eps(real(T))))
        @test all(isapprox.(JA1, JA2; atol=n * eps(real(T))))
        @test all(isapprox.(JA1, JA3; atol=n * eps(real(T))))
        # Derivatives with respect to the starting vector must (essentially) vanish.
        @test norm(Jx_fd, Inf) < 5 * condnum * sqrt(eps(real(T)))
        @test all(iszero, Jx1)
        @test all(iszero, Jx2)
        @test all(iszero, Jx3)
        # Recombine rows of the real-valued Jacobian into derivatives of the
        # singular values and of the left/right singular vectors.
        if eltype(A) <: Complex # test holomorphicity / Cauchy-Riemann equations
            dvals = complex.(JA1[1:howmany, :],
                             JA1[howmany * (3 * n + 1) .+ (1:howmany), :])
            dlvecs = map(1:howmany) do i
                return complex.(JA1[(howmany + (i - 1) * n) .+ (1:n), :],
                                JA1[(howmany * (3 * n + 2) + (i - 1) * n) .+ (1:n), :])
            end
            drvecs = map(1:howmany) do i
                return complex.(JA1[(howmany * (n + 1) + (i - 1) * (2 * n)) .+ (1:(2n)), :],
                                JA1[(howmany * (4 * n + 2) + (i - 1) * 2n) .+ (1:(2n)), :])
            end
        else
            dvals = JA1[1:howmany, :]
            dlvecs = map(1:howmany) do i
                return JA1[(howmany + (i - 1) * n) .+ (1:n), :]
            end
            drvecs = map(1:howmany) do i
                return JA1[(howmany * (n + 1) + (i - 1) * (2 * n)) .+ (1:(2n)), :]
            end
        end
        # test orthogonality of vecs and ∂vecs
        for i in 1:howmany
            prec = 4 * cond(A) * sqrt(eps(real(T)))
            @test all(<(prec), real.(lvecs[i]' * dlvecs[i]))
            @test all(<(prec), real.(rvecs[i]' * drvecs[i]))
            @test all(<(prec), abs.(lvecs[i]' * dlvecs[i] + rvecs[i]' * drvecs[i]))
        end
    end
    if T <: Complex
        @testset "test warnings and info" begin
            # Helper: fresh rrule pullback for a given adjoint algorithm.
            make_pb(alg_rrule) = ChainRulesCore.rrule(config, svdsolve, A, x, howmany,
                                                      :LR, alg; alg_rrule=alg_rrule)
            # Arnoldi adjoint, verbosity -1: no logs expected at all.
            (vals, lvecs, rvecs, info), pullback = make_pb(Arnoldi(; tol=tol,
                                                                   krylovdim=4n,
                                                                   verbosity=-1))
            @test_logs pullback((ZeroTangent(), im .* lvecs[1:2] .+ lvecs[2:-1:1],
                                 ZeroTangent(), NoTangent()))
            # Arnoldi adjoint, verbosity 0: gauge-dependent tangents produce a warning.
            (vals, lvecs, rvecs, info), pullback = make_pb(Arnoldi(; tol=tol,
                                                                   krylovdim=4n,
                                                                   verbosity=0))
            @test_logs (:warn,) pullback((ZeroTangent(),
                                          im .* lvecs[1:2] .+ lvecs[2:-1:1],
                                          ZeroTangent(), NoTangent()))
            @test_logs (:warn,) pullback((ZeroTangent(), lvecs[2:-1:1],
                                          im .* rvecs[1:2] .+ rvecs[2:-1:1],
                                          ZeroTangent(), NoTangent()))
            @test_logs pullback((ZeroTangent(), lvecs[1:2] .+ lvecs[2:-1:1],
                                 ZeroTangent(), NoTangent()))
            @test_logs (:warn,) pullback((ZeroTangent(),
                                          im .* lvecs[1:2] .+ lvecs[2:-1:1],
                                          +im .* rvecs[1:2] + rvecs[2:-1:1],
                                          NoTangent()))
            @test_logs pullback((ZeroTangent(), (1 + im) .* lvecs[1:2] .+ lvecs[2:-1:1],
                                 (1 - im) .* rvecs[1:2] + rvecs[2:-1:1], NoTangent()))
            # Arnoldi adjoint, verbosity 1: an additional info message is expected.
            (vals, lvecs, rvecs, info), pullback = make_pb(Arnoldi(; tol=tol,
                                                                   krylovdim=4n,
                                                                   verbosity=1))
            @test_logs (:warn,) (:info,) pullback((ZeroTangent(),
                                                   im .* lvecs[1:2] .+ lvecs[2:-1:1],
                                                   ZeroTangent(), NoTangent()))
            @test_logs (:warn,) (:info,) pullback((ZeroTangent(), lvecs[2:-1:1],
                                                   im .* rvecs[1:2] .+ rvecs[2:-1:1],
                                                   ZeroTangent(), NoTangent()))
            @test_logs (:info,) pullback((ZeroTangent(), lvecs[1:2] .+ lvecs[2:-1:1],
                                          ZeroTangent(), NoTangent()))
            @test_logs (:warn,) (:info,) pullback((ZeroTangent(),
                                                   im .* lvecs[1:2] .+ lvecs[2:-1:1],
                                                   +im .* rvecs[1:2] + rvecs[2:-1:1],
                                                   NoTangent()))
            @test_logs (:info,) pullback((ZeroTangent(),
                                          (1 + im) .* lvecs[1:2] .+ lvecs[2:-1:1],
                                          (1 - im) .* rvecs[1:2] + rvecs[2:-1:1],
                                          NoTangent()))
            # GMRES adjoint, verbosity -1: silent.
            (vals, lvecs, rvecs, info), pullback = make_pb(GMRES(; tol=tol,
                                                                 krylovdim=3n,
                                                                 verbosity=-1))
            @test_logs pullback((ZeroTangent(), im .* lvecs[1:2] .+ lvecs[2:-1:1],
                                 ZeroTangent(), NoTangent()))
            # GMRES adjoint, verbosity 0: two warnings are expected for
            # gauge-dependent tangents.
            (vals, lvecs, rvecs, info), pullback = make_pb(GMRES(; tol=tol,
                                                                 krylovdim=3n,
                                                                 verbosity=0))
            @test_logs (:warn,) (:warn,) pullback((ZeroTangent(),
                                                   im .* lvecs[1:2] .+ lvecs[2:-1:1],
                                                   ZeroTangent(), NoTangent()))
            @test_logs (:warn,) (:warn,) pullback((ZeroTangent(), lvecs[2:-1:1],
                                                   im .* rvecs[1:2] .+ rvecs[2:-1:1],
                                                   ZeroTangent(), NoTangent()))
            @test_logs pullback((ZeroTangent(), lvecs[1:2] .+ lvecs[2:-1:1],
                                 ZeroTangent(), NoTangent()))
            @test_logs (:warn,) (:warn,) pullback((ZeroTangent(),
                                                   im .* lvecs[1:2] .+ lvecs[2:-1:1],
                                                   +im .* rvecs[1:2] + rvecs[2:-1:1],
                                                   NoTangent()))
            @test_logs pullback((ZeroTangent(), (1 + im) .* lvecs[1:2] .+ lvecs[2:-1:1],
                                 (1 - im) .* rvecs[1:2] + rvecs[2:-1:1], NoTangent()))
            # GMRES adjoint, verbosity 1: warn/info pairs are expected.
            (vals, lvecs, rvecs, info), pullback = make_pb(GMRES(; tol=tol,
                                                                 krylovdim=3n,
                                                                 verbosity=1))
            @test_logs (:warn,) (:info,) (:warn,) (:info,) pullback((ZeroTangent(),
                                                                     im .* lvecs[1:2] .+
                                                                     lvecs[2:-1:1],
                                                                     ZeroTangent(),
                                                                     NoTangent()))
            @test_logs (:warn,) (:info,) (:warn,) (:info,) pullback((ZeroTangent(),
                                                                     lvecs[2:-1:1],
                                                                     im .* rvecs[1:2] .+
                                                                     rvecs[2:-1:1],
                                                                     ZeroTangent(),
                                                                     NoTangent()))
            @test_logs (:info,) (:info,) pullback((ZeroTangent(),
                                                   lvecs[1:2] .+ lvecs[2:-1:1],
                                                   ZeroTangent(), NoTangent()))
            @test_logs (:warn,) (:info,) (:warn,) (:info,) pullback((ZeroTangent(),
                                                                     im .* lvecs[1:2] .+
                                                                     lvecs[2:-1:1],
                                                                     +im .* rvecs[1:2] +
                                                                     rvecs[2:-1:1],
                                                                     NoTangent()))
            @test_logs (:info,) (:info,) pullback((ZeroTangent(),
                                                   (1 + im) .* lvecs[1:2] .+
                                                   lvecs[2:-1:1],
                                                   (1 - im) .* rvecs[1:2] +
                                                   rvecs[2:-1:1],
                                                   NoTangent()))
        end
    end
end
# Compare Zygote gradients of a function of svdsolve output against finite
# differences, on a well-conditioned N × (N+n) matrix, for both adjoint
# linear-problem backends (Arnoldi and GMRES).
@timedtestset "Large svdsolve AD test with eltype=$T" for T in (Float64, ComplexF64)
    which = :LR  # selector for the largest singular values (unused below)
    # Perturbation of the rectangular identity, scaled so the largest singular
    # value of the perturbation stays below one.
    A = rand(T, (N, N + n)) .- one(T) / 2
    A = I[1:N, 1:(N + n)] - (9 // 10) * A / maximum(svdvals(A))
    x = 2 * (rand(T, N) .- one(T) / 2)
    x /= norm(x)
    # Auxiliary vectors used by the scalar test function.
    c = 2 * (rand(T, N) .- one(T) / 2)
    d = 2 * (rand(T, N + n) .- one(T) / 2)
    howmany = 2
    tol = 2 * N^2 * eps(real(T))
    alg = GKL(; tol=tol, krylovdim=2n)
    adjoint_algs = (Arnoldi(; tol=tol, krylovdim=2n, verbosity=-1),
                    GMRES(; tol=tol, krylovdim=2n, verbosity=-1))
    for alg_rrule in adjoint_algs
        (fun_ad, fun_fd, Avec, xvec, cvec, dvec,
         vals, lvecs, rvecs) = build_fun_example(A, x, c, d, howmany, alg, alg_rrule)
        JA_fd, Jx_fd, Jc_fd, Jd_fd = FiniteDifferences.jacobian(fdm, fun_fd, Avec,
                                                                xvec, cvec, dvec)
        JA_ad, Jx_ad, Jc_ad, Jd_ad = Zygote.jacobian(fun_ad, Avec, xvec, cvec, dvec)
        @test JA_fd ≈ JA_ad
        @test Jc_fd ≈ Jc_ad
        @test Jd_fd ≈ Jd_ad
        # The gradient with respect to the starting vector should vanish.
        @test norm(Jx_fd, Inf) < (T <: Complex ? 4n : n) * sqrt(eps(real(T)))
    end
end
end
| KrylovKit | https://github.com/Jutho/KrylovKit.jl.git |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.