diff --git a/NDTensors/src/NDTensors.jl b/NDTensors/src/NDTensors.jl index 127f8fdb88..c052cbffe5 100644 --- a/NDTensors/src/NDTensors.jl +++ b/NDTensors/src/NDTensors.jl @@ -19,27 +19,33 @@ using Strided using TimerOutputs using TupleTools -# TODO: Define an `AlgorithmSelection` module # TODO: List types, macros, and functions being used. -include("algorithm.jl") -include("SetParameters/src/SetParameters.jl") +include("lib/AlgorithmSelection/src/AlgorithmSelection.jl") +using .AlgorithmSelection: AlgorithmSelection +include("lib/BaseExtensions/src/BaseExtensions.jl") +using .BaseExtensions: BaseExtensions +include("lib/SetParameters/src/SetParameters.jl") using .SetParameters -include("TensorAlgebra/src/TensorAlgebra.jl") +include("lib/BroadcastMapConversion/src/BroadcastMapConversion.jl") +using .BroadcastMapConversion: BroadcastMapConversion +include("lib/Unwrap/src/Unwrap.jl") +using .Unwrap +include("lib/RankFactorization/src/RankFactorization.jl") +using .RankFactorization: RankFactorization +include("lib/TensorAlgebra/src/TensorAlgebra.jl") using .TensorAlgebra: TensorAlgebra -include("DiagonalArrays/src/DiagonalArrays.jl") +include("lib/DiagonalArrays/src/DiagonalArrays.jl") using .DiagonalArrays -include("BlockSparseArrays/src/BlockSparseArrays.jl") +include("lib/BlockSparseArrays/src/BlockSparseArrays.jl") using .BlockSparseArrays -include("NamedDimsArrays/src/NamedDimsArrays.jl") +include("lib/NamedDimsArrays/src/NamedDimsArrays.jl") using .NamedDimsArrays: NamedDimsArrays -include("SmallVectors/src/SmallVectors.jl") +include("lib/SmallVectors/src/SmallVectors.jl") using .SmallVectors -include("SortedSets/src/SortedSets.jl") +include("lib/SortedSets/src/SortedSets.jl") using .SortedSets -include("TagSets/src/TagSets.jl") +include("lib/TagSets/src/TagSets.jl") using .TagSets -include("Unwrap/src/Unwrap.jl") -using .Unwrap using Base: @propagate_inbounds, ReshapedArray, DimOrInd, OneTo diff --git a/NDTensors/src/NamedDimsArrays/ext/NamedDimsArraysTensorAlgebraExt/src/NamedDimsArraysTensorAlgebraExt.jl b/NDTensors/src/NamedDimsArrays/ext/NamedDimsArraysTensorAlgebraExt/src/NamedDimsArraysTensorAlgebraExt.jl deleted file mode 100644 index 52eae6be29..0000000000 --- a/NDTensors/src/NamedDimsArrays/ext/NamedDimsArraysTensorAlgebraExt/src/NamedDimsArraysTensorAlgebraExt.jl +++ /dev/null @@ -1,6 +0,0 @@ -module NamedDimsArraysTensorAlgebraExt -using ..NamedDimsArrays: NamedDimsArrays -using ...NDTensors.TensorAlgebra: TensorAlgebra - -include("contract.jl") -end diff --git a/NDTensors/src/NamedDimsArrays/ext/NamedDimsArraysTensorAlgebraExt/test/runtests.jl b/NDTensors/src/NamedDimsArrays/ext/NamedDimsArraysTensorAlgebraExt/test/runtests.jl deleted file mode 100644 index 2a39bba0b5..0000000000 --- a/NDTensors/src/NamedDimsArrays/ext/NamedDimsArraysTensorAlgebraExt/test/runtests.jl +++ /dev/null @@ -1,13 +0,0 @@ -using Test: @test, @testset -using NDTensors.NamedDimsArrays: named, unname -using NDTensors.TensorAlgebra: TensorAlgebra - -@testset "NamedDimsArraysTensorAlgebraExt" begin - i = named(2, "i") - j = named(2, "j") - k = named(2, "k") - na1 = randn(i, j) - na2 = randn(j, k) - na_dest = TensorAlgebra.contract(na1, na2) - @test unname(na_dest, (i, k)) ≈ unname(na1) * unname(na2) -end diff --git a/NDTensors/src/NamedDimsArrays/src/abstractnamedunitrange.jl b/NDTensors/src/NamedDimsArrays/src/abstractnamedunitrange.jl deleted file mode 100644 index 6a601d18f1..0000000000 --- a/NDTensors/src/NamedDimsArrays/src/abstractnamedunitrange.jl +++ /dev/null @@ -1,14 +0,0 @@ -abstract type 
AbstractNamedUnitRange{T,Value<:AbstractUnitRange{T},Name} <: - AbstractUnitRange{T} end - -# Required interface -unname(::AbstractNamedUnitRange) = error("Not implemented") -name(::AbstractNamedUnitRange) = error("Not implemented") - -# Traits -isnamed(::AbstractNamedUnitRange) = true - -# Unit range -Base.first(i::AbstractNamedUnitRange) = first(unname(i)) -Base.last(i::AbstractNamedUnitRange) = last(unname(i)) -Base.length(i::AbstractNamedUnitRange) = named(length(unname(i)), name(i)) diff --git a/NDTensors/src/NamedDimsArrays/test/test_basic.jl b/NDTensors/src/NamedDimsArrays/test/test_basic.jl deleted file mode 100644 index f53eacf791..0000000000 --- a/NDTensors/src/NamedDimsArrays/test/test_basic.jl +++ /dev/null @@ -1,38 +0,0 @@ -using NDTensors.NamedDimsArrays: - NamedDimsArrays, NamedDimsArray, align, dimnames, isnamed, named, unname -using Test: @test, @testset - -@testset "NamedDimsArrays $(@__FILE__)" begin - a = randn(3, 4) - na = named(a, ("i", "j")) - # TODO: Call `namedsize`? - i, j = size(na) - # TODO: Call `namedaxes`? - ai, aj = axes(na) - @test !isnamed(a) - @test isnamed(na) - @test dimnames(na) == ("i", "j") - @test na[1, 1] == a[1, 1] - na[1, 1] = 11 - @test na[1, 1] == 11 - @test size(a) == (named(3, "i"), named(4, "j")) - @test length(a) == 12 - @test axes(a) == (named(1:3, "i"), named(1:4, "j")) - @test randn(named(3, "i"), named(4, "j")) isa NamedDimsArray - @test na["i" => 1, "j" => 2] == a[1, 2] - @test na["j" => 2, "i" => 1] == a[1, 2] - na["j" => 2, "i" => 1] = 12 - @test na[1, 2] == 12 - @test na[j => 1, i => 2] == a[2, 1] - @test na[aj => 1, ai => 2] == a[2, 1] - na[j => 1, i => 2] = 21 - @test na[2, 1] == 21 - na[aj => 1, ai => 2] = 2211 - @test na[2, 1] == 2211 - na′ = align(na, ("j", "i")) - @test a == permutedims(unname(na′), (2, 1)) - na′ = align(na, (j, i)) - @test a == permutedims(unname(na′), (2, 1)) - na′ = align(na, (aj, ai)) - @test a == permutedims(unname(na′), (2, 1)) -end diff --git a/NDTensors/src/TensorAlgebra/src/fusedims.jl b/NDTensors/src/TensorAlgebra/src/fusedims.jl deleted file mode 100644 index 39220dd687..0000000000 --- a/NDTensors/src/TensorAlgebra/src/fusedims.jl +++ /dev/null @@ -1,23 +0,0 @@ -fuse(a1::AbstractUnitRange, a2::AbstractUnitRange) = Base.OneTo(length(a1) * length(a2)) -fuse(a...) = foldl(fuse, a) - -matricize(a::AbstractArray, biperm) = matricize(a, BipartitionedPermutation(biperm...)) - -# TODO: Make this more generic, i.e. for `BlockSparseArray`. -function matricize(a::AbstractArray, biperm::BipartitionedPermutation) - # Permute and fuse the axes - axes_src = axes(a) - axes_codomain = map(i -> axes_src[i], biperm[1]) - axes_domain = map(i -> axes_src[i], biperm[2]) - axis_codomain_fused = fuse(axes_codomain...) - axis_domain_fused = fuse(axes_domain...) - # Permute the array - perm = flatten(biperm) - a_permuted = permutedims(a, perm) - return reshape(a_permuted, (axis_codomain_fused, axis_domain_fused)) -end - -# TODO: Make this more generic, i.e. for `BlockSparseArray`. -function unmatricize(a::AbstractArray, axes_codomain, axes_domain) - return reshape(a, (axes_codomain..., axes_domain...)) -end diff --git a/NDTensors/src/algorithm.jl b/NDTensors/src/algorithm.jl deleted file mode 100644 index e1edcc9131..0000000000 --- a/NDTensors/src/algorithm.jl +++ /dev/null @@ -1,29 +0,0 @@ -""" - Algorithm - -A type representing an algorithm backend for a function. - -For example, ITensor provides multiple backend algorithms for contracting -an MPO with an MPS, which internally are selected with an `Algorithm` type. 
- -This allows users to extend functions in ITensor with new algorithms, but -use the same interface. -""" -struct Algorithm{Alg} end - -Algorithm(s) = Algorithm{Symbol(s)}() -algorithm_string(::Algorithm{Alg}) where {Alg} = string(Alg) - -Base.show(io::IO, alg::Algorithm) = print(io, "Algorithm type ", algorithm_string(alg)) -Base.print(io::IO, ::Algorithm{Alg}) where {Alg} = print(io, Alg) - -""" - @Algorithm_str - -A convenience macro for writing [`Algorithm`](@ref) types, typically used when -adding methods to a function in ITensor that supports multiple algorithm -backends (like contracting an MPO with an MPS). -""" -macro Algorithm_str(s) - return :(Algorithm{$(Expr(:quote, Symbol(s)))}) -end diff --git a/NDTensors/src/blocksparse/contract.jl b/NDTensors/src/blocksparse/contract.jl index a28bbab93f..c256292942 100644 --- a/NDTensors/src/blocksparse/contract.jl +++ b/NDTensors/src/blocksparse/contract.jl @@ -1,3 +1,5 @@ +using .AlgorithmSelection: Algorithm, @Algorithm_str + function contract( tensor1::BlockSparseTensor, labelstensor1, diff --git a/NDTensors/src/lib/AlgorithmSelection/src/AlgorithmSelection.jl b/NDTensors/src/lib/AlgorithmSelection/src/AlgorithmSelection.jl new file mode 100644 index 0000000000..e8426accec --- /dev/null +++ b/NDTensors/src/lib/AlgorithmSelection/src/AlgorithmSelection.jl @@ -0,0 +1,3 @@ +module AlgorithmSelection +include("algorithm.jl") +end diff --git a/NDTensors/src/lib/AlgorithmSelection/src/algorithm.jl b/NDTensors/src/lib/AlgorithmSelection/src/algorithm.jl new file mode 100644 index 0000000000..c188947f4d --- /dev/null +++ b/NDTensors/src/lib/AlgorithmSelection/src/algorithm.jl @@ -0,0 +1,39 @@ +""" + Algorithm + +A type representing an algorithm backend for a function. + +For example, a function might have multiple backend algorithm +implementations, which internally are selected with an `Algorithm` type. + +This allows users to extend functionality with a new algorithm but +use the same interface. +""" +struct Algorithm{Alg,Kwargs<:NamedTuple} + kwargs::Kwargs +end + +Algorithm{Alg}(kwargs::NamedTuple) where {Alg} = Algorithm{Alg,typeof(kwargs)}(kwargs) +Algorithm{Alg}(; kwargs...) where {Alg} = Algorithm{Alg}(NamedTuple(kwargs)) +Algorithm(s; kwargs...) = Algorithm{Symbol(s)}(NamedTuple(kwargs)) + +Algorithm(alg::Algorithm) = alg + +# TODO: Use `SetParameters`. +algorithm_string(::Algorithm{Alg}) where {Alg} = string(Alg) + +function Base.show(io::IO, alg::Algorithm) + return print(io, "Algorithm type ", algorithm_string(alg), ", ", alg.kwargs) +end +Base.print(io::IO, alg::Algorithm) = print(io, algorithm_string(alg), ", ", alg.kwargs) + +""" + @Algorithm_str + +A convenience macro for writing [`Algorithm`](@ref) types, typically used when +adding methods to a function that supports multiple algorithm +backends. 
+""" +macro Algorithm_str(s) + return :(Algorithm{$(Expr(:quote, Symbol(s)))}) +end diff --git a/NDTensors/src/lib/AlgorithmSelection/test/Project.toml b/NDTensors/src/lib/AlgorithmSelection/test/Project.toml new file mode 100644 index 0000000000..ef491a529c --- /dev/null +++ b/NDTensors/src/lib/AlgorithmSelection/test/Project.toml @@ -0,0 +1,3 @@ +[deps] +NDTensors = "23ae76d9-e61a-49c4-8f12-3f1a16adf9cf" +Test = "8dfed614-e22c-5e08-85e1-65c5234f0b40" diff --git a/NDTensors/src/lib/AlgorithmSelection/test/runtests.jl b/NDTensors/src/lib/AlgorithmSelection/test/runtests.jl new file mode 100644 index 0000000000..3741ea91da --- /dev/null +++ b/NDTensors/src/lib/AlgorithmSelection/test/runtests.jl @@ -0,0 +1,12 @@ +@eval module $(gensym()) +using Test: @test, @testset +using NDTensors.AlgorithmSelection: Algorithm, @Algorithm_str +@testset "AlgorithmSelection" begin + @test Algorithm"alg"() isa Algorithm{:alg} + @test Algorithm("alg") isa Algorithm{:alg} + @test Algorithm(:alg) isa Algorithm{:alg} + alg = Algorithm"alg"(; x=2, y=3) + @test alg isa Algorithm{:alg} + @test alg.kwargs == (; x=2, y=3) +end +end diff --git a/NDTensors/src/lib/BaseExtensions/src/BaseExtensions.jl b/NDTensors/src/lib/BaseExtensions/src/BaseExtensions.jl new file mode 100644 index 0000000000..747f3ac675 --- /dev/null +++ b/NDTensors/src/lib/BaseExtensions/src/BaseExtensions.jl @@ -0,0 +1,3 @@ +module BaseExtensions +include("replace.jl") +end diff --git a/NDTensors/src/lib/BaseExtensions/src/replace.jl b/NDTensors/src/lib/BaseExtensions/src/replace.jl new file mode 100644 index 0000000000..9749f85a7a --- /dev/null +++ b/NDTensors/src/lib/BaseExtensions/src/replace.jl @@ -0,0 +1,32 @@ +replace(collection, replacements::Pair...) = Base.replace(collection, replacements...) +@static if VERSION < v"1.7.0-DEV.15" + # https://github.com/JuliaLang/julia/pull/38216 + # TODO: Add to `Compat.jl` or delete when we drop Julia 1.6 support. + # `replace` for Tuples. + function _replace(f::Base.Callable, t::Tuple, count::Int) + return if count == 0 || isempty(t) + t + else + x = f(t[1]) + (x, _replace(f, Base.tail(t), count - !==(x, t[1]))...) 
end + end + + function replace(f::Base.Callable, t::Tuple; count::Integer=typemax(Int)) + return _replace(f, t, Base.check_count(count)) + end + + function _replace(t::Tuple, count::Int, old_new::Tuple{Vararg{Pair}}) + return _replace(t, count) do x + Base.@_inline_meta + for o_n in old_new + isequal(first(o_n), x) && return last(o_n) + end + return x + end + end + + function replace(t::Tuple, old_new::Pair...; count::Integer=typemax(Int)) + return _replace(t, Base.check_count(count), old_new) + end +end diff --git a/NDTensors/src/lib/BaseExtensions/test/Project.toml b/NDTensors/src/lib/BaseExtensions/test/Project.toml new file mode 100644 index 0000000000..90187321ef --- /dev/null +++ b/NDTensors/src/lib/BaseExtensions/test/Project.toml @@ -0,0 +1,4 @@ +[deps] +NDTensors = "23ae76d9-e61a-49c4-8f12-3f1a16adf9cf" +SafeTestsets = "1bc83da4-3b8d-516f-aca4-4fe02f6d838f" +Test = "8dfed614-e22c-5e08-85e1-65c5234f0b40" diff --git a/NDTensors/src/lib/BaseExtensions/test/runtests.jl b/NDTensors/src/lib/BaseExtensions/test/runtests.jl new file mode 100644 index 0000000000..d29334d85f --- /dev/null +++ b/NDTensors/src/lib/BaseExtensions/test/runtests.jl @@ -0,0 +1,15 @@ +using SafeTestsets: @safetestset + +@safetestset "BaseExtensions" begin + using NDTensors.BaseExtensions: BaseExtensions + using Test: @test, @testset + @testset "replace $(typeof(collection))" for collection in + (["a", "b", "c"], ("a", "b", "c")) + r1 = BaseExtensions.replace(collection, "b" => "d") + @test r1 == typeof(collection)(["a", "d", "c"]) + @test typeof(r1) === typeof(collection) + r2 = BaseExtensions.replace(collection, "b" => "d", "a" => "e") + @test r2 == typeof(collection)(["e", "d", "c"]) + @test typeof(r2) === typeof(collection) + end +end diff --git a/NDTensors/src/BlockSparseArrays/.JuliaFormatter.toml b/NDTensors/src/lib/BlockSparseArrays/.JuliaFormatter.toml similarity index 100% rename from NDTensors/src/BlockSparseArrays/.JuliaFormatter.toml rename to NDTensors/src/lib/BlockSparseArrays/.JuliaFormatter.toml diff --git a/NDTensors/src/BlockSparseArrays/README.md b/NDTensors/src/lib/BlockSparseArrays/README.md similarity index 100% rename from NDTensors/src/BlockSparseArrays/README.md rename to NDTensors/src/lib/BlockSparseArrays/README.md diff --git a/NDTensors/src/BlockSparseArrays/examples/README.jl b/NDTensors/src/lib/BlockSparseArrays/examples/README.jl similarity index 100% rename from NDTensors/src/BlockSparseArrays/examples/README.jl rename to NDTensors/src/lib/BlockSparseArrays/examples/README.jl diff --git a/NDTensors/src/BlockSparseArrays/src/BlockSparseArrays.jl b/NDTensors/src/lib/BlockSparseArrays/src/BlockSparseArrays.jl similarity index 94% rename from NDTensors/src/BlockSparseArrays/src/BlockSparseArrays.jl rename to NDTensors/src/lib/BlockSparseArrays/src/BlockSparseArrays.jl index 17a831de4a..bda180c68d 100644 --- a/NDTensors/src/BlockSparseArrays/src/BlockSparseArrays.jl +++ b/NDTensors/src/lib/BlockSparseArrays/src/BlockSparseArrays.jl @@ -1,4 +1,5 @@ module BlockSparseArrays +using ..AlgorithmSelection: Algorithm, @Algorithm_str using BlockArrays: AbstractBlockArray, BlockArrays, diff --git a/NDTensors/src/BlockSparseArrays/src/LinearAlgebraExt/LinearAlgebraExt.jl b/NDTensors/src/lib/BlockSparseArrays/src/LinearAlgebraExt/LinearAlgebraExt.jl similarity index 87% rename from NDTensors/src/BlockSparseArrays/src/LinearAlgebraExt/LinearAlgebraExt.jl rename to NDTensors/src/lib/BlockSparseArrays/src/LinearAlgebraExt/LinearAlgebraExt.jl index 998170c791..912fbab79b 100644 ---
a/NDTensors/src/BlockSparseArrays/src/LinearAlgebraExt/LinearAlgebraExt.jl +++ b/NDTensors/src/lib/BlockSparseArrays/src/LinearAlgebraExt/LinearAlgebraExt.jl @@ -1,9 +1,9 @@ module LinearAlgebraExt +using ...AlgorithmSelection: Algorithm, @Algorithm_str using BlockArrays: BlockArrays, blockedrange, blocks using ..BlockSparseArrays: SparseArray, nonzero_keys # TODO: Move to `SparseArraysExtensions` module, rename `SparseArrayDOK`. using ..BlockSparseArrays: BlockSparseArrays, BlockSparseArray, nonzero_blockkeys using LinearAlgebra: LinearAlgebra, Hermitian, Transpose, I, eigen, qr -using ...NDTensors: Algorithm, @Algorithm_str # TODO: Move to `AlgorithmSelector` module. using SparseArrays: SparseArrays, SparseMatrixCSC, spzeros, sparse # TODO: Move to `SparseArraysExtensions`. diff --git a/NDTensors/src/BlockSparseArrays/src/LinearAlgebraExt/eigen.jl b/NDTensors/src/lib/BlockSparseArrays/src/LinearAlgebraExt/eigen.jl similarity index 100% rename from NDTensors/src/BlockSparseArrays/src/LinearAlgebraExt/eigen.jl rename to NDTensors/src/lib/BlockSparseArrays/src/LinearAlgebraExt/eigen.jl diff --git a/NDTensors/src/BlockSparseArrays/src/LinearAlgebraExt/hermitian.jl b/NDTensors/src/lib/BlockSparseArrays/src/LinearAlgebraExt/hermitian.jl similarity index 100% rename from NDTensors/src/BlockSparseArrays/src/LinearAlgebraExt/hermitian.jl rename to NDTensors/src/lib/BlockSparseArrays/src/LinearAlgebraExt/hermitian.jl diff --git a/NDTensors/src/BlockSparseArrays/src/LinearAlgebraExt/qr.jl b/NDTensors/src/lib/BlockSparseArrays/src/LinearAlgebraExt/qr.jl similarity index 100% rename from NDTensors/src/BlockSparseArrays/src/LinearAlgebraExt/qr.jl rename to NDTensors/src/lib/BlockSparseArrays/src/LinearAlgebraExt/qr.jl diff --git a/NDTensors/src/BlockSparseArrays/src/LinearAlgebraExt/svd.jl b/NDTensors/src/lib/BlockSparseArrays/src/LinearAlgebraExt/svd.jl similarity index 100% rename from NDTensors/src/BlockSparseArrays/src/LinearAlgebraExt/svd.jl rename to NDTensors/src/lib/BlockSparseArrays/src/LinearAlgebraExt/svd.jl diff --git a/NDTensors/src/BlockSparseArrays/src/LinearAlgebraExt/transpose.jl b/NDTensors/src/lib/BlockSparseArrays/src/LinearAlgebraExt/transpose.jl similarity index 100% rename from NDTensors/src/BlockSparseArrays/src/LinearAlgebraExt/transpose.jl rename to NDTensors/src/lib/BlockSparseArrays/src/LinearAlgebraExt/transpose.jl diff --git a/NDTensors/src/BlockSparseArrays/src/abstractarray.jl b/NDTensors/src/lib/BlockSparseArrays/src/abstractarray.jl similarity index 100% rename from NDTensors/src/BlockSparseArrays/src/abstractarray.jl rename to NDTensors/src/lib/BlockSparseArrays/src/abstractarray.jl diff --git a/NDTensors/src/BlockSparseArrays/src/allocate_output.jl b/NDTensors/src/lib/BlockSparseArrays/src/allocate_output.jl similarity index 100% rename from NDTensors/src/BlockSparseArrays/src/allocate_output.jl rename to NDTensors/src/lib/BlockSparseArrays/src/allocate_output.jl diff --git a/NDTensors/src/BlockSparseArrays/src/axes.jl b/NDTensors/src/lib/BlockSparseArrays/src/axes.jl similarity index 100% rename from NDTensors/src/BlockSparseArrays/src/axes.jl rename to NDTensors/src/lib/BlockSparseArrays/src/axes.jl diff --git a/NDTensors/src/BlockSparseArrays/src/base.jl b/NDTensors/src/lib/BlockSparseArrays/src/base.jl similarity index 100% rename from NDTensors/src/BlockSparseArrays/src/base.jl rename to NDTensors/src/lib/BlockSparseArrays/src/base.jl diff --git a/NDTensors/src/BlockSparseArrays/src/blockarrays.jl b/NDTensors/src/lib/BlockSparseArrays/src/blockarrays.jl 
similarity index 100% rename from NDTensors/src/BlockSparseArrays/src/blockarrays.jl rename to NDTensors/src/lib/BlockSparseArrays/src/blockarrays.jl diff --git a/NDTensors/src/BlockSparseArrays/src/blocksparsearray.jl b/NDTensors/src/lib/BlockSparseArrays/src/blocksparsearray.jl similarity index 100% rename from NDTensors/src/BlockSparseArrays/src/blocksparsearray.jl rename to NDTensors/src/lib/BlockSparseArrays/src/blocksparsearray.jl diff --git a/NDTensors/src/BlockSparseArrays/src/broadcast.jl b/NDTensors/src/lib/BlockSparseArrays/src/broadcast.jl similarity index 100% rename from NDTensors/src/BlockSparseArrays/src/broadcast.jl rename to NDTensors/src/lib/BlockSparseArrays/src/broadcast.jl diff --git a/NDTensors/src/BlockSparseArrays/src/fusedims.jl b/NDTensors/src/lib/BlockSparseArrays/src/fusedims.jl similarity index 100% rename from NDTensors/src/BlockSparseArrays/src/fusedims.jl rename to NDTensors/src/lib/BlockSparseArrays/src/fusedims.jl diff --git a/NDTensors/src/BlockSparseArrays/src/gradedrange.jl b/NDTensors/src/lib/BlockSparseArrays/src/gradedrange.jl similarity index 100% rename from NDTensors/src/BlockSparseArrays/src/gradedrange.jl rename to NDTensors/src/lib/BlockSparseArrays/src/gradedrange.jl diff --git a/NDTensors/src/BlockSparseArrays/src/permuteddimsarray.jl b/NDTensors/src/lib/BlockSparseArrays/src/permuteddimsarray.jl similarity index 100% rename from NDTensors/src/BlockSparseArrays/src/permuteddimsarray.jl rename to NDTensors/src/lib/BlockSparseArrays/src/permuteddimsarray.jl diff --git a/NDTensors/src/BlockSparseArrays/src/sparsearray.jl b/NDTensors/src/lib/BlockSparseArrays/src/sparsearray.jl similarity index 100% rename from NDTensors/src/BlockSparseArrays/src/sparsearray.jl rename to NDTensors/src/lib/BlockSparseArrays/src/sparsearray.jl diff --git a/NDTensors/src/BlockSparseArrays/src/subarray.jl b/NDTensors/src/lib/BlockSparseArrays/src/subarray.jl similarity index 100% rename from NDTensors/src/BlockSparseArrays/src/subarray.jl rename to NDTensors/src/lib/BlockSparseArrays/src/subarray.jl diff --git a/NDTensors/src/BlockSparseArrays/src/tensor_product.jl b/NDTensors/src/lib/BlockSparseArrays/src/tensor_product.jl similarity index 100% rename from NDTensors/src/BlockSparseArrays/src/tensor_product.jl rename to NDTensors/src/lib/BlockSparseArrays/src/tensor_product.jl diff --git a/NDTensors/src/BlockSparseArrays/test/Project.toml b/NDTensors/src/lib/BlockSparseArrays/test/Project.toml similarity index 100% rename from NDTensors/src/BlockSparseArrays/test/Project.toml rename to NDTensors/src/lib/BlockSparseArrays/test/Project.toml diff --git a/NDTensors/src/BlockSparseArrays/test/TestBlockSparseArraysUtils.jl b/NDTensors/src/lib/BlockSparseArrays/test/TestBlockSparseArraysUtils.jl similarity index 100% rename from NDTensors/src/BlockSparseArrays/test/TestBlockSparseArraysUtils.jl rename to NDTensors/src/lib/BlockSparseArrays/test/TestBlockSparseArraysUtils.jl diff --git a/NDTensors/src/BlockSparseArrays/test/runtests.jl b/NDTensors/src/lib/BlockSparseArrays/test/runtests.jl similarity index 96% rename from NDTensors/src/BlockSparseArrays/test/runtests.jl rename to NDTensors/src/lib/BlockSparseArrays/test/runtests.jl index 52ae8243bf..862a504388 100644 --- a/NDTensors/src/BlockSparseArrays/test/runtests.jl +++ b/NDTensors/src/lib/BlockSparseArrays/test/runtests.jl @@ -13,7 +13,12 @@ include("TestBlockSparseArraysUtils.jl") @testset "README" begin @test include( joinpath( - pkgdir(BlockSparseArrays), "src", "BlockSparseArrays", "examples", "README.jl" + 
pkgdir(BlockSparseArrays), + "src", + "lib", + "BlockSparseArrays", + "examples", + "README.jl", ), ) isa Any end diff --git a/NDTensors/src/lib/BroadcastMapConversion/src/BroadcastMapConversion.jl b/NDTensors/src/lib/BroadcastMapConversion/src/BroadcastMapConversion.jl new file mode 100644 index 0000000000..6edf0ae2f7 --- /dev/null +++ b/NDTensors/src/lib/BroadcastMapConversion/src/BroadcastMapConversion.jl @@ -0,0 +1,48 @@ +module BroadcastMapConversion +# Convert broadcast call to map call by capturing array arguments +# with `map_args` and creating a map function with `map_function`. +# Logic from https://github.com/Jutho/Strided.jl/blob/v2.0.4/src/broadcast.jl. + +using Base.Broadcast: Broadcasted + +const WrappedScalarArgs = Union{AbstractArray{<:Any,0},Ref{<:Any}} + +function map_args(bc::Broadcasted, rest...) + return (map_args(bc.args...)..., map_args(rest...)...) +end +map_args(a::AbstractArray, rest...) = (a, map_args(rest...)...) +map_args(a, rest...) = map_args(rest...) +map_args() = () + +struct MapFunction{F,Args<:Tuple} + f::F + args::Args +end +struct Arg end + +# construct MapFunction +function map_function(bc::Broadcasted) + args = map_function_tuple(bc.args) + return MapFunction(bc.f, args) +end +map_function_tuple(t::Tuple{}) = t +map_function_tuple(t::Tuple) = (map_function(t[1]), map_function_tuple(Base.tail(t))...) +map_function(a::WrappedScalarArgs) = a[] +map_function(a::AbstractArray) = Arg() +map_function(a) = a + +# Evaluate MapFunction +(f::MapFunction)(args...) = apply(f, args)[1] +function apply(f::MapFunction, args) + args, newargs = apply_tuple(f.args, args) + return f.f(args...), newargs +end +apply(a::Arg, args::Tuple) = args[1], Base.tail(args) +apply(a, args) = a, args +apply_tuple(t::Tuple{}, args) = t, args +function apply_tuple(t::Tuple, args) + t1, newargs1 = apply(t[1], args) + ttail, newargs = apply_tuple(Base.tail(t), newargs1) + return (t1, ttail...), newargs +end +end diff --git a/NDTensors/src/lib/BroadcastMapConversion/test/runtests.jl b/NDTensors/src/lib/BroadcastMapConversion/test/runtests.jl new file mode 100644 index 0000000000..92e5d6ae41 --- /dev/null +++ b/NDTensors/src/lib/BroadcastMapConversion/test/runtests.jl @@ -0,0 +1,14 @@ +@eval module $(gensym()) +using Test: @test, @testset +using NDTensors.BroadcastMapConversion: map_function, map_args +@testset "BroadcastMapConversion" begin + using Base.Broadcast: Broadcasted + c = 2.2 + a = randn(2, 3) + b = randn(2, 3) + bc = Broadcasted(*, (c, a)) + @test copy(bc) ≈ c * a ≈ map(map_function(bc), map_args(bc)...) + bc = Broadcasted(+, (a, b)) + @test copy(bc) ≈ a + b ≈ map(map_function(bc), map_args(bc)...) 
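+ # An extra hedged check (not exercised above): a hand-built nested + # `Broadcasted` should flatten the same way, with `map_args` collecting + # only the array arguments. + bc = Broadcasted(+, (a, Broadcasted(*, (c, b)))) + @test copy(bc) ≈ a .+ c .* b ≈ map(map_function(bc), map_args(bc)...)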
+end +end diff --git a/NDTensors/src/DiagonalArrays/.JuliaFormatter.toml b/NDTensors/src/lib/DiagonalArrays/.JuliaFormatter.toml similarity index 100% rename from NDTensors/src/DiagonalArrays/.JuliaFormatter.toml rename to NDTensors/src/lib/DiagonalArrays/.JuliaFormatter.toml diff --git a/NDTensors/src/DiagonalArrays/README.md b/NDTensors/src/lib/DiagonalArrays/README.md similarity index 100% rename from NDTensors/src/DiagonalArrays/README.md rename to NDTensors/src/lib/DiagonalArrays/README.md diff --git a/NDTensors/src/DiagonalArrays/examples/README.jl b/NDTensors/src/lib/DiagonalArrays/examples/README.jl similarity index 100% rename from NDTensors/src/DiagonalArrays/examples/README.jl rename to NDTensors/src/lib/DiagonalArrays/examples/README.jl diff --git a/NDTensors/src/DiagonalArrays/src/DiagonalArrays.jl b/NDTensors/src/lib/DiagonalArrays/src/DiagonalArrays.jl similarity index 100% rename from NDTensors/src/DiagonalArrays/src/DiagonalArrays.jl rename to NDTensors/src/lib/DiagonalArrays/src/DiagonalArrays.jl diff --git a/NDTensors/src/DiagonalArrays/src/diagview.jl b/NDTensors/src/lib/DiagonalArrays/src/diagview.jl similarity index 100% rename from NDTensors/src/DiagonalArrays/src/diagview.jl rename to NDTensors/src/lib/DiagonalArrays/src/diagview.jl diff --git a/NDTensors/src/DiagonalArrays/test/runtests.jl b/NDTensors/src/lib/DiagonalArrays/test/runtests.jl similarity index 58% rename from NDTensors/src/DiagonalArrays/test/runtests.jl rename to NDTensors/src/lib/DiagonalArrays/test/runtests.jl index 45d8b55e5c..a0064b5deb 100644 --- a/NDTensors/src/DiagonalArrays/test/runtests.jl +++ b/NDTensors/src/lib/DiagonalArrays/test/runtests.jl @@ -4,7 +4,9 @@ using NDTensors.DiagonalArrays @testset "Test NDTensors.DiagonalArrays" begin @testset "README" begin @test include( - joinpath(pkgdir(DiagonalArrays), "src", "DiagonalArrays", "examples", "README.jl") + joinpath( + pkgdir(DiagonalArrays), "src", "lib", "DiagonalArrays", "examples", "README.jl" + ), ) isa Any end end diff --git a/NDTensors/src/NamedDimsArrays/.JuliaFormatter.toml b/NDTensors/src/lib/NamedDimsArrays/.JuliaFormatter.toml similarity index 100% rename from NDTensors/src/NamedDimsArrays/.JuliaFormatter.toml rename to NDTensors/src/lib/NamedDimsArrays/.JuliaFormatter.toml diff --git a/NDTensors/src/NamedDimsArrays/README.md b/NDTensors/src/lib/NamedDimsArrays/README.md similarity index 100% rename from NDTensors/src/NamedDimsArrays/README.md rename to NDTensors/src/lib/NamedDimsArrays/README.md diff --git a/NDTensors/src/NamedDimsArrays/examples/example_readme.jl b/NDTensors/src/lib/NamedDimsArrays/examples/example_readme.jl similarity index 100% rename from NDTensors/src/NamedDimsArrays/examples/example_readme.jl rename to NDTensors/src/lib/NamedDimsArrays/examples/example_readme.jl diff --git a/NDTensors/src/lib/NamedDimsArrays/ext/NamedDimsArraysAdaptExt/src/NamedDimsArraysAdaptExt.jl b/NDTensors/src/lib/NamedDimsArrays/ext/NamedDimsArraysAdaptExt/src/NamedDimsArraysAdaptExt.jl new file mode 100644 index 0000000000..027dd65ca6 --- /dev/null +++ b/NDTensors/src/lib/NamedDimsArrays/ext/NamedDimsArraysAdaptExt/src/NamedDimsArraysAdaptExt.jl @@ -0,0 +1,3 @@ +module NamedDimsArraysAdaptExt +include("adapt_structure.jl") +end diff --git a/NDTensors/src/lib/NamedDimsArrays/ext/NamedDimsArraysAdaptExt/src/adapt_structure.jl b/NDTensors/src/lib/NamedDimsArrays/ext/NamedDimsArraysAdaptExt/src/adapt_structure.jl new file mode 100644 index 0000000000..59fbdc3f29 --- /dev/null +++ 
b/NDTensors/src/lib/NamedDimsArrays/ext/NamedDimsArraysAdaptExt/src/adapt_structure.jl @@ -0,0 +1,5 @@ +using Adapt: Adapt, adapt +using NDTensors.NamedDimsArrays: AbstractNamedDimsArray, dimnames, named, unname +function Adapt.adapt_structure(to, na::AbstractNamedDimsArray) + return named(adapt(to, unname(na)), dimnames(na)) +end diff --git a/NDTensors/src/lib/NamedDimsArrays/ext/NamedDimsArraysAdaptExt/test/Project.toml b/NDTensors/src/lib/NamedDimsArrays/ext/NamedDimsArraysAdaptExt/test/Project.toml new file mode 100644 index 0000000000..a970d0f894 --- /dev/null +++ b/NDTensors/src/lib/NamedDimsArrays/ext/NamedDimsArraysAdaptExt/test/Project.toml @@ -0,0 +1,4 @@ +[deps] +Adapt = "79e6a3ab-5dfb-504d-930d-738a2a938a0e" +NDTensors = "23ae76d9-e61a-49c4-8f12-3f1a16adf9cf" +Test = "8dfed614-e22c-5e08-85e1-65c5234f0b40" diff --git a/NDTensors/src/lib/NamedDimsArrays/ext/NamedDimsArraysAdaptExt/test/runtests.jl b/NDTensors/src/lib/NamedDimsArrays/ext/NamedDimsArraysAdaptExt/test/runtests.jl new file mode 100644 index 0000000000..536b4bb7ea --- /dev/null +++ b/NDTensors/src/lib/NamedDimsArrays/ext/NamedDimsArraysAdaptExt/test/runtests.jl @@ -0,0 +1,11 @@ +@eval module $(gensym()) +using Test: @test, @testset +using Adapt: adapt +using NDTensors.NamedDimsArrays: named +@testset "NamedDimsArraysAdaptExt (eltype=$elt)" for elt in (Float32, Float64) + na = named(randn(2, 2), ("i", "j")) + na_complex = adapt(Array{complex(elt)}, na) + @test na ≈ na_complex + @test eltype(na_complex) === complex(elt) +end +end diff --git a/NDTensors/src/lib/NamedDimsArrays/ext/NamedDimsArraysTensorAlgebraExt/src/NamedDimsArraysTensorAlgebraExt.jl b/NDTensors/src/lib/NamedDimsArrays/ext/NamedDimsArraysTensorAlgebraExt/src/NamedDimsArraysTensorAlgebraExt.jl new file mode 100644 index 0000000000..a397507f44 --- /dev/null +++ b/NDTensors/src/lib/NamedDimsArrays/ext/NamedDimsArraysTensorAlgebraExt/src/NamedDimsArraysTensorAlgebraExt.jl @@ -0,0 +1,7 @@ +module NamedDimsArraysTensorAlgebraExt +include("contract.jl") +include("fusedims.jl") +include("qr.jl") +include("eigen.jl") +include("svd.jl") +end diff --git a/NDTensors/src/NamedDimsArrays/ext/NamedDimsArraysTensorAlgebraExt/src/contract.jl b/NDTensors/src/lib/NamedDimsArrays/ext/NamedDimsArraysTensorAlgebraExt/src/contract.jl similarity index 91% rename from NDTensors/src/NamedDimsArrays/ext/NamedDimsArraysTensorAlgebraExt/src/contract.jl rename to NDTensors/src/lib/NamedDimsArrays/ext/NamedDimsArraysTensorAlgebraExt/src/contract.jl index a9bd24f321..4ed1db7589 100644 --- a/NDTensors/src/NamedDimsArrays/ext/NamedDimsArraysTensorAlgebraExt/src/contract.jl +++ b/NDTensors/src/lib/NamedDimsArrays/ext/NamedDimsArraysTensorAlgebraExt/src/contract.jl @@ -1,5 +1,5 @@ using NDTensors.NamedDimsArrays: AbstractNamedDimsArray, dimnames, named, unname -using NDTensors.TensorAlgebra: contract +using NDTensors.TensorAlgebra: TensorAlgebra, contract function TensorAlgebra.contract( na1::AbstractNamedDimsArray, na2::AbstractNamedDimsArray, α, β; kwargs... 
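A minimal usage sketch of the extended `TensorAlgebra.contract` (the names and sizes are illustrative, mirroring the extension's tests later in this diff):

using NDTensors.NamedDimsArrays: named, unname
using NDTensors.TensorAlgebra: TensorAlgebra
i, j, k = named(2, "i"), named(3, "j"), named(2, "k")
na1 = randn(i, j)
na2 = randn(j, k)
na12 = TensorAlgebra.contract(na1, na2)  # contracts over the shared name "j"
@assert unname(na12, (i, k)) ≈ unname(na1) * unname(na2)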
diff --git a/NDTensors/src/lib/NamedDimsArrays/ext/NamedDimsArraysTensorAlgebraExt/src/eigen.jl b/NDTensors/src/lib/NamedDimsArrays/ext/NamedDimsArraysTensorAlgebraExt/src/eigen.jl new file mode 100644 index 0000000000..31a7e6d5b1 --- /dev/null +++ b/NDTensors/src/lib/NamedDimsArrays/ext/NamedDimsArraysTensorAlgebraExt/src/eigen.jl @@ -0,0 +1,47 @@ +## using ..ITensors: IndexID +using LinearAlgebra: LinearAlgebra, Diagonal, Hermitian, eigen +## using ..NDTensors.DiagonalArrays: DiagonalMatrix +using ...NDTensors.NamedDimsArrays: AbstractNamedDimsArray, dimnames, name, unname +using ...NDTensors.RankFactorization: Spectrum, truncate!! +function LinearAlgebra.eigen( + na::Hermitian{T,<:AbstractNamedDimsArray{T}}; + mindim=nothing, + maxdim=nothing, + cutoff=nothing, + use_absolute_cutoff=nothing, + use_relative_cutoff=nothing, +) where {T<:Union{Real,Complex}} + # TODO: Handle array wrappers around + # `AbstractNamedDimsArray` more elegantly. + d, u = eigen(Hermitian(unname(parent(na)))) + + # Sort by largest to smallest eigenvalues + # TODO: Replace `cpu` with `Expose` dispatch. + p = sortperm(d; rev=true, by=abs) + d = d[p] + u = u[:, p] + + length_d = length(d) + truncerr = zero(Float64) # Make more generic + if any(!isnothing, (maxdim, cutoff)) + d, truncerr, _ = truncate!!( + d; mindim, maxdim, cutoff, use_absolute_cutoff, use_relative_cutoff + ) + length_d = length(d) + if length_d < size(u, 2) + u = u[:, 1:length_d] + end + end + spec = Spectrum(d, truncerr) + + # TODO: Handle array wrappers more generally. + names_a = dimnames(parent(na)) + # TODO: Make this more generic, handle `dag`, etc. + l = randname(names_a[1]) # IndexID(rand(UInt64), "", 0) + r = randname(names_a[2]) # IndexID(rand(UInt64), "", 0) + names_d = (l, r) + nd = named(Diagonal(d), names_d) + names_u = (names_a[2], r) + nu = named(u, names_u) + return nd, nu, spec +end diff --git a/NDTensors/src/lib/NamedDimsArrays/ext/NamedDimsArraysTensorAlgebraExt/src/fusedims.jl b/NDTensors/src/lib/NamedDimsArrays/ext/NamedDimsArraysTensorAlgebraExt/src/fusedims.jl new file mode 100644 index 0000000000..50c21ccf88 --- /dev/null +++ b/NDTensors/src/lib/NamedDimsArrays/ext/NamedDimsArraysTensorAlgebraExt/src/fusedims.jl @@ -0,0 +1,42 @@ +using ...NDTensors.TensorAlgebra: fusedims, splitdims + +function TensorAlgebra.fusedims(na::AbstractNamedDimsArray, fusions::Pair...) + # TODO: generalize to multiple fused groups of dimensions + @assert isone(length(fusions)) + fusion = only(fusions) + + split_names = first(fusion) + fused_name = last(fusion) + + split_dims = map(split_name -> findfirst(isequal(split_name), dimnames(na)), split_names) + fused_dim = findfirst(isequal(fused_name), dimnames(na)) + @assert isnothing(fused_dim) + + unfused_dims = Tuple.(setdiff(1:ndims(na), split_dims)) + partitioned_perm = (unfused_dims..., split_dims) + + a_fused = fusedims(unname(na), partitioned_perm...) + names_fused = (setdiff(dimnames(na), split_names)..., fused_name) + return named(a_fused, names_fused) +end + +function TensorAlgebra.splitdims(na::AbstractNamedDimsArray, splitters::Pair...) + fused_names = first.(splitters) + split_namedlengths = last.(splitters) + splitters_unnamed = map(splitters) do splitter + fused_name, split_namedlengths = splitter + fused_dim = findfirst(isequal(fused_name), dimnames(na)) + split_lengths = unname.(split_namedlengths) + return fused_dim => split_lengths + end + a_split = splitdims(unname(na), splitters_unnamed...) + names_split = Any[tuple.(dimnames(na))...] 
+ for splitter in splitters + fused_name, split_namedlengths = splitter + fused_dim = findfirst(isequal(fused_name), dimnames(na)) + split_names = name.(split_namedlengths) + names_split[fused_dim] = split_names + end + names_split = reduce((x, y) -> (x..., y...), names_split) + return named(a_split, names_split) +end diff --git a/NDTensors/src/lib/NamedDimsArrays/ext/NamedDimsArraysTensorAlgebraExt/src/qr.jl b/NDTensors/src/lib/NamedDimsArrays/ext/NamedDimsArraysTensorAlgebraExt/src/qr.jl new file mode 100644 index 0000000000..c48ce5dcc1 --- /dev/null +++ b/NDTensors/src/lib/NamedDimsArrays/ext/NamedDimsArraysTensorAlgebraExt/src/qr.jl @@ -0,0 +1,15 @@ +# using ..ITensors: IndexID +using LinearAlgebra: LinearAlgebra, qr +using ...NDTensors.NamedDimsArrays: AbstractNamedDimsArray, dimnames, name, randname, unname +function LinearAlgebra.qr(na::AbstractNamedDimsArray; positive=nothing) + # TODO: Make this more systematic. + names_a = dimnames(na) + # TODO: Create a `TensorAlgebra.qr`. + q, r = qr(unname(na)) + # TODO: Use `sim` or `rand(::IndexID)`. + name_qr = randname(names_a[1]) # IndexID(rand(UInt64), "", 0) + # TODO: Make this GPU-friendly. + nq = named(Matrix(q), (names_a[1], name_qr)) + nr = named(Matrix(r), (name_qr, names_a[2])) + return nq, nr +end diff --git a/NDTensors/src/lib/NamedDimsArrays/ext/NamedDimsArraysTensorAlgebraExt/src/svd.jl b/NDTensors/src/lib/NamedDimsArrays/ext/NamedDimsArraysTensorAlgebraExt/src/svd.jl new file mode 100644 index 0000000000..787f89e3f0 --- /dev/null +++ b/NDTensors/src/lib/NamedDimsArrays/ext/NamedDimsArraysTensorAlgebraExt/src/svd.jl @@ -0,0 +1,53 @@ +using LinearAlgebra: LinearAlgebra, svd +using ...NDTensors.RankFactorization: Spectrum, truncate!! +function LinearAlgebra.svd( + na::AbstractNamedDimsArray; + mindim=nothing, + maxdim=nothing, + cutoff=nothing, + use_absolute_cutoff=nothing, + use_relative_cutoff=nothing, + alg=nothing, + min_blockdim=nothing, +) + # TODO: Handle array wrappers around + # `AbstractNamedDimsArray` more elegantly. + USV = svd(unname(na)) + u, s, v = USV.U, USV.S, USV.Vt + + # Sort by largest to smallest singular values + # TODO: Replace `cpu` with `Expose` dispatch. + p = sortperm(s; rev=true, by=abs) + u = u[:, p] + s = s[p] + v = v[p, :] + + s² = s .^ 2 + length_s = length(s) + truncerr = zero(Float64) # Make more generic + if any(!isnothing, (maxdim, cutoff)) + s², truncerr, _ = truncate!!( + s²; mindim, maxdim, cutoff, use_absolute_cutoff, use_relative_cutoff + ) + length_s = length(s²) + # TODO: Avoid this if they are already the + # correct size. + u = u[:, 1:length_s] + s = s[1:length_s] + v = v[1:length_s, :] + end + spec = Spectrum(s², truncerr) + + # TODO: Handle array wrappers more generally. + names_a = dimnames(na) + # TODO: Make this more generic, handle `dag`, etc.
+ l = randname(names_a[1]) # IndexID(rand(UInt64), "", 0) + r = randname(names_a[2]) # IndexID(rand(UInt64), "", 0) + names_u = (names_a[1], l) + nu = named(u, names_u) + names_s = (l, r) + ns = named(Diagonal(s), names_s) + names_v = (r, names_a[2]) + nv = named(v, names_v) + return nu, ns, nv, spec +end diff --git a/NDTensors/src/lib/NamedDimsArrays/ext/NamedDimsArraysTensorAlgebraExt/test/Project.toml b/NDTensors/src/lib/NamedDimsArrays/ext/NamedDimsArraysTensorAlgebraExt/test/Project.toml new file mode 100644 index 0000000000..ef491a529c --- /dev/null +++ b/NDTensors/src/lib/NamedDimsArrays/ext/NamedDimsArraysTensorAlgebraExt/test/Project.toml @@ -0,0 +1,3 @@ +[deps] +NDTensors = "23ae76d9-e61a-49c4-8f12-3f1a16adf9cf" +Test = "8dfed614-e22c-5e08-85e1-65c5234f0b40" diff --git a/NDTensors/src/lib/NamedDimsArrays/ext/NamedDimsArraysTensorAlgebraExt/test/runtests.jl b/NDTensors/src/lib/NamedDimsArrays/ext/NamedDimsArraysTensorAlgebraExt/test/runtests.jl new file mode 100644 index 0000000000..31106b56d8 --- /dev/null +++ b/NDTensors/src/lib/NamedDimsArrays/ext/NamedDimsArraysTensorAlgebraExt/test/runtests.jl @@ -0,0 +1,29 @@ +@eval module $(gensym()) +using Test: @test, @testset, @test_broken +using NDTensors.NamedDimsArrays: named, unname +using NDTensors.TensorAlgebra: TensorAlgebra +using LinearAlgebra: qr +@testset "NamedDimsArraysTensorAlgebraExt contract (eltype=$(elt))" for elt in ( + Float32, ComplexF32, Float64, ComplexF64 +) + i = named(2, "i") + j = named(2, "j") + k = named(2, "k") + na1 = randn(elt, i, j) + na2 = randn(elt, j, k) + na_dest = TensorAlgebra.contract(na1, na2) + @test eltype(na_dest) === elt + @test unname(na_dest, (i, k)) ≈ unname(na1) * unname(na2) +end +@testset "NamedDimsArraysTensorAlgebraExt QR (eltype=$(elt))" for elt in ( + Float32, ComplexF32, Float64, ComplexF64 +) + di = 2 + dj = 2 + i = named(di, "i") + j = named(dj, "j") + na = randn(elt, i, j) + @test_broken error("QR not implemented yet") + # q, r = qr(na) +end +end diff --git a/NDTensors/src/NamedDimsArrays/generate_readme.jl b/NDTensors/src/lib/NamedDimsArrays/generate_readme.jl similarity index 100% rename from NDTensors/src/NamedDimsArrays/generate_readme.jl rename to NDTensors/src/lib/NamedDimsArrays/generate_readme.jl diff --git a/NDTensors/src/NamedDimsArrays/src/NamedDimsArrays.jl b/NDTensors/src/lib/NamedDimsArrays/src/NamedDimsArrays.jl similarity index 55% rename from NDTensors/src/NamedDimsArrays/src/NamedDimsArrays.jl rename to NDTensors/src/lib/NamedDimsArrays/src/NamedDimsArrays.jl index 7b524c1a49..88939f128b 100644 --- a/NDTensors/src/NamedDimsArrays/src/NamedDimsArrays.jl +++ b/NDTensors/src/lib/NamedDimsArrays/src/NamedDimsArrays.jl @@ -1,12 +1,21 @@ module NamedDimsArrays include("traits.jl") +include("randname.jl") include("abstractnamedint.jl") include("abstractnamedunitrange.jl") include("abstractnameddimsarray.jl") include("namedint.jl") include("namedunitrange.jl") include("nameddimsarray.jl") +include("constructors.jl") +include("similar.jl") +include("permutedims.jl") +include("promote_shape.jl") +include("map.jl") +include("broadcast_shape.jl") +include("broadcast.jl") # Extensions +include("../ext/NamedDimsArraysAdaptExt/src/NamedDimsArraysAdaptExt.jl") include("../ext/NamedDimsArraysTensorAlgebraExt/src/NamedDimsArraysTensorAlgebraExt.jl") end diff --git a/NDTensors/src/NamedDimsArrays/src/abstractnameddimsarray.jl b/NDTensors/src/lib/NamedDimsArrays/src/abstractnameddimsarray.jl similarity index 68% rename from 
NDTensors/src/NamedDimsArrays/src/abstractnameddimsarray.jl rename to NDTensors/src/lib/NamedDimsArrays/src/abstractnameddimsarray.jl index c9bd370931..71943b7414 100644 --- a/NDTensors/src/NamedDimsArrays/src/abstractnameddimsarray.jl +++ b/NDTensors/src/lib/NamedDimsArrays/src/abstractnameddimsarray.jl @@ -1,3 +1,6 @@ +using ..BaseExtensions: BaseExtensions + +# Some of the interface is inspired by: # https://github.com/invenia/NamedDims.jl # https://github.com/mcabbott/NamedPlus.jl @@ -25,10 +28,14 @@ isnamed(::AbstractNamedDimsArray) = true # Helper function, move to `utils.jl`. named_tuple(t::Tuple, names) = ntuple(i -> named(t[i], names[i]), length(t)) +# TODO: Should `axes` output named axes or not? # TODO: Use the proper type, `namedaxistype(a)`. -Base.axes(a::AbstractNamedDimsArray) = named_tuple(axes(unname(a)), dimnames(a)) +# Base.axes(a::AbstractNamedDimsArray) = named_tuple(axes(unname(a)), dimnames(a)) +Base.axes(a::AbstractNamedDimsArray) = axes(unname(a)) +namedaxes(a::AbstractNamedDimsArray) = named.(axes(unname(a)), dimnames(a)) # TODO: Use the proper type, `namedlengthtype(a)`. -Base.size(a::AbstractNamedDimsArray) = length.(axes(a)) +Base.size(a::AbstractNamedDimsArray) = size(unname(a)) +namedsize(a::AbstractNamedDimsArray) = named.(size(unname(a)), dimnames(a)) Base.getindex(a::AbstractNamedDimsArray, I...) = unname(a)[I...] function Base.setindex!(a::AbstractNamedDimsArray, x, I...) unname(a)[I...] = x @@ -46,30 +53,48 @@ rename(a::AbstractNamedDimsArray, names) = named(unname(a), names) # replacenames(a, :i => :a, :j => :b) # `rename` in `NamedPlus.jl`. -replacenames(a::AbstractNamedDimsArray, names::Pair) = error("Not implemented yet") +function replacenames(na::AbstractNamedDimsArray, replacements::Pair...) + # `BaseExtensions.replace` needed for `Tuple` support on Julia 1.6 and older. + return named(unname(na), BaseExtensions.replace(dimnames(na), replacements...)) +end # Either define new names or replace names setnames(a::AbstractArray, names) = named(a, names) setnames(a::AbstractNamedDimsArray, names) = rename(a, names) +# TODO: Move to `utils.jl` file. function getperm(x, y) return map(xᵢ -> findfirst(isequal(xᵢ), y), x) end +# TODO: Use `isnamed` trait? function get_name_perm(a::AbstractNamedDimsArray, names::Tuple) + # TODO: Call `getperm(dimnames(a), dimnames(namedints))`. return getperm(dimnames(a), names) end +# Fixes ambiguity error +# TODO: Use `isnamed` trait? +function get_name_perm(a::AbstractNamedDimsArray, names::Tuple{}) + # TODO: Call `getperm(dimnames(a), dimnames(namedints))`. + @assert iszero(ndims(a)) + return () +end + +# TODO: Use `isnamed` trait? function get_name_perm( a::AbstractNamedDimsArray, namedints::Tuple{Vararg{AbstractNamedInt}} ) - return getperm(size(a), namedints) + # TODO: Call `getperm(dimnames(a), dimnames(namedints))`. + return getperm(namedsize(a), namedints) end +# TODO: Use `isnamed` trait? function get_name_perm( - a::AbstractNamedDimsArray, namedaxes::Tuple{Vararg{AbstractNamedUnitRange}} + a::AbstractNamedDimsArray, new_namedaxes::Tuple{Vararg{AbstractNamedUnitRange}} ) - return getperm(axes(a), namedaxes) + # TODO: Call `getperm(dimnames(a), dimnames(namedints))`. + return getperm(namedaxes(a), new_namedaxes) end # Indexing @@ -104,10 +129,11 @@ unname(a::AbstractArray) = a # Permute into a certain order.
# align(a, (:j, :k, :i)) # Like `named(nameless(a, names), names)` -function align(a::AbstractNamedDimsArray, names) - perm = get_name_perm(a, names) +function align(na::AbstractNamedDimsArray, names) + perm = get_name_perm(na, names) # TODO: Avoid permutation if it is a trivial permutation? - return typeof(a)(permutedims(unname(a), perm), names) + # return typeof(a)(permutedims(unname(a), perm), names) + return permutedims(na, perm) end # Unwrapping names and permuting diff --git a/NDTensors/src/NamedDimsArrays/src/abstractnamedint.jl b/NDTensors/src/lib/NamedDimsArrays/src/abstractnamedint.jl similarity index 100% rename from NDTensors/src/NamedDimsArrays/src/abstractnamedint.jl rename to NDTensors/src/lib/NamedDimsArrays/src/abstractnamedint.jl diff --git a/NDTensors/src/lib/NamedDimsArrays/src/abstractnamedunitrange.jl b/NDTensors/src/lib/NamedDimsArrays/src/abstractnamedunitrange.jl new file mode 100644 index 0000000000..d2d5630d28 --- /dev/null +++ b/NDTensors/src/lib/NamedDimsArrays/src/abstractnamedunitrange.jl @@ -0,0 +1,35 @@ +abstract type AbstractNamedUnitRange{T,Value<:AbstractUnitRange{T},Name} <: + AbstractUnitRange{T} end + +# Required interface +unname(::AbstractNamedUnitRange) = error("Not implemented") +name(::AbstractNamedUnitRange) = error("Not implemented") + +# Traits +isnamed(::AbstractNamedUnitRange) = true + +# Unit range +Base.first(i::AbstractNamedUnitRange) = first(unname(i)) +Base.last(i::AbstractNamedUnitRange) = last(unname(i)) +Base.length(i::AbstractNamedUnitRange) = named(length(unname(i)), name(i)) + +# TODO: Use `isnamed` trait? +dimnames(a::Tuple{Vararg{AbstractNamedUnitRange}}) = name.(a) + +unname(a::Tuple{Vararg{AbstractNamedUnitRange}}) = unname.(a) +unname(a::Tuple{Vararg{AbstractNamedUnitRange}}, names) = unname(align(a, names)) + +function named(as::Tuple{Vararg{AbstractUnitRange}}, names) + return ntuple(j -> named(as[j], names[j]), length(as)) +end + +function get_name_perm(a::Tuple{Vararg{AbstractNamedUnitRange}}, names::Tuple) + return getperm(dimnames(a), names) +end + +# Permute into a certain order. +# align(a, (:j, :k, :i)) +function align(a::Tuple{Vararg{AbstractNamedUnitRange}}, names) + perm = get_name_perm(a, names) + return map(j -> a[j], perm) +end diff --git a/NDTensors/src/lib/NamedDimsArrays/src/broadcast.jl b/NDTensors/src/lib/NamedDimsArrays/src/broadcast.jl new file mode 100644 index 0000000000..c3ee82c92f --- /dev/null +++ b/NDTensors/src/lib/NamedDimsArrays/src/broadcast.jl @@ -0,0 +1,42 @@ +using Base.Broadcast: BroadcastStyle, AbstractArrayStyle, DefaultArrayStyle, Broadcasted +using ..BroadcastMapConversion: map_function, map_args + +struct NamedDimsArrayStyle{N} <: AbstractArrayStyle{N} end + +function Broadcast.BroadcastStyle(::Type{<:AbstractNamedDimsArray{<:Any,N}}) where {N} + return NamedDimsArrayStyle{N}() +end + +NamedDimsArrayStyle(::Val{N}) where {N} = NamedDimsArrayStyle{N}() +NamedDimsArrayStyle{M}(::Val{N}) where {M,N} = NamedDimsArrayStyle{N}() + +Broadcast.BroadcastStyle(a::NamedDimsArrayStyle, ::DefaultArrayStyle{0}) = a +function Broadcast.BroadcastStyle(::NamedDimsArrayStyle{N}, a::DefaultArrayStyle) where {N} + return BroadcastStyle(DefaultArrayStyle{N}(), a) +end +function Broadcast.BroadcastStyle( + ::NamedDimsArrayStyle{N}, ::Broadcast.Style{Tuple} +) where {N} + return DefaultArrayStyle{N}() +end + +# TODO: Is this needed? +# Define `output_names`, like `allocate_output`. 
+# function dimnames(bc::Broadcasted{<:NamedDimsArrayStyle}) +# return dimnames(first(map_args(bc))) +# end + +# TODO: Use `allocate_output`, share logic with `map`. +function Base.similar(bc::Broadcasted{<:NamedDimsArrayStyle}, elt::Type) + return similar(first(map_args(bc)), elt) +end + +# Broadcasting implementation +function Base.copyto!( + dest::AbstractNamedDimsArray{<:Any,N}, bc::Broadcasted{NamedDimsArrayStyle{N}} +) where {N} + # convert to map + # flatten and only keep the AbstractArray arguments + map!(map_function(bc), dest, map_args(bc)...) + return dest +end diff --git a/NDTensors/src/lib/NamedDimsArrays/src/broadcast_shape.jl b/NDTensors/src/lib/NamedDimsArrays/src/broadcast_shape.jl new file mode 100644 index 0000000000..ad6986190f --- /dev/null +++ b/NDTensors/src/lib/NamedDimsArrays/src/broadcast_shape.jl @@ -0,0 +1,24 @@ +using Base.Broadcast: Broadcast, broadcast_shape, combine_axes + +# TODO: Have `axes` output named axes so Base functions "just work". +function Broadcast.combine_axes(na1::AbstractNamedDimsArray, nas::AbstractNamedDimsArray...) + return broadcast_shape(namedaxes(na1), combine_axes(nas...)) +end +function Broadcast.combine_axes(na1::AbstractNamedDimsArray, na2::AbstractNamedDimsArray) + return broadcast_shape(namedaxes(na1), namedaxes(na2)) +end +Broadcast.combine_axes(na::AbstractNamedDimsArray) = namedaxes(na) + +function Broadcast.broadcast_shape( + na1::Tuple{Vararg{AbstractNamedUnitRange}}, + na2::Tuple{Vararg{AbstractNamedUnitRange}}, + nas::Tuple{Vararg{AbstractNamedUnitRange}}..., +) + return broadcast_shape(broadcast_shape(na1, na2), nas...) +end + +function Broadcast.broadcast_shape( + na1::Tuple{Vararg{AbstractNamedUnitRange}}, na2::Tuple{Vararg{AbstractNamedUnitRange}} +) + return promote_shape(na1, na2) +end diff --git a/NDTensors/src/lib/NamedDimsArrays/src/constructors.jl b/NDTensors/src/lib/NamedDimsArrays/src/constructors.jl new file mode 100644 index 0000000000..9e167beea0 --- /dev/null +++ b/NDTensors/src/lib/NamedDimsArrays/src/constructors.jl @@ -0,0 +1,48 @@ +using Random: AbstractRNG, default_rng + +# TODO: Use `AbstractNamedUnitRange`, determine the `AbstractNamedDimsArray` +# from a default value. Useful for distinguishing between `NamedDimsArray` +# and `ITensor`.
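+# A usage sketch for the convenience constructors defined below +# (the names "i" and "j" and the sizes are illustrative): +# +# i, j = named.((2, 3), ("i", "j")) +# randn(Float32, i, j) # 2×3 named array with dimension names ("i", "j") +# fill(1.0, (i, j)) # same named shape, filled with `1.0`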
+# Convenient constructors +default_eltype() = Float64 +for f in [:rand, :randn] + @eval begin + function Base.$f( + rng::AbstractRNG, elt::Type{<:Number}, dims::Tuple{NamedInt,Vararg{NamedInt}} + ) + a = $f(rng, elt, unname.(dims)) + return named(a, name.(dims)) + end + function Base.$f( + rng::AbstractRNG, elt::Type{<:Number}, dim1::NamedInt, dims::Vararg{NamedInt} + ) + return $f(rng, elt, (dim1, dims...)) + end + Base.$f(elt::Type{<:Number}, dims::Tuple{NamedInt,Vararg{NamedInt}}) = + $f(default_rng(), elt, dims) + Base.$f(elt::Type{<:Number}, dim1::NamedInt, dims::Vararg{NamedInt}) = + $f(elt, (dim1, dims...)) + Base.$f(dims::Tuple{NamedInt,Vararg{NamedInt}}) = $f(default_eltype(), dims) + Base.$f(dim1::NamedInt, dims::Vararg{NamedInt}) = $f((dim1, dims...)) + end +end +for f in [:zeros, :ones] + @eval begin + function Base.$f(elt::Type{<:Number}, dims::Tuple{NamedInt,Vararg{NamedInt}}) + a = $f(elt, unname.(dims)) + return named(a, name.(dims)) + end + function Base.$f(elt::Type{<:Number}, dim1::NamedInt, dims::Vararg{NamedInt}) + return $f(elt, (dim1, dims...)) + end + Base.$f(dims::Tuple{NamedInt,Vararg{NamedInt}}) = $f(default_eltype(), dims) + Base.$f(dim1::NamedInt, dims::Vararg{NamedInt}) = $f((dim1, dims...)) + end +end +function Base.fill(value, dims::Tuple{NamedInt,Vararg{NamedInt}}) + a = fill(value, unname.(dims)) + return named(a, name.(dims)) +end +function Base.fill(value, dim1::NamedInt, dims::Vararg{NamedInt}) + return fill(value, (dim1, dims...)) +end diff --git a/NDTensors/src/lib/NamedDimsArrays/src/map.jl b/NDTensors/src/lib/NamedDimsArrays/src/map.jl new file mode 100644 index 0000000000..d7b38575ec --- /dev/null +++ b/NDTensors/src/lib/NamedDimsArrays/src/map.jl @@ -0,0 +1,14 @@ +# TODO: Handle maybe-mutation. +# TODO: Handle permutations more efficiently by fusing with `f`. +function Base.map!(f, na_dest::AbstractNamedDimsArray, nas::AbstractNamedDimsArray...) + a_dest = unname(na_dest) + as = map(na -> unname(na, dimnames(na_dest)), nas) + map!(f, a_dest, as...) + return na_dest +end + +function Base.map(f, nas::AbstractNamedDimsArray...) + na_dest = similar(first(nas)) + map!(f, na_dest, nas...) + return na_dest +end diff --git a/NDTensors/src/NamedDimsArrays/src/nameddimsarray.jl b/NDTensors/src/lib/NamedDimsArrays/src/nameddimsarray.jl similarity index 71% rename from NDTensors/src/NamedDimsArrays/src/nameddimsarray.jl rename to NDTensors/src/lib/NamedDimsArrays/src/nameddimsarray.jl index 9d68022acd..f099334f79 100644 --- a/NDTensors/src/NamedDimsArrays/src/nameddimsarray.jl +++ b/NDTensors/src/lib/NamedDimsArrays/src/nameddimsarray.jl @@ -1,23 +1,54 @@ +function _NamedDimsArray end + struct NamedDimsArray{T,N,Arr<:AbstractArray{T,N},Names} <: AbstractNamedDimsArray{T,N,Names} array::Arr names::Names + global @inline function _NamedDimsArray(array::AbstractArray, names) + elt = eltype(array) + n = ndims(array) + arraytype = typeof(array) + namestype = typeof(names) + return new{elt,n,arraytype,namestype}(array, names) + end + + # TODO: Delete + global @inline function _NamedDimsArray(array::NamedDimsArray, names) + return error("Not implemented, already named.") + end +end + +function NamedDimsArray{T,N,Arr,Names}( + a::AbstractArray, names +) where {T,N,Arr<:AbstractArray{T,N},Names} + return _NamedDimsArray(convert(Arr, a), convert(Names, names)) end +# TODO: Combine with other constructor definitions. 
+function NamedDimsArray{T,N,Arr,Names}( + a::AbstractArray, names::Tuple{} +) where {T,N,Arr<:AbstractArray{T,N},Names} + return _NamedDimsArray(convert(Arr, a), convert(Names, names)) +end + +NamedDimsArray(a::AbstractArray, names) = _NamedDimsArray(a, names) + # TODO: Check size consistency +# TODO: Combine with other constructor definitions. function NamedDimsArray{T,N,Arr,Names}( a::AbstractArray, namedsize::Tuple{Vararg{AbstractNamedInt}} ) where {T,N,Arr<:AbstractArray{T,N},Names} @assert size(a) == unname.(namedsize) - return NamedDimsArray{T,N,Arr,Names}(a, name.(namedsize)) + return _NamedDimsArray(convert(Arr, a), convert(Names, name.(namedsize))) end # TODO: Check axes consistency +# TODO: Combine with other constructor definitions. function NamedDimsArray{T,N,Arr,Names}( a::AbstractArray, namedaxes::Tuple{Vararg{AbstractNamedUnitRange}} ) where {T,N,Arr<:AbstractArray{T,N},Names} @assert axes(a) == unname.(namedaxes) - return NamedDimsArray{T,N,Arr,Names}(a, name.(namedaxes)) + return _NamedDimsArray(convert(Arr, a), convert(Names, name.(namedaxes))) end # Required interface @@ -58,13 +89,6 @@ function Base.similar( return undefs(arraytype, axes) end -# TODO: Use `AbstractNamedUnitRange`, determine the `AbstractNamedDimsArray` -# from a default value. Useful for distinguishing between `NamedDimsArray` -# and `ITensor`. -function Base.randn(size::Vararg{NamedInt}) - return named(randn(unname.(size)), name.(size)) -end - # TODO: Define `NamedInt`, `NamedUnitRange`, `NamedVector <: AbstractVector`, etc. # See https://github.com/mcabbott/NamedPlus.jl/blob/v0.0.5/src/int.jl. diff --git a/NDTensors/src/NamedDimsArrays/src/namedint.jl b/NDTensors/src/lib/NamedDimsArrays/src/namedint.jl similarity index 50% rename from NDTensors/src/NamedDimsArrays/src/namedint.jl rename to NDTensors/src/lib/NamedDimsArrays/src/namedint.jl index ed09829d1e..13b8a88141 100644 --- a/NDTensors/src/NamedDimsArrays/src/namedint.jl +++ b/NDTensors/src/lib/NamedDimsArrays/src/namedint.jl @@ -12,3 +12,17 @@ name(i::NamedInt) = i.name # Convenient constructor named(i::Integer, name) = NamedInt(i, name) + +# TODO: Use `isnamed` trait? +dimnames(a::Tuple{Vararg{AbstractNamedInt}}) = name.(a) + +function get_name_perm(a::Tuple{Vararg{AbstractNamedInt}}, names::Tuple) + return getperm(dimnames(a), names) +end + +# Permute into a certain order. 
+# align(a, (:j, :k, :i)) +function align(a::Tuple{Vararg{AbstractNamedInt}}, names) + perm = get_name_perm(a, names) + return map(j -> a[j], perm) +end diff --git a/NDTensors/src/NamedDimsArrays/src/namedunitrange.jl b/NDTensors/src/lib/NamedDimsArrays/src/namedunitrange.jl similarity index 100% rename from NDTensors/src/NamedDimsArrays/src/namedunitrange.jl rename to NDTensors/src/lib/NamedDimsArrays/src/namedunitrange.jl diff --git a/NDTensors/src/lib/NamedDimsArrays/src/permutedims.jl b/NDTensors/src/lib/NamedDimsArrays/src/permutedims.jl new file mode 100644 index 0000000000..5e81a3bfd7 --- /dev/null +++ b/NDTensors/src/lib/NamedDimsArrays/src/permutedims.jl @@ -0,0 +1,4 @@ +function Base.permutedims(na::AbstractNamedDimsArray, perm) + names = map(j -> dimnames(na)[j], perm) + return named(permutedims(unname(na), perm), names) +end diff --git a/NDTensors/src/lib/NamedDimsArrays/src/promote_shape.jl b/NDTensors/src/lib/NamedDimsArrays/src/promote_shape.jl new file mode 100644 index 0000000000..84fe517ecf --- /dev/null +++ b/NDTensors/src/lib/NamedDimsArrays/src/promote_shape.jl @@ -0,0 +1,12 @@ +function Base.promote_shape(na1::AbstractNamedDimsArray, na2::AbstractNamedDimsArray) + return promote_shape(namedaxes(na1), namedaxes(na2)) +end + +function Base.promote_shape( + na1::Tuple{Vararg{AbstractNamedUnitRange}}, na2::Tuple{Vararg{AbstractNamedUnitRange}} +) + a1 = unname(na1) + a2 = unname(na2, dimnames(na1)) + a_promoted = promote_shape(a1, a2) + return named(a_promoted, dimnames(na1)) +end diff --git a/NDTensors/src/lib/NamedDimsArrays/src/randname.jl b/NDTensors/src/lib/NamedDimsArrays/src/randname.jl new file mode 100644 index 0000000000..32d15ef556 --- /dev/null +++ b/NDTensors/src/lib/NamedDimsArrays/src/randname.jl @@ -0,0 +1 @@ +randname(::Any) = error("Not implemented") diff --git a/NDTensors/src/lib/NamedDimsArrays/src/similar.jl b/NDTensors/src/lib/NamedDimsArrays/src/similar.jl new file mode 100644 index 0000000000..441afecd19 --- /dev/null +++ b/NDTensors/src/lib/NamedDimsArrays/src/similar.jl @@ -0,0 +1,49 @@ +# `similar` + +# Preserve the names +Base.similar(na::AbstractNamedDimsArray) = named(similar(unname(na)), dimnames(na)) +function Base.similar(na::AbstractNamedDimsArray, elt::Type) + return named(similar(unname(na), elt), dimnames(na)) +end + +# Remove the names +# TODO: Make versions taking `NamedUnitRange` and `NamedInt`. +function Base.similar(na::AbstractNamedDimsArray, elt::Type, dims::Tuple{Vararg{Int64}}) + return similar(unname(na), elt, dims) +end + +# Remove the names +# TODO: Make versions taking `NamedUnitRange` and `NamedInt`. +function Base.similar( + na::AbstractNamedDimsArray, elt::Type, dims::Tuple{Integer,Vararg{Integer}} +) + return similar(unname(na), elt, dims) +end + +# Remove the names +# TODO: Make versions taking `NamedUnitRange` and `NamedInt`. +function Base.similar( + na::AbstractNamedDimsArray, + elt::Type, + dims::Tuple{Union{Integer,Base.OneTo},Vararg{Union{Integer,Base.OneTo}}}, +) + return similar(unname(na), elt, dims) +end + +# Remove the names +# TODO: Make versions taking `NamedUnitRange` and `NamedInt`. +function Base.similar( + na::AbstractNamedDimsArray, elt::Type, dims::Union{Integer,AbstractUnitRange}... +) + return similar(unname(na), elt, dims...) +end + +# Remove the names +# TODO: Make versions taking `NamedUnitRange` and `NamedInt`. +Base.similar(na::AbstractNamedDimsArray, dims::Tuple) = similar(unname(na), dims) + +# Remove the names +# TODO: Make versions taking `NamedUnitRange` and `NamedInt`. 
+function Base.similar(na::AbstractNamedDimsArray, dims::Union{Integer,AbstractUnitRange}...) + return similar(unname(na), dims...) +end diff --git a/NDTensors/src/NamedDimsArrays/src/traits.jl b/NDTensors/src/lib/NamedDimsArrays/src/traits.jl similarity index 100% rename from NDTensors/src/NamedDimsArrays/src/traits.jl rename to NDTensors/src/lib/NamedDimsArrays/src/traits.jl diff --git a/NDTensors/src/lib/NamedDimsArrays/test/Project.toml b/NDTensors/src/lib/NamedDimsArrays/test/Project.toml new file mode 100644 index 0000000000..18bbc81308 --- /dev/null +++ b/NDTensors/src/lib/NamedDimsArrays/test/Project.toml @@ -0,0 +1,3 @@ +[deps] +Adapt = "79e6a3ab-5dfb-504d-930d-738a2a938a0e" +NDTensors = "23ae76d9-e61a-49c4-8f12-3f1a16adf9cf" diff --git a/NDTensors/src/NamedDimsArrays/test/runtests.jl b/NDTensors/src/lib/NamedDimsArrays/test/runtests.jl similarity index 93% rename from NDTensors/src/NamedDimsArrays/test/runtests.jl rename to NDTensors/src/lib/NamedDimsArrays/test/runtests.jl index c76016a348..df2a0fba66 100644 --- a/NDTensors/src/NamedDimsArrays/test/runtests.jl +++ b/NDTensors/src/lib/NamedDimsArrays/test/runtests.jl @@ -1,4 +1,4 @@ -using Test +using Test: @testset @testset "NamedDimsArrays" begin filenames = filter(readdir(@__DIR__)) do filename diff --git a/NDTensors/src/lib/NamedDimsArrays/test/test_NamedDimsArraysAdaptExt.jl b/NDTensors/src/lib/NamedDimsArrays/test/test_NamedDimsArraysAdaptExt.jl new file mode 100644 index 0000000000..10ffe6fb7d --- /dev/null +++ b/NDTensors/src/lib/NamedDimsArrays/test/test_NamedDimsArraysAdaptExt.jl @@ -0,0 +1,5 @@ +using Test: @testset + +@testset "NamedDimsArrays $(@__FILE__)" begin + include("../ext/NamedDimsArraysAdaptExt/test/runtests.jl") +end diff --git a/NDTensors/src/NamedDimsArrays/test/test_NamedDimsArraysTensorAlgebraExt.jl b/NDTensors/src/lib/NamedDimsArrays/test/test_NamedDimsArraysTensorAlgebraExt.jl similarity index 100% rename from NDTensors/src/NamedDimsArrays/test/test_NamedDimsArraysTensorAlgebraExt.jl rename to NDTensors/src/lib/NamedDimsArrays/test/test_NamedDimsArraysTensorAlgebraExt.jl diff --git a/NDTensors/src/lib/NamedDimsArrays/test/test_basic.jl b/NDTensors/src/lib/NamedDimsArrays/test/test_basic.jl new file mode 100644 index 0000000000..480e9a259a --- /dev/null +++ b/NDTensors/src/lib/NamedDimsArrays/test/test_basic.jl @@ -0,0 +1,108 @@ +@eval module $(gensym()) +using Test: @test, @testset +using NDTensors.NamedDimsArrays: + NamedDimsArrays, + NamedDimsArray, + align, + dimnames, + isnamed, + named, + namedaxes, + namedsize, + unname + +@testset "NamedDimsArrays $(@__FILE__)" begin + @testset "Basic functionality" begin + a = randn(3, 4) + na = named(a, ("i", "j")) + # TODO: Just call this `size`? + i, j = namedsize(na) + # TODO: Just call `namedaxes`? + ai, aj = namedaxes(na) + @test !isnamed(a) + @test isnamed(na) + @test dimnames(na) == ("i", "j") + @test na[1, 1] == a[1, 1] + na[1, 1] = 11 + @test na[1, 1] == 11 + # TODO: Should `size` output `namedsize`? + @test size(na) == (3, 4) + @test namedsize(na) == (named(3, "i"), named(4, "j")) + @test length(na) == 12 + # TODO: Should `axes` output `namedaxes`? 
+ @test axes(na) == (1:3, 1:4) + @test namedaxes(na) == (named(1:3, "i"), named(1:4, "j")) + @test randn(named(3, "i"), named(4, "j")) isa NamedDimsArray + @test na["i" => 1, "j" => 2] == a[1, 2] + @test na["j" => 2, "i" => 1] == a[1, 2] + na["j" => 2, "i" => 1] = 12 + @test na[1, 2] == 12 + @test na[j => 1, i => 2] == a[2, 1] + @test na[aj => 1, ai => 2] == a[2, 1] + na[j => 1, i => 2] = 21 + @test na[2, 1] == 21 + na[aj => 1, ai => 2] = 2211 + @test na[2, 1] == 2211 + na′ = align(na, ("j", "i")) + @test a == permutedims(unname(na′), (2, 1)) + na′ = align(na, (j, i)) + @test a == permutedims(unname(na′), (2, 1)) + na′ = align(na, (aj, ai)) + @test a == permutedims(unname(na′), (2, 1)) + end + @testset "Shorthand constructors (eltype=$elt)" for elt in ( + Float32, ComplexF32, Float64, ComplexF64 + ) + i, j = named.((2, 2), ("i", "j")) + value = rand(elt) + for na in (zeros(elt, i, j), zeros(elt, (i, j))) + @test eltype(na) === elt + @test na isa NamedDimsArray + @test dimnames(na) == ("i", "j") + @test iszero(na) + end + for na in (fill(value, i, j), fill(value, (i, j))) + @test eltype(na) === elt + @test na isa NamedDimsArray + @test dimnames(na) == ("i", "j") + @test all(isequal(value), na) + end + for na in (rand(elt, i, j), rand(elt, (i, j))) + @test eltype(na) === elt + @test na isa NamedDimsArray + @test dimnames(na) == ("i", "j") + @test !iszero(na) + @test all(x -> real(x) > 0, na) + end + for na in (randn(elt, i, j), randn(elt, (i, j))) + @test eltype(na) === elt + @test na isa NamedDimsArray + @test dimnames(na) == ("i", "j") + @test !iszero(na) + end + end + @testset "Shorthand constructors (eltype=unspecified)" begin + i, j = named.((2, 2), ("i", "j")) + default_elt = Float64 + for na in (zeros(i, j), zeros((i, j))) + @test eltype(na) === default_elt + @test na isa NamedDimsArray + @test dimnames(na) == ("i", "j") + @test iszero(na) + end + for na in (rand(i, j), rand((i, j))) + @test eltype(na) === default_elt + @test na isa NamedDimsArray + @test dimnames(na) == ("i", "j") + @test !iszero(na) + @test all(x -> real(x) > 0, na) + end + for na in (randn(i, j), randn((i, j))) + @test eltype(na) === default_elt + @test na isa NamedDimsArray + @test dimnames(na) == ("i", "j") + @test !iszero(na) + end + end +end +end diff --git a/NDTensors/src/lib/NamedDimsArrays/test/test_tensoralgebra.jl b/NDTensors/src/lib/NamedDimsArrays/test/test_tensoralgebra.jl new file mode 100644 index 0000000000..464f85510c --- /dev/null +++ b/NDTensors/src/lib/NamedDimsArrays/test/test_tensoralgebra.jl @@ -0,0 +1,80 @@ +@eval module $(gensym()) +using Test: @test, @testset +using NDTensors.NamedDimsArrays: dimnames, named, unname, isnamed +@testset "NamedDimsArrays $(@__FILE__) (eltype=$elt)" for elt in ( + Float32, ComplexF32, Float64, ComplexF64 +) + a = randn(elt, 2, 3) + na = named(a, ("i", "j")) + b = randn(elt, 3, 2) + nb = named(b, ("j", "i")) + + nc = similar(na) + @test size(nc) == (2, 3) + @test eltype(nc) == elt + @test dimnames(nc) == ("i", "j") + + nc = similar(na, Float32) + @test size(nc) == (2, 3) + @test eltype(nc) == Float32 + @test dimnames(nc) == ("i", "j") + + c = similar(na, (3, 4)) + @test size(c) == (3, 4) + @test eltype(c) == elt + @test c isa typeof(a) + + c = similar(na, 3, 4) + @test size(c) == (3, 4) + @test eltype(c) == elt + @test c isa typeof(a) + + c = similar(na, Float32, (3, 4)) + @test size(c) == (3, 4) + @test eltype(c) == Float32 + @test !isnamed(c) + + c = similar(na, Float32, 3, 4) + @test size(c) == (3, 4) + @test eltype(c) == Float32 + @test !isnamed(c) + + nc = 
permutedims(na, (2, 1)) + @test unname(nc) ≈ permutedims(unname(na), (2, 1)) + @test dimnames(nc) == ("j", "i") + @test nc ≈ na + + nc = 2 * na + @test unname(nc) ≈ 2 * a + @test eltype(nc) === elt + + nc = 2 .* na + @test unname(nc) ≈ 2 * a + @test eltype(nc) === elt + + nc = na + nb + @test unname(nc, ("i", "j")) ≈ a + permutedims(b, (2, 1)) + @test eltype(nc) === elt + + nc = na .+ nb + @test unname(nc, ("i", "j")) ≈ a + permutedims(b, (2, 1)) + @test eltype(nc) === elt + + nc = map(+, na, nb) + @test unname(nc, ("i", "j")) ≈ a + permutedims(b, (2, 1)) + @test eltype(nc) === elt + + nc = named(randn(elt, 2, 3), ("i", "j")) + map!(+, nc, na, nb) + @test unname(nc, ("i", "j")) ≈ a + permutedims(b, (2, 1)) + @test eltype(nc) === elt + + nc = na - nb + @test unname(nc, ("i", "j")) ≈ a - permutedims(b, (2, 1)) + @test eltype(nc) === elt + + nc = na .- nb + @test unname(nc, ("i", "j")) ≈ a - permutedims(b, (2, 1)) + @test eltype(nc) === elt +end +end diff --git a/NDTensors/src/lib/RankFactorization/src/RankFactorization.jl b/NDTensors/src/lib/RankFactorization/src/RankFactorization.jl new file mode 100644 index 0000000000..a2b950ac93 --- /dev/null +++ b/NDTensors/src/lib/RankFactorization/src/RankFactorization.jl @@ -0,0 +1,5 @@ +module RankFactorization +include("default_kwargs.jl") +include("truncate_spectrum.jl") +include("spectrum.jl") +end diff --git a/NDTensors/src/lib/RankFactorization/src/default_kwargs.jl b/NDTensors/src/lib/RankFactorization/src/default_kwargs.jl new file mode 100644 index 0000000000..b954dbb22a --- /dev/null +++ b/NDTensors/src/lib/RankFactorization/src/default_kwargs.jl @@ -0,0 +1,10 @@ +replace_nothing(::Nothing, replacement) = replacement +replace_nothing(value, replacement) = value + +default_maxdim(a) = minimum(size(a)) +default_mindim(a) = true +default_cutoff(a) = zero(eltype(a)) +default_svd_alg(a) = default_svd_alg(unwrap_type(a), a) +default_svd_alg(::Type{<:AbstractArray}, a) = "divide_and_conquer" +default_use_absolute_cutoff(a) = false +default_use_relative_cutoff(a) = true diff --git a/NDTensors/src/lib/RankFactorization/src/spectrum.jl b/NDTensors/src/lib/RankFactorization/src/spectrum.jl new file mode 100644 index 0000000000..7d6e9c46b7 --- /dev/null +++ b/NDTensors/src/lib/RankFactorization/src/spectrum.jl @@ -0,0 +1,23 @@ +""" + Spectrum +Contains the (truncated) density matrix eigenvalue spectrum computed during a +decomposition by `svd` or `eigen`, and also stores the truncation error. +""" +struct Spectrum{VecT<:Union{AbstractVector,Nothing},ElT<:Real} + eigs::VecT + truncerr::ElT +end + +eigs(s::Spectrum) = s.eigs +truncerror(s::Spectrum) = s.truncerr + +function entropy(s::Spectrum) + S = 0.0 + eigs_s = eigs(s) + isnothing(eigs_s) && + error("Spectrum does not contain any eigenvalues, cannot compute the entropy") + for p in eigs_s + p > 1e-13 && (S -= p * log(p)) + end + return S +end diff --git a/NDTensors/src/lib/RankFactorization/src/truncate_spectrum.jl b/NDTensors/src/lib/RankFactorization/src/truncate_spectrum.jl new file mode 100644 index 0000000000..3d7309091a --- /dev/null +++ b/NDTensors/src/lib/RankFactorization/src/truncate_spectrum.jl @@ -0,0 +1,105 @@ +using ..NDTensors.Unwrap: unwrap_type + +## TODO write Exposed version of truncate +function truncate!!(P::AbstractArray; kwargs...) + return truncate!!(unwrap_type(P), P; kwargs...) +end + +# CPU version. +function truncate!!(::Type{<:Array}, P::AbstractArray; kwargs...) + truncerr, docut = truncate!(P; kwargs...)
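+ # `truncate!` mutates `P` in place and returns the discarded weight plus a
+ # suggested cutoff value. A hedged worked example with the defaults
+ # (relative cutoff, mindim of 1):
+ #   P = [0.5, 0.3, 0.1, 0.1]
+ #   truncate!(P; cutoff=0.25) # keeps [0.5, 0.3]
+ #   # returns truncerr ≈ 0.2 and docut == (0.3 + 0.1) / 2 == 0.2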
+ return P, truncerr, docut +end + +# GPU fallback version, convert to CPU. +function truncate!!(::Type{<:AbstractArray}, P::AbstractArray; kwargs...) + P_cpu = cpu(P) + truncerr, docut = truncate!(P_cpu; kwargs...) + P = adapt(unwrap_type(P), P_cpu) + return P, truncerr, docut +end + +# CPU implementation. +function truncate!( + P::AbstractVector; + mindim=nothing, + maxdim=nothing, + cutoff=nothing, + use_absolute_cutoff=nothing, + use_relative_cutoff=nothing, +) + mindim = replace_nothing(mindim, default_mindim(P)) + maxdim = replace_nothing(maxdim, length(P)) + cutoff = replace_nothing(cutoff, typemin(eltype(P))) + use_absolute_cutoff = replace_nothing(use_absolute_cutoff, default_use_absolute_cutoff(P)) + use_relative_cutoff = replace_nothing(use_relative_cutoff, default_use_relative_cutoff(P)) + + origm = length(P) + docut = zero(eltype(P)) + + #if P[1] <= 0.0 + # P[1] = 0.0 + # resize!(P, 1) + # return 0.0, 0.0 + #end + + if origm == 1 + docut = abs(P[1]) / 2 + return zero(eltype(P)), docut + end + + s = sign(P[1]) + s < 0 && (P .*= s) + + # Zero out any negative weights + for n in origm:-1:1 + (P[n] >= zero(eltype(P))) && break + P[n] = zero(eltype(P)) + end + + n = origm + truncerr = zero(eltype(P)) + while n > maxdim + truncerr += P[n] + n -= 1 + end + + if use_absolute_cutoff + # Test if individual prob. weights fall below the cutoff, + # rather than using the *sum* of discarded weights + while P[n] <= cutoff && n > mindim + truncerr += P[n] + n -= 1 + end + else + scale = one(eltype(P)) + if use_relative_cutoff + scale = sum(P) + (scale == zero(eltype(P))) && (scale = one(eltype(P))) + end + + # Continue truncating until the *sum* of discarded probability + # weight reaches the cutoff (or n == mindim) + while (truncerr + P[n] <= cutoff * scale) && (n > mindim) + truncerr += P[n] + n -= 1 + end + + truncerr /= scale + end + + if n < 1 + n = 1 + end + + if n < origm + docut = (P[n] + P[n + 1]) / 2 + if abs(P[n] - P[n + 1]) < eltype(P)(1e-3) * P[n] + docut += eltype(P)(1e-3) * P[n] + end + end + + s < 0 && (P .*= s) + resize!(P, n) + return truncerr, docut +end diff --git a/NDTensors/src/SetParameters/.JuliaFormatter.toml b/NDTensors/src/lib/SetParameters/.JuliaFormatter.toml similarity index 100% rename from NDTensors/src/SetParameters/.JuliaFormatter.toml rename to NDTensors/src/lib/SetParameters/.JuliaFormatter.toml diff --git a/NDTensors/src/SetParameters/README.md b/NDTensors/src/lib/SetParameters/README.md similarity index 100% rename from NDTensors/src/SetParameters/README.md rename to NDTensors/src/lib/SetParameters/README.md diff --git a/NDTensors/src/SetParameters/TODO.md b/NDTensors/src/lib/SetParameters/TODO.md similarity index 100% rename from NDTensors/src/SetParameters/TODO.md rename to NDTensors/src/lib/SetParameters/TODO.md diff --git a/NDTensors/src/SetParameters/src/Base/array.jl b/NDTensors/src/lib/SetParameters/src/Base/array.jl similarity index 100% rename from NDTensors/src/SetParameters/src/Base/array.jl rename to NDTensors/src/lib/SetParameters/src/Base/array.jl diff --git a/NDTensors/src/SetParameters/src/Base/subarray.jl b/NDTensors/src/lib/SetParameters/src/Base/subarray.jl similarity index 100% rename from NDTensors/src/SetParameters/src/Base/subarray.jl rename to NDTensors/src/lib/SetParameters/src/Base/subarray.jl diff --git a/NDTensors/src/SetParameters/src/Base/val.jl b/NDTensors/src/lib/SetParameters/src/Base/val.jl similarity index 100% rename from NDTensors/src/SetParameters/src/Base/val.jl rename to NDTensors/src/lib/SetParameters/src/Base/val.jl diff --git
a/NDTensors/src/SetParameters/src/SetParameters.jl b/NDTensors/src/lib/SetParameters/src/SetParameters.jl similarity index 100% rename from NDTensors/src/SetParameters/src/SetParameters.jl rename to NDTensors/src/lib/SetParameters/src/SetParameters.jl diff --git a/NDTensors/src/SetParameters/src/default_parameter.jl b/NDTensors/src/lib/SetParameters/src/default_parameter.jl similarity index 100% rename from NDTensors/src/SetParameters/src/default_parameter.jl rename to NDTensors/src/lib/SetParameters/src/default_parameter.jl diff --git a/NDTensors/src/SetParameters/src/deprecated/set_default_parameters.jl b/NDTensors/src/lib/SetParameters/src/deprecated/set_default_parameters.jl similarity index 100% rename from NDTensors/src/SetParameters/src/deprecated/set_default_parameters.jl rename to NDTensors/src/lib/SetParameters/src/deprecated/set_default_parameters.jl diff --git a/NDTensors/src/SetParameters/src/deprecated/set_unspecified_default_parameters.jl b/NDTensors/src/lib/SetParameters/src/deprecated/set_unspecified_default_parameters.jl similarity index 100% rename from NDTensors/src/SetParameters/src/deprecated/set_unspecified_default_parameters.jl rename to NDTensors/src/lib/SetParameters/src/deprecated/set_unspecified_default_parameters.jl diff --git a/NDTensors/src/SetParameters/src/get_parameter.jl b/NDTensors/src/lib/SetParameters/src/get_parameter.jl similarity index 100% rename from NDTensors/src/SetParameters/src/get_parameter.jl rename to NDTensors/src/lib/SetParameters/src/get_parameter.jl diff --git a/NDTensors/src/SetParameters/src/interface.jl b/NDTensors/src/lib/SetParameters/src/interface.jl similarity index 100% rename from NDTensors/src/SetParameters/src/interface.jl rename to NDTensors/src/lib/SetParameters/src/interface.jl diff --git a/NDTensors/src/SetParameters/src/position.jl b/NDTensors/src/lib/SetParameters/src/position.jl similarity index 100% rename from NDTensors/src/SetParameters/src/position.jl rename to NDTensors/src/lib/SetParameters/src/position.jl diff --git a/NDTensors/src/SetParameters/src/set_default_parameters.jl b/NDTensors/src/lib/SetParameters/src/set_default_parameters.jl similarity index 100% rename from NDTensors/src/SetParameters/src/set_default_parameters.jl rename to NDTensors/src/lib/SetParameters/src/set_default_parameters.jl diff --git a/NDTensors/src/SetParameters/src/set_parameters.jl b/NDTensors/src/lib/SetParameters/src/set_parameters.jl similarity index 100% rename from NDTensors/src/SetParameters/src/set_parameters.jl rename to NDTensors/src/lib/SetParameters/src/set_parameters.jl diff --git a/NDTensors/src/SetParameters/src/set_parameters_generic.jl b/NDTensors/src/lib/SetParameters/src/set_parameters_generic.jl similarity index 100% rename from NDTensors/src/SetParameters/src/set_parameters_generic.jl rename to NDTensors/src/lib/SetParameters/src/set_parameters_generic.jl diff --git a/NDTensors/src/SetParameters/src/set_unspecified_parameters.jl b/NDTensors/src/lib/SetParameters/src/set_unspecified_parameters.jl similarity index 100% rename from NDTensors/src/SetParameters/src/set_unspecified_parameters.jl rename to NDTensors/src/lib/SetParameters/src/set_unspecified_parameters.jl diff --git a/NDTensors/src/SetParameters/src/unspecifiedparameter.jl b/NDTensors/src/lib/SetParameters/src/unspecifiedparameter.jl similarity index 100% rename from NDTensors/src/SetParameters/src/unspecifiedparameter.jl rename to NDTensors/src/lib/SetParameters/src/unspecifiedparameter.jl diff --git a/NDTensors/src/SetParameters/test/runtests.jl 
b/NDTensors/src/lib/SetParameters/test/runtests.jl similarity index 100% rename from NDTensors/src/SetParameters/test/runtests.jl rename to NDTensors/src/lib/SetParameters/test/runtests.jl diff --git a/NDTensors/src/SmallVectors/.JuliaFormatter.toml b/NDTensors/src/lib/SmallVectors/.JuliaFormatter.toml similarity index 100% rename from NDTensors/src/SmallVectors/.JuliaFormatter.toml rename to NDTensors/src/lib/SmallVectors/.JuliaFormatter.toml diff --git a/NDTensors/src/SmallVectors/README.md b/NDTensors/src/lib/SmallVectors/README.md similarity index 100% rename from NDTensors/src/SmallVectors/README.md rename to NDTensors/src/lib/SmallVectors/README.md diff --git a/NDTensors/src/SmallVectors/src/BaseExt/insertstyle.jl b/NDTensors/src/lib/SmallVectors/src/BaseExt/insertstyle.jl similarity index 100% rename from NDTensors/src/SmallVectors/src/BaseExt/insertstyle.jl rename to NDTensors/src/lib/SmallVectors/src/BaseExt/insertstyle.jl diff --git a/NDTensors/src/SmallVectors/src/BaseExt/sort.jl b/NDTensors/src/lib/SmallVectors/src/BaseExt/sort.jl similarity index 100% rename from NDTensors/src/SmallVectors/src/BaseExt/sort.jl rename to NDTensors/src/lib/SmallVectors/src/BaseExt/sort.jl diff --git a/NDTensors/src/SmallVectors/src/BaseExt/sortedunique.jl b/NDTensors/src/lib/SmallVectors/src/BaseExt/sortedunique.jl similarity index 100% rename from NDTensors/src/SmallVectors/src/BaseExt/sortedunique.jl rename to NDTensors/src/lib/SmallVectors/src/BaseExt/sortedunique.jl diff --git a/NDTensors/src/SmallVectors/src/BaseExt/thawfreeze.jl b/NDTensors/src/lib/SmallVectors/src/BaseExt/thawfreeze.jl similarity index 100% rename from NDTensors/src/SmallVectors/src/BaseExt/thawfreeze.jl rename to NDTensors/src/lib/SmallVectors/src/BaseExt/thawfreeze.jl diff --git a/NDTensors/src/SmallVectors/src/SmallVectors.jl b/NDTensors/src/lib/SmallVectors/src/SmallVectors.jl similarity index 100% rename from NDTensors/src/SmallVectors/src/SmallVectors.jl rename to NDTensors/src/lib/SmallVectors/src/SmallVectors.jl diff --git a/NDTensors/src/SmallVectors/src/abstractarray/insert.jl b/NDTensors/src/lib/SmallVectors/src/abstractarray/insert.jl similarity index 100% rename from NDTensors/src/SmallVectors/src/abstractarray/insert.jl rename to NDTensors/src/lib/SmallVectors/src/abstractarray/insert.jl diff --git a/NDTensors/src/SmallVectors/src/abstractsmallvector/abstractsmallvector.jl b/NDTensors/src/lib/SmallVectors/src/abstractsmallvector/abstractsmallvector.jl similarity index 100% rename from NDTensors/src/SmallVectors/src/abstractsmallvector/abstractsmallvector.jl rename to NDTensors/src/lib/SmallVectors/src/abstractsmallvector/abstractsmallvector.jl diff --git a/NDTensors/src/SmallVectors/src/abstractsmallvector/deque.jl b/NDTensors/src/lib/SmallVectors/src/abstractsmallvector/deque.jl similarity index 100% rename from NDTensors/src/SmallVectors/src/abstractsmallvector/deque.jl rename to NDTensors/src/lib/SmallVectors/src/abstractsmallvector/deque.jl diff --git a/NDTensors/src/SmallVectors/src/msmallvector/msmallvector.jl b/NDTensors/src/lib/SmallVectors/src/msmallvector/msmallvector.jl similarity index 100% rename from NDTensors/src/SmallVectors/src/msmallvector/msmallvector.jl rename to NDTensors/src/lib/SmallVectors/src/msmallvector/msmallvector.jl diff --git a/NDTensors/src/SmallVectors/src/msmallvector/thawfreeze.jl b/NDTensors/src/lib/SmallVectors/src/msmallvector/thawfreeze.jl similarity index 100% rename from NDTensors/src/SmallVectors/src/msmallvector/thawfreeze.jl rename to 
NDTensors/src/lib/SmallVectors/src/msmallvector/thawfreeze.jl diff --git a/NDTensors/src/SmallVectors/src/smallvector/insertstyle.jl b/NDTensors/src/lib/SmallVectors/src/smallvector/insertstyle.jl similarity index 100% rename from NDTensors/src/SmallVectors/src/smallvector/insertstyle.jl rename to NDTensors/src/lib/SmallVectors/src/smallvector/insertstyle.jl diff --git a/NDTensors/src/SmallVectors/src/smallvector/smallvector.jl b/NDTensors/src/lib/SmallVectors/src/smallvector/smallvector.jl similarity index 100% rename from NDTensors/src/SmallVectors/src/smallvector/smallvector.jl rename to NDTensors/src/lib/SmallVectors/src/smallvector/smallvector.jl diff --git a/NDTensors/src/SmallVectors/src/smallvector/thawfreeze.jl b/NDTensors/src/lib/SmallVectors/src/smallvector/thawfreeze.jl similarity index 100% rename from NDTensors/src/SmallVectors/src/smallvector/thawfreeze.jl rename to NDTensors/src/lib/SmallVectors/src/smallvector/thawfreeze.jl diff --git a/NDTensors/src/SmallVectors/src/subsmallvector/subsmallvector.jl b/NDTensors/src/lib/SmallVectors/src/subsmallvector/subsmallvector.jl similarity index 100% rename from NDTensors/src/SmallVectors/src/subsmallvector/subsmallvector.jl rename to NDTensors/src/lib/SmallVectors/src/subsmallvector/subsmallvector.jl diff --git a/NDTensors/src/SmallVectors/test/runtests.jl b/NDTensors/src/lib/SmallVectors/test/runtests.jl similarity index 100% rename from NDTensors/src/SmallVectors/test/runtests.jl rename to NDTensors/src/lib/SmallVectors/test/runtests.jl diff --git a/NDTensors/src/SortedSets/.JuliaFormatter.toml b/NDTensors/src/lib/SortedSets/.JuliaFormatter.toml similarity index 100% rename from NDTensors/src/SortedSets/.JuliaFormatter.toml rename to NDTensors/src/lib/SortedSets/.JuliaFormatter.toml diff --git a/NDTensors/src/SortedSets/src/BaseExt/sorted.jl b/NDTensors/src/lib/SortedSets/src/BaseExt/sorted.jl similarity index 100% rename from NDTensors/src/SortedSets/src/BaseExt/sorted.jl rename to NDTensors/src/lib/SortedSets/src/BaseExt/sorted.jl diff --git a/NDTensors/src/SortedSets/src/DictionariesExt/insert.jl b/NDTensors/src/lib/SortedSets/src/DictionariesExt/insert.jl similarity index 100% rename from NDTensors/src/SortedSets/src/DictionariesExt/insert.jl rename to NDTensors/src/lib/SortedSets/src/DictionariesExt/insert.jl diff --git a/NDTensors/src/SortedSets/src/DictionariesExt/isinsertable.jl b/NDTensors/src/lib/SortedSets/src/DictionariesExt/isinsertable.jl similarity index 100% rename from NDTensors/src/SortedSets/src/DictionariesExt/isinsertable.jl rename to NDTensors/src/lib/SortedSets/src/DictionariesExt/isinsertable.jl diff --git a/NDTensors/src/SortedSets/src/SmallVectorsDictionariesExt/interface.jl b/NDTensors/src/lib/SortedSets/src/SmallVectorsDictionariesExt/interface.jl similarity index 100% rename from NDTensors/src/SortedSets/src/SmallVectorsDictionariesExt/interface.jl rename to NDTensors/src/lib/SortedSets/src/SmallVectorsDictionariesExt/interface.jl diff --git a/NDTensors/src/SortedSets/src/SortedSets.jl b/NDTensors/src/lib/SortedSets/src/SortedSets.jl similarity index 100% rename from NDTensors/src/SortedSets/src/SortedSets.jl rename to NDTensors/src/lib/SortedSets/src/SortedSets.jl diff --git a/NDTensors/src/SortedSets/src/SortedSetsSmallVectorsExt/smallset.jl b/NDTensors/src/lib/SortedSets/src/SortedSetsSmallVectorsExt/smallset.jl similarity index 100% rename from NDTensors/src/SortedSets/src/SortedSetsSmallVectorsExt/smallset.jl rename to NDTensors/src/lib/SortedSets/src/SortedSetsSmallVectorsExt/smallset.jl diff 
--git a/NDTensors/src/SortedSets/src/abstractset.jl b/NDTensors/src/lib/SortedSets/src/abstractset.jl similarity index 100% rename from NDTensors/src/SortedSets/src/abstractset.jl rename to NDTensors/src/lib/SortedSets/src/abstractset.jl diff --git a/NDTensors/src/SortedSets/src/abstractwrappedset.jl b/NDTensors/src/lib/SortedSets/src/abstractwrappedset.jl similarity index 100% rename from NDTensors/src/SortedSets/src/abstractwrappedset.jl rename to NDTensors/src/lib/SortedSets/src/abstractwrappedset.jl diff --git a/NDTensors/src/SortedSets/src/sortedset.jl b/NDTensors/src/lib/SortedSets/src/sortedset.jl similarity index 100% rename from NDTensors/src/SortedSets/src/sortedset.jl rename to NDTensors/src/lib/SortedSets/src/sortedset.jl diff --git a/NDTensors/src/SortedSets/test/runtests.jl b/NDTensors/src/lib/SortedSets/test/runtests.jl similarity index 100% rename from NDTensors/src/SortedSets/test/runtests.jl rename to NDTensors/src/lib/SortedSets/test/runtests.jl diff --git a/NDTensors/src/TagSets/.JuliaFormatter.toml b/NDTensors/src/lib/TagSets/.JuliaFormatter.toml similarity index 100% rename from NDTensors/src/TagSets/.JuliaFormatter.toml rename to NDTensors/src/lib/TagSets/.JuliaFormatter.toml diff --git a/NDTensors/src/TagSets/README.md b/NDTensors/src/lib/TagSets/README.md similarity index 100% rename from NDTensors/src/TagSets/README.md rename to NDTensors/src/lib/TagSets/README.md diff --git a/NDTensors/src/TagSets/examples/benchmark.jl b/NDTensors/src/lib/TagSets/examples/benchmark.jl similarity index 100% rename from NDTensors/src/TagSets/examples/benchmark.jl rename to NDTensors/src/lib/TagSets/examples/benchmark.jl diff --git a/NDTensors/src/TagSets/src/TagSets.jl b/NDTensors/src/lib/TagSets/src/TagSets.jl similarity index 100% rename from NDTensors/src/TagSets/src/TagSets.jl rename to NDTensors/src/lib/TagSets/src/TagSets.jl diff --git a/NDTensors/src/TagSets/test/runtests.jl b/NDTensors/src/lib/TagSets/test/runtests.jl similarity index 100% rename from NDTensors/src/TagSets/test/runtests.jl rename to NDTensors/src/lib/TagSets/test/runtests.jl diff --git a/NDTensors/src/TensorAlgebra/.JuliaFormatter.toml b/NDTensors/src/lib/TensorAlgebra/.JuliaFormatter.toml similarity index 100% rename from NDTensors/src/TensorAlgebra/.JuliaFormatter.toml rename to NDTensors/src/lib/TensorAlgebra/.JuliaFormatter.toml diff --git a/NDTensors/src/TensorAlgebra/src/LinearAlgebraExtensions/LinearAlgebraExtensions.jl b/NDTensors/src/lib/TensorAlgebra/src/LinearAlgebraExtensions/LinearAlgebraExtensions.jl similarity index 100% rename from NDTensors/src/TensorAlgebra/src/LinearAlgebraExtensions/LinearAlgebraExtensions.jl rename to NDTensors/src/lib/TensorAlgebra/src/LinearAlgebraExtensions/LinearAlgebraExtensions.jl diff --git a/NDTensors/src/TensorAlgebra/src/LinearAlgebraExtensions/qr.jl b/NDTensors/src/lib/TensorAlgebra/src/LinearAlgebraExtensions/qr.jl similarity index 100% rename from NDTensors/src/TensorAlgebra/src/LinearAlgebraExtensions/qr.jl rename to NDTensors/src/lib/TensorAlgebra/src/LinearAlgebraExtensions/qr.jl diff --git a/NDTensors/src/TensorAlgebra/src/TensorAlgebra.jl b/NDTensors/src/lib/TensorAlgebra/src/TensorAlgebra.jl similarity index 86% rename from NDTensors/src/TensorAlgebra/src/TensorAlgebra.jl rename to NDTensors/src/lib/TensorAlgebra/src/TensorAlgebra.jl index dcd053f944..74ae00c662 100644 --- a/NDTensors/src/TensorAlgebra/src/TensorAlgebra.jl +++ b/NDTensors/src/lib/TensorAlgebra/src/TensorAlgebra.jl @@ -1,6 +1,6 @@ module TensorAlgebra +using ..AlgorithmSelection: 
Algorithm, @Algorithm_str using LinearAlgebra: mul! -using ..NDTensors: Algorithm, @Algorithm_str include("bipartitionedpermutation.jl") include("fusedims.jl") diff --git a/NDTensors/src/TensorAlgebra/src/bipartitionedpermutation.jl b/NDTensors/src/lib/TensorAlgebra/src/bipartitionedpermutation.jl similarity index 100% rename from NDTensors/src/TensorAlgebra/src/bipartitionedpermutation.jl rename to NDTensors/src/lib/TensorAlgebra/src/bipartitionedpermutation.jl diff --git a/NDTensors/src/TensorAlgebra/src/contract/allocate_output.jl b/NDTensors/src/lib/TensorAlgebra/src/contract/allocate_output.jl similarity index 100% rename from NDTensors/src/TensorAlgebra/src/contract/allocate_output.jl rename to NDTensors/src/lib/TensorAlgebra/src/contract/allocate_output.jl diff --git a/NDTensors/src/TensorAlgebra/src/contract/bipartitionedpermutations.jl b/NDTensors/src/lib/TensorAlgebra/src/contract/bipartitionedpermutations.jl similarity index 100% rename from NDTensors/src/TensorAlgebra/src/contract/bipartitionedpermutations.jl rename to NDTensors/src/lib/TensorAlgebra/src/contract/bipartitionedpermutations.jl diff --git a/NDTensors/src/TensorAlgebra/src/contract/contract.jl b/NDTensors/src/lib/TensorAlgebra/src/contract/contract.jl similarity index 100% rename from NDTensors/src/TensorAlgebra/src/contract/contract.jl rename to NDTensors/src/lib/TensorAlgebra/src/contract/contract.jl diff --git a/NDTensors/src/TensorAlgebra/src/contract/contract_matricize/contract.jl b/NDTensors/src/lib/TensorAlgebra/src/contract/contract_matricize/contract.jl similarity index 75% rename from NDTensors/src/TensorAlgebra/src/contract/contract_matricize/contract.jl rename to NDTensors/src/lib/TensorAlgebra/src/contract/contract_matricize/contract.jl index 959e09598e..1339287337 100644 --- a/NDTensors/src/TensorAlgebra/src/contract/contract_matricize/contract.jl +++ b/NDTensors/src/lib/TensorAlgebra/src/contract/contract_matricize/contract.jl @@ -17,6 +17,11 @@ function contract!( # TODO: Create a function `unmatricize` or `unfusedims`. # unmatricize!(a_dest, a_dest_matricized, axes(a_dest), perm_dest) a_dest_copy = reshape(a_dest_matricized, map(i -> axes(a_dest, i), perm_dest)) - permutedims!(a_dest, a_dest_copy, invperm(perm_dest)) + if iszero(ndims(a_dest)) && iszero(ndims(a_dest_copy)) && iszero(length(perm_dest)) + # TODO: Raise an issue with Base Julia. + a_dest[] = a_dest_copy[] + else + permutedims!(a_dest, a_dest_copy, invperm(perm_dest)) + end return a_dest end diff --git a/NDTensors/src/TensorAlgebra/src/contract/output_labels.jl b/NDTensors/src/lib/TensorAlgebra/src/contract/output_labels.jl similarity index 100% rename from NDTensors/src/TensorAlgebra/src/contract/output_labels.jl rename to NDTensors/src/lib/TensorAlgebra/src/contract/output_labels.jl diff --git a/NDTensors/src/lib/TensorAlgebra/src/fusedims.jl b/NDTensors/src/lib/TensorAlgebra/src/fusedims.jl new file mode 100644 index 0000000000..0f2197b177 --- /dev/null +++ b/NDTensors/src/lib/TensorAlgebra/src/fusedims.jl @@ -0,0 +1,130 @@ +fuse(a1::AbstractUnitRange, a2::AbstractUnitRange) = Base.OneTo(length(a1) * length(a2)) +fuse(a...) = foldl(fuse, a; init=Base.OneTo(1)) + +## # TODO: Support subperm that is an `Int`, representing +## # a dimension that won't be self-fused (i.e. won't involve +## # any block permutations and mergers). +## function fusedims(a::AbstractArray, subperms::Tuple...) +## @assert ndims(a) == sum(length, subperms) +## perm = reduce((x, y) -> (x..., y...), subperms) +## @assert isperm(perm) +## # TODO: Do this lazily?
+## a_permuted = permutedims(a, perm) +## sublengths = length.(subperms) +## substops = cumsum(sublengths) +## substarts = (1, (Base.front(substops) .+ 1)...) +## # TODO: `step=1` is not required as of Julia 1.7. +## # Remove once we drop support for Julia 1.6. +## subranges = range.(substarts, substops; step=1) +## # Get a naive product of the axes in the subrange +## axes_prod = map(subranges) do subrange +## return fuse(map(i -> axes(a_permuted, i), subrange)...) +## end +## a_reshaped = reshape(a_permuted, axes_prod) +## +## # Particular to `BlockSparseArray` +## # with graded unit range, add this back. +## # Permute and merge the axes +## mergeperms = blockmergesortperm.(axes_prod) +## end + +# TODO: Support subperm that is an `Int`, representing +# a dimension that won't be self-fused (i.e. won't involve +# any block permutations and mergers). +# This is based on the `BlockSparseArray` version. +# TODO: Restrict `subperms` to `Tuple`s? +function fusedims(a::AbstractArray, subperms...) + @assert ndims(a) == sum(length, subperms) + # perm = tuple_cat(subperms...) + perm = reduce((x, y) -> (x..., y...), subperms) + + @assert isperm(perm) + # TODO: Do this lazily? + a_permuted = if iszero(ndims(a)) && iszero(length(perm)) + # TODO: Raise an issue with Base Julia. + # TODO: `copy` here? + a + else + permutedims(a, perm) + end + sublengths = length.(subperms) + substops = cumsum(sublengths) + substarts = (1, (Base.front(substops) .+ 1)...) + # TODO: `step=1` is not required as of Julia 1.7. + # Remove once we drop support for Julia 1.6. + subranges = range.(substarts, substops; step=1) + # Get a naive product of the axes in the subrange + axes_prod = map(subranges) do subrange + return fuse(map(i -> axes(a_permuted, i), subrange)...) + end + a_reshaped = reshape(a_permuted, axes_prod) + return a_reshaped + + ## # TODO: Particular to `BlockSparseArray` + ## # with graded unit range, add this back. + ## # Permute and merge the axes + ## mergeperms = blockmergesortperm.(axes_prod) + ## block_perms = map(mergeperm -> Block.(reduce(vcat, mergeperm)), mergeperms) + ## a_blockperm = getindices(a_reshaped, block_perms...) + ## axes_merged = blockmerge.(axes_prod, mergeperms) + ## # TODO: Use `similar` here to preserve the type of `a`. + ## a_merged = BlockSparseArray{eltype(a)}(blocklengths.(axes_merged)...) + ## a_merged = similar(a, axes_merged) + ## TODO: Make this take advantage of the sparsity of `a_blockperm`. + ## TODO: Use `VectorInterface.zerovector` and `VectorInterface.add!!`? + ## copyto!(a_merged, a_blockperm) + ## return a_merged +end + +function matricize(a::AbstractArray, left_dims::Tuple, right_dims::Tuple) + return fusedims(a, left_dims, right_dims) +end + +# TODO: Remove this version. +function matricize(a::AbstractArray, biperm) + return fusedims(a, biperm[1], biperm[2]) +end + +function matricize(a::AbstractArray, biperm::BipartitionedPermutation) + return fusedims(a, biperm[1], biperm[2]) +end + +# splitdims(randn(12, 5, 12), 1 => (3, 4), 3 => (4, 3)) -> 3 × 4 × 5 × 4 × 3 +function splitdims(a::AbstractArray, splitters::Pair...) + splitdims = first.(splitters) + split_sizes = last.(splitters) + size_unsplit = size(a) + size_split = Any[size_unsplit...] + for (split_dim, split_size) in splitters + size_split[split_dim] = split_size + end + size_split = reduce((x, y) -> (x..., y...), size_split) + return reshape(a, size_split) +end + +# TODO: Make this more generic, i.e. for `BlockSparseArray`.
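+# A hedged round-trip sketch of `matricize` above and `unmatricize` below:
+#   a = randn(3, 4, 5)
+#   m = matricize(a, (1, 2), (3,)) # fuses to a 12×5 matrix
+#   unmatricize(m, axes(a)[1:2], axes(a)[3:3]) == a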
+function unmatricize(a::AbstractArray, axes_codomain, axes_domain) + # TODO: Call `splitdims`. + return reshape(a, (axes_codomain..., axes_domain...)) +end + +## function fusedims(a::AbstractArray, perm_partitions...) +## error("Not implemented") +## end +## +## matricize(a::AbstractArray, biperm) = matricize(a, BipartitionedPermutation(biperm...)) +## +## # TODO: Make this more generic, i.e. for `BlockSparseArray`. +## function matricize(a::AbstractArray, biperm::BipartitionedPermutation) +## # Permute and fuse the axes +## axes_src = axes(a) +## axes_codomain = map(i -> axes_src[i], biperm[1]) +## axes_domain = map(i -> axes_src[i], biperm[2]) +## axis_codomain_fused = fuse(axes_codomain...) +## axis_domain_fused = fuse(axes_domain...) +## # Permute the array +## perm = flatten(biperm) +## a_permuted = permutedims(a, perm) +## return reshape(a_permuted, (axis_codomain_fused, axis_domain_fused)) +## end diff --git a/NDTensors/src/TensorAlgebra/test/Project.toml b/NDTensors/src/lib/TensorAlgebra/test/Project.toml similarity index 100% rename from NDTensors/src/TensorAlgebra/test/Project.toml rename to NDTensors/src/lib/TensorAlgebra/test/Project.toml diff --git a/NDTensors/src/TensorAlgebra/test/runtests.jl b/NDTensors/src/lib/TensorAlgebra/test/runtests.jl similarity index 100% rename from NDTensors/src/TensorAlgebra/test/runtests.jl rename to NDTensors/src/lib/TensorAlgebra/test/runtests.jl diff --git a/NDTensors/src/Unwrap/.JuliaFormatter.toml b/NDTensors/src/lib/Unwrap/.JuliaFormatter.toml similarity index 100% rename from NDTensors/src/Unwrap/.JuliaFormatter.toml rename to NDTensors/src/lib/Unwrap/.JuliaFormatter.toml diff --git a/NDTensors/src/Unwrap/README.md b/NDTensors/src/lib/Unwrap/README.md similarity index 100% rename from NDTensors/src/Unwrap/README.md rename to NDTensors/src/lib/Unwrap/README.md diff --git a/NDTensors/src/Unwrap/TODO.md b/NDTensors/src/lib/Unwrap/TODO.md similarity index 100% rename from NDTensors/src/Unwrap/TODO.md rename to NDTensors/src/lib/Unwrap/TODO.md diff --git a/NDTensors/src/Unwrap/src/Unwrap.jl b/NDTensors/src/lib/Unwrap/src/Unwrap.jl similarity index 100% rename from NDTensors/src/Unwrap/src/Unwrap.jl rename to NDTensors/src/lib/Unwrap/src/Unwrap.jl diff --git a/NDTensors/src/Unwrap/src/expose.jl b/NDTensors/src/lib/Unwrap/src/expose.jl similarity index 100% rename from NDTensors/src/Unwrap/src/expose.jl rename to NDTensors/src/lib/Unwrap/src/expose.jl diff --git a/NDTensors/src/Unwrap/src/functions/abstractarray.jl b/NDTensors/src/lib/Unwrap/src/functions/abstractarray.jl similarity index 100% rename from NDTensors/src/Unwrap/src/functions/abstractarray.jl rename to NDTensors/src/lib/Unwrap/src/functions/abstractarray.jl diff --git a/NDTensors/src/Unwrap/src/functions/adapt.jl b/NDTensors/src/lib/Unwrap/src/functions/adapt.jl similarity index 100% rename from NDTensors/src/Unwrap/src/functions/adapt.jl rename to NDTensors/src/lib/Unwrap/src/functions/adapt.jl diff --git a/NDTensors/src/Unwrap/src/functions/copyto.jl b/NDTensors/src/lib/Unwrap/src/functions/copyto.jl similarity index 100% rename from NDTensors/src/Unwrap/src/functions/copyto.jl rename to NDTensors/src/lib/Unwrap/src/functions/copyto.jl diff --git a/NDTensors/src/Unwrap/src/functions/linearalgebra.jl b/NDTensors/src/lib/Unwrap/src/functions/linearalgebra.jl similarity index 100% rename from NDTensors/src/Unwrap/src/functions/linearalgebra.jl rename to NDTensors/src/lib/Unwrap/src/functions/linearalgebra.jl diff --git a/NDTensors/src/Unwrap/src/functions/mul.jl 
b/NDTensors/src/lib/Unwrap/src/functions/mul.jl similarity index 100% rename from NDTensors/src/Unwrap/src/functions/mul.jl rename to NDTensors/src/lib/Unwrap/src/functions/mul.jl diff --git a/NDTensors/src/Unwrap/src/functions/permutedims.jl b/NDTensors/src/lib/Unwrap/src/functions/permutedims.jl similarity index 100% rename from NDTensors/src/Unwrap/src/functions/permutedims.jl rename to NDTensors/src/lib/Unwrap/src/functions/permutedims.jl diff --git a/NDTensors/src/Unwrap/src/import.jl b/NDTensors/src/lib/Unwrap/src/import.jl similarity index 100% rename from NDTensors/src/Unwrap/src/import.jl rename to NDTensors/src/lib/Unwrap/src/import.jl diff --git a/NDTensors/src/Unwrap/src/iswrappedarray.jl b/NDTensors/src/lib/Unwrap/src/iswrappedarray.jl similarity index 100% rename from NDTensors/src/Unwrap/src/iswrappedarray.jl rename to NDTensors/src/lib/Unwrap/src/iswrappedarray.jl diff --git a/NDTensors/src/Unwrap/test/runtests.jl b/NDTensors/src/lib/Unwrap/test/runtests.jl similarity index 98% rename from NDTensors/src/Unwrap/test/runtests.jl rename to NDTensors/src/lib/Unwrap/test/runtests.jl index 68cbb74e18..ec68afae03 100644 --- a/NDTensors/src/Unwrap/test/runtests.jl +++ b/NDTensors/src/lib/Unwrap/test/runtests.jl @@ -4,7 +4,7 @@ using NDTensors using LinearAlgebra using GPUArraysCore: @allowscalar -include("../../../test/device_list.jl") +include(joinpath(pkgdir(NDTensors), "test", "device_list.jl")) @testset "Testing Unwrap $dev, $elt" for dev in devices_list(ARGS), elt in (Float32, ComplexF32) diff --git a/NDTensors/src/linearalgebra/linearalgebra.jl b/NDTensors/src/linearalgebra/linearalgebra.jl index e7d1643bb1..a3947233c3 100644 --- a/NDTensors/src/linearalgebra/linearalgebra.jl +++ b/NDTensors/src/linearalgebra/linearalgebra.jl @@ -1,3 +1,5 @@ +using .RankFactorization: Spectrum + # # Linear Algebra of order 2 NDTensors # @@ -28,30 +30,6 @@ function LinearAlgebra.exp( return tensor(Dense(vec(expTM)), inds(T)) end -""" - Spectrum -contains the (truncated) density matrix eigenvalue spectrum which is computed during a -decomposition done by `svd` or `eigen`. In addition stores the truncation error. -""" -struct Spectrum{VecT<:Union{AbstractVector,Nothing},ElT<:Real} - eigs::VecT - truncerr::ElT -end - -eigs(s::Spectrum) = s.eigs -truncerror(s::Spectrum) = s.truncerr - -function entropy(s::Spectrum) - S = 0.0 - eigs_s = eigs(s) - isnothing(eigs_s) && - error("Spectrum does not contain any eigenvalues, cannot compute the entropy") - for p in eigs_s - p > 1e-13 && (S -= p * log(p)) - end - return S -end - function svd_catch_error(A; kwargs...) USV = try svd(expose(A); kwargs...) 
diff --git a/NDTensors/test/BlockSparseArrays.jl b/NDTensors/test/BlockSparseArrays.jl deleted file mode 100644 index 5d1345d0e7..0000000000 --- a/NDTensors/test/BlockSparseArrays.jl +++ /dev/null @@ -1,4 +0,0 @@ -using Test -using NDTensors - -include(joinpath(pkgdir(NDTensors), "src", "BlockSparseArrays", "test", "runtests.jl")) diff --git a/NDTensors/test/DiagonalArrays.jl b/NDTensors/test/DiagonalArrays.jl deleted file mode 100644 index fa9b5a0c2b..0000000000 --- a/NDTensors/test/DiagonalArrays.jl +++ /dev/null @@ -1,4 +0,0 @@ -using Test -using NDTensors - -include(joinpath(pkgdir(NDTensors), "src", "DiagonalArrays", "test", "runtests.jl")) diff --git a/NDTensors/test/ITensors/Project.toml b/NDTensors/test/ITensors/Project.toml new file mode 100644 index 0000000000..7ee8b76af0 --- /dev/null +++ b/NDTensors/test/ITensors/Project.toml @@ -0,0 +1,4 @@ +[deps] +ITensors = "9136182c-28ba-11e9-034c-db9fb085ebd5" +NDTensors = "23ae76d9-e61a-49c4-8f12-3f1a16adf9cf" +SafeTestsets = "1bc83da4-3b8d-516f-aca4-4fe02f6d838f" diff --git a/NDTensors/test/ITensors/TestITensorDMRG/Project.toml b/NDTensors/test/ITensors/TestITensorDMRG/Project.toml new file mode 100644 index 0000000000..7f3412606e --- /dev/null +++ b/NDTensors/test/ITensors/TestITensorDMRG/Project.toml @@ -0,0 +1,6 @@ +[deps] +ITensors = "9136182c-28ba-11e9-034c-db9fb085ebd5" +NDTensors = "23ae76d9-e61a-49c4-8f12-3f1a16adf9cf" +Random = "9a3f8284-a2c9-5f02-9a11-845980a1fd5c" +SafeTestsets = "1bc83da4-3b8d-516f-aca4-4fe02f6d838f" +Test = "8dfed614-e22c-5e08-85e1-65c5234f0b40" diff --git a/NDTensors/test/ITensors/TestITensorDMRG/TestITensorDMRG.jl b/NDTensors/test/ITensors/TestITensorDMRG/TestITensorDMRG.jl index 618f2b6ced..cf77be08cd 100644 --- a/NDTensors/test/ITensors/TestITensorDMRG/TestITensorDMRG.jl +++ b/NDTensors/test/ITensors/TestITensorDMRG/TestITensorDMRG.jl @@ -2,7 +2,6 @@ ## Failing for CUDA mostly with eigen (I believe there is some noise in ## eigen decomp with CUBLAS to give slightly different answer than BLAS) module TestITensorDMRG -using Test using ITensors using NDTensors using Random diff --git a/NDTensors/test/ITensors/TestITensorDMRG/dmrg.jl b/NDTensors/test/ITensors/TestITensorDMRG/dmrg.jl index ab73f5ff82..1ec22fdf86 100644 --- a/NDTensors/test/ITensors/TestITensorDMRG/dmrg.jl +++ b/NDTensors/test/ITensors/TestITensorDMRG/dmrg.jl @@ -1,4 +1,11 @@ -function test_dmrg(elt, N::Integer; dev::Function, conserve_qns) +using ITensors: MPO, OpSum, dmrg, randomMPS, siteinds +using Random: Random +using Test: @test +# TODO: Include file with `reference_energies`. 
+ +function test_dmrg( + elt, N::Integer; dev::Function, conserve_qns, rtol_scale=true, outputlevel=0 +) sites = siteinds("S=1/2", N; conserve_qns) os = OpSum() @@ -20,6 +27,6 @@ function test_dmrg(elt, N::Integer; dev::Function, conserve_qns) ## all problems do not have a maxlinkdim > 32 maxdim = 32 - energy, psi = dmrg(H, psi0; nsweeps, cutoff, maxdim, noise, outputlevel=0) - @test energy ≈ reference_energies[N] rtol = default_rtol(elt) + energy, psi = dmrg(H, psi0; nsweeps, cutoff, maxdim, noise, outputlevel) + @test energy ≈ reference_energies[N] rtol = rtol_scale * default_rtol(elt) end diff --git a/NDTensors/test/ITensors/TestITensorDMRG/runtests.jl b/NDTensors/test/ITensors/TestITensorDMRG/runtests.jl deleted file mode 100644 index bccc179656..0000000000 --- a/NDTensors/test/ITensors/TestITensorDMRG/runtests.jl +++ /dev/null @@ -1,22 +0,0 @@ -using Test -using NDTensors -include("TestITensorDMRG.jl") - -include("../../device_list.jl") - -@testset "Test DMRG $dev, $conserve_qns, $elt, $N" for dev in devices_list(ARGS), - conserve_qns in [false, true], - elt in (Float32, ComplexF32, Float64, ComplexF64), - N in [4, 10] - - if !TestITensorDMRG.is_supported_eltype(dev, elt) - continue - end - if TestITensorDMRG.is_broken(dev, elt, Val(conserve_qns)) - # TODO: Switch to `@test ... broken=true`, introduced - # in Julia 1.7. - @test_broken TestITensorDMRG.test_dmrg(elt, N; dev, conserve_qns) - else - TestITensorDMRG.test_dmrg(elt, N; dev, conserve_qns) - end -end diff --git a/NDTensors/test/ITensors/runtests.jl b/NDTensors/test/ITensors/runtests.jl index 00ecc4aa31..93d2c2d46c 100644 --- a/NDTensors/test/ITensors/runtests.jl +++ b/NDTensors/test/ITensors/runtests.jl @@ -1 +1,33 @@ -include("TestITensorDMRG/runtests.jl") +using SafeTestsets: @safetestset + +@safetestset "Downstream tests for ITensor DMRG" begin + using Test: @testset + include("TestITensorDMRG/TestITensorDMRG.jl") + include("../device_list.jl") + @testset "Test DMRG $dev, $conserve_qns, $elt, $N" for dev in devices_list(ARGS), + conserve_qns in [false, true], + elt in (Float32, ComplexF32, Float64, ComplexF64), + N in [4, 10] + + if !TestITensorDMRG.is_supported_eltype(dev, elt) + continue + end + if TestITensorDMRG.is_broken(dev, elt, Val(conserve_qns)) + # TODO: Switch to `@test ... broken=true`, introduced + # in Julia 1.7. + @test_broken TestITensorDMRG.test_dmrg(elt, N; dev, conserve_qns) + else + TestITensorDMRG.test_dmrg(elt, N; dev, conserve_qns, outputlevel=0) + end + end + using ITensors.ITensorsNamedDimsArraysExt: to_nameddimsarray + @testset "Test DMRG with NamedDimsArrays" for dev in (NDTensors.cpu,), + conserve_qns in [false], + elt in (Float32, Float64), + N in [4, 10] + + dev = dev ∘ to_nameddimsarray + # TODO: Investigate why this isn't accurate. 
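+ # The tolerance is loosened via `rtol_scale=10^3` (`test_dmrg` compares with
+ # `rtol = rtol_scale * default_rtol(elt)`) until the precision loss in the
+ # NamedDimsArrays path is understood.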
+ TestITensorDMRG.test_dmrg(elt, N; dev, conserve_qns, rtol_scale=10^3, outputlevel=0) + end +end diff --git a/NDTensors/test/NamedDimsArrays.jl b/NDTensors/test/NamedDimsArrays.jl deleted file mode 100644 index ce0ed1da77..0000000000 --- a/NDTensors/test/NamedDimsArrays.jl +++ /dev/null @@ -1,4 +0,0 @@ -using Test -using NDTensors - -include(joinpath(pkgdir(NDTensors), "src", "NamedDimsArrays", "test", "runtests.jl")) diff --git a/NDTensors/test/Project.toml b/NDTensors/test/Project.toml index 46d2cf33bb..5301386cba 100644 --- a/NDTensors/test/Project.toml +++ b/NDTensors/test/Project.toml @@ -1,4 +1,5 @@ [deps] +Adapt = "79e6a3ab-5dfb-504d-930d-738a2a938a0e" BlockArrays = "8e7c35d0-a365-5155-bbbb-fb81a777f24e" CUDA = "052768ef-5323-5732-b1bb-66c8b64840ba" Combinatorics = "861a8166-3701-5b0c-9a16-15d98fcdc6aa" diff --git a/NDTensors/test/SetParameters.jl b/NDTensors/test/SetParameters.jl deleted file mode 100644 index a8026805fe..0000000000 --- a/NDTensors/test/SetParameters.jl +++ /dev/null @@ -1,4 +0,0 @@ -using Test -using NDTensors - -include(joinpath(pkgdir(NDTensors), "src", "SetParameters", "test", "runtests.jl")) diff --git a/NDTensors/test/SmallVectors.jl b/NDTensors/test/SmallVectors.jl deleted file mode 100644 index 62b552dc72..0000000000 --- a/NDTensors/test/SmallVectors.jl +++ /dev/null @@ -1,4 +0,0 @@ -using Test -using NDTensors - -include(joinpath(pkgdir(NDTensors), "src", "SmallVectors", "test", "runtests.jl")) diff --git a/NDTensors/test/SortedSets.jl b/NDTensors/test/SortedSets.jl deleted file mode 100644 index e5a885737d..0000000000 --- a/NDTensors/test/SortedSets.jl +++ /dev/null @@ -1,4 +0,0 @@ -using Test -using NDTensors - -include(joinpath(pkgdir(NDTensors), "src", "SortedSets", "test", "runtests.jl")) diff --git a/NDTensors/test/TagSets.jl b/NDTensors/test/TagSets.jl deleted file mode 100644 index 3ce0fbfd98..0000000000 --- a/NDTensors/test/TagSets.jl +++ /dev/null @@ -1,4 +0,0 @@ -using Test -using NDTensors - -include(joinpath(pkgdir(NDTensors), "src", "TagSets", "test", "runtests.jl")) diff --git a/NDTensors/test/TensorAlgebra.jl b/NDTensors/test/TensorAlgebra.jl deleted file mode 100644 index 4cd51cf4cb..0000000000 --- a/NDTensors/test/TensorAlgebra.jl +++ /dev/null @@ -1,4 +0,0 @@ -using Test -using NDTensors - -include(joinpath(pkgdir(NDTensors), "src", "TensorAlgebra", "test", "runtests.jl")) diff --git a/NDTensors/test/Unwrap.jl b/NDTensors/test/Unwrap.jl deleted file mode 100644 index a7a49a2a17..0000000000 --- a/NDTensors/test/Unwrap.jl +++ /dev/null @@ -1,4 +0,0 @@ -using Test -using NDTensors - -include(joinpath(pkgdir(NDTensors), "src", "Unwrap", "test", "runtests.jl")) diff --git a/NDTensors/test/device_list.jl b/NDTensors/test/device_list.jl index 021a20a75b..4efb4b1f7f 100644 --- a/NDTensors/test/device_list.jl +++ b/NDTensors/test/device_list.jl @@ -1,3 +1,4 @@ +using NDTensors: NDTensors if "cuda" in ARGS || "all" in ARGS using CUDA end diff --git a/NDTensors/test/lib/Project.toml b/NDTensors/test/lib/Project.toml new file mode 100644 index 0000000000..6c95bd52e2 --- /dev/null +++ b/NDTensors/test/lib/Project.toml @@ -0,0 +1,8 @@ +[deps] +Adapt = "79e6a3ab-5dfb-504d-930d-738a2a938a0e" +BlockArrays = "8e7c35d0-a365-5155-bbbb-fb81a777f24e" +Combinatorics = "861a8166-3701-5b0c-9a16-15d98fcdc6aa" +Compat = "34da2185-b29b-5c13-b0c7-acf172513d20" +GPUArraysCore = "46192b85-c4d5-4398-a991-12ede77f4527" +NDTensors = "23ae76d9-e61a-49c4-8f12-3f1a16adf9cf" +SafeTestsets = "1bc83da4-3b8d-516f-aca4-4fe02f6d838f" diff --git a/NDTensors/test/lib/runtests.jl 
b/NDTensors/test/lib/runtests.jl new file mode 100644 index 0000000000..102c83bad7 --- /dev/null +++ b/NDTensors/test/lib/runtests.jl @@ -0,0 +1,20 @@ +@eval module $(gensym()) +using Test: @testset +@testset "Test NDTensors lib $lib" for lib in [ + "AlgorithmSelection", + "BaseExtensions", + "BlockSparseArrays", + "BroadcastMapConversion", + "DiagonalArrays", + "NamedDimsArrays", + "SetParameters", + "SmallVectors", + "SortedSets", + "TagSets", + "TensorAlgebra", + "Unwrap", +] + using NDTensors: NDTensors + include(joinpath(pkgdir(NDTensors), "src", "lib", lib, "test", "runtests.jl")) +end +end diff --git a/NDTensors/test/runtests.jl b/NDTensors/test/runtests.jl index 34718b84ea..fad15eb5f0 100644 --- a/NDTensors/test/runtests.jl +++ b/NDTensors/test/runtests.jl @@ -19,14 +19,7 @@ end @safetestset "NDTensors" begin @testset "$filename" for filename in [ - "BlockSparseArrays.jl", - "DiagonalArrays.jl", - "SetParameters.jl", - "SmallVectors.jl", - "SortedSets.jl", - "TagSets.jl", - "TensorAlgebra.jl", - "Unwrap.jl", + "lib/runtests.jl", "linearalgebra.jl", "dense.jl", "blocksparse.jl", diff --git a/src/ITensors.jl b/src/ITensors.jl index d571e436bf..bc96ecfc81 100644 --- a/src/ITensors.jl +++ b/src/ITensors.jl @@ -179,6 +179,12 @@ include("mps/observer.jl") include("mps/dmrg.jl") include("mps/adapt.jl") +##################################### +# ITensorsNamedDimsArraysExt +# Requires `AbstractMPS`. +include("ITensorsNamedDimsArraysExt/src/ITensorsNamedDimsArraysExt.jl") +using .ITensorsNamedDimsArraysExt: ITensorsNamedDimsArraysExt + ##################################### # Physics # diff --git a/src/ITensorsNamedDimsArraysExt/examples/Project.toml b/src/ITensorsNamedDimsArraysExt/examples/Project.toml new file mode 100644 index 0000000000..3d5fd7544e --- /dev/null +++ b/src/ITensorsNamedDimsArraysExt/examples/Project.toml @@ -0,0 +1,4 @@ +[deps] +Adapt = "79e6a3ab-5dfb-504d-930d-738a2a938a0e" +ITensors = "9136182c-28ba-11e9-034c-db9fb085ebd5" +NDTensors = "23ae76d9-e61a-49c4-8f12-3f1a16adf9cf" diff --git a/src/ITensorsNamedDimsArraysExt/examples/example_dmrg.jl b/src/ITensorsNamedDimsArraysExt/examples/example_dmrg.jl new file mode 100644 index 0000000000..d86ee586b8 --- /dev/null +++ b/src/ITensorsNamedDimsArraysExt/examples/example_dmrg.jl @@ -0,0 +1,25 @@ +using Adapt: adapt +using ITensors: MPO, dmrg, randomMPS, siteinds +using ITensors.Ops: OpSum +using ITensors.ITensorsNamedDimsArraysExt: to_nameddimsarray + +function main(; n, conserve_qns=false, nsweeps=3, cutoff=1e-4, arraytype=Array) + s = siteinds("S=1/2", n; conserve_qns) + ℋ = OpSum() + ℋ = sum(j -> ("S+", j, "S-", j + 1), 1:(n - 1); init=ℋ) + ℋ = sum(j -> ("S-", j, "S+", j + 1), 1:(n - 1); init=ℋ) + ℋ = sum(j -> ("Sz", j, "Sz", j + 1), 1:(n - 1); init=ℋ) + H = MPO(ℋ, s) + ψ₀ = randomMPS(s, j -> isodd(j) ? 
"↑" : "↓") + + H = adapt(arraytype, H) + ψ = adapt(arraytype, ψ₀) + e, ψ = dmrg(H, ψ; nsweeps, cutoff) + + Hₙₐ = to_nameddimsarray(H) + Hₙₐ = adapt(arraytype, Hₙₐ) + ψₙₐ = to_nameddimsarray(ψ₀) + ψₙₐ = adapt(arraytype, ψₙₐ) + eₙₐ, ψₙₐ = dmrg(Hₙₐ, ψₙₐ; nsweeps, cutoff) + return (; e, eₙₐ) +end diff --git a/src/ITensorsNamedDimsArraysExt/examples/example_readme.jl b/src/ITensorsNamedDimsArraysExt/examples/example_readme.jl new file mode 100644 index 0000000000..b7404e3781 --- /dev/null +++ b/src/ITensorsNamedDimsArraysExt/examples/example_readme.jl @@ -0,0 +1,29 @@ +using ITensors: Index, hasinds, permute + +function main() + i = Index(2, "i") + j = Index(2, "j") + k = Index(2, "k") + + a = randn(i, j) + b = randn(j, k) + + @show rand(Int, i, j) + @show zeros(Float32, i, j) + @show ones(Float32, i, j) + @show fill(1.2, i, j) + + a[j => 1, i => 2] = 21 + @show a[2, 1] == 21 + @show a[j => 1, i => 2] == 21 + + c = a * b + @show hasinds(c, (i, k)) + @show permute(a, (j, i)) + + # Broken + a′ = randn(j, i) + @show a + a′ +end + +main() diff --git a/src/ITensorsNamedDimsArraysExt/src/ITensorsNamedDimsArraysExt.jl b/src/ITensorsNamedDimsArraysExt/src/ITensorsNamedDimsArraysExt.jl new file mode 100644 index 0000000000..4b2c017fdc --- /dev/null +++ b/src/ITensorsNamedDimsArraysExt/src/ITensorsNamedDimsArraysExt.jl @@ -0,0 +1,8 @@ +module ITensorsNamedDimsArraysExt +include("index.jl") +include("itensor.jl") +include("indexing.jl") +include("tensoralgebra.jl") +include("combiner.jl") +include("to_nameddimsarray.jl") +end diff --git a/src/ITensorsNamedDimsArraysExt/src/combiner.jl b/src/ITensorsNamedDimsArraysExt/src/combiner.jl new file mode 100644 index 0000000000..402d7abfba --- /dev/null +++ b/src/ITensorsNamedDimsArraysExt/src/combiner.jl @@ -0,0 +1,25 @@ +# Combiner +using ..NDTensors.NamedDimsArrays: AbstractNamedDimsArray, dimnames, name +using ..NDTensors.TensorAlgebra: TensorAlgebra, fusedims, splitdims +using NDTensors: NDTensors, Tensor, Combiner + +function ITensors._contract(na::AbstractNamedDimsArray, c::Tensor{<:Any,<:Any,<:Combiner}) + split_names = name.(NDTensors.uncombinedinds(c)) + fused_name = name(NDTensors.combinedind(c)) + + # Use to determine if we are doing fusion or splitting. + split_dims = map(split_name -> findfirst(isequal(split_name), dimnames(na)), split_names) + fused_dim = findfirst(isequal(fused_name), dimnames(na)) + + return if isnothing(fused_dim) + # Dimension fusion (joining, combining) + @assert all(!isnothing, split_dims) + fusedims(na, split_names => fused_name) + else + # Dimension unfusion (splitting, uncombining) + @assert all(isnothing, split_dims) + + split_dims = NamedInt.(NDTensors.uncombinedinds(c)) + splitdims(na, fused_name => split_dims) + end +end diff --git a/src/ITensorsNamedDimsArraysExt/src/index.jl b/src/ITensorsNamedDimsArraysExt/src/index.jl new file mode 100644 index 0000000000..0114635d3c --- /dev/null +++ b/src/ITensorsNamedDimsArraysExt/src/index.jl @@ -0,0 +1,65 @@ +using ..ITensors: ITensors, Index, IndexID, dim, noprime, prime, settags, space +using ..NDTensors: NDTensors, AliasStyle +using ..NDTensors.NamedDimsArrays: + NamedDimsArrays, + AbstractNamedDimsArray, + NamedInt, + dimnames, + named, + name, + replacenames, + unname + +# TODO: NamedDimsArrays.named(space, ::IndexID) = Index(...) 
+NamedDimsArrays.name(i::Index) = IndexID(i)
+NamedDimsArrays.unname(i::Index) = space(i)
+function ITensors.Index(i::NamedInt{<:Any,<:IndexID})
+  space = unname(i)
+  n = name(i)
+  dir = ITensors.Neither
+  return Index(n.id, space, dir, n.tags, n.plev)
+end
+function NamedDimsArrays.NamedInt(i::Index)
+  return named(dim(i), name(i))
+end
+
+NamedDimsArrays.randname(i::IndexID) = IndexID(rand(UInt64), "", 0)
+
+# TODO: This is piracy, change this?
+Base.:(==)(i1::IndexID, i2::Index) = (i1 == name(i2))
+Base.:(==)(i1::Index, i2::IndexID) = (name(i1) == i2)
+
+# Accessors
+Base.convert(type::Type{<:IndexID}, i::Index) = type(i)
+# TODO: Use this if `size` outputs named dimensions.
+# NDTensors.inds(na::AbstractNamedDimsArray) = Index.(size(na))
+# TODO: Define `namedsize` and use that here.
+function NDTensors.inds(na::AbstractNamedDimsArray)
+  return Index.(named.(size(na), dimnames(na)))
+end
+NDTensors.storage(na::AbstractNamedDimsArray) = na
+
+NDTensors.dim(na::AbstractNamedDimsArray) = length(na)
+
+# Priming, tagging `IndexID`
+ITensors.prime(i::IndexID) = IndexID(prime(Index(named(0, i))))
+ITensors.noprime(i::IndexID) = IndexID(noprime(Index(named(0, i))))
+function ITensors.settags(is::Tuple{Vararg{IndexID}}, args...; kwargs...)
+  return IndexID.(settags(map(i -> Index(named(0, i)), is), args...; kwargs...))
+end
+
+# Priming, tagging `AbstractNamedDimsArray`
+ITensors.prime(na::AbstractNamedDimsArray) = named(unname(na), prime.(dimnames(na)))
+ITensors.noprime(na::AbstractNamedDimsArray) = named(unname(na), noprime.(dimnames(na)))
+function ITensors.settags(na::AbstractNamedDimsArray, args...; kwargs...)
+  return named(unname(na), settags(dimnames(na), args...; kwargs...))
+end
+function ITensors.replaceind(na::AbstractNamedDimsArray, i::Index, j::Index)
+  return replacenames(na, name(i) => name(j))
+end
+function ITensors.replaceinds(na::AbstractNamedDimsArray, is, js)
+  return replacenames(na, (name.(is) .=> name.(js))...)
+end
+
+# TODO: Complex conjugate and flip arrows!
+ITensors.dag(::AliasStyle, na::AbstractNamedDimsArray) = na
diff --git a/src/ITensorsNamedDimsArraysExt/src/indexing.jl b/src/ITensorsNamedDimsArraysExt/src/indexing.jl
new file mode 100644
index 0000000000..c05ae69481
--- /dev/null
+++ b/src/ITensorsNamedDimsArraysExt/src/indexing.jl
@@ -0,0 +1,8 @@
+using ..ITensors: ITensors
+function ITensors._getindex(na::AbstractNamedDimsArray, I::Pair...)
+  return na[I...]
+end
+function ITensors._setindex!!(na::AbstractNamedDimsArray, value::Int64, I::Pair...)
+  na[I...] = value
+  return na
+end
diff --git a/src/ITensorsNamedDimsArraysExt/src/itensor.jl b/src/ITensorsNamedDimsArraysExt/src/itensor.jl
new file mode 100644
index 0000000000..a9200a0efd
--- /dev/null
+++ b/src/ITensorsNamedDimsArraysExt/src/itensor.jl
@@ -0,0 +1,47 @@
+using ITensors: ITensors
+using Random: AbstractRNG, default_rng
+
+# Constructors
+ITensors.ITensor(na::AbstractNamedDimsArray) = ITensors._ITensor(na)
+ITensors.itensor(na::AbstractNamedDimsArray) = ITensors._ITensor(na)
+
+# Convenient constructors
+default_eltype() = Float64
+for f in [:rand, :randn]
+  @eval begin
+    function Base.$f(
+      rng::AbstractRNG, elt::Type{<:Number}, dims::Tuple{Index,Vararg{Index}}
+    )
+      return ITensor($f(rng, elt, NamedInt.(dims)))
+    end
+    function Base.$f(
+      rng::AbstractRNG, elt::Type{<:Number}, dim1::Index, dims::Vararg{Index}
+    )
+      return $f(rng, elt, (dim1, dims...))
+    end
+    Base.$f(elt::Type{<:Number}, dims::Tuple{Index,Vararg{Index}}) =
+      $f(default_rng(), elt, dims)
+    Base.$f(elt::Type{<:Number}, dim1::Index, dims::Vararg{Index}) =
+      $f(elt, (dim1, dims...))
+    Base.$f(dims::Tuple{Index,Vararg{Index}}) = $f(default_eltype(), dims)
+    Base.$f(dim1::Index, dims::Vararg{Index}) = $f((dim1, dims...))
+  end
+end
+for f in [:zeros, :ones]
+  @eval begin
+    function Base.$f(elt::Type{<:Number}, dims::Tuple{Index,Vararg{Index}})
+      return ITensor($f(elt, NamedInt.(dims)))
+    end
+    function Base.$f(elt::Type{<:Number}, dim1::Index, dims::Vararg{Index})
+      return $f(elt, (dim1, dims...))
+    end
+    Base.$f(dims::Tuple{Index,Vararg{Index}}) = $f(default_eltype(), dims)
+    Base.$f(dim1::Index, dims::Vararg{Index}) = $f((dim1, dims...))
+  end
+end
+function Base.fill(value, dims::Tuple{Index,Vararg{Index}})
+  return ITensor(fill(value, NamedInt.(dims)))
+end
+function Base.fill(value, dim1::Index, dims::Vararg{Index})
+  return fill(value, (dim1, dims...))
+end
diff --git a/src/ITensorsNamedDimsArraysExt/src/tensoralgebra.jl b/src/ITensorsNamedDimsArraysExt/src/tensoralgebra.jl
new file mode 100644
index 0000000000..691df965c3
--- /dev/null
+++ b/src/ITensorsNamedDimsArraysExt/src/tensoralgebra.jl
@@ -0,0 +1,31 @@
+# AbstractArray algebra needed for ITensors.jl.
+# TODO: Instead dispatch on `tensortype(::ITensor)` from within
+# ITensors.jl.
+using ..NDTensors.NamedDimsArrays: AbstractNamedDimsArray, align
+using ..NDTensors.TensorAlgebra: TensorAlgebra
+using ..NDTensors: AliasStyle
+using ..ITensors: ITensors
+
+function ITensors._contract(na1::AbstractNamedDimsArray, na2::AbstractNamedDimsArray)
+  return TensorAlgebra.contract(na1, na2)
+end
+
+function ITensors._add(na1::AbstractNamedDimsArray, na2::AbstractNamedDimsArray)
+  return na1 + na2
+end
+
+function ITensors._permute(::AliasStyle, na::AbstractNamedDimsArray, dims::Tuple)
+  # TODO: Handle aliasing properly.
+  return align(na, name.(dims))
+end
+
+function ITensors._map!!(
+  f,
+  na_dest::AbstractNamedDimsArray,
+  na1::AbstractNamedDimsArray,
+  na2::AbstractNamedDimsArray,
+)
+  # TODO: Handle maybe-mutation.
+  map!(f, na_dest, na1, na2)
+  return na_dest
+end
diff --git a/src/ITensorsNamedDimsArraysExt/src/to_nameddimsarray.jl b/src/ITensorsNamedDimsArraysExt/src/to_nameddimsarray.jl
new file mode 100644
index 0000000000..a17c8e7599
--- /dev/null
+++ b/src/ITensorsNamedDimsArraysExt/src/to_nameddimsarray.jl
@@ -0,0 +1,65 @@
+using ..NDTensors: data, inds
+using ITensors: ITensor
+
+# TODO: Delete this, it is a hack to decide
+# if an Index is blocked.
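For orientation before the conversion helpers that follow: the two `@eval` loops in `itensor.jl` above give the `Base` array constructors `Index`-based methods that return `ITensor`s. A short usage sketch, assuming the extension is active; this mirrors `example_readme.jl` earlier in this diff, with made-up index sizes:

```julia
using ITensors: Index

i = Index(2, "i")
j = Index(3, "j")

A = randn(i, j)           # default element type Float64
B = rand(Float32, i, j)   # explicit element type
C = zeros(i, j)
D = ones(Float32, (i, j)) # tuple form is also accepted
E = fill(1.2, i, j)
```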
+function is_blocked_ind(i) + return try + blockdim(i, 1) + true + catch + false + end +end + +# TODO: Delete once `TensorStorage` is removed. +function to_axes(inds::Tuple) + if any(is_blocked_ind, inds) + return BlockArrays.blockedrange.(map(i -> [blockdim(i, b) for b in 1:nblocks(i)], inds)) + else + return Base.OneTo.(dim.(inds)) + end +end + +using ..NDTensors: DenseTensor +# TODO: Delete once `Dense` is removed. +function to_nameddimsarray(x::DenseTensor) + return named(reshape(data(x), size(x)), name.(inds(x))) +end + +using ..NDTensors: DiagTensor +using ..NDTensors.DiagonalArrays: DiagonalArray +# TODO: Delete once `Diag` is removed. +function to_nameddimsarray(x::DiagTensor) + return named(DiagonalArray(data(x), size(x)), name.(inds(x))) +end + +using ..NDTensors: BlockSparseTensor +using ..NDTensors.BlockSparseArrays: BlockSparseArray +# TODO: Delete once `BlockSparse` is removed. +function to_nameddimsarray(x::BlockSparseTensor) + blockinds = map(i -> [blockdim(i, b) for b in 1:nblocks(i)], inds(x)) + blocktype = set_ndims(datatype(x), ndims(x)) + # TODO: Make a simpler constructor: + # BlockSparseArray(blocktype, blockinds) + arraystorage = BlockSparseArray{eltype(x),ndims(x),blocktype}(blockinds) + for b in nzblocks(x) + arraystorage[BlockArrays.Block(Tuple(b)...)] = x[b] + end + return named(arraystorage, name.(inds(x))) +end + +using ..NDTensors: CombinerTensor, CombinerArray, storage +# TODO: Delete when we directly use `CombinerArray` as storage. +function to_nameddimsarray(t::CombinerTensor) + return named(CombinerArray(storage(t), to_axes(inds(t))), name.(inds(t))) +end + +to_nameddimsarray(t::ITensor) = ITensor(to_nameddimsarray(t.tensor)) + +using ITensors: ITensors +# TODO: Delete once `TensorStorage` is removed. +# TODO: Move to `ITensors`? 
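A note on the blocked branch of `to_axes` above: it converts per-dimension lists of block sizes into `BlockArrays.jl` blocked unit ranges. A stand-alone sketch of that construction, with hypothetical block sizes in place of ones read off an `Index`:

```julia
using BlockArrays: blockedrange

# Hypothetical block sizes for two blocked dimensions.
blockdims = ([2, 3], [1, 4])
axs = blockedrange.(blockdims)   # one blocked unit range per dimension
@assert length.(axs) == (5, 5)   # total extents are the sums of the block sizes
```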
+function to_nameddimsarray(x::ITensors.AbstractMPS) + return to_nameddimsarray.(x) +end diff --git a/src/ITensorsNamedDimsArraysExt/test/runtests.jl b/src/ITensorsNamedDimsArraysExt/test/runtests.jl new file mode 100644 index 0000000000..c34d625ef4 --- /dev/null +++ b/src/ITensorsNamedDimsArraysExt/test/runtests.jl @@ -0,0 +1,11 @@ +using Test: @testset + +@testset "ITensorsNamedDimsArraysExt" begin + filenames = filter(readdir(@__DIR__)) do filename + startswith("test_")(filename) && endswith(".jl")(filename) + end + @testset "Test $(@__DIR__)/$filename" for filename in filenames + println("Running $(@__DIR__)/$filename") + @time include(filename) + end +end diff --git a/src/ITensorsNamedDimsArraysExt/test/test_examples.jl b/src/ITensorsNamedDimsArraysExt/test/test_examples.jl new file mode 100644 index 0000000000..1b4bb7d2df --- /dev/null +++ b/src/ITensorsNamedDimsArraysExt/test/test_examples.jl @@ -0,0 +1,5 @@ +using Test: @testset + +@testset "examples" begin + include("../examples/example_readme.jl") +end diff --git a/src/exports.jl b/src/exports.jl index b5cfea5fa3..83af4065b6 100644 --- a/src/exports.jl +++ b/src/exports.jl @@ -1,3 +1,5 @@ +using NDTensors.RankFactorization: Spectrum, eigs, entropy, truncerror + export # From external modules # LinearAlgebra @@ -11,6 +13,7 @@ export # NDTensors module # Types Block, + # NDTensors.RankFactorization module Spectrum, # Methods eigs, diff --git a/src/itensor.jl b/src/itensor.jl index 00e51c41fb..8be083b4fe 100644 --- a/src/itensor.jl +++ b/src/itensor.jl @@ -1,3 +1,6 @@ +# Private inner constructor +function _ITensor end + """ ITensor @@ -76,23 +79,21 @@ NDTensors.Dense{Float64,Array{Float64,1}} 1.2579101497658178 -1.3559959053693322 ``` """ - -## The categories in this file are -## constructors, properties, iterators, -## Accessor Functions, Index Functions and Operations mutable struct ITensor - tensor::Tensor - function ITensor(::AllowAlias, T::Tensor{<:Any,<:Any,<:Any,<:Tuple}) - @debug_check begin - is = inds(T) - if !allunique(is) - error( - "Trying to create ITensors with collection of indices $is. Indices must be unique.", - ) - end + tensor + global @inline _ITensor(parent) = new(parent) +end + +function ITensor(::AllowAlias, T::Tensor{<:Any,<:Any,<:Any,<:Tuple}) + @debug_check begin + is = inds(T) + if !allunique(is) + error( + "Trying to create ITensors with collection of indices $is. Indices must be unique." + ) end - return new(T) end + return _ITensor(T) end ######################### @@ -1870,7 +1871,7 @@ end # Unfortunately this is more complicated than it might seem since it # has to pass through the broadcasting mechanism first. function (A::ITensor + B::ITensor) - return _add(tensor(A), tensor(B)) + return itensor(_add(tensor(A), tensor(B))) end # TODO: move the order-0 EmptyStorage ITensor special to NDTensors diff --git a/src/tensor_operations/matrix_decomposition.jl b/src/tensor_operations/matrix_decomposition.jl index 9298a97049..2ce37a4355 100644 --- a/src/tensor_operations/matrix_decomposition.jl +++ b/src/tensor_operations/matrix_decomposition.jl @@ -1,3 +1,4 @@ +using NDTensors.RankFactorization: Spectrum """ TruncSVD
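The `_ITensor` refactor earlier in this diff uses a Julia idiom worth calling out: forward-declare a function, then define its one method inside the struct body so it can call `new`, which suppresses the default constructor and makes the checked outer constructor the only public entry point. A minimal sketch of the pattern with a toy type (`Wrap` and `_Wrap` are invented names for illustration):

```julia
# Forward declaration so the name exists before the struct body adds a method.
function _Wrap end

mutable struct Wrap
  parent
  # `global` lets this method escape the struct scope while keeping
  # access to `new`; no default `Wrap(parent)` constructor is generated.
  global @inline _Wrap(parent) = new(parent)
end

# A checked public constructor delegates to the private one.
Wrap(x::AbstractArray) = _Wrap(x)

w = Wrap([1, 2, 3])   # works; `Wrap(1)` would throw a MethodError
```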