diff --git a/NDTensors/Project.toml b/NDTensors/Project.toml index a5843f2ec6..1fe0523d6c 100644 --- a/NDTensors/Project.toml +++ b/NDTensors/Project.toml @@ -36,7 +36,7 @@ NDTensorsOctavianExt = "Octavian" NDTensorsTBLISExt = "TBLIS" [compat] -Adapt = "3.5" +Adapt = "3.7" BlockArrays = "0.16" Compat = "4.9" Dictionaries = "0.3.5" diff --git a/NDTensors/ext/NDTensorsCUDAExt/NDTensorsCUDAExt.jl b/NDTensors/ext/NDTensorsCUDAExt/NDTensorsCUDAExt.jl index 369c398d98..0652f5035a 100644 --- a/NDTensors/ext/NDTensorsCUDAExt/NDTensorsCUDAExt.jl +++ b/NDTensors/ext/NDTensorsCUDAExt/NDTensorsCUDAExt.jl @@ -20,5 +20,6 @@ include("imports.jl") include("set_types.jl") include("iscu.jl") include("adapt.jl") +include("indexing.jl") include("linearalgebra.jl") end diff --git a/NDTensors/ext/NDTensorsCUDAExt/indexing.jl b/NDTensors/ext/NDTensorsCUDAExt/indexing.jl new file mode 100644 index 0000000000..3ad23dc3fa --- /dev/null +++ b/NDTensors/ext/NDTensorsCUDAExt/indexing.jl @@ -0,0 +1,8 @@ +function Base.getindex(::Type{<:CuArray}, T::DenseTensor{<:Number}) + return CUDA.@allowscalar data(T)[] +end + +function Base.setindex!(::Type{<:CuArray}, T::DenseTensor{<:Number}, x::Number) + CUDA.@allowscalar data(T)[] = x + return T +end diff --git a/NDTensors/ext/NDTensorsMetalExt/NDTensorsMetalExt.jl b/NDTensors/ext/NDTensorsMetalExt/NDTensorsMetalExt.jl index cea9e697c9..64b0e4477e 100644 --- a/NDTensors/ext/NDTensorsMetalExt/NDTensorsMetalExt.jl +++ b/NDTensors/ext/NDTensorsMetalExt/NDTensorsMetalExt.jl @@ -1,9 +1,10 @@ module NDTensorsMetalExt +using Adapt +using Functors +using LinearAlgebra: LinearAlgebra using NDTensors using NDTensors.SetParameters -using Functors -using Adapt if isdefined(Base, :get_extension) using Metal @@ -14,4 +15,8 @@ end include("imports.jl") include("adapt.jl") include("set_types.jl") +include("indexing.jl") +include("linearalgebra.jl") +include("copyto.jl") +include("permutedims.jl") end diff --git a/NDTensors/ext/NDTensorsMetalExt/copyto.jl b/NDTensors/ext/NDTensorsMetalExt/copyto.jl new file mode 100644 index 0000000000..2b2022839f --- /dev/null +++ b/NDTensors/ext/NDTensorsMetalExt/copyto.jl @@ -0,0 +1,13 @@ +# Catches a bug in `copyto!` in Metal backend. +function NDTensors.copyto!( + ::Type{<:MtlArray}, dest::AbstractArray, ::Type{<:MtlArray}, src::SubArray +) + return Base.copyto!(dest, copy(src)) +end + +# Catches a bug in `copyto!` in Metal backend. 
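+# Here `src` is a lazy `reshape` of an `MtlArray`; since the parent array
+# shares the same data in the same linear order, we peel off the wrapper
+# and retry `copyto!` on the parent instead.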
+function NDTensors.copyto!( + ::Type{<:MtlArray}, dest::AbstractArray, ::Type{<:MtlArray}, src::Base.ReshapedArray +) + return NDTensors.copyto!(dest, parent(src)) +end diff --git a/NDTensors/ext/NDTensorsMetalExt/indexing.jl b/NDTensors/ext/NDTensorsMetalExt/indexing.jl new file mode 100644 index 0000000000..51d715d4cf --- /dev/null +++ b/NDTensors/ext/NDTensorsMetalExt/indexing.jl @@ -0,0 +1,8 @@ +function Base.getindex(::Type{<:MtlArray}, T::DenseTensor{<:Number}) + return Metal.@allowscalar data(T)[] +end + +function Base.setindex!(::Type{<:MtlArray}, T::DenseTensor{<:Number}, x::Number) + Metal.@allowscalar data(T)[] = x + return T +end diff --git a/NDTensors/ext/NDTensorsMetalExt/linearalgebra.jl b/NDTensors/ext/NDTensorsMetalExt/linearalgebra.jl new file mode 100644 index 0000000000..362be9001f --- /dev/null +++ b/NDTensors/ext/NDTensorsMetalExt/linearalgebra.jl @@ -0,0 +1,16 @@ +function NDTensors.qr(leaf_parenttype::Type{<:MtlArray}, A::AbstractMatrix) + Q, R = NDTensors.qr(NDTensors.cpu(A)) + return adapt(leaf_parenttype, Matrix(Q)), adapt(leaf_parenttype, R) +end + +function NDTensors.eigen(leaf_parenttype::Type{<:MtlArray}, A::AbstractMatrix) + D, U = NDTensors.eigen(NDTensors.cpu(A)) + return adapt(set_ndims(leaf_parenttype, ndims(D)), D), adapt(leaf_parenttype, U) +end + +function NDTensors.svd(leaf_parenttype::Type{<:MtlArray}, A::AbstractMatrix) + U, S, V = NDTensors.svd(NDTensors.cpu(A)) + return adapt(leaf_parenttype, U), + adapt(set_ndims(leaf_parenttype, ndims(S)), S), + adapt(leaf_parenttype, V) +end diff --git a/NDTensors/ext/NDTensorsMetalExt/permutedims.jl b/NDTensors/ext/NDTensorsMetalExt/permutedims.jl new file mode 100644 index 0000000000..be6f3ad870 --- /dev/null +++ b/NDTensors/ext/NDTensorsMetalExt/permutedims.jl @@ -0,0 +1,12 @@ +function NDTensors.permutedims!( + ::Type{<:MtlArray}, + Adest::Base.ReshapedArray{<:Any,<:Any,<:SubArray}, + ::Type{<:MtlArray}, + A, + perm, +) + Aperm = permutedims(A, perm) + Adest_parent = parent(Adest) + copyto!(Adest_parent, Aperm) + return Adest +end diff --git a/NDTensors/src/NDTensors.jl b/NDTensors/src/NDTensors.jl index 641253dec0..29501e2cac 100644 --- a/NDTensors/src/NDTensors.jl +++ b/NDTensors/src/NDTensors.jl @@ -48,11 +48,15 @@ include("algorithm.jl") include("aliasstyle.jl") include("abstractarray/set_types.jl") include("abstractarray/to_shape.jl") +include("abstractarray/iswrappedarray.jl") +include("abstractarray/iscu.jl") include("abstractarray/similar.jl") include("abstractarray/ndims.jl") +include("abstractarray/copyto.jl") include("abstractarray/permutedims.jl") include("abstractarray/fill.jl") include("abstractarray/mul.jl") +include("abstractarray/linearalgebra.jl") include("array/set_types.jl") include("array/permutedims.jl") include("array/mul.jl") @@ -68,6 +72,7 @@ include("dims.jl") include("tensor/set_types.jl") include("tensor/similar.jl") include("tensor/permutedims.jl") +include("tensor/linearalgebra.jl") include("adapt.jl") include("tensoralgebra/generic_tensor_operations.jl") include("tensoralgebra/contraction_logic.jl") diff --git a/NDTensors/src/abstractarray/copyto.jl b/NDTensors/src/abstractarray/copyto.jl new file mode 100644 index 0000000000..3ed7b0bb5a --- /dev/null +++ b/NDTensors/src/abstractarray/copyto.jl @@ -0,0 +1,13 @@ +# NDTensors.copyto! +function copyto!(R::AbstractArray, T::AbstractArray) + copyto!(leaf_parenttype(R), R, leaf_parenttype(T), T) + return R +end + +# NDTensors.copyto! 
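+# Generic fallback once the leaf parent types of both arrays are known:
+# simply forward to `Base.copyto!`. Backend extensions (such as the Metal
+# extension above) can overload this four-argument form to work around
+# backend-specific bugs.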
+function copyto!( + ::Type{<:AbstractArray}, R::AbstractArray, ::Type{<:AbstractArray}, T::AbstractArray +) + Base.copyto!(R, T) + return R +end diff --git a/NDTensors/src/abstractarray/iscu.jl b/NDTensors/src/abstractarray/iscu.jl new file mode 100644 index 0000000000..bcddd036ee --- /dev/null +++ b/NDTensors/src/abstractarray/iscu.jl @@ -0,0 +1,6 @@ +# TODO: Make `isgpu`, `ismtl`, etc. +# For `isgpu`, will require a `NDTensorsGPUArrayCoreExt`. +iscu(A::AbstractArray) = iscu(typeof(A)) +function iscu(A::Type{<:AbstractArray}) + return (leaf_parenttype(A) == A ? false : iscu(leaf_parenttype(A))) +end diff --git a/NDTensors/src/abstractarray/iswrappedarray.jl b/NDTensors/src/abstractarray/iswrappedarray.jl new file mode 100644 index 0000000000..e1be8987e5 --- /dev/null +++ b/NDTensors/src/abstractarray/iswrappedarray.jl @@ -0,0 +1,54 @@ +# Trait indicating if the AbstractArray type is an array wrapper. +# Assumes that it implements `NDTensors.parenttype`. +@traitdef IsWrappedArray{ArrayT} + +#! format: off +@traitimpl IsWrappedArray{ArrayT} <- is_wrapped_array(ArrayT) +#! format: on + +is_wrapped_array(arraytype::Type{<:AbstractArray}) = (parenttype(arraytype) ≠ arraytype) + +# TODO: This is only defined because the current design +# of `Diag` using a `Number` as the data type if it +# is a uniform diagonal type. Delete this when it is +# replaced by `DiagonalArray`. +is_wrapped_array(arraytype::Type{<:Number}) = false + +# For working with instances, not used by +# `SimpleTraits.jl` traits dispatch. +is_wrapped_array(array::AbstractArray) = is_wrapped_array(typeof(array)) + +# By default, the `parentype` of an array type is itself +parenttype(arraytype::Type{<:AbstractArray}) = arraytype + +# TODO: Use `SetParameters` here. +parenttype(::Type{<:ReshapedArray{<:Any,<:Any,P}}) where {P} = P +parenttype(::Type{<:Transpose{<:Any,P}}) where {P} = P +parenttype(::Type{<:Adjoint{<:Any,P}}) where {P} = P +parenttype(::Type{<:Symmetric{<:Any,P}}) where {P} = P +parenttype(::Type{<:Hermitian{<:Any,P}}) where {P} = P +parenttype(::Type{<:UpperTriangular{<:Any,P}}) where {P} = P +parenttype(::Type{<:LowerTriangular{<:Any,P}}) where {P} = P +parenttype(::Type{<:UnitUpperTriangular{<:Any,P}}) where {P} = P +parenttype(::Type{<:UnitLowerTriangular{<:Any,P}}) where {P} = P +parenttype(::Type{<:Diagonal{<:Any,P}}) where {P} = P +parenttype(::Type{<:SubArray{<:Any,<:Any,P}}) where {P} = P + +# For working with instances, not used by +# `SimpleTraits.jl` traits dispatch. +parenttype(array::AbstractArray) = parenttype(typeof(array)) + +@traitfn function leaf_parenttype( + arraytype::Type{ArrayT} +) where {ArrayT; IsWrappedArray{ArrayT}} + return leaf_parenttype(parenttype(arraytype)) +end + +@traitfn function leaf_parenttype( + arraytype::Type{ArrayT} +) where {ArrayT; !IsWrappedArray{ArrayT}} + return arraytype +end + +# For working with instances. 
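+# For example, a transposed view of a `Matrix` unwraps all the way back to
+# `Matrix` (a sketch, not part of the test suite):
+#
+#   leaf_parenttype(transpose(view(ones(3, 3), 1:2, 1:2))) == Matrix{Float64}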
+leaf_parenttype(array::AbstractArray) = leaf_parenttype(typeof(array)) diff --git a/NDTensors/src/abstractarray/linearalgebra.jl b/NDTensors/src/abstractarray/linearalgebra.jl new file mode 100644 index 0000000000..8520214f1a --- /dev/null +++ b/NDTensors/src/abstractarray/linearalgebra.jl @@ -0,0 +1,11 @@ +# NDTensors.qr +qr(A::AbstractMatrix) = qr(leaf_parenttype(A), A) +qr(::Type{<:AbstractArray}, A::AbstractMatrix) = LinearAlgebra.qr(A) + +# NDTensors.eigen +eigen(A::AbstractMatrix) = eigen(leaf_parenttype(A), A) +eigen(::Type{<:AbstractArray}, A::AbstractMatrix) = LinearAlgebra.eigen(A) + +# NDTensors.svd +svd(A::AbstractMatrix) = svd(leaf_parenttype(A), A) +svd(::Type{<:AbstractArray}, A::AbstractMatrix) = LinearAlgebra.svd(A) diff --git a/NDTensors/src/abstractarray/permutedims.jl b/NDTensors/src/abstractarray/permutedims.jl index f23db4ea73..f0f09249d7 100644 --- a/NDTensors/src/abstractarray/permutedims.jl +++ b/NDTensors/src/abstractarray/permutedims.jl @@ -12,12 +12,25 @@ end # NDTensors.permutedims! function permutedims!(Mdest::AbstractArray, M::AbstractArray, perm) - return permutedims!(leaf_parenttype(Mdest), Mdest, leaf_parenttype(M), M, perm) + permutedims!(leaf_parenttype(Mdest), Mdest, leaf_parenttype(M), M, perm) + return Mdest end # NDTensors.permutedims! function permutedims!(::Type{<:AbstractArray}, Mdest, ::Type{<:AbstractArray}, M, perm) - return Base.permutedims!(Mdest, M, perm) + Base.permutedims!(Mdest, M, perm) + return Mdest +end + +function permutedims!!(B::AbstractArray, A::AbstractArray, perm) + return permutedims!!(leaf_parenttype(B), B, leaf_parenttype(A), A, perm) +end + +function permutedims!!( + Bleaftype::Type{<:AbstractArray}, B, Aleaftype::Type{<:AbstractArray}, A, perm +) + permutedims!(leaf_parenttype(B), B, leaf_parenttype(A), A, perm) + return B end function permutedims!!(B::AbstractArray, A::AbstractArray, perm, f) diff --git a/NDTensors/src/abstractarray/similar.jl b/NDTensors/src/abstractarray/similar.jl index 873e22e44e..4918e5150c 100644 --- a/NDTensors/src/abstractarray/similar.jl +++ b/NDTensors/src/abstractarray/similar.jl @@ -1,54 +1,6 @@ ## Custom `NDTensors.similar` implementation. ## More extensive than `Base.similar`. -# Trait indicating if the AbstractArray type is an array wrapper. -# Assumes that it implements `NDTensors.parenttype`. -@traitdef IsWrappedArray{ArrayT} - -#! format: off -@traitimpl IsWrappedArray{ArrayT} <- is_wrapped_array(ArrayT) -#! format: on - -is_wrapped_array(arraytype::Type{<:AbstractArray}) = (parenttype(arraytype) ≠ arraytype) - -# For working with instances, not used by -# `SimpleTraits.jl` traits dispatch. 
-is_wrapped_array(array::AbstractArray) = is_wrapped_array(typeof(array)) - -# By default, the `parentype` of an array type is itself -parenttype(arraytype::Type{<:AbstractArray}) = arraytype - -parenttype(::Type{<:ReshapedArray{<:Any,<:Any,P}}) where {P} = P -parenttype(::Type{<:Transpose{<:Any,P}}) where {P} = P -parenttype(::Type{<:Adjoint{<:Any,P}}) where {P} = P -parenttype(::Type{<:Symmetric{<:Any,P}}) where {P} = P -parenttype(::Type{<:Hermitian{<:Any,P}}) where {P} = P -parenttype(::Type{<:UpperTriangular{<:Any,P}}) where {P} = P -parenttype(::Type{<:LowerTriangular{<:Any,P}}) where {P} = P -parenttype(::Type{<:UnitUpperTriangular{<:Any,P}}) where {P} = P -parenttype(::Type{<:UnitLowerTriangular{<:Any,P}}) where {P} = P -parenttype(::Type{<:Diagonal{<:Any,P}}) where {P} = P -parenttype(::Type{<:SubArray{<:Any,<:Any,P}}) where {P} = P - -# For working with instances, not used by -# `SimpleTraits.jl` traits dispatch. -parenttype(array::AbstractArray) = parenttype(typeof(array)) - -@traitfn function leaf_parenttype( - arraytype::Type{ArrayT} -) where {ArrayT; IsWrappedArray{ArrayT}} - return leaf_parenttype(parenttype(arraytype)) -end - -@traitfn function leaf_parenttype( - arraytype::Type{ArrayT} -) where {ArrayT; !IsWrappedArray{ArrayT}} - return arraytype -end - -# For working with instances. -leaf_parenttype(array::AbstractArray) = leaf_parenttype(typeof(array)) - # This function actually allocates the data. # NDTensors.similar function similar(arraytype::Type{<:AbstractArray}, dims::Tuple) @@ -56,11 +8,6 @@ function similar(arraytype::Type{<:AbstractArray}, dims::Tuple) return similartype(arraytype, shape)(undef, NDTensors.to_shape(arraytype, shape)) end -# For when there are CUArray specific issues inline -iscu(A::AbstractArray) = iscu(typeof(A)) -function iscu(A::Type{<:AbstractArray}) - return (leaf_parenttype(A) == A ? false : iscu(leaf_parenttype(A))) -end # This function actually allocates the data. # Catches conversions of dimensions specified by ranges # dimensions specified by integers with `Base.to_shape`. diff --git a/NDTensors/src/blocksparse/blocksparse.jl b/NDTensors/src/blocksparse/blocksparse.jl index ab62cb6f25..5e363cbdac 100644 --- a/NDTensors/src/blocksparse/blocksparse.jl +++ b/NDTensors/src/blocksparse/blocksparse.jl @@ -54,6 +54,16 @@ function BlockSparse( return BlockSparse(Vector{ElT}(undef, dim), blockoffsets; vargs...) end +function BlockSparse( + datatype::Type{<:AbstractArray}, + ::UndefInitializer, + blockoffsets::BlockOffsets, + dim::Integer; + vargs..., +) + return BlockSparse(datatype(undef, dim), blockoffsets; vargs...) +end + function BlockSparse(blockoffsets::BlockOffsets, dim::Integer; vargs...) return BlockSparse(Float64, blockoffsets, dim; vargs...) 
end diff --git a/NDTensors/src/blocksparse/blocksparsetensor.jl b/NDTensors/src/blocksparse/blocksparsetensor.jl index 1b274202c6..a591038512 100644 --- a/NDTensors/src/blocksparse/blocksparsetensor.jl +++ b/NDTensors/src/blocksparse/blocksparsetensor.jl @@ -15,6 +15,14 @@ function BlockSparseTensor( return tensor(storage, inds) end +function BlockSparseTensor( + datatype::Type{<:AbstractArray}, ::UndefInitializer, boffs::BlockOffsets, inds +) + nnz_tot = nnz(boffs, inds) + storage = BlockSparse(datatype, undef, boffs, nnz_tot) + return tensor(storage, inds) +end + function BlockSparseTensor( ::Type{ElT}, ::UndefInitializer, blocks::Vector{BlockT}, inds ) where {ElT<:Number,BlockT<:Union{Block,NTuple}} @@ -23,6 +31,17 @@ function BlockSparseTensor( return tensor(storage, inds) end +function BlockSparseTensor( + datatype::Type{<:AbstractArray}, + ::UndefInitializer, + blocks::Vector{<:Union{Block,NTuple}}, + inds, +) + boffs, nnz = blockoffsets(blocks, inds) + storage = BlockSparse(datatype, undef, boffs, nnz) + return tensor(storage, inds) +end + """ BlockSparseTensor(::UndefInitializer, blocks, inds) @@ -91,6 +110,14 @@ function BlockSparseTensor( return tensor(storage, inds) end +function BlockSparseTensor( + datatype::Type{<:AbstractArray}, blocks::Vector{<:Union{Block,NTuple}}, inds +) + boffs, nnz = blockoffsets(blocks, inds) + storage = BlockSparse(datatype, boffs, nnz) + return tensor(storage, inds) +end + function BlockSparseTensor( x::Number, blocks::Vector{BlockT}, inds ) where {BlockT<:Union{Block,NTuple}} @@ -150,15 +177,15 @@ function BlockSparseTensor( end function zeros( - ::BlockSparseTensor{ElT,N}, blockoffsets::BlockOffsets{N}, inds + tensor::BlockSparseTensor{ElT,N}, blockoffsets::BlockOffsets{N}, inds ) where {ElT,N} - return BlockSparseTensor(ElT, blockoffsets, inds) + return BlockSparseTensor(datatype(tensor), blockoffsets, inds) end function zeros( - ::Type{<:BlockSparseTensor{ElT,N}}, blockoffsets::BlockOffsets{N}, inds + tensortype::Type{<:BlockSparseTensor{ElT,N}}, blockoffsets::BlockOffsets{N}, inds ) where {ElT,N} - return BlockSparseTensor(ElT, blockoffsets, inds) + return BlockSparseTensor(datatype(tensortype), blockoffsets, inds) end function zeros(tensortype::Type{<:BlockSparseTensor}, inds) @@ -426,7 +453,7 @@ function permutedims_combine_output( # Combine the blocks (within the newly combined and permuted dimension) blocks_perm_comb = combine_blocks(blocks_perm_comb, comb_ind_loc, blockcomb) - return BlockSparseTensor(ElT, blocks_perm_comb, is) + return BlockSparseTensor(leaf_parenttype(T), blocks_perm_comb, is) end function permutedims_combine( @@ -487,8 +514,11 @@ function permutedims_combine( # XXX Not sure what this was for Rb = reshape(Rb, permute(dims(Tb), perm)) + # TODO: Make this `convert` call more general + # for GPUs. 
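+    # For now the block is materialized as a CPU `Array` (via the existing
+    # `convert(Array, Tb)` below) and permuted into `Rb` with the generic
+    # `permutedims!`, replacing the previous `@strided` broadcast that
+    # assumed CPU strided arrays.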
Tbₐ = convert(Array, Tb) - @strided Rb .= permutedims(Tbₐ, perm) + ## @strided Rb .= permutedims(Tbₐ, perm) + permutedims!(Rb, Tbₐ, perm) end return R @@ -564,7 +594,7 @@ function uncombine_output( blocks_uncomb_perm = perm_blocks(blocks_uncomb, combdim, invperm(blockperm)) boffs_uncomb_perm, nnz_uncomb_perm = blockoffsets(blocks_uncomb_perm, inds_uncomb_perm) T_uncomb_perm = tensor( - BlockSparse(ElT, boffs_uncomb_perm, nnz_uncomb_perm), inds_uncomb_perm + BlockSparse(leaf_parenttype(T), boffs_uncomb_perm, nnz_uncomb_perm), inds_uncomb_perm ) R = reshape(T_uncomb_perm, is) return R @@ -623,14 +653,16 @@ function uncombine( #copyto!(Rb,Tb) if length(Tb) == 1 - Rb[1] = Tb[1] + Rb[] = Tb[] else # XXX: this used to be: # Rbₐᵣ = ReshapedArray(parent(Rbₐ), size(Tb), ()) # however that doesn't work with subarrays Rbₐ = convert(Array, Rb) - Rbₐᵣ = ReshapedArray(Rbₐ, size(Tb), ()) - @strided Rbₐᵣ .= Tb + ## Rbₐᵣ = ReshapedArray(Rbₐ, size(Tb), ()) + Rbₐᵣ = reshape(Rbₐ, size(Tb)) + ## @strided Rbₐᵣ .= Tb + copyto!(Rbₐᵣ, Tb) end end end diff --git a/NDTensors/src/blocksparse/diagblocksparse.jl b/NDTensors/src/blocksparse/diagblocksparse.jl index 8620e576c5..9db7f7f09a 100644 --- a/NDTensors/src/blocksparse/diagblocksparse.jl +++ b/NDTensors/src/blocksparse/diagblocksparse.jl @@ -62,6 +62,15 @@ function DiagBlockSparse( return DiagBlockSparse(Vector{ElT}(undef, diaglength), boffs) end +function DiagBlockSparse( + datatype::Type{<:AbstractArray}, + ::UndefInitializer, + boffs::BlockOffsets, + diaglength::Integer, +) + return DiagBlockSparse(datatype(undef, diaglength), boffs) +end + function DiagBlockSparse(::UndefInitializer, boffs::BlockOffsets, diaglength::Integer) return DiagBlockSparse(Float64, undef, boffs, diaglength) end diff --git a/NDTensors/src/blocksparse/linearalgebra.jl b/NDTensors/src/blocksparse/linearalgebra.jl index 2835d262c7..d4a64c9085 100644 --- a/NDTensors/src/blocksparse/linearalgebra.jl +++ b/NDTensors/src/blocksparse/linearalgebra.jl @@ -32,12 +32,13 @@ per row/column, otherwise it fails. This assumption makes it so the result can be computed from the dense svds of seperate blocks. """ -function LinearAlgebra.svd(T::BlockSparseMatrix{ElT}; kwargs...) where {ElT} +function svd(T::BlockSparseMatrix{ElT}; kwargs...) where {ElT} alg::String = get(kwargs, :alg, "divide_and_conquer") min_blockdim::Int = get(kwargs, :min_blockdim, 0) truncate = haskey(kwargs, :maxdim) || haskey(kwargs, :cutoff) #@timeit_debug timer "block sparse svd" begin + Us = Vector{DenseTensor{ElT,2}}(undef, nnzblocks(T)) Ss = Vector{DiagTensor{real(ElT),2}}(undef, nnzblocks(T)) Vs = Vector{DenseTensor{ElT,2}}(undef, nnzblocks(T)) @@ -75,7 +76,7 @@ function LinearAlgebra.svd(T::BlockSparseMatrix{ElT}; kwargs...) where {ElT} dropblocks = Int[] if truncate - truncerr, docut = truncate!(d; kwargs...) + d, truncerr, docut = truncate!!(d; kwargs...) for n in 1:nnzblocks(T) blockdim = _truncated_blockdim( Ss[n], docut; min_blockdim, singular_values=true, truncate @@ -146,9 +147,11 @@ function LinearAlgebra.svd(T::BlockSparseMatrix{ElT}; kwargs...) 
where {ElT} indsS = setindex(inds(T), dag(uind), 1) indsS = setindex(indsS, dag(vind), 2) - U = BlockSparseTensor(ElT, undef, nzblocksU, indsU) - S = DiagBlockSparseTensor(real(ElT), undef, nzblocksS, indsS) - V = BlockSparseTensor(ElT, undef, nzblocksV, indsV) + U = BlockSparseTensor(leaf_parenttype(T), undef, nzblocksU, indsU) + S = DiagBlockSparseTensor( + set_eltype(leaf_parenttype(T), real(ElT)), undef, nzblocksS, indsS + ) + V = BlockSparseTensor(leaf_parenttype(T), undef, nzblocksV, indsV) for n in 1:nnzblocksT Ub, Sb, Vb = Us[n], Ss[n], Vs[n] @@ -169,10 +172,9 @@ function LinearAlgebra.svd(T::BlockSparseMatrix{ElT}; kwargs...) where {ElT} sU = right_arrow_sign(uind, blockU[2]) if sU == -1 - blockview(U, blockU) .= -Ub - else - blockview(U, blockU) .= Ub + Ub *= -1 end + copyto!(blockview(U, blockU), Ub) blockviewS = blockview(S, blockS) for i in 1:diaglength(Sb) @@ -190,12 +192,10 @@ function LinearAlgebra.svd(T::BlockSparseMatrix{ElT}; kwargs...) where {ElT} end if (sV * sVP) == -1 - blockview(V, blockV) .= -Vb - else - blockview(V, blockV) .= Vb + Vb *= -1 end + copyto!(blockview(V, blockV), Vb) end - return U, S, V, Spectrum(d, truncerr) #end # @timeit_debug end @@ -204,7 +204,7 @@ _eigen_eltypes(T::Hermitian{ElT,<:BlockSparseMatrix{ElT}}) where {ElT} = real(El _eigen_eltypes(T::BlockSparseMatrix{ElT}) where {ElT} = complex(ElT), complex(ElT) -function LinearAlgebra.eigen( +function eigen( T::Union{Hermitian{ElT,<:BlockSparseMatrix{ElT}},BlockSparseMatrix{ElT}}; kwargs... ) where {ElT<:Union{Real,Complex}} truncate = haskey(kwargs, :maxdim) || haskey(kwargs, :cutoff) @@ -237,7 +237,7 @@ function LinearAlgebra.eigen( sort!(d; rev=true, by=abs) if truncate - truncerr, docut = truncate!(d; kwargs...) + d, truncerr, docut = truncate!!(d; kwargs...) for n in 1:nnzblocks(T) blockdim = _truncated_blockdim(Ds[n], docut) if blockdim == 0 @@ -245,7 +245,9 @@ function LinearAlgebra.eigen( else Dtrunc = tensor(Diag(storage(Ds[n])[1:blockdim]), (blockdim, blockdim)) Ds[n] = Dtrunc - Vs[n] = copy(Vs[n][1:dim(Vs[n], 1), 1:blockdim]) + new_size = (dim(Vs[n], 1), blockdim) + new_data = array(Vs[n])[1:new_size[1], 1:new_size[2]] + Vs[n] = tensor(Dense(new_data), new_size) end end deleteat!(Ds, dropblocks) @@ -299,8 +301,10 @@ function LinearAlgebra.eigen( nzblocksV[n] = blockV end - D = DiagBlockSparseTensor(ElD, undef, nzblocksD, indsD) - V = BlockSparseTensor(ElV, undef, nzblocksV, indsV) + D = DiagBlockSparseTensor( + set_ndims(set_eltype(leaf_parenttype(T), ElD), 1), undef, nzblocksD, indsD + ) + V = BlockSparseTensor(set_eltype(leaf_parenttype(T), ElV), undef, nzblocksV, indsV) for n in 1:nnzblocksT Db, Vb = Ds[n], Vs[n] @@ -372,14 +376,16 @@ function qx(qx::Function, T::BlockSparseTensor{<:Any,2}; kwargs...) nzblocksX[n] = (UInt(n), blockT[2]) end - Q = BlockSparseTensor(ElT, undef, nzblocksQ, indsQ) - X = BlockSparseTensor(ElT, undef, nzblocksX, indsX) + Q = BlockSparseTensor(leaf_parenttype(T), undef, nzblocksQ, indsQ) + X = BlockSparseTensor(leaf_parenttype(T), undef, nzblocksX, indsX) for n in 1:nnzblocksT blockview(Q, nzblocksQ[n]) .= Qs[n] blockview(X, nzblocksX[n]) .= Xs[n] end + Q = adapt(leaf_parenttype(T), Q) + X = adapt(leaf_parenttype(T), X) return Q, X end diff --git a/NDTensors/src/dense/densetensor.jl b/NDTensors/src/dense/densetensor.jl index ed773d6f0e..f80dc99e8c 100644 --- a/NDTensors/src/dense/densetensor.jl +++ b/NDTensors/src/dense/densetensor.jl @@ -81,7 +81,11 @@ end # @propagate_inbounds function getindex(T::DenseTensor{<:Number}) - return (iscu(T) ? 
NDTensors.cpu(data(T))[] : data(T)[]) + return getindex(leaf_parenttype(T), T) +end + +@propagate_inbounds function getindex(::Type{<:AbstractArray}, T::DenseTensor{<:Number}) + return data(T)[] end @propagate_inbounds function getindex(T::DenseTensor{<:Number}, I::Integer...) @@ -110,6 +114,18 @@ end return T end +@propagate_inbounds function setindex!(T::DenseTensor{<:Number}, x::Number) + setindex!(leaf_parenttype(T), T, x) + return T +end + +@propagate_inbounds function setindex!( + ::Type{<:AbstractArray}, T::DenseTensor{<:Number}, x::Number +) + data(T)[] = x + return T +end + # # Linear indexing # @@ -203,10 +219,9 @@ function permutedims!( return R end -function copyto!(R::DenseTensor{<:Number,N}, T::DenseTensor{<:Number,N}) where {N} - RA = array(R) - TA = array(T) - RA .= TA +# NDTensors.copyto! +function copyto!(R::DenseTensor, T::DenseTensor) + copyto!(array(R), array(T)) return R end @@ -242,7 +257,7 @@ function permutedims!( R::DenseTensor{<:Number,N}, T::DenseTensor{<:Number,N}, perm, f::Function ) where {N} if nnz(R) == 1 && nnz(T) == 1 - R[1] = f(R[1], T[1]) + R[] = f(R[], T[]) return R end RA = array(R) diff --git a/NDTensors/src/dense/linearalgebra/decompositions.jl b/NDTensors/src/dense/linearalgebra/decompositions.jl index 30ce66467d..70009e6aaa 100644 --- a/NDTensors/src/dense/linearalgebra/decompositions.jl +++ b/NDTensors/src/dense/linearalgebra/decompositions.jl @@ -1,4 +1,3 @@ - Strided.StridedView(T::DenseTensor) = StridedView(convert(Array, T)) function drop_singletons(::Order{N}, labels, dims) where {N} @@ -17,7 +16,7 @@ end # svd of an order-n tensor according to positions Lpos # and Rpos -function LinearAlgebra.svd( +function svd( T::DenseTensor{<:Number,N,IndsT}, Lpos::NTuple{NL,Int}, Rpos::NTuple{NR,Int}; kwargs... ) where {N,IndsT,NL,NR} M = permute_reshape(T, Lpos, Rpos) @@ -40,7 +39,7 @@ end # qr decomposition of an order-n tensor according to # positions Lpos and Rpos -function LinearAlgebra.qr( +function qr( T::DenseTensor{<:Number,N,IndsT}, Lpos::NTuple{NL,Int}, Rpos::NTuple{NR,Int}; kwargs... ) where {N,IndsT,NL,NR} M = permute_reshape(T, Lpos, Rpos) diff --git a/NDTensors/src/diag/diagtensor.jl b/NDTensors/src/diag/diagtensor.jl index fb1d7a1fd8..f1d97cd8ce 100644 --- a/NDTensors/src/diag/diagtensor.jl +++ b/NDTensors/src/diag/diagtensor.jl @@ -106,14 +106,33 @@ function dense(::Type{<:Tensor{ElT,N,StoreT,IndsT}}) where {ElT,N,StoreT<:Diag,I end # convert to Dense -function dense(T::TensorT) where {TensorT<:DiagTensor} - R = zeros(dense(TensorT), inds(T)) +function dense(T::DiagTensor) + return dense(leaf_parenttype(T), T) +end + +# CPU version +function dense(::Type{<:Array}, T::DiagTensor) + R = zeros(dense(typeof(T)), inds(T)) for i in 1:diaglength(T) setdiagindex!(R, getdiagindex(T, i), i) end return R end +# GPU version +function dense(::Type{<:AbstractArray}, T::DiagTensor) + D_cpu = dense(Array, cpu(T)) + return adapt(leaf_parenttype(T), D_cpu) +end + +# UniformDiag version +# TODO: Delete once new DiagonalArray is designed. +# TODO: This creates a tensor on CPU by default so may cause +# problems for GPU. 
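+# A uniform `Diag` stores its single diagonal value as a plain `Number`,
+# so that value is expanded with `fill` into a (CPU) vector before
+# densifying.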
+function dense(::Type{<:Number}, T::DiagTensor) + return dense(Tensor(Diag(fill(getdiagindex(T, 1), diaglength(T))), inds(T))) +end + denseblocks(T::DiagTensor) = dense(T) function permutedims!( diff --git a/NDTensors/src/exports.jl b/NDTensors/src/exports.jl index 427f220e10..8e91f52019 100644 --- a/NDTensors/src/exports.jl +++ b/NDTensors/src/exports.jl @@ -3,14 +3,12 @@ export insertblock!!, setindex, setindex!!, - # blocksparse/blockdims.jl BlockDims, blockdim, blockdims, nblocks, blockindex, - # blocksparse/blocksparse.jl # Types Block, @@ -49,7 +47,6 @@ export matrix, outer, permutedims!!, - ql, read, vector, write, @@ -78,5 +75,15 @@ export ind, store, + # truncate.jl + truncate!, + # linearalgebra.jl - qr + eigs, + entropy, + polar, + ql, + random_orthog, + random_unitary, + Spectrum, + truncerror diff --git a/NDTensors/src/imports.jl b/NDTensors/src/imports.jl index 4b58569f21..8925685297 100644 --- a/NDTensors/src/imports.jl +++ b/NDTensors/src/imports.jl @@ -17,7 +17,6 @@ import Base: convert, conj, copy, - copyto!, eachindex, eltype, empty, @@ -53,6 +52,6 @@ import Base.Broadcast: Broadcasted, BroadcastStyle import Adapt: adapt_structure, adapt_storage -import LinearAlgebra: diag, exp, norm, qr +import LinearAlgebra: diag, exp, norm import TupleTools: isperm diff --git a/NDTensors/src/linearalgebra/linearalgebra.jl b/NDTensors/src/linearalgebra/linearalgebra.jl index e2c8133eb9..cab3f7e057 100644 --- a/NDTensors/src/linearalgebra/linearalgebra.jl +++ b/NDTensors/src/linearalgebra/linearalgebra.jl @@ -1,5 +1,3 @@ -export eigs, entropy, polar, random_orthog, random_unitary, Spectrum, svd, truncerror - # # Linear Algebra of order 2 NDTensors # @@ -95,7 +93,7 @@ end svd of an order-2 DenseTensor """ -function LinearAlgebra.svd(T::DenseTensor{ElT,2,IndsT}; kwargs...) where {ElT,IndsT} +function svd(T::DenseTensor{ElT,2,IndsT}; kwargs...) where {ElT,IndsT} truncate = haskey(kwargs, :maxdim) || haskey(kwargs, :cutoff) # @@ -169,7 +167,7 @@ function LinearAlgebra.svd(T::DenseTensor{ElT,2,IndsT}; kwargs...) where {ElT,In P = MS .^ 2 if truncate - truncerr, _ = truncate!( + P, truncerr, _ = truncate!!( P; mindim, maxdim, cutoff, use_absolute_cutoff, use_relative_cutoff, kwargs... ) else @@ -179,7 +177,9 @@ function LinearAlgebra.svd(T::DenseTensor{ElT,2,IndsT}; kwargs...) where {ElT,In dS = length(P) if dS < length(MS) MU = MU[:, 1:dS] - resize!(MS, dS) + # Fails on some GPU backends like Metal. + # resize!(MS, dS) + MS = MS[1:dS] MV = MV[:, 1:dS] end @@ -195,7 +195,7 @@ function LinearAlgebra.svd(T::DenseTensor{ElT,2,IndsT}; kwargs...) where {ElT,In return U, S, V, spec end -function LinearAlgebra.eigen( +function eigen( T::Hermitian{ElT,<:DenseTensor{ElT,2,IndsT}}; kwargs... ) where {ElT<:Union{Real,Complex},IndsT} # Keyword argument deprecations @@ -236,11 +236,9 @@ function LinearAlgebra.eigen( VM = VM[:, p] if truncate - cpu_dm = NDTensors.cpu(DM) - truncerr, _ = truncate!( - cpu_dm; mindim, maxdim, cutoff, use_absolute_cutoff, use_relative_cutoff, kwargs... + DM, truncerr, _ = truncate!!( + DM; mindim, maxdim, cutoff, use_absolute_cutoff, use_relative_cutoff, kwargs... ) - DM = adapt(typeof(DM), cpu_dm) dD = length(DM) if dD < size(VM, 2) VM = VM[:, 1:dD] @@ -316,7 +314,7 @@ random_orthog(::Type{ElT}, n::Int, m::Int) where {ElT<:Real} = random_unitary(El random_orthog(n::Int, m::Int) = random_orthog(Float64, n, m) -function LinearAlgebra.eigen( +function eigen( T::DenseTensor{ElT,2,IndsT}; kwargs... 
) where {ElT<:Union{Real,Complex},IndsT} # Keyword argument deprecations @@ -355,7 +353,7 @@ function LinearAlgebra.eigen( #VM = VM[:,p] if truncate - truncerr, _ = truncate!( + DM, truncerr, _ = truncate!!( DM; maxdim, cutoff, use_absolute_cutoff, use_relative_cutoff, kwargs... ) dD = length(DM) @@ -380,10 +378,13 @@ function LinearAlgebra.eigen( return D, V, spec end +# NDTensors.qr function qr(T::DenseTensor{<:Any,2}; positive=false, kwargs...) qxf = positive ? qr_positive : qr return qx(qxf, T; kwargs...) end + +# NDTensors.ql function ql(T::DenseTensor{<:Any,2}; positive=false, kwargs...) qxf = positive ? ql_positive : ql return qx(qxf, T; kwargs...) @@ -411,6 +412,13 @@ function qx(qx::Function, T::DenseTensor{<:Any,2}; kwargs...) return Q, X end +# Version of `sign` that returns one +# if `x == 0`. +function nonzero_sign(x) + iszero(x) && return one(x) + return sign(x) +end + # # Just flip signs between Q and R to get all the diagonals of R >=0. # For rectangular M the indexing for "diagonal" is non-trivial. @@ -424,28 +432,12 @@ non-negative. Such a QR decomposition of a matrix is unique. Returns a tuple (Q,R). """ function qr_positive(M::AbstractMatrix) - iscuda = iscu(M) - if iscuda - cutype = leaf_parenttype(M) - M = NDTensors.cpu(M) - end sparseQ, R = qr(M) Q = convert(typeof(R), sparseQ) nc = size(Q, 2) - ## TODO issue here for GPU because tying to access indices - for c in 1:nc - if R[c, c] != 0.0 #sign(0.0)==0.0 so we don't want to zero out a column of Q. - sign_Rc = sign(R[c, c]) - if !isone(sign_Rc) - R[c, c:end] *= conj(sign_Rc) #only fip non-zero portion of the row. - Q[:, c] *= sign_Rc - end - end - end - if iscuda - Q = adapt(cutype, Q) - R = adapt(cutype, R) - end + signs = nonzero_sign.(diag(R)) + Q = Q * Diagonal(signs) + R = Diagonal(conj.(signs)) * R return (Q, R) end @@ -458,6 +450,9 @@ non-negative. Such a QL decomposition of a matrix is unique. Returns a tuple (Q,L). """ function ql_positive(M::AbstractMatrix) + # TODO: Change to `isgpu`, or better yet rewrite + # in terms of broadcasting and linear algebra + # like `qr_positive`. iscuda = iscu(M) if iscuda cutype = leaf_parenttype(M) diff --git a/NDTensors/src/linearalgebra/svd.jl b/NDTensors/src/linearalgebra/svd.jl index 6b9a09ae83..59b5a795fc 100644 --- a/NDTensors/src/linearalgebra/svd.jl +++ b/NDTensors/src/linearalgebra/svd.jl @@ -1,5 +1,10 @@ +# The state of the `svd_recursive` algorithm. +function svd_recursive_state(S::AbstractArray, thresh::Float64) + return svd_recursive_state(leaf_parenttype(S), S, thresh) +end -function checkSVDDone(S::AbstractArray, thresh::Float64) +# CPU version. +function svd_recursive_state(::Type{<:Array}, S::AbstractArray, thresh::Float64) N = length(S) (N <= 1 || thresh < 0.0) && return (true, 1) S1t = S[1] * thresh @@ -14,6 +19,12 @@ function checkSVDDone(S::AbstractArray, thresh::Float64) return (false, start) end +# Convert to CPU to avoid slow scalar indexing +# on GPU. 
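+# Fallback for any non-`Array` storage (e.g. GPU arrays): move the singular
+# values to the CPU with `cpu(S)` and reuse the `Array` method above.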
+function svd_recursive_state(::Type{<:AbstractArray}, S::AbstractArray, thresh::Float64) + return svd_recursive_state(Array, cpu(S), thresh) +end + function svd_recursive(M::AbstractMatrix; thresh::Float64=1E-3, north_pass::Int=2) Mr, Mc = size(M) if Mr > Mc @@ -32,11 +43,9 @@ function svd_recursive(M::AbstractMatrix; thresh::Float64=1E-3, north_pass::Int= V = M' * U V, R = qr_positive(V) - for n in 1:Nd - D[n] = R[n, n] - end + D[1:Nd] = diag(R)[1:Nd] - (done, start) = checkSVDDone(D, thresh) + (done, start) = svd_recursive_state(D, thresh) done && return U, D, V diff --git a/NDTensors/src/tensor/linearalgebra.jl b/NDTensors/src/tensor/linearalgebra.jl new file mode 100644 index 0000000000..484ce8811f --- /dev/null +++ b/NDTensors/src/tensor/linearalgebra.jl @@ -0,0 +1,19 @@ +function LinearAlgebra.qr(T::Tensor; kwargs...) + return qr(T; kwargs...) +end + +function LinearAlgebra.eigen(T::Tensor; kwargs...) + return eigen(T; kwargs...) +end + +function LinearAlgebra.eigen(T::Hermitian{<:Real,<:Tensor}; kwargs...) + return eigen(T; kwargs...) +end + +function LinearAlgebra.eigen(T::Hermitian{<:Complex{<:Real},<:Tensor}; kwargs...) + return eigen(T; kwargs...) +end + +function LinearAlgebra.svd(T::Tensor; kwargs...) + return svd(T; kwargs...) +end diff --git a/NDTensors/src/truncate.jl b/NDTensors/src/truncate.jl index 3acc2f2099..b92fa8a1f1 100644 --- a/NDTensors/src/truncate.jl +++ b/NDTensors/src/truncate.jl @@ -1,7 +1,23 @@ -export truncate! +function truncate!!(P::AbstractArray; kwargs...) + return truncate!!(leaf_parenttype(P), P; kwargs...) +end + +# CPU version. +function truncate!!(::Type{<:Array}, P::AbstractArray; kwargs...) + truncerr, docut = truncate!(P; kwargs...) + return P, truncerr, docut +end + +# GPU fallback version, convert to CPU. +function truncate!!(::Type{<:AbstractArray}, P::AbstractArray; kwargs...) + P_cpu = cpu(P) + P_cpu, truncerr, docut = truncate!(P_cpu; kwargs...) + P = adapt(leaf_parenttype(P), P_cpu) + return P, truncerr, docut +end -function truncate!(P::AbstractVector{ElT}; kwargs...)::Tuple{ElT,ElT} where {ElT} - cutoff::Union{Nothing,ElT} = get(kwargs, :cutoff, zero(ElT)) +# CPU implementation. +function truncate!(P::AbstractVector{ElT}; cutoff=zero(eltype(P)), kwargs...) 
where {ElT} if isnothing(cutoff) cutoff = typemin(ElT) end diff --git a/NDTensors/test/blocksparse.jl b/NDTensors/test/blocksparse.jl index 8f52f51f14..aa79013298 100644 --- a/NDTensors/test/blocksparse.jl +++ b/NDTensors/test/blocksparse.jl @@ -226,37 +226,37 @@ end @test isblocknz(T, (2, 2)) end - @testset "svd" begin + @testset "svd on $dev" for dev in devs @testset "svd example 1" begin - A = BlockSparseTensor([(2, 1), (1, 2)], [2, 2], [2, 2]) + A = dev(BlockSparseTensor([(2, 1), (1, 2)], [2, 2], [2, 2])) randn!(A) U, S, V = svd(A) @test isapprox(norm(array(U) * array(S) * array(V)' - array(A)), 0; atol=1e-14) end @testset "svd example 2" begin - A = BlockSparseTensor([(1, 2), (2, 3)], [2, 2], [3, 2, 3]) + A = dev(BlockSparseTensor([(1, 2), (2, 3)], [2, 2], [3, 2, 3])) randn!(A) U, S, V = svd(A) @test isapprox(norm(array(U) * array(S) * array(V)' - array(A)), 0.0; atol=1e-14) end @testset "svd example 3" begin - A = BlockSparseTensor([(2, 1), (3, 2)], [3, 2, 3], [2, 2]) + A = dev(BlockSparseTensor([(2, 1), (3, 2)], [3, 2, 3], [2, 2])) randn!(A) U, S, V = svd(A) @test isapprox(norm(array(U) * array(S) * array(V)' - array(A)), 0.0; atol=1e-14) end @testset "svd example 4" begin - A = BlockSparseTensor([(2, 1), (3, 2)], [2, 3, 4], [5, 6]) + A = dev(BlockSparseTensor([(2, 1), (3, 2)], [2, 3, 4], [5, 6])) randn!(A) U, S, V = svd(A) @test isapprox(norm(array(U) * array(S) * array(V)' - array(A)), 0.0; atol=1e-13) end @testset "svd example 5" begin - A = BlockSparseTensor([(1, 2), (2, 3)], [5, 6], [2, 3, 4]) + A = dev(BlockSparseTensor([(1, 2), (2, 3)], [5, 6], [2, 3, 4])) randn!(A) U, S, V = svd(A) @test isapprox(norm(array(U) * array(S) * array(V)' - array(A)), 0.0; atol=1e-13) diff --git a/NDTensors/test/linearalgebra.jl b/NDTensors/test/linearalgebra.jl index 1ffda564ab..b80fc38b8f 100644 --- a/NDTensors/test/linearalgebra.jl +++ b/NDTensors/test/linearalgebra.jl @@ -1,6 +1,7 @@ using NDTensors using LinearAlgebra using Test + if "cuda" in ARGS || "all" in ARGS using CUDA end diff --git a/src/mps/dmrg.jl b/src/mps/dmrg.jl index f81d12b288..1a0a933791 100644 --- a/src/mps/dmrg.jl +++ b/src/mps/dmrg.jl @@ -216,7 +216,6 @@ function dmrg(PH, psi0::MPS, sweeps::Sweeps; kwargs...) psi = copy(psi0) N = length(psi) - if !isortho(psi) || orthocenter(psi) != 1 psi = orthogonalize!(PH, psi, 1) end diff --git a/src/mps/mps.jl b/src/mps/mps.jl index 9983dc9918..2a19b6c1e0 100644 --- a/src/mps/mps.jl +++ b/src/mps/mps.jl @@ -547,11 +547,9 @@ function replacebond!(M::MPS, b::Int, phi::ITensor; kwargs...) sbp1 = siteind(M, b + 1) indsMb = replaceind(indsMb, sb, sbp1) end - L, R, spec = factorize( phi, indsMb; which_decomp=which_decomp, tags=tags(linkind(M, b)), kwargs... ) - M[b] = L M[b + 1] = R if ortho == "left" diff --git a/src/tensor_operations/matrix_decomposition.jl b/src/tensor_operations/matrix_decomposition.jl index 7406419e58..9da68a2c00 100644 --- a/src/tensor_operations/matrix_decomposition.jl +++ b/src/tensor_operations/matrix_decomposition.jl @@ -455,12 +455,14 @@ function qx(qx::Function, A::ITensor, Linds::Indices, Rinds::Indices; tags, kwar # be empty. A essentially becomes 1D after collection. # A, vαl, vαr, Linds, Rinds = add_trivial_index(A, Linds, Rinds) + # # Use combiners to render A down to a rank 2 tensor ready for matrix QR/QL routine. # CL, CR = combiner(Linds...), combiner(Rinds...) cL, cR = combinedind(CL), combinedind(CR) AC = A * CR * CL + # # Make sure we don't accidentally pass the transpose into the matrix qr/ql routine. #