From 671fae8cec1b1f0f6b5459d61ebd648f4bf50946 Mon Sep 17 00:00:00 2001
From: AmitRotem <66641519+AmitRotem@users.noreply.github.com>
Date: Thu, 23 Feb 2023 14:59:13 -0800
Subject: [PATCH 01/15] make shape and strides a tuple

---
 src/operators_lazytensor.jl | 15 +++++++++------
 1 file changed, 9 insertions(+), 6 deletions(-)

diff --git a/src/operators_lazytensor.jl b/src/operators_lazytensor.jl
index 80fb5b6e..4e072944 100644
--- a/src/operators_lazytensor.jl
+++ b/src/operators_lazytensor.jl
@@ -648,9 +648,7 @@ function _gemm_puresparse(alpha, op::Matrix, h::LazyTensor{B1,B2,F,I,T}, beta, r
         rmul!(result, beta)
     end
     N_k = length(h.basis_r.bases)
-    shape = [min(h.basis_l.shape[i], h.basis_r.shape[i]) for i=1:length(h.basis_l.shape)]
-    strides_j = _strides(h.basis_l.shape)
-    strides_k = _strides(h.basis_r.shape)
+    shape, strides_j, strides_k = _get_shape_and_srtides(h)
     _gemm_recursive_dense_lazy(1, N_k, 1, 1, alpha*h.factor, shape, strides_k, strides_j, h.indices, h, op, result)
 end
 
@@ -661,12 +659,17 @@ function _gemm_puresparse(alpha, h::LazyTensor{B1,B2,F,I,T}, op::Matrix, beta, r
         rmul!(result, beta)
     end
     N_k = length(h.basis_l.bases)
-    shape = [min(h.basis_l.shape[i], h.basis_r.shape[i]) for i=1:length(h.basis_l.shape)]
-    strides_j = _strides(h.basis_l.shape)
-    strides_k = _strides(h.basis_r.shape)
+    shape, strides_j, strides_k = _get_shape_and_srtides(h)
     _gemm_recursive_lazy_dense(1, N_k, 1, 1, alpha*h.factor, shape, strides_k, strides_j, h.indices, h, op, result)
 end
 
+function _get_shape_and_srtides(h::LazyTensor{B1,B2,F,I,T}) where {B1,B2,F,I,T<:Tuple{Vararg{SparseOpPureType}}}
+    shape = min.(_comp_size(h.basis_l), _comp_size(h.basis_r))
+    strides_j = _strides(_comp_size(h.basis_l))
+    strides_k = _strides(_comp_size(h.basis_r))
+    shape, strides_j, strides_k
+end
+
 _mul_puresparse!(result::DenseOpType{B1,B3},h::LazyTensor{B1,B2,F,I,T},op::DenseOpType{B2,B3},alpha,beta) where {B1,B2,B3,F,I,T<:Tuple{Vararg{SparseOpPureType}}} = (_gemm_puresparse(alpha, h, op.data, beta, result.data); result)
 _mul_puresparse!(result::DenseOpType{B1,B3},op::DenseOpType{B1,B2},h::LazyTensor{B2,B3,F,I,T},alpha,beta) where {B1,B2,B3,F,I,T<:Tuple{Vararg{SparseOpPureType}}} = (_gemm_puresparse(alpha, op.data, h, beta, result.data); result)

From 2a19fd9ecdfa59560b6ba1b10c721b315495e835 Mon Sep 17 00:00:00 2001
From: AmitRotem <66641519+AmitRotem@users.noreply.github.com>
Date: Thu, 23 Feb 2023 15:00:56 -0800
Subject: [PATCH 02/15] make _strides type stable for tuple

---
 src/operators_dense.jl | 4 ++++
 1 file changed, 4 insertions(+)

diff --git a/src/operators_dense.jl b/src/operators_dense.jl
index dd22e0bf..c3a2dbb2 100644
--- a/src/operators_dense.jl
+++ b/src/operators_dense.jl
@@ -274,6 +274,10 @@ function _strides(shape)
     return S
 end
 
+function _strides(shape::Ty)::Ty where Ty <: Tuple
+    accumulate(*, (1,Base.front(shape)...))
+end
+
 # Dense operator version
 @generated function _ptrace(::Type{Val{RANK}}, a,
                             shape_l, shape_r,
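The tuple method of `_strides` added in PATCH 02 builds the column-major strides of a composite shape with `accumulate`, so the result is a tuple of the same length and type as the input and the compiler can infer it. A quick REPL sketch (illustration only, not part of the patch):

    julia> shape = (2, 3, 4);

    julia> accumulate(*, (1, Base.front(shape)...))
    (1, 2, 6)

This agrees with the loop-based `_strides(shape)` method above it while avoiding the `Vector` allocation, which is what PATCH 01 relies on when it switches `shape`, `strides_j`, and `strides_k` over to tuples.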
From 0a5e5b5dd4bd49c58fbd9e924215ca0fa4f9adee Mon Sep 17 00:00:00 2001
From: AmitRotem <66641519+AmitRotem@users.noreply.github.com>
Date: Thu, 23 Feb 2023 15:04:17 -0800
Subject: [PATCH 03/15] reduce calls to _comp_size

---
 src/operators_lazytensor.jl | 8 ++++----
 1 file changed, 4 insertions(+), 4 deletions(-)

diff --git a/src/operators_lazytensor.jl b/src/operators_lazytensor.jl
index 4e072944..acffbd7d 100644
--- a/src/operators_lazytensor.jl
+++ b/src/operators_lazytensor.jl
@@ -664,10 +664,10 @@ function _gemm_puresparse(alpha, h::LazyTensor{B1,B2,F,I,T}, op::Matrix, beta, r
 end
 
 function _get_shape_and_srtides(h::LazyTensor{B1,B2,F,I,T}) where {B1,B2,F,I,T<:Tuple{Vararg{SparseOpPureType}}}
-    shape = min.(_comp_size(h.basis_l), _comp_size(h.basis_r))
-    strides_j = _strides(_comp_size(h.basis_l))
-    strides_k = _strides(_comp_size(h.basis_r))
-    shape, strides_j, strides_k
+    shape_l, shape_r = _comp_size(h.basis_l), _comp_size(h.basis_r)
+    shape = min.(shape_l, shape_r)
+    strides_j, strides_k = _strides(shape_l), _strides(shape_r)
+    return shape, strides_j, strides_k
 end
 
 _mul_puresparse!(result::DenseOpType{B1,B3},h::LazyTensor{B1,B2,F,I,T},op::DenseOpType{B2,B3},alpha,beta) where {B1,B2,B3,F,I,T<:Tuple{Vararg{SparseOpPureType}}} = (_gemm_puresparse(alpha, h, op.data, beta, result.data); result)

From e051a0a8ead29f2e11c88e1e89ad7afb864db61b Mon Sep 17 00:00:00 2001
From: AmitRotem <66641519+AmitRotem@users.noreply.github.com>
Date: Thu, 23 Feb 2023 15:09:38 -0800
Subject: [PATCH 04/15] remove type constraint from _get_shape_and_srtides

---
 src/operators_lazytensor.jl | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/src/operators_lazytensor.jl b/src/operators_lazytensor.jl
index acffbd7d..d4cf0408 100644
--- a/src/operators_lazytensor.jl
+++ b/src/operators_lazytensor.jl
@@ -663,7 +663,7 @@ function _gemm_puresparse(alpha, h::LazyTensor{B1,B2,F,I,T}, op::Matrix, beta, r
     _gemm_recursive_lazy_dense(1, N_k, 1, 1, alpha*h.factor, shape, strides_k, strides_j, h.indices, h, op, result)
 end
 
-function _get_shape_and_srtides(h::LazyTensor{B1,B2,F,I,T}) where {B1,B2,F,I,T<:Tuple{Vararg{SparseOpPureType}}}
+function _get_shape_and_srtides(h)
     shape_l, shape_r = _comp_size(h.basis_l), _comp_size(h.basis_r)
     shape = min.(shape_l, shape_r)
     strides_j, strides_k = _strides(shape_l), _strides(shape_r)

From 52c0551dd5a8d7a163afef596931e2f4dd1f971b Mon Sep 17 00:00:00 2001
From: AmitRotem <66641519+AmitRotem@users.noreply.github.com>
Date: Fri, 24 Feb 2023 12:05:03 -0800
Subject: [PATCH 05/15] fix typo

---
 src/operators_lazytensor.jl | 6 +++---
 1 file changed, 3 insertions(+), 3 deletions(-)

diff --git a/src/operators_lazytensor.jl b/src/operators_lazytensor.jl
index d4cf0408..bdd763cf 100644
--- a/src/operators_lazytensor.jl
+++ b/src/operators_lazytensor.jl
@@ -648,7 +648,7 @@ function _gemm_puresparse(alpha, op::Matrix, h::LazyTensor{B1,B2,F,I,T}, beta, r
         rmul!(result, beta)
     end
     N_k = length(h.basis_r.bases)
-    shape, strides_j, strides_k = _get_shape_and_srtides(h)
+    shape, strides_j, strides_k = _get_shape_and_strides(h)
     _gemm_recursive_dense_lazy(1, N_k, 1, 1, alpha*h.factor, shape, strides_k, strides_j, h.indices, h, op, result)
 end
 
@@ -659,11 +659,11 @@ function _gemm_puresparse(alpha, h::LazyTensor{B1,B2,F,I,T}, op::Matrix, beta, r
         rmul!(result, beta)
     end
     N_k = length(h.basis_l.bases)
-    shape, strides_j, strides_k = _get_shape_and_srtides(h)
+    shape, strides_j, strides_k = _get_shape_and_strides(h)
     _gemm_recursive_lazy_dense(1, N_k, 1, 1, alpha*h.factor, shape, strides_k, strides_j, h.indices, h, op, result)
 end
 
-function _get_shape_and_srtides(h)
+function _get_shape_and_strides(h)
     shape_l, shape_r = _comp_size(h.basis_l), _comp_size(h.basis_r)
     shape = min.(shape_l, shape_r)
     strides_j, strides_k = _strides(shape_l), _strides(shape_r)
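After PATCH 03–05 the helper reads each composite size only once and is finally spelled `_get_shape_and_strides`. As a rough sketch of what it returns — assuming `_comp_size` yields the per-subsystem dimensions as tuples, here `(2, 2)` on the left and `(3, 3)` on the right:

    # shape     == (2, 2)   elementwise min of the two shapes
    # strides_j == (1, 2)   column-major strides of the left basis
    # strides_k == (1, 3)   column-major strides of the right basis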
From 6070884048690a6acf1196c9cf83aec16e6f4b31 Mon Sep 17 00:00:00 2001
From: AmitRotem <66641519+AmitRotem@users.noreply.github.com>
Date: Fri, 24 Feb 2023 15:50:14 -0800
Subject: [PATCH 06/15] allow _gemm_recursive_lazy_dense matrix vector mul

---
 src/operators_lazytensor.jl | 12 +++---------
 1 file changed, 3 insertions(+), 9 deletions(-)

diff --git a/src/operators_lazytensor.jl b/src/operators_lazytensor.jl
index bdd763cf..7bbf4b43 100644
--- a/src/operators_lazytensor.jl
+++ b/src/operators_lazytensor.jl
@@ -609,7 +609,7 @@ end
 
 function _gemm_recursive_lazy_dense(i_k, N_k, K, J, val,
                         shape, strides_k, strides_j, indices, h::LazyTensor,
-                        op::Matrix, result::Matrix)
+                        op::Ts, result::Ts) where Ts<:Union{Vector,Matrix}
     if i_k > N_k
         for I=1:size(op, 2)
             result[J, I] += val*op[K, I]
@@ -652,7 +652,7 @@ function _gemm_puresparse(alpha, op::Matrix, h::LazyTensor{B1,B2,F,I,T}, beta, r
     _gemm_recursive_dense_lazy(1, N_k, 1, 1, alpha*h.factor, shape, strides_k, strides_j, h.indices, h, op, result)
 end
 
-function _gemm_puresparse(alpha, h::LazyTensor{B1,B2,F,I,T}, op::Matrix, beta, result::Matrix) where {B1,B2,F,I,T<:Tuple{Vararg{SparseOpPureType}}}
+function _gemm_puresparse(alpha, h::LazyTensor{B1,B2,F,I,T}, op::Ts, beta, result::Ts) where {B1,B2,F,I,T<:Tuple{Vararg{SparseOpPureType}},Ts<:Union{Vector,Matrix}}
     if iszero(beta)
         fill!(result, beta)
     elseif !isone(beta)
@@ -672,13 +672,7 @@ end
 
 _mul_puresparse!(result::DenseOpType{B1,B3},h::LazyTensor{B1,B2,F,I,T},op::DenseOpType{B2,B3},alpha,beta) where {B1,B2,B3,F,I,T<:Tuple{Vararg{SparseOpPureType}}} = (_gemm_puresparse(alpha, h, op.data, beta, result.data); result)
 _mul_puresparse!(result::DenseOpType{B1,B3},op::DenseOpType{B1,B2},h::LazyTensor{B2,B3,F,I,T},alpha,beta) where {B1,B2,B3,F,I,T<:Tuple{Vararg{SparseOpPureType}}} = (_gemm_puresparse(alpha, op.data, h, beta, result.data); result)
-
-function _mul_puresparse!(result::Ket{B1},a::LazyTensor{B1,B2,F,I,T},b::Ket{B2},alpha,beta) where {B1,B2,F,I,T<:Tuple{Vararg{SparseOpPureType}}}
-    b_data = reshape(b.data, length(b.data), 1)
-    result_data = reshape(result.data, length(result.data), 1)
-    _gemm_puresparse(alpha, a, b_data, beta, result_data)
-    result
-end
+_mul_puresparse!(result::Ket{B1},a::LazyTensor{B1,B2,F,I,T},b::Ket{B2},alpha,beta) where {B1,B2,F,I,T<:Tuple{Vararg{SparseOpPureType}}} = (_gemm_puresparse(alpha, a, b.data, beta, result.data); result)
 
 function _mul_puresparse!(result::Bra{B2},a::Bra{B1},b::LazyTensor{B1,B2,F,I,T},alpha,beta) where {B1,B2,F,I,T<:Tuple{Vararg{SparseOpPureType}}}
     a_data = reshape(a.data, 1, length(a.data))

From 5b473d6a75eb3217c9e274a0016cda6626b7c923 Mon Sep 17 00:00:00 2001
From: AmitRotem <66641519+AmitRotem@users.noreply.github.com>
Date: Fri, 24 Feb 2023 16:54:38 -0800
Subject: [PATCH 07/15] type fix

---
 src/operators_lazytensor.jl | 4 ++--
 1 file changed, 2 insertions(+), 2 deletions(-)

diff --git a/src/operators_lazytensor.jl b/src/operators_lazytensor.jl
index 7bbf4b43..a8c50259 100644
--- a/src/operators_lazytensor.jl
+++ b/src/operators_lazytensor.jl
@@ -609,7 +609,7 @@ end
 
 function _gemm_recursive_lazy_dense(i_k, N_k, K, J, val,
                         shape, strides_k, strides_j, indices, h::LazyTensor,
-                        op::Ts, result::Ts) where Ts<:Union{Vector,Matrix}
+                        op::VecOrMat, result::VecOrMat)
     if i_k > N_k
         for I=1:size(op, 2)
             result[J, I] += val*op[K, I]
@@ -652,7 +652,7 @@ function _gemm_puresparse(alpha, op::Matrix, h::LazyTensor{B1,B2,F,I,T}, beta, r
     _gemm_recursive_dense_lazy(1, N_k, 1, 1, alpha*h.factor, shape, strides_k, strides_j, h.indices, h, op, result)
 end
 
-function _gemm_puresparse(alpha, h::LazyTensor{B1,B2,F,I,T}, op::Ts, beta, result::Ts) where {B1,B2,F,I,T<:Tuple{Vararg{SparseOpPureType}},Ts<:Union{Vector,Matrix}}
+function _gemm_puresparse(alpha, h::LazyTensor{B1,B2,F,I,T}, op::VecOrMat, beta, result::VecOrMat) where {B1,B2,F,I,T<:Tuple{Vararg{SparseOpPureType}}}
     if iszero(beta)
         fill!(result, beta)
     elseif !isone(beta)
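With PATCH 06–07 the sparse-specialized kernels accept plain vectors, so applying a `LazyTensor` to a `Ket` no longer has to reshape the state into an n×1 matrix. A usage sketch, reusing the helpers that appear in the test files later in this series (`NLevelBasis`, `randoperator`, `randstate`):

    b1, b2 = NLevelBasis.((2, 3))
    h = LazyTensor(b1^2, b2^2, 2, sparse(randoperator(b1, b2)))  # 4×9, lazy
    ψ = randstate(b2^2)   # length-9 Ket
    ϕ = h * ψ             # length-4 Ket; dispatches to the Vector method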
From cca18de9ca032284f04b50dd41dcdc6c4a902c73 Mon Sep 17 00:00:00 2001
From: Amit Rotem
Date: Tue, 21 Mar 2023 15:22:52 -0700
Subject: [PATCH 08/15] relax type in _gemm_puresparse, _gemm_recursive_lazy_dense, and _gemm_recursive_dense_lazy

---
 src/operators_lazytensor.jl | 27 +++++++++++++++------------
 1 file changed, 15 insertions(+), 12 deletions(-)

diff --git a/src/operators_lazytensor.jl b/src/operators_lazytensor.jl
index a8c50259..e78779f7 100644
--- a/src/operators_lazytensor.jl
+++ b/src/operators_lazytensor.jl
@@ -572,9 +572,11 @@ end
 
 function _gemm_recursive_dense_lazy(i_k, N_k, K, J, val,
                         shape, strides_k, strides_j, indices, h::LazyTensor,
-                        op::Matrix, result::Matrix)
+                        op::AbstractArray, result::AbstractArray)
     if i_k > N_k
-        for I=1:size(op, 1)
+        if isa(op, AbstractVector)
+            result[K] += val*op[J]
+        else I=1:size(op, 1)
             result[I, K] += val*op[I, J]
         end
         return nothing
@@ -609,7 +611,7 @@ end
 
 function _gemm_recursive_lazy_dense(i_k, N_k, K, J, val,
                         shape, strides_k, strides_j, indices, h::LazyTensor,
-                        op::VecOrMat, result::VecOrMat)
+                        op::AbstractArray, result::AbstractArray)
     if i_k > N_k
         for I=1:size(op, 2)
             result[J, I] += val*op[K, I]
@@ -641,7 +643,7 @@ function _gemm_recursive_lazy_dense(i_k, N_k, K, J, val,
     end
 end
 
-function _gemm_puresparse(alpha, op::Matrix, h::LazyTensor{B1,B2,F,I,T}, beta, result::Matrix) where {B1,B2,F,I,T<:Tuple{Vararg{SparseOpPureType}}}
+function _gemm_puresparse(alpha, op::AbstractArray, h::LazyTensor{B1,B2,F,I,T}, beta, result::AbstractArray) where {B1,B2,F,I,T<:Tuple{Vararg{SparseOpPureType}}}
     if iszero(beta)
         fill!(result, beta)
     elseif !isone(beta)
@@ -652,7 +654,7 @@ function _gemm_puresparse(alpha, op::Matrix, h::LazyTensor{B1,B2,F,I,T}, beta, r
     _gemm_recursive_dense_lazy(1, N_k, 1, 1, alpha*h.factor, shape, strides_k, strides_j, h.indices, h, op, result)
 end
 
-function _gemm_puresparse(alpha, h::LazyTensor{B1,B2,F,I,T}, op::VecOrMat, beta, result::VecOrMat) where {B1,B2,F,I,T<:Tuple{Vararg{SparseOpPureType}}}
+function _gemm_puresparse(alpha, h::LazyTensor{B1,B2,F,I,T}, op::AbstractArray, beta, result::AbstractArray) where {B1,B2,F,I,T<:Tuple{Vararg{SparseOpPureType}}}
     if iszero(beta)
         fill!(result, beta)
     elseif !isone(beta)
@@ -673,10 +675,11 @@ end
 _mul_puresparse!(result::DenseOpType{B1,B3},h::LazyTensor{B1,B2,F,I,T},op::DenseOpType{B2,B3},alpha,beta) where {B1,B2,B3,F,I,T<:Tuple{Vararg{SparseOpPureType}}} = (_gemm_puresparse(alpha, h, op.data, beta, result.data); result)
 _mul_puresparse!(result::DenseOpType{B1,B3},op::DenseOpType{B1,B2},h::LazyTensor{B2,B3,F,I,T},alpha,beta) where {B1,B2,B3,F,I,T<:Tuple{Vararg{SparseOpPureType}}} = (_gemm_puresparse(alpha, op.data, h, beta, result.data); result)
 _mul_puresparse!(result::Ket{B1},a::LazyTensor{B1,B2,F,I,T},b::Ket{B2},alpha,beta) where {B1,B2,F,I,T<:Tuple{Vararg{SparseOpPureType}}} = (_gemm_puresparse(alpha, a, b.data, beta, result.data); result)
-
-function _mul_puresparse!(result::Bra{B2},a::Bra{B1},b::LazyTensor{B1,B2,F,I,T},alpha,beta) where {B1,B2,F,I,T<:Tuple{Vararg{SparseOpPureType}}}
-    a_data = reshape(a.data, 1, length(a.data))
-    result_data = reshape(result.data, 1, length(result.data))
-    _gemm_puresparse(alpha, a_data, b, beta, result_data)
-    result
-end
+_mul_puresparse!(result::Bra{B2},a::Bra{B1},b::LazyTensor{B1,B2,F,I,T},alpha,beta) where {B1,B2,F,I,T<:Tuple{Vararg{SparseOpPureType}}} = (_gemm_puresparse(alpha, a.data, b, beta, result.data); result)
+
+#function _mul_puresparse!(result::Bra{B2},a::Bra{B1},b::LazyTensor{B1,B2,F,I,T},alpha,beta) where {B1,B2,F,I,T<:Tuple{Vararg{SparseOpPureType}}}
+#    a_data = Base.ReshapedArray(a.data, (1, length(a.data)), ())
+#    result_data = Base.ReshapedArray(result.data, (1, length(result.data)), ())
+#    _gemm_puresparse(alpha, a_data, b, beta, result_data)
+#    result
+#end
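Note the compact `else` branch in the first hunk of PATCH 08: `else I=1:size(op, 1)` binds `I` to the whole row range, so the unchanged context line `result[I, K] += val*op[I, J]` becomes a vectorized slice update. A spelled-out equivalent (illustration only; the committed one-liner allocates a temporary for the slice, where the loop form would not):

    if isa(op, AbstractVector)
        result[K] += val*op[J]       # op carries Bra/Ket data as a flat vector
    else
        for I = 1:size(op, 1)        # op is a matrix: update column K
            result[I, K] += val*op[I, J]
        end
    end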
From b7bdb9401a748bdabc70ba1a8680b69353f6f3ed Mon Sep 17 00:00:00 2001
From: Amit Rotem
Date: Wed, 22 Mar 2023 14:43:44 -0700
Subject: [PATCH 09/15] fix size(AbstractOperator)

---
 src/operators.jl | 8 ++++++--
 1 file changed, 6 insertions(+), 2 deletions(-)

diff --git a/src/operators.jl b/src/operators.jl
index f8ecf9e5..4d8a0458 100644
--- a/src/operators.jl
+++ b/src/operators.jl
@@ -392,5 +392,9 @@ multiplicable(a::AbstractOperator, b::Ket) = multiplicable(a.basis_r, b.basis)
 multiplicable(a::Bra, b::AbstractOperator) = multiplicable(a.basis, b.basis_l)
 multiplicable(a::AbstractOperator, b::AbstractOperator) = multiplicable(a.basis_r, b.basis_l)
 
-Base.size(op::AbstractOperator) = prod(length(op.basis_l),length(op.basis_r))
-Base.size(op::AbstractOperator, i::Int) = (i==1 ? length(op.basis_l) : length(op.basis_r))
+Base.size(op::AbstractOperator) = (length(op.basis_l),length(op.basis_r))
+function Base.size(op::AbstractOperator, i::Int)
+    i < 1 && throw(ArgumentError("dimension out of range, should be strictly positive, got $i"))
+    i > 2 && return 1
+    i==1 ? length(op.basis_l) : length(op.basis_r)
+end
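With PATCH 09, `size` on an `AbstractOperator` follows the `AbstractArray` conventions: a tuple for the whole operator, and 1 for any dimension past the second. A hypothetical REPL session (any operator type works; a random dense operator is used here):

    julia> op = randoperator(FockBasis(2));  # cutoff 2, i.e. a 3-dimensional space

    julia> size(op)
    (3, 3)

    julia> size(op, 3)   # trailing singleton dimensions, as for arrays
    1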
From 7e64f549d1d55fc2ebfb2c33d7f75f092a4b75f5 Mon Sep 17 00:00:00 2001
From: Amit Rotem
Date: Wed, 22 Mar 2023 14:44:49 -0700
Subject: [PATCH 10/15] add dim check for _gemm_puresparse

---
 src/operators_lazytensor.jl | 31 +++++++++++++++++++++++++------
 1 file changed, 25 insertions(+), 6 deletions(-)

diff --git a/src/operators_lazytensor.jl b/src/operators_lazytensor.jl
index e78779f7..938bdd11 100644
--- a/src/operators_lazytensor.jl
+++ b/src/operators_lazytensor.jl
@@ -643,7 +643,31 @@ function _gemm_recursive_lazy_dense(i_k, N_k, K, J, val,
     end
 end
 
+"""
+    check_mul!_compatibility(R, A, B)
+Check that `R`, `A`, `B` are dimensionally compatible for `R .= A*B`, and that `R` is not aliased with either `A` or `B`.
+"""
+function check_mul!_compatibility(R::AbstractVecOrMat, A, B)
+    # R .= A*B
+    if size(A, 2) != size(B, 1)
+        throw(DimensionMismatch(lazy"A has dimensions $(size(A)) but B has dimensions $(size(B)). Can't do `A*B`"))
+    end
+    if size(R) != (size(A, 1), Base.tail(size(B))...)
+        throw(DimensionMismatch(lazy"R has dimensions $(size(R)) but A*B has dimensions $((size(A, 1), Base.tail(size(B))...)). Can't do `R.=A*B`"))
+    end
+    if R===A || R===B
+        throw(ArgumentError(lazy"output matrix must not be aliased with input matrix"))
+    end
+    nothing
+end
+
 function _gemm_puresparse(alpha, op::AbstractArray, h::LazyTensor{B1,B2,F,I,T}, beta, result::AbstractArray) where {B1,B2,F,I,T<:Tuple{Vararg{SparseOpPureType}}}
+    if op isa AbstractVector
+        # _gemm_recursive_dense_lazy will treat `op` as a `Bra`
+        check_mul!_compatibility(result, h', op)
+    else
+        check_mul!_compatibility(result, op, h)
+    end
     if iszero(beta)
         fill!(result, beta)
     elseif !isone(beta)
@@ -655,6 +679,7 @@ function _gemm_puresparse(alpha, op::AbstractArray, h::LazyTensor{B1,B2,F,I,T}, beta, r
     _gemm_recursive_dense_lazy(1, N_k, 1, 1, alpha*h.factor, shape, strides_k, strides_j, h.indices, h, op, result)
 end
 
 function _gemm_puresparse(alpha, h::LazyTensor{B1,B2,F,I,T}, op::AbstractArray, beta, result::AbstractArray) where {B1,B2,F,I,T<:Tuple{Vararg{SparseOpPureType}}}
+    check_mul!_compatibility(result, h, op)
     if iszero(beta)
         fill!(result, beta)
     elseif !isone(beta)
@@ -677,9 +702,3 @@ _mul_puresparse!(result::DenseOpType{B1,B3},op::DenseOpType{B1,B2},h::LazyTensor
 _mul_puresparse!(result::Ket{B1},a::LazyTensor{B1,B2,F,I,T},b::Ket{B2},alpha,beta) where {B1,B2,F,I,T<:Tuple{Vararg{SparseOpPureType}}} = (_gemm_puresparse(alpha, a, b.data, beta, result.data); result)
 _mul_puresparse!(result::Bra{B2},a::Bra{B1},b::LazyTensor{B1,B2,F,I,T},alpha,beta) where {B1,B2,F,I,T<:Tuple{Vararg{SparseOpPureType}}} = (_gemm_puresparse(alpha, a.data, b, beta, result.data); result)
 
-#function _mul_puresparse!(result::Bra{B2},a::Bra{B1},b::LazyTensor{B1,B2,F,I,T},alpha,beta) where {B1,B2,F,I,T<:Tuple{Vararg{SparseOpPureType}}}
-#    a_data = Base.ReshapedArray(a.data, (1, length(a.data)), ())
-#    result_data = Base.ReshapedArray(result.data, (1, length(result.data)), ())
-#    _gemm_puresparse(alpha, a_data, b, beta, result_data)
-#    result
-#end

From a616377707621543b36df113aa7d72a5e5813194 Mon Sep 17 00:00:00 2001
From: Amit Rotem
Date: Wed, 22 Mar 2023 14:46:15 -0700
Subject: [PATCH 11/15] test LazyTensor with sparse dimension mismatch

---
 test/test_operators_lazytensor.jl | 8 ++++++++
 1 file changed, 8 insertions(+)

diff --git a/test/test_operators_lazytensor.jl b/test/test_operators_lazytensor.jl
index 799bd030..bbaef1b9 100644
--- a/test/test_operators_lazytensor.jl
+++ b/test/test_operators_lazytensor.jl
@@ -404,5 +404,13 @@ dop = randoperator(b3a⊗b3b, b2a⊗b2b)
 @test dop*lop' ≈ Operator(dop.basis_l, lop.basis_l, dop.data*dense(lop).data')
 @test lop*dop' ≈ Operator(lop.basis_l, dop.basis_l, dense(lop).data*dop.data')
 
+# Dimension mismatches for LazyTensor with sparse
+b1, b2 = NLevelBasis.((2, 3))
+Lop1 = LazyTensor(b1^2, b2^2, 2, sparse(randoperator(b1, b2)))
+@test_throws DimensionMismatch Lop1*Lop1
+@test_throws DimensionMismatch dense(Lop1)*Lop1
+@test_throws DimensionMismatch sparse(Lop1)*Lop1
+@test_throws DimensionMismatch Lop1*dense(Lop1)
+@test_throws DimensionMismatch Lop1*sparse(Lop1)
 
 end # testset

From 316ba34b23a9c1e28d4e418af6c175b3cf1a0da5 Mon Sep 17 00:00:00 2001
From: AmitRotem <66641519+AmitRotem@users.noreply.github.com>
Date: Wed, 22 Mar 2023 15:21:45 -0700
Subject: [PATCH 12/15] change error thrown by size to ErrorException

---
 src/operators.jl | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/src/operators.jl b/src/operators.jl
index 4d8a0458..fa75cf18 100644
--- a/src/operators.jl
+++ b/src/operators.jl
@@ -394,7 +394,7 @@ multiplicable(a::AbstractOperator, b::AbstractOperator) = multiplicable(a.basis_
 
 Base.size(op::AbstractOperator) = (length(op.basis_l),length(op.basis_r))
 function Base.size(op::AbstractOperator, i::Int)
-    i < 1 && throw(ArgumentError("dimension out of range, should be strictly positive, got $i"))
+    i < 1 && throw(ErrorException("dimension out of range, should be strictly positive, got $i"))
     i > 2 && return 1
     i==1 ? length(op.basis_l) : length(op.basis_r)
 end
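The asymmetry in PATCH 10's checks mirrors what the kernels actually compute: on the dense-times-lazy path a vector `op` is contracted as a row (`_gemm_recursive_dense_lazy` treats it as a `Bra`), so compatibility is checked against `h'`. PATCH 11 pins the observable behavior down; for instance, with the 4×9 operator built there:

    b1, b2 = NLevelBasis.((2, 3))
    Lop1 = LazyTensor(b1^2, b2^2, 2, sparse(randoperator(b1, b2)))  # 4×9
    Lop1 * Lop1   # DimensionMismatch: 9-dim right basis vs 4-dim left basis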
From 6984ff091336076563626a90c93c9ae24bc9b915 Mon Sep 17 00:00:00 2001
From: AmitRotem <66641519+AmitRotem@users.noreply.github.com>
Date: Wed, 22 Mar 2023 15:27:16 -0700
Subject: [PATCH 13/15] test size of AbstractOperator

---
 test/test_operators.jl | 10 ++++++++++
 1 file changed, 10 insertions(+)

diff --git a/test/test_operators.jl b/test/test_operators.jl
index a11afecc..bd20b4b4 100644
--- a/test/test_operators.jl
+++ b/test/test_operators.jl
@@ -130,4 +130,14 @@ op12 = destroy(bfock)⊗sigmap(bspin)
 @test embed(b, [1,2], op12) == destroy(bfock)⊗sigmap(bspin)⊗one(bspin)
 @test embed(b, [1,3], op12) == destroy(bfock)⊗one(bspin)⊗sigmap(bspin)
 
+# size of AbstractOperator
+b1, b2 = NLevelBasis.((2, 3))
+Lop1 = LazyTensor(b1^2, b2^2, 2, sparse(randoperator(b1, b2)))
+@test size(Lop1) == size(dense(Lop1)) == size(dense(Lop1).data)
+@test all(size(Lop1, k) == size(dense(Lop1), k) for k=1:4)
+@test_throws ErrorException size(Lop1, 0)
+@test_throws ErrorException size(Lop1, -1)
+@test_throws ErrorException size(dense(Lop1), 0) # check for consistency
+@test_throws ErrorException size(dense(Lop1), -1)
 
 end # testset

From 8aa2ab997ebe0062ae098abc73fe71b264ae5b98 Mon Sep 17 00:00:00 2001
From: Amit Rotem
Date: Fri, 24 Mar 2023 16:51:31 -0700
Subject: [PATCH 14/15] use lazy string

---
 src/operators.jl | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/src/operators.jl b/src/operators.jl
index fa75cf18..0e2a9d52 100644
--- a/src/operators.jl
+++ b/src/operators.jl
@@ -394,7 +394,7 @@ multiplicable(a::AbstractOperator, b::AbstractOperator) = multiplicable(a.basis_
 
 Base.size(op::AbstractOperator) = (length(op.basis_l),length(op.basis_r))
 function Base.size(op::AbstractOperator, i::Int)
-    i < 1 && throw(ErrorException("dimension out of range, should be strictly positive, got $i"))
+    i < 1 && throw(ErrorException(lazy"dimension out of range, should be strictly positive, got $i"))
     i > 2 && return 1
     i==1 ? length(op.basis_l) : length(op.basis_r)
 end
From f91c5a036da45a9f28f6ada86a10a888b41bc6b2 Mon Sep 17 00:00:00 2001
From: Amit Rotem
Date: Fri, 24 Mar 2023 16:53:34 -0700
Subject: [PATCH 15/15] split check_mul!_compatibility and specialize for vectors

---
 src/operators_lazytensor.jl | 19 +++++++++++++------
 1 file changed, 13 insertions(+), 6 deletions(-)

diff --git a/src/operators_lazytensor.jl b/src/operators_lazytensor.jl
index 938bdd11..1e980f9f 100644
--- a/src/operators_lazytensor.jl
+++ b/src/operators_lazytensor.jl
@@ -648,23 +648,30 @@ end
 Check that `R`, `A`, `B` are dimensionally compatible for `R .= A*B`, and that `R` is not aliased with either `A` or `B`.
 """
 function check_mul!_compatibility(R::AbstractVecOrMat, A, B)
+    _check_mul!_aliasing_compatibility(R, A, B)
+    _check_mul!_dim_compatibility(size(R), size(A), size(B))
+end
+function _check_mul!_dim_compatibility(sizeR::Tuple, sizeA::Tuple, sizeB::Tuple)
     # R .= A*B
-    if size(A, 2) != size(B, 1)
-        throw(DimensionMismatch(lazy"A has dimensions $(size(A)) but B has dimensions $(size(B)). Can't do `A*B`"))
+    if sizeA[2] != sizeB[1]
+        throw(DimensionMismatch(lazy"A has dimensions $sizeA but B has dimensions $sizeB. Can't do `A*B`"))
     end
-    if size(R) != (size(A, 1), Base.tail(size(B))...)
-        throw(DimensionMismatch(lazy"R has dimensions $(size(R)) but A*B has dimensions $((size(A, 1), Base.tail(size(B))...)). Can't do `R.=A*B`"))
+    if sizeR != (sizeA[1], Base.tail(sizeB)...) # using tail to account for vectors
+        throw(DimensionMismatch(lazy"R has dimensions $sizeR but A*B has dimensions $((sizeA[1], Base.tail(sizeB)...)). Can't do `R.=A*B`"))
     end
+end
+function _check_mul!_aliasing_compatibility(R, A, B)
     if R===A || R===B
         throw(ArgumentError(lazy"output matrix must not be aliased with input matrix"))
    end
-    nothing
 end
+
 
 function _gemm_puresparse(alpha, op::AbstractArray, h::LazyTensor{B1,B2,F,I,T}, beta, result::AbstractArray) where {B1,B2,F,I,T<:Tuple{Vararg{SparseOpPureType}}}
     if op isa AbstractVector
         # _gemm_recursive_dense_lazy will treat `op` as a `Bra`
-        check_mul!_compatibility(result, h', op)
+        _check_mul!_aliasing_compatibility(result, op, h)
+        _check_mul!_dim_compatibility(size(result), reverse(size(h)), size(op))
     else
         check_mul!_compatibility(result, op, h)
     end
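Taken together, the series turns shapes and strides into tuples, lets the sparse-specialized `LazyTensor` kernels act on vectors directly, and guards the in-place products with explicit dimension and aliasing checks. In PATCH 15, `reverse(size(h))` on the vector path is simply `size(h')` written without materializing the lazy adjoint. A closing end-to-end sketch under the same assumptions as the tests above (QuantumOpticsBase with SparseArrays loaded):

    using QuantumOpticsBase, SparseArrays
    using LinearAlgebra: mul!

    b1, b2 = NLevelBasis.((2, 3))
    h = LazyTensor(b1^2, b2^2, 2, sparse(randoperator(b1, b2)))  # 4×9, lazy
    ψ = randstate(b2^2)          # length-9 Ket
    out = Ket(b1^2)              # length-4 result buffer

    mul!(out, h, ψ)              # in-place product, no reshape to a 9×1 matrix
    size(h) == (4, 9)            # a tuple, per PATCH 09
    size(h, 3) == 1              # trailing dimensions, per PATCH 09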