diff --git a/base/abstractarray.jl b/base/abstractarray.jl index 609f1d78dd1e6..7d54e3311ef11 100644 --- a/base/abstractarray.jl +++ b/base/abstractarray.jl @@ -3,20 +3,31 @@ ## Basic functions ## """ - AbstractArray{T,N} + ArrayLike{N} + +Supertype for `N`-dimensional arrays (or array-like types) with or without a +pre-defined element type. For arrays with a pre-defined eltype, use +[`AbstractArray`](@ref). +""" +ArrayLike + +""" + AbstractArray{T,N} <: ArrayLike{N} Supertype for `N`-dimensional arrays (or array-like types) with elements of type `T`. [`Array`](@ref) and other types are subtypes of this. See the manual section on the -[`AbstractArray` interface](@ref man-interface-array). +[`AbstractArray` interface](@ref man-interface-array). For arrays without a +pre-defined eltype, use [`ArrayLike`](@ref). """ AbstractArray -convert(::Type{T}, a::T) where {T<:AbstractArray} = a -convert(::Type{AbstractArray{T}}, a::AbstractArray) where {T} = AbstractArray{T}(a) -convert(::Type{AbstractArray{T,N}}, a::AbstractArray{<:Any,N}) where {T,N} = AbstractArray{T,N}(a) +convert(::Type{T}, a::T) where {T<:ArrayLike} = a +convert(::Type{AbstractArray{T}}, a::ArrayLike) where {T} = AbstractArray{T}(a) +convert(::Type{AbstractArray{T,N}}, a::ArrayLike{N}) where {T,N} = AbstractArray{T,N}(a) +convert(::Type{AbstractArray{T,N}}, a::AbstractArray{T,N}) where {T,N} = a # specific """ - size(A::AbstractArray, [dim]) + size(A::ArrayLike, [dim]) Return a tuple containing the dimensions of `A`. Optionally you can specify a dimension to just get the length of that dimension. @@ -35,7 +46,7 @@ julia> size(A, 2) 3 ``` """ -size(t::AbstractArray{T,N}, d) where {T,N} = d::Integer <= N ? size(t)[d] : 1 +size(t::ArrayLike{N}, d) where {N} = d::Integer <= N ? size(t)[d] : 1 """ axes(A, d) @@ -52,7 +63,7 @@ julia> axes(A, 2) Base.OneTo(6) ``` """ -function axes(A::AbstractArray{T,N}, d) where {T,N} +function axes(A::ArrayLike{N}, d) where {N} @_inline_meta d::Integer <= N ? axes(A)[d] : OneTo(1) end @@ -91,19 +102,19 @@ require_one_based_indexing(A...) = !has_offset_axes(A...) || throw(ArgumentError # Performance optimization: get rid of a branch on `d` in `axes(A, d)` # for d=1. 1d arrays are heavily used, and the first dimension comes up # in other applications. -axes1(A::AbstractArray{<:Any,0}) = OneTo(1) -axes1(A::AbstractArray) = (@_inline_meta; axes(A)[1]) +axes1(A::ArrayLike{0}) = OneTo(1) +axes1(A::ArrayLike) = (@_inline_meta; axes(A)[1]) axes1(iter) = OneTo(length(iter)) unsafe_indices(A) = axes(A) unsafe_indices(r::AbstractRange) = (OneTo(unsafe_length(r)),) # Ranges use checked_sub for size -keys(a::AbstractArray) = CartesianIndices(axes(a)) -keys(a::AbstractVector) = LinearIndices(a) +keys(a::ArrayLike) = CartesianIndices(axes(a)) +keys(a::ArrayLike{1}) = LinearIndices(a) """ - keytype(T::Type{<:AbstractArray}) - keytype(A::AbstractArray) + keytype(T::Type{<:ArrayLike}) + keytype(A::ArrayLike) Return the key type of an array. This is equal to the `eltype` of the result of `keys(...)`, and is provided @@ -121,16 +132,16 @@ CartesianIndex{2} !!! compat "Julia 1.2" For arrays, this function requires at least Julia 1.2. 
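Since `keytype` is defined as the `eltype` of `keys(...)`, the methods below mirror the `keys` definitions changed earlier in this hunk. A quick orientation on stock arrays (an aside, not part of the diff):

```julia
keys([10, 20, 30])  # LinearIndices: the keys of a vector are plain Ints
keys([1 2; 3 4])    # CartesianIndices((2, 2)): keys of an N-d array are CartesianIndex{N}
```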
""" -keytype(a::AbstractArray) = keytype(typeof(a)) +keytype(a::ArrayLike) = keytype(typeof(a)) -keytype(A::Type{<:AbstractArray}) = CartesianIndex{ndims(A)} -keytype(A::Type{<:AbstractVector}) = Int +keytype(A::Type{<:ArrayLike}) = CartesianIndex{ndims(A)} +keytype(A::Type{<:ArrayLike{1}}) = Int -valtype(a::AbstractArray) = valtype(typeof(a)) +valtype(a::ArrayLike) = valtype(typeof(a)) """ - valtype(T::Type{<:AbstractArray}) - valtype(A::AbstractArray) + valtype(T::Type{<:ArrayLike}) + valtype(A::ArrayLike) Return the value type of an array. This is identical to `eltype` and is provided mainly for compatibility with the dictionary interface. @@ -144,16 +155,16 @@ String !!! compat "Julia 1.2" For arrays, this function requires at least Julia 1.2. """ -valtype(A::Type{<:AbstractArray}) = eltype(A) +valtype(A::Type{<:ArrayLike}) = eltype(A) -prevind(::AbstractArray, i::Integer) = Int(i)-1 -nextind(::AbstractArray, i::Integer) = Int(i)+1 +prevind(::ArrayLike, i::Integer) = Int(i)-1 +nextind(::ArrayLike, i::Integer) = Int(i)+1 eltype(::Type{<:AbstractArray{E}}) where {E} = @isdefined(E) ? E : Any -elsize(A::AbstractArray) = elsize(typeof(A)) +elsize(A::ArrayLike) = elsize(typeof(A)) """ - ndims(A::AbstractArray) -> Integer + ndims(A::ArrayLike) -> Integer Return the number of dimensions of `A`. @@ -165,8 +176,8 @@ julia> ndims(A) 3 ``` """ -ndims(::AbstractArray{T,N}) where {T,N} = N -ndims(::Type{<:AbstractArray{T,N}}) where {T,N} = N +ndims(::ArrayLike{N}) where {N} = N +ndims(::Type{<:ArrayLike{N}}) where {N} = N """ length(collection) -> Integer @@ -190,7 +201,7 @@ julia> length([1 2; 3 4]) length """ - length(A::AbstractArray) + length(A::ArrayLike) Return the number of elements in the array, defaults to `prod(size(A))`. @@ -203,13 +214,13 @@ julia> length([1 2; 3 4]) 4 ``` """ -length(t::AbstractArray) = (@_inline_meta; prod(size(t))) +length(t::ArrayLike) = (@_inline_meta; prod(size(t))) # `eachindex` is mostly an optimization of `keys` eachindex(itrs...) = keys(itrs...) # eachindex iterates over all indices. IndexCartesian definitions are later. -eachindex(A::AbstractVector) = (@_inline_meta(); axes1(A)) +eachindex(A::ArrayLike{1}) = (@_inline_meta(); axes1(A)) @noinline function throw_eachindex_mismatch(::IndexLinear, A...) throw(DimensionMismatch("all inputs to eachindex must have the same indices, got $(join(eachindex.(A), ", ", " and "))")) @@ -221,14 +232,14 @@ end """ eachindex(A...) -Create an iterable object for visiting each index of an `AbstractArray` `A` in an efficient +Create an iterable object for visiting each index of an `ArrayLike` `A` in an efficient manner. For array types that have opted into fast linear indexing (like `Array`), this is simply the range `1:length(A)`. For other array types, return a specialized Cartesian range to efficiently index into the array with indices specified for every dimension. For other iterables, including strings and dictionaries, return an iterator object supporting arbitrary index types (e.g. unevenly spaced or non-integer indices). -If you supply more than one `AbstractArray` argument, `eachindex` will create an +If you supply more than one `ArrayLike` argument, `eachindex` will create an iterable object that is fast for all arguments (a [`UnitRange`](@ref) if all inputs have fast linear indexing, a [`CartesianIndices`](@ref) otherwise). 
@@ -253,19 +264,19 @@ CartesianIndex(1, 1) CartesianIndex(2, 1) ``` """ -eachindex(A::AbstractArray) = (@_inline_meta(); eachindex(IndexStyle(A), A)) +eachindex(A::ArrayLike) = (@_inline_meta(); eachindex(IndexStyle(A), A)) -function eachindex(A::AbstractArray, B::AbstractArray) +function eachindex(A::ArrayLike, B::ArrayLike) @_inline_meta eachindex(IndexStyle(A,B), A, B) end -function eachindex(A::AbstractArray, B::AbstractArray...) +function eachindex(A::ArrayLike, B::ArrayLike...) @_inline_meta eachindex(IndexStyle(A,B...), A, B...) end -eachindex(::IndexLinear, A::AbstractArray) = (@_inline_meta; OneTo(length(A))) -eachindex(::IndexLinear, A::AbstractVector) = (@_inline_meta; axes1(A)) -function eachindex(::IndexLinear, A::AbstractArray, B::AbstractArray...) +eachindex(::IndexLinear, A::ArrayLike) = (@_inline_meta; OneTo(length(A))) +eachindex(::IndexLinear, A::ArrayLike{1}) = (@_inline_meta; axes1(A)) +function eachindex(::IndexLinear, A::ArrayLike, B::ArrayLike...) @_inline_meta indsA = eachindex(IndexLinear(), A) _all_match_first(X->eachindex(IndexLinear(), X), indsA, B...) || @@ -279,7 +290,7 @@ end _all_match_first(f::F, inds) where F<:Function = true # keys with an IndexStyle -keys(s::IndexStyle, A::AbstractArray, B::AbstractArray...) = eachindex(s, A, B...) +keys(s::IndexStyle, A::ArrayLike, B::ArrayLike...) = eachindex(s, A, B...) """ lastindex(collection) -> Integer @@ -299,8 +310,8 @@ julia> lastindex(rand(3,4,5), 2) 4 ``` """ -lastindex(a::AbstractArray) = (@_inline_meta; last(eachindex(IndexLinear(), a))) -lastindex(a::AbstractArray, d) = (@_inline_meta; last(axes(a, d))) +lastindex(a::ArrayLike) = (@_inline_meta; last(eachindex(IndexLinear(), a))) +lastindex(a::ArrayLike, d) = (@_inline_meta; last(axes(a, d))) """ firstindex(collection) -> Integer @@ -317,10 +328,10 @@ julia> firstindex(rand(3,4,5), 2) 1 ``` """ -firstindex(a::AbstractArray) = (@_inline_meta; first(eachindex(IndexLinear(), a))) -firstindex(a::AbstractArray, d) = (@_inline_meta; first(axes(a, d))) +firstindex(a::ArrayLike) = (@_inline_meta; first(eachindex(IndexLinear(), a))) +firstindex(a::ArrayLike, d) = (@_inline_meta; first(axes(a, d))) -first(a::AbstractArray) = a[first(eachindex(a))] +first(a::ArrayLike) = a[first(eachindex(a))] """ first(coll) @@ -392,14 +403,14 @@ julia> stride(A,3) 12 ``` """ -stride(A::AbstractArray, k::Integer) = strides(A)[k] +stride(A::ArrayLike, k::Integer) = strides(A)[k] @inline size_to_strides(s, d, sz...) = (s, size_to_strides(s * d, sz...)...) size_to_strides(s, d) = (s,) size_to_strides(s) = () -function isassigned(a::AbstractArray, i::Integer...) +function isassigned(a::ArrayLike, i::Integer...) try a[i...] true @@ -453,7 +464,7 @@ end checkbounds(Bool, A, I...) Return `true` if the specified indices `I` are in bounds for the given -array `A`. Subtypes of `AbstractArray` should specialize this method +array `A`. Subtypes of `ArrayLike` should specialize this method if they need to provide custom bounds checking behaviors; however, in many cases one can rely on `A`'s indices and [`checkindex`](@ref). @@ -476,18 +487,18 @@ julia> checkbounds(Bool, A, 1:3, 2:4) false ``` """ -function checkbounds(::Type{Bool}, A::AbstractArray, I...) +function checkbounds(::Type{Bool}, A::ArrayLike, I...) 
@_inline_meta checkbounds_indices(Bool, axes(A), I) end # Linear indexing is explicitly allowed when there is only one (non-cartesian) index -function checkbounds(::Type{Bool}, A::AbstractArray, i) +function checkbounds(::Type{Bool}, A::ArrayLike, i) @_inline_meta checkindex(Bool, eachindex(IndexLinear(), A), i) end # As a special extension, allow using logical arrays that match the source array exactly -function checkbounds(::Type{Bool}, A::AbstractArray{<:Any,N}, I::AbstractArray{Bool,N}) where N +function checkbounds(::Type{Bool}, A::ArrayLike{N}, I::AbstractArray{Bool,N}) where N @_inline_meta axes(A) == axes(I) end @@ -497,7 +508,7 @@ end Throw an error if the specified indices `I` are not in bounds for the given array `A`. """ -function checkbounds(A::AbstractArray, I...) +function checkbounds(A::ArrayLike, I...) @_inline_meta checkbounds(Bool, A, I...) || throw_boundserror(A, I) nothing @@ -565,7 +576,7 @@ function checkindex(::Type{Bool}, inds::AbstractUnitRange, r::AbstractRange) end checkindex(::Type{Bool}, indx::AbstractUnitRange, I::AbstractVector{Bool}) = indx == axes1(I) checkindex(::Type{Bool}, indx::AbstractUnitRange, I::AbstractArray{Bool}) = false -function checkindex(::Type{Bool}, inds::AbstractUnitRange, I::AbstractArray) +function checkindex(::Type{Bool}, inds::AbstractUnitRange, I::ArrayLike) @_inline_meta b = true for i in I @@ -587,7 +598,7 @@ given source array. The second and third arguments are both optional, defaulting given array's `eltype` and `size`. The dimensions may be specified either as a single tuple argument or as a series of integer arguments. -Custom AbstractArray subtypes may choose which specific array type is best-suited to return +Custom ArrayLike subtypes may choose which specific array type is best-suited to return for the given element type and dimensionality. If they do not specialize this method, the default is an `Array{element_type}(undef, dims...)`. @@ -622,18 +633,18 @@ julia> similar(falses(10), Float64, 2, 4) """ similar(a::AbstractArray{T}) where {T} = similar(a, T) -similar(a::AbstractArray, ::Type{T}) where {T} = similar(a, T, to_shape(axes(a))) +similar(a::ArrayLike, ::Type{T}) where {T} = similar(a, T, to_shape(axes(a))) similar(a::AbstractArray{T}, dims::Tuple) where {T} = similar(a, T, to_shape(dims)) similar(a::AbstractArray{T}, dims::DimOrInd...) where {T} = similar(a, T, to_shape(dims)) -similar(a::AbstractArray, ::Type{T}, dims::DimOrInd...) where {T} = similar(a, T, to_shape(dims)) +similar(a::ArrayLike, ::Type{T}, dims::DimOrInd...) where {T} = similar(a, T, to_shape(dims)) # Similar supports specifying dims as either Integers or AbstractUnitRanges or any mixed combination # thereof. Ideally, we'd just convert Integers to OneTos and then call a canonical method with the axes, -# but we don't want to require all AbstractArray subtypes to dispatch on Base.OneTo. So instead we +# but we don't want to require all ArrayLike subtypes to dispatch on Base.OneTo. 
So instead we # define this method to convert supported axes to Ints, with the expectation that an offset array # package will define a method with dims::Tuple{Union{Integer, UnitRange}, Vararg{Union{Integer, UnitRange}}} -similar(a::AbstractArray, ::Type{T}, dims::Tuple{Union{Integer, OneTo}, Vararg{Union{Integer, OneTo}}}) where {T} = similar(a, T, to_shape(dims)) +similar(a::ArrayLike, ::Type{T}, dims::Tuple{Union{Integer, OneTo}, Vararg{Union{Integer, OneTo}}}) where {T} = similar(a, T, to_shape(dims)) # similar creates an Array by default -similar(a::AbstractArray, ::Type{T}, dims::Dims{N}) where {T,N} = Array{T,N}(undef, dims) +similar(a::ArrayLike, ::Type{T}, dims::Dims{N}) where {T,N} = Array{T,N}(undef, dims) to_shape(::Tuple{}) = () to_shape(dims::Dims) = dims @@ -666,12 +677,12 @@ indices of the result will match `A`. would create a 1-dimensional logical array whose indices match those of the columns of `A`. """ -similar(::Type{T}, dims::DimOrInd...) where {T<:AbstractArray} = similar(T, dims) -similar(::Type{T}, shape::Tuple{Union{Integer, OneTo}, Vararg{Union{Integer, OneTo}}}) where {T<:AbstractArray} = similar(T, to_shape(shape)) -similar(::Type{T}, dims::Dims) where {T<:AbstractArray} = T(undef, dims) +similar(::Type{T}, dims::DimOrInd...) where {T<:ArrayLike} = similar(T, dims) +similar(::Type{T}, shape::Tuple{Union{Integer, OneTo}, Vararg{Union{Integer, OneTo}}}) where {T<:ArrayLike} = similar(T, to_shape(shape)) +similar(::Type{T}, dims::Dims) where {T<:ArrayLike} = T(undef, dims) """ - empty(v::AbstractVector, [eltype]) + empty(v::ArrayLike{1}, [eltype]) Create an empty vector similar to `v`, optionally changing the `eltype`. @@ -705,9 +716,9 @@ See also [`copyto!`](@ref). This method requires at least Julia 1.1. In Julia 1.0 this method is available from the `Future` standard library as `Future.copy!`. """ -copy!(dst::AbstractVector, src::AbstractVector) = append!(empty!(dst), src) +copy!(dst::ArrayLike{1}, src::ArrayLike{1}) = append!(empty!(dst), src) -function copy!(dst::AbstractArray, src::AbstractArray) +function copy!(dst::ArrayLike, src::ArrayLike) axes(dst) == axes(src) || throw(ArgumentError( "arrays must have the same axes for copy! 
(consider using `copyto!`)")) copyto!(dst, src) @@ -715,7 +726,7 @@ end ## from general iterable to any array -function copyto!(dest::AbstractArray, src) +function copyto!(dest::ArrayLike, src) destiter = eachindex(dest) y = iterate(destiter) for x in src @@ -727,7 +738,7 @@ function copyto!(dest::AbstractArray, src) return dest end -function copyto!(dest::AbstractArray, dstart::Integer, src) +function copyto!(dest::ArrayLike, dstart::Integer, src) i = Int(dstart) for x in src dest[i] = x @@ -736,8 +747,8 @@ function copyto!(dest::AbstractArray, dstart::Integer, src) return dest end -# copy from an some iterable object into an AbstractArray -function copyto!(dest::AbstractArray, dstart::Integer, src, sstart::Integer) +# copy from an some iterable object into an ArrayLike +function copyto!(dest::ArrayLike, dstart::Integer, src, sstart::Integer) if (sstart < 1) throw(ArgumentError(string("source start offset (",sstart,") is < 1"))) end @@ -764,7 +775,7 @@ function copyto!(dest::AbstractArray, dstart::Integer, src, sstart::Integer) end # this method must be separate from the above since src might not have a length -function copyto!(dest::AbstractArray, dstart::Integer, src, sstart::Integer, n::Integer) +function copyto!(dest::ArrayLike, dstart::Integer, src, sstart::Integer, n::Integer) n < 0 && throw(ArgumentError(string("tried to copy n=", n, " elements, but n should be nonnegative"))) n == 0 && return dest dmax = dstart + n - 1 @@ -795,10 +806,10 @@ end ## copy between abstract arrays - generally more efficient ## since a single index variable can be used. -copyto!(dest::AbstractArray, src::AbstractArray) = +copyto!(dest::ArrayLike, src::ArrayLike) = copyto!(IndexStyle(dest), dest, IndexStyle(src), src) -function copyto!(::IndexStyle, dest::AbstractArray, ::IndexStyle, src::AbstractArray) +function copyto!(::IndexStyle, dest::ArrayLike, ::IndexStyle, src::ArrayLike) destinds, srcinds = LinearIndices(dest), LinearIndices(src) isempty(srcinds) || (checkbounds(Bool, destinds, first(srcinds)) && checkbounds(Bool, destinds, last(srcinds))) || throw(BoundsError(dest, srcinds)) @@ -808,7 +819,7 @@ function copyto!(::IndexStyle, dest::AbstractArray, ::IndexStyle, src::AbstractA return dest end -function copyto!(::IndexStyle, dest::AbstractArray, ::IndexCartesian, src::AbstractArray) +function copyto!(::IndexStyle, dest::ArrayLike, ::IndexCartesian, src::ArrayLike) destinds, srcinds = LinearIndices(dest), LinearIndices(src) isempty(srcinds) || (checkbounds(Bool, destinds, first(srcinds)) && checkbounds(Bool, destinds, last(srcinds))) || throw(BoundsError(dest, srcinds)) @@ -819,18 +830,18 @@ function copyto!(::IndexStyle, dest::AbstractArray, ::IndexCartesian, src::Abstr return dest end -function copyto!(dest::AbstractArray, dstart::Integer, src::AbstractArray) +function copyto!(dest::ArrayLike, dstart::Integer, src::ArrayLike) copyto!(dest, dstart, src, first(LinearIndices(src)), length(src)) end -function copyto!(dest::AbstractArray, dstart::Integer, src::AbstractArray, sstart::Integer) +function copyto!(dest::ArrayLike, dstart::Integer, src::ArrayLike, sstart::Integer) srcinds = LinearIndices(src) checkbounds(Bool, srcinds, sstart) || throw(BoundsError(src, sstart)) copyto!(dest, dstart, src, sstart, last(srcinds)-sstart+1) end -function copyto!(dest::AbstractArray, dstart::Integer, - src::AbstractArray, sstart::Integer, +function copyto!(dest::ArrayLike, dstart::Integer, + src::ArrayLike, sstart::Integer, n::Integer) n == 0 && return dest n < 0 && throw(ArgumentError(string("tried to copy n=", 
n, " elements, but n should be nonnegative"))) @@ -843,7 +854,7 @@ function copyto!(dest::AbstractArray, dstart::Integer, return dest end -function copy(a::AbstractArray) +function copy(a::ArrayLike) @_propagate_inbounds_meta copymutable(a) end @@ -893,7 +904,7 @@ julia> Base.copymutable(tup) 3 ``` """ -function copymutable(a::AbstractArray) +function copymutable(a::ArrayLike) @_propagate_inbounds_meta copyto!(similar(a), a) end @@ -907,13 +918,13 @@ zero(x::AbstractArray{T}) where {T} = fill!(similar(x), zero(T)) # While the definitions for IndexLinear are all simple enough to inline on their # own, IndexCartesian's CartesianIndices is more complicated and requires explicit # inlining. -function iterate(A::AbstractArray, state=(eachindex(A),)) +function iterate(A::ArrayLike, state=(eachindex(A),)) y = iterate(state...) y === nothing && return nothing A[y[1]], (state[1], tail(y)...) end -isempty(a::AbstractArray) = (length(a) == 0) +isempty(a::ArrayLike) = (length(a) == 0) ## range conversions ## @@ -927,7 +938,7 @@ end ## unsafe/pointer conversions ## -# note: the following type definitions don't mean any AbstractArray is convertible to +# note: the following type definitions don't mean any ArrayLike is convertible to # a data Ref. they just map the array element type to the pointer type for # convenience in cases that work. pointer(x::AbstractArray{T}) where {T} = unsafe_convert(Ptr{T}, x) @@ -974,70 +985,70 @@ julia> getindex(A, 2:4) 4 ``` """ -function getindex(A::AbstractArray, I...) +function getindex(A::ArrayLike, I...) @_propagate_inbounds_meta error_if_canonical_getindex(IndexStyle(A), A, I...) _getindex(IndexStyle(A), A, to_indices(A, I)...) end -function unsafe_getindex(A::AbstractArray, I...) +function unsafe_getindex(A::ArrayLike, I...) @_inline_meta @inbounds r = getindex(A, I...) r end -error_if_canonical_getindex(::IndexLinear, A::AbstractArray, ::Int) = +error_if_canonical_getindex(::IndexLinear, A::ArrayLike, ::Int) = error("getindex not defined for ", typeof(A)) -error_if_canonical_getindex(::IndexCartesian, A::AbstractArray{T,N}, ::Vararg{Int,N}) where {T,N} = +error_if_canonical_getindex(::IndexCartesian, A::ArrayLike{N}, ::Vararg{Int,N}) where {N} = error("getindex not defined for ", typeof(A)) -error_if_canonical_getindex(::IndexStyle, ::AbstractArray, ::Any...) = nothing +error_if_canonical_getindex(::IndexStyle, ::ArrayLike, ::Any...) = nothing ## Internal definitions -_getindex(::IndexStyle, A::AbstractArray, I...) = +_getindex(::IndexStyle, A::ArrayLike, I...) = error("getindex for $(typeof(A)) with types $(typeof(I)) is not supported") ## IndexLinear Scalar indexing: canonical method is one Int -_getindex(::IndexLinear, A::AbstractArray, i::Int) = (@_propagate_inbounds_meta; getindex(A, i)) -function _getindex(::IndexLinear, A::AbstractArray, I::Vararg{Int,M}) where M +_getindex(::IndexLinear, A::ArrayLike, i::Int) = (@_propagate_inbounds_meta; getindex(A, i)) +function _getindex(::IndexLinear, A::ArrayLike, I::Vararg{Int,M}) where M @_inline_meta @boundscheck checkbounds(A, I...) # generally _to_linear_index requires bounds checking @inbounds r = getindex(A, _to_linear_index(A, I...)) r end -_to_linear_index(A::AbstractArray, i::Int) = i -_to_linear_index(A::AbstractVector, i::Int, I::Int...) = i -_to_linear_index(A::AbstractArray) = 1 -_to_linear_index(A::AbstractArray, I::Int...) = (@_inline_meta; _sub2ind(A, I...)) +_to_linear_index(A::ArrayLike, i::Int) = i +_to_linear_index(A::ArrayLike{1}, i::Int, I::Int...) 
= i +_to_linear_index(A::ArrayLike) = 1 +_to_linear_index(A::ArrayLike, I::Int...) = (@_inline_meta; _sub2ind(A, I...)) ## IndexCartesian Scalar indexing: Canonical method is full dimensionality of Ints -function _getindex(::IndexCartesian, A::AbstractArray, I::Vararg{Int,M}) where M +function _getindex(::IndexCartesian, A::ArrayLike, I::Vararg{Int,M}) where M @_inline_meta @boundscheck checkbounds(A, I...) # generally _to_subscript_indices requires bounds checking @inbounds r = getindex(A, _to_subscript_indices(A, I...)...) r end -function _getindex(::IndexCartesian, A::AbstractArray{T,N}, I::Vararg{Int, N}) where {T,N} +function _getindex(::IndexCartesian, A::ArrayLike{N}, I::Vararg{Int, N}) where {N} @_propagate_inbounds_meta getindex(A, I...) end -_to_subscript_indices(A::AbstractArray, i::Int) = (@_inline_meta; _unsafe_ind2sub(A, i)) -_to_subscript_indices(A::AbstractArray{T,N}) where {T,N} = (@_inline_meta; fill_to_length((), 1, Val(N))) -_to_subscript_indices(A::AbstractArray{T,0}) where {T} = () -_to_subscript_indices(A::AbstractArray{T,0}, i::Int) where {T} = () -_to_subscript_indices(A::AbstractArray{T,0}, I::Int...) where {T} = () -function _to_subscript_indices(A::AbstractArray{T,N}, I::Int...) where {T,N} +_to_subscript_indices(A::ArrayLike, i::Int) = (@_inline_meta; _unsafe_ind2sub(A, i)) +_to_subscript_indices(A::ArrayLike{N}) where {N} = (@_inline_meta; fill_to_length((), 1, Val(N))) +_to_subscript_indices(A::ArrayLike{0}) = () +_to_subscript_indices(A::ArrayLike{0}, i::Int) = () +_to_subscript_indices(A::ArrayLike{0}, I::Int...) = () +function _to_subscript_indices(A::ArrayLike{N}, I::Int...) where {N} @_inline_meta J, Jrem = IteratorsMD.split(I, Val(N)) _to_subscript_indices(A, J, Jrem) end -_to_subscript_indices(A::AbstractArray, J::Tuple, Jrem::Tuple{}) = +_to_subscript_indices(A::ArrayLike, J::Tuple, Jrem::Tuple{}) = __to_subscript_indices(A, axes(A), J, Jrem) -function __to_subscript_indices(A::AbstractArray, +function __to_subscript_indices(A::ArrayLike, ::Tuple{AbstractUnitRange,Vararg{AbstractUnitRange}}, J::Tuple, Jrem::Tuple{}) @_inline_meta (J..., map(first, tail(_remaining_size(J, axes(A))))...) end _to_subscript_indices(A, J::Tuple, Jrem::Tuple) = J # already bounds-checked, safe to drop -_to_subscript_indices(A::AbstractArray{T,N}, I::Vararg{Int,N}) where {T,N} = I +_to_subscript_indices(A::ArrayLike{N}, I::Vararg{Int,N}) where {N} = I _remaining_size(::Tuple{Any}, t::Tuple) = t _remaining_size(h::Tuple, t::Tuple) = (@_inline_meta; _remaining_size(tail(h), tail(t))) _unsafe_ind2sub(::Tuple{}, i) = () # _ind2sub may throw(BoundsError()) in this case @@ -1067,30 +1078,30 @@ julia> A 20.0 40.0 ``` """ -function setindex!(A::AbstractArray, v, I...) +function setindex!(A::ArrayLike, v, I...) @_propagate_inbounds_meta error_if_canonical_setindex(IndexStyle(A), A, I...) _setindex!(IndexStyle(A), A, v, to_indices(A, I)...) end -function unsafe_setindex!(A::AbstractArray, v, I...) +function unsafe_setindex!(A::ArrayLike, v, I...) @_inline_meta @inbounds r = setindex!(A, v, I...) r end -error_if_canonical_setindex(::IndexLinear, A::AbstractArray, ::Int) = +error_if_canonical_setindex(::IndexLinear, A::ArrayLike, ::Int) = error("setindex! not defined for ", typeof(A)) -error_if_canonical_setindex(::IndexCartesian, A::AbstractArray{T,N}, ::Vararg{Int,N}) where {T,N} = +error_if_canonical_setindex(::IndexCartesian, A::ArrayLike{N}, ::Vararg{Int,N}) where {N} = error("setindex! 
not defined for ", typeof(A)) -error_if_canonical_setindex(::IndexStyle, ::AbstractArray, ::Any...) = nothing +error_if_canonical_setindex(::IndexStyle, ::ArrayLike, ::Any...) = nothing ## Internal definitions -_setindex!(::IndexStyle, A::AbstractArray, v, I...) = +_setindex!(::IndexStyle, A::ArrayLike, v, I...) = error("setindex! for $(typeof(A)) with types $(typeof(I)) is not supported") ## IndexLinear Scalar indexing -_setindex!(::IndexLinear, A::AbstractArray, v, i::Int) = (@_propagate_inbounds_meta; setindex!(A, v, i)) -function _setindex!(::IndexLinear, A::AbstractArray, v, I::Vararg{Int,M}) where M +_setindex!(::IndexLinear, A::ArrayLike, v, i::Int) = (@_propagate_inbounds_meta; setindex!(A, v, i)) +function _setindex!(::IndexLinear, A::ArrayLike, v, I::Vararg{Int,M}) where M @_inline_meta @boundscheck checkbounds(A, I...) @inbounds r = setindex!(A, v, _to_linear_index(A, I...)) @@ -1098,11 +1109,11 @@ function _setindex!(::IndexLinear, A::AbstractArray, v, I::Vararg{Int,M}) where end # IndexCartesian Scalar indexing -function _setindex!(::IndexCartesian, A::AbstractArray{T,N}, v, I::Vararg{Int, N}) where {T,N} +function _setindex!(::IndexCartesian, A::ArrayLike{N}, v, I::Vararg{Int, N}) where {N} @_propagate_inbounds_meta setindex!(A, v, I...) end -function _setindex!(::IndexCartesian, A::AbstractArray, v, I::Vararg{Int,M}) where M +function _setindex!(::IndexCartesian, A::ArrayLike, v, I::Vararg{Int,M}) where M @_inline_meta @boundscheck checkbounds(A, I...) @inbounds r = setindex!(A, v, _to_subscript_indices(A, I...)...) @@ -1133,7 +1144,7 @@ julia> parent(V) 3 4 ``` """ -parent(a::AbstractArray) = a +parent(a::ArrayLike) = a ## rudimentary aliasing detection ## """ @@ -1151,7 +1162,7 @@ provide a [`Base.unaliascopy`](@ref) implementation. See also [`Base.mightalias`](@ref). """ -unalias(dest, A::AbstractArray) = mightalias(dest, A) ? unaliascopy(A) : A +unalias(dest, A::ArrayLike) = mightalias(dest, A) ? unaliascopy(A) : A unalias(dest, A::AbstractRange) = A unalias(dest, A) = A @@ -1163,12 +1174,12 @@ another array in order to preserve consistent semantics as that other array is m This must return an object of the same type as `A` to preserve optimal performance in the much more common case where aliasing does not occur. By default, -`unaliascopy(A::AbstractArray)` will attempt to use [`copy(A)`](@ref), but in cases where +`unaliascopy(A::ArrayLike)` will attempt to use [`copy(A)`](@ref), but in cases where `copy(A)` is not a `typeof(A)`, then the array should provide a custom implementation of `Base.unaliascopy(A)`. """ unaliascopy(A::Array) = copy(A) -unaliascopy(A::AbstractArray)::typeof(A) = (@_noinline_meta; _unaliascopy(A, copy(A))) +unaliascopy(A::ArrayLike)::typeof(A) = (@_noinline_meta; _unaliascopy(A, copy(A))) _unaliascopy(A::T, C::T) where {T} = C _unaliascopy(A, C) = throw(ArgumentError(""" an array of type `$(typeof(A).name)` shares memory with another argument and must @@ -1178,14 +1189,14 @@ _unaliascopy(A, C) = throw(ArgumentError(""" unaliascopy(A) = A """ - Base.mightalias(A::AbstractArray, B::AbstractArray) + Base.mightalias(A::ArrayLike, B::ArrayLike) Perform a conservative test to check if arrays `A` and `B` might share the same memory. By default, this simply checks if either of the arrays reference the same memory regions, as identified by their [`Base.dataids`](@ref). 
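For orientation, and not part of the diff, this is how the aliasing query behaves on stock arrays, where a view reports the `dataids` of its parent:

```julia
A = rand(4, 4)
V = view(A, :, 2)
Base.mightalias(A, V)        # true: the view shares A's memory region
Base.mightalias(A, copy(V))  # false: the copy owns a fresh buffer
Base.dataids(A)              # a 1-tuple of UInt identifying A's mutable memory
```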
""" -mightalias(A::AbstractArray, B::AbstractArray) = !isbits(A) && !isbits(B) && !_isdisjoint(dataids(A), dataids(B)) +mightalias(A::ArrayLike, B::ArrayLike) = !isbits(A) && !isbits(B) && !_isdisjoint(dataids(A), dataids(B)) mightalias(x, y) = false _isdisjoint(as::Tuple{}, bs::Tuple{}) = true @@ -1199,7 +1210,7 @@ _isdisjoint(as::Tuple, bs::Tuple{UInt}) = !(bs[1] in as) _isdisjoint(as::Tuple, bs::Tuple) = !(as[1] in bs) && _isdisjoint(tail(as), bs) """ - Base.dataids(A::AbstractArray) + Base.dataids(A::ArrayLike) Return a tuple of `UInt`s that represent the mutable data segments of an array. @@ -1208,7 +1219,7 @@ parts can specialize this method to return the concatenation of the `dataids` of their component parts. A typical definition for an array that wraps a parent is `Base.dataids(C::CustomArray) = dataids(C.parent)`. """ -dataids(A::AbstractArray) = (UInt(objectid(A)),) +dataids(A::ArrayLike) = (UInt(objectid(A)),) dataids(A::Array) = (UInt(pointer(A)),) dataids(::AbstractRange) = () dataids(x) = () @@ -1218,11 +1229,11 @@ dataids(x) = () RangeVecIntList{A<:AbstractVector{Int}} = Union{Tuple{Vararg{Union{AbstractRange, AbstractVector{Int}}}}, AbstractVector{UnitRange{Int}}, AbstractVector{AbstractRange{Int}}, AbstractVector{A}} -get(A::AbstractArray, i::Integer, default) = checkbounds(Bool, A, i) ? A[i] : default -get(A::AbstractArray, I::Tuple{}, default) = checkbounds(Bool, A) ? A[] : default -get(A::AbstractArray, I::Dims, default) = checkbounds(Bool, A, I...) ? A[I...] : default +get(A::ArrayLike, i::Integer, default) = checkbounds(Bool, A, i) ? A[i] : default +get(A::ArrayLike, I::Tuple{}, default) = checkbounds(Bool, A) ? A[] : default +get(A::ArrayLike, I::Dims, default) = checkbounds(Bool, A, I...) ? A[I...] : default -function get!(X::AbstractVector{T}, A::AbstractVector, I::Union{AbstractRange,AbstractVector{Int}}, default::T) where T +function get!(X::AbstractVector{T}, A::ArrayLike{1}, I::Union{AbstractRange,AbstractVector{Int}}, default::T) where T # 1d is not linear indexing ind = findall(in(axes1(A)), I) X[ind] = A[I[ind]] @@ -1231,7 +1242,7 @@ function get!(X::AbstractVector{T}, A::AbstractVector, I::Union{AbstractRange,Ab X[last(ind)+1:last(Xind)] = default X end -function get!(X::AbstractArray{T}, A::AbstractArray, I::Union{AbstractRange,AbstractVector{Int}}, default::T) where T +function get!(X::AbstractArray{T}, A::ArrayLike, I::Union{AbstractRange,AbstractVector{Int}}, default::T) where T # Linear indexing ind = findall(in(1:length(A)), I) X[ind] = A[I[ind]] @@ -1240,25 +1251,25 @@ function get!(X::AbstractArray{T}, A::AbstractArray, I::Union{AbstractRange,Abst X end -get(A::AbstractArray, I::AbstractRange, default) = get!(similar(A, typeof(default), index_shape(I)), A, I, default) +get(A::ArrayLike, I::AbstractRange, default) = get!(similar(A, typeof(default), index_shape(I)), A, I, default) -function get!(X::AbstractArray{T}, A::AbstractArray, I::RangeVecIntList, default::T) where T +function get!(X::AbstractArray{T}, A::ArrayLike, I::RangeVecIntList, default::T) where T fill!(X, default) dst, src = indcopy(size(A), I) X[dst...] = A[src...] 
X end -get(A::AbstractArray, I::RangeVecIntList, default) = +get(A::ArrayLike, I::RangeVecIntList, default) = get!(similar(A, typeof(default), index_shape(I...)), A, I, default) ## structured matrix methods ## -replace_in_print_matrix(A::AbstractMatrix,i::Integer,j::Integer,s::AbstractString) = s -replace_in_print_matrix(A::AbstractVector,i::Integer,j::Integer,s::AbstractString) = s +replace_in_print_matrix(A::ArrayLike{2},i::Integer,j::Integer,s::AbstractString) = s +replace_in_print_matrix(A::ArrayLike{1},i::Integer,j::Integer,s::AbstractString) = s ## Concatenation ## eltypeof(x) = typeof(x) -eltypeof(x::AbstractArray) = eltype(x) +eltypeof(x::ArrayLike) = eltype(x) promote_eltypeof() = Bottom promote_eltypeof(v1, vs...) = promote_type(eltypeof(v1), promote_eltypeof(vs...)) @@ -1283,7 +1294,7 @@ hcat(X::Number...) = hvcat_fill(Matrix{promote_typeof(X...)}(undef, 1,length(X)) typed_vcat(::Type{T}, X::Number...) where {T} = hvcat_fill(Vector{T}(undef, length(X)), X) typed_hcat(::Type{T}, X::Number...) where {T} = hvcat_fill(Matrix{T}(undef, 1,length(X)), X) -vcat(V::AbstractVector...) = typed_vcat(promote_eltype(V...), V...) +vcat(V::ArrayLike{1}...) = typed_vcat(promote_eltype(V...), V...) vcat(V::AbstractVector{T}...) where {T} = typed_vcat(T, V...) # FIXME: this alias would better be Union{AbstractVector{T}, Tuple{Vararg{T}}} @@ -1291,7 +1302,7 @@ vcat(V::AbstractVector{T}...) where {T} = typed_vcat(T, V...) # but that solution currently fails (see #27188 and #27224) AbstractVecOrTuple{T} = Union{AbstractVector{<:T}, Tuple{Vararg{T}}} -function _typed_vcat(::Type{T}, V::AbstractVecOrTuple{AbstractVector}) where T +function _typed_vcat(::Type{T}, V::AbstractVecOrTuple{ArrayLike{1}}) where T n::Int = 0 for Vk in V n += length(Vk) @@ -1307,12 +1318,12 @@ function _typed_vcat(::Type{T}, V::AbstractVecOrTuple{AbstractVector}) where T a end -typed_hcat(::Type{T}, A::AbstractVecOrMat...) where {T} = _typed_hcat(T, A) +typed_hcat(::Type{T}, A::VectorOrMatrixLike...) where {T} = _typed_hcat(T, A) -hcat(A::AbstractVecOrMat...) = typed_hcat(promote_eltype(A...), A...) +hcat(A::VectorOrMatrixLike...) = typed_hcat(promote_eltype(A...), A...) hcat(A::AbstractVecOrMat{T}...) where {T} = typed_hcat(T, A...) -function _typed_hcat(::Type{T}, A::AbstractVecOrTuple{AbstractVecOrMat}) where T +function _typed_hcat(::Type{T}, A::AbstractVecOrTuple{VectorOrMatrixLike}) where T nargs = length(A) nrows = size(A[1], 1) ncols = 0 @@ -1338,7 +1349,7 @@ function _typed_hcat(::Type{T}, A::AbstractVecOrTuple{AbstractVecOrMat}) where T else for k=1:nargs Ak = A[k] - p1 = pos+(isa(Ak,AbstractMatrix) ? size(Ak, 2) : 1)-1 + p1 = pos+(isa(Ak,ArrayLike{2}) ? size(Ak, 2) : 1)-1 B[:, pos:p1] = Ak pos = p1+1 end @@ -1346,10 +1357,10 @@ function _typed_hcat(::Type{T}, A::AbstractVecOrTuple{AbstractVecOrMat}) where T return B end -vcat(A::AbstractVecOrMat...) = typed_vcat(promote_eltype(A...), A...) +vcat(A::VectorOrMatrixLike...) = typed_vcat(promote_eltype(A...), A...) vcat(A::AbstractVecOrMat{T}...) where {T} = typed_vcat(T, A...) -function _typed_vcat(::Type{T}, A::AbstractVecOrTuple{AbstractVecOrMat}) where T +function _typed_vcat(::Type{T}, A::AbstractVecOrTuple{VectorOrMatrixLike}) where T nargs = length(A) nrows = sum(a->size(a, 1), A)::Int ncols = size(A[1], 2) @@ -1369,27 +1380,27 @@ function _typed_vcat(::Type{T}, A::AbstractVecOrTuple{AbstractVecOrMat}) where T return B end -typed_vcat(::Type{T}, A::AbstractVecOrMat...) where {T} = _typed_vcat(T, A) +typed_vcat(::Type{T}, A::VectorOrMatrixLike...) 
where {T} = _typed_vcat(T, A) -reduce(::typeof(vcat), A::AbstractVector{<:AbstractVecOrMat}) = +reduce(::typeof(vcat), A::AbstractVector{<:VectorOrMatrixLike}) = _typed_vcat(mapreduce(eltype, promote_type, A), A) -reduce(::typeof(hcat), A::AbstractVector{<:AbstractVecOrMat}) = +reduce(::typeof(hcat), A::AbstractVector{<:VectorOrMatrixLike}) = _typed_hcat(mapreduce(eltype, promote_type, A), A) ## cat: general case # helper functions cat_size(A) = (1,) -cat_size(A::AbstractArray) = size(A) +cat_size(A::ArrayLike) = size(A) cat_size(A, d) = 1 -cat_size(A::AbstractArray, d) = size(A, d) +cat_size(A::ArrayLike, d) = size(A, d) cat_indices(A, d) = OneTo(1) -cat_indices(A::AbstractArray, d) = axes(A, d) +cat_indices(A::ArrayLike, d) = axes(A, d) cat_similar(A, T, shape) = Array{T}(undef, shape) -cat_similar(A::AbstractArray, T, shape) = similar(A, T, shape) +cat_similar(A::ArrayLike, T, shape) = similar(A, T, shape) cat_shape(dims, shape::Tuple) = shape @inline cat_shape(dims, shape::Tuple, nshape::Tuple, shapes::Tuple...) = @@ -1459,7 +1470,7 @@ function __cat(A, shape::NTuple{N}, catdims, X...) where N end end I::NTuple{N, UnitRange{Int}} = (inds...,) - if x isa AbstractArray + if x isa ArrayLike A[I...] = x else fill!(view(A, I...), x) @@ -1566,19 +1577,19 @@ _cat(catdims, A::AbstractArray{T}...) where {T} = cat_t(T, A...; dims=catdims) # The specializations for 1 and 2 inputs are important # especially when running with --inline=no, see #11158 -vcat(A::AbstractArray) = cat(A; dims=Val(1)) -vcat(A::AbstractArray, B::AbstractArray) = cat(A, B; dims=Val(1)) -vcat(A::AbstractArray...) = cat(A...; dims=Val(1)) -hcat(A::AbstractArray) = cat(A; dims=Val(2)) -hcat(A::AbstractArray, B::AbstractArray) = cat(A, B; dims=Val(2)) -hcat(A::AbstractArray...) = cat(A...; dims=Val(2)) - -typed_vcat(T::Type, A::AbstractArray) = cat_t(T, A; dims=Val(1)) -typed_vcat(T::Type, A::AbstractArray, B::AbstractArray) = cat_t(T, A, B; dims=Val(1)) -typed_vcat(T::Type, A::AbstractArray...) = cat_t(T, A...; dims=Val(1)) -typed_hcat(T::Type, A::AbstractArray) = cat_t(T, A; dims=Val(2)) -typed_hcat(T::Type, A::AbstractArray, B::AbstractArray) = cat_t(T, A, B; dims=Val(2)) -typed_hcat(T::Type, A::AbstractArray...) = cat_t(T, A...; dims=Val(2)) +vcat(A::ArrayLike) = cat(A; dims=Val(1)) +vcat(A::ArrayLike, B::ArrayLike) = cat(A, B; dims=Val(1)) +vcat(A::ArrayLike...) = cat(A...; dims=Val(1)) +hcat(A::ArrayLike) = cat(A; dims=Val(2)) +hcat(A::ArrayLike, B::ArrayLike) = cat(A, B; dims=Val(2)) +hcat(A::ArrayLike...) = cat(A...; dims=Val(2)) + +typed_vcat(T::Type, A::ArrayLike) = cat_t(T, A; dims=Val(1)) +typed_vcat(T::Type, A::ArrayLike, B::ArrayLike) = cat_t(T, A, B; dims=Val(1)) +typed_vcat(T::Type, A::ArrayLike...) = cat_t(T, A...; dims=Val(1)) +typed_hcat(T::Type, A::ArrayLike) = cat_t(T, A; dims=Val(2)) +typed_hcat(T::Type, A::ArrayLike, B::ArrayLike) = cat_t(T, A, B; dims=Val(2)) +typed_hcat(T::Type, A::ArrayLike...) = cat_t(T, A...; dims=Val(2)) # 2d horizontal and vertical concatenation @@ -1629,10 +1640,10 @@ julia> hvcat((2,2,2), a,b,c,d,e,f) If the first argument is a single integer `n`, then all block rows are assumed to have `n` block columns. """ -hvcat(rows::Tuple{Vararg{Int}}, xs::AbstractVecOrMat...) = typed_hvcat(promote_eltype(xs...), rows, xs...) +hvcat(rows::Tuple{Vararg{Int}}, xs::VectorOrMatrixLike...) = typed_hvcat(promote_eltype(xs...), rows, xs...) hvcat(rows::Tuple{Vararg{Int}}, xs::AbstractVecOrMat{T}...) where {T} = typed_hvcat(T, rows, xs...) 
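As a reminder of the relationship the docstring above describes (an aside, not part of the diff): the block-matrix bracket syntax lowers to `hvcat`, and the single-integer form fixes the number of blocks per block row:

```julia
a = [1 2]; b = [3 4]
c = [5 6]; d = [7 8]
[a b; c d] == hvcat((2, 2), a, b, c, d)  # true: literal block syntax lowers to hvcat
hvcat(2, a, b, c, d)                     # same 2x4 result; every block row holds 2 blocks
```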
-function typed_hvcat(::Type{T}, rows::Tuple{Vararg{Int}}, as::AbstractVecOrMat...) where T +function typed_hvcat(::Type{T}, rows::Tuple{Vararg{Int}}, as::VectorOrMatrixLike...) where T nbr = length(rows) # number of block rows nc = 0 @@ -1742,7 +1753,7 @@ end ## Reductions and accumulates ## -function isequal(A::AbstractArray, B::AbstractArray) +function isequal(A::ArrayLike, B::ArrayLike) if A === B return true end if axes(A) != axes(B) return false @@ -1755,7 +1766,7 @@ function isequal(A::AbstractArray, B::AbstractArray) return true end -function cmp(A::AbstractVector, B::AbstractVector) +function cmp(A::ArrayLike{1}, B::ArrayLike{1}) for (a, b) in zip(A, B) if !isequal(a, b) return isless(a, b) ? -1 : 1 @@ -1764,9 +1775,9 @@ function cmp(A::AbstractVector, B::AbstractVector) return cmp(length(A), length(B)) end -isless(A::AbstractVector, B::AbstractVector) = cmp(A, B) < 0 +isless(A::ArrayLike{1}, B::ArrayLike{1}) = cmp(A, B) < 0 -function (==)(A::AbstractArray, B::AbstractArray) +function (==)(A::ArrayLike, B::ArrayLike) if axes(A) != axes(B) return false end @@ -1784,12 +1795,12 @@ end # _sub2ind and _ind2sub # fallbacks -function _sub2ind(A::AbstractArray, I...) +function _sub2ind(A::ArrayLike, I...) @_inline_meta _sub2ind(axes(A), I...) end -function _ind2sub(A::AbstractArray, ind) +function _ind2sub(A::ArrayLike, ind) @_inline_meta _ind2sub(axes(A), ind) end @@ -1859,7 +1870,7 @@ _sub2ind(inds::Tuple{OneTo}, I1::AbstractVector{T}, I::AbstractVector{T}...) whe _sub2ind_vecs(inds, I1, I...) _sub2ind(inds::Union{DimsInteger,Indices}, I1::AbstractVector{T}, I::AbstractVector{T}...) where {T<:Integer} = _sub2ind_vecs(inds, I1, I...) -function _sub2ind_vecs(inds, I::AbstractVector...) +function _sub2ind_vecs(inds, I::ArrayLike{1}...) I1 = I[1] Iinds = axes1(I1) for j = 2:length(I) @@ -1968,11 +1979,11 @@ julia> mapslices(sum, a, dims = [1,2]) 58 ``` """ -function mapslices(f, A::AbstractArray; dims) +function mapslices(f, A::ArrayLike; dims) if isempty(dims) return map(f,A) end - if !isa(dims, AbstractVector) + if !isa(dims, ArrayLike{1}) dims = [dims...] end @@ -1996,12 +2007,12 @@ function mapslices(f, A::AbstractArray; dims) # any mutable containers. The following errs on the side of being overly # strict (#18570 & #21123). safe_for_reuse = isa(Aslice, StridedArray) && - (isa(r1, Number) || (isa(r1, AbstractArray) && eltype(r1) <: Number)) + (isa(r1, Number) || (isa(r1, ArrayLike) && eltype(r1) <: Number)) # determine result size and allocate Rsize = copy(dimsA) # TODO: maybe support removing dimensions - if !isa(r1, AbstractArray) || ndims(r1) == 0 + if !isa(r1, ArrayLike) || ndims(r1) == 0 # If the result of f on a single slice is a scalar then we add singleton # dimensions. When adding the dimensions, we have to respect the # index type of the input array (e.g. in the case of OffsetArrays) @@ -2056,11 +2067,11 @@ function replace_tuples!(nidx, idx, ridx, otherdims, I) end concatenate_setindex!(R, v, I...) = (R[I...] .= (v,); R) -concatenate_setindex!(R, X::AbstractArray, I...) = (R[I...] = X) +concatenate_setindex!(R, X::ArrayLike, I...) = (R[I...] 
= X) ## 1 argument -function map!(f::F, dest::AbstractArray, A::AbstractArray) where F +function map!(f::F, dest::ArrayLike, A::ArrayLike) where F for (i,j) in zip(eachindex(dest),eachindex(A)) val = f(@inbounds A[j]) @inbounds dest[i] = val @@ -2069,7 +2080,7 @@ function map!(f::F, dest::AbstractArray, A::AbstractArray) where F end # map on collections -map(f, A::AbstractArray) = collect_similar(A, Generator(f,A)) +map(f, A::ArrayLike) = collect_similar(A, Generator(f,A)) # default to returning an Array for `map` on general iterators """ @@ -2101,7 +2112,7 @@ map(f, ::AbstractDict) = error("map is not defined on dictionaries") map(f, ::AbstractSet) = error("map is not defined on sets") ## 2 argument -function map!(f::F, dest::AbstractArray, A::AbstractArray, B::AbstractArray) where F +function map!(f::F, dest::ArrayLike, A::ArrayLike, B::ArrayLike) where F for (i, j, k) in zip(eachindex(dest), eachindex(A), eachindex(B)) @inbounds a, b = A[j], B[k] val = f(a, b) @@ -2118,7 +2129,7 @@ function ith_all(i, as) return (as[1][i], ith_all(i, tail(as))...) end -function map_n!(f::F, dest::AbstractArray, As) where F +function map_n!(f::F, dest::ArrayLike, As) where F idxs1 = LinearIndices(As[1]) @boundscheck LinearIndices(dest) == idxs1 && all(x -> LinearIndices(x) == idxs1, As) for i = idxs1 @@ -2148,7 +2159,7 @@ julia> a 6.0 ``` """ -map!(f::F, dest::AbstractArray, As::AbstractArray...) where {F} = map_n!(f, dest, As) +map!(f::F, dest::ArrayLike, As::ArrayLike...) where {F} = map_n!(f, dest, As) map(f) = f() map(f, iters...) = collect(Generator(f, iters...)) @@ -2160,11 +2171,11 @@ push!(A, a, b, c...) = push!(push!(A, a, b), c...) pushfirst!(A, a, b) = pushfirst!(pushfirst!(A, b), a) pushfirst!(A, a, b, c...) = pushfirst!(pushfirst!(A, c...), a, b) -## hashing AbstractArray ## +## hashing ArrayLike ## -function hash(A::AbstractArray, h::UInt) - h = hash(AbstractArray, h) - # Axes are themselves AbstractArrays, so hashing them directly would stack overflow +function hash(A::ArrayLike, h::UInt) + h = hash(ArrayLike, h) + # Axes are themselves ArrayLikes, so hashing them directly would stack overflow # Instead hash the tuple of firsts and lasts along each dimension h = hash(map(first, axes(A)), h) h = hash(map(last, axes(A)), h) diff --git a/base/abstractarraymath.jl b/base/abstractarraymath.jl index d7d9550f82f5f..c1c505afffe38 100644 --- a/base/abstractarraymath.jl +++ b/base/abstractarraymath.jl @@ -2,17 +2,17 @@ ## Basic functions ## -isreal(x::AbstractArray) = all(isreal,x) -iszero(x::AbstractArray) = all(iszero,x) +isreal(x::ArrayLike) = all(isreal,x) +iszero(x::ArrayLike) = all(iszero,x) isreal(x::AbstractArray{<:Real}) = true ## Constructors ## """ - vec(a::AbstractArray) -> AbstractVector + vec(a::ArrayLike) -> ArrayLike{1} Reshape the array `a` as a one-dimensional column vector. Return `a` if it is -already an `AbstractVector`. The resulting array +already an `ArrayLike{1}`. The resulting array shares the same underlying data as `a`, so it will only be mutable if `a` is mutable, in which case modifying one will also modify the other. @@ -38,8 +38,8 @@ julia> vec(1:3) See also [`reshape`](@ref). 
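A short illustration of the data-sharing point made above (not part of the diff); note that under this PR the second method returns any `ArrayLike{1}` unchanged:

```julia
A = [1 2; 3 4]
v = vec(A)      # 4-element Vector reusing A's storage, in column-major order
v[1] = 99
A[1, 1]         # 99: mutation through the vector is visible in the matrix
vec(v) === v    # true: a one-dimensional input is returned as-is
```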
""" -vec(a::AbstractArray) = reshape(a,length(a)) -vec(a::AbstractVector) = a +vec(a::ArrayLike) = reshape(a,length(a)) +vec(a::ArrayLike{1}) = a _sub(::Tuple{}, ::Tuple{}) = () _sub(t::Tuple, ::Tuple{}) = t @@ -68,7 +68,7 @@ julia> dropdims(a; dims=3) ``` """ dropdims(A; dims) = _dropdims(A, dims) -function _dropdims(A::AbstractArray, dims::Dims) +function _dropdims(A::ArrayLike, dims::Dims) for i in eachindex(dims) 1 <= dims[i] <= ndims(A) || throw(ArgumentError("dropped dims must be in range 1:ndims(A)")) length(axes(A, dims[i])) == 1 || throw(ArgumentError("dropped dims must all be size 1")) @@ -84,7 +84,7 @@ function _dropdims(A::AbstractArray, dims::Dims) end reshape(A, d::typeof(_sub(axes(A), dims))) end -_dropdims(A::AbstractArray, dim::Integer) = _dropdims(A, (Int(dim),)) +_dropdims(A::ArrayLike, dim::Integer) = _dropdims(A, (Int(dim),)) ## Unary operators ## @@ -119,7 +119,7 @@ julia> selectdim(A, 2, 3) 7 ``` """ -@inline selectdim(A::AbstractArray, d::Integer, i) = _selectdim(A, d, i, _setindex(i, d, map(Slice, axes(A))...)) +@inline selectdim(A::ArrayLike, d::Integer, i) = _selectdim(A, d, i, _setindex(i, d, map(Slice, axes(A))...)) @noinline function _selectdim(A, d, i, idxs) d >= 1 || throw(ArgumentError("dimension must be ≥ 1, got $d")) nd = ndims(A) @@ -145,7 +145,7 @@ julia> reverse(b, dims=2) 4 3 ``` """ -function reverse(A::AbstractArray; dims::Integer) +function reverse(A::ArrayLike; dims::Integer) nd = ndims(A); d = dims 1 ≤ d ≤ nd || throw(ArgumentError("dimension $d is not 1 ≤ $d ≤ $nd")) if isempty(A) @@ -177,10 +177,10 @@ function reverse(A::AbstractArray; dims::Integer) return B end -function circshift(a::AbstractArray, shiftamt::Real) +function circshift(a::ArrayLike, shiftamt::Real) circshift!(similar(a), a, (Integer(shiftamt),)) end -circshift(a::AbstractArray, shiftamt::DimsInteger) = circshift!(similar(a), a, shiftamt) +circshift(a::ArrayLike, shiftamt::DimsInteger) = circshift!(similar(a), a, shiftamt) """ circshift(A, shifts) @@ -238,14 +238,14 @@ julia> circshift(a, -1) See also [`circshift!`](@ref). """ -function circshift(a::AbstractArray, shiftamt) +function circshift(a::ArrayLike, shiftamt) circshift!(similar(a), a, map(Integer, (shiftamt...,))) end ## Other array functions ## """ - repeat(A::AbstractArray, counts::Integer...) + repeat(A::ArrayLike, counts::Integer...) Construct an array by repeating array `A` a given number of times in each dimension, specified by `counts`. @@ -270,9 +270,9 @@ julia> repeat([1, 2, 3], 2, 3) 3 3 3 ``` """ -repeat(a::AbstractArray, counts::Integer...) = repeat(a, outer = counts) +repeat(a::ArrayLike, counts::Integer...) = repeat(a, outer = counts) -function repeat(a::AbstractVecOrMat, m::Integer, n::Integer=1) +function repeat(a::VectorOrMatrixLike, m::Integer, n::Integer=1) o, p = size(a,1), size(a,2) b = similar(a, o*m, p*n) for j=1:n @@ -286,7 +286,7 @@ function repeat(a::AbstractVecOrMat, m::Integer, n::Integer=1) return b end -function repeat(a::AbstractVector, m::Integer) +function repeat(a::ArrayLike{1}, m::Integer) o = length(a) b = similar(a, o*m) for i=1:m @@ -297,7 +297,7 @@ function repeat(a::AbstractVector, m::Integer) end """ - repeat(A::AbstractArray; inner=ntuple(x->1, ndims(A)), outer=ntuple(x->1, ndims(A))) + repeat(A::ArrayLike; inner=ntuple(x->1, ndims(A)), outer=ntuple(x->1, ndims(A))) Construct an array by repeating the entries of `A`. 
The i-th element of `inner` specifies the number of times that the individual entries of the i-th dimension of `A` should be @@ -329,13 +329,13 @@ julia> repeat([1 2; 3 4], inner=(2, 1), outer=(1, 3)) 3 4 3 4 3 4 ``` """ -function repeat(A::AbstractArray; inner = nothing, outer = nothing) +function repeat(A::ArrayLike; inner = nothing, outer = nothing) return _repeat_inner_outer(A, inner, outer) end # we have optimized implementations of these cases above -_repeat_inner_outer(A::AbstractVecOrMat, ::Nothing, r::Union{Tuple{Integer},Tuple{Integer,Integer}}) = repeat(A, r...) -_repeat_inner_outer(A::AbstractVecOrMat, ::Nothing, r::Integer) = repeat(A, r) +_repeat_inner_outer(A::VectorOrMatrixLike, ::Nothing, r::Union{Tuple{Integer},Tuple{Integer,Integer}}) = repeat(A, r...) +_repeat_inner_outer(A::VectorOrMatrixLike, ::Nothing, r::Integer) = repeat(A, r) _repeat_inner_outer(A, ::Nothing, ::Nothing) = A _repeat_inner_outer(A, ::Nothing, outer) = _repeat(A, ntuple(n->1, Val(ndims(A))), rep_kw2tup(outer)) @@ -366,7 +366,7 @@ _rshps(shp, shp_i, sz, i, ::Tuple{}) = _reperr(s, n, N) = throw(ArgumentError("number of " * s * " repetitions " * "($n) cannot be less than number of dimensions of input ($N)")) -@noinline function _repeat(A::AbstractArray, inner, outer) +@noinline function _repeat(A::ArrayLike, inner, outer) shape, inner_shape = rep_shapes(A, inner, outer) R = similar(A, shape) @@ -408,7 +408,7 @@ _reperr(s, n, N) = throw(ArgumentError("number of " * s * " repetitions " * end """ - eachrow(A::AbstractVecOrMat) + eachrow(A::VectorOrMatrixLike) Create a generator that iterates over the first dimension of vector or matrix `A`, returning the rows as views. @@ -418,11 +418,11 @@ See also [`eachcol`](@ref) and [`eachslice`](@ref). !!! compat "Julia 1.1" This function requires at least Julia 1.1. """ -eachrow(A::AbstractVecOrMat) = (view(A, i, :) for i in axes(A, 1)) +eachrow(A::VectorOrMatrixLike) = (view(A, i, :) for i in axes(A, 1)) """ - eachcol(A::AbstractVecOrMat) + eachcol(A::VectorOrMatrixLike) Create a generator that iterates over the second dimension of matrix `A`, returning the columns as views. @@ -432,10 +432,10 @@ See also [`eachrow`](@ref) and [`eachslice`](@ref). !!! compat "Julia 1.1" This function requires at least Julia 1.1. """ -eachcol(A::AbstractVecOrMat) = (view(A, :, i) for i in axes(A, 2)) +eachcol(A::VectorOrMatrixLike) = (view(A, :, i) for i in axes(A, 2)) """ - eachslice(A::AbstractArray; dims) + eachslice(A::ArrayLike; dims) Create a generator that iterates over dimensions `dims` of `A`, returning views that select all the data from the other dimensions in `A`. @@ -448,7 +448,7 @@ See also [`eachrow`](@ref), [`eachcol`](@ref), and [`selectdim`](@ref). !!! compat "Julia 1.1" This function requires at least Julia 1.1. """ -@inline function eachslice(A::AbstractArray; dims) +@inline function eachslice(A::ArrayLike; dims) length(dims) == 1 || throw(ArgumentError("only single dimensions are supported")) dim = first(dims) dim <= ndims(A) || throw(DimensionMismatch("A doesn't have $dim dimensions")) diff --git a/base/abstractset.jl b/base/abstractset.jl index f7399fdcd27df..dcf536306d313 100644 --- a/base/abstractset.jl +++ b/base/abstractset.jl @@ -51,7 +51,7 @@ union(s::AbstractSet) = copy(s) const ∪ = union """ - union!(s::Union{AbstractSet,AbstractVector}, itrs...) + union!(s::Union{AbstractSet,ArrayLike{1}}, itrs...) Construct the union of passed in sets and overwrite `s` with the result. Maintain order with arrays. 
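A quick sketch of the order-preserving behavior noted above for the `ArrayLike{1}` case (not part of the diff):

```julia
v = [3, 1]
union!(v, [2, 3], (1, 4))  # returns v; elements already present are skipped
v                          # [3, 1, 2, 4]: first-seen order is preserved
```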
@@ -124,7 +124,7 @@ intersect(s::AbstractSet, itr) = mapfilter(_in(s), push!, itr, emptymutable(s)) const ∩ = intersect """ - intersect!(s::Union{AbstractSet,AbstractVector}, itrs...) + intersect!(s::Union{AbstractSet,ArrayLike{1}}, itrs...) Intersect all passed in sets and overwrite `s` with the result. Maintain order with arrays. @@ -215,7 +215,7 @@ symdiff(s, sets...) = symdiff!(emptymutable(s, promote_eltype(s, sets...)), s, s symdiff(s) = symdiff!(copy(s)) """ - symdiff!(s::Union{AbstractSet,AbstractVector}, itrs...) + symdiff!(s::Union{AbstractSet,ArrayLike{1}}, itrs...) Construct the symmetric difference of the passed in sets, and overwrite `s` with the result. When `s` is an array, the order is maintained. diff --git a/base/accumulate.jl b/base/accumulate.jl index c5c4e83b3b0e3..04078f8891195 100644 --- a/base/accumulate.jl +++ b/base/accumulate.jl @@ -4,7 +4,7 @@ # stable in certain situations (e.g. sums). # it does double the number of operations compared to accumulate, # though for cheap operations like + this does not have much impact (20%) -function _accumulate_pairwise!(op::Op, c::AbstractVector{T}, v::AbstractVector, s, i1, n)::T where {T,Op} +function _accumulate_pairwise!(op::Op, c::AbstractVector{T}, v::ArrayLike{1}, s, i1, n)::T where {T,Op} @inbounds if n < 128 s_ = v[i1] c[i1] = op(s, s_) @@ -20,7 +20,7 @@ function _accumulate_pairwise!(op::Op, c::AbstractVector{T}, v::AbstractVector, return s_ end -function accumulate_pairwise!(op::Op, result::AbstractVector, v::AbstractVector) where Op +function accumulate_pairwise!(op::Op, result::ArrayLike{1}, v::ArrayLike{1}) where Op li = LinearIndices(v) li != LinearIndices(result) && throw(DimensionMismatch("input and output array sizes and indices must match")) n = length(li) @@ -43,21 +43,21 @@ end Cumulative sum of `A` along the dimension `dims`, storing the result in `B`. See also [`cumsum`](@ref). """ -cumsum!(B::AbstractArray{T}, A; dims::Integer) where {T} = +cumsum!(B::ArrayLike, A; dims::Integer) = accumulate!(add_sum, B, A, dims=dims) -function cumsum!(out::AbstractArray, v::AbstractVector; dims::Integer=1) +function cumsum!(out::ArrayLike, v::ArrayLike{1}; dims::Integer=1) # we dispatch on the possibility of numerical stability issues _cumsum!(out, v, dims, ArithmeticStyle(eltype(out))) end -function _cumsum!(out::AbstractArray{T}, v, dim, ::ArithmeticRounds) where {T} +function _cumsum!(out::ArrayLike, v, dim, ::ArithmeticRounds) dim == 1 ? accumulate_pairwise!(add_sum, out, v) : copyto!(out, v) end -function _cumsum!(out::AbstractArray, v, dim, ::ArithmeticUnknown) +function _cumsum!(out::ArrayLike, v, dim, ::ArithmeticUnknown) _cumsum!(out, v, dim, ArithmeticRounds()) end -function _cumsum!(out::AbstractArray{T}, v, dim, ::ArithmeticStyle) where {T} +function _cumsum!(out::ArrayLike, v, dim, ::ArithmeticStyle) dim == 1 ? accumulate!(add_sum, out, v) : copyto!(out, v) end @@ -92,7 +92,7 @@ function cumsum(A::AbstractArray{T}; dims::Integer) where T end """ - cumsum(x::AbstractVector) + cumsum(x::ArrayLike{1}) Cumulative sum a vector. 
See also [`cumsum!`](@ref) to use a preallocated output array, both for performance and to control the precision of the @@ -113,7 +113,8 @@ julia> cumsum([fill(1, 2) for i in 1:3]) [3, 3] ``` """ -cumsum(x::AbstractVector) = cumsum(x, dims=1) +cumsum(x::ArrayLike{1}) = cumsum(x, dims=1) +cumsum(x::AbstractVector) = cumsum(x, dims = 1) # disambiguation """ @@ -122,16 +123,16 @@ cumsum(x::AbstractVector) = cumsum(x, dims=1) Cumulative product of `A` along the dimension `dims`, storing the result in `B`. See also [`cumprod`](@ref). """ -cumprod!(B::AbstractArray{T}, A; dims::Integer) where {T} = +cumprod!(B::ArrayLike, A; dims::Integer) = accumulate!(mul_prod, B, A, dims=dims) """ - cumprod!(y::AbstractVector, x::AbstractVector) + cumprod!(y::ArrayLike{1}, x::ArrayLike{1}) Cumulative product of a vector `x`, storing the result in `y`. See also [`cumprod`](@ref). """ -cumprod!(y::AbstractVector, x::AbstractVector) = cumprod!(y, x, dims=1) +cumprod!(y::ArrayLike{1}, x::ArrayLike{1}) = cumprod!(y, x, dims=1) """ cumprod(A; dims::Integer) @@ -158,12 +159,12 @@ julia> cumprod(a, dims=2) 4 20 120 ``` """ -function cumprod(A::AbstractArray; dims::Integer) +function cumprod(A::ArrayLike; dims::Integer) return accumulate(mul_prod, A, dims=dims) end """ - cumprod(x::AbstractVector) + cumprod(x::ArrayLike{1}) Cumulative product of a vector. See also [`cumprod!`](@ref) to use a preallocated output array, both for performance and @@ -184,7 +185,7 @@ julia> cumprod([fill(1//3, 2, 2) for i in 1:3]) [4//27 4//27; 4//27 4//27] ``` """ -cumprod(x::AbstractVector) = cumprod(x, dims=1) +cumprod(x::ArrayLike{1}) = cumprod(x, dims=1) """ @@ -304,13 +305,13 @@ function _accumulate!(op, B, A, dims::Nothing, init::Union{Nothing, Some}) throw(ArgumentError("Keyword argument dims must be provided for multidimensional arrays")) end -function _accumulate!(op, B, A::AbstractVector, dims::Nothing, init::Nothing) +function _accumulate!(op, B, A::ArrayLike{1}, dims::Nothing, init::Nothing) isempty(A) && return B v1 = reduce_first(op, first(A)) _accumulate1!(op, B, v1, A, 1) end -function _accumulate!(op, B, A::AbstractVector, dims::Nothing, init::Some) +function _accumulate!(op, B, A::ArrayLike{1}, dims::Nothing, init::Some) isempty(A) && return B v1 = op(something(init), first(A)) _accumulate1!(op, B, v1, A, 1) @@ -372,7 +373,7 @@ end B end -function _accumulate1!(op, B, v1, A::AbstractVector, dim::Integer) +function _accumulate1!(op, B, v1, A::ArrayLike{1}, dim::Integer) dim > 0 || throw(ArgumentError("dim must be a positive integer")) inds = LinearIndices(A) inds == LinearIndices(B) || throw(DimensionMismatch("LinearIndices of A and B don't match")) diff --git a/base/array.jl b/base/array.jl index cdffc3798bf5d..6e8c15a6d24f1 100644 --- a/base/array.jl +++ b/base/array.jl @@ -36,6 +36,7 @@ const AbstractMatrix{T} = AbstractArray{T,2} Union type of [`AbstractVector{T}`](@ref) and [`AbstractMatrix{T}`](@ref). 
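The `VectorOrMatrixLike` alias introduced just below plays the same role for the eltype-free hierarchy. The relations sketched here are illustrative only; they follow from this PR's declaration `AbstractArray{T,N} <: ArrayLike{N}` and do not run on a released Julia:

```julia
AbstractVector{Int} <: ArrayLike{1}          # true: every AbstractArray is an ArrayLike
AbstractVecOrMat{Int} <: VectorOrMatrixLike  # true: the typed union sits inside the new one
Matrix <: VectorOrMatrixLike                 # true
# VectorOrMatrixLike additionally covers 1-d and 2-d array-likes with no declared eltype.
```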
""" const AbstractVecOrMat{T} = Union{AbstractVector{T}, AbstractMatrix{T}} +const VectorOrMatrixLike = Union{ArrayLike{1},ArrayLike{2}} const RangeIndex = Union{Int, AbstractRange{Int}, AbstractUnitRange{Int}} const DimOrInd = Union{Integer, AbstractUnitRange} const IntOrInd = Union{Int, AbstractUnitRange} @@ -513,7 +514,7 @@ for (fname, felt) in ((:zeros, :zero), (:ones, :one)) end end -function _one(unit::T, x::AbstractMatrix) where T +function _one(unit::T, x::ArrayLike{2}) where T require_one_based_indexing(x) m,n = size(x) m==n || throw(DimensionMismatch("multiplicative identity defined only for square matrices")) @@ -530,7 +531,7 @@ oneunit(x::AbstractMatrix{T}) where {T} = _one(oneunit(T), x) ## Conversions ## -convert(::Type{T}, a::AbstractArray) where {T<:Array} = a isa T ? a : T(a) +convert(::Type{T}, a::ArrayLike) where {T<:Array} = a isa T ? a : T(a) promote_rule(a::Type{Array{T,n}}, b::Type{Array{S,n}}) where {T,n,S} = el_same(promote_type(T,S), a, b) @@ -538,8 +539,8 @@ promote_rule(a::Type{Array{T,n}}, b::Type{Array{S,n}}) where {T,n,S} = el_same(p if nameof(@__MODULE__) === :Base # avoid method overwrite # constructors should make copies -Array{T,N}(x::AbstractArray{S,N}) where {T,N,S} = copyto!(Array{T,N}(undef, size(x)), x) -AbstractArray{T,N}(A::AbstractArray{S,N}) where {T,N,S} = copyto!(similar(A,T), A) +Array{T,N}(x::ArrayLike{N}) where {T,N} = copyto!(Array{T,N}(undef, size(x)), x) +AbstractArray{T,N}(A::ArrayLike{N}) where {T,N} = copyto!(similar(A,T), A) end ## copying iterators to containers @@ -572,10 +573,10 @@ function _collect(::Type{T}, itr, isz::SizeUnknown) where T end # make a collection similar to `c` and appropriate for collecting `itr` -_similar_for(c::AbstractArray, ::Type{T}, itr, ::SizeUnknown) where {T} = similar(c, T, 0) -_similar_for(c::AbstractArray, ::Type{T}, itr, ::HasLength) where {T} = +_similar_for(c::ArrayLike, ::Type{T}, itr, ::SizeUnknown) where {T} = similar(c, T, 0) +_similar_for(c::ArrayLike, ::Type{T}, itr, ::HasLength) where {T} = similar(c, T, Int(length(itr)::Integer)) -_similar_for(c::AbstractArray, ::Type{T}, itr, ::HasShape) where {T} = +_similar_for(c::ArrayLike, ::Type{T}, itr, ::HasShape) where {T} = similar(c, T, axes(itr)) _similar_for(c, ::Type{T}, itr, isz) where {T} = similar(c, T) @@ -602,7 +603,7 @@ julia> collect(1:2:13) """ collect(itr) = _collect(1:1 #= Array =#, itr, IteratorEltype(itr), IteratorSize(itr)) -collect(A::AbstractArray) = _collect_indices(axes(A), A) +collect(A::ArrayLike) = _collect_indices(axes(A), A) collect_similar(cont, itr) = _collect(cont, itr, IteratorEltype(itr), IteratorSize(itr)) @@ -683,7 +684,7 @@ function _collect(c, itr, ::EltypeUnknown, isz::Union{HasLength,HasShape}) collect_to_with_first!(_similar_for(c, typeof(v1), itr, isz), v1, itr, st) end -function collect_to_with_first!(dest::AbstractArray, v1, itr, st) +function collect_to_with_first!(dest::ArrayLike, v1, itr, st) i1 = first(LinearIndices(dest)) dest[i1] = v1 return collect_to!(dest, itr, i1+1, st) @@ -827,7 +828,7 @@ function setindex! 
end (@_inline_meta; arrayset($(Expr(:boundscheck)), A, convert(T,x)::T, i1, i2, I...)) # This is redundant with the abstract fallbacks but needed and helpful for bootstrap -function setindex!(A::Array, X::AbstractArray, I::AbstractVector{Int}) +function setindex!(A::Array, X::ArrayLike, I::AbstractVector{Int}) @_propagate_inbounds_meta @boundscheck setindex_shape_check(X, length(I)) require_one_based_indexing(X) @@ -948,7 +949,7 @@ Use [`push!`](@ref) to add individual items to `collection` which are not alread themselves in another collection. The result of the preceding example is equivalent to `push!([1, 2, 3], 4, 5, 6)`. """ -function append!(a::Vector, items::AbstractVector) +function append!(a::Vector, items::ArrayLike{1}) itemindices = eachindex(items) n = length(itemindices) _growend!(a, n) @@ -956,8 +957,8 @@ function append!(a::Vector, items::AbstractVector) return a end -append!(a::AbstractVector, iter) = _append!(a, IteratorSize(iter), iter) -push!(a::AbstractVector, iter...) = append!(a, iter) +append!(a::ArrayLike{1}, iter) = _append!(a, IteratorSize(iter), iter) +push!(a::ArrayLike{1}, iter...) = append!(a, iter) function _append!(a, ::Union{HasLength,HasShape}, iter) n = length(a) @@ -992,7 +993,7 @@ julia> prepend!([3],[1,2]) """ function prepend! end -function prepend!(a::Vector, items::AbstractVector) +function prepend!(a::Vector, items::ArrayLike{1}) itemindices = eachindex(items) n = length(itemindices) _growbeg!(a, n) @@ -1272,7 +1273,7 @@ Stacktrace: ``` """ deleteat!(a::Vector, inds) = _deleteat!(a, inds) -deleteat!(a::Vector, inds::AbstractVector) = _deleteat!(a, to_indices(a, (inds,))[1]) +deleteat!(a::Vector, inds::ArrayLike{1}) = _deleteat!(a, to_indices(a, (inds,))[1]) function _deleteat!(a::Vector, inds) n = length(a) @@ -1510,7 +1511,7 @@ julia> reverse(A, 3, 5) 3 ``` """ -function reverse(A::AbstractVector, s=first(LinearIndices(A)), n=last(LinearIndices(A))) +function reverse(A::ArrayLike{1}, s=first(LinearIndices(A)), n=last(LinearIndices(A))) B = similar(A) for i = first(LinearIndices(A)):s-1 B[i] = A[i] @@ -1525,9 +1526,9 @@ function reverse(A::AbstractVector, s=first(LinearIndices(A)), n=last(LinearIndi end # to resolve ambiguity with reverse(A; dims) -reverse(A::Vector) = invoke(reverse, Tuple{AbstractVector}, A) +reverse(A::Vector) = invoke(reverse, Tuple{ArrayLike{1}}, A) -function reverseind(a::AbstractVector, i::Integer) +function reverseind(a::ArrayLike{1}, i::Integer) li = LinearIndices(a) first(li) + last(li) - i end @@ -1558,7 +1559,7 @@ julia> A 1 ``` """ -function reverse!(v::AbstractVector, s=first(LinearIndices(v)), n=last(LinearIndices(v))) +function reverse!(v::ArrayLike{1}, s=first(LinearIndices(v)), n=last(LinearIndices(v))) liv = LinearIndices(v) if n <= s # empty case; ok elseif !(first(liv) ≤ s ≤ last(liv)) @@ -1697,7 +1698,7 @@ function findfirst(A) end # Needed for bootstrap, and allows defining only an optimized findnext method -findfirst(A::Union{AbstractArray, AbstractString}) = findnext(A, first(keys(A))) +findfirst(A::Union{ArrayLike, AbstractString}) = findnext(A, first(keys(A))) """ findnext(predicate::Function, A, i) @@ -1779,7 +1780,7 @@ function findfirst(testf::Function, A) end # Needed for bootstrap, and allows defining only an optimized findnext method -findfirst(testf::Function, A::Union{AbstractArray, AbstractString}) = +findfirst(testf::Function, A::Union{ArrayLike, AbstractString}) = findnext(testf, A, first(keys(A))) function findfirst(p::Union{Fix2{typeof(isequal),T},Fix2{typeof(==),T}}, r::StepRange{T,S}) where 
{T,S} @@ -1879,7 +1880,7 @@ function findlast(A) end # Needed for bootstrap, and allows defining only an optimized findprev method -findlast(A::Union{AbstractArray, AbstractString}) = findprev(A, last(keys(A))) +findlast(A::Union{ArrayLike, AbstractString}) = findprev(A, last(keys(A))) """ findprev(predicate::Function, A, i) @@ -1966,7 +1967,7 @@ function findlast(testf::Function, A) end # Needed for bootstrap, and allows defining only an optimized findprev method -findlast(testf::Function, A::Union{AbstractArray, AbstractString}) = +findlast(testf::Function, A::Union{ArrayLike, AbstractString}) = findprev(testf, A, last(keys(A))) """ @@ -2245,7 +2246,7 @@ julia> indexin(b, a) 3 ``` """ -function indexin(a, b::AbstractArray) +function indexin(a, b::ArrayLike) inds = keys(b) bdict = Dict{eltype(b),eltype(inds)}() for (val, ind) in zip(b, inds) @@ -2256,7 +2257,7 @@ function indexin(a, b::AbstractArray) ] end -function _findin(a::Union{AbstractArray, Tuple}, b) +function _findin(a::Union{ArrayLike, Tuple}, b) ind = Vector{eltype(keys(a))}() bset = Set(b) @inbounds for (i,ai) in pairs(a) @@ -2268,7 +2269,7 @@ end # If two collections are already sorted, _findin can be computed with # a single traversal of the two collections. This is much faster than # using a hash table (although it has the same complexity). -function _sortedfindin(v::Union{AbstractArray, Tuple}, w) +function _sortedfindin(v::Union{ArrayLike, Tuple}, w) viter, witer = keys(v), eachindex(w) out = eltype(viter)[] vy, wy = iterate(viter), iterate(witer) @@ -2319,7 +2320,7 @@ function findall(pred::Fix2{typeof(in),<:Union{Array{<:Real},Real}}, x::Array{<: end # issorted fails for some element types so the method above has to be restricted # to element with isless/< defined. -findall(pred::Fix2{typeof(in)}, x::Union{AbstractArray, Tuple}) = _findin(x, pred.x) +findall(pred::Fix2{typeof(in)}, x::Union{ArrayLike, Tuple}) = _findin(x, pred.x) # Copying subregions function indcopy(sz::Dims, I::Vector) @@ -2347,7 +2348,7 @@ end ## Filter ## """ - filter(f, a::AbstractArray) + filter(f, a::ArrayLike) Return a copy of `a`, removing elements for which `f` is `false`. The function `f` is passed one argument. @@ -2378,7 +2379,7 @@ function filter(f, a::Array{T, N}) where {T, N} b end -function filter(f, a::AbstractArray) +function filter(f, a::ArrayLike) (IndexStyle(a) != IndexLinear()) && return a[map(f, a)::AbstractArray{Bool}] j = 1 @@ -2396,7 +2397,7 @@ function filter(f, a::AbstractArray) end """ - filter!(f, a::AbstractVector) + filter!(f, a::ArrayLike{1}) Update `a`, removing elements for which `f` is `false`. The function `f` is passed one argument. @@ -2412,7 +2413,7 @@ julia> filter!(isodd, Vector(1:10)) 9 ``` """ -function filter!(f, a::AbstractVector) +function filter!(f, a::ArrayLike{1}) j = firstindex(a) for ai in a @inbounds a[j] = ai @@ -2443,7 +2444,7 @@ end _grow_filter!(seen) = _unique_filter!(∉, push!, seen) _shrink_filter!(keep) = _unique_filter!(∈, pop!, keep) -function _grow!(pred!, v::AbstractVector, itrs) +function _grow!(pred!, v::ArrayLike{1}, itrs) filter!(pred!, v) # uniquify v for itr in itrs mapfilter(pred!, push!, itr, v) @@ -2457,17 +2458,17 @@ union!(v::AbstractVector{T}, itrs...) where {T} = symdiff!(v::AbstractVector{T}, itrs...) where {T} = _grow!(_shrink_filter!(symdiff!(Set{T}(), v, itrs...)), v, itrs) -function _shrink!(shrinker!, v::AbstractVector, itrs) +function _shrink!(shrinker!, v::ArrayLike{1}, itrs) seen = Set{eltype(v)}() filter!(_grow_filter!(seen), v) shrinker!(seen, itrs...) 
filter!(_in(seen), v) end -intersect!(v::AbstractVector, itrs...) = _shrink!(intersect!, v, itrs) -setdiff!( v::AbstractVector, itrs...) = _shrink!(setdiff!, v, itrs) +intersect!(v::ArrayLike{1}, itrs...) = _shrink!(intersect!, v, itrs) +setdiff!( v::ArrayLike{1}, itrs...) = _shrink!(setdiff!, v, itrs) -vectorfilter(f, v::AbstractVector) = filter(f, v) # TODO: do we want this special case? +vectorfilter(f, v::ArrayLike{1}) = filter(f, v) # TODO: do we want this special case? vectorfilter(f, v) = [x for x in v if f(x)] function _shrink(shrinker!, itr, itrs) diff --git a/base/arraymath.jl b/base/arraymath.jl index dfea81dd9d35f..da23a212996de 100644 --- a/base/arraymath.jl +++ b/base/arraymath.jl @@ -27,14 +27,14 @@ julia> A conj!(A::AbstractArray{<:Number}) = (@inbounds broadcast!(conj, A, A); A) for f in (:-, :conj, :real, :imag) - @eval ($f)(A::AbstractArray) = broadcast_preserving_zero_d($f, A) + @eval ($f)(A::ArrayLike) = broadcast_preserving_zero_d($f, A) end ## Binary arithmetic operators ## for f in (:+, :-) - @eval function ($f)(A::AbstractArray, B::AbstractArray) + @eval function ($f)(A::ArrayLike, B::ArrayLike) promote_shape(A, B) # check size compatibility broadcast_preserving_zero_d($f, A, B) end @@ -49,10 +49,10 @@ end for f in (:/, :\, :*) if f !== :/ - @eval ($f)(A::Number, B::AbstractArray) = broadcast_preserving_zero_d($f, A, B) + @eval ($f)(A::Number, B::ArrayLike) = broadcast_preserving_zero_d($f, A, B) end if f !== :\ - @eval ($f)(A::AbstractArray, B::Number) = broadcast_preserving_zero_d($f, A, B) + @eval ($f)(A::ArrayLike, B::Number) = broadcast_preserving_zero_d($f, A, B) end end @@ -137,7 +137,7 @@ julia> rotl90(a) 1 3 ``` """ -function rotl90(A::AbstractMatrix) +function rotl90(A::ArrayLike{2}) ind1, ind2 = axes(A) B = similar(A, (ind2,ind1)) n = first(ind2)+last(ind2) @@ -165,7 +165,7 @@ julia> rotr90(a) 4 2 ``` """ -function rotr90(A::AbstractMatrix) +function rotr90(A::ArrayLike{2}) ind1, ind2 = axes(A) B = similar(A, (ind2,ind1)) m = first(ind1)+last(ind1) @@ -192,7 +192,7 @@ julia> rot180(a) 2 1 ``` """ -function rot180(A::AbstractMatrix) +function rot180(A::ArrayLike{2}) B = similar(A) ind1, ind2 = axes(A,1), axes(A,2) m, n = first(ind1)+last(ind1), first(ind2)+last(ind2) @@ -235,7 +235,7 @@ julia> rotl90(a,4) 3 4 ``` """ -function rotl90(A::AbstractMatrix, k::Integer) +function rotl90(A::ArrayLike{2}, k::Integer) k = mod(k, 4) k == 1 ? rotl90(A) : k == 2 ? rot180(A) : @@ -275,7 +275,7 @@ julia> rotr90(a,4) 3 4 ``` """ -rotr90(A::AbstractMatrix, k::Integer) = rotl90(A,-k) +rotr90(A::ArrayLike{2}, k::Integer) = rotl90(A,-k) """ rot180(A, k) @@ -300,4 +300,4 @@ julia> rot180(a,2) 3 4 ``` """ -rot180(A::AbstractMatrix, k::Integer) = mod(k, 2) == 1 ? rot180(A) : copy(A) +rot180(A::ArrayLike{2}, k::Integer) = mod(k, 2) == 1 ? rot180(A) : copy(A) diff --git a/base/arrayshow.jl b/base/arrayshow.jl index 9c7ecf564b44d..d543bc91657e7 100644 --- a/base/arrayshow.jl +++ b/base/arrayshow.jl @@ -57,8 +57,8 @@ Parameter `sep::Integer` is number of spaces to put between elements. Alignment is reported as a vector of (left,right) tuples, one for each column going across the screen. """ -function alignment(io::IO, X::AbstractVecOrMat, - rows::AbstractVector, cols::AbstractVector, +function alignment(io::IO, X::VectorOrMatrixLike, + rows::ArrayLike{1}, cols::ArrayLike{1}, cols_if_complete::Integer, cols_otherwise::Integer, sep::Integer) a = Tuple{Int, Int}[] for j in cols # need to go down each column one at a time @@ -94,8 +94,8 @@ is specified as string sep. 
`print_matrix_row` will also respect compact output for elements. """ function print_matrix_row(io::IO, - X::AbstractVecOrMat, A::Vector, - i::Integer, cols::AbstractVector, sep::AbstractString) + X::VectorOrMatrixLike, A::Vector, + i::Integer, cols::ArrayLike{1}, sep::AbstractString) for (k, j) = enumerate(cols) k > length(A) && break if isassigned(X,Int(i),Int(j)) # isassigned accepts only `Int` indices @@ -151,7 +151,7 @@ string post (printed at the end of the last row of the matrix). Also options to use different ellipsis characters hdots, vdots, ddots. These are repeated every hmod or vmod elements. """ -function print_matrix(io::IO, X::AbstractVecOrMat, +function print_matrix(io::IO, X::VectorOrMatrixLike, pre::AbstractString = " ", # pre-matrix string sep::AbstractString = " ", # separator between elements post::AbstractString = "", # post-matrix string @@ -256,7 +256,7 @@ end # typeinfo agnostic # n-dimensional arrays -function show_nd(io::IO, a::AbstractArray, print_matrix::Function, label_slices::Bool) +function show_nd(io::IO, a::ArrayLike, print_matrix::Function, label_slices::Bool) limit::Bool = get(io, :limit, false) if isempty(a) return @@ -303,16 +303,16 @@ end # print_array: main helper functions for show(io, text/plain, array) # typeinfo agnostic # Note that this is for showing the content inside the array, and for `MIME"text/plain". -# There are `show(::IO, ::A) where A<:AbstractArray` methods that don't use this +# There are `show(::IO, ::A) where A<:ArrayLike` methods that don't use this # e.g. show_vector, show_zero_dim -print_array(io::IO, X::AbstractArray{<:Any, 0}) = +print_array(io::IO, X::ArrayLike{0}) = isassigned(X) ? show(io, X[]) : print(io, undef_ref_str) -print_array(io::IO, X::AbstractVecOrMat) = print_matrix(io, X) -print_array(io::IO, X::AbstractArray) = show_nd(io, X, print_matrix, true) +print_array(io::IO, X::VectorOrMatrixLike) = print_matrix(io, X) +print_array(io::IO, X::ArrayLike) = show_nd(io, X, print_matrix, true) # typeinfo aware -# implements: show(io::IO, ::MIME"text/plain", X::AbstractArray) -function show(io::IO, ::MIME"text/plain", X::AbstractArray) +# implements: show(io::IO, ::MIME"text/plain", X::ArrayLike) +function show(io::IO, ::MIME"text/plain", X::ArrayLike) # 0) show summary before setting :compact summary(io, X) isempty(X) && return @@ -354,10 +354,10 @@ end # typeinfo agnostic """ -`_show_nonempty(io, X::AbstractMatrix, prefix)` prints matrix X with opening and closing square brackets, +`_show_nonempty(io, X::ArrayLike{2}, prefix)` prints matrix X with opening and closing square brackets, preceded by `prefix`, supposed to encode the type of the elements. 
""" -function _show_nonempty(io::IO, X::AbstractMatrix, prefix::String) +function _show_nonempty(io::IO, X::ArrayLike{2}, prefix::String) @assert !isempty(X) limit = get(io, :limit, false)::Bool indr, indc = axes(X,1), axes(X,2) @@ -401,21 +401,21 @@ function _show_nonempty(io::IO, X::AbstractMatrix, prefix::String) end -_show_nonempty(io::IO, X::AbstractArray, prefix::String) = +_show_nonempty(io::IO, X::ArrayLike, prefix::String) = show_nd(io, X, (io, slice) -> _show_nonempty(io, slice, prefix), false) # a specific call path is used to show vectors (show_vector) -_show_nonempty(::IO, ::AbstractVector, ::String) = - error("_show_nonempty(::IO, ::AbstractVector, ::String) is not implemented") +_show_nonempty(::IO, ::ArrayLike{1}, ::String) = + error("_show_nonempty(::IO, ::ArrayLike{1}, ::String) is not implemented") -_show_nonempty(io::IO, X::AbstractArray{T,0} where T, prefix::String) = print_array(io, X) +_show_nonempty(io::IO, X::ArrayLike{0}, prefix::String) = print_array(io, X) # NOTE: it's not clear how this method could use the :typeinfo attribute _show_empty(io::IO, X::Array{T}) where {T} = print(io, "Array{", T, "}(undef,", join(size(X),','), ')') _show_empty(io, X) = nothing # by default, we don't know this constructor # typeinfo aware (necessarily) -function show(io::IO, X::AbstractArray) +function show(io::IO, X::ArrayLike) ndims(X) == 0 && return show_zero_dim(io, X) ndims(X) == 1 && return show_vector(io, X) prefix = typeinfo_prefix(io, X) @@ -468,7 +468,7 @@ end # returning Any, as this would cause incorrect printing in e.g. `Vector[Any[1]]`, # because eltype(Vector) == Any so `Any` wouldn't be printed in `Any[1]`) typeinfo_eltype(typeinfo) = nothing # element type not precisely known -typeinfo_eltype(typeinfo::Type{<:AbstractArray{T}}) where {T} = eltype(typeinfo) +typeinfo_eltype(typeinfo::Type{<:AbstractArray{T}}) where {T} = eltype(typeinfo) # specific typeinfo_eltype(typeinfo::Type{<:AbstractDict{K,V}}) where {K,V} = eltype(typeinfo) typeinfo_eltype(typeinfo::Type{<:AbstractSet{T}}) where {T} = eltype(typeinfo) diff --git a/base/bitarray.jl b/base/bitarray.jl index 7c98256bc2017..77e376c574424 100644 --- a/base/bitarray.jl +++ b/base/bitarray.jl @@ -493,8 +493,8 @@ function Array{T,N}(B::BitArray{N}) where {T,N} return A end -BitArray(A::AbstractArray{<:Any,N}) where {N} = BitArray{N}(A) -function BitArray{N}(A::AbstractArray{T,N}) where N where T +BitArray(A::ArrayLike{N}) where {N} = BitArray{N}(A) +function BitArray{N}(A::ArrayLike{N}) where N B = BitArray(undef, size(A)) Bc = B.chunks l = length(B) @@ -566,7 +566,7 @@ julia> BitArray(x+y == 3 for x = 1:2 for y = 1:3) """ BitArray(itr) = gen_bitarray(IteratorSize(itr), itr) -convert(T::Type{<:BitArray}, a::AbstractArray) = a isa T ? a : T(a) +convert(T::Type{<:BitArray}, a::ArrayLike) = a isa T ? 
a : T(a) # generic constructor from an iterable without compile-time info # (we pass start(itr) explicitly to avoid a type-instability with filters) @@ -687,17 +687,17 @@ end indexoffset(i) = first(i)-1 indexoffset(::Colon) = 0 -@propagate_inbounds function setindex!(B::BitArray, X::AbstractArray, J0::Union{Colon,UnitRange{Int}}) +@propagate_inbounds function setindex!(B::BitArray, X::ArrayLike, J0::Union{Colon,UnitRange{Int}}) _setindex!(IndexStyle(B), B, X, to_indices(B, (J0,))[1]) end # Assigning an array of bools is more complicated, but we can still do some # work on chunks by combining X and I 64 bits at a time to improve perf by ~40% -@inline function setindex!(B::BitArray, X::AbstractArray, I::BitArray) +@inline function setindex!(B::BitArray, X::ArrayLike, I::BitArray) @boundscheck checkbounds(B, I) _unsafe_setindex!(B, X, I) end -function _unsafe_setindex!(B::BitArray, X::AbstractArray, I::BitArray) +function _unsafe_setindex!(B::BitArray, X::ArrayLike, I::BitArray) Bc = B.chunks Ic = I.chunks length(Bc) == length(Ic) || throw_boundserror(B, I) @@ -1010,7 +1010,7 @@ end const _default_bit_splice = BitVector() -function splice!(B::BitVector, r::Union{UnitRange{Int}, Integer}, ins::AbstractArray = _default_bit_splice) +function splice!(B::BitVector, r::Union{UnitRange{Int}, Integer}, ins::ArrayLike = _default_bit_splice) n = length(B) i_f = first(r) i_l = last(r) @@ -1700,7 +1700,7 @@ for (T, f) in ((:(Union{typeof(&), typeof(*), typeof(min)}), :(&)), end # If we were able to specialize the function to a known bitwise operation, -# map across the chunks. Otherwise, fall-back to the AbstractArray method that +# map across the chunks. Otherwise, fall-back to the ArrayLike method that # iterates bit-by-bit. function bit_map!(f::F, dest::BitArray, A::BitArray) where F size(A) == size(dest) || throw(DimensionMismatch("sizes of dest and A must match")) diff --git a/base/boot.jl b/base/boot.jl index 2fdfc2e8cacb9..564f1093a835b 100644 --- a/base/boot.jl +++ b/base/boot.jl @@ -49,7 +49,8 @@ #end #const nothing = Nothing() -#abstract type AbstractArray{T,N} end +#abstract type ArrayLike{N} end +#abstract type AbstractArray{T,N} <: ArrayLike{N} end #abstract type DenseArray{T,N} <: AbstractArray{T,N} end #mutable struct Array{T,N} <: DenseArray{T,N} @@ -146,7 +147,7 @@ export # key types Any, DataType, Vararg, NTuple, Tuple, Type, UnionAll, TypeVar, Union, Nothing, Cvoid, - AbstractArray, DenseArray, NamedTuple, + ArrayLike, AbstractArray, DenseArray, NamedTuple, # special objects Function, Method, Module, Symbol, Task, Array, UndefInitializer, undef, WeakRef, VecElement, @@ -427,9 +428,9 @@ Array{T,1}() where {T} = Array{T,1}(undef, 0) (::Type{Array{T,N} where T})(x::AbstractArray{S,N}) where {S,N} = Array{S,N}(x) Array(A::AbstractArray{T,N}) where {T,N} = Array{T,N}(A) -Array{T}(A::AbstractArray{S,N}) where {T,N,S} = Array{T,N}(A) +Array{T}(A::ArrayLike{N}) where {T,N} = Array{T,N}(A) -AbstractArray{T}(A::AbstractArray{S,N}) where {T,S,N} = AbstractArray{T,N}(A) +AbstractArray{T}(A::ArrayLike{N}) where {T,N} = AbstractArray{T,N}(A) # primitive Symbol constructors eval(Core, :(function Symbol(s::String) diff --git a/base/broadcast.jl b/base/broadcast.jl index e68253c03ec24..a8890faae2467 100644 --- a/base/broadcast.jl +++ b/base/broadcast.jl @@ -50,14 +50,14 @@ BroadcastStyle(::Type{Union{}}) = Unknown() # ambiguity resolution """ `Broadcast.AbstractArrayStyle{N} <: BroadcastStyle` is the abstract supertype for any style -associated with an `AbstractArray` type. 
-The `N` parameter is the dimensionality, which can be handy for AbstractArray types +associated with an `ArrayLike` type. +The `N` parameter is the dimensionality, which can be handy for ArrayLike types that only support specific dimensionalities: struct SparseMatrixStyle <: Broadcast.AbstractArrayStyle{2} end Base.BroadcastStyle(::Type{<:SparseMatrixCSC}) = SparseMatrixStyle() -For `AbstractArray` types that support arbitrary dimensionality, `N` can be set to `Any`: +For `ArrayLike` types that support arbitrary dimensionality, `N` can be set to `Any`: struct MyArrayStyle <: Broadcast.AbstractArrayStyle{Any} end Base.BroadcastStyle(::Type{<:MyArray}) = MyArrayStyle() @@ -79,18 +79,18 @@ abstract type AbstractArrayStyle{N} <: BroadcastStyle end """ `Broadcast.ArrayStyle{MyArrayType}()` is a [`BroadcastStyle`](@ref) indicating that an object behaves as an array for broadcasting. It presents a simple way to construct -[`Broadcast.AbstractArrayStyle`](@ref)s for specific `AbstractArray` container types. +[`Broadcast.AbstractArrayStyle`](@ref)s for specific `ArrayLike` container types. Broadcast styles created this way lose track of dimensionality; if keeping track is important for your type, you should create your own custom [`Broadcast.AbstractArrayStyle`](@ref). """ -struct ArrayStyle{A<:AbstractArray} <: AbstractArrayStyle{Any} end +struct ArrayStyle{A<:ArrayLike} <: AbstractArrayStyle{Any} end ArrayStyle{A}(::Val) where A = ArrayStyle{A}() """ `Broadcast.DefaultArrayStyle{N}()` is a [`BroadcastStyle`](@ref) indicating that an object behaves as an `N`-dimensional array for broadcasting. Specifically, `DefaultArrayStyle` is used for any -`AbstractArray` type that hasn't defined a specialized style, and in the absence of +`ArrayLike` type that hasn't defined a specialized style, and in the absence of overrides from other `broadcast` arguments the resulting output type is `Array`. When there are multiple inputs to `broadcast`, `DefaultArrayStyle` "loses" to any other [`Broadcast.ArrayStyle`](@ref). """ @@ -99,7 +99,7 @@ DefaultArrayStyle(::Val{N}) where N = DefaultArrayStyle{N}() DefaultArrayStyle{M}(::Val{N}) where {N,M} = DefaultArrayStyle{N}() const DefaultVectorStyle = DefaultArrayStyle{1} const DefaultMatrixStyle = DefaultArrayStyle{2} -BroadcastStyle(::Type{<:AbstractArray{T,N}}) where {T,N} = DefaultArrayStyle{N}() +BroadcastStyle(::Type{<:ArrayLike{N}}) where {N} = DefaultArrayStyle{N}() BroadcastStyle(::Type{T}) where {T} = DefaultArrayStyle{ndims(T)}() # `ArrayConflict` is an internal type signaling that two or more different `AbstractArrayStyle` @@ -164,7 +164,7 @@ BroadcastStyle(a::AbstractArrayStyle{M}, ::DefaultArrayStyle{N}) where {M,N} = # copyto!(dest::DestType, bc::Broadcasted{Nothing}) # that specialize on `DestType` to be easily disambiguated from # methods that instead specialize on `BroadcastStyle`, -# copyto!(dest::AbstractArray, bc::Broadcasted{MyStyle}) +# copyto!(dest::ArrayLike, bc::Broadcasted{MyStyle}) struct Broadcasted{Style<:Union{Nothing,BroadcastStyle}, Axes, F, Args<:Tuple} f::F @@ -439,8 +439,8 @@ result_style(s1, s2) = result_join(s1, s2, BroadcastStyle(s1, s2), BroadcastStyl result_join(::Any, ::Any, ::Unknown, ::Unknown) = Unknown() result_join(::Any, ::Any, ::Unknown, s::BroadcastStyle) = s result_join(::Any, ::Any, s::BroadcastStyle, ::Unknown) = s -# For AbstractArray types with specialized broadcasting and undefined precedence rules, -# we have to signal conflict. 
Because ArrayConflict is a subtype of AbstractArray, +# For ArrayLike types with specialized broadcasting and undefined precedence rules, +# we have to signal conflict. Because ArrayConflict is a subtype of ArrayLike, # this will "poison" any future operations (if we instead returned `DefaultArrayStyle`, then for # 3-array broadcasting the returned type would depend on argument order). result_join(::AbstractArrayStyle, ::AbstractArrayStyle, ::Unknown, ::Unknown) = @@ -575,7 +575,7 @@ Base.@propagate_inbounds Base.getindex(bc::Broadcasted) = bc[CartesianIndex(())] Index into `A` with `I`, collapsing broadcasted indices to their singleton indices as appropriate. """ -Base.@propagate_inbounds _broadcast_getindex(A::Union{Ref,AbstractArray{<:Any,0},Number}, I) = A[] # Scalar-likes can just ignore all indices +Base.@propagate_inbounds _broadcast_getindex(A::Union{Ref,ArrayLike{0},Number}, I) = A[] # Scalar-likes can just ignore all indices Base.@propagate_inbounds _broadcast_getindex(::Ref{Type{T}}, I) where {T} = T # Tuples are statically known to be singleton or vector-like Base.@propagate_inbounds _broadcast_getindex(A::Tuple{Any}, I) = A[1] @@ -587,7 +587,7 @@ Base.@propagate_inbounds _broadcast_getindex(A, I) = A[newindex(A, I)] # ahead of time (often when the size checks aren't able to be lifted out of the loop). # The Extruded struct computes that information ahead of time and stores it as a pair # of tuples to optimize indexing later. This is most commonly needed for `Array` and -# other `AbstractArray` subtypes that wrap `Array` and dynamically ask it for its size. +# other `ArrayLike` subtypes that wrap `Array` and dynamically ask it for its size. struct Extruded{T, K, D} x::T keeps::K # A tuple of booleans, specifying which indices should be passed normally @@ -595,7 +595,7 @@ struct Extruded{T, K, D} end @inline axes(b::Extruded) = axes(b.x) Base.@propagate_inbounds _broadcast_getindex(b::Extruded, i) = b.x[newindex(i, b.keeps, b.defaults)] -extrude(x::AbstractArray) = Extruded(x, newindexer(x)...) +extrude(x::ArrayLike) = Extruded(x, newindexer(x)...) extrude(x) = x # For Broadcasted @@ -638,7 +638,7 @@ Return either `x` or an object like `x` such that it supports [`axes`](@ref), in If `x` supports iteration, the returned value should have the same `axes` and indexing behaviors as [`collect(x)`](@ref). -If `x` is not an `AbstractArray` but it supports `axes`, indexing, and its type supports +If `x` is not an `ArrayLike` but it supports `axes`, indexing, and its type supports `ndims`, then `broadcastable(::typeof(x))` may be implemented to just return itself. Further, if `x` defines its own [`BroadcastStyle`](@ref), then it must define its `broadcastable` method to return itself for the custom style to have any effect. 
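As an illustrative aside (plain REPL usage, not a hunk of this patch), the rules above mean arrays, numbers, tuples and `Ref`s pass through `broadcastable` unchanged, while scalar-like values are wrapped so broadcasting treats them as 0-dimensional containers:

```julia
using Base.Broadcast: broadcastable

broadcastable([1, 2, 3])   # returns the array itself; ArrayLike inputs pass through
broadcastable((1, 2))      # tuples also pass through unchanged
broadcastable("hello")     # Base.RefValue{String}("hello"), treated as a scalar
broadcastable(Int)         # a Ref wrapper around the type, so types broadcast as scalars
```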
@@ -660,7 +660,7 @@ Base.RefValue{String}("hello") """ broadcastable(x::Union{Symbol,AbstractString,Function,UndefInitializer,Nothing,RoundingMode,Missing,Val,Ptr,Regex,Pair}) = Ref(x) broadcastable(::Type{T}) where {T} = Ref{Type{T}}(T) -broadcastable(x::Union{AbstractArray,Number,Ref,Tuple,Broadcasted}) = x +broadcastable(x::Union{ArrayLike,Number,Ref,Tuple,Broadcasted}) = x # Default to collecting iterables — which will error for non-iterables broadcastable(x) = collect(x) broadcastable(::Union{AbstractDict, NamedTuple}) = throw(ArgumentError("broadcasting over dictionaries and `NamedTuple`s is reserved")) @@ -861,10 +861,10 @@ end ## general `copyto!` methods # The most general method falls back to a method that replaces Style->Nothing # This permits specialization on typeof(dest) without introducing ambiguities -@inline copyto!(dest::AbstractArray, bc::Broadcasted) = copyto!(dest, convert(Broadcasted{Nothing}, bc)) +@inline copyto!(dest::ArrayLike, bc::Broadcasted) = copyto!(dest, convert(Broadcasted{Nothing}, bc)) # Performance optimization for the common identity scalar case: dest .= val -@inline function copyto!(dest::AbstractArray, bc::Broadcasted{<:AbstractArrayStyle{0}}) +@inline function copyto!(dest::ArrayLike, bc::Broadcasted{<:AbstractArrayStyle{0}}) # Typically, we must independently execute bc for every storage location in `dest`, but: # IF we're in the common no-op identity case with no nested args (like `dest .= val`), if bc.f === identity && bc.args isa Tuple{Any} && isflat(bc) @@ -896,10 +896,10 @@ preprocess_args(dest, args::Tuple{Any}) = (preprocess(dest, args[1]),) preprocess_args(dest, args::Tuple{}) = () # Specialize this method if all you want to do is specialize on typeof(dest) -@inline function copyto!(dest::AbstractArray, bc::Broadcasted{Nothing}) +@inline function copyto!(dest::ArrayLike, bc::Broadcasted{Nothing}) axes(dest) == axes(bc) || throwdm(axes(dest), axes(bc)) # Performance optimization: broadcast!(identity, dest, A) is equivalent to copyto!(dest, A) if indices match - if bc.f === identity && bc.args isa Tuple{AbstractArray} # only a single input argument to broadcast! + if bc.f === identity && bc.args isa Tuple{ArrayLike} # only a single input argument to broadcast! A = bc.args[1] if axes(dest) == axes(A) return copyto!(dest, A) @@ -917,7 +917,7 @@ end @inline function copyto!(dest::BitArray, bc::Broadcasted{Nothing}) axes(dest) == axes(bc) || throwdm(axes(dest), axes(bc)) ischunkedbroadcast(dest, bc) && return chunkedcopyto!(dest, bc) - length(dest) < 256 && return invoke(copyto!, Tuple{AbstractArray, Broadcasted{Nothing}}, dest, bc) + length(dest) < 256 && return invoke(copyto!, Tuple{ArrayLike, Broadcasted{Nothing}}, dest, bc) tmp = Vector{Bool}(undef, bitcache_size) destc = dest.chunks cind = 1 diff --git a/base/combinatorics.jl b/base/combinatorics.jl index 225fc9dace8d3..42e83435f1b53 100644 --- a/base/combinatorics.jl +++ b/base/combinatorics.jl @@ -64,7 +64,7 @@ isperm(p::Tuple{Int}) = p[1] == 1 isperm(p::Tuple{Int,Int}) = ((p[1] == 1) & (p[2] == 2)) | ((p[1] == 2) & (p[2] == 1)) # swap columns i and j of a, in-place -function swapcols!(a::AbstractMatrix, i, j) +function swapcols!(a::ArrayLike{2}, i, j) i == j && return cols = axes(a,2) @boundscheck i in cols || throw(BoundsError(a, (:,i))) @@ -74,7 +74,7 @@ function swapcols!(a::AbstractMatrix, i, j) end end # like permute!! applied to each row of a, in-place in a (overwriting p). 
-function permutecols!!(a::AbstractMatrix, p::AbstractVector{<:Integer}) +function permutecols!!(a::ArrayLike{2}, p::AbstractVector{<:Integer}) require_one_based_indexing(a, p) count = 0 start = 0 @@ -143,7 +143,7 @@ julia> A 1 ``` """ -permute!(a, p::AbstractVector) = permute!!(a, copymutable(p)) +permute!(a, p::ArrayLike{1}) = permute!!(a, copymutable(p)) function invpermute!!(a, p::AbstractVector{<:Integer}) require_one_based_indexing(a, p) @@ -190,7 +190,7 @@ julia> A 1 ``` """ -invpermute!(a, p::AbstractVector) = invpermute!!(a, copymutable(p)) +invpermute!(a, p::ArrayLike{1}) = invpermute!!(a, copymutable(p)) """ invperm(v) @@ -226,7 +226,7 @@ julia> B[invperm(v)] 'd' ``` """ -function invperm(a::AbstractVector) +function invperm(a::ArrayLike{1}) require_one_based_indexing(a) b = zero(a) # similar vector of zeros n = length(a) diff --git a/base/compiler/abstractinterpretation.jl b/base/compiler/abstractinterpretation.jl index f1c17a6d750c3..542877bfc69b0 100644 --- a/base/compiler/abstractinterpretation.jl +++ b/base/compiler/abstractinterpretation.jl @@ -199,7 +199,7 @@ function abstract_call_method_with_const_args(@nospecialize(rettype), @nospecial if istopfunction(f, :getindex) || istopfunction(f, :setindex!) arrty = argtypes[2] # don't propagate constant index into indexing of non-constant array - if arrty isa Type && arrty <: AbstractArray && !issingletontype(arrty) + if arrty isa Type && arrty <: ArrayLike && !issingletontype(arrty) return Any end elseif istopfunction(f, :iterate) diff --git a/base/errorshow.jl b/base/errorshow.jl index 4cb0b045a784a..6661ee41949de 100644 --- a/base/errorshow.jl +++ b/base/errorshow.jl @@ -35,7 +35,7 @@ function showerror(io::IO, ex::BoundsError) print(io, ": attempt to access ") summary(io, ex.a) if isdefined(ex, :i) - !isa(ex.a, AbstractArray) && print(io, "\n ") + !isa(ex.a, ArrayLike) && print(io, "\n ") print(io, " at index [") if isa(ex.i, AbstractRange) print(io, ex.i) @@ -95,7 +95,7 @@ end showerror(io::IO, ex::InitError) = showerror(io, ex, []) function showerror(io::IO, ex::DomainError) - if isa(ex.val, AbstractArray) + if isa(ex.val, ArrayLike) compact = get(io, :compact, true) limit = get(io, :limit, true) print(IOContext(io, :compact => compact, :limit => limit), @@ -265,7 +265,7 @@ function showerror(io::IO, ex::MethodError) end print(io, ")") end - if ft <: AbstractArray + if ft <: ArrayLike print(io, "\nUse square brackets [] for indexing an Array.") end # Check for local functions that shadow methods in Base diff --git a/base/essentials.jl b/base/essentials.jl index ec4e562f2fe4f..ad04a6b2ed0ee 100644 --- a/base/essentials.jl +++ b/base/essentials.jl @@ -514,7 +514,7 @@ julia> f2() allowing *other code* to remove your bounds checks with [`@inbounds`](@ref). As noted there, the caller must verify—using information they can access—that their accesses are valid before using `@inbounds`. For indexing into your - [`AbstractArray`](@ref) subclasses, for example, this involves checking the + [`ArrayLike`](@ref) subclasses, for example, this involves checking the indices against its [`size`](@ref). Therefore, `@boundscheck` annotations should only be added to a [`getindex`](@ref) or [`setindex!`](@ref) implementation after you are certain its behavior is correct. @@ -617,7 +617,7 @@ end map(f, v::SimpleVector) = Any[ f(v[i]) for i = 1:length(v) ] -getindex(v::SimpleVector, I::AbstractArray) = Core.svec(Any[ v[i] for i in I ]...) +getindex(v::SimpleVector, I::ArrayLike) = Core.svec(Any[ v[i] for i in I ]...) 
""" isassigned(array, i) -> Bool diff --git a/base/generator.jl b/base/generator.jl index b0f7e32d0b22f..ec8f1d7119813 100644 --- a/base/generator.jl +++ b/base/generator.jl @@ -90,7 +90,7 @@ Base.HasLength() IteratorSize(x) = IteratorSize(typeof(x)) IteratorSize(::Type) = HasLength() # HasLength is the default -IteratorSize(::Type{<:AbstractArray{<:Any,N}}) where {N} = HasShape{N}() +IteratorSize(::Type{<:ArrayLike{N}}) where {N} = HasShape{N}() IteratorSize(::Type{Generator{I,F}}) where {I,F} = IteratorSize(I) IteratorSize(::Type{Any}) = SizeUnknown() diff --git a/base/indices.jl b/base/indices.jl index 2cc822bdeeb8f..d489f82810afb 100644 --- a/base/indices.jl +++ b/base/indices.jl @@ -4,7 +4,7 @@ Dims{N} An `NTuple` of `N` `Int`s used to represent the dimensions -of an [`AbstractArray`](@ref). +of an [`ArrayLike`](@ref). """ Dims{N} = NTuple{N,Int} DimsInteger{N} = NTuple{N,Integer} @@ -41,7 +41,7 @@ struct IndexLinear <: IndexStyle end Subtype of [`IndexStyle`](@ref) used to describe arrays which are optimally indexed by a Cartesian index. This is the default -for new custom [`AbstractArray`](@ref) subtypes. +for new custom [`ArrayLike`](@ref) subtypes. A Cartesian indexing style uses multiple integer indices to describe the position in a multidimensional array, with exactly one index per dimension. This means that @@ -70,7 +70,7 @@ struct IndexCartesian <: IndexStyle end IndexStyle(typeof(A)) `IndexStyle` specifies the "native indexing style" for array `A`. When -you define a new [`AbstractArray`](@ref) type, you can choose to implement +you define a new [`ArrayLike`](@ref) type, you can choose to implement either linear indexing (with [`IndexLinear`](@ref)) or cartesian indexing. If you decide to only implement linear indexing, then you must set this trait for your array type: @@ -84,21 +84,21 @@ recompute all indexing operations into the preferred style. This allows users to access elements of your array using any indexing style, even when explicit methods have not been provided. -If you define both styles of indexing for your `AbstractArray`, this +If you define both styles of indexing for your `ArrayLike`, this trait can be used to select the most performant indexing style. Some methods check this trait on their inputs, and dispatch to different algorithms depending on the most efficient access pattern. In particular, [`eachindex`](@ref) creates an iterator whose type depends on the setting of this trait. """ -IndexStyle(A::AbstractArray) = IndexStyle(typeof(A)) +IndexStyle(A::ArrayLike) = IndexStyle(typeof(A)) IndexStyle(::Type{Union{}}) = IndexLinear() -IndexStyle(::Type{<:AbstractArray}) = IndexCartesian() +IndexStyle(::Type{<:ArrayLike}) = IndexCartesian() IndexStyle(::Type{<:Array}) = IndexLinear() IndexStyle(::Type{<:AbstractRange}) = IndexLinear() -IndexStyle(A::AbstractArray, B::AbstractArray) = IndexStyle(IndexStyle(A), IndexStyle(B)) -IndexStyle(A::AbstractArray, B::AbstractArray...) = IndexStyle(IndexStyle(A), IndexStyle(B...)) +IndexStyle(A::ArrayLike, B::ArrayLike) = IndexStyle(IndexStyle(A), IndexStyle(B)) +IndexStyle(A::ArrayLike, B::ArrayLike...) 
= IndexStyle(IndexStyle(A), IndexStyle(B...)) IndexStyle(::IndexLinear, ::IndexLinear) = IndexLinear() IndexStyle(::IndexStyle, ::IndexStyle) = IndexCartesian() @@ -165,7 +165,7 @@ function promote_shape(a::Dims, b::Dims) return a end -function promote_shape(a::AbstractArray, b::AbstractArray) +function promote_shape(a::ArrayLike, b::ArrayLike) promote_shape(axes(a), axes(b)) end @@ -194,12 +194,12 @@ function throw_setindex_mismatch(X, I) end end -# check for valid sizes in A[I...] = X where X <: AbstractArray +# check for valid sizes in A[I...] = X where X <: ArrayLike # we want to allow dimensions that are equal up to permutation, but only # for permutations that leave array elements in the same linear order. # those are the permutations that preserve the order of the non-singleton # dimensions. -function setindex_shape_check(X::AbstractArray, I::Integer...) +function setindex_shape_check(X::ArrayLike, I::Integer...) li = ndims(X) lj = length(I) i = j = 1 @@ -233,19 +233,19 @@ function setindex_shape_check(X::AbstractArray, I::Integer...) end end -setindex_shape_check(X::AbstractArray) = +setindex_shape_check(X::ArrayLike) = (length(X)==1 || throw_setindex_mismatch(X,())) -setindex_shape_check(X::AbstractArray, i::Integer) = +setindex_shape_check(X::ArrayLike, i::Integer) = (length(X)==i || throw_setindex_mismatch(X, (i,))) -setindex_shape_check(X::AbstractArray{<:Any,1}, i::Integer) = +setindex_shape_check(X::ArrayLike{1}, i::Integer) = (length(X)==i || throw_setindex_mismatch(X, (i,))) -setindex_shape_check(X::AbstractArray{<:Any,1}, i::Integer, j::Integer) = +setindex_shape_check(X::ArrayLike{1}, i::Integer, j::Integer) = (length(X)==i*j || throw_setindex_mismatch(X, (i,j))) -function setindex_shape_check(X::AbstractArray{<:Any,2}, i::Integer, j::Integer) +function setindex_shape_check(X::ArrayLike{2}, i::Integer, j::Integer) if length(X) != i*j throw_setindex_mismatch(X, (i,j)) end @@ -269,7 +269,7 @@ special indexing behaviors. Note that some index types (like `Colon`) require more context in order to transform them into an array of indices; those get converted in the more complicated `to_indices` function. By default, this simply calls the generic `to_index(i)`. This must return either an `Int` or an -`AbstractArray` of scalar indices that are supported by `A`. +`ArrayLike` of scalar indices that are supported by `A`. """ to_index(A, i) = to_index(i) @@ -283,15 +283,15 @@ to_index(A::Array, i::UInt) = reinterpret(Int, i) Convert index `i` to an `Int` or array of `Int`s to be used as an index for all arrays. Custom index types may specialize `to_index(::CustomIndex)` to provide special -indexing behaviors. This must return either an `Int` or an `AbstractArray` of +indexing behaviors. This must return either an `Int` or an `ArrayLike` of `Int`s. 
""" to_index(i::Integer) = convert(Int,i)::Int to_index(i::Bool) = throw(ArgumentError("invalid index: $i of type Bool")) to_index(I::AbstractArray{Bool}) = LogicalIndex(I) -to_index(I::AbstractArray) = I +to_index(I::ArrayLike) = I to_index(I::AbstractArray{Union{}}) = I -to_index(I::AbstractArray{<:Union{AbstractArray, Colon}}) = +to_index(I::AbstractArray{<:Union{ArrayLike, Colon}}) = throw(ArgumentError("invalid index: $(limitrepr(I)) of type $(typeof(I))")) to_index(::Colon) = throw(ArgumentError("colons must be converted by to_indices(...)")) to_index(i) = throw(ArgumentError("invalid index: $(limitrepr(i)) of type $(typeof(i))")) @@ -303,7 +303,7 @@ to_index(i) = throw(ArgumentError("invalid index: $(limitrepr(i)) of type $(type Convert the tuple `I` to a tuple of indices for use in indexing into array `A`. -The returned tuple must only contain either `Int`s or `AbstractArray`s of +The returned tuple must only contain either `Int`s or `ArrayLike`s of scalar indices that are supported by array `A`. It will error upon encountering a novel index type that it does not know how to process. @@ -394,14 +394,14 @@ show(io::IO, r::IdentityUnitRange) = print(io, "Base.IdentityUnitRange(", r.indi iterate(S::IdentityUnitRange, s...) = iterate(S.indices, s...) """ - LinearIndices(A::AbstractArray) + LinearIndices(A::ArrayLike) Return a `LinearIndices` array with the same shape and [`axes`](@ref) as `A`, holding the linear index of each entry in `A`. Indexing this array with cartesian indices allows mapping them to linear indices. For arrays with conventional indexing (indices start at 1), or any multidimensional -array, linear indices range from 1 to `length(A)`. However, for `AbstractVector`s +array, linear indices range from 1 to `length(A)`. However, for `ArrayLike{1}`s linear indices are `axes(A, 1)`, and therefore do not start at 1 for vectors with unconventional indexing. 
@@ -450,7 +450,7 @@ LinearIndices(inds::NTuple{N,AbstractUnitRange{<:Integer}}) where {N} = LinearIndices(sz::NTuple{N,<:Integer}) where {N} = LinearIndices(map(Base.OneTo, sz)) LinearIndices(inds::NTuple{N,Union{<:Integer,AbstractUnitRange{<:Integer}}}) where {N} = LinearIndices(map(i->first(i):last(i), inds)) -LinearIndices(A::Union{AbstractArray,SimpleVector}) = LinearIndices(axes(A)) +LinearIndices(A::Union{ArrayLike,SimpleVector}) = LinearIndices(axes(A)) promote_rule(::Type{LinearIndices{N,R1}}, ::Type{LinearIndices{N,R2}}) where {N,R1,R2} = LinearIndices{N,indices_promote_type(R1,R2)} @@ -463,7 +463,7 @@ end convert(::Type{LinearIndices{N,R}}, inds::LinearIndices{N}) where {N,R} = LinearIndices(convert(R, inds.indices)) -# AbstractArray implementation +# ArrayLike implementation IndexStyle(::Type{<:LinearIndices}) = IndexLinear() axes(iter::LinearIndices) = map(axes1, iter.indices) size(iter::LinearIndices) = map(unsafe_length, iter.indices) diff --git a/base/intfuncs.jl b/base/intfuncs.jl index 77938f58d5b33..ccda59b02dae6 100644 --- a/base/intfuncs.jl +++ b/base/intfuncs.jl @@ -191,7 +191,7 @@ to_power_type(x) = convert(Base._return_type(*, Tuple{typeof(x), typeof(x)}), x) "\nMake x or $p a float by adding a zero decimal ", "(e.g., 2.0^$p or 2^$(float(p)) instead of 2^$p), ", "or write 1/x^$(-p), float(x)^$p, x^float($p) or (x//1)^$p"))) -@noinline throw_domerr_powbysq(::AbstractMatrix, p) = throw(DomainError(p, +@noinline throw_domerr_powbysq(::ArrayLike{2}, p) = throw(DomainError(p, string("Cannot raise an integer matrix x to a negative power ", p, '.', "\nMake x a float matrix by adding a zero decimal ", "(e.g., [2.0 1.0;1.0 0.0]^$p instead ", diff --git a/base/io.jl b/base/io.jl index e162996d58fe5..e3023303d13de 100644 --- a/base/io.jl +++ b/base/io.jl @@ -317,8 +317,10 @@ read(io::AbstractPipe) = read(pipe_reader(io)) readuntil(io::AbstractPipe, arg::UInt8; kw...) = readuntil(pipe_reader(io), arg; kw...) readuntil(io::AbstractPipe, arg::AbstractChar; kw...) = readuntil(pipe_reader(io), arg; kw...) readuntil(io::AbstractPipe, arg::AbstractString; kw...) = readuntil(pipe_reader(io), arg; kw...) -readuntil(io::AbstractPipe, arg::AbstractVector; kw...) = readuntil(pipe_reader(io), arg; kw...) -readuntil_vector!(io::AbstractPipe, target::AbstractVector, keep::Bool, out) = readuntil_vector!(pipe_reader(io), target, keep, out) +readuntil(io::AbstractPipe, arg::ArrayLike{1}; kw...) = readuntil(pipe_reader(io), arg; kw...) +readuntil(io::AbstractPipe, arg::AbstractVector; kw...) = readuntil(pipe_reader(io), arg; kw...) # specific +readuntil_vector!(io::AbstractPipe, target::ArrayLike{1}, keep::Bool, out) = readuntil_vector!(pipe_reader(io), target, keep, out) +readuntil_vector!(io::AbstractPipe, target::AbstractVector, keep::Bool, out) = readuntil_vector!(pipe_reader(io), target, keep, out) # specific for f in ( # peek/mark interface @@ -381,8 +383,8 @@ read(filename::AbstractString, args...) = open(io->read(io, args...), filename) read(filename::AbstractString, ::Type{T}) where {T} = open(io->read(io, T), filename) """ - read!(stream::IO, array::AbstractArray) - read!(filename::AbstractString, array::AbstractArray) + read!(stream::IO, array::ArrayLike) + read!(filename::AbstractString, array::ArrayLike) Read binary data from an I/O stream or file, filling in `array`. 
""" @@ -600,7 +602,7 @@ end write(s::IO, x::Bool) = write(s, UInt8(x)) write(to::IO, p::Ptr) = write(to, convert(UInt, p)) -function write(s::IO, A::AbstractArray) +function write(s::IO, A::ArrayLike) if !isbitstype(eltype(A)) error("`write` is not supported on non-isbits arrays") end @@ -621,7 +623,7 @@ end function write(s::IO, a::SubArray{T,N,<:Array}) where {T,N} if !isbitstype(T) || !isa(a, StridedArray) - return invoke(write, Tuple{IO, AbstractArray}, s, a) + return invoke(write, Tuple{IO, ArrayLike}, s, a) end elsz = sizeof(T) colsz = size(a,1) * elsz @@ -842,7 +844,7 @@ function readuntil(io::IO, target::AbstractString; keep::Bool=false) if !(target isa String) && !(target isa SubString{String}) target = String(target) end - target = codeunits(target)::AbstractVector + target = codeunits(target)::ArrayLike{1} return String(readuntil(io, target, keep=keep)) end diff --git a/base/iterators.jl b/base/iterators.jl index 7f8525d8e1d82..0d63b7fac9add 100644 --- a/base/iterators.jl +++ b/base/iterators.jl @@ -9,7 +9,7 @@ module Iterators import ..@__MODULE__, ..parentmodule const Base = parentmodule(@__MODULE__) using .Base: - @inline, Pair, AbstractDict, IndexLinear, IndexCartesian, IndexStyle, AbstractVector, Vector, + @inline, Pair, AbstractDict, IndexLinear, IndexCartesian, IndexStyle, ArrayLike, AbstractVector, Vector, tail, tuple_type_head, tuple_type_tail, tuple_type_cons, SizeUnknown, HasLength, HasShape, IsInfinite, EltypeUnknown, HasEltype, OneTo, @propagate_inbounds, Generator, AbstractRange, LinearIndices, (:), |, +, -, !==, !, <=, <, missing, map, any, @boundscheck, @inbounds @@ -86,7 +86,7 @@ last(r::Reverse) = first(r.itr) # the first shall be last first(r::Reverse) = last(r.itr) # and the last shall be first # reverse-order array iterators: assumes more-specialized Reverse for eachindex -@propagate_inbounds function iterate(A::Reverse{<:AbstractArray}, state=(reverse(eachindex(A.itr)),)) +@propagate_inbounds function iterate(A::Reverse{<:ArrayLike}, state=(reverse(eachindex(A.itr)),)) y = iterate(state...) y === nothing && return y idx, itrs = y @@ -216,13 +216,13 @@ CartesianIndex(2, 2) e See also: [`IndexStyle`](@ref), [`axes`](@ref). 
""" -pairs(::IndexLinear, A::AbstractArray) = Pairs(A, LinearIndices(A)) -pairs(::IndexCartesian, A::AbstractArray) = Pairs(A, CartesianIndices(axes(A))) +pairs(::IndexLinear, A::ArrayLike) = Pairs(A, LinearIndices(A)) +pairs(::IndexCartesian, A::ArrayLike) = Pairs(A, CartesianIndices(axes(A))) # preserve indexing capabilities for known indexable types # faster than zip(keys(a), values(a)) for arrays -pairs(A::AbstractArray) = pairs(IndexCartesian(), A) -pairs(A::AbstractVector) = pairs(IndexLinear(), A) +pairs(A::ArrayLike) = pairs(IndexCartesian(), A) +pairs(A::ArrayLike{1}) = pairs(IndexLinear(), A) pairs(tuple::Tuple) = Pairs(tuple, keys(tuple)) pairs(nt::NamedTuple) = Pairs(nt, keys(nt)) pairs(v::Core.SimpleVector) = Pairs(v, LinearIndices(v)) @@ -1091,17 +1091,17 @@ struct PartitionIterator{T} n::Int end # Partitions are explicitly a linear indexing operation, so reshape to 1-d immediately -PartitionIterator(A::AbstractArray, n::Int) = PartitionIterator(vec(A), n) -PartitionIterator(v::AbstractVector, n::Int) = PartitionIterator{typeof(v)}(v, n) +PartitionIterator(A::ArrayLike, n::Int) = PartitionIterator(vec(A), n) +PartitionIterator(v::ArrayLike{1}, n::Int) = PartitionIterator{typeof(v)}(v, n) eltype(::Type{PartitionIterator{T}}) where {T} = Vector{eltype(T)} # Arrays use a generic `view`-of-a-`vec`, so we cannot exactly predict what we'll get back -eltype(::Type{PartitionIterator{T}}) where {T<:AbstractArray} = AbstractVector{eltype(T)} +eltype(::Type{PartitionIterator{T}}) where {T<:ArrayLike} = AbstractVector{eltype(T)} # But for some common implementations in Base we know the answer exactly eltype(::Type{PartitionIterator{T}}) where {T<:Vector} = SubArray{eltype(T), 1, T, Tuple{UnitRange{Int}}, true} IteratorEltype(::Type{<:PartitionIterator{T}}) where {T} = IteratorEltype(T) -IteratorEltype(::Type{<:PartitionIterator{T}}) where {T<:AbstractArray} = EltypeUnknown() +IteratorEltype(::Type{<:PartitionIterator{T}}) where {T<:ArrayLike} = EltypeUnknown() IteratorEltype(::Type{<:PartitionIterator{T}}) where {T<:Vector} = IteratorEltype(T) partition_iteratorsize(::HasShape) = HasLength() @@ -1121,7 +1121,7 @@ function iterate(itr::PartitionIterator{<:AbstractRange}, state=1) return @inbounds itr.c[state:r], r + 1 end -function iterate(itr::PartitionIterator{<:AbstractArray}, state=1) +function iterate(itr::PartitionIterator{<:ArrayLike}, state=1) state > length(itr.c) && return nothing r = min(state + itr.n - 1, length(itr.c)) return @inbounds view(itr.c, state:r), r + 1 @@ -1298,7 +1298,7 @@ only(x::Tuple{Any}) = x[1] only(x::Tuple) = throw( ArgumentError("Tuple contains $(length(x)) elements, must contain exactly 1 element") ) -only(a::AbstractArray{<:Any, 0}) = @inbounds return a[] +only(a::ArrayLike{0}) = @inbounds return a[] only(x::NamedTuple{<:Any, <:Tuple{Any}}) = first(x) only(x::NamedTuple) = throw( ArgumentError("NamedTuple contains $(length(x)) elements, must contain exactly 1 element") diff --git a/base/math.jl b/base/math.jl index 20cb374c5c33b..804a9c8cdd481 100644 --- a/base/math.jl +++ b/base/math.jl @@ -69,12 +69,12 @@ clamp(x::X, lo::L, hi::H) where {X,L,H} = convert(promote_type(X,L,H), x))) """ - clamp!(array::AbstractArray, lo, hi) + clamp!(array::ArrayLike, lo, hi) Restrict values in `array` to the specified range, in-place. See also [`clamp`](@ref). 
""" -function clamp!(x::AbstractArray, lo, hi) +function clamp!(x::ArrayLike, lo, hi) @inbounds for i in eachindex(x) x[i] = clamp(x[i], lo, hi) end @@ -116,7 +116,7 @@ function evalpoly(x, p::Tuple) end end -evalpoly(x, p::AbstractVector) = _evalpoly(x, p) +evalpoly(x, p::ArrayLike{1}) = _evalpoly(x, p) function _evalpoly(x, p) N = length(p) @@ -155,7 +155,7 @@ end evalpoly(z::Complex, p::Tuple{<:Any}) = p[1] -evalpoly(z::Complex, p::AbstractVector) = _evalpoly(z, p) +evalpoly(z::Complex, p::ArrayLike{1}) = _evalpoly(z, p) function _evalpoly(z::Complex, p) length(p) == 1 && return p[1] diff --git a/base/missing.jl b/base/missing.jl index 90fc4caaeaed6..d2f4496a3525b 100644 --- a/base/missing.jl +++ b/base/missing.jl @@ -262,10 +262,10 @@ end # Optimized mapreduce implementation # The generic method is faster when !(eltype(A) >: Missing) since it does not need # additional loops to identify the two first non-missing values of each block -mapreduce(f, op, itr::SkipMissing{<:AbstractArray}) = +mapreduce(f, op, itr::SkipMissing{<:ArrayLike}) = _mapreduce(f, op, IndexStyle(itr.x), eltype(itr.x) >: Missing ? itr : itr.x) -function _mapreduce(f, op, ::IndexLinear, itr::SkipMissing{<:AbstractArray}) +function _mapreduce(f, op, ::IndexLinear, itr::SkipMissing{<:ArrayLike}) A = itr.x local ai inds = LinearIndices(A) @@ -295,7 +295,7 @@ mapreduce_impl(f, op, A::SkipMissing, ifirst::Integer, ilast::Integer) = mapreduce_impl(f, op, A, ifirst, ilast, pairwise_blocksize(f, op)) # Returns nothing when the input contains only missing values, and Some(x) otherwise -@noinline function mapreduce_impl(f, op, itr::SkipMissing{<:AbstractArray}, +@noinline function mapreduce_impl(f, op, itr::SkipMissing{<:ArrayLike}, ifirst::Integer, ilast::Integer, blksize::Int) A = itr.x if ifirst == ilast @@ -351,7 +351,7 @@ mapreduce_impl(f, op, A::SkipMissing, ifirst::Integer, ilast::Integer) = end """ - filter(f, itr::SkipMissing{<:AbstractArray}) + filter(f, itr::SkipMissing{<:ArrayLike}) Return a vector similar to the array wrapped by the given `SkipMissing` iterator but with all missing elements and those for which `f` returns `false` removed. 
@@ -371,7 +371,7 @@ julia> filter(isodd, skipmissing(x)) 1 ``` """ -function filter(f, itr::SkipMissing{<:AbstractArray}) +function filter(f, itr::SkipMissing{<:ArrayLike}) y = similar(itr.x, eltype(itr), 0) for xi in itr.x if xi !== missing && f(xi) diff --git a/base/multidimensional.jl b/base/multidimensional.jl index 3745186f5cc4f..9b73294b95354 100644 --- a/base/multidimensional.jl +++ b/base/multidimensional.jl @@ -90,7 +90,7 @@ module IteratorsMD # indexing getindex(index::CartesianIndex, i::Integer) = index.I[i] - Base.get(A::AbstractArray, I::CartesianIndex, default) = get(A, I.I, default) + Base.get(A::ArrayLike, I::CartesianIndex, default) = get(A, I.I, default) eltype(::Type{T}) where {T<:CartesianIndex} = eltype(fieldtype(T, :I)) # access to index tuple @@ -144,13 +144,13 @@ module IteratorsMD end # nextind and prevind with CartesianIndex - function Base.nextind(a::AbstractArray{<:Any,N}, i::CartesianIndex{N}) where {N} + function Base.nextind(a::ArrayLike{N}, i::CartesianIndex{N}) where {N} iter = CartesianIndices(axes(a)) # might overflow I = inc(i.I, first(iter).I, last(iter).I) return I end - function Base.prevind(a::AbstractArray{<:Any,N}, i::CartesianIndex{N}) where {N} + function Base.prevind(a::ArrayLike{N}, i::CartesianIndex{N}) where {N} iter = CartesianIndices(axes(a)) # might underflow I = dec(i.I, last(iter).I, first(iter).I) @@ -183,7 +183,7 @@ module IteratorsMD Consequently these can be useful for writing algorithms that work in arbitrary dimensions. - CartesianIndices(A::AbstractArray) -> R + CartesianIndices(A::ArrayLike) -> R As a convenience, constructing a `CartesianIndices` from an array makes a range of its indices. @@ -209,7 +209,7 @@ module IteratorsMD ## Conversion between linear and cartesian indices Linear index to cartesian index conversion exploits the fact that a - `CartesianIndices` is an `AbstractArray` and can be indexed linearly: + `CartesianIndices` is an `ArrayLike` and can be indexed linearly: ```jldoctest julia> cartesian = CartesianIndices((1:3, 1:2)) @@ -259,7 +259,7 @@ module IteratorsMD CartesianIndices(inds::NTuple{N,Union{<:Integer,AbstractUnitRange{<:Integer}}}) where {N} = CartesianIndices(map(i->first(i):last(i), inds)) - CartesianIndices(A::AbstractArray) = CartesianIndices(axes(A)) + CartesianIndices(A::ArrayLike) = CartesianIndices(axes(A)) """ (:)(I::CartesianIndex, J::CartesianIndex) @@ -308,7 +308,7 @@ module IteratorsMD convert(::Type{CartesianIndices{N,R}}, inds::CartesianIndices{N}) where {N,R} = CartesianIndices(convert(R, inds.indices)) - # AbstractArray implementation + # ArrayLike implementation Base.axes(iter::CartesianIndices{N,R}) where {N,R} = map(Base.axes1, iter.indices) Base.IndexStyle(::Type{CartesianIndices{N,R}}) where {N,R} = IndexCartesian() @inline function Base.getindex(iter::CartesianIndices{N,<:NTuple{N,Base.OneTo}}, I::Vararg{Int, N}) where {N} @@ -324,9 +324,9 @@ module IteratorsMD ndims(::Type{CartesianIndices{N}}) where {N} = N ndims(::Type{CartesianIndices{N,TT}}) where {N,TT} = N - eachindex(::IndexCartesian, A::AbstractArray) = CartesianIndices(axes(A)) + eachindex(::IndexCartesian, A::ArrayLike) = CartesianIndices(axes(A)) - @inline function eachindex(::IndexCartesian, A::AbstractArray, B::AbstractArray...) + @inline function eachindex(::IndexCartesian, A::ArrayLike, B::ArrayLike...) axsA = axes(A) Base._all_match_first(axes, axsA, B...) || Base.throw_eachindex_mismatch(IndexCartesian(), A, B...) 
CartesianIndices(axsA) @@ -525,7 +525,7 @@ using .IteratorsMD ## Bounds-checking with CartesianIndex # Disallow linear indexing with CartesianIndex -function checkbounds(::Type{Bool}, A::AbstractArray, i::Union{CartesianIndex, AbstractArray{<:CartesianIndex}}) +function checkbounds(::Type{Bool}, A::ArrayLike, i::Union{CartesianIndex, AbstractArray{<:CartesianIndex}}) @_inline_meta checkbounds_indices(Bool, axes(A), (i,)) end @@ -591,7 +591,7 @@ index_ndims() = () @inline index_dimsum(i1, I...) = (index_dimsum(I...)...,) @inline index_dimsum(::Colon, I...) = (true, index_dimsum(I...)...) @inline index_dimsum(::AbstractArray{Bool}, I...) = (true, index_dimsum(I...)...) -@inline function index_dimsum(::AbstractArray{<:Any,N}, I...) where N +@inline function index_dimsum(::ArrayLike{N}, I...) where N (ntuple(x->true, Val(N))..., index_dimsum(I...)...) end index_dimsum() = () @@ -599,13 +599,13 @@ index_dimsum() = () # Recursively compute the lengths of a list of indices, without dropping scalars index_lengths() = () @inline index_lengths(::Real, rest...) = (1, index_lengths(rest...)...) -@inline index_lengths(A::AbstractArray, rest...) = (length(A), index_lengths(rest...)...) +@inline index_lengths(A::ArrayLike, rest...) = (length(A), index_lengths(rest...)...) # shape of array to create for getindex() with indices I, dropping scalars # returns a Tuple{Vararg{AbstractUnitRange}} of indices index_shape() = () @inline index_shape(::Real, rest...) = index_shape(rest...) -@inline index_shape(A::AbstractArray, rest...) = (axes(A)..., index_shape(rest...)...) +@inline index_shape(A::ArrayLike, rest...) = (axes(A)..., index_shape(rest...)...) """ LogicalIndex(mask) @@ -622,7 +622,7 @@ struct LogicalIndex{T, A<:AbstractArray{Bool}} <: AbstractVector{T} end LogicalIndex(mask::AbstractVector{Bool}) = LogicalIndex{Int, typeof(mask)}(mask) LogicalIndex(mask::AbstractArray{Bool, N}) where {N} = LogicalIndex{CartesianIndex{N}, typeof(mask)}(mask) -(::Type{LogicalIndex{Int}})(mask::AbstractArray) = LogicalIndex{Int, typeof(mask)}(mask) +(::Type{LogicalIndex{Int}})(mask::ArrayLike) = LogicalIndex{Int, typeof(mask)}(mask) size(L::LogicalIndex) = (L.sum,) length(L::LogicalIndex) = L.sum collect(L::LogicalIndex) = [i for i in L] @@ -673,9 +673,9 @@ end return ((i1-1)<<6 + tz, (i1, c)) end -@inline checkbounds(::Type{Bool}, A::AbstractArray, I::LogicalIndex{<:Any,<:AbstractArray{Bool,1}}) = +@inline checkbounds(::Type{Bool}, A::ArrayLike, I::LogicalIndex{<:Any,<:AbstractArray{Bool,1}}) = eachindex(IndexLinear(), A) == eachindex(IndexLinear(), I.mask) -@inline checkbounds(::Type{Bool}, A::AbstractArray, I::LogicalIndex) = axes(A) == axes(I.mask) +@inline checkbounds(::Type{Bool}, A::ArrayLike, I::LogicalIndex) = axes(A) == axes(I.mask) @inline checkindex(::Type{Bool}, indx::AbstractUnitRange, I::LogicalIndex) = (indx,) == axes(I.mask) checkindex(::Type{Bool}, inds::Tuple, I::LogicalIndex) = false @@ -720,20 +720,20 @@ getindex(x::Number, i::CartesianIndex{0}) = x getindex(t::Tuple, i::CartesianIndex{1}) = getindex(t, i.I[1]) # These are not defined on directly on getindex to avoid -# ambiguities for AbstractArray subtypes. See the note in abstractarray.jl +# ambiguities for ArrayLike subtypes. See the note in abstractarray.jl -@inline function _getindex(l::IndexStyle, A::AbstractArray, I::Union{Real, AbstractArray}...) +@inline function _getindex(l::IndexStyle, A::ArrayLike, I::Union{Real, ArrayLike}...) @boundscheck checkbounds(A, I...) return _unsafe_getindex(l, _maybe_reshape(l, A, I...), I...) 
end # But we can speed up IndexCartesian arrays by reshaping them to the appropriate dimensionality: -_maybe_reshape(::IndexLinear, A::AbstractArray, I...) = A -_maybe_reshape(::IndexCartesian, A::AbstractVector, I...) = A -@inline _maybe_reshape(::IndexCartesian, A::AbstractArray, I...) = __maybe_reshape(A, index_ndims(I...)) -@inline __maybe_reshape(A::AbstractArray{T,N}, ::NTuple{N,Any}) where {T,N} = A -@inline __maybe_reshape(A::AbstractArray, ::NTuple{N,Any}) where {N} = reshape(A, Val(N)) +_maybe_reshape(::IndexLinear, A::ArrayLike, I...) = A +_maybe_reshape(::IndexCartesian, A::ArrayLike{1}, I...) = A +@inline _maybe_reshape(::IndexCartesian, A::ArrayLike, I...) = __maybe_reshape(A, index_ndims(I...)) +@inline __maybe_reshape(A::ArrayLike{N}, ::NTuple{N,Any}) where {N} = A +@inline __maybe_reshape(A::ArrayLike, ::NTuple{N,Any}) where {N} = reshape(A, Val(N)) -function _unsafe_getindex(::IndexStyle, A::AbstractArray, I::Vararg{Union{Real, AbstractArray}, N}) where N +function _unsafe_getindex(::IndexStyle, A::ArrayLike, I::Vararg{Union{Real, ArrayLike}, N}) where N # This is specifically not inlined to prevent excessive allocations in type unstable code shape = index_shape(I...) dest = similar(A, shape) @@ -743,7 +743,7 @@ function _unsafe_getindex(::IndexStyle, A::AbstractArray, I::Vararg{Union{Real, end # Always index with the exactly indices provided. -@generated function _unsafe_getindex!(dest::AbstractArray, src::AbstractArray, I::Vararg{Union{Real, AbstractArray}, N}) where N +@generated function _unsafe_getindex!(dest::ArrayLike, src::ArrayLike, I::Vararg{Union{Real, ArrayLike}, N}) where N quote @_inline_meta D = eachindex(dest) @@ -763,14 +763,14 @@ end @noinline throw_checksize_error(A, sz) = throw(DimensionMismatch("output array is the wrong size; expected $sz, got $(size(A))")) ## setindex! ## -function _setindex!(l::IndexStyle, A::AbstractArray, x, I::Union{Real, AbstractArray}...) +function _setindex!(l::IndexStyle, A::ArrayLike, x, I::Union{Real, ArrayLike}...) @_inline_meta @boundscheck checkbounds(A, I...) _unsafe_setindex!(l, _maybe_reshape(l, A, I...), x, I...) A end -@generated function _unsafe_setindex!(::IndexStyle, A::AbstractArray, x, I::Union{Real,AbstractArray}...) +@generated function _unsafe_setindex!(::IndexStyle, A::ArrayLike, x, I::Union{Real,ArrayLike}...) N = length(I) quote x′ = unalias(A, x) @@ -790,11 +790,11 @@ end end end -diff(a::AbstractVector) = diff(a, dims=1) +diff(a::ArrayLike{1}) = diff(a, dims=1) """ - diff(A::AbstractVector) - diff(A::AbstractArray; dims::Integer) + diff(A::ArrayLike{1}) + diff(A::ArrayLike; dims::Integer) Finite difference operator on a vector or a multidimensional array `A`. 
In the latter case the dimension to operate on needs to be specified with the `dims` @@ -822,7 +822,7 @@ julia> diff(vec(a)) 12 ``` """ -function diff(a::AbstractArray{T,N}; dims::Integer) where {T,N} +function diff(a::ArrayLike{N}; dims::Integer) where {N} require_one_based_indexing(a) 1 <= dims <= N || throw(ArgumentError("dimension $dims out of range (1:$N)")) @@ -850,7 +850,7 @@ function mightalias(A::SubArray{T,<:Any,P}, B::SubArray{T,<:Any,P}) where {T,P} !_isdisjoint(dataids(A.parent), _splatmap(dataids, B.indices)) || !_isdisjoint(dataids(B.parent), _splatmap(dataids, A.indices)) end -_parentsmatch(A::AbstractArray, B::AbstractArray) = A === B +_parentsmatch(A::ArrayLike, B::ArrayLike) = A === B # Two reshape(::Array)s of the same size aren't `===` because they have different headers _parentsmatch(A::Array, B::Array) = pointer(A) == pointer(B) && size(A) == size(B) @@ -868,12 +868,12 @@ end # And we can check scalars against each other and scalars against arrays quite easily @inline _indicesmightoverlap(A::Tuple{Real, Vararg{Any}}, B::Tuple{Real, Vararg{Any}}) = A[1] == B[1] ? _indicesmightoverlap(tail(A), tail(B)) : false -@inline _indicesmightoverlap(A::Tuple{Real, Vararg{Any}}, B::Tuple{AbstractArray, Vararg{Any}}) = +@inline _indicesmightoverlap(A::Tuple{Real, Vararg{Any}}, B::Tuple{ArrayLike, Vararg{Any}}) = A[1] in B[1] ? _indicesmightoverlap(tail(A), tail(B)) : false -@inline _indicesmightoverlap(A::Tuple{AbstractArray, Vararg{Any}}, B::Tuple{Real, Vararg{Any}}) = +@inline _indicesmightoverlap(A::Tuple{ArrayLike, Vararg{Any}}, B::Tuple{Real, Vararg{Any}}) = B[1] in A[1] ? _indicesmightoverlap(tail(A), tail(B)) : false # And small arrays are quick, too -@inline function _indicesmightoverlap(A::Tuple{AbstractArray, Vararg{Any}}, B::Tuple{AbstractArray, Vararg{Any}}) +@inline function _indicesmightoverlap(A::Tuple{ArrayLike, Vararg{Any}}, B::Tuple{ArrayLike, Vararg{Any}}) if length(A[1]) == 1 return A[1][1] in B[1] ? _indicesmightoverlap(tail(A), tail(B)) : false elseif length(B[1]) == 1 @@ -927,7 +927,7 @@ function fill!(A::AbstractArray{T}, x) where T end """ - copyto!(dest::AbstractArray, src) -> dest + copyto!(dest::ArrayLike, src) -> dest Copy all elements from collection `src` to array `dest`, whose length must be greater than @@ -955,7 +955,7 @@ julia> y """ copyto!(dest, src) -function copyto!(dest::AbstractArray{T1,N}, src::AbstractArray{T2,N}) where {T1,T2,N} +function copyto!(dest::ArrayLike{N}, src::ArrayLike{N}) where {N} checkbounds(dest, axes(src)...) src′ = unalias(dest, src) for I in eachindex(IndexStyle(src′,dest), src′) @@ -964,8 +964,8 @@ function copyto!(dest::AbstractArray{T1,N}, src::AbstractArray{T2,N}) where {T1, dest end -function copyto!(dest::AbstractArray{T1,N}, Rdest::CartesianIndices{N}, - src::AbstractArray{T2,N}, Rsrc::CartesianIndices{N}) where {T1,T2,N} +function copyto!(dest::ArrayLike{N}, Rdest::CartesianIndices{N}, + src::ArrayLike{N}, Rsrc::CartesianIndices{N}) where {N} isempty(Rdest) && return dest if size(Rdest) != size(Rsrc) throw(ArgumentError("source and destination must have same size (got $(size(Rsrc)) and $(size(Rdest)))")) @@ -996,10 +996,10 @@ end Copy the block of `src` in the range of `Rsrc` to the block of `dest` in the range of `Rdest`. The sizes of the two regions must match. """ -copyto!(::AbstractArray, ::CartesianIndices, ::AbstractArray, ::CartesianIndices) +copyto!(::ArrayLike, ::CartesianIndices, ::ArrayLike, ::CartesianIndices) # circshift! 
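Before the `circshift!` definitions below, a small sketch of the circular-shift semantics they implement; it uses plain `Vector`s and the public API only:

```julia
# circshift rotates elements along each dimension; circshift! writes the
# rotated result into a separate destination (dest and src must not alias).
src  = [1, 2, 3, 4, 5]
dest = similar(src)

circshift(src, 2)             # [4, 5, 1, 2, 3]
circshift!(dest, src, 2)      # same rotation, stored into dest
circshift!(dest, src, (2,))   # shift amounts may also be given per dimension
```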
-circshift!(dest::AbstractArray, src, ::Tuple{}) = copyto!(dest, src) +circshift!(dest::ArrayLike, src, ::Tuple{}) = copyto!(dest, src) """ circshift!(dest, src, shifts) @@ -1011,13 +1011,13 @@ alias each other). See also [`circshift`](@ref). """ -@noinline function circshift!(dest::AbstractArray{T,N}, src, shiftamt::DimsInteger) where {T,N} +@noinline function circshift!(dest::ArrayLike{N}, src, shiftamt::DimsInteger) where {N} dest === src && throw(ArgumentError("dest and src must be separate arrays")) inds = axes(src) axes(dest) == inds || throw(ArgumentError("indices of src and dest must match (got $inds and $(axes(dest)))")) _circshift!(dest, (), src, (), inds, fill_to_length(shiftamt, 0, Val(N))) end -circshift!(dest::AbstractArray, src, shiftamt) = circshift!(dest, src, (shiftamt...,)) +circshift!(dest::ArrayLike, src, shiftamt) = circshift!(dest, src, (shiftamt...,)) # For each dimension, we copy the first half of src to the second half # of dest, and the second half of src to the first half of dest. This @@ -1315,7 +1315,7 @@ end end end -@propagate_inbounds function setindex!(B::BitArray, X::AbstractArray, +@propagate_inbounds function setindex!(B::BitArray, X::ArrayLike, I0::Union{Colon,UnitRange{Int}}, I::Union{Int,UnitRange{Int},Colon}...) _setindex!(IndexStyle(B), B, X, to_indices(B, (I0, I...))...) end @@ -1397,7 +1397,7 @@ function permutedims(B::StridedArray, perm) permutedims!(P, B, perm) end -function checkdims_perm(P::AbstractArray{TP,N}, B::AbstractArray{TB,N}, perm) where {TP,TB,N} +function checkdims_perm(P::ArrayLike{N}, B::ArrayLike{N}, perm) where {N} indsB = axes(B) length(perm) == N || throw(ArgumentError("expected permutation of size $N, but length(perm)=$(length(perm))")) isperm(perm) || throw(ArgumentError("input is not a permutation")) @@ -1447,7 +1447,7 @@ end hash(x::Prehashed) = x.hash """ - unique(A::AbstractArray; dims::Int) + unique(A::ArrayLike; dims::Int) Return unique regions of `A` along dimension `dims`. @@ -1485,11 +1485,11 @@ julia> unique(A, dims=3) 0 0 ``` """ -unique(A::AbstractArray; dims::Union{Colon,Integer} = :) = _unique_dims(A, dims) +unique(A::ArrayLike; dims::Union{Colon,Integer} = :) = _unique_dims(A, dims) -_unique_dims(A::AbstractArray, dims::Colon) = invoke(unique, Tuple{Any}, A) +_unique_dims(A::ArrayLike, dims::Colon) = invoke(unique, Tuple{Any}, A) -@generated function _unique_dims(A::AbstractArray{T,N}, dim::Integer) where {T,N} +@generated function _unique_dims(A::ArrayLike{N}, dim::Integer) where {N} quote 1 <= dim <= $N || return copy(A) hashes = zeros(UInt, axes(A, dim)) @@ -1560,7 +1560,7 @@ _unique_dims(A::AbstractArray, dims::Colon) = invoke(unique, Tuple{Any}, A) end """ - extrema(A::AbstractArray; dims) -> Array{Tuple} + extrema(A::ArrayLike; dims) -> Array{Tuple} Compute the minimum and maximum elements of an array over the given dimensions. @@ -1585,10 +1585,10 @@ julia> extrema(A, dims = (1,2)) (9, 15) ``` """ -extrema(A::AbstractArray; dims = :) = _extrema_dims(identity, A, dims) +extrema(A::ArrayLike; dims = :) = _extrema_dims(identity, A, dims) """ - extrema(f, A::AbstractArray; dims) -> Array{Tuple} + extrema(f, A::ArrayLike; dims) -> Array{Tuple} Compute the minimum and maximum of `f` applied to each element in the given dimensions of `A`. @@ -1596,11 +1596,11 @@ of `A`. !!! compat "Julia 1.2" This method requires Julia 1.2 or later. 
""" -extrema(f, A::AbstractArray; dims=:) = _extrema_dims(f, A, dims) +extrema(f, A::ArrayLike; dims=:) = _extrema_dims(f, A, dims) -_extrema_dims(f, A::AbstractArray, ::Colon) = _extrema_itr(f, A) +_extrema_dims(f, A::ArrayLike, ::Colon) = _extrema_itr(f, A) -function _extrema_dims(f, A::AbstractArray, dims) +function _extrema_dims(f, A::ArrayLike, dims) sz = [size(A)...] for d in dims sz[d] = 1 @@ -1634,14 +1634,14 @@ end extrema!(B, A) = extrema!(identity, B, A) # Show for pairs() with Cartesian indices. Needs to be here rather than show.jl for bootstrap order -function Base.showarg(io::IO, r::Iterators.Pairs{<:Integer, <:Any, <:Any, T}, toplevel) where T <: Union{AbstractVector, Tuple} +function Base.showarg(io::IO, r::Iterators.Pairs{<:Integer, <:Any, <:Any, T}, toplevel) where T <: Union{ArrayLike{1}, Tuple} print(io, "pairs(::$T)") end -function Base.showarg(io::IO, r::Iterators.Pairs{<:CartesianIndex, <:Any, <:Any, T}, toplevel) where T <: AbstractArray +function Base.showarg(io::IO, r::Iterators.Pairs{<:CartesianIndex, <:Any, <:Any, T}, toplevel) where T <: ArrayLike print(io, "pairs(::$T)") end -function Base.showarg(io::IO, r::Iterators.Pairs{<:CartesianIndex, <:Any, <:Any, T}, toplevel) where T<:AbstractVector +function Base.showarg(io::IO, r::Iterators.Pairs{<:CartesianIndex, <:Any, <:Any, T}, toplevel) where T<:ArrayLike{1} print(io, "pairs(IndexCartesian(), ::$T)") end @@ -1764,7 +1764,7 @@ julia> sortslices(reshape([5; 4; 3; 2; 1], (1,1,5)), dims=3, by=x->x[1,1]) 5 ``` """ -function sortslices(A::AbstractArray; dims::Union{Integer, Tuple{Vararg{Integer}}}, kws...) +function sortslices(A::ArrayLike; dims::Union{Integer, Tuple{Vararg{Integer}}}, kws...) _sortslices(A, Val{dims}(); kws...) end @@ -1783,7 +1783,7 @@ function compute_itspace(A, ::Val{dims}) where {dims} vec(permutedims(collect(axs), (dims..., negdims...))) end -function _sortslices(A::AbstractArray, d::Val{dims}; kws...) where dims +function _sortslices(A::ArrayLike, d::Val{dims}; kws...) where dims itspace = compute_itspace(A, d) vecs = map(its->view(A, its...), itspace) p = sortperm(vecs; kws...) 
diff --git a/base/ordering.jl b/base/ordering.jl index ea1887c6ae471..5b403c25b1b19 100644 --- a/base/ordering.jl +++ b/base/ordering.jl @@ -6,7 +6,7 @@ module Order import ..@__MODULE__, ..parentmodule const Base = parentmodule(@__MODULE__) import .Base: - AbstractVector, @propagate_inbounds, isless, identity, getindex, + ArrayLike, @propagate_inbounds, isless, identity, getindex, +, -, !, &, <, | ## notions of element ordering ## @@ -42,7 +42,7 @@ struct Lt{T} <: Ordering lt::T end -struct Perm{O<:Ordering,V<:AbstractVector} <: Ordering +struct Perm{O<:Ordering,V<:ArrayLike{1}} <: Ordering order::O data::V end @@ -58,11 +58,11 @@ lt(o::Lt, a, b) = o.lt(a,b) lt(p.order, da, db) | (!lt(p.order, db, da) & (a < b)) end -ordtype(o::ReverseOrdering, vs::AbstractArray) = ordtype(o.fwd, vs) -ordtype(o::Perm, vs::AbstractArray) = ordtype(o.order, o.data) +ordtype(o::ReverseOrdering, vs::ArrayLike) = ordtype(o.fwd, vs) +ordtype(o::Perm, vs::ArrayLike) = ordtype(o.order, o.data) # TODO: here, we really want the return type of o.by, without calling it -ordtype(o::By, vs::AbstractArray) = try typeof(o.by(vs[1])) catch; Any end -ordtype(o::Ordering, vs::AbstractArray) = eltype(vs) +ordtype(o::By, vs::ArrayLike) = try typeof(o.by(vs[1])) catch; Any end +ordtype(o::Ordering, vs::ArrayLike) = eltype(vs) _ord(lt::typeof(isless), by::typeof(identity), order::Ordering) = order _ord(lt::typeof(isless), by, order::Ordering) = By(by) diff --git a/base/permuteddimsarray.jl b/base/permuteddimsarray.jl index d50cc11678e78..c09b615401a77 100644 --- a/base/permuteddimsarray.jl +++ b/base/permuteddimsarray.jl @@ -6,10 +6,10 @@ import Base: permutedims, permutedims! export PermutedDimsArray # Some day we will want storage-order-aware iteration, so put perm in the parameters -struct PermutedDimsArray{T,N,perm,iperm,AA<:AbstractArray} <: AbstractArray{T,N} +struct PermutedDimsArray{T,N,perm,iperm,AA<:ArrayLike} <: AbstractArray{T,N} parent::AA - function PermutedDimsArray{T,N,perm,iperm,AA}(data::AA) where {T,N,perm,iperm,AA<:AbstractArray} + function PermutedDimsArray{T,N,perm,iperm,AA}(data::AA) where {T,N,perm,iperm,AA<:ArrayLike} (isa(perm, NTuple{N,Int}) && isa(iperm, NTuple{N,Int})) || error("perm and iperm must both be NTuple{$N,Int}") isperm(perm) || throw(ArgumentError(string(perm, " is not a valid permutation of dimensions 1:", N))) all(map(d->iperm[perm[d]]==d, 1:N)) || throw(ArgumentError(string(perm, " and ", iperm, " must be inverses"))) @@ -20,7 +20,7 @@ end """ PermutedDimsArray(A, perm) -> B -Given an AbstractArray `A`, create a view `B` such that the +Given an ArrayLike `A`, create a view `B` such that the dimensions appear to be permuted. Similar to `permutedims`, except that no copying occurs (`B` shares storage with `A`). @@ -78,7 +78,7 @@ end @inline genperm(I, perm::AbstractVector{Int}) = genperm(I, (perm...,)) """ - permutedims(A::AbstractArray, perm) + permutedims(A::ArrayLike, perm) Permute the dimensions of array `A`. `perm` is a vector specifying a permutation of length `ndims(A)`. @@ -108,13 +108,13 @@ julia> permutedims(A, [3, 2, 1]) 6 8 ``` """ -function permutedims(A::AbstractArray, perm) +function permutedims(A::ArrayLike, perm) dest = similar(A, genperm(axes(A), perm)) permutedims!(dest, A, perm) end """ - permutedims(m::AbstractMatrix) + permutedims(m::ArrayLike{2}) Permute the dimensions of the matrix `m`, by flipping the elements across the diagonal of the matrix. 
Differs from `LinearAlgebra`'s [`transpose`](@ref) in that the @@ -146,10 +146,10 @@ julia> transpose(X) [5 7; 6 8] [13 15; 14 16] ``` """ -permutedims(A::AbstractMatrix) = permutedims(A, (2,1)) +permutedims(A::ArrayLike{2}) = permutedims(A, (2,1)) """ - permutedims(v::AbstractVector) + permutedims(v::ArrayLike{1}) Reshape vector `v` into a `1 × length(v)` row matrix. Differs from `LinearAlgebra`'s [`transpose`](@ref) in that @@ -175,7 +175,7 @@ julia> transpose(V) [1 3; 2 4] [5 7; 6 8] ``` """ -permutedims(v::AbstractVector) = reshape(v, (1, length(v))) +permutedims(v::ArrayLike{1}) = reshape(v, (1, length(v))) """ permutedims!(dest, src, perm) @@ -188,7 +188,7 @@ regions. See also [`permutedims`](@ref). """ -function permutedims!(dest, src::AbstractArray, perm) +function permutedims!(dest, src::ArrayLike, perm) Base.checkdims_perm(dest, src, perm) P = PermutedDimsArray(dest, invperm(perm)) _copy!(P, src) @@ -199,7 +199,7 @@ function Base.copyto!(dest::PermutedDimsArray{T,N}, src::AbstractArray{T,N}) whe checkbounds(dest, axes(src)...) _copy!(dest, src) end -Base.copyto!(dest::PermutedDimsArray, src::AbstractArray) = _copy!(dest, src) +Base.copyto!(dest::PermutedDimsArray, src::ArrayLike) = _copy!(dest, src) function _copy!(P::PermutedDimsArray{T,N,perm}, src) where {T,N,perm} # If dest/src are "close to dense," then it pays to be cache-friendly. diff --git a/base/range.jl b/base/range.jl index 5877fc8bc2ea0..478b4be6e1cf2 100644 --- a/base/range.jl +++ b/base/range.jl @@ -864,8 +864,8 @@ issubset(r::AbstractUnitRange{<:Integer}, s::AbstractUnitRange{<:Integer}) = # promote eltype if at least one container wouldn't change, otherwise join container types. el_same(::Type{T}, a::Type{<:AbstractArray{T,n}}, b::Type{<:AbstractArray{T,n}}) where {T,n} = a -el_same(::Type{T}, a::Type{<:AbstractArray{T,n}}, b::Type{<:AbstractArray{S,n}}) where {T,S,n} = a -el_same(::Type{T}, a::Type{<:AbstractArray{S,n}}, b::Type{<:AbstractArray{T,n}}) where {T,S,n} = b +el_same(::Type{T}, a::Type{<:AbstractArray{T,n}}, b::Type{<:ArrayLike{n}}) where {T,n} = a +el_same(::Type{T}, a::Type{<:ArrayLike{n}}, b::Type{<:AbstractArray{T,n}}) where {T,n} = b el_same(::Type, a, b) = promote_typejoin(a, b) promote_rule(a::Type{UnitRange{T1}}, b::Type{UnitRange{T2}}) where {T1,T2} = diff --git a/base/rational.jl b/base/rational.jl index be0f84a094e73..175393c5f6c31 100644 --- a/base/rational.jl +++ b/base/rational.jl @@ -67,7 +67,7 @@ end //(x::Number, y::Complex) = x*conj(y)//abs2(y) -//(X::AbstractArray, y::Number) = X .// y +//(X::ArrayLike, y::Number) = X .// y function show(io::IO, x::Rational) show(io, numerator(x)) diff --git a/base/reduce.jl b/base/reduce.jl index 2c97effdf9550..bdbaa8b8ad804 100644 --- a/base/reduce.jl +++ b/base/reduce.jl @@ -224,7 +224,7 @@ foldr(op, itr; kw...) = mapfoldr(identity, op, itr; kw...) # This is a generic implementation of `mapreduce_impl()`, # certain `op` (e.g. `min` and `max`) may have their own specialized versions. -@noinline function mapreduce_impl(f, op, A::AbstractArray, ifirst::Integer, ilast::Integer, blksize::Int) +@noinline function mapreduce_impl(f, op, A::ArrayLike, ifirst::Integer, ilast::Integer, blksize::Int) if ifirst == ilast @inbounds a1 = A[ifirst] return mapreduce_first(f, op, a1) @@ -247,7 +247,7 @@ foldr(op, itr; kw...) = mapfoldr(identity, op, itr; kw...) 
end end -mapreduce_impl(f, op, A::AbstractArray, ifirst::Integer, ilast::Integer) = +mapreduce_impl(f, op, A::ArrayLike, ifirst::Integer, ilast::Integer) = mapreduce_impl(f, op, A, ifirst, ilast, pairwise_blocksize(f, op)) """ @@ -380,7 +380,7 @@ The default is `reduce_first(op, f(x))`. """ mapreduce_first(f, op, x) = reduce_first(op, f(x)) -_mapreduce(f, op, A::AbstractArray) = _mapreduce(f, op, IndexStyle(A), A) +_mapreduce(f, op, A::ArrayLike) = _mapreduce(f, op, IndexStyle(A), A) function _mapreduce(f, op, ::IndexLinear, A::AbstractArray{T}) where T inds = LinearIndices(A) @@ -407,7 +407,7 @@ end mapreduce(f, op, a::Number) = mapreduce_first(f, op, a) -_mapreduce(f, op, ::IndexCartesian, A::AbstractArray) = mapfoldl(f, op, A) +_mapreduce(f, op, ::IndexCartesian, A::ArrayLike) = mapfoldl(f, op, A) """ reduce(op, itr; [init]) @@ -557,7 +557,7 @@ isgoodzero(::typeof(max), x) = isbadzero(min, x) isgoodzero(::typeof(min), x) = isbadzero(max, x) function mapreduce_impl(f, op::Union{typeof(max), typeof(min)}, - A::AbstractArray, first::Int, last::Int) + A::ArrayLike, first::Int, last::Int) a1 = @inbounds A[first] v1 = mapreduce_first(f, op, a1) v2 = v3 = v4 = v1 @@ -853,7 +853,7 @@ function count(pred, itr) end return n end -function count(pred, a::AbstractArray) +function count(pred, a::ArrayLike) n = 0 for i in eachindex(a) @inbounds n += pred(a[i])::Bool diff --git a/base/reducedim.jl b/base/reducedim.jl index a448931499fc5..429b2430fd31b 100644 --- a/base/reducedim.jl +++ b/base/reducedim.jl @@ -12,10 +12,10 @@ No method is implemented for reducing index range of type $typeof(i). Please imp reduced_index for this index type or report this as an issue. """ )) -reduced_indices(a::AbstractArray, region) = reduced_indices(axes(a), region) +reduced_indices(a::ArrayLike, region) = reduced_indices(axes(a), region) # for reductions that keep 0 dims as 0 -reduced_indices0(a::AbstractArray, region) = reduced_indices0(axes(a), region) +reduced_indices0(a::ArrayLike, region) = reduced_indices0(axes(a), region) function reduced_indices(inds::Indices{N}, d::Int) where N d < 1 && throw(ArgumentError("dimension must be ≥ 1, got $d")) @@ -77,20 +77,20 @@ end ## initialization # initarray! is only called by sum!, prod!, etc. 
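The loop below generates the `initarray!` methods; as a sketch of what they mean for callers of `sum!`, `maximum!`, and friends (plain `Array`s, stock `Base` behavior, not specific to this patch):

```julia
# With init=true (the default) the destination is reset before reducing;
# with init=false its existing contents become the starting values.
A = [1 2; 3 4]

sum!(fill(10, 1, 2), A)               # [4 6]   -- destination first filled with zero(Int)
sum!(fill(10, 1, 2), A; init=false)   # [14 16] -- the 10s are folded into the sums
maximum!(fill(-1, 1, 2), A)           # [3 4]   -- seeded from the first slice via copyfirst!
```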
for (Op, initfun) in ((:(typeof(add_sum)), :zero), (:(typeof(mul_prod)), :one)) - @eval initarray!(a::AbstractArray{T}, ::$(Op), init::Bool, src::AbstractArray) where {T} = (init && fill!(a, $(initfun)(T)); a) + @eval initarray!(a::AbstractArray{T}, ::$(Op), init::Bool, src::ArrayLike) where {T} = (init && fill!(a, $(initfun)(T)); a) end for Op in (:(typeof(max)), :(typeof(min))) - @eval initarray!(a::AbstractArray{T}, ::$(Op), init::Bool, src::AbstractArray) where {T} = (init && copyfirst!(a, src); a) + @eval initarray!(a::ArrayLike, ::$(Op), init::Bool, src::ArrayLike) = (init && copyfirst!(a, src); a) end for (Op, initval) in ((:(typeof(&)), true), (:(typeof(|)), false)) - @eval initarray!(a::AbstractArray, ::$(Op), init::Bool, src::AbstractArray) = (init && fill!(a, $initval); a) + @eval initarray!(a::ArrayLike, ::$(Op), init::Bool, src::ArrayLike) = (init && fill!(a, $initval); a) end # reducedim_initarray is called by -reducedim_initarray(A::AbstractArray, region, init, ::Type{R}) where {R} = fill!(similar(A,R,reduced_indices(A,region)), init) -reducedim_initarray(A::AbstractArray, region, init::T) where {T} = reducedim_initarray(A, region, init, T) +reducedim_initarray(A::ArrayLike, region, init, ::Type{R}) where {R} = fill!(similar(A,R,reduced_indices(A,region)), init) +reducedim_initarray(A::ArrayLike, region, init::T) where {T} = reducedim_initarray(A, region, init, T) # TODO: better way to handle reducedim initialization # @@ -105,10 +105,10 @@ _realtype(T::Type) = T _realtype(::Union{typeof(abs),typeof(abs2)}, T) = _realtype(T) _realtype(::Any, T) = T -function reducedim_init(f, op::Union{typeof(+),typeof(add_sum)}, A::AbstractArray, region) +function reducedim_init(f, op::Union{typeof(+),typeof(add_sum)}, A::ArrayLike, region) _reducedim_init(f, op, zero, sum, A, region) end -function reducedim_init(f, op::Union{typeof(*),typeof(mul_prod)}, A::AbstractArray, region) +function reducedim_init(f, op::Union{typeof(*),typeof(mul_prod)}, A::ArrayLike, region) _reducedim_init(f, op, one, prod, A, region) end function _reducedim_init(f, op, fv, fop, A, region) @@ -126,7 +126,7 @@ end # initialization when computing minima and maxima requires a little care for (f1, f2, initval) in ((:min, :max, :Inf), (:max, :min, :(-Inf))) - @eval function reducedim_init(f, op::typeof($f1), A::AbstractArray, region) + @eval function reducedim_init(f, op::typeof($f1), A::ArrayLike, region) # First compute the reduce indices. This will throw an ArgumentError # if any region is invalid ri = reduced_indices(A, region) @@ -156,8 +156,8 @@ end reducedim_init(f::Union{typeof(abs),typeof(abs2)}, op::typeof(max), A::AbstractArray{T}, region) where {T} = reducedim_initarray(A, region, zero(f(zero(T))), _realtype(f, T)) -reducedim_init(f, op::typeof(&), A::AbstractArray, region) = reducedim_initarray(A, region, true) -reducedim_init(f, op::typeof(|), A::AbstractArray, region) = reducedim_initarray(A, region, false) +reducedim_init(f, op::typeof(&), A::ArrayLike, region) = reducedim_initarray(A, region, true) +reducedim_init(f, op::typeof(|), A::ArrayLike, region) = reducedim_initarray(A, region, false) # specialize to make initialization more efficient for common cases @@ -179,7 +179,7 @@ end ## generic (map)reduction -has_fast_linear_indexing(a::AbstractArray) = false +has_fast_linear_indexing(a::ArrayLike) = false has_fast_linear_indexing(a::Array) = true function check_reducedims(R, A) @@ -216,9 +216,9 @@ end """ Extract first entry of slices of array A into existing array R. 
""" -copyfirst!(R::AbstractArray, A::AbstractArray) = mapfirst!(identity, R, A) +copyfirst!(R::ArrayLike, A::ArrayLike) = mapfirst!(identity, R, A) -function mapfirst!(f, R::AbstractArray, A::AbstractArray{<:Any,N}) where {N} +function mapfirst!(f, R::ArrayLike, A::ArrayLike{N}) where {N} lsiz = check_reducedims(R, A) t = _firstreducedslice(axes(R), axes(A)) map!(f, R, view(A, t...)) @@ -233,7 +233,7 @@ _firstslice(i::OneTo) = OneTo(1) _firstslice(i::Slice) = Slice(_firstslice(i.indices)) _firstslice(i) = i[firstindex(i):firstindex(i)] -function _mapreducedim!(f, op, R::AbstractArray, A::AbstractArray) +function _mapreducedim!(f, op, R::ArrayLike, A::ArrayLike) lsiz = check_reducedims(R,A) isempty(A) && return R @@ -271,14 +271,14 @@ function _mapreducedim!(f, op, R::AbstractArray, A::AbstractArray) return R end -mapreducedim!(f, op, R::AbstractArray, A::AbstractArray) = +mapreducedim!(f, op, R::ArrayLike, A::ArrayLike) = (_mapreducedim!(f, op, R, A); R) -reducedim!(op, R::AbstractArray{RT}, A::AbstractArray) where {RT} = +reducedim!(op, R::ArrayLike, A::ArrayLike) = mapreducedim!(identity, op, R, A) """ - mapreduce(f, op, A::AbstractArray...; dims=:, [init]) + mapreduce(f, op, A::ArrayLike...; dims=:, [init]) Evaluates to the same as `reduce(op, map(f, A); dims=dims, init=init)`, but is generally faster because the intermediate array is avoided. @@ -304,17 +304,17 @@ julia> mapreduce(isodd, |, a, dims=1) 1 1 1 1 ``` """ -mapreduce(f, op, A::AbstractArray; dims=:, kw...) = _mapreduce_dim(f, op, kw.data, A, dims) -mapreduce(f, op, A::AbstractArray...; kw...) = reduce(op, map(f, A...); kw...) +mapreduce(f, op, A::ArrayLike; dims=:, kw...) = _mapreduce_dim(f, op, kw.data, A, dims) +mapreduce(f, op, A::ArrayLike...; kw...) = reduce(op, map(f, A...); kw...) -_mapreduce_dim(f, op, nt::NamedTuple{(:init,)}, A::AbstractArray, ::Colon) = mapfoldl(f, op, A; nt...) +_mapreduce_dim(f, op, nt::NamedTuple{(:init,)}, A::ArrayLike, ::Colon) = mapfoldl(f, op, A; nt...) -_mapreduce_dim(f, op, ::NamedTuple{()}, A::AbstractArray, ::Colon) = _mapreduce(f, op, IndexStyle(A), A) +_mapreduce_dim(f, op, ::NamedTuple{()}, A::ArrayLike, ::Colon) = _mapreduce(f, op, IndexStyle(A), A) -_mapreduce_dim(f, op, nt::NamedTuple{(:init,)}, A::AbstractArray, dims) = +_mapreduce_dim(f, op, nt::NamedTuple{(:init,)}, A::ArrayLike, dims) = mapreducedim!(f, op, reducedim_initarray(A, dims, nt.init), A) -_mapreduce_dim(f, op, ::NamedTuple{()}, A::AbstractArray, dims) = +_mapreduce_dim(f, op, ::NamedTuple{()}, A::ArrayLike, dims) = mapreducedim!(f, op, reducedim_init(f, op, A, dims), A) """ @@ -349,11 +349,11 @@ julia> reduce(max, a, dims=1) 4 8 12 16 ``` """ -reduce(op, A::AbstractArray; kw...) = mapreduce(identity, op, A; kw...) +reduce(op, A::ArrayLike; kw...) = mapreduce(identity, op, A; kw...) ##### Specific reduction functions ##### """ - sum(A::AbstractArray; dims) + sum(A::ArrayLike; dims) Sum elements of an array over the given dimensions. @@ -374,7 +374,7 @@ julia> sum(A, dims=2) 7 ``` """ -sum(A::AbstractArray; dims) +sum(A::ArrayLike; dims) """ sum!(r, A) @@ -401,7 +401,7 @@ julia> sum!([1 1], A) sum!(r, A) """ - prod(A::AbstractArray; dims) + prod(A::ArrayLike; dims) Multiply elements of an array over the given dimensions. 
@@ -422,7 +422,7 @@ julia> prod(A, dims=2) 12 ``` """ -prod(A::AbstractArray; dims) +prod(A::ArrayLike; dims) """ prod!(r, A) @@ -449,7 +449,7 @@ julia> prod!([1 1], A) prod!(r, A) """ - maximum(A::AbstractArray; dims) + maximum(A::ArrayLike; dims) Compute the maximum value of an array over the given dimensions. See also the [`max(a,b)`](@ref) function to take the maximum of two or more arguments, @@ -472,7 +472,7 @@ julia> maximum(A, dims=2) 4 ``` """ -maximum(A::AbstractArray; dims) +maximum(A::ArrayLike; dims) """ maximum!(r, A) @@ -499,7 +499,7 @@ julia> maximum!([1 1], A) maximum!(r, A) """ - minimum(A::AbstractArray; dims) + minimum(A::ArrayLike; dims) Compute the minimum value of an array over the given dimensions. See also the [`min(a,b)`](@ref) function to take the minimum of two or more arguments, @@ -522,7 +522,7 @@ julia> minimum(A, dims=2) 3 ``` """ -minimum(A::AbstractArray; dims) +minimum(A::ArrayLike; dims) """ minimum!(r, A) @@ -570,7 +570,7 @@ julia> all(A, dims=2) 1 ``` """ -all(A::AbstractArray; dims) +all(A::ArrayLike; dims) """ all!(r, A) @@ -618,7 +618,7 @@ julia> any(A, dims=2) 1 ``` """ -any(::AbstractArray; dims) +any(::ArrayLike; dims) """ any!(r, A) @@ -649,8 +649,8 @@ for (fname, _fname, op) in [(:sum, :_sum, :add_sum), (:prod, :_prod, (:maximum, :_maximum, :max), (:minimum, :_minimum, :min)] @eval begin # User-facing methods with keyword arguments - @inline ($fname)(a::AbstractArray; dims=:) = ($_fname)(a, dims) - @inline ($fname)(f, a::AbstractArray; dims=:) = ($_fname)(f, a, dims) + @inline ($fname)(a::ArrayLike; dims=:) = ($_fname)(a, dims) + @inline ($fname)(f, a::ArrayLike; dims=:) = ($_fname)(f, a, dims) # Underlying implementations using dispatch ($_fname)(a, ::Colon) = ($_fname)(identity, a, :) @@ -658,11 +658,11 @@ for (fname, _fname, op) in [(:sum, :_sum, :add_sum), (:prod, :_prod, end end -any(a::AbstractArray; dims=:) = _any(a, dims) -any(f::Function, a::AbstractArray; dims=:) = _any(f, a, dims) +any(a::ArrayLike; dims=:) = _any(a, dims) +any(f::Function, a::ArrayLike; dims=:) = _any(f, a, dims) _any(a, ::Colon) = _any(identity, a, :) -all(a::AbstractArray; dims=:) = _all(a, dims) -all(f::Function, a::AbstractArray; dims=:) = _all(f, a, dims) +all(a::ArrayLike; dims=:) = _all(a, dims) +all(f::Function, a::ArrayLike; dims=:) = _all(f, a, dims) _all(a, ::Colon) = _all(identity, a, :) for (fname, op) in [(:sum, :add_sum), (:prod, :mul_prod), @@ -671,9 +671,9 @@ for (fname, op) in [(:sum, :add_sum), (:prod, :mul_prod), fname! = Symbol(fname, '!') _fname = Symbol('_', fname) @eval begin - $(fname!)(f::Function, r::AbstractArray, A::AbstractArray; init::Bool=true) = + $(fname!)(f::Function, r::ArrayLike, A::ArrayLike; init::Bool=true) = mapreducedim!(f, $(op), initarray!(r, $(op), init, A), A) - $(fname!)(r::AbstractArray, A::AbstractArray; init::Bool=true) = $(fname!)(identity, r, A; init=init) + $(fname!)(r::ArrayLike, A::ArrayLike; init::Bool=true) = $(fname!)(identity, r, A; init=init) $(_fname)(A, dims) = $(_fname)(identity, A, dims) $(_fname)(f, A, dims) = mapreduce(f, $(op), A, dims=dims) @@ -683,7 +683,7 @@ end ##### findmin & findmax ##### # The initial values of Rval are not used if the corresponding indices in Rind are 0. 
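For orientation, a short sketch of the user-facing behavior that `findminmax!` below supports, again on a plain matrix:

```julia
# Reducing over `dims` returns both the extreme values and their locations.
A = [1.0 2.0; 3.0 4.0]

findmax(A, dims=2)   # ([2.0; 4.0], [CartesianIndex(1, 2); CartesianIndex(2, 2)])
findmin(A, dims=1)   # ([1.0 2.0], [CartesianIndex(1, 1) CartesianIndex(1, 2)])
argmax(A, dims=2)    # just the index half of findmax(A, dims=2)
```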
# -function findminmax!(f, Rval, Rind, A::AbstractArray{T,N}) where {T,N} +function findminmax!(f, Rval, Rind, A::ArrayLike{N}) where {N} (isempty(Rval) || isempty(A)) && return Rval, Rind lsiz = check_reducedims(Rval, A) for i = 1:N @@ -740,7 +740,7 @@ Find the minimum of `A` and the corresponding linear index along singleton dimensions of `rval` and `rind`, and store the results in `rval` and `rind`. `NaN` is treated as less than all other values. """ -function findmin!(rval::AbstractArray, rind::AbstractArray, A::AbstractArray; +function findmin!(rval::ArrayLike, rind::ArrayLike, A::ArrayLike; init::Bool=true) findminmax!(isless, init && !isempty(A) ? fill!(rval, first(A)) : rval, fill!(rind,zero(eltype(keys(A)))), A) end @@ -765,7 +765,7 @@ julia> findmin(A, dims=2) ([1.0; 3.0], CartesianIndex{2}[CartesianIndex(1, 1); CartesianIndex(2, 1)]) ``` """ -findmin(A::AbstractArray; dims=:) = _findmin(A, dims) +findmin(A::ArrayLike; dims=:) = _findmin(A, dims) function _findmin(A, region) ri = reduced_indices0(A, region) @@ -789,7 +789,7 @@ Find the maximum of `A` and the corresponding linear index along singleton dimensions of `rval` and `rind`, and store the results in `rval` and `rind`. `NaN` is treated as greater than all other values. """ -function findmax!(rval::AbstractArray, rind::AbstractArray, A::AbstractArray; +function findmax!(rval::ArrayLike, rind::ArrayLike, A::ArrayLike; init::Bool=true) findminmax!(isgreater, init && !isempty(A) ? fill!(rval, first(A)) : rval, fill!(rind,zero(eltype(keys(A)))), A) end @@ -814,7 +814,7 @@ julia> findmax(A, dims=2) ([2.0; 4.0], CartesianIndex{2}[CartesianIndex(1, 2); CartesianIndex(2, 2)]) ``` """ -findmax(A::AbstractArray; dims=:) = _findmax(A, dims) +findmax(A::ArrayLike; dims=:) = _findmax(A, dims) function _findmax(A, region) ri = reduced_indices0(A, region) @@ -854,7 +854,7 @@ julia> argmin(A, dims=2) CartesianIndex(2, 1) ``` """ -argmin(A::AbstractArray; dims=:) = findmin(A; dims=dims)[2] +argmin(A::ArrayLike; dims=:) = findmin(A; dims=dims)[2] """ argmax(A; dims) -> indices @@ -879,4 +879,4 @@ julia> argmax(A, dims=2) CartesianIndex(2, 2) ``` """ -argmax(A::AbstractArray; dims=:) = findmax(A; dims=dims)[2] +argmax(A::ArrayLike; dims=:) = findmax(A; dims=dims)[2] diff --git a/base/reflection.jl b/base/reflection.jl index ccb00baa2e667..16f5330749223 100644 --- a/base/reflection.jl +++ b/base/reflection.jl @@ -537,7 +537,7 @@ Determine whether type `T` was declared as an abstract type # Examples ```jldoctest -julia> isabstracttype(AbstractArray) +julia> isabstracttype(ArrayLike) true julia> isabstracttype(Vector) @@ -751,7 +751,7 @@ function instances end function to_tuple_type(@nospecialize(t)) @_pure_meta - if isa(t,Tuple) || isa(t,AbstractArray) || isa(t,SimpleVector) + if isa(t,Tuple) || isa(t,ArrayLike) || isa(t,SimpleVector) t = Tuple{t...} end if isa(t,Type) && t<:Tuple diff --git a/base/refpointer.jl b/base/refpointer.jl index b12886b6a5373..4e77d91fb05ad 100644 --- a/base/refpointer.jl +++ b/base/refpointer.jl @@ -118,7 +118,7 @@ if is_primary_base_module return RefArray(ptrs,1,roots) end end - Ref(x::AbstractArray, i::Integer) = RefArray(x, i) + Ref(x::ArrayLike, i::Integer) = RefArray(x, i) end cconvert(::Type{Ptr{P}}, a::Array{<:Ptr}) where {P<:Ptr} = a diff --git a/base/reshapedarray.jl b/base/reshapedarray.jl index d8b154b78b0c7..6f3bc71ce4cf9 100644 --- a/base/reshapedarray.jl +++ b/base/reshapedarray.jl @@ -2,7 +2,7 @@ using Base.MultiplicativeInverses: SignedMultiplicativeInverse -struct 
ReshapedArray{T,N,P<:AbstractArray,MI<:Tuple{Vararg{SignedMultiplicativeInverse{Int}}}} <: AbstractArray{T,N} +struct ReshapedArray{T,N,P<:ArrayLike,MI<:Tuple{Vararg{SignedMultiplicativeInverse{Int}}}} <: AbstractArray{T,N} parent::P dims::NTuple{N,Int} mi::MI @@ -10,7 +10,7 @@ end ReshapedArray(parent::AbstractArray{T}, dims::NTuple{N,Int}, mi) where {T,N} = ReshapedArray{T,N,typeof(parent),typeof(mi)}(parent, dims, mi) # IndexLinear ReshapedArray -const ReshapedArrayLF{T,N,P<:AbstractArray} = ReshapedArray{T,N,P,Tuple{}} +const ReshapedArrayLF{T,N,P<:ArrayLike} = ReshapedArray{T,N,P,Tuple{}} # Fast iteration on ReshapedArrays: use the parent iterator struct ReshapedArrayIterator{I,M} @@ -52,8 +52,8 @@ function reshape(a::Array{T,M}, dims::NTuple{N,Int}) where {T,N,M} end """ - reshape(A, dims...) -> AbstractArray - reshape(A, dims) -> AbstractArray + reshape(A, dims...) -> ArrayLike + reshape(A, dims) -> ArrayLike Return an array with the same data as `A`, but with different dimension sizes or number of dimensions. The two arrays share the same @@ -107,15 +107,15 @@ julia> reshape(1:6, 2, 3) """ reshape -reshape(parent::AbstractArray, dims::IntOrInd...) = reshape(parent, dims) -reshape(parent::AbstractArray, shp::Tuple{Union{Integer,OneTo}, Vararg{Union{Integer,OneTo}}}) = reshape(parent, to_shape(shp)) -reshape(parent::AbstractArray, dims::Dims) = _reshape(parent, dims) +reshape(parent::ArrayLike, dims::IntOrInd...) = reshape(parent, dims) +reshape(parent::ArrayLike, shp::Tuple{Union{Integer,OneTo}, Vararg{Union{Integer,OneTo}}}) = reshape(parent, to_shape(shp)) +reshape(parent::ArrayLike, dims::Dims) = _reshape(parent, dims) # Allow missing dimensions with Colon(): -reshape(parent::AbstractVector, ::Colon) = parent -reshape(parent::AbstractArray, dims::Int...) = reshape(parent, dims) -reshape(parent::AbstractArray, dims::Union{Int,Colon}...) = reshape(parent, dims) -reshape(parent::AbstractArray, dims::Tuple{Vararg{Union{Int,Colon}}}) = _reshape(parent, _reshape_uncolon(parent, dims)) +reshape(parent::ArrayLike{1}, ::Colon) = parent +reshape(parent::ArrayLike, dims::Int...) = reshape(parent, dims) +reshape(parent::ArrayLike, dims::Union{Int,Colon}...) = reshape(parent, dims) +reshape(parent::ArrayLike, dims::Tuple{Vararg{Union{Int,Colon}}}) = _reshape(parent, _reshape_uncolon(parent, dims)) @inline function _reshape_uncolon(A, dims) @noinline throw1(dims) = throw(DimensionMismatch(string("new dimensions $(dims) ", "may have at most one omitted dimension specified by `Colon()`"))) @@ -136,8 +136,8 @@ end @inline _after_colon(dim::Any, tail...) = _after_colon(tail...) @inline _after_colon(dim::Colon, tail...) 
= tail -reshape(parent::AbstractArray{T,N}, ndims::Val{N}) where {T,N} = parent -function reshape(parent::AbstractArray, ndims::Val{N}) where N +reshape(parent::ArrayLike{N}, ndims::Val{N}) where {N} = parent +function reshape(parent::ArrayLike, ndims::Val{N}) where N reshape(parent, rdims(Val(N), axes(parent))) end @@ -164,14 +164,14 @@ _reshape(parent::Array, dims::Dims{1}) = reshape(parent, dims) _reshape(parent::Array, dims::Dims) = reshape(parent, dims) # When reshaping Vector->Vector, don't wrap with a ReshapedArray -function _reshape(v::AbstractVector, dims::Dims{1}) +function _reshape(v::ArrayLike{1}, dims::Dims{1}) require_one_based_indexing(v) len = dims[1] len == length(v) || _throw_dmrs(length(v), "length", len) v end # General reshape -function _reshape(parent::AbstractArray, dims::Dims) +function _reshape(parent::ArrayLike, dims::Dims) n = length(parent) prod(dims) == n || _throw_dmrs(n, "size", dims) __reshape((parent, IndexStyle(parent)), dims) @@ -185,7 +185,7 @@ end _reshape(v::ReshapedArray{<:Any,1}, dims::Dims{1}) = _reshape(v.parent, dims) _reshape(R::ReshapedArray, dims::Dims) = _reshape(R.parent, dims) -function __reshape(p::Tuple{AbstractArray,IndexCartesian}, dims::Dims) +function __reshape(p::Tuple{ArrayLike,IndexCartesian}, dims::Dims) parent = p[1] strds = front(size_to_strides(map(length, axes(parent))..., 1)) strds1 = map(s->max(1,Int(s)), strds) # for resizing empty arrays @@ -193,12 +193,12 @@ function __reshape(p::Tuple{AbstractArray,IndexCartesian}, dims::Dims) ReshapedArray(parent, dims, reverse(mi)) end -function __reshape(p::Tuple{AbstractArray{<:Any,0},IndexCartesian}, dims::Dims) +function __reshape(p::Tuple{ArrayLike{0},IndexCartesian}, dims::Dims) parent = p[1] ReshapedArray(parent, dims, ()) end -function __reshape(p::Tuple{AbstractArray,IndexLinear}, dims::Dims) +function __reshape(p::Tuple{ArrayLike,IndexLinear}, dims::Dims) parent = p[1] ReshapedArray(parent, dims, ()) end diff --git a/base/set.jl b/base/set.jl index 9f98aa0133ee6..d746a7017a712 100644 --- a/base/set.jl +++ b/base/set.jl @@ -181,7 +181,7 @@ function unique(f, C) return _unique!(f, out, C, seen, i) end -function _unique!(f, out::AbstractVector, C, seen::Set, i) +function _unique!(f, out::ArrayLike{1}, C, seen::Set, i) s = iterate(C, i) while s !== nothing (x, i) = s @@ -203,7 +203,7 @@ function _unique!(f, out::AbstractVector, C, seen::Set, i) end """ - unique!(f, A::AbstractVector) + unique!(f, A::ArrayLike{1}) Selects one value from `A` for each unique value produced by `f` applied to elements of `A` , then return the modified A. @@ -231,7 +231,7 @@ julia> unique!(iseven, [2, 3, 5, 7, 9]) 3 ``` """ -function unique!(f, A::AbstractVector) +function unique!(f, A::ArrayLike{1}) if length(A) <= 1 return A end @@ -244,7 +244,7 @@ function unique!(f, A::AbstractVector) return _unique!(f, A, seen, i, i+1) end -function _unique!(f, A::AbstractVector, seen::Set, current::Integer, i::Integer) +function _unique!(f, A::ArrayLike{1}, seen::Set, current::Integer, i::Integer) while i <= lastindex(A) x = @inbounds A[i] y = f(x) @@ -267,13 +267,13 @@ end # If A is not grouped, then we will need to keep track of all of the elements that we have # seen so far. -_unique!(A::AbstractVector) = unique!(identity, A::AbstractVector) +_unique!(A::ArrayLike{1}) = unique!(identity, A::ArrayLike{1}) # If A is grouped, so that each unique element is in a contiguous group, then we only # need to keep track of one element at a time. 
We replace the elements of A with the # unique elements that we see in the order that we see them. Once we have iterated # through A, we resize A based on the number of unique elements that we see. -function _groupedunique!(A::AbstractVector) +function _groupedunique!(A::ArrayLike{1}) isempty(A) && return A idxs = eachindex(A) y = first(A) @@ -291,7 +291,7 @@ function _groupedunique!(A::AbstractVector) end """ - unique!(A::AbstractVector) + unique!(A::ArrayLike{1}) Remove duplicate items as determined by [`isequal`](@ref), then return the modified `A`. `unique!` will return the elements of `A` in the order that they occur. If you do not care @@ -620,9 +620,9 @@ function _replace!(new::Callable, res::T, A::T, res end -### replace! for AbstractArray +### replace! for ArrayLike -function _replace!(new::Callable, res::AbstractArray, A::AbstractArray, count::Int) +function _replace!(new::Callable, res::ArrayLike, A::ArrayLike, count::Int) c = 0 if count >= length(A) # simpler loop allows for SIMD for i in eachindex(A) diff --git a/base/show.jl b/base/show.jl index a2eb5664ab223..a61797c4055dc 100644 --- a/base/show.jl +++ b/base/show.jl @@ -700,7 +700,7 @@ function show(io::IO, l::Core.MethodInstance) end end -function show_delim_array(io::IO, itr::Union{AbstractArray,SimpleVector}, op, delim, cl, +function show_delim_array(io::IO, itr::Union{ArrayLike,SimpleVector}, op, delim, cl, delim_one, i1=first(LinearIndices(itr)), l=last(LinearIndices(itr))) print(io, op) if !show_circular(io, itr) @@ -2114,7 +2114,7 @@ function summary(x) end summary(io::IO, t::Tuple) = print(io, t) -## `summary` for AbstractArrays +## `summary` for ArrayLikes # sizes such as 0-dimensional, 4-dimensional, 2x3 dims2string(d) = isempty(d) ? "0-dimensional" : length(d) == 1 ? "$(d[1])-element" : @@ -2125,7 +2125,7 @@ _indsstring(i) = string(i) _indsstring(i::Union{IdentityUnitRange, Slice}) = string(i.indices) # anything array-like gets summarized e.g. 
10-element Array{Int64,1} -summary(io::IO, a::AbstractArray) = array_summary(io, a, axes(a)) +summary(io::IO, a::ArrayLike) = array_summary(io, a, axes(a)) function array_summary(io::IO, a, inds::Tuple{Vararg{OneTo}}) print(io, dims2string(length.(inds)), " ") showarg(io, a, true) @@ -2219,7 +2219,7 @@ function showarg(io::IO, r::ReinterpretArray{T}, toplevel) where {T} end # pretty printing for Iterators.Pairs -function Base.showarg(io::IO, r::Iterators.Pairs{<:Integer, <:Any, <:Any, T}, toplevel) where T<:AbstractArray +function Base.showarg(io::IO, r::Iterators.Pairs{<:Integer, <:Any, <:Any, T}, toplevel) where T<:ArrayLike print(io, "pairs(IndexLinear(), ::", T, ")") end diff --git a/base/sort.jl b/base/sort.jl index 6400b722c9829..b8518fda3b65a 100644 --- a/base/sort.jl +++ b/base/sort.jl @@ -6,7 +6,7 @@ import ..@__MODULE__, ..parentmodule const Base = parentmodule(@__MODULE__) using .Base.Order using .Base: copymutable, LinearIndices, length, (:), - eachindex, axes, first, last, similar, zip, OrdinalRange, + eachindex, axes, first, last, similar, zip, OrdinalRange, ArrayLike, AbstractVector, @inbounds, AbstractRange, @eval, @inline, Vector, @noinline, AbstractMatrix, AbstractUnitRange, isless, identity, eltype, >, <, <=, >=, |, +, -, *, !, extrema, sub_with_overflow, add_with_overflow, oneunit, div, getindex, setindex!, @@ -91,7 +91,7 @@ issorted(itr; lt=isless, by=identity, rev::Union{Bool,Nothing}=nothing, order::Ordering=Forward) = issorted(itr, ord(lt,by,rev,order)) -function partialsort!(v::AbstractVector, k::Union{Integer,OrdinalRange}, o::Ordering) +function partialsort!(v::ArrayLike{1}, k::Union{Integer,OrdinalRange}, o::Ordering) inds = axes(v, 1) sort!(v, first(inds), last(inds), PartialQuickSort(k), o) maybeview(v, k) @@ -151,7 +151,7 @@ julia> a 1 ``` """ -partialsort!(v::AbstractVector, k::Union{Integer,OrdinalRange}; +partialsort!(v::ArrayLike{1}, k::Union{Integer,OrdinalRange}; lt=isless, by=identity, rev::Union{Bool,Nothing}=nothing, order::Ordering=Forward) = partialsort!(v, k, ord(lt,by,rev,order)) @@ -161,7 +161,7 @@ partialsort!(v::AbstractVector, k::Union{Integer,OrdinalRange}; Variant of [`partialsort!`](@ref) which copies `v` before partially sorting it, thereby returning the same thing as `partialsort!` but leaving `v` unmodified. """ -partialsort(v::AbstractVector, k::Union{Integer,OrdinalRange}; kws...) = +partialsort(v::ArrayLike{1}, k::Union{Integer,OrdinalRange}; kws...) = partialsort!(copymutable(v), k; kws...) # This implementation of `midpoint` is performance-optimized but safe @@ -174,7 +174,7 @@ midpoint(lo::Integer, hi::Integer) = midpoint(promote(lo, hi)...) # index of the first value of vector a that is greater than or equal to x; # returns length(v)+1 if x is greater than all values in v. -function searchsortedfirst(v::AbstractVector, x, lo::T, hi::T, o::Ordering) where T<:Integer +function searchsortedfirst(v::ArrayLike{1}, x, lo::T, hi::T, o::Ordering) where T<:Integer u = T(1) lo = lo - u hi = hi + u @@ -191,7 +191,7 @@ end # index of the last value of vector a that is less than or equal to x; # returns 0 if x is less than all values of v. 
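The comments above describe `searchsortedlast`; as a reference point, the whole family behaves like this on an already-sorted plain `Vector`:

```julia
v = [1, 2, 4, 4, 6]

searchsortedfirst(v, 4)   # 3    -- first index i with v[i] >= 4
searchsortedlast(v, 4)    # 4    -- last index i with v[i] <= 4
searchsorted(v, 4)        # 3:4  -- range of entries equal to 4
searchsorted(v, 3)        # 3:2  -- empty range marking the insertion point for 3
searchsortedlast(v, 0)    # 0    -- smaller than every element
```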
-function searchsortedlast(v::AbstractVector, x, lo::T, hi::T, o::Ordering) where T<:Integer +function searchsortedlast(v::ArrayLike{1}, x, lo::T, hi::T, o::Ordering) where T<:Integer u = T(1) lo = lo - u hi = hi + u @@ -209,7 +209,7 @@ end # returns the range of indices of v equal to x # if v does not contain x, returns a 0-length range # indicating the insertion point of x -function searchsorted(v::AbstractVector, x, ilo::T, ihi::T, o::Ordering) where T<:Integer +function searchsorted(v::ArrayLike{1}, x, ilo::T, ihi::T, o::Ordering) where T<:Integer u = T(1) lo = ilo - u hi = ihi + u @@ -313,8 +313,8 @@ searchsorted(a::AbstractRange{<:Real}, x::Real, o::DirectOrdering) = for s in [:searchsortedfirst, :searchsortedlast, :searchsorted] @eval begin - $s(v::AbstractVector, x, o::Ordering) = (inds = axes(v, 1); $s(v,x,first(inds),last(inds),o)) - $s(v::AbstractVector, x; + $s(v::ArrayLike{1}, x, o::Ordering) = (inds = axes(v, 1); $s(v,x,first(inds),last(inds),o)) + $s(v::ArrayLike{1}, x; lt=isless, by=identity, rev::Union{Bool,Nothing}=nothing, order::Ordering=Forward) = $s(v,x,ord(lt,by,rev,order)) end @@ -482,7 +482,7 @@ const DEFAULT_STABLE = MergeSort const SMALL_ALGORITHM = InsertionSort const SMALL_THRESHOLD = 20 -function sort!(v::AbstractVector, lo::Integer, hi::Integer, ::InsertionSortAlg, o::Ordering) +function sort!(v::ArrayLike{1}, lo::Integer, hi::Integer, ::InsertionSortAlg, o::Ordering) @inbounds for i = lo+1:hi j = i x = v[i] @@ -507,7 +507,7 @@ end # Upon return, the pivot is in v[lo], and v[hi] is guaranteed to be # greater than the pivot -@inline function selectpivot!(v::AbstractVector, lo::Integer, hi::Integer, o::Ordering) +@inline function selectpivot!(v::ArrayLike{1}, lo::Integer, hi::Integer, o::Ordering) @inbounds begin mi = midpoint(lo, hi) @@ -533,7 +533,7 @@ end # # select a pivot, and partition v according to the pivot -function partition!(v::AbstractVector, lo::Integer, hi::Integer, o::Ordering) +function partition!(v::ArrayLike{1}, lo::Integer, hi::Integer, o::Ordering) pivot = selectpivot!(v, lo, hi, o) # pivot == v[lo], v[hi] > pivot i, j = lo, hi @@ -552,7 +552,7 @@ function partition!(v::AbstractVector, lo::Integer, hi::Integer, o::Ordering) return j end -function sort!(v::AbstractVector, lo::Integer, hi::Integer, a::QuickSortAlg, o::Ordering) +function sort!(v::ArrayLike{1}, lo::Integer, hi::Integer, a::QuickSortAlg, o::Ordering) @inbounds while lo < hi hi-lo <= SMALL_THRESHOLD && return sort!(v, lo, hi, SMALL_ALGORITHM, o) j = partition!(v, lo, hi, o) @@ -570,7 +570,7 @@ function sort!(v::AbstractVector, lo::Integer, hi::Integer, a::QuickSortAlg, o:: return v end -function sort!(v::AbstractVector, lo::Integer, hi::Integer, a::MergeSortAlg, o::Ordering, t=similar(v,0)) +function sort!(v::ArrayLike{1}, lo::Integer, hi::Integer, a::MergeSortAlg, o::Ordering, t=similar(v,0)) @inbounds if lo < hi hi-lo <= SMALL_THRESHOLD && return sort!(v, lo, hi, SMALL_ALGORITHM, o) @@ -608,7 +608,7 @@ function sort!(v::AbstractVector, lo::Integer, hi::Integer, a::MergeSortAlg, o:: return v end -function sort!(v::AbstractVector, lo::Integer, hi::Integer, a::PartialQuickSort{<:Integer}, +function sort!(v::ArrayLike{1}, lo::Integer, hi::Integer, a::PartialQuickSort{<:Integer}, o::Ordering) @inbounds while lo < hi hi-lo <= SMALL_THRESHOLD && return sort!(v, lo, hi, SMALL_ALGORITHM, o) @@ -631,7 +631,7 @@ function sort!(v::AbstractVector, lo::Integer, hi::Integer, a::PartialQuickSort{ end -function sort!(v::AbstractVector, lo::Integer, hi::Integer, a::PartialQuickSort{T}, 
+function sort!(v::ArrayLike{1}, lo::Integer, hi::Integer, a::PartialQuickSort{T}, o::Ordering) where T<:OrdinalRange @inbounds while lo < hi hi-lo <= SMALL_THRESHOLD && return sort!(v, lo, hi, SMALL_ALGORITHM, o) @@ -657,10 +657,10 @@ end ## generic sorting methods ## -defalg(v::AbstractArray) = DEFAULT_STABLE +defalg(v::ArrayLike) = DEFAULT_STABLE defalg(v::AbstractArray{<:Union{Number, Missing}}) = DEFAULT_UNSTABLE -function sort!(v::AbstractVector, alg::Algorithm, order::Ordering) +function sort!(v::ArrayLike{1}, alg::Algorithm, order::Ordering) inds = axes(v,1) sort!(v,first(inds),last(inds),alg,order) end @@ -704,7 +704,7 @@ julia> v = [(1, "c"), (3, "a"), (2, "b")]; sort!(v, by = x -> x[2]); v (1, "c") ``` """ -function sort!(v::AbstractVector; +function sort!(v::ArrayLike{1}; alg::Algorithm=defalg(v), lt=isless, by=identity, @@ -770,7 +770,7 @@ julia> v 2 ``` """ -sort(v::AbstractVector; kws...) = sort!(copymutable(v); kws...) +sort(v::ArrayLike{1}; kws...) = sort!(copymutable(v); kws...) ## partialsortperm: the permutation to sort the first k elements of an array ## @@ -805,7 +805,7 @@ julia> v[p] 2 ``` """ -partialsortperm(v::AbstractVector, k::Union{Integer,OrdinalRange}; kwargs...) = +partialsortperm(v::ArrayLike{1}, k::Union{Integer,OrdinalRange}; kwargs...) = partialsortperm!(similar(Vector{eltype(k)}, axes(v,1)), v, k; kwargs..., initialized=false) """ @@ -853,7 +853,7 @@ julia> partialsortperm!(ix, v, 2:3, initialized=true) 3 ``` """ -function partialsortperm!(ix::AbstractVector{<:Integer}, v::AbstractVector, +function partialsortperm!(ix::AbstractVector{<:Integer}, v::ArrayLike{1}, k::Union{Integer, OrdinalRange}; lt::Function=isless, by::Function=identity, @@ -905,7 +905,7 @@ julia> v[p] 3 ``` """ -function sortperm(v::AbstractVector; +function sortperm(v::ArrayLike{1}; alg::Algorithm=DEFAULT_UNSTABLE, lt=isless, by=identity, @@ -955,7 +955,7 @@ julia> v[p] 3 ``` """ -function sortperm!(x::AbstractVector{<:Integer}, v::AbstractVector; +function sortperm!(x::AbstractVector{<:Integer}, v::ArrayLike{1}; alg::Algorithm=DEFAULT_UNSTABLE, lt=isless, by=identity, @@ -1028,7 +1028,7 @@ julia> sort(A, dims = 2) 1 2 ``` """ -function sort(A::AbstractArray; +function sort(A::ArrayLike; dims::Integer, alg::Algorithm=DEFAULT_UNSTABLE, lt=isless, @@ -1088,7 +1088,7 @@ julia> sort!(A, dims = 2); A 3 4 ``` """ -function sort!(A::AbstractArray; +function sort!(A::ArrayLike; dims::Integer, alg::Algorithm=defalg(A), lt=isless, @@ -1114,7 +1114,7 @@ end module Float using ..Sort using ...Order -using ..Base: @inbounds, AbstractVector, Vector, last, axes +using ..Base: @inbounds, ArrayLike, AbstractVector, Vector, last, axes import Core.Intrinsics: slt_int import ..Sort: sort! 
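The sorting hunks above only widen the vector entry points from `AbstractVector` to `ArrayLike{1}`; the caller-facing behavior is unchanged. A compact sketch on plain arrays:

```julia
sort([3, 1, 2])                 # [1, 2, 3]      -- copy, then sort
sort!([3, 1, 2])                # sorts in place and returns the same array
sortperm([30, 10, 20])          # [2, 3, 1]      -- permutation that sorts the input
partialsort([9, 4, 7, 1], 2)    # 4              -- 2nd smallest without a full sort
sort([1 4; 3 2], dims=2)        # [1 4; 2 3]     -- sort each row independently
```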
@@ -1137,7 +1137,7 @@ lt(::Right, x::T, y::T) where {T<:Floats} = slt_int(x, y) isnan(o::DirectOrdering, x::Floats) = (x!=x) isnan(o::Perm, i::Integer) = isnan(o.order,o.data[i]) -function nans2left!(v::AbstractVector, o::Ordering, lo::Integer=first(axes(v,1)), hi::Integer=last(axes(v,1))) +function nans2left!(v::ArrayLike{1}, o::Ordering, lo::Integer=first(axes(v,1)), hi::Integer=last(axes(v,1))) i = lo @inbounds while i <= hi && isnan(o,v[i]) i += 1 @@ -1152,7 +1152,7 @@ function nans2left!(v::AbstractVector, o::Ordering, lo::Integer=first(axes(v,1)) end return i, hi end -function nans2right!(v::AbstractVector, o::Ordering, lo::Integer=first(axes(v,1)), hi::Integer=last(axes(v,1))) +function nans2right!(v::ArrayLike{1}, o::Ordering, lo::Integer=first(axes(v,1)), hi::Integer=last(axes(v,1))) i = hi @inbounds while lo <= i && isnan(o,v[i]) i -= 1 @@ -1168,8 +1168,8 @@ function nans2right!(v::AbstractVector, o::Ordering, lo::Integer=first(axes(v,1) return lo, i end -nans2end!(v::AbstractVector, o::ForwardOrdering) = nans2right!(v,o) -nans2end!(v::AbstractVector, o::ReverseOrdering) = nans2left!(v,o) +nans2end!(v::ArrayLike{1}, o::ForwardOrdering) = nans2right!(v,o) +nans2end!(v::ArrayLike{1}, o::ReverseOrdering) = nans2left!(v,o) nans2end!(v::AbstractVector{<:Integer}, o::Perm{<:ForwardOrdering}) = nans2right!(v,o) nans2end!(v::AbstractVector{<:Integer}, o::Perm{<:ReverseOrdering}) = nans2left!(v,o) @@ -1177,7 +1177,7 @@ issignleft(o::ForwardOrdering, x::Floats) = lt(o, x, zero(x)) issignleft(o::ReverseOrdering, x::Floats) = lt(o, x, -zero(x)) issignleft(o::Perm, i::Integer) = issignleft(o.order, o.data[i]) -function fpsort!(v::AbstractVector, a::Algorithm, o::Ordering) +function fpsort!(v::ArrayLike{1}, a::Algorithm, o::Ordering) i, j = lo, hi = nans2end!(v,o) @inbounds while true while i <= j && issignleft(o,v[i]); i += 1; end @@ -1192,7 +1192,7 @@ function fpsort!(v::AbstractVector, a::Algorithm, o::Ordering) end -fpsort!(v::AbstractVector, a::Sort.PartialQuickSort, o::Ordering) = +fpsort!(v::ArrayLike{1}, a::Sort.PartialQuickSort, o::Ordering) = sort!(v, first(axes(v,1)), last(axes(v,1)), a, o) sort!(v::AbstractVector{<:Floats}, a::Algorithm, o::DirectOrdering) = fpsort!(v,a,o) diff --git a/base/strings/basic.jl b/base/strings/basic.jl index e0d00a446c767..1dbdf621f0002 100644 --- a/base/strings/basic.jl +++ b/base/strings/basic.jl @@ -191,7 +191,7 @@ checkbounds(::Type{Bool}, s::AbstractString, I::AbstractArray{<:Real}) = all(i -> checkbounds(Bool, s, i), I) checkbounds(::Type{Bool}, s::AbstractString, I::AbstractArray{<:Integer}) = all(i -> checkbounds(Bool, s, i), I) -checkbounds(s::AbstractString, I::Union{Integer,AbstractArray}) = +checkbounds(s::AbstractString, I::Union{Integer,ArrayLike}) = checkbounds(Bool, s, I) ? nothing : throw(BoundsError(s, I)) ## construction, conversion, promotion ## diff --git a/base/subarray.jl b/base/subarray.jl index a1b68ceaadf4f..3ab6597fd2110 100644 --- a/base/subarray.jl +++ b/base/subarray.jl @@ -1,7 +1,7 @@ # This file is a part of Julia. 
License is MIT: https://julialang.org/license abstract type AbstractCartesianIndex{N} end # This is a hacky forward declaration for CartesianIndex -const ViewIndex = Union{Real, AbstractArray} +const ViewIndex = Union{Real, ArrayLike} const ScalarIndex = Real """ @@ -23,7 +23,7 @@ struct SubArray{T,N,P,I,L} <: AbstractArray{T,N} end end # Compute the linear indexability of the indices, and combine it with the linear indexing of the parent -function SubArray(parent::AbstractArray, indices::Tuple) +function SubArray(parent::ArrayLike, indices::Tuple) @_inline_meta SubArray(IndexStyle(viewindexing(indices), IndexStyle(parent)), parent, ensure_indexable(indices), index_dimsum(indices...)) end @@ -65,7 +65,7 @@ viewindexing(I::Tuple{AbstractRange, Vararg{ScalarIndex}}) = IndexLinear() # All other index combinations are slow viewindexing(I::Tuple{Vararg{Any}}) = IndexCartesian() # Of course, all other array types are slow -viewindexing(I::Tuple{AbstractArray, Vararg{Any}}) = IndexCartesian() +viewindexing(I::Tuple{ArrayLike, Vararg{Any}}) = IndexCartesian() # Simple utilities size(V::SubArray) = (@_inline_meta; map(n->Int(unsafe_length(n)), axes(V))) @@ -97,7 +97,7 @@ julia> parentindices(V) (1, Base.Slice(Base.OneTo(2))) ``` """ -parentindices(a::AbstractArray) = map(OneTo, size(a)) +parentindices(a::ArrayLike) = map(OneTo, size(a)) ## Aliasing detection dataids(A::SubArray) = (dataids(A.parent)..., _splatmap(dataids, A.indices)...) @@ -115,17 +115,17 @@ end # Transform indices to be "dense" _trimmedindex(i::Real) = oftype(i, 1) _trimmedindex(i::AbstractUnitRange) = oftype(i, OneTo(length(i))) -_trimmedindex(i::AbstractArray) = oftype(i, reshape(eachindex(IndexLinear(), i), axes(i))) +_trimmedindex(i::ArrayLike) = oftype(i, reshape(eachindex(IndexLinear(), i), axes(i))) ## SubArray creation # We always assume that the dimensionality of the parent matches the number of # indices that end up getting passed to it, so we store the parent as a # ReshapedArray view if necessary. The trouble is that arrays of `CartesianIndex` # can make the number of effective indices not equal to length(I). -_maybe_reshape_parent(A::AbstractArray, ::NTuple{1, Bool}) = reshape(A, Val(1)) +_maybe_reshape_parent(A::ArrayLike, ::NTuple{1, Bool}) = reshape(A, Val(1)) _maybe_reshape_parent(A::AbstractArray{<:Any,1}, ::NTuple{1, Bool}) = reshape(A, Val(1)) _maybe_reshape_parent(A::AbstractArray{<:Any,N}, ::NTuple{N, Bool}) where {N} = A -_maybe_reshape_parent(A::AbstractArray, ::NTuple{N, Bool}) where {N} = reshape(A, Val(N)) +_maybe_reshape_parent(A::ArrayLike, ::NTuple{N, Bool}) where {N} = reshape(A, Val(N)) """ view(A, inds...) @@ -157,14 +157,14 @@ julia> A # Note A has changed even though we modified b 0 4 ``` """ -function view(A::AbstractArray, I::Vararg{Any,N}) where {N} +function view(A::ArrayLike, I::Vararg{Any,N}) where {N} @_inline_meta J = map(i->unalias(A,i), to_indices(A, I)) @boundscheck checkbounds(A, J...) unsafe_view(_maybe_reshape_parent(A, index_ndims(J...)), J...) 
end -function unsafe_view(A::AbstractArray, I::Vararg{ViewIndex,N}) where {N} +function unsafe_view(A::ArrayLike, I::Vararg{ViewIndex,N}) where {N} @_inline_meta SubArray(A, I) end @@ -210,15 +210,15 @@ reindex(idxs::Tuple{Slice, Vararg{Any}}, subidxs::Tuple{Any, Vararg{Any}}) = (@_propagate_inbounds_meta; (subidxs[1], reindex(tail(idxs), tail(subidxs))...)) # Re-index into parent vectors with one subindex -reindex(idxs::Tuple{AbstractVector, Vararg{Any}}, subidxs::Tuple{Any, Vararg{Any}}) = +reindex(idxs::Tuple{ArrayLike{1}, Vararg{Any}}, subidxs::Tuple{Any, Vararg{Any}}) = (@_propagate_inbounds_meta; (idxs[1][subidxs[1]], reindex(tail(idxs), tail(subidxs))...)) # Parent matrices are re-indexed with two sub-indices -reindex(idxs::Tuple{AbstractMatrix, Vararg{Any}}, subidxs::Tuple{Any, Any, Vararg{Any}}) = +reindex(idxs::Tuple{ArrayLike{2}, Vararg{Any}}, subidxs::Tuple{Any, Any, Vararg{Any}}) = (@_propagate_inbounds_meta; (idxs[1][subidxs[1], subidxs[2]], reindex(tail(idxs), tail(tail(subidxs)))...)) # In general, we index N-dimensional parent arrays with N indices -@generated function reindex(idxs::Tuple{AbstractArray{T,N}, Vararg{Any}}, subidxs::Tuple{Vararg{Any}}) where {T,N} +@generated function reindex(idxs::Tuple{ArrayLike{N}, Vararg{Any}}, subidxs::Tuple{Vararg{Any}}) where {N} if length(subidxs.parameters) >= N subs = [:(subidxs[$d]) for d in 1:N] tail = [:(subidxs[$d]) for d in N+1:length(subidxs.parameters)] @@ -317,7 +317,7 @@ substrides(strds, I::Tuple{Any, Vararg{Any}}) = throw(ArgumentError("strides is stride(V::SubArray, d::Integer) = d <= ndims(V) ? strides(V)[d] : strides(V)[end] * size(V)[end] -compute_stride1(parent::AbstractArray, I::NTuple{N,Any}) where {N} = +compute_stride1(parent::ArrayLike, I::NTuple{N,Any}) where {N} = (@_inline_meta; compute_stride1(1, fill_to_length(axes(parent), OneTo(1), Val(N)), I)) compute_stride1(s, inds, I::Tuple{}) = s compute_stride1(s, inds, I::Tuple{Vararg{ScalarIndex}}) = s @@ -344,7 +344,7 @@ end # sum of index each multiplied by the parent's stride. # The running sum is `f`; the cumulative stride product is `s`. # If the parent is a vector, then we offset the parent's own indices with parameters of I -compute_offset1(parent::AbstractVector, stride1::Integer, I::Tuple{AbstractRange}) = +compute_offset1(parent::ArrayLike{1}, stride1::Integer, I::Tuple{AbstractRange}) = (@_inline_meta; first(I[1]) - first(axes1(I[1]))*stride1) # If the result is one-dimensional and it's a Colon, then linear # indexing uses the indices along the given dimension. Otherwise @@ -404,7 +404,7 @@ end axes(S::SubArray) = (@_inline_meta; _indices_sub(S.indices...)) _indices_sub(::Real, I...) = (@_inline_meta; _indices_sub(I...)) _indices_sub() = () -function _indices_sub(i1::AbstractArray, I...) +function _indices_sub(i1::ArrayLike, I...) @_inline_meta (unsafe_indices(i1)..., _indices_sub(I...)...) end diff --git a/base/threads.jl b/base/threads.jl index 292513418525b..a9dd0a6c399f1 100644 --- a/base/threads.jl +++ b/base/threads.jl @@ -22,7 +22,7 @@ where `copyvalue` defaults to `A[1]`. This is typically used to allocate per-thread variables, and should be called in `__init__` if `A` is a global constant. 
""" -function resize_nthreads!(A::AbstractVector, copyvalue=A[1]) +function resize_nthreads!(A::ArrayLike{1}, copyvalue=A[1]) nthr = nthreads() nold = length(A) resize!(A, nthr) diff --git a/base/tuple.jl b/base/tuple.jl index 2f99c6de69d57..838f3138a80ae 100644 --- a/base/tuple.jl +++ b/base/tuple.jl @@ -23,7 +23,7 @@ size(@nospecialize(t::Tuple), d::Integer) = (d == 1) ? length(t) : throw(Argumen axes(@nospecialize t::Tuple) = (OneTo(length(t)),) @eval getindex(@nospecialize(t::Tuple), i::Int) = getfield(t, i, $(Expr(:boundscheck))) @eval getindex(@nospecialize(t::Tuple), i::Real) = getfield(t, convert(Int, i), $(Expr(:boundscheck))) -getindex(t::Tuple, r::AbstractArray{<:Any,1}) = ([t[ri] for ri in r]...,) +getindex(t::Tuple, r::ArrayLike{1}) = ([t[ri] for ri in r]...,) getindex(t::Tuple, b::AbstractArray{Bool,1}) = length(b) == length(t) ? getindex(t, findall(b)) : throw(BoundsError(t, b)) getindex(t::Tuple, c::Colon) = t diff --git a/base/views.jl b/base/views.jl index b1be0dc0962a8..1a40b6ebbc6df 100644 --- a/base/views.jl +++ b/base/views.jl @@ -121,10 +121,10 @@ end # maybeview is like getindex, but returns a view for slicing operations # (while remaining equivalent to getindex for scalar indices and non-array types) @propagate_inbounds maybeview(A, args...) = getindex(A, args...) -@propagate_inbounds maybeview(A::AbstractArray, args...) = view(A, args...) -@propagate_inbounds maybeview(A::AbstractArray, args::Number...) = getindex(A, args...) +@propagate_inbounds maybeview(A::ArrayLike, args...) = view(A, args...) +@propagate_inbounds maybeview(A::ArrayLike, args::Number...) = getindex(A, args...) @propagate_inbounds maybeview(A) = getindex(A) -@propagate_inbounds maybeview(A::AbstractArray) = getindex(A) +@propagate_inbounds maybeview(A::ArrayLike) = getindex(A) # _views implements the transformation for the @views macro. 
# @views calls esc(_views(...)) to work around #20241, diff --git a/doc/src/base/arrays.md b/doc/src/base/arrays.md index 3b73e58f9a36c..de338a67eae9f 100644 --- a/doc/src/base/arrays.md +++ b/doc/src/base/arrays.md @@ -3,6 +3,7 @@ ## Constructors and Types ```@docs +Core.ArrayLike Core.AbstractArray Base.AbstractVector Base.AbstractMatrix diff --git a/src/builtins.c b/src/builtins.c index 2ffd1c95e8c21..0319fd04aac4b 100644 --- a/src/builtins.c +++ b/src/builtins.c @@ -1375,6 +1375,7 @@ void jl_init_primitives(void) JL_GC_DISABLED add_builtin("Ptr", (jl_value_t*)jl_pointer_type); add_builtin("Task", (jl_value_t*)jl_task_type); + add_builtin("ArrayLike", (jl_value_t*)jl_arraylike_type); add_builtin("AbstractArray", (jl_value_t*)jl_abstractarray_type); add_builtin("DenseArray", (jl_value_t*)jl_densearray_type); add_builtin("Array", (jl_value_t*)jl_array_type); diff --git a/src/dump.c b/src/dump.c index a0ddd5de89786..7007e928b2b75 100644 --- a/src/dump.c +++ b/src/dump.c @@ -3344,7 +3344,7 @@ void jl_init_serializer(void) jl_bool_type, jl_linenumbernode_type, jl_pinode_type, jl_upsilonnode_type, jl_type_type, jl_bottom_type, jl_ref_type, - jl_pointer_type, jl_vararg_type, jl_abstractarray_type, jl_void_type, + jl_pointer_type, jl_vararg_type, jl_arraylike_type, jl_abstractarray_type, jl_void_type, jl_densearray_type, jl_function_type, jl_typename_type, jl_builtin_type, jl_task_type, jl_uniontype_type, jl_typetype_type, jl_array_any_type, jl_intrinsic_type, @@ -3396,7 +3396,7 @@ void jl_init_serializer(void) deser_tag[LAST_TAG+1+i] = (jl_value_t*)vals[i]; i += 1; } - assert(LAST_TAG+1+i < 256); + assert(LAST_TAG+1+i < 257); for (i = 2; i < 256; i++) { if (deser_tag[i]) @@ -3416,6 +3416,7 @@ void jl_init_serializer(void) arraylist_push(&builtin_typenames, ((jl_datatype_t*)jl_ref_type->body)->name); arraylist_push(&builtin_typenames, jl_pointer_typename); arraylist_push(&builtin_typenames, jl_type_typename); + arraylist_push(&builtin_typenames, ((jl_datatype_t*)jl_unwrap_unionall((jl_value_t*)jl_arraylike_type))->name); arraylist_push(&builtin_typenames, ((jl_datatype_t*)jl_unwrap_unionall((jl_value_t*)jl_abstractarray_type))->name); arraylist_push(&builtin_typenames, ((jl_datatype_t*)jl_unwrap_unionall((jl_value_t*)jl_densearray_type))->name); arraylist_push(&builtin_typenames, jl_tuple_typename); diff --git a/src/jltypes.c b/src/jltypes.c index 3873a9e4ede72..7203c0ae4897f 100644 --- a/src/jltypes.c +++ b/src/jltypes.c @@ -47,6 +47,7 @@ jl_datatype_t *jl_builtin_type; jl_datatype_t *jl_typeofbottom_type; jl_value_t *jl_bottom_type; +jl_unionall_t *jl_arraylike_type; jl_unionall_t *jl_abstractarray_type; jl_unionall_t *jl_densearray_type; @@ -1983,10 +1984,19 @@ void jl_init_types(void) JL_GC_DISABLED jl_function_type->name->mt = NULL; // subtypes of Function have independent method tables jl_builtin_type->name->mt = NULL; // so they don't share the Any type table - tv = jl_svec2(tvar("T"), tvar("N")); + tv = jl_svec1(tvar("N")); + jl_arraylike_type = (jl_unionall_t*) + jl_new_abstracttype((jl_value_t*)jl_symbol("ArrayLike"), core, + jl_any_type, tv)->name->wrapper; + + jl_tvar_t *tv_T = tvar("T"); + jl_tvar_t *tv_N = tvar("N"); + tv = jl_svec2(tv_T, tv_N); + jl_abstractarray_type = (jl_unionall_t*) jl_new_abstracttype((jl_value_t*)jl_symbol("AbstractArray"), core, - jl_any_type, tv)->name->wrapper; + (jl_datatype_t*)jl_apply_type((jl_value_t*)jl_arraylike_type, jl_svec_data(jl_svec1(tv_N)), 1), + tv)->name->wrapper; tv = jl_svec2(tvar("T"), tvar("N")); jl_densearray_type = (jl_unionall_t*) 
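The `jltypes.c` hunk above bootstraps `ArrayLike` with a single parameter `N` and re-parents `AbstractArray{T,N}` under `ArrayLike{N}`. A sketch of the subtype relations one would expect from that declaration (expected results, not doctests taken from the patch):

```julia
supertype(AbstractArray{Float64,2})   # expected: ArrayLike{2}
AbstractArray{Int,1} <: ArrayLike{1}  # true
Matrix{Int} <: ArrayLike{2}           # true, via Array <: DenseArray <: AbstractArray
Vector{Int} <: ArrayLike              # true: ArrayLike{1} matches the UnionAll
ArrayLike{2} <: AbstractArray         # false: ArrayLike carries no element type
```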
diff --git a/src/julia.h b/src/julia.h index 647ee82be932f..418c7f6ed38ef 100644 --- a/src/julia.h +++ b/src/julia.h @@ -600,6 +600,7 @@ extern JL_DLLEXPORT jl_datatype_t *jl_code_instance_type JL_GLOBALLY_ROOTED; extern JL_DLLEXPORT jl_datatype_t *jl_code_info_type JL_GLOBALLY_ROOTED; extern JL_DLLEXPORT jl_datatype_t *jl_method_type JL_GLOBALLY_ROOTED; extern JL_DLLEXPORT jl_datatype_t *jl_module_type JL_GLOBALLY_ROOTED; +extern JL_DLLEXPORT jl_unionall_t *jl_arraylike_type JL_GLOBALLY_ROOTED; extern JL_DLLEXPORT jl_unionall_t *jl_abstractarray_type JL_GLOBALLY_ROOTED; extern JL_DLLEXPORT jl_unionall_t *jl_densearray_type JL_GLOBALLY_ROOTED; extern JL_DLLEXPORT jl_unionall_t *jl_array_type JL_GLOBALLY_ROOTED; diff --git a/src/staticdata.c b/src/staticdata.c index a59168c63716d..6dd5518d3dbed 100644 --- a/src/staticdata.c +++ b/src/staticdata.c @@ -42,7 +42,7 @@ static void *const _tags[] = { &jl_gotonode_type, &jl_quotenode_type, &jl_pinode_type, &jl_phinode_type, &jl_phicnode_type, &jl_upsilonnode_type, &jl_type_type, &jl_bottom_type, &jl_ref_type, &jl_pointer_type, - &jl_vararg_type, &jl_abstractarray_type, + &jl_vararg_type, &jl_arraylike_type, &jl_abstractarray_type, &jl_densearray_type, &jl_void_type, &jl_function_type, &jl_typeofbottom_type, &jl_unionall_type, &jl_typename_type, &jl_builtin_type, &jl_code_info_type, &jl_task_type, &jl_uniontype_type, &jl_typetype_type, &jl_abstractstring_type, diff --git a/stdlib/DelimitedFiles/src/DelimitedFiles.jl b/stdlib/DelimitedFiles/src/DelimitedFiles.jl index 38dffbff17632..5ef443f671e91 100644 --- a/stdlib/DelimitedFiles/src/DelimitedFiles.jl +++ b/stdlib/DelimitedFiles/src/DelimitedFiles.jl @@ -733,7 +733,7 @@ function writedlm_cell(io::IO, elt::AbstractString, dlm::T, quotes::Bool) where end end writedlm_cell(io::IO, elt, dlm, quotes) = print(io, elt) -function writedlm(io::IO, a::AbstractMatrix, dlm; opts...) +function writedlm(io::IO, a::ArrayLike{2}, dlm; opts...) optsd = val_opts(opts) quotes = get(optsd, :quotes, true) pb = PipeBuffer() @@ -749,7 +749,7 @@ function writedlm(io::IO, a::AbstractMatrix, dlm; opts...) nothing end -writedlm(io::IO, a::AbstractArray{<:Any,0}, dlm; opts...) = writedlm(io, reshape(a,1), dlm; opts...) +writedlm(io::IO, a::ArrayLike{0}, dlm; opts...) = writedlm(io, reshape(a,1), dlm; opts...) # write an iterable row as dlm-separated items function writedlm_row(io::IO, row, dlm, quotes) diff --git a/stdlib/Distributed/src/cluster.jl b/stdlib/Distributed/src/cluster.jl index e7b3086362e67..01a0f898fa87c 100644 --- a/stdlib/Distributed/src/cluster.jl +++ b/stdlib/Distributed/src/cluster.jl @@ -1176,12 +1176,12 @@ pressing Ctrl-C on the local machine. If no arguments are given, all workers are interrupt(pids::Integer...) = interrupt([pids...]) """ - interrupt(pids::AbstractVector=workers()) + interrupt(pids::ArrayLike{1}=workers()) Interrupt the current executing task on the specified workers. This is equivalent to pressing Ctrl-C on the local machine. If no arguments are given, all workers are interrupted. """ -function interrupt(pids::AbstractVector=workers()) +function interrupt(pids::ArrayLike{1}=workers()) @assert myid() == 1 @sync begin for pid in pids diff --git a/stdlib/Distributed/src/managers.jl b/stdlib/Distributed/src/managers.jl index 1a05f957dca9f..b5c3306d1bec9 100644 --- a/stdlib/Distributed/src/managers.jl +++ b/stdlib/Distributed/src/managers.jl @@ -113,7 +113,7 @@ This timeout can be controlled via environment variable `JULIA_WORKER_TIMEOUT`. 
The value of `JULIA_WORKER_TIMEOUT` on the master process specifies the number of seconds a newly launched worker waits for connection establishment. """ -function addprocs(machines::AbstractVector; tunnel=false, sshflags=``, max_parallel=10, kwargs...) +function addprocs(machines::ArrayLike{1}; tunnel=false, sshflags=``, max_parallel=10, kwargs...) check_addprocs_args(kwargs) addprocs(SSHManager(machines); tunnel=tunnel, sshflags=sshflags, max_parallel=max_parallel, kwargs...) end diff --git a/stdlib/Future/src/Future.jl b/stdlib/Future/src/Future.jl index 1d70dba7c84de..8bff776a45302 100644 --- a/stdlib/Future/src/Future.jl +++ b/stdlib/Future/src/Future.jl @@ -22,7 +22,7 @@ Copy `src` into `dst`. """ copy!(dst::AbstractSet, src::AbstractSet) = Base.copy!(dst, src) copy!(dst::AbstractDict, src::AbstractDict) = Base.copy!(dst, src) -copy!(dst::AbstractArray, src::AbstractArray) = Base.copy!(dst, src) +copy!(dst::ArrayLike, src::ArrayLike) = Base.copy!(dst, src) ## randjump diff --git a/stdlib/InteractiveUtils/src/editless.jl b/stdlib/InteractiveUtils/src/editless.jl index fcd91f834a55e..61c9becbc81db 100644 --- a/stdlib/InteractiveUtils/src/editless.jl +++ b/stdlib/InteractiveUtils/src/editless.jl @@ -105,7 +105,7 @@ end editor_matches(p::Regex, cmd::Cmd) = occursin(p, shell_escape(cmd)) editor_matches(p::String, cmd::Cmd) = p == splitext(basename(first(cmd)))[1] -editor_matches(ps::AbstractArray, cmd::Cmd) = any(editor_matches(p, cmd) for p in ps) +editor_matches(ps::ArrayLike, cmd::Cmd) = any(editor_matches(p, cmd) for p in ps) function define_default_editors() # fallback: just call the editor with the path as argument diff --git a/stdlib/InteractiveUtils/test/runtests.jl b/stdlib/InteractiveUtils/test/runtests.jl index 946056e50e667..01eae659252f0 100644 --- a/stdlib/InteractiveUtils/test/runtests.jl +++ b/stdlib/InteractiveUtils/test/runtests.jl @@ -124,7 +124,7 @@ let a = @code_typed 1 + 1 function thing(a::Array, b::Real) println("thing") end - function thing(a::AbstractArray, b::Int) + function thing(a::ArrayLike, b::Int) println("blah") end @test_throws MethodError thing(rand(10), 1) diff --git a/stdlib/LinearAlgebra/docs/src/index.md b/stdlib/LinearAlgebra/docs/src/index.md index b78ed785080e0..3a138b3cf0227 100644 --- a/stdlib/LinearAlgebra/docs/src/index.md +++ b/stdlib/LinearAlgebra/docs/src/index.md @@ -314,8 +314,8 @@ Linear algebra functions in Julia are largely implemented by calling functions f Sparse factorizations call functions from [SuiteSparse](http://faculty.cse.tamu.edu/davis/suitesparse.html). 
```@docs -Base.:*(::AbstractMatrix, ::AbstractMatrix) -Base.:\(::AbstractMatrix, ::AbstractVecOrMat) +Base.:*(::ArrayLike{2}, ::ArrayLike{2}) +Base.:\(::ArrayLike{2}, ::Base.VectorOrMatrixLike) LinearAlgebra.SingularException LinearAlgebra.PosDefException LinearAlgebra.ZeroPivotException @@ -405,13 +405,13 @@ LinearAlgebra.tr LinearAlgebra.det LinearAlgebra.logdet LinearAlgebra.logabsdet -Base.inv(::AbstractMatrix) +Base.inv(::ArrayLike{2}) LinearAlgebra.pinv LinearAlgebra.nullspace Base.kron LinearAlgebra.exp(::StridedMatrix{<:LinearAlgebra.BlasFloat}) -Base.:^(::AbstractMatrix, ::Number) -Base.:^(::Number, ::AbstractMatrix) +Base.:^(::ArrayLike{2}, ::Number) +Base.:^(::Number, ::ArrayLike{2}) LinearAlgebra.log(::StridedMatrix) LinearAlgebra.sqrt(::StridedMatrix{<:Real}) LinearAlgebra.cos(::StridedMatrix{<:Real}) diff --git a/stdlib/LinearAlgebra/src/LinearAlgebra.jl b/stdlib/LinearAlgebra/src/LinearAlgebra.jl index 30002837f8398..75d5415981172 100644 --- a/stdlib/LinearAlgebra/src/LinearAlgebra.jl +++ b/stdlib/LinearAlgebra/src/LinearAlgebra.jl @@ -16,7 +16,8 @@ import Base: USE_BLAS64, abs, acos, acosh, acot, acoth, acsc, acsch, adjoint, as setindex!, show, similar, sin, sincos, sinh, size, sqrt, strides, stride, tan, tanh, transpose, trunc, typed_hcat, vec using Base: hvcat_fill, IndexLinear, promote_op, promote_typeof, - @propagate_inbounds, @pure, reduce, typed_vcat, require_one_based_indexing + @propagate_inbounds, @pure, reduce, typed_vcat, require_one_based_indexing, + VectorOrMatrixLike using Base.Broadcast: Broadcasted, broadcasted export @@ -345,7 +346,7 @@ control over the factorization of `B`. rdiv!(A, B) copy_oftype(A::AbstractArray{T}, ::Type{T}) where {T} = copy(A) -copy_oftype(A::AbstractArray{T,N}, ::Type{S}) where {T,N,S} = convert(AbstractArray{S,N}, A) +copy_oftype(A::ArrayLike{N}, ::Type{S}) where {N,S} = convert(AbstractArray{S,N}, A) include("adjtrans.jl") include("transpose.jl") diff --git a/stdlib/LinearAlgebra/src/adjtrans.jl b/stdlib/LinearAlgebra/src/adjtrans.jl index b822be0c6e36d..11c9a5121d826 100644 --- a/stdlib/LinearAlgebra/src/adjtrans.jl +++ b/stdlib/LinearAlgebra/src/adjtrans.jl @@ -12,7 +12,7 @@ import Base: length, size, axes, IndexStyle, getindex, setindex!, parent, vec, c Adjoint Lazy wrapper type for an adjoint view of the underlying linear algebra object, -usually an `AbstractVector`/`AbstractMatrix`, but also some `Factorization`, for instance. +usually an `ArrayLike{1}`/`ArrayLike{2}`, but also some `Factorization`, for instance. Usually, the `Adjoint` constructor should not be called directly, use [`adjoint`](@ref) instead. To materialize the view use [`copy`](@ref). @@ -43,7 +43,7 @@ end Transpose Lazy wrapper type for a transpose view of the underlying linear algebra object, -usually an `AbstractVector`/`AbstractMatrix`, but also some `Factorization`, for instance. +usually an `ArrayLike{1}`/`ArrayLike{2}`, but also some `Factorization`, for instance. Usually, the `Transpose` constructor should not be called directly, use [`transpose`](@ref) instead. To materialize the view use [`copy`](@ref). 
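Two notes on the `adjtrans.jl` hunk that begins here. First, `VectorOrMatrixLike`, imported from `Base` in the `LinearAlgebra.jl` hunk above, is presumably the `ArrayLike` counterpart of `AbstractVecOrMat` (roughly `Union{ArrayLike{1}, ArrayLike{2}}`); its definition is not part of this excerpt. Second, the `Adjoint`/`Transpose` wrappers stay lazy, and the widened vector methods keep the familiar behaviour:

```julia
using LinearAlgebra
u = [1.0, 2.0, 3.0]
v = [4.0, 5.0, 6.0]
u' isa Adjoint        # adjoint(A::VectorOrMatrixLike) = Adjoint(A): a lazy wrapper
u' * v == dot(u, v)   # Adjoint-vector * vector lowers to dot(u, v) == 32.0
u * v' == u .* v'     # vector * Adjoint-vector broadcasts to a 3×3 outer product
```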
@@ -121,7 +121,7 @@ julia> adjoint(A) 9-2im 4-6im ``` """ -adjoint(A::AbstractVecOrMat) = Adjoint(A) +adjoint(A::VectorOrMatrixLike) = Adjoint(A) """ transpose(A) @@ -146,7 +146,7 @@ julia> transpose(A) 9+2im 4+6im ``` """ -transpose(A::AbstractVecOrMat) = Transpose(A) +transpose(A::VectorOrMatrixLike) = Transpose(A) # unwrapping lowercase quasi-constructors adjoint(A::Adjoint) = A.parent @@ -157,10 +157,10 @@ transpose(A::Adjoint{<:Real}) = A.parent # some aliases for internal convenience use const AdjOrTrans{T,S} = Union{Adjoint{T,S},Transpose{T,S}} where {T,S} -const AdjointAbsVec{T} = Adjoint{T,<:AbstractVector} -const TransposeAbsVec{T} = Transpose{T,<:AbstractVector} -const AdjOrTransAbsVec{T} = AdjOrTrans{T,<:AbstractVector} -const AdjOrTransAbsMat{T} = AdjOrTrans{T,<:AbstractMatrix} +const AdjointAbsVec{T} = Adjoint{T,<:ArrayLike{1}} +const TransposeAbsVec{T} = Transpose{T,<:ArrayLike{1}} +const AdjOrTransAbsVec{T} = AdjOrTrans{T,<:ArrayLike{1}} +const AdjOrTransAbsMat{T} = AdjOrTrans{T,<:ArrayLike{2}} # for internal use below wrapperop(A::Adjoint) = adjoint @@ -242,15 +242,19 @@ Broadcast.broadcast_preserving_zero_d(f, tvs::Union{Number,TransposeAbsVec}...) ## multiplication * # Adjoint/Transpose-vector * vector -*(u::AdjointAbsVec, v::AbstractVector) = dot(u.parent, v) +*(u::AdjointAbsVec, v::ArrayLike{1}) = dot(u.parent, v) +*(u::AdjointAbsVec, v::AbstractVector) = dot(u.parent, v) # specific *(u::TransposeAbsVec{T}, v::AbstractVector{T}) where {T<:Real} = dot(u.parent, v) -function *(u::TransposeAbsVec, v::AbstractVector) +@inline function multiply_transpose(u, v) require_one_based_indexing(u, v) @boundscheck length(u) == length(v) || throw(DimensionMismatch()) return sum(@inbounds(u[k]*v[k]) for k in 1:length(u)) end +*(u::TransposeAbsVec, v::ArrayLike{1}) = multiply_transpose(u, v) +*(u::TransposeAbsVec, v::AbstractVector) = multiply_transpose(u, v) # specific + # vector * Adjoint/Transpose-vector -*(u::AbstractVector, v::AdjOrTransAbsVec) = broadcast(*, u, v) +*(u::ArrayLike{1}, v::AdjOrTransAbsVec) = broadcast(*, u, v) # Adjoint/Transpose-vector * Adjoint/Transpose-vector # (necessary for disambiguation with fallback methods in linalg/matmul) *(u::AdjointAbsVec, v::AdjointAbsVec) = throw(MethodError(*, (u, v))) @@ -277,10 +281,10 @@ pinv(v::TransposeAbsVec, tol::Real = 0) = pinv(conj(v.parent)).parent ## right-division / -/(u::AdjointAbsVec, A::AbstractMatrix) = adjoint(adjoint(A) \ u.parent) -/(u::TransposeAbsVec, A::AbstractMatrix) = transpose(transpose(A) \ u.parent) -/(u::AdjointAbsVec, A::Transpose{<:Any,<:AbstractMatrix}) = adjoint(conj(A.parent) \ u.parent) # technically should be adjoint(copy(adjoint(copy(A))) \ u.parent) -/(u::TransposeAbsVec, A::Adjoint{<:Any,<:AbstractMatrix}) = transpose(conj(A.parent) \ u.parent) # technically should be transpose(copy(transpose(copy(A))) \ u.parent) +/(u::AdjointAbsVec, A::ArrayLike{2}) = adjoint(adjoint(A) \ u.parent) +/(u::TransposeAbsVec, A::ArrayLike{2}) = transpose(transpose(A) \ u.parent) +/(u::AdjointAbsVec, A::Transpose{<:Any,<:ArrayLike{2}}) = adjoint(conj(A.parent) \ u.parent) # technically should be adjoint(copy(adjoint(copy(A))) \ u.parent) +/(u::TransposeAbsVec, A::Adjoint{<:Any,<:ArrayLike{2}}) = transpose(conj(A.parent) \ u.parent) # technically should be transpose(copy(transpose(copy(A))) \ u.parent) ## complex conjugate conj(A::Transpose) = adjoint(A.parent) diff --git a/stdlib/LinearAlgebra/src/bidiag.jl b/stdlib/LinearAlgebra/src/bidiag.jl index faa32aee15fa6..f6a7316fceb5d 100644 --- 
a/stdlib/LinearAlgebra/src/bidiag.jl +++ b/stdlib/LinearAlgebra/src/bidiag.jl @@ -16,14 +16,14 @@ end function Bidiagonal{T,V}(dv, ev, uplo::Symbol) where {T,V<:AbstractVector{T}} Bidiagonal{T,V}(dv, ev, char_uplo(uplo)) end -function Bidiagonal{T}(dv::AbstractVector, ev::AbstractVector, uplo::Union{Symbol,AbstractChar}) where {T} +function Bidiagonal{T}(dv::ArrayLike{1}, ev::ArrayLike{1}, uplo::Union{Symbol,AbstractChar}) where {T} Bidiagonal(convert(AbstractVector{T}, dv)::AbstractVector{T}, convert(AbstractVector{T}, ev)::AbstractVector{T}, uplo) end """ - Bidiagonal(dv::V, ev::V, uplo::Symbol) where V <: AbstractVector + Bidiagonal(dv::V, ev::V, uplo::Symbol) where V <: ArrayLike{1} Constructs an upper (`uplo=:U`) or lower (`uplo=:L`) bidiagonal matrix using the given diagonal (`dv`) and off-diagonal (`ev`) vectors. The result is of type `Bidiagonal` @@ -98,7 +98,7 @@ julia> Bidiagonal(A, :L) # contains the main diagonal and first subdiagonal of A ⋅ ⋅ 4 4 ``` """ -function Bidiagonal(A::AbstractMatrix, uplo::Symbol) +function Bidiagonal(A::ArrayLike{2}, uplo::Symbol) Bidiagonal(diag(A, 0), diag(A, uplo === :U ? 1 : -1), uplo) end @@ -180,7 +180,7 @@ promote_rule(::Type{<:Tridiagonal}, ::Type{<:Bidiagonal}) = Tridiagonal # When asked to convert Bidiagonal to AbstractMatrix{T}, preserve structure by converting to Bidiagonal{T} <: AbstractMatrix{T} AbstractMatrix{T}(A::Bidiagonal) where {T} = convert(Bidiagonal{T}, A) -convert(T::Type{<:Bidiagonal}, m::AbstractMatrix) = m isa T ? m : T(m) +convert(T::Type{<:Bidiagonal}, m::ArrayLike{2}) = m isa T ? m : T(m) # For B<:Bidiagonal, similar(B[, neweltype]) should yield a Bidiagonal matrix. # On the other hand, similar(B, [neweltype,] shape...) should yield a sparse matrix. @@ -338,23 +338,23 @@ end const BiTriSym = Union{Bidiagonal,Tridiagonal,SymTridiagonal} const BiTri = Union{Bidiagonal,Tridiagonal} -@inline mul!(C::AbstractMatrix, A::SymTridiagonal, B::BiTriSym, alpha::Number, beta::Number) = A_mul_B_td!(C, A, B, MulAddMul(alpha, beta)) -@inline mul!(C::AbstractMatrix, A::BiTriSym, B::BiTriSym, alpha::Number, beta::Number) = A_mul_B_td!(C, A, B, MulAddMul(alpha, beta)) -@inline mul!(C::AbstractMatrix, A::AbstractTriangular, B::BiTriSym, alpha::Number, beta::Number) = A_mul_B_td!(C, A, B, MulAddMul(alpha, beta)) -@inline mul!(C::AbstractMatrix, A::AbstractMatrix, B::BiTriSym, alpha::Number, beta::Number) = A_mul_B_td!(C, A, B, MulAddMul(alpha, beta)) -@inline mul!(C::AbstractMatrix, A::Diagonal, B::BiTriSym, alpha::Number, beta::Number) = A_mul_B_td!(C, A, B, MulAddMul(alpha, beta)) -@inline mul!(C::AbstractMatrix, A::Adjoint{<:Any,<:Diagonal}, B::BiTriSym, alpha::Number, beta::Number) = A_mul_B_td!(C, A, B, MulAddMul(alpha, beta)) -@inline mul!(C::AbstractMatrix, A::Transpose{<:Any,<:Diagonal}, B::BiTriSym, alpha::Number, beta::Number) = A_mul_B_td!(C, A, B, MulAddMul(alpha, beta)) -@inline mul!(C::AbstractMatrix, A::Adjoint{<:Any,<:AbstractTriangular}, B::BiTriSym, alpha::Number, beta::Number) = A_mul_B_td!(C, A, B, MulAddMul(alpha, beta)) -@inline mul!(C::AbstractMatrix, A::Transpose{<:Any,<:AbstractTriangular}, B::BiTriSym, alpha::Number, beta::Number) = A_mul_B_td!(C, A, B, MulAddMul(alpha, beta)) -@inline mul!(C::AbstractMatrix, A::Adjoint{<:Any,<:AbstractVecOrMat}, B::BiTriSym, alpha::Number, beta::Number) = A_mul_B_td!(C, A, B, MulAddMul(alpha, beta)) -@inline mul!(C::AbstractMatrix, A::Transpose{<:Any,<:AbstractVecOrMat}, B::BiTriSym, alpha::Number, beta::Number) = A_mul_B_td!(C, A, B, MulAddMul(alpha, beta)) -@inline 
mul!(C::AbstractVector, A::BiTriSym, B::AbstractVector, alpha::Number, beta::Number) = A_mul_B_td!(C, A, B, MulAddMul(alpha, beta)) -@inline mul!(C::AbstractMatrix, A::BiTriSym, B::AbstractVecOrMat, alpha::Number, beta::Number) = A_mul_B_td!(C, A, B, MulAddMul(alpha, beta)) -@inline mul!(C::AbstractVecOrMat, A::BiTriSym, B::AbstractVecOrMat, alpha::Number, beta::Number) = A_mul_B_td!(C, A, B, MulAddMul(alpha, beta)) -@inline mul!(C::AbstractMatrix, A::BiTriSym, B::Transpose{<:Any,<:AbstractVecOrMat}, alpha::Number, beta::Number) = A_mul_B_td!(C, A, B, MulAddMul(alpha, beta)) # around bidiag line 330 -@inline mul!(C::AbstractMatrix, A::BiTriSym, B::Adjoint{<:Any,<:AbstractVecOrMat}, alpha::Number, beta::Number) = A_mul_B_td!(C, A, B, MulAddMul(alpha, beta)) -@inline mul!(C::AbstractVector, A::BiTriSym, B::Transpose{<:Any,<:AbstractVecOrMat}, alpha::Number, beta::Number) = throw(MethodError(mul!, (C, A, B)), MulAddMul(alpha, beta)) +@inline mul!(C::ArrayLike{2}, A::SymTridiagonal, B::BiTriSym, alpha::Number, beta::Number) = A_mul_B_td!(C, A, B, MulAddMul(alpha, beta)) +@inline mul!(C::ArrayLike{2}, A::BiTriSym, B::BiTriSym, alpha::Number, beta::Number) = A_mul_B_td!(C, A, B, MulAddMul(alpha, beta)) +@inline mul!(C::ArrayLike{2}, A::AbstractTriangular, B::BiTriSym, alpha::Number, beta::Number) = A_mul_B_td!(C, A, B, MulAddMul(alpha, beta)) +@inline mul!(C::ArrayLike{2}, A::ArrayLike{2}, B::BiTriSym, alpha::Number, beta::Number) = A_mul_B_td!(C, A, B, MulAddMul(alpha, beta)) +@inline mul!(C::ArrayLike{2}, A::Diagonal, B::BiTriSym, alpha::Number, beta::Number) = A_mul_B_td!(C, A, B, MulAddMul(alpha, beta)) +@inline mul!(C::ArrayLike{2}, A::Adjoint{<:Any,<:Diagonal}, B::BiTriSym, alpha::Number, beta::Number) = A_mul_B_td!(C, A, B, MulAddMul(alpha, beta)) +@inline mul!(C::ArrayLike{2}, A::Transpose{<:Any,<:Diagonal}, B::BiTriSym, alpha::Number, beta::Number) = A_mul_B_td!(C, A, B, MulAddMul(alpha, beta)) +@inline mul!(C::ArrayLike{2}, A::Adjoint{<:Any,<:AbstractTriangular}, B::BiTriSym, alpha::Number, beta::Number) = A_mul_B_td!(C, A, B, MulAddMul(alpha, beta)) +@inline mul!(C::ArrayLike{2}, A::Transpose{<:Any,<:AbstractTriangular}, B::BiTriSym, alpha::Number, beta::Number) = A_mul_B_td!(C, A, B, MulAddMul(alpha, beta)) +@inline mul!(C::ArrayLike{2}, A::Adjoint{<:Any,<:VectorOrMatrixLike}, B::BiTriSym, alpha::Number, beta::Number) = A_mul_B_td!(C, A, B, MulAddMul(alpha, beta)) +@inline mul!(C::ArrayLike{2}, A::Transpose{<:Any,<:VectorOrMatrixLike}, B::BiTriSym, alpha::Number, beta::Number) = A_mul_B_td!(C, A, B, MulAddMul(alpha, beta)) +@inline mul!(C::ArrayLike{1}, A::BiTriSym, B::ArrayLike{1}, alpha::Number, beta::Number) = A_mul_B_td!(C, A, B, MulAddMul(alpha, beta)) +@inline mul!(C::ArrayLike{2}, A::BiTriSym, B::VectorOrMatrixLike, alpha::Number, beta::Number) = A_mul_B_td!(C, A, B, MulAddMul(alpha, beta)) +@inline mul!(C::VectorOrMatrixLike, A::BiTriSym, B::VectorOrMatrixLike, alpha::Number, beta::Number) = A_mul_B_td!(C, A, B, MulAddMul(alpha, beta)) +@inline mul!(C::ArrayLike{2}, A::BiTriSym, B::Transpose{<:Any,<:VectorOrMatrixLike}, alpha::Number, beta::Number) = A_mul_B_td!(C, A, B, MulAddMul(alpha, beta)) # around bidiag line 330 +@inline mul!(C::ArrayLike{2}, A::BiTriSym, B::Adjoint{<:Any,<:VectorOrMatrixLike}, alpha::Number, beta::Number) = A_mul_B_td!(C, A, B, MulAddMul(alpha, beta)) +@inline mul!(C::ArrayLike{1}, A::BiTriSym, B::Transpose{<:Any,<:VectorOrMatrixLike}, alpha::Number, beta::Number) = throw(MethodError(mul!, (C, A, B)), MulAddMul(alpha, beta)) function 
check_A_mul_B!_sizes(C, A, B) require_one_based_indexing(C) @@ -386,7 +386,7 @@ function _diag(A::Bidiagonal, k) end end -function A_mul_B_td!(C::AbstractMatrix, A::BiTriSym, B::BiTriSym, +function A_mul_B_td!(C::ArrayLike{2}, A::BiTriSym, B::BiTriSym, _add::MulAddMul = MulAddMul()) check_A_mul_B!_sizes(C, A, B) n = size(A,1) @@ -444,7 +444,7 @@ function A_mul_B_td!(C::AbstractMatrix, A::BiTriSym, B::BiTriSym, C end -function A_mul_B_td!(C::AbstractMatrix, A::BiTriSym, B::Diagonal, +function A_mul_B_td!(C::ArrayLike{2}, A::BiTriSym, B::Diagonal, _add::MulAddMul = MulAddMul()) check_A_mul_B!_sizes(C, A, B) n = size(A,1) @@ -479,7 +479,7 @@ function A_mul_B_td!(C::AbstractMatrix, A::BiTriSym, B::Diagonal, C end -function A_mul_B_td!(C::AbstractVecOrMat, A::BiTriSym, B::AbstractVecOrMat, +function A_mul_B_td!(C::VectorOrMatrixLike, A::BiTriSym, B::VectorOrMatrixLike, _add::MulAddMul = MulAddMul()) require_one_based_indexing(C) require_one_based_indexing(B) @@ -510,7 +510,7 @@ function A_mul_B_td!(C::AbstractVecOrMat, A::BiTriSym, B::AbstractVecOrMat, C end -function A_mul_B_td!(C::AbstractMatrix, A::AbstractMatrix, B::BiTriSym, +function A_mul_B_td!(C::ArrayLike{2}, A::ArrayLike{2}, B::BiTriSym, _add::MulAddMul = MulAddMul()) check_A_mul_B!_sizes(C, A, B) iszero(_add.alpha) && return _rmul_or_fill!(C, _add.beta) @@ -545,7 +545,7 @@ function A_mul_B_td!(C::AbstractMatrix, A::AbstractMatrix, B::BiTriSym, C end -function A_mul_B_td!(C::AbstractMatrix, A::Diagonal, B::BiTriSym, +function A_mul_B_td!(C::ArrayLike{2}, A::Diagonal, B::BiTriSym, _add::MulAddMul = MulAddMul()) check_A_mul_B!_sizes(C, A, B) n = size(A,1) @@ -688,7 +688,7 @@ function *(A::SymTridiagonal, B::Diagonal) A_mul_B_td!(Tridiagonal(zeros(TS, size(A, 1)-1), zeros(TS, size(A, 1)), zeros(TS, size(A, 1)-1)), A, B) end -function dot(x::AbstractVector, B::Bidiagonal, y::AbstractVector) +function dot(x::ArrayLike{1}, B::Bidiagonal, y::ArrayLike{1}) require_one_based_indexing(x, y) nx, ny = length(x), length(y) (nx == size(B, 1) == ny) || throw(DimensionMismatch()) @@ -719,10 +719,10 @@ function dot(x::AbstractVector, B::Bidiagonal, y::AbstractVector) end #Linear solvers -ldiv!(A::Union{Bidiagonal, AbstractTriangular}, b::AbstractVector) = naivesub!(A, b) -ldiv!(A::Transpose{<:Any,<:Bidiagonal}, b::AbstractVector) = ldiv!(copy(A), b) -ldiv!(A::Adjoint{<:Any,<:Bidiagonal}, b::AbstractVector) = ldiv!(copy(A), b) -function ldiv!(A::Union{Bidiagonal,AbstractTriangular}, B::AbstractMatrix) +ldiv!(A::Union{Bidiagonal, AbstractTriangular}, b::ArrayLike{1}) = naivesub!(A, b) +ldiv!(A::Transpose{<:Any,<:Bidiagonal}, b::ArrayLike{1}) = ldiv!(copy(A), b) +ldiv!(A::Adjoint{<:Any,<:Bidiagonal}, b::ArrayLike{1}) = ldiv!(copy(A), b) +function ldiv!(A::Union{Bidiagonal,AbstractTriangular}, B::ArrayLike{2}) require_one_based_indexing(A, B) nA,mA = size(A) tmp = similar(B,size(B,1)) @@ -737,7 +737,7 @@ function ldiv!(A::Union{Bidiagonal,AbstractTriangular}, B::AbstractMatrix) end B end -function ldiv!(adjA::Adjoint{<:Any,<:Union{Bidiagonal,AbstractTriangular}}, B::AbstractMatrix) +function ldiv!(adjA::Adjoint{<:Any,<:Union{Bidiagonal,AbstractTriangular}}, B::ArrayLike{2}) require_one_based_indexing(adjA, B) A = adjA.parent nA,mA = size(A) @@ -753,7 +753,7 @@ function ldiv!(adjA::Adjoint{<:Any,<:Union{Bidiagonal,AbstractTriangular}}, B::A end B end -function ldiv!(transA::Transpose{<:Any,<:Union{Bidiagonal,AbstractTriangular}}, B::AbstractMatrix) +function ldiv!(transA::Transpose{<:Any,<:Union{Bidiagonal,AbstractTriangular}}, B::ArrayLike{2}) 
require_one_based_indexing(transA, B) A = transA.parent nA,mA = size(A) @@ -770,7 +770,7 @@ function ldiv!(transA::Transpose{<:Any,<:Union{Bidiagonal,AbstractTriangular}}, B end #Generic solver using naive substitution -function naivesub!(A::Bidiagonal{T}, b::AbstractVector, x::AbstractVector = b) where T +function naivesub!(A::Bidiagonal{T}, b::ArrayLike{1}, x::ArrayLike{1} = b) where T require_one_based_indexing(A, b, x) N = size(A, 2) if N != length(b) || N != length(x) @@ -817,21 +817,21 @@ function \(A::Bidiagonal{<:Number}, B::AbstractVecOrMat{<:Number}) TAB = typeof((zero(TA)*zero(TB) + zero(TA)*zero(TB))/one(TA)) ldiv!(convert(AbstractArray{TAB}, A), copy_oftype(B, TAB)) end -\(A::Bidiagonal, B::AbstractVecOrMat) = ldiv!(A, copy(B)) +\(A::Bidiagonal, B::VectorOrMatrixLike) = ldiv!(A, copy(B)) function \(transA::Transpose{<:Number,<:Bidiagonal{<:Number}}, B::AbstractVecOrMat{<:Number}) A = transA.parent TA, TB = eltype(A), eltype(B) TAB = typeof((zero(TA)*zero(TB) + zero(TA)*zero(TB))/one(TA)) ldiv!(transpose(convert(AbstractArray{TAB}, A)), copy_oftype(B, TAB)) end -\(transA::Transpose{<:Any,<:Bidiagonal}, B::AbstractVecOrMat) = ldiv!(transpose(transA.parent), copy(B)) +\(transA::Transpose{<:Any,<:Bidiagonal}, B::VectorOrMatrixLike) = ldiv!(transpose(transA.parent), copy(B)) function \(adjA::Adjoint{<:Number,<:Bidiagonal{<:Number}}, B::AbstractVecOrMat{<:Number}) A = adjA.parent TA, TB = eltype(A), eltype(B) TAB = typeof((zero(TA)*zero(TB) + zero(TA)*zero(TB))/one(TA)) ldiv!(adjoint(convert(AbstractArray{TAB}, A)), copy_oftype(B, TAB)) end -\(adjA::Adjoint{<:Any,<:Bidiagonal}, B::AbstractVecOrMat) = ldiv!(adjoint(adjA.parent), copy(B)) +\(adjA::Adjoint{<:Any,<:Bidiagonal}, B::VectorOrMatrixLike) = ldiv!(adjoint(adjA.parent), copy(B)) factorize(A::Bidiagonal) = A diff --git a/stdlib/LinearAlgebra/src/blas.jl b/stdlib/LinearAlgebra/src/blas.jl index 1f9badcf162ab..0386822b7207f 100644 --- a/stdlib/LinearAlgebra/src/blas.jl +++ b/stdlib/LinearAlgebra/src/blas.jl @@ -7,7 +7,7 @@ module BLAS import ..axpy!, ..axpby! import Base: copyto! -using Base: require_one_based_indexing +using Base: require_one_based_indexing, VectorOrMatrixLike export # Level 1 @@ -383,7 +383,7 @@ for (fname, elty, ret_type) in ((:dnrm2_,:Float64,:Float64), end end end -nrm2(x::Union{AbstractVector,DenseArray}) = GC.@preserve x nrm2(length(x), pointer(x), stride1(x)) +nrm2(x::Union{ArrayLike{1},DenseArray}) = GC.@preserve x nrm2(length(x), pointer(x), stride1(x)) ## asum @@ -416,7 +416,7 @@ for (fname, elty, ret_type) in ((:dasum_,:Float64,:Float64), end end end -asum(x::Union{AbstractVector,DenseArray}) = GC.@preserve x asum(length(x), pointer(x), stride1(x)) +asum(x::Union{ArrayLike{1},DenseArray}) = GC.@preserve x asum(length(x), pointer(x), stride1(x)) ## axpy @@ -546,7 +546,7 @@ for (fname, elty) in ((:idamax_,:Float64), end end end -iamax(dx::Union{AbstractVector,DenseArray}) = GC.@preserve dx iamax(length(dx), pointer(dx), stride1(dx)) +iamax(dx::Union{ArrayLike{1},DenseArray}) = GC.@preserve dx iamax(length(dx), pointer(dx), stride1(dx)) """ iamax(n, dx, incx) @@ -1473,12 +1473,12 @@ for (fname, elty) in ((:dsyrk_,:Float64), end end end -function syrk(uplo::AbstractChar, trans::AbstractChar, alpha::Number, A::AbstractVecOrMat) +function syrk(uplo::AbstractChar, trans::AbstractChar, alpha::Number, A::VectorOrMatrixLike) T = eltype(A) n = size(A, trans == 'N' ? 
1 : 2) syrk!(uplo, trans, convert(T,alpha), A, zero(T), similar(A, T, (n, n))) end -syrk(uplo::AbstractChar, trans::AbstractChar, A::AbstractVecOrMat) = syrk(uplo, trans, one(eltype(A)), A) +syrk(uplo::AbstractChar, trans::AbstractChar, A::VectorOrMatrixLike) = syrk(uplo, trans, one(eltype(A)), A) """ herk!(uplo, trans, alpha, A, beta, C) @@ -1594,7 +1594,7 @@ Returns the [`uplo`](@ref stdlib-blas-uplo) triangle of `alpha*transpose(A)*B + alpha*transpose(B)*A`, according to [`trans`](@ref stdlib-blas-trans). """ -function syr2k(uplo::AbstractChar, trans::AbstractChar, alpha::Number, A::AbstractVecOrMat, B::AbstractVecOrMat) +function syr2k(uplo::AbstractChar, trans::AbstractChar, alpha::Number, A::VectorOrMatrixLike, B::VectorOrMatrixLike) T = eltype(A) n = size(A, trans == 'N' ? 1 : 2) syr2k!(uplo, trans, convert(T,alpha), A, B, zero(T), similar(A, T, (n, n))) @@ -1605,7 +1605,7 @@ end Returns the [`uplo`](@ref stdlib-blas-uplo) triangle of `A*transpose(B) + B*transpose(A)` or `transpose(A)*B + transpose(B)*A`, according to [`trans`](@ref stdlib-blas-trans). """ -syr2k(uplo::AbstractChar, trans::AbstractChar, A::AbstractVecOrMat, B::AbstractVecOrMat) = syr2k(uplo, trans, one(eltype(A)), A, B) +syr2k(uplo::AbstractChar, trans::AbstractChar, A::VectorOrMatrixLike, B::VectorOrMatrixLike) = syr2k(uplo, trans, one(eltype(A)), A, B) for (fname, elty1, elty2) in ((:zher2k_,:ComplexF64,:Float64), (:cher2k_,:ComplexF32,:Float32)) @eval begin diff --git a/stdlib/LinearAlgebra/src/bunchkaufman.jl b/stdlib/LinearAlgebra/src/bunchkaufman.jl index c57dedc66776e..1ba962a60d6a6 100644 --- a/stdlib/LinearAlgebra/src/bunchkaufman.jl +++ b/stdlib/LinearAlgebra/src/bunchkaufman.jl @@ -63,7 +63,7 @@ permutation: 1 ``` """ -struct BunchKaufman{T,S<:AbstractMatrix} <: Factorization{T} +struct BunchKaufman{T,S<:ArrayLike{2}} <: Factorization{T} LD::S ipiv::Vector{BlasInt} uplo::Char @@ -71,7 +71,7 @@ struct BunchKaufman{T,S<:AbstractMatrix} <: Factorization{T} rook::Bool info::BlasInt - function BunchKaufman{T,S}(LD, ipiv, uplo, symmetric, rook, info) where {T,S<:AbstractMatrix} + function BunchKaufman{T,S}(LD, ipiv, uplo, symmetric, rook, info) where {T,S<:ArrayLike{2}} require_one_based_indexing(LD) new(LD, ipiv, uplo, symmetric, rook, info) end diff --git a/stdlib/LinearAlgebra/src/cholesky.jl b/stdlib/LinearAlgebra/src/cholesky.jl index bd28a2e1f351e..d6bb4b0fd2e6a 100644 --- a/stdlib/LinearAlgebra/src/cholesky.jl +++ b/stdlib/LinearAlgebra/src/cholesky.jl @@ -69,12 +69,12 @@ julia> C.L * C.U == A true ``` """ -struct Cholesky{T,S<:AbstractMatrix} <: Factorization{T} +struct Cholesky{T,S<:ArrayLike{2}} <: Factorization{T} factors::S uplo::Char info::BlasInt - function Cholesky{T,S}(factors, uplo, info) where {T,S<:AbstractMatrix} + function Cholesky{T,S}(factors, uplo, info) where {T,S<:ArrayLike{2}} require_one_based_indexing(factors) new(factors, uplo, info) end @@ -116,7 +116,7 @@ permutation: 1 ``` """ -struct CholeskyPivoted{T,S<:AbstractMatrix} <: Factorization{T} +struct CholeskyPivoted{T,S<:ArrayLike{2}} <: Factorization{T} factors::S uplo::Char piv::Vector{BlasInt} @@ -124,7 +124,7 @@ struct CholeskyPivoted{T,S<:AbstractMatrix} <: Factorization{T} tol::Real info::BlasInt - function CholeskyPivoted{T,S}(factors, uplo, piv, rank, tol, info) where {T,S<:AbstractMatrix} + function CholeskyPivoted{T,S}(factors, uplo, piv, rank, tol, info) where {T,S<:ArrayLike{2}} require_one_based_indexing(factors) new(factors, uplo, piv, rank, tol, info) end @@ -157,7 +157,7 @@ function _chol!(A::StridedMatrix) end ## 
Non BLAS/LAPACK element types (generic) -function _chol!(A::AbstractMatrix, ::Type{UpperTriangular}) +function _chol!(A::ArrayLike{2}, ::Type{UpperTriangular}) require_one_based_indexing(A) n = checksquare(A) @inbounds begin @@ -181,7 +181,7 @@ function _chol!(A::AbstractMatrix, ::Type{UpperTriangular}) end return UpperTriangular(A), convert(BlasInt, 0) end -function _chol!(A::AbstractMatrix, ::Type{LowerTriangular}) +function _chol!(A::ArrayLike{2}, ::Type{LowerTriangular}) require_one_based_indexing(A) n = checksquare(A) @inbounds begin @@ -450,7 +450,7 @@ Base.propertynames(F::CholeskyPivoted, private::Bool=false) = issuccess(C::Cholesky) = C.info == 0 -function show(io::IO, mime::MIME{Symbol("text/plain")}, C::Cholesky{<:Any,<:AbstractMatrix}) +function show(io::IO, mime::MIME{Symbol("text/plain")}, C::Cholesky{<:Any,<:ArrayLike{2}}) if issuccess(C) summary(io, C); println(io) println(io, "$(C.uplo) factor:") @@ -460,7 +460,7 @@ function show(io::IO, mime::MIME{Symbol("text/plain")}, C::Cholesky{<:Any,<:Abst end end -function show(io::IO, mime::MIME{Symbol("text/plain")}, C::CholeskyPivoted{<:Any,<:AbstractMatrix}) +function show(io::IO, mime::MIME{Symbol("text/plain")}, C::CholeskyPivoted{<:Any,<:ArrayLike{2}}) summary(io, C); println(io) println(io, "$(C.uplo) factor with rank $(rank(C)):") show(io, mime, C.uplo == 'U' ? C.U : C.L) @@ -468,10 +468,10 @@ function show(io::IO, mime::MIME{Symbol("text/plain")}, C::CholeskyPivoted{<:Any show(io, mime, C.p) end -ldiv!(C::Cholesky{T,<:AbstractMatrix}, B::StridedVecOrMat{T}) where {T<:BlasFloat} = +ldiv!(C::Cholesky{T,<:ArrayLike{2}}, B::StridedVecOrMat{T}) where {T<:BlasFloat} = LAPACK.potrs!(C.uplo, C.factors, B) -function ldiv!(C::Cholesky{<:Any,<:AbstractMatrix}, B::StridedVecOrMat) +function ldiv!(C::Cholesky{<:Any,<:ArrayLike{2}}, B::StridedVecOrMat) if C.uplo == 'L' return ldiv!(adjoint(LowerTriangular(C.factors)), ldiv!(LowerTriangular(C.factors), B)) else @@ -523,7 +523,7 @@ function ldiv!(C::CholeskyPivoted, B::StridedMatrix) B end -function rdiv!(B::StridedMatrix, C::Cholesky{<:Any,<:AbstractMatrix}) +function rdiv!(B::StridedMatrix, C::Cholesky{<:Any,<:ArrayLike{2}}) if C.uplo == 'L' return rdiv!(rdiv!(B, adjoint(LowerTriangular(C.factors))), LowerTriangular(C.factors)) else diff --git a/stdlib/LinearAlgebra/src/dense.jl b/stdlib/LinearAlgebra/src/dense.jl index 5d69e48ad9e26..22ac03d18d8a6 100644 --- a/stdlib/LinearAlgebra/src/dense.jl +++ b/stdlib/LinearAlgebra/src/dense.jl @@ -67,7 +67,7 @@ julia> A 2.0 6.78233 ``` """ -isposdef!(A::AbstractMatrix) = +isposdef!(A::ArrayLike{2}) = ishermitian(A) && isposdef(cholesky!(Hermitian(A); check = false)) """ @@ -88,7 +88,7 @@ julia> isposdef(A) true ``` """ -isposdef(A::AbstractMatrix) = +isposdef(A::ArrayLike{2}) = ishermitian(A) && isposdef(cholesky(Hermitian(A); check = false)) isposdef(x::Number) = imag(x)==0 && real(x) > 0 @@ -130,7 +130,7 @@ julia> triu!(M, 1) 0 0 0 0 0 ``` """ -function triu!(M::AbstractMatrix, k::Integer) +function triu!(M::ArrayLike{2}, k::Integer) require_one_based_indexing(M) m, n = size(M) for j in 1:min(n, m + k) @@ -168,7 +168,7 @@ julia> tril!(M, 2) 1 2 3 4 5 ``` """ -function tril!(M::AbstractMatrix, k::Integer) +function tril!(M::ArrayLike{2}, k::Integer) require_one_based_indexing(M) m, n = size(M) for j in max(1, k + 1):n @@ -181,7 +181,7 @@ end tril(M::Matrix, k::Integer) = tril!(copy(M), k) """ - fillband!(A::AbstractMatrix, x, l, u) + fillband!(A::ArrayLike{2}, x, l, u) Fill the band between diagonals `l` and `u` with the value `x`. 
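The `diagind`/`diag`/`diagm` family above now accepts `ArrayLike{2}`/`ArrayLike{1}` arguments; the semantics are unchanged. A quick reminder of what these return, consistent with the doctests kept in the hunks:

```julia
using LinearAlgebra
A = [1 2 3; 4 5 6; 7 8 9]
diagind(A)       # 1:4:9 — linear indices of the main diagonal
diagind(A, -1)   # 2:4:6 — first subdiagonal, as in the doctest above
diag(A, 1)       # [2, 6], i.e. A[diagind(A, 1)]
```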
""" @@ -217,7 +217,7 @@ julia> diagind(A,-1) 2:4:6 ``` """ -function diagind(A::AbstractMatrix, k::Integer=0) +function diagind(A::ArrayLike{2}, k::Integer=0) require_one_based_indexing(A) diagind(size(A,1), size(A,2), k) end @@ -243,11 +243,11 @@ julia> diag(A,1) 6 ``` """ -diag(A::AbstractMatrix, k::Integer=0) = A[diagind(A,k)] +diag(A::ArrayLike{2}, k::Integer=0) = A[diagind(A,k)] """ - diagm(kv::Pair{<:Integer,<:AbstractVector}...) - diagm(m::Integer, n::Integer, kv::Pair{<:Integer,<:AbstractVector}...) + diagm(kv::Pair{<:Integer,<:ArrayLike{1}}...) + diagm(m::Integer, n::Integer, kv::Pair{<:Integer,<:ArrayLike{1}}...) Construct a matrix from `Pair`s of diagonals and vectors. Vector `kv.second` will be placed on the `kv.first` diagonal. @@ -276,9 +276,9 @@ julia> diagm(1 => [1,2,3], -1 => [4,5]) 0 0 0 0 ``` """ -diagm(kv::Pair{<:Integer,<:AbstractVector}...) = _diagm(nothing, kv...) -diagm(m::Integer, n::Integer, kv::Pair{<:Integer,<:AbstractVector}...) = _diagm((Int(m),Int(n)), kv...) -function _diagm(size, kv::Pair{<:Integer,<:AbstractVector}...) +diagm(kv::Pair{<:Integer,<:ArrayLike{1}}...) = _diagm(nothing, kv...) +diagm(m::Integer, n::Integer, kv::Pair{<:Integer,<:ArrayLike{1}}...) = _diagm((Int(m),Int(n)), kv...) +function _diagm(size, kv::Pair{<:Integer,<:ArrayLike{1}}...) A = diagm_container(size, kv...) for p in kv inds = diagind(A, p.first) @@ -288,18 +288,18 @@ function _diagm(size, kv::Pair{<:Integer,<:AbstractVector}...) end return A end -function diagm_size(size::Nothing, kv::Pair{<:Integer,<:AbstractVector}...) +function diagm_size(size::Nothing, kv::Pair{<:Integer,<:ArrayLike{1}}...) mnmax = mapreduce(x -> length(x.second) + abs(Int(x.first)), max, kv; init=0) return mnmax, mnmax end -function diagm_size(size::Tuple{Int,Int}, kv::Pair{<:Integer,<:AbstractVector}...) +function diagm_size(size::Tuple{Int,Int}, kv::Pair{<:Integer,<:ArrayLike{1}}...) mmax = mapreduce(x -> length(x.second) - min(0,Int(x.first)), max, kv; init=0) nmax = mapreduce(x -> length(x.second) + max(0,Int(x.first)), max, kv; init=0) m, n = size (m ≥ mmax && n ≥ nmax) || throw(DimensionMismatch("invalid size=$size")) return m, n end -function diagm_container(size, kv::Pair{<:Integer,<:AbstractVector}...) +function diagm_container(size, kv::Pair{<:Integer,<:ArrayLike{1}}...) T = promote_type(map(x -> eltype(x.second), kv)...) return zeros(T, diagm_size(size, kv...)...) end @@ -307,8 +307,8 @@ diagm_container(size, kv::Pair{<:Integer,<:BitVector}...) = falses(diagm_size(size, kv...)...) """ - diagm(v::AbstractVector) - diagm(m::Integer, n::Integer, v::AbstractVector) + diagm(v::ArrayLike{1}) + diagm(m::Integer, n::Integer, v::ArrayLike{1}) Construct a matrix with elements of the vector as diagonal elements. 
By default (if `size=nothing`), the matrix is square and its size is given by @@ -324,8 +324,8 @@ julia> diagm([1,2,3]) 0 0 3 ``` """ -diagm(v::AbstractVector) = diagm(0 => v) -diagm(m::Integer, n::Integer, v::AbstractVector) = diagm(m, n, 0 => v) +diagm(v::ArrayLike{1}) = diagm(0 => v) +diagm(m::Integer, n::Integer, v::ArrayLike{1}) = diagm(m, n, 0 => v) function tr(A::Matrix{T}) where T n = checksquare(A) @@ -394,14 +394,16 @@ function kron(a::AbstractMatrix{T}, b::AbstractMatrix{S}) where {T,S} R end -kron(a::Number, b::Union{Number, AbstractVecOrMat}) = a * b -kron(a::AbstractVecOrMat, b::Number) = a * b -kron(a::AbstractVector, b::AbstractVector) = vec(kron(reshape(a ,length(a), 1), reshape(b, length(b), 1))) -kron(a::AbstractMatrix, b::AbstractVector) = kron(a, reshape(b, length(b), 1)) -kron(a::AbstractVector, b::AbstractMatrix) = kron(reshape(a, length(a), 1), b) +kron(a::Number, b::Union{Number, VectorOrMatrixLike}) = a * b +kron(a::VectorOrMatrixLike, b::Number) = a * b +kron(a::ArrayLike{1}, b::ArrayLike{1}) = vec(kron(reshape(a ,length(a), 1), reshape(b, length(b), 1))) +kron(a::ArrayLike{2}, b::ArrayLike{1}) = kron(a, reshape(b, length(b), 1)) +kron(a::ArrayLike{1}, b::ArrayLike{2}) = kron(reshape(a, length(a), 1), b) # Matrix power -(^)(A::AbstractMatrix, p::Integer) = p < 0 ? power_by_squaring(inv(A), -p) : power_by_squaring(A, p) +@inline matrix_power(A, p) = p < 0 ? power_by_squaring(inv(A), -p) : power_by_squaring(A, p) +(^)(A::ArrayLike{2}, p::Integer) = matrix_power(A, p) +(^)(A::AbstractMatrix, p::Integer) = matrix_power(A, p) # specific function (^)(A::AbstractMatrix{T}, p::Integer) where T<:Integer # make sure that e.g. [1 1;1 0]^big(3) # gets promotes in a similar way as 2^big(3) @@ -412,7 +414,7 @@ function integerpow(A::AbstractMatrix{T}, p) where T TT = promote_op(^, T, typeof(p)) return (TT == T ? A : copyto!(similar(A, TT), A))^Integer(p) end -function schurpow(A::AbstractMatrix, p) +function schurpow(A::ArrayLike{2}, p) if istriu(A) # Integer part retmat = A ^ floor(p) @@ -473,7 +475,7 @@ function (^)(A::AbstractMatrix{T}, p::Real) where T end """ - ^(A::AbstractMatrix, p::Number) + ^(A::ArrayLike{2}, p::Number) Matrix power, equivalent to ``\\exp(p\\log(A))`` @@ -485,12 +487,12 @@ julia> [1 2; 0 3]^3 0 27 ``` """ -(^)(A::AbstractMatrix, p::Number) = exp(p*log(A)) +(^)(A::ArrayLike{2}, p::Number) = exp(p*log(A)) # Matrix exponential """ - exp(A::AbstractMatrix) + exp(A::ArrayLike{2}) Compute the matrix exponential of `A`, defined by @@ -520,7 +522,7 @@ exp(A::StridedMatrix{<:BlasFloat}) = exp!(copy(A)) exp(A::StridedMatrix{<:Union{Integer,Complex{<:Integer}}}) = exp!(float.(A)) """ - ^(b::Number, A::AbstractMatrix) + ^(b::Number, A::ArrayLike{2}) Matrix exponential, equivalent to ``\\exp(\\log(b)A)``. 
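The matrix-power hunks above route integer powers through `power_by_squaring` (via the shared `matrix_power` helper) and non-integer powers through `exp(p*log(A))`, with `b^A` defined as `exp(log(b)*A)` plus a special case that elides the `log` for `ℯ`. A brief sketch of the identities these methods implement, assuming a matrix with no negative real eigenvalues as the docstrings require:

```julia
using LinearAlgebra
A = [1.0 2.0; 0.0 3.0]
A^2 ≈ A * A             # integer power: power_by_squaring
A^0.5 ≈ sqrt(A)         # real power: principal root, consistent with sqrt
2^A ≈ exp(log(2) * A)   # Number^Matrix is exp(log(b)*A)
ℯ^A ≈ exp(A)            # the Irrational ℯ method skips the log(b) multiply
```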
@@ -541,9 +543,9 @@ julia> ℯ^[1 2; 0 3] 0.0 20.0855 ``` """ -Base.:^(b::Number, A::AbstractMatrix) = exp!(log(b)*A) +Base.:^(b::Number, A::ArrayLike{2}) = exp!(log(b)*A) # method for ℯ to explicitly elide the log(b) multiplication -Base.:^(::Irrational{:ℯ}, A::AbstractMatrix) = exp(A) +Base.:^(::Irrational{:ℯ}, A::ArrayLike{2}) = exp(A) ## Destructive matrix exponential using algorithm from Higham, 2008, ## "Functions of Matrices: Theory and Computation", SIAM @@ -700,7 +702,7 @@ function log(A::StridedMatrix) end """ - sqrt(A::AbstractMatrix) + sqrt(A::ArrayLike{2}) If `A` has no negative real eigenvalues, compute the principal matrix square root of `A`, that is the unique matrix ``X`` with eigenvalues having positive real part such that @@ -774,7 +776,7 @@ function inv(A::StridedMatrix{T}) where T end """ - cos(A::AbstractMatrix) + cos(A::ArrayLike{2}) Compute the matrix cosine of a square matrix `A`. @@ -807,7 +809,7 @@ function cos(A::AbstractMatrix{<:Complex}) end """ - sin(A::AbstractMatrix) + sin(A::ArrayLike{2}) Compute the matrix sine of a square matrix `A`. @@ -844,7 +846,7 @@ function sin(A::AbstractMatrix{<:Complex}) end """ - sincos(A::AbstractMatrix) + sincos(A::ArrayLike{2}) Compute the matrix sine and cosine of a square matrix `A`. @@ -893,7 +895,7 @@ function sincos(A::AbstractMatrix{<:Complex}) end """ - tan(A::AbstractMatrix) + tan(A::ArrayLike{2}) Compute the matrix tangent of a square matrix `A`. @@ -908,7 +910,7 @@ julia> tan(fill(1.0, (2,2))) -1.09252 -1.09252 ``` """ -function tan(A::AbstractMatrix) +function tan(A::ArrayLike{2}) if ishermitian(A) return copytri!(parent(tan(Hermitian(A))), 'U', true) end @@ -918,11 +920,11 @@ function tan(A::AbstractMatrix) end """ - cosh(A::AbstractMatrix) + cosh(A::ArrayLike{2}) Compute the matrix hyperbolic cosine of a square matrix `A`. """ -function cosh(A::AbstractMatrix) +function cosh(A::ArrayLike{2}) if ishermitian(A) return copytri!(parent(cosh(Hermitian(A))), 'U', true) end @@ -932,11 +934,11 @@ function cosh(A::AbstractMatrix) end """ - sinh(A::AbstractMatrix) + sinh(A::ArrayLike{2}) Compute the matrix hyperbolic sine of a square matrix `A`. """ -function sinh(A::AbstractMatrix) +function sinh(A::ArrayLike{2}) if ishermitian(A) return copytri!(parent(sinh(Hermitian(A))), 'U', true) end @@ -946,11 +948,11 @@ function sinh(A::AbstractMatrix) end """ - tanh(A::AbstractMatrix) + tanh(A::ArrayLike{2}) Compute the matrix hyperbolic tangent of a square matrix `A`. """ -function tanh(A::AbstractMatrix) +function tanh(A::ArrayLike{2}) if ishermitian(A) return copytri!(parent(tanh(Hermitian(A))), 'U', true) end @@ -966,7 +968,7 @@ function tanh(A::AbstractMatrix) end """ - acos(A::AbstractMatrix) + acos(A::ArrayLike{2}) Compute the inverse matrix cosine of a square matrix `A`. @@ -985,7 +987,7 @@ julia> acos(cos([0.5 0.1; -0.2 0.3])) -0.2+2.63678e-16im 0.3-3.46945e-16im ``` """ -function acos(A::AbstractMatrix) +function acos(A::ArrayLike{2}) if ishermitian(A) acosHermA = acos(Hermitian(A)) return isa(acosHermA, Hermitian) ? copytri!(parent(acosHermA), 'U', true) : parent(acosHermA) @@ -997,7 +999,7 @@ function acos(A::AbstractMatrix) end """ - asin(A::AbstractMatrix) + asin(A::ArrayLike{2}) Compute the inverse matrix sine of a square matrix `A`. @@ -1016,7 +1018,7 @@ julia> asin(sin([0.5 0.1; -0.2 0.3])) -0.2+9.71445e-17im 0.3-1.249e-16im ``` """ -function asin(A::AbstractMatrix) +function asin(A::ArrayLike{2}) if ishermitian(A) asinHermA = asin(Hermitian(A)) return isa(asinHermA, Hermitian) ? 
copytri!(parent(asinHermA), 'U', true) : parent(asinHermA) @@ -1028,7 +1030,7 @@ function asin(A::AbstractMatrix) end """ - atan(A::AbstractMatrix) + atan(A::ArrayLike{2}) Compute the inverse matrix tangent of a square matrix `A`. @@ -1047,7 +1049,7 @@ julia> atan(tan([0.5 0.1; -0.2 0.3])) -0.2+6.93889e-17im 0.3-4.16334e-17im ``` """ -function atan(A::AbstractMatrix) +function atan(A::ArrayLike{2}) if ishermitian(A) return copytri!(parent(atan(Hermitian(A))), 'U', true) end @@ -1058,14 +1060,14 @@ function atan(A::AbstractMatrix) end """ - acosh(A::AbstractMatrix) + acosh(A::ArrayLike{2}) Compute the inverse hyperbolic matrix cosine of a square matrix `A`. For the theory and logarithmic formulas used to compute this function, see [^AH16_4]. [^AH16_4]: Mary Aprahamian and Nicholas J. Higham, "Matrix Inverse Trigonometric and Inverse Hyperbolic Functions: Theory and Algorithms", MIMS EPrint: 2016.4. [https://doi.org/10.1137/16M1057577](https://doi.org/10.1137/16M1057577) """ -function acosh(A::AbstractMatrix) +function acosh(A::ArrayLike{2}) if ishermitian(A) acoshHermA = acosh(Hermitian(A)) return isa(acoshHermA, Hermitian) ? copytri!(parent(acoshHermA), 'U', true) : parent(acoshHermA) @@ -1077,14 +1079,14 @@ function acosh(A::AbstractMatrix) end """ - asinh(A::AbstractMatrix) + asinh(A::ArrayLike{2}) Compute the inverse hyperbolic matrix sine of a square matrix `A`. For the theory and logarithmic formulas used to compute this function, see [^AH16_5]. [^AH16_5]: Mary Aprahamian and Nicholas J. Higham, "Matrix Inverse Trigonometric and Inverse Hyperbolic Functions: Theory and Algorithms", MIMS EPrint: 2016.4. [https://doi.org/10.1137/16M1057577](https://doi.org/10.1137/16M1057577) """ -function asinh(A::AbstractMatrix) +function asinh(A::ArrayLike{2}) if ishermitian(A) return copytri!(parent(asinh(Hermitian(A))), 'U', true) end @@ -1095,14 +1097,14 @@ function asinh(A::AbstractMatrix) end """ - atanh(A::AbstractMatrix) + atanh(A::ArrayLike{2}) Compute the inverse hyperbolic matrix tangent of a square matrix `A`. For the theory and logarithmic formulas used to compute this function, see [^AH16_6]. [^AH16_6]: Mary Aprahamian and Nicholas J. Higham, "Matrix Inverse Trigonometric and Inverse Hyperbolic Functions: Theory and Algorithms", MIMS EPrint: 2016.4. [https://doi.org/10.1137/16M1057577](https://doi.org/10.1137/16M1057577) """ -function atanh(A::AbstractMatrix) +function atanh(A::ArrayLike{2}) if ishermitian(A) return copytri!(parent(atanh(Hermitian(A))), 'U', true) end @@ -1119,12 +1121,12 @@ for (finv, f, finvh, fh, fn) in ((:sec, :cos, :sech, :cosh, "secant"), hname = string(finvh) @eval begin @doc """ - $($name)(A::AbstractMatrix) + $($name)(A::ArrayLike{2}) Compute the matrix $($fn) of a square matrix `A`. """ ($finv)(A::AbstractMatrix{T}) where {T} = inv(($f)(A)) @doc """ - $($hname)(A::AbstractMatrix) + $($hname)(A::ArrayLike{2}) Compute the matrix hyperbolic $($fn) of square matrix `A`. """ ($finvh)(A::AbstractMatrix{T}) where {T} = inv(($fh)(A)) @@ -1138,10 +1140,10 @@ for (tfa, tfainv, hfa, hfainv, fn) in ((:asec, :acos, :asech, :acosh, "secant"), hname = string(hfa) @eval begin @doc """ - $($tname)(A::AbstractMatrix) + $($tname)(A::ArrayLike{2}) Compute the inverse matrix $($fn) of `A`. """ ($tfa)(A::AbstractMatrix{T}) where {T} = ($tfainv)(inv(A)) @doc """ - $($hname)(A::AbstractMatrix) + $($hname)(A::ArrayLike{2}) Compute the inverse matrix hyperbolic $($fn) of `A`. 
""" ($hfa)(A::AbstractMatrix{T}) where {T} = ($hfainv)(inv(A)) end end @@ -1397,7 +1399,7 @@ julia> nullspace(M, atol=0.95) 1.0 ``` """ -function nullspace(A::AbstractMatrix; atol::Real = 0.0, rtol::Real = (min(size(A)...)*eps(real(float(one(eltype(A))))))*iszero(atol)) +function nullspace(A::ArrayLike{2}; atol::Real = 0.0, rtol::Real = (min(size(A)...)*eps(real(float(one(eltype(A))))))*iszero(atol)) m, n = size(A) (m == 0 || n == 0) && return Matrix{eltype(A)}(I, n, n) SVD = svd(A, full=true) @@ -1406,7 +1408,7 @@ function nullspace(A::AbstractMatrix; atol::Real = 0.0, rtol::Real = (min(size(A return copy(SVD.Vt[indstart:end,:]') end -nullspace(A::AbstractVector; atol::Real = 0.0, rtol::Real = (min(size(A)...)*eps(real(float(one(eltype(A))))))*iszero(atol)) = nullspace(reshape(A, length(A), 1), rtol= rtol, atol= atol) +nullspace(A::ArrayLike{1}; atol::Real = 0.0, rtol::Real = (min(size(A)...)*eps(real(float(one(eltype(A))))))*iszero(atol)) = nullspace(reshape(A, length(A), 1), rtol= rtol, atol= atol) """ cond(M, p::Real=2) @@ -1414,7 +1416,7 @@ nullspace(A::AbstractVector; atol::Real = 0.0, rtol::Real = (min(size(A)...)*eps Condition number of the matrix `M`, computed using the operator `p`-norm. Valid values for `p` are `1`, `2` (default), or `Inf`. """ -function cond(A::AbstractMatrix, p::Real=2) +function cond(A::ArrayLike{2}, p::Real=2) if p == 2 v = svdvals(A) maxv = maximum(v) diff --git a/stdlib/LinearAlgebra/src/diagonal.jl b/stdlib/LinearAlgebra/src/diagonal.jl index b0d1fb0fee9b6..0e7fb2d3a1cd8 100644 --- a/stdlib/LinearAlgebra/src/diagonal.jl +++ b/stdlib/LinearAlgebra/src/diagonal.jl @@ -11,10 +11,10 @@ struct Diagonal{T,V<:AbstractVector{T}} <: AbstractMatrix{T} end end Diagonal(v::AbstractVector{T}) where {T} = Diagonal{T,typeof(v)}(v) -Diagonal{T}(v::AbstractVector) where {T} = Diagonal(convert(AbstractVector{T}, v)::AbstractVector{T}) +Diagonal{T}(v::ArrayLike{1}) where {T} = Diagonal(convert(AbstractVector{T}, v)::AbstractVector{T}) """ - Diagonal(A::AbstractMatrix) + Diagonal(A::ArrayLike{2}) Construct a matrix from the diagonal of `A`. @@ -33,10 +33,10 @@ julia> Diagonal(A) ⋅ ⋅ 9 ``` """ -Diagonal(A::AbstractMatrix) = Diagonal(diag(A)) +Diagonal(A::ArrayLike{2}) = Diagonal(diag(A)) """ - Diagonal(V::AbstractVector) + Diagonal(V::ArrayLike{1}) Construct a matrix with `V` as its diagonal. 
@@ -53,7 +53,7 @@ julia> Diagonal(V) ⋅ 2 ``` """ -Diagonal(V::AbstractVector) +Diagonal(V::ArrayLike{1}) Diagonal(D::Diagonal) = D Diagonal{T}(D::Diagonal{T}) where {T} = D @@ -160,25 +160,26 @@ end (*)(D::Diagonal, x::Number) = Diagonal(D.diag * x) (/)(D::Diagonal, x::Number) = Diagonal(D.diag / x) (*)(Da::Diagonal, Db::Diagonal) = Diagonal(Da.diag .* Db.diag) -(*)(D::Diagonal, V::AbstractVector) = D.diag .* V +(*)(D::Diagonal, V::ArrayLike{1}) = D.diag .* V +(*)(D::Diagonal, V::AbstractVector) = D.diag .* V # specific (*)(A::AbstractTriangular, D::Diagonal) = rmul!(copyto!(similar(A, promote_op(*, eltype(A), eltype(D.diag))), A), D) (*)(D::Diagonal, B::AbstractTriangular) = lmul!(D, copyto!(similar(B, promote_op(*, eltype(B), eltype(D.diag))), B)) -(*)(A::AbstractMatrix, D::Diagonal) = +(*)(A::ArrayLike{2}, D::Diagonal) = rmul!(copyto!(similar(A, promote_op(*, eltype(A), eltype(D.diag)), size(A)), A), D) -(*)(D::Diagonal, A::AbstractMatrix) = +(*)(D::Diagonal, A::ArrayLike{2}) = lmul!(D, copyto!(similar(A, promote_op(*, eltype(A), eltype(D.diag)), size(A)), A)) -function rmul!(A::AbstractMatrix, D::Diagonal) +function rmul!(A::ArrayLike{2}, D::Diagonal) require_one_based_indexing(A) A .= A .* permutedims(D.diag) return A end -function lmul!(D::Diagonal, B::AbstractMatrix) +function lmul!(D::Diagonal, B::ArrayLike{2}) require_one_based_indexing(B) B .= D.diag .* B return B @@ -218,7 +219,7 @@ end *(D::Adjoint{<:Any,<:Diagonal}, B::Diagonal) = Diagonal(adjoint.(D.parent.diag) .* B.diag) *(A::Adjoint{<:Any,<:AbstractTriangular}, D::Diagonal) = rmul!(copyto!(similar(A, promote_op(*, eltype(A), eltype(D.diag))), A), D) -function *(adjA::Adjoint{<:Any,<:AbstractMatrix}, D::Diagonal) +function *(adjA::Adjoint{<:Any,<:ArrayLike{2}}, D::Diagonal) A = adjA.parent Ac = similar(A, promote_op(*, eltype(A), eltype(D.diag)), (size(A, 2), size(A, 1))) adjoint!(Ac, A) @@ -228,7 +229,7 @@ end *(D::Transpose{<:Any,<:Diagonal}, B::Diagonal) = Diagonal(transpose.(D.parent.diag) .* B.diag) *(A::Transpose{<:Any,<:AbstractTriangular}, D::Diagonal) = rmul!(copyto!(similar(A, promote_op(*, eltype(A), eltype(D.diag))), A), D) -function *(transA::Transpose{<:Any,<:AbstractMatrix}, D::Diagonal) +function *(transA::Transpose{<:Any,<:ArrayLike{2}}, D::Diagonal) A = transA.parent At = similar(A, promote_op(*, eltype(A), eltype(D.diag)), (size(A, 2), size(A, 1))) transpose!(At, A) @@ -239,7 +240,7 @@ end *(D::Diagonal, B::Adjoint{<:Any,<:AbstractTriangular}) = lmul!(D, copyto!(similar(B, promote_op(*, eltype(B), eltype(D.diag))), B)) *(D::Diagonal, adjQ::Adjoint{<:Any,<:Union{QRCompactWYQ,QRPackedQ}}) = (Q = adjQ.parent; rmul!(Array(D), adjoint(Q))) -function *(D::Diagonal, adjA::Adjoint{<:Any,<:AbstractMatrix}) +function *(D::Diagonal, adjA::Adjoint{<:Any,<:ArrayLike{2}}) A = adjA.parent Ac = similar(A, promote_op(*, eltype(A), eltype(D.diag)), (size(A, 2), size(A, 1))) adjoint!(Ac, A) @@ -249,7 +250,7 @@ end *(D::Diagonal, B::Transpose{<:Any,<:Diagonal}) = Diagonal(D.diag .* transpose.(B.parent.diag)) *(D::Diagonal, B::Transpose{<:Any,<:AbstractTriangular}) = lmul!(D, copyto!(similar(B, promote_op(*, eltype(B), eltype(D.diag))), B)) -function *(D::Diagonal, transA::Transpose{<:Any,<:AbstractMatrix}) +function *(D::Diagonal, transA::Transpose{<:Any,<:ArrayLike{2}}) A = transA.parent At = similar(A, promote_op(*, eltype(A), eltype(D.diag)), (size(A, 2), size(A, 1))) transpose!(At, A) @@ -264,20 +265,20 @@ end rmul!(A::Diagonal, B::Diagonal) = Diagonal(A.diag .*= B.diag) lmul!(A::Diagonal, B::Diagonal) = Diagonal(B.diag 
.= A.diag .* B.diag) -function lmul!(adjA::Adjoint{<:Any,<:Diagonal}, B::AbstractMatrix) +function lmul!(adjA::Adjoint{<:Any,<:Diagonal}, B::ArrayLike{2}) A = adjA.parent return lmul!(adjoint(A), B) end -function lmul!(transA::Transpose{<:Any,<:Diagonal}, B::AbstractMatrix) +function lmul!(transA::Transpose{<:Any,<:Diagonal}, B::ArrayLike{2}) A = transA.parent return lmul!(transpose(A), B) end -function rmul!(A::AbstractMatrix, adjB::Adjoint{<:Any,<:Diagonal}) +function rmul!(A::ArrayLike{2}, adjB::Adjoint{<:Any,<:Diagonal}) B = adjB.parent return rmul!(A, adjoint(B)) end -function rmul!(A::AbstractMatrix, transB::Transpose{<:Any,<:Diagonal}) +function rmul!(A::ArrayLike{2}, transB::Transpose{<:Any,<:Diagonal}) B = transB.parent return rmul!(A, transpose(B)) end @@ -290,74 +291,74 @@ function *ₛ end Broadcast.broadcasted(::typeof(*ₛ), out, beta) = iszero(beta::Number) ? false : broadcasted(*, out, beta) -# Get ambiguous method if try to unify AbstractVector/AbstractMatrix here using AbstractVecOrMat -@inline mul!(out::AbstractVector, A::Diagonal, in::AbstractVector, +# Get ambiguous method if try to unify ArrayLike{1}/ArrayLike{2} here using VectorOrMatrixLike +@inline mul!(out::ArrayLike{1}, A::Diagonal, in::ArrayLike{1}, alpha::Number, beta::Number) = out .= (A.diag .* in) .*ₛ alpha .+ out .*ₛ beta -@inline mul!(out::AbstractVector, A::Adjoint{<:Any,<:Diagonal}, in::AbstractVector, +@inline mul!(out::ArrayLike{1}, A::Adjoint{<:Any,<:Diagonal}, in::ArrayLike{1}, alpha::Number, beta::Number) = out .= (adjoint.(A.parent.diag) .* in) .*ₛ alpha .+ out .*ₛ beta -@inline mul!(out::AbstractVector, A::Transpose{<:Any,<:Diagonal}, in::AbstractVector, +@inline mul!(out::ArrayLike{1}, A::Transpose{<:Any,<:Diagonal}, in::ArrayLike{1}, alpha::Number, beta::Number) = out .= (transpose.(A.parent.diag) .* in) .*ₛ alpha .+ out .*ₛ beta -@inline mul!(out::AbstractMatrix, A::Diagonal, in::StridedMatrix, +@inline mul!(out::ArrayLike{2}, A::Diagonal, in::StridedMatrix, alpha::Number, beta::Number) = out .= (A.diag .* in) .*ₛ alpha .+ out .*ₛ beta -@inline mul!(out::AbstractMatrix, A::Adjoint{<:Any,<:Diagonal}, in::StridedMatrix, +@inline mul!(out::ArrayLike{2}, A::Adjoint{<:Any,<:Diagonal}, in::StridedMatrix, alpha::Number, beta::Number) = out .= (adjoint.(A.parent.diag) .* in) .*ₛ alpha .+ out .*ₛ beta -@inline mul!(out::AbstractMatrix, A::Transpose{<:Any,<:Diagonal}, in::StridedMatrix, +@inline mul!(out::ArrayLike{2}, A::Transpose{<:Any,<:Diagonal}, in::StridedMatrix, alpha::Number, beta::Number) = out .= (transpose.(A.parent.diag) .* in) .*ₛ alpha .+ out .*ₛ beta -@inline mul!(out::AbstractMatrix, A::Diagonal, in::Adjoint{<:Any,<:StridedMatrix}, +@inline mul!(out::ArrayLike{2}, A::Diagonal, in::Adjoint{<:Any,<:StridedMatrix}, alpha::Number, beta::Number) = out .= (A.diag .* in) .*ₛ alpha .+ out .*ₛ beta -@inline mul!(out::AbstractMatrix, A::Adjoint{<:Any,<:Diagonal}, in::Adjoint{<:Any,<:StridedMatrix}, +@inline mul!(out::ArrayLike{2}, A::Adjoint{<:Any,<:Diagonal}, in::Adjoint{<:Any,<:StridedMatrix}, alpha::Number, beta::Number) = out .= (adjoint.(A.parent.diag) .* in) .*ₛ alpha .+ out .*ₛ beta -@inline mul!(out::AbstractMatrix, A::Transpose{<:Any,<:Diagonal}, in::Adjoint{<:Any,<:StridedMatrix}, +@inline mul!(out::ArrayLike{2}, A::Transpose{<:Any,<:Diagonal}, in::Adjoint{<:Any,<:StridedMatrix}, alpha::Number, beta::Number) = out .= (transpose.(A.parent.diag) .* in) .*ₛ alpha .+ out .*ₛ beta -@inline mul!(out::AbstractMatrix, A::Diagonal, in::Transpose{<:Any,<:StridedMatrix}, +@inline mul!(out::ArrayLike{2}, 
A::Diagonal, in::Transpose{<:Any,<:StridedMatrix}, alpha::Number, beta::Number) = out .= (A.diag .* in) .*ₛ alpha .+ out .*ₛ beta -@inline mul!(out::AbstractMatrix, A::Adjoint{<:Any,<:Diagonal}, in::Transpose{<:Any,<:StridedMatrix}, +@inline mul!(out::ArrayLike{2}, A::Adjoint{<:Any,<:Diagonal}, in::Transpose{<:Any,<:StridedMatrix}, alpha::Number, beta::Number) = out .= (adjoint.(A.parent.diag) .* in) .*ₛ alpha .+ out .*ₛ beta -@inline mul!(out::AbstractMatrix, A::Transpose{<:Any,<:Diagonal}, in::Transpose{<:Any,<:StridedMatrix}, +@inline mul!(out::ArrayLike{2}, A::Transpose{<:Any,<:Diagonal}, in::Transpose{<:Any,<:StridedMatrix}, alpha::Number, beta::Number) = out .= (transpose.(A.parent.diag) .* in) .*ₛ alpha .+ out .*ₛ beta -@inline mul!(out::AbstractMatrix, in::StridedMatrix, A::Diagonal, +@inline mul!(out::ArrayLike{2}, in::StridedMatrix, A::Diagonal, alpha::Number, beta::Number) = out .= (in .* permutedims(A.diag)) .*ₛ alpha .+ out .*ₛ beta -@inline mul!(out::AbstractMatrix, in::StridedMatrix, A::Adjoint{<:Any,<:Diagonal}, +@inline mul!(out::ArrayLike{2}, in::StridedMatrix, A::Adjoint{<:Any,<:Diagonal}, alpha::Number, beta::Number) = out .= (in .* adjoint(A.parent.diag)) .*ₛ alpha .+ out .*ₛ beta -@inline mul!(out::AbstractMatrix, in::StridedMatrix, A::Transpose{<:Any,<:Diagonal}, +@inline mul!(out::ArrayLike{2}, in::StridedMatrix, A::Transpose{<:Any,<:Diagonal}, alpha::Number, beta::Number) = out .= (in .* transpose(A.parent.diag)) .*ₛ alpha .+ out .*ₛ beta -@inline mul!(out::AbstractMatrix, in::Adjoint{<:Any,<:StridedMatrix}, A::Diagonal, +@inline mul!(out::ArrayLike{2}, in::Adjoint{<:Any,<:StridedMatrix}, A::Diagonal, alpha::Number, beta::Number) = out .= (in .* permutedims(A.diag)) .*ₛ alpha .+ out .*ₛ beta -@inline mul!(out::AbstractMatrix, in::Adjoint{<:Any,<:StridedMatrix}, A::Adjoint{<:Any,<:Diagonal}, +@inline mul!(out::ArrayLike{2}, in::Adjoint{<:Any,<:StridedMatrix}, A::Adjoint{<:Any,<:Diagonal}, alpha::Number, beta::Number) = out .= (in .* adjoint(A.parent.diag)) .*ₛ alpha .+ out .*ₛ beta -@inline mul!(out::AbstractMatrix, in::Adjoint{<:Any,<:StridedMatrix}, A::Transpose{<:Any,<:Diagonal}, +@inline mul!(out::ArrayLike{2}, in::Adjoint{<:Any,<:StridedMatrix}, A::Transpose{<:Any,<:Diagonal}, alpha::Number, beta::Number) = out .= (in .* transpose(A.parent.diag)) .*ₛ alpha .+ out .*ₛ beta -@inline mul!(out::AbstractMatrix, in::Transpose{<:Any,<:StridedMatrix}, A::Diagonal, +@inline mul!(out::ArrayLike{2}, in::Transpose{<:Any,<:StridedMatrix}, A::Diagonal, alpha::Number, beta::Number) = out .= (in .* permutedims(A.diag)) .*ₛ alpha .+ out .*ₛ beta -@inline mul!(out::AbstractMatrix, in::Transpose{<:Any,<:StridedMatrix}, A::Adjoint{<:Any,<:Diagonal}, +@inline mul!(out::ArrayLike{2}, in::Transpose{<:Any,<:StridedMatrix}, A::Adjoint{<:Any,<:Diagonal}, alpha::Number, beta::Number) = out .= (in .* adjoint(A.parent.diag)) .*ₛ alpha .+ out .*ₛ beta -@inline mul!(out::AbstractMatrix, in::Transpose{<:Any,<:StridedMatrix}, A::Transpose{<:Any,<:Diagonal}, +@inline mul!(out::ArrayLike{2}, in::Transpose{<:Any,<:StridedMatrix}, A::Transpose{<:Any,<:Diagonal}, alpha::Number, beta::Number) = out .= (in .* transpose(A.parent.diag)) .*ₛ alpha .+ out .*ₛ beta @@ -371,27 +372,27 @@ Broadcast.broadcasted(::typeof(*ₛ), out, beta) = *(transD::Transpose{<:Any,<:Diagonal}, transA::Transpose{<:Any,<:RealHermSymComplexSym}) = transD * transA.parent *(adjA::Adjoint{<:Any,<:RealHermSymComplexHerm}, adjD::Adjoint{<:Any,<:Diagonal}) = adjA.parent * adjD *(adjD::Adjoint{<:Any,<:Diagonal}, 
adjA::Adjoint{<:Any,<:RealHermSymComplexHerm}) = adjD * adjA.parent -mul!(C::AbstractMatrix, A::Adjoint{<:Any,<:Diagonal}, B::Adjoint{<:Any,<:RealHermSymComplexSym}) = C .= adjoint.(A.parent.diag) .* B -mul!(C::AbstractMatrix, A::Transpose{<:Any,<:Diagonal}, B::Transpose{<:Any,<:RealHermSymComplexHerm}) = C .= transpose.(A.parent.diag) .* B +mul!(C::ArrayLike{2}, A::Adjoint{<:Any,<:Diagonal}, B::Adjoint{<:Any,<:RealHermSymComplexSym}) = C .= adjoint.(A.parent.diag) .* B +mul!(C::ArrayLike{2}, A::Transpose{<:Any,<:Diagonal}, B::Transpose{<:Any,<:RealHermSymComplexHerm}) = C .= transpose.(A.parent.diag) .* B -@inline mul!(C::AbstractMatrix, +@inline mul!(C::ArrayLike{2}, A::Adjoint{<:Any,<:Diagonal}, B::Adjoint{<:Any,<:RealHermSym}, alpha::Number, beta::Number) = mul!(C, A, B.parent, alpha, beta) -@inline mul!(C::AbstractMatrix, +@inline mul!(C::ArrayLike{2}, A::Adjoint{<:Any,<:Diagonal}, B::Adjoint{<:Any,<:RealHermSymComplexHerm}, alpha::Number, beta::Number) = mul!(C, A, B.parent, alpha, beta) -@inline mul!(C::AbstractMatrix, +@inline mul!(C::ArrayLike{2}, A::Transpose{<:Any,<:Diagonal}, B::Transpose{<:Any,<:RealHermSym}, alpha::Number, beta::Number) = mul!(C, A, B.parent, alpha, beta) -@inline mul!(C::AbstractMatrix, +@inline mul!(C::ArrayLike{2}, A::Transpose{<:Any,<:Diagonal}, B::Transpose{<:Any,<:RealHermSymComplexSym}, alpha::Number, beta::Number) = mul!(C, A, B.parent, alpha, beta) -@inline mul!(C::AbstractMatrix, +@inline mul!(C::ArrayLike{2}, A::Adjoint{<:Any,<:Diagonal}, B::Adjoint{<:Any,<:RealHermSymComplexSym}, alpha::Number, beta::Number) = C .= (adjoint.(A.parent.diag) .* B) .*ₛ alpha .+ C .*ₛ beta -@inline mul!(C::AbstractMatrix, +@inline mul!(C::ArrayLike{2}, A::Transpose{<:Any,<:Diagonal}, B::Transpose{<:Any,<:RealHermSymComplexHerm}, alpha::Number, beta::Number) = C .= (transpose.(A.parent.diag) .* B) .*ₛ alpha .+ C .*ₛ beta @@ -427,7 +428,7 @@ function ldiv!(D::Diagonal{T}, V::AbstractMatrix{T}) where {T} end V end -ldiv!(x::AbstractArray, A::Diagonal, b::AbstractArray) = (x .= A.diag .\ b) +ldiv!(x::ArrayLike, A::Diagonal, b::ArrayLike) = (x .= A.diag .\ b) ldiv!(adjD::Adjoint{<:Any,<:Diagonal{T}}, B::AbstractVecOrMat{T}) where {T} = (D = adjD.parent; ldiv!(conj(D), B)) @@ -579,10 +580,10 @@ function ldiv!(D::Diagonal, B::StridedVecOrMat) end return B end -(\)(D::Diagonal, A::AbstractMatrix) = +(\)(D::Diagonal, A::ArrayLike{2}) = ldiv!(D, (typeof(oneunit(eltype(D))/oneunit(eltype(A)))).(A)) -(\)(D::Diagonal, b::AbstractVector) = D.diag .\ b +(\)(D::Diagonal, b::ArrayLike{1}) = D.diag .\ b (\)(Da::Diagonal, Db::Diagonal) = Diagonal(Da.diag .\ Db.diag) function inv(D::Diagonal{T}) where T @@ -642,13 +643,13 @@ function svd(D::Diagonal{<:Number}) end # disambiguation methods: * of Diagonal and Adj/Trans AbsVec -*(x::Adjoint{<:Any,<:AbstractVector}, D::Diagonal) = Adjoint(map((t,s) -> t'*s, D.diag, parent(x))) -*(x::Transpose{<:Any,<:AbstractVector}, D::Diagonal) = Transpose(map((t,s) -> transpose(t)*s, D.diag, parent(x))) -*(x::Adjoint{<:Any,<:AbstractVector}, D::Diagonal, y::AbstractVector) = +*(x::Adjoint{<:Any,<:ArrayLike{1}}, D::Diagonal) = Adjoint(map((t,s) -> t'*s, D.diag, parent(x))) +*(x::Transpose{<:Any,<:ArrayLike{1}}, D::Diagonal) = Transpose(map((t,s) -> transpose(t)*s, D.diag, parent(x))) +*(x::Adjoint{<:Any,<:ArrayLike{1}}, D::Diagonal, y::ArrayLike{1}) = mapreduce(t -> t[1]*t[2]*t[3], +, zip(x, D.diag, y)) -*(x::Transpose{<:Any,<:AbstractVector}, D::Diagonal, y::AbstractVector) = +*(x::Transpose{<:Any,<:ArrayLike{1}}, D::Diagonal, y::ArrayLike{1}) = 
mapreduce(t -> t[1]*t[2]*t[3], +, zip(x, D.diag, y)) -function dot(x::AbstractVector, D::Diagonal, y::AbstractVector) +function dot(x::ArrayLike{1}, D::Diagonal, y::ArrayLike{1}) mapreduce(t -> dot(t[1], t[2], t[3]), +, zip(x, D.diag, y)) end diff --git a/stdlib/LinearAlgebra/src/eigen.jl b/stdlib/LinearAlgebra/src/eigen.jl index 253466091b038..bbbdd5b32ef96 100644 --- a/stdlib/LinearAlgebra/src/eigen.jl +++ b/stdlib/LinearAlgebra/src/eigen.jl @@ -47,7 +47,7 @@ julia> vals == F.values && vecs == F.vectors true ``` """ -struct Eigen{T,V,S<:AbstractMatrix,U<:AbstractVector} <: Factorization{T} +struct Eigen{T,V,S<:ArrayLike{2},U<:ArrayLike{1}} <: Factorization{T} values::U vectors::S Eigen{T,V,S,U}(values::AbstractVector{V}, vectors::AbstractMatrix{T}) where {T,V,S,U} = @@ -109,7 +109,7 @@ julia> vals == F.values && vecs == F.vectors true ``` """ -struct GeneralizedEigen{T,V,S<:AbstractMatrix,U<:AbstractVector} <: Factorization{T} +struct GeneralizedEigen{T,V,S<:ArrayLike{2},U<:ArrayLike{1}} <: Factorization{T} values::U vectors::S GeneralizedEigen{T,V,S,U}(values::AbstractVector{V}, vectors::AbstractMatrix{T}) where {T,V,S,U} = @@ -129,7 +129,7 @@ isposdef(A::Union{Eigen,GeneralizedEigen}) = isreal(A.values) && all(x -> x > 0, # as is the LAPACK default (for complex λ — LAPACK sorts by λ for the Hermitian/Symmetric case) eigsortby(λ::Real) = λ eigsortby(λ::Complex) = (real(λ),imag(λ)) -function sorteig!(λ::AbstractVector, X::AbstractMatrix, sortby::Union{Function,Nothing}=eigsortby) +function sorteig!(λ::ArrayLike{1}, X::ArrayLike{2}, sortby::Union{Function,Nothing}=eigsortby) if sortby !== nothing && !issorted(λ, by=sortby) p = sortperm(λ; alg=QuickSort, by=sortby) permute!(λ, p) @@ -137,7 +137,7 @@ function sorteig!(λ::AbstractVector, X::AbstractMatrix, sortby::Union{Function, end return λ, X end -sorteig!(λ::AbstractVector, sortby::Union{Function,Nothing}=eigsortby) = sortby === nothing ? λ : sort!(λ, by=sortby) +sorteig!(λ::ArrayLike{1}, sortby::Union{Function,Nothing}=eigsortby) = sortby === nothing ? λ : sort!(λ, by=sortby) """ eigen!(A, [B]; permute, scale, sortby) @@ -254,7 +254,7 @@ julia> eigvecs([1.0 0.0 0.0; 0.0 3.0 0.0; 0.0 0.0 18.0]) 0.0 0.0 1.0 ``` """ -eigvecs(A::Union{Number, AbstractMatrix}; kws...) = +eigvecs(A::Union{Number, ArrayLike{2}}; kws...) = eigvecs(eigen(A; kws...)) eigvecs(F::Union{Eigen, GeneralizedEigen}) = F.vectors @@ -370,7 +370,7 @@ Stacktrace: [...] ``` """ -function eigmax(A::Union{Number, AbstractMatrix}; permute::Bool=true, scale::Bool=true) +function eigmax(A::Union{Number, ArrayLike{2}}; permute::Bool=true, scale::Bool=true) v = eigvals(A, permute = permute, scale = scale) if eltype(v)<:Complex throw(DomainError(A, "`A` cannot have complex eigenvalues.")) @@ -411,7 +411,7 @@ Stacktrace: [...] ``` """ -function eigmin(A::Union{Number, AbstractMatrix}; +function eigmin(A::Union{Number, ArrayLike{2}}; permute::Bool=true, scale::Bool=true) v = eigvals(A, permute = permute, scale = scale) if eltype(v)<:Complex @@ -604,7 +604,7 @@ julia> eigvecs(A, B) -1.0+0.0im -1.0-0.0im ``` """ -eigvecs(A::AbstractMatrix, B::AbstractMatrix; kws...) = eigvecs(eigen(A, B; kws...)) +eigvecs(A::ArrayLike{2}, B::ArrayLike{2}; kws...) 
= eigvecs(eigen(A, B; kws...)) function show(io::IO, mime::MIME{Symbol("text/plain")}, F::Union{Eigen,GeneralizedEigen}) summary(io, F); println(io) diff --git a/stdlib/LinearAlgebra/src/factorization.jl b/stdlib/LinearAlgebra/src/factorization.jl index 737786a6df617..b5002f3befb5c 100644 --- a/stdlib/LinearAlgebra/src/factorization.jl +++ b/stdlib/LinearAlgebra/src/factorization.jl @@ -52,7 +52,7 @@ end convert(::Type{T}, f::T) where {T<:Factorization} = f convert(::Type{T}, f::Factorization) where {T<:Factorization} = T(f) -convert(::Type{T}, f::Factorization) where {T<:AbstractArray} = T(f) +convert(::Type{T}, f::Factorization) where {T<:ArrayLike} = T(f) ### General promotion rules Factorization{T}(F::Factorization{T}) where {T} = F @@ -93,14 +93,14 @@ function (/)(B::VecOrMat{Complex{T}}, F::Factorization{T}) where T<:BlasReal return copy(reinterpret(Complex{T}, x)) end -function \(F::Factorization, B::AbstractVecOrMat) +function \(F::Factorization, B::VectorOrMatrixLike) require_one_based_indexing(B) TFB = typeof(oneunit(eltype(B)) / oneunit(eltype(F))) BB = similar(B, TFB, size(B)) copyto!(BB, B) ldiv!(F, BB) end -function \(adjF::Adjoint{<:Any,<:Factorization}, B::AbstractVecOrMat) +function \(adjF::Adjoint{<:Any,<:Factorization}, B::VectorOrMatrixLike) require_one_based_indexing(B) F = adjF.parent TFB = typeof(oneunit(eltype(B)) / oneunit(eltype(F))) @@ -109,14 +109,14 @@ function \(adjF::Adjoint{<:Any,<:Factorization}, B::AbstractVecOrMat) ldiv!(adjoint(F), BB) end -function /(B::AbstractMatrix, F::Factorization) +function /(B::ArrayLike{2}, F::Factorization) require_one_based_indexing(B) TFB = typeof(oneunit(eltype(B)) / oneunit(eltype(F))) BB = similar(B, TFB, size(B)) copyto!(BB, B) rdiv!(BB, F) end -function /(B::AbstractMatrix, adjF::Adjoint{<:Any,<:Factorization}) +function /(B::ArrayLike{2}, adjF::Adjoint{<:Any,<:Factorization}) require_one_based_indexing(B) F = adjF.parent TFB = typeof(oneunit(eltype(B)) / oneunit(eltype(F))) @@ -128,7 +128,7 @@ end /(B::TransposeAbsVec, adjF::Adjoint{<:Any,<:Factorization}) = adjoint(adjF.parent \ adjoint(B)) # support the same 3-arg idiom as in our other in-place A_*_B functions: -function ldiv!(Y::AbstractVecOrMat, A::Factorization, B::AbstractVecOrMat) +function ldiv!(Y::VectorOrMatrixLike, A::Factorization, B::VectorOrMatrixLike) require_one_based_indexing(Y, B) m, n = size(A, 1), size(A, 2) if m > n @@ -141,11 +141,11 @@ function ldiv!(Y::AbstractVecOrMat, A::Factorization, B::AbstractVecOrMat) end # fallback methods for transposed solves -\(F::Transpose{<:Any,<:Factorization{<:Real}}, B::AbstractVecOrMat) = adjoint(F.parent) \ B -\(F::Transpose{<:Any,<:Factorization}, B::AbstractVecOrMat) = conj.(adjoint(F.parent) \ conj.(B)) +\(F::Transpose{<:Any,<:Factorization{<:Real}}, B::VectorOrMatrixLike) = adjoint(F.parent) \ B +\(F::Transpose{<:Any,<:Factorization}, B::VectorOrMatrixLike) = conj.(adjoint(F.parent) \ conj.(B)) -/(B::AbstractMatrix, F::Transpose{<:Any,<:Factorization{<:Real}}) = B / adjoint(F.parent) -/(B::AbstractMatrix, F::Transpose{<:Any,<:Factorization}) = conj.(conj.(B) / adjoint(F.parent)) +/(B::ArrayLike{2}, F::Transpose{<:Any,<:Factorization{<:Real}}) = B / adjoint(F.parent) +/(B::ArrayLike{2}, F::Transpose{<:Any,<:Factorization}) = conj.(conj.(B) / adjoint(F.parent)) /(B::AdjointAbsVec, F::Transpose{<:Any,<:Factorization{<:Real}}) = B / adjoint(F.parent) /(B::TransposeAbsVec, F::Transpose{<:Any,<:Factorization{<:Real}}) = B / adjoint(F.parent) /(B::AdjointAbsVec, F::Transpose{<:Any,<:Factorization}) = conj.(conj.(B) 
/ adjoint(F.parent)) diff --git a/stdlib/LinearAlgebra/src/generic.jl b/stdlib/LinearAlgebra/src/generic.jl index 0b5c1af380257..4bb9e3bf353fa 100644 --- a/stdlib/LinearAlgebra/src/generic.jl +++ b/stdlib/LinearAlgebra/src/generic.jl @@ -74,7 +74,7 @@ julia> C return end -@inline function _rmul_or_fill!(C::AbstractArray, beta::Number) +@inline function _rmul_or_fill!(C::ArrayLike, beta::Number) if isempty(C) return C end @@ -87,7 +87,7 @@ end end -function generic_mul!(C::AbstractArray, X::AbstractArray, s::Number, _add::MulAddMul) +function generic_mul!(C::ArrayLike, X::ArrayLike, s::Number, _add::MulAddMul) if length(C) != length(X) throw(DimensionMismatch("first array has length $(length(C)) which does not match the length of the second, $(length(X)).")) end @@ -97,7 +97,7 @@ function generic_mul!(C::AbstractArray, X::AbstractArray, s::Number, _add::MulAd C end -function generic_mul!(C::AbstractArray, s::Number, X::AbstractArray, _add::MulAddMul) +function generic_mul!(C::ArrayLike, s::Number, X::ArrayLike, _add::MulAddMul) if length(C) != length(X) throw(DimensionMismatch("first array has length $(length(C)) which does not match the length of the second, $(length(X)).")) @@ -108,15 +108,15 @@ match the length of the second, $(length(X)).")) C end -@inline mul!(C::AbstractArray, s::Number, X::AbstractArray, alpha::Number, beta::Number) = +@inline mul!(C::ArrayLike, s::Number, X::ArrayLike, alpha::Number, beta::Number) = generic_mul!(C, s, X, MulAddMul(alpha, beta)) -@inline mul!(C::AbstractArray, X::AbstractArray, s::Number, alpha::Number, beta::Number) = +@inline mul!(C::ArrayLike, X::ArrayLike, s::Number, alpha::Number, beta::Number) = generic_mul!(C, X, s, MulAddMul(alpha, beta)) # For better performance when input and output are the same array # See https://github.com/JuliaLang/julia/issues/8415#issuecomment-56608729 """ - rmul!(A::AbstractArray, b::Number) + rmul!(A::ArrayLike, b::Number) Scale an array `A` by a scalar `b` overwriting `A` in-place. Use [`lmul!`](@ref) to multiply scalar from left. The scaling operation @@ -145,7 +145,7 @@ julia> rmul!([NaN], 0.0) NaN ``` """ -function rmul!(X::AbstractArray, s::Number) +function rmul!(X::ArrayLike, s::Number) @simd for I in eachindex(X) @inbounds X[I] *= s end @@ -154,7 +154,7 @@ end """ - lmul!(a::Number, B::AbstractArray) + lmul!(a::Number, B::ArrayLike) Scale an array `B` by a scalar `a` overwriting `B` in-place. Use [`rmul!`](@ref) to multiply scalar from right. The scaling operation @@ -183,7 +183,7 @@ julia> lmul!(0.0, [Inf]) NaN ``` """ -function lmul!(s::Number, X::AbstractArray) +function lmul!(s::Number, X::ArrayLike) @simd for I in eachindex(X) @inbounds X[I] = s*X[I] end @@ -191,7 +191,7 @@ function lmul!(s::Number, X::AbstractArray) end """ - rdiv!(A::AbstractArray, b::Number) + rdiv!(A::ArrayLike, b::Number) Divide each entry in an array `A` by a scalar `b` overwriting `A` in-place. Use [`ldiv!`](@ref) to divide scalar from left. @@ -209,7 +209,7 @@ julia> rdiv!(A, 2.0) 1.5 2.0 ``` """ -function rdiv!(X::AbstractArray, s::Number) +function rdiv!(X::ArrayLike, s::Number) @simd for I in eachindex(X) @inbounds X[I] /= s end @@ -217,7 +217,7 @@ function rdiv!(X::AbstractArray, s::Number) end """ - ldiv!(a::Number, B::AbstractArray) + ldiv!(a::Number, B::ArrayLike) Divide each entry in an array `B` by a scalar `a` overwriting `B` in-place. Use [`rdiv!`](@ref) to divide scalar from right. 
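Since the scalar `rmul!`/`lmul!`/`rdiv!`/`ldiv!` methods above all broadcast elementwise and mutate their array argument, here is a short behavioral sketch (illustrative values; only the `LinearAlgebra` stdlib is assumed):

```julia
using LinearAlgebra

B = [1.0 2.0; 3.0 4.0]
rmul!(B, 2.0)      # B is now [2.0 4.0; 6.0 8.0]; scales in place, no copy
ldiv!(2.0, B)      # divides every entry from the left; B is back to [1.0 2.0; 3.0 4.0]

# Because the operation is elementwise rather than a zero-fill, non-finite
# entries propagate, as in the docstring examples: rmul!([NaN], 0.0) yields
# [NaN], not [0.0].
```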
@@ -235,16 +235,16 @@ julia> ldiv!(2.0, B) 1.5 2.0 ``` """ -function ldiv!(s::Number, X::AbstractArray) +function ldiv!(s::Number, X::ArrayLike) @simd for I in eachindex(X) @inbounds X[I] = s\X[I] end X end -ldiv!(Y::AbstractArray, s::Number, X::AbstractArray) = Y .= s .\ X +ldiv!(Y::ArrayLike, s::Number, X::ArrayLike) = Y .= s .\ X # Generic fallback. This assumes that B and Y have the same sizes. -ldiv!(Y::AbstractArray, A::AbstractMatrix, B::AbstractArray) = ldiv!(A, copyto!(Y, B)) +ldiv!(Y::ArrayLike, A::ArrayLike{2}, B::ArrayLike) = ldiv!(A, copyto!(Y, B)) """ @@ -274,7 +274,7 @@ julia> cross(a,b) 0 ``` """ -function cross(a::AbstractVector, b::AbstractVector) +function cross(a::ArrayLike{1}, b::ArrayLike{1}) if !(length(a) == length(b) == 3) throw(DimensionMismatch("cross product is only defined for vectors of length 3")) end @@ -305,7 +305,7 @@ julia> triu(a) 0.0 0.0 0.0 1.0 ``` """ -triu(M::AbstractMatrix) = triu!(copy(M)) +triu(M::ArrayLike{2}) = triu!(copy(M)) """ tril(M) @@ -329,7 +329,7 @@ julia> tril(a) 1.0 1.0 1.0 1.0 ``` """ -tril(M::AbstractMatrix) = tril!(copy(M)) +tril(M::ArrayLike{2}) = tril!(copy(M)) """ triu(M, k::Integer) @@ -360,7 +360,7 @@ julia> triu(a,-3) 1.0 1.0 1.0 1.0 ``` """ -triu(M::AbstractMatrix,k::Integer) = triu!(copy(M),k) +triu(M::ArrayLike{2},k::Integer) = triu!(copy(M),k) """ tril(M, k::Integer) @@ -391,7 +391,7 @@ julia> tril(a,-3) 1.0 0.0 0.0 0.0 ``` """ -tril(M::AbstractMatrix,k::Integer) = tril!(copy(M),k) +tril(M::ArrayLike{2},k::Integer) = tril!(copy(M),k) """ triu!(M) @@ -399,7 +399,7 @@ tril(M::AbstractMatrix,k::Integer) = tril!(copy(M),k) Upper triangle of a matrix, overwriting `M` in the process. See also [`triu`](@ref). """ -triu!(M::AbstractMatrix) = triu!(M,0) +triu!(M::ArrayLike{2}) = triu!(M,0) """ tril!(M) @@ -407,9 +407,9 @@ triu!(M::AbstractMatrix) = triu!(M,0) Lower triangle of a matrix, overwriting `M` in the process. See also [`tril`](@ref). """ -tril!(M::AbstractMatrix) = tril!(M,0) +tril!(M::ArrayLike{2}) = tril!(M,0) -diag(A::AbstractVector) = throw(ArgumentError("use diagm instead of diag to construct a diagonal matrix")) +diag(A::ArrayLike{1}) = throw(ArgumentError("use diagm instead of diag to construct a diagonal matrix")) ########################################################################################### # Dot products and norms @@ -694,7 +694,7 @@ end """ - opnorm(A::AbstractMatrix, p::Real=2) + opnorm(A::ArrayLike{2}, p::Real=2) Compute the operator norm (or matrix norm) induced by the vector `p`-norm, where valid values of `p` are `1`, `2`, or `Inf`. (Note that for sparse matrices, @@ -729,7 +729,7 @@ julia> opnorm(A, 1) 5.0 ``` """ -function opnorm(A::AbstractMatrix, p::Real=2) +function opnorm(A::ArrayLike{2}, p::Real=2) if p == 2 return opnorm2(A) elseif p == 1 @@ -870,7 +870,7 @@ end dot(x::Number, y::Number) = conj(x) * y -function dot(x::AbstractArray, y::AbstractArray) +function dot(x::ArrayLike, y::ArrayLike) lx = length(x) if lx != length(y) throw(DimensionMismatch("first array has length $(lx) which does not match the length of the second, $(length(y)).")) @@ -910,7 +910,7 @@ true """ dot(x, A, y) = dot(x, A*y) # generic fallback for cases that are not covered by specialized methods -function dot(x::AbstractVector, A::AbstractMatrix, y::AbstractVector) +function dot(x::ArrayLike{1}, A::ArrayLike{2}, y::ArrayLike{1}) (axes(x)..., axes(y)...) 
== axes(A) || throw(DimensionMismatch()) T = typeof(dot(first(x), first(A), first(y))) s = zero(T) @@ -928,14 +928,14 @@ function dot(x::AbstractVector, A::AbstractMatrix, y::AbstractVector) end return s end -dot(x::AbstractVector, adjA::Adjoint, y::AbstractVector) = adjoint(dot(y, adjA.parent, x)) -dot(x::AbstractVector, transA::Transpose{<:Real}, y::AbstractVector) = adjoint(dot(y, transA.parent, x)) +dot(x::ArrayLike{1}, adjA::Adjoint, y::ArrayLike{1}) = adjoint(dot(y, adjA.parent, x)) +dot(x::ArrayLike{1}, transA::Transpose{<:Real}, y::ArrayLike{1}) = adjoint(dot(y, transA.parent, x)) ########################################################################################### """ - rank(A::AbstractMatrix; atol::Real=0, rtol::Real=atol>0 ? 0 : n*ϵ) - rank(A::AbstractMatrix, rtol::Real) + rank(A::ArrayLike{2}; atol::Real=0, rtol::Real=atol>0 ? 0 : n*ϵ) + rank(A::ArrayLike{2}, rtol::Real) Compute the rank of a matrix by counting how many singular values of `A` have magnitude greater than `max(atol, rtol*σ₁)` where `σ₁` is @@ -967,7 +967,7 @@ julia> rank(diagm(0 => [1, 0.001, 2]), atol=1.5) 1 ``` """ -function rank(A::AbstractMatrix; atol::Real = 0.0, rtol::Real = (min(size(A)...)*eps(real(float(one(eltype(A))))))*iszero(atol)) +function rank(A::ArrayLike{2}; atol::Real = 0.0, rtol::Real = (min(size(A)...)*eps(real(float(one(eltype(A))))))*iszero(atol)) isempty(A) && return 0 # 0-dimensional case s = svdvals(A) tol = max(atol, rtol*s[1]) @@ -991,16 +991,16 @@ julia> tr(A) 5 ``` """ -function tr(A::AbstractMatrix) +function tr(A::ArrayLike{2}) checksquare(A) sum(diag(A)) end tr(x::Number) = x -#kron(a::AbstractVector, b::AbstractVector) +#kron(a::ArrayLike{1}, b::ArrayLike{1}) #kron(a::AbstractMatrix{T}, b::AbstractMatrix{S}) where {T,S} -#det(a::AbstractMatrix) +#det(a::ArrayLike{2}) """ inv(M) @@ -1055,7 +1055,7 @@ end # this method is just an optimization: literal negative powers of A are # already turned by literal_pow into powers of inv(A), but for A^-1 this # would turn into inv(A)^1 = copy(inv(A)), which makes an extra copy. -@inline Base.literal_pow(::typeof(^), A::AbstractMatrix, ::Val{-1}) = inv(A) +@inline Base.literal_pow(::typeof(^), A::ArrayLike{2}, ::Val{-1}) = inv(A) """ \\(A, B) @@ -1086,7 +1086,7 @@ julia> A * X == B true ``` """ -function (\)(A::AbstractMatrix, B::AbstractVecOrMat) +function (\)(A::ArrayLike{2}, B::VectorOrMatrixLike) require_one_based_indexing(A, B) m, n = size(A) if m == n @@ -1105,20 +1105,20 @@ function (\)(A::AbstractMatrix, B::AbstractVecOrMat) return qr(A,Val(true)) \ B end -(\)(a::AbstractVector, b::AbstractArray) = pinv(a) * b -function (/)(A::AbstractVecOrMat, B::AbstractVecOrMat) +(\)(a::ArrayLike{1}, b::ArrayLike) = pinv(a) * b +function (/)(A::VectorOrMatrixLike, B::VectorOrMatrixLike) size(A,2) != size(B,2) && throw(DimensionMismatch("Both inputs should have the same number of columns")) return copy(adjoint(adjoint(B) \ adjoint(A))) end # \(A::StridedMatrix,x::Number) = inv(A)*x Should be added at some point when the old elementwise version has been deprecated long enough # /(x::Number,A::StridedMatrix) = x*inv(A) -/(x::Number, v::AbstractVector) = x*pinv(v) +/(x::Number, v::ArrayLike{1}) = x*pinv(v) cond(x::Number) = x == 0 ? 
Inf : 1.0 cond(x::Number, p) = cond(x) #Skeel condition numbers -condskeel(A::AbstractMatrix, p::Real=Inf) = opnorm(abs.(inv(A))*abs.(A), p) +condskeel(A::ArrayLike{2}, p::Real=Inf) = opnorm(abs.(inv(A))*abs.(A), p) """ condskeel(M, [x, p::Real=Inf]) @@ -1137,7 +1137,7 @@ Valid values for `p` are `1`, `2` and `Inf` (default). This quantity is also known in the literature as the Bauer condition number, relative condition number, or componentwise relative condition number. """ -condskeel(A::AbstractMatrix, x::AbstractVector, p::Real=Inf) = norm(abs.(inv(A))*(abs.(A)*abs.(x)), p) +condskeel(A::ArrayLike{2}, x::ArrayLike{1}, p::Real=Inf) = norm(abs.(inv(A))*(abs.(A)*abs.(x)), p) issymmetric(A::AbstractMatrix{<:Real}) = ishermitian(A) @@ -1165,7 +1165,7 @@ julia> issymmetric(b) false ``` """ -function issymmetric(A::AbstractMatrix) +function issymmetric(A::ArrayLike{2}) indsm, indsn = axes(A) if indsm != indsn return false @@ -1204,7 +1204,7 @@ julia> ishermitian(b) true ``` """ -function ishermitian(A::AbstractMatrix) +function ishermitian(A::ArrayLike{2}) indsm, indsn = axes(A) if indsm != indsn return false @@ -1220,7 +1220,7 @@ end ishermitian(x::Number) = (x == conj(x)) """ - istriu(A::AbstractMatrix, k::Integer = 0) -> Bool + istriu(A::ArrayLike{2}, k::Integer = 0) -> Bool Test whether `A` is upper triangular starting from the `k`th superdiagonal. @@ -1249,7 +1249,7 @@ julia> istriu(b, 1) false ``` """ -function istriu(A::AbstractMatrix, k::Integer = 0) +function istriu(A::ArrayLike{2}, k::Integer = 0) require_one_based_indexing(A) m, n = size(A) for j in 1:min(n, m + k - 1) @@ -1262,7 +1262,7 @@ end istriu(x::Number) = true """ - istril(A::AbstractMatrix, k::Integer = 0) -> Bool + istril(A::ArrayLike{2}, k::Integer = 0) -> Bool Test whether `A` is lower triangular starting from the `k`th superdiagonal. @@ -1291,7 +1291,7 @@ julia> istril(b, -1) false ``` """ -function istril(A::AbstractMatrix, k::Integer = 0) +function istril(A::ArrayLike{2}, k::Integer = 0) require_one_based_indexing(A) m, n = size(A) for j in max(1, k + 2):n @@ -1304,7 +1304,7 @@ end istril(x::Number) = true """ - isbanded(A::AbstractMatrix, kl::Integer, ku::Integer) -> Bool + isbanded(A::ArrayLike{2}, kl::Integer, ku::Integer) -> Bool Test whether `A` is banded with lower bandwidth starting from the `kl`th superdiagonal and upper bandwidth extending through the `ku`th superdiagonal. 
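To make the band predicates above concrete, a small sketch with illustrative matrices (`isbanded` is simply the conjunction of `istriu` and `istril` with the given bands, and `isdiag` is `isbanded(A, 0, 0)`):

```julia
using LinearAlgebra

a = [1 2; 0 4]
istriu(a)                          # true: nothing below the main diagonal
istril(a)                          # false: a[1, 2] is nonzero

b = [1 0; -2 3]
LinearAlgebra.isbanded(b, -1, 0)   # true: istriu(b, -1) && istril(b, 0)
isdiag(b)                          # false: isbanded(b, 0, 0) fails at b[2, 1]
```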
@@ -1334,7 +1334,7 @@ julia> LinearAlgebra.isbanded(b, -1, 0) true ``` """ -isbanded(A::AbstractMatrix, kl::Integer, ku::Integer) = istriu(A, kl) && istril(A, ku) +isbanded(A::ArrayLike{2}, kl::Integer, ku::Integer) = istriu(A, kl) && istril(A, ku) """ isdiag(A) -> Bool @@ -1360,13 +1360,13 @@ julia> isdiag(b) true ``` """ -isdiag(A::AbstractMatrix) = isbanded(A, 0, 0) +isdiag(A::ArrayLike{2}) = isbanded(A, 0, 0) isdiag(x::Number) = true # BLAS-like in-place y = x*α+y function (see also the version in blas.jl # for BlasFloat Arrays) -function axpy!(α, x::AbstractArray, y::AbstractArray) +function axpy!(α, x::ArrayLike, y::ArrayLike) n = length(x) if n != length(y) throw(DimensionMismatch("x has length $n, but y has length $(length(y))")) @@ -1377,7 +1377,7 @@ function axpy!(α, x::AbstractArray, y::AbstractArray) y end -function axpy!(α, x::AbstractArray, rx::AbstractArray{<:Integer}, y::AbstractArray, ry::AbstractArray{<:Integer}) +function axpy!(α, x::ArrayLike, rx::AbstractArray{<:Integer}, y::ArrayLike, ry::AbstractArray{<:Integer}) if length(rx) != length(ry) throw(DimensionMismatch("rx has length $(length(rx)), but ry has length $(length(ry))")) elseif !checkindex(Bool, eachindex(IndexLinear(), x), rx) @@ -1391,7 +1391,7 @@ function axpy!(α, x::AbstractArray, rx::AbstractArray{<:Integer}, y::AbstractAr y end -function axpby!(α, x::AbstractArray, β, y::AbstractArray) +function axpby!(α, x::ArrayLike, β, y::ArrayLike) if length(x) != length(y) throw(DimensionMismatch("x has length $(length(x)), but y has length $(length(y))")) end @@ -1404,7 +1404,7 @@ end # Elementary reflection similar to LAPACK. The reflector is not Hermitian but # ensures that tridiagonalization of Hermitian matrices become real. See lawn72 -@inline function reflector!(x::AbstractVector) +@inline function reflector!(x::ArrayLike{1}) require_one_based_indexing(x) n = length(x) @inbounds begin @@ -1428,7 +1428,7 @@ end end # apply reflector from left -@inline function reflectorApply!(x::AbstractVector, τ::Number, A::StridedMatrix) +@inline function reflectorApply!(x::ArrayLike{1}, τ::Number, A::StridedMatrix) require_one_based_indexing(x) m, n = size(A) if length(x) != m @@ -1510,7 +1510,7 @@ julia> logabsdet(B) (0.6931471805599453, 1.0) ``` """ -logabsdet(A::AbstractMatrix) = logabsdet(lu(A, check=false)) +logabsdet(A::ArrayLike{2}) = logabsdet(lu(A, check=false)) """ logdet(M) @@ -1532,7 +1532,7 @@ julia> logdet(Matrix(I, 3, 3)) 0.0 ``` """ -function logdet(A::AbstractMatrix) +function logdet(A::ArrayLike{2}) d,s = logabsdet(A) return d + log(s) end @@ -1563,12 +1563,12 @@ Complex{Float64} promote_leaf_eltypes(x::Union{AbstractArray{T},Tuple{T,Vararg{T}}}) where {T<:Number} = T promote_leaf_eltypes(x::Union{AbstractArray{T},Tuple{T,Vararg{T}}}) where {T<:NumberArray} = eltype(T) promote_leaf_eltypes(x::T) where {T} = T -promote_leaf_eltypes(x::Union{AbstractArray,Tuple}) = mapreduce(promote_leaf_eltypes, promote_type, x; init=Bool) +promote_leaf_eltypes(x::Union{ArrayLike,Tuple}) = mapreduce(promote_leaf_eltypes, promote_type, x; init=Bool) # isapprox: approximate equality of arrays [like isapprox(Number,Number)] # Supports nested arrays; e.g., for `a = [[1,2, [3,4]], 5.0, [6im, [7.0, 8.0]]]` # `a ≈ a` is `true`. 
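A hedged illustration of the nested-array `isapprox` fallback described by the comment above; `promote_leaf_eltypes` walks the nesting to pick the eltype used for the default relative tolerance, so mixed nestings compare sensibly (values are hypothetical):

```julia
using LinearAlgebra

a = [[1, 2, [3, 4]], 5.0, [6im, [7.0, 8.0]]]
a ≈ a                              # true, per the comment above

# For the Float64 vectors below the default rtol is sqrt(eps(Float64)),
# so a perturbation of 1e-12 still compares approximately equal:
[1.0, 2.0] ≈ [1.0, 2.0 + 1e-12]    # true
```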
-function isapprox(x::AbstractArray, y::AbstractArray; +function isapprox(x::ArrayLike, y::ArrayLike; atol::Real=0, rtol::Real=Base.rtoldefault(promote_leaf_eltypes(x),promote_leaf_eltypes(y),atol), nans::Bool=false, norm::Function=norm) @@ -1582,18 +1582,18 @@ function isapprox(x::AbstractArray, y::AbstractArray; end """ - normalize!(v::AbstractVector, p::Real=2) + normalize!(v::ArrayLike{1}, p::Real=2) Normalize the vector `v` in-place so that its `p`-norm equals unity, i.e. `norm(v, p) == 1`. See also [`normalize`](@ref) and [`norm`](@ref). """ -function normalize!(v::AbstractVector, p::Real=2) +function normalize!(v::ArrayLike{1}, p::Real=2) nrm = norm(v, p) __normalize!(v, nrm) end -@inline function __normalize!(v::AbstractVector, nrm::AbstractFloat) +@inline function __normalize!(v::ArrayLike{1}, nrm::AbstractFloat) # The largest positive floating point number whose inverse is less than infinity δ = inv(prevfloat(typemax(nrm))) @@ -1611,7 +1611,7 @@ end end """ - normalize(v::AbstractVector, p::Real=2) + normalize(v::ArrayLike{1}, p::Real=2) Normalize the vector `v` so that its `p`-norm equals unity, i.e. `norm(v, p) == 1`. @@ -1640,7 +1640,7 @@ julia> norm(c, 1) 1.0 ``` """ -function normalize(v::AbstractVector, p::Real = 2) +function normalize(v::ArrayLike{1}, p::Real = 2) nrm = norm(v, p) if !isempty(v) vv = copy_oftype(v, typeof(v[1]/nrm)) diff --git a/stdlib/LinearAlgebra/src/givens.jl b/stdlib/LinearAlgebra/src/givens.jl index 3bf571d1c53e9..b1578494a06a5 100644 --- a/stdlib/LinearAlgebra/src/givens.jl +++ b/stdlib/LinearAlgebra/src/givens.jl @@ -10,8 +10,8 @@ function (*)(R::AbstractRotation{T}, A::AbstractVecOrMat{S}) where {T,S} TS = typeof(zero(T)*zero(S) + zero(T)*zero(S)) lmul!(convert(AbstractRotation{TS}, R), TS == S ? copy(A) : convert(AbstractArray{TS}, A)) end -(*)(A::AbstractVector, adjR::Adjoint{<:Any,<:AbstractRotation}) = _absvecormat_mul_adjrot(A, adjR) -(*)(A::AbstractMatrix, adjR::Adjoint{<:Any,<:AbstractRotation}) = _absvecormat_mul_adjrot(A, adjR) +(*)(A::ArrayLike{1}, adjR::Adjoint{<:Any,<:AbstractRotation}) = _absvecormat_mul_adjrot(A, adjR) +(*)(A::ArrayLike{2}, adjR::Adjoint{<:Any,<:AbstractRotation}) = _absvecormat_mul_adjrot(A, adjR) function _absvecormat_mul_adjrot(A::AbstractVecOrMat{T}, adjR::Adjoint{<:Any,<:AbstractRotation{S}}) where {T,S} R = adjR.parent TS = typeof(zero(T)*zero(S) + zero(T)*zero(S)) @@ -283,7 +283,7 @@ function givens(f::T, g::T, i1::Integer, i2::Integer) where T Givens(i1, i2, convert(T, c), convert(T, s)), r end """ - givens(A::AbstractArray, i1::Integer, i2::Integer, j::Integer) -> (G::Givens, r) + givens(A::ArrayLike, i1::Integer, i2::Integer, j::Integer) -> (G::Givens, r) Computes the Givens rotation `G` and scalar `r` such that the result of the multiplication ``` @@ -297,12 +297,12 @@ B[i2,j] = 0 See also: [`LinearAlgebra.Givens`](@ref) """ -givens(A::AbstractMatrix, i1::Integer, i2::Integer, j::Integer) = +givens(A::ArrayLike{2}, i1::Integer, i2::Integer, j::Integer) = givens(A[i1,j], A[i2,j],i1,i2) """ - givens(x::AbstractVector, i1::Integer, i2::Integer) -> (G::Givens, r) + givens(x::ArrayLike{1}, i1::Integer, i2::Integer) -> (G::Givens, r) Computes the Givens rotation `G` and scalar `r` such that the result of the multiplication ``` @@ -316,7 +316,7 @@ B[i2] = 0 See also: [`LinearAlgebra.Givens`](@ref) """ -givens(x::AbstractVector, i1::Integer, i2::Integer) = +givens(x::ArrayLike{1}, i1::Integer, i2::Integer) = givens(x[i1], x[i2], i1, i2) @@ -336,7 +336,7 @@ function getindex(G::Givens, i::Integer, j::Integer) end 
end -@inline function lmul!(G::Givens, A::AbstractVecOrMat) +@inline function lmul!(G::Givens, A::VectorOrMatrixLike) require_one_based_indexing(A) m, n = size(A, 1), size(A, 2) if G.i2 > m @@ -349,7 +349,7 @@ end end return A end -@inline function rmul!(A::AbstractMatrix, G::Givens) +@inline function rmul!(A::ArrayLike{2}, G::Givens) require_one_based_indexing(A) m, n = size(A, 1), size(A, 2) if G.i2 > n @@ -367,13 +367,13 @@ function lmul!(G::Givens, R::Rotation) push!(R.rotations, G) return R end -function lmul!(R::Rotation, A::AbstractMatrix) +function lmul!(R::Rotation, A::ArrayLike{2}) @inbounds for i = 1:length(R.rotations) lmul!(R.rotations[i], A) end return A end -function rmul!(A::AbstractMatrix, adjR::Adjoint{<:Any,<:Rotation}) +function rmul!(A::ArrayLike{2}, adjR::Adjoint{<:Any,<:Rotation}) R = adjR.parent @inbounds for i = 1:length(R.rotations) rmul!(A, adjoint(R.rotations[i])) @@ -386,10 +386,10 @@ end # instead be MethodErrors, or revised. # # disambiguation methods: *(Adj/Trans of AbsVec or AbsMat, Adj of AbstractRotation) -*(A::Adjoint{<:Any,<:AbstractVector}, B::Adjoint{<:Any,<:AbstractRotation}) = copy(A) * B -*(A::Adjoint{<:Any,<:AbstractMatrix}, B::Adjoint{<:Any,<:AbstractRotation}) = copy(A) * B -*(A::Transpose{<:Any,<:AbstractVector}, B::Adjoint{<:Any,<:AbstractRotation}) = copy(A) * B -*(A::Transpose{<:Any,<:AbstractMatrix}, B::Adjoint{<:Any,<:AbstractRotation}) = copy(A) * B +*(A::Adjoint{<:Any,<:ArrayLike{1}}, B::Adjoint{<:Any,<:AbstractRotation}) = copy(A) * B +*(A::Adjoint{<:Any,<:ArrayLike{2}}, B::Adjoint{<:Any,<:AbstractRotation}) = copy(A) * B +*(A::Transpose{<:Any,<:ArrayLike{1}}, B::Adjoint{<:Any,<:AbstractRotation}) = copy(A) * B +*(A::Transpose{<:Any,<:ArrayLike{2}}, B::Adjoint{<:Any,<:AbstractRotation}) = copy(A) * B # disambiguation methods: *(Adj/Trans of AbsTri or RealHermSymComplex{Herm|Sym}, Adj of AbstractRotation) *(A::Adjoint{<:Any,<:AbstractTriangular}, B::Adjoint{<:Any,<:AbstractRotation}) = copy(A) * B *(A::Transpose{<:Any,<:AbstractTriangular}, B::Adjoint{<:Any,<:AbstractRotation}) = copy(A) * B diff --git a/stdlib/LinearAlgebra/src/hessenberg.jl b/stdlib/LinearAlgebra/src/hessenberg.jl index c64c2f55669cd..b1b6fe817798a 100644 --- a/stdlib/LinearAlgebra/src/hessenberg.jl +++ b/stdlib/LinearAlgebra/src/hessenberg.jl @@ -4,7 +4,7 @@ # Upper-Hessenberg matrices H+μI, analogous to the UpperTriangular type """ - UpperHessenberg(A::AbstractMatrix) + UpperHessenberg(A::ArrayLike{2}) Construct an `UpperHessenberg` view of the matrix `A`. Entries of `A` below the first subdiagonal are ignored. @@ -47,9 +47,9 @@ struct UpperHessenberg{T,S<:AbstractMatrix{T}} <: AbstractMatrix{T} end end UpperHessenberg(H::UpperHessenberg) = H -UpperHessenberg{T}(A::AbstractMatrix) where {T} = UpperHessenberg(convert(AbstractMatrix{T}, A)) +UpperHessenberg{T}(A::ArrayLike{2}) where {T} = UpperHessenberg(convert(AbstractMatrix{T}, A)) UpperHessenberg{T}(H::UpperHessenberg) where {T} = UpperHessenberg{T}(H.data) -UpperHessenberg(A::AbstractMatrix) = UpperHessenberg{eltype(A),typeof(A)}(A) +UpperHessenberg(A::ArrayLike{2}) = UpperHessenberg{eltype(A),typeof(A)}(A) Matrix(H::UpperHessenberg{T}) where {T} = Matrix{T}(H) Array(H::UpperHessenberg) = Matrix(H) size(H::UpperHessenberg, d) = size(H.data, d) @@ -124,7 +124,7 @@ fillstored!(H::UpperHessenberg, x) = (fillband!(H.data, x, -1, size(H,2)-1); H) # right to left, and doing backsubstitution *simultaneously*. 
# solve (H+μI)X = B, storing result in B -function ldiv!(F::UpperHessenberg, B::AbstractVecOrMat; shift::Number=false) +function ldiv!(F::UpperHessenberg, B::VectorOrMatrixLike; shift::Number=false) checksquare(F) m = size(F,1) m != size(B,1) && throw(DimensionMismatch("wrong right-hand-side # rows != $m")) @@ -174,7 +174,7 @@ end # of rows/cols. Essentially, we take the ldiv! algorithm, # swap indices of H and X to transpose, and reverse the # order of the H indices (or the order of the loops). -function rdiv!(B::AbstractMatrix, F::UpperHessenberg; shift::Number=false) +function rdiv!(B::ArrayLike{2}, F::UpperHessenberg; shift::Number=false) checksquare(F) m = size(F,1) m != size(B,2) && throw(DimensionMismatch("wrong right-hand-side # cols != $m")) @@ -284,7 +284,7 @@ function logabsdet(F::UpperHessenberg; shift::Number=false) return (logdeterminant, P) end -function dot(x::AbstractVector, H::UpperHessenberg, y::AbstractVector) +function dot(x::ArrayLike{1}, H::UpperHessenberg, y::ArrayLike{1}) require_one_based_indexing(x, y) m = size(H, 1) (length(x) == m == length(y)) || throw(DimensionMismatch()) @@ -324,14 +324,14 @@ end A `Hessenberg` object represents the Hessenberg factorization `QHQ'` of a square matrix, or a shift `Q(H+μI)Q'` thereof, which is produced by the [`hessenberg`](@ref) function. """ -struct Hessenberg{T,SH<:AbstractMatrix,S<:AbstractMatrix,W<:AbstractVector,V<:Number} <: Factorization{T} +struct Hessenberg{T,SH<:ArrayLike{2},S<:ArrayLike{2},W<:ArrayLike{1},V<:Number} <: Factorization{T} H::SH # UpperHessenberg or SymTridiagonal uplo::Char factors::S # reflector data in uplo triangle, may share data with H τ::W # more Q (reflector) data μ::V # diagonal shift for copy-free (F+μI) \ b solves and similar end -Hessenberg(factors::AbstractMatrix, τ::AbstractVector, H::AbstractMatrix=UpperHessenberg(factors), uplo::AbstractChar='L'; μ::Number=false) = +Hessenberg(factors::ArrayLike{2}, τ::ArrayLike{1}, H::ArrayLike{2}=UpperHessenberg(factors), uplo::AbstractChar='L'; μ::Number=false) = Hessenberg{typeof(zero(eltype(factors))+μ),typeof(H),typeof(factors),typeof(τ),typeof(μ)}(H, uplo, factors, τ, μ) Hessenberg(F::Hessenberg) = F Hessenberg(F::Hessenberg, μ::Number) = Hessenberg(F.factors, F.τ, F.H, F.uplo; μ=μ) @@ -362,7 +362,7 @@ end `hessenberg!` is the same as [`hessenberg`](@ref), but saves space by overwriting the input `A`, instead of creating a copy. """ -hessenberg!(A::AbstractMatrix) +hessenberg!(A::ArrayLike{2}) """ hessenberg(A) -> Hessenberg @@ -441,11 +441,11 @@ matrix `Q` in the Hessenberg factorization `QHQ'` represented by `F`. This `F.Q` object can be efficiently multiplied by matrices or vectors, and can be converted to an ordinary matrix type with `Matrix(F.Q)`. 
""" -struct HessenbergQ{T,S<:AbstractMatrix,W<:AbstractVector,sym} <: AbstractQ{T} +struct HessenbergQ{T,S<:ArrayLike{2},W<:ArrayLike{1},sym} <: AbstractQ{T} uplo::Char factors::S τ::W - function HessenbergQ{T,S,W,sym}(uplo::AbstractChar, factors, τ) where {T,S<:AbstractMatrix,W<:AbstractVector,sym} + function HessenbergQ{T,S,W,sym}(uplo::AbstractChar, factors, τ) where {T,S<:ArrayLike{2},W<:ArrayLike{1},sym} new(uplo, factors, τ) end end @@ -510,7 +510,7 @@ rmul!(X::Adjoint{T,<:StridedMatrix{T}}, adjQ::Adjoint{<:Any,<:HessenbergQ{T}}) w # multiply x by the entries of M in the upper-k triangle, which contains # the entries of the upper-Hessenberg matrix H for k=-1 -function rmul_triu!(M::AbstractMatrix, x, k::Integer=0) +function rmul_triu!(M::ArrayLike{2}, x, k::Integer=0) require_one_based_indexing(M) m, n = size(M) for j = 1:n, i = 1:min(j-k,m) @@ -518,7 +518,7 @@ function rmul_triu!(M::AbstractMatrix, x, k::Integer=0) end return M end -function lmul_triu!(x, M::AbstractMatrix, k::Integer=0) +function lmul_triu!(x, M::ArrayLike{2}, k::Integer=0) require_one_based_indexing(M) m, n = size(M) for j = 1:n, i = 1:min(j-k,m) @@ -561,7 +561,7 @@ end -(F::Hessenberg, J::UniformScaling) = Hessenberg(F, F.μ - J.λ) -(J::UniformScaling, F::Hessenberg) = Hessenberg(-F, J.λ - F.μ) -function ldiv!(F::Hessenberg, B::AbstractVecOrMat) +function ldiv!(F::Hessenberg, B::VectorOrMatrixLike) Q = F.Q if iszero(F.μ) return lmul!(Q, ldiv!(F.H, lmul!(Q', B))) @@ -570,7 +570,7 @@ function ldiv!(F::Hessenberg, B::AbstractVecOrMat) end end -function rdiv!(B::AbstractMatrix, F::Hessenberg) +function rdiv!(B::ArrayLike{2}, F::Hessenberg) Q = F.Q return rmul!(rdiv!(rmul!(B, Q), F.H; shift=F.μ), Q') end @@ -598,8 +598,8 @@ function rdiv!(B::AbstractVecOrMat{<:Complex}, F::Hessenberg{<:Complex,<:Any,<:A return B .= Complex.(Br,Bi) end -ldiv!(F::Adjoint{<:Any,<:Hessenberg}, B::AbstractVecOrMat) = rdiv!(B', F')' -rdiv!(B::AbstractMatrix, F::Adjoint{<:Any,<:Hessenberg}) = ldiv!(F', B')' +ldiv!(F::Adjoint{<:Any,<:Hessenberg}, B::VectorOrMatrixLike) = rdiv!(B', F')' +rdiv!(B::ArrayLike{2}, F::Adjoint{<:Any,<:Hessenberg}) = ldiv!(F', B')' det(F::Hessenberg) = det(F.H; shift=F.μ) logabsdet(F::Hessenberg) = logabsdet(F.H; shift=F.μ) diff --git a/stdlib/LinearAlgebra/src/lapack.jl b/stdlib/LinearAlgebra/src/lapack.jl index 32168559dcc7f..0655d9e7a9fd1 100644 --- a/stdlib/LinearAlgebra/src/lapack.jl +++ b/stdlib/LinearAlgebra/src/lapack.jl @@ -14,7 +14,7 @@ import ..LinearAlgebra: BlasFloat, BlasInt, LAPACKException, using ..LinearAlgebra: triu, tril, dot -using Base: iszero, require_one_based_indexing +using Base: iszero, require_one_based_indexing, VectorOrMatrixLike #Generic LAPACK error handlers """ @@ -83,10 +83,10 @@ function chkdiag(diag::AbstractChar) diag end -subsetrows(X::AbstractVector, Y::AbstractArray, k) = Y[1:k] -subsetrows(X::AbstractMatrix, Y::AbstractArray, k) = Y[1:k, :] +subsetrows(X::ArrayLike{1}, Y::ArrayLike, k) = Y[1:k] +subsetrows(X::ArrayLike{2}, Y::ArrayLike, k) = Y[1:k, :] -function chkfinite(A::AbstractMatrix) +function chkfinite(A::ArrayLike{2}) for a in A if !isfinite(a) throw(ArgumentError("matrix contains Infs or NaNs")) @@ -172,7 +172,7 @@ subdiagonal containing a nonzero band, `ku` is the last superdiagonal containing one, and `m` is the first dimension of the matrix `AB`. Returns the LU factorization in-place and `ipiv`, the vector of pivots used. 
""" -gbtrf!(kl::Integer, ku::Integer, m::Integer, AB::AbstractMatrix) +gbtrf!(kl::Integer, ku::Integer, m::Integer, AB::ArrayLike{2}) """ gbtrs!(trans, kl, ku, m, AB, ipiv, B) @@ -183,7 +183,7 @@ first subdiagonal containing a nonzero band, `ku` is the last superdiagonal containing one, and `m` is the first dimension of the matrix `AB`. `ipiv` is the vector of pivots returned from `gbtrf!`. Returns the vector or matrix `X`, overwriting `B` in-place. """ -gbtrs!(trans::AbstractChar, kl::Integer, ku::Integer, m::Integer, AB::AbstractMatrix, ipiv::AbstractVector{BlasInt}, B::AbstractVecOrMat) +gbtrs!(trans::AbstractChar, kl::Integer, ku::Integer, m::Integer, AB::ArrayLike{2}, ipiv::AbstractVector{BlasInt}, B::VectorOrMatrixLike) ## (GE) general matrices: balancing and back-transforming for (gebal, gebak, elty, relty) in @@ -249,7 +249,7 @@ and scaled). Modifies `A` in-place and returns `ilo`, `ihi`, and `scale`. If permuting was turned on, `A[i,j] = 0` if `j > i` and `1 < j < ilo` or `j > ihi`. `scale` contains information about the scaling/permutations performed. """ -gebal!(job::AbstractChar, A::AbstractMatrix) +gebal!(job::AbstractChar, A::ArrayLike{2}) """ gebak!(job, side, ilo, ihi, scale, V) @@ -259,7 +259,7 @@ the unscaled/unpermuted eigenvectors of the original matrix. Modifies `V` in-place. `side` can be `L` (left eigenvectors are transformed) or `R` (right eigenvectors are transformed). """ -gebak!(job::AbstractChar, side::AbstractChar, ilo::BlasInt, ihi::BlasInt, scale::AbstractVector, V::AbstractMatrix) +gebak!(job::AbstractChar, side::AbstractChar, ilo::BlasInt, ihi::BlasInt, scale::ArrayLike{1}, V::ArrayLike{2}) # (GE) general matrices, direct decompositions # @@ -566,7 +566,7 @@ containing the off-diagonal elements of `B`; `tauq`, containing the elementary reflectors representing `Q`; and `taup`, containing the elementary reflectors representing `P`. """ -gebrd!(A::AbstractMatrix) +gebrd!(A::ArrayLike{2}) """ gelqf!(A, tau) @@ -578,7 +578,7 @@ must have length greater than or equal to the smallest dimension of `A`. Returns `A` and `tau` modified in-place. """ -gelqf!(A::AbstractMatrix, tau::AbstractVector) +gelqf!(A::ArrayLike{2}, tau::ArrayLike{1}) """ geqlf!(A, tau) @@ -589,7 +589,7 @@ must have length greater than or equal to the smallest dimension of `A`. Returns `A` and `tau` modified in-place. """ -geqlf!(A::AbstractMatrix, tau::AbstractVector) +geqlf!(A::ArrayLike{2}, tau::ArrayLike{1}) """ geqp3!(A, jpvt, tau) @@ -602,7 +602,7 @@ smallest dimension of `A`. `A`, `jpvt`, and `tau` are modified in-place. """ -geqp3!(A::AbstractMatrix, jpvt::AbstractVector{BlasInt}, tau::AbstractVector) +geqp3!(A::ArrayLike{2}, jpvt::AbstractVector{BlasInt}, tau::ArrayLike{1}) """ geqrt!(A, T) @@ -615,7 +615,7 @@ dimension of `A`. Returns `A` and `T` modified in-place. """ -geqrt!(A::AbstractMatrix, T::AbstractMatrix) +geqrt!(A::ArrayLike{2}, T::ArrayLike{2}) """ geqrt3!(A, T) @@ -628,7 +628,7 @@ equal the smallest dimension of `A`. Returns `A` and `T` modified in-place. """ -geqrt3!(A::AbstractMatrix, T::AbstractMatrix) +geqrt3!(A::ArrayLike{2}, T::ArrayLike{2}) """ geqrf!(A, tau) @@ -639,7 +639,7 @@ must have length greater than or equal to the smallest dimension of `A`. Returns `A` and `tau` modified in-place. """ -geqrf!(A::AbstractMatrix, tau::AbstractVector) +geqrf!(A::ArrayLike{2}, tau::ArrayLike{1}) """ gerqf!(A, tau) @@ -650,7 +650,7 @@ must have length greater than or equal to the smallest dimension of `A`. Returns `A` and `tau` modified in-place. 
""" -gerqf!(A::AbstractMatrix, tau::AbstractVector) +gerqf!(A::ArrayLike{2}, tau::ArrayLike{1}) """ getrf!(A) -> (A, ipiv, info) @@ -661,7 +661,7 @@ Returns `A`, modified in-place, `ipiv`, the pivoting information, and an `info` code which indicates success (`info = 0`), a singular value in `U` (`info = i`, in which case `U[i,i]` is singular), or an error code (`info < 0`). """ -getrf!(A::AbstractMatrix, tau::AbstractVector) +getrf!(A::ArrayLike{2}, tau::ArrayLike{1}) """ gelqf!(A) -> (A, tau) @@ -913,7 +913,7 @@ can be unmodified (`trans = N`), transposed (`trans = T`), or conjugate transposed (`trans = C`). Returns matrix `C` which is modified in-place with the result of the multiplication. """ -ormrz!(side::AbstractChar, trans::AbstractChar, A::AbstractMatrix, tau::AbstractVector, C::AbstractMatrix) +ormrz!(side::AbstractChar, trans::AbstractChar, A::ArrayLike{2}, tau::ArrayLike{1}, C::ArrayLike{2}) """ tzrzf!(A) -> (A, tau) @@ -922,7 +922,7 @@ Transforms the upper trapezoidal matrix `A` to upper triangular form in-place. Returns `A` and `tau`, the scalar parameters for the elementary reflectors of the transformation. """ -tzrzf!(A::AbstractMatrix) +tzrzf!(A::ArrayLike{2}) ## (GE) general matrices, solvers with factorization, solver and inverse for (gels, gesv, getrs, getri, elty) in @@ -1065,7 +1065,7 @@ may be one of `N` (no modification), `T` (transpose), or `C` (conjugate transpose). `gels!` searches for the minimum norm/least squares solution. `A` may be under or over determined. The solution is returned in `B`. """ -gels!(trans::AbstractChar, A::AbstractMatrix, B::AbstractVecOrMat) +gels!(trans::AbstractChar, A::ArrayLike{2}, B::VectorOrMatrixLike) """ gesv!(A, B) -> (B, A, ipiv) @@ -1075,7 +1075,7 @@ the `LU` factorization of `A`. `A` is overwritten with its `LU` factorization and `B` is overwritten with the solution `X`. `ipiv` contains the pivoting information for the `LU` factorization of `A`. """ -gesv!(A::AbstractMatrix, B::AbstractVecOrMat) +gesv!(A::ArrayLike{2}, B::VectorOrMatrixLike) """ getrs!(trans, A, ipiv, B) @@ -1086,7 +1086,7 @@ is the `LU` factorization from `getrf!`, with `ipiv` the pivoting information. `trans` may be one of `N` (no modification), `T` (transpose), or `C` (conjugate transpose). """ -getrs!(trans::AbstractChar, A::AbstractMatrix, ipiv::AbstractVector{BlasInt}, B::AbstractVecOrMat) +getrs!(trans::AbstractChar, A::ArrayLike{2}, ipiv::AbstractVector{BlasInt}, B::VectorOrMatrixLike) """ getri!(A, ipiv) @@ -1096,7 +1096,7 @@ Computes the inverse of `A`, using its `LU` factorization found by contains the `LU` factorization of `getrf!`. `A` is overwritten with its inverse. """ -getri!(A::AbstractMatrix, ipiv::AbstractVector{BlasInt}) +getri!(A::ArrayLike{2}, ipiv::AbstractVector{BlasInt}) for (gesvx, elty) in ((:dgesvx_,:Float64), @@ -1264,15 +1264,15 @@ condition number of `A` after equilbrating; `ferr`, the forward error bound for each solution vector in `X`; `berr`, the forward error bound for each solution vector in `X`; and `work`, the reciprocal pivot growth factor. """ -gesvx!(fact::AbstractChar, trans::AbstractChar, A::AbstractMatrix, AF::AbstractMatrix, - ipiv::AbstractVector{BlasInt}, equed::AbstractChar, R::AbstractVector, C::AbstractVector, B::AbstractVecOrMat) +gesvx!(fact::AbstractChar, trans::AbstractChar, A::ArrayLike{2}, AF::ArrayLike{2}, + ipiv::AbstractVector{BlasInt}, equed::AbstractChar, R::ArrayLike{1}, C::ArrayLike{1}, B::VectorOrMatrixLike) """ gesvx!(A, B) The no-equilibration, no-transpose simplification of `gesvx!`. 
""" -gesvx!(A::AbstractMatrix, B::AbstractVecOrMat) +gesvx!(A::ArrayLike{2}, B::VectorOrMatrixLike) for (gelsd, gelsy, elty) in ((:dgelsd_,:dgelsy_,:Float64), @@ -1475,7 +1475,7 @@ is overwritten with the solution `X`. Singular values below `rcond` will be treated as zero. Returns the solution in `B` and the effective rank of `A` in `rnk`. """ -gelsd!(A::AbstractMatrix, B::AbstractVecOrMat, rcond::Real) +gelsd!(A::ArrayLike{2}, B::VectorOrMatrixLike, rcond::Real) """ gelsy!(A, B, rcond) -> (B, rnk) @@ -1486,7 +1486,7 @@ is overwritten with the solution `X`. Singular values below `rcond` will be treated as zero. Returns the solution in `B` and the effective rank of `A` in `rnk`. """ -gelsy!(A::AbstractMatrix, B::AbstractVecOrMat, rcond::Real) +gelsy!(A::ArrayLike{2}, B::VectorOrMatrixLike, rcond::Real) for (gglse, elty) in ((:dgglse_, :Float64), (:sgglse_, :Float32), @@ -1546,7 +1546,7 @@ Solves the equation `A * x = c` where `x` is subject to the equality constraint `B * x = d`. Uses the formula `||c - A*x||^2 = 0` to solve. Returns `X` and the residual sum-of-squares. """ -gglse!(A::AbstractMatrix, c::AbstractVector, B::AbstractMatrix, d::AbstractVector) +gglse!(A::ArrayLike{2}, c::ArrayLike{1}, B::ArrayLike{2}, d::ArrayLike{1}) # (GE) general matrices eigenvalue-eigenvector and singular value decompositions for (geev, gesvd, gesdd, ggsvd, elty, relty) in @@ -1845,7 +1845,7 @@ aren't computed. If `jobvl = V` or `jobvr = V`, the corresponding eigenvectors are computed. Returns the eigenvalues in `W`, the right eigenvectors in `VR`, and the left eigenvectors in `VL`. """ -geev!(jobvl::AbstractChar, jobvr::AbstractChar, A::AbstractMatrix) +geev!(jobvl::AbstractChar, jobvr::AbstractChar, A::ArrayLike{2}) """ gesdd!(job, A) -> (U, S, VT) @@ -1857,7 +1857,7 @@ are computed. If `job = O`, `A` is overwritten with the columns of (thin) `U` and the rows of (thin) `V'`. If `job = S`, the columns of (thin) `U` and the rows of (thin) `V'` are computed and returned separately. """ -gesdd!(job::AbstractChar, A::AbstractMatrix) +gesdd!(job::AbstractChar, A::ArrayLike{2}) """ gesvd!(jobu, jobvt, A) -> (U, S, VT) @@ -1873,7 +1873,7 @@ computed and returned separately. `jobu` and `jobvt` can't both be `O`. Returns `U`, `S`, and `Vt`, where `S` are the singular values of `A`. """ -gesvd!(jobu::AbstractChar, jobvt::AbstractChar, A::AbstractMatrix) +gesvd!(jobu::AbstractChar, jobvt::AbstractChar, A::ArrayLike{2}) """ ggsvd!(jobu, jobv, jobq, A, B) -> (U, V, Q, alpha, beta, k, l, R) @@ -1886,7 +1886,7 @@ the orthogonal/unitary matrix `Q` is computed. If `jobu`, `jobv` or `jobq` is `N`, that matrix is not computed. This function is only available in LAPACK versions prior to 3.6.0. """ -ggsvd!(jobu::AbstractChar, jobv::AbstractChar, jobq::AbstractChar, A::AbstractMatrix, B::AbstractMatrix) +ggsvd!(jobu::AbstractChar, jobv::AbstractChar, jobq::AbstractChar, A::ArrayLike{2}, B::ArrayLike{2}) for (f, elty) in ((:dggsvd3_, :Float64), @@ -2335,7 +2335,7 @@ condition numbers are computed for the right eigenvectors and the eigenvectors. If `sense = E,B`, the right and left eigenvectors must be computed. """ -geevx!(balanc::AbstractChar, jobvl::AbstractChar, jobvr::AbstractChar, sense::AbstractChar, A::AbstractMatrix) +geevx!(balanc::AbstractChar, jobvl::AbstractChar, jobvr::AbstractChar, sense::AbstractChar, A::ArrayLike{2}) """ ggev!(jobvl, jobvr, A, B) -> (alpha, beta, vl, vr) @@ -2345,7 +2345,7 @@ the left eigenvectors aren't computed. If `jobvr = N`, the right eigenvectors aren't computed. 
If `jobvl = V` or `jobvr = V`, the corresponding eigenvectors are computed. """ -ggev!(jobvl::AbstractChar, jobvr::AbstractChar, A::AbstractMatrix, B::AbstractMatrix) +ggev!(jobvl::AbstractChar, jobvr::AbstractChar, A::ArrayLike{2}, B::ArrayLike{2}) # One step incremental condition estimation of max/min singular values for (laic1, elty) in @@ -2525,7 +2525,7 @@ superdiagonal. Overwrites `B` with the solution `X` and returns it. """ -gtsv!(dl::AbstractVector, d::AbstractVector, du::AbstractVector, B::AbstractVecOrMat) +gtsv!(dl::ArrayLike{1}, d::ArrayLike{1}, du::ArrayLike{1}, B::VectorOrMatrixLike) """ gttrf!(dl, d, du) -> (dl, d, du, du2, ipiv) @@ -2536,7 +2536,7 @@ subdiagonal, `d` on the diagonal, and `du` on the superdiagonal. Modifies `dl`, `d`, and `du` in-place and returns them and the second superdiagonal `du2` and the pivoting vector `ipiv`. """ -gttrf!(dl::AbstractVector, d::AbstractVector, du::AbstractVector) +gttrf!(dl::ArrayLike{1}, d::ArrayLike{1}, du::ArrayLike{1}) """ gttrs!(trans, dl, d, du, du2, ipiv, B) @@ -2545,8 +2545,8 @@ Solves the equation `A * X = B` (`trans = N`), `transpose(A) * X = B` (`trans = or `adjoint(A) * X = B` (`trans = C`) using the `LU` factorization computed by `gttrf!`. `B` is overwritten with the solution `X`. """ -gttrs!(trans::AbstractChar, dl::AbstractVector, d::AbstractVector, du::AbstractVector, du2::AbstractVector, - ipiv::AbstractVector{BlasInt}, B::AbstractVecOrMat) +gttrs!(trans::AbstractChar, dl::ArrayLike{1}, d::ArrayLike{1}, du::ArrayLike{1}, du2::ArrayLike{1}, + ipiv::AbstractVector{BlasInt}, B::VectorOrMatrixLike) ## (OR) orthogonal (or UN, unitary) matrices, extractors and multiplication for (orglq, orgqr, orgql, orgrq, ormlq, ormqr, ormql, ormrq, gemqrt, elty) in @@ -2951,7 +2951,7 @@ end Explicitly finds the matrix `Q` of a `LQ` factorization after calling `gelqf!` on `A`. Uses the output of `gelqf!`. `A` is overwritten by `Q`. """ -orglq!(A::AbstractMatrix, tau::AbstractVector, k::Integer = length(tau)) +orglq!(A::ArrayLike{2}, tau::ArrayLike{1}, k::Integer = length(tau)) """ orgqr!(A, tau, k = length(tau)) @@ -2959,7 +2959,7 @@ orglq!(A::AbstractMatrix, tau::AbstractVector, k::Integer = length(tau)) Explicitly finds the matrix `Q` of a `QR` factorization after calling `geqrf!` on `A`. Uses the output of `geqrf!`. `A` is overwritten by `Q`. """ -orgqr!(A::AbstractMatrix, tau::AbstractVector, k::Integer = length(tau)) +orgqr!(A::ArrayLike{2}, tau::ArrayLike{1}, k::Integer = length(tau)) """ orgql!(A, tau, k = length(tau)) @@ -2967,7 +2967,7 @@ orgqr!(A::AbstractMatrix, tau::AbstractVector, k::Integer = length(tau)) Explicitly finds the matrix `Q` of a `QL` factorization after calling `geqlf!` on `A`. Uses the output of `geqlf!`. `A` is overwritten by `Q`. """ -orgql!(A::AbstractMatrix, tau::AbstractVector, k::Integer = length(tau)) +orgql!(A::ArrayLike{2}, tau::ArrayLike{1}, k::Integer = length(tau)) """ orgrq!(A, tau, k = length(tau)) @@ -2975,7 +2975,7 @@ orgql!(A::AbstractMatrix, tau::AbstractVector, k::Integer = length(tau)) Explicitly finds the matrix `Q` of a `RQ` factorization after calling `gerqf!` on `A`. Uses the output of `gerqf!`. `A` is overwritten by `Q`. """ -orgrq!(A::AbstractMatrix, tau::AbstractVector, k::Integer = length(tau)) +orgrq!(A::ArrayLike{2}, tau::ArrayLike{1}, k::Integer = length(tau)) """ ormlq!(side, trans, A, tau, C) @@ -2985,7 +2985,7 @@ Computes `Q * C` (`trans = N`), `transpose(Q) * C` (`trans = T`), `adjoint(Q) * for `side = R` using `Q` from a `LQ` factorization of `A` computed using `gelqf!`. 
`C` is overwritten. """ -ormlq!(side::AbstractChar, trans::AbstractChar, A::AbstractMatrix, tau::AbstractVector, C::AbstractVecOrMat) +ormlq!(side::AbstractChar, trans::AbstractChar, A::ArrayLike{2}, tau::ArrayLike{1}, C::VectorOrMatrixLike) """ ormqr!(side, trans, A, tau, C) @@ -2995,7 +2995,7 @@ Computes `Q * C` (`trans = N`), `transpose(Q) * C` (`trans = T`), `adjoint(Q) * for `side = R` using `Q` from a `QR` factorization of `A` computed using `geqrf!`. `C` is overwritten. """ -ormqr!(side::AbstractChar, trans::AbstractChar, A::AbstractMatrix, tau::AbstractVector, C::AbstractVecOrMat) +ormqr!(side::AbstractChar, trans::AbstractChar, A::ArrayLike{2}, tau::ArrayLike{1}, C::VectorOrMatrixLike) """ ormql!(side, trans, A, tau, C) @@ -3005,7 +3005,7 @@ Computes `Q * C` (`trans = N`), `transpose(Q) * C` (`trans = T`), `adjoint(Q) * for `side = R` using `Q` from a `QL` factorization of `A` computed using `geqlf!`. `C` is overwritten. """ -ormql!(side::AbstractChar, trans::AbstractChar, A::AbstractMatrix, tau::AbstractVector, C::AbstractVecOrMat) +ormql!(side::AbstractChar, trans::AbstractChar, A::ArrayLike{2}, tau::ArrayLike{1}, C::VectorOrMatrixLike) """ ormrq!(side, trans, A, tau, C) @@ -3015,7 +3015,7 @@ Computes `Q * C` (`trans = N`), `transpose(Q) * C` (`trans = T`), `adjoint(Q) * for `side = R` using `Q` from a `RQ` factorization of `A` computed using `gerqf!`. `C` is overwritten. """ -ormrq!(side::AbstractChar, trans::AbstractChar, A::AbstractMatrix, tau::AbstractVector, C::AbstractVecOrMat) +ormrq!(side::AbstractChar, trans::AbstractChar, A::ArrayLike{2}, tau::ArrayLike{1}, C::VectorOrMatrixLike) """ gemqrt!(side, trans, V, T, C) @@ -3025,7 +3025,7 @@ Computes `Q * C` (`trans = N`), `transpose(Q) * C` (`trans = T`), `adjoint(Q) * for `side = R` using `Q` from a `QR` factorization of `A` computed using `geqrt!`. `C` is overwritten. """ -gemqrt!(side::AbstractChar, trans::AbstractChar, V::AbstractMatrix, T::AbstractMatrix, C::AbstractVecOrMat) +gemqrt!(side::AbstractChar, trans::AbstractChar, V::ArrayLike{2}, T::ArrayLike{2}, C::VectorOrMatrixLike) # (PO) positive-definite symmetric matrices, for (posv, potrf, potri, potrs, pstrf, elty, rtyp) in @@ -3168,7 +3168,7 @@ of `A` is computed. If `uplo = L` the lower Cholesky decomposition of `A` is computed. `A` is overwritten by its Cholesky decomposition. `B` is overwritten with the solution `X`. """ -posv!(uplo::AbstractChar, A::AbstractMatrix, B::AbstractVecOrMat) +posv!(uplo::AbstractChar, A::ArrayLike{2}, B::VectorOrMatrixLike) """ potrf!(uplo, A) @@ -3177,7 +3177,7 @@ Computes the Cholesky (upper if `uplo = U`, lower if `uplo = L`) decomposition of positive-definite matrix `A`. `A` is overwritten and returned with an info code. """ -potrf!(uplo::AbstractChar, A::AbstractMatrix) +potrf!(uplo::AbstractChar, A::ArrayLike{2}) """ potri!(uplo, A) @@ -3188,7 +3188,7 @@ decomposition. `A` is overwritten by its inverse and returned. """ -potri!(uplo::AbstractChar, A::AbstractMatrix) +potri!(uplo::AbstractChar, A::ArrayLike{2}) """ potrs!(uplo, A, B) @@ -3199,7 +3199,7 @@ positive definite matrix whose Cholesky decomposition was computed by computed. If `uplo = L` the lower Cholesky decomposition of `A` was computed. `B` is overwritten with the solution `X`. """ -potrs!(uplo::AbstractChar, A::AbstractMatrix, B::AbstractVecOrMat) +potrs!(uplo::AbstractChar, A::ArrayLike{2}, B::VectorOrMatrixLike) """ pstrf!(uplo, A, tol) -> (A, piv, rank, info) @@ -3212,7 +3212,7 @@ Returns `A`, the pivots `piv`, the rank of `A`, and an `info` code. 
If `info = 0 the factorization succeeded. If `info = i > 0 `, then `A` is indefinite or rank-deficient. """ -pstrf!(uplo::AbstractChar, A::AbstractMatrix, tol::Real) +pstrf!(uplo::AbstractChar, A::ArrayLike{2}, tol::Real) # (PT) positive-definite, symmetric, tri-diagonal matrices # Direct solvers for general tridiagonal and symmetric positive-definite tridiagonal @@ -3275,7 +3275,7 @@ Solves `A * X = B` for positive-definite tridiagonal `A`. `D` is the diagonal of `A` and `E` is the off-diagonal. `B` is overwritten with the solution `X` and returned. """ -ptsv!(D::AbstractVector, E::AbstractVector, B::AbstractVecOrMat) +ptsv!(D::ArrayLike{1}, E::ArrayLike{1}, B::VectorOrMatrixLike) """ pttrf!(D, E) @@ -3284,7 +3284,7 @@ Computes the LDLt factorization of a positive-definite tridiagonal matrix with `D` as diagonal and `E` as off-diagonal. `D` and `E` are overwritten and returned. """ -pttrf!(D::AbstractVector, E::AbstractVector) +pttrf!(D::ArrayLike{1}, E::ArrayLike{1}) for (pttrs, elty, relty) in ((:dpttrs_,:Float64,:Float64), @@ -3357,7 +3357,7 @@ Solves `A * X = B` for positive-definite tridiagonal `A` with diagonal `D` and off-diagonal `E` after computing `A`'s LDLt factorization using `pttrf!`. `B` is overwritten with the solution `X`. """ -pttrs!(D::AbstractVector, E::AbstractVector, B::AbstractVecOrMat) +pttrs!(D::ArrayLike{1}, E::ArrayLike{1}, B::VectorOrMatrixLike) ## (TR) triangular matrices: solver and inverse for (trtri, trtrs, elty) in @@ -3424,7 +3424,7 @@ triangular matrix `A`. If `diag = N`, `A` has non-unit diagonal elements. If `diag = U`, all diagonal elements of `A` are one. `A` is overwritten with its inverse. """ -trtri!(uplo::AbstractChar, diag::AbstractChar, A::AbstractMatrix) +trtri!(uplo::AbstractChar, diag::AbstractChar, A::ArrayLike{2}) """ trtrs!(uplo, trans, diag, A, B) @@ -3435,7 +3435,7 @@ triangular matrix `A`. If `diag = N`, `A` has non-unit diagonal elements. If `diag = U`, all diagonal elements of `A` are one. `B` is overwritten with the solution `X`. """ -trtrs!(uplo::AbstractChar, trans::AbstractChar, diag::AbstractChar, A::AbstractMatrix, B::AbstractVecOrMat) +trtrs!(uplo::AbstractChar, trans::AbstractChar, diag::AbstractChar, A::ArrayLike{2}, B::VectorOrMatrixLike) #Eigenvector computation and condition number estimation for (trcon, trevc, trrfs, elty) in @@ -3710,7 +3710,7 @@ diagonal elements. If `diag = U`, all diagonal elements of `A` are one. If `norm = I`, the condition number is found in the infinity norm. If `norm = O` or `1`, the condition number is found in the one norm. """ -trcon!(norm::AbstractChar, uplo::AbstractChar, diag::AbstractChar, A::AbstractMatrix) +trcon!(norm::AbstractChar, uplo::AbstractChar, diag::AbstractChar, A::ArrayLike{2}) """ trevc!(side, howmny, select, T, VL = similar(T), VR = similar(T)) @@ -3723,8 +3723,8 @@ eigenvectors are found and backtransformed using `VL` and `VR`. If `howmny = S`, only the eigenvectors corresponding to the values in `select` are computed. """ -trevc!(side::AbstractChar, howmny::AbstractChar, select::AbstractVector{BlasInt}, T::AbstractMatrix, - VL::AbstractMatrix = similar(T), VR::AbstractMatrix = similar(T)) +trevc!(side::AbstractChar, howmny::AbstractChar, select::AbstractVector{BlasInt}, T::ArrayLike{2}, + VL::ArrayLike{2} = similar(T), VR::ArrayLike{2} = similar(T)) """ trrfs!(uplo, trans, diag, A, B, X, Ferr, Berr) -> (Ferr, Berr) @@ -3738,8 +3738,8 @@ diagonal elements. If `diag = U`, all diagonal elements of `A` are one. `Ferr` and `Berr` are optional inputs. 
`Ferr` is the forward error and `Berr` is the backward error, each component-wise. """ -trrfs!(uplo::AbstractChar, trans::AbstractChar, diag::AbstractChar, A::AbstractMatrix, B::AbstractVecOrMat, - X::AbstractVecOrMat, Ferr::AbstractVector, Berr::AbstractVector) +trrfs!(uplo::AbstractChar, trans::AbstractChar, diag::AbstractChar, A::ArrayLike{2}, B::VectorOrMatrixLike, + X::VectorOrMatrixLike, Ferr::ArrayLike{1}, Berr::ArrayLike{1}) ## (ST) Symmetric tridiagonal - eigendecomposition for (stev, stebz, stegr, stein, elty) in @@ -3895,12 +3895,12 @@ for (stev, stebz, stegr, stein, elty) in end end end -stegr!(jobz::AbstractChar, dv::AbstractVector, ev::AbstractVector) = stegr!(jobz, 'A', dv, ev, 0.0, 0.0, 0, 0) +stegr!(jobz::AbstractChar, dv::ArrayLike{1}, ev::ArrayLike{1}) = stegr!(jobz, 'A', dv, ev, 0.0, 0.0, 0, 0) # Allow user to skip specification of iblock and isplit -stein!(dv::AbstractVector, ev::AbstractVector, w_in::AbstractVector) = stein!(dv, ev, w_in, zeros(BlasInt,0), zeros(BlasInt,0)) +stein!(dv::ArrayLike{1}, ev::ArrayLike{1}, w_in::ArrayLike{1}) = stein!(dv, ev, w_in, zeros(BlasInt,0), zeros(BlasInt,0)) # Allow user to specify just one eigenvector to get in stein! -stein!(dv::AbstractVector, ev::AbstractVector, eval::Real) = stein!(dv, ev, [eval], zeros(BlasInt,0), zeros(BlasInt,0)) +stein!(dv::ArrayLike{1}, ev::ArrayLike{1}, eval::Real) = stein!(dv, ev, [eval], zeros(BlasInt,0), zeros(BlasInt,0)) """ stev!(job, dv, ev) -> (dv, Zmat) @@ -3910,7 +3910,7 @@ diagonal and `ev` as off-diagonal. If `job = N` only the eigenvalues are found and returned in `dv`. If `job = V` then the eigenvectors are also found and returned in `Zmat`. """ -stev!(job::AbstractChar, dv::AbstractVector, ev::AbstractVector) +stev!(job::AbstractChar, dv::ArrayLike{1}, ev::ArrayLike{1}) """ stebz!(range, order, vl, vu, il, iu, abstol, dv, ev) -> (dv, iblock, isplit) @@ -3923,7 +3923,7 @@ are found. If `range = V`, the eigenvalues in the half-open interval block. If `order = E`, they are ordered across all the blocks. `abstol` can be set as a tolerance for convergence. """ -stebz!(range::AbstractChar, order::AbstractChar, vl, vu, il::Integer, iu::Integer, abstol::Real, dv::AbstractVector, ev::AbstractVector) +stebz!(range::AbstractChar, order::AbstractChar, vl, vu, il::Integer, iu::Integer, abstol::Real, dv::ArrayLike{1}, ev::ArrayLike{1}) """ stegr!(jobz, range, dv, ev, vl, vu, il, iu) -> (w, Z) @@ -3936,7 +3936,7 @@ are found. If `range = V`, the eigenvalues in the half-open interval `il` and `iu` are found. The eigenvalues are returned in `w` and the eigenvectors in `Z`. """ -stegr!(jobz::AbstractChar, range::AbstractChar, dv::AbstractVector, ev::AbstractVector, vl::Real, vu::Real, il::Integer, iu::Integer) +stegr!(jobz::AbstractChar, range::AbstractChar, dv::ArrayLike{1}, ev::ArrayLike{1}, vl::Real, vu::Real, il::Integer, iu::Integer) """ stein!(dv, ev_in, w_in, iblock_in, isplit_in) @@ -3947,7 +3947,7 @@ eigenvalues for which to find corresponding eigenvectors. `iblock_in` specifies the submatrices corresponding to the eigenvalues in `w_in`. `isplit_in` specifies the splitting points between the submatrix blocks. """ -stein!(dv::AbstractVector, ev_in::AbstractVector, w_in::AbstractVector, iblock_in::AbstractVector{BlasInt}, isplit_in::AbstractVector{BlasInt}) +stein!(dv::ArrayLike{1}, ev_in::ArrayLike{1}, w_in::ArrayLike{1}, iblock_in::AbstractVector{BlasInt}, isplit_in::AbstractVector{BlasInt}) ## (SY) symmetric real matrices - Bunch-Kaufman decomposition, ## solvers (direct and factored) and inverse. 
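The tridiagonal wrappers above mutate their arguments and follow LAPACK calling conventions rather than the high-level `LinearAlgebra` API, so a brief usage sketch may help. The data below is illustrative only and is not part of the patch; the calls follow the `ptsv!` and `stev!` signatures documented above.

```julia
using LinearAlgebra
using LinearAlgebra: LAPACK

# Illustrative data for a 3x3 symmetric positive-definite tridiagonal matrix.
dv = [2.0, 2.0, 2.0]        # diagonal
ev = [-1.0, -1.0]           # off-diagonal (length n - 1)

# ptsv!: one-shot solve of A * X = B; the diagonal, off-diagonal and B are
# overwritten, so pass copies if the originals are still needed.
x = LAPACK.ptsv!(copy(dv), copy(ev), [1.0, 0.0, 1.0])

# stev!: eigenvalues only (job = 'N') or eigenvalues and eigenvectors (job = 'V').
w, Z = LAPACK.stev!('V', copy(dv), copy(ev))
# The high-level equivalent is eigen(SymTridiagonal(dv, ev)).
```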
@@ -4909,7 +4909,7 @@ is upper triangular. If `uplo = L`, it is lower triangular. `ipiv` is the pivot vector from the triangular factorization. `A` is overwritten by `L` and `D`. """ -syconv!(uplo::AbstractChar, A::AbstractMatrix, ipiv::AbstractVector{BlasInt}) +syconv!(uplo::AbstractChar, A::ArrayLike{2}, ipiv::AbstractVector{BlasInt}) """ sysv!(uplo, A, B) -> (B, A, ipiv) @@ -4920,7 +4920,7 @@ the upper half of `A` is stored. If `uplo = L`, the lower half is stored. Bunch-Kaufman factorization. `ipiv` contains pivoting information about the factorization. """ -sysv!(uplo::AbstractChar, A::AbstractMatrix, B::AbstractVecOrMat) +sysv!(uplo::AbstractChar, A::ArrayLike{2}, B::VectorOrMatrixLike) """ sytrf!(uplo, A) -> (A, ipiv, info) @@ -4934,7 +4934,7 @@ the error code `info` which is a non-negative integer. If `info` is positive the matrix is singular and the diagonal part of the factorization is exactly zero at position `info`. """ -sytrf!(uplo::AbstractChar, A::AbstractMatrix) +sytrf!(uplo::AbstractChar, A::ArrayLike{2}) """ sytri!(uplo, A, ipiv) @@ -4943,7 +4943,7 @@ Computes the inverse of a symmetric matrix `A` using the results of `sytrf!`. If `uplo = U`, the upper half of `A` is stored. If `uplo = L`, the lower half is stored. `A` is overwritten by its inverse. """ -sytri!(uplo::AbstractChar, A::AbstractMatrix, ipiv::AbstractVector{BlasInt}) +sytri!(uplo::AbstractChar, A::ArrayLike{2}, ipiv::AbstractVector{BlasInt}) """ sytrs!(uplo, A, ipiv, B) @@ -4953,7 +4953,7 @@ results of `sytrf!`. If `uplo = U`, the upper half of `A` is stored. If `uplo = L`, the lower half is stored. `B` is overwritten by the solution `X`. """ -sytrs!(uplo::AbstractChar, A::AbstractMatrix, ipiv::AbstractVector{BlasInt}, B::AbstractVecOrMat) +sytrs!(uplo::AbstractChar, A::ArrayLike{2}, ipiv::AbstractVector{BlasInt}, B::VectorOrMatrixLike) """ @@ -4965,7 +4965,7 @@ the upper half of `A` is stored. If `uplo = L`, the lower half is stored. Bunch-Kaufman factorization. `ipiv` contains pivoting information about the factorization. """ -hesv!(uplo::AbstractChar, A::AbstractMatrix, B::AbstractVecOrMat) +hesv!(uplo::AbstractChar, A::ArrayLike{2}, B::VectorOrMatrixLike) """ hetrf!(uplo, A) -> (A, ipiv, info) @@ -4979,7 +4979,7 @@ the error code `info` which is a non-negative integer. If `info` is positive the matrix is singular and the diagonal part of the factorization is exactly zero at position `info`. """ -hetrf!(uplo::AbstractChar, A::AbstractMatrix) +hetrf!(uplo::AbstractChar, A::ArrayLike{2}) """ hetri!(uplo, A, ipiv) @@ -4988,7 +4988,7 @@ Computes the inverse of a Hermitian matrix `A` using the results of `sytrf!`. If `uplo = U`, the upper half of `A` is stored. If `uplo = L`, the lower half is stored. `A` is overwritten by its inverse. """ -hetri!(uplo::AbstractChar, A::AbstractMatrix, ipiv::AbstractVector{BlasInt}) +hetri!(uplo::AbstractChar, A::ArrayLike{2}, ipiv::AbstractVector{BlasInt}) """ hetrs!(uplo, A, ipiv, B) @@ -4998,7 +4998,7 @@ results of `sytrf!`. If `uplo = U`, the upper half of `A` is stored. If `uplo = L`, the lower half is stored. `B` is overwritten by the solution `X`. """ -hetrs!(uplo::AbstractChar, A::AbstractMatrix, ipiv::AbstractVector{BlasInt}, B::AbstractVecOrMat) +hetrs!(uplo::AbstractChar, A::ArrayLike{2}, ipiv::AbstractVector{BlasInt}, B::VectorOrMatrixLike) # Symmetric (real) eigensolvers for (syev, syevr, sygvd, elty) in @@ -5306,7 +5306,7 @@ Finds the eigenvalues (`jobz = N`) or eigenvalues and eigenvectors (`jobz = V`) of a symmetric matrix `A`. 
If `uplo = U`, the upper triangle of `A` is used. If `uplo = L`, the lower triangle of `A` is used. """ -syev!(jobz::AbstractChar, uplo::AbstractChar, A::AbstractMatrix) +syev!(jobz::AbstractChar, uplo::AbstractChar, A::ArrayLike{2}) """ syevr!(jobz, range, uplo, A, vl, vu, il, iu, abstol) -> (W, Z) @@ -5321,7 +5321,7 @@ found. `abstol` can be set as a tolerance for convergence. The eigenvalues are returned in `W` and the eigenvectors in `Z`. """ -syevr!(jobz::AbstractChar, range::AbstractChar, uplo::AbstractChar, A::AbstractMatrix, +syevr!(jobz::AbstractChar, range::AbstractChar, uplo::AbstractChar, A::ArrayLike{2}, vl::AbstractFloat, vu::AbstractFloat, il::Integer, iu::Integer, abstol::AbstractFloat) """ @@ -5336,7 +5336,7 @@ of `A` and `B` are used. If `uplo = L`, the lower triangles of `A` and `A * B * x = lambda * x`. If `itype = 3`, the problem to solve is `B * A * x = lambda * x`. """ -sygvd!(itype::Integer, jobz::AbstractChar, uplo::AbstractChar, A::AbstractMatrix, B::AbstractMatrix) +sygvd!(itype::Integer, jobz::AbstractChar, uplo::AbstractChar, A::ArrayLike{2}, B::ArrayLike{2}) ## (BD) Bidiagonal matrices - singular value decomposition for (bdsqr, relty, elty) in @@ -5398,7 +5398,7 @@ compute the product `Q' * C`. Returns the singular values in `d`, and the matrix `C` overwritten with `Q' * C`. """ -bdsqr!(uplo::AbstractChar, d::AbstractVector, e_::AbstractVector, Vt::AbstractMatrix, U::AbstractMatrix, C::AbstractMatrix) +bdsqr!(uplo::AbstractChar, d::ArrayLike{1}, e_::ArrayLike{1}, Vt::ArrayLike{2}, U::ArrayLike{2}, C::ArrayLike{2}) #Defined only for real types for (bdsdc, elty) in @@ -5470,7 +5470,7 @@ and vectors are found in compact form. Only works for real types. Returns the singular values in `d`, and if `compq = P`, the compact singular vectors in `iq`. """ -bdsdc!(uplo::AbstractChar, compq::AbstractChar, d::AbstractVector, e_::AbstractVector) +bdsdc!(uplo::AbstractChar, compq::AbstractChar, d::ArrayLike{1}, e_::ArrayLike{1}) for (gecon, elty) in ((:dgecon_,:Float64), @@ -5548,7 +5548,7 @@ the condition number is found in the infinity norm. If `normtype = O` or `1`, the condition number is found in the one norm. `A` must be the result of `getrf!` and `anorm` is the norm of `A` in the relevant norm. """ -gecon!(normtype::AbstractChar, A::AbstractMatrix, anorm) +gecon!(normtype::AbstractChar, A::ArrayLike{2}, anorm) for (gehrd, elty) in ((:dgehrd_,:Float64), @@ -5588,7 +5588,7 @@ for (gehrd, elty) in end end end -gehrd!(A::AbstractMatrix) = gehrd!(1, size(A, 1), A) +gehrd!(A::ArrayLike{2}) = gehrd!(1, size(A, 1), A) """ gehrd!(ilo, ihi, A) -> (A, tau) @@ -5598,7 +5598,7 @@ then `ilo` and `ihi` are the outputs of `gebal!`. Otherwise they should be `ilo = 1` and `ihi = size(A,2)`. `tau` contains the elementary reflectors of the factorization. """ -gehrd!(ilo::Integer, ihi::Integer, A::AbstractMatrix) +gehrd!(ilo::Integer, ihi::Integer, A::ArrayLike{2}) for (orghr, elty) in ((:dorghr_,:Float64), @@ -5646,7 +5646,7 @@ end Explicitly finds `Q`, the orthogonal/unitary matrix from `gehrd!`. `ilo`, `ihi`, `A`, and `tau` must correspond to the input/output to `gehrd!`. 
""" -orghr!(ilo::Integer, ihi::Integer, A::AbstractMatrix, tau::AbstractVector) +orghr!(ilo::Integer, ihi::Integer, A::ArrayLike{2}, tau::ArrayLike{1}) for (ormhr, elty) in ((:dormhr_,:Float64), @@ -5749,7 +5749,7 @@ If `uplo = U`, the upper half of `A` is stored; if `uplo = L`, the lower half is `tau` contains the elementary reflectors of the factorization, `d` contains the diagonal and `e` contains the upper/lower diagonal. """ -hetrd!(uplo::AbstractChar, A::AbstractMatrix) +hetrd!(uplo::AbstractChar, A::ArrayLike{2}) for (orgtr, elty) in ((:dorgtr_,:Float64), @@ -5799,7 +5799,7 @@ end Explicitly finds `Q`, the orthogonal/unitary matrix from `hetrd!`. `uplo`, `A`, and `tau` must correspond to the input/output to `hetrd!`. """ -orgtr!(uplo::AbstractChar, A::AbstractMatrix, tau::AbstractVector) +orgtr!(uplo::AbstractChar, A::ArrayLike{2}, tau::ArrayLike{1}) for (ormtr, elty) in ((:dormtr_,:Float64), @@ -6052,7 +6052,7 @@ vectors (`jobvs = V`) of matrix `A`. `A` is overwritten by its Schur form. Returns `A`, `vs` containing the Schur vectors, and `w`, containing the eigenvalues. """ -gees!(jobvs::AbstractChar, A::AbstractMatrix) +gees!(jobvs::AbstractChar, A::ArrayLike{2}) """ @@ -6065,7 +6065,7 @@ vectors (`jobsvl = V`), or right Schur vectors (`jobvsr = V`) of `A` and The generalized eigenvalues are returned in `alpha` and `beta`. The left Schur vectors are returned in `vsl` and the right Schur vectors are returned in `vsr`. """ -gges!(jobvsl::AbstractChar, jobvsr::AbstractChar, A::AbstractMatrix, B::AbstractMatrix) +gges!(jobvsl::AbstractChar, jobvsr::AbstractChar, A::ArrayLike{2}, B::ArrayLike{2}) for (trexc, trsen, tgsen, elty) in ((:dtrexc_, :dtrsen_, :dtgsen_, :Float64), @@ -6370,7 +6370,7 @@ Reorder the Schur factorization of a matrix. If `compq = V`, the Schur vectors `Q` are reordered. If `compq = N` they are not modified. `ifst` and `ilst` specify the reordering of the vectors. """ -trexc!(compq::AbstractChar, ifst::BlasInt, ilst::BlasInt, T::AbstractMatrix, Q::AbstractMatrix) +trexc!(compq::AbstractChar, ifst::BlasInt, ilst::BlasInt, T::ArrayLike{2}, Q::ArrayLike{2}) """ trsen!(compq, job, select, T, Q) -> (T, Q, w, s, sep) @@ -6388,7 +6388,7 @@ Returns `T`, `Q`, reordered eigenvalues in `w`, the condition number of the cluster of eigenvalues `s`, and the condition number of the invariant subspace `sep`. """ -trsen!(compq::AbstractChar, job::AbstractChar, select::AbstractVector{BlasInt}, T::AbstractMatrix, Q::AbstractMatrix) +trsen!(compq::AbstractChar, job::AbstractChar, select::AbstractVector{BlasInt}, T::ArrayLike{2}, Q::ArrayLike{2}) """ tgsen!(select, S, T, Q, Z) -> (S, T, alpha, beta, Q, Z) @@ -6396,7 +6396,7 @@ trsen!(compq::AbstractChar, job::AbstractChar, select::AbstractVector{BlasInt}, Reorders the vectors of a generalized Schur decomposition. `select` specifies the eigenvalues in each cluster. """ -tgsen!(select::AbstractVector{BlasInt}, S::AbstractMatrix, T::AbstractMatrix, Q::AbstractMatrix, Z::AbstractMatrix) +tgsen!(select::AbstractVector{BlasInt}, S::ArrayLike{2}, T::ArrayLike{2}, Q::ArrayLike{2}, Z::ArrayLike{2}) for (fn, elty, relty) in ((:dtrsyl_, :Float64, :Float64), (:strsyl_, :Float32, :Float32), @@ -6442,6 +6442,6 @@ transposed. Similarly for `transb` and `B`. If `isgn = 1`, the equation Returns `X` (overwriting `C`) and `scale`. 
""" -trsyl!(transa::AbstractChar, transb::AbstractChar, A::AbstractMatrix, B::AbstractMatrix, C::AbstractMatrix, isgn::Int=1) +trsyl!(transa::AbstractChar, transb::AbstractChar, A::ArrayLike{2}, B::ArrayLike{2}, C::ArrayLike{2}, isgn::Int=1) end # module diff --git a/stdlib/LinearAlgebra/src/ldlt.jl b/stdlib/LinearAlgebra/src/ldlt.jl index de3716948f65d..4acb0edc99c98 100644 --- a/stdlib/LinearAlgebra/src/ldlt.jl +++ b/stdlib/LinearAlgebra/src/ldlt.jl @@ -49,7 +49,7 @@ struct LDLt{T,S<:AbstractMatrix{T}} <: Factorization{T} end end LDLt(data::AbstractMatrix{T}) where {T} = LDLt{T,typeof(data)}(data) -LDLt{T}(data::AbstractMatrix) where {T} = LDLt(convert(AbstractMatrix{T}, data)::AbstractMatrix{T}) +LDLt{T}(data::ArrayLike{2}) where {T} = LDLt(convert(AbstractMatrix{T}, data)::AbstractMatrix{T}) size(S::LDLt) = size(S.data) size(S::LDLt, i::Integer) = size(S.data, i) @@ -165,7 +165,7 @@ end factorize(S::SymTridiagonal) = ldlt(S) -function ldiv!(S::LDLt{<:Any,<:SymTridiagonal}, B::AbstractVecOrMat) +function ldiv!(S::LDLt{<:Any,<:SymTridiagonal}, B::VectorOrMatrixLike) require_one_based_indexing(B) n, nrhs = size(B, 1), size(B, 2) if size(S,1) != n @@ -196,7 +196,7 @@ function ldiv!(S::LDLt{<:Any,<:SymTridiagonal}, B::AbstractVecOrMat) return B end -rdiv!(B::AbstractVecOrMat, S::LDLt{<:Any,<:SymTridiagonal}) = +rdiv!(B::VectorOrMatrixLike, S::LDLt{<:Any,<:SymTridiagonal}) = transpose(ldiv!(S, transpose(B))) function logabsdet(F::LDLt{<:Any,<:SymTridiagonal}) diff --git a/stdlib/LinearAlgebra/src/lq.jl b/stdlib/LinearAlgebra/src/lq.jl index 8006fc5509765..4754d4944cbf9 100644 --- a/stdlib/LinearAlgebra/src/lq.jl +++ b/stdlib/LinearAlgebra/src/lq.jl @@ -47,7 +47,7 @@ struct LQ{T,S<:AbstractMatrix{T}} <: Factorization{T} end end LQ(factors::AbstractMatrix{T}, τ::Vector{T}) where {T} = LQ{T,typeof(factors)}(factors, τ) -function LQ{T}(factors::AbstractMatrix, τ::AbstractVector) where {T} +function LQ{T}(factors::ArrayLike{2}, τ::ArrayLike{1}) where {T} LQ(convert(AbstractMatrix{T}, factors), convert(Vector{T}, τ)) end @@ -56,10 +56,10 @@ Base.iterate(S::LQ) = (S.L, Val(:Q)) Base.iterate(S::LQ, ::Val{:Q}) = (S.Q, Val(:done)) Base.iterate(S::LQ, ::Val{:done}) = nothing -struct LQPackedQ{T,S<:AbstractMatrix} <: AbstractMatrix{T} +struct LQPackedQ{T,S<:ArrayLike{2}} <: AbstractMatrix{T} factors::Matrix{T} τ::Vector{T} - LQPackedQ{T,S}(factors::AbstractMatrix{T}, τ::Vector{T}) where {T,S<:AbstractMatrix} = new(factors, τ) + LQPackedQ{T,S}(factors::AbstractMatrix{T}, τ::Vector{T}) where {T,S<:ArrayLike{2}} = new(factors, τ) end LQPackedQ(factors::AbstractMatrix{T}, τ::Vector{T}) where {T} = LQPackedQ{T,typeof(factors)}(factors, τ) @@ -326,7 +326,7 @@ function (\)(F::LQ{T}, B::VecOrMat{Complex{T}}) where T<:BlasReal c2r = reshape(copy(transpose(reinterpret(T, reshape(B, (1, length(B)))))), size(B, 1), 2*size(B, 2)) x = ldiv!(F, c2r) return reshape(copy(reinterpret(Complex{T}, copy(transpose(reshape(x, div(length(x), 2), 2))))), - isa(B, AbstractVector) ? (size(F,2),) : (size(F,2), size(B,2))) + isa(B, ArrayLike{1}) ? 
(size(F,2),) : (size(F,2), size(B,2))) end diff --git a/stdlib/LinearAlgebra/src/lu.jl b/stdlib/LinearAlgebra/src/lu.jl index af2669855ac91..2f19ef473c2d7 100644 --- a/stdlib/LinearAlgebra/src/lu.jl +++ b/stdlib/LinearAlgebra/src/lu.jl @@ -60,7 +60,7 @@ end function LU(factors::AbstractMatrix{T}, ipiv::Vector{BlasInt}, info::BlasInt) where {T} LU{T,typeof(factors)}(factors, ipiv, info) end -function LU{T}(factors::AbstractMatrix, ipiv::AbstractVector{<:Integer}, info::Integer) where {T} +function LU{T}(factors::ArrayLike{2}, ipiv::AbstractVector{<:Integer}, info::Integer) where {T} LU(convert(AbstractMatrix{T}, factors), convert(Vector{BlasInt}, ipiv), BlasInt(info)) @@ -423,18 +423,18 @@ end (\)(A::Transpose{T,<:LU{T,<:StridedMatrix}}, B::Transpose{T,<:StridedVecOrMat{T}}) where {T<:BlasFloat} = LAPACK.getrs!('T', A.parent.factors, A.parent.ipiv, copy(B)) -function (/)(A::AbstractMatrix, F::Adjoint{<:Any,<:LU}) +function (/)(A::ArrayLike{2}, F::Adjoint{<:Any,<:LU}) T = promote_type(eltype(A), eltype(F)) return adjoint(ldiv!(F.parent, copy_oftype(adjoint(A), T))) end # To avoid ambiguities with definitions in adjtrans.jl and factorizations.jl -(/)(adjA::Adjoint{<:Any,<:AbstractVector}, F::Adjoint{<:Any,<:LU}) = adjoint(F.parent \ adjA.parent) -(/)(adjA::Adjoint{<:Any,<:AbstractMatrix}, F::Adjoint{<:Any,<:LU}) = adjoint(F.parent \ adjA.parent) -function (/)(trA::Transpose{<:Any,<:AbstractVector}, F::Adjoint{<:Any,<:LU}) +(/)(adjA::Adjoint{<:Any,<:ArrayLike{1}}, F::Adjoint{<:Any,<:LU}) = adjoint(F.parent \ adjA.parent) +(/)(adjA::Adjoint{<:Any,<:ArrayLike{2}}, F::Adjoint{<:Any,<:LU}) = adjoint(F.parent \ adjA.parent) +function (/)(trA::Transpose{<:Any,<:ArrayLike{1}}, F::Adjoint{<:Any,<:LU}) T = promote_type(eltype(trA), eltype(F)) return adjoint(ldiv!(F.parent, convert(AbstractVector{T}, conj(trA.parent)))) end -function (/)(trA::Transpose{<:Any,<:AbstractMatrix}, F::Adjoint{<:Any,<:LU}) +function (/)(trA::Transpose{<:Any,<:ArrayLike{2}}, F::Adjoint{<:Any,<:LU}) T = promote_type(eltype(trA), eltype(F)) return adjoint(ldiv!(F.parent, convert(AbstractMatrix{T}, conj(trA.parent)))) end @@ -577,7 +577,7 @@ function getproperty(F::LU{T,Tridiagonal{T,V}}, d::Symbol) where {T,V} end # See dgtts2.f -function ldiv!(A::LU{T,Tridiagonal{T,V}}, B::AbstractVecOrMat) where {T,V} +function ldiv!(A::LU{T,Tridiagonal{T,V}}, B::VectorOrMatrixLike) where {T,V} require_one_based_indexing(B) n = size(A,1) if n != size(B,1) @@ -609,7 +609,7 @@ function ldiv!(A::LU{T,Tridiagonal{T,V}}, B::AbstractVecOrMat) where {T,V} return B end -function ldiv!(transA::Transpose{<:Any,<:LU{T,Tridiagonal{T,V}}}, B::AbstractVecOrMat) where {T,V} +function ldiv!(transA::Transpose{<:Any,<:LU{T,Tridiagonal{T,V}}}, B::VectorOrMatrixLike) where {T,V} require_one_based_indexing(B) A = transA.parent n = size(A,1) @@ -645,8 +645,8 @@ function ldiv!(transA::Transpose{<:Any,<:LU{T,Tridiagonal{T,V}}}, B::AbstractVec return B end -# Ac_ldiv_B!(A::LU{T,Tridiagonal{T}}, B::AbstractVecOrMat) where {T<:Real} = At_ldiv_B!(A,B) -function ldiv!(adjA::Adjoint{<:Any,LU{T,Tridiagonal{T,V}}}, B::AbstractVecOrMat) where {T,V} +# Ac_ldiv_B!(A::LU{T,Tridiagonal{T}}, B::VectorOrMatrixLike) where {T<:Real} = At_ldiv_B!(A,B) +function ldiv!(adjA::Adjoint{<:Any,LU{T,Tridiagonal{T,V}}}, B::VectorOrMatrixLike) where {T,V} require_one_based_indexing(B) A = adjA.parent n = size(A,1) @@ -682,9 +682,9 @@ function ldiv!(adjA::Adjoint{<:Any,LU{T,Tridiagonal{T,V}}}, B::AbstractVecOrMat) return B end -rdiv!(B::AbstractMatrix, A::LU) = transpose(ldiv!(transpose(A), 
transpose(B))) -rdiv!(B::AbstractMatrix, A::Transpose{<:Any,<:LU}) = transpose(ldiv!(A.parent, transpose(B))) -rdiv!(B::AbstractMatrix, A::Adjoint{<:Any,<:LU}) = adjoint(ldiv!(A.parent, adjoint(B))) +rdiv!(B::ArrayLike{2}, A::LU) = transpose(ldiv!(transpose(A), transpose(B))) +rdiv!(B::ArrayLike{2}, A::Transpose{<:Any,<:LU}) = transpose(ldiv!(A.parent, transpose(B))) +rdiv!(B::ArrayLike{2}, A::Adjoint{<:Any,<:LU}) = adjoint(ldiv!(A.parent, adjoint(B))) # Conversions AbstractMatrix(F::LU) = (F.L * F.U)[invperm(F.p),:] diff --git a/stdlib/LinearAlgebra/src/matmul.jl b/stdlib/LinearAlgebra/src/matmul.jl index 1bbd2230c315d..73c28f6d61fc8 100644 --- a/stdlib/LinearAlgebra/src/matmul.jl +++ b/stdlib/LinearAlgebra/src/matmul.jl @@ -52,15 +52,15 @@ function (*)(A::AbstractMatrix{T}, x::AbstractVector{S}) where {T,S} end # these will throw a DimensionMismatch unless B has 1 row (or 1 col for transposed case): -function *(a::AbstractVector, transB::Transpose{<:Any,<:AbstractMatrix}) +function *(a::ArrayLike{1}, transB::Transpose{<:Any,<:ArrayLike{2}}) B = transB.parent reshape(a,length(a),1)*transpose(B) end -function *(a::AbstractVector, adjB::Adjoint{<:Any,<:AbstractMatrix}) +function *(a::ArrayLike{1}, adjB::Adjoint{<:Any,<:ArrayLike{2}}) B = adjB.parent reshape(a,length(a),1)*adjoint(B) end -(*)(a::AbstractVector, B::AbstractMatrix) = reshape(a,length(a),1)*B +(*)(a::ArrayLike{1}, B::ArrayLike{2}) = reshape(a,length(a),1)*B @inline mul!(y::StridedVector{T}, A::StridedVecOrMat{T}, x::StridedVector{T}, alpha::Number, beta::Number) where {T<:BlasFloat} = @@ -77,7 +77,7 @@ for elty in (Float32,Float64) end end end -@inline mul!(y::AbstractVector, A::AbstractVecOrMat, x::AbstractVector, +@inline mul!(y::ArrayLike{1}, A::VectorOrMatrixLike, x::ArrayLike{1}, alpha::Number, beta::Number) = generic_matvecmul!(y, 'N', A, x, MulAddMul(alpha, beta)) @@ -96,7 +96,7 @@ end A = transA.parent return gemv!(y, 'T', A, x, alpha, beta) end -@inline function mul!(y::AbstractVector, transA::Transpose{<:Any,<:AbstractVecOrMat}, x::AbstractVector, +@inline function mul!(y::ArrayLike{1}, transA::Transpose{<:Any,<:VectorOrMatrixLike}, x::ArrayLike{1}, alpha::Number, beta::Number) A = transA.parent return generic_matvecmul!(y, 'T', A, x, MulAddMul(alpha, beta)) @@ -123,20 +123,20 @@ end A = adjA.parent return gemv!(y, 'C', A, x, alpha, beta) end -@inline function mul!(y::AbstractVector, adjA::Adjoint{<:Any,<:AbstractVecOrMat}, x::AbstractVector, +@inline function mul!(y::ArrayLike{1}, adjA::Adjoint{<:Any,<:VectorOrMatrixLike}, x::ArrayLike{1}, alpha::Number, beta::Number) A = adjA.parent return generic_matvecmul!(y, 'C', A, x, MulAddMul(alpha, beta)) end # Vector-Matrix multiplication -(*)(x::AdjointAbsVec, A::AbstractMatrix) = (A'*x')' -(*)(x::TransposeAbsVec, A::AbstractMatrix) = transpose(transpose(A)*transpose(x)) +(*)(x::AdjointAbsVec, A::ArrayLike{2}) = (A'*x')' +(*)(x::TransposeAbsVec, A::ArrayLike{2}) = transpose(transpose(A)*transpose(x)) # Matrix-matrix multiplication """ - *(A::AbstractMatrix, B::AbstractMatrix) + *(A::ArrayLike{2}, B::ArrayLike{2}) Matrix multiplication. 
@@ -148,7 +148,7 @@ julia> [1 1; 0 1] * [1 0; 1 1] 1 1 ``` """ -function (*)(A::AbstractMatrix, B::AbstractMatrix) +function (*)(A::ArrayLike{2}, B::ArrayLike{2}) TS = promote_op(matprod, eltype(A), eltype(B)) mul!(similar(B, TS, (size(A,1), size(B,2))), A, B) end @@ -231,7 +231,7 @@ julia> C 730.0 740.0 ``` """ -@inline mul!(C::AbstractMatrix, A::AbstractVecOrMat, B::AbstractVecOrMat, +@inline mul!(C::ArrayLike{2}, A::VectorOrMatrixLike, B::VectorOrMatrixLike, alpha::Number, beta::Number) = generic_matmatmul!(C, 'N', 'N', A, B, MulAddMul(alpha, beta)) @@ -310,7 +310,7 @@ lmul!(A, B) return gemm_wrapper!(C, 'T', 'N', A, B, alpha, beta) end end -@inline function mul!(C::AbstractMatrix, transA::Transpose{<:Any,<:AbstractVecOrMat}, B::AbstractVecOrMat, +@inline function mul!(C::ArrayLike{2}, transA::Transpose{<:Any,<:VectorOrMatrixLike}, B::VectorOrMatrixLike, alpha::Number, beta::Number) A = transA.parent return generic_matmatmul!(C, 'T', 'N', A, B, MulAddMul(alpha, beta)) @@ -337,11 +337,11 @@ for elty in (Float32,Float64) end end end -# collapsing the following two defs with C::AbstractVecOrMat yields ambiguities -@inline mul!(C::AbstractVector, A::AbstractVecOrMat, transB::Transpose{<:Any,<:AbstractVecOrMat}, +# collapsing the following two defs with C::VectorOrMatrixLike yields ambiguities +@inline mul!(C::ArrayLike{1}, A::VectorOrMatrixLike, transB::Transpose{<:Any,<:VectorOrMatrixLike}, alpha::Number, beta::Number) = generic_matmatmul!(C, 'N', 'T', A, transB.parent, MulAddMul(alpha, beta)) -@inline mul!(C::AbstractMatrix, A::AbstractVecOrMat, transB::Transpose{<:Any,<:AbstractVecOrMat}, +@inline mul!(C::ArrayLike{2}, A::VectorOrMatrixLike, transB::Transpose{<:Any,<:VectorOrMatrixLike}, alpha::Number, beta::Number) = generic_matmatmul!(C, 'N', 'T', A, transB.parent, MulAddMul(alpha, beta)) @@ -351,7 +351,7 @@ end B = transB.parent return gemm_wrapper!(C, 'T', 'T', A, B, alpha, beta) end -@inline function mul!(C::AbstractMatrix, transA::Transpose{<:Any,<:AbstractVecOrMat}, transB::Transpose{<:Any,<:AbstractVecOrMat}, +@inline function mul!(C::ArrayLike{2}, transA::Transpose{<:Any,<:VectorOrMatrixLike}, transB::Transpose{<:Any,<:VectorOrMatrixLike}, alpha::Number, beta::Number) A = transA.parent B = transB.parent @@ -364,7 +364,7 @@ end B = transB.parent return gemm_wrapper!(C, 'T', 'C', A, B, alpha, beta) end -@inline function mul!(C::AbstractMatrix, transA::Transpose{<:Any,<:AbstractVecOrMat}, transB::Adjoint{<:Any,<:AbstractVecOrMat}, +@inline function mul!(C::ArrayLike{2}, transA::Transpose{<:Any,<:VectorOrMatrixLike}, transB::Adjoint{<:Any,<:VectorOrMatrixLike}, alpha::Number, beta::Number) A = transA.parent B = transB.parent @@ -385,7 +385,7 @@ end return gemm_wrapper!(C, 'C', 'N', A, B, alpha, beta) end end -@inline function mul!(C::AbstractMatrix, adjA::Adjoint{<:Any,<:AbstractVecOrMat}, B::AbstractVecOrMat, +@inline function mul!(C::ArrayLike{2}, adjA::Adjoint{<:Any,<:VectorOrMatrixLike}, B::VectorOrMatrixLike, alpha::Number, beta::Number) A = adjA.parent return generic_matmatmul!(C, 'C', 'N', A, B, MulAddMul(alpha, beta)) @@ -405,7 +405,7 @@ end return gemm_wrapper!(C, 'N', 'C', A, B, alpha, beta) end end -@inline function mul!(C::AbstractMatrix, A::AbstractVecOrMat, adjB::Adjoint{<:Any,<:AbstractVecOrMat}, +@inline function mul!(C::ArrayLike{2}, A::VectorOrMatrixLike, adjB::Adjoint{<:Any,<:VectorOrMatrixLike}, alpha::Number, beta::Number) B = adjB.parent return generic_matmatmul!(C, 'N', 'C', A, B, MulAddMul(alpha, beta)) @@ -417,13 +417,13 @@ end B = adjB.parent return 
gemm_wrapper!(C, 'C', 'C', A, B, alpha, beta) end -@inline function mul!(C::AbstractMatrix, adjA::Adjoint{<:Any,<:AbstractVecOrMat}, adjB::Adjoint{<:Any,<:AbstractVecOrMat}, +@inline function mul!(C::ArrayLike{2}, adjA::Adjoint{<:Any,<:VectorOrMatrixLike}, adjB::Adjoint{<:Any,<:VectorOrMatrixLike}, alpha::Number, beta::Number) A = adjA.parent B = adjB.parent return generic_matmatmul!(C, 'C', 'C', A, B, MulAddMul(alpha, beta)) end -@inline function mul!(C::AbstractMatrix, adjA::Adjoint{<:Any,<:AbstractVecOrMat}, transB::Transpose{<:Any,<:AbstractVecOrMat}, +@inline function mul!(C::ArrayLike{2}, adjA::Adjoint{<:Any,<:VectorOrMatrixLike}, transB::Transpose{<:Any,<:VectorOrMatrixLike}, alpha::Number, beta::Number) A = adjA.parent B = transB.parent @@ -432,7 +432,7 @@ end # Supporting functions for matrix multiplication # copy transposed(adjoint) of upper(lower) side-digonals. Optionally include diagonal. -@inline function copytri!(A::AbstractMatrix, uplo::AbstractChar, conjugate::Bool=false, diag::Bool=false) +@inline function copytri!(A::ArrayLike{2}, uplo::AbstractChar, conjugate::Bool=false, diag::Bool=false) n = checksquare(A) off = diag ? 0 : 1 if uplo == 'U' @@ -602,9 +602,9 @@ end # blas.jl defines matmul for floats; other integer and mixed precision # cases are handled here -lapack_size(t::AbstractChar, M::AbstractVecOrMat) = (size(M, t=='N' ? 1 : 2), size(M, t=='N' ? 2 : 1)) +lapack_size(t::AbstractChar, M::VectorOrMatrixLike) = (size(M, t=='N' ? 1 : 2), size(M, t=='N' ? 2 : 1)) -function copyto!(B::AbstractVecOrMat, ir_dest::UnitRange{Int}, jr_dest::UnitRange{Int}, tM::AbstractChar, M::AbstractVecOrMat, ir_src::UnitRange{Int}, jr_src::UnitRange{Int}) +function copyto!(B::VectorOrMatrixLike, ir_dest::UnitRange{Int}, jr_dest::UnitRange{Int}, tM::AbstractChar, M::VectorOrMatrixLike, ir_src::UnitRange{Int}, jr_src::UnitRange{Int}) if tM == 'N' copyto!(B, ir_dest, jr_dest, M, ir_src, jr_src) else @@ -614,7 +614,7 @@ function copyto!(B::AbstractVecOrMat, ir_dest::UnitRange{Int}, jr_dest::UnitRang B end -function copy_transpose!(B::AbstractMatrix, ir_dest::UnitRange{Int}, jr_dest::UnitRange{Int}, tM::AbstractChar, M::AbstractVecOrMat, ir_src::UnitRange{Int}, jr_src::UnitRange{Int}) +function copy_transpose!(B::ArrayLike{2}, ir_dest::UnitRange{Int}, jr_dest::UnitRange{Int}, tM::AbstractChar, M::VectorOrMatrixLike, ir_src::UnitRange{Int}, jr_src::UnitRange{Int}) if tM == 'N' LinearAlgebra.copy_transpose!(B, ir_dest, jr_dest, M, ir_src, jr_src) else @@ -630,7 +630,7 @@ end # NOTE: the generic version is also called as fallback for # strides != 1 cases -function generic_matvecmul!(C::AbstractVector{R}, tA, A::AbstractVecOrMat, B::AbstractVector, +function generic_matvecmul!(C::AbstractVector{R}, tA, A::VectorOrMatrixLike, B::ArrayLike{1}, _add::MulAddMul = MulAddMul()) where R require_one_based_indexing(C, A, B) mB = length(B) @@ -706,7 +706,7 @@ const Abuf = [Vector{UInt8}(undef, tilebufsize)] const Bbuf = [Vector{UInt8}(undef, tilebufsize)] const Cbuf = [Vector{UInt8}(undef, tilebufsize)] -function generic_matmatmul!(C::AbstractMatrix, tA, tB, A::AbstractMatrix, B::AbstractMatrix, +function generic_matmatmul!(C::ArrayLike{2}, tA, tB, A::ArrayLike{2}, B::ArrayLike{2}, _add::MulAddMul=MulAddMul()) mA, nA = lapack_size(tA, A) mB, nB = lapack_size(tB, B) @@ -724,7 +724,7 @@ function generic_matmatmul!(C::AbstractMatrix, tA, tB, A::AbstractMatrix, B::Abs _generic_matmatmul!(C, tA, tB, A, B, _add) end -generic_matmatmul!(C::AbstractVecOrMat, tA, tB, A::AbstractVecOrMat, B::AbstractVecOrMat, 
_add::MulAddMul) = +generic_matmatmul!(C::VectorOrMatrixLike, tA, tB, A::VectorOrMatrixLike, B::VectorOrMatrixLike, _add::MulAddMul) = _generic_matmatmul!(C, tA, tB, A, B, _add) function _generic_matmatmul!(C::AbstractVecOrMat{R}, tA, tB, A::AbstractVecOrMat{T}, B::AbstractVecOrMat{S}, @@ -907,7 +907,7 @@ function matmul2x2(tA, tB, A::AbstractMatrix{T}, B::AbstractMatrix{S}) where {T, matmul2x2!(similar(B, promote_op(matprod, T, S), 2, 2), tA, tB, A, B) end -function matmul2x2!(C::AbstractMatrix, tA, tB, A::AbstractMatrix, B::AbstractMatrix, +function matmul2x2!(C::ArrayLike{2}, tA, tB, A::ArrayLike{2}, B::ArrayLike{2}, _add::MulAddMul = MulAddMul()) require_one_based_indexing(C, A, B) if !(size(A) == size(B) == size(C) == (2,2)) @@ -950,7 +950,7 @@ function matmul3x3(tA, tB, A::AbstractMatrix{T}, B::AbstractMatrix{S}) where {T, matmul3x3!(similar(B, promote_op(matprod, T, S), 3, 3), tA, tB, A, B) end -function matmul3x3!(C::AbstractMatrix, tA, tB, A::AbstractMatrix, B::AbstractMatrix, +function matmul3x3!(C::ArrayLike{2}, tA, tB, A::ArrayLike{2}, B::ArrayLike{2}, _add::MulAddMul = MulAddMul()) require_one_based_indexing(C, A, B) if !(size(A) == size(B) == size(C) == (3,3)) diff --git a/stdlib/LinearAlgebra/src/qr.jl b/stdlib/LinearAlgebra/src/qr.jl index 15c6bf2d5bc0a..c891fdec56bb0 100644 --- a/stdlib/LinearAlgebra/src/qr.jl +++ b/stdlib/LinearAlgebra/src/qr.jl @@ -44,7 +44,7 @@ struct QR{T,S<:AbstractMatrix{T}} <: Factorization{T} end end QR(factors::AbstractMatrix{T}, τ::Vector{T}) where {T} = QR{T,typeof(factors)}(factors, τ) -function QR{T}(factors::AbstractMatrix, τ::AbstractVector) where {T} +function QR{T}(factors::ArrayLike{2}, τ::ArrayLike{1}) where {T} QR(convert(AbstractMatrix{T}, factors), convert(Vector{T}, τ)) end @@ -118,7 +118,7 @@ struct QRCompactWY{S,M<:AbstractMatrix{S}} <: Factorization{S} end end QRCompactWY(factors::AbstractMatrix{S}, T::Matrix{S}) where {S} = QRCompactWY{S,typeof(factors)}(factors, T) -function QRCompactWY{S}(factors::AbstractMatrix, T::AbstractMatrix) where {S} +function QRCompactWY{S}(factors::ArrayLike{2}, T::ArrayLike{2}) where {S} QRCompactWY(convert(AbstractMatrix{S}, factors), convert(Matrix{S}, T)) end @@ -172,7 +172,7 @@ struct QRPivoted{T,S<:AbstractMatrix{T}} <: Factorization{T} end QRPivoted(factors::AbstractMatrix{T}, τ::Vector{T}, jpvt::Vector{BlasInt}) where {T} = QRPivoted{T,typeof(factors)}(factors, τ, jpvt) -function QRPivoted{T}(factors::AbstractMatrix, τ::AbstractVector, jpvt::AbstractVector) where {T} +function QRPivoted{T}(factors::ArrayLike{2}, τ::ArrayLike{1}, jpvt::ArrayLike{1}) where {T} QRPivoted(convert(AbstractMatrix{T}, factors), convert(Vector{T}, τ), convert(Vector{BlasInt}, jpvt)) @@ -383,7 +383,7 @@ function qr(A::AbstractMatrix{T}, arg...; kwargs...) where T return qr!(AA, arg...; kwargs...) end qr(x::Number) = qr(fill(x,1,1)) -function qr(v::AbstractVector) +function qr(v::ArrayLike{1}) require_one_based_indexing(v) qr(reshape(v, (length(v), 1))) end @@ -468,7 +468,7 @@ Base.propertynames(F::QRPivoted, private::Bool=false) = abstract type AbstractQ{T} <: AbstractMatrix{T} end """ - QRPackedQ <: AbstractMatrix + QRPackedQ <: ArrayLike{2} The orthogonal/unitary ``Q`` matrix of a QR factorization stored in [`QR`](@ref) or [`QRPivoted`](@ref) format. 
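Since `QRPackedQ` (and the related `QRCompactWYQ`) keep `Q` in factored form, a short sketch of typical use of the factorization objects above may be helpful; the array values are illustrative only and not part of the patch.

```julia
using LinearAlgebra

A = [1.0 2.0; 3.0 4.0; 5.0 6.0]
F = qr(A)                   # factorization object; F.R is upper triangular
y = F.Q * [1.0, 2.0, 3.0]   # applies the stored reflectors; no dense Q is formed
Qthin = Matrix(F.Q)         # materialize the thin (3×2) factor only when needed
x = F \ [1.0, 0.0, 1.0]     # least-squares solution via the factorization
```

Keeping `Q` packed avoids allocating an `m × m` dense matrix; for BLAS element types, products such as `F.Q * v` dispatch to the `ormqr!`/`gemqrt!` wrappers documented earlier.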
@@ -483,12 +483,12 @@ struct QRPackedQ{T,S<:AbstractMatrix{T}} <: AbstractQ{T} end end QRPackedQ(factors::AbstractMatrix{T}, τ::Vector{T}) where {T} = QRPackedQ{T,typeof(factors)}(factors, τ) -function QRPackedQ{T}(factors::AbstractMatrix, τ::AbstractVector) where {T} +function QRPackedQ{T}(factors::ArrayLike{2}, τ::ArrayLike{1}) where {T} QRPackedQ(convert(AbstractMatrix{T}, factors), convert(Vector{T}, τ)) end """ - QRCompactWYQ <: AbstractMatrix + QRCompactWYQ <: ArrayLike{2} The orthogonal/unitary ``Q`` matrix of a QR factorization stored in [`QRCompactWY`](@ref) format. @@ -503,7 +503,7 @@ struct QRCompactWYQ{S, M<:AbstractMatrix{S}} <: AbstractQ{S} end end QRCompactWYQ(factors::AbstractMatrix{S}, T::Matrix{S}) where {S} = QRCompactWYQ{S,typeof(factors)}(factors, T) -function QRCompactWYQ{S}(factors::AbstractMatrix, T::AbstractMatrix) where {S} +function QRCompactWYQ{S}(factors::ArrayLike{2}, T::ArrayLike{2}) where {S} QRCompactWYQ(convert(AbstractMatrix{S}, factors), convert(Matrix{S}, T)) end @@ -537,7 +537,7 @@ lmul!(A::QRCompactWYQ{T,S}, B::StridedVecOrMat{T}) where {T<:BlasFloat, S<:Strid LAPACK.gemqrt!('L','N',A.factors,A.T,B) lmul!(A::QRPackedQ{T,S}, B::StridedVecOrMat{T}) where {T<:BlasFloat, S<:StridedMatrix} = LAPACK.ormqr!('L','N',A.factors,A.τ,B) -function lmul!(A::QRPackedQ, B::AbstractVecOrMat) +function lmul!(A::QRPackedQ, B::VectorOrMatrixLike) require_one_based_indexing(B) mA, nA = size(A.factors) mB, nB = size(B,1), size(B,2) @@ -597,7 +597,7 @@ lmul!(adjA::Adjoint{<:Any,<:QRPackedQ{T,S}}, B::StridedVecOrMat{T}) where {T<:Bl (A = adjA.parent; LAPACK.ormqr!('L','T',A.factors,A.τ,B)) lmul!(adjA::Adjoint{<:Any,<:QRPackedQ{T,S}}, B::StridedVecOrMat{T}) where {T<:BlasComplex,S<:StridedMatrix} = (A = adjA.parent; LAPACK.ormqr!('L','C',A.factors,A.τ,B)) -function lmul!(adjA::Adjoint{<:Any,<:QRPackedQ}, B::AbstractVecOrMat) +function lmul!(adjA::Adjoint{<:Any,<:QRPackedQ}, B::VectorOrMatrixLike) require_one_based_indexing(B) A = adjA.parent mA, nA = size(A.factors) @@ -860,12 +860,12 @@ end # convenience methods ## return only the solution of a least squares problem while avoiding promoting ## vectors to matrices. -_cut_B(x::AbstractVector, r::UnitRange) = length(x) > length(r) ? x[r] : x -_cut_B(X::AbstractMatrix, r::UnitRange) = size(X, 1) > length(r) ? X[r,:] : X +_cut_B(x::ArrayLike{1}, r::UnitRange) = length(x) > length(r) ? x[r] : x +_cut_B(X::ArrayLike{2}, r::UnitRange) = size(X, 1) > length(r) ? X[r,:] : X ## append right hand side with zeros if necessary -_zeros(::Type{T}, b::AbstractVector, n::Integer) where {T} = zeros(T, max(length(b), n)) -_zeros(::Type{T}, B::AbstractMatrix, n::Integer) where {T} = zeros(T, max(size(B, 1), n), size(B, 2)) +_zeros(::Type{T}, b::ArrayLike{1}, n::Integer) where {T} = zeros(T, max(length(b), n)) +_zeros(::Type{T}, B::ArrayLike{2}, n::Integer) where {T} = zeros(T, max(size(B, 1), n), size(B, 2)) function (\)(A::Union{QR{TA},QRCompactWY{TA},QRPivoted{TA}}, B::AbstractVecOrMat{TB}) where {TA,TB} require_one_based_indexing(B) @@ -885,8 +885,8 @@ end # rhs as a real rhs with twice the number of columns. 
# convenience methods to compute the return size correctly for vectors and matrices -_ret_size(A::Factorization, b::AbstractVector) = (max(size(A, 2), length(b)),) -_ret_size(A::Factorization, B::AbstractMatrix) = (max(size(A, 2), size(B, 1)), size(B, 2)) +_ret_size(A::Factorization, b::ArrayLike{1}) = (max(size(A, 2), length(b)),) +_ret_size(A::Factorization, B::ArrayLike{2}) = (max(size(A, 2), size(B, 1)), size(B, 2)) function (\)(A::Union{QR{T},QRCompactWY{T},QRPivoted{T}}, BIn::VecOrMat{Complex{T}}) where T<:BlasReal require_one_based_indexing(BIn) diff --git a/stdlib/LinearAlgebra/src/schur.jl b/stdlib/LinearAlgebra/src/schur.jl index 9ea03b0bf12a3..dcbe4eafd1c18 100644 --- a/stdlib/LinearAlgebra/src/schur.jl +++ b/stdlib/LinearAlgebra/src/schur.jl @@ -47,7 +47,7 @@ julia> t == F.T && z == F.Z && vals == F.values true ``` """ -struct Schur{Ty,S<:AbstractMatrix} <: Factorization{Ty} +struct Schur{Ty,S<:ArrayLike{2}} <: Factorization{Ty} T::S Z::S values::Vector @@ -218,7 +218,7 @@ with `F.α./F.β`. Iterating the decomposition produces the components `F.S`, `F.T`, `F.Q`, `F.Z`, `F.α`, and `F.β`. """ -struct GeneralizedSchur{Ty,M<:AbstractMatrix} <: Factorization{Ty} +struct GeneralizedSchur{Ty,M<:ArrayLike{2}} <: Factorization{Ty} S::M T::M α::Vector diff --git a/stdlib/LinearAlgebra/src/svd.jl b/stdlib/LinearAlgebra/src/svd.jl index 843235a615d3b..bb153c4215f28 100644 --- a/stdlib/LinearAlgebra/src/svd.jl +++ b/stdlib/LinearAlgebra/src/svd.jl @@ -66,7 +66,7 @@ struct SVD{T,Tr,M<:AbstractArray{T}} <: Factorization{T} end end SVD(U::AbstractArray{T}, S::Vector{Tr}, Vt::AbstractArray{T}) where {T,Tr} = SVD{T,Tr,typeof(U)}(U, S, Vt) -function SVD{T}(U::AbstractArray, S::AbstractVector{Tr}, Vt::AbstractArray) where {T,Tr} +function SVD{T}(U::ArrayLike, S::AbstractVector{Tr}, Vt::ArrayLike) where {T,Tr} SVD(convert(AbstractArray{T}, U), convert(Vector{Tr}, S), convert(AbstractArray{T}, Vt)) @@ -288,7 +288,7 @@ end size(A::SVD, dim::Integer) = dim == 1 ? size(A.U, dim) : size(A.Vt, dim) size(A::SVD) = (size(A, 1), size(A, 2)) -function show(io::IO, mime::MIME{Symbol("text/plain")}, F::SVD{<:Any,<:Any,<:AbstractArray}) +function show(io::IO, mime::MIME{Symbol("text/plain")}, F::SVD{<:Any,<:Any,<:ArrayLike}) summary(io, F); println(io) println(io, "U factor:") show(io, mime, F.U) @@ -560,7 +560,7 @@ end Base.propertynames(F::GeneralizedSVD) = (:alpha, :beta, :vals, :S, :D1, :D2, :R0, fieldnames(typeof(F))...) -function show(io::IO, mime::MIME{Symbol("text/plain")}, F::GeneralizedSVD{<:Any,<:AbstractArray}) +function show(io::IO, mime::MIME{Symbol("text/plain")}, F::GeneralizedSVD{<:Any,<:ArrayLike}) summary(io, F); println(io) println(io, "U factor:") show(io, mime, F.U) diff --git a/stdlib/LinearAlgebra/src/symmetric.jl b/stdlib/LinearAlgebra/src/symmetric.jl index 423331011fc03..6e8ab18022898 100644 --- a/stdlib/LinearAlgebra/src/symmetric.jl +++ b/stdlib/LinearAlgebra/src/symmetric.jl @@ -45,7 +45,7 @@ julia> Slower = Symmetric(A, :L) Note that `Supper` will not be equal to `Slower` unless `A` is itself symmetric (e.g. if `A == transpose(A)`). """ -function Symmetric(A::AbstractMatrix, uplo::Symbol=:U) +function Symmetric(A::ArrayLike{2}, uplo::Symbol=:U) checksquare(A) return symmetric_type(typeof(A))(A, char_uplo(uplo)) end @@ -61,7 +61,7 @@ If a symmetric view of a matrix is to be constructed of which the elements are n matrices nor numbers, an appropriate method of `symmetric` has to be implemented. In that case, `symmetric_type` has to be implemented, too. 
""" -symmetric(A::AbstractMatrix, uplo::Symbol) = Symmetric(A, uplo) +symmetric(A::ArrayLike{2}, uplo::Symbol) = Symmetric(A, uplo) symmetric(A::Number, ::Symbol) = A """ @@ -77,8 +77,8 @@ end function symmetric_type(::Type{T}) where {S<:Number, T<:AbstractMatrix{S}} return Symmetric{S, T} end -function symmetric_type(::Type{T}) where {S<:AbstractMatrix, T<:AbstractMatrix{S}} - return Symmetric{AbstractMatrix, T} +function symmetric_type(::Type{T}) where {S<:ArrayLike{2}, T<:AbstractMatrix{S}} + return Symmetric{ArrayLike{2}, T} end symmetric_type(::Type{T}) where {T<:Number} = T @@ -126,7 +126,7 @@ All non-real parts of the diagonal will be ignored. Hermitian(fill(complex(1,1), 1, 1)) == fill(1, 1, 1) ``` """ -function Hermitian(A::AbstractMatrix, uplo::Symbol=:U) +function Hermitian(A::ArrayLike{2}, uplo::Symbol=:U) n = checksquare(A) return hermitian_type(typeof(A))(A, char_uplo(uplo)) end @@ -143,7 +143,7 @@ If a hermitian view of a matrix is to be constructed of which the elements are n matrices nor numbers, an appropriate method of `hermitian` has to be implemented. In that case, `hermitian_type` has to be implemented, too. """ -hermitian(A::AbstractMatrix, uplo::Symbol) = Hermitian(A, uplo) +hermitian(A::ArrayLike{2}, uplo::Symbol) = Hermitian(A, uplo) hermitian(A::Number, ::Symbol) = convert(typeof(A), real(A)) """ @@ -159,8 +159,8 @@ end function hermitian_type(::Type{T}) where {S<:Number, T<:AbstractMatrix{S}} return Hermitian{S, T} end -function hermitian_type(::Type{T}) where {S<:AbstractMatrix, T<:AbstractMatrix{S}} - return Hermitian{AbstractMatrix, T} +function hermitian_type(::Type{T}) where {S<:ArrayLike{2}, T<:AbstractMatrix{S}} + return Hermitian{ArrayLike{2}, T} end hermitian_type(::Type{T}) where {T<:Number} = T @@ -268,11 +268,11 @@ end Array(A::Union{Symmetric,Hermitian}) = convert(Matrix, A) parent(A::HermOrSym) = A.data -Symmetric{T,S}(A::Symmetric{T,S}) where {T,S<:AbstractMatrix} = A -Symmetric{T,S}(A::Symmetric) where {T,S<:AbstractMatrix} = Symmetric{T,S}(convert(S,A.data),A.uplo) +Symmetric{T,S}(A::Symmetric{T,S}) where {T,S<:ArrayLike{2}} = A +Symmetric{T,S}(A::Symmetric) where {T,S<:ArrayLike{2}} = Symmetric{T,S}(convert(S,A.data),A.uplo) AbstractMatrix{T}(A::Symmetric) where {T} = Symmetric(convert(AbstractMatrix{T}, A.data), sym_uplo(A.uplo)) -Hermitian{T,S}(A::Hermitian{T,S}) where {T,S<:AbstractMatrix} = A -Hermitian{T,S}(A::Hermitian) where {T,S<:AbstractMatrix} = Hermitian{T,S}(convert(S,A.data),A.uplo) +Hermitian{T,S}(A::Hermitian{T,S}) where {T,S<:ArrayLike{2}} = A +Hermitian{T,S}(A::Hermitian) where {T,S<:ArrayLike{2}} = Hermitian{T,S}(convert(S,A.data),A.uplo) AbstractMatrix{T}(A::Hermitian) where {T} = Hermitian(convert(AbstractMatrix{T}, A.data), sym_uplo(A.uplo)) copy(A::Symmetric{T,S}) where {T,S} = (B = copy(A.data); Symmetric{T,typeof(B)}(B,A.uplo)) @@ -554,7 +554,7 @@ end *(A::HermOrSym, B::HermOrSym) = A * copyto!(similar(parent(B)), B) -function dot(x::AbstractVector, A::RealHermSymComplexHerm, y::AbstractVector) +function dot(x::ArrayLike{1}, A::RealHermSymComplexHerm, y::ArrayLike{1}) require_one_based_indexing(x, y) (length(x) == length(y) == size(A, 1)) || throw(DimensionMismatch()) data = A.data @@ -581,15 +581,17 @@ end # Fallbacks to avoid generic_matvecmul!/generic_matmatmul! 
## Symmetric{<:Number} and Hermitian{<:Real} are invariant to transpose; peel off the t -*(transA::Transpose{<:Any,<:RealHermSymComplexSym}, B::AbstractVector) = transA.parent * B -*(transA::Transpose{<:Any,<:RealHermSymComplexSym}, B::AbstractMatrix) = transA.parent * B -*(A::AbstractMatrix, transB::Transpose{<:Any,<:RealHermSymComplexSym}) = A * transB.parent +*(transA::Transpose{<:Any,<:RealHermSymComplexSym}, B::ArrayLike{1}) = transA.parent * B +*(transA::Transpose{<:Any,<:RealHermSymComplexSym}, B::AbstractVector) = transA.parent * B # specific +*(transA::Transpose{<:Any,<:RealHermSymComplexSym}, B::ArrayLike{2}) = transA.parent * B +*(A::ArrayLike{2}, transB::Transpose{<:Any,<:RealHermSymComplexSym}) = A * transB.parent ## Hermitian{<:Number} and Symmetric{<:Real} are invariant to adjoint; peel off the c -*(adjA::Adjoint{<:Any,<:RealHermSymComplexHerm}, B::AbstractVector) = adjA.parent * B -*(adjA::Adjoint{<:Any,<:RealHermSymComplexHerm}, B::AbstractMatrix) = adjA.parent * B -*(A::AbstractMatrix, adjB::Adjoint{<:Any,<:RealHermSymComplexHerm}) = A * adjB.parent +*(adjA::Adjoint{<:Any,<:RealHermSymComplexHerm}, B::ArrayLike{1}) = adjA.parent * B +*(adjA::Adjoint{<:Any,<:RealHermSymComplexHerm}, B::AbstractVector) = adjA.parent * B # specific +*(adjA::Adjoint{<:Any,<:RealHermSymComplexHerm}, B::ArrayLike{2}) = adjA.parent * B +*(A::ArrayLike{2}, adjB::Adjoint{<:Any,<:RealHermSymComplexHerm}) = A * adjB.parent -# ambiguities with transposed AbstractMatrix methods in linalg/matmul.jl +# ambiguities with transposed ArrayLike{2} methods in linalg/matmul.jl *(transA::Transpose{<:Any,<:RealHermSym}, transB::Transpose{<:Any,<:RealHermSym}) = transA * transB.parent *(transA::Transpose{<:Any,<:RealHermSym}, transB::Transpose{<:Any,<:RealHermSymComplexSym}) = transA * transB.parent *(transA::Transpose{<:Any,<:RealHermSymComplexSym}, transB::Transpose{<:Any,<:RealHermSymComplexSym}) = transA.parent * transB.parent @@ -631,10 +633,10 @@ det(A::RealHermSymComplexHerm) = real(det(_factorize(A; check=false))) det(A::Symmetric{<:Real}) = det(_factorize(A; check=false)) det(A::Symmetric) = det(_factorize(A; check=false)) -\(A::HermOrSym{<:Any,<:StridedMatrix}, B::AbstractVector) = \(factorize(A), B) +\(A::HermOrSym{<:Any,<:StridedMatrix}, B::ArrayLike{1}) = \(factorize(A), B) # Bunch-Kaufman solves can not utilize BLAS-3 for multiple right hand sides -# so using LU is faster for AbstractMatrix right hand side -\(A::HermOrSym{<:Any,<:StridedMatrix}, B::AbstractMatrix) = \(lu(A), B) +# so using LU is faster for ArrayLike{2} right hand side +\(A::HermOrSym{<:Any,<:StridedMatrix}, B::ArrayLike{2}) = \(lu(A), B) function _inv(A::HermOrSym) n = checksquare(A) @@ -1023,23 +1025,23 @@ end *(A::Adjoint{<:Any,<:RealHermSymComplexHerm}, B::Transpose{<:Any,<:RealHermSymComplexSym}) = A.parent * B.parent *(A::Transpose{<:Any,<:RealHermSymComplexSym}, B::Adjoint{<:Any,<:RealHermSymComplexHerm}) = A.parent * B.parent # disambiguation methods: *(Adj/Trans of AbsVec/AbsMat, Adj/Trans of RealHermSymComplex{Herm|Sym}) -*(A::Adjoint{<:Any,<:AbstractVector}, B::Adjoint{<:Any,<:RealHermSymComplexHerm}) = A * B.parent -*(A::Adjoint{<:Any,<:AbstractMatrix}, B::Adjoint{<:Any,<:RealHermSymComplexHerm}) = A * B.parent -*(A::Adjoint{<:Any,<:AbstractVector}, B::Transpose{<:Any,<:RealHermSymComplexSym}) = A * B.parent -*(A::Adjoint{<:Any,<:AbstractMatrix}, B::Transpose{<:Any,<:RealHermSymComplexSym}) = A * B.parent -*(A::Transpose{<:Any,<:AbstractVector}, B::Adjoint{<:Any,<:RealHermSymComplexHerm}) = A * B.parent 
-*(A::Transpose{<:Any,<:AbstractMatrix}, B::Adjoint{<:Any,<:RealHermSymComplexHerm}) = A * B.parent -*(A::Transpose{<:Any,<:AbstractVector}, B::Transpose{<:Any,<:RealHermSymComplexSym}) = A * B.parent -*(A::Transpose{<:Any,<:AbstractMatrix}, B::Transpose{<:Any,<:RealHermSymComplexSym}) = A * B.parent +*(A::Adjoint{<:Any,<:ArrayLike{1}}, B::Adjoint{<:Any,<:RealHermSymComplexHerm}) = A * B.parent +*(A::Adjoint{<:Any,<:ArrayLike{2}}, B::Adjoint{<:Any,<:RealHermSymComplexHerm}) = A * B.parent +*(A::Adjoint{<:Any,<:ArrayLike{1}}, B::Transpose{<:Any,<:RealHermSymComplexSym}) = A * B.parent +*(A::Adjoint{<:Any,<:ArrayLike{2}}, B::Transpose{<:Any,<:RealHermSymComplexSym}) = A * B.parent +*(A::Transpose{<:Any,<:ArrayLike{1}}, B::Adjoint{<:Any,<:RealHermSymComplexHerm}) = A * B.parent +*(A::Transpose{<:Any,<:ArrayLike{2}}, B::Adjoint{<:Any,<:RealHermSymComplexHerm}) = A * B.parent +*(A::Transpose{<:Any,<:ArrayLike{1}}, B::Transpose{<:Any,<:RealHermSymComplexSym}) = A * B.parent +*(A::Transpose{<:Any,<:ArrayLike{2}}, B::Transpose{<:Any,<:RealHermSymComplexSym}) = A * B.parent # disambiguation methods: *(Adj/Trans of RealHermSymComplex{Herm|Sym}, Adj/Trans of AbsVec/AbsMat) -*(A::Adjoint{<:Any,<:RealHermSymComplexHerm}, B::Adjoint{<:Any,<:AbstractVector}) = A.parent * B -*(A::Adjoint{<:Any,<:RealHermSymComplexHerm}, B::Adjoint{<:Any,<:AbstractMatrix}) = A.parent * B -*(A::Adjoint{<:Any,<:RealHermSymComplexHerm}, B::Transpose{<:Any,<:AbstractVector}) = A.parent * B -*(A::Adjoint{<:Any,<:RealHermSymComplexHerm}, B::Transpose{<:Any,<:AbstractMatrix}) = A.parent * B -*(A::Transpose{<:Any,<:RealHermSymComplexSym}, B::Adjoint{<:Any,<:AbstractVector}) = A.parent * B -*(A::Transpose{<:Any,<:RealHermSymComplexSym}, B::Adjoint{<:Any,<:AbstractMatrix}) = A.parent * B -*(A::Transpose{<:Any,<:RealHermSymComplexSym}, B::Transpose{<:Any,<:AbstractVector}) = A.parent * B -*(A::Transpose{<:Any,<:RealHermSymComplexSym}, B::Transpose{<:Any,<:AbstractMatrix}) = A.parent * B +*(A::Adjoint{<:Any,<:RealHermSymComplexHerm}, B::Adjoint{<:Any,<:ArrayLike{1}}) = A.parent * B +*(A::Adjoint{<:Any,<:RealHermSymComplexHerm}, B::Adjoint{<:Any,<:ArrayLike{2}}) = A.parent * B +*(A::Adjoint{<:Any,<:RealHermSymComplexHerm}, B::Transpose{<:Any,<:ArrayLike{1}}) = A.parent * B +*(A::Adjoint{<:Any,<:RealHermSymComplexHerm}, B::Transpose{<:Any,<:ArrayLike{2}}) = A.parent * B +*(A::Transpose{<:Any,<:RealHermSymComplexSym}, B::Adjoint{<:Any,<:ArrayLike{1}}) = A.parent * B +*(A::Transpose{<:Any,<:RealHermSymComplexSym}, B::Adjoint{<:Any,<:ArrayLike{2}}) = A.parent * B +*(A::Transpose{<:Any,<:RealHermSymComplexSym}, B::Transpose{<:Any,<:ArrayLike{1}}) = A.parent * B +*(A::Transpose{<:Any,<:RealHermSymComplexSym}, B::Transpose{<:Any,<:ArrayLike{2}}) = A.parent * B # disambiguation methods: *(Adj/Trans of AbsTri or RealHermSymComplex{Herm|Sym}, Adj/Trans of other) *(A::Adjoint{<:Any,<:AbstractTriangular}, B::Adjoint{<:Any,<:RealHermSymComplexHerm}) = A * B.parent diff --git a/stdlib/LinearAlgebra/src/transpose.jl b/stdlib/LinearAlgebra/src/transpose.jl index 4b4658df55134..f49b38174f1fe 100644 --- a/stdlib/LinearAlgebra/src/transpose.jl +++ b/stdlib/LinearAlgebra/src/transpose.jl @@ -1,7 +1,7 @@ # This file is a part of Julia. License is MIT: https://julialang.org/license -adjoint(a::AbstractArray) = error("adjoint not defined for $(typeof(a)). Consider using `permutedims` for higher-dimensional arrays.") -transpose(a::AbstractArray) = error("transpose not defined for $(typeof(a)). 
Consider using `permutedims` for higher-dimensional arrays.") +adjoint(a::ArrayLike) = error("adjoint not defined for $(typeof(a)). Consider using `permutedims` for higher-dimensional arrays.") +transpose(a::ArrayLike) = error("transpose not defined for $(typeof(a)). Consider using `permutedims` for higher-dimensional arrays.") ## Matrix transposition ## @@ -38,7 +38,7 @@ julia> A 8+7im 4+6im ``` """ -transpose!(B::AbstractMatrix, A::AbstractMatrix) = transpose_f!(transpose, B, A) +transpose!(B::ArrayLike{2}, A::ArrayLike{2}) = transpose_f!(transpose, B, A) """ adjoint!(dest,src) @@ -73,26 +73,26 @@ julia> A 8+7im 4+6im ``` """ -adjoint!(B::AbstractMatrix, A::AbstractMatrix) = transpose_f!(adjoint, B, A) -function transpose!(B::AbstractVector, A::AbstractMatrix) +adjoint!(B::ArrayLike{2}, A::ArrayLike{2}) = transpose_f!(adjoint, B, A) +function transpose!(B::ArrayLike{1}, A::ArrayLike{2}) axes(B,1) == axes(A,2) && axes(A,1) == 1:1 || throw(DimensionMismatch("transpose")) copyto!(B, A) end -function transpose!(B::AbstractMatrix, A::AbstractVector) +function transpose!(B::ArrayLike{2}, A::ArrayLike{1}) axes(B,2) == axes(A,1) && axes(B,1) == 1:1 || throw(DimensionMismatch("transpose")) copyto!(B, A) end -function adjoint!(B::AbstractVector, A::AbstractMatrix) +function adjoint!(B::ArrayLike{1}, A::ArrayLike{2}) axes(B,1) == axes(A,2) && axes(A,1) == 1:1 || throw(DimensionMismatch("transpose")) ccopy!(B, A) end -function adjoint!(B::AbstractMatrix, A::AbstractVector) +function adjoint!(B::ArrayLike{2}, A::ArrayLike{1}) axes(B,2) == axes(A,1) && axes(B,1) == 1:1 || throw(DimensionMismatch("transpose")) ccopy!(B, A) end const transposebaselength=64 -function transpose_f!(f, B::AbstractMatrix, A::AbstractMatrix) +function transpose_f!(f, B::ArrayLike{2}, A::ArrayLike{2}) inds = axes(A) axes(B,1) == inds[2] && axes(B,2) == inds[1] || throw(DimensionMismatch(string(f))) @@ -110,7 +110,7 @@ function transpose_f!(f, B::AbstractMatrix, A::AbstractMatrix) end return B end -function transposeblock!(f, B::AbstractMatrix, A::AbstractMatrix, m::Int, n::Int, offseti::Int, offsetj::Int) +function transposeblock!(f, B::ArrayLike{2}, A::ArrayLike{2}, m::Int, n::Int, offseti::Int, offsetj::Int) if m*n<=transposebaselength @inbounds begin for j = offsetj .+ (1:n) @@ -175,11 +175,11 @@ julia> copy(T) """ copy(::Union{Transpose,Adjoint}) -Base.copy(A::Transpose{<:Any,<:AbstractMatrix}) = transpose!(similar(A.parent, reverse(axes(A.parent))), A.parent) -Base.copy(A::Adjoint{<:Any,<:AbstractMatrix}) = adjoint!(similar(A.parent, reverse(axes(A.parent))), A.parent) +Base.copy(A::Transpose{<:Any,<:ArrayLike{2}}) = transpose!(similar(A.parent, reverse(axes(A.parent))), A.parent) +Base.copy(A::Adjoint{<:Any,<:ArrayLike{2}}) = adjoint!(similar(A.parent, reverse(axes(A.parent))), A.parent) -function copy_transpose!(B::AbstractVecOrMat, ir_dest::AbstractRange{Int}, jr_dest::AbstractRange{Int}, - A::AbstractVecOrMat, ir_src::AbstractRange{Int}, jr_src::AbstractRange{Int}) +function copy_transpose!(B::VectorOrMatrixLike, ir_dest::AbstractRange{Int}, jr_dest::AbstractRange{Int}, + A::VectorOrMatrixLike, ir_src::AbstractRange{Int}, jr_src::AbstractRange{Int}) if length(ir_dest) != length(jr_src) throw(ArgumentError(string("source and destination must have same size (got ", length(jr_src)," and ",length(ir_dest),")"))) diff --git a/stdlib/LinearAlgebra/src/triangular.jl b/stdlib/LinearAlgebra/src/triangular.jl index 2b6698ecf3840..1fd98cb2de869 100644 --- a/stdlib/LinearAlgebra/src/triangular.jl +++ 
b/stdlib/LinearAlgebra/src/triangular.jl @@ -3,7 +3,7 @@ ## Triangular # could be renamed to Triangular when that name has been fully deprecated -abstract type AbstractTriangular{T,S<:AbstractMatrix} <: AbstractMatrix{T} end +abstract type AbstractTriangular{T,S<:ArrayLike{2}} <: AbstractMatrix{T} end # First loop through all methods that don't need special care for upper/lower and unit diagonal for t in (:LowerTriangular, :UnitLowerTriangular, :UpperTriangular, @@ -20,10 +20,10 @@ for t in (:LowerTriangular, :UnitLowerTriangular, :UpperTriangular, end $t(A::$t) = A $t{T}(A::$t{T}) where {T} = A - function $t(A::AbstractMatrix) + function $t(A::ArrayLike{2}) return $t{eltype(A), typeof(A)}(A) end - function $t{T}(A::AbstractMatrix) where T + function $t{T}(A::ArrayLike{2}) where T $t(convert(AbstractMatrix{T}, A)) end @@ -66,7 +66,7 @@ UpperTriangular(U::LowerTriangular) = throw(ArgumentError( "cannot create an UpperTriangular matrix from a LowerTriangular input")) """ - LowerTriangular(A::AbstractMatrix) + LowerTriangular(A::ArrayLike{2}) Construct a `LowerTriangular` view of the matrix `A`. @@ -87,7 +87,7 @@ julia> LowerTriangular(A) """ LowerTriangular """ - UpperTriangular(A::AbstractMatrix) + UpperTriangular(A::ArrayLike{2}) Construct an `UpperTriangular` view of the matrix `A`. @@ -108,7 +108,7 @@ julia> UpperTriangular(A) """ UpperTriangular """ - UnitLowerTriangular(A::AbstractMatrix) + UnitLowerTriangular(A::ArrayLike{2}) Construct a `UnitLowerTriangular` view of the matrix `A`. Such a view has the [`oneunit`](@ref) of the [`eltype`](@ref) @@ -131,7 +131,7 @@ julia> UnitLowerTriangular(A) """ UnitLowerTriangular """ - UnitUpperTriangular(A::AbstractMatrix) + UnitUpperTriangular(A::ArrayLike{2}) Construct an `UnitUpperTriangular` view of the matrix `A`. Such a view has the [`oneunit`](@ref) of the [`eltype`](@ref) @@ -555,7 +555,7 @@ end rmul!(A::Union{UpperTriangular,LowerTriangular}, c::Number) = mul!(A, A, c) lmul!(c::Number, A::Union{UpperTriangular,LowerTriangular}) = mul!(A, c, A) -function dot(x::AbstractVector, A::UpperTriangular, y::AbstractVector) +function dot(x::ArrayLike{1}, A::UpperTriangular, y::ArrayLike{1}) require_one_based_indexing(x, y) m = size(A, 1) (length(x) == m == length(y)) || throw(DimensionMismatch()) @@ -576,7 +576,7 @@ function dot(x::AbstractVector, A::UpperTriangular, y::AbstractVector) end return r end -function dot(x::AbstractVector, A::UnitUpperTriangular, y::AbstractVector) +function dot(x::ArrayLike{1}, A::UnitUpperTriangular, y::ArrayLike{1}) require_one_based_indexing(x, y) m = size(A, 1) (length(x) == m == length(y)) || throw(DimensionMismatch()) @@ -598,7 +598,7 @@ function dot(x::AbstractVector, A::UnitUpperTriangular, y::AbstractVector) end return r end -function dot(x::AbstractVector, A::LowerTriangular, y::AbstractVector) +function dot(x::ArrayLike{1}, A::LowerTriangular, y::ArrayLike{1}) require_one_based_indexing(x, y) m = size(A, 1) (length(x) == m == length(y)) || throw(DimensionMismatch()) @@ -618,7 +618,7 @@ function dot(x::AbstractVector, A::LowerTriangular, y::AbstractVector) end return r end -function dot(x::AbstractVector, A::UnitLowerTriangular, y::AbstractVector) +function dot(x::ArrayLike{1}, A::UnitLowerTriangular, y::ArrayLike{1}) require_one_based_indexing(x, y) m = size(A, 1) (length(x) == m == length(y)) || throw(DimensionMismatch()) @@ -671,45 +671,45 @@ fillstored!(A::UnitUpperTriangular, x) = (fillband!(A.data, x, 1, size(A,2)-1); lmul!(A::Tridiagonal, B::AbstractTriangular) = A*full!(B) # is this necessary? 
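The structured `dot` methods above (for `UpperTriangular`, `UnitUpperTriangular`, and their lower-triangular counterparts) can be sanity-checked against the dense computation. A minimal sketch with arbitrary example data, using only exported API so it does not depend on the `ArrayLike` signatures themselves:

```julia
using LinearAlgebra

A = [1.0 2.0 3.0; 4.0 5.0 6.0; 7.0 8.0 9.0]
x = [1.0, 0.5, -1.0]
y = [2.0, 1.0, 0.0]

U  = UpperTriangular(A)        # entries below the diagonal act as zeros
UU = UnitUpperTriangular(A)    # additionally, the diagonal acts as ones

# The triangular dot methods visit only the stored triangle, yet agree with
# the equivalent dense computation.
@assert dot(x, U, y)  ≈ dot(x, Matrix(U) * y)
@assert dot(x, UU, y) ≈ dot(x, Matrix(UU) * y)
```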
-@inline mul!(C::AbstractMatrix, A::AbstractTriangular, B::Tridiagonal, alpha::Number, beta::Number) = +@inline mul!(C::ArrayLike{2}, A::AbstractTriangular, B::Tridiagonal, alpha::Number, beta::Number) = mul!(C, copyto!(similar(parent(A)), A), B, alpha, beta) -@inline mul!(C::AbstractMatrix, A::Tridiagonal, B::AbstractTriangular, alpha::Number, beta::Number) = +@inline mul!(C::ArrayLike{2}, A::Tridiagonal, B::AbstractTriangular, alpha::Number, beta::Number) = mul!(C, A, copyto!(similar(parent(B)), B), alpha, beta) -mul!(C::AbstractVector, A::AbstractTriangular, transB::Transpose{<:Any,<:AbstractVecOrMat}) = +mul!(C::ArrayLike{1}, A::AbstractTriangular, transB::Transpose{<:Any,<:VectorOrMatrixLike}) = (B = transB.parent; lmul!(A, transpose!(C, B))) -mul!(C::AbstractMatrix, A::AbstractTriangular, transB::Transpose{<:Any,<:AbstractVecOrMat}) = +mul!(C::ArrayLike{2}, A::AbstractTriangular, transB::Transpose{<:Any,<:VectorOrMatrixLike}) = (B = transB.parent; lmul!(A, transpose!(C, B))) -mul!(C::AbstractMatrix, A::AbstractTriangular, adjB::Adjoint{<:Any,<:AbstractVecOrMat}) = +mul!(C::ArrayLike{2}, A::AbstractTriangular, adjB::Adjoint{<:Any,<:VectorOrMatrixLike}) = (B = adjB.parent; lmul!(A, adjoint!(C, B))) -mul!(C::AbstractVecOrMat, A::AbstractTriangular, adjB::Adjoint{<:Any,<:AbstractVecOrMat}) = +mul!(C::VectorOrMatrixLike, A::AbstractTriangular, adjB::Adjoint{<:Any,<:VectorOrMatrixLike}) = (B = adjB.parent; lmul!(A, adjoint!(C, B))) # The three methods for each op are necessary to avoid ambiguities with definitions in matmul.jl -mul!(C::AbstractVector , A::AbstractTriangular, B::AbstractVector) = lmul!(A, copyto!(C, B)) -mul!(C::AbstractMatrix , A::AbstractTriangular, B::AbstractVecOrMat) = lmul!(A, copyto!(C, B)) -mul!(C::AbstractVecOrMat, A::AbstractTriangular, B::AbstractVecOrMat) = lmul!(A, copyto!(C, B)) -mul!(C::AbstractVector , adjA::Adjoint{<:Any,<:AbstractTriangular}, B::AbstractVector) = +mul!(C::ArrayLike{1} , A::AbstractTriangular, B::ArrayLike{1}) = lmul!(A, copyto!(C, B)) +mul!(C::ArrayLike{2} , A::AbstractTriangular, B::VectorOrMatrixLike) = lmul!(A, copyto!(C, B)) +mul!(C::VectorOrMatrixLike, A::AbstractTriangular, B::VectorOrMatrixLike) = lmul!(A, copyto!(C, B)) +mul!(C::ArrayLike{1} , adjA::Adjoint{<:Any,<:AbstractTriangular}, B::ArrayLike{1}) = (A = adjA.parent; lmul!(adjoint(A), copyto!(C, B))) -mul!(C::AbstractMatrix , adjA::Adjoint{<:Any,<:AbstractTriangular}, B::AbstractVecOrMat) = +mul!(C::ArrayLike{2} , adjA::Adjoint{<:Any,<:AbstractTriangular}, B::VectorOrMatrixLike) = (A = adjA.parent; lmul!(adjoint(A), copyto!(C, B))) -mul!(C::AbstractVecOrMat, adjA::Adjoint{<:Any,<:AbstractTriangular}, B::AbstractVecOrMat) = +mul!(C::VectorOrMatrixLike, adjA::Adjoint{<:Any,<:AbstractTriangular}, B::VectorOrMatrixLike) = (A = adjA.parent; lmul!(adjoint(A), copyto!(C, B))) -mul!(C::AbstractVector , transA::Transpose{<:Any,<:AbstractTriangular}, B::AbstractVector) = +mul!(C::ArrayLike{1} , transA::Transpose{<:Any,<:AbstractTriangular}, B::ArrayLike{1}) = (A = transA.parent; lmul!(transpose(A), copyto!(C, B))) -mul!(C::AbstractMatrix , transA::Transpose{<:Any,<:AbstractTriangular}, B::AbstractVecOrMat) = +mul!(C::ArrayLike{2} , transA::Transpose{<:Any,<:AbstractTriangular}, B::VectorOrMatrixLike) = (A = transA.parent; lmul!(transpose(A), copyto!(C, B))) -mul!(C::AbstractVecOrMat, transA::Transpose{<:Any,<:AbstractTriangular}, B::AbstractVecOrMat) = +mul!(C::VectorOrMatrixLike, transA::Transpose{<:Any,<:AbstractTriangular}, B::VectorOrMatrixLike) = (A = transA.parent;
lmul!(transpose(A), copyto!(C, B))) -@inline mul!(C::AbstractMatrix, A::Adjoint{<:Any,<:AbstractTriangular}, B::Adjoint{<:Any,<:AbstractVecOrMat}, alpha::Number, beta::Number) = +@inline mul!(C::ArrayLike{2}, A::Adjoint{<:Any,<:AbstractTriangular}, B::Adjoint{<:Any,<:VectorOrMatrixLike}, alpha::Number, beta::Number) = mul!(C, A, copy(B), alpha, beta) -@inline mul!(C::AbstractMatrix, A::Adjoint{<:Any,<:AbstractTriangular}, B::Transpose{<:Any,<:AbstractVecOrMat}, alpha::Number, beta::Number) = +@inline mul!(C::ArrayLike{2}, A::Adjoint{<:Any,<:AbstractTriangular}, B::Transpose{<:Any,<:VectorOrMatrixLike}, alpha::Number, beta::Number) = mul!(C, A, copy(B), alpha, beta) -@inline mul!(C::AbstractMatrix, A::Transpose{<:Any,<:AbstractTriangular}, B::Adjoint{<:Any,<:AbstractVecOrMat}, alpha::Number, beta::Number) = +@inline mul!(C::ArrayLike{2}, A::Transpose{<:Any,<:AbstractTriangular}, B::Adjoint{<:Any,<:VectorOrMatrixLike}, alpha::Number, beta::Number) = mul!(C, A, copy(B), alpha, beta) -@inline mul!(C::AbstractMatrix, A::Transpose{<:Any,<:AbstractTriangular}, B::Transpose{<:Any,<:AbstractVecOrMat}, alpha::Number, beta::Number) = +@inline mul!(C::ArrayLike{2}, A::Transpose{<:Any,<:AbstractTriangular}, B::Transpose{<:Any,<:VectorOrMatrixLike}, alpha::Number, beta::Number) = mul!(C, A, copy(B), alpha, beta) -mul!(C::AbstractVector, A::Adjoint{<:Any,<:AbstractTriangular}, B::Transpose{<:Any,<:AbstractVecOrMat}) = throw(MethodError(mul!, (C, A, B))) -mul!(C::AbstractVector, A::Transpose{<:Any,<:AbstractTriangular}, B::Transpose{<:Any,<:AbstractVecOrMat}) = throw(MethodError(mul!, (C, A, B))) +mul!(C::ArrayLike{1}, A::Adjoint{<:Any,<:AbstractTriangular}, B::Transpose{<:Any,<:VectorOrMatrixLike}) = throw(MethodError(mul!, (C, A, B))) +mul!(C::ArrayLike{1}, A::Transpose{<:Any,<:AbstractTriangular}, B::Transpose{<:Any,<:VectorOrMatrixLike}) = throw(MethodError(mul!, (C, A, B))) for (t, uploc, isunitc) in ((:LowerTriangular, 'L', 'N'), (:UnitLowerTriangular, 'L', 'U'), @@ -1304,7 +1304,7 @@ end # does not significantly impact performance as of Dec 2015 # replacing repeated references to A.data[j,j] with [Ajj = A.data[j,j] and references to Ajj] # does not significantly impact performance as of Dec 2015 -function naivesub!(A::UpperTriangular, b::AbstractVector, x::AbstractVector = b) +function naivesub!(A::UpperTriangular, b::ArrayLike{1}, x::ArrayLike{1} = b) require_one_based_indexing(A, b, x) n = size(A, 2) if !(n == length(b) == length(x)) @@ -1319,7 +1319,7 @@ function naivesub!(A::UpperTriangular, b::AbstractVector, x::AbstractVector = b) end x end -function naivesub!(A::UnitUpperTriangular, b::AbstractVector, x::AbstractVector = b) +function naivesub!(A::UnitUpperTriangular, b::ArrayLike{1}, x::ArrayLike{1} = b) require_one_based_indexing(A, b, x) n = size(A, 2) if !(n == length(b) == length(x)) @@ -1333,7 +1333,7 @@ function naivesub!(A::UnitUpperTriangular, b::AbstractVector, x::AbstractVector end x end -function naivesub!(A::LowerTriangular, b::AbstractVector, x::AbstractVector = b) +function naivesub!(A::LowerTriangular, b::ArrayLike{1}, x::ArrayLike{1} = b) require_one_based_indexing(A, b, x) n = size(A, 2) if !(n == length(b) == length(x)) @@ -1348,7 +1348,7 @@ function naivesub!(A::LowerTriangular, b::AbstractVector, x::AbstractVector = b) end x end -function naivesub!(A::UnitLowerTriangular, b::AbstractVector, x::AbstractVector = b) +function naivesub!(A::UnitLowerTriangular, b::ArrayLike{1}, x::ArrayLike{1} = b) require_one_based_indexing(A, b, x) n = size(A, 2) if !(n == length(b) == 
length(x)) @@ -1364,7 +1364,7 @@ function naivesub!(A::UnitLowerTriangular, b::AbstractVector, x::AbstractVector end # in the following transpose and conjugate transpose naive substitution variants, # accumulating in z rather than b[j] significantly improves performance as of Dec 2015 -function ldiv!(transA::Transpose{<:Any,<:LowerTriangular}, b::AbstractVector, x::AbstractVector) +function ldiv!(transA::Transpose{<:Any,<:LowerTriangular}, b::ArrayLike{1}, x::ArrayLike{1}) require_one_based_indexing(transA, b, x) A = transA.parent n = size(A, 1) @@ -1381,9 +1381,9 @@ function ldiv!(transA::Transpose{<:Any,<:LowerTriangular}, b::AbstractVector, x: end x end -ldiv!(transA::Transpose{<:Any,<:LowerTriangular}, b::AbstractVector) = ldiv!(transA, b, b) +ldiv!(transA::Transpose{<:Any,<:LowerTriangular}, b::ArrayLike{1}) = ldiv!(transA, b, b) -function ldiv!(transA::Transpose{<:Any,<:UnitLowerTriangular}, b::AbstractVector, x::AbstractVector) +function ldiv!(transA::Transpose{<:Any,<:UnitLowerTriangular}, b::ArrayLike{1}, x::ArrayLike{1}) require_one_based_indexing(transA, b, x) A = transA.parent n = size(A, 1) @@ -1399,9 +1399,9 @@ function ldiv!(transA::Transpose{<:Any,<:UnitLowerTriangular}, b::AbstractVector end x end -ldiv!(transA::Transpose{<:Any,<:UnitLowerTriangular}, b::AbstractVector) = ldiv!(transA, b, b) +ldiv!(transA::Transpose{<:Any,<:UnitLowerTriangular}, b::ArrayLike{1}) = ldiv!(transA, b, b) -function ldiv!(transA::Transpose{<:Any,<:UpperTriangular}, b::AbstractVector, x::AbstractVector) +function ldiv!(transA::Transpose{<:Any,<:UpperTriangular}, b::ArrayLike{1}, x::ArrayLike{1}) require_one_based_indexing(transA, b, x) A = transA.parent n = size(A, 1) @@ -1418,9 +1418,9 @@ function ldiv!(transA::Transpose{<:Any,<:UpperTriangular}, b::AbstractVector, x: end x end -ldiv!(transA::Transpose{<:Any,<:UpperTriangular}, b::AbstractVector) = ldiv!(transA, b, b) +ldiv!(transA::Transpose{<:Any,<:UpperTriangular}, b::ArrayLike{1}) = ldiv!(transA, b, b) -function ldiv!(transA::Transpose{<:Any,<:UnitUpperTriangular}, b::AbstractVector, x::AbstractVector) +function ldiv!(transA::Transpose{<:Any,<:UnitUpperTriangular}, b::ArrayLike{1}, x::ArrayLike{1}) require_one_based_indexing(transA, b, x) A = transA.parent n = size(A, 1) @@ -1436,9 +1436,9 @@ function ldiv!(transA::Transpose{<:Any,<:UnitUpperTriangular}, b::AbstractVector end x end -ldiv!(transA::Transpose{<:Any,<:UnitUpperTriangular}, b::AbstractVector) = ldiv!(transA, b, b) +ldiv!(transA::Transpose{<:Any,<:UnitUpperTriangular}, b::ArrayLike{1}) = ldiv!(transA, b, b) -function ldiv!(adjA::Adjoint{<:Any,<:LowerTriangular}, b::AbstractVector, x::AbstractVector) +function ldiv!(adjA::Adjoint{<:Any,<:LowerTriangular}, b::ArrayLike{1}, x::ArrayLike{1}) require_one_based_indexing(adjA, b, x) A = adjA.parent n = size(A, 1) @@ -1455,9 +1455,9 @@ function ldiv!(adjA::Adjoint{<:Any,<:LowerTriangular}, b::AbstractVector, x::Abs end x end -ldiv!(adjA::Adjoint{<:Any,<:LowerTriangular}, b::AbstractVector) = ldiv!(adjA, b, b) +ldiv!(adjA::Adjoint{<:Any,<:LowerTriangular}, b::ArrayLike{1}) = ldiv!(adjA, b, b) -function ldiv!(adjA::Adjoint{<:Any,<:UnitLowerTriangular}, b::AbstractVector, x::AbstractVector) +function ldiv!(adjA::Adjoint{<:Any,<:UnitLowerTriangular}, b::ArrayLike{1}, x::ArrayLike{1}) require_one_based_indexing(adjA, b, x) A = adjA.parent n = size(A, 1) @@ -1473,9 +1473,9 @@ function ldiv!(adjA::Adjoint{<:Any,<:UnitLowerTriangular}, b::AbstractVector, x: end x end -ldiv!(adjA::Adjoint{<:Any,<:UnitLowerTriangular}, b::AbstractVector) = 
ldiv!(adjA, b, b) +ldiv!(adjA::Adjoint{<:Any,<:UnitLowerTriangular}, b::ArrayLike{1}) = ldiv!(adjA, b, b) -function ldiv!(adjA::Adjoint{<:Any,<:UpperTriangular}, b::AbstractVector, x::AbstractVector) +function ldiv!(adjA::Adjoint{<:Any,<:UpperTriangular}, b::ArrayLike{1}, x::ArrayLike{1}) require_one_based_indexing(adjA, b, x) A = adjA.parent n = size(A, 1) @@ -1492,9 +1492,9 @@ function ldiv!(adjA::Adjoint{<:Any,<:UpperTriangular}, b::AbstractVector, x::Abs end x end -ldiv!(adjA::Adjoint{<:Any,<:UpperTriangular}, b::AbstractVector) = ldiv!(adjA, b, b) +ldiv!(adjA::Adjoint{<:Any,<:UpperTriangular}, b::ArrayLike{1}) = ldiv!(adjA, b, b) -function ldiv!(adjA::Adjoint{<:Any,<:UnitUpperTriangular}, b::AbstractVector, x::AbstractVector) +function ldiv!(adjA::Adjoint{<:Any,<:UnitUpperTriangular}, b::ArrayLike{1}, x::ArrayLike{1}) require_one_based_indexing(adjA, b, x) A = adjA.parent n = size(A, 1) @@ -1510,7 +1510,7 @@ function ldiv!(adjA::Adjoint{<:Any,<:UnitUpperTriangular}, b::AbstractVector, x: end x end -ldiv!(adjA::Adjoint{<:Any,<:UnitUpperTriangular}, b::AbstractVector) = ldiv!(adjA, b, b) +ldiv!(adjA::Adjoint{<:Any,<:UnitUpperTriangular}, b::ArrayLike{1}) = ldiv!(adjA, b, b) function rdiv!(A::StridedMatrix, B::UpperTriangular) m, n = size(A) @@ -2090,15 +2090,15 @@ for mat in (:AbstractVector, :AbstractMatrix) end end ### Multiplication with triangle to the right and hence lhs cannot be transposed. -# Only for AbstractMatrix, hence outside the above loop. -function *(A::AbstractMatrix, B::AbstractTriangular) +# Only for ArrayLike{2}, hence outside the above loop. +function *(A::ArrayLike{2}, B::AbstractTriangular) require_one_based_indexing(A) TAB = typeof(zero(eltype(A))*zero(eltype(B)) + zero(eltype(A))*zero(eltype(B))) AA = similar(A, TAB, size(A)) copyto!(AA, A) rmul!(AA, convert(AbstractArray{TAB}, B)) end -function *(A::AbstractMatrix, adjB::Adjoint{<:Any,<:AbstractTriangular}) +function *(A::ArrayLike{2}, adjB::Adjoint{<:Any,<:AbstractTriangular}) require_one_based_indexing(A) B = adjB.parent TAB = typeof(zero(eltype(A))*zero(eltype(B)) + zero(eltype(A))*zero(eltype(B))) @@ -2106,7 +2106,7 @@ function *(A::AbstractMatrix, adjB::Adjoint{<:Any,<:AbstractTriangular}) copyto!(AA, A) rmul!(AA, adjoint(convert(AbstractArray{TAB}, B))) end -function *(A::AbstractMatrix, transB::Transpose{<:Any,<:AbstractTriangular}) +function *(A::ArrayLike{2}, transB::Transpose{<:Any,<:AbstractTriangular}) require_one_based_indexing(A) B = transB.parent TAB = typeof(zero(eltype(A))*zero(eltype(B)) + zero(eltype(A))*zero(eltype(B))) @@ -2126,16 +2126,16 @@ end # below might compute an unnecessary copy. Eliminating the copy requires adding # all the promotion logic here once again. Since these methods are probably relatively # rare, we chose not to bother for now. 
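The `*(A::ArrayLike{2}, B::AbstractTriangular)` method above first computes the promoted element type `TAB`, copies `A` into a `similar` array of that type, and finishes with an in-place `rmul!`. A small sketch of the observable effect, with arbitrary example data (on a stock build the same method exists with an `AbstractMatrix` signature, so the behaviour is identical):

```julia
using LinearAlgebra

A = [1 2; 3 4]                           # Matrix{Int}
B = UpperTriangular([0.5 1.0; 0.0 2.0])  # eltype Float64

C = A * B
@assert eltype(C) == Float64             # Int and Float64 promote to Float64
@assert C ≈ Matrix(A) * Matrix(B)        # same values as the dense product
```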
-*(A::Adjoint{<:Any,<:AbstractMatrix}, B::AbstractTriangular) = copy(A) * B -*(A::Transpose{<:Any,<:AbstractMatrix}, B::AbstractTriangular) = copy(A) * B -*(A::AbstractTriangular, B::Adjoint{<:Any,<:AbstractMatrix}) = A * copy(B) -*(A::AbstractTriangular, B::Transpose{<:Any,<:AbstractMatrix}) = A * copy(B) +*(A::Adjoint{<:Any,<:ArrayLike{2}}, B::AbstractTriangular) = copy(A) * B +*(A::Transpose{<:Any,<:ArrayLike{2}}, B::AbstractTriangular) = copy(A) * B +*(A::AbstractTriangular, B::Adjoint{<:Any,<:ArrayLike{2}}) = A * copy(B) +*(A::AbstractTriangular, B::Transpose{<:Any,<:ArrayLike{2}}) = A * copy(B) *(A::Adjoint{<:Any,<:AbstractTriangular}, B::Adjoint{<:Any,<:AbstractTriangular}) = A * copy(B) -*(A::Adjoint{<:Any,<:AbstractTriangular}, B::Adjoint{<:Any,<:AbstractMatrix}) = A * copy(B) -*(A::Adjoint{<:Any,<:AbstractMatrix}, B::Adjoint{<:Any,<:AbstractTriangular}) = copy(A) * B +*(A::Adjoint{<:Any,<:AbstractTriangular}, B::Adjoint{<:Any,<:ArrayLike{2}}) = A * copy(B) +*(A::Adjoint{<:Any,<:ArrayLike{2}}, B::Adjoint{<:Any,<:AbstractTriangular}) = copy(A) * B *(A::Transpose{<:Any,<:AbstractTriangular}, B::Transpose{<:Any,<:AbstractTriangular}) = A * copy(B) -*(A::Transpose{<:Any,<:AbstractTriangular}, B::Transpose{<:Any,<:AbstractMatrix}) = A * copy(B) -*(A::Transpose{<:Any,<:AbstractMatrix}, B::Transpose{<:Any,<:AbstractTriangular}) = copy(A) * B +*(A::Transpose{<:Any,<:AbstractTriangular}, B::Transpose{<:Any,<:ArrayLike{2}}) = A * copy(B) +*(A::Transpose{<:Any,<:ArrayLike{2}}, B::Transpose{<:Any,<:AbstractTriangular}) = copy(A) * B # Complex matrix power for upper triangular factor, see: # Higham and Lin, "A Schur-Padé algorithm for fractional powers of a Matrix", @@ -2632,24 +2632,24 @@ end factorize(A::AbstractTriangular) = A -# disambiguation methods: *(AbstractTriangular, Adj/Trans of AbstractVector) -*(A::AbstractTriangular, B::Adjoint{<:Any,<:AbstractVector}) = adjoint(adjoint(B) * adjoint(A)) -*(A::AbstractTriangular, B::Transpose{<:Any,<:AbstractVector}) = transpose(transpose(B) * transpose(A)) +# disambiguation methods: *(AbstractTriangular, Adj/Trans of ArrayLike{1}) +*(A::AbstractTriangular, B::Adjoint{<:Any,<:ArrayLike{1}}) = adjoint(adjoint(B) * adjoint(A)) +*(A::AbstractTriangular, B::Transpose{<:Any,<:ArrayLike{1}}) = transpose(transpose(B) * transpose(A)) # disambiguation methods: *(Adj/Trans of AbstractTriangular, Trans/Ajd of AbstractTriangular) *(A::Adjoint{<:Any,<:AbstractTriangular}, B::Transpose{<:Any,<:AbstractTriangular}) = copy(A) * B *(A::Transpose{<:Any,<:AbstractTriangular}, B::Adjoint{<:Any,<:AbstractTriangular}) = copy(A) * B # disambiguation methods: *(Adj/Trans of AbstractTriangular, Adj/Trans of AbsVec or AbsMat) -*(A::Adjoint{<:Any,<:AbstractTriangular}, B::Adjoint{<:Any,<:AbstractVector}) = adjoint(adjoint(B) * adjoint(A)) -*(A::Adjoint{<:Any,<:AbstractTriangular}, B::Transpose{<:Any,<:AbstractMatrix}) = A * copy(B) -*(A::Adjoint{<:Any,<:AbstractTriangular}, B::Transpose{<:Any,<:AbstractVector}) = transpose(transpose(B) * transpose(A)) -*(A::Transpose{<:Any,<:AbstractTriangular}, B::Transpose{<:Any,<:AbstractVector}) = transpose(transpose(B) * transpose(A)) -*(A::Transpose{<:Any,<:AbstractTriangular}, B::Adjoint{<:Any,<:AbstractVector}) = adjoint(adjoint(B) * adjoint(A)) -*(A::Transpose{<:Any,<:AbstractTriangular}, B::Adjoint{<:Any,<:AbstractMatrix}) = A * copy(B) +*(A::Adjoint{<:Any,<:AbstractTriangular}, B::Adjoint{<:Any,<:ArrayLike{1}}) = adjoint(adjoint(B) * adjoint(A)) +*(A::Adjoint{<:Any,<:AbstractTriangular}, B::Transpose{<:Any,<:ArrayLike{2}}) = A * 
copy(B) +*(A::Adjoint{<:Any,<:AbstractTriangular}, B::Transpose{<:Any,<:ArrayLike{1}}) = transpose(transpose(B) * transpose(A)) +*(A::Transpose{<:Any,<:AbstractTriangular}, B::Transpose{<:Any,<:ArrayLike{1}}) = transpose(transpose(B) * transpose(A)) +*(A::Transpose{<:Any,<:AbstractTriangular}, B::Adjoint{<:Any,<:ArrayLike{1}}) = adjoint(adjoint(B) * adjoint(A)) +*(A::Transpose{<:Any,<:AbstractTriangular}, B::Adjoint{<:Any,<:ArrayLike{2}}) = A * copy(B) # disambiguation methods: *(Adj/Trans of AbsVec or AbsMat, Adj/Trans of AbstractTriangular) -*(A::Adjoint{<:Any,<:AbstractVector}, B::Transpose{<:Any,<:AbstractTriangular}) = adjoint(adjoint(B) * adjoint(A)) -*(A::Adjoint{<:Any,<:AbstractMatrix}, B::Transpose{<:Any,<:AbstractTriangular}) = copy(A) * B -*(A::Transpose{<:Any,<:AbstractVector}, B::Adjoint{<:Any,<:AbstractTriangular}) = transpose(transpose(B) * transpose(A)) -*(A::Transpose{<:Any,<:AbstractMatrix}, B::Adjoint{<:Any,<:AbstractTriangular}) = copy(A) * B +*(A::Adjoint{<:Any,<:ArrayLike{1}}, B::Transpose{<:Any,<:AbstractTriangular}) = adjoint(adjoint(B) * adjoint(A)) +*(A::Adjoint{<:Any,<:ArrayLike{2}}, B::Transpose{<:Any,<:AbstractTriangular}) = copy(A) * B +*(A::Transpose{<:Any,<:ArrayLike{1}}, B::Adjoint{<:Any,<:AbstractTriangular}) = transpose(transpose(B) * transpose(A)) +*(A::Transpose{<:Any,<:ArrayLike{2}}, B::Adjoint{<:Any,<:AbstractTriangular}) = copy(A) * B # disambiguation methods: /(Adjoint of AbsVec, <:AbstractTriangular) /(u::AdjointAbsVec, A::Union{LowerTriangular,UpperTriangular}) = adjoint(adjoint(A) \ u.parent) diff --git a/stdlib/LinearAlgebra/src/tridiag.jl b/stdlib/LinearAlgebra/src/tridiag.jl index 9ebed9ed01297..2a647aa9aac28 100644 --- a/stdlib/LinearAlgebra/src/tridiag.jl +++ b/stdlib/LinearAlgebra/src/tridiag.jl @@ -16,7 +16,7 @@ struct SymTridiagonal{T,V<:AbstractVector{T}} <: AbstractMatrix{T} end """ - SymTridiagonal(dv::V, ev::V) where V <: AbstractVector + SymTridiagonal(dv::V, ev::V) where V <: ArrayLike{1} Construct a symmetric tridiagonal matrix from the diagonal (`dv`) and first sub/super-diagonal (`ev`), respectively. The result is of type `SymTridiagonal` @@ -48,13 +48,13 @@ julia> SymTridiagonal(dv, ev) """ SymTridiagonal(dv::V, ev::V) where {T,V<:AbstractVector{T}} = SymTridiagonal{T}(dv, ev) SymTridiagonal{T}(dv::V, ev::V) where {T,V<:AbstractVector{T}} = SymTridiagonal{T,V}(dv, ev) -function SymTridiagonal{T}(dv::AbstractVector, ev::AbstractVector) where {T} +function SymTridiagonal{T}(dv::ArrayLike{1}, ev::ArrayLike{1}) where {T} SymTridiagonal(convert(AbstractVector{T}, dv)::AbstractVector{T}, convert(AbstractVector{T}, ev)::AbstractVector{T}) end """ - SymTridiagonal(A::AbstractMatrix) + SymTridiagonal(A::ArrayLike{2}) Construct a symmetric tridiagonal matrix from the diagonal and first sub/super-diagonal, of the symmetric matrix `A`. 
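Both `SymTridiagonal` constructors touched here (from the two bands, and from a full matrix) can be exercised in a couple of lines; a sketch with arbitrary example data:

```julia
using LinearAlgebra

dv = [1.0, 2.0, 3.0]    # diagonal
ev = [4.0, 5.0]         # first sub/super-diagonal (shared, by symmetry)
S  = SymTridiagonal(dv, ev)

@assert S == transpose(S)                # symmetric by construction
@assert S == SymTridiagonal(Matrix(S))   # matrix constructor reads the bands back
```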
@@ -74,7 +74,7 @@ julia> SymTridiagonal(A) ⋅ 5 6 ``` """ -function SymTridiagonal(A::AbstractMatrix) +function SymTridiagonal(A::ArrayLike{2}) if diag(A,1) == diag(A,-1) SymTridiagonal(diag(A,0), diag(A,1)) else @@ -202,7 +202,7 @@ end return C end -function dot(x::AbstractVector, S::SymTridiagonal, y::AbstractVector) +function dot(x::ArrayLike{1}, S::SymTridiagonal, y::ArrayLike{1}) require_one_based_indexing(x, y) nx, ny = length(x), length(y) (nx == size(S, 1) == ny) || throw(DimensionMismatch()) @@ -226,8 +226,8 @@ end (\)(T::SymTridiagonal, B::StridedVecOrMat) = ldlt(T)\B # division with optional shift for use in shifted-Hessenberg solvers (hessenberg.jl): -ldiv!(A::SymTridiagonal, B::AbstractVecOrMat; shift::Number=false) = ldiv!(ldlt(A, shift=shift), B) -rdiv!(B::AbstractVecOrMat, A::SymTridiagonal; shift::Number=false) = rdiv!(B, ldlt(A, shift=shift)) +ldiv!(A::SymTridiagonal, B::VectorOrMatrixLike; shift::Number=false) = ldiv!(ldlt(A, shift=shift), B) +rdiv!(B::VectorOrMatrixLike, A::SymTridiagonal; shift::Number=false) = rdiv!(B, ldlt(A, shift=shift)) eigen!(A::SymTridiagonal{<:BlasReal}) = Eigen(LAPACK.stegr!('V', A.dv, A.ev)...) eigen(A::SymTridiagonal{T}) where T = eigen!(copy_oftype(A, eigtype(T))) @@ -435,7 +435,7 @@ struct Tridiagonal{T,V<:AbstractVector{T}} <: AbstractMatrix{T} end """ - Tridiagonal(dl::V, d::V, du::V) where V <: AbstractVector + Tridiagonal(dl::V, d::V, du::V) where V <: ArrayLike{1} Construct a tridiagonal matrix from the first subdiagonal, diagonal, and first superdiagonal, respectively. The result is of type `Tridiagonal` and provides efficient specialized linear @@ -461,7 +461,7 @@ julia> Tridiagonal(dl, d, du) """ Tridiagonal(dl::V, d::V, du::V) where {T,V<:AbstractVector{T}} = Tridiagonal{T,V}(dl, d, du) Tridiagonal(dl::V, d::V, du::V, du2::V) where {T,V<:AbstractVector{T}} = Tridiagonal{T,V}(dl, d, du, du2) -function Tridiagonal{T}(dl::AbstractVector, d::AbstractVector, du::AbstractVector) where {T} +function Tridiagonal{T}(dl::ArrayLike{1}, d::ArrayLike{1}, du::ArrayLike{1}) where {T} Tridiagonal(map(x->convert(AbstractVector{T}, x), (dl, d, du))...) 
end @@ -488,7 +488,7 @@ julia> Tridiagonal(A) ⋅ ⋅ 3 4 ``` """ -Tridiagonal(A::AbstractMatrix) = Tridiagonal(diag(A,-1), diag(A,0), diag(A,1)) +Tridiagonal(A::ArrayLike{2}) = Tridiagonal(diag(A,-1), diag(A,0), diag(A,1)) Tridiagonal(A::Tridiagonal) = A Tridiagonal{T}(A::Tridiagonal{T}) where {T} = A @@ -679,7 +679,7 @@ end Base._sum(A::Tridiagonal, ::Colon) = sum(A.d) + sum(A.dl) + sum(A.du) Base._sum(A::SymTridiagonal, ::Colon) = sum(A.dv) + 2sum(A.ev) -function dot(x::AbstractVector, A::Tridiagonal, y::AbstractVector) +function dot(x::ArrayLike{1}, A::Tridiagonal, y::ArrayLike{1}) require_one_based_indexing(x, y) nx, ny = length(x), length(y) (nx == size(A, 1) == ny) || throw(DimensionMismatch()) diff --git a/stdlib/LinearAlgebra/src/uniformscaling.jl b/stdlib/LinearAlgebra/src/uniformscaling.jl index 862abaed29adf..5ff74852ba0d2 100644 --- a/stdlib/LinearAlgebra/src/uniformscaling.jl +++ b/stdlib/LinearAlgebra/src/uniformscaling.jl @@ -114,13 +114,13 @@ isposdef(J::UniformScaling) = isposdef(J.λ) (+)(J1::UniformScaling, J2::UniformScaling) = UniformScaling(J1.λ+J2.λ) (+)(B::BitArray{2}, J::UniformScaling) = Array(B) + J (+)(J::UniformScaling, B::BitArray{2}) = J + Array(B) -(+)(J::UniformScaling, A::AbstractMatrix) = A + J +(+)(J::UniformScaling, A::ArrayLike{2}) = A + J (-)(J::UniformScaling) = UniformScaling(-J.λ) (-)(J1::UniformScaling, J2::UniformScaling) = UniformScaling(J1.λ-J2.λ) (-)(B::BitArray{2}, J::UniformScaling) = Array(B) - J (-)(J::UniformScaling, B::BitArray{2}) = J - Array(B) -(-)(A::AbstractMatrix, J::UniformScaling) = A + (-J) +(-)(A::ArrayLike{2}, J::UniformScaling) = A + (-J) # Unit{Lower/Upper}Triangular matrices become {Lower/Upper}Triangular under # addition with a UniformScaling @@ -160,7 +160,7 @@ function (-)(J::UniformScaling{<:Complex}, A::Hermitian) return B end -function (+)(A::AbstractMatrix, J::UniformScaling) +function (+)(A::ArrayLike{2}, J::UniformScaling) checksquare(A) B = copy_oftype(A, Base._return_type(+, Tuple{eltype(A), typeof(J)})) @inbounds for i in axes(A, 1) @@ -169,7 +169,7 @@ function (+)(A::AbstractMatrix, J::UniformScaling) return B end -function (-)(J::UniformScaling, A::AbstractMatrix) +function (-)(J::UniformScaling, A::ArrayLike{2}) checksquare(A) B = convert(AbstractMatrix{Base._return_type(+, Tuple{eltype(A), typeof(J)})}, -A) @inbounds for i in axes(A, 1) @@ -194,35 +194,35 @@ end *(J1::UniformScaling, J2::UniformScaling) = UniformScaling(J1.λ*J2.λ) *(B::BitArray{2}, J::UniformScaling) = *(Array(B), J::UniformScaling) *(J::UniformScaling, B::BitArray{2}) = *(J::UniformScaling, Array(B)) -*(A::AbstractMatrix, J::UniformScaling) = A*J.λ -*(J::UniformScaling, A::AbstractVecOrMat) = J.λ*A +*(A::ArrayLike{2}, J::UniformScaling) = A*J.λ +*(J::UniformScaling, A::VectorOrMatrixLike) = J.λ*A *(x::Number, J::UniformScaling) = UniformScaling(x*J.λ) *(J::UniformScaling, x::Number) = UniformScaling(J.λ*x) /(J1::UniformScaling, J2::UniformScaling) = J2.λ == 0 ? throw(SingularException(1)) : UniformScaling(J1.λ/J2.λ) -/(J::UniformScaling, A::AbstractMatrix) = lmul!(J.λ, inv(A)) -/(A::AbstractMatrix, J::UniformScaling) = J.λ == 0 ? throw(SingularException(1)) : A/J.λ +/(J::UniformScaling, A::ArrayLike{2}) = lmul!(J.λ, inv(A)) +/(A::ArrayLike{2}, J::UniformScaling) = J.λ == 0 ? throw(SingularException(1)) : A/J.λ /(J::UniformScaling, x::Number) = UniformScaling(J.λ/x) \(J1::UniformScaling, J2::UniformScaling) = J1.λ == 0 ? 
throw(SingularException(1)) : UniformScaling(J1.λ\J2.λ) \(A::Union{Bidiagonal{T},AbstractTriangular{T}}, J::UniformScaling) where {T<:Number} = rmul!(inv(A), J.λ) -\(J::UniformScaling, A::AbstractVecOrMat) = J.λ == 0 ? throw(SingularException(1)) : J.λ\A -\(A::AbstractMatrix, J::UniformScaling) = rmul!(inv(A), J.λ) +\(J::UniformScaling, A::VectorOrMatrixLike) = J.λ == 0 ? throw(SingularException(1)) : J.λ\A +\(A::ArrayLike{2}, J::UniformScaling) = rmul!(inv(A), J.λ) \(F::Factorization, J::UniformScaling) = F \ J(size(F,1)) \(x::Number, J::UniformScaling) = UniformScaling(x\J.λ) -@inline mul!(C::AbstractMatrix, A::AbstractMatrix, J::UniformScaling, alpha::Number, beta::Number) = +@inline mul!(C::ArrayLike{2}, A::ArrayLike{2}, J::UniformScaling, alpha::Number, beta::Number) = mul!(C, A, J.λ, alpha, beta) -@inline mul!(C::AbstractVecOrMat, J::UniformScaling, B::AbstractVecOrMat, alpha::Number, beta::Number) = +@inline mul!(C::VectorOrMatrixLike, J::UniformScaling, B::VectorOrMatrixLike, alpha::Number, beta::Number) = mul!(C, J.λ, B, alpha, beta) -rmul!(A::AbstractMatrix, J::UniformScaling) = rmul!(A, J.λ) -lmul!(J::UniformScaling, B::AbstractVecOrMat) = lmul!(J.λ, B) -rdiv!(A::AbstractMatrix, J::UniformScaling) = rdiv!(A, J.λ) -ldiv!(J::UniformScaling, B::AbstractVecOrMat) = ldiv!(J.λ, B) -ldiv!(Y::AbstractVecOrMat, J::UniformScaling, B::AbstractVecOrMat) = (Y .= J.λ .\ B) +rmul!(A::ArrayLike{2}, J::UniformScaling) = rmul!(A, J.λ) +lmul!(J::UniformScaling, B::VectorOrMatrixLike) = lmul!(J.λ, B) +rdiv!(A::ArrayLike{2}, J::UniformScaling) = rdiv!(A, J.λ) +ldiv!(J::UniformScaling, B::VectorOrMatrixLike) = ldiv!(J.λ, B) +ldiv!(Y::VectorOrMatrixLike, J::UniformScaling, B::VectorOrMatrixLike) = (Y .= J.λ .\ B) Broadcast.broadcasted(::typeof(*), x::Number,J::UniformScaling) = UniformScaling(x*J.λ) Broadcast.broadcasted(::typeof(*), J::UniformScaling,x::Number) = UniformScaling(J.λ*x) @@ -240,8 +240,8 @@ end ==(J1::UniformScaling,J2::UniformScaling) = (J1.λ == J2.λ) ## equality comparison with UniformScaling -==(J::UniformScaling, A::AbstractMatrix) = A == J -function ==(A::AbstractMatrix, J::UniformScaling) +==(J::UniformScaling, A::ArrayLike{2}) = A == J +function ==(A::ArrayLike{2}, J::UniformScaling) require_one_based_indexing(A) size(A, 1) == size(A, 2) || return false iszero(J.λ) && return iszero(A) @@ -262,7 +262,7 @@ function isapprox(J1::UniformScaling{T}, J2::UniformScaling{S}; atol::Real=0, rtol::Real=Base.rtoldefault(T,S,atol), nans::Bool=false) where {T<:Number,S<:Number} isapprox(J1.λ, J2.λ, rtol=rtol, atol=atol, nans=nans) end -function isapprox(J::UniformScaling, A::AbstractMatrix; +function isapprox(J::UniformScaling, A::ArrayLike{2}; atol::Real = 0, rtol::Real = Base.rtoldefault(promote_leaf_eltypes(A), eltype(J), atol), nans::Bool = false, norm::Function = norm) @@ -272,10 +272,10 @@ function isapprox(J::UniformScaling, A::AbstractMatrix; norm(Diagonal(fill(J.λ, n))) return norm(A - J) <= max(atol, rtol * max(norm(A), normJ)) end -isapprox(A::AbstractMatrix, J::UniformScaling; kwargs...) = isapprox(J, A; kwargs...) +isapprox(A::ArrayLike{2}, J::UniformScaling; kwargs...) = isapprox(J, A; kwargs...) """ - copyto!(dest::AbstractMatrix, src::UniformScaling) + copyto!(dest::ArrayLike{2}, src::UniformScaling) Copies a [`UniformScaling`](@ref) onto a matrix. @@ -283,7 +283,7 @@ Copies a [`UniformScaling`](@ref) onto a matrix. In Julia 1.0 this method only supported a square destination matrix. Julia 1.1. added support for a rectangular matrix. 
""" -function copyto!(A::AbstractMatrix, J::UniformScaling) +function copyto!(A::ArrayLike{2}, J::UniformScaling) require_one_based_indexing(A) fill!(A, 0) λ = J.λ @@ -303,7 +303,7 @@ end # so that the same promotion code can be used for hvcat. We pass the type T # so that we can re-use this code for sparse-matrix hcat etcetera. promote_to_arrays_(n::Int, ::Type{Matrix}, J::UniformScaling{T}) where {T} = copyto!(Matrix{T}(undef, n,n), J) -promote_to_arrays_(n::Int, ::Type, A::AbstractVecOrMat) = A +promote_to_arrays_(n::Int, ::Type, A::VectorOrMatrixLike) = A promote_to_arrays(n,k, ::Type) = () promote_to_arrays(n,k, ::Type{T}, A) where {T} = (promote_to_arrays_(n[k], T, A),) promote_to_arrays(n,k, ::Type{T}, A, B) where {T} = @@ -312,11 +312,11 @@ promote_to_arrays(n,k, ::Type{T}, A, B, C) where {T} = (promote_to_arrays_(n[k], T, A), promote_to_arrays_(n[k+1], T, B), promote_to_arrays_(n[k+2], T, C)) promote_to_arrays(n,k, ::Type{T}, A, B, Cs...) where {T} = (promote_to_arrays_(n[k], T, A), promote_to_arrays_(n[k+1], T, B), promote_to_arrays(n,k+2, T, Cs...)...) -promote_to_array_type(A::Tuple{Vararg{Union{AbstractVecOrMat,UniformScaling}}}) = Matrix +promote_to_array_type(A::Tuple{Vararg{Union{VectorOrMatrixLike,UniformScaling}}}) = Matrix for (f,dim,name) in ((:hcat,1,"rows"), (:vcat,2,"cols")) @eval begin - function $f(A::Union{AbstractVecOrMat,UniformScaling}...) + function $f(A::Union{VectorOrMatrixLike,UniformScaling}...) n = -1 for a in A if !isa(a, UniformScaling) @@ -335,7 +335,7 @@ for (f,dim,name) in ((:hcat,1,"rows"), (:vcat,2,"cols")) end -function hvcat(rows::Tuple{Vararg{Int}}, A::Union{AbstractVecOrMat,UniformScaling}...) +function hvcat(rows::Tuple{Vararg{Int}}, A::Union{VectorOrMatrixLike,UniformScaling}...) require_one_based_indexing(A...) 
nr = length(rows) sum(rows) == length(A) || throw(ArgumentError("mismatch between row sizes and number of arguments")) @@ -411,6 +411,6 @@ Array(s::UniformScaling, dims::Dims{2}) = Matrix(s, dims) Diagonal{T}(s::UniformScaling, m::Integer) where {T} = Diagonal{T}(fill(T(s.λ), m)) Diagonal(s::UniformScaling, m::Integer) = Diagonal{eltype(s)}(s, m) -dot(x::AbstractVector, J::UniformScaling, y::AbstractVector) = dot(x, J.λ, y) -dot(x::AbstractVector, a::Number, y::AbstractVector) = sum(t -> dot(t[1], a, t[2]), zip(x, y)) -dot(x::AbstractVector, a::Union{Real,Complex}, y::AbstractVector) = a*dot(x, y) +dot(x::ArrayLike{1}, J::UniformScaling, y::ArrayLike{1}) = dot(x, J.λ, y) +dot(x::ArrayLike{1}, a::Number, y::ArrayLike{1}) = sum(t -> dot(t[1], a, t[2]), zip(x, y)) +dot(x::ArrayLike{1}, a::Union{Real,Complex}, y::ArrayLike{1}) = a*dot(x, y) diff --git a/stdlib/LinearAlgebra/test/addmul.jl b/stdlib/LinearAlgebra/test/addmul.jl index 42529f3f4f334..3bbbf9684f9f5 100644 --- a/stdlib/LinearAlgebra/test/addmul.jl +++ b/stdlib/LinearAlgebra/test/addmul.jl @@ -23,7 +23,7 @@ end constructor_of(::Type{T}) where T = getfield(parentmodule(T), nameof(T)) -function _rand(A::Type{<: AbstractArray}, shape) +function _rand(A::Type{<: ArrayLike}, shape) data = _rand(Array{eltype(A)}, shape) T = constructor_of(A) if A <: Union{Bidiagonal, Hermitian, Symmetric} @@ -75,7 +75,7 @@ mattypes = [ UpperTriangular, ] -isnanfillable(::AbstractArray) = false +isnanfillable(::ArrayLike) = false isnanfillable(::Array{<:AbstractFloat}) = true isnanfillable(A::AbstractArray{<:AbstractFloat}) = parent(A) isa Array diff --git a/stdlib/Markdown/src/Common/block.jl b/stdlib/Markdown/src/Common/block.jl index b9dedbb2cac35..d8d5644fe745c 100644 --- a/stdlib/Markdown/src/Common/block.jl +++ b/stdlib/Markdown/src/Common/block.jl @@ -252,8 +252,8 @@ mutable struct List ordered::Int # `-1` is unordered, `>= 0` is ordered. loose::Bool # TODO: Renderers should use this field end -List(x::AbstractVector, b::Integer) = List(x, b, false) -List(x::AbstractVector) = List(x, -1) +List(x::ArrayLike{1}, b::Integer) = List(x, b, false) +List(x::ArrayLike{1}) = List(x, -1) List(b::Integer) = List(Any[], b) List(xs...) 
= List(vcat(xs...)) diff --git a/stdlib/Markdown/src/parse/parse.jl b/stdlib/Markdown/src/parse/parse.jl index 0da5f98a9a5ee..6841e9290a96e 100644 --- a/stdlib/Markdown/src/parse/parse.jl +++ b/stdlib/Markdown/src/parse/parse.jl @@ -4,7 +4,7 @@ mutable struct MD content::Vector{Any} meta::Dict{Any, Any} - MD(content::AbstractVector, meta::Dict = Dict()) = + MD(content::ArrayLike{1}, meta::Dict = Dict()) = new(content, meta) end diff --git a/stdlib/Random/src/Random.jl b/stdlib/Random/src/Random.jl index 5daa9a6733655..a39bde128db6c 100644 --- a/stdlib/Random/src/Random.jl +++ b/stdlib/Random/src/Random.jl @@ -260,13 +260,13 @@ rand(::Type{X}) where {X} = rand(default_rng(), X) #### arrays -rand!(A::AbstractArray{T}, X) where {T} = rand!(default_rng(), A, X) +rand!(A::ArrayLike, X) = rand!(default_rng(), A, X) rand!(A::AbstractArray{T}, ::Type{X}=T) where {T,X} = rand!(default_rng(), A, X) -rand!(rng::AbstractRNG, A::AbstractArray{T}, X) where {T} = rand!(rng, A, Sampler(rng, X)) +rand!(rng::AbstractRNG, A::ArrayLike, X) = rand!(rng, A, Sampler(rng, X)) rand!(rng::AbstractRNG, A::AbstractArray{T}, ::Type{X}=T) where {T,X} = rand!(rng, A, Sampler(rng, X)) -function rand!(rng::AbstractRNG, A::AbstractArray{T}, sp::Sampler) where T +function rand!(rng::AbstractRNG, A::ArrayLike, sp::Sampler) for i in eachindex(A) @inbounds A[i] = rand(rng, sp) end diff --git a/stdlib/Random/src/generation.jl b/stdlib/Random/src/generation.jl index caa5e3cde3f45..594c70417d4d0 100644 --- a/stdlib/Random/src/generation.jl +++ b/stdlib/Random/src/generation.jl @@ -337,12 +337,12 @@ function rand(rng::AbstractRNG, sp::SamplerBigInt) end -## random values from AbstractArray +## random values from ArrayLike -Sampler(::Type{RNG}, r::AbstractArray, n::Repetition) where {RNG<:AbstractRNG} = +Sampler(::Type{RNG}, r::ArrayLike, n::Repetition) where {RNG<:AbstractRNG} = SamplerSimple(r, Sampler(RNG, firstindex(r):lastindex(r), n)) -rand(rng::AbstractRNG, sp::SamplerSimple{<:AbstractArray,<:Sampler}) = +rand(rng::AbstractRNG, sp::SamplerSimple{<:ArrayLike,<:Sampler}) = @inbounds return sp[][rand(rng, sp.data)] diff --git a/stdlib/Random/src/misc.jl b/stdlib/Random/src/misc.jl index 0f592ab01b270..8ccc9fc4ba683 100644 --- a/stdlib/Random/src/misc.jl +++ b/stdlib/Random/src/misc.jl @@ -84,7 +84,7 @@ end # each element of A is included in S with independent probability p. # (Note that this is different from the problem of finding a random # size-m subset of A where m is fixed!) -function randsubseq!(r::AbstractRNG, S::AbstractArray, A::AbstractArray, p::Real) +function randsubseq!(r::AbstractRNG, S::ArrayLike, A::ArrayLike, p::Real) require_one_based_indexing(S, A) 0 <= p <= 1 || throw(ArgumentError("probability $p not in [0,1]")) n = length(A) @@ -140,7 +140,7 @@ julia> S 8 ``` """ -randsubseq!(S::AbstractArray, A::AbstractArray, p::Real) = randsubseq!(default_rng(), S, A, p) +randsubseq!(S::ArrayLike, A::ArrayLike, p::Real) = randsubseq!(default_rng(), S, A, p) randsubseq(r::AbstractRNG, A::AbstractArray{T}, p::Real) where {T} = randsubseq!(r, T[], A, p) @@ -163,7 +163,7 @@ julia> randsubseq(rng, collect(1:8), 0.3) 8 ``` """ -randsubseq(A::AbstractArray, p::Real) = randsubseq(default_rng(), A, p) +randsubseq(A::ArrayLike, p::Real) = randsubseq(default_rng(), A, p) ## rand Less Than Masked 52 bits (helper function) @@ -174,7 +174,7 @@ ltm52(n::Int, mask::Int=nextpow(2, n)-1) = LessThan(n-1, Masked(mask, UInt52Raw( ## shuffle & shuffle! 
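For the `Random` methods being retyped here the observable API is unchanged; a brief sketch of `randsubseq` and `shuffle`/`shuffle!` with a seeded generator and arbitrary example data (the statistical check is deliberately loose):

```julia
using Random, Statistics

rng = MersenneTwister(2020)

# randsubseq keeps each element independently with probability p, so the
# result length is random with mean p*length(A); it is not a fixed-size sample.
lens = [length(randsubseq(rng, 1:1000, 0.3)) for _ in 1:200]
@assert isapprox(mean(lens), 300; atol=30)

# shuffle returns a permuted copy; shuffle! permutes its argument in place.
v = collect(1:10)
@assert sort(shuffle(rng, v)) == v
shuffle!(rng, v)
@assert sort(v) == collect(1:10)
```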
""" - shuffle!([rng=GLOBAL_RNG,] v::AbstractArray) + shuffle!([rng=GLOBAL_RNG,] v::ArrayLike) In-place version of [`shuffle`](@ref): randomly permute `v` in-place, optionally supplying the random-number generator `rng`. @@ -203,7 +203,7 @@ julia> shuffle!(rng, Vector(1:16)) 13 ``` """ -function shuffle!(r::AbstractRNG, a::AbstractArray) +function shuffle!(r::AbstractRNG, a::ArrayLike) require_one_based_indexing(a) n = length(a) n <= 1 && return a # nextpow below won't work with n == 0 @@ -217,10 +217,10 @@ function shuffle!(r::AbstractRNG, a::AbstractArray) return a end -shuffle!(a::AbstractArray) = shuffle!(default_rng(), a) +shuffle!(a::ArrayLike) = shuffle!(default_rng(), a) """ - shuffle([rng=GLOBAL_RNG,] v::AbstractArray) + shuffle([rng=GLOBAL_RNG,] v::ArrayLike) Return a randomly permuted copy of `v`. The optional `rng` argument specifies a random number generator (see [Random Numbers](@ref)). @@ -245,8 +245,8 @@ julia> shuffle(rng, Vector(1:10)) 8 ``` """ -shuffle(r::AbstractRNG, a::AbstractArray) = shuffle!(r, copymutable(a)) -shuffle(a::AbstractArray) = shuffle(default_rng(), a) +shuffle(r::AbstractRNG, a::ArrayLike) = shuffle!(r, copymutable(a)) +shuffle(a::ArrayLike) = shuffle(default_rng(), a) ## randperm & randperm! diff --git a/stdlib/Random/src/normal.jl b/stdlib/Random/src/normal.jl index 63fb33df642a3..56b18fed6eafc 100644 --- a/stdlib/Random/src/normal.jl +++ b/stdlib/Random/src/normal.jl @@ -119,7 +119,7 @@ end ## arrays & other scalar methods """ - randn!([rng=GLOBAL_RNG], A::AbstractArray) -> A + randn!([rng=GLOBAL_RNG], A::ArrayLike) -> A Fill the array `A` with normally-distributed (mean 0, standard deviation 1) random numbers. Also see the [`rand`](@ref) function. @@ -140,7 +140,7 @@ julia> randn!(rng, zeros(5)) function randn! end """ - randexp!([rng=GLOBAL_RNG], A::AbstractArray) -> A + randexp!([rng=GLOBAL_RNG], A::ArrayLike) -> A Fill the array `A` with random numbers following the exponential distribution (with scale 1). @@ -175,7 +175,7 @@ for randfun in [:randn, :randexp] A end - $randfun!(A::AbstractArray) = $randfun!(default_rng(), A) + $randfun!(A::ArrayLike) = $randfun!(default_rng(), A) # generating arrays $randfun(rng::AbstractRNG, ::Type{T}, dims::Dims ) where {T} = $randfun!(rng, Array{T}(undef, dims)) diff --git a/stdlib/Serialization/test/runtests.jl b/stdlib/Serialization/test/runtests.jl index 9e08cfad490b0..0f4c3e099691a 100644 --- a/stdlib/Serialization/test/runtests.jl +++ b/stdlib/Serialization/test/runtests.jl @@ -266,7 +266,7 @@ end # Objects that have a SubArray as a type in a type-parameter list module ArrayWrappers -struct ArrayWrapper{T,N,A<:AbstractArray} <: AbstractArray{T,N} +struct ArrayWrapper{T,N,A<:ArrayLike} <: AbstractArray{T,N} data::A end ArrayWrapper(data::AbstractArray{T,N}) where {T,N} = ArrayWrapper{T,N,typeof(data)}(data) diff --git a/stdlib/SharedArrays/src/SharedArrays.jl b/stdlib/SharedArrays/src/SharedArrays.jl index c80a74e617227..d7960b2ee569c 100644 --- a/stdlib/SharedArrays/src/SharedArrays.jl +++ b/stdlib/SharedArrays/src/SharedArrays.jl @@ -332,7 +332,7 @@ indexpids(S::SharedArray) = S.pidx Returns the actual `Array` object backing `S`. 
""" sdata(S::SharedArray) = S.s -sdata(A::AbstractArray) = A +sdata(A::ArrayLike) = A """ localindices(S::SharedArray) diff --git a/stdlib/SparseArrays/src/SparseArrays.jl b/stdlib/SparseArrays/src/SparseArrays.jl index 6d30f3ab761e4..3411d2807b5f0 100644 --- a/stdlib/SparseArrays/src/SparseArrays.jl +++ b/stdlib/SparseArrays/src/SparseArrays.jl @@ -6,7 +6,7 @@ Support for sparse arrays. Provides `AbstractSparseArray` and subtypes. module SparseArrays using Base: ReshapedArray, promote_op, setindex_shape_check, to_shape, tail, - require_one_based_indexing + require_one_based_indexing, VectorOrMatrixLike using Base.Sort: Forward using LinearAlgebra diff --git a/stdlib/SparseArrays/src/abstractsparse.jl b/stdlib/SparseArrays/src/abstractsparse.jl index ccc9bbec78814..164fbc78925a1 100644 --- a/stdlib/SparseArrays/src/abstractsparse.jl +++ b/stdlib/SparseArrays/src/abstractsparse.jl @@ -50,7 +50,7 @@ julia> issparse(Array(sv)) false ``` """ -issparse(A::AbstractArray) = false +issparse(A::ArrayLike) = false issparse(S::AbstractSparseArray) = true issparse(S::LinearAlgebra.Symmetric{<:Any,<:AbstractSparseMatrix}) = true diff --git a/stdlib/SparseArrays/src/higherorderfns.jl b/stdlib/SparseArrays/src/higherorderfns.jl index e01be3697097c..222b840cf6f0d 100644 --- a/stdlib/SparseArrays/src/higherorderfns.jl +++ b/stdlib/SparseArrays/src/higherorderfns.jl @@ -81,11 +81,11 @@ Broadcast.BroadcastStyle(::PromoteToSparse, ::LinearAlgebra.StructuredMatrixStyl Broadcast.BroadcastStyle(::PromoteToSparse, ::SPVM) = PromoteToSparse() Broadcast.BroadcastStyle(::PromoteToSparse, ::Broadcast.Style{Tuple}) = Broadcast.DefaultArrayStyle{2}() -# FIXME: currently sparse broadcasts are only well-tested on known array types, while any AbstractArray +# FIXME: currently sparse broadcasts are only well-tested on known array types, while any ArrayLike # could report itself as a DefaultArrayStyle(). # See https://github.com/JuliaLang/julia/pull/23939#pullrequestreview-72075382 for more details is_supported_sparse_broadcast() = true -is_supported_sparse_broadcast(::AbstractArray, rest...) = false +is_supported_sparse_broadcast(::ArrayLike, rest...) = false is_supported_sparse_broadcast(::AbstractSparseArray, rest...) = is_supported_sparse_broadcast(rest...) is_supported_sparse_broadcast(::StructuredMatrix, rest...) = is_supported_sparse_broadcast(rest...) is_supported_sparse_broadcast(::Array, rest...) = is_supported_sparse_broadcast(rest...) @@ -199,7 +199,7 @@ end @inline _sumnnzs(A, Bs...) = nnz(A) + _sumnnzs(Bs...) @inline _iszero(x) = x == 0 @inline _iszero(x::Number) = Base.iszero(x) -@inline _iszero(x::AbstractArray) = Base.iszero(x) +@inline _iszero(x::ArrayLike) = Base.iszero(x) @inline _zeros_eltypes(A) = (zero(eltype(A)),) @inline _zeros_eltypes(A, Bs...) = (zero(eltype(A)), _zeros_eltypes(Bs...)...) 
@inline _promote_indtype(A) = indtype(A) @@ -1073,12 +1073,12 @@ end capturescalars((args...)->f(T, args...), Base.tail(mixedargs)) @inline capturescalars(f, mixedargs::Tuple{SparseVecOrMat, Ref{Type{T}}, Vararg{Any}}) where {T} = capturescalars((a1, args...)->f(a1, T, args...), (mixedargs[1], Base.tail(Base.tail(mixedargs))...)) -@inline capturescalars(f, mixedargs::Tuple{Union{Ref,AbstractArray{<:Any,0}}, Ref{Type{T}}, Vararg{Any}}) where {T} = +@inline capturescalars(f, mixedargs::Tuple{Union{Ref,ArrayLike{0}}, Ref{Type{T}}, Vararg{Any}}) where {T} = capturescalars((args...)->f(mixedargs[1], T, args...), Base.tail(Base.tail(mixedargs))) nonscalararg(::SparseVecOrMat) = true nonscalararg(::Any) = false -scalarwrappedarg(::Union{AbstractArray{<:Any,0},Ref}) = true +scalarwrappedarg(::Union{ArrayLike{0},Ref}) = true scalarwrappedarg(::Any) = false @inline function _capturescalars() @@ -1121,7 +1121,7 @@ broadcast(f::Tf, A::AbstractSparseMatrixCSC, ::Type{T}) where {Tf,T} = broadcast # # for combinations involving only scalars, sparse arrays, structured matrices, and dense # vectors/matrices, promote all structured matrices and dense vectors/matrices to sparse -# and rebroadcast. otherwise, divert to generic AbstractArray broadcast code. +# and rebroadcast. otherwise, divert to generic ArrayLike broadcast code. function copy(bc::Broadcasted{PromoteToSparse}) bcf = flatten(bc) @@ -1139,8 +1139,8 @@ end broadcast!(bcf.f, dest, map(_sparsifystructured, bcf.args)...) end -_sparsifystructured(M::AbstractMatrix) = SparseMatrixCSC(M) -_sparsifystructured(V::AbstractVector) = SparseVector(V) +_sparsifystructured(M::ArrayLike{2}) = SparseMatrixCSC(M) +_sparsifystructured(V::ArrayLike{1}) = SparseVector(V) _sparsifystructured(M::AbstractSparseMatrix) = SparseMatrixCSC(M) _sparsifystructured(V::AbstractSparseVector) = SparseVector(V) _sparsifystructured(S::SparseVecOrMat) = S diff --git a/stdlib/SparseArrays/src/linalg.jl b/stdlib/SparseArrays/src/linalg.jl index 09c1997685eba..a3417e0cea670 100644 --- a/stdlib/SparseArrays/src/linalg.jl +++ b/stdlib/SparseArrays/src/linalg.jl @@ -299,7 +299,7 @@ function dot(A::AbstractSparseMatrixCSC{T1,S1},B::AbstractSparseMatrixCSC{T2,S2} return r end -function dot(x::AbstractVector, A::AbstractSparseMatrixCSC, y::AbstractVector) +function dot(x::ArrayLike{1}, A::AbstractSparseMatrixCSC, y::ArrayLike{1}) require_one_based_indexing(x, y) m, n = size(A) (length(x) == m && n == length(y)) || throw(DimensionMismatch()) @@ -1436,7 +1436,7 @@ function lmul!(D::Diagonal, A::AbstractSparseMatrixCSC) return A end -function \(A::AbstractSparseMatrixCSC, B::AbstractVecOrMat) +function \(A::AbstractSparseMatrixCSC, B::VectorOrMatrixLike) require_one_based_indexing(A, B) m, n = size(A) if m == n @@ -1459,7 +1459,7 @@ function \(A::AbstractSparseMatrixCSC, B::AbstractVecOrMat) end for (xformtype, xformop) in ((:Adjoint, :adjoint), (:Transpose, :transpose)) @eval begin - function \(xformA::($xformtype){<:Any,<:AbstractSparseMatrixCSC}, B::AbstractVecOrMat) + function \(xformA::($xformtype){<:Any,<:AbstractSparseMatrixCSC}, B::VectorOrMatrixLike) A = xformA.parent require_one_based_indexing(A, B) m, n = size(A) diff --git a/stdlib/SparseArrays/src/sparseconvert.jl b/stdlib/SparseArrays/src/sparseconvert.jl index e235d332ac291..79c2eeaee4303 100644 --- a/stdlib/SparseArrays/src/sparseconvert.jl +++ b/stdlib/SparseArrays/src/sparseconvert.jl @@ -30,7 +30,7 @@ end Returns `true` if type `S` is backed by a sparse array, and `false` otherwise. 
""" -iswrsparse(::T) where T<:AbstractArray = iswrsparse(T) +iswrsparse(::T) where T<:ArrayLike = iswrsparse(T) iswrsparse(::Type) = false iswrsparse(::Type{T}) where T<:AbstractSparseArray = true @@ -40,7 +40,7 @@ iswrsparse(::Type{T}) where T<:AbstractSparseArray = true Returns 0 for unwrapped S, and nesting depth for wrapped (nested) abstract arrays. """ depth(::T) where T = depth(T) -depth(::Type{T}) where T<:AbstractArray = 0 +depth(::Type{T}) where T<:ArrayLike = 0 for wr in (Symmetric, Hermitian, LowerTriangular, UnitLowerTriangular, UpperTriangular, UnitUpperTriangular, @@ -67,14 +67,14 @@ function _sparsewrap(A::Union{Diagonal,Bidiagonal,Tridiagonal,SymTridiagonal}) end """ - unwrap(A::AbstractMatrix) + unwrap(A::ArrayLike{2}) In case A is a wrapper type (`SubArray, Symmetric, Adjoint, SubArray, Triangular, Tridiagonal`, etc.) convert to `Matrix` or `SparseMatrixCSC`, depending on final storage type of A. For other types return A itself. """ unwrap(A::Any) = A -unwrap(A::AbstractMatrix) = iswrsparse(A) ? convert(SparseMatrixCSC, A) : convert(Array, A) +unwrap(A::ArrayLike{2}) = iswrsparse(A) ? convert(SparseMatrixCSC, A) : convert(Array, A) # For pure sparse matrices and vectors return A. # For wrapped sparse matrices or vectors convert to SparseMatrixCSC. @@ -89,7 +89,7 @@ function _sparsem(@nospecialize A::AbstractArray{Tv}) where Tv end else # explicitly call abstract matrix fallback using getindex(A,...) - invoke(SparseMatrixCSC{Tv,Int}, Tuple{AbstractMatrix}, A) + invoke(SparseMatrixCSC{Tv,Int}, Tuple{ArrayLike{2}}, A) end end @@ -283,4 +283,3 @@ function _sparse_gen(m, n, newcolptr, newrowval, newnzval) newcolptr[1] = 1 SparseMatrixCSC(m, n, newcolptr, newrowval, newnzval) end - diff --git a/stdlib/SparseArrays/src/sparsematrix.jl b/stdlib/SparseArrays/src/sparsematrix.jl index 8620159846e63..1daeb5cb53e7f 100644 --- a/stdlib/SparseArrays/src/sparsematrix.jl +++ b/stdlib/SparseArrays/src/sparsematrix.jl @@ -369,7 +369,7 @@ _sparsesimilar(S::AbstractSparseMatrixCSC, ::Type{TvNew}, ::Type{TiNew}, dims::D _sparsesimilar(S::AbstractSparseMatrixCSC, ::Type{TvNew}, ::Type{TiNew}, dims::Dims{1}) where {TvNew,TiNew} = SparseVector(dims..., similar(rowvals(S), TiNew, 0), similar(nonzeros(S), TvNew, 0)) # -# The following methods hook into the AbstractArray similar hierarchy. The first method +# The following methods hook into the ArrayLike similar hierarchy. The first method # covers similar(A[, Tv]) calls, which preserve stored-entry structure, and the latter # methods cover similar(A[, Tv], shape...) calls, which preserve storage space when the shape # calls for a two-dimensional result. @@ -495,7 +495,7 @@ function SparseMatrixCSC(D::Diagonal{T}) where T end SparseMatrixCSC(M::AbstractMatrix{Tv}) where {Tv} = SparseMatrixCSC{Tv,Int}(M) SparseMatrixCSC{Tv}(M::AbstractMatrix{Tv}) where {Tv} = SparseMatrixCSC{Tv,Int}(M) -function SparseMatrixCSC{Tv,Ti}(M::AbstractMatrix) where {Tv,Ti} +function SparseMatrixCSC{Tv,Ti}(M::ArrayLike{2}) where {Tv,Ti} require_one_based_indexing(M) I = Ti[] V = Tv[] @@ -558,7 +558,7 @@ function Matrix(S::AbstractSparseMatrixCSC{Tv}) where Tv end Array(S::AbstractSparseMatrixCSC) = Matrix(S) -convert(T::Type{<:AbstractSparseMatrixCSC}, m::AbstractMatrix) = m isa T ? m : T(m) +convert(T::Type{<:AbstractSparseMatrixCSC}, m::ArrayLike{2}) = m isa T ? 
m : T(m) float(S::SparseMatrixCSC) = SparseMatrixCSC(size(S, 1), size(S, 2), copy(getcolptr(S)), copy(rowvals(S)), float.(nonzeros(S))) complex(S::SparseMatrixCSC) = SparseMatrixCSC(size(S, 1), size(S, 2), copy(getcolptr(S)), copy(rowvals(S)), complex(copy(nonzeros(S)))) @@ -566,7 +566,7 @@ complex(S::SparseMatrixCSC) = SparseMatrixCSC(size(S, 1), size(S, 2), copy(getco """ sparse(A) -Convert an AbstractMatrix `A` into a sparse matrix. +Convert an ArrayLike{2} `A` into a sparse matrix. # Examples ```jldoctest @@ -664,7 +664,7 @@ function sparse(I::AbstractVector{Ti}, J::AbstractVector{Ti}, V::AbstractVector{ end end -sparse(I::AbstractVector, J::AbstractVector, V::AbstractVector, m::Integer, n::Integer, combine) = +sparse(I::ArrayLike{1}, J::ArrayLike{1}, V::ArrayLike{1}, m::Integer, n::Integer, combine) = sparse(AbstractVector{Int}(I), AbstractVector{Int}(J), V, m, n, combine) """ @@ -843,11 +843,11 @@ dimlub(I) = isempty(I) ? 0 : Int(maximum(I)) #least upper bound on required spar sparse(I,J,v::Number) = sparse(I, J, fill(v,length(I))) -sparse(I,J,V::AbstractVector) = sparse(I, J, V, dimlub(I), dimlub(J)) +sparse(I,J,V::ArrayLike{1}) = sparse(I, J, V, dimlub(I), dimlub(J)) sparse(I,J,v::Number,m,n) = sparse(I, J, fill(v,length(I)), Int(m), Int(n)) -sparse(I,J,V::AbstractVector,m,n) = sparse(I, J, V, Int(m), Int(n), +) +sparse(I,J,V::ArrayLike{1},m,n) = sparse(I, J, V, Int(m), Int(n), +) sparse(I,J,V::AbstractVector{Bool},m,n) = sparse(I, J, V, Int(m), Int(n), |) @@ -1417,7 +1417,7 @@ function findall(p::Function, S::AbstractSparseMatrixCSC) return inds end findall(p::Base.Fix2{typeof(in)}, x::AbstractSparseMatrixCSC) = - invoke(findall, Tuple{Base.Fix2{typeof(in)}, AbstractArray}, p, x) + invoke(findall, Tuple{Base.Fix2{typeof(in)}, ArrayLike}, p, x) function findnz(S::AbstractSparseMatrixCSC{Tv,Ti}) where {Tv,Ti} numnz = nnz(S) @@ -1636,7 +1636,7 @@ function (-)(A::AbstractSparseMatrixCSC) return SparseMatrixCSC(size(A, 1), size(A, 2), copy(getcolptr(A)), copy(rowvals(A)), nzval) end -# the rest of real, conj, imag are handled correctly via AbstractArray methods +# the rest of real, conj, imag are handled correctly via ArrayLike methods function conj(A::AbstractSparseMatrixCSC{<:Complex}) nzval = similar(nonzeros(A)) map!(conj, view(nzval, 1:nnz(A)), nzvalview(A)) @@ -1752,7 +1752,7 @@ function Base._mapreduce(f, op::typeof(*), A::AbstractSparseMatrixCSC{T}) where end # General mapreducedim -function _mapreducerows!(f, op, R::AbstractArray, A::AbstractSparseMatrixCSC{T}) where T +function _mapreducerows!(f, op, R::ArrayLike, A::AbstractSparseMatrixCSC{T}) where T require_one_based_indexing(A, R) colptr = getcolptr(A) rowval = rowvals(A) @@ -1768,7 +1768,7 @@ function _mapreducerows!(f, op, R::AbstractArray, A::AbstractSparseMatrixCSC{T}) R end -function _mapreducecols!(f, op, R::AbstractArray, A::AbstractSparseMatrixCSC{Tv,Ti}) where {Tv,Ti} +function _mapreducecols!(f, op, R::ArrayLike, A::AbstractSparseMatrixCSC{Tv,Ti}) where {Tv,Ti} require_one_based_indexing(A, R) colptr = getcolptr(A) rowval = rowvals(A) @@ -1788,7 +1788,7 @@ function _mapreducecols!(f, op, R::AbstractArray, A::AbstractSparseMatrixCSC{Tv, R end -function Base._mapreducedim!(f, op, R::AbstractArray, A::AbstractSparseMatrixCSC{T}) where T +function Base._mapreducedim!(f, op, R::ArrayLike, A::AbstractSparseMatrixCSC{T}) where T require_one_based_indexing(A, R) lsiz = Base.check_reducedims(R,A) isempty(A) && return R @@ -1839,7 +1839,7 @@ end # Specialized mapreducedim for + cols to avoid allocating a # temporary array 
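The triplet (`I`, `J`, `V`) constructors touched in this hunk accept any one-based, 1-dimensional index and value collections, with duplicate entries combined by `+`. A minimal usage sketch (plain `SparseArrays`, independent of the rename):

```julia
using SparseArrays

# COO-style construction: indices and values may be any 1-dimensional,
# one-based collections, e.g. ranges; duplicate entries are combined with `+`.
I = [1, 2, 3, 4]           # row indices
J = 1:4                    # column indices, given as a range
V = 2.0:2.0:8.0            # stored values, also a range
S = sparse(I, J, V, 4, 4)

nnz(S)     # 4 stored entries
S[2, 2]    # 4.0
```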
when f(0) == 0 -function _mapreducecols!(f, op::typeof(+), R::AbstractArray, A::AbstractSparseMatrixCSC{Tv,Ti}) where {Tv,Ti} +function _mapreducecols!(f, op::typeof(+), R::ArrayLike, A::AbstractSparseMatrixCSC{Tv,Ti}) where {Tv,Ti} require_one_based_indexing(A, R) nzval = nonzeros(A) m, n = size(A) @@ -2012,7 +2012,7 @@ getindex(A::AbstractSparseMatrixCSC, ::Colon, ::Colon) = copy(A) getindex(A::AbstractSparseMatrixCSC, i, ::Colon) = getindex(A, i, 1:size(A, 2)) getindex(A::AbstractSparseMatrixCSC, ::Colon, i) = getindex(A, 1:size(A, 1), i) -function getindex_cols(A::AbstractSparseMatrixCSC{Tv,Ti}, J::AbstractVector) where {Tv,Ti} +function getindex_cols(A::AbstractSparseMatrixCSC{Tv,Ti}, J::ArrayLike{1}) where {Tv,Ti} require_one_based_indexing(A, J) # for indexing whole columns (m, n) = size(A) @@ -2049,7 +2049,7 @@ end getindex_traverse_col(::AbstractUnitRange, lo::Integer, hi::Integer) = lo:hi getindex_traverse_col(I::StepRange, lo::Integer, hi::Integer) = step(I) > 0 ? (lo:1:hi) : (hi:-1:lo) -function getindex(A::AbstractSparseMatrixCSC{Tv,Ti}, I::AbstractRange, J::AbstractVector) where {Tv,Ti<:Integer} +function getindex(A::AbstractSparseMatrixCSC{Tv,Ti}, I::AbstractRange, J::ArrayLike{1}) where {Tv,Ti<:Integer} require_one_based_indexing(A, I, J) # Ranges for indexing rows (m, n) = size(A) @@ -2097,7 +2097,7 @@ function getindex(A::AbstractSparseMatrixCSC{Tv,Ti}, I::AbstractRange, J::Abstra return SparseMatrixCSC(nI, nJ, colptrS, rowvalS, nzvalS) end -function getindex_I_sorted(A::AbstractSparseMatrixCSC{Tv,Ti}, I::AbstractVector, J::AbstractVector) where {Tv,Ti} +function getindex_I_sorted(A::AbstractSparseMatrixCSC{Tv,Ti}, I::ArrayLike{1}, J::ArrayLike{1}) where {Tv,Ti} require_one_based_indexing(A, I, J) # Sorted vectors for indexing rows. # Similar to getindex_general but without the transpose trick. 
@@ -2118,7 +2118,7 @@ function getindex_I_sorted(A::AbstractSparseMatrixCSC{Tv,Ti}, I::AbstractVector, getindex_I_sorted_linear(A, I, J) end -function getindex_I_sorted_bsearch_A(A::AbstractSparseMatrixCSC{Tv,Ti}, I::AbstractVector, J::AbstractVector) where {Tv,Ti} +function getindex_I_sorted_bsearch_A(A::AbstractSparseMatrixCSC{Tv,Ti}, I::ArrayLike{1}, J::ArrayLike{1}) where {Tv,Ti} require_one_based_indexing(A, I, J) nI = length(I) nJ = length(J) @@ -2178,7 +2178,7 @@ function getindex_I_sorted_bsearch_A(A::AbstractSparseMatrixCSC{Tv,Ti}, I::Abstr return SparseMatrixCSC(nI, nJ, colptrS, rowvalS, nzvalS) end -function getindex_I_sorted_linear(A::AbstractSparseMatrixCSC{Tv,Ti}, I::AbstractVector, J::AbstractVector) where {Tv,Ti} +function getindex_I_sorted_linear(A::AbstractSparseMatrixCSC{Tv,Ti}, I::ArrayLike{1}, J::ArrayLike{1}) where {Tv,Ti} require_one_based_indexing(A, I, J) nI = length(I) nJ = length(J) @@ -2238,7 +2238,7 @@ function getindex_I_sorted_linear(A::AbstractSparseMatrixCSC{Tv,Ti}, I::Abstract return SparseMatrixCSC(nI, nJ, colptrS, rowvalS, nzvalS) end -function getindex_I_sorted_bsearch_I(A::AbstractSparseMatrixCSC{Tv,Ti}, I::AbstractVector, J::AbstractVector) where {Tv,Ti} +function getindex_I_sorted_bsearch_I(A::AbstractSparseMatrixCSC{Tv,Ti}, I::ArrayLike{1}, J::ArrayLike{1}) where {Tv,Ti} require_one_based_indexing(A, I, J) nI = length(I) nJ = length(J) @@ -2354,7 +2354,7 @@ function permute_rows!(S::AbstractSparseMatrixCSC{Tv,Ti}, pI::Vector{Int}) where S end -function getindex_general(A::AbstractSparseMatrixCSC, I::AbstractVector, J::AbstractVector) +function getindex_general(A::AbstractSparseMatrixCSC, I::ArrayLike{1}, J::ArrayLike{1}) require_one_based_indexing(A, I, J) pI = sortperm(I) @inbounds Is = I[pI] @@ -2362,7 +2362,7 @@ function getindex_general(A::AbstractSparseMatrixCSC, I::AbstractVector, J::Abst end # the general case: -function getindex(A::AbstractSparseMatrixCSC{Tv,Ti}, I::AbstractVector, J::AbstractVector) where {Tv,Ti} +function getindex(A::AbstractSparseMatrixCSC{Tv,Ti}, I::ArrayLike{1}, J::ArrayLike{1}) where {Tv,Ti} require_one_based_indexing(A, I, J) (m, n) = size(A) @@ -2387,7 +2387,7 @@ function getindex(A::AbstractSparseMatrixCSC{Tv,Ti}, I::AbstractVector, J::Abstr end end -function getindex(A::AbstractSparseMatrixCSC{Tv,Ti}, I::AbstractArray) where {Tv,Ti} +function getindex(A::AbstractSparseMatrixCSC{Tv,Ti}, I::ArrayLike) where {Tv,Ti} require_one_based_indexing(A, I) szA = size(A) nA = szA[1]*szA[2] @@ -2655,12 +2655,12 @@ function _spsetnz_setindex!(A::AbstractSparseMatrixCSC{Tv}, x::Tv, end # Nonscalar A[I,J] = B: Convert B to a SparseMatrixCSC of the appropriate shape first -_to_same_csc(::AbstractSparseMatrixCSC{Tv, Ti}, V::AbstractMatrix, I...) where {Tv,Ti} = convert(SparseMatrixCSC{Tv,Ti}, V) -_to_same_csc(::AbstractSparseMatrixCSC{Tv, Ti}, V::AbstractVector, I...) where {Tv,Ti} = convert(SparseMatrixCSC{Tv,Ti}, reshape(V, map(length, I))) +_to_same_csc(::AbstractSparseMatrixCSC{Tv, Ti}, V::ArrayLike{2}, I...) where {Tv,Ti} = convert(SparseMatrixCSC{Tv,Ti}, V) +_to_same_csc(::AbstractSparseMatrixCSC{Tv, Ti}, V::ArrayLike{1}, I...) 
where {Tv,Ti} = convert(SparseMatrixCSC{Tv,Ti}, reshape(V, map(length, I))) -setindex!(A::AbstractSparseMatrixCSC{Tv}, B::AbstractVecOrMat, I::Integer, J::Integer) where {Tv} = _setindex_scalar!(A, B, I, J) +setindex!(A::AbstractSparseMatrixCSC{Tv}, B::VectorOrMatrixLike, I::Integer, J::Integer) where {Tv} = _setindex_scalar!(A, B, I, J) -function setindex!(A::AbstractSparseMatrixCSC{Tv,Ti}, V::AbstractVecOrMat, Ix::Union{Integer, AbstractVector{<:Integer}, Colon}, Jx::Union{Integer, AbstractVector{<:Integer}, Colon}) where {Tv,Ti<:Integer} +function setindex!(A::AbstractSparseMatrixCSC{Tv,Ti}, V::VectorOrMatrixLike, Ix::Union{Integer, AbstractVector{<:Integer}, Colon}, Jx::Union{Integer, AbstractVector{<:Integer}, Colon}) where {Tv,Ti<:Integer} require_one_based_indexing(A, V, Ix, Jx) (I, J) = Base.ensure_indexable(to_indices(A, (Ix, Jx))) checkbounds(A, I, J) @@ -2795,7 +2795,7 @@ setindex!(A::Matrix, x::AbstractSparseMatrixCSC, I::AbstractVector{Bool}, J::Abs setindex!(A::Matrix, x::AbstractSparseMatrixCSC, I::AbstractVector{<:Integer}, J::AbstractVector{Bool}) = setindex!(A, Array(x), I, findall(J)) setindex!(A::Matrix, x::AbstractSparseMatrixCSC, I::AbstractVector{Bool}, J::AbstractVector{<:Integer}) = setindex!(A, Array(x), findall(I), J) -function setindex!(A::AbstractSparseMatrixCSC, x::AbstractArray, I::AbstractMatrix{Bool}) +function setindex!(A::AbstractSparseMatrixCSC, x::ArrayLike, I::AbstractMatrix{Bool}) require_one_based_indexing(A, x, I) checkbounds(A, I) n = sum(I) @@ -2896,7 +2896,7 @@ function setindex!(A::AbstractSparseMatrixCSC, x::AbstractArray, I::AbstractMatr A end -function setindex!(A::AbstractSparseMatrixCSC, x::AbstractArray, Ix::AbstractVector{<:Integer}) +function setindex!(A::AbstractSparseMatrixCSC, x::ArrayLike, Ix::AbstractVector{<:Integer}) require_one_based_indexing(A, x, Ix) (I,) = Base.ensure_indexable(to_indices(A, (Ix,))) # We check bounds after sorting I @@ -2915,7 +2915,7 @@ function setindex!(A::AbstractSparseMatrixCSC, x::AbstractArray, Ix::AbstractVec throw(BoundsError(A, I)) end - isa(x, AbstractArray) && setindex_shape_check(x, length(I)) + isa(x, ArrayLike) && setindex_shape_check(x, length(I)) lastcol = 0 (nrowA, ncolA) = szA @@ -3129,8 +3129,8 @@ function dropstored!(A::AbstractSparseMatrixCSC, end dropstored!(A::AbstractSparseMatrixCSC, i::Integer, J::AbstractVector{<:Integer}) = dropstored!(A, [i], J) dropstored!(A::AbstractSparseMatrixCSC, I::AbstractVector{<:Integer}, j::Integer) = dropstored!(A, I, [j]) -dropstored!(A::AbstractSparseMatrixCSC, ::Colon, j::Union{Integer,AbstractVector}) = dropstored!(A, 1:size(A,1), j) -dropstored!(A::AbstractSparseMatrixCSC, i::Union{Integer,AbstractVector}, ::Colon) = dropstored!(A, i, 1:size(A,2)) +dropstored!(A::AbstractSparseMatrixCSC, ::Colon, j::Union{Integer,ArrayLike{1}}) = dropstored!(A, 1:size(A,1), j) +dropstored!(A::AbstractSparseMatrixCSC, i::Union{Integer,ArrayLike{1}}, ::Colon) = dropstored!(A, i, 1:size(A,2)) dropstored!(A::AbstractSparseMatrixCSC, ::Colon, ::Colon) = dropstored!(A, 1:size(A,1), 1:size(A,2)) dropstored!(A::AbstractSparseMatrixCSC, ::Colon) = dropstored!(A, :, :) # TODO: Several of the preceding methods are optimization candidates. @@ -3400,7 +3400,7 @@ function istril(A::AbstractSparseMatrixCSC) end -function spdiagm_internal(kv::Pair{<:Integer,<:AbstractVector}...) +function spdiagm_internal(kv::Pair{<:Integer,<:ArrayLike{1}}...) ncoeffs = 0 for p in kv ncoeffs += length(p.second) @@ -3433,8 +3433,8 @@ function spdiagm_internal(kv::Pair{<:Integer,<:AbstractVector}...) 
end """ - spdiagm(kv::Pair{<:Integer,<:AbstractVector}...) - spdiagm(m::Integer, n::Ingeger, kv::Pair{<:Integer,<:AbstractVector}...) + spdiagm(kv::Pair{<:Integer,<:ArrayLike{1}}...) + spdiagm(m::Integer, n::Ingeger, kv::Pair{<:Integer,<:ArrayLike{1}}...) Construct a sparse diagonal matrix from `Pair`s of vectors and diagonals. Each vector `kv.second` will be placed on the `kv.first` diagonal. By @@ -3456,9 +3456,9 @@ julia> spdiagm(-1 => [1,2,3,4], 1 => [4,3,2,1]) [4, 5] = 1 ``` """ -spdiagm(kv::Pair{<:Integer,<:AbstractVector}...) = _spdiagm(nothing, kv...) -spdiagm(m::Integer, n::Integer, kv::Pair{<:Integer,<:AbstractVector}...) = _spdiagm((Int(m),Int(n)), kv...) -function _spdiagm(size, kv::Pair{<:Integer,<:AbstractVector}...) +spdiagm(kv::Pair{<:Integer,<:ArrayLike{1}}...) = _spdiagm(nothing, kv...) +spdiagm(m::Integer, n::Integer, kv::Pair{<:Integer,<:ArrayLike{1}}...) = _spdiagm((Int(m),Int(n)), kv...) +function _spdiagm(size, kv::Pair{<:Integer,<:ArrayLike{1}}...) I, J, V = spdiagm_internal(kv...) mmax, nmax = dimlub(I), dimlub(J) mnmax = max(mmax, nmax) diff --git a/stdlib/SparseArrays/src/sparsevector.jl b/stdlib/SparseArrays/src/sparsevector.jl index c072943bd9141..8c0a6e803c4b6 100644 --- a/stdlib/SparseArrays/src/sparsevector.jl +++ b/stdlib/SparseArrays/src/sparsevector.jl @@ -75,7 +75,7 @@ _sparsesimilar(S::SparseVector, ::Type{TvNew}, ::Type{TiNew}, dims::Dims{1}) whe # parent method for similar that preserves storage space (for old and new dims differ, and new is 2d) _sparsesimilar(S::SparseVector, ::Type{TvNew}, ::Type{TiNew}, dims::Dims{2}) where {TvNew,TiNew} = SparseMatrixCSC(dims..., fill(one(TiNew), last(dims)+1), similar(nonzeroinds(S), TiNew), similar(nonzeros(S), TvNew)) -# The following methods hook into the AbstractArray similar hierarchy. The first method +# The following methods hook into the ArrayLike similar hierarchy. The first method # covers similar(A[, Tv]) calls, which preserve stored-entry structure, and the latter # methods cover similar(A[, Tv], shape...) calls, which preserve nothing if the dims # specify a SparseVector result and storage space if the dims specify a SparseMatrixCSC result. @@ -186,7 +186,7 @@ julia> sparsevec([1, 3, 1, 2, 2], [true, true, false, false, false]) [3] = 1 ``` """ -function sparsevec(I::AbstractVector{<:Integer}, V::AbstractVector, combine::Function) +function sparsevec(I::AbstractVector{<:Integer}, V::ArrayLike{1}, combine::Function) require_one_based_indexing(I, V) length(I) == length(V) || throw(ArgumentError("index and value vectors must be the same length")) @@ -200,7 +200,7 @@ function sparsevec(I::AbstractVector{<:Integer}, V::AbstractVector, combine::Fun _sparsevector!(Vector(I), Vector(V), len, combine) end -function sparsevec(I::AbstractVector{<:Integer}, V::AbstractVector, len::Integer, combine::Function) +function sparsevec(I::AbstractVector{<:Integer}, V::ArrayLike{1}, len::Integer, combine::Function) require_one_based_indexing(I, V) length(I) == length(V) || throw(ArgumentError("index and value vectors must be the same length")) @@ -210,25 +210,25 @@ function sparsevec(I::AbstractVector{<:Integer}, V::AbstractVector, len::Integer _sparsevector!(Vector(I), Vector(V), len, combine) end -sparsevec(I::AbstractVector, V::Union{Number, AbstractVector}, args...) = +sparsevec(I::ArrayLike{1}, V::Union{Number, ArrayLike{1}}, args...) = sparsevec(Vector{Int}(I), V, args...) 
-sparsevec(I::AbstractVector, V::Union{Number, AbstractVector}) = +sparsevec(I::ArrayLike{1}, V::Union{Number, ArrayLike{1}}) = sparsevec(I, V, +) -sparsevec(I::AbstractVector, V::Union{Number, AbstractVector}, len::Integer) = +sparsevec(I::ArrayLike{1}, V::Union{Number, ArrayLike{1}}, len::Integer) = sparsevec(I, V, len, +) -sparsevec(I::AbstractVector, V::Union{Bool, AbstractVector{Bool}}) = +sparsevec(I::ArrayLike{1}, V::Union{Bool, AbstractVector{Bool}}) = sparsevec(I, V, |) -sparsevec(I::AbstractVector, V::Union{Bool, AbstractVector{Bool}}, len::Integer) = +sparsevec(I::ArrayLike{1}, V::Union{Bool, AbstractVector{Bool}}, len::Integer) = sparsevec(I, V, len, |) -sparsevec(I::AbstractVector, v::Number, combine::Function) = +sparsevec(I::ArrayLike{1}, v::Number, combine::Function) = sparsevec(I, fill(v, length(I)), combine) -sparsevec(I::AbstractVector, v::Number, len::Integer, combine::Function) = +sparsevec(I::ArrayLike{1}, v::Number, len::Integer, combine::Function) = sparsevec(I, fill(v, length(I)), len, combine) @@ -378,10 +378,10 @@ julia> sparsevec([1.0, 2.0, 0.0, 0.0, 3.0, 0.0]) ``` """ sparsevec(a::AbstractVector{T}) where {T} = SparseVector{T, Int}(a) -sparsevec(a::AbstractArray) = sparsevec(vec(a)) +sparsevec(a::ArrayLike) = sparsevec(vec(a)) sparsevec(a::AbstractSparseArray) = vec(a) sparsevec(a::AbstractSparseVector) = vec(a) -sparse(a::AbstractVector) = sparsevec(a) +sparse(a::ArrayLike{1}) = sparsevec(a) function _dense2indval!(nzind::Vector{Ti}, nzval::Vector{Tv}, s::AbstractArray{Tv}) where {Tv,Ti} require_one_based_indexing(s) @@ -431,7 +431,7 @@ SparseVector{Tv,Ti}(s::SparseVector) where {Tv,Ti} = SparseVector{Tv}(s::SparseVector{<:Any,Ti}) where {Tv,Ti} = SparseVector{Tv,Ti}(length(s::SparseVector), nonzeroinds(s), convert(Vector{Tv}, nonzeros(s))) -convert(T::Type{<:SparseVector}, m::AbstractVector) = m isa T ? m : T(m) +convert(T::Type{<:SparseVector}, m::ArrayLike{1}) = m isa T ? 
m : T(m) convert(T::Type{<:SparseVector}, m::AbstractSparseMatrixCSC) = T(m) convert(T::Type{<:AbstractSparseMatrixCSC}, v::SparseVector) = T(v) @@ -469,7 +469,7 @@ function copyto!(A::SparseVector, B::SparseVector) return A end -copyto!(A::SparseVector, B::AbstractVector) = copyto!(A, sparsevec(B)) +copyto!(A::SparseVector, B::ArrayLike{1}) = copyto!(A, sparsevec(B)) function copyto!(A::SparseVector, B::AbstractSparseMatrixCSC) prep_sparsevec_copy_dest!(A, length(B), nnz(B)) @@ -541,14 +541,14 @@ function getindex(x::AbstractSparseMatrixCSC, I::AbstractUnitRange, j::Integer) end # In the general case, we piggy back upon SparseMatrixCSC's optimized solution -@inline function getindex(A::AbstractSparseMatrixCSC, I::AbstractVector, J::Integer) +@inline function getindex(A::AbstractSparseMatrixCSC, I::ArrayLike{1}, J::Integer) M = A[I, [J]] SparseVector(size(M, 1), rowvals(M), nonzeros(M)) end # Row slices getindex(A::AbstractSparseMatrixCSC, i::Integer, ::Colon) = A[i, 1:end] -function Base.getindex(A::AbstractSparseMatrixCSC{Tv,Ti}, i::Integer, J::AbstractVector) where {Tv,Ti} +function Base.getindex(A::AbstractSparseMatrixCSC{Tv,Ti}, i::Integer, J::ArrayLike{1}) where {Tv,Ti} require_one_based_indexing(A, J) checkbounds(A, i, J) nJ = length(J) @@ -661,7 +661,7 @@ function getindex(A::AbstractSparseMatrixCSC{Tv}, I::AbstractUnitRange) where Tv SparseVector(n, rowvalB, nzvalB) end -function getindex(A::AbstractSparseMatrixCSC{Tv,Ti}, I::AbstractVector) where {Tv,Ti} +function getindex(A::AbstractSparseMatrixCSC{Tv,Ti}, I::ArrayLike{1}) where {Tv,Ti} require_one_based_indexing(A, I) @boundscheck checkbounds(A, I) szA = size(A) @@ -731,7 +731,7 @@ function findall(p::Function, x::SparseVector{<:Any,Ti}) where Ti return I end findall(p::Base.Fix2{typeof(in)}, x::SparseVector{<:Any,Ti}) where {Ti} = - invoke(findall, Tuple{Base.Fix2{typeof(in)}, AbstractArray}, p, x) + invoke(findall, Tuple{Base.Fix2{typeof(in)}, ArrayLike}, p, x) function findnz(x::SparseVector{Tv,Ti}) where {Tv,Ti} numnz = nnz(x) @@ -815,13 +815,13 @@ end getindex(x::AbstractSparseVector, I::AbstractVector{Bool}) = x[findall(I)] getindex(x::AbstractSparseVector, I::AbstractArray{Bool}) = x[findall(I)] -@inline function getindex(x::AbstractSparseVector{Tv,Ti}, I::AbstractVector) where {Tv,Ti} +@inline function getindex(x::AbstractSparseVector{Tv,Ti}, I::ArrayLike{1}) where {Tv,Ti} # SparseMatrixCSC has a nicely optimized routine for this; punt S = SparseMatrixCSC(length(x::SparseVector), 1, Ti[1,length(nonzeroinds(x))+1], nonzeroinds(x), nonzeros(x)) S[I, 1] end -function getindex(x::AbstractSparseVector{Tv,Ti}, I::AbstractArray) where {Tv,Ti} +function getindex(x::AbstractSparseVector{Tv,Ti}, I::ArrayLike) where {Tv,Ti} # punt to SparseMatrixCSC S = SparseMatrixCSC(length(x::SparseVector), 1, Ti[1,length(nonzeroinds(x))+1], nonzeroinds(x), nonzeros(x)) S[I] @@ -1119,7 +1119,7 @@ macro unarymap_nz2z_z2z(op, TF) end) end -# the rest of real, conj, imag are handled correctly via AbstractArray methods +# the rest of real, conj, imag are handled correctly via ArrayLike methods @unarymap_nz2z_z2z real Complex conj(x::SparseVector{<:Complex}) = SparseVector(length(x), copy(nonzeroinds(x)), conj(nonzeros(x))) imag(x::AbstractSparseVector{Tv,Ti}) where {Tv<:Real,Ti<:Integer} = SparseVector(length(x), Ti[], Tv[]) @@ -1354,7 +1354,7 @@ adjoint(sv::SparseVector) = Adjoint(sv) # axpy -function LinearAlgebra.axpy!(a::Number, x::SparseVectorUnion, y::AbstractVector) +function LinearAlgebra.axpy!(a::Number, x::SparseVectorUnion, 
y::ArrayLike{1}) require_one_based_indexing(x, y) length(x) == length(y) || throw(DimensionMismatch()) nzind = nonzeroinds(x) @@ -1474,7 +1474,7 @@ end ### BLAS-2 / dense A * sparse x -> dense y # lowrankupdate (BLAS.ger! like) -function LinearAlgebra.lowrankupdate!(A::StridedMatrix, x::AbstractVector, y::SparseVectorUnion, α::Number = 1) +function LinearAlgebra.lowrankupdate!(A::StridedMatrix, x::ArrayLike{1}, y::SparseVectorUnion, α::Number = 1) require_one_based_indexing(A, x, y) nzi = nonzeroinds(y) nzv = nonzeros(y) @@ -1503,7 +1503,7 @@ end mul!(y::AbstractVector{Ty}, A::_StridedOrTriangularMatrix, x::AbstractSparseVector{Tx}) where {Tx,Ty} = mul!(y, A, x, true, false) -function mul!(y::AbstractVector, A::_StridedOrTriangularMatrix, x::AbstractSparseVector, α::Number, β::Number) +function mul!(y::ArrayLike{1}, A::_StridedOrTriangularMatrix, x::AbstractSparseVector, α::Number, β::Number) require_one_based_indexing(y, A, x) m, n = size(A) length(x) == n && length(y) == m || throw(DimensionMismatch()) @@ -1542,7 +1542,7 @@ end mul!(y::AbstractVector{Ty}, transA::Transpose{<:Any,<:_StridedOrTriangularMatrix}, x::AbstractSparseVector{Tx}) where {Tx,Ty} = mul!(y, transA, x, true, false) -function mul!(y::AbstractVector, transA::Transpose{<:Any,<:_StridedOrTriangularMatrix}, x::AbstractSparseVector, α::Number, β::Number) +function mul!(y::ArrayLike{1}, transA::Transpose{<:Any,<:_StridedOrTriangularMatrix}, x::AbstractSparseVector, α::Number, β::Number) require_one_based_indexing(y, transA, x) m, n = size(transA) length(x) == n && length(y) == m || throw(DimensionMismatch()) @@ -1583,7 +1583,7 @@ end mul!(y::AbstractVector{Ty}, adjA::Adjoint{<:Any,<:_StridedOrTriangularMatrix}, x::AbstractSparseVector{Tx}) where {Tx,Ty} = mul!(y, adjA, x, true, false) -function mul!(y::AbstractVector, adjA::Adjoint{<:Any,<:_StridedOrTriangularMatrix}, x::AbstractSparseVector, α::Number, β::Number) +function mul!(y::ArrayLike{1}, adjA::Adjoint{<:Any,<:_StridedOrTriangularMatrix}, x::AbstractSparseVector, α::Number, β::Number) require_one_based_indexing(y, adjA, x) m, n = size(adjA) length(x) == n && length(y) == m || throw(DimensionMismatch()) @@ -1644,7 +1644,7 @@ end mul!(y::AbstractVector{Ty}, A::AbstractSparseMatrixCSC, x::AbstractSparseVector{Tx}) where {Tx,Ty} = mul!(y, A, x, true, false) -function mul!(y::AbstractVector, A::AbstractSparseMatrixCSC, x::AbstractSparseVector, α::Number, β::Number) +function mul!(y::ArrayLike{1}, A::AbstractSparseMatrixCSC, x::AbstractSparseVector, α::Number, β::Number) require_one_based_indexing(y, A, x) m, n = size(A) length(x) == n && length(y) == m || throw(DimensionMismatch()) @@ -1678,17 +1678,17 @@ end mul!(y::AbstractVector{Ty}, transA::Transpose{<:Any,<:AbstractSparseMatrixCSC}, x::AbstractSparseVector{Tx}) where {Tx,Ty} = (A = transA.parent; mul!(y, transpose(A), x, true, false)) -mul!(y::AbstractVector, transA::Transpose{<:Any,<:AbstractSparseMatrixCSC}, x::AbstractSparseVector, α::Number, β::Number) = +mul!(y::ArrayLike{1}, transA::Transpose{<:Any,<:AbstractSparseMatrixCSC}, x::AbstractSparseVector, α::Number, β::Number) = (A = transA.parent; _At_or_Ac_mul_B!((a,b) -> transpose(a) * b, y, A, x, α, β)) mul!(y::AbstractVector{Ty}, adjA::Adjoint{<:Any,<:AbstractSparseMatrixCSC}, x::AbstractSparseVector{Tx}) where {Tx,Ty} = (A = adjA.parent; mul!(y, adjoint(A), x, true, false)) -mul!(y::AbstractVector, adjA::Adjoint{<:Any,<:AbstractSparseMatrixCSC}, x::AbstractSparseVector, α::Number, β::Number) = +mul!(y::ArrayLike{1}, 
adjA::Adjoint{<:Any,<:AbstractSparseMatrixCSC}, x::AbstractSparseVector, α::Number, β::Number) = (A = adjA.parent; _At_or_Ac_mul_B!((a,b) -> adjoint(a) * b, y, A, x, α, β)) function _At_or_Ac_mul_B!(tfun::Function, - y::AbstractVector, A::AbstractSparseMatrixCSC, x::AbstractSparseVector, + y::ArrayLike{1}, A::AbstractSparseMatrixCSC, x::AbstractSparseVector, α::Number, β::Number) require_one_based_indexing(y, A, x) m, n = size(A) @@ -2003,7 +2003,7 @@ function copy!(dst::SparseVector, src::SparseVector) return dst end -function copy!(dst::SparseVector, src::AbstractVector) +function copy!(dst::SparseVector, src::ArrayLike{1}) length(dst::SparseVector) == length(src) || throw(ArgumentError("Sparse vector should have the same length as source for copy!")) _dense2indval!(nonzeroinds(dst), nonzeros(dst), src) return dst @@ -2052,7 +2052,7 @@ end # in-place swaps (dense) blocks start:split and split+1:fin in col -function _swap!(col::AbstractVector, start::Integer, fin::Integer, split::Integer) +function _swap!(col::ArrayLike{1}, start::Integer, fin::Integer, split::Integer) split == fin && return reverse!(col, start, split) reverse!(col, split + 1, fin) @@ -2062,7 +2062,7 @@ end # in-place shifts a sparse subvector by r. Used also by sparsematrix.jl -function subvector_shifter!(R::AbstractVector, V::AbstractVector, start::Integer, fin::Integer, m::Integer, r::Integer) +function subvector_shifter!(R::ArrayLike{1}, V::ArrayLike{1}, start::Integer, fin::Integer, m::Integer, r::Integer) split = fin @inbounds for j = start:fin # shift positions ... diff --git a/stdlib/SparseArrays/test/sparse.jl b/stdlib/SparseArrays/test/sparse.jl index a6b1d1cfd94e3..13d2251a24bb6 100644 --- a/stdlib/SparseArrays/test/sparse.jl +++ b/stdlib/SparseArrays/test/sparse.jl @@ -62,7 +62,7 @@ end @test SparseArrays.indtype(sparse(Int8[1,1],Int8[1,1],[1,1])) == Int8 end -@testset "conversion to AbstractMatrix/SparseMatrix of same eltype" begin +@testset "conversion to ArrayLike{2}/SparseMatrix of same eltype" begin a = sprand(5, 5, 0.2) @test AbstractMatrix{eltype(a)}(a) == a @test SparseMatrixCSC{eltype(a)}(a) == a @@ -1393,7 +1393,7 @@ end @test size(rotl90(a)) == (5,3) end -function test_getindex_algs(A::SparseMatrixCSC{Tv,Ti}, I::AbstractVector, J::AbstractVector, alg::Int) where {Tv,Ti} +function test_getindex_algs(A::SparseMatrixCSC{Tv,Ti}, I::ArrayLike{1}, J::ArrayLike{1}, alg::Int) where {Tv,Ti} # Sorted vectors for indexing rows. # Similar to getindex_general but without the transpose trick. 
(m, n) = size(A) @@ -1505,7 +1505,7 @@ end @test_throws BoundsError S[[2,1], [0,1,2]] end -@testset "test that sparse / sparsevec constructors work for AbstractMatrix subtypes" begin +@testset "test that sparse / sparsevec constructors work for ArrayLike{2} subtypes" begin D = Diagonal(fill(1,10)) sm = sparse(D) sv = sparsevec(D) diff --git a/stdlib/SuiteSparse/src/cholmod.jl b/stdlib/SuiteSparse/src/cholmod.jl index 7b66ffcdfc32e..9acf2c4c35f2d 100644 --- a/stdlib/SuiteSparse/src/cholmod.jl +++ b/stdlib/SuiteSparse/src/cholmod.jl @@ -958,9 +958,9 @@ Base.copyto!(dest::Base.PermutedDimsArrays.PermutedDimsArray, src::Dense) = _cop Base.copyto!(dest::Dense{T}, D::Dense{T}) where {T<:VTypes} = _copy!(dest, D) Base.copyto!(dest::AbstractArray{T}, D::Dense{T}) where {T<:VTypes} = _copy!(dest, D) Base.copyto!(dest::AbstractArray{T,2}, D::Dense{T}) where {T<:VTypes} = _copy!(dest, D) -Base.copyto!(dest::AbstractArray, D::Dense) = _copy!(dest, D) +Base.copyto!(dest::ArrayLike, D::Dense) = _copy!(dest, D) -function _copy!(dest::AbstractArray, D::Dense) +function _copy!(dest::ArrayLike, D::Dense) require_one_based_indexing(dest) s = unsafe_load(pointer(D)) n = s.nrow*s.ncol @@ -1576,7 +1576,7 @@ function lowrankupdowndate!(F::Factor{Tv}, C::Sparse{Tv}, update::Cint) where Tv end #Helper functions for rank updates -lowrank_reorder(V::AbstractArray,p) = Sparse(sparse(V[p,:])) +lowrank_reorder(V::ArrayLike,p) = Sparse(sparse(V[p,:])) lowrank_reorder(V::AbstractSparseArray,p) = Sparse(V[p,:]) """ diff --git a/stdlib/SuiteSparse/src/spqr.jl b/stdlib/SuiteSparse/src/spqr.jl index d5bc42562a8c7..45fdcf1714824 100644 --- a/stdlib/SuiteSparse/src/spqr.jl +++ b/stdlib/SuiteSparse/src/spqr.jl @@ -362,8 +362,8 @@ end # here we have to use \ instead of ldiv! because of limitations in SPQR ## Two helper methods -_ret_size(F::QRSparse, b::AbstractVector) = (size(F, 2),) -_ret_size(F::QRSparse, B::AbstractMatrix) = (size(F, 2), size(B, 2)) +_ret_size(F::QRSparse, b::ArrayLike{1}) = (size(F, 2),) +_ret_size(F::QRSparse, B::ArrayLike{2}) = (size(F, 2), size(B, 2)) LinearAlgebra.rank(F::QRSparse) = reduce(max, view(rowvals(F.R), 1:nnz(F.R)), init = eltype(rowvals(F.R))(0)) LinearAlgebra.rank(S::SparseMatrixCSC) = rank(qr(S)) diff --git a/stdlib/Test/src/Test.jl b/stdlib/Test/src/Test.jl index 9d556b9186c7e..b188ebbc8bcd3 100644 --- a/stdlib/Test/src/Test.jl +++ b/stdlib/Test/src/Test.jl @@ -608,7 +608,7 @@ end contains_warn(output, s::AbstractString) = occursin(s, output) contains_warn(output, s::Regex) = occursin(s, output) contains_warn(output, s::Function) = s(output) -contains_warn(output, S::Union{AbstractArray,Tuple}) = all(s -> contains_warn(output, s), S) +contains_warn(output, S::Union{ArrayLike,Tuple}) = all(s -> contains_warn(output, s), S) """ @test_warn msg expr @@ -1685,7 +1685,7 @@ end # 0.7 deprecations begin - approx_full(x::AbstractArray) = x + approx_full(x::ArrayLike) = x approx_full(x::Number) = x approx_full(x) = full(x) diff --git a/test/abstractarray.jl b/test/abstractarray.jl index 8d0c2617d78f0..c2054f5f5dd4c 100644 --- a/test/abstractarray.jl +++ b/test/abstractarray.jl @@ -250,7 +250,7 @@ T24Linear(::Type{T}, dims::NTuple{N,Int}) where {T,N} = T24Linear{T,N,dims}() T24Linear( X::AbstractArray{T,N}) where {T,N } = T24Linear{T,N}(X) T24Linear{T }(X::AbstractArray{_,N}) where {T,N,_} = T24Linear{T,N}(X) -T24Linear{T,N}(X::AbstractArray ) where {T,N } = T24Linear{T,N,size(X)}(X...) +T24Linear{T,N}(X::ArrayLike ) where {T,N } = T24Linear{T,N,size(X)}(X...) 
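For `contains_warn`, a tuple or array of patterns means every pattern must occur in the captured output, which is how `@test_warn` handles a collection argument. A self-contained check:

```julia
using Test

# All patterns in the collection must match the captured stderr output.
@test_warn ["deprecated", r"foo\d"] println(stderr, "foo1 is deprecated")
```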
Base.size(::T24Linear{T,N,dims}) where {T,N,dims} = dims import Base: IndexLinear @@ -269,7 +269,7 @@ TSlow(::Type{T}, dims::NTuple{N,Int}) where {T,N} = TSlow{T,N}(Dict{NTuple{N,Int TSlow{T,N}(X::TSlow{T,N}) where {T,N } = X TSlow( X::AbstractArray{T,N}) where {T,N } = TSlow{T,N}(X) TSlow{T }(X::AbstractArray{_,N}) where {T,N,_} = TSlow{T,N}(X) -TSlow{T,N}(X::AbstractArray ) where {T,N } = begin +TSlow{T,N}(X::ArrayLike ) where {T,N } = begin A = TSlow(T, size(X)) for I in CartesianIndices(size(X)) A[I.I...] = X[I.I...] @@ -489,17 +489,17 @@ function test_primitives(::Type{T}, shape, ::Type{TestAbstractArray}) where T @test firstindex(B, 1) == firstindex(A, 1) == first(axes(B, 1)) @test firstindex(B, 2) == firstindex(A, 2) == first(axes(B, 2)) - # isassigned(a::AbstractArray, i::Int...) + # isassigned(a::ArrayLike, i::Int...) j = rand(1:length(B)) @test isassigned(B, j) == true if T == T24Linear @test isassigned(B, length(B) + 1) == false end - # reshape(a::AbstractArray, dims::Dims) + # reshape(a::ArrayLike, dims::Dims) @test_throws DimensionMismatch reshape(B, (0, 1)) - # copyto!(dest::AbstractArray, src::AbstractArray) + # copyto!(dest::ArrayLike, src::ArrayLike) @test_throws BoundsError copyto!(Vector{Int}(undef, 10), [1:11...]) # convert{T, N}(::Type{Array}, A::AbstractArray{T, N}) diff --git a/test/show.jl b/test/show.jl index e89f745454398..7fd7c3b0b5495 100644 --- a/test/show.jl +++ b/test/show.jl @@ -1348,7 +1348,7 @@ end @test sprint(show, Main) == "Main" @test sprint(Base.show_supertypes, Int64) == "Int64 <: Signed <: Integer <: Real <: Number <: Any" -@test sprint(Base.show_supertypes, Vector{String}) == "Array{String,1} <: DenseArray{String,1} <: AbstractArray{String,1} <: Any" +@test sprint(Base.show_supertypes, Vector{String}) == "Array{String,1} <: DenseArray{String,1} <: AbstractArray{String,1} <: ArrayLike{1} <: Any" # static_show diff --git a/test/testhelpers/OffsetArrays.jl b/test/testhelpers/OffsetArrays.jl index efebb74ded2d8..d20d2f490591c 100644 --- a/test/testhelpers/OffsetArrays.jl +++ b/test/testhelpers/OffsetArrays.jl @@ -11,11 +11,11 @@ using Base: Indices, IndexCartesian, IndexLinear, tail export OffsetArray -struct OffsetArray{T,N,AA<:AbstractArray} <: AbstractArray{T,N} +struct OffsetArray{T,N,AA<:ArrayLike} <: AbstractArray{T,N} parent::AA offsets::NTuple{N,Int} end -OffsetVector{T,AA<:AbstractArray} = OffsetArray{T,1,AA} +OffsetVector{T,AA<:ArrayLike} = OffsetArray{T,1,AA} OffsetArray(A::AbstractArray{T,N}, offsets::NTuple{N,Int}) where {T,N} = OffsetArray{T,N,typeof(A)}(A, offsets) OffsetArray(A::AbstractArray{T,N}, offsets::Vararg{Int,N}) where {T,N} = OffsetArray(A, offsets) @@ -49,15 +49,15 @@ const OffsetAxis = Union{Integer, UnitRange, Base.IdentityUnitRange{<:UnitRange} function Base.similar(A::OffsetArray, T::Type, dims::Dims) B = similar(parent(A), T, dims) end -function Base.similar(A::AbstractArray, T::Type, inds::Tuple{OffsetAxis,Vararg{OffsetAxis}}) +function Base.similar(A::ArrayLike, T::Type, inds::Tuple{OffsetAxis,Vararg{OffsetAxis}}) B = similar(A, T, map(indslength, inds)) OffsetArray(B, map(indsoffset, inds)) end -Base.similar(::Type{T}, shape::Tuple{OffsetAxis,Vararg{OffsetAxis}}) where {T<:AbstractArray} = +Base.similar(::Type{T}, shape::Tuple{OffsetAxis,Vararg{OffsetAxis}}) where {T<:ArrayLike} = OffsetArray(T(undef, map(indslength, shape)), map(indsoffset, shape)) -Base.reshape(A::AbstractArray, inds::Tuple{OffsetAxis,Vararg{OffsetAxis}}) = OffsetArray(reshape(A, map(indslength, inds)), map(indsoffset, inds)) 
+Base.reshape(A::ArrayLike, inds::Tuple{OffsetAxis,Vararg{OffsetAxis}}) = OffsetArray(reshape(A, map(indslength, inds)), map(indsoffset, inds)) Base.fill(v, inds::NTuple{N, Union{Integer, AbstractUnitRange}}) where {N} = fill!(OffsetArray(Array{typeof(v), N}(undef, map(indslength, inds)), map(indsoffset, inds)), v)
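The `similar`/`reshape` hooks in this test helper are what let a wrapper carry non-1-based axes through generic array code. The sketch below uses the registered OffsetArrays.jl package, which is assumed here to expose the same behaviour as the trimmed-down in-tree helper:

```julia
using OffsetArrays   # registered package; the test helper above is a minimal copy

A = OffsetArray(reshape(1:6, 2, 3), -1:0, 0:2)   # axes become (-1:0, 0:2)
first.(axes(A))                 # (-1, 0)
A[-1, 0], A[0, 2]               # (1, 6): first and last elements
B = similar(A, Float64)         # similar preserves the offset axes
axes(B) == axes(A)              # true
```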