From 0b213d43ffbbb4bd0ec06af034b1964250a4faea Mon Sep 17 00:00:00 2001 From: Douglas Bates Date: Tue, 23 Apr 2013 11:39:02 -0500 Subject: [PATCH] Deprecate diagmm and diagmm! in favor of scale and scale! --- base/deprecated.jl | 2 ++ base/exports.jl | 5 +++-- base/linalg.jl | 5 +++-- base/linalg/cholmod.jl | 12 ++++++------ base/linalg/dense.jl | 4 ++-- base/linalg/diagonal.jl | 4 ++-- base/linalg/factorization.jl | 6 +++--- base/linalg/generic.jl | 4 ++-- base/linalg/hermitian.jl | 6 +++--- base/linalg/matmul.jl | 12 ++++++------ base/linalg/sparse.jl | 14 +++++++------- doc/stdlib/linalg.rst | 6 +++--- test/linalg.jl | 6 +++--- test/suitesparse.jl | 2 +- 14 files changed, 46 insertions(+), 42 deletions(-) diff --git a/base/deprecated.jl b/base/deprecated.jl index 769b61bb21cd7..99e19b6c34922 100644 --- a/base/deprecated.jl +++ b/base/deprecated.jl @@ -187,6 +187,8 @@ export PipeString @deprecate remote_call_wait remotecall_wait @deprecate has(s::Set, x) contains(s, x) @deprecate has(s::IntSet, x) contains(s, x) +@deprecate diagmm scale +@deprecate diagmm! scale! @deprecate expr(hd, a...) Expr(hd, a...) @deprecate expr(hd, a::Array{Any,1}) Expr(hd, a...) 
diff --git a/base/exports.jl b/base/exports.jl index f4d6cc62785a4..cb726b27a02b8 100644 --- a/base/exports.jl +++ b/base/exports.jl @@ -566,8 +566,8 @@ export det, diag, diagm, - diagmm, - diagmm!, +# diagmm, +# diagmm!, diff, dot, eig, @@ -614,6 +614,7 @@ export randsym, rank, rref, + scale, scale!, schur, schurfact, diff --git a/base/linalg.jl b/base/linalg.jl index f434d05a5b58d..1464da182726b 100644 --- a/base/linalg.jl +++ b/base/linalg.jl @@ -44,8 +44,8 @@ export det, diag, diagm, - diagmm, - diagmm!, +# diagmm, +# diagmm!, diff, dot, eig, @@ -92,6 +92,7 @@ export randsym, rank, rref, + scale, scale!, schur, schurfact!, diff --git a/base/linalg/cholmod.jl b/base/linalg/cholmod.jl index 2a216e49aa7f2..1d1d91620a813 100644 --- a/base/linalg/cholmod.jl +++ b/base/linalg/cholmod.jl @@ -17,8 +17,8 @@ import Base: (*), convert, copy, ctranspose, eltype, findnz, getindex, hcat, isvalid, nnz, show, size, sort!, transpose, vcat import ..LinAlg: (\), A_mul_Bc, A_mul_Bt, Ac_ldiv_B, Ac_mul_B, At_ldiv_B, At_mul_B, - Factorization, cholfact, cholfact!, copy, dense, det, diag, diagmm, - diagmm!, full, logdet, norm, solve, sparse + Factorization, cholfact, cholfact!, copy, dense, det, diag, #diagmm, diagmm!, + full, logdet, norm, scale, scale!, solve, sparse include("linalg/cholmod_h.jl") @@ -886,16 +886,16 @@ chm_speye(n::Integer) = chm_speye(n, n, 1.) # default shape is squar chm_spzeros(m::Integer,n::Integer,nzmax::Integer) = chm_spzeros(m,n,nzmax,1.) 
-function diagmm!{T<:CHMVTypes}(b::Vector{T}, A::CholmodSparse{T}) +function scale!{T<:CHMVTypes}(b::Vector{T}, A::CholmodSparse{T}) chm_scale!(A,CholmodDense(b),CHOLMOD_ROW) A end -diagmm{T<:CHMVTypes}(b::Vector{T}, A::CholmodSparse{T}) = diagmm!(b,copy(A)) -function diagmm!{T<:CHMVTypes}(A::CholmodSparse{T},b::Vector{T}) +scale{T<:CHMVTypes}(b::Vector{T}, A::CholmodSparse{T}) = scale!(b,copy(A)) +function scale!{T<:CHMVTypes}(A::CholmodSparse{T},b::Vector{T}) chm_scale!(A,CholmodDense(b),CHOLMOD_COL) A end -diagmm{T<:CHMVTypes}(A::CholmodSparse{T},b::Vector{T}) = diagmm!(copy(A), b) +scale{T<:CHMVTypes}(A::CholmodSparse{T},b::Vector{T}) = scale!(copy(A), b) norm(A::CholmodSparse) = norm(A,1) diff --git a/base/linalg/dense.jl b/base/linalg/dense.jl index 19b924c5bf3e8..ac1fe23ef367c 100644 --- a/base/linalg/dense.jl +++ b/base/linalg/dense.jl @@ -226,7 +226,7 @@ function ^(A::Matrix, p::Number) else Xinv = inv(X) end - diagmm(X, v.^p)*Xinv + scale(X, v.^p)*Xinv end function rref{T}(A::Matrix{T}) @@ -454,7 +454,7 @@ function pinv{T<:BlasFloat}(A::StridedMatrix{T}) Sinv = zeros(T, length(SVD[:S])) index = SVD[:S] .> eps(real(one(T)))*max(size(A))*max(SVD[:S]) Sinv[index] = 1.0 ./ SVD[:S][index] - SVD[:Vt]'diagmm(Sinv, SVD[:U]') + SVD[:Vt]'scale(Sinv, SVD[:U]') end pinv{T<:Integer}(A::StridedMatrix{T}) = pinv(float(A)) pinv(a::StridedVector) = pinv(reshape(a, length(a), 1)) diff --git a/base/linalg/diagonal.jl b/base/linalg/diagonal.jl index c73937ad565f5..178c138ff0541 100644 --- a/base/linalg/diagonal.jl +++ b/base/linalg/diagonal.jl @@ -29,8 +29,8 @@ isposdef(D::Diagonal) = all(D.diag .> 0) *(Da::Diagonal, Db::Diagonal) = Diagonal(Da.diag .* Db.diag) *(D::Diagonal, V::Vector) = D.diag .* V -*(A::Matrix, D::Diagonal) = diagmm(A,D.diag) -*(D::Diagonal, A::Matrix) = diagmm(D.diag,A) +*(A::Matrix, D::Diagonal) = scale(A,D.diag) +*(D::Diagonal, A::Matrix) = scale(D.diag,A) \(Da::Diagonal, Db::Diagonal) = Diagonal(Db.diag ./ Da.diag ) /(Da::Diagonal, Db::Diagonal) = 
Diagonal(Da.diag ./ Db.diag ) diff --git a/base/linalg/factorization.jl b/base/linalg/factorization.jl index 4cfc0aab44823..696612270d7a9 100644 --- a/base/linalg/factorization.jl +++ b/base/linalg/factorization.jl @@ -454,7 +454,7 @@ function eigmin(A::Union(Number, StridedMatrix)) iscomplex(v) ? error("Complex eigenvalues cannot be ordered") : min(v) end -inv(A::Eigen) = diagmm(A.vectors, 1.0/A.values)*A.vectors' +inv(A::Eigen) = scale(A.vectors, 1.0/A.values)*A.vectors' det(A::Eigen) = prod(A.values) # SVD @@ -506,7 +506,7 @@ function \{T<:BlasFloat}(A::SVD{T}, B::StridedVecOrMat{T}) n = length(A.S) Sinv = zeros(T, n) Sinv[A.S .> sqrt(eps())] = 1.0 ./ A.S - return diagmm(A.Vt', Sinv) * A.U[:,1:n]'B + scale(A.Vt', Sinv) * A.U[:,1:n]'B end # Generalized svd @@ -624,4 +624,4 @@ end function schur(A::StridedMatrix, B::StridedMatrix) SchurF = schurfact(A, B) return SchurF[:S], SchurF[:T], SchurF[:Q], SchurF[:Z] -end \ No newline at end of file +end diff --git a/base/linalg/generic.jl b/base/linalg/generic.jl index a475ae27797a3..4d9d374bf00b8 100644 --- a/base/linalg/generic.jl +++ b/base/linalg/generic.jl @@ -173,8 +173,8 @@ end #diagmm!(C::AbstractMatrix, b::AbstractVector, A::AbstractMatrix) -diagmm!(A::AbstractMatrix, b::AbstractVector) = diagmm!(A,A,b) -diagmm!(b::AbstractVector, A::AbstractMatrix) = diagmm!(A,b,A) +scale!(A::AbstractMatrix, b::AbstractVector) = scale!(A,A,b) +scale!(b::AbstractVector, A::AbstractMatrix) = scale!(A,b,A) #diagmm(A::AbstractMatrix, b::AbstractVector) #diagmm(b::AbstractVector, A::AbstractMatrix) diff --git a/base/linalg/hermitian.jl b/base/linalg/hermitian.jl index 81f5b7ee9b916..456c9934b03ac 100644 --- a/base/linalg/hermitian.jl +++ b/base/linalg/hermitian.jl @@ -41,17 +41,17 @@ eigmin(A::Hermitian) = eigvals(A, 1, 1)[1] function expm(A::Hermitian) F = eigfact(A) - diagmm(F[:vectors], exp(F[:values])) * F[:vectors]' + scale(F[:vectors], exp(F[:values])) * F[:vectors]' end function sqrtm(A::Hermitian, cond::Bool) F = eigfact(A) 
vsqrt = sqrt(complex(F[:values])) if all(imag(vsqrt) .== 0) - retmat = symmetrize!(diagmm(F[:vectors], real(vsqrt)) * F[:vectors]') + retmat = symmetrize!(scale(F[:vectors], real(vsqrt)) * F[:vectors]') else zc = complex(F[:vectors]) - retmat = symmetrize!(diagmm(zc, vsqrt) * zc') + retmat = symmetrize!(scale(zc, vsqrt) * zc') end if cond return retmat, norm(vsqrt, Inf)^2/norm(F[:values], Inf) diff --git a/base/linalg/matmul.jl b/base/linalg/matmul.jl index 849db3b25279c..8ef33a3f2e39d 100644 --- a/base/linalg/matmul.jl +++ b/base/linalg/matmul.jl @@ -1,7 +1,7 @@ # matmul.jl: Everything to do with dense matrix multiplication # multiply by diagonal matrix as vector -function diagmm!(C::Matrix, A::Matrix, b::Vector) +function scale!(C::Matrix, A::Matrix, b::Vector) m, n = size(A) if n != length(b) error("argument dimensions do not match") @@ -15,7 +15,7 @@ function diagmm!(C::Matrix, A::Matrix, b::Vector) return C end -function diagmm!(C::Matrix, b::Vector, A::Matrix) +function scale!(C::Matrix, b::Vector, A::Matrix) m, n = size(A) if m != length(b) error("argument dimensions do not match") @@ -28,11 +28,11 @@ function diagmm!(C::Matrix, b::Vector, A::Matrix) return C end -diagmm(A::Matrix, b::Vector) = - diagmm!(Array(promote_type(eltype(A),eltype(b)),size(A)), A, b) +scale(A::Matrix, b::Vector) = + scale!(Array(promote_type(eltype(A),eltype(b)),size(A)), A, b) -diagmm(b::Vector, A::Matrix) = - diagmm!(Array(promote_type(eltype(A),eltype(b)),size(A)), b, A) +scale(b::Vector, A::Matrix) = + scale!(Array(promote_type(eltype(A),eltype(b)),size(A)), b, A) # Dot products diff --git a/base/linalg/sparse.jl b/base/linalg/sparse.jl index fa30d80472e52..bd4c709310541 100644 --- a/base/linalg/sparse.jl +++ b/base/linalg/sparse.jl @@ -537,10 +537,10 @@ function istril(A::SparseMatrixCSC) return true end -## diagmm +## scale methods # multiply by diagonal matrix as vector -function diagmm!{Tv,Ti}(C::SparseMatrixCSC{Tv,Ti}, A::SparseMatrixCSC, b::Vector) +function 
scale!{Tv,Ti}(C::SparseMatrixCSC{Tv,Ti}, A::SparseMatrixCSC, b::Vector) m, n = size(A) if n != length(b) || size(A) != size(C) error("argument dimensions do not match") @@ -555,7 +555,7 @@ function diagmm!{Tv,Ti}(C::SparseMatrixCSC{Tv,Ti}, A::SparseMatrixCSC, b::Vector return C end -function diagmm!{Tv,Ti}(C::SparseMatrixCSC{Tv,Ti}, b::Vector, A::SparseMatrixCSC) +function scale!{Tv,Ti}(C::SparseMatrixCSC{Tv,Ti}, b::Vector, A::SparseMatrixCSC) m, n = size(A) if n != length(b) || size(A) != size(C) error("argument dimensions do not match") @@ -570,8 +570,8 @@ function diagmm!{Tv,Ti}(C::SparseMatrixCSC{Tv,Ti}, b::Vector, A::SparseMatrixCSC return C end -diagmm{Tv,Ti,T}(A::SparseMatrixCSC{Tv,Ti}, b::Vector{T}) = - diagmm!(SparseMatrixCSC(size(A,1),size(A,2),Ti[],Ti[],promote_type(Tv,T)[]), A, b) +scale{Tv,Ti,T}(A::SparseMatrixCSC{Tv,Ti}, b::Vector{T}) = + scale!(SparseMatrixCSC(size(A,1),size(A,2),Ti[],Ti[],promote_type(Tv,T)[]), A, b) -diagmm{T,Tv,Ti}(b::Vector{T}, A::SparseMatrixCSC{Tv,Ti}) = - diagmm!(SparseMatrixCSC(size(A,1),size(A,2),Ti[],Ti[],promote_type(Tv,T)[]), b, A) +scale{T,Tv,Ti}(b::Vector{T}, A::SparseMatrixCSC{Tv,Ti}) = + scale!(SparseMatrixCSC(size(A,1),size(A,2),Ti[],Ti[],promote_type(Tv,T)[]), b, A) diff --git a/doc/stdlib/linalg.rst b/doc/stdlib/linalg.rst index 573a9e6024c6e..b81f4a6ab10a7 100644 --- a/doc/stdlib/linalg.rst +++ b/doc/stdlib/linalg.rst @@ -31,7 +31,7 @@ Linear algebra functions in Julia are largely implemented by calling functions f .. function:: lufact(A) -> LU - Compute the LU factorization of ``A``, returning an ``LU`` object for dense ``A`` or an ``UmfpackLU`` object for sparse ``A``. The individual components of the factorization ``F`` can be accesed by indexing: ``F[:L]``, ``F[:U]``, and ``F[:P]`` (permutation matrix) or ``F[:p]`` (permutation vector). An ``UmfpackLU`` object has additional components ``F[:q]`` (the left permutation vector) and ``Rs`` the vector of scaling factors. 
The following functions are available for both ``LU`` and ``UmfpackLU`` objects: ``size``, ``\`` and ``det``. For ``LU`` there is also an ``inv`` method. The sparse LU factorization is such that ``L*U`` is equal to``diagmm(Rs,A)[p,q]``. + Compute the LU factorization of ``A``, returning an ``LU`` object for dense ``A`` or an ``UmfpackLU`` object for sparse ``A``. The individual components of the factorization ``F`` can be accessed by indexing: ``F[:L]``, ``F[:U]``, and ``F[:P]`` (permutation matrix) or ``F[:p]`` (permutation vector). An ``UmfpackLU`` object has additional components ``F[:q]`` (the left permutation vector) and ``Rs`` the vector of scaling factors. The following functions are available for both ``LU`` and ``UmfpackLU`` objects: ``size``, ``\`` and ``det``. For ``LU`` there is also an ``inv`` method. The sparse LU factorization is such that ``L*U`` is equal to ``scale(Rs,A)[p,q]``. .. function:: lufact!(A) -> LU @@ -47,7 +47,7 @@ Linear algebra functions in Julia are largely implemented by calling functions f .. function:: cholfact(A, [ll]) -> CholmodFactor - Compute the sparse Cholesky factorization of a sparse matrix ``A``. If ``A`` is Hermitian its Cholesky factor is determined.
If ``A`` is not Hermitian the Cholesky factor of ``A*A'`` is determined. A fill-reducing permutation is used. Methods for ``size``, ``solve``, ``\``, ``findn_nzs``, ``diag``, ``det`` and ``logdet``. One of the solve methods includes an integer argument that can be used to solve systems involving parts of the factorization only. The optional boolean argument, ``ll`` determines whether the factorization returned is of the ``A[p,p] = L*L'`` form, where ``L`` is lower triangular or ``A[p,p] = scale(L,D)*L'`` form where ``L`` is unit lower triangular and ``D`` is a non-negative vector. The default is LDL. .. function:: cholfact!(A, [LU]) -> Cholesky @@ -191,7 +191,7 @@ Linear algebra functions in Julia are largely implemented by calling functions f Construct a diagonal matrix and place ``v`` on the ``k``-th diagonal -.. function:: diagmm(matrix, vector) +.. function:: scale(matrix, vector) Multiply matrices, interpreting the vector argument as a diagonal matrix. The arguments may occur in the other order to multiply with the diagonal diff --git a/test/linalg.jl b/test/linalg.jl index de45c31630197..5018230dff751 100644 --- a/test/linalg.jl +++ b/test/linalg.jl @@ -56,7 +56,7 @@ for elty in (Float32, Float64, Complex64, Complex128) d,v = eig(asym) # symmetric eigen-decomposition @test_approx_eq asym*v[:,1] d[1]*v[:,1] - @test_approx_eq v*diagmm(d,v') asym + @test_approx_eq v*scale(d,v') asym d,v = eig(a) # non-symmetric eigen decomposition for i in 1:size(a,2) @test_approx_eq a*v[:,i] d[i]*v[:,i] end @@ -74,7 +74,7 @@ for elty in (Float32, Float64, Complex64, Complex128) @test istriu(f[:T]) || isreal(a) usv = svdfact(a) # singular value decomposition - @test_approx_eq usv[:U]*diagmm(usv[:S],usv[:Vt]) a + @test_approx_eq usv[:U]*scale(usv[:S],usv[:Vt]) a gsvd = svdfact(a,a[1:5,:]) # Generalized svd @test_approx_eq gsvd[:U]*gsvd[:D1]*gsvd[:R]*gsvd[:Q]' a @@ -382,7 +382,7 @@ for elty in (Float32, Float64, Complex64, Complex128) A = convert(Array{elty, 2}, Ainit) Asym = A'A 
vals, Z = LinAlg.LAPACK.syevr!('V', copy(Asym)) - @test_approx_eq Z*diagmm(vals, Z') Asym + @test_approx_eq Z*scale(vals, Z') Asym @test all(vals .> 0.0) @test_approx_eq LinAlg.LAPACK.syevr!('N','V','U',copy(Asym),0.0,1.0,4,5,-1.0)[1] vals[vals .< 1.0] @test_approx_eq LinAlg.LAPACK.syevr!('N','I','U',copy(Asym),0.0,1.0,4,5,-1.0)[1] vals[4:5] diff --git a/test/suitesparse.jl b/test/suitesparse.jl index abc0620016de4..fbabe8a8d376e 100644 --- a/test/suitesparse.jl +++ b/test/suitesparse.jl @@ -11,7 +11,7 @@ A = sparse(increment!([0,4,1,1,2,2,0,1,2,3,4,4]), [2.,1.,3.,4.,-1.,-3.,3.,6.,2.,1.,4.,2.], 5, 5) lua = lufact(A) L,U,p,q,Rs = lua[:(:)] -@test_approx_eq diagmm(Rs,A)[p,q] L*U +@test_approx_eq scale(Rs,A)[p,q] L*U @test_approx_eq det(lua) det(full(A))