Deprecate diagmm in favor of scale #2918

Merged (1 commit) on Apr 24, 2013
2 changes: 2 additions & 0 deletions base/deprecated.jl
@@ -187,6 +187,8 @@ export PipeString
@deprecate remote_call_wait remotecall_wait
@deprecate has(s::Set, x) contains(s, x)
@deprecate has(s::IntSet, x) contains(s, x)
@deprecate diagmm scale
@deprecate diagmm! scale!

@deprecate expr(hd, a...) Expr(hd, a...)
@deprecate expr(hd, a::Array{Any,1}) Expr(hd, a...)
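As a quick illustration of what these two @deprecate entries buy users — a minimal sketch, assuming the 0.2-era Base API shown in this diff (the matrix and vector below are made up for the example): the old names keep working, print a deprecation warning, and forward to the new ones.

    A = rand(3, 3)
    b = [1.0, 2.0, 3.0]

    C = diagmm(A, b)   # warns that diagmm is deprecated, then forwards to scale(A, b)
    C == scale(A, b)   # true: column j of A is multiplied by b[j]

    diagmm!(A, b)      # likewise forwards to scale!(A, b), scaling A in place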
5 changes: 3 additions & 2 deletions base/exports.jl
@@ -566,8 +566,8 @@ export
det,
diag,
diagm,
diagmm,
diagmm!,
# diagmm,
# diagmm!,
diff,
dot,
eig,
@@ -614,6 +614,7 @@ export
randsym,
rank,
rref,
scale,
scale!,
schur,
schurfact,
5 changes: 3 additions & 2 deletions base/linalg.jl
@@ -44,8 +44,8 @@ export
det,
diag,
diagm,
diagmm,
diagmm!,
# diagmm,
# diagmm!,
diff,
dot,
eig,
@@ -92,6 +92,7 @@ export
randsym,
rank,
rref,
scale,
scale!,
schur,
schurfact!,
12 changes: 6 additions & 6 deletions base/linalg/cholmod.jl
@@ -17,8 +17,8 @@ import Base: (*), convert, copy, ctranspose, eltype, findnz, getindex, hcat,
isvalid, nnz, show, size, sort!, transpose, vcat

import ..LinAlg: (\), A_mul_Bc, A_mul_Bt, Ac_ldiv_B, Ac_mul_B, At_ldiv_B, At_mul_B,
Factorization, cholfact, cholfact!, copy, dense, det, diag, diagmm,
diagmm!, full, logdet, norm, solve, sparse
Factorization, cholfact, cholfact!, copy, dense, det, diag, #diagmm, diagmm!,
full, logdet, norm, scale, scale!, solve, sparse

include("linalg/cholmod_h.jl")

@@ -886,16 +886,16 @@ chm_speye(n::Integer) = chm_speye(n, n, 1.) # default shape is square

chm_spzeros(m::Integer,n::Integer,nzmax::Integer) = chm_spzeros(m,n,nzmax,1.)

function diagmm!{T<:CHMVTypes}(b::Vector{T}, A::CholmodSparse{T})
function scale!{T<:CHMVTypes}(b::Vector{T}, A::CholmodSparse{T})
chm_scale!(A,CholmodDense(b),CHOLMOD_ROW)
A
end
diagmm{T<:CHMVTypes}(b::Vector{T}, A::CholmodSparse{T}) = diagmm!(b,copy(A))
function diagmm!{T<:CHMVTypes}(A::CholmodSparse{T},b::Vector{T})
scale{T<:CHMVTypes}(b::Vector{T}, A::CholmodSparse{T}) = scale!(b,copy(A))
function scale!{T<:CHMVTypes}(A::CholmodSparse{T},b::Vector{T})
chm_scale!(A,CholmodDense(b),CHOLMOD_COL)
A
end
diagmm{T<:CHMVTypes}(A::CholmodSparse{T},b::Vector{T}) = diagmm!(copy(A), b)
scale{T<:CHMVTypes}(A::CholmodSparse{T},b::Vector{T}) = scale!(copy(A), b)

norm(A::CholmodSparse) = norm(A,1)

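The vector-first and vector-second methods map onto CHOLMOD's row and column scaling modes (CHOLMOD_ROW and CHOLMOD_COL). A rough sketch, assuming a CholmodSparse wrapper can be built from a Float64 SparseMatrixCSC as shown; the exact constructor is an assumption, not taken from this diff:

    S = CholmodSparse(sprand(4, 4, 0.5))   # hypothetical wrapping of a sparse matrix
    r = rand(4); c = rand(4)

    scale(r, S)    # new copy with row i multiplied by r[i]    (CHOLMOD_ROW)
    scale(S, c)    # new copy with column j multiplied by c[j] (CHOLMOD_COL)
    scale!(r, S)   # same row scaling applied to S in place via chm_scale!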
4 changes: 2 additions & 2 deletions base/linalg/dense.jl
@@ -226,7 +226,7 @@ function ^(A::Matrix, p::Number)
else
Xinv = inv(X)
end
diagmm(X, v.^p)*Xinv
scale(X, v.^p)*Xinv
end

function rref{T}(A::Matrix{T})
@@ -454,7 +454,7 @@ function pinv{T<:BlasFloat}(A::StridedMatrix{T})
Sinv = zeros(T, length(SVD[:S]))
index = SVD[:S] .> eps(real(one(T)))*max(size(A))*max(SVD[:S])
Sinv[index] = 1.0 ./ SVD[:S][index]
SVD[:Vt]'diagmm(Sinv, SVD[:U]')
SVD[:Vt]'scale(Sinv, SVD[:U]')
end
pinv{T<:Integer}(A::StridedMatrix{T}) = pinv(float(A))
pinv(a::StridedVector) = pinv(reshape(a, length(a), 1))
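Both call sites use the vector-as-diagonal pattern; in pinv the pseudoinverse is rebuilt from the SVD by inverting the nonzero singular values, without ever materializing a diagonal matrix. A small sketch of that identity, assuming the 0.2-era svdfact API and a well-conditioned input:

    A = rand(4, 3)
    F = svdfact(A)
    Sinv = 1.0 ./ F[:S]                 # assume no negligible singular values here
    P = F[:Vt]' * scale(Sinv, F[:U]')   # same formula as pinv above
    norm(A*P*A - A)                     # ~0: P behaves as the pseudoinverse of A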
4 changes: 2 additions & 2 deletions base/linalg/diagonal.jl
@@ -29,8 +29,8 @@ isposdef(D::Diagonal) = all(D.diag .> 0)

*(Da::Diagonal, Db::Diagonal) = Diagonal(Da.diag .* Db.diag)
*(D::Diagonal, V::Vector) = D.diag .* V
*(A::Matrix, D::Diagonal) = diagmm(A,D.diag)
*(D::Diagonal, A::Matrix) = diagmm(D.diag,A)
*(A::Matrix, D::Diagonal) = scale(A,D.diag)
*(D::Diagonal, A::Matrix) = scale(D.diag,A)

\(Da::Diagonal, Db::Diagonal) = Diagonal(Db.diag ./ Da.diag )
/(Da::Diagonal, Db::Diagonal) = Diagonal(Da.diag ./ Db.diag )
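With these two methods, multiplying by a Diagonal on the right scales columns and on the left scales rows, reusing the same dense kernels. A minimal sketch, assuming the 0.2-era Diagonal and diagm:

    A = rand(3, 3)
    d = [2.0, 3.0, 4.0]
    D = Diagonal(d)

    A*D == scale(A, d)   # column j of A times d[j]
    D*A == scale(d, A)   # row i of A times d[i]
    # numerically the same as A*diagm(d) and diagm(d)*A, without forming a dense diagonal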
6 changes: 3 additions & 3 deletions base/linalg/factorization.jl
@@ -454,7 +454,7 @@ function eigmin(A::Union(Number, StridedMatrix))
iscomplex(v) ? error("Complex eigenvalues cannot be ordered") : min(v)
end

inv(A::Eigen) = diagmm(A.vectors, 1.0/A.values)*A.vectors'
inv(A::Eigen) = scale(A.vectors, 1.0/A.values)*A.vectors'
det(A::Eigen) = prod(A.values)

# SVD
@@ -506,7 +506,7 @@ function \{T<:BlasFloat}(A::SVD{T}, B::StridedVecOrMat{T})
n = length(A.S)
Sinv = zeros(T, n)
Sinv[A.S .> sqrt(eps())] = 1.0 ./ A.S
return diagmm(A.Vt', Sinv) * A.U[:,1:n]'B
scale(A.Vt', Sinv) * A.U[:,1:n]'B
end

# Generalized svd
@@ -624,4 +624,4 @@ end
function schur(A::StridedMatrix, B::StridedMatrix)
SchurF = schurfact(A, B)
return SchurF[:S], SchurF[:T], SchurF[:Q], SchurF[:Z]
end
end
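The Eigen and SVD methods above rely on the same idea: for a diagonalization A = V*Λ*V', the inverse is V*Λ^-1*V', and scale(A.vectors, 1.0/A.values) forms V*Λ^-1 in one pass. A sketch for a real symmetric matrix, whose computed eigenvectors are orthonormal up to roundoff (example data made up):

    B = rand(4, 4)
    A = B + B'                      # symmetric
    F = eigfact(A)
    Ainv = scale(F[:vectors], 1.0 ./ F[:values]) * F[:vectors]'
    norm(Ainv - inv(A))             # ~0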
4 changes: 2 additions & 2 deletions base/linalg/generic.jl
@@ -173,8 +173,8 @@ end

#diagmm!(C::AbstractMatrix, b::AbstractVector, A::AbstractMatrix)

diagmm!(A::AbstractMatrix, b::AbstractVector) = diagmm!(A,A,b)
diagmm!(b::AbstractVector, A::AbstractMatrix) = diagmm!(A,b,A)
scale!(A::AbstractMatrix, b::AbstractVector) = scale!(A,A,b)
scale!(b::AbstractVector, A::AbstractMatrix) = scale!(A,b,A)

#diagmm(A::AbstractMatrix, b::AbstractVector)
#diagmm(b::AbstractVector, A::AbstractMatrix)
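These generic fallbacks route the two-argument, in-place forms to the three-argument kernel with the output aliased to the input, so scale!(A, b) overwrites A rather than allocating. A minimal sketch:

    A = rand(3, 3)
    b = [10.0, 20.0, 30.0]

    B = scale(A, b)    # allocates a new matrix
    scale!(A, b)       # in place: equivalent to scale!(A, A, b)
    A == B             # true; A now holds the column-scaled result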
6 changes: 3 additions & 3 deletions base/linalg/hermitian.jl
@@ -41,17 +41,17 @@ eigmin(A::Hermitian) = eigvals(A, 1, 1)[1]

function expm(A::Hermitian)
F = eigfact(A)
diagmm(F[:vectors], exp(F[:values])) * F[:vectors]'
scale(F[:vectors], exp(F[:values])) * F[:vectors]'
end

function sqrtm(A::Hermitian, cond::Bool)
F = eigfact(A)
vsqrt = sqrt(complex(F[:values]))
if all(imag(vsqrt) .== 0)
retmat = symmetrize!(diagmm(F[:vectors], real(vsqrt)) * F[:vectors]')
retmat = symmetrize!(scale(F[:vectors], real(vsqrt)) * F[:vectors]')
else
zc = complex(F[:vectors])
retmat = symmetrize!(diagmm(zc, vsqrt) * zc')
retmat = symmetrize!(scale(zc, vsqrt) * zc')
end
if cond
return retmat, norm(vsqrt, Inf)^2/norm(F[:values], Inf)
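expm and sqrtm both use the spectral decomposition of a Hermitian matrix, f(A) = V*f(Λ)*V', with scale(F[:vectors], f(F[:values])) forming V*f(Λ) directly. A sketch cross-checking the expm formula against the dense matrix exponential, assuming the 0.2-era eigfact/expm API:

    B = rand(3, 3)
    S = B + B'                      # real symmetric
    A = Hermitian(S)
    F = eigfact(A)
    E = scale(F[:vectors], exp(F[:values])) * F[:vectors]'
    norm(E - expm(S))               # ~0: matches the dense expm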
12 changes: 6 additions & 6 deletions base/linalg/matmul.jl
@@ -1,7 +1,7 @@
# matmul.jl: Everything to do with dense matrix multiplication

# multiply by diagonal matrix as vector
function diagmm!(C::Matrix, A::Matrix, b::Vector)
function scale!(C::Matrix, A::Matrix, b::Vector)
m, n = size(A)
if n != length(b)
error("argument dimensions do not match")
@@ -15,7 +15,7 @@ function diagmm!(C::Matrix, A::Matrix, b::Vector)
return C
end

function diagmm!(C::Matrix, b::Vector, A::Matrix)
function scale!(C::Matrix, b::Vector, A::Matrix)
m, n = size(A)
if m != length(b)
error("argument dimensions do not match")
@@ -28,11 +28,11 @@ function diagmm!(C::Matrix, b::Vector, A::Matrix)
return C
end

diagmm(A::Matrix, b::Vector) =
diagmm!(Array(promote_type(eltype(A),eltype(b)),size(A)), A, b)
scale(A::Matrix, b::Vector) =
scale!(Array(promote_type(eltype(A),eltype(b)),size(A)), A, b)

diagmm(b::Vector, A::Matrix) =
diagmm!(Array(promote_type(eltype(A),eltype(b)),size(A)), b, A)
scale(b::Vector, A::Matrix) =
scale!(Array(promote_type(eltype(A),eltype(b)),size(A)), b, A)

# Dot products

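The out-of-place wrappers allocate the destination with a promoted element type, so mixed-type arguments give a sensible result. A small sketch (values chosen for the example):

    A = [1 2; 3 4]        # Matrix{Int}
    b = [0.5, 2.0]        # Vector{Float64}

    C = scale(A, b)       # eltype(C) is promote_type(Int, Float64), i.e. Float64
    # columns are scaled: C == [0.5 4.0; 1.5 8.0]

    scale(b, A)           # vector-first form scales rows instead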
14 changes: 7 additions & 7 deletions base/linalg/sparse.jl
@@ -537,10 +537,10 @@ function istril(A::SparseMatrixCSC)
return true
end

## diagmm
## scale methods

# multiply by diagonal matrix as vector
function diagmm!{Tv,Ti}(C::SparseMatrixCSC{Tv,Ti}, A::SparseMatrixCSC, b::Vector)
function scale!{Tv,Ti}(C::SparseMatrixCSC{Tv,Ti}, A::SparseMatrixCSC, b::Vector)
m, n = size(A)
if n != length(b) || size(A) != size(C)
error("argument dimensions do not match")
@@ -555,7 +555,7 @@ function diagmm!{Tv,Ti}(C::SparseMatrixCSC{Tv,Ti}, A::SparseMatrixCSC, b::Vector)
return C
end

function diagmm!{Tv,Ti}(C::SparseMatrixCSC{Tv,Ti}, b::Vector, A::SparseMatrixCSC)
function scale!{Tv,Ti}(C::SparseMatrixCSC{Tv,Ti}, b::Vector, A::SparseMatrixCSC)
m, n = size(A)
if n != length(b) || size(A) != size(C)
error("argument dimensions do not match")
@@ -570,8 +570,8 @@ function diagmm!{Tv,Ti}(C::SparseMatrixCSC{Tv,Ti}, b::Vector, A::SparseMatrixCSC)
return C
end

diagmm{Tv,Ti,T}(A::SparseMatrixCSC{Tv,Ti}, b::Vector{T}) =
diagmm!(SparseMatrixCSC(size(A,1),size(A,2),Ti[],Ti[],promote_type(Tv,T)[]), A, b)
scale{Tv,Ti,T}(A::SparseMatrixCSC{Tv,Ti}, b::Vector{T}) =
scale!(SparseMatrixCSC(size(A,1),size(A,2),Ti[],Ti[],promote_type(Tv,T)[]), A, b)

diagmm{T,Tv,Ti}(b::Vector{T}, A::SparseMatrixCSC{Tv,Ti}) =
diagmm!(SparseMatrixCSC(size(A,1),size(A,2),Ti[],Ti[],promote_type(Tv,T)[]), b, A)
scale{T,Tv,Ti}(b::Vector{T}, A::SparseMatrixCSC{Tv,Ti}) =
scale!(SparseMatrixCSC(size(A,1),size(A,2),Ti[],Ti[],promote_type(Tv,T)[]), b, A)
6 changes: 3 additions & 3 deletions doc/stdlib/linalg.rst
@@ -31,7 +31,7 @@ Linear algebra functions in Julia are largely implemented by calling functions f

.. function:: lufact(A) -> LU

Compute the LU factorization of ``A``, returning an ``LU`` object for dense ``A`` or an ``UmfpackLU`` object for sparse ``A``. The individual components of the factorization ``F`` can be accessed by indexing: ``F[:L]``, ``F[:U]``, and ``F[:P]`` (permutation matrix) or ``F[:p]`` (permutation vector). An ``UmfpackLU`` object has additional components ``F[:q]`` (the left permutation vector) and ``Rs``, the vector of scaling factors. The following functions are available for both ``LU`` and ``UmfpackLU`` objects: ``size``, ``\`` and ``det``. For ``LU`` there is also an ``inv`` method. The sparse LU factorization is such that ``L*U`` is equal to ``diagmm(Rs,A)[p,q]``.
Compute the LU factorization of ``A``, returning an ``LU`` object for dense ``A`` or an ``UmfpackLU`` object for sparse ``A``. The individual components of the factorization ``F`` can be accessed by indexing: ``F[:L]``, ``F[:U]``, and ``F[:P]`` (permutation matrix) or ``F[:p]`` (permutation vector). An ``UmfpackLU`` object has additional components ``F[:q]`` (the left permutation vector) and ``Rs``, the vector of scaling factors. The following functions are available for both ``LU`` and ``UmfpackLU`` objects: ``size``, ``\`` and ``det``. For ``LU`` there is also an ``inv`` method. The sparse LU factorization is such that ``L*U`` is equal to ``scale(Rs,A)[p,q]``.

.. function:: lufact!(A) -> LU

@@ -47,7 +47,7 @@ Linear algebra functions in Julia are largely implemented by calling functions f

.. function:: cholfact(A, [ll]) -> CholmodFactor

Compute the sparse Cholesky factorization of a sparse matrix ``A``. If ``A`` is Hermitian its Cholesky factor is determined. If ``A`` is not Hermitian the Cholesky factor of ``A*A'`` is determined. A fill-reducing permutation is used. Methods are defined for ``size``, ``solve``, ``\``, ``findn_nzs``, ``diag``, ``det`` and ``logdet``. One of the solve methods includes an integer argument that can be used to solve systems involving parts of the factorization only. The optional boolean argument ``ll`` determines whether the factorization returned is of the ``A[p,p] = L*L'`` form, where ``L`` is lower triangular, or of the ``A[p,p] = diagmm(L,D)*L'`` form, where ``L`` is unit lower triangular and ``D`` is a non-negative vector. The default is LDL.
Compute the sparse Cholesky factorization of a sparse matrix ``A``. If ``A`` is Hermitian its Cholesky factor is determined. If ``A`` is not Hermitian the Cholesky factor of ``A*A'`` is determined. A fill-reducing permutation is used. Methods are defined for ``size``, ``solve``, ``\``, ``findn_nzs``, ``diag``, ``det`` and ``logdet``. One of the solve methods includes an integer argument that can be used to solve systems involving parts of the factorization only. The optional boolean argument ``ll`` determines whether the factorization returned is of the ``A[p,p] = L*L'`` form, where ``L`` is lower triangular, or of the ``A[p,p] = scale(L,D)*L'`` form, where ``L`` is unit lower triangular and ``D`` is a non-negative vector. The default is LDL.

.. function:: cholfact!(A, [LU]) -> Cholesky

@@ -191,7 +191,7 @@ Linear algebra functions in Julia are largely implemented by calling functions f

Construct a diagonal matrix and place ``v`` on the ``k``-th diagonal

.. function:: diagmm(matrix, vector)
.. function:: scale(matrix, vector)

Multiply matrices, interpreting the vector argument as a diagonal matrix.
The arguments may occur in the other order to multiply with the diagonal
6 changes: 3 additions & 3 deletions test/linalg.jl
@@ -56,7 +56,7 @@ for elty in (Float32, Float64, Complex64, Complex128)

d,v = eig(asym) # symmetric eigen-decomposition
@test_approx_eq asym*v[:,1] d[1]*v[:,1]
@test_approx_eq v*diagmm(d,v') asym
@test_approx_eq v*scale(d,v') asym

d,v = eig(a) # non-symmetric eigen decomposition
for i in 1:size(a,2) @test_approx_eq a*v[:,i] d[i]*v[:,i] end
@@ -74,7 +74,7 @@ for elty in (Float32, Float64, Complex64, Complex128)
@test istriu(f[:T]) || isreal(a)

usv = svdfact(a) # singular value decomposition
@test_approx_eq usv[:U]*diagmm(usv[:S],usv[:Vt]) a
@test_approx_eq usv[:U]*scale(usv[:S],usv[:Vt]) a

gsvd = svdfact(a,a[1:5,:]) # Generalized svd
@test_approx_eq gsvd[:U]*gsvd[:D1]*gsvd[:R]*gsvd[:Q]' a
@@ -382,7 +382,7 @@ for elty in (Float32, Float64, Complex64, Complex128)
A = convert(Array{elty, 2}, Ainit)
Asym = A'A
vals, Z = LinAlg.LAPACK.syevr!('V', copy(Asym))
@test_approx_eq Z*diagmm(vals, Z') Asym
@test_approx_eq Z*scale(vals, Z') Asym
@test all(vals .> 0.0)
@test_approx_eq LinAlg.LAPACK.syevr!('N','V','U',copy(Asym),0.0,1.0,4,5,-1.0)[1] vals[vals .< 1.0]
@test_approx_eq LinAlg.LAPACK.syevr!('N','I','U',copy(Asym),0.0,1.0,4,5,-1.0)[1] vals[4:5]
2 changes: 1 addition & 1 deletion test/suitesparse.jl
@@ -11,7 +11,7 @@ A = sparse(increment!([0,4,1,1,2,2,0,1,2,3,4,4]),
[2.,1.,3.,4.,-1.,-3.,3.,6.,2.,1.,4.,2.], 5, 5)
lua = lufact(A)
L,U,p,q,Rs = lua[:(:)]
@test_approx_eq diagmm(Rs,A)[p,q] L*U
@test_approx_eq scale(Rs,A)[p,q] L*U

@test_approx_eq det(lua) det(full(A))
