From b14356f45cfd83f3cc9e9871786e077a4a942de2 Mon Sep 17 00:00:00 2001 From: Andreas Noack Jensen Date: Thu, 12 Feb 2015 16:54:28 -0500 Subject: [PATCH 1/3] Add qrfact(SparseMatrixCSC) by wrapping SPQR. Make \(SparseMatrixCSC) work for least squares problems. Some tests. --- base/linalg/generic.jl | 8 +- base/sparse.jl | 1 + base/sparse/cholmod.jl | 12 ++- base/sparse/linalg.jl | 4 +- base/sparse/spqr.jl | 133 ++++++++++++++++++++++++++ base/sparse/spqr_h.jl | 30 ------ test/runtests.jl | 6 -- test/sparse.jl | 4 + test/{sparse => sparsedir}/cholmod.jl | 0 test/{sparse => sparsedir}/sparse.jl | 0 test/sparsedir/spqr.jl | 43 +++++++++ test/{sparse => sparsedir}/umfpack.jl | 0 12 files changed, 200 insertions(+), 41 deletions(-) create mode 100644 base/sparse/spqr.jl delete mode 100644 base/sparse/spqr_h.jl create mode 100644 test/sparse.jl rename test/{sparse => sparsedir}/cholmod.jl (100%) rename test/{sparse => sparsedir}/sparse.jl (100%) create mode 100644 test/sparsedir/spqr.jl rename test/{sparse => sparsedir}/umfpack.jl (100%) diff --git a/base/linalg/generic.jl b/base/linalg/generic.jl index cd9a04e729fd7..3fbeedf7666bd 100644 --- a/base/linalg/generic.jl +++ b/base/linalg/generic.jl @@ -237,11 +237,15 @@ function inv{T}(A::AbstractMatrix{T}) A_ldiv_B!(factorize(convert(AbstractMatrix{S}, A)), eye(S, chksquare(A))) end +function \{T}(A::AbstractMatrix{T}, B::AbstractVecOrMat{T}) + size(A,1) == size(B,1) || throw(DimensionMismatch("LHS and RHS should have the same number of rows. LHS has $(size(A,1)) rows, but RHS has $(size(B,1)) rows.")) + factorize(A)\B +end function \{TA,TB}(A::AbstractMatrix{TA}, B::AbstractVecOrMat{TB}) TC = typeof(one(TA)/one(TB)) - size(A,1) == size(B,1) || throw(DimensionMismatch("LHS and RHS should have the same number of rows. LHS has $(size(A,1)) rows, but RHS has $(size(B,1)) rows.")) - \(factorize(TA == TC ? A : convert(AbstractMatrix{TC}, A)), TB == TC ? copy(B) : convert(AbstractArray{TC}, B)) + convert(AbstractMatrix{TC}, A)\convert(AbstractArray{TC}, B) end + \(a::AbstractVector, b::AbstractArray) = reshape(a, length(a), 1) \ b /(A::AbstractVecOrMat, B::AbstractVecOrMat) = (B' \ A')' # \(A::StridedMatrix,x::Number) = inv(A)*x Should be added at some point when the old elementwise version has been deprecated long enough diff --git a/base/sparse.jl b/base/sparse.jl index 93edb6a647652..cd4de441f106f 100644 --- a/base/sparse.jl +++ b/base/sparse.jl @@ -19,5 +19,6 @@ include("sparse/csparse.jl") include("sparse/linalg.jl") include("sparse/umfpack.jl") include("sparse/cholmod.jl") +include("sparse/spqr.jl") end # module SparseMatrix diff --git a/base/sparse/cholmod.jl b/base/sparse/cholmod.jl index 994e383ba6c27..f031e243b8aad 100644 --- a/base/sparse/cholmod.jl +++ b/base/sparse/cholmod.jl @@ -13,7 +13,7 @@ export Factor, Sparse -using Base.SparseMatrix: AbstractSparseMatrix, SparseMatrixCSC, increment, increment!, indtype, decrement, decrement! 
+using Base.SparseMatrix: AbstractSparseMatrix, SparseMatrixCSC, increment, indtype
 
 #########
 # Setup #
@@ -717,7 +717,7 @@ function Sparse(filename::ASCIIString)
 end
 
 ## convertion back to base Julia types
-function convert{T<:VTypes}(::Type{Matrix}, D::Dense{T})
+function convert{T}(::Type{Matrix{T}}, D::Dense{T})
     s = unsafe_load(D.p)
     a = Array(T, s.nrow, s.ncol)
     if s.d == s.nrow
@@ -731,6 +731,14 @@ function convert{T<:VTypes}(::Type{Matrix}, D::Dense{T})
     end
     a
 end
+convert{T}(::Type{Matrix}, D::Dense{T}) = convert(Matrix{T}, D)
+function convert{T}(::Type{Vector{T}}, D::Dense{T})
+    if size(D, 2) > 1
+        throw(DimensionMismatch("input must be a vector but had $(size(D, 2)) columns"))
+    end
+    reshape(convert(Matrix, D), size(D, 1))
+end
+convert{T}(::Type{Vector}, D::Dense{T}) = convert(Vector{T}, D)
 
 function convert{Tv,Ti}(::Type{SparseMatrixCSC{Tv,Ti}}, A::Sparse{Tv,Ti})
     s = unsafe_load(A.p)
diff --git a/base/sparse/linalg.jl b/base/sparse/linalg.jl
index 3b5249c313699..746820a303833 100644
--- a/base/sparse/linalg.jl
+++ b/base/sparse/linalg.jl
@@ -686,8 +686,10 @@ function factorize(A::SparseMatrixCSC)
             end
         end
         return lufact(A)
+    elseif m > n
+        return qrfact(A)
     else
-        throw(ArgumentError("sparse least squares problems by QR are not handled yet"))
+        throw(ArgumentError("underdetermined systems are not implemented yet"))
     end
 end
 
diff --git a/base/sparse/spqr.jl b/base/sparse/spqr.jl
new file mode 100644
index 0000000000000..83041657447f4
--- /dev/null
+++ b/base/sparse/spqr.jl
@@ -0,0 +1,133 @@
+module SPQR
+
+# ordering options
+const ORDERING_FIXED = int32(0)
+const ORDERING_NATURAL = int32(1)
+const ORDERING_COLAMD = int32(2)
+const ORDERING_GIVEN = int32(3) # only used for C/C++ interface
+const ORDERING_CHOLMOD = int32(4) # CHOLMOD best-effort (COLAMD, METIS,...)
+const ORDERING_AMD = int32(5) # AMD(A'*A)
+const ORDERING_METIS = int32(6) # metis(A'*A)
+const ORDERING_DEFAULT = int32(7) # SuiteSparseQR default ordering
+const ORDERING_BEST = int32(8) # try COLAMD, AMD, and METIS; pick best
+const ORDERING_BESTAMD = int32(9) # try COLAMD and AMD; pick best
+
+# Let [m n] = size of the matrix after pruning singletons. The default
+# ordering strategy is to use COLAMD if m <= 2*n. Otherwise, AMD(A'A) is
+# tried. If there is a high fill-in with AMD then try METIS(A'A) and take
+# the best of AMD and METIS. METIS is not tried if it isn't installed.
+
+# tol options
+const DEFAULT_TOL = int32(-2) # if tol <= -2, the default tol is used
+const NO_TOL = int32(-1) # if -2 < tol < 0, then no tol is used
+
+# for qmult, method can be 0,1,2,3:
+const QTX = int32(0)
+const QX = int32(1)
+const XQT = int32(2)
+const XQ = int32(3)
+
+# system can be 0,1,2,3: Given Q*R=A*E from SuiteSparseQR_factorize:
+const RX_EQUALS_B = int32(0) # solve R*X=B or X = R\B
+const RETX_EQUALS_B = int32(1) # solve R*E'*X=B or X = E*(R\B)
+const RTX_EQUALS_B = int32(2) # solve R'*X=B or X = R'\B
+const RTX_EQUALS_ETB = int32(3) # solve R'*X=E'*B or X = R'\(E'*B)
+
+
+using Base.SparseMatrix: SparseMatrixCSC
+using Base.SparseMatrix.CHOLMOD: C_Dense, C_Sparse, Dense, ITypes, Sparse, VTypes, common
+
+import Base: size
+import Base.LinAlg: qrfact
+import Base.SparseMatrix.CHOLMOD: convert, free!
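+
+# C_Factorization below mirrors the opaque SuiteSparseQR_C_factorization object
+# returned by SuiteSparseQR_C_factorize; Factorization is the Julia-side wrapper
+# that also records the matrix dimensions and is released by free! through its
+# finalizer. backslash, factorize, solve and qmult are thin wrappers around the
+# corresponding SuiteSparseQR_C functions. A minimal usage sketch (Float64
+# problem assumed; see test/sparsedir/spqr.jl for the actual tests):
+#
+#     A = sprandn(100, 10, 0.1)
+#     F = qrfact(A)              # SPQR.Factorization
+#     x = F \ randn(100)         # least squares solution E*(R \ (Q'b))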
+
+
+
+immutable C_Factorization{Tv<:VTypes,Ti<:ITypes}
+    xtype::Cint
+    factors::Ptr{Tv}
+end
+
+type Factorization{Tv<:VTypes,Ti<:ITypes} <: Base.LinAlg.Factorization{Tv}
+    m::Int
+    n::Int
+    p::Ptr{C_Factorization{Tv,Ti}}
+end
+
+size(F::Factorization) = (F.m, F.n)
+function size(F::Factorization, i::Integer)
+    if i < 1
+        throw(ArgumentError("dimension must be positive"))
+    end
+    if i == 1
+        return F.m
+    elseif i == 2
+        return F.n
+    end
+    return 1
+end
+
+function free!{Tv<:VTypes,Ti<:ITypes}(F::Factorization{Tv,Ti})
+    ccall((:SuiteSparseQR_C_free, :libspqr), Cint,
+        (Ptr{Ptr{C_Factorization{Tv,Ti}}}, Ptr{Void}),
+            &F.p, common(Ti)) == 1
+end
+
+function backslash{Tv<:VTypes,Ti<:ITypes}(ordering::Integer, tol::Real, A::Sparse{Tv,Ti}, B::Dense{Tv})
+    m, n = size(A)
+    if m != size(B, 1)
+        throw(DimensionMismatch("left hand side and right hand side must have the same number of rows"))
+    end
+    d = Dense(ccall((:SuiteSparseQR_C_backslash, :libspqr), Ptr{C_Dense{Tv}},
+        (Cint, Cdouble, Ptr{C_Sparse{Tv,Ti}}, Ptr{C_Dense{Tv}}, Ptr{Void}),
+            ordering, tol, A.p, B.p, common(Ti)))
+    finalizer(d, free!)
+    d
+end
+
+function factorize{Tv<:VTypes,Ti<:ITypes}(ordering::Integer, tol::Real, A::Sparse{Tv,Ti})
+    f = Factorization(size(A)..., ccall((:SuiteSparseQR_C_factorize, :libspqr), Ptr{C_Factorization{Tv,Ti}},
+        (Cint, Cdouble, Ptr{C_Sparse{Tv,Ti}}, Ptr{Void}),
+            ordering, tol, A.p, common(Ti)))
+    finalizer(f, free!)
+    f
+end
+
+function solve{Tv<:VTypes,Ti<:ITypes}(system::Integer, QR::Factorization{Tv,Ti}, B::Dense{Tv})
+    m, n = size(QR)
+    mB = size(B, 1)
+    if (system == RX_EQUALS_B || system == RETX_EQUALS_B) && m != mB
+        throw(DimensionMismatch("number of rows in factorized matrix must equal number of rows in right hand side"))
+    elseif (system == RTX_EQUALS_ETB || system == RTX_EQUALS_B) && n != mB
+        throw(DimensionMismatch("number of columns in factorized matrix must equal number of rows in right hand side"))
+    end
+    d = Dense(ccall((:SuiteSparseQR_C_solve, :libspqr), Ptr{C_Dense{Tv}},
+        (Cint, Ptr{C_Factorization{Tv,Ti}}, Ptr{C_Dense{Tv}}, Ptr{Void}),
+            system, QR.p, B.p, common(Ti)))
+    finalizer(d, free!)
+    d
+end
+
+function qmult{Tv<:VTypes,Ti<:ITypes}(method::Integer, QR::Factorization{Tv,Ti}, X::Dense{Tv})
+    mQR, nQR = size(QR)
+    mX, nX = size(X)
+    if (method == QTX || method == QX) && mQR != mX
+        throw(DimensionMismatch("Q matrix has $mQR rows, but dense matrix has $mX rows"))
+    elseif (method == XQT || method == XQ) && mQR != nX
+        throw(DimensionMismatch("Q matrix has $mQR rows, but dense matrix has $nX columns"))
+    end
+    d = Dense(ccall((:SuiteSparseQR_C_qmult, :libspqr), Ptr{C_Dense{Tv}},
+        (Cint, Ptr{C_Factorization{Tv,Ti}}, Ptr{C_Dense{Tv}}, Ptr{Void}),
+            method, QR.p, X.p, common(Ti)))
+    finalizer(d, free!)
+    d
+end
+
+qrfact(A::SparseMatrixCSC) = factorize(ORDERING_DEFAULT, DEFAULT_TOL, Sparse(A))
+
+function (\){T}(F::Factorization{T}, B::StridedVecOrMat{T})
+    QtB = qmult(QTX, F, Dense(B))
+    convert(typeof(B), solve(RETX_EQUALS_B, F, QtB))
+end
+
+end # module
\ No newline at end of file
diff --git a/base/sparse/spqr_h.jl b/base/sparse/spqr_h.jl
deleted file mode 100644
index a9f0985b4e1f1..0000000000000
--- a/base/sparse/spqr_h.jl
+++ /dev/null
@@ -1,30 +0,0 @@
-## SuiteSparseQR
-
-## ordering options
-const SPQR_ORDERING_FIXED = int32(0)
-const SPQR_ORDERING_NATURAL = int32(1)
-const SPQR_ORDERING_COLAMD = int32(2)
-const SPQR_ORDERING_GIVEN = int32(3) # only used for C/C++ interface
-const SPQR_ORDERING_CHOLMOD = int32(4) # CHOLMOD best-effort (COLAMD, METIS,...)
-const SPQR_ORDERING_AMD = int32(5) # AMD(A'*A) -const SPQR_ORDERING_METIS = int32(6) # metis(A'*A) -const SPQR_ORDERING_DEFAULT = int32(7) # SuiteSparseQR default ordering -const SPQR_ORDERING_BEST = int32(8) # try COLAMD, AMD, and METIS; pick best -const SPQR_ORDERING_BESTAMD = int32(9) # try COLAMD and AMD; pick best - -# Let [m n] = size of the matrix after pruning singletons. The default -# ordering strategy is to use COLAMD if m <= 2*n. Otherwise, AMD(A'A) is -# tried. If there is a high fill-in with AMD then try METIS(A'A) and take -# the best of AMD and METIS. METIS is not tried if it isn't installed. - -## Operations in qmult -const SPQR_QTX = int32(0) # Y = Q'*X -const SPQR_QX = int32(1) # Y = Q*X -const SPQR_XQT = int32(2) # Y = X*Q' -const SPQR_XQ = int32(3) # Y = X*Q - -## Types of systems to solve -const SPQR_RX_EQUALS_B = int32(0) # solve R*X=B or X = R\B -const SPQR_RETX_EQUALS_B = int32(1) # solve R*E'*X=B or X = E*(R\B) -const SPQR_RTX_EQUALS_B = int32(2) # solve R'*X=B or X = R'\B -const SPQR_RTX_EQUALS_ETB = int32(3) # solve R'*X=E'*B or X = R'\(E'*B) diff --git a/test/runtests.jl b/test/runtests.jl index 14022d2c5f7ef..9f5483d7bcbcf 100644 --- a/test/runtests.jl +++ b/test/runtests.jl @@ -25,12 +25,6 @@ push!(testnames, "parallel") tests = (ARGS==["all"] || isempty(ARGS)) ? testnames : ARGS -if "sparse" in tests - # specifically selected case - filter!(x -> x != "sparse", tests) - prepend!(tests, ["sparse/sparse", "sparse/cholmod", "sparse/umfpack"]) -end - if "linalg" in tests # specifically selected case filter!(x -> x != "linalg", tests) diff --git a/test/sparse.jl b/test/sparse.jl new file mode 100644 index 0000000000000..2bf3c9bc9c041 --- /dev/null +++ b/test/sparse.jl @@ -0,0 +1,4 @@ +include("sparsedir/sparse.jl") +include("sparsedir/umfpack.jl") +include("sparsedir/cholmod.jl") +include("sparsedir/spqr.jl") \ No newline at end of file diff --git a/test/sparse/cholmod.jl b/test/sparsedir/cholmod.jl similarity index 100% rename from test/sparse/cholmod.jl rename to test/sparsedir/cholmod.jl diff --git a/test/sparse/sparse.jl b/test/sparsedir/sparse.jl similarity index 100% rename from test/sparse/sparse.jl rename to test/sparsedir/sparse.jl diff --git a/test/sparsedir/spqr.jl b/test/sparsedir/spqr.jl new file mode 100644 index 0000000000000..8f1861da0b9f1 --- /dev/null +++ b/test/sparsedir/spqr.jl @@ -0,0 +1,43 @@ +using Base.Test + +using Base.SparseMatrix.SPQR +using Base.SparseMatrix.CHOLMOD + +m, n = 1000, 10 +nn = 100 + +for eltyA in (Float64, Complex{Float64}) + for eltyB in (Float64, Complex{Float64}) + if eltyA <: Real + A = sparse(rand(1:m, nn), rand(1:n, nn), randn(nn), m, n) + else + A = sparse(rand(1:m, nn), rand(1:n, nn), complex(randn(nn), randn(nn)), m, n) + end + if eltyB <: Real + B = randn(m, 2) + else + B = complex(randn(m, 2), randn(m, 2)) + end + + @test_approx_eq A\B[:,1] full(A)\B[:,1] + @test_approx_eq A\B full(A)\B + @test_throws DimensionMismatch A\B[1:m-1,:] + + if eltyA == eltyB # promotions not defined for unexported methods + F = qrfact(A) + @test size(F) == (m,n) + @test size(F, 1) == m + @test size(F, 2) == n + @test size(F, 3) == 1 + @test_throws ArgumentError size(F, 0) + + # low level wrappers + @test_throws DimensionMismatch SPQR.solve(SPQR.RX_EQUALS_B, F, CHOLMOD.Dense(B')) + @test_throws DimensionMismatch SPQR.solve(SPQR.RTX_EQUALS_B, F, CHOLMOD.Dense(B)) + @test_throws DimensionMismatch SPQR.qmult(SPQR.QX, F, CHOLMOD.Dense(B')) + @test_throws DimensionMismatch SPQR.qmult(SPQR.XQ, F, CHOLMOD.Dense(B)) + @test_approx_eq A\B 
SPQR.backslash(SPQR.ORDERING_DEFAULT, SPQR.DEFAULT_TOL, CHOLMOD.Sparse(A), CHOLMOD.Dense(B)) + @test_throws DimensionMismatch SPQR.backslash(SPQR.ORDERING_DEFAULT, SPQR.DEFAULT_TOL, CHOLMOD.Sparse(A), CHOLMOD.Dense(B[1:m-1,:])) + end + end +end diff --git a/test/sparse/umfpack.jl b/test/sparsedir/umfpack.jl similarity index 100% rename from test/sparse/umfpack.jl rename to test/sparsedir/umfpack.jl From 0fbfe1f5e6d452d26b65096007f8ec6a8969d3c8 Mon Sep 17 00:00:00 2001 From: Andreas Noack Jensen Date: Fri, 13 Feb 2015 16:13:10 -0500 Subject: [PATCH 2/3] Update NEWS.md and documentation for sparse QR. [ci skip] --- NEWS.md | 2 ++ doc/stdlib/linalg.rst | 8 ++++++-- 2 files changed, 8 insertions(+), 2 deletions(-) diff --git a/NEWS.md b/NEWS.md index fe1d9d6bb8173..4491d1a48e1a8 100644 --- a/NEWS.md +++ b/NEWS.md @@ -101,6 +101,8 @@ Library improvements * Large speedup in sparse ``\`` and splitting of Cholesky and LDLt factorizations into ``cholfact`` and ``ldltfact`` ([#10117]) + * Add sparse least squares to ``\`` by adding ``qrfact`` for sparse matrices based on the SPQR library. ([#10180]) + * Other improvements * `assert`, `@assert` now throws an `AssertionError` exception type ([#9734]). diff --git a/doc/stdlib/linalg.rst b/doc/stdlib/linalg.rst index 48d72d69854fd..d0024478853a9 100644 --- a/doc/stdlib/linalg.rst +++ b/doc/stdlib/linalg.rst @@ -21,7 +21,7 @@ Linear algebra functions in Julia are largely implemented by calling functions f .. function:: \\(A, B) :noindex: - Matrix division using a polyalgorithm. For input matrices ``A`` and ``B``, the result ``X`` is such that ``A*X == B`` when ``A`` is square. The solver that is used depends upon the structure of ``A``. A direct solver is used for upper- or lower triangular ``A``. For Hermitian ``A`` (equivalent to symmetric ``A`` for non-complex ``A``) the ``BunchKaufman`` factorization is used. Otherwise an LU factorization is used. For rectangular ``A`` the result is the minimum-norm least squares solution computed by a pivoted QR factorization of ``A`` and a rank estimate of A based on the R factor. For sparse, square ``A`` the LU factorization (from UMFPACK) is used. + Matrix division using a polyalgorithm. For input matrices ``A`` and ``B``, the result ``X`` is such that ``A*X == B`` when ``A`` is square. The solver that is used depends upon the structure of ``A``. A direct solver is used for upper- or lower triangular ``A``. For Hermitian ``A`` (equivalent to symmetric ``A`` for non-complex ``A``) the ``BunchKaufman`` factorization is used. Otherwise an LU factorization is used. For rectangular ``A`` the result is the minimum-norm least squares solution computed by a pivoted QR factorization of ``A`` and a rank estimate of A based on the R factor. When ``A`` is sparse, a similar polyalgorithm is used, but underdetermined systems are not supported. .. function:: dot(x, y) ⋅(x,y) @@ -161,9 +161,13 @@ Linear algebra functions in Julia are largely implemented by calling functions f .. [Bischof1987] C Bischof and C Van Loan, The WY representation for products of Householder matrices, SIAM J Sci Stat Comput 8 (1987), s2-s13. doi:10.1137/0908009 .. [Schreiber1989] R Schreiber and C Van Loan, A storage-efficient WY representation for products of Householder transformations, SIAM J Sci Stat Comput 10 (1989), 53-57. doi:10.1137/0910005 +.. function:: qrfact(A) -> SPQR.Factorization + + Compute the QR factorization of a sparse matrix ``A``. A fill-reducing permutation is used. 
The main application of this type is to solve least squares problems with ``\``. The function calls the C library SPQR, and a few additional functions from the library are wrapped but not exported.
+
 .. function:: qrfact!(A [,pivot=Val{false}])
 
-   ``qrfact!`` is the same as :func:`qrfact`, but saves space by overwriting the input ``A``, instead of creating a copy.
+   ``qrfact!`` is the same as :func:`qrfact` when ``A`` is a subtype of ``StridedMatrix``, but saves space by overwriting the input ``A``, instead of creating a copy.
 
 .. function:: full(QRCompactWYQ[, thin=true]) -> Matrix
 

From d8e5aabacd74c13eee2e154d14f0a796529f0d59 Mon Sep 17 00:00:00 2001
From: Andreas Noack
Date: Sun, 15 Feb 2015 18:01:06 -0500
Subject: [PATCH 3/3] Fix 32 bit issue with cholmod.jl. SPQR always calls the
 _l_ versions of the functions, so we have to use them by default.

---
 base/sparse/cholmod.jl    | 38 +++++++++++++++++++-------------------
 test/sparsedir/cholmod.jl | 30 +++++++++++++++---------------
 2 files changed, 34 insertions(+), 34 deletions(-)

diff --git a/base/sparse/cholmod.jl b/base/sparse/cholmod.jl
index f031e243b8aad..191d2b176c133 100644
--- a/base/sparse/cholmod.jl
+++ b/base/sparse/cholmod.jl
@@ -22,7 +22,7 @@ using Base.SparseMatrix: AbstractSparseMatrix, SparseMatrixCSC, increment, indty
 include("cholmod_h.jl")
 
 ## macro to generate the name of the C function according to the integer type
-macro cholmod_name(nm,typ) string("cholmod_", eval(typ) == Int64 ? "l_" : "", nm) end
+macro cholmod_name(nm,typ) string("cholmod_", eval(typ) == SuiteSparse_long ? "l_" : "", nm) end
 
 for Ti in IndexTypes
     @eval begin
@@ -198,44 +198,44 @@ end
 
 ### cholmod_core_h ###
 function allocate_dense(nrow::Integer, ncol::Integer, d::Integer, ::Type{Float64})
-    d = Dense(ccall((:cholmod_allocate_dense, :libcholmod), Ptr{C_Dense{Float64}},
+    d = Dense(ccall((:cholmod_l_allocate_dense, :libcholmod), Ptr{C_Dense{Float64}},
         (Csize_t, Csize_t, Csize_t, Cint, Ptr{Void}),
-            nrow, ncol, d, REAL, common(Cint)))
+            nrow, ncol, d, REAL, common(SuiteSparse_long)))
     finalizer(d, free!)
     d
 end
 function allocate_dense(nrow::Integer, ncol::Integer, d::Integer, ::Type{Complex{Float64}})
-    d = Dense(ccall((:cholmod_allocate_dense, :libcholmod), Ptr{C_Dense{Complex{Float64}}},
+    d = Dense(ccall((:cholmod_l_allocate_dense, :libcholmod), Ptr{C_Dense{Complex{Float64}}},
         (Csize_t, Csize_t, Csize_t, Cint, Ptr{Void}),
-            nrow, ncol, d, COMPLEX, common(Cint)))
+            nrow, ncol, d, COMPLEX, common(SuiteSparse_long)))
     finalizer(d, free!)
     d
 end
 
-free_dense!{T}(p::Ptr{C_Dense{T}}) = ccall((:cholmod_free_dense, :libcholmod), Cint, (Ptr{Ptr{C_Dense{T}}}, Ptr{Void}), &p, common(Cint))
+free_dense!{T}(p::Ptr{C_Dense{T}}) = ccall((:cholmod_l_free_dense, :libcholmod), Cint, (Ptr{Ptr{C_Dense{T}}}, Ptr{Void}), &p, common(Cint))
 
 function zeros{T<:VTypes}(m::Integer, n::Integer, ::Type{T})
-    d = Dense(ccall((:cholmod_zeros, :libcholmod), Ptr{C_Dense{T}},
+    d = Dense(ccall((:cholmod_l_zeros, :libcholmod), Ptr{C_Dense{T}},
         (Csize_t, Csize_t, Cint, Ptr{UInt8}),
-            m, n, xtyp(T), common(Cint)))
+            m, n, xtyp(T), common(SuiteSparse_long)))
     finalizer(d, free!)
     d
 end
 zeros(m::Integer, n::Integer) = zeros(m, n, Float64)
 
 function ones{T<:VTypes}(m::Integer, n::Integer, ::Type{T})
-    d = Dense(ccall((:cholmod_ones, :libcholmod), Ptr{C_Dense{T}},
+    d = Dense(ccall((:cholmod_l_ones, :libcholmod), Ptr{C_Dense{T}},
         (Csize_t, Csize_t, Cint, Ptr{UInt8}),
-            m, n, xtyp(T), common(Cint)))
+            m, n, xtyp(T), common(SuiteSparse_long)))
     finalizer(d, free!)
d end ones(m::Integer, n::Integer) = ones(m, n, Float64) function eye{T<:VTypes}(m::Integer, n::Integer, ::Type{T}) - d = Dense(ccall((:cholmod_eye, :libcholmod), Ptr{C_Dense{T}}, + d = Dense(ccall((:cholmod_l_eye, :libcholmod), Ptr{C_Dense{T}}, (Csize_t, Csize_t, Cint, Ptr{UInt8}), - m, n, xtyp(T), common(Cint))) + m, n, xtyp(T), common(SuiteSparse_long))) finalizer(d, free!) d end @@ -243,9 +243,9 @@ eye(m::Integer, n::Integer) = eye(m, n, Float64) eye(n::Integer) = eye(n, n, Float64) function copy_dense{Tv<:VTypes}(A::Dense{Tv}) - d = Dense(ccall((:cholmod_copy_dense, :libcholmod), Ptr{C_Dense{Tv}}, + d = Dense(ccall((:cholmod_l_copy_dense, :libcholmod), Ptr{C_Dense{Tv}}, (Ptr{C_Dense{Tv}}, Ptr{UInt8}), - A.p, common(Cint))) + A.p, common(SuiteSparse_long))) finalizer(d, free!) d end @@ -260,16 +260,16 @@ function norm_dense{Tv<:VTypes}(D::Dense{Tv}, p::Integer) elseif p != 0 && p != 1 throw(ArgumentError("second argument must be either 0 (Inf norm), 1, or 2")) end - ccall((:cholmod_norm_dense, :libcholmod), Cdouble, + ccall((:cholmod_l_norm_dense, :libcholmod), Cdouble, (Ptr{C_Dense{Tv}}, Cint, Ptr{UInt8}), - D.p, p, common(Cint)) + D.p, p, common(SuiteSparse_long)) end ### cholmod_check.h ### function check_dense{T<:VTypes}(A::Dense{T}) - bool(ccall((:cholmod_check_dense, :libcholmod), Cint, + bool(ccall((:cholmod_l_check_dense, :libcholmod), Cint, (Ptr{C_Dense{T}}, Ptr{UInt8}), - A.p, common(Cint))) + A.p, common(SuiteSparse_long))) end # Non-Dense wrappers (which all depend on IType) @@ -711,7 +711,7 @@ Sparse(A::Dense) = dense_to_sparse(A, Cint) Sparse(L::Factor) = factor_to_sparse!(copy(L)) function Sparse(filename::ASCIIString) f = open(filename) - A = read_sparse(CFILE(f), Int) + A = read_sparse(CFILE(f), SuiteSparse_long) close(f) A end diff --git a/test/sparsedir/cholmod.jl b/test/sparsedir/cholmod.jl index 3f94894f345ad..5b48cf2e7963d 100644 --- a/test/sparsedir/cholmod.jl +++ b/test/sparsedir/cholmod.jl @@ -194,51 +194,51 @@ run(`rm tmp.mtx`) # test that Sparse(Ptr) constructor throws the right places ## The struct pointer must be constructed by the library constructor and then modified afterwards to checks that the method throws ### illegal dtype (for now but should be supprted at some point) -p = ccall((:cholmod_allocate_sparse, :libcholmod), Ptr{CHOLMOD.C_SparseVoid}, +p = ccall((:cholmod_l_allocate_sparse, :libcholmod), Ptr{CHOLMOD.C_SparseVoid}, (Csize_t, Csize_t, Csize_t, Cint, Cint, Cint, Cint, Ptr{Void}), - 1, 1, 1, true, true, 0, CHOLMOD.REAL, CHOLMOD.common(Cint)) + 1, 1, 1, true, true, 0, CHOLMOD.REAL, CHOLMOD.common(CHOLMOD.SuiteSparse_long)) puint = convert(Ptr{Uint32}, p) unsafe_store!(puint, CHOLMOD.SINGLE, 3*div(sizeof(Csize_t), 4) + 5*div(sizeof(Ptr{Void}), 4) + 4) @test_throws CHOLMOD.CHOLMODException CHOLMOD.Sparse(p) ### illegal dtype -p = ccall((:cholmod_allocate_sparse, :libcholmod), Ptr{CHOLMOD.C_SparseVoid}, +p = ccall((:cholmod_l_allocate_sparse, :libcholmod), Ptr{CHOLMOD.C_SparseVoid}, (Csize_t, Csize_t, Csize_t, Cint, Cint, Cint, Cint, Ptr{Void}), - 1, 1, 1, true, true, 0, CHOLMOD.REAL, CHOLMOD.common(Cint)) + 1, 1, 1, true, true, 0, CHOLMOD.REAL, CHOLMOD.common(CHOLMOD.SuiteSparse_long)) puint = convert(Ptr{Uint32}, p) unsafe_store!(puint, 5, 3*div(sizeof(Csize_t), 4) + 5*div(sizeof(Ptr{Void}), 4) + 4) @test_throws CHOLMOD.CHOLMODException CHOLMOD.Sparse(p) ### illegal xtype -p = ccall((:cholmod_allocate_sparse, :libcholmod), Ptr{CHOLMOD.C_SparseVoid}, +p = ccall((:cholmod_l_allocate_sparse, :libcholmod), Ptr{CHOLMOD.C_SparseVoid}, (Csize_t, 
Csize_t, Csize_t, Cint, Cint, Cint, Cint, Ptr{Void}), - 1, 1, 1, true, true, 0, CHOLMOD.REAL, CHOLMOD.common(Cint)) + 1, 1, 1, true, true, 0, CHOLMOD.REAL, CHOLMOD.common(CHOLMOD.SuiteSparse_long)) puint = convert(Ptr{Uint32}, p) unsafe_store!(puint, 3, 3*div(sizeof(Csize_t), 4) + 5*div(sizeof(Ptr{Void}), 4) + 3) @test_throws CHOLMOD.CHOLMODException CHOLMOD.Sparse(p) ### illegal itype -p = ccall((:cholmod_allocate_sparse, :libcholmod), Ptr{CHOLMOD.C_SparseVoid}, +p = ccall((:cholmod_l_allocate_sparse, :libcholmod), Ptr{CHOLMOD.C_SparseVoid}, (Csize_t, Csize_t, Csize_t, Cint, Cint, Cint, Cint, Ptr{Void}), - 1, 1, 1, true, true, 0, CHOLMOD.REAL, CHOLMOD.common(Cint)) + 1, 1, 1, true, true, 0, CHOLMOD.REAL, CHOLMOD.common(CHOLMOD.SuiteSparse_long)) puint = convert(Ptr{Uint32}, p) unsafe_store!(puint, CHOLMOD.INTLONG, 3*div(sizeof(Csize_t), 4) + 5*div(sizeof(Ptr{Void}), 4) + 2) @test_throws CHOLMOD.CHOLMODException CHOLMOD.Sparse(p) ### illegal itype -p = ccall((:cholmod_allocate_sparse, :libcholmod), Ptr{CHOLMOD.C_SparseVoid}, +p = ccall((:cholmod_l_allocate_sparse, :libcholmod), Ptr{CHOLMOD.C_SparseVoid}, (Csize_t, Csize_t, Csize_t, Cint, Cint, Cint, Cint, Ptr{Void}), - 1, 1, 1, true, true, 0, CHOLMOD.REAL, CHOLMOD.common(Cint)) + 1, 1, 1, true, true, 0, CHOLMOD.REAL, CHOLMOD.common(CHOLMOD.SuiteSparse_long)) puint = convert(Ptr{Uint32}, p) unsafe_store!(puint, 5, 3*div(sizeof(Csize_t), 4) + 5*div(sizeof(Ptr{Void}), 4) + 2) @test_throws CHOLMOD.CHOLMODException CHOLMOD.Sparse(p) # test that Sparse(Ptr) works for SuiteSparse_long (on 64 bit systems) if CHOLMOD.SuiteSparse_long == Int64 - p = ccall((:cholmod_l_allocate_sparse, :libcholmod), Ptr{CHOLMOD.C_SparseVoid}, + p = ccall((:cholmod_allocate_sparse, :libcholmod), Ptr{CHOLMOD.C_SparseVoid}, (Csize_t, Csize_t, Csize_t, Cint, Cint, Cint, Cint, Ptr{Void}), - 1, 1, 1, true, true, 0, CHOLMOD.REAL, CHOLMOD.common(CHOLMOD.SuiteSparse_long)) - @test isa(CHOLMOD.Sparse(p), CHOLMOD.Sparse{Float64,CHOLMOD.SuiteSparse_long}) + 1, 1, 1, true, true, 0, CHOLMOD.REAL, CHOLMOD.common(Int32)) + @test isa(CHOLMOD.Sparse(p), CHOLMOD.Sparse{Float64,Int32}) end # Test Dense wrappers (only Float64 supported a present) @@ -283,9 +283,9 @@ end # Test Sparse and Factor ## test free_sparse! -p = ccall((:cholmod_allocate_sparse, :libcholmod), Ptr{CHOLMOD.C_Sparse{Float64,Cint}}, +p = ccall((:cholmod_l_allocate_sparse, :libcholmod), Ptr{CHOLMOD.C_Sparse{Float64,Cint}}, (Csize_t, Csize_t, Csize_t, Cint, Cint, Cint, Cint, Ptr{Void}), - 1, 1, 1, true, true, 0, CHOLMOD.REAL, CHOLMOD.common(Cint)) + 1, 1, 1, true, true, 0, CHOLMOD.REAL, CHOLMOD.common(Int)) @test CHOLMOD.free_sparse!(p) for elty in (Float64, Complex{Float64})