diff --git a/base/array.jl b/base/array.jl index a4645f87103e0..5069913488581 100644 --- a/base/array.jl +++ b/base/array.jl @@ -25,6 +25,15 @@ typealias DenseVecOrMat{T} Union{DenseVector{T}, DenseMatrix{T}} import Core: arraysize, arrayset, arrayref +""" + Array{T,N}(dims) + +Construct an uninitialized `N`-dimensional dense array with element type `T`. `dims` may +be a tuple or a series of integer arguments corresponding to the length in each dimension. +If the rank `N` is omitted, i.e. `Array{T}(dims)`, the rank is determined based on `dims`. +""" +Array + vect() = Array{Any,1}(0) vect{T}(X::T...) = T[ X[i] for i=1:length(X) ] diff --git a/base/asyncmap.jl b/base/asyncmap.jl index 112f30663b112..617927866d97d 100644 --- a/base/asyncmap.jl +++ b/base/asyncmap.jl @@ -246,7 +246,7 @@ end # Special handling for some types. function asyncmap(f, s::AbstractString; kwargs...) - s2=Array(Char, length(s)) + s2 = Array{Char,1}(length(s)) asyncmap!(f, s2, s; kwargs...) return convert(String, s2) end diff --git a/base/boot.jl b/base/boot.jl index 92d61c8a69494..400f24f21b028 100644 --- a/base/boot.jl +++ b/base/boot.jl @@ -330,13 +330,6 @@ typealias NTuple{N,T} Tuple{Vararg{T,N}} (::Type{Array{T,1}}){T}() = Array{T,1}(0) (::Type{Array{T,2}}){T}() = Array{T,2}(0, 0) -# TODO: possibly turn these into deprecations -Array{T,N}(::Type{T}, d::NTuple{N,Int}) = Array{T,N}(d) -Array{T}(::Type{T}, d::Int...) = Array(T, d) -Array{T}(::Type{T}, m::Int) = Array{T,1}(m) -Array{T}(::Type{T}, m::Int,n::Int) = Array{T,2}(m,n) -Array{T}(::Type{T}, m::Int,n::Int,o::Int) = Array{T,3}(m,n,o) - # primitive Symbol constructors function Symbol(s::String) return ccall(:jl_symbol_n, Ref{Symbol}, (Ptr{UInt8}, Int), diff --git a/base/deprecated.jl b/base/deprecated.jl index 9877ef4853603..07de11d104777 100644 --- a/base/deprecated.jl +++ b/base/deprecated.jl @@ -1741,4 +1741,23 @@ end export @test_approx_eq # END code from base/test.jl +# Deprecate Array(T, dims...) in favor of proper type constructors +@deprecate Array{T,N}(::Type{T}, d::NTuple{N,Int}) Array{T,N}(d) +@deprecate Array{T}(::Type{T}, d::Int...) Array{T,length(d)}(d...) +@deprecate Array{T}(::Type{T}, m::Int) Array{T,1}(m) +@deprecate Array{T}(::Type{T}, m::Int,n::Int) Array{T,2}(m,n) +@deprecate Array{T}(::Type{T}, m::Int,n::Int,o::Int) Array{T,3}(m,n,o) +@deprecate Array{T}(::Type{T}, d::Integer...) Array{T,length(d)}(convert(Tuple{Vararg{Int}}, d)) +@deprecate Array{T}(::Type{T}, m::Integer) Array{T,1}(Int(m)) +@deprecate Array{T}(::Type{T}, m::Integer,n::Integer) Array{T,2}(Int(m),Int(n)) +@deprecate Array{T}(::Type{T}, m::Integer,n::Integer,o::Integer) Array{T,3}(Int(m),Int(n),Int(o)) + +# Likewise for SharedArrays +@deprecate SharedArray{T,N}(::Type{T}, dims::Dims{N}; kwargs...) SharedArray{T,N}(dims; kwargs...) +@deprecate SharedArray{T}(::Type{T}, dims::Int...; kwargs...) SharedArray{T,length(dims)}(dims...; kwargs...) +@deprecate(SharedArray{T,N}(filename::AbstractString, ::Type{T}, dims::NTuple{N,Int}, offset; kwargs...), + SharedArray{T,N}(filename, dims, offset; kwargs...)) +@deprecate(SharedArray{T}(filename::AbstractString, ::Type{T}, dims::NTuple, offset; kwargs...), + SharedArray{T,length(dims)}(filename, dims, offset; kwargs...)) + # End deprecations scheduled for 0.6 diff --git a/base/docs/helpdb/Base.jl b/base/docs/helpdb/Base.jl index 944cbd2c1b33a..ca267338a732c 100644 --- a/base/docs/helpdb/Base.jl +++ b/base/docs/helpdb/Base.jl @@ -1676,15 +1676,6 @@ by `show` generally includes Julia-specific formatting and type information. 
""" show(x) -""" - Array(dims) - -`Array{T}(dims)` constructs an uninitialized dense array with element type `T`. `dims` may -be a tuple or a series of integer arguments. The syntax `Array(T, dims)` is also available, -but deprecated. -""" -Array - """ issubtype(type1, type2) diff --git a/base/libc.jl b/base/libc.jl index c3c67b57e8b71..03abe0746a9a4 100644 --- a/base/libc.jl +++ b/base/libc.jl @@ -277,7 +277,7 @@ if is_windows() const FORMAT_MESSAGE_FROM_SYSTEM = UInt32(0x1000) const FORMAT_MESSAGE_IGNORE_INSERTS = UInt32(0x200) const FORMAT_MESSAGE_MAX_WIDTH_MASK = UInt32(0xFF) - lpMsgBuf = Array(Ptr{UInt16}) + lpMsgBuf = Array{Ptr{UInt16},0}() lpMsgBuf[1] = 0 len = ccall(:FormatMessageW,stdcall,UInt32,(Cint, Ptr{Void}, Cint, Cint, Ptr{Ptr{UInt16}}, Cint, Ptr{Void}), FORMAT_MESSAGE_ALLOCATE_BUFFER | FORMAT_MESSAGE_FROM_SYSTEM | FORMAT_MESSAGE_IGNORE_INSERTS | FORMAT_MESSAGE_MAX_WIDTH_MASK, diff --git a/base/sharedarray.jl b/base/sharedarray.jl index 68c84921a626a..0eea70ecba2a3 100644 --- a/base/sharedarray.jl +++ b/base/sharedarray.jl @@ -28,19 +28,8 @@ type SharedArray{T,N} <: DenseArray{T,N} end end -(::Type{SharedArray{T}}){T,N}(d::NTuple{N,Int}; kwargs...) = - SharedArray(T, d; kwargs...) -(::Type{SharedArray{T}}){T}(d::Integer...; kwargs...) = - SharedArray(T, d; kwargs...) -(::Type{SharedArray{T}}){T}(m::Integer; kwargs...) = - SharedArray(T, m; kwargs...) -(::Type{SharedArray{T}}){T}(m::Integer, n::Integer; kwargs...) = - SharedArray(T, m, n; kwargs...) -(::Type{SharedArray{T}}){T}(m::Integer, n::Integer, o::Integer; kwargs...) = - SharedArray(T, m, n, o; kwargs...) - """ - SharedArray(T::Type, dims::NTuple; init=false, pids=Int[]) + SharedArray{T,N}(dims::NTuple; init=false, pids=Int[]) Construct a `SharedArray` of a bitstype `T` and size `dims` across the processes specified by `pids` - all of which have to be on the same host. @@ -52,8 +41,39 @@ computation with the master process acting as a driver. If an `init` function of the type `initfn(S::SharedArray)` is specified, it is called on all the participating workers. + + SharedArray{T,N}(filename::AbstractString, dims::NTuple, [offset=0]; mode=nothing, init=false, pids=Int[]) + +Construct a `SharedArray` backed by the file `filename`, with element +type `T` (must be a `bitstype`) and size `dims`, across the processes +specified by `pids` - all of which have to be on the same host. This +file is mmapped into the host memory, with the following consequences: + +- The array data must be represented in binary format (e.g., an ASCII + format like CSV cannot be supported) + +- Any changes you make to the array values (e.g., `A[3] = 0`) will + also change the values on disk + +If `pids` is left unspecified, the shared array will be mapped across +all processes on the current host, including the master. But, +`localindexes` and `indexpids` will only refer to worker +processes. This facilitates work distribution code to use workers for +actual computation with the master process acting as a driver. + +`mode` must be one of `"r"`, `"r+"`, `"w+"`, or `"a+"`, and defaults +to `"r+"` if the file specified by `filename` already exists, or +`"w+"` if not. If an `init` function of the type +`initfn(S::SharedArray)` is specified, it is called on all the +participating workers. You cannot specify an `init` function if the +file is not writable. + +`offset` allows you to skip the specified number of bytes at the +beginning of the file. 
""" -function SharedArray{T,N}(::Type{T}, dims::Dims{N}; init=false, pids=Int[]) +SharedArray + +function (::Type{SharedArray{T,N}}){T,N}(dims::Dims{N}; init=false, pids=Int[]) isbits(T) || throw(ArgumentError("type of SharedArray elements must be bits types, got $(T)")) pids, onlocalhost = shared_pids(pids) @@ -110,39 +130,20 @@ function SharedArray{T,N}(::Type{T}, dims::Dims{N}; init=false, pids=Int[]) S end -SharedArray(T, I::Int...; kwargs...) = SharedArray(T, I; kwargs...) - -""" - SharedArray(filename::AbstractString, T::Type, dims::NTuple, [offset=0]; mode=nothing, init=false, pids=Int[]) - -Construct a `SharedArray` backed by the file `filename`, with element -type `T` (must be a `bitstype`) and size `dims`, across the processes -specified by `pids` - all of which have to be on the same host. This -file is mmapped into the host memory, with the following consequences: - -- The array data must be represented in binary format (e.g., an ASCII - format like CSV cannot be supported) - -- Any changes you make to the array values (e.g., `A[3] = 0`) will - also change the values on disk - -If `pids` is left unspecified, the shared array will be mapped across -all processes on the current host, including the master. But, -`localindexes` and `indexpids` will only refer to worker -processes. This facilitates work distribution code to use workers for -actual computation with the master process acting as a driver. - -`mode` must be one of `"r"`, `"r+"`, `"w+"`, or `"a+"`, and defaults -to `"r+"` if the file specified by `filename` already exists, or -`"w+"` if not. If an `init` function of the type -`initfn(S::SharedArray)` is specified, it is called on all the -participating workers. You cannot specify an `init` function if the -file is not writable. +(::Type{SharedArray{T,N}}){T,N}(I::Integer...; kwargs...) = + SharedArray{T,N}(I; kwargs...) +(::Type{SharedArray{T}}){T}(d::NTuple; kwargs...) = + SharedArray{T,length(d)}(d; kwargs...) +(::Type{SharedArray{T}}){T}(I::Integer...; kwargs...) = + SharedArray{T,length(I)}(I; kwargs...) +(::Type{SharedArray{T}}){T}(m::Integer; kwargs...) = + SharedArray{T,1}(m; kwargs...) +(::Type{SharedArray{T}}){T}(m::Integer, n::Integer; kwargs...) = + SharedArray{T,2}(m, n; kwargs...) +(::Type{SharedArray{T}}){T}(m::Integer, n::Integer, o::Integer; kwargs...) = + SharedArray{T,3}(m, n, o; kwargs...) -`offset` allows you to skip the specified number of bytes at the -beginning of the file. 
-""" -function SharedArray{T,N}(filename::AbstractString, ::Type{T}, dims::NTuple{N,Int}, +function (::Type{SharedArray{T,N}}){T,N}(filename::AbstractString, dims::NTuple{N,Int}, offset::Integer=0; mode=nothing, init=false, pids::Vector{Int}=Int[]) if !isabspath(filename) throw(ArgumentError("$filename is not an absolute path; try abspath(filename)?")) @@ -208,6 +209,10 @@ function SharedArray{T,N}(filename::AbstractString, ::Type{T}, dims::NTuple{N,In S end +(::Type{SharedArray{T}}){T,N}(filename::AbstractString, dims::NTuple{N,Int}, offset::Integer=0; + mode=nothing, init=false, pids::Vector{Int}=Int[]) = + SharedArray{T,N}(filename, dims, offset; mode=mode, init=init, pids=pids) + function initialize_shared_array(S, onlocalhost, init, pids) if onlocalhost init_loc_flds(S) @@ -246,6 +251,7 @@ typealias SharedMatrix{T} SharedArray{T,2} length(S::SharedArray) = prod(S.dims) size(S::SharedArray) = S.dims +ndims(S::SharedArray) = length(S.dims) linearindexing{S<:SharedArray}(::Type{S}) = LinearFast() function reshape{T,N}(a::SharedArray{T}, dims::NTuple{N,Int}) @@ -307,21 +313,21 @@ localindexes(S::SharedArray) = S.pidx > 0 ? range_1dim(S, S.pidx) : 1:0 unsafe_convert{T}(::Type{Ptr{T}}, S::SharedArray) = unsafe_convert(Ptr{T}, sdata(S)) function convert(::Type{SharedArray}, A::Array) - S = SharedArray(eltype(A), size(A)) + S = SharedArray{eltype(A),ndims(A)}(size(A)) copy!(S, A) end function convert{T}(::Type{SharedArray{T}}, A::Array) - S = SharedArray(T, size(A)) + S = SharedArray{T,ndims(A)}(size(A)) copy!(S, A) end function convert{TS,TA,N}(::Type{SharedArray{TS,N}}, A::Array{TA,N}) - S = SharedArray(TS, size(A)) + S = SharedArray{TS,ndims(A)}(size(A)) copy!(S, A) end function deepcopy_internal(S::SharedArray, stackdict::ObjectIdDict) haskey(stackdict, S) && return stackdict[S] - R = SharedArray(eltype(S), size(S); pids = S.pids) + R = SharedArray{eltype(S),ndims(S)}(size(S); pids = S.pids) copy!(sdata(R), sdata(S)) stackdict[S] = R return R @@ -468,16 +474,16 @@ end # convenience constructors function shmem_fill(v, dims; kwargs...) - SharedArray(typeof(v), dims; init = S->fill!(S.loc_subarr_1d, v), kwargs...) + SharedArray{typeof(v),length(dims)}(dims; init = S->fill!(S.loc_subarr_1d, v), kwargs...) end shmem_fill(v, I::Int...; kwargs...) = shmem_fill(v, I; kwargs...) # rand variant with range function shmem_rand(TR::Union{DataType, UnitRange}, dims; kwargs...) if isa(TR, UnitRange) - SharedArray(Int, dims; init = S -> map!(x -> rand(TR), S.loc_subarr_1d, S.loc_subarr_1d), kwargs...) + SharedArray{Int,length(dims)}(dims; init = S -> map!(x -> rand(TR), S.loc_subarr_1d, S.loc_subarr_1d), kwargs...) else - SharedArray(TR, dims; init = S -> map!(x -> rand(TR), S.loc_subarr_1d, S.loc_subarr_1d), kwargs...) + SharedArray{TR,length(dims)}(dims; init = S -> map!(x -> rand(TR), S.loc_subarr_1d, S.loc_subarr_1d), kwargs...) end end shmem_rand(TR::Union{DataType, UnitRange}, i::Int; kwargs...) = shmem_rand(TR, (i,); kwargs...) @@ -487,7 +493,7 @@ shmem_rand(dims; kwargs...) = shmem_rand(Float64, dims; kwargs...) shmem_rand(I::Int...; kwargs...) = shmem_rand(I; kwargs...) function shmem_randn(dims; kwargs...) - SharedArray(Float64, dims; init = S-> map!(x -> randn(), S.loc_subarr_1d, S.loc_subarr_1d), kwargs...) + SharedArray{Float64,length(dims)}(dims; init = S-> map!(x -> randn(), S.loc_subarr_1d, S.loc_subarr_1d), kwargs...) end shmem_randn(I::Int...; kwargs...) = shmem_randn(I; kwargs...) 
diff --git a/base/sparse/sparsematrix.jl b/base/sparse/sparsematrix.jl index 03361eba1efad..932c45b6a68f4 100644 --- a/base/sparse/sparsematrix.jl +++ b/base/sparse/sparsematrix.jl @@ -1390,7 +1390,7 @@ speye_scaled(diag, m::Integer, n::Integer) = speye_scaled(typeof(diag), diag, m, function speye_scaled(T, diag, m::Integer, n::Integer) ((m < 0) || (n < 0)) && throw(ArgumentError("invalid array dimensions")) nnz = min(m,n) - colptr = Array(Int, 1+n) + colptr = Array{Int,1}(1+n) colptr[1:nnz+1] = 1:nnz+1 colptr[nnz+2:end] = nnz+1 SparseMatrixCSC(Int(m), Int(n), colptr, Vector{Int}(1:nnz), fill!(Vector{T}(nnz), diag)) diff --git a/base/sparse/sparsevector.jl b/base/sparse/sparsevector.jl index 63c40f7d79a7e..afe0f62a4be6d 100644 --- a/base/sparse/sparsevector.jl +++ b/base/sparse/sparsevector.jl @@ -563,7 +563,7 @@ end function find{Tv,Ti}(x::SparseVector{Tv,Ti}) numnz = nnz(x) - I = Array(Ti, numnz) + I = Array{Ti,1}(numnz) nzind = x.nzind nzval = x.nzval @@ -587,8 +587,8 @@ end function findnz{Tv,Ti}(x::SparseVector{Tv,Ti}) numnz = nnz(x) - I = Array(Ti, numnz) - V = Array(Tv, numnz) + I = Array{Ti,1}(numnz) + V = Array{Tv,1}(numnz) nzind = x.nzind nzval = x.nzval diff --git a/base/sysimg.jl b/base/sysimg.jl index 76c33c770fefc..5695eb950671b 100644 --- a/base/sysimg.jl +++ b/base/sysimg.jl @@ -99,12 +99,6 @@ include("subarray.jl") (::Type{Matrix{T}}){T}(m::Integer, n::Integer) = Matrix{T}(Int(m), Int(n)) (::Type{Matrix})(m::Integer, n::Integer) = Matrix{Any}(Int(m), Int(n)) -# TODO: possibly turn these into deprecations -Array{T}(::Type{T}, d::Integer...) = Array(T, convert(Tuple{Vararg{Int}}, d)) -Array{T}(::Type{T}, m::Integer) = Array{T,1}(Int(m)) -Array{T}(::Type{T}, m::Integer,n::Integer) = Array{T,2}(Int(m),Int(n)) -Array{T}(::Type{T}, m::Integer,n::Integer,o::Integer) = Array{T,3}(Int(m),Int(n),Int(o)) - # numeric operations include("hashing.jl") include("rounding.jl") diff --git a/doc/src/manual/parallel-computing.md b/doc/src/manual/parallel-computing.md index 22072071005d1..011afbeb3992a 100644 --- a/doc/src/manual/parallel-computing.md +++ b/doc/src/manual/parallel-computing.md @@ -312,7 +312,7 @@ Parallel for loops like these must be avoided. Fortunately, [Shared Arrays](@ref to get around this limitation: ```julia -a = SharedArray(Float64,10) +a = SharedArray{Float64}(10) @parallel for i=1:10 a[i] = i end @@ -720,10 +720,10 @@ just returns the object itself, so it's safe to use [`sdata()`](@ref) on any `Ar The constructor for a shared array is of the form: ```julia -SharedArray(T::Type, dims::NTuple; init=false, pids=Int[]) +SharedArray{T,N}(dims::NTuple; init=false, pids=Int[]) ``` -which creates a shared array of a bits type `T` and size `dims` across the processes specified +which creates an `N`-dimensional shared array of a bits type `T` and size `dims` across the processes specified by `pids`. Unlike distributed arrays, a shared array is accessible only from those participating workers specified by the `pids` named argument (and the creating process too, if it is on the same host). @@ -741,7 +741,7 @@ julia> addprocs(3) 3 4 -julia> S = SharedArray(Int, (3,4), init = S -> S[Base.localindexes(S)] = myid()) +julia> S = SharedArray{Int,2}((3,4), init = S -> S[Base.localindexes(S)] = myid()) 3×4 SharedArray{Int64,2}: 2 2 3 4 2 3 3 4 @@ -762,7 +762,7 @@ convenient for splitting up tasks among processes. 
You can, of course, divide th you wish: ```julia -julia> S = SharedArray(Int, (3,4), init = S -> S[indexpids(S):length(procs(S)):length(S)] = myid()) +julia> S = SharedArray{Int,2}((3,4), init = S -> S[indexpids(S):length(procs(S)):length(S)] = myid()) 3×4 SharedArray{Int64,2}: 2 2 2 2 3 3 3 3 @@ -861,8 +861,8 @@ end If we create `SharedArray`s and time these functions, we get the following results (with `julia -p 4`): ```julia -q = SharedArray(Float64, (500,500,500)) -u = SharedArray(Float64, (500,500,500)) +q = SharedArray{Float64,3}((500,500,500)) +u = SharedArray{Float64,3}((500,500,500)) # Run once to JIT-compile advection_serial!(q, u) diff --git a/src/julia-syntax.scm b/src/julia-syntax.scm index 6a5b146e3755f..b369ac53d2667 100644 --- a/src/julia-syntax.scm +++ b/src/julia-syntax.scm @@ -2242,7 +2242,7 @@ ,.(map (lambda (v r) `(= ,v (call (top length) ,r))) lengths rv) (scope-block (block - (= ,result (call (core Array) ,atype ,@lengths)) + (= ,result (call (curly Array ,atype ,(length lengths)) ,@lengths)) (= ,ri 1) ,(construct-loops (reverse ranges) (reverse rv) is states (reverse lengths)) ,result))))) diff --git a/test/parallel_exec.jl b/test/parallel_exec.jl index 7657ce89d4fdc..6e2c89ab64ff3 100644 --- a/test/parallel_exec.jl +++ b/test/parallel_exec.jl @@ -231,11 +231,11 @@ test_indexing(RemoteChannel(id_other)) dims = (20,20,20) if is_linux() - S = SharedArray(Int64, dims) + S = SharedArray{Int64,3}(dims) @test startswith(S.segname, "/jl") @test !ispath("/dev/shm" * S.segname) - S = SharedArray(Int64, dims; pids=[id_other]) + S = SharedArray{Int64,3}(dims; pids=[id_other]) @test startswith(S.segname, "/jl") @test !ispath("/dev/shm" * S.segname) end @@ -298,7 +298,7 @@ copy!(s, sdata(d)) a = rand(dims) @test sdata(a) == a -d = SharedArray(Int, dims, init = D->fill!(D.loc_subarr_1d, myid())) +d = SharedArray{Int}(dims, init = D->fill!(D.loc_subarr_1d, myid())) for p in procs(d) idxes_in_p = remotecall_fetch(p, d) do D parentindexes(D.loc_subarr_1d)[1] @@ -309,7 +309,7 @@ for p in procs(d) @test d[idxl] == p end -d = @inferred(SharedArray(Float64, (2,3))) +d = @inferred(SharedArray{Float64,2}((2,3))) @test isa(d[:,2], Vector{Float64}) ### SharedArrays from a file @@ -320,7 +320,7 @@ write(fn, 1:30) sz = (6,5) Atrue = reshape(1:30, sz) -S = @inferred(SharedArray(fn, Int, sz)) +S = @inferred(SharedArray{Int,2}(fn, sz)) @test S == Atrue @test length(procs(S)) > 1 @sync begin @@ -338,14 +338,14 @@ read!(fn, filedata) finalize(S) # Error for write-only files -@test_throws ArgumentError SharedArray(fn, Int, sz, mode="w") +@test_throws ArgumentError SharedArray{Int,2}(fn, sz, mode="w") # Error for file doesn't exist, but not allowed to create -@test_throws ArgumentError SharedArray(joinpath(tempdir(),randstring()), Int, sz, mode="r") +@test_throws ArgumentError SharedArray{Int,2}(joinpath(tempdir(),randstring()), sz, mode="r") # Creating a new file fn2 = tempname() -S = SharedArray(fn2, Int, sz, init=D->D[localindexes(D)] = myid()) +S = SharedArray{Int,2}(fn2, sz, init=D->D[localindexes(D)] = myid()) @test S == filedata filedata2 = similar(Atrue) read!(fn2, filedata2) @@ -355,7 +355,7 @@ finalize(S) # Appending to a file fn3 = tempname() write(fn3, ones(UInt8, 4)) -S = SharedArray(fn3, UInt8, sz, 4, mode="a+", init=D->D[localindexes(D)]=0x02) +S = SharedArray{UInt8}(fn3, sz, 4, mode="a+", init=D->D[localindexes(D)]=0x02) len = prod(sz)+4 @test filesize(fn3) == len filedata = Array{UInt8}(len) @@ -438,7 +438,7 @@ A = @inferred(convert(SharedArray, AA)) B = 
@inferred(convert(SharedArray, AA')) @test B*A == ctranspose(AA)*AA -d=SharedArray(Int64, (10,10); init = D->fill!(D.loc_subarr_1d, myid()), pids=[id_me, id_other]) +d=SharedArray{Int64,2}((10,10); init = D->fill!(D.loc_subarr_1d, myid()), pids=[id_me, id_other]) d2 = map(x->1, d) @test reduce(+, d2) == 100 @@ -459,12 +459,12 @@ map!(x->1, d, d) # Shared arrays of singleton immutables @everywhere immutable ShmemFoo end for T in [Void, ShmemFoo] - s = @inferred(SharedArray(T, 10)) + s = @inferred(SharedArray{T}(10)) @test T() === remotecall_fetch(x->x[3], workers()[1], s) end # Issue #14664 -d = SharedArray(Int,10) +d = SharedArray{Int}(10) @sync @parallel for i=1:10 d[i] = i end @@ -474,8 +474,8 @@ for (x,i) in enumerate(d) end # complex -sd = SharedArray(Int,10) -se = SharedArray(Int,10) +sd = SharedArray{Int}(10) +se = SharedArray{Int}(10) @sync @parallel for i=1:10 sd[i] = i se[i] = i @@ -498,7 +498,7 @@ for id in [id_me, id_other] finalize_and_test((r=RemoteChannel(id); put!(r, 1); r)) end -d = SharedArray(Int,10) +d = SharedArray{Int}(10) finalize(d) @test_throws BoundsError d[1] diff --git a/test/test.jl b/test/test.jl index f2dc5218d8646..13ae3d433abb6 100644 --- a/test/test.jl +++ b/test/test.jl @@ -35,7 +35,7 @@ let a @test a === 1 end -a = Array(Float64, 2, 2, 2, 2, 2) +a = Array{Float64,5}(2, 2, 2, 2, 2) a[1,1,1,1,1] = 10 @test a[1,1,1,1,1] == 10 @test a[1,1,1,1,1] != 2
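The same migration applies to plain `Array` construction, covered by the `base/array.jl` docstring and the `base/deprecated.jl` entries at the top of the patch. A minimal before/after sketch, assuming the post-patch Julia 0.6 syntax; the variable names are illustrative and not part of the patch:

```julia
# Deprecated call forms, kept working via base/deprecated.jl:
#     v = Array(Float64, 3)
#     M = Array(Int, 2, 2)

v = Array{Float64}(3)         # uninitialized Vector{Float64} of length 3; rank inferred
M = Array{Int,2}(2, 2)        # uninitialized 2×2 Matrix{Int}; rank stated explicitly
A = Array{UInt8}((2, 3, 4))   # dims passed as a tuple, as described by the new docstring
```

This is also why the `src/julia-syntax.scm` hunk switches comprehension lowering from calling `(core Array)` with a type argument to calling the `Array{T,N}(dims...)` constructor: generated code now allocates its result the same way user code does.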