@@ -28,19 +28,8 @@ type SharedArray{T,N} <: DenseArray{T,N}
end
end

- (::Type{SharedArray{T}}){T,N}(d::NTuple{N,Int}; kwargs...) =
-     SharedArray(T, d; kwargs...)
- (::Type{SharedArray{T}}){T}(d::Integer...; kwargs...) =
-     SharedArray(T, d; kwargs...)
- (::Type{SharedArray{T}}){T}(m::Integer; kwargs...) =
-     SharedArray(T, m; kwargs...)
- (::Type{SharedArray{T}}){T}(m::Integer, n::Integer; kwargs...) =
-     SharedArray(T, m, n; kwargs...)
- (::Type{SharedArray{T}}){T}(m::Integer, n::Integer, o::Integer; kwargs...) =
-     SharedArray(T, m, n, o; kwargs...)
-
"""
-     SharedArray(T::Type, dims::NTuple; init=false, pids=Int[])
+     SharedArray{T,N}(dims::NTuple; init=false, pids=Int[])

Construct a `SharedArray` of a bitstype `T` and size `dims` across the processes specified
by `pids` - all of which have to be on the same host.
@@ -52,8 +41,39 @@ computation with the master process acting as a driver.

If an `init` function of the type `initfn(S::SharedArray)` is specified, it is called on all
the participating workers.
+
+     SharedArray{T,N}(filename::AbstractString, dims::NTuple, [offset=0]; mode=nothing, init=false, pids=Int[])
+
+ Construct a `SharedArray` backed by the file `filename`, with element
+ type `T` (must be a `bitstype`) and size `dims`, across the processes
+ specified by `pids` - all of which have to be on the same host. This
+ file is mmapped into the host memory, with the following consequences:
+
+ - The array data must be represented in binary format (e.g., an ASCII
+   format like CSV cannot be supported)
+
+ - Any changes you make to the array values (e.g., `A[3] = 0`) will
+   also change the values on disk
+
+ If `pids` is left unspecified, the shared array will be mapped across
+ all processes on the current host, including the master. But,
+ `localindexes` and `indexpids` will only refer to worker
+ processes. This facilitates work distribution code to use workers for
+ actual computation with the master process acting as a driver.
+
+ `mode` must be one of `"r"`, `"r+"`, `"w+"`, or `"a+"`, and defaults
+ to `"r+"` if the file specified by `filename` already exists, or
+ `"w+"` if not. If an `init` function of the type
+ `initfn(S::SharedArray)` is specified, it is called on all the
+ participating workers. You cannot specify an `init` function if the
+ file is not writable.
+
+ `offset` allows you to skip the specified number of bytes at the
+ beginning of the file.
"""
- function SharedArray{T,N}(::Type{T}, dims::Dims{N}; init=false, pids=Int[])
+ SharedArray
+
+ function (::Type{SharedArray{T,N}}){T,N}(dims::Dims{N}; init=false, pids=Int[])
    isbits(T) || throw(ArgumentError("type of SharedArray elements must be bits types, got $(T)"))

    pids, onlocalhost = shared_pids(pids)
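A minimal usage sketch of the in-memory constructor documented above, assuming worker processes have already been added on the local host (sizes and values are illustrative only):

    addprocs(2)
    # init runs on each participating worker; each fills its own chunk with its process id.
    S = SharedArray{Int,2}((3, 4); init = S -> S[localindexes(S)] = myid())
    v = SharedArray{Float64}(10)   # one-dimensional case, via the SharedArray{T}(m::Integer) convenience method re-added later in this diff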
@@ -110,39 +130,20 @@ function SharedArray{T,N}(::Type{T}, dims::Dims{N}; init=false, pids=Int[])
    S
end

- SharedArray(T, I::Int...; kwargs...) = SharedArray(T, I; kwargs...)
-
- """
-     SharedArray(filename::AbstractString, T::Type, dims::NTuple, [offset=0]; mode=nothing, init=false, pids=Int[])
-
- Construct a `SharedArray` backed by the file `filename`, with element
- type `T` (must be a `bitstype`) and size `dims`, across the processes
- specified by `pids` - all of which have to be on the same host. This
- file is mmapped into the host memory, with the following consequences:
-
- - The array data must be represented in binary format (e.g., an ASCII
-   format like CSV cannot be supported)
-
- - Any changes you make to the array values (e.g., `A[3] = 0`) will
-   also change the values on disk
-
- If `pids` is left unspecified, the shared array will be mapped across
- all processes on the current host, including the master. But,
- `localindexes` and `indexpids` will only refer to worker
- processes. This facilitates work distribution code to use workers for
- actual computation with the master process acting as a driver.
-
- `mode` must be one of `"r"`, `"r+"`, `"w+"`, or `"a+"`, and defaults
- to `"r+"` if the file specified by `filename` already exists, or
- `"w+"` if not. If an `init` function of the type
- `initfn(S::SharedArray)` is specified, it is called on all the
- participating workers. You cannot specify an `init` function if the
- file is not writable.
+ (::Type{SharedArray{T,N}}){T,N}(I::Integer...; kwargs...) =
+     SharedArray{T,N}(I; kwargs...)
+ (::Type{SharedArray{T}}){T}(d::NTuple; kwargs...) =
+     SharedArray{T,length(d)}(d; kwargs...)
+ (::Type{SharedArray{T}}){T}(I::Integer...; kwargs...) =
+     SharedArray{T,length(I)}(I; kwargs...)
+ (::Type{SharedArray{T}}){T}(m::Integer; kwargs...) =
+     SharedArray{T,1}(m; kwargs...)
+ (::Type{SharedArray{T}}){T}(m::Integer, n::Integer; kwargs...) =
+     SharedArray{T,2}(m, n; kwargs...)
+ (::Type{SharedArray{T}}){T}(m::Integer, n::Integer, o::Integer; kwargs...) =
+     SharedArray{T,3}(m, n, o; kwargs...)

- `offset` allows you to skip the specified number of bytes at the
- beginning of the file.
- """
- function SharedArray{T,N}(filename::AbstractString, ::Type{T}, dims::NTuple{N,Int},
+ function (::Type{SharedArray{T,N}}){T,N}(filename::AbstractString, dims::NTuple{N,Int},
                          offset::Integer=0; mode=nothing, init=false, pids::Vector{Int}=Int[])
    if !isabspath(filename)
        throw(ArgumentError("$filename is not an absolute path; try abspath(filename)?"))
@@ -208,6 +209,10 @@ function SharedArray{T,N}(filename::AbstractString, ::Type{T}, dims::NTuple{N,In
    S
end

+ (::Type{SharedArray{T}}){T,N}(filename::AbstractString, dims::NTuple{N,Int}, offset::Integer=0;
+                               mode=nothing, init=false, pids::Vector{Int}=Int[]) =
+     SharedArray{T,N}(filename, dims, offset; mode=mode, init=init, pids=pids)
+
function initialize_shared_array(S, onlocalhost, init, pids)
    if onlocalhost
        init_loc_flds(S)
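A minimal sketch of the file-backed form documented above; the path is a hypothetical example, and all participating processes are assumed to be on the same host, as the docstring requires:

    fn = "/tmp/shared_data.bin"   # hypothetical absolute path
    # Create/overwrite the file and fill it from the workers; writes go straight to the mmapped file.
    S = SharedArray{Int,2}(fn, (3, 4); mode="w+", init = D -> D[localindexes(D)] = myid())
    # Re-open the same data read-only later (no init allowed when the file is not writable).
    R = SharedArray{Int,2}(fn, (3, 4); mode="r")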
@@ -246,6 +251,7 @@ typealias SharedMatrix{T} SharedArray{T,2}

length(S::SharedArray) = prod(S.dims)
size(S::SharedArray) = S.dims
+ ndims(S::SharedArray) = length(S.dims)
linearindexing{S<:SharedArray}(::Type{S}) = LinearFast()

function reshape{T,N}(a::SharedArray{T}, dims::NTuple{N,Int})
@@ -307,21 +313,21 @@ localindexes(S::SharedArray) = S.pidx > 0 ? range_1dim(S, S.pidx) : 1:0
unsafe_convert{T}(::Type{Ptr{T}}, S::SharedArray) = unsafe_convert(Ptr{T}, sdata(S))

function convert(::Type{SharedArray}, A::Array)
-     S = SharedArray(eltype(A), size(A))
+     S = SharedArray{eltype(A),ndims(A)}(size(A))
    copy!(S, A)
end
function convert{T}(::Type{SharedArray{T}}, A::Array)
-     S = SharedArray(T, size(A))
+     S = SharedArray{T,ndims(A)}(size(A))
    copy!(S, A)
end
function convert{TS,TA,N}(::Type{SharedArray{TS,N}}, A::Array{TA,N})
-     S = SharedArray(TS, size(A))
+     S = SharedArray{TS,ndims(A)}(size(A))
    copy!(S, A)
end

function deepcopy_internal(S::SharedArray, stackdict::ObjectIdDict)
    haskey(stackdict, S) && return stackdict[S]
-     R = SharedArray(eltype(S), size(S); pids = S.pids)
+     R = SharedArray{eltype(S),ndims(S)}(size(S); pids = S.pids)
    copy!(sdata(R), sdata(S))
    stackdict[S] = R
    return R
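For reference, a brief illustrative sketch of how the `convert` methods above are used; `A` is an ordinary local array:

    A = rand(3, 4)
    S1 = convert(SharedArray, A)             # element type preserved (Float64 here)
    S2 = convert(SharedArray{Float32}, A)    # element type converted during copy!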
@@ -468,16 +474,16 @@ end

# convenience constructors
function shmem_fill(v, dims; kwargs...)
-     SharedArray(typeof(v), dims; init = S->fill!(S.loc_subarr_1d, v), kwargs...)
+     SharedArray{typeof(v),length(dims)}(dims; init = S->fill!(S.loc_subarr_1d, v), kwargs...)
end
shmem_fill(v, I::Int...; kwargs...) = shmem_fill(v, I; kwargs...)

# rand variant with range
function shmem_rand(TR::Union{DataType, UnitRange}, dims; kwargs...)
    if isa(TR, UnitRange)
-         SharedArray(Int, dims; init = S -> map!(x -> rand(TR), S.loc_subarr_1d, S.loc_subarr_1d), kwargs...)
+         SharedArray{Int,length(dims)}(dims; init = S -> map!(x -> rand(TR), S.loc_subarr_1d, S.loc_subarr_1d), kwargs...)
    else
-         SharedArray(TR, dims; init = S -> map!(x -> rand(TR), S.loc_subarr_1d, S.loc_subarr_1d), kwargs...)
+         SharedArray{TR,length(dims)}(dims; init = S -> map!(x -> rand(TR), S.loc_subarr_1d, S.loc_subarr_1d), kwargs...)
    end
end
shmem_rand(TR::Union{DataType, UnitRange}, i::Int; kwargs...) = shmem_rand(TR, (i,); kwargs...)
@@ -487,7 +493,7 @@ shmem_rand(dims; kwargs...) = shmem_rand(Float64, dims; kwargs...)
shmem_rand(I::Int...; kwargs...) = shmem_rand(I; kwargs...)

function shmem_randn(dims; kwargs...)
-     SharedArray(Float64, dims; init = S->map!(x -> randn(), S.loc_subarr_1d, S.loc_subarr_1d), kwargs...)
+     SharedArray{Float64,length(dims)}(dims; init = S->map!(x -> randn(), S.loc_subarr_1d, S.loc_subarr_1d), kwargs...)
end
shmem_randn(I::Int...; kwargs...) = shmem_randn(I; kwargs...)
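And a short illustrative sketch of the updated `shmem_*` convenience constructors, under the same assumption that workers are already available:

    F = shmem_fill(1.0, (100, 100))   # 100×100 shared array filled with 1.0
    R = shmem_rand(1:10, 25)          # length-25 shared Int array of draws from 1:10
    Z = shmem_randn(50, 50)           # 50×50 shared Float64 array of standard normals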