@@ -613,113 +613,117 @@ function Base.setindex!(a::Array, d::DArray,
return a
end
- # Similar to Base.indexin, but just create a logical mask
- indexin_mask(a, b::Number) = a .== b
- indexin_mask(a, r::Range{Int}) = [i in r for i in a]
- indexin_mask(a, b::AbstractArray{Int}) = indexin_mask(a, IntSet(b))
- indexin_mask(a, b::AbstractArray) = indexin_mask(a, Set(b))
- indexin_mask(a, b) = [i in b for i in a]
-
- import Base: tail
- # Given a tuple of indices and a tuple of masks, restrict the indices to the
- # valid regions. This is, effectively, reversing Base.setindex_shape_check.
- # We can't just use indexing into MergedIndices here because getindex is much
- # pickier about singleton dimensions than setindex! is.
- restrict_indices(::Tuple{}, ::Tuple{}) = ()
- function restrict_indices(a::Tuple{Any, Vararg{Any}}, b::Tuple{Any, Vararg{Any}})
-     if (length(a[1]) == length(b[1]) == 1) || (length(a[1]) > 1 && length(b[1]) > 1)
-         (vec(a[1])[vec(b[1])], restrict_indices(tail(a), tail(b))...)
-     elseif length(a[1]) == 1
-         (a[1], restrict_indices(tail(a), b))
-     elseif length(b[1]) == 1 && b[1][1]
-         restrict_indices(a, tail(b))
-     else
-         throw(DimensionMismatch("this should be caught by setindex_shape_check; please submit an issue"))
+ # We also want to optimize setindex! with a SubDArray source, but this is hard
+ # and only works on 0.5.
+ if VERSION > v"0.5.0-dev+5230"
+ # Similar to Base.indexin, but just create a logical mask
+ indexin_mask(a, b::Number) = a .== b
+ indexin_mask(a, r::Range{Int}) = [i in r for i in a]
+ indexin_mask(a, b::AbstractArray{Int}) = indexin_mask(a, IntSet(b))
+ indexin_mask(a, b::AbstractArray) = indexin_mask(a, Set(b))
+ indexin_mask(a, b) = [i in b for i in a]
+
+ import Base: tail
+ # Given a tuple of indices and a tuple of masks, restrict the indices to the
+ # valid regions. This is, effectively, reversing Base.setindex_shape_check.
+ # We can't just use indexing into MergedIndices here because getindex is much
+ # pickier about singleton dimensions than setindex! is.
+ restrict_indices(::Tuple{}, ::Tuple{}) = ()
+ function restrict_indices(a::Tuple{Any, Vararg{Any}}, b::Tuple{Any, Vararg{Any}})
+     if (length(a[1]) == length(b[1]) == 1) || (length(a[1]) > 1 && length(b[1]) > 1)
+         (vec(a[1])[vec(b[1])], restrict_indices(tail(a), tail(b))...)
+     elseif length(a[1]) == 1
+         (a[1], restrict_indices(tail(a), b))
+     elseif length(b[1]) == 1 && b[1][1]
+         restrict_indices(a, tail(b))
+     else
+         throw(DimensionMismatch("this should be caught by setindex_shape_check; please submit an issue"))
+     end
end
- end
- # The final indices are funky - they're allowed to accumulate together.
- # Too many masks is an easy fix -- just use the outer product to merge them:
- function restrict_indices(a::Tuple{Any}, b::Tuple{Any, Any, Vararg{Any}})
-     restrict_indices(a, (map(Bool, vec(vec(b[1])*vec(b[2])')), tail(tail(b))...))
- end
- # But too many indices is much harder; this will require merging the indices
- # in `a` before applying the final mask in `b`.
- function restrict_indices(a::Tuple{Any, Any, Vararg{Any}}, b::Tuple{Any})
-     if length(a[1]) == 1
-         (a[1], restrict_indices(tail(a), b))
-     else
-         # When one mask spans multiple indices, we need to merge the indices
-         # together. At this point, we can just use indexing to merge them since
-         # there's no longer special handling of singleton dimensions
-         (view(MergedIndices(a, map(length, a)), b[1]),)
-     end
- end
-
- immutable MergedIndices{T,N} <: AbstractArray{CartesianIndex{N}, N}
-     indices::T
-     sz::NTuple{N,Int}
- end
- Base.size(M::MergedIndices) = M.sz
- Base.getindex{_,N}(M::MergedIndices{_,N}, I::Vararg{Int,N}) = CartesianIndex(map(getindex, M.indices, I))
- # Boundschecking for using MergedIndices as an array index. This is overly
- # strict -- even for SubArrays of ReshapedIndices, we require that the entire
- # parent array's indices are valid. In this usage, it is just fine... and is a
- # huge optimization over exact bounds checking.
- typealias ReshapedMergedIndices{T,N,M<:MergedIndices} Base.ReshapedArray{T,N,M}
- typealias SubMergedIndices{T,N,M<:Union{MergedIndices, ReshapedMergedIndices}} SubArray{T,N,M}
- typealias MergedIndicesOrSub Union{MergedIndices, SubMergedIndices}
- import Base: _chkbnds
- # Ambiguity with linear indexing:
- @inline _chkbnds(A::AbstractVector, checked::NTuple{1,Bool}, I::MergedIndicesOrSub) = _chkbnds(A, checked, parent(parent(I)).indices...)
- @inline _chkbnds(A::AbstractArray, checked::NTuple{1,Bool}, I::MergedIndicesOrSub) = _chkbnds(A, checked, parent(parent(I)).indices...)
- # Generic bounds checking
- @inline _chkbnds{T,N}(A::AbstractArray{T,N}, checked::NTuple{N,Bool}, I1::MergedIndicesOrSub, I...) = _chkbnds(A, checked, parent(parent(I1)).indices..., I...)
- @inline _chkbnds{T,N,M}(A::AbstractArray{T,N}, checked::NTuple{M,Bool}, I1::MergedIndicesOrSub, I...) = _chkbnds(A, checked, parent(parent(I1)).indices..., I...)
- import Base: checkbounds_indices
- @inline checkbounds_indices(::Tuple{}, I::Tuple{MergedIndicesOrSub,Vararg{Any}}) = checkbounds_indices((), (parent(parent(I[1])).indices..., tail(I)...))
- @inline checkbounds_indices(inds::Tuple{Any}, I::Tuple{MergedIndicesOrSub,Vararg{Any}}) = checkbounds_indices(inds, (parent(parent(I[1])).indices..., tail(I)...))
- @inline checkbounds_indices(inds::Tuple, I::Tuple{MergedIndicesOrSub,Vararg{Any}}) = checkbounds_indices(inds, (parent(parent(I[1])).indices..., tail(I)...))
-
- # The tricky thing here is that we want to optimize the accesses into the
- # distributed array, but in doing so, we lose track of which indices in I we
- # should be using.
- #
- # I’ve come to the conclusion that the function is utterly insane.
- # There are *6* flavors of indices with four different reference points:
- # 1. Find the indices of each portion of the DArray.
- # 2. Find the valid subset of indices for the SubArray into that portion.
- # 3. Find the portion of the `I` indices that should be used when you access the
- #    `K` indices in the subarray. This guy is nasty. It’s totally backwards
- #    from all other arrays, wherein we simply iterate over the source array’s
- #    elements. You need to *both* know which elements in `J` were skipped
- #    (`indexin_mask`) and which dimensions should match up (`restrict_indices`)
- # 4. If `K` doesn’t correspond to an entire chunk, reinterpret `K` in terms of
- #    the local portion of the source array
- function Base.setindex!(a::Array, s::SubDArray,
-                         I::Union{UnitRange{Int},Colon,Vector{Int},StepRange{Int,Int}}...)
-     Base.setindex_shape_check(s, Base.index_lengths(a, I...)...)
-     n = length(I)
-     d = s.parent
-     J = Base.decolon(d, s.indexes...)
-     @sync for i = 1:length(d.pids)
-         K_c = d.indexes[i]
-         K = map(intersect, J, K_c)
-         if !any(isempty, K)
-             K_mask = map(indexin_mask, J, K_c)
-             idxs = restrict_indices(Base.decolon(a, I...), K_mask)
-             if isequal(K, K_c)
-                 # whole chunk
-                 @async a[idxs...] = chunk(d, i)
-             else
-                 # partial chunk
-                 @async a[idxs...] =
-                     remotecall_fetch(d.pids[i]) do
-                         view(localpart(d), [K[j]-first(K_c[j])+1 for j=1:length(J)]...)
-                     end
+ # The final indices are funky - they're allowed to accumulate together.
+ # Too many masks is an easy fix -- just use the outer product to merge them:
+ function restrict_indices(a::Tuple{Any}, b::Tuple{Any, Any, Vararg{Any}})
+     restrict_indices(a, (map(Bool, vec(vec(b[1])*vec(b[2])')), tail(tail(b))...))
+ end
+ # But too many indices is much harder; this will require merging the indices
+ # in `a` before applying the final mask in `b`.
+ function restrict_indices(a::Tuple{Any, Any, Vararg{Any}}, b::Tuple{Any})
+     if length(a[1]) == 1
+         (a[1], restrict_indices(tail(a), b))
+     else
+         # When one mask spans multiple indices, we need to merge the indices
+         # together. At this point, we can just use indexing to merge them since
+         # there's no longer special handling of singleton dimensions
+         (view(MergedIndices(a, map(length, a)), b[1]),)
+     end
+ end
+
+ immutable MergedIndices{T,N} <: AbstractArray{CartesianIndex{N}, N}
+     indices::T
+     sz::NTuple{N,Int}
+ end
+ Base.size(M::MergedIndices) = M.sz
+ Base.getindex{_,N}(M::MergedIndices{_,N}, I::Vararg{Int,N}) = CartesianIndex(map(getindex, M.indices, I))
+ # Boundschecking for using MergedIndices as an array index. This is overly
+ # strict -- even for SubArrays of ReshapedIndices, we require that the entire
+ # parent array's indices are valid. In this usage, it is just fine... and is a
+ # huge optimization over exact bounds checking.
+ typealias ReshapedMergedIndices{T,N,M<:MergedIndices} Base.ReshapedArray{T,N,M}
+ typealias SubMergedIndices{T,N,M<:Union{MergedIndices, ReshapedMergedIndices}} SubArray{T,N,M}
+ typealias MergedIndicesOrSub Union{MergedIndices, SubMergedIndices}
+ import Base: _chkbnds
+ # Ambiguity with linear indexing:
+ @inline _chkbnds(A::AbstractVector, checked::NTuple{1,Bool}, I::MergedIndicesOrSub) = _chkbnds(A, checked, parent(parent(I)).indices...)
+ @inline _chkbnds(A::AbstractArray, checked::NTuple{1,Bool}, I::MergedIndicesOrSub) = _chkbnds(A, checked, parent(parent(I)).indices...)
+ # Generic bounds checking
+ @inline _chkbnds{T,N}(A::AbstractArray{T,N}, checked::NTuple{N,Bool}, I1::MergedIndicesOrSub, I...) = _chkbnds(A, checked, parent(parent(I1)).indices..., I...)
+ @inline _chkbnds{T,N,M}(A::AbstractArray{T,N}, checked::NTuple{M,Bool}, I1::MergedIndicesOrSub, I...) = _chkbnds(A, checked, parent(parent(I1)).indices..., I...)
+ import Base: checkbounds_indices
+ @inline checkbounds_indices(::Tuple{}, I::Tuple{MergedIndicesOrSub,Vararg{Any}}) = checkbounds_indices((), (parent(parent(I[1])).indices..., tail(I)...))
+ @inline checkbounds_indices(inds::Tuple{Any}, I::Tuple{MergedIndicesOrSub,Vararg{Any}}) = checkbounds_indices(inds, (parent(parent(I[1])).indices..., tail(I)...))
+ @inline checkbounds_indices(inds::Tuple, I::Tuple{MergedIndicesOrSub,Vararg{Any}}) = checkbounds_indices(inds, (parent(parent(I[1])).indices..., tail(I)...))
+
+ # The tricky thing here is that we want to optimize the accesses into the
+ # distributed array, but in doing so, we lose track of which indices in I we
+ # should be using.
+ #
+ # I’ve come to the conclusion that the function is utterly insane.
+ # There are *6* flavors of indices with four different reference points:
+ # 1. Find the indices of each portion of the DArray.
+ # 2. Find the valid subset of indices for the SubArray into that portion.
+ # 3. Find the portion of the `I` indices that should be used when you access the
+ #    `K` indices in the subarray. This guy is nasty. It’s totally backwards
+ #    from all other arrays, wherein we simply iterate over the source array’s
+ #    elements. You need to *both* know which elements in `J` were skipped
+ #    (`indexin_mask`) and which dimensions should match up (`restrict_indices`)
+ # 4. If `K` doesn’t correspond to an entire chunk, reinterpret `K` in terms of
+ #    the local portion of the source array
+ function Base.setindex!(a::Array, s::SubDArray,
+                         I::Union{UnitRange{Int},Colon,Vector{Int},StepRange{Int,Int}}...)
+     Base.setindex_shape_check(s, Base.index_lengths(a, I...)...)
+     n = length(I)
+     d = s.parent
+     J = Base.decolon(d, s.indexes...)
+     @sync for i = 1:length(d.pids)
+         K_c = d.indexes[i]
+         K = map(intersect, J, K_c)
+         if !any(isempty, K)
+             K_mask = map(indexin_mask, J, K_c)
+             idxs = restrict_indices(Base.decolon(a, I...), K_mask)
+             if isequal(K, K_c)
+                 # whole chunk
+                 @async a[idxs...] = chunk(d, i)
+             else
+                 # partial chunk
+                 @async a[idxs...] =
+                     remotecall_fetch(d.pids[i]) do
+                         view(localpart(d), [K[j]-first(K_c[j])+1 for j=1:length(J)]...)
+                     end
+             end
end
end
+ return a
end
- return a
end

Base.fill!(A::DArray, x) = begin
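Below the diff, a standalone illustration may help make the masking idea concrete. This is a minimal sketch in plain Julia, not part of the commit; the names `J`, `K_c`, `mask`, and `dest` are hypothetical, and only two of the `indexin_mask` methods from the diff are reproduced. The point is that `indexin_mask` marks which of a view's indices fall inside one chunk, and logical indexing with that mask then selects the matching destination indices.

# Standalone sketch (not part of the commit): which destination rows does a
# chunk owning parent rows 4:8 fill, if the view selects rows 1:10?
indexin_mask(a, b::Number) = a .== b        # copied from the diff
indexin_mask(a, b) = [i in b for i in a]    # copied from the diff

J    = 1:10                     # indices the view selects along one dimension
K_c  = 4:8                      # indices owned by one chunk of the parent DArray
mask = indexin_mask(J, K_c)     # Bool mask: true where J falls inside the chunk
dest = (1:10)[mask]             # destination indices in `a` for that chunk: [4, 5, 6, 7, 8]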
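For context, a hedged usage sketch of the path this commit optimizes: assigning a SubDArray into a local Array. The setup below (worker count, `drand`, the 100x100 size) is an assumption for illustration, not something stated in the diff; it presumes a DistributedArrays-style environment that provides `DArray`, `chunk`, and `localpart` on Julia 0.5.

# Hypothetical setup; assumes the DistributedArrays package (or equivalent) is installed.
addprocs(4)
using DistributedArrays

d = drand(100, 100)           # a distributed 100x100 matrix
s = view(d, 11:60, 21:70)     # a SubDArray: a view whose parent is the DArray
a = zeros(50, 50)
a[:, :] = s                   # dispatches to the specialized setindex! above:
                              # chunks fully covered by the view are copied via
                              # chunk(d, i); partial chunks via remotecall_fetch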