@@ -261,18 +261,17 @@ end
     return (idx, (nextidx, r, state, true))
 end
 done(L::LogicalIndex, s) = s[end]
-# When wrapped in a LogicalIndex, the BitArray knows what proportion of its
-# values are true. The specialized findnext logic is only faster when it's able
-# to skip a number of elements at once.
-@inline start{B<:BitArray}(L::LogicalIndex{Int, B}) = next(L, 0)[2]
+# When wrapped in a BitArray we can lean upon the internal iterator state
+@inline start{B<:BitArray}(L::LogicalIndex{Int, B}) = next(L, (0, false))[2]
 @inline function next{B<:BitArray}(L::LogicalIndex{Int, B}, s)
-    L.sum < length(L.mask) >> 1 && return (s, findnext(L.mask, s+1))
-    @inbounds for i in s+1:length(L.mask)
-        L.mask[i] && return (s, i)
+    idx = state = s[1]
+    while !done(L.mask, state)
+        b, state = next(L.mask, state)
+        b && return (idx, (state, false))
     end
-    return (s, 0)
+    return (idx, (state, true))
 end
-@inline done{B<:BitArray}(L::LogicalIndex{Int, B}, s) = s == 0
+@inline done{B<:BitArray}(L::LogicalIndex{Int, B}, s) = s[2]
 
 checkbounds(::Type{Bool}, A::AbstractArray, I::LogicalIndex) = checkbounds(Bool, A, I.mask)
 checkindex(::Type{Bool}, indx::AbstractUnitRange, I::LogicalIndex) = checkindex(Bool, indx, I.mask)
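For readers unfamiliar with the pre-1.0 start/next/done iteration protocol used here, the following standalone sketch shows the same idea outside Base: the state is an (index, exhausted) tuple, and each step scans forward through the mask until the next true entry. The logical_* names are made up for illustration and this walks the mask by position rather than through the mask's own iterator state as the diff does.

```julia
# A minimal sketch of the (index, exhausted)-state iteration used above.
# The logical_* names are hypothetical; they are not the Base methods in the diff.
logical_start(mask::AbstractVector{Bool}) = logical_next(mask, (0, false))[2]

function logical_next(mask::AbstractVector{Bool}, s)
    idx = state = s[1]            # report the position found on the previous step
    while state < length(mask)    # scan forward for the next true entry
        state += 1
        mask[state] && return (idx, (state, false))
    end
    return (idx, (state, true))   # no true entries left: mark the state exhausted
end

logical_done(mask::AbstractVector{Bool}, s) = s[2]

# Collect the indices of the true entries by driving the protocol by hand.
function collect_true(mask::AbstractVector{Bool})
    inds = Int[]
    s = logical_start(mask)
    while !logical_done(mask, s)
        i, s = logical_next(mask, s)
        push!(inds, i)
    end
    return inds
end

collect_true([true, false, true, true, false]) == [1, 3, 4]
```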
@@ -312,19 +311,20 @@ getindex(t::Tuple, I...) = getindex(t, IteratorsMD.flatten(I)...)
 
 @inline function _getindex(l::LinearIndexing, A::AbstractArray, I::Union{Real, AbstractArray}...)
     @boundscheck checkbounds(A, I...)
-    _unsafe_getindex(l, _maybe_reshape(l, A, I), I...)
+    _unsafe_getindex(l, _maybe_reshape(l, A, I...), I...)
 end
-# But we can speed up LinearSlow arrays by reshaping them to vectors:
-_maybe_reshape(::LinearFast, A::AbstractArray, i) = A
-_maybe_reshape(::LinearSlow, A::AbstractVector, i) = A
-@inline _maybe_reshape(::LinearSlow, A::AbstractArray, i) = _maybe_reshape(LinearSlow(), index_ndims(i...), A)
-@inline _maybe_reshape{T,N}(::LinearIndexing, ::NTuple{N}, A::AbstractArray{T,N}) = A
-@inline _maybe_reshape{N}(::LinearIndexing, ::NTuple{N}, A) = reshape(A, Val{N})
+# But we can speed up LinearSlow arrays by reshaping them to the appropriate dimensionality:
+_maybe_reshape(::LinearFast, A::AbstractArray, I...) = A
+_maybe_reshape(::LinearSlow, A::AbstractVector, I...) = A
+@inline _maybe_reshape(::LinearSlow, A::AbstractArray, I...) = __maybe_reshape(A, index_ndims(I...))
+@inline __maybe_reshape{T,N}(A::AbstractArray{T,N}, ::NTuple{N}) = A
+@inline __maybe_reshape{N}(A::AbstractArray, ::NTuple{N}) = reshape(A, Val{N})
 
 @generated function _unsafe_getindex(::LinearIndexing, A::AbstractArray, I::Union{Real, AbstractArray}...)
     N = length(I)
     quote
-        # This is specifically *not* inlined.
+        # This is inlined and @generated to prevent allocations from splatting tuples
+        @_inline_meta
         @nexprs $N d->(I_d = I[d])
         shape = @ncall $N index_shape I
         dest = similar(A, shape)
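The reshaping dispatch can be illustrated outside Base. The sketch below uses a hypothetical maybe_reshape (written with the current where syntax rather than the curly-brace form in the diff, and passing the target rank explicitly instead of deriving it with index_ndims) together with the public reshape(A, Val(N)) method: the array is left alone when its rank already matches the number of index dimensions, and reshaped otherwise.

```julia
# Hypothetical stand-in for the _maybe_reshape/__maybe_reshape pair above:
# keep the array if its rank matches the indexing dimensionality N,
# otherwise reshape it (lazily) to an N-dimensional view.
maybe_reshape(A::AbstractArray{T,N}, ::Val{N}) where {T,N} = A
maybe_reshape(A::AbstractArray, ::Val{N}) where {N} = reshape(A, Val(N))

A = reshape(1:8, 2, 2, 2)      # a 3-d array
B = maybe_reshape(A, Val(2))   # treat it as 2-d for a two-index access
size(B) == (2, 4)
B[2, 3] == A[2, 1, 2]          # same underlying element, two indices instead of three
```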
@@ -363,8 +363,12 @@ _iterable(v) = Iterators.repeated(v)
     A
 end
 
-@inline function _unsafe_setindex!(::LinearIndexing, A::AbstractArray, x, J::Union{Real,AbstractArray}...)
-    _unsafe_batchsetindex!(A, _iterable(x), J...)
+@generated function _unsafe_setindex!(::LinearIndexing, A::AbstractArray, x, J::Union{Real,AbstractArray}...)
+    N = length(J)
+    quote
+        @_inline_meta
+        @ncall $N _unsafe_batchsetindex! A _iterable(x) d->J[d]
+    end
 end
 
 @generated function _unsafe_batchsetindex!(A::AbstractArray, X, I::Union{Real,AbstractArray}...)
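The same @generated/@ncall pattern can be reproduced in user code. The sketch below uses made-up names (sum_indices, _sum_indices) and Base.Cartesian's @ncall to turn a splatted vararg tuple into N explicit arguments at compile time; it derives N with the Vararg{...,N} form so it runs on current Julia, whereas the diff computes N with length(J) inside the generator, and the internal @_inline_meta annotation is omitted here.

```julia
using Base.Cartesian: @ncall

# Plain vararg helper: sums the elements selected by the given indices.
_sum_indices(A, inds...) = sum(A[i...] for i in Iterators.product(inds...))

# @generated front end: N is known from the argument types, so @ncall can
# emit the call _sum_indices(A, I[1], ..., I[N]) with no runtime splatting.
@generated function sum_indices(A::AbstractArray, I::Vararg{Union{Real,AbstractArray},N}) where {N}
    quote
        @ncall $N _sum_indices A d -> I[d]
    end
end

A = reshape(1:12, 3, 4)
sum_indices(A, 1:2, [1, 3]) == sum(A[1:2, [1, 3]])   # both 18
```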