@@ -1658,19 +1658,27 @@ impl<T, A: Allocator + Clone> IntoIterator for RawTable<T, A> {
 /// Iterator over a sub-range of a table. Unlike `RawIter` this iterator does
 /// not track an item count.
 pub(crate) struct RawIterRange<T> {
+    // Pointer to the buckets for the current group.
+    data: Bucket<T>,
+
+    inner: RawIterRangeInner,
+}
+
+#[derive(Clone, Copy)]
+pub(crate) struct RawIterRangeInner {
     // Mask of full buckets in the current group. Bits are cleared from this
     // mask as each element is processed.
     current_group: BitMask,
 
-    // Pointer to the buckets for the current group.
-    data: Bucket<T>,
-
     // Pointer to the next group of control bytes,
     // Must be aligned to the group size.
     next_ctrl: *const u8,
 
     // Pointer one past the last control byte of this range.
     end: *const u8,
+
+    // Index to the buckets for the current group.
+    index: usize,
 }
 
 impl<T> RawIterRange<T> {
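The hunk above applies a common refactoring: the fields that do not depend on `T` (`current_group`, `next_ctrl`, `end`, and the new `index`) move into a non-generic `RawIterRangeInner`, while the generic `RawIterRange<T>` keeps only the `data: Bucket<T>` needed to turn indices back into buckets. A typical motivation for this pattern is that the scanning state and logic then only need to be compiled once rather than per element type. Below is a minimal standalone sketch of the same shape, using hypothetical `SliceIter`/`SliceIterInner` types rather than anything from hashbrown:

// Hypothetical illustration only: a generic iterator whose cursor logic
// lives in a non-generic inner struct, mirroring RawIterRange/RawIterRangeInner.
struct SliceIterInner {
    index: usize, // next position to yield
    len: usize,   // one past the last valid position
}

impl Iterator for SliceIterInner {
    type Item = usize;

    // Yields positions; knows nothing about the element type.
    fn next(&mut self) -> Option<usize> {
        if self.index < self.len {
            let i = self.index;
            self.index += 1;
            Some(i)
        } else {
            None
        }
    }
}

struct SliceIter<'a, T> {
    data: &'a [T],
    inner: SliceIterInner,
}

impl<'a, T> Iterator for SliceIter<'a, T> {
    type Item = &'a T;

    // The only generic part: map a position from the inner iterator to an element.
    fn next(&mut self) -> Option<&'a T> {
        self.inner.next().map(|i| &self.data[i])
    }
}

fn main() {
    let data = [10, 20, 30];
    let iter = SliceIter {
        data: &data,
        inner: SliceIterInner { index: 0, len: data.len() },
    };
    assert_eq!(iter.collect::<Vec<_>>(), vec![&10, &20, &30]);
}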
@@ -1679,19 +1687,9 @@ impl<T> RawIterRange<T> {
     /// The control byte address must be aligned to the group size.
     #[cfg_attr(feature = "inline-more", inline)]
     unsafe fn new(ctrl: *const u8, data: Bucket<T>, len: usize) -> Self {
-        debug_assert_ne!(len, 0);
-        debug_assert_eq!(ctrl as usize % Group::WIDTH, 0);
-        let end = ctrl.add(len);
-
-        // Load the first group and advance ctrl to point to the next group
-        let current_group = Group::load_aligned(ctrl).match_full();
-        let next_ctrl = ctrl.add(Group::WIDTH);
-
         Self {
-            current_group,
             data,
-            next_ctrl,
-            end,
+            inner: RawIterRangeInner::new(ctrl, len),
         }
     }
 
@@ -1703,15 +1701,15 @@ impl<T> RawIterRange<T> {
     #[cfg(feature = "rayon")]
     pub(crate) fn split(mut self) -> (Self, Option<RawIterRange<T>>) {
         unsafe {
-            if self.end <= self.next_ctrl {
+            if self.inner.end <= self.inner.next_ctrl {
                 // Nothing to split if the group that we are currently processing
                 // is the last one.
                 (self, None)
             } else {
                 // len is the remaining number of elements after the group that
                 // we are currently processing. It must be a multiple of the
                 // group size (small tables are caught by the check above).
-                let len = offset_from(self.end, self.next_ctrl);
+                let len = offset_from(self.inner.end, self.inner.next_ctrl);
                 debug_assert_eq!(len % Group::WIDTH, 0);
 
                 // Split the remaining elements into two halves, but round the
@@ -1723,23 +1721,46 @@ impl<T> RawIterRange<T> {
                 let mid = (len / 2) & !(Group::WIDTH - 1);
 
                 let tail = Self::new(
-                    self.next_ctrl.add(mid),
+                    self.inner.next_ctrl.add(mid),
                     self.data.next_n(Group::WIDTH).next_n(mid),
                     len - mid,
                 );
                 debug_assert_eq!(
                     self.data.next_n(Group::WIDTH).next_n(mid).ptr,
                     tail.data.ptr
                 );
-                debug_assert_eq!(self.end, tail.end);
-                self.end = self.next_ctrl.add(mid);
-                debug_assert_eq!(self.end.add(Group::WIDTH), tail.next_ctrl);
+                debug_assert_eq!(self.inner.end, tail.inner.end);
+                self.inner.end = self.inner.next_ctrl.add(mid);
+                debug_assert_eq!(self.inner.end.add(Group::WIDTH), tail.inner.next_ctrl);
                 (self, Some(tail))
             }
         }
     }
 }
 
+impl RawIterRangeInner {
+    /// Returns a `RawIterRangeInner` covering a subset of a table.
+    ///
+    /// The control byte address must be aligned to the group size.
+    #[cfg_attr(feature = "inline-more", inline)]
+    unsafe fn new(ctrl: *const u8, len: usize) -> Self {
+        debug_assert_ne!(len, 0);
+        debug_assert_eq!(ctrl as usize % Group::WIDTH, 0);
+        let end = ctrl.add(len);
+
+        // Load the first group and advance ctrl to point to the next group
+        let current_group = Group::load_aligned(ctrl).match_full();
+        let next_ctrl = ctrl.add(Group::WIDTH);
+
+        Self {
+            current_group,
+            next_ctrl,
+            end,
+            index: 0,
+        }
+    }
+}
+
 // We make raw iterators unconditionally Send and Sync, and let the PhantomData
 // in the actual iterator implementations determine the real Send/Sync bounds.
 unsafe impl<T> Send for RawIterRange<T> {}
@@ -1750,9 +1771,7 @@ impl<T> Clone for RawIterRange<T> {
     fn clone(&self) -> Self {
         Self {
             data: self.data.clone(),
-            next_ctrl: self.next_ctrl,
-            current_group: self.current_group,
-            end: self.end,
+            inner: self.inner.clone(),
         }
     }
 }
@@ -1762,11 +1781,32 @@ impl<T> Iterator for RawIterRange<T> {
 
     #[cfg_attr(feature = "inline-more", inline)]
     fn next(&mut self) -> Option<Bucket<T>> {
+        unsafe {
+            match self.inner.next() {
+                Some(index) => Some(self.data.next_n(index)),
+                None => None,
+            }
+        }
+    }
+
+    #[cfg_attr(feature = "inline-more", inline)]
+    fn size_hint(&self) -> (usize, Option<usize>) {
+        self.inner.size_hint()
+    }
+}
+
+impl<T> FusedIterator for RawIterRange<T> {}
+
+impl Iterator for RawIterRangeInner {
+    type Item = usize;
+
+    #[cfg_attr(feature = "inline-more", inline)]
+    fn next(&mut self) -> Option<Self::Item> {
         unsafe {
             loop {
-                if let Some(index) = self.current_group.lowest_set_bit() {
+                if let Some(group_index) = self.current_group.lowest_set_bit() {
                     self.current_group = self.current_group.remove_lowest_bit();
-                    return Some(self.data.next_n(index));
+                    return Some(self.index + group_index);
                 }
 
                 if self.next_ctrl >= self.end {
@@ -1779,7 +1819,7 @@ impl<T> Iterator for RawIterRange<T> {
                 // EMPTY. On larger tables self.end is guaranteed to be aligned
                 // to the group size (since tables are power-of-two sized).
                 self.current_group = Group::load_aligned(self.next_ctrl).match_full();
-                self.data = self.data.next_n(Group::WIDTH);
+                self.index += Group::WIDTH;
                 self.next_ctrl = self.next_ctrl.add(Group::WIDTH);
             }
         }
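The two hunks above are the heart of the change: `RawIterRangeInner::next` walks the control bytes one group at a time, consuming bits of `current_group` with `lowest_set_bit`/`remove_lowest_bit` and returning `self.index + group_index`, while the generic wrapper turns each returned index into a `Bucket<T>` via `next_n`. A simplified, self-contained model of that scan, using plain slices and integer masks instead of hashbrown's SIMD-backed `Group`/`BitMask` (and a toy group width of 8), may make the control flow easier to follow:

// Simplified model of the group-wise scan performed by RawIterRangeInner::next.
// This is an illustration, not hashbrown's implementation: it assumes the usual
// SwissTable convention that a control byte with its high bit clear marks a
// full bucket, and it uses a plain u16 mask in place of Group/BitMask.
const GROUP_WIDTH: usize = 8;

struct ToyIter<'a> {
    ctrl: &'a [u8],     // control bytes; length is a multiple of GROUP_WIDTH
    current_group: u16, // bitmask of full buckets in the current group
    index: usize,       // bucket index of the current group's first slot
    next_group: usize,  // start of the next group of control bytes
}

impl<'a> ToyIter<'a> {
    fn new(ctrl: &'a [u8]) -> Self {
        assert!(!ctrl.is_empty() && ctrl.len() % GROUP_WIDTH == 0);
        let mut it = ToyIter { ctrl, current_group: 0, index: 0, next_group: 0 };
        // Load the first group and advance to the next one, like RawIterRangeInner::new.
        it.current_group = it.load_group(0);
        it.next_group = GROUP_WIDTH;
        it
    }

    // Build a bitmask with one bit per full control byte in the group.
    fn load_group(&self, start: usize) -> u16 {
        let mut mask = 0u16;
        for (i, &byte) in self.ctrl[start..start + GROUP_WIDTH].iter().enumerate() {
            if byte & 0x80 == 0 {
                mask |= 1 << i;
            }
        }
        mask
    }
}

impl<'a> Iterator for ToyIter<'a> {
    type Item = usize; // bucket index of the next full slot

    fn next(&mut self) -> Option<usize> {
        loop {
            if self.current_group != 0 {
                // Pop the lowest set bit, like lowest_set_bit/remove_lowest_bit.
                let group_index = self.current_group.trailing_zeros() as usize;
                self.current_group &= self.current_group - 1;
                return Some(self.index + group_index);
            }
            if self.next_group >= self.ctrl.len() {
                return None;
            }
            // Advance to the next group; index tracks the group's absolute offset.
            self.current_group = self.load_group(self.next_group);
            self.index = self.next_group;
            self.next_group += GROUP_WIDTH;
        }
    }
}

fn main() {
    // Buckets 1, 3 and 9 are full; everything else is empty (high bit set).
    let mut ctrl = [0xFFu8; 2 * GROUP_WIDTH];
    ctrl[1] = 0x11;
    ctrl[3] = 0x22;
    ctrl[9] = 0x33;
    assert_eq!(ToyIter::new(&ctrl).collect::<Vec<_>>(), vec![1, 3, 9]);
}

Note how the element type never appears in the scanning loop; only the caller that maps indices to buckets needs to be generic.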
@@ -1795,8 +1835,6 @@ impl<T> Iterator for RawIterRange<T> {
     }
 }
 
-impl<T> FusedIterator for RawIterRange<T> {}
-
 /// Iterator which returns a raw pointer to every full bucket in the table.
 ///
 /// For maximum flexibility this iterator is not bound by a lifetime, but you