Skip to content

Commit 03d2f5c

Browse files
committed
Auto merge of #69332 - nnethercote:revert-u8to64_le-changes, r=michaelwoerister
Revert `u8to64_le` changes from #68914. `SipHasher128`'s `u8to64_le` function was simplified in #68914. Unfortunately, the new version is slower, because it introduces `memcpy` calls with non-statically-known lengths. This commit reverts the change, and adds an explanatory comment (which is also added to `libcore/hash/sip.rs`). This barely affects `SipHasher128`'s speed because it doesn't use `u8to64_le` much, but it does result in `SipHasher128` once again being consistent with `libcore/hash/sip.rs`. r? @michaelwoerister
2 parents: 87e494c + 100ff5a · commit 03d2f5c

File tree

2 files changed: +46 −13 lines changed

src/libcore/hash/sip.rs

+3-1
Original file line numberDiff line numberDiff line change
@@ -121,7 +121,9 @@ macro_rules! load_int_le {
121121
}};
122122
}
123123

124-
/// Loads an u64 using up to 7 bytes of a byte slice.
124+
/// Loads a u64 using up to 7 bytes of a byte slice. It looks clumsy but the
125+
/// `copy_nonoverlapping` calls that occur (via `load_int_le!`) all have fixed
126+
/// sizes and avoid calling `memcpy`, which is good for speed.
125127
///
126128
/// Unsafe because: unchecked indexing at start..start+len
127129
#[inline]

src/librustc_data_structures/sip128.rs

+43-12
Original file line numberDiff line numberDiff line change
@@ -51,17 +51,48 @@ macro_rules! compress {
5151
}};
5252
}
5353

54-
/// Loads up to 8 bytes from a byte-slice into a little-endian u64.
55-
#[inline]
56-
fn u8to64_le(buf: &[u8], start: usize, len: usize) -> u64 {
57-
assert!(len <= 8 && start + len <= buf.len());
54+
/// Loads an integer of the desired type from a byte stream, in LE order. Uses
55+
/// `copy_nonoverlapping` to let the compiler generate the most efficient way
56+
/// to load it from a possibly unaligned address.
57+
///
58+
/// Unsafe because: unchecked indexing at i..i+size_of(int_ty)
59+
macro_rules! load_int_le {
60+
($buf:expr, $i:expr, $int_ty:ident) => {{
61+
debug_assert!($i + mem::size_of::<$int_ty>() <= $buf.len());
62+
let mut data = 0 as $int_ty;
63+
ptr::copy_nonoverlapping(
64+
$buf.get_unchecked($i),
65+
&mut data as *mut _ as *mut u8,
66+
mem::size_of::<$int_ty>(),
67+
);
68+
data.to_le()
69+
}};
70+
}
5871

59-
let mut out = 0u64;
60-
unsafe {
61-
let out_ptr = &mut out as *mut _ as *mut u8;
62-
ptr::copy_nonoverlapping(buf.as_ptr().offset(start as isize), out_ptr, len);
72+
/// Loads a u64 using up to 7 bytes of a byte slice. It looks clumsy but the
73+
/// `copy_nonoverlapping` calls that occur (via `load_int_le!`) all have fixed
74+
/// sizes and avoid calling `memcpy`, which is good for speed.
75+
///
76+
/// Unsafe because: unchecked indexing at start..start+len
77+
#[inline]
78+
unsafe fn u8to64_le(buf: &[u8], start: usize, len: usize) -> u64 {
79+
debug_assert!(len < 8);
80+
let mut i = 0; // current byte index (from LSB) in the output u64
81+
let mut out = 0;
82+
if i + 3 < len {
83+
out = load_int_le!(buf, start + i, u32) as u64;
84+
i += 4;
85+
}
86+
if i + 1 < len {
87+
out |= (load_int_le!(buf, start + i, u16) as u64) << (i * 8);
88+
i += 2
89+
}
90+
if i < len {
91+
out |= (*buf.get_unchecked(start + i) as u64) << (i * 8);
92+
i += 1;
6393
}
64-
out.to_le()
94+
debug_assert_eq!(i, len);
95+
out
6596
}
6697

6798
impl SipHasher128 {
@@ -243,7 +274,7 @@ impl Hasher for SipHasher128 {
243274

244275
if self.ntail != 0 {
245276
needed = 8 - self.ntail;
246-
self.tail |= u8to64_le(msg, 0, cmp::min(length, needed)) << (8 * self.ntail);
277+
self.tail |= unsafe { u8to64_le(msg, 0, cmp::min(length, needed)) } << 8 * self.ntail;
247278
if length < needed {
248279
self.ntail += length;
249280
return;
@@ -261,7 +292,7 @@ impl Hasher for SipHasher128 {
261292

262293
let mut i = needed;
263294
while i < len - left {
264-
let mi = u8to64_le(msg, i, 8);
295+
let mi = unsafe { load_int_le!(msg, i, u64) };
265296

266297
self.state.v3 ^= mi;
267298
Sip24Rounds::c_rounds(&mut self.state);
@@ -270,7 +301,7 @@ impl Hasher for SipHasher128 {
270301
i += 8;
271302
}
272303

273-
self.tail = u8to64_le(msg, i, left);
304+
self.tail = unsafe { u8to64_le(msg, i, left) };
274305
self.ntail = left;
275306
}
276307

Comments (0)