
Commit e30f55f (parent: eac0908), committed Feb 18, 2019

Optimize copying large ranges of undefmask blocks

2 files changed, +58 -9 lines changed


src/librustc/mir/interpret/allocation.rs (+38 -7)
@@ -613,7 +613,6 @@ impl<Tag> DerefMut for Relocations<Tag> {
 ////////////////////////////////////////////////////////////////////////////////
 
 type Block = u64;
-const BLOCK_SIZE: u64 = 64;
 
 #[derive(Clone, Debug, Eq, PartialEq, PartialOrd, Ord, Hash, RustcEncodable, RustcDecodable)]
 pub struct UndefMask {
@@ -624,6 +623,8 @@ pub struct UndefMask {
 impl_stable_hash_for!(struct mir::interpret::UndefMask{blocks, len});
 
 impl UndefMask {
+    pub const BLOCK_SIZE: u64 = 64;
+
     pub fn new(size: Size) -> Self {
         let mut m = UndefMask {
             blocks: vec![],
@@ -643,6 +644,7 @@ impl UndefMask {
             return Err(self.len);
         }
 
+        // FIXME(oli-obk): optimize this for allocations larger than a block.
         let idx = (start.bytes()..end.bytes())
             .map(|i| Size::from_bytes(i))
             .find(|&i| !self.get(i));
@@ -662,8 +664,31 @@ impl UndefMask {
     }
 
     pub fn set_range_inbounds(&mut self, start: Size, end: Size, new_state: bool) {
-        for i in start.bytes()..end.bytes() {
-            self.set(Size::from_bytes(i), new_state);
+        let (blocka, bita) = bit_index(start);
+        let (blockb, bitb) = bit_index(end);
+        if blocka == blockb {
+            // within a single block
+            for i in bita .. bitb {
+                self.set_bit(blocka, i, new_state);
+            }
+            return;
+        }
+        // across block boundaries
+        for i in bita .. Self::BLOCK_SIZE as usize {
+            self.set_bit(blocka, i, new_state);
+        }
+        for i in 0 .. bitb {
+            self.set_bit(blockb, i, new_state);
+        }
+        // fill in all the other blocks (much faster than one bit at a time)
+        if new_state {
+            for block in (blocka + 1) .. blockb {
+                self.blocks[block] = 0xFFFF_FFFF_FFFF_FFFF;
+            }
+        } else {
+            for block in (blocka + 1) .. blockb {
+                self.blocks[block] = 0;
+            }
         }
     }

@@ -676,6 +701,11 @@ impl UndefMask {
     #[inline]
     pub fn set(&mut self, i: Size, new_state: bool) {
         let (block, bit) = bit_index(i);
+        self.set_bit(block, bit, new_state);
+    }
+
+    #[inline]
+    fn set_bit(&mut self, block: usize, bit: usize, new_state: bool) {
         if new_state {
             self.blocks[block] |= 1 << bit;
         } else {
@@ -684,11 +714,12 @@ impl UndefMask {
         }
     }
 
     pub fn grow(&mut self, amount: Size, new_state: bool) {
-        let unused_trailing_bits = self.blocks.len() as u64 * BLOCK_SIZE - self.len.bytes();
+        let unused_trailing_bits = self.blocks.len() as u64 * Self::BLOCK_SIZE - self.len.bytes();
         if amount.bytes() > unused_trailing_bits {
-            let additional_blocks = amount.bytes() / BLOCK_SIZE + 1;
+            let additional_blocks = amount.bytes() / Self::BLOCK_SIZE + 1;
             assert_eq!(additional_blocks as usize as u64, additional_blocks);
             self.blocks.extend(
+                // FIXME(oli-obk): optimize this by repeating `new_state as Block`
                 iter::repeat(0).take(additional_blocks as usize),
             );
         }
@@ -701,8 +732,8 @@ impl UndefMask {
 #[inline]
 fn bit_index(bits: Size) -> (usize, usize) {
     let bits = bits.bytes();
-    let a = bits / BLOCK_SIZE;
-    let b = bits % BLOCK_SIZE;
+    let a = bits / UndefMask::BLOCK_SIZE;
+    let b = bits % UndefMask::BLOCK_SIZE;
     assert_eq!(a as usize as u64, a);
     assert_eq!(b as usize as u64, b);
     (a as usize, b as usize)
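
Taken together, the two files implement one optimization: `UndefMask::set_range_inbounds` now writes whole 64-bit blocks for the interior of a range instead of setting one bit at a time, and `copy_undef_mask` (in the second file, below) builds a bulk-fill fast path on top of it. To make the bit arithmetic concrete, here is a minimal, self-contained sketch of the block-fill technique. The `Mask` type and plain `u64` bit offsets are hypothetical stand-ins for rustc's `UndefMask` and `Size`, not the compiler's actual code, and the two fill loops from the diff are condensed into one loop with a precomputed fill word:

    type Block = u64;
    const BLOCK_SIZE: u64 = 64;

    struct Mask {
        blocks: Vec<Block>,
    }

    impl Mask {
        /// Split a bit position into (block index, bit within block).
        fn bit_index(bits: u64) -> (usize, usize) {
            ((bits / BLOCK_SIZE) as usize, (bits % BLOCK_SIZE) as usize)
        }

        fn get(&self, i: u64) -> bool {
            let (block, bit) = Self::bit_index(i);
            self.blocks[block] & (1 << bit) != 0
        }

        fn set_bit(&mut self, block: usize, bit: usize, new_state: bool) {
            if new_state {
                self.blocks[block] |= 1 << bit;
            } else {
                self.blocks[block] &= !(1 << bit);
            }
        }

        /// Set every bit in `start..end` to `new_state`.
        fn set_range(&mut self, start: u64, end: u64, new_state: bool) {
            let (blocka, bita) = Self::bit_index(start);
            let (blockb, bitb) = Self::bit_index(end);
            if blocka == blockb {
                // range is contained in a single block
                for i in bita..bitb {
                    self.set_bit(blocka, i, new_state);
                }
                return;
            }
            // partial first and last blocks, bit by bit
            for i in bita..BLOCK_SIZE as usize {
                self.set_bit(blocka, i, new_state);
            }
            for i in 0..bitb {
                self.set_bit(blockb, i, new_state);
            }
            // interior blocks: one store each instead of 64 bit flips
            let fill = if new_state { !0 } else { 0 };
            for block in blocka + 1..blockb {
                self.blocks[block] = fill;
            }
        }
    }

    fn main() {
        let mut m = Mask { blocks: vec![0; 4] }; // 256 bits
        m.set_range(10, 200, true);
        assert!((10..200).all(|i| m.get(i)));
        assert!(!m.get(9) && !m.get(200));
    }

For a range of n bits the old loop performed n read-modify-write operations; the new one performs at most 127 single-bit writes for the two partial blocks plus one word store per interior block, roughly a 64x reduction for large ranges.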

src/librustc_mir/interpret/memory.rs (+20 -2)
@@ -20,7 +20,7 @@ use syntax::ast::Mutability;
 use super::{
     Pointer, AllocId, Allocation, GlobalId, AllocationExtra,
     EvalResult, Scalar, EvalErrorKind, AllocKind, PointerArithmetic,
-    Machine, AllocMap, MayLeak, ErrorHandled, InboundsCheck,
+    Machine, AllocMap, MayLeak, ErrorHandled, InboundsCheck, UndefMask,
 };
 
 #[derive(Debug, PartialEq, Eq, Copy, Clone, Hash)]
@@ -785,10 +785,28 @@ impl<'a, 'mir, 'tcx, M: Machine<'a, 'mir, 'tcx>> Memory<'a, 'mir, 'tcx, M> {
         assert_eq!(size.bytes() as usize as u64, size.bytes());
 
         let undef_mask = self.get(src.alloc_id)?.undef_mask.clone();
+        let get = |i| undef_mask.get(src.offset + Size::from_bytes(i));
         let dest_allocation = self.get_mut(dest.alloc_id)?;
 
+        // an optimization where we can just overwrite an entire range of definedness bits if
+        // they are going to be uniformly `1` or `0`.
+        if size.bytes() * repeat > UndefMask::BLOCK_SIZE {
+            let first = undef_mask.get(src.offset);
+            // check that all bits are the same as the first bit
+            // FIXME(oli-obk): consider making this a function on `UndefMask` and optimize it, too
+            if (1..size.bytes()).all(|i| get(i) == first) {
+                dest_allocation.undef_mask.set_range(
+                    dest.offset,
+                    dest.offset + size * repeat,
+                    first,
+                );
+                return Ok(())
+            }
+        }
+
+        // the default path
         for i in 0..size.bytes() {
-            let defined = undef_mask.get(src.offset + Size::from_bytes(i));
+            let defined = get(i);
 
             for j in 0..repeat {
                 dest_allocation.undef_mask.set(
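
The hunk above adds a fast path to `copy_undef_mask`: when the copied range spans more than one block and every source bit agrees with the first, a single `set_range` call replaces the `size * repeat` per-bit writes of the default path. A hedged sketch of that control flow in terms of the toy `Mask` above (`copy_mask_range` is a hypothetical name; the real function works on allocations fetched via `self.get`/`self.get_mut`, uses `Size` offsets, and returns `EvalResult`):

    /// Copy `len` mask bits starting at `src_off`, repeated `repeat` times,
    /// to `dest_off`, taking the bulk path when the source bits all agree.
    fn copy_mask_range(
        src: &Mask,
        dest: &mut Mask,
        src_off: u64,
        dest_off: u64,
        len: u64,
        repeat: u64,
    ) {
        if len * repeat > BLOCK_SIZE {
            let first = src.get(src_off);
            // all source bits equal the first one => one range fill suffices
            if (1..len).all(|i| src.get(src_off + i) == first) {
                dest.set_range(dest_off, dest_off + len * repeat, first);
                return;
            }
        }
        // the default path: bit by bit, repeating the source pattern
        for i in 0..len {
            let defined = src.get(src_off + i);
            for j in 0..repeat {
                let (block, bit) = Mask::bit_index(dest_off + j * len + i);
                dest.set_bit(block, bit, defined);
            }
        }
    }

The uniformity scan is linear in the source size alone, so for large or heavily repeated copies of all-defined or all-undefined memory the fast path does one pass plus a block-wise fill instead of `size * repeat` single-bit writes.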
