Skip to content

Commit 71b4350

Browse files
committed
Avoid copying memory representation of undef data
During MIR interpretation it may happen that a place containing uninitialized bytes is copied. This would read the current representation of those bytes and write it to the destination, even though they must, by definition, not matter to the execution. This change elides that representation copy when no bytes in the source range are defined, saving some CPU cycles. In that case, the memory of the target allocation is not touched at all, which also means that sometimes no physical page backing the memory allocation of the representation needs to be provided by the OS at all, reducing memory pressure on the system.
1 parent ff191b5 commit 71b4350

File tree

2 files changed

+24
-23
lines changed

2 files changed

+24
-23
lines changed

src/librustc/mir/interpret/allocation.rs

+6
Original file line numberDiff line numberDiff line change
@@ -694,6 +694,12 @@ impl<Tag, Extra> Allocation<Tag, Extra> {
694694
}
695695
}
696696

697+
impl AllocationDefinedness {
698+
pub fn all_bytes_undef(&self) -> bool {
699+
self.initial == false && self.ranges.len() == 1
700+
}
701+
}
702+
697703
/// Relocations.
698704
#[derive(Clone, PartialEq, Eq, PartialOrd, Ord, Hash, Debug, RustcEncodable, RustcDecodable)]
699705
pub struct Relocations<Tag = (), Id = AllocId>(SortedMap<Size, (Tag, Id)>);

src/librustc_mir/interpret/memory.rs

+18-23
Original file line numberDiff line numberDiff line change
@@ -813,6 +813,20 @@ impl<'mir, 'tcx, M: Machine<'mir, 'tcx>> Memory<'mir, 'tcx, M> {
813813
let relocations = self.get(src.alloc_id)?
814814
.prepare_relocation_copy(self, src, size, dest, length);
815815

816+
// Prepare a copy of the undef mask.
817+
let compressed = self.get(src.alloc_id)?.compress_undef_range(src, size);
818+
819+
if compressed.all_bytes_undef() {
820+
// Fast path: If all bytes are `undef` then there is nothing to copy. The target range
821+
// is marked as undef but we otherwise omit changing the byte representation which may
822+
// be arbitrary for undef bytes.
823+
// This also avoids writing to the target bytes so that the backing allocation is never
824+
// touched if the bytes stay undef for the whole interpreter execution. On contemporary
825+
// operating systems this can avoid physically allocating the page.
826+
self.get_mut(dest.alloc_id)?.mark_definedness(dest, size * length, false);
827+
return Ok(());
828+
}
829+
816830
let tcx = self.tcx.tcx;
817831

818832
// This checks relocation edges on the src.
@@ -855,8 +869,10 @@ impl<'mir, 'tcx, M: Machine<'mir, 'tcx>> Memory<'mir, 'tcx, M> {
855869
}
856870
}
857871

858-
// copy definedness to the destination
859-
self.copy_undef_mask(src, dest, size, length)?;
872+
// now copy over the undef data
873+
self.get_mut(dest.alloc_id)?
874+
.mark_compressed_undef_range(&compressed, dest, size, length);
875+
860876
// copy the relocations to the destination
861877
self.get_mut(dest.alloc_id)?.mark_relocation_range(relocations);
862878

@@ -866,27 +882,6 @@ impl<'mir, 'tcx, M: Machine<'mir, 'tcx>> Memory<'mir, 'tcx, M> {
866882

867883
/// Undefined bytes
868884
impl<'mir, 'tcx, M: Machine<'mir, 'tcx>> Memory<'mir, 'tcx, M> {
869-
// FIXME: Add a fast version for the common, nonoverlapping case
870-
fn copy_undef_mask(
871-
&mut self,
872-
src: Pointer<M::PointerTag>,
873-
dest: Pointer<M::PointerTag>,
874-
size: Size,
875-
repeat: u64,
876-
) -> InterpResult<'tcx> {
877-
// The bits have to be saved locally before writing to dest in case src and dest overlap.
878-
assert_eq!(size.bytes() as usize as u64, size.bytes());
879-
880-
let src_alloc = self.get(src.alloc_id)?;
881-
let compressed = src_alloc.compress_undef_range(src, size);
882-
883-
// now fill in all the data
884-
let dest_allocation = self.get_mut(dest.alloc_id)?;
885-
dest_allocation.mark_compressed_undef_range(&compressed, dest, size, repeat);
886-
887-
Ok(())
888-
}
889-
890885
pub fn force_ptr(
891886
&self,
892887
scalar: Scalar<M::PointerTag>,

0 commit comments

Comments
 (0)