use rustc::ty::{self, Ty, Instance, TypeFoldable};
use rustc::ty::layout::{Size, Align, LayoutOf, HasDataLayout};
use rustc::mir::interpret::{Scalar, Pointer, InterpResult, PointerArithmetic};

use super::{InterpCx, Machine, MemoryKind, FnVal};

impl<'mir, 'tcx, M: Machine<'mir, 'tcx>> InterpCx<'mir, 'tcx, M> {
    /// Creates a dynamic vtable for the given type and vtable origin. This is used only for
    /// trait objects.
    ///
    /// The `trait_ref` encodes the erased self type. Hence, if we are
    /// making an object `Foo<Trait>` from a value of type `Foo<T>`, then
    /// `trait_ref` would map `T: Trait`.
    pub fn get_vtable(
        &mut self,
        ty: Ty<'tcx>,
        poly_trait_ref: Option<ty::PolyExistentialTraitRef<'tcx>>,
    ) -> InterpResult<'tcx, Pointer<M::PointerTag>> {
        trace!("get_vtable(trait_ref={:?})", poly_trait_ref);

        let (ty, poly_trait_ref) = self.tcx.erase_regions(&(ty, poly_trait_ref));

        // All vtables must be monomorphic; bail out otherwise.
        if ty.needs_subst() || poly_trait_ref.needs_subst() {
            throw_inval!(TooGeneric);
        }

        if let Some(&vtable) = self.vtables.get(&(ty, poly_trait_ref)) {
            // This means we guarantee that there are no duplicate vtables: we will
            // always use the same vtable for the same (Type, Trait) combination.
            // That's not what happens in rustc, but emulating per-crate deduplication
            // does not sound like it would actually make anything any better.
            return Ok(vtable);
        }
        let methods = if let Some(poly_trait_ref) = poly_trait_ref {
            let trait_ref = poly_trait_ref.with_self_ty(*self.tcx, ty);
            let trait_ref = self.tcx.erase_regions(&trait_ref);
            self.tcx.vtable_methods(trait_ref)
        } else {
            &[]
        };
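        // `vtable_methods` yields one entry per trait method, in declaration order:
        // `Some((def_id, substs))` if the method can be called through this trait
        // object, and `None` if it cannot (e.g. because it has a `where Self: Sized`
        // bound).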
        let layout = self.layout_of(ty)?;
        assert!(!layout.is_unsized(), "can't create a vtable for an unsized type");
        let size = layout.size.bytes();
        let align = layout.align.abi.bytes();

        let ptr_size = self.pointer_size();
        let ptr_align = self.tcx.data_layout.pointer_align.abi;
        // /////////////////////////////////////////////////////////////////////////////////////////
        // If you touch this code, be sure to also make the corresponding changes to
        // `get_vtable` in `rustc_codegen_llvm/meth.rs`.
        // /////////////////////////////////////////////////////////////////////////////////////////
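        // The vtable consists of `3 + methods.len()` pointer-sized slots:
        //   slot 0: the drop-in-place function
        //   slot 1: the size of the concrete type
        //   slot 2: the alignment of the concrete type
        //   slot 3 onwards: one function pointer per vtable method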
        let vtable = self.memory.allocate(
            ptr_size * (3 + methods.len() as u64),
            ptr_align,
            MemoryKind::Vtable,
        );
        let tcx = &*self.tcx;
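        // Every type resolves to some drop glue here, even one with nothing to drop
        // (the glue is then a no-op).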
        let drop = Instance::resolve_drop_in_place(*tcx, ty);
        let drop = self.memory.create_fn_alloc(FnVal::Instance(drop));

        // No need to do any alignment checks on the memory accesses below, because we know the
        // allocation is correctly aligned, as we created it above. Also, we're only offsetting
        // by multiples of `ptr_align`, which means it will stay aligned to `ptr_align`.
        self.memory
            .get_mut(vtable.alloc_id)?
            .write_ptr_sized(tcx, vtable, Scalar::Ptr(drop).into())?;

        let size_ptr = vtable.offset(ptr_size, self)?;
        self.memory
            .get_mut(size_ptr.alloc_id)?
            .write_ptr_sized(tcx, size_ptr, Scalar::from_uint(size, ptr_size).into())?;
        let align_ptr = vtable.offset(ptr_size * 2, self)?;
        self.memory
            .get_mut(align_ptr.alloc_id)?
            .write_ptr_sized(tcx, align_ptr, Scalar::from_uint(align, ptr_size).into())?;
        for (i, method) in methods.iter().enumerate() {
            if let Some((def_id, substs)) = *method {
                // Resolve for vtable: insert shims where needed.
                let instance = ty::Instance::resolve_for_vtable(
                    *self.tcx,
                    self.param_env,
                    def_id,
                    substs,
                ).ok_or_else(|| err_inval!(TooGeneric))?;
                let fn_ptr = self.memory.create_fn_alloc(FnVal::Instance(instance));
                let method_ptr = vtable.offset(ptr_size * (3 + i as u64), self)?;
                self.memory
                    .get_mut(method_ptr.alloc_id)?
                    .write_ptr_sized(tcx, method_ptr, Scalar::Ptr(fn_ptr).into())?;
            }
        }
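        // `None` entries above leave their slot uninitialized; the interpreter will
        // raise an error if such a slot is ever read as a function pointer.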

        self.memory.mark_immutable(vtable.alloc_id)?;
        assert!(self.vtables.insert((ty, poly_trait_ref), vtable).is_none());

        Ok(vtable)
    }

    /// Returns the drop fn instance as well as the actual dynamic type.
    pub fn read_drop_type_from_vtable(
        &self,
        vtable: Scalar<M::PointerTag>,
    ) -> InterpResult<'tcx, (ty::Instance<'tcx>, Ty<'tcx>)> {
        // We don't care about the pointee type; we just want a pointer.
        let vtable = self.memory.check_ptr_access(
            vtable,
            self.tcx.data_layout.pointer_size,
            self.tcx.data_layout.pointer_align.abi,
        )?.expect("cannot be a ZST");
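        // The drop function is stored in slot 0 of the vtable (see `get_vtable` above).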
        let drop_fn = self.memory
            .get(vtable.alloc_id)?
            .read_ptr_sized(self, vtable)?
            .not_undef()?;
        // We *need* an instance here, no other kind of function value, to be able
        // to determine the type.
        let drop_instance = self.memory.get_fn(drop_fn)?.as_instance()?;
        trace!("Found drop fn: {:?}", drop_instance);
        let fn_sig = drop_instance.ty(*self.tcx).fn_sig(*self.tcx);
        let fn_sig = self.tcx.normalize_erasing_late_bound_regions(self.param_env, &fn_sig);
        // The drop function takes `*mut T`, where `T` is the type being dropped, so get that.
        let ty = fn_sig.inputs()[0].builtin_deref(true).unwrap().ty;
        Ok((drop_instance, ty))
    }

    pub fn read_size_and_align_from_vtable(
        &self,
        vtable: Scalar<M::PointerTag>,
    ) -> InterpResult<'tcx, (Size, Align)> {
        let pointer_size = self.pointer_size();
        // We check for a size of `3 * ptr_size`, which covers the drop fn
        // (unused here), the size, and the align (which we read below).
        let vtable = self.memory.check_ptr_access(
            vtable,
            3 * pointer_size,
            self.tcx.data_layout.pointer_align.abi,
        )?.expect("cannot be a ZST");
        let alloc = self.memory.get(vtable.alloc_id)?;
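        // Slot 1 of the vtable holds the size, slot 2 the alignment (see `get_vtable` above).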
        let size = alloc.read_ptr_sized(
            self,
            vtable.offset(pointer_size, self)?,
        )?.not_undef()?;
        let size = self.force_bits(size, pointer_size)? as u64;
        let align = alloc.read_ptr_sized(
            self,
            vtable.offset(pointer_size * 2, self)?,
        )?.not_undef()?;
        let align = self.force_bits(align, pointer_size)? as u64;

        if size >= self.tcx.data_layout().obj_size_bound() {
            throw_ub_format!("invalid vtable: \
                size is bigger than largest supported object");
        }
        Ok((Size::from_bytes(size), Align::from_bytes(align).unwrap()))
    }
}