Skip to content
New issue

Have a question about this project? Sign up for a free GitHub account to open an issue and contact its maintainers and the community.

By clicking “Sign up for GitHub”, you agree to our terms of service and privacy statement. We’ll occasionally send you account related emails.

Already on GitHub? Sign in to your account

Make librustc_codegen_llvm aware of LLVM address spaces. #51576

Closed
Closed
Changes from 1 commit
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
Next Next commit
Make librustc_codegen_llvm aware of LLVM address spaces.
In order to avoid having to overload functions based on their arguments' address
spaces (among other things), we require the presence of a "flat" address space,
i.e., an address space which is shared with every other address space.

This isn't exposed in any way to Rust code. This just makes Rust compatible with
LLVM target machines which, for example, place allocas in a different address
space. `amdgcn-amd-amdhsa-amdgiz` is a specific example: it places allocas in
address space 5, i.e., the private (work-item-level) address space.
DiamondLovesYou committed Jan 21, 2019
commit 4ffff8f00a52ac10110d018b8521b8c50fbda444
10 changes: 7 additions & 3 deletions src/librustc_codegen_llvm/abi.rs
Original file line number Diff line number Diff line change
@@ -649,10 +649,11 @@ impl<'tcx> FnTypeExt<'tcx> for FnType<'tcx, Ty<'tcx>> {
PassMode::Ignore => cx.type_void(),
PassMode::Direct(_) | PassMode::Pair(..) => {
self.ret.layout.immediate_llvm_type(cx)
.copy_addr_space(cx.flat_addr_space())
}
PassMode::Cast(cast) => cast.llvm_type(cx),
PassMode::Indirect(..) => {
llargument_tys.push(cx.type_ptr_to(self.ret.memory_ty(cx)));
llargument_tys.push(cx.type_ptr_to_flat(self.ret.memory_ty(cx)));
cx.type_void()
}
};
@@ -665,8 +666,11 @@ impl<'tcx> FnTypeExt<'tcx> for FnType<'tcx, Ty<'tcx>> {

let llarg_ty = match arg.mode {
PassMode::Ignore => continue,
PassMode::Direct(_) => arg.layout.immediate_llvm_type(cx),
PassMode::Direct(_) => arg.layout.immediate_llvm_type(cx)
.copy_addr_space(cx.flat_addr_space()),
PassMode::Pair(..) => {
// Keep the argument type address space given by
// `scalar_pair_element_llvm_type`.
llargument_tys.push(arg.layout.scalar_pair_element_llvm_type(cx, 0, true));
llargument_tys.push(arg.layout.scalar_pair_element_llvm_type(cx, 1, true));
continue;
@@ -679,7 +683,7 @@ impl<'tcx> FnTypeExt<'tcx> for FnType<'tcx, Ty<'tcx>> {
continue;
}
PassMode::Cast(cast) => cast.llvm_type(cx),
PassMode::Indirect(_, None) => cx.type_ptr_to(arg.memory_ty(cx)),
PassMode::Indirect(_, None) => cx.type_ptr_to_flat(arg.memory_ty(cx)),
};
llargument_tys.push(llarg_ty);
}
78 changes: 73 additions & 5 deletions src/librustc_codegen_llvm/builder.rs
Original file line number Diff line number Diff line change
@@ -2,7 +2,7 @@ use llvm::{AtomicRmwBinOp, AtomicOrdering, SynchronizationScope, AsmDialect};
use llvm::{self, False, BasicBlock};
use rustc_codegen_ssa::common::{IntPredicate, TypeKind, RealPredicate};
use rustc_codegen_ssa::{self, MemFlags};
use common::Funclet;
use common::{Funclet, val_addr_space, val_addr_space_opt};
use context::CodegenCx;
use type_::Type;
use type_of::LayoutLlvmExt;
@@ -18,6 +18,7 @@ use syntax;
use rustc_codegen_ssa::base::to_immediate;
use rustc_codegen_ssa::mir::operand::{OperandValue, OperandRef};
use rustc_codegen_ssa::mir::place::PlaceRef;
use rustc_target::spec::AddrSpaceIdx;
use std::borrow::Cow;
use std::ffi::CStr;
use std::ops::{Deref, Range};
@@ -846,26 +847,59 @@ impl BuilderMethods<'a, 'tcx> for Builder<'a, 'll, 'tcx> {

fn ptrtoint(&mut self, val: &'ll Value, dest_ty: &'ll Type) -> &'ll Value {
self.count_insn("ptrtoint");
let val = self.flat_addr_cast(val);
unsafe {
llvm::LLVMBuildPtrToInt(self.llbuilder, val, dest_ty, noname())
}
}

fn inttoptr(&mut self, val: &'ll Value, dest_ty: &'ll Type) -> &'ll Value {
self.count_insn("inttoptr");
let dest_ty = dest_ty.copy_addr_space(self.cx().flat_addr_space());
unsafe {
llvm::LLVMBuildIntToPtr(self.llbuilder, val, dest_ty, noname())
}
}

fn bitcast(&mut self, val: &'ll Value, dest_ty: &'ll Type) -> &'ll Value {
self.count_insn("bitcast");
let dest_ty = dest_ty.copy_addr_space(val_addr_space(val));
unsafe {
llvm::LLVMBuildBitCast(self.llbuilder, val, dest_ty, noname())
}
}

/// First casts `val` into the requested address space, then pointer-casts it
/// to `dest_ty` (re-targeted to that same address space, so the bitcast
/// itself never changes address spaces).
fn as_ptr_cast(&mut self, val: &'ll Value,
               addr_space: AddrSpaceIdx,
               dest_ty: &'ll Type) -> &'ll Value
{
    let casted = self.addrspace_cast(val, addr_space);
    let target_ty = dest_ty.copy_addr_space(addr_space);
    self.pointercast(casted, target_ty)
}
/// Convenience wrapper: `as_ptr_cast` into the target's flat address space.
fn flat_as_ptr_cast(&mut self, val: &'ll Value, dest_ty: &'ll Type) -> &'ll Value {
    let flat = self.cx().flat_addr_space();
    self.as_ptr_cast(val, flat, dest_ty)
}

/// Builds an `addrspacecast` of `val` into address space `dest`.
///
/// Non-pointer values and pointers already in `dest` are returned unchanged:
/// LLVM considers no-op address space casts to be invalid.
fn addrspace_cast(&mut self, val: &'ll Value, dest: AddrSpaceIdx) -> &'ll Value {
    let src_ty = self.cx.val_ty(val);
    // Nothing to do unless this is a pointer in a *different* address space.
    if !src_ty.is_ptr() || src_ty.address_space() == dest {
        return val;
    }

    let dest_ty = src_ty.copy_addr_space(dest);
    // Sanity-check that the target actually permits this cast.
    self.cx().check_addr_space_cast(val, dest_ty);
    self.count_insn("addrspacecast");
    unsafe {
        llvm::LLVMBuildAddrSpaceCast(self.llbuilder, val, dest_ty, noname())
    }
}

/// Convenience wrapper: cast `val` into the target's flat address space.
fn flat_addr_cast(&mut self, val: &'ll Value) -> &'ll Value {
    let flat = self.cx().flat_addr_space();
    self.addrspace_cast(val, flat)
}
fn intcast(&mut self, val: &'ll Value, dest_ty: &'ll Type, is_signed: bool) -> &'ll Value {
self.count_insn("intcast");
unsafe {
@@ -875,6 +909,7 @@ impl BuilderMethods<'a, 'tcx> for Builder<'a, 'll, 'tcx> {

fn pointercast(&mut self, val: &'ll Value, dest_ty: &'ll Type) -> &'ll Value {
self.count_insn("pointercast");
let dest_ty = dest_ty.copy_addr_space(val_addr_space(val));
unsafe {
llvm::LLVMBuildPointerCast(self.llbuilder, val, dest_ty, noname())
}
@@ -883,7 +918,18 @@ impl BuilderMethods<'a, 'tcx> for Builder<'a, 'll, 'tcx> {
/* Comparisons */
fn icmp(&mut self, op: IntPredicate, lhs: &'ll Value, rhs: &'ll Value) -> &'ll Value {
self.count_insn("icmp");

let op = llvm::IntPredicate::from_generic(op);

match (val_addr_space_opt(lhs), val_addr_space_opt(rhs)) {
(Some(l), Some(r)) if l == r => {},
(Some(l), Some(r)) if l != r => {
bug!("tried to cmp ptrs of different addr spaces: lhs {:?} rhs {:?}",
lhs, rhs);
},
_ => {},
}

unsafe {
llvm::LLVMBuildICmp(self.llbuilder, op as c_uint, lhs, rhs, noname())
}
@@ -1004,7 +1050,8 @@ impl BuilderMethods<'a, 'tcx> for Builder<'a, 'll, 'tcx> {
flags: MemFlags,
) {
let ptr_width = &self.sess().target.target.target_pointer_width;
let intrinsic_key = format!("llvm.memset.p0i8.i{}", ptr_width);
let addr_space = self.val_ty(ptr).address_space();
let intrinsic_key = format!("llvm.memset.p{}i8.i{}", addr_space, ptr_width);
let llintrinsicfn = self.get_intrinsic(&intrinsic_key);
let ptr = self.pointercast(ptr, self.type_i8p());
let align = self.const_u32(align.bytes() as u32);
@@ -1352,7 +1399,8 @@ impl BuilderMethods<'a, 'tcx> for Builder<'a, 'll, 'tcx> {
ptr: &'ll Value) -> &'ll Value {
let dest_ptr_ty = self.cx.val_ty(ptr);
let stored_ty = self.cx.val_ty(val);
let stored_ptr_ty = self.cx.type_ptr_to(stored_ty);
let stored_ptr_ty = self.cx.type_as_ptr_to(stored_ty,
dest_ptr_ty.address_space());

assert_eq!(self.cx.type_kind(dest_ptr_ty), TypeKind::Pointer);

@@ -1398,7 +1446,18 @@ impl BuilderMethods<'a, 'tcx> for Builder<'a, 'll, 'tcx> {
debug!("Type mismatch in function call of {:?}. \
Expected {:?} for param {}, got {:?}; injecting bitcast",
llfn, expected_ty, i, actual_ty);
self.bitcast(actual_val, expected_ty)
if expected_ty.is_ptr() && actual_ty.is_ptr() {
let actual_val = self.addrspace_cast(actual_val,
expected_ty.address_space());
self.pointercast(actual_val, expected_ty)
} else {
let actual_val = if actual_ty.is_ptr() {
self.flat_addr_cast(actual_val)
} else {
actual_val
};
self.bitcast(actual_val, expected_ty)
}
} else {
actual_val
}
@@ -1488,7 +1547,16 @@ impl Builder<'a, 'll, 'tcx> {
return;
}

let lifetime_intrinsic = self.cx.get_intrinsic(intrinsic);
let addr_space = self.cx.val_ty(ptr).address_space();
// Old LLVMs don't have the address space specific intrinsics.
// So as a semi-crude workaround, don't specialize if in the
// default address space.
let lifetime_intrinsic = if let AddrSpaceIdx(0) = addr_space {
self.cx.get_intrinsic(intrinsic)
} else {
let intrinsic = format!("{}.p{}i8", intrinsic, addr_space);
self.cx.get_intrinsic(&intrinsic)
};

let ptr = self.pointercast(ptr, self.cx.type_i8p());
self.call(lifetime_intrinsic, &[self.cx.const_u64(size), ptr], None);
39 changes: 32 additions & 7 deletions src/librustc_codegen_llvm/common.rs
Original file line number Diff line number Diff line change
@@ -13,7 +13,9 @@ use rustc_codegen_ssa::traits::*;
use rustc::ty::layout::{HasDataLayout, LayoutOf, self, TyLayout, Size};
use rustc::mir::interpret::{Scalar, AllocKind, Allocation};
use consts::const_alloc_to_llvm;
use rustc_codegen_ssa::common::TypeKind;
use rustc_codegen_ssa::mir::place::PlaceRef;
use rustc_target::spec::AddrSpaceIdx;

use libc::{c_uint, c_char};

@@ -170,9 +172,9 @@ impl ConstMethods<'tcx> for CodegenCx<'ll, 'tcx> {
s.len() as c_uint,
!null_terminated as Bool);
let sym = self.generate_local_symbol_name("str");
let g = self.define_global(&sym[..], self.val_ty(sc)).unwrap_or_else(||{
bug!("symbol `{}` is already defined", sym);
});
let addr_space = self.const_addr_space();
let g = self.define_global(&sym[..], self.val_ty(sc), addr_space)
.unwrap_or_else(|| bug!("symbol `{}` is already defined", sym) );
llvm::LLVMSetInitializer(g, sc);
llvm::LLVMSetGlobalConstant(g, True);
llvm::LLVMRustSetLinkage(g, llvm::Linkage::InternalLinkage);
@@ -284,6 +286,10 @@ impl ConstMethods<'tcx> for CodegenCx<'ll, 'tcx> {
}
}

fn const_as_cast(&self, val: &'ll Value, addr_space: AddrSpaceIdx) -> &'ll Value {
self.const_addrcast(val, addr_space)
}

fn scalar_to_backend(
&self,
cv: Scalar,
@@ -299,10 +305,16 @@ impl ConstMethods<'tcx> for CodegenCx<'ll, 'tcx> {
Scalar::Bits { bits, size } => {
assert_eq!(size as u64, layout.value.size(self).bytes());
let llval = self.const_uint_big(self.type_ix(bitsize), bits);
if layout.value == layout::Pointer {
unsafe { llvm::LLVMConstIntToPtr(llval, llty) }
let flat_llty = llty.copy_addr_space(self.flat_addr_space());
let llval = if layout.value == layout::Pointer {
unsafe { llvm::LLVMConstIntToPtr(llval, flat_llty) }
} else {
self.const_bitcast(llval, llty)
self.const_bitcast(llval, flat_llty)
};
if llty.is_ptr() {
self.const_as_cast(llval, llty.address_space())
} else {
llval
}
},
Scalar::Ptr(ptr) => {
@@ -311,7 +323,8 @@ impl ConstMethods<'tcx> for CodegenCx<'ll, 'tcx> {
Some(AllocKind::Memory(alloc)) => {
let init = const_alloc_to_llvm(self, alloc);
if alloc.mutability == Mutability::Mutable {
self.static_addr_of_mut(init, alloc.align, None)
self.static_addr_of_mut(init, alloc.align, None,
self.mutable_addr_space())
} else {
self.static_addr_of(init, alloc.align, None)
}
@@ -330,6 +343,7 @@ impl ConstMethods<'tcx> for CodegenCx<'ll, 'tcx> {
&self.const_usize(ptr.offset.bytes()),
1,
) };
let llval = self.const_flat_as_cast(llval);
if layout.value != layout::Pointer {
unsafe { llvm::LLVMConstPtrToInt(llval, llty) }
} else {
@@ -367,6 +381,17 @@ pub fn val_ty(v: &'ll Value) -> &'ll Type {
llvm::LLVMTypeOf(v)
}
}
/// Returns the address space of `v`'s type, or `None` when `v` is not a
/// pointer.
pub fn val_addr_space_opt(v: &'ll Value) -> Option<AddrSpaceIdx> {
    let ty = val_ty(v);
    match ty.kind() {
        TypeKind::Pointer => Some(ty.address_space()),
        _ => None,
    }
}
/// Returns the address space of `v`'s type, falling back to the default
/// address space when `v` is not a pointer.
pub fn val_addr_space(v: &'ll Value) -> AddrSpaceIdx {
    match val_addr_space_opt(v) {
        Some(idx) => idx,
        None => Default::default(),
    }
}

pub fn bytes_in_context(llcx: &'ll llvm::Context, bytes: &[u8]) -> &'ll Value {
unsafe {
68 changes: 54 additions & 14 deletions src/librustc_codegen_llvm/consts.rs
Original file line number Diff line number Diff line change
@@ -6,7 +6,7 @@ use rustc::mir::interpret::{ConstValue, Allocation, read_target_uint,
use rustc::hir::Node;
use debuginfo;
use monomorphize::MonoItem;
use common::CodegenCx;
use common::{CodegenCx, val_addr_space, val_addr_space_opt};
use monomorphize::Instance;
use syntax_pos::Span;
use rustc_target::abi::HasDataLayout;
@@ -17,6 +17,7 @@ use type_of::LayoutLlvmExt;
use value::Value;
use rustc::ty::{self, Ty};
use rustc_codegen_ssa::traits::*;
use rustc_target::spec::AddrSpaceIdx;

use rustc::ty::layout::{self, Size, Align, LayoutOf};

@@ -123,7 +124,7 @@ fn check_and_apply_linkage(
};
unsafe {
// Declare a symbol `foo` with the desired linkage.
let g1 = cx.declare_global(&sym, llty2);
let g1 = cx.declare_global(&sym, llty2, cx.flat_addr_space());
llvm::LLVMRustSetLinkage(g1, base::linkage_to_llvm(linkage));

// Declare an internal global `extern_with_linkage_foo` which
@@ -134,7 +135,8 @@ fn check_and_apply_linkage(
// zero.
let mut real_name = "_rust_extern_with_linkage_".to_string();
real_name.push_str(&sym);
let g2 = cx.define_global(&real_name, llty).unwrap_or_else(||{
let g2 = cx.define_global(&real_name, llty, cx.flat_addr_space())
.unwrap_or_else(||{
if let Some(span) = span {
cx.sess().span_fatal(
span,
@@ -151,41 +153,62 @@ fn check_and_apply_linkage(
} else {
// Generate an external declaration.
// FIXME(nagisa): investigate whether it can be changed into define_global
cx.declare_global(&sym, llty)
cx.declare_global(&sym, llty, cx.flat_addr_space())
}
}

/// Constant pointer cast of `val` to `ty`. Won't change address spaces:
/// the destination type is rewritten to keep `val`'s own address space.
pub fn ptrcast(val: &'ll Value, ty: &'ll Type) -> &'ll Value {
    let dest_ty = ty.copy_addr_space(val_addr_space(val));
    unsafe { llvm::LLVMConstPointerCast(val, dest_ty) }
}

impl CodegenCx<'ll, 'tcx> {
/// Constant bitcast of `val` to `ty`. When `val` is a pointer, the
/// destination type is re-targeted to `val`'s address space so the bitcast
/// never changes address spaces.
crate fn const_bitcast(&self, val: &'ll Value, ty: &'ll Type) -> &'ll Value {
    let dest_ty = match val_addr_space_opt(val) {
        Some(addr_space) => ty.copy_addr_space(addr_space),
        None => ty,
    };
    unsafe { llvm::LLVMConstBitCast(val, dest_ty) }
}

/// Constant `addrspacecast` of `val` into `addr_space`.
///
/// Non-pointer values and pointers already in `addr_space` are returned
/// unchanged (LLVM rejects no-op address space casts).
crate fn const_addrcast(&self, val: &'ll Value, addr_space: AddrSpaceIdx) -> &'ll Value {
    let src_ty = self.val_ty(val);
    if !src_ty.is_ptr() || src_ty.address_space() == addr_space {
        return val;
    }

    let dest_ty = src_ty.copy_addr_space(addr_space);
    // Sanity-check that the target actually permits this cast.
    self.check_addr_space_cast(val, dest_ty);
    unsafe { llvm::LLVMConstAddrSpaceCast(val, dest_ty) }
}

crate fn static_addr_of_mut(
&self,
cv: &'ll Value,
align: Align,
kind: Option<&str>,
addr_space: AddrSpaceIdx,
) -> &'ll Value {
unsafe {
let gv = match kind {
Some(kind) if !self.tcx.sess.fewer_names() => {
let name = self.generate_local_symbol_name(kind);
let gv = self.define_global(&name[..],
self.val_ty(cv)).unwrap_or_else(||{
self.val_ty(cv), addr_space).unwrap_or_else(||{
bug!("symbol `{}` is already defined", name);
});
llvm::LLVMRustSetLinkage(gv, llvm::Linkage::PrivateLinkage);
gv
},
_ => self.define_private_global(self.val_ty(cv)),
_ => self.define_private_global(self.val_ty(cv), addr_space),
};
llvm::LLVMSetInitializer(gv, cv);
set_global_alignment(&self, gv, align);
@@ -218,13 +241,18 @@ impl CodegenCx<'ll, 'tcx> {
let llty = self.layout_of(ty).llvm_type(self);
let (g, attrs) = match self.tcx.hir().get(id) {
Node::Item(&hir::Item {
ref attrs, span, node: hir::ItemKind::Static(..), ..
ref attrs, span, node: hir::ItemKind::Static(_, m, _), ..
}) => {
if self.get_declared_value(&sym[..]).is_some() {
span_bug!(span, "Conflicting symbol names for static?");
}
let addr_space = if m == hir::MutMutable || !self.type_is_freeze(ty) {
self.mutable_addr_space()
} else {
self.const_addr_space()
};

let g = self.define_global(&sym[..], llty).unwrap();
let g = self.define_global(&sym[..], llty, addr_space).unwrap();

if !self.tcx.is_reachable_non_generic(def_id) {
unsafe {
@@ -334,7 +362,8 @@ impl StaticMethods for CodegenCx<'ll, 'tcx> {
}
return gv;
}
let gv = self.static_addr_of_mut(cv, align, kind);
let gv = self.static_addr_of_mut(cv, align, kind,
self.const_addr_space());
unsafe {
llvm::LLVMSetGlobalConstant(gv, True);
}
@@ -370,6 +399,11 @@ impl StaticMethods for CodegenCx<'ll, 'tcx> {

let instance = Instance::mono(self.tcx, def_id);
let ty = instance.ty(self.tcx);

// As an optimization, all shared statics which do not have interior
// mutability are placed into read-only memory.
let llvm_mutable = is_mutable || !self.type_is_freeze(ty);

let llty = self.layout_of(ty).llvm_type(self);
let g = if val_llty == llty {
g
@@ -384,8 +418,15 @@ impl StaticMethods for CodegenCx<'ll, 'tcx> {
let linkage = llvm::LLVMRustGetLinkage(g);
let visibility = llvm::LLVMRustGetVisibility(g);

let addr_space = if llvm_mutable {
self.mutable_addr_space()
} else {
self.const_addr_space()
};

let new_g = llvm::LLVMRustGetOrInsertGlobal(
self.llmod, name_string.as_ptr(), val_llty);
self.llmod, name_string.as_ptr(), val_llty,
addr_space.0);

llvm::LLVMRustSetLinkage(new_g, linkage);
llvm::LLVMRustSetVisibility(new_g, visibility);
@@ -401,10 +442,8 @@ impl StaticMethods for CodegenCx<'ll, 'tcx> {

// As an optimization, all shared statics which do not have interior
// mutability are placed into read-only memory.
if !is_mutable {
if self.type_is_freeze(ty) {
llvm::LLVMSetGlobalConstant(g, llvm::True);
}
if !llvm_mutable {
llvm::LLVMSetGlobalConstant(g, llvm::True);
}

debuginfo::create_global_var_metadata(&self, def_id, g);
@@ -480,6 +519,7 @@ impl StaticMethods for CodegenCx<'ll, 'tcx> {

if attrs.flags.contains(CodegenFnAttrFlags::USED) {
// This static will be stored in the llvm.used variable which is an array of i8*
// Note this ignores the address space of `g`, but that's okay here.
let cast = llvm::LLVMConstPointerCast(g, self.type_i8p());
self.used_statics.borrow_mut().push(cast);
}
143 changes: 134 additions & 9 deletions src/librustc_codegen_llvm/context.rs
Original file line number Diff line number Diff line change
@@ -7,7 +7,7 @@ use monomorphize::Instance;
use value::Value;

use monomorphize::partitioning::CodegenUnit;
use type_::Type;
use type_::{Type, AddrSpaceIdx, AddrSpaceKind, };
use type_of::PointeeInfo;
use rustc_codegen_ssa::traits::*;
use libc::c_uint;
@@ -20,7 +20,8 @@ use rustc::session::Session;
use rustc::ty::layout::{LayoutError, LayoutOf, Size, TyLayout, VariantIdx};
use rustc::ty::{self, Ty, TyCtxt};
use rustc::util::nodemap::FxHashMap;
use rustc_target::spec::{HasTargetSpec, Target};
use rustc_target::abi::HasDataLayout;
use rustc_target::spec::{HasTargetSpec, Target, AddrSpaceProps, };
use rustc_codegen_ssa::callee::resolve_and_get_fn;
use rustc_codegen_ssa::base::wants_msvc_seh;
use callee::get_fn;
@@ -32,6 +33,7 @@ use std::str;
use std::sync::Arc;
use syntax::symbol::LocalInternedString;
use abi::Abi;
use std::u32;

/// There is one `CodegenCx` per compilation unit. Each one has its own LLVM
/// `llvm::Context` so that several compilation units may be optimized in parallel.
@@ -83,13 +85,18 @@ pub struct CodegenCx<'ll, 'tcx: 'll> {
pub pointee_infos: RefCell<FxHashMap<(Ty<'tcx>, Size), Option<PointeeInfo>>>,
pub isize_ty: &'ll Type,

alloca_addr_space: AddrSpaceIdx,
const_addr_space: AddrSpaceIdx,
mutable_addr_space: AddrSpaceIdx,
flat_addr_space: AddrSpaceIdx,

pub dbg_cx: Option<debuginfo::CrateDebugContext<'ll, 'tcx>>,

eh_personality: Cell<Option<&'ll Value>>,
eh_unwind_resume: Cell<Option<&'ll Value>>,
pub rust_try_fn: Cell<Option<&'ll Value>>,

intrinsics: RefCell<FxHashMap<&'static str, &'ll Value>>,
intrinsics: RefCell<FxHashMap<String, &'ll Value>>,

/// A counter that is used for generating local symbol names
local_gen_sym_counter: Cell<usize>,
@@ -276,6 +283,23 @@ impl<'ll, 'tcx> CodegenCx<'ll, 'tcx> {

let isize_ty = Type::ix_llcx(llcx, tcx.data_layout.pointer_size.bits());

let alloca_addr_space = tcx.data_layout().alloca_address_space;
let mutable_addr_space =
tcx.sess.target.target.options.addr_spaces
.get(&AddrSpaceKind::ReadWrite)
.map(|v| v.index )
.unwrap_or_default();
let const_addr_space =
tcx.sess.target.target.options.addr_spaces
.get(&AddrSpaceKind::ReadOnly)
.map(|v| v.index )
.unwrap_or(mutable_addr_space);
let flat_addr_space =
tcx.sess.target.target.options.addr_spaces
.get(&AddrSpaceKind::Flat)
.map(|v| v.index )
.unwrap_or_default();
Copy link
Member

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

Is this not always 0? Is there anything guaranteed about 0?

Copy link
Contributor Author

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

0 is always the default (therefore always present), but not necessarily the flat space. I think a lot of code elsewhere assumes this (and it's true for AMDGPU), but I'm pretty sure it can be false.

Copy link
Member

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

What if you enforce it when parsing the spec file? Then we wouldn't just be blindly assuming, and we can keep a lot of code simpler.


CodegenCx {
tcx,
check_overflow,
@@ -296,6 +320,12 @@ impl<'ll, 'tcx> CodegenCx<'ll, 'tcx> {
scalar_lltypes: Default::default(),
pointee_infos: Default::default(),
isize_ty,

alloca_addr_space,
const_addr_space,
mutable_addr_space,
flat_addr_space,

dbg_cx,
eh_personality: Cell::new(None),
eh_unwind_resume: Cell::new(None),
@@ -453,6 +483,32 @@ impl MiscMethods<'tcx> for CodegenCx<'ll, 'tcx> {
llvm::LLVMSetSection(g, section.as_ptr());
}
}
/// Returns `true` when a pointer in address space `from` may be cast into
/// address space `to`, per the target spec's `shared_with` declarations.
///
/// Fix: the original used a single `bug` closure that captured `from` for
/// BOTH failed lookups, so a missing entry for `to` reported `from`'s index
/// in the diagnostic. Each lookup now reports the index it actually failed on.
fn can_cast_addr_space(&self, from: AddrSpaceIdx, to: AddrSpaceIdx) -> bool {
    // A no-op "cast" is always allowed.
    if from == to { return true; }

    let (to_kind, _) = self.addr_space_props_from_idx(to)
        .unwrap_or_else(|| bug!("no address space kind for {}", to));
    let (_, from_props) = self.addr_space_props_from_idx(from)
        .unwrap_or_else(|| bug!("no address space kind for {}", from));

    // Legal only when `from` is declared shareable with `to`'s kind.
    from_props.shared_with.contains(&to_kind)
}
/// Address space in which `alloca`s are placed (from the target data layout).
fn alloca_addr_space(&self) -> AddrSpaceIdx {
    self.alloca_addr_space
}
/// Address space for read-only (constant) globals.
fn const_addr_space(&self) -> AddrSpaceIdx {
    self.const_addr_space
}
/// Address space for mutable (read-write) globals.
fn mutable_addr_space(&self) -> AddrSpaceIdx {
    self.mutable_addr_space
}
/// The "flat" address space, i.e. the one shared with every other
/// address space.
fn flat_addr_space(&self) -> AddrSpaceIdx {
    self.flat_addr_space
}
}

impl CodegenCx<'b, 'tcx> {
@@ -473,23 +529,23 @@ impl CodegenCx<'b, 'tcx> {
if key == $name {
let f = self.declare_cfn($name, self.type_func(&[], $ret));
llvm::SetUnnamedAddr(f, false);
self.intrinsics.borrow_mut().insert($name, f.clone());
self.intrinsics.borrow_mut().insert($name.to_string(), f.clone());
return Some(f);
}
);
($name:expr, fn(...) -> $ret:expr) => (
if key == $name {
let f = self.declare_cfn($name, self.type_variadic_func(&[], $ret));
llvm::SetUnnamedAddr(f, false);
self.intrinsics.borrow_mut().insert($name, f.clone());
self.intrinsics.borrow_mut().insert($name.to_string(), f.clone());
return Some(f);
}
);
($name:expr, fn($($arg:expr),*) -> $ret:expr) => (
if key == $name {
let f = self.declare_cfn($name, self.type_func(&[$($arg),*], $ret));
llvm::SetUnnamedAddr(f, false);
self.intrinsics.borrow_mut().insert($name, f.clone());
self.intrinsics.borrow_mut().insert($name.to_string(), f.clone());
return Some(f);
}
);
@@ -518,9 +574,53 @@ impl CodegenCx<'b, 'tcx> {
let t_v4f64 = self.type_vector(t_f64, 4);
let t_v8f64 = self.type_vector(t_f64, 8);

ifn!("llvm.memset.p0i8.i16", fn(i8p, t_i8, t_i16, t_i32, i1) -> void);
ifn!("llvm.memset.p0i8.i32", fn(i8p, t_i8, t_i32, t_i32, i1) -> void);
ifn!("llvm.memset.p0i8.i64", fn(i8p, t_i8, t_i64, t_i32, i1) -> void);
fn parse_addr_space(s: &str) -> AddrSpaceIdx {
assert!(s.starts_with("p"));
assert!(s.ends_with("i8"));
let s = &s[1..];
let s = &s[..s.len() - 2];
AddrSpaceIdx(u32::from_str_radix(s, 10).unwrap())
}

if key.starts_with("llvm.memcpy") || key.starts_with("llvm.memmove") ||
key.starts_with("llvm.memset") {

let mut split = key.split('.');
assert_eq!(Some("llvm"), split.next());
let flavor = split.next();
let flavor = flavor.unwrap();

let dst_ptr_str = split.next();
assert!(dst_ptr_str.is_some());
let dst_ptr_str = dst_ptr_str.unwrap();
let dst_asp = parse_addr_space(dst_ptr_str);
let dst_ty = self.type_i8p_as(dst_asp);

let src_ty = if flavor != "memset" {
let src_ptr_str = split.next();
assert!(src_ptr_str.is_some());
let src_ptr_str = src_ptr_str.unwrap();
let src_asp = parse_addr_space(src_ptr_str);
self.type_i8p_as(src_asp)
} else {
t_i8
};

let len_ty = match split.next() {
Some("i16") => t_i16,
Some("i32") => t_i32,
Some("i64") => t_i64,
Some("i128") => t_i128,
l => {
bug!("unknown llvm.{} intrinsic sig (len ty): {}, {:?}", flavor, key, l);
},
};
let fty = self.type_func(&[dst_ty, src_ty, len_ty, t_i32, i1], &void);
let f = self.declare_cfn(key, fty);
llvm::SetUnnamedAddr(f, false);
self.intrinsics.borrow_mut().insert(key.to_string(), f.clone());
return Some(f);
}

ifn!("llvm.trap", fn() -> void);
ifn!("llvm.debugtrap", fn() -> void);
@@ -759,6 +859,24 @@ impl CodegenCx<'b, 'tcx> {
ifn!("llvm.lifetime.start", fn(t_i64,i8p) -> void);
ifn!("llvm.lifetime.end", fn(t_i64, i8p) -> void);

if key.starts_with("llvm.lifetime") {
let mut split = key.split('.');
split.next(); split.next();

let _variant = split.next();

let addr_space = match split.next() {
Some(addr_space) => parse_addr_space(addr_space),
None => unreachable!(),
};

let fty = self.type_func(&[t_i64, self.type_i8p_as(addr_space)], &void);
let f = self.declare_cfn(key, fty);
llvm::SetUnnamedAddr(f, false);
self.intrinsics.borrow_mut().insert(key.to_string(), f.clone());
return Some(f);
}

ifn!("llvm.expect.i1", fn(i1, i1) -> i1);
ifn!("llvm.eh.typeid.for", fn(i8p) -> t_i32);
ifn!("llvm.localescape", fn(...) -> void);
@@ -795,6 +913,13 @@ impl<'b, 'tcx> CodegenCx<'b, 'tcx> {
base_n::push_str(idx as u128, base_n::ALPHANUMERIC_ONLY, &mut name);
name
}

/// Looks up the address-space kind and properties declared in the target
/// spec for the given index, or `None` when no entry uses that index.
pub fn addr_space_props_from_idx(&self, idx: AddrSpaceIdx)
    -> Option<(&AddrSpaceKind, &AddrSpaceProps)>
{
    let spaces = &self.tcx.sess.target.target.options.addr_spaces;
    spaces.iter().find(|&(_, ref props)| props.index == idx)
}
}

impl ty::layout::HasDataLayout for CodegenCx<'ll, 'tcx> {
4 changes: 2 additions & 2 deletions src/librustc_codegen_llvm/debuginfo/gdb.rs
Original file line number Diff line number Diff line change
@@ -46,9 +46,9 @@ pub fn get_or_insert_gdb_debug_scripts_section_global(cx: &CodegenCx<'ll, '_>)
unsafe {
let llvm_type = cx.type_array(cx.type_i8(),
section_contents.len() as u64);

let addr_space = cx.flat_addr_space();
let section_var = cx.define_global(section_var_name,
llvm_type).unwrap_or_else(||{
llvm_type, addr_space).unwrap_or_else(||{
bug!("symbol `{}` is already defined", section_var_name)
});
llvm::LLVMSetSection(section_var, section_name.as_ptr() as *const _);
17 changes: 11 additions & 6 deletions src/librustc_codegen_llvm/declare.rs
Original file line number Diff line number Diff line change
@@ -17,6 +17,7 @@ use rustc::ty::{self, PolyFnSig};
use rustc::ty::layout::LayoutOf;
use rustc::session::config::Sanitizer;
use rustc_data_structures::small_c_str::SmallCStr;
use rustc_target::spec::AddrSpaceIdx;
use abi::{FnType, FnTypeExt};
use attributes;
use context::CodegenCx;
@@ -85,12 +86,15 @@ impl DeclareMethods<'tcx> for CodegenCx<'ll, 'tcx> {

fn declare_global(
&self,
name: &str, ty: &'ll Type
name: &str,
ty: &'ll Type,
addr_space: AddrSpaceIdx,
) -> &'ll Value {
debug!("declare_global(name={:?})", name);
let namebuf = SmallCStr::new(name);
unsafe {
llvm::LLVMRustGetOrInsertGlobal(self.llmod, namebuf.as_ptr(), ty)
llvm::LLVMRustGetOrInsertGlobal(self.llmod, namebuf.as_ptr(), ty,
addr_space.0)
}
}

@@ -126,18 +130,19 @@ impl DeclareMethods<'tcx> for CodegenCx<'ll, 'tcx> {
fn define_global(
&self,
name: &str,
ty: &'ll Type
ty: &'ll Type,
addr_space: AddrSpaceIdx
) -> Option<&'ll Value> {
if self.get_defined_value(name).is_some() {
None
} else {
Some(self.declare_global(name, ty))
Some(self.declare_global(name, ty, addr_space))
}
}

fn define_private_global(&self, ty: &'ll Type) -> &'ll Value {
fn define_private_global(&self, ty: &'ll Type, addr_space: AddrSpaceIdx) -> &'ll Value {
unsafe {
llvm::LLVMRustInsertPrivateGlobal(self.llmod, ty)
llvm::LLVMRustInsertPrivateGlobal(self.llmod, ty, addr_space.0)
}
}

8 changes: 5 additions & 3 deletions src/librustc_codegen_llvm/intrinsic.rs
Original file line number Diff line number Diff line change
@@ -739,7 +739,9 @@ fn try_intrinsic(
if bx.sess().no_landing_pads() {
bx.call(func, &[data], None);
let ptr_align = bx.tcx().data_layout.pointer_align.abi;
bx.store(bx.const_null(bx.type_i8p()), dest, ptr_align);
let addr_space = bx.type_addr_space(bx.val_ty(dest)).unwrap();
bx.store(bx.const_null(bx.type_i8p_as(addr_space)),
dest, ptr_align);
} else if wants_msvc_seh(bx.sess()) {
codegen_msvc_try(bx, func, data, local_ptr, dest);
} else {
@@ -903,10 +905,10 @@ fn codegen_gnu_try(
// rust_try ignores the selector.
let lpad_ty = bx.type_struct(&[bx.type_i8p(), bx.type_i32()], false);
let vals = catch.landing_pad(lpad_ty, bx.eh_personality(), 1);
catch.add_clause(vals, bx.const_null(bx.type_i8p()));
catch.add_clause(vals, bx.const_null(bx.type_flat_i8p()));
let ptr = catch.extract_value(vals, 0);
let ptr_align = bx.tcx().data_layout.pointer_align.abi;
let bitcast = catch.bitcast(local_ptr, bx.type_ptr_to(bx.type_i8p()));
let bitcast = catch.bitcast(local_ptr, bx.type_ptr_to(bx.type_flat_i8p()));
catch.store(ptr, bitcast, ptr_align);
catch.ret(bx.const_i32(1));
});
13 changes: 11 additions & 2 deletions src/librustc_codegen_llvm/llvm/ffi.rs
Original file line number Diff line number Diff line change
@@ -656,6 +656,7 @@ extern "C" {

pub fn LLVMGetElementType(Ty: &Type) -> &Type;
pub fn LLVMGetVectorSize(VectorTy: &Type) -> c_uint;
pub fn LLVMGetPointerAddressSpace(Ty: &Type) -> c_uint;

// Operations on other types
pub fn LLVMVoidTypeInContext(C: &Context) -> &Type;
@@ -716,6 +717,7 @@ extern "C" {
pub fn LLVMConstIntToPtr(ConstantVal: &'a Value, ToType: &'a Type) -> &'a Value;
pub fn LLVMConstBitCast(ConstantVal: &'a Value, ToType: &'a Type) -> &'a Value;
pub fn LLVMConstPointerCast(ConstantVal: &'a Value, ToType: &'a Type) -> &'a Value;
pub fn LLVMConstAddrSpaceCast(ConstantVal: &'a Value, ToType: &'a Type) -> &'a Value;
pub fn LLVMConstExtractValue(AggConstant: &Value,
IdxList: *const c_uint,
NumIdx: c_uint)
@@ -737,8 +739,10 @@ extern "C" {
pub fn LLVMIsAGlobalVariable(GlobalVar: &Value) -> Option<&Value>;
pub fn LLVMAddGlobal(M: &'a Module, Ty: &'a Type, Name: *const c_char) -> &'a Value;
pub fn LLVMGetNamedGlobal(M: &Module, Name: *const c_char) -> Option<&Value>;
pub fn LLVMRustGetOrInsertGlobal(M: &'a Module, Name: *const c_char, T: &'a Type) -> &'a Value;
pub fn LLVMRustInsertPrivateGlobal(M: &'a Module, T: &'a Type) -> &'a Value;
pub fn LLVMRustGetOrInsertGlobal(M: &'a Module, Name: *const c_char, T: &'a Type,
AS: c_uint) -> &'a Value;
pub fn LLVMRustInsertPrivateGlobal(M: &'a Module, T: &'a Type,
AS: c_uint) -> &'a Value;
pub fn LLVMGetFirstGlobal(M: &Module) -> Option<&Value>;
pub fn LLVMGetNextGlobal(GlobalVar: &Value) -> Option<&Value>;
pub fn LLVMDeleteGlobal(GlobalVar: &Value);
@@ -1083,6 +1087,11 @@ extern "C" {
DestTy: &'a Type,
Name: *const c_char)
-> &'a Value;
pub fn LLVMBuildAddrSpaceCast(B: &Builder<'a>,
Val: &'a Value,
DestTy: &'a Type,
Name: *const c_char)
-> &'a Value;
pub fn LLVMRustBuildIntCast(B: &Builder<'a>,
Val: &'a Value,
DestTy: &'a Type,
8 changes: 7 additions & 1 deletion src/librustc_codegen_llvm/mono_item.rs
Original file line number Diff line number Diff line change
@@ -4,6 +4,7 @@ use context::CodegenCx;
use llvm;
use monomorphize::Instance;
use type_of::LayoutLlvmExt;
use rustc::hir::def::Def;
use rustc::hir::def_id::{DefId, LOCAL_CRATE};
use rustc::mir::mono::{Linkage, Visibility};
use rustc::ty::TypeFoldable;
@@ -22,7 +23,12 @@ impl PreDefineMethods<'tcx> for CodegenCx<'ll, 'tcx> {
let ty = instance.ty(self.tcx);
let llty = self.layout_of(ty).llvm_type(self);

let g = self.define_global(symbol_name, llty).unwrap_or_else(|| {
let addr_space = match self.tcx.describe_def(def_id) {
Some(Def::Static(_, true)) => self.mutable_addr_space(),
_ => self.const_addr_space(),
};

let g = self.define_global(symbol_name, llty, addr_space).unwrap_or_else(|| {
self.sess().span_fatal(self.tcx.def_span(def_id),
&format!("symbol `{}` is already defined", symbol_name))
});
52 changes: 45 additions & 7 deletions src/librustc_codegen_llvm/type_.rs
Original file line number Diff line number Diff line change
@@ -24,6 +24,8 @@ use std::ptr;

use libc::c_uint;

pub use rustc_target::spec::{AddrSpaceKind, AddrSpaceIdx};

impl PartialEq for Type {
fn eq(&self, other: &Self) -> bool {
ptr::eq(self, other)
@@ -186,10 +188,8 @@ impl BaseTypeMethods<'tcx> for CodegenCx<'ll, 'tcx> {
}
}

fn type_ptr_to(&self, ty: &'ll Type) -> &'ll Type {
assert_ne!(self.type_kind(ty), TypeKind::Function,
"don't call ptr_to on function types, use ptr_to_llvm_type on FnType instead");
ty.ptr_to()
fn type_as_ptr_to(&self, ty: &'ll Type, addr_space: AddrSpaceIdx) -> &'ll Type {
ty.ptr_to(addr_space)
}

fn element_type(&self, ty: &'ll Type) -> &'ll Type {
@@ -237,6 +237,14 @@ impl BaseTypeMethods<'tcx> for CodegenCx<'ll, 'tcx> {
fn scalar_lltypes(&self) -> &RefCell<FxHashMap<Ty<'tcx>, Self::Type>> {
&self.scalar_lltypes
}

fn type_addr_space(&self, ty: &'ll Type) -> Option<AddrSpaceIdx> {
if self.type_kind(ty) == TypeKind::Pointer {
Some(ty.address_space())
} else {
None
}
}
}

impl Type {
@@ -257,12 +265,42 @@ impl Type {
}

pub fn i8p_llcx(llcx: &'ll llvm::Context) -> &'ll Type {
Type::i8_llcx(llcx).ptr_to()
Type::i8_llcx(llcx).ptr_to(Default::default())
}

pub fn kind(&self) -> TypeKind {
unsafe {
llvm::LLVMRustGetTypeKind(self).to_generic()
}
}
pub fn is_ptr(&self) -> bool {
self.kind() == TypeKind::Pointer
}

fn ptr_to(&self) -> &Type {
fn element_type(&self) -> &Type {
unsafe {
llvm::LLVMPointerType(&self, 0)
llvm::LLVMGetElementType(self)
}
}

fn ptr_to(&self, addr_space: AddrSpaceIdx) -> &Type {
unsafe {
llvm::LLVMPointerType(&self,
addr_space.0)
}
}
pub fn address_space(&self) -> AddrSpaceIdx {
AddrSpaceIdx(unsafe {
llvm::LLVMGetPointerAddressSpace(self)
})
}
/// Returns `self` with its address space replaced by `addr_space`.
///
/// Non-pointer types carry no address space and are returned unchanged;
/// a pointer that is already in `addr_space` is returned as-is rather
/// than rebuilt.
pub fn copy_addr_space(&self, addr_space: AddrSpaceIdx) -> &Type {
    // Only pointer types have an address space to copy over.
    if !self.is_ptr() { return self; }

    if addr_space != self.address_space() {
        // Rebuild the pointer type with the same pointee in the new space.
        self.element_type().ptr_to(addr_space)
    } else {
        self
    }
}
}
10 changes: 8 additions & 2 deletions src/librustc_codegen_llvm/type_of.rs
Original file line number Diff line number Diff line change
@@ -237,6 +237,12 @@ impl<'tcx> LayoutLlvmExt<'tcx> for TyLayout<'tcx> {
/// with the inner-most trailing unsized field using the "minimal unit"
/// of that field's type - this is useful for taking the address of
/// that field and ensuring the struct has the right alignment.
///
/// Note: the address space used for ptrs is important. Due to the nested
/// nature of these types, we must assume pointers are in the flat space.
/// Spaces are overridden as needed (or will be, in a later patch), when it
/// is known in which space a memory location will reside.
///
fn llvm_type<'a>(&self, cx: &CodegenCx<'a, 'tcx>) -> &'a Type {
if let layout::Abi::Scalar(ref scalar) = self.abi {
// Use a different cache for scalars because pointers to DSTs
@@ -247,10 +253,10 @@ impl<'tcx> LayoutLlvmExt<'tcx> for TyLayout<'tcx> {
let llty = match self.ty.sty {
ty::Ref(_, ty, _) |
ty::RawPtr(ty::TypeAndMut { ty, .. }) => {
cx.type_ptr_to(cx.layout_of(ty).llvm_type(cx))
cx.type_ptr_to_flat(cx.layout_of(ty).llvm_type(cx))
}
ty::Adt(def, _) if def.is_box() => {
cx.type_ptr_to(cx.layout_of(self.ty.boxed_ty()).llvm_type(cx))
cx.type_ptr_to_flat(cx.layout_of(self.ty.boxed_ty()).llvm_type(cx))
}
ty::FnPtr(sig) => {
let sig = cx.tcx.normalize_erasing_late_bound_regions(
5 changes: 5 additions & 0 deletions src/librustc_codegen_ssa/mir/block.rs
Original file line number Diff line number Diff line change
@@ -279,6 +279,8 @@ impl<'a, 'tcx: 'a, Bx: BuilderMethods<'a, 'tcx>> FunctionCx<'a, 'tcx, Bx> {
bx.load(addr, self.fn_ty.ret.layout.align.abi)
}
};
// make sure pointers are flat:
let llval = bx.flat_addr_cast(llval);
bx.ret(llval);
}

@@ -389,6 +391,7 @@ impl<'a, 'tcx: 'a, Bx: BuilderMethods<'a, 'tcx>> FunctionCx<'a, 'tcx, Bx> {
align,
Some("panic_bounds_check_loc")
);
let file_line_col = bx.cx().const_flat_as_cast(file_line_col);
(lang_items::PanicBoundsCheckFnLangItem,
vec![file_line_col, index, len])
}
@@ -405,6 +408,7 @@ impl<'a, 'tcx: 'a, Bx: BuilderMethods<'a, 'tcx>> FunctionCx<'a, 'tcx, Bx> {
align,
Some("panic_loc")
);
let msg_file_line_col = bx.cx().const_flat_as_cast(msg_file_line_col);
(lang_items::PanicFnLangItem,
vec![msg_file_line_col])
}
@@ -529,6 +533,7 @@ impl<'a, 'tcx: 'a, Bx: BuilderMethods<'a, 'tcx>> FunctionCx<'a, 'tcx, Bx> {
align,
Some("panic_loc"),
);
let msg_file_line_col = bx.cx().const_flat_as_cast(msg_file_line_col);

// Obtain the panic entry point.
let def_id =
12 changes: 12 additions & 0 deletions src/librustc_codegen_ssa/mir/place.rs
Original file line number Diff line number Diff line change
@@ -45,6 +45,18 @@ impl<'a, 'tcx: 'a, V: CodegenObject> PlaceRef<'tcx, V> {
bx: &mut Bx,
layout: TyLayout<'tcx>,
name: &str
) -> Self {
debug!("alloca({:?}: {:?})", name, layout);
assert!(!layout.is_unsized(), "tried to statically allocate unsized place");
let tmp = bx.alloca(bx.cx().backend_type(layout), name, layout.align.abi);
Self::new_sized(bx.flat_addr_cast(tmp), layout, layout.align.abi)
}

/// An alloca, left in the alloca address space. If unsure, use `alloca` below.
pub fn alloca_addr_space<Bx: BuilderMethods<'a, 'tcx, Value = V>>(
bx: &mut Bx,
layout: TyLayout<'tcx>,
name: &str
) -> Self {
debug!("alloca({:?}: {:?})", name, layout);
assert!(!layout.is_unsized(), "tried to statically allocate unsized place");
36 changes: 32 additions & 4 deletions src/librustc_codegen_ssa/mir/rvalue.rs
Original file line number Diff line number Diff line change
@@ -89,6 +89,7 @@ impl<'a, 'tcx: 'a, Bx: BuilderMethods<'a, 'tcx>> FunctionCx<'a, 'tcx, Bx> {
}
let zero = bx.cx().const_usize(0);
let start = dest.project_index(&mut bx, zero).llval;
let start = bx.flat_addr_cast(start);

if let OperandValue::Immediate(v) = cg_elem.val {
let size = bx.cx().const_usize(dest.layout.size.bytes());
@@ -110,6 +111,7 @@ impl<'a, 'tcx: 'a, Bx: BuilderMethods<'a, 'tcx>> FunctionCx<'a, 'tcx, Bx> {

let count = bx.cx().const_usize(count);
let end = dest.project_index(&mut bx, count).llval;
let end = bx.flat_addr_cast(end);

let mut header_bx = bx.build_sibling_block("repeat_loop_header");
let mut body_bx = bx.build_sibling_block("repeat_loop_body");
@@ -243,6 +245,7 @@ impl<'a, 'tcx: 'a, Bx: BuilderMethods<'a, 'tcx>> FunctionCx<'a, 'tcx, Bx> {
// until LLVM removes pointee types.
let lldata = bx.pointercast(lldata,
bx.cx().scalar_pair_element_backend_type(cast, 0, true));
let lldata = bx.flat_addr_cast(lldata);
OperandValue::Pair(lldata, llextra)
}
OperandValue::Immediate(lldata) => {
@@ -350,11 +353,19 @@ impl<'a, 'tcx: 'a, Bx: BuilderMethods<'a, 'tcx>> FunctionCx<'a, 'tcx, Bx> {
}
(CastTy::Ptr(_), CastTy::Ptr(_)) |
(CastTy::FnPtr, CastTy::Ptr(_)) |
(CastTy::RPtr(_), CastTy::Ptr(_)) =>
bx.pointercast(llval, ll_t_out),
(CastTy::RPtr(_), CastTy::Ptr(_)) => {
// This is left in its original address space. This is okay
// because a &mut T -> &T cast wouldn't change the address
// space used to load it.
bx.pointercast(llval, ll_t_out)
}
(CastTy::Ptr(_), CastTy::Int(_)) |
(CastTy::FnPtr, CastTy::Int(_)) =>
bx.ptrtoint(llval, ll_t_out),
(CastTy::FnPtr, CastTy::Int(_)) => {
// Ensure the ptr is in the flat address space.
// This might not be required, but it is safe.
let llval = bx.flat_addr_cast(llval);
bx.ptrtoint(llval, ll_t_out)
},
(CastTy::Int(_), CastTy::Ptr(_)) => {
let usize_llval = bx.intcast(llval, bx.cx().type_isize(), signed);
bx.inttoptr(usize_llval, ll_t_out)
@@ -607,6 +618,17 @@ impl<'a, 'tcx: 'a, Bx: BuilderMethods<'a, 'tcx>> FunctionCx<'a, 'tcx, Bx> {
lhs, rhs
)
} else {
// In case we're in separate addr spaces.
// Can happen when cmp against null_mut, eg.
// `infer-addr-spaces` should propagate.
let lhs_ty = bx.cx().val_ty(rhs);
let (lhs, rhs) = if bx.cx().type_addr_space(lhs_ty).is_some() {
assert!(bx.cx().type_addr_space(bx.cx().val_ty(rhs)).is_some());
(bx.flat_addr_cast(lhs),
bx.flat_addr_cast(rhs))
} else {
(lhs, rhs)
};
bx.icmp(
base::bin_op_to_icmp_predicate(op.to_hir_binop(), is_signed),
lhs, rhs
@@ -625,6 +647,12 @@ impl<'a, 'tcx: 'a, Bx: BuilderMethods<'a, 'tcx>> FunctionCx<'a, 'tcx, Bx> {
rhs_extra: Bx::Value,
_input_ty: Ty<'tcx>,
) -> Bx::Value {
// In case we're in separate addr spaces.
// Can happen when cmp against null_mut, eg.
// `infer-addr-spaces` should propagate.
let lhs_addr = bx.flat_addr_cast(lhs_addr);
let rhs_addr = bx.flat_addr_cast(rhs_addr);

match op {
mir::BinOp::Eq => {
let lhs = bx.icmp(IntPredicate::IntEQ, lhs_addr, rhs_addr);
13 changes: 13 additions & 0 deletions src/librustc_codegen_ssa/traits/builder.rs
Original file line number Diff line number Diff line change
@@ -9,6 +9,7 @@ use mir::operand::OperandRef;
use mir::place::PlaceRef;
use rustc::ty::Ty;
use rustc::ty::layout::{Align, Size};
use rustc_target::spec::AddrSpaceIdx;
use std::ffi::CStr;
use MemFlags;

@@ -155,8 +156,20 @@ pub trait BuilderMethods<'a, 'tcx: 'a>:
fn inttoptr(&mut self, val: Self::Value, dest_ty: Self::Type) -> Self::Value;
fn bitcast(&mut self, val: Self::Value, dest_ty: Self::Type) -> Self::Value;
fn intcast(&mut self, val: Self::Value, dest_ty: Self::Type, is_signed: bool) -> Self::Value;
/// Impls should ignore the address space of `dest_ty`.
fn pointercast(&mut self, val: Self::Value, dest_ty: Self::Type) -> Self::Value;

/// address space casts, then bitcasts to dest_ty without changing address spaces.
fn as_ptr_cast(&mut self,
val: Self::Value,
addr_space: AddrSpaceIdx,
dest_ty: Self::Type) -> Self::Value;
fn addrspace_cast(&mut self, val: Self::Value,
dest: AddrSpaceIdx) -> Self::Value;
fn flat_addr_cast(&mut self, val: Self::Value) -> Self::Value;
fn flat_as_ptr_cast(&mut self, val: Self::Value,
dest_ty: Self::Type) -> Self::Value;

fn icmp(&mut self, op: IntPredicate, lhs: Self::Value, rhs: Self::Value) -> Self::Value;
fn fcmp(&mut self, op: RealPredicate, lhs: Self::Value, rhs: Self::Value) -> Self::Value;

10 changes: 8 additions & 2 deletions src/librustc_codegen_ssa/traits/consts.rs
Original file line number Diff line number Diff line change
@@ -1,11 +1,12 @@
use super::BackendTypes;
use super::MiscMethods;
use mir::place::PlaceRef;
use rustc::mir::interpret::Allocation;
use rustc::mir::interpret::Scalar;
use rustc::ty::layout;
use syntax::symbol::LocalInternedString;
use rustc_target::spec::AddrSpaceIdx;

pub trait ConstMethods<'tcx>: BackendTypes {
pub trait ConstMethods<'tcx>: MiscMethods<'tcx> {
// Constant constructors
fn const_null(&self, t: Self::Type) -> Self::Value;
fn const_undef(&self, t: Self::Type) -> Self::Value;
@@ -35,6 +36,11 @@ pub trait ConstMethods<'tcx>: BackendTypes {
fn const_to_uint(&self, v: Self::Value) -> u64;
fn const_to_opt_u128(&self, v: Self::Value, sign_ext: bool) -> Option<u128>;

fn const_as_cast(&self, v: Self::Value, space: AddrSpaceIdx) -> Self::Value;
fn const_flat_as_cast(&self, v: Self::Value) -> Self::Value {
self.const_as_cast(v, self.flat_addr_space())
}

fn is_const_integral(&self, v: Self::Value) -> bool;
fn is_const_real(&self, v: Self::Value) -> bool;

10 changes: 7 additions & 3 deletions src/librustc_codegen_ssa/traits/declare.rs
Original file line number Diff line number Diff line change
@@ -3,13 +3,15 @@ use rustc::hir::def_id::DefId;
use rustc::mir::mono::{Linkage, Visibility};
use rustc::ty;
use rustc_mir::monomorphize::Instance;
use rustc_target::spec::AddrSpaceIdx;

pub trait DeclareMethods<'tcx>: BackendTypes {
/// Declare a global value.
///
/// If there’s a value with the same name already declared, the function will
/// return its Value instead.
fn declare_global(&self, name: &str, ty: Self::Type) -> Self::Value;
fn declare_global(&self, name: &str, ty: Self::Type,
addr_space: AddrSpaceIdx) -> Self::Value;

/// Declare a C ABI function.
///
@@ -32,12 +34,14 @@ pub trait DeclareMethods<'tcx>: BackendTypes {
/// return None if the name already has a definition associated with it. In that
/// case an error should be reported to the user, because it usually happens due
/// to user’s fault (e.g., misuse of #[no_mangle] or #[export_name] attributes).
fn define_global(&self, name: &str, ty: Self::Type) -> Option<Self::Value>;
fn define_global(&self, name: &str, ty: Self::Type,
addr_space: AddrSpaceIdx) -> Option<Self::Value>;

/// Declare a private global
///
/// Use this function when you intend to define a global without a name.
fn define_private_global(&self, ty: Self::Type) -> Self::Value;
fn define_private_global(&self, ty: Self::Type,
addr_space: AddrSpaceIdx) -> Self::Value;

/// Declare a Rust function with an intention to define it.
///
7 changes: 7 additions & 0 deletions src/librustc_codegen_ssa/traits/misc.rs
Original file line number Diff line number Diff line change
@@ -5,6 +5,7 @@ use rustc::session::Session;
use rustc::ty::{self, Instance, Ty};
use rustc::util::nodemap::FxHashMap;
use rustc_mir::monomorphize::partitioning::CodegenUnit;
use rustc_target::spec::AddrSpaceIdx;
use std::cell::RefCell;
use std::sync::Arc;

@@ -26,4 +27,10 @@ pub trait MiscMethods<'tcx>: BackendTypes {
fn set_frame_pointer_elimination(&self, llfn: Self::Value);
fn apply_target_cpu_attr(&self, llfn: Self::Value);
fn create_used_variable(&self);

fn can_cast_addr_space(&self, _from: AddrSpaceIdx, _to: AddrSpaceIdx) -> bool { true }
fn alloca_addr_space(&self) -> AddrSpaceIdx { Default::default() }
fn const_addr_space(&self) -> AddrSpaceIdx { Default::default() }
fn mutable_addr_space(&self) -> AddrSpaceIdx { Default::default() }
fn flat_addr_space(&self) -> AddrSpaceIdx { Default::default() }
}
14 changes: 14 additions & 0 deletions src/librustc_codegen_ssa/traits/mod.rs
Original file line number Diff line number Diff line change
@@ -59,6 +59,20 @@ pub trait CodegenMethods<'tcx>:
+ AsmMethods<'tcx>
+ PreDefineMethods<'tcx>
{
/// Check that we can actually cast between these addr spaces.
///
/// ICEs when both `val`'s type and `dest` are pointer types but the
/// target reports the two address spaces as not castable to one another
/// (per `can_cast_addr_space`). Non-pointer operands have no address
/// space (`type_addr_space` returns `None`) and are always accepted.
fn check_addr_space_cast(&self, val: Self::Value, dest: Self::Type) {
    let src_ty = self.val_ty(val);

    match (self.type_addr_space(src_ty), self.type_addr_space(dest)) {
        // Both sides are pointers and the target forbids this cast.
        (Some(left), Some(right)) if !self.can_cast_addr_space(left, right) => {
            bug!("Target incompatible address space cast:\n\
                  source addr space `{}`, dest addr space `{}`\n\
                  source value: {:?}, dest ty: {:?}",
                 left, right, val, dest);
        },
        _ => { },
    }
}
}

impl<'tcx, T> CodegenMethods<'tcx> for T where
77 changes: 76 additions & 1 deletion src/librustc_codegen_ssa/traits/type_.rs
Original file line number Diff line number Diff line change
@@ -7,6 +7,7 @@ use rustc::ty::layout::{self, Align, Size, TyLayout};
use rustc::ty::{self, Ty};
use rustc::util::nodemap::FxHashMap;
use rustc_target::abi::call::{ArgType, CastTarget, FnType, Reg};
use rustc_target::spec::AddrSpaceIdx;
use std::cell::RefCell;
use syntax::ast;

@@ -36,7 +37,13 @@ pub trait BaseTypeMethods<'tcx>: Backend<'tcx> {
fn type_array(&self, ty: Self::Type, len: u64) -> Self::Type;
fn type_vector(&self, ty: Self::Type, len: u64) -> Self::Type;
fn type_kind(&self, ty: Self::Type) -> TypeKind;
fn type_ptr_to(&self, ty: Self::Type) -> Self::Type;

/// Return a pointer to `ty` in the default address space.
fn type_ptr_to(&self, ty: Self::Type) -> Self::Type {
self.type_as_ptr_to(ty, Default::default())
}
fn type_as_ptr_to(&self, ty: Self::Type, addr_space: AddrSpaceIdx) -> Self::Type;

fn element_type(&self, ty: Self::Type) -> Self::Type;

/// Return the number of elements in `self` if it is a LLVM vector type.
@@ -49,7 +56,21 @@ pub trait BaseTypeMethods<'tcx>: Backend<'tcx> {
fn int_width(&self, ty: Self::Type) -> u64;

fn val_ty(&self, v: Self::Value) -> Self::Type;
fn val_addr_space(&self, v: Self::Value) -> Option<AddrSpaceIdx> {
self.type_addr_space(self.val_ty(v))
}
fn scalar_lltypes(&self) -> &RefCell<FxHashMap<Ty<'tcx>, Self::Type>>;

fn type_addr_space(&self, ty: Self::Type) -> Option<AddrSpaceIdx>;
fn type_copy_addr_space(&self, ty: Self::Type, addr_space: Option<AddrSpaceIdx>) -> Self::Type {
match (addr_space, self.type_kind(ty)) {
(Some(addr_space), TypeKind::Pointer) => {
let elem = self.element_type(ty);
self.type_as_ptr_to(elem, addr_space)
},
_ => ty,
}
}
}

pub trait DerivedTypeMethods<'tcx>: BaseTypeMethods<'tcx> + MiscMethods<'tcx> {
@@ -60,6 +81,21 @@ pub trait DerivedTypeMethods<'tcx>: BaseTypeMethods<'tcx> + MiscMethods<'tcx> {
fn type_i8p(&self) -> Self::Type {
self.type_ptr_to(self.type_i8())
}
fn type_i8p_as(&self, addr_space: AddrSpaceIdx) -> Self::Type {
self.type_as_ptr_to(self.type_i8(), addr_space)
}
fn type_alloca_i8p(&self) -> Self::Type {
self.type_i8p_as(self.alloca_addr_space())
}
fn type_const_i8p(&self) -> Self::Type {
self.type_i8p_as(self.const_addr_space())
}
fn type_mut_i8p(&self) -> Self::Type {
self.type_i8p_as(self.mutable_addr_space())
}
fn type_flat_i8p(&self) -> Self::Type {
self.type_i8p_as(self.flat_addr_space())
}

fn type_int(&self) -> Self::Type {
match &self.sess().target.target.target_c_int_width[..] {
@@ -151,6 +187,45 @@ pub trait DerivedTypeMethods<'tcx>: BaseTypeMethods<'tcx> + MiscMethods<'tcx> {
_ => bug!("unexpected unsized tail: {:?}", tail.sty),
}
}
/// Enforce no address space changes are happening in a cast.
/// Pointers in different address spaces can have different
/// machine level sizes (ie on AMDGPU, allocas are 32bits,
/// not 64bits!). We enforce that the flat address space is the
/// largest (+alignment), so that address space is safe to cast to
/// ints/etc. Also, address space changes require computing an offset
/// or two, so a straight bitcast is wrong.
fn type_check_no_addr_space_change(&self, what: &str,
src: Self::Value,
dest_ty: Self::Type) {
let src_ty = self.val_ty(src);
match (self.type_addr_space(src_ty), self.type_addr_space(dest_ty)) {
(Some(src_as), Some(dest_as)) if src_as != dest_as => {
bug!("Invalid address space cast in `{}` cast:\n\
source addr space `{}`, dest addr space `{}`\n\
source value: {:?}, dest ty: {:?}", what,
src_as, dest_as, src, dest_ty);
},
(Some(src_as), None) if src_as != self.flat_addr_space() => {
bug!("Invalid address space cast in `{}` cast:\n\
source addr space `{}` is not flat\n\
source value: {:?}",
what, src_as, src);
},
_ => { },
}
}
fn type_ptr_to_alloca(&self, ty: Self::Type) -> Self::Type {
self.type_as_ptr_to(ty, self.alloca_addr_space())
}
fn type_ptr_to_const(&self, ty: Self::Type) -> Self::Type {
self.type_as_ptr_to(ty, self.const_addr_space())
}
fn type_ptr_to_mut(&self, ty: Self::Type) -> Self::Type {
self.type_as_ptr_to(ty, self.mutable_addr_space())
}
fn type_ptr_to_flat(&self, ty: Self::Type) -> Self::Type {
self.type_as_ptr_to(ty, self.flat_addr_space())
}
}

impl<T> DerivedTypeMethods<'tcx> for T where Self: BaseTypeMethods<'tcx> + MiscMethods<'tcx> {}
155 changes: 140 additions & 15 deletions src/librustc_target/abi/mod.rs
Original file line number Diff line number Diff line change
@@ -1,7 +1,7 @@
pub use self::Integer::*;
pub use self::Primitive::*;

use spec::Target;
use spec::{Target, AddrSpaceIdx, };
Copy link
Member

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

Nit: you have these , in a few places, you should remove them.

Copy link
Contributor Author

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

Ok


use std::fmt;
use std::ops::{Add, Deref, Sub, Mul, AddAssign, Range, RangeInclusive};
@@ -22,13 +22,16 @@ pub struct TargetDataLayout {
pub i128_align: AbiAndPrefAlign,
pub f32_align: AbiAndPrefAlign,
pub f64_align: AbiAndPrefAlign,
pub pointers: Vec<Option<(Size, AbiAndPrefAlign)>>,
pub pointer_size: Size,
pub pointer_align: AbiAndPrefAlign,
pub aggregate_align: AbiAndPrefAlign,

/// Alignments for vector types.
pub vector_align: Vec<(Size, AbiAndPrefAlign)>,

pub alloca_address_space: AddrSpaceIdx,

pub instruction_address_space: u32,
}

@@ -46,9 +49,11 @@ impl Default for TargetDataLayout {
i128_align: AbiAndPrefAlign { abi: align(32), pref: align(64) },
f32_align: AbiAndPrefAlign::new(align(32)),
f64_align: AbiAndPrefAlign::new(align(64)),
pointers: vec![],
pointer_size: Size::from_bits(64),
pointer_align: AbiAndPrefAlign::new(align(64)),
aggregate_align: AbiAndPrefAlign { abi: align(0), pref: align(64) },
alloca_address_space: Default::default(),
vector_align: vec![
(Size::from_bits(64), AbiAndPrefAlign::new(align(64))),
(Size::from_bits(128), AbiAndPrefAlign::new(align(128))),
@@ -60,14 +65,6 @@ impl Default for TargetDataLayout {

impl TargetDataLayout {
pub fn parse(target: &Target) -> Result<TargetDataLayout, String> {
// Parse an address space index from a string.
let parse_address_space = |s: &str, cause: &str| {
s.parse::<u32>().map_err(|err| {
format!("invalid address space `{}` for `{}` in \"data-layout\": {}",
s, cause, err)
})
};

// Parse a bit count from a string.
let parse_bits = |s: &str, kind: &str, cause: &str| {
s.parse::<u64>().map_err(|err| {
@@ -100,23 +97,38 @@ impl TargetDataLayout {
})
};

/// Store `v` at position `idx` in `vec`, first growing the vector with
/// default values if it is not yet long enough to hold that index.
fn resize_and_set<T: Default>(vec: &mut Vec<T>, idx: usize, v: T) {
    // Pad with `T::default()` until `idx` is a valid index.
    while vec.len() <= idx {
        vec.push(Default::default());
    }
    vec[idx] = v;
}

let mut dl = TargetDataLayout::default();
let mut i128_align_src = 64;
for spec in target.data_layout.split('-') {
match spec.split(':').collect::<Vec<_>>()[..] {
["e"] => dl.endian = Endian::Little,
["E"] => dl.endian = Endian::Big,
[p] if p.starts_with("P") => {
dl.instruction_address_space = parse_address_space(&p[1..], "P")?
}
["a", ref a..] => dl.aggregate_align = align(a, "a")?,
["f32", ref a..] => dl.f32_align = align(a, "f32")?,
["f64", ref a..] => dl.f64_align = align(a, "f64")?,
[p @ "p", s, ref a..] | [p @ "p0", s, ref a..] => {
dl.pointer_size = size(s, p)?;
dl.pointer_align = align(a, p)?;
}
[s, ref a..] if s.starts_with("i") => {
resize_and_set(&mut dl.pointers, 0, Some((dl.pointer_size,
dl.pointer_align)));
},
[p, s, ref a..] if p.starts_with('p') => {
let idx = parse_bits(&p[1..], "u32", "address space index")? as usize;
let size = size(s, p)?;
let align = align(a, p)?;
resize_and_set(&mut dl.pointers, idx, Some((size, align)));
},
Copy link
Member

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

Would we lose anything by enforcing that all address spaces have the same size & alignment for pointers?

Copy link
Contributor Author

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

Yes, AMDGPU targets use non-uniform pointer sizes/alignments. Perhaps we don't need to keep the sizes/alignments around after this function returns, but I'd argue that it isn't worth the effort to remove it just for this PR: I'll just be adding it right back in and it's so trivial compared to the compiler as a whole.

Copy link
Member

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

Why would you be adding it back in?

Copy link
Contributor Author

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

Vega10 has a 64kB LDS (LDS is an explicitly managed CU (compute unit, conceptually similar to a thread/core) local cache) which has throughput in excess of 13TB/s (according to AMD documentation). Vs 484GB/s to Vega10's HDM2 main memory. Vega10 also has a cross CU GDS cache, but I don't have figures for its throughput. Of AMD Gpus, all GCN archs feature a LDS, and I'm reasonably certain Nvidia has something similar. Use of these memories is absolutely critical for maximizing GPGPU performance in memory throughput constrained code.

To use LDS, what is typically done is a global variable is declared and the input is copied into it at the start of a kernel, after which a barrier is executed before the real work of the kernel utilizing the LDS global is performed.

On amdgcn-amd-amdhsa-amdgiz, LDS addresses are 32bit (and 32bit aligned), not 64bit. GDS isn't technically implemented for HSA (not sure why, but I haven't dug into it), but would also be 32bit. We'll need access to this size/alignment info if we ever want to be able to support 32bit (vs uniform but double in size 64bit) references to data in this space.

[s, ref a..] if s.starts_with("i") => {
let bits = match s[1..].parse::<u64>() {
Ok(bits) => bits,
Err(_) => {
@@ -149,7 +161,13 @@ impl TargetDataLayout {
}
// No existing entry, add a new one.
dl.vector_align.push((v_size, a));
}
},
[s, ..] if s.starts_with("A") => {
// default alloca address space
let idx = parse_bits(&s[1..], "u32",
"default alloca address space")? as u32;
dl.alloca_address_space = AddrSpaceIdx(idx);
},
_ => {} // Ignore everything else.
}
}
@@ -171,9 +189,37 @@ impl TargetDataLayout {
dl.pointer_size.bits(), target.target_pointer_width));
}

// We don't specialize pointer sizes for specific address spaces,
// so enforce that the default address space can hold all the bits
// of any other spaces. Similar for alignment.
{
let ptrs_iter = dl.pointers.iter().enumerate()
.filter_map(|(idx, ptrs)| {
ptrs.map(|(s, a)| (idx, s, a) )
});
for (idx, size, align) in ptrs_iter {
if size > dl.pointer_size {
return Err(format!("Address space {} pointer is bigger than the default \
pointer: {} vs {}",
idx, size.bits(), dl.pointer_size.bits()));
}
if align.abi > dl.pointer_align.abi {
return Err(format!("Address space {} pointer alignment is bigger than the \
default pointer: {} vs {}",
idx, align.abi.bits(), dl.pointer_align.abi.bits()));
}
}
}

Ok(dl)
}

/// Size and alignment of pointers in the given address space.
///
/// Falls back to the default pointer size/alignment when `addr_space`
/// has no entry of its own in the parsed data layout.
pub fn pointer_info(&self, addr_space: AddrSpaceIdx) -> (Size, AbiAndPrefAlign) {
    match self.pointers.get(addr_space.0 as usize) {
        Some(&Some(info)) => info,
        _ => (self.pointer_size, self.pointer_align),
    }
}

/// Return exclusive upper bound on object size.
///
/// The theoretical maximum object size is defined as the maximum positive `isize` value.
@@ -940,3 +986,82 @@ impl<'a, Ty> TyLayout<'a, Ty> {
}
}
}

// Unit tests exercising the data layout parser's address space support.
#[cfg(test)]
mod tests {
    use super::*;
    use spec::{Target, TargetTriple, };

    /// Per-address-space pointer sizes/alignments parsed from `p<N>:...`
    /// components must be retrievable via `pointer_info`, with unknown
    /// address spaces falling back to the default pointer info.
    #[test]
    fn pointer_size_align() {
        // amdgcn-amd-amdhsa-amdgiz
        const DL: &'static str = "e-p:64:64-p1:64:64-p2:64:64-p3:32:32-\
                                  p4:32:32-p5:32:32-i64:64-v16:16-v24:32-\
                                  v32:32-v48:64-v96:128-v192:256-v256:256-\
                                  v512:512-v1024:1024-v2048:2048-n32:64-A5";

        // Doesn't need to be real...
        let triple = TargetTriple::TargetTriple("x86_64-unknown-linux-gnu".into());
        let mut target = Target::search(&triple).unwrap();
        target.data_layout = DL.into();

        let dl = TargetDataLayout::parse(&target);
        assert!(dl.is_ok());
        let dl = dl.unwrap();

        let default = (dl.pointer_size, dl.pointer_align);

        let thirty_two_size = Size::from_bits(32);
        let thirty_two_align = AbiAndPrefAlign::new(Align::from_bits(32).unwrap());
        let thirty_two = (thirty_two_size, thirty_two_align);
        let sixty_four_size = Size::from_bits(64);
        let sixty_four_align = AbiAndPrefAlign::new(Align::from_bits(64).unwrap());
        let sixty_four = (sixty_four_size, sixty_four_align);

        // `p` (== `p0`) sets the default, so space 0 and the default agree.
        assert_eq!(dl.pointer_info(AddrSpaceIdx(0)), default);
        assert_eq!(dl.pointer_info(AddrSpaceIdx(0)), sixty_four);
        assert_eq!(dl.pointer_info(AddrSpaceIdx(1)), sixty_four);
        assert_eq!(dl.pointer_info(AddrSpaceIdx(2)), sixty_four);
        assert_eq!(dl.pointer_info(AddrSpaceIdx(3)), thirty_two);
        assert_eq!(dl.pointer_info(AddrSpaceIdx(4)), thirty_two);
        assert_eq!(dl.pointer_info(AddrSpaceIdx(5)), thirty_two);

        // unknown address spaces need to be the same as the default:
        assert_eq!(dl.pointer_info(AddrSpaceIdx(7)), default);
    }

    /// Parsing must reject layouts where some address space's pointers
    /// are larger than the default address space's pointers.
    #[test]
    fn default_is_biggest() {
        // Note p1 is 128 bits.
        const DL: &'static str = "e-p:64:64-p1:128:128-p2:64:64-p3:32:32-\
                                  p4:32:32-p5:32:32-i64:64-v16:16-v24:32-\
                                  v32:32-v48:64-v96:128-v192:256-v256:256-\
                                  v512:512-v1024:1024-v2048:2048-n32:64-A5";

        // Doesn't need to be real...
        let triple = TargetTriple::TargetTriple("x86_64-unknown-linux-gnu".into());
        let mut target = Target::search(&triple).unwrap();
        target.data_layout = DL.into();

        assert!(TargetDataLayout::parse(&target).is_err());
    }
    /// The `A<N>` data layout component selects the alloca address space.
    #[test]
    fn alloca_addr_space() {
        // amdgcn-amd-amdhsa-amdgiz
        const DL: &'static str = "e-p:64:64-p1:64:64-p2:64:64-p3:32:32-\
                                  p4:32:32-p5:32:32-i64:64-v16:16-v24:32-\
                                  v32:32-v48:64-v96:128-v192:256-v256:256-\
                                  v512:512-v1024:1024-v2048:2048-n32:64-A5";

        // Doesn't need to be real...
        let triple = TargetTriple::TargetTriple("x86_64-unknown-linux-gnu".into());
        let mut target = Target::search(&triple).unwrap();
        target.data_layout = DL.into();

        let dl = TargetDataLayout::parse(&target);
        assert!(dl.is_ok());
        let dl = dl.unwrap();

        assert_eq!(dl.alloca_address_space, AddrSpaceIdx(5));
    }
}
173 changes: 172 additions & 1 deletion src/librustc_target/spec/mod.rs
Original file line number Diff line number Diff line change
@@ -35,11 +35,12 @@
//! to the list specified by the target, rather than replace.
use serialize::json::{Json, ToJson};
use std::collections::BTreeMap;
use std::collections::{BTreeMap, BTreeSet};
use std::default::Default;
use std::{fmt, io};
use std::path::{Path, PathBuf};
use std::str::FromStr;
use std::ops::{Deref, DerefMut, };
use spec::abi::{Abi, lookup as lookup_abi};

pub mod abi;
@@ -260,6 +261,158 @@ impl ToJson for MergeFunctions {
pub type LinkArgs = BTreeMap<LinkerFlavor, Vec<String>>;
pub type TargetResult = Result<Target, String>;

/// Index of an LLVM address space (the `N` in `addrspace(N)`).
///
/// Address space `0` is the default; `Default` therefore yields
/// `AddrSpaceIdx(0)`. Derived instead of hand-written, since the derive
/// produces exactly that value for a `u32` newtype.
#[derive(Clone, Copy, Debug, Hash, Eq, PartialEq, Ord, PartialOrd, Default)]
pub struct AddrSpaceIdx(pub u32);

impl fmt::Display for AddrSpaceIdx {
    /// Formats as the bare numeric index, matching the spelling used in
    /// LLVM data layout strings (e.g. the `5` in `p5:32:32`).
    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
        write!(f, "{}", self.0)
    }
}

impl FromStr for AddrSpaceIdx {
    type Err = <u32 as FromStr>::Err;

    /// Parses a bare unsigned integer into an address space index.
    fn from_str(s: &str) -> Result<AddrSpaceIdx, Self::Err> {
        Ok(AddrSpaceIdx(u32::from_str(s)?))
    }
}

/// The role an address space plays on a target.
#[derive(Clone, PartialEq, Eq, PartialOrd, Ord, Hash, Debug)]
pub enum AddrSpaceKind {
    Flat,
    Alloca,
    /// aka constant
    ReadOnly,
    /// aka global
    ReadWrite,
    Named(String),
}

impl FromStr for AddrSpaceKind {
    type Err = String;

    /// Parse an address space kind. Never actually fails: any string
    /// other than the four well-known role names becomes a
    /// target-specific `Named` address space.
    fn from_str(s: &str) -> Result<AddrSpaceKind, String> {
        let kind = match s {
            "flat" => AddrSpaceKind::Flat,
            "alloca" => AddrSpaceKind::Alloca,
            "readonly" => AddrSpaceKind::ReadOnly,
            "readwrite" => AddrSpaceKind::ReadWrite,
            other => AddrSpaceKind::Named(other.to_string()),
        };
        Ok(kind)
    }
}

impl fmt::Display for AddrSpaceKind {
    /// Inverse of `FromStr`: the well-known kinds print their keyword,
    /// `Named` prints its stored name verbatim.
    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
        let name = match *self {
            AddrSpaceKind::Flat => "flat",
            AddrSpaceKind::Alloca => "alloca",
            AddrSpaceKind::ReadOnly => "readonly",
            AddrSpaceKind::ReadWrite => "readwrite",
            AddrSpaceKind::Named(ref s) => s.as_str(),
        };
        f.write_str(name)
    }
}
impl ToJson for AddrSpaceKind {
    /// Serialize as the same string form that `FromStr` accepts.
    fn to_json(&self) -> Json {
        Json::String(self.to_string())
    }
}

/// Properties of a single target address space, as declared in the
/// target specification JSON (see `AddrSpaceProps::from_json`).
#[derive(Clone, PartialEq, Eq, PartialOrd, Ord, Hash, Debug)]
pub struct AddrSpaceProps {
    /// The LLVM address space number used for pointers into this space.
    pub index: AddrSpaceIdx,
    /// Indicates which addr spaces this addr space can be addrspacecast-ed to.
    pub shared_with: BTreeSet<AddrSpaceKind>,
}

impl AddrSpaceProps {
    /// Deserialize address space properties from a target spec JSON object.
    ///
    /// Expects an object of the shape
    /// `{ "index": <u32>, "shared-with": ["flat", ...] }`;
    /// `shared-with` is optional and defaults to empty.
    pub fn from_json(json: &Json) -> Result<Self, String> {
        let index = json.find("index").and_then(|v| v.as_u64() )
            .ok_or_else(|| {
                "invalid address space index, expected an unsigned integer"
            })?;
        // LLVM address space indices are `u32`; reject values that the
        // `as u32` cast below would otherwise silently truncate.
        if index > u32::max_value() as u64 {
            return Err(format!("address space index `{}` is out of range", index));
        }

        let mut shared_with = vec![];
        if let Some(shared) = json.find("shared-with").and_then(|v| v.as_array() ) {
            for s in shared {
                let s = s.as_string()
                    .ok_or_else(|| {
                        "expected string for address space kind"
                    })?;

                // `AddrSpaceKind::from_str` is infallible in practice
                // (unknown names become `Named`), but propagate anyway.
                let kind = AddrSpaceKind::from_str(s)?;
                shared_with.push(kind);
            }
        }

        Ok(AddrSpaceProps {
            index: AddrSpaceIdx(index as u32),
            shared_with: shared_with.into_iter().collect(),
        })
    }
}
impl ToJson for AddrSpaceProps {
    /// Serialize to `{ "index": ..., "shared-with": [...] }`, the shape
    /// expected back by `AddrSpaceProps::from_json`.
    fn to_json(&self) -> Json {
        let mut obj = BTreeMap::new();
        obj.insert("index".to_string(), self.index.0.to_json());
        let shared_with = self.shared_with
            .iter()
            .map(|sw| sw.to_json())
            .collect();
        obj.insert("shared-with".to_string(), Json::Array(shared_with));

        Json::Object(obj)
    }
}
/// The full set of address spaces a target declares, keyed by role.
/// Derefs to the underlying map for convenient lookups.
#[derive(Clone, PartialEq, Eq, PartialOrd, Ord, Hash, Debug)]
pub struct AddrSpaces(pub BTreeMap<AddrSpaceKind, AddrSpaceProps>);

impl Deref for AddrSpaces {
    type Target = BTreeMap<AddrSpaceKind, AddrSpaceProps>;

    fn deref(&self) -> &Self::Target {
        &self.0
    }
}

impl DerefMut for AddrSpaces {
    fn deref_mut(&mut self) -> &mut Self::Target {
        &mut self.0
    }
}
impl ToJson for AddrSpaces {
fn to_json(&self) -> Json {
let obj = self.iter()
.map(|(k, v)| {
(format!("{}", k), v.to_json())
})
.collect();
Json::Object(obj)
}
}
impl Default for AddrSpaces {
fn default() -> Self {
let mut asp = BTreeMap::new();

let kinds = vec![AddrSpaceKind::ReadOnly,
AddrSpaceKind::ReadWrite,
AddrSpaceKind::Alloca,
AddrSpaceKind::Flat, ];
Copy link

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

Missing Instruction?


let insert = |asp: &mut BTreeMap<_, _>, kind, idx| {
let props = AddrSpaceProps {
index: idx,
shared_with: kinds.clone()
.into_iter()
.filter(|k| *k != kind)
.collect(),
};
assert!(asp.insert(kind, props).is_none());
};

for kind in kinds.iter() {
insert(&mut asp, kind.clone(), Default::default());
}

AddrSpaces(asp)
}
}

macro_rules! supported_targets {
( $(($triple:expr, $module:ident),)+ ) => (
$(mod $module;)*
@@ -732,6 +885,11 @@ pub struct TargetOptions {
/// the usual logic to figure this out from the crate itself.
pub override_export_symbols: Option<Vec<String>>,

/// Description of all address spaces and how they are shared with one another.
/// Defaults to a single, flat, address space. Note it is generally assumed that
/// the address space `0` is your flat address space.
pub addr_spaces: AddrSpaces,

/// Determines how or whether the MergeFunctions LLVM pass should run for
/// this target. Either "disabled", "trampolines", or "aliases".
/// The MergeFunctions pass is generally useful, but some targets may need
@@ -821,6 +979,7 @@ impl Default for TargetOptions {
requires_uwtable: false,
simd_types_indirect: true,
override_export_symbols: None,
addr_spaces: Default::default(),
merge_functions: MergeFunctions::Aliases,
}
}
@@ -1051,6 +1210,16 @@ impl Target {
}
}
} );
($key_name:ident, addr_spaces) => ( {
let name = (stringify!($key_name)).replace("_", "-");
if let Some(obj) = obj.find(&name[..]).and_then(|o| o.as_object() ) {
for (k, v) in obj {
let k = AddrSpaceKind::from_str(&k).unwrap();
let props = AddrSpaceProps::from_json(v)?;
base.options.$key_name.insert(k, props);
}
}
} );
}

key!(is_builtin, bool);
@@ -1126,6 +1295,7 @@ impl Target {
key!(requires_uwtable, bool);
key!(simd_types_indirect, bool);
key!(override_export_symbols, opt_list);
key!(addr_spaces, addr_spaces);
key!(merge_functions, MergeFunctions)?;

if let Some(array) = obj.find("abi-blacklist").and_then(Json::as_array) {
@@ -1338,6 +1508,7 @@ impl ToJson for Target {
target_option_val!(requires_uwtable);
target_option_val!(simd_types_indirect);
target_option_val!(override_export_symbols);
target_option_val!(addr_spaces);
target_option_val!(merge_functions);

if default.abi_blacklist != self.options.abi_blacklist {
26 changes: 22 additions & 4 deletions src/rustllvm/RustWrapper.cpp
Original file line number Diff line number Diff line change
@@ -121,17 +121,35 @@ extern "C" LLVMValueRef LLVMRustGetOrInsertFunction(LLVMModuleRef M,
}

extern "C" LLVMValueRef
LLVMRustGetOrInsertGlobal(LLVMModuleRef M, const char *Name, LLVMTypeRef Ty) {
return wrap(unwrap(M)->getOrInsertGlobal(Name, unwrap(Ty)));
LLVMRustGetOrInsertGlobal(LLVMModuleRef M, const char *Name, LLVMTypeRef Ty, unsigned AS) {
GlobalVariable* GV = nullptr;
Module* MM = unwrap(M);
Type* ETy = unwrap(Ty);
if (!(GV = MM->getNamedGlobal(Name))) {
GV = new GlobalVariable(ETy, false, GlobalVariable::ExternalLinkage,
nullptr, Name, GlobalVariable::NotThreadLocal, AS);
MM->getGlobalList().push_back(GV);
}
Type *GVTy = GV->getType();
PointerType *PTy = PointerType::get(ETy, GVTy->getPointerAddressSpace());
if (GVTy != PTy) {
return wrap(ConstantExpr::getBitCast(GV, PTy));
} else {
return wrap(GV);
}
}

extern "C" LLVMValueRef
LLVMRustInsertPrivateGlobal(LLVMModuleRef M, LLVMTypeRef Ty) {
LLVMRustInsertPrivateGlobal(LLVMModuleRef M, LLVMTypeRef Ty, unsigned AS) {
return wrap(new GlobalVariable(*unwrap(M),
unwrap(Ty),
false,
GlobalValue::PrivateLinkage,
nullptr));
nullptr,
"",
nullptr,
GlobalVariable::NotThreadLocal,
AS));
}

extern "C" LLVMTypeRef LLVMRustMetadataTypeInContext(LLVMContextRef C) {