diff --git a/compiler/rustc_codegen_llvm/src/coverageinfo/mapgen.rs b/compiler/rustc_codegen_llvm/src/coverageinfo/mapgen.rs index e0af5653753b6..32f18419753e9 100644 --- a/compiler/rustc_codegen_llvm/src/coverageinfo/mapgen.rs +++ b/compiler/rustc_codegen_llvm/src/coverageinfo/mapgen.rs @@ -5,12 +5,13 @@ use crate::llvm; use llvm::coverageinfo::CounterMappingRegion; use rustc_codegen_ssa::coverageinfo::map::{Counter, CounterExpression}; use rustc_codegen_ssa::traits::{ConstMethods, CoverageInfoMethods}; -use rustc_data_structures::fx::{FxHashMap, FxHashSet, FxIndexSet}; -use rustc_hir::def_id::{DefId, DefIdSet}; +use rustc_data_structures::fx::FxIndexSet; +use rustc_hir::def::DefKind; +use rustc_hir::def_id::DefIdSet; use rustc_llvm::RustString; +use rustc_middle::middle::codegen_fn_attrs::CodegenFnAttrFlags; use rustc_middle::mir::coverage::CodeRegion; use rustc_middle::ty::TyCtxt; -use rustc_span::Symbol; use std::ffi::CString; @@ -46,7 +47,7 @@ pub fn finalize<'ll, 'tcx>(cx: &CodegenCx<'ll, 'tcx>) { // functions exist. Generate synthetic functions with a (required) single counter, and add the // MIR `Coverage` code regions to the `function_coverage_map`, before calling // `ctx.take_function_coverage_map()`. - if !tcx.sess.instrument_coverage_except_unused_functions() { + if cx.codegen_unit.is_code_coverage_dead_code_cgu() { add_unused_functions(cx); } @@ -271,26 +272,35 @@ fn save_function_record( /// `DefId`s (`tcx` query `mir_keys`) minus the codegenned `DefId`s (`tcx` query /// `codegened_and_inlined_items`). /// -/// *HOWEVER* the codegenned `DefId`s are partitioned across multiple `CodegenUnit`s (CGUs), and -/// this function is processing a `function_coverage_map` for the functions (`Instance`/`DefId`) -/// allocated to only one of those CGUs. We must NOT inject any unused functions's `CodeRegion`s -/// more than once, so we have to pick a CGUs `function_coverage_map` into which the unused -/// function will be inserted. +/// These unused functions are then codegen'd in one of the CGUs which is marked as the +/// "code coverage dead code cgu" during the partitioning process. This prevents us from generating +/// code regions for the same function more than once which can lead to linker errors regarding +/// duplicate symbols. fn add_unused_functions<'ll, 'tcx>(cx: &CodegenCx<'ll, 'tcx>) { - let tcx = cx.tcx; + assert!(cx.codegen_unit.is_code_coverage_dead_code_cgu()); - // FIXME(#79622): Can this solution be simplified and/or improved? Are there other sources - // of compiler state data that might help (or better sources that could be exposed, but - // aren't yet)? + let tcx = cx.tcx; let ignore_unused_generics = tcx.sess.instrument_coverage_except_unused_generics(); - let all_def_ids: DefIdSet = tcx + let eligible_def_ids: DefIdSet = tcx .mir_keys(()) .iter() .filter_map(|local_def_id| { let def_id = local_def_id.to_def_id(); - if ignore_unused_generics && tcx.generics_of(def_id).requires_monomorphization(tcx) { + let kind = tcx.def_kind(def_id); + // `mir_keys` will give us `DefId`s for all kinds of things, not + // just "functions", like consts, statics, etc. Filter those out. + // If `ignore_unused_generics` was specified, filter out any + // generic functions from consideration as well. 
+ if !matches!( + kind, + DefKind::Fn | DefKind::AssocFn | DefKind::Closure | DefKind::Generator + ) { + return None; + } else if ignore_unused_generics + && tcx.generics_of(def_id).requires_monomorphization(tcx) + { return None; } Some(local_def_id.to_def_id()) @@ -299,79 +309,17 @@ fn add_unused_functions<'ll, 'tcx>(cx: &CodegenCx<'ll, 'tcx>) { let codegenned_def_ids = tcx.codegened_and_inlined_items(()); - let mut unused_def_ids_by_file: FxHashMap<Symbol, Vec<DefId>> = FxHashMap::default(); - for &non_codegenned_def_id in all_def_ids.difference(codegenned_def_ids) { - // Make sure the non-codegenned (unused) function has at least one MIR - // `Coverage` statement with a code region, and return its file name. - if let Some(non_codegenned_file_name) = tcx.covered_file_name(non_codegenned_def_id) { - let def_ids = - unused_def_ids_by_file.entry(*non_codegenned_file_name).or_insert_with(Vec::new); - def_ids.push(non_codegenned_def_id); - } - } + for &non_codegenned_def_id in eligible_def_ids.difference(codegenned_def_ids) { + let codegen_fn_attrs = tcx.codegen_fn_attrs(non_codegenned_def_id); - if unused_def_ids_by_file.is_empty() { - // There are no unused functions with file names to add (in any CGU) - return; - } - - // Each `CodegenUnit` (CGU) has its own function_coverage_map, and generates a specific binary - // with its own coverage map. - // - // Each covered function `Instance` can be included in only one coverage map, produced from a - // specific function_coverage_map, from a specific CGU. - // - // Since unused functions did not generate code, they are not associated with any CGU yet. - // - // To avoid injecting the unused functions in multiple coverage maps (for multiple CGUs) - // determine which function_coverage_map has the responsibility for publishing unreachable - // coverage, based on file name: For each unused function, find the CGU that generates the - // first function (based on sorted `DefId`) from the same file. - // - // Add a new `FunctionCoverage` to the `function_coverage_map`, with unreachable code regions - // for each region in it's MIR. - - // Convert the `HashSet` of `codegenned_def_ids` to a sortable vector, and sort them. - let mut sorted_codegenned_def_ids: Vec<DefId> = codegenned_def_ids.iter().copied().collect(); - sorted_codegenned_def_ids.sort_unstable(); - - let mut first_covered_def_id_by_file: FxHashMap<Symbol, DefId> = FxHashMap::default(); - for &def_id in sorted_codegenned_def_ids.iter() { - if let Some(covered_file_name) = tcx.covered_file_name(def_id) { - // Only add files known to have unused functions - if unused_def_ids_by_file.contains_key(covered_file_name) { - first_covered_def_id_by_file.entry(*covered_file_name).or_insert(def_id); - } + // If a function is marked `#[no_coverage]`, then skip generating a + // dead code stub for it. + if codegen_fn_attrs.flags.contains(CodegenFnAttrFlags::NO_COVERAGE) { + debug!("skipping unused fn marked #[no_coverage]: {:?}", non_codegenned_def_id); + continue; } - } - - // Get the set of def_ids with coverage regions, known by *this* CoverageContext. 
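Note: as a rough, user-level sketch of how the `DefKind` filter above plays out for a small crate (illustrative only; the item names are hypothetical and the comments describe the nightly coverage modes of this era rather than exact flag spellings):

```rust
// Hypothetical crate built with coverage instrumentation enabled.

pub fn used() {}         // codegenned: gets a normal coverage mapping

fn never_called() {}     // not codegenned: now receives an unused-function stub
                         // in the dead-code CGU

const TABLE: [u8; 2] = [0, 1]; // `DefKind::Const`: filtered out above, no stub

fn generic<T>(_t: T) {}  // additionally skipped when the "except unused
                         // generics" mode is on and it was never monomorphized

fn main() {
    used();
}
```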
- let cgu_covered_def_ids: DefIdSet = match cx.coverage_context() { - Some(ctx) => ctx - .function_coverage_map - .borrow() - .keys() - .map(|&instance| instance.def.def_id()) - .collect(), - None => return, - }; - let cgu_covered_files: FxHashSet<Symbol> = first_covered_def_id_by_file - .iter() - .filter_map( - |(&file_name, def_id)| { - if cgu_covered_def_ids.contains(def_id) { Some(file_name) } else { None } - }, - ) - .collect(); - - // For each file for which this CGU is responsible for adding unused function coverage, - // get the `def_id`s for each unused function (if any), define a synthetic function with a - // single LLVM coverage counter, and add the function's coverage `CodeRegion`s. to the - // function_coverage_map. - for covered_file_name in cgu_covered_files { - for def_id in unused_def_ids_by_file.remove(&covered_file_name).into_iter().flatten() { - cx.define_unused_fn(def_id); - } + debug!("generating unused fn: {:?}", non_codegenned_def_id); + cx.define_unused_fn(non_codegenned_def_id); } } diff --git a/compiler/rustc_middle/src/mir/mono.rs b/compiler/rustc_middle/src/mir/mono.rs index 1422537cd5060..3a6c091b3313d 100644 --- a/compiler/rustc_middle/src/mir/mono.rs +++ b/compiler/rustc_middle/src/mir/mono.rs @@ -247,6 +247,9 @@ pub struct CodegenUnit<'tcx> { items: FxHashMap<MonoItem<'tcx>, (Linkage, Visibility)>, size_estimate: Option<usize>, primary: bool, + /// True if this is CGU is used to hold code coverage information for dead code, + /// false otherwise. + is_code_coverage_dead_code_cgu: bool, } /// Specifies the linkage type for a `MonoItem`. @@ -277,7 +280,13 @@ pub enum Visibility { impl<'tcx> CodegenUnit<'tcx> { #[inline] pub fn new(name: Symbol) -> CodegenUnit<'tcx> { - CodegenUnit { name, items: Default::default(), size_estimate: None, primary: false } + CodegenUnit { + name, + items: Default::default(), + size_estimate: None, + primary: false, + is_code_coverage_dead_code_cgu: false, + } } pub fn name(&self) -> Symbol { @@ -304,6 +313,15 @@ impl<'tcx> CodegenUnit<'tcx> { &mut self.items } + pub fn is_code_coverage_dead_code_cgu(&self) -> bool { + self.is_code_coverage_dead_code_cgu + } + + /// Marks this CGU as the one used to contain code coverage information for dead code. + pub fn make_code_coverage_dead_code_cgu(&mut self) { + self.is_code_coverage_dead_code_cgu = true; + } + pub fn mangle_name(human_readable_name: &str) -> String { // We generate a 80 bit hash from the name. This should be enough to // avoid collisions and is still reasonably short for filenames. @@ -404,9 +422,11 @@ impl<'a, 'tcx> HashStable<StableHashingContext<'a>> for CodegenUnit<'tcx> { // The size estimate is not relevant to the hash size_estimate: _, primary: _, + is_code_coverage_dead_code_cgu, } = *self; name.hash_stable(hcx, hasher); + is_code_coverage_dead_code_cgu.hash_stable(hcx, hasher); let mut items: Vec<(Fingerprint, _)> = items .iter() diff --git a/compiler/rustc_middle/src/query/mod.rs b/compiler/rustc_middle/src/query/mod.rs index 027c0c64924b8..c4530e901120a 100644 --- a/compiler/rustc_middle/src/query/mod.rs +++ b/compiler/rustc_middle/src/query/mod.rs @@ -386,16 +386,6 @@ rustc_queries! { storage(ArenaCacheSelector<'tcx>) } - /// Returns the name of the file that contains the function body, if instrumented for coverage. 
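Note: the `#[no_coverage]` check above means a function that opted out of instrumentation also never gets a dead-code stub. A minimal sketch of the opt-out, assuming the nightly `no_coverage` feature gate as it existed at the time (the attribute has since been reworked):

```rust
#![feature(no_coverage)] // nightly-only feature gate in this era

#[no_coverage]
fn excluded() {
    // Never instrumented, and with this change also skipped when the
    // unused-function stubs are synthesized.
}

fn main() {}
```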
- query covered_file_name(key: DefId) -> Option<Symbol> { - desc { - |tcx| "retrieving the covered file name, if instrumented, for `{}`", - tcx.def_path_str(key) - } - storage(ArenaCacheSelector<'tcx>) - cache_on_disk_if { key.is_local() } - } - /// Returns the `CodeRegions` for a function that has instrumented coverage, in case the /// function was optimized out before codegen, and before being added to the Coverage Map. query covered_code_regions(key: DefId) -> Vec<&'tcx mir::coverage::CodeRegion> { diff --git a/compiler/rustc_mir_transform/src/coverage/query.rs b/compiler/rustc_mir_transform/src/coverage/query.rs index 1721fb5cde0e8..46de6d939a1df 100644 --- a/compiler/rustc_mir_transform/src/coverage/query.rs +++ b/compiler/rustc_mir_transform/src/coverage/query.rs @@ -9,7 +9,6 @@ use rustc_span::def_id::DefId; /// A `query` provider for retrieving coverage information injected into MIR. pub(crate) fn provide(providers: &mut Providers) { providers.coverageinfo = |tcx, def_id| coverageinfo(tcx, def_id); - providers.covered_file_name = |tcx, def_id| covered_file_name(tcx, def_id); providers.covered_code_regions = |tcx, def_id| covered_code_regions(tcx, def_id); } @@ -137,25 +136,6 @@ fn coverageinfo<'tcx>(tcx: TyCtxt<'tcx>, instance_def: ty::InstanceDef<'tcx>) -> coverage_visitor.info } -fn covered_file_name(tcx: TyCtxt<'_>, def_id: DefId) -> Option<Symbol> { - if tcx.is_mir_available(def_id) { - let body = mir_body(tcx, def_id); - for bb_data in body.basic_blocks().iter() { - for statement in bb_data.statements.iter() { - if let StatementKind::Coverage(box ref coverage) = statement.kind { - if let Some(code_region) = coverage.code_region.as_ref() { - if is_inlined(body, statement) { - continue; - } - return Some(code_region.file_name); - } - } - } - } - } - return None; -} - fn covered_code_regions<'tcx>(tcx: TyCtxt<'tcx>, def_id: DefId) -> Vec<&'tcx CodeRegion> { let body = mir_body(tcx, def_id); body.basic_blocks() diff --git a/compiler/rustc_monomorphize/src/partitioning/mod.rs b/compiler/rustc_monomorphize/src/partitioning/mod.rs index dc22ffc6747ac..67597a0d7b46b 100644 --- a/compiler/rustc_monomorphize/src/partitioning/mod.rs +++ b/compiler/rustc_monomorphize/src/partitioning/mod.rs @@ -201,6 +201,40 @@ pub fn partition<'tcx>( partitioner.internalize_symbols(cx, &mut post_inlining); } + let instrument_dead_code = + tcx.sess.instrument_coverage() && !tcx.sess.instrument_coverage_except_unused_functions(); + + if instrument_dead_code { + assert!( + post_inlining.codegen_units.len() > 0, + "There must be at least one CGU that code coverage data can be generated in." + ); + + // Find the smallest CGU that has exported symbols and put the dead + // function stubs in that CGU. We look for exported symbols to increase + // the likelihood the linker won't throw away the dead functions. + // FIXME(#92165): In order to truly resolve this, we need to make sure + // the object file (CGU) containing the dead function stubs is included + // in the final binary. This will probably require forcing these + // function symbols to be included via `-u` or `/include` linker args. + let mut cgus: Vec<_> = post_inlining.codegen_units.iter_mut().collect(); + cgus.sort_by_key(|cgu| cgu.size_estimate()); + + let dead_code_cgu = if let Some(cgu) = cgus + .into_iter() + .rev() + .filter(|cgu| cgu.items().iter().any(|(_, (linkage, _))| *linkage == Linkage::External)) + .next() + { + cgu + } else { + // If there are no CGUs that have externally linked items, + // then we just pick the first CGU as a fallback. 
+ &mut post_inlining.codegen_units[0] + }; + dead_code_cgu.make_code_coverage_dead_code_cgu(); + } + // Finally, sort by codegen unit name, so that we get deterministic results. let PostInliningPartitioning { codegen_units: mut result, diff --git a/compiler/rustc_typeck/src/check/callee.rs b/compiler/rustc_typeck/src/check/callee.rs index e67ee1cab3df2..eea8f40635d74 100644 --- a/compiler/rustc_typeck/src/check/callee.rs +++ b/compiler/rustc_typeck/src/check/callee.rs @@ -496,7 +496,7 @@ impl<'a, 'tcx> FnCtxt<'a, 'tcx> { call_expr.span, call_expr, fn_sig.inputs(), - &expected_arg_tys, + expected_arg_tys, arg_exprs, fn_sig.c_variadic, TupleArgumentsFlag::DontTupleArguments, @@ -529,7 +529,7 @@ impl<'a, 'tcx> FnCtxt<'a, 'tcx> { call_expr.span, call_expr, fn_sig.inputs(), - &expected_arg_tys, + expected_arg_tys, arg_exprs, fn_sig.c_variadic, TupleArgumentsFlag::TupleArguments, diff --git a/compiler/rustc_typeck/src/check/fn_ctxt/checks.rs b/compiler/rustc_typeck/src/check/fn_ctxt/checks.rs index 11b63a99043b7..e796fe58170d2 100644 --- a/compiler/rustc_typeck/src/check/fn_ctxt/checks.rs +++ b/compiler/rustc_typeck/src/check/fn_ctxt/checks.rs @@ -62,7 +62,7 @@ impl<'a, 'tcx> FnCtxt<'a, 'tcx> { sp, expr, &err_inputs, - &[], + vec![], args_no_rcvr, false, tuple_arguments, @@ -73,7 +73,7 @@ impl<'a, 'tcx> FnCtxt<'a, 'tcx> { let method = method.unwrap(); // HACK(eddyb) ignore self in the definition (see above). - let expected_arg_tys = self.expected_inputs_for_expected_output( + let expected_input_tys = self.expected_inputs_for_expected_output( sp, expected, method.sig.output(), @@ -83,7 +83,7 @@ impl<'a, 'tcx> FnCtxt<'a, 'tcx> { sp, expr, &method.sig.inputs()[1..], - &expected_arg_tys[..], + expected_input_tys, args_no_rcvr, method.sig.c_variadic, tuple_arguments, @@ -96,34 +96,43 @@ impl<'a, 'tcx> FnCtxt<'a, 'tcx> { /// method calls and overloaded operators. pub(in super::super) fn check_argument_types( &self, - sp: Span, - expr: &'tcx hir::Expr<'tcx>, - fn_inputs: &[Ty<'tcx>], - expected_arg_tys: &[Ty<'tcx>], - args: &'tcx [hir::Expr<'tcx>], + // Span enclosing the call site + call_span: Span, + // Expression of the call site + call_expr: &'tcx hir::Expr<'tcx>, + // Types (as defined in the *signature* of the target function) + formal_input_tys: &[Ty<'tcx>], + // More specific expected types, after unifying with caller output types + expected_input_tys: Vec<Ty<'tcx>>, + // The expressions for each provided argument + provided_args: &'tcx [hir::Expr<'tcx>], + // Whether the function is variadic, for example when imported from C c_variadic: bool, + // Whether the arguments have been bundled in a tuple (ex: closures) tuple_arguments: TupleArgumentsFlag, - def_id: Option<DefId>, + // The DefId for the function being called, for better error messages + fn_def_id: Option<DefId>, ) { let tcx = self.tcx; // Grab the argument types, supplying fresh type variables // if the wrong number of arguments were supplied - let supplied_arg_count = if tuple_arguments == DontTupleArguments { args.len() } else { 1 }; + let supplied_arg_count = + if tuple_arguments == DontTupleArguments { provided_args.len() } else { 1 }; // All the input types from the fn signature must outlive the call // so as to validate implied bounds. 
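Note: a standalone model of the CGU choice made in the `rustc_monomorphize` hunk above (a simplified sketch with hypothetical names, not compiler code): scan from the largest size estimate, prefer a CGU that has at least one externally linked item, and otherwise fall back to the first CGU.

```rust
/// Simplified stand-in for a codegen unit: (size estimate, has external items).
type CguInfo = (usize, bool);

/// Index of the CGU that should host the dead-code coverage stubs.
fn pick_dead_code_cgu(cgus: &[CguInfo]) -> usize {
    cgus.iter()
        .enumerate()
        // Only consider CGUs that export at least one symbol...
        .filter(|&(_, &(_, has_external))| has_external)
        // ...and among those, take the one with the largest size estimate.
        .max_by_key(|&(_, &(size, _))| size)
        .map(|(idx, _)| idx)
        // No CGU exports anything: fall back to the first one.
        .unwrap_or(0)
}

fn main() {
    let cgus = [(10, false), (40, true), (25, true)];
    assert_eq!(pick_dead_code_cgu(&cgus), 1);
}
```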
- for (&fn_input_ty, arg_expr) in iter::zip(fn_inputs, args) { + for (&fn_input_ty, arg_expr) in iter::zip(formal_input_tys, provided_args) { self.register_wf_obligation(fn_input_ty.into(), arg_expr.span, traits::MiscObligation); } - let expected_arg_count = fn_inputs.len(); + let expected_arg_count = formal_input_tys.len(); let param_count_error = |expected_count: usize, arg_count: usize, error_code: &str, c_variadic: bool, sugg_unit: bool| { - let (span, start_span, args, ctor_of) = match &expr.kind { + let (span, start_span, args, ctor_of) = match &call_expr.kind { hir::ExprKind::Call( hir::Expr { span, @@ -156,14 +165,14 @@ impl<'a, 'tcx> FnCtxt<'a, 'tcx> { &args[1..], // Skip the receiver. None, // methods are never ctors ), - k => span_bug!(sp, "checking argument types on a non-call: `{:?}`", k), + k => span_bug!(call_span, "checking argument types on a non-call: `{:?}`", k), }; - let arg_spans = if args.is_empty() { + let arg_spans = if provided_args.is_empty() { // foo() // ^^^-- supplied 0 arguments // | // expected 2 arguments - vec![tcx.sess.source_map().next_point(start_span).with_hi(sp.hi())] + vec![tcx.sess.source_map().next_point(start_span).with_hi(call_span.hi())] } else { // foo(1, 2, 3) // ^^^ - - - supplied 3 arguments @@ -196,7 +205,7 @@ impl<'a, 'tcx> FnCtxt<'a, 'tcx> { ); } - if let Some(def_id) = def_id { + if let Some(def_id) = fn_def_id { if let Some(def_span) = tcx.def_ident_span(def_id) { let mut spans: MultiSpan = def_span.into(); @@ -218,7 +227,7 @@ impl<'a, 'tcx> FnCtxt<'a, 'tcx> { } if sugg_unit { - let sugg_span = tcx.sess.source_map().end_point(expr.span); + let sugg_span = tcx.sess.source_map().end_point(call_expr.span); // remove closing `)` from the span let sugg_span = sugg_span.shrink_to_lo(); err.span_suggestion( @@ -240,110 +249,148 @@ impl<'a, 'tcx> FnCtxt<'a, 'tcx> { err.emit(); }; - let mut expected_arg_tys = expected_arg_tys.to_vec(); - - let formal_tys = if tuple_arguments == TupleArguments { - let tuple_type = self.structurally_resolved_type(sp, fn_inputs[0]); + let (formal_input_tys, expected_input_tys) = if tuple_arguments == TupleArguments { + let tuple_type = self.structurally_resolved_type(call_span, formal_input_tys[0]); match tuple_type.kind() { - ty::Tuple(arg_types) if arg_types.len() != args.len() => { - param_count_error(arg_types.len(), args.len(), "E0057", false, false); - expected_arg_tys = vec![]; - self.err_args(args.len()) + ty::Tuple(arg_types) if arg_types.len() != provided_args.len() => { + param_count_error(arg_types.len(), provided_args.len(), "E0057", false, false); + (self.err_args(provided_args.len()), vec![]) } ty::Tuple(arg_types) => { - expected_arg_tys = match expected_arg_tys.get(0) { + let expected_input_tys = match expected_input_tys.get(0) { Some(&ty) => match ty.kind() { ty::Tuple(ref tys) => tys.iter().map(|k| k.expect_ty()).collect(), _ => vec![], }, None => vec![], }; - arg_types.iter().map(|k| k.expect_ty()).collect() + (arg_types.iter().map(|k| k.expect_ty()).collect(), expected_input_tys) } _ => { struct_span_err!( tcx.sess, - sp, + call_span, E0059, "cannot use call notation; the first type parameter \ for the function trait is neither a tuple nor unit" ) .emit(); - expected_arg_tys = vec![]; - self.err_args(args.len()) + (self.err_args(provided_args.len()), vec![]) } } } else if expected_arg_count == supplied_arg_count { - fn_inputs.to_vec() + (formal_input_tys.to_vec(), expected_input_tys) } else if c_variadic { if supplied_arg_count >= expected_arg_count { - fn_inputs.to_vec() + 
(formal_input_tys.to_vec(), expected_input_tys) } else { param_count_error(expected_arg_count, supplied_arg_count, "E0060", true, false); - expected_arg_tys = vec![]; - self.err_args(supplied_arg_count) + (self.err_args(supplied_arg_count), vec![]) } } else { // is the missing argument of type `()`? - let sugg_unit = if expected_arg_tys.len() == 1 && supplied_arg_count == 0 { - self.resolve_vars_if_possible(expected_arg_tys[0]).is_unit() - } else if fn_inputs.len() == 1 && supplied_arg_count == 0 { - self.resolve_vars_if_possible(fn_inputs[0]).is_unit() + let sugg_unit = if expected_input_tys.len() == 1 && supplied_arg_count == 0 { + self.resolve_vars_if_possible(expected_input_tys[0]).is_unit() + } else if formal_input_tys.len() == 1 && supplied_arg_count == 0 { + self.resolve_vars_if_possible(formal_input_tys[0]).is_unit() } else { false }; param_count_error(expected_arg_count, supplied_arg_count, "E0061", false, sugg_unit); - expected_arg_tys = vec![]; - self.err_args(supplied_arg_count) + (self.err_args(supplied_arg_count), vec![]) }; debug!( - "check_argument_types: formal_tys={:?}", - formal_tys.iter().map(|t| self.ty_to_string(*t)).collect::<Vec<String>>() + "check_argument_types: formal_input_tys={:?}", + formal_input_tys.iter().map(|t| self.ty_to_string(*t)).collect::<Vec<String>>() ); - // If there is no expectation, expect formal_tys. - let expected_arg_tys = - if !expected_arg_tys.is_empty() { expected_arg_tys } else { formal_tys.clone() }; + // If there is no expectation, expect formal_input_tys. + let expected_input_tys = if !expected_input_tys.is_empty() { + expected_input_tys + } else { + formal_input_tys.clone() + }; + + assert_eq!(expected_input_tys.len(), formal_input_tys.len()); + // Keep track of the fully coerced argument types let mut final_arg_types: Vec<(usize, Ty<'_>, Ty<'_>)> = vec![]; + // We introduce a helper function to demand that a given argument satisfy a given input + // This is more complicated than just checking type equality, as arguments could be coerced + // This version writes those types back so further type checking uses the narrowed types + let demand_compatible = |idx, final_arg_types: &mut Vec<(usize, Ty<'tcx>, Ty<'tcx>)>| { + let formal_input_ty: Ty<'tcx> = formal_input_tys[idx]; + let expected_input_ty: Ty<'tcx> = expected_input_tys[idx]; + let provided_arg = &provided_args[idx]; + + debug!("checking argument {}: {:?} = {:?}", idx, provided_arg, formal_input_ty); + + // The special-cased logic below has three functions: + // 1. Provide as good of an expected type as possible. + let expectation = Expectation::rvalue_hint(self, expected_input_ty); + + let checked_ty = self.check_expr_with_expectation(provided_arg, expectation); + + // 2. Coerce to the most detailed type that could be coerced + // to, which is `expected_ty` if `rvalue_hint` returns an + // `ExpectHasType(expected_ty)`, or the `formal_ty` otherwise. + let coerced_ty = expectation.only_has_type(self).unwrap_or(formal_input_ty); + + // Keep track of these for below + final_arg_types.push((idx, checked_ty, coerced_ty)); + + // Cause selection errors caused by resolving a single argument to point at the + // argument and not the call. This is otherwise redundant with the `demand_coerce` + // call immediately after, but it lets us customize the span pointed to in the + // fulfillment error to be more accurate. 
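Note: the `param_count_error` path above (E0057/E0060/E0061) is what fires on an arity mismatch; a tiny example of the diagnostic it drives (the offending call is left commented out so the snippet itself compiles):

```rust
fn takes_two(_a: i32, _b: i32) {}

fn main() {
    // takes_two(1);
    // ^ uncommenting this produces E0061 ("this function takes 2 arguments
    //   but 1 argument was supplied"), pointing at the call site and, via
    //   `fn_def_id`, at the definition of `takes_two`.
    takes_two(1, 2);
}
```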
+ let _ = + self.resolve_vars_with_obligations_and_mutate_fulfillment(coerced_ty, |errors| { + self.point_at_type_arg_instead_of_call_if_possible(errors, call_expr); + self.point_at_arg_instead_of_call_if_possible( + errors, + &final_arg_types, + call_expr, + call_span, + provided_args, + ); + }); + + // We're processing function arguments so we definitely want to use + // two-phase borrows. + self.demand_coerce(&provided_arg, checked_ty, coerced_ty, None, AllowTwoPhase::Yes); + + // 3. Relate the expected type and the formal one, + // if the expected type was used for the coercion. + self.demand_suptype(provided_arg.span, formal_input_ty, coerced_ty); + }; + // Check the arguments. // We do this in a pretty awful way: first we type-check any arguments // that are not closures, then we type-check the closures. This is so // that we have more information about the types of arguments when we // type-check the functions. This isn't really the right way to do this. for check_closures in [false, true] { - debug!("check_closures={}", check_closures); - // More awful hacks: before we check argument types, try to do // an "opportunistic" trait resolution of any trait bounds on // the call. This helps coercions. if check_closures { self.select_obligations_where_possible(false, |errors| { - self.point_at_type_arg_instead_of_call_if_possible(errors, expr); + self.point_at_type_arg_instead_of_call_if_possible(errors, call_expr); self.point_at_arg_instead_of_call_if_possible( errors, &final_arg_types, - expr, - sp, - &args, + call_expr, + call_span, + &provided_args, ); }) } - // For C-variadic functions, we don't have a declared type for all of - // the arguments hence we only do our usual type checking with - // the arguments who's types we do know. - let t = if c_variadic { - expected_arg_count - } else if tuple_arguments == TupleArguments { - args.len() - } else { - supplied_arg_count - }; - for (i, arg) in args.iter().take(t).enumerate() { + let minimum_input_count = formal_input_tys.len(); + for (idx, arg) in provided_args.iter().enumerate() { // Warn only for the first loop (the "no closures" one). // Closure arguments themselves can't be diverging, but // a previous argument can, e.g., `foo(panic!(), || {})`. @@ -351,53 +398,21 @@ impl<'a, 'tcx> FnCtxt<'a, 'tcx> { self.warn_if_unreachable(arg.hir_id, arg.span, "expression"); } - let is_closure = matches!(arg.kind, ExprKind::Closure(..)); + // For C-variadic functions, we don't have a declared type for all of + // the arguments hence we only do our usual type checking with + // the arguments who's types we do know. However, we *can* check + // for unreachable expressions (see above). + // FIXME: unreachable warning current isn't emitted + if idx >= minimum_input_count { + continue; + } + let is_closure = matches!(arg.kind, ExprKind::Closure(..)); if is_closure != check_closures { continue; } - let formal_ty = formal_tys[i]; - debug!("checking argument {}: {:?} = {:?}", i, arg, formal_ty); - - // The special-cased logic below has three functions: - // 1. Provide as good of an expected type as possible. - let expected = Expectation::rvalue_hint(self, expected_arg_tys[i]); - - let checked_ty = self.check_expr_with_expectation(&arg, expected); - - // 2. Coerce to the most detailed type that could be coerced - // to, which is `expected_ty` if `rvalue_hint` returns an - // `ExpectHasType(expected_ty)`, or the `formal_ty` otherwise. 
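Note: the `AllowTwoPhase::Yes` passed to `demand_coerce` above marks argument coercions as eligible for two-phase borrows; the classic pattern this enables, in plain user code unrelated to the refactor itself:

```rust
fn main() {
    let mut v = vec![1_usize, 2, 3];
    // `push` reserves a mutable borrow of `v` while the argument expression
    // `v.len()` still takes a shared borrow; two-phase borrows at the call
    // site make this well-formed.
    v.push(v.len());
    assert_eq!(v, [1, 2, 3, 3]);
}
```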
- let coerce_ty = expected.only_has_type(self).unwrap_or(formal_ty); - - final_arg_types.push((i, checked_ty, coerce_ty)); - - // Cause selection errors caused by resolving a single argument to point at the - // argument and not the call. This is otherwise redundant with the `demand_coerce` - // call immediately after, but it lets us customize the span pointed to in the - // fulfillment error to be more accurate. - let _ = self.resolve_vars_with_obligations_and_mutate_fulfillment( - coerce_ty, - |errors| { - self.point_at_type_arg_instead_of_call_if_possible(errors, expr); - self.point_at_arg_instead_of_call_if_possible( - errors, - &final_arg_types, - expr, - sp, - args, - ); - }, - ); - - // We're processing function arguments so we definitely want to use - // two-phase borrows. - self.demand_coerce(&arg, checked_ty, coerce_ty, None, AllowTwoPhase::Yes); - - // 3. Relate the expected type and the formal one, - // if the expected type was used for the coercion. - self.demand_suptype(arg.span, formal_ty, coerce_ty); + demand_compatible(idx, &mut final_arg_types); } } @@ -410,7 +425,7 @@ impl<'a, 'tcx> FnCtxt<'a, 'tcx> { MissingCastForVariadicArg { sess, span, ty, cast_ty }.diagnostic().emit() } - for arg in args.iter().skip(expected_arg_count) { + for arg in provided_args.iter().skip(expected_arg_count) { let arg_ty = self.check_expr(&arg); // There are a few types which get autopromoted when passed via varargs diff --git a/library/alloc/src/alloc.rs b/library/alloc/src/alloc.rs index d075658f51a3e..9d4f9af91a5e1 100644 --- a/library/alloc/src/alloc.rs +++ b/library/alloc/src/alloc.rs @@ -348,7 +348,6 @@ extern "Rust" { // This is the magic symbol to call the global alloc error handler. rustc generates // it to call `__rg_oom` if there is a `#[alloc_error_handler]`, or to call the // default implementations below (`__rdl_oom`) otherwise. - #[rustc_allocator_nounwind] fn __rust_alloc_error_handler(size: usize, align: usize) -> !; } @@ -367,7 +366,6 @@ extern "Rust" { #[stable(feature = "global_alloc", since = "1.28.0")] #[rustc_const_unstable(feature = "const_alloc_error", issue = "92523")] #[cfg(all(not(no_global_oom_handling), not(test)))] -#[rustc_allocator_nounwind] #[cold] pub const fn handle_alloc_error(layout: Layout) -> ! { const fn ct_error(_: Layout) -> ! { @@ -398,13 +396,13 @@ pub mod __alloc_error_handler { // if there is no `#[alloc_error_handler]` #[rustc_std_internal_symbol] - pub unsafe extern "C" fn __rdl_oom(size: usize, _align: usize) -> ! { + pub unsafe extern "C-unwind" fn __rdl_oom(size: usize, _align: usize) -> ! { panic!("memory allocation of {} bytes failed", size) } // if there is an `#[alloc_error_handler]` #[rustc_std_internal_symbol] - pub unsafe extern "C" fn __rg_oom(size: usize, align: usize) -> ! { + pub unsafe extern "C-unwind" fn __rg_oom(size: usize, align: usize) -> ! 
{ let layout = unsafe { Layout::from_size_align_unchecked(size, align) }; extern "Rust" { #[lang = "oom"] diff --git a/library/alloc/src/lib.rs b/library/alloc/src/lib.rs index 7e663fab16af5..95da9971993e0 100644 --- a/library/alloc/src/lib.rs +++ b/library/alloc/src/lib.rs @@ -168,6 +168,7 @@ #![cfg_attr(test, feature(test))] #![feature(unboxed_closures)] #![feature(unsized_fn_params)] +#![feature(c_unwind)] // // Rustdoc features: #![feature(doc_cfg)] diff --git a/library/core/src/mem/mod.rs b/library/core/src/mem/mod.rs index 7d005666a74a6..989ec0639cd6b 100644 --- a/library/core/src/mem/mod.rs +++ b/library/core/src/mem/mod.rs @@ -1045,6 +1045,10 @@ pub const fn discriminant<T>(v: &T) -> Discriminant<T> { /// return value is unspecified. Equally, if `T` is an enum with more variants than `usize::MAX` /// the return value is unspecified. Uninhabited variants will be counted. /// +/// Note that an enum may be expanded with additional variants in the future +/// as a non-breaking change, for example if it is marked `#[non_exhaustive]`, +/// which will change the result of this function. +/// /// # Examples /// /// ``` diff --git a/src/bootstrap/bootstrap.py b/src/bootstrap/bootstrap.py index 5235a6b818053..7c36bb264c45e 100644 --- a/src/bootstrap/bootstrap.py +++ b/src/bootstrap/bootstrap.py @@ -13,7 +13,7 @@ import tarfile import tempfile -from time import time +from time import time, sleep # Acquire a lock on the build directory to make sure that # we don't cause a race condition while building @@ -42,8 +42,10 @@ def acquire_lock(build_dir): while True: try: curs.execute("BEGIN EXCLUSIVE") + break except sqlite3.OperationalError: pass + sleep(0.25) return curs except ImportError: print("warning: sqlite3 not available in python, skipping build directory lock") diff --git a/src/librustdoc/passes/collect_intra_doc_links.rs b/src/librustdoc/passes/collect_intra_doc_links.rs index 26ccdb1c87ecc..7953008628204 100644 --- a/src/librustdoc/passes/collect_intra_doc_links.rs +++ b/src/librustdoc/passes/collect_intra_doc_links.rs @@ -13,7 +13,7 @@ use rustc_hir::def::{ PerNS, }; use rustc_hir::def_id::{CrateNum, DefId}; -use rustc_middle::ty::TyCtxt; +use rustc_middle::ty::{Ty, TyCtxt}; use rustc_middle::{bug, span_bug, ty}; use rustc_resolve::ParentScope; use rustc_session::lint::Lint; @@ -618,6 +618,39 @@ impl<'a, 'tcx> LinkCollector<'a, 'tcx> { }) } + /// Convert a PrimitiveType to a Ty, where possible. + /// + /// This is used for resolving trait impls for primitives + fn primitive_type_to_ty(&mut self, prim: PrimitiveType) -> Option<Ty<'tcx>> { + use PrimitiveType::*; + let tcx = self.cx.tcx; + + // FIXME: Only simple types are supported here, see if we can support + // other types such as Tuple, Array, Slice, etc. 
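Note, returning briefly to the `library/alloc` hunks above: switching `__rdl_oom`/`__rg_oom` to `extern "C-unwind"` (together with `#![feature(c_unwind)]`) allows a panicking `#[alloc_error_handler]` to unwind out of those shims. A minimal sketch of the ABI distinction, assuming the nightly `c_unwind` feature of this era:

```rust
#![feature(c_unwind)] // still feature-gated when this change landed

// Under the plain "C" ABI a panic is not allowed to unwind out of the
// function; "C-unwind" opts into propagating it to the caller.
extern "C-unwind" fn may_panic(fail: bool) {
    if fail {
        panic!("allocation-style failure");
    }
}

fn main() {
    let result = std::panic::catch_unwind(|| may_panic(true));
    assert!(result.is_err());
}
```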
+ // See https://github.com/rust-lang/rust/issues/90703#issuecomment-1004263455 + Some(tcx.mk_ty(match prim { + Bool => ty::Bool, + Str => ty::Str, + Char => ty::Char, + Never => ty::Never, + I8 => ty::Int(ty::IntTy::I8), + I16 => ty::Int(ty::IntTy::I16), + I32 => ty::Int(ty::IntTy::I32), + I64 => ty::Int(ty::IntTy::I64), + I128 => ty::Int(ty::IntTy::I128), + Isize => ty::Int(ty::IntTy::Isize), + F32 => ty::Float(ty::FloatTy::F32), + F64 => ty::Float(ty::FloatTy::F64), + U8 => ty::Uint(ty::UintTy::U8), + U16 => ty::Uint(ty::UintTy::U16), + U32 => ty::Uint(ty::UintTy::U32), + U64 => ty::Uint(ty::UintTy::U64), + U128 => ty::Uint(ty::UintTy::U128), + Usize => ty::Uint(ty::UintTy::Usize), + _ => return None, + })) + } + /// Returns: /// - None if no associated item was found /// - Some((_, _, Some(_))) if an item was found and should go through a side channel @@ -632,7 +665,25 @@ impl<'a, 'tcx> LinkCollector<'a, 'tcx> { let tcx = self.cx.tcx; match root_res { - Res::Primitive(prim) => self.resolve_primitive_associated_item(prim, ns, item_name), + Res::Primitive(prim) => { + self.resolve_primitive_associated_item(prim, ns, item_name).or_else(|| { + let assoc_item = self + .primitive_type_to_ty(prim) + .map(|ty| { + resolve_associated_trait_item(ty, module_id, item_name, ns, self.cx) + }) + .flatten(); + + assoc_item.map(|item| { + let kind = item.kind; + let fragment = UrlFragment::from_assoc_item(item_name, kind, false); + // HACK(jynelson): `clean` expects the type, not the associated item + // but the disambiguator logic expects the associated item. + // Store the kind in a side channel so that only the disambiguator logic looks at it. + (root_res, fragment, Some((kind.as_def_kind(), item.def_id))) + }) + }) + } Res::Def(DefKind::TyAlias, did) => { // Resolve the link on the type the alias points to. // FIXME: if the associated item is defined directly on the type alias, @@ -666,8 +717,13 @@ impl<'a, 'tcx> LinkCollector<'a, 'tcx> { // To handle that properly resolve() would have to support // something like [`ambi_fn`](<SomeStruct as SomeTrait>::ambi_fn) .or_else(|| { - let item = - resolve_associated_trait_item(did, module_id, item_name, ns, self.cx); + let item = resolve_associated_trait_item( + tcx.type_of(did), + module_id, + item_name, + ns, + self.cx, + ); debug!("got associated item {:?}", item); item }); @@ -767,12 +823,12 @@ impl<'a, 'tcx> LinkCollector<'a, 'tcx> { /// Given `[std::io::Error::source]`, where `source` is unresolved, this would /// find `std::error::Error::source` and return /// `<io::Error as error::Error>::source`. -fn resolve_associated_trait_item( - did: DefId, +fn resolve_associated_trait_item<'a>( + ty: Ty<'a>, module: DefId, item_name: Symbol, ns: Namespace, - cx: &mut DocContext<'_>, + cx: &mut DocContext<'a>, ) -> Option<ty::AssocItem> { // FIXME: this should also consider blanket impls (`impl<T> X for T`). Unfortunately // `get_auto_trait_and_blanket_impls` is broken because the caching behavior is wrong. In the @@ -780,7 +836,7 @@ fn resolve_associated_trait_item( // Next consider explicit impls: `impl MyTrait for MyType` // Give precedence to inherent impls. 
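Note: `primitive_type_to_ty` above is what lets intra-doc links reach trait impls on primitives; a small user-facing sketch of the kind of link that now resolves (mirroring the new `prim-associated-traits` rustdoc test below):

```rust
/// Parses with [`u32::from_str()`] and compares with [`str::eq()`]; both links
/// now resolve by looking through the primitives' trait impls (`FromStr`,
/// `PartialEq`).
pub fn parse(input: &str) -> Option<u32> {
    input.trim().parse().ok()
}
```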
- let traits = traits_implemented_by(cx, did, module); + let traits = traits_implemented_by(cx, ty, module); debug!("considering traits {:?}", traits); let mut candidates = traits.iter().filter_map(|&trait_| { cx.tcx.associated_items(trait_).find_by_name_and_namespace( @@ -799,7 +855,11 @@ fn resolve_associated_trait_item( /// /// NOTE: this cannot be a query because more traits could be available when more crates are compiled! /// So it is not stable to serialize cross-crate. -fn traits_implemented_by(cx: &mut DocContext<'_>, type_: DefId, module: DefId) -> FxHashSet<DefId> { +fn traits_implemented_by<'a>( + cx: &mut DocContext<'a>, + ty: Ty<'a>, + module: DefId, +) -> FxHashSet<DefId> { let mut resolver = cx.resolver.borrow_mut(); let in_scope_traits = cx.module_trait_cache.entry(module).or_insert_with(|| { resolver.access(|resolver| { @@ -813,7 +873,6 @@ fn traits_implemented_by(cx: &mut DocContext<'_>, type_: DefId, module: DefId) - }); let tcx = cx.tcx; - let ty = tcx.type_of(type_); let iter = in_scope_traits.iter().flat_map(|&trait_| { trace!("considering explicit impl for trait {:?}", trait_); @@ -826,19 +885,10 @@ fn traits_implemented_by(cx: &mut DocContext<'_>, type_: DefId, module: DefId) - "comparing type {} with kind {:?} against type {:?}", impl_type, impl_type.kind(), - type_ + ty ); // Fast path: if this is a primitive simple `==` will work - let saw_impl = impl_type == ty - || match impl_type.kind() { - // Check if these are the same def_id - ty::Adt(def, _) => { - debug!("adt def_id: {:?}", def.did); - def.did == type_ - } - ty::Foreign(def_id) => *def_id == type_, - _ => false, - }; + let saw_impl = impl_type == ty; if saw_impl { Some(trait_) } else { None } }) diff --git a/src/test/run-make-fulldeps/coverage-reports/expected_show_coverage.unused_mod.txt b/src/test/run-make-fulldeps/coverage-reports/expected_show_coverage.unused_mod.txt new file mode 100644 index 0000000000000..d902b7a412f3b --- /dev/null +++ b/src/test/run-make-fulldeps/coverage-reports/expected_show_coverage.unused_mod.txt @@ -0,0 +1,13 @@ +../coverage/lib/unused_mod_helper.rs: + 1| 0|pub fn never_called_function() { + 2| 0| println!("I am never called"); + 3| 0|} + +../coverage/unused_mod.rs: + 1| |#[path = "lib/unused_mod_helper.rs"] + 2| |mod unused_module; + 3| | + 4| 1|fn main() { + 5| 1| println!("hello world!"); + 6| 1|} + diff --git a/src/test/run-make-fulldeps/coverage/lib/unused_mod_helper.rs b/src/test/run-make-fulldeps/coverage/lib/unused_mod_helper.rs new file mode 100644 index 0000000000000..ae1cc1531ed75 --- /dev/null +++ b/src/test/run-make-fulldeps/coverage/lib/unused_mod_helper.rs @@ -0,0 +1,3 @@ +pub fn never_called_function() { + println!("I am never called"); +} diff --git a/src/test/run-make-fulldeps/coverage/unused_mod.rs b/src/test/run-make-fulldeps/coverage/unused_mod.rs new file mode 100644 index 0000000000000..679b4e5318803 --- /dev/null +++ b/src/test/run-make-fulldeps/coverage/unused_mod.rs @@ -0,0 +1,6 @@ +#[path = "lib/unused_mod_helper.rs"] +mod unused_module; + +fn main() { + println!("hello world!"); +} diff --git a/src/test/rustdoc-ui/intra-doc/non-path-primitives.rs b/src/test/rustdoc-ui/intra-doc/non-path-primitives.rs index 75159979e8890..587cbad684864 100644 --- a/src/test/rustdoc-ui/intra-doc/non-path-primitives.rs +++ b/src/test/rustdoc-ui/intra-doc/non-path-primitives.rs @@ -28,7 +28,6 @@ //! [unit::eq] //~ ERROR unresolved //! [tuple::eq] //~ ERROR unresolved //! [fn::eq] //~ ERROR unresolved -//! 
[never::eq] //~ ERROR unresolved // FIXME(#78800): This breaks because it's a blanket impl // (I think? Might break for other reasons too.) diff --git a/src/test/rustdoc-ui/intra-doc/non-path-primitives.stderr b/src/test/rustdoc-ui/intra-doc/non-path-primitives.stderr index 610c830560527..4828a30446355 100644 --- a/src/test/rustdoc-ui/intra-doc/non-path-primitives.stderr +++ b/src/test/rustdoc-ui/intra-doc/non-path-primitives.stderr @@ -53,17 +53,11 @@ error: unresolved link to `fn::eq` LL | //! [fn::eq] | ^^^^^^ the builtin type `fn` has no associated item named `eq` -error: unresolved link to `never::eq` - --> $DIR/non-path-primitives.rs:31:6 - | -LL | //! [never::eq] - | ^^^^^^^^^ the builtin type `never` has no associated item named `eq` - error: unresolved link to `reference::deref` - --> $DIR/non-path-primitives.rs:35:6 + --> $DIR/non-path-primitives.rs:34:6 | LL | //! [reference::deref] | ^^^^^^^^^^^^^^^^ the builtin type `reference` has no associated item named `deref` -error: aborting due to 9 previous errors +error: aborting due to 8 previous errors diff --git a/src/test/rustdoc/intra-doc/prim-associated-traits.rs b/src/test/rustdoc/intra-doc/prim-associated-traits.rs new file mode 100644 index 0000000000000..8639a24f7f386 --- /dev/null +++ b/src/test/rustdoc/intra-doc/prim-associated-traits.rs @@ -0,0 +1,46 @@ +#![feature(never_type)] +use std::str::FromStr; + +// @has 'prim_associated_traits/struct.Number.html' '//a[@href="{{channel}}/std/primitive.f64.html#method.from_str"]' 'f64::from_str()' +// @has 'prim_associated_traits/struct.Number.html' '//a[@href="{{channel}}/std/primitive.f32.html#method.from_str"]' 'f32::from_str()' +// @has 'prim_associated_traits/struct.Number.html' '//a[@href="{{channel}}/std/primitive.isize.html#method.from_str"]' 'isize::from_str()' +// @has 'prim_associated_traits/struct.Number.html' '//a[@href="{{channel}}/std/primitive.i8.html#method.from_str"]' 'i8::from_str()' +// @has 'prim_associated_traits/struct.Number.html' '//a[@href="{{channel}}/std/primitive.i16.html#method.from_str"]' 'i16::from_str()' +// @has 'prim_associated_traits/struct.Number.html' '//a[@href="{{channel}}/std/primitive.i32.html#method.from_str"]' 'i32::from_str()' +// @has 'prim_associated_traits/struct.Number.html' '//a[@href="{{channel}}/std/primitive.i64.html#method.from_str"]' 'i64::from_str()' +// @has 'prim_associated_traits/struct.Number.html' '//a[@href="{{channel}}/std/primitive.i128.html#method.from_str"]' 'i128::from_str()' +// @has 'prim_associated_traits/struct.Number.html' '//a[@href="{{channel}}/std/primitive.usize.html#method.from_str"]' 'usize::from_str()' +// @has 'prim_associated_traits/struct.Number.html' '//a[@href="{{channel}}/std/primitive.u8.html#method.from_str"]' 'u8::from_str()' +// @has 'prim_associated_traits/struct.Number.html' '//a[@href="{{channel}}/std/primitive.u16.html#method.from_str"]' 'u16::from_str()' +// @has 'prim_associated_traits/struct.Number.html' '//a[@href="{{channel}}/std/primitive.u32.html#method.from_str"]' 'u32::from_str()' +// @has 'prim_associated_traits/struct.Number.html' '//a[@href="{{channel}}/std/primitive.u64.html#method.from_str"]' 'u64::from_str()' +// @has 'prim_associated_traits/struct.Number.html' '//a[@href="{{channel}}/std/primitive.u128.html#method.from_str"]' 'u128::from_str()' +// @has 'prim_associated_traits/struct.Number.html' '//a[@href="{{channel}}/std/primitive.char.html#method.from_str"]' 'char::from_str()' +// @has 'prim_associated_traits/struct.Number.html' 
'//a[@href="{{channel}}/std/primitive.bool.html#method.from_str"]' 'bool::from_str()' +// @has 'prim_associated_traits/struct.Number.html' '//a[@href="{{channel}}/std/primitive.str.html#method.eq"]' 'str::eq()' +// @has 'prim_associated_traits/struct.Number.html' '//a[@href="{{channel}}/std/primitive.never.html#method.eq"]' 'never::eq()' +/// [`f64::from_str()`] [`f32::from_str()`] [`isize::from_str()`] [`i8::from_str()`] +/// [`i16::from_str()`] [`i32::from_str()`] [`i64::from_str()`] [`i128::from_str()`] +/// [`u16::from_str()`] [`u32::from_str()`] [`u64::from_str()`] [`u128::from_str()`] +/// [`usize::from_str()`] [`u8::from_str()`] [`char::from_str()`] [`bool::from_str()`] +/// [`str::eq()`] [`never::eq()`] +pub struct Number { + pub f_64: f64, + pub f_32: f32, + pub i_size: isize, + pub i_8: i8, + pub i_16: i16, + pub i_32: i32, + pub i_64: i64, + pub i_128: i128, + pub u_size: usize, + pub u_8: u8, + pub u_16: u16, + pub u_32: u32, + pub u_64: u64, + pub u_128: u128, + pub ch: char, + pub boolean: bool, + pub string: str, + pub n: !, +} diff --git a/src/test/ui/borrowck/issue-92015.rs b/src/test/ui/borrowck/issue-92015.rs new file mode 100644 index 0000000000000..16d651717ff2c --- /dev/null +++ b/src/test/ui/borrowck/issue-92015.rs @@ -0,0 +1,7 @@ +// Regression test for #92105. +// ICE when mutating immutable reference from last statement of a block. + +fn main() { + let foo = Some(&0).unwrap(); + *foo = 1; //~ ERROR cannot assign +} diff --git a/src/test/ui/borrowck/issue-92015.stderr b/src/test/ui/borrowck/issue-92015.stderr new file mode 100644 index 0000000000000..32a65d3b5bb0f --- /dev/null +++ b/src/test/ui/borrowck/issue-92015.stderr @@ -0,0 +1,11 @@ +error[E0594]: cannot assign to `*foo`, which is behind a `&` reference + --> $DIR/issue-92015.rs:6:5 + | +LL | let foo = Some(&0).unwrap(); + | --- help: consider changing this to be a mutable reference: `&mut i32` +LL | *foo = 1; + | ^^^^^^^^ `foo` is a `&` reference, so the data it refers to cannot be written + +error: aborting due to previous error + +For more information about this error, try `rustc --explain E0594`. diff --git a/src/test/ui/c-variadic/variadic-unreachable-arg-error.rs b/src/test/ui/c-variadic/variadic-unreachable-arg-error.rs new file mode 100644 index 0000000000000..f60f6f3e80872 --- /dev/null +++ b/src/test/ui/c-variadic/variadic-unreachable-arg-error.rs @@ -0,0 +1,14 @@ +// check-pass + +#![feature(c_variadic)] + +extern "C" { + fn foo(f: isize, x: u8, ...); +} + +fn main() { + unsafe { + // FIXME: Ideally we could give an unreachable warning + foo(1, loop {}, 1usize); + } +}