From bf7d4534a7ed27f1a0b5c9b53b2af155da33f072 Mon Sep 17 00:00:00 2001 From: Mark-Simulacrum Date: Sat, 10 Dec 2016 20:32:44 -0700 Subject: [PATCH 001/103] Refactor Block into BlockAndBuilder --- src/librustc_trans/adt.rs | 43 +- src/librustc_trans/asm.rs | 2 +- src/librustc_trans/base.rs | 180 ++-- src/librustc_trans/build.rs | 1357 ++++++++++----------------- src/librustc_trans/callee.rs | 54 +- src/librustc_trans/cleanup.rs | 76 +- src/librustc_trans/common.rs | 46 +- src/librustc_trans/debuginfo/mod.rs | 10 +- src/librustc_trans/glue.rs | 170 ++-- src/librustc_trans/intrinsic.rs | 183 ++-- src/librustc_trans/meth.rs | 6 +- src/librustc_trans/mir/block.rs | 121 ++- src/librustc_trans/mir/lvalue.rs | 2 +- src/librustc_trans/mir/mod.rs | 26 +- src/librustc_trans/mir/operand.rs | 6 +- src/librustc_trans/mir/rvalue.rs | 126 ++- src/librustc_trans/mir/statement.rs | 10 +- src/librustc_trans/tvec.rs | 36 +- src/librustc_trans/value.rs | 6 +- 19 files changed, 1012 insertions(+), 1448 deletions(-) diff --git a/src/librustc_trans/adt.rs b/src/librustc_trans/adt.rs index 9c82e25077371..ce1b23c1ce9b6 100644 --- a/src/librustc_trans/adt.rs +++ b/src/librustc_trans/adt.rs @@ -304,7 +304,7 @@ fn struct_llfields<'a, 'tcx>(cx: &CrateContext<'a, 'tcx>, fields: &Vec<Ty<'tcx>> /// Obtain a representation of the discriminant sufficient to translate /// destructuring; this may or may not involve the actual discriminant. -pub fn trans_switch<'blk, 'tcx>(bcx: Block<'blk, 'tcx>, +pub fn trans_switch<'blk, 'tcx>(bcx: &BlockAndBuilder<'blk, 'tcx>, t: Ty<'tcx>, scrutinee: ValueRef, range_assert: bool) @@ -331,7 +331,7 @@ pub fn is_discr_signed<'tcx>(l: &layout::Layout) -> bool { } /// Obtain the actual discriminant of a value. -pub fn trans_get_discr<'blk, 'tcx>(bcx: Block<'blk, 'tcx>, t: Ty<'tcx>, +pub fn trans_get_discr<'blk, 'tcx>(bcx: &BlockAndBuilder<'blk, 'tcx>, t: Ty<'tcx>, scrutinee: ValueRef, cast_to: Option<Type>, range_assert: bool) -> ValueRef { @@ -371,8 +371,12 @@ pub fn trans_get_discr<'blk, 'tcx>(bcx: Block<'blk, 'tcx>, t: Ty<'tcx>, } } -fn struct_wrapped_nullable_bitdiscr(bcx: Block, nndiscr: u64, discrfield: &layout::FieldPath, - scrutinee: ValueRef) -> ValueRef { +fn struct_wrapped_nullable_bitdiscr( + bcx: &BlockAndBuilder, + nndiscr: u64, + discrfield: &layout::FieldPath, + scrutinee: ValueRef +) -> ValueRef { let llptrptr = GEPi(bcx, scrutinee, &discrfield.iter().map(|f| *f as usize).collect::<Vec<_>>()[..]); let llptr = Load(bcx, llptrptr); @@ -381,7 +385,7 @@ fn struct_wrapped_nullable_bitdiscr(bcx: Block, nndiscr: u64, discrfield: &layou } /// Helper for cases where the discriminant is simply loaded. -fn load_discr(bcx: Block, ity: layout::Integer, ptr: ValueRef, min: u64, max: u64, +fn load_discr(bcx: &BlockAndBuilder, ity: layout::Integer, ptr: ValueRef, min: u64, max: u64, range_assert: bool) -> ValueRef { let llty = Type::from_integer(bcx.ccx(), ity); @@ -409,7 +413,7 @@ fn load_discr(bcx: Block, ity: layout::Integer, ptr: ValueRef, min: u64, max: u6 /// discriminant-like value returned by `trans_switch`. /// /// This should ideally be less tightly tied to `_match`. -pub fn trans_case<'blk, 'tcx>(bcx: Block<'blk, 'tcx>, t: Ty<'tcx>, value: Disr) +pub fn trans_case<'blk, 'tcx>(bcx: &BlockAndBuilder<'blk, 'tcx>, t: Ty<'tcx>, value: Disr) -> ValueRef { let l = bcx.ccx().layout_of(t); match *l { @@ -430,7 +434,7 @@ pub fn trans_case<'blk, 'tcx>(bcx: Block<'blk, 'tcx>, t: Ty<'tcx>, value: Disr) /// Set the discriminant for a new value of the given case of the given /// representation.
-pub fn trans_set_discr<'blk, 'tcx>(bcx: Block<'blk, 'tcx>, t: Ty<'tcx>, +pub fn trans_set_discr<'blk, 'tcx>(bcx: &BlockAndBuilder<'blk, 'tcx>, t: Ty<'tcx>, val: ValueRef, to: Disr) { let l = bcx.ccx().layout_of(t); match *l { @@ -461,12 +465,11 @@ pub fn trans_set_discr<'blk, 'tcx>(bcx: Block<'blk, 'tcx>, t: Ty<'tcx>, // Issue #34427: As workaround for LLVM bug on // ARM, use memset of 0 on whole struct rather // than storing null to single target field. - let b = B(bcx); - let llptr = b.pointercast(val, Type::i8(b.ccx).ptr_to()); - let fill_byte = C_u8(b.ccx, 0); - let size = C_uint(b.ccx, nonnull.stride().bytes()); - let align = C_i32(b.ccx, nonnull.align.abi() as i32); - base::call_memset(&b, llptr, fill_byte, size, align, false); + let llptr = bcx.pointercast(val, Type::i8(bcx.ccx()).ptr_to()); + let fill_byte = C_u8(bcx.ccx(), 0); + let size = C_uint(bcx.ccx(), nonnull.stride().bytes()); + let align = C_i32(bcx.ccx(), nonnull.align.abi() as i32); + base::call_memset(bcx, llptr, fill_byte, size, align, false); } else { let path = discrfield.iter().map(|&i| i as usize).collect::<Vec<_>>(); let llptrptr = GEPi(bcx, val, &path[..]); @@ -479,7 +482,7 @@ pub fn trans_set_discr<'blk, 'tcx>(bcx: Block<'blk, 'tcx>, t: Ty<'tcx>, } } -fn target_sets_discr_via_memset<'blk, 'tcx>(bcx: Block<'blk, 'tcx>) -> bool { +fn target_sets_discr_via_memset<'blk, 'tcx>(bcx: &BlockAndBuilder<'blk, 'tcx>) -> bool { bcx.sess().target.target.arch == "arm" || bcx.sess().target.target.arch == "aarch64" } @@ -492,9 +495,9 @@ fn assert_discr_in_range(min: Disr, max: Disr, discr: Disr) { } /// Access a field, at a point when the value's case is known. -pub fn trans_field_ptr<'blk, 'tcx>(bcx: Block<'blk, 'tcx>, t: Ty<'tcx>, +pub fn trans_field_ptr<'blk, 'tcx>(bcx: &BlockAndBuilder<'blk, 'tcx>, t: Ty<'tcx>, val: MaybeSizedValue, discr: Disr, ix: usize) -> ValueRef { - trans_field_ptr_builder(&bcx.build(), t, val, discr, ix) + trans_field_ptr_builder(bcx, t, val, discr, ix) } /// Access a field, at a point when the value's case is known. @@ -530,7 +533,6 @@ pub fn trans_field_ptr_builder<'blk, 'tcx>(bcx: &BlockAndBuilder<'blk, 'tcx>, layout::UntaggedUnion { .. } => { let fields = compute_fields(bcx.ccx(), t, 0, false); let ty = type_of::in_memory_type_of(bcx.ccx(), fields[ix]); - if bcx.is_unreachable() { return C_undef(ty.ptr_to()); } bcx.pointercast(val.value, ty.ptr_to()) } layout::RawNullablePointer { nndiscr, .. } | @@ -540,9 +542,6 @@ pub fn trans_field_ptr_builder<'blk, 'tcx>(bcx: &BlockAndBuilder<'blk, 'tcx>, // (e.g., Result of Either with (), as one side.) let ty = type_of::type_of(bcx.ccx(), nullfields[ix]); assert_eq!(machine::llsize_of_alloc(bcx.ccx(), ty), 0); - // The contents of memory at this pointer can't matter, but use - // the value that's "reasonable" in case of pointer comparison. - if bcx.is_unreachable() { return C_undef(ty.ptr_to()); } bcx.pointercast(val.value, ty.ptr_to()) } layout::RawNullablePointer { nndiscr, .. } => { let nnty = compute_fields(bcx.ccx(), t, nndiscr as usize, false)[0]; assert_eq!(ix, 0); assert_eq!(discr.0, nndiscr); let ty = type_of::type_of(bcx.ccx(), nnty); - if bcx.is_unreachable() { return C_undef(ty.ptr_to()); } bcx.pointercast(val.value, ty.ptr_to()) } layout::StructWrappedNullablePointer { ref nonnull, nndiscr, ..
} => { @@ -569,9 +567,6 @@ fn struct_field_ptr<'blk, 'tcx>(bcx: &BlockAndBuilder<'blk, 'tcx>, let fty = fields[ix]; let ccx = bcx.ccx(); let ll_fty = type_of::in_memory_type_of(bcx.ccx(), fty); - if bcx.is_unreachable() { - return C_undef(ll_fty.ptr_to()); - } let ptr_val = if needs_cast { let fields = st.field_index_by_increasing_offset().map(|i| { diff --git a/src/librustc_trans/asm.rs b/src/librustc_trans/asm.rs index 665e12cbe8795..1e672e9d10955 100644 --- a/src/librustc_trans/asm.rs +++ b/src/librustc_trans/asm.rs @@ -25,7 +25,7 @@ use syntax::ast::AsmDialect; use libc::{c_uint, c_char}; // Take an inline assembly expression and splat it out via LLVM -pub fn trans_inline_asm<'blk, 'tcx>(bcx: Block<'blk, 'tcx>, +pub fn trans_inline_asm<'blk, 'tcx>(bcx: &BlockAndBuilder<'blk, 'tcx>, ia: &hir::InlineAsm, outputs: Vec<(ValueRef, Ty<'tcx>)>, mut inputs: Vec<ValueRef>) { diff --git a/src/librustc_trans/base.rs b/src/librustc_trans/base.rs index c7f21427a0ceb..83b40849e2761 100644 --- a/src/librustc_trans/base.rs +++ b/src/librustc_trans/base.rs @@ -54,11 +54,10 @@ use attributes; use build::*; use builder::{Builder, noname}; use callee::{Callee}; -use common::{Block, C_bool, C_bytes_in_context, C_i32, C_uint}; +use common::{BlockAndBuilder, C_bool, C_bytes_in_context, C_i32, C_uint}; use collector::{self, TransItemCollectionMode}; use common::{C_null, C_struct_in_context, C_u64, C_u8, C_undef}; use common::{CrateContext, FunctionContext}; -use common::{Result}; use common::{fulfill_obligation}; use common::{type_is_zero_size, val_ty}; use common; @@ -174,11 +173,11 @@ impl<'a, 'tcx> Drop for StatRecorder<'a, 'tcx> { } } -pub fn get_meta(bcx: Block, fat_ptr: ValueRef) -> ValueRef { +pub fn get_meta(bcx: &BlockAndBuilder, fat_ptr: ValueRef) -> ValueRef { StructGEP(bcx, fat_ptr, abi::FAT_PTR_EXTRA) } -pub fn get_dataptr(bcx: Block, fat_ptr: ValueRef) -> ValueRef { +pub fn get_dataptr(bcx: &BlockAndBuilder, fat_ptr: ValueRef) -> ValueRef { StructGEP(bcx, fat_ptr, abi::FAT_PTR_ADDR) } @@ -190,7 +189,9 @@ pub fn get_dataptr_builder(b: &Builder, fat_ptr: ValueRef) -> ValueRef { b.struct_gep(fat_ptr, abi::FAT_PTR_ADDR) } -fn require_alloc_fn<'blk, 'tcx>(bcx: Block<'blk, 'tcx>, info_ty: Ty<'tcx>, it: LangItem) -> DefId { +fn require_alloc_fn<'blk, 'tcx>( + bcx: &BlockAndBuilder<'blk, 'tcx>, info_ty: Ty<'tcx>, it: LangItem ) -> DefId { match bcx.tcx().lang_items.require(it) { Ok(id) => id, Err(s) => { @@ -202,21 +203,19 @@ fn require_alloc_fn<'blk, 'tcx>(bcx: Block<'blk, 'tcx>, info_ty: Ty<'tcx>, it: L // The following malloc_raw_dyn* functions allocate a box to contain // a given type, but with a potentially dynamic size.
-pub fn malloc_raw_dyn<'blk, 'tcx>(bcx: Block<'blk, 'tcx>, +pub fn malloc_raw_dyn<'blk, 'tcx>(bcx: &BlockAndBuilder<'blk, 'tcx>, llty_ptr: Type, info_ty: Ty<'tcx>, size: ValueRef, align: ValueRef, debug_loc: DebugLoc) - -> Result<'blk, 'tcx> { + -> ValueRef { let _icx = push_ctxt("malloc_raw_exchange"); // Allocate space: let def_id = require_alloc_fn(bcx, info_ty, ExchangeMallocFnLangItem); - let r = Callee::def(bcx.ccx(), def_id, bcx.tcx().intern_substs(&[])) - .call(bcx, debug_loc, &[size, align], None); - - Result::new(r.bcx, PointerCast(r.bcx, r.val, llty_ptr)) + let r = Callee::def(bcx.ccx(), def_id, bcx.tcx().intern_substs(&[])).reify(bcx.ccx()); + PointerCast(bcx, Call(bcx, r, &[size, align], debug_loc), llty_ptr) } @@ -254,7 +253,7 @@ pub fn bin_op_to_fcmp_predicate(op: hir::BinOp_) -> llvm::RealPredicate { } } -pub fn compare_simd_types<'blk, 'tcx>(bcx: Block<'blk, 'tcx>, +pub fn compare_simd_types<'blk, 'tcx>(bcx: &BlockAndBuilder<'blk, 'tcx>, lhs: ValueRef, rhs: ValueRef, t: Ty<'tcx>, @@ -311,7 +310,7 @@ pub fn unsized_info<'ccx, 'tcx>(ccx: &CrateContext<'ccx, 'tcx>, } /// Coerce `src` to `dst_ty`. `src_ty` must be a thin pointer. -pub fn unsize_thin_ptr<'blk, 'tcx>(bcx: Block<'blk, 'tcx>, +pub fn unsize_thin_ptr<'blk, 'tcx>(bcx: &BlockAndBuilder<'blk, 'tcx>, src: ValueRef, src_ty: Ty<'tcx>, dst_ty: Ty<'tcx>) @@ -336,7 +335,7 @@ pub fn unsize_thin_ptr<'blk, 'tcx>(bcx: Block<'blk, 'tcx>, /// Coerce `src`, which is a reference to a value of type `src_ty`, /// to a value of type `dst_ty` and store the result in `dst` -pub fn coerce_unsized_into<'blk, 'tcx>(bcx: Block<'blk, 'tcx>, +pub fn coerce_unsized_into<'blk, 'tcx>(bcx: &BlockAndBuilder<'blk, 'tcx>, src: ValueRef, src_ty: Ty<'tcx>, dst: ValueRef, @@ -415,7 +414,7 @@ pub fn custom_coerce_unsize_info<'scx, 'tcx>(scx: &SharedCrateContext<'scx, 'tcx } } -pub fn cast_shift_expr_rhs(cx: Block, op: hir::BinOp_, lhs: ValueRef, rhs: ValueRef) -> ValueRef { +pub fn cast_shift_expr_rhs(cx: &BlockAndBuilder, op: hir::BinOp_, lhs: ValueRef, rhs: ValueRef) -> ValueRef { cast_shift_rhs(op, lhs, rhs, |a, b| Trunc(cx, a, b), |a, b| ZExt(cx, a, b)) } @@ -462,38 +461,38 @@ fn cast_shift_rhs(op: hir::BinOp_, } } -pub fn invoke<'blk, 'tcx>(bcx: Block<'blk, 'tcx>, +pub fn invoke<'blk, 'tcx>(bcx: BlockAndBuilder<'blk, 'tcx>, llfn: ValueRef, llargs: &[ValueRef], debug_loc: DebugLoc) - -> (ValueRef, Block<'blk, 'tcx>) { + -> (ValueRef, BlockAndBuilder<'blk, 'tcx>) { let _icx = push_ctxt("invoke_"); - if bcx.unreachable.get() { + if bcx.is_unreachable() { return (C_null(Type::i8(bcx.ccx())), bcx); } - if need_invoke(bcx) { - debug!("invoking {:?} at {:?}", Value(llfn), bcx.llbb); + if need_invoke(&bcx) { + debug!("invoking {:?} at {:?}", Value(llfn), bcx.llbb()); for &llarg in llargs { debug!("arg: {:?}", Value(llarg)); } - let normal_bcx = bcx.fcx.new_block("normal-return"); - let landing_pad = bcx.fcx.get_landing_pad(); + let normal_bcx = bcx.fcx().new_block("normal-return"); + let landing_pad = bcx.fcx().get_landing_pad(); - let llresult = Invoke(bcx, + let llresult = Invoke(&bcx, llfn, &llargs[..], normal_bcx.llbb, landing_pad, debug_loc); - return (llresult, normal_bcx); + return (llresult, normal_bcx.build()); } else { - debug!("calling {:?} at {:?}", Value(llfn), bcx.llbb); + debug!("calling {:?} at {:?}", Value(llfn), bcx.llbb()); for &llarg in llargs { debug!("arg: {:?}", Value(llarg)); } - let llresult = Call(bcx, llfn, &llargs[..], debug_loc); + let llresult = Call(&bcx, llfn, &llargs[..], debug_loc); return (llresult, bcx); } } @@ -507,15 
+506,11 @@ pub fn wants_msvc_seh(sess: &Session) -> bool { sess.target.target.options.is_like_msvc } -pub fn avoid_invoke(bcx: Block) -> bool { - bcx.sess().no_landing_pads() || bcx.lpad().is_some() -} - -pub fn need_invoke(bcx: Block) -> bool { - if avoid_invoke(bcx) { +fn need_invoke(bcx: &BlockAndBuilder) -> bool { + if bcx.sess().no_landing_pads() || bcx.lpad().is_some() { false } else { - bcx.fcx.needs_invoke() + bcx.fcx().needs_invoke() } } @@ -527,11 +522,8 @@ pub fn call_assume<'a, 'tcx>(b: &Builder<'a, 'tcx>, val: ValueRef) { /// Helper for loading values from memory. Does the necessary conversion if the in-memory type /// differs from the type used for SSA values. Also handles various special cases where the type /// gives us better information about what we are loading. -pub fn load_ty<'blk, 'tcx>(cx: Block<'blk, 'tcx>, ptr: ValueRef, t: Ty<'tcx>) -> ValueRef { - if cx.unreachable.get() { - return C_undef(type_of::type_of(cx.ccx(), t)); - } - load_ty_builder(&B(cx), ptr, t) +pub fn load_ty<'blk, 'tcx>(cx: &BlockAndBuilder<'blk, 'tcx>, ptr: ValueRef, t: Ty<'tcx>) -> ValueRef { + load_ty_builder(cx, ptr, t) } pub fn load_ty_builder<'a, 'tcx>(b: &Builder<'a, 'tcx>, ptr: ValueRef, t: Ty<'tcx>) -> ValueRef { @@ -569,8 +561,8 @@ pub fn load_ty_builder<'a, 'tcx>(b: &Builder<'a, 'tcx>, ptr: ValueRef, t: Ty<'tc /// Helper for storing values in memory. Does the necessary conversion if the in-memory type /// differs from the type used for SSA values. -pub fn store_ty<'blk, 'tcx>(cx: Block<'blk, 'tcx>, v: ValueRef, dst: ValueRef, t: Ty<'tcx>) { - if cx.unreachable.get() { +pub fn store_ty<'blk, 'tcx>(cx: &BlockAndBuilder<'blk, 'tcx>, v: ValueRef, dst: ValueRef, t: Ty<'tcx>) { + if cx.is_unreachable() { return; } @@ -585,7 +577,7 @@ pub fn store_ty<'blk, 'tcx>(cx: Block<'blk, 'tcx>, v: ValueRef, dst: ValueRef, t } } -pub fn store_fat_ptr<'blk, 'tcx>(cx: Block<'blk, 'tcx>, +pub fn store_fat_ptr<'blk, 'tcx>(cx: &BlockAndBuilder<'blk, 'tcx>, data: ValueRef, extra: ValueRef, dst: ValueRef, @@ -595,18 +587,18 @@ pub fn store_fat_ptr<'blk, 'tcx>(cx: Block<'blk, 'tcx>, Store(cx, extra, get_meta(cx, dst)); } -pub fn load_fat_ptr<'blk, 'tcx>(cx: Block<'blk, 'tcx>, +pub fn load_fat_ptr<'blk, 'tcx>(cx: &BlockAndBuilder<'blk, 'tcx>, src: ValueRef, ty: Ty<'tcx>) -> (ValueRef, ValueRef) { - if cx.unreachable.get() { + if cx.is_unreachable() { // FIXME: remove me return (Load(cx, get_dataptr(cx, src)), Load(cx, get_meta(cx, src))); } - load_fat_ptr_builder(&B(cx), src, ty) + load_fat_ptr_builder(cx, src, ty) } pub fn load_fat_ptr_builder<'a, 'tcx>( @@ -629,7 +621,7 @@ pub fn load_fat_ptr_builder<'a, 'tcx>( (ptr, meta) } -pub fn from_immediate(bcx: Block, val: ValueRef) -> ValueRef { +pub fn from_immediate(bcx: &BlockAndBuilder, val: ValueRef) -> ValueRef { if val_ty(val) == Type::i1(bcx.ccx()) { ZExt(bcx, val, Type::i8(bcx.ccx())) } else { @@ -637,7 +629,7 @@ pub fn from_immediate(bcx: Block, val: ValueRef) -> ValueRef { } } -pub fn to_immediate(bcx: Block, val: ValueRef, ty: Ty) -> ValueRef { +pub fn to_immediate(bcx: &BlockAndBuilder, val: ValueRef, ty: Ty) -> ValueRef { if ty.is_bool() { Trunc(bcx, val, Type::i1(bcx.ccx())) } else { @@ -645,23 +637,23 @@ pub fn to_immediate(bcx: Block, val: ValueRef, ty: Ty) -> ValueRef { } } -pub fn with_cond<'blk, 'tcx, F>(bcx: Block<'blk, 'tcx>, val: ValueRef, f: F) -> Block<'blk, 'tcx> - where F: FnOnce(Block<'blk, 'tcx>) -> Block<'blk, 'tcx> +pub fn with_cond<'blk, 'tcx, F>( + bcx: BlockAndBuilder<'blk, 'tcx>, val: ValueRef, f: F +) -> BlockAndBuilder<'blk, 'tcx> + 
where F: FnOnce(BlockAndBuilder<'blk, 'tcx>) -> BlockAndBuilder<'blk, 'tcx> { let _icx = push_ctxt("with_cond"); - if bcx.unreachable.get() || common::const_to_opt_uint(val) == Some(0) { + if bcx.is_unreachable() || common::const_to_opt_uint(val) == Some(0) { return bcx; } - let fcx = bcx.fcx; - let next_cx = fcx.new_block("next"); - let cond_cx = fcx.new_block("cond"); - CondBr(bcx, val, cond_cx.llbb, next_cx.llbb, DebugLoc::None); + let fcx = bcx.fcx(); + let next_cx = fcx.new_block("next").build(); + let cond_cx = fcx.new_block("cond").build(); + CondBr(&bcx, val, cond_cx.llbb(), next_cx.llbb(), DebugLoc::None); let after_cx = f(cond_cx); - if !after_cx.terminated.get() { - Br(after_cx, next_cx.llbb, DebugLoc::None); - } + Br(&after_cx, next_cx.llbb(), DebugLoc::None); next_cx } @@ -711,26 +703,25 @@ impl Lifetime { } } -pub fn call_lifetime_start(bcx: Block, ptr: ValueRef) { - if !bcx.unreachable.get() { - Lifetime::Start.call(&bcx.build(), ptr); +pub fn call_lifetime_start(bcx: &BlockAndBuilder, ptr: ValueRef) { + if !bcx.is_unreachable() { + Lifetime::Start.call(bcx, ptr); } } -pub fn call_lifetime_end(bcx: Block, ptr: ValueRef) { - if !bcx.unreachable.get() { - Lifetime::End.call(&bcx.build(), ptr); +pub fn call_lifetime_end(bcx: &BlockAndBuilder, ptr: ValueRef) { + if !bcx.is_unreachable() { + Lifetime::End.call(bcx, ptr); } } // Generates code for resumption of unwind at the end of a landing pad. -pub fn trans_unwind_resume(bcx: Block, lpval: ValueRef) { +pub fn trans_unwind_resume(bcx: &BlockAndBuilder, lpval: ValueRef) { if !bcx.sess().target.target.options.custom_unwind_resume { - Resume(bcx, lpval); + bcx.resume(lpval); } else { let exc_ptr = ExtractValue(bcx, lpval, 0); - bcx.fcx.eh_unwind_resume() - .call(bcx, DebugLoc::None, &[exc_ptr], None); + Call(bcx, bcx.fcx().eh_unwind_resume().reify(bcx.ccx()), &[exc_ptr], DebugLoc::None); } } @@ -752,11 +743,11 @@ pub fn call_memcpy<'bcx, 'tcx>(b: &Builder<'bcx, 'tcx>, b.call(memcpy, &[dst_ptr, src_ptr, size, align, volatile], None); } -pub fn memcpy_ty<'blk, 'tcx>(bcx: Block<'blk, 'tcx>, dst: ValueRef, src: ValueRef, t: Ty<'tcx>) { +pub fn memcpy_ty<'blk, 'tcx>(bcx: &BlockAndBuilder<'blk, 'tcx>, dst: ValueRef, src: ValueRef, t: Ty<'tcx>) { let _icx = push_ctxt("memcpy_ty"); let ccx = bcx.ccx(); - if type_is_zero_size(ccx, t) || bcx.unreachable.get() { + if type_is_zero_size(ccx, t) || bcx.is_unreachable() { return; } @@ -764,7 +755,7 @@ pub fn memcpy_ty<'blk, 'tcx>(bcx: Block<'blk, 'tcx>, dst: ValueRef, src: ValueRe let llty = type_of::type_of(ccx, t); let llsz = llsize_of(ccx, llty); let llalign = type_of::align_of(ccx, t); - call_memcpy(&B(bcx), dst, src, llsz, llalign as u32); + call_memcpy(bcx, dst, src, llsz, llalign as u32); } else if common::type_is_fat_ptr(bcx.tcx(), t) { let (data, extra) = load_fat_ptr(bcx, src, t); store_fat_ptr(bcx, data, extra, dst, t); @@ -773,13 +764,13 @@ pub fn memcpy_ty<'blk, 'tcx>(bcx: Block<'blk, 'tcx>, dst: ValueRef, src: ValueRe } } -pub fn init_zero_mem<'blk, 'tcx>(cx: Block<'blk, 'tcx>, llptr: ValueRef, t: Ty<'tcx>) { - if cx.unreachable.get() { +pub fn init_zero_mem<'blk, 'tcx>(cx: &BlockAndBuilder<'blk, 'tcx>, llptr: ValueRef, t: Ty<'tcx>) { + if cx.is_unreachable() { return; } let _icx = push_ctxt("init_zero_mem"); let bcx = cx; - memfill(&B(bcx), llptr, t, 0); + memfill(bcx, llptr, t, 0); } // Always use this function instead of storing a constant byte to the memory @@ -812,24 +803,17 @@ pub fn call_memset<'bcx, 'tcx>(b: &Builder<'bcx, 'tcx>, b.call(llintrinsicfn, &[ptr, fill_byte, size, 
align, volatile], None); } -pub fn alloc_ty<'blk, 'tcx>(bcx: Block<'blk, 'tcx>, +pub fn alloc_ty<'blk, 'tcx>(bcx: &BlockAndBuilder<'blk, 'tcx>, ty: Ty<'tcx>, name: &str) -> ValueRef { assert!(!ty.has_param_types()); alloca(bcx, type_of::type_of(bcx.ccx(), ty), name) } -pub fn alloca(cx: Block, ty: Type, name: &str) -> ValueRef { +pub fn alloca(cx: &BlockAndBuilder, ty: Type, name: &str) -> ValueRef { let _icx = push_ctxt("alloca"); - if cx.unreachable.get() { - unsafe { - return llvm::LLVMGetUndef(ty.ptr_to().to_ref()); - } - } - DebugLoc::None.apply(cx.fcx); - let result = Alloca(cx, ty, name); - debug!("alloca({:?}) = {:?}", name, result); - result + DebugLoc::None.apply(cx.fcx()); + Alloca(cx, ty, name) } impl<'blk, 'tcx> FunctionContext<'blk, 'tcx> { @@ -894,14 +878,14 @@ impl<'blk, 'tcx> FunctionContext<'blk, 'tcx> { /// Performs setup on a newly created function, creating the entry /// scope block and allocating space for the return pointer. - pub fn init(&'blk self, skip_retptr: bool) -> Block<'blk, 'tcx> { - let entry_bcx = self.new_block("entry-block"); + pub fn init(&'blk self, skip_retptr: bool) -> BlockAndBuilder<'blk, 'tcx> { + let entry_bcx = self.new_block("entry-block").build(); // Use a dummy instruction as the insertion point for all allocas. // This is later removed in FunctionContext::cleanup. self.alloca_insert_pt.set(Some(unsafe { - Load(entry_bcx, C_null(Type::i8p(self.ccx))); - llvm::LLVMGetFirstInstruction(entry_bcx.llbb) + Load(&entry_bcx, C_null(Type::i8p(self.ccx))); + llvm::LLVMGetFirstInstruction(entry_bcx.llbb()) })); if !self.fn_ty.ret.is_ignore() && !skip_retptr { @@ -929,7 +913,7 @@ impl<'blk, 'tcx> FunctionContext<'blk, 'tcx> { /// Ties up the llstaticallocas -> llloadenv -> lltop edges, /// and builds the return block. - pub fn finish(&'blk self, ret_cx: Block<'blk, 'tcx>, + pub fn finish(&'blk self, ret_cx: &BlockAndBuilder<'blk, 'tcx>, ret_debug_loc: DebugLoc) { let _icx = push_ctxt("FunctionContext::finish"); @@ -940,10 +924,9 @@ impl<'blk, 'tcx> FunctionContext<'blk, 'tcx> { } // Builds the return block for a function. 
- pub fn build_return_block(&self, ret_cx: Block<'blk, 'tcx>, + pub fn build_return_block(&self, ret_cx: &BlockAndBuilder<'blk, 'tcx>, ret_debug_location: DebugLoc) { - if self.llretslotptr.get().is_none() || - ret_cx.unreachable.get() || + if self.llretslotptr.get().is_none() || ret_cx.is_unreachable() || self.fn_ty.ret.is_indirect() { return RetVoid(ret_cx, ret_debug_location); } @@ -978,7 +961,7 @@ impl<'blk, 'tcx> FunctionContext<'blk, 'tcx> { assert_eq!(cast_ty, None); let llsz = llsize_of(self.ccx, self.fn_ty.ret.ty); let llalign = llalign_of_min(self.ccx, self.fn_ty.ret.ty); - call_memcpy(&B(ret_cx), get_param(self.llfn, 0), + call_memcpy(&ret_cx, get_param(self.llfn, 0), retslot, llsz, llalign as u32); RetVoid(ret_cx, ret_debug_location) } @@ -1080,23 +1063,22 @@ pub fn trans_ctor_shim<'a, 'tcx>(ccx: &CrateContext<'a, 'tcx>, let mut llarg_idx = fcx.fn_ty.ret.is_indirect() as usize; let mut arg_idx = 0; for (i, arg_ty) in sig.inputs().iter().enumerate() { - let lldestptr = adt::trans_field_ptr(bcx, sig.output(), dest_val, Disr::from(disr), i); + let lldestptr = adt::trans_field_ptr(&bcx, sig.output(), dest_val, Disr::from(disr), i); let arg = &fcx.fn_ty.args[arg_idx]; arg_idx += 1; - let b = &bcx.build(); if common::type_is_fat_ptr(bcx.tcx(), arg_ty) { let meta = &fcx.fn_ty.args[arg_idx]; arg_idx += 1; - arg.store_fn_arg(b, &mut llarg_idx, get_dataptr(bcx, lldestptr)); - meta.store_fn_arg(b, &mut llarg_idx, get_meta(bcx, lldestptr)); + arg.store_fn_arg(&bcx, &mut llarg_idx, get_dataptr(&bcx, lldestptr)); + meta.store_fn_arg(&bcx, &mut llarg_idx, get_meta(&bcx, lldestptr)); } else { - arg.store_fn_arg(b, &mut llarg_idx, lldestptr); + arg.store_fn_arg(&bcx, &mut llarg_idx, lldestptr); } } - adt::trans_set_discr(bcx, sig.output(), dest, disr); + adt::trans_set_discr(&bcx, sig.output(), dest, disr); } - fcx.finish(bcx, DebugLoc::None); + fcx.finish(&bcx, DebugLoc::None); } pub fn llvm_linkage_by_name(name: &str) -> Option<Linkage> { diff --git a/src/librustc_trans/build.rs b/src/librustc_trans/build.rs index 8cd47bd148d0c..bea42950c5512 100644 --- a/src/librustc_trans/build.rs +++ b/src/librustc_trans/build.rs @@ -18,30 +18,12 @@ use llvm::{ValueRef, BasicBlockRef}; use common::*; use syntax_pos::Span; -use builder::Builder; use type_::Type; use value::Value; use debuginfo::DebugLoc; use libc::{c_uint, c_char}; -pub fn terminate(cx: Block, _: &str) { - debug!("terminate({})", cx.to_str()); - cx.terminated.set(true); -} - -pub fn check_not_terminated(cx: Block) { - if cx.terminated.get() { - bug!("already terminated!"); - } -} - -pub fn B<'blk, 'tcx>(cx: Block<'blk, 'tcx>) -> Builder<'blk, 'tcx> { - let b = cx.fcx.ccx.builder(); - b.position_at_end(cx.llbb); - b -} - // The difference between a block being unreachable and being terminated is // somewhat obscure, and has to do with error checking. When a block is // terminated, we're saying that trying to add any further statements in the @@ -50,70 +32,48 @@ pub fn B<'blk, 'tcx>(cx: Block<'blk, 'tcx>) -> Builder<'blk, 'tcx> { // for (panic/break/return statements, call to diverging functions, etc), and // further instructions to the block should simply be ignored.
-pub fn RetVoid(cx: Block, debug_loc: DebugLoc) { - if cx.unreachable.get() { - return; - } - check_not_terminated(cx); - terminate(cx, "RetVoid"); - debug_loc.apply(cx.fcx); - B(cx).ret_void(); +pub fn RetVoid(cx: &BlockAndBuilder, debug_loc: DebugLoc) { + cx.terminate(); + debug_loc.apply(cx.fcx()); + cx.ret_void(); } -pub fn Ret(cx: Block, v: ValueRef, debug_loc: DebugLoc) { - if cx.unreachable.get() { - return; - } - check_not_terminated(cx); - terminate(cx, "Ret"); - debug_loc.apply(cx.fcx); - B(cx).ret(v); +pub fn Ret(cx: &BlockAndBuilder, v: ValueRef, debug_loc: DebugLoc) { + cx.terminate(); + debug_loc.apply(cx.fcx()); + cx.ret(v); } -pub fn AggregateRet(cx: Block, - ret_vals: &[ValueRef], - debug_loc: DebugLoc) { - if cx.unreachable.get() { - return; - } - check_not_terminated(cx); - terminate(cx, "AggregateRet"); - debug_loc.apply(cx.fcx); - B(cx).aggregate_ret(ret_vals); +pub fn AggregateRet(cx: &BlockAndBuilder, + ret_vals: &[ValueRef], + debug_loc: DebugLoc) { + cx.terminate(); + debug_loc.apply(cx.fcx()); + cx.aggregate_ret(ret_vals); } -pub fn Br(cx: Block, dest: BasicBlockRef, debug_loc: DebugLoc) { - if cx.unreachable.get() { - return; - } - check_not_terminated(cx); - terminate(cx, "Br"); - debug_loc.apply(cx.fcx); - B(cx).br(dest); -} -pub fn CondBr(cx: Block, - if_: ValueRef, - then: BasicBlockRef, - else_: BasicBlockRef, - debug_loc: DebugLoc) { - if cx.unreachable.get() { - return; - } - check_not_terminated(cx); - terminate(cx, "CondBr"); - debug_loc.apply(cx.fcx); - B(cx).cond_br(if_, then, else_); +pub fn Br(cx: &BlockAndBuilder, dest: BasicBlockRef, debug_loc: DebugLoc) { + cx.terminate(); + debug_loc.apply(cx.fcx()); + cx.br(dest); } -pub fn Switch(cx: Block, v: ValueRef, else_: BasicBlockRef, num_cases: usize) - -> ValueRef { - if cx.unreachable.get() { return _Undef(v); } - check_not_terminated(cx); - terminate(cx, "Switch"); - B(cx).switch(v, else_, num_cases) +pub fn CondBr(cx: &BlockAndBuilder, + if_: ValueRef, + then: BasicBlockRef, + else_: BasicBlockRef, + debug_loc: DebugLoc) { + cx.terminate(); + debug_loc.apply(cx.fcx()); + cx.cond_br(if_, then, else_); } +pub fn Switch(cx: &BlockAndBuilder, v: ValueRef, else_: BasicBlockRef, num_cases: usize) + -> ValueRef { + cx.terminate(); + cx.switch(v, else_, num_cases) + } + pub fn AddCase(s: ValueRef, on_val: ValueRef, dest: BasicBlockRef) { unsafe { if llvm::LLVMIsUndef(s) == llvm::True { return; } @@ -121,475 +81,340 @@ pub fn AddCase(s: ValueRef, on_val: ValueRef, dest: BasicBlockRef) { } } -pub fn IndirectBr(cx: Block, - addr: ValueRef, - num_dests: usize, - debug_loc: DebugLoc) { - if cx.unreachable.get() { - return; - } - check_not_terminated(cx); - terminate(cx, "IndirectBr"); - debug_loc.apply(cx.fcx); - B(cx).indirect_br(addr, num_dests); -} - -pub fn Invoke(cx: Block, - fn_: ValueRef, - args: &[ValueRef], - then: BasicBlockRef, - catch: BasicBlockRef, - debug_loc: DebugLoc) - -> ValueRef { - if cx.unreachable.get() { - return C_null(Type::i8(cx.ccx())); - } - check_not_terminated(cx); - terminate(cx, "Invoke"); - debug!("Invoke({:?} with arguments ({}))", - Value(fn_), - args.iter().map(|a| { - format!("{:?}", Value(*a)) - }).collect::<Vec<String>>().join(", ")); - debug_loc.apply(cx.fcx); - let bundle = cx.lpad().and_then(|b| b.bundle()); - B(cx).invoke(fn_, args, then, catch, bundle) -} - -pub fn Unreachable(cx: Block) { - if cx.unreachable.get() { - return - } - cx.unreachable.set(true); - if !cx.terminated.get() { - B(cx).unreachable(); - } +pub fn IndirectBr(cx: &BlockAndBuilder, + addr: ValueRef, +
num_dests: usize, + debug_loc: DebugLoc) { + cx.terminate(); + debug_loc.apply(cx.fcx()); + cx.indirect_br(addr, num_dests); } -pub fn _Undef(val: ValueRef) -> ValueRef { - unsafe { - return llvm::LLVMGetUndef(val_ty(val).to_ref()); - } +pub fn Invoke(cx: &BlockAndBuilder, + fn_: ValueRef, + args: &[ValueRef], + then: BasicBlockRef, + catch: BasicBlockRef, + debug_loc: DebugLoc) + -> ValueRef { + cx.terminate(); + debug!("Invoke({:?} with arguments ({}))", + Value(fn_), + args.iter().map(|a| { + format!("{:?}", Value(*a)) + }).collect::<Vec<String>>().join(", ")); + debug_loc.apply(cx.fcx()); + let bundle = cx.lpad().and_then(|b| b.bundle()); + cx.invoke(fn_, args, then, catch, bundle) } /* Arithmetic */ -pub fn Add(cx: Block, - lhs: ValueRef, - rhs: ValueRef, - debug_loc: DebugLoc) - -> ValueRef { - if cx.unreachable.get() { - return _Undef(lhs); +pub fn Add(cx: &BlockAndBuilder, + lhs: ValueRef, + rhs: ValueRef, + debug_loc: DebugLoc) + -> ValueRef { + debug_loc.apply(cx.fcx()); + cx.add(lhs, rhs) } - debug_loc.apply(cx.fcx); - B(cx).add(lhs, rhs) -} -pub fn NSWAdd(cx: Block, - lhs: ValueRef, - rhs: ValueRef, - debug_loc: DebugLoc) - -> ValueRef { - if cx.unreachable.get() { - return _Undef(lhs); +pub fn NSWAdd(cx: &BlockAndBuilder, + lhs: ValueRef, + rhs: ValueRef, + debug_loc: DebugLoc) + -> ValueRef { + debug_loc.apply(cx.fcx()); + cx.nswadd(lhs, rhs) } - debug_loc.apply(cx.fcx); - B(cx).nswadd(lhs, rhs) -} -pub fn NUWAdd(cx: Block, - lhs: ValueRef, - rhs: ValueRef, - debug_loc: DebugLoc) - -> ValueRef { - if cx.unreachable.get() { - return _Undef(lhs); +pub fn NUWAdd(cx: &BlockAndBuilder, + lhs: ValueRef, + rhs: ValueRef, + debug_loc: DebugLoc) + -> ValueRef { + debug_loc.apply(cx.fcx()); + cx.nuwadd(lhs, rhs) } - debug_loc.apply(cx.fcx); - B(cx).nuwadd(lhs, rhs) -} -pub fn FAdd(cx: Block, - lhs: ValueRef, - rhs: ValueRef, - debug_loc: DebugLoc) - -> ValueRef { - if cx.unreachable.get() { - return _Undef(lhs); +pub fn FAdd(cx: &BlockAndBuilder, + lhs: ValueRef, + rhs: ValueRef, + debug_loc: DebugLoc) + -> ValueRef { + debug_loc.apply(cx.fcx()); + cx.fadd(lhs, rhs) } - debug_loc.apply(cx.fcx); - B(cx).fadd(lhs, rhs) -} -pub fn FAddFast(cx: Block, - lhs: ValueRef, - rhs: ValueRef, - debug_loc: DebugLoc) - -> ValueRef { - if cx.unreachable.get() { - return _Undef(lhs); +pub fn FAddFast(cx: &BlockAndBuilder, + lhs: ValueRef, + rhs: ValueRef, + debug_loc: DebugLoc) + -> ValueRef { + debug_loc.apply(cx.fcx()); + cx.fadd_fast(lhs, rhs) } - debug_loc.apply(cx.fcx); - B(cx).fadd_fast(lhs, rhs) -} -pub fn Sub(cx: Block, - lhs: ValueRef, - rhs: ValueRef, - debug_loc: DebugLoc) - -> ValueRef { - if cx.unreachable.get() { - return _Undef(lhs); +pub fn Sub(cx: &BlockAndBuilder, + lhs: ValueRef, + rhs: ValueRef, + debug_loc: DebugLoc) + -> ValueRef { + debug_loc.apply(cx.fcx()); + cx.sub(lhs, rhs) } - debug_loc.apply(cx.fcx); - B(cx).sub(lhs, rhs) -} -pub fn NSWSub(cx: Block, - lhs: ValueRef, - rhs: ValueRef, - debug_loc: DebugLoc) - -> ValueRef { - if cx.unreachable.get() { - return _Undef(lhs); +pub fn NSWSub(cx: &BlockAndBuilder, + lhs: ValueRef, + rhs: ValueRef, + debug_loc: DebugLoc) + -> ValueRef { + debug_loc.apply(cx.fcx()); + cx.nswsub(lhs, rhs) } - debug_loc.apply(cx.fcx); - B(cx).nswsub(lhs, rhs) -} -pub fn NUWSub(cx: Block, - lhs: ValueRef, - rhs: ValueRef, - debug_loc: DebugLoc) - -> ValueRef { - if cx.unreachable.get() { - return _Undef(lhs); +pub fn NUWSub(cx: &BlockAndBuilder, + lhs: ValueRef, + rhs: ValueRef, + debug_loc: DebugLoc) + -> ValueRef { + debug_loc.apply(cx.fcx()); + cx.nuwsub(lhs,
rhs) } - debug_loc.apply(cx.fcx); - B(cx).nuwsub(lhs, rhs) -} -pub fn FSub(cx: Block, - lhs: ValueRef, - rhs: ValueRef, - debug_loc: DebugLoc) - -> ValueRef { - if cx.unreachable.get() { - return _Undef(lhs); +pub fn FSub(cx: &BlockAndBuilder, + lhs: ValueRef, + rhs: ValueRef, + debug_loc: DebugLoc) + -> ValueRef { + debug_loc.apply(cx.fcx()); + cx.fsub(lhs, rhs) } - debug_loc.apply(cx.fcx); - B(cx).fsub(lhs, rhs) -} -pub fn FSubFast(cx: Block, - lhs: ValueRef, - rhs: ValueRef, - debug_loc: DebugLoc) - -> ValueRef { - if cx.unreachable.get() { - return _Undef(lhs); +pub fn FSubFast(cx: &BlockAndBuilder, + lhs: ValueRef, + rhs: ValueRef, + debug_loc: DebugLoc) + -> ValueRef { + debug_loc.apply(cx.fcx()); + cx.fsub_fast(lhs, rhs) } - debug_loc.apply(cx.fcx); - B(cx).fsub_fast(lhs, rhs) -} -pub fn Mul(cx: Block, - lhs: ValueRef, - rhs: ValueRef, - debug_loc: DebugLoc) - -> ValueRef { - if cx.unreachable.get() { - return _Undef(lhs); +pub fn Mul(cx: &BlockAndBuilder, + lhs: ValueRef, + rhs: ValueRef, + debug_loc: DebugLoc) + -> ValueRef { + debug_loc.apply(cx.fcx()); + cx.mul(lhs, rhs) } - debug_loc.apply(cx.fcx); - B(cx).mul(lhs, rhs) -} -pub fn NSWMul(cx: Block, - lhs: ValueRef, - rhs: ValueRef, - debug_loc: DebugLoc) - -> ValueRef { - if cx.unreachable.get() { - return _Undef(lhs); +pub fn NSWMul(cx: &BlockAndBuilder, + lhs: ValueRef, + rhs: ValueRef, + debug_loc: DebugLoc) + -> ValueRef { + debug_loc.apply(cx.fcx()); + cx.nswmul(lhs, rhs) } - debug_loc.apply(cx.fcx); - B(cx).nswmul(lhs, rhs) -} -pub fn NUWMul(cx: Block, - lhs: ValueRef, - rhs: ValueRef, - debug_loc: DebugLoc) - -> ValueRef { - if cx.unreachable.get() { - return _Undef(lhs); +pub fn NUWMul(cx: &BlockAndBuilder, + lhs: ValueRef, + rhs: ValueRef, + debug_loc: DebugLoc) + -> ValueRef { + debug_loc.apply(cx.fcx()); + cx.nuwmul(lhs, rhs) } - debug_loc.apply(cx.fcx); - B(cx).nuwmul(lhs, rhs) -} -pub fn FMul(cx: Block, - lhs: ValueRef, - rhs: ValueRef, - debug_loc: DebugLoc) - -> ValueRef { - if cx.unreachable.get() { - return _Undef(lhs); +pub fn FMul(cx: &BlockAndBuilder, + lhs: ValueRef, + rhs: ValueRef, + debug_loc: DebugLoc) + -> ValueRef { + debug_loc.apply(cx.fcx()); + cx.fmul(lhs, rhs) } - debug_loc.apply(cx.fcx); - B(cx).fmul(lhs, rhs) -} -pub fn FMulFast(cx: Block, - lhs: ValueRef, - rhs: ValueRef, - debug_loc: DebugLoc) - -> ValueRef { - if cx.unreachable.get() { - return _Undef(lhs); +pub fn FMulFast(cx: &BlockAndBuilder, + lhs: ValueRef, + rhs: ValueRef, + debug_loc: DebugLoc) + -> ValueRef { + debug_loc.apply(cx.fcx()); + cx.fmul_fast(lhs, rhs) } - debug_loc.apply(cx.fcx); - B(cx).fmul_fast(lhs, rhs) -} -pub fn UDiv(cx: Block, - lhs: ValueRef, - rhs: ValueRef, - debug_loc: DebugLoc) - -> ValueRef { - if cx.unreachable.get() { - return _Undef(lhs); +pub fn UDiv(cx: &BlockAndBuilder, + lhs: ValueRef, + rhs: ValueRef, + debug_loc: DebugLoc) + -> ValueRef { + debug_loc.apply(cx.fcx()); + cx.udiv(lhs, rhs) } - debug_loc.apply(cx.fcx); - B(cx).udiv(lhs, rhs) -} -pub fn SDiv(cx: Block, - lhs: ValueRef, - rhs: ValueRef, - debug_loc: DebugLoc) - -> ValueRef { - if cx.unreachable.get() { - return _Undef(lhs); +pub fn SDiv(cx: &BlockAndBuilder, + lhs: ValueRef, + rhs: ValueRef, + debug_loc: DebugLoc) + -> ValueRef { + debug_loc.apply(cx.fcx()); + cx.sdiv(lhs, rhs) } - debug_loc.apply(cx.fcx); - B(cx).sdiv(lhs, rhs) -} -pub fn ExactSDiv(cx: Block, - lhs: ValueRef, - rhs: ValueRef, - debug_loc: DebugLoc) - -> ValueRef { - if cx.unreachable.get() { - return _Undef(lhs); +pub fn ExactSDiv(cx: &BlockAndBuilder, + lhs: ValueRef, + 
rhs: ValueRef, + debug_loc: DebugLoc) + -> ValueRef { + debug_loc.apply(cx.fcx()); + cx.exactsdiv(lhs, rhs) } - debug_loc.apply(cx.fcx); - B(cx).exactsdiv(lhs, rhs) -} -pub fn FDiv(cx: Block, - lhs: ValueRef, - rhs: ValueRef, - debug_loc: DebugLoc) - -> ValueRef { - if cx.unreachable.get() { - return _Undef(lhs); +pub fn FDiv(cx: &BlockAndBuilder, + lhs: ValueRef, + rhs: ValueRef, + debug_loc: DebugLoc) + -> ValueRef { + debug_loc.apply(cx.fcx()); + cx.fdiv(lhs, rhs) } - debug_loc.apply(cx.fcx); - B(cx).fdiv(lhs, rhs) -} -pub fn FDivFast(cx: Block, - lhs: ValueRef, - rhs: ValueRef, - debug_loc: DebugLoc) - -> ValueRef { - if cx.unreachable.get() { - return _Undef(lhs); +pub fn FDivFast(cx: &BlockAndBuilder, + lhs: ValueRef, + rhs: ValueRef, + debug_loc: DebugLoc) + -> ValueRef { + debug_loc.apply(cx.fcx()); + cx.fdiv_fast(lhs, rhs) } - debug_loc.apply(cx.fcx); - B(cx).fdiv_fast(lhs, rhs) -} -pub fn URem(cx: Block, - lhs: ValueRef, - rhs: ValueRef, - debug_loc: DebugLoc) - -> ValueRef { - if cx.unreachable.get() { - return _Undef(lhs); +pub fn URem(cx: &BlockAndBuilder, + lhs: ValueRef, + rhs: ValueRef, + debug_loc: DebugLoc) + -> ValueRef { + debug_loc.apply(cx.fcx()); + cx.urem(lhs, rhs) } - debug_loc.apply(cx.fcx); - B(cx).urem(lhs, rhs) -} -pub fn SRem(cx: Block, - lhs: ValueRef, - rhs: ValueRef, - debug_loc: DebugLoc) - -> ValueRef { - if cx.unreachable.get() { - return _Undef(lhs); +pub fn SRem(cx: &BlockAndBuilder, + lhs: ValueRef, + rhs: ValueRef, + debug_loc: DebugLoc) + -> ValueRef { + debug_loc.apply(cx.fcx()); + cx.srem(lhs, rhs) } - debug_loc.apply(cx.fcx); - B(cx).srem(lhs, rhs) -} -pub fn FRem(cx: Block, - lhs: ValueRef, - rhs: ValueRef, - debug_loc: DebugLoc) - -> ValueRef { - if cx.unreachable.get() { - return _Undef(lhs); +pub fn FRem(cx: &BlockAndBuilder, + lhs: ValueRef, + rhs: ValueRef, + debug_loc: DebugLoc) + -> ValueRef { + debug_loc.apply(cx.fcx()); + cx.frem(lhs, rhs) } - debug_loc.apply(cx.fcx); - B(cx).frem(lhs, rhs) -} -pub fn FRemFast(cx: Block, - lhs: ValueRef, - rhs: ValueRef, - debug_loc: DebugLoc) - -> ValueRef { - if cx.unreachable.get() { - return _Undef(lhs); +pub fn FRemFast(cx: &BlockAndBuilder, + lhs: ValueRef, + rhs: ValueRef, + debug_loc: DebugLoc) + -> ValueRef { + debug_loc.apply(cx.fcx()); + cx.frem_fast(lhs, rhs) } - debug_loc.apply(cx.fcx); - B(cx).frem_fast(lhs, rhs) -} -pub fn Shl(cx: Block, - lhs: ValueRef, - rhs: ValueRef, - debug_loc: DebugLoc) - -> ValueRef { - if cx.unreachable.get() { - return _Undef(lhs); +pub fn Shl(cx: &BlockAndBuilder, + lhs: ValueRef, + rhs: ValueRef, + debug_loc: DebugLoc) + -> ValueRef { + debug_loc.apply(cx.fcx()); + cx.shl(lhs, rhs) } - debug_loc.apply(cx.fcx); - B(cx).shl(lhs, rhs) -} -pub fn LShr(cx: Block, - lhs: ValueRef, - rhs: ValueRef, - debug_loc: DebugLoc) - -> ValueRef { - if cx.unreachable.get() { - return _Undef(lhs); +pub fn LShr(cx: &BlockAndBuilder, + lhs: ValueRef, + rhs: ValueRef, + debug_loc: DebugLoc) + -> ValueRef { + debug_loc.apply(cx.fcx()); + cx.lshr(lhs, rhs) } - debug_loc.apply(cx.fcx); - B(cx).lshr(lhs, rhs) -} -pub fn AShr(cx: Block, - lhs: ValueRef, - rhs: ValueRef, - debug_loc: DebugLoc) - -> ValueRef { - if cx.unreachable.get() { - return _Undef(lhs); +pub fn AShr(cx: &BlockAndBuilder, + lhs: ValueRef, + rhs: ValueRef, + debug_loc: DebugLoc) + -> ValueRef { + debug_loc.apply(cx.fcx()); + cx.ashr(lhs, rhs) } - debug_loc.apply(cx.fcx); - B(cx).ashr(lhs, rhs) -} -pub fn And(cx: Block, - lhs: ValueRef, - rhs: ValueRef, - debug_loc: DebugLoc) - -> ValueRef { - if cx.unreachable.get() { 
- return _Undef(lhs); +pub fn And(cx: &BlockAndBuilder, + lhs: ValueRef, + rhs: ValueRef, + debug_loc: DebugLoc) + -> ValueRef { + debug_loc.apply(cx.fcx()); + cx.and(lhs, rhs) } - debug_loc.apply(cx.fcx); - B(cx).and(lhs, rhs) -} -pub fn Or(cx: Block, - lhs: ValueRef, - rhs: ValueRef, - debug_loc: DebugLoc) - -> ValueRef { - if cx.unreachable.get() { - return _Undef(lhs); +pub fn Or(cx: &BlockAndBuilder, + lhs: ValueRef, + rhs: ValueRef, + debug_loc: DebugLoc) + -> ValueRef { + debug_loc.apply(cx.fcx()); + cx.or(lhs, rhs) } - debug_loc.apply(cx.fcx); - B(cx).or(lhs, rhs) -} -pub fn Xor(cx: Block, - lhs: ValueRef, - rhs: ValueRef, - debug_loc: DebugLoc) - -> ValueRef { - if cx.unreachable.get() { - return _Undef(lhs); - } - debug_loc.apply(cx.fcx); - B(cx).xor(lhs, rhs) -} - -pub fn BinOp(cx: Block, - op: Opcode, - lhs: ValueRef, - rhs: ValueRef, - debug_loc: DebugLoc) - -> ValueRef { - if cx.unreachable.get() { - return _Undef(lhs); +pub fn Xor(cx: &BlockAndBuilder, + lhs: ValueRef, + rhs: ValueRef, + debug_loc: DebugLoc) + -> ValueRef { + debug_loc.apply(cx.fcx()); + cx.xor(lhs, rhs) } - debug_loc.apply(cx.fcx); - B(cx).binop(op, lhs, rhs) -} -pub fn Neg(cx: Block, v: ValueRef, debug_loc: DebugLoc) -> ValueRef { - if cx.unreachable.get() { - return _Undef(v); +pub fn BinOp(cx: &BlockAndBuilder, + op: Opcode, + lhs: ValueRef, + rhs: ValueRef, + debug_loc: DebugLoc) + -> ValueRef { + debug_loc.apply(cx.fcx()); + cx.binop(op, lhs, rhs) } - debug_loc.apply(cx.fcx); - B(cx).neg(v) + +pub fn Neg(cx: &BlockAndBuilder, v: ValueRef, debug_loc: DebugLoc) -> ValueRef { + debug_loc.apply(cx.fcx()); + cx.neg(v) } -pub fn NSWNeg(cx: Block, v: ValueRef, debug_loc: DebugLoc) -> ValueRef { - if cx.unreachable.get() { - return _Undef(v); - } - debug_loc.apply(cx.fcx); - B(cx).nswneg(v) +pub fn NSWNeg(cx: &BlockAndBuilder, v: ValueRef, debug_loc: DebugLoc) -> ValueRef { + debug_loc.apply(cx.fcx()); + cx.nswneg(v) } -pub fn NUWNeg(cx: Block, v: ValueRef, debug_loc: DebugLoc) -> ValueRef { - if cx.unreachable.get() { - return _Undef(v); - } - debug_loc.apply(cx.fcx); - B(cx).nuwneg(v) +pub fn NUWNeg(cx: &BlockAndBuilder, v: ValueRef, debug_loc: DebugLoc) -> ValueRef { + debug_loc.apply(cx.fcx()); + cx.nuwneg(v) } -pub fn FNeg(cx: Block, v: ValueRef, debug_loc: DebugLoc) -> ValueRef { - if cx.unreachable.get() { - return _Undef(v); - } - debug_loc.apply(cx.fcx); - B(cx).fneg(v) +pub fn FNeg(cx: &BlockAndBuilder, v: ValueRef, debug_loc: DebugLoc) -> ValueRef { + debug_loc.apply(cx.fcx()); + cx.fneg(v) } -pub fn Not(cx: Block, v: ValueRef, debug_loc: DebugLoc) -> ValueRef { - if cx.unreachable.get() { - return _Undef(v); - } - debug_loc.apply(cx.fcx); - B(cx).not(v) +pub fn Not(cx: &BlockAndBuilder, v: ValueRef, debug_loc: DebugLoc) -> ValueRef { + debug_loc.apply(cx.fcx()); + cx.not(v) } -pub fn Alloca(cx: Block, ty: Type, name: &str) -> ValueRef { - unsafe { - if cx.unreachable.get() { return llvm::LLVMGetUndef(ty.ptr_to().to_ref()); } - AllocaFcx(cx.fcx, ty, name) - } +pub fn Alloca(cx: &BlockAndBuilder, ty: Type, name: &str) -> ValueRef { + AllocaFcx(cx.fcx(), ty, name) } pub fn AllocaFcx(fcx: &FunctionContext, ty: Type, name: &str) -> ValueRef { @@ -599,336 +424,179 @@ pub fn AllocaFcx(fcx: &FunctionContext, ty: Type, name: &str) -> ValueRef { b.alloca(ty, name) } -pub fn Free(cx: Block, pointer_val: ValueRef) { - if cx.unreachable.get() { return; } - B(cx).free(pointer_val) +pub fn Free(cx: &BlockAndBuilder, pointer_val: ValueRef) { + cx.free(pointer_val) } -pub fn Load(cx: Block, pointer_val: ValueRef) 
-> ValueRef { - unsafe { - let ccx = cx.fcx.ccx; - if cx.unreachable.get() { - let ty = val_ty(pointer_val); - let eltty = if ty.kind() == llvm::Array { - ty.element_type() - } else { - ccx.int_type() - }; - return llvm::LLVMGetUndef(eltty.to_ref()); - } - B(cx).load(pointer_val) - } +pub fn Load(cx: &BlockAndBuilder, pointer_val: ValueRef) -> ValueRef { + cx.load(pointer_val) } -pub fn VolatileLoad(cx: Block, pointer_val: ValueRef) -> ValueRef { - unsafe { - if cx.unreachable.get() { - return llvm::LLVMGetUndef(Type::nil(cx.ccx()).to_ref()); - } - B(cx).volatile_load(pointer_val) - } +pub fn VolatileLoad(cx: &BlockAndBuilder, pointer_val: ValueRef) -> ValueRef { + cx.volatile_load(pointer_val) } -pub fn AtomicLoad(cx: Block, pointer_val: ValueRef, order: AtomicOrdering) -> ValueRef { - unsafe { - let ccx = cx.fcx.ccx; - if cx.unreachable.get() { - return llvm::LLVMGetUndef(ccx.int_type().to_ref()); - } - B(cx).atomic_load(pointer_val, order) - } +pub fn AtomicLoad(cx: &BlockAndBuilder, pointer_val: ValueRef, order: AtomicOrdering) -> ValueRef { + cx.atomic_load(pointer_val, order) } -pub fn LoadRangeAssert(cx: Block, pointer_val: ValueRef, lo: u64, - hi: u64, signed: llvm::Bool) -> ValueRef { - if cx.unreachable.get() { - let ccx = cx.fcx.ccx; - let ty = val_ty(pointer_val); - let eltty = if ty.kind() == llvm::Array { - ty.element_type() - } else { - ccx.int_type() - }; - unsafe { - llvm::LLVMGetUndef(eltty.to_ref()) - } - } else { - B(cx).load_range_assert(pointer_val, lo, hi, signed) - } +pub fn LoadRangeAssert(cx: &BlockAndBuilder, pointer_val: ValueRef, lo: u64, + hi: u64, signed: llvm::Bool) -> ValueRef { + cx.load_range_assert(pointer_val, lo, hi, signed) } -pub fn LoadNonNull(cx: Block, ptr: ValueRef) -> ValueRef { - if cx.unreachable.get() { - let ccx = cx.fcx.ccx; - let ty = val_ty(ptr); - let eltty = if ty.kind() == llvm::Array { - ty.element_type() - } else { - ccx.int_type() - }; - unsafe { - llvm::LLVMGetUndef(eltty.to_ref()) - } - } else { - B(cx).load_nonnull(ptr) - } +pub fn LoadNonNull(cx: &BlockAndBuilder, ptr: ValueRef) -> ValueRef { + cx.load_nonnull(ptr) } -pub fn Store(cx: Block, val: ValueRef, ptr: ValueRef) -> ValueRef { - if cx.unreachable.get() { return C_nil(cx.ccx()); } - B(cx).store(val, ptr) +pub fn Store(cx: &BlockAndBuilder, val: ValueRef, ptr: ValueRef) -> ValueRef { + cx.store(val, ptr) } -pub fn VolatileStore(cx: Block, val: ValueRef, ptr: ValueRef) -> ValueRef { - if cx.unreachable.get() { return C_nil(cx.ccx()); } - B(cx).volatile_store(val, ptr) +pub fn VolatileStore(cx: &BlockAndBuilder, val: ValueRef, ptr: ValueRef) -> ValueRef { + cx.volatile_store(val, ptr) } -pub fn AtomicStore(cx: Block, val: ValueRef, ptr: ValueRef, order: AtomicOrdering) { - if cx.unreachable.get() { return; } - B(cx).atomic_store(val, ptr, order) +pub fn AtomicStore(cx: &BlockAndBuilder, val: ValueRef, ptr: ValueRef, order: AtomicOrdering) { + cx.atomic_store(val, ptr, order) } -pub fn GEP(cx: Block, pointer: ValueRef, indices: &[ValueRef]) -> ValueRef { - unsafe { - if cx.unreachable.get() { - return llvm::LLVMGetUndef(Type::nil(cx.ccx()).ptr_to().to_ref()); - } - B(cx).gep(pointer, indices) - } +pub fn GEP(cx: &BlockAndBuilder, pointer: ValueRef, indices: &[ValueRef]) -> ValueRef { + cx.gep(pointer, indices) } // Simple wrapper around GEP that takes an array of ints and wraps them // in C_i32() #[inline] -pub fn GEPi(cx: Block, base: ValueRef, ixs: &[usize]) -> ValueRef { - unsafe { - if cx.unreachable.get() { - return 
llvm::LLVMGetUndef(Type::nil(cx.ccx()).ptr_to().to_ref()); - } - B(cx).gepi(base, ixs) - } +pub fn GEPi(cx: &BlockAndBuilder, base: ValueRef, ixs: &[usize]) -> ValueRef { + cx.gepi(base, ixs) } -pub fn InBoundsGEP(cx: Block, pointer: ValueRef, indices: &[ValueRef]) -> ValueRef { - unsafe { - if cx.unreachable.get() { - return llvm::LLVMGetUndef(Type::nil(cx.ccx()).ptr_to().to_ref()); - } - B(cx).inbounds_gep(pointer, indices) - } +pub fn InBoundsGEP(cx: &BlockAndBuilder, pointer: ValueRef, indices: &[ValueRef]) -> ValueRef { + cx.inbounds_gep(pointer, indices) } -pub fn StructGEP(cx: Block, pointer: ValueRef, idx: usize) -> ValueRef { - unsafe { - if cx.unreachable.get() { - return llvm::LLVMGetUndef(Type::nil(cx.ccx()).ptr_to().to_ref()); - } - B(cx).struct_gep(pointer, idx) - } +pub fn StructGEP(cx: &BlockAndBuilder, pointer: ValueRef, idx: usize) -> ValueRef { + cx.struct_gep(pointer, idx) } -pub fn GlobalString(cx: Block, _str: *const c_char) -> ValueRef { - unsafe { - if cx.unreachable.get() { - return llvm::LLVMGetUndef(Type::i8p(cx.ccx()).to_ref()); - } - B(cx).global_string(_str) - } +pub fn GlobalString(cx: &BlockAndBuilder, _str: *const c_char) -> ValueRef { + cx.global_string(_str) } -pub fn GlobalStringPtr(cx: Block, _str: *const c_char) -> ValueRef { - unsafe { - if cx.unreachable.get() { - return llvm::LLVMGetUndef(Type::i8p(cx.ccx()).to_ref()); - } - B(cx).global_string_ptr(_str) - } +pub fn GlobalStringPtr(cx: &BlockAndBuilder, _str: *const c_char) -> ValueRef { + cx.global_string_ptr(_str) } /* Casts */ -pub fn Trunc(cx: Block, val: ValueRef, dest_ty: Type) -> ValueRef { - unsafe { - if cx.unreachable.get() { return llvm::LLVMGetUndef(dest_ty.to_ref()); } - B(cx).trunc(val, dest_ty) - } +pub fn Trunc(cx: &BlockAndBuilder, val: ValueRef, dest_ty: Type) -> ValueRef { + cx.trunc(val, dest_ty) } -pub fn ZExt(cx: Block, val: ValueRef, dest_ty: Type) -> ValueRef { - unsafe { - if cx.unreachable.get() { return llvm::LLVMGetUndef(dest_ty.to_ref()); } - B(cx).zext(val, dest_ty) - } +pub fn ZExt(cx: &BlockAndBuilder, val: ValueRef, dest_ty: Type) -> ValueRef { + cx.zext(val, dest_ty) } -pub fn SExt(cx: Block, val: ValueRef, dest_ty: Type) -> ValueRef { - unsafe { - if cx.unreachable.get() { return llvm::LLVMGetUndef(dest_ty.to_ref()); } - B(cx).sext(val, dest_ty) - } +pub fn SExt(cx: &BlockAndBuilder, val: ValueRef, dest_ty: Type) -> ValueRef { + cx.sext(val, dest_ty) } -pub fn FPToUI(cx: Block, val: ValueRef, dest_ty: Type) -> ValueRef { - unsafe { - if cx.unreachable.get() { return llvm::LLVMGetUndef(dest_ty.to_ref()); } - B(cx).fptoui(val, dest_ty) - } +pub fn FPToUI(cx: &BlockAndBuilder, val: ValueRef, dest_ty: Type) -> ValueRef { + cx.fptoui(val, dest_ty) } -pub fn FPToSI(cx: Block, val: ValueRef, dest_ty: Type) -> ValueRef { - unsafe { - if cx.unreachable.get() { return llvm::LLVMGetUndef(dest_ty.to_ref()); } - B(cx).fptosi(val, dest_ty) - } +pub fn FPToSI(cx: &BlockAndBuilder, val: ValueRef, dest_ty: Type) -> ValueRef { + cx.fptosi(val, dest_ty) } -pub fn UIToFP(cx: Block, val: ValueRef, dest_ty: Type) -> ValueRef { - unsafe { - if cx.unreachable.get() { return llvm::LLVMGetUndef(dest_ty.to_ref()); } - B(cx).uitofp(val, dest_ty) - } +pub fn UIToFP(cx: &BlockAndBuilder, val: ValueRef, dest_ty: Type) -> ValueRef { + cx.uitofp(val, dest_ty) } -pub fn SIToFP(cx: Block, val: ValueRef, dest_ty: Type) -> ValueRef { - unsafe { - if cx.unreachable.get() { return llvm::LLVMGetUndef(dest_ty.to_ref()); } - B(cx).sitofp(val, dest_ty) - } +pub fn SIToFP(cx: &BlockAndBuilder, val: ValueRef, 
dest_ty: Type) -> ValueRef { + cx.sitofp(val, dest_ty) } -pub fn FPTrunc(cx: Block, val: ValueRef, dest_ty: Type) -> ValueRef { - unsafe { - if cx.unreachable.get() { return llvm::LLVMGetUndef(dest_ty.to_ref()); } - B(cx).fptrunc(val, dest_ty) - } +pub fn FPTrunc(cx: &BlockAndBuilder, val: ValueRef, dest_ty: Type) -> ValueRef { + cx.fptrunc(val, dest_ty) } -pub fn FPExt(cx: Block, val: ValueRef, dest_ty: Type) -> ValueRef { - unsafe { - if cx.unreachable.get() { return llvm::LLVMGetUndef(dest_ty.to_ref()); } - B(cx).fpext(val, dest_ty) - } +pub fn FPExt(cx: &BlockAndBuilder, val: ValueRef, dest_ty: Type) -> ValueRef { + cx.fpext(val, dest_ty) } -pub fn PtrToInt(cx: Block, val: ValueRef, dest_ty: Type) -> ValueRef { - unsafe { - if cx.unreachable.get() { return llvm::LLVMGetUndef(dest_ty.to_ref()); } - B(cx).ptrtoint(val, dest_ty) - } +pub fn PtrToInt(cx: &BlockAndBuilder, val: ValueRef, dest_ty: Type) -> ValueRef { + cx.ptrtoint(val, dest_ty) } -pub fn IntToPtr(cx: Block, val: ValueRef, dest_ty: Type) -> ValueRef { - unsafe { - if cx.unreachable.get() { return llvm::LLVMGetUndef(dest_ty.to_ref()); } - B(cx).inttoptr(val, dest_ty) - } +pub fn IntToPtr(cx: &BlockAndBuilder, val: ValueRef, dest_ty: Type) -> ValueRef { + cx.inttoptr(val, dest_ty) } -pub fn BitCast(cx: Block, val: ValueRef, dest_ty: Type) -> ValueRef { - unsafe { - if cx.unreachable.get() { return llvm::LLVMGetUndef(dest_ty.to_ref()); } - B(cx).bitcast(val, dest_ty) - } +pub fn BitCast(cx: &BlockAndBuilder, val: ValueRef, dest_ty: Type) -> ValueRef { + cx.bitcast(val, dest_ty) } -pub fn ZExtOrBitCast(cx: Block, val: ValueRef, dest_ty: Type) -> ValueRef { - unsafe { - if cx.unreachable.get() { return llvm::LLVMGetUndef(dest_ty.to_ref()); } - B(cx).zext_or_bitcast(val, dest_ty) - } +pub fn ZExtOrBitCast(cx: &BlockAndBuilder, val: ValueRef, dest_ty: Type) -> ValueRef { + cx.zext_or_bitcast(val, dest_ty) } -pub fn SExtOrBitCast(cx: Block, val: ValueRef, dest_ty: Type) -> ValueRef { - unsafe { - if cx.unreachable.get() { return llvm::LLVMGetUndef(dest_ty.to_ref()); } - B(cx).sext_or_bitcast(val, dest_ty) - } +pub fn SExtOrBitCast(cx: &BlockAndBuilder, val: ValueRef, dest_ty: Type) -> ValueRef { + cx.sext_or_bitcast(val, dest_ty) } -pub fn TruncOrBitCast(cx: Block, val: ValueRef, dest_ty: Type) -> ValueRef { - unsafe { - if cx.unreachable.get() { return llvm::LLVMGetUndef(dest_ty.to_ref()); } - B(cx).trunc_or_bitcast(val, dest_ty) - } +pub fn TruncOrBitCast(cx: &BlockAndBuilder, val: ValueRef, dest_ty: Type) -> ValueRef { + cx.trunc_or_bitcast(val, dest_ty) } -pub fn Cast(cx: Block, op: Opcode, val: ValueRef, dest_ty: Type, - _: *const u8) - -> ValueRef { - unsafe { - if cx.unreachable.get() { return llvm::LLVMGetUndef(dest_ty.to_ref()); } - B(cx).cast(op, val, dest_ty) +pub fn Cast(cx: &BlockAndBuilder, op: Opcode, val: ValueRef, dest_ty: Type, + _: *const u8) + -> ValueRef { + cx.cast(op, val, dest_ty) } -} -pub fn PointerCast(cx: Block, val: ValueRef, dest_ty: Type) -> ValueRef { - unsafe { - if cx.unreachable.get() { return llvm::LLVMGetUndef(dest_ty.to_ref()); } - B(cx).pointercast(val, dest_ty) - } +pub fn PointerCast(cx: &BlockAndBuilder, val: ValueRef, dest_ty: Type) -> ValueRef { + cx.pointercast(val, dest_ty) } -pub fn IntCast(cx: Block, val: ValueRef, dest_ty: Type) -> ValueRef { - unsafe { - if cx.unreachable.get() { return llvm::LLVMGetUndef(dest_ty.to_ref()); } - B(cx).intcast(val, dest_ty) - } +pub fn IntCast(cx: &BlockAndBuilder, val: ValueRef, dest_ty: Type) -> ValueRef { + cx.intcast(val, dest_ty) } -pub fn 
FPCast(cx: Block, val: ValueRef, dest_ty: Type) -> ValueRef { - unsafe { - if cx.unreachable.get() { return llvm::LLVMGetUndef(dest_ty.to_ref()); } - B(cx).fpcast(val, dest_ty) - } +pub fn FPCast(cx: &BlockAndBuilder, val: ValueRef, dest_ty: Type) -> ValueRef { + cx.fpcast(val, dest_ty) } /* Comparisons */ -pub fn ICmp(cx: Block, - op: IntPredicate, - lhs: ValueRef, - rhs: ValueRef, - debug_loc: DebugLoc) - -> ValueRef { - unsafe { - if cx.unreachable.get() { - return llvm::LLVMGetUndef(Type::i1(cx.ccx()).to_ref()); - } - debug_loc.apply(cx.fcx); - B(cx).icmp(op, lhs, rhs) +pub fn ICmp(cx: &BlockAndBuilder, + op: IntPredicate, + lhs: ValueRef, + rhs: ValueRef, + debug_loc: DebugLoc) + -> ValueRef { + debug_loc.apply(cx.fcx()); + cx.icmp(op, lhs, rhs) } -} -pub fn FCmp(cx: Block, - op: RealPredicate, - lhs: ValueRef, - rhs: ValueRef, - debug_loc: DebugLoc) - -> ValueRef { - unsafe { - if cx.unreachable.get() { - return llvm::LLVMGetUndef(Type::i1(cx.ccx()).to_ref()); - } - debug_loc.apply(cx.fcx); - B(cx).fcmp(op, lhs, rhs) +pub fn FCmp(cx: &BlockAndBuilder, + op: RealPredicate, + lhs: ValueRef, + rhs: ValueRef, + debug_loc: DebugLoc) + -> ValueRef { + debug_loc.apply(cx.fcx()); + cx.fcmp(op, lhs, rhs) } -} /* Miscellaneous instructions */ -pub fn EmptyPhi(cx: Block, ty: Type) -> ValueRef { - unsafe { - if cx.unreachable.get() { return llvm::LLVMGetUndef(ty.to_ref()); } - B(cx).empty_phi(ty) - } +pub fn EmptyPhi(cx: &BlockAndBuilder, ty: Type) -> ValueRef { + cx.empty_phi(ty) } -pub fn Phi(cx: Block, ty: Type, vals: &[ValueRef], - bbs: &[BasicBlockRef]) -> ValueRef { - unsafe { - if cx.unreachable.get() { return llvm::LLVMGetUndef(ty.to_ref()); } - B(cx).phi(ty, vals, bbs) - } +pub fn Phi(cx: &BlockAndBuilder, ty: Type, vals: &[ValueRef], bbs: &[BasicBlockRef]) -> ValueRef { + cx.phi(ty, vals, bbs) } pub fn AddIncomingToPhi(phi: ValueRef, val: ValueRef, bb: BasicBlockRef) { @@ -938,230 +606,151 @@ pub fn AddIncomingToPhi(phi: ValueRef, val: ValueRef, bb: BasicBlockRef) { } } -pub fn _UndefReturn(cx: Block, fn_: ValueRef) -> ValueRef { - unsafe { - let ccx = cx.fcx.ccx; - let ty = val_ty(fn_); - let retty = if ty.kind() == llvm::Function { - ty.return_type() - } else { - ccx.int_type() - }; - B(cx).count_insn("ret_undef"); - llvm::LLVMGetUndef(retty.to_ref()) - } -} - -pub fn add_span_comment(cx: Block, sp: Span, text: &str) { - B(cx).add_span_comment(sp, text) +pub fn add_span_comment(cx: &BlockAndBuilder, sp: Span, text: &str) { + cx.add_span_comment(sp, text) } -pub fn add_comment(cx: Block, text: &str) { - B(cx).add_comment(text) +pub fn add_comment(cx: &BlockAndBuilder, text: &str) { + cx.add_comment(text) } -pub fn InlineAsmCall(cx: Block, asm: *const c_char, cons: *const c_char, - inputs: &[ValueRef], output: Type, - volatile: bool, alignstack: bool, - dia: AsmDialect) -> ValueRef { - B(cx).inline_asm_call(asm, cons, inputs, output, volatile, alignstack, dia) +pub fn InlineAsmCall(cx: &BlockAndBuilder, asm: *const c_char, cons: *const c_char, + inputs: &[ValueRef], output: Type, + volatile: bool, alignstack: bool, + dia: AsmDialect) -> ValueRef { + cx.inline_asm_call(asm, cons, inputs, output, volatile, alignstack, dia) } -pub fn Call(cx: Block, - fn_: ValueRef, - args: &[ValueRef], - debug_loc: DebugLoc) - -> ValueRef { - if cx.unreachable.get() { - return _UndefReturn(cx, fn_); +pub fn Call(cx: &BlockAndBuilder, + fn_: ValueRef, + args: &[ValueRef], + debug_loc: DebugLoc) + -> ValueRef { + debug_loc.apply(cx.fcx()); + let bundle = cx.lpad().and_then(|b| b.bundle()); + cx.call(fn_, 
args, bundle) } - debug_loc.apply(cx.fcx); - let bundle = cx.lpad.get().and_then(|b| b.bundle()); - B(cx).call(fn_, args, bundle) -} - -pub fn AtomicFence(cx: Block, order: AtomicOrdering, scope: SynchronizationScope) { - if cx.unreachable.get() { return; } - B(cx).atomic_fence(order, scope) -} -pub fn Select(cx: Block, if_: ValueRef, then: ValueRef, else_: ValueRef) -> ValueRef { - if cx.unreachable.get() { return _Undef(then); } - B(cx).select(if_, then, else_) +pub fn AtomicFence(cx: &BlockAndBuilder, order: AtomicOrdering, scope: SynchronizationScope) { + cx.atomic_fence(order, scope) } -pub fn VAArg(cx: Block, list: ValueRef, ty: Type) -> ValueRef { - unsafe { - if cx.unreachable.get() { return llvm::LLVMGetUndef(ty.to_ref()); } - B(cx).va_arg(list, ty) - } +pub fn Select(cx: &BlockAndBuilder, if_: ValueRef, then: ValueRef, else_: ValueRef) -> ValueRef { + cx.select(if_, then, else_) } -pub fn ExtractElement(cx: Block, vec_val: ValueRef, index: ValueRef) -> ValueRef { - unsafe { - if cx.unreachable.get() { - return llvm::LLVMGetUndef(Type::nil(cx.ccx()).to_ref()); - } - B(cx).extract_element(vec_val, index) - } +pub fn VAArg(cx: &BlockAndBuilder, list: ValueRef, ty: Type) -> ValueRef { + cx.va_arg(list, ty) } -pub fn InsertElement(cx: Block, vec_val: ValueRef, elt_val: ValueRef, - index: ValueRef) -> ValueRef { - unsafe { - if cx.unreachable.get() { - return llvm::LLVMGetUndef(Type::nil(cx.ccx()).to_ref()); - } - B(cx).insert_element(vec_val, elt_val, index) - } +pub fn ExtractElement(cx: &BlockAndBuilder, vec_val: ValueRef, index: ValueRef) -> ValueRef { + cx.extract_element(vec_val, index) } -pub fn ShuffleVector(cx: Block, v1: ValueRef, v2: ValueRef, - mask: ValueRef) -> ValueRef { - unsafe { - if cx.unreachable.get() { - return llvm::LLVMGetUndef(Type::nil(cx.ccx()).to_ref()); - } - B(cx).shuffle_vector(v1, v2, mask) - } +pub fn InsertElement(cx: &BlockAndBuilder, vec_val: ValueRef, elt_val: ValueRef, + index: ValueRef) -> ValueRef { + cx.insert_element(vec_val, elt_val, index) } -pub fn VectorSplat(cx: Block, num_elts: usize, elt_val: ValueRef) -> ValueRef { - unsafe { - if cx.unreachable.get() { - return llvm::LLVMGetUndef(Type::nil(cx.ccx()).to_ref()); - } - B(cx).vector_splat(num_elts, elt_val) - } +pub fn ShuffleVector(cx: &BlockAndBuilder, v1: ValueRef, v2: ValueRef, + mask: ValueRef) -> ValueRef { + cx.shuffle_vector(v1, v2, mask) } -pub fn ExtractValue(cx: Block, agg_val: ValueRef, index: usize) -> ValueRef { - unsafe { - if cx.unreachable.get() { - return llvm::LLVMGetUndef(Type::nil(cx.ccx()).to_ref()); - } - B(cx).extract_value(agg_val, index) - } +pub fn VectorSplat(cx: &BlockAndBuilder, num_elts: usize, elt_val: ValueRef) -> ValueRef { + cx.vector_splat(num_elts, elt_val) } -pub fn InsertValue(cx: Block, agg_val: ValueRef, elt_val: ValueRef, index: usize) -> ValueRef { - unsafe { - if cx.unreachable.get() { - return llvm::LLVMGetUndef(Type::nil(cx.ccx()).to_ref()); - } - B(cx).insert_value(agg_val, elt_val, index) - } +pub fn ExtractValue(cx: &BlockAndBuilder, agg_val: ValueRef, index: usize) -> ValueRef { + cx.extract_value(agg_val, index) } -pub fn IsNull(cx: Block, val: ValueRef) -> ValueRef { - unsafe { - if cx.unreachable.get() { - return llvm::LLVMGetUndef(Type::i1(cx.ccx()).to_ref()); - } - B(cx).is_null(val) - } +pub fn InsertValue(cx: &BlockAndBuilder, agg_val: ValueRef, elt_val: ValueRef, index: usize) -> ValueRef { + cx.insert_value(agg_val, elt_val, index) } -pub fn IsNotNull(cx: Block, val: ValueRef) -> ValueRef { - unsafe { - if cx.unreachable.get() { - 
return llvm::LLVMGetUndef(Type::i1(cx.ccx()).to_ref()); - } - B(cx).is_not_null(val) - } +pub fn IsNull(cx: &BlockAndBuilder, val: ValueRef) -> ValueRef { + cx.is_null(val) } -pub fn PtrDiff(cx: Block, lhs: ValueRef, rhs: ValueRef) -> ValueRef { - unsafe { - let ccx = cx.fcx.ccx; - if cx.unreachable.get() { return llvm::LLVMGetUndef(ccx.int_type().to_ref()); } - B(cx).ptrdiff(lhs, rhs) - } +pub fn IsNotNull(cx: &BlockAndBuilder, val: ValueRef) -> ValueRef { + cx.is_not_null(val) } -pub fn Trap(cx: Block) { - if cx.unreachable.get() { return; } - B(cx).trap(); +pub fn PtrDiff(cx: &BlockAndBuilder, lhs: ValueRef, rhs: ValueRef) -> ValueRef { + cx.ptrdiff(lhs, rhs) } -pub fn LandingPad(cx: Block, ty: Type, pers_fn: ValueRef, - num_clauses: usize) -> ValueRef { - check_not_terminated(cx); - assert!(!cx.unreachable.get()); - B(cx).landing_pad(ty, pers_fn, num_clauses, cx.fcx.llfn) +pub fn Trap(cx: &BlockAndBuilder) { + cx.trap(); } -pub fn AddClause(cx: Block, landing_pad: ValueRef, clause: ValueRef) { - B(cx).add_clause(landing_pad, clause) +pub fn LandingPad(cx: &BlockAndBuilder, ty: Type, pers_fn: ValueRef, + num_clauses: usize) -> ValueRef { + assert!(!cx.is_unreachable()); + cx.landing_pad(ty, pers_fn, num_clauses, cx.fcx().llfn) } -pub fn SetCleanup(cx: Block, landing_pad: ValueRef) { - B(cx).set_cleanup(landing_pad) +pub fn AddClause(cx: &BlockAndBuilder, landing_pad: ValueRef, clause: ValueRef) { + cx.add_clause(landing_pad, clause) } -pub fn SetPersonalityFn(cx: Block, f: ValueRef) { - B(cx).set_personality_fn(f) +pub fn SetCleanup(cx: &BlockAndBuilder, landing_pad: ValueRef) { + cx.set_cleanup(landing_pad) } -pub fn Resume(cx: Block, exn: ValueRef) -> ValueRef { - check_not_terminated(cx); - terminate(cx, "Resume"); - B(cx).resume(exn) +pub fn SetPersonalityFn(cx: &BlockAndBuilder, f: ValueRef) { + cx.set_personality_fn(f) } // Atomic Operations -pub fn AtomicCmpXchg(cx: Block, dst: ValueRef, - cmp: ValueRef, src: ValueRef, - order: AtomicOrdering, - failure_order: AtomicOrdering, - weak: llvm::Bool) -> ValueRef { - B(cx).atomic_cmpxchg(dst, cmp, src, order, failure_order, weak) +pub fn AtomicCmpXchg(cx: &BlockAndBuilder, dst: ValueRef, + cmp: ValueRef, src: ValueRef, + order: AtomicOrdering, + failure_order: AtomicOrdering, + weak: llvm::Bool) -> ValueRef { + cx.atomic_cmpxchg(dst, cmp, src, order, failure_order, weak) } -pub fn AtomicRMW(cx: Block, op: AtomicRmwBinOp, - dst: ValueRef, src: ValueRef, - order: AtomicOrdering) -> ValueRef { - B(cx).atomic_rmw(op, dst, src, order) +pub fn AtomicRMW(cx: &BlockAndBuilder, op: AtomicRmwBinOp, + dst: ValueRef, src: ValueRef, + order: AtomicOrdering) -> ValueRef { + cx.atomic_rmw(op, dst, src, order) } -pub fn CleanupPad(cx: Block, - parent: Option, - args: &[ValueRef]) -> ValueRef { - check_not_terminated(cx); - assert!(!cx.unreachable.get()); - B(cx).cleanup_pad(parent, args) +pub fn CleanupPad(cx: &BlockAndBuilder, + parent: Option, + args: &[ValueRef]) -> ValueRef { + assert!(!cx.is_unreachable()); + cx.cleanup_pad(parent, args) } -pub fn CleanupRet(cx: Block, - cleanup: ValueRef, - unwind: Option) -> ValueRef { - check_not_terminated(cx); - terminate(cx, "CleanupRet"); - B(cx).cleanup_ret(cleanup, unwind) +pub fn CleanupRet(cx: &BlockAndBuilder, + cleanup: ValueRef, + unwind: Option) -> ValueRef { + cx.terminate(); + cx.cleanup_ret(cleanup, unwind) } -pub fn CatchPad(cx: Block, - parent: ValueRef, - args: &[ValueRef]) -> ValueRef { - check_not_terminated(cx); - assert!(!cx.unreachable.get()); - B(cx).catch_pad(parent, args) +pub fn 
CatchPad(cx: &BlockAndBuilder, + parent: ValueRef, + args: &[ValueRef]) -> ValueRef { + assert!(!cx.is_unreachable()); + cx.catch_pad(parent, args) } -pub fn CatchRet(cx: Block, pad: ValueRef, unwind: BasicBlockRef) -> ValueRef { - check_not_terminated(cx); - terminate(cx, "CatchRet"); - B(cx).catch_ret(pad, unwind) +pub fn CatchRet(cx: &BlockAndBuilder, pad: ValueRef, unwind: BasicBlockRef) -> ValueRef { + cx.terminate(); + cx.catch_ret(pad, unwind) } -pub fn CatchSwitch(cx: Block, - parent: Option, - unwind: Option, - num_handlers: usize) -> ValueRef { - check_not_terminated(cx); - terminate(cx, "CatchSwitch"); - B(cx).catch_switch(parent, unwind, num_handlers) +pub fn CatchSwitch(cx: &BlockAndBuilder, + parent: Option, + unwind: Option, + num_handlers: usize) -> ValueRef { + cx.terminate(); + cx.catch_switch(parent, unwind, num_handlers) } -pub fn AddHandler(cx: Block, catch_switch: ValueRef, handler: BasicBlockRef) { - B(cx).add_handler(catch_switch, handler) +pub fn AddHandler(cx: &BlockAndBuilder, catch_switch: ValueRef, handler: BasicBlockRef) { + cx.add_handler(catch_switch, handler) } diff --git a/src/librustc_trans/callee.rs b/src/librustc_trans/callee.rs index d7e9f1372e06d..4f0a58e00d57d 100644 --- a/src/librustc_trans/callee.rs +++ b/src/librustc_trans/callee.rs @@ -26,7 +26,9 @@ use attributes; use base; use base::*; use build::*; -use common::{self, Block, Result, CrateContext, FunctionContext, SharedCrateContext}; +use common::{ + self, Block, BlockAndBuilder, CrateContext, FunctionContext, SharedCrateContext +}; use consts; use debuginfo::DebugLoc; use declare; @@ -207,11 +209,11 @@ impl<'tcx> Callee<'tcx> { /// For non-lang items, `dest` is always Some, and hence the result is written /// into memory somewhere. Nonetheless we return the actual return value of the /// function. - pub fn call<'a, 'blk>(self, bcx: Block<'blk, 'tcx>, + pub fn call<'a, 'blk>(self, bcx: BlockAndBuilder<'blk, 'tcx>, debug_loc: DebugLoc, args: &[ValueRef], dest: Option) - -> Result<'blk, 'tcx> { + -> (BlockAndBuilder<'blk, 'tcx>, ValueRef) { trans_call_inner(bcx, debug_loc, self, args, dest) } @@ -370,8 +372,7 @@ fn trans_fn_once_adapter_shim<'a, 'tcx>( let (block_arena, fcx): (TypedArena<_>, FunctionContext); block_arena = TypedArena::new(); fcx = FunctionContext::new(ccx, lloncefn, fn_ty, None, &block_arena); - let mut bcx = fcx.init(false); - + let bcx = fcx.init(false); // the first argument (`self`) will be the (by value) closure env. 
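
The `Callee::call` signature changed above returns a plain tuple instead of the old `Result` struct (which this patch deletes from common.rs), so call sites in the hunks that follow switch from field access to tuple indexing. Schematically, using the two forms exactly as they appear in this patch (not standalone compilable code):

    // before: the Result wrapper carried the new block in a named field
    bcx = callee.call(bcx, DebugLoc::None, &llargs[self_idx..], dest).bcx;

    // after: the call yields (BlockAndBuilder, ValueRef) directly
    let bcx = callee.call(bcx, DebugLoc::None, &llargs[self_idx..], dest).0;
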
@@ -381,9 +382,9 @@ fn trans_fn_once_adapter_shim<'a, 'tcx>( let llenv = if env_arg.is_indirect() { llargs[self_idx] } else { - let scratch = alloc_ty(bcx, closure_ty, "self"); + let scratch = alloc_ty(&bcx, closure_ty, "self"); let mut llarg_idx = self_idx; - env_arg.store_fn_arg(&bcx.build(), &mut llarg_idx, scratch); + env_arg.store_fn_arg(&bcx, &mut llarg_idx, scratch); scratch }; @@ -413,11 +414,11 @@ fn trans_fn_once_adapter_shim<'a, 'tcx>( let self_scope = fcx.push_custom_cleanup_scope(); fcx.schedule_drop_mem(self_scope, llenv, closure_ty); - bcx = callee.call(bcx, DebugLoc::None, &llargs[self_idx..], dest).bcx; + let bcx = callee.call(bcx, DebugLoc::None, &llargs[self_idx..], dest).0; - fcx.pop_and_trans_custom_cleanup_scope(bcx, self_scope); + let bcx = fcx.pop_and_trans_custom_cleanup_scope(bcx, self_scope); - fcx.finish(bcx, DebugLoc::None); + fcx.finish(&bcx, DebugLoc::None); ccx.instances().borrow_mut().insert(method_instance, lloncefn); @@ -522,7 +523,7 @@ fn trans_fn_pointer_shim<'a, 'tcx>( let (block_arena, fcx): (TypedArena<_>, FunctionContext); block_arena = TypedArena::new(); fcx = FunctionContext::new(ccx, llfn, fn_ty, None, &block_arena); - let mut bcx = fcx.init(false); + let bcx = fcx.init(false); let llargs = get_params(fcx.llfn); @@ -530,7 +531,7 @@ fn trans_fn_pointer_shim<'a, 'tcx>( let llfnpointer = llfnpointer.unwrap_or_else(|| { // the first argument (`self`) will be ptr to the fn pointer if is_by_ref { - Load(bcx, llargs[self_idx]) + Load(&bcx, llargs[self_idx]) } else { llargs[self_idx] } @@ -542,9 +543,8 @@ fn trans_fn_pointer_shim<'a, 'tcx>( data: Fn(llfnpointer), ty: bare_fn_ty }; - bcx = callee.call(bcx, DebugLoc::None, &llargs[(self_idx + 1)..], dest).bcx; - - fcx.finish(bcx, DebugLoc::None); + let bcx = callee.call(bcx, DebugLoc::None, &llargs[(self_idx + 1)..], dest).0; + fcx.finish(&bcx, DebugLoc::None); ccx.fn_pointer_shims().borrow_mut().insert(bare_fn_ty_maybe_ref, llfn); @@ -653,12 +653,12 @@ fn get_fn<'a, 'tcx>(ccx: &CrateContext<'a, 'tcx>, // ______________________________________________________________________ // Translating calls -fn trans_call_inner<'a, 'blk, 'tcx>(bcx: Block<'blk, 'tcx>, +fn trans_call_inner<'a, 'blk, 'tcx>(bcx: BlockAndBuilder<'blk, 'tcx>, debug_loc: DebugLoc, callee: Callee<'tcx>, args: &[ValueRef], opt_llretslot: Option) - -> Result<'blk, 'tcx> { + -> (BlockAndBuilder<'blk, 'tcx>, ValueRef) { // Introduce a temporary cleanup scope that will contain cleanups // for the arguments while they are being evaluated. The purpose // this cleanup is to ensure that, should a panic occur while @@ -666,7 +666,7 @@ fn trans_call_inner<'a, 'blk, 'tcx>(bcx: Block<'blk, 'tcx>, // cleaned up. If no panic occurs, the values are handed off to // the callee, and hence none of the cleanups in this temporary // scope will ever execute. 
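
The scope discipline this comment describes is the same one `trans_fn_once_adapter_shim` uses earlier in this file: push a scope, schedule drops into it, make the call, pop the scope on the normal path. A minimal sketch of that sequence, assuming the surrounding `fcx`/`bcx` context from the hunks above (it is not compilable on its own):

    let scope = fcx.push_custom_cleanup_scope();
    fcx.schedule_drop_mem(scope, llenv, closure_ty);          // runs if the call unwinds
    let (bcx, _ret) = callee.call(bcx, DebugLoc::None, &llargs[self_idx..], dest);
    let bcx = fcx.pop_and_trans_custom_cleanup_scope(bcx, scope); // normal-exit drops
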
- let fcx = bcx.fcx; + let fcx = &bcx.fcx(); let ccx = fcx.ccx; let fn_ret = callee.ty.fn_ret(); @@ -689,7 +689,7 @@ fn trans_call_inner<'a, 'blk, 'tcx>(bcx: Block<'blk, 'tcx>, if fn_ty.ret.is_indirect() { let mut llretslot = opt_llretslot.unwrap(); if let Some(ty) = fn_ty.ret.cast { - llretslot = PointerCast(bcx, llretslot, ty.ptr_to()); + llretslot = PointerCast(&bcx, llretslot, ty.ptr_to()); } llargs.push(llretslot); } @@ -698,9 +698,9 @@ fn trans_call_inner<'a, 'blk, 'tcx>(bcx: Block<'blk, 'tcx>, Virtual(idx) => { llargs.push(args[0]); - let fn_ptr = meth::get_virtual_method(bcx, args[1], idx); - let llty = fn_ty.llvm_type(bcx.ccx()).ptr_to(); - callee = Fn(PointerCast(bcx, fn_ptr, llty)); + let fn_ptr = meth::get_virtual_method(&bcx, args[1], idx); + let llty = fn_ty.llvm_type(&bcx.ccx()).ptr_to(); + callee = Fn(PointerCast(&bcx, fn_ptr, llty)); llargs.extend_from_slice(&args[2..]); } _ => llargs.extend_from_slice(args) @@ -712,7 +712,7 @@ fn trans_call_inner<'a, 'blk, 'tcx>(bcx: Block<'blk, 'tcx>, }; let (llret, bcx) = base::invoke(bcx, llfn, &llargs, debug_loc); - if !bcx.unreachable.get() { + if !bcx.is_unreachable() { fn_ty.apply_attrs_callsite(llret); // If the function we just called does not use an outpointer, @@ -722,14 +722,16 @@ fn trans_call_inner<'a, 'blk, 'tcx>(bcx: Block<'blk, 'tcx>, // u64. if !fn_ty.ret.is_indirect() { if let Some(llretslot) = opt_llretslot { - fn_ty.ret.store(&bcx.build(), llret, llretslot); + fn_ty.ret.store(&bcx, llret, llretslot); } } } if fn_ret.0.is_never() { - Unreachable(bcx); + assert!(!bcx.is_terminated()); + bcx.set_unreachable(); + bcx.unreachable(); } - Result::new(bcx, llret) + (bcx, llret) } diff --git a/src/librustc_trans/cleanup.rs b/src/librustc_trans/cleanup.rs index b9f24eba9dc1e..db74e57dd8884 100644 --- a/src/librustc_trans/cleanup.rs +++ b/src/librustc_trans/cleanup.rs @@ -120,7 +120,7 @@ use llvm::{BasicBlockRef, ValueRef}; use base; use build; use common; -use common::{Block, FunctionContext, LandingPad}; +use common::{BlockAndBuilder, FunctionContext, LandingPad}; use debuginfo::{DebugLoc}; use glue; use type_::Type; @@ -190,9 +190,9 @@ impl<'blk, 'tcx> FunctionContext<'blk, 'tcx> { /// Removes the top cleanup scope from the stack, which must be a temporary scope, and /// generates the code to do its cleanups for normal exit. 
pub fn pop_and_trans_custom_cleanup_scope(&self, - bcx: Block<'blk, 'tcx>, + bcx: BlockAndBuilder<'blk, 'tcx>, custom_scope: CustomScopeIndex) - -> Block<'blk, 'tcx> { + -> BlockAndBuilder<'blk, 'tcx> { debug!("pop_and_trans_custom_cleanup_scope({:?})", custom_scope); assert!(self.is_valid_to_pop_custom_scope(custom_scope)); @@ -339,11 +339,11 @@ impl<'blk, 'tcx> FunctionContext<'blk, 'tcx> { /// Generates the cleanups for `scope` into `bcx` fn trans_scope_cleanups(&self, // cannot borrow self, will recurse - bcx: Block<'blk, 'tcx>, - scope: &CleanupScope<'tcx>) -> Block<'blk, 'tcx> { + bcx: BlockAndBuilder<'blk, 'tcx>, + scope: &CleanupScope<'tcx>) -> BlockAndBuilder<'blk, 'tcx> { let mut bcx = bcx; - if !bcx.unreachable.get() { + if !bcx.is_unreachable() { for cleanup in scope.cleanups.iter().rev() { bcx = cleanup.trans(bcx, scope.debug_loc); } @@ -419,21 +419,21 @@ impl<'blk, 'tcx> FunctionContext<'blk, 'tcx> { UnwindExit(val) => { // Generate a block that will resume unwinding to the // calling function - let bcx = self.new_block("resume"); + let bcx = self.new_block("resume").build(); match val { UnwindKind::LandingPad => { let addr = self.landingpad_alloca.get() .unwrap(); - let lp = build::Load(bcx, addr); - base::call_lifetime_end(bcx, addr); - base::trans_unwind_resume(bcx, lp); + let lp = build::Load(&bcx, addr); + base::call_lifetime_end(&bcx, addr); + base::trans_unwind_resume(&bcx, lp); } UnwindKind::CleanupPad(_) => { - let pad = build::CleanupPad(bcx, None, &[]); - build::CleanupRet(bcx, pad, None); + let pad = build::CleanupPad(&bcx, None, &[]); + build::CleanupRet(&bcx, pad, None); } } - prev_llbb = bcx.llbb; + prev_llbb = bcx.llbb(); break; } } @@ -484,16 +484,17 @@ impl<'blk, 'tcx> FunctionContext<'blk, 'tcx> { let name = scope.block_name("clean"); debug!("generating cleanups for {}", name); - let bcx_in = self.new_block(&name[..]); - let exit_label = label.start(bcx_in); + let bcx_in = self.new_block(&name[..]).build(); + let exit_label = label.start(&bcx_in); + let next_llbb = bcx_in.llbb(); let mut bcx_out = bcx_in; let len = scope.cleanups.len(); for cleanup in scope.cleanups.iter().rev().take(len - skip) { bcx_out = cleanup.trans(bcx_out, scope.debug_loc); } skip = 0; - exit_label.branch(bcx_out, prev_llbb); - prev_llbb = bcx_in.llbb; + exit_label.branch(&bcx_out, prev_llbb); + prev_llbb = next_llbb; scope.add_cached_early_exit(exit_label, prev_llbb, len); } @@ -527,13 +528,13 @@ impl<'blk, 'tcx> FunctionContext<'blk, 'tcx> { Some(llbb) => return llbb, None => { let name = last_scope.block_name("unwind"); - pad_bcx = self.new_block(&name[..]); - last_scope.cached_landing_pad = Some(pad_bcx.llbb); + pad_bcx = self.new_block(&name[..]).build(); + last_scope.cached_landing_pad = Some(pad_bcx.llbb()); } } }; - let llpersonality = pad_bcx.fcx.eh_personality(); + let llpersonality = pad_bcx.fcx().eh_personality(); let val = if base::wants_msvc_seh(self.ccx.sess()) { // A cleanup pad requires a personality function to be specified, so @@ -541,8 +542,8 @@ impl<'blk, 'tcx> FunctionContext<'blk, 'tcx> { // creation of the landingpad instruction). We then create a // cleanuppad instruction which has no filters to run cleanup on all // exceptions. - build::SetPersonalityFn(pad_bcx, llpersonality); - let llretval = build::CleanupPad(pad_bcx, None, &[]); + build::SetPersonalityFn(&pad_bcx, llpersonality); + let llretval = build::CleanupPad(&pad_bcx, None, &[]); UnwindKind::CleanupPad(llretval) } else { // The landing pad return type (the type being propagated). 
Not sure @@ -553,31 +554,31 @@ impl<'blk, 'tcx> FunctionContext<'blk, 'tcx> { false); // The only landing pad clause will be 'cleanup' - let llretval = build::LandingPad(pad_bcx, llretty, llpersonality, 1); + let llretval = build::LandingPad(&pad_bcx, llretty, llpersonality, 1); // The landing pad block is a cleanup - build::SetCleanup(pad_bcx, llretval); + build::SetCleanup(&pad_bcx, llretval); let addr = match self.landingpad_alloca.get() { Some(addr) => addr, None => { - let addr = base::alloca(pad_bcx, common::val_ty(llretval), + let addr = base::alloca(&pad_bcx, common::val_ty(llretval), ""); - base::call_lifetime_start(pad_bcx, addr); + base::call_lifetime_start(&pad_bcx, addr); self.landingpad_alloca.set(Some(addr)); addr } }; - build::Store(pad_bcx, llretval, addr); + build::Store(&pad_bcx, llretval, addr); UnwindKind::LandingPad }; // Generate the cleanup block and branch to it. let label = UnwindExit(val); let cleanup_llbb = self.trans_cleanups_to_exit_scope(label); - label.branch(pad_bcx, cleanup_llbb); + label.branch(&pad_bcx, cleanup_llbb); - return pad_bcx.llbb; + return pad_bcx.llbb(); } } @@ -628,7 +629,7 @@ impl EarlyExitLabel { /// Transitions from an exit label to other exit labels depend on the type /// of label. For example with MSVC exceptions unwind exit labels will use /// the `cleanupret` instruction instead of the `br` instruction. - fn branch(&self, from_bcx: Block, to_llbb: BasicBlockRef) { + fn branch(&self, from_bcx: &BlockAndBuilder, to_llbb: BasicBlockRef) { if let UnwindExit(UnwindKind::CleanupPad(pad)) = *self { build::CleanupRet(from_bcx, pad, Some(to_llbb)); } else { @@ -647,15 +648,15 @@ impl EarlyExitLabel { /// /// Returns a new label which will can be used to cache `bcx` in the list of /// early exits. - fn start(&self, bcx: Block) -> EarlyExitLabel { + fn start(&self, bcx: &BlockAndBuilder) -> EarlyExitLabel { match *self { UnwindExit(UnwindKind::CleanupPad(..)) => { let pad = build::CleanupPad(bcx, None, &[]); - bcx.lpad.set(Some(bcx.fcx.lpad_arena.alloc(LandingPad::msvc(pad)))); + bcx.set_lpad_ref(Some(bcx.fcx().lpad_arena.alloc(LandingPad::msvc(pad)))); UnwindExit(UnwindKind::CleanupPad(pad)) } UnwindExit(UnwindKind::LandingPad) => { - bcx.lpad.set(Some(bcx.fcx.lpad_arena.alloc(LandingPad::gnu()))); + bcx.set_lpad_ref(Some(bcx.fcx().lpad_arena.alloc(LandingPad::gnu()))); *self } } @@ -685,20 +686,19 @@ pub struct DropValue<'tcx> { impl<'tcx> DropValue<'tcx> { fn trans<'blk>(&self, - bcx: Block<'blk, 'tcx>, + bcx: BlockAndBuilder<'blk, 'tcx>, debug_loc: DebugLoc) - -> Block<'blk, 'tcx> { + -> BlockAndBuilder<'blk, 'tcx> { let skip_dtor = self.skip_dtor; let _icx = if skip_dtor { base::push_ctxt("::trans skip_dtor=true") } else { base::push_ctxt("::trans skip_dtor=false") }; - let bcx = if self.is_immediate { + if self.is_immediate { glue::drop_ty_immediate(bcx, self.val, self.ty, debug_loc, self.skip_dtor) } else { glue::drop_ty_core(bcx, self.val, self.ty, debug_loc, self.skip_dtor) - }; - bcx + } } } diff --git a/src/librustc_trans/common.rs b/src/librustc_trans/common.rs index b1d61cea39cec..1650d7376bfbc 100644 --- a/src/librustc_trans/common.rs +++ b/src/librustc_trans/common.rs @@ -441,6 +441,7 @@ impl<'a, 'tcx> FunctionContext<'a, 'tcx> { // code. Each basic block we generate is attached to a function, typically // with many basic blocks per function. All the basic blocks attached to a // function are organized as a directed graph. 
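
`#[must_use]` is added below because a block value that gets silently discarded usually means a lost insertion point: block-threading code must either keep using the value or explicitly hand it on. A self-contained toy model of the threading pattern this patch establishes (the type and helpers here are illustrative stand-ins, not the librustc_trans API):

    // Instruction helpers borrow the block-and-builder pair; anything that
    // can move control flow to another block takes it by value and returns
    // the (possibly new) one.
    #[must_use]
    struct BlockAndBuilder {
        name: &'static str,
    }

    impl BlockAndBuilder {
        fn emit(&self, instr: &str) {
            println!("{}: {}", self.name, instr);
        }
    }

    fn icmp_like(bcx: &BlockAndBuilder) {
        bcx.emit("icmp"); // straight-line instruction: a borrow is enough
    }

    fn call_like(bcx: BlockAndBuilder) -> (BlockAndBuilder, i32) {
        bcx.emit("call"); // may branch, so ownership is threaded through
        (bcx, 0)
    }

    fn main() {
        let bcx = BlockAndBuilder { name: "entry" };
        icmp_like(&bcx);
        let (bcx, _ret) = call_like(bcx);
        bcx.emit("ret");
    }
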
+#[must_use] pub struct BlockS<'blk, 'tcx: 'blk> { // The BasicBlockRef returned from a call to // llvm::LLVMAppendBasicBlock(llfn, name), which adds a basic @@ -555,6 +556,7 @@ impl<'blk, 'tcx> Drop for OwnedBuilder<'blk, 'tcx> { } } +#[must_use] pub struct BlockAndBuilder<'blk, 'tcx: 'blk> { bcx: Block<'blk, 'tcx>, owned_builder: OwnedBuilder<'blk, 'tcx>, @@ -597,10 +599,24 @@ impl<'blk, 'tcx> BlockAndBuilder<'blk, 'tcx> { // Methods delegated to bcx + pub fn terminate(&self) { + debug!("terminate({})", self.bcx.to_str()); + self.bcx.terminated.set(true); + } + + pub fn set_unreachable(&self) { + debug!("set_unreachable({})", self.bcx.to_str()); + self.bcx.unreachable.set(true); + } + pub fn is_unreachable(&self) -> bool { self.bcx.unreachable.get() } + pub fn is_terminated(&self) -> bool { + self.bcx.terminated.get() + } + pub fn ccx(&self) -> &'blk CrateContext<'blk, 'tcx> { self.bcx.ccx() } @@ -700,20 +716,6 @@ impl Clone for LandingPad { } } -pub struct Result<'blk, 'tcx: 'blk> { - pub bcx: Block<'blk, 'tcx>, - pub val: ValueRef -} - -impl<'b, 'tcx> Result<'b, 'tcx> { - pub fn new(bcx: Block<'b, 'tcx>, val: ValueRef) -> Result<'b, 'tcx> { - Result { - bcx: bcx, - val: val, - } - } -} - pub fn val_ty(v: ValueRef) -> Type { unsafe { Type::from_ref(llvm::LLVMTypeOf(v)) @@ -1016,7 +1018,7 @@ pub fn langcall(tcx: TyCtxt, // all shifts). For 32- and 64-bit types, this matches the semantics // of Java. (See related discussion on #1877 and #10183.) -pub fn build_unchecked_lshift<'blk, 'tcx>(bcx: Block<'blk, 'tcx>, +pub fn build_unchecked_lshift<'blk, 'tcx>(bcx: &BlockAndBuilder<'blk, 'tcx>, lhs: ValueRef, rhs: ValueRef, binop_debug_loc: DebugLoc) -> ValueRef { @@ -1026,7 +1028,7 @@ pub fn build_unchecked_lshift<'blk, 'tcx>(bcx: Block<'blk, 'tcx>, build::Shl(bcx, lhs, rhs, binop_debug_loc) } -pub fn build_unchecked_rshift<'blk, 'tcx>(bcx: Block<'blk, 'tcx>, +pub fn build_unchecked_rshift<'blk, 'tcx>(bcx: &BlockAndBuilder<'blk, 'tcx>, lhs_t: Ty<'tcx>, lhs: ValueRef, rhs: ValueRef, @@ -1042,17 +1044,19 @@ pub fn build_unchecked_rshift<'blk, 'tcx>(bcx: Block<'blk, 'tcx>, } } -fn shift_mask_rhs<'blk, 'tcx>(bcx: Block<'blk, 'tcx>, +fn shift_mask_rhs<'blk, 'tcx>(bcx: &BlockAndBuilder<'blk, 'tcx>, rhs: ValueRef, debug_loc: DebugLoc) -> ValueRef { let rhs_llty = val_ty(rhs); build::And(bcx, rhs, shift_mask_val(bcx, rhs_llty, rhs_llty, false), debug_loc) } -pub fn shift_mask_val<'blk, 'tcx>(bcx: Block<'blk, 'tcx>, - llty: Type, - mask_llty: Type, - invert: bool) -> ValueRef { +pub fn shift_mask_val<'blk, 'tcx>( + bcx: &BlockAndBuilder<'blk, 'tcx>, + llty: Type, + mask_llty: Type, + invert: bool +) -> ValueRef { let kind = llty.kind(); match kind { TypeKind::Integer => { diff --git a/src/librustc_trans/debuginfo/mod.rs b/src/librustc_trans/debuginfo/mod.rs index 4e511c05840d1..f59ecf1d6782f 100644 --- a/src/librustc_trans/debuginfo/mod.rs +++ b/src/librustc_trans/debuginfo/mod.rs @@ -27,7 +27,7 @@ use rustc::hir::def_id::DefId; use rustc::ty::subst::Substs; use abi::Abi; -use common::{CrateContext, FunctionContext, Block, BlockAndBuilder}; +use common::{CrateContext, FunctionContext, BlockAndBuilder}; use monomorphize::{self, Instance}; use rustc::ty::{self, Ty}; use rustc::mir; @@ -441,7 +441,7 @@ pub fn create_function_debug_context<'a, 'tcx>(cx: &CrateContext<'a, 'tcx>, } } -pub fn declare_local<'blk, 'tcx>(bcx: Block<'blk, 'tcx>, +pub fn declare_local<'blk, 'tcx>(bcx: &BlockAndBuilder<'blk, 'tcx>, variable_name: ast::Name, variable_type: Ty<'tcx>, scope_metadata: DIScope, @@ -494,16 +494,16 @@ pub 
fn declare_local<'blk, 'tcx>(bcx: Block<'blk, 'tcx>, address_operations.as_ptr(), address_operations.len() as c_uint, debug_loc, - bcx.llbb); + bcx.llbb()); - llvm::LLVMSetInstDebugLocation(::build::B(bcx).llbuilder, instr); + llvm::LLVMSetInstDebugLocation(bcx.llbuilder, instr); } } } match variable_kind { ArgumentVariable(_) | CapturedVariable => { - assert!(!bcx.fcx + assert!(!bcx.fcx() .debug_context .get_ref(span) .source_locations_enabled diff --git a/src/librustc_trans/glue.rs b/src/librustc_trans/glue.rs index 90bc29c39e9b5..a1e18725704f2 100644 --- a/src/librustc_trans/glue.rs +++ b/src/librustc_trans/glue.rs @@ -38,38 +38,39 @@ use Disr; use arena::TypedArena; use syntax_pos::DUMMY_SP; -pub fn trans_exchange_free_dyn<'blk, 'tcx>(bcx: Block<'blk, 'tcx>, +pub fn trans_exchange_free_dyn<'blk, 'tcx>(bcx: BlockAndBuilder<'blk, 'tcx>, v: ValueRef, size: ValueRef, align: ValueRef, debug_loc: DebugLoc) - -> Block<'blk, 'tcx> { + -> BlockAndBuilder<'blk, 'tcx> { let _icx = push_ctxt("trans_exchange_free"); let def_id = langcall(bcx.tcx(), None, "", ExchangeFreeFnLangItem); - let args = [PointerCast(bcx, v, Type::i8p(bcx.ccx())), size, align]; + let args = [PointerCast(&bcx, v, Type::i8p(bcx.ccx())), size, align]; Callee::def(bcx.ccx(), def_id, bcx.tcx().intern_substs(&[])) - .call(bcx, debug_loc, &args, None).bcx + .call(bcx, debug_loc, &args, None).0 } -pub fn trans_exchange_free<'blk, 'tcx>(cx: Block<'blk, 'tcx>, +pub fn trans_exchange_free<'blk, 'tcx>(cx: BlockAndBuilder<'blk, 'tcx>, v: ValueRef, size: u64, align: u32, debug_loc: DebugLoc) - -> Block<'blk, 'tcx> { + -> BlockAndBuilder<'blk, 'tcx> { + let ccx = cx.ccx(); trans_exchange_free_dyn(cx, v, - C_uint(cx.ccx(), size), - C_uint(cx.ccx(), align), + C_uint(ccx, size), + C_uint(ccx, align), debug_loc) } -pub fn trans_exchange_free_ty<'blk, 'tcx>(bcx: Block<'blk, 'tcx>, +pub fn trans_exchange_free_ty<'blk, 'tcx>(bcx: BlockAndBuilder<'blk, 'tcx>, ptr: ValueRef, content_ty: Ty<'tcx>, debug_loc: DebugLoc) - -> Block<'blk, 'tcx> { + -> BlockAndBuilder<'blk, 'tcx> { assert!(type_is_sized(bcx.ccx().tcx(), content_ty)); let sizing_type = sizing_type_of(bcx.ccx(), content_ty); let content_size = llsize_of_alloc(bcx.ccx(), sizing_type); @@ -129,23 +130,23 @@ pub fn get_drop_glue_type<'a, 'tcx>(tcx: TyCtxt<'a, 'tcx, 'tcx>, } } -pub fn drop_ty<'blk, 'tcx>(bcx: Block<'blk, 'tcx>, +pub fn drop_ty<'blk, 'tcx>(bcx: BlockAndBuilder<'blk, 'tcx>, v: ValueRef, t: Ty<'tcx>, - debug_loc: DebugLoc) -> Block<'blk, 'tcx> { + debug_loc: DebugLoc) -> BlockAndBuilder<'blk, 'tcx> { drop_ty_core(bcx, v, t, debug_loc, false) } -pub fn drop_ty_core<'blk, 'tcx>(bcx: Block<'blk, 'tcx>, +pub fn drop_ty_core<'blk, 'tcx>(bcx: BlockAndBuilder<'blk, 'tcx>, v: ValueRef, t: Ty<'tcx>, debug_loc: DebugLoc, skip_dtor: bool) - -> Block<'blk, 'tcx> { + -> BlockAndBuilder<'blk, 'tcx> { // NB: v is an *alias* of type t here, not a direct value. 
debug!("drop_ty_core(t={:?}, skip_dtor={})", t, skip_dtor); let _icx = push_ctxt("drop_ty"); - if bcx.fcx.type_needs_drop(t) { + if bcx.fcx().type_needs_drop(t) { let ccx = bcx.ccx(); let g = if skip_dtor { DropGlueKind::TyContents(t) @@ -155,29 +156,29 @@ pub fn drop_ty_core<'blk, 'tcx>(bcx: Block<'blk, 'tcx>, let glue = get_drop_glue_core(ccx, g); let glue_type = get_drop_glue_type(ccx.tcx(), t); let ptr = if glue_type != t { - PointerCast(bcx, v, type_of(ccx, glue_type).ptr_to()) + PointerCast(&bcx, v, type_of(ccx, glue_type).ptr_to()) } else { v }; // No drop-hint ==> call standard drop glue - Call(bcx, glue, &[ptr], debug_loc); + Call(&bcx, glue, &[ptr], debug_loc); } bcx } -pub fn drop_ty_immediate<'blk, 'tcx>(bcx: Block<'blk, 'tcx>, +pub fn drop_ty_immediate<'blk, 'tcx>(bcx: BlockAndBuilder<'blk, 'tcx>, v: ValueRef, t: Ty<'tcx>, debug_loc: DebugLoc, skip_dtor: bool) - -> Block<'blk, 'tcx> { + -> BlockAndBuilder<'blk, 'tcx> { let _icx = push_ctxt("drop_ty_immediate"); - let vp = alloc_ty(bcx, t, ""); - call_lifetime_start(bcx, vp); - store_ty(bcx, v, vp, t); + let vp = alloc_ty(&bcx, t, ""); + call_lifetime_start(&bcx, vp); + store_ty(&bcx, v, vp, t); let bcx = drop_ty_core(bcx, vp, t, debug_loc, skip_dtor); - call_lifetime_end(bcx, vp); + call_lifetime_end(&bcx, vp); bcx } @@ -248,14 +249,14 @@ pub fn implement_drop_glue<'a, 'tcx>(ccx: &CrateContext<'a, 'tcx>, // type, so we don't need to explicitly cast the function parameter. let bcx = make_drop_glue(bcx, get_param(llfn, 0), g); - fcx.finish(bcx, DebugLoc::None); + fcx.finish(&bcx, DebugLoc::None); } -fn trans_custom_dtor<'blk, 'tcx>(bcx: Block<'blk, 'tcx>, +fn trans_custom_dtor<'blk, 'tcx>(bcx: BlockAndBuilder<'blk, 'tcx>, t: Ty<'tcx>, v0: ValueRef, shallow_drop: bool) - -> Block<'blk, 'tcx> + -> BlockAndBuilder<'blk, 'tcx> { debug!("trans_custom_dtor t: {}", t); let tcx = bcx.tcx(); @@ -269,12 +270,12 @@ fn trans_custom_dtor<'blk, 'tcx>(bcx: Block<'blk, 'tcx>, // // FIXME (#14875) panic-in-drop semantics might be unsupported; we // might well consider changing below to more direct code. - let contents_scope = bcx.fcx.push_custom_cleanup_scope(); + let contents_scope = bcx.fcx().push_custom_cleanup_scope(); // Issue #23611: schedule cleanup of contents, re-inspecting the // discriminant (if any) in case of variant swap in drop code. 
if !shallow_drop { - bcx.fcx.schedule_drop_adt_contents(contents_scope, v0, t); + bcx.fcx().schedule_drop_adt_contents(contents_scope, v0, t); } let (sized_args, unsized_args); @@ -284,8 +285,8 @@ fn trans_custom_dtor<'blk, 'tcx>(bcx: Block<'blk, 'tcx>, } else { // FIXME(#36457) -- we should pass unsized values to drop glue as two arguments unsized_args = [ - Load(bcx, get_dataptr(bcx, v0)), - Load(bcx, get_meta(bcx, v0)) + Load(&bcx, get_dataptr(&bcx, v0)), + Load(&bcx, get_meta(&bcx, v0)) ]; &unsized_args }; @@ -300,9 +301,9 @@ fn trans_custom_dtor<'blk, 'tcx>(bcx: Block<'blk, 'tcx>, }; let dtor_did = def.destructor().unwrap(); bcx = Callee::def(bcx.ccx(), dtor_did, vtbl.substs) - .call(bcx, DebugLoc::None, args, None).bcx; + .call(bcx, DebugLoc::None, args, None).0; - bcx.fcx.pop_and_trans_custom_cleanup_scope(bcx, contents_scope) + bcx.fcx().pop_and_trans_custom_cleanup_scope(bcx, contents_scope) } pub fn size_and_align_of_dst<'blk, 'tcx>(bcx: &BlockAndBuilder<'blk, 'tcx>, @@ -416,10 +417,10 @@ pub fn size_and_align_of_dst<'blk, 'tcx>(bcx: &BlockAndBuilder<'blk, 'tcx>, } } -fn make_drop_glue<'blk, 'tcx>(bcx: Block<'blk, 'tcx>, +fn make_drop_glue<'blk, 'tcx>(bcx: BlockAndBuilder<'blk, 'tcx>, v0: ValueRef, g: DropGlueKind<'tcx>) - -> Block<'blk, 'tcx> { + -> BlockAndBuilder<'blk, 'tcx> { let t = g.ty(); let skip_dtor = match g { DropGlueKind::Ty(_) => false, DropGlueKind::TyContents(_) => true }; @@ -438,27 +439,28 @@ fn make_drop_glue<'blk, 'tcx>(bcx: Block<'blk, 'tcx>, // a safe-guard, assert TyBox not used with TyContents. assert!(!skip_dtor); if !type_is_sized(bcx.tcx(), content_ty) { - let llval = get_dataptr(bcx, v0); - let llbox = Load(bcx, llval); + let llval = get_dataptr(&bcx, v0); + let llbox = Load(&bcx, llval); let bcx = drop_ty(bcx, v0, content_ty, DebugLoc::None); // FIXME(#36457) -- we should pass unsized values to drop glue as two arguments - let info = get_meta(bcx, v0); - let info = Load(bcx, info); - let (llsize, llalign) = - size_and_align_of_dst(&bcx.build(), content_ty, info); + let info = get_meta(&bcx, v0); + let info = Load(&bcx, info); + let (llsize, llalign) = size_and_align_of_dst(&bcx, content_ty, info); // `Box` does not allocate. - let needs_free = ICmp(bcx, - llvm::IntNE, - llsize, - C_uint(bcx.ccx(), 0u64), - DebugLoc::None); + let needs_free = ICmp( + &bcx, + llvm::IntNE, + llsize, + C_uint(bcx.ccx(), 0u64), + DebugLoc::None + ); with_cond(bcx, needs_free, |bcx| { trans_exchange_free_dyn(bcx, llbox, llsize, llalign, DebugLoc::None) }) } else { let llval = v0; - let llbox = Load(bcx, llval); + let llbox = Load(&bcx, llval); let bcx = drop_ty(bcx, llbox, content_ty, DebugLoc::None); trans_exchange_free_ty(bcx, llbox, content_ty, DebugLoc::None) } @@ -469,12 +471,12 @@ fn make_drop_glue<'blk, 'tcx>(bcx: Block<'blk, 'tcx>, // okay with always calling the Drop impl, if any. 
// FIXME(#36457) -- we should pass unsized values to drop glue as two arguments assert!(!skip_dtor); - let data_ptr = get_dataptr(bcx, v0); - let vtable_ptr = Load(bcx, get_meta(bcx, v0)); - let dtor = Load(bcx, vtable_ptr); - Call(bcx, + let data_ptr = get_dataptr(&bcx, v0); + let vtable_ptr = Load(&bcx, get_meta(&bcx, v0)); + let dtor = Load(&bcx, vtable_ptr); + Call(&bcx, dtor, - &[PointerCast(bcx, Load(bcx, data_ptr), Type::i8p(bcx.ccx()))], + &[PointerCast(&bcx, Load(&bcx, data_ptr), Type::i8p(bcx.ccx()))], DebugLoc::None); bcx } @@ -485,7 +487,7 @@ fn make_drop_glue<'blk, 'tcx>(bcx: Block<'blk, 'tcx>, bcx } _ => { - if bcx.fcx.type_needs_drop(t) { + if bcx.fcx().type_needs_drop(t) { drop_structural_ty(bcx, v0, t) } else { bcx @@ -495,27 +497,26 @@ fn make_drop_glue<'blk, 'tcx>(bcx: Block<'blk, 'tcx>, } // Iterates through the elements of a structural type, dropping them. -fn drop_structural_ty<'blk, 'tcx>(cx: Block<'blk, 'tcx>, +fn drop_structural_ty<'blk, 'tcx>(cx: BlockAndBuilder<'blk, 'tcx>, av: ValueRef, t: Ty<'tcx>) - -> Block<'blk, 'tcx> { + -> BlockAndBuilder<'blk, 'tcx> { let _icx = push_ctxt("drop_structural_ty"); - fn iter_variant<'blk, 'tcx>(cx: Block<'blk, 'tcx>, + fn iter_variant<'blk, 'tcx>(cx: BlockAndBuilder<'blk, 'tcx>, t: Ty<'tcx>, av: adt::MaybeSizedValue, variant: &'tcx ty::VariantDef, substs: &Substs<'tcx>) - -> Block<'blk, 'tcx> { + -> BlockAndBuilder<'blk, 'tcx> { let _icx = push_ctxt("iter_variant"); let tcx = cx.tcx(); let mut cx = cx; for (i, field) in variant.fields.iter().enumerate() { let arg = monomorphize::field_ty(tcx, substs, field); - cx = drop_ty(cx, - adt::trans_field_ptr(cx, t, av, Disr::from(variant.disr_val), i), - arg, DebugLoc::None); + let field_ptr = adt::trans_field_ptr(&cx, t, av, Disr::from(variant.disr_val), i); + cx = drop_ty(cx, field_ptr, arg, DebugLoc::None); } return cx; } @@ -524,8 +525,8 @@ fn drop_structural_ty<'blk, 'tcx>(cx: Block<'blk, 'tcx>, adt::MaybeSizedValue::sized(av) } else { // FIXME(#36457) -- we should pass unsized values as two arguments - let data = Load(cx, get_dataptr(cx, av)); - let info = Load(cx, get_meta(cx, av)); + let data = Load(&cx, get_dataptr(&cx, av)); + let info = Load(&cx, get_meta(&cx, av)); adt::MaybeSizedValue::unsized_(data, info) }; @@ -533,12 +534,12 @@ fn drop_structural_ty<'blk, 'tcx>(cx: Block<'blk, 'tcx>, match t.sty { ty::TyClosure(def_id, substs) => { for (i, upvar_ty) in substs.upvar_tys(def_id, cx.tcx()).enumerate() { - let llupvar = adt::trans_field_ptr(cx, t, value, Disr(0), i); + let llupvar = adt::trans_field_ptr(&cx, t, value, Disr(0), i); cx = drop_ty(cx, llupvar, upvar_ty, DebugLoc::None); } } ty::TyArray(_, n) => { - let base = get_dataptr(cx, value.value); + let base = get_dataptr(&cx, value.value); let len = C_uint(cx.ccx(), n); let unit_ty = t.sequence_element_type(cx.tcx()); cx = tvec::slice_for_each(cx, base, unit_ty, len, @@ -551,7 +552,7 @@ fn drop_structural_ty<'blk, 'tcx>(cx: Block<'blk, 'tcx>, } ty::TyTuple(ref args) => { for (i, arg) in args.iter().enumerate() { - let llfld_a = adt::trans_field_ptr(cx, t, value, Disr(0), i); + let llfld_a = adt::trans_field_ptr(&cx, t, value, Disr(0), i); cx = drop_ty(cx, llfld_a, *arg, DebugLoc::None); } } @@ -559,15 +560,15 @@ fn drop_structural_ty<'blk, 'tcx>(cx: Block<'blk, 'tcx>, AdtKind::Struct => { let VariantInfo { fields, discr } = VariantInfo::from_ty(cx.tcx(), t, None); for (i, &Field(_, field_ty)) in fields.iter().enumerate() { - let llfld_a = adt::trans_field_ptr(cx, t, value, Disr::from(discr), i); + let llfld_a = 
adt::trans_field_ptr(&cx, t, value, Disr::from(discr), i); let val = if type_is_sized(cx.tcx(), field_ty) { llfld_a } else { // FIXME(#36457) -- we should pass unsized values as two arguments - let scratch = alloc_ty(cx, field_ty, "__fat_ptr_iter"); - Store(cx, llfld_a, get_dataptr(cx, scratch)); - Store(cx, value.meta, get_meta(cx, scratch)); + let scratch = alloc_ty(&cx, field_ty, "__fat_ptr_iter"); + Store(&cx, llfld_a, get_dataptr(&cx, scratch)); + Store(&cx, value.meta, get_meta(&cx, scratch)); scratch }; cx = drop_ty(cx, val, field_ty, DebugLoc::None); @@ -577,14 +578,14 @@ fn drop_structural_ty<'blk, 'tcx>(cx: Block<'blk, 'tcx>, bug!("Union in `glue::drop_structural_ty`"); } AdtKind::Enum => { - let fcx = cx.fcx; + let fcx = cx.fcx(); let ccx = fcx.ccx; let n_variants = adt.variants.len(); // NB: we must hit the discriminant first so that structural // comparison know not to proceed when the discriminants differ. - match adt::trans_switch(cx, t, av, false) { + match adt::trans_switch(&cx, t, av, false) { (adt::BranchKind::Single, None) => { if n_variants != 0 { assert!(n_variants == 1); @@ -593,7 +594,8 @@ fn drop_structural_ty<'blk, 'tcx>(cx: Block<'blk, 'tcx>, } } (adt::BranchKind::Switch, Some(lldiscrim_a)) => { - cx = drop_ty(cx, lldiscrim_a, cx.tcx().types.isize, DebugLoc::None); + let tcx = cx.tcx(); + cx = drop_ty(cx, lldiscrim_a, tcx.types.isize, DebugLoc::None); // Create a fall-through basic block for the "else" case of // the switch instruction we're about to generate. Note that @@ -608,23 +610,19 @@ fn drop_structural_ty<'blk, 'tcx>(cx: Block<'blk, 'tcx>, // from the outer function, and any other use case will only // call this for an already-valid enum in which case the `ret // void` will never be hit. - let ret_void_cx = fcx.new_block("enum-iter-ret-void"); - RetVoid(ret_void_cx, DebugLoc::None); - let llswitch = Switch(cx, lldiscrim_a, ret_void_cx.llbb, n_variants); - let next_cx = fcx.new_block("enum-iter-next"); + let ret_void_cx = fcx.new_block("enum-iter-ret-void").build(); + RetVoid(&ret_void_cx, DebugLoc::None); + let llswitch = Switch(&cx, lldiscrim_a, ret_void_cx.llbb(), n_variants); + let next_cx = fcx.new_block("enum-iter-next").build(); for variant in &adt.variants { - let variant_cx = fcx.new_block(&format!("enum-iter-variant-{}", - &variant.disr_val - .to_string())); - let case_val = adt::trans_case(cx, t, Disr::from(variant.disr_val)); - AddCase(llswitch, case_val, variant_cx.llbb); - let variant_cx = iter_variant(variant_cx, - t, - value, - variant, - substs); - Br(variant_cx, next_cx.llbb, DebugLoc::None); + let variant_cx_name = format!("enum-iter-variant-{}", + &variant.disr_val.to_string()); + let variant_cx = fcx.new_block(&variant_cx_name).build(); + let case_val = adt::trans_case(&cx, t, Disr::from(variant.disr_val)); + AddCase(llswitch, case_val, variant_cx.llbb()); + let variant_cx = iter_variant(variant_cx, t, value, variant, substs); + Br(&variant_cx, next_cx.llbb(), DebugLoc::None); } cx = next_cx; } diff --git a/src/librustc_trans/intrinsic.rs b/src/librustc_trans/intrinsic.rs index 577ffbad1348b..74af7c4e3a7bd 100644 --- a/src/librustc_trans/intrinsic.rs +++ b/src/librustc_trans/intrinsic.rs @@ -87,14 +87,13 @@ fn get_simple_intrinsic(ccx: &CrateContext, name: &str) -> Option { /// Remember to add all intrinsics here, in librustc_typeck/check/mod.rs, /// and in libcore/intrinsics.rs; if you need access to any llvm intrinsics, /// add them to librustc_trans/trans/context.rs -pub fn trans_intrinsic_call<'a, 'blk, 'tcx>(mut bcx: Block<'blk, 
'tcx>, +pub fn trans_intrinsic_call<'a, 'blk, 'tcx>(bcx: &BlockAndBuilder<'blk, 'tcx>, callee_ty: Ty<'tcx>, fn_ty: &FnType, llargs: &[ValueRef], llresult: ValueRef, - call_debug_location: DebugLoc) - -> Result<'blk, 'tcx> { - let fcx = bcx.fcx; + call_debug_location: DebugLoc) { + let fcx = bcx.fcx(); let ccx = fcx.ccx; let tcx = bcx.tcx(); @@ -122,11 +121,10 @@ pub fn trans_intrinsic_call<'a, 'blk, 'tcx>(mut bcx: Block<'blk, 'tcx>, if name == "abort" { let llfn = ccx.get_intrinsic(&("llvm.trap")); Call(bcx, llfn, &[], call_debug_location); - Unreachable(bcx); - return Result::new(bcx, C_undef(Type::nil(ccx).ptr_to())); + return; } else if name == "unreachable" { - Unreachable(bcx); - return Result::new(bcx, C_nil(ccx)); + // FIXME: do nothing? + return; } let llret_ty = type_of::type_of(ccx, ret_ty); @@ -145,8 +143,7 @@ pub fn trans_intrinsic_call<'a, 'blk, 'tcx>(mut bcx: Block<'blk, 'tcx>, Call(bcx, expect, &[llargs[0], C_bool(ccx, false)], call_debug_location) } (_, "try") => { - bcx = try_intrinsic(bcx, llargs[0], llargs[1], llargs[2], llresult, - call_debug_location); + try_intrinsic(bcx, llargs[0], llargs[1], llargs[2], llresult, call_debug_location); C_nil(ccx) } (_, "breakpoint") => { @@ -162,7 +159,7 @@ pub fn trans_intrinsic_call<'a, 'blk, 'tcx>(mut bcx: Block<'blk, 'tcx>, let tp_ty = substs.type_at(0); if !type_is_sized(tcx, tp_ty) { let (llsize, _) = - glue::size_and_align_of_dst(&bcx.build(), tp_ty, llargs[1]); + glue::size_and_align_of_dst(bcx, tp_ty, llargs[1]); llsize } else { let lltp_ty = type_of::type_of(ccx, tp_ty); @@ -177,7 +174,7 @@ pub fn trans_intrinsic_call<'a, 'blk, 'tcx>(mut bcx: Block<'blk, 'tcx>, let tp_ty = substs.type_at(0); if !type_is_sized(tcx, tp_ty) { let (_, llalign) = - glue::size_and_align_of_dst(&bcx.build(), tp_ty, llargs[1]); + glue::size_and_align_of_dst(bcx, tp_ty, llargs[1]); llalign } else { C_uint(ccx, type_of::align_of(ccx, tp_ty)) @@ -188,25 +185,6 @@ pub fn trans_intrinsic_call<'a, 'blk, 'tcx>(mut bcx: Block<'blk, 'tcx>, let lltp_ty = type_of::type_of(ccx, tp_ty); C_uint(ccx, machine::llalign_of_pref(ccx, lltp_ty)) } - (_, "drop_in_place") => { - let tp_ty = substs.type_at(0); - let is_sized = type_is_sized(tcx, tp_ty); - let ptr = if is_sized { - llargs[0] - } else { - // FIXME(#36457) -- we should pass unsized values as two arguments - let scratch = alloc_ty(bcx, tp_ty, "drop"); - call_lifetime_start(bcx, scratch); - Store(bcx, llargs[0], get_dataptr(bcx, scratch)); - Store(bcx, llargs[1], get_meta(bcx, scratch)); - scratch - }; - glue::drop_ty(bcx, ptr, tp_ty, call_debug_location); - if !is_sized { - call_lifetime_end(bcx, ptr); - } - C_nil(ccx) - } (_, "type_name") => { let tp_ty = substs.type_at(0); let ty_name = Symbol::intern(&tp_ty.to_string()).as_str(); @@ -230,7 +208,7 @@ pub fn trans_intrinsic_call<'a, 'blk, 'tcx>(mut bcx: Block<'blk, 'tcx>, (_, "needs_drop") => { let tp_ty = substs.type_at(0); - C_bool(ccx, bcx.fcx.type_needs_drop(tp_ty)) + C_bool(ccx, bcx.fcx().type_needs_drop(tp_ty)) } (_, "offset") => { let ptr = llargs[0]; @@ -613,7 +591,7 @@ pub fn trans_intrinsic_call<'a, 'blk, 'tcx>(mut bcx: Block<'blk, 'tcx>, // qux` to be converted into `foo, bar, baz, qux`, integer // arguments to be truncated as needed and pointers to be // cast. 
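
The comment above describes the flattening that `modify_as_needed` (next hunk) performs for LLVM intrinsics that take aggregates by their leaves: the real code walks the fields with `trans_field_ptr` and loads each one. As a plain-Rust illustration of the idea only (the type and names here are ours, not trans API):

    // One aggregate in, its leaf values out, in declaration order.
    struct Quad { a: u64, b: u64, c: u64, d: u64 }

    fn flatten(q: &Quad) -> Vec<u64> {
        vec![q.a, q.b, q.c, q.d]
    }

    fn main() {
        let q = Quad { a: 1, b: 2, c: 3, d: 4 };
        assert_eq!(flatten(&q), vec![1, 2, 3, 4]);
    }
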
- fn modify_as_needed<'blk, 'tcx>(bcx: Block<'blk, 'tcx>, + fn modify_as_needed<'blk, 'tcx>(bcx: &BlockAndBuilder<'blk, 'tcx>, t: &intrinsics::Type, arg_type: Ty<'tcx>, llarg: ValueRef) @@ -627,7 +605,7 @@ pub fn trans_intrinsic_call<'a, 'blk, 'tcx>(mut bcx: Block<'blk, 'tcx>, // This assumes the type is "simple", i.e. no // destructors, and the contents are SIMD // etc. - assert!(!bcx.fcx.type_needs_drop(arg_type)); + assert!(!bcx.fcx().type_needs_drop(arg_type)); let arg = adt::MaybeSizedValue::sized(llarg); (0..contents.len()) .map(|i| { @@ -718,11 +696,9 @@ pub fn trans_intrinsic_call<'a, 'blk, 'tcx>(mut bcx: Block<'blk, 'tcx>, store_ty(bcx, llval, llresult, ret_ty); } } - - Result::new(bcx, llresult) } -fn copy_intrinsic<'blk, 'tcx>(bcx: Block<'blk, 'tcx>, +fn copy_intrinsic<'blk, 'tcx>(bcx: &BlockAndBuilder<'blk, 'tcx>, allow_overlap: bool, volatile: bool, tp_ty: Ty<'tcx>, @@ -759,7 +735,7 @@ fn copy_intrinsic<'blk, 'tcx>(bcx: Block<'blk, 'tcx>, call_debug_location) } -fn memset_intrinsic<'blk, 'tcx>(bcx: Block<'blk, 'tcx>, +fn memset_intrinsic<'blk, 'tcx>(bcx: &BlockAndBuilder<'blk, 'tcx>, volatile: bool, tp_ty: Ty<'tcx>, dst: ValueRef, @@ -788,7 +764,7 @@ fn memset_intrinsic<'blk, 'tcx>(bcx: Block<'blk, 'tcx>, call_debug_location) } -fn count_zeros_intrinsic(bcx: Block, +fn count_zeros_intrinsic(bcx: &BlockAndBuilder, name: &str, val: ValueRef, call_debug_location: DebugLoc) @@ -798,7 +774,7 @@ fn count_zeros_intrinsic(bcx: Block, Call(bcx, llfn, &[val, y], call_debug_location) } -fn with_overflow_intrinsic<'blk, 'tcx>(bcx: Block<'blk, 'tcx>, +fn with_overflow_intrinsic<'blk, 'tcx>(bcx: &BlockAndBuilder<'blk, 'tcx>, name: &str, a: ValueRef, b: ValueRef, @@ -817,20 +793,21 @@ fn with_overflow_intrinsic<'blk, 'tcx>(bcx: Block<'blk, 'tcx>, C_nil(bcx.ccx()) } -fn try_intrinsic<'blk, 'tcx>(bcx: Block<'blk, 'tcx>, - func: ValueRef, - data: ValueRef, - local_ptr: ValueRef, - dest: ValueRef, - dloc: DebugLoc) -> Block<'blk, 'tcx> { +fn try_intrinsic<'blk, 'tcx>( + bcx: &BlockAndBuilder<'blk, 'tcx>, + func: ValueRef, + data: ValueRef, + local_ptr: ValueRef, + dest: ValueRef, + dloc: DebugLoc +) { if bcx.sess().no_landing_pads() { Call(bcx, func, &[data], dloc); - Store(bcx, C_null(Type::i8p(bcx.ccx())), dest); - bcx + Store(bcx, C_null(Type::i8p(&bcx.ccx())), dest); } else if wants_msvc_seh(bcx.sess()) { - trans_msvc_try(bcx, func, data, local_ptr, dest, dloc) + trans_msvc_try(bcx, func, data, local_ptr, dest, dloc); } else { - trans_gnu_try(bcx, func, data, local_ptr, dest, dloc) + trans_gnu_try(bcx, func, data, local_ptr, dest, dloc); } } @@ -841,26 +818,26 @@ fn try_intrinsic<'blk, 'tcx>(bcx: Block<'blk, 'tcx>, // instructions are meant to work for all targets, as of the time of this // writing, however, LLVM does not recommend the usage of these new instructions // as the old ones are still more optimized. 
-fn trans_msvc_try<'blk, 'tcx>(bcx: Block<'blk, 'tcx>, +fn trans_msvc_try<'blk, 'tcx>(bcx: &BlockAndBuilder<'blk, 'tcx>, func: ValueRef, data: ValueRef, local_ptr: ValueRef, dest: ValueRef, - dloc: DebugLoc) -> Block<'blk, 'tcx> { - let llfn = get_rust_try_fn(bcx.fcx, &mut |bcx| { + dloc: DebugLoc) { + let llfn = get_rust_try_fn(bcx.fcx(), &mut |bcx| { let ccx = bcx.ccx(); let dloc = DebugLoc::None; - SetPersonalityFn(bcx, bcx.fcx.eh_personality()); + SetPersonalityFn(&bcx, bcx.fcx().eh_personality()); - let normal = bcx.fcx.new_block("normal"); - let catchswitch = bcx.fcx.new_block("catchswitch"); - let catchpad = bcx.fcx.new_block("catchpad"); - let caught = bcx.fcx.new_block("caught"); + let normal = bcx.fcx().new_block("normal").build(); + let catchswitch = bcx.fcx().new_block("catchswitch").build(); + let catchpad = bcx.fcx().new_block("catchpad").build(); + let caught = bcx.fcx().new_block("caught").build(); - let func = llvm::get_param(bcx.fcx.llfn, 0); - let data = llvm::get_param(bcx.fcx.llfn, 1); - let local_ptr = llvm::get_param(bcx.fcx.llfn, 2); + let func = llvm::get_param(bcx.fcx().llfn, 0); + let data = llvm::get_param(bcx.fcx().llfn, 1); + let local_ptr = llvm::get_param(bcx.fcx().llfn, 2); // We're generating an IR snippet that looks like: // @@ -902,37 +879,36 @@ fn trans_msvc_try<'blk, 'tcx>(bcx: Block<'blk, 'tcx>, // // More information can be found in libstd's seh.rs implementation. let i64p = Type::i64(ccx).ptr_to(); - let slot = Alloca(bcx, i64p, "slot"); - Invoke(bcx, func, &[data], normal.llbb, catchswitch.llbb, dloc); + let slot = Alloca(&bcx, i64p, "slot"); + Invoke(&bcx, func, &[data], normal.llbb(), catchswitch.llbb(), dloc); - Ret(normal, C_i32(ccx, 0), dloc); + Ret(&normal, C_i32(ccx, 0), dloc); - let cs = CatchSwitch(catchswitch, None, None, 1); - AddHandler(catchswitch, cs, catchpad.llbb); + let cs = CatchSwitch(&catchswitch, None, None, 1); + AddHandler(&catchswitch, cs, catchpad.llbb()); let tcx = ccx.tcx(); let tydesc = match tcx.lang_items.msvc_try_filter() { Some(did) => ::consts::get_static(ccx, did), None => bug!("msvc_try_filter not defined"), }; - let tok = CatchPad(catchpad, cs, &[tydesc, C_i32(ccx, 0), slot]); - let addr = Load(catchpad, slot); - let arg1 = Load(catchpad, addr); + let tok = CatchPad(&catchpad, cs, &[tydesc, C_i32(ccx, 0), slot]); + let addr = Load(&catchpad, slot); + let arg1 = Load(&catchpad, addr); let val1 = C_i32(ccx, 1); - let arg2 = Load(catchpad, InBoundsGEP(catchpad, addr, &[val1])); - let local_ptr = BitCast(catchpad, local_ptr, i64p); - Store(catchpad, arg1, local_ptr); - Store(catchpad, arg2, InBoundsGEP(catchpad, local_ptr, &[val1])); - CatchRet(catchpad, tok, caught.llbb); + let arg2 = Load(&catchpad, InBoundsGEP(&catchpad, addr, &[val1])); + let local_ptr = BitCast(&catchpad, local_ptr, i64p); + Store(&catchpad, arg1, local_ptr); + Store(&catchpad, arg2, InBoundsGEP(&catchpad, local_ptr, &[val1])); + CatchRet(&catchpad, tok, caught.llbb()); - Ret(caught, C_i32(ccx, 1), dloc); + Ret(&caught, C_i32(ccx, 1), dloc); }); // Note that no invoke is used here because by definition this function // can't panic (that's what it's catching). let ret = Call(bcx, llfn, &[func, data, local_ptr], dloc); Store(bcx, ret, dest); - return bcx } // Definition of the standard "try" function for Rust using the GNU-like model @@ -946,13 +922,13 @@ fn trans_msvc_try<'blk, 'tcx>(bcx: Block<'blk, 'tcx>, // function calling it, and that function may already have other personality // functions in play. 
By calling a shim we're guaranteed that our shim will have // the right personality function. -fn trans_gnu_try<'blk, 'tcx>(bcx: Block<'blk, 'tcx>, +fn trans_gnu_try<'blk, 'tcx>(bcx: &BlockAndBuilder<'blk, 'tcx>, func: ValueRef, data: ValueRef, local_ptr: ValueRef, dest: ValueRef, - dloc: DebugLoc) -> Block<'blk, 'tcx> { - let llfn = get_rust_try_fn(bcx.fcx, &mut |bcx| { + dloc: DebugLoc) { + let llfn = get_rust_try_fn(bcx.fcx(), &mut |bcx| { let ccx = bcx.ccx(); let dloc = DebugLoc::None; @@ -973,14 +949,14 @@ fn trans_gnu_try<'blk, 'tcx>(bcx: Block<'blk, 'tcx>, // expected to be `*mut *mut u8` for this to actually work, but that's // managed by the standard library. - let then = bcx.fcx.new_block("then"); - let catch = bcx.fcx.new_block("catch"); + let then = bcx.fcx().new_block("then").build(); + let catch = bcx.fcx().new_block("catch").build(); - let func = llvm::get_param(bcx.fcx.llfn, 0); - let data = llvm::get_param(bcx.fcx.llfn, 1); - let local_ptr = llvm::get_param(bcx.fcx.llfn, 2); - Invoke(bcx, func, &[data], then.llbb, catch.llbb, dloc); - Ret(then, C_i32(ccx, 0), dloc); + let func = llvm::get_param(bcx.fcx().llfn, 0); + let data = llvm::get_param(bcx.fcx().llfn, 1); + let local_ptr = llvm::get_param(bcx.fcx().llfn, 2); + Invoke(&bcx, func, &[data], then.llbb(), catch.llbb(), dloc); + Ret(&then, C_i32(ccx, 0), dloc); // Type indicator for the exception being thrown. // @@ -990,18 +966,17 @@ fn trans_gnu_try<'blk, 'tcx>(bcx: Block<'blk, 'tcx>, // rust_try ignores the selector. let lpad_ty = Type::struct_(ccx, &[Type::i8p(ccx), Type::i32(ccx)], false); - let vals = LandingPad(catch, lpad_ty, bcx.fcx.eh_personality(), 1); - AddClause(catch, vals, C_null(Type::i8p(ccx))); - let ptr = ExtractValue(catch, vals, 0); - Store(catch, ptr, BitCast(catch, local_ptr, Type::i8p(ccx).ptr_to())); - Ret(catch, C_i32(ccx, 1), dloc); + let vals = LandingPad(&catch, lpad_ty, bcx.fcx().eh_personality(), 1); + AddClause(&catch, vals, C_null(Type::i8p(ccx))); + let ptr = ExtractValue(&catch, vals, 0); + Store(&catch, ptr, BitCast(&catch, local_ptr, Type::i8p(ccx).ptr_to())); + Ret(&catch, C_i32(ccx, 1), dloc); }); // Note that no invoke is used here because by definition this function // can't panic (that's what it's catching). let ret = Call(bcx, llfn, &[func, data, local_ptr], dloc); Store(bcx, ret, dest); - return bcx; } // Helper function to give a Block to a closure to translate a shim function. @@ -1010,7 +985,7 @@ fn gen_fn<'a, 'tcx>(fcx: &FunctionContext<'a, 'tcx>, name: &str, inputs: Vec>, output: Ty<'tcx>, - trans: &mut for<'b> FnMut(Block<'b, 'tcx>)) + trans: &mut for<'b> FnMut(BlockAndBuilder<'b, 'tcx>)) -> ValueRef { let ccx = fcx.ccx; let sig = ccx.tcx().mk_fn_sig(inputs.into_iter(), output, false); @@ -1035,7 +1010,7 @@ fn gen_fn<'a, 'tcx>(fcx: &FunctionContext<'a, 'tcx>, // // This function is only generated once and is then cached. 
fn get_rust_try_fn<'a, 'tcx>(fcx: &FunctionContext<'a, 'tcx>, - trans: &mut for<'b> FnMut(Block<'b, 'tcx>)) + trans: &mut for<'b> FnMut(BlockAndBuilder<'b, 'tcx>)) -> ValueRef { let ccx = fcx.ccx; if let Some(llfn) = ccx.rust_try_fn().get() { @@ -1060,16 +1035,16 @@ fn span_invalid_monomorphization_error(a: &Session, b: Span, c: &str) { span_err!(a, b, E0511, "{}", c); } -fn generic_simd_intrinsic<'blk, 'tcx, 'a> - (bcx: Block<'blk, 'tcx>, - name: &str, - callee_ty: Ty<'tcx>, - llargs: &[ValueRef], - ret_ty: Ty<'tcx>, - llret_ty: Type, - call_debug_location: DebugLoc, - span: Span) -> ValueRef -{ +fn generic_simd_intrinsic<'blk, 'tcx, 'a>( + bcx: &BlockAndBuilder<'blk, 'tcx>, + name: &str, + callee_ty: Ty<'tcx>, + llargs: &[ValueRef], + ret_ty: Ty<'tcx>, + llret_ty: Type, + call_debug_location: DebugLoc, + span: Span +) -> ValueRef { // macros for error handling: macro_rules! emit_error { ($msg: tt) => { diff --git a/src/librustc_trans/meth.rs b/src/librustc_trans/meth.rs index aa9b900fa4653..1a93773a9ecc5 100644 --- a/src/librustc_trans/meth.rs +++ b/src/librustc_trans/meth.rs @@ -32,7 +32,7 @@ use rustc::ty; const VTABLE_OFFSET: usize = 3; /// Extracts a method from a trait object's vtable, at the specified index. -pub fn get_virtual_method<'blk, 'tcx>(bcx: Block<'blk, 'tcx>, +pub fn get_virtual_method<'blk, 'tcx>(bcx: &BlockAndBuilder<'blk, 'tcx>, llvtable: ValueRef, vtable_index: usize) -> ValueRef { @@ -94,9 +94,9 @@ pub fn trans_object_shim<'a, 'tcx>(ccx: &'a CrateContext<'a, 'tcx>, let dest = fcx.llretslotptr.get(); let llargs = get_params(fcx.llfn); bcx = callee.call(bcx, DebugLoc::None, - &llargs[fcx.fn_ty.ret.is_indirect() as usize..], dest).bcx; + &llargs[fcx.fn_ty.ret.is_indirect() as usize..], dest).0; - fcx.finish(bcx, DebugLoc::None); + fcx.finish(&bcx, DebugLoc::None); llfn } diff --git a/src/librustc_trans/mir/block.rs b/src/librustc_trans/mir/block.rs index fe087bc495121..9af02f40111f5 100644 --- a/src/librustc_trans/mir/block.rs +++ b/src/librustc_trans/mir/block.rs @@ -40,6 +40,7 @@ use super::operand::OperandRef; use super::operand::OperandValue::{Pair, Ref, Immediate}; use std::cell::Ref as CellRef; +use std::ptr; impl<'bcx, 'tcx> MirContext<'bcx, 'tcx> { pub fn trans_block(&mut self, bb: mir::BasicBlock) { @@ -121,10 +122,8 @@ impl<'bcx, 'tcx> MirContext<'bcx, 'tcx> { let ps = self.get_personality_slot(&bcx); let lp = bcx.load(ps); - bcx.with_block(|bcx| { - base::call_lifetime_end(bcx, ps); - base::trans_unwind_resume(bcx, lp); - }); + base::call_lifetime_end(&bcx, ps); + base::trans_unwind_resume(&bcx, lp); } } @@ -143,9 +142,7 @@ impl<'bcx, 'tcx> MirContext<'bcx, 'tcx> { mir::TerminatorKind::Switch { ref discr, ref adt_def, ref targets } => { let discr_lvalue = self.trans_lvalue(&bcx, discr); let ty = discr_lvalue.ty.to_ty(bcx.tcx()); - let discr = bcx.with_block(|bcx| - adt::trans_get_discr(bcx, ty, discr_lvalue.llval, None, true) - ); + let discr = adt::trans_get_discr(&bcx, ty, discr_lvalue.llval, None, true); let mut bb_hist = FxHashMap(); for target in targets { @@ -169,8 +166,7 @@ impl<'bcx, 'tcx> MirContext<'bcx, 'tcx> { for (adt_variant, &target) in adt_def.variants.iter().zip(targets) { if default_bb != Some(target) { let llbb = llblock(self, target); - let llval = bcx.with_block(|bcx| adt::trans_case( - bcx, ty, Disr::from(adt_variant.disr_val))); + let llval = adt::trans_case(&bcx, ty, Disr::from(adt_variant.disr_val)); build::AddCase(switch, llval, llbb) } } @@ -179,7 +175,7 @@ impl<'bcx, 'tcx> MirContext<'bcx, 'tcx> { mir::TerminatorKind::SwitchInt { 
ref discr, switch_ty, ref values, ref targets } => { let (otherwise, targets) = targets.split_last().unwrap(); let discr = bcx.load(self.trans_lvalue(&bcx, discr).llval); - let discr = bcx.with_block(|bcx| base::to_immediate(bcx, discr, switch_ty)); + let discr = base::to_immediate(&bcx, discr, switch_ty); let switch = bcx.switch(discr, llblock(self, *otherwise), values.len()); for (value, target) in values.iter().zip(targets) { let val = Const::from_constval(bcx.ccx(), value.clone(), switch_ty); @@ -259,13 +255,11 @@ impl<'bcx, 'tcx> MirContext<'bcx, 'tcx> { // but I am shooting for a quick fix to #35546 // here that can be cleanly backported to beta, so // I want to avoid touching all of trans. - bcx.with_block(|bcx| { - let scratch = base::alloc_ty(bcx, ty, "drop"); - base::call_lifetime_start(bcx, scratch); - build::Store(bcx, lvalue.llval, base::get_dataptr(bcx, scratch)); - build::Store(bcx, lvalue.llextra, base::get_meta(bcx, scratch)); - scratch - }) + let scratch = base::alloc_ty(&bcx, ty, "drop"); + base::call_lifetime_start(&bcx, scratch); + build::Store(&bcx, lvalue.llval, base::get_dataptr(&bcx, scratch)); + build::Store(&bcx, lvalue.llextra, base::get_meta(&bcx, scratch)); + scratch }; if let Some(unwind) = unwind { bcx.invoke(drop_fn, @@ -443,6 +437,65 @@ impl<'bcx, 'tcx> MirContext<'bcx, 'tcx> { return; } + // FIXME: This should proxy to the drop glue in the future when the ABI matches; + // most of the below code was copied from the match arm for TerminatorKind::Drop. + if intrinsic == Some("drop_in_place") { + let &(_, target) = destination.as_ref().unwrap(); + let ty = if let ty::TyFnDef(_, substs, _) = callee.ty.sty { + substs.type_at(0) + } else { + bug!("Unexpected ty: {}", callee.ty); + }; + + // Double check for necessity to drop + if !glue::type_needs_drop(bcx.tcx(), ty) { + funclet_br(self, bcx, target); + return; + } + + let ptr = self.trans_operand(&bcx, &args[0]); + let (llval, llextra) = match ptr.val { + Immediate(llptr) => (llptr, ptr::null_mut()), + Pair(llptr, llextra) => (llptr, llextra), + Ref(_) => bug!("Deref of by-Ref type {:?}", ptr.ty) + }; + + let drop_fn = glue::get_drop_glue(bcx.ccx(), ty); + let drop_ty = glue::get_drop_glue_type(bcx.tcx(), ty); + let is_sized = common::type_is_sized(bcx.tcx(), ty); + let llvalue = if is_sized { + if drop_ty != ty { + bcx.pointercast(llval, type_of::type_of(bcx.ccx(), drop_ty).ptr_to()) + } else { + llval + } + } else { + // FIXME(#36457) Currently drop glue takes sized + // values as a `*(data, meta)`, but elsewhere in + // MIR we pass `(data, meta)` as two separate + // arguments. It would be better to fix drop glue, + // but I am shooting for a quick fix to #35546 + // here that can be cleanly backported to beta, so + // I want to avoid touching all of trans. 
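
The workaround the FIXME above describes packs the fat pointer's two halves back into one allocation so drop glue can take a single pointer; the added lines below do exactly that with `get_dataptr`/`get_meta`. As a plain-Rust illustration of the (data, meta) pair itself (the names here are ours, not trans API):

    // A slice reference is one fat pointer: a data pointer plus its
    // metadata (the length).
    fn split_fat_ptr(p: &[u8]) -> (*const u8, usize) {
        (p.as_ptr(), p.len())
    }

    fn main() {
        let buf = [1u8, 2, 3];
        let (data, meta) = split_fat_ptr(&buf);
        assert_eq!(meta, 3);
        assert!(!data.is_null());
    }
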
+ let scratch = base::alloc_ty(&bcx, ty, "drop"); + base::call_lifetime_start(&bcx, scratch); + build::Store(&bcx, llval, base::get_dataptr(&bcx, scratch)); + build::Store(&bcx, llextra, base::get_meta(&bcx, scratch)); + scratch + }; + if let Some(unwind) = *cleanup { + bcx.invoke(drop_fn, + &[llvalue], + self.blocks[target].llbb, + llblock(self, unwind), + cleanup_bundle); + } else { + bcx.call(drop_fn, &[llvalue], cleanup_bundle); + funclet_br(self, bcx, target); + } + return; + } + if intrinsic == Some("transmute") { let &(ref dest, target) = destination.as_ref().unwrap(); self.with_lvalue_ref(&bcx, dest, |this, dest| { @@ -537,10 +590,7 @@ impl<'bcx, 'tcx> MirContext<'bcx, 'tcx> { bug!("Cannot use direct operand with an intrinsic call") }; - bcx.with_block(|bcx| { - trans_intrinsic_call(bcx, callee.ty, &fn_ty, - &llargs, dest, debug_loc); - }); + trans_intrinsic_call(&bcx, callee.ty, &fn_ty, &llargs, dest, debug_loc); if let ReturnDest::IndirectOperand(dst, _) = ret_dest { // Make a fake operand for store_return @@ -554,8 +604,7 @@ impl<'bcx, 'tcx> MirContext<'bcx, 'tcx> { if let Some((_, target)) = *destination { funclet_br(self, bcx, target); } else { - // trans_intrinsic_call already used Unreachable. - // bcx.unreachable(); + bcx.unreachable(); } return; @@ -620,9 +669,7 @@ impl<'bcx, 'tcx> MirContext<'bcx, 'tcx> { let (ptr, meta) = (a, b); if *next_idx == 0 { if let Virtual(idx) = *callee { - let llfn = bcx.with_block(|bcx| { - meth::get_virtual_method(bcx, meta, idx) - }); + let llfn = meth::get_virtual_method(bcx, meta, idx); let llty = fn_ty.llvm_type(bcx.ccx()).ptr_to(); *callee = Fn(bcx.pointercast(llfn, llty)); } @@ -768,12 +815,10 @@ impl<'bcx, 'tcx> MirContext<'bcx, 'tcx> { slot } else { let llretty = Type::struct_(ccx, &[Type::i8p(ccx), Type::i32(ccx)], false); - bcx.with_block(|bcx| { - let slot = base::alloca(bcx, llretty, "personalityslot"); - self.llpersonalityslot = Some(slot); - base::call_lifetime_start(bcx, slot); - slot - }) + let slot = base::alloca(bcx, llretty, "personalityslot"); + self.llpersonalityslot = Some(slot); + base::call_lifetime_start(bcx, slot); + slot } } @@ -863,18 +908,14 @@ impl<'bcx, 'tcx> MirContext<'bcx, 'tcx> { return if fn_ret_ty.is_indirect() { // Odd, but possible, case, we have an operand temporary, // but the calling convention has an indirect return. - let tmp = bcx.with_block(|bcx| { - base::alloc_ty(bcx, ret_ty, "tmp_ret") - }); + let tmp = base::alloc_ty(bcx, ret_ty, "tmp_ret"); llargs.push(tmp); ReturnDest::IndirectOperand(tmp, index) } else if is_intrinsic { // Currently, intrinsics always need a location to store // the result. so we create a temporary alloca for the // result - let tmp = bcx.with_block(|bcx| { - base::alloc_ty(bcx, ret_ty, "tmp_ret") - }); + let tmp = base::alloc_ty(bcx, ret_ty, "tmp_ret"); ReturnDest::IndirectOperand(tmp, index) } else { ReturnDest::DirectOperand(index) @@ -939,9 +980,7 @@ impl<'bcx, 'tcx> MirContext<'bcx, 'tcx> { DirectOperand(index) => { // If there is a cast, we have to store and reload. 
let op = if ret_ty.cast.is_some() { - let tmp = bcx.with_block(|bcx| { - base::alloc_ty(bcx, op.ty, "tmp_ret") - }); + let tmp = base::alloc_ty(bcx, op.ty, "tmp_ret"); ret_ty.store(bcx, op.immediate(), tmp); self.trans_load(bcx, tmp, op.ty) } else { diff --git a/src/librustc_trans/mir/lvalue.rs b/src/librustc_trans/mir/lvalue.rs index d28c466e230ba..e211a8b68d4f3 100644 --- a/src/librustc_trans/mir/lvalue.rs +++ b/src/librustc_trans/mir/lvalue.rs @@ -50,7 +50,7 @@ impl<'tcx> LvalueRef<'tcx> { -> LvalueRef<'tcx> { assert!(!ty.has_erasable_regions()); - let lltemp = bcx.with_block(|bcx| base::alloc_ty(bcx, ty, name)); + let lltemp = base::alloc_ty(bcx, ty, name); LvalueRef::new_sized(lltemp, LvalueTy::from_ty(ty)) } diff --git a/src/librustc_trans/mir/mod.rs b/src/librustc_trans/mir/mod.rs index 94dc9a5fdb489..174608bdeb987 100644 --- a/src/librustc_trans/mir/mod.rs +++ b/src/librustc_trans/mir/mod.rs @@ -181,7 +181,7 @@ impl<'tcx> LocalRef<'tcx> { /////////////////////////////////////////////////////////////////////////// pub fn trans_mir<'blk, 'tcx: 'blk>(fcx: &'blk FunctionContext<'blk, 'tcx>) { - let bcx = fcx.init(true).build(); + let bcx = fcx.init(true); let mir = bcx.mir(); // Analyze the temps to determine which must be lvalues @@ -240,11 +240,9 @@ pub fn trans_mir<'blk, 'tcx: 'blk>(fcx: &'blk FunctionContext<'blk, 'tcx>) { if dbg { let dbg_loc = mircx.debug_loc(source_info); if let DebugLoc::ScopeAt(scope, span) = dbg_loc { - bcx.with_block(|bcx| { - declare_local(bcx, name, ty, scope, - VariableAccess::DirectVariable { alloca: lvalue.llval }, - VariableKind::LocalVariable, span); - }); + declare_local(&bcx, name, ty, scope, + VariableAccess::DirectVariable { alloca: lvalue.llval }, + VariableKind::LocalVariable, span); } else { panic!("Unexpected"); } @@ -353,9 +351,7 @@ fn arg_local_refs<'bcx, 'tcx>(bcx: &BlockAndBuilder<'bcx, 'tcx>, _ => bug!("spread argument isn't a tuple?!") }; - let lltemp = bcx.with_block(|bcx| { - base::alloc_ty(bcx, arg_ty, &format!("arg{}", arg_index)) - }); + let lltemp = base::alloc_ty(&bcx, arg_ty, &format!("arg{}", arg_index)); for (i, &tupled_arg_ty) in tupled_arg_tys.iter().enumerate() { let dst = bcx.struct_gep(lltemp, i); let arg = &fcx.fn_ty.args[idx]; @@ -376,7 +372,7 @@ fn arg_local_refs<'bcx, 'tcx>(bcx: &BlockAndBuilder<'bcx, 'tcx>, // Now that we have one alloca that contains the aggregate value, // we can create one debuginfo entry for the argument. 
- bcx.with_block(|bcx| arg_scope.map(|scope| { + arg_scope.map(|scope| { let variable_access = VariableAccess::DirectVariable { alloca: lltemp }; @@ -384,7 +380,7 @@ fn arg_local_refs<'bcx, 'tcx>(bcx: &BlockAndBuilder<'bcx, 'tcx>, arg_ty, scope, variable_access, VariableKind::ArgumentVariable(arg_index + 1), bcx.fcx().span.unwrap_or(DUMMY_SP)); - })); + }); return LocalRef::Lvalue(LvalueRef::new_sized(lltemp, LvalueTy::from_ty(arg_ty))); } @@ -433,9 +429,7 @@ fn arg_local_refs<'bcx, 'tcx>(bcx: &BlockAndBuilder<'bcx, 'tcx>, }; return LocalRef::Operand(Some(operand.unpack_if_pair(bcx))); } else { - let lltemp = bcx.with_block(|bcx| { - base::alloc_ty(bcx, arg_ty, &format!("arg{}", arg_index)) - }); + let lltemp = base::alloc_ty(&bcx, arg_ty, &format!("arg{}", arg_index)); if common::type_is_fat_ptr(tcx, arg_ty) { // we pass fat pointers as two words, but we want to // represent them internally as a pointer to two words, @@ -453,7 +447,7 @@ fn arg_local_refs<'bcx, 'tcx>(bcx: &BlockAndBuilder<'bcx, 'tcx>, } lltemp }; - bcx.with_block(|bcx| arg_scope.map(|scope| { + arg_scope.map(|scope| { // Is this a regular argument? if arg_index > 0 || mir.upvar_decls.is_empty() { declare_local(bcx, arg_decl.name.unwrap_or(keywords::Invalid.name()), arg_ty, @@ -531,7 +525,7 @@ fn arg_local_refs<'bcx, 'tcx>(bcx: &BlockAndBuilder<'bcx, 'tcx>, VariableKind::CapturedVariable, bcx.fcx().span.unwrap_or(DUMMY_SP)); } - })); + }); LocalRef::Lvalue(LvalueRef::new_sized(llval, LvalueTy::from_ty(arg_ty))) }).collect() } diff --git a/src/librustc_trans/mir/operand.rs b/src/librustc_trans/mir/operand.rs index 83e1d03c689ab..a7fdc4330becc 100644 --- a/src/librustc_trans/mir/operand.rs +++ b/src/librustc_trans/mir/operand.rs @@ -14,7 +14,7 @@ use rustc::mir; use rustc_data_structures::indexed_vec::Idx; use base; -use common::{self, Block, BlockAndBuilder}; +use common::{self, BlockAndBuilder}; use value::Value; use type_of; use type_::Type; @@ -247,11 +247,11 @@ impl<'bcx, 'tcx> MirContext<'bcx, 'tcx> { operand: OperandRef<'tcx>) { debug!("store_operand: operand={:?} lldest={:?}", operand, lldest); - bcx.with_block(|bcx| self.store_operand_direct(bcx, lldest, operand)) + self.store_operand_direct(bcx, lldest, operand) } pub fn store_operand_direct(&mut self, - bcx: Block<'bcx, 'tcx>, + bcx: &BlockAndBuilder<'bcx, 'tcx>, lldest: ValueRef, operand: OperandRef<'tcx>) { diff --git a/src/librustc_trans/mir/rvalue.rs b/src/librustc_trans/mir/rvalue.rs index 2ee49db477864..274871d7552f3 100644 --- a/src/librustc_trans/mir/rvalue.rs +++ b/src/librustc_trans/mir/rvalue.rs @@ -17,7 +17,7 @@ use rustc::mir; use asm; use base; use callee::Callee; -use common::{self, val_ty, C_bool, C_null, C_uint, BlockAndBuilder, Result}; +use common::{self, val_ty, C_bool, C_null, C_uint, BlockAndBuilder}; use common::{C_integral}; use debuginfo::DebugLoc; use adt; @@ -70,30 +70,28 @@ impl<'bcx, 'tcx> MirContext<'bcx, 'tcx> { // so the (generic) MIR may not be able to expand it. let operand = self.trans_operand(&bcx, source); let operand = operand.pack_if_pair(&bcx); - bcx.with_block(|bcx| { - match operand.val { - OperandValue::Pair(..) => bug!(), - OperandValue::Immediate(llval) => { - // unsize from an immediate structure. We don't - // really need a temporary alloca here, but - // avoiding it would require us to have - // `coerce_unsized_into` use extractvalue to - // index into the struct, and this case isn't - // important enough for it. 
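+                        // (Put differently: `coerce_unsized_into` copies
+                        // memory-to-memory, so the immediate is spilled to
+                        // a temporary and coerced from there.)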
- debug!("trans_rvalue: creating ugly alloca"); - let lltemp = base::alloc_ty(bcx, operand.ty, "__unsize_temp"); - base::store_ty(bcx, llval, lltemp, operand.ty); - base::coerce_unsized_into(bcx, - lltemp, operand.ty, - dest.llval, cast_ty); - } - OperandValue::Ref(llref) => { - base::coerce_unsized_into(bcx, - llref, operand.ty, - dest.llval, cast_ty); - } + match operand.val { + OperandValue::Pair(..) => bug!(), + OperandValue::Immediate(llval) => { + // unsize from an immediate structure. We don't + // really need a temporary alloca here, but + // avoiding it would require us to have + // `coerce_unsized_into` use extractvalue to + // index into the struct, and this case isn't + // important enough for it. + debug!("trans_rvalue: creating ugly alloca"); + let lltemp = base::alloc_ty(&bcx, operand.ty, "__unsize_temp"); + base::store_ty(&bcx, llval, lltemp, operand.ty); + base::coerce_unsized_into(&bcx, + lltemp, operand.ty, + dest.llval, cast_ty); } - }); + OperandValue::Ref(llref) => { + base::coerce_unsized_into(&bcx, + llref, operand.ty, + dest.llval, cast_ty); + } + } bcx } @@ -102,11 +100,9 @@ impl<'bcx, 'tcx> MirContext<'bcx, 'tcx> { let size = count.value.as_u64(bcx.tcx().sess.target.uint_type); let size = C_uint(bcx.ccx(), size); let base = base::get_dataptr_builder(&bcx, dest.llval); - let bcx = bcx.map_block(|block| { - tvec::slice_for_each(block, base, tr_elem.ty, size, |block, llslot| { - self.store_operand_direct(block, llslot, tr_elem); - block - }) + let bcx = tvec::slice_for_each(bcx, base, tr_elem.ty, size, |bcx, llslot| { + self.store_operand_direct(&bcx, llslot, tr_elem); + bcx }); bcx } @@ -115,10 +111,8 @@ impl<'bcx, 'tcx> MirContext<'bcx, 'tcx> { match *kind { mir::AggregateKind::Adt(adt_def, variant_index, _, active_field_index) => { let disr = Disr::from(adt_def.variants[variant_index].disr_val); - bcx.with_block(|bcx| { - adt::trans_set_discr(bcx, - dest.ty.to_ty(bcx.tcx()), dest.llval, Disr::from(disr)); - }); + adt::trans_set_discr(&bcx, + dest.ty.to_ty(bcx.tcx()), dest.llval, Disr::from(disr)); for (i, operand) in operands.iter().enumerate() { let op = self.trans_operand(&bcx, operand); // Do not generate stores and GEPis for zero-sized fields. @@ -171,10 +165,7 @@ impl<'bcx, 'tcx> MirContext<'bcx, 'tcx> { self.trans_operand(&bcx, input).immediate() }).collect(); - bcx.with_block(|bcx| { - asm::trans_inline_asm(bcx, asm, outputs, input_vals); - }); - + asm::trans_inline_asm(&bcx, asm, outputs, input_vals); bcx } @@ -238,10 +229,8 @@ impl<'bcx, 'tcx> MirContext<'bcx, 'tcx> { } OperandValue::Immediate(lldata) => { // "standard" unsize - let (lldata, llextra) = bcx.with_block(|bcx| { - base::unsize_thin_ptr(bcx, lldata, - operand.ty, cast_ty) - }); + let (lldata, llextra) = base::unsize_thin_ptr(&bcx, lldata, + operand.ty, cast_ty); OperandValue::Pair(lldata, llextra) } OperandValue::Ref(_) => { @@ -281,9 +270,7 @@ impl<'bcx, 'tcx> MirContext<'bcx, 'tcx> { let discr = match operand.val { OperandValue::Immediate(llval) => llval, OperandValue::Ref(llptr) => { - bcx.with_block(|bcx| { - adt::trans_get_discr(bcx, operand.ty, llptr, None, true) - }) + adt::trans_get_discr(&bcx, operand.ty, llptr, None, true) } OperandValue::Pair(..) 
=> bug!("Unexpected Pair operand") }; @@ -468,19 +455,16 @@ impl<'bcx, 'tcx> MirContext<'bcx, 'tcx> { let llalign = C_uint(bcx.ccx(), align); let llty_ptr = llty.ptr_to(); let box_ty = bcx.tcx().mk_box(content_ty); - let mut llval = None; - let bcx = bcx.map_block(|bcx| { - let Result { bcx, val } = base::malloc_raw_dyn(bcx, - llty_ptr, - box_ty, - llsize, - llalign, - debug_loc); - llval = Some(val); - bcx - }); + let val = base::malloc_raw_dyn( + &bcx, + llty_ptr, + box_ty, + llsize, + llalign, + debug_loc + ); let operand = OperandRef { - val: OperandValue::Immediate(llval.unwrap()), + val: OperandValue::Immediate(val), ty: box_ty, }; (bcx, operand) @@ -543,21 +527,21 @@ impl<'bcx, 'tcx> MirContext<'bcx, 'tcx> { mir::BinOp::BitAnd => bcx.and(lhs, rhs), mir::BinOp::BitXor => bcx.xor(lhs, rhs), mir::BinOp::Shl => { - bcx.with_block(|bcx| { - common::build_unchecked_lshift(bcx, - lhs, - rhs, - DebugLoc::None) - }) + common::build_unchecked_lshift( + &bcx, + lhs, + rhs, + DebugLoc::None + ) } mir::BinOp::Shr => { - bcx.with_block(|bcx| { - common::build_unchecked_rshift(bcx, - input_ty, - lhs, - rhs, - DebugLoc::None) - }) + common::build_unchecked_rshift( + bcx, + input_ty, + lhs, + rhs, + DebugLoc::None + ) } mir::BinOp::Ne | mir::BinOp::Lt | mir::BinOp::Gt | mir::BinOp::Eq | mir::BinOp::Le | mir::BinOp::Ge => if is_nil { @@ -677,9 +661,7 @@ impl<'bcx, 'tcx> MirContext<'bcx, 'tcx> { mir::BinOp::Shl | mir::BinOp::Shr => { let lhs_llty = val_ty(lhs); let rhs_llty = val_ty(rhs); - let invert_mask = bcx.with_block(|bcx| { - common::shift_mask_val(bcx, lhs_llty, rhs_llty, true) - }); + let invert_mask = common::shift_mask_val(&bcx, lhs_llty, rhs_llty, true); let outer_bits = bcx.and(rhs, invert_mask); let of = bcx.icmp(llvm::IntNE, outer_bits, C_null(rhs_llty)); diff --git a/src/librustc_trans/mir/statement.rs b/src/librustc_trans/mir/statement.rs index 296a0e8049e08..9c872e214d2f4 100644 --- a/src/librustc_trans/mir/statement.rs +++ b/src/librustc_trans/mir/statement.rs @@ -63,12 +63,10 @@ impl<'bcx, 'tcx> MirContext<'bcx, 'tcx> { mir::StatementKind::SetDiscriminant{ref lvalue, variant_index} => { let ty = self.monomorphized_lvalue_ty(lvalue); let lvalue_transed = self.trans_lvalue(&bcx, lvalue); - bcx.with_block(|bcx| - adt::trans_set_discr(bcx, - ty, - lvalue_transed.llval, - Disr::from(variant_index)) - ); + adt::trans_set_discr(&bcx, + ty, + lvalue_transed.llval, + Disr::from(variant_index)); bcx } mir::StatementKind::StorageLive(ref lvalue) => { diff --git a/src/librustc_trans/tvec.rs b/src/librustc_trans/tvec.rs index cf897fc5a1518..3dbaaff1f48f0 100644 --- a/src/librustc_trans/tvec.rs +++ b/src/librustc_trans/tvec.rs @@ -18,16 +18,16 @@ use common::*; use debuginfo::DebugLoc; use rustc::ty::Ty; -pub fn slice_for_each<'blk, 'tcx, F>(bcx: Block<'blk, 'tcx>, +pub fn slice_for_each<'blk, 'tcx, F>(bcx: BlockAndBuilder<'blk, 'tcx>, data_ptr: ValueRef, unit_ty: Ty<'tcx>, len: ValueRef, f: F) - -> Block<'blk, 'tcx> where - F: FnOnce(Block<'blk, 'tcx>, ValueRef) -> Block<'blk, 'tcx>, + -> BlockAndBuilder<'blk, 'tcx> + where F: FnOnce(BlockAndBuilder<'blk, 'tcx>, ValueRef) -> BlockAndBuilder<'blk, 'tcx>, { let _icx = push_ctxt("tvec::slice_for_each"); - let fcx = bcx.fcx; + let fcx = bcx.fcx(); // Special-case vectors with elements of size 0 so they don't go out of bounds (#9890) let zst = type_is_zero_size(bcx.ccx(), unit_ty); @@ -37,27 +37,33 @@ pub fn slice_for_each<'blk, 'tcx, F>(bcx: Block<'blk, 'tcx>, InBoundsGEP(bcx, a, &[b]) }; - let header_bcx = fcx.new_block("slice_loop_header"); - let 
body_bcx = fcx.new_block("slice_loop_body"); - let next_bcx = fcx.new_block("slice_loop_next"); + let body_bcx = fcx.new_block("slice_loop_body").build(); + let next_bcx = fcx.new_block("slice_loop_next").build(); + let header_bcx = fcx.new_block("slice_loop_header").build(); let start = if zst { C_uint(bcx.ccx(), 0 as usize) } else { data_ptr }; - let end = add(bcx, start, len); + let end = add(&bcx, start, len); - Br(bcx, header_bcx.llbb, DebugLoc::None); - let current = Phi(header_bcx, val_ty(start), &[start], &[bcx.llbb]); + Br(&bcx, header_bcx.llbb(), DebugLoc::None); + let current = Phi(&header_bcx, val_ty(start), &[start], &[bcx.llbb()]); let keep_going = - ICmp(header_bcx, llvm::IntNE, current, end, DebugLoc::None); - CondBr(header_bcx, keep_going, body_bcx.llbb, next_bcx.llbb, DebugLoc::None); + ICmp(&header_bcx, llvm::IntNE, current, end, DebugLoc::None); + CondBr(&header_bcx, keep_going, body_bcx.llbb(), next_bcx.llbb(), DebugLoc::None); let body_bcx = f(body_bcx, if zst { data_ptr } else { current }); - let next = add(body_bcx, current, C_uint(bcx.ccx(), 1usize)); - AddIncomingToPhi(current, next, body_bcx.llbb); - Br(body_bcx, header_bcx.llbb, DebugLoc::None); + // FIXME(simulacrum): The code below is identical to the closure (add) above, but using the + // closure doesn't compile due to body_bcx still being borrowed when dropped. + let next = if zst { + Add(&body_bcx, current, C_uint(bcx.ccx(), 1usize), DebugLoc::None) + } else { + InBoundsGEP(&body_bcx, current, &[C_uint(bcx.ccx(), 1usize)]) + }; + AddIncomingToPhi(current, next, body_bcx.llbb()); + Br(&body_bcx, header_bcx.llbb(), DebugLoc::None); next_bcx } diff --git a/src/librustc_trans/value.rs b/src/librustc_trans/value.rs index 79e0c11515fc4..b314f3ea414f6 100644 --- a/src/librustc_trans/value.rs +++ b/src/librustc_trans/value.rs @@ -11,7 +11,7 @@ use llvm; use llvm::{UseRef, ValueRef}; use basic_block::BasicBlock; -use common::Block; +use common::BlockAndBuilder; use std::fmt; @@ -65,11 +65,11 @@ impl Value { /// This only performs a search for a trivially dominating store. The store /// must be the only user of this value, and there must not be any conditional /// branches between the store and the given block. 
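 /// Roughly (a sketch of the walk, not the exact code): starting from
 /// `bcx`, follow single-predecessor edges backwards; if the walk reaches
 /// the store's block without passing a block that has more than one
 /// predecessor, the store dominates and is returned.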
- pub fn get_dominating_store(self, bcx: Block) -> Option { + pub fn get_dominating_store(self, bcx: &BlockAndBuilder) -> Option { match self.get_single_user().and_then(|user| user.as_store_inst()) { Some(store) => { store.get_parent().and_then(|store_bb| { - let mut bb = BasicBlock(bcx.llbb); + let mut bb = BasicBlock(bcx.llbb()); let mut ret = Some(store); while bb.get() != store_bb.get() { match bb.get_single_predecessor() { From 3f17ab9618d147a3f16d49a10fff44c5e8da4e60 Mon Sep 17 00:00:00 2001 From: Mark-Simulacrum Date: Sat, 10 Dec 2016 20:35:02 -0700 Subject: [PATCH 002/103] Remove unreachable and terminated from Block --- src/librustc_trans/adt.rs | 2 +- src/librustc_trans/base.rs | 32 +++++--------------------------- src/librustc_trans/build.rs | 22 ---------------------- src/librustc_trans/callee.rs | 24 ++++++++++-------------- src/librustc_trans/cleanup.rs | 6 ++---- src/librustc_trans/common.rs | 22 ---------------------- src/librustc_trans/glue.rs | 4 ---- 7 files changed, 18 insertions(+), 94 deletions(-) diff --git a/src/librustc_trans/adt.rs b/src/librustc_trans/adt.rs index ce1b23c1ce9b6..4585dccfc8bd1 100644 --- a/src/librustc_trans/adt.rs +++ b/src/librustc_trans/adt.rs @@ -566,7 +566,7 @@ fn struct_field_ptr<'blk, 'tcx>(bcx: &BlockAndBuilder<'blk, 'tcx>, ix: usize, needs_cast: bool) -> ValueRef { let fty = fields[ix]; let ccx = bcx.ccx(); - let ll_fty = type_of::in_memory_type_of(bcx.ccx(), fty); + let ll_fty = type_of::in_memory_type_of(ccx, fty); let ptr_val = if needs_cast { let fields = st.field_index_by_increasing_offset().map(|i| { diff --git a/src/librustc_trans/base.rs b/src/librustc_trans/base.rs index 83b40849e2761..d4f11df4e2828 100644 --- a/src/librustc_trans/base.rs +++ b/src/librustc_trans/base.rs @@ -467,10 +467,6 @@ pub fn invoke<'blk, 'tcx>(bcx: BlockAndBuilder<'blk, 'tcx>, debug_loc: DebugLoc) -> (ValueRef, BlockAndBuilder<'blk, 'tcx>) { let _icx = push_ctxt("invoke_"); - if bcx.is_unreachable() { - return (C_null(Type::i8(bcx.ccx())), bcx); - } - if need_invoke(&bcx) { debug!("invoking {:?} at {:?}", Value(llfn), bcx.llbb()); for &llarg in llargs { @@ -562,10 +558,6 @@ pub fn load_ty_builder<'a, 'tcx>(b: &Builder<'a, 'tcx>, ptr: ValueRef, t: Ty<'tc /// Helper for storing values in memory. Does the necessary conversion if the in-memory type /// differs from the type used for SSA values. 
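/// (For example, `bool` is an `i1` as an SSA value but an `i8` in memory,
/// so a store goes through `from_immediate`, which zero-extends.)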
pub fn store_ty<'blk, 'tcx>(cx: &BlockAndBuilder<'blk, 'tcx>, v: ValueRef, dst: ValueRef, t: Ty<'tcx>) { - if cx.is_unreachable() { - return; - } - debug!("store_ty: {:?} : {:?} <- {:?}", Value(dst), t, Value(v)); if common::type_is_fat_ptr(cx.tcx(), t) { @@ -592,12 +584,6 @@ pub fn load_fat_ptr<'blk, 'tcx>(cx: &BlockAndBuilder<'blk, 'tcx>, ty: Ty<'tcx>) -> (ValueRef, ValueRef) { - if cx.is_unreachable() { - // FIXME: remove me - return (Load(cx, get_dataptr(cx, src)), - Load(cx, get_meta(cx, src))); - } - load_fat_ptr_builder(cx, src, ty) } @@ -644,7 +630,7 @@ pub fn with_cond<'blk, 'tcx, F>( { let _icx = push_ctxt("with_cond"); - if bcx.is_unreachable() || common::const_to_opt_uint(val) == Some(0) { + if common::const_to_opt_uint(val) == Some(0) { return bcx; } @@ -704,15 +690,11 @@ impl Lifetime { } pub fn call_lifetime_start(bcx: &BlockAndBuilder, ptr: ValueRef) { - if !bcx.is_unreachable() { - Lifetime::Start.call(bcx, ptr); - } + Lifetime::Start.call(bcx, ptr); } pub fn call_lifetime_end(bcx: &BlockAndBuilder, ptr: ValueRef) { - if !bcx.is_unreachable() { - Lifetime::End.call(bcx, ptr); - } + Lifetime::End.call(bcx, ptr); } // Generates code for resumption of unwind at the end of a landing pad. @@ -747,7 +729,7 @@ pub fn memcpy_ty<'blk, 'tcx>(bcx: &BlockAndBuilder<'blk, 'tcx>, dst: ValueRef, s let _icx = push_ctxt("memcpy_ty"); let ccx = bcx.ccx(); - if type_is_zero_size(ccx, t) || bcx.is_unreachable() { + if type_is_zero_size(ccx, t) { return; } @@ -765,9 +747,6 @@ pub fn memcpy_ty<'blk, 'tcx>(bcx: &BlockAndBuilder<'blk, 'tcx>, dst: ValueRef, s } pub fn init_zero_mem<'blk, 'tcx>(cx: &BlockAndBuilder<'blk, 'tcx>, llptr: ValueRef, t: Ty<'tcx>) { - if cx.is_unreachable() { - return; - } let _icx = push_ctxt("init_zero_mem"); let bcx = cx; memfill(bcx, llptr, t, 0); @@ -926,8 +905,7 @@ impl<'blk, 'tcx> FunctionContext<'blk, 'tcx> { // Builds the return block for a function. pub fn build_return_block(&self, ret_cx: &BlockAndBuilder<'blk, 'tcx>, ret_debug_location: DebugLoc) { - if self.llretslotptr.get().is_none() || ret_cx.is_unreachable() || - self.fn_ty.ret.is_indirect() { + if self.llretslotptr.get().is_none() || self.fn_ty.ret.is_indirect() { return RetVoid(ret_cx, ret_debug_location); } diff --git a/src/librustc_trans/build.rs b/src/librustc_trans/build.rs index bea42950c5512..16f362da29fce 100644 --- a/src/librustc_trans/build.rs +++ b/src/librustc_trans/build.rs @@ -24,22 +24,12 @@ use debuginfo::DebugLoc; use libc::{c_uint, c_char}; -// The difference between a block being unreachable and being terminated is -// somewhat obscure, and has to do with error checking. When a block is -// terminated, we're saying that trying to add any further statements in the -// block is an error. On the other hand, if something is unreachable, that -// means that the block was terminated in some way that we don't want to check -// for (panic/break/return statements, call to diverging functions, etc), and -// further instructions to the block should simply be ignored. 
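For reference, the distinction drawn in the comment removed above was
tracked by two plain flags on `Block` (deleted later in this patch, in the
`common.rs` hunk); a rough sketch, not the exact code:

    pub terminated: Cell<bool>,   // a terminator was emitted; adding more is an error
    pub unreachable: Cell<bool>,  // block is known dead; further emission is ignored

With both flags gone, the wrappers below no longer call `cx.terminate()`
before emitting a terminator.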
- pub fn RetVoid(cx: &BlockAndBuilder, debug_loc: DebugLoc) { - cx.terminate(); debug_loc.apply(cx.fcx()); cx.ret_void(); } pub fn Ret(cx: &BlockAndBuilder, v: ValueRef, debug_loc: DebugLoc) { - cx.terminate(); debug_loc.apply(cx.fcx()); cx.ret(v); } @@ -47,13 +37,11 @@ pub fn Ret(cx: &BlockAndBuilder, v: ValueRef, debug_loc: DebugLoc) { pub fn AggregateRet(cx: &BlockAndBuilder, ret_vals: &[ValueRef], debug_loc: DebugLoc) { - cx.terminate(); debug_loc.apply(cx.fcx()); cx.aggregate_ret(ret_vals); } pub fn Br(cx: &BlockAndBuilder, dest: BasicBlockRef, debug_loc: DebugLoc) { - cx.terminate(); debug_loc.apply(cx.fcx()); cx.br(dest); } @@ -63,14 +51,12 @@ pub fn CondBr(cx: &BlockAndBuilder, then: BasicBlockRef, else_: BasicBlockRef, debug_loc: DebugLoc) { - cx.terminate(); debug_loc.apply(cx.fcx()); cx.cond_br(if_, then, else_); } pub fn Switch(cx: &BlockAndBuilder, v: ValueRef, else_: BasicBlockRef, num_cases: usize) -> ValueRef { - cx.terminate(); cx.switch(v, else_, num_cases) } @@ -85,7 +71,6 @@ pub fn IndirectBr(cx: &BlockAndBuilder, addr: ValueRef, num_dests: usize, debug_loc: DebugLoc) { - cx.terminate(); debug_loc.apply(cx.fcx()); cx.indirect_br(addr, num_dests); } @@ -97,7 +82,6 @@ pub fn Invoke(cx: &BlockAndBuilder, catch: BasicBlockRef, debug_loc: DebugLoc) -> ValueRef { - cx.terminate(); debug!("Invoke({:?} with arguments ({}))", Value(fn_), args.iter().map(|a| { @@ -687,7 +671,6 @@ pub fn Trap(cx: &BlockAndBuilder) { pub fn LandingPad(cx: &BlockAndBuilder, ty: Type, pers_fn: ValueRef, num_clauses: usize) -> ValueRef { - assert!(!cx.is_unreachable()); cx.landing_pad(ty, pers_fn, num_clauses, cx.fcx().llfn) } @@ -720,26 +703,22 @@ pub fn AtomicRMW(cx: &BlockAndBuilder, op: AtomicRmwBinOp, pub fn CleanupPad(cx: &BlockAndBuilder, parent: Option, args: &[ValueRef]) -> ValueRef { - assert!(!cx.is_unreachable()); cx.cleanup_pad(parent, args) } pub fn CleanupRet(cx: &BlockAndBuilder, cleanup: ValueRef, unwind: Option) -> ValueRef { - cx.terminate(); cx.cleanup_ret(cleanup, unwind) } pub fn CatchPad(cx: &BlockAndBuilder, parent: ValueRef, args: &[ValueRef]) -> ValueRef { - assert!(!cx.is_unreachable()); cx.catch_pad(parent, args) } pub fn CatchRet(cx: &BlockAndBuilder, pad: ValueRef, unwind: BasicBlockRef) -> ValueRef { - cx.terminate(); cx.catch_ret(pad, unwind) } @@ -747,7 +726,6 @@ pub fn CatchSwitch(cx: &BlockAndBuilder, parent: Option, unwind: Option, num_handlers: usize) -> ValueRef { - cx.terminate(); cx.catch_switch(parent, unwind, num_handlers) } diff --git a/src/librustc_trans/callee.rs b/src/librustc_trans/callee.rs index 4f0a58e00d57d..7422c5f8805f9 100644 --- a/src/librustc_trans/callee.rs +++ b/src/librustc_trans/callee.rs @@ -712,24 +712,20 @@ fn trans_call_inner<'a, 'blk, 'tcx>(bcx: BlockAndBuilder<'blk, 'tcx>, }; let (llret, bcx) = base::invoke(bcx, llfn, &llargs, debug_loc); - if !bcx.is_unreachable() { - fn_ty.apply_attrs_callsite(llret); - - // If the function we just called does not use an outpointer, - // store the result into the rust outpointer. Cast the outpointer - // type to match because some ABIs will use a different type than - // the Rust type. e.g., a {u32,u32} struct could be returned as - // u64. - if !fn_ty.ret.is_indirect() { - if let Some(llretslot) = opt_llretslot { - fn_ty.ret.store(&bcx, llret, llretslot); - } + fn_ty.apply_attrs_callsite(llret); + + // If the function we just called does not use an outpointer, + // store the result into the rust outpointer. 
Cast the outpointer + // type to match because some ABIs will use a different type than + // the Rust type. e.g., a {u32,u32} struct could be returned as + // u64. + if !fn_ty.ret.is_indirect() { + if let Some(llretslot) = opt_llretslot { + fn_ty.ret.store(&bcx, llret, llretslot); } } if fn_ret.0.is_never() { - assert!(!bcx.is_terminated()); - bcx.set_unreachable(); bcx.unreachable(); } diff --git a/src/librustc_trans/cleanup.rs b/src/librustc_trans/cleanup.rs index db74e57dd8884..3524d458e0105 100644 --- a/src/librustc_trans/cleanup.rs +++ b/src/librustc_trans/cleanup.rs @@ -343,10 +343,8 @@ impl<'blk, 'tcx> FunctionContext<'blk, 'tcx> { scope: &CleanupScope<'tcx>) -> BlockAndBuilder<'blk, 'tcx> { let mut bcx = bcx; - if !bcx.is_unreachable() { - for cleanup in scope.cleanups.iter().rev() { - bcx = cleanup.trans(bcx, scope.debug_loc); - } + for cleanup in scope.cleanups.iter().rev() { + bcx = cleanup.trans(bcx, scope.debug_loc); } bcx } diff --git a/src/librustc_trans/common.rs b/src/librustc_trans/common.rs index 1650d7376bfbc..84d30b6b97a42 100644 --- a/src/librustc_trans/common.rs +++ b/src/librustc_trans/common.rs @@ -449,8 +449,6 @@ pub struct BlockS<'blk, 'tcx: 'blk> { // instructions into that block by way of this block context. // The block pointing to this one in the function's digraph. pub llbb: BasicBlockRef, - pub terminated: Cell, - pub unreachable: Cell, // If this block part of a landing pad, then this is `Some` indicating what // kind of landing pad its in, otherwise this is none. @@ -469,8 +467,6 @@ impl<'blk, 'tcx> BlockS<'blk, 'tcx> { -> Block<'blk, 'tcx> { fcx.block_arena.alloc(BlockS { llbb: llbb, - terminated: Cell::new(false), - unreachable: Cell::new(false), lpad: Cell::new(None), fcx: fcx }) @@ -599,24 +595,6 @@ impl<'blk, 'tcx> BlockAndBuilder<'blk, 'tcx> { // Methods delegated to bcx - pub fn terminate(&self) { - debug!("terminate({})", self.bcx.to_str()); - self.bcx.terminated.set(true); - } - - pub fn set_unreachable(&self) { - debug!("set_unreachable({})", self.bcx.to_str()); - self.bcx.unreachable.set(true); - } - - pub fn is_unreachable(&self) -> bool { - self.bcx.unreachable.get() - } - - pub fn is_terminated(&self) -> bool { - self.bcx.terminated.get() - } - pub fn ccx(&self) -> &'blk CrateContext<'blk, 'tcx> { self.bcx.ccx() } diff --git a/src/librustc_trans/glue.rs b/src/librustc_trans/glue.rs index a1e18725704f2..7044f8d583520 100644 --- a/src/librustc_trans/glue.rs +++ b/src/librustc_trans/glue.rs @@ -321,10 +321,6 @@ pub fn size_and_align_of_dst<'blk, 'tcx>(bcx: &BlockAndBuilder<'blk, 'tcx>, let align = C_uint(bcx.ccx(), align); return (size, align); } - if bcx.is_unreachable() { - let llty = Type::int(bcx.ccx()); - return (C_undef(llty), C_undef(llty)); - } match t.sty { ty::TyAdt(def, substs) => { let ccx = bcx.ccx(); From 59ef51c12a27ab17606eb7a1c1da58e9ec0c09ad Mon Sep 17 00:00:00 2001 From: Mark-Simulacrum Date: Sun, 11 Dec 2016 08:59:20 -0700 Subject: [PATCH 003/103] Replace build.rs with calling functions on builder directly --- src/librustc_trans/abi.rs | 3 +- src/librustc_trans/adt.rs | 29 +- src/librustc_trans/asm.rs | 22 +- src/librustc_trans/base.rs | 121 ++--- src/librustc_trans/build.rs | 734 ---------------------------- src/librustc_trans/builder.rs | 14 + src/librustc_trans/callee.rs | 22 +- src/librustc_trans/cleanup.rs | 36 +- src/librustc_trans/common.rs | 32 +- src/librustc_trans/glue.rs | 104 ++-- src/librustc_trans/intrinsic.rs | 311 ++++++------ src/librustc_trans/lib.rs | 1 - src/librustc_trans/meth.rs | 9 +- 
src/librustc_trans/mir/block.rs | 18 +- src/librustc_trans/mir/mod.rs | 3 +- src/librustc_trans/mir/operand.rs | 5 +- src/librustc_trans/mir/rvalue.rs | 39 +- src/librustc_trans/mir/statement.rs | 9 +- src/librustc_trans/tvec.rs | 28 +- 19 files changed, 373 insertions(+), 1167 deletions(-) delete mode 100644 src/librustc_trans/build.rs diff --git a/src/librustc_trans/abi.rs b/src/librustc_trans/abi.rs index 0ac853e99eecd..a0bea5d38b2a7 100644 --- a/src/librustc_trans/abi.rs +++ b/src/librustc_trans/abi.rs @@ -10,7 +10,6 @@ use llvm::{self, ValueRef, Integer, Pointer, Float, Double, Struct, Array, Vector, AttributePlace}; use base; -use build::AllocaFcx; use common::{type_is_fat_ptr, BlockAndBuilder, C_uint}; use context::CrateContext; use cabi_x86; @@ -278,7 +277,7 @@ impl ArgType { // bitcasting to the struct type yields invalid cast errors. // We instead thus allocate some scratch space... - let llscratch = AllocaFcx(bcx.fcx(), ty, "abi_cast"); + let llscratch = bcx.fcx().alloca(ty, "abi_cast"); base::Lifetime::Start.call(bcx, llscratch); // ...where we first store the value... diff --git a/src/librustc_trans/adt.rs b/src/librustc_trans/adt.rs index 4585dccfc8bd1..ef44a5fd60ebc 100644 --- a/src/librustc_trans/adt.rs +++ b/src/librustc_trans/adt.rs @@ -48,7 +48,6 @@ use std; use llvm::{ValueRef, True, IntEQ, IntNE}; use rustc::ty::layout; use rustc::ty::{self, Ty, AdtKind}; -use build::*; use common::*; use debuginfo::DebugLoc; use glue; @@ -348,7 +347,7 @@ pub fn trans_get_discr<'blk, 'tcx>(bcx: &BlockAndBuilder<'blk, 'tcx>, t: Ty<'tcx load_discr(bcx, discr, scrutinee, min, max, range_assert) } layout::General { discr, .. } => { - let ptr = StructGEP(bcx, scrutinee, 0); + let ptr = bcx.struct_gep(scrutinee, 0); load_discr(bcx, discr, ptr, 0, def.variants.len() as u64 - 1, range_assert) } @@ -358,7 +357,7 @@ pub fn trans_get_discr<'blk, 'tcx>(bcx: &BlockAndBuilder<'blk, 'tcx>, t: Ty<'tcx let llptrty = type_of::sizing_type_of(bcx.ccx(), monomorphize::field_ty(bcx.ccx().tcx(), substs, &def.variants[nndiscr as usize].fields[0])); - ICmp(bcx, cmp, Load(bcx, scrutinee), C_null(llptrty), DebugLoc::None) + bcx.icmp(cmp, bcx.load(scrutinee), C_null(llptrty)) } layout::StructWrappedNullablePointer { nndiscr, ref discrfield, .. } => { struct_wrapped_nullable_bitdiscr(bcx, nndiscr, discrfield, scrutinee) @@ -367,7 +366,7 @@ pub fn trans_get_discr<'blk, 'tcx>(bcx: &BlockAndBuilder<'blk, 'tcx>, t: Ty<'tcx }; match cast_to { None => val, - Some(llty) => if is_discr_signed(&l) { SExt(bcx, val, llty) } else { ZExt(bcx, val, llty) } + Some(llty) => if is_discr_signed(&l) { bcx.sext(val, llty) } else { bcx.zext(val, llty) } } } @@ -377,11 +376,11 @@ fn struct_wrapped_nullable_bitdiscr( discrfield: &layout::FieldPath, scrutinee: ValueRef ) -> ValueRef { - let llptrptr = GEPi(bcx, scrutinee, + let llptrptr = bcx.gepi(scrutinee, &discrfield.iter().map(|f| *f as usize).collect::>()[..]); - let llptr = Load(bcx, llptrptr); + let llptr = bcx.load(llptrptr); let cmp = if nndiscr == 0 { IntEQ } else { IntNE }; - ICmp(bcx, cmp, llptr, C_null(val_ty(llptr)), DebugLoc::None) + bcx.icmp(cmp, llptr, C_null(val_ty(llptr))) } /// Helper for cases where the discriminant is simply loaded. @@ -401,11 +400,11 @@ fn load_discr(bcx: &BlockAndBuilder, ity: layout::Integer, ptr: ValueRef, min: u // rejected by the LLVM verifier (it would mean either an // empty set, which is impossible, or the entire range of the // type, which is pointless). 
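    // (For instance, a C-like enum covering 0..=255 stored in an i8 spans
    // the whole type, so no `!range` metadata could be attached.)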
- Load(bcx, ptr) + bcx.load(ptr) } else { // llvm::ConstantRange can deal with ranges that wrap around, // so an overflow on (max + 1) is fine. - LoadRangeAssert(bcx, ptr, min, max.wrapping_add(1), /* signed: */ True) + bcx.load_range_assert(ptr, min, max.wrapping_add(1), /* signed: */ True) } } @@ -440,12 +439,12 @@ pub fn trans_set_discr<'blk, 'tcx>(bcx: &BlockAndBuilder<'blk, 'tcx>, t: Ty<'tcx match *l { layout::CEnum{ discr, min, max, .. } => { assert_discr_in_range(Disr(min), Disr(max), to); - Store(bcx, C_integral(Type::from_integer(bcx.ccx(), discr), to.0, true), + bcx.store(C_integral(Type::from_integer(bcx.ccx(), discr), to.0, true), val); } layout::General{ discr, .. } => { - Store(bcx, C_integral(Type::from_integer(bcx.ccx(), discr), to.0, true), - StructGEP(bcx, val, 0)); + bcx.store(C_integral(Type::from_integer(bcx.ccx(), discr), to.0, true), + bcx.struct_gep(val, 0)); } layout::Univariant { .. } | layout::UntaggedUnion { .. } @@ -456,7 +455,7 @@ pub fn trans_set_discr<'blk, 'tcx>(bcx: &BlockAndBuilder<'blk, 'tcx>, t: Ty<'tcx let nnty = compute_fields(bcx.ccx(), t, nndiscr as usize, false)[0]; if to.0 != nndiscr { let llptrty = type_of::sizing_type_of(bcx.ccx(), nnty); - Store(bcx, C_null(llptrty), val); + bcx.store(C_null(llptrty), val); } } layout::StructWrappedNullablePointer { nndiscr, ref discrfield, ref nonnull, .. } => { @@ -472,9 +471,9 @@ pub fn trans_set_discr<'blk, 'tcx>(bcx: &BlockAndBuilder<'blk, 'tcx>, t: Ty<'tcx base::call_memset(bcx, llptr, fill_byte, size, align, false); } else { let path = discrfield.iter().map(|&i| i as usize).collect::>(); - let llptrptr = GEPi(bcx, val, &path[..]); + let llptrptr = bcx.gepi(val, &path[..]); let llptrty = val_ty(llptrptr).element_type(); - Store(bcx, C_null(llptrty), llptrptr); + bcx.store(C_null(llptrty), llptrptr); } } } diff --git a/src/librustc_trans/asm.rs b/src/librustc_trans/asm.rs index 1e672e9d10955..4c4f8cf67d2d7 100644 --- a/src/librustc_trans/asm.rs +++ b/src/librustc_trans/asm.rs @@ -12,7 +12,6 @@ use llvm::{self, ValueRef}; use base; -use build::*; use common::*; use type_of; use type_::Type; @@ -90,20 +89,21 @@ pub fn trans_inline_asm<'blk, 'tcx>(bcx: &BlockAndBuilder<'blk, 'tcx>, let asm = CString::new(ia.asm.as_str().as_bytes()).unwrap(); let constraint_cstr = CString::new(all_constraints).unwrap(); - let r = InlineAsmCall(bcx, - asm.as_ptr(), - constraint_cstr.as_ptr(), - &inputs, - output_type, - ia.volatile, - ia.alignstack, - dialect); + let r = bcx.inline_asm_call( + asm.as_ptr(), + constraint_cstr.as_ptr(), + &inputs, + output_type, + ia.volatile, + ia.alignstack, + dialect + ); // Again, based on how many outputs we have let outputs = ia.outputs.iter().zip(&outputs).filter(|&(ref o, _)| !o.is_indirect); for (i, (_, &(val, _))) in outputs.enumerate() { - let v = if num_outputs == 1 { r } else { ExtractValue(bcx, r, i) }; - Store(bcx, v, val); + let v = if num_outputs == 1 { r } else { bcx.extract_value(r, i) }; + bcx.store(v, val); } // Store expn_id in a metadata node so we can map LLVM errors diff --git a/src/librustc_trans/base.rs b/src/librustc_trans/base.rs index d4f11df4e2828..1e4c10c4fc766 100644 --- a/src/librustc_trans/base.rs +++ b/src/librustc_trans/base.rs @@ -51,7 +51,6 @@ use session::{self, DataTypeKind, Session}; use abi::{self, Abi, FnType}; use adt; use attributes; -use build::*; use builder::{Builder, noname}; use callee::{Callee}; use common::{BlockAndBuilder, C_bool, C_bytes_in_context, C_i32, C_uint}; @@ -174,11 +173,11 @@ impl<'a, 'tcx> Drop for StatRecorder<'a, 'tcx> { } pub 
fn get_meta(bcx: &BlockAndBuilder, fat_ptr: ValueRef) -> ValueRef { - StructGEP(bcx, fat_ptr, abi::FAT_PTR_EXTRA) + bcx.struct_gep(fat_ptr, abi::FAT_PTR_EXTRA) } pub fn get_dataptr(bcx: &BlockAndBuilder, fat_ptr: ValueRef) -> ValueRef { - StructGEP(bcx, fat_ptr, abi::FAT_PTR_ADDR) + bcx.struct_gep(fat_ptr, abi::FAT_PTR_ADDR) } pub fn get_meta_builder(b: &Builder, fat_ptr: ValueRef) -> ValueRef { @@ -207,15 +206,14 @@ pub fn malloc_raw_dyn<'blk, 'tcx>(bcx: &BlockAndBuilder<'blk, 'tcx>, llty_ptr: Type, info_ty: Ty<'tcx>, size: ValueRef, - align: ValueRef, - debug_loc: DebugLoc) + align: ValueRef) -> ValueRef { let _icx = push_ctxt("malloc_raw_exchange"); // Allocate space: let def_id = require_alloc_fn(bcx, info_ty, ExchangeMallocFnLangItem); let r = Callee::def(bcx.ccx(), def_id, bcx.tcx().intern_substs(&[])).reify(bcx.ccx()); - PointerCast(bcx, Call(bcx, r, &[size, align], debug_loc), llty_ptr) + bcx.pointercast(bcx.call(r, &[size, align], bcx.lpad().and_then(|b| b.bundle())), llty_ptr) } @@ -258,13 +256,12 @@ pub fn compare_simd_types<'blk, 'tcx>(bcx: &BlockAndBuilder<'blk, 'tcx>, rhs: ValueRef, t: Ty<'tcx>, ret_ty: Type, - op: hir::BinOp_, - debug_loc: DebugLoc) + op: hir::BinOp_) -> ValueRef { let signed = match t.sty { ty::TyFloat(_) => { let cmp = bin_op_to_fcmp_predicate(op); - return SExt(bcx, FCmp(bcx, cmp, lhs, rhs, debug_loc), ret_ty); + return bcx.sext(bcx.fcmp(cmp, lhs, rhs), ret_ty); }, ty::TyUint(_) => false, ty::TyInt(_) => true, @@ -276,7 +273,7 @@ pub fn compare_simd_types<'blk, 'tcx>(bcx: &BlockAndBuilder<'blk, 'tcx>, // to get the correctly sized type. This will compile to a single instruction // once the IR is converted to assembly if the SIMD instruction is supported // by the target architecture. - SExt(bcx, ICmp(bcx, cmp, lhs, rhs, debug_loc), ret_ty) + bcx.sext(bcx.icmp(cmp, lhs, rhs), ret_ty) } /// Retrieve the information we are losing (making dynamic) in an unsizing @@ -326,8 +323,7 @@ pub fn unsize_thin_ptr<'blk, 'tcx>(bcx: &BlockAndBuilder<'blk, 'tcx>, &ty::TyRawPtr(ty::TypeAndMut { ty: b, .. })) => { assert!(common::type_is_sized(bcx.tcx(), a)); let ptr_ty = type_of::in_memory_type_of(bcx.ccx(), b).ptr_to(); - (PointerCast(bcx, src, ptr_ty), - unsized_info(bcx.ccx(), a, b, None)) + (bcx.pointercast(src, ptr_ty), unsized_info(bcx.ccx(), a, b, None)) } _ => bug!("unsize_thin_ptr: called on bad types"), } @@ -352,7 +348,7 @@ pub fn coerce_unsized_into<'blk, 'tcx>(bcx: &BlockAndBuilder<'blk, 'tcx>, // the types match up. 
let (base, info) = load_fat_ptr(bcx, src, src_ty); let llcast_ty = type_of::fat_ptr_base_ty(bcx.ccx(), dst_ty); - let base = PointerCast(bcx, base, llcast_ty); + let base = bcx.pointercast(base, llcast_ty); (base, info) } else { let base = load_ty(bcx, src, src_ty); @@ -414,8 +410,10 @@ pub fn custom_coerce_unsize_info<'scx, 'tcx>(scx: &SharedCrateContext<'scx, 'tcx } } -pub fn cast_shift_expr_rhs(cx: &BlockAndBuilder, op: hir::BinOp_, lhs: ValueRef, rhs: ValueRef) -> ValueRef { - cast_shift_rhs(op, lhs, rhs, |a, b| Trunc(cx, a, b), |a, b| ZExt(cx, a, b)) +pub fn cast_shift_expr_rhs( + cx: &BlockAndBuilder, op: hir::BinOp_, lhs: ValueRef, rhs: ValueRef +) -> ValueRef { + cast_shift_rhs(op, lhs, rhs, |a, b| cx.trunc(a, b), |a, b| cx.zext(a, b)) } pub fn cast_shift_const_rhs(op: hir::BinOp_, lhs: ValueRef, rhs: ValueRef) -> ValueRef { @@ -463,8 +461,7 @@ fn cast_shift_rhs(op: hir::BinOp_, pub fn invoke<'blk, 'tcx>(bcx: BlockAndBuilder<'blk, 'tcx>, llfn: ValueRef, - llargs: &[ValueRef], - debug_loc: DebugLoc) + llargs: &[ValueRef]) -> (ValueRef, BlockAndBuilder<'blk, 'tcx>) { let _icx = push_ctxt("invoke_"); if need_invoke(&bcx) { @@ -475,12 +472,13 @@ pub fn invoke<'blk, 'tcx>(bcx: BlockAndBuilder<'blk, 'tcx>, let normal_bcx = bcx.fcx().new_block("normal-return"); let landing_pad = bcx.fcx().get_landing_pad(); - let llresult = Invoke(&bcx, - llfn, - &llargs[..], - normal_bcx.llbb, - landing_pad, - debug_loc); + let llresult = bcx.invoke( + llfn, + &llargs[..], + normal_bcx.llbb, + landing_pad, + bcx.lpad().and_then(|b| b.bundle()) + ); return (llresult, normal_bcx.build()); } else { debug!("calling {:?} at {:?}", Value(llfn), bcx.llbb()); @@ -488,7 +486,7 @@ pub fn invoke<'blk, 'tcx>(bcx: BlockAndBuilder<'blk, 'tcx>, debug!("arg: {:?}", Value(llarg)); } - let llresult = Call(&bcx, llfn, &llargs[..], debug_loc); + let llresult = bcx.call(llfn, &llargs[..], bcx.lpad().and_then(|b| b.bundle())); return (llresult, bcx); } } @@ -518,7 +516,9 @@ pub fn call_assume<'a, 'tcx>(b: &Builder<'a, 'tcx>, val: ValueRef) { /// Helper for loading values from memory. Does the necessary conversion if the in-memory type /// differs from the type used for SSA values. Also handles various special cases where the type /// gives us better information about what we are loading. -pub fn load_ty<'blk, 'tcx>(cx: &BlockAndBuilder<'blk, 'tcx>, ptr: ValueRef, t: Ty<'tcx>) -> ValueRef { +pub fn load_ty<'blk, 'tcx>( + cx: &BlockAndBuilder<'blk, 'tcx>, ptr: ValueRef, t: Ty<'tcx> +) -> ValueRef { load_ty_builder(cx, ptr, t) } @@ -557,15 +557,17 @@ pub fn load_ty_builder<'a, 'tcx>(b: &Builder<'a, 'tcx>, ptr: ValueRef, t: Ty<'tc /// Helper for storing values in memory. Does the necessary conversion if the in-memory type /// differs from the type used for SSA values. 
-pub fn store_ty<'blk, 'tcx>(cx: &BlockAndBuilder<'blk, 'tcx>, v: ValueRef, dst: ValueRef, t: Ty<'tcx>) { +pub fn store_ty<'blk, 'tcx>( + cx: &BlockAndBuilder<'blk, 'tcx>, v: ValueRef, dst: ValueRef, t: Ty<'tcx> +) { debug!("store_ty: {:?} : {:?} <- {:?}", Value(dst), t, Value(v)); if common::type_is_fat_ptr(cx.tcx(), t) { - let lladdr = ExtractValue(cx, v, abi::FAT_PTR_ADDR); - let llextra = ExtractValue(cx, v, abi::FAT_PTR_EXTRA); + let lladdr = cx.extract_value(v, abi::FAT_PTR_ADDR); + let llextra = cx.extract_value(v, abi::FAT_PTR_EXTRA); store_fat_ptr(cx, lladdr, llextra, dst, t); } else { - Store(cx, from_immediate(cx, v), dst); + cx.store(from_immediate(cx, v), dst); } } @@ -575,8 +577,8 @@ pub fn store_fat_ptr<'blk, 'tcx>(cx: &BlockAndBuilder<'blk, 'tcx>, dst: ValueRef, _ty: Ty<'tcx>) { // FIXME: emit metadata - Store(cx, data, get_dataptr(cx, dst)); - Store(cx, extra, get_meta(cx, dst)); + cx.store(data, get_dataptr(cx, dst)); + cx.store(extra, get_meta(cx, dst)); } pub fn load_fat_ptr<'blk, 'tcx>(cx: &BlockAndBuilder<'blk, 'tcx>, @@ -609,7 +611,7 @@ pub fn load_fat_ptr_builder<'a, 'tcx>( pub fn from_immediate(bcx: &BlockAndBuilder, val: ValueRef) -> ValueRef { if val_ty(val) == Type::i1(bcx.ccx()) { - ZExt(bcx, val, Type::i8(bcx.ccx())) + bcx.zext(val, Type::i8(bcx.ccx())) } else { val } @@ -617,7 +619,7 @@ pub fn from_immediate(bcx: &BlockAndBuilder, val: ValueRef) -> ValueRef { pub fn to_immediate(bcx: &BlockAndBuilder, val: ValueRef, ty: Ty) -> ValueRef { if ty.is_bool() { - Trunc(bcx, val, Type::i1(bcx.ccx())) + bcx.trunc(val, Type::i1(bcx.ccx())) } else { val } @@ -637,9 +639,9 @@ pub fn with_cond<'blk, 'tcx, F>( let fcx = bcx.fcx(); let next_cx = fcx.new_block("next").build(); let cond_cx = fcx.new_block("cond").build(); - CondBr(&bcx, val, cond_cx.llbb(), next_cx.llbb(), DebugLoc::None); + bcx.cond_br(val, cond_cx.llbb(), next_cx.llbb()); let after_cx = f(cond_cx); - Br(&after_cx, next_cx.llbb(), DebugLoc::None); + after_cx.br(next_cx.llbb()); next_cx } @@ -702,8 +704,9 @@ pub fn trans_unwind_resume(bcx: &BlockAndBuilder, lpval: ValueRef) { if !bcx.sess().target.target.options.custom_unwind_resume { bcx.resume(lpval); } else { - let exc_ptr = ExtractValue(bcx, lpval, 0); - Call(bcx, bcx.fcx().eh_unwind_resume().reify(bcx.ccx()), &[exc_ptr], DebugLoc::None); + let exc_ptr = bcx.extract_value(lpval, 0); + bcx.call(bcx.fcx().eh_unwind_resume().reify(bcx.ccx()), &[exc_ptr], + bcx.lpad().and_then(|b| b.bundle())); } } @@ -725,7 +728,9 @@ pub fn call_memcpy<'bcx, 'tcx>(b: &Builder<'bcx, 'tcx>, b.call(memcpy, &[dst_ptr, src_ptr, size, align, volatile], None); } -pub fn memcpy_ty<'blk, 'tcx>(bcx: &BlockAndBuilder<'blk, 'tcx>, dst: ValueRef, src: ValueRef, t: Ty<'tcx>) { +pub fn memcpy_ty<'blk, 'tcx>( + bcx: &BlockAndBuilder<'blk, 'tcx>, dst: ValueRef, src: ValueRef, t: Ty<'tcx> +) { let _icx = push_ctxt("memcpy_ty"); let ccx = bcx.ccx(); @@ -792,7 +797,7 @@ pub fn alloc_ty<'blk, 'tcx>(bcx: &BlockAndBuilder<'blk, 'tcx>, pub fn alloca(cx: &BlockAndBuilder, ty: Type, name: &str) -> ValueRef { let _icx = push_ctxt("alloca"); DebugLoc::None.apply(cx.fcx()); - Alloca(cx, ty, name) + cx.fcx().alloca(ty, name) } impl<'blk, 'tcx> FunctionContext<'blk, 'tcx> { @@ -863,7 +868,7 @@ impl<'blk, 'tcx> FunctionContext<'blk, 'tcx> { // Use a dummy instruction as the insertion point for all allocas. // This is later removed in FunctionContext::cleanup. 
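        // (The dummy is a load of a null `i8*`; `alloca` positions every
        // stack slot before it, which keeps all allocas in the entry block.)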
self.alloca_insert_pt.set(Some(unsafe { - Load(&entry_bcx, C_null(Type::i8p(self.ccx))); + entry_bcx.load(C_null(Type::i8p(self.ccx))); llvm::LLVMGetFirstInstruction(entry_bcx.llbb()) })); @@ -881,7 +886,7 @@ impl<'blk, 'tcx> FunctionContext<'blk, 'tcx> { let slot = if self.fn_ty.ret.is_indirect() { get_param(self.llfn, 0) } else { - AllocaFcx(self, llty, "sret_slot") + self.alloca(llty, "sret_slot") }; self.llretslotptr.set(Some(slot)); @@ -892,21 +897,19 @@ impl<'blk, 'tcx> FunctionContext<'blk, 'tcx> { /// Ties up the llstaticallocas -> llloadenv -> lltop edges, /// and builds the return block. - pub fn finish(&'blk self, ret_cx: &BlockAndBuilder<'blk, 'tcx>, - ret_debug_loc: DebugLoc) { + pub fn finish(&'blk self, ret_cx: &BlockAndBuilder<'blk, 'tcx>) { let _icx = push_ctxt("FunctionContext::finish"); - self.build_return_block(ret_cx, ret_debug_loc); + self.build_return_block(ret_cx); DebugLoc::None.apply(self); self.cleanup(); } // Builds the return block for a function. - pub fn build_return_block(&self, ret_cx: &BlockAndBuilder<'blk, 'tcx>, - ret_debug_location: DebugLoc) { + pub fn build_return_block(&self, ret_cx: &BlockAndBuilder<'blk, 'tcx>) { if self.llretslotptr.get().is_none() || self.fn_ty.ret.is_indirect() { - return RetVoid(ret_cx, ret_debug_location); + return ret_cx.ret_void(); } let retslot = self.llretslotptr.get().unwrap(); @@ -925,13 +928,13 @@ impl<'blk, 'tcx> FunctionContext<'blk, 'tcx> { } if self.fn_ty.ret.is_indirect() { - Store(ret_cx, retval, get_param(self.llfn, 0)); - RetVoid(ret_cx, ret_debug_location) + ret_cx.store(retval, get_param(self.llfn, 0)); + ret_cx.ret_void() } else { if llty == Type::i1(self.ccx) { - retval = Trunc(ret_cx, retval, llty); + retval = ret_cx.trunc(retval, llty); } - Ret(ret_cx, retval, ret_debug_location) + ret_cx.ret(retval) } } (_, cast_ty) if self.fn_ty.ret.is_indirect() => { @@ -941,24 +944,24 @@ impl<'blk, 'tcx> FunctionContext<'blk, 'tcx> { let llalign = llalign_of_min(self.ccx, self.fn_ty.ret.ty); call_memcpy(&ret_cx, get_param(self.llfn, 0), retslot, llsz, llalign as u32); - RetVoid(ret_cx, ret_debug_location) + ret_cx.ret_void() } (_, Some(cast_ty)) => { - let load = Load(ret_cx, PointerCast(ret_cx, retslot, cast_ty.ptr_to())); + let load = ret_cx.load(ret_cx.pointercast(retslot, cast_ty.ptr_to())); let llalign = llalign_of_min(self.ccx, self.fn_ty.ret.ty); unsafe { llvm::LLVMSetAlignment(load, llalign); } - Ret(ret_cx, load, ret_debug_location) + ret_cx.ret(load) } (_, None) => { let retval = if llty == Type::i1(self.ccx) { - let val = LoadRangeAssert(ret_cx, retslot, 0, 2, llvm::False); - Trunc(ret_cx, val, llty) + let val = ret_cx.load_range_assert(retslot, 0, 2, llvm::False); + ret_cx.trunc(val, llty) } else { - Load(ret_cx, retslot) + ret_cx.load(retslot) }; - Ret(ret_cx, retval, ret_debug_location) + ret_cx.ret(retval) } } } @@ -1056,7 +1059,7 @@ pub fn trans_ctor_shim<'a, 'tcx>(ccx: &CrateContext<'a, 'tcx>, adt::trans_set_discr(&bcx, sig.output(), dest, disr); } - fcx.finish(&bcx, DebugLoc::None); + fcx.finish(&bcx); } pub fn llvm_linkage_by_name(name: &str) -> Option { diff --git a/src/librustc_trans/build.rs b/src/librustc_trans/build.rs deleted file mode 100644 index 16f362da29fce..0000000000000 --- a/src/librustc_trans/build.rs +++ /dev/null @@ -1,734 +0,0 @@ -// Copyright 2012-2014 The Rust Project Developers. See the COPYRIGHT -// file at the top-level directory of this distribution and at -// http://rust-lang.org/COPYRIGHT. 
-// -// Licensed under the Apache License, Version 2.0 or the MIT license -// , at your -// option. This file may not be copied, modified, or distributed -// except according to those terms. - -#![allow(dead_code)] // FFI wrappers -#![allow(non_snake_case)] - -use llvm; -use llvm::{AtomicRmwBinOp, AtomicOrdering, SynchronizationScope, AsmDialect}; -use llvm::{Opcode, IntPredicate, RealPredicate}; -use llvm::{ValueRef, BasicBlockRef}; -use common::*; -use syntax_pos::Span; - -use type_::Type; -use value::Value; -use debuginfo::DebugLoc; - -use libc::{c_uint, c_char}; - -pub fn RetVoid(cx: &BlockAndBuilder, debug_loc: DebugLoc) { - debug_loc.apply(cx.fcx()); - cx.ret_void(); -} - -pub fn Ret(cx: &BlockAndBuilder, v: ValueRef, debug_loc: DebugLoc) { - debug_loc.apply(cx.fcx()); - cx.ret(v); -} - -pub fn AggregateRet(cx: &BlockAndBuilder, - ret_vals: &[ValueRef], - debug_loc: DebugLoc) { - debug_loc.apply(cx.fcx()); - cx.aggregate_ret(ret_vals); -} - -pub fn Br(cx: &BlockAndBuilder, dest: BasicBlockRef, debug_loc: DebugLoc) { - debug_loc.apply(cx.fcx()); - cx.br(dest); -} - -pub fn CondBr(cx: &BlockAndBuilder, - if_: ValueRef, - then: BasicBlockRef, - else_: BasicBlockRef, - debug_loc: DebugLoc) { - debug_loc.apply(cx.fcx()); - cx.cond_br(if_, then, else_); -} - -pub fn Switch(cx: &BlockAndBuilder, v: ValueRef, else_: BasicBlockRef, num_cases: usize) - -> ValueRef { - cx.switch(v, else_, num_cases) - } - -pub fn AddCase(s: ValueRef, on_val: ValueRef, dest: BasicBlockRef) { - unsafe { - if llvm::LLVMIsUndef(s) == llvm::True { return; } - llvm::LLVMAddCase(s, on_val, dest); - } -} - -pub fn IndirectBr(cx: &BlockAndBuilder, - addr: ValueRef, - num_dests: usize, - debug_loc: DebugLoc) { - debug_loc.apply(cx.fcx()); - cx.indirect_br(addr, num_dests); -} - -pub fn Invoke(cx: &BlockAndBuilder, - fn_: ValueRef, - args: &[ValueRef], - then: BasicBlockRef, - catch: BasicBlockRef, - debug_loc: DebugLoc) - -> ValueRef { - debug!("Invoke({:?} with arguments ({}))", - Value(fn_), - args.iter().map(|a| { - format!("{:?}", Value(*a)) - }).collect::>().join(", ")); - debug_loc.apply(cx.fcx()); - let bundle = cx.lpad().and_then(|b| b.bundle()); - cx.invoke(fn_, args, then, catch, bundle) -} - -/* Arithmetic */ -pub fn Add(cx: &BlockAndBuilder, - lhs: ValueRef, - rhs: ValueRef, - debug_loc: DebugLoc) - -> ValueRef { - debug_loc.apply(cx.fcx()); - cx.add(lhs, rhs) - } - -pub fn NSWAdd(cx: &BlockAndBuilder, - lhs: ValueRef, - rhs: ValueRef, - debug_loc: DebugLoc) - -> ValueRef { - debug_loc.apply(cx.fcx()); - cx.nswadd(lhs, rhs) - } - -pub fn NUWAdd(cx: &BlockAndBuilder, - lhs: ValueRef, - rhs: ValueRef, - debug_loc: DebugLoc) - -> ValueRef { - debug_loc.apply(cx.fcx()); - cx.nuwadd(lhs, rhs) - } - -pub fn FAdd(cx: &BlockAndBuilder, - lhs: ValueRef, - rhs: ValueRef, - debug_loc: DebugLoc) - -> ValueRef { - debug_loc.apply(cx.fcx()); - cx.fadd(lhs, rhs) - } - -pub fn FAddFast(cx: &BlockAndBuilder, - lhs: ValueRef, - rhs: ValueRef, - debug_loc: DebugLoc) - -> ValueRef { - debug_loc.apply(cx.fcx()); - cx.fadd_fast(lhs, rhs) - } - -pub fn Sub(cx: &BlockAndBuilder, - lhs: ValueRef, - rhs: ValueRef, - debug_loc: DebugLoc) - -> ValueRef { - debug_loc.apply(cx.fcx()); - cx.sub(lhs, rhs) - } - -pub fn NSWSub(cx: &BlockAndBuilder, - lhs: ValueRef, - rhs: ValueRef, - debug_loc: DebugLoc) - -> ValueRef { - debug_loc.apply(cx.fcx()); - cx.nswsub(lhs, rhs) - } - -pub fn NUWSub(cx: &BlockAndBuilder, - lhs: ValueRef, - rhs: ValueRef, - debug_loc: DebugLoc) - -> ValueRef { - debug_loc.apply(cx.fcx()); - cx.nuwsub(lhs, rhs) - } - 
-pub fn FSub(cx: &BlockAndBuilder, - lhs: ValueRef, - rhs: ValueRef, - debug_loc: DebugLoc) - -> ValueRef { - debug_loc.apply(cx.fcx()); - cx.fsub(lhs, rhs) - } - -pub fn FSubFast(cx: &BlockAndBuilder, - lhs: ValueRef, - rhs: ValueRef, - debug_loc: DebugLoc) - -> ValueRef { - debug_loc.apply(cx.fcx()); - cx.fsub_fast(lhs, rhs) - } - -pub fn Mul(cx: &BlockAndBuilder, - lhs: ValueRef, - rhs: ValueRef, - debug_loc: DebugLoc) - -> ValueRef { - debug_loc.apply(cx.fcx()); - cx.mul(lhs, rhs) - } - -pub fn NSWMul(cx: &BlockAndBuilder, - lhs: ValueRef, - rhs: ValueRef, - debug_loc: DebugLoc) - -> ValueRef { - debug_loc.apply(cx.fcx()); - cx.nswmul(lhs, rhs) - } - -pub fn NUWMul(cx: &BlockAndBuilder, - lhs: ValueRef, - rhs: ValueRef, - debug_loc: DebugLoc) - -> ValueRef { - debug_loc.apply(cx.fcx()); - cx.nuwmul(lhs, rhs) - } - -pub fn FMul(cx: &BlockAndBuilder, - lhs: ValueRef, - rhs: ValueRef, - debug_loc: DebugLoc) - -> ValueRef { - debug_loc.apply(cx.fcx()); - cx.fmul(lhs, rhs) - } - -pub fn FMulFast(cx: &BlockAndBuilder, - lhs: ValueRef, - rhs: ValueRef, - debug_loc: DebugLoc) - -> ValueRef { - debug_loc.apply(cx.fcx()); - cx.fmul_fast(lhs, rhs) - } - -pub fn UDiv(cx: &BlockAndBuilder, - lhs: ValueRef, - rhs: ValueRef, - debug_loc: DebugLoc) - -> ValueRef { - debug_loc.apply(cx.fcx()); - cx.udiv(lhs, rhs) - } - -pub fn SDiv(cx: &BlockAndBuilder, - lhs: ValueRef, - rhs: ValueRef, - debug_loc: DebugLoc) - -> ValueRef { - debug_loc.apply(cx.fcx()); - cx.sdiv(lhs, rhs) - } - -pub fn ExactSDiv(cx: &BlockAndBuilder, - lhs: ValueRef, - rhs: ValueRef, - debug_loc: DebugLoc) - -> ValueRef { - debug_loc.apply(cx.fcx()); - cx.exactsdiv(lhs, rhs) - } - -pub fn FDiv(cx: &BlockAndBuilder, - lhs: ValueRef, - rhs: ValueRef, - debug_loc: DebugLoc) - -> ValueRef { - debug_loc.apply(cx.fcx()); - cx.fdiv(lhs, rhs) - } - -pub fn FDivFast(cx: &BlockAndBuilder, - lhs: ValueRef, - rhs: ValueRef, - debug_loc: DebugLoc) - -> ValueRef { - debug_loc.apply(cx.fcx()); - cx.fdiv_fast(lhs, rhs) - } - -pub fn URem(cx: &BlockAndBuilder, - lhs: ValueRef, - rhs: ValueRef, - debug_loc: DebugLoc) - -> ValueRef { - debug_loc.apply(cx.fcx()); - cx.urem(lhs, rhs) - } - -pub fn SRem(cx: &BlockAndBuilder, - lhs: ValueRef, - rhs: ValueRef, - debug_loc: DebugLoc) - -> ValueRef { - debug_loc.apply(cx.fcx()); - cx.srem(lhs, rhs) - } - -pub fn FRem(cx: &BlockAndBuilder, - lhs: ValueRef, - rhs: ValueRef, - debug_loc: DebugLoc) - -> ValueRef { - debug_loc.apply(cx.fcx()); - cx.frem(lhs, rhs) - } - -pub fn FRemFast(cx: &BlockAndBuilder, - lhs: ValueRef, - rhs: ValueRef, - debug_loc: DebugLoc) - -> ValueRef { - debug_loc.apply(cx.fcx()); - cx.frem_fast(lhs, rhs) - } - -pub fn Shl(cx: &BlockAndBuilder, - lhs: ValueRef, - rhs: ValueRef, - debug_loc: DebugLoc) - -> ValueRef { - debug_loc.apply(cx.fcx()); - cx.shl(lhs, rhs) - } - -pub fn LShr(cx: &BlockAndBuilder, - lhs: ValueRef, - rhs: ValueRef, - debug_loc: DebugLoc) - -> ValueRef { - debug_loc.apply(cx.fcx()); - cx.lshr(lhs, rhs) - } - -pub fn AShr(cx: &BlockAndBuilder, - lhs: ValueRef, - rhs: ValueRef, - debug_loc: DebugLoc) - -> ValueRef { - debug_loc.apply(cx.fcx()); - cx.ashr(lhs, rhs) - } - -pub fn And(cx: &BlockAndBuilder, - lhs: ValueRef, - rhs: ValueRef, - debug_loc: DebugLoc) - -> ValueRef { - debug_loc.apply(cx.fcx()); - cx.and(lhs, rhs) - } - -pub fn Or(cx: &BlockAndBuilder, - lhs: ValueRef, - rhs: ValueRef, - debug_loc: DebugLoc) - -> ValueRef { - debug_loc.apply(cx.fcx()); - cx.or(lhs, rhs) - } - -pub fn Xor(cx: &BlockAndBuilder, - lhs: ValueRef, - rhs: ValueRef, - debug_loc: 
DebugLoc)
-          -> ValueRef {
-    debug_loc.apply(cx.fcx());
-    cx.xor(lhs, rhs)
-}
-
-pub fn BinOp(cx: &BlockAndBuilder,
-             op: Opcode,
-             lhs: ValueRef,
-             rhs: ValueRef,
-             debug_loc: DebugLoc)
-             -> ValueRef {
-    debug_loc.apply(cx.fcx());
-    cx.binop(op, lhs, rhs)
-}
-
-pub fn Neg(cx: &BlockAndBuilder, v: ValueRef, debug_loc: DebugLoc) -> ValueRef {
-    debug_loc.apply(cx.fcx());
-    cx.neg(v)
-}
-
-pub fn NSWNeg(cx: &BlockAndBuilder, v: ValueRef, debug_loc: DebugLoc) -> ValueRef {
-    debug_loc.apply(cx.fcx());
-    cx.nswneg(v)
-}
-
-pub fn NUWNeg(cx: &BlockAndBuilder, v: ValueRef, debug_loc: DebugLoc) -> ValueRef {
-    debug_loc.apply(cx.fcx());
-    cx.nuwneg(v)
-}
-pub fn FNeg(cx: &BlockAndBuilder, v: ValueRef, debug_loc: DebugLoc) -> ValueRef {
-    debug_loc.apply(cx.fcx());
-    cx.fneg(v)
-}
-
-pub fn Not(cx: &BlockAndBuilder, v: ValueRef, debug_loc: DebugLoc) -> ValueRef {
-    debug_loc.apply(cx.fcx());
-    cx.not(v)
-}
-
-pub fn Alloca(cx: &BlockAndBuilder, ty: Type, name: &str) -> ValueRef {
-    AllocaFcx(cx.fcx(), ty, name)
-}
-
-pub fn AllocaFcx(fcx: &FunctionContext, ty: Type, name: &str) -> ValueRef {
-    let b = fcx.ccx.builder();
-    b.position_before(fcx.alloca_insert_pt.get().unwrap());
-    DebugLoc::None.apply(fcx);
-    b.alloca(ty, name)
-}
-
-pub fn Free(cx: &BlockAndBuilder, pointer_val: ValueRef) {
-    cx.free(pointer_val)
-}
-
-pub fn Load(cx: &BlockAndBuilder, pointer_val: ValueRef) -> ValueRef {
-    cx.load(pointer_val)
-}
-
-pub fn VolatileLoad(cx: &BlockAndBuilder, pointer_val: ValueRef) -> ValueRef {
-    cx.volatile_load(pointer_val)
-}
-
-pub fn AtomicLoad(cx: &BlockAndBuilder, pointer_val: ValueRef, order: AtomicOrdering) -> ValueRef {
-    cx.atomic_load(pointer_val, order)
-}
-
-
-pub fn LoadRangeAssert(cx: &BlockAndBuilder, pointer_val: ValueRef, lo: u64,
-                       hi: u64, signed: llvm::Bool) -> ValueRef {
-    cx.load_range_assert(pointer_val, lo, hi, signed)
-}
-
-pub fn LoadNonNull(cx: &BlockAndBuilder, ptr: ValueRef) -> ValueRef {
-    cx.load_nonnull(ptr)
-}
-
-pub fn Store(cx: &BlockAndBuilder, val: ValueRef, ptr: ValueRef) -> ValueRef {
-    cx.store(val, ptr)
-}
-
-pub fn VolatileStore(cx: &BlockAndBuilder, val: ValueRef, ptr: ValueRef) -> ValueRef {
-    cx.volatile_store(val, ptr)
-}
-
-pub fn AtomicStore(cx: &BlockAndBuilder, val: ValueRef, ptr: ValueRef, order: AtomicOrdering) {
-    cx.atomic_store(val, ptr, order)
-}
-
-pub fn GEP(cx: &BlockAndBuilder, pointer: ValueRef, indices: &[ValueRef]) -> ValueRef {
-    cx.gep(pointer, indices)
-}
-
-// Simple wrapper around GEP that takes an array of ints and wraps them
-// in C_i32()
-#[inline]
-pub fn GEPi(cx: &BlockAndBuilder, base: ValueRef, ixs: &[usize]) -> ValueRef {
-    cx.gepi(base, ixs)
-}
-
-pub fn InBoundsGEP(cx: &BlockAndBuilder, pointer: ValueRef, indices: &[ValueRef]) -> ValueRef {
-    cx.inbounds_gep(pointer, indices)
-}
-
-pub fn StructGEP(cx: &BlockAndBuilder, pointer: ValueRef, idx: usize) -> ValueRef {
-    cx.struct_gep(pointer, idx)
-}
-
-pub fn GlobalString(cx: &BlockAndBuilder, _str: *const c_char) -> ValueRef {
-    cx.global_string(_str)
-}
-
-pub fn GlobalStringPtr(cx: &BlockAndBuilder, _str: *const c_char) -> ValueRef {
-    cx.global_string_ptr(_str)
-}
-
-/* Casts */
-pub fn Trunc(cx: &BlockAndBuilder, val: ValueRef, dest_ty: Type) -> ValueRef {
-    cx.trunc(val, dest_ty)
-}
-
-pub fn ZExt(cx: &BlockAndBuilder, val: ValueRef, dest_ty: Type) -> ValueRef {
-    cx.zext(val, dest_ty)
-}
-
-pub fn SExt(cx: &BlockAndBuilder, val: ValueRef, dest_ty: Type) -> ValueRef {
-    cx.sext(val, dest_ty)
-}
-
-pub fn FPToUI(cx: &BlockAndBuilder, val: ValueRef, dest_ty: Type) -> ValueRef {
-    cx.fptoui(val, dest_ty)
-}
-
-pub fn FPToSI(cx: &BlockAndBuilder, val: ValueRef, dest_ty: Type) -> ValueRef {
-    cx.fptosi(val, dest_ty)
-}
-
-pub fn UIToFP(cx: &BlockAndBuilder, val: ValueRef, dest_ty: Type) -> ValueRef {
-    cx.uitofp(val, dest_ty)
-}
-
-pub fn SIToFP(cx: &BlockAndBuilder, val: ValueRef, dest_ty: Type) -> ValueRef {
-    cx.sitofp(val, dest_ty)
-}
-
-pub fn FPTrunc(cx: &BlockAndBuilder, val: ValueRef, dest_ty: Type) -> ValueRef {
-    cx.fptrunc(val, dest_ty)
-}
-
-pub fn FPExt(cx: &BlockAndBuilder, val: ValueRef, dest_ty: Type) -> ValueRef {
-    cx.fpext(val, dest_ty)
-}
-
-pub fn PtrToInt(cx: &BlockAndBuilder, val: ValueRef, dest_ty: Type) -> ValueRef {
-    cx.ptrtoint(val, dest_ty)
-}
-
-pub fn IntToPtr(cx: &BlockAndBuilder, val: ValueRef, dest_ty: Type) -> ValueRef {
-    cx.inttoptr(val, dest_ty)
-}
-
-pub fn BitCast(cx: &BlockAndBuilder, val: ValueRef, dest_ty: Type) -> ValueRef {
-    cx.bitcast(val, dest_ty)
-}
-
-pub fn ZExtOrBitCast(cx: &BlockAndBuilder, val: ValueRef, dest_ty: Type) -> ValueRef {
-    cx.zext_or_bitcast(val, dest_ty)
-}
-
-pub fn SExtOrBitCast(cx: &BlockAndBuilder, val: ValueRef, dest_ty: Type) -> ValueRef {
-    cx.sext_or_bitcast(val, dest_ty)
-}
-
-pub fn TruncOrBitCast(cx: &BlockAndBuilder, val: ValueRef, dest_ty: Type) -> ValueRef {
-    cx.trunc_or_bitcast(val, dest_ty)
-}
-
-pub fn Cast(cx: &BlockAndBuilder, op: Opcode, val: ValueRef, dest_ty: Type,
-            _: *const u8)
-            -> ValueRef {
-    cx.cast(op, val, dest_ty)
-}
-
-pub fn PointerCast(cx: &BlockAndBuilder, val: ValueRef, dest_ty: Type) -> ValueRef {
-    cx.pointercast(val, dest_ty)
-}
-
-pub fn IntCast(cx: &BlockAndBuilder, val: ValueRef, dest_ty: Type) -> ValueRef {
-    cx.intcast(val, dest_ty)
-}
-
-pub fn FPCast(cx: &BlockAndBuilder, val: ValueRef, dest_ty: Type) -> ValueRef {
-    cx.fpcast(val, dest_ty)
-}
-
-
-/* Comparisons */
-pub fn ICmp(cx: &BlockAndBuilder,
-            op: IntPredicate,
-            lhs: ValueRef,
-            rhs: ValueRef,
-            debug_loc: DebugLoc)
-            -> ValueRef {
-    debug_loc.apply(cx.fcx());
-    cx.icmp(op, lhs, rhs)
-}
-
-pub fn FCmp(cx: &BlockAndBuilder,
-            op: RealPredicate,
-            lhs: ValueRef,
-            rhs: ValueRef,
-            debug_loc: DebugLoc)
-            -> ValueRef {
-    debug_loc.apply(cx.fcx());
-    cx.fcmp(op, lhs, rhs)
-}
-
-/* Miscellaneous instructions */
-pub fn EmptyPhi(cx: &BlockAndBuilder, ty: Type) -> ValueRef {
-    cx.empty_phi(ty)
-}
-
-pub fn Phi(cx: &BlockAndBuilder, ty: Type, vals: &[ValueRef], bbs: &[BasicBlockRef]) -> ValueRef {
-    cx.phi(ty, vals, bbs)
-}
-
-pub fn AddIncomingToPhi(phi: ValueRef, val: ValueRef, bb: BasicBlockRef) {
-    unsafe {
-        if llvm::LLVMIsUndef(phi) == llvm::True { return; }
-        llvm::LLVMAddIncoming(phi, &val, &bb, 1 as c_uint);
-    }
-}
-
-pub fn add_span_comment(cx: &BlockAndBuilder, sp: Span, text: &str) {
-    cx.add_span_comment(sp, text)
-}
-
-pub fn add_comment(cx: &BlockAndBuilder, text: &str) {
-    cx.add_comment(text)
-}
-
-pub fn InlineAsmCall(cx: &BlockAndBuilder, asm: *const c_char, cons: *const c_char,
-                     inputs: &[ValueRef], output: Type,
-                     volatile: bool, alignstack: bool,
-                     dia: AsmDialect) -> ValueRef {
-    cx.inline_asm_call(asm, cons, inputs, output, volatile, alignstack, dia)
-}
-
-pub fn Call(cx: &BlockAndBuilder,
-            fn_: ValueRef,
-            args: &[ValueRef],
-            debug_loc: DebugLoc)
-            -> ValueRef {
-    debug_loc.apply(cx.fcx());
-    let bundle = cx.lpad().and_then(|b| b.bundle());
-    cx.call(fn_, args, bundle)
-}
-
-pub fn AtomicFence(cx: &BlockAndBuilder, order: AtomicOrdering, scope: SynchronizationScope) {
-    cx.atomic_fence(order, scope)
-}
-
-pub fn Select(cx: &BlockAndBuilder, if_: ValueRef, then: ValueRef, else_: ValueRef) -> ValueRef {
-    cx.select(if_, then, else_)
-}
-
-pub fn VAArg(cx: &BlockAndBuilder, list: ValueRef, ty: Type) -> ValueRef {
-    cx.va_arg(list, ty)
-}
-
-pub fn ExtractElement(cx: &BlockAndBuilder, vec_val: ValueRef, index: ValueRef) -> ValueRef {
-    cx.extract_element(vec_val, index)
-}
-
-pub fn InsertElement(cx: &BlockAndBuilder, vec_val: ValueRef, elt_val: ValueRef,
-                     index: ValueRef) -> ValueRef {
-    cx.insert_element(vec_val, elt_val, index)
-}
-
-pub fn ShuffleVector(cx: &BlockAndBuilder, v1: ValueRef, v2: ValueRef,
-                     mask: ValueRef) -> ValueRef {
-    cx.shuffle_vector(v1, v2, mask)
-}
-
-pub fn VectorSplat(cx: &BlockAndBuilder, num_elts: usize, elt_val: ValueRef) -> ValueRef {
-    cx.vector_splat(num_elts, elt_val)
-}
-
-pub fn ExtractValue(cx: &BlockAndBuilder, agg_val: ValueRef, index: usize) -> ValueRef {
-    cx.extract_value(agg_val, index)
-}
-
-pub fn InsertValue(cx: &BlockAndBuilder, agg_val: ValueRef, elt_val: ValueRef, index: usize) -> ValueRef {
-    cx.insert_value(agg_val, elt_val, index)
-}
-
-pub fn IsNull(cx: &BlockAndBuilder, val: ValueRef) -> ValueRef {
-    cx.is_null(val)
-}
-
-pub fn IsNotNull(cx: &BlockAndBuilder, val: ValueRef) -> ValueRef {
-    cx.is_not_null(val)
-}
-
-pub fn PtrDiff(cx: &BlockAndBuilder, lhs: ValueRef, rhs: ValueRef) -> ValueRef {
-    cx.ptrdiff(lhs, rhs)
-}
-
-pub fn Trap(cx: &BlockAndBuilder) {
-    cx.trap();
-}
-
-pub fn LandingPad(cx: &BlockAndBuilder, ty: Type, pers_fn: ValueRef,
-                  num_clauses: usize) -> ValueRef {
-    cx.landing_pad(ty, pers_fn, num_clauses, cx.fcx().llfn)
-}
-
-pub fn AddClause(cx: &BlockAndBuilder, landing_pad: ValueRef, clause: ValueRef) {
-    cx.add_clause(landing_pad, clause)
-}
-
-pub fn SetCleanup(cx: &BlockAndBuilder, landing_pad: ValueRef) {
-    cx.set_cleanup(landing_pad)
-}
-
-pub fn SetPersonalityFn(cx: &BlockAndBuilder, f: ValueRef) {
-    cx.set_personality_fn(f)
-}
-
-// Atomic Operations
-pub fn AtomicCmpXchg(cx: &BlockAndBuilder, dst: ValueRef,
-                     cmp: ValueRef, src: ValueRef,
-                     order: AtomicOrdering,
-                     failure_order: AtomicOrdering,
-                     weak: llvm::Bool) -> ValueRef {
-    cx.atomic_cmpxchg(dst, cmp, src, order, failure_order, weak)
-}
-pub fn AtomicRMW(cx: &BlockAndBuilder, op: AtomicRmwBinOp,
-                 dst: ValueRef, src: ValueRef,
-                 order: AtomicOrdering) -> ValueRef {
-    cx.atomic_rmw(op, dst, src, order)
-}
-
-pub fn CleanupPad(cx: &BlockAndBuilder,
-                  parent: Option<ValueRef>,
-                  args: &[ValueRef]) -> ValueRef {
-    cx.cleanup_pad(parent, args)
-}
-
-pub fn CleanupRet(cx: &BlockAndBuilder,
-                  cleanup: ValueRef,
-                  unwind: Option<BasicBlockRef>) -> ValueRef {
-    cx.cleanup_ret(cleanup, unwind)
-}
-
-pub fn CatchPad(cx: &BlockAndBuilder,
-                parent: ValueRef,
-                args: &[ValueRef]) -> ValueRef {
-    cx.catch_pad(parent, args)
-}
-
-pub fn CatchRet(cx: &BlockAndBuilder, pad: ValueRef, unwind: BasicBlockRef) -> ValueRef {
-    cx.catch_ret(pad, unwind)
-}
-
-pub fn CatchSwitch(cx: &BlockAndBuilder,
-                   parent: Option<ValueRef>,
-                   unwind: Option<BasicBlockRef>,
-                   num_handlers: usize) -> ValueRef {
-    cx.catch_switch(parent, unwind, num_handlers)
-}
-
-pub fn AddHandler(cx: &BlockAndBuilder, catch_switch: ValueRef, handler: BasicBlockRef) {
-    cx.add_handler(catch_switch, handler)
-}
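The deletions above remove the last of build.rs's capitalized wrapper functions; call sites switch to the lower-case methods on BlockAndBuilder directly. A minimal before/after sketch of the migration (illustrative only; lhs, rhs, and dest are placeholders, not code from this patch):

    // Before: free functions from build.rs, each taking an explicit DebugLoc.
    let sum = Add(&bcx, lhs, rhs, DebugLoc::None);
    Store(&bcx, sum, dest);

    // After: the same instructions as builder methods; BlockAndBuilder keeps
    // the debug-location and landing-pad state itself.
    let sum = bcx.add(lhs, rhs);
    bcx.store(sum, dest);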
diff --git a/src/librustc_trans/builder.rs b/src/librustc_trans/builder.rs
index 0480bb82a998e..1f937ba6e8af5 100644
--- a/src/librustc_trans/builder.rs
+++ b/src/librustc_trans/builder.rs
@@ -1103,6 +1103,20 @@ impl<'a, 'tcx> Builder<'a, 'tcx> {
         }
     }
 
+    pub fn add_case(s: ValueRef, on_val: ValueRef, dest: BasicBlockRef) {
+        unsafe {
+            if llvm::LLVMIsUndef(s) == llvm::True { return; }
+            llvm::LLVMAddCase(s, on_val, dest)
+        }
+    }
+
+    pub fn add_incoming_to_phi(phi: ValueRef, val: ValueRef, bb: BasicBlockRef) {
+        unsafe {
+            if llvm::LLVMIsUndef(phi) == llvm::True { return; }
+            llvm::LLVMAddIncoming(phi, &val, &bb, 1 as c_uint);
+        }
+    }
+
     /// Returns the ptr value that should be used for storing `val`.
     fn check_store<'b>(&self,
                        val: ValueRef,
diff --git a/src/librustc_trans/callee.rs b/src/librustc_trans/callee.rs
index 7422c5f8805f9..6dd2c46ecec73 100644
--- a/src/librustc_trans/callee.rs
+++ b/src/librustc_trans/callee.rs
@@ -25,12 +25,10 @@ use abi::{Abi, FnType};
 use attributes;
 use base;
 use base::*;
-use build::*;
 use common::{
     self, Block, BlockAndBuilder, CrateContext, FunctionContext, SharedCrateContext
 };
 use consts;
-use debuginfo::DebugLoc;
 use declare;
 use value::Value;
 use meth;
@@ -210,11 +208,10 @@ impl<'tcx> Callee<'tcx> {
     /// into memory somewhere. Nonetheless we return the actual return value of the
     /// function.
     pub fn call<'a, 'blk>(self, bcx: BlockAndBuilder<'blk, 'tcx>,
-                          debug_loc: DebugLoc,
                           args: &[ValueRef],
                           dest: Option<ValueRef>)
                           -> (BlockAndBuilder<'blk, 'tcx>, ValueRef) {
-        trans_call_inner(bcx, debug_loc, self, args, dest)
+        trans_call_inner(bcx, self, args, dest)
     }
 
     /// Turn the callee into a function pointer.
@@ -414,11 +411,11 @@ fn trans_fn_once_adapter_shim<'a, 'tcx>(
     let self_scope = fcx.push_custom_cleanup_scope();
     fcx.schedule_drop_mem(self_scope, llenv, closure_ty);
 
-    let bcx = callee.call(bcx, DebugLoc::None, &llargs[self_idx..], dest).0;
+    let bcx = callee.call(bcx, &llargs[self_idx..], dest).0;
 
     let bcx = fcx.pop_and_trans_custom_cleanup_scope(bcx, self_scope);
 
-    fcx.finish(&bcx, DebugLoc::None);
+    fcx.finish(&bcx);
 
     ccx.instances().borrow_mut().insert(method_instance, lloncefn);
 
@@ -531,7 +528,7 @@ fn trans_fn_pointer_shim<'a, 'tcx>(
     let llfnpointer = llfnpointer.unwrap_or_else(|| {
         // the first argument (`self`) will be ptr to the fn pointer
         if is_by_ref {
-            Load(&bcx, llargs[self_idx])
+            bcx.load(llargs[self_idx])
         } else {
             llargs[self_idx]
         }
@@ -543,8 +540,8 @@ fn trans_fn_pointer_shim<'a, 'tcx>(
         data: Fn(llfnpointer),
         ty: bare_fn_ty
     };
-    let bcx = callee.call(bcx, DebugLoc::None, &llargs[(self_idx + 1)..], dest).0;
-    fcx.finish(&bcx, DebugLoc::None);
+    let bcx = callee.call(bcx, &llargs[(self_idx + 1)..], dest).0;
+    fcx.finish(&bcx);
 
     ccx.fn_pointer_shims().borrow_mut().insert(bare_fn_ty_maybe_ref, llfn);
 
@@ -654,7 +651,6 @@ fn get_fn<'a, 'tcx>(ccx: &CrateContext<'a, 'tcx>,
 // Translating calls
 
 fn trans_call_inner<'a, 'blk, 'tcx>(bcx: BlockAndBuilder<'blk, 'tcx>,
-                                    debug_loc: DebugLoc,
                                     callee: Callee<'tcx>,
                                     args: &[ValueRef],
                                     opt_llretslot: Option<ValueRef>)
@@ -689,7 +685,7 @@ fn trans_call_inner<'a, 'blk, 'tcx>(bcx: BlockAndBuilder<'blk, 'tcx>,
     if fn_ty.ret.is_indirect() {
         let mut llretslot = opt_llretslot.unwrap();
         if let Some(ty) = fn_ty.ret.cast {
-            llretslot = PointerCast(&bcx, llretslot, ty.ptr_to());
+            llretslot = bcx.pointercast(llretslot, ty.ptr_to());
         }
         llargs.push(llretslot);
     }
@@ -700,7 +696,7 @@ fn trans_call_inner<'a, 'blk, 'tcx>(bcx: BlockAndBuilder<'blk, 'tcx>,
             let fn_ptr = meth::get_virtual_method(&bcx, args[1], idx);
             let llty = fn_ty.llvm_type(&bcx.ccx()).ptr_to();
-            callee = Fn(PointerCast(&bcx, fn_ptr, llty));
+            callee = Fn(bcx.pointercast(fn_ptr, llty));
             llargs.extend_from_slice(&args[2..]);
         }
         _ => llargs.extend_from_slice(args)
@@ -711,7 +707,7 @@ fn trans_call_inner<'a, 'blk, 'tcx>(bcx: BlockAndBuilder<'blk, 'tcx>,
         _ => bug!("expected fn pointer callee, found {:?}", callee)
     };
 
-    let (llret, bcx) = base::invoke(bcx, llfn, &llargs, debug_loc);
+    let (llret, bcx) = base::invoke(bcx, llfn, &llargs);
     fn_ty.apply_attrs_callsite(llret);
 
     // If the function we just called does not use an outpointer,
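With the DebugLoc parameter gone from Callee::call, a call site now threads only the block, the argument slice, and the optional return slot. Sketch of the resulting shape (same signature as above; the values are placeholders):

    // Returns the next block plus the raw return value of the call.
    let (bcx, llret) = callee.call(bcx, &llargs, Some(llretslot));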
diff --git a/src/librustc_trans/cleanup.rs b/src/librustc_trans/cleanup.rs
index 3524d458e0105..651687286aeca 100644
--- a/src/librustc_trans/cleanup.rs
+++ b/src/librustc_trans/cleanup.rs
@@ -118,7 +118,6 @@ pub use self::EarlyExitLabel::*;
 use llvm::{BasicBlockRef, ValueRef};
 use base;
-use build;
 use common;
 use common::{BlockAndBuilder, FunctionContext, LandingPad};
 use debuginfo::{DebugLoc};
@@ -344,7 +343,7 @@ impl<'blk, 'tcx> FunctionContext<'blk, 'tcx> {
 
         let mut bcx = bcx;
         for cleanup in scope.cleanups.iter().rev() {
-            bcx = cleanup.trans(bcx, scope.debug_loc);
+            bcx = cleanup.trans(bcx);
         }
         bcx
     }
@@ -422,13 +421,13 @@ impl<'blk, 'tcx> FunctionContext<'blk, 'tcx> {
                 UnwindKind::LandingPad => {
                     let addr = self.landingpad_alloca.get()
                                    .unwrap();
-                    let lp = build::Load(&bcx, addr);
+                    let lp = bcx.load(addr);
                     base::call_lifetime_end(&bcx, addr);
                     base::trans_unwind_resume(&bcx, lp);
                 }
                 UnwindKind::CleanupPad(_) => {
-                    let pad = build::CleanupPad(&bcx, None, &[]);
-                    build::CleanupRet(&bcx, pad, None);
+                    let pad = bcx.cleanup_pad(None, &[]);
+                    bcx.cleanup_ret(pad, None);
                 }
             }
             prev_llbb = bcx.llbb();
@@ -488,7 +487,7 @@ impl<'blk, 'tcx> FunctionContext<'blk, 'tcx> {
             let mut bcx_out = bcx_in;
             let len = scope.cleanups.len();
             for cleanup in scope.cleanups.iter().rev().take(len - skip) {
-                bcx_out = cleanup.trans(bcx_out, scope.debug_loc);
+                bcx_out = cleanup.trans(bcx_out);
             }
             skip = 0;
             exit_label.branch(&bcx_out, prev_llbb);
@@ -540,8 +539,8 @@ impl<'blk, 'tcx> FunctionContext<'blk, 'tcx> {
             // creation of the landingpad instruction). We then create a
            // cleanuppad instruction which has no filters to run cleanup on all
            // exceptions.
-            build::SetPersonalityFn(&pad_bcx, llpersonality);
-            let llretval = build::CleanupPad(&pad_bcx, None, &[]);
+            pad_bcx.set_personality_fn(llpersonality);
+            let llretval = pad_bcx.cleanup_pad(None, &[]);
             UnwindKind::CleanupPad(llretval)
         } else {
             // The landing pad return type (the type being propagated). Not sure
@@ -552,10 +551,10 @@ impl<'blk, 'tcx> FunctionContext<'blk, 'tcx> {
                                             false);
 
             // The only landing pad clause will be 'cleanup'
-            let llretval = build::LandingPad(&pad_bcx, llretty, llpersonality, 1);
+            let llretval = pad_bcx.landing_pad(llretty, llpersonality, 1, pad_bcx.fcx().llfn);
 
             // The landing pad block is a cleanup
-            build::SetCleanup(&pad_bcx, llretval);
+            pad_bcx.set_cleanup(llretval);
 
             let addr = match self.landingpad_alloca.get() {
                 Some(addr) => addr,
@@ -567,7 +566,7 @@ impl<'blk, 'tcx> FunctionContext<'blk, 'tcx> {
                     addr
                 }
             };
-            build::Store(&pad_bcx, llretval, addr);
+            pad_bcx.store(llretval, addr);
             UnwindKind::LandingPad
         };
 
@@ -629,9 +628,9 @@ impl EarlyExitLabel {
     /// the `cleanupret` instruction instead of the `br` instruction.
     fn branch(&self, from_bcx: &BlockAndBuilder, to_llbb: BasicBlockRef) {
         if let UnwindExit(UnwindKind::CleanupPad(pad)) = *self {
-            build::CleanupRet(from_bcx, pad, Some(to_llbb));
+            from_bcx.cleanup_ret(pad, Some(to_llbb));
         } else {
-            build::Br(from_bcx, to_llbb, DebugLoc::None);
+            from_bcx.br(to_llbb);
         }
     }
 
@@ -649,7 +648,7 @@ impl EarlyExitLabel {
     fn start(&self, bcx: &BlockAndBuilder) -> EarlyExitLabel {
         match *self {
             UnwindExit(UnwindKind::CleanupPad(..)) => {
-                let pad = build::CleanupPad(bcx, None, &[]);
+                let pad = bcx.cleanup_pad(None, &[]);
                 bcx.set_lpad_ref(Some(bcx.fcx().lpad_arena.alloc(LandingPad::msvc(pad))));
                 UnwindExit(UnwindKind::CleanupPad(pad))
             }
@@ -683,10 +682,7 @@ pub struct DropValue<'tcx> {
 }
 
 impl<'tcx> DropValue<'tcx> {
-    fn trans<'blk>(&self,
-                   bcx: BlockAndBuilder<'blk, 'tcx>,
-                   debug_loc: DebugLoc)
-                   -> BlockAndBuilder<'blk, 'tcx> {
+    fn trans<'blk>(&self, bcx: BlockAndBuilder<'blk, 'tcx>) -> BlockAndBuilder<'blk, 'tcx> {
         let skip_dtor = self.skip_dtor;
         let _icx = if skip_dtor {
             base::push_ctxt("<DropValue as Cleanup>::trans skip_dtor=true")
         } else {
             base::push_ctxt("<DropValue as Cleanup>::trans skip_dtor=false")
         };
         if self.is_immediate {
-            glue::drop_ty_immediate(bcx, self.val, self.ty, debug_loc, self.skip_dtor)
+            glue::drop_ty_immediate(bcx, self.val, self.ty, self.skip_dtor)
         } else {
-            glue::drop_ty_core(bcx, self.val, self.ty, debug_loc, self.skip_dtor)
+            glue::drop_ty_core(bcx, self.val, self.ty, self.skip_dtor)
         }
     }
 }
diff --git a/src/librustc_trans/common.rs b/src/librustc_trans/common.rs
index 84d30b6b97a42..32f437fea5220 100644
--- a/src/librustc_trans/common.rs
+++ b/src/librustc_trans/common.rs
@@ -26,12 +26,11 @@ use middle::lang_items::LangItem;
 use rustc::ty::subst::Substs;
 use abi::{Abi, FnType};
 use base;
-use build;
 use builder::Builder;
 use callee::Callee;
 use cleanup;
 use consts;
-use debuginfo::{self, DebugLoc};
+use debuginfo;
 use declare;
 use machine;
 use monomorphize;
@@ -434,6 +433,12 @@ impl<'a, 'tcx> FunctionContext<'a, 'tcx> {
         unwresume.set(Some(llfn));
         Callee::ptr(llfn, ty)
     }
+
+    pub fn alloca(&self, ty: Type, name: &str) -> ValueRef {
+        let b = self.ccx.builder();
+        b.position_before(self.alloca_insert_pt.get().unwrap());
+        b.alloca(ty, name)
+    }
 }
 
 // Basic block context.  We create a block context for each basic block
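The new FunctionContext::alloca absorbs what build.rs did in AllocaFcx: it creates a fresh builder, positions it at the function's alloca insertion point, and emits the slot there, so locals always land in the entry block no matter where the current builder sits. (Unlike the old wrapper, it no longer clears the debug location first.) Usage as it appears later in this patch:

    // Stack slot in the entry block, independent of the current position.
    let llscratch = bcx.fcx().alloca(ret.original_ty, "ret");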
@@ -998,35 +1003,32 @@ pub fn langcall(tcx: TyCtxt,
 
 pub fn build_unchecked_lshift<'blk, 'tcx>(bcx: &BlockAndBuilder<'blk, 'tcx>,
                                           lhs: ValueRef,
-                                          rhs: ValueRef,
-                                          binop_debug_loc: DebugLoc) -> ValueRef {
+                                          rhs: ValueRef) -> ValueRef {
     let rhs = base::cast_shift_expr_rhs(bcx, hir::BinOp_::BiShl, lhs, rhs);
     // #1877, #10183: Ensure that input is always valid
-    let rhs = shift_mask_rhs(bcx, rhs, binop_debug_loc);
-    build::Shl(bcx, lhs, rhs, binop_debug_loc)
+    let rhs = shift_mask_rhs(bcx, rhs);
+    bcx.shl(lhs, rhs)
 }
 
 pub fn build_unchecked_rshift<'blk, 'tcx>(bcx: &BlockAndBuilder<'blk, 'tcx>,
                                           lhs_t: Ty<'tcx>,
                                           lhs: ValueRef,
-                                          rhs: ValueRef,
-                                          binop_debug_loc: DebugLoc) -> ValueRef {
+                                          rhs: ValueRef) -> ValueRef {
     let rhs = base::cast_shift_expr_rhs(bcx, hir::BinOp_::BiShr, lhs, rhs);
     // #1877, #10183: Ensure that input is always valid
-    let rhs = shift_mask_rhs(bcx, rhs, binop_debug_loc);
+    let rhs = shift_mask_rhs(bcx, rhs);
     let is_signed = lhs_t.is_signed();
     if is_signed {
-        build::AShr(bcx, lhs, rhs, binop_debug_loc)
+        bcx.ashr(lhs, rhs)
     } else {
-        build::LShr(bcx, lhs, rhs, binop_debug_loc)
+        bcx.lshr(lhs, rhs)
     }
 }
 
 fn shift_mask_rhs<'blk, 'tcx>(bcx: &BlockAndBuilder<'blk, 'tcx>,
-                              rhs: ValueRef,
-                              debug_loc: DebugLoc) -> ValueRef {
+                              rhs: ValueRef) -> ValueRef {
     let rhs_llty = val_ty(rhs);
-    build::And(bcx, rhs, shift_mask_val(bcx, rhs_llty, rhs_llty, false), debug_loc)
+    bcx.and(rhs, shift_mask_val(bcx, rhs_llty, rhs_llty, false))
 }
 
 pub fn shift_mask_val<'blk, 'tcx>(
@@ -1048,7 +1050,7 @@ pub fn shift_mask_val<'blk, 'tcx>(
         },
         TypeKind::Vector => {
             let mask = shift_mask_val(bcx, llty.element_type(), mask_llty.element_type(), invert);
-            build::VectorSplat(bcx, mask_llty.vector_length(), mask)
+            bcx.vector_splat(mask_llty.vector_length(), mask)
         },
         _ => bug!("shift_mask_val: expected Integer or Vector, found {:?}", kind),
     }
 }
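shift_mask_rhs keeps shifts well defined by AND-ing the right-hand side with a mask that, for integer operands, is the bit width minus one; that is the fix referenced by #1877 and #10183, since LLVM's shl/lshr/ashr are undefined once the shift amount reaches the bit width. A worked example of the mask, assuming a 32-bit operand (not code from the patch):

    // The mask for a 32-bit value is 31 (0b11111),
    // so a shift by 35 behaves like a shift by 35 & 31 == 3.
    let masked = 35u32 & (32 - 1); // == 3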
diff --git a/src/librustc_trans/glue.rs b/src/librustc_trans/glue.rs
index 7044f8d583520..10c38af8a7396 100644
--- a/src/librustc_trans/glue.rs
+++ b/src/librustc_trans/glue.rs
@@ -22,10 +22,9 @@ use rustc::traits;
 use rustc::ty::{self, AdtKind, Ty, TyCtxt, TypeFoldable};
 use adt;
 use base::*;
-use build::*;
 use callee::{Callee};
+use builder::Builder;
 use common::*;
-use debuginfo::DebugLoc;
 use machine::*;
 use monomorphize;
 use trans_item::TransItem;
@@ -41,35 +40,28 @@ use syntax_pos::DUMMY_SP;
 pub fn trans_exchange_free_dyn<'blk, 'tcx>(bcx: BlockAndBuilder<'blk, 'tcx>,
                                            v: ValueRef,
                                            size: ValueRef,
-                                           align: ValueRef,
-                                           debug_loc: DebugLoc)
+                                           align: ValueRef)
                                            -> BlockAndBuilder<'blk, 'tcx> {
     let _icx = push_ctxt("trans_exchange_free");
 
     let def_id = langcall(bcx.tcx(), None, "", ExchangeFreeFnLangItem);
-    let args = [PointerCast(&bcx, v, Type::i8p(bcx.ccx())), size, align];
+    let args = [bcx.pointercast(v, Type::i8p(bcx.ccx())), size, align];
     Callee::def(bcx.ccx(), def_id, bcx.tcx().intern_substs(&[]))
-        .call(bcx, debug_loc, &args, None).0
+        .call(bcx, &args, None).0
 }
 
 pub fn trans_exchange_free<'blk, 'tcx>(cx: BlockAndBuilder<'blk, 'tcx>,
                                        v: ValueRef,
                                        size: u64,
-                                       align: u32,
-                                       debug_loc: DebugLoc)
+                                       align: u32)
                                        -> BlockAndBuilder<'blk, 'tcx> {
     let ccx = cx.ccx();
-    trans_exchange_free_dyn(cx,
-                            v,
-                            C_uint(ccx, size),
-                            C_uint(ccx, align),
-                            debug_loc)
+    trans_exchange_free_dyn(cx, v, C_uint(ccx, size), C_uint(ccx, align))
 }
 
 pub fn trans_exchange_free_ty<'blk, 'tcx>(bcx: BlockAndBuilder<'blk, 'tcx>,
                                           ptr: ValueRef,
-                                          content_ty: Ty<'tcx>,
-                                          debug_loc: DebugLoc)
+                                          content_ty: Ty<'tcx>)
                                           -> BlockAndBuilder<'blk, 'tcx> {
     assert!(type_is_sized(bcx.ccx().tcx(), content_ty));
     let sizing_type = sizing_type_of(bcx.ccx(), content_ty);
@@ -78,7 +70,7 @@ pub fn trans_exchange_free_ty<'blk, 'tcx>(bcx: BlockAndBuilder<'blk, 'tcx>,
     // `Box<ZeroSizeType>` does not allocate.
     if content_size != 0 {
         let content_align = align_of(bcx.ccx(), content_ty);
-        trans_exchange_free(bcx, ptr, content_size, content_align, debug_loc)
+        trans_exchange_free(bcx, ptr, content_size, content_align)
     } else {
         bcx
     }
@@ -132,15 +124,13 @@ pub fn get_drop_glue_type<'a, 'tcx>(tcx: TyCtxt<'a, 'tcx, 'tcx>,
 
 pub fn drop_ty<'blk, 'tcx>(bcx: BlockAndBuilder<'blk, 'tcx>,
                            v: ValueRef,
-                           t: Ty<'tcx>,
-                           debug_loc: DebugLoc) -> BlockAndBuilder<'blk, 'tcx> {
-    drop_ty_core(bcx, v, t, debug_loc, false)
+                           t: Ty<'tcx>) -> BlockAndBuilder<'blk, 'tcx> {
+    drop_ty_core(bcx, v, t, false)
 }
 
 pub fn drop_ty_core<'blk, 'tcx>(bcx: BlockAndBuilder<'blk, 'tcx>,
                                 v: ValueRef,
                                 t: Ty<'tcx>,
-                                debug_loc: DebugLoc,
                                 skip_dtor: bool) -> BlockAndBuilder<'blk, 'tcx> {
     // NB: v is an *alias* of type t here, not a direct value.
@@ -156,13 +146,13 @@ pub fn drop_ty_core<'blk, 'tcx>(bcx: BlockAndBuilder<'blk, 'tcx>,
         let glue = get_drop_glue_core(ccx, g);
         let glue_type = get_drop_glue_type(ccx.tcx(), t);
         let ptr = if glue_type != t {
-            PointerCast(&bcx, v, type_of(ccx, glue_type).ptr_to())
+            bcx.pointercast(v, type_of(ccx, glue_type).ptr_to())
         } else {
             v
         };
 
         // No drop-hint ==> call standard drop glue
-        Call(&bcx, glue, &[ptr], debug_loc);
+        bcx.call(glue, &[ptr], bcx.lpad().and_then(|b| b.bundle()));
     }
     bcx
 }
@@ -170,14 +160,13 @@ pub fn drop_ty_core<'blk, 'tcx>(bcx: BlockAndBuilder<'blk, 'tcx>,
 pub fn drop_ty_immediate<'blk, 'tcx>(bcx: BlockAndBuilder<'blk, 'tcx>,
                                      v: ValueRef,
                                      t: Ty<'tcx>,
-                                     debug_loc: DebugLoc,
                                      skip_dtor: bool)
                                      -> BlockAndBuilder<'blk, 'tcx> {
     let _icx = push_ctxt("drop_ty_immediate");
     let vp = alloc_ty(&bcx, t, "");
     call_lifetime_start(&bcx, vp);
     store_ty(&bcx, v, vp, t);
-    let bcx = drop_ty_core(bcx, vp, t, debug_loc, skip_dtor);
+    let bcx = drop_ty_core(bcx, vp, t, skip_dtor);
     call_lifetime_end(&bcx, vp);
     bcx
 }
@@ -249,7 +238,7 @@ pub fn implement_drop_glue<'a, 'tcx>(ccx: &CrateContext<'a, 'tcx>,
     // type, so we don't need to explicitly cast the function parameter.
 
     let bcx = make_drop_glue(bcx, get_param(llfn, 0), g);
-    fcx.finish(&bcx, DebugLoc::None);
+    fcx.finish(&bcx);
 }
 
 fn trans_custom_dtor<'blk, 'tcx>(bcx: BlockAndBuilder<'blk, 'tcx>,
@@ -285,8 +274,8 @@ fn trans_custom_dtor<'blk, 'tcx>(bcx: BlockAndBuilder<'blk, 'tcx>,
     } else {
         // FIXME(#36457) -- we should pass unsized values to drop glue as two arguments
         unsized_args = [
-            Load(&bcx, get_dataptr(&bcx, v0)),
-            Load(&bcx, get_meta(&bcx, v0))
+            bcx.load(get_dataptr(&bcx, v0)),
+            bcx.load(get_meta(&bcx, v0))
         ];
         &unsized_args
     };
@@ -301,7 +290,7 @@ fn trans_custom_dtor<'blk, 'tcx>(bcx: BlockAndBuilder<'blk, 'tcx>,
     };
     let dtor_did = def.destructor().unwrap();
     bcx = Callee::def(bcx.ccx(), dtor_did, vtbl.substs)
-        .call(bcx, DebugLoc::None, args, None).0;
+        .call(bcx, args, None).0;
 
     bcx.fcx().pop_and_trans_custom_cleanup_scope(bcx, contents_scope)
 }
@@ -436,29 +425,27 @@ fn make_drop_glue<'blk, 'tcx>(bcx: BlockAndBuilder<'blk, 'tcx>,
             assert!(!skip_dtor);
             if !type_is_sized(bcx.tcx(), content_ty) {
                 let llval = get_dataptr(&bcx, v0);
-                let llbox = Load(&bcx, llval);
-                let bcx = drop_ty(bcx, v0, content_ty, DebugLoc::None);
+                let llbox = bcx.load(llval);
+                let bcx = drop_ty(bcx, v0, content_ty);
                 // FIXME(#36457) -- we should pass unsized values to drop glue as two arguments
                 let info = get_meta(&bcx, v0);
-                let info = Load(&bcx, info);
+                let info = bcx.load(info);
                 let (llsize, llalign) = size_and_align_of_dst(&bcx, content_ty, info);
 
                 // `Box<ZeroSizeType>` does not allocate.
-                let needs_free = ICmp(
-                    &bcx,
+                let needs_free = bcx.icmp(
                     llvm::IntNE,
                     llsize,
                     C_uint(bcx.ccx(), 0u64),
-                    DebugLoc::None
                 );
                 with_cond(bcx, needs_free, |bcx| {
-                    trans_exchange_free_dyn(bcx, llbox, llsize, llalign, DebugLoc::None)
+                    trans_exchange_free_dyn(bcx, llbox, llsize, llalign)
                 })
             } else {
                 let llval = v0;
-                let llbox = Load(&bcx, llval);
-                let bcx = drop_ty(bcx, llbox, content_ty, DebugLoc::None);
-                trans_exchange_free_ty(bcx, llbox, content_ty, DebugLoc::None)
+                let llbox = bcx.load(llval);
+                let bcx = drop_ty(bcx, llbox, content_ty);
+                trans_exchange_free_ty(bcx, llbox, content_ty)
             }
         }
         ty::TyDynamic(..) => {
@@ -468,12 +455,11 @@ fn make_drop_glue<'blk, 'tcx>(bcx: BlockAndBuilder<'blk, 'tcx>,
             // FIXME(#36457) -- we should pass unsized values to drop glue as two arguments
             assert!(!skip_dtor);
             let data_ptr = get_dataptr(&bcx, v0);
-            let vtable_ptr = Load(&bcx, get_meta(&bcx, v0));
-            let dtor = Load(&bcx, vtable_ptr);
-            Call(&bcx,
-                 dtor,
-                 &[PointerCast(&bcx, Load(&bcx, data_ptr), Type::i8p(bcx.ccx()))],
-                 DebugLoc::None);
+            let vtable_ptr = bcx.load(get_meta(&bcx, v0));
+            let dtor = bcx.load(vtable_ptr);
+            bcx.call(dtor,
+                     &[bcx.pointercast(bcx.load(data_ptr), Type::i8p(bcx.ccx()))],
+                     bcx.lpad().and_then(|b| b.bundle()));
             bcx
         }
         ty::TyAdt(def, ..) if def.dtor_kind().is_present() && !skip_dtor => {
@@ -512,7 +498,7 @@ fn drop_structural_ty<'blk, 'tcx>(cx: BlockAndBuilder<'blk, 'tcx>,
             for (i, field) in variant.fields.iter().enumerate() {
                 let arg = monomorphize::field_ty(tcx, substs, field);
                 let field_ptr = adt::trans_field_ptr(&cx, t, av, Disr::from(variant.disr_val), i);
-                cx = drop_ty(cx, field_ptr, arg, DebugLoc::None);
+                cx = drop_ty(cx, field_ptr, arg);
             }
             return cx;
         }
@@ -521,8 +507,8 @@ fn drop_structural_ty<'blk, 'tcx>(cx: BlockAndBuilder<'blk, 'tcx>,
         adt::MaybeSizedValue::sized(av)
     } else {
         // FIXME(#36457) -- we should pass unsized values as two arguments
-        let data = Load(&cx, get_dataptr(&cx, av));
-        let info = Load(&cx, get_meta(&cx, av));
+        let data = cx.load(get_dataptr(&cx, av));
+        let info = cx.load(get_meta(&cx, av));
         adt::MaybeSizedValue::unsized_(data, info)
     };
 
@@ -531,7 +517,7 @@ fn drop_structural_ty<'blk, 'tcx>(cx: BlockAndBuilder<'blk, 'tcx>,
         ty::TyClosure(def_id, substs) => {
             for (i, upvar_ty) in substs.upvar_tys(def_id, cx.tcx()).enumerate() {
                 let llupvar = adt::trans_field_ptr(&cx, t, value, Disr(0), i);
-                cx = drop_ty(cx, llupvar, upvar_ty, DebugLoc::None);
+                cx = drop_ty(cx, llupvar, upvar_ty);
             }
         }
         ty::TyArray(_, n) => {
@@ -539,17 +525,17 @@ fn drop_structural_ty<'blk, 'tcx>(cx: BlockAndBuilder<'blk, 'tcx>,
             let len = C_uint(cx.ccx(), n);
             let unit_ty = t.sequence_element_type(cx.tcx());
             cx = tvec::slice_for_each(cx, base, unit_ty, len,
-                |bb, vv| drop_ty(bb, vv, unit_ty, DebugLoc::None));
+                |bb, vv| drop_ty(bb, vv, unit_ty));
         }
         ty::TySlice(_) | ty::TyStr => {
             let unit_ty = t.sequence_element_type(cx.tcx());
             cx = tvec::slice_for_each(cx, value.value, unit_ty, value.meta,
-                |bb, vv| drop_ty(bb, vv, unit_ty, DebugLoc::None));
+                |bb, vv| drop_ty(bb, vv, unit_ty));
         }
         ty::TyTuple(ref args) => {
             for (i, arg) in args.iter().enumerate() {
                 let llfld_a = adt::trans_field_ptr(&cx, t, value, Disr(0), i);
-                cx = drop_ty(cx, llfld_a, *arg, DebugLoc::None);
+                cx = drop_ty(cx, llfld_a, *arg);
             }
         }
         ty::TyAdt(adt, substs) => match adt.adt_kind() {
@@ -563,11 +549,11 @@ fn drop_structural_ty<'blk, 'tcx>(cx: BlockAndBuilder<'blk, 'tcx>,
                     } else {
                         // FIXME(#36457) -- we should pass unsized values as two arguments
                         let scratch = alloc_ty(&cx, field_ty, "__fat_ptr_iter");
-                        Store(&cx, llfld_a, get_dataptr(&cx, scratch));
-                        Store(&cx, value.meta, get_meta(&cx, scratch));
+                        cx.store(llfld_a, get_dataptr(&cx, scratch));
+                        cx.store(value.meta, get_meta(&cx, scratch));
                         scratch
                     };
-                    cx = drop_ty(cx, val, field_ty, DebugLoc::None);
+                    cx = drop_ty(cx, val, field_ty);
                 }
             }
             AdtKind::Union => {
@@ -591,7 +577,7 @@ fn drop_structural_ty<'blk, 'tcx>(cx: BlockAndBuilder<'blk, 'tcx>,
                     }
                     (adt::BranchKind::Switch, Some(lldiscrim_a)) => {
                         let tcx = cx.tcx();
-                        cx = drop_ty(cx, lldiscrim_a, tcx.types.isize, DebugLoc::None);
+                        cx = drop_ty(cx, lldiscrim_a, tcx.types.isize);
 
                         // Create a fall-through basic block for the "else" case of
                         // the switch instruction we're about to generate. Note that
@@ -607,8 +593,8 @@ fn drop_structural_ty<'blk, 'tcx>(cx: BlockAndBuilder<'blk, 'tcx>,
                        // call this for an already-valid enum in which case the `ret
                        // void` will never be hit.
                        let ret_void_cx = fcx.new_block("enum-iter-ret-void").build();
-                        RetVoid(&ret_void_cx, DebugLoc::None);
-                        let llswitch = Switch(&cx, lldiscrim_a, ret_void_cx.llbb(), n_variants);
+                        ret_void_cx.ret_void();
+                        let llswitch = cx.switch(lldiscrim_a, ret_void_cx.llbb(), n_variants);
                         let next_cx = fcx.new_block("enum-iter-next").build();
 
                         for variant in &adt.variants {
@@ -616,9 +602,9 @@ fn drop_structural_ty<'blk, 'tcx>(cx: BlockAndBuilder<'blk, 'tcx>,
                                 &variant.disr_val.to_string());
                             let variant_cx = fcx.new_block(&variant_cx_name).build();
                             let case_val = adt::trans_case(&cx, t, Disr::from(variant.disr_val));
-                            AddCase(llswitch, case_val, variant_cx.llbb());
+                            Builder::add_case(llswitch, case_val, variant_cx.llbb());
                             let variant_cx = iter_variant(variant_cx, t, value, variant, substs);
-                            Br(&variant_cx, next_cx.llbb(), DebugLoc::None);
+                            variant_cx.br(next_cx.llbb());
                         }
                         cx = next_cx;
                     }
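Builder::add_case, added to builder.rs earlier in this patch, is deliberately an associated function rather than a method: the switch value it receives may be an undef constant (for example in code already proven unreachable), and in that case the guard turns the call into a no-op instead of tripping LLVM. The enum-drop loop above relies on exactly that:

    // Silently does nothing if `llswitch` is undef.
    Builder::add_case(llswitch, case_val, variant_cx.llbb());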
diff --git a/src/librustc_trans/intrinsic.rs b/src/librustc_trans/intrinsic.rs
index 74af7c4e3a7bd..90f8c64a2cf8b 100644
--- a/src/librustc_trans/intrinsic.rs
+++ b/src/librustc_trans/intrinsic.rs
@@ -18,7 +18,6 @@ use llvm::{ValueRef};
 use abi::{Abi, FnType};
 use adt;
 use base::*;
-use build::*;
 use common::*;
 use debuginfo::DebugLoc;
 use declare;
@@ -120,7 +119,7 @@ pub fn trans_intrinsic_call<'a, 'blk, 'tcx>(bcx: &BlockAndBuilder<'blk, 'tcx>,
     // These are the only intrinsic functions that diverge.
     if name == "abort" {
         let llfn = ccx.get_intrinsic(&("llvm.trap"));
-        Call(bcx, llfn, &[], call_debug_location);
+        bcx.call(llfn, &[], bcx.lpad().and_then(|b| b.bundle()));
         return;
     } else if name == "unreachable" {
         // FIXME: do nothing?
@@ -132,23 +131,23 @@ pub fn trans_intrinsic_call<'a, 'blk, 'tcx>(bcx: &BlockAndBuilder<'blk, 'tcx>,
     let simple = get_simple_intrinsic(ccx, name);
     let llval = match (simple, name) {
         (Some(llfn), _) => {
-            Call(bcx, llfn, &llargs, call_debug_location)
+            bcx.call(llfn, &llargs, bcx.lpad().and_then(|b| b.bundle()))
         }
         (_, "likely") => {
             let expect = ccx.get_intrinsic(&("llvm.expect.i1"));
-            Call(bcx, expect, &[llargs[0], C_bool(ccx, true)], call_debug_location)
+            bcx.call(expect, &[llargs[0], C_bool(ccx, true)], bcx.lpad().and_then(|b| b.bundle()))
         }
         (_, "unlikely") => {
             let expect = ccx.get_intrinsic(&("llvm.expect.i1"));
-            Call(bcx, expect, &[llargs[0], C_bool(ccx, false)], call_debug_location)
+            bcx.call(expect, &[llargs[0], C_bool(ccx, false)], bcx.lpad().and_then(|b| b.bundle()))
         }
         (_, "try") => {
-            try_intrinsic(bcx, llargs[0], llargs[1], llargs[2], llresult, call_debug_location);
+            try_intrinsic(bcx, llargs[0], llargs[1], llargs[2], llresult);
             C_nil(ccx)
         }
         (_, "breakpoint") => {
             let llfn = ccx.get_intrinsic(&("llvm.debugtrap"));
-            Call(bcx, llfn, &[], call_debug_location)
+            bcx.call(llfn, &[], bcx.lpad().and_then(|b| b.bundle()))
         }
         (_, "size_of") => {
             let tp_ty = substs.type_at(0);
@@ -213,12 +212,12 @@ pub fn trans_intrinsic_call<'a, 'blk, 'tcx>(bcx: &BlockAndBuilder<'blk, 'tcx>,
         (_, "offset") => {
             let ptr = llargs[0];
             let offset = llargs[1];
-            InBoundsGEP(bcx, ptr, &[offset])
+            bcx.inbounds_gep(ptr, &[offset])
         }
         (_, "arith_offset") => {
             let ptr = llargs[0];
             let offset = llargs[1];
-            GEP(bcx, ptr, &[offset])
+            bcx.gep(ptr, &[offset])
         }
 
         (_, "copy_nonoverlapping") => {
             copy_intrinsic(bcx,
@@ -228,8 +227,7 @@ pub fn trans_intrinsic_call<'a, 'blk, 'tcx>(bcx: &BlockAndBuilder<'blk, 'tcx>,
                            substs.type_at(0),
                            llargs[1],
                            llargs[0],
-                           llargs[2],
-                           call_debug_location)
+                           llargs[2])
         }
         (_, "copy") => {
             copy_intrinsic(bcx,
@@ -238,8 +236,7 @@ pub fn trans_intrinsic_call<'a, 'blk, 'tcx>(bcx: &BlockAndBuilder<'blk, 'tcx>,
                            substs.type_at(0),
                            llargs[1],
                            llargs[0],
-                           llargs[2],
-                           call_debug_location)
+                           llargs[2])
         }
         (_, "write_bytes") => {
             memset_intrinsic(bcx,
@@ -247,8 +244,7 @@ pub fn trans_intrinsic_call<'a, 'blk, 'tcx>(bcx: &BlockAndBuilder<'blk, 'tcx>,
                              substs.type_at(0),
                              llargs[0],
                              llargs[1],
-                             llargs[2],
-                             call_debug_location)
+                             llargs[2])
         }
 
         (_, "volatile_copy_nonoverlapping_memory") => {
@@ -258,8 +254,7 @@ pub fn trans_intrinsic_call<'a, 'blk, 'tcx>(bcx: &BlockAndBuilder<'blk, 'tcx>,
                            substs.type_at(0),
                            llargs[0],
                            llargs[1],
-                           llargs[2],
-                           call_debug_location)
+                           llargs[2])
         }
         (_, "volatile_copy_memory") => {
             copy_intrinsic(bcx,
@@ -268,8 +263,7 @@ pub fn trans_intrinsic_call<'a, 'blk, 'tcx>(bcx: &BlockAndBuilder<'blk, 'tcx>,
                            substs.type_at(0),
                            llargs[0],
                            llargs[1],
-                           llargs[2],
-                           call_debug_location)
+                           llargs[2])
        }
         (_, "volatile_set_memory") => {
             memset_intrinsic(bcx,
@@ -277,16 +271,15 @@ pub fn trans_intrinsic_call<'a, 'blk, 'tcx>(bcx: &BlockAndBuilder<'blk, 'tcx>,
                              substs.type_at(0),
                              llargs[0],
                              llargs[1],
-                             llargs[2],
-                             call_debug_location)
+                             llargs[2])
         }
         (_, "volatile_load") => {
             let tp_ty = substs.type_at(0);
             let mut ptr = llargs[0];
             if let Some(ty) = fn_ty.ret.cast {
-                ptr = PointerCast(bcx, ptr, ty.ptr_to());
+                ptr = bcx.pointercast(ptr, ty.ptr_to());
             }
-            let load = VolatileLoad(bcx, ptr);
+            let load = bcx.volatile_load(ptr);
             unsafe {
                 llvm::LLVMSetAlignment(load, type_of::align_of(ccx, tp_ty));
             }
@@ -295,16 +288,16 @@ pub fn trans_intrinsic_call<'a, 'blk, 'tcx>(bcx: &BlockAndBuilder<'blk, 'tcx>,
         (_, "volatile_store") => {
             let tp_ty = substs.type_at(0);
             if type_is_fat_ptr(bcx.tcx(), tp_ty) {
-                VolatileStore(bcx, llargs[1], get_dataptr(bcx, llargs[0]));
-                VolatileStore(bcx, llargs[2], get_meta(bcx, llargs[0]));
+                bcx.volatile_store(llargs[1], get_dataptr(bcx, llargs[0]));
+                bcx.volatile_store(llargs[2], get_meta(bcx, llargs[0]));
             } else {
                 let val = if fn_ty.args[1].is_indirect() {
-                    Load(bcx, llargs[1])
+                    bcx.load(llargs[1])
                 } else {
                     from_immediate(bcx, llargs[1])
                 };
-                let ptr = PointerCast(bcx, llargs[0], val_ty(val).ptr_to());
-                let store = VolatileStore(bcx, val, ptr);
+                let ptr = bcx.pointercast(llargs[0], val_ty(val).ptr_to());
+                let store = bcx.volatile_store(val, ptr);
                 unsafe {
                     llvm::LLVMSetAlignment(store, type_of::align_of(ccx, tp_ty));
                 }
@@ -321,40 +314,39 @@ pub fn trans_intrinsic_call<'a, 'blk, 'tcx>(bcx: &BlockAndBuilder<'blk, 'tcx>,
                 Some((width, signed)) => match name {
                     "ctlz" => count_zeros_intrinsic(bcx, &format!("llvm.ctlz.i{}", width),
-                                                    llargs[0], call_debug_location),
+                                                    llargs[0]),
                     "cttz" => count_zeros_intrinsic(bcx, &format!("llvm.cttz.i{}", width),
-                                                    llargs[0], call_debug_location),
-                    "ctpop" => Call(bcx, ccx.get_intrinsic(&format!("llvm.ctpop.i{}", width)),
-                                    &llargs, call_debug_location),
+                                                    llargs[0]),
+                    "ctpop" => bcx.call(ccx.get_intrinsic(&format!("llvm.ctpop.i{}", width)),
+                                        &llargs, bcx.lpad().and_then(|b| b.bundle())),
                     "bswap" => {
                         if width == 8 {
                             llargs[0] // byte swap a u8/i8 is just a no-op
                         } else {
-                            Call(bcx, ccx.get_intrinsic(&format!("llvm.bswap.i{}", width)),
-                                 &llargs, call_debug_location)
+                            bcx.call(ccx.get_intrinsic(&format!("llvm.bswap.i{}", width)),
+                                     &llargs, bcx.lpad().and_then(|b| b.bundle()))
                         }
                     }
                     "add_with_overflow" | "sub_with_overflow" | "mul_with_overflow" => {
                         let intrinsic = format!("llvm.{}{}.with.overflow.i{}",
                                                 if signed { 's' } else { 'u' },
                                                 &name[..3], width);
-                        with_overflow_intrinsic(bcx, &intrinsic, llargs[0], llargs[1], llresult,
-                                                call_debug_location)
+                        with_overflow_intrinsic(bcx, &intrinsic, llargs[0], llargs[1], llresult)
                     },
-                    "overflowing_add" => Add(bcx, llargs[0], llargs[1], call_debug_location),
-                    "overflowing_sub" => Sub(bcx, llargs[0], llargs[1], call_debug_location),
-                    "overflowing_mul" => Mul(bcx, llargs[0], llargs[1], call_debug_location),
+                    "overflowing_add" => bcx.add(llargs[0], llargs[1]),
+                    "overflowing_sub" => bcx.sub(llargs[0], llargs[1]),
+                    "overflowing_mul" => bcx.mul(llargs[0], llargs[1]),
                     "unchecked_div" =>
                         if signed {
-                            SDiv(bcx, llargs[0], llargs[1], call_debug_location)
+                            bcx.sdiv(llargs[0], llargs[1])
                         } else {
-                            UDiv(bcx, llargs[0], llargs[1], call_debug_location)
+                            bcx.udiv(llargs[0], llargs[1])
                        },
                     "unchecked_rem" =>
                         if signed {
-                            SRem(bcx, llargs[0], llargs[1], call_debug_location)
+                            bcx.srem(llargs[0], llargs[1])
                        } else {
-                            URem(bcx, llargs[0], llargs[1], call_debug_location)
+                            bcx.urem(llargs[0], llargs[1])
                         },
                     _ => bug!(),
                 },
@@ -374,11 +366,11 @@ pub fn trans_intrinsic_call<'a, 'blk, 'tcx>(bcx: &BlockAndBuilder<'blk, 'tcx>,
             match float_type_width(sty) {
                 Some(_width) =>
                     match name {
-                        "fadd_fast" => FAddFast(bcx, llargs[0], llargs[1], call_debug_location),
-                        "fsub_fast" => FSubFast(bcx, llargs[0], llargs[1], call_debug_location),
-                        "fmul_fast" => FMulFast(bcx, llargs[0], llargs[1], call_debug_location),
-                        "fdiv_fast" => FDivFast(bcx, llargs[0], llargs[1], call_debug_location),
-                        "frem_fast" => FRemFast(bcx, llargs[0], llargs[1], call_debug_location),
+                        "fadd_fast" => bcx.fadd_fast(llargs[0], llargs[1]),
+                        "fsub_fast" => bcx.fsub_fast(llargs[0], llargs[1]),
+                        "fmul_fast" => bcx.fmul_fast(llargs[0], llargs[1]),
+                        "fdiv_fast" => bcx.fdiv_fast(llargs[0], llargs[1]),
+                        "frem_fast" => bcx.frem_fast(llargs[0], llargs[1]),
                         _ => bug!(),
                     },
                 None => {
@@ -407,7 +399,6 @@ pub fn trans_intrinsic_call<'a, 'blk, 'tcx>(bcx: &BlockAndBuilder<'blk, 'tcx>,
                                   callee_ty,
                                   &llargs,
                                   ret_ty, llret_ty,
-                                  call_debug_location,
                                   span)
         }
        // This requires that atomic intrinsics follow a specific naming pattern:
@@ -447,12 +438,12 @@ pub fn trans_intrinsic_call<'a, 'blk, 'tcx>(bcx: &BlockAndBuilder<'blk, 'tcx>,
                     let sty = &substs.type_at(0).sty;
                     if int_type_width_signed(sty, ccx).is_some() {
                         let weak = if split[1] == "cxchgweak" { llvm::True } else { llvm::False };
-                        let val = AtomicCmpXchg(bcx, llargs[0], llargs[1], llargs[2],
-                                                order, failorder, weak);
-                        let result = ExtractValue(bcx, val, 0);
-                        let success = ZExt(bcx, ExtractValue(bcx, val, 1), Type::bool(bcx.ccx()));
-                        Store(bcx, result, StructGEP(bcx, llresult, 0));
-                        Store(bcx, success, StructGEP(bcx, llresult, 1));
+                        let val = bcx.atomic_cmpxchg(llargs[0], llargs[1], llargs[2], order,
+                                                     failorder, weak);
+                        let result = bcx.extract_value(val, 0);
+                        let success = bcx.zext(bcx.extract_value(val, 1), Type::bool(bcx.ccx()));
+                        bcx.store(result, bcx.struct_gep(llresult, 0));
+                        bcx.store(success, bcx.struct_gep(llresult, 1));
                     } else {
                         span_invalid_monomorphization_error(
                             tcx.sess, span,
@@ -465,7 +456,7 @@ pub fn trans_intrinsic_call<'a, 'blk, 'tcx>(bcx: &BlockAndBuilder<'blk, 'tcx>,
                 "load" => {
                     let sty = &substs.type_at(0).sty;
                     if int_type_width_signed(sty, ccx).is_some() {
-                        AtomicLoad(bcx, llargs[0], order)
+                        bcx.atomic_load(llargs[0], order)
                     } else {
                         span_invalid_monomorphization_error(
                             tcx.sess, span,
@@ -478,7 +469,7 @@ pub fn trans_intrinsic_call<'a, 'blk, 'tcx>(bcx: &BlockAndBuilder<'blk, 'tcx>,
                 "store" => {
                     let sty = &substs.type_at(0).sty;
                     if int_type_width_signed(sty, ccx).is_some() {
-                        AtomicStore(bcx, llargs[1], llargs[0], order);
+                        bcx.atomic_store(llargs[1], llargs[0], order);
                     } else {
                         span_invalid_monomorphization_error(
                             tcx.sess, span,
@@ -489,12 +480,12 @@ pub fn trans_intrinsic_call<'a, 'blk, 'tcx>(bcx: &BlockAndBuilder<'blk, 'tcx>,
                 }
 
                 "fence" => {
-                    AtomicFence(bcx, order, llvm::SynchronizationScope::CrossThread);
+                    bcx.atomic_fence(order, llvm::SynchronizationScope::CrossThread);
                     C_nil(ccx)
                 }
 
                 "singlethreadfence" => {
-                    AtomicFence(bcx, order, llvm::SynchronizationScope::SingleThread);
+                    bcx.atomic_fence(order, llvm::SynchronizationScope::SingleThread);
                     C_nil(ccx)
                 }
 
@@ -517,7 +508,7 @@ pub fn trans_intrinsic_call<'a, 'blk, 'tcx>(bcx: &BlockAndBuilder<'blk, 'tcx>,
                     let sty = &substs.type_at(0).sty;
                     if int_type_width_signed(sty, ccx).is_some() {
-                        AtomicRMW(bcx, atom_op, llargs[0], llargs[1], order)
+                        bcx.atomic_rmw(atom_op, llargs[0], llargs[1], order)
                     } else {
                         span_invalid_monomorphization_error(
                             tcx.sess, span,
@@ -609,25 +600,24 @@ pub fn trans_intrinsic_call<'a, 'blk, 'tcx>(bcx: &BlockAndBuilder<'blk, 'tcx>,
                             let arg = adt::MaybeSizedValue::sized(llarg);
                             (0..contents.len())
                                 .map(|i| {
-                                    Load(bcx, adt::trans_field_ptr(bcx, arg_type, arg, Disr(0), i))
+                                    bcx.load(adt::trans_field_ptr(bcx, arg_type, arg, Disr(0), i))
                                 })
                                 .collect()
                         }
                         intrinsics::Type::Pointer(_, Some(ref llvm_elem), _) => {
                             let llvm_elem = one(ty_to_type(bcx.ccx(), llvm_elem, &mut false));
-                            vec![PointerCast(bcx, llarg,
+                            vec![bcx.pointercast(llarg,
                                              llvm_elem.ptr_to())]
                         }
                         intrinsics::Type::Vector(_, Some(ref llvm_elem), length) => {
                             let llvm_elem = one(ty_to_type(bcx.ccx(), llvm_elem, &mut false));
-                            vec![BitCast(bcx, llarg,
-                                         Type::vector(&llvm_elem, length as u64))]
+                            vec![bcx.bitcast(llarg, Type::vector(&llvm_elem, length as u64))]
                        }
                         intrinsics::Type::Integer(_, width, llvm_width) if width != llvm_width => {
                             // the LLVM intrinsic uses a smaller integer
                             // size than the C intrinsic's signature, so
                             // we have to trim it down here.
-                            vec![Trunc(bcx, llarg, Type::ix(bcx.ccx(), llvm_width as u64))]
+                            vec![bcx.trunc(llarg, Type::ix(bcx.ccx(), llvm_width as u64))]
                         }
                         _ => vec![llarg],
                     }
@@ -664,7 +654,7 @@ pub fn trans_intrinsic_call<'a, 'blk, 'tcx>(bcx: &BlockAndBuilder<'blk, 'tcx>,
                     let f = declare::declare_cfn(ccx,
                                                  name,
                                                  Type::func(&inputs, &outputs));
-                    Call(bcx, f, &llargs, call_debug_location)
+                    bcx.call(f, &llargs, bcx.lpad().and_then(|b| b.bundle()))
                 }
             };
 
@@ -674,8 +664,8 @@ pub fn trans_intrinsic_call<'a, 'blk, 'tcx>(bcx: &BlockAndBuilder<'blk, 'tcx>,
                     assert!(!flatten);
 
                     for i in 0..elems.len() {
-                        let val = ExtractValue(bcx, val, i);
-                        Store(bcx, val, StructGEP(bcx, llresult, i));
+                        let val = bcx.extract_value(val, i);
+                        bcx.store(val, bcx.struct_gep(llresult, i));
                     }
                     C_nil(ccx)
                 }
@@ -687,8 +677,8 @@ pub fn trans_intrinsic_call<'a, 'blk, 'tcx>(bcx: &BlockAndBuilder<'blk, 'tcx>,
     if val_ty(llval) != Type::void(ccx) &&
        machine::llsize_of_alloc(ccx, val_ty(llval)) != 0 {
         if let Some(ty) = fn_ty.ret.cast {
-            let ptr = PointerCast(bcx, llresult, ty.ptr_to());
-            let store = Store(bcx, llval, ptr);
+            let ptr = bcx.pointercast(llresult, ty.ptr_to());
+            let store = bcx.store(llval, ptr);
             unsafe {
                 llvm::LLVMSetAlignment(store, type_of::align_of(ccx, ret_ty));
             }
@@ -704,8 +694,7 @@ fn copy_intrinsic<'blk, 'tcx>(bcx: &BlockAndBuilder<'blk, 'tcx>,
                               tp_ty: Ty<'tcx>,
                               dst: ValueRef,
                               src: ValueRef,
-                              count: ValueRef,
-                              call_debug_location: DebugLoc)
+                              count: ValueRef)
                               -> ValueRef {
     let ccx = bcx.ccx();
     let lltp_ty = type_of::type_of(ccx, tp_ty);
@@ -721,18 +710,17 @@ fn copy_intrinsic<'blk, 'tcx>(bcx: &BlockAndBuilder<'blk, 'tcx>,
                        operation,
                        int_size);
 
-    let dst_ptr = PointerCast(bcx, dst, Type::i8p(ccx));
-    let src_ptr = PointerCast(bcx, src, Type::i8p(ccx));
+    let dst_ptr = bcx.pointercast(dst, Type::i8p(ccx));
+    let src_ptr = bcx.pointercast(src, Type::i8p(ccx));
     let llfn = ccx.get_intrinsic(&name);
 
-    Call(bcx,
-         llfn,
-         &[dst_ptr,
-           src_ptr,
-           Mul(bcx, size, count, DebugLoc::None),
-           align,
-           C_bool(ccx, volatile)],
-         call_debug_location)
+    bcx.call(llfn,
+             &[dst_ptr,
+               src_ptr,
+               bcx.mul(size, count),
+               align,
+               C_bool(ccx, volatile)],
+             bcx.lpad().and_then(|b| b.bundle()))
 }
 
 fn memset_intrinsic<'blk, 'tcx>(bcx: &BlockAndBuilder<'blk, 'tcx>,
@@ -740,8 +728,7 @@ fn memset_intrinsic<'blk, 'tcx>(bcx: &BlockAndBuilder<'blk, 'tcx>,
                                 tp_ty: Ty<'tcx>,
                                 dst: ValueRef,
                                 val: ValueRef,
-                                count: ValueRef,
-                                call_debug_location: DebugLoc)
+                                count: ValueRef)
                                 -> ValueRef {
     let ccx = bcx.ccx();
     let lltp_ty = type_of::type_of(ccx, tp_ty);
@@ -751,44 +738,42 @@ fn memset_intrinsic<'blk, 'tcx>(bcx: &BlockAndBuilder<'blk, 'tcx>,
 
     let name = format!("llvm.memset.p0i8.i{}", int_size);
 
-    let dst_ptr = PointerCast(bcx, dst, Type::i8p(ccx));
+    let dst_ptr = bcx.pointercast(dst, Type::i8p(ccx));
     let llfn = ccx.get_intrinsic(&name);
 
-    Call(bcx,
-         llfn,
-         &[dst_ptr,
-           val,
-           Mul(bcx, size, count, DebugLoc::None),
-           align,
-           C_bool(ccx, volatile)],
-         call_debug_location)
+    bcx.call(
+        llfn,
+        &[dst_ptr,
+          val,
+          bcx.mul(size, count),
+          align,
+          C_bool(ccx, volatile)],
+        bcx.lpad().and_then(|b| b.bundle()))
 }
 
 fn count_zeros_intrinsic(bcx: &BlockAndBuilder,
                          name: &str,
-                         val: ValueRef,
-                         call_debug_location: DebugLoc)
+                         val: ValueRef)
                          -> ValueRef {
     let y = C_bool(bcx.ccx(), false);
     let llfn = bcx.ccx().get_intrinsic(&name);
-    Call(bcx, llfn, &[val, y], call_debug_location)
+    bcx.call(llfn, &[val, y], bcx.lpad().and_then(|b| b.bundle()))
 }
 
 fn with_overflow_intrinsic<'blk, 'tcx>(bcx: &BlockAndBuilder<'blk, 'tcx>,
                                        name: &str,
                                        a: ValueRef,
                                        b: ValueRef,
-                                       out: ValueRef,
-                                       call_debug_location: DebugLoc)
+                                       out: ValueRef)
                                        -> ValueRef {
     let llfn = bcx.ccx().get_intrinsic(&name);
 
     // Convert `i1` to a `bool`, and write it to the out parameter
-    let val = Call(bcx, llfn, &[a, b], call_debug_location);
-    let result = ExtractValue(bcx, val, 0);
-    let overflow = ZExt(bcx, ExtractValue(bcx, val, 1), Type::bool(bcx.ccx()));
-    Store(bcx, result, StructGEP(bcx, out, 0));
-    Store(bcx, overflow, StructGEP(bcx, out, 1));
+    let val = bcx.call(llfn, &[a, b], bcx.lpad().and_then(|b| b.bundle()));
+    let result = bcx.extract_value(val, 0);
+    let overflow = bcx.zext(bcx.extract_value(val, 1), Type::bool(bcx.ccx()));
+    bcx.store(result, bcx.struct_gep(out, 0));
+    bcx.store(overflow, bcx.struct_gep(out, 1));
 
     C_nil(bcx.ccx())
 }
@@ -799,15 +784,14 @@ fn try_intrinsic<'blk, 'tcx>(
     data: ValueRef,
     local_ptr: ValueRef,
     dest: ValueRef,
-    dloc: DebugLoc
 ) {
     if bcx.sess().no_landing_pads() {
-        Call(bcx, func, &[data], dloc);
-        Store(bcx, C_null(Type::i8p(&bcx.ccx())), dest);
+        bcx.call(func, &[data], bcx.lpad().and_then(|b| b.bundle()));
+        bcx.store(C_null(Type::i8p(&bcx.ccx())), dest);
     } else if wants_msvc_seh(bcx.sess()) {
-        trans_msvc_try(bcx, func, data, local_ptr, dest, dloc);
+        trans_msvc_try(bcx, func, data, local_ptr, dest);
     } else {
-        trans_gnu_try(bcx, func, data, local_ptr, dest, dloc);
+        trans_gnu_try(bcx, func, data, local_ptr, dest);
     }
 }
 
@@ -822,13 +806,11 @@ fn trans_msvc_try<'blk, 'tcx>(bcx: &BlockAndBuilder<'blk, 'tcx>,
                               func: ValueRef,
                               data: ValueRef,
                               local_ptr: ValueRef,
-                              dest: ValueRef,
-                              dloc: DebugLoc) {
+                              dest: ValueRef) {
     let llfn = get_rust_try_fn(bcx.fcx(), &mut |bcx| {
         let ccx = bcx.ccx();
-        let dloc = DebugLoc::None;
 
-        SetPersonalityFn(&bcx, bcx.fcx().eh_personality());
+        bcx.set_personality_fn(bcx.fcx().eh_personality());
 
         let normal = bcx.fcx().new_block("normal").build();
         let catchswitch = bcx.fcx().new_block("catchswitch").build();
@@ -879,36 +861,37 @@ fn trans_msvc_try<'blk, 'tcx>(bcx: &BlockAndBuilder<'blk, 'tcx>,
         //
         // More information can be found in libstd's seh.rs implementation.
         let i64p = Type::i64(ccx).ptr_to();
-        let slot = Alloca(&bcx, i64p, "slot");
-        Invoke(&bcx, func, &[data], normal.llbb(), catchswitch.llbb(), dloc);
+        let slot = bcx.fcx().alloca(i64p, "slot");
+        bcx.invoke(func, &[data], normal.llbb(), catchswitch.llbb(),
+                   bcx.lpad().and_then(|b| b.bundle()));
 
-        Ret(&normal, C_i32(ccx, 0), dloc);
+        normal.ret(C_i32(ccx, 0));
 
-        let cs = CatchSwitch(&catchswitch, None, None, 1);
-        AddHandler(&catchswitch, cs, catchpad.llbb());
+        let cs = catchswitch.catch_switch(None, None, 1);
+        catchswitch.add_handler(cs, catchpad.llbb());
 
         let tcx = ccx.tcx();
         let tydesc = match tcx.lang_items.msvc_try_filter() {
             Some(did) => ::consts::get_static(ccx, did),
             None => bug!("msvc_try_filter not defined"),
         };
-        let tok = CatchPad(&catchpad, cs, &[tydesc, C_i32(ccx, 0), slot]);
-        let addr = Load(&catchpad, slot);
-        let arg1 = Load(&catchpad, addr);
+        let tok = catchpad.catch_pad(cs, &[tydesc, C_i32(ccx, 0), slot]);
+        let addr = catchpad.load(slot);
+        let arg1 = catchpad.load(addr);
         let val1 = C_i32(ccx, 1);
-        let arg2 = Load(&catchpad, InBoundsGEP(&catchpad, addr, &[val1]));
-        let local_ptr = BitCast(&catchpad, local_ptr, i64p);
-        Store(&catchpad, arg1, local_ptr);
-        Store(&catchpad, arg2, InBoundsGEP(&catchpad, local_ptr, &[val1]));
-        CatchRet(&catchpad, tok, caught.llbb());
+        let arg2 = catchpad.load(catchpad.inbounds_gep(addr, &[val1]));
+        let local_ptr = catchpad.bitcast(local_ptr, i64p);
+        catchpad.store(arg1, local_ptr);
+        catchpad.store(arg2, catchpad.inbounds_gep(local_ptr, &[val1]));
+        catchpad.catch_ret(tok, caught.llbb());
 
-        Ret(&caught, C_i32(ccx, 1), dloc);
+        caught.ret(C_i32(ccx, 1));
     });
 
     // Note that no invoke is used here because by definition this function
     // can't panic (that's what it's catching).
-    let ret = Call(bcx, llfn, &[func, data, local_ptr], dloc);
-    Store(bcx, ret, dest);
+    let ret = bcx.call(llfn, &[func, data, local_ptr], bcx.lpad().and_then(|b| b.bundle()));
+    bcx.store(ret, dest);
 }
 
 // Definition of the standard "try" function for Rust using the GNU-like model
@@ -926,11 +909,9 @@ fn trans_gnu_try<'blk, 'tcx>(bcx: &BlockAndBuilder<'blk, 'tcx>,
                              func: ValueRef,
                              data: ValueRef,
                              local_ptr: ValueRef,
-                             dest: ValueRef,
-                             dloc: DebugLoc) {
+                             dest: ValueRef) {
     let llfn = get_rust_try_fn(bcx.fcx(), &mut |bcx| {
         let ccx = bcx.ccx();
-        let dloc = DebugLoc::None;
 
         // Translates the shims described above:
         //
@@ -955,8 +936,8 @@ fn trans_gnu_try<'blk, 'tcx>(bcx: &BlockAndBuilder<'blk, 'tcx>,
         let func = llvm::get_param(bcx.fcx().llfn, 0);
         let data = llvm::get_param(bcx.fcx().llfn, 1);
         let local_ptr = llvm::get_param(bcx.fcx().llfn, 2);
-        Invoke(&bcx, func, &[data], then.llbb(), catch.llbb(), dloc);
-        Ret(&then, C_i32(ccx, 0), dloc);
+        bcx.invoke(func, &[data], then.llbb(), catch.llbb(), bcx.lpad().and_then(|b| b.bundle()));
+        then.ret(C_i32(ccx, 0));
 
         // Type indicator for the exception being thrown.
         //
@@ -966,17 +947,17 @@ fn trans_gnu_try<'blk, 'tcx>(bcx: &BlockAndBuilder<'blk, 'tcx>,
        // rust_try ignores the selector.
         let lpad_ty = Type::struct_(ccx, &[Type::i8p(ccx), Type::i32(ccx)],
                                     false);
-        let vals = LandingPad(&catch, lpad_ty, bcx.fcx().eh_personality(), 1);
-        AddClause(&catch, vals, C_null(Type::i8p(ccx)));
-        let ptr = ExtractValue(&catch, vals, 0);
-        Store(&catch, ptr, BitCast(&catch, local_ptr, Type::i8p(ccx).ptr_to()));
-        Ret(&catch, C_i32(ccx, 1), dloc);
+        let vals = catch.landing_pad(lpad_ty, bcx.fcx().eh_personality(), 1, catch.fcx().llfn);
+        catch.add_clause(vals, C_null(Type::i8p(ccx)));
+        let ptr = catch.extract_value(vals, 0);
+        catch.store(ptr, catch.bitcast(local_ptr, Type::i8p(ccx).ptr_to()));
+        catch.ret(C_i32(ccx, 1));
     });
 
     // Note that no invoke is used here because by definition this function
     // can't panic (that's what it's catching).
-    let ret = Call(bcx, llfn, &[func, data, local_ptr], dloc);
-    Store(bcx, ret, dest);
+    let ret = bcx.call(llfn, &[func, data, local_ptr], bcx.lpad().and_then(|b| b.bundle()));
+    bcx.store(ret, dest);
 }
 
 // Helper function to give a Block to a closure to translate a shim function.
@@ -1042,7 +1023,6 @@ fn generic_simd_intrinsic<'blk, 'tcx, 'a>(
     llargs: &[ValueRef],
     ret_ty: Ty<'tcx>,
     llret_ty: Type,
-    call_debug_location: DebugLoc,
     span: Span
 ) -> ValueRef {
     // macros for error handling:
@@ -1113,8 +1093,7 @@ fn generic_simd_intrinsic<'blk, 'tcx, 'a>(
                                  llargs[1],
                                  in_elem,
                                  llret_ty,
-                                 cmp_op,
-                                 call_debug_location)
+                                 cmp_op)
     }
 
     if name.starts_with("simd_shuffle") {
@@ -1163,20 +1142,20 @@ fn generic_simd_intrinsic<'blk, 'tcx, 'a>(
             None => return C_null(llret_ty)
         };
 
-        return ShuffleVector(bcx, llargs[0], llargs[1], C_vector(&indices))
+        return bcx.shuffle_vector(llargs[0], llargs[1], C_vector(&indices))
     }
 
     if name == "simd_insert" {
         require!(in_elem == arg_tys[2],
                  "expected inserted type `{}` (element of input `{}`), found `{}`",
                  in_elem, in_ty, arg_tys[2]);
-        return InsertElement(bcx, llargs[0], llargs[2], llargs[1])
+        return bcx.insert_element(llargs[0], llargs[2], llargs[1])
    }
     if name == "simd_extract" {
         require!(ret_ty == in_elem,
                  "expected return type `{}` (element of input `{}`), found `{}`",
                  in_elem, in_ty, ret_ty);
-        return ExtractElement(bcx, llargs[0], llargs[1])
+        return bcx.extract_element(llargs[0], llargs[1])
     }
 
     if name == "simd_cast" {
@@ -1212,34 +1191,34 @@ fn generic_simd_intrinsic<'blk, 'tcx, 'a>(
         match (in_style, out_style) {
             (Style::Int(in_is_signed), Style::Int(_)) => {
                 return match in_width.cmp(&out_width) {
-                    Ordering::Greater => Trunc(bcx, llargs[0], llret_ty),
+                    Ordering::Greater => bcx.trunc(llargs[0], llret_ty),
                     Ordering::Equal => llargs[0],
                     Ordering::Less => if in_is_signed {
-                        SExt(bcx, llargs[0], llret_ty)
+                        bcx.sext(llargs[0], llret_ty)
                     } else {
-                        ZExt(bcx, llargs[0], llret_ty)
+                        bcx.zext(llargs[0], llret_ty)
                     }
                 }
             }
             (Style::Int(in_is_signed), Style::Float) => {
                 return if in_is_signed {
-                    SIToFP(bcx, llargs[0], llret_ty)
+                    bcx.sitofp(llargs[0], llret_ty)
                 } else {
-                    UIToFP(bcx, llargs[0], llret_ty)
+                    bcx.uitofp(llargs[0], llret_ty)
                 }
             }
             (Style::Float, Style::Int(out_is_signed)) => {
                 return if out_is_signed {
-                    FPToSI(bcx, llargs[0], llret_ty)
+                    bcx.fptosi(llargs[0], llret_ty)
                 } else {
-                    FPToUI(bcx, llargs[0], llret_ty)
+                    bcx.fptoui(llargs[0], llret_ty)
                 }
             }
             (Style::Float, Style::Float) => {
                 return match in_width.cmp(&out_width) {
-                    Ordering::Greater => FPTrunc(bcx, llargs[0], llret_ty),
+                    Ordering::Greater => bcx.fptrunc(llargs[0], llret_ty),
                     Ordering::Equal => llargs[0],
-                    Ordering::Less => FPExt(bcx, llargs[0], llret_ty)
+                    Ordering::Less => bcx.fpext(llargs[0], llret_ty)
                 }
             }
            _ => {/* Unsupported. Fallthrough. */}
@@ -1250,13 +1229,13 @@ fn generic_simd_intrinsic<'blk, 'tcx, 'a>(
                      ret_ty, out_elem);
     }
     macro_rules! arith {
-        ($($name: ident: $($($p: ident),* => $call: expr),*;)*) => {
+        ($($name: ident: $($($p: ident),* => $call: ident),*;)*) => {
             $(
                 if name == stringify!($name) {
                     match in_elem.sty {
                         $(
                             $(ty::$p(_))|* => {
-                                return $call(bcx, llargs[0], llargs[1], call_debug_location)
+                                return bcx.$call(llargs[0], llargs[1])
                             }
                             )*
                         _ => {},
@@ -1269,15 +1248,15 @@ fn generic_simd_intrinsic<'blk, 'tcx, 'a>(
         }
     }
     arith! {
-        simd_add: TyUint, TyInt => Add, TyFloat => FAdd;
-        simd_sub: TyUint, TyInt => Sub, TyFloat => FSub;
-        simd_mul: TyUint, TyInt => Mul, TyFloat => FMul;
-        simd_div: TyFloat => FDiv;
-        simd_shl: TyUint, TyInt => Shl;
-        simd_shr: TyUint => LShr, TyInt => AShr;
-        simd_and: TyUint, TyInt => And;
-        simd_or: TyUint, TyInt => Or;
-        simd_xor: TyUint, TyInt => Xor;
+        simd_add: TyUint, TyInt => add, TyFloat => fadd;
+        simd_sub: TyUint, TyInt => sub, TyFloat => fsub;
+        simd_mul: TyUint, TyInt => mul, TyFloat => fmul;
+        simd_div: TyFloat => fdiv;
+        simd_shl: TyUint, TyInt => shl;
+        simd_shr: TyUint => lshr, TyInt => ashr;
+        simd_and: TyUint, TyInt => and;
+        simd_or: TyUint, TyInt => or;
+        simd_xor: TyUint, TyInt => xor;
     }
     span_bug!(span, "unknown SIMD intrinsic");
 }
diff --git a/src/librustc_trans/lib.rs b/src/librustc_trans/lib.rs
index d842827b6fead..572d96eaef29d 100644
--- a/src/librustc_trans/lib.rs
+++ b/src/librustc_trans/lib.rs
@@ -96,7 +96,6 @@ mod assert_module_sources;
 mod attributes;
 mod base;
 mod basic_block;
-mod build;
 mod builder;
 mod cabi_aarch64;
 mod cabi_arm;
diff --git a/src/librustc_trans/meth.rs b/src/librustc_trans/meth.rs
index 1a93773a9ecc5..75746584becbb 100644
--- a/src/librustc_trans/meth.rs
+++ b/src/librustc_trans/meth.rs
@@ -14,11 +14,9 @@ use llvm::{ValueRef, get_params};
 use rustc::traits;
 use abi::FnType;
 use base::*;
-use build::*;
 use callee::Callee;
 use common::*;
 use consts;
-use debuginfo::DebugLoc;
 use declare;
 use glue;
 use machine;
@@ -40,7 +38,7 @@ pub fn get_virtual_method<'blk, 'tcx>(bcx: &BlockAndBuilder<'blk, 'tcx>,
     debug!("get_virtual_method(vtable_index={}, llvtable={:?})",
            vtable_index, Value(llvtable));
 
-    Load(bcx, GEPi(bcx, llvtable, &[vtable_index + VTABLE_OFFSET]))
+    bcx.load(bcx.gepi(llvtable, &[vtable_index + VTABLE_OFFSET]))
 }
 
 /// Generate a shim function that allows an object type like `SomeTrait` to
@@ -93,10 +91,9 @@ pub fn trans_object_shim<'a, 'tcx>(ccx: &'a CrateContext<'a, 'tcx>,
     let dest = fcx.llretslotptr.get();
     let llargs = get_params(fcx.llfn);
-    bcx = callee.call(bcx, DebugLoc::None,
-                      &llargs[fcx.fn_ty.ret.is_indirect() as usize..], dest).0;
+    bcx = callee.call(bcx, &llargs[fcx.fn_ty.ret.is_indirect() as usize..], dest).0;
 
-    fcx.finish(&bcx, DebugLoc::None);
+    fcx.finish(&bcx);
 
     llfn
 }
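A pattern that recurs throughout the glue and intrinsic changes above: the old build.rs Call wrapper fetched the landing-pad operand bundle implicitly, so with raw builder methods each call site now passes it explicitly. The idiom, exactly as this patch uses it:

    // The funclet operand bundle (needed for MSVC-style unwinding) must
    // accompany every call emitted inside a landing pad.
    let bundle = bcx.lpad().and_then(|b| b.bundle());
    bcx.call(llfn, &llargs, bundle);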
diff --git a/src/librustc_trans/mir/block.rs b/src/librustc_trans/mir/block.rs
index 9af02f40111f5..caeb25241e5c8 100644
--- a/src/librustc_trans/mir/block.rs
+++ b/src/librustc_trans/mir/block.rs
@@ -16,10 +16,10 @@ use rustc::mir;
 use abi::{Abi, FnType, ArgType};
 use adt;
 use base;
-use build;
 use callee::{Callee, CalleeData, Fn, Intrinsic, NamedTupleConstructor, Virtual};
 use common::{self, Block, BlockAndBuilder, LandingPad};
 use common::{C_bool, C_str_slice, C_struct, C_u32, C_undef};
+use builder::Builder;
 use consts;
 use debuginfo::DebugLoc;
 use Disr;
@@ -167,7 +167,7 @@ impl<'bcx, 'tcx> MirContext<'bcx, 'tcx> {
                     if default_bb != Some(target) {
                         let llbb = llblock(self, target);
                         let llval = adt::trans_case(&bcx, ty, Disr::from(adt_variant.disr_val));
-                        build::AddCase(switch, llval, llbb)
+                        Builder::add_case(switch, llval, llbb)
                     }
                 }
             }
@@ -180,7 +180,7 @@ impl<'bcx, 'tcx> MirContext<'bcx, 'tcx> {
                 for (value, target) in values.iter().zip(targets) {
                     let val = Const::from_constval(bcx.ccx(), value.clone(), switch_ty);
                     let llbb = llblock(self, *target);
-                    build::AddCase(switch, val.llval, llbb)
+                    Builder::add_case(switch, val.llval, llbb)
                 }
             }
 
@@ -204,7 +204,7 @@ impl<'bcx, 'tcx> MirContext<'bcx, 'tcx> {
                 };
                 let llslot = match op.val {
                     Immediate(_) | Pair(..) => {
-                        let llscratch = build::AllocaFcx(bcx.fcx(), ret.original_ty, "ret");
+                        let llscratch = bcx.fcx().alloca(ret.original_ty, "ret");
                         self.store_operand(&bcx, llscratch, op);
                         llscratch
                     }
@@ -257,8 +257,8 @@ impl<'bcx, 'tcx> MirContext<'bcx, 'tcx> {
                     // I want to avoid touching all of trans.
                     let scratch = base::alloc_ty(&bcx, ty, "drop");
                     base::call_lifetime_start(&bcx, scratch);
-                    build::Store(&bcx, lvalue.llval, base::get_dataptr(&bcx, scratch));
-                    build::Store(&bcx, lvalue.llextra, base::get_meta(&bcx, scratch));
+                    bcx.store(lvalue.llval, base::get_dataptr(&bcx, scratch));
+                    bcx.store(lvalue.llextra, base::get_meta(&bcx, scratch));
                     scratch
                 };
                 if let Some(unwind) = unwind {
@@ -479,8 +479,8 @@ impl<'bcx, 'tcx> MirContext<'bcx, 'tcx> {
                     // I want to avoid touching all of trans.
                     let scratch = base::alloc_ty(&bcx, ty, "drop");
                     base::call_lifetime_start(&bcx, scratch);
-                    build::Store(&bcx, llval, base::get_dataptr(&bcx, scratch));
-                    build::Store(&bcx, llextra, base::get_meta(&bcx, scratch));
+                    bcx.store(llval, base::get_dataptr(&bcx, scratch));
+                    bcx.store(llextra, base::get_meta(&bcx, scratch));
                     scratch
                 };
                 if let Some(unwind) = *cleanup {
@@ -702,7 +702,7 @@ impl<'bcx, 'tcx> MirContext<'bcx, 'tcx> {
         let (mut llval, by_ref) = match op.val {
             Immediate(_) | Pair(..) => {
                 if arg.is_indirect() || arg.cast.is_some() {
-                    let llscratch = build::AllocaFcx(bcx.fcx(), arg.original_ty, "arg");
+                    let llscratch = bcx.fcx().alloca(arg.original_ty, "arg");
                     self.store_operand(bcx, llscratch, op);
                     (llscratch, true)
                 } else {
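Both drop paths in mir/block.rs spill a fat pointer before calling drop glue, since the glue still takes a single pointer argument (the limitation flagged as FIXME(#36457) in glue.rs): the data and metadata halves go into the two fields of a scratch slot. Condensed from the hunks above:

    let scratch = base::alloc_ty(&bcx, ty, "drop");
    base::call_lifetime_start(&bcx, scratch);
    bcx.store(lvalue.llval, base::get_dataptr(&bcx, scratch));
    bcx.store(lvalue.llextra, base::get_meta(&bcx, scratch));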
let env_ptr = if !env_ref { use base::*; - use build::*; use common::*; let alloc = alloca(bcx, val_ty(llval), "__debuginfo_env_ptr"); - Store(bcx, llval, alloc); + bcx.store(llval, alloc); alloc } else { llval diff --git a/src/librustc_trans/mir/operand.rs b/src/librustc_trans/mir/operand.rs index a7fdc4330becc..053eabb6fbf27 100644 --- a/src/librustc_trans/mir/operand.rs +++ b/src/librustc_trans/mir/operand.rs @@ -264,11 +264,10 @@ impl<'bcx, 'tcx> MirContext<'bcx, 'tcx> { OperandValue::Ref(r) => base::memcpy_ty(bcx, lldest, r, operand.ty), OperandValue::Immediate(s) => base::store_ty(bcx, s, lldest, operand.ty), OperandValue::Pair(a, b) => { - use build::*; let a = base::from_immediate(bcx, a); let b = base::from_immediate(bcx, b); - Store(bcx, a, StructGEP(bcx, lldest, 0)); - Store(bcx, b, StructGEP(bcx, lldest, 1)); + bcx.store(a, bcx.struct_gep(lldest, 0)); + bcx.store(b, bcx.struct_gep(lldest, 1)); } } } diff --git a/src/librustc_trans/mir/rvalue.rs b/src/librustc_trans/mir/rvalue.rs index 274871d7552f3..d9a0895de5bb9 100644 --- a/src/librustc_trans/mir/rvalue.rs +++ b/src/librustc_trans/mir/rvalue.rs @@ -19,7 +19,6 @@ use base; use callee::Callee; use common::{self, val_ty, C_bool, C_null, C_uint, BlockAndBuilder}; use common::{C_integral}; -use debuginfo::DebugLoc; use adt; use machine; use type_::Type; @@ -37,8 +36,7 @@ impl<'bcx, 'tcx> MirContext<'bcx, 'tcx> { pub fn trans_rvalue(&mut self, bcx: BlockAndBuilder<'bcx, 'tcx>, dest: LvalueRef<'tcx>, - rvalue: &mir::Rvalue<'tcx>, - debug_loc: DebugLoc) + rvalue: &mir::Rvalue<'tcx>) -> BlockAndBuilder<'bcx, 'tcx> { debug!("trans_rvalue(dest.llval={:?}, rvalue={:?})", @@ -59,7 +57,7 @@ impl<'bcx, 'tcx> MirContext<'bcx, 'tcx> { if common::type_is_fat_ptr(bcx.tcx(), cast_ty) { // into-coerce of a thin pointer to a fat pointer - just // use the operand path. 
- let (bcx, temp) = self.trans_rvalue_operand(bcx, rvalue, debug_loc); + let (bcx, temp) = self.trans_rvalue_operand(bcx, rvalue); self.store_operand(&bcx, dest.llval, temp); return bcx; } @@ -171,7 +169,7 @@ impl<'bcx, 'tcx> MirContext<'bcx, 'tcx> { _ => { assert!(rvalue_creates_operand(&self.mir, &bcx, rvalue)); - let (bcx, temp) = self.trans_rvalue_operand(bcx, rvalue, debug_loc); + let (bcx, temp) = self.trans_rvalue_operand(bcx, rvalue); self.store_operand(&bcx, dest.llval, temp); bcx } @@ -180,8 +178,7 @@ impl<'bcx, 'tcx> MirContext<'bcx, 'tcx> { pub fn trans_rvalue_operand(&mut self, bcx: BlockAndBuilder<'bcx, 'tcx>, - rvalue: &mir::Rvalue<'tcx>, - debug_loc: DebugLoc) + rvalue: &mir::Rvalue<'tcx>) -> (BlockAndBuilder<'bcx, 'tcx>, OperandRef<'tcx>) { assert!(rvalue_creates_operand(&self.mir, &bcx, rvalue), @@ -455,14 +452,7 @@ impl<'bcx, 'tcx> MirContext<'bcx, 'tcx> { let llalign = C_uint(bcx.ccx(), align); let llty_ptr = llty.ptr_to(); let box_ty = bcx.tcx().mk_box(content_ty); - let val = base::malloc_raw_dyn( - &bcx, - llty_ptr, - box_ty, - llsize, - llalign, - debug_loc - ); + let val = base::malloc_raw_dyn(&bcx, llty_ptr, box_ty, llsize, llalign); let operand = OperandRef { val: OperandValue::Immediate(val), ty: box_ty, @@ -526,23 +516,8 @@ impl<'bcx, 'tcx> MirContext<'bcx, 'tcx> { mir::BinOp::BitOr => bcx.or(lhs, rhs), mir::BinOp::BitAnd => bcx.and(lhs, rhs), mir::BinOp::BitXor => bcx.xor(lhs, rhs), - mir::BinOp::Shl => { - common::build_unchecked_lshift( - &bcx, - lhs, - rhs, - DebugLoc::None - ) - } - mir::BinOp::Shr => { - common::build_unchecked_rshift( - bcx, - input_ty, - lhs, - rhs, - DebugLoc::None - ) - } + mir::BinOp::Shl => common::build_unchecked_lshift(bcx, lhs, rhs), + mir::BinOp::Shr => common::build_unchecked_rshift(bcx, input_ty, lhs, rhs), mir::BinOp::Ne | mir::BinOp::Lt | mir::BinOp::Gt | mir::BinOp::Eq | mir::BinOp::Le | mir::BinOp::Ge => if is_nil { C_bool(bcx.ccx(), match op { diff --git a/src/librustc_trans/mir/statement.rs b/src/librustc_trans/mir/statement.rs index 9c872e214d2f4..47537c830dc3b 100644 --- a/src/librustc_trans/mir/statement.rs +++ b/src/librustc_trans/mir/statement.rs @@ -33,11 +33,10 @@ impl<'bcx, 'tcx> MirContext<'bcx, 'tcx> { if let mir::Lvalue::Local(index) = *lvalue { match self.locals[index] { LocalRef::Lvalue(tr_dest) => { - self.trans_rvalue(bcx, tr_dest, rvalue, debug_loc) + self.trans_rvalue(bcx, tr_dest, rvalue) } LocalRef::Operand(None) => { - let (bcx, operand) = self.trans_rvalue_operand(bcx, rvalue, - debug_loc); + let (bcx, operand) = self.trans_rvalue_operand(bcx, rvalue); self.locals[index] = LocalRef::Operand(Some(operand)); bcx } @@ -51,13 +50,13 @@ impl<'bcx, 'tcx> MirContext<'bcx, 'tcx> { } else { // If the type is zero-sized, it's already been set here, // but we still need to make sure we translate the operand - self.trans_rvalue_operand(bcx, rvalue, debug_loc).0 + self.trans_rvalue_operand(bcx, rvalue).0 } } } } else { let tr_dest = self.trans_lvalue(&bcx, lvalue); - self.trans_rvalue(bcx, tr_dest, rvalue, debug_loc) + self.trans_rvalue(bcx, tr_dest, rvalue) } } mir::StatementKind::SetDiscriminant{ref lvalue, variant_index} => { diff --git a/src/librustc_trans/tvec.rs b/src/librustc_trans/tvec.rs index 3dbaaff1f48f0..8e0069a2a43f8 100644 --- a/src/librustc_trans/tvec.rs +++ b/src/librustc_trans/tvec.rs @@ -13,9 +13,8 @@ use llvm; use llvm::ValueRef; use base::*; -use build::*; use common::*; -use debuginfo::DebugLoc; +use builder::Builder; use rustc::ty::Ty; pub fn slice_for_each<'blk, 'tcx, F>(bcx: 
BlockAndBuilder<'blk, 'tcx>, @@ -31,10 +30,10 @@ pub fn slice_for_each<'blk, 'tcx, F>(bcx: BlockAndBuilder<'blk, 'tcx>, // Special-case vectors with elements of size 0 so they don't go out of bounds (#9890) let zst = type_is_zero_size(bcx.ccx(), unit_ty); - let add = |bcx, a, b| if zst { - Add(bcx, a, b, DebugLoc::None) + let add = |bcx: &BlockAndBuilder, a, b| if zst { + bcx.add(a, b) } else { - InBoundsGEP(bcx, a, &[b]) + bcx.inbounds_gep(a, &[b]) }; let body_bcx = fcx.new_block("slice_loop_body").build(); @@ -42,28 +41,27 @@ pub fn slice_for_each<'blk, 'tcx, F>(bcx: BlockAndBuilder<'blk, 'tcx>, let header_bcx = fcx.new_block("slice_loop_header").build(); let start = if zst { - C_uint(bcx.ccx(), 0 as usize) + C_uint(bcx.ccx(), 0usize) } else { data_ptr }; let end = add(&bcx, start, len); - Br(&bcx, header_bcx.llbb(), DebugLoc::None); - let current = Phi(&header_bcx, val_ty(start), &[start], &[bcx.llbb()]); + bcx.br(header_bcx.llbb()); + let current = header_bcx.phi(val_ty(start), &[start], &[bcx.llbb()]); - let keep_going = - ICmp(&header_bcx, llvm::IntNE, current, end, DebugLoc::None); - CondBr(&header_bcx, keep_going, body_bcx.llbb(), next_bcx.llbb(), DebugLoc::None); + let keep_going = header_bcx.icmp(llvm::IntNE, current, end); + header_bcx.cond_br(keep_going, body_bcx.llbb(), next_bcx.llbb()); let body_bcx = f(body_bcx, if zst { data_ptr } else { current }); // FIXME(simulacrum): The code below is identical to the closure (add) above, but using the // closure doesn't compile due to body_bcx still being borrowed when dropped. let next = if zst { - Add(&body_bcx, current, C_uint(bcx.ccx(), 1usize), DebugLoc::None) + body_bcx.add(current, C_uint(bcx.ccx(), 1usize)) } else { - InBoundsGEP(&body_bcx, current, &[C_uint(bcx.ccx(), 1usize)]) + body_bcx.inbounds_gep(current, &[C_uint(bcx.ccx(), 1usize)]) }; - AddIncomingToPhi(current, next, body_bcx.llbb()); - Br(&body_bcx, header_bcx.llbb(), DebugLoc::None); + Builder::add_incoming_to_phi(current, next, body_bcx.llbb()); + body_bcx.br(header_bcx.llbb()); next_bcx } From e77d9289907c413446c5d5fee633ef29b2a09368 Mon Sep 17 00:00:00 2001 From: Mark-Simulacrum Date: Sun, 11 Dec 2016 10:29:44 -0700 Subject: [PATCH 004/103] Add notes regarding malloc_raw_dyn being unwind incompatible --- src/librustc/middle/lang_items.rs | 2 ++ src/librustc_trans/base.rs | 6 +++--- 2 files changed, 5 insertions(+), 3 deletions(-) diff --git a/src/librustc/middle/lang_items.rs b/src/librustc/middle/lang_items.rs index 1efc211b8c35b..3bc39fad7f1b5 100644 --- a/src/librustc/middle/lang_items.rs +++ b/src/librustc/middle/lang_items.rs @@ -327,6 +327,8 @@ language_item_table! { PanicBoundsCheckFnLangItem, "panic_bounds_check", panic_bounds_check_fn; PanicFmtLangItem, "panic_fmt", panic_fmt; + // ExchangeMallocFnLangItem cannot unwind, or MIR trans will break. See note + // on `malloc_raw_dyn` in librustc_trans/base.rs. ExchangeMallocFnLangItem, "exchange_malloc", exchange_malloc_fn; ExchangeFreeFnLangItem, "exchange_free", exchange_free_fn; BoxFreeFnLangItem, "box_free", box_free_fn; diff --git a/src/librustc_trans/base.rs b/src/librustc_trans/base.rs index 1e4c10c4fc766..df6b563d9fefa 100644 --- a/src/librustc_trans/base.rs +++ b/src/librustc_trans/base.rs @@ -199,9 +199,9 @@ fn require_alloc_fn<'blk, 'tcx>( } } -// The following malloc_raw_dyn* functions allocate a box to contain -// a given type, but with a potentially dynamic size. - +// malloc_raw_dyn allocates a box to contain a given type, but with a potentially dynamic size. 
+// +// MIR requires that ExchangeMallocFnLangItem cannot unwind. pub fn malloc_raw_dyn<'blk, 'tcx>(bcx: &BlockAndBuilder<'blk, 'tcx>, llty_ptr: Type, info_ty: Ty<'tcx>, From ad0a901d3739660bce7cb464da2ab5bcf519fbf8 Mon Sep 17 00:00:00 2001 From: Mark-Simulacrum Date: Sun, 11 Dec 2016 15:03:52 -0700 Subject: [PATCH 005/103] Remove *_builder --- src/librustc_trans/adt.rs | 10 +-- src/librustc_trans/base.rs | 106 ++++++++++-------------------- src/librustc_trans/builder.rs | 4 +- src/librustc_trans/cleanup.rs | 9 ++- src/librustc_trans/common.rs | 2 +- src/librustc_trans/glue.rs | 7 +- src/librustc_trans/mir/block.rs | 19 +++--- src/librustc_trans/mir/lvalue.rs | 4 +- src/librustc_trans/mir/mod.rs | 12 ++-- src/librustc_trans/mir/operand.rs | 8 +-- src/librustc_trans/mir/rvalue.rs | 11 ++-- src/librustc_trans/tvec.rs | 3 +- 12 files changed, 70 insertions(+), 125 deletions(-) diff --git a/src/librustc_trans/adt.rs b/src/librustc_trans/adt.rs index ef44a5fd60ebc..7f8eef5a51df3 100644 --- a/src/librustc_trans/adt.rs +++ b/src/librustc_trans/adt.rs @@ -494,19 +494,13 @@ fn assert_discr_in_range(min: Disr, max: Disr, discr: Disr) { } /// Access a field, at a point when the value's case is known. -pub fn trans_field_ptr<'blk, 'tcx>(bcx: &BlockAndBuilder<'blk, 'tcx>, t: Ty<'tcx>, - val: MaybeSizedValue, discr: Disr, ix: usize) -> ValueRef { - trans_field_ptr_builder(bcx, t, val, discr, ix) -} - -/// Access a field, at a point when the value's case is known. -pub fn trans_field_ptr_builder<'blk, 'tcx>(bcx: &BlockAndBuilder<'blk, 'tcx>, +pub fn trans_field_ptr<'blk, 'tcx>(bcx: &BlockAndBuilder<'blk, 'tcx>, t: Ty<'tcx>, val: MaybeSizedValue, discr: Disr, ix: usize) -> ValueRef { let l = bcx.ccx().layout_of(t); - debug!("trans_field_ptr_builder on {} represented as {:#?}", t, l); + debug!("trans_field_ptr on {} represented as {:#?}", t, l); // Note: if this ever needs to generate conditionals (e.g., if we // decide to do some kind of cdr-coding-like non-unique repr // someday), it will need to return a possibly-new bcx as well. diff --git a/src/librustc_trans/base.rs b/src/librustc_trans/base.rs index df6b563d9fefa..d8e4d05872e59 100644 --- a/src/librustc_trans/base.rs +++ b/src/librustc_trans/base.rs @@ -172,22 +172,14 @@ impl<'a, 'tcx> Drop for StatRecorder<'a, 'tcx> { } } -pub fn get_meta(bcx: &BlockAndBuilder, fat_ptr: ValueRef) -> ValueRef { +pub fn get_meta(bcx: &Builder, fat_ptr: ValueRef) -> ValueRef { bcx.struct_gep(fat_ptr, abi::FAT_PTR_EXTRA) } -pub fn get_dataptr(bcx: &BlockAndBuilder, fat_ptr: ValueRef) -> ValueRef { +pub fn get_dataptr(bcx: &Builder, fat_ptr: ValueRef) -> ValueRef { bcx.struct_gep(fat_ptr, abi::FAT_PTR_ADDR) } -pub fn get_meta_builder(b: &Builder, fat_ptr: ValueRef) -> ValueRef { - b.struct_gep(fat_ptr, abi::FAT_PTR_EXTRA) -} - -pub fn get_dataptr_builder(b: &Builder, fat_ptr: ValueRef) -> ValueRef { - b.struct_gep(fat_ptr, abi::FAT_PTR_ADDR) -} - fn require_alloc_fn<'blk, 'tcx>( bcx: &BlockAndBuilder<'blk, 'tcx>, info_ty: Ty<'tcx>, it: LangItem ) -> DefId { @@ -516,13 +508,7 @@ pub fn call_assume<'a, 'tcx>(b: &Builder<'a, 'tcx>, val: ValueRef) { /// Helper for loading values from memory. Does the necessary conversion if the in-memory type /// differs from the type used for SSA values. Also handles various special cases where the type /// gives us better information about what we are loading. 
-pub fn load_ty<'blk, 'tcx>(
-    cx: &BlockAndBuilder<'blk, 'tcx>, ptr: ValueRef, t: Ty<'tcx>
-) -> ValueRef {
-    load_ty_builder(cx, ptr, t)
-}
-
-pub fn load_ty_builder<'a, 'tcx>(b: &Builder<'a, 'tcx>, ptr: ValueRef, t: Ty<'tcx>) -> ValueRef {
+pub fn load_ty<'a, 'tcx>(b: &Builder<'a, 'tcx>, ptr: ValueRef, t: Ty<'tcx>) -> ValueRef {
     let ccx = b.ccx;
     if type_is_zero_size(ccx, t) {
         return C_undef(type_of::type_of(ccx, t));
@@ -581,22 +567,14 @@ pub fn store_fat_ptr<'blk, 'tcx>(cx: &BlockAndBuilder<'blk, 'tcx>,
     cx.store(extra, get_meta(cx, dst));
 }
 
-pub fn load_fat_ptr<'blk, 'tcx>(cx: &BlockAndBuilder<'blk, 'tcx>,
-                                src: ValueRef,
-                                ty: Ty<'tcx>)
-                                -> (ValueRef, ValueRef)
-{
-    load_fat_ptr_builder(cx, src, ty)
-}
-
-pub fn load_fat_ptr_builder<'a, 'tcx>(
+pub fn load_fat_ptr<'a, 'tcx>(
     b: &Builder<'a, 'tcx>,
     src: ValueRef,
     t: Ty<'tcx>)
     -> (ValueRef, ValueRef) {
-    let ptr = get_dataptr_builder(b, src);
+    let ptr = get_dataptr(b, src);
     let ptr = if t.is_region_ptr() || t.is_unique() {
         b.load_nonnull(ptr)
     } else {
@@ -604,7 +582,7 @@ pub fn load_fat_ptr_builder<'a, 'tcx>(
     };
 
     // FIXME: emit metadata on `meta`.
-    let meta = b.load(get_meta_builder(b, src));
+    let meta = b.load(get_meta(b, src));
 
     (ptr, meta)
 }
 
@@ -647,56 +625,38 @@ pub fn with_cond<'blk, 'tcx, F>(
 
 pub enum Lifetime { Start, End }
 
-// If LLVM lifetime intrinsic support is enabled (i.e. optimizations
-// on), and `ptr` is nonzero-sized, then extracts the size of `ptr`
-// and the intrinsic for `lt` and passes them to `emit`, which is in
-// charge of generating code to call the passed intrinsic on whatever
-// block of generated code is targetted for the intrinsic.
-//
-// If LLVM lifetime intrinsic support is disabled (i.e. optimizations
-// off) or `ptr` is zero-sized, then no-op (does not call `emit`).
-fn core_lifetime_emit<'blk, 'tcx, F>(ccx: &'blk CrateContext<'blk, 'tcx>,
-                                     ptr: ValueRef,
-                                     lt: Lifetime,
-                                     emit: F)
-    where F: FnOnce(&'blk CrateContext<'blk, 'tcx>, machine::llsize, ValueRef)
-{
-    if ccx.sess().opts.optimize == config::OptLevel::No {
-        return;
-    }
-
-    let _icx = push_ctxt(match lt {
-        Lifetime::Start => "lifetime_start",
-        Lifetime::End => "lifetime_end"
-    });
-
-    let size = machine::llsize_of_alloc(ccx, val_ty(ptr).element_type());
-    if size == 0 {
-        return;
-    }
-
-    let lifetime_intrinsic = ccx.get_intrinsic(match lt {
-        Lifetime::Start => "llvm.lifetime.start",
-        Lifetime::End => "llvm.lifetime.end"
-    });
-    emit(ccx, size, lifetime_intrinsic)
-}
-
 impl Lifetime {
+    // If LLVM lifetime intrinsic support is enabled (i.e. optimizations
+    // on), and `ptr` is nonzero-sized, then extracts the size of `ptr`
+    // and the intrinsic for `lt` and passes them to `emit`, which is in
+    // charge of generating code to call the passed intrinsic on whatever
+    // block of generated code is targeted for the intrinsic.
+    //
+    // If LLVM lifetime intrinsic support is disabled (i.e. optimizations
+    // off) or `ptr` is zero-sized, then no-op (does not call `emit`).
pub fn call(self, b: &Builder, ptr: ValueRef) { - core_lifetime_emit(b.ccx, ptr, self, |ccx, size, lifetime_intrinsic| { - let ptr = b.pointercast(ptr, Type::i8p(ccx)); - b.call(lifetime_intrinsic, &[C_u64(ccx, size), ptr], None); + if b.ccx.sess().opts.optimize == config::OptLevel::No { + return; + } + + let _icx = push_ctxt(match self { + Lifetime::Start => "lifetime_start", + Lifetime::End => "lifetime_end" }); - } -} -pub fn call_lifetime_start(bcx: &BlockAndBuilder, ptr: ValueRef) { - Lifetime::Start.call(bcx, ptr); -} + let size = machine::llsize_of_alloc(b.ccx, val_ty(ptr).element_type()); + if size == 0 { + return; + } + + let lifetime_intrinsic = b.ccx.get_intrinsic(match self { + Lifetime::Start => "llvm.lifetime.start", + Lifetime::End => "llvm.lifetime.end" + }); -pub fn call_lifetime_end(bcx: &BlockAndBuilder, ptr: ValueRef) { - Lifetime::End.call(bcx, ptr); + let ptr = b.pointercast(ptr, Type::i8p(b.ccx)); + b.call(lifetime_intrinsic, &[C_u64(b.ccx, size), ptr], None); + } } // Generates code for resumption of unwind at the end of a landing pad. diff --git a/src/librustc_trans/builder.rs b/src/librustc_trans/builder.rs index 1f937ba6e8af5..d09f049ca18d9 100644 --- a/src/librustc_trans/builder.rs +++ b/src/librustc_trans/builder.rs @@ -1103,14 +1103,14 @@ impl<'a, 'tcx> Builder<'a, 'tcx> { } } - pub fn add_case(s: ValueRef, on_val: ValueRef, dest: BasicBlockRef) { + pub fn add_case(&self, s: ValueRef, on_val: ValueRef, dest: BasicBlockRef) { unsafe { if llvm::LLVMIsUndef(s) == llvm::True { return; } llvm::LLVMAddCase(s, on_val, dest) } } - pub fn add_incoming_to_phi(phi: ValueRef, val: ValueRef, bb: BasicBlockRef) { + pub fn add_incoming_to_phi(&self, phi: ValueRef, val: ValueRef, bb: BasicBlockRef) { unsafe { if llvm::LLVMIsUndef(phi) == llvm::True { return; } llvm::LLVMAddIncoming(phi, &val, &bb, 1 as c_uint); diff --git a/src/librustc_trans/cleanup.rs b/src/librustc_trans/cleanup.rs index 651687286aeca..a3b4135fcc7de 100644 --- a/src/librustc_trans/cleanup.rs +++ b/src/librustc_trans/cleanup.rs @@ -117,7 +117,7 @@ pub use self::EarlyExitLabel::*; use llvm::{BasicBlockRef, ValueRef}; -use base; +use base::{self, Lifetime}; use common; use common::{BlockAndBuilder, FunctionContext, LandingPad}; use debuginfo::{DebugLoc}; @@ -422,7 +422,7 @@ impl<'blk, 'tcx> FunctionContext<'blk, 'tcx> { let addr = self.landingpad_alloca.get() .unwrap(); let lp = bcx.load(addr); - base::call_lifetime_end(&bcx, addr); + Lifetime::End.call(&bcx, addr); base::trans_unwind_resume(&bcx, lp); } UnwindKind::CleanupPad(_) => { @@ -559,9 +559,8 @@ impl<'blk, 'tcx> FunctionContext<'blk, 'tcx> { let addr = match self.landingpad_alloca.get() { Some(addr) => addr, None => { - let addr = base::alloca(&pad_bcx, common::val_ty(llretval), - ""); - base::call_lifetime_start(&pad_bcx, addr); + let addr = base::alloca(&pad_bcx, common::val_ty(llretval), ""); + Lifetime::Start.call(&pad_bcx, addr); self.landingpad_alloca.set(Some(addr)); addr } diff --git a/src/librustc_trans/common.rs b/src/librustc_trans/common.rs index 32f437fea5220..1283a7796f406 100644 --- a/src/librustc_trans/common.rs +++ b/src/librustc_trans/common.rs @@ -15,7 +15,7 @@ use session::Session; use llvm; use llvm::{ValueRef, BasicBlockRef, BuilderRef, ContextRef, TypeKind}; -use llvm::{True, False, Bool, OperandBundleDef}; +use llvm::{True, False, Bool, OperandBundleDef, get_param}; use rustc::hir::def::Def; use rustc::hir::def_id::DefId; use rustc::hir::map::DefPathData; diff --git a/src/librustc_trans/glue.rs b/src/librustc_trans/glue.rs 
index 10c38af8a7396..a9e2f00ee73bb 100644 --- a/src/librustc_trans/glue.rs +++ b/src/librustc_trans/glue.rs @@ -23,7 +23,6 @@ use rustc::ty::{self, AdtKind, Ty, TyCtxt, TypeFoldable}; use adt; use base::*; use callee::{Callee}; -use builder::Builder; use common::*; use machine::*; use monomorphize; @@ -164,10 +163,10 @@ pub fn drop_ty_immediate<'blk, 'tcx>(bcx: BlockAndBuilder<'blk, 'tcx>, -> BlockAndBuilder<'blk, 'tcx> { let _icx = push_ctxt("drop_ty_immediate"); let vp = alloc_ty(&bcx, t, ""); - call_lifetime_start(&bcx, vp); + Lifetime::Start.call(&bcx, vp); store_ty(&bcx, v, vp, t); let bcx = drop_ty_core(bcx, vp, t, skip_dtor); - call_lifetime_end(&bcx, vp); + Lifetime::End.call(&bcx, vp); bcx } @@ -602,7 +601,7 @@ fn drop_structural_ty<'blk, 'tcx>(cx: BlockAndBuilder<'blk, 'tcx>, &variant.disr_val.to_string()); let variant_cx = fcx.new_block(&variant_cx_name).build(); let case_val = adt::trans_case(&cx, t, Disr::from(variant.disr_val)); - Builder::add_case(llswitch, case_val, variant_cx.llbb()); + variant_cx.add_case(llswitch, case_val, variant_cx.llbb()); let variant_cx = iter_variant(variant_cx, t, value, variant, substs); variant_cx.br(next_cx.llbb()); } diff --git a/src/librustc_trans/mir/block.rs b/src/librustc_trans/mir/block.rs index caeb25241e5c8..a37c6d0d2bb90 100644 --- a/src/librustc_trans/mir/block.rs +++ b/src/librustc_trans/mir/block.rs @@ -15,11 +15,10 @@ use rustc::ty::{self, layout}; use rustc::mir; use abi::{Abi, FnType, ArgType}; use adt; -use base; +use base::{self, Lifetime}; use callee::{Callee, CalleeData, Fn, Intrinsic, NamedTupleConstructor, Virtual}; use common::{self, Block, BlockAndBuilder, LandingPad}; use common::{C_bool, C_str_slice, C_struct, C_u32, C_undef}; -use builder::Builder; use consts; use debuginfo::DebugLoc; use Disr; @@ -122,7 +121,7 @@ impl<'bcx, 'tcx> MirContext<'bcx, 'tcx> { let ps = self.get_personality_slot(&bcx); let lp = bcx.load(ps); - base::call_lifetime_end(&bcx, ps); + Lifetime::End.call(&bcx, ps); base::trans_unwind_resume(&bcx, lp); } } @@ -167,7 +166,7 @@ impl<'bcx, 'tcx> MirContext<'bcx, 'tcx> { if default_bb != Some(target) { let llbb = llblock(self, target); let llval = adt::trans_case(&bcx, ty, Disr::from(adt_variant.disr_val)); - Builder::add_case(switch, llval, llbb) + bcx.add_case(switch, llval, llbb) } } } @@ -180,7 +179,7 @@ impl<'bcx, 'tcx> MirContext<'bcx, 'tcx> { for (value, target) in values.iter().zip(targets) { let val = Const::from_constval(bcx.ccx(), value.clone(), switch_ty); let llbb = llblock(self, *target); - Builder::add_case(switch, val.llval, llbb) + bcx.add_case(switch, val.llval, llbb) } } @@ -256,7 +255,7 @@ impl<'bcx, 'tcx> MirContext<'bcx, 'tcx> { // here that can be cleanly backported to beta, so // I want to avoid touching all of trans. let scratch = base::alloc_ty(&bcx, ty, "drop"); - base::call_lifetime_start(&bcx, scratch); + Lifetime::Start.call(&bcx, scratch); bcx.store(lvalue.llval, base::get_dataptr(&bcx, scratch)); bcx.store(lvalue.llextra, base::get_meta(&bcx, scratch)); scratch @@ -478,7 +477,7 @@ impl<'bcx, 'tcx> MirContext<'bcx, 'tcx> { // here that can be cleanly backported to beta, so // I want to avoid touching all of trans. 
let scratch = base::alloc_ty(&bcx, ty, "drop"); - base::call_lifetime_start(&bcx, scratch); + Lifetime::Start.call(&bcx, scratch); bcx.store(llval, base::get_dataptr(&bcx, scratch)); bcx.store(llextra, base::get_meta(&bcx, scratch)); scratch @@ -752,9 +751,9 @@ impl<'bcx, 'tcx> MirContext<'bcx, 'tcx> { Ref(llval) => { let base = adt::MaybeSizedValue::sized(llval); for (n, &ty) in arg_types.iter().enumerate() { - let ptr = adt::trans_field_ptr_builder(bcx, tuple.ty, base, Disr(0), n); + let ptr = adt::trans_field_ptr(bcx, tuple.ty, base, Disr(0), n); let val = if common::type_is_fat_ptr(bcx.tcx(), ty) { - let (lldata, llextra) = base::load_fat_ptr_builder(bcx, ptr, ty); + let (lldata, llextra) = base::load_fat_ptr(bcx, ptr, ty); Pair(lldata, llextra) } else { // trans_argument will load this if it needs to @@ -817,7 +816,7 @@ impl<'bcx, 'tcx> MirContext<'bcx, 'tcx> { let llretty = Type::struct_(ccx, &[Type::i8p(ccx), Type::i32(ccx)], false); let slot = base::alloca(bcx, llretty, "personalityslot"); self.llpersonalityslot = Some(slot); - base::call_lifetime_start(bcx, slot); + Lifetime::Start.call(bcx, slot); slot } } diff --git a/src/librustc_trans/mir/lvalue.rs b/src/librustc_trans/mir/lvalue.rs index e211a8b68d4f3..cb440eda18208 100644 --- a/src/librustc_trans/mir/lvalue.rs +++ b/src/librustc_trans/mir/lvalue.rs @@ -146,8 +146,8 @@ impl<'bcx, 'tcx> MirContext<'bcx, 'tcx> { } else { adt::MaybeSizedValue::unsized_(tr_base.llval, tr_base.llextra) }; - let llprojected = adt::trans_field_ptr_builder(bcx, base_ty, base, - Disr(discr), field.index()); + let llprojected = adt::trans_field_ptr(bcx, base_ty, base, Disr(discr), + field.index()); let llextra = if is_sized { ptr::null_mut() } else { diff --git a/src/librustc_trans/mir/mod.rs b/src/librustc_trans/mir/mod.rs index d767a2cb1d07c..27dea2fd231b9 100644 --- a/src/librustc_trans/mir/mod.rs +++ b/src/librustc_trans/mir/mod.rs @@ -361,10 +361,8 @@ fn arg_local_refs<'bcx, 'tcx>(bcx: &BlockAndBuilder<'bcx, 'tcx>, // they are the two sub-fields of a single aggregate field. let meta = &fcx.fn_ty.args[idx]; idx += 1; - arg.store_fn_arg(bcx, &mut llarg_idx, - base::get_dataptr_builder(bcx, dst)); - meta.store_fn_arg(bcx, &mut llarg_idx, - base::get_meta_builder(bcx, dst)); + arg.store_fn_arg(bcx, &mut llarg_idx, base::get_dataptr(bcx, dst)); + meta.store_fn_arg(bcx, &mut llarg_idx, base::get_meta(bcx, dst)); } else { arg.store_fn_arg(bcx, &mut llarg_idx, dst); } @@ -436,10 +434,8 @@ fn arg_local_refs<'bcx, 'tcx>(bcx: &BlockAndBuilder<'bcx, 'tcx>, // so make an alloca to store them in. 
let meta = &fcx.fn_ty.args[idx]; idx += 1; - arg.store_fn_arg(bcx, &mut llarg_idx, - base::get_dataptr_builder(bcx, lltemp)); - meta.store_fn_arg(bcx, &mut llarg_idx, - base::get_meta_builder(bcx, lltemp)); + arg.store_fn_arg(bcx, &mut llarg_idx, base::get_dataptr(bcx, lltemp)); + meta.store_fn_arg(bcx, &mut llarg_idx, base::get_meta(bcx, lltemp)); } else { // otherwise, arg is passed by value, so make a // temporary and store it there diff --git a/src/librustc_trans/mir/operand.rs b/src/librustc_trans/mir/operand.rs index 053eabb6fbf27..b3ea8d5c76321 100644 --- a/src/librustc_trans/mir/operand.rs +++ b/src/librustc_trans/mir/operand.rs @@ -144,7 +144,7 @@ impl<'bcx, 'tcx> MirContext<'bcx, 'tcx> { debug!("trans_load: {:?} @ {:?}", Value(llval), ty); let val = if common::type_is_fat_ptr(bcx.tcx(), ty) { - let (lldata, llextra) = base::load_fat_ptr_builder(bcx, llval, ty); + let (lldata, llextra) = base::load_fat_ptr(bcx, llval, ty); OperandValue::Pair(lldata, llextra) } else if common::type_is_imm_pair(bcx.ccx(), ty) { let [a_ty, b_ty] = common::type_pair_fields(bcx.ccx(), ty).unwrap(); @@ -152,11 +152,11 @@ impl<'bcx, 'tcx> MirContext<'bcx, 'tcx> { let b_ptr = bcx.struct_gep(llval, 1); OperandValue::Pair( - base::load_ty_builder(bcx, a_ptr, a_ty), - base::load_ty_builder(bcx, b_ptr, b_ty) + base::load_ty(bcx, a_ptr, a_ty), + base::load_ty(bcx, b_ptr, b_ty) ) } else if common::type_is_immediate(bcx.ccx(), ty) { - OperandValue::Immediate(base::load_ty_builder(bcx, llval, ty)) + OperandValue::Immediate(base::load_ty(bcx, llval, ty)) } else { OperandValue::Ref(llval) }; diff --git a/src/librustc_trans/mir/rvalue.rs b/src/librustc_trans/mir/rvalue.rs index d9a0895de5bb9..e32d25dc01ef6 100644 --- a/src/librustc_trans/mir/rvalue.rs +++ b/src/librustc_trans/mir/rvalue.rs @@ -97,7 +97,7 @@ impl<'bcx, 'tcx> MirContext<'bcx, 'tcx> { let tr_elem = self.trans_operand(&bcx, elem); let size = count.value.as_u64(bcx.tcx().sess.target.uint_type); let size = C_uint(bcx.ccx(), size); - let base = base::get_dataptr_builder(&bcx, dest.llval); + let base = base::get_dataptr(&bcx, dest.llval); let bcx = tvec::slice_for_each(bcx, base, tr_elem.ty, size, |bcx, llslot| { self.store_operand_direct(&bcx, llslot, tr_elem); bcx @@ -109,17 +109,16 @@ impl<'bcx, 'tcx> MirContext<'bcx, 'tcx> { match *kind { mir::AggregateKind::Adt(adt_def, variant_index, _, active_field_index) => { let disr = Disr::from(adt_def.variants[variant_index].disr_val); - adt::trans_set_discr(&bcx, - dest.ty.to_ty(bcx.tcx()), dest.llval, Disr::from(disr)); + let dest_ty = dest.ty.to_ty(bcx.tcx()); + adt::trans_set_discr(&bcx, dest_ty, dest.llval, Disr::from(disr)); for (i, operand) in operands.iter().enumerate() { let op = self.trans_operand(&bcx, operand); // Do not generate stores and GEPis for zero-sized fields. 
if !common::type_is_zero_size(bcx.ccx(), op.ty) { let val = adt::MaybeSizedValue::sized(dest.llval); let field_index = active_field_index.unwrap_or(i); - let lldest_i = adt::trans_field_ptr_builder(&bcx, - dest.ty.to_ty(bcx.tcx()), - val, disr, field_index); + let lldest_i = adt::trans_field_ptr(&bcx, dest_ty, val, disr, + field_index); self.store_operand(&bcx, lldest_i, op); } } diff --git a/src/librustc_trans/tvec.rs b/src/librustc_trans/tvec.rs index 8e0069a2a43f8..931eb563e3635 100644 --- a/src/librustc_trans/tvec.rs +++ b/src/librustc_trans/tvec.rs @@ -14,7 +14,6 @@ use llvm; use llvm::ValueRef; use base::*; use common::*; -use builder::Builder; use rustc::ty::Ty; pub fn slice_for_each<'blk, 'tcx, F>(bcx: BlockAndBuilder<'blk, 'tcx>, @@ -61,7 +60,7 @@ pub fn slice_for_each<'blk, 'tcx, F>(bcx: BlockAndBuilder<'blk, 'tcx>, } else { body_bcx.inbounds_gep(current, &[C_uint(bcx.ccx(), 1usize)]) }; - Builder::add_incoming_to_phi(current, next, body_bcx.llbb()); + body_bcx.add_incoming_to_phi(current, next, body_bcx.llbb()); body_bcx.br(header_bcx.llbb()); next_bcx } From fec59c5f737d044df31284090dcab2a7b678982d Mon Sep 17 00:00:00 2001 From: Mark-Simulacrum Date: Sun, 11 Dec 2016 16:28:10 -0700 Subject: [PATCH 006/103] Replace lpad().and_then(..) with None --- src/librustc_trans/base.rs | 42 +---------------------------- src/librustc_trans/callee.rs | 47 ++++++++++++++++++++++++++++----- src/librustc_trans/glue.rs | 9 +++---- src/librustc_trans/intrinsic.rs | 34 ++++++++++++------------ src/librustc_trans/meth.rs | 2 +- 5 files changed, 63 insertions(+), 71 deletions(-) diff --git a/src/librustc_trans/base.rs b/src/librustc_trans/base.rs index d8e4d05872e59..84d7b6bc7c9bd 100644 --- a/src/librustc_trans/base.rs +++ b/src/librustc_trans/base.rs @@ -205,7 +205,7 @@ pub fn malloc_raw_dyn<'blk, 'tcx>(bcx: &BlockAndBuilder<'blk, 'tcx>, // Allocate space: let def_id = require_alloc_fn(bcx, info_ty, ExchangeMallocFnLangItem); let r = Callee::def(bcx.ccx(), def_id, bcx.tcx().intern_substs(&[])).reify(bcx.ccx()); - bcx.pointercast(bcx.call(r, &[size, align], bcx.lpad().and_then(|b| b.bundle())), llty_ptr) + bcx.pointercast(bcx.call(r, &[size, align], None), llty_ptr) } @@ -451,38 +451,6 @@ fn cast_shift_rhs(op: hir::BinOp_, } } -pub fn invoke<'blk, 'tcx>(bcx: BlockAndBuilder<'blk, 'tcx>, - llfn: ValueRef, - llargs: &[ValueRef]) - -> (ValueRef, BlockAndBuilder<'blk, 'tcx>) { - let _icx = push_ctxt("invoke_"); - if need_invoke(&bcx) { - debug!("invoking {:?} at {:?}", Value(llfn), bcx.llbb()); - for &llarg in llargs { - debug!("arg: {:?}", Value(llarg)); - } - let normal_bcx = bcx.fcx().new_block("normal-return"); - let landing_pad = bcx.fcx().get_landing_pad(); - - let llresult = bcx.invoke( - llfn, - &llargs[..], - normal_bcx.llbb, - landing_pad, - bcx.lpad().and_then(|b| b.bundle()) - ); - return (llresult, normal_bcx.build()); - } else { - debug!("calling {:?} at {:?}", Value(llfn), bcx.llbb()); - for &llarg in llargs { - debug!("arg: {:?}", Value(llarg)); - } - - let llresult = bcx.call(llfn, &llargs[..], bcx.lpad().and_then(|b| b.bundle())); - return (llresult, bcx); - } -} - /// Returns whether this session's target will use SEH-based unwinding. 
 ///
 /// This is only true for MSVC targets, and even then the 64-bit MSVC target
@@ -492,14 +460,6 @@ pub fn wants_msvc_seh(sess: &Session) -> bool {
     sess.target.target.options.is_like_msvc
 }
 
-fn need_invoke(bcx: &BlockAndBuilder) -> bool {
-    if bcx.sess().no_landing_pads() || bcx.lpad().is_some() {
-        false
-    } else {
-        bcx.fcx().needs_invoke()
-    }
-}
-
 pub fn call_assume<'a, 'tcx>(b: &Builder<'a, 'tcx>, val: ValueRef) {
     let assume_intrinsic = b.ccx.get_intrinsic("llvm.assume");
     b.call(assume_intrinsic, &[val], None);
diff --git a/src/librustc_trans/callee.rs b/src/librustc_trans/callee.rs
index 6dd2c46ecec73..331945a5a44a5 100644
--- a/src/librustc_trans/callee.rs
+++ b/src/librustc_trans/callee.rs
@@ -209,9 +209,10 @@ impl<'tcx> Callee<'tcx> {
     /// function.
     pub fn call<'a, 'blk>(self, bcx: BlockAndBuilder<'blk, 'tcx>,
                           args: &[ValueRef],
-                          dest: Option<ValueRef>)
+                          dest: Option<ValueRef>,
+                          lpad: Option<&'blk llvm::OperandBundleDef>)
                           -> (BlockAndBuilder<'blk, 'tcx>, ValueRef) {
-        trans_call_inner(bcx, self, args, dest)
+        trans_call_inner(bcx, self, args, dest, lpad)
     }
 
     /// Turn the callee into a function pointer.
@@ -411,7 +412,7 @@ fn trans_fn_once_adapter_shim<'a, 'tcx>(
     let self_scope = fcx.push_custom_cleanup_scope();
     fcx.schedule_drop_mem(self_scope, llenv, closure_ty);
 
-    let bcx = callee.call(bcx, &llargs[self_idx..], dest).0;
+    let bcx = callee.call(bcx, &llargs[self_idx..], dest, None).0;
 
     let bcx = fcx.pop_and_trans_custom_cleanup_scope(bcx, self_scope);
 
@@ -540,7 +541,7 @@ fn trans_fn_pointer_shim<'a, 'tcx>(
         data: Fn(llfnpointer),
         ty: bare_fn_ty
     };
-    let bcx = callee.call(bcx, &llargs[(self_idx + 1)..], dest).0;
+    let bcx = callee.call(bcx, &llargs[(self_idx + 1)..], dest, None).0;
     fcx.finish(&bcx);
 
     ccx.fn_pointer_shims().borrow_mut().insert(bare_fn_ty_maybe_ref, llfn);
@@ -653,7 +654,8 @@ fn get_fn<'a, 'tcx>(ccx: &CrateContext<'a, 'tcx>,
 fn trans_call_inner<'a, 'blk, 'tcx>(bcx: BlockAndBuilder<'blk, 'tcx>,
                                     callee: Callee<'tcx>,
                                     args: &[ValueRef],
-                                    opt_llretslot: Option<ValueRef>)
+                                    opt_llretslot: Option<ValueRef>,
+                                    lpad: Option<&'blk llvm::OperandBundleDef>)
                                     -> (BlockAndBuilder<'blk, 'tcx>, ValueRef) {
     // Introduce a temporary cleanup scope that will contain cleanups
    // for the arguments while they are being evaluated.
The purpose @@ -707,7 +709,40 @@ fn trans_call_inner<'a, 'blk, 'tcx>(bcx: BlockAndBuilder<'blk, 'tcx>, _ => bug!("expected fn pointer callee, found {:?}", callee) }; - let (llret, bcx) = base::invoke(bcx, llfn, &llargs); + fn need_invoke(bcx: &BlockAndBuilder, had_lpad: bool) -> bool { + if bcx.sess().no_landing_pads() || had_lpad { + false + } else { + bcx.fcx().needs_invoke() + } + } + + let _icx = push_ctxt("invoke_"); + let (llret, bcx) = if need_invoke(&bcx, lpad.is_some()) { + debug!("invoking {:?} at {:?}", Value(llfn), bcx.llbb()); + for &llarg in &llargs { + debug!("arg: {:?}", Value(llarg)); + } + let normal_bcx = bcx.fcx().new_block("normal-return"); + let landing_pad = bcx.fcx().get_landing_pad(); + + let llresult = bcx.invoke( + llfn, + &llargs[..], + normal_bcx.llbb, + landing_pad, + lpad, + ); + (llresult, normal_bcx.build()) + } else { + debug!("calling {:?} at {:?}", Value(llfn), bcx.llbb()); + for &llarg in &llargs { + debug!("arg: {:?}", Value(llarg)); + } + + let llresult = bcx.call(llfn, &llargs[..], lpad); + (llresult, bcx) + }; fn_ty.apply_attrs_callsite(llret); // If the function we just called does not use an outpointer, diff --git a/src/librustc_trans/glue.rs b/src/librustc_trans/glue.rs index a9e2f00ee73bb..e6db048cf86a7 100644 --- a/src/librustc_trans/glue.rs +++ b/src/librustc_trans/glue.rs @@ -46,7 +46,7 @@ pub fn trans_exchange_free_dyn<'blk, 'tcx>(bcx: BlockAndBuilder<'blk, 'tcx>, let def_id = langcall(bcx.tcx(), None, "", ExchangeFreeFnLangItem); let args = [bcx.pointercast(v, Type::i8p(bcx.ccx())), size, align]; Callee::def(bcx.ccx(), def_id, bcx.tcx().intern_substs(&[])) - .call(bcx, &args, None).0 + .call(bcx, &args, None, None).0 } pub fn trans_exchange_free<'blk, 'tcx>(cx: BlockAndBuilder<'blk, 'tcx>, @@ -288,8 +288,7 @@ fn trans_custom_dtor<'blk, 'tcx>(bcx: BlockAndBuilder<'blk, 'tcx>, _ => bug!("dtor for {:?} is not an impl???", t) }; let dtor_did = def.destructor().unwrap(); - bcx = Callee::def(bcx.ccx(), dtor_did, vtbl.substs) - .call(bcx, args, None).0; + bcx = Callee::def(bcx.ccx(), dtor_did, vtbl.substs).call(bcx, args, None, None).0; bcx.fcx().pop_and_trans_custom_cleanup_scope(bcx, contents_scope) } @@ -456,9 +455,7 @@ fn make_drop_glue<'blk, 'tcx>(bcx: BlockAndBuilder<'blk, 'tcx>, let data_ptr = get_dataptr(&bcx, v0); let vtable_ptr = bcx.load(get_meta(&bcx, v0)); let dtor = bcx.load(vtable_ptr); - bcx.call(dtor, - &[bcx.pointercast(bcx.load(data_ptr), Type::i8p(bcx.ccx()))], - bcx.lpad().and_then(|b| b.bundle())); + bcx.call(dtor, &[bcx.pointercast(bcx.load(data_ptr), Type::i8p(bcx.ccx()))], None); bcx } ty::TyAdt(def, ..) if def.dtor_kind().is_present() && !skip_dtor => { diff --git a/src/librustc_trans/intrinsic.rs b/src/librustc_trans/intrinsic.rs index 90f8c64a2cf8b..303f0f273625a 100644 --- a/src/librustc_trans/intrinsic.rs +++ b/src/librustc_trans/intrinsic.rs @@ -119,7 +119,7 @@ pub fn trans_intrinsic_call<'a, 'blk, 'tcx>(bcx: &BlockAndBuilder<'blk, 'tcx>, // These are the only intrinsic functions that diverge. if name == "abort" { let llfn = ccx.get_intrinsic(&("llvm.trap")); - bcx.call(llfn, &[], bcx.lpad().and_then(|b| b.bundle())); + bcx.call(llfn, &[], None); return; } else if name == "unreachable" { // FIXME: do nothing? 
@@ -131,15 +131,15 @@ pub fn trans_intrinsic_call<'a, 'blk, 'tcx>(bcx: &BlockAndBuilder<'blk, 'tcx>, let simple = get_simple_intrinsic(ccx, name); let llval = match (simple, name) { (Some(llfn), _) => { - bcx.call(llfn, &llargs, bcx.lpad().and_then(|b| b.bundle())) + bcx.call(llfn, &llargs, None) } (_, "likely") => { let expect = ccx.get_intrinsic(&("llvm.expect.i1")); - bcx.call(expect, &[llargs[0], C_bool(ccx, true)], bcx.lpad().and_then(|b| b.bundle())) + bcx.call(expect, &[llargs[0], C_bool(ccx, true)], None) } (_, "unlikely") => { let expect = ccx.get_intrinsic(&("llvm.expect.i1")); - bcx.call(expect, &[llargs[0], C_bool(ccx, false)], bcx.lpad().and_then(|b| b.bundle())) + bcx.call(expect, &[llargs[0], C_bool(ccx, false)], None) } (_, "try") => { try_intrinsic(bcx, llargs[0], llargs[1], llargs[2], llresult); @@ -147,7 +147,7 @@ pub fn trans_intrinsic_call<'a, 'blk, 'tcx>(bcx: &BlockAndBuilder<'blk, 'tcx>, } (_, "breakpoint") => { let llfn = ccx.get_intrinsic(&("llvm.debugtrap")); - bcx.call(llfn, &[], bcx.lpad().and_then(|b| b.bundle())) + bcx.call(llfn, &[], None) } (_, "size_of") => { let tp_ty = substs.type_at(0); @@ -318,13 +318,13 @@ pub fn trans_intrinsic_call<'a, 'blk, 'tcx>(bcx: &BlockAndBuilder<'blk, 'tcx>, "cttz" => count_zeros_intrinsic(bcx, &format!("llvm.cttz.i{}", width), llargs[0]), "ctpop" => bcx.call(ccx.get_intrinsic(&format!("llvm.ctpop.i{}", width)), - &llargs, bcx.lpad().and_then(|b| b.bundle())), + &llargs, None), "bswap" => { if width == 8 { llargs[0] // byte swap a u8/i8 is just a no-op } else { bcx.call(ccx.get_intrinsic(&format!("llvm.bswap.i{}", width)), - &llargs, bcx.lpad().and_then(|b| b.bundle())) + &llargs, None) } } "add_with_overflow" | "sub_with_overflow" | "mul_with_overflow" => { @@ -654,7 +654,7 @@ pub fn trans_intrinsic_call<'a, 'blk, 'tcx>(bcx: &BlockAndBuilder<'blk, 'tcx>, let f = declare::declare_cfn(ccx, name, Type::func(&inputs, &outputs)); - bcx.call(f, &llargs, bcx.lpad().and_then(|b| b.bundle())) + bcx.call(f, &llargs, None) } }; @@ -720,7 +720,7 @@ fn copy_intrinsic<'blk, 'tcx>(bcx: &BlockAndBuilder<'blk, 'tcx>, bcx.mul(size, count), align, C_bool(ccx, volatile)], - bcx.lpad().and_then(|b| b.bundle())) + None) } fn memset_intrinsic<'blk, 'tcx>(bcx: &BlockAndBuilder<'blk, 'tcx>, @@ -748,7 +748,7 @@ fn memset_intrinsic<'blk, 'tcx>(bcx: &BlockAndBuilder<'blk, 'tcx>, bcx.mul(size, count), align, C_bool(ccx, volatile)], - bcx.lpad().and_then(|b| b.bundle())) + None) } fn count_zeros_intrinsic(bcx: &BlockAndBuilder, @@ -757,7 +757,7 @@ fn count_zeros_intrinsic(bcx: &BlockAndBuilder, -> ValueRef { let y = C_bool(bcx.ccx(), false); let llfn = bcx.ccx().get_intrinsic(&name); - bcx.call(llfn, &[val, y], bcx.lpad().and_then(|b| b.bundle())) + bcx.call(llfn, &[val, y], None) } fn with_overflow_intrinsic<'blk, 'tcx>(bcx: &BlockAndBuilder<'blk, 'tcx>, @@ -769,7 +769,7 @@ fn with_overflow_intrinsic<'blk, 'tcx>(bcx: &BlockAndBuilder<'blk, 'tcx>, let llfn = bcx.ccx().get_intrinsic(&name); // Convert `i1` to a `bool`, and write it to the out parameter - let val = bcx.call(llfn, &[a, b], bcx.lpad().and_then(|b| b.bundle())); + let val = bcx.call(llfn, &[a, b], None); let result = bcx.extract_value(val, 0); let overflow = bcx.zext(bcx.extract_value(val, 1), Type::bool(bcx.ccx())); bcx.store(result, bcx.struct_gep(out, 0)); @@ -786,7 +786,7 @@ fn try_intrinsic<'blk, 'tcx>( dest: ValueRef, ) { if bcx.sess().no_landing_pads() { - bcx.call(func, &[data], bcx.lpad().and_then(|b| b.bundle())); + bcx.call(func, &[data], None); 
         bcx.store(C_null(Type::i8p(&bcx.ccx())), dest);
     } else if wants_msvc_seh(bcx.sess()) {
         trans_msvc_try(bcx, func, data, local_ptr, dest);
@@ -863,7 +863,7 @@ fn trans_msvc_try<'blk, 'tcx>(bcx: &BlockAndBuilder<'blk, 'tcx>,
         let i64p = Type::i64(ccx).ptr_to();
         let slot = bcx.fcx().alloca(i64p, "slot");
         bcx.invoke(func, &[data], normal.llbb(), catchswitch.llbb(),
-                   bcx.lpad().and_then(|b| b.bundle()));
+                   None);
 
         normal.ret(C_i32(ccx, 0));
 
@@ -890,7 +890,7 @@ fn trans_msvc_try<'blk, 'tcx>(bcx: &BlockAndBuilder<'blk, 'tcx>,
 
     // Note that no invoke is used here because by definition this function
     // can't panic (that's what it's catching).
-    let ret = bcx.call(llfn, &[func, data, local_ptr], bcx.lpad().and_then(|b| b.bundle()));
+    let ret = bcx.call(llfn, &[func, data, local_ptr], None);
     bcx.store(ret, dest);
 }
 
@@ -936,7 +936,7 @@ fn trans_gnu_try<'blk, 'tcx>(bcx: &BlockAndBuilder<'blk, 'tcx>,
     let func = llvm::get_param(bcx.fcx().llfn, 0);
     let data = llvm::get_param(bcx.fcx().llfn, 1);
     let local_ptr = llvm::get_param(bcx.fcx().llfn, 2);
-    bcx.invoke(func, &[data], then.llbb(), catch.llbb(), bcx.lpad().and_then(|b| b.bundle()));
+    bcx.invoke(func, &[data], then.llbb(), catch.llbb(), None);
     then.ret(C_i32(ccx, 0));
 
     // Type indicator for the exception being thrown.
@@ -956,7 +956,7 @@ fn trans_gnu_try<'blk, 'tcx>(bcx: &BlockAndBuilder<'blk, 'tcx>,
 
     // Note that no invoke is used here because by definition this function
     // can't panic (that's what it's catching).
-    let ret = bcx.call(llfn, &[func, data, local_ptr], bcx.lpad().and_then(|b| b.bundle()));
+    let ret = bcx.call(llfn, &[func, data, local_ptr], None);
     bcx.store(ret, dest);
 }
 
diff --git a/src/librustc_trans/meth.rs b/src/librustc_trans/meth.rs
index 75746584becbb..e23c545753fe9 100644
--- a/src/librustc_trans/meth.rs
+++ b/src/librustc_trans/meth.rs
@@ -91,7 +91,7 @@ pub fn trans_object_shim<'a, 'tcx>(ccx: &'a CrateContext<'a, 'tcx>,
     let dest = fcx.llretslotptr.get();
 
     let llargs = get_params(fcx.llfn);
-    bcx = callee.call(bcx, &llargs[fcx.fn_ty.ret.is_indirect() as usize..], dest).0;
+    bcx = callee.call(bcx, &llargs[fcx.fn_ty.ret.is_indirect() as usize..], dest, None).0;
 
     fcx.finish(&bcx);
 
From 3dbd141b8c8d9e132709ad4453785a02bc642ecb Mon Sep 17 00:00:00 2001
From: Mark-Simulacrum
Date: Sun, 11 Dec 2016 18:06:36 -0700
Subject: [PATCH 007/103] Remove unused map_block

---
 src/librustc_trans/common.rs | 10 +----------
 1 file changed, 1 insertion(+), 9 deletions(-)

diff --git a/src/librustc_trans/common.rs b/src/librustc_trans/common.rs
index 1283a7796f406..31abbaf8fa8dd 100644
--- a/src/librustc_trans/common.rs
+++ b/src/librustc_trans/common.rs
@@ -581,14 +581,6 @@ impl<'blk, 'tcx> BlockAndBuilder<'blk, 'tcx> {
         result
     }
 
-    pub fn map_block<F>(self, f: F) -> Self
-        where F: FnOnce(Block<'blk, 'tcx>) -> Block<'blk, 'tcx>
-    {
-        let BlockAndBuilder { bcx, owned_builder } = self;
-        let bcx = f(bcx);
-        BlockAndBuilder::new(bcx, owned_builder)
-    }
-
     pub fn at_start<F, R>(&self, f: F) -> R
         where F: FnOnce(&BlockAndBuilder<'blk, 'tcx>) -> R
     {

From 8f3d824cc70ecae51ea373da4006cfd6427a530f Mon Sep 17 00:00:00 2001
From: Mark-Simulacrum
Date: Sun, 11 Dec 2016 22:19:39 -0700
Subject: [PATCH 008/103] Remove common::Block.
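This folds what used to live on the arena-allocated BlockS (the llbb,
the lpad cell, and the function context) directly into BlockAndBuilder,
so the Block alias and BlockS can be removed and the per-function block
arena is no longer needed (it is left commented out for now).

Call sites migrate mechanically from building a Block and then wrapping
it, to asking the FunctionContext for a BlockAndBuilder directly. A
minimal sketch of the pattern, using only names that appear in the diff
below:

    // before: allocate a Block in fcx.block_arena, then wrap a builder
    // around it
    let next_cx = fcx.new_block("next").build();

    // after: new_block returns a raw BasicBlockRef; build_new_block
    // wraps it in a BlockAndBuilder positioned at the block's end
    let next_cx = fcx.build_new_block("next");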
---
 src/librustc_trans/base.rs         |  35 ++----
 src/librustc_trans/callee.rs       |  42 ++-----
 src/librustc_trans/cleanup.rs      |  33 ++++--
 src/librustc_trans/common.rs       | 170 ++++++++--------------------
 src/librustc_trans/glue.rs         |  51 ++++-----
 src/librustc_trans/intrinsic.rs    |  17 ++-
 src/librustc_trans/meth.rs         |   5 +-
 src/librustc_trans/mir/analyze.rs  |  16 +--
 src/librustc_trans/mir/block.rs    | 105 ++++++++++--------
 src/librustc_trans/mir/constant.rs |   4 +-
 src/librustc_trans/mir/lvalue.rs   |   4 +-
 src/librustc_trans/mir/mod.rs      |  35 +++---
 src/librustc_trans/mir/operand.rs  |   2 +-
 src/librustc_trans/mir/rvalue.rs   |   6 +-
 src/librustc_trans/tvec.rs         |   6 +-
 15 files changed, 201 insertions(+), 330 deletions(-)

diff --git a/src/librustc_trans/base.rs b/src/librustc_trans/base.rs
index 84d7b6bc7c9bd..57c0c11ced0f1 100644
--- a/src/librustc_trans/base.rs
+++ b/src/librustc_trans/base.rs
@@ -575,8 +575,8 @@ pub fn with_cond<'blk, 'tcx, F>(
     }
 
     let fcx = bcx.fcx();
-    let next_cx = fcx.new_block("next").build();
-    let cond_cx = fcx.new_block("cond").build();
+    let next_cx = fcx.build_new_block("next");
+    let cond_cx = fcx.build_new_block("cond");
     bcx.cond_br(val, cond_cx.llbb(), next_cx.llbb());
     let after_cx = f(cond_cx);
     after_cx.br(next_cx.llbb());
@@ -619,17 +619,6 @@ impl Lifetime {
     }
 }
 
-// Generates code for resumption of unwind at the end of a landing pad.
-pub fn trans_unwind_resume(bcx: &BlockAndBuilder, lpval: ValueRef) {
-    if !bcx.sess().target.target.options.custom_unwind_resume {
-        bcx.resume(lpval);
-    } else {
-        let exc_ptr = bcx.extract_value(lpval, 0);
-        bcx.call(bcx.fcx().eh_unwind_resume().reify(bcx.ccx()), &[exc_ptr],
-                 bcx.lpad().and_then(|b| b.bundle()));
-    }
-}
-
 pub fn call_memcpy<'bcx, 'tcx>(b: &Builder<'bcx, 'tcx>,
                                dst: ValueRef,
                                src: ValueRef,
@@ -727,8 +716,8 @@ impl<'blk, 'tcx> FunctionContext<'blk, 'tcx> {
     pub fn new(ccx: &'blk CrateContext<'blk, 'tcx>,
                llfndecl: ValueRef,
                fn_ty: FnType,
-               definition: Option<(Instance<'tcx>, &ty::FnSig<'tcx>, Abi)>,
-               block_arena: &'blk TypedArena<BlockS<'blk, 'tcx>>)
+               definition: Option<(Instance<'tcx>, &ty::FnSig<'tcx>, Abi)>)
+               //block_arena: &'blk TypedArena<BlockS<'blk, 'tcx>>)
@@ -924,13 +913,7 @@ pub fn trans_instance<'a, 'tcx>(ccx: &CrateContext<'a, 'tcx>, instance: Instance let fn_ty = FnType::new(ccx, abi, &sig, &[]); - let (arena, fcx): (TypedArena<_>, FunctionContext); - arena = TypedArena::new(); - fcx = FunctionContext::new(ccx, - lldecl, - fn_ty, - Some((instance, &sig, abi)), - &arena); + let fcx = FunctionContext::new(ccx, lldecl, fn_ty, Some((instance, &sig, abi))); if fcx.mir.is_none() { bug!("attempted translation of `{}` w/o MIR", instance); @@ -953,9 +936,7 @@ pub fn trans_ctor_shim<'a, 'tcx>(ccx: &CrateContext<'a, 'tcx>, let sig = ccx.tcx().erase_late_bound_regions_and_normalize(&ctor_ty.fn_sig()); let fn_ty = FnType::new(ccx, Abi::Rust, &sig, &[]); - let (arena, fcx): (TypedArena<_>, FunctionContext); - arena = TypedArena::new(); - fcx = FunctionContext::new(ccx, llfndecl, fn_ty, None, &arena); + let fcx = FunctionContext::new(ccx, llfndecl, fn_ty, None); let bcx = fcx.init(false); if !fcx.fn_ty.ret.is_ignore() { diff --git a/src/librustc_trans/callee.rs b/src/librustc_trans/callee.rs index 331945a5a44a5..a7200cf7538f7 100644 --- a/src/librustc_trans/callee.rs +++ b/src/librustc_trans/callee.rs @@ -16,7 +16,6 @@ pub use self::CalleeData::*; -use arena::TypedArena; use llvm::{self, ValueRef, get_params}; use rustc::hir::def_id::DefId; use rustc::ty::subst::Substs; @@ -26,7 +25,7 @@ use attributes; use base; use base::*; use common::{ - self, Block, BlockAndBuilder, CrateContext, FunctionContext, SharedCrateContext + self, BlockAndBuilder, CrateContext, FunctionContext, SharedCrateContext }; use consts; use declare; @@ -71,25 +70,8 @@ impl<'tcx> Callee<'tcx> { } } - /// Trait or impl method call. - pub fn method_call<'blk>(bcx: Block<'blk, 'tcx>, - method_call: ty::MethodCall) - -> Callee<'tcx> { - let method = bcx.tcx().tables().method_map[&method_call]; - Callee::method(bcx, method) - } - - /// Trait or impl method. - pub fn method<'blk>(bcx: Block<'blk, 'tcx>, - method: ty::MethodCallee<'tcx>) -> Callee<'tcx> { - let substs = bcx.fcx.monomorphize(&method.substs); - Callee::def(bcx.ccx(), method.def_id, substs) - } - /// Function or method definition. - pub fn def<'a>(ccx: &CrateContext<'a, 'tcx>, - def_id: DefId, - substs: &'tcx Substs<'tcx>) + pub fn def<'a>(ccx: &CrateContext<'a, 'tcx>, def_id: DefId, substs: &'tcx Substs<'tcx>) -> Callee<'tcx> { let tcx = ccx.tcx(); @@ -367,9 +349,7 @@ fn trans_fn_once_adapter_shim<'a, 'tcx>( let lloncefn = declare::define_internal_fn(ccx, &function_name, llonce_fn_ty); attributes::set_frame_pointer_elimination(ccx, lloncefn); - let (block_arena, fcx): (TypedArena<_>, FunctionContext); - block_arena = TypedArena::new(); - fcx = FunctionContext::new(ccx, lloncefn, fn_ty, None, &block_arena); + let fcx = FunctionContext::new(ccx, lloncefn, fn_ty, None); let bcx = fcx.init(false); // the first argument (`self`) will be the (by value) closure env. 
@@ -518,9 +498,7 @@ fn trans_fn_pointer_shim<'a, 'tcx>( let llfn = declare::define_internal_fn(ccx, &function_name, tuple_fn_ty); attributes::set_frame_pointer_elimination(ccx, llfn); // - let (block_arena, fcx): (TypedArena<_>, FunctionContext); - block_arena = TypedArena::new(); - fcx = FunctionContext::new(ccx, llfn, fn_ty, None, &block_arena); + let fcx = FunctionContext::new(ccx, llfn, fn_ty, None); let bcx = fcx.init(false); let llargs = get_params(fcx.llfn); @@ -723,17 +701,11 @@ fn trans_call_inner<'a, 'blk, 'tcx>(bcx: BlockAndBuilder<'blk, 'tcx>, for &llarg in &llargs { debug!("arg: {:?}", Value(llarg)); } - let normal_bcx = bcx.fcx().new_block("normal-return"); + let normal_bcx = bcx.fcx().build_new_block("normal-return"); let landing_pad = bcx.fcx().get_landing_pad(); - let llresult = bcx.invoke( - llfn, - &llargs[..], - normal_bcx.llbb, - landing_pad, - lpad, - ); - (llresult, normal_bcx.build()) + let llresult = bcx.invoke(llfn, &llargs[..], normal_bcx.llbb(), landing_pad, lpad); + (llresult, normal_bcx) } else { debug!("calling {:?} at {:?}", Value(llfn), bcx.llbb()); for &llarg in &llargs { diff --git a/src/librustc_trans/cleanup.rs b/src/librustc_trans/cleanup.rs index a3b4135fcc7de..f761b3bd967fe 100644 --- a/src/librustc_trans/cleanup.rs +++ b/src/librustc_trans/cleanup.rs @@ -416,14 +416,22 @@ impl<'blk, 'tcx> FunctionContext<'blk, 'tcx> { UnwindExit(val) => { // Generate a block that will resume unwinding to the // calling function - let bcx = self.new_block("resume").build(); + let bcx = self.build_new_block("resume"); match val { UnwindKind::LandingPad => { let addr = self.landingpad_alloca.get() .unwrap(); let lp = bcx.load(addr); Lifetime::End.call(&bcx, addr); - base::trans_unwind_resume(&bcx, lp); + if !bcx.sess().target.target.options.custom_unwind_resume { + bcx.resume(lp); + } else { + let exc_ptr = bcx.extract_value(lp, 0); + bcx.call( + bcx.fcx().eh_unwind_resume().reify(bcx.ccx()), + &[exc_ptr], + bcx.lpad().and_then(|b| b.bundle())); + } } UnwindKind::CleanupPad(_) => { let pad = bcx.cleanup_pad(None, &[]); @@ -481,7 +489,7 @@ impl<'blk, 'tcx> FunctionContext<'blk, 'tcx> { let name = scope.block_name("clean"); debug!("generating cleanups for {}", name); - let bcx_in = self.new_block(&name[..]).build(); + let bcx_in = self.build_new_block(&name[..]); let exit_label = label.start(&bcx_in); let next_llbb = bcx_in.llbb(); let mut bcx_out = bcx_in; @@ -525,7 +533,7 @@ impl<'blk, 'tcx> FunctionContext<'blk, 'tcx> { Some(llbb) => return llbb, None => { let name = last_scope.block_name("unwind"); - pad_bcx = self.new_block(&name[..]).build(); + pad_bcx = self.build_new_block(&name[..]); last_scope.cached_landing_pad = Some(pad_bcx.llbb()); } } @@ -682,16 +690,17 @@ pub struct DropValue<'tcx> { impl<'tcx> DropValue<'tcx> { fn trans<'blk>(&self, bcx: BlockAndBuilder<'blk, 'tcx>) -> BlockAndBuilder<'blk, 'tcx> { - let skip_dtor = self.skip_dtor; - let _icx = if skip_dtor { - base::push_ctxt("::trans skip_dtor=true") - } else { - base::push_ctxt("::trans skip_dtor=false") - }; if self.is_immediate { - glue::drop_ty_immediate(bcx, self.val, self.ty, self.skip_dtor) + let vp = base::alloc_ty(&bcx, self.ty, ""); + Lifetime::Start.call(&bcx, vp); + base::store_ty(&bcx, self.val, vp, self.ty); + let lpad = bcx.lpad(); + let bcx = glue::call_drop_glue(bcx, vp, self.ty, self.skip_dtor, lpad); + Lifetime::End.call(&bcx, vp); + bcx } else { - glue::drop_ty_core(bcx, self.val, self.ty, self.skip_dtor) + let lpad = bcx.lpad(); + glue::call_drop_glue(bcx, self.val, self.ty, 
                                 self.skip_dtor, lpad)
         }
     }
 }
diff --git a/src/librustc_trans/common.rs b/src/librustc_trans/common.rs
index 31abbaf8fa8dd..4f800bb2bf27e 100644
--- a/src/librustc_trans/common.rs
+++ b/src/librustc_trans/common.rs
@@ -305,7 +305,7 @@ pub struct FunctionContext<'a, 'tcx: 'a> {
     pub span: Option<Span>,
 
     // The arena that blocks are allocated from.
-    pub block_arena: &'a TypedArena<BlockS<'a, 'tcx>>,
+    //pub block_arena: &'a TypedArena<BlockS<'a, 'tcx>>,
 
     // The arena that landing pads are allocated from.
     pub lpad_arena: TypedArena<LandingPad>,
@@ -333,18 +333,21 @@ impl<'a, 'tcx> FunctionContext<'a, 'tcx> {
         }
     }
 
-    pub fn new_block(&'a self,
-                     name: &str)
-                     -> Block<'a, 'tcx> {
+    pub fn new_block(&'a self, name: &str) -> BasicBlockRef {
         unsafe {
             let name = CString::new(name).unwrap();
-            let llbb = llvm::LLVMAppendBasicBlockInContext(self.ccx.llcx(),
-                                                           self.llfn,
-                                                           name.as_ptr());
-            BlockS::new(llbb, self)
+            llvm::LLVMAppendBasicBlockInContext(
+                self.ccx.llcx(),
+                self.llfn,
+                name.as_ptr()
+            )
         }
     }
 
+    pub fn build_new_block(&'a self, name: &str) -> BlockAndBuilder<'a, 'tcx> {
+        BlockAndBuilder::new(self.new_block(name), self)
+    }
+
     pub fn monomorphize<T>(&self, value: &T) -> T
         where T: TransNormalize<'tcx>
     {
@@ -441,94 +444,6 @@ impl<'a, 'tcx> FunctionContext<'a, 'tcx> {
     }
 }
 
-// Basic block context. We create a block context for each basic block
-// (single-entry, single-exit sequence of instructions) we generate from Rust
-// code. Each basic block we generate is attached to a function, typically
-// with many basic blocks per function. All the basic blocks attached to a
-// function are organized as a directed graph.
-#[must_use]
-pub struct BlockS<'blk, 'tcx: 'blk> {
-    // The BasicBlockRef returned from a call to
-    // llvm::LLVMAppendBasicBlock(llfn, name), which adds a basic
-    // block to the function pointed to by llfn. We insert
-    // instructions into that block by way of this block context.
-    // The block pointing to this one in the function's digraph.
-    pub llbb: BasicBlockRef,
-
-    // If this block part of a landing pad, then this is `Some` indicating what
-    // kind of landing pad its in, otherwise this is none.
-    pub lpad: Cell<Option<&'blk LandingPad>>,
-
-    // The function context for the function to which this block is
-    // attached.
-    pub fcx: &'blk FunctionContext<'blk, 'tcx>,
-}
-
-pub type Block<'blk, 'tcx> = &'blk BlockS<'blk, 'tcx>;
-
-impl<'blk, 'tcx> BlockS<'blk, 'tcx> {
-    pub fn new(llbb: BasicBlockRef,
-               fcx: &'blk FunctionContext<'blk, 'tcx>)
-               -> Block<'blk, 'tcx> {
-        fcx.block_arena.alloc(BlockS {
-            llbb: llbb,
-            lpad: Cell::new(None),
-            fcx: fcx
-        })
-    }
-
-    pub fn ccx(&self) -> &'blk CrateContext<'blk, 'tcx> {
-        self.fcx.ccx
-    }
-    pub fn fcx(&self) -> &'blk FunctionContext<'blk, 'tcx> {
-        self.fcx
-    }
-    pub fn tcx(&self) -> TyCtxt<'blk, 'tcx, 'tcx> {
-        self.fcx.ccx.tcx()
-    }
-    pub fn sess(&self) -> &'blk Session { self.fcx.ccx.sess() }
-
-    pub fn lpad(&self) -> Option<&'blk LandingPad> {
-        self.lpad.get()
-    }
-
-    pub fn set_lpad_ref(&self, lpad: Option<&'blk LandingPad>) {
-        // FIXME: use an IVar?
-        self.lpad.set(lpad);
-    }
-
-    pub fn set_lpad(&self, lpad: Option<LandingPad>) {
-        self.set_lpad_ref(lpad.map(|p| &*self.fcx().lpad_arena.alloc(p)))
-    }
-
-    pub fn mir(&self) -> Ref<'tcx, Mir<'tcx>> {
-        self.fcx.mir()
-    }
-
-    pub fn name(&self, name: ast::Name) -> String {
-        name.to_string()
-    }
-
-    pub fn node_id_to_string(&self, id: ast::NodeId) -> String {
-        self.tcx().map.node_to_string(id).to_string()
-    }
-
-    pub fn to_str(&self) -> String {
-        format!("[block {:p}]", self)
-    }
-
-    pub fn monomorphize<T>(&self, value: &T) -> T
-        where T: TransNormalize<'tcx>
-    {
-        monomorphize::apply_param_substs(self.fcx.ccx.shared(),
-                                         self.fcx.param_substs,
-                                         value)
-    }
-
-    pub fn build(&'blk self) -> BlockAndBuilder<'blk, 'tcx> {
-        BlockAndBuilder::new(self, OwnedBuilder::new_with_ccx(self.ccx()))
-    }
-}
 
 pub struct OwnedBuilder<'blk, 'tcx: 'blk> {
     builder: Builder<'blk, 'tcx>
@@ -559,77 +474,78 @@ impl<'blk, 'tcx> Drop for OwnedBuilder<'blk, 'tcx> {
 
 #[must_use]
 pub struct BlockAndBuilder<'blk, 'tcx: 'blk> {
-    bcx: Block<'blk, 'tcx>,
+    // The BasicBlockRef returned from a call to
+    // llvm::LLVMAppendBasicBlock(llfn, name), which adds a basic
+    // block to the function pointed to by llfn. We insert
+    // instructions into that block by way of this block context.
+    // The block pointing to this one in the function's digraph.
+    llbb: BasicBlockRef,
+
+    // If this block part of a landing pad, then this is `Some` indicating what
+    // kind of landing pad its in, otherwise this is none.
+    lpad: Cell<Option<&'blk LandingPad>>,
+
+    // The function context for the function to which this block is
+    // attached.
+    fcx: &'blk FunctionContext<'blk, 'tcx>,
+
     owned_builder: OwnedBuilder<'blk, 'tcx>,
 }
 
 impl<'blk, 'tcx> BlockAndBuilder<'blk, 'tcx> {
-    pub fn new(bcx: Block<'blk, 'tcx>, owned_builder: OwnedBuilder<'blk, 'tcx>) -> Self {
+    pub fn new(llbb: BasicBlockRef, fcx: &'blk FunctionContext<'blk, 'tcx>) -> Self {
+        let owned_builder = OwnedBuilder::new_with_ccx(fcx.ccx);
         // Set the builder's position to this block's end.
-        owned_builder.builder.position_at_end(bcx.llbb);
+        owned_builder.builder.position_at_end(llbb);
         BlockAndBuilder {
-            bcx: bcx,
+            llbb: llbb,
+            lpad: Cell::new(None),
+            fcx: fcx,
             owned_builder: owned_builder,
         }
     }
 
-    pub fn with_block<F, R>(&self, f: F) -> R
-        where F: FnOnce(Block<'blk, 'tcx>) -> R
-    {
-        let result = f(self.bcx);
-        self.position_at_end(self.bcx.llbb);
-        result
-    }
-
     pub fn at_start<F, R>(&self, f: F) -> R
         where F: FnOnce(&BlockAndBuilder<'blk, 'tcx>) -> R
     {
-        self.position_at_start(self.bcx.llbb);
+        self.position_at_start(self.llbb);
         let r = f(self);
-        self.position_at_end(self.bcx.llbb);
+        self.position_at_end(self.llbb);
         r
     }
 
-    // Methods delegated to bcx
-
     pub fn ccx(&self) -> &'blk CrateContext<'blk, 'tcx> {
-        self.bcx.ccx()
+        self.fcx.ccx
     }
     pub fn fcx(&self) -> &'blk FunctionContext<'blk, 'tcx> {
-        self.bcx.fcx()
+        self.fcx
     }
     pub fn tcx(&self) -> TyCtxt<'blk, 'tcx, 'tcx> {
-        self.bcx.tcx()
+        self.fcx.ccx.tcx()
    }
     pub fn sess(&self) -> &'blk Session {
-        self.bcx.sess()
+        self.fcx.ccx.sess()
     }
 
     pub fn llbb(&self) -> BasicBlockRef {
-        self.bcx.llbb
+        self.llbb
     }
 
     pub fn mir(&self) -> Ref<'tcx, Mir<'tcx>> {
-        self.bcx.mir()
-    }
-
-    pub fn monomorphize<T>(&self, value: &T) -> T
-        where T: TransNormalize<'tcx>
-    {
-        self.bcx.monomorphize(value)
+        self.fcx.mir()
     }
 
     pub fn set_lpad(&self, lpad: Option<LandingPad>) {
-        self.bcx.set_lpad(lpad)
+        self.set_lpad_ref(lpad.map(|p| &*self.fcx().lpad_arena.alloc(p)))
     }
 
     pub fn set_lpad_ref(&self, lpad: Option<&'blk LandingPad>) {
         // FIXME: use an IVar?
-        self.bcx.set_lpad_ref(lpad);
+        self.lpad.set(lpad);
     }
 
     pub fn lpad(&self) -> Option<&'blk LandingPad> {
-        self.bcx.lpad()
+        self.lpad.get()
     }
 }
diff --git a/src/librustc_trans/glue.rs b/src/librustc_trans/glue.rs
index e6db048cf86a7..44c580b07400d 100644
--- a/src/librustc_trans/glue.rs
+++ b/src/librustc_trans/glue.rs
@@ -33,7 +33,6 @@ use type_::Type;
 use value::Value;
 use Disr;
 
-use arena::TypedArena;
 use syntax_pos::DUMMY_SP;
 
 pub fn trans_exchange_free_dyn<'blk, 'tcx>(bcx: BlockAndBuilder<'blk, 'tcx>,
@@ -121,19 +120,23 @@ pub fn get_drop_glue_type<'a, 'tcx>(tcx: TyCtxt<'a, 'tcx, 'tcx>,
     }
 }
 
-pub fn drop_ty<'blk, 'tcx>(bcx: BlockAndBuilder<'blk, 'tcx>,
-                           v: ValueRef,
-                           t: Ty<'tcx>) -> BlockAndBuilder<'blk, 'tcx> {
-    drop_ty_core(bcx, v, t, false)
+fn drop_ty<'blk, 'tcx>(
+    bcx: BlockAndBuilder<'blk, 'tcx>,
+    v: ValueRef,
+    t: Ty<'tcx>,
+) -> BlockAndBuilder<'blk, 'tcx> {
+    call_drop_glue(bcx, v, t, false, None)
 }
 
-pub fn drop_ty_core<'blk, 'tcx>(bcx: BlockAndBuilder<'blk, 'tcx>,
-                                v: ValueRef,
-                                t: Ty<'tcx>,
-                                skip_dtor: bool)
-                                -> BlockAndBuilder<'blk, 'tcx> {
+pub fn call_drop_glue<'blk, 'tcx>(
+    bcx: BlockAndBuilder<'blk, 'tcx>,
+    v: ValueRef,
+    t: Ty<'tcx>,
+    skip_dtor: bool,
+    lpad: Option<&'blk LandingPad>,
+) -> BlockAndBuilder<'blk, 'tcx> {
     // NB: v is an *alias* of type t here, not a direct value.
-    debug!("drop_ty_core(t={:?}, skip_dtor={})", t, skip_dtor);
+    debug!("call_drop_glue(t={:?}, skip_dtor={})", t, skip_dtor);
     let _icx = push_ctxt("drop_ty");
     if bcx.fcx().type_needs_drop(t) {
         let ccx = bcx.ccx();
@@ -151,25 +154,11 @@ pub fn drop_ty_core<'blk, 'tcx>(bcx: BlockAndBuilder<'blk, 'tcx>,
         };
 
         // No drop-hint ==> call standard drop glue
-        bcx.call(glue, &[ptr], bcx.lpad().and_then(|b| b.bundle()));
+        bcx.call(glue, &[ptr], lpad.and_then(|b| b.bundle()));
     }
     bcx
 }
 
-pub fn drop_ty_immediate<'blk, 'tcx>(bcx: BlockAndBuilder<'blk, 'tcx>,
-                                     v: ValueRef,
-                                     t: Ty<'tcx>,
-                                     skip_dtor: bool)
-                                     -> BlockAndBuilder<'blk, 'tcx> {
-    let _icx = push_ctxt("drop_ty_immediate");
-    let vp = alloc_ty(&bcx, t, "");
-    Lifetime::Start.call(&bcx, vp);
-    store_ty(&bcx, v, vp, t);
-    let bcx = drop_ty_core(bcx, vp, t, skip_dtor);
-    Lifetime::End.call(&bcx, vp);
-    bcx
-}
-
 pub fn get_drop_glue<'a, 'tcx>(ccx: &CrateContext<'a, 'tcx>, t: Ty<'tcx>) -> ValueRef {
     get_drop_glue_core(ccx, DropGlueKind::Ty(t))
 }
@@ -221,9 +210,7 @@ pub fn implement_drop_glue<'a, 'tcx>(ccx: &CrateContext<'a, 'tcx>,
     assert_eq!(g.ty(), get_drop_glue_type(tcx, g.ty()));
     let (llfn, fn_ty) = ccx.drop_glues().borrow().get(&g).unwrap().clone();
 
-    let (arena, fcx): (TypedArena<_>, FunctionContext);
-    arena = TypedArena::new();
-    fcx = FunctionContext::new(ccx, llfn, fn_ty, None, &arena);
+    let fcx = FunctionContext::new(ccx, llfn, fn_ty, None);
 
     let bcx = fcx.init(false);
 
@@ -588,15 +575,15 @@ fn drop_structural_ty<'blk, 'tcx>(cx: BlockAndBuilder<'blk, 'tcx>,
             // from the outer function, and any other use case will only
             // call this for an already-valid enum in which case the `ret
             // void` will never be hit.
-            let ret_void_cx = fcx.new_block("enum-iter-ret-void").build();
+            let ret_void_cx = fcx.build_new_block("enum-iter-ret-void");
             ret_void_cx.ret_void();
             let llswitch = cx.switch(lldiscrim_a, ret_void_cx.llbb(), n_variants);
-            let next_cx = fcx.new_block("enum-iter-next").build();
+            let next_cx = fcx.build_new_block("enum-iter-next");
 
             for variant in &adt.variants {
                 let variant_cx_name = format!("enum-iter-variant-{}", &variant.disr_val.to_string());
-                let variant_cx = fcx.new_block(&variant_cx_name).build();
+                let variant_cx = fcx.build_new_block(&variant_cx_name);
                 let case_val = adt::trans_case(&cx, t, Disr::from(variant.disr_val));
                 variant_cx.add_case(llswitch, case_val, variant_cx.llbb());
                 let variant_cx = iter_variant(variant_cx, t, value, variant, substs);
diff --git a/src/librustc_trans/intrinsic.rs b/src/librustc_trans/intrinsic.rs
index 303f0f273625a..94a57d9568ef2 100644
--- a/src/librustc_trans/intrinsic.rs
+++ b/src/librustc_trans/intrinsic.rs
@@ -10,7 +10,6 @@
 
 #![allow(non_upper_case_globals)]
 
-use arena::TypedArena;
 use intrinsics::{self, Intrinsic};
 use libc;
 use llvm;
@@ -812,10 +811,10 @@ fn trans_msvc_try<'blk, 'tcx>(bcx: &BlockAndBuilder<'blk, 'tcx>,
 
         bcx.set_personality_fn(bcx.fcx().eh_personality());
 
-        let normal = bcx.fcx().new_block("normal").build();
-        let catchswitch = bcx.fcx().new_block("catchswitch").build();
-        let catchpad = bcx.fcx().new_block("catchpad").build();
-        let caught = bcx.fcx().new_block("caught").build();
+        let normal = bcx.fcx().build_new_block("normal");
+        let catchswitch = bcx.fcx().build_new_block("catchswitch");
+        let catchpad = bcx.fcx().build_new_block("catchpad");
+        let caught = bcx.fcx().build_new_block("caught");
 
         let func = llvm::get_param(bcx.fcx().llfn, 0);
         let data = llvm::get_param(bcx.fcx().llfn, 1);
@@ -930,8 +929,8 @@ fn trans_gnu_try<'blk, 'tcx>(bcx: &BlockAndBuilder<'blk, 'tcx>,
         // expected to be `*mut *mut u8` for this to actually work, but that's
         // managed by the standard library.
 
-        let then = bcx.fcx().new_block("then").build();
-        let catch = bcx.fcx().new_block("catch").build();
+        let then = bcx.fcx().build_new_block("then");
+        let catch = bcx.fcx().build_new_block("catch");
 
         let func = llvm::get_param(bcx.fcx().llfn, 0);
         let data = llvm::get_param(bcx.fcx().llfn, 1);
@@ -978,9 +977,7 @@ fn gen_fn<'a, 'tcx>(fcx: &FunctionContext<'a, 'tcx>,
         sig: ty::Binder(sig)
     }));
     let llfn = declare::define_internal_fn(ccx, name, rust_fn_ty);
-    let (fcx, block_arena);
-    block_arena = TypedArena::new();
-    fcx = FunctionContext::new(ccx, llfn, fn_ty, None, &block_arena);
+    let fcx = FunctionContext::new(ccx, llfn, fn_ty, None);
     trans(fcx.init(true));
     fcx.cleanup();
     llfn
diff --git a/src/librustc_trans/meth.rs b/src/librustc_trans/meth.rs
index e23c545753fe9..66eb27ae11f78 100644
--- a/src/librustc_trans/meth.rs
+++ b/src/librustc_trans/meth.rs
@@ -9,7 +9,6 @@
 // except according to those terms.
 
 use attributes;
-use arena::TypedArena;
 use llvm::{ValueRef, get_params};
 use rustc::traits;
 use abi::FnType;
@@ -84,9 +83,7 @@ pub fn trans_object_shim<'a, 'tcx>(ccx: &'a CrateContext<'a, 'tcx>,
     let llfn = declare::define_internal_fn(ccx, &function_name, callee.ty);
     attributes::set_frame_pointer_elimination(ccx, llfn);
 
-    let (block_arena, fcx): (TypedArena<_>, FunctionContext);
-    block_arena = TypedArena::new();
-    fcx = FunctionContext::new(ccx, llfn, fn_ty, None, &block_arena);
+    let fcx = FunctionContext::new(ccx, llfn, fn_ty, None);
     let mut bcx = fcx.init(false);
 
     let dest = fcx.llretslotptr.get();
diff --git a/src/librustc_trans/mir/analyze.rs b/src/librustc_trans/mir/analyze.rs
index e4d0533ec8784..4b6998d0505dc 100644
--- a/src/librustc_trans/mir/analyze.rs
+++ b/src/librustc_trans/mir/analyze.rs
@@ -16,19 +16,18 @@ use rustc_data_structures::indexed_vec::{Idx, IndexVec};
 use rustc::mir::{self, Location, TerminatorKind};
 use rustc::mir::visit::{Visitor, LvalueContext};
 use rustc::mir::traversal;
-use common::{self, Block, BlockAndBuilder};
+use common::{self, BlockAndBuilder};
 use glue;
 use super::rvalue;
 
-pub fn lvalue_locals<'bcx, 'tcx>(bcx: Block<'bcx,'tcx>,
+pub fn lvalue_locals<'bcx, 'tcx>(bcx: &BlockAndBuilder<'bcx,'tcx>,
                                  mir: &mir::Mir<'tcx>) -> BitVector {
-    let bcx = bcx.build();
     let mut analyzer = LocalAnalyzer::new(mir, &bcx);
 
     analyzer.visit_mir(mir);
 
     for (index, ty) in mir.local_decls.iter().map(|l| l.ty).enumerate() {
-        let ty = bcx.monomorphize(&ty);
+        let ty = bcx.fcx().monomorphize(&ty);
         debug!("local {} has type {:?}", index, ty);
 
         if ty.is_scalar() || ty.is_unique() ||
@@ -142,7 +141,7 @@ impl<'mir, 'bcx, 'tcx> Visitor<'tcx> for LocalAnalyzer<'mir, 'bcx, 'tcx> {
                 if let mir::Lvalue::Local(_) = proj.base {
                     let ty = proj.base.ty(self.mir, self.bcx.tcx());
 
-                    let ty = self.bcx.monomorphize(&ty.to_ty(self.bcx.tcx()));
+                    let ty = self.bcx.fcx().monomorphize(&ty.to_ty(self.bcx.tcx()));
                     if common::type_is_imm_pair(self.bcx.ccx(), ty) {
                         if let mir::ProjectionElem::Field(..) = proj.elem {
                             if let LvalueContext::Consume = context {
@@ -172,7 +171,7 @@ impl<'mir, 'bcx, 'tcx> Visitor<'tcx> for LocalAnalyzer<'mir, 'bcx, 'tcx> {
 
             LvalueContext::Drop => {
                 let ty = lvalue.ty(self.mir, self.bcx.tcx());
-                let ty = self.bcx.monomorphize(&ty.to_ty(self.bcx.tcx()));
+                let ty = self.bcx.fcx().monomorphize(&ty.to_ty(self.bcx.tcx()));
 
                 // Only need the lvalue if we're actually dropping it.
                 if glue::type_needs_drop(self.bcx.tcx(), ty) {
@@ -200,10 +199,7 @@ pub enum CleanupKind {
     Internal { funclet: mir::BasicBlock }
 }
 
-pub fn cleanup_kinds<'bcx,'tcx>(_bcx: Block<'bcx,'tcx>,
-                                mir: &mir::Mir<'tcx>)
-                                -> IndexVec
-{
+pub fn cleanup_kinds<'bcx,'tcx>(mir: &mir::Mir<'tcx>) -> IndexVec {
     fn discover_masters<'tcx>(result: &mut IndexVec,
                               mir: &mir::Mir<'tcx>) {
         for (bb, data) in mir.basic_blocks().iter_enumerated() {
diff --git a/src/librustc_trans/mir/block.rs b/src/librustc_trans/mir/block.rs
index a37c6d0d2bb90..151d1f018208d 100644
--- a/src/librustc_trans/mir/block.rs
+++ b/src/librustc_trans/mir/block.rs
@@ -8,7 +8,7 @@
 // option. This file may not be copied, modified, or distributed
 // except according to those terms.
 
-use llvm::{self, ValueRef};
+use llvm::{self, ValueRef, BasicBlockRef};
 use rustc_const_eval::{ErrKind, ConstEvalErr, note_const_eval_err};
 use rustc::middle::lang_items;
 use rustc::ty::{self, layout};
@@ -17,7 +17,7 @@ use abi::{Abi, FnType, ArgType};
 use adt;
 use base::{self, Lifetime};
 use callee::{Callee, CalleeData, Fn, Intrinsic, NamedTupleConstructor, Virtual};
-use common::{self, Block, BlockAndBuilder, LandingPad};
+use common::{self, BlockAndBuilder, LandingPad};
 use common::{C_bool, C_str_slice, C_struct, C_u32, C_undef};
 use consts;
 use debuginfo::DebugLoc;
@@ -28,6 +28,7 @@ use type_of;
 use glue;
 use type_::Type;
 
+use rustc_data_structures::indexed_vec::IndexVec;
 use rustc_data_structures::fx::FxHashMap;
 use syntax::symbol::Symbol;
 
@@ -42,18 +43,24 @@ use std::cell::Ref as CellRef;
 use std::ptr;
 
 impl<'bcx, 'tcx> MirContext<'bcx, 'tcx> {
-    pub fn trans_block(&mut self, bb: mir::BasicBlock) {
-        let mut bcx = self.bcx(bb);
+    pub fn trans_block(&mut self, bb: mir::BasicBlock,
+                       lpads: &IndexVec>) {
+        let mut bcx = self.build_block(bb);
         let data = &CellRef::clone(&self.mir)[bb];
 
         debug!("trans_block({:?}={:?})", bb, data);
 
+        let lpad = match self.cleanup_kinds[bb] {
+            CleanupKind::Internal { funclet } => lpads[funclet].as_ref(),
+            _ => lpads[bb].as_ref(),
+        };
+
         // Create the cleanup bundle, if needed.
-        let cleanup_pad = bcx.lpad().and_then(|lp| lp.cleanuppad());
-        let cleanup_bundle = bcx.lpad().and_then(|l| l.bundle());
+        let cleanup_pad = lpad.and_then(|lp| lp.cleanuppad());
+        let cleanup_bundle = lpad.and_then(|l| l.bundle());
 
         let funclet_br = |this: &Self, bcx: BlockAndBuilder, bb: mir::BasicBlock| {
-            let lltarget = this.blocks[bb].llbb;
+            let lltarget = this.blocks[bb];
             if let Some(cp) = cleanup_pad {
                 match this.cleanup_kinds[bb] {
                     CleanupKind::Funclet => {
@@ -70,7 +77,7 @@ impl<'bcx, 'tcx> MirContext<'bcx, 'tcx> {
        };
 
         let llblock = |this: &mut Self, target: mir::BasicBlock| {
-            let lltarget = this.blocks[target].llbb;
+            let lltarget = this.blocks[target];
 
             if let Some(cp) = cleanup_pad {
                 match this.cleanup_kinds[target] {
@@ -79,7 +86,7 @@ impl<'bcx, 'tcx> MirContext<'bcx, 'tcx> {
                         debug!("llblock: creating cleanup trampoline for {:?}", target);
                         let name = &format!("{:?}_cleanup_trampoline_{:?}", bb, target);
-                        let trampoline = this.fcx.new_block(name).build();
+                        let trampoline = this.fcx.build_new_block(name);
                         trampoline.set_personality_fn(this.fcx.eh_personality());
                         trampoline.cleanup_ret(cp, Some(lltarget));
                         trampoline.llbb()
@@ -93,7 +100,7 @@ impl<'bcx, 'tcx> MirContext<'bcx, 'tcx> {
                     (this.cleanup_kinds[bb], this.cleanup_kinds[target])
                 {
                     // jump *into* cleanup - need a landing pad if GNU
-                    this.landing_pad_to(target).llbb
+                    this.landing_pad_to(target)
                 } else {
                     lltarget
                 }
@@ -122,7 +129,16 @@ impl<'bcx, 'tcx> MirContext<'bcx, 'tcx> {
                     let ps = self.get_personality_slot(&bcx);
                     let lp = bcx.load(ps);
                     Lifetime::End.call(&bcx, ps);
-                    base::trans_unwind_resume(&bcx, lp);
+                    if !bcx.sess().target.target.options.custom_unwind_resume {
+                        bcx.resume(lp);
+                    } else {
+                        let exc_ptr = bcx.extract_value(lp, 0);
+                        bcx.call(
+                            bcx.fcx().eh_unwind_resume().reify(bcx.ccx()),
+                            &[exc_ptr],
+                            cleanup_bundle,
+                        );
+                    }
                 }
             }
 
@@ -158,7 +174,7 @@ impl<'bcx, 'tcx> MirContext<'bcx, 'tcx> {
                     // We're generating an exhaustive switch, so the else branch
                    // can't be hit. Branching to an unreachable instruction
                    // lets LLVM know this
-                    _ => (None, self.unreachable_block().llbb)
+                    _ => (None, self.unreachable_block())
                 };
                 let switch = bcx.switch(discr, default_blk, targets.len());
                 assert_eq!(adt_def.variants.len(), targets.len());
@@ -228,7 +244,7 @@ impl<'bcx, 'tcx> MirContext<'bcx, 'tcx> {
 
             mir::TerminatorKind::Drop { ref location, target, unwind } => {
                 let ty = location.ty(&self.mir, bcx.tcx()).to_ty(bcx.tcx());
-                let ty = bcx.monomorphize(&ty);
+                let ty = bcx.fcx().monomorphize(&ty);
 
                 // Double check for necessity to drop
                 if !glue::type_needs_drop(bcx.tcx(), ty) {
@@ -263,7 +279,7 @@ impl<'bcx, 'tcx> MirContext<'bcx, 'tcx> {
                 if let Some(unwind) = unwind {
                     bcx.invoke(drop_fn,
                                &[llvalue],
-                               self.blocks[target].llbb,
+                               self.blocks[target],
                                llblock(self, unwind),
                                cleanup_bundle);
                 } else {
@@ -304,15 +320,15 @@ impl<'bcx, 'tcx> MirContext<'bcx, 'tcx> {
 
                 // Create the failure block and the conditional branch to it.
                 let lltarget = llblock(self, target);
-                let panic_block = self.fcx.new_block("panic");
+                let panic_block = self.fcx.build_new_block("panic");
                 if expected {
-                    bcx.cond_br(cond, lltarget, panic_block.llbb);
+                    bcx.cond_br(cond, lltarget, panic_block.llbb());
                 } else {
-                    bcx.cond_br(cond, panic_block.llbb, lltarget);
+                    bcx.cond_br(cond, panic_block.llbb(), lltarget);
                 }
 
                 // After this point, bcx is the block for the call to panic.
-                bcx = panic_block.build();
+                bcx = panic_block;
                 debug_loc.apply_to_bcx(&bcx);
 
                 // Get the location information.
@@ -385,7 +401,7 @@ impl<'bcx, 'tcx> MirContext<'bcx, 'tcx> {
                         if let Some(unwind) = cleanup {
                             bcx.invoke(llfn,
                                        &args,
-                                       self.unreachable_block().llbb,
+                                       self.unreachable_block(),
                                        llblock(self, unwind),
                                        cleanup_bundle);
                         } else {
@@ -485,7 +501,7 @@ impl<'bcx, 'tcx> MirContext<'bcx, 'tcx> {
                     if let Some(unwind) = *cleanup {
                         bcx.invoke(drop_fn,
                                    &[llvalue],
-                                   self.blocks[target].llbb,
+                                   self.blocks[target],
                                    llblock(self, unwind),
                                    cleanup_bundle);
                     } else {
@@ -508,7 +524,7 @@ impl<'bcx, 'tcx> MirContext<'bcx, 'tcx> {
                 let extra_args = &args[sig.inputs().len()..];
                 let extra_args = extra_args.iter().map(|op_arg| {
                     let op_ty = op_arg.ty(&self.mir, bcx.tcx());
-                    bcx.monomorphize(&op_ty)
+                    bcx.fcx().monomorphize(&op_ty)
                 }).collect::>();
 
                 let fn_ty = callee.direct_fn_type(bcx.ccx(), &extra_args);
@@ -621,13 +637,13 @@ impl<'bcx, 'tcx> MirContext<'bcx, 'tcx> {
                     };
                     let invokeret = bcx.invoke(fn_ptr,
                                                &llargs,
-                                               ret_bcx.llbb,
+                                               ret_bcx,
                                                llblock(self, cleanup),
                                                cleanup_bundle);
                     fn_ty.apply_attrs_callsite(invokeret);
 
-                    if destination.is_some() {
-                        let ret_bcx = ret_bcx.build();
+                    if let Some((_, target)) = *destination {
+                        let ret_bcx = self.build_block(target);
                         ret_bcx.at_start(|ret_bcx| {
                             debug_loc.apply_to_bcx(ret_bcx);
                             let op = OperandRef {
@@ -824,8 +840,7 @@ impl<'bcx, 'tcx> MirContext<'bcx, 'tcx> {
     /// Return the landingpad wrapper around the given basic block
     ///
     /// No-op in MSVC SEH scheme.
-    fn landing_pad_to(&mut self, target_bb: mir::BasicBlock) -> Block<'bcx, 'tcx>
-    {
+    fn landing_pad_to(&mut self, target_bb: mir::BasicBlock) -> BasicBlockRef {
         if let Some(block) = self.landing_pads[target_bb] {
             return block;
         }
@@ -834,12 +849,11 @@ impl<'bcx, 'tcx> MirContext<'bcx, 'tcx> {
             return self.blocks[target_bb];
         }
 
-        let target = self.bcx(target_bb);
+        let target = self.build_block(target_bb);
 
-        let block = self.fcx.new_block("cleanup");
-        self.landing_pads[target_bb] = Some(block);
+        let bcx = self.fcx.build_new_block("cleanup");
+        self.landing_pads[target_bb] = Some(bcx.llbb());
 
-        let bcx = block.build();
         let ccx = bcx.ccx();
         let llpersonality = self.fcx.eh_personality();
         let llretty = Type::struct_(ccx, &[Type::i8p(ccx), Type::i32(ccx)], false);
@@ -848,46 +862,47 @@ impl<'bcx, 'tcx> MirContext<'bcx, 'tcx> {
         let slot = self.get_personality_slot(&bcx);
         bcx.store(llretval, slot);
         bcx.br(target.llbb());
-        block
+        bcx.llbb()
     }
 
-    pub fn init_cpad(&mut self, bb: mir::BasicBlock) {
-        let bcx = self.bcx(bb);
+    pub fn init_cpad(&mut self, bb: mir::BasicBlock,
+                     lpads: &mut IndexVec>) {
+        let bcx = self.build_block(bb);
         let data = &self.mir[bb];
         debug!("init_cpad({:?})", data);
 
         match self.cleanup_kinds[bb] {
             CleanupKind::NotCleanup => {
-                bcx.set_lpad(None)
+                lpads[bb] = None;
             }
             _ if !base::wants_msvc_seh(bcx.sess()) => {
-                bcx.set_lpad(Some(LandingPad::gnu()))
+                lpads[bb] = Some(LandingPad::gnu());
             }
-            CleanupKind::Internal { funclet } => {
+            CleanupKind::Internal { funclet: _ } => {
                 // FIXME: is this needed?
                 bcx.set_personality_fn(self.fcx.eh_personality());
-                bcx.set_lpad_ref(self.bcx(funclet).lpad());
+                lpads[bb] = None;
             }
             CleanupKind::Funclet => {
                 bcx.set_personality_fn(self.fcx.eh_personality());
                 DebugLoc::None.apply_to_bcx(&bcx);
                 let cleanup_pad = bcx.cleanup_pad(None, &[]);
-                bcx.set_lpad(Some(LandingPad::msvc(cleanup_pad)));
+                lpads[bb] = Some(LandingPad::msvc(cleanup_pad));
             }
        };
    }
 
-    fn unreachable_block(&mut self) -> Block<'bcx, 'tcx> {
+    fn unreachable_block(&mut self) -> BasicBlockRef {
         self.unreachable_block.unwrap_or_else(|| {
-            let bl = self.fcx.new_block("unreachable");
-            bl.build().unreachable();
-            self.unreachable_block = Some(bl);
-            bl
+            let bl = self.fcx.build_new_block("unreachable");
+            bl.unreachable();
+            self.unreachable_block = Some(bl.llbb());
+            bl.llbb()
         })
    }
 
-    fn bcx(&self, bb: mir::BasicBlock) -> BlockAndBuilder<'bcx, 'tcx> {
-        self.blocks[bb].build()
+    fn build_block(&self, bb: mir::BasicBlock) -> BlockAndBuilder<'bcx, 'tcx> {
+        BlockAndBuilder::new(self.blocks[bb], self.fcx)
    }
 
     fn make_return_dest(&mut self, bcx: &BlockAndBuilder<'bcx, 'tcx>,
diff --git a/src/librustc_trans/mir/constant.rs b/src/librustc_trans/mir/constant.rs
index bca81fa36458f..8143190a58def 100644
--- a/src/librustc_trans/mir/constant.rs
+++ b/src/librustc_trans/mir/constant.rs
@@ -952,7 +952,7 @@ impl<'bcx, 'tcx> MirContext<'bcx, 'tcx> {
                           -> Const<'tcx>
     {
         debug!("trans_constant({:?})", constant);
-        let ty = bcx.monomorphize(&constant.ty);
+        let ty = bcx.fcx().monomorphize(&constant.ty);
         let result = match constant.literal.clone() {
             mir::Literal::Item { def_id, substs } => {
                 // Shortcut for zero-sized types, including function item
@@ -962,7 +962,7 @@ impl<'bcx, 'tcx> MirContext<'bcx, 'tcx> {
                     return Const::new(C_null(llty), ty);
                }
 
-                let substs = bcx.monomorphize(&substs);
+                let substs = bcx.fcx().monomorphize(&substs);
                 let instance = Instance::new(def_id, substs);
                 MirConstContext::trans_def(bcx.ccx(), instance, IndexVec::new())
            }
diff --git a/src/librustc_trans/mir/lvalue.rs b/src/librustc_trans/mir/lvalue.rs
index cb440eda18208..c6be7eaa77562 100644
--- a/src/librustc_trans/mir/lvalue.rs
+++ b/src/librustc_trans/mir/lvalue.rs
@@ -103,7 +103,7 @@ impl<'bcx, 'tcx> MirContext<'bcx, 'tcx> {
                 let ptr = self.trans_consume(bcx, base);
                 let projected_ty = LvalueTy::from_ty(ptr.ty)
                     .projection_ty(tcx, &mir::ProjectionElem::Deref);
-                let projected_ty = bcx.monomorphize(&projected_ty);
+                let projected_ty = bcx.fcx().monomorphize(&projected_ty);
                 let (llptr, llextra) = match ptr.val {
                     OperandValue::Immediate(llptr) => (llptr, ptr::null_mut()),
                     OperandValue::Pair(llptr, llextra) => (llptr, llextra),
@@ -118,7 +118,7 @@ impl<'bcx, 'tcx> MirContext<'bcx, 'tcx> {
             mir::Lvalue::Projection(ref projection) => {
                 let tr_base = self.trans_lvalue(bcx, &projection.base);
                 let projected_ty = tr_base.ty.projection_ty(tcx, &projection.elem);
-                let projected_ty = bcx.monomorphize(&projected_ty);
+                let projected_ty = bcx.fcx().monomorphize(&projected_ty);
 
                 let project_index = |llindex| {
                     let element = if let ty::TySlice(_) = tr_base.ty.to_ty(tcx).sty {
diff --git a/src/librustc_trans/mir/mod.rs b/src/librustc_trans/mir/mod.rs
index 27dea2fd231b9..33014af0f88ed 100644
--- a/src/librustc_trans/mir/mod.rs
+++ b/src/librustc_trans/mir/mod.rs
@@ -9,13 +9,13 @@
 // except according to those terms.
 
 use libc::c_uint;
-use llvm::{self, ValueRef};
-use rustc::ty::{self, layout};
+use llvm::{self, ValueRef, BasicBlockRef};
+use rustc::ty;
 use rustc::mir;
 use rustc::mir::tcx::LvalueTy;
 use session::config::FullDebugInfo;
 use base;
-use common::{self, Block, BlockAndBuilder, CrateContext, FunctionContext, C_null};
+use common::{self, BlockAndBuilder, CrateContext, FunctionContext, C_null, LandingPad};
 use debuginfo::{self, declare_local, DebugLoc, VariableAccess, VariableKind,
                 FunctionDebugContext};
 use type_of;
@@ -54,17 +54,17 @@ pub struct MirContext<'bcx, 'tcx:'bcx> {
     llpersonalityslot: Option,
 
     /// A `Block` for each MIR `BasicBlock`
-    blocks: IndexVec>,
+    blocks: IndexVec,
 
     /// The funclet status of each basic block
     cleanup_kinds: IndexVec,
 
     /// This stores the landing-pad block for a given BB, computed lazily on GNU
     /// and eagerly on MSVC.
-    landing_pads: IndexVec>>,
+    landing_pads: IndexVec>,
 
     /// Cached unreachable block
-    unreachable_block: Option>,
+    unreachable_block: Option,
 
     /// The location where each MIR arg/var/tmp/ret is stored. This is
     /// usually an `LvalueRef` representing an alloca, but not always:
@@ -186,13 +186,11 @@ pub fn trans_mir<'blk, 'tcx: 'blk>(fcx: &'blk FunctionContext<'blk, 'tcx>) {
 
     // Analyze the temps to determine which must be lvalues
     // FIXME
-    let (lvalue_locals, cleanup_kinds) = bcx.with_block(|bcx| {
-        (analyze::lvalue_locals(bcx, &mir),
-         analyze::cleanup_kinds(bcx, &mir))
-    });
+    let lvalue_locals = analyze::lvalue_locals(&bcx, &mir);
+    let cleanup_kinds = analyze::cleanup_kinds(&mir);
 
     // Allocate a `Block` for every basic block
-    let block_bcxs: IndexVec> =
+    let block_bcxs: IndexVec =
         mir.basic_blocks().indices().map(|bb| {
             if bb == mir::START_BLOCK {
                 fcx.new_block("start")
@@ -222,7 +220,7 @@ pub fn trans_mir<'blk, 'tcx: 'blk>(fcx: &'blk FunctionContext<'blk, 'tcx>) {
     let mut allocate_local = |local| {
         let decl = &mir.local_decls[local];
-        let ty = bcx.monomorphize(&decl.ty);
+        let ty = bcx.fcx().monomorphize(&decl.ty);
 
         if let Some(name) = decl.name {
             // User variable
@@ -276,7 +274,7 @@ pub fn trans_mir<'blk, 'tcx: 'blk>(fcx: &'blk FunctionContext<'blk, 'tcx>) {
 
     // Branch to the START block
     let start_bcx = mircx.blocks[mir::START_BLOCK];
-    bcx.br(start_bcx.llbb);
+    bcx.br(start_bcx);
 
     // Up until here, IR instructions for this function have explicitly not been annotated with
     // source code location, so we don't step into call setup code. From here on, source location
@@ -287,23 +285,26 @@ pub fn trans_mir<'blk, 'tcx: 'blk>(fcx: &'blk FunctionContext<'blk, 'tcx>) {
 
     let mut rpo = traversal::reverse_postorder(&mir);
 
+    let mut lpads: IndexVec> =
+        IndexVec::from_elem(None, mir.basic_blocks());
+
     // Prepare each block for translation.
     for (bb, _) in rpo.by_ref() {
-        mircx.init_cpad(bb);
+        mircx.init_cpad(bb, &mut lpads);
    }
     rpo.reset();
 
     // Translate the body of each block using reverse postorder
     for (bb, _) in rpo {
         visited.insert(bb.index());
-        mircx.trans_block(bb);
+        mircx.trans_block(bb, &lpads);
    }
 
     // Remove blocks that haven't been visited, or have no
     // predecessors.
     for bb in mir.basic_blocks().indices() {
         let block = mircx.blocks[bb];
-        let block = BasicBlock(block.llbb);
+        let block = BasicBlock(block);
         // Unreachable block
         if !visited.contains(bb.index()) {
             debug!("trans_mir: block {:?} was not visited", bb);
@@ -338,7 +339,7 @@ fn arg_local_refs<'bcx, 'tcx>(bcx: &BlockAndBuilder<'bcx, 'tcx>,
 
     mir.args_iter().enumerate().map(|(arg_index, local)| {
         let arg_decl = &mir.local_decls[local];
-        let arg_ty = bcx.monomorphize(&arg_decl.ty);
+        let arg_ty = bcx.fcx().monomorphize(&arg_decl.ty);
 
         if Some(local) == mir.spread_arg {
             // This argument (e.g. the last argument in the "rust-call" ABI)
diff --git a/src/librustc_trans/mir/operand.rs b/src/librustc_trans/mir/operand.rs
index b3ea8d5c76321..3c737fd6ad7d8 100644
--- a/src/librustc_trans/mir/operand.rs
+++ b/src/librustc_trans/mir/operand.rs
@@ -197,7 +197,7 @@ impl<'bcx, 'tcx> MirContext<'bcx, 'tcx> {
                 let llval = [a, b][f.index()];
                 let op = OperandRef {
                     val: OperandValue::Immediate(llval),
-                    ty: bcx.monomorphize(&ty)
+                    ty: bcx.fcx().monomorphize(&ty)
                };
 
                 // Handle nested pairs.
diff --git a/src/librustc_trans/mir/rvalue.rs b/src/librustc_trans/mir/rvalue.rs
index e32d25dc01ef6..e71449938e9b5 100644
--- a/src/librustc_trans/mir/rvalue.rs
+++ b/src/librustc_trans/mir/rvalue.rs
@@ -52,7 +52,7 @@ impl<'bcx, 'tcx> MirContext<'bcx, 'tcx> {
            }
 
             mir::Rvalue::Cast(mir::CastKind::Unsize, ref source, cast_ty) => {
-                let cast_ty = bcx.monomorphize(&cast_ty);
+                let cast_ty = bcx.fcx().monomorphize(&cast_ty);
 
                 if common::type_is_fat_ptr(bcx.tcx(), cast_ty) {
                     // into-coerce of a thin pointer to a fat pointer - just
@@ -187,7 +187,7 @@ impl<'bcx, 'tcx> MirContext<'bcx, 'tcx> {
             mir::Rvalue::Cast(ref kind, ref source, cast_ty) => {
                 let operand = self.trans_operand(&bcx, source);
                 debug!("cast operand is {:?}", operand);
-                let cast_ty = bcx.monomorphize(&cast_ty);
+                let cast_ty = bcx.fcx().monomorphize(&cast_ty);
 
                 let val = match *kind {
                     mir::CastKind::ReifyFnPointer => {
@@ -444,7 +444,7 @@ impl<'bcx, 'tcx> MirContext<'bcx, 'tcx> {
            }
 
             mir::Rvalue::Box(content_ty) => {
-                let content_ty: Ty<'tcx> = bcx.monomorphize(&content_ty);
+                let content_ty: Ty<'tcx> = bcx.fcx().monomorphize(&content_ty);
                 let llty = type_of::type_of(bcx.ccx(), content_ty);
                 let llsize = machine::llsize_of(bcx.ccx(), llty);
                 let align = type_of::align_of(bcx.ccx(), content_ty);
diff --git a/src/librustc_trans/tvec.rs b/src/librustc_trans/tvec.rs
index 931eb563e3635..b90c66f531c46 100644
--- a/src/librustc_trans/tvec.rs
+++ b/src/librustc_trans/tvec.rs
@@ -35,9 +35,9 @@ pub fn slice_for_each<'blk, 'tcx, F>(bcx: BlockAndBuilder<'blk, 'tcx>,
         bcx.inbounds_gep(a, &[b])
    };
 
-    let body_bcx = fcx.new_block("slice_loop_body").build();
-    let next_bcx = fcx.new_block("slice_loop_next").build();
-    let header_bcx = fcx.new_block("slice_loop_header").build();
+    let body_bcx = fcx.build_new_block("slice_loop_body");
+    let next_bcx = fcx.build_new_block("slice_loop_next");
+    let header_bcx = fcx.build_new_block("slice_loop_header");
 
     let start = if zst {
         C_uint(bcx.ccx(), 0usize)

From 86b2bdb4350ceb98cb0f292dfb25dc7b703c2297 Mon Sep 17 00:00:00 2001
From: Mark-Simulacrum
Date: Mon, 12 Dec 2016 06:48:39 -0700
Subject: [PATCH 009/103] Rename LandingPad to Funclet

Changes internal storage to direct field values instead of Options,
since the two fields are always either both set or both unset.
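
For illustration only (a minimal sketch, not part of the patch): the change
drops the per-field `Option`s and instead expresses the GNU case as an absent
`Funclet` at the use site. The `type`/`struct` stand-ins below are assumptions
added so the sketch compiles on its own; `ValueRef` and `OperandBundleDef` in
the real code are LLVM wrapper types.

    // Placeholder stand-ins so the sketch is self-contained.
    type ValueRef = *mut ();
    struct OperandBundleDef;

    // Before: for GNU landing pads both fields were `None`, for MSVC
    // funclets both were `Some`, so the two `Option`s never disagreed.
    struct LandingPad {
        cleanuppad: Option<ValueRef>,
        operand: Option<OperandBundleDef>,
    }

    // After: a `Funclet` always carries both values; callers hold an
    // `Option<Funclet>` that is `None` in the GNU case.
    struct Funclet {
        cleanuppad: ValueRef,
        operand: OperandBundleDef,
    }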
---
 src/librustc_trans/base.rs      |  2 +-
 src/librustc_trans/cleanup.rs   | 26 ++++++++-------
 src/librustc_trans/common.rs    | 58 ++++++++++++++++-----------------
 src/librustc_trans/glue.rs      |  4 +--
 src/librustc_trans/mir/block.rs | 24 +++++++-------
 src/librustc_trans/mir/mod.rs   |  8 ++---
 6 files changed, 61 insertions(+), 61 deletions(-)

diff --git a/src/librustc_trans/base.rs b/src/librustc_trans/base.rs
index 57c0c11ced0f1..dc5fd1d009c2f 100644
--- a/src/librustc_trans/base.rs
+++ b/src/librustc_trans/base.rs
@@ -762,7 +762,7 @@ impl<'blk, 'tcx> FunctionContext<'blk, 'tcx> {
             param_substs: param_substs,
             span: None,
             //block_arena: block_arena,
-            lpad_arena: TypedArena::new(),
+            funclet_arena: TypedArena::new(),
             ccx: ccx,
             debug_context: debug_context,
             scopes: RefCell::new(Vec::new()),
diff --git a/src/librustc_trans/cleanup.rs b/src/librustc_trans/cleanup.rs
index f761b3bd967fe..b29eae1587c6b 100644
--- a/src/librustc_trans/cleanup.rs
+++ b/src/librustc_trans/cleanup.rs
@@ -119,7 +119,7 @@ pub use self::EarlyExitLabel::*;
 use llvm::{BasicBlockRef, ValueRef};
 use base::{self, Lifetime};
 use common;
-use common::{BlockAndBuilder, FunctionContext, LandingPad};
+use common::{BlockAndBuilder, FunctionContext, Funclet};
 use debuginfo::{DebugLoc};
 use glue;
 use type_::Type;
@@ -343,7 +343,7 @@ impl<'blk, 'tcx> FunctionContext<'blk, 'tcx> {
 
         let mut bcx = bcx;
         for cleanup in scope.cleanups.iter().rev() {
-            bcx = cleanup.trans(bcx);
+            bcx = cleanup.trans(bcx.funclet(), bcx);
        }
         bcx
    }
@@ -430,7 +430,7 @@ impl<'blk, 'tcx> FunctionContext<'blk, 'tcx> {
                             bcx.call(
                                 bcx.fcx().eh_unwind_resume().reify(bcx.ccx()),
                                 &[exc_ptr],
-                                bcx.lpad().and_then(|b| b.bundle()));
+                                bcx.funclet().map(|b| b.bundle()));
                        }
                    }
                     UnwindKind::CleanupPad(_) => {
@@ -495,7 +495,7 @@ impl<'blk, 'tcx> FunctionContext<'blk, 'tcx> {
             let mut bcx_out = bcx_in;
             let len = scope.cleanups.len();
             for cleanup in scope.cleanups.iter().rev().take(len - skip) {
-                bcx_out = cleanup.trans(bcx_out);
+                bcx_out = cleanup.trans(bcx_out.funclet(), bcx_out);
            }
             skip = 0;
             exit_label.branch(&bcx_out, prev_llbb);
@@ -645,7 +645,7 @@ impl EarlyExitLabel {
     /// for the same kind of early exit label that `self` is.
     ///
     /// This function will appropriately configure `bcx` based on the kind of
-    /// label this is. For UnwindExit labels, the `lpad` field of the block will
+    /// label this is. For UnwindExit labels, the `funclet` field of the block will
     /// be set to `Some`, and for MSVC exceptions this function will generate a
     /// `cleanuppad` instruction at the start of the block so it may be jumped
     /// to in the future (e.g. so this block can be cached as an early exit).
@@ -656,11 +656,11 @@ impl EarlyExitLabel {
         match *self {
             UnwindExit(UnwindKind::CleanupPad(..)) => {
                 let pad = bcx.cleanup_pad(None, &[]);
-                bcx.set_lpad_ref(Some(bcx.fcx().lpad_arena.alloc(LandingPad::msvc(pad))));
+                bcx.set_funclet(Funclet::msvc(pad));
                 UnwindExit(UnwindKind::CleanupPad(pad))
            }
             UnwindExit(UnwindKind::LandingPad) => {
-                bcx.set_lpad_ref(Some(bcx.fcx().lpad_arena.alloc(LandingPad::gnu())));
+                bcx.set_funclet(Funclet::gnu());
                 *self
            }
        }
@@ -689,18 +689,20 @@ pub struct DropValue<'tcx> {
 }
 
 impl<'tcx> DropValue<'tcx> {
-    fn trans<'blk>(&self, bcx: BlockAndBuilder<'blk, 'tcx>) -> BlockAndBuilder<'blk, 'tcx> {
+    fn trans<'blk>(
+        &self,
+        funclet: Option<&'blk Funclet>,
+        bcx: BlockAndBuilder<'blk, 'tcx>,
+    ) -> BlockAndBuilder<'blk, 'tcx> {
         if self.is_immediate {
             let vp = base::alloc_ty(&bcx, self.ty, "");
             Lifetime::Start.call(&bcx, vp);
             base::store_ty(&bcx, self.val, vp, self.ty);
-            let lpad = bcx.lpad();
-            let bcx = glue::call_drop_glue(bcx, vp, self.ty, self.skip_dtor, lpad);
+            let bcx = glue::call_drop_glue(bcx, vp, self.ty, self.skip_dtor, funclet);
             Lifetime::End.call(&bcx, vp);
             bcx
        } else {
-            let lpad = bcx.lpad();
-            glue::call_drop_glue(bcx, self.val, self.ty, self.skip_dtor, lpad)
+            glue::call_drop_glue(bcx, self.val, self.ty, self.skip_dtor, funclet)
        }
    }
 }
diff --git a/src/librustc_trans/common.rs b/src/librustc_trans/common.rs
index 4f800bb2bf27e..440ae8326f540 100644
--- a/src/librustc_trans/common.rs
+++ b/src/librustc_trans/common.rs
@@ -308,7 +308,7 @@ pub struct FunctionContext<'a, 'tcx: 'a> {
     //pub block_arena: &'a TypedArena>,
 
     // The arena that landing pads are allocated from.
-    pub lpad_arena: TypedArena,
+    pub funclet_arena: TypedArena,
 
     // This function's enclosing crate context.
     pub ccx: &'a CrateContext<'a, 'tcx>,
@@ -483,7 +483,7 @@ pub struct BlockAndBuilder<'blk, 'tcx: 'blk> {
 
     // If this block part of a landing pad, then this is `Some` indicating what
     // kind of landing pad its in, otherwise this is none.
-    lpad: Cell>,
+    funclet: Cell>,
 
     // The function context for the function to which this block is
     // attached.
@@ -499,7 +499,7 @@ impl<'blk, 'tcx> BlockAndBuilder<'blk, 'tcx> {
         owned_builder.builder.position_at_end(llbb);
         BlockAndBuilder {
             llbb: llbb,
-            lpad: Cell::new(None),
+            funclet: Cell::new(None),
             fcx: fcx,
             owned_builder: owned_builder,
        }
@@ -535,17 +535,17 @@ impl<'blk, 'tcx> BlockAndBuilder<'blk, 'tcx> {
         self.fcx.mir()
    }
 
-    pub fn set_lpad(&self, lpad: Option) {
-        self.set_lpad_ref(lpad.map(|p| &*self.fcx().lpad_arena.alloc(p)))
+    pub fn set_funclet(&self, funclet: Option) {
+        self.set_funclet_ref(funclet.map(|p| &*self.fcx().funclet_arena.alloc(p)))
    }
 
-    pub fn set_lpad_ref(&self, lpad: Option<&'blk LandingPad>) {
+    pub fn set_funclet_ref(&self, funclet: Option<&'blk Funclet>) {
         // FIXME: use an IVar?
-        self.lpad.set(lpad);
+        self.funclet.set(funclet);
    }
 
-    pub fn lpad(&self) -> Option<&'blk LandingPad> {
-        self.lpad.get()
+    pub fn funclet(&self) -> Option<&'blk Funclet> {
+        self.funclet.get()
    }
 }
 
@@ -570,39 +570,37 @@ impl<'blk, 'tcx> Deref for BlockAndBuilder<'blk, 'tcx> {
 /// When inside of a landing pad, each function call in LLVM IR needs to be
 /// annotated with which landing pad it's a part of. This is accomplished via
 /// the `OperandBundleDef` value created for MSVC landing pads.
-pub struct LandingPad {
-    cleanuppad: Option,
-    operand: Option,
+pub struct Funclet {
+    cleanuppad: ValueRef,
+    operand: OperandBundleDef,
 }
 
-impl LandingPad {
-    pub fn gnu() -> LandingPad {
-        LandingPad { cleanuppad: None, operand: None }
+impl Funclet {
+    pub fn gnu() -> Option {
+        None
    }
 
-    pub fn msvc(cleanuppad: ValueRef) -> LandingPad {
-        LandingPad {
-            cleanuppad: Some(cleanuppad),
-            operand: Some(OperandBundleDef::new("funclet", &[cleanuppad])),
-        }
+    pub fn msvc(cleanuppad: ValueRef) -> Option {
+        Some(Funclet {
+            cleanuppad: cleanuppad,
+            operand: OperandBundleDef::new("funclet", &[cleanuppad]),
+        })
    }
 
-    pub fn bundle(&self) -> Option<&OperandBundleDef> {
-        self.operand.as_ref()
+    pub fn cleanuppad(&self) -> ValueRef {
+        self.cleanuppad
    }
 
-    pub fn cleanuppad(&self) -> Option {
-        self.cleanuppad
+    pub fn bundle(&self) -> &OperandBundleDef {
+        &self.operand
    }
 }
 
-impl Clone for LandingPad {
-    fn clone(&self) -> LandingPad {
-        LandingPad {
+impl Clone for Funclet {
+    fn clone(&self) -> Funclet {
+        Funclet {
             cleanuppad: self.cleanuppad,
-            operand: self.cleanuppad.map(|p| {
-                OperandBundleDef::new("funclet", &[p])
-            }),
+            operand: OperandBundleDef::new("funclet", &[self.cleanuppad]),
        }
    }
 }
diff --git a/src/librustc_trans/glue.rs b/src/librustc_trans/glue.rs
index 44c580b07400d..c7394ba68755a 100644
--- a/src/librustc_trans/glue.rs
+++ b/src/librustc_trans/glue.rs
@@ -133,7 +133,7 @@ pub fn call_drop_glue<'blk, 'tcx>(
     v: ValueRef,
     t: Ty<'tcx>,
     skip_dtor: bool,
-    lpad: Option<&'blk LandingPad>,
+    funclet: Option<&'blk Funclet>,
 ) -> BlockAndBuilder<'blk, 'tcx> {
     // NB: v is an *alias* of type t here, not a direct value.
     debug!("call_drop_glue(t={:?}, skip_dtor={})", t, skip_dtor);
@@ -154,7 +154,7 @@ pub fn call_drop_glue<'blk, 'tcx>(
        };
 
         // No drop-hint ==> call standard drop glue
-        bcx.call(glue, &[ptr], lpad.and_then(|b| b.bundle()));
+        bcx.call(glue, &[ptr], funclet.map(|b| b.bundle()));
    }
     bcx
 }
diff --git a/src/librustc_trans/mir/block.rs b/src/librustc_trans/mir/block.rs
index 151d1f018208d..3fa88c8706d4f 100644
--- a/src/librustc_trans/mir/block.rs
+++ b/src/librustc_trans/mir/block.rs
@@ -17,7 +17,7 @@ use abi::{Abi, FnType, ArgType};
 use adt;
 use base::{self, Lifetime};
 use callee::{Callee, CalleeData, Fn, Intrinsic, NamedTupleConstructor, Virtual};
-use common::{self, BlockAndBuilder, LandingPad};
+use common::{self, BlockAndBuilder, Funclet};
 use common::{C_bool, C_str_slice, C_struct, C_u32, C_undef};
 use consts;
 use debuginfo::DebugLoc;
@@ -44,20 +44,20 @@ use std::ptr;
 
 impl<'bcx, 'tcx> MirContext<'bcx, 'tcx> {
     pub fn trans_block(&mut self, bb: mir::BasicBlock,
-                       lpads: &IndexVec>) {
+                       funclets: &IndexVec>) {
         let mut bcx = self.build_block(bb);
         let data = &CellRef::clone(&self.mir)[bb];
 
         debug!("trans_block({:?}={:?})", bb, data);
 
-        let lpad = match self.cleanup_kinds[bb] {
-            CleanupKind::Internal { funclet } => lpads[funclet].as_ref(),
-            _ => lpads[bb].as_ref(),
+        let funclet = match self.cleanup_kinds[bb] {
+            CleanupKind::Internal { funclet } => funclets[funclet].as_ref(),
+            _ => funclets[bb].as_ref(),
        };
 
         // Create the cleanup bundle, if needed.
-        let cleanup_pad = lpad.and_then(|lp| lp.cleanuppad());
-        let cleanup_bundle = lpad.and_then(|l| l.bundle());
+        let cleanup_pad = funclet.map(|lp| lp.cleanuppad());
+        let cleanup_bundle = funclet.map(|l| l.bundle());
 
         let funclet_br = |this: &Self, bcx: BlockAndBuilder, bb: mir::BasicBlock| {
             let lltarget = this.blocks[bb];
@@ -866,28 +866,28 @@ impl<'bcx, 'tcx> MirContext<'bcx, 'tcx> {
    }
 
     pub fn init_cpad(&mut self, bb: mir::BasicBlock,
-                     lpads: &mut IndexVec>) {
+                     funclets: &mut IndexVec>) {
         let bcx = self.build_block(bb);
         let data = &self.mir[bb];
         debug!("init_cpad({:?})", data);
 
         match self.cleanup_kinds[bb] {
             CleanupKind::NotCleanup => {
-                lpads[bb] = None;
+                funclets[bb] = None;
            }
             _ if !base::wants_msvc_seh(bcx.sess()) => {
-                lpads[bb] = Some(LandingPad::gnu());
+                funclets[bb] = Funclet::gnu();
            }
             CleanupKind::Internal { funclet: _ } => {
                 // FIXME: is this needed?
                 bcx.set_personality_fn(self.fcx.eh_personality());
-                lpads[bb] = None;
+                funclets[bb] = None;
            }
             CleanupKind::Funclet => {
                 bcx.set_personality_fn(self.fcx.eh_personality());
                 DebugLoc::None.apply_to_bcx(&bcx);
                 let cleanup_pad = bcx.cleanup_pad(None, &[]);
-                lpads[bb] = Some(LandingPad::msvc(cleanup_pad));
+                funclets[bb] = Funclet::msvc(cleanup_pad);
            }
        };
    }
diff --git a/src/librustc_trans/mir/mod.rs b/src/librustc_trans/mir/mod.rs
index 33014af0f88ed..60fd80a8f9023 100644
--- a/src/librustc_trans/mir/mod.rs
+++ b/src/librustc_trans/mir/mod.rs
@@ -15,7 +15,7 @@ use rustc::mir;
 use rustc::mir::tcx::LvalueTy;
 use session::config::FullDebugInfo;
 use base;
-use common::{self, BlockAndBuilder, CrateContext, FunctionContext, C_null, LandingPad};
+use common::{self, BlockAndBuilder, CrateContext, FunctionContext, C_null, Funclet};
 use debuginfo::{self, declare_local, DebugLoc, VariableAccess, VariableKind,
                 FunctionDebugContext};
 use type_of;
@@ -285,19 +285,19 @@ pub fn trans_mir<'blk, 'tcx: 'blk>(fcx: &'blk FunctionContext<'blk, 'tcx>) {
 
     let mut rpo = traversal::reverse_postorder(&mir);
 
-    let mut lpads: IndexVec> =
+    let mut funclets: IndexVec> =
         IndexVec::from_elem(None, mir.basic_blocks());
 
     // Prepare each block for translation.
     for (bb, _) in rpo.by_ref() {
-        mircx.init_cpad(bb, &mut lpads);
+        mircx.init_cpad(bb, &mut funclets);
    }
     rpo.reset();
 
     // Translate the body of each block using reverse postorder
     for (bb, _) in rpo {
         visited.insert(bb.index());
-        mircx.trans_block(bb, &lpads);
+        mircx.trans_block(bb, &funclets);
    }
 
     // Remove blocks that haven't been visited, or have no

From ed989d39a196acc0e7b3d30976a79ccc7c78a849 Mon Sep 17 00:00:00 2001
From: Mark-Simulacrum
Date: Mon, 12 Dec 2016 09:52:33 -0700
Subject: [PATCH 010/103] Simple cleanups/inlines in cleanup

---
 src/librustc_trans/cleanup.rs | 59 +++++------------------------------
 1 file changed, 7 insertions(+), 52 deletions(-)

diff --git a/src/librustc_trans/cleanup.rs b/src/librustc_trans/cleanup.rs
index b29eae1587c6b..93fe705fd6e85 100644
--- a/src/librustc_trans/cleanup.rs
+++ b/src/librustc_trans/cleanup.rs
@@ -177,26 +177,21 @@ impl<'blk, 'tcx> FunctionContext<'blk, 'tcx> {
         CustomScopeIndex { index: index }
    }
 
-    /// Removes the top cleanup scope from the stack without executing its cleanups. The top
-    /// cleanup scope must be the temporary scope `custom_scope`.
-    pub fn pop_custom_cleanup_scope(&self,
-                                    custom_scope: CustomScopeIndex) {
-        debug!("pop_custom_cleanup_scope({})", custom_scope.index);
-        assert!(self.is_valid_to_pop_custom_scope(custom_scope));
-        let _ = self.pop_scope();
-    }
-
     /// Removes the top cleanup scope from the stack, which must be a temporary scope, and
     /// generates the code to do its cleanups for normal exit.
     pub fn pop_and_trans_custom_cleanup_scope(&self,
-                                              bcx: BlockAndBuilder<'blk, 'tcx>,
+                                              mut bcx: BlockAndBuilder<'blk, 'tcx>,
                                               custom_scope: CustomScopeIndex)
                                               -> BlockAndBuilder<'blk, 'tcx> {
         debug!("pop_and_trans_custom_cleanup_scope({:?})", custom_scope);
-        assert!(self.is_valid_to_pop_custom_scope(custom_scope));
+        assert!(self.is_valid_custom_scope(custom_scope));
+        assert!(custom_scope.index == self.scopes.borrow().len() - 1);
 
         let scope = self.pop_scope();
-        self.trans_scope_cleanups(bcx, &scope)
+        for cleanup in scope.cleanups.iter().rev() {
+            bcx = cleanup.trans(bcx.funclet(), bcx);
+        }
+        bcx
    }
 
     /// Schedules a (deep) drop of `val`, which is a pointer to an instance of
@@ -251,29 +246,6 @@ impl<'blk, 'tcx> FunctionContext<'blk, 'tcx> {
         self.schedule_clean(cleanup_scope, drop);
    }
 
-    /// Schedules a (deep) drop of `val`, which is an instance of `ty`
-    pub fn schedule_drop_immediate(&self,
-                                   cleanup_scope: CustomScopeIndex,
-                                   val: ValueRef,
-                                   ty: Ty<'tcx>) {
-
-        if !self.type_needs_drop(ty) { return; }
-        let drop = DropValue {
-            is_immediate: true,
-            val: val,
-            ty: ty,
-            skip_dtor: false,
-        };
-
-        debug!("schedule_drop_immediate({:?}, val={:?}, ty={:?}) skip_dtor={}",
-               cleanup_scope,
-               Value(val),
-               ty,
-               drop.skip_dtor);
-
-        self.schedule_clean(cleanup_scope, drop);
-    }
-
     /// Schedules a cleanup to occur in the top-most scope, which must be a temporary scope.
     fn schedule_clean(&self, custom_scope: CustomScopeIndex, cleanup: DropValue<'tcx>) {
         debug!("schedule_clean_in_custom_scope(custom_scope={})",
@@ -326,28 +298,11 @@ impl<'blk, 'tcx> FunctionContext<'blk, 'tcx> {
         return llbb;
    }
 
-    fn is_valid_to_pop_custom_scope(&self, custom_scope: CustomScopeIndex) -> bool {
-        self.is_valid_custom_scope(custom_scope) &&
-            custom_scope.index == self.scopes.borrow().len() - 1
-    }
-
     fn is_valid_custom_scope(&self, custom_scope: CustomScopeIndex) -> bool {
         let scopes = self.scopes.borrow();
         custom_scope.index < scopes.len()
    }
 
-    /// Generates the cleanups for `scope` into `bcx`
-    fn trans_scope_cleanups(&self, // cannot borrow self, will recurse
-                            bcx: BlockAndBuilder<'blk, 'tcx>,
-                            scope: &CleanupScope<'tcx>) -> BlockAndBuilder<'blk, 'tcx> {
-
-        let mut bcx = bcx;
-        for cleanup in scope.cleanups.iter().rev() {
-            bcx = cleanup.trans(bcx.funclet(), bcx);
-        }
-        bcx
-    }
-
     fn scopes_len(&self) -> usize {
         self.scopes.borrow().len()
    }

From 5a36f88feb972d02a9f656f898e114c287fa60a1 Mon Sep 17 00:00:00 2001
From: Mark-Simulacrum
Date: Mon, 12 Dec 2016 11:58:55 -0700
Subject: [PATCH 011/103] Remove debug_loc from CleanupScope and privatize
 various helpers

---
 src/librustc_trans/cleanup.rs | 221 +++++++++++++++-------------------
 1 file changed, 94 insertions(+), 127 deletions(-)

diff --git a/src/librustc_trans/cleanup.rs b/src/librustc_trans/cleanup.rs
index 93fe705fd6e85..43b8aa8b6c82f 100644
--- a/src/librustc_trans/cleanup.rs
+++ b/src/librustc_trans/cleanup.rs
@@ -114,13 +114,12 @@
 //! code for `expr` itself is responsible for freeing any other byproducts
 //! that may be in play.
 
-pub use self::EarlyExitLabel::*;
+use self::EarlyExitLabel::*;
 
 use llvm::{BasicBlockRef, ValueRef};
 use base::{self, Lifetime};
 use common;
 use common::{BlockAndBuilder, FunctionContext, Funclet};
-use debuginfo::{DebugLoc};
 use glue;
 use type_::Type;
 use value::Value;
@@ -130,10 +129,6 @@ pub struct CleanupScope<'tcx> {
     // Cleanups to run upon scope exit.
     cleanups: Vec>,
 
-    // The debug location any drop calls generated for this scope will be
-    // associated with.
-    debug_loc: DebugLoc,
-
     cached_early_exits: Vec,
     cached_landing_pad: Option,
 }
@@ -144,18 +139,18 @@ pub struct CustomScopeIndex {
 }
 
 #[derive(Copy, Clone, PartialEq, Debug)]
-pub enum EarlyExitLabel {
+enum EarlyExitLabel {
     UnwindExit(UnwindKind),
 }
 
 #[derive(Copy, Clone, Debug)]
-pub enum UnwindKind {
+enum UnwindKind {
     LandingPad,
     CleanupPad(ValueRef),
 }
 
 #[derive(Copy, Clone)]
-pub struct CachedEarlyExit {
+struct CachedEarlyExit {
     label: EarlyExitLabel,
     cleanup_block: BasicBlockRef,
     last_cleanup: usize,
@@ -165,15 +160,7 @@ impl<'blk, 'tcx> FunctionContext<'blk, 'tcx> {
     pub fn push_custom_cleanup_scope(&self) -> CustomScopeIndex {
         let index = self.scopes_len();
         debug!("push_custom_cleanup_scope(): {}", index);
-
-        // Just copy the debuginfo source location from the enclosing scope
-        let debug_loc = self.scopes
-            .borrow()
-            .last()
-            .map(|opt_scope| opt_scope.debug_loc)
-            .unwrap_or(DebugLoc::None);
-
-        self.push_scope(CleanupScope::new(debug_loc));
+        self.push_scope(CleanupScope::new());
         CustomScopeIndex { index: index }
    }
 
@@ -282,8 +269,67 @@ impl<'blk, 'tcx> FunctionContext<'blk, 'tcx> {
             popped_scopes.push(self.pop_scope());
        }
 
-        // Check for an existing landing pad in the new topmost scope:
-        let llbb = self.get_or_create_landing_pad();
+        // Creates a landing pad for the top scope, if one does not exist. The
+        // landing pad will perform all cleanups necessary for an unwind and then
+        // `resume` to continue error propagation:
+        //
+        //     landing_pad -> ... cleanups ... -> [resume]
+        //
+        // (The cleanups and resume instruction are created by
+        // `trans_cleanups_to_exit_scope()`, not in this function itself.)
+        let mut scopes = self.scopes.borrow_mut();
+        let last_scope = scopes.last_mut().unwrap();
+        let llbb = if let Some(llbb) = last_scope.cached_landing_pad {
+            llbb
+        } else {
+            let name = last_scope.block_name("unwind");
+            let pad_bcx = self.build_new_block(&name[..]);
+            last_scope.cached_landing_pad = Some(pad_bcx.llbb());
+
+            let llpersonality = pad_bcx.fcx().eh_personality();
+
+            let val = if base::wants_msvc_seh(self.ccx.sess()) {
+                // A cleanup pad requires a personality function to be specified, so
+                // we do that here explicitly (happens implicitly below through
+                // creation of the landingpad instruction). We then create a
+                // cleanuppad instruction which has no filters to run cleanup on all
+                // exceptions.
+                pad_bcx.set_personality_fn(llpersonality);
+                let llretval = pad_bcx.cleanup_pad(None, &[]);
+                UnwindKind::CleanupPad(llretval)
+            } else {
+                // The landing pad return type (the type being propagated). Not sure
+                // what this represents but it's determined by the personality
+                // function and this is what the EH proposal example uses.
+                let llretty = Type::struct_(self.ccx,
+                                            &[Type::i8p(self.ccx), Type::i32(self.ccx)],
+                                            false);
+
+                // The only landing pad clause will be 'cleanup'
+                let llretval = pad_bcx.landing_pad(llretty, llpersonality, 1,
+                                                   pad_bcx.fcx().llfn);
+
+                // The landing pad block is a cleanup
+                pad_bcx.set_cleanup(llretval);
+
+                let addr = match self.landingpad_alloca.get() {
+                    Some(addr) => addr,
+                    None => {
+                        let addr = base::alloca(&pad_bcx, common::val_ty(llretval), "");
+                        Lifetime::Start.call(&pad_bcx, addr);
+                        self.landingpad_alloca.set(Some(addr));
+                        addr
+                    }
+                };
+                pad_bcx.store(llretval, addr);
+                UnwindKind::LandingPad
+            };
+
+            // Generate the cleanup block and branch to it.
+            let label = UnwindExit(val);
+            let cleanup_llbb = self.trans_cleanups_to_exit_scope(label);
+            label.branch(&pad_bcx, cleanup_llbb);
+
+            pad_bcx.llbb()
+        };
 
         // Push the scopes we removed back on:
         loop {
@@ -392,11 +384,8 @@ impl<'blk, 'tcx> FunctionContext<'blk, 'tcx> {
     /// breaks. The return value would be the first basic block in that sequence
     /// (`Cleanup(AST 24)`). The caller could then branch to `Cleanup(AST 24)`
     /// and it will perform all cleanups and finally branch to the `break_blk`.
-    fn trans_cleanups_to_exit_scope(&'blk self,
-                                    label: EarlyExitLabel)
-                                    -> BasicBlockRef {
-        debug!("trans_cleanups_to_exit_scope label={:?} scopes={}",
-               label, self.scopes_len());
+    fn trans_cleanups_to_exit_scope(&'blk self, label: EarlyExitLabel) -> BasicBlockRef {
+        debug!("trans_cleanups_to_exit_scope label={:?} scopes={}", label, self.scopes_len());
 
         let orig_scopes_len = self.scopes_len();
         let mut prev_llbb;
@@ -410,36 +402,34 @@ impl<'blk, 'tcx> FunctionContext<'blk, 'tcx> {
         // (Presuming that there are no cached exits)
         loop {
             if self.scopes_len() == 0 {
-                match label {
-                    UnwindExit(val) => {
-                        // Generate a block that will resume unwinding to the
-                        // calling function
-                        let bcx = self.build_new_block("resume");
-                        match val {
-                            UnwindKind::LandingPad => {
-                                let addr = self.landingpad_alloca.get()
-                                               .unwrap();
-                                let lp = bcx.load(addr);
-                                Lifetime::End.call(&bcx, addr);
-                                if !bcx.sess().target.target.options.custom_unwind_resume {
-                                    bcx.resume(lp);
-                                } else {
-                                    let exc_ptr = bcx.extract_value(lp, 0);
-                                    bcx.call(
-                                        bcx.fcx().eh_unwind_resume().reify(bcx.ccx()),
-                                        &[exc_ptr],
-                                        bcx.funclet().map(|b| b.bundle()));
-                                }
-                            }
-                            UnwindKind::CleanupPad(_) => {
-                                let pad = bcx.cleanup_pad(None, &[]);
-                                bcx.cleanup_ret(pad, None);
-                            }
+                let val = match label {
+                    UnwindExit(val) => val,
+                };
+                // Generate a block that will resume unwinding to the
+                // calling function
+                let bcx = self.build_new_block("resume");
+                match val {
+                    UnwindKind::LandingPad => {
+                        let addr = self.landingpad_alloca.get().unwrap();
+                        let lp = bcx.load(addr);
+                        Lifetime::End.call(&bcx, addr);
+                        if !bcx.sess().target.target.options.custom_unwind_resume {
+                            bcx.resume(lp);
+                        } else {
+                            let exc_ptr = bcx.extract_value(lp, 0);
+                            bcx.call(
+                                bcx.fcx().eh_unwind_resume().reify(bcx.ccx()),
+                                &[exc_ptr],
+                                bcx.funclet().map(|b| b.bundle()));
                        }
-                        prev_llbb = bcx.llbb();
-                        break;
                    }
+                    UnwindKind::CleanupPad(_) => {
+                        let pad = bcx.cleanup_pad(None, &[]);
+                        bcx.cleanup_ret(pad, None);
+                    }
                }
+                prev_llbb = bcx.llbb();
+                break;
            }
 
             // Pop off the scope, since we may be generating
@@ -466,85 +456,11 @@ impl<'blk, 'tcx> FunctionContext<'blk, 'tcx> {
         assert_eq!(self.scopes_len(), orig_scopes_len);
         prev_llbb
    }
-
-    /// Creates a landing pad for the top scope, if one does not exist. The
-    /// landing pad will perform all cleanups necessary for an unwind and then
-    /// `resume` to continue error propagation:
-    ///
-    ///     landing_pad -> ... cleanups ... -> [resume]
-    ///
-    /// (The cleanups and resume instruction are created by
-    /// `trans_cleanups_to_exit_scope()`, not in this function itself.)
-    fn get_or_create_landing_pad(&'blk self) -> BasicBlockRef {
-        let pad_bcx;
-
-        debug!("get_or_create_landing_pad");
-
-        // Check if a landing pad block exists; if not, create one.
-        {
-            let mut scopes = self.scopes.borrow_mut();
-            let last_scope = scopes.last_mut().unwrap();
-            match last_scope.cached_landing_pad {
-                Some(llbb) => return llbb,
-                None => {
-                    let name = last_scope.block_name("unwind");
-                    pad_bcx = self.build_new_block(&name[..]);
-                    last_scope.cached_landing_pad = Some(pad_bcx.llbb());
-                }
-            }
-        };
-
-        let llpersonality = pad_bcx.fcx().eh_personality();
-
-        let val = if base::wants_msvc_seh(self.ccx.sess()) {
-            // A cleanup pad requires a personality function to be specified, so
-            // we do that here explicitly (happens implicitly below through
-            // creation of the landingpad instruction). We then create a
-            // cleanuppad instruction which has no filters to run cleanup on all
-            // exceptions.
-            pad_bcx.set_personality_fn(llpersonality);
-            let llretval = pad_bcx.cleanup_pad(None, &[]);
-            UnwindKind::CleanupPad(llretval)
-        } else {
-            // The landing pad return type (the type being propagated). Not sure
-            // what this represents but it's determined by the personality
-            // function and this is what the EH proposal example uses.
-            let llretty = Type::struct_(self.ccx,
-                                        &[Type::i8p(self.ccx), Type::i32(self.ccx)],
-                                        false);
-
-            // The only landing pad clause will be 'cleanup'
-            let llretval = pad_bcx.landing_pad(llretty, llpersonality, 1, pad_bcx.fcx().llfn);
-
-            // The landing pad block is a cleanup
-            pad_bcx.set_cleanup(llretval);
-
-            let addr = match self.landingpad_alloca.get() {
-                Some(addr) => addr,
-                None => {
-                    let addr = base::alloca(&pad_bcx, common::val_ty(llretval), "");
-                    Lifetime::Start.call(&pad_bcx, addr);
-                    self.landingpad_alloca.set(Some(addr));
-                    addr
-                }
-            };
-            pad_bcx.store(llretval, addr);
-            UnwindKind::LandingPad
-        };
-
-        // Generate the cleanup block and branch to it.
-        let label = UnwindExit(val);
-        let cleanup_llbb = self.trans_cleanups_to_exit_scope(label);
-        label.branch(&pad_bcx, cleanup_llbb);
-
-        return pad_bcx.llbb();
-    }
 }
 
 impl<'tcx> CleanupScope<'tcx> {
-    fn new(debug_loc: DebugLoc) -> CleanupScope<'tcx> {
+    fn new() -> CleanupScope<'tcx> {
         CleanupScope {
-            debug_loc: debug_loc,
             cleanups: vec![],
             cached_early_exits: vec![],
             cached_landing_pad: None,

From ec518a0069e41e8dee1da672de2de9fec7c0f9d0 Mon Sep 17 00:00:00 2001
From: Mark-Simulacrum
Date: Mon, 12 Dec 2016 12:26:01 -0700
Subject: [PATCH 012/103] Remove EarlyExitLabel; it has only one variant and is
 therefore useless

---
 src/librustc_trans/cleanup.rs | 41 +++++++++++++----------------------
 1 file changed, 15 insertions(+), 26 deletions(-)

diff --git a/src/librustc_trans/cleanup.rs b/src/librustc_trans/cleanup.rs
index 43b8aa8b6c82f..b349580b37801 100644
--- a/src/librustc_trans/cleanup.rs
+++ b/src/librustc_trans/cleanup.rs
@@ -114,8 +114,6 @@
 //! code for `expr` itself is responsible for freeing any other byproducts
 //! that may be in play.
 
-use self::EarlyExitLabel::*;
-
 use llvm::{BasicBlockRef, ValueRef};
 use base::{self, Lifetime};
 use common;
 use common::{BlockAndBuilder, FunctionContext, Funclet};
 use glue;
 use type_::Type;
 use value::Value;
@@ -138,11 +136,6 @@ pub struct CustomScopeIndex {
     index: usize
 }
 
-#[derive(Copy, Clone, PartialEq, Debug)]
-enum EarlyExitLabel {
-    UnwindExit(UnwindKind),
-}
-
 #[derive(Copy, Clone, Debug)]
 enum UnwindKind {
     LandingPad,
     CleanupPad(ValueRef),
 }
 
 #[derive(Copy, Clone)]
 struct CachedEarlyExit {
-    label: EarlyExitLabel,
+    label: UnwindKind,
     cleanup_block: BasicBlockRef,
     last_cleanup: usize,
 }
@@ -325,9 +318,8 @@ impl<'blk, 'tcx> FunctionContext<'blk, 'tcx> {
        };
 
         // Generate the cleanup block and branch to it.
-        let label = UnwindExit(val);
-        let cleanup_llbb = self.trans_cleanups_to_exit_scope(label);
-        label.branch(&pad_bcx, cleanup_llbb);
+        let cleanup_llbb = self.trans_cleanups_to_exit_scope(val);
+        val.branch(&pad_bcx, cleanup_llbb);
 
         pad_bcx.llbb()
    };
@@ -392,7 +384,7 @@ impl<'blk, 'tcx> FunctionContext<'blk, 'tcx> {
     /// breaks. The return value would be the first basic block in that sequence
     /// (`Cleanup(AST 24)`). The caller could then branch to `Cleanup(AST 24)`
     /// and it will perform all cleanups and finally branch to the `break_blk`.
-    fn trans_cleanups_to_exit_scope(&'blk self, label: EarlyExitLabel) -> BasicBlockRef {
+    fn trans_cleanups_to_exit_scope(&'blk self, label: UnwindKind) -> BasicBlockRef {
         debug!("trans_cleanups_to_exit_scope label={:?} scopes={}", label, self.scopes_len());
 
         let orig_scopes_len = self.scopes_len();
@@ -410,13 +402,10 @@ impl<'blk, 'tcx> FunctionContext<'blk, 'tcx> {
         // (Presuming that there are no cached exits)
         loop {
             if self.scopes_len() == 0 {
-                let val = match label {
-                    UnwindExit(val) => val,
-                };
                 // Generate a block that will resume unwinding to the
                 // calling function
                 let bcx = self.build_new_block("resume");
-                match val {
+                match label {
                     UnwindKind::LandingPad => {
                         let addr = self.landingpad_alloca.get().unwrap();
                         let lp = bcx.load(addr);
@@ -519,7 +508,7 @@ impl<'tcx> CleanupScope<'tcx> {
    }
 
     fn cached_early_exit(&self,
-                         label: EarlyExitLabel)
+                         label: UnwindKind)
                          -> Option<(BasicBlockRef, usize)> {
         self.cached_early_exits.iter().rev().
             find(|e| e.label == label).
@@ -527,7 +516,7 @@ impl<'tcx> CleanupScope<'tcx> {
    }
 
     fn add_cached_early_exit(&mut self,
-                             label: EarlyExitLabel,
+                             label: UnwindKind,
                              blk: BasicBlockRef,
                              last_cleanup: usize) {
         self.cached_early_exits.push(
@@ -548,7 +537,7 @@ impl<'tcx> CleanupScope<'tcx> {
    }
 }
 
-impl EarlyExitLabel {
+impl UnwindKind {
     /// Generates a branch going from `from_bcx` to `to_llbb` where `self` is
     /// the exit label attached to the start of `from_bcx`.
     ///
@@ -556,7 +545,7 @@ impl UnwindKind {
     /// of label. For example with MSVC exceptions unwind exit labels will use
     /// the `cleanupret` instruction instead of the `br` instruction.
     fn branch(&self, from_bcx: &BlockAndBuilder, to_llbb: BasicBlockRef) {
-        if let UnwindExit(UnwindKind::CleanupPad(pad)) = *self {
+        if let UnwindKind::CleanupPad(pad) = *self {
             from_bcx.cleanup_ret(pad, Some(to_llbb));
        } else {
             from_bcx.br(to_llbb);
@@ -574,14 +563,14 @@ impl UnwindKind {
     ///
     /// Returns a new label which will can be used to cache `bcx` in the list of
     /// early exits.
-    fn start(&self, bcx: &BlockAndBuilder) -> EarlyExitLabel {
+    fn start(&self, bcx: &BlockAndBuilder) -> UnwindKind {
         match *self {
-            UnwindExit(UnwindKind::CleanupPad(..)) => {
+            UnwindKind::CleanupPad(..) => {
                 let pad = bcx.cleanup_pad(None, &[]);
                 bcx.set_funclet(Funclet::msvc(pad));
-                UnwindExit(UnwindKind::CleanupPad(pad))
+                UnwindKind::CleanupPad(pad)
            }
-            UnwindExit(UnwindKind::LandingPad) => {
+            UnwindKind::LandingPad => {
                 bcx.set_funclet(Funclet::gnu());
                 *self
            }
@@ -590,8 +579,8 @@ impl UnwindKind {
 }
 
 impl PartialEq for UnwindKind {
-    fn eq(&self, val: &UnwindKind) -> bool {
-        match (*self, *val) {
+    fn eq(&self, label: &UnwindKind) -> bool {
+        match (*self, *label) {
             (UnwindKind::LandingPad, UnwindKind::LandingPad) |
             (UnwindKind::CleanupPad(..), UnwindKind::CleanupPad(..)) => true,
             _ => false,

From 284291258356876a9acd0b310656bf85c15b6401 Mon Sep 17 00:00:00 2001
From: Mark-Simulacrum
Date: Mon, 12 Dec 2016 12:32:16 -0700
Subject: [PATCH 013/103] Remove DropValue.is_immediate

---
 src/librustc_trans/cleanup.rs | 14 +-------------
 1 file changed, 1 insertion(+), 13 deletions(-)

diff --git a/src/librustc_trans/cleanup.rs b/src/librustc_trans/cleanup.rs
index b349580b37801..53cf6b2283da8 100644
--- a/src/librustc_trans/cleanup.rs
+++ b/src/librustc_trans/cleanup.rs
@@ -182,7 +182,6 @@ impl<'blk, 'tcx> FunctionContext<'blk, 'tcx> {
                          ty: Ty<'tcx>) {
         if !self.type_needs_drop(ty) { return; }
         let drop = DropValue {
-            is_immediate: false,
             val: val,
             ty: ty,
             skip_dtor: false,
@@ -211,7 +210,6 @@ impl<'blk, 'tcx> FunctionContext<'blk, 'tcx> {
 
         if !self.type_needs_drop(ty) { return; }
         let drop = DropValue {
-            is_immediate: false,
             val: val,
             ty: ty,
             skip_dtor: true,
@@ -593,7 +591,6 @@ impl PartialEq for UnwindKind {
 
 #[derive(Copy, Clone)]
 pub struct DropValue<'tcx> {
-    is_immediate: bool,
     val: ValueRef,
     ty: Ty<'tcx>,
     skip_dtor: bool,
@@ -605,15 +602,6 @@ impl<'tcx> DropValue<'tcx> {
         funclet: Option<&'blk Funclet>,
         bcx: BlockAndBuilder<'blk, 'tcx>,
     ) -> BlockAndBuilder<'blk, 'tcx> {
-        if self.is_immediate {
-            let vp = base::alloc_ty(&bcx, self.ty, "");
-            Lifetime::Start.call(&bcx, vp);
-            base::store_ty(&bcx, self.val, vp, self.ty);
-            let bcx = glue::call_drop_glue(bcx, vp, self.ty, self.skip_dtor, funclet);
-            Lifetime::End.call(&bcx, vp);
-            bcx
-        } else {
-            glue::call_drop_glue(bcx, self.val, self.ty, self.skip_dtor, funclet)
-        }
+        glue::call_drop_glue(bcx, self.val, self.ty, self.skip_dtor, funclet)
    }
 }

From 48715a15ff8396c4661183df9769b1c7a881791d Mon Sep 17 00:00:00 2001
From: Mark-Simulacrum
Date: Mon, 12 Dec 2016 12:40:39 -0700
Subject: [PATCH 014/103] Reformatting

---
 src/librustc_trans/cleanup.rs | 20 +++++++------------
 1 file changed, 7 insertions(+), 13 deletions(-)

diff --git a/src/librustc_trans/cleanup.rs b/src/librustc_trans/cleanup.rs
index 53cf6b2283da8..7c4a03de6e4b9 100644
--- a/src/librustc_trans/cleanup.rs
+++ b/src/librustc_trans/cleanup.rs
@@ -505,12 +505,10 @@ impl<'tcx> CleanupScope<'tcx> {
        }
    }
 
-    fn cached_early_exit(&self,
-                         label: UnwindKind)
-                         -> Option<(BasicBlockRef, usize)> {
-        self.cached_early_exits.iter().rev().
-            find(|e| e.label == label).
- map(|e| (e.cleanup_block, e.last_cleanup)) + fn cached_early_exit(&self, label: UnwindKind) -> Option<(BasicBlockRef, usize)> { + self.cached_early_exits.iter().rev() + .find(|e| e.label == label) + .map(|e| (e.cleanup_block, e.last_cleanup)) } fn add_cached_early_exit(&mut self, @@ -525,8 +523,7 @@ impl<'tcx> CleanupScope<'tcx> { /// True if this scope has cleanups that need unwinding fn needs_invoke(&self) -> bool { - self.cached_landing_pad.is_some() || - !self.cleanups.is_empty() + self.cached_landing_pad.is_some() || !self.cleanups.is_empty() } /// Returns a suitable name to use for the basic block that handles this cleanup scope @@ -597,11 +594,8 @@ pub struct DropValue<'tcx> { } impl<'tcx> DropValue<'tcx> { - fn trans<'blk>( - &self, - funclet: Option<&'blk Funclet>, - bcx: BlockAndBuilder<'blk, 'tcx>, - ) -> BlockAndBuilder<'blk, 'tcx> { + fn trans<'blk>(&self, funclet: Option<&'blk Funclet>, bcx: BlockAndBuilder<'blk, 'tcx>) + -> BlockAndBuilder<'blk, 'tcx> { glue::call_drop_glue(bcx, self.val, self.ty, self.skip_dtor, funclet) } } From da971b765217bb3d729c448e88509fe817b94a8b Mon Sep 17 00:00:00 2001 From: Mark-Simulacrum Date: Mon, 12 Dec 2016 15:08:12 -0700 Subject: [PATCH 015/103] Pull out get_or_create landing pad to avoid issues with dynamic borrowck. --- src/librustc_trans/cleanup.rs | 133 +++++++++++++++++++--------------- 1 file changed, 73 insertions(+), 60 deletions(-) diff --git a/src/librustc_trans/cleanup.rs b/src/librustc_trans/cleanup.rs index 7c4a03de6e4b9..ebc2ff1f166da 100644 --- a/src/librustc_trans/cleanup.rs +++ b/src/librustc_trans/cleanup.rs @@ -242,6 +242,78 @@ impl<'blk, 'tcx> FunctionContext<'blk, 'tcx> { self.scopes.borrow().iter().rev().any(|s| s.needs_invoke()) } + /// Creates a landing pad for the top scope, if one does not exist. The + /// landing pad will perform all cleanups necessary for an unwind and then + /// `resume` to continue error propagation: + /// + /// landing_pad -> ... cleanups ... -> [resume] + /// + /// (The cleanups and resume instruction are created by + /// `trans_cleanups_to_exit_scope()`, not in this function itself.) + fn get_or_create_landing_pad(&'blk self) -> BasicBlockRef { + let pad_bcx; + + debug!("get_or_create_landing_pad"); + + // Check if a landing pad block exists; if not, create one. + { + let mut scopes = self.scopes.borrow_mut(); + let last_scope = scopes.last_mut().unwrap(); + match last_scope.cached_landing_pad { + Some(llbb) => return llbb, + None => { + let name = last_scope.block_name("unwind"); + pad_bcx = self.build_new_block(&name[..]); + last_scope.cached_landing_pad = Some(pad_bcx.llbb()); + } + } + }; + + let llpersonality = pad_bcx.fcx().eh_personality(); + + let val = if base::wants_msvc_seh(self.ccx.sess()) { + // A cleanup pad requires a personality function to be specified, so + // we do that here explicitly (happens implicitly below through + // creation of the landingpad instruction). We then create a + // cleanuppad instruction which has no filters to run cleanup on all + // exceptions. + pad_bcx.set_personality_fn(llpersonality); + let llretval = pad_bcx.cleanup_pad(None, &[]); + UnwindKind::CleanupPad(llretval) + } else { + // The landing pad return type (the type being propagated). Not sure + // what this represents but it's determined by the personality + // function and this is what the EH proposal example uses. 
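+                // (Concretely, this is the Itanium-style pair that LLVM's
+                // `landingpad` instruction yields: the exception object
+                // pointer plus a 32-bit type-id selector.)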
+ let llretty = Type::struct_(self.ccx, + &[Type::i8p(self.ccx), Type::i32(self.ccx)], + false); + + // The only landing pad clause will be 'cleanup' + let llretval = pad_bcx.landing_pad(llretty, llpersonality, 1, pad_bcx.fcx().llfn); + + // The landing pad block is a cleanup + pad_bcx.set_cleanup(llretval); + + let addr = match self.landingpad_alloca.get() { + Some(addr) => addr, + None => { + let addr = base::alloca(&pad_bcx, common::val_ty(llretval), ""); + Lifetime::Start.call(&pad_bcx, addr); + self.landingpad_alloca.set(Some(addr)); + addr + } + }; + pad_bcx.store(llretval, addr); + UnwindKind::LandingPad + }; + + // Generate the cleanup block and branch to it. + let cleanup_llbb = self.trans_cleanups_to_exit_scope(val); + val.branch(&pad_bcx, cleanup_llbb); + + return pad_bcx.llbb(); + } + /// Returns a basic block to branch to in the event of a panic. This block /// will run the panic cleanups and eventually resume the exception that /// caused the landing pad to be run. @@ -260,66 +332,7 @@ impl<'blk, 'tcx> FunctionContext<'blk, 'tcx> { popped_scopes.push(self.pop_scope()); } - // Creates a landing pad for the top scope, if one does not exist. The - // landing pad will perform all cleanups necessary for an unwind and then - // `resume` to continue error propagation: - // - // landing_pad -> ... cleanups ... -> [resume] - // - // (The cleanups and resume instruction are created by - // `trans_cleanups_to_exit_scope()`, not in this function itself.) - let mut scopes = self.scopes.borrow_mut(); - let last_scope = scopes.last_mut().unwrap(); - let llbb = if let Some(llbb) = last_scope.cached_landing_pad { - llbb - } else { - let name = last_scope.block_name("unwind"); - let pad_bcx = self.build_new_block(&name[..]); - last_scope.cached_landing_pad = Some(pad_bcx.llbb()); - let llpersonality = pad_bcx.fcx().eh_personality(); - - let val = if base::wants_msvc_seh(self.ccx.sess()) { - // A cleanup pad requires a personality function to be specified, so - // we do that here explicitly (happens implicitly below through - // creation of the landingpad instruction). We then create a - // cleanuppad instruction which has no filters to run cleanup on all - // exceptions. - pad_bcx.set_personality_fn(llpersonality); - let llretval = pad_bcx.cleanup_pad(None, &[]); - UnwindKind::CleanupPad(llretval) - } else { - // The landing pad return type (the type being propagated). Not sure - // what this represents but it's determined by the personality - // function and this is what the EH proposal example uses. - let llretty = Type::struct_(self.ccx, - &[Type::i8p(self.ccx), Type::i32(self.ccx)], - false); - - // The only landing pad clause will be 'cleanup' - let llretval = pad_bcx.landing_pad(llretty, llpersonality, 1, - pad_bcx.fcx().llfn); - - // The landing pad block is a cleanup - pad_bcx.set_cleanup(llretval); - - let addr = match self.landingpad_alloca.get() { - Some(addr) => addr, - None => { - let addr = base::alloca(&pad_bcx, common::val_ty(llretval), ""); - Lifetime::Start.call(&pad_bcx, addr); - self.landingpad_alloca.set(Some(addr)); - addr - } - }; - pad_bcx.store(llretval, addr); - UnwindKind::LandingPad - }; - - // Generate the cleanup block and branch to it. 
- let cleanup_llbb = self.trans_cleanups_to_exit_scope(val); - val.branch(&pad_bcx, cleanup_llbb); - pad_bcx.llbb() - }; + let llbb = self.get_or_create_landing_pad(); // Push the scopes we removed back on: loop { From 85ef02d40446cccbc4a43a0d9fd4d39a67ff9037 Mon Sep 17 00:00:00 2001 From: Mark-Simulacrum Date: Mon, 12 Dec 2016 18:00:42 -0700 Subject: [PATCH 016/103] Only one DropValue per CleanupScope --- src/librustc_trans/callee.rs | 3 +- src/librustc_trans/cleanup.rs | 148 +++++++++++++--------------------- src/librustc_trans/glue.rs | 10 +-- 3 files changed, 64 insertions(+), 97 deletions(-) diff --git a/src/librustc_trans/callee.rs b/src/librustc_trans/callee.rs index a7200cf7538f7..971e01cc9e948 100644 --- a/src/librustc_trans/callee.rs +++ b/src/librustc_trans/callee.rs @@ -389,8 +389,7 @@ fn trans_fn_once_adapter_shim<'a, 'tcx>( // Call the by-ref closure body with `self` in a cleanup scope, // to drop `self` when the body returns, or in case it unwinds. - let self_scope = fcx.push_custom_cleanup_scope(); - fcx.schedule_drop_mem(self_scope, llenv, closure_ty); + let self_scope = fcx.schedule_drop_mem(llenv, closure_ty); let bcx = callee.call(bcx, &llargs[self_idx..], dest, None).0; diff --git a/src/librustc_trans/cleanup.rs b/src/librustc_trans/cleanup.rs index ebc2ff1f166da..002e92d5223c6 100644 --- a/src/librustc_trans/cleanup.rs +++ b/src/librustc_trans/cleanup.rs @@ -124,10 +124,10 @@ use value::Value; use rustc::ty::Ty; pub struct CleanupScope<'tcx> { - // Cleanups to run upon scope exit. - cleanups: Vec>, + // Cleanup to run upon scope exit. + cleanup: DropValue<'tcx>, - cached_early_exits: Vec, + cached_early_exit: Option, cached_landing_pad: Option, } @@ -146,54 +146,43 @@ enum UnwindKind { struct CachedEarlyExit { label: UnwindKind, cleanup_block: BasicBlockRef, - last_cleanup: usize, } impl<'blk, 'tcx> FunctionContext<'blk, 'tcx> { - pub fn push_custom_cleanup_scope(&self) -> CustomScopeIndex { - let index = self.scopes_len(); - debug!("push_custom_cleanup_scope(): {}", index); - self.push_scope(CleanupScope::new()); - CustomScopeIndex { index: index } - } - /// Removes the top cleanup scope from the stack, which must be a temporary scope, and /// generates the code to do its cleanups for normal exit. 
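    /// (Typical pairing, as in the `callee.rs` hunk above: the scope index
    /// returned by `schedule_drop_mem` is handed back in here once the call
    /// it guards has returned.)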
pub fn pop_and_trans_custom_cleanup_scope(&self, - mut bcx: BlockAndBuilder<'blk, 'tcx>, - custom_scope: CustomScopeIndex) + bcx: BlockAndBuilder<'blk, 'tcx>, + custom_scope: Option) -> BlockAndBuilder<'blk, 'tcx> { debug!("pop_and_trans_custom_cleanup_scope({:?})", custom_scope); + + let custom_scope = if let Some(scope) = custom_scope { + scope + } else { + return bcx; + }; + assert!(self.is_valid_custom_scope(custom_scope)); assert!(custom_scope.index == self.scopes.borrow().len() - 1); let scope = self.pop_scope(); - for cleanup in scope.cleanups.iter().rev() { - bcx = cleanup.trans(bcx.funclet(), bcx); - } - bcx + scope.cleanup.trans(bcx.funclet(), bcx) } /// Schedules a (deep) drop of `val`, which is a pointer to an instance of /// `ty` - pub fn schedule_drop_mem(&self, - cleanup_scope: CustomScopeIndex, - val: ValueRef, - ty: Ty<'tcx>) { - if !self.type_needs_drop(ty) { return; } + pub fn schedule_drop_mem(&self, val: ValueRef, ty: Ty<'tcx>) -> Option { + if !self.type_needs_drop(ty) { return None; } let drop = DropValue { val: val, ty: ty, skip_dtor: false, }; - debug!("schedule_drop_mem({:?}, val={:?}, ty={:?}) skip_dtor={}", - cleanup_scope, - Value(val), - ty, - drop.skip_dtor); + debug!("schedule_drop_mem(val={:?}, ty={:?}) skip_dtor={}", Value(val), ty, drop.skip_dtor); - self.schedule_clean(cleanup_scope, drop); + Some(self.push_scope(CleanupScope::new(drop))) } /// Issue #23611: Schedules a (deep) drop of the contents of @@ -201,13 +190,11 @@ impl<'blk, 'tcx> FunctionContext<'blk, 'tcx> { /// `ty`. The scheduled code handles extracting the discriminant /// and dropping the contents associated with that variant /// *without* executing any associated drop implementation. - pub fn schedule_drop_adt_contents(&self, - cleanup_scope: CustomScopeIndex, - val: ValueRef, - ty: Ty<'tcx>) { + pub fn schedule_drop_adt_contents(&self, val: ValueRef, ty: Ty<'tcx>) + -> Option { // `if` below could be "!contents_needs_drop"; skipping drop // is just an optimization, so sound to be conservative. - if !self.type_needs_drop(ty) { return; } + if !self.type_needs_drop(ty) { return None; } let drop = DropValue { val: val, @@ -215,26 +202,12 @@ impl<'blk, 'tcx> FunctionContext<'blk, 'tcx> { skip_dtor: true, }; - debug!("schedule_drop_adt_contents({:?}, val={:?}, ty={:?}) skip_dtor={}", - cleanup_scope, + debug!("schedule_drop_adt_contents(val={:?}, ty={:?}) skip_dtor={}", Value(val), ty, drop.skip_dtor); - self.schedule_clean(cleanup_scope, drop); - } - - /// Schedules a cleanup to occur in the top-most scope, which must be a temporary scope. - fn schedule_clean(&self, custom_scope: CustomScopeIndex, cleanup: DropValue<'tcx>) { - debug!("schedule_clean_in_custom_scope(custom_scope={})", - custom_scope.index); - - assert!(self.is_valid_custom_scope(custom_scope)); - - let mut scopes = self.scopes.borrow_mut(); - let scope = &mut (*scopes)[custom_scope.index]; - scope.cleanups.push(cleanup); - scope.cached_landing_pad = None; + Some(self.push_scope(CleanupScope::new(drop))) } /// Returns true if there are pending cleanups that should execute on panic. 
@@ -335,11 +308,8 @@ impl<'blk, 'tcx> FunctionContext<'blk, 'tcx> { let llbb = self.get_or_create_landing_pad(); // Push the scopes we removed back on: - loop { - match popped_scopes.pop() { - Some(scope) => self.push_scope(scope), - None => break - } + while let Some(scope) = popped_scopes.pop() { + self.push_scope(scope); } assert_eq!(self.scopes_len(), orig_scopes_len); @@ -356,8 +326,11 @@ impl<'blk, 'tcx> FunctionContext<'blk, 'tcx> { self.scopes.borrow().len() } - fn push_scope(&self, scope: CleanupScope<'tcx>) { - self.scopes.borrow_mut().push(scope) + fn push_scope(&self, scope: CleanupScope<'tcx>) -> CustomScopeIndex { + let index = self.scopes_len(); + debug!("pushing custom cleanup scope: {}", index); + self.scopes.borrow_mut().push(scope); + CustomScopeIndex { index: index } } fn pop_scope(&self) -> CleanupScope<'tcx> { @@ -401,7 +374,6 @@ impl<'blk, 'tcx> FunctionContext<'blk, 'tcx> { let orig_scopes_len = self.scopes_len(); let mut prev_llbb; let mut popped_scopes = vec![]; - let mut skip = 0; // First we pop off all the cleanup stacks that are // traversed until the exit is reached, pushing them @@ -450,9 +422,8 @@ impl<'blk, 'tcx> FunctionContext<'blk, 'tcx> { // scope for this label. If so, we can stop popping scopes // and branch to the cached label, since it contains the // cleanups for any subsequent scopes. - if let Some((exit, last_cleanup)) = cached_exit { + if let Some(exit) = cached_exit { prev_llbb = exit; - skip = last_cleanup; break; } } @@ -481,24 +452,17 @@ impl<'blk, 'tcx> FunctionContext<'blk, 'tcx> { // At this point, `popped_scopes` is empty, and so the final block // that we return to the user is `Cleanup(AST 24)`. while let Some(mut scope) = popped_scopes.pop() { - if !scope.cleanups.is_empty() { - let name = scope.block_name("clean"); - debug!("generating cleanups for {}", name); - - let bcx_in = self.build_new_block(&name[..]); - let exit_label = label.start(&bcx_in); - let next_llbb = bcx_in.llbb(); - let mut bcx_out = bcx_in; - let len = scope.cleanups.len(); - for cleanup in scope.cleanups.iter().rev().take(len - skip) { - bcx_out = cleanup.trans(bcx_out.funclet(), bcx_out); - } - skip = 0; - exit_label.branch(&bcx_out, prev_llbb); - prev_llbb = next_llbb; + let name = scope.block_name("clean"); + debug!("generating cleanup for {}", name); - scope.add_cached_early_exit(exit_label, prev_llbb, len); - } + let bcx_in = self.build_new_block(&name[..]); + let exit_label = label.start(&bcx_in); + let next_llbb = bcx_in.llbb(); + let bcx_out = scope.cleanup.trans(bcx_in.funclet(), bcx_in); + exit_label.branch(&bcx_out, prev_llbb); + prev_llbb = next_llbb; + + scope.add_cached_early_exit(exit_label, prev_llbb); self.push_scope(scope); } @@ -510,33 +474,37 @@ impl<'blk, 'tcx> FunctionContext<'blk, 'tcx> { } impl<'tcx> CleanupScope<'tcx> { - fn new() -> CleanupScope<'tcx> { + fn new(drop_val: DropValue<'tcx>) -> CleanupScope<'tcx> { CleanupScope { - cleanups: vec![], - cached_early_exits: vec![], + cleanup: drop_val, + cached_early_exit: None, cached_landing_pad: None, } } - fn cached_early_exit(&self, label: UnwindKind) -> Option<(BasicBlockRef, usize)> { - self.cached_early_exits.iter().rev() - .find(|e| e.label == label) - .map(|e| (e.cleanup_block, e.last_cleanup)) + fn cached_early_exit(&self, label: UnwindKind) -> Option { + if let Some(e) = self.cached_early_exit { + if e.label == label { + return Some(e.cleanup_block); + } + } + None } fn add_cached_early_exit(&mut self, label: UnwindKind, - blk: BasicBlockRef, - last_cleanup: usize) { - 
self.cached_early_exits.push( - CachedEarlyExit { label: label, - cleanup_block: blk, - last_cleanup: last_cleanup}); + blk: BasicBlockRef) { + assert!(self.cached_early_exit.is_none()); + self.cached_early_exit = Some(CachedEarlyExit { + label: label, + cleanup_block: blk, + }); } /// True if this scope has cleanups that need unwinding fn needs_invoke(&self) -> bool { - self.cached_landing_pad.is_some() || !self.cleanups.is_empty() + true + //self.cached_landing_pad.is_some() || self.cleanups.is_empty() } /// Returns a suitable name to use for the basic block that handles this cleanup scope diff --git a/src/librustc_trans/glue.rs b/src/librustc_trans/glue.rs index c7394ba68755a..692bf22d6f80c 100644 --- a/src/librustc_trans/glue.rs +++ b/src/librustc_trans/glue.rs @@ -245,13 +245,13 @@ fn trans_custom_dtor<'blk, 'tcx>(bcx: BlockAndBuilder<'blk, 'tcx>, // // FIXME (#14875) panic-in-drop semantics might be unsupported; we // might well consider changing below to more direct code. - let contents_scope = bcx.fcx().push_custom_cleanup_scope(); - // Issue #23611: schedule cleanup of contents, re-inspecting the // discriminant (if any) in case of variant swap in drop code. - if !shallow_drop { - bcx.fcx().schedule_drop_adt_contents(contents_scope, v0, t); - } + let contents_scope = if !shallow_drop { + bcx.fcx().schedule_drop_adt_contents(v0, t) + } else { + None + }; let (sized_args, unsized_args); let args: &[ValueRef] = if type_is_sized(tcx, t) { From 6412f3128d308371dfffc8b030872d89473874d8 Mon Sep 17 00:00:00 2001 From: Mark-Simulacrum Date: Mon, 12 Dec 2016 18:29:49 -0700 Subject: [PATCH 017/103] Propagate CleanupScope::needs_invoke being always true --- src/librustc_trans/cleanup.rs | 44 ++++------------------------------- 1 file changed, 4 insertions(+), 40 deletions(-) diff --git a/src/librustc_trans/cleanup.rs b/src/librustc_trans/cleanup.rs index 002e92d5223c6..362af9601d185 100644 --- a/src/librustc_trans/cleanup.rs +++ b/src/librustc_trans/cleanup.rs @@ -212,10 +212,10 @@ impl<'blk, 'tcx> FunctionContext<'blk, 'tcx> { /// Returns true if there are pending cleanups that should execute on panic. pub fn needs_invoke(&self) -> bool { - self.scopes.borrow().iter().rev().any(|s| s.needs_invoke()) + self.scopes_len() > 0 } - /// Creates a landing pad for the top scope, if one does not exist. The + /// Creates a landing pad for the top scope, if one does not exist. The /// landing pad will perform all cleanups necessary for an unwind and then /// `resume` to continue error propagation: /// @@ -223,10 +223,10 @@ impl<'blk, 'tcx> FunctionContext<'blk, 'tcx> { /// /// (The cleanups and resume instruction are created by /// `trans_cleanups_to_exit_scope()`, not in this function itself.) - fn get_or_create_landing_pad(&'blk self) -> BasicBlockRef { + pub fn get_landing_pad(&'blk self) -> BasicBlockRef { let pad_bcx; - debug!("get_or_create_landing_pad"); + debug!("get_landing_pad"); // Check if a landing pad block exists; if not, create one. { @@ -287,36 +287,6 @@ impl<'blk, 'tcx> FunctionContext<'blk, 'tcx> { return pad_bcx.llbb(); } - /// Returns a basic block to branch to in the event of a panic. This block - /// will run the panic cleanups and eventually resume the exception that - /// caused the landing pad to be run. 
- pub fn get_landing_pad(&'blk self) -> BasicBlockRef { - let _icx = base::push_ctxt("get_landing_pad"); - - debug!("get_landing_pad"); - - let orig_scopes_len = self.scopes_len(); - assert!(orig_scopes_len > 0); - - // Remove any scopes that do not have cleanups on panic: - let mut popped_scopes = vec![]; - while !self.top_scope(|s| s.needs_invoke()) { - debug!("top scope does not need invoke"); - popped_scopes.push(self.pop_scope()); - } - - let llbb = self.get_or_create_landing_pad(); - - // Push the scopes we removed back on: - while let Some(scope) = popped_scopes.pop() { - self.push_scope(scope); - } - - assert_eq!(self.scopes_len(), orig_scopes_len); - - return llbb; - } - fn is_valid_custom_scope(&self, custom_scope: CustomScopeIndex) -> bool { let scopes = self.scopes.borrow(); custom_scope.index < scopes.len() @@ -501,12 +471,6 @@ impl<'tcx> CleanupScope<'tcx> { }); } - /// True if this scope has cleanups that need unwinding - fn needs_invoke(&self) -> bool { - true - //self.cached_landing_pad.is_some() || self.cleanups.is_empty() - } - /// Returns a suitable name to use for the basic block that handles this cleanup scope fn block_name(&self, prefix: &str) -> String { format!("{}_custom_", prefix) From 91707dc9911d9e10193faaa87e47ea8cb0592818 Mon Sep 17 00:00:00 2001 From: Mark-Simulacrum Date: Mon, 12 Dec 2016 22:43:53 -0700 Subject: [PATCH 018/103] Merge need_invoke and needs_invoke --- src/librustc_trans/callee.rs | 10 +--------- src/librustc_trans/cleanup.rs | 9 +++++++-- 2 files changed, 8 insertions(+), 11 deletions(-) diff --git a/src/librustc_trans/callee.rs b/src/librustc_trans/callee.rs index 971e01cc9e948..c7eb6b88c7a52 100644 --- a/src/librustc_trans/callee.rs +++ b/src/librustc_trans/callee.rs @@ -686,16 +686,8 @@ fn trans_call_inner<'a, 'blk, 'tcx>(bcx: BlockAndBuilder<'blk, 'tcx>, _ => bug!("expected fn pointer callee, found {:?}", callee) }; - fn need_invoke(bcx: &BlockAndBuilder, had_lpad: bool) -> bool { - if bcx.sess().no_landing_pads() || had_lpad { - false - } else { - bcx.fcx().needs_invoke() - } - } - let _icx = push_ctxt("invoke_"); - let (llret, bcx) = if need_invoke(&bcx, lpad.is_some()) { + let (llret, bcx) = if bcx.fcx().needs_invoke(lpad.is_some()) { debug!("invoking {:?} at {:?}", Value(llfn), bcx.llbb()); for &llarg in &llargs { debug!("arg: {:?}", Value(llarg)); diff --git a/src/librustc_trans/cleanup.rs b/src/librustc_trans/cleanup.rs index 362af9601d185..63e0598f285c9 100644 --- a/src/librustc_trans/cleanup.rs +++ b/src/librustc_trans/cleanup.rs @@ -211,8 +211,12 @@ impl<'blk, 'tcx> FunctionContext<'blk, 'tcx> { } /// Returns true if there are pending cleanups that should execute on panic. - pub fn needs_invoke(&self) -> bool { - self.scopes_len() > 0 + pub fn needs_invoke(&self, lpad_present: bool) -> bool { + if self.ccx.sess().no_landing_pads() || lpad_present { + false + } else { + self.scopes_len() > 0 + } } /// Creates a landing pad for the top scope, if one does not exist. The @@ -297,6 +301,7 @@ impl<'blk, 'tcx> FunctionContext<'blk, 'tcx> { } fn push_scope(&self, scope: CleanupScope<'tcx>) -> CustomScopeIndex { + assert!(self.scopes_len() == 0); let index = self.scopes_len(); debug!("pushing custom cleanup scope: {}", index); self.scopes.borrow_mut().push(scope); From 51dfba1185104a64157235dc771953c21d89a284 Mon Sep 17 00:00:00 2001 From: Mark-Simulacrum Date: Tue, 13 Dec 2016 16:45:09 -0700 Subject: [PATCH 019/103] Refactor Vec into Option. 
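
The function context only ever has one pending cleanup scope at a time by
this point in the series (enforced by the assert added in the previous
patch), so the scope stack collapses from a `Vec` into an `Option`. A
minimal sketch of the field change on `FunctionContext` (types as in the
`common.rs` hunk below; the rest of the patch is the fallout of replacing
the push/pop stack discipline with set/take):

    // Before: a stack of scopes, maintained via push_scope/pop_scope.
    pub scopes: RefCell<Vec<CleanupScope<'tcx>>>,

    // After: at most one scope is live at a time. schedule_drop_mem
    // stores it here and returns an Option<()> token that callers pass
    // back to pop_and_trans_custom_cleanup_scope when the guarded call
    // completes.
    pub cleanup_scope: RefCell<Option<CleanupScope<'tcx>>>,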
--- src/librustc_trans/base.rs | 2 +- src/librustc_trans/callee.rs | 4 +- src/librustc_trans/cleanup.rs | 279 +++++++++++-------------------- src/librustc_trans/common.rs | 16 +- src/librustc_trans/glue.rs | 49 +++--- src/librustc_trans/mir/rvalue.rs | 6 +- src/librustc_trans/tvec.rs | 6 +- 7 files changed, 128 insertions(+), 234 deletions(-) diff --git a/src/librustc_trans/base.rs b/src/librustc_trans/base.rs index dc5fd1d009c2f..0d455d037de9b 100644 --- a/src/librustc_trans/base.rs +++ b/src/librustc_trans/base.rs @@ -765,7 +765,7 @@ impl<'blk, 'tcx> FunctionContext<'blk, 'tcx> { funclet_arena: TypedArena::new(), ccx: ccx, debug_context: debug_context, - scopes: RefCell::new(Vec::new()), + cleanup_scope: RefCell::new(None), } } diff --git a/src/librustc_trans/callee.rs b/src/librustc_trans/callee.rs index c7eb6b88c7a52..f772ff68ac948 100644 --- a/src/librustc_trans/callee.rs +++ b/src/librustc_trans/callee.rs @@ -392,9 +392,7 @@ fn trans_fn_once_adapter_shim<'a, 'tcx>( let self_scope = fcx.schedule_drop_mem(llenv, closure_ty); let bcx = callee.call(bcx, &llargs[self_idx..], dest, None).0; - - let bcx = fcx.pop_and_trans_custom_cleanup_scope(bcx, self_scope); - + fcx.pop_and_trans_custom_cleanup_scope(&bcx, self_scope); fcx.finish(&bcx); ccx.instances().borrow_mut().insert(method_instance, lloncefn); diff --git a/src/librustc_trans/cleanup.rs b/src/librustc_trans/cleanup.rs index 63e0598f285c9..8952fe9d8b8fe 100644 --- a/src/librustc_trans/cleanup.rs +++ b/src/librustc_trans/cleanup.rs @@ -152,27 +152,21 @@ impl<'blk, 'tcx> FunctionContext<'blk, 'tcx> { /// Removes the top cleanup scope from the stack, which must be a temporary scope, and /// generates the code to do its cleanups for normal exit. pub fn pop_and_trans_custom_cleanup_scope(&self, - bcx: BlockAndBuilder<'blk, 'tcx>, - custom_scope: Option) - -> BlockAndBuilder<'blk, 'tcx> { + bcx: &BlockAndBuilder<'blk, 'tcx>, + custom_scope: Option<()>) { debug!("pop_and_trans_custom_cleanup_scope({:?})", custom_scope); - let custom_scope = if let Some(scope) = custom_scope { - scope - } else { - return bcx; - }; - - assert!(self.is_valid_custom_scope(custom_scope)); - assert!(custom_scope.index == self.scopes.borrow().len() - 1); + if custom_scope.is_none() { + return; + } let scope = self.pop_scope(); - scope.cleanup.trans(bcx.funclet(), bcx) + scope.cleanup.trans(bcx.funclet(), &bcx); } /// Schedules a (deep) drop of `val`, which is a pointer to an instance of /// `ty` - pub fn schedule_drop_mem(&self, val: ValueRef, ty: Ty<'tcx>) -> Option { + pub fn schedule_drop_mem(&self, val: ValueRef, ty: Ty<'tcx>) -> Option<()> { if !self.type_needs_drop(ty) { return None; } let drop = DropValue { val: val, @@ -182,7 +176,7 @@ impl<'blk, 'tcx> FunctionContext<'blk, 'tcx> { debug!("schedule_drop_mem(val={:?}, ty={:?}) skip_dtor={}", Value(val), ty, drop.skip_dtor); - Some(self.push_scope(CleanupScope::new(drop))) + Some(self.set_scope(CleanupScope::new(drop))) } /// Issue #23611: Schedules a (deep) drop of the contents of @@ -190,8 +184,7 @@ impl<'blk, 'tcx> FunctionContext<'blk, 'tcx> { /// `ty`. The scheduled code handles extracting the discriminant /// and dropping the contents associated with that variant /// *without* executing any associated drop implementation. 
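    /// (Mechanically, the difference from `schedule_drop_mem` is that the
    /// `DropValue` scheduled here sets `skip_dtor: true`.)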
- pub fn schedule_drop_adt_contents(&self, val: ValueRef, ty: Ty<'tcx>) - -> Option { + pub fn schedule_drop_adt_contents(&self, val: ValueRef, ty: Ty<'tcx>) -> Option<()> { // `if` below could be "!contents_needs_drop"; skipping drop // is just an optimization, so sound to be conservative. if !self.type_needs_drop(ty) { return None; } @@ -207,7 +200,7 @@ impl<'blk, 'tcx> FunctionContext<'blk, 'tcx> { ty, drop.skip_dtor); - Some(self.push_scope(CleanupScope::new(drop))) + Some(self.set_scope(CleanupScope::new(drop))) } /// Returns true if there are pending cleanups that should execute on panic. @@ -215,7 +208,7 @@ impl<'blk, 'tcx> FunctionContext<'blk, 'tcx> { if self.ccx.sess().no_landing_pads() || lpad_present { false } else { - self.scopes_len() > 0 + self.has_scope() } } @@ -228,14 +221,14 @@ impl<'blk, 'tcx> FunctionContext<'blk, 'tcx> { /// (The cleanups and resume instruction are created by /// `trans_cleanups_to_exit_scope()`, not in this function itself.) pub fn get_landing_pad(&'blk self) -> BasicBlockRef { - let pad_bcx; + let mut pad_bcx; debug!("get_landing_pad"); // Check if a landing pad block exists; if not, create one. { - let mut scopes = self.scopes.borrow_mut(); - let last_scope = scopes.last_mut().unwrap(); + let mut last_scope = self.cleanup_scope.borrow_mut(); + let mut last_scope = last_scope.as_mut().unwrap(); match last_scope.cached_landing_pad { Some(llbb) => return llbb, None => { @@ -286,165 +279,94 @@ impl<'blk, 'tcx> FunctionContext<'blk, 'tcx> { // Generate the cleanup block and branch to it. let cleanup_llbb = self.trans_cleanups_to_exit_scope(val); - val.branch(&pad_bcx, cleanup_llbb); + val.branch(&mut pad_bcx, cleanup_llbb); return pad_bcx.llbb(); } - fn is_valid_custom_scope(&self, custom_scope: CustomScopeIndex) -> bool { - let scopes = self.scopes.borrow(); - custom_scope.index < scopes.len() - } - - fn scopes_len(&self) -> usize { - self.scopes.borrow().len() + fn has_scope(&self) -> bool { + self.cleanup_scope.borrow().is_some() } - fn push_scope(&self, scope: CleanupScope<'tcx>) -> CustomScopeIndex { - assert!(self.scopes_len() == 0); - let index = self.scopes_len(); - debug!("pushing custom cleanup scope: {}", index); - self.scopes.borrow_mut().push(scope); - CustomScopeIndex { index: index } + fn set_scope(&self, scope: CleanupScope<'tcx>) { + assert!(self.cleanup_scope.borrow().is_none()); + *self.cleanup_scope.borrow_mut() = Some(scope); } fn pop_scope(&self) -> CleanupScope<'tcx> { - debug!("popping cleanup scope {}, {} scopes remaining", - self.top_scope(|s| s.block_name("")), - self.scopes_len() - 1); - - self.scopes.borrow_mut().pop().unwrap() + debug!("took cleanup scope {}", self.top_scope(|s| s.block_name(""))); + self.cleanup_scope.borrow_mut().take().unwrap() } fn top_scope(&self, f: F) -> R where F: FnOnce(&CleanupScope<'tcx>) -> R { - f(self.scopes.borrow().last().unwrap()) + f(self.cleanup_scope.borrow().as_ref().unwrap()) + } + + fn generate_resume_block(&self, label: UnwindKind) -> BasicBlockRef { + // Generate a block that will resume unwinding to the calling function + let bcx = self.build_new_block("resume"); + match label { + UnwindKind::LandingPad => { + let addr = self.landingpad_alloca.get().unwrap(); + let lp = bcx.load(addr); + Lifetime::End.call(&bcx, addr); + if !bcx.sess().target.target.options.custom_unwind_resume { + bcx.resume(lp); + } else { + let exc_ptr = bcx.extract_value(lp, 0); + bcx.call(bcx.fcx().eh_unwind_resume().reify(bcx.ccx()), &[exc_ptr], None); + } + } + UnwindKind::CleanupPad(_) => { + 
bcx.cleanup_ret(bcx.cleanup_pad(None, &[]), None); + } + } + bcx.llbb() } /// Used when the caller wishes to jump to an early exit, such as a return, /// break, continue, or unwind. This function will generate all cleanups /// between the top of the stack and the exit `label` and return a basic /// block that the caller can branch to. - /// - /// For example, if the current stack of cleanups were as follows: - /// - /// AST 22 - /// Custom 1 - /// AST 23 - /// Loop 23 - /// Custom 2 - /// AST 24 - /// - /// and the `label` specifies a break from `Loop 23`, then this function - /// would generate a series of basic blocks as follows: - /// - /// Cleanup(AST 24) -> Cleanup(Custom 2) -> break_blk - /// - /// where `break_blk` is the block specified in `Loop 23` as the target for - /// breaks. The return value would be the first basic block in that sequence - /// (`Cleanup(AST 24)`). The caller could then branch to `Cleanup(AST 24)` - /// and it will perform all cleanups and finally branch to the `break_blk`. fn trans_cleanups_to_exit_scope(&'blk self, label: UnwindKind) -> BasicBlockRef { - debug!("trans_cleanups_to_exit_scope label={:?} scopes={}", label, self.scopes_len()); - - let orig_scopes_len = self.scopes_len(); - let mut prev_llbb; - let mut popped_scopes = vec![]; - - // First we pop off all the cleanup stacks that are - // traversed until the exit is reached, pushing them - // onto the side vector `popped_scopes`. No code is - // generated at this time. - // - // So, continuing the example from above, we would wind up - // with a `popped_scopes` vector of `[AST 24, Custom 2]`. - // (Presuming that there are no cached exits) - loop { - if self.scopes_len() == 0 { - // Generate a block that will resume unwinding to the - // calling function - let bcx = self.build_new_block("resume"); - match label { - UnwindKind::LandingPad => { - let addr = self.landingpad_alloca.get().unwrap(); - let lp = bcx.load(addr); - Lifetime::End.call(&bcx, addr); - if !bcx.sess().target.target.options.custom_unwind_resume { - bcx.resume(lp); - } else { - let exc_ptr = bcx.extract_value(lp, 0); - bcx.call( - bcx.fcx().eh_unwind_resume().reify(bcx.ccx()), - &[exc_ptr], - bcx.funclet().map(|b| b.bundle())); - } - } - UnwindKind::CleanupPad(_) => { - let pad = bcx.cleanup_pad(None, &[]); - bcx.cleanup_ret(pad, None); - } - } - prev_llbb = bcx.llbb(); - break; - } + debug!("trans_cleanups_to_exit_scope label={:?} has_scope={}", label, self.has_scope()); - // Pop off the scope, since we may be generating - // unwinding code for it. - let top_scope = self.pop_scope(); - let cached_exit = top_scope.cached_early_exit(label); - popped_scopes.push(top_scope); - - // Check if we have already cached the unwinding of this - // scope for this label. If so, we can stop popping scopes - // and branch to the cached label, since it contains the - // cleanups for any subsequent scopes. - if let Some(exit) = cached_exit { - prev_llbb = exit; - break; - } + // If there is no current scope, then there are no cleanups to run, so we should + // simply generate a resume block which will branch to the label. + if !self.has_scope() { + debug!("trans_cleanups_to_exit_scope: returning new block scope"); + return self.generate_resume_block(label); } - debug!("trans_cleanups_to_exit_scope: popped {} scopes", - popped_scopes.len()); - - // Now push the popped scopes back on. As we go, - // we track in `prev_llbb` the exit to which this scope - // should branch when it's done. 
- // - // So, continuing with our example, we will start out with - // `prev_llbb` being set to `break_blk` (or possibly a cached - // early exit). We will then pop the scopes from `popped_scopes` - // and generate a basic block for each one, prepending it in the - // series and updating `prev_llbb`. So we begin by popping `Custom 2` - // and generating `Cleanup(Custom 2)`. We make `Cleanup(Custom 2)` - // branch to `prev_llbb == break_blk`, giving us a sequence like: - // - // Cleanup(Custom 2) -> prev_llbb - // - // We then pop `AST 24` and repeat the process, giving us the sequence: - // - // Cleanup(AST 24) -> Cleanup(Custom 2) -> prev_llbb - // - // At this point, `popped_scopes` is empty, and so the final block - // that we return to the user is `Cleanup(AST 24)`. - while let Some(mut scope) = popped_scopes.pop() { - let name = scope.block_name("clean"); - debug!("generating cleanup for {}", name); - - let bcx_in = self.build_new_block(&name[..]); - let exit_label = label.start(&bcx_in); - let next_llbb = bcx_in.llbb(); - let bcx_out = scope.cleanup.trans(bcx_in.funclet(), bcx_in); - exit_label.branch(&bcx_out, prev_llbb); - prev_llbb = next_llbb; - - scope.add_cached_early_exit(exit_label, prev_llbb); - self.push_scope(scope); - } + // Pop off the scope, since we may be generating unwinding code for it. + let mut scope = self.pop_scope(); + let cached_exit = scope.cached_early_exit(label); + + // Check if we have already cached the unwinding of this + // scope for this label. If so, we can just branch to the cached block. + let exit_llbb = cached_exit.unwrap_or_else(|| self.generate_resume_block(label)); + + let name = scope.block_name("clean"); + debug!("generating cleanup for {}", name); + + let mut cleanup = self.build_new_block(&name[..]); + + // Insert cleanup instructions into the cleanup block + scope.cleanup.trans(label.get_funclet(&cleanup).as_ref(), &cleanup); + + // Insert instruction into cleanup block to branch to the exit + label.branch(&mut cleanup, exit_llbb); + + // Cache the work we've done here + // FIXME: Can this get called more than once per scope? If not, no need to cache. + scope.add_cached_early_exit(label, cleanup.llbb()); + + // Put the scope back + self.set_scope(scope); - debug!("trans_cleanups_to_exit_scope: prev_llbb={:?}", prev_llbb); + debug!("trans_cleanups_to_exit_scope: llbb={:?}", cleanup.llbb()); - assert_eq!(self.scopes_len(), orig_scopes_len); - prev_llbb + cleanup.llbb() } } @@ -483,42 +405,30 @@ impl<'tcx> CleanupScope<'tcx> { } impl UnwindKind { - /// Generates a branch going from `from_bcx` to `to_llbb` where `self` is - /// the exit label attached to the start of `from_bcx`. + /// Generates a branch going from `bcx` to `to_llbb` where `self` is + /// the exit label attached to the start of `bcx`. /// /// Transitions from an exit label to other exit labels depend on the type /// of label. For example with MSVC exceptions unwind exit labels will use /// the `cleanupret` instruction instead of the `br` instruction. 
- fn branch(&self, from_bcx: &BlockAndBuilder, to_llbb: BasicBlockRef) { - if let UnwindKind::CleanupPad(pad) = *self { - from_bcx.cleanup_ret(pad, Some(to_llbb)); - } else { - from_bcx.br(to_llbb); + fn branch(&self, bcx: &BlockAndBuilder, to_llbb: BasicBlockRef) { + match *self { + UnwindKind::CleanupPad(pad) => { + bcx.cleanup_ret(pad, Some(to_llbb)); + } + UnwindKind::LandingPad => { + bcx.br(to_llbb); + } } } - /// Generates the necessary instructions at the start of `bcx` to prepare - /// for the same kind of early exit label that `self` is. - /// - /// This function will appropriately configure `bcx` based on the kind of - /// label this is. For UnwindExit labels, the `funclet` field of the block will - /// be set to `Some`, and for MSVC exceptions this function will generate a - /// `cleanuppad` instruction at the start of the block so it may be jumped - /// to in the future (e.g. so this block can be cached as an early exit). - /// - /// Returns a new label which will can be used to cache `bcx` in the list of - /// early exits. - fn start(&self, bcx: &BlockAndBuilder) -> UnwindKind { + fn get_funclet(&self, bcx: &BlockAndBuilder) -> Option { match *self { - UnwindKind::CleanupPad(..) => { + UnwindKind::CleanupPad(_) => { let pad = bcx.cleanup_pad(None, &[]); - bcx.set_funclet(Funclet::msvc(pad)); - UnwindKind::CleanupPad(pad) - } - UnwindKind::LandingPad => { - bcx.set_funclet(Funclet::gnu()); - *self - } + Funclet::msvc(pad) + }, + UnwindKind::LandingPad => Funclet::gnu(), } } } @@ -544,8 +454,7 @@ pub struct DropValue<'tcx> { } impl<'tcx> DropValue<'tcx> { - fn trans<'blk>(&self, funclet: Option<&'blk Funclet>, bcx: BlockAndBuilder<'blk, 'tcx>) - -> BlockAndBuilder<'blk, 'tcx> { + fn trans<'blk>(&self, funclet: Option<&'blk Funclet>, bcx: &BlockAndBuilder<'blk, 'tcx>) { glue::call_drop_glue(bcx, self.val, self.ty, self.skip_dtor, funclet) } } diff --git a/src/librustc_trans/common.rs b/src/librustc_trans/common.rs index 440ae8326f540..0ef56895ecea6 100644 --- a/src/librustc_trans/common.rs +++ b/src/librustc_trans/common.rs @@ -317,7 +317,7 @@ pub struct FunctionContext<'a, 'tcx: 'a> { pub debug_context: debuginfo::FunctionDebugContext, // Cleanup scopes. - pub scopes: RefCell>>, + pub cleanup_scope: RefCell>>, } impl<'a, 'tcx> FunctionContext<'a, 'tcx> { @@ -483,7 +483,7 @@ pub struct BlockAndBuilder<'blk, 'tcx: 'blk> { // If this block part of a landing pad, then this is `Some` indicating what // kind of landing pad its in, otherwise this is none. - funclet: Cell>, + funclet: Option<&'blk Funclet>, // The function context for the function to which this block is // attached. @@ -499,7 +499,7 @@ impl<'blk, 'tcx> BlockAndBuilder<'blk, 'tcx> { owned_builder.builder.position_at_end(llbb); BlockAndBuilder { llbb: llbb, - funclet: Cell::new(None), + funclet: None, fcx: fcx, owned_builder: owned_builder, } @@ -535,17 +535,17 @@ impl<'blk, 'tcx> BlockAndBuilder<'blk, 'tcx> { self.fcx.mir() } - pub fn set_funclet(&self, funclet: Option) { - self.set_funclet_ref(funclet.map(|p| &*self.fcx().funclet_arena.alloc(p))) + pub fn set_funclet(&mut self, funclet: Option) { + self.funclet = funclet.map(|p| &*self.fcx().funclet_arena.alloc(p)); } - pub fn set_funclet_ref(&self, funclet: Option<&'blk Funclet>) { + pub fn set_funclet_ref(&mut self, funclet: Option<&'blk Funclet>) { // FIXME: use an IVar? 
- self.funclet.set(funclet); + self.funclet = funclet; } pub fn funclet(&self) -> Option<&'blk Funclet> { - self.funclet.get() + self.funclet } } diff --git a/src/librustc_trans/glue.rs b/src/librustc_trans/glue.rs index 692bf22d6f80c..1265381ff21d5 100644 --- a/src/librustc_trans/glue.rs +++ b/src/librustc_trans/glue.rs @@ -120,21 +120,17 @@ pub fn get_drop_glue_type<'a, 'tcx>(tcx: TyCtxt<'a, 'tcx, 'tcx>, } } -fn drop_ty<'blk, 'tcx>( - bcx: BlockAndBuilder<'blk, 'tcx>, - v: ValueRef, - t: Ty<'tcx>, -) -> BlockAndBuilder<'blk, 'tcx> { +fn drop_ty<'blk, 'tcx>(bcx: &BlockAndBuilder<'blk, 'tcx>, v: ValueRef, t: Ty<'tcx>) { call_drop_glue(bcx, v, t, false, None) } pub fn call_drop_glue<'blk, 'tcx>( - bcx: BlockAndBuilder<'blk, 'tcx>, + bcx: &BlockAndBuilder<'blk, 'tcx>, v: ValueRef, t: Ty<'tcx>, skip_dtor: bool, funclet: Option<&'blk Funclet>, -) -> BlockAndBuilder<'blk, 'tcx> { +) { // NB: v is an *alias* of type t here, not a direct value. debug!("call_drop_glue(t={:?}, skip_dtor={})", t, skip_dtor); let _icx = push_ctxt("drop_ty"); @@ -156,7 +152,6 @@ pub fn call_drop_glue<'blk, 'tcx>( // No drop-hint ==> call standard drop glue bcx.call(glue, &[ptr], funclet.map(|b| b.bundle())); } - bcx } pub fn get_drop_glue<'a, 'tcx>(ccx: &CrateContext<'a, 'tcx>, t: Ty<'tcx>) -> ValueRef { @@ -235,7 +230,6 @@ fn trans_custom_dtor<'blk, 'tcx>(bcx: BlockAndBuilder<'blk, 'tcx>, { debug!("trans_custom_dtor t: {}", t); let tcx = bcx.tcx(); - let mut bcx = bcx; let def = t.ty_adt_def().unwrap(); @@ -275,9 +269,9 @@ fn trans_custom_dtor<'blk, 'tcx>(bcx: BlockAndBuilder<'blk, 'tcx>, _ => bug!("dtor for {:?} is not an impl???", t) }; let dtor_did = def.destructor().unwrap(); - bcx = Callee::def(bcx.ccx(), dtor_did, vtbl.substs).call(bcx, args, None, None).0; - - bcx.fcx().pop_and_trans_custom_cleanup_scope(bcx, contents_scope) + let bcx = Callee::def(bcx.ccx(), dtor_did, vtbl.substs).call(bcx, args, None, None).0; + bcx.fcx().pop_and_trans_custom_cleanup_scope(&bcx, contents_scope); + bcx } pub fn size_and_align_of_dst<'blk, 'tcx>(bcx: &BlockAndBuilder<'blk, 'tcx>, @@ -411,7 +405,7 @@ fn make_drop_glue<'blk, 'tcx>(bcx: BlockAndBuilder<'blk, 'tcx>, if !type_is_sized(bcx.tcx(), content_ty) { let llval = get_dataptr(&bcx, v0); let llbox = bcx.load(llval); - let bcx = drop_ty(bcx, v0, content_ty); + drop_ty(&bcx, v0, content_ty); // FIXME(#36457) -- we should pass unsized values to drop glue as two arguments let info = get_meta(&bcx, v0); let info = bcx.load(info); @@ -429,7 +423,7 @@ fn make_drop_glue<'blk, 'tcx>(bcx: BlockAndBuilder<'blk, 'tcx>, } else { let llval = v0; let llbox = bcx.load(llval); - let bcx = drop_ty(bcx, llbox, content_ty); + drop_ty(&bcx, llbox, content_ty); trans_exchange_free_ty(bcx, llbox, content_ty) } } @@ -468,22 +462,18 @@ fn drop_structural_ty<'blk, 'tcx>(cx: BlockAndBuilder<'blk, 'tcx>, -> BlockAndBuilder<'blk, 'tcx> { let _icx = push_ctxt("drop_structural_ty"); - fn iter_variant<'blk, 'tcx>(cx: BlockAndBuilder<'blk, 'tcx>, + fn iter_variant<'blk, 'tcx>(cx: &BlockAndBuilder<'blk, 'tcx>, t: Ty<'tcx>, av: adt::MaybeSizedValue, variant: &'tcx ty::VariantDef, - substs: &Substs<'tcx>) - -> BlockAndBuilder<'blk, 'tcx> { + substs: &Substs<'tcx>) { let _icx = push_ctxt("iter_variant"); let tcx = cx.tcx(); - let mut cx = cx; - for (i, field) in variant.fields.iter().enumerate() { let arg = monomorphize::field_ty(tcx, substs, field); let field_ptr = adt::trans_field_ptr(&cx, t, av, Disr::from(variant.disr_val), i); - cx = drop_ty(cx, field_ptr, arg); + drop_ty(&cx, field_ptr, arg); } - return 
cx; } let value = if type_is_sized(cx.tcx(), t) { @@ -500,25 +490,24 @@ fn drop_structural_ty<'blk, 'tcx>(cx: BlockAndBuilder<'blk, 'tcx>, ty::TyClosure(def_id, substs) => { for (i, upvar_ty) in substs.upvar_tys(def_id, cx.tcx()).enumerate() { let llupvar = adt::trans_field_ptr(&cx, t, value, Disr(0), i); - cx = drop_ty(cx, llupvar, upvar_ty); + drop_ty(&cx, llupvar, upvar_ty); } } ty::TyArray(_, n) => { let base = get_dataptr(&cx, value.value); let len = C_uint(cx.ccx(), n); let unit_ty = t.sequence_element_type(cx.tcx()); - cx = tvec::slice_for_each(cx, base, unit_ty, len, - |bb, vv| drop_ty(bb, vv, unit_ty)); + cx = tvec::slice_for_each(&cx, base, unit_ty, len, |bb, vv| drop_ty(bb, vv, unit_ty)); } ty::TySlice(_) | ty::TyStr => { let unit_ty = t.sequence_element_type(cx.tcx()); - cx = tvec::slice_for_each(cx, value.value, unit_ty, value.meta, + cx = tvec::slice_for_each(&cx, value.value, unit_ty, value.meta, |bb, vv| drop_ty(bb, vv, unit_ty)); } ty::TyTuple(ref args) => { for (i, arg) in args.iter().enumerate() { let llfld_a = adt::trans_field_ptr(&cx, t, value, Disr(0), i); - cx = drop_ty(cx, llfld_a, *arg); + drop_ty(&cx, llfld_a, *arg); } } ty::TyAdt(adt, substs) => match adt.adt_kind() { @@ -536,7 +525,7 @@ fn drop_structural_ty<'blk, 'tcx>(cx: BlockAndBuilder<'blk, 'tcx>, cx.store(value.meta, get_meta(&cx, scratch)); scratch }; - cx = drop_ty(cx, val, field_ty); + drop_ty(&cx, val, field_ty); } } AdtKind::Union => { @@ -554,13 +543,13 @@ fn drop_structural_ty<'blk, 'tcx>(cx: BlockAndBuilder<'blk, 'tcx>, (adt::BranchKind::Single, None) => { if n_variants != 0 { assert!(n_variants == 1); - cx = iter_variant(cx, t, adt::MaybeSizedValue::sized(av), + iter_variant(&cx, t, adt::MaybeSizedValue::sized(av), &adt.variants[0], substs); } } (adt::BranchKind::Switch, Some(lldiscrim_a)) => { let tcx = cx.tcx(); - cx = drop_ty(cx, lldiscrim_a, tcx.types.isize); + drop_ty(&cx, lldiscrim_a, tcx.types.isize); // Create a fall-through basic block for the "else" case of // the switch instruction we're about to generate. 
Note that @@ -586,7 +575,7 @@ fn drop_structural_ty<'blk, 'tcx>(cx: BlockAndBuilder<'blk, 'tcx>, let variant_cx = fcx.build_new_block(&variant_cx_name); let case_val = adt::trans_case(&cx, t, Disr::from(variant.disr_val)); variant_cx.add_case(llswitch, case_val, variant_cx.llbb()); - let variant_cx = iter_variant(variant_cx, t, value, variant, substs); + iter_variant(&variant_cx, t, value, variant, substs); variant_cx.br(next_cx.llbb()); } cx = next_cx; diff --git a/src/librustc_trans/mir/rvalue.rs b/src/librustc_trans/mir/rvalue.rs index e71449938e9b5..31cbc4074b206 100644 --- a/src/librustc_trans/mir/rvalue.rs +++ b/src/librustc_trans/mir/rvalue.rs @@ -98,11 +98,9 @@ impl<'bcx, 'tcx> MirContext<'bcx, 'tcx> { let size = count.value.as_u64(bcx.tcx().sess.target.uint_type); let size = C_uint(bcx.ccx(), size); let base = base::get_dataptr(&bcx, dest.llval); - let bcx = tvec::slice_for_each(bcx, base, tr_elem.ty, size, |bcx, llslot| { + tvec::slice_for_each(&bcx, base, tr_elem.ty, size, |bcx, llslot| { self.store_operand_direct(&bcx, llslot, tr_elem); - bcx - }); - bcx + }) } mir::Rvalue::Aggregate(ref kind, ref operands) => { diff --git a/src/librustc_trans/tvec.rs b/src/librustc_trans/tvec.rs index b90c66f531c46..6519d372c72c4 100644 --- a/src/librustc_trans/tvec.rs +++ b/src/librustc_trans/tvec.rs @@ -16,13 +16,13 @@ use base::*; use common::*; use rustc::ty::Ty; -pub fn slice_for_each<'blk, 'tcx, F>(bcx: BlockAndBuilder<'blk, 'tcx>, +pub fn slice_for_each<'blk, 'tcx, F>(bcx: &BlockAndBuilder<'blk, 'tcx>, data_ptr: ValueRef, unit_ty: Ty<'tcx>, len: ValueRef, f: F) -> BlockAndBuilder<'blk, 'tcx> - where F: FnOnce(BlockAndBuilder<'blk, 'tcx>, ValueRef) -> BlockAndBuilder<'blk, 'tcx>, + where F: FnOnce(&BlockAndBuilder<'blk, 'tcx>, ValueRef) { let _icx = push_ctxt("tvec::slice_for_each"); let fcx = bcx.fcx(); @@ -52,7 +52,7 @@ pub fn slice_for_each<'blk, 'tcx, F>(bcx: BlockAndBuilder<'blk, 'tcx>, let keep_going = header_bcx.icmp(llvm::IntNE, current, end); header_bcx.cond_br(keep_going, body_bcx.llbb(), next_bcx.llbb()); - let body_bcx = f(body_bcx, if zst { data_ptr } else { current }); + f(&body_bcx, if zst { data_ptr } else { current }); // FIXME(simulacrum): The code below is identical to the closure (add) above, but using the // closure doesn't compile due to body_bcx still being borrowed when dropped. 
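    // (Presumably the shared borrow of `body_bcx` taken for `f` above is
    // still live under the lexical borrow checker, so capturing `body_bcx`
    // in the closure would conflict with it.)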
let next = if zst { From 28d00e781bbe111d3c9f7a864e6ecdd3d29bcfa9 Mon Sep 17 00:00:00 2001 From: Mark-Simulacrum Date: Wed, 14 Dec 2016 07:27:59 -0700 Subject: [PATCH 020/103] Remove cleanup scope from FunctionContext --- src/librustc_trans/base.rs | 1 - src/librustc_trans/callee.rs | 81 +++++++++++++++++++++------ src/librustc_trans/cleanup.rs | 101 ++++++++++------------------------ src/librustc_trans/common.rs | 6 +- src/librustc_trans/glue.rs | 63 +++++++++++++++++++-- 5 files changed, 154 insertions(+), 98 deletions(-) diff --git a/src/librustc_trans/base.rs b/src/librustc_trans/base.rs index 0d455d037de9b..3cbc3935cfc72 100644 --- a/src/librustc_trans/base.rs +++ b/src/librustc_trans/base.rs @@ -765,7 +765,6 @@ impl<'blk, 'tcx> FunctionContext<'blk, 'tcx> { funclet_arena: TypedArena::new(), ccx: ccx, debug_context: debug_context, - cleanup_scope: RefCell::new(None), } } diff --git a/src/librustc_trans/callee.rs b/src/librustc_trans/callee.rs index f772ff68ac948..1a4afb4b02e6b 100644 --- a/src/librustc_trans/callee.rs +++ b/src/librustc_trans/callee.rs @@ -27,6 +27,7 @@ use base::*; use common::{ self, BlockAndBuilder, CrateContext, FunctionContext, SharedCrateContext }; +use cleanup::CleanupScope; use consts; use declare; use value::Value; @@ -389,10 +390,10 @@ fn trans_fn_once_adapter_shim<'a, 'tcx>( // Call the by-ref closure body with `self` in a cleanup scope, // to drop `self` when the body returns, or in case it unwinds. - let self_scope = fcx.schedule_drop_mem(llenv, closure_ty); - - let bcx = callee.call(bcx, &llargs[self_idx..], dest, None).0; - fcx.pop_and_trans_custom_cleanup_scope(&bcx, self_scope); + let mut self_scope = fcx.schedule_drop_mem(llenv, closure_ty); + let bcx = trans_call_fn_once_adapter_shim( + bcx, callee, &llargs[self_idx..], dest, &mut self_scope); + fcx.trans_scope(&bcx, self_scope); fcx.finish(&bcx); ccx.instances().borrow_mut().insert(method_instance, lloncefn); @@ -685,23 +686,69 @@ fn trans_call_inner<'a, 'blk, 'tcx>(bcx: BlockAndBuilder<'blk, 'tcx>, }; let _icx = push_ctxt("invoke_"); - let (llret, bcx) = if bcx.fcx().needs_invoke(lpad.is_some()) { - debug!("invoking {:?} at {:?}", Value(llfn), bcx.llbb()); - for &llarg in &llargs { - debug!("arg: {:?}", Value(llarg)); + let llret = bcx.call(llfn, &llargs[..], lpad); + fn_ty.apply_attrs_callsite(llret); + + // If the function we just called does not use an outpointer, + // store the result into the rust outpointer. Cast the outpointer + // type to match because some ABIs will use a different type than + // the Rust type. e.g., a {u32,u32} struct could be returned as + // u64. + if !fn_ty.ret.is_indirect() { + if let Some(llretslot) = opt_llretslot { + fn_ty.ret.store(&bcx, llret, llretslot); + } + } + + if fn_ret.0.is_never() { + bcx.unreachable(); + } + + (bcx, llret) +} + +// This is a cleaned up version of trans_call_inner. +fn trans_call_fn_once_adapter_shim<'a, 'blk, 'tcx>( + bcx: BlockAndBuilder<'blk, 'tcx>, + callee: Callee<'tcx>, + args: &[ValueRef], + opt_llretslot: Option, + cleanup_scope: &mut Option>, +) -> BlockAndBuilder<'blk, 'tcx> { + let fn_ret = callee.ty.fn_ret(); + let fn_ty = callee.direct_fn_type(bcx.ccx(), &[]); + + // If there no destination, return must be direct, with no cast. 
+ if opt_llretslot.is_none() { + assert!(!fn_ty.ret.is_indirect() && fn_ty.ret.cast.is_none()); + } + + let mut llargs = Vec::new(); + + if fn_ty.ret.is_indirect() { + let mut llretslot = opt_llretslot.unwrap(); + if let Some(ty) = fn_ty.ret.cast { + llretslot = bcx.pointercast(llretslot, ty.ptr_to()); } + llargs.push(llretslot); + } + + llargs.extend_from_slice(args); + + let llfn = match callee.data { + Fn(f) => f, + _ => bug!("expected fn pointer callee, found {:?}", callee) + }; + + let _icx = push_ctxt("invoke_"); + let (llret, bcx) = if cleanup_scope.is_some() && !bcx.sess().no_landing_pads() { let normal_bcx = bcx.fcx().build_new_block("normal-return"); - let landing_pad = bcx.fcx().get_landing_pad(); + let landing_pad = bcx.fcx().get_landing_pad(cleanup_scope); - let llresult = bcx.invoke(llfn, &llargs[..], normal_bcx.llbb(), landing_pad, lpad); + let llresult = bcx.invoke(llfn, &llargs[..], normal_bcx.llbb(), landing_pad, None); (llresult, normal_bcx) } else { - debug!("calling {:?} at {:?}", Value(llfn), bcx.llbb()); - for &llarg in &llargs { - debug!("arg: {:?}", Value(llarg)); - } - - let llresult = bcx.call(llfn, &llargs[..], lpad); + let llresult = bcx.call(llfn, &llargs[..], None); (llresult, bcx) }; fn_ty.apply_attrs_callsite(llret); @@ -721,5 +768,5 @@ fn trans_call_inner<'a, 'blk, 'tcx>(bcx: BlockAndBuilder<'blk, 'tcx>, bcx.unreachable(); } - (bcx, llret) + bcx } diff --git a/src/librustc_trans/cleanup.rs b/src/librustc_trans/cleanup.rs index 8952fe9d8b8fe..d9e8b795cb16d 100644 --- a/src/librustc_trans/cleanup.rs +++ b/src/librustc_trans/cleanup.rs @@ -149,24 +149,19 @@ struct CachedEarlyExit { } impl<'blk, 'tcx> FunctionContext<'blk, 'tcx> { - /// Removes the top cleanup scope from the stack, which must be a temporary scope, and - /// generates the code to do its cleanups for normal exit. - pub fn pop_and_trans_custom_cleanup_scope(&self, - bcx: &BlockAndBuilder<'blk, 'tcx>, - custom_scope: Option<()>) { - debug!("pop_and_trans_custom_cleanup_scope({:?})", custom_scope); - - if custom_scope.is_none() { - return; + pub fn trans_scope( + &self, + bcx: &BlockAndBuilder<'blk, 'tcx>, + custom_scope: Option> + ) { + if let Some(scope) = custom_scope { + scope.cleanup.trans(bcx.funclet(), &bcx); } - - let scope = self.pop_scope(); - scope.cleanup.trans(bcx.funclet(), &bcx); } /// Schedules a (deep) drop of `val`, which is a pointer to an instance of /// `ty` - pub fn schedule_drop_mem(&self, val: ValueRef, ty: Ty<'tcx>) -> Option<()> { + pub fn schedule_drop_mem(&self, val: ValueRef, ty: Ty<'tcx>) -> Option> { if !self.type_needs_drop(ty) { return None; } let drop = DropValue { val: val, @@ -176,7 +171,7 @@ impl<'blk, 'tcx> FunctionContext<'blk, 'tcx> { debug!("schedule_drop_mem(val={:?}, ty={:?}) skip_dtor={}", Value(val), ty, drop.skip_dtor); - Some(self.set_scope(CleanupScope::new(drop))) + Some(CleanupScope::new(drop)) } /// Issue #23611: Schedules a (deep) drop of the contents of @@ -184,7 +179,8 @@ impl<'blk, 'tcx> FunctionContext<'blk, 'tcx> { /// `ty`. The scheduled code handles extracting the discriminant /// and dropping the contents associated with that variant /// *without* executing any associated drop implementation. - pub fn schedule_drop_adt_contents(&self, val: ValueRef, ty: Ty<'tcx>) -> Option<()> { + pub fn schedule_drop_adt_contents(&self, val: ValueRef, ty: Ty<'tcx>) + -> Option> { // `if` below could be "!contents_needs_drop"; skipping drop // is just an optimization, so sound to be conservative. 
if !self.type_needs_drop(ty) { return None; } @@ -200,16 +196,7 @@ impl<'blk, 'tcx> FunctionContext<'blk, 'tcx> { ty, drop.skip_dtor); - Some(self.set_scope(CleanupScope::new(drop))) - } - - /// Returns true if there are pending cleanups that should execute on panic. - pub fn needs_invoke(&self, lpad_present: bool) -> bool { - if self.ccx.sess().no_landing_pads() || lpad_present { - false - } else { - self.has_scope() - } + Some(CleanupScope::new(drop)) } /// Creates a landing pad for the top scope, if one does not exist. The @@ -220,22 +207,21 @@ impl<'blk, 'tcx> FunctionContext<'blk, 'tcx> { /// /// (The cleanups and resume instruction are created by /// `trans_cleanups_to_exit_scope()`, not in this function itself.) - pub fn get_landing_pad(&'blk self) -> BasicBlockRef { - let mut pad_bcx; + pub fn get_landing_pad(&'blk self, scope: &mut Option>) -> BasicBlockRef { + // TODO: Factor out and take a CleanupScope. + assert!(scope.is_some()); debug!("get_landing_pad"); // Check if a landing pad block exists; if not, create one. - { - let mut last_scope = self.cleanup_scope.borrow_mut(); - let mut last_scope = last_scope.as_mut().unwrap(); - match last_scope.cached_landing_pad { - Some(llbb) => return llbb, - None => { - let name = last_scope.block_name("unwind"); - pad_bcx = self.build_new_block(&name[..]); - last_scope.cached_landing_pad = Some(pad_bcx.llbb()); - } + let mut scope = scope.as_mut().unwrap(); + let mut pad_bcx = match scope.cached_landing_pad { + Some(llbb) => return llbb, + None => { + let name = scope.block_name("unwind"); + let pad_bcx = self.build_new_block(&name[..]); + scope.cached_landing_pad = Some(pad_bcx.llbb()); + pad_bcx } }; @@ -278,30 +264,12 @@ impl<'blk, 'tcx> FunctionContext<'blk, 'tcx> { }; // Generate the cleanup block and branch to it. - let cleanup_llbb = self.trans_cleanups_to_exit_scope(val); + let cleanup_llbb = self.trans_cleanups_to_exit_scope(val, scope); val.branch(&mut pad_bcx, cleanup_llbb); return pad_bcx.llbb(); } - fn has_scope(&self) -> bool { - self.cleanup_scope.borrow().is_some() - } - - fn set_scope(&self, scope: CleanupScope<'tcx>) { - assert!(self.cleanup_scope.borrow().is_none()); - *self.cleanup_scope.borrow_mut() = Some(scope); - } - - fn pop_scope(&self) -> CleanupScope<'tcx> { - debug!("took cleanup scope {}", self.top_scope(|s| s.block_name(""))); - self.cleanup_scope.borrow_mut().take().unwrap() - } - - fn top_scope(&self, f: F) -> R where F: FnOnce(&CleanupScope<'tcx>) -> R { - f(self.cleanup_scope.borrow().as_ref().unwrap()) - } - fn generate_resume_block(&self, label: UnwindKind) -> BasicBlockRef { // Generate a block that will resume unwinding to the calling function let bcx = self.build_new_block("resume"); @@ -328,18 +296,12 @@ impl<'blk, 'tcx> FunctionContext<'blk, 'tcx> { /// break, continue, or unwind. This function will generate all cleanups /// between the top of the stack and the exit `label` and return a basic /// block that the caller can branch to. - fn trans_cleanups_to_exit_scope(&'blk self, label: UnwindKind) -> BasicBlockRef { - debug!("trans_cleanups_to_exit_scope label={:?} has_scope={}", label, self.has_scope()); - - // If there is no current scope, then there are no cleanups to run, so we should - // simply generate a resume block which will branch to the label. - if !self.has_scope() { - debug!("trans_cleanups_to_exit_scope: returning new block scope"); - return self.generate_resume_block(label); - } - - // Pop off the scope, since we may be generating unwinding code for it. 
-        let mut scope = self.pop_scope();
+    fn trans_cleanups_to_exit_scope(
+        &'blk self,
+        label: UnwindKind,
+        scope: &mut CleanupScope<'tcx>
+    ) -> BasicBlockRef {
+        debug!("trans_cleanups_to_exit_scope label={:?}`", label);
 
         let cached_exit = scope.cached_early_exit(label);
 
         // Check if we have already cached the unwinding of this
@@ -361,9 +323,6 @@ impl<'blk, 'tcx> FunctionContext<'blk, 'tcx> {
         // FIXME: Can this get called more than once per scope? If not, no need to cache.
         scope.add_cached_early_exit(label, cleanup.llbb());
 
-        // Put the scope back
-        self.set_scope(scope);
-
         debug!("trans_cleanups_to_exit_scope: llbb={:?}", cleanup.llbb());
 
         cleanup.llbb()
diff --git a/src/librustc_trans/common.rs b/src/librustc_trans/common.rs
index 0ef56895ecea6..8637bb322ca16 100644
--- a/src/librustc_trans/common.rs
+++ b/src/librustc_trans/common.rs
@@ -28,7 +28,6 @@ use abi::{Abi, FnType};
 use base;
 use builder::Builder;
 use callee::Callee;
-use cleanup;
 use consts;
 use debuginfo;
 use declare;
@@ -48,7 +47,7 @@ use std::borrow::Cow;
 use std::iter;
 use std::ops::Deref;
 use std::ffi::CString;
-use std::cell::{Cell, RefCell, Ref};
+use std::cell::{Cell, Ref};
 use syntax::ast;
 use syntax::symbol::{Symbol, InternedString};
@@ -315,9 +314,6 @@ pub struct FunctionContext<'a, 'tcx: 'a> {
 
     // Used and maintained by the debuginfo module.
     pub debug_context: debuginfo::FunctionDebugContext,
-
-    // Cleanup scopes.
-    pub cleanup_scope: RefCell<Option<CleanupScope<'tcx>>>,
 }
 
 impl<'a, 'tcx> FunctionContext<'a, 'tcx> {
diff --git a/src/librustc_trans/glue.rs b/src/librustc_trans/glue.rs
index 1265381ff21d5..dc3f75d52b98f 100644
--- a/src/librustc_trans/glue.rs
+++ b/src/librustc_trans/glue.rs
@@ -22,7 +22,9 @@ use rustc::traits;
 use rustc::ty::{self, AdtKind, Ty, TyCtxt, TypeFoldable};
 use adt;
 use base::*;
-use callee::{Callee};
+use callee::{Callee, CalleeData};
+use cleanup::CleanupScope;
+use meth;
 use common::*;
 use machine::*;
 use monomorphize;
@@ -241,7 +243,7 @@ fn trans_custom_dtor<'blk, 'tcx>(bcx: BlockAndBuilder<'blk, 'tcx>,
     // might well consider changing below to more direct code.
     // Issue #23611: schedule cleanup of contents, re-inspecting the
     // discriminant (if any) in case of variant swap in drop code.
-    let contents_scope = if !shallow_drop {
+    let mut contents_scope = if !shallow_drop {
         bcx.fcx().schedule_drop_adt_contents(v0, t)
     } else {
         None
@@ -269,8 +271,61 @@ fn trans_custom_dtor<'blk, 'tcx>(bcx: BlockAndBuilder<'blk, 'tcx>,
         _ => bug!("dtor for {:?} is not an impl???", t)
     };
     let dtor_did = def.destructor().unwrap();
-    let bcx = Callee::def(bcx.ccx(), dtor_did, vtbl.substs).call(bcx, args, None, None).0;
-    bcx.fcx().pop_and_trans_custom_cleanup_scope(&bcx, contents_scope);
+    let callee = Callee::def(bcx.ccx(), dtor_did, vtbl.substs);
+    let bcx = trans_call_custom_dtor(bcx, callee, args, &mut contents_scope);
+    bcx.fcx().trans_scope(&bcx, contents_scope);
+    bcx
+}
+
+// Inlined and simplified version of callee::trans_call_inner
+fn trans_call_custom_dtor<'a, 'blk, 'tcx>(
+    bcx: BlockAndBuilder<'blk, 'tcx>,
+    callee: Callee<'tcx>,
+    args: &[ValueRef],
+    cleanup_scope: &mut Option<CleanupScope<'tcx>>,
+) -> BlockAndBuilder<'blk, 'tcx> {
+    let fn_ret = callee.ty.fn_ret();
+    let fn_ty = callee.direct_fn_type(bcx.ccx(), &[]);
+
+    // Return must be direct, with no cast.
+    assert!(!fn_ty.ret.is_indirect() && fn_ty.ret.cast.is_none());
+
+    let mut llargs = Vec::new();
+
+    let llfn = match callee.data {
+        CalleeData::Virtual(idx) => {
+            llargs.push(args[0]);
+
+            let fn_ptr = meth::get_virtual_method(&bcx, args[1], idx);
+            let llty = fn_ty.llvm_type(&bcx.ccx()).ptr_to();
+            let llfn = bcx.pointercast(fn_ptr, llty);
+            llargs.extend_from_slice(&args[2..]);
+            llfn
+        }
+        CalleeData::Fn(f) => {
+            llargs.extend_from_slice(args);
+            f
+        }
+        _ => bug!("Expected virtual or fn pointer callee, found {:?}", callee)
+    };
+
+    let _icx = push_ctxt("invoke_");
+    let (llret, bcx) = if cleanup_scope.is_some() && !bcx.sess().no_landing_pads() {
+        let normal_bcx = bcx.fcx().build_new_block("normal-return");
+        let landing_pad = bcx.fcx().get_landing_pad(cleanup_scope);
+
+        let llresult = bcx.invoke(llfn, &llargs[..], normal_bcx.llbb(), landing_pad, None);
+        (llresult, normal_bcx)
+    } else {
+        let llresult = bcx.call(llfn, &llargs[..], None);
+        (llresult, bcx)
+    };
+    fn_ty.apply_attrs_callsite(llret);
+
+    if fn_ret.0.is_never() {
+        bcx.unreachable();
+    }
+    bcx
+}

From cd57bbe27abac8ece0b927f9ab830ef98f7927e3 Mon Sep 17 00:00:00 2001
From: Mark-Simulacrum
Date: Wed, 14 Dec 2016 09:26:27 -0700
Subject: [PATCH 021/103] Refactor get_landing_pad to take a CleanupScope

It unwrapped the Option anyway, so this more closely resembles the
reality of what's happening.
---
 src/librustc_trans/callee.rs  |  2 +-
 src/librustc_trans/cleanup.rs | 50 +++++++++++++++--------------------
 src/librustc_trans/glue.rs    |  2 +-
 3 files changed, 24 insertions(+), 30 deletions(-)

diff --git a/src/librustc_trans/callee.rs b/src/librustc_trans/callee.rs
index 1a4afb4b02e6b..982a8e9514c23 100644
--- a/src/librustc_trans/callee.rs
+++ b/src/librustc_trans/callee.rs
@@ -743,7 +743,7 @@ fn trans_call_fn_once_adapter_shim<'a, 'blk, 'tcx>(
     let _icx = push_ctxt("invoke_");
     let (llret, bcx) = if cleanup_scope.is_some() && !bcx.sess().no_landing_pads() {
         let normal_bcx = bcx.fcx().build_new_block("normal-return");
-        let landing_pad = bcx.fcx().get_landing_pad(cleanup_scope);
+        let landing_pad = bcx.fcx().get_landing_pad(cleanup_scope.as_mut().unwrap());
 
         let llresult = bcx.invoke(llfn, &llargs[..], normal_bcx.llbb(), landing_pad, None);
         (llresult, normal_bcx)
diff --git a/src/librustc_trans/cleanup.rs b/src/librustc_trans/cleanup.rs
index d9e8b795cb16d..f1ca228035a66 100644
--- a/src/librustc_trans/cleanup.rs
+++ b/src/librustc_trans/cleanup.rs
@@ -207,14 +207,10 @@ impl<'blk, 'tcx> FunctionContext<'blk, 'tcx> {
     ///
     /// (The cleanups and resume instruction are created by
     /// `trans_cleanups_to_exit_scope()`, not in this function itself.)
-    pub fn get_landing_pad(&'blk self, scope: &mut Option<CleanupScope<'tcx>>) -> BasicBlockRef {
-        // TODO: Factor out and take a CleanupScope.
-        assert!(scope.is_some());
-
+    pub fn get_landing_pad(&'blk self, scope: &mut CleanupScope<'tcx>) -> BasicBlockRef {
         debug!("get_landing_pad");
 
         // Check if a landing pad block exists; if not, create one.
- let mut scope = scope.as_mut().unwrap(); let mut pad_bcx = match scope.cached_landing_pad { Some(llbb) => return llbb, None => { @@ -270,28 +266,6 @@ impl<'blk, 'tcx> FunctionContext<'blk, 'tcx> { return pad_bcx.llbb(); } - fn generate_resume_block(&self, label: UnwindKind) -> BasicBlockRef { - // Generate a block that will resume unwinding to the calling function - let bcx = self.build_new_block("resume"); - match label { - UnwindKind::LandingPad => { - let addr = self.landingpad_alloca.get().unwrap(); - let lp = bcx.load(addr); - Lifetime::End.call(&bcx, addr); - if !bcx.sess().target.target.options.custom_unwind_resume { - bcx.resume(lp); - } else { - let exc_ptr = bcx.extract_value(lp, 0); - bcx.call(bcx.fcx().eh_unwind_resume().reify(bcx.ccx()), &[exc_ptr], None); - } - } - UnwindKind::CleanupPad(_) => { - bcx.cleanup_ret(bcx.cleanup_pad(None, &[]), None); - } - } - bcx.llbb() - } - /// Used when the caller wishes to jump to an early exit, such as a return, /// break, continue, or unwind. This function will generate all cleanups /// between the top of the stack and the exit `label` and return a basic @@ -306,7 +280,27 @@ impl<'blk, 'tcx> FunctionContext<'blk, 'tcx> { // Check if we have already cached the unwinding of this // scope for this label. If so, we can just branch to the cached block. - let exit_llbb = cached_exit.unwrap_or_else(|| self.generate_resume_block(label)); + let exit_llbb = cached_exit.unwrap_or_else(|| { + // Generate a block that will resume unwinding to the calling function + let bcx = self.build_new_block("resume"); + match label { + UnwindKind::LandingPad => { + let addr = self.landingpad_alloca.get().unwrap(); + let lp = bcx.load(addr); + Lifetime::End.call(&bcx, addr); + if !bcx.sess().target.target.options.custom_unwind_resume { + bcx.resume(lp); + } else { + let exc_ptr = bcx.extract_value(lp, 0); + bcx.call(bcx.fcx().eh_unwind_resume().reify(bcx.ccx()), &[exc_ptr], None); + } + } + UnwindKind::CleanupPad(_) => { + bcx.cleanup_ret(bcx.cleanup_pad(None, &[]), None); + } + } + bcx.llbb() + }); let name = scope.block_name("clean"); debug!("generating cleanup for {}", name); diff --git a/src/librustc_trans/glue.rs b/src/librustc_trans/glue.rs index dc3f75d52b98f..549f1db22e7d8 100644 --- a/src/librustc_trans/glue.rs +++ b/src/librustc_trans/glue.rs @@ -312,7 +312,7 @@ fn trans_call_custom_dtor<'a, 'blk, 'tcx>( let _icx = push_ctxt("invoke_"); let (llret, bcx) = if cleanup_scope.is_some() && !bcx.sess().no_landing_pads() { let normal_bcx = bcx.fcx().build_new_block("normal-return"); - let landing_pad = bcx.fcx().get_landing_pad(cleanup_scope); + let landing_pad = bcx.fcx().get_landing_pad(cleanup_scope.as_mut().unwrap()); let llresult = bcx.invoke(llfn, &llargs[..], normal_bcx.llbb(), landing_pad, None); (llresult, normal_bcx) From 3265afa08f304716ca6d8631a8526e6b98a1178b Mon Sep 17 00:00:00 2001 From: Mark-Simulacrum Date: Wed, 14 Dec 2016 16:41:00 -0700 Subject: [PATCH 022/103] Inline and simplify Callee::call duplicates. 
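
Both helpers kept a private copy of the call/invoke split from `Callee::call`.
The shared shape is roughly the following sketch (simplified; `llfn`, `llargs`,
`scope`, and `fn_ty` stand for the corresponding locals at each call site):

    // If a cleanup scope is pending and landing pads are enabled, the call
    // must be an invoke so the cleanup runs on unwind; otherwise a plain
    // call suffices.
    let llret;
    if scope.is_some() && !bcx.sess().no_landing_pads() {
        let normal_bcx = bcx.fcx().build_new_block("normal-return");
        let landing_pad = bcx.fcx().get_landing_pad(scope.as_mut().unwrap());
        llret = bcx.invoke(llfn, &llargs[..], normal_bcx.llbb(), landing_pad, None);
        bcx = normal_bcx;
    } else {
        llret = bcx.call(llfn, &llargs[..], None);
    }
    fn_ty.apply_attrs_callsite(llret);

With the pattern this small, inlining it at the two call sites and deleting the
wrapper functions is simpler than keeping the shared copies around.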
---
 src/librustc_trans/callee.rs | 102 ++++++++++++-----------------------
 src/librustc_trans/glue.rs   |  63 ++++------------------
 2 files changed, 44 insertions(+), 121 deletions(-)

diff --git a/src/librustc_trans/callee.rs b/src/librustc_trans/callee.rs
index 982a8e9514c23..2e09f7273f1c5 100644
--- a/src/librustc_trans/callee.rs
+++ b/src/librustc_trans/callee.rs
@@ -27,7 +27,6 @@ use base::*;
 use common::{
     self, BlockAndBuilder, CrateContext, FunctionContext, SharedCrateContext
 };
-use cleanup::CleanupScope;
 use consts;
 use declare;
 use value::Value;
@@ -351,7 +350,7 @@ fn trans_fn_once_adapter_shim<'a, 'tcx>(
     attributes::set_frame_pointer_elimination(ccx, lloncefn);
 
     let fcx = FunctionContext::new(ccx, lloncefn, fn_ty, None);
-    let bcx = fcx.init(false);
+    let mut bcx = fcx.init(false);
 
     // the first argument (`self`) will be the (by value) closure env.
@@ -391,8 +390,39 @@ fn trans_fn_once_adapter_shim<'a, 'tcx>(
     // Call the by-ref closure body with `self` in a cleanup scope,
     // to drop `self` when the body returns, or in case it unwinds.
     let mut self_scope = fcx.schedule_drop_mem(llenv, closure_ty);
-    let bcx = trans_call_fn_once_adapter_shim(
-        bcx, callee, &llargs[self_idx..], dest, &mut self_scope);
+    let fn_ret = callee.ty.fn_ret();
+    let fn_ty = callee.direct_fn_type(bcx.ccx(), &[]);
+
+    let first_llarg = if fn_ty.ret.is_indirect() {
+        dest
+    } else {
+        None
+    };
+    let llargs = first_llarg.into_iter().chain(llargs[self_idx..].iter().cloned())
+        .collect::<Vec<_>>();
+
+    let llfn = callee.reify(bcx.ccx());
+    let llret;
+    if self_scope.is_some() && !bcx.sess().no_landing_pads() {
+        let normal_bcx = bcx.fcx().build_new_block("normal-return");
+        let landing_pad = bcx.fcx().get_landing_pad(self_scope.as_mut().unwrap());
+
+        llret = bcx.invoke(llfn, &llargs[..], normal_bcx.llbb(), landing_pad, None);
+        bcx = normal_bcx;
+    } else {
+        llret = bcx.call(llfn, &llargs[..], None);
+    }
+    fn_ty.apply_attrs_callsite(llret);
+
+    if !fn_ty.ret.is_indirect() {
+        if let Some(llretslot) = dest {
+            fn_ty.ret.store(&bcx, llret, llretslot);
+        }
+    }
+
+    if fn_ret.0.is_never() {
+        bcx.unreachable();
+    }
     fcx.trans_scope(&bcx, self_scope);
 
     fcx.finish(&bcx);
@@ -706,67 +736,3 @@ fn trans_call_inner<'a, 'blk, 'tcx>(bcx: BlockAndBuilder<'blk, 'tcx>,
 
     (bcx, llret)
 }
-
-// This is a cleaned up version of trans_call_inner.
-fn trans_call_fn_once_adapter_shim<'a, 'blk, 'tcx>(
-    bcx: BlockAndBuilder<'blk, 'tcx>,
-    callee: Callee<'tcx>,
-    args: &[ValueRef],
-    opt_llretslot: Option<ValueRef>,
-    cleanup_scope: &mut Option<CleanupScope<'tcx>>,
-) -> BlockAndBuilder<'blk, 'tcx> {
-    let fn_ret = callee.ty.fn_ret();
-    let fn_ty = callee.direct_fn_type(bcx.ccx(), &[]);
-
-    // If there no destination, return must be direct, with no cast.
-    if opt_llretslot.is_none() {
-        assert!(!fn_ty.ret.is_indirect() && fn_ty.ret.cast.is_none());
-    }
-
-    let mut llargs = Vec::new();
-
-    if fn_ty.ret.is_indirect() {
-        let mut llretslot = opt_llretslot.unwrap();
-        if let Some(ty) = fn_ty.ret.cast {
-            llretslot = bcx.pointercast(llretslot, ty.ptr_to());
-        }
-        llargs.push(llretslot);
-    }
-
-    llargs.extend_from_slice(args);
-
-    let llfn = match callee.data {
-        Fn(f) => f,
-        _ => bug!("expected fn pointer callee, found {:?}", callee)
-    };
-
-    let _icx = push_ctxt("invoke_");
-    let (llret, bcx) = if cleanup_scope.is_some() && !bcx.sess().no_landing_pads() {
-        let normal_bcx = bcx.fcx().build_new_block("normal-return");
-        let landing_pad = bcx.fcx().get_landing_pad(cleanup_scope.as_mut().unwrap());
-
-        let llresult = bcx.invoke(llfn, &llargs[..], normal_bcx.llbb(), landing_pad, None);
-        (llresult, normal_bcx)
-    } else {
-        let llresult = bcx.call(llfn, &llargs[..], None);
-        (llresult, bcx)
-    };
-    fn_ty.apply_attrs_callsite(llret);
-
-    // If the function we just called does not use an outpointer,
-    // store the result into the rust outpointer. Cast the outpointer
-    // type to match because some ABIs will use a different type than
-    // the Rust type. e.g., a {u32,u32} struct could be returned as
-    // u64.
-    if !fn_ty.ret.is_indirect() {
-        if let Some(llretslot) = opt_llretslot {
-            fn_ty.ret.store(&bcx, llret, llretslot);
-        }
-    }
-
-    if fn_ret.0.is_never() {
-        bcx.unreachable();
-    }
-
-    bcx
-}
diff --git a/src/librustc_trans/glue.rs b/src/librustc_trans/glue.rs
index 549f1db22e7d8..a6e9af57aaedf 100644
--- a/src/librustc_trans/glue.rs
+++ b/src/librustc_trans/glue.rs
@@ -22,9 +22,7 @@ use rustc::traits;
 use rustc::ty::{self, AdtKind, Ty, TyCtxt, TypeFoldable};
 use adt;
 use base::*;
-use callee::{Callee, CalleeData};
-use cleanup::CleanupScope;
-use meth;
+use callee::Callee;
 use common::*;
 use machine::*;
 use monomorphize;
@@ -224,7 +222,7 @@ pub fn implement_drop_glue<'a, 'tcx>(ccx: &CrateContext<'a, 'tcx>,
     fcx.finish(&bcx);
 }
 
-fn trans_custom_dtor<'blk, 'tcx>(bcx: BlockAndBuilder<'blk, 'tcx>,
+fn trans_custom_dtor<'blk, 'tcx>(mut bcx: BlockAndBuilder<'blk, 'tcx>,
                                  t: Ty<'tcx>,
                                  v0: ValueRef,
                                  shallow_drop: bool)
@@ -272,60 +270,19 @@ fn trans_custom_dtor<'blk, 'tcx>(mut bcx: BlockAndBuilder<'blk, 'tcx>,
     };
     let dtor_did = def.destructor().unwrap();
     let callee = Callee::def(bcx.ccx(), dtor_did, vtbl.substs);
-    let bcx = trans_call_custom_dtor(bcx, callee, args, &mut contents_scope);
-    bcx.fcx().trans_scope(&bcx, contents_scope);
-    bcx
-}
-
-// Inlined and simplified version of callee::trans_call_inner
-fn trans_call_custom_dtor<'a, 'blk, 'tcx>(
-    bcx: BlockAndBuilder<'blk, 'tcx>,
-    callee: Callee<'tcx>,
-    args: &[ValueRef],
-    cleanup_scope: &mut Option<CleanupScope<'tcx>>,
-) -> BlockAndBuilder<'blk, 'tcx> {
-    let fn_ret = callee.ty.fn_ret();
     let fn_ty = callee.direct_fn_type(bcx.ccx(), &[]);
-
-    // Return must be direct, with no cast.
-    assert!(!fn_ty.ret.is_indirect() && fn_ty.ret.cast.is_none());
-
-    let mut llargs = Vec::new();
-
-    let llfn = match callee.data {
-        CalleeData::Virtual(idx) => {
-            llargs.push(args[0]);
-
-            let fn_ptr = meth::get_virtual_method(&bcx, args[1], idx);
-            let llty = fn_ty.llvm_type(&bcx.ccx()).ptr_to();
-            let llfn = bcx.pointercast(fn_ptr, llty);
-            llargs.extend_from_slice(&args[2..]);
-            llfn
-        }
-        CalleeData::Fn(f) => {
-            llargs.extend_from_slice(args);
-            f
-        }
-        _ => bug!("Expected virtual or fn pointer callee, found {:?}", callee)
-    };
-
-    let _icx = push_ctxt("invoke_");
-    let (llret, bcx) = if cleanup_scope.is_some() && !bcx.sess().no_landing_pads() {
+    let llret;
+    if contents_scope.is_some() && !bcx.sess().no_landing_pads() {
         let normal_bcx = bcx.fcx().build_new_block("normal-return");
-        let landing_pad = bcx.fcx().get_landing_pad(cleanup_scope.as_mut().unwrap());
+        let landing_pad = bcx.fcx().get_landing_pad(contents_scope.as_mut().unwrap());
 
-        let llresult = bcx.invoke(llfn, &llargs[..], normal_bcx.llbb(), landing_pad, None);
-        (llresult, normal_bcx)
+        llret = bcx.invoke(callee.reify(bcx.ccx()), args, normal_bcx.llbb(), landing_pad, None);
+        bcx = normal_bcx;
     } else {
-        let llresult = bcx.call(llfn, &llargs[..], None);
-        (llresult, bcx)
-    };
-    fn_ty.apply_attrs_callsite(llret);
-
-    if fn_ret.0.is_never() {
-        bcx.unreachable();
+        llret = bcx.call(callee.reify(bcx.ccx()), args, None);
     }
-
+    fn_ty.apply_attrs_callsite(llret);
+    bcx.fcx().trans_scope(&bcx, contents_scope);
     bcx
 }

From 6441c977cb387b2d057dc6e3b704b475afd97137 Mon Sep 17 00:00:00 2001
From: Mark-Simulacrum
Date: Thu, 15 Dec 2016 10:11:49 -0700
Subject: [PATCH 023/103] Remove push_ctxt
---
 src/librustc_trans/base.rs      | 30 ------------------------------
 src/librustc_trans/callee.rs    |  4 ----
 src/librustc_trans/consts.rs    |  3 +--
 src/librustc_trans/glue.rs      |  8 --------
 src/librustc_trans/intrinsic.rs |  2 --
 src/librustc_trans/meth.rs      |  3 ---
 src/librustc_trans/tvec.rs      |  2 --
 7 files changed, 1 insertion(+), 51 deletions(-)

diff --git a/src/librustc_trans/base.rs b/src/librustc_trans/base.rs
index 3cbc3935cfc72..011552b39627b 100644
--- a/src/librustc_trans/base.rs
+++ b/src/librustc_trans/base.rs
@@ -127,18 +127,6 @@ impl Drop for _InsnCtxt {
     }
 }
 
-pub fn push_ctxt(s: &'static str) -> _InsnCtxt {
-    debug!("new InsnCtxt: {}", s);
-    TASK_LOCAL_INSN_KEY.with(|slot| {
-        if let Some(ctx) = slot.borrow_mut().as_mut() {
-            ctx.push(s)
-        }
-    });
-    _InsnCtxt {
-        _cannot_construct_outside_of_this_module: (),
-    }
-}
-
 pub struct StatRecorder<'a, 'tcx: 'a> {
     ccx: &'a CrateContext<'a, 'tcx>,
     name: Option<String>,
@@ -200,8 +188,6 @@ pub fn malloc_raw_dyn<'blk, 'tcx>(bcx: &BlockAndBuilder<'blk, 'tcx>,
                                   size: ValueRef,
                                   align: ValueRef)
                                   -> ValueRef {
-    let _icx = push_ctxt("malloc_raw_exchange");
-
     // Allocate space:
     let def_id = require_alloc_fn(bcx, info_ty, ExchangeMallocFnLangItem);
     let r = Callee::def(bcx.ccx(), def_id, bcx.tcx().intern_substs(&[])).reify(bcx.ccx());
@@ -568,8 +554,6 @@ pub fn with_cond<'blk, 'tcx, F>(
 ) -> BlockAndBuilder<'blk, 'tcx>
     where F: FnOnce(BlockAndBuilder<'blk, 'tcx>) -> BlockAndBuilder<'blk, 'tcx>
 {
-    let _icx = push_ctxt("with_cond");
-
     if common::const_to_opt_uint(val) == Some(0) {
         return bcx;
     }
@@ -599,11 +583,6 @@ impl Lifetime {
             return;
         }
 
-        let _icx = push_ctxt(match self {
-            Lifetime::Start => "lifetime_start",
-            Lifetime::End => "lifetime_end"
-        });
-
         let size = machine::llsize_of_alloc(b.ccx, val_ty(ptr).element_type());
         if size == 0 {
             return;
@@ -624,7 +603,6 @@ pub fn call_memcpy<'bcx, 
'tcx>, src: ValueRef, n_bytes: ValueRef, align: u32) { - let _icx = push_ctxt("call_memcpy"); let ccx = b.ccx; let ptr_width = &ccx.sess().target.target.target_pointer_width[..]; let key = format!("llvm.memcpy.p0i8.p0i8.i{}", ptr_width); @@ -640,7 +618,6 @@ pub fn call_memcpy<'bcx, 'tcx>(b: &Builder<'bcx, 'tcx>, pub fn memcpy_ty<'blk, 'tcx>( bcx: &BlockAndBuilder<'blk, 'tcx>, dst: ValueRef, src: ValueRef, t: Ty<'tcx> ) { - let _icx = push_ctxt("memcpy_ty"); let ccx = bcx.ccx(); if type_is_zero_size(ccx, t) { @@ -661,7 +638,6 @@ pub fn memcpy_ty<'blk, 'tcx>( } pub fn init_zero_mem<'blk, 'tcx>(cx: &BlockAndBuilder<'blk, 'tcx>, llptr: ValueRef, t: Ty<'tcx>) { - let _icx = push_ctxt("init_zero_mem"); let bcx = cx; memfill(bcx, llptr, t, 0); } @@ -672,7 +648,6 @@ pub fn init_zero_mem<'blk, 'tcx>(cx: &BlockAndBuilder<'blk, 'tcx>, llptr: ValueR // awful. (A telltale sign of this is large quantities of // `mov [byte ptr foo],0` in the generated code.) fn memfill<'a, 'tcx>(b: &Builder<'a, 'tcx>, llptr: ValueRef, ty: Ty<'tcx>, byte: u8) { - let _icx = push_ctxt("memfill"); let ccx = b.ccx; let llty = type_of::type_of(ccx, ty); let llptr = b.pointercast(llptr, Type::i8(ccx).ptr_to()); @@ -704,7 +679,6 @@ pub fn alloc_ty<'blk, 'tcx>(bcx: &BlockAndBuilder<'blk, 'tcx>, } pub fn alloca(cx: &BlockAndBuilder, ty: Type, name: &str) -> ValueRef { - let _icx = push_ctxt("alloca"); DebugLoc::None.apply(cx.fcx()); cx.fcx().alloca(ty, name) } @@ -806,8 +780,6 @@ impl<'blk, 'tcx> FunctionContext<'blk, 'tcx> { /// Ties up the llstaticallocas -> llloadenv -> lltop edges, /// and builds the return block. pub fn finish(&'blk self, ret_cx: &BlockAndBuilder<'blk, 'tcx>) { - let _icx = push_ctxt("FunctionContext::finish"); - self.build_return_block(ret_cx); DebugLoc::None.apply(self); @@ -890,8 +862,6 @@ pub fn trans_instance<'a, 'tcx>(ccx: &CrateContext<'a, 'tcx>, instance: Instance // release builds. info!("trans_instance({})", instance); - let _icx = push_ctxt("trans_instance"); - let fn_ty = ccx.tcx().item_type(instance.def); let fn_ty = ccx.tcx().erase_regions(&fn_ty); let fn_ty = monomorphize::apply_param_substs(ccx.shared(), instance.substs, &fn_ty); diff --git a/src/librustc_trans/callee.rs b/src/librustc_trans/callee.rs index 2e09f7273f1c5..54a41a7e23dab 100644 --- a/src/librustc_trans/callee.rs +++ b/src/librustc_trans/callee.rs @@ -249,8 +249,6 @@ fn trans_closure_method<'a, 'tcx>(ccx: &'a CrateContext<'a, 'tcx>, // then adapt the self type let llfn_closure_kind = ccx.tcx().closure_kind(def_id); - let _icx = push_ctxt("trans_closure_adapter_shim"); - debug!("trans_closure_adapter_shim(llfn_closure_kind={:?}, \ trait_closure_kind={:?}, llfn={:?})", llfn_closure_kind, trait_closure_kind, Value(llfn)); @@ -450,7 +448,6 @@ fn trans_fn_pointer_shim<'a, 'tcx>( bare_fn_ty: Ty<'tcx>) -> ValueRef { - let _icx = push_ctxt("trans_fn_pointer_shim"); let tcx = ccx.tcx(); // Normalize the type for better caching. 
@@ -715,7 +712,6 @@ fn trans_call_inner<'a, 'blk, 'tcx>(bcx: BlockAndBuilder<'blk, 'tcx>,
         _ => bug!("expected fn pointer callee, found {:?}", callee)
     };
 
-    let _icx = push_ctxt("invoke_");
     let llret = bcx.call(llfn, &llargs[..], lpad);
     fn_ty.apply_attrs_callsite(llret);
 
diff --git a/src/librustc_trans/consts.rs b/src/librustc_trans/consts.rs
index 730a4025a59a8..2e2644d91bb6c 100644
--- a/src/librustc_trans/consts.rs
+++ b/src/librustc_trans/consts.rs
@@ -16,7 +16,7 @@ use rustc_const_eval::ConstEvalErr;
 use rustc::hir::def_id::DefId;
 use rustc::hir::map as hir_map;
 use {debuginfo, machine};
-use base::{self, push_ctxt};
+use base;
 use trans_item::TransItem;
 use common::{CrateContext, val_ty};
 use declare;
@@ -221,7 +221,6 @@ pub fn trans_static(ccx: &CrateContext,
                     attrs: &[ast::Attribute])
                     -> Result<ValueRef, ConstEvalErr> {
     unsafe {
-        let _icx = push_ctxt("trans_static");
         let def_id = ccx.tcx().map.local_def_id(id);
 
         let g = get_static(ccx, def_id);
diff --git a/src/librustc_trans/glue.rs b/src/librustc_trans/glue.rs
index a6e9af57aaedf..ea44e24c5dec9 100644
--- a/src/librustc_trans/glue.rs
+++ b/src/librustc_trans/glue.rs
@@ -40,8 +40,6 @@ pub fn trans_exchange_free_dyn<'blk, 'tcx>(bcx: BlockAndBuilder<'blk, 'tcx>,
                                            size: ValueRef,
                                            align: ValueRef)
                                            -> BlockAndBuilder<'blk, 'tcx> {
-    let _icx = push_ctxt("trans_exchange_free");
-
     let def_id = langcall(bcx.tcx(), None, "", ExchangeFreeFnLangItem);
     let args = [bcx.pointercast(v, Type::i8p(bcx.ccx())), size, align];
     Callee::def(bcx.ccx(), def_id, bcx.tcx().intern_substs(&[]))
@@ -133,7 +131,6 @@ pub fn call_drop_glue<'blk, 'tcx>(
 ) {
     // NB: v is an *alias* of type t here, not a direct value.
     debug!("call_drop_glue(t={:?}, skip_dtor={})", t, skip_dtor);
-    let _icx = push_ctxt("drop_ty");
     if bcx.fcx().type_needs_drop(t) {
         let ccx = bcx.ccx();
         let g = if skip_dtor {
@@ -401,8 +398,6 @@ fn make_drop_glue<'blk, 'tcx>(bcx: BlockAndBuilder<'blk, 'tcx>,
     let skip_dtor = match g { DropGlueKind::Ty(_) => false, DropGlueKind::TyContents(_) => true };
     // NB: v0 is an *alias* of type t here, not a direct value.
-    let _icx = push_ctxt("make_drop_glue");
-
     // Only drop the value when it ...
well, we used to check for // non-null, (and maybe we need to continue doing so), but we now // must definitely check for special bit-patterns corresponding to @@ -472,14 +467,11 @@ fn drop_structural_ty<'blk, 'tcx>(cx: BlockAndBuilder<'blk, 'tcx>, av: ValueRef, t: Ty<'tcx>) -> BlockAndBuilder<'blk, 'tcx> { - let _icx = push_ctxt("drop_structural_ty"); - fn iter_variant<'blk, 'tcx>(cx: &BlockAndBuilder<'blk, 'tcx>, t: Ty<'tcx>, av: adt::MaybeSizedValue, variant: &'tcx ty::VariantDef, substs: &Substs<'tcx>) { - let _icx = push_ctxt("iter_variant"); let tcx = cx.tcx(); for (i, field) in variant.fields.iter().enumerate() { let arg = monomorphize::field_ty(tcx, substs, field); diff --git a/src/librustc_trans/intrinsic.rs b/src/librustc_trans/intrinsic.rs index 94a57d9568ef2..9be505556c5d1 100644 --- a/src/librustc_trans/intrinsic.rs +++ b/src/librustc_trans/intrinsic.rs @@ -95,8 +95,6 @@ pub fn trans_intrinsic_call<'a, 'blk, 'tcx>(bcx: &BlockAndBuilder<'blk, 'tcx>, let ccx = fcx.ccx; let tcx = bcx.tcx(); - let _icx = push_ctxt("trans_intrinsic_call"); - let (def_id, substs, fty) = match callee_ty.sty { ty::TyFnDef(def_id, substs, ref fty) => (def_id, substs, fty), _ => bug!("expected fn item type, found {}", callee_ty) diff --git a/src/librustc_trans/meth.rs b/src/librustc_trans/meth.rs index 66eb27ae11f78..82c1fa94a4e99 100644 --- a/src/librustc_trans/meth.rs +++ b/src/librustc_trans/meth.rs @@ -12,7 +12,6 @@ use attributes; use llvm::{ValueRef, get_params}; use rustc::traits; use abi::FnType; -use base::*; use callee::Callee; use common::*; use consts; @@ -64,7 +63,6 @@ pub fn get_virtual_method<'blk, 'tcx>(bcx: &BlockAndBuilder<'blk, 'tcx>, pub fn trans_object_shim<'a, 'tcx>(ccx: &'a CrateContext<'a, 'tcx>, callee: Callee<'tcx>) -> ValueRef { - let _icx = push_ctxt("trans_object_shim"); let tcx = ccx.tcx(); debug!("trans_object_shim({:?})", callee); @@ -109,7 +107,6 @@ pub fn get_vtable<'a, 'tcx>(ccx: &CrateContext<'a, 'tcx>, -> ValueRef { let tcx = ccx.tcx(); - let _icx = push_ctxt("meth::get_vtable"); debug!("get_vtable(ty={:?}, trait_ref={:?})", ty, trait_ref); diff --git a/src/librustc_trans/tvec.rs b/src/librustc_trans/tvec.rs index 6519d372c72c4..904e3765017a9 100644 --- a/src/librustc_trans/tvec.rs +++ b/src/librustc_trans/tvec.rs @@ -12,7 +12,6 @@ use llvm; use llvm::ValueRef; -use base::*; use common::*; use rustc::ty::Ty; @@ -24,7 +23,6 @@ pub fn slice_for_each<'blk, 'tcx, F>(bcx: &BlockAndBuilder<'blk, 'tcx>, -> BlockAndBuilder<'blk, 'tcx> where F: FnOnce(&BlockAndBuilder<'blk, 'tcx>, ValueRef) { - let _icx = push_ctxt("tvec::slice_for_each"); let fcx = bcx.fcx(); // Special-case vectors with elements of size 0 so they don't go out of bounds (#9890) From 6710af358051725342d4eec11e0c423aabd98511 Mon Sep 17 00:00:00 2001 From: Mark-Simulacrum Date: Thu, 15 Dec 2016 10:32:12 -0700 Subject: [PATCH 024/103] Slightly simplify tvec::slice_for_each --- src/librustc_trans/tvec.rs | 31 +++++++++++-------------------- 1 file changed, 11 insertions(+), 20 deletions(-) diff --git a/src/librustc_trans/tvec.rs b/src/librustc_trans/tvec.rs index 904e3765017a9..36762885cc99a 100644 --- a/src/librustc_trans/tvec.rs +++ b/src/librustc_trans/tvec.rs @@ -15,16 +15,13 @@ use llvm::ValueRef; use common::*; use rustc::ty::Ty; -pub fn slice_for_each<'blk, 'tcx, F>(bcx: &BlockAndBuilder<'blk, 'tcx>, - data_ptr: ValueRef, - unit_ty: Ty<'tcx>, - len: ValueRef, - f: F) - -> BlockAndBuilder<'blk, 'tcx> - where F: FnOnce(&BlockAndBuilder<'blk, 'tcx>, ValueRef) -{ - let fcx = bcx.fcx(); - +pub fn 
slice_for_each<'blk, 'tcx, F>( + bcx: &BlockAndBuilder<'blk, 'tcx>, + data_ptr: ValueRef, + unit_ty: Ty<'tcx>, + len: ValueRef, + f: F +) -> BlockAndBuilder<'blk, 'tcx> where F: FnOnce(&BlockAndBuilder<'blk, 'tcx>, ValueRef) { // Special-case vectors with elements of size 0 so they don't go out of bounds (#9890) let zst = type_is_zero_size(bcx.ccx(), unit_ty); let add = |bcx: &BlockAndBuilder, a, b| if zst { @@ -33,9 +30,9 @@ pub fn slice_for_each<'blk, 'tcx, F>(bcx: &BlockAndBuilder<'blk, 'tcx>, bcx.inbounds_gep(a, &[b]) }; - let body_bcx = fcx.build_new_block("slice_loop_body"); - let next_bcx = fcx.build_new_block("slice_loop_next"); - let header_bcx = fcx.build_new_block("slice_loop_header"); + let body_bcx = bcx.fcx().build_new_block("slice_loop_body"); + let next_bcx = bcx.fcx().build_new_block("slice_loop_next"); + let header_bcx = bcx.fcx().build_new_block("slice_loop_header"); let start = if zst { C_uint(bcx.ccx(), 0usize) @@ -51,13 +48,7 @@ pub fn slice_for_each<'blk, 'tcx, F>(bcx: &BlockAndBuilder<'blk, 'tcx>, header_bcx.cond_br(keep_going, body_bcx.llbb(), next_bcx.llbb()); f(&body_bcx, if zst { data_ptr } else { current }); - // FIXME(simulacrum): The code below is identical to the closure (add) above, but using the - // closure doesn't compile due to body_bcx still being borrowed when dropped. - let next = if zst { - body_bcx.add(current, C_uint(bcx.ccx(), 1usize)) - } else { - body_bcx.inbounds_gep(current, &[C_uint(bcx.ccx(), 1usize)]) - }; + let next = add(&body_bcx, current, C_uint(bcx.ccx(), 1usize)); body_bcx.add_incoming_to_phi(current, next, body_bcx.llbb()); body_bcx.br(header_bcx.llbb()); next_bcx From 31691692a2907059ac2e7499d67b28ce6e788d77 Mon Sep 17 00:00:00 2001 From: Mark-Simulacrum Date: Thu, 15 Dec 2016 10:39:29 -0700 Subject: [PATCH 025/103] Deduplicate store_operand_direct and store_operand --- src/librustc_trans/mir/operand.rs | 13 ++----------- src/librustc_trans/mir/rvalue.rs | 2 +- 2 files changed, 3 insertions(+), 12 deletions(-) diff --git a/src/librustc_trans/mir/operand.rs b/src/librustc_trans/mir/operand.rs index 3c737fd6ad7d8..f52c08794629c 100644 --- a/src/librustc_trans/mir/operand.rs +++ b/src/librustc_trans/mir/operand.rs @@ -244,17 +244,8 @@ impl<'bcx, 'tcx> MirContext<'bcx, 'tcx> { pub fn store_operand(&mut self, bcx: &BlockAndBuilder<'bcx, 'tcx>, lldest: ValueRef, - operand: OperandRef<'tcx>) - { - debug!("store_operand: operand={:?} lldest={:?}", operand, lldest); - self.store_operand_direct(bcx, lldest, operand) - } - - pub fn store_operand_direct(&mut self, - bcx: &BlockAndBuilder<'bcx, 'tcx>, - lldest: ValueRef, - operand: OperandRef<'tcx>) - { + operand: OperandRef<'tcx>) { + debug!("store_operand: operand={:?}", operand); // Avoid generating stores of zero-sized values, because the only way to have a zero-sized // value is through `undef`, and store itself is useless. 
if common::type_is_zero_size(bcx.ccx(), operand.ty) { diff --git a/src/librustc_trans/mir/rvalue.rs b/src/librustc_trans/mir/rvalue.rs index 31cbc4074b206..5aba7160c2353 100644 --- a/src/librustc_trans/mir/rvalue.rs +++ b/src/librustc_trans/mir/rvalue.rs @@ -99,7 +99,7 @@ impl<'bcx, 'tcx> MirContext<'bcx, 'tcx> { let size = C_uint(bcx.ccx(), size); let base = base::get_dataptr(&bcx, dest.llval); tvec::slice_for_each(&bcx, base, tr_elem.ty, size, |bcx, llslot| { - self.store_operand_direct(&bcx, llslot, tr_elem); + self.store_operand(&bcx, llslot, tr_elem); }) } From c7f8b0cd81eb3921890795537ba526d922e21fb1 Mon Sep 17 00:00:00 2001 From: Mark-Simulacrum Date: Thu, 15 Dec 2016 14:42:21 -0700 Subject: [PATCH 026/103] Eagerly evaluate landing pads for cleanup scopes --- src/librustc_trans/callee.rs | 6 +- src/librustc_trans/cleanup.rs | 147 ++++++++++++---------------------- src/librustc_trans/glue.rs | 6 +- 3 files changed, 54 insertions(+), 105 deletions(-) diff --git a/src/librustc_trans/callee.rs b/src/librustc_trans/callee.rs index 54a41a7e23dab..4f6165e1dbe8b 100644 --- a/src/librustc_trans/callee.rs +++ b/src/librustc_trans/callee.rs @@ -387,7 +387,7 @@ fn trans_fn_once_adapter_shim<'a, 'tcx>( // Call the by-ref closure body with `self` in a cleanup scope, // to drop `self` when the body returns, or in case it unwinds. - let mut self_scope = fcx.schedule_drop_mem(llenv, closure_ty); + let self_scope = fcx.schedule_drop_mem(llenv, closure_ty); let fn_ret = callee.ty.fn_ret(); let fn_ty = callee.direct_fn_type(bcx.ccx(), &[]); @@ -401,10 +401,8 @@ fn trans_fn_once_adapter_shim<'a, 'tcx>( let llfn = callee.reify(bcx.ccx()); let llret; - if self_scope.is_some() && !bcx.sess().no_landing_pads() { + if let Some(landing_pad) = self_scope.as_ref().and_then(|c| c.landing_pad) { let normal_bcx = bcx.fcx().build_new_block("normal-return"); - let landing_pad = bcx.fcx().get_landing_pad(self_scope.as_mut().unwrap()); - llret = bcx.invoke(llfn, &llargs[..], normal_bcx.llbb(), landing_pad, None); bcx = normal_bcx; } else { diff --git a/src/librustc_trans/cleanup.rs b/src/librustc_trans/cleanup.rs index f1ca228035a66..439058daaf9c4 100644 --- a/src/librustc_trans/cleanup.rs +++ b/src/librustc_trans/cleanup.rs @@ -127,8 +127,8 @@ pub struct CleanupScope<'tcx> { // Cleanup to run upon scope exit. 
     cleanup: DropValue<'tcx>,
 
-    cached_early_exit: Option<CachedEarlyExit>,
-    cached_landing_pad: Option<BasicBlockRef>,
+    // Computed on creation if compiling with landing pads (!sess.no_landing_pads)
+    pub landing_pad: Option<BasicBlockRef>,
 }
 
 #[derive(Copy, Clone, Debug)]
@@ -142,12 +142,6 @@ enum UnwindKind {
     CleanupPad(ValueRef),
 }
 
-#[derive(Copy, Clone)]
-struct CachedEarlyExit {
-    label: UnwindKind,
-    cleanup_block: BasicBlockRef,
-}
-
 impl<'blk, 'tcx> FunctionContext<'blk, 'tcx> {
     pub fn trans_scope(
         &self,
@@ -171,7 +165,7 @@ impl<'blk, 'tcx> FunctionContext<'blk, 'tcx> {
         debug!("schedule_drop_mem(val={:?}, ty={:?}) skip_dtor={}",
                Value(val), ty, drop.skip_dtor);
 
-        Some(CleanupScope::new(drop))
+        Some(CleanupScope::new(self, drop))
     }
 
     /// Issue #23611: Schedules a (deep) drop of the contents of
@@ -196,7 +190,21 @@ impl<'blk, 'tcx> FunctionContext<'blk, 'tcx> {
                ty,
                drop.skip_dtor);
 
-        Some(CleanupScope::new(drop))
+        Some(CleanupScope::new(self, drop))
+    }
+
+}
+
+impl<'tcx> CleanupScope<'tcx> {
+    fn new<'a>(fcx: &FunctionContext<'a, 'tcx>, drop_val: DropValue<'tcx>) -> CleanupScope<'tcx> {
+        CleanupScope {
+            cleanup: drop_val,
+            landing_pad: if !fcx.ccx.sess().no_landing_pads() {
+                Some(CleanupScope::get_landing_pad(fcx, &drop_val))
+            } else {
+                None
+            },
+        }
     }
 
     /// Creates a landing pad for the top scope, if one does not exist. The
@@ -220,22 +215,15 @@ impl<'tcx> CleanupScope<'tcx> {
     ///
     /// (The cleanups and resume instruction are created by
     /// `trans_cleanups_to_exit_scope()`, not in this function itself.)
-    pub fn get_landing_pad(&'blk self, scope: &mut CleanupScope<'tcx>) -> BasicBlockRef {
+    fn get_landing_pad<'a>(fcx: &FunctionContext<'a, 'tcx>, drop_val: &DropValue<'tcx>)
+        -> BasicBlockRef {
         debug!("get_landing_pad");
 
         // Check if a landing pad block exists; if not, create one.
-        let mut pad_bcx = match scope.cached_landing_pad {
-            Some(llbb) => return llbb,
-            None => {
-                let name = scope.block_name("unwind");
-                let pad_bcx = self.build_new_block(&name[..]);
-                scope.cached_landing_pad = Some(pad_bcx.llbb());
-                pad_bcx
-            }
-        };
+        let mut pad_bcx = fcx.build_new_block("unwind_custom_");
 
         let llpersonality = pad_bcx.fcx().eh_personality();
 
-        let val = if base::wants_msvc_seh(self.ccx.sess()) {
+        let val = if base::wants_msvc_seh(fcx.ccx.sess()) {
             // A cleanup pad requires a personality function to be specified, so
             // we do that here explicitly (happens implicitly below through
             // creation of the landingpad instruction). We then create a
             // cleanuppad instruction which has no filters to run cleanup on all
             // exceptions.
             // The landing pad return type (the type being propagated). Not sure
             // what this represents but it's determined by the personality
             // function and this is what the EH proposal example uses.
-            let llretty = Type::struct_(self.ccx,
-                                        &[Type::i8p(self.ccx), Type::i32(self.ccx)],
+            let llretty = Type::struct_(fcx.ccx,
+                                        &[Type::i8p(fcx.ccx), Type::i32(fcx.ccx)],
                                         false);
 
             // The only landing pad clause will be 'cleanup'
 
             // The landing pad block is a cleanup
             pad_bcx.set_cleanup(llretval);
 
-            let addr = match self.landingpad_alloca.get() {
+            let addr = match fcx.landingpad_alloca.get() {
                 Some(addr) => addr,
                 None => {
                     let addr = base::alloca(&pad_bcx, common::val_ty(llretval), "");
                     Lifetime::Start.call(&pad_bcx, addr);
-                    self.landingpad_alloca.set(Some(addr));
+                    fcx.landingpad_alloca.set(Some(addr));
                     addr
                 }
             };
         };
 
         // Generate the cleanup block and branch to it.
- let cleanup_llbb = self.trans_cleanups_to_exit_scope(val, scope); + let cleanup_llbb = CleanupScope::trans_cleanups_to_exit_scope(fcx, val, drop_val); val.branch(&mut pad_bcx, cleanup_llbb); return pad_bcx.llbb(); @@ -270,52 +270,39 @@ impl<'blk, 'tcx> FunctionContext<'blk, 'tcx> { /// break, continue, or unwind. This function will generate all cleanups /// between the top of the stack and the exit `label` and return a basic /// block that the caller can branch to. - fn trans_cleanups_to_exit_scope( - &'blk self, + fn trans_cleanups_to_exit_scope<'a>( + fcx: &FunctionContext<'a, 'tcx>, label: UnwindKind, - scope: &mut CleanupScope<'tcx> + drop_val: &DropValue<'tcx> ) -> BasicBlockRef { debug!("trans_cleanups_to_exit_scope label={:?}`", label); - let cached_exit = scope.cached_early_exit(label); - - // Check if we have already cached the unwinding of this - // scope for this label. If so, we can just branch to the cached block. - let exit_llbb = cached_exit.unwrap_or_else(|| { - // Generate a block that will resume unwinding to the calling function - let bcx = self.build_new_block("resume"); - match label { - UnwindKind::LandingPad => { - let addr = self.landingpad_alloca.get().unwrap(); - let lp = bcx.load(addr); - Lifetime::End.call(&bcx, addr); - if !bcx.sess().target.target.options.custom_unwind_resume { - bcx.resume(lp); - } else { - let exc_ptr = bcx.extract_value(lp, 0); - bcx.call(bcx.fcx().eh_unwind_resume().reify(bcx.ccx()), &[exc_ptr], None); - } - } - UnwindKind::CleanupPad(_) => { - bcx.cleanup_ret(bcx.cleanup_pad(None, &[]), None); + + // Generate a block that will resume unwinding to the calling function + let bcx = fcx.build_new_block("resume"); + match label { + UnwindKind::LandingPad => { + let addr = fcx.landingpad_alloca.get().unwrap(); + let lp = bcx.load(addr); + Lifetime::End.call(&bcx, addr); + if !bcx.sess().target.target.options.custom_unwind_resume { + bcx.resume(lp); + } else { + let exc_ptr = bcx.extract_value(lp, 0); + bcx.call(bcx.fcx().eh_unwind_resume().reify(bcx.ccx()), &[exc_ptr], None); } } - bcx.llbb() - }); - - let name = scope.block_name("clean"); - debug!("generating cleanup for {}", name); + UnwindKind::CleanupPad(_) => { + bcx.cleanup_ret(bcx.cleanup_pad(None, &[]), None); + } + } - let mut cleanup = self.build_new_block(&name[..]); + let mut cleanup = fcx.build_new_block("clean_custom_"); // Insert cleanup instructions into the cleanup block - scope.cleanup.trans(label.get_funclet(&cleanup).as_ref(), &cleanup); + drop_val.trans(label.get_funclet(&cleanup).as_ref(), &cleanup); // Insert instruction into cleanup block to branch to the exit - label.branch(&mut cleanup, exit_llbb); - - // Cache the work we've done here - // FIXME: Can this get called more than once per scope? If not, no need to cache. 
- scope.add_cached_early_exit(label, cleanup.llbb()); + label.branch(&mut cleanup, bcx.llbb()); debug!("trans_cleanups_to_exit_scope: llbb={:?}", cleanup.llbb()); @@ -323,40 +310,6 @@ impl<'blk, 'tcx> FunctionContext<'blk, 'tcx> { } } -impl<'tcx> CleanupScope<'tcx> { - fn new(drop_val: DropValue<'tcx>) -> CleanupScope<'tcx> { - CleanupScope { - cleanup: drop_val, - cached_early_exit: None, - cached_landing_pad: None, - } - } - - fn cached_early_exit(&self, label: UnwindKind) -> Option { - if let Some(e) = self.cached_early_exit { - if e.label == label { - return Some(e.cleanup_block); - } - } - None - } - - fn add_cached_early_exit(&mut self, - label: UnwindKind, - blk: BasicBlockRef) { - assert!(self.cached_early_exit.is_none()); - self.cached_early_exit = Some(CachedEarlyExit { - label: label, - cleanup_block: blk, - }); - } - - /// Returns a suitable name to use for the basic block that handles this cleanup scope - fn block_name(&self, prefix: &str) -> String { - format!("{}_custom_", prefix) - } -} - impl UnwindKind { /// Generates a branch going from `bcx` to `to_llbb` where `self` is /// the exit label attached to the start of `bcx`. diff --git a/src/librustc_trans/glue.rs b/src/librustc_trans/glue.rs index ea44e24c5dec9..8ad951c5ade66 100644 --- a/src/librustc_trans/glue.rs +++ b/src/librustc_trans/glue.rs @@ -238,7 +238,7 @@ fn trans_custom_dtor<'blk, 'tcx>(mut bcx: BlockAndBuilder<'blk, 'tcx>, // might well consider changing below to more direct code. // Issue #23611: schedule cleanup of contents, re-inspecting the // discriminant (if any) in case of variant swap in drop code. - let mut contents_scope = if !shallow_drop { + let contents_scope = if !shallow_drop { bcx.fcx().schedule_drop_adt_contents(v0, t) } else { None @@ -269,10 +269,8 @@ fn trans_custom_dtor<'blk, 'tcx>(mut bcx: BlockAndBuilder<'blk, 'tcx>, let callee = Callee::def(bcx.ccx(), dtor_did, vtbl.substs); let fn_ty = callee.direct_fn_type(bcx.ccx(), &[]); let llret; - if contents_scope.is_some() && !bcx.sess().no_landing_pads() { + if let Some(landing_pad) = contents_scope.as_ref().and_then(|c| c.landing_pad) { let normal_bcx = bcx.fcx().build_new_block("normal-return"); - let landing_pad = bcx.fcx().get_landing_pad(contents_scope.as_mut().unwrap()); - llret = bcx.invoke(callee.reify(bcx.ccx()), args, normal_bcx.llbb(), landing_pad, None); bcx = normal_bcx; } else { From b10d89a0961bb8682dc3e6d2781c6e390c6cf25c Mon Sep 17 00:00:00 2001 From: Mark-Simulacrum Date: Thu, 15 Dec 2016 14:43:15 -0700 Subject: [PATCH 027/103] Move around code in cleanup for a more logical ordering, and fix comments --- src/librustc_trans/cleanup.rs | 244 +++++++++------------------------- 1 file changed, 60 insertions(+), 184 deletions(-) diff --git a/src/librustc_trans/cleanup.rs b/src/librustc_trans/cleanup.rs index 439058daaf9c4..67b9dd1828848 100644 --- a/src/librustc_trans/cleanup.rs +++ b/src/librustc_trans/cleanup.rs @@ -11,108 +11,12 @@ //! ## The Cleanup module //! //! The cleanup module tracks what values need to be cleaned up as scopes -//! are exited, either via panic or just normal control flow. The basic -//! idea is that the function context maintains a stack of cleanup scopes -//! that are pushed/popped as we traverse the AST tree. There is typically -//! at least one cleanup scope per AST node; some AST nodes may introduce -//! additional temporary scopes. +//! are exited, either via panic or just normal control flow. //! //! Cleanup items can be scheduled into any of the scopes on the stack. -//! 
Typically, when a scope is popped, we will also generate the code for -//! each of its cleanups at that time. This corresponds to a normal exit -//! from a block (for example, an expression completing evaluation -//! successfully without panic). However, it is also possible to pop a -//! block *without* executing its cleanups; this is typically used to -//! guard intermediate values that must be cleaned up on panic, but not -//! if everything goes right. See the section on custom scopes below for -//! more details. -//! -//! Cleanup scopes come in three kinds: -//! -//! - **AST scopes:** each AST node in a function body has a corresponding -//! AST scope. We push the AST scope when we start generate code for an AST -//! node and pop it once the AST node has been fully generated. -//! - **Loop scopes:** loops have an additional cleanup scope. Cleanups are -//! never scheduled into loop scopes; instead, they are used to record the -//! basic blocks that we should branch to when a `continue` or `break` statement -//! is encountered. -//! - **Custom scopes:** custom scopes are typically used to ensure cleanup -//! of intermediate values. -//! -//! ### When to schedule cleanup -//! -//! Although the cleanup system is intended to *feel* fairly declarative, -//! it's still important to time calls to `schedule_clean()` correctly. -//! Basically, you should not schedule cleanup for memory until it has -//! been initialized, because if an unwind should occur before the memory -//! is fully initialized, then the cleanup will run and try to free or -//! drop uninitialized memory. If the initialization itself produces -//! byproducts that need to be freed, then you should use temporary custom -//! scopes to ensure that those byproducts will get freed on unwind. For -//! example, an expression like `box foo()` will first allocate a box in the -//! heap and then call `foo()` -- if `foo()` should panic, this box needs -//! to be *shallowly* freed. -//! -//! ### Long-distance jumps -//! -//! In addition to popping a scope, which corresponds to normal control -//! flow exiting the scope, we may also *jump out* of a scope into some -//! earlier scope on the stack. This can occur in response to a `return`, -//! `break`, or `continue` statement, but also in response to panic. In -//! any of these cases, we will generate a series of cleanup blocks for -//! each of the scopes that is exited. So, if the stack contains scopes A -//! ... Z, and we break out of a loop whose corresponding cleanup scope is -//! X, we would generate cleanup blocks for the cleanups in X, Y, and Z. -//! After cleanup is done we would branch to the exit point for scope X. -//! But if panic should occur, we would generate cleanups for all the -//! scopes from A to Z and then resume the unwind process afterwards. -//! -//! To avoid generating tons of code, we cache the cleanup blocks that we -//! create for breaks, returns, unwinds, and other jumps. Whenever a new -//! cleanup is scheduled, though, we must clear these cached blocks. A -//! possible improvement would be to keep the cached blocks but simply -//! generate a new block which performs the additional cleanup and then -//! branches to the existing cached blocks. -//! -//! ### AST and loop cleanup scopes -//! -//! AST cleanup scopes are pushed when we begin and end processing an AST -//! node. They are used to house cleanups related to rvalue temporary that -//! get referenced (e.g., due to an expression like `&Foo()`). Whenever an -//! 
AST scope is popped, we always trans all the cleanups, adding the cleanup
-//! code after the postdominator of the AST node.
-//!
-//! AST nodes that represent breakable loops also push a loop scope; the
-//! loop scope never has any actual cleanups, it's just used to point to
-//! the basic blocks where control should flow after a "continue" or
-//! "break" statement. Popping a loop scope never generates code.
-//!
-//! ### Custom cleanup scopes
-//!
-//! Custom cleanup scopes are used for a variety of purposes. The most
-//! common though is to handle temporary byproducts, where cleanup only
-//! needs to occur on panic. The general strategy is to push a custom
-//! cleanup scope, schedule *shallow* cleanups into the custom scope, and
-//! then pop the custom scope (without transing the cleanups) when
-//! execution succeeds normally. This way the cleanups are only trans'd on
-//! unwind, and only up until the point where execution succeeded, at
-//! which time the complete value should be stored in an lvalue or some
-//! other place where normal cleanup applies.
-//!
-//! To spell it out, here is an example. Imagine an expression `box expr`.
-//! We would basically:
-//!
-//! 1. Push a custom cleanup scope C.
-//! 2. Allocate the box.
-//! 3. Schedule a shallow free in the scope C.
-//! 4. Trans `expr` into the box.
-//! 5. Pop the scope C.
-//! 6. Return the box as an rvalue.
-//!
-//! This way, if a panic occurs while transing `expr`, the custom
-//! cleanup scope C is pushed and hence the box will be freed. The trans
-//! code for `expr` itself is responsible for freeing any other byproducts
-//! that may be in play.
 
 use llvm::{BasicBlockRef, ValueRef};
 use base::{self, Lifetime};
@@ -131,9 +35,17 @@ pub struct CleanupScope<'tcx> {
     pub landing_pad: Option<BasicBlockRef>,
 }
 
-#[derive(Copy, Clone, Debug)]
-pub struct CustomScopeIndex {
-    index: usize
+#[derive(Copy, Clone)]
+pub struct DropValue<'tcx> {
+    val: ValueRef,
+    ty: Ty<'tcx>,
+    skip_dtor: bool,
+}
+
+impl<'tcx> DropValue<'tcx> {
+    fn trans<'blk>(&self, funclet: Option<&'blk Funclet>, bcx: &BlockAndBuilder<'blk, 'tcx>) {
+        glue::call_drop_glue(bcx, self.val, self.ty, self.skip_dtor, funclet)
+    }
 }
 
 #[derive(Copy, Clone, Debug)]
@@ -142,6 +54,44 @@ enum UnwindKind {
     CleanupPad(ValueRef),
 }
 
+impl UnwindKind {
+    /// Generates a branch going from `bcx` to `to_llbb` where `self` is
+    /// the exit label attached to the start of `bcx`.
+    ///
+    /// Transitions from an exit label to other exit labels depend on the type
+    /// of label. For example with MSVC exceptions unwind exit labels will use
+    /// the `cleanupret` instruction instead of the `br` instruction.
+ fn branch(&self, bcx: &BlockAndBuilder, to_llbb: BasicBlockRef) { + match *self { + UnwindKind::CleanupPad(pad) => { + bcx.cleanup_ret(pad, Some(to_llbb)); + } + UnwindKind::LandingPad => { + bcx.br(to_llbb); + } + } + } + + fn get_funclet(&self, bcx: &BlockAndBuilder) -> Option { + match *self { + UnwindKind::CleanupPad(_) => { + let pad = bcx.cleanup_pad(None, &[]); + Funclet::msvc(pad) + }, + UnwindKind::LandingPad => Funclet::gnu(), + } + } +} + +impl PartialEq for UnwindKind { + fn eq(&self, label: &UnwindKind) -> bool { + match (*self, *label) { + (UnwindKind::LandingPad, UnwindKind::LandingPad) | + (UnwindKind::CleanupPad(..), UnwindKind::CleanupPad(..)) => true, + _ => false, + } + } +} impl<'blk, 'tcx> FunctionContext<'blk, 'tcx> { pub fn trans_scope( &self, @@ -186,9 +136,7 @@ impl<'blk, 'tcx> FunctionContext<'blk, 'tcx> { }; debug!("schedule_drop_adt_contents(val={:?}, ty={:?}) skip_dtor={}", - Value(val), - ty, - drop.skip_dtor); + Value(val), ty, drop.skip_dtor); Some(CleanupScope::new(self, drop)) } @@ -259,27 +207,9 @@ impl<'tcx> CleanupScope<'tcx> { UnwindKind::LandingPad }; - // Generate the cleanup block and branch to it. - let cleanup_llbb = CleanupScope::trans_cleanups_to_exit_scope(fcx, val, drop_val); - val.branch(&mut pad_bcx, cleanup_llbb); - - return pad_bcx.llbb(); - } - - /// Used when the caller wishes to jump to an early exit, such as a return, - /// break, continue, or unwind. This function will generate all cleanups - /// between the top of the stack and the exit `label` and return a basic - /// block that the caller can branch to. - fn trans_cleanups_to_exit_scope<'a>( - fcx: &FunctionContext<'a, 'tcx>, - label: UnwindKind, - drop_val: &DropValue<'tcx> - ) -> BasicBlockRef { - debug!("trans_cleanups_to_exit_scope label={:?}`", label); - // Generate a block that will resume unwinding to the calling function let bcx = fcx.build_new_block("resume"); - match label { + match val { UnwindKind::LandingPad => { let addr = fcx.landingpad_alloca.get().unwrap(); let lp = bcx.load(addr); @@ -299,68 +229,14 @@ impl<'tcx> CleanupScope<'tcx> { let mut cleanup = fcx.build_new_block("clean_custom_"); // Insert cleanup instructions into the cleanup block - drop_val.trans(label.get_funclet(&cleanup).as_ref(), &cleanup); + drop_val.trans(val.get_funclet(&cleanup).as_ref(), &cleanup); // Insert instruction into cleanup block to branch to the exit - label.branch(&mut cleanup, bcx.llbb()); - - debug!("trans_cleanups_to_exit_scope: llbb={:?}", cleanup.llbb()); - - cleanup.llbb() - } -} - -impl UnwindKind { - /// Generates a branch going from `bcx` to `to_llbb` where `self` is - /// the exit label attached to the start of `bcx`. - /// - /// Transitions from an exit label to other exit labels depend on the type - /// of label. For example with MSVC exceptions unwind exit labels will use - /// the `cleanupret` instruction instead of the `br` instruction. 
- fn branch(&self, bcx: &BlockAndBuilder, to_llbb: BasicBlockRef) { - match *self { - UnwindKind::CleanupPad(pad) => { - bcx.cleanup_ret(pad, Some(to_llbb)); - } - UnwindKind::LandingPad => { - bcx.br(to_llbb); - } - } - } - - fn get_funclet(&self, bcx: &BlockAndBuilder) -> Option { - match *self { - UnwindKind::CleanupPad(_) => { - let pad = bcx.cleanup_pad(None, &[]); - Funclet::msvc(pad) - }, - UnwindKind::LandingPad => Funclet::gnu(), - } - } -} + val.branch(&mut cleanup, bcx.llbb()); -impl PartialEq for UnwindKind { - fn eq(&self, label: &UnwindKind) -> bool { - match (*self, *label) { - (UnwindKind::LandingPad, UnwindKind::LandingPad) | - (UnwindKind::CleanupPad(..), UnwindKind::CleanupPad(..)) => true, - _ => false, - } - } -} - -/////////////////////////////////////////////////////////////////////////// -// Cleanup types + // Branch into the cleanup block + val.branch(&mut pad_bcx, cleanup.llbb()); -#[derive(Copy, Clone)] -pub struct DropValue<'tcx> { - val: ValueRef, - ty: Ty<'tcx>, - skip_dtor: bool, -} - -impl<'tcx> DropValue<'tcx> { - fn trans<'blk>(&self, funclet: Option<&'blk Funclet>, bcx: &BlockAndBuilder<'blk, 'tcx>) { - glue::call_drop_glue(bcx, self.val, self.ty, self.skip_dtor, funclet) + return pad_bcx.llbb(); } } From 14ae76d96b87d6b3e8cbe9264534e148ee88fb89 Mon Sep 17 00:00:00 2001 From: Mark-Simulacrum Date: Thu, 15 Dec 2016 15:08:18 -0700 Subject: [PATCH 028/103] Unbox FunctionDebugContextData. It is only a pointer and a Cell, which is quite small. --- src/librustc_trans/debuginfo/create_scope_map.rs | 2 +- src/librustc_trans/debuginfo/mod.rs | 14 ++++---------- src/librustc_trans/debuginfo/source_loc.rs | 10 ++-------- 3 files changed, 7 insertions(+), 19 deletions(-) diff --git a/src/librustc_trans/debuginfo/create_scope_map.rs b/src/librustc_trans/debuginfo/create_scope_map.rs index e0c1a80be394d..23f415d95cf4a 100644 --- a/src/librustc_trans/debuginfo/create_scope_map.rs +++ b/src/librustc_trans/debuginfo/create_scope_map.rs @@ -54,7 +54,7 @@ pub fn create_mir_scopes(fcx: &FunctionContext) -> IndexVec data.fn_metadata, + FunctionDebugContext::RegularContext(ref data) => data.fn_metadata, FunctionDebugContext::DebugInfoDisabled | FunctionDebugContext::FunctionWithoutDebugInfo => { return scopes; diff --git a/src/librustc_trans/debuginfo/mod.rs b/src/librustc_trans/debuginfo/mod.rs index f59ecf1d6782f..1bf3c0acec55a 100644 --- a/src/librustc_trans/debuginfo/mod.rs +++ b/src/librustc_trans/debuginfo/mod.rs @@ -97,7 +97,7 @@ impl<'tcx> CrateDebugContext<'tcx> { } pub enum FunctionDebugContext { - RegularContext(Box), + RegularContext(FunctionDebugContextData), DebugInfoDisabled, FunctionWithoutDebugInfo, } @@ -107,7 +107,7 @@ impl FunctionDebugContext { span: Span) -> &'a FunctionDebugContextData { match *self { - FunctionDebugContext::RegularContext(box ref data) => data, + FunctionDebugContext::RegularContext(ref data) => data, FunctionDebugContext::DebugInfoDisabled => { span_bug!(span, "{}", @@ -134,7 +134,6 @@ impl FunctionDebugContext { pub struct FunctionDebugContextData { fn_metadata: DISubprogram, source_locations_enabled: Cell, - source_location_override: Cell, } pub enum VariableAccess<'a> { @@ -293,10 +292,9 @@ pub fn create_function_debug_context<'a, 'tcx>(cx: &CrateContext<'a, 'tcx>, }; // Initialize fn debug context (including scope map and namespace map) - let fn_debug_context = box FunctionDebugContextData { + let fn_debug_context = FunctionDebugContextData { fn_metadata: fn_metadata, source_locations_enabled: Cell::new(false), - 
source_location_override: Cell::new(false), }; return FunctionDebugContext::RegularContext(fn_debug_context); @@ -503,11 +501,7 @@ pub fn declare_local<'blk, 'tcx>(bcx: &BlockAndBuilder<'blk, 'tcx>, match variable_kind { ArgumentVariable(_) | CapturedVariable => { - assert!(!bcx.fcx() - .debug_context - .get_ref(span) - .source_locations_enabled - .get()); + assert!(!bcx.fcx().debug_context.get_ref(span).source_locations_enabled.get()); source_loc::set_debug_location(cx, None, UnknownLocation); } _ => { /* nothing to do */ } diff --git a/src/librustc_trans/debuginfo/source_loc.rs b/src/librustc_trans/debuginfo/source_loc.rs index 1aee27c144a36..86ecc0e65a9de 100644 --- a/src/librustc_trans/debuginfo/source_loc.rs +++ b/src/librustc_trans/debuginfo/source_loc.rs @@ -36,15 +36,9 @@ pub fn set_source_location(fcx: &FunctionContext, set_debug_location(fcx.ccx, builder, UnknownLocation); return; } - FunctionDebugContext::RegularContext(box ref data) => data + FunctionDebugContext::RegularContext(ref data) => data }; - if function_debug_context.source_location_override.get() { - // Just ignore any attempts to set a new debug location while - // the override is active. - return; - } - let dbg_loc = if function_debug_context.source_locations_enabled.get() { let (scope, span) = match debug_loc { DebugLoc::ScopeAt(scope, span) => (scope, span), @@ -72,7 +66,7 @@ pub fn set_source_location(fcx: &FunctionContext, /// first real statement/expression of the function is translated. pub fn start_emitting_source_locations(fcx: &FunctionContext) { match fcx.debug_context { - FunctionDebugContext::RegularContext(box ref data) => { + FunctionDebugContext::RegularContext(ref data) => { data.source_locations_enabled.set(true) }, _ => { /* safe to ignore */ } From dda6c8cf2f035d23ff3f67c5fc0e805bb18cd0a4 Mon Sep 17 00:00:00 2001 From: Mark-Simulacrum Date: Thu, 15 Dec 2016 18:00:19 -0700 Subject: [PATCH 029/103] Inline base::malloc_raw_dyn. Move comment about not unwinding into liballoc. --- src/liballoc/heap.rs | 1 + src/librustc/middle/lang_items.rs | 2 -- src/librustc_trans/base.rs | 29 +---------------------------- src/librustc_trans/mir/rvalue.rs | 14 +++++++++++++- 4 files changed, 15 insertions(+), 31 deletions(-) diff --git a/src/liballoc/heap.rs b/src/liballoc/heap.rs index 12809171b7438..a1e3263698081 100644 --- a/src/liballoc/heap.rs +++ b/src/liballoc/heap.rs @@ -127,6 +127,7 @@ pub fn usable_size(size: usize, align: usize) -> usize { pub const EMPTY: *mut () = 0x1 as *mut (); /// The allocator for unique pointers. +// This function must not unwind. If it does, MIR trans will fail. #[cfg(not(test))] #[lang = "exchange_malloc"] #[inline] diff --git a/src/librustc/middle/lang_items.rs b/src/librustc/middle/lang_items.rs index 3bc39fad7f1b5..1efc211b8c35b 100644 --- a/src/librustc/middle/lang_items.rs +++ b/src/librustc/middle/lang_items.rs @@ -327,8 +327,6 @@ language_item_table! { PanicBoundsCheckFnLangItem, "panic_bounds_check", panic_bounds_check_fn; PanicFmtLangItem, "panic_fmt", panic_fmt; - // ExchangeMallocFnLangItem cannot unwind, or MIR trans will break. See note - // on `malloc_raw_dyn` in librustc_trans/base.rs. 
ExchangeMallocFnLangItem, "exchange_malloc", exchange_malloc_fn; ExchangeFreeFnLangItem, "exchange_free", exchange_free_fn; BoxFreeFnLangItem, "box_free", box_free_fn; diff --git a/src/librustc_trans/base.rs b/src/librustc_trans/base.rs index 011552b39627b..3f477a463abd7 100644 --- a/src/librustc_trans/base.rs +++ b/src/librustc_trans/base.rs @@ -37,7 +37,7 @@ use back::symbol_export::{self, ExportedSymbols}; use llvm::{Linkage, ValueRef, Vector, get_param}; use llvm; use rustc::hir::def_id::{DefId, LOCAL_CRATE}; -use middle::lang_items::{LangItem, ExchangeMallocFnLangItem, StartFnLangItem}; +use middle::lang_items::StartFnLangItem; use rustc::ty::subst::Substs; use rustc::traits; use rustc::ty::{self, Ty, TyCtxt, TypeFoldable}; @@ -168,33 +168,6 @@ pub fn get_dataptr(bcx: &Builder, fat_ptr: ValueRef) -> ValueRef { bcx.struct_gep(fat_ptr, abi::FAT_PTR_ADDR) } -fn require_alloc_fn<'blk, 'tcx>( - bcx: &BlockAndBuilder<'blk, 'tcx>, info_ty: Ty<'tcx>, it: LangItem -) -> DefId { - match bcx.tcx().lang_items.require(it) { - Ok(id) => id, - Err(s) => { - bcx.sess().fatal(&format!("allocation of `{}` {}", info_ty, s)); - } - } -} - -// malloc_raw_dyn allocates a box to contain a given type, but with a potentially dynamic size. -// -// MIR requires that ExchangeMallocFnLangItem cannot unwind. -pub fn malloc_raw_dyn<'blk, 'tcx>(bcx: &BlockAndBuilder<'blk, 'tcx>, - llty_ptr: Type, - info_ty: Ty<'tcx>, - size: ValueRef, - align: ValueRef) - -> ValueRef { - // Allocate space: - let def_id = require_alloc_fn(bcx, info_ty, ExchangeMallocFnLangItem); - let r = Callee::def(bcx.ccx(), def_id, bcx.tcx().intern_substs(&[])).reify(bcx.ccx()); - bcx.pointercast(bcx.call(r, &[size, align], None), llty_ptr) -} - - pub fn bin_op_to_icmp_predicate(op: hir::BinOp_, signed: bool) -> llvm::IntPredicate { diff --git a/src/librustc_trans/mir/rvalue.rs b/src/librustc_trans/mir/rvalue.rs index 5aba7160c2353..d15598e76af6e 100644 --- a/src/librustc_trans/mir/rvalue.rs +++ b/src/librustc_trans/mir/rvalue.rs @@ -13,6 +13,7 @@ use rustc::ty::{self, Ty}; use rustc::ty::cast::{CastTy, IntTy}; use rustc::ty::layout::Layout; use rustc::mir; +use middle::lang_items::ExchangeMallocFnLangItem; use asm; use base; @@ -449,7 +450,18 @@ impl<'bcx, 'tcx> MirContext<'bcx, 'tcx> { let llalign = C_uint(bcx.ccx(), align); let llty_ptr = llty.ptr_to(); let box_ty = bcx.tcx().mk_box(content_ty); - let val = base::malloc_raw_dyn(&bcx, llty_ptr, box_ty, llsize, llalign); + + // Allocate space: + let def_id = match bcx.tcx().lang_items.require(ExchangeMallocFnLangItem) { + Ok(id) => id, + Err(s) => { + bcx.sess().fatal(&format!("allocation of `{}` {}", box_ty, s)); + } + }; + let r = Callee::def(bcx.ccx(), def_id, bcx.tcx().intern_substs(&[])) + .reify(bcx.ccx()); + let val = bcx.pointercast(bcx.call(r, &[llsize, llalign], None), llty_ptr); + let operand = OperandRef { val: OperandValue::Immediate(val), ty: box_ty, From 5bdcc22b79a6080ff9a66f390160f987b8b5469a Mon Sep 17 00:00:00 2001 From: Mark-Simulacrum Date: Thu, 15 Dec 2016 18:01:51 -0700 Subject: [PATCH 030/103] Remove FIXME --- src/librustc_trans/intrinsic.rs | 1 - src/librustc_trans/mir/block.rs | 2 +- 2 files changed, 1 insertion(+), 2 deletions(-) diff --git a/src/librustc_trans/intrinsic.rs b/src/librustc_trans/intrinsic.rs index 9be505556c5d1..b70a1d119de49 100644 --- a/src/librustc_trans/intrinsic.rs +++ b/src/librustc_trans/intrinsic.rs @@ -119,7 +119,6 @@ pub fn trans_intrinsic_call<'a, 'blk, 'tcx>(bcx: &BlockAndBuilder<'blk, 'tcx>, bcx.call(llfn, &[], None); return; } else if name 
== "unreachable" { - // FIXME: do nothing? return; } diff --git a/src/librustc_trans/mir/block.rs b/src/librustc_trans/mir/block.rs index 3fa88c8706d4f..d76385b8bfe9b 100644 --- a/src/librustc_trans/mir/block.rs +++ b/src/librustc_trans/mir/block.rs @@ -468,7 +468,7 @@ impl<'bcx, 'tcx> MirContext<'bcx, 'tcx> { return; } - let ptr = self.trans_operand(&bcx, &args[0]); + let ptr = self.trans_operand(&bcx, &args[0]); let (llval, llextra) = match ptr.val { Immediate(llptr) => (llptr, ptr::null_mut()), Pair(llptr, llextra) => (llptr, llextra), From da23332b65ae6933bc70239bfee555a374961be7 Mon Sep 17 00:00:00 2001 From: Mark-Simulacrum Date: Thu, 15 Dec 2016 18:05:05 -0700 Subject: [PATCH 031/103] Remove remaining traces of block_arena --- src/librustc_trans/base.rs | 2 -- src/librustc_trans/common.rs | 3 --- 2 files changed, 5 deletions(-) diff --git a/src/librustc_trans/base.rs b/src/librustc_trans/base.rs index 3f477a463abd7..8db339f66b2c5 100644 --- a/src/librustc_trans/base.rs +++ b/src/librustc_trans/base.rs @@ -664,7 +664,6 @@ impl<'blk, 'tcx> FunctionContext<'blk, 'tcx> { llfndecl: ValueRef, fn_ty: FnType, definition: Option<(Instance<'tcx>, &ty::FnSig<'tcx>, Abi)>) - //block_arena: &'blk TypedArena>) -> FunctionContext<'blk, 'tcx> { let (param_substs, def_id) = match definition { Some((instance, ..)) => { @@ -708,7 +707,6 @@ impl<'blk, 'tcx> FunctionContext<'blk, 'tcx> { fn_ty: fn_ty, param_substs: param_substs, span: None, - //block_arena: block_arena, funclet_arena: TypedArena::new(), ccx: ccx, debug_context: debug_context, diff --git a/src/librustc_trans/common.rs b/src/librustc_trans/common.rs index 8637bb322ca16..6306312c58ed0 100644 --- a/src/librustc_trans/common.rs +++ b/src/librustc_trans/common.rs @@ -303,9 +303,6 @@ pub struct FunctionContext<'a, 'tcx: 'a> { // error reporting and symbol generation. pub span: Option, - // The arena that blocks are allocated from. - //pub block_arena: &'a TypedArena>, - // The arena that landing pads are allocated from. pub funclet_arena: TypedArena, From e0ccc81bbf0069bf968a741594dd81338cf98ac3 Mon Sep 17 00:00:00 2001 From: Mark-Simulacrum Date: Thu, 15 Dec 2016 18:22:55 -0700 Subject: [PATCH 032/103] Remove needless allows --- src/librustc_trans/base.rs | 2 -- src/librustc_trans/cabi_arm.rs | 2 -- src/librustc_trans/tvec.rs | 2 -- src/librustc_trans/type_of.rs | 2 -- 4 files changed, 8 deletions(-) diff --git a/src/librustc_trans/base.rs b/src/librustc_trans/base.rs index 8db339f66b2c5..06a368e896cc6 100644 --- a/src/librustc_trans/base.rs +++ b/src/librustc_trans/base.rs @@ -23,8 +23,6 @@ //! but one TypeRef corresponds to many `Ty`s; for instance, tup(int, int, //! int) and rec(x=int, y=int, z=int) will have the same TypeRef. -#![allow(non_camel_case_types)] - use super::CrateTranslation; use super::ModuleLlvm; use super::ModuleSource; diff --git a/src/librustc_trans/cabi_arm.rs b/src/librustc_trans/cabi_arm.rs index 93d43f7d96116..85b26074bae6d 100644 --- a/src/librustc_trans/cabi_arm.rs +++ b/src/librustc_trans/cabi_arm.rs @@ -8,8 +8,6 @@ // option. This file may not be copied, modified, or distributed // except according to those terms. -#![allow(non_upper_case_globals)] - use llvm::{Integer, Pointer, Float, Double, Struct, Array, Vector}; use abi::{self, align_up_to, FnType, ArgType}; use context::CrateContext; diff --git a/src/librustc_trans/tvec.rs b/src/librustc_trans/tvec.rs index 36762885cc99a..5b4cb74bf4fc6 100644 --- a/src/librustc_trans/tvec.rs +++ b/src/librustc_trans/tvec.rs @@ -8,8 +8,6 @@ // option. 
This file may not be copied, modified, or distributed // except according to those terms. -#![allow(non_camel_case_types)] - use llvm; use llvm::ValueRef; use common::*; diff --git a/src/librustc_trans/type_of.rs b/src/librustc_trans/type_of.rs index 22c405fe254a6..b38cd86e4bcde 100644 --- a/src/librustc_trans/type_of.rs +++ b/src/librustc_trans/type_of.rs @@ -8,8 +8,6 @@ // option. This file may not be copied, modified, or distributed // except according to those terms. -#![allow(non_camel_case_types)] - use abi::FnType; use adt; use common::*; From 28f511cfbd9faaec4cc54ea002b43bacb3da497c Mon Sep 17 00:00:00 2001 From: Mark-Simulacrum Date: Fri, 16 Dec 2016 08:28:20 -0700 Subject: [PATCH 033/103] Remove global Builder --- src/librustc_trans/base.rs | 96 +------------------------------ src/librustc_trans/common.rs | 104 +++++++++++++++++++++++++++++++--- src/librustc_trans/context.rs | 5 -- 3 files changed, 99 insertions(+), 106 deletions(-) diff --git a/src/librustc_trans/base.rs b/src/librustc_trans/base.rs index 06a368e896cc6..4f346f8897ebd 100644 --- a/src/librustc_trans/base.rs +++ b/src/librustc_trans/base.rs @@ -53,7 +53,7 @@ use builder::{Builder, noname}; use callee::{Callee}; use common::{BlockAndBuilder, C_bool, C_bytes_in_context, C_i32, C_uint}; use collector::{self, TransItemCollectionMode}; -use common::{C_null, C_struct_in_context, C_u64, C_u8, C_undef}; +use common::{C_struct_in_context, C_u64, C_u8, C_undef}; use common::{CrateContext, FunctionContext}; use common::{fulfill_obligation}; use common::{type_is_zero_size, val_ty}; @@ -77,10 +77,9 @@ use value::Value; use Disr; use util::nodemap::{NodeSet, FxHashMap, FxHashSet}; -use arena::TypedArena; use libc::c_uint; use std::ffi::{CStr, CString}; -use std::cell::{Cell, RefCell}; +use std::cell::RefCell; use std::ptr; use std::rc::Rc; use std::str; @@ -655,97 +654,6 @@ pub fn alloca(cx: &BlockAndBuilder, ty: Type, name: &str) -> ValueRef { } impl<'blk, 'tcx> FunctionContext<'blk, 'tcx> { - /// Create a function context for the given function. - /// Beware that you must call `fcx.init` or `fcx.bind_args` - /// before doing anything with the returned function context. 
- pub fn new(ccx: &'blk CrateContext<'blk, 'tcx>, - llfndecl: ValueRef, - fn_ty: FnType, - definition: Option<(Instance<'tcx>, &ty::FnSig<'tcx>, Abi)>) - -> FunctionContext<'blk, 'tcx> { - let (param_substs, def_id) = match definition { - Some((instance, ..)) => { - common::validate_substs(instance.substs); - (instance.substs, Some(instance.def)) - } - None => (ccx.tcx().intern_substs(&[]), None) - }; - - let local_id = def_id.and_then(|id| ccx.tcx().map.as_local_node_id(id)); - - debug!("FunctionContext::new({})", - definition.map_or(String::new(), |d| d.0.to_string())); - - let no_debug = if let Some(id) = local_id { - ccx.tcx().map.attrs(id) - .iter().any(|item| item.check_name("no_debug")) - } else if let Some(def_id) = def_id { - ccx.sess().cstore.item_attrs(def_id) - .iter().any(|item| item.check_name("no_debug")) - } else { - false - }; - - let mir = def_id.map(|id| ccx.tcx().item_mir(id)); - - let debug_context = if let (false, Some((instance, sig, abi)), &Some(ref mir)) = - (no_debug, definition, &mir) { - debuginfo::create_function_debug_context(ccx, instance, sig, abi, llfndecl, mir) - } else { - debuginfo::empty_function_debug_context(ccx) - }; - - FunctionContext { - mir: mir, - llfn: llfndecl, - llretslotptr: Cell::new(None), - param_env: ccx.tcx().empty_parameter_environment(), - alloca_insert_pt: Cell::new(None), - landingpad_alloca: Cell::new(None), - fn_ty: fn_ty, - param_substs: param_substs, - span: None, - funclet_arena: TypedArena::new(), - ccx: ccx, - debug_context: debug_context, - } - } - - /// Performs setup on a newly created function, creating the entry - /// scope block and allocating space for the return pointer. - pub fn init(&'blk self, skip_retptr: bool) -> BlockAndBuilder<'blk, 'tcx> { - let entry_bcx = self.build_new_block("entry-block"); - - // Use a dummy instruction as the insertion point for all allocas. - // This is later removed in FunctionContext::cleanup. - self.alloca_insert_pt.set(Some(unsafe { - entry_bcx.load(C_null(Type::i8p(self.ccx))); - llvm::LLVMGetFirstInstruction(entry_bcx.llbb()) - })); - - if !self.fn_ty.ret.is_ignore() && !skip_retptr { - // We normally allocate the llretslotptr, unless we - // have been instructed to skip it for immediate return - // values, or there is nothing to return at all. - - // We create an alloca to hold a pointer of type `ret.original_ty` - // which will hold the pointer to the right alloca which has the - // final ret value - let llty = self.fn_ty.ret.memory_ty(self.ccx); - // But if there are no nested returns, we skip the indirection - // and have a single retslot - let slot = if self.fn_ty.ret.is_indirect() { - get_param(self.llfn, 0) - } else { - self.alloca(llty, "sret_slot") - }; - - self.llretslotptr.set(Some(slot)); - } - - entry_bcx - } - /// Ties up the llstaticallocas -> llloadenv -> lltop edges, /// and builds the return block. 
pub fn finish(&'blk self, ret_cx: &BlockAndBuilder<'blk, 'tcx>) { diff --git a/src/librustc_trans/common.rs b/src/librustc_trans/common.rs index 6306312c58ed0..e4c3d6497fd75 100644 --- a/src/librustc_trans/common.rs +++ b/src/librustc_trans/common.rs @@ -15,7 +15,8 @@ use session::Session; use llvm; use llvm::{ValueRef, BasicBlockRef, BuilderRef, ContextRef, TypeKind}; -use llvm::{True, False, Bool, OperandBundleDef}; +use llvm::{True, False, Bool, OperandBundleDef, get_param}; +use monomorphize::Instance; use rustc::hir::def::Def; use rustc::hir::def_id::DefId; use rustc::hir::map::DefPathData; @@ -311,18 +312,109 @@ pub struct FunctionContext<'a, 'tcx: 'a> { // Used and maintained by the debuginfo module. pub debug_context: debuginfo::FunctionDebugContext, + + owned_builder: OwnedBuilder<'a, 'tcx>, } impl<'a, 'tcx> FunctionContext<'a, 'tcx> { + /// Create a function context for the given function. + /// Beware that you must call `fcx.init` or `fcx.bind_args` + /// before doing anything with the returned function context. + pub fn new(ccx: &'a CrateContext<'a, 'tcx>, + llfndecl: ValueRef, + fn_ty: FnType, + definition: Option<(Instance<'tcx>, &ty::FnSig<'tcx>, Abi)>) + -> FunctionContext<'a, 'tcx> { + let (param_substs, def_id) = match definition { + Some((instance, ..)) => { + validate_substs(instance.substs); + (instance.substs, Some(instance.def)) + } + None => (ccx.tcx().intern_substs(&[]), None) + }; + + let local_id = def_id.and_then(|id| ccx.tcx().map.as_local_node_id(id)); + + debug!("FunctionContext::new({})", definition.map_or(String::new(), |d| d.0.to_string())); + + let no_debug = if let Some(id) = local_id { + ccx.tcx().map.attrs(id).iter().any(|item| item.check_name("no_debug")) + } else if let Some(def_id) = def_id { + ccx.sess().cstore.item_attrs(def_id).iter().any(|item| item.check_name("no_debug")) + } else { + false + }; + + let mir = def_id.map(|id| ccx.tcx().item_mir(id)); + + let debug_context = if let (false, Some((instance, sig, abi)), &Some(ref mir)) = + (no_debug, definition, &mir) { + debuginfo::create_function_debug_context(ccx, instance, sig, abi, llfndecl, mir) + } else { + debuginfo::empty_function_debug_context(ccx) + }; + + FunctionContext { + mir: mir, + llfn: llfndecl, + llretslotptr: Cell::new(None), + param_env: ccx.tcx().empty_parameter_environment(), + alloca_insert_pt: Cell::new(None), + landingpad_alloca: Cell::new(None), + fn_ty: fn_ty, + param_substs: param_substs, + span: None, + funclet_arena: TypedArena::new(), + ccx: ccx, + debug_context: debug_context, + owned_builder: OwnedBuilder::new_with_ccx(ccx), + } + } + + /// Performs setup on a newly created function, creating the entry + /// scope block and allocating space for the return pointer. + pub fn init(&'a self, skip_retptr: bool) -> BlockAndBuilder<'a, 'tcx> { + let entry_bcx = self.build_new_block("entry-block"); + + // Use a dummy instruction as the insertion point for all allocas. + // This is later removed in FunctionContext::cleanup. + self.alloca_insert_pt.set(Some(unsafe { + entry_bcx.load(C_null(Type::i8p(self.ccx))); + llvm::LLVMGetFirstInstruction(entry_bcx.llbb()) + })); + + self.owned_builder.builder.position_at_start(entry_bcx.llbb()); + + if !self.fn_ty.ret.is_ignore() && !skip_retptr { + // We normally allocate the llretslotptr, unless we + // have been instructed to skip it for immediate return + // values, or there is nothing to return at all. 
+ + // We create an alloca to hold a pointer of type `ret.original_ty` + // which will hold the pointer to the right alloca which has the + // final ret value + let llty = self.fn_ty.ret.memory_ty(self.ccx); + // But if there are no nested returns, we skip the indirection + // and have a single retslot + let slot = if self.fn_ty.ret.is_indirect() { + get_param(self.llfn, 0) + } else { + self.alloca(llty, "sret_slot") + }; + + self.llretslotptr.set(Some(slot)); + } + + entry_bcx + } + pub fn mir(&self) -> Ref<'tcx, Mir<'tcx>> { self.mir.as_ref().map(Ref::clone).expect("fcx.mir was empty") } pub fn cleanup(&self) { unsafe { - llvm::LLVMInstructionEraseFromParent(self.alloca_insert_pt - .get() - .unwrap()); + llvm::LLVMInstructionEraseFromParent(self.alloca_insert_pt.get().unwrap()); } } @@ -431,9 +523,7 @@ impl<'a, 'tcx> FunctionContext<'a, 'tcx> { } pub fn alloca(&self, ty: Type, name: &str) -> ValueRef { - let b = self.ccx.builder(); - b.position_before(self.alloca_insert_pt.get().unwrap()); - b.alloca(ty, name) + self.owned_builder.builder.alloca(ty, name) } } diff --git a/src/librustc_trans/context.rs b/src/librustc_trans/context.rs index 6435b20eeaa00..4f524af29456e 100644 --- a/src/librustc_trans/context.rs +++ b/src/librustc_trans/context.rs @@ -17,7 +17,6 @@ use rustc::hir::def::ExportMap; use rustc::hir::def_id::DefId; use rustc::traits; use base; -use builder::Builder; use common::BuilderRef_res; use debuginfo; use declare; @@ -731,10 +730,6 @@ impl<'b, 'tcx> CrateContext<'b, 'tcx> { &self.shared.tcx.sess } - pub fn builder<'a>(&'a self) -> Builder<'a, 'tcx> { - Builder::new(self) - } - pub fn raw_builder<'a>(&'a self) -> BuilderRef { self.local().builder.b } From bc0b172f3b3944eea0a326e032a961d0f5f5df11 Mon Sep 17 00:00:00 2001 From: Mark-Simulacrum Date: Fri, 16 Dec 2016 10:48:46 -0700 Subject: [PATCH 034/103] Remove BlockAndBuilder.funclet --- src/librustc_trans/cleanup.rs | 2 +- src/librustc_trans/common.rs | 23 ----------------------- 2 files changed, 1 insertion(+), 24 deletions(-) diff --git a/src/librustc_trans/cleanup.rs b/src/librustc_trans/cleanup.rs index 67b9dd1828848..bd387ccae2055 100644 --- a/src/librustc_trans/cleanup.rs +++ b/src/librustc_trans/cleanup.rs @@ -99,7 +99,7 @@ impl<'blk, 'tcx> FunctionContext<'blk, 'tcx> { custom_scope: Option> ) { if let Some(scope) = custom_scope { - scope.cleanup.trans(bcx.funclet(), &bcx); + scope.cleanup.trans(None, &bcx); } } diff --git a/src/librustc_trans/common.rs b/src/librustc_trans/common.rs index e4c3d6497fd75..e2abb7779c98f 100644 --- a/src/librustc_trans/common.rs +++ b/src/librustc_trans/common.rs @@ -42,7 +42,6 @@ use rustc::traits::{self, SelectionContext, Reveal}; use rustc::ty::fold::TypeFoldable; use rustc::hir; -use arena::TypedArena; use libc::{c_uint, c_char}; use std::borrow::Cow; use std::iter; @@ -304,9 +303,6 @@ pub struct FunctionContext<'a, 'tcx: 'a> { // error reporting and symbol generation. pub span: Option, - // The arena that landing pads are allocated from. - pub funclet_arena: TypedArena, - // This function's enclosing crate context. pub ccx: &'a CrateContext<'a, 'tcx>, @@ -364,7 +360,6 @@ impl<'a, 'tcx> FunctionContext<'a, 'tcx> { fn_ty: fn_ty, param_substs: param_substs, span: None, - funclet_arena: TypedArena::new(), ccx: ccx, debug_context: debug_context, owned_builder: OwnedBuilder::new_with_ccx(ccx), @@ -564,10 +559,6 @@ pub struct BlockAndBuilder<'blk, 'tcx: 'blk> { // The block pointing to this one in the function's digraph. 
llbb: BasicBlockRef, - // If this block part of a landing pad, then this is `Some` indicating what - // kind of landing pad its in, otherwise this is none. - funclet: Option<&'blk Funclet>, - // The function context for the function to which this block is // attached. fcx: &'blk FunctionContext<'blk, 'tcx>, @@ -582,7 +573,6 @@ impl<'blk, 'tcx> BlockAndBuilder<'blk, 'tcx> { owned_builder.builder.position_at_end(llbb); BlockAndBuilder { llbb: llbb, - funclet: None, fcx: fcx, owned_builder: owned_builder, } @@ -617,19 +607,6 @@ impl<'blk, 'tcx> BlockAndBuilder<'blk, 'tcx> { pub fn mir(&self) -> Ref<'tcx, Mir<'tcx>> { self.fcx.mir() } - - pub fn set_funclet(&mut self, funclet: Option) { - self.funclet = funclet.map(|p| &*self.fcx().funclet_arena.alloc(p)); - } - - pub fn set_funclet_ref(&mut self, funclet: Option<&'blk Funclet>) { - // FIXME: use an IVar? - self.funclet = funclet; - } - - pub fn funclet(&self) -> Option<&'blk Funclet> { - self.funclet - } } impl<'blk, 'tcx> Deref for BlockAndBuilder<'blk, 'tcx> { From be981dce80f6d115b04a6898bb2d6bf316dc402d Mon Sep 17 00:00:00 2001 From: Mark-Simulacrum Date: Fri, 16 Dec 2016 11:32:59 -0700 Subject: [PATCH 035/103] Start FunctionContext privatization and reduction --- src/librustc_trans/common.rs | 10 +++------- src/librustc_trans/mir/mod.rs | 2 +- 2 files changed, 4 insertions(+), 8 deletions(-) diff --git a/src/librustc_trans/common.rs b/src/librustc_trans/common.rs index e2abb7779c98f..1025f9d4051f9 100644 --- a/src/librustc_trans/common.rs +++ b/src/librustc_trans/common.rs @@ -269,7 +269,7 @@ pub struct FunctionContext<'a, 'tcx: 'a> { pub llfn: ValueRef, // always an empty parameter-environment NOTE: @jroesch another use of ParamEnv - pub param_env: ty::ParameterEnvironment<'tcx>, + param_env: ty::ParameterEnvironment<'tcx>, // A pointer to where to store the return value. If the return type is // immediate, this points to an alloca in the function. Otherwise, it's a @@ -314,8 +314,8 @@ pub struct FunctionContext<'a, 'tcx: 'a> { impl<'a, 'tcx> FunctionContext<'a, 'tcx> { /// Create a function context for the given function. - /// Beware that you must call `fcx.init` or `fcx.bind_args` - /// before doing anything with the returned function context. + /// Beware that you must call `fcx.init` before doing anything with the returned function + /// context. pub fn new(ccx: &'a CrateContext<'a, 'tcx>, llfndecl: ValueRef, fn_ty: FnType, @@ -603,10 +603,6 @@ impl<'blk, 'tcx> BlockAndBuilder<'blk, 'tcx> { pub fn llbb(&self) -> BasicBlockRef { self.llbb } - - pub fn mir(&self) -> Ref<'tcx, Mir<'tcx>> { - self.fcx.mir() - } } impl<'blk, 'tcx> Deref for BlockAndBuilder<'blk, 'tcx> { diff --git a/src/librustc_trans/mir/mod.rs b/src/librustc_trans/mir/mod.rs index 60fd80a8f9023..c3cc5a7a9b39c 100644 --- a/src/librustc_trans/mir/mod.rs +++ b/src/librustc_trans/mir/mod.rs @@ -182,7 +182,7 @@ impl<'tcx> LocalRef<'tcx> { pub fn trans_mir<'blk, 'tcx: 'blk>(fcx: &'blk FunctionContext<'blk, 'tcx>) { let bcx = fcx.init(true); - let mir = bcx.mir(); + let mir = fcx.mir(); // Analyze the temps to determine which must be lvalues // FIXME From 820164582dc22e8d31283da377825331bfc06da9 Mon Sep 17 00:00:00 2001 From: Mark-Simulacrum Date: Fri, 16 Dec 2016 13:25:18 -0700 Subject: [PATCH 036/103] Remove DebugLoc. 
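
DebugLoc was an enum carrying either a (scope, span) pair or None, and
every consumer had to match on it before it could be applied to the
builder. Callers now look up the pair once and set it on the block
directly. A minimal sketch of the new pattern, as used when translating
a MIR statement (surrounding context elided):

    // Resolve the DIScope and Span for this statement's source info...
    let (scope, span) = self.debug_loc(statement.source_info);
    // ...and attach them to the block's builder in a single call.
    bcx.set_source_location(scope, span);

As a side effect, intrinsic translation now receives the call span as a
plain Span argument instead of recovering it from a DebugLoc and calling
span_bug! when it was None.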
--- src/librustc_trans/adt.rs | 4 --- src/librustc_trans/base.rs | 5 +--- src/librustc_trans/common.rs | 5 ++++ src/librustc_trans/debuginfo/mod.rs | 19 ++----------- src/librustc_trans/debuginfo/source_loc.rs | 27 ++++++------------ src/librustc_trans/intrinsic.rs | 13 ++------- src/librustc_trans/mir/block.rs | 13 ++++----- src/librustc_trans/mir/mod.rs | 32 +++++++++------------- src/librustc_trans/mir/statement.rs | 5 ++-- 9 files changed, 38 insertions(+), 85 deletions(-) diff --git a/src/librustc_trans/adt.rs b/src/librustc_trans/adt.rs index 7f8eef5a51df3..5482200f13ec8 100644 --- a/src/librustc_trans/adt.rs +++ b/src/librustc_trans/adt.rs @@ -49,7 +49,6 @@ use llvm::{ValueRef, True, IntEQ, IntNE}; use rustc::ty::layout; use rustc::ty::{self, Ty, AdtKind}; use common::*; -use debuginfo::DebugLoc; use glue; use base; use machine; @@ -595,8 +594,6 @@ fn struct_field_ptr<'blk, 'tcx>(bcx: &BlockAndBuilder<'blk, 'tcx>, return bcx.struct_gep(ptr_val, ix); } - let dbloc = DebugLoc::None; - // We need to get the pointer manually now. // We do this by casting to a *i8, then offsetting it by the appropriate amount. // We do this instead of, say, simply adjusting the pointer from the result of a GEP @@ -627,7 +624,6 @@ fn struct_field_ptr<'blk, 'tcx>(bcx: &BlockAndBuilder<'blk, 'tcx>, // (unaligned offset + (align - 1)) & -align // Calculate offset - dbloc.apply(bcx.fcx()); let align_sub_1 = bcx.sub(align, C_uint(bcx.ccx(), 1u64)); let offset = bcx.and(bcx.add(unaligned_offset, align_sub_1), bcx.neg(align)); diff --git a/src/librustc_trans/base.rs b/src/librustc_trans/base.rs index 4f346f8897ebd..b1be2e4abe6de 100644 --- a/src/librustc_trans/base.rs +++ b/src/librustc_trans/base.rs @@ -60,7 +60,7 @@ use common::{type_is_zero_size, val_ty}; use common; use consts; use context::{SharedCrateContext, CrateContextList}; -use debuginfo::{self, DebugLoc}; +use debuginfo; use declare; use machine; use machine::{llalign_of_min, llsize_of}; @@ -649,7 +649,6 @@ pub fn alloc_ty<'blk, 'tcx>(bcx: &BlockAndBuilder<'blk, 'tcx>, } pub fn alloca(cx: &BlockAndBuilder, ty: Type, name: &str) -> ValueRef { - DebugLoc::None.apply(cx.fcx()); cx.fcx().alloca(ty, name) } @@ -658,8 +657,6 @@ impl<'blk, 'tcx> FunctionContext<'blk, 'tcx> { /// and builds the return block. 
pub fn finish(&'blk self, ret_cx: &BlockAndBuilder<'blk, 'tcx>) { self.build_return_block(ret_cx); - - DebugLoc::None.apply(self); self.cleanup(); } diff --git a/src/librustc_trans/common.rs b/src/librustc_trans/common.rs index 1025f9d4051f9..7aceb0b88bb1c 100644 --- a/src/librustc_trans/common.rs +++ b/src/librustc_trans/common.rs @@ -16,6 +16,7 @@ use session::Session; use llvm; use llvm::{ValueRef, BasicBlockRef, BuilderRef, ContextRef, TypeKind}; use llvm::{True, False, Bool, OperandBundleDef, get_param}; +use llvm::debuginfo::DIScope; use monomorphize::Instance; use rustc::hir::def::Def; use rustc::hir::def_id::DefId; @@ -578,6 +579,10 @@ impl<'blk, 'tcx> BlockAndBuilder<'blk, 'tcx> { } } + pub fn set_source_location(&self, scope: DIScope, sp: Span) { + debuginfo::set_source_location(self.fcx(), self, scope, sp) + } + pub fn at_start(&self, f: F) -> R where F: FnOnce(&BlockAndBuilder<'blk, 'tcx>) -> R { diff --git a/src/librustc_trans/debuginfo/mod.rs b/src/librustc_trans/debuginfo/mod.rs index 1bf3c0acec55a..f915f60c29460 100644 --- a/src/librustc_trans/debuginfo/mod.rs +++ b/src/librustc_trans/debuginfo/mod.rs @@ -27,7 +27,7 @@ use rustc::hir::def_id::DefId; use rustc::ty::subst::Substs; use abi::Abi; -use common::{CrateContext, FunctionContext, BlockAndBuilder}; +use common::{CrateContext, BlockAndBuilder}; use monomorphize::{self, Instance}; use rustc::ty::{self, Ty}; use rustc::mir; @@ -55,6 +55,7 @@ pub use self::create_scope_map::{create_mir_scopes, MirDebugScope}; pub use self::source_loc::start_emitting_source_locations; pub use self::metadata::create_global_var_metadata; pub use self::metadata::extend_scope_to_file; +pub use self::source_loc::set_source_location; #[allow(non_upper_case_globals)] const DW_TAG_auto_variable: c_uint = 0x100; @@ -507,19 +508,3 @@ pub fn declare_local<'blk, 'tcx>(bcx: &BlockAndBuilder<'blk, 'tcx>, _ => { /* nothing to do */ } } } - -#[derive(Copy, Clone, PartialEq, Eq, Debug)] -pub enum DebugLoc { - ScopeAt(DIScope, Span), - None -} - -impl DebugLoc { - pub fn apply(self, fcx: &FunctionContext) { - source_loc::set_source_location(fcx, None, self); - } - - pub fn apply_to_bcx(self, bcx: &BlockAndBuilder) { - source_loc::set_source_location(bcx.fcx(), Some(bcx), self); - } -} diff --git a/src/librustc_trans/debuginfo/source_loc.rs b/src/librustc_trans/debuginfo/source_loc.rs index 86ecc0e65a9de..e03ad1a8c8fdb 100644 --- a/src/librustc_trans/debuginfo/source_loc.rs +++ b/src/librustc_trans/debuginfo/source_loc.rs @@ -11,8 +11,8 @@ use self::InternalDebugLocation::*; use super::utils::{debug_context, span_start}; -use super::metadata::{UNKNOWN_COLUMN_NUMBER}; -use super::{FunctionDebugContext, DebugLoc}; +use super::metadata::UNKNOWN_COLUMN_NUMBER; +use super::FunctionDebugContext; use llvm; use llvm::debuginfo::DIScope; @@ -21,41 +21,30 @@ use common::{CrateContext, FunctionContext}; use libc::c_uint; use std::ptr; -use syntax_pos::Pos; +use syntax_pos::{Span, Pos}; /// Sets the current debug location at the beginning of the span. /// /// Maps to a call to llvm::LLVMSetCurrentDebugLocation(...). 
-pub fn set_source_location(fcx: &FunctionContext, - builder: Option<&Builder>, - debug_loc: DebugLoc) { - let builder = builder.map(|b| b.llbuilder); +pub fn set_source_location(fcx: &FunctionContext, builder: &Builder, scope: DIScope, span: Span) { + let builder = builder.llbuilder; let function_debug_context = match fcx.debug_context { FunctionDebugContext::DebugInfoDisabled => return, FunctionDebugContext::FunctionWithoutDebugInfo => { - set_debug_location(fcx.ccx, builder, UnknownLocation); + set_debug_location(fcx.ccx, Some(builder), UnknownLocation); return; } FunctionDebugContext::RegularContext(ref data) => data }; let dbg_loc = if function_debug_context.source_locations_enabled.get() { - let (scope, span) = match debug_loc { - DebugLoc::ScopeAt(scope, span) => (scope, span), - DebugLoc::None => { - set_debug_location(fcx.ccx, builder, UnknownLocation); - return; - } - }; - - debug!("set_source_location: {}", - fcx.ccx.sess().codemap().span_to_string(span)); + debug!("set_source_location: {}", fcx.ccx.sess().codemap().span_to_string(span)); let loc = span_start(fcx.ccx, span); InternalDebugLocation::new(scope, loc.line, loc.col.to_usize()) } else { UnknownLocation }; - set_debug_location(fcx.ccx, builder, dbg_loc); + set_debug_location(fcx.ccx, Some(builder), dbg_loc); } /// Enables emitting source locations for the given functions. diff --git a/src/librustc_trans/intrinsic.rs b/src/librustc_trans/intrinsic.rs index b70a1d119de49..84a6406c8e7f7 100644 --- a/src/librustc_trans/intrinsic.rs +++ b/src/librustc_trans/intrinsic.rs @@ -18,7 +18,6 @@ use abi::{Abi, FnType}; use adt; use base::*; use common::*; -use debuginfo::DebugLoc; use declare; use glue; use type_of; @@ -31,7 +30,7 @@ use syntax::ast; use syntax::symbol::Symbol; use rustc::session::Session; -use syntax_pos::{Span, DUMMY_SP}; +use syntax_pos::Span; use std::cmp::Ordering; use std::iter; @@ -90,7 +89,7 @@ pub fn trans_intrinsic_call<'a, 'blk, 'tcx>(bcx: &BlockAndBuilder<'blk, 'tcx>, fn_ty: &FnType, llargs: &[ValueRef], llresult: ValueRef, - call_debug_location: DebugLoc) { + span: Span) { let fcx = bcx.fcx(); let ccx = fcx.ccx; let tcx = bcx.tcx(); @@ -105,14 +104,6 @@ pub fn trans_intrinsic_call<'a, 'blk, 'tcx>(bcx: &BlockAndBuilder<'blk, 'tcx>, let ret_ty = sig.output(); let name = &*tcx.item_name(def_id).as_str(); - let span = match call_debug_location { - DebugLoc::ScopeAt(_, span) => span, - DebugLoc::None => { - span_bug!(fcx.span.unwrap_or(DUMMY_SP), - "intrinsic `{}` called with missing span", name); - } - }; - // These are the only intrinsic functions that diverge. 
if name == "abort" { let llfn = ccx.get_intrinsic(&("llvm.trap")); diff --git a/src/librustc_trans/mir/block.rs b/src/librustc_trans/mir/block.rs index d76385b8bfe9b..88e623f1ecc54 100644 --- a/src/librustc_trans/mir/block.rs +++ b/src/librustc_trans/mir/block.rs @@ -20,7 +20,6 @@ use callee::{Callee, CalleeData, Fn, Intrinsic, NamedTupleConstructor, Virtual}; use common::{self, BlockAndBuilder, Funclet}; use common::{C_bool, C_str_slice, C_struct, C_u32, C_undef}; use consts; -use debuginfo::DebugLoc; use Disr; use machine::{llalign_of_min, llbitsize_of_real}; use meth; @@ -115,9 +114,8 @@ impl<'bcx, 'tcx> MirContext<'bcx, 'tcx> { debug!("trans_block: terminator: {:?}", terminator); let span = terminator.source_info.span; - let debug_loc = self.debug_loc(terminator.source_info); - debug_loc.apply_to_bcx(&bcx); - debug_loc.apply(bcx.fcx()); + let (scope, debug_span) = self.debug_loc(terminator.source_info); + bcx.set_source_location(scope, debug_span); match terminator.kind { mir::TerminatorKind::Resume => { if let Some(cleanup_pad) = cleanup_pad { @@ -329,7 +327,7 @@ impl<'bcx, 'tcx> MirContext<'bcx, 'tcx> { // After this point, bcx is the block for the call to panic. bcx = panic_block; - debug_loc.apply_to_bcx(&bcx); + bcx.set_source_location(scope, debug_span); // Get the location information. let loc = bcx.sess().codemap().lookup_char_pos(span.lo); @@ -605,7 +603,7 @@ impl<'bcx, 'tcx> MirContext<'bcx, 'tcx> { bug!("Cannot use direct operand with an intrinsic call") }; - trans_intrinsic_call(&bcx, callee.ty, &fn_ty, &llargs, dest, debug_loc); + trans_intrinsic_call(&bcx, callee.ty, &fn_ty, &llargs, dest, debug_span); if let ReturnDest::IndirectOperand(dst, _) = ret_dest { // Make a fake operand for store_return @@ -645,7 +643,7 @@ impl<'bcx, 'tcx> MirContext<'bcx, 'tcx> { if let Some((_, target)) = *destination { let ret_bcx = self.build_block(target); ret_bcx.at_start(|ret_bcx| { - debug_loc.apply_to_bcx(ret_bcx); + bcx.set_source_location(scope, debug_span); let op = OperandRef { val: Immediate(invokeret), ty: sig.output(), @@ -885,7 +883,6 @@ impl<'bcx, 'tcx> MirContext<'bcx, 'tcx> { } CleanupKind::Funclet => { bcx.set_personality_fn(self.fcx.eh_personality()); - DebugLoc::None.apply_to_bcx(&bcx); let cleanup_pad = bcx.cleanup_pad(None, &[]); funclets[bb] = Funclet::msvc(cleanup_pad); } diff --git a/src/librustc_trans/mir/mod.rs b/src/librustc_trans/mir/mod.rs index c3cc5a7a9b39c..7ceba69c37192 100644 --- a/src/librustc_trans/mir/mod.rs +++ b/src/librustc_trans/mir/mod.rs @@ -10,16 +10,18 @@ use libc::c_uint; use llvm::{self, ValueRef, BasicBlockRef}; +use llvm::debuginfo::DIScope; use rustc::ty; use rustc::mir; use rustc::mir::tcx::LvalueTy; use session::config::FullDebugInfo; use base; use common::{self, BlockAndBuilder, CrateContext, FunctionContext, C_null, Funclet}; -use debuginfo::{self, declare_local, DebugLoc, VariableAccess, VariableKind, FunctionDebugContext}; +use debuginfo::{self, declare_local, VariableAccess, VariableKind, FunctionDebugContext}; +use machine; use type_of; -use syntax_pos::{DUMMY_SP, NO_EXPANSION, COMMAND_LINE_EXPN, BytePos}; +use syntax_pos::{DUMMY_SP, NO_EXPANSION, COMMAND_LINE_EXPN, BytePos, Span}; use syntax::symbol::keywords; use std::cell::Ref; @@ -88,15 +90,12 @@ pub struct MirContext<'bcx, 'tcx:'bcx> { } impl<'blk, 'tcx> MirContext<'blk, 'tcx> { - pub fn debug_loc(&mut self, source_info: mir::SourceInfo) -> DebugLoc { + pub fn debug_loc(&mut self, source_info: mir::SourceInfo) -> (DIScope, Span) { // Bail out if debug info emission is not 
enabled. match self.fcx.debug_context { FunctionDebugContext::DebugInfoDisabled | FunctionDebugContext::FunctionWithoutDebugInfo => { - // Can't return DebugLoc::None here because intrinsic::trans_intrinsic_call() - // relies on debug location to obtain span of the call site. - return DebugLoc::ScopeAt(self.scopes[source_info.scope].scope_metadata, - source_info.span); + return (self.scopes[source_info.scope].scope_metadata, source_info.span); } FunctionDebugContext::RegularContext(_) =>{} } @@ -109,8 +108,8 @@ impl<'blk, 'tcx> MirContext<'blk, 'tcx> { self.fcx.ccx.sess().opts.debugging_opts.debug_macros { let scope_metadata = self.scope_metadata_for_loc(source_info.scope, - source_info.span.lo); - DebugLoc::ScopeAt(scope_metadata, source_info.span) + source_info.span.lo); + (scope_metadata, source_info.span) } else { let cm = self.fcx.ccx.sess().codemap(); // Walk up the macro expansion chain until we reach a non-expanded span. @@ -125,7 +124,7 @@ impl<'blk, 'tcx> MirContext<'blk, 'tcx> { } let scope_metadata = self.scope_metadata_for_loc(source_info.scope, span.lo); // Use span of the outermost call site, while keeping the original lexical scope - DebugLoc::ScopeAt(scope_metadata, span) + (scope_metadata, span) } } @@ -236,14 +235,10 @@ pub fn trans_mir<'blk, 'tcx: 'blk>(fcx: &'blk FunctionContext<'blk, 'tcx>) { debug!("alloc: {:?} ({}) -> lvalue", local, name); let lvalue = LvalueRef::alloca(&bcx, ty, &name.as_str()); if dbg { - let dbg_loc = mircx.debug_loc(source_info); - if let DebugLoc::ScopeAt(scope, span) = dbg_loc { - declare_local(&bcx, name, ty, scope, - VariableAccess::DirectVariable { alloca: lvalue.llval }, - VariableKind::LocalVariable, span); - } else { - panic!("Unexpected"); - } + let (scope, span) = mircx.debug_loc(source_info); + declare_local(&bcx, name, ty, scope, + VariableAccess::DirectVariable { alloca: lvalue.llval }, + VariableKind::LocalVariable, span); } LocalRef::Lvalue(lvalue) } else { @@ -312,7 +307,6 @@ pub fn trans_mir<'blk, 'tcx: 'blk>(fcx: &'blk FunctionContext<'blk, 'tcx>) { } } - DebugLoc::None.apply(fcx); fcx.cleanup(); } diff --git a/src/librustc_trans/mir/statement.rs b/src/librustc_trans/mir/statement.rs index 47537c830dc3b..6cc3f6aad9fd6 100644 --- a/src/librustc_trans/mir/statement.rs +++ b/src/librustc_trans/mir/statement.rs @@ -25,9 +25,8 @@ impl<'bcx, 'tcx> MirContext<'bcx, 'tcx> { -> BlockAndBuilder<'bcx, 'tcx> { debug!("trans_statement(statement={:?})", statement); - let debug_loc = self.debug_loc(statement.source_info); - debug_loc.apply_to_bcx(&bcx); - debug_loc.apply(bcx.fcx()); + let (scope, span) = self.debug_loc(statement.source_info); + bcx.set_source_location(scope, span); match statement.kind { mir::StatementKind::Assign(ref lvalue, ref rvalue) => { if let mir::Lvalue::Local(index) = *lvalue { From 5262113fa6a4701d7f97a1d4e6f6ecf248b623f4 Mon Sep 17 00:00:00 2001 From: Mark-Simulacrum Date: Fri, 16 Dec 2016 14:10:32 -0700 Subject: [PATCH 037/103] Remove fcx.span --- src/librustc_trans/common.rs | 5 ----- src/librustc_trans/mir/mod.rs | 6 +++--- 2 files changed, 3 insertions(+), 8 deletions(-) diff --git a/src/librustc_trans/common.rs b/src/librustc_trans/common.rs index 7aceb0b88bb1c..093e57816b965 100644 --- a/src/librustc_trans/common.rs +++ b/src/librustc_trans/common.rs @@ -300,10 +300,6 @@ pub struct FunctionContext<'a, 'tcx: 'a> { // substitutions used. pub param_substs: &'tcx Substs<'tcx>, - // The source span and nesting context where this function comes from, for - // error reporting and symbol generation. 
- pub span: Option, - // This function's enclosing crate context. pub ccx: &'a CrateContext<'a, 'tcx>, @@ -360,7 +356,6 @@ impl<'a, 'tcx> FunctionContext<'a, 'tcx> { landingpad_alloca: Cell::new(None), fn_ty: fn_ty, param_substs: param_substs, - span: None, ccx: ccx, debug_context: debug_context, owned_builder: OwnedBuilder::new_with_ccx(ccx), diff --git a/src/librustc_trans/mir/mod.rs b/src/librustc_trans/mir/mod.rs index 7ceba69c37192..15683a6bb15d4 100644 --- a/src/librustc_trans/mir/mod.rs +++ b/src/librustc_trans/mir/mod.rs @@ -372,7 +372,7 @@ fn arg_local_refs<'bcx, 'tcx>(bcx: &BlockAndBuilder<'bcx, 'tcx>, declare_local(bcx, arg_decl.name.unwrap_or(keywords::Invalid.name()), arg_ty, scope, variable_access, VariableKind::ArgumentVariable(arg_index + 1), - bcx.fcx().span.unwrap_or(DUMMY_SP)); + DUMMY_SP); }); return LocalRef::Lvalue(LvalueRef::new_sized(lltemp, LvalueTy::from_ty(arg_ty))); @@ -444,7 +444,7 @@ fn arg_local_refs<'bcx, 'tcx>(bcx: &BlockAndBuilder<'bcx, 'tcx>, declare_local(bcx, arg_decl.name.unwrap_or(keywords::Invalid.name()), arg_ty, scope, VariableAccess::DirectVariable { alloca: llval }, VariableKind::ArgumentVariable(arg_index + 1), - bcx.fcx().span.unwrap_or(DUMMY_SP)); + DUMMY_SP); return; } @@ -513,7 +513,7 @@ fn arg_local_refs<'bcx, 'tcx>(bcx: &BlockAndBuilder<'bcx, 'tcx>, }; declare_local(bcx, decl.debug_name, ty, scope, variable_access, VariableKind::CapturedVariable, - bcx.fcx().span.unwrap_or(DUMMY_SP)); + DUMMY_SP); } }); LocalRef::Lvalue(LvalueRef::new_sized(llval, LvalueTy::from_ty(arg_ty))) From bf8614b55ad248c254be6c00f4eba1f88ac97740 Mon Sep 17 00:00:00 2001 From: Mark-Simulacrum Date: Fri, 16 Dec 2016 14:11:17 -0700 Subject: [PATCH 038/103] Rename Builder::alloca to dynamic_alloca --- src/librustc_trans/base.rs | 6 +----- src/librustc_trans/builder.rs | 2 +- src/librustc_trans/cleanup.rs | 2 +- src/librustc_trans/common.rs | 17 ++++++++--------- src/librustc_trans/mir/block.rs | 2 +- src/librustc_trans/mir/mod.rs | 4 +--- 6 files changed, 13 insertions(+), 20 deletions(-) diff --git a/src/librustc_trans/base.rs b/src/librustc_trans/base.rs index b1be2e4abe6de..6aee8fa5f97c5 100644 --- a/src/librustc_trans/base.rs +++ b/src/librustc_trans/base.rs @@ -645,11 +645,7 @@ pub fn alloc_ty<'blk, 'tcx>(bcx: &BlockAndBuilder<'blk, 'tcx>, ty: Ty<'tcx>, name: &str) -> ValueRef { assert!(!ty.has_param_types()); - alloca(bcx, type_of::type_of(bcx.ccx(), ty), name) -} - -pub fn alloca(cx: &BlockAndBuilder, ty: Type, name: &str) -> ValueRef { - cx.fcx().alloca(ty, name) + bcx.fcx().alloca(type_of::type_of(bcx.ccx(), ty), name) } impl<'blk, 'tcx> FunctionContext<'blk, 'tcx> { diff --git a/src/librustc_trans/builder.rs b/src/librustc_trans/builder.rs index d09f049ca18d9..8c6a53da0e197 100644 --- a/src/librustc_trans/builder.rs +++ b/src/librustc_trans/builder.rs @@ -462,7 +462,7 @@ impl<'a, 'tcx> Builder<'a, 'tcx> { } } - pub fn alloca(&self, ty: Type, name: &str) -> ValueRef { + pub fn dynamic_alloca(&self, ty: Type, name: &str) -> ValueRef { self.count_insn("alloca"); unsafe { if name.is_empty() { diff --git a/src/librustc_trans/cleanup.rs b/src/librustc_trans/cleanup.rs index bd387ccae2055..dfcaedcba7c4c 100644 --- a/src/librustc_trans/cleanup.rs +++ b/src/librustc_trans/cleanup.rs @@ -197,7 +197,7 @@ impl<'tcx> CleanupScope<'tcx> { let addr = match fcx.landingpad_alloca.get() { Some(addr) => addr, None => { - let addr = base::alloca(&pad_bcx, common::val_ty(llretval), ""); + let addr = pad_bcx.fcx().alloca(common::val_ty(llretval), ""); 
Lifetime::Start.call(&pad_bcx, addr); fcx.landingpad_alloca.set(Some(addr)); addr diff --git a/src/librustc_trans/common.rs b/src/librustc_trans/common.rs index 093e57816b965..5d49030755228 100644 --- a/src/librustc_trans/common.rs +++ b/src/librustc_trans/common.rs @@ -306,7 +306,7 @@ pub struct FunctionContext<'a, 'tcx: 'a> { // Used and maintained by the debuginfo module. pub debug_context: debuginfo::FunctionDebugContext, - owned_builder: OwnedBuilder<'a, 'tcx>, + alloca_builder: OwnedBuilder<'a, 'tcx>, } impl<'a, 'tcx> FunctionContext<'a, 'tcx> { @@ -358,7 +358,7 @@ impl<'a, 'tcx> FunctionContext<'a, 'tcx> { param_substs: param_substs, ccx: ccx, debug_context: debug_context, - owned_builder: OwnedBuilder::new_with_ccx(ccx), + alloca_builder: OwnedBuilder::new_with_ccx(ccx), } } @@ -374,23 +374,22 @@ impl<'a, 'tcx> FunctionContext<'a, 'tcx> { llvm::LLVMGetFirstInstruction(entry_bcx.llbb()) })); - self.owned_builder.builder.position_at_start(entry_bcx.llbb()); + self.alloca_builder.builder.position_at_start(entry_bcx.llbb()); if !self.fn_ty.ret.is_ignore() && !skip_retptr { // We normally allocate the llretslotptr, unless we // have been instructed to skip it for immediate return // values, or there is nothing to return at all. - // We create an alloca to hold a pointer of type `ret.original_ty` - // which will hold the pointer to the right alloca which has the - // final ret value - let llty = self.fn_ty.ret.memory_ty(self.ccx); // But if there are no nested returns, we skip the indirection // and have a single retslot let slot = if self.fn_ty.ret.is_indirect() { get_param(self.llfn, 0) } else { - self.alloca(llty, "sret_slot") + // We create an alloca to hold a pointer of type `ret.original_ty` + // which will hold the pointer to the right alloca which has the + // final ret value + self.alloca(self.fn_ty.ret.memory_ty(self.ccx), "sret_slot") }; self.llretslotptr.set(Some(slot)); @@ -514,7 +513,7 @@ impl<'a, 'tcx> FunctionContext<'a, 'tcx> { } pub fn alloca(&self, ty: Type, name: &str) -> ValueRef { - self.owned_builder.builder.alloca(ty, name) + self.alloca_builder.builder.dynamic_alloca(ty, name) } } diff --git a/src/librustc_trans/mir/block.rs b/src/librustc_trans/mir/block.rs index 88e623f1ecc54..45f9279049cd7 100644 --- a/src/librustc_trans/mir/block.rs +++ b/src/librustc_trans/mir/block.rs @@ -828,7 +828,7 @@ impl<'bcx, 'tcx> MirContext<'bcx, 'tcx> { slot } else { let llretty = Type::struct_(ccx, &[Type::i8p(ccx), Type::i32(ccx)], false); - let slot = base::alloca(bcx, llretty, "personalityslot"); + let slot = bcx.fcx().alloca(llretty, "personalityslot"); self.llpersonalityslot = Some(slot); Lifetime::Start.call(bcx, slot); slot diff --git a/src/librustc_trans/mir/mod.rs b/src/librustc_trans/mir/mod.rs index 15683a6bb15d4..c721fa0d2ac78 100644 --- a/src/librustc_trans/mir/mod.rs +++ b/src/librustc_trans/mir/mod.rs @@ -468,9 +468,7 @@ fn arg_local_refs<'bcx, 'tcx>(bcx: &BlockAndBuilder<'bcx, 'tcx>, // doesn't actually strip the offset when splitting the closure // environment into its components so it ends up out of bounds. let env_ptr = if !env_ref { - use base::*; - use common::*; - let alloc = alloca(bcx, val_ty(llval), "__debuginfo_env_ptr"); + let alloc = bcx.fcx().alloca(common::val_ty(llval), "__debuginfo_env_ptr"); bcx.store(llval, alloc); alloc } else { From cbbdb73eb07fb9a29e20f4fb16b8f1c059e9763f Mon Sep 17 00:00:00 2001 From: Mark-Simulacrum Date: Fri, 16 Dec 2016 16:00:17 -0700 Subject: [PATCH 039/103] Remove FunctionContext::cleanup, replacing it with a Drop impl. 
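
Previously, every path that created a FunctionContext had to remember to
call fcx.cleanup() to erase the dummy `load` used as the alloca
insertion point; a forgotten call left that dead instruction in the
entry block. Tying the erasure to Drop makes it automatic. The impl, as
added to common.rs (comments mine):

    impl<'a, 'tcx> Drop for FunctionContext<'a, 'tcx> {
        fn drop(&mut self) {
            // Erase the dummy alloca-insertion-point instruction now
            // that the function body has been fully built.
            unsafe {
                llvm::LLVMInstructionEraseFromParent(self.alloca_insert_pt.unwrap());
            }
        }
    }
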
Move alloca and initial entry block creation into FunctionContext::new. --- src/librustc_llvm/ffi.rs | 1 + src/librustc_trans/base.rs | 1 - src/librustc_trans/common.rs | 49 ++++++++++++++++++--------------- src/librustc_trans/intrinsic.rs | 1 - src/librustc_trans/mir/mod.rs | 2 -- 5 files changed, 28 insertions(+), 26 deletions(-) diff --git a/src/librustc_llvm/ffi.rs b/src/librustc_llvm/ffi.rs index f3d4c17654dcc..d2b86ade7a2ab 100644 --- a/src/librustc_llvm/ffi.rs +++ b/src/librustc_llvm/ffi.rs @@ -710,6 +710,7 @@ extern "C" { // Operations on instructions pub fn LLVMGetInstructionParent(Inst: ValueRef) -> BasicBlockRef; + pub fn LLVMGetFirstBasicBlock(Fn: ValueRef) -> BasicBlockRef; pub fn LLVMGetFirstInstruction(BB: BasicBlockRef) -> ValueRef; pub fn LLVMInstructionEraseFromParent(Inst: ValueRef); diff --git a/src/librustc_trans/base.rs b/src/librustc_trans/base.rs index 6aee8fa5f97c5..e8a75d26534c8 100644 --- a/src/librustc_trans/base.rs +++ b/src/librustc_trans/base.rs @@ -653,7 +653,6 @@ impl<'blk, 'tcx> FunctionContext<'blk, 'tcx> { /// and builds the return block. pub fn finish(&'blk self, ret_cx: &BlockAndBuilder<'blk, 'tcx>) { self.build_return_block(ret_cx); - self.cleanup(); } // Builds the return block for a function. diff --git a/src/librustc_trans/common.rs b/src/librustc_trans/common.rs index 5d49030755228..11c995accc53b 100644 --- a/src/librustc_trans/common.rs +++ b/src/librustc_trans/common.rs @@ -283,7 +283,7 @@ pub struct FunctionContext<'a, 'tcx: 'a> { // the function, due to LLVM's quirks. // A marker for the place where we want to insert the function's static // allocas, so that LLVM will coalesce them into a single alloca call. - pub alloca_insert_pt: Cell>, + alloca_insert_pt: Option, // When working with landingpad-based exceptions this value is alloca'd and // later loaded when using the resume instruction. This ends up being @@ -347,35 +347,37 @@ impl<'a, 'tcx> FunctionContext<'a, 'tcx> { debuginfo::empty_function_debug_context(ccx) }; - FunctionContext { + let mut fcx = FunctionContext { mir: mir, llfn: llfndecl, llretslotptr: Cell::new(None), param_env: ccx.tcx().empty_parameter_environment(), - alloca_insert_pt: Cell::new(None), + alloca_insert_pt: None, landingpad_alloca: Cell::new(None), fn_ty: fn_ty, param_substs: param_substs, ccx: ccx, debug_context: debug_context, alloca_builder: OwnedBuilder::new_with_ccx(ccx), - } - } + }; - /// Performs setup on a newly created function, creating the entry - /// scope block and allocating space for the return pointer. - pub fn init(&'a self, skip_retptr: bool) -> BlockAndBuilder<'a, 'tcx> { - let entry_bcx = self.build_new_block("entry-block"); + let val = { + let entry_bcx = fcx.build_new_block("entry-block"); + let val = entry_bcx.load(C_null(Type::i8p(ccx))); + fcx.alloca_builder.builder.position_at_start(entry_bcx.llbb()); + val + }; // Use a dummy instruction as the insertion point for all allocas. - // This is later removed in FunctionContext::cleanup. - self.alloca_insert_pt.set(Some(unsafe { - entry_bcx.load(C_null(Type::i8p(self.ccx))); - llvm::LLVMGetFirstInstruction(entry_bcx.llbb()) - })); + // This is later removed in the drop of FunctionContext. + fcx.alloca_insert_pt = Some(val); - self.alloca_builder.builder.position_at_start(entry_bcx.llbb()); + fcx + } + /// Performs setup on a newly created function, creating the entry + /// scope block and allocating space for the return pointer. 
+ pub fn init(&'a self, skip_retptr: bool) -> BlockAndBuilder<'a, 'tcx> { if !self.fn_ty.ret.is_ignore() && !skip_retptr { // We normally allocate the llretslotptr, unless we // have been instructed to skip it for immediate return @@ -395,19 +397,15 @@ impl<'a, 'tcx> FunctionContext<'a, 'tcx> { self.llretslotptr.set(Some(slot)); } - entry_bcx + BlockAndBuilder::new(unsafe { + llvm::LLVMGetFirstBasicBlock(self.llfn) + }, self) } pub fn mir(&self) -> Ref<'tcx, Mir<'tcx>> { self.mir.as_ref().map(Ref::clone).expect("fcx.mir was empty") } - pub fn cleanup(&self) { - unsafe { - llvm::LLVMInstructionEraseFromParent(self.alloca_insert_pt.get().unwrap()); - } - } - pub fn new_block(&'a self, name: &str) -> BasicBlockRef { unsafe { let name = CString::new(name).unwrap(); @@ -517,6 +515,13 @@ impl<'a, 'tcx> FunctionContext<'a, 'tcx> { } } +impl<'a, 'tcx> Drop for FunctionContext<'a, 'tcx> { + fn drop(&mut self) { + unsafe { + llvm::LLVMInstructionEraseFromParent(self.alloca_insert_pt.unwrap()); + } + } +} pub struct OwnedBuilder<'blk, 'tcx: 'blk> { builder: Builder<'blk, 'tcx> diff --git a/src/librustc_trans/intrinsic.rs b/src/librustc_trans/intrinsic.rs index 84a6406c8e7f7..716cbec718e3c 100644 --- a/src/librustc_trans/intrinsic.rs +++ b/src/librustc_trans/intrinsic.rs @@ -967,7 +967,6 @@ fn gen_fn<'a, 'tcx>(fcx: &FunctionContext<'a, 'tcx>, let llfn = declare::define_internal_fn(ccx, name, rust_fn_ty); let fcx = FunctionContext::new(ccx, llfn, fn_ty, None); trans(fcx.init(true)); - fcx.cleanup(); llfn } diff --git a/src/librustc_trans/mir/mod.rs b/src/librustc_trans/mir/mod.rs index c721fa0d2ac78..6c72791ec0fc6 100644 --- a/src/librustc_trans/mir/mod.rs +++ b/src/librustc_trans/mir/mod.rs @@ -306,8 +306,6 @@ pub fn trans_mir<'blk, 'tcx: 'blk>(fcx: &'blk FunctionContext<'blk, 'tcx>) { block.delete(); } } - - fcx.cleanup(); } /// Produce, for each argument, a `ValueRef` pointing at the From c4f6173af8cccda2154211a6de41c3eaa4bceaac Mon Sep 17 00:00:00 2001 From: Mark-Simulacrum Date: Fri, 16 Dec 2016 16:45:52 -0700 Subject: [PATCH 040/103] Replace init with get_entry_block. --- src/librustc_trans/base.rs | 12 ++++----- src/librustc_trans/callee.rs | 18 +++++-------- src/librustc_trans/common.rs | 46 ++++++++++++++++----------------- src/librustc_trans/glue.rs | 5 ++-- src/librustc_trans/intrinsic.rs | 4 +-- src/librustc_trans/meth.rs | 8 +++--- src/librustc_trans/mir/mod.rs | 2 +- 7 files changed, 44 insertions(+), 51 deletions(-) diff --git a/src/librustc_trans/base.rs b/src/librustc_trans/base.rs index e8a75d26534c8..30c1074b89c1e 100644 --- a/src/librustc_trans/base.rs +++ b/src/librustc_trans/base.rs @@ -657,11 +657,11 @@ impl<'blk, 'tcx> FunctionContext<'blk, 'tcx> { // Builds the return block for a function. 
pub fn build_return_block(&self, ret_cx: &BlockAndBuilder<'blk, 'tcx>) { - if self.llretslotptr.get().is_none() || self.fn_ty.ret.is_indirect() { + if self.llretslotptr.is_none() || self.fn_ty.ret.is_indirect() { return ret_cx.ret_void(); } - let retslot = self.llretslotptr.get().unwrap(); + let retslot = self.llretslotptr.unwrap(); let retptr = Value(retslot); let llty = self.fn_ty.ret.original_ty; match (retptr.get_dominating_store(ret_cx), self.fn_ty.ret.cast) { @@ -751,7 +751,7 @@ pub fn trans_instance<'a, 'tcx>(ccx: &CrateContext<'a, 'tcx>, instance: Instance let fn_ty = FnType::new(ccx, abi, &sig, &[]); - let fcx = FunctionContext::new(ccx, lldecl, fn_ty, Some((instance, &sig, abi))); + let fcx = FunctionContext::new(ccx, lldecl, fn_ty, Some((instance, &sig, abi)), true); if fcx.mir.is_none() { bug!("attempted translation of `{}` w/o MIR", instance); @@ -774,11 +774,11 @@ pub fn trans_ctor_shim<'a, 'tcx>(ccx: &CrateContext<'a, 'tcx>, let sig = ccx.tcx().erase_late_bound_regions_and_normalize(&ctor_ty.fn_sig()); let fn_ty = FnType::new(ccx, Abi::Rust, &sig, &[]); - let fcx = FunctionContext::new(ccx, llfndecl, fn_ty, None); - let bcx = fcx.init(false); + let fcx = FunctionContext::new(ccx, llfndecl, fn_ty, None, false); + let bcx = fcx.get_entry_block(); if !fcx.fn_ty.ret.is_ignore() { - let dest = fcx.llretslotptr.get().unwrap(); + let dest = fcx.llretslotptr.unwrap(); let dest_val = adt::MaybeSizedValue::sized(dest); // Can return unsized value let mut llarg_idx = fcx.fn_ty.ret.is_indirect() as usize; let mut arg_idx = 0; diff --git a/src/librustc_trans/callee.rs b/src/librustc_trans/callee.rs index 4f6165e1dbe8b..8c880210f62f0 100644 --- a/src/librustc_trans/callee.rs +++ b/src/librustc_trans/callee.rs @@ -347,8 +347,8 @@ fn trans_fn_once_adapter_shim<'a, 'tcx>( let lloncefn = declare::define_internal_fn(ccx, &function_name, llonce_fn_ty); attributes::set_frame_pointer_elimination(ccx, lloncefn); - let fcx = FunctionContext::new(ccx, lloncefn, fn_ty, None); - let mut bcx = fcx.init(false); + let fcx = FunctionContext::new(ccx, lloncefn, fn_ty, None, false); + let mut bcx = fcx.get_entry_block(); // the first argument (`self`) will be the (by value) closure env. 
@@ -378,8 +378,6 @@ fn trans_fn_once_adapter_shim<'a, 'tcx>( llargs[self_idx] = llenv; } - let dest = fcx.llretslotptr.get(); - let callee = Callee { data: Fn(llreffn), ty: llref_fn_ty @@ -392,7 +390,7 @@ fn trans_fn_once_adapter_shim<'a, 'tcx>( let fn_ty = callee.direct_fn_type(bcx.ccx(), &[]); let first_llarg = if fn_ty.ret.is_indirect() { - dest + fcx.llretslotptr } else { None }; @@ -411,7 +409,7 @@ fn trans_fn_once_adapter_shim<'a, 'tcx>( fn_ty.apply_attrs_callsite(llret); if !fn_ty.ret.is_indirect() { - if let Some(llretslot) = dest { + if let Some(llretslot) = fcx.llretslotptr { fn_ty.ret.store(&bcx, llret, llretslot); } } @@ -521,8 +519,8 @@ fn trans_fn_pointer_shim<'a, 'tcx>( let llfn = declare::define_internal_fn(ccx, &function_name, tuple_fn_ty); attributes::set_frame_pointer_elimination(ccx, llfn); // - let fcx = FunctionContext::new(ccx, llfn, fn_ty, None); - let bcx = fcx.init(false); + let fcx = FunctionContext::new(ccx, llfn, fn_ty, None, false); + let bcx = fcx.get_entry_block(); let llargs = get_params(fcx.llfn); @@ -536,13 +534,11 @@ fn trans_fn_pointer_shim<'a, 'tcx>( } }); - let dest = fcx.llretslotptr.get(); - let callee = Callee { data: Fn(llfnpointer), ty: bare_fn_ty }; - let bcx = callee.call(bcx, &llargs[(self_idx + 1)..], dest, None).0; + let bcx = callee.call(bcx, &llargs[(self_idx + 1)..], fcx.llretslotptr, None).0; fcx.finish(&bcx); ccx.fn_pointer_shims().borrow_mut().insert(bare_fn_ty_maybe_ref, llfn); diff --git a/src/librustc_trans/common.rs b/src/librustc_trans/common.rs index 11c995accc53b..610c4cfd10879 100644 --- a/src/librustc_trans/common.rs +++ b/src/librustc_trans/common.rs @@ -276,7 +276,7 @@ pub struct FunctionContext<'a, 'tcx: 'a> { // immediate, this points to an alloca in the function. Otherwise, it's a // pointer to the hidden first parameter of the function. After function // construction, this should always be Some. - pub llretslotptr: Cell>, + pub llretslotptr: Option, // These pub elements: "hoisted basic blocks" containing // administrative activities that have to happen in only one place in @@ -311,13 +311,14 @@ pub struct FunctionContext<'a, 'tcx: 'a> { impl<'a, 'tcx> FunctionContext<'a, 'tcx> { /// Create a function context for the given function. - /// Beware that you must call `fcx.init` before doing anything with the returned function - /// context. - pub fn new(ccx: &'a CrateContext<'a, 'tcx>, - llfndecl: ValueRef, - fn_ty: FnType, - definition: Option<(Instance<'tcx>, &ty::FnSig<'tcx>, Abi)>) - -> FunctionContext<'a, 'tcx> { + /// Call FunctionContext::get_entry_block for the first entry block. + pub fn new( + ccx: &'a CrateContext<'a, 'tcx>, + llfndecl: ValueRef, + fn_ty: FnType, + definition: Option<(Instance<'tcx>, &ty::FnSig<'tcx>, Abi)>, + skip_retptr: bool, + ) -> FunctionContext<'a, 'tcx> { let (param_substs, def_id) = match definition { Some((instance, ..)) => { validate_substs(instance.substs); @@ -350,7 +351,7 @@ impl<'a, 'tcx> FunctionContext<'a, 'tcx> { let mut fcx = FunctionContext { mir: mir, llfn: llfndecl, - llretslotptr: Cell::new(None), + llretslotptr: None, param_env: ccx.tcx().empty_parameter_environment(), alloca_insert_pt: None, landingpad_alloca: Cell::new(None), @@ -372,31 +373,28 @@ impl<'a, 'tcx> FunctionContext<'a, 'tcx> { // This is later removed in the drop of FunctionContext. fcx.alloca_insert_pt = Some(val); - fcx - } - - /// Performs setup on a newly created function, creating the entry - /// scope block and allocating space for the return pointer. 
- pub fn init(&'a self, skip_retptr: bool) -> BlockAndBuilder<'a, 'tcx> { - if !self.fn_ty.ret.is_ignore() && !skip_retptr { - // We normally allocate the llretslotptr, unless we - // have been instructed to skip it for immediate return - // values, or there is nothing to return at all. - + // We normally allocate the llretslotptr, unless we + // have been instructed to skip it for immediate return + // values, or there is nothing to return at all. + if !fcx.fn_ty.ret.is_ignore() && !skip_retptr { // But if there are no nested returns, we skip the indirection // and have a single retslot - let slot = if self.fn_ty.ret.is_indirect() { - get_param(self.llfn, 0) + let slot = if fcx.fn_ty.ret.is_indirect() { + get_param(fcx.llfn, 0) } else { // We create an alloca to hold a pointer of type `ret.original_ty` // which will hold the pointer to the right alloca which has the // final ret value - self.alloca(self.fn_ty.ret.memory_ty(self.ccx), "sret_slot") + fcx.alloca(fcx.fn_ty.ret.memory_ty(ccx), "sret_slot") }; - self.llretslotptr.set(Some(slot)); + fcx.llretslotptr = Some(slot); } + fcx + } + + pub fn get_entry_block(&'a self) -> BlockAndBuilder<'a, 'tcx> { BlockAndBuilder::new(unsafe { llvm::LLVMGetFirstBasicBlock(self.llfn) }, self) diff --git a/src/librustc_trans/glue.rs b/src/librustc_trans/glue.rs index 8ad951c5ade66..1f58327555e25 100644 --- a/src/librustc_trans/glue.rs +++ b/src/librustc_trans/glue.rs @@ -202,9 +202,8 @@ pub fn implement_drop_glue<'a, 'tcx>(ccx: &CrateContext<'a, 'tcx>, assert_eq!(g.ty(), get_drop_glue_type(tcx, g.ty())); let (llfn, fn_ty) = ccx.drop_glues().borrow().get(&g).unwrap().clone(); - let fcx = FunctionContext::new(ccx, llfn, fn_ty, None); - - let bcx = fcx.init(false); + let fcx = FunctionContext::new(ccx, llfn, fn_ty, None, false); + let bcx = fcx.get_entry_block(); ccx.stats().n_glues_created.set(ccx.stats().n_glues_created.get() + 1); // All glue functions take values passed *by alias*; this is a diff --git a/src/librustc_trans/intrinsic.rs b/src/librustc_trans/intrinsic.rs index 716cbec718e3c..ec6b908c26485 100644 --- a/src/librustc_trans/intrinsic.rs +++ b/src/librustc_trans/intrinsic.rs @@ -965,8 +965,8 @@ fn gen_fn<'a, 'tcx>(fcx: &FunctionContext<'a, 'tcx>, sig: ty::Binder(sig) })); let llfn = declare::define_internal_fn(ccx, name, rust_fn_ty); - let fcx = FunctionContext::new(ccx, llfn, fn_ty, None); - trans(fcx.init(true)); + let fcx = FunctionContext::new(ccx, llfn, fn_ty, None, true); + trans(fcx.get_entry_block()); llfn } diff --git a/src/librustc_trans/meth.rs b/src/librustc_trans/meth.rs index 82c1fa94a4e99..99330c1253765 100644 --- a/src/librustc_trans/meth.rs +++ b/src/librustc_trans/meth.rs @@ -81,12 +81,12 @@ pub fn trans_object_shim<'a, 'tcx>(ccx: &'a CrateContext<'a, 'tcx>, let llfn = declare::define_internal_fn(ccx, &function_name, callee.ty); attributes::set_frame_pointer_elimination(ccx, llfn); - let fcx = FunctionContext::new(ccx, llfn, fn_ty, None); - let mut bcx = fcx.init(false); + let fcx = FunctionContext::new(ccx, llfn, fn_ty, None, false); + let mut bcx = fcx.get_entry_block(); - let dest = fcx.llretslotptr.get(); let llargs = get_params(fcx.llfn); - bcx = callee.call(bcx, &llargs[fcx.fn_ty.ret.is_indirect() as usize..], dest, None).0; + bcx = callee.call(bcx, &llargs[fcx.fn_ty.ret.is_indirect() as usize..], fcx.llretslotptr, + None).0; fcx.finish(&bcx); diff --git a/src/librustc_trans/mir/mod.rs b/src/librustc_trans/mir/mod.rs index 6c72791ec0fc6..f02a8f725f78d 100644 --- a/src/librustc_trans/mir/mod.rs +++ 
b/src/librustc_trans/mir/mod.rs @@ -180,7 +180,7 @@ impl<'tcx> LocalRef<'tcx> { /////////////////////////////////////////////////////////////////////////// pub fn trans_mir<'blk, 'tcx: 'blk>(fcx: &'blk FunctionContext<'blk, 'tcx>) { - let bcx = fcx.init(true); + let bcx = fcx.get_entry_block(); let mir = fcx.mir(); // Analyze the temps to determine which must be lvalues From 8ed11209d6f7ae0ca8ea21a1ffed902345a4217f Mon Sep 17 00:00:00 2001 From: Mark-Simulacrum Date: Fri, 16 Dec 2016 16:50:53 -0700 Subject: [PATCH 041/103] Minor cleanup to context --- src/librustc_trans/context.rs | 50 +---------------------------------- 1 file changed, 1 insertion(+), 49 deletions(-) diff --git a/src/librustc_trans/context.rs b/src/librustc_trans/context.rs index 4f524af29456e..0f1fe15a7ecb5 100644 --- a/src/librustc_trans/context.rs +++ b/src/librustc_trans/context.rs @@ -315,38 +315,6 @@ impl<'a, 'tcx> Iterator for CrateContextIterator<'a,'tcx> { } } -/// The iterator produced by `CrateContext::maybe_iter`. -pub struct CrateContextMaybeIterator<'a, 'tcx: 'a> { - shared: &'a SharedCrateContext<'a, 'tcx>, - local_ccxs: &'a [LocalCrateContext<'tcx>], - index: usize, - single: bool, - origin: usize, -} - -impl<'a, 'tcx> Iterator for CrateContextMaybeIterator<'a, 'tcx> { - type Item = (CrateContext<'a, 'tcx>, bool); - - fn next(&mut self) -> Option<(CrateContext<'a, 'tcx>, bool)> { - if self.index >= self.local_ccxs.len() { - return None; - } - - let index = self.index; - self.index += 1; - if self.single { - self.index = self.local_ccxs.len(); - } - - let ccx = CrateContext { - shared: self.shared, - index: index, - local_ccxs: self.local_ccxs - }; - Some((ccx, index == self.origin)) - } -} - pub fn get_reloc_model(sess: &Session) -> llvm::RelocMode { let reloc_model_arg = match sess.opts.cg.relocation_model { Some(ref s) => &s[..], @@ -702,26 +670,10 @@ impl<'b, 'tcx> CrateContext<'b, 'tcx> { self.shared } - pub fn local(&self) -> &'b LocalCrateContext<'tcx> { + fn local(&self) -> &'b LocalCrateContext<'tcx> { &self.local_ccxs[self.index] } - /// Either iterate over only `self`, or iterate over all `CrateContext`s in - /// the `SharedCrateContext`. The iterator produces `(ccx, is_origin)` - /// pairs, where `is_origin` is `true` if `ccx` is `self` and `false` - /// otherwise. This method is useful for avoiding code duplication in - /// cases where it may or may not be necessary to translate code into every - /// context. 
- pub fn maybe_iter(&self, iter_all: bool) -> CrateContextMaybeIterator<'b, 'tcx> { - CrateContextMaybeIterator { - shared: self.shared, - index: if iter_all { 0 } else { self.index }, - single: !iter_all, - origin: self.index, - local_ccxs: self.local_ccxs, - } - } - pub fn tcx<'a>(&'a self) -> TyCtxt<'a, 'tcx, 'tcx> { self.shared.tcx } From 755850f31863d19b22083eaa2bbdc2c495f0b072 Mon Sep 17 00:00:00 2001 From: Mark-Simulacrum Date: Fri, 16 Dec 2016 17:39:35 -0700 Subject: [PATCH 042/103] Merge OwnedBuilder and Builder --- src/librustc_trans/builder.rs | 16 +++++++++++-- src/librustc_trans/common.rs | 45 +++++++---------------------------- 2 files changed, 23 insertions(+), 38 deletions(-) diff --git a/src/librustc_trans/builder.rs b/src/librustc_trans/builder.rs index 8c6a53da0e197..b710c08e1a473 100644 --- a/src/librustc_trans/builder.rs +++ b/src/librustc_trans/builder.rs @@ -32,6 +32,14 @@ pub struct Builder<'a, 'tcx: 'a> { pub ccx: &'a CrateContext<'a, 'tcx>, } +impl<'blk, 'tcx> Drop for Builder<'blk, 'tcx> { + fn drop(&mut self) { + unsafe { + llvm::LLVMDisposeBuilder(self.llbuilder); + } + } +} + // This is a really awful way to get a zero-length c-string, but better (and a // lot more efficient) than doing str::as_c_str("", ...) every time. pub fn noname() -> *const c_char { @@ -40,9 +48,13 @@ pub fn noname() -> *const c_char { } impl<'a, 'tcx> Builder<'a, 'tcx> { - pub fn new(ccx: &'a CrateContext<'a, 'tcx>) -> Builder<'a, 'tcx> { + pub fn with_ccx(ccx: &'a CrateContext<'a, 'tcx>) -> Self { + // Create a fresh builder from the crate context. + let llbuilder = unsafe { + llvm::LLVMCreateBuilderInContext(ccx.llcx()) + }; Builder { - llbuilder: ccx.raw_builder(), + llbuilder: llbuilder, ccx: ccx, } } diff --git a/src/librustc_trans/common.rs b/src/librustc_trans/common.rs index 610c4cfd10879..d09cb8ce2c8a8 100644 --- a/src/librustc_trans/common.rs +++ b/src/librustc_trans/common.rs @@ -306,7 +306,7 @@ pub struct FunctionContext<'a, 'tcx: 'a> { // Used and maintained by the debuginfo module. pub debug_context: debuginfo::FunctionDebugContext, - alloca_builder: OwnedBuilder<'a, 'tcx>, + alloca_builder: Builder<'a, 'tcx>, } impl<'a, 'tcx> FunctionContext<'a, 'tcx> { @@ -359,13 +359,13 @@ impl<'a, 'tcx> FunctionContext<'a, 'tcx> { param_substs: param_substs, ccx: ccx, debug_context: debug_context, - alloca_builder: OwnedBuilder::new_with_ccx(ccx), + alloca_builder: Builder::with_ccx(ccx), }; let val = { let entry_bcx = fcx.build_new_block("entry-block"); let val = entry_bcx.load(C_null(Type::i8p(ccx))); - fcx.alloca_builder.builder.position_at_start(entry_bcx.llbb()); + fcx.alloca_builder.position_at_start(entry_bcx.llbb()); val }; @@ -509,7 +509,7 @@ impl<'a, 'tcx> FunctionContext<'a, 'tcx> { } pub fn alloca(&self, ty: Type, name: &str) -> ValueRef { - self.alloca_builder.builder.dynamic_alloca(ty, name) + self.alloca_builder.dynamic_alloca(ty, name) } } @@ -521,33 +521,6 @@ impl<'a, 'tcx> Drop for FunctionContext<'a, 'tcx> { } } -pub struct OwnedBuilder<'blk, 'tcx: 'blk> { - builder: Builder<'blk, 'tcx> -} - -impl<'blk, 'tcx> OwnedBuilder<'blk, 'tcx> { - pub fn new_with_ccx(ccx: &'blk CrateContext<'blk, 'tcx>) -> Self { - // Create a fresh builder from the crate context. 
- let llbuilder = unsafe { - llvm::LLVMCreateBuilderInContext(ccx.llcx()) - }; - OwnedBuilder { - builder: Builder { - llbuilder: llbuilder, - ccx: ccx, - } - } - } -} - -impl<'blk, 'tcx> Drop for OwnedBuilder<'blk, 'tcx> { - fn drop(&mut self) { - unsafe { - llvm::LLVMDisposeBuilder(self.builder.llbuilder); - } - } -} - #[must_use] pub struct BlockAndBuilder<'blk, 'tcx: 'blk> { // The BasicBlockRef returned from a call to @@ -561,18 +534,18 @@ pub struct BlockAndBuilder<'blk, 'tcx: 'blk> { // attached. fcx: &'blk FunctionContext<'blk, 'tcx>, - owned_builder: OwnedBuilder<'blk, 'tcx>, + builder: Builder<'blk, 'tcx>, } impl<'blk, 'tcx> BlockAndBuilder<'blk, 'tcx> { pub fn new(llbb: BasicBlockRef, fcx: &'blk FunctionContext<'blk, 'tcx>) -> Self { - let owned_builder = OwnedBuilder::new_with_ccx(fcx.ccx); + let builder = Builder::with_ccx(fcx.ccx); // Set the builder's position to this block's end. - owned_builder.builder.position_at_end(llbb); + builder.position_at_end(llbb); BlockAndBuilder { llbb: llbb, fcx: fcx, - owned_builder: owned_builder, + builder: builder, } } @@ -610,7 +583,7 @@ impl<'blk, 'tcx> BlockAndBuilder<'blk, 'tcx> { impl<'blk, 'tcx> Deref for BlockAndBuilder<'blk, 'tcx> { type Target = Builder<'blk, 'tcx>; fn deref(&self) -> &Self::Target { - &self.owned_builder.builder + &self.builder } } From 85ab08084d3de9aa96c62091588311c6d3e7e270 Mon Sep 17 00:00:00 2001 From: Mark-Simulacrum Date: Fri, 16 Dec 2016 18:48:25 -0700 Subject: [PATCH 043/103] Remove global builder --- src/librustc_trans/base.rs | 61 +++++++--------------- src/librustc_trans/common.rs | 20 +------ src/librustc_trans/context.rs | 12 +---- src/librustc_trans/debuginfo/gdb.rs | 27 +++------- src/librustc_trans/debuginfo/mod.rs | 27 ++++------ src/librustc_trans/debuginfo/source_loc.rs | 20 ++----- 6 files changed, 43 insertions(+), 124 deletions(-) diff --git a/src/librustc_trans/base.rs b/src/librustc_trans/base.rs index 30c1074b89c1e..7e03b196cfe37 100644 --- a/src/librustc_trans/base.rs +++ b/src/librustc_trans/base.rs @@ -49,7 +49,7 @@ use session::{self, DataTypeKind, Session}; use abi::{self, Abi, FnType}; use adt; use attributes; -use builder::{Builder, noname}; +use builder::Builder; use callee::{Callee}; use common::{BlockAndBuilder, C_bool, C_bytes_in_context, C_i32, C_uint}; use collector::{self, TransItemCollectionMode}; @@ -80,7 +80,6 @@ use util::nodemap::{NodeSet, FxHashMap, FxHashSet}; use libc::c_uint; use std::ffi::{CStr, CString}; use std::cell::RefCell; -use std::ptr; use std::rc::Rc; use std::str; use std::i32; @@ -870,9 +869,7 @@ pub fn maybe_create_entry_wrapper(ccx: &CrateContext) { let et = ccx.sess().entry_type.get().unwrap(); match et { - config::EntryMain => { - create_entry_fn(ccx, span, main_llfn, true); - } + config::EntryMain => create_entry_fn(ccx, span, main_llfn, true), config::EntryStart => create_entry_fn(ccx, span, main_llfn, false), config::EntryNone => {} // Do nothing. 
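// After the `OwnedBuilder`/`Builder` merge above, `Builder` owns its
// `LLVMBuilderRef` and disposes it in `Drop`, so a throwaway builder needs
// no separate wrapper. A short sketch of typical use, with names as in
// this series:
//
//     let bld = Builder::with_ccx(ccx);  // wraps LLVMCreateBuilderInContext
//     bld.position_at_end(llbb);         // disposed automatically when dropped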
} @@ -897,47 +894,27 @@ pub fn maybe_create_entry_wrapper(ccx: &CrateContext) { attributes::set_frame_pointer_elimination(ccx, llfn); let llbb = unsafe { - llvm::LLVMAppendBasicBlockInContext(ccx.llcx(), llfn, "top\0".as_ptr() as *const _) + let name = CString::new("top").unwrap(); + llvm::LLVMAppendBasicBlockInContext(ccx.llcx(), llfn, name.as_ptr()) }; - let bld = ccx.raw_builder(); - unsafe { - llvm::LLVMPositionBuilderAtEnd(bld, llbb); - - debuginfo::gdb::insert_reference_to_gdb_debug_scripts_section_global(ccx); + let bld = Builder::with_ccx(ccx); + bld.position_at_end(llbb); - let (start_fn, args) = if use_start_lang_item { - let start_def_id = match ccx.tcx().lang_items.require(StartFnLangItem) { - Ok(id) => id, - Err(s) => ccx.sess().fatal(&s) - }; - let empty_substs = ccx.tcx().intern_substs(&[]); - let start_fn = Callee::def(ccx, start_def_id, empty_substs).reify(ccx); - let args = { - let opaque_rust_main = - llvm::LLVMBuildPointerCast(bld, - rust_main, - Type::i8p(ccx).to_ref(), - "rust_main\0".as_ptr() as *const _); - - vec![opaque_rust_main, get_param(llfn, 0), get_param(llfn, 1)] - }; - (start_fn, args) - } else { - debug!("using user-defined start fn"); - let args = vec![get_param(llfn, 0 as c_uint), get_param(llfn, 1 as c_uint)]; + debuginfo::gdb::insert_reference_to_gdb_debug_scripts_section_global(ccx, &bld); - (rust_main, args) - }; - - let result = llvm::LLVMRustBuildCall(bld, - start_fn, - args.as_ptr(), - args.len() as c_uint, - ptr::null_mut(), - noname()); + let (start_fn, args) = if use_start_lang_item { + let start_def_id = ccx.tcx().require_lang_item(StartFnLangItem); + let empty_substs = ccx.tcx().intern_substs(&[]); + let start_fn = Callee::def(ccx, start_def_id, empty_substs).reify(ccx); + (start_fn, vec![bld.pointercast(rust_main, Type::i8p(ccx).ptr_to()), get_param(llfn, 0), + get_param(llfn, 1)]) + } else { + debug!("using user-defined start fn"); + (rust_main, vec![get_param(llfn, 0 as c_uint), get_param(llfn, 1 as c_uint)]) + }; - llvm::LLVMBuildRet(bld, result); - } + let result = bld.call(start_fn, &args, None); + bld.ret(result); } } diff --git a/src/librustc_trans/common.rs b/src/librustc_trans/common.rs index d09cb8ce2c8a8..0a79c8d43f6fe 100644 --- a/src/librustc_trans/common.rs +++ b/src/librustc_trans/common.rs @@ -14,7 +14,7 @@ use session::Session; use llvm; -use llvm::{ValueRef, BasicBlockRef, BuilderRef, ContextRef, TypeKind}; +use llvm::{ValueRef, BasicBlockRef, ContextRef, TypeKind}; use llvm::{True, False, Bool, OperandBundleDef, get_param}; use llvm::debuginfo::DIScope; use monomorphize::Instance; @@ -235,24 +235,6 @@ impl<'a, 'tcx> VariantInfo<'tcx> { } } -pub struct BuilderRef_res { - pub b: BuilderRef, -} - -impl Drop for BuilderRef_res { - fn drop(&mut self) { - unsafe { - llvm::LLVMDisposeBuilder(self.b); - } - } -} - -pub fn BuilderRef_res(b: BuilderRef) -> BuilderRef_res { - BuilderRef_res { - b: b - } -} - pub fn validate_substs(substs: &Substs) { assert!(!substs.needs_infer()); } diff --git a/src/librustc_trans/context.rs b/src/librustc_trans/context.rs index 0f1fe15a7ecb5..ead23b333ff24 100644 --- a/src/librustc_trans/context.rs +++ b/src/librustc_trans/context.rs @@ -9,15 +9,13 @@ // except according to those terms. 
use llvm; -use llvm::{ContextRef, ModuleRef, ValueRef, BuilderRef}; -use rustc::dep_graph::{DepGraph, DepNode, DepTrackingMap, DepTrackingMapConfig, - WorkProduct}; +use llvm::{ContextRef, ModuleRef, ValueRef}; +use rustc::dep_graph::{DepGraph, DepNode, DepTrackingMap, DepTrackingMapConfig, WorkProduct}; use middle::cstore::LinkMeta; use rustc::hir::def::ExportMap; use rustc::hir::def_id::DefId; use rustc::traits; use base; -use common::BuilderRef_res; use debuginfo; use declare; use glue::DropGlueKind; @@ -139,7 +137,6 @@ pub struct LocalCrateContext<'tcx> { int_type: Type, opaque_vec_type: Type, str_slice_type: Type, - builder: BuilderRef_res, /// Holds the LLVM values for closure IDs. closure_vals: RefCell, ValueRef>>, @@ -605,7 +602,6 @@ impl<'tcx> LocalCrateContext<'tcx> { int_type: Type::from_ref(ptr::null_mut()), opaque_vec_type: Type::from_ref(ptr::null_mut()), str_slice_type: Type::from_ref(ptr::null_mut()), - builder: BuilderRef_res(llvm::LLVMCreateBuilderInContext(llcx)), closure_vals: RefCell::new(FxHashMap()), dbg_cx: dbg_cx, eh_personality: Cell::new(None), @@ -682,10 +678,6 @@ impl<'b, 'tcx> CrateContext<'b, 'tcx> { &self.shared.tcx.sess } - pub fn raw_builder<'a>(&'a self) -> BuilderRef { - self.local().builder.b - } - pub fn get_intrinsic(&self, key: &str) -> ValueRef { if let Some(v) = self.intrinsics().borrow().get(key).cloned() { return v; diff --git a/src/librustc_trans/debuginfo/gdb.rs b/src/librustc_trans/debuginfo/gdb.rs index 8f937d3fe25cb..e8728a3999308 100644 --- a/src/librustc_trans/debuginfo/gdb.rs +++ b/src/librustc_trans/debuginfo/gdb.rs @@ -13,37 +13,26 @@ use llvm; use common::{C_bytes, CrateContext, C_i32}; +use builder::Builder; use declare; use type_::Type; use session::config::NoDebugInfo; -use std::ffi::CString; use std::ptr; use syntax::attr; /// Inserts a side-effect free instruction sequence that makes sure that the /// .debug_gdb_scripts global is referenced, so it isn't removed by the linker. -pub fn insert_reference_to_gdb_debug_scripts_section_global(ccx: &CrateContext) { +pub fn insert_reference_to_gdb_debug_scripts_section_global(ccx: &CrateContext, builder: &Builder) { if needs_gdb_debug_scripts_section(ccx) { - let empty = CString::new("").unwrap(); - let gdb_debug_scripts_section_global = - get_or_insert_gdb_debug_scripts_section_global(ccx); + let gdb_debug_scripts_section_global = get_or_insert_gdb_debug_scripts_section_global(ccx); + // Load just the first byte as that's all that's necessary to force + // LLVM to keep around the reference to the global. + let indices = [C_i32(ccx, 0), C_i32(ccx, 0)]; + let element = builder.inbounds_gep(gdb_debug_scripts_section_global, &indices); + let volative_load_instruction = builder.volatile_load(element); unsafe { - // Load just the first byte as that's all that's necessary to force - // LLVM to keep around the reference to the global. 
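// (The load is marked volatile and given alignment 1, so the optimizer
// cannot delete it as dead code; that is what actually keeps the reference
// to the global alive.)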
- let indices = [C_i32(ccx, 0), C_i32(ccx, 0)]; - let element = - llvm::LLVMBuildInBoundsGEP(ccx.raw_builder(), - gdb_debug_scripts_section_global, - indices.as_ptr(), - indices.len() as ::libc::c_uint, - empty.as_ptr()); - let volative_load_instruction = - llvm::LLVMBuildLoad(ccx.raw_builder(), - element, - empty.as_ptr()); - llvm::LLVMSetVolatile(volative_load_instruction, llvm::True); llvm::LLVMSetAlignment(volative_load_instruction, 1); } } diff --git a/src/librustc_trans/debuginfo/mod.rs b/src/librustc_trans/debuginfo/mod.rs index f915f60c29460..9cc2c72648f02 100644 --- a/src/librustc_trans/debuginfo/mod.rs +++ b/src/librustc_trans/debuginfo/mod.rs @@ -66,7 +66,6 @@ const DW_TAG_arg_variable: c_uint = 0x101; pub struct CrateDebugContext<'tcx> { llcontext: ContextRef, builder: DIBuilderRef, - current_debug_location: Cell, created_files: RefCell>, created_enum_disr_types: RefCell>, @@ -84,16 +83,15 @@ impl<'tcx> CrateDebugContext<'tcx> { let builder = unsafe { llvm::LLVMRustDIBuilderCreate(llmod) }; // DIBuilder inherits context from the module, so we'd better use the same one let llcontext = unsafe { llvm::LLVMGetModuleContext(llmod) }; - return CrateDebugContext { + CrateDebugContext { llcontext: llcontext, builder: builder, - current_debug_location: Cell::new(InternalDebugLocation::UnknownLocation), created_files: RefCell::new(FxHashMap()), created_enum_disr_types: RefCell::new(FxHashMap()), type_map: RefCell::new(TypeMap::new()), namespace_map: RefCell::new(DefIdMap()), composite_types_completed: RefCell::new(FxHashSet()), - }; + } } } @@ -198,15 +196,12 @@ pub fn finalize(cx: &CrateContext) { } /// Creates a function-specific debug context for a function w/o debuginfo. -pub fn empty_function_debug_context<'a, 'tcx>(cx: &CrateContext<'a, 'tcx>) - -> FunctionDebugContext { +pub fn empty_function_debug_context<'a, 'tcx>(cx: &CrateContext<'a, 'tcx>) -> FunctionDebugContext { if cx.sess().opts.debuginfo == NoDebugInfo { - return FunctionDebugContext::DebugInfoDisabled; + FunctionDebugContext::DebugInfoDisabled + } else { + FunctionDebugContext::FunctionWithoutDebugInfo } - - // Clear the debug location so we don't assign them in the function prelude. - source_loc::set_debug_location(cx, None, UnknownLocation); - FunctionDebugContext::FunctionWithoutDebugInfo } /// Creates the function-specific debug context. @@ -225,10 +220,6 @@ pub fn create_function_debug_context<'a, 'tcx>(cx: &CrateContext<'a, 'tcx>, return FunctionDebugContext::DebugInfoDisabled; } - // Clear the debug location so we don't assign them in the function prelude. - // Do this here already, in case we do an early exit from this function. 
- source_loc::set_debug_location(cx, None, UnknownLocation); - let containing_scope = get_containing_scope(cx, instance); let span = mir.span; @@ -482,10 +473,10 @@ pub fn declare_local<'blk, 'tcx>(bcx: &BlockAndBuilder<'blk, 'tcx>, align as u64, ) }; - source_loc::set_debug_location(cx, None, + source_loc::set_debug_location(cx, bcx, InternalDebugLocation::new(scope_metadata, loc.line, loc.col.to_usize())); unsafe { - let debug_loc = llvm::LLVMGetCurrentDebugLocation(cx.raw_builder()); + let debug_loc = llvm::LLVMGetCurrentDebugLocation(bcx.llbuilder); let instr = llvm::LLVMRustDIBuilderInsertDeclareAtEnd( DIB(cx), alloca, @@ -503,7 +494,7 @@ pub fn declare_local<'blk, 'tcx>(bcx: &BlockAndBuilder<'blk, 'tcx>, match variable_kind { ArgumentVariable(_) | CapturedVariable => { assert!(!bcx.fcx().debug_context.get_ref(span).source_locations_enabled.get()); - source_loc::set_debug_location(cx, None, UnknownLocation); + source_loc::set_debug_location(cx, bcx, UnknownLocation); } _ => { /* nothing to do */ } } diff --git a/src/librustc_trans/debuginfo/source_loc.rs b/src/librustc_trans/debuginfo/source_loc.rs index e03ad1a8c8fdb..2a168e342d05e 100644 --- a/src/librustc_trans/debuginfo/source_loc.rs +++ b/src/librustc_trans/debuginfo/source_loc.rs @@ -27,11 +27,10 @@ use syntax_pos::{Span, Pos}; /// /// Maps to a call to llvm::LLVMSetCurrentDebugLocation(...). pub fn set_source_location(fcx: &FunctionContext, builder: &Builder, scope: DIScope, span: Span) { - let builder = builder.llbuilder; let function_debug_context = match fcx.debug_context { FunctionDebugContext::DebugInfoDisabled => return, FunctionDebugContext::FunctionWithoutDebugInfo => { - set_debug_location(fcx.ccx, Some(builder), UnknownLocation); + set_debug_location(fcx.ccx, builder, UnknownLocation); return; } FunctionDebugContext::RegularContext(ref data) => data @@ -44,7 +43,7 @@ pub fn set_source_location(fcx: &FunctionContext, builder: &Builder, scope: DISc } else { UnknownLocation }; - set_debug_location(fcx.ccx, Some(builder), dbg_loc); + set_debug_location(fcx.ccx, builder, dbg_loc); } /// Enables emitting source locations for the given functions. @@ -80,14 +79,8 @@ impl InternalDebugLocation { } pub fn set_debug_location(cx: &CrateContext, - builder: Option, + builder: &Builder, debug_location: InternalDebugLocation) { - if builder.is_none() { - if debug_location == debug_context(cx).current_debug_location.get() { - return; - } - } - let metadata_node = match debug_location { KnownLocation { scope, line, .. 
} => { // Always set the column to zero like Clang and GCC @@ -109,12 +102,7 @@ pub fn set_debug_location(cx: &CrateContext, } }; - if builder.is_none() { - debug_context(cx).current_debug_location.set(debug_location); - } - - let builder = builder.unwrap_or_else(|| cx.raw_builder()); unsafe { - llvm::LLVMSetCurrentDebugLocation(builder, metadata_node); + llvm::LLVMSetCurrentDebugLocation(builder.llbuilder, metadata_node); } } From 05d107d4a186aab7f92bb6423e0a23a98a1395d5 Mon Sep 17 00:00:00 2001 From: Mark-Simulacrum Date: Fri, 16 Dec 2016 19:39:35 -0700 Subject: [PATCH 044/103] Inline validate_substs --- src/librustc_trans/common.rs | 9 ++------- 1 file changed, 2 insertions(+), 7 deletions(-) diff --git a/src/librustc_trans/common.rs b/src/librustc_trans/common.rs index 0a79c8d43f6fe..c4ef81f923110 100644 --- a/src/librustc_trans/common.rs +++ b/src/librustc_trans/common.rs @@ -235,12 +235,7 @@ impl<'a, 'tcx> VariantInfo<'tcx> { } } -pub fn validate_substs(substs: &Substs) { - assert!(!substs.needs_infer()); -} - -// Function context. Every LLVM function we create will have one of -// these. +// Function context. Every LLVM function we create will have one of these. pub struct FunctionContext<'a, 'tcx: 'a> { // The MIR for this function. pub mir: Option>>, @@ -303,7 +298,7 @@ impl<'a, 'tcx> FunctionContext<'a, 'tcx> { ) -> FunctionContext<'a, 'tcx> { let (param_substs, def_id) = match definition { Some((instance, ..)) => { - validate_substs(instance.substs); + assert!(!instance.substs.needs_infer()); (instance.substs, Some(instance.def)) } None => (ccx.tcx().intern_substs(&[]), None) From 65f040031eeb1e3f0416290aaf6ab0aedb357443 Mon Sep 17 00:00:00 2001 From: Mark-Simulacrum Date: Fri, 16 Dec 2016 20:36:42 -0700 Subject: [PATCH 045/103] Remove FunctionContext.landingpad_alloca. --- src/librustc_trans/cleanup.rs | 50 +++++++++++------------------------ src/librustc_trans/common.rs | 11 +------- 2 files changed, 17 insertions(+), 44 deletions(-) diff --git a/src/librustc_trans/cleanup.rs b/src/librustc_trans/cleanup.rs index dfcaedcba7c4c..fbf380723e633 100644 --- a/src/librustc_trans/cleanup.rs +++ b/src/librustc_trans/cleanup.rs @@ -155,14 +155,12 @@ impl<'tcx> CleanupScope<'tcx> { } } - /// Creates a landing pad for the top scope, if one does not exist. The - /// landing pad will perform all cleanups necessary for an unwind and then - /// `resume` to continue error propagation: + /// Creates a landing pad for the top scope. The landing pad will perform all cleanups necessary + /// for an unwind and then `resume` to continue error propagation: /// /// landing_pad -> ... cleanups ... -> [resume] /// - /// (The cleanups and resume instruction are created by - /// `trans_cleanups_to_exit_scope()`, not in this function itself.) + /// This should only be called once per function, as it creates an alloca for the landingpad. fn get_landing_pad<'a>(fcx: &FunctionContext<'a, 'tcx>, drop_val: &DropValue<'tcx>) -> BasicBlockRef { debug!("get_landing_pad"); @@ -171,6 +169,7 @@ impl<'tcx> CleanupScope<'tcx> { let llpersonality = pad_bcx.fcx().eh_personality(); + let resume_bcx = fcx.build_new_block("resume"); let val = if base::wants_msvc_seh(fcx.ccx.sess()) { // A cleanup pad requires a personality function to be specified, so // we do that here explicitly (happens implicitly below through @@ -179,6 +178,7 @@ impl<'tcx> CleanupScope<'tcx> { // exceptions. 
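// With `landingpad_alloca` removed, the "resume" block is wired up eagerly
// right next to the pad instead of being cached on the `FunctionContext`,
// which is why `get_landing_pad` is now a once-per-function operation, as
// the updated doc comment above notes.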
pad_bcx.set_personality_fn(llpersonality); let llretval = pad_bcx.cleanup_pad(None, &[]); + resume_bcx.cleanup_ret(resume_bcx.cleanup_pad(None, &[]), None); UnwindKind::CleanupPad(llretval) } else { // The landing pad return type (the type being propagated). Not sure @@ -194,45 +194,27 @@ impl<'tcx> CleanupScope<'tcx> { // The landing pad block is a cleanup pad_bcx.set_cleanup(llretval); - let addr = match fcx.landingpad_alloca.get() { - Some(addr) => addr, - None => { - let addr = pad_bcx.fcx().alloca(common::val_ty(llretval), ""); - Lifetime::Start.call(&pad_bcx, addr); - fcx.landingpad_alloca.set(Some(addr)); - addr - } - }; + let addr = pad_bcx.fcx().alloca(common::val_ty(llretval), ""); + Lifetime::Start.call(&pad_bcx, addr); pad_bcx.store(llretval, addr); + let lp = resume_bcx.load(addr); + Lifetime::End.call(&resume_bcx, addr); + if !resume_bcx.sess().target.target.options.custom_unwind_resume { + resume_bcx.resume(lp); + } else { + let exc_ptr = resume_bcx.extract_value(lp, 0); + resume_bcx.call(fcx.eh_unwind_resume().reify(fcx.ccx), &[exc_ptr], None); + } UnwindKind::LandingPad }; - // Generate a block that will resume unwinding to the calling function - let bcx = fcx.build_new_block("resume"); - match val { - UnwindKind::LandingPad => { - let addr = fcx.landingpad_alloca.get().unwrap(); - let lp = bcx.load(addr); - Lifetime::End.call(&bcx, addr); - if !bcx.sess().target.target.options.custom_unwind_resume { - bcx.resume(lp); - } else { - let exc_ptr = bcx.extract_value(lp, 0); - bcx.call(bcx.fcx().eh_unwind_resume().reify(bcx.ccx()), &[exc_ptr], None); - } - } - UnwindKind::CleanupPad(_) => { - bcx.cleanup_ret(bcx.cleanup_pad(None, &[]), None); - } - } - let mut cleanup = fcx.build_new_block("clean_custom_"); // Insert cleanup instructions into the cleanup block drop_val.trans(val.get_funclet(&cleanup).as_ref(), &cleanup); // Insert instruction into cleanup block to branch to the exit - val.branch(&mut cleanup, bcx.llbb()); + val.branch(&mut cleanup, resume_bcx.llbb()); // Branch into the cleanup block val.branch(&mut pad_bcx, cleanup.llbb()); diff --git a/src/librustc_trans/common.rs b/src/librustc_trans/common.rs index c4ef81f923110..a01d079c374ae 100644 --- a/src/librustc_trans/common.rs +++ b/src/librustc_trans/common.rs @@ -48,7 +48,7 @@ use std::borrow::Cow; use std::iter; use std::ops::Deref; use std::ffi::CString; -use std::cell::{Cell, Ref}; +use std::cell::Ref; use syntax::ast; use syntax::symbol::{Symbol, InternedString}; @@ -262,14 +262,6 @@ pub struct FunctionContext<'a, 'tcx: 'a> { // allocas, so that LLVM will coalesce them into a single alloca call. alloca_insert_pt: Option, - // When working with landingpad-based exceptions this value is alloca'd and - // later loaded when using the resume instruction. This ends up being - // critical to chaining landing pads and resuing already-translated - // cleanups. - // - // Note that for cleanuppad-based exceptions this is not used. - pub landingpad_alloca: Cell>, - // Describes the return/argument LLVM types and their ABI handling. 
pub fn_ty: FnType, @@ -331,7 +323,6 @@ impl<'a, 'tcx> FunctionContext<'a, 'tcx> { llretslotptr: None, param_env: ccx.tcx().empty_parameter_environment(), alloca_insert_pt: None, - landingpad_alloca: Cell::new(None), fn_ty: fn_ty, param_substs: param_substs, ccx: ccx, From 9c38a54cae966cd56db7358479a0d612572dc6f9 Mon Sep 17 00:00:00 2001 From: Mark-Simulacrum Date: Fri, 16 Dec 2016 20:52:20 -0700 Subject: [PATCH 046/103] Inline FunctionContext.mir --- src/librustc_trans/base.rs | 6 +----- src/librustc_trans/common.rs | 10 ---------- src/librustc_trans/debuginfo/create_scope_map.rs | 7 +++++-- src/librustc_trans/mir/mod.rs | 10 ++++++---- 4 files changed, 12 insertions(+), 21 deletions(-) diff --git a/src/librustc_trans/base.rs b/src/librustc_trans/base.rs index 7e03b196cfe37..71875b008fadf 100644 --- a/src/librustc_trans/base.rs +++ b/src/librustc_trans/base.rs @@ -752,11 +752,7 @@ pub fn trans_instance<'a, 'tcx>(ccx: &CrateContext<'a, 'tcx>, instance: Instance let fcx = FunctionContext::new(ccx, lldecl, fn_ty, Some((instance, &sig, abi)), true); - if fcx.mir.is_none() { - bug!("attempted translation of `{}` w/o MIR", instance); - } - - mir::trans_mir(&fcx); + mir::trans_mir(&fcx, ccx.tcx().item_mir(instance.def)); } pub fn trans_ctor_shim<'a, 'tcx>(ccx: &CrateContext<'a, 'tcx>, diff --git a/src/librustc_trans/common.rs b/src/librustc_trans/common.rs index a01d079c374ae..3fe9d64d575df 100644 --- a/src/librustc_trans/common.rs +++ b/src/librustc_trans/common.rs @@ -22,7 +22,6 @@ use rustc::hir::def::Def; use rustc::hir::def_id::DefId; use rustc::hir::map::DefPathData; use rustc::infer::TransNormalize; -use rustc::mir::Mir; use rustc::util::common::MemoizationMap; use middle::lang_items::LangItem; use rustc::ty::subst::Substs; @@ -48,7 +47,6 @@ use std::borrow::Cow; use std::iter; use std::ops::Deref; use std::ffi::CString; -use std::cell::Ref; use syntax::ast; use syntax::symbol::{Symbol, InternedString}; @@ -237,9 +235,6 @@ impl<'a, 'tcx> VariantInfo<'tcx> { // Function context. Every LLVM function we create will have one of these. pub struct FunctionContext<'a, 'tcx: 'a> { - // The MIR for this function. - pub mir: Option>>, - // The ValueRef returned from a call to llvm::LLVMAddFunction; the // address of the first instruction in the sequence of // instructions for this function that will go in the .text @@ -318,7 +313,6 @@ impl<'a, 'tcx> FunctionContext<'a, 'tcx> { }; let mut fcx = FunctionContext { - mir: mir, llfn: llfndecl, llretslotptr: None, param_env: ccx.tcx().empty_parameter_environment(), @@ -368,10 +362,6 @@ impl<'a, 'tcx> FunctionContext<'a, 'tcx> { }, self) } - pub fn mir(&self) -> Ref<'tcx, Mir<'tcx>> { - self.mir.as_ref().map(Ref::clone).expect("fcx.mir was empty") - } - pub fn new_block(&'a self, name: &str) -> BasicBlockRef { unsafe { let name = CString::new(name).unwrap(); diff --git a/src/librustc_trans/debuginfo/create_scope_map.rs b/src/librustc_trans/debuginfo/create_scope_map.rs index 23f415d95cf4a..3ddbed7aebdc0 100644 --- a/src/librustc_trans/debuginfo/create_scope_map.rs +++ b/src/librustc_trans/debuginfo/create_scope_map.rs @@ -19,6 +19,7 @@ use rustc::mir::{Mir, VisibilityScope}; use libc::c_uint; use std::ptr; +use std::cell::Ref; use syntax_pos::Pos; @@ -44,8 +45,10 @@ impl MirDebugScope { /// Produce DIScope DIEs for each MIR Scope which has variables defined in it. /// If debuginfo is disabled, the returned vector is empty. 
-pub fn create_mir_scopes(fcx: &FunctionContext) -> IndexVec { - let mir = fcx.mir(); +pub fn create_mir_scopes<'tcx>( + fcx: &FunctionContext, + mir: Ref<'tcx, Mir<'tcx>>, +) -> IndexVec { let null_scope = MirDebugScope { scope_metadata: ptr::null_mut(), file_start_pos: BytePos(0), diff --git a/src/librustc_trans/mir/mod.rs b/src/librustc_trans/mir/mod.rs index f02a8f725f78d..b0b41cd1b069a 100644 --- a/src/librustc_trans/mir/mod.rs +++ b/src/librustc_trans/mir/mod.rs @@ -12,7 +12,7 @@ use libc::c_uint; use llvm::{self, ValueRef, BasicBlockRef}; use llvm::debuginfo::DIScope; use rustc::ty; -use rustc::mir; +use rustc::mir::{self, Mir}; use rustc::mir::tcx::LvalueTy; use session::config::FullDebugInfo; use base; @@ -179,9 +179,11 @@ impl<'tcx> LocalRef<'tcx> { /////////////////////////////////////////////////////////////////////////// -pub fn trans_mir<'blk, 'tcx: 'blk>(fcx: &'blk FunctionContext<'blk, 'tcx>) { +pub fn trans_mir<'blk, 'tcx: 'blk>( + fcx: &'blk FunctionContext<'blk, 'tcx>, + mir: Ref<'tcx, Mir<'tcx>> +) { let bcx = fcx.get_entry_block(); - let mir = fcx.mir(); // Analyze the temps to determine which must be lvalues // FIXME @@ -199,7 +201,7 @@ pub fn trans_mir<'blk, 'tcx: 'blk>(fcx: &'blk FunctionContext<'blk, 'tcx>) { }).collect(); // Compute debuginfo scopes from MIR scopes. - let scopes = debuginfo::create_mir_scopes(fcx); + let scopes = debuginfo::create_mir_scopes(fcx, Ref::clone(&mir)); let mut mircx = MirContext { mir: Ref::clone(&mir), From cc1e210ee8eedfa253122f1dd8d78fbde2646e80 Mon Sep 17 00:00:00 2001 From: Mark-Simulacrum Date: Fri, 16 Dec 2016 21:14:23 -0700 Subject: [PATCH 047/103] Inline trans_exchange_free --- src/librustc_trans/glue.rs | 21 +++++---------------- 1 file changed, 5 insertions(+), 16 deletions(-) diff --git a/src/librustc_trans/glue.rs b/src/librustc_trans/glue.rs index 1f58327555e25..13dbac85c54fd 100644 --- a/src/librustc_trans/glue.rs +++ b/src/librustc_trans/glue.rs @@ -46,15 +46,6 @@ pub fn trans_exchange_free_dyn<'blk, 'tcx>(bcx: BlockAndBuilder<'blk, 'tcx>, .call(bcx, &args, None, None).0 } -pub fn trans_exchange_free<'blk, 'tcx>(cx: BlockAndBuilder<'blk, 'tcx>, - v: ValueRef, - size: u64, - align: u32) - -> BlockAndBuilder<'blk, 'tcx> { - let ccx = cx.ccx(); - trans_exchange_free_dyn(cx, v, C_uint(ccx, size), C_uint(ccx, align)) -} - pub fn trans_exchange_free_ty<'blk, 'tcx>(bcx: BlockAndBuilder<'blk, 'tcx>, ptr: ValueRef, content_ty: Ty<'tcx>) @@ -66,19 +57,18 @@ pub fn trans_exchange_free_ty<'blk, 'tcx>(bcx: BlockAndBuilder<'blk, 'tcx>, // `Box` does not allocate. 
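// (A `Box` whose content type is zero-sized performs no allocation, so when
// `content_size` is zero no exchange-free call needs to be emitted either.)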
if content_size != 0 { let content_align = align_of(bcx.ccx(), content_ty); - trans_exchange_free(bcx, ptr, content_size, content_align) + let ccx = bcx.ccx(); + trans_exchange_free_dyn(bcx, ptr, C_uint(ccx, content_size), C_uint(ccx, content_align)) } else { bcx } } -pub fn type_needs_drop<'a, 'tcx>(tcx: TyCtxt<'a, 'tcx, 'tcx>, - ty: Ty<'tcx>) -> bool { +pub fn type_needs_drop<'a, 'tcx>(tcx: TyCtxt<'a, 'tcx, 'tcx>, ty: Ty<'tcx>) -> bool { tcx.type_needs_drop_given_env(ty, &tcx.empty_parameter_environment()) } -pub fn get_drop_glue_type<'a, 'tcx>(tcx: TyCtxt<'a, 'tcx, 'tcx>, - t: Ty<'tcx>) -> Ty<'tcx> { +pub fn get_drop_glue_type<'a, 'tcx>(tcx: TyCtxt<'a, 'tcx, 'tcx>, t: Ty<'tcx>) -> Ty<'tcx> { assert!(t.is_normalized_for_trans()); let t = tcx.erase_regions(&t); @@ -182,8 +172,7 @@ impl<'tcx> DropGlueKind<'tcx> { } } -fn get_drop_glue_core<'a, 'tcx>(ccx: &CrateContext<'a, 'tcx>, - g: DropGlueKind<'tcx>) -> ValueRef { +fn get_drop_glue_core<'a, 'tcx>(ccx: &CrateContext<'a, 'tcx>, g: DropGlueKind<'tcx>) -> ValueRef { let g = g.map_ty(|t| get_drop_glue_type(ccx.tcx(), t)); match ccx.drop_glues().borrow().get(&g) { Some(&(glue, _)) => glue, From 449c6d82a7f5e25cd294fb2e9a49079e91c5284c Mon Sep 17 00:00:00 2001 From: Mark-Simulacrum Date: Sat, 17 Dec 2016 08:24:17 -0700 Subject: [PATCH 048/103] Simplify basic_block.rs --- src/librustc_trans/basic_block.rs | 19 +++++-------------- src/librustc_trans/lib.rs | 1 + 2 files changed, 6 insertions(+), 14 deletions(-) diff --git a/src/librustc_trans/basic_block.rs b/src/librustc_trans/basic_block.rs index 60bd3fb8ef1b8..50246a1c5b35a 100644 --- a/src/librustc_trans/basic_block.rs +++ b/src/librustc_trans/basic_block.rs @@ -10,18 +10,15 @@ use llvm; use llvm::BasicBlockRef; -use value::{Users, Value}; -use std::iter::{Filter, Map}; +use value::Value; #[derive(Copy, Clone)] pub struct BasicBlock(pub BasicBlockRef); -pub type Preds = Map bool>, fn(Value) -> BasicBlock>; - /// Wrapper for LLVM BasicBlockRef impl BasicBlock { pub fn get(&self) -> BasicBlockRef { - let BasicBlock(v) = *self; v + self.0 } pub fn as_value(self) -> Value { @@ -30,16 +27,10 @@ impl BasicBlock { } } - pub fn pred_iter(self) -> Preds { - fn is_a_terminator_inst(user: &Value) -> bool { user.is_a_terminator_inst() } - let is_a_terminator_inst: fn(&Value) -> bool = is_a_terminator_inst; - - fn get_parent(user: Value) -> BasicBlock { user.get_parent().unwrap() } - let get_parent: fn(Value) -> BasicBlock = get_parent; - + pub fn pred_iter(self) -> impl Iterator { self.as_value().user_iter() - .filter(is_a_terminator_inst) - .map(get_parent) + .filter(|user| user.is_a_terminator_inst()) + .map(|user| user.get_parent().unwrap()) } pub fn get_single_predecessor(self) -> Option { diff --git a/src/librustc_trans/lib.rs b/src/librustc_trans/lib.rs index 572d96eaef29d..bd8121e2b9c68 100644 --- a/src/librustc_trans/lib.rs +++ b/src/librustc_trans/lib.rs @@ -36,6 +36,7 @@ #![feature(slice_patterns)] #![feature(staged_api)] #![feature(unicode)] +#![feature(conservative_impl_trait)] use rustc::dep_graph::WorkProduct; From 88b2024a28120ced73b59d95275861acfef4f42f Mon Sep 17 00:00:00 2001 From: Mark-Simulacrum Date: Sat, 17 Dec 2016 10:21:15 -0700 Subject: [PATCH 049/103] Cleanup instruction counting --- src/librustc_trans/base.rs | 39 +---------------------------- src/librustc_trans/builder.rs | 47 ++++------------------------------- src/librustc_trans/context.rs | 14 ----------- 3 files changed, 6 insertions(+), 94 deletions(-) diff --git a/src/librustc_trans/base.rs b/src/librustc_trans/base.rs 
index 71875b008fadf..5c53ba290957c 100644 --- a/src/librustc_trans/base.rs +++ b/src/librustc_trans/base.rs @@ -89,40 +89,6 @@ use rustc::hir; use rustc::ty::layout::{self, Layout}; use syntax::ast; -thread_local! { - static TASK_LOCAL_INSN_KEY: RefCell>> = { - RefCell::new(None) - } -} - -pub fn with_insn_ctxt(blk: F) - where F: FnOnce(&[&'static str]) -{ - TASK_LOCAL_INSN_KEY.with(move |slot| { - slot.borrow().as_ref().map(move |s| blk(s)); - }) -} - -pub fn init_insn_ctxt() { - TASK_LOCAL_INSN_KEY.with(|slot| { - *slot.borrow_mut() = Some(Vec::new()); - }); -} - -pub struct _InsnCtxt { - _cannot_construct_outside_of_this_module: (), -} - -impl Drop for _InsnCtxt { - fn drop(&mut self) { - TASK_LOCAL_INSN_KEY.with(|slot| { - if let Some(ctx) = slot.borrow_mut().as_mut() { - ctx.pop(); - } - }) - } -} - pub struct StatRecorder<'a, 'tcx: 'a> { ccx: &'a CrateContext<'a, 'tcx>, name: Option, @@ -144,10 +110,7 @@ impl<'a, 'tcx> Drop for StatRecorder<'a, 'tcx> { fn drop(&mut self) { if self.ccx.sess().trans_stats() { let iend = self.ccx.stats().n_llvm_insns.get(); - self.ccx - .stats() - .fn_stats - .borrow_mut() + self.ccx.stats().fn_stats.borrow_mut() .push((self.name.take().unwrap(), iend - self.istart)); self.ccx.stats().n_fns.set(self.ccx.stats().n_fns.get() + 1); // Reset LLVM insn count to avoid compound costs. diff --git a/src/librustc_trans/builder.rs b/src/librustc_trans/builder.rs index b710c08e1a473..575150ca4c29c 100644 --- a/src/librustc_trans/builder.rs +++ b/src/librustc_trans/builder.rs @@ -42,7 +42,7 @@ impl<'blk, 'tcx> Drop for Builder<'blk, 'tcx> { // This is a really awful way to get a zero-length c-string, but better (and a // lot more efficient) than doing str::as_c_str("", ...) every time. -pub fn noname() -> *const c_char { +fn noname() -> *const c_char { static CNULL: c_char = 0; &CNULL } @@ -59,50 +59,13 @@ impl<'a, 'tcx> Builder<'a, 'tcx> { } } - pub fn count_insn(&self, category: &str) { + fn count_insn(&self, category: &str) { if self.ccx.sess().trans_stats() { - self.ccx.stats().n_llvm_insns.set(self.ccx - .stats() - .n_llvm_insns - .get() + 1); + self.ccx.stats().n_llvm_insns.set(self.ccx.stats().n_llvm_insns.get() + 1); } - self.ccx.count_llvm_insn(); if self.ccx.sess().count_llvm_insns() { - base::with_insn_ctxt(|v| { - let mut h = self.ccx.stats().llvm_insns.borrow_mut(); - - // Build version of path with cycles removed. - - // Pass 1: scan table mapping str -> rightmost pos. - let mut mm = FxHashMap(); - let len = v.len(); - let mut i = 0; - while i < len { - mm.insert(v[i], i); - i += 1; - } - - // Pass 2: concat strings for each elt, skipping - // forwards over any cycles by advancing to rightmost - // occurrence of each element in path. - let mut s = String::from("."); - i = 0; - while i < len { - i = mm[v[i]]; - s.push('/'); - s.push_str(v[i]); - i += 1; - } - - s.push('/'); - s.push_str(category); - - let n = match h.get(&s) { - Some(&n) => n, - _ => 0 - }; - h.insert(s, n+1); - }) + let mut h = self.ccx.stats().llvm_insns.borrow_mut(); + *h.entry(category.to_string()).or_insert(0) += 1; } } diff --git a/src/librustc_trans/context.rs b/src/librustc_trans/context.rs index ead23b333ff24..2b5d32a9650c0 100644 --- a/src/librustc_trans/context.rs +++ b/src/librustc_trans/context.rs @@ -149,11 +149,6 @@ pub struct LocalCrateContext<'tcx> { intrinsics: RefCell>, - /// Number of LLVM instructions translated into this `LocalCrateContext`. - /// This is used to perform some basic load-balancing to keep all LLVM - /// contexts around the same size. 
- n_llvm_insns: Cell, - /// Depth of the current type-of computation - used to bail out type_of_depth: Cell, @@ -608,7 +603,6 @@ impl<'tcx> LocalCrateContext<'tcx> { eh_unwind_resume: Cell::new(None), rust_try_fn: Cell::new(None), intrinsics: RefCell::new(FxHashMap()), - n_llvm_insns: Cell::new(0), type_of_depth: Cell::new(0), symbol_map: symbol_map, local_gen_sym_counter: Cell::new(0), @@ -634,10 +628,6 @@ impl<'tcx> LocalCrateContext<'tcx> { local_ccx.opaque_vec_type = opaque_vec_type; local_ccx.str_slice_type = str_slice_ty; - if shared.tcx.sess.count_llvm_insns() { - base::init_insn_ctxt() - } - local_ccx } } @@ -841,10 +831,6 @@ impl<'b, 'tcx> CrateContext<'b, 'tcx> { &self.local().intrinsics } - pub fn count_llvm_insn(&self) { - self.local().n_llvm_insns.set(self.local().n_llvm_insns.get() + 1); - } - pub fn obj_size_bound(&self) -> u64 { self.tcx().data_layout.obj_size_bound() } From 21bd747948dd996273423390f20ee4da12f97803 Mon Sep 17 00:00:00 2001 From: Mark-Simulacrum Date: Sat, 17 Dec 2016 12:01:51 -0700 Subject: [PATCH 050/103] Remove unused functions in abi --- src/librustc_trans/abi.rs | 10 ---------- 1 file changed, 10 deletions(-) diff --git a/src/librustc_trans/abi.rs b/src/librustc_trans/abi.rs index a0bea5d38b2a7..681bad1461ce6 100644 --- a/src/librustc_trans/abi.rs +++ b/src/librustc_trans/abi.rs @@ -98,21 +98,11 @@ impl ArgAttributes { self } - pub fn unset(&mut self, attr: ArgAttribute) -> &mut Self { - self.regular = self.regular - attr; - self - } - pub fn set_dereferenceable(&mut self, bytes: u64) -> &mut Self { self.dereferenceable_bytes = bytes; self } - pub fn unset_dereferenceable(&mut self) -> &mut Self { - self.dereferenceable_bytes = 0; - self - } - pub fn apply_llfn(&self, idx: AttributePlace, llfn: ValueRef) { unsafe { self.regular.for_each_kind(|attr| attr.apply_llfn(idx, llfn)); From 9a198534e25516ae071357a3187e650d9b07ea8b Mon Sep 17 00:00:00 2001 From: Mark-Simulacrum Date: Sat, 17 Dec 2016 12:02:09 -0700 Subject: [PATCH 051/103] Remove unused imports --- src/librustc_trans/base.rs | 1 - src/librustc_trans/builder.rs | 2 -- src/librustc_trans/context.rs | 1 - 3 files changed, 4 deletions(-) diff --git a/src/librustc_trans/base.rs b/src/librustc_trans/base.rs index 5c53ba290957c..526562f857b61 100644 --- a/src/librustc_trans/base.rs +++ b/src/librustc_trans/base.rs @@ -79,7 +79,6 @@ use util::nodemap::{NodeSet, FxHashMap, FxHashSet}; use libc::c_uint; use std::ffi::{CStr, CString}; -use std::cell::RefCell; use std::rc::Rc; use std::str; use std::i32; diff --git a/src/librustc_trans/builder.rs b/src/librustc_trans/builder.rs index 575150ca4c29c..b023ff6ea24dd 100644 --- a/src/librustc_trans/builder.rs +++ b/src/librustc_trans/builder.rs @@ -14,12 +14,10 @@ use llvm; use llvm::{AtomicRmwBinOp, AtomicOrdering, SynchronizationScope, AsmDialect}; use llvm::{Opcode, IntPredicate, RealPredicate, False, OperandBundleDef}; use llvm::{ValueRef, BasicBlockRef, BuilderRef, ModuleRef}; -use base; use common::*; use machine::llalign_of_pref; use type_::Type; use value::Value; -use util::nodemap::FxHashMap; use libc::{c_uint, c_char}; use std::borrow::Cow; diff --git a/src/librustc_trans/context.rs b/src/librustc_trans/context.rs index 2b5d32a9650c0..9578182b0c185 100644 --- a/src/librustc_trans/context.rs +++ b/src/librustc_trans/context.rs @@ -15,7 +15,6 @@ use middle::cstore::LinkMeta; use rustc::hir::def::ExportMap; use rustc::hir::def_id::DefId; use rustc::traits; -use base; use debuginfo; use declare; use glue::DropGlueKind; From 
937001a1f8a05a07b16233bcfdf53d9d3381e45c Mon Sep 17 00:00:00 2001 From: Mark-Simulacrum Date: Sat, 17 Dec 2016 12:27:48 -0700 Subject: [PATCH 052/103] Refactor Callee::call to take bcx by-reference. Also change to not return anything; nothing used the return ValueRef. Inlines with_cond. --- src/librustc_trans/base.rs | 18 ----------- src/librustc_trans/callee.rs | 63 ++++++++++++++---------------------- src/librustc_trans/glue.rs | 34 ++++++++++--------- src/librustc_trans/meth.rs | 6 ++-- 4 files changed, 46 insertions(+), 75 deletions(-) diff --git a/src/librustc_trans/base.rs b/src/librustc_trans/base.rs index 526562f857b61..aefdfee9d8cd0 100644 --- a/src/librustc_trans/base.rs +++ b/src/librustc_trans/base.rs @@ -480,24 +480,6 @@ pub fn to_immediate(bcx: &BlockAndBuilder, val: ValueRef, ty: Ty) -> ValueRef { } } -pub fn with_cond<'blk, 'tcx, F>( - bcx: BlockAndBuilder<'blk, 'tcx>, val: ValueRef, f: F -) -> BlockAndBuilder<'blk, 'tcx> - where F: FnOnce(BlockAndBuilder<'blk, 'tcx>) -> BlockAndBuilder<'blk, 'tcx> -{ - if common::const_to_opt_uint(val) == Some(0) { - return bcx; - } - - let fcx = bcx.fcx(); - let next_cx = fcx.build_new_block("next"); - let cond_cx = fcx.build_new_block("cond"); - bcx.cond_br(val, cond_cx.llbb(), next_cx.llbb()); - let after_cx = f(cond_cx); - after_cx.br(next_cx.llbb()); - next_cx -} - pub enum Lifetime { Start, End } impl Lifetime { diff --git a/src/librustc_trans/callee.rs b/src/librustc_trans/callee.rs index 8c880210f62f0..23903d9d7f851 100644 --- a/src/librustc_trans/callee.rs +++ b/src/librustc_trans/callee.rs @@ -189,11 +189,10 @@ impl<'tcx> Callee<'tcx> { /// For non-lang items, `dest` is always Some, and hence the result is written /// into memory somewhere. Nonetheless we return the actual return value of the /// function. - pub fn call<'a, 'blk>(self, bcx: BlockAndBuilder<'blk, 'tcx>, + pub fn call<'a, 'blk>(self, bcx: &BlockAndBuilder<'blk, 'tcx>, args: &[ValueRef], dest: Option, - lpad: Option<&'blk llvm::OperandBundleDef>) - -> (BlockAndBuilder<'blk, 'tcx>, ValueRef) { + lpad: Option<&'blk llvm::OperandBundleDef>) { trans_call_inner(bcx, self, args, dest, lpad) } @@ -538,7 +537,7 @@ fn trans_fn_pointer_shim<'a, 'tcx>( data: Fn(llfnpointer), ty: bare_fn_ty }; - let bcx = callee.call(bcx, &llargs[(self_idx + 1)..], fcx.llretslotptr, None).0; + callee.call(&bcx, &llargs[(self_idx + 1)..], fcx.llretslotptr, None); fcx.finish(&bcx); ccx.fn_pointer_shims().borrow_mut().insert(bare_fn_ty_maybe_ref, llfn); @@ -648,12 +647,11 @@ fn get_fn<'a, 'tcx>(ccx: &CrateContext<'a, 'tcx>, // ______________________________________________________________________ // Translating calls -fn trans_call_inner<'a, 'blk, 'tcx>(bcx: BlockAndBuilder<'blk, 'tcx>, +fn trans_call_inner<'a, 'blk, 'tcx>(bcx: &BlockAndBuilder<'blk, 'tcx>, callee: Callee<'tcx>, args: &[ValueRef], - opt_llretslot: Option, - lpad: Option<&'blk llvm::OperandBundleDef>) - -> (BlockAndBuilder<'blk, 'tcx>, ValueRef) { + dest: Option, + lpad: Option<&'blk llvm::OperandBundleDef>) { // Introduce a temporary cleanup scope that will contain cleanups // for the arguments while they are being evaluated. The purpose // this cleanup is to ensure that, should a panic occur while @@ -661,61 +659,52 @@ fn trans_call_inner<'a, 'blk, 'tcx>(bcx: BlockAndBuilder<'blk, 'tcx>, // cleaned up. If no panic occurs, the values are handed off to // the callee, and hence none of the cleanups in this temporary // scope will ever execute. 
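// The signature change in one line: call sites go from
//
//     let bcx = callee.call(bcx, args, dest, lpad).0;
//
// to
//
//     callee.call(&bcx, args, dest, lpad);
//
// so the caller keeps ownership of its block and no value is returned.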
- let fcx = &bcx.fcx(); - let ccx = fcx.ccx; - + let ccx = bcx.ccx(); let fn_ret = callee.ty.fn_ret(); let fn_ty = callee.direct_fn_type(ccx, &[]); - let mut callee = match callee.data { - NamedTupleConstructor(_) | Intrinsic => { - bug!("{:?} calls should not go through Callee::call", callee); - } - f => f - }; - // If there no destination, return must be direct, with no cast. - if opt_llretslot.is_none() { + if dest.is_none() { assert!(!fn_ty.ret.is_indirect() && fn_ty.ret.cast.is_none()); } let mut llargs = Vec::new(); if fn_ty.ret.is_indirect() { - let mut llretslot = opt_llretslot.unwrap(); - if let Some(ty) = fn_ty.ret.cast { - llretslot = bcx.pointercast(llretslot, ty.ptr_to()); - } + let dest = dest.unwrap(); + let llretslot = if let Some(ty) = fn_ty.ret.cast { + bcx.pointercast(dest, ty.ptr_to()) + } else { + dest + }; llargs.push(llretslot); } - match callee { + let llfn = match callee.data { + NamedTupleConstructor(_) | Intrinsic => { + bug!("{:?} calls should not go through Callee::call", callee); + } Virtual(idx) => { llargs.push(args[0]); let fn_ptr = meth::get_virtual_method(&bcx, args[1], idx); let llty = fn_ty.llvm_type(&bcx.ccx()).ptr_to(); - callee = Fn(bcx.pointercast(fn_ptr, llty)); llargs.extend_from_slice(&args[2..]); + bcx.pointercast(fn_ptr, llty) + } + Fn(f) => { + llargs.extend_from_slice(args); + f } - _ => llargs.extend_from_slice(args) - } - - let llfn = match callee { - Fn(f) => f, - _ => bug!("expected fn pointer callee, found {:?}", callee) }; let llret = bcx.call(llfn, &llargs[..], lpad); fn_ty.apply_attrs_callsite(llret); // If the function we just called does not use an outpointer, - // store the result into the rust outpointer. Cast the outpointer - // type to match because some ABIs will use a different type than - // the Rust type. e.g., a {u32,u32} struct could be returned as - // u64. + // store the result into the Rust outpointer. 
if !fn_ty.ret.is_indirect() { - if let Some(llretslot) = opt_llretslot { + if let Some(llretslot) = dest { fn_ty.ret.store(&bcx, llret, llretslot); } } @@ -723,6 +712,4 @@ fn trans_call_inner<'a, 'blk, 'tcx>(bcx: BlockAndBuilder<'blk, 'tcx>, if fn_ret.0.is_never() { bcx.unreachable(); } - - (bcx, llret) } diff --git a/src/librustc_trans/glue.rs b/src/librustc_trans/glue.rs index 13dbac85c54fd..748707b4978b2 100644 --- a/src/librustc_trans/glue.rs +++ b/src/librustc_trans/glue.rs @@ -35,21 +35,18 @@ use Disr; use syntax_pos::DUMMY_SP; -pub fn trans_exchange_free_dyn<'blk, 'tcx>(bcx: BlockAndBuilder<'blk, 'tcx>, +pub fn trans_exchange_free_dyn<'blk, 'tcx>(bcx: &BlockAndBuilder<'blk, 'tcx>, v: ValueRef, size: ValueRef, - align: ValueRef) - -> BlockAndBuilder<'blk, 'tcx> { + align: ValueRef) { let def_id = langcall(bcx.tcx(), None, "", ExchangeFreeFnLangItem); let args = [bcx.pointercast(v, Type::i8p(bcx.ccx())), size, align]; - Callee::def(bcx.ccx(), def_id, bcx.tcx().intern_substs(&[])) - .call(bcx, &args, None, None).0 + Callee::def(bcx.ccx(), def_id, bcx.tcx().intern_substs(&[])).call(&bcx, &args, None, None) } -pub fn trans_exchange_free_ty<'blk, 'tcx>(bcx: BlockAndBuilder<'blk, 'tcx>, +pub fn trans_exchange_free_ty<'blk, 'tcx>(bcx: &BlockAndBuilder<'blk, 'tcx>, ptr: ValueRef, - content_ty: Ty<'tcx>) - -> BlockAndBuilder<'blk, 'tcx> { + content_ty: Ty<'tcx>) { assert!(type_is_sized(bcx.ccx().tcx(), content_ty)); let sizing_type = sizing_type_of(bcx.ccx(), content_ty); let content_size = llsize_of_alloc(bcx.ccx(), sizing_type); @@ -58,9 +55,7 @@ pub fn trans_exchange_free_ty<'blk, 'tcx>(bcx: BlockAndBuilder<'blk, 'tcx>, if content_size != 0 { let content_align = align_of(bcx.ccx(), content_ty); let ccx = bcx.ccx(); - trans_exchange_free_dyn(bcx, ptr, C_uint(ccx, content_size), C_uint(ccx, content_align)) - } else { - bcx + trans_exchange_free_dyn(bcx, ptr, C_uint(ccx, content_size), C_uint(ccx, content_align)); } } @@ -410,14 +405,23 @@ fn make_drop_glue<'blk, 'tcx>(bcx: BlockAndBuilder<'blk, 'tcx>, llsize, C_uint(bcx.ccx(), 0u64), ); - with_cond(bcx, needs_free, |bcx| { - trans_exchange_free_dyn(bcx, llbox, llsize, llalign) - }) + if const_to_opt_uint(needs_free) == Some(0) { + bcx + } else { + let fcx = bcx.fcx(); + let next_cx = fcx.build_new_block("next"); + let cond_cx = fcx.build_new_block("cond"); + bcx.cond_br(needs_free, cond_cx.llbb(), next_cx.llbb()); + trans_exchange_free_dyn(&cond_cx, llbox, llsize, llalign); + cond_cx.br(next_cx.llbb()); + next_cx + } } else { let llval = v0; let llbox = bcx.load(llval); drop_ty(&bcx, llbox, content_ty); - trans_exchange_free_ty(bcx, llbox, content_ty) + trans_exchange_free_ty(&bcx, llbox, content_ty); + bcx } } ty::TyDynamic(..) 
=> { diff --git a/src/librustc_trans/meth.rs b/src/librustc_trans/meth.rs index 99330c1253765..88b18ecae4c67 100644 --- a/src/librustc_trans/meth.rs +++ b/src/librustc_trans/meth.rs @@ -82,12 +82,10 @@ pub fn trans_object_shim<'a, 'tcx>(ccx: &'a CrateContext<'a, 'tcx>, attributes::set_frame_pointer_elimination(ccx, llfn); let fcx = FunctionContext::new(ccx, llfn, fn_ty, None, false); - let mut bcx = fcx.get_entry_block(); + let bcx = fcx.get_entry_block(); let llargs = get_params(fcx.llfn); - bcx = callee.call(bcx, &llargs[fcx.fn_ty.ret.is_indirect() as usize..], fcx.llretslotptr, - None).0; - + callee.call(&bcx, &llargs[fcx.fn_ty.ret.is_indirect() as usize..], fcx.llretslotptr, None); fcx.finish(&bcx); llfn From 1804131b6da1765678c1b6881dcd1b4f4ca00bc4 Mon Sep 17 00:00:00 2001 From: Mark-Simulacrum Date: Sat, 17 Dec 2016 12:56:33 -0700 Subject: [PATCH 053/103] Remove Ref::clone for MirContext mir --- src/librustc_trans/base.rs | 3 ++- src/librustc_trans/debuginfo/create_scope_map.rs | 3 +-- src/librustc_trans/mir/block.rs | 3 +-- src/librustc_trans/mir/mod.rs | 12 ++++-------- 4 files changed, 8 insertions(+), 13 deletions(-) diff --git a/src/librustc_trans/base.rs b/src/librustc_trans/base.rs index aefdfee9d8cd0..48f8c2efe86b5 100644 --- a/src/librustc_trans/base.rs +++ b/src/librustc_trans/base.rs @@ -696,7 +696,8 @@ pub fn trans_instance<'a, 'tcx>(ccx: &CrateContext<'a, 'tcx>, instance: Instance let fcx = FunctionContext::new(ccx, lldecl, fn_ty, Some((instance, &sig, abi)), true); - mir::trans_mir(&fcx, ccx.tcx().item_mir(instance.def)); + let mir = ccx.tcx().item_mir(instance.def); + mir::trans_mir(&fcx, &mir); } pub fn trans_ctor_shim<'a, 'tcx>(ccx: &CrateContext<'a, 'tcx>, diff --git a/src/librustc_trans/debuginfo/create_scope_map.rs b/src/librustc_trans/debuginfo/create_scope_map.rs index 3ddbed7aebdc0..0f20ed285b60e 100644 --- a/src/librustc_trans/debuginfo/create_scope_map.rs +++ b/src/librustc_trans/debuginfo/create_scope_map.rs @@ -19,7 +19,6 @@ use rustc::mir::{Mir, VisibilityScope}; use libc::c_uint; use std::ptr; -use std::cell::Ref; use syntax_pos::Pos; @@ -47,7 +46,7 @@ impl MirDebugScope { /// If debuginfo is disabled, the returned vector is empty. pub fn create_mir_scopes<'tcx>( fcx: &FunctionContext, - mir: Ref<'tcx, Mir<'tcx>>, + mir: &'tcx Mir<'tcx>, ) -> IndexVec { let null_scope = MirDebugScope { scope_metadata: ptr::null_mut(), diff --git a/src/librustc_trans/mir/block.rs b/src/librustc_trans/mir/block.rs index 45f9279049cd7..4d78170f845aa 100644 --- a/src/librustc_trans/mir/block.rs +++ b/src/librustc_trans/mir/block.rs @@ -38,14 +38,13 @@ use super::lvalue::{LvalueRef}; use super::operand::OperandRef; use super::operand::OperandValue::{Pair, Ref, Immediate}; -use std::cell::Ref as CellRef; use std::ptr; impl<'bcx, 'tcx> MirContext<'bcx, 'tcx> { pub fn trans_block(&mut self, bb: mir::BasicBlock, funclets: &IndexVec>) { let mut bcx = self.build_block(bb); - let data = &CellRef::clone(&self.mir)[bb]; + let data = &self.mir[bb]; debug!("trans_block({:?}={:?})", bb, data); diff --git a/src/librustc_trans/mir/mod.rs b/src/librustc_trans/mir/mod.rs index b0b41cd1b069a..158e14c17aa60 100644 --- a/src/librustc_trans/mir/mod.rs +++ b/src/librustc_trans/mir/mod.rs @@ -24,7 +24,6 @@ use type_of; use syntax_pos::{DUMMY_SP, NO_EXPANSION, COMMAND_LINE_EXPN, BytePos, Span}; use syntax::symbol::keywords; -use std::cell::Ref; use std::iter; use basic_block::BasicBlock; @@ -41,7 +40,7 @@ use self::operand::{OperandRef, OperandValue}; /// Master context for translating MIR. 
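// `MirContext` now borrows the MIR directly for the whole of translation
// rather than holding a `RefCell` guard, which is what lets the
// `Ref::clone(&mir)` calls below disappear.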
pub struct MirContext<'bcx, 'tcx:'bcx> { - mir: Ref<'tcx, mir::Mir<'tcx>>, + mir: &'bcx mir::Mir<'tcx>, /// Function context fcx: &'bcx common::FunctionContext<'bcx, 'tcx>, @@ -179,10 +178,7 @@ impl<'tcx> LocalRef<'tcx> { /////////////////////////////////////////////////////////////////////////// -pub fn trans_mir<'blk, 'tcx: 'blk>( - fcx: &'blk FunctionContext<'blk, 'tcx>, - mir: Ref<'tcx, Mir<'tcx>> -) { +pub fn trans_mir<'blk, 'tcx: 'blk>(fcx: &'blk FunctionContext<'blk, 'tcx>, mir: &'blk Mir<'tcx>) { let bcx = fcx.get_entry_block(); // Analyze the temps to determine which must be lvalues @@ -201,10 +197,10 @@ pub fn trans_mir<'blk, 'tcx: 'blk>( }).collect(); // Compute debuginfo scopes from MIR scopes. - let scopes = debuginfo::create_mir_scopes(fcx, Ref::clone(&mir)); + let scopes = debuginfo::create_mir_scopes(fcx, mir); let mut mircx = MirContext { - mir: Ref::clone(&mir), + mir: mir, fcx: fcx, llpersonalityslot: None, blocks: block_bcxs, From f051c60d925d37f535e61637d8bda700d6d3be74 Mon Sep 17 00:00:00 2001 From: Mark Simulacrum Date: Sat, 17 Dec 2016 15:34:38 -0700 Subject: [PATCH 054/103] Reduce extensions to FunctionContext in cleanup. --- src/librustc_trans/callee.rs | 4 ++-- src/librustc_trans/cleanup.rs | 45 ++++++++++++++++++----------------- src/librustc_trans/glue.rs | 7 +++--- 3 files changed, 29 insertions(+), 27 deletions(-) diff --git a/src/librustc_trans/callee.rs b/src/librustc_trans/callee.rs index 23903d9d7f851..bfb7a8330091d 100644 --- a/src/librustc_trans/callee.rs +++ b/src/librustc_trans/callee.rs @@ -398,7 +398,7 @@ fn trans_fn_once_adapter_shim<'a, 'tcx>( let llfn = callee.reify(bcx.ccx()); let llret; - if let Some(landing_pad) = self_scope.as_ref().and_then(|c| c.landing_pad) { + if let Some(landing_pad) = self_scope.landing_pad { let normal_bcx = bcx.fcx().build_new_block("normal-return"); llret = bcx.invoke(llfn, &llargs[..], normal_bcx.llbb(), landing_pad, None); bcx = normal_bcx; @@ -416,7 +416,7 @@ fn trans_fn_once_adapter_shim<'a, 'tcx>( if fn_ret.0.is_never() { bcx.unreachable(); } - fcx.trans_scope(&bcx, self_scope); + self_scope.trans(&bcx); fcx.finish(&bcx); ccx.instances().borrow_mut().insert(method_instance, lloncefn); diff --git a/src/librustc_trans/cleanup.rs b/src/librustc_trans/cleanup.rs index fbf380723e633..8a9283442cbd3 100644 --- a/src/librustc_trans/cleanup.rs +++ b/src/librustc_trans/cleanup.rs @@ -29,7 +29,7 @@ use rustc::ty::Ty; pub struct CleanupScope<'tcx> { // Cleanup to run upon scope exit. 
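// (`None` models the "noop" scope: `schedule_drop_mem` returns it when the
// type needs no drop, so callers can unconditionally `trans` the result.)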
-    cleanup: DropValue<'tcx>,
+    cleanup: Option<DropValue<'tcx>>,
 
     // Computed on creation if compiling with landing pads (!sess.no_landing_pads)
     pub landing_pad: Option<BasicBlockRef>,
@@ -92,21 +92,11 @@ impl PartialEq for UnwindKind {
         }
     }
 }
-impl<'blk, 'tcx> FunctionContext<'blk, 'tcx> {
-    pub fn trans_scope(
-        &self,
-        bcx: &BlockAndBuilder<'blk, 'tcx>,
-        custom_scope: Option<CleanupScope<'tcx>>
-    ) {
-        if let Some(scope) = custom_scope {
-            scope.cleanup.trans(None, &bcx);
-        }
-    }
 
-    /// Schedules a (deep) drop of `val`, which is a pointer to an instance of
-    /// `ty`
-    pub fn schedule_drop_mem(&self, val: ValueRef, ty: Ty<'tcx>) -> Option<CleanupScope<'tcx>> {
-        if !self.type_needs_drop(ty) { return None; }
+
+impl<'blk, 'tcx> FunctionContext<'blk, 'tcx> {
+    /// Schedules a (deep) drop of `val`, which is a pointer to an instance of `ty`
+    pub fn schedule_drop_mem(&self, val: ValueRef, ty: Ty<'tcx>) -> CleanupScope<'tcx> {
+        if !self.type_needs_drop(ty) { return CleanupScope::noop(); }
         let drop = DropValue {
             val: val,
             ty: ty,
@@ -115,7 +105,7 @@ impl<'blk, 'tcx> FunctionContext<'blk, 'tcx> {
 
         debug!("schedule_drop_mem(val={:?}, ty={:?}) skip_dtor={}",
                Value(val), ty, drop.skip_dtor);
 
-        Some(CleanupScope::new(self, drop))
+        CleanupScope::new(self, drop)
     }
 
     /// Issue #23611: Schedules a (deep) drop of the contents of
@@ -123,11 +113,10 @@ impl<'blk, 'tcx> FunctionContext<'blk, 'tcx> {
     /// `ty`. The scheduled code handles extracting the discriminant
     /// and dropping the contents associated with that variant
     /// *without* executing any associated drop implementation.
-    pub fn schedule_drop_adt_contents(&self, val: ValueRef, ty: Ty<'tcx>)
-                                      -> Option<CleanupScope<'tcx>> {
+    pub fn schedule_drop_adt_contents(&self, val: ValueRef, ty: Ty<'tcx>) -> CleanupScope<'tcx> {
         // `if` below could be "!contents_needs_drop"; skipping drop
         // is just an optimization, so sound to be conservative.
-        if !self.type_needs_drop(ty) { return None; }
+        if !self.type_needs_drop(ty) { return CleanupScope::noop(); }
 
         let drop = DropValue {
             val: val,
@@ -138,15 +127,14 @@ impl<'blk, 'tcx> FunctionContext<'blk, 'tcx> {
 
         debug!("schedule_drop_adt_contents(val={:?}, ty={:?}) skip_dtor={}",
                Value(val), ty, drop.skip_dtor);
 
-        Some(CleanupScope::new(self, drop))
+        CleanupScope::new(self, drop)
     }
-
 }
 
 impl<'tcx> CleanupScope<'tcx> {
     fn new<'a>(fcx: &FunctionContext<'a, 'tcx>, drop_val: DropValue<'tcx>) -> CleanupScope<'tcx> {
         CleanupScope {
-            cleanup: drop_val,
+            cleanup: Some(drop_val),
             landing_pad: if !fcx.ccx.sess().no_landing_pads() {
                 Some(CleanupScope::get_landing_pad(fcx, &drop_val))
             } else {
@@ -155,6 +143,19 @@ impl<'tcx> CleanupScope<'tcx> {
         }
     }
 
+    pub fn noop() -> CleanupScope<'tcx> {
+        CleanupScope {
+            cleanup: None,
+            landing_pad: None,
+        }
+    }
+
+    pub fn trans<'a>(self, bcx: &'a BlockAndBuilder<'a, 'tcx>) {
+        if let Some(cleanup) = self.cleanup {
+            cleanup.trans(None, &bcx);
+        }
+    }
+
     /// Creates a landing pad for the top scope.
The landing pad will perform all cleanups necessary /// for an unwind and then `resume` to continue error propagation: /// diff --git a/src/librustc_trans/glue.rs b/src/librustc_trans/glue.rs index 748707b4978b2..dca5907b8afed 100644 --- a/src/librustc_trans/glue.rs +++ b/src/librustc_trans/glue.rs @@ -32,6 +32,7 @@ use type_of::{type_of, sizing_type_of, align_of}; use type_::Type; use value::Value; use Disr; +use cleanup::CleanupScope; use syntax_pos::DUMMY_SP; @@ -224,7 +225,7 @@ fn trans_custom_dtor<'blk, 'tcx>(mut bcx: BlockAndBuilder<'blk, 'tcx>, let contents_scope = if !shallow_drop { bcx.fcx().schedule_drop_adt_contents(v0, t) } else { - None + CleanupScope::noop() }; let (sized_args, unsized_args); @@ -252,7 +253,7 @@ fn trans_custom_dtor<'blk, 'tcx>(mut bcx: BlockAndBuilder<'blk, 'tcx>, let callee = Callee::def(bcx.ccx(), dtor_did, vtbl.substs); let fn_ty = callee.direct_fn_type(bcx.ccx(), &[]); let llret; - if let Some(landing_pad) = contents_scope.as_ref().and_then(|c| c.landing_pad) { + if let Some(landing_pad) = contents_scope.landing_pad { let normal_bcx = bcx.fcx().build_new_block("normal-return"); llret = bcx.invoke(callee.reify(bcx.ccx()), args, normal_bcx.llbb(), landing_pad, None); bcx = normal_bcx; @@ -260,7 +261,7 @@ fn trans_custom_dtor<'blk, 'tcx>(mut bcx: BlockAndBuilder<'blk, 'tcx>, llret = bcx.call(callee.reify(bcx.ccx()), args, None); } fn_ty.apply_attrs_callsite(llret); - bcx.fcx().trans_scope(&bcx, contents_scope); + contents_scope.trans(&bcx); bcx } From c693bcc6253aa1b82e633c0fd0a5252c5812460b Mon Sep 17 00:00:00 2001 From: Mark Simulacrum Date: Sat, 17 Dec 2016 15:52:29 -0700 Subject: [PATCH 055/103] Inline memfill and merge with memset_intrinsic. --- src/librustc_trans/base.rs | 33 ++++-------------- src/librustc_trans/intrinsic.rs | 61 ++++++++++++--------------------- 2 files changed, 27 insertions(+), 67 deletions(-) diff --git a/src/librustc_trans/base.rs b/src/librustc_trans/base.rs index 48f8c2efe86b5..21fd3e02047fc 100644 --- a/src/librustc_trans/base.rs +++ b/src/librustc_trans/base.rs @@ -53,7 +53,7 @@ use builder::Builder; use callee::{Callee}; use common::{BlockAndBuilder, C_bool, C_bytes_in_context, C_i32, C_uint}; use collector::{self, TransItemCollectionMode}; -use common::{C_struct_in_context, C_u64, C_u8, C_undef}; +use common::{C_struct_in_context, C_u64, C_undef}; use common::{CrateContext, FunctionContext}; use common::{fulfill_obligation}; use common::{type_is_zero_size, val_ty}; @@ -550,38 +550,17 @@ pub fn memcpy_ty<'blk, 'tcx>( } } -pub fn init_zero_mem<'blk, 'tcx>(cx: &BlockAndBuilder<'blk, 'tcx>, llptr: ValueRef, t: Ty<'tcx>) { - let bcx = cx; - memfill(bcx, llptr, t, 0); -} - -// Always use this function instead of storing a constant byte to the memory -// in question. e.g. if you store a zero constant, LLVM will drown in vreg -// allocation for large data structures, and the generated code will be -// awful. (A telltale sign of this is large quantities of -// `mov [byte ptr foo],0` in the generated code.) 
-fn memfill<'a, 'tcx>(b: &Builder<'a, 'tcx>, llptr: ValueRef, ty: Ty<'tcx>, byte: u8) { - let ccx = b.ccx; - let llty = type_of::type_of(ccx, ty); - let llptr = b.pointercast(llptr, Type::i8(ccx).ptr_to()); - let llzeroval = C_u8(ccx, byte); - let size = machine::llsize_of(ccx, llty); - let align = C_i32(ccx, type_of::align_of(ccx, ty) as i32); - call_memset(b, llptr, llzeroval, size, align, false); -} - pub fn call_memset<'bcx, 'tcx>(b: &Builder<'bcx, 'tcx>, ptr: ValueRef, fill_byte: ValueRef, size: ValueRef, align: ValueRef, - volatile: bool) { - let ccx = b.ccx; - let ptr_width = &ccx.sess().target.target.target_pointer_width[..]; + volatile: bool) -> ValueRef { + let ptr_width = &b.ccx.sess().target.target.target_pointer_width[..]; let intrinsic_key = format!("llvm.memset.p0i8.i{}", ptr_width); - let llintrinsicfn = ccx.get_intrinsic(&intrinsic_key); - let volatile = C_bool(ccx, volatile); - b.call(llintrinsicfn, &[ptr, fill_byte, size, align, volatile], None); + let llintrinsicfn = b.ccx.get_intrinsic(&intrinsic_key); + let volatile = C_bool(b.ccx, volatile); + b.call(llintrinsicfn, &[ptr, fill_byte, size, align, volatile], None) } pub fn alloc_ty<'blk, 'tcx>(bcx: &BlockAndBuilder<'blk, 'tcx>, diff --git a/src/librustc_trans/intrinsic.rs b/src/librustc_trans/intrinsic.rs index ec6b908c26485..3de7d10c8d428 100644 --- a/src/librustc_trans/intrinsic.rs +++ b/src/librustc_trans/intrinsic.rs @@ -180,10 +180,13 @@ pub fn trans_intrinsic_call<'a, 'blk, 'tcx>(bcx: &BlockAndBuilder<'blk, 'tcx>, C_u64(ccx, ccx.tcx().type_id_hash(substs.type_at(0))) } (_, "init") => { - let tp_ty = substs.type_at(0); - if !type_is_zero_size(ccx, tp_ty) { - // Just zero out the stack slot. (See comment on base::memzero for explanation) - init_zero_mem(bcx, llresult, tp_ty); + let ty = substs.type_at(0); + if !type_is_zero_size(ccx, ty) { + // Just zero out the stack slot. + // If we store a zero constant, LLVM will drown in vreg allocation for large data + // structures, and the generated code will be awful. (A telltale sign of this is + // large quantities of `mov [byte ptr foo],0` in the generated code.) 
+ memset_intrinsic(bcx, false, ty, llresult, C_u8(ccx, 0), C_uint(ccx, 1usize)); } C_nil(ccx) } @@ -226,12 +229,7 @@ pub fn trans_intrinsic_call<'a, 'blk, 'tcx>(bcx: &BlockAndBuilder<'blk, 'tcx>, llargs[2]) } (_, "write_bytes") => { - memset_intrinsic(bcx, - false, - substs.type_at(0), - llargs[0], - llargs[1], - llargs[2]) + memset_intrinsic(bcx, false, substs.type_at(0), llargs[0], llargs[1], llargs[2]) } (_, "volatile_copy_nonoverlapping_memory") => { @@ -253,12 +251,7 @@ pub fn trans_intrinsic_call<'a, 'blk, 'tcx>(bcx: &BlockAndBuilder<'blk, 'tcx>, llargs[2]) } (_, "volatile_set_memory") => { - memset_intrinsic(bcx, - true, - substs.type_at(0), - llargs[0], - llargs[1], - llargs[2]) + memset_intrinsic(bcx, true, substs.type_at(0), llargs[0], llargs[1], llargs[2]) } (_, "volatile_load") => { let tp_ty = substs.type_at(0); @@ -710,32 +703,20 @@ fn copy_intrinsic<'blk, 'tcx>(bcx: &BlockAndBuilder<'blk, 'tcx>, None) } -fn memset_intrinsic<'blk, 'tcx>(bcx: &BlockAndBuilder<'blk, 'tcx>, - volatile: bool, - tp_ty: Ty<'tcx>, - dst: ValueRef, - val: ValueRef, - count: ValueRef) - -> ValueRef { +fn memset_intrinsic<'blk, 'tcx>( + bcx: &BlockAndBuilder<'blk, 'tcx>, + volatile: bool, + ty: Ty<'tcx>, + dst: ValueRef, + val: ValueRef, + count: ValueRef +) -> ValueRef { let ccx = bcx.ccx(); - let lltp_ty = type_of::type_of(ccx, tp_ty); - let align = C_i32(ccx, type_of::align_of(ccx, tp_ty) as i32); + let align = C_i32(ccx, type_of::align_of(ccx, ty) as i32); + let lltp_ty = type_of::type_of(ccx, ty); let size = machine::llsize_of(ccx, lltp_ty); - let int_size = machine::llbitsize_of_real(ccx, ccx.int_type()); - - let name = format!("llvm.memset.p0i8.i{}", int_size); - - let dst_ptr = bcx.pointercast(dst, Type::i8p(ccx)); - let llfn = ccx.get_intrinsic(&name); - - bcx.call( - llfn, - &[dst_ptr, - val, - bcx.mul(size, count), - align, - C_bool(ccx, volatile)], - None) + let dst = bcx.pointercast(dst, Type::i8p(ccx)); + call_memset(bcx, dst, val, bcx.mul(size, count), align, volatile) } fn count_zeros_intrinsic(bcx: &BlockAndBuilder, From 611e90b11823e8328a9478c6a1c521696d4c5b75 Mon Sep 17 00:00:00 2001 From: Mark Simulacrum Date: Sat, 17 Dec 2016 16:42:16 -0700 Subject: [PATCH 056/103] Simplify intrinsic match statement --- src/librustc_trans/intrinsic.rs | 73 ++++++++++++++++----------------- 1 file changed, 36 insertions(+), 37 deletions(-) diff --git a/src/librustc_trans/intrinsic.rs b/src/librustc_trans/intrinsic.rs index 3de7d10c8d428..8501364ba36e0 100644 --- a/src/librustc_trans/intrinsic.rs +++ b/src/librustc_trans/intrinsic.rs @@ -116,32 +116,32 @@ pub fn trans_intrinsic_call<'a, 'blk, 'tcx>(bcx: &BlockAndBuilder<'blk, 'tcx>, let llret_ty = type_of::type_of(ccx, ret_ty); let simple = get_simple_intrinsic(ccx, name); - let llval = match (simple, name) { - (Some(llfn), _) => { - bcx.call(llfn, &llargs, None) + let llval = match name { + _ if simple.is_some() => { + bcx.call(simple.unwrap(), &llargs, None) } - (_, "likely") => { + "likely" => { let expect = ccx.get_intrinsic(&("llvm.expect.i1")); bcx.call(expect, &[llargs[0], C_bool(ccx, true)], None) } - (_, "unlikely") => { + "unlikely" => { let expect = ccx.get_intrinsic(&("llvm.expect.i1")); bcx.call(expect, &[llargs[0], C_bool(ccx, false)], None) } - (_, "try") => { + "try" => { try_intrinsic(bcx, llargs[0], llargs[1], llargs[2], llresult); C_nil(ccx) } - (_, "breakpoint") => { + "breakpoint" => { let llfn = ccx.get_intrinsic(&("llvm.debugtrap")); bcx.call(llfn, &[], None) } - (_, "size_of") => { + "size_of" => { let tp_ty = 
substs.type_at(0); let lltp_ty = type_of::type_of(ccx, tp_ty); C_uint(ccx, machine::llsize_of_alloc(ccx, lltp_ty)) } - (_, "size_of_val") => { + "size_of_val" => { let tp_ty = substs.type_at(0); if !type_is_sized(tcx, tp_ty) { let (llsize, _) = @@ -152,11 +152,11 @@ pub fn trans_intrinsic_call<'a, 'blk, 'tcx>(bcx: &BlockAndBuilder<'blk, 'tcx>, C_uint(ccx, machine::llsize_of_alloc(ccx, lltp_ty)) } } - (_, "min_align_of") => { + "min_align_of" => { let tp_ty = substs.type_at(0); C_uint(ccx, type_of::align_of(ccx, tp_ty)) } - (_, "min_align_of_val") => { + "min_align_of_val" => { let tp_ty = substs.type_at(0); if !type_is_sized(tcx, tp_ty) { let (_, llalign) = @@ -166,20 +166,20 @@ pub fn trans_intrinsic_call<'a, 'blk, 'tcx>(bcx: &BlockAndBuilder<'blk, 'tcx>, C_uint(ccx, type_of::align_of(ccx, tp_ty)) } } - (_, "pref_align_of") => { + "pref_align_of" => { let tp_ty = substs.type_at(0); let lltp_ty = type_of::type_of(ccx, tp_ty); C_uint(ccx, machine::llalign_of_pref(ccx, lltp_ty)) } - (_, "type_name") => { + "type_name" => { let tp_ty = substs.type_at(0); let ty_name = Symbol::intern(&tp_ty.to_string()).as_str(); C_str_slice(ccx, ty_name) } - (_, "type_id") => { + "type_id" => { C_u64(ccx, ccx.tcx().type_id_hash(substs.type_at(0))) } - (_, "init") => { + "init" => { let ty = substs.type_at(0); if !type_is_zero_size(ccx, ty) { // Just zero out the stack slot. @@ -191,26 +191,26 @@ pub fn trans_intrinsic_call<'a, 'blk, 'tcx>(bcx: &BlockAndBuilder<'blk, 'tcx>, C_nil(ccx) } // Effectively no-ops - (_, "uninit") | (_, "forget") => { + "uninit" | "forget" => { C_nil(ccx) } - (_, "needs_drop") => { + "needs_drop" => { let tp_ty = substs.type_at(0); C_bool(ccx, bcx.fcx().type_needs_drop(tp_ty)) } - (_, "offset") => { + "offset" => { let ptr = llargs[0]; let offset = llargs[1]; bcx.inbounds_gep(ptr, &[offset]) } - (_, "arith_offset") => { + "arith_offset" => { let ptr = llargs[0]; let offset = llargs[1]; bcx.gep(ptr, &[offset]) } - (_, "copy_nonoverlapping") => { + "copy_nonoverlapping" => { copy_intrinsic(bcx, false, false, @@ -219,7 +219,7 @@ pub fn trans_intrinsic_call<'a, 'blk, 'tcx>(bcx: &BlockAndBuilder<'blk, 'tcx>, llargs[0], llargs[2]) } - (_, "copy") => { + "copy" => { copy_intrinsic(bcx, true, false, @@ -228,11 +228,11 @@ pub fn trans_intrinsic_call<'a, 'blk, 'tcx>(bcx: &BlockAndBuilder<'blk, 'tcx>, llargs[0], llargs[2]) } - (_, "write_bytes") => { + "write_bytes" => { memset_intrinsic(bcx, false, substs.type_at(0), llargs[0], llargs[1], llargs[2]) } - (_, "volatile_copy_nonoverlapping_memory") => { + "volatile_copy_nonoverlapping_memory" => { copy_intrinsic(bcx, false, true, @@ -241,7 +241,7 @@ pub fn trans_intrinsic_call<'a, 'blk, 'tcx>(bcx: &BlockAndBuilder<'blk, 'tcx>, llargs[1], llargs[2]) } - (_, "volatile_copy_memory") => { + "volatile_copy_memory" => { copy_intrinsic(bcx, true, true, @@ -250,10 +250,10 @@ pub fn trans_intrinsic_call<'a, 'blk, 'tcx>(bcx: &BlockAndBuilder<'blk, 'tcx>, llargs[1], llargs[2]) } - (_, "volatile_set_memory") => { + "volatile_set_memory" => { memset_intrinsic(bcx, true, substs.type_at(0), llargs[0], llargs[1], llargs[2]) } - (_, "volatile_load") => { + "volatile_load" => { let tp_ty = substs.type_at(0); let mut ptr = llargs[0]; if let Some(ty) = fn_ty.ret.cast { @@ -265,7 +265,7 @@ pub fn trans_intrinsic_call<'a, 'blk, 'tcx>(bcx: &BlockAndBuilder<'blk, 'tcx>, } to_immediate(bcx, load, tp_ty) }, - (_, "volatile_store") => { + "volatile_store" => { let tp_ty = substs.type_at(0); if type_is_fat_ptr(bcx.tcx(), tp_ty) { bcx.volatile_store(llargs[1], 
get_dataptr(bcx, llargs[0])); @@ -285,10 +285,10 @@ pub fn trans_intrinsic_call<'a, 'blk, 'tcx>(bcx: &BlockAndBuilder<'blk, 'tcx>, C_nil(ccx) }, - (_, "ctlz") | (_, "cttz") | (_, "ctpop") | (_, "bswap") | - (_, "add_with_overflow") | (_, "sub_with_overflow") | (_, "mul_with_overflow") | - (_, "overflowing_add") | (_, "overflowing_sub") | (_, "overflowing_mul") | - (_, "unchecked_div") | (_, "unchecked_rem") => { + "ctlz" | "cttz" | "ctpop" | "bswap" | + "add_with_overflow" | "sub_with_overflow" | "mul_with_overflow" | + "overflowing_add" | "overflowing_sub" | "overflowing_mul" | + "unchecked_div" | "unchecked_rem" => { let sty = &arg_tys[0].sty; match int_type_width_signed(sty, ccx) { Some((width, signed)) => @@ -340,8 +340,7 @@ pub fn trans_intrinsic_call<'a, 'blk, 'tcx>(bcx: &BlockAndBuilder<'blk, 'tcx>, } }, - (_, "fadd_fast") | (_, "fsub_fast") | (_, "fmul_fast") | (_, "fdiv_fast") | - (_, "frem_fast") => { + "fadd_fast" | "fsub_fast" | "fmul_fast" | "fdiv_fast" | "frem_fast" => { let sty = &arg_tys[0].sty; match float_type_width(sty) { Some(_width) => @@ -364,7 +363,7 @@ pub fn trans_intrinsic_call<'a, 'blk, 'tcx>(bcx: &BlockAndBuilder<'blk, 'tcx>, }, - (_, "discriminant_value") => { + "discriminant_value" => { let val_ty = substs.type_at(0); match val_ty.sty { ty::TyAdt(adt, ..) if adt.is_enum() => { @@ -374,7 +373,7 @@ pub fn trans_intrinsic_call<'a, 'blk, 'tcx>(bcx: &BlockAndBuilder<'blk, 'tcx>, _ => C_null(llret_ty) } } - (_, name) if name.starts_with("simd_") => { + name if name.starts_with("simd_") => { generic_simd_intrinsic(bcx, name, callee_ty, &llargs, @@ -383,7 +382,7 @@ pub fn trans_intrinsic_call<'a, 'blk, 'tcx>(bcx: &BlockAndBuilder<'blk, 'tcx>, } // This requires that atomic intrinsics follow a specific naming pattern: // "atomic_[_]", and no ordering means SeqCst - (_, name) if name.starts_with("atomic_") => { + name if name.starts_with("atomic_") => { use llvm::AtomicOrdering::*; let split: Vec<&str> = name.split('_').collect(); @@ -501,7 +500,7 @@ pub fn trans_intrinsic_call<'a, 'blk, 'tcx>(bcx: &BlockAndBuilder<'blk, 'tcx>, } - (..) 
=> { + _ => { let intr = match Intrinsic::find(&name) { Some(intr) => intr, None => bug!("unknown intrinsic '{}'", name), From 99816a67435dbe2d9343f82ea16753e1c63edbe2 Mon Sep 17 00:00:00 2001 From: Mark Simulacrum Date: Sat, 17 Dec 2016 18:25:32 -0700 Subject: [PATCH 057/103] Further simplify intrinsic matching --- src/librustc_trans/intrinsic.rs | 118 +++++++++----------------------- 1 file changed, 33 insertions(+), 85 deletions(-) diff --git a/src/librustc_trans/intrinsic.rs b/src/librustc_trans/intrinsic.rs index 8501364ba36e0..17a04c14cba9c 100644 --- a/src/librustc_trans/intrinsic.rs +++ b/src/librustc_trans/intrinsic.rs @@ -211,44 +211,20 @@ pub fn trans_intrinsic_call<'a, 'blk, 'tcx>(bcx: &BlockAndBuilder<'blk, 'tcx>, } "copy_nonoverlapping" => { - copy_intrinsic(bcx, - false, - false, - substs.type_at(0), - llargs[1], - llargs[0], - llargs[2]) + copy_intrinsic(bcx, false, false, substs.type_at(0), llargs[1], llargs[0], llargs[2]) } "copy" => { - copy_intrinsic(bcx, - true, - false, - substs.type_at(0), - llargs[1], - llargs[0], - llargs[2]) + copy_intrinsic(bcx, true, false, substs.type_at(0), llargs[1], llargs[0], llargs[2]) } "write_bytes" => { memset_intrinsic(bcx, false, substs.type_at(0), llargs[0], llargs[1], llargs[2]) } "volatile_copy_nonoverlapping_memory" => { - copy_intrinsic(bcx, - false, - true, - substs.type_at(0), - llargs[0], - llargs[1], - llargs[2]) + copy_intrinsic(bcx, false, true, substs.type_at(0), llargs[0], llargs[1], llargs[2]) } "volatile_copy_memory" => { - copy_intrinsic(bcx, - true, - true, - substs.type_at(0), - llargs[0], - llargs[1], - llargs[2]) + copy_intrinsic(bcx, true, true, substs.type_at(0), llargs[0], llargs[1], llargs[2]) } "volatile_set_memory" => { memset_intrinsic(bcx, true, substs.type_at(0), llargs[0], llargs[1], llargs[2]) @@ -293,10 +269,11 @@ pub fn trans_intrinsic_call<'a, 'blk, 'tcx>(bcx: &BlockAndBuilder<'blk, 'tcx>, match int_type_width_signed(sty, ccx) { Some((width, signed)) => match name { - "ctlz" => count_zeros_intrinsic(bcx, &format!("llvm.ctlz.i{}", width), - llargs[0]), - "cttz" => count_zeros_intrinsic(bcx, &format!("llvm.cttz.i{}", width), - llargs[0]), + "ctlz" | "cttz" => { + let y = C_bool(bcx.ccx(), false); + let llfn = ccx.get_intrinsic(&format!("llvm.{}.i{}", name, width)); + bcx.call(llfn, &[llargs[0], y], None) + } "ctpop" => bcx.call(ccx.get_intrinsic(&format!("llvm.ctpop.i{}", width)), &llargs, None), "bswap" => { @@ -311,7 +288,16 @@ pub fn trans_intrinsic_call<'a, 'blk, 'tcx>(bcx: &BlockAndBuilder<'blk, 'tcx>, let intrinsic = format!("llvm.{}{}.with.overflow.i{}", if signed { 's' } else { 'u' }, &name[..3], width); - with_overflow_intrinsic(bcx, &intrinsic, llargs[0], llargs[1], llresult) + let llfn = bcx.ccx().get_intrinsic(&intrinsic); + + // Convert `i1` to a `bool`, and write it to the out parameter + let val = bcx.call(llfn, &[llargs[0], llargs[1]], None); + let result = bcx.extract_value(val, 0); + let overflow = bcx.zext(bcx.extract_value(val, 1), Type::bool(ccx)); + bcx.store(result, bcx.struct_gep(llresult, 0)); + bcx.store(overflow, bcx.struct_gep(llresult, 1)); + + C_nil(bcx.ccx()) }, "overflowing_add" => bcx.add(llargs[0], llargs[1]), "overflowing_sub" => bcx.sub(llargs[0], llargs[1]), @@ -412,6 +398,12 @@ pub fn trans_intrinsic_call<'a, 'blk, 'tcx>(bcx: &BlockAndBuilder<'blk, 'tcx>, _ => ccx.sess().fatal("Atomic intrinsic not in correct format"), }; + let invalid_monomorphization = |sty| { + span_invalid_monomorphization_error(tcx.sess, span, + &format!("invalid monomorphization of `{}` 
intrinsic: \ + expected basic integer type, found `{}`", name, sty)); + }; + match split[1] { "cxchg" | "cxchgweak" => { let sty = &substs.type_at(0).sty; @@ -424,10 +416,7 @@ pub fn trans_intrinsic_call<'a, 'blk, 'tcx>(bcx: &BlockAndBuilder<'blk, 'tcx>, bcx.store(result, bcx.struct_gep(llresult, 0)); bcx.store(success, bcx.struct_gep(llresult, 1)); } else { - span_invalid_monomorphization_error( - tcx.sess, span, - &format!("invalid monomorphization of `{}` intrinsic: \ - expected basic integer type, found `{}`", name, sty)); + invalid_monomorphization(sty); } C_nil(ccx) } @@ -437,10 +426,7 @@ pub fn trans_intrinsic_call<'a, 'blk, 'tcx>(bcx: &BlockAndBuilder<'blk, 'tcx>, if int_type_width_signed(sty, ccx).is_some() { bcx.atomic_load(llargs[0], order) } else { - span_invalid_monomorphization_error( - tcx.sess, span, - &format!("invalid monomorphization of `{}` intrinsic: \ - expected basic integer type, found `{}`", name, sty)); + invalid_monomorphization(sty); C_nil(ccx) } } @@ -450,10 +436,7 @@ pub fn trans_intrinsic_call<'a, 'blk, 'tcx>(bcx: &BlockAndBuilder<'blk, 'tcx>, if int_type_width_signed(sty, ccx).is_some() { bcx.atomic_store(llargs[1], llargs[0], order); } else { - span_invalid_monomorphization_error( - tcx.sess, span, - &format!("invalid monomorphization of `{}` intrinsic: \ - expected basic integer type, found `{}`", name, sty)); + invalid_monomorphization(sty); } C_nil(ccx) } @@ -489,15 +472,11 @@ pub fn trans_intrinsic_call<'a, 'blk, 'tcx>(bcx: &BlockAndBuilder<'blk, 'tcx>, if int_type_width_signed(sty, ccx).is_some() { bcx.atomic_rmw(atom_op, llargs[0], llargs[1], order) } else { - span_invalid_monomorphization_error( - tcx.sess, span, - &format!("invalid monomorphization of `{}` intrinsic: \ - expected basic integer type, found `{}`", name, sty)); + invalid_monomorphization(sty); C_nil(ccx) } } } - } _ => { @@ -529,18 +508,15 @@ pub fn trans_intrinsic_call<'a, 'blk, 'tcx>(bcx: &BlockAndBuilder<'blk, 'tcx>, *any_changes_needed |= llvm_elem.is_some(); let t = llvm_elem.as_ref().unwrap_or(t); - let elem = one(ty_to_type(ccx, t, - any_changes_needed)); + let elem = one(ty_to_type(ccx, t, any_changes_needed)); vec![elem.ptr_to()] } Vector(ref t, ref llvm_elem, length) => { *any_changes_needed |= llvm_elem.is_some(); let t = llvm_elem.as_ref().unwrap_or(t); - let elem = one(ty_to_type(ccx, t, - any_changes_needed)); - vec![Type::vector(&elem, - length as u64)] + let elem = one(ty_to_type(ccx, t, any_changes_needed)); + vec![Type::vector(&elem, length as u64)] } Aggregate(false, ref contents) => { let elems = contents.iter() @@ -585,8 +561,7 @@ pub fn trans_intrinsic_call<'a, 'blk, 'tcx>(bcx: &BlockAndBuilder<'blk, 'tcx>, } intrinsics::Type::Pointer(_, Some(ref llvm_elem), _) => { let llvm_elem = one(ty_to_type(bcx.ccx(), llvm_elem, &mut false)); - vec![bcx.pointercast(llarg, - llvm_elem.ptr_to())] + vec![bcx.pointercast(llarg, llvm_elem.ptr_to())] } intrinsics::Type::Vector(_, Some(ref llvm_elem), length) => { let llvm_elem = one(ty_to_type(bcx.ccx(), llvm_elem, &mut false)); @@ -718,33 +693,6 @@ fn memset_intrinsic<'blk, 'tcx>( call_memset(bcx, dst, val, bcx.mul(size, count), align, volatile) } -fn count_zeros_intrinsic(bcx: &BlockAndBuilder, - name: &str, - val: ValueRef) - -> ValueRef { - let y = C_bool(bcx.ccx(), false); - let llfn = bcx.ccx().get_intrinsic(&name); - bcx.call(llfn, &[val, y], None) -} - -fn with_overflow_intrinsic<'blk, 'tcx>(bcx: &BlockAndBuilder<'blk, 'tcx>, - name: &str, - a: ValueRef, - b: ValueRef, - out: ValueRef) - -> ValueRef { - let llfn = 
bcx.ccx().get_intrinsic(&name);
-
-    // Convert `i1` to a `bool`, and write it to the out parameter
-    let val = bcx.call(llfn, &[a, b], None);
-    let result = bcx.extract_value(val, 0);
-    let overflow = bcx.zext(bcx.extract_value(val, 1), Type::bool(bcx.ccx()));
-    bcx.store(result, bcx.struct_gep(out, 0));
-    bcx.store(overflow, bcx.struct_gep(out, 1));
-
-    C_nil(bcx.ccx())
-}
-
 fn try_intrinsic<'blk, 'tcx>(
     bcx: &BlockAndBuilder<'blk, 'tcx>,
     func: ValueRef,

From b48e74b5b015cfc3a71e0b7e0f3d12972aab20f6 Mon Sep 17 00:00:00 2001
From: Mark Simulacrum
Date: Sat, 17 Dec 2016 19:54:32 -0700
Subject: [PATCH 058/103] Rename 'blk and 'bcx to 'a

---
 src/librustc_trans/adt.rs           | 56 ++++++++++++---------
 src/librustc_trans/asm.rs           | 10 ++--
 src/librustc_trans/base.rs          | 76 +++++++++++++---------------
 src/librustc_trans/builder.rs       |  2 +-
 src/librustc_trans/callee.rs        | 22 +++++----
 src/librustc_trans/cleanup.rs       |  4 +-
 src/librustc_trans/common.rs        | 46 ++++++++---------
 src/librustc_trans/debuginfo/mod.rs | 14 +++---
 src/librustc_trans/glue.rs          | 69 +++++++++++++-------------
 src/librustc_trans/intrinsic.rs     | 70 +++++++++++++--------------
 src/librustc_trans/meth.rs          |  8 +--
 src/librustc_trans/mir/analyze.rs   | 18 +++----
 src/librustc_trans/mir/block.rs     | 16 +++---
 src/librustc_trans/mir/constant.rs  |  4 +-
 src/librustc_trans/mir/lvalue.rs    | 10 ++--
 src/librustc_trans/mir/mod.rs       | 22 ++++-----
 src/librustc_trans/mir/operand.rs   | 16 +++---
 src/librustc_trans/mir/rvalue.rs    | 22 ++++-----
 src/librustc_trans/mir/statement.rs | 10 ++--
 src/librustc_trans/tvec.rs          |  6 +--
 20 files changed, 256 insertions(+), 245 deletions(-)

diff --git a/src/librustc_trans/adt.rs b/src/librustc_trans/adt.rs
index 5482200f13ec8..4a06982cd379d 100644
--- a/src/librustc_trans/adt.rs
+++ b/src/librustc_trans/adt.rs
@@ -302,11 +302,12 @@ fn struct_llfields<'a, 'tcx>(cx: &CrateContext<'a, 'tcx>, fields: &Vec<Ty<'tcx>>
 
 /// Obtain a representation of the discriminant sufficient to translate
 /// destructuring; this may or may not involve the actual discriminant.
-pub fn trans_switch<'blk, 'tcx>(bcx: &BlockAndBuilder<'blk, 'tcx>,
-                                t: Ty<'tcx>,
-                                scrutinee: ValueRef,
-                                range_assert: bool)
-                                -> (BranchKind, Option<ValueRef>) {
+pub fn trans_switch<'a, 'tcx>(
+    bcx: &BlockAndBuilder<'a, 'tcx>,
+    t: Ty<'tcx>,
+    scrutinee: ValueRef,
+    range_assert: bool
+) -> (BranchKind, Option<ValueRef>) {
     let l = bcx.ccx().layout_of(t);
     match *l {
         layout::CEnum { .. } | layout::General { .. } |
@@ -329,10 +330,13 @@ pub fn is_discr_signed<'tcx>(l: &layout::Layout) -> bool {
 }
 
 /// Obtain the actual discriminant of a value.
-pub fn trans_get_discr<'blk, 'tcx>(bcx: &BlockAndBuilder<'blk, 'tcx>, t: Ty<'tcx>,
-                                   scrutinee: ValueRef, cast_to: Option<Type>,
-                                   range_assert: bool)
-                                   -> ValueRef {
+pub fn trans_get_discr<'a, 'tcx>(
+    bcx: &BlockAndBuilder<'a, 'tcx>,
+    t: Ty<'tcx>,
+    scrutinee: ValueRef,
+    cast_to: Option<Type>,
+    range_assert: bool
+) -> ValueRef {
     let (def, substs) = match t.sty {
         ty::TyAdt(ref def, substs) if def.adt_kind() == AdtKind::Enum => (def, substs),
         _ => bug!("{} is not an enum", t)
@@ -411,8 +415,7 @@ fn load_discr(bcx: &BlockAndBuilder, ity: layout::Integer, ptr: ValueRef, min: u
 /// discriminant-like value returned by `trans_switch`.
 ///
 /// This should ideally be less tightly tied to `_match`.
-pub fn trans_case<'blk, 'tcx>(bcx: &BlockAndBuilder<'blk, 'tcx>, t: Ty<'tcx>, value: Disr)
-    -> ValueRef {
+pub fn trans_case<'a, 'tcx>(bcx: &BlockAndBuilder<'a, 'tcx>, t: Ty<'tcx>, value: Disr) -> ValueRef {
     let l = bcx.ccx().layout_of(t);
     match *l {
         layout::CEnum { discr, ..
}
@@ -432,8 +435,9 @@ pub fn trans_case<'blk, 'tcx>(bcx: &BlockAndBuilder<'blk, 'tcx>, t: Ty<'tcx>, va
 
 /// Set the discriminant for a new value of the given case of the given
 /// representation.
-pub fn trans_set_discr<'blk, 'tcx>(bcx: &BlockAndBuilder<'blk, 'tcx>, t: Ty<'tcx>,
-                                   val: ValueRef, to: Disr) {
+pub fn trans_set_discr<'a, 'tcx>(
+    bcx: &BlockAndBuilder<'a, 'tcx>, t: Ty<'tcx>, val: ValueRef, to: Disr
+) {
     let l = bcx.ccx().layout_of(t);
     match *l {
         layout::CEnum{ discr, min, max, .. } => {
@@ -480,7 +484,7 @@ pub fn trans_set_discr<'blk, 'tcx>(bcx: &BlockAndBuilder<'blk, 'tcx>, t: Ty<'tcx
     }
 }
 
-fn target_sets_discr_via_memset<'blk, 'tcx>(bcx: &BlockAndBuilder<'blk, 'tcx>) -> bool {
+fn target_sets_discr_via_memset<'a, 'tcx>(bcx: &BlockAndBuilder<'a, 'tcx>) -> bool {
     bcx.sess().target.target.arch == "arm" || bcx.sess().target.target.arch == "aarch64"
 }
 
@@ -493,11 +497,13 @@ fn assert_discr_in_range(min: Disr, max: Disr, discr: Disr) {
 }
 
 /// Access a field, at a point when the value's case is known.
-pub fn trans_field_ptr<'blk, 'tcx>(bcx: &BlockAndBuilder<'blk, 'tcx>,
-                                   t: Ty<'tcx>,
-                                   val: MaybeSizedValue,
-                                   discr: Disr, ix: usize)
-                                   -> ValueRef {
+pub fn trans_field_ptr<'a, 'tcx>(
+    bcx: &BlockAndBuilder<'a, 'tcx>,
+    t: Ty<'tcx>,
+    val: MaybeSizedValue,
+    discr: Disr,
+    ix: usize
+) -> ValueRef {
     let l = bcx.ccx().layout_of(t);
     debug!("trans_field_ptr on {} represented as {:#?}", t, l);
     // Note: if this ever needs to generate conditionals (e.g., if we
@@ -553,9 +559,14 @@ pub fn trans_field_ptr<'blk, 'tcx>(bcx: &BlockAndBuilder<'blk, 'tcx>,
     }
 }
 
-fn struct_field_ptr<'blk, 'tcx>(bcx: &BlockAndBuilder<'blk, 'tcx>,
-                                st: &layout::Struct, fields: &Vec<Ty<'tcx>>, val: MaybeSizedValue,
-                                ix: usize, needs_cast: bool) -> ValueRef {
+fn struct_field_ptr<'a, 'tcx>(
+    bcx: &BlockAndBuilder<'a, 'tcx>,
+    st: &layout::Struct,
+    fields: &Vec<Ty<'tcx>>,
+    val: MaybeSizedValue,
+    ix: usize,
+    needs_cast: bool
+) -> ValueRef {
+    let ccx = bcx.ccx();
     let fty = fields[ix];
-    let ccx = bcx.ccx();
     let ll_fty = type_of::in_memory_type_of(ccx, fty);
diff --git a/src/librustc_trans/asm.rs b/src/librustc_trans/asm.rs
index 4c4f8cf67d2d7..6ec198aa24726 100644
--- a/src/librustc_trans/asm.rs
+++ b/src/librustc_trans/asm.rs
@@ -24,10 +24,12 @@ use syntax::ast::AsmDialect;
 use libc::{c_uint, c_char};
 
 // Take an inline assembly expression and splat it out via LLVM
-pub fn trans_inline_asm<'blk, 'tcx>(bcx: &BlockAndBuilder<'blk, 'tcx>,
-                                    ia: &hir::InlineAsm,
-                                    outputs: Vec<(ValueRef, Ty<'tcx>)>,
-                                    mut inputs: Vec<ValueRef>) {
+pub fn trans_inline_asm<'a, 'tcx>(
+    bcx: &BlockAndBuilder<'a, 'tcx>,
+    ia: &hir::InlineAsm,
+    outputs: Vec<(ValueRef, Ty<'tcx>)>,
+    mut inputs: Vec<ValueRef>
+) {
     let mut ext_constraints = vec![];
     let mut output_types = vec![];
 
diff --git a/src/librustc_trans/base.rs b/src/librustc_trans/base.rs
index 21fd3e02047fc..d7efd2ce0a828 100644
--- a/src/librustc_trans/base.rs
+++ b/src/librustc_trans/base.rs
@@ -160,13 +160,14 @@ pub fn bin_op_to_fcmp_predicate(op: hir::BinOp_) -> llvm::RealPredicate {
     }
 }
 
-pub fn compare_simd_types<'blk, 'tcx>(bcx: &BlockAndBuilder<'blk, 'tcx>,
-                                      lhs: ValueRef,
-                                      rhs: ValueRef,
-                                      t: Ty<'tcx>,
-                                      ret_ty: Type,
-                                      op: hir::BinOp_)
-                                      -> ValueRef {
+pub fn compare_simd_types<'a, 'tcx>(
+    bcx: &BlockAndBuilder<'a, 'tcx>,
+    lhs: ValueRef,
+    rhs: ValueRef,
+    t: Ty<'tcx>,
+    ret_ty: Type,
+    op: hir::BinOp_
+) -> ValueRef {
     let signed = match t.sty {
         ty::TyFloat(_) => {
             let cmp = bin_op_to_fcmp_predicate(op);
@@ -216,11 +217,12 @@ pub fn unsized_info<'ccx, 'tcx>(ccx: &CrateContext<'ccx, 'tcx>,
 }
 
 /// Coerce `src`
to `dst_ty`. `src_ty` must be a thin pointer. -pub fn unsize_thin_ptr<'blk, 'tcx>(bcx: &BlockAndBuilder<'blk, 'tcx>, - src: ValueRef, - src_ty: Ty<'tcx>, - dst_ty: Ty<'tcx>) - -> (ValueRef, ValueRef) { +pub fn unsize_thin_ptr<'a, 'tcx>( + bcx: &BlockAndBuilder<'a, 'tcx>, + src: ValueRef, + src_ty: Ty<'tcx>, + dst_ty: Ty<'tcx> +) -> (ValueRef, ValueRef) { debug!("unsize_thin_ptr: {:?} => {:?}", src_ty, dst_ty); match (&src_ty.sty, &dst_ty.sty) { (&ty::TyBox(a), &ty::TyBox(b)) | @@ -240,11 +242,11 @@ pub fn unsize_thin_ptr<'blk, 'tcx>(bcx: &BlockAndBuilder<'blk, 'tcx>, /// Coerce `src`, which is a reference to a value of type `src_ty`, /// to a value of type `dst_ty` and store the result in `dst` -pub fn coerce_unsized_into<'blk, 'tcx>(bcx: &BlockAndBuilder<'blk, 'tcx>, - src: ValueRef, - src_ty: Ty<'tcx>, - dst: ValueRef, - dst_ty: Ty<'tcx>) { +pub fn coerce_unsized_into<'a, 'tcx>(bcx: &BlockAndBuilder<'a, 'tcx>, + src: ValueRef, + src_ty: Ty<'tcx>, + dst: ValueRef, + dst_ty: Ty<'tcx>) { match (&src_ty.sty, &dst_ty.sty) { (&ty::TyBox(..), &ty::TyBox(..)) | (&ty::TyRef(..), &ty::TyRef(..)) | @@ -420,9 +422,7 @@ pub fn load_ty<'a, 'tcx>(b: &Builder<'a, 'tcx>, ptr: ValueRef, t: Ty<'tcx>) -> V /// Helper for storing values in memory. Does the necessary conversion if the in-memory type /// differs from the type used for SSA values. -pub fn store_ty<'blk, 'tcx>( - cx: &BlockAndBuilder<'blk, 'tcx>, v: ValueRef, dst: ValueRef, t: Ty<'tcx> -) { +pub fn store_ty<'a, 'tcx>(cx: &BlockAndBuilder<'a, 'tcx>, v: ValueRef, dst: ValueRef, t: Ty<'tcx>) { debug!("store_ty: {:?} : {:?} <- {:?}", Value(dst), t, Value(v)); if common::type_is_fat_ptr(cx.tcx(), t) { @@ -434,23 +434,19 @@ pub fn store_ty<'blk, 'tcx>( } } -pub fn store_fat_ptr<'blk, 'tcx>(cx: &BlockAndBuilder<'blk, 'tcx>, - data: ValueRef, - extra: ValueRef, - dst: ValueRef, - _ty: Ty<'tcx>) { +pub fn store_fat_ptr<'a, 'tcx>(cx: &BlockAndBuilder<'a, 'tcx>, + data: ValueRef, + extra: ValueRef, + dst: ValueRef, + _ty: Ty<'tcx>) { // FIXME: emit metadata cx.store(data, get_dataptr(cx, dst)); cx.store(extra, get_meta(cx, dst)); } pub fn load_fat_ptr<'a, 'tcx>( - b: &Builder<'a, 'tcx>, - src: ValueRef, - t: Ty<'tcx>) - -> (ValueRef, ValueRef) -{ - + b: &Builder<'a, 'tcx>, src: ValueRef, t: Ty<'tcx> +) -> (ValueRef, ValueRef) { let ptr = get_dataptr(b, src); let ptr = if t.is_region_ptr() || t.is_unique() { b.load_nonnull(ptr) @@ -511,7 +507,7 @@ impl Lifetime { } } -pub fn call_memcpy<'bcx, 'tcx>(b: &Builder<'bcx, 'tcx>, +pub fn call_memcpy<'a, 'tcx>(b: &Builder<'a, 'tcx>, dst: ValueRef, src: ValueRef, n_bytes: ValueRef, @@ -528,8 +524,8 @@ pub fn call_memcpy<'bcx, 'tcx>(b: &Builder<'bcx, 'tcx>, b.call(memcpy, &[dst_ptr, src_ptr, size, align, volatile], None); } -pub fn memcpy_ty<'blk, 'tcx>( - bcx: &BlockAndBuilder<'blk, 'tcx>, dst: ValueRef, src: ValueRef, t: Ty<'tcx> +pub fn memcpy_ty<'a, 'tcx>( + bcx: &BlockAndBuilder<'a, 'tcx>, dst: ValueRef, src: ValueRef, t: Ty<'tcx> ) { let ccx = bcx.ccx(); @@ -550,7 +546,7 @@ pub fn memcpy_ty<'blk, 'tcx>( } } -pub fn call_memset<'bcx, 'tcx>(b: &Builder<'bcx, 'tcx>, +pub fn call_memset<'a, 'tcx>(b: &Builder<'a, 'tcx>, ptr: ValueRef, fill_byte: ValueRef, size: ValueRef, @@ -563,22 +559,20 @@ pub fn call_memset<'bcx, 'tcx>(b: &Builder<'bcx, 'tcx>, b.call(llintrinsicfn, &[ptr, fill_byte, size, align, volatile], None) } -pub fn alloc_ty<'blk, 'tcx>(bcx: &BlockAndBuilder<'blk, 'tcx>, - ty: Ty<'tcx>, - name: &str) -> ValueRef { +pub fn alloc_ty<'a, 'tcx>(bcx: &BlockAndBuilder<'a, 'tcx>, ty: Ty<'tcx>, name: &str) -> 
ValueRef {
     assert!(!ty.has_param_types());
     bcx.fcx().alloca(type_of::type_of(bcx.ccx(), ty), name)
 }
 
-impl<'blk, 'tcx> FunctionContext<'blk, 'tcx> {
+impl<'a, 'tcx> FunctionContext<'a, 'tcx> {
     /// Ties up the llstaticallocas -> llloadenv -> lltop edges,
     /// and builds the return block.
-    pub fn finish(&'blk self, ret_cx: &BlockAndBuilder<'blk, 'tcx>) {
+    pub fn finish(&'a self, ret_cx: &BlockAndBuilder<'a, 'tcx>) {
         self.build_return_block(ret_cx);
     }
 
     // Builds the return block for a function.
-    pub fn build_return_block(&self, ret_cx: &BlockAndBuilder<'blk, 'tcx>) {
+    pub fn build_return_block(&self, ret_cx: &BlockAndBuilder<'a, 'tcx>) {
         if self.llretslotptr.is_none() || self.fn_ty.ret.is_indirect() {
             return ret_cx.ret_void();
         }
diff --git a/src/librustc_trans/builder.rs b/src/librustc_trans/builder.rs
index b023ff6ea24dd..136d1aad31a03 100644
--- a/src/librustc_trans/builder.rs
+++ b/src/librustc_trans/builder.rs
@@ -30,7 +30,7 @@ pub struct Builder<'a, 'tcx: 'a> {
     pub ccx: &'a CrateContext<'a, 'tcx>,
 }
 
-impl<'blk, 'tcx> Drop for Builder<'blk, 'tcx> {
+impl<'a, 'tcx> Drop for Builder<'a, 'tcx> {
     fn drop(&mut self) {
         unsafe {
             llvm::LLVMDisposeBuilder(self.llbuilder);
diff --git a/src/librustc_trans/callee.rs b/src/librustc_trans/callee.rs
index bfb7a8330091d..23351e01df8cf 100644
--- a/src/librustc_trans/callee.rs
+++ b/src/librustc_trans/callee.rs
@@ -189,10 +189,12 @@ impl<'tcx> Callee<'tcx> {
     /// For non-lang items, `dest` is always Some, and hence the result is written
     /// into memory somewhere. Nonetheless we return the actual return value of the
     /// function.
-    pub fn call<'a, 'blk>(self, bcx: &BlockAndBuilder<'blk, 'tcx>,
-                          args: &[ValueRef],
-                          dest: Option<ValueRef>,
-                          lpad: Option<&'blk llvm::OperandBundleDef>) {
+    pub fn call<'a>(self,
+                    bcx: &BlockAndBuilder<'a, 'tcx>,
+                    args: &[ValueRef],
+                    dest: Option<ValueRef>,
+                    lpad: Option<&'a llvm::OperandBundleDef>
+    ) {
         trans_call_inner(bcx, self, args, dest, lpad)
     }
 
@@ -647,11 +649,13 @@ fn get_fn<'a, 'tcx>(ccx: &CrateContext<'a, 'tcx>,
 // ______________________________________________________________________
 // Translating calls
 
-fn trans_call_inner<'a, 'blk, 'tcx>(bcx: &BlockAndBuilder<'blk, 'tcx>,
-                                    callee: Callee<'tcx>,
-                                    args: &[ValueRef],
-                                    dest: Option<ValueRef>,
-                                    lpad: Option<&'blk llvm::OperandBundleDef>) {
+fn trans_call_inner<'a, 'tcx>(
+    bcx: &BlockAndBuilder<'a, 'tcx>,
+    callee: Callee<'tcx>,
+    args: &[ValueRef],
+    dest: Option<ValueRef>,
+    lpad: Option<&'a llvm::OperandBundleDef>
+) {
     // Introduce a temporary cleanup scope that will contain cleanups
     // for the arguments while they are being evaluated.
The purpose
     // of this cleanup is to ensure that, should a panic occur while
diff --git a/src/librustc_trans/cleanup.rs b/src/librustc_trans/cleanup.rs
index 8a9283442cbd3..ee02fe1d7f5cf 100644
--- a/src/librustc_trans/cleanup.rs
+++ b/src/librustc_trans/cleanup.rs
@@ -43,7 +43,7 @@ pub struct DropValue<'tcx> {
 }
 
 impl<'tcx> DropValue<'tcx> {
-    fn trans<'blk>(&self, funclet: Option<&'blk Funclet>, bcx: &BlockAndBuilder<'blk, 'tcx>) {
+    fn trans<'a>(&self, funclet: Option<&'a Funclet>, bcx: &BlockAndBuilder<'a, 'tcx>) {
         glue::call_drop_glue(bcx, self.val, self.ty, self.skip_dtor, funclet)
     }
 }
@@ -93,7 +93,7 @@ impl PartialEq for UnwindKind {
     }
 }
 
-impl<'blk, 'tcx> FunctionContext<'blk, 'tcx> {
+impl<'a, 'tcx> FunctionContext<'a, 'tcx> {
     /// Schedules a (deep) drop of `val`, which is a pointer to an instance of `ty`
     pub fn schedule_drop_mem(&self, val: ValueRef, ty: Ty<'tcx>) -> CleanupScope<'tcx> {
         if !self.type_needs_drop(ty) { return CleanupScope::noop(); }
diff --git a/src/librustc_trans/common.rs b/src/librustc_trans/common.rs
index 3fe9d64d575df..123e0609f0693 100644
--- a/src/librustc_trans/common.rs
+++ b/src/librustc_trans/common.rs
@@ -480,7 +480,7 @@ impl<'a, 'tcx> Drop for FunctionContext<'a, 'tcx> {
 }
 
 #[must_use]
-pub struct BlockAndBuilder<'blk, 'tcx: 'blk> {
+pub struct BlockAndBuilder<'a, 'tcx: 'a> {
     // The BasicBlockRef returned from a call to
     // llvm::LLVMAppendBasicBlock(llfn, name), which adds a basic
     // block to the function pointed to by llfn. We insert
@@ -490,13 +490,13 @@ pub struct BlockAndBuilder<'blk, 'tcx: 'blk> {
 
     // The function context for the function to which this block is
     // attached.
-    fcx: &'blk FunctionContext<'blk, 'tcx>,
+    fcx: &'a FunctionContext<'a, 'tcx>,
 
-    builder: Builder<'blk, 'tcx>,
+    builder: Builder<'a, 'tcx>,
 }
 
-impl<'blk, 'tcx> BlockAndBuilder<'blk, 'tcx> {
-    pub fn new(llbb: BasicBlockRef, fcx: &'blk FunctionContext<'blk, 'tcx>) -> Self {
+impl<'a, 'tcx> BlockAndBuilder<'a, 'tcx> {
+    pub fn new(llbb: BasicBlockRef, fcx: &'a FunctionContext<'a, 'tcx>) -> Self {
         let builder = Builder::with_ccx(fcx.ccx);
         // Set the builder's position to this block's end.
         builder.position_at_end(llbb);
@@ -512,7 +512,7 @@ impl<'blk, 'tcx> BlockAndBuilder<'blk, 'tcx> {
     }
 
     pub fn at_start<F, R>(&self, f: F) -> R
-        where F: FnOnce(&BlockAndBuilder<'blk, 'tcx>) -> R
+        where F: FnOnce(&BlockAndBuilder<'a, 'tcx>) -> R
     {
         self.position_at_start(self.llbb);
         let r = f(self);
@@ -520,16 +520,16 @@ impl<'blk, 'tcx> BlockAndBuilder<'blk, 'tcx> {
         r
     }
 
-    pub fn ccx(&self) -> &'blk CrateContext<'blk, 'tcx> {
+    pub fn ccx(&self) -> &'a CrateContext<'a, 'tcx> {
         self.fcx.ccx
     }
-    pub fn fcx(&self) -> &'blk FunctionContext<'blk, 'tcx> {
+    pub fn fcx(&self) -> &'a FunctionContext<'a, 'tcx> {
         self.fcx
     }
-    pub fn tcx(&self) -> TyCtxt<'blk, 'tcx, 'tcx> {
+    pub fn tcx(&self) -> TyCtxt<'a, 'tcx, 'tcx> {
         self.fcx.ccx.tcx()
     }
-    pub fn sess(&self) -> &'blk Session {
+    pub fn sess(&self) -> &'a Session {
         self.fcx.ccx.sess()
     }
 
@@ -538,8 +538,8 @@ impl<'blk, 'tcx> BlockAndBuilder<'blk, 'tcx> {
     }
 }
 
-impl<'blk, 'tcx> Deref for BlockAndBuilder<'blk, 'tcx> {
-    type Target = Builder<'blk, 'tcx>;
+impl<'a, 'tcx> Deref for BlockAndBuilder<'a, 'tcx> {
+    type Target = Builder<'a, 'tcx>;
     fn deref(&self) -> &Self::Target {
         &self.builder
     }
@@ -896,19 +896,20 @@ pub fn langcall(tcx: TyCtxt,
 // all shifts). For 32- and 64-bit types, this matches the semantics
 // of Java. (See related discussion on #1877 and #10183.)
-pub fn build_unchecked_lshift<'blk, 'tcx>(bcx: &BlockAndBuilder<'blk, 'tcx>, - lhs: ValueRef, - rhs: ValueRef) -> ValueRef { +pub fn build_unchecked_lshift<'a, 'tcx>( + bcx: &BlockAndBuilder<'a, 'tcx>, + lhs: ValueRef, + rhs: ValueRef +) -> ValueRef { let rhs = base::cast_shift_expr_rhs(bcx, hir::BinOp_::BiShl, lhs, rhs); // #1877, #10183: Ensure that input is always valid let rhs = shift_mask_rhs(bcx, rhs); bcx.shl(lhs, rhs) } -pub fn build_unchecked_rshift<'blk, 'tcx>(bcx: &BlockAndBuilder<'blk, 'tcx>, - lhs_t: Ty<'tcx>, - lhs: ValueRef, - rhs: ValueRef) -> ValueRef { +pub fn build_unchecked_rshift<'a, 'tcx>( + bcx: &BlockAndBuilder<'a, 'tcx>, lhs_t: Ty<'tcx>, lhs: ValueRef, rhs: ValueRef +) -> ValueRef { let rhs = base::cast_shift_expr_rhs(bcx, hir::BinOp_::BiShr, lhs, rhs); // #1877, #10183: Ensure that input is always valid let rhs = shift_mask_rhs(bcx, rhs); @@ -920,14 +921,13 @@ pub fn build_unchecked_rshift<'blk, 'tcx>(bcx: &BlockAndBuilder<'blk, 'tcx>, } } -fn shift_mask_rhs<'blk, 'tcx>(bcx: &BlockAndBuilder<'blk, 'tcx>, - rhs: ValueRef) -> ValueRef { +fn shift_mask_rhs<'a, 'tcx>(bcx: &BlockAndBuilder<'a, 'tcx>, rhs: ValueRef) -> ValueRef { let rhs_llty = val_ty(rhs); bcx.and(rhs, shift_mask_val(bcx, rhs_llty, rhs_llty, false)) } -pub fn shift_mask_val<'blk, 'tcx>( - bcx: &BlockAndBuilder<'blk, 'tcx>, +pub fn shift_mask_val<'a, 'tcx>( + bcx: &BlockAndBuilder<'a, 'tcx>, llty: Type, mask_llty: Type, invert: bool diff --git a/src/librustc_trans/debuginfo/mod.rs b/src/librustc_trans/debuginfo/mod.rs index 9cc2c72648f02..c130c4bc99399 100644 --- a/src/librustc_trans/debuginfo/mod.rs +++ b/src/librustc_trans/debuginfo/mod.rs @@ -431,13 +431,13 @@ pub fn create_function_debug_context<'a, 'tcx>(cx: &CrateContext<'a, 'tcx>, } } -pub fn declare_local<'blk, 'tcx>(bcx: &BlockAndBuilder<'blk, 'tcx>, - variable_name: ast::Name, - variable_type: Ty<'tcx>, - scope_metadata: DIScope, - variable_access: VariableAccess, - variable_kind: VariableKind, - span: Span) { +pub fn declare_local<'a, 'tcx>(bcx: &BlockAndBuilder<'a, 'tcx>, + variable_name: ast::Name, + variable_type: Ty<'tcx>, + scope_metadata: DIScope, + variable_access: VariableAccess, + variable_kind: VariableKind, + span: Span) { let cx: &CrateContext = bcx.ccx(); let file = span_start(cx, span).file; diff --git a/src/librustc_trans/glue.rs b/src/librustc_trans/glue.rs index dca5907b8afed..26445efe06c04 100644 --- a/src/librustc_trans/glue.rs +++ b/src/librustc_trans/glue.rs @@ -36,18 +36,20 @@ use cleanup::CleanupScope; use syntax_pos::DUMMY_SP; -pub fn trans_exchange_free_dyn<'blk, 'tcx>(bcx: &BlockAndBuilder<'blk, 'tcx>, - v: ValueRef, - size: ValueRef, - align: ValueRef) { +pub fn trans_exchange_free_dyn<'a, 'tcx>( + bcx: &BlockAndBuilder<'a, 'tcx>, + v: ValueRef, + size: ValueRef, + align: ValueRef +) { let def_id = langcall(bcx.tcx(), None, "", ExchangeFreeFnLangItem); let args = [bcx.pointercast(v, Type::i8p(bcx.ccx())), size, align]; Callee::def(bcx.ccx(), def_id, bcx.tcx().intern_substs(&[])).call(&bcx, &args, None, None) } -pub fn trans_exchange_free_ty<'blk, 'tcx>(bcx: &BlockAndBuilder<'blk, 'tcx>, - ptr: ValueRef, - content_ty: Ty<'tcx>) { +pub fn trans_exchange_free_ty<'a, 'tcx>( + bcx: &BlockAndBuilder<'a, 'tcx>, ptr: ValueRef, content_ty: Ty<'tcx> +) { assert!(type_is_sized(bcx.ccx().tcx(), content_ty)); let sizing_type = sizing_type_of(bcx.ccx(), content_ty); let content_size = llsize_of_alloc(bcx.ccx(), sizing_type); @@ -104,16 +106,16 @@ pub fn get_drop_glue_type<'a, 'tcx>(tcx: TyCtxt<'a, 'tcx, 'tcx>, t: Ty<'tcx>) -> } 
} -fn drop_ty<'blk, 'tcx>(bcx: &BlockAndBuilder<'blk, 'tcx>, v: ValueRef, t: Ty<'tcx>) { +fn drop_ty<'a, 'tcx>(bcx: &BlockAndBuilder<'a, 'tcx>, v: ValueRef, t: Ty<'tcx>) { call_drop_glue(bcx, v, t, false, None) } -pub fn call_drop_glue<'blk, 'tcx>( - bcx: &BlockAndBuilder<'blk, 'tcx>, +pub fn call_drop_glue<'a, 'tcx>( + bcx: &BlockAndBuilder<'a, 'tcx>, v: ValueRef, t: Ty<'tcx>, skip_dtor: bool, - funclet: Option<&'blk Funclet>, + funclet: Option<&'a Funclet>, ) { // NB: v is an *alias* of type t here, not a direct value. debug!("call_drop_glue(t={:?}, skip_dtor={})", t, skip_dtor); @@ -181,8 +183,7 @@ fn get_drop_glue_core<'a, 'tcx>(ccx: &CrateContext<'a, 'tcx>, g: DropGlueKind<'t } } -pub fn implement_drop_glue<'a, 'tcx>(ccx: &CrateContext<'a, 'tcx>, - g: DropGlueKind<'tcx>) { +pub fn implement_drop_glue<'a, 'tcx>(ccx: &CrateContext<'a, 'tcx>, g: DropGlueKind<'tcx>) { let tcx = ccx.tcx(); assert_eq!(g.ty(), get_drop_glue_type(tcx, g.ty())); let (llfn, fn_ty) = ccx.drop_glues().borrow().get(&g).unwrap().clone(); @@ -203,11 +204,11 @@ pub fn implement_drop_glue<'a, 'tcx>(ccx: &CrateContext<'a, 'tcx>, fcx.finish(&bcx); } -fn trans_custom_dtor<'blk, 'tcx>(mut bcx: BlockAndBuilder<'blk, 'tcx>, - t: Ty<'tcx>, - v0: ValueRef, - shallow_drop: bool) - -> BlockAndBuilder<'blk, 'tcx> +fn trans_custom_dtor<'a, 'tcx>(mut bcx: BlockAndBuilder<'a, 'tcx>, + t: Ty<'tcx>, + v0: ValueRef, + shallow_drop: bool) + -> BlockAndBuilder<'a, 'tcx> { debug!("trans_custom_dtor t: {}", t); let tcx = bcx.tcx(); @@ -265,9 +266,9 @@ fn trans_custom_dtor<'blk, 'tcx>(mut bcx: BlockAndBuilder<'blk, 'tcx>, bcx } -pub fn size_and_align_of_dst<'blk, 'tcx>(bcx: &BlockAndBuilder<'blk, 'tcx>, - t: Ty<'tcx>, info: ValueRef) - -> (ValueRef, ValueRef) { +pub fn size_and_align_of_dst<'a, 'tcx>(bcx: &BlockAndBuilder<'a, 'tcx>, + t: Ty<'tcx>, info: ValueRef) + -> (ValueRef, ValueRef) { debug!("calculate size of DST: {}; with lost info: {:?}", t, Value(info)); if type_is_sized(bcx.tcx(), t) { @@ -372,10 +373,10 @@ pub fn size_and_align_of_dst<'blk, 'tcx>(bcx: &BlockAndBuilder<'blk, 'tcx>, } } -fn make_drop_glue<'blk, 'tcx>(bcx: BlockAndBuilder<'blk, 'tcx>, - v0: ValueRef, - g: DropGlueKind<'tcx>) - -> BlockAndBuilder<'blk, 'tcx> { +fn make_drop_glue<'a, 'tcx>(bcx: BlockAndBuilder<'a, 'tcx>, + v0: ValueRef, + g: DropGlueKind<'tcx>) + -> BlockAndBuilder<'a, 'tcx> { let t = g.ty(); let skip_dtor = match g { DropGlueKind::Ty(_) => false, DropGlueKind::TyContents(_) => true }; @@ -454,15 +455,15 @@ fn make_drop_glue<'blk, 'tcx>(bcx: BlockAndBuilder<'blk, 'tcx>, } // Iterates through the elements of a structural type, dropping them. 
-fn drop_structural_ty<'blk, 'tcx>(cx: BlockAndBuilder<'blk, 'tcx>,
-                                  av: ValueRef,
-                                  t: Ty<'tcx>)
-                                  -> BlockAndBuilder<'blk, 'tcx> {
-    fn iter_variant<'blk, 'tcx>(cx: &BlockAndBuilder<'blk, 'tcx>,
-                                t: Ty<'tcx>,
-                                av: adt::MaybeSizedValue,
-                                variant: &'tcx ty::VariantDef,
-                                substs: &Substs<'tcx>) {
+fn drop_structural_ty<'a, 'tcx>(cx: BlockAndBuilder<'a, 'tcx>,
+                                av: ValueRef,
+                                t: Ty<'tcx>)
+                                -> BlockAndBuilder<'a, 'tcx> {
+    fn iter_variant<'a, 'tcx>(cx: &BlockAndBuilder<'a, 'tcx>,
+                              t: Ty<'tcx>,
+                              av: adt::MaybeSizedValue,
+                              variant: &'tcx ty::VariantDef,
+                              substs: &Substs<'tcx>) {
         let tcx = cx.tcx();
         for (i, field) in variant.fields.iter().enumerate() {
             let arg = monomorphize::field_ty(tcx, substs, field);
diff --git a/src/librustc_trans/intrinsic.rs b/src/librustc_trans/intrinsic.rs
index 17a04c14cba9c..caa4e75f0b02a 100644
--- a/src/librustc_trans/intrinsic.rs
+++ b/src/librustc_trans/intrinsic.rs
@@ -84,12 +84,12 @@ fn get_simple_intrinsic(ccx: &CrateContext, name: &str) -> Option<ValueRef> {
 /// Remember to add all intrinsics here, in librustc_typeck/check/mod.rs,
 /// and in libcore/intrinsics.rs; if you need access to any llvm intrinsics,
 /// add them to librustc_trans/trans/context.rs
-pub fn trans_intrinsic_call<'a, 'blk, 'tcx>(bcx: &BlockAndBuilder<'blk, 'tcx>,
-                                            callee_ty: Ty<'tcx>,
-                                            fn_ty: &FnType,
-                                            llargs: &[ValueRef],
-                                            llresult: ValueRef,
-                                            span: Span) {
+pub fn trans_intrinsic_call<'a, 'tcx>(bcx: &BlockAndBuilder<'a, 'tcx>,
+                                      callee_ty: Ty<'tcx>,
+                                      fn_ty: &FnType,
+                                      llargs: &[ValueRef],
+                                      llresult: ValueRef,
+                                      span: Span) {
     let fcx = bcx.fcx();
     let ccx = fcx.ccx;
     let tcx = bcx.tcx();
@@ -537,11 +537,11 @@ pub fn trans_intrinsic_call<'a, 'blk, 'tcx>(bcx: &BlockAndBuilder<'blk, 'tcx>,
     // qux` to be converted into `foo, bar, baz, qux`, integer
     // arguments to be truncated as needed and pointers to be
     // cast.
-    fn modify_as_needed<'blk, 'tcx>(bcx: &BlockAndBuilder<'blk, 'tcx>,
-                                    t: &intrinsics::Type,
-                                    arg_type: Ty<'tcx>,
-                                    llarg: ValueRef)
-                                    -> Vec<ValueRef>
+    fn modify_as_needed<'a, 'tcx>(bcx: &BlockAndBuilder<'a, 'tcx>,
+                                  t: &intrinsics::Type,
+                                  arg_type: Ty<'tcx>,
+                                  llarg: ValueRef)
+                                  -> Vec<ValueRef>
     {
         match *t {
             intrinsics::Type::Aggregate(true, ref contents) => {
@@ -642,14 +642,14 @@ pub fn trans_intrinsic_call<'a, 'blk, 'tcx>(bcx: &BlockAndBuilder<'blk, 'tcx>,
     }
 }
 
-fn copy_intrinsic<'blk, 'tcx>(bcx: &BlockAndBuilder<'blk, 'tcx>,
-                              allow_overlap: bool,
-                              volatile: bool,
-                              tp_ty: Ty<'tcx>,
-                              dst: ValueRef,
-                              src: ValueRef,
-                              count: ValueRef)
-                              -> ValueRef {
+fn copy_intrinsic<'a, 'tcx>(bcx: &BlockAndBuilder<'a, 'tcx>,
+                            allow_overlap: bool,
+                            volatile: bool,
+                            tp_ty: Ty<'tcx>,
+                            dst: ValueRef,
+                            src: ValueRef,
+                            count: ValueRef)
+                            -> ValueRef {
     let ccx = bcx.ccx();
     let lltp_ty = type_of::type_of(ccx, tp_ty);
     let align = C_i32(ccx, type_of::align_of(ccx, tp_ty) as i32);
@@ -677,8 +677,8 @@ fn copy_intrinsic<'blk, 'tcx>(bcx: &BlockAndBuilder<'blk, 'tcx>,
     None)
 }
 
-fn memset_intrinsic<'blk, 'tcx>(
-    bcx: &BlockAndBuilder<'blk, 'tcx>,
+fn memset_intrinsic<'a, 'tcx>(
+    bcx: &BlockAndBuilder<'a, 'tcx>,
     volatile: bool,
     ty: Ty<'tcx>,
     dst: ValueRef,
@@ -693,8 +693,8 @@ fn memset_intrinsic<'blk, 'tcx>(
     call_memset(bcx, dst, val, bcx.mul(size, count), align, volatile)
 }
 
-fn try_intrinsic<'blk, 'tcx>(
-    bcx: &BlockAndBuilder<'blk, 'tcx>,
+fn try_intrinsic<'a, 'tcx>(
+    bcx: &BlockAndBuilder<'a, 'tcx>,
     func: ValueRef,
     data: ValueRef,
     local_ptr: ValueRef,
@@ -717,11 +717,11 @@ fn try_intrinsic<'blk, 'tcx>(
 // instructions are meant to work for all targets, as of the time of this
 // writing, however, LLVM does not recommend the usage of these new instructions
 // as the old ones are still more optimized.
-fn trans_msvc_try<'blk, 'tcx>(bcx: &BlockAndBuilder<'blk, 'tcx>,
-                              func: ValueRef,
-                              data: ValueRef,
-                              local_ptr: ValueRef,
-                              dest: ValueRef) {
+fn trans_msvc_try<'a, 'tcx>(bcx: &BlockAndBuilder<'a, 'tcx>,
+                            func: ValueRef,
+                            data: ValueRef,
+                            local_ptr: ValueRef,
+                            dest: ValueRef) {
     let llfn = get_rust_try_fn(bcx.fcx(), &mut |bcx| {
         let ccx = bcx.ccx();
 
@@ -820,11 +820,11 @@ fn trans_msvc_try<'blk, 'tcx>(bcx: &BlockAndBuilder<'blk, 'tcx>,
 // function calling it, and that function may already have other personality
 // functions in play. By calling a shim we're guaranteed that our shim will have
 // the right personality function.
-fn trans_gnu_try<'blk, 'tcx>(bcx: &BlockAndBuilder<'blk, 'tcx>,
-                             func: ValueRef,
-                             data: ValueRef,
-                             local_ptr: ValueRef,
-                             dest: ValueRef) {
+fn trans_gnu_try<'a, 'tcx>(bcx: &BlockAndBuilder<'a, 'tcx>,
+                           func: ValueRef,
+                           data: ValueRef,
+                           local_ptr: ValueRef,
+                           dest: ValueRef) {
     let llfn = get_rust_try_fn(bcx.fcx(), &mut |bcx| {
         let ccx = bcx.ccx();
 
@@ -928,8 +928,8 @@ fn span_invalid_monomorphization_error(a: &Session, b: Span, c: &str) {
     span_err!(a, b, E0511, "{}", c);
 }
 
-fn generic_simd_intrinsic<'blk, 'tcx, 'a>(
-    bcx: &BlockAndBuilder<'blk, 'tcx>,
+fn generic_simd_intrinsic<'a, 'tcx>(
+    bcx: &BlockAndBuilder<'a, 'tcx>,
     name: &str,
     callee_ty: Ty<'tcx>,
     llargs: &[ValueRef],
diff --git a/src/librustc_trans/meth.rs b/src/librustc_trans/meth.rs
index 88b18ecae4c67..b8901231f1c09 100644
--- a/src/librustc_trans/meth.rs
+++ b/src/librustc_trans/meth.rs
@@ -28,10 +28,10 @@ use rustc::ty;
 const VTABLE_OFFSET: usize = 3;
 
 /// Extracts a method from a trait object's vtable, at the specified index.
-pub fn get_virtual_method<'blk, 'tcx>(bcx: &BlockAndBuilder<'blk, 'tcx>,
-                                      llvtable: ValueRef,
-                                      vtable_index: usize)
-                                      -> ValueRef {
+pub fn get_virtual_method<'a, 'tcx>(bcx: &BlockAndBuilder<'a, 'tcx>,
+                                    llvtable: ValueRef,
+                                    vtable_index: usize)
+                                    -> ValueRef {
     // Load the data pointer from the object.
     debug!("get_virtual_method(vtable_index={}, llvtable={:?})",
            vtable_index, Value(llvtable));
 
diff --git a/src/librustc_trans/mir/analyze.rs b/src/librustc_trans/mir/analyze.rs
index 4b6998d0505dc..21b021cacc985 100644
--- a/src/librustc_trans/mir/analyze.rs
+++ b/src/librustc_trans/mir/analyze.rs
@@ -20,8 +20,7 @@ use common::{self, BlockAndBuilder};
 use glue;
 use super::rvalue;
 
-pub fn lvalue_locals<'bcx, 'tcx>(bcx: &BlockAndBuilder<'bcx,'tcx>,
-                                 mir: &mir::Mir<'tcx>) -> BitVector {
+pub fn lvalue_locals<'a, 'tcx>(bcx: &BlockAndBuilder<'a, 'tcx>, mir: &mir::Mir<'tcx>) -> BitVector {
     let mut analyzer = LocalAnalyzer::new(mir, &bcx);
 
     analyzer.visit_mir(mir);
@@ -55,17 +54,16 @@ pub fn lvalue_locals<'bcx, 'tcx>(bcx: &BlockAndBuilder<'bcx,'tcx>,
     analyzer.lvalue_locals
 }
 
-struct LocalAnalyzer<'mir, 'bcx: 'mir, 'tcx: 'bcx> {
+struct LocalAnalyzer<'mir, 'a: 'mir, 'tcx: 'a> {
     mir: &'mir mir::Mir<'tcx>,
-    bcx: &'mir BlockAndBuilder<'bcx, 'tcx>,
+    bcx: &'mir BlockAndBuilder<'a, 'tcx>,
     lvalue_locals: BitVector,
     seen_assigned: BitVector
 }
 
-impl<'mir, 'bcx, 'tcx> LocalAnalyzer<'mir, 'bcx, 'tcx> {
-    fn new(mir: &'mir mir::Mir<'tcx>,
-           bcx: &'mir BlockAndBuilder<'bcx, 'tcx>)
-           -> LocalAnalyzer<'mir, 'bcx, 'tcx> {
+impl<'mir, 'a, 'tcx> LocalAnalyzer<'mir, 'a, 'tcx> {
+    fn new(mir: &'mir mir::Mir<'tcx>, bcx: &'mir BlockAndBuilder<'a, 'tcx>)
+           -> LocalAnalyzer<'mir, 'a, 'tcx> {
         LocalAnalyzer {
             mir: mir,
             bcx: bcx,
@@ -86,7 +84,7 @@ impl<'mir, 'bcx, 'tcx> LocalAnalyzer<'mir, 'bcx, 'tcx> {
     }
 }
 
-impl<'mir, 'bcx, 'tcx> Visitor<'tcx> for LocalAnalyzer<'mir, 'bcx, 'tcx> {
+impl<'mir, 'a, 'tcx> Visitor<'tcx> for LocalAnalyzer<'mir, 'a, 'tcx> {
     fn visit_assign(&mut self,
                     block: mir::BasicBlock,
                     lvalue: &mir::Lvalue<'tcx>,
@@ -199,7 +197,7 @@ pub enum CleanupKind {
     Internal { funclet: mir::BasicBlock }
 }
 
-pub fn cleanup_kinds<'bcx,'tcx>(mir: &mir::Mir<'tcx>) -> IndexVec<mir::BasicBlock, CleanupKind> {
+pub fn cleanup_kinds<'a, 'tcx>(mir: &mir::Mir<'tcx>) -> IndexVec<mir::BasicBlock, CleanupKind> {
     fn discover_masters<'tcx>(result: &mut IndexVec<mir::BasicBlock, CleanupKind>,
                               mir: &mir::Mir<'tcx>) {
         for (bb, data) in mir.basic_blocks().iter_enumerated() {
diff --git a/src/librustc_trans/mir/block.rs b/src/librustc_trans/mir/block.rs
index 4d78170f845aa..99b598baf8412 100644
--- a/src/librustc_trans/mir/block.rs
+++ b/src/librustc_trans/mir/block.rs
@@ -40,7 +40,7 @@ use super::operand::OperandValue::{Pair, Ref, Immediate};
 
 use std::ptr;
 
-impl<'bcx, 'tcx> MirContext<'bcx, 'tcx> {
+impl<'a, 'tcx> MirContext<'a, 'tcx> {
     pub fn trans_block(&mut self, bb: mir::BasicBlock,
         funclets: &IndexVec<mir::BasicBlock, Option<Funclet>>) {
         let mut bcx = self.build_block(bb);
@@ -669,7 +669,7 @@ impl<'bcx, 'tcx> MirContext<'bcx, 'tcx> {
     }
 
     fn trans_argument(&mut self,
-                      bcx: &BlockAndBuilder<'bcx, 'tcx>,
+                      bcx: &BlockAndBuilder<'a, 'tcx>,
                       op: OperandRef<'tcx>,
                       llargs: &mut Vec<ValueRef>,
                       fn_ty: &FnType,
@@ -745,7 +745,7 @@ impl<'bcx, 'tcx> MirContext<'bcx, 'tcx> {
     }
 
     fn trans_arguments_untupled(&mut self,
-                                bcx: &BlockAndBuilder<'bcx, 'tcx>,
+                                bcx: &BlockAndBuilder<'a, 'tcx>,
                                 operand: &mir::Operand<'tcx>,
                                 llargs: &mut Vec<ValueRef>,
                                 fn_ty: &FnType,
@@ -821,7 +821,7 @@ impl<'bcx, 'tcx> MirContext<'bcx, 'tcx> {
     }
 
 
-    fn get_personality_slot(&mut self, bcx: &BlockAndBuilder<'bcx, 'tcx>) -> ValueRef {
+    fn get_personality_slot(&mut self, bcx: &BlockAndBuilder<'a, 'tcx>) -> 
ValueRef {
         let ccx = bcx.ccx();
         if let Some(slot) = self.llpersonalityslot {
             slot
@@ -897,11 +897,11 @@ impl<'bcx, 'tcx> MirContext<'bcx, 'tcx> {
         })
     }
 
-    fn build_block(&self, bb: mir::BasicBlock) -> BlockAndBuilder<'bcx, 'tcx> {
+    fn build_block(&self, bb: mir::BasicBlock) -> BlockAndBuilder<'a, 'tcx> {
         BlockAndBuilder::new(self.blocks[bb], self.fcx)
     }
 
-    fn make_return_dest(&mut self, bcx: &BlockAndBuilder<'bcx, 'tcx>,
+    fn make_return_dest(&mut self, bcx: &BlockAndBuilder<'a, 'tcx>,
                         dest: &mir::Lvalue<'tcx>, fn_ret_ty: &ArgType,
                         llargs: &mut Vec<ValueRef>, is_intrinsic: bool) -> ReturnDest {
         // If the return is ignored, we can just return a do-nothing ReturnDest
@@ -946,7 +946,7 @@ impl<'bcx, 'tcx> MirContext<'bcx, 'tcx> {
         }
     }
 
-    fn trans_transmute(&mut self, bcx: &BlockAndBuilder<'bcx, 'tcx>,
+    fn trans_transmute(&mut self, bcx: &BlockAndBuilder<'a, 'tcx>,
                        src: &mir::Operand<'tcx>, dst: LvalueRef<'tcx>) {
         let mut val = self.trans_operand(bcx, src);
         if let ty::TyFnDef(def_id, substs, _) = val.ty.sty {
@@ -974,7 +974,7 @@ impl<'bcx, 'tcx> MirContext<'bcx, 'tcx> {
 
     // Stores the return value of a function call into it's final location.
     fn store_return(&mut self,
-                    bcx: &BlockAndBuilder<'bcx, 'tcx>,
+                    bcx: &BlockAndBuilder<'a, 'tcx>,
                     dest: ReturnDest,
                     ret_ty: ArgType,
                     op: OperandRef<'tcx>) {
diff --git a/src/librustc_trans/mir/constant.rs b/src/librustc_trans/mir/constant.rs
index 8143190a58def..03ee6b136a4f0 100644
--- a/src/librustc_trans/mir/constant.rs
+++ b/src/librustc_trans/mir/constant.rs
@@ -945,9 +945,9 @@ pub fn const_scalar_checked_binop<'a, 'tcx>(tcx: TyCtxt<'a, 'tcx, 'tcx>,
     }
 }
 
-impl<'bcx, 'tcx> MirContext<'bcx, 'tcx> {
+impl<'a, 'tcx> MirContext<'a, 'tcx> {
     pub fn trans_constant(&mut self,
-                          bcx: &BlockAndBuilder<'bcx, 'tcx>,
+                          bcx: &BlockAndBuilder<'a, 'tcx>,
                           constant: &mir::Constant<'tcx>)
                           -> Const<'tcx>
     {
diff --git a/src/librustc_trans/mir/lvalue.rs b/src/librustc_trans/mir/lvalue.rs
index c6be7eaa77562..5bfe614f45e7f 100644
--- a/src/librustc_trans/mir/lvalue.rs
+++ b/src/librustc_trans/mir/lvalue.rs
@@ -44,7 +44,7 @@ impl<'tcx> LvalueRef<'tcx> {
         LvalueRef { llval: llval, llextra: ptr::null_mut(), ty: lvalue_ty }
     }
 
-    pub fn alloca<'bcx>(bcx: &BlockAndBuilder<'bcx, 'tcx>,
+    pub fn alloca<'a>(bcx: &BlockAndBuilder<'a, 'tcx>,
                     ty: Ty<'tcx>,
                     name: &str)
                     -> LvalueRef<'tcx>
@@ -67,9 +67,9 @@ impl<'tcx> LvalueRef<'tcx> {
     }
 }
 
-impl<'bcx, 'tcx> MirContext<'bcx, 'tcx> {
+impl<'a, 'tcx> MirContext<'a, 'tcx> {
     pub fn trans_lvalue(&mut self,
-                        bcx: &BlockAndBuilder<'bcx, 'tcx>,
+                        bcx: &BlockAndBuilder<'a, 'tcx>,
                         lvalue: &mir::Lvalue<'tcx>)
                         -> LvalueRef<'tcx> {
         debug!("trans_lvalue(lvalue={:?})", lvalue);
@@ -214,7 +214,7 @@ impl<'bcx, 'tcx> MirContext<'bcx, 'tcx> {
     // Perform an action using the given Lvalue.
     // If the Lvalue is an empty LocalRef::Operand, then a temporary stack slot
    // is created first, then used as an operand to update the Lvalue.
-    pub fn with_lvalue_ref<F, U>(&mut self, bcx: &BlockAndBuilder<'bcx, 'tcx>,
+    pub fn with_lvalue_ref<F, U>(&mut self, bcx: &BlockAndBuilder<'a, 'tcx>,
                            lvalue: &mir::Lvalue<'tcx>, f: F) -> U
         where F: FnOnce(&mut Self, LvalueRef<'tcx>) -> U {
 
@@ -255,7 +255,7 @@ impl<'bcx, 'tcx> MirContext<'bcx, 'tcx> {
     ///
     /// nmatsakis: is this still necessary? Not sure.
fn prepare_index(&mut self, - bcx: &BlockAndBuilder<'bcx, 'tcx>, + bcx: &BlockAndBuilder<'a, 'tcx>, llindex: ValueRef) -> ValueRef { diff --git a/src/librustc_trans/mir/mod.rs b/src/librustc_trans/mir/mod.rs index 158e14c17aa60..8a0e5e107a80c 100644 --- a/src/librustc_trans/mir/mod.rs +++ b/src/librustc_trans/mir/mod.rs @@ -39,11 +39,11 @@ use rustc::mir::traversal; use self::operand::{OperandRef, OperandValue}; /// Master context for translating MIR. -pub struct MirContext<'bcx, 'tcx:'bcx> { - mir: &'bcx mir::Mir<'tcx>, +pub struct MirContext<'a, 'tcx:'a> { + mir: &'a mir::Mir<'tcx>, /// Function context - fcx: &'bcx common::FunctionContext<'bcx, 'tcx>, + fcx: &'a common::FunctionContext<'a, 'tcx>, /// When unwinding is initiated, we have to store this personality /// value somewhere so that we can load it and re-use it in the @@ -88,7 +88,7 @@ pub struct MirContext<'bcx, 'tcx:'bcx> { scopes: IndexVec, } -impl<'blk, 'tcx> MirContext<'blk, 'tcx> { +impl<'a, 'tcx> MirContext<'a, 'tcx> { pub fn debug_loc(&mut self, source_info: mir::SourceInfo) -> (DIScope, Span) { // Bail out if debug info emission is not enabled. match self.fcx.debug_context { @@ -152,7 +152,7 @@ enum LocalRef<'tcx> { } impl<'tcx> LocalRef<'tcx> { - fn new_operand<'bcx>(ccx: &CrateContext<'bcx, 'tcx>, + fn new_operand<'a>(ccx: &CrateContext<'a, 'tcx>, ty: ty::Ty<'tcx>) -> LocalRef<'tcx> { if common::type_is_zero_size(ccx, ty) { // Zero-size temporaries aren't always initialized, which @@ -178,7 +178,7 @@ impl<'tcx> LocalRef<'tcx> { /////////////////////////////////////////////////////////////////////////// -pub fn trans_mir<'blk, 'tcx: 'blk>(fcx: &'blk FunctionContext<'blk, 'tcx>, mir: &'blk Mir<'tcx>) { +pub fn trans_mir<'a, 'tcx: 'a>(fcx: &'a FunctionContext<'a, 'tcx>, mir: &'a Mir<'tcx>) { let bcx = fcx.get_entry_block(); // Analyze the temps to determine which must be lvalues @@ -309,11 +309,11 @@ pub fn trans_mir<'blk, 'tcx: 'blk>(fcx: &'blk FunctionContext<'blk, 'tcx>, mir: /// Produce, for each argument, a `ValueRef` pointing at the /// argument's value. As arguments are lvalues, these are always /// indirect. -fn arg_local_refs<'bcx, 'tcx>(bcx: &BlockAndBuilder<'bcx, 'tcx>, - mir: &mir::Mir<'tcx>, - scopes: &IndexVec, - lvalue_locals: &BitVector) - -> Vec> { +fn arg_local_refs<'a, 'tcx>(bcx: &BlockAndBuilder<'a, 'tcx>, + mir: &mir::Mir<'tcx>, + scopes: &IndexVec, + lvalue_locals: &BitVector) + -> Vec> { let fcx = bcx.fcx(); let tcx = bcx.tcx(); let mut idx = 0; diff --git a/src/librustc_trans/mir/operand.rs b/src/librustc_trans/mir/operand.rs index f52c08794629c..c89eb9899d11c 100644 --- a/src/librustc_trans/mir/operand.rs +++ b/src/librustc_trans/mir/operand.rs @@ -73,7 +73,7 @@ impl<'tcx> fmt::Debug for OperandRef<'tcx> { } } -impl<'bcx, 'tcx> OperandRef<'tcx> { +impl<'a, 'tcx> OperandRef<'tcx> { /// Asserts that this operand refers to a scalar and returns /// a reference to its value. pub fn immediate(self) -> ValueRef { @@ -85,7 +85,7 @@ impl<'bcx, 'tcx> OperandRef<'tcx> { /// If this operand is a Pair, we return an /// Immediate aggregate with the two values. - pub fn pack_if_pair(mut self, bcx: &BlockAndBuilder<'bcx, 'tcx>) + pub fn pack_if_pair(mut self, bcx: &BlockAndBuilder<'a, 'tcx>) -> OperandRef<'tcx> { if let OperandValue::Pair(a, b) = self.val { // Reconstruct the immediate aggregate. @@ -107,7 +107,7 @@ impl<'bcx, 'tcx> OperandRef<'tcx> { /// If this operand is a pair in an Immediate, /// we return a Pair with the two halves. 
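`pack_if_pair` above and `unpack_if_pair` below are inverses: one assembles an immediate aggregate from two scalars via `insertvalue`, the other splits it back apart with `extractvalue`. A safe-Rust analogue of the round trip, with a tuple standing in for the LLVM aggregate:

    // Models insertvalue into an undef aggregate, one index at a time.
    fn pack_pair(a: u64, b: u64) -> (u64, u64) {
        let mut agg = (0, 0); // stands in for C_undef of the pair type
        agg.0 = a;            // insertvalue agg, a, 0
        agg.1 = b;            // insertvalue agg, b, 1
        agg
    }

    // Models extractvalue from the immediate aggregate.
    fn unpack_pair(agg: (u64, u64)) -> (u64, u64) {
        (agg.0, agg.1)
    }

    fn main() {
        assert_eq!(unpack_pair(pack_pair(1, 2)), (1, 2));
    }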
- pub fn unpack_if_pair(mut self, bcx: &BlockAndBuilder<'bcx, 'tcx>) + pub fn unpack_if_pair(mut self, bcx: &BlockAndBuilder<'a, 'tcx>) -> OperandRef<'tcx> { if let OperandValue::Immediate(llval) = self.val { // Deconstruct the immediate aggregate. @@ -134,9 +134,9 @@ impl<'bcx, 'tcx> OperandRef<'tcx> { } } -impl<'bcx, 'tcx> MirContext<'bcx, 'tcx> { +impl<'a, 'tcx> MirContext<'a, 'tcx> { pub fn trans_load(&mut self, - bcx: &BlockAndBuilder<'bcx, 'tcx>, + bcx: &BlockAndBuilder<'a, 'tcx>, llval: ValueRef, ty: Ty<'tcx>) -> OperandRef<'tcx> @@ -165,7 +165,7 @@ impl<'bcx, 'tcx> MirContext<'bcx, 'tcx> { } pub fn trans_consume(&mut self, - bcx: &BlockAndBuilder<'bcx, 'tcx>, + bcx: &BlockAndBuilder<'a, 'tcx>, lvalue: &mir::Lvalue<'tcx>) -> OperandRef<'tcx> { @@ -217,7 +217,7 @@ impl<'bcx, 'tcx> MirContext<'bcx, 'tcx> { } pub fn trans_operand(&mut self, - bcx: &BlockAndBuilder<'bcx, 'tcx>, + bcx: &BlockAndBuilder<'a, 'tcx>, operand: &mir::Operand<'tcx>) -> OperandRef<'tcx> { @@ -242,7 +242,7 @@ impl<'bcx, 'tcx> MirContext<'bcx, 'tcx> { } pub fn store_operand(&mut self, - bcx: &BlockAndBuilder<'bcx, 'tcx>, + bcx: &BlockAndBuilder<'a, 'tcx>, lldest: ValueRef, operand: OperandRef<'tcx>) { debug!("store_operand: operand={:?}", operand); diff --git a/src/librustc_trans/mir/rvalue.rs b/src/librustc_trans/mir/rvalue.rs index d15598e76af6e..4a532924694c0 100644 --- a/src/librustc_trans/mir/rvalue.rs +++ b/src/librustc_trans/mir/rvalue.rs @@ -33,12 +33,12 @@ use super::constant::const_scalar_checked_binop; use super::operand::{OperandRef, OperandValue}; use super::lvalue::{LvalueRef}; -impl<'bcx, 'tcx> MirContext<'bcx, 'tcx> { +impl<'a, 'tcx> MirContext<'a, 'tcx> { pub fn trans_rvalue(&mut self, - bcx: BlockAndBuilder<'bcx, 'tcx>, + bcx: BlockAndBuilder<'a, 'tcx>, dest: LvalueRef<'tcx>, rvalue: &mir::Rvalue<'tcx>) - -> BlockAndBuilder<'bcx, 'tcx> + -> BlockAndBuilder<'a, 'tcx> { debug!("trans_rvalue(dest.llval={:?}, rvalue={:?})", Value(dest.llval), rvalue); @@ -175,9 +175,9 @@ impl<'bcx, 'tcx> MirContext<'bcx, 'tcx> { } pub fn trans_rvalue_operand(&mut self, - bcx: BlockAndBuilder<'bcx, 'tcx>, + bcx: BlockAndBuilder<'a, 'tcx>, rvalue: &mir::Rvalue<'tcx>) - -> (BlockAndBuilder<'bcx, 'tcx>, OperandRef<'tcx>) + -> (BlockAndBuilder<'a, 'tcx>, OperandRef<'tcx>) { assert!(rvalue_creates_operand(&self.mir, &bcx, rvalue), "cannot trans {:?} to operand", rvalue); @@ -483,7 +483,7 @@ impl<'bcx, 'tcx> MirContext<'bcx, 'tcx> { } pub fn trans_scalar_binop(&mut self, - bcx: &BlockAndBuilder<'bcx, 'tcx>, + bcx: &BlockAndBuilder<'a, 'tcx>, op: mir::BinOp, lhs: ValueRef, rhs: ValueRef, @@ -558,7 +558,7 @@ impl<'bcx, 'tcx> MirContext<'bcx, 'tcx> { } pub fn trans_fat_ptr_binop(&mut self, - bcx: &BlockAndBuilder<'bcx, 'tcx>, + bcx: &BlockAndBuilder<'a, 'tcx>, op: mir::BinOp, lhs_addr: ValueRef, lhs_extra: ValueRef, @@ -605,7 +605,7 @@ impl<'bcx, 'tcx> MirContext<'bcx, 'tcx> { } pub fn trans_scalar_checked_binop(&mut self, - bcx: &BlockAndBuilder<'bcx, 'tcx>, + bcx: &BlockAndBuilder<'a, 'tcx>, op: mir::BinOp, lhs: ValueRef, rhs: ValueRef, @@ -662,9 +662,9 @@ impl<'bcx, 'tcx> MirContext<'bcx, 'tcx> { } } -pub fn rvalue_creates_operand<'bcx, 'tcx>(_mir: &mir::Mir<'tcx>, - _bcx: &BlockAndBuilder<'bcx, 'tcx>, - rvalue: &mir::Rvalue<'tcx>) -> bool { +pub fn rvalue_creates_operand<'a, 'tcx>(_mir: &mir::Mir<'tcx>, + _bcx: &BlockAndBuilder<'a, 'tcx>, + rvalue: &mir::Rvalue<'tcx>) -> bool { match *rvalue { mir::Rvalue::Ref(..) | mir::Rvalue::Len(..) 
| diff --git a/src/librustc_trans/mir/statement.rs b/src/librustc_trans/mir/statement.rs index 6cc3f6aad9fd6..ddbefe43ce4f0 100644 --- a/src/librustc_trans/mir/statement.rs +++ b/src/librustc_trans/mir/statement.rs @@ -18,11 +18,11 @@ use super::LocalRef; use super::super::adt; use super::super::disr::Disr; -impl<'bcx, 'tcx> MirContext<'bcx, 'tcx> { +impl<'a, 'tcx> MirContext<'a, 'tcx> { pub fn trans_statement(&mut self, - bcx: BlockAndBuilder<'bcx, 'tcx>, + bcx: BlockAndBuilder<'a, 'tcx>, statement: &mir::Statement<'tcx>) - -> BlockAndBuilder<'bcx, 'tcx> { + -> BlockAndBuilder<'a, 'tcx> { debug!("trans_statement(statement={:?})", statement); let (scope, span) = self.debug_loc(statement.source_info); @@ -78,10 +78,10 @@ impl<'bcx, 'tcx> MirContext<'bcx, 'tcx> { } fn trans_storage_liveness(&self, - bcx: BlockAndBuilder<'bcx, 'tcx>, + bcx: BlockAndBuilder<'a, 'tcx>, lvalue: &mir::Lvalue<'tcx>, intrinsic: base::Lifetime) - -> BlockAndBuilder<'bcx, 'tcx> { + -> BlockAndBuilder<'a, 'tcx> { if let mir::Lvalue::Local(index) = *lvalue { if let LocalRef::Lvalue(tr_lval) = self.locals[index] { intrinsic.call(&bcx, tr_lval.llval); diff --git a/src/librustc_trans/tvec.rs b/src/librustc_trans/tvec.rs index 5b4cb74bf4fc6..c693a5ceabedd 100644 --- a/src/librustc_trans/tvec.rs +++ b/src/librustc_trans/tvec.rs @@ -13,13 +13,13 @@ use llvm::ValueRef; use common::*; use rustc::ty::Ty; -pub fn slice_for_each<'blk, 'tcx, F>( - bcx: &BlockAndBuilder<'blk, 'tcx>, +pub fn slice_for_each<'a, 'tcx, F>( + bcx: &BlockAndBuilder<'a, 'tcx>, data_ptr: ValueRef, unit_ty: Ty<'tcx>, len: ValueRef, f: F -) -> BlockAndBuilder<'blk, 'tcx> where F: FnOnce(&BlockAndBuilder<'blk, 'tcx>, ValueRef) { +) -> BlockAndBuilder<'a, 'tcx> where F: FnOnce(&BlockAndBuilder<'a, 'tcx>, ValueRef) { // Special-case vectors with elements of size 0 so they don't go out of bounds (#9890) let zst = type_is_zero_size(bcx.ccx(), unit_ty); let add = |bcx: &BlockAndBuilder, a, b| if zst { From 515d14f0944a5e5e6b2df206549e08ebe39e18f1 Mon Sep 17 00:00:00 2001 From: Mark Simulacrum Date: Sat, 17 Dec 2016 20:19:34 -0700 Subject: [PATCH 059/103] Inline/Replace finish with build_return_block --- src/librustc_trans/base.rs | 8 +------- src/librustc_trans/callee.rs | 4 ++-- src/librustc_trans/glue.rs | 2 +- src/librustc_trans/meth.rs | 2 +- 4 files changed, 5 insertions(+), 11 deletions(-) diff --git a/src/librustc_trans/base.rs b/src/librustc_trans/base.rs index d7efd2ce0a828..1e37f743b6fbe 100644 --- a/src/librustc_trans/base.rs +++ b/src/librustc_trans/base.rs @@ -565,12 +565,6 @@ pub fn alloc_ty<'a, 'tcx>(bcx: &BlockAndBuilder<'a, 'tcx>, ty: Ty<'tcx>, name: & } impl<'a, 'tcx> FunctionContext<'a, 'tcx> { - /// Ties up the llstaticallocas -> llloadenv -> lltop edges, - /// and builds the return block. - pub fn finish(&'a self, ret_cx: &BlockAndBuilder<'a, 'tcx>) { - self.build_return_block(ret_cx); - } - // Builds the return block for a function. 
pub fn build_return_block(&self, ret_cx: &BlockAndBuilder<'a, 'tcx>) { if self.llretslotptr.is_none() || self.fn_ty.ret.is_indirect() { @@ -711,7 +705,7 @@ pub fn trans_ctor_shim<'a, 'tcx>(ccx: &CrateContext<'a, 'tcx>, adt::trans_set_discr(&bcx, sig.output(), dest, disr); } - fcx.finish(&bcx); + fcx.build_return_block(&bcx); } pub fn llvm_linkage_by_name(name: &str) -> Option { diff --git a/src/librustc_trans/callee.rs b/src/librustc_trans/callee.rs index 23351e01df8cf..fa24c5ecf2e41 100644 --- a/src/librustc_trans/callee.rs +++ b/src/librustc_trans/callee.rs @@ -419,7 +419,7 @@ fn trans_fn_once_adapter_shim<'a, 'tcx>( bcx.unreachable(); } self_scope.trans(&bcx); - fcx.finish(&bcx); + fcx.build_return_block(&bcx); ccx.instances().borrow_mut().insert(method_instance, lloncefn); @@ -540,7 +540,7 @@ fn trans_fn_pointer_shim<'a, 'tcx>( ty: bare_fn_ty }; callee.call(&bcx, &llargs[(self_idx + 1)..], fcx.llretslotptr, None); - fcx.finish(&bcx); + fcx.build_return_block(&bcx); ccx.fn_pointer_shims().borrow_mut().insert(bare_fn_ty_maybe_ref, llfn); diff --git a/src/librustc_trans/glue.rs b/src/librustc_trans/glue.rs index 26445efe06c04..936ab0901123d 100644 --- a/src/librustc_trans/glue.rs +++ b/src/librustc_trans/glue.rs @@ -201,7 +201,7 @@ pub fn implement_drop_glue<'a, 'tcx>(ccx: &CrateContext<'a, 'tcx>, g: DropGlueKi // type, so we don't need to explicitly cast the function parameter. let bcx = make_drop_glue(bcx, get_param(llfn, 0), g); - fcx.finish(&bcx); + fcx.build_return_block(&bcx); } fn trans_custom_dtor<'a, 'tcx>(mut bcx: BlockAndBuilder<'a, 'tcx>, diff --git a/src/librustc_trans/meth.rs b/src/librustc_trans/meth.rs index b8901231f1c09..a09838f282b37 100644 --- a/src/librustc_trans/meth.rs +++ b/src/librustc_trans/meth.rs @@ -86,7 +86,7 @@ pub fn trans_object_shim<'a, 'tcx>(ccx: &'a CrateContext<'a, 'tcx>, let llargs = get_params(fcx.llfn); callee.call(&bcx, &llargs[fcx.fn_ty.ret.is_indirect() as usize..], fcx.llretslotptr, None); - fcx.finish(&bcx); + fcx.build_return_block(&bcx); llfn } From 97a2096e5e779fe2473c77b051725bf9f0f8b01d Mon Sep 17 00:00:00 2001 From: Mark Simulacrum Date: Sun, 18 Dec 2016 09:07:35 -0700 Subject: [PATCH 060/103] Inline and cleanup build_return_block --- src/librustc_trans/base.rs | 101 ++++++++++++----------------------- src/librustc_trans/callee.rs | 45 +++++++++++----- src/librustc_trans/glue.rs | 2 +- src/librustc_trans/meth.rs | 20 ++++++- 4 files changed, 87 insertions(+), 81 deletions(-) diff --git a/src/librustc_trans/base.rs b/src/librustc_trans/base.rs index 1e37f743b6fbe..f9b8b18a5ec2b 100644 --- a/src/librustc_trans/base.rs +++ b/src/librustc_trans/base.rs @@ -564,68 +564,6 @@ pub fn alloc_ty<'a, 'tcx>(bcx: &BlockAndBuilder<'a, 'tcx>, ty: Ty<'tcx>, name: & bcx.fcx().alloca(type_of::type_of(bcx.ccx(), ty), name) } -impl<'a, 'tcx> FunctionContext<'a, 'tcx> { - // Builds the return block for a function. - pub fn build_return_block(&self, ret_cx: &BlockAndBuilder<'a, 'tcx>) { - if self.llretslotptr.is_none() || self.fn_ty.ret.is_indirect() { - return ret_cx.ret_void(); - } - - let retslot = self.llretslotptr.unwrap(); - let retptr = Value(retslot); - let llty = self.fn_ty.ret.original_ty; - match (retptr.get_dominating_store(ret_cx), self.fn_ty.ret.cast) { - // If there's only a single store to the ret slot, we can directly return - // the value that was stored and omit the store and the alloca. - // However, we only want to do this when there is no cast needed. 
-            (Some(s), None) => {
-                let mut retval = s.get_operand(0).unwrap().get();
-                s.erase_from_parent();
-
-                if retptr.has_no_uses() {
-                    retptr.erase_from_parent();
-                }
-
-                if self.fn_ty.ret.is_indirect() {
-                    ret_cx.store(retval, get_param(self.llfn, 0));
-                    ret_cx.ret_void()
-                } else {
-                    if llty == Type::i1(self.ccx) {
-                        retval = ret_cx.trunc(retval, llty);
-                    }
-                    ret_cx.ret(retval)
-                }
-            }
-            (_, cast_ty) if self.fn_ty.ret.is_indirect() => {
-                // Otherwise, copy the return value to the ret slot.
-                assert_eq!(cast_ty, None);
-                let llsz = llsize_of(self.ccx, self.fn_ty.ret.ty);
-                let llalign = llalign_of_min(self.ccx, self.fn_ty.ret.ty);
-                call_memcpy(&ret_cx, get_param(self.llfn, 0),
-                            retslot, llsz, llalign as u32);
-                ret_cx.ret_void()
-            }
-            (_, Some(cast_ty)) => {
-                let load = ret_cx.load(ret_cx.pointercast(retslot, cast_ty.ptr_to()));
-                let llalign = llalign_of_min(self.ccx, self.fn_ty.ret.ty);
-                unsafe {
-                    llvm::LLVMSetAlignment(load, llalign);
-                }
-                ret_cx.ret(load)
-            }
-            (_, None) => {
-                let retval = if llty == Type::i1(self.ccx) {
-                    let val = ret_cx.load_range_assert(retslot, 0, 2, llvm::False);
-                    ret_cx.trunc(val, llty)
-                } else {
-                    ret_cx.load(retslot)
-                };
-                ret_cx.ret(retval)
-            }
-        }
-    }
-}
-
 pub fn trans_instance<'a, 'tcx>(ccx: &CrateContext<'a, 'tcx>, instance: Instance<'tcx>) {
     let _s = if ccx.sess().trans_stats() {
         let mut instance_name = String::new();
@@ -683,9 +621,17 @@ pub fn trans_ctor_shim<'a, 'tcx>(ccx: &CrateContext<'a, 'tcx>,
     let fcx = FunctionContext::new(ccx, llfndecl, fn_ty, None, false);
     let bcx = fcx.get_entry_block();
 
     if !fcx.fn_ty.ret.is_ignore() {
-        let dest = fcx.llretslotptr.unwrap();
+        // The destination is either the caller-supplied return pointer
+        // (indirect return) or a local slot to build the value in.
+        let dest = if fcx.fn_ty.ret.is_indirect() {
+            get_param(fcx.llfn, 0)
+        } else {
+            // Direct return: build the value in a stack slot of the
+            // return type's in-memory type; it is loaded and returned below.
+            fcx.alloca(fcx.fn_ty.ret.memory_ty(ccx), "sret_slot")
+        };
         let dest_val = adt::MaybeSizedValue::sized(dest); // Can return unsized value
         let mut llarg_idx = fcx.fn_ty.ret.is_indirect() as usize;
         let mut arg_idx = 0;
@@ -703,9 +649,32 @@ pub fn trans_ctor_shim<'a, 'tcx>(ccx: &CrateContext<'a, 'tcx>,
             }
         }
         adt::trans_set_discr(&bcx, sig.output(), dest, disr);
-    }
 
-    fcx.build_return_block(&bcx);
+        if fcx.fn_ty.ret.is_indirect() {
+            bcx.ret_void();
+            return;
+        }
+
+        if let Some(cast_ty) = fcx.fn_ty.ret.cast {
+            let load = bcx.load(bcx.pointercast(dest, cast_ty.ptr_to()));
+            let llalign = llalign_of_min(fcx.ccx, fcx.fn_ty.ret.ty);
+            unsafe {
+                llvm::LLVMSetAlignment(load, llalign);
+            }
+            bcx.ret(load)
+        } else {
+            let llty = fcx.fn_ty.ret.original_ty;
+            let retval = if llty == Type::i1(fcx.ccx) {
+                let val = bcx.load_range_assert(dest, 0, 2, llvm::False);
+                bcx.trunc(val, llty)
+            } else {
+                bcx.load(dest)
+            };
+            bcx.ret(retval)
+        }
+    } else {
+        bcx.ret_void();
+    }
 }
 
 pub fn llvm_linkage_by_name(name: &str) -> Option<llvm::Linkage> {
diff --git a/src/librustc_trans/callee.rs b/src/librustc_trans/callee.rs
index fa24c5ecf2e41..e0ecd1d8bf57a 100644
--- a/src/librustc_trans/callee.rs
+++ b/src/librustc_trans/callee.rs
@@ -16,7 +16,7 @@
 pub use self::CalleeData::*;
 
-use llvm::{self, ValueRef, get_params};
+use llvm::{self, ValueRef, get_param, get_params};
 use rustc::hir::def_id::DefId;
 use rustc::ty::subst::Substs;
 use rustc::traits;
@@ -390,8 +390,8 @@ fn trans_fn_once_adapter_shim<'a, 'tcx>(
 
     let fn_ret = callee.ty.fn_ret();
     let fn_ty = callee.direct_fn_type(bcx.ccx(), &[]);
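The direct-return path inlined into `trans_ctor_shim` above special-cases `i1`: a `bool` lives in memory as a byte holding 0 or 1, so the load carries a `[0, 2)` range assertion before being truncated to `i1` for the `ret`. The same invariant in safe Rust, with the range assertion reduced to a `debug_assert` (a sketch, not the real codegen):

    fn load_bool_ret(slot: &u8) -> bool {
        let v = *slot;        // load_range_assert(dest, 0, 2, ...)
        debug_assert!(v < 2); // the range the codegen promises LLVM
        v != 0                // trunc i8 -> i1
    }

    fn main() {
        assert!(load_bool_ret(&1));
        assert!(!load_bool_ret(&0));
    }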
- let first_llarg = if fn_ty.ret.is_indirect() { - fcx.llretslotptr + let first_llarg = if fn_ty.ret.is_indirect() && !fcx.fn_ty.ret.is_ignore() { + Some(get_param(fcx.llfn, 0)) } else { None }; @@ -409,17 +409,16 @@ fn trans_fn_once_adapter_shim<'a, 'tcx>( } fn_ty.apply_attrs_callsite(llret); - if !fn_ty.ret.is_indirect() { - if let Some(llretslot) = fcx.llretslotptr { - fn_ty.ret.store(&bcx, llret, llretslot); - } - } - if fn_ret.0.is_never() { bcx.unreachable(); } self_scope.trans(&bcx); - fcx.build_return_block(&bcx); + + if fcx.fn_ty.ret.is_indirect() || fcx.fn_ty.ret.is_ignore() { + bcx.ret_void(); + } else { + bcx.ret(llret); + } ccx.instances().borrow_mut().insert(method_instance, lloncefn); @@ -539,9 +538,31 @@ fn trans_fn_pointer_shim<'a, 'tcx>( data: Fn(llfnpointer), ty: bare_fn_ty }; - callee.call(&bcx, &llargs[(self_idx + 1)..], fcx.llretslotptr, None); - fcx.build_return_block(&bcx); + let fn_ret = callee.ty.fn_ret(); + let fn_ty = callee.direct_fn_type(ccx, &[]); + + let mut args = Vec::new(); + + if fn_ty.ret.is_indirect() { + if !fn_ty.ret.is_ignore() { + args.push(get_param(fcx.llfn, 0)); + } + } + args.extend_from_slice(&llargs[(self_idx + 1)..]); + + let llret = bcx.call(llfnpointer, &args, None); + fn_ty.apply_attrs_callsite(llret); + + if fn_ret.0.is_never() { + bcx.unreachable(); + } + + if fn_ty.ret.is_indirect() || fcx.fn_ty.ret.is_ignore() { + bcx.ret_void(); + } else { + bcx.ret(llret); + } ccx.fn_pointer_shims().borrow_mut().insert(bare_fn_ty_maybe_ref, llfn); llfn diff --git a/src/librustc_trans/glue.rs b/src/librustc_trans/glue.rs index 936ab0901123d..4778f36855465 100644 --- a/src/librustc_trans/glue.rs +++ b/src/librustc_trans/glue.rs @@ -201,7 +201,7 @@ pub fn implement_drop_glue<'a, 'tcx>(ccx: &CrateContext<'a, 'tcx>, g: DropGlueKi // type, so we don't need to explicitly cast the function parameter. 
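The same return epilogue now appears in `trans_fn_once_adapter_shim` and `trans_fn_pointer_shim` above, and again in `trans_object_shim` below. Condensed into a single decision with stand-in types (`FnRetInfo` models the handful of `FnType` queries involved; the real code emits the `unreachable` marker as a separate step before returning):

    struct FnRetInfo { is_indirect: bool, is_ignore: bool, is_never: bool }

    enum Exit { Unreachable, RetVoid, Ret(u64) }

    // llret stands in for the ValueRef produced by the inner call.
    fn shim_epilogue(ret: &FnRetInfo, llret: u64) -> Exit {
        if ret.is_never {
            Exit::Unreachable // diverging callee: the call never returns
        } else if ret.is_indirect || ret.is_ignore {
            Exit::RetVoid     // result already went through the out-pointer
        } else {
            Exit::Ret(llret)  // direct return: forward the value unchanged
        }
    }

    fn main() {
        let r = FnRetInfo { is_indirect: true, is_ignore: false, is_never: false };
        assert!(matches!(shim_epilogue(&r, 0), Exit::RetVoid));
    }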
let bcx = make_drop_glue(bcx, get_param(llfn, 0), g); - fcx.build_return_block(&bcx); + bcx.ret_void(); } fn trans_custom_dtor<'a, 'tcx>(mut bcx: BlockAndBuilder<'a, 'tcx>, diff --git a/src/librustc_trans/meth.rs b/src/librustc_trans/meth.rs index a09838f282b37..33b43e9a3ab06 100644 --- a/src/librustc_trans/meth.rs +++ b/src/librustc_trans/meth.rs @@ -85,8 +85,24 @@ pub fn trans_object_shim<'a, 'tcx>(ccx: &'a CrateContext<'a, 'tcx>, let bcx = fcx.get_entry_block(); let llargs = get_params(fcx.llfn); - callee.call(&bcx, &llargs[fcx.fn_ty.ret.is_indirect() as usize..], fcx.llretslotptr, None); - fcx.build_return_block(&bcx); + let fn_ret = callee.ty.fn_ret(); + let fn_ty = callee.direct_fn_type(ccx, &[]); + + let mut args = Vec::new(); + + args.extend_from_slice(&llargs); + let llret = bcx.call(callee.reify(ccx), &args, None); + fn_ty.apply_attrs_callsite(llret); + + if fn_ret.0.is_never() { + bcx.unreachable(); + } + + if fn_ty.ret.is_indirect() || fcx.fn_ty.ret.is_ignore() { + bcx.ret_void(); + } else { + bcx.ret(llret); + } llfn } From 1173db00628882300c2651e6e5d8eb6ddbcb5057 Mon Sep 17 00:00:00 2001 From: Mark Simulacrum Date: Sun, 18 Dec 2016 09:42:50 -0700 Subject: [PATCH 061/103] Inline last remaining use of Callee::call and delete unused code --- src/librustc_trans/basic_block.rs | 49 ---------- src/librustc_trans/callee.rs | 94 +----------------- src/librustc_trans/glue.rs | 15 ++- src/librustc_trans/lib.rs | 1 - src/librustc_trans/mir/mod.rs | 8 +- src/librustc_trans/value.rs | 156 +----------------------------- 6 files changed, 19 insertions(+), 304 deletions(-) delete mode 100644 src/librustc_trans/basic_block.rs diff --git a/src/librustc_trans/basic_block.rs b/src/librustc_trans/basic_block.rs deleted file mode 100644 index 50246a1c5b35a..0000000000000 --- a/src/librustc_trans/basic_block.rs +++ /dev/null @@ -1,49 +0,0 @@ -// Copyright 2013 The Rust Project Developers. See the COPYRIGHT -// file at the top-level directory of this distribution and at -// http://rust-lang.org/COPYRIGHT. -// -// Licensed under the Apache License, Version 2.0 or the MIT license -// , at your -// option. This file may not be copied, modified, or distributed -// except according to those terms. - -use llvm; -use llvm::BasicBlockRef; -use value::Value; - -#[derive(Copy, Clone)] -pub struct BasicBlock(pub BasicBlockRef); - -/// Wrapper for LLVM BasicBlockRef -impl BasicBlock { - pub fn get(&self) -> BasicBlockRef { - self.0 - } - - pub fn as_value(self) -> Value { - unsafe { - Value(llvm::LLVMBasicBlockAsValue(self.get())) - } - } - - pub fn pred_iter(self) -> impl Iterator { - self.as_value().user_iter() - .filter(|user| user.is_a_terminator_inst()) - .map(|user| user.get_parent().unwrap()) - } - - pub fn get_single_predecessor(self) -> Option { - let mut iter = self.pred_iter(); - match (iter.next(), iter.next()) { - (Some(first), None) => Some(first), - _ => None - } - } - - pub fn delete(self) { - unsafe { - llvm::LLVMDeleteBasicBlock(self.0); - } - } -} diff --git a/src/librustc_trans/callee.rs b/src/librustc_trans/callee.rs index e0ecd1d8bf57a..4dbfdc9d4999e 100644 --- a/src/librustc_trans/callee.rs +++ b/src/librustc_trans/callee.rs @@ -25,7 +25,7 @@ use attributes; use base; use base::*; use common::{ - self, BlockAndBuilder, CrateContext, FunctionContext, SharedCrateContext + self, CrateContext, FunctionContext, SharedCrateContext }; use consts; use declare; @@ -178,26 +178,6 @@ impl<'tcx> Callee<'tcx> { fn_ty } - /// This behemoth of a function translates function calls. 
Unfortunately, in - /// order to generate more efficient LLVM output at -O0, it has quite a complex - /// signature (refactoring this into two functions seems like a good idea). - /// - /// In particular, for lang items, it is invoked with a dest of None, and in - /// that case the return value contains the result of the fn. The lang item must - /// not return a structural type or else all heck breaks loose. - /// - /// For non-lang items, `dest` is always Some, and hence the result is written - /// into memory somewhere. Nonetheless we return the actual return value of the - /// function. - pub fn call<'a>(self, - bcx: &BlockAndBuilder<'a, 'tcx>, - args: &[ValueRef], - dest: Option, - lpad: Option<&'a llvm::OperandBundleDef> - ) { - trans_call_inner(bcx, self, args, dest, lpad) - } - /// Turn the callee into a function pointer. pub fn reify<'a>(self, ccx: &CrateContext<'a, 'tcx>) -> ValueRef { match self.data { @@ -666,75 +646,3 @@ fn get_fn<'a, 'tcx>(ccx: &CrateContext<'a, 'tcx>, (llfn, fn_ty) } - -// ______________________________________________________________________ -// Translating calls - -fn trans_call_inner<'a, 'tcx>( - bcx: &BlockAndBuilder<'a, 'tcx>, - callee: Callee<'tcx>, - args: &[ValueRef], - dest: Option, - lpad: Option<&'a llvm::OperandBundleDef> -) { - // Introduce a temporary cleanup scope that will contain cleanups - // for the arguments while they are being evaluated. The purpose - // this cleanup is to ensure that, should a panic occur while - // evaluating argument N, the values for arguments 0...N-1 are all - // cleaned up. If no panic occurs, the values are handed off to - // the callee, and hence none of the cleanups in this temporary - // scope will ever execute. - let ccx = bcx.ccx(); - let fn_ret = callee.ty.fn_ret(); - let fn_ty = callee.direct_fn_type(ccx, &[]); - - // If there no destination, return must be direct, with no cast. - if dest.is_none() { - assert!(!fn_ty.ret.is_indirect() && fn_ty.ret.cast.is_none()); - } - - let mut llargs = Vec::new(); - - if fn_ty.ret.is_indirect() { - let dest = dest.unwrap(); - let llretslot = if let Some(ty) = fn_ty.ret.cast { - bcx.pointercast(dest, ty.ptr_to()) - } else { - dest - }; - llargs.push(llretslot); - } - - let llfn = match callee.data { - NamedTupleConstructor(_) | Intrinsic => { - bug!("{:?} calls should not go through Callee::call", callee); - } - Virtual(idx) => { - llargs.push(args[0]); - - let fn_ptr = meth::get_virtual_method(&bcx, args[1], idx); - let llty = fn_ty.llvm_type(&bcx.ccx()).ptr_to(); - llargs.extend_from_slice(&args[2..]); - bcx.pointercast(fn_ptr, llty) - } - Fn(f) => { - llargs.extend_from_slice(args); - f - } - }; - - let llret = bcx.call(llfn, &llargs[..], lpad); - fn_ty.apply_attrs_callsite(llret); - - // If the function we just called does not use an outpointer, - // store the result into the Rust outpointer. 
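Stripped of its cleanup-scope bookkeeping, the helper being deleted here reduced to a fixed ordering that each inlined copy now repeats: put the return slot first when the ABI returns indirectly, append the real arguments, emit the call, and hand a direct result back to the caller. A compressed sketch under those assumptions (`Value` is a stand-in for `ValueRef`):

    #[derive(Clone, Copy, PartialEq, Debug)]
    struct Value(u64);

    fn lower_call(
        ret_indirect: bool,
        dest: Option<Value>,
        args: &[Value],
        emit_call: impl FnOnce(&[Value]) -> Value,
    ) -> Option<Value> {
        let mut llargs = Vec::new();
        if ret_indirect {
            // the destination slot becomes the hidden first argument
            llargs.push(dest.expect("indirect return needs a destination"));
        }
        llargs.extend_from_slice(args);
        let llret = emit_call(&llargs);
        // for direct returns the caller stores llret into dest itself
        if ret_indirect { None } else { Some(llret) }
    }

    fn main() {
        assert_eq!(lower_call(false, None, &[Value(7)], |a| a[0]), Some(Value(7)));
    }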
-    if !fn_ty.ret.is_indirect() {
-        if let Some(llretslot) = dest {
-            fn_ty.ret.store(&bcx, llret, llretslot);
-        }
-    }
-
-    if fn_ret.0.is_never() {
-        bcx.unreachable();
-    }
-}
diff --git a/src/librustc_trans/glue.rs b/src/librustc_trans/glue.rs
index 4778f36855465..b1d5955c18d8e 100644
--- a/src/librustc_trans/glue.rs
+++ b/src/librustc_trans/glue.rs
@@ -44,7 +44,20 @@ pub fn trans_exchange_free_dyn<'a, 'tcx>(
 ) {
     let def_id = langcall(bcx.tcx(), None, "<exchange free>", ExchangeFreeFnLangItem);
     let args = [bcx.pointercast(v, Type::i8p(bcx.ccx())), size, align];
-    Callee::def(bcx.ccx(), def_id, bcx.tcx().intern_substs(&[])).call(&bcx, &args, None, None)
+    let callee = Callee::def(bcx.ccx(), def_id, bcx.tcx().intern_substs(&[]));
+
+    let ccx = bcx.ccx();
+    let fn_ret = callee.ty.fn_ret();
+    let fn_ty = callee.direct_fn_type(ccx, &[]);
+
+    assert!(!fn_ty.ret.is_indirect() && fn_ty.ret.cast.is_none());
+
+    let llret = bcx.call(callee.reify(ccx), &args[..], None);
+    fn_ty.apply_attrs_callsite(llret);
+
+    if fn_ret.0.is_never() {
+        bcx.unreachable();
+    }
 }
 
 pub fn trans_exchange_free_ty<'a, 'tcx>(
diff --git a/src/librustc_trans/lib.rs b/src/librustc_trans/lib.rs
index bd8121e2b9c68..2fb0e8c24c540 100644
--- a/src/librustc_trans/lib.rs
+++ b/src/librustc_trans/lib.rs
@@ -96,7 +96,6 @@ mod asm;
 mod assert_module_sources;
 mod attributes;
 mod base;
-mod basic_block;
 mod builder;
 mod cabi_aarch64;
 mod cabi_arm;
diff --git a/src/librustc_trans/mir/mod.rs b/src/librustc_trans/mir/mod.rs
index 8a0e5e107a80c..496e14ff2d9d5 100644
--- a/src/librustc_trans/mir/mod.rs
+++ b/src/librustc_trans/mir/mod.rs
@@ -26,8 +26,6 @@ use syntax::symbol::keywords;
 
 use std::iter;
 
-use basic_block::BasicBlock;
-
 use rustc_data_structures::bitvec::BitVector;
 use rustc_data_structures::indexed_vec::{IndexVec, Idx};
 
@@ -296,12 +294,12 @@ pub fn trans_mir<'a, 'tcx: 'a>(fcx: &'a FunctionContext<'a, 'tcx>, mir: &'a Mir<
     // Remove blocks that haven't been visited, or have no
     // predecessors.
     for bb in mir.basic_blocks().indices() {
-        let block = mircx.blocks[bb];
-        let block = BasicBlock(block);
         // Unreachable block
         if !visited.contains(bb.index()) {
             debug!("trans_mir: block {:?} was not visited", bb);
-            block.delete();
+            unsafe {
+                llvm::LLVMDeleteBasicBlock(mircx.blocks[bb]);
+            }
         }
     }
 }
diff --git a/src/librustc_trans/value.rs b/src/librustc_trans/value.rs
index b314f3ea414f6..287ad87caacf9 100644
--- a/src/librustc_trans/value.rs
+++ b/src/librustc_trans/value.rs
@@ -9,16 +9,11 @@
 // except according to those terms.
 
 use llvm;
-use llvm::{UseRef, ValueRef};
-use basic_block::BasicBlock;
-use common::BlockAndBuilder;
 
 use std::fmt;
 
-use libc::c_uint;
-
 #[derive(Copy, Clone, PartialEq)]
-pub struct Value(pub ValueRef);
+pub struct Value(pub llvm::ValueRef);
 
 impl fmt::Debug for Value {
     fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
@@ -27,152 +22,3 @@ impl fmt::Debug for Value {
         }).expect("non-UTF8 value description from LLVM"))
     }
 }
-
-macro_rules!
opt_val { ($e:expr) => ( - unsafe { - match $e { - p if !p.is_null() => Some(Value(p)), - _ => None - } - } -) } - -/// Wrapper for LLVM ValueRef -impl Value { - /// Returns the native ValueRef - pub fn get(&self) -> ValueRef { - let Value(v) = *self; v - } - - /// Returns the BasicBlock that contains this value - pub fn get_parent(self) -> Option { - unsafe { - match llvm::LLVMGetInstructionParent(self.get()) { - p if !p.is_null() => Some(BasicBlock(p)), - _ => None - } - } - } - - /// Removes this value from its containing BasicBlock - pub fn erase_from_parent(self) { - unsafe { - llvm::LLVMInstructionEraseFromParent(self.get()); - } - } - - /// Returns the single dominating store to this value, if any - /// This only performs a search for a trivially dominating store. The store - /// must be the only user of this value, and there must not be any conditional - /// branches between the store and the given block. - pub fn get_dominating_store(self, bcx: &BlockAndBuilder) -> Option { - match self.get_single_user().and_then(|user| user.as_store_inst()) { - Some(store) => { - store.get_parent().and_then(|store_bb| { - let mut bb = BasicBlock(bcx.llbb()); - let mut ret = Some(store); - while bb.get() != store_bb.get() { - match bb.get_single_predecessor() { - Some(pred) => bb = pred, - None => { ret = None; break } - } - } - ret - }) - } - _ => None - } - } - - /// Returns the first use of this value, if any - pub fn get_first_use(self) -> Option { - unsafe { - match llvm::LLVMGetFirstUse(self.get()) { - u if !u.is_null() => Some(Use(u)), - _ => None - } - } - } - - /// Tests if there are no uses of this value - pub fn has_no_uses(self) -> bool { - self.get_first_use().is_none() - } - - /// Returns the single user of this value - /// If there are no users or multiple users, this returns None - pub fn get_single_user(self) -> Option { - let mut iter = self.user_iter(); - match (iter.next(), iter.next()) { - (Some(first), None) => Some(first), - _ => None - } - } - - /// Returns an iterator for the users of this value - pub fn user_iter(self) -> Users { - Users { - next: self.get_first_use() - } - } - - /// Returns the requested operand of this instruction - /// Returns None, if there's no operand at the given index - pub fn get_operand(self, i: usize) -> Option { - opt_val!(llvm::LLVMGetOperand(self.get(), i as c_uint)) - } - - /// Returns the Store represent by this value, if any - pub fn as_store_inst(self) -> Option { - opt_val!(llvm::LLVMIsAStoreInst(self.get())) - } - - /// Tests if this value is a terminator instruction - pub fn is_a_terminator_inst(self) -> bool { - unsafe { - !llvm::LLVMIsATerminatorInst(self.get()).is_null() - } - } -} - -/// Wrapper for LLVM UseRef -#[derive(Copy, Clone)] -pub struct Use(UseRef); - -impl Use { - pub fn get(&self) -> UseRef { - let Use(v) = *self; v - } - - pub fn get_user(self) -> Value { - unsafe { - Value(llvm::LLVMGetUser(self.get())) - } - } - - pub fn get_next_use(self) -> Option { - unsafe { - match llvm::LLVMGetNextUse(self.get()) { - u if !u.is_null() => Some(Use(u)), - _ => None - } - } - } -} - -/// Iterator for the users of a value -pub struct Users { - next: Option -} - -impl Iterator for Users { - type Item = Value; - - fn next(&mut self) -> Option { - let current = self.next; - - self.next = current.and_then(|u| u.get_next_use()); - - current.map(|u| u.get_user()) - } -} From a802b9f75fc7ac6985cb6b865fbc503584f139b6 Mon Sep 17 00:00:00 2001 From: Mark Simulacrum Date: Sun, 18 Dec 2016 09:46:11 -0700 Subject: [PATCH 062/103] Inline 
get_funclet

---
 src/librustc_trans/cleanup.rs | 16 +++++-----------
 1 file changed, 5 insertions(+), 11 deletions(-)

diff --git a/src/librustc_trans/cleanup.rs b/src/librustc_trans/cleanup.rs
index ee02fe1d7f5cf..7ba36e457529e 100644
--- a/src/librustc_trans/cleanup.rs
+++ b/src/librustc_trans/cleanup.rs
@@ -71,16 +71,6 @@ impl UnwindKind {
             }
         }
     }
-
-    fn get_funclet(&self, bcx: &BlockAndBuilder) -> Option<Funclet> {
-        match *self {
-            UnwindKind::CleanupPad(_) => {
-                let pad = bcx.cleanup_pad(None, &[]);
-                Funclet::msvc(pad)
-            },
-            UnwindKind::LandingPad => Funclet::gnu(),
-        }
-    }
 }
 
 impl PartialEq for UnwindKind {
@@ -212,7 +202,11 @@ impl<'tcx> CleanupScope<'tcx> {
         let mut cleanup = fcx.build_new_block("clean_custom_");
 
         // Insert cleanup instructions into the cleanup block
-        drop_val.trans(val.get_funclet(&cleanup).as_ref(), &cleanup);
+        let funclet = match val {
+            UnwindKind::CleanupPad(_) => Funclet::msvc(cleanup.cleanup_pad(None, &[])),
+            UnwindKind::LandingPad => Funclet::gnu(),
+        };
+        drop_val.trans(funclet.as_ref(), &cleanup);
 
         // Insert instruction into cleanup block to branch to the exit
         val.branch(&mut cleanup, resume_bcx.llbb());
From fc8c280fbc5eb7af126857a7fc18952f042be92c Mon Sep 17 00:00:00 2001
From: Mark Simulacrum
Date: Sun, 18 Dec 2016 10:18:47 -0700
Subject: [PATCH 063/103] Remove lifetime parameter

---
 src/librustc_trans/debuginfo/create_scope_map.rs | 6 ++----
 1 file changed, 2 insertions(+), 4 deletions(-)

diff --git a/src/librustc_trans/debuginfo/create_scope_map.rs b/src/librustc_trans/debuginfo/create_scope_map.rs
index 0f20ed285b60e..b1fdb07427425 100644
--- a/src/librustc_trans/debuginfo/create_scope_map.rs
+++ b/src/librustc_trans/debuginfo/create_scope_map.rs
@@ -44,10 +44,8 @@ impl MirDebugScope {
 
 /// Produce DIScope DIEs for each MIR Scope which has variables defined in it.
 /// If debuginfo is disabled, the returned vector is empty.
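The two-line win in PATCH 063 comes from lifetime elision: nothing in `create_mir_scopes` ties the borrow of `mir` to `'tcx`, so the explicit parameter only restated what the compiler can infer. The same transformation on a toy signature (`Mir` is a stand-in):

    struct Mir<'tcx>(std::marker::PhantomData<&'tcx ()>);

    // Before: the borrow was pinned to 'tcx even though the result
    // captures nothing from it.
    fn scopes_explicit<'tcx>(mir: &'tcx Mir<'tcx>) -> usize {
        let _ = mir;
        0
    }

    // After: each reference gets its own elided lifetime.
    fn scopes_elided(mir: &Mir) -> usize {
        let _ = mir;
        0
    }

    fn main() {
        let mir = Mir(std::marker::PhantomData);
        assert_eq!(scopes_explicit(&mir), scopes_elided(&mir));
    }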
-pub fn create_mir_scopes<'tcx>( - fcx: &FunctionContext, - mir: &'tcx Mir<'tcx>, -) -> IndexVec { +pub fn create_mir_scopes(fcx: &FunctionContext, mir: &Mir) + -> IndexVec { let null_scope = MirDebugScope { scope_metadata: ptr::null_mut(), file_start_pos: BytePos(0), From 2b9a0efef470fd061c70592892f6e40844415344 Mon Sep 17 00:00:00 2001 From: Mark Simulacrum Date: Sun, 18 Dec 2016 11:08:57 -0700 Subject: [PATCH 064/103] Move debug_context to MirContext from FunctionContext --- src/librustc_trans/base.rs | 2 +- src/librustc_trans/common.rs | 37 ++------------ .../debuginfo/create_scope_map.rs | 4 +- src/librustc_trans/debuginfo/mod.rs | 5 +- src/librustc_trans/debuginfo/source_loc.rs | 19 +++---- src/librustc_trans/mir/block.rs | 7 +-- src/librustc_trans/mir/mod.rs | 51 +++++++++++++++---- src/librustc_trans/mir/statement.rs | 3 +- 8 files changed, 66 insertions(+), 62 deletions(-) diff --git a/src/librustc_trans/base.rs b/src/librustc_trans/base.rs index f9b8b18a5ec2b..bd0b4bafe6247 100644 --- a/src/librustc_trans/base.rs +++ b/src/librustc_trans/base.rs @@ -602,7 +602,7 @@ pub fn trans_instance<'a, 'tcx>(ccx: &CrateContext<'a, 'tcx>, instance: Instance let fcx = FunctionContext::new(ccx, lldecl, fn_ty, Some((instance, &sig, abi)), true); let mir = ccx.tcx().item_mir(instance.def); - mir::trans_mir(&fcx, &mir); + mir::trans_mir(&fcx, &mir, instance, &sig, abi); } pub fn trans_ctor_shim<'a, 'tcx>(ccx: &CrateContext<'a, 'tcx>, diff --git a/src/librustc_trans/common.rs b/src/librustc_trans/common.rs index 123e0609f0693..899a3bdb20a90 100644 --- a/src/librustc_trans/common.rs +++ b/src/librustc_trans/common.rs @@ -16,7 +16,6 @@ use session::Session; use llvm; use llvm::{ValueRef, BasicBlockRef, ContextRef, TypeKind}; use llvm::{True, False, Bool, OperandBundleDef, get_param}; -use llvm::debuginfo::DIScope; use monomorphize::Instance; use rustc::hir::def::Def; use rustc::hir::def_id::DefId; @@ -30,7 +29,6 @@ use base; use builder::Builder; use callee::Callee; use consts; -use debuginfo; use declare; use machine; use monomorphize; @@ -267,9 +265,6 @@ pub struct FunctionContext<'a, 'tcx: 'a> { // This function's enclosing crate context. pub ccx: &'a CrateContext<'a, 'tcx>, - // Used and maintained by the debuginfo module. 
- pub debug_context: debuginfo::FunctionDebugContext, - alloca_builder: Builder<'a, 'tcx>, } @@ -283,33 +278,12 @@ impl<'a, 'tcx> FunctionContext<'a, 'tcx> { definition: Option<(Instance<'tcx>, &ty::FnSig<'tcx>, Abi)>, skip_retptr: bool, ) -> FunctionContext<'a, 'tcx> { - let (param_substs, def_id) = match definition { + let param_substs = match definition { Some((instance, ..)) => { assert!(!instance.substs.needs_infer()); - (instance.substs, Some(instance.def)) + instance.substs } - None => (ccx.tcx().intern_substs(&[]), None) - }; - - let local_id = def_id.and_then(|id| ccx.tcx().map.as_local_node_id(id)); - - debug!("FunctionContext::new({})", definition.map_or(String::new(), |d| d.0.to_string())); - - let no_debug = if let Some(id) = local_id { - ccx.tcx().map.attrs(id).iter().any(|item| item.check_name("no_debug")) - } else if let Some(def_id) = def_id { - ccx.sess().cstore.item_attrs(def_id).iter().any(|item| item.check_name("no_debug")) - } else { - false - }; - - let mir = def_id.map(|id| ccx.tcx().item_mir(id)); - - let debug_context = if let (false, Some((instance, sig, abi)), &Some(ref mir)) = - (no_debug, definition, &mir) { - debuginfo::create_function_debug_context(ccx, instance, sig, abi, llfndecl, mir) - } else { - debuginfo::empty_function_debug_context(ccx) + None => ccx.tcx().intern_substs(&[]) }; let mut fcx = FunctionContext { @@ -320,7 +294,6 @@ impl<'a, 'tcx> FunctionContext<'a, 'tcx> { fn_ty: fn_ty, param_substs: param_substs, ccx: ccx, - debug_context: debug_context, alloca_builder: Builder::with_ccx(ccx), }; @@ -507,10 +480,6 @@ impl<'a, 'tcx> BlockAndBuilder<'a, 'tcx> { } } - pub fn set_source_location(&self, scope: DIScope, sp: Span) { - debuginfo::set_source_location(self.fcx(), self, scope, sp) - } - pub fn at_start(&self, f: F) -> R where F: FnOnce(&BlockAndBuilder<'a, 'tcx>) -> R { diff --git a/src/librustc_trans/debuginfo/create_scope_map.rs b/src/librustc_trans/debuginfo/create_scope_map.rs index b1fdb07427425..f5a8eeacf38ad 100644 --- a/src/librustc_trans/debuginfo/create_scope_map.rs +++ b/src/librustc_trans/debuginfo/create_scope_map.rs @@ -44,7 +44,7 @@ impl MirDebugScope { /// Produce DIScope DIEs for each MIR Scope which has variables defined in it. /// If debuginfo is disabled, the returned vector is empty. 
-pub fn create_mir_scopes(fcx: &FunctionContext, mir: &Mir) +pub fn create_mir_scopes(fcx: &FunctionContext, mir: &Mir, debug_context: &FunctionDebugContext) -> IndexVec { let null_scope = MirDebugScope { scope_metadata: ptr::null_mut(), @@ -53,7 +53,7 @@ pub fn create_mir_scopes(fcx: &FunctionContext, mir: &Mir) }; let mut scopes = IndexVec::from_elem(null_scope, &mir.visibility_scopes); - let fn_metadata = match fcx.debug_context { + let fn_metadata = match *debug_context { FunctionDebugContext::RegularContext(ref data) => data.fn_metadata, FunctionDebugContext::DebugInfoDisabled | FunctionDebugContext::FunctionWithoutDebugInfo => { diff --git a/src/librustc_trans/debuginfo/mod.rs b/src/librustc_trans/debuginfo/mod.rs index c130c4bc99399..b22bb080d0529 100644 --- a/src/librustc_trans/debuginfo/mod.rs +++ b/src/librustc_trans/debuginfo/mod.rs @@ -28,6 +28,7 @@ use rustc::ty::subst::Substs; use abi::Abi; use common::{CrateContext, BlockAndBuilder}; +use mir::MirContext; use monomorphize::{self, Instance}; use rustc::ty::{self, Ty}; use rustc::mir; @@ -225,6 +226,7 @@ pub fn create_function_debug_context<'a, 'tcx>(cx: &CrateContext<'a, 'tcx>, // This can be the case for functions inlined from another crate if span == syntax_pos::DUMMY_SP { + // FIXME(simulacrum): Probably can't happen; remove. return FunctionDebugContext::FunctionWithoutDebugInfo; } @@ -432,6 +434,7 @@ pub fn create_function_debug_context<'a, 'tcx>(cx: &CrateContext<'a, 'tcx>, } pub fn declare_local<'a, 'tcx>(bcx: &BlockAndBuilder<'a, 'tcx>, + mir: &MirContext, variable_name: ast::Name, variable_type: Ty<'tcx>, scope_metadata: DIScope, @@ -493,7 +496,7 @@ pub fn declare_local<'a, 'tcx>(bcx: &BlockAndBuilder<'a, 'tcx>, match variable_kind { ArgumentVariable(_) | CapturedVariable => { - assert!(!bcx.fcx().debug_context.get_ref(span).source_locations_enabled.get()); + assert!(!mir.debug_context.get_ref(span).source_locations_enabled.get()); source_loc::set_debug_location(cx, bcx, UnknownLocation); } _ => { /* nothing to do */ } diff --git a/src/librustc_trans/debuginfo/source_loc.rs b/src/librustc_trans/debuginfo/source_loc.rs index 2a168e342d05e..16b32f2e3d635 100644 --- a/src/librustc_trans/debuginfo/source_loc.rs +++ b/src/librustc_trans/debuginfo/source_loc.rs @@ -17,7 +17,8 @@ use super::FunctionDebugContext; use llvm; use llvm::debuginfo::DIScope; use builder::Builder; -use common::{CrateContext, FunctionContext}; +use common::CrateContext; +use mir::MirContext; use libc::c_uint; use std::ptr; @@ -26,24 +27,24 @@ use syntax_pos::{Span, Pos}; /// Sets the current debug location at the beginning of the span. /// /// Maps to a call to llvm::LLVMSetCurrentDebugLocation(...). 
-pub fn set_source_location(fcx: &FunctionContext, builder: &Builder, scope: DIScope, span: Span) { - let function_debug_context = match fcx.debug_context { +pub fn set_source_location(mir: &MirContext, builder: &Builder, scope: DIScope, span: Span) { + let function_debug_context = match mir.debug_context { FunctionDebugContext::DebugInfoDisabled => return, FunctionDebugContext::FunctionWithoutDebugInfo => { - set_debug_location(fcx.ccx, builder, UnknownLocation); + set_debug_location(mir.ccx(), builder, UnknownLocation); return; } FunctionDebugContext::RegularContext(ref data) => data }; let dbg_loc = if function_debug_context.source_locations_enabled.get() { - debug!("set_source_location: {}", fcx.ccx.sess().codemap().span_to_string(span)); - let loc = span_start(fcx.ccx, span); + debug!("set_source_location: {}", mir.ccx().sess().codemap().span_to_string(span)); + let loc = span_start(mir.ccx(), span); InternalDebugLocation::new(scope, loc.line, loc.col.to_usize()) } else { UnknownLocation }; - set_debug_location(fcx.ccx, builder, dbg_loc); + set_debug_location(mir.ccx(), builder, dbg_loc); } /// Enables emitting source locations for the given functions. @@ -52,8 +53,8 @@ pub fn set_source_location(fcx: &FunctionContext, builder: &Builder, scope: DISc /// they are disabled when beginning to translate a new function. This functions /// switches source location emitting on and must therefore be called before the /// first real statement/expression of the function is translated. -pub fn start_emitting_source_locations(fcx: &FunctionContext) { - match fcx.debug_context { +pub fn start_emitting_source_locations(mir: &MirContext) { + match mir.debug_context { FunctionDebugContext::RegularContext(ref data) => { data.source_locations_enabled.set(true) }, diff --git a/src/librustc_trans/mir/block.rs b/src/librustc_trans/mir/block.rs index 99b598baf8412..83a45b7a41e3e 100644 --- a/src/librustc_trans/mir/block.rs +++ b/src/librustc_trans/mir/block.rs @@ -20,6 +20,7 @@ use callee::{Callee, CalleeData, Fn, Intrinsic, NamedTupleConstructor, Virtual}; use common::{self, BlockAndBuilder, Funclet}; use common::{C_bool, C_str_slice, C_struct, C_u32, C_undef}; use consts; +use debuginfo; use Disr; use machine::{llalign_of_min, llbitsize_of_real}; use meth; @@ -114,7 +115,7 @@ impl<'a, 'tcx> MirContext<'a, 'tcx> { let span = terminator.source_info.span; let (scope, debug_span) = self.debug_loc(terminator.source_info); - bcx.set_source_location(scope, debug_span); + debuginfo::set_source_location(self, &bcx, scope, debug_span); match terminator.kind { mir::TerminatorKind::Resume => { if let Some(cleanup_pad) = cleanup_pad { @@ -326,7 +327,7 @@ impl<'a, 'tcx> MirContext<'a, 'tcx> { // After this point, bcx is the block for the call to panic. bcx = panic_block; - bcx.set_source_location(scope, debug_span); + debuginfo::set_source_location(self, &bcx, scope, debug_span); // Get the location information. 
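`set_source_location` above gates on the debug context in three ways: a disabled context emits nothing, a function without debug info clears the current location, and a regular context emits real locations only once the prologue is done. The same decision table with stand-in types (a sketch of the control flow, not the LLVM calls):

    enum DebugCtx { Disabled, NoDebugInfo, Regular { locations_enabled: bool } }

    #[derive(PartialEq, Debug)]
    enum Loc { Unknown, Known(usize, usize) }

    // None means: leave the debug location untouched.
    fn source_loc(ctx: &DebugCtx, line: usize, col: usize) -> Option<Loc> {
        match ctx {
            DebugCtx::Disabled => None,
            DebugCtx::NoDebugInfo => Some(Loc::Unknown),
            DebugCtx::Regular { locations_enabled } => Some(
                if *locations_enabled { Loc::Known(line, col) } else { Loc::Unknown },
            ),
        }
    }

    fn main() {
        let ctx = DebugCtx::Regular { locations_enabled: false };
        assert_eq!(source_loc(&ctx, 3, 7), Some(Loc::Unknown));
    }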
let loc = bcx.sess().codemap().lookup_char_pos(span.lo); @@ -642,7 +643,7 @@ impl<'a, 'tcx> MirContext<'a, 'tcx> { if let Some((_, target)) = *destination { let ret_bcx = self.build_block(target); ret_bcx.at_start(|ret_bcx| { - bcx.set_source_location(scope, debug_span); + debuginfo::set_source_location(self, &ret_bcx, scope, debug_span); let op = OperandRef { val: Immediate(invokeret), ty: sig.output(), diff --git a/src/librustc_trans/mir/mod.rs b/src/librustc_trans/mir/mod.rs index 496e14ff2d9d5..f1fe38d7b0c5f 100644 --- a/src/librustc_trans/mir/mod.rs +++ b/src/librustc_trans/mir/mod.rs @@ -18,11 +18,13 @@ use session::config::FullDebugInfo; use base; use common::{self, BlockAndBuilder, CrateContext, FunctionContext, C_null, Funclet}; use debuginfo::{self, declare_local, VariableAccess, VariableKind, FunctionDebugContext}; +use monomorphize::Instance; use machine; use type_of; use syntax_pos::{DUMMY_SP, NO_EXPANSION, COMMAND_LINE_EXPN, BytePos, Span}; use syntax::symbol::keywords; +use syntax::abi::Abi; use std::iter; @@ -38,7 +40,9 @@ use self::operand::{OperandRef, OperandValue}; /// Master context for translating MIR. pub struct MirContext<'a, 'tcx:'a> { - mir: &'a mir::Mir<'tcx>, + pub mir: &'a mir::Mir<'tcx>, + + pub debug_context: debuginfo::FunctionDebugContext, /// Function context fcx: &'a common::FunctionContext<'a, 'tcx>, @@ -89,7 +93,7 @@ pub struct MirContext<'a, 'tcx:'a> { impl<'a, 'tcx> MirContext<'a, 'tcx> { pub fn debug_loc(&mut self, source_info: mir::SourceInfo) -> (DIScope, Span) { // Bail out if debug info emission is not enabled. - match self.fcx.debug_context { + match self.debug_context { FunctionDebugContext::DebugInfoDisabled | FunctionDebugContext::FunctionWithoutDebugInfo => { return (self.scopes[source_info.scope].scope_metadata, source_info.span); @@ -142,6 +146,10 @@ impl<'a, 'tcx> MirContext<'a, 'tcx> { scope_metadata } } + + pub fn ccx(&self) -> &'a CrateContext<'a, 'tcx> { + self.fcx.ccx + } } enum LocalRef<'tcx> { @@ -176,7 +184,26 @@ impl<'tcx> LocalRef<'tcx> { /////////////////////////////////////////////////////////////////////////// -pub fn trans_mir<'a, 'tcx: 'a>(fcx: &'a FunctionContext<'a, 'tcx>, mir: &'a Mir<'tcx>) { +pub fn trans_mir<'a, 'tcx: 'a>( + fcx: &'a FunctionContext<'a, 'tcx>, + mir: &'a Mir<'tcx>, + instance: Instance<'tcx>, + sig: &ty::FnSig<'tcx>, + abi: Abi, +) { + let def_id = instance.def; + let local_id = fcx.ccx.tcx().map.as_local_node_id(def_id); + let no_debug = if let Some(id) = local_id { + fcx.ccx.tcx().map.attrs(id).iter().any(|item| item.check_name("no_debug")) + } else { + fcx.ccx.sess().cstore.item_attrs(def_id).iter().any(|item| item.check_name("no_debug")) + }; + + let debug_context = if !no_debug { + debuginfo::create_function_debug_context(fcx.ccx, instance, sig, abi, fcx.llfn, mir) + } else { + debuginfo::empty_function_debug_context(fcx.ccx) + }; let bcx = fcx.get_entry_block(); // Analyze the temps to determine which must be lvalues @@ -195,7 +222,7 @@ pub fn trans_mir<'a, 'tcx: 'a>(fcx: &'a FunctionContext<'a, 'tcx>, mir: &'a Mir< }).collect(); // Compute debuginfo scopes from MIR scopes. 
- let scopes = debuginfo::create_mir_scopes(fcx, mir); + let scopes = debuginfo::create_mir_scopes(fcx, mir, &debug_context); let mut mircx = MirContext { mir: mir, @@ -207,11 +234,12 @@ pub fn trans_mir<'a, 'tcx: 'a>(fcx: &'a FunctionContext<'a, 'tcx>, mir: &'a Mir< landing_pads: IndexVec::from_elem(None, mir.basic_blocks()), scopes: scopes, locals: IndexVec::new(), + debug_context: debug_context, }; // Allocate variable and temp allocas mircx.locals = { - let args = arg_local_refs(&bcx, &mir, &mircx.scopes, &lvalue_locals); + let args = arg_local_refs(&bcx, &mircx, &mircx.scopes, &lvalue_locals); let mut allocate_local = |local| { let decl = &mir.local_decls[local]; @@ -232,7 +260,7 @@ pub fn trans_mir<'a, 'tcx: 'a>(fcx: &'a FunctionContext<'a, 'tcx>, mir: &'a Mir< let lvalue = LvalueRef::alloca(&bcx, ty, &name.as_str()); if dbg { let (scope, span) = mircx.debug_loc(source_info); - declare_local(&bcx, name, ty, scope, + declare_local(&bcx, &mircx, name, ty, scope, VariableAccess::DirectVariable { alloca: lvalue.llval }, VariableKind::LocalVariable, span); } @@ -270,7 +298,7 @@ pub fn trans_mir<'a, 'tcx: 'a>(fcx: &'a FunctionContext<'a, 'tcx>, mir: &'a Mir< // Up until here, IR instructions for this function have explicitly not been annotated with // source code location, so we don't step into call setup code. From here on, source location // emitting should be enabled. - debuginfo::start_emitting_source_locations(fcx); + debuginfo::start_emitting_source_locations(&mircx); let mut visited = BitVector::new(mir.basic_blocks().len()); @@ -308,10 +336,11 @@ pub fn trans_mir<'a, 'tcx: 'a>(fcx: &'a FunctionContext<'a, 'tcx>, mir: &'a Mir< /// argument's value. As arguments are lvalues, these are always /// indirect. fn arg_local_refs<'a, 'tcx>(bcx: &BlockAndBuilder<'a, 'tcx>, - mir: &mir::Mir<'tcx>, + mircx: &MirContext<'a, 'tcx>, scopes: &IndexVec, lvalue_locals: &BitVector) -> Vec> { + let mir = mircx.mir; let fcx = bcx.fcx(); let tcx = bcx.tcx(); let mut idx = 0; @@ -363,7 +392,7 @@ fn arg_local_refs<'a, 'tcx>(bcx: &BlockAndBuilder<'a, 'tcx>, let variable_access = VariableAccess::DirectVariable { alloca: lltemp }; - declare_local(bcx, arg_decl.name.unwrap_or(keywords::Invalid.name()), + declare_local(bcx, mircx, arg_decl.name.unwrap_or(keywords::Invalid.name()), arg_ty, scope, variable_access, VariableKind::ArgumentVariable(arg_index + 1), DUMMY_SP); @@ -435,7 +464,7 @@ fn arg_local_refs<'a, 'tcx>(bcx: &BlockAndBuilder<'a, 'tcx>, arg_scope.map(|scope| { // Is this a regular argument? 
if arg_index > 0 || mir.upvar_decls.is_empty() { - declare_local(bcx, arg_decl.name.unwrap_or(keywords::Invalid.name()), arg_ty, + declare_local(bcx, mircx, arg_decl.name.unwrap_or(keywords::Invalid.name()), arg_ty, scope, VariableAccess::DirectVariable { alloca: llval }, VariableKind::ArgumentVariable(arg_index + 1), DUMMY_SP); @@ -503,7 +532,7 @@ fn arg_local_refs<'a, 'tcx>(bcx: &BlockAndBuilder<'a, 'tcx>, alloca: env_ptr, address_operations: &ops }; - declare_local(bcx, decl.debug_name, ty, scope, variable_access, + declare_local(bcx, mircx, decl.debug_name, ty, scope, variable_access, VariableKind::CapturedVariable, DUMMY_SP); } diff --git a/src/librustc_trans/mir/statement.rs b/src/librustc_trans/mir/statement.rs index ddbefe43ce4f0..62ee768ab0754 100644 --- a/src/librustc_trans/mir/statement.rs +++ b/src/librustc_trans/mir/statement.rs @@ -11,6 +11,7 @@ use rustc::mir; use base; +use debuginfo; use common::{self, BlockAndBuilder}; use super::MirContext; @@ -26,7 +27,7 @@ impl<'a, 'tcx> MirContext<'a, 'tcx> { debug!("trans_statement(statement={:?})", statement); let (scope, span) = self.debug_loc(statement.source_info); - bcx.set_source_location(scope, span); + debuginfo::set_source_location(self, &bcx, scope, span); match statement.kind { mir::StatementKind::Assign(ref lvalue, ref rvalue) => { if let mir::Lvalue::Local(index) = *lvalue { From a42a3429fe357fc5a455e96d65cda7465d0be860 Mon Sep 17 00:00:00 2001 From: Mark Simulacrum Date: Sun, 18 Dec 2016 11:50:07 -0700 Subject: [PATCH 065/103] Move param_env onto SharedCrateContext, and move functions which need the ParamEnv onto it. --- src/librustc_trans/abi.rs | 6 ++-- src/librustc_trans/adt.rs | 5 ++-- src/librustc_trans/base.rs | 13 ++++---- src/librustc_trans/cleanup.rs | 4 +-- src/librustc_trans/collector.rs | 30 +++++++++---------- src/librustc_trans/common.rs | 26 ++++------------ src/librustc_trans/context.rs | 11 +++++++ src/librustc_trans/glue.rs | 48 +++++++++++++----------------- src/librustc_trans/intrinsic.rs | 10 +++---- src/librustc_trans/mir/analyze.rs | 5 ++-- src/librustc_trans/mir/block.rs | 16 +++++----- src/librustc_trans/mir/constant.rs | 16 +++++----- src/librustc_trans/mir/lvalue.rs | 2 +- src/librustc_trans/mir/mod.rs | 6 ++-- src/librustc_trans/mir/operand.rs | 2 +- src/librustc_trans/mir/rvalue.rs | 12 ++++---- src/librustc_trans/trans_item.rs | 2 +- src/librustc_trans/type_of.rs | 12 ++++---- 18 files changed, 107 insertions(+), 119 deletions(-) diff --git a/src/librustc_trans/abi.rs b/src/librustc_trans/abi.rs index 681bad1461ce6..0f876eadd73c0 100644 --- a/src/librustc_trans/abi.rs +++ b/src/librustc_trans/abi.rs @@ -420,7 +420,7 @@ impl FnType { let ret_ty = sig.output(); let mut ret = arg_of(ret_ty, true); - if !type_is_fat_ptr(ccx.tcx(), ret_ty) { + if !type_is_fat_ptr(ccx, ret_ty) { // The `noalias` attribute on the return value is useful to a // function ptr caller. if let ty::TyBox(_) = ret_ty.sty { @@ -485,7 +485,7 @@ impl FnType { for ty in inputs.iter().chain(extra_args.iter()) { let mut arg = arg_of(ty, false); - if type_is_fat_ptr(ccx.tcx(), ty) { + if type_is_fat_ptr(ccx, ty) { let original_tys = arg.original_ty.field_types(); let sizing_tys = arg.ty.field_types(); assert_eq!((original_tys.len(), sizing_tys.len()), (2, 2)); @@ -558,7 +558,7 @@ impl FnType { }; // Fat pointers are returned by-value. 
if !self.ret.is_ignore() { - if !type_is_fat_ptr(ccx.tcx(), sig.output()) { + if !type_is_fat_ptr(ccx, sig.output()) { fixup(&mut self.ret); } } diff --git a/src/librustc_trans/adt.rs b/src/librustc_trans/adt.rs index 4a06982cd379d..a59e2fe506f48 100644 --- a/src/librustc_trans/adt.rs +++ b/src/librustc_trans/adt.rs @@ -293,7 +293,7 @@ fn struct_llfields<'a, 'tcx>(cx: &CrateContext<'a, 'tcx>, fields: &Vec> sizing: bool, dst: bool) -> Vec { let fields = variant.field_index_by_increasing_offset().map(|i| fields[i as usize]); if sizing { - fields.filter(|ty| !dst || type_is_sized(cx.tcx(), *ty)) + fields.filter(|ty| !dst || cx.shared().type_is_sized(*ty)) .map(|ty| type_of::sizing_type_of(cx, ty)).collect() } else { fields.map(|ty| type_of::in_memory_type_of(cx, ty)).collect() @@ -586,7 +586,8 @@ fn struct_field_ptr<'a, 'tcx>( // * First field - Always aligned properly // * Packed struct - There is no alignment padding // * Field is sized - pointer is properly aligned already - if st.offsets[ix] == layout::Size::from_bytes(0) || st.packed || type_is_sized(bcx.tcx(), fty) { + if st.offsets[ix] == layout::Size::from_bytes(0) || st.packed || + bcx.ccx().shared().type_is_sized(fty) { return bcx.struct_gep(ptr_val, st.memory_index[ix] as usize); } diff --git a/src/librustc_trans/base.rs b/src/librustc_trans/base.rs index bd0b4bafe6247..35507c6e6ab7c 100644 --- a/src/librustc_trans/base.rs +++ b/src/librustc_trans/base.rs @@ -232,7 +232,7 @@ pub fn unsize_thin_ptr<'a, 'tcx>( &ty::TyRawPtr(ty::TypeAndMut { ty: b, .. })) | (&ty::TyRawPtr(ty::TypeAndMut { ty: a, .. }), &ty::TyRawPtr(ty::TypeAndMut { ty: b, .. })) => { - assert!(common::type_is_sized(bcx.tcx(), a)); + assert!(bcx.ccx().shared().type_is_sized(a)); let ptr_ty = type_of::in_memory_type_of(bcx.ccx(), b).ptr_to(); (bcx.pointercast(src, ptr_ty), unsized_info(bcx.ccx(), a, b, None)) } @@ -252,7 +252,7 @@ pub fn coerce_unsized_into<'a, 'tcx>(bcx: &BlockAndBuilder<'a, 'tcx>, (&ty::TyRef(..), &ty::TyRef(..)) | (&ty::TyRef(..), &ty::TyRawPtr(..)) | (&ty::TyRawPtr(..), &ty::TyRawPtr(..)) => { - let (base, info) = if common::type_is_fat_ptr(bcx.tcx(), src_ty) { + let (base, info) = if common::type_is_fat_ptr(bcx.ccx(), src_ty) { // fat-ptr to fat-ptr unsize preserves the vtable // i.e. &'a fmt::Debug+Send => &'a fmt::Debug // So we need to pointercast the base to ensure @@ -412,8 +412,7 @@ pub fn load_ty<'a, 'tcx>(b: &Builder<'a, 'tcx>, ptr: ValueRef, t: Ty<'tcx>) -> V // a char is a Unicode codepoint, and so takes values from 0 // to 0x10FFFF inclusive only. 
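The `[0, 0x10FFFF]` bound asserted on the load below is a sound over-approximation: it still contains the surrogate gap that `char` itself excludes, so every valid `char` satisfies it, but not every value inside it is a valid `char`. The boundary, checked in plain Rust:

    fn main() {
        assert!(char::from_u32(0x10FFFF).is_some()); // last valid scalar value
        assert!(char::from_u32(0x110000).is_none()); // past the asserted range
        assert!(char::from_u32(0xD800).is_none());   // surrogate: invalid, yet
                                                     // inside the asserted range
    }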
b.load_range_assert(ptr, 0, 0x10FFFF + 1, llvm::False) - } else if (t.is_region_ptr() || t.is_unique()) && - !common::type_is_fat_ptr(ccx.tcx(), t) { + } else if (t.is_region_ptr() || t.is_unique()) && !common::type_is_fat_ptr(ccx, t) { b.load_nonnull(ptr) } else { b.load(ptr) @@ -425,7 +424,7 @@ pub fn load_ty<'a, 'tcx>(b: &Builder<'a, 'tcx>, ptr: ValueRef, t: Ty<'tcx>) -> V pub fn store_ty<'a, 'tcx>(cx: &BlockAndBuilder<'a, 'tcx>, v: ValueRef, dst: ValueRef, t: Ty<'tcx>) { debug!("store_ty: {:?} : {:?} <- {:?}", Value(dst), t, Value(v)); - if common::type_is_fat_ptr(cx.tcx(), t) { + if common::type_is_fat_ptr(cx.ccx(), t) { let lladdr = cx.extract_value(v, abi::FAT_PTR_ADDR); let llextra = cx.extract_value(v, abi::FAT_PTR_EXTRA); store_fat_ptr(cx, lladdr, llextra, dst, t); @@ -538,7 +537,7 @@ pub fn memcpy_ty<'a, 'tcx>( let llsz = llsize_of(ccx, llty); let llalign = type_of::align_of(ccx, t); call_memcpy(bcx, dst, src, llsz, llalign as u32); - } else if common::type_is_fat_ptr(bcx.tcx(), t) { + } else if common::type_is_fat_ptr(bcx.ccx(), t) { let (data, extra) = load_fat_ptr(bcx, src, t); store_fat_ptr(bcx, data, extra, dst, t); } else { @@ -639,7 +638,7 @@ pub fn trans_ctor_shim<'a, 'tcx>(ccx: &CrateContext<'a, 'tcx>, let lldestptr = adt::trans_field_ptr(&bcx, sig.output(), dest_val, Disr::from(disr), i); let arg = &fcx.fn_ty.args[arg_idx]; arg_idx += 1; - if common::type_is_fat_ptr(bcx.tcx(), arg_ty) { + if common::type_is_fat_ptr(bcx.ccx(), arg_ty) { let meta = &fcx.fn_ty.args[arg_idx]; arg_idx += 1; arg.store_fn_arg(&bcx, &mut llarg_idx, get_dataptr(&bcx, lldestptr)); diff --git a/src/librustc_trans/cleanup.rs b/src/librustc_trans/cleanup.rs index 7ba36e457529e..7b655bbd60675 100644 --- a/src/librustc_trans/cleanup.rs +++ b/src/librustc_trans/cleanup.rs @@ -86,7 +86,7 @@ impl PartialEq for UnwindKind { impl<'a, 'tcx> FunctionContext<'a, 'tcx> { /// Schedules a (deep) drop of `val`, which is a pointer to an instance of `ty` pub fn schedule_drop_mem(&self, val: ValueRef, ty: Ty<'tcx>) -> CleanupScope<'tcx> { - if !self.type_needs_drop(ty) { return CleanupScope::noop(); } + if !self.ccx.shared().type_needs_drop(ty) { return CleanupScope::noop(); } let drop = DropValue { val: val, ty: ty, @@ -106,7 +106,7 @@ impl<'a, 'tcx> FunctionContext<'a, 'tcx> { pub fn schedule_drop_adt_contents(&self, val: ValueRef, ty: Ty<'tcx>) -> CleanupScope<'tcx> { // `if` below could be "!contents_needs_drop"; skipping drop // is just an optimization, so sound to be conservative. 
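Both `schedule_drop_*` entry points start with the same early-out visible above: if the (now crate-global) needs-drop query says no, no cleanup scope is materialized at all. In outline, with `CleanupScope` and the query reduced to stand-ins:

    struct CleanupScope { armed: bool }

    impl CleanupScope {
        fn noop() -> CleanupScope {
            CleanupScope { armed: false }
        }
    }

    fn schedule_drop(type_needs_drop: bool) -> CleanupScope {
        if !type_needs_drop {
            return CleanupScope::noop(); // skipping is purely an optimization
        }
        CleanupScope { armed: true }
    }

    fn main() {
        assert!(!schedule_drop(false).armed);
        assert!(schedule_drop(true).armed);
    }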
- if !self.type_needs_drop(ty) { return CleanupScope::noop(); } + if !self.ccx.shared().type_needs_drop(ty) { return CleanupScope::noop(); } let drop = DropValue { val: val, diff --git a/src/librustc_trans/collector.rs b/src/librustc_trans/collector.rs index 3af3ada66b3e9..d8c212745376d 100644 --- a/src/librustc_trans/collector.rs +++ b/src/librustc_trans/collector.rs @@ -208,7 +208,7 @@ use syntax::abi::Abi; use syntax_pos::DUMMY_SP; use base::custom_coerce_unsize_info; use context::SharedCrateContext; -use common::{fulfill_obligation, type_is_sized}; +use common::fulfill_obligation; use glue::{self, DropGlueKind}; use monomorphize::{self, Instance}; use util::nodemap::{FxHashSet, FxHashMap, DefIdMap}; @@ -337,7 +337,7 @@ fn collect_items_rec<'a, 'tcx: 'a>(scx: &SharedCrateContext<'a, 'tcx>, TransItem::Static(node_id) => { let def_id = scx.tcx().map.local_def_id(node_id); let ty = scx.tcx().item_type(def_id); - let ty = glue::get_drop_glue_type(scx.tcx(), ty); + let ty = glue::get_drop_glue_type(scx, ty); neighbors.push(TransItem::DropGlue(DropGlueKind::Ty(ty))); recursion_depth_reset = None; @@ -542,7 +542,7 @@ impl<'a, 'tcx> MirVisitor<'tcx> for MirNeighborCollector<'a, 'tcx> { self.param_substs, &ty); assert!(ty.is_normalized_for_trans()); - let ty = glue::get_drop_glue_type(self.scx.tcx(), ty); + let ty = glue::get_drop_glue_type(self.scx, ty); self.output.push(TransItem::DropGlue(DropGlueKind::Ty(ty))); } @@ -678,7 +678,7 @@ impl<'a, 'tcx> MirVisitor<'tcx> for MirNeighborCollector<'a, 'tcx> { let operand_ty = monomorphize::apply_param_substs(self.scx, self.param_substs, &mt.ty); - let ty = glue::get_drop_glue_type(tcx, operand_ty); + let ty = glue::get_drop_glue_type(self.scx, operand_ty); self.output.push(TransItem::DropGlue(DropGlueKind::Ty(ty))); } else { bug!("Has the drop_in_place() intrinsic's signature changed?") @@ -804,17 +804,17 @@ fn find_drop_glue_neighbors<'a, 'tcx>(scx: &SharedCrateContext<'a, 'tcx>, let field_type = monomorphize::apply_param_substs(scx, substs, &field_type); - let field_type = glue::get_drop_glue_type(scx.tcx(), field_type); + let field_type = glue::get_drop_glue_type(scx, field_type); - if glue::type_needs_drop(scx.tcx(), field_type) { + if scx.type_needs_drop(field_type) { output.push(TransItem::DropGlue(DropGlueKind::Ty(field_type))); } } } ty::TyClosure(def_id, substs) => { for upvar_ty in substs.upvar_tys(def_id, scx.tcx()) { - let upvar_ty = glue::get_drop_glue_type(scx.tcx(), upvar_ty); - if glue::type_needs_drop(scx.tcx(), upvar_ty) { + let upvar_ty = glue::get_drop_glue_type(scx, upvar_ty); + if scx.type_needs_drop(upvar_ty) { output.push(TransItem::DropGlue(DropGlueKind::Ty(upvar_ty))); } } @@ -822,15 +822,15 @@ fn find_drop_glue_neighbors<'a, 'tcx>(scx: &SharedCrateContext<'a, 'tcx>, ty::TyBox(inner_type) | ty::TySlice(inner_type) | ty::TyArray(inner_type, _) => { - let inner_type = glue::get_drop_glue_type(scx.tcx(), inner_type); - if glue::type_needs_drop(scx.tcx(), inner_type) { + let inner_type = glue::get_drop_glue_type(scx, inner_type); + if scx.type_needs_drop(inner_type) { output.push(TransItem::DropGlue(DropGlueKind::Ty(inner_type))); } } ty::TyTuple(args) => { for arg in args { - let arg = glue::get_drop_glue_type(scx.tcx(), arg); - if glue::type_needs_drop(scx.tcx(), arg) { + let arg = glue::get_drop_glue_type(scx, arg); + if scx.type_needs_drop(arg) { output.push(TransItem::DropGlue(DropGlueKind::Ty(arg))); } } @@ -969,7 +969,7 @@ fn find_vtable_types_for_unsizing<'a, 'tcx>(scx: &SharedCrateContext<'a, 'tcx>, 
&ty::TyRawPtr(ty::TypeAndMut { ty: b, .. })) => { let (inner_source, inner_target) = (a, b); - if !type_is_sized(scx.tcx(), inner_source) { + if !scx.type_is_sized(inner_source) { (inner_source, inner_target) } else { scx.tcx().struct_lockstep_tails(inner_source, inner_target) @@ -1051,7 +1051,7 @@ fn create_trans_items_for_vtable_methods<'a, 'tcx>(scx: &SharedCrateContext<'a, output.extend(methods); } // Also add the destructor - let dg_type = glue::get_drop_glue_type(scx.tcx(), impl_ty); + let dg_type = glue::get_drop_glue_type(scx, impl_ty); output.push(TransItem::DropGlue(DropGlueKind::Ty(dg_type))); } } @@ -1097,7 +1097,7 @@ impl<'b, 'a, 'v> ItemLikeVisitor<'v> for RootCollector<'b, 'a, 'v> { def_id_to_string(self.scx.tcx(), def_id)); let ty = self.scx.tcx().item_type(def_id); - let ty = glue::get_drop_glue_type(self.scx.tcx(), ty); + let ty = glue::get_drop_glue_type(self.scx, ty); self.output.push(TransItem::DropGlue(DropGlueKind::Ty(ty))); } } diff --git a/src/librustc_trans/common.rs b/src/librustc_trans/common.rs index 899a3bdb20a90..528ecf2a42665 100644 --- a/src/librustc_trans/common.rs +++ b/src/librustc_trans/common.rs @@ -48,21 +48,16 @@ use std::ffi::CString; use syntax::ast; use syntax::symbol::{Symbol, InternedString}; -use syntax_pos::{DUMMY_SP, Span}; +use syntax_pos::Span; pub use context::{CrateContext, SharedCrateContext}; -/// Is the type's representation size known at compile time? -pub fn type_is_sized<'a, 'tcx>(tcx: TyCtxt<'a, 'tcx, 'tcx>, ty: Ty<'tcx>) -> bool { - ty.is_sized(tcx, &tcx.empty_parameter_environment(), DUMMY_SP) -} - -pub fn type_is_fat_ptr<'a, 'tcx>(tcx: TyCtxt<'a, 'tcx, 'tcx>, ty: Ty<'tcx>) -> bool { +pub fn type_is_fat_ptr<'a, 'tcx>(ccx: &CrateContext<'a, 'tcx>, ty: Ty<'tcx>) -> bool { match ty.sty { ty::TyRawPtr(ty::TypeAndMut{ty, ..}) | ty::TyRef(_, ty::TypeAndMut{ty, ..}) | ty::TyBox(ty) => { - !type_is_sized(tcx, ty) + !ccx.shared().type_is_sized(ty) } _ => { false @@ -74,14 +69,13 @@ pub fn type_is_immediate<'a, 'tcx>(ccx: &CrateContext<'a, 'tcx>, ty: Ty<'tcx>) - use machine::llsize_of_alloc; use type_of::sizing_type_of; - let tcx = ccx.tcx(); let simple = ty.is_scalar() || ty.is_unique() || ty.is_region_ptr() || ty.is_simd(); - if simple && !type_is_fat_ptr(tcx, ty) { + if simple && !type_is_fat_ptr(ccx, ty) { return true; } - if !type_is_sized(tcx, ty) { + if !ccx.shared().type_is_sized(ty) { return false; } match ty.sty { @@ -239,9 +233,6 @@ pub struct FunctionContext<'a, 'tcx: 'a> { // section of the executable we're generating. pub llfn: ValueRef, - // always an empty parameter-environment NOTE: @jroesch another use of ParamEnv - param_env: ty::ParameterEnvironment<'tcx>, - // A pointer to where to store the return value. If the return type is // immediate, this points to an alloca in the function. Otherwise, it's a // pointer to the hidden first parameter of the function. After function @@ -289,7 +280,6 @@ impl<'a, 'tcx> FunctionContext<'a, 'tcx> { let mut fcx = FunctionContext { llfn: llfndecl, llretslotptr: None, - param_env: ccx.tcx().empty_parameter_environment(), alloca_insert_pt: None, fn_ty: fn_ty, param_substs: param_substs, @@ -358,12 +348,6 @@ impl<'a, 'tcx> FunctionContext<'a, 'tcx> { value) } - /// This is the same as `common::type_needs_drop`, except that it - /// may use or update caches within this `FunctionContext`. 
- pub fn type_needs_drop(&self, ty: Ty<'tcx>) -> bool { - self.ccx.tcx().type_needs_drop_given_env(ty, &self.param_env) - } - pub fn eh_personality(&self) -> ValueRef { // The exception handling personality function. // diff --git a/src/librustc_trans/context.rs b/src/librustc_trans/context.rs index 9578182b0c185..d9fc21dd79f65 100644 --- a/src/librustc_trans/context.rs +++ b/src/librustc_trans/context.rs @@ -40,6 +40,7 @@ use std::rc::Rc; use std::str; use syntax::ast; use syntax::symbol::InternedString; +use syntax_pos::DUMMY_SP; use abi::FnType; pub struct Stats { @@ -67,6 +68,7 @@ pub struct SharedCrateContext<'a, 'tcx: 'a> { exported_symbols: NodeSet, link_meta: LinkMeta, tcx: TyCtxt<'a, 'tcx, 'tcx>, + param_env: ty::ParameterEnvironment<'tcx>, stats: Stats, check_overflow: bool, @@ -454,6 +456,7 @@ impl<'b, 'tcx> SharedCrateContext<'b, 'tcx> { export_map: export_map, exported_symbols: exported_symbols, link_meta: link_meta, + param_env: tcx.empty_parameter_environment(), tcx: tcx, stats: Stats { n_glues_created: Cell::new(0), @@ -474,6 +477,14 @@ impl<'b, 'tcx> SharedCrateContext<'b, 'tcx> { } } + pub fn type_needs_drop(&self, ty: Ty<'tcx>) -> bool { + self.tcx.type_needs_drop_given_env(ty, &self.param_env) + } + + pub fn type_is_sized(&self, ty: Ty<'tcx>) -> bool { + ty.is_sized(self.tcx, &self.param_env, DUMMY_SP) + } + pub fn metadata_llmod(&self) -> ModuleRef { self.metadata_llmod } diff --git a/src/librustc_trans/glue.rs b/src/librustc_trans/glue.rs index b1d5955c18d8e..1176de4010a17 100644 --- a/src/librustc_trans/glue.rs +++ b/src/librustc_trans/glue.rs @@ -19,7 +19,7 @@ use llvm::{ValueRef, get_param}; use middle::lang_items::ExchangeFreeFnLangItem; use rustc::ty::subst::{Substs}; use rustc::traits; -use rustc::ty::{self, AdtKind, Ty, TyCtxt, TypeFoldable}; +use rustc::ty::{self, AdtKind, Ty, TypeFoldable}; use adt; use base::*; use callee::Callee; @@ -63,7 +63,7 @@ pub fn trans_exchange_free_dyn<'a, 'tcx>( pub fn trans_exchange_free_ty<'a, 'tcx>( bcx: &BlockAndBuilder<'a, 'tcx>, ptr: ValueRef, content_ty: Ty<'tcx> ) { - assert!(type_is_sized(bcx.ccx().tcx(), content_ty)); + assert!(bcx.ccx().shared().type_is_sized(content_ty)); let sizing_type = sizing_type_of(bcx.ccx(), content_ty); let content_size = llsize_of_alloc(bcx.ccx(), sizing_type); @@ -75,18 +75,14 @@ pub fn trans_exchange_free_ty<'a, 'tcx>( } } -pub fn type_needs_drop<'a, 'tcx>(tcx: TyCtxt<'a, 'tcx, 'tcx>, ty: Ty<'tcx>) -> bool { - tcx.type_needs_drop_given_env(ty, &tcx.empty_parameter_environment()) -} - -pub fn get_drop_glue_type<'a, 'tcx>(tcx: TyCtxt<'a, 'tcx, 'tcx>, t: Ty<'tcx>) -> Ty<'tcx> { +pub fn get_drop_glue_type<'a, 'tcx>(scx: &SharedCrateContext<'a, 'tcx>, t: Ty<'tcx>) -> Ty<'tcx> { assert!(t.is_normalized_for_trans()); - let t = tcx.erase_regions(&t); + let t = scx.tcx().erase_regions(&t); // Even if there is no dtor for t, there might be one deeper down and we // might need to pass in the vtable ptr. - if !type_is_sized(tcx, t) { + if !scx.type_is_sized(t) { return t; } @@ -99,17 +95,16 @@ pub fn get_drop_glue_type<'a, 'tcx>(tcx: TyCtxt<'a, 'tcx, 'tcx>, t: Ty<'tcx>) -> // returned `tcx.types.i8` does not appear unsound. The impact on // code quality is unknown at this time.) 
- if !type_needs_drop(tcx, t) {
- return tcx.types.i8;
+ if !scx.type_needs_drop(t) {
+ return scx.tcx().types.i8;
 }
 match t.sty {
- ty::TyBox(typ) if !type_needs_drop(tcx, typ)
- && type_is_sized(tcx, typ) => {
- tcx.infer_ctxt(None, None, traits::Reveal::All).enter(|infcx| {
+ ty::TyBox(typ) if !scx.type_needs_drop(typ) && scx.type_is_sized(typ) => {
+ scx.tcx().infer_ctxt(None, None, traits::Reveal::All).enter(|infcx| {
 let layout = t.layout(&infcx).unwrap();
- if layout.size(&tcx.data_layout).bytes() == 0 {
+ if layout.size(&scx.tcx().data_layout).bytes() == 0 {
 // `Box<ZeroSizeType>` does not allocate.
- tcx.types.i8
+ scx.tcx().types.i8
 } else {
 t
 }
@@ -132,7 +127,7 @@ pub fn call_drop_glue<'a, 'tcx>(
 ) {
 // NB: v is an *alias* of type t here, not a direct value.
 debug!("call_drop_glue(t={:?}, skip_dtor={})", t, skip_dtor);
- if bcx.fcx().type_needs_drop(t) {
+ if bcx.ccx().shared().type_needs_drop(t) {
 let ccx = bcx.ccx();
 let g = if skip_dtor {
 DropGlueKind::TyContents(t)
@@ -140,7 +135,7 @@ pub fn call_drop_glue<'a, 'tcx>(
 DropGlueKind::Ty(t)
 };
 let glue = get_drop_glue_core(ccx, g);
- let glue_type = get_drop_glue_type(ccx.tcx(), t);
+ let glue_type = get_drop_glue_type(ccx.shared(), t);
 let ptr = if glue_type != t {
 bcx.pointercast(v, type_of(ccx, glue_type).ptr_to())
 } else {
@@ -184,7 +179,7 @@ impl<'tcx> DropGlueKind<'tcx> {
 }
 fn get_drop_glue_core<'a, 'tcx>(ccx: &CrateContext<'a, 'tcx>, g: DropGlueKind<'tcx>) -> ValueRef {
- let g = g.map_ty(|t| get_drop_glue_type(ccx.tcx(), t));
+ let g = g.map_ty(|t| get_drop_glue_type(ccx.shared(), t));
 match ccx.drop_glues().borrow().get(&g) {
 Some(&(glue, _)) => glue,
 None => {
@@ -197,8 +192,7 @@ fn get_drop_glue_core<'a, 'tcx>(ccx: &CrateContext<'a, 'tcx>, g: DropGlueKind<'t
 }
 pub fn implement_drop_glue<'a, 'tcx>(ccx: &CrateContext<'a, 'tcx>, g: DropGlueKind<'tcx>) {
- let tcx = ccx.tcx();
- assert_eq!(g.ty(), get_drop_glue_type(tcx, g.ty()));
+ assert_eq!(g.ty(), get_drop_glue_type(ccx.shared(), g.ty()));
 let (llfn, fn_ty) = ccx.drop_glues().borrow().get(&g).unwrap().clone();
 let fcx = FunctionContext::new(ccx, llfn, fn_ty, None, false);
@@ -243,7 +237,7 @@ fn trans_custom_dtor<'a, 'tcx>(mut bcx: BlockAndBuilder<'a, 'tcx>,
 };
 let (sized_args, unsized_args);
- let args: &[ValueRef] = if type_is_sized(tcx, t) {
+ let args: &[ValueRef] = if bcx.ccx().shared().type_is_sized(t) {
 sized_args = [v0];
 &sized_args
 } else {
@@ -284,7 +278,7 @@ pub fn size_and_align_of_dst<'a, 'tcx>(bcx: &BlockAndBuilder<'a, 'tcx>,
 -> (ValueRef, ValueRef) {
 debug!("calculate size of DST: {}; with lost info: {:?}", t, Value(info));
- if type_is_sized(bcx.tcx(), t) {
+ if bcx.ccx().shared().type_is_sized(t) {
 let sizing_type = sizing_type_of(bcx.ccx(), t);
 let size = llsize_of_alloc(bcx.ccx(), sizing_type);
 let align = align_of(bcx.ccx(), t);
@@ -405,7 +399,7 @@ fn make_drop_glue<'a, 'tcx>(bcx: BlockAndBuilder<'a, 'tcx>,
 // special. It may move to library and have Drop impl. As
 // a safe-guard, assert TyBox not used with TyContents.
assert!(!skip_dtor); - if !type_is_sized(bcx.tcx(), content_ty) { + if !bcx.ccx().shared().type_is_sized(content_ty) { let llval = get_dataptr(&bcx, v0); let llbox = bcx.load(llval); drop_ty(&bcx, v0, content_ty); @@ -458,7 +452,7 @@ fn make_drop_glue<'a, 'tcx>(bcx: BlockAndBuilder<'a, 'tcx>, bcx } _ => { - if bcx.fcx().type_needs_drop(t) { + if bcx.ccx().shared().type_needs_drop(t) { drop_structural_ty(bcx, v0, t) } else { bcx @@ -485,7 +479,7 @@ fn drop_structural_ty<'a, 'tcx>(cx: BlockAndBuilder<'a, 'tcx>, } } - let value = if type_is_sized(cx.tcx(), t) { + let value = if cx.ccx().shared().type_is_sized(t) { adt::MaybeSizedValue::sized(av) } else { // FIXME(#36457) -- we should pass unsized values as two arguments @@ -525,7 +519,7 @@ fn drop_structural_ty<'a, 'tcx>(cx: BlockAndBuilder<'a, 'tcx>, for (i, &Field(_, field_ty)) in fields.iter().enumerate() { let llfld_a = adt::trans_field_ptr(&cx, t, value, Disr::from(discr), i); - let val = if type_is_sized(cx.tcx(), field_ty) { + let val = if cx.ccx().shared().type_is_sized(field_ty) { llfld_a } else { // FIXME(#36457) -- we should pass unsized values as two arguments diff --git a/src/librustc_trans/intrinsic.rs b/src/librustc_trans/intrinsic.rs index caa4e75f0b02a..fd5048f8c5938 100644 --- a/src/librustc_trans/intrinsic.rs +++ b/src/librustc_trans/intrinsic.rs @@ -143,7 +143,7 @@ pub fn trans_intrinsic_call<'a, 'tcx>(bcx: &BlockAndBuilder<'a, 'tcx>, } "size_of_val" => { let tp_ty = substs.type_at(0); - if !type_is_sized(tcx, tp_ty) { + if !bcx.ccx().shared().type_is_sized(tp_ty) { let (llsize, _) = glue::size_and_align_of_dst(bcx, tp_ty, llargs[1]); llsize @@ -158,7 +158,7 @@ pub fn trans_intrinsic_call<'a, 'tcx>(bcx: &BlockAndBuilder<'a, 'tcx>, } "min_align_of_val" => { let tp_ty = substs.type_at(0); - if !type_is_sized(tcx, tp_ty) { + if !bcx.ccx().shared().type_is_sized(tp_ty) { let (_, llalign) = glue::size_and_align_of_dst(bcx, tp_ty, llargs[1]); llalign @@ -197,7 +197,7 @@ pub fn trans_intrinsic_call<'a, 'tcx>(bcx: &BlockAndBuilder<'a, 'tcx>, "needs_drop" => { let tp_ty = substs.type_at(0); - C_bool(ccx, bcx.fcx().type_needs_drop(tp_ty)) + C_bool(ccx, bcx.ccx().shared().type_needs_drop(tp_ty)) } "offset" => { let ptr = llargs[0]; @@ -243,7 +243,7 @@ pub fn trans_intrinsic_call<'a, 'tcx>(bcx: &BlockAndBuilder<'a, 'tcx>, }, "volatile_store" => { let tp_ty = substs.type_at(0); - if type_is_fat_ptr(bcx.tcx(), tp_ty) { + if type_is_fat_ptr(bcx.ccx(), tp_ty) { bcx.volatile_store(llargs[1], get_dataptr(bcx, llargs[0])); bcx.volatile_store(llargs[2], get_meta(bcx, llargs[0])); } else { @@ -551,7 +551,7 @@ pub fn trans_intrinsic_call<'a, 'tcx>(bcx: &BlockAndBuilder<'a, 'tcx>, // This assumes the type is "simple", i.e. no // destructors, and the contents are SIMD // etc. 
- assert!(!bcx.fcx().type_needs_drop(arg_type)); + assert!(!bcx.ccx().shared().type_needs_drop(arg_type)); let arg = adt::MaybeSizedValue::sized(llarg); (0..contents.len()) .map(|i| { diff --git a/src/librustc_trans/mir/analyze.rs b/src/librustc_trans/mir/analyze.rs index 21b021cacc985..38e21bdefb2c8 100644 --- a/src/librustc_trans/mir/analyze.rs +++ b/src/librustc_trans/mir/analyze.rs @@ -17,7 +17,6 @@ use rustc::mir::{self, Location, TerminatorKind}; use rustc::mir::visit::{Visitor, LvalueContext}; use rustc::mir::traversal; use common::{self, BlockAndBuilder}; -use glue; use super::rvalue; pub fn lvalue_locals<'a, 'tcx>(bcx: &BlockAndBuilder<'a, 'tcx>, mir: &mir::Mir<'tcx>) -> BitVector { @@ -37,7 +36,7 @@ pub fn lvalue_locals<'a, 'tcx>(bcx: &BlockAndBuilder<'a, 'tcx>, mir: &mir::Mir<' // These sorts of types are immediates that we can store // in an ValueRef without an alloca. assert!(common::type_is_immediate(bcx.ccx(), ty) || - common::type_is_fat_ptr(bcx.tcx(), ty)); + common::type_is_fat_ptr(bcx.ccx(), ty)); } else if common::type_is_imm_pair(bcx.ccx(), ty) { // We allow pairs and uses of any of their 2 fields. } else { @@ -172,7 +171,7 @@ impl<'mir, 'a, 'tcx> Visitor<'tcx> for LocalAnalyzer<'mir, 'a, 'tcx> { let ty = self.bcx.fcx().monomorphize(&ty.to_ty(self.bcx.tcx())); // Only need the lvalue if we're actually dropping it. - if glue::type_needs_drop(self.bcx.tcx(), ty) { + if self.bcx.ccx().shared().type_needs_drop(ty) { self.mark_as_lvalue(index); } } diff --git a/src/librustc_trans/mir/block.rs b/src/librustc_trans/mir/block.rs index 83a45b7a41e3e..2ccf92a743eac 100644 --- a/src/librustc_trans/mir/block.rs +++ b/src/librustc_trans/mir/block.rs @@ -245,15 +245,15 @@ impl<'a, 'tcx> MirContext<'a, 'tcx> { let ty = bcx.fcx().monomorphize(&ty); // Double check for necessity to drop - if !glue::type_needs_drop(bcx.tcx(), ty) { + if !bcx.ccx().shared().type_needs_drop(ty) { funclet_br(self, bcx, target); return; } let lvalue = self.trans_lvalue(&bcx, location); let drop_fn = glue::get_drop_glue(bcx.ccx(), ty); - let drop_ty = glue::get_drop_glue_type(bcx.tcx(), ty); - let is_sized = common::type_is_sized(bcx.tcx(), ty); + let drop_ty = glue::get_drop_glue_type(bcx.ccx().shared(), ty); + let is_sized = bcx.ccx().shared().type_is_sized(ty); let llvalue = if is_sized { if drop_ty != ty { bcx.pointercast(lvalue.llval, type_of::type_of(bcx.ccx(), drop_ty).ptr_to()) @@ -461,7 +461,7 @@ impl<'a, 'tcx> MirContext<'a, 'tcx> { }; // Double check for necessity to drop - if !glue::type_needs_drop(bcx.tcx(), ty) { + if !bcx.ccx().shared().type_needs_drop(ty) { funclet_br(self, bcx, target); return; } @@ -474,8 +474,8 @@ impl<'a, 'tcx> MirContext<'a, 'tcx> { }; let drop_fn = glue::get_drop_glue(bcx.ccx(), ty); - let drop_ty = glue::get_drop_glue_type(bcx.tcx(), ty); - let is_sized = common::type_is_sized(bcx.tcx(), ty); + let drop_ty = glue::get_drop_glue_type(bcx.ccx().shared(), ty); + let is_sized = bcx.ccx().shared().type_is_sized(ty); let llvalue = if is_sized { if drop_ty != ty { bcx.pointercast(llval, type_of::type_of(bcx.ccx(), drop_ty).ptr_to()) @@ -678,7 +678,7 @@ impl<'a, 'tcx> MirContext<'a, 'tcx> { callee: &mut CalleeData) { if let Pair(a, b) = op.val { // Treat the values in a fat pointer separately. 
- if common::type_is_fat_ptr(bcx.tcx(), op.ty) { + if common::type_is_fat_ptr(bcx.ccx(), op.ty) { let (ptr, meta) = (a, b); if *next_idx == 0 { if let Virtual(idx) = *callee { @@ -766,7 +766,7 @@ impl<'a, 'tcx> MirContext<'a, 'tcx> { let base = adt::MaybeSizedValue::sized(llval); for (n, &ty) in arg_types.iter().enumerate() { let ptr = adt::trans_field_ptr(bcx, tuple.ty, base, Disr(0), n); - let val = if common::type_is_fat_ptr(bcx.tcx(), ty) { + let val = if common::type_is_fat_ptr(bcx.ccx(), ty) { let (lldata, llextra) = base::load_fat_ptr(bcx, ptr, ty); Pair(lldata, llextra) } else { diff --git a/src/librustc_trans/mir/constant.rs b/src/librustc_trans/mir/constant.rs index 03ee6b136a4f0..9628ed254066c 100644 --- a/src/librustc_trans/mir/constant.rs +++ b/src/librustc_trans/mir/constant.rs @@ -25,7 +25,7 @@ use rustc::ty::subst::Substs; use rustc_data_structures::indexed_vec::{Idx, IndexVec}; use {abi, adt, base, Disr, machine}; use callee::Callee; -use common::{self, BlockAndBuilder, CrateContext, const_get_elt, val_ty, type_is_sized}; +use common::{self, BlockAndBuilder, CrateContext, const_get_elt, val_ty}; use common::{C_array, C_bool, C_bytes, C_floating_f64, C_integral}; use common::{C_null, C_struct, C_str_slice, C_undef, C_uint}; use common::{const_to_opt_int, const_to_opt_uint}; @@ -401,7 +401,7 @@ impl<'a, 'tcx> MirConstContext<'a, 'tcx> { .projection_ty(tcx, &projection.elem); let base = tr_base.to_const(span); let projected_ty = self.monomorphize(&projected_ty).to_ty(tcx); - let is_sized = common::type_is_sized(tcx, projected_ty); + let is_sized = self.ccx.shared().type_is_sized(projected_ty); let (projected, llextra) = match projection.elem { mir::ProjectionElem::Deref => { @@ -598,11 +598,11 @@ impl<'a, 'tcx> MirConstContext<'a, 'tcx> { mir::CastKind::Unsize => { // unsize targets other than to a fat pointer currently // can't be in constants. - assert!(common::type_is_fat_ptr(tcx, cast_ty)); + assert!(common::type_is_fat_ptr(self.ccx, cast_ty)); let pointee_ty = operand.ty.builtin_deref(true, ty::NoPreference) .expect("consts: unsizing got non-pointer type").ty; - let (base, old_info) = if !common::type_is_sized(tcx, pointee_ty) { + let (base, old_info) = if !self.ccx.shared().type_is_sized(pointee_ty) { // Normally, the source is a thin pointer and we are // adding extra info to make a fat pointer. The exception // is when we are upcasting an existing object fat pointer @@ -685,9 +685,9 @@ impl<'a, 'tcx> MirConstContext<'a, 'tcx> { mir::CastKind::Misc => { // Casts from a fat-ptr. 
let ll_cast_ty = type_of::immediate_type_of(self.ccx, cast_ty); let ll_from_ty = type_of::immediate_type_of(self.ccx, operand.ty); - if common::type_is_fat_ptr(tcx, operand.ty) { + if common::type_is_fat_ptr(self.ccx, operand.ty) { let (data_ptr, meta_ptr) = operand.get_fat_ptr(); - if common::type_is_fat_ptr(tcx, cast_ty) { + if common::type_is_fat_ptr(self.ccx, cast_ty) { let ll_cft = ll_cast_ty.field_types(); let ll_fft = ll_from_ty.field_types(); let data_cast = consts::ptrcast(data_ptr, ll_cft[0]); @@ -716,7 +716,7 @@ impl<'a, 'tcx> MirConstContext<'a, 'tcx> { let base = match tr_lvalue.base { Base::Value(llval) => { // FIXME: may be wrong for &*(&simd_vec as &fmt::Debug) - let align = if type_is_sized(self.ccx.tcx(), ty) { + let align = if self.ccx.shared().type_is_sized(ty) { type_of::align_of(self.ccx, ty) } else { self.ccx.tcx().data_layout.pointer_align.abi() as machine::llalign @@ -731,7 +731,7 @@ impl<'a, 'tcx> MirConstContext<'a, 'tcx> { Base::Static(llval) => llval }; - let ptr = if common::type_is_sized(tcx, ty) { + let ptr = if self.ccx.shared().type_is_sized(ty) { base } else { C_struct(self.ccx, &[base, tr_lvalue.llextra], false) diff --git a/src/librustc_trans/mir/lvalue.rs b/src/librustc_trans/mir/lvalue.rs index 5bfe614f45e7f..1582dc9a6aa1e 100644 --- a/src/librustc_trans/mir/lvalue.rs +++ b/src/librustc_trans/mir/lvalue.rs @@ -140,7 +140,7 @@ impl<'a, 'tcx> MirContext<'a, 'tcx> { LvalueTy::Downcast { adt_def: _, substs: _, variant_index: v } => v, }; let discr = discr as u64; - let is_sized = common::type_is_sized(tcx, projected_ty.to_ty(tcx)); + let is_sized = self.ccx().shared().type_is_sized(projected_ty.to_ty(tcx)); let base = if is_sized { adt::MaybeSizedValue::sized(tr_base.llval) } else { diff --git a/src/librustc_trans/mir/mod.rs b/src/librustc_trans/mir/mod.rs index f1fe38d7b0c5f..6fbbaa7bc7661 100644 --- a/src/librustc_trans/mir/mod.rs +++ b/src/librustc_trans/mir/mod.rs @@ -374,7 +374,7 @@ fn arg_local_refs<'a, 'tcx>(bcx: &BlockAndBuilder<'a, 'tcx>, let dst = bcx.struct_gep(lltemp, i); let arg = &fcx.fn_ty.args[idx]; idx += 1; - if common::type_is_fat_ptr(tcx, tupled_arg_ty) { + if common::type_is_fat_ptr(bcx.ccx(), tupled_arg_ty) { // We pass fat pointers as two words, but inside the tuple // they are the two sub-fields of a single aggregate field. let meta = &fcx.fn_ty.args[idx]; @@ -429,7 +429,7 @@ fn arg_local_refs<'a, 'tcx>(bcx: &BlockAndBuilder<'a, 'tcx>, } let llarg = llvm::get_param(fcx.llfn, llarg_idx as c_uint); llarg_idx += 1; - let val = if common::type_is_fat_ptr(tcx, arg_ty) { + let val = if common::type_is_fat_ptr(bcx.ccx(), arg_ty) { let meta = &fcx.fn_ty.args[idx]; idx += 1; assert_eq!((meta.cast, meta.pad), (None, None)); @@ -446,7 +446,7 @@ fn arg_local_refs<'a, 'tcx>(bcx: &BlockAndBuilder<'a, 'tcx>, return LocalRef::Operand(Some(operand.unpack_if_pair(bcx))); } else { let lltemp = base::alloc_ty(&bcx, arg_ty, &format!("arg{}", arg_index)); - if common::type_is_fat_ptr(tcx, arg_ty) { + if common::type_is_fat_ptr(bcx.ccx(), arg_ty) { // we pass fat pointers as two words, but we want to // represent them internally as a pointer to two words, // so make an alloca to store them in. 
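Aside on the `type_is_fat_ptr(bcx.ccx(), ...)` call sites threaded through these hunks: the check rests on one layout fact, namely that a pointer to an unsized pointee carries metadata and is therefore two words wide. A minimal standalone sketch of that rule, using a toy `Ty` enum rather than the compiler's real types (everything here is invented for illustration):

// Toy model, not compiler types: a reference is "fat" exactly when its
// pointee is unsized, because the metadata (length or vtable) rides along.
#[derive(Clone, Copy)]
enum Ty { U64, Str, Slice }

fn is_sized(ty: Ty) -> bool {
    match ty {
        Ty::U64 => true,
        // str and [T] have a size known only at runtime.
        Ty::Str | Ty::Slice => false,
    }
}

fn ref_is_fat(pointee: Ty) -> bool {
    // Mirrors the shape of type_is_fat_ptr: fat iff the pointee is unsized.
    !is_sized(pointee)
}

fn main() {
    assert!(!ref_is_fat(Ty::U64)); // &u64 is one word
    assert!(ref_is_fat(Ty::Str)); // &str is (ptr, len)
    assert!(ref_is_fat(Ty::Slice)); // &[T] is (ptr, len)
}

This is why the fat-pointer cast hunks above split the value into a data part and a metadata part before casting each field separately.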
diff --git a/src/librustc_trans/mir/operand.rs b/src/librustc_trans/mir/operand.rs index c89eb9899d11c..20364d6320c65 100644 --- a/src/librustc_trans/mir/operand.rs +++ b/src/librustc_trans/mir/operand.rs @@ -143,7 +143,7 @@ impl<'a, 'tcx> MirContext<'a, 'tcx> { { debug!("trans_load: {:?} @ {:?}", Value(llval), ty); - let val = if common::type_is_fat_ptr(bcx.tcx(), ty) { + let val = if common::type_is_fat_ptr(bcx.ccx(), ty) { let (lldata, llextra) = base::load_fat_ptr(bcx, llval, ty); OperandValue::Pair(lldata, llextra) } else if common::type_is_imm_pair(bcx.ccx(), ty) { diff --git a/src/librustc_trans/mir/rvalue.rs b/src/librustc_trans/mir/rvalue.rs index 4a532924694c0..d7a4adb1dd4d1 100644 --- a/src/librustc_trans/mir/rvalue.rs +++ b/src/librustc_trans/mir/rvalue.rs @@ -55,7 +55,7 @@ impl<'a, 'tcx> MirContext<'a, 'tcx> { mir::Rvalue::Cast(mir::CastKind::Unsize, ref source, cast_ty) => { let cast_ty = bcx.fcx().monomorphize(&cast_ty); - if common::type_is_fat_ptr(bcx.tcx(), cast_ty) { + if common::type_is_fat_ptr(bcx.ccx(), cast_ty) { // into-coerce of a thin pointer to a fat pointer - just // use the operand path. let (bcx, temp) = self.trans_rvalue_operand(bcx, rvalue); @@ -208,7 +208,7 @@ impl<'a, 'tcx> MirContext<'a, 'tcx> { mir::CastKind::Unsize => { // unsize targets other than to a fat pointer currently // can't be operands. - assert!(common::type_is_fat_ptr(bcx.tcx(), cast_ty)); + assert!(common::type_is_fat_ptr(bcx.ccx(), cast_ty)); match operand.val { OperandValue::Pair(lldata, llextra) => { @@ -234,11 +234,11 @@ impl<'a, 'tcx> MirContext<'a, 'tcx> { } } } - mir::CastKind::Misc if common::type_is_fat_ptr(bcx.tcx(), operand.ty) => { + mir::CastKind::Misc if common::type_is_fat_ptr(bcx.ccx(), operand.ty) => { let ll_cast_ty = type_of::immediate_type_of(bcx.ccx(), cast_ty); let ll_from_ty = type_of::immediate_type_of(bcx.ccx(), operand.ty); if let OperandValue::Pair(data_ptr, meta_ptr) = operand.val { - if common::type_is_fat_ptr(bcx.tcx(), cast_ty) { + if common::type_is_fat_ptr(bcx.ccx(), cast_ty) { let ll_cft = ll_cast_ty.field_types(); let ll_fft = ll_from_ty.field_types(); let data_cast = bcx.pointercast(data_ptr, ll_cft[0]); @@ -358,7 +358,7 @@ impl<'a, 'tcx> MirContext<'a, 'tcx> { // Note: lvalues are indirect, so storing the `llval` into the // destination effectively creates a reference. 
- let operand = if common::type_is_sized(bcx.tcx(), ty) { + let operand = if bcx.ccx().shared().type_is_sized(ty) { OperandRef { val: OperandValue::Immediate(tr_lvalue.llval), ty: ref_ty, @@ -385,7 +385,7 @@ impl<'a, 'tcx> MirContext<'a, 'tcx> { mir::Rvalue::BinaryOp(op, ref lhs, ref rhs) => { let lhs = self.trans_operand(&bcx, lhs); let rhs = self.trans_operand(&bcx, rhs); - let llresult = if common::type_is_fat_ptr(bcx.tcx(), lhs.ty) { + let llresult = if common::type_is_fat_ptr(bcx.ccx(), lhs.ty) { match (lhs.val, rhs.val) { (OperandValue::Pair(lhs_addr, lhs_extra), OperandValue::Pair(rhs_addr, rhs_extra)) => { diff --git a/src/librustc_trans/trans_item.rs b/src/librustc_trans/trans_item.rs index 214eaeb817f30..527bee832956a 100644 --- a/src/librustc_trans/trans_item.rs +++ b/src/librustc_trans/trans_item.rs @@ -184,7 +184,7 @@ impl<'a, 'tcx> TransItem<'tcx> { linkage: llvm::Linkage, symbol_name: &str) { let tcx = ccx.tcx(); - assert_eq!(dg.ty(), glue::get_drop_glue_type(tcx, dg.ty())); + assert_eq!(dg.ty(), glue::get_drop_glue_type(ccx.shared(), dg.ty())); let t = dg.ty(); let sig = tcx.mk_fn_sig(iter::once(tcx.mk_mut_ptr(tcx.types.i8)), tcx.mk_nil(), false); diff --git a/src/librustc_trans/type_of.rs b/src/librustc_trans/type_of.rs index b38cd86e4bcde..469214b466e1a 100644 --- a/src/librustc_trans/type_of.rs +++ b/src/librustc_trans/type_of.rs @@ -39,7 +39,7 @@ pub fn sizing_type_of<'a, 'tcx>(cx: &CrateContext<'a, 'tcx>, t: Ty<'tcx>) -> Typ let _recursion_lock = cx.enter_type_of(t); let llsizingty = match t.sty { - _ if !type_is_sized(cx.tcx(), t) => { + _ if !cx.shared().type_is_sized(t) => { Type::struct_(cx, &[Type::i8p(cx), unsized_info_ty(cx, t)], false) } @@ -53,7 +53,7 @@ pub fn sizing_type_of<'a, 'tcx>(cx: &CrateContext<'a, 'tcx>, t: Ty<'tcx>) -> Typ ty::TyBox(ty) | ty::TyRef(_, ty::TypeAndMut{ty, ..}) | ty::TyRawPtr(ty::TypeAndMut{ty, ..}) => { - if type_is_sized(cx.tcx(), ty) { + if cx.shared().type_is_sized(ty) { Type::i8p(cx) } else { Type::struct_(cx, &[Type::i8p(cx), unsized_info_ty(cx, ty)], false) @@ -102,7 +102,7 @@ pub fn sizing_type_of<'a, 'tcx>(cx: &CrateContext<'a, 'tcx>, t: Ty<'tcx>) -> Typ // FIXME(eddyb) Temporary sanity check for ty::layout. let layout = cx.layout_of(t); - if !type_is_sized(cx.tcx(), t) { + if !cx.shared().type_is_sized(t) { if !layout.is_unsized() { bug!("layout should be unsized for type `{}` / {:#?}", t, layout); @@ -133,7 +133,7 @@ pub fn fat_ptr_base_ty<'a, 'tcx>(ccx: &CrateContext<'a, 'tcx>, ty: Ty<'tcx>) -> match ty.sty { ty::TyBox(t) | ty::TyRef(_, ty::TypeAndMut { ty: t, .. }) | - ty::TyRawPtr(ty::TypeAndMut { ty: t, .. }) if !type_is_sized(ccx.tcx(), t) => { + ty::TyRawPtr(ty::TypeAndMut { ty: t, .. }) if !ccx.shared().type_is_sized(t) => { in_memory_type_of(ccx, t).ptr_to() } _ => bug!("expected fat ptr ty but got {:?}", ty) @@ -170,7 +170,7 @@ pub fn immediate_type_of<'a, 'tcx>(cx: &CrateContext<'a, 'tcx>, t: Ty<'tcx>) -> /// is too large for it to be placed in SSA value (by our rules). /// For the raw type without far pointer indirection, see `in_memory_type_of`. 
 pub fn type_of<'a, 'tcx>(cx: &CrateContext<'a, 'tcx>, ty: Ty<'tcx>) -> Type {
- let ty = if !type_is_sized(cx.tcx(), ty) {
+ let ty = if !cx.shared().type_is_sized(ty) {
 cx.tcx().mk_imm_ptr(ty)
 } else {
 ty
@@ -230,7 +230,7 @@ pub fn in_memory_type_of<'a, 'tcx>(cx: &CrateContext<'a, 'tcx>, t: Ty<'tcx>) ->
 ty::TyBox(ty) |
 ty::TyRef(_, ty::TypeAndMut{ty, ..}) |
 ty::TyRawPtr(ty::TypeAndMut{ty, ..}) => {
- if !type_is_sized(cx.tcx(), ty) {
+ if !cx.shared().type_is_sized(ty) {
 if let ty::TyStr = ty.sty {
 // This means we get a nicer name in the output (str is always
 // unsized).

From e10695f161fade16829f9fa96f48465b123c133e Mon Sep 17 00:00:00 2001
From: Mark Simulacrum
Date: Sun, 18 Dec 2016 16:05:40 -0700
Subject: [PATCH 066/103] Move param_substs onto MirContext

---
 src/librustc_trans/common.rs | 25 -------------------------
 src/librustc_trans/mir/analyze.rs | 29 ++++++++++++++++-------------
 src/librustc_trans/mir/block.rs | 4 ++--
 src/librustc_trans/mir/constant.rs | 7 +++----
 src/librustc_trans/mir/lvalue.rs | 6 +++---
 src/librustc_trans/mir/mod.rs | 25 ++++++++++++++++++-----
 src/librustc_trans/mir/operand.rs | 2 +-
 src/librustc_trans/mir/rvalue.rs | 6 +++---
 8 files changed, 48 insertions(+), 56 deletions(-)

diff --git a/src/librustc_trans/common.rs b/src/librustc_trans/common.rs
index 528ecf2a42665..9a6127746ff3c 100644
--- a/src/librustc_trans/common.rs
+++ b/src/librustc_trans/common.rs
@@ -20,10 +20,8 @@ use monomorphize::Instance;
 use rustc::hir::def::Def;
 use rustc::hir::def_id::DefId;
 use rustc::hir::map::DefPathData;
-use rustc::infer::TransNormalize;
 use rustc::util::common::MemoizationMap;
 use middle::lang_items::LangItem;
-use rustc::ty::subst::Substs;
 use abi::{Abi, FnType};
 use base;
 use builder::Builder;
@@ -37,7 +35,6 @@ use value::Value;
 use rustc::ty::{self, Ty, TyCtxt};
 use rustc::ty::layout::Layout;
 use rustc::traits::{self, SelectionContext, Reveal};
-use rustc::ty::fold::TypeFoldable;
 use rustc::hir;
 use libc::{c_uint, c_char};
@@ -249,10 +246,6 @@ pub struct FunctionContext<'a, 'tcx: 'a> {
 // Describes the return/argument LLVM types and their ABI handling.
 pub fn_ty: FnType,
- // If this function is being monomorphized, this contains the type
- // substitutions used.
- pub param_substs: &'tcx Substs<'tcx>,
-
 // This function's enclosing crate context.
 pub ccx: &'a CrateContext<'a, 'tcx>,
@@ -266,23 +259,13 @@ impl<'a, 'tcx> FunctionContext<'a, 'tcx> {
 ccx: &'a CrateContext<'a, 'tcx>,
 llfndecl: ValueRef,
 fn_ty: FnType,
- definition: Option<(Instance<'tcx>, &ty::FnSig<'tcx>, Abi)>,
 skip_retptr: bool,
 ) -> FunctionContext<'a, 'tcx> {
- let param_substs = match definition {
- Some((instance, ..)) => {
- assert!(!instance.substs.needs_infer());
- instance.substs
- }
- None => ccx.tcx().intern_substs(&[])
- };
-
 let mut fcx = FunctionContext {
 llfn: llfndecl,
 llretslotptr: None,
 alloca_insert_pt: None,
 fn_ty: fn_ty,
- param_substs: param_substs,
 ccx: ccx,
 alloca_builder: Builder::with_ccx(ccx),
 };
@@ -340,14 +323,6 @@ impl<'a, 'tcx> FunctionContext<'a, 'tcx> {
 BlockAndBuilder::new(self.new_block(name), self)
 }
- pub fn monomorphize<T>(&self, value: &T) -> T
- where T: TransNormalize<'tcx>
- {
- monomorphize::apply_param_substs(self.ccx.shared(),
- self.param_substs,
- value)
- }
-
 pub fn eh_personality(&self) -> ValueRef {
 // The exception handling personality function.
// diff --git a/src/librustc_trans/mir/analyze.rs b/src/librustc_trans/mir/analyze.rs index 38e21bdefb2c8..71375f1160c3f 100644 --- a/src/librustc_trans/mir/analyze.rs +++ b/src/librustc_trans/mir/analyze.rs @@ -17,15 +17,18 @@ use rustc::mir::{self, Location, TerminatorKind}; use rustc::mir::visit::{Visitor, LvalueContext}; use rustc::mir::traversal; use common::{self, BlockAndBuilder}; +use super::MirContext; use super::rvalue; -pub fn lvalue_locals<'a, 'tcx>(bcx: &BlockAndBuilder<'a, 'tcx>, mir: &mir::Mir<'tcx>) -> BitVector { - let mut analyzer = LocalAnalyzer::new(mir, &bcx); +pub fn lvalue_locals<'a, 'tcx>(bcx: &BlockAndBuilder<'a, 'tcx>, mircx: &MirContext<'a, 'tcx>) + -> BitVector { + let mir = mircx.mir; + let mut analyzer = LocalAnalyzer::new(mircx, &bcx); analyzer.visit_mir(mir); for (index, ty) in mir.local_decls.iter().map(|l| l.ty).enumerate() { - let ty = bcx.fcx().monomorphize(&ty); + let ty = mircx.monomorphize(&ty); debug!("local {} has type {:?}", index, ty); if ty.is_scalar() || ty.is_unique() || @@ -54,20 +57,20 @@ pub fn lvalue_locals<'a, 'tcx>(bcx: &BlockAndBuilder<'a, 'tcx>, mir: &mir::Mir<' } struct LocalAnalyzer<'mir, 'a: 'mir, 'tcx: 'a> { - mir: &'mir mir::Mir<'tcx>, + mir: &'mir MirContext<'a, 'tcx>, bcx: &'mir BlockAndBuilder<'a, 'tcx>, lvalue_locals: BitVector, seen_assigned: BitVector } impl<'mir, 'a, 'tcx> LocalAnalyzer<'mir, 'a, 'tcx> { - fn new(mir: &'mir mir::Mir<'tcx>, bcx: &'mir BlockAndBuilder<'a, 'tcx>) + fn new(mircx: &'mir MirContext<'a, 'tcx>, bcx: &'mir BlockAndBuilder<'a, 'tcx>) -> LocalAnalyzer<'mir, 'a, 'tcx> { LocalAnalyzer { - mir: mir, + mir: mircx, bcx: bcx, - lvalue_locals: BitVector::new(mir.local_decls.len()), - seen_assigned: BitVector::new(mir.local_decls.len()) + lvalue_locals: BitVector::new(mircx.mir.local_decls.len()), + seen_assigned: BitVector::new(mircx.mir.local_decls.len()) } } @@ -93,7 +96,7 @@ impl<'mir, 'a, 'tcx> Visitor<'tcx> for LocalAnalyzer<'mir, 'a, 'tcx> { if let mir::Lvalue::Local(index) = *lvalue { self.mark_assigned(index); - if !rvalue::rvalue_creates_operand(self.mir, self.bcx, rvalue) { + if !rvalue::rvalue_creates_operand(self.mir.mir, self.bcx, rvalue) { self.mark_as_lvalue(index); } } else { @@ -136,9 +139,9 @@ impl<'mir, 'a, 'tcx> Visitor<'tcx> for LocalAnalyzer<'mir, 'a, 'tcx> { // Allow uses of projections of immediate pair fields. if let mir::Lvalue::Projection(ref proj) = *lvalue { if let mir::Lvalue::Local(_) = proj.base { - let ty = proj.base.ty(self.mir, self.bcx.tcx()); + let ty = proj.base.ty(self.mir.mir, self.bcx.tcx()); - let ty = self.bcx.fcx().monomorphize(&ty.to_ty(self.bcx.tcx())); + let ty = self.mir.monomorphize(&ty.to_ty(self.bcx.tcx())); if common::type_is_imm_pair(self.bcx.ccx(), ty) { if let mir::ProjectionElem::Field(..) = proj.elem { if let LvalueContext::Consume = context { @@ -167,8 +170,8 @@ impl<'mir, 'a, 'tcx> Visitor<'tcx> for LocalAnalyzer<'mir, 'a, 'tcx> { } LvalueContext::Drop => { - let ty = lvalue.ty(self.mir, self.bcx.tcx()); - let ty = self.bcx.fcx().monomorphize(&ty.to_ty(self.bcx.tcx())); + let ty = lvalue.ty(self.mir.mir, self.bcx.tcx()); + let ty = self.mir.monomorphize(&ty.to_ty(self.bcx.tcx())); // Only need the lvalue if we're actually dropping it. 
 if self.bcx.ccx().shared().type_needs_drop(ty) {
diff --git a/src/librustc_trans/mir/block.rs b/src/librustc_trans/mir/block.rs
index 2ccf92a743eac..577c304d0f7aa 100644
--- a/src/librustc_trans/mir/block.rs
+++ b/src/librustc_trans/mir/block.rs
@@ -242,7 +242,7 @@ impl<'a, 'tcx> MirContext<'a, 'tcx> {
 mir::TerminatorKind::Drop { ref location, target, unwind } => {
 let ty = location.ty(&self.mir, bcx.tcx()).to_ty(bcx.tcx());
- let ty = bcx.fcx().monomorphize(&ty);
+ let ty = self.monomorphize(&ty);
 // Double check for necessity to drop
 if !bcx.ccx().shared().type_needs_drop(ty) {
@@ -522,7 +522,7 @@ impl<'a, 'tcx> MirContext<'a, 'tcx> {
 let extra_args = &args[sig.inputs().len()..];
 let extra_args = extra_args.iter().map(|op_arg| {
 let op_ty = op_arg.ty(&self.mir, bcx.tcx());
- bcx.fcx().monomorphize(&op_ty)
+ self.monomorphize(&op_ty)
 }).collect::<Vec<_>>();
 let fn_ty = callee.direct_fn_type(bcx.ccx(), &extra_args);
diff --git a/src/librustc_trans/mir/constant.rs b/src/librustc_trans/mir/constant.rs
index 9628ed254066c..56f88977c865d 100644
--- a/src/librustc_trans/mir/constant.rs
+++ b/src/librustc_trans/mir/constant.rs
@@ -952,7 +952,7 @@ impl<'a, 'tcx> MirContext<'a, 'tcx> {
 -> Const<'tcx>
 {
 debug!("trans_constant({:?})", constant);
- let ty = bcx.fcx().monomorphize(&constant.ty);
+ let ty = self.monomorphize(&constant.ty);
 let result = match constant.literal.clone() {
 mir::Literal::Item { def_id, substs } => {
 // Shortcut for zero-sized types, including function item
@@ -962,14 +962,13 @@ impl<'a, 'tcx> MirContext<'a, 'tcx> {
 return Const::new(C_null(llty), ty);
 }
- let substs = bcx.fcx().monomorphize(&substs);
+ let substs = self.monomorphize(&substs);
 let instance = Instance::new(def_id, substs);
 MirConstContext::trans_def(bcx.ccx(), instance, IndexVec::new())
 }
 mir::Literal::Promoted { index } => {
 let mir = &self.mir.promoted[index];
- MirConstContext::new(bcx.ccx(), mir, bcx.fcx().param_substs,
- IndexVec::new()).trans()
+ MirConstContext::new(bcx.ccx(), mir, self.param_substs, IndexVec::new()).trans()
 }
 mir::Literal::Value { value } => {
 Ok(Const::from_constval(bcx.ccx(), value, ty))
diff --git a/src/librustc_trans/mir/lvalue.rs b/src/librustc_trans/mir/lvalue.rs
index 1582dc9a6aa1e..673a786f1f8bb 100644
--- a/src/librustc_trans/mir/lvalue.rs
+++ b/src/librustc_trans/mir/lvalue.rs
@@ -103,7 +103,7 @@ impl<'a, 'tcx> MirContext<'a, 'tcx> {
 let ptr = self.trans_consume(bcx, base);
 let projected_ty = LvalueTy::from_ty(ptr.ty)
 .projection_ty(tcx, &mir::ProjectionElem::Deref);
- let projected_ty = bcx.fcx().monomorphize(&projected_ty);
+ let projected_ty = self.monomorphize(&projected_ty);
 let (llptr, llextra) = match ptr.val {
 OperandValue::Immediate(llptr) => (llptr, ptr::null_mut()),
 OperandValue::Pair(llptr, llextra) => (llptr, llextra),
@@ -118,7 +118,7 @@ impl<'a, 'tcx> MirContext<'a, 'tcx> {
 mir::Lvalue::Projection(ref projection) => {
 let tr_base = self.trans_lvalue(bcx, &projection.base);
 let projected_ty = tr_base.ty.projection_ty(tcx, &projection.elem);
- let projected_ty = bcx.fcx().monomorphize(&projected_ty);
+ let projected_ty = self.monomorphize(&projected_ty);
 let project_index = |llindex| {
 let element = if let ty::TySlice(_) = tr_base.ty.to_ty(tcx).sty {
@@ -274,6 +274,6 @@ impl<'a, 'tcx> MirContext<'a, 'tcx> {
 pub fn monomorphized_lvalue_ty(&self, lvalue: &mir::Lvalue<'tcx>) -> Ty<'tcx> {
 let tcx = self.fcx.ccx.tcx();
 let lvalue_ty = lvalue.ty(&self.mir, tcx);
- self.fcx.monomorphize(&lvalue_ty.to_ty(tcx))
+ self.monomorphize(&lvalue_ty.to_ty(tcx))
 }
 }
diff --git a/src/librustc_trans/mir/mod.rs b/src/librustc_trans/mir/mod.rs
index f1fe38d7b0c5f..6f376251d9b7c 100644
--- a/src/librustc_trans/mir/mod.rs
+++ b/src/librustc_trans/mir/mod.rs
@@ -14,11 +14,14 @@ use llvm::debuginfo::DIScope;
 use rustc::ty;
 use rustc::mir::{self, Mir};
 use rustc::mir::tcx::LvalueTy;
+use rustc::ty::subst::Substs;
+use rustc::infer::TransNormalize;
+use rustc::ty::TypeFoldable;
 use session::config::FullDebugInfo;
 use base;
 use common::{self, BlockAndBuilder, CrateContext, FunctionContext, C_null, Funclet};
 use debuginfo::{self, declare_local, VariableAccess, VariableKind, FunctionDebugContext};
-use monomorphize::Instance;
+use monomorphize::{self, Instance};
 use machine;
 use type_of;
@@ -88,9 +91,17 @@ pub struct MirContext<'a, 'tcx:'a> {
 /// Debug information for MIR scopes.
 scopes: IndexVec<mir::VisibilityScope, DIScope>,
+
+ /// If this function is being monomorphized, this contains the type substitutions used.
+ param_substs: &'tcx Substs<'tcx>,
 }
 impl<'a, 'tcx> MirContext<'a, 'tcx> {
+ pub fn monomorphize<T>(&self, value: &T) -> T
+ where T: TransNormalize<'tcx> {
+ monomorphize::apply_param_substs(self.fcx.ccx.shared(), self.param_substs, value)
+ }
+
 pub fn debug_loc(&mut self, source_info: mir::SourceInfo) -> (DIScope, Span) {
 // Bail out if debug info emission is not enabled.
 match self.debug_context {
@@ -207,8 +218,6 @@ pub fn trans_mir<'a, 'tcx: 'a>(
 let bcx = fcx.get_entry_block();
 // Analyze the temps to determine which must be lvalues
- // FIXME
- let lvalue_locals = analyze::lvalue_locals(&bcx, &mir);
 let cleanup_kinds = analyze::cleanup_kinds(&mir);
 // Allocate a `Block` for every basic block
@@ -235,15 +244,21 @@ pub fn trans_mir<'a, 'tcx: 'a>(
 scopes: scopes,
 locals: IndexVec::new(),
 debug_context: debug_context,
+ param_substs: {
+ assert!(!instance.substs.needs_infer());
+ instance.substs
+ },
 };
+ let lvalue_locals = analyze::lvalue_locals(&bcx, &mircx);
+
 // Allocate variable and temp allocas
 mircx.locals = {
 let args = arg_local_refs(&bcx, &mircx, &mircx.scopes, &lvalue_locals);
 let mut allocate_local = |local| {
 let decl = &mir.local_decls[local];
- let ty = bcx.fcx().monomorphize(&decl.ty);
+ let ty = mircx.monomorphize(&decl.ty);
 if let Some(name) = decl.name {
 // User variable
@@ -356,7 +371,7 @@ fn arg_local_refs<'a, 'tcx>(bcx: &BlockAndBuilder<'a, 'tcx>,
 mir.args_iter().enumerate().map(|(arg_index, local)| {
 let arg_decl = &mir.local_decls[local];
- let arg_ty = bcx.fcx().monomorphize(&arg_decl.ty);
+ let arg_ty = mircx.monomorphize(&arg_decl.ty);
 if Some(local) == mir.spread_arg {
 // This argument (e.g. the last argument in the "rust-call" ABI)
diff --git a/src/librustc_trans/mir/operand.rs b/src/librustc_trans/mir/operand.rs
index 20364d6320c65..6e69608e51e6b 100644
--- a/src/librustc_trans/mir/operand.rs
+++ b/src/librustc_trans/mir/operand.rs
@@ -197,7 +197,7 @@ impl<'a, 'tcx> MirContext<'a, 'tcx> {
 let llval = [a, b][f.index()];
 let op = OperandRef {
 val: OperandValue::Immediate(llval),
- ty: bcx.fcx().monomorphize(&ty)
+ ty: self.monomorphize(&ty)
 };
 // Handle nested pairs.
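The mod.rs hunk above is the heart of patch 066: the substitutions now live on the per-body MirContext, and `monomorphize` becomes a method there instead of on FunctionContext. A rough sketch of the ownership pattern, with invented stand-in types (the real `apply_param_substs` does far more than an index lookup):

// Invented stand-ins: the per-body context carries the substitutions,
// so every type query funnels through one `monomorphize` entry point.
struct Substs(Vec<&'static str>);

struct MirCtx<'a> {
    param_substs: &'a Substs,
}

impl<'a> MirCtx<'a> {
    // Toy apply_param_substs: look up the concrete type for parameter `i`.
    fn monomorphize(&self, i: usize) -> &'static str {
        self.param_substs.0[i]
    }
}

fn main() {
    let substs = Substs(vec!["u32", "String"]);
    let cx = MirCtx { param_substs: &substs };
    assert_eq!(cx.monomorphize(0), "u32");
    assert_eq!(cx.monomorphize(1), "String");
}

The payoff visible in the surrounding hunks is that call sites no longer reach through `bcx.fcx()`; they ask `self` (the MirContext) directly.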
diff --git a/src/librustc_trans/mir/rvalue.rs b/src/librustc_trans/mir/rvalue.rs index d7a4adb1dd4d1..5037bd9dae395 100644 --- a/src/librustc_trans/mir/rvalue.rs +++ b/src/librustc_trans/mir/rvalue.rs @@ -53,7 +53,7 @@ impl<'a, 'tcx> MirContext<'a, 'tcx> { } mir::Rvalue::Cast(mir::CastKind::Unsize, ref source, cast_ty) => { - let cast_ty = bcx.fcx().monomorphize(&cast_ty); + let cast_ty = self.monomorphize(&cast_ty); if common::type_is_fat_ptr(bcx.ccx(), cast_ty) { // into-coerce of a thin pointer to a fat pointer - just @@ -186,7 +186,7 @@ impl<'a, 'tcx> MirContext<'a, 'tcx> { mir::Rvalue::Cast(ref kind, ref source, cast_ty) => { let operand = self.trans_operand(&bcx, source); debug!("cast operand is {:?}", operand); - let cast_ty = bcx.fcx().monomorphize(&cast_ty); + let cast_ty = self.monomorphize(&cast_ty); let val = match *kind { mir::CastKind::ReifyFnPointer => { @@ -443,7 +443,7 @@ impl<'a, 'tcx> MirContext<'a, 'tcx> { } mir::Rvalue::Box(content_ty) => { - let content_ty: Ty<'tcx> = bcx.fcx().monomorphize(&content_ty); + let content_ty: Ty<'tcx> = self.monomorphize(&content_ty); let llty = type_of::type_of(bcx.ccx(), content_ty); let llsize = machine::llsize_of(bcx.ccx(), llty); let align = type_of::align_of(bcx.ccx(), content_ty); From 0a71b3880397b89b04e2a4b25b0528df41d39c70 Mon Sep 17 00:00:00 2001 From: Mark Simulacrum Date: Sun, 18 Dec 2016 16:06:41 -0700 Subject: [PATCH 067/103] Remove llretslotptr from FunctionContext --- src/librustc_trans/base.rs | 4 ++-- src/librustc_trans/callee.rs | 4 ++-- src/librustc_trans/common.rs | 29 +---------------------------- src/librustc_trans/glue.rs | 2 +- src/librustc_trans/intrinsic.rs | 2 +- src/librustc_trans/meth.rs | 2 +- 6 files changed, 8 insertions(+), 35 deletions(-) diff --git a/src/librustc_trans/base.rs b/src/librustc_trans/base.rs index 35507c6e6ab7c..7abf2bb315192 100644 --- a/src/librustc_trans/base.rs +++ b/src/librustc_trans/base.rs @@ -598,7 +598,7 @@ pub fn trans_instance<'a, 'tcx>(ccx: &CrateContext<'a, 'tcx>, instance: Instance let fn_ty = FnType::new(ccx, abi, &sig, &[]); - let fcx = FunctionContext::new(ccx, lldecl, fn_ty, Some((instance, &sig, abi)), true); + let fcx = FunctionContext::new(ccx, lldecl, fn_ty); let mir = ccx.tcx().item_mir(instance.def); mir::trans_mir(&fcx, &mir, instance, &sig, abi); @@ -618,7 +618,7 @@ pub fn trans_ctor_shim<'a, 'tcx>(ccx: &CrateContext<'a, 'tcx>, let sig = ccx.tcx().erase_late_bound_regions_and_normalize(&ctor_ty.fn_sig()); let fn_ty = FnType::new(ccx, Abi::Rust, &sig, &[]); - let fcx = FunctionContext::new(ccx, llfndecl, fn_ty, None, false); + let fcx = FunctionContext::new(ccx, llfndecl, fn_ty); let bcx = fcx.get_entry_block(); if !fcx.fn_ty.ret.is_ignore() { // But if there are no nested returns, we skip the indirection diff --git a/src/librustc_trans/callee.rs b/src/librustc_trans/callee.rs index 4dbfdc9d4999e..892b402bf1009 100644 --- a/src/librustc_trans/callee.rs +++ b/src/librustc_trans/callee.rs @@ -328,7 +328,7 @@ fn trans_fn_once_adapter_shim<'a, 'tcx>( let lloncefn = declare::define_internal_fn(ccx, &function_name, llonce_fn_ty); attributes::set_frame_pointer_elimination(ccx, lloncefn); - let fcx = FunctionContext::new(ccx, lloncefn, fn_ty, None, false); + let fcx = FunctionContext::new(ccx, lloncefn, fn_ty); let mut bcx = fcx.get_entry_block(); // the first argument (`self`) will be the (by value) closure env. 
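For context on the llretslotptr removal in the hunks that follow: the field cached the destination for indirectly returned values, where the caller passes a hidden out-pointer instead of receiving the value in registers. A simplified picture of that calling convention, independent of LLVM and using only invented example types:

// Simplified "sret" shape: a large result is written through a slot the
// caller owns, rather than returned by value.
struct Big([u64; 8]);

// Small result: returned directly.
fn make_small() -> u64 {
    7
}

// Large result: the caller provides the return slot.
fn make_big_into(out: &mut Big) {
    out.0 = [1, 2, 3, 4, 5, 6, 7, 8];
}

fn main() {
    let small = make_small();
    let mut slot = Big([0; 8]); // the "ret slot" lives in the caller's frame
    make_big_into(&mut slot);
    assert_eq!(small + slot.0[7], 15);
}

With MIR translation handling return destinations itself, FunctionContext no longer needs to pre-allocate or remember such a slot, which is what the patch deletes.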
@@ -499,7 +499,7 @@ fn trans_fn_pointer_shim<'a, 'tcx>(
 let llfn = declare::define_internal_fn(ccx, &function_name, tuple_fn_ty);
 attributes::set_frame_pointer_elimination(ccx, llfn);
 //
- let fcx = FunctionContext::new(ccx, llfn, fn_ty, None, false);
+ let fcx = FunctionContext::new(ccx, llfn, fn_ty);
 let bcx = fcx.get_entry_block();
 let llargs = get_params(fcx.llfn);
diff --git a/src/librustc_trans/common.rs b/src/librustc_trans/common.rs
index 9a6127746ff3c..b7f2fabf184da 100644
--- a/src/librustc_trans/common.rs
+++ b/src/librustc_trans/common.rs
@@ -15,8 +15,7 @@ use session::Session;
 use llvm;
 use llvm::{ValueRef, BasicBlockRef, ContextRef, TypeKind};
-use llvm::{True, False, Bool, OperandBundleDef, get_param};
-use monomorphize::Instance;
+use llvm::{True, False, Bool, OperandBundleDef};
 use rustc::hir::def::Def;
 use rustc::hir::def_id::DefId;
 use rustc::hir::map::DefPathData;
@@ -230,12 +229,6 @@ pub struct FunctionContext<'a, 'tcx: 'a> {
 // section of the executable we're generating.
 pub llfn: ValueRef,
- // A pointer to where to store the return value. If the return type is
- // immediate, this points to an alloca in the function. Otherwise, it's a
- // pointer to the hidden first parameter of the function. After function
- // construction, this should always be Some.
- pub llretslotptr: Option<ValueRef>,
-
 // These pub elements: "hoisted basic blocks" containing
 // administrative activities that have to happen in only one place in
 // the function, due to LLVM's quirks.
@@ -259,11 +252,9 @@ impl<'a, 'tcx> FunctionContext<'a, 'tcx> {
 ccx: &'a CrateContext<'a, 'tcx>,
 llfndecl: ValueRef,
 fn_ty: FnType,
- skip_retptr: bool,
 ) -> FunctionContext<'a, 'tcx> {
 let mut fcx = FunctionContext {
 llfn: llfndecl,
- llretslotptr: None,
 alloca_insert_pt: None,
 fn_ty: fn_ty,
 ccx: ccx,
@@ -281,24 +272,6 @@ impl<'a, 'tcx> FunctionContext<'a, 'tcx> {
 // This is later removed in the drop of FunctionContext.
 fcx.alloca_insert_pt = Some(val);
- // We normally allocate the llretslotptr, unless we
- // have been instructed to skip it for immediate return
- // values, or there is nothing to return at all.
- if !fcx.fn_ty.ret.is_ignore() && !skip_retptr {
- // But if there are no nested returns, we skip the indirection
- // and have a single retslot
- let slot = if fcx.fn_ty.ret.is_indirect() {
- get_param(fcx.llfn, 0)
- } else {
- // We create an alloca to hold a pointer of type `ret.original_ty`
- // which will hold the pointer to the right alloca which has the
- // final ret value
- fcx.alloca(fcx.fn_ty.ret.memory_ty(ccx), "sret_slot")
- };
-
- fcx.llretslotptr = Some(slot);
- }
-
 fcx
 }
diff --git a/src/librustc_trans/glue.rs b/src/librustc_trans/glue.rs
index 1176de4010a17..262a958347d54 100644
--- a/src/librustc_trans/glue.rs
+++ b/src/librustc_trans/glue.rs
@@ -195,7 +195,7 @@ pub fn implement_drop_glue<'a, 'tcx>(ccx: &CrateContext<'a, 'tcx>, g: DropGlueKi
 assert_eq!(g.ty(), get_drop_glue_type(ccx.shared(), g.ty()));
 let (llfn, fn_ty) = ccx.drop_glues().borrow().get(&g).unwrap().clone();
- let fcx = FunctionContext::new(ccx, llfn, fn_ty, None, false);
+ let fcx = FunctionContext::new(ccx, llfn, fn_ty);
 let bcx = fcx.get_entry_block();
 ccx.stats().n_glues_created.set(ccx.stats().n_glues_created.get() + 1);
diff --git a/src/librustc_trans/intrinsic.rs b/src/librustc_trans/intrinsic.rs
index fd5048f8c5938..30f3e549e78c2 100644
--- a/src/librustc_trans/intrinsic.rs
+++ b/src/librustc_trans/intrinsic.rs
@@ -893,7 +893,7 @@ fn gen_fn<'a, 'tcx>(fcx: &FunctionContext<'a, 'tcx>,
 sig: ty::Binder(sig)
 }));
 let llfn = declare::define_internal_fn(ccx, name, rust_fn_ty);
- let fcx = FunctionContext::new(ccx, llfn, fn_ty, None, true);
+ let fcx = FunctionContext::new(ccx, llfn, fn_ty);
 trans(fcx.get_entry_block());
 llfn
 }
diff --git a/src/librustc_trans/meth.rs b/src/librustc_trans/meth.rs
index 33b43e9a3ab06..b9a6491f52f3f 100644
--- a/src/librustc_trans/meth.rs
+++ b/src/librustc_trans/meth.rs
@@ -81,7 +81,7 @@ pub fn trans_object_shim<'a, 'tcx>(ccx: &'a CrateContext<'a, 'tcx>,
 let llfn = declare::define_internal_fn(ccx, &function_name, callee.ty);
 attributes::set_frame_pointer_elimination(ccx, llfn);
- let fcx = FunctionContext::new(ccx, llfn, fn_ty, None, false);
+ let fcx = FunctionContext::new(ccx, llfn, fn_ty);
 let bcx = fcx.get_entry_block();
 let llargs = get_params(fcx.llfn);

From 4c7041ea7da7ab335cd20bbc4e32dbe8f953bcdb Mon Sep 17 00:00:00 2001
From: Mark Simulacrum
Date: Sun, 18 Dec 2016 16:25:37 -0700
Subject: [PATCH 068/103] Don't special case abort/unreachable intrinsics

---
 src/librustc_trans/intrinsic.rs | 19 ++++++-------------
 1 file changed, 6 insertions(+), 13 deletions(-)

diff --git a/src/librustc_trans/intrinsic.rs b/src/librustc_trans/intrinsic.rs
index 30f3e549e78c2..e646f61bddbbe 100644
--- a/src/librustc_trans/intrinsic.rs
+++ b/src/librustc_trans/intrinsic.rs
@@ -76,6 +76,7 @@ fn get_simple_intrinsic(ccx: &CrateContext, name: &str) -> Option<ValueRef> {
 "roundf32" => "llvm.round.f32",
 "roundf64" => "llvm.round.f64",
 "assume" => "llvm.assume",
+ "abort" => "llvm.trap",
 _ => return None
 };
 Some(ccx.get_intrinsic(&llvm_name))
@@ -90,8 +91,7 @@ pub fn trans_intrinsic_call<'a, 'tcx>(bcx: &BlockAndBuilder<'a, 'tcx>,
 llargs: &[ValueRef],
 llresult: ValueRef,
 span: Span) {
- let fcx = bcx.fcx();
- let ccx = fcx.ccx;
+ let ccx = bcx.ccx();
 let tcx = bcx.tcx();
 let (def_id, substs, fty) = match callee_ty.sty {
@@ -104,15 +104,6 @@ pub fn trans_intrinsic_call<'a, 'tcx>(bcx: &BlockAndBuilder<'a, 'tcx>,
 let ret_ty = sig.output();
 let name = &*tcx.item_name(def_id).as_str();
- // These are the only intrinsic functions that diverge.
- if name == "abort" {
- let llfn = ccx.get_intrinsic(&("llvm.trap"));
- bcx.call(llfn, &[], None);
- return;
- } else if name == "unreachable" {
- return;
- }
-
 let llret_ty = type_of::type_of(ccx, ret_ty);
 let simple = get_simple_intrinsic(ccx, name);
@@ -120,6 +111,9 @@ pub fn trans_intrinsic_call<'a, 'tcx>(bcx: &BlockAndBuilder<'a, 'tcx>,
 _ if simple.is_some() => {
 bcx.call(simple.unwrap(), &llargs, None)
 }
+ "unreachable" => {
+ return;
+ },
 "likely" => {
 let expect = ccx.get_intrinsic(&("llvm.expect.i1"));
 bcx.call(expect, &[llargs[0], C_bool(ccx, true)], None)
@@ -628,8 +622,7 @@ pub fn trans_intrinsic_call<'a, 'tcx>(bcx: &BlockAndBuilder<'a, 'tcx>,
 }
 };
- if val_ty(llval) != Type::void(ccx) &&
- machine::llsize_of_alloc(ccx, val_ty(llval)) != 0 {
+ if val_ty(llval) != Type::void(ccx) && machine::llsize_of_alloc(ccx, val_ty(llval)) != 0 {
 if let Some(ty) = fn_ty.ret.cast {
 let ptr = bcx.pointercast(llresult, ty.ptr_to());
 let store = bcx.store(llval, ptr);

From 2bda3b7acb6bd3f118ac64757f3adb64e2abc8bf Mon Sep 17 00:00:00 2001
From: Mark Simulacrum
Date: Sun, 18 Dec 2016 17:04:00 -0700
Subject: [PATCH 069/103] Inline and simplify init_cpad

---
 src/librustc_trans/mir/block.rs | 28 +---------------------------
 src/librustc_trans/mir/mod.rs | 27 +++++++++++++++++++--------
 2 files changed, 20 insertions(+), 35 deletions(-)

diff --git a/src/librustc_trans/mir/block.rs b/src/librustc_trans/mir/block.rs
index 577c304d0f7aa..e259f7c20f2f2 100644
--- a/src/librustc_trans/mir/block.rs
+++ b/src/librustc_trans/mir/block.rs
@@ -863,32 +863,6 @@ impl<'a, 'tcx> MirContext<'a, 'tcx> {
 bcx.llbb()
 }
- pub fn init_cpad(&mut self, bb: mir::BasicBlock,
- funclets: &mut IndexVec<mir::BasicBlock, Option<Funclet>>) {
- let bcx = self.build_block(bb);
- let data = &self.mir[bb];
- debug!("init_cpad({:?})", data);
-
- match self.cleanup_kinds[bb] {
- CleanupKind::NotCleanup => {
- funclets[bb] = None;
- }
- _ if !base::wants_msvc_seh(bcx.sess()) => {
- funclets[bb] = Funclet::gnu();
- }
- CleanupKind::Internal { funclet: _ } => {
- // FIXME: is this needed?
- bcx.set_personality_fn(self.fcx.eh_personality());
- funclets[bb] = None;
- }
- CleanupKind::Funclet => {
- bcx.set_personality_fn(self.fcx.eh_personality());
- let cleanup_pad = bcx.cleanup_pad(None, &[]);
- funclets[bb] = Funclet::msvc(cleanup_pad);
- }
- };
- }
-
 fn unreachable_block(&mut self) -> BasicBlockRef {
 self.unreachable_block.unwrap_or_else(|| {
 let bl = self.fcx.build_new_block("unreachable");
@@ -898,7 +872,7 @@ impl<'a, 'tcx> MirContext<'a, 'tcx> {
 })
 }
- fn build_block(&self, bb: mir::BasicBlock) -> BlockAndBuilder<'a, 'tcx> {
+ pub fn build_block(&self, bb: mir::BasicBlock) -> BlockAndBuilder<'a, 'tcx> {
 BlockAndBuilder::new(self.blocks[bb], self.fcx)
 }
diff --git a/src/librustc_trans/mir/mod.rs b/src/librustc_trans/mir/mod.rs
index 6f376251d9b7c..f8bd087b0da1e 100644
--- a/src/librustc_trans/mir/mod.rs
+++ b/src/librustc_trans/mir/mod.rs
@@ -36,6 +36,7 @@ use rustc_data_structures::indexed_vec::{IndexVec, Idx};
 pub use self::constant::trans_static_initializer;
+use self::analyze::CleanupKind;
 use self::lvalue::{LvalueRef};
 use rustc::mir::traversal;
@@ -315,18 +316,28 @@ pub fn trans_mir<'a, 'tcx: 'a>(
 // emitting should be enabled.
 debuginfo::start_emitting_source_locations(&mircx);
- let mut visited = BitVector::new(mir.basic_blocks().len());
-
- let mut rpo = traversal::reverse_postorder(&mir);
-
 let mut funclets: IndexVec<mir::BasicBlock, Option<Funclet>> =
 IndexVec::from_elem(None, mir.basic_blocks());
- // Prepare each block for translation.
- for (bb, _) in rpo.by_ref() { - mircx.init_cpad(bb, &mut funclets); + // If false, all funclets should be None (which is the default) + if base::wants_msvc_seh(fcx.ccx.sess()) { + for (bb, cleanup_kind) in mircx.cleanup_kinds.iter_enumerated() { + let bcx = mircx.build_block(bb); + match *cleanup_kind { + CleanupKind::Internal { .. } => { + bcx.set_personality_fn(fcx.eh_personality()); + } + CleanupKind::Funclet => { + bcx.set_personality_fn(fcx.eh_personality()); + funclets[bb] = Funclet::msvc(bcx.cleanup_pad(None, &[])); + } + _ => {} + } + } } - rpo.reset(); + + let rpo = traversal::reverse_postorder(&mir); + let mut visited = BitVector::new(mir.basic_blocks().len()); // Translate the body of each block using reverse postorder for (bb, _) in rpo { From 63a0d85457664e9af6c19cc0fb3927a218c31865 Mon Sep 17 00:00:00 2001 From: Mark Simulacrum Date: Sun, 18 Dec 2016 17:13:09 -0700 Subject: [PATCH 070/103] Make add_incoming_to_phi call slightly less confusing. --- src/librustc_trans/tvec.rs | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/librustc_trans/tvec.rs b/src/librustc_trans/tvec.rs index c693a5ceabedd..0792896487315 100644 --- a/src/librustc_trans/tvec.rs +++ b/src/librustc_trans/tvec.rs @@ -47,7 +47,7 @@ pub fn slice_for_each<'a, 'tcx, F>( f(&body_bcx, if zst { data_ptr } else { current }); let next = add(&body_bcx, current, C_uint(bcx.ccx(), 1usize)); - body_bcx.add_incoming_to_phi(current, next, body_bcx.llbb()); + header_bcx.add_incoming_to_phi(current, next, body_bcx.llbb()); body_bcx.br(header_bcx.llbb()); next_bcx } From f9f1406eef844db064365bbdb4f7fa3eed8eea6f Mon Sep 17 00:00:00 2001 From: Mark Simulacrum Date: Sun, 18 Dec 2016 17:31:54 -0700 Subject: [PATCH 071/103] Rebase fixes --- src/librustc_trans/adt.rs | 2 -- 1 file changed, 2 deletions(-) diff --git a/src/librustc_trans/adt.rs b/src/librustc_trans/adt.rs index a59e2fe506f48..49a6fa22c49fe 100644 --- a/src/librustc_trans/adt.rs +++ b/src/librustc_trans/adt.rs @@ -567,10 +567,8 @@ fn struct_field_ptr<'a, 'tcx>( ix: usize, needs_cast: bool ) -> ValueRef { - let ccx = bcx.ccx(); let fty = fields[ix]; let ccx = bcx.ccx(); - let ll_fty = type_of::in_memory_type_of(ccx, fty); let ptr_val = if needs_cast { let fields = st.field_index_by_increasing_offset().map(|i| { From 6e3d8cda2c2b1272340eaed618e18e7e35b1134b Mon Sep 17 00:00:00 2001 From: Mark Simulacrum Date: Sun, 18 Dec 2016 22:24:42 -0700 Subject: [PATCH 072/103] Fix and cleanup callee shims --- src/librustc_trans/callee.rs | 19 +++++-------------- src/librustc_trans/meth.rs | 18 ++++++++++++------ src/test/run-pass/trans-object-shim.rs | 14 ++++++++++++++ 3 files changed, 31 insertions(+), 20 deletions(-) create mode 100644 src/test/run-pass/trans-object-shim.rs diff --git a/src/librustc_trans/callee.rs b/src/librustc_trans/callee.rs index 892b402bf1009..cab42e8b5e61a 100644 --- a/src/librustc_trans/callee.rs +++ b/src/librustc_trans/callee.rs @@ -502,15 +502,15 @@ fn trans_fn_pointer_shim<'a, 'tcx>( let fcx = FunctionContext::new(ccx, llfn, fn_ty); let bcx = fcx.get_entry_block(); - let llargs = get_params(fcx.llfn); + let mut llargs = get_params(fcx.llfn); - let self_idx = fcx.fn_ty.ret.is_indirect() as usize; + let self_arg = llargs.remove(fcx.fn_ty.ret.is_indirect() as usize); let llfnpointer = llfnpointer.unwrap_or_else(|| { // the first argument (`self`) will be ptr to the fn pointer if is_by_ref { - bcx.load(llargs[self_idx]) + bcx.load(self_arg) } else { - llargs[self_idx] + self_arg } }); @@ -522,16 +522,7 @@ fn trans_fn_pointer_shim<'a, 
'tcx>(
 
     let fn_ret = callee.ty.fn_ret();
     let fn_ty = callee.direct_fn_type(ccx, &[]);
-    let mut args = Vec::new();
-
-    if fn_ty.ret.is_indirect() {
-        if !fn_ty.ret.is_ignore() {
-            args.push(get_param(fcx.llfn, 0));
-        }
-    }
-    args.extend_from_slice(&llargs[(self_idx + 1)..]);
-
-    let llret = bcx.call(llfnpointer, &args, None);
+    let llret = bcx.call(llfnpointer, &llargs, None);
     fn_ty.apply_attrs_callsite(llret);
 
     if fn_ret.0.is_never() {
diff --git a/src/librustc_trans/meth.rs b/src/librustc_trans/meth.rs
index b9a6491f52f3f..6bc3c1e0b1e6c 100644
--- a/src/librustc_trans/meth.rs
+++ b/src/librustc_trans/meth.rs
@@ -12,7 +12,7 @@ use attributes;
 use llvm::{ValueRef, get_params};
 use rustc::traits;
 use abi::FnType;
-use callee::Callee;
+use callee::{Callee, CalleeData};
 use common::*;
 use consts;
 use declare;
@@ -84,14 +84,20 @@ pub fn trans_object_shim<'a, 'tcx>(ccx: &'a CrateContext<'a, 'tcx>,
     let fcx = FunctionContext::new(ccx, llfn, fn_ty);
     let bcx = fcx.get_entry_block();
-    let llargs = get_params(fcx.llfn);
+    let mut llargs = get_params(fcx.llfn);
 
     let fn_ret = callee.ty.fn_ret();
     let fn_ty = callee.direct_fn_type(ccx, &[]);
 
-    let mut args = Vec::new();
-
-    args.extend_from_slice(&llargs);
-    let llret = bcx.call(callee.reify(ccx), &args, None);
+    let fn_ptr = match callee.data {
+        CalleeData::Virtual(idx) => {
+            let fn_ptr = get_virtual_method(&bcx,
+                llargs.remove(fn_ty.ret.is_indirect() as usize + 1), idx);
+            let llty = fn_ty.llvm_type(bcx.ccx()).ptr_to();
+            bcx.pointercast(fn_ptr, llty)
+        },
+        _ => bug!("trans_object_shim called with non-virtual callee"),
+    };
+    let llret = bcx.call(fn_ptr, &llargs, None);
     fn_ty.apply_attrs_callsite(llret);
 
     if fn_ret.0.is_never() {
diff --git a/src/test/run-pass/trans-object-shim.rs b/src/test/run-pass/trans-object-shim.rs
new file mode 100644
index 0000000000000..5fbfef05e10d4
--- /dev/null
+++ b/src/test/run-pass/trans-object-shim.rs
@@ -0,0 +1,14 @@
+// Copyright 2016 The Rust Project Developers. See the COPYRIGHT
+// file at the top-level directory of this distribution and at
+// http://rust-lang.org/COPYRIGHT.
+//
+// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
+// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
+// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
+// option. This file may not be copied, modified, or distributed
+// except according to those terms.
+ +fn main() { + assert_eq!((ToString::to_string as fn(&(ToString+'static)) -> String)(&"foo"), + String::from("foo")); +} From dd1890f7f4acd7acf2c319ce2b743b4f8d41731d Mon Sep 17 00:00:00 2001 From: Mark Simulacrum Date: Sun, 18 Dec 2016 23:01:06 -0700 Subject: [PATCH 073/103] Remove unreachable call to unreachable --- src/librustc_trans/glue.rs | 5 ----- 1 file changed, 5 deletions(-) diff --git a/src/librustc_trans/glue.rs b/src/librustc_trans/glue.rs index 262a958347d54..e5f53b3b64d76 100644 --- a/src/librustc_trans/glue.rs +++ b/src/librustc_trans/glue.rs @@ -47,17 +47,12 @@ pub fn trans_exchange_free_dyn<'a, 'tcx>( let callee = Callee::def(bcx.ccx(), def_id, bcx.tcx().intern_substs(&[])); let ccx = bcx.ccx(); - let fn_ret = callee.ty.fn_ret(); let fn_ty = callee.direct_fn_type(ccx, &[]); assert!(!fn_ty.ret.is_indirect() && fn_ty.ret.cast.is_none()); let llret = bcx.call(callee.reify(ccx), &args[..], None); fn_ty.apply_attrs_callsite(llret); - - if fn_ret.0.is_never() { - bcx.unreachable(); - } } pub fn trans_exchange_free_ty<'a, 'tcx>( From 7f5dffbb197083e5a673e7cb206dc7bdac94dcd1 Mon Sep 17 00:00:00 2001 From: Mark Simulacrum Date: Sun, 18 Dec 2016 23:04:25 -0700 Subject: [PATCH 074/103] Make debuginfo take debug_context instead of MirContext --- src/librustc_trans/debuginfo/mod.rs | 9 ++-- src/librustc_trans/debuginfo/source_loc.rs | 26 ++++++------ src/librustc_trans/mir/block.rs | 7 ++-- src/librustc_trans/mir/mod.rs | 48 +++++++++++++++------- src/librustc_trans/mir/statement.rs | 2 +- 5 files changed, 54 insertions(+), 38 deletions(-) diff --git a/src/librustc_trans/debuginfo/mod.rs b/src/librustc_trans/debuginfo/mod.rs index b22bb080d0529..e984edacaf871 100644 --- a/src/librustc_trans/debuginfo/mod.rs +++ b/src/librustc_trans/debuginfo/mod.rs @@ -28,7 +28,6 @@ use rustc::ty::subst::Substs; use abi::Abi; use common::{CrateContext, BlockAndBuilder}; -use mir::MirContext; use monomorphize::{self, Instance}; use rustc::ty::{self, Ty}; use rustc::mir; @@ -434,7 +433,7 @@ pub fn create_function_debug_context<'a, 'tcx>(cx: &CrateContext<'a, 'tcx>, } pub fn declare_local<'a, 'tcx>(bcx: &BlockAndBuilder<'a, 'tcx>, - mir: &MirContext, + dbg_context: &FunctionDebugContext, variable_name: ast::Name, variable_type: Ty<'tcx>, scope_metadata: DIScope, @@ -476,7 +475,7 @@ pub fn declare_local<'a, 'tcx>(bcx: &BlockAndBuilder<'a, 'tcx>, align as u64, ) }; - source_loc::set_debug_location(cx, bcx, + source_loc::set_debug_location(bcx, InternalDebugLocation::new(scope_metadata, loc.line, loc.col.to_usize())); unsafe { let debug_loc = llvm::LLVMGetCurrentDebugLocation(bcx.llbuilder); @@ -496,8 +495,8 @@ pub fn declare_local<'a, 'tcx>(bcx: &BlockAndBuilder<'a, 'tcx>, match variable_kind { ArgumentVariable(_) | CapturedVariable => { - assert!(!mir.debug_context.get_ref(span).source_locations_enabled.get()); - source_loc::set_debug_location(cx, bcx, UnknownLocation); + assert!(!dbg_context.get_ref(span).source_locations_enabled.get()); + source_loc::set_debug_location(bcx, UnknownLocation); } _ => { /* nothing to do */ } } diff --git a/src/librustc_trans/debuginfo/source_loc.rs b/src/librustc_trans/debuginfo/source_loc.rs index 16b32f2e3d635..e02c8be19a2f4 100644 --- a/src/librustc_trans/debuginfo/source_loc.rs +++ b/src/librustc_trans/debuginfo/source_loc.rs @@ -17,8 +17,6 @@ use super::FunctionDebugContext; use llvm; use llvm::debuginfo::DIScope; use builder::Builder; -use common::CrateContext; -use mir::MirContext; use libc::c_uint; use std::ptr; @@ -27,24 +25,26 @@ use syntax_pos::{Span, Pos}; 
/// Sets the current debug location at the beginning of the span. /// /// Maps to a call to llvm::LLVMSetCurrentDebugLocation(...). -pub fn set_source_location(mir: &MirContext, builder: &Builder, scope: DIScope, span: Span) { - let function_debug_context = match mir.debug_context { +pub fn set_source_location( + debug_context: &FunctionDebugContext, builder: &Builder, scope: DIScope, span: Span +) { + let function_debug_context = match *debug_context { FunctionDebugContext::DebugInfoDisabled => return, FunctionDebugContext::FunctionWithoutDebugInfo => { - set_debug_location(mir.ccx(), builder, UnknownLocation); + set_debug_location(builder, UnknownLocation); return; } FunctionDebugContext::RegularContext(ref data) => data }; let dbg_loc = if function_debug_context.source_locations_enabled.get() { - debug!("set_source_location: {}", mir.ccx().sess().codemap().span_to_string(span)); - let loc = span_start(mir.ccx(), span); + debug!("set_source_location: {}", builder.ccx.sess().codemap().span_to_string(span)); + let loc = span_start(builder.ccx, span); InternalDebugLocation::new(scope, loc.line, loc.col.to_usize()) } else { UnknownLocation }; - set_debug_location(mir.ccx(), builder, dbg_loc); + set_debug_location(builder, dbg_loc); } /// Enables emitting source locations for the given functions. @@ -53,8 +53,8 @@ pub fn set_source_location(mir: &MirContext, builder: &Builder, scope: DIScope, /// they are disabled when beginning to translate a new function. This functions /// switches source location emitting on and must therefore be called before the /// first real statement/expression of the function is translated. -pub fn start_emitting_source_locations(mir: &MirContext) { - match mir.debug_context { +pub fn start_emitting_source_locations(dbg_context: &FunctionDebugContext) { + match *dbg_context { FunctionDebugContext::RegularContext(ref data) => { data.source_locations_enabled.set(true) }, @@ -79,9 +79,7 @@ impl InternalDebugLocation { } } -pub fn set_debug_location(cx: &CrateContext, - builder: &Builder, - debug_location: InternalDebugLocation) { +pub fn set_debug_location(builder: &Builder, debug_location: InternalDebugLocation) { let metadata_node = match debug_location { KnownLocation { scope, line, .. } => { // Always set the column to zero like Clang and GCC @@ -90,7 +88,7 @@ pub fn set_debug_location(cx: &CrateContext, unsafe { llvm::LLVMRustDIBuilderCreateDebugLocation( - debug_context(cx).llcontext, + debug_context(builder.ccx).llcontext, line as c_uint, col as c_uint, scope, diff --git a/src/librustc_trans/mir/block.rs b/src/librustc_trans/mir/block.rs index e259f7c20f2f2..6b6ca1a98a386 100644 --- a/src/librustc_trans/mir/block.rs +++ b/src/librustc_trans/mir/block.rs @@ -115,7 +115,7 @@ impl<'a, 'tcx> MirContext<'a, 'tcx> { let span = terminator.source_info.span; let (scope, debug_span) = self.debug_loc(terminator.source_info); - debuginfo::set_source_location(self, &bcx, scope, debug_span); + debuginfo::set_source_location(&self.debug_context, &bcx, scope, debug_span); match terminator.kind { mir::TerminatorKind::Resume => { if let Some(cleanup_pad) = cleanup_pad { @@ -327,7 +327,7 @@ impl<'a, 'tcx> MirContext<'a, 'tcx> { // After this point, bcx is the block for the call to panic. bcx = panic_block; - debuginfo::set_source_location(self, &bcx, scope, debug_span); + debuginfo::set_source_location(&self.debug_context, &bcx, scope, debug_span); // Get the location information. 
let loc = bcx.sess().codemap().lookup_char_pos(span.lo); @@ -643,7 +643,8 @@ impl<'a, 'tcx> MirContext<'a, 'tcx> { if let Some((_, target)) = *destination { let ret_bcx = self.build_block(target); ret_bcx.at_start(|ret_bcx| { - debuginfo::set_source_location(self, &ret_bcx, scope, debug_span); + debuginfo::set_source_location(&self.debug_context, + &ret_bcx, scope, debug_span); let op = OperandRef { val: Immediate(invokeret), ty: sig.output(), diff --git a/src/librustc_trans/mir/mod.rs b/src/librustc_trans/mir/mod.rs index f8bd087b0da1e..d0123d6e6f7c4 100644 --- a/src/librustc_trans/mir/mod.rs +++ b/src/librustc_trans/mir/mod.rs @@ -44,9 +44,9 @@ use self::operand::{OperandRef, OperandValue}; /// Master context for translating MIR. pub struct MirContext<'a, 'tcx:'a> { - pub mir: &'a mir::Mir<'tcx>, + mir: &'a mir::Mir<'tcx>, - pub debug_context: debuginfo::FunctionDebugContext, + debug_context: debuginfo::FunctionDebugContext, /// Function context fcx: &'a common::FunctionContext<'a, 'tcx>, @@ -276,7 +276,7 @@ pub fn trans_mir<'a, 'tcx: 'a>( let lvalue = LvalueRef::alloca(&bcx, ty, &name.as_str()); if dbg { let (scope, span) = mircx.debug_loc(source_info); - declare_local(&bcx, &mircx, name, ty, scope, + declare_local(&bcx, &mircx.debug_context, name, ty, scope, VariableAccess::DirectVariable { alloca: lvalue.llval }, VariableKind::LocalVariable, span); } @@ -314,7 +314,7 @@ pub fn trans_mir<'a, 'tcx: 'a>( // Up until here, IR instructions for this function have explicitly not been annotated with // source code location, so we don't step into call setup code. From here on, source location // emitting should be enabled. - debuginfo::start_emitting_source_locations(&mircx); + debuginfo::start_emitting_source_locations(&mircx.debug_context); let mut funclets: IndexVec> = IndexVec::from_elem(None, mir.basic_blocks()); @@ -418,10 +418,15 @@ fn arg_local_refs<'a, 'tcx>(bcx: &BlockAndBuilder<'a, 'tcx>, let variable_access = VariableAccess::DirectVariable { alloca: lltemp }; - declare_local(bcx, mircx, arg_decl.name.unwrap_or(keywords::Invalid.name()), - arg_ty, scope, variable_access, - VariableKind::ArgumentVariable(arg_index + 1), - DUMMY_SP); + declare_local( + bcx, + &mircx.debug_context, + arg_decl.name.unwrap_or(keywords::Invalid.name()), + arg_ty, scope, + variable_access, + VariableKind::ArgumentVariable(arg_index + 1), + DUMMY_SP + ); }); return LocalRef::Lvalue(LvalueRef::new_sized(lltemp, LvalueTy::from_ty(arg_ty))); @@ -490,10 +495,16 @@ fn arg_local_refs<'a, 'tcx>(bcx: &BlockAndBuilder<'a, 'tcx>, arg_scope.map(|scope| { // Is this a regular argument? 
if arg_index > 0 || mir.upvar_decls.is_empty() { - declare_local(bcx, mircx, arg_decl.name.unwrap_or(keywords::Invalid.name()), arg_ty, - scope, VariableAccess::DirectVariable { alloca: llval }, - VariableKind::ArgumentVariable(arg_index + 1), - DUMMY_SP); + declare_local( + bcx, + &mircx.debug_context, + arg_decl.name.unwrap_or(keywords::Invalid.name()), + arg_ty, + scope, + VariableAccess::DirectVariable { alloca: llval }, + VariableKind::ArgumentVariable(arg_index + 1), + DUMMY_SP + ); return; } @@ -558,9 +569,16 @@ fn arg_local_refs<'a, 'tcx>(bcx: &BlockAndBuilder<'a, 'tcx>, alloca: env_ptr, address_operations: &ops }; - declare_local(bcx, mircx, decl.debug_name, ty, scope, variable_access, - VariableKind::CapturedVariable, - DUMMY_SP); + declare_local( + bcx, + &mircx.debug_context, + decl.debug_name, + ty, + scope, + variable_access, + VariableKind::CapturedVariable, + DUMMY_SP + ); } }); LocalRef::Lvalue(LvalueRef::new_sized(llval, LvalueTy::from_ty(arg_ty))) diff --git a/src/librustc_trans/mir/statement.rs b/src/librustc_trans/mir/statement.rs index 62ee768ab0754..6fc5d7db676d3 100644 --- a/src/librustc_trans/mir/statement.rs +++ b/src/librustc_trans/mir/statement.rs @@ -27,7 +27,7 @@ impl<'a, 'tcx> MirContext<'a, 'tcx> { debug!("trans_statement(statement={:?})", statement); let (scope, span) = self.debug_loc(statement.source_info); - debuginfo::set_source_location(self, &bcx, scope, span); + debuginfo::set_source_location(&self.debug_context, &bcx, scope, span); match statement.kind { mir::StatementKind::Assign(ref lvalue, ref rvalue) => { if let mir::Lvalue::Local(index) = *lvalue { From a445199afb7e57254de40afd96d9e2ab5bb01e4a Mon Sep 17 00:00:00 2001 From: Mark Simulacrum Date: Mon, 19 Dec 2016 07:15:00 -0700 Subject: [PATCH 075/103] Remove public ccx function on MirContext --- src/librustc_trans/mir/lvalue.rs | 2 +- src/librustc_trans/mir/mod.rs | 4 ---- 2 files changed, 1 insertion(+), 5 deletions(-) diff --git a/src/librustc_trans/mir/lvalue.rs b/src/librustc_trans/mir/lvalue.rs index 673a786f1f8bb..3e5ad826e2ab7 100644 --- a/src/librustc_trans/mir/lvalue.rs +++ b/src/librustc_trans/mir/lvalue.rs @@ -140,7 +140,7 @@ impl<'a, 'tcx> MirContext<'a, 'tcx> { LvalueTy::Downcast { adt_def: _, substs: _, variant_index: v } => v, }; let discr = discr as u64; - let is_sized = self.ccx().shared().type_is_sized(projected_ty.to_ty(tcx)); + let is_sized = self.fcx.ccx.shared().type_is_sized(projected_ty.to_ty(tcx)); let base = if is_sized { adt::MaybeSizedValue::sized(tr_base.llval) } else { diff --git a/src/librustc_trans/mir/mod.rs b/src/librustc_trans/mir/mod.rs index d0123d6e6f7c4..1cfe60727fcd2 100644 --- a/src/librustc_trans/mir/mod.rs +++ b/src/librustc_trans/mir/mod.rs @@ -158,10 +158,6 @@ impl<'a, 'tcx> MirContext<'a, 'tcx> { scope_metadata } } - - pub fn ccx(&self) -> &'a CrateContext<'a, 'tcx> { - self.fcx.ccx - } } enum LocalRef<'tcx> { From 0256f60461cf607a7f84554926298c0fb74f381e Mon Sep 17 00:00:00 2001 From: Mark Simulacrum Date: Mon, 19 Dec 2016 07:33:35 -0700 Subject: [PATCH 076/103] Move debug info check into create_function_debug_context --- src/librustc_trans/debuginfo/mod.rs | 15 ++++++--------- src/librustc_trans/mir/mod.rs | 16 ++-------------- 2 files changed, 8 insertions(+), 23 deletions(-) diff --git a/src/librustc_trans/debuginfo/mod.rs b/src/librustc_trans/debuginfo/mod.rs index e984edacaf871..8bd9786c66573 100644 --- a/src/librustc_trans/debuginfo/mod.rs +++ b/src/librustc_trans/debuginfo/mod.rs @@ -195,15 +195,6 @@ pub fn finalize(cx: &CrateContext) { }; } 
-/// Creates a function-specific debug context for a function w/o debuginfo. -pub fn empty_function_debug_context<'a, 'tcx>(cx: &CrateContext<'a, 'tcx>) -> FunctionDebugContext { - if cx.sess().opts.debuginfo == NoDebugInfo { - FunctionDebugContext::DebugInfoDisabled - } else { - FunctionDebugContext::FunctionWithoutDebugInfo - } -} - /// Creates the function-specific debug context. /// /// Returns the FunctionDebugContext for the function which holds state needed @@ -220,6 +211,12 @@ pub fn create_function_debug_context<'a, 'tcx>(cx: &CrateContext<'a, 'tcx>, return FunctionDebugContext::DebugInfoDisabled; } + for attr in cx.tcx().get_attrs(instance.def).iter() { + if attr.check_name("no_debug") { + return FunctionDebugContext::FunctionWithoutDebugInfo; + } + } + let containing_scope = get_containing_scope(cx, instance); let span = mir.span; diff --git a/src/librustc_trans/mir/mod.rs b/src/librustc_trans/mir/mod.rs index 1cfe60727fcd2..4a908e8cea9b4 100644 --- a/src/librustc_trans/mir/mod.rs +++ b/src/librustc_trans/mir/mod.rs @@ -199,22 +199,10 @@ pub fn trans_mir<'a, 'tcx: 'a>( sig: &ty::FnSig<'tcx>, abi: Abi, ) { - let def_id = instance.def; - let local_id = fcx.ccx.tcx().map.as_local_node_id(def_id); - let no_debug = if let Some(id) = local_id { - fcx.ccx.tcx().map.attrs(id).iter().any(|item| item.check_name("no_debug")) - } else { - fcx.ccx.sess().cstore.item_attrs(def_id).iter().any(|item| item.check_name("no_debug")) - }; - - let debug_context = if !no_debug { - debuginfo::create_function_debug_context(fcx.ccx, instance, sig, abi, fcx.llfn, mir) - } else { - debuginfo::empty_function_debug_context(fcx.ccx) - }; + let debug_context = + debuginfo::create_function_debug_context(fcx.ccx, instance, sig, abi, fcx.llfn, mir); let bcx = fcx.get_entry_block(); - // Analyze the temps to determine which must be lvalues let cleanup_kinds = analyze::cleanup_kinds(&mir); // Allocate a `Block` for every basic block From 5301d380b66066f71b3eabe1408f88cda348f084 Mon Sep 17 00:00:00 2001 From: Mark Simulacrum Date: Mon, 19 Dec 2016 07:47:09 -0700 Subject: [PATCH 077/103] Remove unused bcx from LocalAnalyzer. --- src/librustc_trans/mir/analyze.rs | 40 ++++++++++++++----------------- src/librustc_trans/mir/mod.rs | 2 +- src/librustc_trans/mir/rvalue.rs | 9 +++---- 3 files changed, 22 insertions(+), 29 deletions(-) diff --git a/src/librustc_trans/mir/analyze.rs b/src/librustc_trans/mir/analyze.rs index 71375f1160c3f..3f94af8255394 100644 --- a/src/librustc_trans/mir/analyze.rs +++ b/src/librustc_trans/mir/analyze.rs @@ -16,14 +16,13 @@ use rustc_data_structures::indexed_vec::{Idx, IndexVec}; use rustc::mir::{self, Location, TerminatorKind}; use rustc::mir::visit::{Visitor, LvalueContext}; use rustc::mir::traversal; -use common::{self, BlockAndBuilder}; +use common; use super::MirContext; use super::rvalue; -pub fn lvalue_locals<'a, 'tcx>(bcx: &BlockAndBuilder<'a, 'tcx>, mircx: &MirContext<'a, 'tcx>) - -> BitVector { +pub fn lvalue_locals<'a, 'tcx>(mircx: &MirContext<'a, 'tcx>) -> BitVector { let mir = mircx.mir; - let mut analyzer = LocalAnalyzer::new(mircx, &bcx); + let mut analyzer = LocalAnalyzer::new(mircx); analyzer.visit_mir(mir); @@ -34,13 +33,13 @@ pub fn lvalue_locals<'a, 'tcx>(bcx: &BlockAndBuilder<'a, 'tcx>, mircx: &MirConte ty.is_unique() || ty.is_region_ptr() || ty.is_simd() || - common::type_is_zero_size(bcx.ccx(), ty) + common::type_is_zero_size(mircx.fcx.ccx, ty) { // These sorts of types are immediates that we can store // in an ValueRef without an alloca. 
- assert!(common::type_is_immediate(bcx.ccx(), ty) || - common::type_is_fat_ptr(bcx.ccx(), ty)); - } else if common::type_is_imm_pair(bcx.ccx(), ty) { + assert!(common::type_is_immediate(mircx.fcx.ccx, ty) || + common::type_is_fat_ptr(mircx.fcx.ccx, ty)); + } else if common::type_is_imm_pair(mircx.fcx.ccx, ty) { // We allow pairs and uses of any of their 2 fields. } else { // These sorts of types require an alloca. Note that @@ -57,18 +56,15 @@ pub fn lvalue_locals<'a, 'tcx>(bcx: &BlockAndBuilder<'a, 'tcx>, mircx: &MirConte } struct LocalAnalyzer<'mir, 'a: 'mir, 'tcx: 'a> { - mir: &'mir MirContext<'a, 'tcx>, - bcx: &'mir BlockAndBuilder<'a, 'tcx>, + cx: &'mir MirContext<'a, 'tcx>, lvalue_locals: BitVector, seen_assigned: BitVector } impl<'mir, 'a, 'tcx> LocalAnalyzer<'mir, 'a, 'tcx> { - fn new(mircx: &'mir MirContext<'a, 'tcx>, bcx: &'mir BlockAndBuilder<'a, 'tcx>) - -> LocalAnalyzer<'mir, 'a, 'tcx> { + fn new(mircx: &'mir MirContext<'a, 'tcx>) -> LocalAnalyzer<'mir, 'a, 'tcx> { LocalAnalyzer { - mir: mircx, - bcx: bcx, + cx: mircx, lvalue_locals: BitVector::new(mircx.mir.local_decls.len()), seen_assigned: BitVector::new(mircx.mir.local_decls.len()) } @@ -96,7 +92,7 @@ impl<'mir, 'a, 'tcx> Visitor<'tcx> for LocalAnalyzer<'mir, 'a, 'tcx> { if let mir::Lvalue::Local(index) = *lvalue { self.mark_assigned(index); - if !rvalue::rvalue_creates_operand(self.mir.mir, self.bcx, rvalue) { + if !rvalue::rvalue_creates_operand(rvalue) { self.mark_as_lvalue(index); } } else { @@ -116,7 +112,7 @@ impl<'mir, 'a, 'tcx> Visitor<'tcx> for LocalAnalyzer<'mir, 'a, 'tcx> { literal: mir::Literal::Item { def_id, .. }, .. }), ref args, .. - } if Some(def_id) == self.bcx.tcx().lang_items.box_free_fn() => { + } if Some(def_id) == self.cx.fcx.ccx.tcx().lang_items.box_free_fn() => { // box_free(x) shares with `drop x` the property that it // is not guaranteed to be statically dominated by the // definition of x, so x must always be in an alloca. @@ -139,10 +135,10 @@ impl<'mir, 'a, 'tcx> Visitor<'tcx> for LocalAnalyzer<'mir, 'a, 'tcx> { // Allow uses of projections of immediate pair fields. if let mir::Lvalue::Projection(ref proj) = *lvalue { if let mir::Lvalue::Local(_) = proj.base { - let ty = proj.base.ty(self.mir.mir, self.bcx.tcx()); + let ty = proj.base.ty(self.cx.mir, self.cx.fcx.ccx.tcx()); - let ty = self.mir.monomorphize(&ty.to_ty(self.bcx.tcx())); - if common::type_is_imm_pair(self.bcx.ccx(), ty) { + let ty = self.cx.monomorphize(&ty.to_ty(self.cx.fcx.ccx.tcx())); + if common::type_is_imm_pair(self.cx.fcx.ccx, ty) { if let mir::ProjectionElem::Field(..) = proj.elem { if let LvalueContext::Consume = context { return; @@ -170,11 +166,11 @@ impl<'mir, 'a, 'tcx> Visitor<'tcx> for LocalAnalyzer<'mir, 'a, 'tcx> { } LvalueContext::Drop => { - let ty = lvalue.ty(self.mir.mir, self.bcx.tcx()); - let ty = self.mir.monomorphize(&ty.to_ty(self.bcx.tcx())); + let ty = lvalue.ty(self.cx.mir, self.cx.fcx.ccx.tcx()); + let ty = self.cx.monomorphize(&ty.to_ty(self.cx.fcx.ccx.tcx())); // Only need the lvalue if we're actually dropping it. 
- if self.bcx.ccx().shared().type_needs_drop(ty) { + if self.cx.fcx.ccx.shared().type_needs_drop(ty) { self.mark_as_lvalue(index); } } diff --git a/src/librustc_trans/mir/mod.rs b/src/librustc_trans/mir/mod.rs index 4a908e8cea9b4..846b4162e06fd 100644 --- a/src/librustc_trans/mir/mod.rs +++ b/src/librustc_trans/mir/mod.rs @@ -235,7 +235,7 @@ pub fn trans_mir<'a, 'tcx: 'a>( }, }; - let lvalue_locals = analyze::lvalue_locals(&bcx, &mircx); + let lvalue_locals = analyze::lvalue_locals(&mircx); // Allocate variable and temp allocas mircx.locals = { diff --git a/src/librustc_trans/mir/rvalue.rs b/src/librustc_trans/mir/rvalue.rs index 5037bd9dae395..5f17db73d6629 100644 --- a/src/librustc_trans/mir/rvalue.rs +++ b/src/librustc_trans/mir/rvalue.rs @@ -166,7 +166,7 @@ impl<'a, 'tcx> MirContext<'a, 'tcx> { } _ => { - assert!(rvalue_creates_operand(&self.mir, &bcx, rvalue)); + assert!(rvalue_creates_operand(rvalue)); let (bcx, temp) = self.trans_rvalue_operand(bcx, rvalue); self.store_operand(&bcx, dest.llval, temp); bcx @@ -179,8 +179,7 @@ impl<'a, 'tcx> MirContext<'a, 'tcx> { rvalue: &mir::Rvalue<'tcx>) -> (BlockAndBuilder<'a, 'tcx>, OperandRef<'tcx>) { - assert!(rvalue_creates_operand(&self.mir, &bcx, rvalue), - "cannot trans {:?} to operand", rvalue); + assert!(rvalue_creates_operand(rvalue), "cannot trans {:?} to operand", rvalue); match *rvalue { mir::Rvalue::Cast(ref kind, ref source, cast_ty) => { @@ -662,9 +661,7 @@ impl<'a, 'tcx> MirContext<'a, 'tcx> { } } -pub fn rvalue_creates_operand<'a, 'tcx>(_mir: &mir::Mir<'tcx>, - _bcx: &BlockAndBuilder<'a, 'tcx>, - rvalue: &mir::Rvalue<'tcx>) -> bool { +pub fn rvalue_creates_operand(rvalue: &mir::Rvalue) -> bool { match *rvalue { mir::Rvalue::Ref(..) | mir::Rvalue::Len(..) | From 7f871630710e8b9661711cd333cb87e7230fe94f Mon Sep 17 00:00:00 2001 From: Mark Simulacrum Date: Mon, 19 Dec 2016 08:00:46 -0700 Subject: [PATCH 078/103] Simplify funclets creation. --- src/librustc_trans/mir/mod.rs | 30 ++++++++++++++---------------- 1 file changed, 14 insertions(+), 16 deletions(-) diff --git a/src/librustc_trans/mir/mod.rs b/src/librustc_trans/mir/mod.rs index 846b4162e06fd..2cb181340405e 100644 --- a/src/librustc_trans/mir/mod.rs +++ b/src/librustc_trans/mir/mod.rs @@ -300,25 +300,23 @@ pub fn trans_mir<'a, 'tcx: 'a>( // emitting should be enabled. debuginfo::start_emitting_source_locations(&mircx.debug_context); - let mut funclets: IndexVec> = - IndexVec::from_elem(None, mir.basic_blocks()); - // If false, all funclets should be None (which is the default) - if base::wants_msvc_seh(fcx.ccx.sess()) { - for (bb, cleanup_kind) in mircx.cleanup_kinds.iter_enumerated() { - let bcx = mircx.build_block(bb); - match *cleanup_kind { - CleanupKind::Internal { .. } => { - bcx.set_personality_fn(fcx.eh_personality()); - } - CleanupKind::Funclet => { - bcx.set_personality_fn(fcx.eh_personality()); - funclets[bb] = Funclet::msvc(bcx.cleanup_pad(None, &[])); - } - _ => {} + let funclets: IndexVec> = + mircx.cleanup_kinds.iter_enumerated().map(|(bb, cleanup_kind)| { + let bcx = mircx.build_block(bb); + match *cleanup_kind { + _ if !base::wants_msvc_seh(fcx.ccx.sess()) => None, + CleanupKind::Internal { .. 
} => { + bcx.set_personality_fn(fcx.eh_personality()); + None } + CleanupKind::Funclet => { + bcx.set_personality_fn(fcx.eh_personality()); + Funclet::msvc(bcx.cleanup_pad(None, &[])) + } + _ => None } - } + }).collect(); let rpo = traversal::reverse_postorder(&mir); let mut visited = BitVector::new(mir.basic_blocks().len()); From 5ef85dd57e46e59e8035f522a672eeffba989d05 Mon Sep 17 00:00:00 2001 From: Mark Simulacrum Date: Mon, 19 Dec 2016 08:09:03 -0700 Subject: [PATCH 079/103] Change param_env to empty_param_env --- src/librustc_trans/context.rs | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/src/librustc_trans/context.rs b/src/librustc_trans/context.rs index d9fc21dd79f65..9b6d911bf5e64 100644 --- a/src/librustc_trans/context.rs +++ b/src/librustc_trans/context.rs @@ -68,7 +68,7 @@ pub struct SharedCrateContext<'a, 'tcx: 'a> { exported_symbols: NodeSet, link_meta: LinkMeta, tcx: TyCtxt<'a, 'tcx, 'tcx>, - param_env: ty::ParameterEnvironment<'tcx>, + empty_param_env: ty::ParameterEnvironment<'tcx>, stats: Stats, check_overflow: bool, @@ -456,7 +456,7 @@ impl<'b, 'tcx> SharedCrateContext<'b, 'tcx> { export_map: export_map, exported_symbols: exported_symbols, link_meta: link_meta, - param_env: tcx.empty_parameter_environment(), + empty_param_env: tcx.empty_parameter_environment(), tcx: tcx, stats: Stats { n_glues_created: Cell::new(0), @@ -478,11 +478,11 @@ impl<'b, 'tcx> SharedCrateContext<'b, 'tcx> { } pub fn type_needs_drop(&self, ty: Ty<'tcx>) -> bool { - self.tcx.type_needs_drop_given_env(ty, &self.param_env) + self.tcx.type_needs_drop_given_env(ty, &self.empty_param_env) } pub fn type_is_sized(&self, ty: Ty<'tcx>) -> bool { - ty.is_sized(self.tcx, &self.param_env, DUMMY_SP) + ty.is_sized(self.tcx, &self.empty_param_env, DUMMY_SP) } pub fn metadata_llmod(&self) -> ModuleRef { From 22bf541e27eaea348f1ab534b1ec2f1e701254d5 Mon Sep 17 00:00:00 2001 From: Mark Simulacrum Date: Mon, 19 Dec 2016 14:10:48 -0700 Subject: [PATCH 080/103] Clean up uses of set_personality_fn. Remove gnu/msvc constructors for Funclet; these are worse for readability than explicit Some/None. 
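
As a sketch lifted from the cleanup.rs diff below, the call sites now
spell the Option out directly instead of hiding it behind the
constructors:

    // Before: the Option was baked into the constructor names.
    UnwindKind::CleanupPad(_) => Funclet::msvc(cleanup.cleanup_pad(None, &[])),
    UnwindKind::LandingPad => Funclet::gnu(),

    // After: explicit Some/None at the call site.
    UnwindKind::CleanupPad(_) => Some(Funclet::new(cleanup.cleanup_pad(None, &[]))),
    UnwindKind::LandingPad => None,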
--- src/librustc_trans/cleanup.rs | 4 ++-- src/librustc_trans/common.rs | 10 +++------- src/librustc_trans/mir/block.rs | 4 ---- src/librustc_trans/mir/mod.rs | 18 +++++++----------- 4 files changed, 12 insertions(+), 24 deletions(-) diff --git a/src/librustc_trans/cleanup.rs b/src/librustc_trans/cleanup.rs index 7b655bbd60675..4eb786d63941b 100644 --- a/src/librustc_trans/cleanup.rs +++ b/src/librustc_trans/cleanup.rs @@ -203,8 +203,8 @@ impl<'tcx> CleanupScope<'tcx> { // Insert cleanup instructions into the cleanup block let funclet = match val { - UnwindKind::CleanupPad(_) => Funclet::msvc(cleanup.cleanup_pad(None, &[])), - UnwindKind::LandingPad => Funclet::gnu(), + UnwindKind::CleanupPad(_) => Some(Funclet::new(cleanup.cleanup_pad(None, &[]))), + UnwindKind::LandingPad => None, }; drop_val.trans(funclet.as_ref(), &cleanup); diff --git a/src/librustc_trans/common.rs b/src/librustc_trans/common.rs index b7f2fabf184da..81aba269a8b3b 100644 --- a/src/librustc_trans/common.rs +++ b/src/librustc_trans/common.rs @@ -466,15 +466,11 @@ pub struct Funclet { } impl Funclet { - pub fn gnu() -> Option { - None - } - - pub fn msvc(cleanuppad: ValueRef) -> Option { - Some(Funclet { + pub fn new(cleanuppad: ValueRef) -> Funclet { + Funclet { cleanuppad: cleanuppad, operand: OperandBundleDef::new("funclet", &[cleanuppad]), - }) + } } pub fn cleanuppad(&self) -> ValueRef { diff --git a/src/librustc_trans/mir/block.rs b/src/librustc_trans/mir/block.rs index 6b6ca1a98a386..7edc3b0053238 100644 --- a/src/librustc_trans/mir/block.rs +++ b/src/librustc_trans/mir/block.rs @@ -86,7 +86,6 @@ impl<'a, 'tcx> MirContext<'a, 'tcx> { debug!("llblock: creating cleanup trampoline for {:?}", target); let name = &format!("{:?}_cleanup_trampoline_{:?}", bb, target); let trampoline = this.fcx.build_new_block(name); - trampoline.set_personality_fn(this.fcx.eh_personality()); trampoline.cleanup_ret(cp, Some(lltarget)); trampoline.llbb() } @@ -121,9 +120,6 @@ impl<'a, 'tcx> MirContext<'a, 'tcx> { if let Some(cleanup_pad) = cleanup_pad { bcx.cleanup_ret(cleanup_pad, None); } else { - let llpersonality = bcx.fcx().eh_personality(); - bcx.set_personality_fn(llpersonality); - let ps = self.get_personality_slot(&bcx); let lp = bcx.load(ps); Lifetime::End.call(&bcx, ps); diff --git a/src/librustc_trans/mir/mod.rs b/src/librustc_trans/mir/mod.rs index 2cb181340405e..6041b76d18e69 100644 --- a/src/librustc_trans/mir/mod.rs +++ b/src/librustc_trans/mir/mod.rs @@ -303,19 +303,15 @@ pub fn trans_mir<'a, 'tcx: 'a>( // If false, all funclets should be None (which is the default) let funclets: IndexVec> = mircx.cleanup_kinds.iter_enumerated().map(|(bb, cleanup_kind)| { - let bcx = mircx.build_block(bb); - match *cleanup_kind { - _ if !base::wants_msvc_seh(fcx.ccx.sess()) => None, - CleanupKind::Internal { .. 
} => { - bcx.set_personality_fn(fcx.eh_personality()); - None + if let CleanupKind::Funclet = *cleanup_kind { + let bcx = mircx.build_block(bb); + bcx.set_personality_fn(fcx.eh_personality()); + if base::wants_msvc_seh(fcx.ccx.sess()) { + return Some(Funclet::new(bcx.cleanup_pad(None, &[]))); } - CleanupKind::Funclet => { - bcx.set_personality_fn(fcx.eh_personality()); - Funclet::msvc(bcx.cleanup_pad(None, &[])) - } - _ => None } + + None }).collect(); let rpo = traversal::reverse_postorder(&mir); From f11721a13e5dee8b8e2661d33ffa0a576e4643a6 Mon Sep 17 00:00:00 2001 From: Mark Simulacrum Date: Mon, 19 Dec 2016 14:38:16 -0700 Subject: [PATCH 081/103] Add helper function to set debug locations --- src/librustc_trans/debuginfo/mod.rs | 12 +++--------- src/librustc_trans/mir/block.rs | 12 +++++------- src/librustc_trans/mir/mod.rs | 14 +++++++++----- src/librustc_trans/mir/statement.rs | 4 +--- 4 files changed, 18 insertions(+), 24 deletions(-) diff --git a/src/librustc_trans/debuginfo/mod.rs b/src/librustc_trans/debuginfo/mod.rs index 8bd9786c66573..eb7f5d9c108d4 100644 --- a/src/librustc_trans/debuginfo/mod.rs +++ b/src/librustc_trans/debuginfo/mod.rs @@ -102,20 +102,14 @@ pub enum FunctionDebugContext { } impl FunctionDebugContext { - fn get_ref<'a>(&'a self, - span: Span) - -> &'a FunctionDebugContextData { + fn get_ref<'a>(&'a self, span: Span) -> &'a FunctionDebugContextData { match *self { FunctionDebugContext::RegularContext(ref data) => data, FunctionDebugContext::DebugInfoDisabled => { - span_bug!(span, - "{}", - FunctionDebugContext::debuginfo_disabled_message()); + span_bug!(span, "{}", FunctionDebugContext::debuginfo_disabled_message()); } FunctionDebugContext::FunctionWithoutDebugInfo => { - span_bug!(span, - "{}", - FunctionDebugContext::should_be_ignored_message()); + span_bug!(span, "{}", FunctionDebugContext::should_be_ignored_message()); } } } diff --git a/src/librustc_trans/mir/block.rs b/src/librustc_trans/mir/block.rs index 7edc3b0053238..eb71e7dcfe3c9 100644 --- a/src/librustc_trans/mir/block.rs +++ b/src/librustc_trans/mir/block.rs @@ -20,7 +20,6 @@ use callee::{Callee, CalleeData, Fn, Intrinsic, NamedTupleConstructor, Virtual}; use common::{self, BlockAndBuilder, Funclet}; use common::{C_bool, C_str_slice, C_struct, C_u32, C_undef}; use consts; -use debuginfo; use Disr; use machine::{llalign_of_min, llbitsize_of_real}; use meth; @@ -113,8 +112,7 @@ impl<'a, 'tcx> MirContext<'a, 'tcx> { debug!("trans_block: terminator: {:?}", terminator); let span = terminator.source_info.span; - let (scope, debug_span) = self.debug_loc(terminator.source_info); - debuginfo::set_source_location(&self.debug_context, &bcx, scope, debug_span); + self.set_debug_loc(&bcx, terminator.source_info); match terminator.kind { mir::TerminatorKind::Resume => { if let Some(cleanup_pad) = cleanup_pad { @@ -323,7 +321,7 @@ impl<'a, 'tcx> MirContext<'a, 'tcx> { // After this point, bcx is the block for the call to panic. bcx = panic_block; - debuginfo::set_source_location(&self.debug_context, &bcx, scope, debug_span); + self.set_debug_loc(&bcx, terminator.source_info); // Get the location information. 
        let loc = bcx.sess().codemap().lookup_char_pos(span.lo);
@@ -599,7 +597,8 @@ impl<'a, 'tcx> MirContext<'a, 'tcx> {
                 bug!("Cannot use direct operand with an intrinsic call")
             };
 
-            trans_intrinsic_call(&bcx, callee.ty, &fn_ty, &llargs, dest, debug_span);
+            trans_intrinsic_call(&bcx, callee.ty, &fn_ty, &llargs, dest,
+                terminator.source_info.span);
 
             if let ReturnDest::IndirectOperand(dst, _) = ret_dest {
                 // Make a fake operand for store_return
@@ -639,8 +638,7 @@ impl<'a, 'tcx> MirContext<'a, 'tcx> {
         if let Some((_, target)) = *destination {
             let ret_bcx = self.build_block(target);
             ret_bcx.at_start(|ret_bcx| {
-                debuginfo::set_source_location(&self.debug_context,
-                                               &ret_bcx, scope, debug_span);
+                self.set_debug_loc(&ret_bcx, terminator.source_info);
                 let op = OperandRef {
                     val: Immediate(invokeret),
                     ty: sig.output(),
diff --git a/src/librustc_trans/mir/mod.rs b/src/librustc_trans/mir/mod.rs
index 6041b76d18e69..8b43930260d86 100644
--- a/src/librustc_trans/mir/mod.rs
+++ b/src/librustc_trans/mir/mod.rs
@@ -103,6 +103,11 @@ impl<'a, 'tcx> MirContext<'a, 'tcx> {
         monomorphize::apply_param_substs(self.fcx.ccx.shared(), self.param_substs, value)
     }
 
+    pub fn set_debug_loc(&mut self, bcx: &BlockAndBuilder, source_info: mir::SourceInfo) {
+        let (scope, span) = self.debug_loc(source_info);
+        debuginfo::set_source_location(&self.debug_context, bcx, scope, span);
+    }
+
     pub fn debug_loc(&mut self, source_info: mir::SourceInfo) -> (DIScope, Span) {
         // Bail out if debug info emission is not enabled.
         match self.debug_context {
@@ -120,9 +125,8 @@ impl<'a, 'tcx> MirContext<'a, 'tcx> {
            source_info.span.expn_id == COMMAND_LINE_EXPN ||
            self.fcx.ccx.sess().opts.debugging_opts.debug_macros {
 
-            let scope_metadata = self.scope_metadata_for_loc(source_info.scope,
-                                                             source_info.span.lo);
-            (scope_metadata, source_info.span)
+            let scope = self.scope_metadata_for_loc(source_info.scope, source_info.span.lo);
+            (scope, source_info.span)
         } else {
             let cm = self.fcx.ccx.sess().codemap();
             // Walk up the macro expansion chain until we reach a non-expanded span.
@@ -135,9 +139,9 @@ impl<'a, 'tcx> MirContext<'a, 'tcx> {
                     break;
                 }
             }
-            let scope_metadata = self.scope_metadata_for_loc(source_info.scope, span.lo);
+            let scope = self.scope_metadata_for_loc(source_info.scope, span.lo);
             // Use span of the outermost call site, while keeping the original lexical scope
-            (scope_metadata, span)
+            (scope, span)
         }
     }
 
diff --git a/src/librustc_trans/mir/statement.rs b/src/librustc_trans/mir/statement.rs
index 6fc5d7db676d3..51d7a1967371c 100644
--- a/src/librustc_trans/mir/statement.rs
+++ b/src/librustc_trans/mir/statement.rs
@@ -11,7 +11,6 @@
 use rustc::mir;
 
 use base;
-use debuginfo;
 use common::{self, BlockAndBuilder};
 
 use super::MirContext;
@@ -26,8 +25,7 @@ impl<'a, 'tcx> MirContext<'a, 'tcx> {
                            -> BlockAndBuilder<'a, 'tcx> {
         debug!("trans_statement(statement={:?})", statement);
 
-        let (scope, span) = self.debug_loc(statement.source_info);
-        debuginfo::set_source_location(&self.debug_context, &bcx, scope, span);
+        self.set_debug_loc(&bcx, statement.source_info);
         match statement.kind {
             mir::StatementKind::Assign(ref lvalue, ref rvalue) => {
                 if let mir::Lvalue::Local(index) = *lvalue {

From f103ea4f8969718df071964a9489d6c70354f99b Mon Sep 17 00:00:00 2001
From: Mark Simulacrum
Date: Mon, 19 Dec 2016 16:16:22 -0700
Subject: [PATCH 082/103] Remove unnecessary logic.
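
The removed branch special-cased an i1 return slot when loading the
result at the end of trans_ctor_shim. A ctor shim only ever returns the
constructed tuple-struct or enum-variant value, never a bare i1, so the
range-asserted load plus trunc appears to be dead code (this reasoning
is an editorial note, not part of the original patch). A plain load
suffices:

    // After (from the diff below): no i1 special case.
    bcx.ret(bcx.load(dest))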
--- src/librustc_trans/base.rs | 9 +-------- 1 file changed, 1 insertion(+), 8 deletions(-) diff --git a/src/librustc_trans/base.rs b/src/librustc_trans/base.rs index 7abf2bb315192..d5e16913ecc0d 100644 --- a/src/librustc_trans/base.rs +++ b/src/librustc_trans/base.rs @@ -662,14 +662,7 @@ pub fn trans_ctor_shim<'a, 'tcx>(ccx: &CrateContext<'a, 'tcx>, } bcx.ret(load) } else { - let llty = fcx.fn_ty.ret.original_ty; - let retval = if llty == Type::i1(fcx.ccx) { - let val = bcx.load_range_assert(dest, 0, 2, llvm::False); - bcx.trunc(val, llty) - } else { - bcx.load(dest) - }; - bcx.ret(retval) + bcx.ret(bcx.load(dest)) } } else { bcx.ret_void(); From 88202c5b838ea9f328d37f19c022eba5040e234f Mon Sep 17 00:00:00 2001 From: Mark Simulacrum Date: Mon, 19 Dec 2016 16:25:00 -0700 Subject: [PATCH 083/103] Replace bcx.ccx() with bcx.ccx --- src/librustc_trans/abi.rs | 2 +- src/librustc_trans/adt.rs | 72 +++++++++++----------- src/librustc_trans/asm.rs | 12 ++-- src/librustc_trans/base.rs | 26 ++++---- src/librustc_trans/callee.rs | 4 +- src/librustc_trans/debuginfo/mod.rs | 2 +- src/librustc_trans/glue.rs | 68 ++++++++++---------- src/librustc_trans/intrinsic.rs | 40 ++++++------ src/librustc_trans/meth.rs | 2 +- src/librustc_trans/mir/block.rs | 96 ++++++++++++++--------------- src/librustc_trans/mir/constant.rs | 12 ++-- src/librustc_trans/mir/lvalue.rs | 26 ++++---- src/librustc_trans/mir/mod.rs | 15 +++-- src/librustc_trans/mir/operand.rs | 26 ++++---- src/librustc_trans/mir/rvalue.rs | 66 ++++++++++---------- src/librustc_trans/mir/statement.rs | 2 +- src/librustc_trans/tvec.rs | 6 +- 17 files changed, 238 insertions(+), 239 deletions(-) diff --git a/src/librustc_trans/abi.rs b/src/librustc_trans/abi.rs index 0f876eadd73c0..8b4343af1990f 100644 --- a/src/librustc_trans/abi.rs +++ b/src/librustc_trans/abi.rs @@ -235,7 +235,7 @@ impl ArgType { if self.is_ignore() { return; } - let ccx = bcx.ccx(); + let ccx = bcx.ccx; if self.is_indirect() { let llsz = llsize_of(ccx, self.ty); let llalign = llalign_of_min(ccx, self.ty); diff --git a/src/librustc_trans/adt.rs b/src/librustc_trans/adt.rs index 49a6fa22c49fe..31a5538a3c117 100644 --- a/src/librustc_trans/adt.rs +++ b/src/librustc_trans/adt.rs @@ -308,7 +308,7 @@ pub fn trans_switch<'a, 'tcx>( scrutinee: ValueRef, range_assert: bool ) -> (BranchKind, Option) { - let l = bcx.ccx().layout_of(t); + let l = bcx.ccx.layout_of(t); match *l { layout::CEnum { .. } | layout::General { .. } | layout::RawNullablePointer { .. } | layout::StructWrappedNullablePointer { .. } => { @@ -343,7 +343,7 @@ pub fn trans_get_discr<'a, 'tcx>( }; debug!("trans_get_discr t: {:?}", t); - let l = bcx.ccx().layout_of(t); + let l = bcx.ccx.layout_of(t); let val = match *l { layout::CEnum { discr, min, max, .. } => { @@ -354,11 +354,11 @@ pub fn trans_get_discr<'a, 'tcx>( load_discr(bcx, discr, ptr, 0, def.variants.len() as u64 - 1, range_assert) } - layout::Univariant { .. } | layout::UntaggedUnion { .. } => C_u8(bcx.ccx(), 0), + layout::Univariant { .. } | layout::UntaggedUnion { .. } => C_u8(bcx.ccx, 0), layout::RawNullablePointer { nndiscr, .. 
} => { let cmp = if nndiscr == 0 { IntEQ } else { IntNE }; - let llptrty = type_of::sizing_type_of(bcx.ccx(), - monomorphize::field_ty(bcx.ccx().tcx(), substs, + let llptrty = type_of::sizing_type_of(bcx.ccx, + monomorphize::field_ty(bcx.ccx.tcx(), substs, &def.variants[nndiscr as usize].fields[0])); bcx.icmp(cmp, bcx.load(scrutinee), C_null(llptrty)) } @@ -390,7 +390,7 @@ fn struct_wrapped_nullable_bitdiscr( fn load_discr(bcx: &BlockAndBuilder, ity: layout::Integer, ptr: ValueRef, min: u64, max: u64, range_assert: bool) -> ValueRef { - let llty = Type::from_integer(bcx.ccx(), ity); + let llty = Type::from_integer(bcx.ccx, ity); assert_eq!(val_ty(ptr), llty.ptr_to()); let bits = ity.size().bits(); assert!(bits <= 64); @@ -416,16 +416,16 @@ fn load_discr(bcx: &BlockAndBuilder, ity: layout::Integer, ptr: ValueRef, min: u /// /// This should ideally be less tightly tied to `_match`. pub fn trans_case<'a, 'tcx>(bcx: &BlockAndBuilder<'a, 'tcx>, t: Ty<'tcx>, value: Disr) -> ValueRef { - let l = bcx.ccx().layout_of(t); + let l = bcx.ccx.layout_of(t); match *l { layout::CEnum { discr, .. } | layout::General { discr, .. }=> { - C_integral(Type::from_integer(bcx.ccx(), discr), value.0, true) + C_integral(Type::from_integer(bcx.ccx, discr), value.0, true) } layout::RawNullablePointer { .. } | layout::StructWrappedNullablePointer { .. } => { assert!(value == Disr(0) || value == Disr(1)); - C_bool(bcx.ccx(), value != Disr(0)) + C_bool(bcx.ccx, value != Disr(0)) } _ => { bug!("{} does not have a discriminant. Represented as {:#?}", t, l); @@ -438,15 +438,15 @@ pub fn trans_case<'a, 'tcx>(bcx: &BlockAndBuilder<'a, 'tcx>, t: Ty<'tcx>, value: pub fn trans_set_discr<'a, 'tcx>( bcx: &BlockAndBuilder<'a, 'tcx>, t: Ty<'tcx>, val: ValueRef, to: Disr ) { - let l = bcx.ccx().layout_of(t); + let l = bcx.ccx.layout_of(t); match *l { layout::CEnum{ discr, min, max, .. } => { assert_discr_in_range(Disr(min), Disr(max), to); - bcx.store(C_integral(Type::from_integer(bcx.ccx(), discr), to.0, true), + bcx.store(C_integral(Type::from_integer(bcx.ccx, discr), to.0, true), val); } layout::General{ discr, .. } => { - bcx.store(C_integral(Type::from_integer(bcx.ccx(), discr), to.0, true), + bcx.store(C_integral(Type::from_integer(bcx.ccx, discr), to.0, true), bcx.struct_gep(val, 0)); } layout::Univariant { .. } @@ -455,9 +455,9 @@ pub fn trans_set_discr<'a, 'tcx>( assert_eq!(to, Disr(0)); } layout::RawNullablePointer { nndiscr, .. } => { - let nnty = compute_fields(bcx.ccx(), t, nndiscr as usize, false)[0]; + let nnty = compute_fields(bcx.ccx, t, nndiscr as usize, false)[0]; if to.0 != nndiscr { - let llptrty = type_of::sizing_type_of(bcx.ccx(), nnty); + let llptrty = type_of::sizing_type_of(bcx.ccx, nnty); bcx.store(C_null(llptrty), val); } } @@ -467,10 +467,10 @@ pub fn trans_set_discr<'a, 'tcx>( // Issue #34427: As workaround for LLVM bug on // ARM, use memset of 0 on whole struct rather // than storing null to single target field. 
- let llptr = bcx.pointercast(val, Type::i8(bcx.ccx()).ptr_to()); - let fill_byte = C_u8(bcx.ccx(), 0); - let size = C_uint(bcx.ccx(), nonnull.stride().bytes()); - let align = C_i32(bcx.ccx(), nonnull.align.abi() as i32); + let llptr = bcx.pointercast(val, Type::i8(bcx.ccx).ptr_to()); + let fill_byte = C_u8(bcx.ccx, 0); + let size = C_uint(bcx.ccx, nonnull.stride().bytes()); + let align = C_i32(bcx.ccx, nonnull.align.abi() as i32); base::call_memset(bcx, llptr, fill_byte, size, align, false); } else { let path = discrfield.iter().map(|&i| i as usize).collect::>(); @@ -504,7 +504,7 @@ pub fn trans_field_ptr<'a, 'tcx>( discr: Disr, ix: usize ) -> ValueRef { - let l = bcx.ccx().layout_of(t); + let l = bcx.ccx.layout_of(t); debug!("trans_field_ptr on {} represented as {:#?}", t, l); // Note: if this ever needs to generate conditionals (e.g., if we // decide to do some kind of cdr-coding-like non-unique repr @@ -513,7 +513,7 @@ pub fn trans_field_ptr<'a, 'tcx>( layout::Univariant { ref variant, .. } => { assert_eq!(discr, Disr(0)); struct_field_ptr(bcx, &variant, - &compute_fields(bcx.ccx(), t, 0, false), + &compute_fields(bcx.ccx, t, 0, false), val, ix, false) } layout::Vector { count, .. } => { @@ -522,37 +522,37 @@ pub fn trans_field_ptr<'a, 'tcx>( bcx.struct_gep(val.value, ix) } layout::General { discr: d, ref variants, .. } => { - let mut fields = compute_fields(bcx.ccx(), t, discr.0 as usize, false); - fields.insert(0, d.to_ty(&bcx.ccx().tcx(), false)); + let mut fields = compute_fields(bcx.ccx, t, discr.0 as usize, false); + fields.insert(0, d.to_ty(&bcx.ccx.tcx(), false)); struct_field_ptr(bcx, &variants[discr.0 as usize], &fields, val, ix + 1, true) } layout::UntaggedUnion { .. } => { - let fields = compute_fields(bcx.ccx(), t, 0, false); - let ty = type_of::in_memory_type_of(bcx.ccx(), fields[ix]); + let fields = compute_fields(bcx.ccx, t, 0, false); + let ty = type_of::in_memory_type_of(bcx.ccx, fields[ix]); bcx.pointercast(val.value, ty.ptr_to()) } layout::RawNullablePointer { nndiscr, .. } | layout::StructWrappedNullablePointer { nndiscr, .. } if discr.0 != nndiscr => { - let nullfields = compute_fields(bcx.ccx(), t, (1-nndiscr) as usize, false); + let nullfields = compute_fields(bcx.ccx, t, (1-nndiscr) as usize, false); // The unit-like case might have a nonzero number of unit-like fields. // (e.d., Result of Either with (), as one side.) - let ty = type_of::type_of(bcx.ccx(), nullfields[ix]); - assert_eq!(machine::llsize_of_alloc(bcx.ccx(), ty), 0); + let ty = type_of::type_of(bcx.ccx, nullfields[ix]); + assert_eq!(machine::llsize_of_alloc(bcx.ccx, ty), 0); bcx.pointercast(val.value, ty.ptr_to()) } layout::RawNullablePointer { nndiscr, .. } => { - let nnty = compute_fields(bcx.ccx(), t, nndiscr as usize, false)[0]; + let nnty = compute_fields(bcx.ccx, t, nndiscr as usize, false)[0]; assert_eq!(ix, 0); assert_eq!(discr.0, nndiscr); - let ty = type_of::type_of(bcx.ccx(), nnty); + let ty = type_of::type_of(bcx.ccx, nnty); bcx.pointercast(val.value, ty.ptr_to()) } layout::StructWrappedNullablePointer { ref nonnull, nndiscr, .. 
} => { assert_eq!(discr.0, nndiscr); struct_field_ptr(bcx, &nonnull, - &compute_fields(bcx.ccx(), t, discr.0 as usize, false), + &compute_fields(bcx.ccx, t, discr.0 as usize, false), val, ix, false) } _ => bug!("element access in type without elements: {} represented as {:#?}", t, l) @@ -568,7 +568,7 @@ fn struct_field_ptr<'a, 'tcx>( needs_cast: bool ) -> ValueRef { let fty = fields[ix]; - let ccx = bcx.ccx(); + let ccx = bcx.ccx; let ptr_val = if needs_cast { let fields = st.field_index_by_increasing_offset().map(|i| { @@ -585,7 +585,7 @@ fn struct_field_ptr<'a, 'tcx>( // * Packed struct - There is no alignment padding // * Field is sized - pointer is properly aligned already if st.offsets[ix] == layout::Size::from_bytes(0) || st.packed || - bcx.ccx().shared().type_is_sized(fty) { + bcx.ccx.shared().type_is_sized(fty) { return bcx.struct_gep(ptr_val, st.memory_index[ix] as usize); } @@ -624,7 +624,7 @@ fn struct_field_ptr<'a, 'tcx>( let offset = st.offsets[ix].bytes(); - let unaligned_offset = C_uint(bcx.ccx(), offset); + let unaligned_offset = C_uint(bcx.ccx, offset); // Get the alignment of the field let (_, align) = glue::size_and_align_of_dst(bcx, fty, meta); @@ -635,18 +635,18 @@ fn struct_field_ptr<'a, 'tcx>( // (unaligned offset + (align - 1)) & -align // Calculate offset - let align_sub_1 = bcx.sub(align, C_uint(bcx.ccx(), 1u64)); + let align_sub_1 = bcx.sub(align, C_uint(bcx.ccx, 1u64)); let offset = bcx.and(bcx.add(unaligned_offset, align_sub_1), bcx.neg(align)); debug!("struct_field_ptr: DST field offset: {:?}", Value(offset)); // Cast and adjust pointer - let byte_ptr = bcx.pointercast(ptr_val, Type::i8p(bcx.ccx())); + let byte_ptr = bcx.pointercast(ptr_val, Type::i8p(bcx.ccx)); let byte_ptr = bcx.gep(byte_ptr, &[offset]); // Finally, cast back to the type expected - let ll_fty = type_of::in_memory_type_of(bcx.ccx(), fty); + let ll_fty = type_of::in_memory_type_of(bcx.ccx, fty); debug!("struct_field_ptr: Field type is {:?}", ll_fty); bcx.pointercast(byte_ptr, ll_fty.ptr_to()) } diff --git a/src/librustc_trans/asm.rs b/src/librustc_trans/asm.rs index 6ec198aa24726..d6385e1ca1562 100644 --- a/src/librustc_trans/asm.rs +++ b/src/librustc_trans/asm.rs @@ -48,7 +48,7 @@ pub fn trans_inline_asm<'a, 'tcx>( if out.is_indirect { indirect_outputs.push(val.unwrap()); } else { - output_types.push(type_of::type_of(bcx.ccx(), ty)); + output_types.push(type_of::type_of(bcx.ccx, ty)); } } if !indirect_outputs.is_empty() { @@ -79,9 +79,9 @@ pub fn trans_inline_asm<'a, 'tcx>( // Depending on how many outputs we have, the return type is different let num_outputs = output_types.len(); let output_type = match num_outputs { - 0 => Type::void(bcx.ccx()), + 0 => Type::void(bcx.ccx), 1 => output_types[0], - _ => Type::struct_(bcx.ccx(), &output_types[..], false) + _ => Type::struct_(bcx.ccx, &output_types[..], false) }; let dialect = match ia.dialect { @@ -112,12 +112,12 @@ pub fn trans_inline_asm<'a, 'tcx>( // back to source locations. See #17552. 
unsafe { let key = "srcloc"; - let kind = llvm::LLVMGetMDKindIDInContext(bcx.ccx().llcx(), + let kind = llvm::LLVMGetMDKindIDInContext(bcx.ccx.llcx(), key.as_ptr() as *const c_char, key.len() as c_uint); - let val: llvm::ValueRef = C_i32(bcx.ccx(), ia.expn_id.into_u32() as i32); + let val: llvm::ValueRef = C_i32(bcx.ccx, ia.expn_id.into_u32() as i32); llvm::LLVMSetMetadata(r, kind, - llvm::LLVMMDNodeInContext(bcx.ccx().llcx(), &val, 1)); + llvm::LLVMMDNodeInContext(bcx.ccx.llcx(), &val, 1)); } } diff --git a/src/librustc_trans/base.rs b/src/librustc_trans/base.rs index d5e16913ecc0d..9697f2604349b 100644 --- a/src/librustc_trans/base.rs +++ b/src/librustc_trans/base.rs @@ -232,9 +232,9 @@ pub fn unsize_thin_ptr<'a, 'tcx>( &ty::TyRawPtr(ty::TypeAndMut { ty: b, .. })) | (&ty::TyRawPtr(ty::TypeAndMut { ty: a, .. }), &ty::TyRawPtr(ty::TypeAndMut { ty: b, .. })) => { - assert!(bcx.ccx().shared().type_is_sized(a)); - let ptr_ty = type_of::in_memory_type_of(bcx.ccx(), b).ptr_to(); - (bcx.pointercast(src, ptr_ty), unsized_info(bcx.ccx(), a, b, None)) + assert!(bcx.ccx.shared().type_is_sized(a)); + let ptr_ty = type_of::in_memory_type_of(bcx.ccx, b).ptr_to(); + (bcx.pointercast(src, ptr_ty), unsized_info(bcx.ccx, a, b, None)) } _ => bug!("unsize_thin_ptr: called on bad types"), } @@ -252,13 +252,13 @@ pub fn coerce_unsized_into<'a, 'tcx>(bcx: &BlockAndBuilder<'a, 'tcx>, (&ty::TyRef(..), &ty::TyRef(..)) | (&ty::TyRef(..), &ty::TyRawPtr(..)) | (&ty::TyRawPtr(..), &ty::TyRawPtr(..)) => { - let (base, info) = if common::type_is_fat_ptr(bcx.ccx(), src_ty) { + let (base, info) = if common::type_is_fat_ptr(bcx.ccx, src_ty) { // fat-ptr to fat-ptr unsize preserves the vtable // i.e. &'a fmt::Debug+Send => &'a fmt::Debug // So we need to pointercast the base to ensure // the types match up. 
let (base, info) = load_fat_ptr(bcx, src, src_ty); - let llcast_ty = type_of::fat_ptr_base_ty(bcx.ccx(), dst_ty); + let llcast_ty = type_of::fat_ptr_base_ty(bcx.ccx, dst_ty); let base = bcx.pointercast(base, llcast_ty); (base, info) } else { @@ -283,7 +283,7 @@ pub fn coerce_unsized_into<'a, 'tcx>(bcx: &BlockAndBuilder<'a, 'tcx>, let iter = src_fields.zip(dst_fields).enumerate(); for (i, (src_fty, dst_fty)) in iter { - if type_is_zero_size(bcx.ccx(), dst_fty) { + if type_is_zero_size(bcx.ccx, dst_fty) { continue; } @@ -460,8 +460,8 @@ pub fn load_fat_ptr<'a, 'tcx>( } pub fn from_immediate(bcx: &BlockAndBuilder, val: ValueRef) -> ValueRef { - if val_ty(val) == Type::i1(bcx.ccx()) { - bcx.zext(val, Type::i8(bcx.ccx())) + if val_ty(val) == Type::i1(bcx.ccx) { + bcx.zext(val, Type::i8(bcx.ccx)) } else { val } @@ -469,7 +469,7 @@ pub fn from_immediate(bcx: &BlockAndBuilder, val: ValueRef) -> ValueRef { pub fn to_immediate(bcx: &BlockAndBuilder, val: ValueRef, ty: Ty) -> ValueRef { if ty.is_bool() { - bcx.trunc(val, Type::i1(bcx.ccx())) + bcx.trunc(val, Type::i1(bcx.ccx)) } else { val } @@ -526,7 +526,7 @@ pub fn call_memcpy<'a, 'tcx>(b: &Builder<'a, 'tcx>, pub fn memcpy_ty<'a, 'tcx>( bcx: &BlockAndBuilder<'a, 'tcx>, dst: ValueRef, src: ValueRef, t: Ty<'tcx> ) { - let ccx = bcx.ccx(); + let ccx = bcx.ccx; if type_is_zero_size(ccx, t) { return; @@ -537,7 +537,7 @@ pub fn memcpy_ty<'a, 'tcx>( let llsz = llsize_of(ccx, llty); let llalign = type_of::align_of(ccx, t); call_memcpy(bcx, dst, src, llsz, llalign as u32); - } else if common::type_is_fat_ptr(bcx.ccx(), t) { + } else if common::type_is_fat_ptr(bcx.ccx, t) { let (data, extra) = load_fat_ptr(bcx, src, t); store_fat_ptr(bcx, data, extra, dst, t); } else { @@ -560,7 +560,7 @@ pub fn call_memset<'a, 'tcx>(b: &Builder<'a, 'tcx>, pub fn alloc_ty<'a, 'tcx>(bcx: &BlockAndBuilder<'a, 'tcx>, ty: Ty<'tcx>, name: &str) -> ValueRef { assert!(!ty.has_param_types()); - bcx.fcx().alloca(type_of::type_of(bcx.ccx(), ty), name) + bcx.fcx().alloca(type_of::type_of(bcx.ccx, ty), name) } pub fn trans_instance<'a, 'tcx>(ccx: &CrateContext<'a, 'tcx>, instance: Instance<'tcx>) { @@ -638,7 +638,7 @@ pub fn trans_ctor_shim<'a, 'tcx>(ccx: &CrateContext<'a, 'tcx>, let lldestptr = adt::trans_field_ptr(&bcx, sig.output(), dest_val, Disr::from(disr), i); let arg = &fcx.fn_ty.args[arg_idx]; arg_idx += 1; - if common::type_is_fat_ptr(bcx.ccx(), arg_ty) { + if common::type_is_fat_ptr(bcx.ccx, arg_ty) { let meta = &fcx.fn_ty.args[arg_idx]; arg_idx += 1; arg.store_fn_arg(&bcx, &mut llarg_idx, get_dataptr(&bcx, lldestptr)); diff --git a/src/librustc_trans/callee.rs b/src/librustc_trans/callee.rs index cab42e8b5e61a..384c70ffa448b 100644 --- a/src/librustc_trans/callee.rs +++ b/src/librustc_trans/callee.rs @@ -368,7 +368,7 @@ fn trans_fn_once_adapter_shim<'a, 'tcx>( // to drop `self` when the body returns, or in case it unwinds. 
let self_scope = fcx.schedule_drop_mem(llenv, closure_ty); let fn_ret = callee.ty.fn_ret(); - let fn_ty = callee.direct_fn_type(bcx.ccx(), &[]); + let fn_ty = callee.direct_fn_type(bcx.ccx, &[]); let first_llarg = if fn_ty.ret.is_indirect() && !fcx.fn_ty.ret.is_ignore() { Some(get_param(fcx.llfn, 0)) @@ -378,7 +378,7 @@ fn trans_fn_once_adapter_shim<'a, 'tcx>( let llargs = first_llarg.into_iter().chain(llargs[self_idx..].iter().cloned()) .collect::>(); - let llfn = callee.reify(bcx.ccx()); + let llfn = callee.reify(bcx.ccx); let llret; if let Some(landing_pad) = self_scope.landing_pad { let normal_bcx = bcx.fcx().build_new_block("normal-return"); diff --git a/src/librustc_trans/debuginfo/mod.rs b/src/librustc_trans/debuginfo/mod.rs index eb7f5d9c108d4..86099d241df68 100644 --- a/src/librustc_trans/debuginfo/mod.rs +++ b/src/librustc_trans/debuginfo/mod.rs @@ -431,7 +431,7 @@ pub fn declare_local<'a, 'tcx>(bcx: &BlockAndBuilder<'a, 'tcx>, variable_access: VariableAccess, variable_kind: VariableKind, span: Span) { - let cx: &CrateContext = bcx.ccx(); + let cx = bcx.ccx; let file = span_start(cx, span).file; let filename = file.name.clone(); diff --git a/src/librustc_trans/glue.rs b/src/librustc_trans/glue.rs index e5f53b3b64d76..ea34dbbeeb426 100644 --- a/src/librustc_trans/glue.rs +++ b/src/librustc_trans/glue.rs @@ -43,10 +43,10 @@ pub fn trans_exchange_free_dyn<'a, 'tcx>( align: ValueRef ) { let def_id = langcall(bcx.tcx(), None, "", ExchangeFreeFnLangItem); - let args = [bcx.pointercast(v, Type::i8p(bcx.ccx())), size, align]; - let callee = Callee::def(bcx.ccx(), def_id, bcx.tcx().intern_substs(&[])); + let args = [bcx.pointercast(v, Type::i8p(bcx.ccx)), size, align]; + let callee = Callee::def(bcx.ccx, def_id, bcx.tcx().intern_substs(&[])); - let ccx = bcx.ccx(); + let ccx = bcx.ccx; let fn_ty = callee.direct_fn_type(ccx, &[]); assert!(!fn_ty.ret.is_indirect() && fn_ty.ret.cast.is_none()); @@ -58,14 +58,14 @@ pub fn trans_exchange_free_dyn<'a, 'tcx>( pub fn trans_exchange_free_ty<'a, 'tcx>( bcx: &BlockAndBuilder<'a, 'tcx>, ptr: ValueRef, content_ty: Ty<'tcx> ) { - assert!(bcx.ccx().shared().type_is_sized(content_ty)); - let sizing_type = sizing_type_of(bcx.ccx(), content_ty); - let content_size = llsize_of_alloc(bcx.ccx(), sizing_type); + assert!(bcx.ccx.shared().type_is_sized(content_ty)); + let sizing_type = sizing_type_of(bcx.ccx, content_ty); + let content_size = llsize_of_alloc(bcx.ccx, sizing_type); // `Box` does not allocate. if content_size != 0 { - let content_align = align_of(bcx.ccx(), content_ty); - let ccx = bcx.ccx(); + let content_align = align_of(bcx.ccx, content_ty); + let ccx = bcx.ccx; trans_exchange_free_dyn(bcx, ptr, C_uint(ccx, content_size), C_uint(ccx, content_align)); } } @@ -122,8 +122,8 @@ pub fn call_drop_glue<'a, 'tcx>( ) { // NB: v is an *alias* of type t here, not a direct value. 
debug!("call_drop_glue(t={:?}, skip_dtor={})", t, skip_dtor); - if bcx.ccx().shared().type_needs_drop(t) { - let ccx = bcx.ccx(); + if bcx.ccx.shared().type_needs_drop(t) { + let ccx = bcx.ccx; let g = if skip_dtor { DropGlueKind::TyContents(t) } else { @@ -232,7 +232,7 @@ fn trans_custom_dtor<'a, 'tcx>(mut bcx: BlockAndBuilder<'a, 'tcx>, }; let (sized_args, unsized_args); - let args: &[ValueRef] = if bcx.ccx().shared().type_is_sized(t) { + let args: &[ValueRef] = if bcx.ccx.shared().type_is_sized(t) { sized_args = [v0]; &sized_args } else { @@ -248,20 +248,20 @@ fn trans_custom_dtor<'a, 'tcx>(mut bcx: BlockAndBuilder<'a, 'tcx>, def_id: tcx.lang_items.drop_trait().unwrap(), substs: tcx.mk_substs_trait(t, &[]) }); - let vtbl = match fulfill_obligation(bcx.ccx().shared(), DUMMY_SP, trait_ref) { + let vtbl = match fulfill_obligation(bcx.ccx.shared(), DUMMY_SP, trait_ref) { traits::VtableImpl(data) => data, _ => bug!("dtor for {:?} is not an impl???", t) }; let dtor_did = def.destructor().unwrap(); - let callee = Callee::def(bcx.ccx(), dtor_did, vtbl.substs); - let fn_ty = callee.direct_fn_type(bcx.ccx(), &[]); + let callee = Callee::def(bcx.ccx, dtor_did, vtbl.substs); + let fn_ty = callee.direct_fn_type(bcx.ccx, &[]); let llret; if let Some(landing_pad) = contents_scope.landing_pad { let normal_bcx = bcx.fcx().build_new_block("normal-return"); - llret = bcx.invoke(callee.reify(bcx.ccx()), args, normal_bcx.llbb(), landing_pad, None); + llret = bcx.invoke(callee.reify(bcx.ccx), args, normal_bcx.llbb(), landing_pad, None); bcx = normal_bcx; } else { - llret = bcx.call(callee.reify(bcx.ccx()), args, None); + llret = bcx.call(callee.reify(bcx.ccx), args, None); } fn_ty.apply_attrs_callsite(llret); contents_scope.trans(&bcx); @@ -273,19 +273,19 @@ pub fn size_and_align_of_dst<'a, 'tcx>(bcx: &BlockAndBuilder<'a, 'tcx>, -> (ValueRef, ValueRef) { debug!("calculate size of DST: {}; with lost info: {:?}", t, Value(info)); - if bcx.ccx().shared().type_is_sized(t) { - let sizing_type = sizing_type_of(bcx.ccx(), t); - let size = llsize_of_alloc(bcx.ccx(), sizing_type); - let align = align_of(bcx.ccx(), t); + if bcx.ccx.shared().type_is_sized(t) { + let sizing_type = sizing_type_of(bcx.ccx, t); + let size = llsize_of_alloc(bcx.ccx, sizing_type); + let align = align_of(bcx.ccx, t); debug!("size_and_align_of_dst t={} info={:?} size: {} align: {}", t, Value(info), size, align); - let size = C_uint(bcx.ccx(), size); - let align = C_uint(bcx.ccx(), align); + let size = C_uint(bcx.ccx, size); + let align = C_uint(bcx.ccx, align); return (size, align); } match t.sty { ty::TyAdt(def, substs) => { - let ccx = bcx.ccx(); + let ccx = bcx.ccx; // First get the size of all statically known fields. // Don't use type_of::sizing_type_of because that expects t to be sized, // and it also rounds up to alignment, which we want to avoid, @@ -348,7 +348,7 @@ pub fn size_and_align_of_dst<'a, 'tcx>(bcx: &BlockAndBuilder<'a, 'tcx>, // // `(size + (align-1)) & -align` - let addend = bcx.sub(align, C_uint(bcx.ccx(), 1_u64)); + let addend = bcx.sub(align, C_uint(bcx.ccx, 1_u64)); let size = bcx.and(bcx.add(size, addend), bcx.neg(align)); (size, align) @@ -356,7 +356,7 @@ pub fn size_and_align_of_dst<'a, 'tcx>(bcx: &BlockAndBuilder<'a, 'tcx>, ty::TyDynamic(..) => { // info points to the vtable and the second entry in the vtable is the // dynamic size of the object. 
- let info = bcx.pointercast(info, Type::int(bcx.ccx()).ptr_to()); + let info = bcx.pointercast(info, Type::int(bcx.ccx).ptr_to()); let size_ptr = bcx.gepi(info, &[1]); let align_ptr = bcx.gepi(info, &[2]); (bcx.load(size_ptr), bcx.load(align_ptr)) @@ -365,11 +365,11 @@ pub fn size_and_align_of_dst<'a, 'tcx>(bcx: &BlockAndBuilder<'a, 'tcx>, let unit_ty = t.sequence_element_type(bcx.tcx()); // The info in this case is the length of the str, so the size is that // times the unit size. - let llunit_ty = sizing_type_of(bcx.ccx(), unit_ty); - let unit_align = llalign_of_min(bcx.ccx(), llunit_ty); - let unit_size = llsize_of_alloc(bcx.ccx(), llunit_ty); - (bcx.mul(info, C_uint(bcx.ccx(), unit_size)), - C_uint(bcx.ccx(), unit_align)) + let llunit_ty = sizing_type_of(bcx.ccx, unit_ty); + let unit_align = llalign_of_min(bcx.ccx, llunit_ty); + let unit_size = llsize_of_alloc(bcx.ccx, llunit_ty); + (bcx.mul(info, C_uint(bcx.ccx, unit_size)), + C_uint(bcx.ccx, unit_align)) } _ => bug!("Unexpected unsized type, found {}", t) } @@ -394,7 +394,7 @@ fn make_drop_glue<'a, 'tcx>(bcx: BlockAndBuilder<'a, 'tcx>, // special. It may move to library and have Drop impl. As // a safe-guard, assert TyBox not used with TyContents. assert!(!skip_dtor); - if !bcx.ccx().shared().type_is_sized(content_ty) { + if !bcx.ccx.shared().type_is_sized(content_ty) { let llval = get_dataptr(&bcx, v0); let llbox = bcx.load(llval); drop_ty(&bcx, v0, content_ty); @@ -407,7 +407,7 @@ fn make_drop_glue<'a, 'tcx>(bcx: BlockAndBuilder<'a, 'tcx>, let needs_free = bcx.icmp( llvm::IntNE, llsize, - C_uint(bcx.ccx(), 0u64), + C_uint(bcx.ccx, 0u64), ); if const_to_opt_uint(needs_free) == Some(0) { bcx @@ -437,7 +437,7 @@ fn make_drop_glue<'a, 'tcx>(bcx: BlockAndBuilder<'a, 'tcx>, let data_ptr = get_dataptr(&bcx, v0); let vtable_ptr = bcx.load(get_meta(&bcx, v0)); let dtor = bcx.load(vtable_ptr); - bcx.call(dtor, &[bcx.pointercast(bcx.load(data_ptr), Type::i8p(bcx.ccx()))], None); + bcx.call(dtor, &[bcx.pointercast(bcx.load(data_ptr), Type::i8p(bcx.ccx))], None); bcx } ty::TyAdt(def, ..) 
if def.dtor_kind().is_present() && !skip_dtor => { @@ -447,7 +447,7 @@ fn make_drop_glue<'a, 'tcx>(bcx: BlockAndBuilder<'a, 'tcx>, bcx } _ => { - if bcx.ccx().shared().type_needs_drop(t) { + if bcx.ccx.shared().type_needs_drop(t) { drop_structural_ty(bcx, v0, t) } else { bcx diff --git a/src/librustc_trans/intrinsic.rs b/src/librustc_trans/intrinsic.rs index e646f61bddbbe..d218459eeb59c 100644 --- a/src/librustc_trans/intrinsic.rs +++ b/src/librustc_trans/intrinsic.rs @@ -91,7 +91,7 @@ pub fn trans_intrinsic_call<'a, 'tcx>(bcx: &BlockAndBuilder<'a, 'tcx>, llargs: &[ValueRef], llresult: ValueRef, span: Span) { - let ccx = bcx.ccx(); + let ccx = bcx.ccx; let tcx = bcx.tcx(); let (def_id, substs, fty) = match callee_ty.sty { @@ -137,7 +137,7 @@ pub fn trans_intrinsic_call<'a, 'tcx>(bcx: &BlockAndBuilder<'a, 'tcx>, } "size_of_val" => { let tp_ty = substs.type_at(0); - if !bcx.ccx().shared().type_is_sized(tp_ty) { + if !bcx.ccx.shared().type_is_sized(tp_ty) { let (llsize, _) = glue::size_and_align_of_dst(bcx, tp_ty, llargs[1]); llsize @@ -152,7 +152,7 @@ pub fn trans_intrinsic_call<'a, 'tcx>(bcx: &BlockAndBuilder<'a, 'tcx>, } "min_align_of_val" => { let tp_ty = substs.type_at(0); - if !bcx.ccx().shared().type_is_sized(tp_ty) { + if !bcx.ccx.shared().type_is_sized(tp_ty) { let (_, llalign) = glue::size_and_align_of_dst(bcx, tp_ty, llargs[1]); llalign @@ -191,7 +191,7 @@ pub fn trans_intrinsic_call<'a, 'tcx>(bcx: &BlockAndBuilder<'a, 'tcx>, "needs_drop" => { let tp_ty = substs.type_at(0); - C_bool(ccx, bcx.ccx().shared().type_needs_drop(tp_ty)) + C_bool(ccx, bcx.ccx.shared().type_needs_drop(tp_ty)) } "offset" => { let ptr = llargs[0]; @@ -237,7 +237,7 @@ pub fn trans_intrinsic_call<'a, 'tcx>(bcx: &BlockAndBuilder<'a, 'tcx>, }, "volatile_store" => { let tp_ty = substs.type_at(0); - if type_is_fat_ptr(bcx.ccx(), tp_ty) { + if type_is_fat_ptr(bcx.ccx, tp_ty) { bcx.volatile_store(llargs[1], get_dataptr(bcx, llargs[0])); bcx.volatile_store(llargs[2], get_meta(bcx, llargs[0])); } else { @@ -264,7 +264,7 @@ pub fn trans_intrinsic_call<'a, 'tcx>(bcx: &BlockAndBuilder<'a, 'tcx>, Some((width, signed)) => match name { "ctlz" | "cttz" => { - let y = C_bool(bcx.ccx(), false); + let y = C_bool(bcx.ccx, false); let llfn = ccx.get_intrinsic(&format!("llvm.{}.i{}", name, width)); bcx.call(llfn, &[llargs[0], y], None) } @@ -282,7 +282,7 @@ pub fn trans_intrinsic_call<'a, 'tcx>(bcx: &BlockAndBuilder<'a, 'tcx>, let intrinsic = format!("llvm.{}{}.with.overflow.i{}", if signed { 's' } else { 'u' }, &name[..3], width); - let llfn = bcx.ccx().get_intrinsic(&intrinsic); + let llfn = bcx.ccx.get_intrinsic(&intrinsic); // Convert `i1` to a `bool`, and write it to the out parameter let val = bcx.call(llfn, &[llargs[0], llargs[1]], None); @@ -291,7 +291,7 @@ pub fn trans_intrinsic_call<'a, 'tcx>(bcx: &BlockAndBuilder<'a, 'tcx>, bcx.store(result, bcx.struct_gep(llresult, 0)); bcx.store(overflow, bcx.struct_gep(llresult, 1)); - C_nil(bcx.ccx()) + C_nil(bcx.ccx) }, "overflowing_add" => bcx.add(llargs[0], llargs[1]), "overflowing_sub" => bcx.sub(llargs[0], llargs[1]), @@ -406,7 +406,7 @@ pub fn trans_intrinsic_call<'a, 'tcx>(bcx: &BlockAndBuilder<'a, 'tcx>, let val = bcx.atomic_cmpxchg(llargs[0], llargs[1], llargs[2], order, failorder, weak); let result = bcx.extract_value(val, 0); - let success = bcx.zext(bcx.extract_value(val, 1), Type::bool(bcx.ccx())); + let success = bcx.zext(bcx.extract_value(val, 1), Type::bool(bcx.ccx)); bcx.store(result, bcx.struct_gep(llresult, 0)); bcx.store(success, bcx.struct_gep(llresult, 1)); } 
else { @@ -545,7 +545,7 @@ pub fn trans_intrinsic_call<'a, 'tcx>(bcx: &BlockAndBuilder<'a, 'tcx>, // This assumes the type is "simple", i.e. no // destructors, and the contents are SIMD // etc. - assert!(!bcx.ccx().shared().type_needs_drop(arg_type)); + assert!(!bcx.ccx.shared().type_needs_drop(arg_type)); let arg = adt::MaybeSizedValue::sized(llarg); (0..contents.len()) .map(|i| { @@ -554,18 +554,18 @@ pub fn trans_intrinsic_call<'a, 'tcx>(bcx: &BlockAndBuilder<'a, 'tcx>, .collect() } intrinsics::Type::Pointer(_, Some(ref llvm_elem), _) => { - let llvm_elem = one(ty_to_type(bcx.ccx(), llvm_elem, &mut false)); + let llvm_elem = one(ty_to_type(bcx.ccx, llvm_elem, &mut false)); vec![bcx.pointercast(llarg, llvm_elem.ptr_to())] } intrinsics::Type::Vector(_, Some(ref llvm_elem), length) => { - let llvm_elem = one(ty_to_type(bcx.ccx(), llvm_elem, &mut false)); + let llvm_elem = one(ty_to_type(bcx.ccx, llvm_elem, &mut false)); vec![bcx.bitcast(llarg, Type::vector(&llvm_elem, length as u64))] } intrinsics::Type::Integer(_, width, llvm_width) if width != llvm_width => { // the LLVM intrinsic uses a smaller integer // size than the C intrinsic's signature, so // we have to trim it down here. - vec![bcx.trunc(llarg, Type::ix(bcx.ccx(), llvm_width as u64))] + vec![bcx.trunc(llarg, Type::ix(bcx.ccx, llvm_width as u64))] } _ => vec![llarg], } @@ -643,7 +643,7 @@ fn copy_intrinsic<'a, 'tcx>(bcx: &BlockAndBuilder<'a, 'tcx>, src: ValueRef, count: ValueRef) -> ValueRef { - let ccx = bcx.ccx(); + let ccx = bcx.ccx; let lltp_ty = type_of::type_of(ccx, tp_ty); let align = C_i32(ccx, type_of::align_of(ccx, tp_ty) as i32); let size = machine::llsize_of(ccx, lltp_ty); @@ -678,7 +678,7 @@ fn memset_intrinsic<'a, 'tcx>( val: ValueRef, count: ValueRef ) -> ValueRef { - let ccx = bcx.ccx(); + let ccx = bcx.ccx; let align = C_i32(ccx, type_of::align_of(ccx, ty) as i32); let lltp_ty = type_of::type_of(ccx, ty); let size = machine::llsize_of(ccx, lltp_ty); @@ -695,7 +695,7 @@ fn try_intrinsic<'a, 'tcx>( ) { if bcx.sess().no_landing_pads() { bcx.call(func, &[data], None); - bcx.store(C_null(Type::i8p(&bcx.ccx())), dest); + bcx.store(C_null(Type::i8p(&bcx.ccx)), dest); } else if wants_msvc_seh(bcx.sess()) { trans_msvc_try(bcx, func, data, local_ptr, dest); } else { @@ -716,7 +716,7 @@ fn trans_msvc_try<'a, 'tcx>(bcx: &BlockAndBuilder<'a, 'tcx>, local_ptr: ValueRef, dest: ValueRef) { let llfn = get_rust_try_fn(bcx.fcx(), &mut |bcx| { - let ccx = bcx.ccx(); + let ccx = bcx.ccx; bcx.set_personality_fn(bcx.fcx().eh_personality()); @@ -819,7 +819,7 @@ fn trans_gnu_try<'a, 'tcx>(bcx: &BlockAndBuilder<'a, 'tcx>, local_ptr: ValueRef, dest: ValueRef) { let llfn = get_rust_try_fn(bcx.fcx(), &mut |bcx| { - let ccx = bcx.ccx(); + let ccx = bcx.ccx; // Translates the shims described above: // @@ -947,7 +947,7 @@ fn generic_simd_intrinsic<'a, 'tcx>( ($cond: expr, $($fmt: tt)*) => { if !$cond { emit_error!($($fmt)*); - return C_nil(bcx.ccx()) + return C_nil(bcx.ccx) } } } @@ -1038,7 +1038,7 @@ fn generic_simd_intrinsic<'a, 'tcx>( arg_idx, total_len); None } - Some(idx) => Some(C_i32(bcx.ccx(), idx as i32)), + Some(idx) => Some(C_i32(bcx.ccx, idx as i32)), } }) .collect(); diff --git a/src/librustc_trans/meth.rs b/src/librustc_trans/meth.rs index 6bc3c1e0b1e6c..df893ac7ab074 100644 --- a/src/librustc_trans/meth.rs +++ b/src/librustc_trans/meth.rs @@ -92,7 +92,7 @@ pub fn trans_object_shim<'a, 'tcx>(ccx: &'a CrateContext<'a, 'tcx>, CalleeData::Virtual(idx) => { let fn_ptr = get_virtual_method(&bcx, llargs.remove(fn_ty.ret.is_indirect() as 
usize + 1), idx); - let llty = fn_ty.llvm_type(bcx.ccx()).ptr_to(); + let llty = fn_ty.llvm_type(bcx.ccx).ptr_to(); bcx.pointercast(fn_ptr, llty) }, _ => bug!("trans_object_shim called with non-virtual callee"), diff --git a/src/librustc_trans/mir/block.rs b/src/librustc_trans/mir/block.rs index eb71e7dcfe3c9..3d3796aeebffc 100644 --- a/src/librustc_trans/mir/block.rs +++ b/src/librustc_trans/mir/block.rs @@ -126,7 +126,7 @@ impl<'a, 'tcx> MirContext<'a, 'tcx> { } else { let exc_ptr = bcx.extract_value(lp, 0); bcx.call( - bcx.fcx().eh_unwind_resume().reify(bcx.ccx()), + bcx.fcx().eh_unwind_resume().reify(bcx.ccx), &[exc_ptr], cleanup_bundle, ); @@ -185,7 +185,7 @@ impl<'a, 'tcx> MirContext<'a, 'tcx> { let discr = base::to_immediate(&bcx, discr, switch_ty); let switch = bcx.switch(discr, llblock(self, *otherwise), values.len()); for (value, target) in values.iter().zip(targets) { - let val = Const::from_constval(bcx.ccx(), value.clone(), switch_ty); + let val = Const::from_constval(bcx.ccx, value.clone(), switch_ty); let llbb = llblock(self, *target); bcx.add_case(switch, val.llval, llbb) } @@ -218,7 +218,7 @@ impl<'a, 'tcx> MirContext<'a, 'tcx> { Ref(llval) => llval }; let load = bcx.load(bcx.pointercast(llslot, cast_ty.ptr_to())); - let llalign = llalign_of_min(bcx.ccx(), ret.ty); + let llalign = llalign_of_min(bcx.ccx, ret.ty); unsafe { llvm::LLVMSetAlignment(load, llalign); } @@ -239,18 +239,18 @@ impl<'a, 'tcx> MirContext<'a, 'tcx> { let ty = self.monomorphize(&ty); // Double check for necessity to drop - if !bcx.ccx().shared().type_needs_drop(ty) { + if !bcx.ccx.shared().type_needs_drop(ty) { funclet_br(self, bcx, target); return; } let lvalue = self.trans_lvalue(&bcx, location); - let drop_fn = glue::get_drop_glue(bcx.ccx(), ty); - let drop_ty = glue::get_drop_glue_type(bcx.ccx().shared(), ty); - let is_sized = bcx.ccx().shared().type_is_sized(ty); + let drop_fn = glue::get_drop_glue(bcx.ccx, ty); + let drop_ty = glue::get_drop_glue_type(bcx.ccx.shared(), ty); + let is_sized = bcx.ccx.shared().type_is_sized(ty); let llvalue = if is_sized { if drop_ty != ty { - bcx.pointercast(lvalue.llval, type_of::type_of(bcx.ccx(), drop_ty).ptr_to()) + bcx.pointercast(lvalue.llval, type_of::type_of(bcx.ccx, drop_ty).ptr_to()) } else { lvalue.llval } @@ -291,7 +291,7 @@ impl<'a, 'tcx> MirContext<'a, 'tcx> { // NOTE: Unlike binops, negation doesn't have its own // checked operation, just a comparison with the minimum // value, so we have to check for the assert message. - if !bcx.ccx().check_overflow() { + if !bcx.ccx.check_overflow() { use rustc_const_math::ConstMathErr::Overflow; use rustc_const_math::Op::Neg; @@ -307,8 +307,8 @@ impl<'a, 'tcx> MirContext<'a, 'tcx> { } // Pass the condition through llvm.expect for branch hinting. - let expect = bcx.ccx().get_intrinsic(&"llvm.expect.i1"); - let cond = bcx.call(expect, &[cond, C_bool(bcx.ccx(), expected)], None); + let expect = bcx.ccx.get_intrinsic(&"llvm.expect.i1"); + let cond = bcx.call(expect, &[cond, C_bool(bcx.ccx, expected)], None); // Create the failure block and the conditional branch to it. let lltarget = llblock(self, target); @@ -326,8 +326,8 @@ impl<'a, 'tcx> MirContext<'a, 'tcx> { // Get the location information. 
let loc = bcx.sess().codemap().lookup_char_pos(span.lo); let filename = Symbol::intern(&loc.file.name).as_str(); - let filename = C_str_slice(bcx.ccx(), filename); - let line = C_u32(bcx.ccx(), loc.line as u32); + let filename = C_str_slice(bcx.ccx, filename); + let line = C_u32(bcx.ccx, loc.line as u32); // Put together the arguments to the panic entry point. let (lang_item, args, const_err) = match *msg { @@ -344,9 +344,9 @@ impl<'a, 'tcx> MirContext<'a, 'tcx> { }) }); - let file_line = C_struct(bcx.ccx(), &[filename, line], false); - let align = llalign_of_min(bcx.ccx(), common::val_ty(file_line)); - let file_line = consts::addr_of(bcx.ccx(), + let file_line = C_struct(bcx.ccx, &[filename, line], false); + let align = llalign_of_min(bcx.ccx, common::val_ty(file_line)); + let file_line = consts::addr_of(bcx.ccx, file_line, align, "panic_bounds_check_loc"); @@ -356,12 +356,12 @@ impl<'a, 'tcx> MirContext<'a, 'tcx> { } mir::AssertMessage::Math(ref err) => { let msg_str = Symbol::intern(err.description()).as_str(); - let msg_str = C_str_slice(bcx.ccx(), msg_str); - let msg_file_line = C_struct(bcx.ccx(), + let msg_str = C_str_slice(bcx.ccx, msg_str); + let msg_file_line = C_struct(bcx.ccx, &[msg_str, filename, line], false); - let align = llalign_of_min(bcx.ccx(), common::val_ty(msg_file_line)); - let msg_file_line = consts::addr_of(bcx.ccx(), + let align = llalign_of_min(bcx.ccx, common::val_ty(msg_file_line)); + let msg_file_line = consts::addr_of(bcx.ccx, msg_file_line, align, "panic_loc"); @@ -385,9 +385,9 @@ impl<'a, 'tcx> MirContext<'a, 'tcx> { // Obtain the panic entry point. let def_id = common::langcall(bcx.tcx(), Some(span), "", lang_item); - let callee = Callee::def(bcx.ccx(), def_id, - bcx.ccx().empty_substs_for_def_id(def_id)); - let llfn = callee.reify(bcx.ccx()); + let callee = Callee::def(bcx.ccx, def_id, + bcx.ccx.empty_substs_for_def_id(def_id)); + let llfn = callee.reify(bcx.ccx); // Translate the actual panic invoke/call. if let Some(unwind) = cleanup { @@ -412,7 +412,7 @@ impl<'a, 'tcx> MirContext<'a, 'tcx> { let (mut callee, abi, sig) = match callee.ty.sty { ty::TyFnDef(def_id, substs, f) => { - (Callee::def(bcx.ccx(), def_id, substs), f.abi, &f.sig) + (Callee::def(bcx.ccx, def_id, substs), f.abi, &f.sig) } ty::TyFnPtr(f) => { (Callee { @@ -455,7 +455,7 @@ impl<'a, 'tcx> MirContext<'a, 'tcx> { }; // Double check for necessity to drop - if !bcx.ccx().shared().type_needs_drop(ty) { + if !bcx.ccx.shared().type_needs_drop(ty) { funclet_br(self, bcx, target); return; } @@ -467,12 +467,12 @@ impl<'a, 'tcx> MirContext<'a, 'tcx> { Ref(_) => bug!("Deref of by-Ref type {:?}", ptr.ty) }; - let drop_fn = glue::get_drop_glue(bcx.ccx(), ty); - let drop_ty = glue::get_drop_glue_type(bcx.ccx().shared(), ty); - let is_sized = bcx.ccx().shared().type_is_sized(ty); + let drop_fn = glue::get_drop_glue(bcx.ccx, ty); + let drop_ty = glue::get_drop_glue_type(bcx.ccx.shared(), ty); + let is_sized = bcx.ccx.shared().type_is_sized(ty); let llvalue = if is_sized { if drop_ty != ty { - bcx.pointercast(llval, type_of::type_of(bcx.ccx(), drop_ty).ptr_to()) + bcx.pointercast(llval, type_of::type_of(bcx.ccx, drop_ty).ptr_to()) } else { llval } @@ -518,7 +518,7 @@ impl<'a, 'tcx> MirContext<'a, 'tcx> { let op_ty = op_arg.ty(&self.mir, bcx.tcx()); self.monomorphize(&op_ty) }).collect::>(); - let fn_ty = callee.direct_fn_type(bcx.ccx(), &extra_args); + let fn_ty = callee.direct_fn_type(bcx.ccx, &extra_args); // The arguments we'll be passing. Plus one to account for outptr, if used. 
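// [Editor's note — a hedged sketch of the "outptr" convention counted
// here: when `fn_ty.ret.is_indirect()`, the return value travels through
// a hidden out-pointer in the first argument slot, so a signature like
//
//     fn f(a: i32) -> Big
//
// is lowered roughly as (names hypothetical):
//
//     fn f(ret_out: *mut Big, a: i32)
//
// hence the `+ fn_ty.ret.is_indirect() as usize` in the count below.]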
let arg_count = fn_ty.args.len() + fn_ty.ret.is_indirect() as usize; @@ -579,7 +579,7 @@ impl<'a, 'tcx> MirContext<'a, 'tcx> { let fn_ptr = match callee.data { NamedTupleConstructor(_) => { // FIXME translate this like mir::Rvalue::Aggregate. - callee.reify(bcx.ccx()) + callee.reify(bcx.ccx) } Intrinsic => { use intrinsic::trans_intrinsic_call; @@ -673,12 +673,12 @@ impl<'a, 'tcx> MirContext<'a, 'tcx> { callee: &mut CalleeData) { if let Pair(a, b) = op.val { // Treat the values in a fat pointer separately. - if common::type_is_fat_ptr(bcx.ccx(), op.ty) { + if common::type_is_fat_ptr(bcx.ccx, op.ty) { let (ptr, meta) = (a, b); if *next_idx == 0 { if let Virtual(idx) = *callee { let llfn = meth::get_virtual_method(bcx, meta, idx); - let llty = fn_ty.llvm_type(bcx.ccx()).ptr_to(); + let llty = fn_ty.llvm_type(bcx.ccx).ptr_to(); *callee = Fn(bcx.pointercast(llfn, llty)); } } @@ -722,13 +722,13 @@ impl<'a, 'tcx> MirContext<'a, 'tcx> { if by_ref && !arg.is_indirect() { // Have to load the argument, maybe while casting it. - if arg.original_ty == Type::i1(bcx.ccx()) { + if arg.original_ty == Type::i1(bcx.ccx) { // We store bools as i8 so we need to truncate to i1. llval = bcx.load_range_assert(llval, 0, 2, llvm::False); llval = bcx.trunc(llval, arg.original_ty); } else if let Some(ty) = arg.cast { llval = bcx.load(bcx.pointercast(llval, ty.ptr_to())); - let llalign = llalign_of_min(bcx.ccx(), arg.ty); + let llalign = llalign_of_min(bcx.ccx, arg.ty); unsafe { llvm::LLVMSetAlignment(llval, llalign); } @@ -761,7 +761,7 @@ impl<'a, 'tcx> MirContext<'a, 'tcx> { let base = adt::MaybeSizedValue::sized(llval); for (n, &ty) in arg_types.iter().enumerate() { let ptr = adt::trans_field_ptr(bcx, tuple.ty, base, Disr(0), n); - let val = if common::type_is_fat_ptr(bcx.ccx(), ty) { + let val = if common::type_is_fat_ptr(bcx.ccx, ty) { let (lldata, llextra) = base::load_fat_ptr(bcx, ptr, ty); Pair(lldata, llextra) } else { @@ -777,7 +777,7 @@ impl<'a, 'tcx> MirContext<'a, 'tcx> { } Immediate(llval) => { - let l = bcx.ccx().layout_of(tuple.ty); + let l = bcx.ccx.layout_of(tuple.ty); let v = if let layout::Univariant { ref variant, .. 
} = *l { variant } else { @@ -786,8 +786,8 @@ impl<'a, 'tcx> MirContext<'a, 'tcx> { for (n, &ty) in arg_types.iter().enumerate() { let mut elem = bcx.extract_value(llval, v.memory_index[n] as usize); // Truncate bools to i1, if needed - if ty.is_bool() && common::val_ty(elem) != Type::i1(bcx.ccx()) { - elem = bcx.trunc(elem, Type::i1(bcx.ccx())); + if ty.is_bool() && common::val_ty(elem) != Type::i1(bcx.ccx) { + elem = bcx.trunc(elem, Type::i1(bcx.ccx)); } // If the tuple is immediate, the elements are as well let op = OperandRef { @@ -802,8 +802,8 @@ impl<'a, 'tcx> MirContext<'a, 'tcx> { for (n, &ty) in arg_types.iter().enumerate() { let mut elem = elems[n]; // Truncate bools to i1, if needed - if ty.is_bool() && common::val_ty(elem) != Type::i1(bcx.ccx()) { - elem = bcx.trunc(elem, Type::i1(bcx.ccx())); + if ty.is_bool() && common::val_ty(elem) != Type::i1(bcx.ccx) { + elem = bcx.trunc(elem, Type::i1(bcx.ccx)); } // Pair is always made up of immediates let op = OperandRef { @@ -818,7 +818,7 @@ impl<'a, 'tcx> MirContext<'a, 'tcx> { } fn get_personality_slot(&mut self, bcx: &BlockAndBuilder<'a, 'tcx>) -> ValueRef { - let ccx = bcx.ccx(); + let ccx = bcx.ccx; if let Some(slot) = self.llpersonalityslot { slot } else { @@ -847,7 +847,7 @@ impl<'a, 'tcx> MirContext<'a, 'tcx> { let bcx = self.fcx.build_new_block("cleanup"); self.landing_pads[target_bb] = Some(bcx.llbb()); - let ccx = bcx.ccx(); + let ccx = bcx.ccx; let llpersonality = self.fcx.eh_personality(); let llretty = Type::struct_(ccx, &[Type::i8p(ccx), Type::i32(ccx)], false); let llretval = bcx.landing_pad(llretty, llpersonality, 1, self.fcx.llfn); @@ -920,23 +920,23 @@ impl<'a, 'tcx> MirContext<'a, 'tcx> { src: &mir::Operand<'tcx>, dst: LvalueRef<'tcx>) { let mut val = self.trans_operand(bcx, src); if let ty::TyFnDef(def_id, substs, _) = val.ty.sty { - let llouttype = type_of::type_of(bcx.ccx(), dst.ty.to_ty(bcx.tcx())); - let out_type_size = llbitsize_of_real(bcx.ccx(), llouttype); + let llouttype = type_of::type_of(bcx.ccx, dst.ty.to_ty(bcx.tcx())); + let out_type_size = llbitsize_of_real(bcx.ccx, llouttype); if out_type_size != 0 { // FIXME #19925 Remove this hack after a release cycle. - let f = Callee::def(bcx.ccx(), def_id, substs); + let f = Callee::def(bcx.ccx, def_id, substs); let ty = match f.ty.sty { ty::TyFnDef(.., f) => bcx.tcx().mk_fn_ptr(f), _ => f.ty }; val = OperandRef { - val: Immediate(f.reify(bcx.ccx())), + val: Immediate(f.reify(bcx.ccx)), ty: ty }; } } - let llty = type_of::type_of(bcx.ccx(), val.ty); + let llty = type_of::type_of(bcx.ccx, val.ty); let cast_ptr = bcx.pointercast(dst.llval, llty.ptr_to()); self.store_operand(bcx, cast_ptr, val); } diff --git a/src/librustc_trans/mir/constant.rs b/src/librustc_trans/mir/constant.rs index 56f88977c865d..08f68f8d49c78 100644 --- a/src/librustc_trans/mir/constant.rs +++ b/src/librustc_trans/mir/constant.rs @@ -957,27 +957,27 @@ impl<'a, 'tcx> MirContext<'a, 'tcx> { mir::Literal::Item { def_id, substs } => { // Shortcut for zero-sized types, including function item // types, which would not work with MirConstContext. 
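// [Editor's note — e.g. `let f = foo;` gives `f` a zero-sized fn-item
// type; the shortcut below simply returns `Const::new(C_null(llty), ty)`
// for such types instead of running MIR constant evaluation on them.]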
- if common::type_is_zero_size(bcx.ccx(), ty) { - let llty = type_of::type_of(bcx.ccx(), ty); + if common::type_is_zero_size(bcx.ccx, ty) { + let llty = type_of::type_of(bcx.ccx, ty); return Const::new(C_null(llty), ty); } let substs = self.monomorphize(&substs); let instance = Instance::new(def_id, substs); - MirConstContext::trans_def(bcx.ccx(), instance, IndexVec::new()) + MirConstContext::trans_def(bcx.ccx, instance, IndexVec::new()) } mir::Literal::Promoted { index } => { let mir = &self.mir.promoted[index]; - MirConstContext::new(bcx.ccx(), mir, self.param_substs, IndexVec::new()).trans() + MirConstContext::new(bcx.ccx, mir, self.param_substs, IndexVec::new()).trans() } mir::Literal::Value { value } => { - Ok(Const::from_constval(bcx.ccx(), value, ty)) + Ok(Const::from_constval(bcx.ccx, value, ty)) } }; let result = result.unwrap_or_else(|_| { // We've errored, so we don't have to produce working code. - let llty = type_of::type_of(bcx.ccx(), ty); + let llty = type_of::type_of(bcx.ccx, ty); Const::new(C_undef(llty), ty) }); diff --git a/src/librustc_trans/mir/lvalue.rs b/src/librustc_trans/mir/lvalue.rs index 3e5ad826e2ab7..980c6e678c525 100644 --- a/src/librustc_trans/mir/lvalue.rs +++ b/src/librustc_trans/mir/lvalue.rs @@ -74,7 +74,7 @@ impl<'a, 'tcx> MirContext<'a, 'tcx> { -> LvalueRef<'tcx> { debug!("trans_lvalue(lvalue={:?})", lvalue); - let ccx = bcx.ccx(); + let ccx = bcx.ccx; let tcx = bcx.tcx(); if let mir::Lvalue::Local(index) = *lvalue { @@ -125,7 +125,7 @@ impl<'a, 'tcx> MirContext<'a, 'tcx> { // Slices already point to the array element type. bcx.inbounds_gep(tr_base.llval, &[llindex]) } else { - let zero = common::C_uint(bcx.ccx(), 0u64); + let zero = common::C_uint(bcx.ccx, 0u64); bcx.inbounds_gep(tr_base.llval, &[zero, llindex]) }; element @@ -162,19 +162,19 @@ impl<'a, 'tcx> MirContext<'a, 'tcx> { mir::ProjectionElem::ConstantIndex { offset, from_end: false, min_length: _ } => { - let lloffset = C_uint(bcx.ccx(), offset); + let lloffset = C_uint(bcx.ccx, offset); (project_index(lloffset), ptr::null_mut()) } mir::ProjectionElem::ConstantIndex { offset, from_end: true, min_length: _ } => { - let lloffset = C_uint(bcx.ccx(), offset); - let lllen = tr_base.len(bcx.ccx()); + let lloffset = C_uint(bcx.ccx, offset); + let lllen = tr_base.len(bcx.ccx); let llindex = bcx.sub(lllen, lloffset); (project_index(llindex), ptr::null_mut()) } mir::ProjectionElem::Subslice { from, to } => { - let llindex = C_uint(bcx.ccx(), from); + let llindex = C_uint(bcx.ccx, from); let llbase = project_index(llindex); let base_ty = tr_base.ty.to_ty(bcx.tcx()); @@ -183,14 +183,14 @@ impl<'a, 'tcx> MirContext<'a, 'tcx> { // must cast the lvalue pointer type to the new // array type (*[%_; new_len]). let base_ty = self.monomorphized_lvalue_ty(lvalue); - let llbasety = type_of::type_of(bcx.ccx(), base_ty).ptr_to(); + let llbasety = type_of::type_of(bcx.ccx, base_ty).ptr_to(); let llbase = bcx.pointercast(llbase, llbasety); (llbase, ptr::null_mut()) } ty::TySlice(..) => { assert!(tr_base.llextra != ptr::null_mut()); let lllen = bcx.sub(tr_base.llextra, - C_uint(bcx.ccx(), from+to)); + C_uint(bcx.ccx, from+to)); (llbase, lllen) } _ => bug!("unexpected type {:?} in Subslice", base_ty) @@ -235,9 +235,9 @@ impl<'a, 'tcx> MirContext<'a, 'tcx> { // See comments in LocalRef::new_operand as to why // we always have Some in a ZST LocalRef::Operand. 
let ty = self.monomorphized_lvalue_ty(lvalue); - if common::type_is_zero_size(bcx.ccx(), ty) { + if common::type_is_zero_size(bcx.ccx, ty) { // Pass an undef pointer as no stores can actually occur. - let llptr = C_undef(type_of(bcx.ccx(), ty).ptr_to()); + let llptr = C_undef(type_of(bcx.ccx, ty).ptr_to()); f(self, LvalueRef::new_sized(llptr, LvalueTy::from_ty(ty))) } else { bug!("Lvalue local already set"); @@ -259,9 +259,9 @@ impl<'a, 'tcx> MirContext<'a, 'tcx> { llindex: ValueRef) -> ValueRef { - let ccx = bcx.ccx(); - let index_size = machine::llbitsize_of_real(bcx.ccx(), common::val_ty(llindex)); - let int_size = machine::llbitsize_of_real(bcx.ccx(), ccx.int_type()); + let ccx = bcx.ccx; + let index_size = machine::llbitsize_of_real(bcx.ccx, common::val_ty(llindex)); + let int_size = machine::llbitsize_of_real(bcx.ccx, ccx.int_type()); if index_size < int_size { bcx.zext(llindex, ccx.int_type()) } else if index_size > int_size { diff --git a/src/librustc_trans/mir/mod.rs b/src/librustc_trans/mir/mod.rs index 8b43930260d86..93caa565817e4 100644 --- a/src/librustc_trans/mir/mod.rs +++ b/src/librustc_trans/mir/mod.rs @@ -257,7 +257,7 @@ pub fn trans_mir<'a, 'tcx: 'a>( if !lvalue_locals.contains(local.index()) && !dbg { debug!("alloc: {:?} ({}) -> operand", local, name); - return LocalRef::new_operand(bcx.ccx(), ty); + return LocalRef::new_operand(bcx.ccx, ty); } debug!("alloc: {:?} ({}) -> lvalue", local, name); @@ -283,7 +283,7 @@ pub fn trans_mir<'a, 'tcx: 'a>( // alloca in advance. Instead we wait until we see the // definition and update the operand there. debug!("alloc: {:?} -> operand", local); - LocalRef::new_operand(bcx.ccx(), ty) + LocalRef::new_operand(bcx.ccx, ty) } } }; @@ -382,7 +382,7 @@ fn arg_local_refs<'a, 'tcx>(bcx: &BlockAndBuilder<'a, 'tcx>, let dst = bcx.struct_gep(lltemp, i); let arg = &fcx.fn_ty.args[idx]; idx += 1; - if common::type_is_fat_ptr(bcx.ccx(), tupled_arg_ty) { + if common::type_is_fat_ptr(bcx.ccx, tupled_arg_ty) { // We pass fat pointers as two words, but inside the tuple // they are the two sub-fields of a single aggregate field. let meta = &fcx.fn_ty.args[idx]; @@ -431,7 +431,7 @@ fn arg_local_refs<'a, 'tcx>(bcx: &BlockAndBuilder<'a, 'tcx>, !arg.is_indirect() && arg.cast.is_none() && arg_scope.is_none() { if arg.is_ignore() { - return LocalRef::new_operand(bcx.ccx(), arg_ty); + return LocalRef::new_operand(bcx.ccx, arg_ty); } // We don't have to cast or keep the argument in the alloca. @@ -442,7 +442,7 @@ fn arg_local_refs<'a, 'tcx>(bcx: &BlockAndBuilder<'a, 'tcx>, } let llarg = llvm::get_param(fcx.llfn, llarg_idx as c_uint); llarg_idx += 1; - let val = if common::type_is_fat_ptr(bcx.ccx(), arg_ty) { + let val = if common::type_is_fat_ptr(bcx.ccx, arg_ty) { let meta = &fcx.fn_ty.args[idx]; idx += 1; assert_eq!((meta.cast, meta.pad), (None, None)); @@ -459,7 +459,7 @@ fn arg_local_refs<'a, 'tcx>(bcx: &BlockAndBuilder<'a, 'tcx>, return LocalRef::Operand(Some(operand.unpack_if_pair(bcx))); } else { let lltemp = base::alloc_ty(&bcx, arg_ty, &format!("arg{}", arg_index)); - if common::type_is_fat_ptr(bcx.ccx(), arg_ty) { + if common::type_is_fat_ptr(bcx.ccx, arg_ty) { // we pass fat pointers as two words, but we want to // represent them internally as a pointer to two words, // so make an alloca to store them in. 
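// [Editor's note — a minimal hedged sketch of the two-word fat-pointer
// shape that arg_local_refs keeps splitting and reassembling here; the
// struct and field names are hypothetical, but the roles match the
// abi::FAT_PTR_ADDR / abi::FAT_PTR_EXTRA constants used in this series:
//
//     #[repr(C)]
//     struct FatPtr {
//         data: *const u8, // FAT_PTR_ADDR: the thin data pointer
//         extra: usize,    // FAT_PTR_EXTRA: slice length or vtable ptr
//     }
//
// e.g. `&[u8]` carries (data, len) and `&Trait` carries (data, vtable),
// which is why one fat argument consumes two entries of fcx.fn_ty.args.]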
@@ -517,7 +517,7 @@ fn arg_local_refs<'a, 'tcx>(bcx: &BlockAndBuilder<'a, 'tcx>, llval }; - let layout = bcx.ccx().layout_of(closure_ty); + let layout = bcx.ccx.layout_of(closure_ty); let offsets = match *layout { layout::Univariant { ref variant, .. } => &variant.offsets[..], _ => bug!("Closures are only supposed to be Univariant") @@ -526,7 +526,6 @@ fn arg_local_refs<'a, 'tcx>(bcx: &BlockAndBuilder<'a, 'tcx>, for (i, (decl, ty)) in mir.upvar_decls.iter().zip(upvar_tys).enumerate() { let byte_offset_of_var_in_env = offsets[i].bytes(); - let ops = unsafe { [llvm::LLVMRustDIBuilderCreateOpDeref(), llvm::LLVMRustDIBuilderCreateOpPlus(), diff --git a/src/librustc_trans/mir/operand.rs b/src/librustc_trans/mir/operand.rs index 6e69608e51e6b..a15d51d9da64d 100644 --- a/src/librustc_trans/mir/operand.rs +++ b/src/librustc_trans/mir/operand.rs @@ -89,14 +89,14 @@ impl<'a, 'tcx> OperandRef<'tcx> { -> OperandRef<'tcx> { if let OperandValue::Pair(a, b) = self.val { // Reconstruct the immediate aggregate. - let llty = type_of::type_of(bcx.ccx(), self.ty); + let llty = type_of::type_of(bcx.ccx, self.ty); let mut llpair = common::C_undef(llty); let elems = [a, b]; for i in 0..2 { let mut elem = elems[i]; // Extend boolean i1's to i8. - if common::val_ty(elem) == Type::i1(bcx.ccx()) { - elem = bcx.zext(elem, Type::i8(bcx.ccx())); + if common::val_ty(elem) == Type::i1(bcx.ccx) { + elem = bcx.zext(elem, Type::i8(bcx.ccx)); } llpair = bcx.insert_value(llpair, elem, i); } @@ -111,19 +111,19 @@ impl<'a, 'tcx> OperandRef<'tcx> { -> OperandRef<'tcx> { if let OperandValue::Immediate(llval) = self.val { // Deconstruct the immediate aggregate. - if common::type_is_imm_pair(bcx.ccx(), self.ty) { + if common::type_is_imm_pair(bcx.ccx, self.ty) { debug!("Operand::unpack_if_pair: unpacking {:?}", self); let mut a = bcx.extract_value(llval, 0); let mut b = bcx.extract_value(llval, 1); - let pair_fields = common::type_pair_fields(bcx.ccx(), self.ty); + let pair_fields = common::type_pair_fields(bcx.ccx, self.ty); if let Some([a_ty, b_ty]) = pair_fields { if a_ty.is_bool() { - a = bcx.trunc(a, Type::i1(bcx.ccx())); + a = bcx.trunc(a, Type::i1(bcx.ccx)); } if b_ty.is_bool() { - b = bcx.trunc(b, Type::i1(bcx.ccx())); + b = bcx.trunc(b, Type::i1(bcx.ccx)); } } @@ -143,11 +143,11 @@ impl<'a, 'tcx> MirContext<'a, 'tcx> { { debug!("trans_load: {:?} @ {:?}", Value(llval), ty); - let val = if common::type_is_fat_ptr(bcx.ccx(), ty) { + let val = if common::type_is_fat_ptr(bcx.ccx, ty) { let (lldata, llextra) = base::load_fat_ptr(bcx, llval, ty); OperandValue::Pair(lldata, llextra) - } else if common::type_is_imm_pair(bcx.ccx(), ty) { - let [a_ty, b_ty] = common::type_pair_fields(bcx.ccx(), ty).unwrap(); + } else if common::type_is_imm_pair(bcx.ccx, ty) { + let [a_ty, b_ty] = common::type_pair_fields(bcx.ccx, ty).unwrap(); let a_ptr = bcx.struct_gep(llval, 0); let b_ptr = bcx.struct_gep(llval, 1); @@ -155,7 +155,7 @@ impl<'a, 'tcx> MirContext<'a, 'tcx> { base::load_ty(bcx, a_ptr, a_ty), base::load_ty(bcx, b_ptr, b_ty) ) - } else if common::type_is_immediate(bcx.ccx(), ty) { + } else if common::type_is_immediate(bcx.ccx, ty) { OperandValue::Immediate(base::load_ty(bcx, llval, ty)) } else { OperandValue::Ref(llval) @@ -230,7 +230,7 @@ impl<'a, 'tcx> MirContext<'a, 'tcx> { mir::Operand::Constant(ref constant) => { let val = self.trans_constant(bcx, constant); - let operand = val.to_operand(bcx.ccx()); + let operand = val.to_operand(bcx.ccx); if let OperandValue::Ref(ptr) = operand.val { // If this is a OperandValue::Ref to an 
immediate constant, load it. self.trans_load(bcx, ptr, operand.ty) @@ -248,7 +248,7 @@ impl<'a, 'tcx> MirContext<'a, 'tcx> { debug!("store_operand: operand={:?}", operand); // Avoid generating stores of zero-sized values, because the only way to have a zero-sized // value is through `undef`, and store itself is useless. - if common::type_is_zero_size(bcx.ccx(), operand.ty) { + if common::type_is_zero_size(bcx.ccx, operand.ty) { return; } match operand.val { diff --git a/src/librustc_trans/mir/rvalue.rs b/src/librustc_trans/mir/rvalue.rs index 5f17db73d6629..0bca8429d22b2 100644 --- a/src/librustc_trans/mir/rvalue.rs +++ b/src/librustc_trans/mir/rvalue.rs @@ -55,7 +55,7 @@ impl<'a, 'tcx> MirContext<'a, 'tcx> { mir::Rvalue::Cast(mir::CastKind::Unsize, ref source, cast_ty) => { let cast_ty = self.monomorphize(&cast_ty); - if common::type_is_fat_ptr(bcx.ccx(), cast_ty) { + if common::type_is_fat_ptr(bcx.ccx, cast_ty) { // into-coerce of a thin pointer to a fat pointer - just // use the operand path. let (bcx, temp) = self.trans_rvalue_operand(bcx, rvalue); @@ -97,7 +97,7 @@ impl<'a, 'tcx> MirContext<'a, 'tcx> { mir::Rvalue::Repeat(ref elem, ref count) => { let tr_elem = self.trans_operand(&bcx, elem); let size = count.value.as_u64(bcx.tcx().sess.target.uint_type); - let size = C_uint(bcx.ccx(), size); + let size = C_uint(bcx.ccx, size); let base = base::get_dataptr(&bcx, dest.llval); tvec::slice_for_each(&bcx, base, tr_elem.ty, size, |bcx, llslot| { self.store_operand(&bcx, llslot, tr_elem); @@ -113,7 +113,7 @@ impl<'a, 'tcx> MirContext<'a, 'tcx> { for (i, operand) in operands.iter().enumerate() { let op = self.trans_operand(&bcx, operand); // Do not generate stores and GEPis for zero-sized fields. - if !common::type_is_zero_size(bcx.ccx(), op.ty) { + if !common::type_is_zero_size(bcx.ccx, op.ty) { let val = adt::MaybeSizedValue::sized(dest.llval); let field_index = active_field_index.unwrap_or(i); let lldest_i = adt::trans_field_ptr(&bcx, dest_ty, val, disr, @@ -124,7 +124,7 @@ impl<'a, 'tcx> MirContext<'a, 'tcx> { }, _ => { // If this is a tuple or closure, we need to translate GEP indices. - let layout = bcx.ccx().layout_of(dest.ty.to_ty(bcx.tcx())); + let layout = bcx.ccx.layout_of(dest.ty.to_ty(bcx.tcx())); let translation = if let Layout::Univariant { ref variant, .. } = *layout { Some(&variant.memory_index) } else { @@ -133,7 +133,7 @@ impl<'a, 'tcx> MirContext<'a, 'tcx> { for (i, operand) in operands.iter().enumerate() { let op = self.trans_operand(&bcx, operand); // Do not generate stores and GEPis for zero-sized fields. - if !common::type_is_zero_size(bcx.ccx(), op.ty) { + if !common::type_is_zero_size(bcx.ccx, op.ty) { // Note: perhaps this should be StructGep, but // note that in some cases the values here will // not be structs but arrays. @@ -192,8 +192,8 @@ impl<'a, 'tcx> MirContext<'a, 'tcx> { match operand.ty.sty { ty::TyFnDef(def_id, substs, _) => { OperandValue::Immediate( - Callee::def(bcx.ccx(), def_id, substs) - .reify(bcx.ccx())) + Callee::def(bcx.ccx, def_id, substs) + .reify(bcx.ccx)) } _ => { bug!("{} cannot be reified to a fn ptr", operand.ty) @@ -207,7 +207,7 @@ impl<'a, 'tcx> MirContext<'a, 'tcx> { mir::CastKind::Unsize => { // unsize targets other than to a fat pointer currently // can't be operands. 
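// [Editor's note — the canonical unsize-as-operand case, hedged: coercing
// `&[u8; 4]` to `&[u8]` keeps the data pointer and materializes the
// constant length 4 as `llextra`, yielding OperandValue::Pair(data, len).]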
- assert!(common::type_is_fat_ptr(bcx.ccx(), cast_ty)); + assert!(common::type_is_fat_ptr(bcx.ccx, cast_ty)); match operand.val { OperandValue::Pair(lldata, llextra) => { @@ -217,7 +217,7 @@ impl<'a, 'tcx> MirContext<'a, 'tcx> { // &'a fmt::Debug+Send => &'a fmt::Debug, // So we need to pointercast the base to ensure // the types match up. - let llcast_ty = type_of::fat_ptr_base_ty(bcx.ccx(), cast_ty); + let llcast_ty = type_of::fat_ptr_base_ty(bcx.ccx, cast_ty); let lldata = bcx.pointercast(lldata, llcast_ty); OperandValue::Pair(lldata, llextra) } @@ -233,11 +233,11 @@ impl<'a, 'tcx> MirContext<'a, 'tcx> { } } } - mir::CastKind::Misc if common::type_is_fat_ptr(bcx.ccx(), operand.ty) => { - let ll_cast_ty = type_of::immediate_type_of(bcx.ccx(), cast_ty); - let ll_from_ty = type_of::immediate_type_of(bcx.ccx(), operand.ty); + mir::CastKind::Misc if common::type_is_fat_ptr(bcx.ccx, operand.ty) => { + let ll_cast_ty = type_of::immediate_type_of(bcx.ccx, cast_ty); + let ll_from_ty = type_of::immediate_type_of(bcx.ccx, operand.ty); if let OperandValue::Pair(data_ptr, meta_ptr) = operand.val { - if common::type_is_fat_ptr(bcx.ccx(), cast_ty) { + if common::type_is_fat_ptr(bcx.ccx, cast_ty) { let ll_cft = ll_cast_ty.field_types(); let ll_fft = ll_from_ty.field_types(); let data_cast = bcx.pointercast(data_ptr, ll_cft[0]); @@ -254,13 +254,13 @@ impl<'a, 'tcx> MirContext<'a, 'tcx> { } } mir::CastKind::Misc => { - debug_assert!(common::type_is_immediate(bcx.ccx(), cast_ty)); + debug_assert!(common::type_is_immediate(bcx.ccx, cast_ty)); let r_t_in = CastTy::from_ty(operand.ty).expect("bad input type for cast"); let r_t_out = CastTy::from_ty(cast_ty).expect("bad output type for cast"); - let ll_t_in = type_of::immediate_type_of(bcx.ccx(), operand.ty); - let ll_t_out = type_of::immediate_type_of(bcx.ccx(), cast_ty); + let ll_t_in = type_of::immediate_type_of(bcx.ccx, operand.ty); + let ll_t_out = type_of::immediate_type_of(bcx.ccx, cast_ty); let (llval, signed) = if let CastTy::Int(IntTy::CEnum) = r_t_in { - let l = bcx.ccx().layout_of(operand.ty); + let l = bcx.ccx.layout_of(operand.ty); let discr = match operand.val { OperandValue::Immediate(llval) => llval, OperandValue::Ref(llptr) => { @@ -357,7 +357,7 @@ impl<'a, 'tcx> MirContext<'a, 'tcx> { // Note: lvalues are indirect, so storing the `llval` into the // destination effectively creates a reference. 
- let operand = if bcx.ccx().shared().type_is_sized(ty) { + let operand = if bcx.ccx.shared().type_is_sized(ty) { OperandRef { val: OperandValue::Immediate(tr_lvalue.llval), ty: ref_ty, @@ -375,7 +375,7 @@ impl<'a, 'tcx> MirContext<'a, 'tcx> { mir::Rvalue::Len(ref lvalue) => { let tr_lvalue = self.trans_lvalue(&bcx, lvalue); let operand = OperandRef { - val: OperandValue::Immediate(tr_lvalue.len(bcx.ccx())), + val: OperandValue::Immediate(tr_lvalue.len(bcx.ccx)), ty: bcx.tcx().types.usize, }; (bcx, operand) @@ -384,7 +384,7 @@ impl<'a, 'tcx> MirContext<'a, 'tcx> { mir::Rvalue::BinaryOp(op, ref lhs, ref rhs) => { let lhs = self.trans_operand(&bcx, lhs); let rhs = self.trans_operand(&bcx, rhs); - let llresult = if common::type_is_fat_ptr(bcx.ccx(), lhs.ty) { + let llresult = if common::type_is_fat_ptr(bcx.ccx, lhs.ty) { match (lhs.val, rhs.val) { (OperandValue::Pair(lhs_addr, lhs_extra), OperandValue::Pair(rhs_addr, rhs_extra)) => { @@ -443,10 +443,10 @@ impl<'a, 'tcx> MirContext<'a, 'tcx> { mir::Rvalue::Box(content_ty) => { let content_ty: Ty<'tcx> = self.monomorphize(&content_ty); - let llty = type_of::type_of(bcx.ccx(), content_ty); - let llsize = machine::llsize_of(bcx.ccx(), llty); - let align = type_of::align_of(bcx.ccx(), content_ty); - let llalign = C_uint(bcx.ccx(), align); + let llty = type_of::type_of(bcx.ccx, content_ty); + let llsize = machine::llsize_of(bcx.ccx, llty); + let align = type_of::align_of(bcx.ccx, content_ty); + let llalign = C_uint(bcx.ccx, align); let llty_ptr = llty.ptr_to(); let box_ty = bcx.tcx().mk_box(content_ty); @@ -457,8 +457,8 @@ impl<'a, 'tcx> MirContext<'a, 'tcx> { bcx.sess().fatal(&format!("allocation of `{}` {}", box_ty, s)); } }; - let r = Callee::def(bcx.ccx(), def_id, bcx.tcx().intern_substs(&[])) - .reify(bcx.ccx()); + let r = Callee::def(bcx.ccx, def_id, bcx.tcx().intern_substs(&[])) + .reify(bcx.ccx); let val = bcx.pointercast(bcx.call(r, &[llsize, llalign], None), llty_ptr); let operand = OperandRef { @@ -528,7 +528,7 @@ impl<'a, 'tcx> MirContext<'a, 'tcx> { mir::BinOp::Shr => common::build_unchecked_rshift(bcx, input_ty, lhs, rhs), mir::BinOp::Ne | mir::BinOp::Lt | mir::BinOp::Gt | mir::BinOp::Eq | mir::BinOp::Le | mir::BinOp::Ge => if is_nil { - C_bool(bcx.ccx(), match op { + C_bool(bcx.ccx, match op { mir::BinOp::Ne | mir::BinOp::Lt | mir::BinOp::Gt => false, mir::BinOp::Eq | mir::BinOp::Le | mir::BinOp::Ge => true, _ => unreachable!() @@ -542,8 +542,8 @@ impl<'a, 'tcx> MirContext<'a, 'tcx> { let (lhs, rhs) = if is_bool { // FIXME(#36856) -- extend the bools into `i8` because // LLVM's i1 comparisons are broken. - (bcx.zext(lhs, Type::i8(bcx.ccx())), - bcx.zext(rhs, Type::i8(bcx.ccx()))) + (bcx.zext(lhs, Type::i8(bcx.ccx)), + bcx.zext(rhs, Type::i8(bcx.ccx))) } else { (lhs, rhs) }; @@ -613,9 +613,9 @@ impl<'a, 'tcx> MirContext<'a, 'tcx> { // with #[rustc_inherit_overflow_checks] and inlined from // another crate (mostly core::num generic/#[inline] fns), // while the current crate doesn't use overflow checks. - if !bcx.ccx().check_overflow() { + if !bcx.ccx.check_overflow() { let val = self.trans_scalar_binop(bcx, op, lhs, rhs, input_ty); - return OperandValue::Pair(val, C_bool(bcx.ccx(), false)); + return OperandValue::Pair(val, C_bool(bcx.ccx, false)); } // First try performing the operation on constants, which @@ -623,7 +623,7 @@ impl<'a, 'tcx> MirContext<'a, 'tcx> { // This is necessary to determine when an overflow Assert // will always panic at runtime, and produce a warning. 
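// [Editor's note — a worked example of the fold below, hedged: for `u8`
// operands `255 + 1`, const_scalar_checked_binop should yield the wrapped
// result plus an overflow flag, i.e. OperandValue::Pair(0, true), so the
// enclosing overflow Assert is known at compile time to trip.]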
if let Some((val, of)) = const_scalar_checked_binop(bcx.tcx(), op, lhs, rhs, input_ty) { - return OperandValue::Pair(val, C_bool(bcx.ccx(), of)); + return OperandValue::Pair(val, C_bool(bcx.ccx, of)); } let (val, of) = match op { @@ -752,5 +752,5 @@ fn get_overflow_intrinsic(oop: OverflowOp, bcx: &BlockAndBuilder, ty: Ty) -> Val }, }; - bcx.ccx().get_intrinsic(&name) + bcx.ccx.get_intrinsic(&name) } diff --git a/src/librustc_trans/mir/statement.rs b/src/librustc_trans/mir/statement.rs index 51d7a1967371c..cc85f68c197ec 100644 --- a/src/librustc_trans/mir/statement.rs +++ b/src/librustc_trans/mir/statement.rs @@ -41,7 +41,7 @@ impl<'a, 'tcx> MirContext<'a, 'tcx> { LocalRef::Operand(Some(_)) => { let ty = self.monomorphized_lvalue_ty(lvalue); - if !common::type_is_zero_size(bcx.ccx(), ty) { + if !common::type_is_zero_size(bcx.ccx, ty) { span_bug!(statement.source_info.span, "operand {:?} already assigned", rvalue); diff --git a/src/librustc_trans/tvec.rs b/src/librustc_trans/tvec.rs index 0792896487315..c09726fda0810 100644 --- a/src/librustc_trans/tvec.rs +++ b/src/librustc_trans/tvec.rs @@ -21,7 +21,7 @@ pub fn slice_for_each<'a, 'tcx, F>( f: F ) -> BlockAndBuilder<'a, 'tcx> where F: FnOnce(&BlockAndBuilder<'a, 'tcx>, ValueRef) { // Special-case vectors with elements of size 0 so they don't go out of bounds (#9890) - let zst = type_is_zero_size(bcx.ccx(), unit_ty); + let zst = type_is_zero_size(bcx.ccx, unit_ty); let add = |bcx: &BlockAndBuilder, a, b| if zst { bcx.add(a, b) } else { @@ -33,7 +33,7 @@ pub fn slice_for_each<'a, 'tcx, F>( let header_bcx = bcx.fcx().build_new_block("slice_loop_header"); let start = if zst { - C_uint(bcx.ccx(), 0usize) + C_uint(bcx.ccx, 0usize) } else { data_ptr }; @@ -46,7 +46,7 @@ pub fn slice_for_each<'a, 'tcx, F>( header_bcx.cond_br(keep_going, body_bcx.llbb(), next_bcx.llbb()); f(&body_bcx, if zst { data_ptr } else { current }); - let next = add(&body_bcx, current, C_uint(bcx.ccx(), 1usize)); + let next = add(&body_bcx, current, C_uint(bcx.ccx, 1usize)); header_bcx.add_incoming_to_phi(current, next, body_bcx.llbb()); body_bcx.br(header_bcx.llbb()); next_bcx From 15c9e5e35bc3a9675c476ae10eb656df2c2c14ed Mon Sep 17 00:00:00 2001 From: Mark Simulacrum Date: Mon, 19 Dec 2016 17:39:14 -0700 Subject: [PATCH 084/103] Mutate llargs instead of reconstructing it. --- src/librustc_trans/callee.rs | 34 ++++++++++++++-------------------- 1 file changed, 14 insertions(+), 20 deletions(-) diff --git a/src/librustc_trans/callee.rs b/src/librustc_trans/callee.rs index 384c70ffa448b..44e94a1dfe3e5 100644 --- a/src/librustc_trans/callee.rs +++ b/src/librustc_trans/callee.rs @@ -331,16 +331,21 @@ fn trans_fn_once_adapter_shim<'a, 'tcx>( let fcx = FunctionContext::new(ccx, lloncefn, fn_ty); let mut bcx = fcx.get_entry_block(); + let callee = Callee { + data: Fn(llreffn), + ty: llref_fn_ty + }; + // the first argument (`self`) will be the (by value) closure env. let mut llargs = get_params(fcx.llfn); - let mut self_idx = fcx.fn_ty.ret.is_indirect() as usize; + let idx = fcx.fn_ty.ret.is_indirect() as usize; let env_arg = &fcx.fn_ty.args[0]; let llenv = if env_arg.is_indirect() { - llargs[self_idx] + llargs[idx] } else { let scratch = alloc_ty(&bcx, closure_ty, "self"); - let mut llarg_idx = self_idx; + let mut llarg_idx = idx; env_arg.store_fn_arg(&bcx, &mut llarg_idx, scratch); scratch }; @@ -349,35 +354,24 @@ fn trans_fn_once_adapter_shim<'a, 'tcx>( // Adjust llargs such that llargs[self_idx..] has the call arguments. 
// For zero-sized closures that means sneaking in a new argument. if env_arg.is_ignore() { - if self_idx > 0 { - self_idx -= 1; - llargs[self_idx] = llenv; + if fcx.fn_ty.ret.is_indirect() { + llargs[0] = llenv; } else { llargs.insert(0, llenv); } } else { - llargs[self_idx] = llenv; + llargs[idx] = llenv; } - let callee = Callee { - data: Fn(llreffn), - ty: llref_fn_ty - }; - // Call the by-ref closure body with `self` in a cleanup scope, // to drop `self` when the body returns, or in case it unwinds. let self_scope = fcx.schedule_drop_mem(llenv, closure_ty); let fn_ret = callee.ty.fn_ret(); let fn_ty = callee.direct_fn_type(bcx.ccx, &[]); - let first_llarg = if fn_ty.ret.is_indirect() && !fcx.fn_ty.ret.is_ignore() { - Some(get_param(fcx.llfn, 0)) - } else { - None - }; - let llargs = first_llarg.into_iter().chain(llargs[self_idx..].iter().cloned()) - .collect::>(); - + if fn_ty.ret.is_indirect() { + llargs.insert(0, get_param(fcx.llfn, 0)); + } let llfn = callee.reify(bcx.ccx); let llret; if let Some(landing_pad) = self_scope.landing_pad { From 6fac0a1a84891ccf38f0cfbaf7b93e1c64578bfb Mon Sep 17 00:00:00 2001 From: Mark Simulacrum Date: Mon, 19 Dec 2016 17:48:41 -0700 Subject: [PATCH 085/103] Change *.fcx.ccx to *.ccx --- src/librustc_trans/base.rs | 4 ++-- src/librustc_trans/cleanup.rs | 4 +--- src/librustc_trans/common.rs | 7 ++----- src/librustc_trans/glue.rs | 16 +++++++--------- src/librustc_trans/mir/analyze.rs | 22 +++++++++++----------- src/librustc_trans/mir/block.rs | 2 +- src/librustc_trans/mir/lvalue.rs | 4 ++-- src/librustc_trans/mir/mod.rs | 16 ++++++++-------- 8 files changed, 34 insertions(+), 41 deletions(-) diff --git a/src/librustc_trans/base.rs b/src/librustc_trans/base.rs index 9697f2604349b..3f5cbd6804248 100644 --- a/src/librustc_trans/base.rs +++ b/src/librustc_trans/base.rs @@ -424,7 +424,7 @@ pub fn load_ty<'a, 'tcx>(b: &Builder<'a, 'tcx>, ptr: ValueRef, t: Ty<'tcx>) -> V pub fn store_ty<'a, 'tcx>(cx: &BlockAndBuilder<'a, 'tcx>, v: ValueRef, dst: ValueRef, t: Ty<'tcx>) { debug!("store_ty: {:?} : {:?} <- {:?}", Value(dst), t, Value(v)); - if common::type_is_fat_ptr(cx.ccx(), t) { + if common::type_is_fat_ptr(cx.ccx, t) { let lladdr = cx.extract_value(v, abi::FAT_PTR_ADDR); let llextra = cx.extract_value(v, abi::FAT_PTR_EXTRA); store_fat_ptr(cx, lladdr, llextra, dst, t); @@ -656,7 +656,7 @@ pub fn trans_ctor_shim<'a, 'tcx>(ccx: &CrateContext<'a, 'tcx>, if let Some(cast_ty) = fcx.fn_ty.ret.cast { let load = bcx.load(bcx.pointercast(dest, cast_ty.ptr_to())); - let llalign = llalign_of_min(fcx.ccx, fcx.fn_ty.ret.ty); + let llalign = llalign_of_min(ccx, fcx.fn_ty.ret.ty); unsafe { llvm::LLVMSetAlignment(load, llalign); } diff --git a/src/librustc_trans/cleanup.rs b/src/librustc_trans/cleanup.rs index 4eb786d63941b..67dc347f21fed 100644 --- a/src/librustc_trans/cleanup.rs +++ b/src/librustc_trans/cleanup.rs @@ -175,9 +175,7 @@ impl<'tcx> CleanupScope<'tcx> { // The landing pad return type (the type being propagated). Not sure // what this represents but it's determined by the personality // function and this is what the EH proposal example uses. 
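// [Editor's note — concretely, this is LLVM's conventional landing-pad
// aggregate: an exception pointer plus a type-selector, i.e. in IR:
//
//     %lp = landingpad { i8*, i32 } cleanup
//
// which is what the `Type::struct_(&[i8p, i32], false)` below models.]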
- let llretty = Type::struct_(fcx.ccx, - &[Type::i8p(fcx.ccx), Type::i32(fcx.ccx)], - false); + let llretty = Type::struct_(fcx.ccx, &[Type::i8p(fcx.ccx), Type::i32(fcx.ccx)], false); // The only landing pad clause will be 'cleanup' let llretval = pad_bcx.landing_pad(llretty, llpersonality, 1, pad_bcx.fcx().llfn); diff --git a/src/librustc_trans/common.rs b/src/librustc_trans/common.rs index 81aba269a8b3b..f022aa50184c7 100644 --- a/src/librustc_trans/common.rs +++ b/src/librustc_trans/common.rs @@ -421,17 +421,14 @@ impl<'a, 'tcx> BlockAndBuilder<'a, 'tcx> { r } - pub fn ccx(&self) -> &'a CrateContext<'a, 'tcx> { - self.fcx.ccx - } pub fn fcx(&self) -> &'a FunctionContext<'a, 'tcx> { self.fcx } pub fn tcx(&self) -> TyCtxt<'a, 'tcx, 'tcx> { - self.fcx.ccx.tcx() + self.ccx.tcx() } pub fn sess(&self) -> &'a Session { - self.fcx.ccx.sess() + self.ccx.sess() } pub fn llbb(&self) -> BasicBlockRef { diff --git a/src/librustc_trans/glue.rs b/src/librustc_trans/glue.rs index ea34dbbeeb426..c9690ee0d1b97 100644 --- a/src/librustc_trans/glue.rs +++ b/src/librustc_trans/glue.rs @@ -474,7 +474,7 @@ fn drop_structural_ty<'a, 'tcx>(cx: BlockAndBuilder<'a, 'tcx>, } } - let value = if cx.ccx().shared().type_is_sized(t) { + let value = if cx.ccx.shared().type_is_sized(t) { adt::MaybeSizedValue::sized(av) } else { // FIXME(#36457) -- we should pass unsized values as two arguments @@ -493,7 +493,7 @@ fn drop_structural_ty<'a, 'tcx>(cx: BlockAndBuilder<'a, 'tcx>, } ty::TyArray(_, n) => { let base = get_dataptr(&cx, value.value); - let len = C_uint(cx.ccx(), n); + let len = C_uint(cx.ccx, n); let unit_ty = t.sequence_element_type(cx.tcx()); cx = tvec::slice_for_each(&cx, base, unit_ty, len, |bb, vv| drop_ty(bb, vv, unit_ty)); } @@ -514,7 +514,7 @@ fn drop_structural_ty<'a, 'tcx>(cx: BlockAndBuilder<'a, 'tcx>, for (i, &Field(_, field_ty)) in fields.iter().enumerate() { let llfld_a = adt::trans_field_ptr(&cx, t, value, Disr::from(discr), i); - let val = if cx.ccx().shared().type_is_sized(field_ty) { + let val = if cx.ccx.shared().type_is_sized(field_ty) { llfld_a } else { // FIXME(#36457) -- we should pass unsized values as two arguments @@ -530,8 +530,6 @@ fn drop_structural_ty<'a, 'tcx>(cx: BlockAndBuilder<'a, 'tcx>, bug!("Union in `glue::drop_structural_ty`"); } AdtKind::Enum => { - let fcx = cx.fcx(); - let ccx = fcx.ccx; let n_variants = adt.variants.len(); // NB: we must hit the discriminant first so that structural @@ -562,15 +560,15 @@ fn drop_structural_ty<'a, 'tcx>(cx: BlockAndBuilder<'a, 'tcx>, // from the outer function, and any other use case will only // call this for an already-valid enum in which case the `ret // void` will never be hit. 
- let ret_void_cx = fcx.build_new_block("enum-iter-ret-void"); + let ret_void_cx = cx.fcx().build_new_block("enum-iter-ret-void"); ret_void_cx.ret_void(); let llswitch = cx.switch(lldiscrim_a, ret_void_cx.llbb(), n_variants); - let next_cx = fcx.build_new_block("enum-iter-next"); + let next_cx = cx.fcx().build_new_block("enum-iter-next"); for variant in &adt.variants { let variant_cx_name = format!("enum-iter-variant-{}", &variant.disr_val.to_string()); - let variant_cx = fcx.build_new_block(&variant_cx_name); + let variant_cx = cx.fcx().build_new_block(&variant_cx_name); let case_val = adt::trans_case(&cx, t, Disr::from(variant.disr_val)); variant_cx.add_case(llswitch, case_val, variant_cx.llbb()); iter_variant(&variant_cx, t, value, variant, substs); @@ -578,7 +576,7 @@ fn drop_structural_ty<'a, 'tcx>(cx: BlockAndBuilder<'a, 'tcx>, } cx = next_cx; } - _ => ccx.sess().unimpl("value from adt::trans_switch in drop_structural_ty"), + _ => cx.ccx.sess().unimpl("value from adt::trans_switch in drop_structural_ty"), } } }, diff --git a/src/librustc_trans/mir/analyze.rs b/src/librustc_trans/mir/analyze.rs index 3f94af8255394..8df24da713588 100644 --- a/src/librustc_trans/mir/analyze.rs +++ b/src/librustc_trans/mir/analyze.rs @@ -33,13 +33,13 @@ pub fn lvalue_locals<'a, 'tcx>(mircx: &MirContext<'a, 'tcx>) -> BitVector { ty.is_unique() || ty.is_region_ptr() || ty.is_simd() || - common::type_is_zero_size(mircx.fcx.ccx, ty) + common::type_is_zero_size(mircx.ccx, ty) { // These sorts of types are immediates that we can store // in an ValueRef without an alloca. - assert!(common::type_is_immediate(mircx.fcx.ccx, ty) || - common::type_is_fat_ptr(mircx.fcx.ccx, ty)); - } else if common::type_is_imm_pair(mircx.fcx.ccx, ty) { + assert!(common::type_is_immediate(mircx.ccx, ty) || + common::type_is_fat_ptr(mircx.ccx, ty)); + } else if common::type_is_imm_pair(mircx.ccx, ty) { // We allow pairs and uses of any of their 2 fields. } else { // These sorts of types require an alloca. Note that @@ -112,7 +112,7 @@ impl<'mir, 'a, 'tcx> Visitor<'tcx> for LocalAnalyzer<'mir, 'a, 'tcx> { literal: mir::Literal::Item { def_id, .. }, .. }), ref args, .. - } if Some(def_id) == self.cx.fcx.ccx.tcx().lang_items.box_free_fn() => { + } if Some(def_id) == self.cx.ccx.tcx().lang_items.box_free_fn() => { // box_free(x) shares with `drop x` the property that it // is not guaranteed to be statically dominated by the // definition of x, so x must always be in an alloca. @@ -135,10 +135,10 @@ impl<'mir, 'a, 'tcx> Visitor<'tcx> for LocalAnalyzer<'mir, 'a, 'tcx> { // Allow uses of projections of immediate pair fields. if let mir::Lvalue::Projection(ref proj) = *lvalue { if let mir::Lvalue::Local(_) = proj.base { - let ty = proj.base.ty(self.cx.mir, self.cx.fcx.ccx.tcx()); + let ty = proj.base.ty(self.cx.mir, self.cx.ccx.tcx()); - let ty = self.cx.monomorphize(&ty.to_ty(self.cx.fcx.ccx.tcx())); - if common::type_is_imm_pair(self.cx.fcx.ccx, ty) { + let ty = self.cx.monomorphize(&ty.to_ty(self.cx.ccx.tcx())); + if common::type_is_imm_pair(self.cx.ccx, ty) { if let mir::ProjectionElem::Field(..) 
= proj.elem { if let LvalueContext::Consume = context { return; @@ -166,11 +166,11 @@ impl<'mir, 'a, 'tcx> Visitor<'tcx> for LocalAnalyzer<'mir, 'a, 'tcx> { } LvalueContext::Drop => { - let ty = lvalue.ty(self.cx.mir, self.cx.fcx.ccx.tcx()); - let ty = self.cx.monomorphize(&ty.to_ty(self.cx.fcx.ccx.tcx())); + let ty = lvalue.ty(self.cx.mir, self.cx.ccx.tcx()); + let ty = self.cx.monomorphize(&ty.to_ty(self.cx.ccx.tcx())); // Only need the lvalue if we're actually dropping it. - if self.cx.fcx.ccx.shared().type_needs_drop(ty) { + if self.cx.ccx.shared().type_needs_drop(ty) { self.mark_as_lvalue(index); } } diff --git a/src/librustc_trans/mir/block.rs b/src/librustc_trans/mir/block.rs index 3d3796aeebffc..b4e9d301039b9 100644 --- a/src/librustc_trans/mir/block.rs +++ b/src/librustc_trans/mir/block.rs @@ -838,7 +838,7 @@ impl<'a, 'tcx> MirContext<'a, 'tcx> { return block; } - if base::wants_msvc_seh(self.fcx.ccx.sess()) { + if base::wants_msvc_seh(self.ccx.sess()) { return self.blocks[target_bb]; } diff --git a/src/librustc_trans/mir/lvalue.rs b/src/librustc_trans/mir/lvalue.rs index 980c6e678c525..0cd7f007c5df9 100644 --- a/src/librustc_trans/mir/lvalue.rs +++ b/src/librustc_trans/mir/lvalue.rs @@ -140,7 +140,7 @@ impl<'a, 'tcx> MirContext<'a, 'tcx> { LvalueTy::Downcast { adt_def: _, substs: _, variant_index: v } => v, }; let discr = discr as u64; - let is_sized = self.fcx.ccx.shared().type_is_sized(projected_ty.to_ty(tcx)); + let is_sized = self.ccx.shared().type_is_sized(projected_ty.to_ty(tcx)); let base = if is_sized { adt::MaybeSizedValue::sized(tr_base.llval) } else { @@ -272,7 +272,7 @@ impl<'a, 'tcx> MirContext<'a, 'tcx> { } pub fn monomorphized_lvalue_ty(&self, lvalue: &mir::Lvalue<'tcx>) -> Ty<'tcx> { - let tcx = self.fcx.ccx.tcx(); + let tcx = self.ccx.tcx(); let lvalue_ty = lvalue.ty(&self.mir, tcx); self.monomorphize(&lvalue_ty.to_ty(tcx)) } diff --git a/src/librustc_trans/mir/mod.rs b/src/librustc_trans/mir/mod.rs index 93caa565817e4..04e8802cf4e95 100644 --- a/src/librustc_trans/mir/mod.rs +++ b/src/librustc_trans/mir/mod.rs @@ -48,9 +48,10 @@ pub struct MirContext<'a, 'tcx:'a> { debug_context: debuginfo::FunctionDebugContext, - /// Function context fcx: &'a common::FunctionContext<'a, 'tcx>, + ccx: &'a CrateContext<'a, 'tcx>, + /// When unwinding is initiated, we have to store this personality /// value somewhere so that we can load it and re-use it in the /// resume instruction. The personality is (afaik) some kind of @@ -100,7 +101,7 @@ pub struct MirContext<'a, 'tcx:'a> { impl<'a, 'tcx> MirContext<'a, 'tcx> { pub fn monomorphize(&self, value: &T) -> T where T: TransNormalize<'tcx> { - monomorphize::apply_param_substs(self.fcx.ccx.shared(), self.param_substs, value) + monomorphize::apply_param_substs(self.ccx.shared(), self.param_substs, value) } pub fn set_debug_loc(&mut self, bcx: &BlockAndBuilder, source_info: mir::SourceInfo) { @@ -123,12 +124,12 @@ impl<'a, 'tcx> MirContext<'a, 'tcx> { // (unless the crate is being compiled with `-Z debug-macros`). if source_info.span.expn_id == NO_EXPANSION || source_info.span.expn_id == COMMAND_LINE_EXPN || - self.fcx.ccx.sess().opts.debugging_opts.debug_macros { + self.ccx.sess().opts.debugging_opts.debug_macros { let scope = self.scope_metadata_for_loc(source_info.scope, source_info.span.lo); (scope, source_info.span) } else { - let cm = self.fcx.ccx.sess().codemap(); + let cm = self.ccx.sess().codemap(); // Walk up the macro expansion chain until we reach a non-expanded span. 
let mut span = source_info.span; while span.expn_id != NO_EXPANSION && span.expn_id != COMMAND_LINE_EXPN { @@ -154,10 +155,8 @@ impl<'a, 'tcx> MirContext<'a, 'tcx> { let scope_metadata = self.scopes[scope_id].scope_metadata; if pos < self.scopes[scope_id].file_start_pos || pos >= self.scopes[scope_id].file_end_pos { - let cm = self.fcx.ccx.sess().codemap(); - debuginfo::extend_scope_to_file(self.fcx.ccx, - scope_metadata, - &cm.lookup_char_pos(pos).file) + let cm = self.ccx.sess().codemap(); + debuginfo::extend_scope_to_file(self.ccx, scope_metadata, &cm.lookup_char_pos(pos).file) } else { scope_metadata } @@ -225,6 +224,7 @@ pub fn trans_mir<'a, 'tcx: 'a>( let mut mircx = MirContext { mir: mir, fcx: fcx, + ccx: fcx.ccx, llpersonalityslot: None, blocks: block_bcxs, unreachable_block: None, From d55e73954a1019a1f5dea8fdca752b8f8e6647ff Mon Sep 17 00:00:00 2001 From: Mark Simulacrum Date: Mon, 19 Dec 2016 17:59:26 -0700 Subject: [PATCH 086/103] Do not use BAB after calling unreachable. This does not make unreachable and other terminators take self by-value because it is deemed too difficult. We would need to create by-value methods on BAB that call into Builder, due to the Deref to builder. --- src/librustc_trans/callee.rs | 24 +++++++++++++----------- src/librustc_trans/meth.rs | 10 +++++----- 2 files changed, 18 insertions(+), 16 deletions(-) diff --git a/src/librustc_trans/callee.rs b/src/librustc_trans/callee.rs index 44e94a1dfe3e5..dea0a2664a07c 100644 --- a/src/librustc_trans/callee.rs +++ b/src/librustc_trans/callee.rs @@ -385,13 +385,14 @@ fn trans_fn_once_adapter_shim<'a, 'tcx>( if fn_ret.0.is_never() { bcx.unreachable(); - } - self_scope.trans(&bcx); - - if fcx.fn_ty.ret.is_indirect() || fcx.fn_ty.ret.is_ignore() { - bcx.ret_void(); } else { - bcx.ret(llret); + self_scope.trans(&bcx); + + if fcx.fn_ty.ret.is_indirect() || fcx.fn_ty.ret.is_ignore() { + bcx.ret_void(); + } else { + bcx.ret(llret); + } } ccx.instances().borrow_mut().insert(method_instance, lloncefn); @@ -521,13 +522,14 @@ fn trans_fn_pointer_shim<'a, 'tcx>( if fn_ret.0.is_never() { bcx.unreachable(); - } - - if fn_ty.ret.is_indirect() || fcx.fn_ty.ret.is_ignore() { - bcx.ret_void(); } else { - bcx.ret(llret); + if fn_ty.ret.is_indirect() || fcx.fn_ty.ret.is_ignore() { + bcx.ret_void(); + } else { + bcx.ret(llret); + } } + ccx.fn_pointer_shims().borrow_mut().insert(bare_fn_ty_maybe_ref, llfn); llfn diff --git a/src/librustc_trans/meth.rs b/src/librustc_trans/meth.rs index df893ac7ab074..89ea7a760d491 100644 --- a/src/librustc_trans/meth.rs +++ b/src/librustc_trans/meth.rs @@ -102,12 +102,12 @@ pub fn trans_object_shim<'a, 'tcx>(ccx: &'a CrateContext<'a, 'tcx>, if fn_ret.0.is_never() { bcx.unreachable(); - } - - if fn_ty.ret.is_indirect() || fcx.fn_ty.ret.is_ignore() { - bcx.ret_void(); } else { - bcx.ret(llret); + if fn_ty.ret.is_indirect() || fcx.fn_ty.ret.is_ignore() { + bcx.ret_void(); + } else { + bcx.ret(llret); + } } llfn From 6f368e6da045b0ac179ac3fb02423c4d7db3c62c Mon Sep 17 00:00:00 2001 From: Mark Simulacrum Date: Mon, 19 Dec 2016 18:07:19 -0700 Subject: [PATCH 087/103] Use fn_ty directly --- src/librustc_trans/callee.rs | 14 ++++++-------- 1 file changed, 6 insertions(+), 8 deletions(-) diff --git a/src/librustc_trans/callee.rs b/src/librustc_trans/callee.rs index dea0a2664a07c..e1baf441084ee 100644 --- a/src/librustc_trans/callee.rs +++ b/src/librustc_trans/callee.rs @@ -339,7 +339,9 @@ fn trans_fn_once_adapter_shim<'a, 'tcx>( // the first argument (`self`) will be the (by value) closure env. 
let mut llargs = get_params(fcx.llfn); - let idx = fcx.fn_ty.ret.is_indirect() as usize; + let fn_ret = callee.ty.fn_ret(); + let fn_ty = callee.direct_fn_type(bcx.ccx, &[]); + let idx = fn_ty.ret.is_indirect() as usize; let env_arg = &fcx.fn_ty.args[0]; let llenv = if env_arg.is_indirect() { llargs[idx] @@ -354,7 +356,7 @@ fn trans_fn_once_adapter_shim<'a, 'tcx>( // Adjust llargs such that llargs[self_idx..] has the call arguments. // For zero-sized closures that means sneaking in a new argument. if env_arg.is_ignore() { - if fcx.fn_ty.ret.is_indirect() { + if fn_ty.ret.is_indirect() { llargs[0] = llenv; } else { llargs.insert(0, llenv); @@ -366,8 +368,6 @@ fn trans_fn_once_adapter_shim<'a, 'tcx>( // Call the by-ref closure body with `self` in a cleanup scope, // to drop `self` when the body returns, or in case it unwinds. let self_scope = fcx.schedule_drop_mem(llenv, closure_ty); - let fn_ret = callee.ty.fn_ret(); - let fn_ty = callee.direct_fn_type(bcx.ccx, &[]); if fn_ty.ret.is_indirect() { llargs.insert(0, get_param(fcx.llfn, 0)); @@ -388,7 +388,7 @@ fn trans_fn_once_adapter_shim<'a, 'tcx>( } else { self_scope.trans(&bcx); - if fcx.fn_ty.ret.is_indirect() || fcx.fn_ty.ret.is_ignore() { + if fn_ty.ret.is_indirect() || fn_ty.ret.is_ignore() { bcx.ret_void(); } else { bcx.ret(llret); @@ -513,17 +513,15 @@ fn trans_fn_pointer_shim<'a, 'tcx>( data: Fn(llfnpointer), ty: bare_fn_ty }; - let fn_ret = callee.ty.fn_ret(); let fn_ty = callee.direct_fn_type(ccx, &[]); - let llret = bcx.call(llfnpointer, &llargs, None); fn_ty.apply_attrs_callsite(llret); if fn_ret.0.is_never() { bcx.unreachable(); } else { - if fn_ty.ret.is_indirect() || fcx.fn_ty.ret.is_ignore() { + if fn_ty.ret.is_indirect() || fn_ty.ret.is_ignore() { bcx.ret_void(); } else { bcx.ret(llret); From 0d5a8ad11029d484e48821326bb8b193c519aa49 Mon Sep 17 00:00:00 2001 From: Mark Simulacrum Date: Mon, 19 Dec 2016 18:19:19 -0700 Subject: [PATCH 088/103] Move get_landing_pad onto DropVal. --- src/librustc_trans/cleanup.rs | 149 ++++++++++++++++------------------ 1 file changed, 69 insertions(+), 80 deletions(-) diff --git a/src/librustc_trans/cleanup.rs b/src/librustc_trans/cleanup.rs index 67dc347f21fed..6dd8d8b2247b0 100644 --- a/src/librustc_trans/cleanup.rs +++ b/src/librustc_trans/cleanup.rs @@ -46,6 +46,74 @@ impl<'tcx> DropValue<'tcx> { fn trans<'a>(&self, funclet: Option<&'a Funclet>, bcx: &BlockAndBuilder<'a, 'tcx>) { glue::call_drop_glue(bcx, self.val, self.ty, self.skip_dtor, funclet) } + + /// Creates a landing pad for the top scope. The landing pad will perform all cleanups necessary + /// for an unwind and then `resume` to continue error propagation: + /// + /// landing_pad -> ... cleanups ... -> [resume] + /// + /// This should only be called once per function, as it creates an alloca for the landingpad. + fn get_landing_pad<'a>(&self, fcx: &FunctionContext<'a, 'tcx>) -> BasicBlockRef { + debug!("get_landing_pad"); + + let mut pad_bcx = fcx.build_new_block("unwind_custom_"); + + let llpersonality = pad_bcx.fcx().eh_personality(); + + let resume_bcx = fcx.build_new_block("resume"); + let val = if base::wants_msvc_seh(fcx.ccx.sess()) { + // A cleanup pad requires a personality function to be specified, so + // we do that here explicitly (happens implicitly below through + // creation of the landingpad instruction). We then create a + // cleanuppad instruction which has no filters to run cleanup on all + // exceptions. 
+ pad_bcx.set_personality_fn(llpersonality); + let llretval = pad_bcx.cleanup_pad(None, &[]); + resume_bcx.cleanup_ret(resume_bcx.cleanup_pad(None, &[]), None); + UnwindKind::CleanupPad(llretval) + } else { + // The landing pad return type (the type being propagated). Not sure + // what this represents but it's determined by the personality + // function and this is what the EH proposal example uses. + let llretty = Type::struct_(fcx.ccx, &[Type::i8p(fcx.ccx), Type::i32(fcx.ccx)], false); + + // The only landing pad clause will be 'cleanup' + let llretval = pad_bcx.landing_pad(llretty, llpersonality, 1, pad_bcx.fcx().llfn); + + // The landing pad block is a cleanup + pad_bcx.set_cleanup(llretval); + + let addr = pad_bcx.fcx().alloca(common::val_ty(llretval), ""); + Lifetime::Start.call(&pad_bcx, addr); + pad_bcx.store(llretval, addr); + let lp = resume_bcx.load(addr); + Lifetime::End.call(&resume_bcx, addr); + if !resume_bcx.sess().target.target.options.custom_unwind_resume { + resume_bcx.resume(lp); + } else { + let exc_ptr = resume_bcx.extract_value(lp, 0); + resume_bcx.call(fcx.eh_unwind_resume().reify(fcx.ccx), &[exc_ptr], None); + } + UnwindKind::LandingPad + }; + + let mut cleanup = fcx.build_new_block("clean_custom_"); + + // Insert cleanup instructions into the cleanup block + let funclet = match val { + UnwindKind::CleanupPad(_) => Some(Funclet::new(cleanup.cleanup_pad(None, &[]))), + UnwindKind::LandingPad => None, + }; + self.trans(funclet.as_ref(), &cleanup); + + // Insert instruction into cleanup block to branch to the exit + val.branch(&mut cleanup, resume_bcx.llbb()); + + // Branch into the cleanup block + val.branch(&mut pad_bcx, cleanup.llbb()); + + pad_bcx.llbb() + } } #[derive(Copy, Clone, Debug)] @@ -73,16 +141,6 @@ impl UnwindKind { } } -impl PartialEq for UnwindKind { - fn eq(&self, label: &UnwindKind) -> bool { - match (*self, *label) { - (UnwindKind::LandingPad, UnwindKind::LandingPad) | - (UnwindKind::CleanupPad(..), UnwindKind::CleanupPad(..)) => true, - _ => false, - } - } -} - impl<'a, 'tcx> FunctionContext<'a, 'tcx> { /// Schedules a (deep) drop of `val`, which is a pointer to an instance of `ty` pub fn schedule_drop_mem(&self, val: ValueRef, ty: Ty<'tcx>) -> CleanupScope<'tcx> { @@ -126,7 +184,7 @@ impl<'tcx> CleanupScope<'tcx> { CleanupScope { cleanup: Some(drop_val), landing_pad: if !fcx.ccx.sess().no_landing_pads() { - Some(CleanupScope::get_landing_pad(fcx, &drop_val)) + Some(drop_val.get_landing_pad(fcx)) } else { None }, @@ -145,73 +203,4 @@ impl<'tcx> CleanupScope<'tcx> { cleanup.trans(None, &bcx); } } - - /// Creates a landing pad for the top scope. The landing pad will perform all cleanups necessary - /// for an unwind and then `resume` to continue error propagation: - /// - /// landing_pad -> ... cleanups ... -> [resume] - /// - /// This should only be called once per function, as it creates an alloca for the landingpad. - fn get_landing_pad<'a>(fcx: &FunctionContext<'a, 'tcx>, drop_val: &DropValue<'tcx>) - -> BasicBlockRef { - debug!("get_landing_pad"); - - let mut pad_bcx = fcx.build_new_block("unwind_custom_"); - - let llpersonality = pad_bcx.fcx().eh_personality(); - - let resume_bcx = fcx.build_new_block("resume"); - let val = if base::wants_msvc_seh(fcx.ccx.sess()) { - // A cleanup pad requires a personality function to be specified, so - // we do that here explicitly (happens implicitly below through - // creation of the landingpad instruction). 
We then create a - // cleanuppad instruction which has no filters to run cleanup on all - // exceptions. - pad_bcx.set_personality_fn(llpersonality); - let llretval = pad_bcx.cleanup_pad(None, &[]); - resume_bcx.cleanup_ret(resume_bcx.cleanup_pad(None, &[]), None); - UnwindKind::CleanupPad(llretval) - } else { - // The landing pad return type (the type being propagated). Not sure - // what this represents but it's determined by the personality - // function and this is what the EH proposal example uses. - let llretty = Type::struct_(fcx.ccx, &[Type::i8p(fcx.ccx), Type::i32(fcx.ccx)], false); - - // The only landing pad clause will be 'cleanup' - let llretval = pad_bcx.landing_pad(llretty, llpersonality, 1, pad_bcx.fcx().llfn); - - // The landing pad block is a cleanup - pad_bcx.set_cleanup(llretval); - - let addr = pad_bcx.fcx().alloca(common::val_ty(llretval), ""); - Lifetime::Start.call(&pad_bcx, addr); - pad_bcx.store(llretval, addr); - let lp = resume_bcx.load(addr); - Lifetime::End.call(&resume_bcx, addr); - if !resume_bcx.sess().target.target.options.custom_unwind_resume { - resume_bcx.resume(lp); - } else { - let exc_ptr = resume_bcx.extract_value(lp, 0); - resume_bcx.call(fcx.eh_unwind_resume().reify(fcx.ccx), &[exc_ptr], None); - } - UnwindKind::LandingPad - }; - - let mut cleanup = fcx.build_new_block("clean_custom_"); - - // Insert cleanup instructions into the cleanup block - let funclet = match val { - UnwindKind::CleanupPad(_) => Some(Funclet::new(cleanup.cleanup_pad(None, &[]))), - UnwindKind::LandingPad => None, - }; - drop_val.trans(funclet.as_ref(), &cleanup); - - // Insert instruction into cleanup block to branch to the exit - val.branch(&mut cleanup, resume_bcx.llbb()); - - // Branch into the cleanup block - val.branch(&mut pad_bcx, cleanup.llbb()); - - return pad_bcx.llbb(); - } } From 6a1ec55c4722aa1d2f4a68e062bb1e2b7342ae70 Mon Sep 17 00:00:00 2001 From: Mark Simulacrum Date: Mon, 19 Dec 2016 18:25:56 -0700 Subject: [PATCH 089/103] Remove needless check --- src/librustc_trans/glue.rs | 2 -- 1 file changed, 2 deletions(-) diff --git a/src/librustc_trans/glue.rs b/src/librustc_trans/glue.rs index c9690ee0d1b97..d50352f262314 100644 --- a/src/librustc_trans/glue.rs +++ b/src/librustc_trans/glue.rs @@ -49,8 +49,6 @@ pub fn trans_exchange_free_dyn<'a, 'tcx>( let ccx = bcx.ccx; let fn_ty = callee.direct_fn_type(ccx, &[]); - assert!(!fn_ty.ret.is_indirect() && fn_ty.ret.cast.is_none()); - let llret = bcx.call(callee.reify(ccx), &args[..], None); fn_ty.apply_attrs_callsite(llret); } From b9f1064760b19d8b861f8113be4fc147240096ab Mon Sep 17 00:00:00 2001 From: Mark Simulacrum Date: Mon, 19 Dec 2016 18:34:07 -0700 Subject: [PATCH 090/103] Inline make_drop_glue --- src/librustc_trans/glue.rs | 156 ++++++++++++++++++------------------- 1 file changed, 74 insertions(+), 82 deletions(-) diff --git a/src/librustc_trans/glue.rs b/src/librustc_trans/glue.rs index d50352f262314..1e5bd7eb60680 100644 --- a/src/librustc_trans/glue.rs +++ b/src/librustc_trans/glue.rs @@ -200,7 +200,80 @@ pub fn implement_drop_glue<'a, 'tcx>(ccx: &CrateContext<'a, 'tcx>, g: DropGlueKi // llfn is expected be declared to take a parameter of the appropriate // type, so we don't need to explicitly cast the function parameter. - let bcx = make_drop_glue(bcx, get_param(llfn, 0), g); + // NB: v0 is an *alias* of type t here, not a direct value. + // Only drop the value when it ... 
well, we used to check for + // non-null, (and maybe we need to continue doing so), but we now + // must definitely check for special bit-patterns corresponding to + // the special dtor markings. + let v0 = get_param(llfn, 0); + let t = g.ty(); + + let skip_dtor = match g { + DropGlueKind::Ty(_) => false, + DropGlueKind::TyContents(_) => true + }; + + let bcx = match t.sty { + ty::TyBox(content_ty) => { + // Support for TyBox is built-in and its drop glue is + // special. It may move to library and have Drop impl. As + // a safe-guard, assert TyBox not used with TyContents. + assert!(!skip_dtor); + if !bcx.ccx.shared().type_is_sized(content_ty) { + let llval = get_dataptr(&bcx, v0); + let llbox = bcx.load(llval); + drop_ty(&bcx, v0, content_ty); + // FIXME(#36457) -- we should pass unsized values to drop glue as two arguments + let info = get_meta(&bcx, v0); + let info = bcx.load(info); + let (llsize, llalign) = size_and_align_of_dst(&bcx, content_ty, info); + + // `Box` does not allocate. + let needs_free = bcx.icmp(llvm::IntNE, llsize, C_uint(bcx.ccx, 0u64)); + if const_to_opt_uint(needs_free) == Some(0) { + bcx + } else { + let next_cx = bcx.fcx().build_new_block("next"); + let cond_cx = bcx.fcx().build_new_block("cond"); + bcx.cond_br(needs_free, cond_cx.llbb(), next_cx.llbb()); + trans_exchange_free_dyn(&cond_cx, llbox, llsize, llalign); + cond_cx.br(next_cx.llbb()); + next_cx + } + } else { + let llval = v0; + let llbox = bcx.load(llval); + drop_ty(&bcx, llbox, content_ty); + trans_exchange_free_ty(&bcx, llbox, content_ty); + bcx + } + } + ty::TyDynamic(..) => { + // No support in vtable for distinguishing destroying with + // versus without calling Drop::drop. Assert caller is + // okay with always calling the Drop impl, if any. + // FIXME(#36457) -- we should pass unsized values to drop glue as two arguments + assert!(!skip_dtor); + let data_ptr = get_dataptr(&bcx, v0); + let vtable_ptr = bcx.load(get_meta(&bcx, v0)); + let dtor = bcx.load(vtable_ptr); + bcx.call(dtor, &[bcx.pointercast(bcx.load(data_ptr), Type::i8p(bcx.ccx))], None); + bcx + } + ty::TyAdt(def, ..) if def.dtor_kind().is_present() && !skip_dtor => { + trans_custom_dtor(bcx, t, v0, def.is_union()) + } + ty::TyAdt(def, ..) if def.is_union() => { + bcx + } + _ => { + if bcx.ccx.shared().type_needs_drop(t) { + drop_structural_ty(bcx, v0, t) + } else { + bcx + } + } + }; bcx.ret_void(); } @@ -373,87 +446,6 @@ pub fn size_and_align_of_dst<'a, 'tcx>(bcx: &BlockAndBuilder<'a, 'tcx>, } } -fn make_drop_glue<'a, 'tcx>(bcx: BlockAndBuilder<'a, 'tcx>, - v0: ValueRef, - g: DropGlueKind<'tcx>) - -> BlockAndBuilder<'a, 'tcx> { - let t = g.ty(); - - let skip_dtor = match g { DropGlueKind::Ty(_) => false, DropGlueKind::TyContents(_) => true }; - // NB: v0 is an *alias* of type t here, not a direct value. - // Only drop the value when it ... well, we used to check for - // non-null, (and maybe we need to continue doing so), but we now - // must definitely check for special bit-patterns corresponding to - // the special dtor markings. - - match t.sty { - ty::TyBox(content_ty) => { - // Support for TyBox is built-in and its drop glue is - // special. It may move to library and have Drop impl. As - // a safe-guard, assert TyBox not used with TyContents. 
- assert!(!skip_dtor); - if !bcx.ccx.shared().type_is_sized(content_ty) { - let llval = get_dataptr(&bcx, v0); - let llbox = bcx.load(llval); - drop_ty(&bcx, v0, content_ty); - // FIXME(#36457) -- we should pass unsized values to drop glue as two arguments - let info = get_meta(&bcx, v0); - let info = bcx.load(info); - let (llsize, llalign) = size_and_align_of_dst(&bcx, content_ty, info); - - // `Box` does not allocate. - let needs_free = bcx.icmp( - llvm::IntNE, - llsize, - C_uint(bcx.ccx, 0u64), - ); - if const_to_opt_uint(needs_free) == Some(0) { - bcx - } else { - let fcx = bcx.fcx(); - let next_cx = fcx.build_new_block("next"); - let cond_cx = fcx.build_new_block("cond"); - bcx.cond_br(needs_free, cond_cx.llbb(), next_cx.llbb()); - trans_exchange_free_dyn(&cond_cx, llbox, llsize, llalign); - cond_cx.br(next_cx.llbb()); - next_cx - } - } else { - let llval = v0; - let llbox = bcx.load(llval); - drop_ty(&bcx, llbox, content_ty); - trans_exchange_free_ty(&bcx, llbox, content_ty); - bcx - } - } - ty::TyDynamic(..) => { - // No support in vtable for distinguishing destroying with - // versus without calling Drop::drop. Assert caller is - // okay with always calling the Drop impl, if any. - // FIXME(#36457) -- we should pass unsized values to drop glue as two arguments - assert!(!skip_dtor); - let data_ptr = get_dataptr(&bcx, v0); - let vtable_ptr = bcx.load(get_meta(&bcx, v0)); - let dtor = bcx.load(vtable_ptr); - bcx.call(dtor, &[bcx.pointercast(bcx.load(data_ptr), Type::i8p(bcx.ccx))], None); - bcx - } - ty::TyAdt(def, ..) if def.dtor_kind().is_present() && !skip_dtor => { - trans_custom_dtor(bcx, t, v0, def.is_union()) - } - ty::TyAdt(def, ..) if def.is_union() => { - bcx - } - _ => { - if bcx.ccx.shared().type_needs_drop(t) { - drop_structural_ty(bcx, v0, t) - } else { - bcx - } - } - } -} - // Iterates through the elements of a structural type, dropping them. fn drop_structural_ty<'a, 'tcx>(cx: BlockAndBuilder<'a, 'tcx>, av: ValueRef, From 98a13ffe443442054b41bee0dace0265c31c29a7 Mon Sep 17 00:00:00 2001 From: Mark Simulacrum Date: Mon, 19 Dec 2016 18:34:42 -0700 Subject: [PATCH 091/103] Remove outdated comment --- src/librustc_trans/mir/mod.rs | 1 - 1 file changed, 1 deletion(-) diff --git a/src/librustc_trans/mir/mod.rs b/src/librustc_trans/mir/mod.rs index 04e8802cf4e95..cb1ad3f415e8a 100644 --- a/src/librustc_trans/mir/mod.rs +++ b/src/librustc_trans/mir/mod.rs @@ -304,7 +304,6 @@ pub fn trans_mir<'a, 'tcx: 'a>( // emitting should be enabled. debuginfo::start_emitting_source_locations(&mircx.debug_context); - // If false, all funclets should be None (which is the default) let funclets: IndexVec> = mircx.cleanup_kinds.iter_enumerated().map(|(bb, cleanup_kind)| { if let CleanupKind::Funclet = *cleanup_kind { From 295ea0d6c36ce6592be9161f2b5d539cfff236bc Mon Sep 17 00:00:00 2001 From: Mark Simulacrum Date: Mon, 19 Dec 2016 18:47:30 -0700 Subject: [PATCH 092/103] Reduce coerce_unsized_into to one call We cannot inline due to it being recursive. --- src/librustc_trans/mir/rvalue.rs | 15 +++++---------- 1 file changed, 5 insertions(+), 10 deletions(-) diff --git a/src/librustc_trans/mir/rvalue.rs b/src/librustc_trans/mir/rvalue.rs index 0bca8429d22b2..a22b0a13f7836 100644 --- a/src/librustc_trans/mir/rvalue.rs +++ b/src/librustc_trans/mir/rvalue.rs @@ -69,7 +69,7 @@ impl<'a, 'tcx> MirContext<'a, 'tcx> { // so the (generic) MIR may not be able to expand it. 
let operand = self.trans_operand(&bcx, source); let operand = operand.pack_if_pair(&bcx); - match operand.val { + let llref = match operand.val { OperandValue::Pair(..) => bug!(), OperandValue::Immediate(llval) => { // unsize from an immediate structure. We don't @@ -81,16 +81,11 @@ impl<'a, 'tcx> MirContext<'a, 'tcx> { debug!("trans_rvalue: creating ugly alloca"); let lltemp = base::alloc_ty(&bcx, operand.ty, "__unsize_temp"); base::store_ty(&bcx, llval, lltemp, operand.ty); - base::coerce_unsized_into(&bcx, - lltemp, operand.ty, - dest.llval, cast_ty); + lltemp } - OperandValue::Ref(llref) => { - base::coerce_unsized_into(&bcx, - llref, operand.ty, - dest.llval, cast_ty); - } - } + OperandValue::Ref(llref) => llref + }; + base::coerce_unsized_into(&bcx, llref, operand.ty, dest.llval, cast_ty); bcx } From 15b9b27bb091c98fe4504c24bc62c738411c8c8d Mon Sep 17 00:00:00 2001 From: Mark Simulacrum Date: Mon, 19 Dec 2016 18:51:10 -0700 Subject: [PATCH 093/103] slice_for_each gives a reference already --- src/librustc_trans/mir/rvalue.rs | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/librustc_trans/mir/rvalue.rs b/src/librustc_trans/mir/rvalue.rs index a22b0a13f7836..b17550087edf7 100644 --- a/src/librustc_trans/mir/rvalue.rs +++ b/src/librustc_trans/mir/rvalue.rs @@ -95,7 +95,7 @@ impl<'a, 'tcx> MirContext<'a, 'tcx> { let size = C_uint(bcx.ccx, size); let base = base::get_dataptr(&bcx, dest.llval); tvec::slice_for_each(&bcx, base, tr_elem.ty, size, |bcx, llslot| { - self.store_operand(&bcx, llslot, tr_elem); + self.store_operand(bcx, llslot, tr_elem); }) } From bd009dc4441eff189066dd89447a829f5a190dee Mon Sep 17 00:00:00 2001 From: Mark Simulacrum Date: Mon, 19 Dec 2016 19:16:36 -0700 Subject: [PATCH 094/103] Remove fn_ty from FunctionContext --- src/librustc_trans/base.rs | 25 ++++++++++++------------- src/librustc_trans/callee.rs | 9 +++++---- src/librustc_trans/common.rs | 12 ++---------- src/librustc_trans/glue.rs | 4 ++-- src/librustc_trans/intrinsic.rs | 3 +-- src/librustc_trans/meth.rs | 16 +++++----------- src/librustc_trans/mir/block.rs | 2 +- src/librustc_trans/mir/mod.rs | 19 ++++++++++++------- 8 files changed, 40 insertions(+), 50 deletions(-) diff --git a/src/librustc_trans/base.rs b/src/librustc_trans/base.rs index 3f5cbd6804248..76bb1c56af381 100644 --- a/src/librustc_trans/base.rs +++ b/src/librustc_trans/base.rs @@ -598,10 +598,9 @@ pub fn trans_instance<'a, 'tcx>(ccx: &CrateContext<'a, 'tcx>, instance: Instance let fn_ty = FnType::new(ccx, abi, &sig, &[]); - let fcx = FunctionContext::new(ccx, lldecl, fn_ty); - + let fcx = FunctionContext::new(ccx, lldecl); let mir = ccx.tcx().item_mir(instance.def); - mir::trans_mir(&fcx, &mir, instance, &sig, abi); + mir::trans_mir(&fcx, fn_ty, &mir, instance, &sig, abi); } pub fn trans_ctor_shim<'a, 'tcx>(ccx: &CrateContext<'a, 'tcx>, @@ -618,28 +617,28 @@ pub fn trans_ctor_shim<'a, 'tcx>(ccx: &CrateContext<'a, 'tcx>, let sig = ccx.tcx().erase_late_bound_regions_and_normalize(&ctor_ty.fn_sig()); let fn_ty = FnType::new(ccx, Abi::Rust, &sig, &[]); - let fcx = FunctionContext::new(ccx, llfndecl, fn_ty); + let fcx = FunctionContext::new(ccx, llfndecl); let bcx = fcx.get_entry_block(); - if !fcx.fn_ty.ret.is_ignore() { + if !fn_ty.ret.is_ignore() { // But if there are no nested returns, we skip the indirection // and have a single retslot - let dest = if fcx.fn_ty.ret.is_indirect() { + let dest = if fn_ty.ret.is_indirect() { get_param(fcx.llfn, 0) } else { // We create an alloca to hold a pointer of type `ret.original_ty` 
// which will hold the pointer to the right alloca which has the // final ret value - fcx.alloca(fcx.fn_ty.ret.memory_ty(ccx), "sret_slot") + fcx.alloca(fn_ty.ret.memory_ty(ccx), "sret_slot") }; let dest_val = adt::MaybeSizedValue::sized(dest); // Can return unsized value - let mut llarg_idx = fcx.fn_ty.ret.is_indirect() as usize; + let mut llarg_idx = fn_ty.ret.is_indirect() as usize; let mut arg_idx = 0; for (i, arg_ty) in sig.inputs().iter().enumerate() { let lldestptr = adt::trans_field_ptr(&bcx, sig.output(), dest_val, Disr::from(disr), i); - let arg = &fcx.fn_ty.args[arg_idx]; + let arg = &fn_ty.args[arg_idx]; arg_idx += 1; if common::type_is_fat_ptr(bcx.ccx, arg_ty) { - let meta = &fcx.fn_ty.args[arg_idx]; + let meta = &fn_ty.args[arg_idx]; arg_idx += 1; arg.store_fn_arg(&bcx, &mut llarg_idx, get_dataptr(&bcx, lldestptr)); meta.store_fn_arg(&bcx, &mut llarg_idx, get_meta(&bcx, lldestptr)); @@ -649,14 +648,14 @@ pub fn trans_ctor_shim<'a, 'tcx>(ccx: &CrateContext<'a, 'tcx>, } adt::trans_set_discr(&bcx, sig.output(), dest, disr); - if fcx.fn_ty.ret.is_indirect() { + if fn_ty.ret.is_indirect() { bcx.ret_void(); return; } - if let Some(cast_ty) = fcx.fn_ty.ret.cast { + if let Some(cast_ty) = fn_ty.ret.cast { let load = bcx.load(bcx.pointercast(dest, cast_ty.ptr_to())); - let llalign = llalign_of_min(ccx, fcx.fn_ty.ret.ty); + let llalign = llalign_of_min(ccx, fn_ty.ret.ty); unsafe { llvm::LLVMSetAlignment(load, llalign); } diff --git a/src/librustc_trans/callee.rs b/src/librustc_trans/callee.rs index e1baf441084ee..651cc40f993c2 100644 --- a/src/librustc_trans/callee.rs +++ b/src/librustc_trans/callee.rs @@ -328,7 +328,8 @@ fn trans_fn_once_adapter_shim<'a, 'tcx>( let lloncefn = declare::define_internal_fn(ccx, &function_name, llonce_fn_ty); attributes::set_frame_pointer_elimination(ccx, lloncefn); - let fcx = FunctionContext::new(ccx, lloncefn, fn_ty); + let orig_fn_ty = fn_ty; + let fcx = FunctionContext::new(ccx, lloncefn); let mut bcx = fcx.get_entry_block(); let callee = Callee { @@ -342,7 +343,7 @@ fn trans_fn_once_adapter_shim<'a, 'tcx>( let fn_ret = callee.ty.fn_ret(); let fn_ty = callee.direct_fn_type(bcx.ccx, &[]); let idx = fn_ty.ret.is_indirect() as usize; - let env_arg = &fcx.fn_ty.args[0]; + let env_arg = &orig_fn_ty.args[0]; let llenv = if env_arg.is_indirect() { llargs[idx] } else { @@ -494,12 +495,12 @@ fn trans_fn_pointer_shim<'a, 'tcx>( let llfn = declare::define_internal_fn(ccx, &function_name, tuple_fn_ty); attributes::set_frame_pointer_elimination(ccx, llfn); // - let fcx = FunctionContext::new(ccx, llfn, fn_ty); + let fcx = FunctionContext::new(ccx, llfn); let bcx = fcx.get_entry_block(); let mut llargs = get_params(fcx.llfn); - let self_arg = llargs.remove(fcx.fn_ty.ret.is_indirect() as usize); + let self_arg = llargs.remove(fn_ty.ret.is_indirect() as usize); let llfnpointer = llfnpointer.unwrap_or_else(|| { // the first argument (`self`) will be ptr to the fn pointer if is_by_ref { diff --git a/src/librustc_trans/common.rs b/src/librustc_trans/common.rs index f022aa50184c7..b9e17c53c0afb 100644 --- a/src/librustc_trans/common.rs +++ b/src/librustc_trans/common.rs @@ -21,7 +21,7 @@ use rustc::hir::def_id::DefId; use rustc::hir::map::DefPathData; use rustc::util::common::MemoizationMap; use middle::lang_items::LangItem; -use abi::{Abi, FnType}; +use abi::Abi; use base; use builder::Builder; use callee::Callee; @@ -236,9 +236,6 @@ pub struct FunctionContext<'a, 'tcx: 'a> { // allocas, so that LLVM will coalesce them into a single alloca call. 
alloca_insert_pt: Option, - // Describes the return/argument LLVM types and their ABI handling. - pub fn_ty: FnType, - // This function's enclosing crate context. pub ccx: &'a CrateContext<'a, 'tcx>, @@ -248,15 +245,10 @@ pub struct FunctionContext<'a, 'tcx: 'a> { impl<'a, 'tcx> FunctionContext<'a, 'tcx> { /// Create a function context for the given function. /// Call FunctionContext::get_entry_block for the first entry block. - pub fn new( - ccx: &'a CrateContext<'a, 'tcx>, - llfndecl: ValueRef, - fn_ty: FnType, - ) -> FunctionContext<'a, 'tcx> { + pub fn new(ccx: &'a CrateContext<'a, 'tcx>, llfndecl: ValueRef) -> FunctionContext<'a, 'tcx> { let mut fcx = FunctionContext { llfn: llfndecl, alloca_insert_pt: None, - fn_ty: fn_ty, ccx: ccx, alloca_builder: Builder::with_ccx(ccx), }; diff --git a/src/librustc_trans/glue.rs b/src/librustc_trans/glue.rs index 1e5bd7eb60680..5fb4a0e088f62 100644 --- a/src/librustc_trans/glue.rs +++ b/src/librustc_trans/glue.rs @@ -186,9 +186,9 @@ fn get_drop_glue_core<'a, 'tcx>(ccx: &CrateContext<'a, 'tcx>, g: DropGlueKind<'t pub fn implement_drop_glue<'a, 'tcx>(ccx: &CrateContext<'a, 'tcx>, g: DropGlueKind<'tcx>) { assert_eq!(g.ty(), get_drop_glue_type(ccx.shared(), g.ty())); - let (llfn, fn_ty) = ccx.drop_glues().borrow().get(&g).unwrap().clone(); + let (llfn, _) = ccx.drop_glues().borrow().get(&g).unwrap().clone(); - let fcx = FunctionContext::new(ccx, llfn, fn_ty); + let fcx = FunctionContext::new(ccx, llfn); let bcx = fcx.get_entry_block(); ccx.stats().n_glues_created.set(ccx.stats().n_glues_created.get() + 1); diff --git a/src/librustc_trans/intrinsic.rs b/src/librustc_trans/intrinsic.rs index d218459eeb59c..e052fa01da05d 100644 --- a/src/librustc_trans/intrinsic.rs +++ b/src/librustc_trans/intrinsic.rs @@ -878,7 +878,6 @@ fn gen_fn<'a, 'tcx>(fcx: &FunctionContext<'a, 'tcx>, -> ValueRef { let ccx = fcx.ccx; let sig = ccx.tcx().mk_fn_sig(inputs.into_iter(), output, false); - let fn_ty = FnType::new(ccx, Abi::Rust, &sig, &[]); let rust_fn_ty = ccx.tcx().mk_fn_ptr(ccx.tcx().mk_bare_fn(ty::BareFnTy { unsafety: hir::Unsafety::Unsafe, @@ -886,7 +885,7 @@ fn gen_fn<'a, 'tcx>(fcx: &FunctionContext<'a, 'tcx>, sig: ty::Binder(sig) })); let llfn = declare::define_internal_fn(ccx, name, rust_fn_ty); - let fcx = FunctionContext::new(ccx, llfn, fn_ty); + let fcx = FunctionContext::new(ccx, llfn); trans(fcx.get_entry_block()); llfn } diff --git a/src/librustc_trans/meth.rs b/src/librustc_trans/meth.rs index 89ea7a760d491..cf50e7be2afb5 100644 --- a/src/librustc_trans/meth.rs +++ b/src/librustc_trans/meth.rs @@ -11,7 +11,6 @@ use attributes; use llvm::{ValueRef, get_params}; use rustc::traits; -use abi::FnType; use callee::{Callee, CalleeData}; use common::*; use consts; @@ -63,25 +62,20 @@ pub fn get_virtual_method<'a, 'tcx>(bcx: &BlockAndBuilder<'a, 'tcx>, pub fn trans_object_shim<'a, 'tcx>(ccx: &'a CrateContext<'a, 'tcx>, callee: Callee<'tcx>) -> ValueRef { - let tcx = ccx.tcx(); - debug!("trans_object_shim({:?})", callee); - let (sig, abi, function_name) = match callee.ty.sty { - ty::TyFnDef(def_id, substs, f) => { + let function_name = match callee.ty.sty { + ty::TyFnDef(def_id, substs, _) => { let instance = Instance::new(def_id, substs); - (&f.sig, f.abi, instance.symbol_name(ccx.shared())) + instance.symbol_name(ccx.shared()) } _ => bug!() }; - let sig = tcx.erase_late_bound_regions_and_normalize(sig); - let fn_ty = FnType::new(ccx, abi, &sig, &[]); - let llfn = declare::define_internal_fn(ccx, &function_name, callee.ty); 
attributes::set_frame_pointer_elimination(ccx, llfn); - let fcx = FunctionContext::new(ccx, llfn, fn_ty); + let fcx = FunctionContext::new(ccx, llfn); let bcx = fcx.get_entry_block(); let mut llargs = get_params(fcx.llfn); @@ -103,7 +97,7 @@ pub fn trans_object_shim<'a, 'tcx>(ccx: &'a CrateContext<'a, 'tcx>, if fn_ret.0.is_never() { bcx.unreachable(); } else { - if fn_ty.ret.is_indirect() || fcx.fn_ty.ret.is_ignore() { + if fn_ty.ret.is_indirect() || fn_ty.ret.is_ignore() { bcx.ret_void(); } else { bcx.ret(llret); diff --git a/src/librustc_trans/mir/block.rs b/src/librustc_trans/mir/block.rs index b4e9d301039b9..d36857a8581a8 100644 --- a/src/librustc_trans/mir/block.rs +++ b/src/librustc_trans/mir/block.rs @@ -192,7 +192,7 @@ impl<'a, 'tcx> MirContext<'a, 'tcx> { } mir::TerminatorKind::Return => { - let ret = bcx.fcx().fn_ty.ret; + let ret = self.fn_ty.ret; if ret.is_ignore() || ret.is_indirect() { bcx.ret_void(); return; diff --git a/src/librustc_trans/mir/mod.rs b/src/librustc_trans/mir/mod.rs index cb1ad3f415e8a..581e403f4208f 100644 --- a/src/librustc_trans/mir/mod.rs +++ b/src/librustc_trans/mir/mod.rs @@ -22,6 +22,7 @@ use base; use common::{self, BlockAndBuilder, CrateContext, FunctionContext, C_null, Funclet}; use debuginfo::{self, declare_local, VariableAccess, VariableKind, FunctionDebugContext}; use monomorphize::{self, Instance}; +use abi::FnType; use machine; use type_of; @@ -52,6 +53,8 @@ pub struct MirContext<'a, 'tcx:'a> { ccx: &'a CrateContext<'a, 'tcx>, + fn_ty: FnType, + /// When unwinding is initiated, we have to store this personality /// value somewhere so that we can load it and re-use it in the /// resume instruction. The personality is (afaik) some kind of @@ -197,6 +200,7 @@ impl<'tcx> LocalRef<'tcx> { pub fn trans_mir<'a, 'tcx: 'a>( fcx: &'a FunctionContext<'a, 'tcx>, + fn_ty: FnType, mir: &'a Mir<'tcx>, instance: Instance<'tcx>, sig: &ty::FnSig<'tcx>, @@ -224,6 +228,7 @@ pub fn trans_mir<'a, 'tcx: 'a>( let mut mircx = MirContext { mir: mir, fcx: fcx, + fn_ty: fn_ty, ccx: fcx.ccx, llpersonalityslot: None, blocks: block_bcxs, @@ -271,7 +276,7 @@ pub fn trans_mir<'a, 'tcx: 'a>( LocalRef::Lvalue(lvalue) } else { // Temporary or return pointer - if local == mir::RETURN_POINTER && fcx.fn_ty.ret.is_indirect() { + if local == mir::RETURN_POINTER && mircx.fn_ty.ret.is_indirect() { debug!("alloc: {:?} (return pointer) -> lvalue", local); let llretptr = llvm::get_param(fcx.llfn, 0); LocalRef::Lvalue(LvalueRef::new_sized(llretptr, LvalueTy::from_ty(ty))) @@ -351,7 +356,7 @@ fn arg_local_refs<'a, 'tcx>(bcx: &BlockAndBuilder<'a, 'tcx>, let fcx = bcx.fcx(); let tcx = bcx.tcx(); let mut idx = 0; - let mut llarg_idx = fcx.fn_ty.ret.is_indirect() as usize; + let mut llarg_idx = mircx.fn_ty.ret.is_indirect() as usize; // Get the argument scope, if it exists and if we need it. let arg_scope = scopes[mir::ARGUMENT_VISIBILITY_SCOPE]; @@ -379,12 +384,12 @@ fn arg_local_refs<'a, 'tcx>(bcx: &BlockAndBuilder<'a, 'tcx>, let lltemp = base::alloc_ty(&bcx, arg_ty, &format!("arg{}", arg_index)); for (i, &tupled_arg_ty) in tupled_arg_tys.iter().enumerate() { let dst = bcx.struct_gep(lltemp, i); - let arg = &fcx.fn_ty.args[idx]; + let arg = &mircx.fn_ty.args[idx]; idx += 1; if common::type_is_fat_ptr(bcx.ccx, tupled_arg_ty) { // We pass fat pointers as two words, but inside the tuple // they are the two sub-fields of a single aggregate field. 
- let meta = &fcx.fn_ty.args[idx]; + let meta = &mircx.fn_ty.args[idx]; idx += 1; arg.store_fn_arg(bcx, &mut llarg_idx, base::get_dataptr(bcx, dst)); meta.store_fn_arg(bcx, &mut llarg_idx, base::get_meta(bcx, dst)); @@ -413,7 +418,7 @@ fn arg_local_refs<'a, 'tcx>(bcx: &BlockAndBuilder<'a, 'tcx>, return LocalRef::Lvalue(LvalueRef::new_sized(lltemp, LvalueTy::from_ty(arg_ty))); } - let arg = &fcx.fn_ty.args[idx]; + let arg = &mircx.fn_ty.args[idx]; idx += 1; let llval = if arg.is_indirect() && bcx.sess().opts.debuginfo != FullDebugInfo { // Don't copy an indirect argument to an alloca, the caller @@ -442,7 +447,7 @@ fn arg_local_refs<'a, 'tcx>(bcx: &BlockAndBuilder<'a, 'tcx>, let llarg = llvm::get_param(fcx.llfn, llarg_idx as c_uint); llarg_idx += 1; let val = if common::type_is_fat_ptr(bcx.ccx, arg_ty) { - let meta = &fcx.fn_ty.args[idx]; + let meta = &mircx.fn_ty.args[idx]; idx += 1; assert_eq!((meta.cast, meta.pad), (None, None)); let llmeta = llvm::get_param(fcx.llfn, llarg_idx as c_uint); @@ -462,7 +467,7 @@ fn arg_local_refs<'a, 'tcx>(bcx: &BlockAndBuilder<'a, 'tcx>, // we pass fat pointers as two words, but we want to // represent them internally as a pointer to two words, // so make an alloca to store them in. - let meta = &fcx.fn_ty.args[idx]; + let meta = &mircx.fn_ty.args[idx]; idx += 1; arg.store_fn_arg(bcx, &mut llarg_idx, base::get_dataptr(bcx, lltemp)); meta.store_fn_arg(bcx, &mut llarg_idx, base::get_meta(bcx, lltemp)); From 3198797050cc46cb784a6454f4ad0fcc9d068211 Mon Sep 17 00:00:00 2001 From: Mark Simulacrum Date: Mon, 19 Dec 2016 19:17:03 -0700 Subject: [PATCH 095/103] Remove outdated comment --- src/librustc_trans/common.rs | 3 --- 1 file changed, 3 deletions(-) diff --git a/src/librustc_trans/common.rs b/src/librustc_trans/common.rs index b9e17c53c0afb..59a44133f743a 100644 --- a/src/librustc_trans/common.rs +++ b/src/librustc_trans/common.rs @@ -229,9 +229,6 @@ pub struct FunctionContext<'a, 'tcx: 'a> { // section of the executable we're generating. pub llfn: ValueRef, - // These pub elements: "hoisted basic blocks" containing - // administrative activities that have to happen in only one place in - // the function, due to LLVM's quirks. // A marker for the place where we want to insert the function's static // allocas, so that LLVM will coalesce them into a single alloca call. 
alloca_insert_pt: Option, From 57914f626b32d924ad183e250459ce278dd0b20b Mon Sep 17 00:00:00 2001 From: Mark Simulacrum Date: Mon, 19 Dec 2016 19:32:50 -0700 Subject: [PATCH 096/103] Move eh_personality() onto CrateContext --- src/librustc_trans/cleanup.rs | 2 +- src/librustc_trans/common.rs | 44 ----------------------------- src/librustc_trans/context.rs | 50 ++++++++++++++++++++++++++++++--- src/librustc_trans/intrinsic.rs | 4 +-- src/librustc_trans/mir/block.rs | 2 +- src/librustc_trans/mir/mod.rs | 2 +- 6 files changed, 51 insertions(+), 53 deletions(-) diff --git a/src/librustc_trans/cleanup.rs b/src/librustc_trans/cleanup.rs index 6dd8d8b2247b0..21eccb101f73a 100644 --- a/src/librustc_trans/cleanup.rs +++ b/src/librustc_trans/cleanup.rs @@ -58,7 +58,7 @@ impl<'tcx> DropValue<'tcx> { let mut pad_bcx = fcx.build_new_block("unwind_custom_"); - let llpersonality = pad_bcx.fcx().eh_personality(); + let llpersonality = pad_bcx.ccx.eh_personality(); let resume_bcx = fcx.build_new_block("resume"); let val = if base::wants_msvc_seh(fcx.ccx.sess()) { diff --git a/src/librustc_trans/common.rs b/src/librustc_trans/common.rs index 59a44133f743a..ce5a72c7a0f21 100644 --- a/src/librustc_trans/common.rs +++ b/src/librustc_trans/common.rs @@ -285,50 +285,6 @@ impl<'a, 'tcx> FunctionContext<'a, 'tcx> { BlockAndBuilder::new(self.new_block(name), self) } - pub fn eh_personality(&self) -> ValueRef { - // The exception handling personality function. - // - // If our compilation unit has the `eh_personality` lang item somewhere - // within it, then we just need to translate that. Otherwise, we're - // building an rlib which will depend on some upstream implementation of - // this function, so we just codegen a generic reference to it. We don't - // specify any of the types for the function, we just make it a symbol - // that LLVM can later use. - // - // Note that MSVC is a little special here in that we don't use the - // `eh_personality` lang item at all. Currently LLVM has support for - // both Dwarf and SEH unwind mechanisms for MSVC targets and uses the - // *name of the personality function* to decide what kind of unwind side - // tables/landing pads to emit. It looks like Dwarf is used by default, - // injecting a dependency on the `_Unwind_Resume` symbol for resuming - // an "exception", but for MSVC we want to force SEH. This means that we - // can't actually have the personality function be our standard - // `rust_eh_personality` function, but rather we wired it up to the - // CRT's custom personality function, which forces LLVM to consider - // landing pads as "landing pads for SEH". - let ccx = self.ccx; - let tcx = ccx.tcx(); - match tcx.lang_items.eh_personality() { - Some(def_id) if !base::wants_msvc_seh(ccx.sess()) => { - Callee::def(ccx, def_id, tcx.intern_substs(&[])).reify(ccx) - } - _ => { - if let Some(llpersonality) = ccx.eh_personality().get() { - return llpersonality - } - let name = if base::wants_msvc_seh(ccx.sess()) { - "__CxxFrameHandler3" - } else { - "rust_eh_personality" - }; - let fty = Type::variadic_func(&[], &Type::i32(ccx)); - let f = declare::declare_cfn(ccx, name, fty); - ccx.eh_personality().set(Some(f)); - f - } - } - } - // Returns a ValueRef of the "eh_unwind_resume" lang item if one is defined, // otherwise declares it as an external function. 
pub fn eh_unwind_resume(&self) -> Callee<'tcx> { diff --git a/src/librustc_trans/context.rs b/src/librustc_trans/context.rs index 9b6d911bf5e64..25a7a5eddd464 100644 --- a/src/librustc_trans/context.rs +++ b/src/librustc_trans/context.rs @@ -16,6 +16,8 @@ use rustc::hir::def::ExportMap; use rustc::hir::def_id::DefId; use rustc::traits; use debuginfo; +use callee::Callee; +use base; use declare; use glue::DropGlueKind; use monomorphize::Instance; @@ -825,10 +827,6 @@ impl<'b, 'tcx> CrateContext<'b, 'tcx> { &self.local().dbg_cx } - pub fn eh_personality<'a>(&'a self) -> &'a Cell> { - &self.local().eh_personality - } - pub fn eh_unwind_resume<'a>(&'a self) -> &'a Cell> { &self.local().eh_unwind_resume } @@ -909,6 +907,50 @@ impl<'b, 'tcx> CrateContext<'b, 'tcx> { base_n::push_str(idx as u64, base_n::ALPHANUMERIC_ONLY, &mut name); name } + + pub fn eh_personality(&self) -> ValueRef { + // The exception handling personality function. + // + // If our compilation unit has the `eh_personality` lang item somewhere + // within it, then we just need to translate that. Otherwise, we're + // building an rlib which will depend on some upstream implementation of + // this function, so we just codegen a generic reference to it. We don't + // specify any of the types for the function, we just make it a symbol + // that LLVM can later use. + // + // Note that MSVC is a little special here in that we don't use the + // `eh_personality` lang item at all. Currently LLVM has support for + // both Dwarf and SEH unwind mechanisms for MSVC targets and uses the + // *name of the personality function* to decide what kind of unwind side + // tables/landing pads to emit. It looks like Dwarf is used by default, + // injecting a dependency on the `_Unwind_Resume` symbol for resuming + // an "exception", but for MSVC we want to force SEH. This means that we + // can't actually have the personality function be our standard + // `rust_eh_personality` function, but rather we wired it up to the + // CRT's custom personality function, which forces LLVM to consider + // landing pads as "landing pads for SEH". + let tcx = self.tcx(); + match tcx.lang_items.eh_personality() { + Some(def_id) if !base::wants_msvc_seh(self.sess()) => { + Callee::def(self, def_id, tcx.intern_substs(&[])).reify(self) + } + _ => { + if let Some(llpersonality) = self.local().eh_personality.get() { + return llpersonality + } + let name = if base::wants_msvc_seh(self.sess()) { + "__CxxFrameHandler3" + } else { + "rust_eh_personality" + }; + let fty = Type::variadic_func(&[], &Type::i32(self)); + let f = declare::declare_cfn(self, name, fty); + self.local().eh_personality.set(Some(f)); + f + } + } + } + } pub struct TypeOfDepthLock<'a, 'tcx: 'a>(&'a LocalCrateContext<'tcx>); diff --git a/src/librustc_trans/intrinsic.rs b/src/librustc_trans/intrinsic.rs index e052fa01da05d..b7116ba1f338b 100644 --- a/src/librustc_trans/intrinsic.rs +++ b/src/librustc_trans/intrinsic.rs @@ -718,7 +718,7 @@ fn trans_msvc_try<'a, 'tcx>(bcx: &BlockAndBuilder<'a, 'tcx>, let llfn = get_rust_try_fn(bcx.fcx(), &mut |bcx| { let ccx = bcx.ccx; - bcx.set_personality_fn(bcx.fcx().eh_personality()); + bcx.set_personality_fn(bcx.ccx.eh_personality()); let normal = bcx.fcx().build_new_block("normal"); let catchswitch = bcx.fcx().build_new_block("catchswitch"); @@ -855,7 +855,7 @@ fn trans_gnu_try<'a, 'tcx>(bcx: &BlockAndBuilder<'a, 'tcx>, // rust_try ignores the selector. 
let lpad_ty = Type::struct_(ccx, &[Type::i8p(ccx), Type::i32(ccx)], false); - let vals = catch.landing_pad(lpad_ty, bcx.fcx().eh_personality(), 1, catch.fcx().llfn); + let vals = catch.landing_pad(lpad_ty, bcx.ccx.eh_personality(), 1, catch.fcx().llfn); catch.add_clause(vals, C_null(Type::i8p(ccx))); let ptr = catch.extract_value(vals, 0); catch.store(ptr, catch.bitcast(local_ptr, Type::i8p(ccx).ptr_to())); diff --git a/src/librustc_trans/mir/block.rs b/src/librustc_trans/mir/block.rs index d36857a8581a8..2b408e3170eed 100644 --- a/src/librustc_trans/mir/block.rs +++ b/src/librustc_trans/mir/block.rs @@ -848,7 +848,7 @@ impl<'a, 'tcx> MirContext<'a, 'tcx> { self.landing_pads[target_bb] = Some(bcx.llbb()); let ccx = bcx.ccx; - let llpersonality = self.fcx.eh_personality(); + let llpersonality = self.ccx.eh_personality(); let llretty = Type::struct_(ccx, &[Type::i8p(ccx), Type::i32(ccx)], false); let llretval = bcx.landing_pad(llretty, llpersonality, 1, self.fcx.llfn); bcx.set_cleanup(llretval); diff --git a/src/librustc_trans/mir/mod.rs b/src/librustc_trans/mir/mod.rs index 581e403f4208f..71f431def68d4 100644 --- a/src/librustc_trans/mir/mod.rs +++ b/src/librustc_trans/mir/mod.rs @@ -313,7 +313,7 @@ pub fn trans_mir<'a, 'tcx: 'a>( mircx.cleanup_kinds.iter_enumerated().map(|(bb, cleanup_kind)| { if let CleanupKind::Funclet = *cleanup_kind { let bcx = mircx.build_block(bb); - bcx.set_personality_fn(fcx.eh_personality()); + bcx.set_personality_fn(mircx.ccx.eh_personality()); if base::wants_msvc_seh(fcx.ccx.sess()) { return Some(Funclet::new(bcx.cleanup_pad(None, &[]))); } From 07cf2a90059f46bc00586eebd0737e7432b83734 Mon Sep 17 00:00:00 2001 From: Mark Simulacrum Date: Mon, 19 Dec 2016 19:57:39 -0700 Subject: [PATCH 097/103] Simplify callee by removing is_indirect branch. --- src/librustc_trans/callee.rs | 19 ++++++------------- 1 file changed, 6 insertions(+), 13 deletions(-) diff --git a/src/librustc_trans/callee.rs b/src/librustc_trans/callee.rs index 651cc40f993c2..ac832b6f746fd 100644 --- a/src/librustc_trans/callee.rs +++ b/src/librustc_trans/callee.rs @@ -16,7 +16,7 @@ pub use self::CalleeData::*; -use llvm::{self, ValueRef, get_param, get_params}; +use llvm::{self, ValueRef, get_params}; use rustc::hir::def_id::DefId; use rustc::ty::subst::Substs; use rustc::traits; @@ -342,13 +342,13 @@ fn trans_fn_once_adapter_shim<'a, 'tcx>( let mut llargs = get_params(fcx.llfn); let fn_ret = callee.ty.fn_ret(); let fn_ty = callee.direct_fn_type(bcx.ccx, &[]); - let idx = fn_ty.ret.is_indirect() as usize; + let self_idx = fn_ty.ret.is_indirect() as usize; let env_arg = &orig_fn_ty.args[0]; let llenv = if env_arg.is_indirect() { - llargs[idx] + llargs[self_idx] } else { let scratch = alloc_ty(&bcx, closure_ty, "self"); - let mut llarg_idx = idx; + let mut llarg_idx = self_idx; env_arg.store_fn_arg(&bcx, &mut llarg_idx, scratch); scratch }; @@ -357,22 +357,15 @@ fn trans_fn_once_adapter_shim<'a, 'tcx>( // Adjust llargs such that llargs[self_idx..] has the call arguments. // For zero-sized closures that means sneaking in a new argument. if env_arg.is_ignore() { - if fn_ty.ret.is_indirect() { - llargs[0] = llenv; - } else { - llargs.insert(0, llenv); - } + llargs.insert(self_idx, llenv); } else { - llargs[idx] = llenv; + llargs[self_idx] = llenv; } // Call the by-ref closure body with `self` in a cleanup scope, // to drop `self` when the body returns, or in case it unwinds. 
let self_scope = fcx.schedule_drop_mem(llenv, closure_ty); - if fn_ty.ret.is_indirect() { - llargs.insert(0, get_param(fcx.llfn, 0)); - } let llfn = callee.reify(bcx.ccx); let llret; if let Some(landing_pad) = self_scope.landing_pad { From 654131cb5376514eb355f40837790afca7fa3e7d Mon Sep 17 00:00:00 2001 From: Mark Simulacrum Date: Mon, 19 Dec 2016 20:09:51 -0700 Subject: [PATCH 098/103] Add unreachable() after calls to eh_unwind_resume. --- src/librustc_trans/cleanup.rs | 1 + src/librustc_trans/mir/block.rs | 1 + 2 files changed, 2 insertions(+) diff --git a/src/librustc_trans/cleanup.rs b/src/librustc_trans/cleanup.rs index 21eccb101f73a..5bf8aecf66738 100644 --- a/src/librustc_trans/cleanup.rs +++ b/src/librustc_trans/cleanup.rs @@ -93,6 +93,7 @@ impl<'tcx> DropValue<'tcx> { } else { let exc_ptr = resume_bcx.extract_value(lp, 0); resume_bcx.call(fcx.eh_unwind_resume().reify(fcx.ccx), &[exc_ptr], None); + resume_bcx.unreachable(); } UnwindKind::LandingPad }; diff --git a/src/librustc_trans/mir/block.rs b/src/librustc_trans/mir/block.rs index 2b408e3170eed..e665c7a2307f5 100644 --- a/src/librustc_trans/mir/block.rs +++ b/src/librustc_trans/mir/block.rs @@ -130,6 +130,7 @@ impl<'a, 'tcx> MirContext<'a, 'tcx> { &[exc_ptr], cleanup_bundle, ); + bcx.unreachable(); } } } From a811f608349ee5044ffcdcd2c8190b913d0668e4 Mon Sep 17 00:00:00 2001 From: Mark Simulacrum Date: Mon, 19 Dec 2016 21:16:58 -0700 Subject: [PATCH 099/103] Simplify get_landing_pad by inlining UnwindKind. --- src/librustc_trans/cleanup.rs | 94 ++++++++--------------------------- 1 file changed, 22 insertions(+), 72 deletions(-) diff --git a/src/librustc_trans/cleanup.rs b/src/librustc_trans/cleanup.rs index 5bf8aecf66738..84a731c9d7df7 100644 --- a/src/librustc_trans/cleanup.rs +++ b/src/librustc_trans/cleanup.rs @@ -19,8 +19,7 @@ //! completing evaluation successfully without panic). use llvm::{BasicBlockRef, ValueRef}; -use base::{self, Lifetime}; -use common; +use base; use common::{BlockAndBuilder, FunctionContext, Funclet}; use glue; use type_::Type; @@ -55,22 +54,16 @@ impl<'tcx> DropValue<'tcx> { /// This should only be called once per function, as it creates an alloca for the landingpad. fn get_landing_pad<'a>(&self, fcx: &FunctionContext<'a, 'tcx>) -> BasicBlockRef { debug!("get_landing_pad"); + let bcx = fcx.build_new_block("cleanup_unwind"); + let llpersonality = bcx.ccx.eh_personality(); + bcx.set_personality_fn(llpersonality); - let mut pad_bcx = fcx.build_new_block("unwind_custom_"); - - let llpersonality = pad_bcx.ccx.eh_personality(); - - let resume_bcx = fcx.build_new_block("resume"); - let val = if base::wants_msvc_seh(fcx.ccx.sess()) { - // A cleanup pad requires a personality function to be specified, so - // we do that here explicitly (happens implicitly below through - // creation of the landingpad instruction). We then create a - // cleanuppad instruction which has no filters to run cleanup on all - // exceptions. - pad_bcx.set_personality_fn(llpersonality); - let llretval = pad_bcx.cleanup_pad(None, &[]); - resume_bcx.cleanup_ret(resume_bcx.cleanup_pad(None, &[]), None); - UnwindKind::CleanupPad(llretval) + if base::wants_msvc_seh(fcx.ccx.sess()) { + // Insert cleanup instructions into the cleanup block + let funclet = Some(Funclet::new(bcx.cleanup_pad(None, &[]))); + self.trans(funclet.as_ref(), &bcx); + + bcx.cleanup_ret(bcx.cleanup_pad(None, &[]), None); } else { // The landing pad return type (the type being propagated). 
Not sure // what this represents but it's determined by the personality @@ -78,67 +71,24 @@ impl<'tcx> DropValue<'tcx> { let llretty = Type::struct_(fcx.ccx, &[Type::i8p(fcx.ccx), Type::i32(fcx.ccx)], false); // The only landing pad clause will be 'cleanup' - let llretval = pad_bcx.landing_pad(llretty, llpersonality, 1, pad_bcx.fcx().llfn); + let llretval = bcx.landing_pad(llretty, llpersonality, 1, bcx.fcx().llfn); // The landing pad block is a cleanup - pad_bcx.set_cleanup(llretval); - - let addr = pad_bcx.fcx().alloca(common::val_ty(llretval), ""); - Lifetime::Start.call(&pad_bcx, addr); - pad_bcx.store(llretval, addr); - let lp = resume_bcx.load(addr); - Lifetime::End.call(&resume_bcx, addr); - if !resume_bcx.sess().target.target.options.custom_unwind_resume { - resume_bcx.resume(lp); - } else { - let exc_ptr = resume_bcx.extract_value(lp, 0); - resume_bcx.call(fcx.eh_unwind_resume().reify(fcx.ccx), &[exc_ptr], None); - resume_bcx.unreachable(); - } - UnwindKind::LandingPad - }; + bcx.set_cleanup(llretval); - let mut cleanup = fcx.build_new_block("clean_custom_"); + // Insert cleanup instructions into the cleanup block + self.trans(None, &bcx); - // Insert cleanup instructions into the cleanup block - let funclet = match val { - UnwindKind::CleanupPad(_) => Some(Funclet::new(cleanup.cleanup_pad(None, &[]))), - UnwindKind::LandingPad => None, - }; - self.trans(funclet.as_ref(), &cleanup); - - // Insert instruction into cleanup block to branch to the exit - val.branch(&mut cleanup, resume_bcx.llbb()); - - // Branch into the cleanup block - val.branch(&mut pad_bcx, cleanup.llbb()); - - pad_bcx.llbb() - } -} - -#[derive(Copy, Clone, Debug)] -enum UnwindKind { - LandingPad, - CleanupPad(ValueRef), -} - -impl UnwindKind { - /// Generates a branch going from `bcx` to `to_llbb` where `self` is - /// the exit label attached to the start of `bcx`. - /// - /// Transitions from an exit label to other exit labels depend on the type - /// of label. For example with MSVC exceptions unwind exit labels will use - /// the `cleanupret` instruction instead of the `br` instruction. - fn branch(&self, bcx: &BlockAndBuilder, to_llbb: BasicBlockRef) { - match *self { - UnwindKind::CleanupPad(pad) => { - bcx.cleanup_ret(pad, Some(to_llbb)); - } - UnwindKind::LandingPad => { - bcx.br(to_llbb); + if !bcx.sess().target.target.options.custom_unwind_resume { + bcx.resume(llretval); + } else { + let exc_ptr = bcx.extract_value(llretval, 0); + bcx.call(fcx.eh_unwind_resume().reify(fcx.ccx), &[exc_ptr], None); + bcx.unreachable(); } } + + bcx.llbb() } } From a9b5c63d437c71c5ee11581a75e8fdecf8a1794c Mon Sep 17 00:00:00 2001 From: Mark Simulacrum Date: Mon, 19 Dec 2016 21:26:49 -0700 Subject: [PATCH 100/103] Move eh_unwind_resume into CrateContext Also improves cache quality. 
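
The pattern is the same one eh_personality now uses: a Cell on the local
crate context is consulted first, so the symbol is declared once per
codegen unit and every later call returns the cached ValueRef instead of
re-declaring it through a fresh FunctionContext. A minimal sketch of the
shape, not the real API (ValueRef is stood in by a plain integer and
declare_fn stands in for declare::declare_fn):

    use std::cell::Cell;

    type ValueRef = usize; // stand-in for llvm::ValueRef

    struct Ctx {
        eh_unwind_resume: Cell<Option<ValueRef>>,
    }

    impl Ctx {
        fn eh_unwind_resume(&self) -> ValueRef {
            // Fast path: the symbol was already declared for this unit.
            if let Some(llfn) = self.eh_unwind_resume.get() {
                return llfn;
            }
            let llfn = declare_fn("rust_eh_unwind_resume");
            self.eh_unwind_resume.set(Some(llfn));
            llfn
        }
    }

    fn declare_fn(_name: &str) -> ValueRef {
        1 // placeholder for the real declaration
    }

    fn main() {
        let ctx = Ctx { eh_unwind_resume: Cell::new(None) };
        // Second call hits the cache and returns the same value.
        assert_eq!(ctx.eh_unwind_resume(), ctx.eh_unwind_resume());
    }
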
--- src/librustc_trans/cleanup.rs | 2 +- src/librustc_trans/common.rs | 33 --------------------------- src/librustc_trans/context.rs | 40 ++++++++++++++++++++++++++++----- src/librustc_trans/mir/block.rs | 6 +---- 4 files changed, 37 insertions(+), 44 deletions(-) diff --git a/src/librustc_trans/cleanup.rs b/src/librustc_trans/cleanup.rs index 84a731c9d7df7..9409ac5f8e1c7 100644 --- a/src/librustc_trans/cleanup.rs +++ b/src/librustc_trans/cleanup.rs @@ -83,7 +83,7 @@ impl<'tcx> DropValue<'tcx> { bcx.resume(llretval); } else { let exc_ptr = bcx.extract_value(llretval, 0); - bcx.call(fcx.eh_unwind_resume().reify(fcx.ccx), &[exc_ptr], None); + bcx.call(bcx.ccx.eh_unwind_resume(), &[exc_ptr], None); bcx.unreachable(); } } diff --git a/src/librustc_trans/common.rs b/src/librustc_trans/common.rs index ce5a72c7a0f21..71e17f1ea7405 100644 --- a/src/librustc_trans/common.rs +++ b/src/librustc_trans/common.rs @@ -21,10 +21,8 @@ use rustc::hir::def_id::DefId; use rustc::hir::map::DefPathData; use rustc::util::common::MemoizationMap; use middle::lang_items::LangItem; -use abi::Abi; use base; use builder::Builder; -use callee::Callee; use consts; use declare; use machine; @@ -285,37 +283,6 @@ impl<'a, 'tcx> FunctionContext<'a, 'tcx> { BlockAndBuilder::new(self.new_block(name), self) } - // Returns a ValueRef of the "eh_unwind_resume" lang item if one is defined, - // otherwise declares it as an external function. - pub fn eh_unwind_resume(&self) -> Callee<'tcx> { - use attributes; - let ccx = self.ccx; - let tcx = ccx.tcx(); - assert!(ccx.sess().target.target.options.custom_unwind_resume); - if let Some(def_id) = tcx.lang_items.eh_unwind_resume() { - return Callee::def(ccx, def_id, tcx.intern_substs(&[])); - } - - let ty = tcx.mk_fn_ptr(tcx.mk_bare_fn(ty::BareFnTy { - unsafety: hir::Unsafety::Unsafe, - abi: Abi::C, - sig: ty::Binder(tcx.mk_fn_sig( - iter::once(tcx.mk_mut_ptr(tcx.types.u8)), - tcx.types.never, - false - )), - })); - - let unwresume = ccx.eh_unwind_resume(); - if let Some(llfn) = unwresume.get() { - return Callee::ptr(llfn, ty); - } - let llfn = declare::declare_fn(ccx, "rust_eh_unwind_resume", ty); - attributes::unwind(llfn, true); - unwresume.set(Some(llfn)); - Callee::ptr(llfn, ty) - } - pub fn alloca(&self, ty: Type, name: &str) -> ValueRef { self.alloca_builder.dynamic_alloca(ty, name) } diff --git a/src/librustc_trans/context.rs b/src/librustc_trans/context.rs index 25a7a5eddd464..0f56aa70bd979 100644 --- a/src/librustc_trans/context.rs +++ b/src/librustc_trans/context.rs @@ -12,6 +12,7 @@ use llvm; use llvm::{ContextRef, ModuleRef, ValueRef}; use rustc::dep_graph::{DepGraph, DepNode, DepTrackingMap, DepTrackingMapConfig, WorkProduct}; use middle::cstore::LinkMeta; +use rustc::hir; use rustc::hir::def::ExportMap; use rustc::hir::def_id::DefId; use rustc::traits; @@ -38,12 +39,13 @@ use std::ffi::{CStr, CString}; use std::cell::{Cell, RefCell}; use std::marker::PhantomData; use std::ptr; +use std::iter; use std::rc::Rc; use std::str; use syntax::ast; use syntax::symbol::InternedString; use syntax_pos::DUMMY_SP; -use abi::FnType; +use abi::{Abi, FnType}; pub struct Stats { pub n_glues_created: Cell, @@ -827,10 +829,6 @@ impl<'b, 'tcx> CrateContext<'b, 'tcx> { &self.local().dbg_cx } - pub fn eh_unwind_resume<'a>(&'a self) -> &'a Cell> { - &self.local().eh_unwind_resume - } - pub fn rust_try_fn<'a>(&'a self) -> &'a Cell> { &self.local().rust_try_fn } @@ -951,6 +949,38 @@ impl<'b, 'tcx> CrateContext<'b, 'tcx> { } } + // Returns a ValueRef of the "eh_unwind_resume" lang item if one is 
defined, + // otherwise declares it as an external function. + pub fn eh_unwind_resume(&self) -> ValueRef { + use attributes; + let unwresume = &self.local().eh_unwind_resume; + if let Some(llfn) = unwresume.get() { + return llfn; + } + + let tcx = self.tcx(); + assert!(self.sess().target.target.options.custom_unwind_resume); + if let Some(def_id) = tcx.lang_items.eh_unwind_resume() { + let llfn = Callee::def(self, def_id, tcx.intern_substs(&[])).reify(self); + unwresume.set(Some(llfn)); + return llfn; + } + + let ty = tcx.mk_fn_ptr(tcx.mk_bare_fn(ty::BareFnTy { + unsafety: hir::Unsafety::Unsafe, + abi: Abi::C, + sig: ty::Binder(tcx.mk_fn_sig( + iter::once(tcx.mk_mut_ptr(tcx.types.u8)), + tcx.types.never, + false + )), + })); + + let llfn = declare::declare_fn(self, "rust_eh_unwind_resume", ty); + attributes::unwind(llfn, true); + unwresume.set(Some(llfn)); + llfn + } } pub struct TypeOfDepthLock<'a, 'tcx: 'a>(&'a LocalCrateContext<'tcx>); diff --git a/src/librustc_trans/mir/block.rs b/src/librustc_trans/mir/block.rs index e665c7a2307f5..5ad52b3d252cb 100644 --- a/src/librustc_trans/mir/block.rs +++ b/src/librustc_trans/mir/block.rs @@ -125,11 +125,7 @@ impl<'a, 'tcx> MirContext<'a, 'tcx> { bcx.resume(lp); } else { let exc_ptr = bcx.extract_value(lp, 0); - bcx.call( - bcx.fcx().eh_unwind_resume().reify(bcx.ccx), - &[exc_ptr], - cleanup_bundle, - ); + bcx.call(bcx.ccx.eh_unwind_resume(), &[exc_ptr], cleanup_bundle); bcx.unreachable(); } } From c1bc5e51d6cb6ea997da1757e16ddb1b9093d25e Mon Sep 17 00:00:00 2001 From: Mark Simulacrum Date: Mon, 19 Dec 2016 21:41:03 -0700 Subject: [PATCH 101/103] Improve cache quality for eh_personality. --- src/librustc_trans/context.rs | 16 ++++++++-------- 1 file changed, 8 insertions(+), 8 deletions(-) diff --git a/src/librustc_trans/context.rs b/src/librustc_trans/context.rs index 0f56aa70bd979..f292a70965004 100644 --- a/src/librustc_trans/context.rs +++ b/src/librustc_trans/context.rs @@ -927,26 +927,26 @@ impl<'b, 'tcx> CrateContext<'b, 'tcx> { // `rust_eh_personality` function, but rather we wired it up to the // CRT's custom personality function, which forces LLVM to consider // landing pads as "landing pads for SEH". + if let Some(llpersonality) = self.local().eh_personality.get() { + return llpersonality + } let tcx = self.tcx(); - match tcx.lang_items.eh_personality() { + let llfn = match tcx.lang_items.eh_personality() { Some(def_id) if !base::wants_msvc_seh(self.sess()) => { Callee::def(self, def_id, tcx.intern_substs(&[])).reify(self) } _ => { - if let Some(llpersonality) = self.local().eh_personality.get() { - return llpersonality - } let name = if base::wants_msvc_seh(self.sess()) { "__CxxFrameHandler3" } else { "rust_eh_personality" }; let fty = Type::variadic_func(&[], &Type::i32(self)); - let f = declare::declare_cfn(self, name, fty); - self.local().eh_personality.set(Some(f)); - f + declare::declare_cfn(self, name, fty) } - } + }; + self.local().eh_personality.set(Some(llfn)); + llfn } // Returns a ValueRef of the "eh_unwind_resume" lang item if one is defined, From 079abd0b1ea7d087586de53f49fa129aacd26ca4 Mon Sep 17 00:00:00 2001 From: Mark Simulacrum Date: Tue, 20 Dec 2016 14:14:30 -0700 Subject: [PATCH 102/103] Reuse cleanup pad declared at start of block. 
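
Previously this block emitted two cleanuppad instructions: one wrapped in
the Funclet and a second one fed to cleanup_ret. Afaik LLVM requires a
cleanuppad to be the first non-PHI instruction of its EH pad block, so the
second call was redundant at best and malformed at worst. In outline, the
change is to declare the pad once and thread the same value through:

    // before
    let funclet = Some(Funclet::new(bcx.cleanup_pad(None, &[])));
    self.trans(funclet.as_ref(), &bcx);
    bcx.cleanup_ret(bcx.cleanup_pad(None, &[]), None);

    // after
    let pad = bcx.cleanup_pad(None, &[]);
    let funclet = Some(Funclet::new(pad));
    self.trans(funclet.as_ref(), &bcx);
    bcx.cleanup_ret(pad, None);
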
--- src/librustc_trans/cleanup.rs | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/src/librustc_trans/cleanup.rs b/src/librustc_trans/cleanup.rs index 9409ac5f8e1c7..add820748acfc 100644 --- a/src/librustc_trans/cleanup.rs +++ b/src/librustc_trans/cleanup.rs @@ -59,11 +59,11 @@ impl<'tcx> DropValue<'tcx> { bcx.set_personality_fn(llpersonality); if base::wants_msvc_seh(fcx.ccx.sess()) { - // Insert cleanup instructions into the cleanup block - let funclet = Some(Funclet::new(bcx.cleanup_pad(None, &[]))); + let pad = bcx.cleanup_pad(None, &[]); + let funclet = Some(Funclet::new(pad)); self.trans(funclet.as_ref(), &bcx); - bcx.cleanup_ret(bcx.cleanup_pad(None, &[]), None); + bcx.cleanup_ret(pad, None); } else { // The landing pad return type (the type being propagated). Not sure // what this represents but it's determined by the personality From 0013d4cdf61a61abab79789c9ad5320bd1e2d56a Mon Sep 17 00:00:00 2001 From: Mark Simulacrum Date: Tue, 20 Dec 2016 20:37:19 -0700 Subject: [PATCH 103/103] Fix rebase errors. --- src/librustc_trans/mir/mod.rs | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) diff --git a/src/librustc_trans/mir/mod.rs b/src/librustc_trans/mir/mod.rs index 71f431def68d4..7a50e5cbe8c79 100644 --- a/src/librustc_trans/mir/mod.rs +++ b/src/librustc_trans/mir/mod.rs @@ -11,7 +11,7 @@ use libc::c_uint; use llvm::{self, ValueRef, BasicBlockRef}; use llvm::debuginfo::DIScope; -use rustc::ty; +use rustc::ty::{self, layout}; use rustc::mir::{self, Mir}; use rustc::mir::tcx::LvalueTy; use rustc::ty::subst::Substs; @@ -23,7 +23,6 @@ use common::{self, BlockAndBuilder, CrateContext, FunctionContext, C_null, Funcl use debuginfo::{self, declare_local, VariableAccess, VariableKind, FunctionDebugContext}; use monomorphize::{self, Instance}; use abi::FnType; -use machine; use type_of; use syntax_pos::{DUMMY_SP, NO_EXPANSION, COMMAND_LINE_EXPN, BytePos, Span};
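
By the end of the series, FunctionContext is pared down to the few fields
needed to build blocks and entry-block allocas: the ABI description
travels in FnType (now a field of MirContext), and the EH personality and
unwind-resume declarations are cached on CrateContext. In rough outline
(the generic parameters on Option and Builder were dropped from the
extracted hunks above and are filled back in here from context):

    pub struct FunctionContext<'a, 'tcx: 'a> {
        // The LLVM function being generated.
        pub llfn: ValueRef,

        // A marker for the place where we want to insert the function's
        // static allocas, so that LLVM will coalesce them into a single
        // alloca call.
        alloca_insert_pt: Option<ValueRef>,

        // This function's enclosing crate context.
        pub ccx: &'a CrateContext<'a, 'tcx>,

        // Builder parked at alloca_insert_pt, used by alloca().
        alloca_builder: Builder<'a, 'tcx>,
    }
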