From 62d196feb1607c93fb4bc9dc54bf08883496a6a2 Mon Sep 17 00:00:00 2001 From: Ralf Jung Date: Mon, 9 Sep 2024 11:20:22 +0200 Subject: [PATCH 01/14] bootstrap/Makefile.in: miri: add missing BOOTSTRAP ARGS also don't unnecessarily set BOOTSTRAP_SKIP_TARGET_SANITY while we are at it --- src/bootstrap/mk/Makefile.in | 8 ++++++-- 1 file changed, 6 insertions(+), 2 deletions(-) diff --git a/src/bootstrap/mk/Makefile.in b/src/bootstrap/mk/Makefile.in index 9acd85cddde6a..3fa2b3c22921e 100644 --- a/src/bootstrap/mk/Makefile.in +++ b/src/bootstrap/mk/Makefile.in @@ -54,30 +54,34 @@ check-aux: src/etc/test-float-parse \ $(BOOTSTRAP_ARGS) # Run standard library tests in Miri. - $(Q)BOOTSTRAP_SKIP_TARGET_SANITY=1 \ - $(BOOTSTRAP) miri --stage 2 \ + $(Q)$(BOOTSTRAP) miri --stage 2 \ library/core \ library/alloc \ + $(BOOTSTRAP_ARGS) \ --no-doc # Some doctests use file system operations to demonstrate dealing with `Result`. $(Q)MIRIFLAGS="-Zmiri-disable-isolation" \ $(BOOTSTRAP) miri --stage 2 \ library/core \ library/alloc \ + $(BOOTSTRAP_ARGS) \ --doc # In `std` we cannot test everything, so we skip some modules. $(Q)MIRIFLAGS="-Zmiri-disable-isolation" \ $(BOOTSTRAP) miri --stage 2 library/std \ + $(BOOTSTRAP_ARGS) \ --no-doc -- \ --skip fs:: --skip net:: --skip process:: --skip sys::pal:: $(Q)MIRIFLAGS="-Zmiri-disable-isolation" \ $(BOOTSTRAP) miri --stage 2 library/std \ + $(BOOTSTRAP_ARGS) \ --doc -- \ --skip fs:: --skip net:: --skip process:: --skip sys::pal:: # Also test some very target-specific modules on other targets # (making sure to cover an i686 target as well). $(Q)MIRIFLAGS="-Zmiri-disable-isolation" BOOTSTRAP_SKIP_TARGET_SANITY=1 \ $(BOOTSTRAP) miri --stage 2 library/std \ + $(BOOTSTRAP_ARGS) \ --target aarch64-apple-darwin,i686-pc-windows-msvc \ --no-doc -- \ time:: sync:: thread:: env:: From 6d61dfd2e48d4f183ca4bca4aaa60b2e2ffb4fa0 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Le=C3=B3n=20Orell=20Valerian=20Liehr?= Date: Mon, 9 Sep 2024 22:35:10 +0200 Subject: [PATCH 02/14] rustdoc: add two regression tests --- .../projection-as-union-type-error.rs | 14 +++++++++++ .../projection-as-union-type-error.stderr | 15 ++++++++++++ ...ias-impl-trait-declaration-too-subtle-2.rs | 23 +++++++++++++++++++ 3 files changed, 52 insertions(+) create mode 100644 tests/rustdoc-ui/projection-as-union-type-error.rs create mode 100644 tests/rustdoc-ui/projection-as-union-type-error.stderr create mode 100644 tests/rustdoc-ui/recursive-type-alias-impl-trait-declaration-too-subtle-2.rs diff --git a/tests/rustdoc-ui/projection-as-union-type-error.rs b/tests/rustdoc-ui/projection-as-union-type-error.rs new file mode 100644 index 0000000000000..d1d2e72249dc0 --- /dev/null +++ b/tests/rustdoc-ui/projection-as-union-type-error.rs @@ -0,0 +1,14 @@ +// Test to ensure that there is no ICE when normalizing a projection. +// See also . 
+// issue: rust-lang/rust#107872 + +pub trait Identity { + type Identity; +} + +pub type Foo = u8; + +pub union Bar { + a: ::Identity, //~ ERROR the trait bound `u8: Identity` is not satisfied + b: u8, +} diff --git a/tests/rustdoc-ui/projection-as-union-type-error.stderr b/tests/rustdoc-ui/projection-as-union-type-error.stderr new file mode 100644 index 0000000000000..32598015864cb --- /dev/null +++ b/tests/rustdoc-ui/projection-as-union-type-error.stderr @@ -0,0 +1,15 @@ +error[E0277]: the trait bound `u8: Identity` is not satisfied + --> $DIR/projection-as-union-type-error.rs:12:9 + | +LL | a: ::Identity, + | ^^^^^^^^^^^^^^^^^^^^^^^^^^^ the trait `Identity` is not implemented for `u8` + | +help: this trait has no implementations, consider adding one + --> $DIR/projection-as-union-type-error.rs:5:1 + | +LL | pub trait Identity { + | ^^^^^^^^^^^^^^^^^^ + +error: aborting due to 1 previous error + +For more information about this error, try `rustc --explain E0277`. diff --git a/tests/rustdoc-ui/recursive-type-alias-impl-trait-declaration-too-subtle-2.rs b/tests/rustdoc-ui/recursive-type-alias-impl-trait-declaration-too-subtle-2.rs new file mode 100644 index 0000000000000..9f8053d553876 --- /dev/null +++ b/tests/rustdoc-ui/recursive-type-alias-impl-trait-declaration-too-subtle-2.rs @@ -0,0 +1,23 @@ +// issue: rust-lang/rust#98250 +//@ check-pass + +#![feature(type_alias_impl_trait)] + +mod foo { + pub type Foo = impl PartialEq<(Foo, i32)>; + + fn foo() -> Foo { + super::Bar + } +} +use foo::Foo; + +struct Bar; + +impl PartialEq<(Foo, i32)> for Bar { + fn eq(&self, _other: &(Foo, i32)) -> bool { + true + } +} + +fn main() {} From 8235af07d21d60012acbd217f24049e1386081bd Mon Sep 17 00:00:00 2001 From: Nicholas Nethercote Date: Wed, 28 Aug 2024 08:24:10 +1000 Subject: [PATCH 03/14] Improve comment formatting. By reflowing comment lines that are too long, and a few that are very short. Plus some other very minor formatting tweaks. 
--- compiler/rustc_mir_transform/src/add_retag.rs | 10 +++-- .../src/check_const_item_mutation.rs | 1 + compiler/rustc_mir_transform/src/copy_prop.rs | 3 +- .../rustc_mir_transform/src/coverage/mod.rs | 3 +- .../src/dataflow_const_prop.rs | 3 +- .../src/deduce_param_attrs.rs | 6 +-- .../src/deduplicate_blocks.rs | 7 ++-- .../src/early_otherwise_branch.rs | 4 +- .../src/elaborate_drops.rs | 16 ++++---- .../src/ffi_unwind_calls.rs | 5 ++- .../src/function_item_references.rs | 4 +- compiler/rustc_mir_transform/src/gvn.rs | 10 +++-- compiler/rustc_mir_transform/src/inline.rs | 3 +- .../src/known_panics_lint.rs | 11 +++--- .../rustc_mir_transform/src/large_enums.rs | 5 ++- compiler/rustc_mir_transform/src/lib.rs | 31 +++++++++------ .../rustc_mir_transform/src/match_branches.rs | 14 ++++--- .../src/mentioned_items.rs | 4 +- compiler/rustc_mir_transform/src/prettify.rs | 4 +- .../rustc_mir_transform/src/promote_consts.rs | 38 ++++++++++--------- .../src/remove_uninit_drops.rs | 5 ++- .../rustc_mir_transform/src/reveal_all.rs | 6 +-- compiler/rustc_mir_transform/src/shim.rs | 3 +- .../src/simplify_comparison_integral.rs | 22 +++++++---- .../src/unreachable_enum_branching.rs | 6 +-- .../src/unreachable_prop.rs | 8 ++-- compiler/rustc_mir_transform/src/validate.rs | 30 ++++++++------- 27 files changed, 151 insertions(+), 111 deletions(-) diff --git a/compiler/rustc_mir_transform/src/add_retag.rs b/compiler/rustc_mir_transform/src/add_retag.rs index 794e85fbfedfb..8ad364ed05502 100644 --- a/compiler/rustc_mir_transform/src/add_retag.rs +++ b/compiler/rustc_mir_transform/src/add_retag.rs @@ -60,7 +60,9 @@ impl<'tcx> crate::MirPass<'tcx> for AddRetag { let basic_blocks = body.basic_blocks.as_mut(); let local_decls = &body.local_decls; let needs_retag = |place: &Place<'tcx>| { - !place.is_indirect_first_projection() // we're not really interested in stores to "outside" locations, they are hard to keep track of anyway + // We're not really interested in stores to "outside" locations, they are hard to keep + // track of anyway. + !place.is_indirect_first_projection() && may_contain_reference(place.ty(&*local_decls, tcx).ty, /*depth*/ 3, tcx) && !local_decls[place.local].is_deref_temp() }; @@ -129,9 +131,9 @@ impl<'tcx> crate::MirPass<'tcx> for AddRetag { StatementKind::Assign(box (ref place, ref rvalue)) => { let add_retag = match rvalue { // Ptr-creating operations already do their own internal retagging, no - // need to also add a retag statement. - // *Except* if we are deref'ing a Box, because those get desugared to directly working - // with the inner raw pointer! That's relevant for `RawPtr` as Miri otherwise makes it + // need to also add a retag statement. *Except* if we are deref'ing a + // Box, because those get desugared to directly working with the inner + // raw pointer! That's relevant for `RawPtr` as Miri otherwise makes it // a NOP when the original pointer is already raw. 
Rvalue::RawPtr(_mutbl, place) => { // Using `is_box_global` here is a bit sketchy: if this code is diff --git a/compiler/rustc_mir_transform/src/check_const_item_mutation.rs b/compiler/rustc_mir_transform/src/check_const_item_mutation.rs index 234ed8206f5d5..048dd9ccb8f3c 100644 --- a/compiler/rustc_mir_transform/src/check_const_item_mutation.rs +++ b/compiler/rustc_mir_transform/src/check_const_item_mutation.rs @@ -123,6 +123,7 @@ impl<'tcx> Visitor<'tcx> for ConstMutationChecker<'_, 'tcx> { self.super_statement(stmt, loc); self.target_local = None; } + fn visit_rvalue(&mut self, rvalue: &Rvalue<'tcx>, loc: Location) { if let Rvalue::Ref(_, BorrowKind::Mut { .. }, place) = rvalue { let local = place.local; diff --git a/compiler/rustc_mir_transform/src/copy_prop.rs b/compiler/rustc_mir_transform/src/copy_prop.rs index dc4ee58ea83e3..1cb56d29b85e0 100644 --- a/compiler/rustc_mir_transform/src/copy_prop.rs +++ b/compiler/rustc_mir_transform/src/copy_prop.rs @@ -140,7 +140,8 @@ impl<'tcx> MutVisitor<'tcx> for Replacer<'_, 'tcx> { fn visit_operand(&mut self, operand: &mut Operand<'tcx>, loc: Location) { if let Operand::Move(place) = *operand - // A move out of a projection of a copy is equivalent to a copy of the original projection. + // A move out of a projection of a copy is equivalent to a copy of the original + // projection. && !place.is_indirect_first_projection() && !self.fully_moved.contains(place.local) { diff --git a/compiler/rustc_mir_transform/src/coverage/mod.rs b/compiler/rustc_mir_transform/src/coverage/mod.rs index 042f589768a99..f78fc7931f9f9 100644 --- a/compiler/rustc_mir_transform/src/coverage/mod.rs +++ b/compiler/rustc_mir_transform/src/coverage/mod.rs @@ -279,7 +279,8 @@ fn inject_mcdc_statements<'tcx>( basic_coverage_blocks: &CoverageGraph, extracted_mappings: &ExtractedMappings, ) { - // Inject test vector update first because `inject_statement` always insert new statement at head. + // Inject test vector update first because `inject_statement` always insert new statement at + // head. for &mappings::MCDCDecision { span: _, ref end_bcbs, diff --git a/compiler/rustc_mir_transform/src/dataflow_const_prop.rs b/compiler/rustc_mir_transform/src/dataflow_const_prop.rs index 79f12be4bc353..7ac019ce81216 100644 --- a/compiler/rustc_mir_transform/src/dataflow_const_prop.rs +++ b/compiler/rustc_mir_transform/src/dataflow_const_prop.rs @@ -647,7 +647,8 @@ fn try_write_constant<'tcx>( ty::FnDef(..) => {} // Those are scalars, must be handled above. - ty::Bool | ty::Int(_) | ty::Uint(_) | ty::Float(_) | ty::Char => throw_machine_stop_str!("primitive type with provenance"), + ty::Bool | ty::Int(_) | ty::Uint(_) | ty::Float(_) | ty::Char => + throw_machine_stop_str!("primitive type with provenance"), ty::Tuple(elem_tys) => { for (i, elem) in elem_tys.iter().enumerate() { diff --git a/compiler/rustc_mir_transform/src/deduce_param_attrs.rs b/compiler/rustc_mir_transform/src/deduce_param_attrs.rs index e870a71b05aa7..c645bbee08a54 100644 --- a/compiler/rustc_mir_transform/src/deduce_param_attrs.rs +++ b/compiler/rustc_mir_transform/src/deduce_param_attrs.rs @@ -42,9 +42,9 @@ impl<'tcx> Visitor<'tcx> for DeduceReadOnly { } PlaceContext::NonMutatingUse(NonMutatingUseContext::RawBorrow) => { // Whether mutating though a `&raw const` is allowed is still undecided, so we - // disable any sketchy `readonly` optimizations for now. - // But we only need to do this if the pointer would point into the argument. 
- // IOW: for indirect places, like `&raw (*local).field`, this surely cannot mutate `local`. + // disable any sketchy `readonly` optimizations for now. But we only need to do + // this if the pointer would point into the argument. IOW: for indirect places, + // like `&raw (*local).field`, this surely cannot mutate `local`. !place.is_indirect() } PlaceContext::NonMutatingUse(..) | PlaceContext::NonUse(..) => { diff --git a/compiler/rustc_mir_transform/src/deduplicate_blocks.rs b/compiler/rustc_mir_transform/src/deduplicate_blocks.rs index c8179d513c7ce..ad204e76d0d15 100644 --- a/compiler/rustc_mir_transform/src/deduplicate_blocks.rs +++ b/compiler/rustc_mir_transform/src/deduplicate_blocks.rs @@ -69,8 +69,8 @@ fn find_duplicates(body: &Body<'_>) -> FxHashMap { // For example, if bb1, bb2 and bb3 are duplicates, we will first insert bb3 in same_hashes. // Then we will see that bb2 is a duplicate of bb3, // and insert bb2 with the replacement bb3 in the duplicates list. - // When we see bb1, we see that it is a duplicate of bb3, and therefore insert it in the duplicates list - // with replacement bb3. + // When we see bb1, we see that it is a duplicate of bb3, and therefore insert it in the + // duplicates list with replacement bb3. // When the duplicates are removed, we will end up with only bb3. for (bb, bbd) in body.basic_blocks.iter_enumerated().rev().filter(|(_, bbd)| !bbd.is_cleanup) { // Basic blocks can get really big, so to avoid checking for duplicates in basic blocks @@ -105,7 +105,8 @@ struct BasicBlockHashable<'tcx, 'a> { impl Hash for BasicBlockHashable<'_, '_> { fn hash(&self, state: &mut H) { hash_statements(state, self.basic_block_data.statements.iter()); - // Note that since we only hash the kind, we lose span information if we deduplicate the blocks + // Note that since we only hash the kind, we lose span information if we deduplicate the + // blocks. self.basic_block_data.terminator().kind.hash(state); } } diff --git a/compiler/rustc_mir_transform/src/early_otherwise_branch.rs b/compiler/rustc_mir_transform/src/early_otherwise_branch.rs index 3884321df2a2c..704ed508b22a8 100644 --- a/compiler/rustc_mir_transform/src/early_otherwise_branch.rs +++ b/compiler/rustc_mir_transform/src/early_otherwise_branch.rs @@ -261,8 +261,8 @@ fn evaluate_candidate<'tcx>( // }; // ``` // - // Hoisting the `discriminant(Q)` out of the `A` arm causes us to compute the discriminant of an - // invalid value, which is UB. + // Hoisting the `discriminant(Q)` out of the `A` arm causes us to compute the discriminant + // of an invalid value, which is UB. // In order to fix this, **we would either need to show that the discriminant computation of // `place` is computed in all branches**. // FIXME(#95162) For the moment, we adopt a conservative approach and diff --git a/compiler/rustc_mir_transform/src/elaborate_drops.rs b/compiler/rustc_mir_transform/src/elaborate_drops.rs index cb729792dc538..42d13fa3613ab 100644 --- a/compiler/rustc_mir_transform/src/elaborate_drops.rs +++ b/compiler/rustc_mir_transform/src/elaborate_drops.rs @@ -20,8 +20,8 @@ use tracing::{debug, instrument}; use crate::deref_separator::deref_finder; /// During MIR building, Drop terminators are inserted in every place where a drop may occur. -/// However, in this phase, the presence of these terminators does not guarantee that a destructor will run, -/// as the target of the drop may be uninitialized. 
+/// However, in this phase, the presence of these terminators does not guarantee that a destructor +/// will run, as the target of the drop may be uninitialized. /// In general, the compiler cannot determine at compile time whether a destructor will run or not. /// /// At a high level, this pass refines Drop to only run the destructor if the @@ -30,10 +30,10 @@ use crate::deref_separator::deref_finder; /// Once this is complete, Drop terminators in the MIR correspond to a call to the "drop glue" or /// "drop shim" for the type of the dropped place. /// -/// This pass relies on dropped places having an associated move path, which is then used to determine -/// the initialization status of the place and its descendants. -/// It's worth noting that a MIR containing a Drop without an associated move path is probably ill formed, -/// as it would allow running a destructor on a place behind a reference: +/// This pass relies on dropped places having an associated move path, which is then used to +/// determine the initialization status of the place and its descendants. +/// It's worth noting that a MIR containing a Drop without an associated move path is probably ill +/// formed, as it would allow running a destructor on a place behind a reference: /// /// ```text /// fn drop_term(t: &mut T) { @@ -377,8 +377,8 @@ impl<'a, 'tcx> ElaborateDropsCtxt<'a, 'tcx> { ); } // A drop and replace behind a pointer/array/whatever. - // The borrow checker requires that these locations are initialized before the assignment, - // so we just leave an unconditional drop. + // The borrow checker requires that these locations are initialized before the + // assignment, so we just leave an unconditional drop. assert!(!data.is_cleanup); } } diff --git a/compiler/rustc_mir_transform/src/ffi_unwind_calls.rs b/compiler/rustc_mir_transform/src/ffi_unwind_calls.rs index 81875e3d01246..8490b7e235818 100644 --- a/compiler/rustc_mir_transform/src/ffi_unwind_calls.rs +++ b/compiler/rustc_mir_transform/src/ffi_unwind_calls.rs @@ -60,8 +60,9 @@ fn has_ffi_unwind_calls(tcx: TyCtxt<'_>, local_def_id: LocalDefId) -> bool { let fn_def_id = match ty.kind() { ty::FnPtr(..) => None, &ty::FnDef(def_id, _) => { - // Rust calls cannot themselves create foreign unwinds (even if they use a non-Rust ABI). - // So the leak of the foreign unwind into Rust can only be elsewhere, not here. + // Rust calls cannot themselves create foreign unwinds (even if they use a non-Rust + // ABI). So the leak of the foreign unwind into Rust can only be elsewhere, not + // here. if !tcx.is_foreign_item(def_id) { continue; } diff --git a/compiler/rustc_mir_transform/src/function_item_references.rs b/compiler/rustc_mir_transform/src/function_item_references.rs index 0efaa172e0c6f..eb09a7d3b2114 100644 --- a/compiler/rustc_mir_transform/src/function_item_references.rs +++ b/compiler/rustc_mir_transform/src/function_item_references.rs @@ -92,8 +92,8 @@ impl<'tcx> FunctionItemRefChecker<'_, 'tcx> { { let mut span = self.nth_arg_span(args, arg_num); if span.from_expansion() { - // The operand's ctxt wouldn't display the lint since it's inside a macro so - // we have to use the callsite's ctxt. + // The operand's ctxt wouldn't display the lint since it's + // inside a macro so we have to use the callsite's ctxt. 
let callsite_ctxt = span.source_callsite().ctxt(); span = span.with_ctxt(callsite_ctxt); } diff --git a/compiler/rustc_mir_transform/src/gvn.rs b/compiler/rustc_mir_transform/src/gvn.rs index e5d2b13efd8fe..715461de06482 100644 --- a/compiler/rustc_mir_transform/src/gvn.rs +++ b/compiler/rustc_mir_transform/src/gvn.rs @@ -139,8 +139,8 @@ fn propagate_ssa<'tcx>(tcx: TyCtxt<'tcx>, body: &mut Body<'tcx>) { // Try to get some insight. AssignedValue::Rvalue(rvalue) => { let value = state.simplify_rvalue(rvalue, location); - // FIXME(#112651) `rvalue` may have a subtype to `local`. We can only mark `local` as - // reusable if we have an exact type match. + // FIXME(#112651) `rvalue` may have a subtype to `local`. We can only mark + // `local` as reusable if we have an exact type match. if state.local_decls[local].ty != rvalue.ty(state.local_decls, tcx) { return; } @@ -480,7 +480,8 @@ impl<'body, 'tcx> VnState<'body, 'tcx> { let pointer = self.evaluated[local].as_ref()?; let mut mplace = self.ecx.deref_pointer(pointer).ok()?; for proj in place.projection.iter().skip(1) { - // We have no call stack to associate a local with a value, so we cannot interpret indexing. + // We have no call stack to associate a local with a value, so we cannot + // interpret indexing. if matches!(proj, ProjectionElem::Index(_)) { return None; } @@ -1382,7 +1383,8 @@ fn op_to_prop_const<'tcx>( return Some(ConstValue::ZeroSized); } - // Do not synthetize too large constants. Codegen will just memcpy them, which we'd like to avoid. + // Do not synthetize too large constants. Codegen will just memcpy them, which we'd like to + // avoid. if !matches!(op.layout.abi, Abi::Scalar(..) | Abi::ScalarPair(..)) { return None; } diff --git a/compiler/rustc_mir_transform/src/inline.rs b/compiler/rustc_mir_transform/src/inline.rs index 03ef808efecb6..870cb180ce1be 100644 --- a/compiler/rustc_mir_transform/src/inline.rs +++ b/compiler/rustc_mir_transform/src/inline.rs @@ -568,7 +568,8 @@ impl<'tcx> Inliner<'tcx> { // if the no-attribute function ends up with the same instruction set anyway. return Err("Cannot move inline-asm across instruction sets"); } else if let TerminatorKind::TailCall { .. } = term.kind { - // FIXME(explicit_tail_calls): figure out how exactly functions containing tail calls can be inlined (and if they even should) + // FIXME(explicit_tail_calls): figure out how exactly functions containing tail + // calls can be inlined (and if they even should) return Err("can't inline functions with tail calls"); } else { work_list.extend(term.successors()) diff --git a/compiler/rustc_mir_transform/src/known_panics_lint.rs b/compiler/rustc_mir_transform/src/known_panics_lint.rs index e964310c5429e..46b17c3c7e04d 100644 --- a/compiler/rustc_mir_transform/src/known_panics_lint.rs +++ b/compiler/rustc_mir_transform/src/known_panics_lint.rs @@ -1,8 +1,6 @@ -//! A lint that checks for known panics like -//! overflows, division by zero, -//! out-of-bound access etc. -//! Uses const propagation to determine the -//! values of operands during checks. +//! A lint that checks for known panics like overflows, division by zero, +//! out-of-bound access etc. Uses const propagation to determine the values of +//! operands during checks. use std::fmt::Debug; @@ -562,7 +560,8 @@ impl<'mir, 'tcx> ConstPropagator<'mir, 'tcx> { let val = self.use_ecx(|this| this.ecx.binary_op(bin_op, &left, &right))?; if matches!(val.layout.abi, Abi::ScalarPair(..)) { - // FIXME `Value` should properly support pairs in `Immediate`... but currently it does not. 
+ // FIXME `Value` should properly support pairs in `Immediate`... but currently + // it does not. let (val, overflow) = val.to_pair(&self.ecx); Value::Aggregate { variant: VariantIdx::ZERO, diff --git a/compiler/rustc_mir_transform/src/large_enums.rs b/compiler/rustc_mir_transform/src/large_enums.rs index f61cf236f77cc..45edcff29eb28 100644 --- a/compiler/rustc_mir_transform/src/large_enums.rs +++ b/compiler/rustc_mir_transform/src/large_enums.rs @@ -16,8 +16,7 @@ use rustc_target::abi::{HasDataLayout, Size, TagEncoding, Variants}; /// Large([u32; 1024]), /// } /// ``` -/// Instead of emitting moves of the large variant, -/// Perform a memcpy instead. +/// Instead of emitting moves of the large variant, perform a memcpy instead. /// Based off of [this HackMD](https://hackmd.io/@ft4bxUsFT5CEUBmRKYHr7w/rJM8BBPzD). /// /// In summary, what this does is at runtime determine which enum variant is active, @@ -34,6 +33,7 @@ impl<'tcx> crate::MirPass<'tcx> for EnumSizeOpt { // https://github.com/rust-lang/rust/pull/85158#issuecomment-1101836457 sess.opts.unstable_opts.unsound_mir_opts || sess.mir_opt_level() >= 3 } + fn run_pass(&self, tcx: TyCtxt<'tcx>, body: &mut Body<'tcx>) { // NOTE: This pass may produce different MIR based on the alignment of the target // platform, but it will still be valid. @@ -116,6 +116,7 @@ impl EnumSizeOpt { let alloc = tcx.reserve_and_set_memory_alloc(tcx.mk_const_alloc(alloc)); Some((*adt_def, num_discrs, *alloc_cache.entry(ty).or_insert(alloc))) } + fn optim<'tcx>(&self, tcx: TyCtxt<'tcx>, body: &mut Body<'tcx>) { let mut alloc_cache = FxHashMap::default(); let body_did = body.source.def_id(); diff --git a/compiler/rustc_mir_transform/src/lib.rs b/compiler/rustc_mir_transform/src/lib.rs index 0bbbf047f634c..dcfcad7cc7c5b 100644 --- a/compiler/rustc_mir_transform/src/lib.rs +++ b/compiler/rustc_mir_transform/src/lib.rs @@ -168,8 +168,9 @@ fn remap_mir_for_const_eval_select<'tcx>( let (method, place): (fn(Place<'tcx>) -> Operand<'tcx>, Place<'tcx>) = match tupled_args.node { Operand::Constant(_) => { - // there is no good way of extracting a tuple arg from a constant (const generic stuff) - // so we just create a temporary and deconstruct that. + // There is no good way of extracting a tuple arg from a constant + // (const generic stuff) so we just create a temporary and deconstruct + // that. let local = body.local_decls.push(LocalDecl::new(ty, fn_span)); bb.statements.push(Statement { source_info: SourceInfo::outermost(fn_span), @@ -480,7 +481,8 @@ pub fn run_analysis_to_runtime_passes<'tcx>(tcx: TyCtxt<'tcx>, body: &mut Body<' &[&remove_uninit_drops::RemoveUninitDrops, &simplify::SimplifyCfg::RemoveFalseEdges], None, ); - check_consts::post_drop_elaboration::check_live_drops(tcx, body); // FIXME: make this a MIR lint + // FIXME: make this a MIR lint + check_consts::post_drop_elaboration::check_live_drops(tcx, body); } debug!("runtime_mir_lowering({:?})", did); @@ -509,10 +511,12 @@ fn run_analysis_cleanup_passes<'tcx>(tcx: TyCtxt<'tcx>, body: &mut Body<'tcx>) { /// Returns the sequence of passes that lowers analysis to runtime MIR. fn run_runtime_lowering_passes<'tcx>(tcx: TyCtxt<'tcx>, body: &mut Body<'tcx>) { let passes: &[&dyn MirPass<'tcx>] = &[ - // These next passes must be executed together + // These next passes must be executed together. &add_call_guards::CriticalCallEdges, - &reveal_all::RevealAll, // has to be done before drop elaboration, since we need to drop opaque types, too. 
- &add_subtyping_projections::Subtyper, // calling this after reveal_all ensures that we don't deal with opaque types + // Must be done before drop elaboration because we need to drop opaque types, too. + &reveal_all::RevealAll, + // Calling this after reveal_all ensures that we don't deal with opaque types. + &add_subtyping_projections::Subtyper, &elaborate_drops::ElaborateDrops, // This will remove extraneous landing pads which are no longer // necessary as well as forcing any call in a non-unwinding @@ -521,8 +525,8 @@ fn run_runtime_lowering_passes<'tcx>(tcx: TyCtxt<'tcx>, body: &mut Body<'tcx>) { // AddMovesForPackedDrops needs to run after drop // elaboration. &add_moves_for_packed_drops::AddMovesForPackedDrops, - // `AddRetag` needs to run after `ElaborateDrops` but before `ElaborateBoxDerefs`. Otherwise it should run fairly late, - // but before optimizations begin. + // `AddRetag` needs to run after `ElaborateDrops` but before `ElaborateBoxDerefs`. + // Otherwise it should run fairly late, but before optimizations begin. &add_retag::AddRetag, &elaborate_box_derefs::ElaborateBoxDerefs, &coroutine::StateTransform, @@ -563,13 +567,15 @@ fn run_optimization_passes<'tcx>(tcx: TyCtxt<'tcx>, body: &mut Body<'tcx>) { // Before inlining: trim down MIR with passes to reduce inlining work. // Has to be done before inlining, otherwise actual call will be almost always inlined. - // Also simple, so can just do first + // Also simple, so can just do first. &lower_slice_len::LowerSliceLenCalls, - // Perform instsimplify before inline to eliminate some trivial calls (like clone shims). + // Perform instsimplify before inline to eliminate some trivial calls (like clone + // shims). &instsimplify::InstSimplify::BeforeInline, // Perform inlining, which may add a lot of code. &inline::Inline, - // Code from other crates may have storage markers, so this needs to happen after inlining. + // Code from other crates may have storage markers, so this needs to happen after + // inlining. &remove_storage_markers::RemoveStorageMarkers, // Inlining and instantiation may introduce ZST and useless drops. &remove_zsts::RemoveZsts, @@ -586,7 +592,8 @@ fn run_optimization_passes<'tcx>(tcx: TyCtxt<'tcx>, body: &mut Body<'tcx>) { &match_branches::MatchBranchSimplification, // inst combine is after MatchBranchSimplification to clean up Ne(_1, false) &multiple_return_terminators::MultipleReturnTerminators, - // After simplifycfg, it allows us to discover new opportunities for peephole optimizations. + // After simplifycfg, it allows us to discover new opportunities for peephole + // optimizations. &instsimplify::InstSimplify::AfterSimplifyCfg, &simplify::SimplifyLocals::BeforeConstProp, &dead_store_elimination::DeadStoreElimination::Initial, diff --git a/compiler/rustc_mir_transform/src/match_branches.rs b/compiler/rustc_mir_transform/src/match_branches.rs index 2a2616b20a6de..0f981425cfd0d 100644 --- a/compiler/rustc_mir_transform/src/match_branches.rs +++ b/compiler/rustc_mir_transform/src/match_branches.rs @@ -57,8 +57,9 @@ impl<'tcx> crate::MirPass<'tcx> for MatchBranchSimplification { } trait SimplifyMatch<'tcx> { - /// Simplifies a match statement, returning true if the simplification succeeds, false otherwise. - /// Generic code is written here, and we generally don't need a custom implementation. + /// Simplifies a match statement, returning true if the simplification succeeds, false + /// otherwise. Generic code is written here, and we generally don't need a custom + /// implementation. 
fn simplify( &mut self, tcx: TyCtxt<'tcx>, @@ -240,7 +241,8 @@ impl<'tcx> SimplifyMatch<'tcx> for SimplifyToIf { // Same value in both blocks. Use statement as is. patch.add_statement(parent_end, f.kind.clone()); } else { - // Different value between blocks. Make value conditional on switch condition. + // Different value between blocks. Make value conditional on switch + // condition. let size = tcx.layout_of(param_env.and(discr_ty)).unwrap().size; let const_cmp = Operand::const_from_scalar( tcx, @@ -394,14 +396,16 @@ impl<'tcx> SimplifyMatch<'tcx> for SimplifyToExp { return None; } - // We first compare the two branches, and then the other branches need to fulfill the same conditions. + // We first compare the two branches, and then the other branches need to fulfill the same + // conditions. let mut expected_transform_kinds = Vec::new(); for (f, s) in iter::zip(first_stmts, second_stmts) { let compare_type = match (&f.kind, &s.kind) { // If two statements are exactly the same, we can optimize. (f_s, s_s) if f_s == s_s => ExpectedTransformKind::Same(f_s), - // If two statements are assignments with the match values to the same place, we can optimize. + // If two statements are assignments with the match values to the same place, we + // can optimize. ( StatementKind::Assign(box (lhs_f, Rvalue::Use(Operand::Constant(f_c)))), StatementKind::Assign(box (lhs_s, Rvalue::Use(Operand::Constant(s_c)))), diff --git a/compiler/rustc_mir_transform/src/mentioned_items.rs b/compiler/rustc_mir_transform/src/mentioned_items.rs index 7f9d0a5b6985c..4402e0dd42d40 100644 --- a/compiler/rustc_mir_transform/src/mentioned_items.rs +++ b/compiler/rustc_mir_transform/src/mentioned_items.rs @@ -82,7 +82,9 @@ impl<'tcx> Visitor<'tcx> for MentionedItemsVisitor<'_, 'tcx> { source_ty.builtin_deref(true).map(|t| t.kind()), target_ty.builtin_deref(true).map(|t| t.kind()), ) { - (Some(ty::Array(..)), Some(ty::Str | ty::Slice(..))) => false, // &str/&[T] unsizing + // &str/&[T] unsizing + (Some(ty::Array(..)), Some(ty::Str | ty::Slice(..))) => false, + _ => true, }; if may_involve_vtable { diff --git a/compiler/rustc_mir_transform/src/prettify.rs b/compiler/rustc_mir_transform/src/prettify.rs index ef011d230c279..937c207776b35 100644 --- a/compiler/rustc_mir_transform/src/prettify.rs +++ b/compiler/rustc_mir_transform/src/prettify.rs @@ -63,7 +63,7 @@ impl<'tcx> crate::MirPass<'tcx> for ReorderLocals { finder.visit_basic_block_data(bb, bbd); } - // track everything in case there are some locals that we never saw, + // Track everything in case there are some locals that we never saw, // such as in non-block things like debug info or in non-uses. for local in body.local_decls.indices() { finder.track(local); @@ -87,7 +87,7 @@ impl<'tcx> crate::MirPass<'tcx> for ReorderLocals { fn permute(data: &mut IndexVec, map: &IndexSlice) { // FIXME: It would be nice to have a less-awkward way to apply permutations, - // but I don't know one that exists. `sort_by_cached_key` has logic for it + // but I don't know one that exists. `sort_by_cached_key` has logic for it // internally, but not in a way that we're allowed to use here. 
let mut enumerated: Vec<_> = std::mem::take(data).into_iter_enumerated().collect(); enumerated.sort_by_key(|p| map[p.0]); diff --git a/compiler/rustc_mir_transform/src/promote_consts.rs b/compiler/rustc_mir_transform/src/promote_consts.rs index 65309f63d5937..c968053a6d6e8 100644 --- a/compiler/rustc_mir_transform/src/promote_consts.rs +++ b/compiler/rustc_mir_transform/src/promote_consts.rs @@ -1,16 +1,14 @@ //! A pass that promotes borrows of constant rvalues. //! -//! The rvalues considered constant are trees of temps, -//! each with exactly one initialization, and holding -//! a constant value with no interior mutability. -//! They are placed into a new MIR constant body in -//! `promoted` and the borrow rvalue is replaced with -//! a `Literal::Promoted` using the index into `promoted` -//! of that constant MIR. +//! The rvalues considered constant are trees of temps, each with exactly one +//! initialization, and holding a constant value with no interior mutability. +//! They are placed into a new MIR constant body in `promoted` and the borrow +//! rvalue is replaced with a `Literal::Promoted` using the index into +//! `promoted` of that constant MIR. //! -//! This pass assumes that every use is dominated by an -//! initialization and can otherwise silence errors, if -//! move analysis runs after promotion on broken MIR. +//! This pass assumes that every use is dominated by an initialization and can +//! otherwise silence errors, if move analysis runs after promotion on broken +//! MIR. use std::assert_matches::assert_matches; use std::cell::Cell; @@ -386,7 +384,8 @@ impl<'tcx> Validator<'_, 'tcx> { fn validate_ref(&mut self, kind: BorrowKind, place: &Place<'tcx>) -> Result<(), Unpromotable> { match kind { // Reject these borrow types just to be safe. - // FIXME(RalfJung): could we allow them? Should we? No point in it until we have a usecase. + // FIXME(RalfJung): could we allow them? Should we? No point in it until we have a + // usecase. BorrowKind::Fake(_) | BorrowKind::Mut { kind: MutBorrowKind::ClosureCapture } => { return Err(Unpromotable); } @@ -468,7 +467,8 @@ impl<'tcx> Validator<'_, 'tcx> { let lhs_ty = lhs.ty(self.body, self.tcx); if let ty::RawPtr(_, _) | ty::FnPtr(..) = lhs_ty.kind() { - // Raw and fn pointer operations are not allowed inside consts and thus not promotable. + // Raw and fn pointer operations are not allowed inside consts and thus not + // promotable. assert_matches!( op, BinOp::Eq @@ -498,7 +498,8 @@ impl<'tcx> Validator<'_, 'tcx> { Some(x) if x != 0 => {} // okay _ => return Err(Unpromotable), // value not known or 0 -- not okay } - // Furthermore, for signed division, we also have to exclude `int::MIN / -1`. + // Furthermore, for signed division, we also have to exclude `int::MIN / + // -1`. if lhs_ty.is_signed() { match rhs_val.map(|x| x.to_int(sz)) { Some(-1) | None => { @@ -512,8 +513,11 @@ impl<'tcx> Validator<'_, 'tcx> { }; let lhs_min = sz.signed_int_min(); match lhs_val.map(|x| x.to_int(sz)) { - Some(x) if x != lhs_min => {} // okay - _ => return Err(Unpromotable), // value not known or int::MIN -- not okay + // okay + Some(x) if x != lhs_min => {} + + // value not known or int::MIN -- not okay + _ => return Err(Unpromotable), } } _ => {} @@ -815,8 +819,8 @@ impl<'a, 'tcx> Promoter<'a, 'tcx> { TerminatorKind::Call { mut func, mut args, call_source: desugar, fn_span, .. } => { - // This promoted involves a function call, so it may fail to evaluate. - // Let's make sure it is added to `required_consts` so that failure cannot get lost. 
+ // This promoted involves a function call, so it may fail to evaluate. Let's + // make sure it is added to `required_consts` so that failure cannot get lost. self.add_to_required = true; self.visit_operand(&mut func, loc); diff --git a/compiler/rustc_mir_transform/src/remove_uninit_drops.rs b/compiler/rustc_mir_transform/src/remove_uninit_drops.rs index c58f492655a84..d80a4edecdf37 100644 --- a/compiler/rustc_mir_transform/src/remove_uninit_drops.rs +++ b/compiler/rustc_mir_transform/src/remove_uninit_drops.rs @@ -106,8 +106,9 @@ fn is_needs_drop_and_init<'tcx>( // If its projection *is* present in `MoveData`, then the field may have been moved // from separate from its parent. Recurse. adt.variants().iter_enumerated().any(|(vid, variant)| { - // Enums have multiple variants, which are discriminated with a `Downcast` projection. - // Structs have a single variant, and don't use a `Downcast` projection. + // Enums have multiple variants, which are discriminated with a `Downcast` + // projection. Structs have a single variant, and don't use a `Downcast` + // projection. let mpi = if adt.is_enum() { let downcast = move_path_children_matching(move_data, mpi, |x| x.is_downcast_to(vid)); diff --git a/compiler/rustc_mir_transform/src/reveal_all.rs b/compiler/rustc_mir_transform/src/reveal_all.rs index 3db962bd94aee..f3b2f78b31c3a 100644 --- a/compiler/rustc_mir_transform/src/reveal_all.rs +++ b/compiler/rustc_mir_transform/src/reveal_all.rs @@ -35,9 +35,9 @@ impl<'tcx> MutVisitor<'tcx> for RevealAllVisitor<'tcx> { if place.projection.iter().all(|elem| !matches!(elem, ProjectionElem::OpaqueCast(_))) { return; } - // `OpaqueCast` projections are only needed if there are opaque types on which projections are performed. - // After the `RevealAll` pass, all opaque types are replaced with their hidden types, so we don't need these - // projections anymore. + // `OpaqueCast` projections are only needed if there are opaque types on which projections + // are performed. After the `RevealAll` pass, all opaque types are replaced with their + // hidden types, so we don't need these projections anymore. place.projection = self.tcx.mk_place_elems( &place .projection diff --git a/compiler/rustc_mir_transform/src/shim.rs b/compiler/rustc_mir_transform/src/shim.rs index cf8ef580b2720..e1615c76bb6f9 100644 --- a/compiler/rustc_mir_transform/src/shim.rs +++ b/compiler/rustc_mir_transform/src/shim.rs @@ -1003,7 +1003,8 @@ fn build_fn_ptr_addr_shim<'tcx>(tcx: TyCtxt<'tcx>, def_id: DefId, self_ty: Ty<'t let locals = local_decls_for_sig(&sig, span); let source_info = SourceInfo::outermost(span); - // FIXME: use `expose_provenance` once we figure out whether function pointers have meaningful provenance. + // FIXME: use `expose_provenance` once we figure out whether function pointers have meaningful + // provenance. 
let rvalue = Rvalue::Cast( CastKind::FnPtrToPtr, Operand::Move(Place::from(Local::new(1))), diff --git a/compiler/rustc_mir_transform/src/simplify_comparison_integral.rs b/compiler/rustc_mir_transform/src/simplify_comparison_integral.rs index 644bcb58d567d..e8d8335b136b5 100644 --- a/compiler/rustc_mir_transform/src/simplify_comparison_integral.rs +++ b/compiler/rustc_mir_transform/src/simplify_comparison_integral.rs @@ -73,12 +73,13 @@ impl<'tcx> crate::MirPass<'tcx> for SimplifyComparisonIntegral { _ => unreachable!(), } - // delete comparison statement if it the value being switched on was moved, which means it can not be user later on + // delete comparison statement if it the value being switched on was moved, which means + // it can not be user later on if opt.can_remove_bin_op_stmt { bb.statements[opt.bin_op_stmt_idx].make_nop(); } else { - // if the integer being compared to a const integral is being moved into the comparison, - // e.g `_2 = Eq(move _3, const 'x');` + // if the integer being compared to a const integral is being moved into the + // comparison, e.g `_2 = Eq(move _3, const 'x');` // we want to avoid making a double move later on in the switchInt on _3. // So to avoid `switchInt(move _3) -> ['x': bb2, otherwise: bb1];`, // we convert the move in the comparison statement to a copy. @@ -102,12 +103,15 @@ impl<'tcx> crate::MirPass<'tcx> for SimplifyComparisonIntegral { // remove StorageDead (if it exists) being used in the assign of the comparison for (stmt_idx, stmt) in bb.statements.iter().enumerate() { - if !matches!(stmt.kind, StatementKind::StorageDead(local) if local == opt.to_switch_on.local) - { + if !matches!( + stmt.kind, + StatementKind::StorageDead(local) if local == opt.to_switch_on.local + ) { continue; } storage_deads_to_remove.push((stmt_idx, opt.bb_idx)); - // if we have StorageDeads to remove then make sure to insert them at the top of each target + // if we have StorageDeads to remove then make sure to insert them at the top of + // each target for bb_idx in new_targets.all_targets() { storage_deads_to_insert.push(( *bb_idx, @@ -207,7 +211,8 @@ fn find_branch_value_info<'tcx>( (Constant(branch_value), Copy(to_switch_on) | Move(to_switch_on)) | (Copy(to_switch_on) | Move(to_switch_on), Constant(branch_value)) => { let branch_value_ty = branch_value.const_.ty(); - // we only want to apply this optimization if we are matching on integrals (and chars), as it is not possible to switch on floats + // we only want to apply this optimization if we are matching on integrals (and chars), + // as it is not possible to switch on floats if !branch_value_ty.is_integral() && !branch_value_ty.is_char() { return None; }; @@ -222,7 +227,8 @@ fn find_branch_value_info<'tcx>( struct OptimizationInfo<'tcx> { /// Basic block to apply the optimization bb_idx: BasicBlock, - /// Statement index of Eq/Ne assignment that can be removed. None if the assignment can not be removed - i.e the statement is used later on + /// Statement index of Eq/Ne assignment that can be removed. 
None if the assignment can not be + /// removed - i.e the statement is used later on bin_op_stmt_idx: usize, /// Can remove Eq/Ne assignment can_remove_bin_op_stmt: bool, diff --git a/compiler/rustc_mir_transform/src/unreachable_enum_branching.rs b/compiler/rustc_mir_transform/src/unreachable_enum_branching.rs index 6957394ed1044..5612e779d6bca 100644 --- a/compiler/rustc_mir_transform/src/unreachable_enum_branching.rs +++ b/compiler/rustc_mir_transform/src/unreachable_enum_branching.rs @@ -156,9 +156,9 @@ impl<'tcx> crate::MirPass<'tcx> for UnreachableEnumBranching { }; true } - // If and only if there is a variant that does not have a branch set, - // change the current of otherwise as the variant branch and set otherwise to unreachable. - // It transforms following code + // If and only if there is a variant that does not have a branch set, change the + // current of otherwise as the variant branch and set otherwise to unreachable. It + // transforms following code // ```rust // match c { // Ordering::Less => 1, diff --git a/compiler/rustc_mir_transform/src/unreachable_prop.rs b/compiler/rustc_mir_transform/src/unreachable_prop.rs index c60cbae21422c..f3dafd13824f3 100644 --- a/compiler/rustc_mir_transform/src/unreachable_prop.rs +++ b/compiler/rustc_mir_transform/src/unreachable_prop.rs @@ -26,7 +26,8 @@ impl crate::MirPass<'_> for UnreachablePropagation { let terminator = bb_data.terminator(); let is_unreachable = match &terminator.kind { TerminatorKind::Unreachable => true, - // This will unconditionally run into an unreachable and is therefore unreachable as well. + // This will unconditionally run into an unreachable and is therefore unreachable + // as well. TerminatorKind::Goto { target } if unreachable_blocks.contains(target) => { patch.patch_terminator(bb, TerminatorKind::Unreachable); true @@ -85,8 +86,9 @@ fn remove_successors_from_switch<'tcx>( // } // } // - // This generates a `switchInt() -> [0: 0, 1: 1, otherwise: unreachable]`, which allows us or LLVM to - // turn it into just `x` later. Without the unreachable, such a transformation would be illegal. + // This generates a `switchInt() -> [0: 0, 1: 1, otherwise: unreachable]`, which allows us or + // LLVM to turn it into just `x` later. Without the unreachable, such a transformation would be + // illegal. // // In order to preserve this information, we record reachable and unreachable targets as // `Assume` statements in MIR. diff --git a/compiler/rustc_mir_transform/src/validate.rs b/compiler/rustc_mir_transform/src/validate.rs index 18865c73f75b1..3b84755ddedde 100644 --- a/compiler/rustc_mir_transform/src/validate.rs +++ b/compiler/rustc_mir_transform/src/validate.rs @@ -388,10 +388,11 @@ impl<'a, 'tcx> Visitor<'tcx> for CfgChecker<'a, 'tcx> { } self.check_unwind_edge(location, unwind); - // The code generation assumes that there are no critical call edges. The assumption - // is used to simplify inserting code that should be executed along the return edge - // from the call. FIXME(tmiasko): Since this is a strictly code generation concern, - // the code generation should be responsible for handling it. + // The code generation assumes that there are no critical call edges. The + // assumption is used to simplify inserting code that should be executed along + // the return edge from the call. FIXME(tmiasko): Since this is a strictly code + // generation concern, the code generation should be responsible for handling + // it. 
if self.mir_phase >= MirPhase::Runtime(RuntimePhase::Optimized) && self.is_critical_call_edge(target, unwind) { @@ -404,8 +405,8 @@ impl<'a, 'tcx> Visitor<'tcx> for CfgChecker<'a, 'tcx> { ); } - // The call destination place and Operand::Move place used as an argument might be - // passed by a reference to the callee. Consequently they cannot be packed. + // The call destination place and Operand::Move place used as an argument might + // be passed by a reference to the callee. Consequently they cannot be packed. if is_within_packed(self.tcx, &self.body.local_decls, destination).is_some() { // This is bad! The callee will expect the memory to be aligned. self.fail( @@ -953,9 +954,9 @@ impl<'a, 'tcx> Visitor<'tcx> for TypeChecker<'a, 'tcx> { } AggregateKind::RawPtr(pointee_ty, mutability) => { if !matches!(self.mir_phase, MirPhase::Runtime(_)) { - // It would probably be fine to support this in earlier phases, - // but at the time of writing it's only ever introduced from intrinsic lowering, - // so earlier things just `bug!` on it. + // It would probably be fine to support this in earlier phases, but at the + // time of writing it's only ever introduced from intrinsic lowering, so + // earlier things just `bug!` on it. self.fail(location, "RawPtr should be in runtime MIR only"); } @@ -1109,10 +1110,10 @@ impl<'a, 'tcx> Visitor<'tcx> for TypeChecker<'a, 'tcx> { } UnOp::PtrMetadata => { if !matches!(self.mir_phase, MirPhase::Runtime(_)) { - // It would probably be fine to support this in earlier phases, - // but at the time of writing it's only ever introduced from intrinsic lowering - // or other runtime-phase optimization passes, - // so earlier things can just `bug!` on it. + // It would probably be fine to support this in earlier phases, but at + // the time of writing it's only ever introduced from intrinsic + // lowering or other runtime-phase optimization passes, so earlier + // things can just `bug!` on it. self.fail(location, "PtrMetadata should be in runtime MIR only"); } @@ -1506,7 +1507,8 @@ impl<'a, 'tcx> Visitor<'tcx> for TypeChecker<'a, 'tcx> { } if let TerminatorKind::TailCall { .. } = terminator.kind { - // FIXME(explicit_tail_calls): implement tail-call specific checks here (such as signature matching, forbidding closures, etc) + // FIXME(explicit_tail_calls): implement tail-call specific checks here (such + // as signature matching, forbidding closures, etc) } } TerminatorKind::Assert { cond, .. } => { From 48064d44982d505412d9328004ed5cc67cb5f210 Mon Sep 17 00:00:00 2001 From: Nicholas Nethercote Date: Wed, 28 Aug 2024 10:13:17 +1000 Subject: [PATCH 04/14] Inline and remove some functions. These are all functions with a single callsite, where having a separate function does nothing to help with readability. These changes make the code a little shorter and easier to read. 
--- .../src/add_call_guards.rs | 6 - .../src/add_moves_for_packed_drops.rs | 45 ++- .../src/add_subtyping_projections.rs | 18 +- compiler/rustc_mir_transform/src/copy_prop.rs | 45 ++- compiler/rustc_mir_transform/src/gvn.rs | 85 +++-- .../rustc_mir_transform/src/instsimplify.rs | 8 +- .../rustc_mir_transform/src/large_enums.rs | 345 +++++++++--------- .../src/lower_slice_len.rs | 26 +- .../src/remove_noop_landing_pads.rs | 113 +++--- compiler/rustc_mir_transform/src/simplify.rs | 48 ++- 10 files changed, 344 insertions(+), 395 deletions(-) diff --git a/compiler/rustc_mir_transform/src/add_call_guards.rs b/compiler/rustc_mir_transform/src/add_call_guards.rs index 18a0746f54f08..24c955c0c78c6 100644 --- a/compiler/rustc_mir_transform/src/add_call_guards.rs +++ b/compiler/rustc_mir_transform/src/add_call_guards.rs @@ -32,12 +32,6 @@ pub(super) use self::AddCallGuards::*; impl<'tcx> crate::MirPass<'tcx> for AddCallGuards { fn run_pass(&self, _tcx: TyCtxt<'tcx>, body: &mut Body<'tcx>) { - self.add_call_guards(body); - } -} - -impl AddCallGuards { - pub(super) fn add_call_guards(&self, body: &mut Body<'_>) { let mut pred_count: IndexVec<_, _> = body.basic_blocks.predecessors().iter().map(|ps| ps.len()).collect(); pred_count[START_BLOCK] += 1; diff --git a/compiler/rustc_mir_transform/src/add_moves_for_packed_drops.rs b/compiler/rustc_mir_transform/src/add_moves_for_packed_drops.rs index 47572d8d3b22b..74df5f7479e06 100644 --- a/compiler/rustc_mir_transform/src/add_moves_for_packed_drops.rs +++ b/compiler/rustc_mir_transform/src/add_moves_for_packed_drops.rs @@ -40,35 +40,34 @@ pub(super) struct AddMovesForPackedDrops; impl<'tcx> crate::MirPass<'tcx> for AddMovesForPackedDrops { fn run_pass(&self, tcx: TyCtxt<'tcx>, body: &mut Body<'tcx>) { debug!("add_moves_for_packed_drops({:?} @ {:?})", body.source, body.span); - add_moves_for_packed_drops(tcx, body); - } -} - -fn add_moves_for_packed_drops<'tcx>(tcx: TyCtxt<'tcx>, body: &mut Body<'tcx>) { - let patch = add_moves_for_packed_drops_patch(tcx, body); - patch.apply(body); -} -fn add_moves_for_packed_drops_patch<'tcx>(tcx: TyCtxt<'tcx>, body: &Body<'tcx>) -> MirPatch<'tcx> { - let def_id = body.source.def_id(); - let mut patch = MirPatch::new(body); - let param_env = tcx.param_env(def_id); + let def_id = body.source.def_id(); + let mut patch = MirPatch::new(body); + let param_env = tcx.param_env(def_id); - for (bb, data) in body.basic_blocks.iter_enumerated() { - let loc = Location { block: bb, statement_index: data.statements.len() }; - let terminator = data.terminator(); + for (bb, data) in body.basic_blocks.iter_enumerated() { + let loc = Location { block: bb, statement_index: data.statements.len() }; + let terminator = data.terminator(); - match terminator.kind { - TerminatorKind::Drop { place, .. } - if util::is_disaligned(tcx, body, param_env, place) => - { - add_move_for_packed_drop(tcx, body, &mut patch, terminator, loc, data.is_cleanup); + match terminator.kind { + TerminatorKind::Drop { place, .. 
} + if util::is_disaligned(tcx, body, param_env, place) => + { + add_move_for_packed_drop( + tcx, + body, + &mut patch, + terminator, + loc, + data.is_cleanup, + ); + } + _ => {} } - _ => {} } - } - patch + patch.apply(body); + } } fn add_move_for_packed_drop<'tcx>( diff --git a/compiler/rustc_mir_transform/src/add_subtyping_projections.rs b/compiler/rustc_mir_transform/src/add_subtyping_projections.rs index ab6bf18b30cb3..e585e338613d8 100644 --- a/compiler/rustc_mir_transform/src/add_subtyping_projections.rs +++ b/compiler/rustc_mir_transform/src/add_subtyping_projections.rs @@ -51,18 +51,14 @@ impl<'a, 'tcx> MutVisitor<'tcx> for SubTypeChecker<'a, 'tcx> { // // gets transformed to // let temp: rval_ty = rval; // let place: place_ty = temp as place_ty; -fn subtype_finder<'tcx>(tcx: TyCtxt<'tcx>, body: &mut Body<'tcx>) { - let patch = MirPatch::new(body); - let mut checker = SubTypeChecker { tcx, patcher: patch, local_decls: &body.local_decls }; - - for (bb, data) in body.basic_blocks.as_mut_preserves_cfg().iter_enumerated_mut() { - checker.visit_basic_block_data(bb, data); - } - checker.patcher.apply(body); -} - impl<'tcx> crate::MirPass<'tcx> for Subtyper { fn run_pass(&self, tcx: TyCtxt<'tcx>, body: &mut Body<'tcx>) { - subtype_finder(tcx, body); + let patch = MirPatch::new(body); + let mut checker = SubTypeChecker { tcx, patcher: patch, local_decls: &body.local_decls }; + + for (bb, data) in body.basic_blocks.as_mut_preserves_cfg().iter_enumerated_mut() { + checker.visit_basic_block_data(bb, data); + } + checker.patcher.apply(body); } } diff --git a/compiler/rustc_mir_transform/src/copy_prop.rs b/compiler/rustc_mir_transform/src/copy_prop.rs index 1cb56d29b85e0..b3db566912108 100644 --- a/compiler/rustc_mir_transform/src/copy_prop.rs +++ b/compiler/rustc_mir_transform/src/copy_prop.rs @@ -27,37 +27,34 @@ impl<'tcx> crate::MirPass<'tcx> for CopyProp { #[instrument(level = "trace", skip(self, tcx, body))] fn run_pass(&self, tcx: TyCtxt<'tcx>, body: &mut Body<'tcx>) { debug!(def_id = ?body.source.def_id()); - propagate_ssa(tcx, body); - } -} -fn propagate_ssa<'tcx>(tcx: TyCtxt<'tcx>, body: &mut Body<'tcx>) { - let param_env = tcx.param_env_reveal_all_normalized(body.source.def_id()); - let ssa = SsaLocals::new(tcx, body, param_env); + let param_env = tcx.param_env_reveal_all_normalized(body.source.def_id()); + let ssa = SsaLocals::new(tcx, body, param_env); - let fully_moved = fully_moved_locals(&ssa, body); - debug!(?fully_moved); + let fully_moved = fully_moved_locals(&ssa, body); + debug!(?fully_moved); - let mut storage_to_remove = BitSet::new_empty(fully_moved.domain_size()); - for (local, &head) in ssa.copy_classes().iter_enumerated() { - if local != head { - storage_to_remove.insert(head); + let mut storage_to_remove = BitSet::new_empty(fully_moved.domain_size()); + for (local, &head) in ssa.copy_classes().iter_enumerated() { + if local != head { + storage_to_remove.insert(head); + } } - } - let any_replacement = ssa.copy_classes().iter_enumerated().any(|(l, &h)| l != h); + let any_replacement = ssa.copy_classes().iter_enumerated().any(|(l, &h)| l != h); - Replacer { - tcx, - copy_classes: ssa.copy_classes(), - fully_moved, - borrowed_locals: ssa.borrowed_locals(), - storage_to_remove, - } - .visit_body_preserves_cfg(body); + Replacer { + tcx, + copy_classes: ssa.copy_classes(), + fully_moved, + borrowed_locals: ssa.borrowed_locals(), + storage_to_remove, + } + .visit_body_preserves_cfg(body); - if any_replacement { - crate::simplify::remove_unused_definitions(body); + if 
any_replacement { + crate::simplify::remove_unused_definitions(body); + } } } diff --git a/compiler/rustc_mir_transform/src/gvn.rs b/compiler/rustc_mir_transform/src/gvn.rs index 715461de06482..d514664f368d6 100644 --- a/compiler/rustc_mir_transform/src/gvn.rs +++ b/compiler/rustc_mir_transform/src/gvn.rs @@ -119,53 +119,50 @@ impl<'tcx> crate::MirPass<'tcx> for GVN { #[instrument(level = "trace", skip(self, tcx, body))] fn run_pass(&self, tcx: TyCtxt<'tcx>, body: &mut Body<'tcx>) { debug!(def_id = ?body.source.def_id()); - propagate_ssa(tcx, body); - } -} -fn propagate_ssa<'tcx>(tcx: TyCtxt<'tcx>, body: &mut Body<'tcx>) { - let param_env = tcx.param_env_reveal_all_normalized(body.source.def_id()); - let ssa = SsaLocals::new(tcx, body, param_env); - // Clone dominators as we need them while mutating the body. - let dominators = body.basic_blocks.dominators().clone(); - - let mut state = VnState::new(tcx, body, param_env, &ssa, &dominators, &body.local_decls); - ssa.for_each_assignment_mut( - body.basic_blocks.as_mut_preserves_cfg(), - |local, value, location| { - let value = match value { - // We do not know anything of this assigned value. - AssignedValue::Arg | AssignedValue::Terminator => None, - // Try to get some insight. - AssignedValue::Rvalue(rvalue) => { - let value = state.simplify_rvalue(rvalue, location); - // FIXME(#112651) `rvalue` may have a subtype to `local`. We can only mark - // `local` as reusable if we have an exact type match. - if state.local_decls[local].ty != rvalue.ty(state.local_decls, tcx) { - return; + let param_env = tcx.param_env_reveal_all_normalized(body.source.def_id()); + let ssa = SsaLocals::new(tcx, body, param_env); + // Clone dominators as we need them while mutating the body. + let dominators = body.basic_blocks.dominators().clone(); + + let mut state = VnState::new(tcx, body, param_env, &ssa, &dominators, &body.local_decls); + ssa.for_each_assignment_mut( + body.basic_blocks.as_mut_preserves_cfg(), + |local, value, location| { + let value = match value { + // We do not know anything of this assigned value. + AssignedValue::Arg | AssignedValue::Terminator => None, + // Try to get some insight. + AssignedValue::Rvalue(rvalue) => { + let value = state.simplify_rvalue(rvalue, location); + // FIXME(#112651) `rvalue` may have a subtype to `local`. We can only mark + // `local` as reusable if we have an exact type match. + if state.local_decls[local].ty != rvalue.ty(state.local_decls, tcx) { + return; + } + value } - value - } - }; - // `next_opaque` is `Some`, so `new_opaque` must return `Some`. - let value = value.or_else(|| state.new_opaque()).unwrap(); - state.assign(local, value); - }, - ); - - // Stop creating opaques during replacement as it is useless. - state.next_opaque = None; - - let reverse_postorder = body.basic_blocks.reverse_postorder().to_vec(); - for bb in reverse_postorder { - let data = &mut body.basic_blocks.as_mut_preserves_cfg()[bb]; - state.visit_basic_block_data(bb, data); - } + }; + // `next_opaque` is `Some`, so `new_opaque` must return `Some`. + let value = value.or_else(|| state.new_opaque()).unwrap(); + state.assign(local, value); + }, + ); + + // Stop creating opaques during replacement as it is useless. + state.next_opaque = None; - // For each local that is reused (`y` above), we remove its storage statements do avoid any - // difficulty. Those locals are SSA, so should be easy to optimize by LLVM without storage - // statements. 
- StorageRemover { tcx, reused_locals: state.reused_locals }.visit_body_preserves_cfg(body); + let reverse_postorder = body.basic_blocks.reverse_postorder().to_vec(); + for bb in reverse_postorder { + let data = &mut body.basic_blocks.as_mut_preserves_cfg()[bb]; + state.visit_basic_block_data(bb, data); + } + + // For each local that is reused (`y` above), we remove its storage statements do avoid any + // difficulty. Those locals are SSA, so should be easy to optimize by LLVM without storage + // statements. + StorageRemover { tcx, reused_locals: state.reused_locals }.visit_body_preserves_cfg(body); + } } newtype_index! { diff --git a/compiler/rustc_mir_transform/src/instsimplify.rs b/compiler/rustc_mir_transform/src/instsimplify.rs index a9591bf39848f..0b344f29b078b 100644 --- a/compiler/rustc_mir_transform/src/instsimplify.rs +++ b/compiler/rustc_mir_transform/src/instsimplify.rs @@ -18,19 +18,13 @@ pub(super) enum InstSimplify { AfterSimplifyCfg, } -impl InstSimplify { +impl<'tcx> crate::MirPass<'tcx> for InstSimplify { fn name(&self) -> &'static str { match self { InstSimplify::BeforeInline => "InstSimplify-before-inline", InstSimplify::AfterSimplifyCfg => "InstSimplify-after-simplifycfg", } } -} - -impl<'tcx> crate::MirPass<'tcx> for InstSimplify { - fn name(&self) -> &'static str { - self.name() - } fn is_enabled(&self, sess: &rustc_session::Session) -> bool { sess.mir_opt_level() > 0 diff --git a/compiler/rustc_mir_transform/src/large_enums.rs b/compiler/rustc_mir_transform/src/large_enums.rs index 45edcff29eb28..8a33849f20e0b 100644 --- a/compiler/rustc_mir_transform/src/large_enums.rs +++ b/compiler/rustc_mir_transform/src/large_enums.rs @@ -37,7 +37,169 @@ impl<'tcx> crate::MirPass<'tcx> for EnumSizeOpt { fn run_pass(&self, tcx: TyCtxt<'tcx>, body: &mut Body<'tcx>) { // NOTE: This pass may produce different MIR based on the alignment of the target // platform, but it will still be valid. 
- self.optim(tcx, body); + + let mut alloc_cache = FxHashMap::default(); + let body_did = body.source.def_id(); + let param_env = tcx.param_env_reveal_all_normalized(body_did); + + let blocks = body.basic_blocks.as_mut(); + let local_decls = &mut body.local_decls; + + for bb in blocks { + bb.expand_statements(|st| { + let StatementKind::Assign(box ( + lhs, + Rvalue::Use(Operand::Copy(rhs) | Operand::Move(rhs)), + )) = &st.kind + else { + return None; + }; + + let ty = lhs.ty(local_decls, tcx).ty; + + let (adt_def, num_variants, alloc_id) = + self.candidate(tcx, param_env, ty, &mut alloc_cache)?; + + let source_info = st.source_info; + let span = source_info.span; + + let tmp_ty = Ty::new_array(tcx, tcx.types.usize, num_variants as u64); + let size_array_local = local_decls.push(LocalDecl::new(tmp_ty, span)); + let store_live = + Statement { source_info, kind: StatementKind::StorageLive(size_array_local) }; + + let place = Place::from(size_array_local); + let constant_vals = ConstOperand { + span, + user_ty: None, + const_: Const::Val( + ConstValue::Indirect { alloc_id, offset: Size::ZERO }, + tmp_ty, + ), + }; + let rval = Rvalue::Use(Operand::Constant(Box::new(constant_vals))); + let const_assign = + Statement { source_info, kind: StatementKind::Assign(Box::new((place, rval))) }; + + let discr_place = Place::from( + local_decls.push(LocalDecl::new(adt_def.repr().discr_type().to_ty(tcx), span)), + ); + let store_discr = Statement { + source_info, + kind: StatementKind::Assign(Box::new(( + discr_place, + Rvalue::Discriminant(*rhs), + ))), + }; + + let discr_cast_place = + Place::from(local_decls.push(LocalDecl::new(tcx.types.usize, span))); + let cast_discr = Statement { + source_info, + kind: StatementKind::Assign(Box::new(( + discr_cast_place, + Rvalue::Cast( + CastKind::IntToInt, + Operand::Copy(discr_place), + tcx.types.usize, + ), + ))), + }; + + let size_place = + Place::from(local_decls.push(LocalDecl::new(tcx.types.usize, span))); + let store_size = Statement { + source_info, + kind: StatementKind::Assign(Box::new(( + size_place, + Rvalue::Use(Operand::Copy(Place { + local: size_array_local, + projection: tcx + .mk_place_elems(&[PlaceElem::Index(discr_cast_place.local)]), + })), + ))), + }; + + let dst = + Place::from(local_decls.push(LocalDecl::new(Ty::new_mut_ptr(tcx, ty), span))); + let dst_ptr = Statement { + source_info, + kind: StatementKind::Assign(Box::new(( + dst, + Rvalue::RawPtr(Mutability::Mut, *lhs), + ))), + }; + + let dst_cast_ty = Ty::new_mut_ptr(tcx, tcx.types.u8); + let dst_cast_place = + Place::from(local_decls.push(LocalDecl::new(dst_cast_ty, span))); + let dst_cast = Statement { + source_info, + kind: StatementKind::Assign(Box::new(( + dst_cast_place, + Rvalue::Cast(CastKind::PtrToPtr, Operand::Copy(dst), dst_cast_ty), + ))), + }; + + let src = + Place::from(local_decls.push(LocalDecl::new(Ty::new_imm_ptr(tcx, ty), span))); + let src_ptr = Statement { + source_info, + kind: StatementKind::Assign(Box::new(( + src, + Rvalue::RawPtr(Mutability::Not, *rhs), + ))), + }; + + let src_cast_ty = Ty::new_imm_ptr(tcx, tcx.types.u8); + let src_cast_place = + Place::from(local_decls.push(LocalDecl::new(src_cast_ty, span))); + let src_cast = Statement { + source_info, + kind: StatementKind::Assign(Box::new(( + src_cast_place, + Rvalue::Cast(CastKind::PtrToPtr, Operand::Copy(src), src_cast_ty), + ))), + }; + + let deinit_old = + Statement { source_info, kind: StatementKind::Deinit(Box::new(dst)) }; + + let copy_bytes = Statement { + source_info, + kind: 
StatementKind::Intrinsic(Box::new( + NonDivergingIntrinsic::CopyNonOverlapping(CopyNonOverlapping { + src: Operand::Copy(src_cast_place), + dst: Operand::Copy(dst_cast_place), + count: Operand::Copy(size_place), + }), + )), + }; + + let store_dead = + Statement { source_info, kind: StatementKind::StorageDead(size_array_local) }; + + let iter = [ + store_live, + const_assign, + store_discr, + cast_discr, + store_size, + dst_ptr, + dst_cast, + src_ptr, + src_cast, + deinit_old, + copy_bytes, + store_dead, + ] + .into_iter(); + + st.make_nop(); + + Some(iter) + }); + } } } @@ -116,185 +278,4 @@ impl EnumSizeOpt { let alloc = tcx.reserve_and_set_memory_alloc(tcx.mk_const_alloc(alloc)); Some((*adt_def, num_discrs, *alloc_cache.entry(ty).or_insert(alloc))) } - - fn optim<'tcx>(&self, tcx: TyCtxt<'tcx>, body: &mut Body<'tcx>) { - let mut alloc_cache = FxHashMap::default(); - let body_did = body.source.def_id(); - let param_env = tcx.param_env_reveal_all_normalized(body_did); - - let blocks = body.basic_blocks.as_mut(); - let local_decls = &mut body.local_decls; - - for bb in blocks { - bb.expand_statements(|st| { - if let StatementKind::Assign(box ( - lhs, - Rvalue::Use(Operand::Copy(rhs) | Operand::Move(rhs)), - )) = &st.kind - { - let ty = lhs.ty(local_decls, tcx).ty; - - let source_info = st.source_info; - let span = source_info.span; - - let (adt_def, num_variants, alloc_id) = - self.candidate(tcx, param_env, ty, &mut alloc_cache)?; - - let tmp_ty = Ty::new_array(tcx, tcx.types.usize, num_variants as u64); - - let size_array_local = local_decls.push(LocalDecl::new(tmp_ty, span)); - let store_live = Statement { - source_info, - kind: StatementKind::StorageLive(size_array_local), - }; - - let place = Place::from(size_array_local); - let constant_vals = ConstOperand { - span, - user_ty: None, - const_: Const::Val( - ConstValue::Indirect { alloc_id, offset: Size::ZERO }, - tmp_ty, - ), - }; - let rval = Rvalue::Use(Operand::Constant(Box::new(constant_vals))); - - let const_assign = Statement { - source_info, - kind: StatementKind::Assign(Box::new((place, rval))), - }; - - let discr_place = Place::from( - local_decls - .push(LocalDecl::new(adt_def.repr().discr_type().to_ty(tcx), span)), - ); - - let store_discr = Statement { - source_info, - kind: StatementKind::Assign(Box::new(( - discr_place, - Rvalue::Discriminant(*rhs), - ))), - }; - - let discr_cast_place = - Place::from(local_decls.push(LocalDecl::new(tcx.types.usize, span))); - - let cast_discr = Statement { - source_info, - kind: StatementKind::Assign(Box::new(( - discr_cast_place, - Rvalue::Cast( - CastKind::IntToInt, - Operand::Copy(discr_place), - tcx.types.usize, - ), - ))), - }; - - let size_place = - Place::from(local_decls.push(LocalDecl::new(tcx.types.usize, span))); - - let store_size = Statement { - source_info, - kind: StatementKind::Assign(Box::new(( - size_place, - Rvalue::Use(Operand::Copy(Place { - local: size_array_local, - projection: tcx - .mk_place_elems(&[PlaceElem::Index(discr_cast_place.local)]), - })), - ))), - }; - - let dst = Place::from( - local_decls.push(LocalDecl::new(Ty::new_mut_ptr(tcx, ty), span)), - ); - - let dst_ptr = Statement { - source_info, - kind: StatementKind::Assign(Box::new(( - dst, - Rvalue::RawPtr(Mutability::Mut, *lhs), - ))), - }; - - let dst_cast_ty = Ty::new_mut_ptr(tcx, tcx.types.u8); - let dst_cast_place = - Place::from(local_decls.push(LocalDecl::new(dst_cast_ty, span))); - - let dst_cast = Statement { - source_info, - kind: StatementKind::Assign(Box::new(( - dst_cast_place, - 
Rvalue::Cast(CastKind::PtrToPtr, Operand::Copy(dst), dst_cast_ty), - ))), - }; - - let src = Place::from( - local_decls.push(LocalDecl::new(Ty::new_imm_ptr(tcx, ty), span)), - ); - - let src_ptr = Statement { - source_info, - kind: StatementKind::Assign(Box::new(( - src, - Rvalue::RawPtr(Mutability::Not, *rhs), - ))), - }; - - let src_cast_ty = Ty::new_imm_ptr(tcx, tcx.types.u8); - let src_cast_place = - Place::from(local_decls.push(LocalDecl::new(src_cast_ty, span))); - - let src_cast = Statement { - source_info, - kind: StatementKind::Assign(Box::new(( - src_cast_place, - Rvalue::Cast(CastKind::PtrToPtr, Operand::Copy(src), src_cast_ty), - ))), - }; - - let deinit_old = - Statement { source_info, kind: StatementKind::Deinit(Box::new(dst)) }; - - let copy_bytes = Statement { - source_info, - kind: StatementKind::Intrinsic(Box::new( - NonDivergingIntrinsic::CopyNonOverlapping(CopyNonOverlapping { - src: Operand::Copy(src_cast_place), - dst: Operand::Copy(dst_cast_place), - count: Operand::Copy(size_place), - }), - )), - }; - - let store_dead = Statement { - source_info, - kind: StatementKind::StorageDead(size_array_local), - }; - let iter = [ - store_live, - const_assign, - store_discr, - cast_discr, - store_size, - dst_ptr, - dst_cast, - src_ptr, - src_cast, - deinit_old, - copy_bytes, - store_dead, - ] - .into_iter(); - - st.make_nop(); - Some(iter) - } else { - None - } - }); - } - } } diff --git a/compiler/rustc_mir_transform/src/lower_slice_len.rs b/compiler/rustc_mir_transform/src/lower_slice_len.rs index ca59d4d12acad..420661f29c8cf 100644 --- a/compiler/rustc_mir_transform/src/lower_slice_len.rs +++ b/compiler/rustc_mir_transform/src/lower_slice_len.rs @@ -13,22 +13,18 @@ impl<'tcx> crate::MirPass<'tcx> for LowerSliceLenCalls { } fn run_pass(&self, tcx: TyCtxt<'tcx>, body: &mut Body<'tcx>) { - lower_slice_len_calls(tcx, body) - } -} - -fn lower_slice_len_calls<'tcx>(tcx: TyCtxt<'tcx>, body: &mut Body<'tcx>) { - let language_items = tcx.lang_items(); - let Some(slice_len_fn_item_def_id) = language_items.slice_len_fn() else { - // there is no lang item to compare to :) - return; - }; + let language_items = tcx.lang_items(); + let Some(slice_len_fn_item_def_id) = language_items.slice_len_fn() else { + // there is no lang item to compare to :) + return; + }; - // The one successor remains unchanged, so no need to invalidate - let basic_blocks = body.basic_blocks.as_mut_preserves_cfg(); - for block in basic_blocks { - // lower `<[_]>::len` calls - lower_slice_len_call(block, slice_len_fn_item_def_id); + // The one successor remains unchanged, so no need to invalidate + let basic_blocks = body.basic_blocks.as_mut_preserves_cfg(); + for block in basic_blocks { + // lower `<[_]>::len` calls + lower_slice_len_call(block, slice_len_fn_item_def_id); + } } } diff --git a/compiler/rustc_mir_transform/src/remove_noop_landing_pads.rs b/compiler/rustc_mir_transform/src/remove_noop_landing_pads.rs index 37197c3f573d3..55394e93a5cb7 100644 --- a/compiler/rustc_mir_transform/src/remove_noop_landing_pads.rs +++ b/compiler/rustc_mir_transform/src/remove_noop_landing_pads.rs @@ -18,7 +18,61 @@ impl<'tcx> crate::MirPass<'tcx> for RemoveNoopLandingPads { fn run_pass(&self, _tcx: TyCtxt<'tcx>, body: &mut Body<'tcx>) { let def_id = body.source.def_id(); debug!(?def_id); - self.remove_nop_landing_pads(body) + + // Skip the pass if there are no blocks with a resume terminator. 
+ let has_resume = body + .basic_blocks + .iter_enumerated() + .any(|(_bb, block)| matches!(block.terminator().kind, TerminatorKind::UnwindResume)); + if !has_resume { + debug!("remove_noop_landing_pads: no resume block in MIR"); + return; + } + + // make sure there's a resume block without any statements + let resume_block = { + let mut patch = MirPatch::new(body); + let resume_block = patch.resume_block(); + patch.apply(body); + resume_block + }; + debug!("remove_noop_landing_pads: resume block is {:?}", resume_block); + + let mut jumps_folded = 0; + let mut landing_pads_removed = 0; + let mut nop_landing_pads = BitSet::new_empty(body.basic_blocks.len()); + + // This is a post-order traversal, so that if A post-dominates B + // then A will be visited before B. + let postorder: Vec<_> = traversal::postorder(body).map(|(bb, _)| bb).collect(); + for bb in postorder { + debug!(" processing {:?}", bb); + if let Some(unwind) = body[bb].terminator_mut().unwind_mut() { + if let UnwindAction::Cleanup(unwind_bb) = *unwind { + if nop_landing_pads.contains(unwind_bb) { + debug!(" removing noop landing pad"); + landing_pads_removed += 1; + *unwind = UnwindAction::Continue; + } + } + } + + for target in body[bb].terminator_mut().successors_mut() { + if *target != resume_block && nop_landing_pads.contains(*target) { + debug!(" folding noop jump to {:?} to resume block", target); + *target = resume_block; + jumps_folded += 1; + } + } + + let is_nop_landing_pad = self.is_nop_landing_pad(bb, body, &nop_landing_pads); + if is_nop_landing_pad { + nop_landing_pads.insert(bb); + } + debug!(" is_nop_landing_pad({:?}) = {}", bb, is_nop_landing_pad); + } + + debug!("removed {:?} jumps and {:?} landing pads", jumps_folded, landing_pads_removed); } } @@ -82,61 +136,4 @@ impl RemoveNoopLandingPads { | TerminatorKind::InlineAsm { .. } => false, } } - - fn remove_nop_landing_pads(&self, body: &mut Body<'_>) { - // Skip the pass if there are no blocks with a resume terminator. - let has_resume = body - .basic_blocks - .iter_enumerated() - .any(|(_bb, block)| matches!(block.terminator().kind, TerminatorKind::UnwindResume)); - if !has_resume { - debug!("remove_noop_landing_pads: no resume block in MIR"); - return; - } - - // make sure there's a resume block without any statements - let resume_block = { - let mut patch = MirPatch::new(body); - let resume_block = patch.resume_block(); - patch.apply(body); - resume_block - }; - debug!("remove_noop_landing_pads: resume block is {:?}", resume_block); - - let mut jumps_folded = 0; - let mut landing_pads_removed = 0; - let mut nop_landing_pads = BitSet::new_empty(body.basic_blocks.len()); - - // This is a post-order traversal, so that if A post-dominates B - // then A will be visited before B. 
- let postorder: Vec<_> = traversal::postorder(body).map(|(bb, _)| bb).collect(); - for bb in postorder { - debug!(" processing {:?}", bb); - if let Some(unwind) = body[bb].terminator_mut().unwind_mut() { - if let UnwindAction::Cleanup(unwind_bb) = *unwind { - if nop_landing_pads.contains(unwind_bb) { - debug!(" removing noop landing pad"); - landing_pads_removed += 1; - *unwind = UnwindAction::Continue; - } - } - } - - for target in body[bb].terminator_mut().successors_mut() { - if *target != resume_block && nop_landing_pads.contains(*target) { - debug!(" folding noop jump to {:?} to resume block", target); - *target = resume_block; - jumps_folded += 1; - } - } - - let is_nop_landing_pad = self.is_nop_landing_pad(bb, body, &nop_landing_pads); - if is_nop_landing_pad { - nop_landing_pads.insert(bb); - } - debug!(" is_nop_landing_pad({:?}) = {}", bb, is_nop_landing_pad); - } - - debug!("removed {:?} jumps and {:?} landing pads", jumps_folded, landing_pads_removed); - } } diff --git a/compiler/rustc_mir_transform/src/simplify.rs b/compiler/rustc_mir_transform/src/simplify.rs index cb8e1dfda98ba..7ed43547e1127 100644 --- a/compiler/rustc_mir_transform/src/simplify.rs +++ b/compiler/rustc_mir_transform/src/simplify.rs @@ -381,23 +381,33 @@ impl<'tcx> crate::MirPass<'tcx> for SimplifyLocals { fn run_pass(&self, tcx: TyCtxt<'tcx>, body: &mut Body<'tcx>) { trace!("running SimplifyLocals on {:?}", body.source); - simplify_locals(body, tcx); - } -} -pub(super) fn remove_unused_definitions<'tcx>(body: &mut Body<'tcx>) { - // First, we're going to get a count of *actual* uses for every `Local`. - let mut used_locals = UsedLocals::new(body); + // First, we're going to get a count of *actual* uses for every `Local`. + let mut used_locals = UsedLocals::new(body); - // Next, we're going to remove any `Local` with zero actual uses. When we remove those - // `Locals`, we're also going to subtract any uses of other `Locals` from the `used_locals` - // count. For example, if we removed `_2 = discriminant(_1)`, then we'll subtract one from - // `use_counts[_1]`. That in turn might make `_1` unused, so we loop until we hit a - // fixedpoint where there are no more unused locals. - remove_unused_definitions_helper(&mut used_locals, body); + // Next, we're going to remove any `Local` with zero actual uses. When we remove those + // `Locals`, we're also going to subtract any uses of other `Locals` from the `used_locals` + // count. For example, if we removed `_2 = discriminant(_1)`, then we'll subtract one from + // `use_counts[_1]`. That in turn might make `_1` unused, so we loop until we hit a + // fixedpoint where there are no more unused locals. + remove_unused_definitions_helper(&mut used_locals, body); + + // Finally, we'll actually do the work of shrinking `body.local_decls` and remapping the + // `Local`s. + let map = make_local_map(&mut body.local_decls, &used_locals); + + // Only bother running the `LocalUpdater` if we actually found locals to remove. + if map.iter().any(Option::is_none) { + // Update references to all vars and tmps now + let mut updater = LocalUpdater { map, tcx }; + updater.visit_body_preserves_cfg(body); + + body.local_decls.shrink_to_fit(); + } + } } -fn simplify_locals<'tcx>(body: &mut Body<'tcx>, tcx: TyCtxt<'tcx>) { +pub(super) fn remove_unused_definitions<'tcx>(body: &mut Body<'tcx>) { // First, we're going to get a count of *actual* uses for every `Local`. 
let mut used_locals = UsedLocals::new(body); @@ -407,18 +417,6 @@ fn simplify_locals<'tcx>(body: &mut Body<'tcx>, tcx: TyCtxt<'tcx>) { // `use_counts[_1]`. That in turn might make `_1` unused, so we loop until we hit a // fixedpoint where there are no more unused locals. remove_unused_definitions_helper(&mut used_locals, body); - - // Finally, we'll actually do the work of shrinking `body.local_decls` and remapping the `Local`s. - let map = make_local_map(&mut body.local_decls, &used_locals); - - // Only bother running the `LocalUpdater` if we actually found locals to remove. - if map.iter().any(Option::is_none) { - // Update references to all vars and tmps now - let mut updater = LocalUpdater { map, tcx }; - updater.visit_body_preserves_cfg(body); - - body.local_decls.shrink_to_fit(); - } } /// Construct the mapping while swapping out unused stuff out from the `vec`. From baa16d24715abeed857202ad917a0f05e364b8a5 Mon Sep 17 00:00:00 2001 From: Nicholas Nethercote Date: Fri, 30 Aug 2024 13:51:59 +1000 Subject: [PATCH 05/14] Clarify a comment. The "as" is equivalent to "because", but I originally read it more like "when", which was confusing. --- compiler/rustc_mir_transform/src/gvn.rs | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/compiler/rustc_mir_transform/src/gvn.rs b/compiler/rustc_mir_transform/src/gvn.rs index d514664f368d6..0b78affc31cc1 100644 --- a/compiler/rustc_mir_transform/src/gvn.rs +++ b/compiler/rustc_mir_transform/src/gvn.rs @@ -122,7 +122,7 @@ impl<'tcx> crate::MirPass<'tcx> for GVN { let param_env = tcx.param_env_reveal_all_normalized(body.source.def_id()); let ssa = SsaLocals::new(tcx, body, param_env); - // Clone dominators as we need them while mutating the body. + // Clone dominators because we need them while mutating the body. let dominators = body.basic_blocks.dominators().clone(); let mut state = VnState::new(tcx, body, param_env, &ssa, &dominators, &body.local_decls); From e26692d559c3b564a3efdd5b6db316c0c67fb52e Mon Sep 17 00:00:00 2001 From: Nicholas Nethercote Date: Tue, 3 Sep 2024 11:00:15 +1000 Subject: [PATCH 06/14] Add a useful comment. --- compiler/rustc_mir_transform/src/large_enums.rs | 2 ++ 1 file changed, 2 insertions(+) diff --git a/compiler/rustc_mir_transform/src/large_enums.rs b/compiler/rustc_mir_transform/src/large_enums.rs index 8a33849f20e0b..3e263aa406774 100644 --- a/compiler/rustc_mir_transform/src/large_enums.rs +++ b/compiler/rustc_mir_transform/src/large_enums.rs @@ -244,6 +244,8 @@ impl EnumSizeOpt { let ptr_sized_int = data_layout.ptr_sized_integer(); let target_bytes = ptr_sized_int.size().bytes() as usize; let mut data = vec![0; target_bytes * num_discrs]; + + // We use a macro because `$bytes` can be u32 or u64. macro_rules! encode_store { ($curr_idx: expr, $endian: expr, $bytes: expr) => { let bytes = match $endian { From 51e1c3958dd68372c4a4499d59223ccecc1a7955 Mon Sep 17 00:00:00 2001 From: Nicholas Nethercote Date: Wed, 4 Sep 2024 13:20:20 +1000 Subject: [PATCH 07/14] Add a useful comment about `PromoteTemps`. This was non-obvious to me. --- compiler/rustc_mir_transform/src/promote_consts.rs | 1 + 1 file changed, 1 insertion(+) diff --git a/compiler/rustc_mir_transform/src/promote_consts.rs b/compiler/rustc_mir_transform/src/promote_consts.rs index c968053a6d6e8..59df99f858dcf 100644 --- a/compiler/rustc_mir_transform/src/promote_consts.rs +++ b/compiler/rustc_mir_transform/src/promote_consts.rs @@ -36,6 +36,7 @@ use tracing::{debug, instrument}; /// newly created `Constant`. 
#[derive(Default)] pub(super) struct PromoteTemps<'tcx> { + // Must use `Cell` because `run_pass` takes `&self`, not `&mut self`. pub promoted_fragments: Cell>>, } From d1c55a305eefedbe3153c7348c4b373ecd07d144 Mon Sep 17 00:00:00 2001 From: Nicholas Nethercote Date: Wed, 4 Sep 2024 13:58:26 +1000 Subject: [PATCH 08/14] Use `IndexVec::from_raw` to construct a const `IndexVec`. --- compiler/rustc_mir_transform/src/shim.rs | 5 ++--- 1 file changed, 2 insertions(+), 3 deletions(-) diff --git a/compiler/rustc_mir_transform/src/shim.rs b/compiler/rustc_mir_transform/src/shim.rs index e1615c76bb6f9..f1bd803d83530 100644 --- a/compiler/rustc_mir_transform/src/shim.rs +++ b/compiler/rustc_mir_transform/src/shim.rs @@ -404,8 +404,7 @@ fn build_thread_local_shim<'tcx>( let span = tcx.def_span(def_id); let source_info = SourceInfo::outermost(span); - let mut blocks = IndexVec::with_capacity(1); - blocks.push(BasicBlockData { + let blocks = IndexVec::from_raw(vec![BasicBlockData { statements: vec![Statement { source_info, kind: StatementKind::Assign(Box::new(( @@ -415,7 +414,7 @@ fn build_thread_local_shim<'tcx>( }], terminator: Some(Terminator { source_info, kind: TerminatorKind::Return }), is_cleanup: false, - }); + }]); new_body( MirSource::from_instance(instance), From 702340269136b9818359a82c5f7fc659ec26a0bf Mon Sep 17 00:00:00 2001 From: Nicholas Nethercote Date: Wed, 4 Sep 2024 13:47:26 +1000 Subject: [PATCH 09/14] Remove references from some structs. In all cases the struct can own the relevant thing instead of having a reference to it. This makes the code simpler, and in some cases removes a struct lifetime. --- compiler/rustc_mir_transform/src/dest_prop.rs | 14 +++---- compiler/rustc_mir_transform/src/gvn.rs | 8 ++-- .../rustc_mir_transform/src/jump_threading.rs | 42 +++++++++---------- compiler/rustc_mir_transform/src/lib.rs | 14 ++++--- .../src/mentioned_items.rs | 8 ++-- compiler/rustc_mir_transform/src/ref_prop.rs | 15 +++---- .../src/required_consts.rs | 19 ++++----- 7 files changed, 56 insertions(+), 64 deletions(-) diff --git a/compiler/rustc_mir_transform/src/dest_prop.rs b/compiler/rustc_mir_transform/src/dest_prop.rs index 175097e30ee30..f123658580da4 100644 --- a/compiler/rustc_mir_transform/src/dest_prop.rs +++ b/compiler/rustc_mir_transform/src/dest_prop.rs @@ -242,7 +242,7 @@ impl<'tcx> crate::MirPass<'tcx> for DestinationPropagation { } round_count += 1; - apply_merges(body, tcx, &merges, &merged_locals); + apply_merges(body, tcx, merges, merged_locals); } trace!(round_count); @@ -281,20 +281,20 @@ struct Candidates { fn apply_merges<'tcx>( body: &mut Body<'tcx>, tcx: TyCtxt<'tcx>, - merges: &FxIndexMap, - merged_locals: &BitSet, + merges: FxIndexMap, + merged_locals: BitSet, ) { let mut merger = Merger { tcx, merges, merged_locals }; merger.visit_body_preserves_cfg(body); } -struct Merger<'a, 'tcx> { +struct Merger<'tcx> { tcx: TyCtxt<'tcx>, - merges: &'a FxIndexMap, - merged_locals: &'a BitSet, + merges: FxIndexMap, + merged_locals: BitSet, } -impl<'a, 'tcx> MutVisitor<'tcx> for Merger<'a, 'tcx> { +impl<'tcx> MutVisitor<'tcx> for Merger<'tcx> { fn tcx(&self) -> TyCtxt<'tcx> { self.tcx } diff --git a/compiler/rustc_mir_transform/src/gvn.rs b/compiler/rustc_mir_transform/src/gvn.rs index 0b78affc31cc1..598bf61483d2f 100644 --- a/compiler/rustc_mir_transform/src/gvn.rs +++ b/compiler/rustc_mir_transform/src/gvn.rs @@ -125,7 +125,7 @@ impl<'tcx> crate::MirPass<'tcx> for GVN { // Clone dominators because we need them while mutating the body. 
let dominators = body.basic_blocks.dominators().clone(); - let mut state = VnState::new(tcx, body, param_env, &ssa, &dominators, &body.local_decls); + let mut state = VnState::new(tcx, body, param_env, &ssa, dominators, &body.local_decls); ssa.for_each_assignment_mut( body.basic_blocks.as_mut_preserves_cfg(), |local, value, location| { @@ -258,7 +258,7 @@ struct VnState<'body, 'tcx> { /// Cache the value of the `unsized_locals` features, to avoid fetching it repeatedly in a loop. feature_unsized_locals: bool, ssa: &'body SsaLocals, - dominators: &'body Dominators, + dominators: Dominators, reused_locals: BitSet, } @@ -268,7 +268,7 @@ impl<'body, 'tcx> VnState<'body, 'tcx> { body: &Body<'tcx>, param_env: ty::ParamEnv<'tcx>, ssa: &'body SsaLocals, - dominators: &'body Dominators, + dominators: Dominators, local_decls: &'body LocalDecls<'tcx>, ) -> Self { // Compute a rough estimate of the number of values in the body from the number of @@ -1490,7 +1490,7 @@ impl<'tcx> VnState<'_, 'tcx> { let other = self.rev_locals.get(index)?; other .iter() - .find(|&&other| self.ssa.assignment_dominates(self.dominators, other, loc)) + .find(|&&other| self.ssa.assignment_dominates(&self.dominators, other, loc)) .copied() } } diff --git a/compiler/rustc_mir_transform/src/jump_threading.rs b/compiler/rustc_mir_transform/src/jump_threading.rs index e950c2cb72ab8..d48df59ada860 100644 --- a/compiler/rustc_mir_transform/src/jump_threading.rs +++ b/compiler/rustc_mir_transform/src/jump_threading.rs @@ -78,18 +78,16 @@ impl<'tcx> crate::MirPass<'tcx> for JumpThreading { } let param_env = tcx.param_env_reveal_all_normalized(def_id); - let map = Map::new(tcx, body, Some(MAX_PLACES)); - let loop_headers = loop_headers(body); - let arena = DroplessArena::default(); + let arena = &DroplessArena::default(); let mut finder = TOFinder { tcx, param_env, ecx: InterpCx::new(tcx, DUMMY_SP, param_env, DummyMachine), body, - arena: &arena, - map: &map, - loop_headers: &loop_headers, + arena, + map: Map::new(tcx, body, Some(MAX_PLACES)), + loop_headers: loop_headers(body), opportunities: Vec::new(), }; @@ -105,7 +103,7 @@ impl<'tcx> crate::MirPass<'tcx> for JumpThreading { // Verify that we do not thread through a loop header. for to in opportunities.iter() { - assert!(to.chain.iter().all(|&block| !loop_headers.contains(block))); + assert!(to.chain.iter().all(|&block| !finder.loop_headers.contains(block))); } OpportunitySet::new(body, opportunities).apply(body); } @@ -124,8 +122,8 @@ struct TOFinder<'tcx, 'a> { param_env: ty::ParamEnv<'tcx>, ecx: InterpCx<'tcx, DummyMachine>, body: &'a Body<'tcx>, - map: &'a Map<'tcx>, - loop_headers: &'a BitSet, + map: Map<'tcx>, + loop_headers: BitSet, /// We use an arena to avoid cloning the slices when cloning `state`. arena: &'a DroplessArena, opportunities: Vec, @@ -223,7 +221,7 @@ impl<'tcx, 'a> TOFinder<'tcx, 'a> { })) }; let conds = ConditionSet(conds); - state.insert_value_idx(discr, conds, self.map); + state.insert_value_idx(discr, conds, &self.map); self.find_opportunity(bb, state, cost, 0); } @@ -264,7 +262,7 @@ impl<'tcx, 'a> TOFinder<'tcx, 'a> { // _1 = 5 // Whatever happens here, it won't change the result of a `SwitchInt`. 
// _1 = 6 if let Some((lhs, tail)) = self.mutated_statement(stmt) { - state.flood_with_tail_elem(lhs.as_ref(), tail, self.map, ConditionSet::BOTTOM); + state.flood_with_tail_elem(lhs.as_ref(), tail, &self.map, ConditionSet::BOTTOM); } } @@ -370,7 +368,7 @@ impl<'tcx, 'a> TOFinder<'tcx, 'a> { self.opportunities.push(ThreadingOpportunity { chain: vec![bb], target: c.target }) }; - if let Some(conditions) = state.try_get_idx(lhs, self.map) + if let Some(conditions) = state.try_get_idx(lhs, &self.map) && let Immediate::Scalar(Scalar::Int(int)) = *rhs { conditions.iter_matches(int).for_each(register_opportunity); @@ -406,7 +404,7 @@ impl<'tcx, 'a> TOFinder<'tcx, 'a> { } }, &mut |place, op| { - if let Some(conditions) = state.try_get_idx(place, self.map) + if let Some(conditions) = state.try_get_idx(place, &self.map) && let Ok(imm) = self.ecx.read_immediate_raw(op) && let Some(imm) = imm.right() && let Immediate::Scalar(Scalar::Int(int)) = *imm @@ -441,7 +439,7 @@ impl<'tcx, 'a> TOFinder<'tcx, 'a> { // Transfer the conditions on the copied rhs. Operand::Move(rhs) | Operand::Copy(rhs) => { let Some(rhs) = self.map.find(rhs.as_ref()) else { return }; - state.insert_place_idx(rhs, lhs, self.map); + state.insert_place_idx(rhs, lhs, &self.map); } } } @@ -461,7 +459,7 @@ impl<'tcx, 'a> TOFinder<'tcx, 'a> { Rvalue::CopyForDeref(rhs) => self.process_operand(bb, lhs, &Operand::Copy(*rhs), state), Rvalue::Discriminant(rhs) => { let Some(rhs) = self.map.find_discr(rhs.as_ref()) else { return }; - state.insert_place_idx(rhs, lhs, self.map); + state.insert_place_idx(rhs, lhs, &self.map); } // If we expect `lhs ?= A`, we have an opportunity if we assume `constant == A`. Rvalue::Aggregate(box ref kind, ref operands) => { @@ -492,10 +490,10 @@ impl<'tcx, 'a> TOFinder<'tcx, 'a> { } // Transfer the conditions on the copy rhs, after inversing polarity. Rvalue::UnaryOp(UnOp::Not, Operand::Move(place) | Operand::Copy(place)) => { - let Some(conditions) = state.try_get_idx(lhs, self.map) else { return }; + let Some(conditions) = state.try_get_idx(lhs, &self.map) else { return }; let Some(place) = self.map.find(place.as_ref()) else { return }; let conds = conditions.map(self.arena, Condition::inv); - state.insert_value_idx(place, conds, self.map); + state.insert_value_idx(place, conds, &self.map); } // We expect `lhs ?= A`. We found `lhs = Eq(rhs, B)`. // Create a condition on `rhs ?= B`. 
@@ -504,7 +502,7 @@ impl<'tcx, 'a> TOFinder<'tcx, 'a> { box (Operand::Move(place) | Operand::Copy(place), Operand::Constant(value)) | box (Operand::Constant(value), Operand::Move(place) | Operand::Copy(place)), ) => { - let Some(conditions) = state.try_get_idx(lhs, self.map) else { return }; + let Some(conditions) = state.try_get_idx(lhs, &self.map) else { return }; let Some(place) = self.map.find(place.as_ref()) else { return }; let equals = match op { BinOp::Eq => ScalarInt::TRUE, @@ -528,7 +526,7 @@ impl<'tcx, 'a> TOFinder<'tcx, 'a> { polarity: if c.matches(equals) { Polarity::Eq } else { Polarity::Ne }, ..c }); - state.insert_value_idx(place, conds, self.map); + state.insert_value_idx(place, conds, &self.map); } _ => {} @@ -583,7 +581,7 @@ impl<'tcx, 'a> TOFinder<'tcx, 'a> { StatementKind::Intrinsic(box NonDivergingIntrinsic::Assume( Operand::Copy(place) | Operand::Move(place), )) => { - let Some(conditions) = state.try_get(place.as_ref(), self.map) else { return }; + let Some(conditions) = state.try_get(place.as_ref(), &self.map) else { return }; conditions.iter_matches(ScalarInt::TRUE).for_each(register_opportunity); } StatementKind::Assign(box (lhs_place, rhs)) => { @@ -631,7 +629,7 @@ impl<'tcx, 'a> TOFinder<'tcx, 'a> { // We can recurse through this terminator. let mut state = state(); if let Some(place_to_flood) = place_to_flood { - state.flood_with(place_to_flood.as_ref(), self.map, ConditionSet::BOTTOM); + state.flood_with(place_to_flood.as_ref(), &self.map, ConditionSet::BOTTOM); } self.find_opportunity(bb, state, cost.clone(), depth + 1); } @@ -650,7 +648,7 @@ impl<'tcx, 'a> TOFinder<'tcx, 'a> { let Some(discr) = discr.place() else { return }; let discr_ty = discr.ty(self.body, self.tcx).ty; let Ok(discr_layout) = self.ecx.layout_of(discr_ty) else { return }; - let Some(conditions) = state.try_get(discr.as_ref(), self.map) else { return }; + let Some(conditions) = state.try_get(discr.as_ref(), &self.map) else { return }; if let Some((value, _)) = targets.iter().find(|&(_, target)| target == target_bb) { let Some(value) = ScalarInt::try_from_uint(value, discr_layout.size) else { return }; diff --git a/compiler/rustc_mir_transform/src/lib.rs b/compiler/rustc_mir_transform/src/lib.rs index dcfcad7cc7c5b..6c9b46d8b6f6a 100644 --- a/compiler/rustc_mir_transform/src/lib.rs +++ b/compiler/rustc_mir_transform/src/lib.rs @@ -223,14 +223,14 @@ fn is_mir_available(tcx: TyCtxt<'_>, def_id: LocalDefId) -> bool { /// MIR associated with them. fn mir_keys(tcx: TyCtxt<'_>, (): ()) -> FxIndexSet { // All body-owners have MIR associated with them. - let mut set: FxIndexSet<_> = tcx.hir().body_owners().collect(); + let set: FxIndexSet<_> = tcx.hir().body_owners().collect(); // Additionally, tuple struct/variant constructors have MIR, but // they don't have a BodyId, so we need to build them separately. 
- struct GatherCtors<'a> { - set: &'a mut FxIndexSet, + struct GatherCtors { + set: FxIndexSet, } - impl<'tcx> Visitor<'tcx> for GatherCtors<'_> { + impl<'tcx> Visitor<'tcx> for GatherCtors { fn visit_variant_data(&mut self, v: &'tcx hir::VariantData<'tcx>) { if let hir::VariantData::Tuple(_, _, def_id) = *v { self.set.insert(def_id); @@ -238,9 +238,11 @@ fn mir_keys(tcx: TyCtxt<'_>, (): ()) -> FxIndexSet { intravisit::walk_struct_def(self, v) } } - tcx.hir().visit_all_item_likes_in_crate(&mut GatherCtors { set: &mut set }); - set + let mut gather_ctors = GatherCtors { set }; + tcx.hir().visit_all_item_likes_in_crate(&mut gather_ctors); + + gather_ctors.set } fn mir_const_qualif(tcx: TyCtxt<'_>, def: LocalDefId) -> ConstQualifs { diff --git a/compiler/rustc_mir_transform/src/mentioned_items.rs b/compiler/rustc_mir_transform/src/mentioned_items.rs index 4402e0dd42d40..f24de609e6b29 100644 --- a/compiler/rustc_mir_transform/src/mentioned_items.rs +++ b/compiler/rustc_mir_transform/src/mentioned_items.rs @@ -10,7 +10,7 @@ pub(super) struct MentionedItems; struct MentionedItemsVisitor<'a, 'tcx> { tcx: TyCtxt<'tcx>, body: &'a mir::Body<'tcx>, - mentioned_items: &'a mut Vec>>, + mentioned_items: Vec>>, } impl<'tcx> crate::MirPass<'tcx> for MentionedItems { @@ -23,9 +23,9 @@ impl<'tcx> crate::MirPass<'tcx> for MentionedItems { } fn run_pass(&self, tcx: TyCtxt<'tcx>, body: &mut mir::Body<'tcx>) { - let mut mentioned_items = Vec::new(); - MentionedItemsVisitor { tcx, body, mentioned_items: &mut mentioned_items }.visit_body(body); - body.set_mentioned_items(mentioned_items); + let mut visitor = MentionedItemsVisitor { tcx, body, mentioned_items: Vec::new() }; + visitor.visit_body(body); + body.set_mentioned_items(visitor.mentioned_items); } } diff --git a/compiler/rustc_mir_transform/src/ref_prop.rs b/compiler/rustc_mir_transform/src/ref_prop.rs index 25b98786c66b7..4c3a99b79d4f2 100644 --- a/compiler/rustc_mir_transform/src/ref_prop.rs +++ b/compiler/rustc_mir_transform/src/ref_prop.rs @@ -253,11 +253,8 @@ fn compute_replacement<'tcx>( debug!(?targets); - let mut finder = ReplacementFinder { - targets: &mut targets, - can_perform_opt, - allowed_replacements: FxHashSet::default(), - }; + let mut finder = + ReplacementFinder { targets, can_perform_opt, allowed_replacements: FxHashSet::default() }; let reachable_blocks = traversal::reachable_as_bitset(body); for (bb, bbdata) in body.basic_blocks.iter_enumerated() { // Only visit reachable blocks as we rely on dataflow. 
@@ -269,19 +266,19 @@ fn compute_replacement<'tcx>( let allowed_replacements = finder.allowed_replacements; return Replacer { tcx, - targets, + targets: finder.targets, storage_to_remove, allowed_replacements, any_replacement: false, }; - struct ReplacementFinder<'a, 'tcx, F> { - targets: &'a mut IndexVec>, + struct ReplacementFinder<'tcx, F> { + targets: IndexVec>, can_perform_opt: F, allowed_replacements: FxHashSet<(Local, Location)>, } - impl<'tcx, F> Visitor<'tcx> for ReplacementFinder<'_, 'tcx, F> + impl<'tcx, F> Visitor<'tcx> for ReplacementFinder<'tcx, F> where F: FnMut(Place<'tcx>, Location) -> bool, { diff --git a/compiler/rustc_mir_transform/src/required_consts.rs b/compiler/rustc_mir_transform/src/required_consts.rs index ebcf5b5d27b84..99d1cd6f63ecf 100644 --- a/compiler/rustc_mir_transform/src/required_consts.rs +++ b/compiler/rustc_mir_transform/src/required_consts.rs @@ -1,26 +1,21 @@ use rustc_middle::mir::visit::Visitor; use rustc_middle::mir::{traversal, Body, ConstOperand, Location}; -pub(super) struct RequiredConstsVisitor<'a, 'tcx> { - required_consts: &'a mut Vec>, +pub(super) struct RequiredConstsVisitor<'tcx> { + required_consts: Vec>, } -impl<'a, 'tcx> RequiredConstsVisitor<'a, 'tcx> { - fn new(required_consts: &'a mut Vec>) -> Self { - RequiredConstsVisitor { required_consts } - } - +impl<'tcx> RequiredConstsVisitor<'tcx> { pub(super) fn compute_required_consts(body: &mut Body<'tcx>) { - let mut required_consts = Vec::new(); - let mut required_consts_visitor = RequiredConstsVisitor::new(&mut required_consts); + let mut visitor = RequiredConstsVisitor { required_consts: Vec::new() }; for (bb, bb_data) in traversal::reverse_postorder(&body) { - required_consts_visitor.visit_basic_block_data(bb, bb_data); + visitor.visit_basic_block_data(bb, bb_data); } - body.set_required_consts(required_consts); + body.set_required_consts(visitor.required_consts); } } -impl<'tcx> Visitor<'tcx> for RequiredConstsVisitor<'_, 'tcx> { +impl<'tcx> Visitor<'tcx> for RequiredConstsVisitor<'tcx> { fn visit_const_operand(&mut self, constant: &ConstOperand<'tcx>, _: Location) { if constant.const_.is_required_const() { self.required_consts.push(*constant); From 8949b443d5d0415c0c95fc931b1e4ee54de2f301 Mon Sep 17 00:00:00 2001 From: Nicholas Nethercote Date: Thu, 5 Sep 2024 10:39:11 +1000 Subject: [PATCH 10/14] Make `check_live_drops` into a `MirLint`. It's a thin wrapper around `check_live_drops`, but it's enough to fix the FIXME comment. 
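
For illustration only: a minimal, self-contained sketch of the "direct call
becomes a lint pass" shape this commit uses. The names below (`Lint`, `Body`,
`check_live_drops`) are stand-ins invented for the example, not rustc's actual
`MirLint`/`check_consts` items; the real wrapper is the new
`post_drop_elaboration.rs` file in the diff below.

    // Stand-in for the MIR body being checked.
    struct Body;

    // Stand-in for the existing check; its logic is unchanged.
    fn check_live_drops(_body: &Body) {}

    // Stand-in for a `MirLint`-style trait: a read-only pass.
    trait Lint {
        fn run_lint(&self, body: &Body);
    }

    // The whole change is a thin wrapper: the check is now driven
    // through the pass machinery instead of being called directly.
    struct CheckLiveDrops;

    impl Lint for CheckLiveDrops {
        fn run_lint(&self, body: &Body) {
            check_live_drops(body);
        }
    }

    fn main() {
        let body = Body;
        let passes: Vec<Box<dyn Lint>> = vec![Box::new(CheckLiveDrops)];
        for pass in &passes {
            pass.run_lint(&body);
        }
    }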
--- compiler/rustc_mir_transform/src/lib.rs | 9 ++++++--- .../src/post_drop_elaboration.rs | 13 +++++++++++++ 2 files changed, 19 insertions(+), 3 deletions(-) create mode 100644 compiler/rustc_mir_transform/src/post_drop_elaboration.rs diff --git a/compiler/rustc_mir_transform/src/lib.rs b/compiler/rustc_mir_transform/src/lib.rs index 6c9b46d8b6f6a..84d07d3833071 100644 --- a/compiler/rustc_mir_transform/src/lib.rs +++ b/compiler/rustc_mir_transform/src/lib.rs @@ -87,6 +87,7 @@ mod match_branches; mod mentioned_items; mod multiple_return_terminators; mod nrvo; +mod post_drop_elaboration; mod prettify; mod promote_consts; mod ref_prop; @@ -480,11 +481,13 @@ pub fn run_analysis_to_runtime_passes<'tcx>(tcx: TyCtxt<'tcx>, body: &mut Body<' pm::run_passes( tcx, body, - &[&remove_uninit_drops::RemoveUninitDrops, &simplify::SimplifyCfg::RemoveFalseEdges], + &[ + &remove_uninit_drops::RemoveUninitDrops, + &simplify::SimplifyCfg::RemoveFalseEdges, + &Lint(post_drop_elaboration::CheckLiveDrops), + ], None, ); - // FIXME: make this a MIR lint - check_consts::post_drop_elaboration::check_live_drops(tcx, body); } debug!("runtime_mir_lowering({:?})", did); diff --git a/compiler/rustc_mir_transform/src/post_drop_elaboration.rs b/compiler/rustc_mir_transform/src/post_drop_elaboration.rs new file mode 100644 index 0000000000000..75721d4607626 --- /dev/null +++ b/compiler/rustc_mir_transform/src/post_drop_elaboration.rs @@ -0,0 +1,13 @@ +use rustc_const_eval::check_consts; +use rustc_middle::mir::*; +use rustc_middle::ty::TyCtxt; + +use crate::MirLint; + +pub(super) struct CheckLiveDrops; + +impl<'tcx> MirLint<'tcx> for CheckLiveDrops { + fn run_lint(&self, tcx: TyCtxt<'tcx>, body: &Body<'tcx>) { + check_consts::post_drop_elaboration::check_live_drops(tcx, body); + } +} From 96d545a33bb6dc4ed3c1c80b997cce2035c571a6 Mon Sep 17 00:00:00 2001 From: Zalathar Date: Tue, 10 Sep 2024 20:16:43 +1000 Subject: [PATCH 11/14] coverage: Avoid referring to "coverage spans" in counter creation The counter-creation code needs to know which BCB nodes require counters, but isn't interested in why, so treat that as an opaque detail. --- .../src/coverage/counters.rs | 43 +++++++------------ 1 file changed, 15 insertions(+), 28 deletions(-) diff --git a/compiler/rustc_mir_transform/src/coverage/counters.rs b/compiler/rustc_mir_transform/src/coverage/counters.rs index ea1c0d2df4517..c9eceff9b507b 100644 --- a/compiler/rustc_mir_transform/src/coverage/counters.rs +++ b/compiler/rustc_mir_transform/src/coverage/counters.rs @@ -74,12 +74,11 @@ pub(super) struct CoverageCounters { } impl CoverageCounters { - /// Makes [`BcbCounter`] `Counter`s and `Expressions` for the `BasicCoverageBlock`s directly or - /// indirectly associated with coverage spans, and accumulates additional `Expression`s - /// representing intermediate values. + /// Ensures that each BCB node needing a counter has one, by creating physical + /// counters or counter expressions for nodes and edges as required. 
pub(super) fn make_bcb_counters( basic_coverage_blocks: &CoverageGraph, - bcb_has_coverage_spans: impl Fn(BasicCoverageBlock) -> bool, + bcb_needs_counter: impl Fn(BasicCoverageBlock) -> bool, ) -> Self { let num_bcbs = basic_coverage_blocks.num_nodes(); @@ -91,8 +90,7 @@ impl CoverageCounters { expressions_memo: FxHashMap::default(), }; - MakeBcbCounters::new(&mut this, basic_coverage_blocks) - .make_bcb_counters(bcb_has_coverage_spans); + MakeBcbCounters::new(&mut this, basic_coverage_blocks).make_bcb_counters(bcb_needs_counter); this } @@ -241,10 +239,7 @@ impl CoverageCounters { } } -/// Traverse the `CoverageGraph` and add either a `Counter` or `Expression` to every BCB, to be -/// injected with coverage spans. `Expressions` have no runtime overhead, so if a viable expression -/// (adding or subtracting two other counters or expressions) can compute the same result as an -/// embedded counter, an `Expression` should be used. +/// Helper struct that allows counter creation to inspect the BCB graph. struct MakeBcbCounters<'a> { coverage_counters: &'a mut CoverageCounters, basic_coverage_blocks: &'a CoverageGraph, @@ -264,30 +259,21 @@ impl<'a> MakeBcbCounters<'a> { /// One way to predict which branch executes the least is by considering loops. A loop is exited /// at a branch, so the branch that jumps to a `BasicCoverageBlock` outside the loop is almost /// always executed less than the branch that does not exit the loop. - fn make_bcb_counters(&mut self, bcb_has_coverage_spans: impl Fn(BasicCoverageBlock) -> bool) { + fn make_bcb_counters(&mut self, bcb_needs_counter: impl Fn(BasicCoverageBlock) -> bool) { debug!("make_bcb_counters(): adding a counter or expression to each BasicCoverageBlock"); - // Walk the `CoverageGraph`. For each `BasicCoverageBlock` node with an associated - // coverage span, add a counter. If the `BasicCoverageBlock` branches, add a counter or - // expression to each branch `BasicCoverageBlock` (if the branch BCB has only one incoming - // edge) or edge from the branching BCB to the branch BCB (if the branch BCB has multiple - // incoming edges). + // Traverse the coverage graph, ensuring that every node that needs a + // coverage counter has one. // - // The `TraverseCoverageGraphWithLoops` traversal ensures that, when a loop is encountered, - // all `BasicCoverageBlock` nodes in the loop are visited before visiting any node outside - // the loop. The `traversal` state includes a `context_stack`, providing a way to know if - // the current BCB is in one or more nested loops or not. + // The traversal tries to ensure that, when a loop is encountered, all + // nodes within the loop are visited before visiting any nodes outside + // the loop. It also keeps track of which loop(s) the traversal is + // currently inside. let mut traversal = TraverseCoverageGraphWithLoops::new(self.basic_coverage_blocks); while let Some(bcb) = traversal.next() { - if bcb_has_coverage_spans(bcb) { - debug!("{:?} has at least one coverage span. Get or make its counter", bcb); + let _span = debug_span!("traversal", ?bcb).entered(); + if bcb_needs_counter(bcb) { self.make_node_and_branch_counters(&traversal, bcb); - } else { - debug!( - "{:?} does not have any coverage spans. 
A counter will only be added if \ - and when a covered BCB has an expression dependency.", - bcb, - ); } } @@ -298,6 +284,7 @@ impl<'a> MakeBcbCounters<'a> { ); } + #[instrument(level = "debug", skip(self, traversal))] fn make_node_and_branch_counters( &mut self, traversal: &TraverseCoverageGraphWithLoops<'_>, From 8be70c7b2c88be04e07adcf0c2dafbd0e8a6d70f Mon Sep 17 00:00:00 2001 From: Zalathar Date: Tue, 10 Sep 2024 20:44:15 +1000 Subject: [PATCH 12/14] coverage: Avoid referring to out-edges as "branches" This makes the graph terminology a bit more consistent, and avoids potential confusion with branch coverage. --- .../src/coverage/counters.rs | 177 ++++++++---------- 1 file changed, 80 insertions(+), 97 deletions(-) diff --git a/compiler/rustc_mir_transform/src/coverage/counters.rs b/compiler/rustc_mir_transform/src/coverage/counters.rs index c9eceff9b507b..f55d6cf8efc71 100644 --- a/compiler/rustc_mir_transform/src/coverage/counters.rs +++ b/compiler/rustc_mir_transform/src/coverage/counters.rs @@ -253,12 +253,6 @@ impl<'a> MakeBcbCounters<'a> { Self { coverage_counters, basic_coverage_blocks } } - /// If two `BasicCoverageBlock`s branch from another `BasicCoverageBlock`, one of the branches - /// can be counted by `Expression` by subtracting the other branch from the branching - /// block. Otherwise, the `BasicCoverageBlock` executed the least should have the `Counter`. - /// One way to predict which branch executes the least is by considering loops. A loop is exited - /// at a branch, so the branch that jumps to a `BasicCoverageBlock` outside the loop is almost - /// always executed less than the branch that does not exit the loop. fn make_bcb_counters(&mut self, bcb_needs_counter: impl Fn(BasicCoverageBlock) -> bool) { debug!("make_bcb_counters(): adding a counter or expression to each BasicCoverageBlock"); @@ -273,7 +267,7 @@ impl<'a> MakeBcbCounters<'a> { while let Some(bcb) = traversal.next() { let _span = debug_span!("traversal", ?bcb).entered(); if bcb_needs_counter(bcb) { - self.make_node_and_branch_counters(&traversal, bcb); + self.make_node_counter_and_out_edge_counters(&traversal, bcb); } } @@ -284,74 +278,66 @@ impl<'a> MakeBcbCounters<'a> { ); } + /// Make sure the given node has a node counter, and then make sure each of + /// its out-edges has an edge counter (if appropriate). #[instrument(level = "debug", skip(self, traversal))] - fn make_node_and_branch_counters( + fn make_node_counter_and_out_edge_counters( &mut self, traversal: &TraverseCoverageGraphWithLoops<'_>, from_bcb: BasicCoverageBlock, ) { // First, ensure that this node has a counter of some kind. - // We might also use its term later to compute one of the branch counters. + // We might also use that counter to compute one of the out-edge counters. let from_bcb_operand = self.get_or_make_counter_operand(from_bcb); - let branch_target_bcbs = self.basic_coverage_blocks.successors[from_bcb].as_slice(); + let successors = self.basic_coverage_blocks.successors[from_bcb].as_slice(); // If this node doesn't have multiple out-edges, or all of its out-edges // already have counters, then we don't need to create edge counters. 
- let needs_branch_counters = branch_target_bcbs.len() > 1 - && branch_target_bcbs - .iter() - .any(|&to_bcb| self.branch_has_no_counter(from_bcb, to_bcb)); - if !needs_branch_counters { + let needs_out_edge_counters = successors.len() > 1 + && successors.iter().any(|&to_bcb| self.edge_has_no_counter(from_bcb, to_bcb)); + if !needs_out_edge_counters { return; } - debug!( - "{from_bcb:?} has some branch(es) without counters:\n {}", - branch_target_bcbs - .iter() - .map(|&to_bcb| { - format!("{from_bcb:?}->{to_bcb:?}: {:?}", self.branch_counter(from_bcb, to_bcb)) - }) - .collect::>() - .join("\n "), - ); + if tracing::enabled!(tracing::Level::DEBUG) { + let _span = + debug_span!("node has some out-edges without counters", ?from_bcb).entered(); + for &to_bcb in successors { + debug!(?to_bcb, counter=?self.edge_counter(from_bcb, to_bcb)); + } + } - // Of the branch edges that don't have counters yet, one can be given an expression - // (computed from the other edges) instead of a dedicated counter. - let expression_to_bcb = self.choose_preferred_expression_branch(traversal, from_bcb); + // Of the out-edges that don't have counters yet, one can be given an expression + // (computed from the other out-edges) instead of a dedicated counter. + let expression_to_bcb = self.choose_out_edge_for_expression(traversal, from_bcb); - // For each branch arm other than the one that was chosen to get an expression, + // For each out-edge other than the one that was chosen to get an expression, // ensure that it has a counter (existing counter/expression or a new counter), - // and accumulate the corresponding terms into a single sum term. - let sum_of_all_other_branches: BcbCounter = { - let _span = debug_span!("sum_of_all_other_branches", ?expression_to_bcb).entered(); - branch_target_bcbs + // and accumulate the corresponding counters into a single sum expression. + let sum_of_all_other_out_edges: BcbCounter = { + let _span = debug_span!("sum_of_all_other_out_edges", ?expression_to_bcb).entered(); + successors .iter() .copied() - // Skip the chosen branch, since we'll calculate it from the other branches. + // Skip the chosen edge, since we'll calculate its count from this sum. .filter(|&to_bcb| to_bcb != expression_to_bcb) .fold(None, |accum, to_bcb| { let _span = debug_span!("to_bcb", ?accum, ?to_bcb).entered(); - let branch_counter = self.get_or_make_edge_counter_operand(from_bcb, to_bcb); - Some(self.coverage_counters.make_sum_expression(accum, branch_counter)) + let edge_counter = self.get_or_make_edge_counter_operand(from_bcb, to_bcb); + Some(self.coverage_counters.make_sum_expression(accum, edge_counter)) }) - .expect("there must be at least one other branch") + .expect("there must be at least one other out-edge") }; - // For the branch that was chosen to get an expression, create that expression - // by taking the count of the node we're branching from, and subtracting the - // sum of all the other branches. - debug!( - "Making an expression for the selected expression_branch: \ - {expression_to_bcb:?} (expression_branch predecessors: {:?})", - self.bcb_predecessors(expression_to_bcb), - ); + // Now create an expression for the chosen edge, by taking the counter + // for its source node and subtracting the sum of its sibling out-edges. 
let expression = self.coverage_counters.make_expression( from_bcb_operand, Op::Subtract, - sum_of_all_other_branches, + sum_of_all_other_out_edges, ); + debug!("{expression_to_bcb:?} gets an expression: {expression:?}"); if self.basic_coverage_blocks.bcb_has_multiple_in_edges(expression_to_bcb) { self.coverage_counters.set_bcb_edge_counter(from_bcb, expression_to_bcb, expression); @@ -442,82 +428,79 @@ impl<'a> MakeBcbCounters<'a> { self.coverage_counters.set_bcb_edge_counter(from_bcb, to_bcb, counter_kind) } - /// Select a branch for the expression, either the recommended `reloop_branch`, or if none was - /// found, select any branch. - fn choose_preferred_expression_branch( + /// Choose one of the out-edges of `from_bcb` to receive an expression + /// instead of a physical counter, and returns that edge's target node. + /// + /// - Precondition: The node must have at least one out-edge without a counter. + /// - Postcondition: The selected edge does not have an edge counter. + fn choose_out_edge_for_expression( &self, traversal: &TraverseCoverageGraphWithLoops<'_>, from_bcb: BasicCoverageBlock, ) -> BasicCoverageBlock { - let good_reloop_branch = self.find_good_reloop_branch(traversal, from_bcb); - if let Some(reloop_target) = good_reloop_branch { - assert!(self.branch_has_no_counter(from_bcb, reloop_target)); + if let Some(reloop_target) = self.find_good_reloop_edge(traversal, from_bcb) { + assert!(self.edge_has_no_counter(from_bcb, reloop_target)); debug!("Selecting reloop target {reloop_target:?} to get an expression"); - reloop_target - } else { - let &branch_without_counter = self - .bcb_successors(from_bcb) - .iter() - .find(|&&to_bcb| self.branch_has_no_counter(from_bcb, to_bcb)) - .expect( - "needs_branch_counters was `true` so there should be at least one \ - branch", - ); - debug!( - "Selecting any branch={:?} that still needs a counter, to get the \ - `Expression` because there was no `reloop_branch`, or it already had a \ - counter", - branch_without_counter - ); - branch_without_counter + return reloop_target; } + + // We couldn't identify a "good" edge, so just choose any edge that + // doesn't already have a counter. + let arbitrary_target = self + .bcb_successors(from_bcb) + .iter() + .copied() + .find(|&to_bcb| self.edge_has_no_counter(from_bcb, to_bcb)) + .expect("precondition: at least one out-edge without a counter"); + debug!(?arbitrary_target, "selecting arbitrary out-edge to get an expression"); + arbitrary_target } - /// Tries to find a branch that leads back to the top of a loop, and that - /// doesn't already have a counter. Such branches are good candidates to + /// Tries to find an edge that leads back to the top of a loop, and that + /// doesn't already have a counter. Such edges are good candidates to /// be given an expression (instead of a physical counter), because they - /// will tend to be executed more times than a loop-exit branch. - fn find_good_reloop_branch( + /// will tend to be executed more times than a loop-exit edge. + fn find_good_reloop_edge( &self, traversal: &TraverseCoverageGraphWithLoops<'_>, from_bcb: BasicCoverageBlock, ) -> Option { - let branch_target_bcbs = self.bcb_successors(from_bcb); + let successors = self.bcb_successors(from_bcb); // Consider each loop on the current traversal context stack, top-down. 
for reloop_bcbs in traversal.reloop_bcbs_per_loop() { - let mut all_branches_exit_this_loop = true; + let mut all_edges_exit_this_loop = true; - // Try to find a branch that doesn't exit this loop and doesn't + // Try to find an out-edge that doesn't exit this loop and doesn't // already have a counter. - for &branch_target_bcb in branch_target_bcbs { - // A branch is a reloop branch if it dominates any BCB that has - // an edge back to the loop header. (Other branches are exits.) - let is_reloop_branch = reloop_bcbs.iter().any(|&reloop_bcb| { - self.basic_coverage_blocks.dominates(branch_target_bcb, reloop_bcb) + for &target_bcb in successors { + // An edge is a reloop edge if its target dominates any BCB that has + // an edge back to the loop header. (Otherwise it's an exit edge.) + let is_reloop_edge = reloop_bcbs.iter().any(|&reloop_bcb| { + self.basic_coverage_blocks.dominates(target_bcb, reloop_bcb) }); - if is_reloop_branch { - all_branches_exit_this_loop = false; - if self.branch_has_no_counter(from_bcb, branch_target_bcb) { - // We found a good branch to be given an expression. - return Some(branch_target_bcb); + if is_reloop_edge { + all_edges_exit_this_loop = false; + if self.edge_has_no_counter(from_bcb, target_bcb) { + // We found a good out-edge to be given an expression. + return Some(target_bcb); } - // Keep looking for another reloop branch without a counter. + // Keep looking for another reloop edge without a counter. } else { - // This branch exits the loop. + // This edge exits the loop. } } - if !all_branches_exit_this_loop { - // We found one or more reloop branches, but all of them already - // have counters. Let the caller choose one of the exit branches. - debug!("All reloop branches had counters; skip checking the other loops"); + if !all_edges_exit_this_loop { + // We found one or more reloop edges, but all of them already + // have counters. Let the caller choose one of the other edges. + debug!("All reloop edges had counters; skipping the other loops"); return None; } - // All of the branches exit this loop, so keep looking for a good - // reloop branch for one of the outer loops. + // All of the out-edges exit this loop, so keep looking for a good + // reloop edge for one of the outer loops. } None @@ -534,15 +517,15 @@ impl<'a> MakeBcbCounters<'a> { } #[inline] - fn branch_has_no_counter( + fn edge_has_no_counter( &self, from_bcb: BasicCoverageBlock, to_bcb: BasicCoverageBlock, ) -> bool { - self.branch_counter(from_bcb, to_bcb).is_none() + self.edge_counter(from_bcb, to_bcb).is_none() } - fn branch_counter( + fn edge_counter( &self, from_bcb: BasicCoverageBlock, to_bcb: BasicCoverageBlock, From 10cd5e8386a1ed20dc7fe07b481d3965e51e592f Mon Sep 17 00:00:00 2001 From: Zalathar Date: Tue, 10 Sep 2024 20:48:28 +1000 Subject: [PATCH 13/14] coverage: Avoid referring to "operands" in counter creation --- .../rustc_mir_transform/src/coverage/counters.rs | 16 ++++++++-------- 1 file changed, 8 insertions(+), 8 deletions(-) diff --git a/compiler/rustc_mir_transform/src/coverage/counters.rs b/compiler/rustc_mir_transform/src/coverage/counters.rs index f55d6cf8efc71..1c240366afa2e 100644 --- a/compiler/rustc_mir_transform/src/coverage/counters.rs +++ b/compiler/rustc_mir_transform/src/coverage/counters.rs @@ -288,7 +288,7 @@ impl<'a> MakeBcbCounters<'a> { ) { // First, ensure that this node has a counter of some kind. // We might also use that counter to compute one of the out-edge counters. 
- let from_bcb_operand = self.get_or_make_counter_operand(from_bcb); + let node_counter = self.get_or_make_node_counter(from_bcb); let successors = self.basic_coverage_blocks.successors[from_bcb].as_slice(); @@ -324,7 +324,7 @@ impl<'a> MakeBcbCounters<'a> { .filter(|&to_bcb| to_bcb != expression_to_bcb) .fold(None, |accum, to_bcb| { let _span = debug_span!("to_bcb", ?accum, ?to_bcb).entered(); - let edge_counter = self.get_or_make_edge_counter_operand(from_bcb, to_bcb); + let edge_counter = self.get_or_make_edge_counter(from_bcb, to_bcb); Some(self.coverage_counters.make_sum_expression(accum, edge_counter)) }) .expect("there must be at least one other out-edge") @@ -333,7 +333,7 @@ impl<'a> MakeBcbCounters<'a> { // Now create an expression for the chosen edge, by taking the counter // for its source node and subtracting the sum of its sibling out-edges. let expression = self.coverage_counters.make_expression( - from_bcb_operand, + node_counter, Op::Subtract, sum_of_all_other_out_edges, ); @@ -347,7 +347,7 @@ impl<'a> MakeBcbCounters<'a> { } #[instrument(level = "debug", skip(self))] - fn get_or_make_counter_operand(&mut self, bcb: BasicCoverageBlock) -> BcbCounter { + fn get_or_make_node_counter(&mut self, bcb: BasicCoverageBlock) -> BcbCounter { // If the BCB already has a counter, return it. if let Some(counter_kind) = self.coverage_counters.bcb_counters[bcb] { debug!("{bcb:?} already has a counter: {counter_kind:?}"); @@ -384,7 +384,7 @@ impl<'a> MakeBcbCounters<'a> { .copied() .fold(None, |accum, from_bcb| { let _span = debug_span!("from_bcb", ?accum, ?from_bcb).entered(); - let edge_counter = self.get_or_make_edge_counter_operand(from_bcb, bcb); + let edge_counter = self.get_or_make_edge_counter(from_bcb, bcb); Some(self.coverage_counters.make_sum_expression(accum, edge_counter)) }) .expect("there must be at least one in-edge") @@ -395,7 +395,7 @@ impl<'a> MakeBcbCounters<'a> { } #[instrument(level = "debug", skip(self))] - fn get_or_make_edge_counter_operand( + fn get_or_make_edge_counter( &mut self, from_bcb: BasicCoverageBlock, to_bcb: BasicCoverageBlock, @@ -404,13 +404,13 @@ impl<'a> MakeBcbCounters<'a> { // a node counter instead, since it will have the same value. if !self.basic_coverage_blocks.bcb_has_multiple_in_edges(to_bcb) { assert_eq!([from_bcb].as_slice(), self.basic_coverage_blocks.predecessors[to_bcb]); - return self.get_or_make_counter_operand(to_bcb); + return self.get_or_make_node_counter(to_bcb); } // If the source BCB has only one successor (assumed to be the given target), an edge // counter is unnecessary. Just get or make a counter for the source BCB. if self.bcb_successors(from_bcb).len() == 1 { - return self.get_or_make_counter_operand(from_bcb); + return self.get_or_make_node_counter(from_bcb); } // If the edge already has a counter, return it. From 86075759cc96748e56d70f6e25bbf319141ea82f Mon Sep 17 00:00:00 2001 From: Ralf Jung Date: Tue, 10 Sep 2024 13:18:20 +0200 Subject: [PATCH 14/14] abi/compatibility test: remove tests inside repr(C) wrappers --- tests/ui/abi/compatibility.rs | 52 ++++++++--------------------------- 1 file changed, 11 insertions(+), 41 deletions(-) diff --git a/tests/ui/abi/compatibility.rs b/tests/ui/abi/compatibility.rs index d37e793d9892d..9600c8dff67b1 100644 --- a/tests/ui/abi/compatibility.rs +++ b/tests/ui/abi/compatibility.rs @@ -189,7 +189,7 @@ mod prelude { #[cfg(not(host))] use prelude::*; -macro_rules! assert_abi_compatible { +macro_rules! 
test_abi_compatible { ($name:ident, $t1:ty, $t2:ty) => { mod $name { use super::*; @@ -212,16 +212,6 @@ impl Clone for Zst { } } -#[repr(C)] -struct ReprC1(T); -#[repr(C)] -struct ReprC2Int(i32, T); -#[repr(C)] -struct ReprC2Float(f32, T); -#[repr(C)] -struct ReprC4(T, Vec, Zst, T); -#[repr(C)] -struct ReprC4Mixed(T, f32, i32, T); #[repr(C)] enum ReprCEnum { Variant1, @@ -233,23 +223,6 @@ union ReprCUnion { something: ManuallyDrop, } -macro_rules! test_abi_compatible { - ($name:ident, $t1:ty, $t2:ty) => { - mod $name { - use super::*; - assert_abi_compatible!(plain, $t1, $t2); - // We also do some tests with differences in fields of `repr(C)` types. - assert_abi_compatible!(repr_c_1, ReprC1<$t1>, ReprC1<$t2>); - assert_abi_compatible!(repr_c_2_int, ReprC2Int<$t1>, ReprC2Int<$t2>); - assert_abi_compatible!(repr_c_2_float, ReprC2Float<$t1>, ReprC2Float<$t2>); - assert_abi_compatible!(repr_c_4, ReprC4<$t1>, ReprC4<$t2>); - assert_abi_compatible!(repr_c_4mixed, ReprC4Mixed<$t1>, ReprC4Mixed<$t2>); - assert_abi_compatible!(repr_c_enum, ReprCEnum<$t1>, ReprCEnum<$t2>); - assert_abi_compatible!(repr_c_union, ReprCUnion<$t1>, ReprCUnion<$t2>); - } - }; -} - // Compatibility of pointers. test_abi_compatible!(ptr_mut, *const i32, *mut i32); test_abi_compatible!(ptr_pointee, *const i32, *const Vec); @@ -268,7 +241,6 @@ test_abi_compatible!(isize_int, isize, i64); // Compatibility of 1-ZST. test_abi_compatible!(zst_unit, Zst, ()); -#[cfg(not(any(target_arch = "sparc64")))] test_abi_compatible!(zst_array, Zst, [u8; 0]); test_abi_compatible!(nonzero_int, NonZero, i32); @@ -285,13 +257,13 @@ test_abi_compatible!(arc, Arc, *mut i32); // `repr(transparent)` compatibility. #[repr(transparent)] -struct Wrapper1(T); +struct TransparentWrapper1(T); #[repr(transparent)] -struct Wrapper2((), Zst, T); +struct TransparentWrapper2((), Zst, T); #[repr(transparent)] -struct Wrapper3(T, [u8; 0], PhantomData); +struct TransparentWrapper3(T, [u8; 0], PhantomData); #[repr(transparent)] -union WrapperUnion { +union TransparentWrapperUnion { nothing: (), something: ManuallyDrop, } @@ -300,10 +272,10 @@ macro_rules! test_transparent { ($name:ident, $t:ty) => { mod $name { use super::*; - test_abi_compatible!(wrap1, $t, Wrapper1<$t>); - test_abi_compatible!(wrap2, $t, Wrapper2<$t>); - test_abi_compatible!(wrap3, $t, Wrapper3<$t>); - test_abi_compatible!(wrap4, $t, WrapperUnion<$t>); + test_abi_compatible!(wrap1, $t, TransparentWrapper1<$t>); + test_abi_compatible!(wrap2, $t, TransparentWrapper2<$t>); + test_abi_compatible!(wrap3, $t, TransparentWrapper3<$t>); + test_abi_compatible!(wrap4, $t, TransparentWrapperUnion<$t>); } }; } @@ -342,10 +314,8 @@ macro_rules! test_transparent_unsized { ($name:ident, $t:ty) => { mod $name { use super::*; - assert_abi_compatible!(wrap1, $t, Wrapper1<$t>); - assert_abi_compatible!(wrap1_reprc, ReprC1<$t>, ReprC1>); - assert_abi_compatible!(wrap2, $t, Wrapper2<$t>); - assert_abi_compatible!(wrap2_reprc, ReprC1<$t>, ReprC1>); + test_abi_compatible!(wrap1, $t, TransparentWrapper1<$t>); + test_abi_compatible!(wrap2, $t, TransparentWrapper2<$t>); } }; }
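
The counter-creation changes above lean on one arithmetic identity: a node's execution count equals the sum of its out-edge counts, so once every out-edge but one has a physical counter, the remaining edge can be given the expression `node_counter - sum_of_all_other_out_edges` instead of another counter. A minimal standalone sketch of that identity (illustrative only; the function and names below are not part of the rustc code):

    /// Toy model of the identity behind `make_expression(node_counter,
    /// Op::Subtract, sum_of_all_other_out_edges)`: the count of the one
    /// out-edge without a physical counter is the node's count minus the
    /// sum of the counted sibling edges.
    fn derived_edge_count(node_count: u64, other_edge_counts: &[u64]) -> u64 {
        let sum_of_others: u64 = other_edge_counts.iter().sum();
        node_count - sum_of_others
    }

    fn main() {
        // A node executed 10 times with three out-edges; two edges carry
        // physical counters (3 and 4), so the third edge must have been
        // taken 3 times.
        assert_eq!(derived_edge_count(10, &[3, 4]), 3);
    }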
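
The renamed `TransparentWrapper*` types in the last hunk exercise the guarantee that a `#[repr(transparent)]` wrapper is ABI-compatible with its single non-1-ZST field. A hedged, self-contained sketch of what that guarantee permits, separate from the test's own assertion machinery (the wrapper and function names here are invented for illustration):

    #[repr(transparent)]
    struct Wrapper(u32);

    extern "C" fn add_one(x: u32) -> u32 {
        x + 1
    }

    fn main() {
        // Transmuting between function pointer types is allowed; calling the
        // result is defined because `Wrapper` and `u32` are ABI-compatible,
        // which is the property the compatibility test asserts.
        let f: extern "C" fn(u32) -> u32 = add_one;
        let g: extern "C" fn(Wrapper) -> u32 = unsafe { std::mem::transmute(f) };
        assert_eq!(g(Wrapper(41)), 42);
    }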