diff --git a/crates/nargo_cli/tests/execution_success/array_dynamic/src/main.nr b/crates/nargo_cli/tests/execution_success/array_dynamic/src/main.nr
index b27c96b30c3..69e5fdc93db 100644
--- a/crates/nargo_cli/tests/execution_success/array_dynamic/src/main.nr
+++ b/crates/nargo_cli/tests/execution_success/array_dynamic/src/main.nr
@@ -21,7 +21,7 @@ fn dyn_array(mut x: [u32; 5], y: Field, z: Field) {
     assert(x[y] == 111);
     assert(x[z] == 101);
     x[z] = 0;
-    assert(x[y] == 111);
+    assert(x[y] == 111); assert(x[1] == 0);
     if y as u32 < 10 {
         x[y] = x[y] - 2;
diff --git a/crates/nargo_cli/tests/execution_success/references/src/main.nr b/crates/nargo_cli/tests/execution_success/references/src/main.nr
index ae96693c3d2..ec23f2f3a38 100644
--- a/crates/nargo_cli/tests/execution_success/references/src/main.nr
+++ b/crates/nargo_cli/tests/execution_success/references/src/main.nr
@@ -35,6 +35,12 @@ fn main(mut x: Field) {
     regression_2054();
     regression_2030();
     regression_2255();
+
+    assert(x == 3);
+    regression_2218_if_inner_if(x, 10);
+    regression_2218_if_inner_else(20, x);
+    regression_2218_else(x, 3);
+    regression_2218_loop(x, 10);
 }
 
 fn add1(x: &mut Field) {
@@ -111,3 +117,116 @@ fn regression_2255() {
 fn regression_2255_helper(mut x: &mut Field) {
     *x = 1;
 }
+
+fn regression_2218(x: Field, y: Field) -> Field {
+    let q = &mut &mut 0;
+    let q1 = *q;
+    let q2 = *q;
+
+    if x != y {
+        *q1 = 1;
+        // Make sure that we correctly load reference aliases through multiple blocks
+        if x != 20 {
+            *q1 = 10;
+            *q2 = 2; // now we'd expect q1 == q2 == 2
+
+            assert(*q1 == 2);
+        } else {
+            *q2 = 15;
+            assert(*q1 == 15);
+        }
+    } else {
+        *q2 = 20;
+        assert(*q1 == 20);
+    }
+    // Have to assign value to return it
+    let value = *q1;
+    value
+}
+
+fn regression_2218_if_inner_if(x: Field, y: Field) {
+    let value = regression_2218(x, y);
+    assert(value == 2);
+}
+
+fn regression_2218_if_inner_else(x: Field, y: Field) {
+    let value = regression_2218(x, y);
+    assert(value == 15);
+}
+
+fn regression_2218_else(x: Field, y: Field) {
+    let value = regression_2218(x, y);
+    assert(value == 20);
+}
+
+fn regression_2218_loop(x: Field, y: Field) {
+    let q = &mut &mut 0;
+    let q1 = *q;
+    let q2 = *q;
+
+    for _ in 0..1 {
+        if x != y {
+            *q1 = 10;
+            *q2 = 2; // now we'd expect q1 == q2 == 2
+
+            assert(*q1 == 2);
+        } else {
+            *q2 = 20;
+            assert(*q1 == 20);
+        }
+    }
+    assert(*q1 == 2);
+
+    for _ in 0..1 {
+        for _ in 0..5 {
+            if x != y {
+                *q1 = 1;
+                // Make sure that we correctly load reference aliases through multiple blocks
+                if x != 20 {
+                    *q1 = 10;
+                    *q2 = 2; // now we'd expect q1 == q2 == 2
+
+                    assert(*q1 == 2);
+                }
+            } else {
+                *q2 = 20;
+                assert(*q1 == 20);
+            }
+        }
+        if x != y {
+            *q1 = 1;
+            for _ in 0..5 {
+                // Make sure that we correctly load reference aliases through multiple blocks
+                if x != 20 {
+                    *q1 = 10;
+                    *q2 = 2; // now we'd expect q1 == q2 == 2
+
+                    assert(*q1 == 2);
+                }
+            }
+        } else {
+            *q2 = 20;
+            assert(*q1 == 20);
+        }
+    }
+    assert(*q1 == 2);
+
+    if x != y {
+        for _ in 0..5 {
+            if x != y {
+                *q1 = 1;
+                // Make sure that we correctly load reference aliases through multiple blocks
+                if x != 20 {
+                    *q1 = 10;
+                    *q2 = 2; // now we'd expect q1 == q2 == 2
+
+                    assert(*q1 == 2);
+                }
+            }
+        }
+    } else {
+        *q2 = 20;
+        assert(*q1 == 20);
+    }
+    assert(*q1 == 2);
+}
\ No newline at end of file
diff --git a/crates/noirc_evaluator/src/ssa.rs b/crates/noirc_evaluator/src/ssa.rs
index 10099037dfe..b215429332e 100644
--- a/crates/noirc_evaluator/src/ssa.rs
+++ b/crates/noirc_evaluator/src/ssa.rs
@@ -49,13 +49,21 @@ pub(crate) fn optimize_into_acir(
         ssa = ssa
             .inline_functions()
             .print(print_ssa_passes, "After Inlining:")
+            // Run mem2reg with the CFG still separated into blocks
+            .mem2reg()
+            .print(print_ssa_passes, "After Mem2Reg:")
             .evaluate_assert_constant()?
             .unroll_loops()?
            .print(print_ssa_passes, "After Unrolling:")
             .simplify_cfg()
             .print(print_ssa_passes, "After Simplifying:")
+            // Run mem2reg again before flattening to handle promotion
+            // of values made accessible by loop unrolling
+            .mem2reg()
+            .print(print_ssa_passes, "After Mem2Reg:")
             .flatten_cfg()
             .print(print_ssa_passes, "After Flattening:")
+            // Run mem2reg once more on the flattened CFG to catch any remaining loads/stores
             .mem2reg()
             .print(print_ssa_passes, "After Mem2Reg:")
             .fold_constants()
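Note on the ordering above: mem2reg is a pure Ssa-to-Ssa transform, so it can be interleaved with the structural passes it depends on, and each run only promotes what the current CFG shape makes provable. A condensed sketch of the intent, using the same pass methods as the pipeline above (the evaluate_assert_constant, simplify_cfg, and print steps are elided here, so this is not the literal pipeline):

    // Inside the same error-propagating function as above:
    let ssa = ssa
        .mem2reg() // 1st run: forward stores across the still-separate blocks
        .unroll_loops()?
        .mem2reg() // 2nd run: former loop bodies are now straight-line blocks
        .flatten_cfg()
        .mem2reg(); // 3rd run: one block remains, so every load is block-local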
diff --git a/crates/noirc_evaluator/src/ssa/ir/cfg.rs b/crates/noirc_evaluator/src/ssa/ir/cfg.rs
index 8faa0fa8565..62c08335891 100644
--- a/crates/noirc_evaluator/src/ssa/ir/cfg.rs
+++ b/crates/noirc_evaluator/src/ssa/ir/cfg.rs
@@ -1,4 +1,4 @@
-use std::collections::{HashMap, HashSet};
+use std::collections::{BTreeSet, HashMap};
 
 use super::{
     basic_block::{BasicBlock, BasicBlockId},
@@ -10,13 +10,14 @@ use super::{
 struct CfgNode {
     /// Set of blocks that containing jumps that target this block.
     /// The predecessor set has no meaningful order.
-    pub(crate) predecessors: HashSet<BasicBlockId>,
+    pub(crate) predecessors: BTreeSet<BasicBlockId>,
 
     /// Set of blocks that are the targets of jumps in this block.
     /// The successors set has no meaningful order.
-    pub(crate) successors: HashSet<BasicBlockId>,
+    pub(crate) successors: BTreeSet<BasicBlockId>,
 }
 
+#[derive(Clone)]
 /// The Control Flow Graph maintains a mapping of blocks to their predecessors
 /// and successors where predecessors are basic blocks and successors are
 /// basic blocks.
@@ -31,7 +32,7 @@ impl ControlFlowGraph {
         // therefore we must ensure that a node exists for the entry block, regardless of whether
         // it later comes to describe any edges after calling compute.
         let entry_block = func.entry_block();
-        let empty_node = CfgNode { predecessors: HashSet::new(), successors: HashSet::new() };
+        let empty_node = CfgNode { predecessors: BTreeSet::new(), successors: BTreeSet::new() };
         let data = HashMap::from([(entry_block, empty_node)]);
 
         let mut cfg = ControlFlowGraph { data };
diff --git a/crates/noirc_evaluator/src/ssa/ir/function.rs b/crates/noirc_evaluator/src/ssa/ir/function.rs
index ab04c1497bd..ab97d418783 100644
--- a/crates/noirc_evaluator/src/ssa/ir/function.rs
+++ b/crates/noirc_evaluator/src/ssa/ir/function.rs
@@ -1,4 +1,4 @@
-use std::collections::HashSet;
+use std::collections::BTreeSet;
 
 use iter_extended::vecmap;
 
@@ -107,8 +107,8 @@ impl Function {
     /// Note that self.dfg.basic_blocks_iter() iterates over all blocks,
     /// whether reachable or not. This function should be used if you
     /// want to iterate only reachable blocks.
-    pub(crate) fn reachable_blocks(&self) -> HashSet<BasicBlockId> {
-        let mut blocks = HashSet::new();
+    pub(crate) fn reachable_blocks(&self) -> BTreeSet<BasicBlockId> {
+        let mut blocks = BTreeSet::new();
         let mut stack = vec![self.entry_block];
 
         while let Some(block) = stack.pop() {
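The collection swaps in these two files are about determinism rather than semantics: mem2reg now threads state across its per-block loop, so the visitation order of reachable_blocks affects which substitutions are discovered first. A BTreeSet always iterates in ascending order, while a HashSet's order may differ from run to run. A minimal, self-contained illustration:

    use std::collections::{BTreeSet, HashSet};

    fn main() {
        let unordered: HashSet<u32> = [3, 1, 2].into_iter().collect();
        let ordered: BTreeSet<u32> = [3, 1, 2].into_iter().collect();

        // HashSet iteration order is unspecified and can vary between runs.
        let _any_order: Vec<u32> = unordered.into_iter().collect();

        // BTreeSet iteration is always sorted, so a pass walking blocks in
        // this order behaves identically on every compile.
        let sorted: Vec<u32> = ordered.into_iter().collect();
        assert_eq!(sorted, vec![1, 2, 3]);
    }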
diff --git a/crates/noirc_evaluator/src/ssa/opt/mem2reg.rs b/crates/noirc_evaluator/src/ssa/opt/mem2reg.rs
index b9e849bb77c..be0ded802b3 100644
--- a/crates/noirc_evaluator/src/ssa/opt/mem2reg.rs
+++ b/crates/noirc_evaluator/src/ssa/opt/mem2reg.rs
@@ -1,50 +1,86 @@
 //! mem2reg implements a pass for promoting values stored in memory to values in registers where
 //! possible. This is particularly important for converting our memory-based representation of
 //! mutable variables into values that are easier to manipulate.
-use std::collections::{BTreeMap, HashMap, HashSet};
-
-use iter_extended::vecmap;
+use std::collections::{BTreeMap, HashSet};
 
 use crate::ssa::{
     ir::{
         basic_block::BasicBlockId,
+        cfg::ControlFlowGraph,
         dfg::DataFlowGraph,
+        dom::DominatorTree,
+        function::Function,
         instruction::{Instruction, InstructionId, TerminatorInstruction},
+        post_order::PostOrder,
         value::{Value, ValueId},
     },
     ssa_gen::Ssa,
 };
 
+use super::unrolling::{find_all_loops, Loops};
+
 impl Ssa {
     /// Attempts to remove any load instructions that recover values that are already available in
-    /// scope, and attempts to remove store that are subsequently redundant, as long as they are
-    /// not stores on memory that will be passed into a function call or returned.
+    /// scope, and attempts to remove stores that are subsequently redundant,
+    /// as long as they are not stores on memory used inside of loops.
     pub(crate) fn mem2reg(mut self) -> Ssa {
         for function in self.functions.values_mut() {
             let mut all_protected_allocations = HashSet::new();
-            let contexts = vecmap(function.reachable_blocks(), |block| {
-                let mut context = PerBlockContext::new(block);
-                let allocations_protected_by_block =
-                    context.analyze_allocations_and_eliminate_known_loads(&mut function.dfg);
+
+            let mut context = PerFunctionContext::new(function);
+
+            for block in function.reachable_blocks() {
+                // Maps Load instruction id -> value to replace the result of the load with
+                let mut loads_to_substitute_per_block = BTreeMap::new();
+
+                // Maps Load result id -> value to replace the result of the load with
+                let mut load_values_to_substitute = BTreeMap::new();
+
+                let allocations_protected_by_block = context
+                    .analyze_allocations_and_eliminate_known_loads(
+                        &mut function.dfg,
+                        &mut loads_to_substitute_per_block,
+                        &mut load_values_to_substitute,
+                        block,
+                    );
                 all_protected_allocations.extend(allocations_protected_by_block.into_iter());
-                context
-            });
-            // Now that we have a comprehensive list of used allocations across all the
-            // function's blocks, it is safe to remove any stores that do not touch such
-            // allocations.
-            for context in contexts {
-                context.remove_unused_stores(&mut function.dfg, &all_protected_allocations);
             }
-        }
+            for block in function.reachable_blocks() {
+                context.remove_unused_stores(&mut function.dfg, &all_protected_allocations, block);
+            }
+        }
         self
     }
 }
 
-struct PerBlockContext {
-    block_id: BasicBlockId,
-    last_stores: BTreeMap<AllocId, ValueId>,
+struct PerFunctionContext {
+    last_stores_with_block: BTreeMap<(AllocId, BasicBlockId), ValueId>,
+    // Maps Load result id -> (value, block_id)
+    // Used to replace the result of a load with a value from the appropriate block
+    load_values_to_substitute_per_func: BTreeMap<ValueId, (ValueId, BasicBlockId)>,
     store_ids: Vec<InstructionId>,
+    cfg: ControlFlowGraph,
+    post_order: PostOrder,
+    dom_tree: DominatorTree,
+    loops: Loops,
+}
+
+impl PerFunctionContext {
+    fn new(function: &Function) -> Self {
+        let cfg = ControlFlowGraph::with_function(function);
+        let post_order = PostOrder::with_function(function);
+        let dom_tree = DominatorTree::with_cfg_and_post_order(&cfg, &post_order);
+        PerFunctionContext {
+            last_stores_with_block: BTreeMap::new(),
+            load_values_to_substitute_per_func: BTreeMap::new(),
+            store_ids: Vec::new(),
+            cfg,
+            post_order,
+            dom_tree,
+            loops: find_all_loops(function),
+        }
+    }
 }
 
 /// An AllocId is the ValueId returned from an allocate instruction. E.g. v0 in v0 = allocate.
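For reference, the analysis state assembled in PerFunctionContext::new composes the existing SSA utilities like so (a standalone sketch; `function` stands for any SSA function being optimized):

    // Sketch: build the per-function analysis structures once, reuse them per block.
    let cfg = ControlFlowGraph::with_function(function);
    let post_order = PostOrder::with_function(function);
    let dom_tree = DominatorTree::with_cfg_and_post_order(&cfg, &post_order);
    let loops = find_all_loops(function);

    // The dominator tree backs the pass's safety checks: a stored value may only
    // flow into a load when the storing block dominates the loading block.
    let entry = function.entry_block();
    assert!(dom_tree.is_reachable(entry) && dom_tree.dominates(entry, entry));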
@@ -52,51 +88,57 @@ struct PerBlockContext {
 /// an allocate instruction.
 type AllocId = ValueId;
 
-impl PerBlockContext {
-    fn new(block_id: BasicBlockId) -> Self {
-        PerBlockContext { block_id, last_stores: BTreeMap::new(), store_ids: Vec::new() }
-    }
-
+impl PerFunctionContext {
     // Attempts to remove load instructions for which the result is already known from previous
     // store instructions to the same address in the same block.
     fn analyze_allocations_and_eliminate_known_loads(
         &mut self,
         dfg: &mut DataFlowGraph,
+        loads_to_substitute: &mut BTreeMap<InstructionId, ValueId>,
+        load_values_to_substitute_per_block: &mut BTreeMap<ValueId, ValueId>,
+        block_id: BasicBlockId,
     ) -> HashSet<AllocId> {
         let mut protected_allocations = HashSet::new();
-        let block = &dfg[self.block_id];
-
-        // Maps Load instruction id -> value to replace the result of the load with
-        let mut loads_to_substitute = HashMap::new();
+        let block = &dfg[block_id];
 
-        // Maps Load result id -> value to replace the result of the load with
-        let mut load_values_to_substitute = HashMap::new();
+        // Check whether the block has a common successor here to avoid analyzing
+        // the CFG for every block instruction.
+        let has_common_successor = self.has_common_successor(block_id);
 
         for instruction_id in block.instructions() {
             match &dfg[*instruction_id] {
                 Instruction::Store { mut address, value } => {
-                    if let Some(value) = load_values_to_substitute.get(&address) {
-                        address = *value;
+                    if has_common_successor {
+                        protected_allocations.insert(address);
                     }
+                    address = self.fetch_load_value_to_substitute(block_id, address);
 
-                    self.last_stores.insert(address, *value);
+                    self.last_stores_with_block.insert((address, block_id), *value);
                     self.store_ids.push(*instruction_id);
+
+                    if has_common_successor {
+                        protected_allocations.insert(address);
+                    }
                 }
                 Instruction::Load { mut address } => {
-                    if let Some(value) = load_values_to_substitute.get(&address) {
-                        address = *value;
-                    }
-
-                    if let Some(last_value) = self.last_stores.get(&address) {
-                        let result_value = *dfg
-                            .instruction_results(*instruction_id)
-                            .first()
-                            .expect("ICE: Load instructions should have single result");
-
-                        loads_to_substitute.insert(*instruction_id, *last_value);
-                        load_values_to_substitute.insert(result_value, *last_value);
-                    } else {
+                    address = self.fetch_load_value_to_substitute(block_id, address);
+
+                    let found_last_value = self.find_load_to_substitute(
+                        block_id,
+                        address,
+                        dfg,
+                        instruction_id,
+                        loads_to_substitute,
+                        load_values_to_substitute_per_block,
+                    );
+                    if !found_last_value {
+                        // We want to protect allocations that do not have a load to substitute
                         protected_allocations.insert(address);
+                        // We also want to check for allocations that share a value
+                        // with the one we are protecting.
+                        // This check prevents the pass from removing stores to a value that
+                        // is used by reference aliases in different blocks.
+                        protected_allocations.insert(dfg.resolve(address));
                     }
                 }
                 Instruction::Call { arguments, .. } => {
@@ -122,21 +164,179 @@ impl PerBlockContext {
         }
 
         // Substitute load result values
-        for (result_value, new_value) in load_values_to_substitute {
-            let result_value = dfg.resolve(result_value);
-            dfg.set_value_from_id(result_value, new_value);
+        for (result_value, new_value) in load_values_to_substitute_per_block {
+            let result_value = dfg.resolve(*result_value);
+            dfg.set_value_from_id(result_value, *new_value);
         }
 
         // Delete load instructions
         // Even though we could let DIE handle this, doing it here makes the debug output easier
         // to read.
-        dfg[self.block_id]
+        dfg[block_id]
             .instructions_mut()
             .retain(|instruction| !loads_to_substitute.contains_key(instruction));
 
         protected_allocations
     }
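The base case this method handles is a store followed by a load of the same address within one block. In the FunctionBuilder idiom of the unit tests further down (count_loads is the helper defined in that tests module), the behavior reduces to this sketch:

    let main_id = Id::test_new(0);
    let mut builder = FunctionBuilder::new("main".into(), main_id, RuntimeType::Acir);

    let v0 = builder.insert_allocate();
    let one = builder.field_constant(1u128);
    builder.insert_store(v0, one);

    // This load is preceded by a store to the same address in the same block,
    // so its result is replaced by `one` and the load instruction is deleted.
    let v1 = builder.insert_load(v0, Type::field());
    builder.terminate_with_return(vec![v1]);

    let ssa = builder.finish().mem2reg();
    let main = ssa.main();
    assert_eq!(count_loads(main.entry_block(), &main.dfg), 0);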
+
+    // This method will fetch already saved load values to substitute for a given address.
+    // The search starts at the block supplied as a parameter. If that block has no load to
+    // substitute, the CFG is walked to determine whether a predecessor block has one.
+    // If no load value to substitute is found, the original address is returned.
+    fn fetch_load_value_to_substitute(
+        &mut self,
+        block_id: BasicBlockId,
+        address: ValueId,
+    ) -> ValueId {
+        let mut stack = vec![block_id];
+        let mut visited = HashSet::new();
+
+        while let Some(block) = stack.pop() {
+            visited.insert(block);
+
+            // We do not want to substitute loads that take place within loops or in branches
+            // that have yet to be simplified, as this pass can occur before loop unrolling
+            // and flattening. The mem2reg pass should be run again following all optimization
+            // passes, as new values may then be promotable.
+            for l in self.loops.yet_to_unroll.iter() {
+                if l.blocks.contains(&block) {
+                    return address;
+                }
+            }
+
+            // Check whether there is a load value to substitute in the current block.
+            // Return the value if found.
+            if let Some((value, load_block_id)) =
+                self.load_values_to_substitute_per_func.get(&address)
+            {
+                if *load_block_id == block {
+                    return *value;
+                }
+            }
+
+            // If no load value to substitute has been found in the current block, check the block's predecessors.
+            if let Some(predecessor) = self.block_has_predecessor(block, &visited) {
+                stack.push(predecessor);
+            }
+        }
+        address
+    }
+
+    // This method determines which loads should be substituted and saves them
+    // to be substituted further in the pass.
+    // Starting at the block supplied as a parameter, we check whether a store has occurred with the given address.
+    // If no store has occurred in the supplied block, the CFG is walked to determine whether
+    // a predecessor block has a store at the given address.
+    fn find_load_to_substitute(
+        &mut self,
+        block_id: BasicBlockId,
+        address: ValueId,
+        dfg: &DataFlowGraph,
+        instruction_id: &InstructionId,
+        loads_to_substitute: &mut BTreeMap<InstructionId, ValueId>,
+        load_values_to_substitute_per_block: &mut BTreeMap<ValueId, ValueId>,
+    ) -> bool {
+        let mut stack = vec![block_id];
+        let mut visited = HashSet::new();
+
+        while let Some(block) = stack.pop() {
+            visited.insert(block);
+
+            // We do not want to substitute loads that take place within loops or in branches
+            // that have yet to be simplified, as this pass can occur before loop unrolling
+            // and flattening. The mem2reg pass should be run again following all optimization
+            // passes, as new values may then be promotable.
+            for l in self.loops.yet_to_unroll.iter() {
+                if l.blocks.contains(&block) {
+                    return false;
+                }
+            }
+
+            // Check whether there has been a store instruction in the current block.
+            // If there has been a store, add a load to be substituted.
+            if let Some(last_value) = self.last_stores_with_block.get(&(address, block)) {
+                let result_value = *dfg
+                    .instruction_results(*instruction_id)
+                    .first()
+                    .expect("ICE: Load instructions should have single result");
+
+                loads_to_substitute.insert(*instruction_id, *last_value);
+                load_values_to_substitute_per_block.insert(result_value, *last_value);
+                self.load_values_to_substitute_per_func.insert(result_value, (*last_value, block));
+                return true;
+            }
+
+            // If no store has been found in the current block, check the block's predecessors.
+            if let Some(predecessor) = self.block_has_predecessor(block, &visited) {
+                stack.push(predecessor);
+            }
+        }
+        false
+    }
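Both walks above give up as soon as they reach a block that still belongs to a loop that has not been unrolled: a store inside a loop body may execute zero or many times, so nothing in it can be forwarded yet. The shared guard reduces to the following sketch (`function` and `block` are stand-ins):

    let loops = find_all_loops(function);
    let in_live_loop = loops.yet_to_unroll.iter().any(|l| l.blocks.contains(&block));
    if in_live_loop {
        // Give up: leave the load as written and rely on the later
        // mem2reg runs that happen after unrolling and flattening.
    }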
+
+    // If no loads or stores have been found in the current block, check the block's predecessors.
+    // Only check blocks with one predecessor as otherwise a constant value could be propagated
+    // through successor blocks with multiple branches that rely on other simplification passes
+    // such as loop unrolling or flattening of the CFG.
+    fn block_has_predecessor(
+        &mut self,
+        block_id: BasicBlockId,
+        visited: &HashSet<BasicBlockId>,
+    ) -> Option<BasicBlockId> {
+        let mut predecessors = self.cfg.predecessors(block_id);
+        if predecessors.len() == 1 {
+            let predecessor = predecessors.next().unwrap();
+            if self.dom_tree.is_reachable(predecessor)
+                && self.dom_tree.dominates(predecessor, block_id)
+                && !visited.contains(&predecessor)
+            {
+                return Some(predecessor);
+            }
+        }
+        None
+    }
+
+    fn has_common_successor(&mut self, block_id: BasicBlockId) -> bool {
+        let mut predecessors = self.cfg.predecessors(block_id);
+        if let Some(predecessor) = predecessors.next() {
+            let pred_successors = self.find_all_successors(predecessor);
+            let current_successors: HashSet<_> = self.cfg.successors(block_id).collect();
+            return pred_successors.into_iter().any(|b| current_successors.contains(&b));
+        }
+        false
+    }
+
+    fn find_all_successors(&self, block_id: BasicBlockId) -> HashSet<BasicBlockId> {
+        let mut stack = vec![];
+        let mut visited = HashSet::new();
+
+        // Fetch the initial block's successors
+        let successors = self.cfg.successors(block_id);
+        for successor in successors {
+            if !visited.contains(&successor) {
+                stack.push(successor);
+            }
+        }
+
+        // Follow the CFG to fetch the remaining successors
+        while let Some(block) = stack.pop() {
+            visited.insert(block);
+            let successors = self.cfg.successors(block);
+            for successor in successors {
+                if !visited.contains(&successor) {
+                    stack.push(successor);
+                }
+            }
+        }
+        visited
+    }
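has_common_successor is detecting join points. In the diamond below, b1's predecessor b0 also reaches b1's successor b3 through b2, so a store performed in b1 is not guaranteed to be the store visible at b3 and must stay protected. A sketch of the check for b1, with hypothetical block ids b0..b3 and the calls defined above:

    // Diamond CFG:      b0
    //                   /  \
    //                 b1    b2
    //                   \  /
    //                    b3
    let pred_successors = context.find_all_successors(b0); // yields {b1, b2, b3}
    let b1_successors: HashSet<_> = context.cfg.successors(b1).collect(); // {b3}
    assert!(pred_successors.into_iter().any(|b| b1_successors.contains(&b)));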
+
     /// Checks whether the given value id refers to an allocation.
     fn value_is_from_allocation(value: ValueId, dfg: &DataFlowGraph) -> bool {
         match &dfg[value] {
@@ -150,9 +350,10 @@ impl PerBlockContext {
     /// Removes all store instructions identified during analysis that aren't present in the
     /// provided `protected_allocations` `HashSet`.
     fn remove_unused_stores(
-        self,
+        &self,
         dfg: &mut DataFlowGraph,
         protected_allocations: &HashSet<AllocId>,
+        block_id: BasicBlockId,
     ) {
         // Scan for unused stores
         let mut stores_to_remove = HashSet::new();
@@ -169,7 +370,7 @@ impl PerBlockContext {
         }
 
         // Delete unused stores
-        dfg[self.block_id]
+        dfg[block_id]
             .instructions_mut()
             .retain(|instruction| !stores_to_remove.contains(instruction));
     }
@@ -187,7 +388,7 @@ mod tests {
         basic_block::BasicBlockId,
         dfg::DataFlowGraph,
         function::RuntimeType,
-        instruction::{Instruction, Intrinsic, TerminatorInstruction},
+        instruction::{BinaryOp, Instruction, Intrinsic, TerminatorInstruction},
         map::Id,
         types::Type,
     },
@@ -320,7 +521,7 @@ mod tests {
             .count()
     }
 
-    // Test that loads across multiple blocks are not removed
+    // Test that loads across multiple blocks are removed
     #[test]
     fn multiple_blocks() {
@@ -364,24 +565,22 @@ mod tests {
         // fn main {
         //   b0():
         //     v0 = allocate
-        //     store v0, Field 5
-        //     jmp b1(Field 5):  // Optimized to constant 5
-        //   b1(v2: Field):
-        //     v3 = load v0      // kept in program
-        //     store v0, Field 6
-        //     return v2, v3, Field 6 // Optimized to constant 6
+        //     jmp b1(Field 5)
+        //   b1(v3: Field):
+        //     return v3, Field 5, Field 6 // Optimized to constants 5 and 6
         // }
         let ssa = ssa.mem2reg();
+
         let main = ssa.main();
         assert_eq!(main.reachable_blocks().len(), 2);
 
-        // Only the load from the entry block should be removed
+        // The loads should be removed
         assert_eq!(count_loads(main.entry_block(), &main.dfg), 0);
-        assert_eq!(count_loads(b1, &main.dfg), 1);
+        assert_eq!(count_loads(b1, &main.dfg), 0);
 
-        // All stores should be kept
-        assert_eq!(count_stores(main.entry_block(), &main.dfg), 1);
-        assert_eq!(count_stores(b1, &main.dfg), 1);
+        // All stores should be removed
+        assert_eq!(count_stores(main.entry_block(), &main.dfg), 0);
+        assert_eq!(count_stores(b1, &main.dfg), 0);
 
         // The jmp to b1 should also be a constant 5 now
         match main.dfg[main.entry_block()].terminator() {
@@ -394,4 +593,95 @@ mod tests {
             _ => unreachable!(),
         };
     }
+
+    // Test that a load in a predecessor block has been removed if the value
+    // is later stored in a successor block
+    #[test]
+    fn store_with_load_in_predecessor_block() {
+        // fn main {
+        //   b0():
+        //     v0 = allocate
+        //     store Field 0 at v0
+        //     v2 = allocate
+        //     store v0 at v2
+        //     v3 = load v2
+        //     v4 = load v2
+        //     jmp b1()
+        //   b1():
+        //     store Field 1 at v3
+        //     store Field 2 at v4
+        //     v8 = load v3
+        //     v9 = eq v8, Field 2
+        //     return
+        // }
+        let main_id = Id::test_new(0);
+        let mut builder = FunctionBuilder::new("main".into(), main_id, RuntimeType::Acir);
+
+        let v0 = builder.insert_allocate();
+
+        let zero = builder.field_constant(0u128);
+        builder.insert_store(v0, zero);
+
+        let v2 = builder.insert_allocate();
+        builder.insert_store(v2, v0);
+
+        let v3 = builder.insert_load(v2, Type::field());
+        let v4 = builder.insert_load(v2, Type::field());
+        let b1 = builder.insert_block();
+        builder.terminate_with_jmp(b1, vec![]);
+
+        builder.switch_to_block(b1);
+
+        let one = builder.field_constant(1u128);
+        builder.insert_store(v3, one);
+
+        let two = builder.field_constant(2u128);
+        builder.insert_store(v4, two);
+
+        let v8 = builder.insert_load(v3, Type::field());
+        let _ = builder.insert_binary(v8, BinaryOp::Eq, two);
+
+        builder.terminate_with_return(vec![]);
+
+        let ssa = builder.finish();
+        assert_eq!(ssa.main().reachable_blocks().len(), 2);
+
+        // Expected result:
+        // fn main {
+        //   b0():
+        //     v0 = allocate
+        //     v2 = allocate
+        //     jmp b1()
+        //   b1():
+        //     v8 = eq Field 2, Field 2
+        //     return
+        // }
+        let ssa = ssa.mem2reg();
+
+        let main = ssa.main();
+        assert_eq!(main.reachable_blocks().len(), 2);
+
+        // All loads should be removed
+        assert_eq!(count_loads(main.entry_block(), &main.dfg), 0);
+        assert_eq!(count_loads(b1, &main.dfg), 0);
+
+        // All stores should be removed
+        assert_eq!(count_stores(main.entry_block(), &main.dfg), 0);
+        assert_eq!(count_stores(b1, &main.dfg), 0);
+
+        let b1_instructions = main.dfg[b1].instructions();
+        // The first instruction should be a binary operation
+        match &main.dfg[b1_instructions[0]] {
+            Instruction::Binary(binary) => {
+                let lhs =
+                    main.dfg.get_numeric_constant(binary.lhs).expect("Expected constant value");
+                let rhs =
+                    main.dfg.get_numeric_constant(binary.rhs).expect("Expected constant value");
+
+                assert_eq!(lhs, rhs);
+                assert_eq!(lhs, FieldElement::from(2u128));
+            }
+            _ => unreachable!(),
+        }
+    }
 }
diff --git a/crates/noirc_evaluator/src/ssa/opt/unrolling.rs b/crates/noirc_evaluator/src/ssa/opt/unrolling.rs
index ac38f102712..def9d2fa20b 100644
--- a/crates/noirc_evaluator/src/ssa/opt/unrolling.rs
+++ b/crates/noirc_evaluator/src/ssa/opt/unrolling.rs
@@ -46,7 +46,7 @@ impl Ssa {
     }
 }
 
-struct Loop {
+pub(crate) struct Loop {
     /// The header block of a loop is the block which dominates all the
     /// other blocks in the loop.
     header: BasicBlockId,
@@ -56,15 +56,15 @@ struct Loop {
     back_edge_start: BasicBlockId,
 
     /// All the blocks contained within the loop, including `header` and `back_edge_start`.
-    blocks: HashSet<BasicBlockId>,
+    pub(crate) blocks: HashSet<BasicBlockId>,
 }
 
-struct Loops {
+pub(crate) struct Loops {
     /// The loops that failed to be unrolled so that we do not try to unroll them again.
     /// Each loop is identified by its header block id.
     failed_to_unroll: HashSet<BasicBlockId>,
 
-    yet_to_unroll: Vec<Loop>,
+    pub(crate) yet_to_unroll: Vec<Loop>,
     modified_blocks: HashSet<BasicBlockId>,
     cfg: ControlFlowGraph,
     dom_tree: DominatorTree,
@@ -72,7 +72,7 @@ struct Loops {
 
 /// Find a loop in the program by finding a node that dominates any predecessor node.
 /// The edge where this happens will be the back-edge of the loop.
-fn find_all_loops(function: &Function) -> Loops {
+pub(crate) fn find_all_loops(function: &Function) -> Loops {
     let cfg = ControlFlowGraph::with_function(function);
     let post_order = PostOrder::with_function(function);
     let mut dom_tree = DominatorTree::with_cfg_and_post_order(&cfg, &post_order);
diff --git a/crates/noirc_frontend/src/graph/mod.rs b/crates/noirc_frontend/src/graph/mod.rs
index 7a1daef45fd..90490c47c36 100644
--- a/crates/noirc_frontend/src/graph/mod.rs
+++ b/crates/noirc_frontend/src/graph/mod.rs
@@ -30,7 +30,7 @@ impl CrateId {
 pub struct CrateName(SmolStr);
 
 impl CrateName {
-    fn is_valid_name(name: &str) -> bool {
+    fn is_valid_name(name: &str) -> bool {
         !name.is_empty() && name.chars().all(|n| !CHARACTER_BLACK_LIST.contains(&n))
     }
 }
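For completeness, is_valid_name accepts any non-empty name whose characters all avoid the blacklist. A sketch of the intent, callable from inside the module since the function is private, and assuming CHARACTER_BLACK_LIST contains '-':

    assert!(CrateName::is_valid_name("my_crate"));
    assert!(!CrateName::is_valid_name(""));         // empty names are rejected
    assert!(!CrateName::is_valid_name("my-crate")); // blacklisted character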