diff --git a/compiler/noirc_evaluator/src/ssa/ir/cfg.rs b/compiler/noirc_evaluator/src/ssa/ir/cfg.rs index 5253c68c72c..8badcc5fa29 100644 --- a/compiler/noirc_evaluator/src/ssa/ir/cfg.rs +++ b/compiler/noirc_evaluator/src/ssa/ir/cfg.rs @@ -115,7 +115,7 @@ impl ControlFlowGraph { pub(crate) fn successors( &self, basic_block_id: BasicBlockId, - ) -> impl ExactSizeIterator<Item = BasicBlockId> + '_ { + ) -> impl ExactSizeIterator<Item = BasicBlockId> + DoubleEndedIterator + '_ { self.data .get(&basic_block_id) .expect("ICE: Attempted to iterate successors of block not found within cfg.") .iter() .copied() } + + /// Reverse the control flow graph + #[cfg(test)] + pub(crate) fn reverse(&self) -> Self { + let mut reversed_cfg = ControlFlowGraph::default(); + + for (block_id, node) in &self.data { + // For each block, reverse the edges + // In the reversed CFG, successors become predecessors + for &successor in &node.successors { + reversed_cfg.add_edge(successor, *block_id); + } + } + + reversed_cfg + } + + /// Returns the entry blocks for a CFG. This is all nodes without any predecessors. 
+ pub(crate) fn compute_entry_blocks(&self) -> Vec<BasicBlockId> { + self.data.keys().filter(|&&block| self.predecessors(block).len() == 0).cloned().collect() + } } #[cfg(test)] mod tests { use crate::ssa::ir::{ - call_stack::CallStackId, instruction::TerminatorInstruction, map::Id, types::Type, + basic_block::BasicBlockId, call_stack::CallStackId, instruction::TerminatorInstruction, + map::Id, types::Type, }; use super::{super::function::Function, ControlFlowGraph}; @@ -146,8 +168,7 @@ mod tests { ControlFlowGraph::with_function(&func); } - #[test] - fn jumps() { + fn build_test_function() -> (Function, BasicBlockId, BasicBlockId, BasicBlockId) { // Build function of form // fn func { // block0(cond: u1): @@ -181,6 +202,50 @@ mod tests { call_stack: CallStackId::root(), }); + (func, block0_id, block1_id, block2_id) + } + + fn modify_test_function( + func: &mut Function, + block0_id: BasicBlockId, + block1_id: BasicBlockId, + block2_id: BasicBlockId, + ) -> BasicBlockId { + // Modify function to form: + // fn func { + // block0(cond: u1): + // jmpif cond, then: block1, else: ret_block + // block1(): + // jmpif cond, then: block1, else: block2 + // block2(): + // jmp ret_block() + // ret_block(): + // return () + // } + let ret_block_id = func.dfg.make_block(); + func.dfg[ret_block_id].set_terminator(TerminatorInstruction::Return { + return_values: vec![], + call_stack: CallStackId::root(), + }); + func.dfg[block2_id].set_terminator(TerminatorInstruction::Jmp { + destination: ret_block_id, + arguments: vec![], + call_stack: CallStackId::root(), + }); + let cond = func.dfg[block0_id].parameters()[0]; + func.dfg[block0_id].set_terminator(TerminatorInstruction::JmpIf { + condition: cond, + then_destination: block1_id, + else_destination: ret_block_id, + call_stack: CallStackId::root(), + }); + ret_block_id + } + + #[test] + fn jumps() { + let (mut func, block0_id, block1_id, block2_id) = build_test_function(); + let mut cfg = ControlFlowGraph::with_function(&func); 
#[allow(clippy::needless_collect)] @@ -212,33 +277,7 @@ mod tests { assert!(block1_successors.contains(&block2_id)); } - // Modify function to form: - // fn func { - // block0(cond: u1): - // jmpif cond, then: block1, else: ret_block - // block1(): - // jmpif cond, then: block1, else: block2 - // block2(): - // jmp ret_block() - // ret_block(): - // return () - // } - let ret_block_id = func.dfg.make_block(); - func.dfg[ret_block_id].set_terminator(TerminatorInstruction::Return { - return_values: vec![], - call_stack: CallStackId::root(), - }); - func.dfg[block2_id].set_terminator(TerminatorInstruction::Jmp { - destination: ret_block_id, - arguments: vec![], - call_stack: CallStackId::root(), - }); - func.dfg[block0_id].set_terminator(TerminatorInstruction::JmpIf { - condition: cond, - then_destination: block1_id, - else_destination: ret_block_id, - call_stack: CallStackId::root(), - }); + let ret_block_id = modify_test_function(&mut func, block0_id, block1_id, block2_id); // Recompute new and changed blocks cfg.recompute_block(&func, block0_id); @@ -275,4 +314,86 @@ mod tests { assert!(block2_successors.contains(&ret_block_id)); } } + + #[test] + fn reversed_cfg_jumps() { + let (mut func, block0_id, block1_id, block2_id) = build_test_function(); + + let mut cfg = ControlFlowGraph::with_function(&func); + let reversed_cfg = cfg.reverse(); + + #[allow(clippy::needless_collect)] + { + let block0_predecessors: Vec<_> = reversed_cfg.predecessors(block0_id).collect(); + let block1_predecessors: Vec<_> = reversed_cfg.predecessors(block1_id).collect(); + let block2_predecessors: Vec<_> = reversed_cfg.predecessors(block2_id).collect(); + + let block0_successors: Vec<_> = reversed_cfg.successors(block0_id).collect(); + let block1_successors: Vec<_> = reversed_cfg.successors(block1_id).collect(); + let block2_successors: Vec<_> = reversed_cfg.successors(block2_id).collect(); + + assert_eq!(block0_predecessors.len(), 2); + assert_eq!(block1_predecessors.len(), 2); + 
assert_eq!(block2_predecessors.len(), 0); + + assert!(block0_predecessors.contains(&block1_id)); + assert!(block0_predecessors.contains(&block2_id)); + assert!(block1_predecessors.contains(&block1_id)); + assert!(block1_predecessors.contains(&block2_id)); + + assert_eq!(block0_successors.len(), 0); + assert_eq!(block1_successors.len(), 2); + assert_eq!(block2_successors.len(), 2); + + assert!(block1_successors.contains(&block0_id)); + assert!(block1_successors.contains(&block1_id)); + assert!(block2_successors.contains(&block0_id)); + assert!(block2_successors.contains(&block1_id)); + } + + let ret_block_id = modify_test_function(&mut func, block0_id, block1_id, block2_id); + + // Recompute new and changed blocks + cfg.recompute_block(&func, block0_id); + cfg.recompute_block(&func, block2_id); + cfg.recompute_block(&func, ret_block_id); + + let reversed_cfg = cfg.reverse(); + + #[allow(clippy::needless_collect)] + { + let block0_predecessors: Vec<_> = reversed_cfg.predecessors(block0_id).collect(); + let block1_predecessors: Vec<_> = reversed_cfg.predecessors(block1_id).collect(); + let block2_predecessors: Vec<_> = reversed_cfg.predecessors(block2_id).collect(); + let ret_block_predecessors: Vec<_> = reversed_cfg.predecessors(ret_block_id).collect(); + + let block0_successors: Vec<_> = reversed_cfg.successors(block0_id).collect(); + let block1_successors: Vec<_> = reversed_cfg.successors(block1_id).collect(); + let block2_successors: Vec<_> = reversed_cfg.successors(block2_id).collect(); + let ret_block_successors: Vec<_> = reversed_cfg.successors(ret_block_id).collect(); + + assert_eq!(block0_predecessors.len(), 2); + assert_eq!(block1_predecessors.len(), 2); + assert_eq!(block2_predecessors.len(), 1); + assert_eq!(ret_block_predecessors.len(), 0); + + assert!(block0_predecessors.contains(&block1_id)); + assert!(block0_predecessors.contains(&ret_block_id)); + assert!(block1_predecessors.contains(&block1_id)); + assert!(block1_predecessors.contains(&block2_id)); + 
assert!(!block2_predecessors.contains(&block0_id)); + assert!(block2_predecessors.contains(&ret_block_id)); + + assert_eq!(block0_successors.len(), 0); + assert_eq!(block1_successors.len(), 2); + assert_eq!(block2_successors.len(), 1); + assert_eq!(ret_block_successors.len(), 2); + + assert!(block1_successors.contains(&block0_id)); + assert!(block1_successors.contains(&block1_id)); + assert!(block2_successors.contains(&block1_id)); + assert!(ret_block_successors.contains(&block0_id)); + assert!(ret_block_successors.contains(&block2_id)); + } + } } diff --git a/compiler/noirc_evaluator/src/ssa/ir/dom.rs b/compiler/noirc_evaluator/src/ssa/ir/dom.rs index a6f878dafbf..c9e5a95650c 100644 --- a/compiler/noirc_evaluator/src/ssa/ir/dom.rs +++ b/compiler/noirc_evaluator/src/ssa/ir/dom.rs @@ -142,6 +142,9 @@ impl DominatorTree { /// Allocate and compute a dominator tree from a pre-computed control flow graph and /// post-order counterpart. + /// + /// This method should be used when we want to compute a post-dominator tree. + /// A post-dominator tree just expects the control flow graph to be reversed. pub(crate) fn with_cfg_and_post_order(cfg: &ControlFlowGraph, post_order: &PostOrder) -> Self { let mut dom_tree = DominatorTree { nodes: HashMap::default(), cache: HashMap::default() }; dom_tree.compute_dominator_tree(cfg, post_order); @@ -159,6 +162,18 @@ impl DominatorTree { Self::with_cfg_and_post_order(&cfg, &post_order) } + /// Allocate and compute a post-dominator tree for the given function. + /// + /// This approach computes the reversed control flow graph and post-order internally and then + /// discards them. If either should be retained for reuse, it is better to instead pre-compute them + /// and build the dominator tree with `DominatorTree::with_cfg_and_post_order`. 
+ #[cfg(test)] + pub(crate) fn with_function_post_dom(func: &Function) -> Self { + let reversed_cfg = ControlFlowGraph::with_function(func).reverse(); + let post_order = PostOrder::with_cfg(&reversed_cfg); + Self::with_cfg_and_post_order(&reversed_cfg, &post_order) + } + /// Build a dominator tree from a control flow graph using Keith D. Cooper's /// "Simple, Fast Dominator Algorithm." fn compute_dominator_tree(&mut self, cfg: &ControlFlowGraph, post_order: &PostOrder) { @@ -403,8 +418,7 @@ mod tests { dt.dominates(b2, b1); } - #[test] - fn backwards_layout() { + fn backwards_layout_setup() -> (Function, BasicBlockId, BasicBlockId, BasicBlockId) { // func { // block0(): // jmp block2() @@ -425,10 +439,16 @@ mod tests { builder.terminate_with_jmp(block1_id, vec![]); let ssa = builder.finish(); - let func = ssa.main(); + let func = ssa.main().clone(); let block0_id = func.entry_block(); - let mut dt = DominatorTree::with_function(func); + (func, block0_id, block1_id, block2_id) + } + + #[test] + fn backwards_layout() { + let (func, block0_id, block1_id, block2_id) = backwards_layout_setup(); + let mut dt = DominatorTree::with_function(&func); // Expected dominance tree: // block0 { @@ -473,6 +493,55 @@ mod tests { assert!(dt.dominates(block2_id, block2_id)); } + #[test] + fn post_dom_backwards_layout() { + let (func, block0_id, block1_id, block2_id) = backwards_layout_setup(); + + let mut post_dom = DominatorTree::with_function_post_dom(&func); + + // Expected post-dominator tree: + // block1 { + // block2 { + // block0 + // } + // } + + assert_eq!(post_dom.immediate_dominator(block1_id), None); + assert_eq!(post_dom.immediate_dominator(block2_id), Some(block1_id)); + assert_eq!(post_dom.immediate_dominator(block0_id), Some(block2_id)); + + assert_eq!(post_dom.reverse_post_order_cmp(block0_id, block0_id), Ordering::Equal); + assert_eq!(post_dom.reverse_post_order_cmp(block0_id, block1_id), Ordering::Greater); + assert_eq!(post_dom.reverse_post_order_cmp(block0_id, 
block2_id), Ordering::Greater); + + assert_eq!(post_dom.reverse_post_order_cmp(block1_id, block0_id), Ordering::Less); + assert_eq!(post_dom.reverse_post_order_cmp(block1_id, block1_id), Ordering::Equal); + assert_eq!(post_dom.reverse_post_order_cmp(block1_id, block2_id), Ordering::Less); + + assert_eq!(post_dom.reverse_post_order_cmp(block2_id, block0_id), Ordering::Less); + assert_eq!(post_dom.reverse_post_order_cmp(block2_id, block1_id), Ordering::Greater); + assert_eq!(post_dom.reverse_post_order_cmp(block2_id, block2_id), Ordering::Equal); + + // Post-dominance matrix: + // ✓: Row item post-dominates column item + // b0 b1 b2 + // b0 ✓ + // b1 ✓ ✓ ✓ + // b2 ✓ ✓ + + assert!(post_dom.dominates(block0_id, block0_id)); + assert!(!post_dom.dominates(block0_id, block1_id)); + assert!(!post_dom.dominates(block0_id, block2_id)); + + assert!(post_dom.dominates(block1_id, block0_id)); + assert!(post_dom.dominates(block1_id, block1_id)); + assert!(post_dom.dominates(block1_id, block2_id)); + + assert!(post_dom.dominates(block2_id, block0_id)); + assert!(!post_dom.dominates(block2_id, block1_id)); + assert!(post_dom.dominates(block2_id, block2_id)); + } + #[test] fn test_find_map_dominator() { let (dt, b0, b1, b2, _b3) = unreachable_node_setup(); diff --git a/compiler/noirc_evaluator/src/ssa/ir/post_order.rs b/compiler/noirc_evaluator/src/ssa/ir/post_order.rs index 08f195e53d1..183d2ce4b49 100644 --- a/compiler/noirc_evaluator/src/ssa/ir/post_order.rs +++ b/compiler/noirc_evaluator/src/ssa/ir/post_order.rs @@ -7,6 +7,8 @@ use std::collections::HashSet; use crate::ssa::ir::{basic_block::BasicBlockId, function::Function}; +use super::cfg::ControlFlowGraph; + /// Depth-first traversal stack state marker for computing the cfg post-order. enum Visit { First, @@ -25,21 +27,30 @@ impl PostOrder { impl PostOrder { /// Allocate and compute a function's block post-order. 
pub(crate) fn with_function(func: &Function) -> Self { - PostOrder(Self::compute_post_order(func)) + let cfg = ControlFlowGraph::with_function(func); + Self::with_cfg(&cfg) + } + + /// Allocate and compute a function's block post-order. + pub(crate) fn with_cfg(cfg: &ControlFlowGraph) -> Self { + PostOrder(Self::compute_post_order(cfg)) } pub(crate) fn into_vec(self) -> Vec<BasicBlockId> { self.0 } - // Computes the post-order of the function by doing a depth-first traversal of the + // Computes the post-order of the CFG by doing a depth-first traversal of the // function's entry block's previously unvisited children. Each block is sequenced according // to when the traversal exits it. - fn compute_post_order(func: &Function) -> Vec<BasicBlockId> { - let mut stack = vec![(Visit::First, func.entry_block())]; + fn compute_post_order(cfg: &ControlFlowGraph) -> Vec<BasicBlockId> { + let mut stack = vec![]; let mut visited: HashSet<BasicBlockId> = HashSet::new(); let mut post_order: Vec<BasicBlockId> = Vec::new(); + // Set root blocks + stack.extend(cfg.compute_entry_blocks().into_iter().map(|root| (Visit::First, root))); + while let Some((visit, block_id)) = stack.pop() { match visit { Visit::First => { @@ -50,10 +61,10 @@ impl PostOrder { stack.push((Visit::Last, block_id)); // Stack successors for visiting. Because items are taken from the top of the // stack, we push the item that's due for a visit first to the top. - for successor_id in func.dfg[block_id].successors().rev() { + for successor_id in cfg.successors(block_id).rev() { if !visited.contains(&successor_id) { - // This not visited check would also be cover by the next - // iteration, but checking here two saves an iteration per successor. + // This not visited check would also be covered by the next + // iteration, but checking here too saves an iteration per successor. 
stack.push((Visit::First, successor_id)); } } diff --git a/compiler/noirc_evaluator/src/ssa/opt/basic_conditional.rs b/compiler/noirc_evaluator/src/ssa/opt/basic_conditional.rs index 669a3dd7783..a534e394b78 100644 --- a/compiler/noirc_evaluator/src/ssa/opt/basic_conditional.rs +++ b/compiler/noirc_evaluator/src/ssa/opt/basic_conditional.rs @@ -433,11 +433,11 @@ mod test { v3 = eq v0, u32 5 jmpif v3 then: b2, else: b1 b1(): - v6 = make_array b"foo" - jmp b3(v6) - b2(): - v10 = make_array b"bar" + v10 = make_array b"foo" jmp b3(v10) + b2(): + v7 = make_array b"bar" + jmp b3(v7) b3(v1: [u8; 3]): return v1 } @@ -487,36 +487,36 @@ mod test { assert_eq!(ssa.main().reachable_blocks().len(), 10); let expected = " - brillig(inline) fn foo f0 { - b0(v0: u32): - v3 = eq v0, u32 5 - v4 = not v3 - jmpif v3 then: b2, else: b1 - b1(): - v6 = lt v0, u32 3 - v7 = truncate v0 to 1 bits, max_bit_size: 32 - v8 = not v6 - v9 = truncate v0 to 2 bits, max_bit_size: 32 - v10 = cast v6 as u32 - v11 = cast v8 as u32 - v12 = unchecked_mul v10, v7 - v13 = unchecked_mul v11, v9 - v14 = unchecked_add v12, v13 - jmp b3(v14) - b2(): - v16 = lt u32 2, v0 - v17 = and v0, u32 2 - v18 = not v16 - v19 = truncate v0 to 3 bits, max_bit_size: 32 - v20 = cast v16 as u32 - v21 = cast v18 as u32 - v22 = unchecked_mul v20, v17 - v23 = unchecked_mul v21, v19 - v24 = unchecked_add v22, v23 - jmp b3(v24) - b3(v1: u32): - return v1 - } + brillig(inline) fn foo f0 { + b0(v0: u32): + v3 = eq v0, u32 5 + v4 = not v3 + jmpif v3 then: b2, else: b1 + b1(): + v16 = lt v0, u32 3 + v17 = truncate v0 to 1 bits, max_bit_size: 32 + v18 = not v16 + v19 = truncate v0 to 2 bits, max_bit_size: 32 + v20 = cast v16 as u32 + v21 = cast v18 as u32 + v22 = unchecked_mul v20, v17 + v23 = unchecked_mul v21, v19 + v24 = unchecked_add v22, v23 + jmp b3(v24) + b2(): + v6 = lt u32 2, v0 + v7 = and v0, u32 2 + v8 = not v6 + v9 = truncate v0 to 3 bits, max_bit_size: 32 + v10 = cast v6 as u32 + v11 = cast v8 as u32 + v12 = unchecked_mul 
v10, v7 + v13 = unchecked_mul v11, v9 + v14 = unchecked_add v12, v13 + jmp b3(v14) + b3(v1: u32): + return v1 + } "; let ssa = ssa.flatten_basic_conditionals(); diff --git a/compiler/noirc_evaluator/src/ssa/opt/constant_folding.rs b/compiler/noirc_evaluator/src/ssa/opt/constant_folding.rs index 3bde1d7530f..a66811826c6 100644 --- a/compiler/noirc_evaluator/src/ssa/opt/constant_folding.rs +++ b/compiler/noirc_evaluator/src/ssa/opt/constant_folding.rs @@ -1603,10 +1603,10 @@ mod test { b2(): jmp b5() b3(): - v4 = sub v0, u32 1 // We can't hoist this because v0 is zero here and it will lead to an underflow + v5 = sub v0, u32 1 // We can't hoist this because v0 is zero here and it will lead to an underflow jmp b5() b4(): - v5 = sub v0, u32 1 + v4 = sub v0, u32 1 jmp b5() b5(): return diff --git a/compiler/noirc_evaluator/src/ssa/opt/defunctionalize.rs b/compiler/noirc_evaluator/src/ssa/opt/defunctionalize.rs index 524805238b9..851918a497c 100644 --- a/compiler/noirc_evaluator/src/ssa/opt/defunctionalize.rs +++ b/compiler/noirc_evaluator/src/ssa/opt/defunctionalize.rs @@ -513,11 +513,11 @@ mod tests { jmpif v4 then: b2, else: b1 b1(): constrain v0 == Field 3 - v7 = call f3(v1) -> u32 - jmp b3(v7) - b2(): - v9 = call f2(v1) -> u32 + v9 = call f3(v1) -> u32 jmp b3(v9) + b2(): + v6 = call f2(v1) -> u32 + jmp b3(v6) b3(v2: u32): return v2 } diff --git a/compiler/noirc_evaluator/src/ssa/opt/loop_invariant.rs b/compiler/noirc_evaluator/src/ssa/opt/loop_invariant.rs index 9bbac1a4c0c..eb65fa077ab 100644 --- a/compiler/noirc_evaluator/src/ssa/opt/loop_invariant.rs +++ b/compiler/noirc_evaluator/src/ssa/opt/loop_invariant.rs @@ -475,12 +475,12 @@ mod test { v8 = lt v3, i32 4 jmpif v8 then: b6, else: b5 b5(): - v10 = unchecked_add v2, i32 1 - jmp b1(v10) + v12 = unchecked_add v2, i32 1 + jmp b1(v12) b6(): constrain v4 == i32 6 - v12 = unchecked_add v3, i32 1 - jmp b4(v12) + v11 = unchecked_add v3, i32 1 + jmp b4(v11) } "; @@ -566,16 +566,16 @@ mod test { v7 = lt v2, u32 4 jmpif 
v7 then: b3, else: b2 b2(): - v8 = load v5 -> [u32; 5] - v10 = array_get v8, index u32 2 -> u32 - constrain v10 == u32 3 + v12 = load v5 -> [u32; 5] + v14 = array_get v12, index u32 2 -> u32 + constrain v14 == u32 3 return b3(): - v12 = load v5 -> [u32; 5] - v13 = array_set v12, index v0, value v1 - store v13 at v5 - v15 = unchecked_add v2, u32 1 - jmp b1(v15) + v8 = load v5 -> [u32; 5] + v9 = array_set v8, index v0, value v1 + store v9 at v5 + v11 = unchecked_add v2, u32 1 + jmp b1(v11) } "; @@ -670,23 +670,23 @@ mod test { v12 = lt v3, u32 4 jmpif v12 then: b6, else: b5 b5(): - v14 = unchecked_add v2, u32 1 - jmp b1(v14) + v19 = unchecked_add v2, u32 1 + jmp b1(v19) b6(): - v15 = array_get v6, index v3 -> u32 - v16 = eq v15, v0 + v13 = array_get v6, index v3 -> u32 + v14 = eq v13, v0 jmp b7(u32 0) b7(v4: u32): - v17 = lt v4, u32 4 - jmpif v17 then: b9, else: b8 + v15 = lt v4, u32 4 + jmpif v15 then: b9, else: b8 b8(): v18 = unchecked_add v3, u32 1 jmp b4(v18) b9(): constrain v10 == v0 - constrain v15 == v0 - v19 = unchecked_add v4, u32 1 - jmp b7(v19) + constrain v13 == v0 + v17 = unchecked_add v4, u32 1 + jmp b7(v17) } "; @@ -762,17 +762,17 @@ mod test { v17 = lt v2, u32 5 jmpif v17 then: b3, else: b2 b2(): - v18 = load v9 -> [Field; 5] - call f1(v18) + v24 = load v9 -> [Field; 5] + call f1(v24) return b3(): inc_rc v14 - v20 = allocate -> &mut [Field; 5] - v21 = add v1, v2 - v23 = array_set v14, index v21, value Field 128 - call f1(v23) - v24 = unchecked_add v2, u32 1 - jmp b1(v24) + v18 = allocate -> &mut [Field; 5] + v19 = add v1, v2 + v21 = array_set v14, index v19, value Field 128 + call f1(v21) + v23 = unchecked_add v2, u32 1 + jmp b1(v23) } brillig(inline) fn foo f1 { b0(v0: [Field; 5]): @@ -919,13 +919,13 @@ mod test { v5 = lt v1, i32 4 jmpif v5 then: b6, else: b5 b5(): - v7 = unchecked_add v0, i32 1 - jmp b1(v7) + v11 = unchecked_add v0, i32 1 + jmp b1(v11) b6(): - v9 = div i32 10, v0 - constrain v9 == i32 6 - v11 = unchecked_add v1, i32 1 - jmp b4(v11) 
+ v7 = div i32 10, v0 + constrain v7 == i32 6 + v10 = unchecked_add v1, i32 1 + jmp b4(v10) } "; @@ -983,12 +983,12 @@ mod test { v8 = lt v1, i32 4 jmpif v8 then: b6, else: b5 b5(): - v9 = unchecked_add v0, i32 1 - jmp b1(v9) + v11 = unchecked_add v0, i32 1 + jmp b1(v11) b6(): constrain v6 == i32 6 - v11 = unchecked_add v1, i32 1 - jmp b4(v11) + v10 = unchecked_add v1, i32 1 + jmp b4(v10) } "; diff --git a/compiler/noirc_evaluator/src/ssa/opt/mem2reg.rs b/compiler/noirc_evaluator/src/ssa/opt/mem2reg.rs index 9b58a0de329..9d4dc99d392 100644 --- a/compiler/noirc_evaluator/src/ssa/opt/mem2reg.rs +++ b/compiler/noirc_evaluator/src/ssa/opt/mem2reg.rs @@ -1125,8 +1125,13 @@ mod tests { jmp b1(Field 0) b1(v0: Field): v4 = eq v0, Field 0 - jmpif v4 then: b3, else: b2 + jmpif v4 then: b2, else: b3 b2(): + v11 = load v3 -> &mut Field + store Field 2 at v11 + v13 = add v0, Field 1 + jmp b1(v13) + b3(): v5 = load v1 -> Field v7 = eq v5, Field 2 constrain v5 == Field 2 @@ -1135,11 +1140,6 @@ mod tests { v10 = eq v9, Field 2 constrain v9 == Field 2 return - b3(): - v11 = load v3 -> &mut Field - store Field 2 at v11 - v13 = add v0, Field 1 - jmp b1(v13) } "; @@ -1163,21 +1163,21 @@ mod tests { v4 = eq v0, Field 0 jmpif v4 then: b3, else: b2 b2(): - v5 = load v1 -> Field - v7 = eq v5, Field 2 - constrain v5 == Field 2 - v8 = load v3 -> &mut Field + v9 = load v1 -> Field + v10 = eq v9, Field 2 + constrain v9 == Field 2 + v11 = load v3 -> &mut Field call f1(v3) - v10 = load v3 -> &mut Field - v11 = load v10 -> Field - v12 = eq v11, Field 2 - constrain v11 == Field 2 + v13 = load v3 -> &mut Field + v14 = load v13 -> Field + v15 = eq v14, Field 2 + constrain v14 == Field 2 return b3(): - v13 = load v3 -> &mut Field - store Field 2 at v13 - v15 = add v0, Field 1 - jmp b1(v15) + v5 = load v3 -> &mut Field + store Field 2 at v5 + v8 = add v0, Field 1 + jmp b1(v8) } acir(inline) fn foo f1 { b0(v0: &mut Field): @@ -1201,13 +1201,13 @@ mod tests { b0(v0: u1): jmpif v0 then: b2, else: b1 
b1(): - v4 = allocate -> &mut Field - store Field 1 at v4 - jmp b3(v4, v4, v4) - b2(): + v6 = allocate -> &mut Field + store Field 1 at v6 + jmp b3(v6, v6, v6) + b2(): + v4 = allocate -> &mut Field + store Field 0 at v4 + jmp b3(v4, v4, v4) b3(v1: &mut Field, v2: &mut Field, v3: &mut Field): v8 = load v1 -> Field store Field 2 at v2 diff --git a/compiler/noirc_evaluator/src/ssa/opt/normalize_value_ids.rs b/compiler/noirc_evaluator/src/ssa/opt/normalize_value_ids.rs index b248f6734a9..d38f7c83855 100644 --- a/compiler/noirc_evaluator/src/ssa/opt/normalize_value_ids.rs +++ b/compiler/noirc_evaluator/src/ssa/opt/normalize_value_ids.rs @@ -76,13 +76,14 @@ impl Context { new_function.dfg.make_global(value.get_type().into_owned()); } - let mut reachable_blocks = PostOrder::with_function(old_function).into_vec(); - reachable_blocks.reverse(); - + let reachable_blocks = old_function.reachable_blocks().into_iter().collect::<Vec<_>>(); self.new_ids.populate_blocks(&reachable_blocks, old_function, new_function); + let mut rpo = PostOrder::with_function(old_function).into_vec(); + rpo.reverse(); + // Map each parameter, instruction, and terminator - for old_block_id in reachable_blocks { + for old_block_id in rpo { let new_block_id = self.new_ids.blocks[&old_block_id]; let old_block = &mut old_function.dfg[old_block_id]; diff --git a/compiler/noirc_evaluator/src/ssa/opt/unrolling.rs b/compiler/noirc_evaluator/src/ssa/opt/unrolling.rs index 43afb9fa41a..5b7e34c8e0d 100644 --- a/compiler/noirc_evaluator/src/ssa/opt/unrolling.rs +++ b/compiler/noirc_evaluator/src/ssa/opt/unrolling.rs @@ -1086,30 +1086,30 @@ mod tests { let ssa = Ssa::from_str(src).unwrap(); let expected = " - acir(inline) fn main f0 { - b0(): - constrain u1 0 == Field 1 - constrain u1 0 == Field 1 - constrain u1 0 == Field 1 - constrain u1 0 == Field 1 - jmp b1() - b1(): - constrain u1 0 == Field 1 - constrain u1 0 == Field 1 - constrain u1 0 == Field 1 - constrain u1 0 == Field 1 - jmp b2() - b2(): - 
constrain u1 0 == Field 1 - constrain u1 0 == Field 1 - constrain u1 0 == Field 1 - constrain u1 0 == Field 1 - jmp b3() - b3(): - jmp b4() - b4(): - return Field 0 - } + acir(inline) fn main f0 { + b0(): + constrain u1 0 == Field 1 + constrain u1 0 == Field 1 + constrain u1 0 == Field 1 + constrain u1 0 == Field 1 + jmp b2() + b1(): + return Field 0 + b2(): + constrain u1 0 == Field 1 + constrain u1 0 == Field 1 + constrain u1 0 == Field 1 + constrain u1 0 == Field 1 + jmp b3() + b3(): + constrain u1 0 == Field 1 + constrain u1 0 == Field 1 + constrain u1 0 == Field 1 + constrain u1 0 == Field 1 + jmp b4() + b4(): + jmp b1() + } "; // The final block count is not 1 because unrolling creates some unnecessary jmps. @@ -1354,24 +1354,24 @@ mod tests { v7 = eq v0, u32 2 jmpif v7 then: b7, else: b3 b3(): - v9 = eq v0, u32 5 - jmpif v9 then: b5, else: b4 + v11 = eq v0, u32 5 + jmpif v11 then: b5, else: b4 b4(): - v10 = load v1 -> Field - v12 = add v10, Field 1 - store v12 at v1 - v14 = add v0, u32 1 - jmp b1(v14) + v15 = load v1 -> Field + v17 = add v15, Field 1 + store v17 at v1 + v18 = add v0, u32 1 + jmp b1(v18) b5(): jmp b6() b6(): - v15 = load v1 -> Field - v17 = eq v15, Field 4 - constrain v15 == Field 4 + v12 = load v1 -> Field + v14 = eq v12, Field 4 + constrain v12 == Field 4 return b7(): - v18 = add v0, u32 1 - jmp b1(v18) + v9 = add v0, u32 1 + jmp b1(v9) } "; let ssa = Ssa::from_str(src).unwrap(); diff --git a/compiler/noirc_evaluator/src/ssa/parser/tests.rs b/compiler/noirc_evaluator/src/ssa/parser/tests.rs index a865a31a060..ee41fd90485 100644 --- a/compiler/noirc_evaluator/src/ssa/parser/tests.rs +++ b/compiler/noirc_evaluator/src/ssa/parser/tests.rs @@ -139,6 +139,18 @@ fn test_multiple_blocks_and_jmp() { #[test] fn test_jmpif() { + let src = " + acir(inline) fn main f0 { + b0(v0: Field): + jmpif v0 then: b1, else: b2 + b1(): + return + b2(): + return + } + "; + assert_ssa_roundtrip(src); + let src = " acir(inline) fn main f0 { b0(v0: Field):