chore: PR 6782 w/ PR 6783 #6784

Closed
wants to merge 11 commits
117 changes: 116 additions & 1 deletion compiler/noirc_evaluator/src/ssa/opt/die.rs
@@ -10,12 +10,14 @@ use crate::ssa::{
function::Function,
instruction::{BinaryOp, Instruction, InstructionId, Intrinsic},
post_order::PostOrder,
types::NumericType,
types::{NumericType, Type},
value::{Value, ValueId},
},
ssa_gen::Ssa,
};

use super::rc::{pop_rc_for, RcInstruction};

impl Ssa {
/// Performs Dead Instruction Elimination (DIE) to remove any instructions with
/// unused results.
@@ -104,6 +106,8 @@ impl Context {

let instructions_len = block.instructions().len();

let mut rc_tracker = RcTracker::default();

// Indexes of instructions that might be out of bounds.
// We'll remove those, but before that we'll insert bounds checks for them.
let mut possible_index_out_of_bounds_indexes = Vec::new();
@@ -131,8 +135,13 @@
});
}
}

rc_tracker.track_inc_rcs_to_remove(*instruction_id, function);
}

self.instructions_to_remove.extend(rc_tracker.get_non_mutated_arrays(&function.dfg));
self.instructions_to_remove.extend(rc_tracker.rc_pairs_to_remove);

// If there are some instructions that might trigger an out of bounds error,
// first add constrain checks. Then run the DIE pass again, which will remove those
// but leave the constrains (and any values needed by those constrains)
@@ -517,6 +526,112 @@ fn apply_side_effects(
(lhs, rhs)
}

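/// Tracks IncrementRc and DecrementRc instructions so that DIE can remove reference-count
/// updates which provably have no effect; see the field comments below for the specific rules.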
#[derive(Default)]
struct RcTracker {
// We can track IncrementRc instructions per block to determine whether they are useless.
// IncrementRc and DecrementRc instructions are normally side-effectful instructions, but we remove
// them if their value is not used anywhere in the function. However, even when their value is used, they
// are pointless if there is no array_set between the increment and the decrement of the reference counter.
// We track per block whether an IncrementRc instruction has a paired DecrementRc instruction
// with the same value but no array set in between.
// If we see an inc/dec RC pair within a block we can safely remove both instructions.
rcs_with_possible_pairs: HashMap<Type, Vec<RcInstruction>>,
rc_pairs_to_remove: HashSet<InstructionId>,
// We also separately track all IncrementRc instructions and all arrays which have been mutably borrowed.
// If an array has not been mutably borrowed we can then safely remove all IncrementRc instructions on that array.
inc_rcs: HashMap<ValueId, HashSet<InstructionId>>,
mutated_array_types: HashSet<Type>,
// The SSA often creates patterns where, after simplifications, we end up with repeated
// IncrementRc instructions on the same value. We track whether the previous instruction was an IncrementRc,
// and if the current instruction is also an IncrementRc on the same value we remove the current instruction.
// `None` if the previous instruction was anything other than an IncrementRc
previous_inc_rc: Option<ValueId>,
}
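// Illustrative sketch of the pair-removal rule on a hypothetical SSA block (the `dec_rc`
// mnemonic is assumed here purely for illustration; `inc_rc` appears in the tests below):
//
//     inc_rc v0
//     ...            // no array_set touching v0's type in between
//     dec_rc v0
//
// DIE walks the block in reverse, so the DecrementRc is recorded first in
// `rcs_with_possible_pairs`; when the matching IncrementRc is reached and the entry was not
// marked `possibly_mutated`, both instruction ids are added to `rc_pairs_to_remove`.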

impl RcTracker {
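/// Inspects a single instruction (DIE visits a block's instructions in reverse order) and records
/// redundant back-to-back IncrementRc instructions, removable inc/dec pairs, all IncrementRc
/// instructions per value, and any array types that may have been mutated.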
fn track_inc_rcs_to_remove(&mut self, instruction_id: InstructionId, function: &Function) {
let instruction = &function.dfg[instruction_id];

if let Instruction::IncrementRc { value } = instruction {
if let Some(previous_value) = self.previous_inc_rc {
if previous_value == *value {
self.rc_pairs_to_remove.insert(instruction_id);
}
}
self.previous_inc_rc = Some(*value);
} else {
self.previous_inc_rc = None;
}

// DIE loops over a block in reverse order, so we insert an RC instruction for possible removal
// when we see a DecrementRc and check whether it was possibly mutated when we see an IncrementRc.
match instruction {
Instruction::IncrementRc { value } => {
if let Some(inc_rc) =
pop_rc_for(*value, function, &mut self.rcs_with_possible_pairs)
{
if !inc_rc.possibly_mutated {
self.rc_pairs_to_remove.insert(inc_rc.id);
self.rc_pairs_to_remove.insert(instruction_id);
}
}

self.inc_rcs.entry(*value).or_default().insert(instruction_id);
}
Instruction::DecrementRc { value } => {
let typ = function.dfg.type_of_value(*value);

// We assume arrays aren't mutated until we find an array_set
let dec_rc =
RcInstruction { id: instruction_id, array: *value, possibly_mutated: false };
self.rcs_with_possible_pairs.entry(typ).or_default().push(dec_rc);
}
Instruction::ArraySet { array, .. } => {
let typ = function.dfg.type_of_value(*array);
if let Some(dec_rcs) = self.rcs_with_possible_pairs.get_mut(&typ) {
for dec_rc in dec_rcs {
dec_rc.possibly_mutated = true;
}
}

self.mutated_array_types.insert(typ);
}
Instruction::Store { value, .. } => {
// We are very conservative and say that any store of an array type means it has the potential to be mutated.
let typ = function.dfg.type_of_value(*value);
if matches!(&typ, Type::Array(..) | Type::Slice(..)) {
self.mutated_array_types.insert(typ);
}
}
Instruction::Call { arguments, .. } => {
for arg in arguments {
let typ = function.dfg.type_of_value(*arg);
if matches!(&typ, Type::Array(..) | Type::Slice(..)) {
self.mutated_array_types.insert(typ);
}
}
}
_ => {}
}
}

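/// Returns the ids of all IncrementRc instructions whose array type never appears in
/// `mutated_array_types`. Since those arrays are never mutably borrowed in this function,
/// their reference-count increments can be safely removed.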
fn get_non_mutated_arrays(&self, dfg: &DataFlowGraph) -> HashSet<InstructionId> {
self.inc_rcs
.keys()
.filter_map(|value| {
let typ = dfg.type_of_value(*value);
if !self.mutated_array_types.contains(&typ) {
Some(&self.inc_rcs[value])
} else {
None
}
})
.flatten()
.copied()
.collect()
}
}

#[cfg(test)]
mod test {
use std::sync::Arc;
114 changes: 114 additions & 0 deletions compiler/noirc_evaluator/src/ssa/opt/loop_invariant.rs
@@ -9,6 +9,7 @@
//! We also check that we are not hoisting instructions with side effects.
use acvm::{acir::AcirField, FieldElement};
use fxhash::{FxHashMap as HashMap, FxHashSet as HashSet};
use iter_extended::vecmap;

use crate::ssa::{
ir::{
@@ -109,6 +110,28 @@ impl<'f> LoopInvariantContext<'f> {

if hoist_invariant {
self.inserter.push_instruction(instruction_id, pre_header);

// We track whether a MakeArray instruction may be mutated before we deduplicate it,
// but we still need to issue an extra inc_rc in case the hoisted array is mutated afterward.
if matches!(
self.inserter.function.dfg[instruction_id],
Instruction::MakeArray { .. }
) {
let results =
self.inserter.function.dfg.instruction_results(instruction_id).to_vec();
let results = vecmap(results, |value| self.inserter.resolve(value));
assert_eq!(
results.len(),
1,
"ICE: We expect only a single result from an `Instruction::MakeArray`"
);
let inc_rc = Instruction::IncrementRc { value: results[0] };
let call_stack = self.inserter.function.dfg.get_call_stack(instruction_id);
self.inserter
.function
.dfg
.insert_instruction_and_results(inc_rc, *block, None, call_stack);
}
} else {
self.inserter.push_instruction(instruction_id, *block);
}
@@ -186,6 +209,7 @@ impl<'f> LoopInvariantContext<'f> {
});

let can_be_deduplicated = instruction.can_be_deduplicated(self.inserter.function, false)
|| matches!(instruction, Instruction::MakeArray { .. })
|| self.can_be_deduplicated_from_upper_bound(&instruction);

is_loop_invariant && can_be_deduplicated
@@ -555,4 +579,94 @@ mod test {
let ssa = ssa.loop_invariant_code_motion();
assert_normalized_ssa_equals(ssa, expected);
}

#[test]
fn insert_inc_rc_when_moving_make_array() {
// SSA for the following program:
//
// unconstrained fn main(x: u32, y: u32) {
// let mut a1 = [1, 2, 3, 4, 5];
// a1[x] = 64;
// for i in 0 .. 5 {
// let mut a2 = [1, 2, 3, 4, 5];
// a2[y + i] = 128;
// foo(a2);
// }
// foo(a1);
// }
//
// When we move a loop-invariant make_array instruction, we want to make sure
// we account for whether that array has been marked as mutable.
// To do so, we increment the reference counter on the array we are moving.
// In the SSA below, we want to move `v42` out of the loop.
let src = "
brillig(inline) fn main f0 {
b0(v0: u32, v1: u32):
v8 = make_array [Field 1, Field 2, Field 3, Field 4, Field 5] : [Field; 5]
v9 = allocate -> &mut [Field; 5]
v11 = array_set v8, index v0, value Field 64
v13 = add v0, u32 1
store v11 at v9
jmp b1(u32 0)
b1(v2: u32):
v16 = lt v2, u32 5
jmpif v16 then: b3, else: b2
b2():
v17 = load v9 -> [Field; 5]
call f1(v17)
return
b3():
v19 = make_array [Field 1, Field 2, Field 3, Field 4, Field 5] : [Field; 5]
v20 = allocate -> &mut [Field; 5]
v21 = add v1, v2
v23 = array_set v19, index v21, value Field 128
call f1(v23)
v25 = add v2, u32 1
jmp b1(v25)
}
brillig(inline) fn foo f1 {
b0(v0: [Field; 5]):
return
}
";

let ssa = Ssa::from_str(src).unwrap();

// We expect the `make_array` at the top of `b3` to be replaced with an `inc_rc`
// of the newly hoisted `make_array` at the end of `b0`.
let expected = "
brillig(inline) fn main f0 {
b0(v0: u32, v1: u32):
v8 = make_array [Field 1, Field 2, Field 3, Field 4, Field 5] : [Field; 5]
v9 = allocate -> &mut [Field; 5]
v11 = array_set v8, index v0, value Field 64
v13 = add v0, u32 1
store v11 at v9
v14 = make_array [Field 1, Field 2, Field 3, Field 4, Field 5] : [Field; 5]
jmp b1(u32 0)
b1(v2: u32):
v17 = lt v2, u32 5
jmpif v17 then: b3, else: b2
b2():
v18 = load v9 -> [Field; 5]
call f1(v18)
return
b3():
inc_rc v14
v20 = allocate -> &mut [Field; 5]
v21 = add v1, v2
v23 = array_set v14, index v21, value Field 128
call f1(v23)
v25 = add v2, u32 1
jmp b1(v25)
}
brillig(inline) fn foo f1 {
b0(v0: [Field; 5]):
return
}
";

let ssa = ssa.loop_invariant_code_motion();
assert_normalized_ssa_equals(ssa, expected);
}
}
2 changes: 0 additions & 2 deletions test_programs/gates_report_brillig_execution.sh
@@ -8,8 +8,6 @@ excluded_dirs=(
"double_verify_nested_proof"
"overlapping_dep_and_mod"
"comptime_println"
# Takes a very long time to execute as large loops do not get simplified.
"regression_4709"
# bit sizes for bigint operation doesn't match up.
"bigint"
# Expected to fail as test asserts on which runtime it is in.
4 changes: 1 addition & 3 deletions tooling/nargo_cli/build.rs
@@ -42,9 +42,7 @@

/// Some tests are explicitly ignored in brillig due to them failing.
/// These should be fixed and removed from this list.
const IGNORED_BRILLIG_TESTS: [&str; 11] = [
// Takes a very long time to execute as large loops do not get simplified.
"regression_4709",
const IGNORED_BRILLIG_TESTS: [&str; 10] = [
// bit sizes for bigint operation doesn't match up.
"bigint",
// ICE due to looking for function which doesn't exist.
@@ -182,7 +180,7 @@
// We need to isolate test cases in the same group, otherwise they overwrite each other's artifacts.
// On CI we use `cargo nextest`, which runs tests in different processes; for this we use a file lock.
// Locally we might be using `cargo test`, which runs tests in the same process; in this case the file lock
// wouldn't work, because the process itself has the lock, and it looks like it can have N instances without
// any problems; for this reason we also use a `Mutex`.
let mutex_name = format! {"TEST_MUTEX_{}", test_name.to_uppercase()};
write!(