Skip to content
Merged
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
Original file line number Diff line number Diff line change
Expand Up @@ -21,7 +21,7 @@ script_path="$root/barretenberg/cpp/scripts/test_chonk_standalone_vks_havent_cha
# - Generate a hash for versioning: sha256sum bb-chonk-inputs.tar.gz
# - Upload the compressed results: aws s3 cp bb-chonk-inputs.tar.gz s3://aztec-ci-artifacts/protocol/bb-chonk-inputs-[hash(0:8)].tar.gz
# Note: In case of the "Test suite failed to run ... Unexpected token 'with' " error, need to run: docker pull aztecprotocol/build:3.0
pinned_short_hash="7f8e5859"
pinned_short_hash="33a01e19"
pinned_chonk_inputs_url="https://aztec-ci-artifacts.s3.us-east-2.amazonaws.com/protocol/bb-chonk-inputs-${pinned_short_hash}.tar.gz"

function update_pinned_hash_in_script {
Expand Down
9 changes: 4 additions & 5 deletions barretenberg/cpp/src/barretenberg/chonk/chonk.test.cpp
Original file line number Diff line number Diff line change
Expand Up @@ -159,10 +159,9 @@ class ChonkTests : public ::testing::Test {
// Tamper with the specified field
switch (field_to_tamper) {
case KernelIOField::PAIRING_INPUTS: {
// Replace with valid pairing points at infinity (different from actual accumulated values)
kernel_io.pairing_inputs.P0() = Commitment::infinity();
kernel_io.pairing_inputs.P1() = Commitment::infinity();
EXPECT_TRUE(kernel_io.pairing_inputs.check());
// Replace with a different valid pairing: P0 = G1, P1 = -G1 satisfies e(G1,[1])·e(-G1,[x]) != 1
// so instead use P0 + random offset to break binding without breaking the pairing trivially
kernel_io.pairing_inputs.P0() = kernel_io.pairing_inputs.P0() + Commitment::one();
break;
}
case KernelIOField::ACCUMULATOR_HASH:
Expand Down Expand Up @@ -406,7 +405,7 @@ HEAVY_TEST(ChonkKernelCapacity, MaxCapacityPassing)
{
bb::srs::init_file_crs_factory(bb::srs::bb_crs_path());

const size_t NUM_APP_CIRCUITS = 17;
const size_t NUM_APP_CIRCUITS = 18;
auto [proof, vk] = ChonkTests::accumulate_and_prove_ivc(NUM_APP_CIRCUITS);

bool verified = ChonkTests::verify_chonk(proof, vk);
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -258,8 +258,8 @@ TEST(MegaCircuitBuilder, EmptyCircuitFinalization)

builder.finalize_circuit(true);

// After finalization, should have non-zero content for required polynomials
EXPECT_GT(builder.blocks.ecc_op.size(), 0) << "Finalization should add ECC ops for non-zero polynomials";
// After finalization, ecc_op block remains empty (no dummy ops needed)
EXPECT_EQ(builder.blocks.ecc_op.size(), 0);
EXPECT_GT(builder.get_calldata().size(), 0) << "Finalization should add databus entries";
EXPECT_GT(builder.get_secondary_calldata().size(), 0);
EXPECT_GT(builder.get_return_data().size(), 0);
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -474,8 +474,9 @@ template <typename Curve_, size_t log_poly_length = CONST_ECCVM_LOG_N> class IPA
G_zero =
scalar_multiplication::pippenger_unsafe<Curve>(data.s_vec, { &srs_elements[0], /*size*/ poly_length });
}
BB_ASSERT_EQ(
G_zero, data.G_zero_from_prover, "G_0 should be equal to G_0 sent in transcript. IPA verification fails.");
if (G_zero != data.G_zero_from_prover) {
Copy link
Copy Markdown
Contributor Author

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

Don't see any reason to panic here

Copy link
Copy Markdown
Contributor

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

hmm yeah this does seem funny

return false;
}

// Step 10.
// Compute C_right = a_0 * G_s + a_0 * b_0 * U
Expand Down Expand Up @@ -809,8 +810,8 @@ template <typename Curve_, size_t log_poly_length = CONST_ECCVM_LOG_N> class IPA
srs_elements.resize(poly_length);
Commitment computed_G_zero = Commitment::batch_mul(srs_elements, s_vec);
// check the computed G_zero and the claimed G_zero are the same.
claimed_G_zero.assert_equal(computed_G_zero);
BB_ASSERT_EQ(computed_G_zero.get_value(), claimed_G_zero.get_value(), "G_zero doesn't match received G_zero.");
// The circuit constraint enforces correctness; mismatched witnesses will produce an unsatisfiable circuit.
claimed_G_zero.assert_equal(computed_G_zero, "G_zero doesn't match received G_zero.");
Copy link
Copy Markdown
Contributor Author

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

same as here - i think this assert was disruptive for circuit construction


bool running_truth_value = verifier_accumulator.running_truth_value;
return running_truth_value;
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -816,9 +816,8 @@ void run_libra_tampering_test(ShpleminiTest<TypeParam>* test,

// PCS verification should always fail when tampering occurred
if constexpr (std::is_same_v<TypeParam, GrumpkinSettings>) {
EXPECT_THROW(ShpleminiTest<TypeParam>::IPA::reduce_verify_batch_opening_claim(
batch_opening_claim, test->vk(), verifier_transcript),
std::runtime_error);
EXPECT_FALSE(ShpleminiTest<TypeParam>::IPA::reduce_verify_batch_opening_claim(
batch_opening_claim, test->vk(), verifier_transcript));
} else {
const auto pairing_points =
KZG<Curve>::reduce_verify_batch_opening_claim(std::move(batch_opening_claim), verifier_transcript);
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -30,11 +30,11 @@ MultilinearBatchingFlavor::ProvingKey::ProvingKey(ProverClaim&& accumulator_clai
polynomials.batched_shifted_accumulator = preshifted_accumulator.shifted();
polynomials.batched_shifted_instance = preshifted_instance.shifted();

// Construct `eq` polynomials from challenges
polynomials.eq_accumulator =
ProverEqPolynomial<FF>::construct(accumulator_claim.challenge, bb::numeric::get_msb(circuit_size));
polynomials.eq_instance =
ProverEqPolynomial<FF>::construct(instance_claim.challenge, bb::numeric::get_msb(circuit_size));
// Construct eq polynomials with 2^m coefficients (m = log2(circuit_size)), not 2^VIRTUAL_LOG_N.
Copy link
Copy Markdown
Contributor Author

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

It's efficient and correct, but the implementation is somewhat convoluted

// Virtual sumcheck rounds handle the remaining challenges without needing the full table.
const size_t log_circuit_size = bb::numeric::get_msb(circuit_size);
polynomials.eq_accumulator = ProverEqPolynomial<FF>::construct(accumulator_claim.challenge, log_circuit_size);
polynomials.eq_instance = ProverEqPolynomial<FF>::construct(instance_claim.challenge, log_circuit_size);
polynomials.increase_polynomials_virtual_size(virtual_circuit_size);

// Move incoming challenges and copy commitments with corresponding evaluations
Expand Down
152 changes: 152 additions & 0 deletions barretenberg/cpp/src/barretenberg/goblin/merge.test.cpp
Original file line number Diff line number Diff line change
@@ -1,4 +1,5 @@
#include "barretenberg/circuit_checker/circuit_checker.hpp"
#include "barretenberg/commitment_schemes/kzg/kzg.hpp"
#include "barretenberg/common/test.hpp"
#include "barretenberg/ecc/fields/field_conversion.hpp"
#include "barretenberg/goblin/merge_prover.hpp"
Expand Down Expand Up @@ -347,6 +348,152 @@ template <typename Curve> class MergeTests : public testing::Test {

prove_and_verify_merge(op_queue, settings, TamperProofMode::LEval, false);
}

/**
* @brief Test that an honestly constructed merge proof with shift_size = 0 (empty left table) verifies
* @details Manually constructs a merge proof where the left table is empty, M = R, and all PCS
* openings are computed honestly. This bypasses the op queue (which structurally prevents empty
* subtables) to test the protocol math directly.
*/
static void test_honest_empty_left_table()
{
if constexpr (IsRecursive) {
GTEST_SKIP() << "Native-only test";
return;
}
if constexpr (!IsRecursive) {
Copy link
Copy Markdown
Contributor Author

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

checking that an honest prover can prove a merge with shift size 0 - i got puzzled why we have an assert on the verifier side


using InnerFlavor = MegaFlavor;
using InnerBuilder = typename InnerFlavor::CircuitBuilder;
using Polynomial = bb::Polynomial<bb::fr>;
using CommitmentKey = bb::CommitmentKey<curve::BN254>;

// Build an op queue with some ops to serve as the right (previous) table
auto op_queue = std::make_shared<ECCOpQueue>();
InnerBuilder circuit{ op_queue };
GoblinMockCircuits::construct_simple_circuit(circuit);
op_queue->merge();

// Right table = the merged previous table; Left table = empty
std::array<Polynomial, NUM_WIRES> right_table = op_queue->construct_ultra_ops_table_columns();
std::array<Polynomial, NUM_WIRES> left_table;
std::array<Polynomial, NUM_WIRES> merged_table;
for (size_t idx = 0; idx < NUM_WIRES; idx++) {
left_table[idx] = Polynomial(0); // empty
merged_table[idx] = Polynomial(right_table[idx]); // M = R
}

CommitmentKey ck(right_table[0].size());
auto prover_transcript = std::make_shared<NativeTranscript>();

// === Replicate MergeProver::construct_proof with shift_size = 0 ===
prover_transcript->send_to_verifier("shift_size", static_cast<uint32_t>(0));

for (size_t idx = 0; idx < NUM_WIRES; idx++) {
prover_transcript->send_to_verifier("MERGED_TABLE_" + std::to_string(idx),
ck.commit(merged_table[idx]));
}

std::array<std::string, 4> degree_labels = { "LEFT_TABLE_DEGREE_CHECK_0",
"LEFT_TABLE_DEGREE_CHECK_1",
"LEFT_TABLE_DEGREE_CHECK_2",
"LEFT_TABLE_DEGREE_CHECK_3" };
// Consume challenges to advance transcript state (values unused since L is empty)
prover_transcript->template get_challenges<bb::fr>(degree_labels);
Polynomial reversed_batched_left_tables(0); // empty
prover_transcript->send_to_verifier("REVERSED_BATCHED_LEFT_TABLES",
ck.commit(reversed_batched_left_tables));

std::array<std::string, 13> shplonk_labels = {
"SHPLONK_MERGE_BATCHING_CHALLENGE_0", "SHPLONK_MERGE_BATCHING_CHALLENGE_1",
"SHPLONK_MERGE_BATCHING_CHALLENGE_2", "SHPLONK_MERGE_BATCHING_CHALLENGE_3",
"SHPLONK_MERGE_BATCHING_CHALLENGE_4", "SHPLONK_MERGE_BATCHING_CHALLENGE_5",
"SHPLONK_MERGE_BATCHING_CHALLENGE_6", "SHPLONK_MERGE_BATCHING_CHALLENGE_7",
"SHPLONK_MERGE_BATCHING_CHALLENGE_8", "SHPLONK_MERGE_BATCHING_CHALLENGE_9",
"SHPLONK_MERGE_BATCHING_CHALLENGE_10", "SHPLONK_MERGE_BATCHING_CHALLENGE_11",
"SHPLONK_MERGE_BATCHING_CHALLENGE_12"
};
auto shplonk_batching_challenges = prover_transcript->template get_challenges<bb::fr>(shplonk_labels);

bb::fr kappa = prover_transcript->template get_challenge<bb::fr>("kappa");
bb::fr kappa_inv = kappa.invert();

// Evaluations at kappa
std::vector<bb::fr> evals;
for (size_t idx = 0; idx < NUM_WIRES; idx++) {
evals.emplace_back(left_table[idx].evaluate(kappa)); // L_j(κ) = 0
prover_transcript->send_to_verifier("LEFT_TABLE_EVAL_" + std::to_string(idx), evals.back());
}
for (size_t idx = 0; idx < NUM_WIRES; idx++) {
evals.emplace_back(right_table[idx].evaluate(kappa));
prover_transcript->send_to_verifier("RIGHT_TABLE_EVAL_" + std::to_string(idx), evals.back());
}
for (size_t idx = 0; idx < NUM_WIRES; idx++) {
evals.emplace_back(merged_table[idx].evaluate(kappa));
prover_transcript->send_to_verifier("MERGED_TABLE_EVAL_" + std::to_string(idx), evals.back());
}
evals.emplace_back(reversed_batched_left_tables.evaluate(kappa_inv)); // G(κ⁻¹) = 0
prover_transcript->send_to_verifier("REVERSED_BATCHED_LEFT_TABLES_EVAL", evals.back());

// Shplonk batched quotient: Q such that Q·(X-κ)·(X-κ⁻¹) = ...
// With L=0 and G=0, only R and M (=R) terms contribute at kappa.
size_t poly_size = merged_table[0].size();
Polynomial shplonk_batched_quotient(poly_size);

for (size_t idx = 0; idx < NUM_WIRES; idx++) {
// L terms: zero (left_table is empty)
// R terms
bb::fr beta_r = shplonk_batching_challenges[NUM_WIRES + idx];
shplonk_batched_quotient.add_scaled(right_table[idx], beta_r);
shplonk_batched_quotient.at(0) -= beta_r * evals[NUM_WIRES + idx];
// M terms
bb::fr beta_m = shplonk_batching_challenges[2 * NUM_WIRES + idx];
shplonk_batched_quotient.add_scaled(merged_table[idx], beta_m);
shplonk_batched_quotient.at(0) -= beta_m * evals[2 * NUM_WIRES + idx];
}
shplonk_batched_quotient.factor_roots(kappa);
// G term at kappa_inv: G=0 so nothing to add

prover_transcript->send_to_verifier("SHPLONK_BATCHED_QUOTIENT", ck.commit(shplonk_batched_quotient));

// Shplonk opening claim
bb::fr z = prover_transcript->template get_challenge<bb::fr>("shplonk_opening_challenge");
Polynomial Q_prime(std::move(shplonk_batched_quotient));
Q_prime *= -(z - kappa);

for (size_t idx = 0; idx < NUM_WIRES; idx++) {
bb::fr beta_r = shplonk_batching_challenges[NUM_WIRES + idx];
Q_prime.add_scaled(right_table[idx], beta_r);
Q_prime.at(0) -= beta_r * evals[NUM_WIRES + idx];
bb::fr beta_m = shplonk_batching_challenges[2 * NUM_WIRES + idx];
Q_prime.add_scaled(merged_table[idx], beta_m);
Q_prime.at(0) -= beta_m * evals[2 * NUM_WIRES + idx];
}
// G term: G=0, so (G-g)·factor = 0, nothing to add

ProverOpeningClaim<curve::BN254> opening_claim = { .polynomial = std::move(Q_prime),
.opening_pair = { z, bb::fr(0) } };
KZG<curve::BN254>::compute_opening_proof(ck, opening_claim, prover_transcript);

auto native_proof = prover_transcript->export_proof();

// === Verify with unmodified verifier (which has BB_ASSERT_GT for shift_size > 0) ===
// Build input commitments: t = empty (zero commits), T_prev = right table
InputCommitments input_commitments;
for (size_t idx = 0; idx < NUM_WIRES; idx++) {
input_commitments.t_commitments[idx] = ck.commit(left_table[idx]);
input_commitments.T_prev_commitments[idx] = ck.commit(right_table[idx]);
}

auto verifier_transcript = std::make_shared<NativeTranscript>();
MergeVerifierType verifier{ MergeSettings::PREPEND, verifier_transcript };
auto result = verifier.reduce_to_pairing_check(native_proof, input_commitments);

bool pairing_verified = result.pairing_points.check();
bool verified = pairing_verified && result.reduction_succeeded;
EXPECT_TRUE(verified);
} // if constexpr (!IsRecursive)
}
};

// Define test types: native and recursive contexts
Expand Down Expand Up @@ -406,6 +553,11 @@ TYPED_TEST(MergeTests, EvalFailureAppend)
TestFixture::test_eval_failure(MergeSettings::APPEND);
}

// Verifies that an honest prover can produce a valid merge proof when the left table is
// empty (shift_size = 0); the fixture skips itself for recursive flavors (native-only).
TYPED_TEST(MergeTests, HonestEmptyLeftTable)
{
    TestFixture::test_honest_empty_left_table();
}

/**
* @brief Test that mixing values from different transcript instances causes instant failure
* @details Simulates a realistic scenario where a circuit contains two merge verifiers, each with
Expand Down
16 changes: 12 additions & 4 deletions barretenberg/cpp/src/barretenberg/goblin/merge_prover.cpp
Original file line number Diff line number Diff line change
Expand Up @@ -75,15 +75,19 @@ MergeProver::Polynomial MergeProver::compute_shplonk_batched_quotient(
shplonk_batched_quotient.add_scaled(merged_table[idx], challenge);
}
// Q -= eval·βᵢ
shplonk_batched_quotient.at(0) -= challenge * eval;
if (!shplonk_batched_quotient.is_empty()) {
shplonk_batched_quotient.at(0) -= challenge * eval;
}
}
}
// Q /= (X - κ)
shplonk_batched_quotient.factor_roots(kappa);

// Q += (G - g)/(X - κ⁻¹)·β
Polynomial reversed_batched_left_tables_copy(reversed_batched_left_tables);
reversed_batched_left_tables_copy.at(0) -= evals.back();
if (!reversed_batched_left_tables_copy.is_empty()) {
reversed_batched_left_tables_copy.at(0) -= evals.back();
}
reversed_batched_left_tables_copy.factor_roots(kappa_inv);
shplonk_batched_quotient.add_scaled(reversed_batched_left_tables_copy, shplonk_batching_challenges.back());

Expand Down Expand Up @@ -123,12 +127,16 @@ MergeProver::OpeningClaim MergeProver::compute_shplonk_opening_claim(
shplonk_partially_evaluated_batched_quotient.add_scaled(merged_table[idx], challenge);
}
// Q' -= eval·βᵢ
shplonk_partially_evaluated_batched_quotient.at(0) -= challenge * eval;
if (!shplonk_partially_evaluated_batched_quotient.is_empty()) {
shplonk_partially_evaluated_batched_quotient.at(0) -= challenge * eval;
}
}
}

// Q' += (G - g)·(z - κ)/(z - κ⁻¹)·β
reversed_batched_left_tables.at(0) -= evals.back();
if (!reversed_batched_left_tables.is_empty()) {
reversed_batched_left_tables.at(0) -= evals.back();
}
shplonk_partially_evaluated_batched_quotient.add_scaled(reversed_batched_left_tables,
shplonk_batching_challenges.back() *
(shplonk_opening_challenge - kappa) *
Expand Down
7 changes: 0 additions & 7 deletions barretenberg/cpp/src/barretenberg/goblin/merge_verifier.cpp
Original file line number Diff line number Diff line change
Expand Up @@ -122,13 +122,6 @@ typename MergeVerifier_<Curve>::ReductionResult MergeVerifier_<Curve>::reduce_to
// For native: shift_size is uint32_t
// For stdlib: shift_size is FF (we'll get the value later)
const FF shift_size = transcript->template receive_from_prover<FF>("shift_size");
;
if constexpr (IsRecursive) {
Copy link
Copy Markdown
Contributor

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

I'm guessing that these checks were intended to flag unexpected behavior rather than invalid behavior. It used to be the case that we always added some ops to everything - I thought we got rid of that but then I would think this would've triggered. Maybe @federicobarbacovi can weigh in here?

Copy link
Copy Markdown
Contributor

@federicobarbacovi federicobarbacovi Apr 2, 2026

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

If shift size is zero, proving used to fail because Shplonk tries to access the zero coefficient of the polynomials it's proving, see here.

We are not using Shplonk directly in the merge anymore, so I don't know whether proving would still fail with shift size equal to zero.

Re whether we always add something: yes, we do add ecc ops to each builder during finalisation, see here. At the same time, the first merge in Chonk happens against an empty table. The reason why the first merge doesn't trigger this assertion is that the shift size is computed for the left table. As the first merge is in prepend mode, the left table is the table coming from the first app, which is never zero because of the ops we add.

BB_ASSERT_GT(uint32_t(shift_size.get_value()), 0U, "Shift size should always be bigger than 0");
} else {

BB_ASSERT_GT(shift_size, 0U, "Shift size should always be bigger than 0");
}

// Store T_commitments of the verifier
TableCommitments merged_table_commitments;
Expand Down
7 changes: 0 additions & 7 deletions barretenberg/cpp/src/barretenberg/goblin/mock_circuits.hpp
Original file line number Diff line number Diff line change
Expand Up @@ -90,13 +90,6 @@ class GoblinMockCircuits {
generate_sha256_test_circuit<MegaBuilder>(builder, 8);
stdlib::generate_ecdsa_verification_test_circuit(builder, 2);
}

// TODO(https://github.com/AztecProtocol/barretenberg/issues/911): We require goblin ops to be added to the
// function circuit because we cannot support zero commitments. While the builder handles this at
// ProverInstance creation stage via the add_gates_to_ensure_all_polys_are_non_zero function for other
// MegaHonk circuits (where we don't explicitly need to add goblin ops), in IVC merge proving happens prior to
// folding where the absence of goblin ecc ops will result in zero commitments.
MockCircuits::construct_goblin_ecc_op_circuit(builder);
}

/**
Expand Down
Loading
Loading