Barretenberg
The ZK-SNARK library at the core of Aztec

tx_execution.test.cpp
#include <gmock/gmock.h>
#include <gtest/gtest.h>

namespace bb::avm2::simulation {
namespace {

using ::testing::_;
using ::testing::NiceMock;
using ::testing::Return;
using ::testing::ReturnRef;
class TxExecutionTest : public ::testing::Test {
  protected:
    TxExecutionTest() = default;

    NiceMock<MockContextProvider> context_provider;
    EventEmitter<TxEvent> tx_event_emitter;
    NiceMock<MockHighLevelMerkleDB> merkle_db;
    NiceMock<MockExecution> execution;
    NiceMock<MockFieldGreaterThan> field_gt;
    NiceMock<MockPoseidon2> poseidon2;
    NiceMock<MockWrittenPublicDataSlotsTreeCheck> written_public_data_slots_tree_check;
    NiceMock<MockRetrievedBytecodesTreeCheck> retrieved_bytecodes_tree_check;
    TxExecution tx_execution = TxExecution(execution,
                                           context_provider,
                                           merkle_db,
                                           written_public_data_slots_tree_check,
                                           retrieved_bytecodes_tree_check,
                                           field_gt,
                                           poseidon2,
                                           tx_event_emitter);
};

TEST_F(TxExecutionTest, simulateTx)
{
    // Create a mock transaction
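    // with both non-revertible and revertible side effects and one enqueued call per phase.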
    Tx tx = {
        .hash = "0x1234567890abcdef",
        .nonRevertibleAccumulatedData =
            AccumulatedData{
                .noteHashes = testing::random_fields(5),
                .nullifiers = testing::random_fields(6),
                .l2ToL1Messages = testing::random_l2_to_l1_messages(2),
            },
        .revertibleAccumulatedData =
            AccumulatedData{
                .noteHashes = testing::random_fields(5),
                .nullifiers = testing::random_fields(2),
                .l2ToL1Messages = testing::random_l2_to_l1_messages(2),
            },
        .setupEnqueuedCalls = testing::random_enqueued_calls(1),
        .appLogicEnqueuedCalls = testing::random_enqueued_calls(1),
        .teardownEnqueuedCall = testing::random_enqueued_calls(1)[0],
    };
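
    // Give the merkle DB benign defaults: a fixed dummy tree state and no-op nullifier writes.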
    AppendOnlyTreeSnapshot dummy_snapshot = {
        .root = 0,
        .nextAvailableLeafIndex = 0,
    };
    TreeStates tree_state = {
        .noteHashTree = { .tree = dummy_snapshot, .counter = 0 },
        .nullifierTree = { .tree = dummy_snapshot, .counter = 0 },
        .l1ToL2MessageTree = { .tree = dummy_snapshot, .counter = 0 },
        .publicDataTree = { .tree = dummy_snapshot, .counter = 0 },
    };
    ON_CALL(merkle_db, get_tree_state()).WillByDefault([&]() { return tree_state; });
    ON_CALL(merkle_db, siloed_nullifier_write(_)).WillByDefault(Return());
    // Number of enqueued calls in the transaction: 1 setup, 1 app logic, and 1 teardown.

    auto setup_context = std::make_unique<NiceMock<MockContext>>();
    ON_CALL(*setup_context, halted()).WillByDefault(Return(true)); // don't do any actual execution

    auto app_logic_context = std::make_unique<NiceMock<MockContext>>();
    ON_CALL(*app_logic_context, halted()).WillByDefault(Return(true));

    auto teardown_context = std::make_unique<NiceMock<MockContext>>();
    ON_CALL(*teardown_context, halted()).WillByDefault(Return(true));

    // Configure mock execution to return successful results
    ExecutionResult successful_result = {
        .rd_offset = 0,
        .rd_size = 0,
        .gas_used = Gas{ 100, 100 },
        .side_effect_states = SideEffectStates{},
        .success = true // This is the key - mark execution as successful
    };
    ON_CALL(execution, execute(_)).WillByDefault(Return(successful_result));
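
    // One fresh context is handed out per enqueued call: setup, then app logic, then teardown.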
    EXPECT_CALL(context_provider, make_enqueued_context)
        .WillOnce(Return(std::move(setup_context)))
        .WillOnce(Return(std::move(app_logic_context)))
        .WillOnce(Return(std::move(teardown_context)));
    EXPECT_CALL(merkle_db, create_checkpoint()).Times(1);

    EXPECT_CALL(merkle_db, pad_trees()).Times(1);

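    // Run the transaction through the simulator; every phase should complete successfully.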
    tx_execution.simulate(tx);

    // Check the event counts
    bool has_startup_event = false;
    auto expected_private_append_tree_events =
        tx.nonRevertibleAccumulatedData.noteHashes.size() + tx.nonRevertibleAccumulatedData.nullifiers.size() +
        tx.revertibleAccumulatedData.noteHashes.size() + tx.revertibleAccumulatedData.nullifiers.size();
    auto actual_private_append_tree_events = 0;

    auto expected_l2_l1_msg_events =
        tx.nonRevertibleAccumulatedData.l2ToL1Messages.size() + tx.revertibleAccumulatedData.l2ToL1Messages.size();
    auto actual_l2_l1_msg_events = 0;

    auto expected_public_call_events = 3; // setup, app logic, teardown
    auto actual_public_call_events = 0;

    bool has_collect_fee_event = false;

    // Tally the emitted tx events by type
    auto events = tx_event_emitter.get_events();
    for (const auto& tx_event : events) {
        if (std::holds_alternative<TxStartupEvent>(tx_event)) {
            has_startup_event = true;
            continue;
        }
        auto event = std::get<TxPhaseEvent>(tx_event).event;
        if (std::holds_alternative<PrivateAppendTreeEvent>(event)) {
            actual_private_append_tree_events++;
        }
        if (std::holds_alternative<PrivateEmitL2L1MessageEvent>(event)) {
            actual_l2_l1_msg_events++;
        }
        if (std::holds_alternative<EnqueuedCallEvent>(event)) {
            actual_public_call_events++;
        }
        if (std::holds_alternative<CollectGasFeeEvent>(event)) {
            has_collect_fee_event = true;
        }
    }

    EXPECT_TRUE(has_startup_event);
    EXPECT_EQ(actual_private_append_tree_events, expected_private_append_tree_events);
    EXPECT_EQ(expected_l2_l1_msg_events, actual_l2_l1_msg_events);
    EXPECT_EQ(expected_public_call_events, actual_public_call_events);
    EXPECT_TRUE(has_collect_fee_event);
}

TEST_F(TxExecutionTest, NoteHashLimitReached)
{
    // Create a mock transaction
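    // whose non-revertible data already fills the note hash tree to its cap, so the single
    // revertible note hash pushes it over the limit.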
    Tx tx = {
        .hash = "0x1234567890abcdef",
        .nonRevertibleAccumulatedData =
            AccumulatedData{
                .noteHashes = testing::random_fields(MAX_NOTE_HASHES_PER_TX),
                .nullifiers = testing::random_fields(1),
            },
        .revertibleAccumulatedData =
            AccumulatedData{
                .noteHashes = testing::random_fields(1),
            },
        .appLogicEnqueuedCalls = testing::random_enqueued_calls(1),
    };

    AppendOnlyTreeSnapshot dummy_snapshot = {
        .root = 0,
        .nextAvailableLeafIndex = 0,
    };
    TreeStates tree_state = {
        .noteHashTree = { .tree = dummy_snapshot, .counter = 0 },
        .nullifierTree = { .tree = dummy_snapshot, .counter = 0 },
        .l1ToL2MessageTree = { .tree = dummy_snapshot, .counter = 0 },
        .publicDataTree = { .tree = dummy_snapshot, .counter = 0 },
    };
    ON_CALL(merkle_db, get_tree_state()).WillByDefault([&]() { return tree_state; });
    ON_CALL(merkle_db, siloed_nullifier_write(_)).WillByDefault([&](const auto& /*nullifier*/) {
        tree_state.nullifierTree.counter++;
    });
    ON_CALL(merkle_db, siloed_note_hash_write(_)).WillByDefault([&](const auto& /*note_hash*/) {
        tree_state.noteHashTree.counter++;
        return true;
    });
    ON_CALL(merkle_db, unique_note_hash_write(_)).WillByDefault([&](const auto& /*note_hash*/) {
        tree_state.noteHashTree.counter++;
        return true;
    });

    EXPECT_CALL(merkle_db, create_checkpoint()).Times(2); // once at start, once after app-logic revert

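    // Simulating the transaction should hit the note hash limit and revert the app-logic phase.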
    tx_execution.simulate(tx);

    // Check the event counts
    bool has_startup_event = false;
    auto expected_private_append_tree_events =
        tx.nonRevertibleAccumulatedData.noteHashes.size() + tx.nonRevertibleAccumulatedData.nullifiers.size() +
        tx.revertibleAccumulatedData.noteHashes.size() + tx.revertibleAccumulatedData.nullifiers.size();
    auto actual_private_append_tree_events = 0;

    auto expected_l2_l1_msg_events =
        tx.nonRevertibleAccumulatedData.l2ToL1Messages.size() + tx.revertibleAccumulatedData.l2ToL1Messages.size();
    auto actual_l2_l1_msg_events = 0;

    auto expected_public_call_events = 0; // None, since we revert before the public call
    auto actual_public_call_events = 0;
    auto reverts = 0;

    bool has_collect_fee_event = false;

    // Tally the emitted tx events by type
    auto events = tx_event_emitter.get_events();
    for (const auto& tx_event : events) {
        if (std::holds_alternative<TxStartupEvent>(tx_event)) {
            has_startup_event = true;
            continue;
        }
        TxPhaseEvent phase_event = std::get<TxPhaseEvent>(tx_event);
        if (phase_event.reverted) {
            reverts++;
        }
        auto event = phase_event.event;
        if (std::holds_alternative<PrivateAppendTreeEvent>(event)) {
            actual_private_append_tree_events++;
        }
        if (std::holds_alternative<PrivateEmitL2L1MessageEvent>(event)) {
            actual_l2_l1_msg_events++;
        }
        if (std::holds_alternative<EnqueuedCallEvent>(event)) {
            actual_public_call_events++;
        }
        if (std::holds_alternative<CollectGasFeeEvent>(event)) {
            has_collect_fee_event = true;
        }
    }

    EXPECT_TRUE(has_startup_event);
    EXPECT_EQ(actual_private_append_tree_events, expected_private_append_tree_events);
    EXPECT_EQ(expected_l2_l1_msg_events, actual_l2_l1_msg_events);
    EXPECT_EQ(expected_public_call_events, actual_public_call_events);
    EXPECT_TRUE(has_collect_fee_event);
    EXPECT_EQ(reverts, 1);
}

TEST_F(TxExecutionTest, NullifierLimitReached)
{
    // Create a mock transaction
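    // whose non-revertible data already fills the nullifier tree to its cap, so the single
    // revertible nullifier pushes it over the limit.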
    Tx tx = {
        .hash = "0x1234567890abcdef",
        .nonRevertibleAccumulatedData =
            AccumulatedData{
                .nullifiers = testing::random_fields(MAX_NULLIFIERS_PER_TX),
            },
        .revertibleAccumulatedData =
            AccumulatedData{
                .nullifiers = testing::random_fields(1),
            },
        .appLogicEnqueuedCalls = testing::random_enqueued_calls(1),
    };

    AppendOnlyTreeSnapshot dummy_snapshot = {
        .root = 0,
        .nextAvailableLeafIndex = 0,
    };
    TreeStates tree_state = {
        .noteHashTree = { .tree = dummy_snapshot, .counter = 0 },
        .nullifierTree = { .tree = dummy_snapshot, .counter = 0 },
        .l1ToL2MessageTree = { .tree = dummy_snapshot, .counter = 0 },
        .publicDataTree = { .tree = dummy_snapshot, .counter = 0 },
    };
    ON_CALL(merkle_db, get_tree_state()).WillByDefault([&]() { return tree_state; });
    ON_CALL(merkle_db, siloed_nullifier_write(_)).WillByDefault([&](const auto& /*nullifier*/) {
        tree_state.nullifierTree.counter++;
        return true;
    });
    ON_CALL(merkle_db, siloed_note_hash_write(_)).WillByDefault([&](const auto& /*note_hash*/) {
        tree_state.noteHashTree.counter++;
        return true;
    });
    ON_CALL(merkle_db, unique_note_hash_write(_)).WillByDefault([&](const auto& /*note_hash*/) {
        tree_state.noteHashTree.counter++;
        return true;
    });

    EXPECT_CALL(merkle_db, create_checkpoint()).Times(2); // once at start, once after app-logic revert

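    // Simulating the transaction should hit the nullifier limit and revert the app-logic phase.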
    tx_execution.simulate(tx);

    // Check the event counts
    bool has_startup_event = false;
    auto expected_private_append_tree_events =
        tx.nonRevertibleAccumulatedData.noteHashes.size() + tx.nonRevertibleAccumulatedData.nullifiers.size() +
        tx.revertibleAccumulatedData.noteHashes.size() + tx.revertibleAccumulatedData.nullifiers.size();
    auto actual_private_append_tree_events = 0;

    auto expected_l2_l1_msg_events =
        tx.nonRevertibleAccumulatedData.l2ToL1Messages.size() + tx.revertibleAccumulatedData.l2ToL1Messages.size();
    auto actual_l2_l1_msg_events = 0;

    auto expected_public_call_events = 0; // None, since we revert before the public call
    auto actual_public_call_events = 0;
    auto reverts = 0;

    bool has_collect_fee_event = false;

    // Tally the emitted tx events by type
    auto events = tx_event_emitter.get_events();
    for (const auto& tx_event : events) {
        if (std::holds_alternative<TxStartupEvent>(tx_event)) {
            has_startup_event = true;
            continue;
        }
        TxPhaseEvent phase_event = std::get<TxPhaseEvent>(tx_event);
        if (phase_event.reverted) {
            reverts++;
        }
        auto event = phase_event.event;
        if (std::holds_alternative<PrivateAppendTreeEvent>(event)) {
            actual_private_append_tree_events++;
        }
        if (std::holds_alternative<PrivateEmitL2L1MessageEvent>(event)) {
            actual_l2_l1_msg_events++;
        }
        if (std::holds_alternative<EnqueuedCallEvent>(event)) {
            actual_public_call_events++;
        }
        if (std::holds_alternative<CollectGasFeeEvent>(event)) {
            has_collect_fee_event = true;
        }
    }

    EXPECT_TRUE(has_startup_event);
    EXPECT_EQ(actual_private_append_tree_events, expected_private_append_tree_events);
    EXPECT_EQ(expected_l2_l1_msg_events, actual_l2_l1_msg_events);
    EXPECT_EQ(expected_public_call_events, actual_public_call_events);
    EXPECT_TRUE(has_collect_fee_event);
    EXPECT_EQ(reverts, 1);
}

TEST_F(TxExecutionTest, L2ToL1MessageLimitReached)
{
    // Create a mock transaction
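    // whose non-revertible data already fills the L2-to-L1 message quota, so the single
    // revertible message pushes it over the limit.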
    Tx tx = {
        .hash = "0x1234567890abcdef",
        .nonRevertibleAccumulatedData =
            AccumulatedData{
                .nullifiers = testing::random_fields(1),
                .l2ToL1Messages = testing::random_l2_to_l1_messages(MAX_L2_TO_L1_MSGS_PER_TX),
            },
        .revertibleAccumulatedData =
            AccumulatedData{
                .l2ToL1Messages = testing::random_l2_to_l1_messages(1),
            },
        .appLogicEnqueuedCalls = testing::random_enqueued_calls(1),
    };

    AppendOnlyTreeSnapshot dummy_snapshot = {
        .root = 0,
        .nextAvailableLeafIndex = 0,
    };
    TreeStates tree_state = {
        .noteHashTree = { .tree = dummy_snapshot, .counter = 0 },
        .nullifierTree = { .tree = dummy_snapshot, .counter = 0 },
        .l1ToL2MessageTree = { .tree = dummy_snapshot, .counter = 0 },
        .publicDataTree = { .tree = dummy_snapshot, .counter = 0 },
    };
    ON_CALL(merkle_db, get_tree_state()).WillByDefault([&]() { return tree_state; });
    ON_CALL(merkle_db, siloed_nullifier_write(_)).WillByDefault([&](const auto& /*nullifier*/) {
        tree_state.nullifierTree.counter++;
        return true;
    });
    ON_CALL(merkle_db, siloed_note_hash_write(_)).WillByDefault([&](const auto& /*note_hash*/) {
        tree_state.noteHashTree.counter++;
        return true;
    });
    ON_CALL(merkle_db, unique_note_hash_write(_)).WillByDefault([&](const auto& /*note_hash*/) {
        tree_state.noteHashTree.counter++;
        return true;
    });

    EXPECT_CALL(merkle_db, create_checkpoint()).Times(2); // once at start, once after app-logic revert

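    // Simulating the transaction should hit the L2-to-L1 message limit and revert the app-logic phase.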
    tx_execution.simulate(tx);

    // Check the event counts
    bool has_startup_event = false;
    auto expected_private_append_tree_events =
        tx.nonRevertibleAccumulatedData.noteHashes.size() + tx.nonRevertibleAccumulatedData.nullifiers.size() +
        tx.revertibleAccumulatedData.noteHashes.size() + tx.revertibleAccumulatedData.nullifiers.size();
    auto actual_private_append_tree_events = 0;

    auto expected_l2_l1_msg_events =
        tx.nonRevertibleAccumulatedData.l2ToL1Messages.size() + tx.revertibleAccumulatedData.l2ToL1Messages.size();
    auto actual_l2_l1_msg_events = 0;

    auto expected_public_call_events = 0; // None, since we revert before the public call
    auto actual_public_call_events = 0;
    auto reverts = 0;

    bool has_collect_fee_event = false;

    // Tally the emitted tx events by type
    auto events = tx_event_emitter.get_events();
    for (const auto& tx_event : events) {
        if (std::holds_alternative<TxStartupEvent>(tx_event)) {
            has_startup_event = true;
            continue;
        }
        TxPhaseEvent phase_event = std::get<TxPhaseEvent>(tx_event);
        if (phase_event.reverted) {
            reverts++;
        }
        auto event = phase_event.event;
        if (std::holds_alternative<PrivateAppendTreeEvent>(event)) {
            actual_private_append_tree_events++;
        }
        if (std::holds_alternative<PrivateEmitL2L1MessageEvent>(event)) {
            actual_l2_l1_msg_events++;
        }
        if (std::holds_alternative<EnqueuedCallEvent>(event)) {
            actual_public_call_events++;
        }
        if (std::holds_alternative<CollectGasFeeEvent>(event)) {
            has_collect_fee_event = true;
        }
    }

    EXPECT_TRUE(has_startup_event);
    EXPECT_EQ(actual_private_append_tree_events, expected_private_append_tree_events);
    EXPECT_EQ(expected_l2_l1_msg_events, actual_l2_l1_msg_events);
    EXPECT_EQ(expected_public_call_events, actual_public_call_events);
    EXPECT_TRUE(has_collect_fee_event);
    EXPECT_EQ(reverts, 1);
}

} // namespace
} // namespace bb::avm2::simulation