Barretenberg
The ZK-SNARK library at the core of Aztec
Loading...
Searching...
No Matches
execution_trace.cpp
Go to the documentation of this file.
2
#include <algorithm>
#include <array>
#include <cassert>
#include <cstddef>
#include <cstdint>
#include <numeric>
#include <ranges>
#include <stdexcept>
#include <sys/types.h>
#include <unordered_map>
#include <unordered_set>
12
19
46
50
51namespace bb::avm2::tracegen {
52namespace {
53
54constexpr std::array<Column, AVM_MAX_OPERANDS> OPERAND_COLUMNS = {
55 C::execution_op_0_, C::execution_op_1_, C::execution_op_2_, C::execution_op_3_,
56 C::execution_op_4_, C::execution_op_5_, C::execution_op_6_,
57};
58constexpr std::array<Column, AVM_MAX_OPERANDS> OPERAND_IS_ADDRESS_COLUMNS = {
59 C::execution_sel_op_is_address_0_, C::execution_sel_op_is_address_1_, C::execution_sel_op_is_address_2_,
60 C::execution_sel_op_is_address_3_, C::execution_sel_op_is_address_4_, C::execution_sel_op_is_address_5_,
61 C::execution_sel_op_is_address_6_,
62};
63constexpr std::array<Column, AVM_MAX_OPERANDS> OPERAND_AFTER_RELATIVE_COLUMNS = {
64 C::execution_op_after_relative_0_, C::execution_op_after_relative_1_, C::execution_op_after_relative_2_,
65 C::execution_op_after_relative_3_, C::execution_op_after_relative_4_, C::execution_op_after_relative_5_,
66 C::execution_op_after_relative_6_,
67};
68constexpr std::array<Column, AVM_MAX_OPERANDS> RESOLVED_OPERAND_COLUMNS = {
69 C::execution_rop_0_, C::execution_rop_1_, C::execution_rop_2_, C::execution_rop_3_,
70 C::execution_rop_4_, C::execution_rop_5_, C::execution_rop_6_,
71};
72constexpr std::array<Column, AVM_MAX_OPERANDS> RESOLVED_OPERAND_TAG_COLUMNS = {
73 C::execution_rop_tag_0_, C::execution_rop_tag_1_, C::execution_rop_tag_2_, C::execution_rop_tag_3_,
74 C::execution_rop_tag_4_, C::execution_rop_tag_5_, C::execution_rop_tag_6_,
75};
76constexpr std::array<Column, AVM_MAX_OPERANDS> OPERAND_SHOULD_APPLY_INDIRECTION_COLUMNS = {
77 C::execution_sel_should_apply_indirection_0_, C::execution_sel_should_apply_indirection_1_,
78 C::execution_sel_should_apply_indirection_2_, C::execution_sel_should_apply_indirection_3_,
79 C::execution_sel_should_apply_indirection_4_, C::execution_sel_should_apply_indirection_5_,
80 C::execution_sel_should_apply_indirection_6_,
81};
82constexpr std::array<Column, AVM_MAX_OPERANDS> OPERAND_RELATIVE_OVERFLOW_COLUMNS = {
83 C::execution_sel_relative_overflow_0_, C::execution_sel_relative_overflow_1_, C::execution_sel_relative_overflow_2_,
84 C::execution_sel_relative_overflow_3_, C::execution_sel_relative_overflow_4_, C::execution_sel_relative_overflow_5_,
85 C::execution_sel_relative_overflow_6_,
86};
87constexpr std::array<Column, AVM_MAX_OPERANDS> OPERAND_IS_RELATIVE_VALID_BASE_COLUMNS = {
88 C::execution_sel_op_do_overflow_check_0_, C::execution_sel_op_do_overflow_check_1_,
89 C::execution_sel_op_do_overflow_check_2_, C::execution_sel_op_do_overflow_check_3_,
90 C::execution_sel_op_do_overflow_check_4_, C::execution_sel_op_do_overflow_check_5_,
91 C::execution_sel_op_do_overflow_check_6_,
92};
93constexpr size_t TOTAL_INDIRECT_BITS = 16;
94static_assert(AVM_MAX_OPERANDS * 2 <= TOTAL_INDIRECT_BITS);
95constexpr std::array<Column, TOTAL_INDIRECT_BITS / 2> OPERAND_IS_RELATIVE_WIRE_COLUMNS = {
96 C::execution_sel_op_is_relative_wire_0_, C::execution_sel_op_is_relative_wire_1_,
97 C::execution_sel_op_is_relative_wire_2_, C::execution_sel_op_is_relative_wire_3_,
98 C::execution_sel_op_is_relative_wire_4_, C::execution_sel_op_is_relative_wire_5_,
99 C::execution_sel_op_is_relative_wire_6_, C::execution_sel_op_is_relative_wire_7_,
100
101};
102constexpr std::array<Column, TOTAL_INDIRECT_BITS / 2> OPERAND_IS_INDIRECT_WIRE_COLUMNS = {
103 C::execution_sel_op_is_indirect_wire_0_, C::execution_sel_op_is_indirect_wire_1_,
104 C::execution_sel_op_is_indirect_wire_2_, C::execution_sel_op_is_indirect_wire_3_,
105 C::execution_sel_op_is_indirect_wire_4_, C::execution_sel_op_is_indirect_wire_5_,
106 C::execution_sel_op_is_indirect_wire_6_, C::execution_sel_op_is_indirect_wire_7_,
107};
108
109constexpr std::array<Column, AVM_MAX_REGISTERS> REGISTER_COLUMNS = {
110 C::execution_register_0_, C::execution_register_1_, C::execution_register_2_,
111 C::execution_register_3_, C::execution_register_4_, C::execution_register_5_,
112};
113constexpr std::array<Column, AVM_MAX_REGISTERS> REGISTER_MEM_TAG_COLUMNS = {
114 C::execution_mem_tag_reg_0_, C::execution_mem_tag_reg_1_, C::execution_mem_tag_reg_2_,
115 C::execution_mem_tag_reg_3_, C::execution_mem_tag_reg_4_, C::execution_mem_tag_reg_5_,
116};
117constexpr std::array<Column, AVM_MAX_REGISTERS> REGISTER_IS_WRITE_COLUMNS = {
118 C::execution_rw_reg_0_, C::execution_rw_reg_1_, C::execution_rw_reg_2_,
119 C::execution_rw_reg_3_, C::execution_rw_reg_4_, C::execution_rw_reg_5_,
120};
121constexpr std::array<Column, AVM_MAX_REGISTERS> REGISTER_MEM_OP_COLUMNS = {
122 C::execution_sel_mem_op_reg_0_, C::execution_sel_mem_op_reg_1_, C::execution_sel_mem_op_reg_2_,
123 C::execution_sel_mem_op_reg_3_, C::execution_sel_mem_op_reg_4_, C::execution_sel_mem_op_reg_5_,
124};
125constexpr std::array<Column, AVM_MAX_REGISTERS> REGISTER_EXPECTED_TAG_COLUMNS = {
126 C::execution_expected_tag_reg_0_, C::execution_expected_tag_reg_1_, C::execution_expected_tag_reg_2_,
127 C::execution_expected_tag_reg_3_, C::execution_expected_tag_reg_4_, C::execution_expected_tag_reg_5_,
128};
129constexpr std::array<Column, AVM_MAX_REGISTERS> REGISTER_TAG_CHECK_COLUMNS = {
130 C::execution_sel_tag_check_reg_0_, C::execution_sel_tag_check_reg_1_, C::execution_sel_tag_check_reg_2_,
131 C::execution_sel_tag_check_reg_3_, C::execution_sel_tag_check_reg_4_, C::execution_sel_tag_check_reg_5_,
132};
133constexpr std::array<Column, AVM_MAX_REGISTERS> REGISTER_OP_REG_EFFECTIVE_COLUMNS = {
134 C::execution_sel_op_reg_effective_0_, C::execution_sel_op_reg_effective_1_, C::execution_sel_op_reg_effective_2_,
135 C::execution_sel_op_reg_effective_3_, C::execution_sel_op_reg_effective_4_, C::execution_sel_op_reg_effective_5_,
136};
137
145Column get_execution_opcode_selector(ExecutionOpCode exec_opcode)
146{
147 switch (exec_opcode) {
149 return C::execution_sel_execute_get_env_var;
151 return C::execution_sel_execute_mov;
153 return C::execution_sel_execute_jump;
155 return C::execution_sel_execute_jumpi;
157 return C::execution_sel_execute_call;
159 return C::execution_sel_execute_static_call;
161 return C::execution_sel_execute_internal_call;
163 return C::execution_sel_execute_internal_return;
165 return C::execution_sel_execute_return;
167 return C::execution_sel_execute_revert;
169 return C::execution_sel_execute_success_copy;
171 return C::execution_sel_execute_returndata_size;
173 return C::execution_sel_execute_debug_log;
175 return C::execution_sel_execute_sload;
177 return C::execution_sel_execute_sstore;
179 return C::execution_sel_execute_notehash_exists;
181 return C::execution_sel_execute_emit_notehash;
183 return C::execution_sel_execute_l1_to_l2_message_exists;
185 return C::execution_sel_execute_nullifier_exists;
187 return C::execution_sel_execute_emit_nullifier;
189 return C::execution_sel_execute_send_l2_to_l1_msg;
190 default:
191 throw std::runtime_error("Execution opcode does not have a corresponding selector");
192 }
193}
194
/**
 * @brief Failure information gathered by preprocessing the execution events,
 *        used to decide which rows must set the "discard" flag.
 */
struct FailingContexts {
    // Whether the top-level app-logic / teardown phases end in failure.
    bool app_logic_failure = false;
    bool teardown_failure = false;
    // Context ids of the top-level contexts that exit the app-logic / teardown
    // phases (0 when that phase never exits in the event stream).
    // NOTE(review): these two fields were lost in doc extraction and have been
    // reconstructed from their uses in preprocess_for_discard and
    // dying_context_for_phase — confirm names against the original source.
    uint32_t app_logic_exit_context_id = 0;
    uint32_t teardown_exit_context_id = 0;
    // Ids of every context whose execution ends in failure.
    std::unordered_set<uint32_t> does_context_fail;
};
205
217FailingContexts preprocess_for_discard(
219{
220 FailingContexts dying_info;
221
222 // Preprocessing pass 1: find the events that exit the app logic and teardown phases
223 for (const auto& ex_event : ex_events) {
224 bool is_exit = ex_event.is_exit();
225 bool is_top_level = ex_event.after_context_event.parent_id == 0;
226
227 if (is_exit && is_top_level) {
228 // TODO(dbanks12): confirm this should be after_context_event and not before_context_event
229 if (ex_event.after_context_event.phase == TransactionPhase::APP_LOGIC) {
230 dying_info.app_logic_failure = ex_event.is_failure();
231 dying_info.app_logic_exit_context_id = ex_event.after_context_event.id;
232 } else if (ex_event.after_context_event.phase == TransactionPhase::TEARDOWN) {
233 dying_info.teardown_failure = ex_event.is_failure();
234 dying_info.teardown_exit_context_id = ex_event.after_context_event.id;
235 break; // Teardown is the last phase we care about
236 }
237 }
238 }
239
240 // Preprocessing pass 2: find all contexts that fail and mark them
241 for (const auto& ex_event : ex_events) {
242 if (ex_event.is_failure()) {
243 dying_info.does_context_fail.insert(ex_event.after_context_event.id);
244 }
245 }
246
247 return dying_info;
248}
249
257bool is_phase_discarded(TransactionPhase phase, const FailingContexts& failures)
258{
259 // Note that app logic also gets discarded if teardown failures
260 return (phase == TransactionPhase::APP_LOGIC && (failures.app_logic_failure || failures.teardown_failure)) ||
261 (phase == TransactionPhase::TEARDOWN && failures.teardown_failure);
262}
263
271uint32_t dying_context_for_phase(TransactionPhase phase, const FailingContexts& failures)
272{
273 assert((phase == TransactionPhase::APP_LOGIC || phase == TransactionPhase::TEARDOWN) &&
274 "Execution events must have app logic or teardown phase");
275
276 switch (phase) {
278 // Note that app logic also gets discarded if teardown failures
279 return failures.app_logic_failure ? failures.app_logic_exit_context_id
280 : failures.teardown_failure ? failures.teardown_exit_context_id
281 : 0;
283 return failures.teardown_failure ? failures.teardown_exit_context_id : 0;
284 default:
285 __builtin_unreachable(); // tell the compiler "we never reach here"
286 }
287}
288
289} // namespace
290
293{
294 uint32_t row = 1; // We start from row 1 because this trace contains shifted columns.
295
296 // Preprocess events to determine which contexts will fail
297 FailingContexts failures = preprocess_for_discard(ex_events);
298
299 uint32_t last_seen_parent_id = 0;
300
301 // Some variables updated per loop iteration to track
302 // whether or not the upcoming row should "discard" [side effects].
303 uint32_t discard = 0;
304 uint32_t dying_context_id = 0;
305 FF dying_context_id_inv = 0;
306 bool is_first_event_in_enqueued_call = true;
307 bool prev_row_was_enter_call = false;
308
309 for (const auto& ex_event : ex_events) {
310 // Check if this is the first event in an enqueued call and whether
311 // the phase should be discarded
312 if (discard == 0 && is_first_event_in_enqueued_call &&
313 is_phase_discarded(ex_event.after_context_event.phase, failures)) {
314 discard = 1;
315 dying_context_id = dying_context_for_phase(ex_event.after_context_event.phase, failures);
316 dying_context_id_inv = dying_context_id; // Will be inverted in batch later.
317 }
318
319 // Cache the parent id inversion since we will repeatedly just be doing the same expensive inversion
320 bool has_parent = ex_event.after_context_event.parent_id != 0;
321 if (last_seen_parent_id != ex_event.after_context_event.parent_id) {
322 last_seen_parent_id = ex_event.after_context_event.parent_id;
323 }
324
325 /**************************************************************************************************
326 * Setup.
327 **************************************************************************************************/
328
329 trace.set(
330 row,
331 { {
332 { C::execution_sel, 1 },
333 // Selectors that indicate "dispatch" from tx trace
334 // Note: Enqueued Call End is determined during the opcode execution temporality group
335 { C::execution_enqueued_call_start, is_first_event_in_enqueued_call ? 1 : 0 },
336 // Context
337 { C::execution_context_id, ex_event.after_context_event.id },
338 { C::execution_parent_id, ex_event.after_context_event.parent_id },
339 { C::execution_pc, ex_event.before_context_event.pc },
340 { C::execution_msg_sender, ex_event.after_context_event.msg_sender },
341 { C::execution_contract_address, ex_event.after_context_event.contract_addr },
342 { C::execution_transaction_fee, ex_event.after_context_event.transaction_fee },
343 { C::execution_is_static, ex_event.after_context_event.is_static },
344 { C::execution_parent_calldata_addr, ex_event.after_context_event.parent_cd_addr },
345 { C::execution_parent_calldata_size, ex_event.after_context_event.parent_cd_size },
346 { C::execution_last_child_returndata_addr, ex_event.after_context_event.last_child_rd_addr },
347 { C::execution_last_child_returndata_size, ex_event.after_context_event.last_child_rd_size },
348 { C::execution_last_child_success, ex_event.after_context_event.last_child_success },
349 { C::execution_last_child_id, ex_event.after_context_event.last_child_id },
350 { C::execution_l2_gas_limit, ex_event.after_context_event.gas_limit.l2Gas },
351 { C::execution_da_gas_limit, ex_event.after_context_event.gas_limit.daGas },
352 { C::execution_l2_gas_used, ex_event.after_context_event.gas_used.l2Gas },
353 { C::execution_da_gas_used, ex_event.after_context_event.gas_used.daGas },
354 { C::execution_parent_l2_gas_limit, ex_event.after_context_event.parent_gas_limit.l2Gas },
355 { C::execution_parent_da_gas_limit, ex_event.after_context_event.parent_gas_limit.daGas },
356 { C::execution_parent_l2_gas_used, ex_event.after_context_event.parent_gas_used.l2Gas },
357 { C::execution_parent_da_gas_used, ex_event.after_context_event.parent_gas_used.daGas },
358 { C::execution_next_context_id, ex_event.next_context_id },
359 // Context - gas.
360 { C::execution_prev_l2_gas_used, ex_event.before_context_event.gas_used.l2Gas },
361 { C::execution_prev_da_gas_used, ex_event.before_context_event.gas_used.daGas },
362 // Context - tree states
363 // Context - tree states - Written public data slots tree
364 { C::execution_prev_written_public_data_slots_tree_root,
365 ex_event.before_context_event.written_public_data_slots_tree_snapshot.root },
366 { C::execution_prev_written_public_data_slots_tree_size,
367 ex_event.before_context_event.written_public_data_slots_tree_snapshot.nextAvailableLeafIndex },
368 { C::execution_written_public_data_slots_tree_root,
369 ex_event.after_context_event.written_public_data_slots_tree_snapshot.root },
370 { C::execution_written_public_data_slots_tree_size,
371 ex_event.after_context_event.written_public_data_slots_tree_snapshot.nextAvailableLeafIndex },
372 { C::execution_prev_public_data_tree_root,
373 ex_event.before_context_event.tree_states.publicDataTree.tree.root },
374 { C::execution_prev_public_data_tree_size,
375 ex_event.before_context_event.tree_states.publicDataTree.tree.nextAvailableLeafIndex },
376 // Context - tree states - Nullifier tree
377 { C::execution_prev_nullifier_tree_root,
378 ex_event.before_context_event.tree_states.nullifierTree.tree.root },
379 { C::execution_prev_nullifier_tree_size,
380 ex_event.before_context_event.tree_states.nullifierTree.tree.nextAvailableLeafIndex },
381 { C::execution_prev_num_nullifiers_emitted,
382 ex_event.before_context_event.tree_states.nullifierTree.counter },
383 { C::execution_nullifier_tree_root, ex_event.after_context_event.tree_states.nullifierTree.tree.root },
384 { C::execution_nullifier_tree_size,
385 ex_event.after_context_event.tree_states.nullifierTree.tree.nextAvailableLeafIndex },
386 { C::execution_num_nullifiers_emitted, ex_event.after_context_event.tree_states.nullifierTree.counter },
387 // Context - tree states - Public data tree
388 { C::execution_public_data_tree_root,
389 ex_event.after_context_event.tree_states.publicDataTree.tree.root },
390 { C::execution_public_data_tree_size,
391 ex_event.after_context_event.tree_states.publicDataTree.tree.nextAvailableLeafIndex },
392 // Context - tree states - Note hash tree
393 { C::execution_prev_note_hash_tree_root,
394 ex_event.before_context_event.tree_states.noteHashTree.tree.root },
395 { C::execution_prev_note_hash_tree_size,
396 ex_event.before_context_event.tree_states.noteHashTree.tree.nextAvailableLeafIndex },
397 { C::execution_prev_num_note_hashes_emitted,
398 ex_event.before_context_event.tree_states.noteHashTree.counter },
399 { C::execution_note_hash_tree_root, ex_event.after_context_event.tree_states.noteHashTree.tree.root },
400 { C::execution_note_hash_tree_size,
401 ex_event.after_context_event.tree_states.noteHashTree.tree.nextAvailableLeafIndex },
402 { C::execution_num_note_hashes_emitted, ex_event.after_context_event.tree_states.noteHashTree.counter },
403 // Context - tree states - L1 to L2 message tree
404 { C::execution_l1_l2_tree_root, ex_event.after_context_event.tree_states.l1ToL2MessageTree.tree.root },
405 // Context - tree states - Retrieved bytecodes tree
406 { C::execution_prev_retrieved_bytecodes_tree_root,
407 ex_event.before_context_event.retrieved_bytecodes_tree_snapshot.root },
408 { C::execution_prev_retrieved_bytecodes_tree_size,
409 ex_event.before_context_event.retrieved_bytecodes_tree_snapshot.nextAvailableLeafIndex },
410 { C::execution_retrieved_bytecodes_tree_root,
411 ex_event.after_context_event.retrieved_bytecodes_tree_snapshot.root },
412 { C::execution_retrieved_bytecodes_tree_size,
413 ex_event.after_context_event.retrieved_bytecodes_tree_snapshot.nextAvailableLeafIndex },
414 // Context - side effects
415 { C::execution_prev_num_unencrypted_log_fields,
416 ex_event.before_context_event.side_effect_states.numUnencryptedLogFields },
417 { C::execution_num_unencrypted_log_fields,
418 ex_event.after_context_event.side_effect_states.numUnencryptedLogFields },
419 { C::execution_prev_num_l2_to_l1_messages,
420 ex_event.before_context_event.side_effect_states.numL2ToL1Messages },
421 { C::execution_num_l2_to_l1_messages,
422 ex_event.after_context_event.side_effect_states.numL2ToL1Messages },
423 // Helpers for identifying parent context
424 { C::execution_has_parent_ctx, has_parent ? 1 : 0 },
425 { C::execution_is_parent_id_inv, has_parent ? last_seen_parent_id : 0 },
426 } });
427
428 // Internal stack
429 trace.set(row,
430 { {
431 { C::execution_internal_call_id, ex_event.before_context_event.internal_call_id },
432 { C::execution_internal_call_return_id, ex_event.before_context_event.internal_call_return_id },
433 { C::execution_next_internal_call_id, ex_event.before_context_event.next_internal_call_id },
434 } });
435
436 /**************************************************************************************************
437 * Temporality group 1: Bytecode retrieval.
438 **************************************************************************************************/
439
440 bool bytecode_retrieval_failed = ex_event.error == ExecutionError::BYTECODE_RETRIEVAL;
441 trace.set(row,
442 { {
443 { C::execution_sel_bytecode_retrieval_failure, bytecode_retrieval_failed ? 1 : 0 },
444 { C::execution_sel_bytecode_retrieval_success, !bytecode_retrieval_failed ? 1 : 0 },
445 { C::execution_bytecode_id, ex_event.after_context_event.bytecode_id },
446 } });
447
448 /**************************************************************************************************
449 * Temporality group 2: Instruction fetching. Mapping from wire to execution and addressing.
450 **************************************************************************************************/
451
452 // This will only have a value if instruction fetching succeeded.
454 bool error_in_instruction_fetching = ex_event.error == ExecutionError::INSTRUCTION_FETCHING;
455 bool instruction_fetching_success = !bytecode_retrieval_failed && !error_in_instruction_fetching;
456 trace.set(C::execution_sel_instruction_fetching_failure, row, error_in_instruction_fetching ? 1 : 0);
457
458 if (instruction_fetching_success) {
459 exec_opcode = ex_event.wire_instruction.get_exec_opcode();
460 process_instr_fetching(ex_event.wire_instruction, trace, row);
461 // If we fetched an instruction successfully, we can set the next PC.
462 trace.set(row,
463 { {
464 { C::execution_next_pc,
465 ex_event.before_context_event.pc + ex_event.wire_instruction.size_in_bytes() },
466 } });
467
468 // Along this function we need to set the info we get from the EXEC_SPEC_READ lookup.
469 process_execution_spec(ex_event, trace, row);
470
471 process_addressing(ex_event.addressing_event, ex_event.wire_instruction, trace, row);
472 }
473
474 bool addressing_failed = ex_event.error == ExecutionError::ADDRESSING;
475
476 /**************************************************************************************************
477 * Temporality group 3: Registers read.
478 **************************************************************************************************/
479
480 // Note that if addressing did not fail, register reading will not fail.
482 std::ranges::fill(registers.begin(), registers.end(), TaggedValue::from<FF>(0));
483 bool should_process_registers = instruction_fetching_success && !addressing_failed;
484 bool register_processing_failed = ex_event.error == ExecutionError::REGISTER_READ;
485 if (should_process_registers) {
486 process_registers(*exec_opcode, ex_event.inputs, ex_event.output, registers, trace, row);
487 }
488
489 /**************************************************************************************************
490 * Temporality group 4: Gas (both base and dynamic).
491 **************************************************************************************************/
492
493 bool should_check_gas = should_process_registers && !register_processing_failed;
494 bool oog = ex_event.error == ExecutionError::GAS;
495 trace.set(C::execution_sel_should_check_gas, row, should_check_gas ? 1 : 0);
496 if (should_check_gas) {
497 process_gas(ex_event.gas_event, *exec_opcode, trace, row);
498 // todo(ilyas): this is a bad place to do this, but we need the register information to compute dyn gas
499 // factor. process_gas does not have access to it and nor should it.
500 if (*exec_opcode == ExecutionOpCode::TORADIXBE) {
501 uint32_t radix = ex_event.inputs[1].as<uint32_t>(); // Safe since already tag checked
502 uint32_t num_limbs = ex_event.inputs[2].as<uint32_t>(); // Safe since already tag checked
503 uint32_t num_p_limbs = radix > 256 ? 32 : static_cast<uint32_t>(get_p_limbs_per_radix_size(radix));
504 trace.set(row,
505 { {
506 // To Radix BE Dynamic Gas
507 { C::execution_two_five_six, 256 },
508 { C::execution_sel_radix_gt_256, radix > 256 ? 1 : 0 },
509 { C::execution_sel_lookup_num_p_limbs, radix <= 256 ? 1 : 0 },
510 { C::execution_num_p_limbs, num_p_limbs },
511 { C::execution_sel_use_num_limbs, num_limbs > num_p_limbs ? 1 : 0 },
512 // Don't set dyn gas factor here since already set in process_gas
513 } });
514 }
515 }
516
517 /**************************************************************************************************
518 * Temporality group 5: Opcode execution.
519 **************************************************************************************************/
520
521 // TODO(ilyas): This can possibly be gated with some boolean but I'm not sure what is going on.
522 // TODO: this needs a refactor and is most likely wrong.
523
524 // Overly verbose but maximising readibility here
525 // FIXME(ilyas): We currently cannot move this into the if statement because they are used outside of this
526 // temporality group (e.g. in recomputing discard)
527 bool should_execute_opcode = should_check_gas && !oog;
528 bool should_execute_call =
529 should_execute_opcode && exec_opcode.has_value() && *exec_opcode == ExecutionOpCode::CALL;
530 bool should_execute_static_call =
531 should_execute_opcode && exec_opcode.has_value() && *exec_opcode == ExecutionOpCode::STATICCALL;
532 bool should_execute_return =
533 should_execute_opcode && exec_opcode.has_value() && *exec_opcode == ExecutionOpCode::RETURN;
534 bool should_execute_revert =
535 should_execute_opcode && exec_opcode.has_value() && *exec_opcode == ExecutionOpCode::REVERT;
536
537 bool is_err = ex_event.error != ExecutionError::NONE;
538 bool is_failure = should_execute_revert || is_err;
539 bool sel_enter_call = should_execute_call || should_execute_static_call;
540 // TODO: would is_err here catch any error at the opcode execution step which we dont want to consider?
541 bool sel_exit_call = should_execute_return || should_execute_revert || is_err;
542
543 if (sel_exit_call) {
544 // We rollback if we revert or error and we have a parent context.
545 trace.set(row,
546 { {
547 // Exit reason - opcode or error
548 { C::execution_sel_execute_return, should_execute_return ? 1 : 0 },
549 { C::execution_sel_execute_revert, should_execute_revert ? 1 : 0 },
550 { C::execution_sel_exit_call, 1 },
551 { C::execution_nested_return, should_execute_return && has_parent ? 1 : 0 },
552 // Enqueued or nested exit dependent on if we are a child context
553 { C::execution_enqueued_call_end, !has_parent ? 1 : 0 },
554 { C::execution_nested_exit_call, has_parent ? 1 : 0 },
555 } });
556 }
557
558 bool opcode_execution_failed = ex_event.error == ExecutionError::OPCODE_EXECUTION;
559 if (should_execute_opcode) {
560 // At this point we can assume instruction fetching succeeded, so this should never fail.
561 const auto& dispatch_to_subtrace = SUBTRACE_INFO_MAP.at(*exec_opcode);
562 trace.set(row,
563 { {
564 { C::execution_sel_should_execute_opcode, 1 },
565 { C::execution_sel_opcode_error, opcode_execution_failed ? 1 : 0 },
566 { get_subtrace_selector(dispatch_to_subtrace.subtrace_selector), 1 },
567 } });
568
569 // Execution Trace opcodes - separating for clarity
570 if (dispatch_to_subtrace.subtrace_selector == SubtraceSel::EXECUTION) {
571 trace.set(get_execution_opcode_selector(*exec_opcode), row, 1);
572 }
573
574 // Call specific logic
575 if (sel_enter_call) {
576 Gas gas_left = ex_event.after_context_event.gas_limit - ex_event.after_context_event.gas_used;
577
578 uint32_t allocated_l2_gas = registers[0].as<uint32_t>();
579 bool is_l2_gas_allocated_lt_left = allocated_l2_gas < gas_left.l2Gas;
580
581 uint32_t allocated_da_gas = registers[1].as<uint32_t>();
582 bool is_da_gas_allocated_lt_left = allocated_da_gas < gas_left.daGas;
583
584 trace.set(row,
585 { {
586 { C::execution_sel_enter_call, sel_enter_call ? 1 : 0 },
587 { C::execution_sel_execute_call, should_execute_call ? 1 : 0 },
588 { C::execution_sel_execute_static_call, should_execute_static_call ? 1 : 0 },
589 { C::execution_l2_gas_left, gas_left.l2Gas },
590 { C::execution_da_gas_left, gas_left.daGas },
591 { C::execution_call_is_l2_gas_allocated_lt_left, is_l2_gas_allocated_lt_left },
592 { C::execution_call_is_da_gas_allocated_lt_left, is_da_gas_allocated_lt_left },
593 } });
594 }
595 // Separate if-statement for opcodes.
596 // This cannot be an else-if chained to the above,
597 // because `sel_exit_call` can happen on any opcode
598 // and we still need to tracegen the opcode-specific logic.
599 if (exec_opcode == ExecutionOpCode::GETENVVAR) {
600 assert(ex_event.addressing_event.resolution_info.size() == 2 &&
601 "GETENVVAR should have exactly two resolved operands (envvar enum and output)");
602 // rop[1] is the envvar enum
603 TaggedValue envvar_enum = ex_event.addressing_event.resolution_info[1].resolved_operand;
604 process_get_env_var_opcode(envvar_enum, ex_event.output, trace, row);
605 } else if (exec_opcode == ExecutionOpCode::INTERNALRETURN) {
606 trace.set(C::execution_internal_call_return_id_inv,
607 row,
608 ex_event.before_context_event.internal_call_return_id); // Will be inverted in batch later.
609 } else if (exec_opcode == ExecutionOpCode::SSTORE) {
610 uint32_t remaining_data_writes = MAX_PUBLIC_DATA_UPDATE_REQUESTS_PER_TX -
611 ex_event.before_context_event.tree_states.publicDataTree.counter;
612
613 trace.set(row,
614 { {
615 { C::execution_max_data_writes_reached, remaining_data_writes == 0 },
616 { C::execution_remaining_data_writes_inv,
617 remaining_data_writes }, // Will be inverted in batch later.
618 { C::execution_sel_write_public_data, !opcode_execution_failed },
619 } });
620 } else if (exec_opcode == ExecutionOpCode::NOTEHASHEXISTS) {
621 uint64_t leaf_index = registers[1].as<uint64_t>();
622 uint64_t note_hash_tree_leaf_count = NOTE_HASH_TREE_LEAF_COUNT;
623 bool note_hash_leaf_in_range = leaf_index < note_hash_tree_leaf_count;
624
625 trace.set(row,
626 { {
627 { C::execution_note_hash_leaf_in_range, note_hash_leaf_in_range },
628 { C::execution_note_hash_tree_leaf_count, FF(note_hash_tree_leaf_count) },
629 } });
630 } else if (exec_opcode == ExecutionOpCode::EMITNOTEHASH) {
631 uint32_t remaining_note_hashes =
632 MAX_NOTE_HASHES_PER_TX - ex_event.before_context_event.tree_states.noteHashTree.counter;
633
634 trace.set(row,
635 { {
636 { C::execution_sel_reached_max_note_hashes, remaining_note_hashes == 0 },
637 { C::execution_remaining_note_hashes_inv,
638 remaining_note_hashes }, // Will be inverted in batch later.
639 { C::execution_sel_write_note_hash, !opcode_execution_failed },
640 } });
641 } else if (exec_opcode == ExecutionOpCode::L1TOL2MSGEXISTS) {
642 uint64_t leaf_index = registers[1].as<uint64_t>();
643 uint64_t l1_to_l2_msg_tree_leaf_count = L1_TO_L2_MSG_TREE_LEAF_COUNT;
644 bool l1_to_l2_msg_leaf_in_range = leaf_index < l1_to_l2_msg_tree_leaf_count;
645
646 trace.set(row,
647 { {
648 { C::execution_l1_to_l2_msg_leaf_in_range, l1_to_l2_msg_leaf_in_range },
649 { C::execution_l1_to_l2_msg_tree_leaf_count, FF(l1_to_l2_msg_tree_leaf_count) },
650 } });
651 //} else if (exec_opcode == ExecutionOpCode::NULLIFIEREXISTS) {
652 // no custom columns!
653 } else if (exec_opcode == ExecutionOpCode::EMITNULLIFIER) {
654 uint32_t remaining_nullifiers =
655 MAX_NULLIFIERS_PER_TX - ex_event.before_context_event.tree_states.nullifierTree.counter;
656
657 trace.set(row,
658 { {
659 { C::execution_sel_reached_max_nullifiers, remaining_nullifiers == 0 },
660 { C::execution_remaining_nullifiers_inv,
661 remaining_nullifiers }, // Will be inverted in batch later.
662 { C::execution_sel_write_nullifier,
663 remaining_nullifiers != 0 && !ex_event.before_context_event.is_static },
664 } });
665 } else if (exec_opcode == ExecutionOpCode::SENDL2TOL1MSG) {
666 uint32_t remaining_l2_to_l1_msgs =
667 MAX_L2_TO_L1_MSGS_PER_TX - ex_event.before_context_event.side_effect_states.numL2ToL1Messages;
668
669 trace.set(row,
670 { { { C::execution_sel_l2_to_l1_msg_limit_error, remaining_l2_to_l1_msgs == 0 },
671 { C::execution_remaining_l2_to_l1_msgs_inv,
672 remaining_l2_to_l1_msgs }, // Will be inverted in batch later.
673 { C::execution_sel_write_l2_to_l1_msg, !opcode_execution_failed && !discard },
674 {
675 C::execution_public_inputs_index,
677 ex_event.before_context_event.side_effect_states.numL2ToL1Messages,
678 } } });
679 }
680 }
681
682 /**************************************************************************************************
683 * Temporality group 6: Register write.
684 **************************************************************************************************/
685
686 bool should_process_register_write = should_execute_opcode && !opcode_execution_failed;
687 if (should_process_register_write) {
688 process_registers_write(*exec_opcode, trace, row);
689 }
690
691 /**************************************************************************************************
692 * Discarding.
693 **************************************************************************************************/
694
695 bool is_dying_context = discard == 1 && (ex_event.after_context_event.id == dying_context_id);
696
697 // Need to generate the item below for checking "is dying context" in circuit
698 FF dying_context_diff_inv = 0;
699 if (!is_dying_context) {
700 // Compute inversion when context_id != dying_context_id
701 FF diff = FF(ex_event.after_context_event.id) - FF(dying_context_id);
702 dying_context_diff_inv = diff; // Will be inverted in batch later.
703 }
704
705 // Needed for bc retrieval
706 bool sel_first_row_in_context = prev_row_was_enter_call || is_first_event_in_enqueued_call;
707
708 bool enqueued_call_end = sel_exit_call && !has_parent;
709 bool resolves_dying_context = is_failure && is_dying_context;
710 bool nested_call_rom_undiscarded_context = sel_enter_call && discard == 0;
711 bool propagate_discard = !enqueued_call_end && !resolves_dying_context && !nested_call_rom_undiscarded_context;
712
713 // This is here instead of guarded by `should_execute_opcode` because is_err is a higher level error
714 // than just an opcode error (i.e., it is on if there are any errors in any temporality group).
715 bool rollback_context = (should_execute_revert || is_err) && has_parent;
716
717 trace.set(
718 row,
719 { {
720
721 // sel_exit_call and rollback has to be set here because they include sel_error
722 { C::execution_sel_exit_call, sel_exit_call ? 1 : 0 },
723 { C::execution_rollback_context, rollback_context ? 1 : 0 },
724 { C::execution_sel_error, is_err ? 1 : 0 },
725 { C::execution_sel_failure, is_failure ? 1 : 0 },
726 { C::execution_discard, discard },
727 { C::execution_dying_context_id, dying_context_id },
728 { C::execution_dying_context_id_inv, dying_context_id_inv },
729 { C::execution_is_dying_context, is_dying_context ? 1 : 0 },
730 { C::execution_dying_context_diff_inv, dying_context_diff_inv },
731 { C::execution_enqueued_call_end, enqueued_call_end ? 1 : 0 },
732 { C::execution_sel_first_row_in_context, sel_first_row_in_context ? 1 : 0 },
733 { C::execution_resolves_dying_context, resolves_dying_context ? 1 : 0 },
734 { C::execution_nested_call_from_undiscarded_context, nested_call_rom_undiscarded_context ? 1 : 0 },
735 { C::execution_propagate_discard, propagate_discard ? 1 : 0 },
736 } });
737
738 // Trace-generation is done for this event.
739 // Now, use this event to determine whether we should set/reset the discard flag for the NEXT event
740 bool event_kills_dying_context =
741 discard == 1 && is_failure && ex_event.after_context_event.id == dying_context_id;
742
743 if (event_kills_dying_context) {
744 // Set/unset discard flag if the current event is the one that kills the dying context
745 dying_context_id = 0;
746 dying_context_id_inv = 0;
747 discard = 0;
748 } else if (sel_enter_call && discard == 0 && !is_err &&
749 failures.does_context_fail.contains(ex_event.next_context_id)) {
750 // If making a nested call, and discard isn't already high...
751 // if the nested context being entered eventually dies, raise discard flag and remember which
752 // context is dying. NOTE: if a [STATIC]CALL instruction _itself_ errors, we don't set the
753 // discard flag because we aren't actually entering a new context!
754 dying_context_id = ex_event.next_context_id;
755 dying_context_id_inv = dying_context_id; // Will be inverted in batch later.
756 discard = 1;
757 }
758 // Otherwise, we aren't entering or exiting a dying context,
759 // so just propagate discard and dying context.
760 // Implicit: dying_context_id = dying_context_id; discard = discard;
761
762 // If an enqueued call just exited, next event (if any) is the first in an enqueued call.
763 // Update flag for next iteration.
764 is_first_event_in_enqueued_call = ex_event.after_context_event.parent_id == 0 && sel_exit_call;
765
766 // Track this bool for use determining whether the next row is the first in a context
767 prev_row_was_enter_call = sel_enter_call;
768
769 row++;
770 }
771
772 if (!ex_events.empty()) {
773 trace.set(C::execution_last, row - 1, 1);
774 }
775
776 // Batch invert the columns.
778}
779
782 uint32_t row)
783{
784 trace.set(row,
785 { {
786 { C::execution_sel_instruction_fetching_success, 1 },
787 { C::execution_ex_opcode, static_cast<uint8_t>(instruction.get_exec_opcode()) },
788 { C::execution_indirect, instruction.indirect },
789 { C::execution_instr_length, instruction.size_in_bytes() },
790 } });
791
792 // At this point we can assume instruction fetching succeeded.
793 auto operands = instruction.operands;
794 assert(operands.size() <= AVM_MAX_OPERANDS);
795 operands.resize(AVM_MAX_OPERANDS, simulation::Operand::from<FF>(0));
796
797 for (size_t i = 0; i < AVM_MAX_OPERANDS; i++) {
798 trace.set(OPERAND_COLUMNS[i], row, operands.at(i));
799 }
800}
801
804 uint32_t row)
805{
806 // At this point we can assume instruction fetching succeeded, so this should never fail.
807 ExecutionOpCode exec_opcode = ex_event.wire_instruction.get_exec_opcode();
808 const auto& exec_spec = EXEC_INSTRUCTION_SPEC.at(exec_opcode);
809 const auto& gas_cost = exec_spec.gas_cost;
810
811 // Gas.
812 trace.set(row,
813 { {
814 { C::execution_opcode_gas, gas_cost.opcode_gas },
815 { C::execution_base_da_gas, gas_cost.base_da },
816 { C::execution_dynamic_l2_gas, gas_cost.dyn_l2 },
817 { C::execution_dynamic_da_gas, gas_cost.dyn_da },
818 } });
819
820 const auto& register_info = exec_spec.register_info;
821 for (size_t i = 0; i < AVM_MAX_REGISTERS; i++) {
822 trace.set(row,
823 { {
824 { REGISTER_IS_WRITE_COLUMNS[i], register_info.is_write(i) ? 1 : 0 },
825 { REGISTER_MEM_OP_COLUMNS[i], register_info.is_active(i) ? 1 : 0 },
826 { REGISTER_EXPECTED_TAG_COLUMNS[i],
827 register_info.need_tag_check(i) ? static_cast<uint32_t>(*register_info.expected_tag(i)) : 0 },
828 { REGISTER_TAG_CHECK_COLUMNS[i], register_info.need_tag_check(i) ? 1 : 0 },
829 } });
830 }
831
832 // Set is_address columns
833 const auto& num_addresses = exec_spec.num_addresses;
834 for (size_t i = 0; i < num_addresses; i++) {
835 trace.set(OPERAND_IS_ADDRESS_COLUMNS[i], row, 1);
836 }
837
838 // At this point we can assume instruction fetching succeeded, so this should never fail.
839 const auto& dispatch_to_subtrace = SUBTRACE_INFO_MAP.at(exec_opcode);
840 trace.set(row,
841 { {
842 { C::execution_subtrace_id, get_subtrace_id(dispatch_to_subtrace.subtrace_selector) },
843 { C::execution_subtrace_operation_id, dispatch_to_subtrace.subtrace_operation_id },
844 { C::execution_dyn_gas_id, exec_spec.dyn_gas_id },
845 } });
846}
847
849 ExecutionOpCode exec_opcode,
851 uint32_t row)
852{
853 bool oog = gas_event.oog_l2 || gas_event.oog_da;
854 trace.set(row,
855 { {
856 { C::execution_out_of_gas_l2, gas_event.oog_l2 ? 1 : 0 },
857 { C::execution_out_of_gas_da, gas_event.oog_da ? 1 : 0 },
858 { C::execution_sel_out_of_gas, oog ? 1 : 0 },
859 // Base gas.
860 { C::execution_addressing_gas, gas_event.addressing_gas },
861 // Dynamic gas.
862 { C::execution_dynamic_l2_gas_factor, gas_event.dynamic_gas_factor.l2Gas },
863 { C::execution_dynamic_da_gas_factor, gas_event.dynamic_gas_factor.daGas },
864 // Derived cumulative gas used.
865 { C::execution_total_gas_l2, gas_event.total_gas_used_l2 },
866 { C::execution_total_gas_da, gas_event.total_gas_used_da },
867 } });
868
869 const auto& exec_spec = EXEC_INSTRUCTION_SPEC.at(exec_opcode);
870 if (exec_spec.dyn_gas_id != 0) {
871 trace.set(get_dyn_gas_selector(exec_spec.dyn_gas_id), row, 1);
872 }
873}
874
878 uint32_t row)
879{
880 // At this point we can assume instruction fetching succeeded, so this should never fail.
881 ExecutionOpCode exec_opcode = instruction.get_exec_opcode();
882 const ExecInstructionSpec& ex_spec = EXEC_INSTRUCTION_SPEC.at(exec_opcode);
883
884 auto resolution_info_vec = addr_event.resolution_info;
885 assert(resolution_info_vec.size() <= AVM_MAX_OPERANDS);
886 resolution_info_vec.resize(AVM_MAX_OPERANDS,
887 {
888 // This is the default we want: both tag and value 0.
889 .after_relative = simulation::Operand::from<FF>(0),
890 .resolved_operand = simulation::Operand::from<FF>(0),
891 });
892
893 std::array<bool, AVM_MAX_OPERANDS> should_apply_indirection{};
894 std::array<bool, AVM_MAX_OPERANDS> is_relative_effective{};
895 std::array<bool, AVM_MAX_OPERANDS> is_indirect_effective{};
897 std::array<FF, AVM_MAX_OPERANDS> after_relative{};
898 std::array<FF, AVM_MAX_OPERANDS> resolved_operand{};
899 std::array<uint8_t, AVM_MAX_OPERANDS> resolved_operand_tag{};
900 uint8_t num_relative_operands = 0;
901
902 bool base_address_invalid = false;
903 bool do_base_check = false;
904
905 // Gather operand information.
906 for (size_t i = 0; i < AVM_MAX_OPERANDS; i++) {
907 const auto& resolution_info = resolution_info_vec.at(i);
908 bool op_is_address = i < ex_spec.num_addresses;
909 relative_oob[i] = resolution_info.error.has_value() &&
910 *resolution_info.error == AddressingEventError::RELATIVE_COMPUTATION_OOB;
911 base_address_invalid =
912 base_address_invalid ||
913 (resolution_info.error.has_value() && *resolution_info.error == AddressingEventError::BASE_ADDRESS_INVALID);
914 is_indirect_effective[i] = op_is_address && is_operand_indirect(instruction.indirect, i);
915 is_relative_effective[i] = op_is_address && is_operand_relative(instruction.indirect, i);
916 should_apply_indirection[i] = is_indirect_effective[i] && !relative_oob[i] && !base_address_invalid;
917 resolved_operand_tag[i] = static_cast<uint8_t>(resolution_info.resolved_operand.get_tag());
918 after_relative[i] = resolution_info.after_relative;
919 resolved_operand[i] = resolution_info.resolved_operand;
920 if (is_relative_effective[i]) {
921 do_base_check = true;
922 num_relative_operands++;
923 }
924 }
925
926 // Set the operand columns.
927 for (size_t i = 0; i < AVM_MAX_OPERANDS; i++) {
928 trace.set(row,
929 { {
930 { OPERAND_RELATIVE_OVERFLOW_COLUMNS[i], relative_oob[i] ? 1 : 0 },
931 { OPERAND_AFTER_RELATIVE_COLUMNS[i], after_relative[i] },
932 { OPERAND_SHOULD_APPLY_INDIRECTION_COLUMNS[i], should_apply_indirection[i] ? 1 : 0 },
933 { OPERAND_IS_RELATIVE_VALID_BASE_COLUMNS[i],
934 is_relative_effective[i] && !base_address_invalid ? 1 : 0 },
935 { RESOLVED_OPERAND_COLUMNS[i], resolved_operand[i] },
936 { RESOLVED_OPERAND_TAG_COLUMNS[i], resolved_operand_tag[i] },
937 } });
938 }
939
940 // We need to compute relative and indirect over the whole 16 bits of the indirect flag.
941 // See comment in PIL file about indirect upper bits.
942 for (size_t i = 0; i < TOTAL_INDIRECT_BITS / 2; i++) {
943 bool is_relative = is_operand_relative(instruction.indirect, i);
944 bool is_indirect = is_operand_indirect(instruction.indirect, i);
945 trace.set(row,
946 { {
947 { OPERAND_IS_RELATIVE_WIRE_COLUMNS[i], is_relative ? 1 : 0 },
948 { OPERAND_IS_INDIRECT_WIRE_COLUMNS[i], is_indirect ? 1 : 0 },
949 } });
950 }
951
952 // Inverse when base address is invalid.
953 FF base_address_tag_diff_inv = base_address_invalid ? FF(static_cast<uint8_t>(addr_event.base_address.get_tag())) -
954 FF(static_cast<uint8_t>(MemoryTag::U32))
955 : 0; // Will be inverted in batch later.
956
957 // Tag check after indirection.
958 bool some_final_check_failed =
959 std::any_of(addr_event.resolution_info.begin(), addr_event.resolution_info.end(), [](const auto& info) {
960 return info.error.has_value() && *info.error == AddressingEventError::INVALID_ADDRESS_AFTER_INDIRECTION;
961 });
962 FF batched_tags_diff_inv = 0;
963 if (some_final_check_failed) {
964 FF batched_tags_diff = 0;
965 FF power_of_2 = 1;
966 for (size_t i = 0; i < AVM_MAX_OPERANDS; ++i) {
967 batched_tags_diff +=
968 FF(is_indirect_effective[i]) * power_of_2 * (FF(resolved_operand_tag[i]) - FF(MEM_TAG_U32));
969 power_of_2 *= 8; // 2^3
970 }
971 batched_tags_diff_inv = batched_tags_diff; // Will be inverted in batch later.
972 }
973
974 // Collect addressing errors. See PIL file for reference.
975 bool addressing_failed = std::any_of(addr_event.resolution_info.begin(),
976 addr_event.resolution_info.end(),
977 [](const auto& info) { return info.error.has_value(); });
978 FF addressing_error_collection_inv =
979 addressing_failed
980 ? FF(
981 // Base address invalid.
982 (base_address_invalid ? 1 : 0) +
983 // Relative overflow.
984 std::accumulate(addr_event.resolution_info.begin(),
985 addr_event.resolution_info.end(),
986 static_cast<uint32_t>(0),
987 [](uint32_t acc, const auto& info) {
988 return acc +
989 (info.error.has_value() &&
990 *info.error == AddressingEventError::RELATIVE_COMPUTATION_OOB
991 ? 1
992 : 0);
993 }) +
994 // Some invalid address after indirection.
995 (some_final_check_failed ? 1 : 0))
996 : 0; // Will be inverted in batch later.
997
998 trace.set(row,
999 { {
1000 { C::execution_sel_addressing_error, addressing_failed ? 1 : 0 },
1001 { C::execution_addressing_error_collection_inv, addressing_error_collection_inv },
1002 { C::execution_base_address_val, addr_event.base_address.as_ff() },
1003 { C::execution_base_address_tag, static_cast<uint8_t>(addr_event.base_address.get_tag()) },
1004 { C::execution_base_address_tag_diff_inv, base_address_tag_diff_inv },
1005 { C::execution_sel_some_final_check_failed, some_final_check_failed ? 1 : 0 },
1006 { C::execution_sel_base_address_failure, base_address_invalid ? 1 : 0 },
1007 { C::execution_num_relative_operands_inv,
1008 do_base_check ? num_relative_operands : 0 }, // Will be inverted in batch later.
1009 { C::execution_sel_do_base_check, do_base_check ? 1 : 0 },
1010 { C::execution_highest_address, AVM_HIGHEST_MEM_ADDRESS },
1011 } });
1012}
1013
1015{
1016 trace.invert_columns({ {
1017 // Registers.
1018 C::execution_batched_tags_diff_inv_reg,
1019 // Context.
1020 C::execution_is_parent_id_inv,
1021 C::execution_internal_call_return_id_inv,
1022 // Trees.
1023 C::execution_remaining_data_writes_inv,
1024 C::execution_remaining_note_hashes_inv,
1025 C::execution_remaining_nullifiers_inv,
1026 // L1ToL2MsgExists.
1027 C::execution_remaining_l2_to_l1_msgs_inv,
1028 // Discard.
1029 C::execution_dying_context_id_inv,
1030 C::execution_dying_context_diff_inv,
1031 // Addressing.
1032 C::execution_addressing_error_collection_inv,
1033 C::execution_base_address_tag_diff_inv,
1034 C::execution_num_relative_operands_inv,
1035 } });
1036}
1037
1039 const std::vector<TaggedValue>& inputs,
1040 const TaggedValue& output,
1043 uint32_t row)
1044{
1045 assert(registers.size() == AVM_MAX_REGISTERS);
1046 // At this point we can assume instruction fetching succeeded, so this should never fail.
1047 const auto& register_info = EXEC_INSTRUCTION_SPEC.at(exec_opcode).register_info;
1048
1049 // Registers.
1050 size_t input_counter = 0;
1051 for (uint8_t i = 0; i < AVM_MAX_REGISTERS; ++i) {
1052 if (register_info.is_active(i)) {
1053 if (register_info.is_write(i)) {
1054 // If this is a write operation, we need to get the value from the output.
1055 registers[i] = output;
1056 } else {
1057 // If this is a read operation, we need to get the value from the input.
1058 auto input = inputs.size() > input_counter ? inputs.at(input_counter) : TaggedValue::from<FF>(0);
1059 registers[i] = input;
1060 input_counter++;
1061 }
1062 }
1063 }
1064
1065 for (size_t i = 0; i < AVM_MAX_REGISTERS; i++) {
1066 trace.set(REGISTER_COLUMNS[i], row, registers[i]);
1067 trace.set(REGISTER_MEM_TAG_COLUMNS[i], row, static_cast<uint8_t>(registers[i].get_tag()));
1068 // This one is special because it sets the reads (but not the writes).
1069 // If we got here, sel_should_read_registers=1.
1070 if (register_info.is_active(i) && !register_info.is_write(i)) {
1071 trace.set(REGISTER_OP_REG_EFFECTIVE_COLUMNS[i], row, 1);
1072 }
1073 }
1074
1075 // Tag check.
1076 bool some_tag_check_failed = false;
1077 for (size_t i = 0; i < AVM_MAX_REGISTERS; i++) {
1078 if (register_info.need_tag_check(i)) {
1079 if (registers[i].get_tag() != *register_info.expected_tag(i)) {
1080 some_tag_check_failed = true;
1081 break;
1082 }
1083 }
1084 }
1085
1086 FF batched_tags_diff_inv_reg = 0;
1087 if (some_tag_check_failed) {
1088 FF batched_tags_diff = 0;
1089 FF power_of_2 = 1;
1090 for (size_t i = 0; i < AVM_MAX_REGISTERS; ++i) {
1091 if (register_info.need_tag_check(i)) {
1092 batched_tags_diff += power_of_2 * (FF(static_cast<uint8_t>(registers[i].get_tag())) -
1093 FF(static_cast<uint8_t>(*register_info.expected_tag(i))));
1094 }
1095 power_of_2 *= 8; // 2^3
1096 }
1097 batched_tags_diff_inv_reg = batched_tags_diff; // Will be inverted in batch later.
1098 }
1099
1100 trace.set(row,
1101 { {
1102 { C::execution_sel_should_read_registers, 1 },
1103 { C::execution_batched_tags_diff_inv_reg, batched_tags_diff_inv_reg },
1104 { C::execution_sel_register_read_error, some_tag_check_failed ? 1 : 0 },
1105 } });
1106}
1107
1109{
1110 const auto& register_info = EXEC_INSTRUCTION_SPEC.at(exec_opcode).register_info;
1111 trace.set(C::execution_sel_should_write_registers, row, 1);
1112
1113 for (size_t i = 0; i < AVM_MAX_REGISTERS; i++) {
1114 // This one is special because it sets the writes.
1115 // If we got here, sel_should_write_registers=1.
1116 if (register_info.is_active(i) && register_info.is_write(i)) {
1117 trace.set(REGISTER_OP_REG_EFFECTIVE_COLUMNS[i], row, 1);
1118 }
1119 }
1120}
1121
1123 TaggedValue output,
1125 uint32_t row)
1126{
1127 assert(envvar_enum.get_tag() == ValueTag::U8);
1128 const auto& envvar_spec = GetEnvVarSpec::get_table(envvar_enum.as<uint8_t>());
1129
1130 trace.set(row,
1131 { {
1132 { C::execution_sel_execute_get_env_var, 1 },
1133 { C::execution_sel_envvar_pi_lookup_col0, envvar_spec.envvar_pi_lookup_col0 ? 1 : 0 },
1134 { C::execution_sel_envvar_pi_lookup_col1, envvar_spec.envvar_pi_lookup_col1 ? 1 : 0 },
1135 { C::execution_envvar_pi_row_idx, envvar_spec.envvar_pi_row_idx },
1136 { C::execution_is_address, envvar_spec.is_address ? 1 : 0 },
1137 { C::execution_is_sender, envvar_spec.is_sender ? 1 : 0 },
1138 { C::execution_is_transactionfee, envvar_spec.is_transactionfee ? 1 : 0 },
1139 { C::execution_is_isstaticcall, envvar_spec.is_isstaticcall ? 1 : 0 },
1140 { C::execution_is_l2gasleft, envvar_spec.is_l2gasleft ? 1 : 0 },
1141 { C::execution_is_dagasleft, envvar_spec.is_dagasleft ? 1 : 0 },
1142 { C::execution_value_from_pi,
1143 envvar_spec.envvar_pi_lookup_col0 || envvar_spec.envvar_pi_lookup_col1 ? output.as_ff() : 0 },
1144 { C::execution_mem_tag_reg_0_, envvar_spec.out_tag },
1145 } });
1146}
1147
// NOTE(review): static registry of the execution trace's lookup/permutation interactions
// (the `interactions` member declared in the appendix). The declaration line (original
// file lines 1148-1149) and roughly every other `.add<...>()` entry were dropped by the
// documentation extraction that produced this listing — the orphaned `Column::gt_sel)`
// argument lines below belong to those missing entries (their settings names appear in
// the trailing symbol list, e.g. perm_execution_dispatch_to_poseidon2_perm_settings).
// Restore this initializer from the repository rather than hand-reconstructing it.
1150 // Execution specification (precomputed)
1152 // Bytecode retrieval
1153 .add<lookup_execution_bytecode_retrieval_result_settings, InteractionType::LookupGeneric>()
1154 // Instruction fetching
1156 .add<lookup_execution_instruction_fetching_body_settings, InteractionType::LookupGeneric>()
1157 // Addressing
1159 .add<lookup_addressing_relative_overflow_result_1_settings, InteractionType::LookupGeneric>(Column::gt_sel)
1161 .add<lookup_addressing_relative_overflow_result_3_settings, InteractionType::LookupGeneric>(Column::gt_sel)
1163 .add<lookup_addressing_relative_overflow_result_5_settings, InteractionType::LookupGeneric>(Column::gt_sel)
1165 // Internal Call Stack
1166 .add<lookup_internal_call_push_call_stack_settings_, InteractionType::LookupSequential>()
1168 // Gas
1169 .add<lookup_gas_addressing_gas_read_settings, InteractionType::LookupIntoIndexedByClk>()
1171 .add<lookup_gas_is_out_of_gas_da_settings, InteractionType::LookupGeneric>(Column::gt_sel)
1173 // Gas - ToRadix BE
1174 .add<lookup_execution_check_radix_gt_256_settings, InteractionType::LookupGeneric>(Column::gt_sel)
1176 .add<lookup_execution_get_max_limbs_settings, InteractionType::LookupGeneric>(Column::gt_sel)
1177 // Dynamic Gas - SStore
1179 // Context Stack
1180 .add<lookup_context_ctx_stack_call_settings, InteractionType::LookupGeneric>()
1182 .add<lookup_context_ctx_stack_return_settings, InteractionType::LookupGeneric>()
1183 // External Call
1185 Column::gt_sel)
1186 .add<lookup_external_call_call_is_da_gas_allocated_lt_left_settings, InteractionType::LookupGeneric>(
1187 Column::gt_sel)
1188 // GetEnvVar opcode
1190 .add<lookup_get_env_var_read_from_public_inputs_col0_settings, InteractionType::LookupIntoIndexedByClk>()
1192 // Sload opcode
1193 .add<lookup_sload_storage_read_settings, InteractionType::LookupGeneric>()
1194 // Sstore opcode
1196 // NoteHashExists
1197 .add<lookup_notehash_exists_note_hash_read_settings, InteractionType::LookupSequential>()
1199 Column::gt_sel)
1200 // NullifierExists opcode
1201 .add<lookup_nullifier_exists_nullifier_exists_check_settings, InteractionType::LookupSequential>()
1202 // EmitNullifier
1204 // EmitNoteHash
1205 .add<lookup_emit_notehash_notehash_tree_write_settings, InteractionType::LookupSequential>()
1206 // L1ToL2MsgExists
1208 Column::gt_sel)
1209 .add<lookup_l1_to_l2_message_exists_l1_to_l2_msg_read_settings, InteractionType::LookupSequential>()
1210 // Dispatching to other sub-traces
1212 .add<lookup_execution_dispatch_to_bitwise_settings, InteractionType::LookupGeneric>()
1214 .add<perm_execution_dispatch_to_rd_copy_settings, InteractionType::Permutation>()
1216 .add<lookup_execution_dispatch_to_set_settings, InteractionType::LookupGeneric>()
1218 .add<lookup_execution_dispatch_to_emit_unencrypted_log_settings, InteractionType::LookupGeneric>()
1220 .add<perm_execution_dispatch_to_sha256_compression_settings, InteractionType::Permutation>()
1222 .add<perm_execution_dispatch_to_ecc_add_settings, InteractionType::Permutation>()
1224 // SendL2ToL1Msg
1225 .add<lookup_send_l2_to_l1_msg_write_l2_to_l1_msg_settings, InteractionType::LookupIntoIndexedByClk>();
1226
1227} // namespace bb::avm2::tracegen
#define MEM_TAG_U32
#define AVM_PUBLIC_INPUTS_AVM_ACCUMULATED_DATA_L2_TO_L1_MSGS_ROW_IDX
#define AVM_MAX_OPERANDS
#define NOTE_HASH_TREE_LEAF_COUNT
#define L1_TO_L2_MSG_TREE_LEAF_COUNT
#define AVM_MAX_REGISTERS
#define MAX_L2_TO_L1_MSGS_PER_TX
#define MAX_NOTE_HASHES_PER_TX
#define MAX_NULLIFIERS_PER_TX
#define AVM_HIGHEST_MEM_ADDRESS
#define MAX_PUBLIC_DATA_UPDATE_REQUESTS_PER_TX
ValueTag get_tag() const
void process_get_env_var_opcode(TaggedValue envvar_enum, TaggedValue output, TraceContainer &trace, uint32_t row)
void process_execution_spec(const simulation::ExecutionEvent &ex_event, TraceContainer &trace, uint32_t row)
void process_instr_fetching(const simulation::Instruction &instruction, TraceContainer &trace, uint32_t row)
static const InteractionDefinition interactions
void process_registers_write(ExecutionOpCode exec_opcode, TraceContainer &trace, uint32_t row)
void process_gas(const simulation::GasEvent &gas_event, ExecutionOpCode exec_opcode, TraceContainer &trace, uint32_t row)
void process(const simulation::EventEmitterInterface< simulation::ExecutionEvent >::Container &ex_events, TraceContainer &trace)
void process_registers(ExecutionOpCode exec_opcode, const std::vector< TaggedValue > &inputs, const TaggedValue &output, std::span< TaggedValue > registers, TraceContainer &trace, uint32_t row)
void process_addressing(const simulation::AddressingEvent &addr_event, const simulation::Instruction &instruction, TraceContainer &trace, uint32_t row)
static Table get_table(uint8_t envvar)
InteractionDefinition & add(auto &&... args)
void info(Args... args)
Definition log.hpp:74
TestTraceContainer trace
bool app_logic_failure
uint32_t app_logic_exit_context_id
bool teardown_failure
std::unordered_set< uint32_t > does_context_fail
uint32_t teardown_exit_context_id
GasEvent gas_event
Instruction instruction
const std::unordered_map< ExecutionOpCode, SubtraceInfo > SUBTRACE_INFO_MAP
Column get_dyn_gas_selector(uint32_t dyn_gas_id)
Get the column selector for a given dynamic gas ID.
Column get_subtrace_selector(SubtraceSel subtrace_sel)
Get the column selector for a given subtrace selector.
FF get_subtrace_id(SubtraceSel subtrace_sel)
Get the subtrace ID for a given subtrace enum.
lookup_settings< lookup_get_env_var_read_from_public_inputs_col1_settings_ > lookup_get_env_var_read_from_public_inputs_col1_settings
permutation_settings< perm_execution_dispatch_to_poseidon2_perm_settings_ > perm_execution_dispatch_to_poseidon2_perm_settings
permutation_settings< perm_execution_dispatch_to_get_contract_instance_settings_ > perm_execution_dispatch_to_get_contract_instance_settings
lookup_settings< lookup_external_call_call_is_l2_gas_allocated_lt_left_settings_ > lookup_external_call_call_is_l2_gas_allocated_lt_left_settings
lookup_settings< lookup_execution_check_written_storage_slot_settings_ > lookup_execution_check_written_storage_slot_settings
lookup_settings< lookup_addressing_relative_overflow_result_2_settings_ > lookup_addressing_relative_overflow_result_2_settings
lookup_settings< lookup_addressing_relative_overflow_result_4_settings_ > lookup_addressing_relative_overflow_result_4_settings
lookup_settings< lookup_execution_dyn_l2_factor_bitwise_settings_ > lookup_execution_dyn_l2_factor_bitwise_settings
lookup_settings< lookup_execution_dispatch_to_alu_settings_ > lookup_execution_dispatch_to_alu_settings
bool is_operand_relative(uint16_t indirect_flag, size_t operand_index)
Definition addressing.hpp:8
lookup_settings< lookup_emit_nullifier_write_nullifier_settings_ > lookup_emit_nullifier_write_nullifier_settings
size_t get_p_limbs_per_radix_size(size_t radix)
Definition to_radix.cpp:54
lookup_settings< lookup_gas_is_out_of_gas_l2_settings_ > lookup_gas_is_out_of_gas_l2_settings
permutation_settings< perm_execution_dispatch_to_cd_copy_settings_ > perm_execution_dispatch_to_cd_copy_settings
lookup_settings< lookup_execution_dispatch_to_cast_settings_ > lookup_execution_dispatch_to_cast_settings
lookup_settings< lookup_context_ctx_stack_rollback_settings_ > lookup_context_ctx_stack_rollback_settings
permutation_settings< perm_execution_dispatch_to_keccakf1600_settings_ > perm_execution_dispatch_to_keccakf1600_settings
bool is_operand_indirect(uint16_t indirect_flag, size_t operand_index)
lookup_settings< lookup_execution_get_p_limbs_settings_ > lookup_execution_get_p_limbs_settings
lookup_settings< lookup_execution_exec_spec_read_settings_ > lookup_execution_exec_spec_read_settings
lookup_settings< lookup_get_env_var_precomputed_info_settings_ > lookup_get_env_var_precomputed_info_settings
lookup_settings< lookup_addressing_relative_overflow_result_0_settings_ > lookup_addressing_relative_overflow_result_0_settings
lookup_settings< lookup_l1_to_l2_message_exists_l1_to_l2_msg_leaf_index_in_range_settings_ > lookup_l1_to_l2_message_exists_l1_to_l2_msg_leaf_index_in_range_settings
lookup_settings< lookup_addressing_relative_overflow_result_6_settings_ > lookup_addressing_relative_overflow_result_6_settings
lookup_settings< lookup_execution_instruction_fetching_result_settings_ > lookup_execution_instruction_fetching_result_settings
lookup_settings< lookup_notehash_exists_note_hash_leaf_index_in_range_settings_ > lookup_notehash_exists_note_hash_leaf_index_in_range_settings
permutation_settings< perm_execution_dispatch_to_to_radix_settings_ > perm_execution_dispatch_to_to_radix_settings
lookup_settings< lookup_sstore_record_written_storage_slot_settings_ > lookup_sstore_record_written_storage_slot_settings
AvmFlavorSettings::FF FF
Definition field.hpp:10
const std::unordered_map< ExecutionOpCode, ExecInstructionSpec > EXEC_INSTRUCTION_SPEC
constexpr decltype(auto) get(::tuplet::tuple< T... > &&t) noexcept
Definition tuple.hpp:13
std::vector< OperandResolutionInfo > resolution_info
ExecutionOpCode get_exec_opcode() const