Barretenberg
The ZK-SNARK library at the core of Aztec
Loading...
Searching...
No Matches
bytecode_trace.cpp
Go to the documentation of this file.
2
3#include <cmath>
4#include <cstddef>
5#include <cstdint>
6#include <memory>
7#include <ranges>
8#include <stdexcept>
9#include <vector>
10
25
27
28namespace bb::avm2::tracegen {
29
// process_decomposition: fills the bc_decomposition subtrace from bytecode decomposition
// events — one row per bytecode byte (pc), each carrying a 37-byte sliding window of the
// bytecode, plus a "packed field" marker row every 31 bytes.
// NOTE(review): the function signature line is elided in this listing; per the related
// declarations it is BytecodeTraceBuilder::process_decomposition(events, trace) — confirm.
33{
34 using C = Column;
35 // Since next_packed_pc - pc is always in the range [0, 31), we can precompute the inverses:
36 std::vector<FF> next_packed_pc_min_pc_inverses = { 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15,
37 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30 };
// NOTE(review): entry 0 of the table is 0; this assumes FF::batch_invert maps 0 -> 0 — confirm.
38 FF::batch_invert(next_packed_pc_min_pc_inverses);
39
40 // We start from row 1 because we need a row of zeroes for the shifts.
41 uint32_t row = 1;
42
43 for (const auto& event : events) {
44 const auto& bytecode = *event.bytecode;
45 const auto id = event.bytecode_id;
// Out-of-range reads return 0 so the sliding window may safely run past the end of the bytecode.
46 auto bytecode_at = [&bytecode](size_t i) -> uint8_t { return i < bytecode.size() ? bytecode[i] : 0; };
47 const uint32_t bytecode_len = static_cast<uint32_t>(bytecode.size());
48
// One trace row per byte of this bytecode; i doubles as the program counter (pc).
49 for (uint32_t i = 0; i < bytecode_len; i++) {
50 const uint32_t remaining = bytecode_len - i;
51 const uint32_t bytes_to_read = std::min(remaining, DECOMPOSE_WINDOW_SIZE);
52 const bool is_last = remaining == 1;
53 const bool is_windows_eq_remaining = remaining == DECOMPOSE_WINDOW_SIZE;
54
55 // Check that we still expect the max public bytecode in bytes to fit within 24 bits (i.e. <= 0xffffff).
56 static_assert(MAX_PACKED_PUBLIC_BYTECODE_SIZE_IN_FIELDS * 32 <= 0xffffff);
57
58 // We set the decomposition in bytes, and other values.
59 trace.set(row + i,
60 { {
61 { C::bc_decomposition_sel, 1 },
62 { C::bc_decomposition_id, id },
63 { C::bc_decomposition_pc, i },
64 { C::bc_decomposition_last_of_contract, is_last ? 1 : 0 },
65 { C::bc_decomposition_bytes_remaining, remaining },
66 { C::bc_decomposition_bytes_to_read, bytes_to_read },
67 { C::bc_decomposition_sel_windows_gt_remaining, DECOMPOSE_WINDOW_SIZE > remaining ? 1 : 0 },
68 { C::bc_decomposition_is_windows_eq_remaining, is_windows_eq_remaining ? 1 : 0 },
// The *_inv columns are seeded with the value to invert; invert_columns() at the end
// performs the batch inversion in place. Zero is stored where the inverse is unused.
69 // Inverses will be calculated in batch later.
70 { C::bc_decomposition_bytes_rem_inv, remaining },
71 { C::bc_decomposition_bytes_rem_min_one_inv, is_last ? 0 : FF(remaining - 1) },
72 { C::bc_decomposition_windows_min_remaining_inv,
73 is_windows_eq_remaining ? 0 : FF(DECOMPOSE_WINDOW_SIZE) - FF(remaining) },
// Sliding window: bytes at pc, pc+1, ..., pc+36 (zero-padded past the end).
74 // Sliding window.
75 { C::bc_decomposition_bytes, bytecode_at(i) },
76 { C::bc_decomposition_bytes_pc_plus_1, bytecode_at(i + 1) },
77 { C::bc_decomposition_bytes_pc_plus_2, bytecode_at(i + 2) },
78 { C::bc_decomposition_bytes_pc_plus_3, bytecode_at(i + 3) },
79 { C::bc_decomposition_bytes_pc_plus_4, bytecode_at(i + 4) },
80 { C::bc_decomposition_bytes_pc_plus_5, bytecode_at(i + 5) },
81 { C::bc_decomposition_bytes_pc_plus_6, bytecode_at(i + 6) },
82 { C::bc_decomposition_bytes_pc_plus_7, bytecode_at(i + 7) },
83 { C::bc_decomposition_bytes_pc_plus_8, bytecode_at(i + 8) },
84 { C::bc_decomposition_bytes_pc_plus_9, bytecode_at(i + 9) },
85 { C::bc_decomposition_bytes_pc_plus_10, bytecode_at(i + 10) },
86 { C::bc_decomposition_bytes_pc_plus_11, bytecode_at(i + 11) },
87 { C::bc_decomposition_bytes_pc_plus_12, bytecode_at(i + 12) },
88 { C::bc_decomposition_bytes_pc_plus_13, bytecode_at(i + 13) },
89 { C::bc_decomposition_bytes_pc_plus_14, bytecode_at(i + 14) },
90 { C::bc_decomposition_bytes_pc_plus_15, bytecode_at(i + 15) },
91 { C::bc_decomposition_bytes_pc_plus_16, bytecode_at(i + 16) },
92 { C::bc_decomposition_bytes_pc_plus_17, bytecode_at(i + 17) },
93 { C::bc_decomposition_bytes_pc_plus_18, bytecode_at(i + 18) },
94 { C::bc_decomposition_bytes_pc_plus_19, bytecode_at(i + 19) },
95 { C::bc_decomposition_bytes_pc_plus_20, bytecode_at(i + 20) },
96 { C::bc_decomposition_bytes_pc_plus_21, bytecode_at(i + 21) },
97 { C::bc_decomposition_bytes_pc_plus_22, bytecode_at(i + 22) },
98 { C::bc_decomposition_bytes_pc_plus_23, bytecode_at(i + 23) },
99 { C::bc_decomposition_bytes_pc_plus_24, bytecode_at(i + 24) },
100 { C::bc_decomposition_bytes_pc_plus_25, bytecode_at(i + 25) },
101 { C::bc_decomposition_bytes_pc_plus_26, bytecode_at(i + 26) },
102 { C::bc_decomposition_bytes_pc_plus_27, bytecode_at(i + 27) },
103 { C::bc_decomposition_bytes_pc_plus_28, bytecode_at(i + 28) },
104 { C::bc_decomposition_bytes_pc_plus_29, bytecode_at(i + 29) },
105 { C::bc_decomposition_bytes_pc_plus_30, bytecode_at(i + 30) },
106 { C::bc_decomposition_bytes_pc_plus_31, bytecode_at(i + 31) },
107 { C::bc_decomposition_bytes_pc_plus_32, bytecode_at(i + 32) },
108 { C::bc_decomposition_bytes_pc_plus_33, bytecode_at(i + 33) },
109 { C::bc_decomposition_bytes_pc_plus_34, bytecode_at(i + 34) },
110 { C::bc_decomposition_bytes_pc_plus_35, bytecode_at(i + 35) },
111 { C::bc_decomposition_bytes_pc_plus_36, bytecode_at(i + 36) },
112 } });
113 }
114
115 // We set the packed field every 31 bytes.
116 auto bytecode_field_at = [&](size_t i) -> FF {
117 // We need to read uint256_ts because reading FFs messes up the order of the bytes.
118 uint256_t as_int = 0;
119 if (bytecode_len - i >= 32) {
120 as_int = from_buffer<uint256_t>(bytecode, i);
121 } else {
// Tail case: fewer than 32 bytes left — copy and zero-pad to 32 before reading.
122 std::vector<uint8_t> tail(bytecode.begin() + static_cast<ssize_t>(i), bytecode.end());
123 tail.resize(32, 0);
124 as_int = from_buffer<uint256_t>(tail, 0);
125 }
// Keep only the top 31 bytes of the 32-byte read: each packed field holds 31 bytecode bytes.
126 return as_int >> 8;
127 };
128 for (uint32_t i = 0; i < bytecode_len; i += 31) {
// On the packed row itself, pc == next_packed_pc, so the difference's inverse column is 0.
129 trace.set(row + i,
130 { {
131 { C::bc_decomposition_sel_packed, 1 },
132 { C::bc_decomposition_packed_field, bytecode_field_at(i) },
133 { C::bc_decomposition_next_packed_pc, i },
134 { C::bc_decomposition_next_packed_pc_min_pc_inv, 0 },
135 } });
// Rows between packed rows point forward to the next packed pc (i + 31) and store the
// precomputed inverse of (next_packed_pc - pc), which lies in [1, 30].
136 for (uint32_t j = i + 1; j < std::min(bytecode_len, i + 31); j++) {
137 trace.set(
138 row + j,
139 { {
140 { C::bc_decomposition_next_packed_pc, i + 31 },
141 { C::bc_decomposition_next_packed_pc_min_pc_inv, next_packed_pc_min_pc_inverses[i + 31 - j] },
142 } });
143 }
144 }
145
146 // We advance to the next bytecode.
147 row += bytecode_len;
148 }
149
150 // Batch invert the columns.
151 trace.invert_columns({ { C::bc_decomposition_bytes_rem_inv,
152 C::bc_decomposition_bytes_rem_min_one_inv,
153 C::bc_decomposition_windows_min_remaining_inv } });
154}
155
// process_hashing: fills the bc_hashing subtrace from bytecode hashing events. The packed
// bytecode fields are absorbed three at a time (one row per round) into a Poseidon2 hash,
// with the GENERATOR_INDEX__PUBLIC_BYTECODE separator forced in as field 0.
// NOTE(review): the function signature line is elided in this listing; per the related
// declarations it is BytecodeTraceBuilder::process_hashing(events, trace) — confirm.
158{
159 using C = Column;
160 uint32_t row = 1;
161
162 for (const auto& event : events) {
163 const auto id = event.bytecode_id;
164 // Note that bytecode fields from the BytecodeHashingEvent do not contain the prepended separator
165 std::vector<FF> fields = { GENERATOR_INDEX__PUBLIC_BYTECODE };
166 fields.insert(fields.end(), event.bytecode_fields.begin(), event.bytecode_fields.end());
// Out-of-range reads return 0 — these are the padding fields of the final round.
167 auto bytecode_field_at = [&fields](size_t i) -> FF { return i < fields.size() ? fields[i] : 0; };
168 FF output_hash = Poseidon2::hash(fields);
// Each round consumes 3 fields; pad the count up to a multiple of 3.
169 auto padding_amount = (3 - (fields.size() % 3)) % 3;
170 auto num_rounds = (fields.size() + padding_amount) / 3;
171 uint32_t pc_index = 0;
172 for (uint32_t i = 0; i < fields.size(); i += 3) {
173 bool start_of_bytecode = i == 0;
174 bool end_of_bytecode = i + 3 >= fields.size();
175 // When we start the bytecode, we want to look up field 1 at pc = 0 in the decomposition trace, since we
176 // force field 0 to be the separator:
177 uint32_t pc_index_1 = start_of_bytecode ? 0 : pc_index + 31;
// One row per hashing round; num_rounds counts down towards the latch row.
178 trace.set(row,
179 { { { C::bc_hashing_sel, 1 },
180 { C::bc_hashing_start, start_of_bytecode },
181 { C::bc_hashing_sel_not_start, !start_of_bytecode },
182 { C::bc_hashing_latch, end_of_bytecode },
183 { C::bc_hashing_bytecode_id, id },
184 { C::bc_hashing_input_len, fields.size() },
185 { C::bc_hashing_rounds_rem, num_rounds },
186 { C::bc_hashing_pc_index, pc_index },
187 { C::bc_hashing_pc_index_1, pc_index_1 },
188 { C::bc_hashing_pc_index_2, pc_index_1 + 31 },
189 { C::bc_hashing_packed_fields_0, bytecode_field_at(i) },
190 { C::bc_hashing_packed_fields_1, bytecode_field_at(i + 1) },
191 { C::bc_hashing_packed_fields_2, bytecode_field_at(i + 2) },
// sel_not_padding_* are 0 only on the final round's padded slots.
192 { C::bc_hashing_sel_not_padding_1, end_of_bytecode && padding_amount == 2 ? 0 : 1 },
193 { C::bc_hashing_sel_not_padding_2, end_of_bytecode && padding_amount > 0 ? 0 : 1 },
194 { C::bc_hashing_output_hash, output_hash } } });
195 if (end_of_bytecode) {
196 // TODO(MW): Cleanup: below sets the pc at which the final field starts.
197 // It can't just be pc_index + 31 * padding_amount because we 'skip' 31 bytes at start == 1 to force
198 // the first field to be the separator:
199 trace.set(row,
200 { {
201 { C::bc_hashing_pc_at_final_field,
202 padding_amount == 2 ? pc_index : pc_index_1 + (31 * (1 - padding_amount)) },
203 } });
204 }
// Each round covers two 31-byte packed fields (at pc_index_1 and pc_index_1 + 31).
205 pc_index = pc_index_1 + 62;
206 row++;
207 num_rounds--;
208 }
209 }
210}
211
// process_retrieval: fills the bc_retrieval subtrace — one row per bytecode retrieval
// event, recording the contract address/class lookups, the retrieved-bytecodes tree
// snapshots before/after, and the error/limit flags.
// NOTE(review): the function signature line is elided in this listing; per the related
// declarations it is BytecodeTraceBuilder::process_retrieval(events, trace) — confirm.
215{
216 using C = Column;
217
218 uint32_t row = 1;
219 for (const auto& event : events) {
// NOTE(review): a continuation line of this expression (doxygen line 221) is elided in
// this listing — it presumably involves AVM_RETRIEVED_BYTECODES_TREE_INITIAL_SIZE and a
// subtraction of the leaf index; confirm the full formula against the repository.
220 uint64_t remaining_bytecodes = MAX_PUBLIC_CALLS_TO_UNIQUE_CONTRACT_CLASS_IDS +
222 event.retrieved_bytecodes_snapshot_before.nextAvailableLeafIndex;
// An event errors if the instance was not found or the unique-class limit was hit.
223 bool error = event.instance_not_found_error || event.limit_error;
224 trace.set(
225 row,
226 { {
227 { C::bc_retrieval_sel, 1 },
228 { C::bc_retrieval_bytecode_id, event.bytecode_id },
229 { C::bc_retrieval_address, event.address },
230 { C::bc_retrieval_error, error },
231
232 // Contract instance members (for lookup into contract_instance_retrieval)
233 { C::bc_retrieval_current_class_id, event.current_class_id },
234
235 // Tree context (for lookup into contract_instance_retrieval)
236 { C::bc_retrieval_public_data_tree_root, event.public_data_tree_root },
237 { C::bc_retrieval_nullifier_tree_root, event.nullifier_root },
238
239 // Retrieved bytecodes tree state
240 { C::bc_retrieval_prev_retrieved_bytecodes_tree_root, event.retrieved_bytecodes_snapshot_before.root },
241 { C::bc_retrieval_prev_retrieved_bytecodes_tree_size,
242 event.retrieved_bytecodes_snapshot_before.nextAvailableLeafIndex },
243 { C::bc_retrieval_next_retrieved_bytecodes_tree_root, event.retrieved_bytecodes_snapshot_after.root },
244 { C::bc_retrieval_next_retrieved_bytecodes_tree_size,
245 event.retrieved_bytecodes_snapshot_after.nextAvailableLeafIndex },
246
247 // Instance existence determined by shared contract instance retrieval
248 { C::bc_retrieval_instance_exists, !event.instance_not_found_error },
249
250 // Limit handling
251 { C::bc_retrieval_no_remaining_bytecodes, remaining_bytecodes == 0 },
// Seeded with the raw value; trace.invert_columns() below performs the inversion.
252 { C::bc_retrieval_remaining_bytecodes_inv, remaining_bytecodes }, // Will be inverted in batch later.
253 { C::bc_retrieval_is_new_class, event.is_new_class },
254 { C::bc_retrieval_should_retrieve, !error },
255
256 // Contract class for bytecode operations
257 { C::bc_retrieval_artifact_hash, event.contract_class.artifact_hash },
258 { C::bc_retrieval_private_function_root, event.contract_class.private_function_root },
259
260 } });
261 row++;
262 }
263
264 // Batch invert the columns.
265 trace.invert_columns({ { C::bc_retrieval_remaining_bytecodes_inv } });
266}
267
// process_instruction_fetching: fills the instr_fetching subtrace — one row per fetch
// event, with the 37-byte window at pc, the decoded operands, the per-opcode selectors
// from WIRE_INSTRUCTION_SPEC, and the parsing-error flags.
// NOTE(review): the function signature line is elided in this listing; per the related
// declarations it is BytecodeTraceBuilder::process_instruction_fetching(events, trace) — confirm.
271{
272 using C = Column;
// NOTE(review): doxygen lines 273-277 are elided in this listing — confirm their content
// (likely comments or local constants) against the repository.
278
279 // We start from row 1 because we need a row of zeroes for the shifts.
280 uint32_t row = 1;
281
282 for (const auto& event : events) {
283 const auto bytecode_id = event.bytecode_id;
284 const auto bytecode_size = event.bytecode->size();
285
// Out-of-range operand indices yield 0 (instructions have a variable operand count).
286 auto get_operand = [&](size_t i) -> FF {
287 return i < event.instruction.operands.size() ? static_cast<FF>(event.instruction.operands[i]) : 0;
288 };
// Out-of-range bytecode reads yield 0 so the window may run past the end.
289 auto bytecode_at = [&](size_t i) -> uint8_t { return i < bytecode_size ? (*event.bytecode)[i] : 0; };
290
291 const uint8_t wire_opcode = bytecode_at(event.pc);
// The opcode byte is only meaningful when pc is in range and below the opcode sentinel.
292 const bool wire_opcode_in_range =
293 event.error != PC_OUT_OF_RANGE && wire_opcode < static_cast<uint8_t>(WireOpCode::LAST_OPCODE_SENTINEL);
294
// Defaults used when the wire opcode is out of range (error rows).
295 uint32_t size_in_bytes = 0;
296 ExecutionOpCode exec_opcode = static_cast<ExecutionOpCode>(0);
297 std::array<uint8_t, NUM_OP_DC_SELECTORS> op_dc_selectors{};
298 uint8_t has_tag = 0;
299 uint8_t tag_is_op2 = 0;
300 uint8_t tag_value = 0;
301
302 if (wire_opcode_in_range) {
303 const auto& wire_instr_spec = WIRE_INSTRUCTION_SPEC.at(static_cast<WireOpCode>(wire_opcode));
304 size_in_bytes = wire_instr_spec.size_in_bytes;
305 exec_opcode = wire_instr_spec.exec_opcode;
306 op_dc_selectors = wire_instr_spec.op_dc_selectors;
307
308 if (wire_instr_spec.tag_operand_idx.has_value()) {
309 const auto tag_value_idx = wire_instr_spec.tag_operand_idx.value();
310 assert((tag_value_idx == 2 || tag_value_idx == 3) &&
311 "Current constraints support only tag for operand index equal to 2 or 3");
312 has_tag = 1;
313
314 if (tag_value_idx == 2) {
315 tag_is_op2 = 1;
316 tag_value = static_cast<uint8_t>(get_operand(1)); // in instruction.operands, op2 has index 1
317 } else {
318 tag_value = static_cast<uint8_t>(get_operand(2));
319 }
320 }
321 }
322
323 const uint32_t bytes_remaining =
324 event.error == PC_OUT_OF_RANGE ? 0 : static_cast<uint32_t>(bytecode_size - event.pc);
325 const uint32_t bytes_to_read = std::min(bytes_remaining, DECOMPOSE_WINDOW_SIZE);
326
// |bytes_to_read - size_in_bytes| adjusted by -1 in the "instruction too large" branch;
// presumably feeds the instr_abs_diff_positive range lookup — confirm against the circuit.
327 uint32_t instr_abs_diff = 0;
328 if (size_in_bytes <= bytes_to_read) {
329 instr_abs_diff = bytes_to_read - size_in_bytes;
330 } else {
331 instr_abs_diff = size_in_bytes - bytes_to_read - 1;
332 }
333
334 uint32_t bytecode_size_u32 = static_cast<uint32_t>(bytecode_size);
// Asymmetric |bytecode_size - pc| (minus 1 when pc is in range), used by the
// pc_abs_diff_positive lookup to prove which side of the boundary pc is on.
335 uint32_t pc_abs_diff =
336 bytecode_size_u32 > event.pc ? bytecode_size_u32 - event.pc - 1 : event.pc - bytecode_size_u32;
337
338 trace.set(row,
339 { {
340 { C::instr_fetching_sel, 1 },
341 { C::instr_fetching_bytecode_id, bytecode_id },
342 { C::instr_fetching_pc, event.pc },
343 // indirect + operands.
344 { C::instr_fetching_indirect, event.instruction.indirect },
345 { C::instr_fetching_op1, get_operand(0) },
346 { C::instr_fetching_op2, get_operand(1) },
347 { C::instr_fetching_op3, get_operand(2) },
348 { C::instr_fetching_op4, get_operand(3) },
349 { C::instr_fetching_op5, get_operand(4) },
350 { C::instr_fetching_op6, get_operand(5) },
351 { C::instr_fetching_op7, get_operand(6) },
// 37-byte window starting at pc; bd0 is the opcode byte itself.
352 // Single bytes.
353 { C::instr_fetching_bd0, wire_opcode },
354 { C::instr_fetching_bd1, bytecode_at(event.pc + 1) },
355 { C::instr_fetching_bd2, bytecode_at(event.pc + 2) },
356 { C::instr_fetching_bd3, bytecode_at(event.pc + 3) },
357 { C::instr_fetching_bd4, bytecode_at(event.pc + 4) },
358 { C::instr_fetching_bd5, bytecode_at(event.pc + 5) },
359 { C::instr_fetching_bd6, bytecode_at(event.pc + 6) },
360 { C::instr_fetching_bd7, bytecode_at(event.pc + 7) },
361 { C::instr_fetching_bd8, bytecode_at(event.pc + 8) },
362 { C::instr_fetching_bd9, bytecode_at(event.pc + 9) },
363 { C::instr_fetching_bd10, bytecode_at(event.pc + 10) },
364 { C::instr_fetching_bd11, bytecode_at(event.pc + 11) },
365 { C::instr_fetching_bd12, bytecode_at(event.pc + 12) },
366 { C::instr_fetching_bd13, bytecode_at(event.pc + 13) },
367 { C::instr_fetching_bd14, bytecode_at(event.pc + 14) },
368 { C::instr_fetching_bd15, bytecode_at(event.pc + 15) },
369 { C::instr_fetching_bd16, bytecode_at(event.pc + 16) },
370 { C::instr_fetching_bd17, bytecode_at(event.pc + 17) },
371 { C::instr_fetching_bd18, bytecode_at(event.pc + 18) },
372 { C::instr_fetching_bd19, bytecode_at(event.pc + 19) },
373 { C::instr_fetching_bd20, bytecode_at(event.pc + 20) },
374 { C::instr_fetching_bd21, bytecode_at(event.pc + 21) },
375 { C::instr_fetching_bd22, bytecode_at(event.pc + 22) },
376 { C::instr_fetching_bd23, bytecode_at(event.pc + 23) },
377 { C::instr_fetching_bd24, bytecode_at(event.pc + 24) },
378 { C::instr_fetching_bd25, bytecode_at(event.pc + 25) },
379 { C::instr_fetching_bd26, bytecode_at(event.pc + 26) },
380 { C::instr_fetching_bd27, bytecode_at(event.pc + 27) },
381 { C::instr_fetching_bd28, bytecode_at(event.pc + 28) },
382 { C::instr_fetching_bd29, bytecode_at(event.pc + 29) },
383 { C::instr_fetching_bd30, bytecode_at(event.pc + 30) },
384 { C::instr_fetching_bd31, bytecode_at(event.pc + 31) },
385 { C::instr_fetching_bd32, bytecode_at(event.pc + 32) },
386 { C::instr_fetching_bd33, bytecode_at(event.pc + 33) },
387 { C::instr_fetching_bd34, bytecode_at(event.pc + 34) },
388 { C::instr_fetching_bd35, bytecode_at(event.pc + 35) },
389 { C::instr_fetching_bd36, bytecode_at(event.pc + 36) },
390
391 // From instruction table.
392 { C::instr_fetching_exec_opcode, static_cast<uint32_t>(exec_opcode) },
393 { C::instr_fetching_instr_size, size_in_bytes },
394 { C::instr_fetching_sel_has_tag, has_tag },
395 { C::instr_fetching_sel_tag_is_op2, tag_is_op2 },
396
397 // Fill operand decomposition selectors
398 { C::instr_fetching_sel_op_dc_0, op_dc_selectors.at(0) },
399 { C::instr_fetching_sel_op_dc_1, op_dc_selectors.at(1) },
400 { C::instr_fetching_sel_op_dc_2, op_dc_selectors.at(2) },
401 { C::instr_fetching_sel_op_dc_3, op_dc_selectors.at(3) },
402 { C::instr_fetching_sel_op_dc_4, op_dc_selectors.at(4) },
403 { C::instr_fetching_sel_op_dc_5, op_dc_selectors.at(5) },
404 { C::instr_fetching_sel_op_dc_6, op_dc_selectors.at(6) },
405 { C::instr_fetching_sel_op_dc_7, op_dc_selectors.at(7) },
406 { C::instr_fetching_sel_op_dc_8, op_dc_selectors.at(8) },
407 { C::instr_fetching_sel_op_dc_9, op_dc_selectors.at(9) },
408 { C::instr_fetching_sel_op_dc_10, op_dc_selectors.at(10) },
409 { C::instr_fetching_sel_op_dc_11, op_dc_selectors.at(11) },
410 { C::instr_fetching_sel_op_dc_12, op_dc_selectors.at(12) },
411 { C::instr_fetching_sel_op_dc_13, op_dc_selectors.at(13) },
412 { C::instr_fetching_sel_op_dc_14, op_dc_selectors.at(14) },
413 { C::instr_fetching_sel_op_dc_15, op_dc_selectors.at(15) },
414 { C::instr_fetching_sel_op_dc_16, op_dc_selectors.at(16) },
415
// One-hot error flags plus an aggregate selector for any parsing error.
416 // Parsing errors
417 { C::instr_fetching_pc_out_of_range, event.error == PC_OUT_OF_RANGE ? 1 : 0 },
418 { C::instr_fetching_opcode_out_of_range, event.error == OPCODE_OUT_OF_RANGE ? 1 : 0 },
419 { C::instr_fetching_instr_out_of_range, event.error == INSTRUCTION_OUT_OF_RANGE ? 1 : 0 },
420 { C::instr_fetching_tag_out_of_range, event.error == TAG_OUT_OF_RANGE ? 1 : 0 },
421 { C::instr_fetching_sel_parsing_err, event.error.has_value() ? 1 : 0 },
422
423 // selector for lookups
424 { C::instr_fetching_sel_pc_in_range, event.error != PC_OUT_OF_RANGE ? 1 : 0 },
425
426 { C::instr_fetching_bytecode_size, bytecode_size },
427 { C::instr_fetching_bytes_to_read, bytes_to_read },
428 { C::instr_fetching_instr_abs_diff, instr_abs_diff },
429 { C::instr_fetching_pc_abs_diff, pc_abs_diff },
430 { C::instr_fetching_pc_size_in_bits,
431 AVM_PC_SIZE_IN_BITS }, // Remove when we support constants in lookups
432 { C::instr_fetching_tag_value, tag_value },
433 } });
434 row++;
435 }
436}
437
// Registration of this trace's lookup/permutation interactions with other subtraces.
// NOTE(review): this listing elides the definition's opening (the
// `BytecodeTraceBuilder::interactions = InteractionDefinition()` head) and several
// interior `.add<...>()` entries (doxygen lines 441, 444, 446, 449-452, 455, 457, 459);
// the elided entries presumably include the check_final_bytes_remaining,
// class_id_derivation, is_new_class_check, bytes_are_bytes, wire_instruction_info,
// bytes_from_bc_dec and tag_value_validation settings listed at the bottom of this
// page — confirm the full chain against the repository.
440 // Bytecode Hashing
442 .add<lookup_bc_hashing_poseidon2_hash_settings, InteractionType::LookupSequential>()
443 // Bytecode Retrieval
445 .add<lookup_bc_retrieval_contract_instance_retrieval_settings, InteractionType::LookupSequential>()
447 .add<lookup_bc_retrieval_retrieved_bytecodes_insertion_settings, InteractionType::LookupSequential>()
448 // Bytecode Decomposition
453 perm_bc_hashing_get_packed_field_2_settings>(Column::bc_decomposition_sel_packed)
454 // Instruction Fetching
456 .add<lookup_instr_fetching_bytecode_size_from_bc_dec_settings, InteractionType::LookupGeneric>()
458 .add<lookup_instr_fetching_instr_abs_diff_positive_settings, InteractionType::LookupIntoIndexedByClk>()
460 .add<lookup_instr_fetching_pc_abs_diff_positive_settings, InteractionType::LookupGeneric>();
461
462} // namespace bb::avm2::tracegen
#define AVM_RETRIEVED_BYTECODES_TREE_INITIAL_SIZE
#define MAX_PUBLIC_CALLS_TO_UNIQUE_CONTRACT_CLASS_IDS
#define AVM_PC_SIZE_IN_BITS
#define GENERATOR_INDEX__PUBLIC_BYTECODE
#define MAX_PACKED_PUBLIC_BYTECODE_SIZE_IN_FIELDS
void process_retrieval(const simulation::EventEmitterInterface< simulation::BytecodeRetrievalEvent >::Container &events, TraceContainer &trace)
static const InteractionDefinition interactions
void process_decomposition(const simulation::EventEmitterInterface< simulation::BytecodeDecompositionEvent >::Container &events, TraceContainer &trace)
void process_hashing(const simulation::EventEmitterInterface< simulation::BytecodeHashingEvent >::Container &events, TraceContainer &trace)
void process_instruction_fetching(const simulation::EventEmitterInterface< simulation::InstructionFetchingEvent >::Container &events, TraceContainer &trace)
InteractionDefinition & add(auto &&... args)
static FF hash(const std::vector< FF > &input)
Hashes a vector of field elements.
Implements a parallelized batch insertion indexed tree Accepts template argument of the type of store...
TestTraceContainer trace
lookup_settings< lookup_instr_fetching_wire_instruction_info_settings_ > lookup_instr_fetching_wire_instruction_info_settings
lookup_settings< lookup_instr_fetching_bytes_from_bc_dec_settings_ > lookup_instr_fetching_bytes_from_bc_dec_settings
constexpr uint32_t DECOMPOSE_WINDOW_SIZE
lookup_settings< lookup_instr_fetching_tag_value_validation_settings_ > lookup_instr_fetching_tag_value_validation_settings
lookup_settings< lookup_bc_retrieval_class_id_derivation_settings_ > lookup_bc_retrieval_class_id_derivation_settings
lookup_settings< lookup_bc_retrieval_is_new_class_check_settings_ > lookup_bc_retrieval_is_new_class_check_settings
lookup_settings< lookup_bc_hashing_check_final_bytes_remaining_settings_ > lookup_bc_hashing_check_final_bytes_remaining_settings
const std::unordered_map< WireOpCode, WireInstructionSpec > WIRE_INSTRUCTION_SPEC
lookup_settings< lookup_bc_decomposition_bytes_are_bytes_settings_ > lookup_bc_decomposition_bytes_are_bytes_settings
AvmFlavorSettings::FF FF
Definition field.hpp:10
simulation::PublicDataTreeReadWriteEvent event