Barretenberg
The ZK-SNARK library at the core of Aztec
data_copy_trace.cpp
#include <cassert>
#include <cstdint>
#include <memory>

namespace bb::avm2::tracegen {

void DataCopyTraceBuilder::process(
    const simulation::EventEmitterInterface<simulation::DataCopyEvent>::Container& events, TraceContainer& trace)
{
    using C = Column;

    uint32_t row = 1;
    // When processing the events, we need to handle any potential errors and create the respective error columns.
    for (const auto& event : events) {
        // We first set the elements of the row that are unconditional, i.e. those set regardless of success/error.
        bool is_cd_copy = event.operation == simulation::DataCopyOperation::CD_COPY;
        bool is_rd_copy = event.operation == simulation::DataCopyOperation::RD_COPY;

        // TODO(ilyas): can optimize this, as we only need the inverse for CD_COPY.
        bool is_top_level = event.read_context_id == 0;
        FF parent_id_inv = is_top_level ? 0 : FF(event.read_context_id); // Will be inverted in batch later
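        // An inverse column like this one typically serves as the witness for a zero-check in the
        // constraint system: for non-zero x the prover supplies x_inv so that x * x_inv == 1 can be
        // enforced (the exact relation lives in the generated relation files, not shown on this page).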

        // While data copy size and data offset are guaranteed to be u32 at this point,
        // we cast to a wider integer type to detect overflows.
        uint64_t copy_size = static_cast<uint64_t>(event.data_copy_size);
        uint64_t data_offset = static_cast<uint64_t>(event.data_offset);
        uint64_t max_read_index = std::min(data_offset + copy_size, static_cast<uint64_t>(event.data_size));
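        // Reads are clamped to the end of the source data; destination slots beyond
        // max_read_index are filled by the zero-padding rows further down.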

        uint64_t max_read_addr = static_cast<uint64_t>(event.data_addr) + max_read_index;
        uint64_t max_write_addr = static_cast<uint64_t>(event.dst_addr) + copy_size;

        trace.set(row,
                  { {
                      // Unconditional values
                      { C::data_copy_clk, event.execution_clk },
                      { C::data_copy_sel_start, 1 },
                      { C::data_copy_sel_cd_copy, is_cd_copy ? 1 : 0 },
                      { C::data_copy_sel_cd_copy_start, is_cd_copy ? 1 : 0 },
                      { C::data_copy_sel_rd_copy, is_rd_copy ? 1 : 0 },
                      { C::data_copy_sel_rd_copy_start, is_rd_copy ? 1 : 0 },
                      { C::data_copy_thirty_two, 32 }, // Need this for range checks

                      { C::data_copy_src_context_id, event.read_context_id },
                      { C::data_copy_dst_context_id, event.write_context_id },

                      { C::data_copy_copy_size, event.data_copy_size },
                      { C::data_copy_offset, event.data_offset },

                      { C::data_copy_src_addr, event.data_addr },
                      { C::data_copy_src_data_size, event.data_size },
                      { C::data_copy_dst_addr, event.dst_addr },

                      { C::data_copy_is_top_level, is_top_level ? 1 : 0 },
                      { C::data_copy_parent_id_inv, parent_id_inv },

                      // Compute Max Read Index
                      { C::data_copy_offset_plus_size, data_offset + copy_size },
                      { C::data_copy_offset_plus_size_is_gt, data_offset + copy_size > event.data_size ? 1 : 0 },
                      { C::data_copy_max_read_index, max_read_index },

                      // Max Addresses
                      { C::data_copy_max_mem_addr, MAX_MEM_ADDR },
                      { C::data_copy_max_read_addr, max_read_addr },
                      { C::data_copy_max_write_addr, max_write_addr },

                  } });

        //////////////////////////////////////////
        // Memory Address Range Check
        //////////////////////////////////////////
        // We need to check that the read and write addresses are within the valid memory range.
        // Note: for enqueued calls there is no out-of-bounds read, since we read from a column.

        bool read_address_overflow = max_read_addr > MAX_MEM_ADDR;
        bool write_address_overflow = max_write_addr > MAX_MEM_ADDR;
        if (read_address_overflow || write_address_overflow) {
            trace.set(row,
                      { {
                          { C::data_copy_sel_end, 1 },
                          // Set the error flags - note we can be out of range for both reads and writes
                          { C::data_copy_src_out_of_range_err, read_address_overflow ? 1 : 0 },
                          { C::data_copy_dst_out_of_range_err, write_address_overflow ? 1 : 0 },
                          { C::data_copy_err, 1 },
                      } });
            row++;
            continue; // Go to the next event
        }

        auto reads_left = data_offset > max_read_index ? 0 : max_read_index - data_offset;
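        // reads_left counts the source elements still to be read; once it reaches zero, the
        // remaining rows are padding rows that write zeroes to the destination.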
103
105 // Check for Zero Sized Copy
107 // This has to happen outside of the next loop since we will not enter it if the copy size is zero
        if (copy_size == 0) {
            trace.set(row,
                      { {
                          { C::data_copy_sel_start_no_err, 1 },
                          { C::data_copy_sel_end, 1 },
                          { C::data_copy_sel_write_count_is_zero, 1 },
                      } });
            row++;
            continue; // Go to the next event
        }

        //////////////////////////////////////////
        // Process Data Copy Rows
        //////////////////////////////////////////
        for (uint32_t i = 0; i < event.calldata.size(); i++) {
            bool start = i == 0;
            auto current_copy_size = copy_size - i;
            bool end = (current_copy_size - 1) == 0;
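            // end marks the row that performs the final write (exactly one element left).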

            bool is_padding_row = reads_left == 0;

            // These are guaranteed not to overflow since we checked the read/write addresses above
            auto read_addr = event.data_addr + data_offset + i;
            bool read_cd_col = is_cd_copy && is_top_level && !is_padding_row;

            // Read from memory if this is not a padding row and we are either RD_COPY-ing or doing a nested CD_COPY
            bool sel_mem_read = !is_padding_row && (is_rd_copy || event.read_context_id != 0);
            FF value = is_padding_row ? 0 : event.calldata[i];

            trace.set(
                row,
                { {
                    { C::data_copy_clk, event.execution_clk },
                    { C::data_copy_sel_cd_copy, is_cd_copy ? 1 : 0 },
                    { C::data_copy_sel_rd_copy, is_rd_copy ? 1 : 0 },
                    { C::data_copy_thirty_two, 32 }, // Need this for range checks

                    { C::data_copy_src_context_id, event.read_context_id },
                    { C::data_copy_dst_context_id, event.write_context_id },
                    { C::data_copy_dst_addr, event.dst_addr + i },

                    { C::data_copy_sel_start_no_err, start ? 1 : 0 },
                    { C::data_copy_sel_end, end ? 1 : 0 },
                    { C::data_copy_copy_size, current_copy_size },
                    { C::data_copy_write_count_minus_one_inv, current_copy_size - 1 }, // Will be inverted in batch later

                    { C::data_copy_sel_mem_write, 1 },

                    { C::data_copy_is_top_level, is_top_level ? 1 : 0 },
                    { C::data_copy_parent_id_inv, parent_id_inv },

                    { C::data_copy_sel_mem_read, sel_mem_read ? 1 : 0 },
                    { C::data_copy_read_addr, read_addr },
                    { C::data_copy_read_addr_plus_one, read_cd_col ? read_addr + 1 : 0 },

                    { C::data_copy_reads_left_inv, reads_left }, // Will be inverted in batch later
                    { C::data_copy_padding, is_padding_row ? 1 : 0 },
                    { C::data_copy_value, value },

                    { C::data_copy_cd_copy_col_read, read_cd_col ? 1 : 0 },

                    // Reads Left
                    { C::data_copy_reads_left, reads_left },
                    { C::data_copy_offset_gt_max_read_index, (start && data_offset > max_read_index) ? 1 : 0 },

                    // Non-zero Copy Size
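                    // On start rows this stores copy_size itself; its batch inverse later witnesses
                    // copy_size != 0 (the copy_size == 0 case was already handled above).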
                    { C::data_copy_write_count_zero_inv, start ? FF(copy_size) : 0 }, // Will be inverted in batch later
                } });

            reads_left = reads_left == 0 ? 0 : reads_left - 1;
            row++;
        }
    }

    // Batch invert the columns.
    trace.invert_columns({ { C::data_copy_parent_id_inv,
                             C::data_copy_write_count_zero_inv,
                             C::data_copy_reads_left_inv,
                             C::data_copy_write_count_minus_one_inv } });
}
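
The invert_columns call above amortizes field inversion across the whole trace: the four columns are inverted together rather than entry by entry. Barretenberg's actual implementation is not shown on this page; the following is a minimal sketch of the standard Montgomery batch-inversion trick it refers to, under assumptions flagged in the comments (the Fp toy field and batch_invert are illustrative names, not the library's API). Zero entries are skipped, which matches the convention above of storing 0 where no inverse is needed.

#include <cstddef>
#include <cstdint>
#include <vector>

// Toy 64-bit prime field for illustration only (Goldilocks modulus);
// the real code uses Barretenberg's FF type.
struct Fp {
    static constexpr uint64_t P = 0xffffffff00000001ULL;
    uint64_t v = 0;
    Fp() = default;
    explicit Fp(uint64_t x) : v(x % P) {}
    Fp operator*(const Fp& o) const { return Fp(uint64_t((__uint128_t)v * o.v % P)); }
    Fp pow(uint64_t e) const
    {
        Fp r(1), b = *this;
        for (; e != 0; e >>= 1, b = b * b) {
            if (e & 1) {
                r = r * b;
            }
        }
        return r;
    }
    Fp inverse() const { return pow(P - 2); } // Fermat's little theorem; assumes v != 0.
};

// Montgomery batch inversion: one field inversion plus O(n) multiplications
// instead of n inversions. Zero entries are left as zero.
void batch_invert(std::vector<Fp>& xs)
{
    std::vector<Fp> prefix(xs.size());
    Fp acc(1);
    for (std::size_t i = 0; i < xs.size(); ++i) {
        prefix[i] = acc; // Product of all non-zero entries before index i.
        if (xs[i].v != 0) {
            acc = acc * xs[i];
        }
    }
    Fp inv = acc.inverse(); // The single expensive inversion.
    for (std::size_t i = xs.size(); i-- > 0;) {
        if (xs[i].v == 0) {
            continue; // 0 stays 0, as the *_inv columns expect.
        }
        Fp x_i = xs[i];
        xs[i] = inv * prefix[i]; // Now xs[i] == 1 / x_i.
        inv = inv * x_i;         // Peel x_i off the running inverse.
    }
}

With n non-zero entries this costs one inversion and roughly 3n multiplications, which is why the trace builder defers all inversions to a single pass.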

const InteractionDefinition DataCopyTraceBuilder::interactions =
    InteractionDefinition()
        // Enqueued Call Col Read
        .add<lookup_data_copy_col_read_settings, InteractionType::LookupGeneric>()
        // GT checks
        .add<lookup_data_copy_max_read_index_gt_settings, InteractionType::LookupGeneric>(Column::gt_sel)
        .add<lookup_data_copy_check_src_addr_in_range_settings, InteractionType::LookupGeneric>(Column::gt_sel)
        .add<lookup_data_copy_check_dst_addr_in_range_settings, InteractionType::LookupGeneric>(Column::gt_sel)
        .add<lookup_data_copy_offset_gt_max_read_index_settings, InteractionType::LookupGeneric>(Column::gt_sel);

} // namespace bb::avm2::tracegen
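
Why raw values, rather than inverses, are written into the *_inv columns and inverted afterwards: in this style of trace an inverse column usually acts as the witness for a zero-check, e.g. data_copy_parent_id_inv witnessing data_copy_is_top_level == (read_context_id == 0). The actual constraints live in the generated relation files, not on this page; the sketch below uses hypothetical names and a toy modulus to show the standard two-constraint pattern such relations follow.

#include <cstdint>

constexpr uint64_t P = 0xffffffff00000001ULL; // Toy modulus for the example.

// One row's worth of zero-check columns, in the spirit of
// (read_context_id, data_copy_parent_id_inv, data_copy_is_top_level).
struct ZeroCheckRow {
    uint64_t x;       // Value being tested against zero.
    uint64_t x_inv;   // Prover-supplied witness; 0 when x == 0.
    uint64_t is_zero; // Claimed flag: 1 if x == 0, else 0.
};

// The standard constraint pair:
//   (1) x * x_inv == 1 - is_zero
//   (2) x * is_zero == 0
// If x != 0, (2) forces is_zero == 0 and then (1) forces x_inv == x^-1.
// If x == 0, (1) forces is_zero == 1, since no x_inv makes 0 * x_inv == 1.
bool zero_check_holds(const ZeroCheckRow& r)
{
    uint64_t prod = uint64_t((__uint128_t)r.x * r.x_inv % P);
    bool c1 = prod == (1 + P - r.is_zero) % P;
    bool c2 = uint64_t((__uint128_t)r.x * r.is_zero % P) == 0;
    return c1 && c2;
}

Batch inversion (previous sketch) is what makes populating such witness columns for every trace row cheap.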