sha256_trace.cpp

#include <algorithm>
#include <any>
#include <concepts>
#include <cstddef>
#include <cstdint>
#include <memory>
#include <ranges>
#include <stdexcept>

namespace bb::avm2::tracegen {

namespace {

// These are some useful groupings of columns for the SHA256 trace that we will iterate over.
constexpr std::array<Column, 8> state_cols = {
    Column::sha256_a, Column::sha256_b, Column::sha256_c, Column::sha256_d,
    Column::sha256_e, Column::sha256_f, Column::sha256_g, Column::sha256_h,
};

constexpr std::array<Column, 8> init_state_cols = {
    Column::sha256_init_a, Column::sha256_init_b, Column::sha256_init_c, Column::sha256_init_d,
    Column::sha256_init_e, Column::sha256_init_f, Column::sha256_init_g, Column::sha256_init_h,
};

constexpr std::array<Column, 16> w_cols = {
    Column::sha256_helper_w0, Column::sha256_helper_w1, Column::sha256_helper_w2, Column::sha256_helper_w3,
    Column::sha256_helper_w4, Column::sha256_helper_w5, Column::sha256_helper_w6, Column::sha256_helper_w7,
    Column::sha256_helper_w8, Column::sha256_helper_w9, Column::sha256_helper_w10, Column::sha256_helper_w11,
    Column::sha256_helper_w12, Column::sha256_helper_w13, Column::sha256_helper_w14, Column::sha256_helper_w15,
};

constexpr std::array<Column, 16> output_cols = {
    Column::sha256_output_a_lhs, Column::sha256_output_a_rhs, Column::sha256_output_b_lhs, Column::sha256_output_b_rhs,
    Column::sha256_output_c_lhs, Column::sha256_output_c_rhs, Column::sha256_output_d_lhs, Column::sha256_output_d_rhs,
    Column::sha256_output_e_lhs, Column::sha256_output_e_rhs, Column::sha256_output_f_lhs, Column::sha256_output_f_rhs,
    Column::sha256_output_g_lhs, Column::sha256_output_g_rhs, Column::sha256_output_h_lhs, Column::sha256_output_h_rhs,
};

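// The 64 round constants K[0..63] from FIPS 180-4: the first 32 bits of the fractional parts of the
// cube roots of the first 64 prime numbers.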
constexpr std::array<uint32_t, 64> round_constants = {
    0x428a2f98, 0x71374491, 0xb5c0fbcf, 0xe9b5dba5, 0x3956c25b, 0x59f111f1, 0x923f82a4, 0xab1c5ed5,
    0xd807aa98, 0x12835b01, 0x243185be, 0x550c7dc3, 0x72be5d74, 0x80deb1fe, 0x9bdc06a7, 0xc19bf174,
    0xe49b69c1, 0xefbe4786, 0x0fc19dc6, 0x240ca1cc, 0x2de92c6f, 0x4a7484aa, 0x5cb0a9dc, 0x76f988da,
    0x983e5152, 0xa831c66d, 0xb00327c8, 0xbf597fc7, 0xc6e00bf3, 0xd5a79147, 0x06ca6351, 0x14292967,
    0x27b70a85, 0x2e1b2138, 0x4d2c6dfc, 0x53380d13, 0x650a7354, 0x766a0abb, 0x81c2c92e, 0x92722c85,
    0xa2bfe8a1, 0xa81a664b, 0xc24b8b70, 0xc76c51a3, 0xd192e819, 0xd6990624, 0xf40e3585, 0x106aa070,
    0x19a4c116, 0x1e376c08, 0x2748774c, 0x34b0bcb5, 0x391c0cb3, 0x4ed8aa4a, 0x5b9cca4f, 0x682e6ff3,
    0x748f82ee, 0x78a5636f, 0x84c87814, 0x8cc70208, 0x90befffa, 0xa4506ceb, 0xbef9a3f7, 0xc67178f2
};

} // namespace

// These are helper functions to iterate and set repetitive columns in the trace.
void Sha256TraceBuilder::set_helper_cols(const std::array<uint32_t, 16>& prev_w_helpers, TraceContainer& trace)
{
    for (size_t i = 0; i < 16; i++) {
        trace.set(row, { { { w_cols[i], prev_w_helpers[i] } } });
    }
}

void Sha256TraceBuilder::set_state_cols(const std::array<uint32_t, 8>& state, TraceContainer& trace)
{
    for (size_t i = 0; i < 8; i++) {
        trace.set(row, { { { state_cols[i], state[i] } } });
    }
}

void Sha256TraceBuilder::set_init_state_cols(const std::array<uint32_t, 8>& init_state, TraceContainer& trace)
{
    for (size_t i = 0; i < 8; i++) {
        trace.set(row, { { { init_state_cols[i], init_state[i] } } });
    }
}

// Decomposes a at bit position b into two limbs, a = a_lhs * 2^b + a_rhs, and inserts witness data into the trace.
void Sha256TraceBuilder::into_limbs_with_witness(
    uint64_t a, const uint8_t b, Column c_lhs, Column c_rhs, TraceContainer& trace)
{
    uint32_t a_lhs = static_cast<uint32_t>(a >> b);
    uint32_t a_rhs = static_cast<uint32_t>(a) & static_cast<uint32_t>((static_cast<uint64_t>(1) << b) - 1);
    trace.set(row, { { { c_lhs, a_lhs }, { c_rhs, a_rhs } } });
}
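
// A minimal standalone sketch of the recomposition identity the limb columns encode, assuming only the
// standard includes above; the helper and test value below are illustrative and are not used by the
// trace builder itself.
namespace {
constexpr bool limb_split_recomposes(uint64_t a, uint8_t b)
{
    // Split exactly as into_limbs_with_witness does, then recombine as the circuit would.
    uint64_t lhs = a >> b;
    uint64_t rhs = a & ((static_cast<uint64_t>(1) << b) - 1);
    return a == (lhs << b) + rhs;
}
static_assert(limb_split_recomposes(0x123456789abcdef0ULL, 32), "limb decomposition must recompose to a");
} // namespace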

// Performs 32-bit rotation with witness data inserted into the trace.
uint32_t Sha256TraceBuilder::ror_with_witness(
    const uint32_t val, const uint8_t shift, Column c_result, Column c_lhs, Column c_rhs, TraceContainer& trace)
{
    auto result = (val >> (shift & 31U)) | (val << (32U - (shift & 31U)));
    into_limbs_with_witness(val, shift, c_lhs, c_rhs, trace);
    trace.set(c_result, row, result);
    return result;
}
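
// Illustrative worked example of the relation those limbs witness, assuming val = 0x12345678 and
// shift = 7: lhs = val >> 7 = 0x2468ac and rhs = val & 0x7f = 0x78, so val == lhs * 2^7 + rhs and
// ror(val, 7) == rhs * 2^25 + lhs == 0xf02468ac.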

// Performs 32-bit shift right with witness data inserted into the trace.
uint32_t Sha256TraceBuilder::shr_with_witness(
    const uint32_t val, const uint8_t shift, Column c_result, Column c_lhs, Column c_rhs, TraceContainer& trace)
{
    auto result = val >> shift;
    into_limbs_with_witness(val, shift, c_lhs, c_rhs, trace);
    trace.set(c_result, row, result);
    return result;
}
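
// Note: for a right shift the result is exactly the lhs limb of the decomposition above, while the rhs
// limb carries the discarded low bits so they can be range checked.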

// Computes and returns the message schedule (w) value for that round, and inserts witness data into the trace.
uint32_t Sha256TraceBuilder::compute_w_with_witness(const std::array<uint32_t, 16>& prev_w_helpers,
                                                    TraceContainer& trace)
{
    using C = Column;

    // Computing w[j] := w[j-16] + s0 + w[j-7] + s1

    // Step (1) s0 := ror(w[i - 15], 7) ^ ror(w[i - 15], 18) ^ (w[i - 15] >> 3);
    // Compute ror(w[i - 15], 7)
    uint32_t rot_7 =
        ror_with_witness(prev_w_helpers[1], 7, C::sha256_w_15_rotr_7, C::sha256_lhs_w_7, C::sha256_rhs_w_7, trace);
    trace.set(C::sha256_two_pow_7, row, 128); // Store 2^7 for reference
    // Compute ror(w[i - 15], 18)
    uint32_t rot_18 =
        ror_with_witness(prev_w_helpers[1], 18, C::sha256_w_15_rotr_18, C::sha256_lhs_w_18, C::sha256_rhs_w_18, trace);
    trace.set(C::sha256_two_pow_18, row, 262144); // Store 2^18 for reference
    // Compute (w[i - 15] >> 3)
    uint32_t shift_3 =
        shr_with_witness(prev_w_helpers[1], 3, C::sha256_w_15_rshift_3, C::sha256_lhs_w_3, C::sha256_rhs_w_3, trace);
    trace.set(C::sha256_two_pow_3, row, 8); // Store 2^3 for reference

    // Compute ror(w[i - 15], 7) ^ ror(w[i - 15], 18)
    trace.set(C::sha256_w_15_rotr_7_xor_w_15_rotr_18, row, rot_7 ^ rot_18);
    // Compute s0;
    uint32_t w_s_0 = rot_7 ^ rot_18 ^ shift_3;
    trace.set(C::sha256_w_s_0, row, w_s_0);

    // Step (2) s1 := ror(w[i - 2], 17) ^ ror(w[i - 2], 19) ^ (w[i - 2] >> 10);
    // Compute ror(w[i - 2], 17)
    uint32_t rot_17 =
        ror_with_witness(prev_w_helpers[14], 17, C::sha256_w_2_rotr_17, C::sha256_lhs_w_17, C::sha256_rhs_w_17, trace);
    trace.set(C::sha256_two_pow_17, row, 131072); // Store 2^17 for reference
    // Compute ror(w[i - 2], 19)
    uint32_t rot_19 =
        ror_with_witness(prev_w_helpers[14], 19, C::sha256_w_2_rotr_19, C::sha256_lhs_w_19, C::sha256_rhs_w_19, trace);
    trace.set(C::sha256_two_pow_19, row, 524288); // Store 2^19 for reference
    // Compute (w[i - 2] >> 10)
    uint32_t shift_10 = shr_with_witness(
        prev_w_helpers[14], 10, C::sha256_w_2_rshift_10, C::sha256_lhs_w_10, C::sha256_rhs_w_10, trace);
    trace.set(C::sha256_two_pow_10, row, 1024); // Store 2^10 for reference

    // Compute ror(w[i - 2], 17) ^ ror(w[i - 2], 19)
    trace.set(C::sha256_w_2_rotr_17_xor_w_2_rotr_19, row, rot_17 ^ rot_19);
    // Compute s1;
    uint32_t w_s_1 = rot_17 ^ rot_19 ^ shift_10;
    trace.set(C::sha256_w_s_1, row, w_s_1);

    // Compute w := w[j-16] + s0 + w[j-7] + s1 (prev_w_helpers[0] and prev_w_helpers[9] respectively)
    // The sum of four 32-bit terms can overflow 32 bits (it is bounded by 2^34), so we accumulate it in a
    // 64-bit integer and reduce modulo 2^32 via the limb decomposition below.
    uint64_t computed_w =
        prev_w_helpers[0] + static_cast<uint64_t>(w_s_0) + prev_w_helpers[9] + static_cast<uint64_t>(w_s_1);

    into_limbs_with_witness(computed_w, 32, C::sha256_computed_w_lhs, C::sha256_computed_w_rhs, trace);
    return static_cast<uint32_t>(computed_w);
}
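
// A reference sketch of the same message-schedule update without any witness columns (standard SHA-256,
// FIPS 180-4). The names below are illustrative helpers, not part of the trace builder; they are only
// meant for cross-checking compute_w_with_witness on concrete values.
namespace {
constexpr uint32_t ref_ror32(uint32_t v, uint32_t s) { return (v >> s) | (v << (32 - s)); }
constexpr uint32_t ref_next_w(uint32_t w_j_16, uint32_t w_j_15, uint32_t w_j_7, uint32_t w_j_2)
{
    uint32_t s0 = ref_ror32(w_j_15, 7) ^ ref_ror32(w_j_15, 18) ^ (w_j_15 >> 3);
    uint32_t s1 = ref_ror32(w_j_2, 17) ^ ref_ror32(w_j_2, 19) ^ (w_j_2 >> 10);
    return w_j_16 + s0 + w_j_7 + s1; // uint32_t addition already reduces modulo 2^32
}
static_assert(ref_next_w(0, 0, 0, 0) == 0, "message schedule of an all-zero block starts at zero");
} // namespace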

// Performs the SHA-256 compression function for a single round and inserts witness data into the trace.
std::array<uint32_t, 8> Sha256TraceBuilder::compute_compression_with_witness(const std::array<uint32_t, 8>& state,
                                                                             uint32_t round_w,
                                                                             uint32_t round_constant,
                                                                             uint32_t row,
                                                                             TraceContainer& trace)
{
    using C = Column;

    // Apply SHA-256 compression function to the message schedule
    // Compute S1 := ror(e, 6U) ^ ror(e, 11U) ^ ror(e, 25U);
    // Compute ror(e, 6)
    uint32_t rot_6 = ror_with_witness(state[4], 6, C::sha256_e_rotr_6, C::sha256_lhs_e_6, C::sha256_rhs_e_6, trace);
    trace.set(C::sha256_two_pow_6, row, 64); // Store 2^6 for reference
    // Compute ror(e, 11)
    uint32_t rot_11 =
        ror_with_witness(state[4], 11, C::sha256_e_rotr_11, C::sha256_lhs_e_11, C::sha256_rhs_e_11, trace);
    trace.set(C::sha256_two_pow_11, row, 2048); // Store 2^11 for reference
    // Compute ror(e, 25)
    uint32_t rot_25 =
        ror_with_witness(state[4], 25, C::sha256_e_rotr_25, C::sha256_lhs_e_25, C::sha256_rhs_e_25, trace);
    trace.set(C::sha256_two_pow_25, row, 33554432); // Store 2^25 for reference

    // Compute ror(e, 6) ^ ror(e, 11)
    trace.set(C::sha256_e_rotr_6_xor_e_rotr_11, row, rot_6 ^ rot_11);
    // Compute S1, this can't overflow but we expand to uint64_t for later use
    uint64_t S1 = rot_6 ^ rot_11 ^ rot_25;
    trace.set(C::sha256_s_1, row, S1);

    // Compute ch := (e & f) ^ (~e & g);
    // Compute ~e
    uint32_t not_e = ~state[4];
    trace.set(C::sha256_not_e, row, not_e);
    // Compute e & f
    uint32_t e_and_f = state[4] & state[5];
    trace.set(C::sha256_e_and_f, row, e_and_f);
    // Compute ~e & g
    uint32_t not_e_and_g = not_e & state[6];
    trace.set(C::sha256_not_e_and_g, row, not_e_and_g);
    // Compute (e & f) ^ (~e & g)
    uint64_t ch = e_and_f ^ not_e_and_g;
    trace.set(C::sha256_ch, row, ch);

    // Compute S0 := ror(a, 2U) ^ ror(a, 13U) ^ ror(a, 22U);
    // Compute ror(a, 2)
    uint32_t rot_2 = ror_with_witness(state[0], 2, C::sha256_a_rotr_2, C::sha256_lhs_a_2, C::sha256_rhs_a_2, trace);
    trace.set(C::sha256_two_pow_2, row, 4); // Store 2^2 for reference
    // Compute ror(a, 13)
    uint32_t rot_13 =
        ror_with_witness(state[0], 13, C::sha256_a_rotr_13, C::sha256_lhs_a_13, C::sha256_rhs_a_13, trace);
    trace.set(C::sha256_two_pow_13, row, 8192); // Store 2^13 for reference
    // Compute ror(a, 22)
    uint32_t rot_22 =
        ror_with_witness(state[0], 22, C::sha256_a_rotr_22, C::sha256_lhs_a_22, C::sha256_rhs_a_22, trace);
    trace.set(C::sha256_two_pow_22, row, 4194304); // Store 2^22 for reference

    // Compute ror(a, 2) ^ ror(a, 13)
    trace.set(C::sha256_a_rotr_2_xor_a_rotr_13, row, rot_2 ^ rot_13);
    // Compute S0, this can't overflow but we expand to uint64_t for later use
    uint64_t S0 = rot_2 ^ rot_13 ^ rot_22;
    trace.set(C::sha256_s_0, row, S0);

    // Compute Maj := (a & b) ^ (a & c) ^ (b & c);
    // Compute a & b
    uint32_t a_and_b = state[0] & state[1];
    trace.set(C::sha256_a_and_b, row, a_and_b);
    // Compute a & c
    uint32_t a_and_c = state[0] & state[2];
    trace.set(C::sha256_a_and_c, row, a_and_c);
    // Compute b & c
    uint32_t b_and_c = state[1] & state[2];
    trace.set(C::sha256_b_and_c, row, b_and_c);
    // Compute (a & b) ^ (a & c)
    trace.set(C::sha256_a_and_b_xor_a_and_c, row, a_and_b ^ a_and_c);
    // Compute Maj, this is expanded to uint64_t to detect later overflows
    uint64_t maj = a_and_b ^ a_and_c ^ b_and_c;
    trace.set(C::sha256_maj, row, maj);

    // Compute the temp values; these need to be 64-bit integers so the additions can be reduced modulo 2^32
    uint64_t temp1 = static_cast<uint64_t>(state[7]) + S1 + ch + round_constant + round_w;
    uint64_t temp2 = S0 + maj;
    uint64_t next_a = temp1 + temp2;
    into_limbs_with_witness(next_a, 32, C::sha256_next_a_lhs, C::sha256_next_a_rhs, trace);
    trace.set(C::sha256_round_constant, row, round_constant);
    uint32_t a = static_cast<uint32_t>(next_a);

    // Additions can overflow 32 bits so we perform modulo reduction
    uint64_t next_e = state[3] + temp1;
    into_limbs_with_witness(next_e, 32, C::sha256_next_e_lhs, C::sha256_next_e_rhs, trace);
    uint32_t e = static_cast<uint32_t>(next_e);

    return {
        a,        /*a = temp1 + temp2*/
        state[0], /*b = a*/
        state[1], /*c = b*/
        state[2], /*d = c*/
        e,        /*e = d + temp1*/
        state[4], /*f = e*/
        state[5], /*g = f*/
        state[6], /*h = g*/
    };
}
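
// A reference sketch of one untraced compression round (standard SHA-256, FIPS 180-4), assuming
// std::array is available through the project includes, as it already is for the column groupings
// above. The helper below is illustrative only and is not used by the trace builder.
namespace {
[[maybe_unused]] constexpr std::array<uint32_t, 8> ref_compression_round(
    const std::array<uint32_t, 8>& s, uint32_t w, uint32_t k)
{
    auto ror = [](uint32_t v, uint32_t n) { return (v >> n) | (v << (32 - n)); };
    uint32_t S1 = ror(s[4], 6) ^ ror(s[4], 11) ^ ror(s[4], 25);
    uint32_t ch = (s[4] & s[5]) ^ (~s[4] & s[6]);
    uint32_t temp1 = s[7] + S1 + ch + k + w; // wraps modulo 2^32, matching the limb reduction above
    uint32_t S0 = ror(s[0], 2) ^ ror(s[0], 13) ^ ror(s[0], 22);
    uint32_t maj = (s[0] & s[1]) ^ (s[0] & s[2]) ^ (s[1] & s[2]);
    uint32_t temp2 = S0 + maj;
    return { temp1 + temp2, s[0], s[1], s[2], s[3] + temp1, s[4], s[5], s[6] };
}
} // namespace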

// Computes the final output from the final round state and inserts witness data into the trace.
void Sha256TraceBuilder::compute_sha256_output(const std::array<uint32_t, 8>& out_state,
                                               const std::array<uint32_t, 8>& init_state,
                                               TraceContainer& trace)
{
    uint32_t counter = 0;
    for (const auto& [init, state] : zip_view(init_state, out_state)) {
        uint64_t output = static_cast<uint64_t>(init) + static_cast<uint64_t>(state);
        into_limbs_with_witness(output, 32, output_cols[counter], output_cols[counter + 1], trace);
        counter += 2;
    }
}
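
// For each word, the rhs limb is the 32-bit digest word ((init + state) mod 2^32) and the lhs limb is
// the single carry bit that the modular addition discards.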

void Sha256TraceBuilder::process(
    const simulation::EventEmitterInterface<simulation::Sha256CompressionEvent>::Container& events,
    TraceContainer& trace)
{
    using C = Column;

    for (const auto& event : events) {

        // Memory Components of SHA-256 Compression Function
        // Upcast addresses to uint64_t to avoid overflow issues
        uint64_t state_addr = static_cast<uint64_t>(event.state_addr);
        uint64_t input_addr = static_cast<uint64_t>(event.input_addr);
        uint64_t output_addr = static_cast<uint64_t>(event.output_addr);

        uint64_t max_state_addr = state_addr + 7;   // State is 8 elements
        uint64_t max_input_addr = input_addr + 15;  // Input is 16 elements
        uint64_t max_output_addr = output_addr + 7; // Output is 8 elements

        // These are unconditional values that must always be set at the start
        trace.set(row,
                  { {
                      { C::sha256_sel, 1 },
                      { C::sha256_start, 1 },
                      { C::sha256_execution_clk, event.execution_clk },
                      { C::sha256_space_id, event.space_id },
                      { C::sha256_u32_tag, static_cast<uint8_t>(MemoryTag::U32) },
                      // Operand Addresses
                      { C::sha256_state_addr, state_addr },
                      { C::sha256_input_addr, input_addr },
                      { C::sha256_output_addr, output_addr },
                      // Helpers
                      { C::sha256_max_mem_addr, AVM_HIGHEST_MEM_ADDRESS },
                      { C::sha256_max_state_addr, max_state_addr },
                      { C::sha256_max_input_addr, max_input_addr },
                      { C::sha256_max_output_addr, max_output_addr },
                      { C::sha256_input_rounds_rem, 16 }, // Number of inputs
                      { C::sha256_sel_is_input_round, 1 },
                      { C::sha256_rounds_remaining, 64 }, // Number of Sha256 Rounds
                  } });

        // Error Handling - Memory Out of Range
        bool state_out_of_range = max_state_addr > AVM_HIGHEST_MEM_ADDRESS;
        bool input_out_of_range = max_input_addr > AVM_HIGHEST_MEM_ADDRESS;
        bool output_out_of_range = max_output_addr > AVM_HIGHEST_MEM_ADDRESS;

        bool out_of_range_err = output_out_of_range || input_out_of_range || state_out_of_range;
        if (out_of_range_err) {
            trace.set(row,
                      { {
                          // Error flags
                          { C::sha256_sel_state_out_of_range_err, state_out_of_range ? 1 : 0 },
                          { C::sha256_sel_input_out_of_range_err, input_out_of_range ? 1 : 0 },
                          { C::sha256_sel_output_out_of_range_err, output_out_of_range ? 1 : 0 },
                          { C::sha256_mem_out_of_range_err, 1 },
                          { C::sha256_err, 1 },   // Set the error flag
                          { C::sha256_latch, 1 }, // Latch is set on error
                      } });
            row++;
            continue; // Skip to the next event if we have an out of range error
        }
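        // Note: an event that failed the range checks above consumes exactly one trace row; the error is
        // latched on its start row and no memory or round rows are emitted for it.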

        // Load Initial State from Memory
        // If we get here we are safe to load the memory. We split this between parallel and sequential
        // loading: state is loaded in parallel, whilst inputs are loaded sequentially.

        // Since we treat them as separate temporality groups, if there is an error in the state loading, we will not
        // load the input.
        trace.set(row,
                  { {
                      // State Loading Selectors
                      { C::sha256_sel_mem_state_or_output, 1 },
                      // State Addresses
                      { C::sha256_memory_address_0_, state_addr },
                      { C::sha256_memory_address_1_, state_addr + 1 },
                      { C::sha256_memory_address_2_, state_addr + 2 },
                      { C::sha256_memory_address_3_, state_addr + 3 },
                      { C::sha256_memory_address_4_, state_addr + 4 },
                      { C::sha256_memory_address_5_, state_addr + 5 },
                      { C::sha256_memory_address_6_, state_addr + 6 },
                      { C::sha256_memory_address_7_, state_addr + 7 },
                      // State Values
                      { C::sha256_memory_register_0_, event.state[0].as_ff() },
                      { C::sha256_memory_register_1_, event.state[1].as_ff() },
                      { C::sha256_memory_register_2_, event.state[2].as_ff() },
                      { C::sha256_memory_register_3_, event.state[3].as_ff() },
                      { C::sha256_memory_register_4_, event.state[4].as_ff() },
                      { C::sha256_memory_register_5_, event.state[5].as_ff() },
                      { C::sha256_memory_register_6_, event.state[6].as_ff() },
                      { C::sha256_memory_register_7_, event.state[7].as_ff() },
                      // Values need to match initial state of sha256 compression
                      { C::sha256_init_a, event.state[0].as_ff() },
                      { C::sha256_init_b, event.state[1].as_ff() },
                      { C::sha256_init_c, event.state[2].as_ff() },
                      { C::sha256_init_d, event.state[3].as_ff() },
                      { C::sha256_init_e, event.state[4].as_ff() },
                      { C::sha256_init_f, event.state[5].as_ff() },
                      { C::sha256_init_g, event.state[6].as_ff() },
                      { C::sha256_init_h, event.state[7].as_ff() },
                      // State Memory Tags
                      { C::sha256_memory_tag_0_, static_cast<uint8_t>(event.state[0].get_tag()) },
                      { C::sha256_memory_tag_1_, static_cast<uint8_t>(event.state[1].get_tag()) },
                      { C::sha256_memory_tag_2_, static_cast<uint8_t>(event.state[2].get_tag()) },
                      { C::sha256_memory_tag_3_, static_cast<uint8_t>(event.state[3].get_tag()) },
                      { C::sha256_memory_tag_4_, static_cast<uint8_t>(event.state[4].get_tag()) },
                      { C::sha256_memory_tag_5_, static_cast<uint8_t>(event.state[5].get_tag()) },
                      { C::sha256_memory_tag_6_, static_cast<uint8_t>(event.state[6].get_tag()) },
                      { C::sha256_memory_tag_7_, static_cast<uint8_t>(event.state[7].get_tag()) },
                  } });


        // Check for Tag Errors in State
        bool invalid_state_tag_err = std::ranges::any_of(
            event.state, [](const MemoryValue& state) { return state.get_tag() != MemoryTag::U32; });

        if (invalid_state_tag_err) {
            // This is the more efficient batched tag check we perform in the circuit
            uint64_t batched_check = 0;
            // Batch the state tag checks
            for (uint32_t i = 0; i < event.state.size(); i++) {
                batched_check |=
                    (static_cast<uint64_t>(event.state[i].get_tag()) - static_cast<uint64_t>(MemoryTag::U32))
                    << (i * 3);
            }
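            // Each (tag - U32) difference is shifted to a distinct offset and OR-ed in, so batched_check is
            // non-zero exactly when at least one state tag differs from U32; the field inverse stored below
            // witnesses that it is non-zero.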
            trace.set(row,
                      { {
                          { C::sha256_sel_invalid_state_tag_err, 1 },
                          { C::sha256_batch_tag_inv, FF(batched_check).invert() },
                          { C::sha256_latch, 1 },
                          { C::sha256_err, 1 }, // Set the error flag
                      } });

            row++;
            continue; // Skip to the next event if we have an invalid state tag error
        }

        // Load Hash inputs and check for tag errors
        // The input vector is expected to contain 16 elements, each of which is expected to be a 32-bit value.
        // If during simulation we encountered an invalid tag, it will have been the last element we retrieved
        // before we threw an error - so it will be the last element in the input vector.
        // Therefore it is sufficient to check the tag of the last element.
        bool invalid_tag_err = event.input.back().get_tag() != MemoryTag::U32;

        // Note that if we encountered an invalid tag error, the row that loaded the invalid tag needs to contain
        // sel_invalid_input_row_tag_err, and all the input rows need to contain sel_invalid_input_tag_err.
        // The former is used to constrain the specific error, while the latter is used to propagate the error
        // to the start row (to communicate back to execution) and to turn off any computation constraints.
        for (uint32_t i = 0; i < event.input.size(); i++) {
            uint32_t input_rounds_rem = 16 - i;
            FF input_rounds_rem_inv = input_rounds_rem == 0 ? 0 : FF(input_rounds_rem).invert();

            MemoryValue round_input = event.input[i];
            FF input_tag = FF(static_cast<uint8_t>(round_input.get_tag()));
            FF expected_tag = FF(static_cast<uint8_t>(MemoryTag::U32));
            FF input_tag_diff = input_tag - expected_tag;
            FF input_tag_diff_inv = input_tag_diff == 0 ? 0 : input_tag_diff.invert();

            bool is_last = (i == event.input.size() - 1);
            trace.set(row + i,
                      { {
                          { C::sha256_sel, 1 },
                          // Propagated Fields
                          { C::sha256_execution_clk, event.execution_clk },
                          { C::sha256_space_id, event.space_id },
                          { C::sha256_output_addr, output_addr },
                          { C::sha256_sel_is_input_round, 1 },
                          { C::sha256_u32_tag, expected_tag },
                          { C::sha256_sel_read_input_from_memory, 1 },
                          // Input Rounds Control Flow
                          { C::sha256_input_rounds_rem, input_rounds_rem },
                          { C::sha256_input_rounds_rem_inv, input_rounds_rem_inv },
                          { C::sha256_input_addr, input_addr + i },
                          { C::sha256_input, round_input.as_ff() },
                          { C::sha256_input_tag, input_tag },
                          { C::sha256_input_tag_diff_inv, input_tag_diff_inv },
                          // Set input value
                          { C::sha256_w, round_input.as_ff() },
                          // Error Columns
                          // Propagated tag error columns
                          { C::sha256_sel_invalid_input_tag_err, invalid_tag_err ? 1 : 0 },
                          // Invalid Row Tag Error Columns
                          { C::sha256_sel_invalid_input_row_tag_err, (is_last && invalid_tag_err) ? 1 : 0 },
                          { C::sha256_err, invalid_tag_err ? 1 : 0 },
                          { C::sha256_latch, (is_last && invalid_tag_err) ? 1 : 0 },
                      } });
        }

        if (invalid_tag_err) {
            // We need to increment the row counter for the next event (since we may have added rows for input loading)
            row += event.input.size();
            continue;
        }

        // If we get to this point, we are safe to proceed with the SHA-256 compression function
        // and we won't encounter any more errors

        // Execute SHA-256 Compression Function
        std::array<uint32_t, 8> state;
        std::ranges::transform(event.state.begin(), event.state.end(), state.begin(), [](const MemoryValue& val) {
            return val.as<uint32_t>();
        });

        std::array<uint32_t, 16> prev_w_helpers;
        std::ranges::transform(event.input.begin(),
                               event.input.end(),
                               prev_w_helpers.begin(),
                               [](const MemoryValue& val) { return val.as<uint32_t>(); });
        std::array<uint32_t, 8> round_state = state;

        // Each event results in 65 rows in the trace.
        // 64 rows for the 64 rounds of the SHA-256 compression function
        // 1 row for the final state
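        // (The start row and the 16 input-loading rows do not add to this count: they share their row
        // indices with rounds 0-15.)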

        // Begin the rounds loop
        for (size_t i = 0; i < 64; i++) {
            // Detect if we are still using the inputs for values of w
            bool is_an_input_round = i < 16;
            // Used to check that we have a non-zero number of rounds remaining
            FF inv = FF(64 - i).invert();
            uint32_t round_w =
                is_an_input_round ? event.input[i].as<uint32_t>() : compute_w_with_witness(prev_w_helpers, trace);
            trace.set(row,
                      { {
                          { C::sha256_sel, 1 },
                          // Propagated Fields
                          { C::sha256_execution_clk, event.execution_clk },
                          { C::sha256_space_id, event.space_id },
                          { C::sha256_output_addr, output_addr },
                          { C::sha256_u32_tag, static_cast<uint8_t>(MemoryTag::U32) },
                          { C::sha256_two_pow_32, 1UL << 32 },
                          // For round selectors
                          { C::sha256_xor_sel, 2 },
                          { C::sha256_perform_round, 1 },
                          { C::sha256_round_count, i },
                          { C::sha256_rounds_remaining, 64 - i },
                          { C::sha256_rounds_remaining_inv, inv },
                          { C::sha256_w, round_w },
                          { C::sha256_sel_compute_w, is_an_input_round ? 0 : 1 },
                      } });
            // Set the init state columns - propagated down
            set_init_state_cols(state, trace);
            // Set the state columns
            set_state_cols(round_state, trace);
            // Set the round columns
            set_helper_cols(prev_w_helpers, trace);

            // Apply SHA-256 compression function to the message schedule and update the state
            round_state = compute_compression_with_witness(round_state, round_w, round_constants[i], row, trace);

            // Update prev_w_helpers: shift all the values one position to the left and append the new
            // round_w at the end.
            for (size_t j = 0; j < 15; j++) {
                prev_w_helpers[j] = prev_w_helpers[j + 1];
            }
            prev_w_helpers[15] = round_w;

            row++;
        }

        // Set the final row
        trace.set(row,
                  { {
                      { C::sha256_latch, 1 },
                      { C::sha256_sel, 1 },
                      { C::sha256_xor_sel, 2 },
                      { C::sha256_round_count, 64 },
                  } });

        // Set the init state columns - propagated down
        set_init_state_cols(state, trace);
        // Set the state columns
        set_state_cols(round_state, trace);
        // Set the round columns
        set_helper_cols(prev_w_helpers, trace);
        // Compute the output from the final round state
        compute_sha256_output(round_state, state, trace);

        // Write output memory
        trace.set(row,
                  { {
                      // Memory Fields
                      { C::sha256_execution_clk, event.execution_clk },
                      { C::sha256_space_id, event.space_id },
                      { C::sha256_sel_mem_state_or_output, 1 },
                      { C::sha256_rw, 1 }, // Writing output
                      { C::sha256_u32_tag, static_cast<uint8_t>(MemoryTag::U32) },
                      { C::sha256_two_pow_32, 1UL << 32 },
                      { C::sha256_output_addr, output_addr },
                      // Output Addresses
                      { C::sha256_memory_address_0_, output_addr },
                      { C::sha256_memory_address_1_, output_addr + 1 },
                      { C::sha256_memory_address_2_, output_addr + 2 },
                      { C::sha256_memory_address_3_, output_addr + 3 },
                      { C::sha256_memory_address_4_, output_addr + 4 },
                      { C::sha256_memory_address_5_, output_addr + 5 },
                      { C::sha256_memory_address_6_, output_addr + 6 },
                      { C::sha256_memory_address_7_, output_addr + 7 },
                      // Output Values
                      { C::sha256_memory_register_0_, round_state[0] + state[0] },
                      { C::sha256_memory_register_1_, round_state[1] + state[1] },
                      { C::sha256_memory_register_2_, round_state[2] + state[2] },
                      { C::sha256_memory_register_3_, round_state[3] + state[3] },
                      { C::sha256_memory_register_4_, round_state[4] + state[4] },
                      { C::sha256_memory_register_5_, round_state[5] + state[5] },
                      { C::sha256_memory_register_6_, round_state[6] + state[6] },
                      { C::sha256_memory_register_7_, round_state[7] + state[7] },
                      // Output Memory Tags
                      { C::sha256_memory_tag_0_, static_cast<uint8_t>(MemoryTag::U32) },
                      { C::sha256_memory_tag_1_, static_cast<uint8_t>(MemoryTag::U32) },
                      { C::sha256_memory_tag_2_, static_cast<uint8_t>(MemoryTag::U32) },
                      { C::sha256_memory_tag_3_, static_cast<uint8_t>(MemoryTag::U32) },
                      { C::sha256_memory_tag_4_, static_cast<uint8_t>(MemoryTag::U32) },
                      { C::sha256_memory_tag_5_, static_cast<uint8_t>(MemoryTag::U32) },
                      { C::sha256_memory_tag_6_, static_cast<uint8_t>(MemoryTag::U32) },
                      { C::sha256_memory_tag_7_, static_cast<uint8_t>(MemoryTag::U32) },
                  } });

        row++;
    }
}

const InteractionDefinition Sha256TraceBuilder::interactions =
    InteractionDefinition()
        // GT Interactions
        .add<lookup_sha256_mem_check_state_addr_in_range_settings, InteractionType::LookupGeneric>(Column::gt_sel)
        .add<lookup_sha256_mem_check_output_addr_in_range_settings, InteractionType::LookupGeneric>(Column::gt_sel)
        // Bitwise operations
        .add<lookup_sha256_w_s_0_xor_1_settings, InteractionType::LookupGeneric>(Column::bitwise_sel)
        .add<lookup_sha256_w_s_1_xor_1_settings, InteractionType::LookupGeneric>(Column::bitwise_sel)
        .add<lookup_sha256_s_1_xor_1_settings, InteractionType::LookupGeneric>(Column::bitwise_sel)
        .add<lookup_sha256_ch_and_1_settings, InteractionType::LookupGeneric>(Column::bitwise_sel)
        .add<lookup_sha256_s_0_xor_0_settings, InteractionType::LookupGeneric>(Column::bitwise_sel)
        .add<lookup_sha256_maj_and_0_settings, InteractionType::LookupGeneric>(Column::bitwise_sel)
        .add<lookup_sha256_maj_and_2_settings, InteractionType::LookupGeneric>(Column::bitwise_sel)
        .add<lookup_sha256_maj_xor_1_settings, InteractionType::LookupGeneric>(Column::bitwise_sel)
        // GT Checks for Rotations and Shifts
        .add<lookup_sha256_range_rhs_w_18_settings, InteractionType::LookupGeneric>(Column::gt_sel)
        .add<lookup_sha256_range_rhs_w_17_settings, InteractionType::LookupGeneric>(Column::gt_sel)
        .add<lookup_sha256_range_rhs_w_10_settings, InteractionType::LookupGeneric>(Column::gt_sel)
        .add<lookup_sha256_range_rhs_e_11_settings, InteractionType::LookupGeneric>(Column::gt_sel)
        .add<lookup_sha256_range_rhs_a_2_settings, InteractionType::LookupGeneric>(Column::gt_sel)
        .add<lookup_sha256_range_rhs_a_22_settings, InteractionType::LookupGeneric>(Column::gt_sel)
        // GT Checks for modulo add
        .add<lookup_sha256_range_comp_w_rhs_settings, InteractionType::LookupGeneric>(Column::gt_sel)
        .add<lookup_sha256_range_comp_next_a_rhs_settings, InteractionType::LookupGeneric>(Column::gt_sel)
        .add<lookup_sha256_range_comp_next_e_rhs_settings, InteractionType::LookupGeneric>(Column::gt_sel)
        .add<lookup_sha256_range_comp_a_rhs_settings, InteractionType::LookupGeneric>(Column::gt_sel)
        .add<lookup_sha256_range_comp_b_rhs_settings, InteractionType::LookupGeneric>(Column::gt_sel)
        .add<lookup_sha256_range_comp_c_rhs_settings, InteractionType::LookupGeneric>(Column::gt_sel)
        .add<lookup_sha256_range_comp_d_rhs_settings, InteractionType::LookupGeneric>(Column::gt_sel)
        .add<lookup_sha256_range_comp_e_rhs_settings, InteractionType::LookupGeneric>(Column::gt_sel)
        .add<lookup_sha256_range_comp_f_rhs_settings, InteractionType::LookupGeneric>(Column::gt_sel)
        .add<lookup_sha256_range_comp_g_rhs_settings, InteractionType::LookupGeneric>(Column::gt_sel)
        .add<lookup_sha256_range_comp_h_rhs_settings, InteractionType::LookupGeneric>(Column::gt_sel);

} // namespace bb::avm2::tracegen