Barretenberg
The ZK-SNARK library at the core of Aztec

biggroup_nafs.hpp
// === AUDIT STATUS ===
// internal: { status: not started, auditors: [], date: YYYY-MM-DD }
// external_1: { status: not started, auditors: [], date: YYYY-MM-DD }
// external_2: { status: not started, auditors: [], date: YYYY-MM-DD }
// =====================

#pragma once

namespace bb::stdlib::element_default {
template <typename C, class Fq, class Fr, class G>
template <size_t wnaf_size>
std::pair<uint64_t, bool> element<C, Fq, Fr, G>::get_staggered_wnaf_fragment_value(const uint64_t fragment_u64,
                                                                                   const uint64_t stagger,
                                                                                   bool is_negative,
                                                                                   bool wnaf_skew)
{
    // If there is no stagger then there is no need to change anything
    if (stagger == 0) {
        return std::make_pair(0, wnaf_skew);
    }

    // Sanity check input fragment
    BB_ASSERT_LT(fragment_u64, (1ULL << stagger), "biggroup_nafs: fragment value ≥ 2^{stagger}");

    // Convert the fragment to a signed int for easier manipulation
    int fragment = static_cast<int>(fragment_u64);

    // Negate the fragment if the scalar is negative
    if (is_negative) {
        fragment = -fragment;
    }
    // If the value is positive and there is a skew in wnaf, subtract 2^{stagger}.
    if (!is_negative && wnaf_skew) {
        fragment -= (1 << stagger);
    }

    // If the value is negative and there is a skew in wnaf, add 2^{stagger}.
    if (is_negative && wnaf_skew) {
        fragment += (1 << stagger);
    }

    // If the lowest bit is zero, then set the final skew to 1 and
    // (i) add 1 to the fragment if it's positive
    // (ii) subtract 1 from the fragment if it's negative
    bool output_skew = (fragment_u64 & 1) == 0;
    if (!is_negative && output_skew) {
        fragment += 1;
    } else if (is_negative && output_skew) {
        fragment -= 1;
    }

    // Compute raw wnaf value: w = 2e + 1 => e = (w - 1) / 2 => e = ⌊w / 2⌋
    const int signed_wnaf_value = (fragment / 2);
    constexpr int wnaf_window_size = (1ULL << (wnaf_size - 1));
    uint64_t output_fragment = 0;
    if (fragment < 0) {
        output_fragment = static_cast<uint64_t>(wnaf_window_size + signed_wnaf_value - 1);
    } else {
        output_fragment = static_cast<uint64_t>(wnaf_window_size + signed_wnaf_value);
    }

    return std::make_pair(output_fragment, output_skew);
}
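
// Illustration: with wnaf_size = 4 (window offset 8), stagger = 3, fragment_u64 = 0b100 = 4,
// is_negative = false and wnaf_skew = false: the lowest bit is 0, so output_skew = 1 and the
// fragment becomes 5; then signed_wnaf_value = 2 and output_fragment = 8 + 2 = 10, which decodes
// as 2 * (10 - 8) + 1 = 5 = fragment_u64 + output_skew (the skew is subtracted back during
// reconstruction below).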

template <typename C, class Fq, class Fr, class G>
template <size_t wnaf_size>
std::vector<field_t<C>> element<C, Fq, Fr, G>::convert_wnaf_values_to_witnesses(
    C* builder, const uint64_t* wnaf_values, bool is_negative, size_t rounds, const bool range_constrain_wnaf)
{
    constexpr uint64_t wnaf_window_size = (1ULL << (wnaf_size - 1));

    std::vector<field_t<C>> wnaf_entries;
    for (size_t i = 0; i < rounds; ++i) {
        // Predicate == sign of the current wnaf value
        const bool predicate = (wnaf_values[i] >> 31U) & 1U;            // sign bit (32nd bit)
        const uint64_t wnaf_magnitude = (wnaf_values[i] & 0x7fffffffU); // 31-bit magnitude

        // If the sign of the current entry matches the sign of the whole scalar, add the magnitude of the
        // wnaf value to the window size to form an entry. Otherwise, subtract the magnitude along with 1.
        // The extra 1 is needed to get a uniform representation of (2e' + 1) as explained in the README.
        uint64_t offset_wnaf_entry = 0;
        if ((!predicate && !is_negative) || (predicate && is_negative)) {
            offset_wnaf_entry = wnaf_window_size + wnaf_magnitude;
        } else {
            offset_wnaf_entry = wnaf_window_size - wnaf_magnitude - 1;
        }
        field_t<C> wnaf_entry(witness_t<C>(builder, offset_wnaf_entry));

        // In some cases we may want to skip range constraining the wnaf entries, e.g. when we use these
        // entries to look up in a ROM or regular table, which implicitly enforces the range constraint.
        if (range_constrain_wnaf) {
            wnaf_entry.create_range_constraint(wnaf_size, "biggroup_nafs: wnaf_entry is not in range");
        }
        wnaf_entries.emplace_back(wnaf_entry);
    }
    return wnaf_entries;
}
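
// Illustration: with wnaf_size = 4 (wnaf_window_size = 8), a raw wnaf value with magnitude m = 3
// represents the odd integer ±(2m + 1) = ±7. If its sign matches the scalar's sign, the entry is
// 8 + 3 = 11, which decodes as 2 * (11 - 8) + 1 = 7; otherwise the entry is 8 - 3 - 1 = 4,
// which decodes as 2 * (4 - 8) + 1 = -7.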

template <typename C, class Fq, class Fr, class G>
template <size_t wnaf_size>
Fr element<C, Fq, Fr, G>::reconstruct_bigfield_from_wnaf(C* builder,
                                                         const std::vector<field_t<Builder>>& wnaf,
                                                         const bool_ct& positive_skew,
                                                         const bool_ct& negative_skew,
                                                         const field_t<Builder>& stagger_fragment,
                                                         const size_t stagger,
                                                         const size_t rounds)
{
    // Collect positive wnaf entries for accumulation
    std::vector<field_t<C>> accumulator;
    for (size_t i = 0; i < rounds; ++i) {
        field_t<C> entry = wnaf[rounds - 1 - i];
        entry *= field_t<C>(uint256_t(1) << (i * wnaf_size));
        accumulator.emplace_back(entry);
    }

    // Accumulate the entries, shift by the stagger and add the stagger fragment itself
    field_t<C> sum = field_t<C>::accumulate(accumulator);
    sum = sum * field_t<C>(bb::fr(1ULL << stagger));
    sum += (stagger_fragment);
    sum = sum.normalize();

    // Convert this value to a bigfield element
    Fr reconstructed_positive_part =
        Fr(sum, field_t<C>::from_witness_index(builder, builder->zero_idx()), /*can_overflow*/ false);

    // Double the final value and add the positive skew
    reconstructed_positive_part =
        (reconstructed_positive_part + reconstructed_positive_part)
            .add_to_lower_limb(field_t<Builder>(positive_skew), /*other_maximum_value*/ uint256_t(1));

    // Start reconstructing the negative part: start with the wnaf constant 0xff...ff
    // (see the README for an explanation of this constant)
    constexpr uint64_t wnaf_window_size = (1ULL << (wnaf_size - 1));
    uint256_t negative_constant_wnaf_offset(0);
    for (size_t i = 0; i < rounds; ++i) {
        negative_constant_wnaf_offset += uint256_t((wnaf_window_size * 2) - 1) * (uint256_t(1) << (i * wnaf_size));
    }
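    // e.g. for wnaf_size = 4 and rounds = 3 this offset is 15 + 15 * 16 + 15 * 16^2 = 0xfff:
    // every window position holds its maximal digit value 2^{wnaf_size} - 1.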

    // Shift by the stagger
    negative_constant_wnaf_offset = negative_constant_wnaf_offset << stagger;

    // Account for the stagger fragment (if any)
    if (stagger > 0) {
        negative_constant_wnaf_offset += ((1ULL << wnaf_size) - 1ULL); // from the stagger fragment
    }

    // Add the negative skew to the bigfield constant
    Fr reconstructed_negative_part =
        Fr(nullptr, negative_constant_wnaf_offset).add_to_lower_limb(field_t<Builder>(negative_skew), uint256_t(1));

    // output = x_pos - x_neg (x_pos and x_neg are both non-negative)
    Fr reconstructed = reconstructed_positive_part - reconstructed_negative_part;
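    // Putting it together, for a positive scalar this computes
    //   scalar = 2 * (Σ_i wnaf_i * 2^{i * wnaf_size + stagger} + stagger_fragment) + positive_skew
    //            - (negative_constant_wnaf_offset + negative_skew),
    // i.e. the w = 2e + 1 offset decoding applied to every window at once.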

    return reconstructed;
}

template <typename C, class Fq, class Fr, class G>
template <size_t num_bits, size_t wnaf_size, size_t lo_stagger, size_t hi_stagger>
std::pair<Fr, typename element<C, Fq, Fr, G>::secp256k1_wnaf> element<C, Fq, Fr, G>::
    compute_secp256k1_endo_wnaf_for_subscalar(C* builder,
                                              const secp256k1::fr& scalar,
                                              size_t stagger,
                                              bool is_negative,
                                              const bool range_constrain_wnaf,
                                              bool is_lo)
{
    // The number of rounds is the minimum required to cover the whole scalar with wnaf_size windows
    constexpr size_t num_rounds = ((num_bits + wnaf_size - 1) / wnaf_size);
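    // (e.g. num_bits = 129 and wnaf_size = 4 give num_rounds = 33)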

    // The stagger mask is needed to retrieve the lowest bits that will not be used in the Montgomery ladder directly
    const uint64_t stagger_mask = (1ULL << stagger) - 1;

    // The stagger scalar represents the lower "staggered" bits that are not used in the ladder
    const uint64_t stagger_scalar = scalar.data[0] & stagger_mask;

    std::array<uint64_t, num_rounds> wnaf_values = { 0 };
    bool skew_without_stagger = false;
    uint256_t k_u256{ scalar.data[0], scalar.data[1], scalar.data[2], scalar.data[3] };
    k_u256 = k_u256 >> stagger;
    if (is_lo) {
        bb::wnaf::fixed_wnaf<num_bits - lo_stagger, 1, wnaf_size>(
            &k_u256.data[0], &wnaf_values[0], skew_without_stagger, 0);
    } else {
        bb::wnaf::fixed_wnaf<num_bits - hi_stagger, 1, wnaf_size>(
            &k_u256.data[0], &wnaf_values[0], skew_without_stagger, 0);
    }

    // Number of rounds needed to reconstruct the scalar without the staggered bits
    const size_t num_rounds_excluding_stagger_bits = ((num_bits + wnaf_size - 1 - stagger) / wnaf_size);

    // Compute the stagger-related fragment and the final skew it induces
    const auto [first_fragment, skew] =
        get_staggered_wnaf_fragment_value<wnaf_size>(stagger_scalar, stagger, is_negative, skew_without_stagger);

    // Get wnaf witnesses
    // Note that we only range constrain the wnaf entries if range_constrain_wnaf is set to true.
    std::vector<field_t<C>> wnaf = convert_wnaf_values_to_witnesses<wnaf_size>(
        builder, &wnaf_values[0], is_negative, num_rounds_excluding_stagger_bits, range_constrain_wnaf);

    // Compute and constrain the skews
    bool_ct negative_skew(witness_t<Builder>(builder, is_negative ? 0 : skew), /*use_range_constraint*/ true);
    bool_ct positive_skew(witness_t<Builder>(builder, is_negative ? skew : 0), /*use_range_constraint*/ true);

    // Enforce that positive_skew and negative_skew are not both set at the same time
    bool_ct both_skews_cannot_be_one = !(positive_skew & negative_skew);
    both_skews_cannot_be_one.assert_equal(
        bool_ct(builder, true), "biggroup_nafs: both positive and negative skews cannot be set at the same time");

    // Initialize the stagger witness
    field_t<C> stagger_fragment = witness_t<C>(builder, first_fragment);

    // We only range constrain the stagger fragment if range_constrain_wnaf is set. This is because in some cases
    // we may use the stagger fragment to look up in a ROM/regular table, which implicitly enforces the range
    // constraint.
    if (range_constrain_wnaf) {
        stagger_fragment.create_range_constraint(wnaf_size, "biggroup_nafs: stagger fragment is not in range");
    }

    // Reconstruct the bigfield scalar from the (wnaf + stagger) representation
    Fr reconstructed = reconstruct_bigfield_from_wnaf<wnaf_size>(
        builder, wnaf, positive_skew, negative_skew, stagger_fragment, stagger, num_rounds_excluding_stagger_bits);

    secp256k1_wnaf wnaf_out{ .wnaf = wnaf,
                             .positive_skew = positive_skew,
                             .negative_skew = negative_skew,
                             .least_significant_wnaf_fragment = stagger_fragment,
                             .has_wnaf_fragment = (stagger > 0) };

    return std::make_pair(reconstructed, wnaf_out);
}

template <typename C, class Fq, class Fr, class G>
template <size_t wnaf_size, size_t lo_stagger, size_t hi_stagger>
typename element<C, Fq, Fr, G>::secp256k1_wnaf_pair element<C, Fq, Fr, G>::compute_secp256k1_endo_wnaf(
    const Fr& scalar, const bool range_constrain_wnaf)
{
    C* ctx = scalar.context;

    constexpr size_t num_bits = 129;

    // Decompose the scalar k into two 129-bit scalars klo, khi such that
    //    k = klo + ζ * khi (mod n)
    //      = klo - λ * khi (mod n)
    // where ζ is a primitive sixth root of unity mod n and λ is a primitive cube root of unity mod n
    // (note that ζ = -λ). For any scalar k such a decomposition exists, with klo and khi at most 129 bits long.
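    // (Sanity check: λ³ ≡ 1 (mod n) with λ ≠ 1 forces λ² + λ + 1 ≡ 0, so ζ = -λ satisfies
    // ζ² = ζ - 1 and ζ⁶ = λ⁶ = 1, i.e. ζ is indeed a primitive sixth root of unity.)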
    secp256k1::fr k(uint256_t(scalar.get_value() % Fr::modulus_u512));
    secp256k1::fr klo(0);
    secp256k1::fr khi(0);
    bool klo_negative = false;
    bool khi_negative = false;
    secp256k1::fr::split_into_endomorphism_scalars(k.from_montgomery_form(), klo, khi);

    // The low and high scalars must be less than 2^129 in absolute value. In some cases the khi value
    // is returned as negative, in which case we negate it and set a flag to indicate this. This is because
    // we decompose the scalar as:
    //    k = klo + ζ * khi (mod n)
    //      = klo - λ * khi (mod n)
    // where λ is the cube root of unity. If khi is negative, then -λ * khi is positive, and vice versa.
    if (khi.uint256_t_no_montgomery_conversion().get_msb() >= 129) {
        khi_negative = true;
        khi = -khi;
    }

    BB_ASSERT_LT(klo.uint256_t_no_montgomery_conversion().get_msb(), 129ULL, "biggroup_nafs: klo > 129 bits");
    BB_ASSERT_LT(khi.uint256_t_no_montgomery_conversion().get_msb(), 129ULL, "biggroup_nafs: khi > 129 bits");

    const auto [klo_reconstructed, klo_out] =
        compute_secp256k1_endo_wnaf_for_subscalar<num_bits, wnaf_size, lo_stagger, hi_stagger>(
            ctx, klo, lo_stagger, klo_negative, range_constrain_wnaf, true);

    const auto [khi_reconstructed, khi_out] =
        compute_secp256k1_endo_wnaf_for_subscalar<num_bits, wnaf_size, lo_stagger, hi_stagger>(
            ctx, khi, hi_stagger, khi_negative, range_constrain_wnaf, false);

    uint256_t minus_lambda_val(-secp256k1::fr::cube_root_of_unity());
    Fr minus_lambda(bb::fr(minus_lambda_val.slice(0, 136)), bb::fr(minus_lambda_val.slice(136, 256)), false);

    Fr reconstructed_scalar = khi_reconstructed.madd(minus_lambda, { klo_reconstructed });
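    // reconstructed_scalar = khi * (-λ) + klo, matching the decomposition k = klo - λ * khi (mod n) above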

    // Validate that the reconstructed scalar matches the original scalar in-circuit
    scalar.assert_equal(reconstructed_scalar, "biggroup_nafs: reconstructed scalar does not match reduced input");

    return { .klo = klo_out, .khi = khi_out };
}

template <typename C, class Fq, class Fr, class G>
template <size_t max_num_bits, size_t WNAF_SIZE>
std::vector<field_t<C>> element<C, Fq, Fr, G>::compute_wnaf(const Fr& scalar)
{
    C* ctx = scalar.context;
    uint512_t scalar_multiplier_512 = uint512_t(uint256_t(scalar.get_value()) % Fr::modulus);
    uint256_t scalar_multiplier = scalar_multiplier_512.lo;

    constexpr size_t num_bits = (max_num_bits == 0) ? (Fr::modulus.get_msb() + 1) : (max_num_bits);
    constexpr size_t num_rounds = ((num_bits + WNAF_SIZE - 1) / WNAF_SIZE);

    uint64_t wnaf_values[num_rounds] = { 0 };
    bool skew = false;
    bb::wnaf::fixed_wnaf<num_bits, 1, WNAF_SIZE>(&scalar_multiplier.data[0], &wnaf_values[0], skew, 0);

    std::vector<field_t<C>> wnaf_entries;
    for (size_t i = 0; i < num_rounds; ++i) {
        bool predicate = bool((wnaf_values[i] >> 31U) & 1U);
        uint64_t offset_entry;
        if (!predicate) {
            offset_entry = (1ULL << (WNAF_SIZE - 1)) + (wnaf_values[i] & 0xffffff);
        } else {
            offset_entry = (1ULL << (WNAF_SIZE - 1)) - 1 - (wnaf_values[i] & 0xffffff);
        }
        field_t<C> entry(witness_t<C>(ctx, offset_entry));
        ctx->create_new_range_constraint(entry.witness_index, 1ULL << (WNAF_SIZE), "biggroup_nafs");

        wnaf_entries.emplace_back(entry);
    }

    // Add the skew
    wnaf_entries.emplace_back(witness_t<C>(ctx, skew));
    ctx->create_new_range_constraint(wnaf_entries[wnaf_entries.size() - 1].witness_index, 1, "biggroup_nafs");

    // TODO(https://github.com/AztecProtocol/barretenberg/issues/664)
    // VALIDATE SUM DOES NOT OVERFLOW P

    // Validate the correctness of the wNAF
    if constexpr (!Fr::is_composite) {
        std::vector<Fr> accumulators;
        for (size_t i = 0; i < num_rounds; ++i) {
            Fr entry = wnaf_entries[wnaf_entries.size() - 2 - i];
            entry *= 2;
            entry *= static_cast<Fr>(uint256_t(1) << (i * WNAF_SIZE));
            accumulators.emplace_back(entry);
        }
        accumulators.emplace_back(wnaf_entries[wnaf_entries.size() - 1] * -1);
        uint256_t negative_offset(0);
        for (size_t i = 0; i < num_rounds; ++i) {
            negative_offset += uint256_t((1ULL << WNAF_SIZE) - 1) * (uint256_t(1) << (i * WNAF_SIZE));
        }
        accumulators.emplace_back(-Fr(negative_offset));
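        // At this point the accumulators sum to Σ_i (2 * e_i - (2^{WNAF_SIZE} - 1)) * 2^{i * WNAF_SIZE} - skew,
        // i.e. each offset digit e_i is decoded via w = 2e - (2^{WNAF_SIZE} - 1) (= 2e - 15 for 4-bit windows),
        // and the final skew subtracts 1.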
        Fr accumulator_result = Fr::accumulate(accumulators);
        scalar.assert_equal(accumulator_result);
    } else {
        // If Fr is a non-native field element, we can't just accumulate the wnaf entries into a single value,
        // as we could overflow the circuit modulus.
        //
        // We add the first 34 wnaf entries into a 'low' 136-bit accumulator (136 = 2 * 68-bit limbs).
        // We add the remaining wnaf entries into a 'high' accumulator.
        // We can then directly construct an Fr element from the two accumulators.
        // However, we cannot underflow our accumulators, and our wnafs represent both negative and positive
        // values. The raw value of each wnaf entry lies in the range [0, 15], but these values represent the
        // odd integers [-15, -13, -11, ..., 13, 15].
        //
        // To map from the raw value to the actual value, we must compute `value * 2 - 15`.
        // However, we do not subtract off the -15 term when constructing our low and high accumulators. Instead
        // of multiplying by two when accumulating, we simply add the accumulated value to itself; this
        // automatically updates the multiplicative constants without creating new witnesses, and it ensures the
        // low accumulator cannot underflow.
        //
        // Once we have reconstructed an Fr element out of our accumulators,
        // we ALSO construct an Fr element from the constant offset terms we left out.
        // We then subtract off the constant term and call `Fr::assert_is_in_field` to reduce the value modulo
        // Fr::modulus.
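        // e.g. a raw 4-bit wnaf digit w = 0 represents 2 * 0 - 15 = -15, w = 7 represents -1,
        // and w = 15 represents +15.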
        const auto reconstruct_half_wnaf = [](field_t<C>* wnafs, const size_t half_round_length) {
            std::vector<field_t<C>> half_accumulators;
            for (size_t i = 0; i < half_round_length; ++i) {
                field_t<C> entry = wnafs[half_round_length - 1 - i];
                entry *= static_cast<field_t<C>>(uint256_t(1) << (i * 4));
                half_accumulators.emplace_back(entry);
            }
            return field_t<C>::accumulate(half_accumulators);
        };
        const size_t midpoint = num_rounds - (Fr::NUM_LIMB_BITS * 2) / WNAF_SIZE;
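        // e.g. for a 256-bit scalar with WNAF_SIZE = 4: num_rounds = 64 and, with 68-bit limbs,
        // midpoint = 64 - 136 / 4 = 30, so the low accumulator takes 34 digits = 136 bits
        // (matching the BB_ASSERT_EQ below) and the high accumulator takes the remaining 30 digits.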
        auto hi_accumulators = reconstruct_half_wnaf(&wnaf_entries[0], midpoint);
        auto lo_accumulators = reconstruct_half_wnaf(&wnaf_entries[midpoint], num_rounds - midpoint);
        uint256_t negative_lo(0);
        uint256_t negative_hi(0);
        for (size_t i = 0; i < midpoint; ++i) {
            negative_hi += uint256_t(15) * (uint256_t(1) << (i * 4));
        }
        for (size_t i = 0; i < (num_rounds - midpoint); ++i) {
            negative_lo += uint256_t(15) * (uint256_t(1) << (i * 4));
        }
        BB_ASSERT_EQ((num_rounds - midpoint) * 4, 136U);
        // If skew == 1, lo_offset = 0; otherwise lo_offset = 0xf...f
        field_t<C> lo_offset = (-field_t<C>(bb::fr(negative_lo)))
                                   .madd(wnaf_entries[wnaf_entries.size() - 1], field_t<C>(bb::fr(negative_lo)))
                                   .normalize();
        Fr offset = Fr(lo_offset, field_t<C>(bb::fr(negative_hi)) + wnaf_entries[wnaf_entries.size() - 1], true);
        Fr reconstructed = Fr(lo_accumulators, hi_accumulators, true);
        reconstructed = (reconstructed + reconstructed) - offset;
        reconstructed.reduce_mod_target_modulus();
        reconstructed.assert_equal(scalar);
    }

    // Set the tags of the wnaf_entries to the original scalar tag
    const auto original_tag = scalar.get_origin_tag();
    for (auto& entry : wnaf_entries) {
        entry.set_origin_tag(original_tag);
    }
    return wnaf_entries;
}

template <typename C, class Fq, class Fr, class G>
std::vector<bool_t<C>> element<C, Fq, Fr, G>::compute_naf(const Fr& scalar, const size_t max_num_bits)
{
    // We do not handle the case of odd bit lengths here.
    BB_ASSERT_EQ(max_num_bits % 2, 0U);
    // Apply range constraint gates instead of arithmetic gates to constrain boolean witnesses
    static constexpr bool use_bool_range_constraint = true;

    C* ctx = scalar.context;
    uint512_t scalar_multiplier_512 = uint512_t(uint256_t(scalar.get_value()) % Fr::modulus);
    uint256_t scalar_multiplier = scalar_multiplier_512.lo;
    // NAF can't handle 0: substitute Fr::modulus, which represents the same scalar (mod Fr::modulus)
    if (scalar_multiplier == 0) {
        scalar_multiplier = Fr::modulus;
    }

    const size_t num_rounds = (max_num_bits == 0) ? Fr::modulus.get_msb() + 1 : max_num_bits;
    std::vector<bool_ct> naf_entries(num_rounds + 1);

    // if boolean is false => do NOT flip y
    // if boolean is true  => DO flip y
    // the entry at index num_rounds is the skew, i.e. whether we subtract one from the final result or not
    naf_entries[num_rounds] = bool_ct(witness_t(ctx, !scalar_multiplier.get_bit(0)), use_bool_range_constraint);
    scalar_multiplier += uint256_t(!scalar_multiplier.get_bit(0));

    // We need to manually propagate the origin tag
    naf_entries[num_rounds].set_origin_tag(scalar.get_origin_tag());

    for (size_t i = 0; i < num_rounds - 1; ++i) {
        bool next_entry = scalar_multiplier.get_bit(i + 1);
        // If the next entry is false, we need to flip the sign of the current entry, i.e. make it negative.
        // Apply a basic range constraint per bool instead of a full 1-bit range gate. This results in
        // ~num_rounds/4 gates per scalar.
        bool_ct bit(witness_t<C>(ctx, !next_entry), use_bool_range_constraint);

        naf_entries[num_rounds - i - 1] = bit;

        // We need to manually propagate the origin tag
        naf_entries[num_rounds - i - 1].set_origin_tag(scalar.get_origin_tag());
    }
    naf_entries[0] = bool_ct(ctx, false); // the most significant NAF digit is always +1 (false => no sign flip)

    // Validate the correctness of the NAF
    if constexpr (!Fr::is_composite) {
        std::vector<Fr> accumulators;
        for (size_t i = 0; i < num_rounds; ++i) {
            // bit = 1 - 2 * naf
            Fr entry(naf_entries[naf_entries.size() - 2 - i]);
            entry *= -2;
            entry += 1;
            entry *= static_cast<Fr>(uint256_t(1) << (i));
            accumulators.emplace_back(entry);
        }
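        // Each naf bit b contributes (1 - 2 * b) * 2^i: +2^i when b = 0 and -2^i when b = 1;
        // the skew entry below subtracts a final 1.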
        accumulators.emplace_back(Fr(naf_entries[naf_entries.size() - 1]) * -1); // skew
        Fr accumulator_result = Fr::accumulate(accumulators);
        scalar.assert_equal(accumulator_result);
    } else {
        const auto reconstruct_half_naf = [](bool_ct* nafs, const size_t half_round_length) {
            // Q: need constraint to start from zero?
            field_t<C> negative_accumulator(0);
            field_t<C> positive_accumulator(0);
            for (size_t i = 0; i < half_round_length; ++i) {
                negative_accumulator = negative_accumulator + negative_accumulator + field_t<C>(nafs[i]);
                positive_accumulator =
                    positive_accumulator + positive_accumulator + field_t<C>(1) - field_t<C>(nafs[i]);
            }
            return std::make_pair(positive_accumulator, negative_accumulator);
        };
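        // reconstruct_half_naf processes the digits MSB-first: doubling both accumulators each step yields
        // positive = Σ_i (1 - naf_i) * 2^{L-1-i} and negative = Σ_i naf_i * 2^{L-1-i} over L digits, so
        // (positive - negative) recovers the signed digit sum Σ_i (1 - 2 * naf_i) * 2^{L-1-i}.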
        const size_t midpoint =
            (num_rounds > Fr::NUM_LIMB_BITS * 2) ? num_rounds - Fr::NUM_LIMB_BITS * 2 : num_rounds / 2;

        std::pair<field_t<C>, field_t<C>> hi_accumulators;
        std::pair<field_t<C>, field_t<C>> lo_accumulators;

        if (num_rounds > Fr::NUM_LIMB_BITS * 2) {
            hi_accumulators = reconstruct_half_naf(&naf_entries[0], midpoint);
            lo_accumulators = reconstruct_half_naf(&naf_entries[midpoint], num_rounds - midpoint);
        } else {
            // If the number of rounds is at most 2 * Fr::NUM_LIMB_BITS, the high limbs of the resulting Fr
            // element are zero.
            const field_t<C> zero = field_t<C>::from_witness_index(ctx, 0);
            lo_accumulators = reconstruct_half_naf(&naf_entries[0], num_rounds);
            hi_accumulators = std::make_pair(zero, zero);
        }

        lo_accumulators.second = lo_accumulators.second + field_t<C>(naf_entries[num_rounds]);

        Fr reconstructed_positive = Fr(lo_accumulators.first, hi_accumulators.first);
        Fr reconstructed_negative = Fr(lo_accumulators.second, hi_accumulators.second);
        Fr accumulator = reconstructed_positive - reconstructed_negative;
        accumulator.assert_equal(scalar);
    }
    // Propagate the tags to the naf entries
    const auto original_tag = scalar.get_origin_tag();
    for (auto& naf_entry : naf_entries) {
        naf_entry.set_origin_tag(original_tag);
    }
    return naf_entries;
}
} // namespace bb::stdlib::element_default