const uint64_t stagger,

BB_ASSERT_LT(fragment_u64, (1ULL << stagger), "biggroup_nafs: fragment value >= 2^{stagger}");

int fragment = static_cast<int>(fragment_u64);

// Compensate the fragment for the skew of the wnaf that covers the bits above the stagger window.
if (!is_negative && wnaf_skew) {
    fragment -= (1 << stagger);
}
if (is_negative && wnaf_skew) {
    fragment += (1 << stagger);
}
// wnaf digits must be odd: record a skew when the fragment is even so it can be corrected later.
bool output_skew = (fragment_u64 & 1) == 0;
if (!is_negative && output_skew) {
    // ...
} else if (is_negative && output_skew) {
    // ...
}

const int signed_wnaf_value = (fragment / 2);
constexpr int wnaf_window_size = (1ULL << (wnaf_size - 1));
uint64_t output_fragment = 0;

output_fragment = static_cast<uint64_t>(wnaf_window_size + signed_wnaf_value - 1);

output_fragment = static_cast<uint64_t>(wnaf_window_size + signed_wnaf_value);
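// Note on the two assignments above: the signed wnaf value is stored in "offset form",
// shifted by wnaf_window_size = 2^(wnaf_size - 1) so that the stored fragment lies in
// [0, 2^wnaf_size). A single wnaf_size-bit range check is then enough to constrain it,
// and the constant shift is removed again during reconstruction.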
C* builder,
const uint64_t* wnaf_values,
bool is_negative,
size_t rounds,
const bool range_constrain_wnaf)
{
constexpr uint64_t wnaf_window_size = (1ULL << (wnaf_size - 1));

for (size_t i = 0; i < rounds; ++i) {
    // Bit 31 of each native wnaf value is the sign flag; the low 31 bits hold the magnitude m,
    // where the signed digit is +/-(2 * m + 1).
    const bool predicate = (wnaf_values[i] >> 31U) & 1U;
    const uint64_t wnaf_magnitude = (wnaf_values[i] & 0x7fffffffU);

    uint64_t offset_wnaf_entry = 0;
    if ((!predicate && !is_negative) || (predicate && is_negative)) {
        offset_wnaf_entry = wnaf_window_size + wnaf_magnitude;
    } else {
        offset_wnaf_entry = wnaf_window_size - wnaf_magnitude - 1;
    }

    if (range_constrain_wnaf) {
        // ...
    }

    wnaf_entries.emplace_back(wnaf_entry);
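// In both branches above, the signed wnaf digit can be recovered from the offset entry by one
// affine map:
//
//     digit = 2 * offset_wnaf_entry - (2 * wnaf_window_size - 1)
//
// Entries built as wnaf_window_size + wnaf_magnitude encode +(2 * wnaf_magnitude + 1), and entries
// built as wnaf_window_size - wnaf_magnitude - 1 encode -(2 * wnaf_magnitude + 1), so a single
// wnaf_size-bit range constraint per entry suffices. The reconstruction below relies on exactly
// this identity when it doubles the accumulated entries and subtracts a constant
// (2 * wnaf_window_size - 1) per round.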
const size_t stagger,

for (size_t i = 0; i < rounds; ++i) {

    accumulator.emplace_back(entry);

sum += stagger_fragment;

Fr reconstructed_positive_part = /* ... */;

reconstructed_positive_part =
    (reconstructed_positive_part + reconstructed_positive_part) /* ... */;

// Constant introduced by the offset encoding: each round contributes (2 * wnaf_window_size - 1),
// scaled by its position, and must be subtracted again.
constexpr uint64_t wnaf_window_size = (1ULL << (wnaf_size - 1));
uint256_t negative_constant_wnaf_offset(0);
for (size_t i = 0; i < rounds; ++i) {
    negative_constant_wnaf_offset += uint256_t((wnaf_window_size * 2) - 1) * (uint256_t(1) << (i * wnaf_size));
}

negative_constant_wnaf_offset = negative_constant_wnaf_offset << stagger;

negative_constant_wnaf_offset += ((1ULL << wnaf_size) - 1ULL);

Fr reconstructed_negative_part = /* ... */;

Fr reconstructed = reconstructed_positive_part - reconstructed_negative_part;

return reconstructed;
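// Reconstruction sketch (ignoring the elided terms): the positive part accumulates the offset
// entries in base 2^wnaf_size and is doubled, while negative_constant_wnaf_offset collects the
// matching constant sum_i (2^wnaf_size - 1) * 2^(i * wnaf_size), shifted past the stagger bits.
// Subtracting the two therefore recovers the signed wnaf digits recombined at their correct bit
// positions, with the stagger fragment and skews supplying the low-order corrections.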
const bool range_constrain_wnaf,

constexpr size_t num_rounds = ((num_bits + wnaf_size - 1) / wnaf_size);

const uint64_t stagger_mask = (1ULL << stagger) - 1;

// Split off the low `stagger` bits of the scalar; the wnaf proper is computed on the remaining bits.
const uint64_t stagger_scalar = scalar.data[0] & stagger_mask;

bool skew_without_stagger = false;

k_u256 = k_u256 >> stagger;

bb::wnaf::fixed_wnaf</* ... */>(&k_u256.data[0], &wnaf_values[0], skew_without_stagger, 0);

bb::wnaf::fixed_wnaf</* ... */>(&k_u256.data[0], &wnaf_values[0], skew_without_stagger, 0);

const size_t num_rounds_excluding_stagger_bits = ((num_bits + wnaf_size - 1 - stagger) / wnaf_size);

const auto [first_fragment, skew] =
    get_staggered_wnaf_fragment_value<wnaf_size>(stagger_scalar, stagger, is_negative, skew_without_stagger);

/* ... */(
    builder, &wnaf_values[0], is_negative, num_rounds_excluding_stagger_bits, range_constrain_wnaf);

bool_ct both_skews_cannot_be_one = !(positive_skew & negative_skew);
both_skews_cannot_be_one.assert_equal(
    bool_ct(builder, true),
    "biggroup_nafs: both positive and negative skews cannot be set at the same time");

if (range_constrain_wnaf) {
    // ...
}

Fr reconstructed = reconstruct_bigfield_from_wnaf<wnaf_size>(
    builder, wnaf, positive_skew, negative_skew, stagger_fragment, stagger, num_rounds_excluding_stagger_bits);

         .positive_skew = positive_skew,
         .negative_skew = negative_skew,
         .least_significant_wnaf_fragment = stagger_fragment,
         .has_wnaf_fragment = (stagger > 0) };
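// The record returned above carries everything a caller needs to apply the staggered wnaf:
// the two skews (mutually exclusive, as asserted above), the least-significant fragment that
// covers the low `stagger` bits of the scalar, and a flag marking whether such a fragment
// exists at all (stagger > 0).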
const Fr& scalar, const bool range_constrain_wnaf)

C* ctx = scalar.context;

constexpr size_t num_bits = 129;

bool klo_negative = false;
bool khi_negative = false;

const auto [klo_reconstructed, klo_out] =
    /* ... */(ctx, klo, lo_stagger, klo_negative, range_constrain_wnaf, true);

const auto [khi_reconstructed, khi_out] =
    /* ... */(ctx, khi, hi_stagger, khi_negative, range_constrain_wnaf, false);

Fr reconstructed_scalar = khi_reconstructed.madd(minus_lambda, { klo_reconstructed });

scalar.assert_equal(reconstructed_scalar, "biggroup_nafs: reconstructed scalar does not match reduced input");

return { .klo = klo_out, .khi = khi_out };
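// The madd above asserts, in-circuit, that
//
//     scalar == klo - lambda * khi   (mod r)
//
// which is the endomorphism (GLV-style) split of a secp256k1 scalar into two roughly
// half-width parts (num_bits = 129 above); klo_negative / khi_negative record the sign
// chosen for each half before its wnaf is computed.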
C* ctx = scalar.context;

uint256_t scalar_multiplier = scalar_multiplier_512.lo;

constexpr size_t num_bits = (max_num_bits == 0) ? (Fr::modulus.get_msb() + 1) : (max_num_bits);

uint64_t wnaf_values[num_rounds] = { 0 };

bb::wnaf::fixed_wnaf<num_bits, 1, WNAF_SIZE>(&scalar_multiplier.data[0], &wnaf_values[0], skew, 0);

for (size_t i = 0; i < num_rounds; ++i) {
    bool predicate = bool((wnaf_values[i] >> 31U) & 1U);
    uint64_t offset_entry;
    if (!predicate) {
        offset_entry = (1ULL << (WNAF_SIZE - 1)) + (wnaf_values[i] & 0xffffff);
    } else {
        offset_entry = (1ULL << (WNAF_SIZE - 1)) - 1 - (wnaf_values[i] & 0xffffff);
    }

    wnaf_entries.emplace_back(entry);

ctx->create_new_range_constraint(wnaf_entries[wnaf_entries.size() - 1].witness_index, 1, "biggroup_nafs");

if constexpr (!Fr::is_composite) {
    std::vector<Fr> accumulators;
    for (size_t i = 0; i < num_rounds; ++i) {
        Fr entry = wnaf_entries[wnaf_entries.size() - 2 - i];

        accumulators.emplace_back(entry);
    }

    // Subtract the skew (the final wnaf entry) from the accumulated sum.
    accumulators.emplace_back(wnaf_entries[wnaf_entries.size() - 1] * -1);

    for (size_t i = 0; i < num_rounds; ++i) {
        // ...
    }
    accumulators.emplace_back(-Fr(negative_offset));
    Fr accumulator_result = Fr::accumulate(accumulators);
    scalar.assert_equal(accumulator_result);
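// For a non-composite Fr the whole wnaf fits into a single native field element, so the circuit
// simply accumulates the offset entries round by round, appends the skew entry with a -1
// coefficient, subtracts the constant negative_offset introduced by the offset encoding, and
// asserts that Fr::accumulate over all terms equals the original scalar.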
const auto reconstruct_half_wnaf = [](field_t<C>* wnafs, const size_t half_round_length) {
    std::vector<field_t<C>> half_accumulators;
    for (size_t i = 0; i < half_round_length; ++i) {
        field_t<C> entry = wnafs[half_round_length - 1 - i];

        half_accumulators.emplace_back(entry);
    }
    return half_accumulators;
};

const size_t midpoint = num_rounds - (Fr::NUM_LIMB_BITS * 2) / WNAF_SIZE;
auto hi_accumulators = reconstruct_half_wnaf(&wnaf_entries[0], midpoint);
auto lo_accumulators = reconstruct_half_wnaf(&wnaf_entries[midpoint], num_rounds - midpoint);

for (size_t i = 0; i < midpoint; ++i) {
    // ...
}
for (size_t i = 0; i < (num_rounds - midpoint); ++i) {
    // ...
}

/* ... */
    .madd(wnaf_entries[wnaf_entries.size() - 1], field_t<C>(bb::fr(negative_lo)))

Fr reconstructed = Fr(lo_accumulators, hi_accumulators, true);
reconstructed = (reconstructed + reconstructed) - offset;
reconstructed.reduce_mod_target_modulus();
reconstructed.assert_equal(scalar);
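// For a composite (bigfield) Fr the wnaf is too wide for one native field element, so it is
// split at the limb boundary: the first `midpoint` entries form the high half and the remaining
// entries (covering the low 2 * Fr::NUM_LIMB_BITS bits of the scalar) form the low half. Each
// half is accumulated natively, the two are recombined through the Fr(lo, hi) constructor,
// doubled, and the constant offset is removed before asserting equality with the scalar.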
const auto original_tag = scalar.get_origin_tag();
// Propagate the scalar's origin tag to every wnaf entry created above.
for (auto& entry : wnaf_entries) {
    entry.set_origin_tag(original_tag);
}
static constexpr bool use_bool_range_constraint = true;

C* ctx = scalar.context;

uint256_t scalar_multiplier = scalar_multiplier_512.lo;

if (scalar_multiplier == 0) {
    // ...
}

const size_t num_rounds = (max_num_bits == 0) ? Fr::modulus.get_msb() + 1 : max_num_bits;

// naf_entries[num_rounds] holds the skew term; tag it with the scalar's origin tag.
naf_entries[num_rounds].set_origin_tag(scalar.get_origin_tag());

for (size_t i = 0; i < num_rounds - 1; ++i) {
    bool next_entry = scalar_multiplier.get_bit(i + 1);

    naf_entries[num_rounds - i - 1] = bit;

    naf_entries[num_rounds - i - 1].set_origin_tag(scalar.get_origin_tag());
}

naf_entries[0] = bool_ct(ctx, false);
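// Digit convention implied by the reconstruction below: each naf_entries[i] is a boolean
// selector where false contributes +2^k and true contributes -2^k at its bit position, and
// naf_entries[num_rounds] is a final skew that subtracts 1 so that even scalars (which a
// pure +/-1 representation cannot produce) remain representable.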
if constexpr (!Fr::is_composite) {
    std::vector<Fr> accumulators;
    for (size_t i = 0; i < num_rounds; ++i) {
        Fr entry(naf_entries[naf_entries.size() - 2 - i]);

        accumulators.emplace_back(entry);
    }
    // Subtract the skew term (the final naf entry).
    accumulators.emplace_back(Fr(naf_entries[naf_entries.size() - 1]) * -1);
    Fr accumulator_result = Fr::accumulate(accumulators);
    scalar.assert_equal(accumulator_result);
} else {
    const auto reconstruct_half_naf = [](bool_ct* nafs, const size_t half_round_length) {
        field_t<C> positive_accumulator(0);
        field_t<C> negative_accumulator(0);
        for (size_t i = 0; i < half_round_length; ++i) {
            negative_accumulator = negative_accumulator + negative_accumulator + field_t<C>(nafs[i]);
            positive_accumulator =
                positive_accumulator + positive_accumulator + field_t<C>(1) - field_t<C>(nafs[i]);
        }
        return std::make_pair(positive_accumulator, negative_accumulator);
    };

    const size_t midpoint =
        (num_rounds > Fr::NUM_LIMB_BITS * 2) ? num_rounds - Fr::NUM_LIMB_BITS * 2 : num_rounds / 2;

    if (num_rounds > Fr::NUM_LIMB_BITS * 2) {
        hi_accumulators = reconstruct_half_naf(&naf_entries[0], midpoint);
        lo_accumulators = reconstruct_half_naf(&naf_entries[midpoint], num_rounds - midpoint);
    } else {

        lo_accumulators = reconstruct_half_naf(&naf_entries[0], num_rounds);
    }

    // The skew (naf_entries[num_rounds]) subtracts 1 from the result, so it joins the negative part.
    lo_accumulators.second = lo_accumulators.second + field_t<C>(naf_entries[num_rounds]);

    Fr reconstructed_positive = Fr(lo_accumulators.first, hi_accumulators.first);
    Fr reconstructed_negative = Fr(lo_accumulators.second, hi_accumulators.second);
    Fr accumulator = reconstructed_positive - reconstructed_negative;
    accumulator.assert_equal(scalar);
}
const auto original_tag = scalar.get_origin_tag();
// Propagate the scalar's origin tag to every naf entry.
for (auto& naf_entry : naf_entries) {
    naf_entry.set_origin_tag(original_tag);
}
// Native helper declaration and summary:
//   void fixed_wnaf(const uint64_t* scalar, uint64_t* wnaf, bool& skew_map, const uint64_t point_index,
//                   const uint64_t num_points, const size_t wnaf_bits) noexcept
// Performs fixed-window non-adjacent form (WNAF) computation for scalar multiplication.
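// Illustrative sketch only, not part of the library code: how a caller drives the native helper
// before feeding wnaf_values into the circuit code above. The widths below are assumed example
// values; the call shape mirrors the bb::wnaf::fixed_wnaf<num_bits, 1, WNAF_SIZE> invocation
// used in the compute_wnaf path above.
inline void example_fixed_wnaf_usage(const uint256_t& scalar_multiplier)
{
    constexpr size_t example_num_bits = 254;  // assumed scalar width for this sketch
    constexpr size_t example_wnaf_bits = 4;   // assumed window size for this sketch
    constexpr size_t example_num_rounds = (example_num_bits + example_wnaf_bits - 1) / example_wnaf_bits;

    uint64_t wnaf_values[example_num_rounds] = { 0 };
    bool skew = false; // set by fixed_wnaf when the scalar is even
    bb::wnaf::fixed_wnaf<example_num_bits, 1, example_wnaf_bits>(
        &scalar_multiplier.data[0], &wnaf_values[0], skew, 0);

    // Each wnaf_values[i] packs a sign flag in bit 31 and a magnitude m in the low bits;
    // the signed digit is +/-(2 * m + 1), and `skew` records whether 1 must be subtracted
    // from the recombined result.
}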