Barretenberg
The ZK-SNARK library at the core of Aztec
scalar_multiplication.test.cpp
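Unit tests for the Pippenger-style multi-scalar multiplication (MSM) implementation in bb::scalar_multiplication, instantiated for both the BN254 and Grumpkin curves.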
#include <filesystem>
#include <gtest/gtest.h>

using namespace bb;

namespace {
auto& engine = numeric::get_randomness();
} // namespace

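// Typed test fixture shared by the MSM tests below. SetUpTestSuite generates one common pool of
// random base points and scalars per curve so the individual tests can slice into it.
// NOTE: the declaration of the shared `engine` RNG in the anonymous namespace above was not
// preserved in this listing; the line shown is reconstructed from context.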
template <class Curve> class ScalarMultiplicationTest : public ::testing::Test {
  public:
    using Group = typename Curve::Group;
    using Element = typename Curve::Element;
    using AffineElement = typename Curve::AffineElement;
    using ScalarField = typename Curve::ScalarField;

    static constexpr size_t num_points = 201123;
    static inline std::vector<AffineElement> generators{};
    static inline std::vector<ScalarField> scalars{};

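    // Reference implementation: evaluates sum_i(input_scalars[i] * input_points[i]) directly,
    // splitting the range across threads and adding the per-thread partial sums at the end.
    // Used as the expected value for every optimised MSM code path under test.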
    static AffineElement naive_msm(std::span<ScalarField> input_scalars, std::span<const AffineElement> input_points)
    {
        size_t total_points = input_scalars.size();
        size_t num_threads = get_num_cpus();
        std::vector<Element> expected_accs(num_threads);
        size_t range_per_thread = (total_points + num_threads - 1) / num_threads;
        parallel_for(num_threads, [&](size_t thread_idx) {
            Element expected_thread_acc;
            expected_thread_acc.self_set_infinity();
            size_t start = thread_idx * range_per_thread;
            size_t end = ((thread_idx + 1) * range_per_thread > total_points) ? total_points
                                                                              : (thread_idx + 1) * range_per_thread;
            bool skip = start >= total_points;
            if (!skip) {
                for (size_t i = start; i < end; ++i) {
                    expected_thread_acc += input_points[i] * input_scalars[i];
                }
            }
            expected_accs[thread_idx] = expected_thread_acc;
        });

        Element expected_acc = Element();
        expected_acc.self_set_infinity();
        for (auto& acc : expected_accs) {
            expected_acc += acc;
        }
        return AffineElement(expected_acc);
    }
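    // One-time suite setup: fill `generators` with random curve points and `scalars` with random
    // field elements, then sanity-check that adjacent generators have distinct x-coordinates.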
    static void SetUpTestSuite()
    {
        generators.resize(num_points);
        scalars.resize(num_points);
        parallel_for_range(num_points, [&](size_t start, size_t end) {
            for (size_t i = start; i < end; ++i) {
                generators[i] = Group::one * Curve::ScalarField::random_element(&engine);
                scalars[i] = Curve::ScalarField::random_element(&engine);
            }
        });
        for (size_t i = 0; i < num_points - 1; ++i) {
            ASSERT_EQ(generators[i].x == generators[i + 1].x, false);
        }
    };
};

using CurveTypes = ::testing::Types<bb::curve::BN254, bb::curve::Grumpkin>;
TYPED_TEST_SUITE(ScalarMultiplicationTest, CurveTypes);

#define SCALAR_MULTIPLICATION_TYPE_ALIASES                                                                             \
    using Curve = TypeParam;                                                                                           \
    using ScalarField = typename Curve::ScalarField;                                                                   \
    // using AffineElement = typename Curve::AffineElement;

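// Checks MSM<Curve>::get_scalar_slice, which extracts a fixed-width window from a scalar that is
// NOT in Montgomery form. With a 254-bit scalar and 7-bit slices there are (254 + 6) / 7 = 37
// slices, and the most significant slice holds the remaining 254 - 36 * 7 = 2 bits. The expected
// slices are recomputed below with plain uint256_t shifts and masks.
// NOTE: the TYPED_TEST declaration for this test was not preserved in this listing; the name used
// below is inferred from the test body.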
TYPED_TEST(ScalarMultiplicationTest, GetScalarSlice)
{
    SCALAR_MULTIPLICATION_TYPE_ALIASES
    const size_t fr_size = 254;
    const size_t slice_bits = 7;
    size_t num_slices = (fr_size + 6) / 7;
    size_t last_slice_bits = fr_size - ((num_slices - 1) * slice_bits);

    for (size_t x = 0; x < 100; ++x) {

        uint256_t input_u256 = engine.get_random_uint256();
        input_u256.data[3] = input_u256.data[3] & 0x3FFFFFFFFFFFFFFF; // 254 bits
        while (input_u256 > ScalarField::modulus) {
            input_u256 -= ScalarField::modulus;
        }
        std::vector<uint32_t> slices(num_slices);

        uint256_t acc = input_u256;
        for (size_t i = 0; i < num_slices; ++i) {
            size_t mask = ((1 << slice_bits) - 1UL);
            size_t shift = slice_bits;
            if (i == 0) {
                mask = ((1UL << last_slice_bits) - 1UL);
                shift = last_slice_bits;
            }
            slices[num_slices - 1 - i] = static_cast<uint32_t>((acc & mask).data[0]);
            acc = acc >> shift;
        }
        // uint256_t input_u256 = 0;

        // for (size_t i = 0; i < num_slices; ++i) {
        //     bool valid_slice = false;
        //     while (!valid_slice) {
        //         size_t mask = ((1 << slice_bits) - 1);
        //         if (i == num_slices - 1) {
        //             mask = ((1 << last_slice_bits) - 1);
        //         }
        //         const uint32_t slice = engine.get_random_uint32() & mask;

        //         size_t shift = (fr_size - slice_bits - (i * slice_bits));
        //         if (i == num_slices - 1) {
        //             shift = 0;
        //         }

        //         const uint256_t new_input_u256 = input_u256 + (uint256_t(slice) << shift);
        //         // ASSERT(new_input_u256 < fr::modulus);
        //         if (new_input_u256 < fr::modulus) {
        //             input_u256 = new_input_u256;
        //             slices[i] = slice;
        //             valid_slice = true;
        //         }
        //     }
        // }

        // ASSERT(input_u256 < fr::modulus);
        // while (input_u256 > fr::modulus) {
        //     input_u256 -= fr::modulus;
        // }
        ScalarField input(input_u256);
        input.self_from_montgomery_form();

        ASSERT_EQ(input.data[0], input_u256.data[0]);
        ASSERT_EQ(input.data[1], input_u256.data[1]);
        ASSERT_EQ(input.data[2], input_u256.data[2]);
        ASSERT_EQ(input.data[3], input_u256.data[3]);

        for (size_t i = 0; i < num_slices; ++i) {

            uint32_t result = scalar_multiplication::MSM<Curve>::get_scalar_slice(input, i, slice_bits);
            EXPECT_EQ(result, slices[i]);
        }
    }
    // fr test = 0;
    // test.data[0] = 0b;
    // test.data[1] = 0b010101
}

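// Checks MSM<Curve>::consume_point_schedule. Each 64-bit schedule entry packs the target bucket in
// its low 32 bits and the point index in its high 32 bits; after consumption, every non-empty
// bucket accumulator must equal the sum of the points routed to it, and empty buckets must be
// flagged as non-existent.
// NOTE: the test declaration, the AffineAdditionData declaration (assumed default-constructible)
// and the first line of the consume_point_schedule call were reconstructed from context.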
TYPED_TEST(ScalarMultiplicationTest, ConsumePointSchedule)
{
    using Curve = TypeParam;
    using AffineElement = typename Curve::AffineElement;
    // todo make this not a multiple of 10k
    const size_t total_points = 30071;
    const size_t num_buckets = 128;

    std::vector<uint64_t> input_point_schedule;
    for (size_t i = 0; i < total_points; ++i) {

        uint64_t bucket = static_cast<uint64_t>(engine.get_random_uint8()) & 0x7f;

        uint64_t schedule = static_cast<uint64_t>(bucket) + (static_cast<uint64_t>(i) << 32);
        input_point_schedule.push_back(schedule);
    }

    typename scalar_multiplication::MSM<Curve>::AffineAdditionData affine_data;
    typename scalar_multiplication::MSM<Curve>::BucketAccumulators bucket_data(num_buckets);
    scalar_multiplication::MSM<Curve>::consume_point_schedule(
        input_point_schedule, TestFixture::generators, affine_data, bucket_data, 0, 0);

    std::vector<typename Curve::Element> expected_buckets(num_buckets);
    for (auto& e : expected_buckets) {
        e.self_set_infinity();
    }
    // std::cout << "computing expected" << std::endl;
    for (size_t i = 0; i < total_points; ++i) {
        uint64_t bucket = input_point_schedule[i] & 0xFFFFFFFF;
        EXPECT_LT(static_cast<size_t>(bucket), num_buckets);
        expected_buckets[static_cast<size_t>(bucket)] += TestFixture::generators[i];
    }
    for (size_t i = 0; i < num_buckets; ++i) {
        if (!expected_buckets[i].is_point_at_infinity()) {
            AffineElement expected(expected_buckets[i]);
            EXPECT_EQ(expected, bucket_data.buckets[i]);
        } else {
            EXPECT_FALSE(bucket_data.bucket_exists.get(i));
        }
    }
}

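// Same schedule format as above, but the buckets are then folded together with
// MSM<Curve>::accumulate_buckets and the single accumulated point is compared against a direct,
// multi-threaded evaluation of sum_i(bucket_value[i] * generators[i]).
// NOTE: the AffineAdditionData declaration and the consume_point_schedule / accumulate_buckets
// call lines were reconstructed from context.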
TYPED_TEST(ScalarMultiplicationTest, ConsumePointBatchAndAccumulate)
{
    SCALAR_MULTIPLICATION_TYPE_ALIASES
    using Element = typename Curve::Element;
    using AffineElement = typename Curve::AffineElement;

    // todo make this not a multiple of 10k
    const size_t total_points = 30071;
    const size_t num_buckets = 128;

    std::vector<uint64_t> input_point_schedule;
    for (size_t i = 0; i < total_points; ++i) {

        uint64_t bucket = static_cast<uint64_t>(engine.get_random_uint8()) & 0x7f;

        uint64_t schedule = static_cast<uint64_t>(bucket) + (static_cast<uint64_t>(i) << 32);
        input_point_schedule.push_back(schedule);
    }

    typename scalar_multiplication::MSM<Curve>::AffineAdditionData affine_data;
    typename scalar_multiplication::MSM<Curve>::BucketAccumulators bucket_data(num_buckets);
    scalar_multiplication::MSM<Curve>::consume_point_schedule(
        input_point_schedule, TestFixture::generators, affine_data, bucket_data, 0, 0);

    Element result = scalar_multiplication::MSM<Curve>::accumulate_buckets(bucket_data);

    Element expected_acc = Element();
    expected_acc.self_set_infinity();
    size_t num_threads = get_num_cpus();
    std::vector<Element> expected_accs(num_threads);
    size_t range_per_thread = (total_points + num_threads - 1) / num_threads;
    parallel_for(num_threads, [&](size_t thread_idx) {
        Element expected_thread_acc;
        expected_thread_acc.self_set_infinity();
        size_t start = thread_idx * range_per_thread;
        size_t end = (thread_idx == num_threads - 1) ? total_points : (thread_idx + 1) * range_per_thread;
        bool skip = start >= total_points;
        if (!skip) {
            for (size_t i = start; i < end; ++i) {
                ScalarField scalar = input_point_schedule[i] & 0xFFFFFFFF;
                expected_thread_acc += TestFixture::generators[i] * scalar;
            }
        }
        expected_accs[thread_idx] = expected_thread_acc;
    });

    for (size_t i = 0; i < num_threads; ++i) {
        expected_acc += expected_accs[i];
    }
    AffineElement expected(expected_acc);
    EXPECT_EQ(AffineElement(result), expected);
}

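// process_buckets_count_zero_entries radix-sorts the schedule entries by bucket value and returns
// how many of them map to bucket 0; the test recomputes that count with a plain loop.
// NOTE: the first line of the process_buckets_count_zero_entries call (including its namespace
// qualification) was reconstructed from context.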
TYPED_TEST(ScalarMultiplicationTest, RadixSortCountZeroEntries)
{
    const size_t total_points = 30071;

    std::vector<uint64_t> input_point_schedule;
    for (size_t i = 0; i < total_points; ++i) {

        uint64_t bucket = static_cast<uint64_t>(engine.get_random_uint8()) & 0x7f;

        uint64_t schedule = static_cast<uint64_t>(bucket) + (static_cast<uint64_t>(i) << 32);
        input_point_schedule.push_back(schedule);
    }

    size_t result = scalar_multiplication::process_buckets_count_zero_entries(
        &input_point_schedule[0], input_point_schedule.size(), 7);
    size_t expected = 0;
    for (size_t i = 0; i < total_points; ++i) {
        expected += static_cast<size_t>((input_point_schedule[i] & 0xFFFFFFFF) == 0);
    }
    EXPECT_EQ(result, expected);
}

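// Runs a single Pippenger round in isolation: both scalars are built so that only the bits
// belonging to that round's slice are set, so the round output (after the appropriate number of
// doublings) must equal the plain scalar multiplications.
// NOTE: the SCALAR_MULTIPLICATION_TYPE_ALIASES line, the AffineAdditionData and MSMData
// declarations, and the first lines of the transform_scalar_and_get_nonzero_scalar_indices /
// evaluate_pippenger_round calls were reconstructed from context.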
TYPED_TEST(ScalarMultiplicationTest, EvaluatePippengerRound)
{
    SCALAR_MULTIPLICATION_TYPE_ALIASES
    using AffineElement = typename Curve::AffineElement;
    using Element = typename Curve::Element;

    const size_t num_points = 2;
    std::vector<ScalarField> scalars(num_points);
    constexpr size_t NUM_BITS_IN_FIELD = fr::modulus.get_msb() + 1;
    const size_t normal_slice_size = 7; // stop hardcoding
    const size_t num_buckets = 1 << normal_slice_size;

    const size_t num_rounds = (NUM_BITS_IN_FIELD + normal_slice_size - 1) / normal_slice_size;

    typename scalar_multiplication::MSM<Curve>::AffineAdditionData affine_data;
    typename scalar_multiplication::MSM<Curve>::BucketAccumulators bucket_data(num_buckets);

    for (size_t round_index = num_rounds - 1; round_index < num_rounds; round_index++) {
        const size_t num_bits_in_slice =
            (round_index == (num_rounds - 1)) ? (NUM_BITS_IN_FIELD % normal_slice_size) : normal_slice_size;
        for (size_t i = 0; i < num_points; ++i) {

            size_t hi_bit = NUM_BITS_IN_FIELD - (round_index * normal_slice_size);
            size_t lo_bit = hi_bit - normal_slice_size;
            if (hi_bit < normal_slice_size) {
                lo_bit = 0;
            }
            uint64_t slice = engine.get_random_uint64() & ((1 << num_bits_in_slice) - 1);
            // at this point in the algo, scalars has been converted out of montgomery form
            uint256_t scalar = uint256_t(slice) << lo_bit;
            scalars[i].data[0] = scalar.data[0];
            scalars[i].data[1] = scalar.data[1];
            scalars[i].data[2] = scalar.data[2];
            scalars[i].data[3] = scalar.data[3];
            scalars[i].self_to_montgomery_form();
        }

        std::vector<uint32_t> indices;
        scalar_multiplication::MSM<Curve>::transform_scalar_and_get_nonzero_scalar_indices(scalars, indices);

        Element previous_round_output;
        previous_round_output.self_set_infinity();
        for (auto x : indices) {
            ASSERT_LT(x, num_points);
        }
        std::vector<uint64_t> point_schedule(scalars.size());
        typename scalar_multiplication::MSM<Curve>::MSMData msm_data(
            scalars, TestFixture::generators, indices, point_schedule);
        Element result = scalar_multiplication::MSM<Curve>::evaluate_pippenger_round(
            msm_data, round_index, affine_data, bucket_data, previous_round_output, 7);
        Element expected;
        expected.self_set_infinity();
        for (size_t i = 0; i < num_points; ++i) {
            ScalarField baz = scalars[i].to_montgomery_form();
            expected += (TestFixture::generators[i] * baz);
        }
        size_t num_doublings = NUM_BITS_IN_FIELD - (normal_slice_size * (round_index + 1));
        if (round_index == num_rounds - 1) {
            num_doublings = 0;
        }
        for (size_t i = 0; i < num_doublings; ++i) {
            result.self_dbl();
        }
        EXPECT_EQ(AffineElement(result), AffineElement(expected));
    }
}

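// End-to-end check of MSM<Curve>::msm over the full fixture data set against the naive reference.
// NOTE: the TYPED_TEST declaration was not preserved in this listing; the name below is inferred
// from the test body.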
TYPED_TEST(ScalarMultiplicationTest, MSM)
{
    SCALAR_MULTIPLICATION_TYPE_ALIASES
    using AffineElement = typename Curve::AffineElement;

    const size_t num_points = TestFixture::num_points;

    std::span<ScalarField> scalars(&TestFixture::scalars[0], num_points);
    AffineElement result =
        scalar_multiplication::MSM<Curve>::msm(TestFixture::generators, PolynomialSpan<ScalarField>(0, scalars));

    AffineElement expected = TestFixture::naive_msm(scalars, TestFixture::generators);
    EXPECT_EQ(result, expected);
}

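// batch_multi_scalar_mul evaluates many independent MSMs in one call. This test carves a random
// number of MSMs of random sizes out of the fixture data and compares every output with the naive
// reference.
// NOTE: the TYPED_TEST declaration (named here after the BB_BENCH_NAME label) and the
// batch_points_span declaration were reconstructed from context.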
TYPED_TEST(ScalarMultiplicationTest, BatchMultiScalarMul)
{
    BB_BENCH_NAME("BatchMultiScalarMul");
    SCALAR_MULTIPLICATION_TYPE_ALIASES
    using AffineElement = typename Curve::AffineElement;

    const size_t num_msms = static_cast<size_t>(engine.get_random_uint8());
    std::vector<AffineElement> expected(num_msms);

    std::vector<std::span<const AffineElement>> batch_points_span;
    std::vector<std::span<ScalarField>> batch_scalars_spans;

    size_t vector_offset = 0;
    for (size_t k = 0; k < num_msms; ++k) {
        const size_t num_points = static_cast<size_t>(engine.get_random_uint16()) % 400;

        ASSERT_LT(vector_offset + num_points, TestFixture::num_points);
        std::span<ScalarField> batch_scalars(&TestFixture::scalars[vector_offset], num_points);
        std::span<const AffineElement> batch_points(&TestFixture::generators[vector_offset], num_points);

        vector_offset += num_points;
        batch_points_span.push_back(batch_points);
        batch_scalars_spans.push_back(batch_scalars);

        expected[k] = TestFixture::naive_msm(batch_scalars_spans[k], batch_points_span[k]);
    }

    std::vector<AffineElement> result =
        scalar_multiplication::MSM<Curve>::batch_multi_scalar_mul(batch_points_span, batch_scalars_spans);

    EXPECT_EQ(result, expected);
}

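// Same batched API, but with scalar vectors that are mostly zero, to exercise the sparse-input
// handling of batch_multi_scalar_mul.
// NOTE: the SCALAR_MULTIPLICATION_TYPE_ALIASES line and the batch_points_span declaration were
// reconstructed from context.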
TYPED_TEST(ScalarMultiplicationTest, BatchMultiScalarMulSparse)
{
    SCALAR_MULTIPLICATION_TYPE_ALIASES
    using AffineElement = typename Curve::AffineElement;

    const size_t num_msms = 10;
    std::vector<AffineElement> expected(num_msms);

    std::vector<std::vector<ScalarField>> batch_scalars(num_msms);
    std::vector<std::vector<AffineElement>> batch_input_points(num_msms);
    std::vector<std::span<const AffineElement>> batch_points_span;
    std::vector<std::span<ScalarField>> batch_scalars_spans;

    for (size_t k = 0; k < num_msms; ++k) {
        const size_t num_points = 33;
        auto& scalars = batch_scalars[k];

        scalars.resize(num_points);

        size_t fixture_offset = k * num_points;

        std::span<AffineElement> batch_points(&TestFixture::generators[fixture_offset], num_points);
        for (size_t i = 0; i < 13; ++i) {
            scalars[i] = 0;
        }
        for (size_t i = 13; i < 23; ++i) {
            scalars[i] = TestFixture::scalars[fixture_offset + i + 13];
        }
        for (size_t i = 23; i < num_points; ++i) {
            scalars[i] = 0;
        }
        batch_points_span.push_back(batch_points);
        batch_scalars_spans.push_back(batch_scalars[k]);

        expected[k] = TestFixture::naive_msm(batch_scalars[k], batch_points);
    }

    std::vector<AffineElement> result =
        scalar_multiplication::MSM<Curve>::batch_multi_scalar_mul(batch_points_span, batch_scalars_spans);

    EXPECT_EQ(result, expected);
}

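// PolynomialSpan carries a start index: scalars[i] is paired with points[start_index + i]. This
// test checks that msm honours a non-zero start index.
// NOTE: the TYPED_TEST declaration (name inferred from the body) and the
// SCALAR_MULTIPLICATION_TYPE_ALIASES line were reconstructed from context.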
TYPED_TEST(ScalarMultiplicationTest, MSMWithOffset)
{
    SCALAR_MULTIPLICATION_TYPE_ALIASES
    using AffineElement = typename Curve::AffineElement;

    const size_t start_index = 1234;
    const size_t num_points = TestFixture::num_points - start_index;

    PolynomialSpan<ScalarField> scalar_span =
        PolynomialSpan<ScalarField>(start_index, std::span<ScalarField>(&TestFixture::scalars[0], num_points));
    AffineElement result = scalar_multiplication::MSM<Curve>::msm(TestFixture::generators, scalar_span);

    std::span<AffineElement> points(&TestFixture::generators[start_index], num_points);
    AffineElement expected = TestFixture::naive_msm(scalar_span.span, points);
    EXPECT_EQ(result, expected);
}

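// An MSM whose scalars are all zero must return the point at infinity.
// NOTE: the TYPED_TEST declaration (name inferred from the body) and the
// SCALAR_MULTIPLICATION_TYPE_ALIASES line were reconstructed from context.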
TYPED_TEST(ScalarMultiplicationTest, MSMZeroScalars)
{
    SCALAR_MULTIPLICATION_TYPE_ALIASES
    using AffineElement = typename Curve::AffineElement;

    const size_t start_index = 1234;
    const size_t num_points = TestFixture::num_points - start_index;
    std::vector<ScalarField> scalars(num_points);

    for (size_t i = 0; i < num_points; ++i) {
        scalars[i] = 0;
    }

    PolynomialSpan<ScalarField> scalar_span = PolynomialSpan<ScalarField>(start_index, scalars);
    AffineElement result = scalar_multiplication::MSM<Curve>::msm(TestFixture::generators, scalar_span);

    EXPECT_EQ(result, Curve::Group::affine_point_at_infinity);
}

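// An MSM over an empty point/scalar set must also return the point at infinity.
// NOTE: the TYPED_TEST declaration (name inferred from the body), the
// SCALAR_MULTIPLICATION_TYPE_ALIASES line and the PolynomialSpan construction were reconstructed
// from context.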
TYPED_TEST(ScalarMultiplicationTest, MSMEmpty)
{
    SCALAR_MULTIPLICATION_TYPE_ALIASES
    using AffineElement = typename Curve::AffineElement;

    const size_t num_points = 0;
    std::vector<ScalarField> scalars(num_points);
    std::vector<AffineElement> input_points(num_points);
    PolynomialSpan<ScalarField> scalar_span = PolynomialSpan<ScalarField>(0, scalars);
    AffineElement result = scalar_multiplication::MSM<Curve>::msm(input_points, scalar_span);

    EXPECT_EQ(result, Curve::Group::affine_point_at_infinity);
}

// Helper function to generate scalars with specified sparsity
template <typename ScalarField>
std::vector<ScalarField> generate_sparse_scalars(size_t num_scalars, double sparsity_rate, auto& rng)
{
    std::vector<ScalarField> scalars(num_scalars);
    for (size_t i = 0; i < num_scalars; ++i) {
        // Generate random value to determine if this scalar should be zero
        double rand_val = static_cast<double>(rng.get_random_uint32()) / static_cast<double>(UINT32_MAX);
        if (rand_val < sparsity_rate) {
            scalars[i] = 0;
        } else {
            scalars[i] = ScalarField::random_element(&rng);
        }
    }
    return scalars;
}

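// The benchmark below compares two strategies for evaluating several MSMs - one
// batch_multi_scalar_mul call per MSM versus a single batched call - at increasing thread counts,
// timing each variant with BB_BENCH.
// NOTE: the TYPED_TEST declaration, the BB_BENCH skip guard (assumed to test
// bb::detail::use_bb_bench), the SCALAR_MULTIPLICATION_TYPE_ALIASES line and the scalar/point
// container declarations were reconstructed from context.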
// Test different MSM strategies with detailed benchmarking
// NOTE this requires BB_BENCH=1 to be set before the test command
TYPED_TEST(ScalarMultiplicationTest, BatchMultiScalarMulStrategyComparison)
{
#ifndef __wasm__
    if (!bb::detail::use_bb_bench) {
#else
    {
#endif
        std::cout
            << "Skipping BatchMultiScalarMulStrategyComparison as BB_BENCH=1 is not passed (OR we are in wasm).\n";
        return;
    }
    SCALAR_MULTIPLICATION_TYPE_ALIASES
    using AffineElement = typename Curve::AffineElement;

    const size_t num_msms = 3;
    const size_t msm_max_size = 1 << 17;
    const double max_sparsity = 0.1;

    // Generate test data with varying sparsity
    std::vector<std::vector<ScalarField>> scalar_storage;
    std::vector<std::span<const AffineElement>> all_points;
    std::vector<AffineElement> all_commitments;
    std::vector<std::span<ScalarField>> all_scalars;

    for (size_t i = 0; i < num_msms; ++i) {
        // Generate random sizes and density of 0s
        const size_t size = engine.get_random_uint64() % msm_max_size;
        const double sparsity = engine.get_random_uint8() / 255.0 * max_sparsity;
        auto scalars = generate_sparse_scalars<ScalarField>(size, sparsity, engine);
        scalar_storage.push_back(std::move(scalars));

        std::span<const AffineElement> points(&TestFixture::generators[i], size);
        all_points.push_back(points);
        all_scalars.push_back(scalar_storage.back());
        all_commitments.push_back(TestFixture::naive_msm(all_scalars.back(), all_points.back()));
    }
    auto func = [&]<bb::detail::OperationLabel thread_prefix>(size_t num_threads) {
        set_parallel_for_concurrency(num_threads);
        // Strategy 1: Individual MSMs
        {
            BB_BENCH_NAME((bb::detail::concat<thread_prefix, "IndividualMSMs">()));
            for (size_t i = 0; i < num_msms; ++i) {
                std::vector<std::span<const AffineElement>> single_points = { all_points[i] };
                std::vector<std::span<ScalarField>> single_scalars = { all_scalars[i] };
                auto result = scalar_multiplication::MSM<Curve>::batch_multi_scalar_mul(single_points, single_scalars);
                EXPECT_EQ(result[0], all_commitments[i]);
            }
        }
        // Strategy 2: Batch
        {
            BB_BENCH_NAME((bb::detail::concat<thread_prefix, "BatchMSMs">()));
            auto result = scalar_multiplication::MSM<Curve>::batch_multi_scalar_mul(all_points, all_scalars);
            EXPECT_EQ(result, all_commitments);
        }
    };
    // call lambda with template param
    func.template operator()<"1 thread ">(1);
    func.template operator()<"2 threads ">(2);
    func.template operator()<"4 threads ">(4);
    func.template operator()<"8 threads ">(8);
    func.template operator()<"16 threads ">(16);
    func.template operator()<"32 threads ">(32);
}

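// Non-typed smoke test: two explicit Grumpkin points and scalars, checked against direct group
// arithmetic.
// NOTE: the lines constructing the points vector and the PolynomialSpan were reconstructed from
// context.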
TEST(ScalarMultiplication, SmallInputsExplicit)
{
    uint256_t x0(0x68df84429941826a, 0xeb08934ed806781c, 0xc14b6a2e4f796a73, 0x08dc1a9a11a3c8db);
    uint256_t y0(0x8ae5c31aa997f141, 0xe85f20c504f2c11b, 0x81a94193f3b1ce2b, 0x26f2c37372adb5b7);
    uint256_t x1(0x80f5a592d919d32f, 0x1362652b984e51ca, 0xa0b26666f770c2a1, 0x142c6e1964e5c3c5);
    uint256_t y1(0xb6c322ebb5ae4bc5, 0xf9fef6c7909c00f8, 0xb37ca1cc9af3b421, 0x1e331c7fa73d6a59);
    uint256_t s0(0xe48bf12a24272e08, 0xf8dd0182577f3567, 0xec8fd222b8a6becb, 0x102d76b945612c9b);
    uint256_t s1(0x098ae8d69f1e4e9e, 0xb5c8313c0f6040ed, 0xf78041e30cc46c44, 0x1d1e6e0c21892e13);

    std::vector<grumpkin::fr> scalars{ s0, s1 };

    std::vector<grumpkin::g1::affine_element> points{ grumpkin::g1::affine_element(x0, y0),
                                                      grumpkin::g1::affine_element(x1, y1) };

    PolynomialSpan<grumpkin::fr> scalar_span(0, scalars);

    auto result = scalar_multiplication::MSM<curve::Grumpkin>::msm(points, scalar_span);

    grumpkin::g1::element expected = (points[0] * scalars[0]) + (points[1] * scalars[1]);

    EXPECT_EQ(result, grumpkin::g1::affine_element(expected));
}