Fuzzer-found bugs that are not related to bigfield (#1605)
* Added a regression test to detect original parallelized asm bugs

* Fixed parallelized SQR asm bugs

* Added comment that Emerson wanted

* Fix for create_range_constraint in the constant case

* Various safe_uint bug fixes having to do with not handling constant cases

* Fixed the comparison issue for the point at infinity

* Added regression tests and fixed 3 more missing flags in optimized SQR

* Fixed tabulation in asm_macros.hpp

* Fixed Montgomery issue that Adrian found

* Fixed issue with negative zero found by Guido

* Fixed the same self_neg bug found by Guido

* Renamed 1 test

* Fixed one more SQR bug

* Added extra regression tests

* Slightly moved one test

* Added TODO comments into field_impl for potential optimisations

* Addressed Zac's comments

Co-authored-by: zac-williamson <blorktronics@gmail.com>
Rumata888 and zac-williamson authored Nov 7, 2022
1 parent 147fa90 commit 0be9a97
Showing 12 changed files with 135 additions and 35 deletions.
26 changes: 26 additions & 0 deletions src/aztec/ecc/curves/bn254/fq.test.cpp
@@ -471,3 +471,29 @@ TEST(fq, pow_regression_check)
EXPECT_EQ(zero.pow(uint256_t(0)), one);
}
// 438268ca91d42ad f1e7025a7b654e1f f8d9d72e0438b995 8c422ec208ac8a6e

TEST(fq, sqr_regression)
{
uint256_t values[] = { uint256_t(0xbdf876654b0ade1b, 0x2c3a66c64569f338, 0x2cd8bf2ec1fe55a3, 0x11c0ea9ee5693ede),
uint256_t(0x551b14ec34f2151c, 0x62e472ed83a2891e, 0xf208d5e5c9b5b3fb, 0x14315aeaf6027d8c),
uint256_t(0xad39959ae8013750, 0x7f1d2c709ab84cbb, 0x408028b80a60c2f1, 0x1dcd116fc26f856e),
uint256_t(0x95e967d30dcce9ce, 0x56139274241d2ea1, 0x85b19c1c616ec456, 0x1f1780cf9bf045b4),
uint256_t(0xbe841c861d8eb80e, 0xc5980d67a21386c0, 0x5fd1f1afecddeeb5, 0x24dbb8c1baea0250),
uint256_t(0x3ae4b3a27f05d6e3, 0xc5f6785b12df8d29, 0xc3a6c5f095103046, 0xd6b94cb2cc1fd4b),
uint256_t(0xc003c71932a6ced5, 0x6302a413f68e26e9, 0x2ed4a9b64d69fad, 0xfe61ffab1ae227d) };
for (auto& value : values) {
fq element(value);
EXPECT_EQ(element.sqr(), element * element);
}
}

TEST(fq, neg_and_self_neg_0_cmp_regression)
{
fq a = 0;
fq a_neg = -a;
EXPECT_EQ((a == a_neg), true);
a = 0;
a_neg = 0;
a_neg.self_neg();
EXPECT_EQ((a == a_neg), true);
}
12 changes: 12 additions & 0 deletions src/aztec/ecc/curves/secp256k1/secp256k1.test.cpp
@@ -494,4 +494,16 @@ TEST(secp256k1, derive_generators)
}
}
*/

TEST(secp256k1, neg_and_self_neg_0_cmp_regression)
{
secp256k1::fq a = 0;
secp256k1::fq a_neg = -a;
EXPECT_EQ((a == a_neg), true);
a = 0;
a_neg = 0;
a_neg.self_neg();
EXPECT_EQ((a == a_neg), true);
}

} // namespace test_secp256k1
20 changes: 14 additions & 6 deletions src/aztec/ecc/fields/asm_macros.hpp
@@ -162,7 +162,7 @@
"mulxq %[modulus_1], %%rdi, %%rcx \n\t" /* (t[2], t[3]) <- (modulus[1] * k) */ \
"adcq %%rcx, %%r10 \n\t" /* r[2] += t[3] + flag_c */ \
"adcq $0, %%r11 \n\t" /* r[4] += flag_c */ \
/* Partial fix "adcq $0, %%r12 \n\t"*/ /* r[4] += flag_c */ \
/* Partial fix "adcq $0, %%r12 \n\t"*/ /* r[4] += flag_c */ \
"addq %%rdi, %%r9 \n\t" /* r[1] += t[2] */ \
"mulxq %[modulus_2], %%rdi, %%rcx \n\t" /* (t[0], t[1]) <- (modulus[3] * k) */ \
"mulxq %[modulus_3], %%r8, %%rdx \n\t" /* (t[2], t[3]) <- (modulus[2] * k) */ \
@@ -540,6 +540,7 @@
"adcxq %%rcx, %%r13 \n\t" /* r[5] += t[4] + flag_o */ \
"adoxq %[zero_reference], %%r13 \n\t" /* r[5] += flag_o */ \
"adcxq %[zero_reference], %%r14 \n\t" /* r[6] += flag_c */ \
"adoxq %[zero_reference], %%r14 \n\t" /* r[6] += flag_o */ \
\
/* double result registers */ \
"adoxq %%r9, %%r9 \n\t" /* r[1] = 2r[1] */ \
@@ -573,10 +574,12 @@
"mulxq %[modulus_0], %%rdi, %%rcx \n\t" /* (t[0], t[1]) <- (modulus[0] * k) */ \
"adoxq %%rdi, %%r8 \n\t" /* r[0] += t[0] (%r8 now free) */ \
"mulxq %[modulus_3], %%r8, %%rdi \n\t" /* (t[2], t[3]) <- (modulus[2] * k) */ \
"adcxq %%rdi, %%r12 \n\t" /* r[4] += t[3] + flag_o */ \
"adoxq %%rcx, %%r9 \n\t" /* r[1] += t[1] + flag_c */ \
"adcxq %[zero_reference], %%r13 \n\t" /* r[5] += flag_o */ \
"adcxq %%rdi, %%r12 \n\t" /* r[4] += t[3] + flag_c */ \
"adoxq %%rcx, %%r9 \n\t" /* r[1] += t[1] + flag_o */ \
"adcxq %[zero_reference], %%r13 \n\t" /* r[5] += flag_c */ \
"adcxq %[zero_reference], %%r14 \n\t" /* r[6] += flag_c */ \
"mulxq %[modulus_1], %%rdi, %%rcx \n\t" /* (t[2], t[3]) <- (modulus[1] * k) */ \
"adcxq %[zero_reference], %%r15 \n\t" /* r[7] += flag_c */ \
"adoxq %%rcx, %%r10 \n\t" /* r[2] += t[3] + flag_o */ \
"adcxq %%rdi, %%r9 \n\t" /* r[1] += t[2] */ \
"adoxq %%r8, %%r11 \n\t" /* r[3] += t[2] + flag_o */ \
@@ -594,6 +597,9 @@
"adoxq %%rcx, %%r13 \n\t" /* r[5] += t[3] + flag_o */ \
"adcxq %[zero_reference], %%r13 \n\t" /* r[5] += flag_c */ \
"adoxq %[zero_reference], %%r14 \n\t" /* r[6] += flag_o */ \
"adcxq %[zero_reference], %%r14 \n\t" /* r[6] += flag_c */ \
"adoxq %[zero_reference], %%r15 \n\t" /* r[7] += flag_o */ \
"adcxq %[zero_reference], %%r15 \n\t" /* r[7] += flag_c */ \
"mulxq %[modulus_0], %%r8, %%rcx \n\t" /* (t[0], t[1]) <- (modulus[0] * k) */ \
"adcxq %%r8, %%r9 \n\t" /* r[1] += t[0] (%r9 now free) */ \
"adoxq %%rcx, %%r10 \n\t" /* r[2] += t[1] + flag_c */ \
@@ -614,12 +620,14 @@
"adcxq %%r8, %%r13 \n\t" /* r[5] += t[2] + flag_c */ \
"adoxq %%r9, %%r14 \n\t" /* r[6] += t[3] + flag_c */ \
"adcxq %[zero_reference], %%r14 \n\t" /* r[6] += flag_o */ \
"adoxq %[zero_reference], %%r15 \n\t" /* r[7] += flag_c */ \
"adoxq %[zero_reference], %%r15 \n\t" /* r[7] += flag_o */ \
"adcxq %[zero_reference], %%r15 \n\t" /* r[7] += flag_c */ \
"mulxq %[modulus_0], %%r8, %%r9 \n\t" /* (t[0], t[1]) <- (modulus[0] * k) */ \
"adcxq %%r8, %%r10 \n\t" /* r[2] += t[0] (%r10 now free) */ \
"adoxq %%r9, %%r11 \n\t" /* r[3] += t[1] + flag_c */ \
"adcxq %%rdi, %%r11 \n\t" /* r[3] += t[2] */ \
"adoxq %[zero_reference], %%r12 \n\t" /* r[4] += flag_c */ \
"adoxq %[zero_reference], %%r12 \n\t" /* r[4] += flag_o */ \
"adoxq %[zero_reference], %%r13 \n\t" /* r[5] += flag_o */ \
\
/* perform modular reduction: r[3] */ \
"movq %%r11, %%rdx \n\t" /* move r11 into %rdx */ \
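The added `adcxq`/`adoxq %[zero_reference]` lines above fold pending carries from both flag chains into the high limbs: `adcxq` consumes only the carry flag and `adoxq` only the overflow flag, so the two chains have to be flushed separately. A minimal plain-C++ model of the bug class (hypothetical values, not the actual macro):

// Two independent carry chains feeding the same limb: cf models a carry pending
// on the adcx (CF) chain, of a carry pending on the adox (OF) chain.
uint64_t r6 = 0x123456789abcdef0ULL; // partially accumulated limb r[6]
uint64_t cf = 1;                     // pending carry on the adcx chain
uint64_t of = 1;                     // pending carry on the adox chain
r6 += cf;                            // "adcxq %[zero_reference], %%r14"
r6 += of;                            // "adoxq %[zero_reference], %%r14", the kind of line this commit adds
// Dropping either addition silently loses a carry for some inputs, which is the
// kind of case the sqr_regression values in fq.test.cpp exercise.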
1 change: 1 addition & 0 deletions src/aztec/ecc/fields/field.hpp
@@ -23,6 +23,7 @@
namespace barretenberg {
template <class Params> struct alignas(32) field {
public:
// We don't initialize data by default since we'd lose a lot of time on pointless initializations.
field() noexcept {}

constexpr field(const uint256_t& input) noexcept
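A usage note on the intentionally trivial default constructor documented above (a sketch based on the pattern the tests in this commit use, e.g. `fq a = 0;`):

field x;   // limbs hold indeterminate values on purpose, so no wasted stores
x = 0;     // assign before the first read; default construction alone gives no defined value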
33 changes: 28 additions & 5 deletions src/aztec/ecc/fields/field_impl.hpp
@@ -152,8 +152,25 @@ template <class T> constexpr field<T> field<T>::operator-() const noexcept
constexpr field p{ modulus.data[0], modulus.data[1], modulus.data[2], modulus.data[3] };
return p - *this; // modulus - *this;
}

// TODO: there are 3 ways we can make this more efficient
// 1: we subtract `p` from `*this` instead of `2p`
// 2: instead of `p - *this`, we use an asm block that does `p - *this` without the assembly reduction step
// 3: we replace `(p - *this).reduce_once()` with an assembly block that is equivalent to `p - *this`,
// but we call `REDUCE_FIELD_ELEMENT` with `not_twice_modulus` instead of `twice_modulus`
// not sure which is faster and whether any of the above might break something!
//
// More context below:
// the operator-(a, b) method's asm implementation has a sneaky way to check underflow.
// if `a - b` underflows we need to add in `2p`. Instead of conditional branching which would cause pipeline
// flushes, we add `2p` into the result of `a - b`. If the result triggers the overflow flag, then we know we are
// correcting an *underflow* produced from computing `a - b`. Finally...we use the overflow flag to conditionally
// move data into registers such that we end up with either `a - b` or `2p + (a - b)` (this is branchless). OK! So
// what's the problem? Well we assume that every field element lies between 0 and 2p - 1. But we are computing `2p -
// *this`! If *this = 0 then we exceed this bound hence the need for the extra reduction step. HOWEVER, we also know
// that 2p - *this won't underflow so we could skip the underflow check present in the assembly code
constexpr field p{ twice_modulus.data[0], twice_modulus.data[1], twice_modulus.data[2], twice_modulus.data[3] };
return p - *this; // modulus - *this;
return (p - *this).reduce_once(); // modulus - *this;
}
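The underflow trick described in the comment above can be modelled outside the asm block. A self-contained sketch (hypothetical helper names, plain C++ using the `unsigned __int128` extension instead of hand-written carry flags): compute `a - b`, unconditionally compute `(a - b) + 2p` as well, then use the borrow of the first subtraction to select the result without branching.

#include <array>
#include <cstdint>

using limbs = std::array<uint64_t, 4>;

// 4-limb subtraction; returns 1 if a < b (the subtraction borrowed), else 0
static uint64_t sub_with_borrow(const limbs& a, const limbs& b, limbs& out)
{
    unsigned __int128 borrow = 0;
    for (int i = 0; i < 4; ++i) {
        unsigned __int128 t = (unsigned __int128)a[i] - b[i] - borrow;
        out[i] = (uint64_t)t;
        borrow = (t >> 64) & 1; // set iff this limb underflowed
    }
    return (uint64_t)borrow;
}

// r = (a - b), corrected into range by adding 2p only when the subtraction
// underflowed. The asm block reaches the same result with a conditional move.
static limbs field_sub_branchless(const limbs& a, const limbs& b, const limbs& twice_p)
{
    limbs diff, corrected, r;
    const uint64_t borrow = sub_with_borrow(a, b, diff);
    unsigned __int128 carry = 0;
    for (int i = 0; i < 4; ++i) { // unconditionally compute diff + 2p
        unsigned __int128 t = (unsigned __int128)diff[i] + twice_p[i] + carry;
        corrected[i] = (uint64_t)t;
        carry = t >> 64;
    }
    const uint64_t mask = 0 - borrow; // all ones if we underflowed, else zero
    for (int i = 0; i < 4; ++i) {     // branchless select between diff and corrected
        r[i] = (diff[i] & ~mask) | (corrected[i] & mask);
    }
    return r;
}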

template <class T> constexpr field<T> field<T>::operator-=(const field& other) noexcept
@@ -179,7 +196,7 @@ template <class T> constexpr void field<T>::self_neg() noexcept
*this = p - *this;
} else {
constexpr field p{ twice_modulus.data[0], twice_modulus.data[1], twice_modulus.data[2], twice_modulus.data[3] };
*this = p - *this;
*this = (p - *this).reduce_once();
}
}

@@ -236,9 +253,15 @@ template <class T> constexpr field<T> field<T>::to_montgomery_form() const noexc
constexpr field r_squared{ T::r_squared_0, T::r_squared_1, T::r_squared_2, T::r_squared_3 };

field result = *this;
result.reduce_once();
result.reduce_once();
result.reduce_once();
// TODO: are these reductions needed?
// Rationale: We want to take any 256-bit input and be able to convert into montgomery form.
// A basic heuristic we use is that any input into the `*` operator must be between [0, 2p - 1]
// to prevent overflows in the asm algorithm.
// However... r_squared is already reduced so perhaps we can relax this requirement?
// (would be good to identify a failure case where not calling self_reduce triggers an error)
result.self_reduce_once();
result.self_reduce_once();
result.self_reduce_once();
return (result * r_squared).reduce_once();
}

13 changes: 12 additions & 1 deletion src/aztec/ecc/groups/affine_element.test.cpp
@@ -1,4 +1,5 @@
#include <ecc/curves/bn254/g1.hpp>
#include <ecc/curves/secp256k1/secp256k1.hpp>
#include <common/test.hpp>
#include <fstream>
#include <common/serialize.hpp>
@@ -28,11 +29,21 @@ TEST(affine_element, read_write_buffer)

// Regression test to ensure that the point at infinity is not equal to its coordinate-wise reduction, which may lie
// on the curve, depending on the y-coordinate.
TEST(affine_element, infinity_regression)
TEST(affine_element, infinity_equality_regression)
{
g1::affine_element P;
P.self_set_infinity();
g1::affine_element R(0, P.y);
ASSERT_FALSE(P == R);
}

// Regression test to ensure that the comparison operators treat the point at infinity as the lowest
// element, so the ordering used by std::sort stays consistent when points at infinity are present.
TEST(affine_element, infinity_ordering_regression)
{
secp256k1::g1::affine_element P(0, 1), Q(0, 1);

P.self_set_infinity();
EXPECT_NE(P < Q, Q < P);
}
} // namespace test_affine_element
9 changes: 9 additions & 0 deletions src/aztec/ecc/groups/affine_element_impl.hpp
@@ -132,10 +132,19 @@ constexpr bool affine_element<Fq, Fr, T>::operator==(const affine_element& other

/**
* Comparison operators (for std::sort)
*
* @details CAUTION!! Don't use this operator. It has no meaning other than for use by std::sort.
**/
template <class Fq, class Fr, class T>
constexpr bool affine_element<Fq, Fr, T>::operator>(const affine_element& other) const noexcept
{
// We are setting point at infinity to always be the lowest element
if (is_point_at_infinity()) {
return false;
} else if (other.is_point_at_infinity()) {
return true;
}

if (x > other.x) {
return true;
} else if (x == other.x && y > other.y) {
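A usage sketch of what the infinity handling above protects (hypothetical `collect_points` helper; `operator<` is exercised the same way by the new ordering regression test): sorting stays well defined when some points are at infinity, and those points end up at the front.

#include <algorithm>
#include <vector>

std::vector<g1::affine_element> points = collect_points(); // assumed helper; may contain points at infinity
std::sort(points.begin(), points.end());                   // relies only on the comparison operators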
3 changes: 2 additions & 1 deletion src/aztec/numeric/uint256/uint256.hpp
@@ -147,6 +147,8 @@ class alignas(32) uint256_t {

uint64_t data[4];

constexpr std::pair<uint256_t, uint256_t> divmod(const uint256_t& b) const;

private:
constexpr std::pair<uint64_t, uint64_t> mul_wide(const uint64_t a, const uint64_t b) const;
constexpr std::pair<uint64_t, uint64_t> addc(const uint64_t a, const uint64_t b, const uint64_t carry_in) const;
@@ -162,7 +164,6 @@
const uint64_t b,
const uint64_t c,
const uint64_t carry_in) const;
constexpr std::pair<uint256_t, uint256_t> divmod(const uint256_t& b) const;
};

inline std::ostream& operator<<(std::ostream& os, uint256_t const& a)
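Moving the `divmod` declaration above the `private:` label makes it part of the public interface, which is what lets `safe_uint` below compute quotient and remainder in one call. A small usage sketch (assuming the usual single-word constructor):

uint256_t a(12345);
uint256_t b(7);
auto [q, r] = a.divmod(b); // q == 1763, r == 4 in a single call, instead of separate / and %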
13 changes: 7 additions & 6 deletions src/aztec/stdlib/primitives/field/field.cpp
@@ -575,14 +575,15 @@ void field_t<ComposerContext>::create_range_constraint(const size_t num_bits, st
{
if (num_bits == 0) {
assert_is_zero("0-bit range_constraint on non-zero field_t.");
}
if (is_constant()) {
ASSERT(uint256_t(get_value()).get_msb() < num_bits);
} else {
if constexpr (ComposerContext::type == waffle::ComposerType::PLOOKUP) {
context->decompose_into_default_range(normalize().get_witness_index(), num_bits, msg);
if (is_constant()) {
ASSERT(uint256_t(get_value()).get_msb() < num_bits);
} else {
context->decompose_into_base4_accumulators(normalize().get_witness_index(), num_bits, msg);
if constexpr (ComposerContext::type == waffle::ComposerType::PLOOKUP) {
context->decompose_into_default_range(normalize().get_witness_index(), num_bits, msg);
} else {
context->decompose_into_base4_accumulators(normalize().get_witness_index(), num_bits, msg);
}
}
}
}
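A sketch of the two paths the reordered logic above now distinguishes (constructor spellings assumed, not taken from this diff): a constant is checked host-side by the ASSERT, while a witness is still decomposed into range gates.

field_t<Composer> c(&composer, 42);                       // constant: no witness index
c.create_range_constraint(8, "constant fits in 8 bits");  // ASSERT only, adds no gates
field_t<Composer> w(witness_t<Composer>(&composer, 42));  // witness
w.create_range_constraint(8, "witness fits in 8 bits");   // decomposed into range gates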
21 changes: 15 additions & 6 deletions src/aztec/stdlib/primitives/safe_uint/safe_uint.cpp
@@ -19,7 +19,6 @@ safe_uint_t<ComposerContext> safe_uint_t<ComposerContext>::operator*(const safe_

uint512_t new_max = uint512_t(current_max) * uint512_t(other.current_max);
ASSERT(new_max.hi == 0);

return safe_uint_t((value * other.value), new_max.lo, IS_UNSAFE);
}

@@ -65,10 +64,14 @@ std::array<safe_uint_t<ComposerContext>, 3> safe_uint_t<ComposerContext>::slice(
const uint8_t lsb) const
{
ASSERT(msb >= lsb);
ASSERT(static_cast<size_t>(msb) <= rollup::MAX_NO_WRAP_INTEGER_BIT_LENGTH);
const safe_uint_t lhs = *this;
ComposerContext* ctx = lhs.get_context();

const uint256_t value = uint256_t(get_value());
// This should be caught by the proof itself, but the circuit creator will have no way of knowing where the
// issue is
ASSERT(value < (static_cast<uint256_t>(1) << rollup::MAX_NO_WRAP_INTEGER_BIT_LENGTH));
const auto msb_plus_one = uint32_t(msb) + 1;
const auto hi_mask = ((uint256_t(1) << (256 - uint32_t(msb))) - 1);
const auto hi = (value >> msb_plus_one) & hi_mask;
@@ -78,11 +81,17 @@

const auto slice_mask = ((uint256_t(1) << (uint32_t(msb - lsb) + 1)) - 1);
const auto slice = (value >> lsb) & slice_mask;

const safe_uint_t hi_wit(witness_t(ctx, hi), rollup::MAX_NO_WRAP_INTEGER_BIT_LENGTH - uint32_t(msb), "hi_wit");
const safe_uint_t lo_wit(witness_t(ctx, lo), lsb, "lo_wit");
const safe_uint_t slice_wit(witness_t(ctx, slice), msb_plus_one - lsb, "slice_wit");

safe_uint_t lo_wit, slice_wit, hi_wit;
if (this->value.is_constant()) {
hi_wit = safe_uint_t(hi);
lo_wit = safe_uint_t(lo);
slice_wit = safe_uint_t(slice);

} else {
hi_wit = safe_uint_t(witness_t(ctx, hi), rollup::MAX_NO_WRAP_INTEGER_BIT_LENGTH - uint32_t(msb), "hi_wit");
lo_wit = safe_uint_t(witness_t(ctx, lo), lsb, "lo_wit");
slice_wit = safe_uint_t(witness_t(ctx, slice), msb_plus_one - lsb, "slice_wit");
}
assert_equal(((hi_wit * safe_uint_t(uint256_t(1) << msb_plus_one)) + lo_wit +
(slice_wit * safe_uint_t(uint256_t(1) << lsb))));

7 changes: 5 additions & 2 deletions src/aztec/stdlib/primitives/safe_uint/safe_uint.hpp
@@ -100,6 +100,7 @@ template <typename ComposerContext> class safe_uint_t {
std::string const& description = "") const
{
ASSERT(difference_bit_size <= MAX_BIT_NUM);
ASSERT(!(this->value.is_constant() && other.value.is_constant()));
field_ct difference_val = this->value - other.value;
safe_uint_t<ComposerContext> difference(difference_val, difference_bit_size, format("subtract: ", description));
// This checks the subtraction is correct for integers without any wraps
Expand All @@ -110,6 +111,9 @@ template <typename ComposerContext> class safe_uint_t {

safe_uint_t operator-(const safe_uint_t& other) const
{
// We could get a constant underflow
ASSERT(!(this->value.is_constant() && other.value.is_constant() &&
static_cast<uint256_t>(value.get_value()) < static_cast<uint256_t>(other.value.get_value())));
field_ct difference_val = this->value - other.value;
safe_uint_t<ComposerContext> difference(difference_val, (size_t)(current_max.get_msb() + 1), "- operator");
// This checks the subtraction is correct for integers without any wraps
@@ -160,8 +164,7 @@
{
ASSERT(this->value.is_constant() == false);
uint256_t val = this->value.get_value();
auto quotient_val = (uint256_t)(val / (uint256_t)other.value.get_value());
auto remainder_val = (uint256_t)(val % (uint256_t)other.value.get_value());
auto [quotient_val, remainder_val] = val.divmod((uint256_t)other.value.get_value());
field_ct quotient_field(witness_t(value.context, quotient_val));
field_ct remainder_field(witness_t(value.context, remainder_val));
safe_uint_t<ComposerContext> quotient(
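A sketch of the constant-underflow case the new ASSERT in `operator-` rules out (constant construction spelled as in the constant path of `slice` above, with `Composer` standing in for the composer type): with two constants no gates are created, so without the check the subtraction would produce a field element near the modulus instead of failing the proof.

safe_uint_t<Composer> two(uint256_t(2));  // constant safe_uint, as in the slice constant path
safe_uint_t<Composer> five(uint256_t(5));
auto wrapped = two - five;                // now trips the ASSERT in debug builds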