diff --git a/src/ecmult_const_impl.h b/src/ecmult_const_impl.h
index 7fd7eadc7b..7dc4aac25d 100644
--- a/src/ecmult_const_impl.h
+++ b/src/ecmult_const_impl.h
@@ -87,8 +87,6 @@ static void secp256k1_ecmult_const_odd_multiples_table_globalz(secp256k1_ge *pre
     secp256k1_fe neg_y; \
     VERIFY_CHECK((n) < (1U << ECMULT_CONST_GROUP_SIZE)); \
     VERIFY_CHECK(index < (1U << (ECMULT_CONST_GROUP_SIZE - 1))); \
-    VERIFY_SETUP(secp256k1_fe_clear(&(r)->x)); \
-    VERIFY_SETUP(secp256k1_fe_clear(&(r)->y)); \
    /* Unconditionally set r->x = (pre)[m].x. r->y = (pre)[m].y. because it's either the correct one \
     * or will get replaced in the later iterations, this is needed to make sure `r` is initialized. */ \
    (r)->x = (pre)[m].x; \
@@ -349,9 +347,7 @@ static int secp256k1_ecmult_const_xonly(secp256k1_fe* r, const secp256k1_fe *n,
     secp256k1_fe_mul(&g, &g, n);
     if (d) {
         secp256k1_fe b;
-#ifdef VERIFY
         VERIFY_CHECK(!secp256k1_fe_normalizes_to_zero(d));
-#endif
         secp256k1_fe_sqr(&b, d);
         VERIFY_CHECK(SECP256K1_B <= 8); /* magnitude of b will be <= 8 after the next call */
         secp256k1_fe_mul_int(&b, SECP256K1_B);
@@ -384,13 +380,9 @@ static int secp256k1_ecmult_const_xonly(secp256k1_fe* r, const secp256k1_fe *n,
     p.infinity = 0;
 
     /* Perform x-only EC multiplication of P with q. */
-#ifdef VERIFY
     VERIFY_CHECK(!secp256k1_scalar_is_zero(q));
-#endif
     secp256k1_ecmult_const(&rj, &p, q);
-#ifdef VERIFY
     VERIFY_CHECK(!secp256k1_gej_is_infinity(&rj));
-#endif
 
     /* The resulting (X, Y, Z) point on the effective-affine isomorphic curve corresponds to
      * (X, Y, Z*v) on the secp256k1 curve. The affine version of that has X coordinate
diff --git a/src/field.h b/src/field.h
index ccd228e1ae..df7411131b 100644
--- a/src/field.h
+++ b/src/field.h
@@ -345,8 +345,10 @@ static int secp256k1_fe_is_square_var(const secp256k1_fe *a);
 
 /** Check invariants on a field element (no-op unless VERIFY is enabled). */
 static void secp256k1_fe_verify(const secp256k1_fe *a);
+#define SECP256K1_FE_VERIFY(a) secp256k1_fe_verify(a)
 
 /** Check that magnitude of a is at most m (no-op unless VERIFY is enabled). */
 static void secp256k1_fe_verify_magnitude(const secp256k1_fe *a, int m);
+#define SECP256K1_FE_VERIFY_MAGNITUDE(a, m) secp256k1_fe_verify_magnitude(a, m)
 
 #endif /* SECP256K1_FIELD_H */
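
[note] Every hunk in this patch that drops an `#ifdef VERIFY` guard relies on one contract: VERIFY_CHECK itself must expand to nothing when VERIFY is not defined, so a bare call site costs nothing in production builds. The SECP256K1_FE_VERIFY/SECP256K1_FE_VERIFY_MAGNITUDE macros added above give the invariant checks the same property, under an uppercase name that flags them as compile-time-removable. A minimal, self-contained sketch of the assumed contract (the real definition lives in util.h; the abort-based VERIFY branch here is illustrative only):

    #include <stdio.h>
    #include <stdlib.h>

    #ifdef VERIFY
    /* VERIFY builds: evaluate the condition, abort on failure. */
    #define VERIFY_CHECK(cond) do { \
        if (!(cond)) { \
            fprintf(stderr, "check failed: %s\n", #cond); \
            abort(); \
        } \
    } while (0)
    #else
    /* Production builds: the argument tokens are discarded before
     * compilation, so the condition is never evaluated. */
    #define VERIFY_CHECK(cond)
    #endif

    int main(void) {
        int x = 5;
        VERIFY_CHECK(x > 0); /* no surrounding #ifdef VERIFY needed */
        printf("ok\n");
        return 0;
    }
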
diff --git a/src/field_10x26_impl.h b/src/field_10x26_impl.h
index 8445db1639..666068c712 100644
--- a/src/field_10x26_impl.h
+++ b/src/field_10x26_impl.h
@@ -403,11 +403,7 @@ void secp256k1_fe_sqr_inner(uint32_t *r, const uint32_t *a);
 #else
 
-#ifdef VERIFY
 #define VERIFY_BITS(x, n) VERIFY_CHECK(((x) >> (n)) == 0)
-#else
-#define VERIFY_BITS(x, n) do { } while(0)
-#endif
 
 SECP256K1_INLINE static void secp256k1_fe_mul_inner(uint32_t *r, const uint32_t *a, const uint32_t * SECP256K1_RESTRICT b) {
     uint64_t c, d;
diff --git a/src/field_5x52_int128_impl.h b/src/field_5x52_int128_impl.h
index 1a472f97bb..f23f8ee1c4 100644
--- a/src/field_5x52_int128_impl.h
+++ b/src/field_5x52_int128_impl.h
@@ -12,13 +12,8 @@
 #include "int128.h"
 #include "util.h"
 
-#ifdef VERIFY
 #define VERIFY_BITS(x, n) VERIFY_CHECK(((x) >> (n)) == 0)
 #define VERIFY_BITS_128(x, n) VERIFY_CHECK(secp256k1_u128_check_bits((x), (n)))
-#else
-#define VERIFY_BITS(x, n) do { } while(0)
-#define VERIFY_BITS_128(x, n) do { } while(0)
-#endif
 
 SECP256K1_INLINE static void secp256k1_fe_mul_inner(uint64_t *r, const uint64_t *a, const uint64_t * SECP256K1_RESTRICT b) {
     secp256k1_uint128 c, d;
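
[note] With that contract, the dual definitions of VERIFY_BITS in the two field backends collapse into one: in production the VERIFY_CHECK inside them vanishes, and VERIFY_BITS vanishes with it. The flip side is that every unguarded condition must be side-effect free, because it now silently disappears. A small illustration (the helper name is hypothetical):

    #include <stdint.h>

    #define VERIFY_CHECK(cond)  /* assume a production build: empty */
    #define VERIFY_BITS(x, n) VERIFY_CHECK(((x) >> (n)) == 0)

    static uint64_t halve_product(uint64_t a, uint64_t b) {
        uint64_t c = a * b;
        VERIFY_BITS(c, 63);     /* fine: a pure expression, free to drop */
        /* VERIFY_CHECK(c-- > 0);  would be a bug: the decrement would
         *                         happen only in VERIFY builds          */
        return c >> 1;
    }
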
diff --git a/src/field_impl.h b/src/field_impl.h
index 80d34b9ef2..989e9cdb2f 100644
--- a/src/field_impl.h
+++ b/src/field_impl.h
@@ -20,12 +20,11 @@ SECP256K1_INLINE static int secp256k1_fe_equal(const secp256k1_fe *a, const secp256k1_fe *b) {
     secp256k1_fe na;
-#ifdef VERIFY
-    secp256k1_fe_verify(a);
-    secp256k1_fe_verify(b);
-    secp256k1_fe_verify_magnitude(a, 1);
-    secp256k1_fe_verify_magnitude(b, 31);
-#endif
+    SECP256K1_FE_VERIFY(a);
+    SECP256K1_FE_VERIFY(b);
+    SECP256K1_FE_VERIFY_MAGNITUDE(a, 1);
+    SECP256K1_FE_VERIFY_MAGNITUDE(b, 31);
+
     secp256k1_fe_negate(&na, a, 1);
     secp256k1_fe_add(&na, b);
     return secp256k1_fe_normalizes_to_zero(&na);
@@ -44,11 +43,9 @@ static int secp256k1_fe_sqrt(secp256k1_fe * SECP256K1_RESTRICT r, const secp256k
     secp256k1_fe x2, x3, x6, x9, x11, x22, x44, x88, x176, x220, x223, t1;
     int j, ret;
 
-#ifdef VERIFY
     VERIFY_CHECK(r != a);
-    secp256k1_fe_verify(a);
-    secp256k1_fe_verify_magnitude(a, 8);
-#endif
+    SECP256K1_FE_VERIFY(a);
+    SECP256K1_FE_VERIFY_MAGNITUDE(a, 8);
 
     /** The binary representation of (p + 1)/4 has 3 blocks of 1s, with lengths in
      *  { 2, 22, 223 }. Use an addition chain to calculate 2^n - 1 for each block:
@@ -151,11 +148,11 @@ static void secp256k1_fe_verify_magnitude(const secp256k1_fe *a, int m) { (void)
 static void secp256k1_fe_impl_verify(const secp256k1_fe *a);
 static void secp256k1_fe_verify(const secp256k1_fe *a) {
     /* Magnitude between 0 and 32. */
-    secp256k1_fe_verify_magnitude(a, 32);
+    SECP256K1_FE_VERIFY_MAGNITUDE(a, 32);
     /* Normalized is 0 or 1. */
     VERIFY_CHECK((a->normalized == 0) || (a->normalized == 1));
     /* If normalized, magnitude must be 0 or 1. */
-    if (a->normalized) secp256k1_fe_verify_magnitude(a, 1);
+    if (a->normalized) SECP256K1_FE_VERIFY_MAGNITUDE(a, 1);
     /* Invoke implementation-specific checks. */
     secp256k1_fe_impl_verify(a);
 }
@@ -168,59 +165,71 @@ static void secp256k1_fe_verify_magnitude(const secp256k1_fe *a, int m) {
 static void secp256k1_fe_impl_normalize(secp256k1_fe *r);
 SECP256K1_INLINE static void secp256k1_fe_normalize(secp256k1_fe *r) {
-    secp256k1_fe_verify(r);
+    SECP256K1_FE_VERIFY(r);
+
     secp256k1_fe_impl_normalize(r);
     r->magnitude = 1;
     r->normalized = 1;
-    secp256k1_fe_verify(r);
+
+    SECP256K1_FE_VERIFY(r);
 }
 
 static void secp256k1_fe_impl_normalize_weak(secp256k1_fe *r);
 SECP256K1_INLINE static void secp256k1_fe_normalize_weak(secp256k1_fe *r) {
-    secp256k1_fe_verify(r);
+    SECP256K1_FE_VERIFY(r);
+
     secp256k1_fe_impl_normalize_weak(r);
     r->magnitude = 1;
-    secp256k1_fe_verify(r);
+
+    SECP256K1_FE_VERIFY(r);
 }
 
 static void secp256k1_fe_impl_normalize_var(secp256k1_fe *r);
 SECP256K1_INLINE static void secp256k1_fe_normalize_var(secp256k1_fe *r) {
-    secp256k1_fe_verify(r);
+    SECP256K1_FE_VERIFY(r);
+
     secp256k1_fe_impl_normalize_var(r);
     r->magnitude = 1;
     r->normalized = 1;
-    secp256k1_fe_verify(r);
+
+    SECP256K1_FE_VERIFY(r);
 }
 
 static int secp256k1_fe_impl_normalizes_to_zero(const secp256k1_fe *r);
 SECP256K1_INLINE static int secp256k1_fe_normalizes_to_zero(const secp256k1_fe *r) {
-    secp256k1_fe_verify(r);
+    SECP256K1_FE_VERIFY(r);
+
     return secp256k1_fe_impl_normalizes_to_zero(r);
 }
 
 static int secp256k1_fe_impl_normalizes_to_zero_var(const secp256k1_fe *r);
 SECP256K1_INLINE static int secp256k1_fe_normalizes_to_zero_var(const secp256k1_fe *r) {
-    secp256k1_fe_verify(r);
+    SECP256K1_FE_VERIFY(r);
+
     return secp256k1_fe_impl_normalizes_to_zero_var(r);
 }
 
 static void secp256k1_fe_impl_set_int(secp256k1_fe *r, int a);
 SECP256K1_INLINE static void secp256k1_fe_set_int(secp256k1_fe *r, int a) {
     VERIFY_CHECK(0 <= a && a <= 0x7FFF);
+
     secp256k1_fe_impl_set_int(r, a);
     r->magnitude = (a != 0);
     r->normalized = 1;
-    secp256k1_fe_verify(r);
+
+    SECP256K1_FE_VERIFY(r);
 }
 
 static void secp256k1_fe_impl_add_int(secp256k1_fe *r, int a);
 SECP256K1_INLINE static void secp256k1_fe_add_int(secp256k1_fe *r, int a) {
     VERIFY_CHECK(0 <= a && a <= 0x7FFF);
-    secp256k1_fe_verify(r);
+    SECP256K1_FE_VERIFY(r);
+
     secp256k1_fe_impl_add_int(r, a);
     r->magnitude += 1;
     r->normalized = 0;
-    secp256k1_fe_verify(r);
+
+    SECP256K1_FE_VERIFY(r);
 }
 
 static void secp256k1_fe_impl_clear(secp256k1_fe *a);
@@ -228,29 +237,33 @@ SECP256K1_INLINE static void secp256k1_fe_clear(secp256k1_fe *a) {
     a->magnitude = 0;
     a->normalized = 1;
     secp256k1_fe_impl_clear(a);
-    secp256k1_fe_verify(a);
+
+    SECP256K1_FE_VERIFY(a);
 }
 
 static int secp256k1_fe_impl_is_zero(const secp256k1_fe *a);
 SECP256K1_INLINE static int secp256k1_fe_is_zero(const secp256k1_fe *a) {
-    secp256k1_fe_verify(a);
+    SECP256K1_FE_VERIFY(a);
     VERIFY_CHECK(a->normalized);
+
     return secp256k1_fe_impl_is_zero(a);
 }
 
 static int secp256k1_fe_impl_is_odd(const secp256k1_fe *a);
 SECP256K1_INLINE static int secp256k1_fe_is_odd(const secp256k1_fe *a) {
-    secp256k1_fe_verify(a);
+    SECP256K1_FE_VERIFY(a);
     VERIFY_CHECK(a->normalized);
+
     return secp256k1_fe_impl_is_odd(a);
 }
 
 static int secp256k1_fe_impl_cmp_var(const secp256k1_fe *a, const secp256k1_fe *b);
 SECP256K1_INLINE static int secp256k1_fe_cmp_var(const secp256k1_fe *a, const secp256k1_fe *b) {
-    secp256k1_fe_verify(a);
-    secp256k1_fe_verify(b);
+    SECP256K1_FE_VERIFY(a);
+    SECP256K1_FE_VERIFY(b);
     VERIFY_CHECK(a->normalized);
     VERIFY_CHECK(b->normalized);
+
     return secp256k1_fe_impl_cmp_var(a, b);
 }
 
@@ -259,7 +272,8 @@ SECP256K1_INLINE static void secp256k1_fe_set_b32_mod(secp256k1_fe *r, const uns
     secp256k1_fe_impl_set_b32_mod(r, a);
     r->magnitude = 1;
     r->normalized = 0;
-    secp256k1_fe_verify(r);
+
+    SECP256K1_FE_VERIFY(r);
 }
 
 static int secp256k1_fe_impl_set_b32_limit(secp256k1_fe *r, const unsigned char *a);
@@ -267,7 +281,7 @@ SECP256K1_INLINE static int secp256k1_fe_set_b32_limit(secp256k1_fe *r, const un
     if (secp256k1_fe_impl_set_b32_limit(r, a)) {
         r->magnitude = 1;
         r->normalized = 1;
-        secp256k1_fe_verify(r);
+        SECP256K1_FE_VERIFY(r);
         return 1;
     } else {
         /* Mark the output field element as invalid. */
@@ -278,83 +292,97 @@ SECP256K1_INLINE static int secp256k1_fe_set_b32_limit(secp256k1_fe *r, const un
 static void secp256k1_fe_impl_get_b32(unsigned char *r, const secp256k1_fe *a);
 SECP256K1_INLINE static void secp256k1_fe_get_b32(unsigned char *r, const secp256k1_fe *a) {
-    secp256k1_fe_verify(a);
+    SECP256K1_FE_VERIFY(a);
     VERIFY_CHECK(a->normalized);
+
     secp256k1_fe_impl_get_b32(r, a);
 }
 
 static void secp256k1_fe_impl_negate_unchecked(secp256k1_fe *r, const secp256k1_fe *a, int m);
 SECP256K1_INLINE static void secp256k1_fe_negate_unchecked(secp256k1_fe *r, const secp256k1_fe *a, int m) {
-    secp256k1_fe_verify(a);
+    SECP256K1_FE_VERIFY(a);
     VERIFY_CHECK(m >= 0 && m <= 31);
-    secp256k1_fe_verify_magnitude(a, m);
+    SECP256K1_FE_VERIFY_MAGNITUDE(a, m);
+
     secp256k1_fe_impl_negate_unchecked(r, a, m);
     r->magnitude = m + 1;
     r->normalized = 0;
-    secp256k1_fe_verify(r);
+
+    SECP256K1_FE_VERIFY(r);
 }
 
 static void secp256k1_fe_impl_mul_int_unchecked(secp256k1_fe *r, int a);
 SECP256K1_INLINE static void secp256k1_fe_mul_int_unchecked(secp256k1_fe *r, int a) {
-    secp256k1_fe_verify(r);
+    SECP256K1_FE_VERIFY(r);
+
     VERIFY_CHECK(a >= 0 && a <= 32);
     VERIFY_CHECK(a*r->magnitude <= 32);
     secp256k1_fe_impl_mul_int_unchecked(r, a);
     r->magnitude *= a;
     r->normalized = 0;
-    secp256k1_fe_verify(r);
+
+    SECP256K1_FE_VERIFY(r);
 }
 
 static void secp256k1_fe_impl_add(secp256k1_fe *r, const secp256k1_fe *a);
 SECP256K1_INLINE static void secp256k1_fe_add(secp256k1_fe *r, const secp256k1_fe *a) {
-    secp256k1_fe_verify(r);
-    secp256k1_fe_verify(a);
+    SECP256K1_FE_VERIFY(r);
+    SECP256K1_FE_VERIFY(a);
     VERIFY_CHECK(r->magnitude + a->magnitude <= 32);
+
     secp256k1_fe_impl_add(r, a);
     r->magnitude += a->magnitude;
     r->normalized = 0;
-    secp256k1_fe_verify(r);
+
+    SECP256K1_FE_VERIFY(r);
 }
 
 static void secp256k1_fe_impl_mul(secp256k1_fe *r, const secp256k1_fe *a, const secp256k1_fe * SECP256K1_RESTRICT b);
 SECP256K1_INLINE static void secp256k1_fe_mul(secp256k1_fe *r, const secp256k1_fe *a, const secp256k1_fe * SECP256K1_RESTRICT b) {
-    secp256k1_fe_verify(a);
-    secp256k1_fe_verify(b);
-    secp256k1_fe_verify_magnitude(a, 8);
-    secp256k1_fe_verify_magnitude(b, 8);
+    SECP256K1_FE_VERIFY(a);
+    SECP256K1_FE_VERIFY(b);
+    SECP256K1_FE_VERIFY_MAGNITUDE(a, 8);
+    SECP256K1_FE_VERIFY_MAGNITUDE(b, 8);
     VERIFY_CHECK(r != b);
     VERIFY_CHECK(a != b);
+
     secp256k1_fe_impl_mul(r, a, b);
     r->magnitude = 1;
     r->normalized = 0;
-    secp256k1_fe_verify(r);
+
+    SECP256K1_FE_VERIFY(r);
 }
 
 static void secp256k1_fe_impl_sqr(secp256k1_fe *r, const secp256k1_fe *a);
 SECP256K1_INLINE static void secp256k1_fe_sqr(secp256k1_fe *r, const secp256k1_fe *a) {
-    secp256k1_fe_verify(a);
-    secp256k1_fe_verify_magnitude(a, 8);
+    SECP256K1_FE_VERIFY(a);
+    SECP256K1_FE_VERIFY_MAGNITUDE(a, 8);
+
     secp256k1_fe_impl_sqr(r, a);
     r->magnitude = 1;
     r->normalized = 0;
-    secp256k1_fe_verify(r);
+
+    SECP256K1_FE_VERIFY(r);
 }
 
 static void secp256k1_fe_impl_cmov(secp256k1_fe *r, const secp256k1_fe *a, int flag);
 SECP256K1_INLINE static void secp256k1_fe_cmov(secp256k1_fe *r, const secp256k1_fe *a, int flag) {
     VERIFY_CHECK(flag == 0 || flag == 1);
-    secp256k1_fe_verify(a);
-    secp256k1_fe_verify(r);
+    SECP256K1_FE_VERIFY(a);
+    SECP256K1_FE_VERIFY(r);
+
     secp256k1_fe_impl_cmov(r, a, flag);
     if (a->magnitude > r->magnitude) r->magnitude = a->magnitude;
     if (!a->normalized) r->normalized = 0;
-    secp256k1_fe_verify(r);
+
+    SECP256K1_FE_VERIFY(r);
 }
 
 static void secp256k1_fe_impl_to_storage(secp256k1_fe_storage *r, const secp256k1_fe *a);
 SECP256K1_INLINE static void secp256k1_fe_to_storage(secp256k1_fe_storage *r, const secp256k1_fe *a) {
-    secp256k1_fe_verify(a);
+    SECP256K1_FE_VERIFY(a);
     VERIFY_CHECK(a->normalized);
+
     secp256k1_fe_impl_to_storage(r, a);
 }
 
@@ -363,36 +391,42 @@ SECP256K1_INLINE static void secp256k1_fe_from_storage(secp256k1_fe *r, const se
     secp256k1_fe_impl_from_storage(r, a);
     r->magnitude = 1;
     r->normalized = 1;
-    secp256k1_fe_verify(r);
+
+    SECP256K1_FE_VERIFY(r);
 }
 
 static void secp256k1_fe_impl_inv(secp256k1_fe *r, const secp256k1_fe *x);
 SECP256K1_INLINE static void secp256k1_fe_inv(secp256k1_fe *r, const secp256k1_fe *x) {
     int input_is_zero = secp256k1_fe_normalizes_to_zero(x);
-    secp256k1_fe_verify(x);
+    SECP256K1_FE_VERIFY(x);
+
     secp256k1_fe_impl_inv(r, x);
     r->magnitude = x->magnitude > 0;
     r->normalized = 1;
+
     VERIFY_CHECK(secp256k1_fe_normalizes_to_zero(r) == input_is_zero);
-    secp256k1_fe_verify(r);
+    SECP256K1_FE_VERIFY(r);
 }
 
 static void secp256k1_fe_impl_inv_var(secp256k1_fe *r, const secp256k1_fe *x);
 SECP256K1_INLINE static void secp256k1_fe_inv_var(secp256k1_fe *r, const secp256k1_fe *x) {
     int input_is_zero = secp256k1_fe_normalizes_to_zero(x);
-    secp256k1_fe_verify(x);
+    SECP256K1_FE_VERIFY(x);
+
     secp256k1_fe_impl_inv_var(r, x);
     r->magnitude = x->magnitude > 0;
     r->normalized = 1;
+
     VERIFY_CHECK(secp256k1_fe_normalizes_to_zero(r) == input_is_zero);
-    secp256k1_fe_verify(r);
+    SECP256K1_FE_VERIFY(r);
 }
 
 static int secp256k1_fe_impl_is_square_var(const secp256k1_fe *x);
 SECP256K1_INLINE static int secp256k1_fe_is_square_var(const secp256k1_fe *x) {
     int ret;
     secp256k1_fe tmp = *x, sqrt;
-    secp256k1_fe_verify(x);
+    SECP256K1_FE_VERIFY(x);
+
     ret = secp256k1_fe_impl_is_square_var(x);
     secp256k1_fe_normalize_weak(&tmp);
     VERIFY_CHECK(ret == secp256k1_fe_sqrt(&sqrt, &tmp));
@@ -403,20 +437,24 @@ static void secp256k1_fe_impl_get_bounds(secp256k1_fe* r, int m);
 SECP256K1_INLINE static void secp256k1_fe_get_bounds(secp256k1_fe* r, int m) {
     VERIFY_CHECK(m >= 0);
     VERIFY_CHECK(m <= 32);
+
     secp256k1_fe_impl_get_bounds(r, m);
     r->magnitude = m;
     r->normalized = (m == 0);
-    secp256k1_fe_verify(r);
+
+    SECP256K1_FE_VERIFY(r);
 }
 
 static void secp256k1_fe_impl_half(secp256k1_fe *r);
 SECP256K1_INLINE static void secp256k1_fe_half(secp256k1_fe *r) {
-    secp256k1_fe_verify(r);
-    secp256k1_fe_verify_magnitude(r, 31);
+    SECP256K1_FE_VERIFY(r);
+    SECP256K1_FE_VERIFY_MAGNITUDE(r, 31);
+
     secp256k1_fe_impl_half(r);
     r->magnitude = (r->magnitude >> 1) + 1;
     r->normalized = 0;
-    secp256k1_fe_verify(r);
+
+    SECP256K1_FE_VERIFY(r);
 }
 
 #endif /* defined(VERIFY) */
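
[note] All the field_impl.h wrappers above follow one shape, and the added blank lines group its phases: verify input invariants, run the representation-specific backend, update the tracked magnitude/normalized bookkeeping, verify output invariants. A toy version of that shape (every name here is hypothetical; only the phase ordering mirrors the real wrappers):

    typedef struct { unsigned v; int magnitude; int normalized; } fe;

    static void fe_verify(const fe *a) { (void)a; /* invariant checks */ }
    static void fe_impl_double(fe *r) { r->v *= 2; } /* backend arithmetic */

    static void fe_double(fe *r) {
        fe_verify(r);              /* 1. inputs satisfy their invariants */

        fe_impl_double(r);         /* 2. representation-specific work    */
        r->magnitude *= 2;         /* 3. track worst-case magnitude      */
        r->normalized = 0;

        fe_verify(r);              /* 4. output satisfies the invariants */
    }
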
diff --git a/src/group.h b/src/group.h
index 86eb9e1f82..0f5161fbf2 100644
--- a/src/group.h
+++ b/src/group.h
@@ -181,8 +181,10 @@ static int secp256k1_ge_is_in_correct_subgroup(const secp256k1_ge* ge);
 
 /** Check invariants on an affine group element (no-op unless VERIFY is enabled). */
 static void secp256k1_ge_verify(const secp256k1_ge *a);
+#define SECP256K1_GE_VERIFY(a) secp256k1_ge_verify(a)
 
 /** Check invariants on a Jacobian group element (no-op unless VERIFY is enabled). */
 static void secp256k1_gej_verify(const secp256k1_gej *a);
+#define SECP256K1_GEJ_VERIFY(a) secp256k1_gej_verify(a)
 
 #endif /* SECP256K1_GROUP_H */
diff --git a/src/group_impl.h b/src/group_impl.h
index b9542ce8ae..cd5f0b01f9 100644
--- a/src/group_impl.h
+++ b/src/group_impl.h
@@ -74,26 +74,22 @@ static const secp256k1_ge secp256k1_ge_const_g = SECP256K1_G;
 /* End of section generated by sage/gen_exhaustive_groups.sage. */
 
 static void secp256k1_ge_verify(const secp256k1_ge *a) {
-#ifdef VERIFY
-    secp256k1_fe_verify(&a->x);
-    secp256k1_fe_verify(&a->y);
-    secp256k1_fe_verify_magnitude(&a->x, SECP256K1_GE_X_MAGNITUDE_MAX);
-    secp256k1_fe_verify_magnitude(&a->y, SECP256K1_GE_Y_MAGNITUDE_MAX);
+    SECP256K1_FE_VERIFY(&a->x);
+    SECP256K1_FE_VERIFY(&a->y);
+    SECP256K1_FE_VERIFY_MAGNITUDE(&a->x, SECP256K1_GE_X_MAGNITUDE_MAX);
+    SECP256K1_FE_VERIFY_MAGNITUDE(&a->y, SECP256K1_GE_Y_MAGNITUDE_MAX);
     VERIFY_CHECK(a->infinity == 0 || a->infinity == 1);
-#endif
     (void)a;
 }
 
 static void secp256k1_gej_verify(const secp256k1_gej *a) {
-#ifdef VERIFY
-    secp256k1_fe_verify(&a->x);
-    secp256k1_fe_verify(&a->y);
-    secp256k1_fe_verify(&a->z);
-    secp256k1_fe_verify_magnitude(&a->x, SECP256K1_GEJ_X_MAGNITUDE_MAX);
-    secp256k1_fe_verify_magnitude(&a->y, SECP256K1_GEJ_Y_MAGNITUDE_MAX);
-    secp256k1_fe_verify_magnitude(&a->z, SECP256K1_GEJ_Z_MAGNITUDE_MAX);
+    SECP256K1_FE_VERIFY(&a->x);
+    SECP256K1_FE_VERIFY(&a->y);
+    SECP256K1_FE_VERIFY(&a->z);
+    SECP256K1_FE_VERIFY_MAGNITUDE(&a->x, SECP256K1_GEJ_X_MAGNITUDE_MAX);
+    SECP256K1_FE_VERIFY_MAGNITUDE(&a->y, SECP256K1_GEJ_Y_MAGNITUDE_MAX);
+    SECP256K1_FE_VERIFY_MAGNITUDE(&a->z, SECP256K1_GEJ_Z_MAGNITUDE_MAX);
     VERIFY_CHECK(a->infinity == 0 || a->infinity == 1);
-#endif
     (void)a;
 }
 
@@ -101,8 +97,8 @@ static void secp256k1_ge_set_gej_zinv(secp256k1_ge *r, const secp256k1_gej *a, const secp256k1_fe *zi) {
     secp256k1_fe zi2;
     secp256k1_fe zi3;
-    secp256k1_gej_verify(a);
-    secp256k1_fe_verify(zi);
+    SECP256K1_GEJ_VERIFY(a);
+    SECP256K1_FE_VERIFY(zi);
     VERIFY_CHECK(!a->infinity);
 
     secp256k1_fe_sqr(&zi2, zi);
@@ -111,15 +107,15 @@ static void secp256k1_ge_set_gej_zinv(secp256k1_ge *r, const secp256k1_gej *a, c
     secp256k1_fe_mul(&r->y, &a->y, &zi3);
     r->infinity = a->infinity;
 
-    secp256k1_ge_verify(r);
+    SECP256K1_GE_VERIFY(r);
 }
 
 /* Set r to the affine coordinates of Jacobian point (a.x, a.y, 1/zi). */
 static void secp256k1_ge_set_ge_zinv(secp256k1_ge *r, const secp256k1_ge *a, const secp256k1_fe *zi) {
     secp256k1_fe zi2;
     secp256k1_fe zi3;
-    secp256k1_ge_verify(a);
-    secp256k1_fe_verify(zi);
+    SECP256K1_GE_VERIFY(a);
+    SECP256K1_FE_VERIFY(zi);
     VERIFY_CHECK(!a->infinity);
 
     secp256k1_fe_sqr(&zi2, zi);
@@ -128,39 +124,39 @@ static void secp256k1_ge_set_ge_zinv(secp256k1_ge *r, const secp256k1_ge *a, con
     secp256k1_fe_mul(&r->y, &a->y, &zi3);
     r->infinity = a->infinity;
 
-    secp256k1_ge_verify(r);
+    SECP256K1_GE_VERIFY(r);
 }
 
 static void secp256k1_ge_set_xy(secp256k1_ge *r, const secp256k1_fe *x, const secp256k1_fe *y) {
-    secp256k1_fe_verify(x);
-    secp256k1_fe_verify(y);
+    SECP256K1_FE_VERIFY(x);
+    SECP256K1_FE_VERIFY(y);
 
     r->infinity = 0;
     r->x = *x;
     r->y = *y;
 
-    secp256k1_ge_verify(r);
+    SECP256K1_GE_VERIFY(r);
 }
 
 static int secp256k1_ge_is_infinity(const secp256k1_ge *a) {
-    secp256k1_ge_verify(a);
+    SECP256K1_GE_VERIFY(a);
     return a->infinity;
 }
 
 static void secp256k1_ge_neg(secp256k1_ge *r, const secp256k1_ge *a) {
-    secp256k1_ge_verify(a);
+    SECP256K1_GE_VERIFY(a);
 
     *r = *a;
     secp256k1_fe_normalize_weak(&r->y);
     secp256k1_fe_negate(&r->y, &r->y, 1);
 
-    secp256k1_ge_verify(r);
+    SECP256K1_GE_VERIFY(r);
 }
 
 static void secp256k1_ge_set_gej(secp256k1_ge *r, secp256k1_gej *a) {
     secp256k1_fe z2, z3;
-    secp256k1_gej_verify(a);
+    SECP256K1_GEJ_VERIFY(a);
 
     r->infinity = a->infinity;
     secp256k1_fe_inv(&a->z, &a->z);
@@ -172,13 +168,13 @@ static void secp256k1_ge_set_gej(secp256k1_ge *r, secp256k1_gej *a) {
     r->x = a->x;
     r->y = a->y;
 
-    secp256k1_gej_verify(a);
-    secp256k1_ge_verify(r);
+    SECP256K1_GEJ_VERIFY(a);
+    SECP256K1_GE_VERIFY(r);
 }
 
 static void secp256k1_ge_set_gej_var(secp256k1_ge *r, secp256k1_gej *a) {
     secp256k1_fe z2, z3;
-    secp256k1_gej_verify(a);
+    SECP256K1_GEJ_VERIFY(a);
 
     if (secp256k1_gej_is_infinity(a)) {
         secp256k1_ge_set_infinity(r);
@@ -193,8 +189,8 @@ static void secp256k1_ge_set_gej_var(secp256k1_ge *r, secp256k1_gej *a) {
     secp256k1_fe_set_int(&a->z, 1);
     secp256k1_ge_set_xy(r, &a->x, &a->y);
 
-    secp256k1_gej_verify(a);
-    secp256k1_ge_verify(r);
+    SECP256K1_GEJ_VERIFY(a);
+    SECP256K1_GE_VERIFY(r);
 }
 
 static void secp256k1_ge_set_all_gej_var(secp256k1_ge *r, const secp256k1_gej *a, size_t len) {
@@ -203,7 +199,7 @@ static void secp256k1_ge_set_all_gej_var(secp256k1_ge *r, const secp256k1_gej *a
     size_t last_i = SIZE_MAX;
 #ifdef VERIFY
     for (i = 0; i < len; i++) {
-        secp256k1_gej_verify(&a[i]);
+        SECP256K1_GEJ_VERIFY(&a[i]);
     }
 #endif
 
@@ -245,7 +241,7 @@ static void secp256k1_ge_set_all_gej_var(secp256k1_ge *r, const secp256k1_gej *a
 #ifdef VERIFY
     for (i = 0; i < len; i++) {
-        secp256k1_ge_verify(&r[i]);
+        SECP256K1_GE_VERIFY(&r[i]);
     }
 #endif
 }
 
@@ -255,8 +251,8 @@ static void secp256k1_ge_table_set_globalz(size_t len, secp256k1_ge *a, const se
     secp256k1_fe zs;
 #ifdef VERIFY
     for (i = 0; i < len; i++) {
-        secp256k1_ge_verify(&a[i]);
-        secp256k1_fe_verify(&zr[i]);
+        SECP256K1_GE_VERIFY(&a[i]);
+        SECP256K1_FE_VERIFY(&zr[i]);
     }
 #endif
 
@@ -278,7 +274,7 @@ static void secp256k1_ge_table_set_globalz(size_t len, secp256k1_ge *a, const se
 #ifdef VERIFY
     for (i = 0; i < len; i++) {
-        secp256k1_ge_verify(&a[i]);
+        SECP256K1_GE_VERIFY(&a[i]);
     }
 #endif
 }
 
@@ -289,7 +285,7 @@ static void secp256k1_gej_set_infinity(secp256k1_gej *r) {
     secp256k1_fe_clear(&r->y);
     secp256k1_fe_clear(&r->z);
 
-    secp256k1_gej_verify(r);
+    SECP256K1_GEJ_VERIFY(r);
 }
 
 static void secp256k1_ge_set_infinity(secp256k1_ge *r) {
@@ -297,7 +293,7 @@ static void secp256k1_ge_set_infinity(secp256k1_ge *r) {
     secp256k1_fe_clear(&r->x);
     secp256k1_fe_clear(&r->y);
 
-    secp256k1_ge_verify(r);
+    SECP256K1_GE_VERIFY(r);
 }
 
 static void secp256k1_gej_clear(secp256k1_gej *r) {
@@ -306,7 +302,7 @@ static void secp256k1_gej_clear(secp256k1_gej *r) {
     secp256k1_fe_clear(&r->y);
     secp256k1_fe_clear(&r->z);
 
-    secp256k1_gej_verify(r);
+    SECP256K1_GEJ_VERIFY(r);
 }
 
 static void secp256k1_ge_clear(secp256k1_ge *r) {
@@ -314,13 +310,13 @@ static void secp256k1_ge_clear(secp256k1_ge *r) {
     secp256k1_fe_clear(&r->x);
     secp256k1_fe_clear(&r->y);
 
-    secp256k1_ge_verify(r);
+    SECP256K1_GE_VERIFY(r);
 }
 
 static int secp256k1_ge_set_xo_var(secp256k1_ge *r, const secp256k1_fe *x, int odd) {
     secp256k1_fe x2, x3;
     int ret;
-    secp256k1_fe_verify(x);
+    SECP256K1_FE_VERIFY(x);
 
     r->x = *x;
     secp256k1_fe_sqr(&x2, x);
@@ -333,25 +329,25 @@ static int secp256k1_ge_set_xo_var(secp256k1_ge *r, const secp256k1_fe *x, int o
         secp256k1_fe_negate(&r->y, &r->y, 1);
     }
 
-    secp256k1_ge_verify(r);
+    SECP256K1_GE_VERIFY(r);
     return ret;
 }
 
 static void secp256k1_gej_set_ge(secp256k1_gej *r, const secp256k1_ge *a) {
-    secp256k1_ge_verify(a);
+    SECP256K1_GE_VERIFY(a);
 
     r->infinity = a->infinity;
     r->x = a->x;
     r->y = a->y;
     secp256k1_fe_set_int(&r->z, 1);
 
-    secp256k1_gej_verify(r);
+    SECP256K1_GEJ_VERIFY(r);
 }
 
 static int secp256k1_gej_eq_var(const secp256k1_gej *a, const secp256k1_gej *b) {
     secp256k1_gej tmp;
-    secp256k1_gej_verify(b);
-    secp256k1_gej_verify(a);
+    SECP256K1_GEJ_VERIFY(b);
+    SECP256K1_GEJ_VERIFY(a);
 
     secp256k1_gej_neg(&tmp, a);
     secp256k1_gej_add_var(&tmp, &tmp, b, NULL);
@@ -360,18 +356,16 @@ static int secp256k1_gej_eq_x_var(const secp256k1_fe *x, const secp256k1_gej *a)
     secp256k1_fe r;
-    secp256k1_fe_verify(x);
-    secp256k1_gej_verify(a);
-#ifdef VERIFY
+    SECP256K1_FE_VERIFY(x);
+    SECP256K1_GEJ_VERIFY(a);
     VERIFY_CHECK(!a->infinity);
-#endif
 
     secp256k1_fe_sqr(&r, &a->z);
     secp256k1_fe_mul(&r, &r, x);
     return secp256k1_fe_equal(&r, &a->x);
 }
 
 static void secp256k1_gej_neg(secp256k1_gej *r, const secp256k1_gej *a) {
-    secp256k1_gej_verify(a);
+    SECP256K1_GEJ_VERIFY(a);
 
     r->infinity = a->infinity;
     r->x = a->x;
@@ -380,18 +374,18 @@ static void secp256k1_gej_neg(secp256k1_gej *r, const secp256k1_gej *a) {
     secp256k1_fe_normalize_weak(&r->y);
     secp256k1_fe_negate(&r->y, &r->y, 1);
 
-    secp256k1_gej_verify(r);
+    SECP256K1_GEJ_VERIFY(r);
 }
 
 static int secp256k1_gej_is_infinity(const secp256k1_gej *a) {
-    secp256k1_gej_verify(a);
+    SECP256K1_GEJ_VERIFY(a);
     return a->infinity;
 }
 
 static int secp256k1_ge_is_valid_var(const secp256k1_ge *a) {
     secp256k1_fe y2, x3;
-    secp256k1_ge_verify(a);
+    SECP256K1_GE_VERIFY(a);
 
     if (a->infinity) {
         return 0;
@@ -406,7 +400,7 @@ static SECP256K1_INLINE void secp256k1_gej_double(secp256k1_gej *r, const secp25
     /* Operations: 3 mul, 4 sqr, 8 add/half/mul_int/negate */
     secp256k1_fe l, s, t;
-    secp256k1_gej_verify(a);
+    SECP256K1_GEJ_VERIFY(a);
 
     r->infinity = a->infinity;
 
@@ -435,11 +429,11 @@ static SECP256K1_INLINE void secp256k1_gej_double(secp256k1_gej *r, const secp25
     secp256k1_fe_add(&r->y, &s); /* Y3 = L*(X3 + T) + S^2 (2) */
     secp256k1_fe_negate(&r->y, &r->y, 2); /* Y3 = -(L*(X3 + T) + S^2) (3) */
 
-    secp256k1_gej_verify(r);
+    SECP256K1_GEJ_VERIFY(r);
 }
 
 static void secp256k1_gej_double_var(secp256k1_gej *r, const secp256k1_gej *a, secp256k1_fe *rzr) {
-    secp256k1_gej_verify(a);
+    SECP256K1_GEJ_VERIFY(a);
 
     /** For secp256k1, 2Q is infinity if and only if Q is infinity. This is because if 2Q = infinity,
      *  Q must equal -Q, or that Q.y == -(Q.y), or Q.y is 0. For a point on y^2 = x^3 + 7 to have
@@ -466,14 +460,14 @@ static void secp256k1_gej_double_var(secp256k1_gej *r, const secp256k1_gej *a, s
 
     secp256k1_gej_double(r, a);
 
-    secp256k1_gej_verify(r);
+    SECP256K1_GEJ_VERIFY(r);
 }
 
 static void secp256k1_gej_add_var(secp256k1_gej *r, const secp256k1_gej *a, const secp256k1_gej *b, secp256k1_fe *rzr) {
     /* 12 mul, 4 sqr, 11 add/negate/normalizes_to_zero (ignoring special cases) */
     secp256k1_fe z22, z12, u1, u2, s1, s2, h, i, h2, h3, t;
-    secp256k1_gej_verify(a);
-    secp256k1_gej_verify(b);
+    SECP256K1_GEJ_VERIFY(a);
+    SECP256K1_GEJ_VERIFY(b);
 
     if (a->infinity) {
         VERIFY_CHECK(rzr == NULL);
@@ -530,14 +524,14 @@ static void secp256k1_gej_add_var(secp256k1_gej *r, const secp256k1_gej *a, cons
     secp256k1_fe_mul(&h3, &h3, &s1);
     secp256k1_fe_add(&r->y, &h3);
 
-    secp256k1_gej_verify(r);
+    SECP256K1_GEJ_VERIFY(r);
 }
 
 static void secp256k1_gej_add_ge_var(secp256k1_gej *r, const secp256k1_gej *a, const secp256k1_ge *b, secp256k1_fe *rzr) {
     /* Operations: 8 mul, 3 sqr, 11 add/negate/normalizes_to_zero (ignoring special cases) */
     secp256k1_fe z12, u1, u2, s1, s2, h, i, h2, h3, t;
-    secp256k1_gej_verify(a);
-    secp256k1_ge_verify(b);
+    SECP256K1_GEJ_VERIFY(a);
+    SECP256K1_GE_VERIFY(b);
 
     if (a->infinity) {
         VERIFY_CHECK(rzr == NULL);
@@ -592,16 +586,16 @@ static void secp256k1_gej_add_ge_var(secp256k1_gej *r, const secp256k1_gej *a, c
     secp256k1_fe_mul(&h3, &h3, &s1);
     secp256k1_fe_add(&r->y, &h3);
 
-    secp256k1_gej_verify(r);
-    if (rzr != NULL) secp256k1_fe_verify(rzr);
+    SECP256K1_GEJ_VERIFY(r);
+    if (rzr != NULL) SECP256K1_FE_VERIFY(rzr);
 }
 
 static void secp256k1_gej_add_zinv_var(secp256k1_gej *r, const secp256k1_gej *a, const secp256k1_ge *b, const secp256k1_fe *bzinv) {
     /* Operations: 9 mul, 3 sqr, 11 add/negate/normalizes_to_zero (ignoring special cases) */
     secp256k1_fe az, z12, u1, u2, s1, s2, h, i, h2, h3, t;
-    secp256k1_gej_verify(a);
-    secp256k1_ge_verify(b);
-    secp256k1_fe_verify(bzinv);
+    SECP256K1_GEJ_VERIFY(a);
+    SECP256K1_GE_VERIFY(b);
+    SECP256K1_FE_VERIFY(bzinv);
 
     if (a->infinity) {
         secp256k1_fe bzinv2, bzinv3;
@@ -611,7 +605,7 @@ static void secp256k1_gej_add_zinv_var(secp256k1_gej *r, const secp256k1_gej *a,
         secp256k1_fe_mul(&r->x, &b->x, &bzinv2);
         secp256k1_fe_mul(&r->y, &b->y, &bzinv3);
         secp256k1_fe_set_int(&r->z, 1);
-        secp256k1_gej_verify(r);
+        SECP256K1_GEJ_VERIFY(r);
         return;
     }
     if (b->infinity) {
@@ -663,7 +657,7 @@ static void secp256k1_gej_add_zinv_var(secp256k1_gej *r, const secp256k1_gej *a,
     secp256k1_fe_mul(&h3, &h3, &s1);
     secp256k1_fe_add(&r->y, &h3);
 
-    secp256k1_gej_verify(r);
+    SECP256K1_GEJ_VERIFY(r);
 }
 
 @@ -672,8 +666,8 @@ static void secp256k1_gej_add_ge(secp256k1_gej *r, const secp256k1_gej *a, const
     secp256k1_fe zz, u1, u2, s1, s2, t, tt, m, n, q, rr;
     secp256k1_fe m_alt, rr_alt;
     int degenerate;
-    secp256k1_gej_verify(a);
-    secp256k1_ge_verify(b);
+    SECP256K1_GEJ_VERIFY(a);
+    SECP256K1_GE_VERIFY(b);
     VERIFY_CHECK(!b->infinity);
 
     /* In:
@@ -801,17 +795,15 @@ static void secp256k1_gej_add_ge(secp256k1_gej *r, const secp256k1_gej *a, const
      * Then r->infinity = ((y1 + y2)Z == 0) = (y1 == -y2) = false. */
     r->infinity = secp256k1_fe_normalizes_to_zero(&r->z);
 
-    secp256k1_gej_verify(r);
+    SECP256K1_GEJ_VERIFY(r);
 }
 
 static void secp256k1_gej_rescale(secp256k1_gej *r, const secp256k1_fe *s) {
     /* Operations: 4 mul, 1 sqr */
     secp256k1_fe zz;
-    secp256k1_gej_verify(r);
-    secp256k1_fe_verify(s);
-#ifdef VERIFY
+    SECP256K1_GEJ_VERIFY(r);
+    SECP256K1_FE_VERIFY(s);
     VERIFY_CHECK(!secp256k1_fe_normalizes_to_zero_var(s));
-#endif
 
     secp256k1_fe_sqr(&zz, s);
     secp256k1_fe_mul(&r->x, &r->x, &zz); /* r->x *= s^2 */
@@ -819,12 +811,12 @@ static void secp256k1_gej_rescale(secp256k1_gej *r, const secp256k1_fe *s) {
     secp256k1_fe_mul(&r->y, &r->y, s); /* r->y *= s^3 */
     secp256k1_fe_mul(&r->z, &r->z, s); /* r->z *= s */
 
-    secp256k1_gej_verify(r);
+    SECP256K1_GEJ_VERIFY(r);
 }
 
 static void secp256k1_ge_to_storage(secp256k1_ge_storage *r, const secp256k1_ge *a) {
     secp256k1_fe x, y;
-    secp256k1_ge_verify(a);
+    SECP256K1_GE_VERIFY(a);
     VERIFY_CHECK(!a->infinity);
 
     x = a->x;
@@ -840,19 +832,19 @@ static void secp256k1_ge_from_storage(secp256k1_ge *r, const secp256k1_ge_storag
     secp256k1_fe_from_storage(&r->y, &a->y);
     r->infinity = 0;
 
-    secp256k1_ge_verify(r);
+    SECP256K1_GE_VERIFY(r);
 }
 
 static SECP256K1_INLINE void secp256k1_gej_cmov(secp256k1_gej *r, const secp256k1_gej *a, int flag) {
-    secp256k1_gej_verify(r);
-    secp256k1_gej_verify(a);
+    SECP256K1_GEJ_VERIFY(r);
+    SECP256K1_GEJ_VERIFY(a);
 
     secp256k1_fe_cmov(&r->x, &a->x, flag);
     secp256k1_fe_cmov(&r->y, &a->y, flag);
     secp256k1_fe_cmov(&r->z, &a->z, flag);
     r->infinity ^= (r->infinity ^ a->infinity) & flag;
 
-    secp256k1_gej_verify(r);
+    SECP256K1_GEJ_VERIFY(r);
 }
 
 static SECP256K1_INLINE void secp256k1_ge_storage_cmov(secp256k1_ge_storage *r, const secp256k1_ge_storage *a, int flag) {
@@ -861,19 +853,19 @@ static SECP256K1_INLINE void secp256k1_ge_storage_cmov(secp256k1_ge_storage *r,
 }
 
 static void secp256k1_ge_mul_lambda(secp256k1_ge *r, const secp256k1_ge *a) {
-    secp256k1_ge_verify(a);
+    SECP256K1_GE_VERIFY(a);
 
     *r = *a;
     secp256k1_fe_mul(&r->x, &r->x, &secp256k1_const_beta);
 
-    secp256k1_ge_verify(r);
+    SECP256K1_GE_VERIFY(r);
 }
 
 static int secp256k1_ge_is_in_correct_subgroup(const secp256k1_ge* ge) {
 #ifdef EXHAUSTIVE_TEST_ORDER
     secp256k1_gej out;
     int i;
-    secp256k1_ge_verify(ge);
+    SECP256K1_GE_VERIFY(ge);
 
     /* A very simple EC multiplication ladder that avoids a dependency on ecmult. */
     secp256k1_gej_set_infinity(&out);
@@ -885,7 +877,7 @@ static int secp256k1_ge_is_in_correct_subgroup(const secp256k1_ge* ge) {
     }
     return secp256k1_gej_is_infinity(&out);
 #else
-    secp256k1_ge_verify(ge);
+    SECP256K1_GE_VERIFY(ge);
     (void)ge;
 
     /* The real secp256k1 group has cofactor 1, so the subgroup is the entire curve. */
@@ -907,9 +899,8 @@ static int secp256k1_ge_x_frac_on_curve_var(const secp256k1_fe *xn, const secp25
      * (xn/xd)^3 + 7 is square <=> xd*xn^3 + 7*xd^4 is square (multiplying by xd^4, a square). */
     secp256k1_fe r, t;
-#ifdef VERIFY
     VERIFY_CHECK(!secp256k1_fe_normalizes_to_zero_var(xd));
-#endif
+
     secp256k1_fe_mul(&r, xd, xn); /* r = xd*xn */
     secp256k1_fe_sqr(&t, xn); /* t = xn^2 */
     secp256k1_fe_mul(&r, &r, &t); /* r = xd*xn^3 */
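
[note] Note the asymmetry in group_impl.h: single checks lose their guards, but the per-element checks over arrays keep `#ifdef VERIFY`. Only the macro body vanishes in production; the surrounding loop would still be compiled (and, unless the optimizer deletes it, executed) for every element. A sketch of the retained pattern, with hypothetical names:

    #include <assert.h>
    #include <stddef.h>

    #ifdef VERIFY
    #define CHECK_ELEM(x) assert((x) >= 0)  /* stand-in element invariant */
    #else
    #define CHECK_ELEM(x)
    #endif

    static void check_all(const int *a, size_t len) {
    #ifdef VERIFY
        size_t i;
        /* Guard kept: without it, the loop's control flow itself -- not
         * just the vanished macro body -- would survive in production. */
        for (i = 0; i < len; i++) {
            CHECK_ELEM(a[i]);
        }
    #else
        (void)a;
        (void)len;
    #endif
    }
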
diff --git a/src/modinv32_impl.h b/src/modinv32_impl.h
index 0ea2699863..75eb354ff0 100644
--- a/src/modinv32_impl.h
+++ b/src/modinv32_impl.h
@@ -144,7 +144,6 @@ static void secp256k1_modinv32_normalize_30(secp256k1_modinv32_signed30 *r, int3
     r->v[7] = r7;
     r->v[8] = r8;
 
-#ifdef VERIFY
     VERIFY_CHECK(r0 >> 30 == 0);
     VERIFY_CHECK(r1 >> 30 == 0);
     VERIFY_CHECK(r2 >> 30 == 0);
@@ -156,7 +155,6 @@ static void secp256k1_modinv32_normalize_30(secp256k1_modinv32_signed30 *r, int3
     VERIFY_CHECK(r8 >> 30 == 0);
     VERIFY_CHECK(secp256k1_modinv32_mul_cmp_30(r, 9, &modinfo->modulus, 0) >= 0); /* r >= 0 */
     VERIFY_CHECK(secp256k1_modinv32_mul_cmp_30(r, 9, &modinfo->modulus, 1) < 0); /* r < modulus */
-#endif
 }
 
 /* Data type for transition matrices (see section 3 of explanation).
@@ -413,14 +411,13 @@ static void secp256k1_modinv32_update_de_30(secp256k1_modinv32_signed30 *d, secp
     int32_t di, ei, md, me, sd, se;
     int64_t cd, ce;
     int i;
-#ifdef VERIFY
     VERIFY_CHECK(secp256k1_modinv32_mul_cmp_30(d, 9, &modinfo->modulus, -2) > 0); /* d > -2*modulus */
     VERIFY_CHECK(secp256k1_modinv32_mul_cmp_30(d, 9, &modinfo->modulus, 1) < 0); /* d < modulus */
     VERIFY_CHECK(secp256k1_modinv32_mul_cmp_30(e, 9, &modinfo->modulus, -2) > 0); /* e > -2*modulus */
     VERIFY_CHECK(secp256k1_modinv32_mul_cmp_30(e, 9, &modinfo->modulus, 1) < 0); /* e < modulus */
     VERIFY_CHECK(labs(u) <= (M30 + 1 - labs(v))); /* |u|+|v| <= 2^30 */
     VERIFY_CHECK(labs(q) <= (M30 + 1 - labs(r))); /* |q|+|r| <= 2^30 */
-#endif
+
     /* [md,me] start as zero; plus [u,q] if d is negative; plus [v,r] if e is negative. */
     sd = d->v[8] >> 31;
     se = e->v[8] >> 31;
@@ -455,12 +452,11 @@ static void secp256k1_modinv32_update_de_30(secp256k1_modinv32_signed30 *d, secp
     /* What remains is limb 9 of t*[d,e]+modulus*[md,me]; store it as output limb 8. */
     d->v[8] = (int32_t)cd;
     e->v[8] = (int32_t)ce;
-#ifdef VERIFY
+
     VERIFY_CHECK(secp256k1_modinv32_mul_cmp_30(d, 9, &modinfo->modulus, -2) > 0); /* d > -2*modulus */
     VERIFY_CHECK(secp256k1_modinv32_mul_cmp_30(d, 9, &modinfo->modulus, 1) < 0); /* d < modulus */
     VERIFY_CHECK(secp256k1_modinv32_mul_cmp_30(e, 9, &modinfo->modulus, -2) > 0); /* e > -2*modulus */
     VERIFY_CHECK(secp256k1_modinv32_mul_cmp_30(e, 9, &modinfo->modulus, 1) < 0); /* e < modulus */
-#endif
 }
 
 /* Compute (t/2^30) * [f, g], where t is a transition matrix for 30 divsteps.
@@ -550,25 +546,23 @@ static void secp256k1_modinv32(secp256k1_modinv32_signed30 *x, const secp256k1_m
         /* Update d,e using that transition matrix. */
         secp256k1_modinv32_update_de_30(&d, &e, &t, modinfo);
         /* Update f,g using that transition matrix. */
-#ifdef VERIFY
         VERIFY_CHECK(secp256k1_modinv32_mul_cmp_30(&f, 9, &modinfo->modulus, -1) > 0); /* f > -modulus */
         VERIFY_CHECK(secp256k1_modinv32_mul_cmp_30(&f, 9, &modinfo->modulus, 1) <= 0); /* f <= modulus */
         VERIFY_CHECK(secp256k1_modinv32_mul_cmp_30(&g, 9, &modinfo->modulus, -1) > 0); /* g > -modulus */
         VERIFY_CHECK(secp256k1_modinv32_mul_cmp_30(&g, 9, &modinfo->modulus, 1) < 0); /* g < modulus */
-#endif
+
         secp256k1_modinv32_update_fg_30(&f, &g, &t);
-#ifdef VERIFY
+
         VERIFY_CHECK(secp256k1_modinv32_mul_cmp_30(&f, 9, &modinfo->modulus, -1) > 0); /* f > -modulus */
         VERIFY_CHECK(secp256k1_modinv32_mul_cmp_30(&f, 9, &modinfo->modulus, 1) <= 0); /* f <= modulus */
         VERIFY_CHECK(secp256k1_modinv32_mul_cmp_30(&g, 9, &modinfo->modulus, -1) > 0); /* g > -modulus */
         VERIFY_CHECK(secp256k1_modinv32_mul_cmp_30(&g, 9, &modinfo->modulus, 1) < 0); /* g < modulus */
-#endif
     }
 
     /* At this point sufficient iterations have been performed that g must have reached 0
      * and (if g was not originally 0) f must now equal +/- GCD of the initial f, g
      * values i.e. +/- 1, and d now contains +/- the modular inverse. */
-#ifdef VERIFY
+
     /* g == 0 */
     VERIFY_CHECK(secp256k1_modinv32_mul_cmp_30(&g, 9, &SECP256K1_SIGNED30_ONE, 0) == 0);
     /* |f| == 1, or (x == 0 and d == 0 and |f|=modulus) */
@@ -578,7 +572,6 @@ static void secp256k1_modinv32(secp256k1_modinv32_signed30 *x, const secp256k1_m
                  secp256k1_modinv32_mul_cmp_30(&d, 9, &SECP256K1_SIGNED30_ONE, 0) == 0 &&
                  (secp256k1_modinv32_mul_cmp_30(&f, 9, &modinfo->modulus, 1) == 0 ||
                   secp256k1_modinv32_mul_cmp_30(&f, 9, &modinfo->modulus, -1) == 0)));
-#endif
 
     /* Optionally negate d, normalize to [0,modulus), and return it. */
     secp256k1_modinv32_normalize_30(&d, f.v[8], modinfo);
@@ -607,12 +600,12 @@ static void secp256k1_modinv32_var(secp256k1_modinv32_signed30 *x, const secp256
         /* Update d,e using that transition matrix. */
         secp256k1_modinv32_update_de_30(&d, &e, &t, modinfo);
         /* Update f,g using that transition matrix. */
-#ifdef VERIFY
+
         VERIFY_CHECK(secp256k1_modinv32_mul_cmp_30(&f, len, &modinfo->modulus, -1) > 0); /* f > -modulus */
         VERIFY_CHECK(secp256k1_modinv32_mul_cmp_30(&f, len, &modinfo->modulus, 1) <= 0); /* f <= modulus */
         VERIFY_CHECK(secp256k1_modinv32_mul_cmp_30(&g, len, &modinfo->modulus, -1) > 0); /* g > -modulus */
         VERIFY_CHECK(secp256k1_modinv32_mul_cmp_30(&g, len, &modinfo->modulus, 1) < 0); /* g < modulus */
-#endif
+
         secp256k1_modinv32_update_fg_30_var(len, &f, &g, &t);
         /* If the bottom limb of g is 0, there is a chance g=0. */
         if (g.v[0] == 0) {
@@ -637,18 +630,17 @@ static void secp256k1_modinv32_var(secp256k1_modinv32_signed30 *x, const secp256
             g.v[len - 2] |= (uint32_t)gn << 30;
             --len;
         }
-#ifdef VERIFY
+
        VERIFY_CHECK(++i < 25); /* We should never need more than 25*30 = 750 divsteps */
        VERIFY_CHECK(secp256k1_modinv32_mul_cmp_30(&f, len, &modinfo->modulus, -1) > 0); /* f > -modulus */
        VERIFY_CHECK(secp256k1_modinv32_mul_cmp_30(&f, len, &modinfo->modulus, 1) <= 0); /* f <= modulus */
        VERIFY_CHECK(secp256k1_modinv32_mul_cmp_30(&g, len, &modinfo->modulus, -1) > 0); /* g > -modulus */
        VERIFY_CHECK(secp256k1_modinv32_mul_cmp_30(&g, len, &modinfo->modulus, 1) < 0); /* g < modulus */
-#endif
     }
 
     /* At this point g is 0 and (if g was not originally 0) f must now equal +/- GCD of
      * the initial f, g values i.e. +/- 1, and d now contains +/- the modular inverse. */
-#ifdef VERIFY
+
     /* g == 0 */
     VERIFY_CHECK(secp256k1_modinv32_mul_cmp_30(&g, len, &SECP256K1_SIGNED30_ONE, 0) == 0);
     /* |f| == 1, or (x == 0 and d == 0 and |f|=modulus) */
@@ -658,7 +650,6 @@ static void secp256k1_modinv32_var(secp256k1_modinv32_signed30 *x, const secp256
                  secp256k1_modinv32_mul_cmp_30(&d, 9, &SECP256K1_SIGNED30_ONE, 0) == 0 &&
                  (secp256k1_modinv32_mul_cmp_30(&f, len, &modinfo->modulus, 1) == 0 ||
                   secp256k1_modinv32_mul_cmp_30(&f, len, &modinfo->modulus, -1) == 0)));
-#endif
 
     /* Optionally negate d, normalize to [0,modulus), and return it. */
     secp256k1_modinv32_normalize_30(&d, f.v[len - 1], modinfo);
@@ -697,12 +688,11 @@ static int secp256k1_jacobi32_maybe_var(const secp256k1_modinv32_signed30 *x, co
         secp256k1_modinv32_trans2x2 t;
         eta = secp256k1_modinv32_posdivsteps_30_var(eta, f.v[0] | ((uint32_t)f.v[1] << 30), g.v[0] | ((uint32_t)g.v[1] << 30), &t, &jac);
         /* Update f,g using that transition matrix. */
-#ifdef VERIFY
         VERIFY_CHECK(secp256k1_modinv32_mul_cmp_30(&f, len, &modinfo->modulus, 0) > 0); /* f > 0 */
         VERIFY_CHECK(secp256k1_modinv32_mul_cmp_30(&f, len, &modinfo->modulus, 1) <= 0); /* f <= modulus */
         VERIFY_CHECK(secp256k1_modinv32_mul_cmp_30(&g, len, &modinfo->modulus, 0) > 0); /* g > 0 */
         VERIFY_CHECK(secp256k1_modinv32_mul_cmp_30(&g, len, &modinfo->modulus, 1) < 0); /* g < modulus */
-#endif
+
         secp256k1_modinv32_update_fg_30_var(len, &f, &g, &t);
         /* If the bottom limb of f is 1, there is a chance that f=1. */
         if (f.v[0] == 1) {
@@ -723,12 +713,11 @@ static int secp256k1_jacobi32_maybe_var(const secp256k1_modinv32_signed30 *x, co
             cond |= gn;
             /* If so, reduce length. */
             if (cond == 0) --len;
-#ifdef VERIFY
+
             VERIFY_CHECK(secp256k1_modinv32_mul_cmp_30(&f, len, &modinfo->modulus, 0) > 0); /* f > 0 */
             VERIFY_CHECK(secp256k1_modinv32_mul_cmp_30(&f, len, &modinfo->modulus, 1) <= 0); /* f <= modulus */
             VERIFY_CHECK(secp256k1_modinv32_mul_cmp_30(&g, len, &modinfo->modulus, 0) > 0); /* g > 0 */
             VERIFY_CHECK(secp256k1_modinv32_mul_cmp_30(&g, len, &modinfo->modulus, 1) < 0); /* g < modulus */
-#endif
     }
 
     /* The loop failed to converge to f=g after 1500 iterations. Return 0, indicating unknown result. */
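
[note] `VERIFY_CHECK(++i < 25)` above is the sharp edge of the unguarded style: in production the increment disappears together with the check. That is harmless here only because `i` is an iteration counter consumed by nothing else (the 64-bit code below has the same pattern with `++i < 12`). In general, conditions with side effects must not be written this way. A condensed illustration:

    #define VERIFY_CHECK(cond)  /* assume a production build: empty */

    static int converge(void) {
        int i = 0;          /* counter consumed only by the check below */
        int done = 0;
        while (!done) {
            VERIFY_CHECK(++i < 25); /* production: ++i never executes;
                                       i stays 0 and is simply unused */
            done = 1;               /* stand-in for a real convergence test */
        }
        return done;
    }
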
diff --git a/src/modinv64_impl.h b/src/modinv64_impl.h
index c7cef872a4..0dc1e80696 100644
--- a/src/modinv64_impl.h
+++ b/src/modinv64_impl.h
@@ -144,7 +144,6 @@ static void secp256k1_modinv64_normalize_62(secp256k1_modinv64_signed62 *r, int6
     r->v[3] = r3;
     r->v[4] = r4;
 
-#ifdef VERIFY
     VERIFY_CHECK(r0 >> 62 == 0);
     VERIFY_CHECK(r1 >> 62 == 0);
     VERIFY_CHECK(r2 >> 62 == 0);
@@ -152,7 +151,6 @@ static void secp256k1_modinv64_normalize_62(secp256k1_modinv64_signed62 *r, int6
     VERIFY_CHECK(r4 >> 62 == 0);
     VERIFY_CHECK(secp256k1_modinv64_mul_cmp_62(r, 5, &modinfo->modulus, 0) >= 0); /* r >= 0 */
     VERIFY_CHECK(secp256k1_modinv64_mul_cmp_62(r, 5, &modinfo->modulus, 1) < 0); /* r < modulus */
-#endif
 }
 
 /* Compute the transition matrix and eta for 59 divsteps (where zeta=-(delta+1/2)).
@@ -216,7 +214,7 @@ static int64_t secp256k1_modinv64_divsteps_59(int64_t zeta, uint64_t f0, uint64_
     t->v = (int64_t)v;
     t->q = (int64_t)q;
     t->r = (int64_t)r;
-#ifdef VERIFY
+
     /* The determinant of t must be a power of two. This guarantees that multiplication with t
      * does not change the gcd of f and g, apart from adding a power-of-2 factor to it (which
      * will be divided out again). As each divstep's individual matrix has determinant 2, the
@@ -224,7 +222,7 @@ static int64_t secp256k1_modinv64_divsteps_59(int64_t zeta, uint64_t f0, uint64_
      * 8*identity (which has determinant 2^6) means the overall outputs has determinant
      * 2^65. */
     VERIFY_CHECK(secp256k1_modinv64_det_check_pow2(t, 65, 0));
-#endif
+
     return zeta;
 }
 
@@ -301,13 +299,13 @@ static int64_t secp256k1_modinv64_divsteps_62_var(int64_t eta, uint64_t f0, uint
     t->v = (int64_t)v;
     t->q = (int64_t)q;
     t->r = (int64_t)r;
-#ifdef VERIFY
+
     /* The determinant of t must be a power of two. This guarantees that multiplication with t
      * does not change the gcd of f and g, apart from adding a power-of-2 factor to it (which
      * will be divided out again). As each divstep's individual matrix has determinant 2, the
      * aggregate of 62 of them will have determinant 2^62. */
     VERIFY_CHECK(secp256k1_modinv64_det_check_pow2(t, 62, 0));
-#endif
+
     return eta;
 }
 
@@ -392,13 +390,13 @@ static int64_t secp256k1_modinv64_posdivsteps_62_var(int64_t eta, uint64_t f0, u
     t->v = (int64_t)v;
     t->q = (int64_t)q;
     t->r = (int64_t)r;
-#ifdef VERIFY
+
     /* The determinant of t must be a power of two. This guarantees that multiplication with t
      * does not change the gcd of f and g, apart from adding a power-of-2 factor to it (which
      * will be divided out again). As each divstep's individual matrix has determinant 2 or -2,
      * the aggregate of 62 of them will have determinant 2^62 or -2^62. */
     VERIFY_CHECK(secp256k1_modinv64_det_check_pow2(t, 62, 1));
-#endif
+
     *jacp = jac;
     return eta;
 }
@@ -417,14 +415,13 @@ static void secp256k1_modinv64_update_de_62(secp256k1_modinv64_signed62 *d, secp
     const int64_t u = t->u, v = t->v, q = t->q, r = t->r;
     int64_t md, me, sd, se;
     secp256k1_int128 cd, ce;
-#ifdef VERIFY
     VERIFY_CHECK(secp256k1_modinv64_mul_cmp_62(d, 5, &modinfo->modulus, -2) > 0); /* d > -2*modulus */
     VERIFY_CHECK(secp256k1_modinv64_mul_cmp_62(d, 5, &modinfo->modulus, 1) < 0); /* d < modulus */
     VERIFY_CHECK(secp256k1_modinv64_mul_cmp_62(e, 5, &modinfo->modulus, -2) > 0); /* e > -2*modulus */
     VERIFY_CHECK(secp256k1_modinv64_mul_cmp_62(e, 5, &modinfo->modulus, 1) < 0); /* e < modulus */
     VERIFY_CHECK(secp256k1_modinv64_abs(u) <= (((int64_t)1 << 62) - secp256k1_modinv64_abs(v))); /* |u|+|v| <= 2^62 */
     VERIFY_CHECK(secp256k1_modinv64_abs(q) <= (((int64_t)1 << 62) - secp256k1_modinv64_abs(r))); /* |q|+|r| <= 2^62 */
-#endif
+
     /* [md,me] start as zero; plus [u,q] if d is negative; plus [v,r] if e is negative. */
     sd = d4 >> 63;
     se = e4 >> 63;
@@ -489,12 +486,11 @@ static void secp256k1_modinv64_update_de_62(secp256k1_modinv64_signed62 *d, secp
     /* What remains is limb 5 of t*[d,e]+modulus*[md,me]; store it as output limb 4. */
     d->v[4] = secp256k1_i128_to_i64(&cd);
     e->v[4] = secp256k1_i128_to_i64(&ce);
-#ifdef VERIFY
+
     VERIFY_CHECK(secp256k1_modinv64_mul_cmp_62(d, 5, &modinfo->modulus, -2) > 0); /* d > -2*modulus */
     VERIFY_CHECK(secp256k1_modinv64_mul_cmp_62(d, 5, &modinfo->modulus, 1) < 0); /* d < modulus */
     VERIFY_CHECK(secp256k1_modinv64_mul_cmp_62(e, 5, &modinfo->modulus, -2) > 0); /* e > -2*modulus */
     VERIFY_CHECK(secp256k1_modinv64_mul_cmp_62(e, 5, &modinfo->modulus, 1) < 0); /* e < modulus */
-#endif
 }
 
 /* Compute (t/2^62) * [f, g], where t is a transition matrix scaled by 2^62.
@@ -606,25 +602,23 @@ static void secp256k1_modinv64(secp256k1_modinv64_signed62 *x, const secp256k1_m
         /* Update d,e using that transition matrix. */
         secp256k1_modinv64_update_de_62(&d, &e, &t, modinfo);
         /* Update f,g using that transition matrix. */
-#ifdef VERIFY
         VERIFY_CHECK(secp256k1_modinv64_mul_cmp_62(&f, 5, &modinfo->modulus, -1) > 0); /* f > -modulus */
         VERIFY_CHECK(secp256k1_modinv64_mul_cmp_62(&f, 5, &modinfo->modulus, 1) <= 0); /* f <= modulus */
         VERIFY_CHECK(secp256k1_modinv64_mul_cmp_62(&g, 5, &modinfo->modulus, -1) > 0); /* g > -modulus */
         VERIFY_CHECK(secp256k1_modinv64_mul_cmp_62(&g, 5, &modinfo->modulus, 1) < 0); /* g < modulus */
-#endif
+
         secp256k1_modinv64_update_fg_62(&f, &g, &t);
-#ifdef VERIFY
+
         VERIFY_CHECK(secp256k1_modinv64_mul_cmp_62(&f, 5, &modinfo->modulus, -1) > 0); /* f > -modulus */
         VERIFY_CHECK(secp256k1_modinv64_mul_cmp_62(&f, 5, &modinfo->modulus, 1) <= 0); /* f <= modulus */
         VERIFY_CHECK(secp256k1_modinv64_mul_cmp_62(&g, 5, &modinfo->modulus, -1) > 0); /* g > -modulus */
         VERIFY_CHECK(secp256k1_modinv64_mul_cmp_62(&g, 5, &modinfo->modulus, 1) < 0); /* g < modulus */
-#endif
     }
 
     /* At this point sufficient iterations have been performed that g must have reached 0
      * and (if g was not originally 0) f must now equal +/- GCD of the initial f, g
      * values i.e. +/- 1, and d now contains +/- the modular inverse. */
-#ifdef VERIFY
+
     /* g == 0 */
     VERIFY_CHECK(secp256k1_modinv64_mul_cmp_62(&g, 5, &SECP256K1_SIGNED62_ONE, 0) == 0);
     /* |f| == 1, or (x == 0 and d == 0 and |f|=modulus) */
@@ -634,7 +628,6 @@ static void secp256k1_modinv64(secp256k1_modinv64_signed62 *x, const secp256k1_m
                  secp256k1_modinv64_mul_cmp_62(&d, 5, &SECP256K1_SIGNED62_ONE, 0) == 0 &&
                  (secp256k1_modinv64_mul_cmp_62(&f, 5, &modinfo->modulus, 1) == 0 ||
                   secp256k1_modinv64_mul_cmp_62(&f, 5, &modinfo->modulus, -1) == 0)));
-#endif
 
     /* Optionally negate d, normalize to [0,modulus), and return it. */
     secp256k1_modinv64_normalize_62(&d, f.v[4], modinfo);
@@ -663,12 +656,11 @@ static void secp256k1_modinv64_var(secp256k1_modinv64_signed62 *x, const secp256
         /* Update d,e using that transition matrix. */
         secp256k1_modinv64_update_de_62(&d, &e, &t, modinfo);
         /* Update f,g using that transition matrix. */
-#ifdef VERIFY
         VERIFY_CHECK(secp256k1_modinv64_mul_cmp_62(&f, len, &modinfo->modulus, -1) > 0); /* f > -modulus */
         VERIFY_CHECK(secp256k1_modinv64_mul_cmp_62(&f, len, &modinfo->modulus, 1) <= 0); /* f <= modulus */
         VERIFY_CHECK(secp256k1_modinv64_mul_cmp_62(&g, len, &modinfo->modulus, -1) > 0); /* g > -modulus */
         VERIFY_CHECK(secp256k1_modinv64_mul_cmp_62(&g, len, &modinfo->modulus, 1) < 0); /* g < modulus */
-#endif
+
         secp256k1_modinv64_update_fg_62_var(len, &f, &g, &t);
         /* If the bottom limb of g is zero, there is a chance that g=0. */
         if (g.v[0] == 0) {
@@ -693,18 +685,17 @@ static void secp256k1_modinv64_var(secp256k1_modinv64_signed62 *x, const secp256
             g.v[len - 2] |= (uint64_t)gn << 62;
             --len;
         }
-#ifdef VERIFY
+
         VERIFY_CHECK(++i < 12); /* We should never need more than 12*62 = 744 divsteps */
         VERIFY_CHECK(secp256k1_modinv64_mul_cmp_62(&f, len, &modinfo->modulus, -1) > 0); /* f > -modulus */
         VERIFY_CHECK(secp256k1_modinv64_mul_cmp_62(&f, len, &modinfo->modulus, 1) <= 0); /* f <= modulus */
         VERIFY_CHECK(secp256k1_modinv64_mul_cmp_62(&g, len, &modinfo->modulus, -1) > 0); /* g > -modulus */
         VERIFY_CHECK(secp256k1_modinv64_mul_cmp_62(&g, len, &modinfo->modulus, 1) < 0); /* g < modulus */
-#endif
     }
 
     /* At this point g is 0 and (if g was not originally 0) f must now equal +/- GCD of
      * the initial f, g values i.e. +/- 1, and d now contains +/- the modular inverse. */
-#ifdef VERIFY
+
     /* g == 0 */
     VERIFY_CHECK(secp256k1_modinv64_mul_cmp_62(&g, len, &SECP256K1_SIGNED62_ONE, 0) == 0);
     /* |f| == 1, or (x == 0 and d == 0 and |f|=modulus) */
@@ -714,7 +705,6 @@ static void secp256k1_modinv64_var(secp256k1_modinv64_signed62 *x, const secp256
                  secp256k1_modinv64_mul_cmp_62(&d, 5, &SECP256K1_SIGNED62_ONE, 0) == 0 &&
                  (secp256k1_modinv64_mul_cmp_62(&f, len, &modinfo->modulus, 1) == 0 ||
                   secp256k1_modinv64_mul_cmp_62(&f, len, &modinfo->modulus, -1) == 0)));
-#endif
 
     /* Optionally negate d, normalize to [0,modulus), and return it. */
     secp256k1_modinv64_normalize_62(&d, f.v[len - 1], modinfo);
@@ -753,12 +743,11 @@ static int secp256k1_jacobi64_maybe_var(const secp256k1_modinv64_signed62 *x, co
         secp256k1_modinv64_trans2x2 t;
         eta = secp256k1_modinv64_posdivsteps_62_var(eta, f.v[0] | ((uint64_t)f.v[1] << 62), g.v[0] | ((uint64_t)g.v[1] << 62), &t, &jac);
         /* Update f,g using that transition matrix. */
-#ifdef VERIFY
         VERIFY_CHECK(secp256k1_modinv64_mul_cmp_62(&f, len, &modinfo->modulus, 0) > 0); /* f > 0 */
         VERIFY_CHECK(secp256k1_modinv64_mul_cmp_62(&f, len, &modinfo->modulus, 1) <= 0); /* f <= modulus */
         VERIFY_CHECK(secp256k1_modinv64_mul_cmp_62(&g, len, &modinfo->modulus, 0) > 0); /* g > 0 */
         VERIFY_CHECK(secp256k1_modinv64_mul_cmp_62(&g, len, &modinfo->modulus, 1) < 0); /* g < modulus */
-#endif
+
         secp256k1_modinv64_update_fg_62_var(len, &f, &g, &t);
         /* If the bottom limb of f is 1, there is a chance that f=1. */
         if (f.v[0] == 1) {
@@ -779,12 +768,11 @@ static int secp256k1_jacobi64_maybe_var(const secp256k1_modinv64_signed62 *x, co
             cond |= gn;
             /* If so, reduce length. */
             if (cond == 0) --len;
-#ifdef VERIFY
+
             VERIFY_CHECK(secp256k1_modinv64_mul_cmp_62(&f, len, &modinfo->modulus, 0) > 0); /* f > 0 */
             VERIFY_CHECK(secp256k1_modinv64_mul_cmp_62(&f, len, &modinfo->modulus, 1) <= 0); /* f <= modulus */
             VERIFY_CHECK(secp256k1_modinv64_mul_cmp_62(&g, len, &modinfo->modulus, 0) > 0); /* g > 0 */
             VERIFY_CHECK(secp256k1_modinv64_mul_cmp_62(&g, len, &modinfo->modulus, 1) < 0); /* g < modulus */
-#endif
     }
 
     /* The loop failed to converge to f=g after 1550 iterations. Return 0, indicating unknown result. */
diff --git a/src/modules/ellswift/main_impl.h b/src/modules/ellswift/main_impl.h
index 00bb8a3da5..b54ec08a22 100644
--- a/src/modules/ellswift/main_impl.h
+++ b/src/modules/ellswift/main_impl.h
@@ -126,9 +126,8 @@ static void secp256k1_ellswift_xswiftec_frac_var(secp256k1_fe *xn, secp256k1_fe
     secp256k1_fe_mul(&l, &p, &u1); /* l = u*(g+s) */
     secp256k1_fe_add(&n, &l); /* n = u*(c1*s+c2*g)+u*(g+s) */
     secp256k1_fe_negate(xn, &n, 2); /* n = -u*(c1*s+c2*g)-u*(g+s) */
-#ifdef VERIFY
+
     VERIFY_CHECK(secp256k1_ge_x_frac_on_curve_var(xn, &p));
-#endif
     /* Return x3 = n/p = -(u*(c1*s+c2*g)/(g+s)+u) */
 }
 
@@ -193,10 +192,8 @@ static int secp256k1_ellswift_xswiftec_inv_var(secp256k1_fe *t, const secp256k1_
     secp256k1_fe_normalize_weak(&x);
     secp256k1_fe_normalize_weak(&u);
 
-#ifdef VERIFY
     VERIFY_CHECK(c >= 0 && c < 8);
     VERIFY_CHECK(secp256k1_ge_x_on_curve_var(&x));
-#endif
 
     if (!(c & 2)) {
         /* c is in {0, 1, 4, 5}. In this case we look for an inverse under the x1 (if c=0 or
@@ -230,9 +227,7 @@ static int secp256k1_ellswift_xswiftec_inv_var(secp256k1_fe *t, const secp256k1_
          * that (-u-x)^3 + B is not square (the secp256k1_ge_x_on_curve_var(&m)
          * test above would have failed). This is a contradiction, and thus the
          * assumption s=0 is false. */
-#ifdef VERIFY
         VERIFY_CHECK(!secp256k1_fe_normalizes_to_zero_var(&s));
-#endif
 
         /* If s is not square, fail. We have not fully computed s yet, but s is square iff
         * -(u^3+7)*(u^2+u*x+x^2) is square (because a/b is square iff a*b is square and b is
@@ -272,7 +267,11 @@ static int secp256k1_ellswift_xswiftec_inv_var(secp256k1_fe *t, const secp256k1_
         secp256k1_fe_negate(&q, &q, 1); /* q = -s*(4*(u^3+7)+3*u^2*s) */
         if (!secp256k1_fe_is_square_var(&q)) return 0;
         ret = secp256k1_fe_sqrt(&r, &q); /* r = sqrt(-s*(4*(u^3+7)+3*u^2*s)) */
+#ifdef VERIFY
         VERIFY_CHECK(ret);
+#else
+        (void)ret;
+#endif
 
         /* If (c & 1) = 1 and r = 0, fail. */
         if (EXPECT((c & 1) && secp256k1_fe_normalizes_to_zero_var(&r), 0)) return 0;
@@ -320,10 +319,9 @@ static void secp256k1_ellswift_prng(unsigned char* out32, const secp256k1_sha256
     buf4[3] = cnt >> 24;
     secp256k1_sha256_write(&hash, buf4, 4);
     secp256k1_sha256_finalize(&hash, out32);
-#ifdef VERIFY
+
     /* Writing and finalizing together should trigger exactly one SHA256 compression. */
     VERIFY_CHECK(((hash.bytes) >> 6) == (blocks + 1));
-#endif
 }
 
 /** Find an ElligatorSwift encoding (u, t) for X coordinate x, and random Y coordinate.
@@ -361,9 +359,8 @@ static void secp256k1_ellswift_xelligatorswift_var(unsigned char *u32, secp256k1
         /* Since u is the output of a hash, it should practically never be 0. We could apply the
          * u=0 to u=1 correction here too to deal with that case still, but it's such a low
          * probability event that we do not bother. */
-#ifdef VERIFY
         VERIFY_CHECK(!secp256k1_fe_normalizes_to_zero_var(&u));
-#endif
+
         /* Find a remainder t, and return it if found. */
         if (EXPECT(secp256k1_ellswift_xswiftec_inv_var(t, x, &u, branch), 0)) break;
     }
@@ -417,7 +414,11 @@ int secp256k1_ellswift_encode(const secp256k1_context *ctx, unsigned char *ell64
          * BIP340 tagged hash with tag "secp256k1_ellswift_encode". */
         secp256k1_ellswift_sha256_init_encode(&hash);
         ser_ret = secp256k1_eckey_pubkey_serialize(&p, p64, &ser_size, 1);
+#ifdef VERIFY
         VERIFY_CHECK(ser_ret && ser_size == 33);
+#else
+        (void)ser_ret;
+#endif
         secp256k1_sha256_write(&hash, p64, sizeof(p64));
         secp256k1_sha256_write(&hash, rnd32, 32);
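
[note] The two ellswift hunks that *add* an `#ifdef VERIFY` go in the opposite direction: when a return value is computed only so it can be checked, the now-empty VERIFY_CHECK leaves it unused in production, so an explicit `(void)` cast is needed to keep compilers quiet. The shape, reduced to a sketch (helper names hypothetical; assert stands in for the real check):

    #include <assert.h>

    #ifdef VERIFY
    #define VERIFY_CHECK(cond) assert(cond)
    #else
    #define VERIFY_CHECK(cond)
    #endif

    static int serialize(unsigned char *buf) { buf[0] = 2; return 1; }

    static void encode(unsigned char *buf) {
        int ret = serialize(buf);  /* result consumed only by the check */
    #ifdef VERIFY
        VERIFY_CHECK(ret);
    #else
        (void)ret;                 /* silence unused-variable warnings  */
    #endif
    }
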
diff --git a/src/modules/ellswift/tests_impl.h b/src/modules/ellswift/tests_impl.h
index 47f443d980..b478e39326 100644
--- a/src/modules/ellswift/tests_impl.h
+++ b/src/modules/ellswift/tests_impl.h
@@ -285,7 +285,7 @@ void run_ellswift_tests(void) {
             ret = secp256k1_ellswift_xdh(CTX, share32, ell64, ell64, sec32, i & 1, &ellswift_xdh_hash_x32, NULL);
             CHECK(ret);
             (void)secp256k1_fe_set_b32_limit(&share_x, share32); /* no overflow is possible */
-            secp256k1_fe_verify(&share_x);
+            SECP256K1_FE_VERIFY(&share_x);
             /* Compute seckey*pubkey directly. */
             secp256k1_ecmult(&resj, &decj, &sec, NULL);
             secp256k1_ge_set_gej(&res, &resj);
diff --git a/src/scalar.h b/src/scalar.h
index 00a390bc5b..98b1287bb5 100644
--- a/src/scalar.h
+++ b/src/scalar.h
@@ -100,5 +100,6 @@ static void secp256k1_scalar_cmov(secp256k1_scalar *r, const secp256k1_scalar *a
 
 /** Check invariants on a scalar (no-op unless VERIFY is enabled). */
 static void secp256k1_scalar_verify(const secp256k1_scalar *r);
+#define SECP256K1_SCALAR_VERIFY(r) secp256k1_scalar_verify(r)
 
 #endif /* SECP256K1_SCALAR_H */
diff --git a/src/scalar_4x64_impl.h b/src/scalar_4x64_impl.h
index b3cc7523f7..7b9c542f07 100644
--- a/src/scalar_4x64_impl.h
+++ b/src/scalar_4x64_impl.h
@@ -42,18 +42,18 @@ SECP256K1_INLINE static void secp256k1_scalar_set_int(secp256k1_scalar *r, unsig
     r->d[2] = 0;
     r->d[3] = 0;
 
-    secp256k1_scalar_verify(r);
+    SECP256K1_SCALAR_VERIFY(r);
 }
 
 SECP256K1_INLINE static unsigned int secp256k1_scalar_get_bits(const secp256k1_scalar *a, unsigned int offset, unsigned int count) {
-    secp256k1_scalar_verify(a);
+    SECP256K1_SCALAR_VERIFY(a);
     VERIFY_CHECK((offset + count - 1) >> 6 == offset >> 6);
 
     return (a->d[offset >> 6] >> (offset & 0x3F)) & ((((uint64_t)1) << count) - 1);
 }
 
 SECP256K1_INLINE static unsigned int secp256k1_scalar_get_bits_var(const secp256k1_scalar *a, unsigned int offset, unsigned int count) {
-    secp256k1_scalar_verify(a);
+    SECP256K1_SCALAR_VERIFY(a);
     VERIFY_CHECK(count < 32);
     VERIFY_CHECK(offset + count <= 256);
 
@@ -93,15 +93,15 @@ SECP256K1_INLINE static int secp256k1_scalar_reduce(secp256k1_scalar *r, unsigne
     secp256k1_u128_accum_u64(&t, r->d[3]);
     r->d[3] = secp256k1_u128_to_u64(&t);
 
-    secp256k1_scalar_verify(r);
+    SECP256K1_SCALAR_VERIFY(r);
     return overflow;
 }
 
 static int secp256k1_scalar_add(secp256k1_scalar *r, const secp256k1_scalar *a, const secp256k1_scalar *b) {
     int overflow;
     secp256k1_uint128 t;
-    secp256k1_scalar_verify(a);
-    secp256k1_scalar_verify(b);
+    SECP256K1_SCALAR_VERIFY(a);
+    SECP256K1_SCALAR_VERIFY(b);
 
     secp256k1_u128_from_u64(&t, a->d[0]);
     secp256k1_u128_accum_u64(&t, b->d[0]);
@@ -119,14 +119,14 @@ static int secp256k1_scalar_add(secp256k1_scalar *r, const secp256k1_scalar *a,
     VERIFY_CHECK(overflow == 0 || overflow == 1);
     secp256k1_scalar_reduce(r, overflow);
 
-    secp256k1_scalar_verify(r);
+    SECP256K1_SCALAR_VERIFY(r);
     return overflow;
 }
 
 static void secp256k1_scalar_cadd_bit(secp256k1_scalar *r, unsigned int bit, int flag) {
     secp256k1_uint128 t;
     volatile int vflag = flag;
-    secp256k1_scalar_verify(r);
+    SECP256K1_SCALAR_VERIFY(r);
     VERIFY_CHECK(bit < 256);
 
     bit += ((uint32_t) vflag - 1) & 0x100;  /* forcing (bit >> 6) > 3 makes this a noop */
@@ -143,10 +143,8 @@ static void secp256k1_scalar_cadd_bit(secp256k1_scalar *r, unsigned int bit, int
     secp256k1_u128_accum_u64(&t, ((uint64_t)((bit >> 6) == 3)) << (bit & 0x3F));
     r->d[3] = secp256k1_u128_to_u64(&t);
 
-    secp256k1_scalar_verify(r);
-#ifdef VERIFY
+    SECP256K1_SCALAR_VERIFY(r);
     VERIFY_CHECK(secp256k1_u128_hi_u64(&t) == 0);
-#endif
 }
 
 static void secp256k1_scalar_set_b32(secp256k1_scalar *r, const unsigned char *b32, int *overflow) {
@@ -160,11 +158,11 @@ static void secp256k1_scalar_set_b32(secp256k1_scalar *r, const unsigned char *b
         *overflow = over;
     }
 
-    secp256k1_scalar_verify(r);
+    SECP256K1_SCALAR_VERIFY(r);
 }
 
 static void secp256k1_scalar_get_b32(unsigned char *bin, const secp256k1_scalar* a) {
-    secp256k1_scalar_verify(a);
+    SECP256K1_SCALAR_VERIFY(a);
 
     secp256k1_write_be64(&bin[0], a->d[3]);
     secp256k1_write_be64(&bin[8], a->d[2]);
@@ -173,7 +171,7 @@ static void secp256k1_scalar_get_b32(unsigned char *bin, const secp256k1_scalar*
 }
 
 SECP256K1_INLINE static int secp256k1_scalar_is_zero(const secp256k1_scalar *a) {
-    secp256k1_scalar_verify(a);
+    SECP256K1_SCALAR_VERIFY(a);
 
     return (a->d[0] | a->d[1] | a->d[2] | a->d[3]) == 0;
 }
 
@@ -181,7 +179,7 @@ SECP256K1_INLINE static int secp256k1_scalar_is_zero(const secp256k1_scalar *a)
 static void secp256k1_scalar_negate(secp256k1_scalar *r, const secp256k1_scalar *a) {
     uint64_t nonzero = 0xFFFFFFFFFFFFFFFFULL * (secp256k1_scalar_is_zero(a) == 0);
     secp256k1_uint128 t;
-    secp256k1_scalar_verify(a);
+    SECP256K1_SCALAR_VERIFY(a);
 
     secp256k1_u128_from_u64(&t, ~a->d[0]);
     secp256k1_u128_accum_u64(&t, SECP256K1_N_0 + 1);
@@ -196,7 +194,7 @@ static void secp256k1_scalar_negate(secp256k1_scalar *r, const secp256k1_scalar
     secp256k1_u128_accum_u64(&t, SECP256K1_N_3);
     r->d[3] = secp256k1_u128_to_u64(&t) & nonzero;
 
-    secp256k1_scalar_verify(r);
+    SECP256K1_SCALAR_VERIFY(r);
 }
 
 static void secp256k1_scalar_half(secp256k1_scalar *r, const secp256k1_scalar *a) {
@@ -216,7 +214,7 @@ static void secp256k1_scalar_half(secp256k1_scalar *r, const secp256k1_scalar *a
      */
     uint64_t mask = -(uint64_t)(a->d[0] & 1U);
     secp256k1_uint128 t;
-    secp256k1_scalar_verify(a);
+    SECP256K1_SCALAR_VERIFY(a);
 
     secp256k1_u128_from_u64(&t, (a->d[0] >> 1) | (a->d[1] << 63));
     secp256k1_u128_accum_u64(&t, (SECP256K1_N_H_0 + 1U) & mask);
@@ -236,12 +234,12 @@ static void secp256k1_scalar_half(secp256k1_scalar *r, const secp256k1_scalar *a
     secp256k1_u128_rshift(&t, 64);
     VERIFY_CHECK(secp256k1_u128_to_u64(&t) == 0);
 
-    secp256k1_scalar_verify(r);
+    SECP256K1_SCALAR_VERIFY(r);
 #endif
 }
 
 SECP256K1_INLINE static int secp256k1_scalar_is_one(const secp256k1_scalar *a) {
-    secp256k1_scalar_verify(a);
+    SECP256K1_SCALAR_VERIFY(a);
 
     return ((a->d[0] ^ 1) | a->d[1] | a->d[2] | a->d[3]) == 0;
 }
 
@@ -249,7 +247,7 @@ SECP256K1_INLINE static int secp256k1_scalar_is_one(const secp256k1_scalar *a) {
 static int secp256k1_scalar_is_high(const secp256k1_scalar *a) {
     int yes = 0;
     int no = 0;
-    secp256k1_scalar_verify(a);
+    SECP256K1_SCALAR_VERIFY(a);
 
     no |= (a->d[3] < SECP256K1_N_H_3);
     yes |= (a->d[3] > SECP256K1_N_H_3) & ~no;
@@ -267,7 +265,7 @@ static int secp256k1_scalar_cond_negate(secp256k1_scalar *r, int flag) {
     uint64_t mask = -vflag;
     uint64_t nonzero = (secp256k1_scalar_is_zero(r) != 0) - 1;
     secp256k1_uint128 t;
-    secp256k1_scalar_verify(r);
+    SECP256K1_SCALAR_VERIFY(r);
 
     secp256k1_u128_from_u64(&t, r->d[0] ^ mask);
     secp256k1_u128_accum_u64(&t, (SECP256K1_N_0 + 1) & mask);
@@ -282,7 +280,7 @@ static int secp256k1_scalar_cond_negate(secp256k1_scalar *r, int flag) {
     secp256k1_u128_accum_u64(&t, SECP256K1_N_3 & mask);
     r->d[3] = secp256k1_u128_to_u64(&t) & nonzero;
 
-    secp256k1_scalar_verify(r);
+    SECP256K1_SCALAR_VERIFY(r);
     return 2 * (mask == 0) - 1;
 }
 
@@ -841,17 +839,17 @@ static void secp256k1_scalar_mul_512(uint64_t l[8], const secp256k1_scalar *a, c
 
 static void secp256k1_scalar_mul(secp256k1_scalar *r, const secp256k1_scalar *a, const secp256k1_scalar *b) {
     uint64_t l[8];
-    secp256k1_scalar_verify(a);
-    secp256k1_scalar_verify(b);
+    SECP256K1_SCALAR_VERIFY(a);
+    SECP256K1_SCALAR_VERIFY(b);
 
     secp256k1_scalar_mul_512(l, a, b);
     secp256k1_scalar_reduce_512(r, l);
 
-    secp256k1_scalar_verify(r);
+    SECP256K1_SCALAR_VERIFY(r);
 }
 
 static void secp256k1_scalar_split_128(secp256k1_scalar *r1, secp256k1_scalar *r2, const secp256k1_scalar *k) {
-    secp256k1_scalar_verify(k);
+    SECP256K1_SCALAR_VERIFY(k);
 
     r1->d[0] = k->d[0];
     r1->d[1] = k->d[1];
@@ -862,13 +860,13 @@ static void secp256k1_scalar_split_128(secp256k1_scalar *r1, secp256k1_scalar *r
     r2->d[2] = 0;
     r2->d[3] = 0;
 
-    secp256k1_scalar_verify(r1);
-    secp256k1_scalar_verify(r2);
+    SECP256K1_SCALAR_VERIFY(r1);
+    SECP256K1_SCALAR_VERIFY(r2);
 }
 
 SECP256K1_INLINE static int secp256k1_scalar_eq(const secp256k1_scalar *a, const secp256k1_scalar *b) {
-    secp256k1_scalar_verify(a);
-    secp256k1_scalar_verify(b);
+    SECP256K1_SCALAR_VERIFY(a);
+    SECP256K1_SCALAR_VERIFY(b);
 
     return ((a->d[0] ^ b->d[0]) | (a->d[1] ^ b->d[1]) | (a->d[2] ^ b->d[2]) | (a->d[3] ^ b->d[3])) == 0;
 }
 
@@ -878,8 +876,8 @@ SECP256K1_INLINE static void secp256k1_scalar_mul_shift_var(secp256k1_scalar *r,
     unsigned int shiftlimbs;
     unsigned int shiftlow;
     unsigned int shifthigh;
-    secp256k1_scalar_verify(a);
-    secp256k1_scalar_verify(b);
+    SECP256K1_SCALAR_VERIFY(a);
+    SECP256K1_SCALAR_VERIFY(b);
     VERIFY_CHECK(shift >= 256);
 
     secp256k1_scalar_mul_512(l, a, b);
@@ -892,13 +890,13 @@ SECP256K1_INLINE static void secp256k1_scalar_mul_shift_var(secp256k1_scalar *r,
     r->d[3] = shift < 320 ? (l[3 + shiftlimbs] >> shiftlow) : 0;
     secp256k1_scalar_cadd_bit(r, 0, (l[(shift - 1) >> 6] >> ((shift - 1) & 0x3f)) & 1);
 
-    secp256k1_scalar_verify(r);
+    SECP256K1_SCALAR_VERIFY(r);
 }
 
 static SECP256K1_INLINE void secp256k1_scalar_cmov(secp256k1_scalar *r, const secp256k1_scalar *a, int flag) {
     uint64_t mask0, mask1;
     volatile int vflag = flag;
-    secp256k1_scalar_verify(a);
+    SECP256K1_SCALAR_VERIFY(a);
     SECP256K1_CHECKMEM_CHECK_VERIFY(r->d, sizeof(r->d));
 
     mask0 = vflag + ~((uint64_t)0);
@@ -908,7 +906,7 @@ static SECP256K1_INLINE void secp256k1_scalar_cmov(secp256k1_scalar *r, const se
     r->d[2] = (r->d[2] & mask0) | (a->d[2] & mask1);
     r->d[3] = (r->d[3] & mask0) | (a->d[3] & mask1);
 
-    secp256k1_scalar_verify(r);
+    SECP256K1_SCALAR_VERIFY(r);
 }
 
 static void secp256k1_scalar_from_signed62(secp256k1_scalar *r, const secp256k1_modinv64_signed62 *a) {
@@ -928,13 +926,13 @@ static void secp256k1_scalar_from_signed62(secp256k1_scalar *r, const secp256k1_
     r->d[2] = a2 >> 4 | a3 << 58;
     r->d[3] = a3 >> 6 | a4 << 56;
 
-    secp256k1_scalar_verify(r);
+    SECP256K1_SCALAR_VERIFY(r);
 }
 
 static void secp256k1_scalar_to_signed62(secp256k1_modinv64_signed62 *r, const secp256k1_scalar *a) {
     const uint64_t M62 = UINT64_MAX >> 2;
     const uint64_t a0 = a->d[0], a1 = a->d[1], a2 = a->d[2], a3 = a->d[3];
-    secp256k1_scalar_verify(a);
+    SECP256K1_SCALAR_VERIFY(a);
 
     r->v[0] = a0 & M62;
     r->v[1] = (a0 >> 62 | a1 << 2) & M62;
@@ -953,16 +951,14 @@ static void secp256k1_scalar_inverse(secp256k1_scalar *r, const secp256k1_scalar
 #ifdef VERIFY
     int zero_in = secp256k1_scalar_is_zero(x);
 #endif
-    secp256k1_scalar_verify(x);
+    SECP256K1_SCALAR_VERIFY(x);
 
     secp256k1_scalar_to_signed62(&s, x);
     secp256k1_modinv64(&s, &secp256k1_const_modinfo_scalar);
     secp256k1_scalar_from_signed62(r, &s);
 
-    secp256k1_scalar_verify(r);
-#ifdef VERIFY
+    SECP256K1_SCALAR_VERIFY(r);
     VERIFY_CHECK(secp256k1_scalar_is_zero(r) == zero_in);
-#endif
 }
 
 static void secp256k1_scalar_inverse_var(secp256k1_scalar *r, const secp256k1_scalar *x) {
@@ -970,20 +966,18 @@ static void secp256k1_scalar_inverse_var(secp256k1_scalar *r, const secp256k1_sc
 #ifdef VERIFY
     int zero_in = secp256k1_scalar_is_zero(x);
 #endif
-    secp256k1_scalar_verify(x);
+    SECP256K1_SCALAR_VERIFY(x);
 
     secp256k1_scalar_to_signed62(&s, x);
     secp256k1_modinv64_var(&s, &secp256k1_const_modinfo_scalar);
     secp256k1_scalar_from_signed62(r, &s);
 
-    secp256k1_scalar_verify(r);
-#ifdef VERIFY
+    SECP256K1_SCALAR_VERIFY(r);
     VERIFY_CHECK(secp256k1_scalar_is_zero(r) == zero_in);
-#endif
 }
 
 SECP256K1_INLINE static int secp256k1_scalar_is_even(const secp256k1_scalar *a) {
-    secp256k1_scalar_verify(a);
+    SECP256K1_SCALAR_VERIFY(a);
 
     return !(a->d[0] & 1);
 }
SECP256K1_SCALAR_VERIFY(a); + SECP256K1_SCALAR_VERIFY(b); return ((a->d[0] ^ b->d[0]) | (a->d[1] ^ b->d[1]) | (a->d[2] ^ b->d[2]) | (a->d[3] ^ b->d[3])) == 0; } @@ -878,8 +876,8 @@ SECP256K1_INLINE static void secp256k1_scalar_mul_shift_var(secp256k1_scalar *r, unsigned int shiftlimbs; unsigned int shiftlow; unsigned int shifthigh; - secp256k1_scalar_verify(a); - secp256k1_scalar_verify(b); + SECP256K1_SCALAR_VERIFY(a); + SECP256K1_SCALAR_VERIFY(b); VERIFY_CHECK(shift >= 256); secp256k1_scalar_mul_512(l, a, b); @@ -892,13 +890,13 @@ SECP256K1_INLINE static void secp256k1_scalar_mul_shift_var(secp256k1_scalar *r, r->d[3] = shift < 320 ? (l[3 + shiftlimbs] >> shiftlow) : 0; secp256k1_scalar_cadd_bit(r, 0, (l[(shift - 1) >> 6] >> ((shift - 1) & 0x3f)) & 1); - secp256k1_scalar_verify(r); + SECP256K1_SCALAR_VERIFY(r); } static SECP256K1_INLINE void secp256k1_scalar_cmov(secp256k1_scalar *r, const secp256k1_scalar *a, int flag) { uint64_t mask0, mask1; volatile int vflag = flag; - secp256k1_scalar_verify(a); + SECP256K1_SCALAR_VERIFY(a); SECP256K1_CHECKMEM_CHECK_VERIFY(r->d, sizeof(r->d)); mask0 = vflag + ~((uint64_t)0); @@ -908,7 +906,7 @@ static SECP256K1_INLINE void secp256k1_scalar_cmov(secp256k1_scalar *r, const se r->d[2] = (r->d[2] & mask0) | (a->d[2] & mask1); r->d[3] = (r->d[3] & mask0) | (a->d[3] & mask1); - secp256k1_scalar_verify(r); + SECP256K1_SCALAR_VERIFY(r); } static void secp256k1_scalar_from_signed62(secp256k1_scalar *r, const secp256k1_modinv64_signed62 *a) { @@ -928,13 +926,13 @@ static void secp256k1_scalar_from_signed62(secp256k1_scalar *r, const secp256k1_ r->d[2] = a2 >> 4 | a3 << 58; r->d[3] = a3 >> 6 | a4 << 56; - secp256k1_scalar_verify(r); + SECP256K1_SCALAR_VERIFY(r); } static void secp256k1_scalar_to_signed62(secp256k1_modinv64_signed62 *r, const secp256k1_scalar *a) { const uint64_t M62 = UINT64_MAX >> 2; const uint64_t a0 = a->d[0], a1 = a->d[1], a2 = a->d[2], a3 = a->d[3]; - secp256k1_scalar_verify(a); + SECP256K1_SCALAR_VERIFY(a); r->v[0] = a0 & M62; r->v[1] = (a0 >> 62 | a1 << 2) & M62; @@ -953,16 +951,14 @@ static void secp256k1_scalar_inverse(secp256k1_scalar *r, const secp256k1_scalar #ifdef VERIFY int zero_in = secp256k1_scalar_is_zero(x); #endif - secp256k1_scalar_verify(x); + SECP256K1_SCALAR_VERIFY(x); secp256k1_scalar_to_signed62(&s, x); secp256k1_modinv64(&s, &secp256k1_const_modinfo_scalar); secp256k1_scalar_from_signed62(r, &s); - secp256k1_scalar_verify(r); -#ifdef VERIFY + SECP256K1_SCALAR_VERIFY(r); VERIFY_CHECK(secp256k1_scalar_is_zero(r) == zero_in); -#endif } static void secp256k1_scalar_inverse_var(secp256k1_scalar *r, const secp256k1_scalar *x) { @@ -970,20 +966,18 @@ static void secp256k1_scalar_inverse_var(secp256k1_scalar *r, const secp256k1_sc #ifdef VERIFY int zero_in = secp256k1_scalar_is_zero(x); #endif - secp256k1_scalar_verify(x); + SECP256K1_SCALAR_VERIFY(x); secp256k1_scalar_to_signed62(&s, x); secp256k1_modinv64_var(&s, &secp256k1_const_modinfo_scalar); secp256k1_scalar_from_signed62(r, &s); - secp256k1_scalar_verify(r); -#ifdef VERIFY + SECP256K1_SCALAR_VERIFY(r); VERIFY_CHECK(secp256k1_scalar_is_zero(r) == zero_in); -#endif } SECP256K1_INLINE static int secp256k1_scalar_is_even(const secp256k1_scalar *a) { - secp256k1_scalar_verify(a); + SECP256K1_SCALAR_VERIFY(a); return !(a->d[0] & 1); } diff --git a/src/scalar_8x32_impl.h b/src/scalar_8x32_impl.h index fed2e77b69..58ae51bc02 100644 --- a/src/scalar_8x32_impl.h +++ b/src/scalar_8x32_impl.h @@ -59,18 +59,18 @@ SECP256K1_INLINE static void 
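The cadd_bit hunks above (and their 8x32 counterparts further down) rely on a branch-free trick worth spelling out: bit += ((uint32_t)vflag - 1) & 0x100 leaves bit unchanged when flag is 1, and pushes it past 255 when flag is 0, so every ((bit >> 6) == k) limb selector evaluates to 0 and the conditional add becomes a no-op with no data-dependent branch. A toy demonstration (assumes flag is 0 or 1; not library code):

#include <stdint.h>
#include <stdio.h>

int main(void) {
    unsigned int bit = 7;
    volatile int vflag = 0;  /* pretend the caller passed flag == 0 */
    unsigned int adjusted = bit + (((uint32_t)vflag - 1) & 0x100);
    /* flag == 1: (1 - 1) & 0x100 == 0,     adjusted == 7   -> the bit really gets added
     * flag == 0: (0 - 1) & 0x100 == 0x100, adjusted == 263 -> (263 >> 6) == 4, so none
     * of the ((bit >> 6) == 0..3) limb selectors match and nothing is added. */
    printf("%u\n", adjusted);  /* prints 263 */
    return 0;
}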
diff --git a/src/scalar_8x32_impl.h b/src/scalar_8x32_impl.h
index fed2e77b69..58ae51bc02 100644
--- a/src/scalar_8x32_impl.h
+++ b/src/scalar_8x32_impl.h
@@ -59,18 +59,18 @@ SECP256K1_INLINE static void secp256k1_scalar_set_int(secp256k1_scalar *r, unsig
     r->d[6] = 0;
     r->d[7] = 0;
 
-    secp256k1_scalar_verify(r);
+    SECP256K1_SCALAR_VERIFY(r);
 }
 
 SECP256K1_INLINE static unsigned int secp256k1_scalar_get_bits(const secp256k1_scalar *a, unsigned int offset, unsigned int count) {
-    secp256k1_scalar_verify(a);
+    SECP256K1_SCALAR_VERIFY(a);
 
     VERIFY_CHECK((offset + count - 1) >> 5 == offset >> 5);
     return (a->d[offset >> 5] >> (offset & 0x1F)) & ((1 << count) - 1);
 }
 
 SECP256K1_INLINE static unsigned int secp256k1_scalar_get_bits_var(const secp256k1_scalar *a, unsigned int offset, unsigned int count) {
-    secp256k1_scalar_verify(a);
+    SECP256K1_SCALAR_VERIFY(a);
 
     VERIFY_CHECK(count < 32);
     VERIFY_CHECK(offset + count <= 256);
@@ -121,15 +121,15 @@ SECP256K1_INLINE static int secp256k1_scalar_reduce(secp256k1_scalar *r, uint32_
     t += (uint64_t)r->d[7];
     r->d[7] = t & 0xFFFFFFFFUL;
 
-    secp256k1_scalar_verify(r);
+    SECP256K1_SCALAR_VERIFY(r);
     return overflow;
 }
 
 static int secp256k1_scalar_add(secp256k1_scalar *r, const secp256k1_scalar *a, const secp256k1_scalar *b) {
     int overflow;
     uint64_t t = (uint64_t)a->d[0] + b->d[0];
-    secp256k1_scalar_verify(a);
-    secp256k1_scalar_verify(b);
+    SECP256K1_SCALAR_VERIFY(a);
+    SECP256K1_SCALAR_VERIFY(b);
 
     r->d[0] = t & 0xFFFFFFFFULL; t >>= 32;
     t += (uint64_t)a->d[1] + b->d[1];
@@ -150,14 +150,14 @@ static int secp256k1_scalar_add(secp256k1_scalar *r, const secp256k1_scalar *a,
     VERIFY_CHECK(overflow == 0 || overflow == 1);
     secp256k1_scalar_reduce(r, overflow);
 
-    secp256k1_scalar_verify(r);
+    SECP256K1_SCALAR_VERIFY(r);
     return overflow;
 }
 
 static void secp256k1_scalar_cadd_bit(secp256k1_scalar *r, unsigned int bit, int flag) {
     uint64_t t;
     volatile int vflag = flag;
-    secp256k1_scalar_verify(r);
+    SECP256K1_SCALAR_VERIFY(r);
     VERIFY_CHECK(bit < 256);
 
     bit += ((uint32_t) vflag - 1) & 0x100;  /* forcing (bit >> 5) > 7 makes this a noop */
@@ -178,10 +178,8 @@ static void secp256k1_scalar_cadd_bit(secp256k1_scalar *r, unsigned int bit, int
     t += (uint64_t)r->d[7] + (((uint32_t)((bit >> 5) == 7)) << (bit & 0x1F));
     r->d[7] = t & 0xFFFFFFFFULL;
 
-    secp256k1_scalar_verify(r);
-#ifdef VERIFY
+    SECP256K1_SCALAR_VERIFY(r);
     VERIFY_CHECK((t >> 32) == 0);
-#endif
 }
 
 static void secp256k1_scalar_set_b32(secp256k1_scalar *r, const unsigned char *b32, int *overflow) {
@@ -199,11 +197,11 @@ static void secp256k1_scalar_set_b32(secp256k1_scalar *r, const unsigned char *b
         *overflow = over;
     }
 
-    secp256k1_scalar_verify(r);
+    SECP256K1_SCALAR_VERIFY(r);
 }
 
 static void secp256k1_scalar_get_b32(unsigned char *bin, const secp256k1_scalar* a) {
-    secp256k1_scalar_verify(a);
+    SECP256K1_SCALAR_VERIFY(a);
 
     secp256k1_write_be32(&bin[0], a->d[7]);
     secp256k1_write_be32(&bin[4], a->d[6]);
@@ -216,7 +214,7 @@ static void secp256k1_scalar_get_b32(unsigned char *bin, const secp256k1_scalar*
 }
 
 SECP256K1_INLINE static int secp256k1_scalar_is_zero(const secp256k1_scalar *a) {
-    secp256k1_scalar_verify(a);
+    SECP256K1_SCALAR_VERIFY(a);
 
     return (a->d[0] | a->d[1] | a->d[2] | a->d[3] | a->d[4] | a->d[5] | a->d[6] | a->d[7]) == 0;
 }
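The scalar_negate hunks just below use a masking idea that is easy to miss: n - a is the correct negation for a != 0, but would yield n itself (an out-of-range encoding of 0) for a == 0, so the result is ANDed with a mask that is all-ones iff a is nonzero. A one-limb sketch with a toy modulus (TOY_N is a hypothetical stand-in, not the real group order):

#include <stdint.h>
#include <stdio.h>

#define TOY_N 13u  /* hypothetical stand-in for the group order n */

static uint32_t toy_negate(uint32_t a) {
    uint32_t nonzero = 0xFFFFFFFFu * (a != 0);  /* 0 if a == 0, all-ones otherwise */
    return (TOY_N - a) & nonzero;               /* n - a, forced to 0 when a == 0 */
}

int main(void) {
    printf("%u %u\n", toy_negate(5), toy_negate(0));  /* prints "8 0" */
    return 0;
}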
@@ -224,7 +222,7 @@ SECP256K1_INLINE static int secp256k1_scalar_is_zero(const secp256k1_scalar *a)
 static void secp256k1_scalar_negate(secp256k1_scalar *r, const secp256k1_scalar *a) {
     uint32_t nonzero = 0xFFFFFFFFUL * (secp256k1_scalar_is_zero(a) == 0);
     uint64_t t = (uint64_t)(~a->d[0]) + SECP256K1_N_0 + 1;
-    secp256k1_scalar_verify(a);
+    SECP256K1_SCALAR_VERIFY(a);
 
     r->d[0] = t & nonzero; t >>= 32;
     t += (uint64_t)(~a->d[1]) + SECP256K1_N_1;
@@ -242,7 +240,7 @@ static void secp256k1_scalar_negate(secp256k1_scalar
     t += (uint64_t)(~a->d[7]) + SECP256K1_N_7;
     r->d[7] = t & nonzero;
 
-    secp256k1_scalar_verify(r);
+    SECP256K1_SCALAR_VERIFY(r);
 }
 
 static void secp256k1_scalar_half(secp256k1_scalar *r, const secp256k1_scalar *a) {
@@ -262,7 +260,7 @@ static void secp256k1_scalar_half(secp256k1_scalar *r, const secp256k1_scalar *a
      */
     uint32_t mask = -(uint32_t)(a->d[0] & 1U);
     uint64_t t = (uint32_t)((a->d[0] >> 1) | (a->d[1] << 31));
-    secp256k1_scalar_verify(a);
+    SECP256K1_SCALAR_VERIFY(a);
 
     t += (SECP256K1_N_H_0 + 1U) & mask;
     r->d[0] = t; t >>= 32;
@@ -285,17 +283,16 @@ static void secp256k1_scalar_half(secp256k1_scalar *r, const secp256k1_scalar *a
     t += SECP256K1_N_H_6 & mask;
     r->d[6] = t; t >>= 32;
     r->d[7] = (uint32_t)t + (uint32_t)(a->d[7] >> 1) + (SECP256K1_N_H_7 & mask);
-#ifdef VERIFY
+
     /* The line above only computed the bottom 32 bits of r->d[7]. Redo the computation
      * in full 64 bits to make sure the top 32 bits are indeed zero. */
     VERIFY_CHECK((t + (a->d[7] >> 1) + (SECP256K1_N_H_7 & mask)) >> 32 == 0);
 
-    secp256k1_scalar_verify(r);
-#endif
+    SECP256K1_SCALAR_VERIFY(r);
 }
 
 SECP256K1_INLINE static int secp256k1_scalar_is_one(const secp256k1_scalar *a) {
-    secp256k1_scalar_verify(a);
+    SECP256K1_SCALAR_VERIFY(a);
 
     return ((a->d[0] ^ 1) | a->d[1] | a->d[2] | a->d[3] | a->d[4] | a->d[5] | a->d[6] | a->d[7]) == 0;
 }
@@ -303,7 +300,7 @@ SECP256K1_INLINE static int secp256k1_scalar_is_one(const secp256k1_scalar *a) {
 static int secp256k1_scalar_is_high(const secp256k1_scalar *a) {
     int yes = 0;
     int no = 0;
-    secp256k1_scalar_verify(a);
+    SECP256K1_SCALAR_VERIFY(a);
 
     no |= (a->d[7] < SECP256K1_N_H_7);
     yes |= (a->d[7] > SECP256K1_N_H_7) & ~no;
@@ -327,7 +324,7 @@ static int secp256k1_scalar_cond_negate(secp256k1_scalar *r, int flag) {
     uint32_t mask = -vflag;
     uint32_t nonzero = 0xFFFFFFFFUL * (secp256k1_scalar_is_zero(r) == 0);
     uint64_t t = (uint64_t)(r->d[0] ^ mask) + ((SECP256K1_N_0 + 1) & mask);
-    secp256k1_scalar_verify(r);
+    SECP256K1_SCALAR_VERIFY(r);
 
     r->d[0] = t & nonzero; t >>= 32;
     t += (uint64_t)(r->d[1] ^ mask) + (SECP256K1_N_1 & mask);
@@ -345,7 +342,7 @@ static int secp256k1_scalar_cond_negate(secp256k1_scalar *r, int flag) {
     t += (uint64_t)(r->d[7] ^ mask) + (SECP256K1_N_7 & mask);
     r->d[7] = t & nonzero;
 
-    secp256k1_scalar_verify(r);
+    SECP256K1_SCALAR_VERIFY(r);
     return 2 * (mask == 0) - 1;
 }
 
@@ -653,17 +650,17 @@ static void secp256k1_scalar_mul_512(uint32_t *l, const secp256k1_scalar *a, con
 
 static void secp256k1_scalar_mul(secp256k1_scalar *r, const secp256k1_scalar *a, const secp256k1_scalar *b) {
     uint32_t l[16];
-    secp256k1_scalar_verify(a);
-    secp256k1_scalar_verify(b);
+    SECP256K1_SCALAR_VERIFY(a);
+    SECP256K1_SCALAR_VERIFY(b);
 
     secp256k1_scalar_mul_512(l, a, b);
     secp256k1_scalar_reduce_512(r, l);
 
-    secp256k1_scalar_verify(r);
+    SECP256K1_SCALAR_VERIFY(r);
 }
 
 static void secp256k1_scalar_split_128(secp256k1_scalar *r1, secp256k1_scalar *r2, const secp256k1_scalar *k) {
-    secp256k1_scalar_verify(k);
+    SECP256K1_SCALAR_VERIFY(k);
 
     r1->d[0] = k->d[0];
     r1->d[1] = k->d[1];
@@ -682,13 +679,13 @@ static void secp256k1_scalar_split_128(secp256k1_scalar *r
     r2->d[6] = 0;
     r2->d[7] = 0;
 
-    secp256k1_scalar_verify(r1);
-    secp256k1_scalar_verify(r2);
+    SECP256K1_SCALAR_VERIFY(r1);
+    SECP256K1_SCALAR_VERIFY(r2);
 }
 
 SECP256K1_INLINE static int secp256k1_scalar_eq(const secp256k1_scalar *a, const secp256k1_scalar *b) {
-    secp256k1_scalar_verify(a);
-    secp256k1_scalar_verify(b);
+    SECP256K1_SCALAR_VERIFY(a);
+    SECP256K1_SCALAR_VERIFY(b);
     return ((a->d[0] ^ b->d[0]) | (a->d[1] ^ b->d[1]) | (a->d[2] ^ b->d[2]) | (a->d[3] ^ b->d[3]) |
             (a->d[4] ^ b->d[4]) | (a->d[5] ^ b->d[5]) | (a->d[6] ^ b->d[6]) | (a->d[7] ^ b->d[7])) == 0;
 }
@@ -698,8 +695,8 @@ SECP256K1_INLINE static void secp256k1_scalar_mul_shift_var(secp256k1_scalar *r,
     unsigned int shiftlimbs;
     unsigned int shiftlow;
     unsigned int shifthigh;
-    secp256k1_scalar_verify(a);
-    secp256k1_scalar_verify(b);
+    SECP256K1_SCALAR_VERIFY(a);
+    SECP256K1_SCALAR_VERIFY(b);
     VERIFY_CHECK(shift >= 256);
 
     secp256k1_scalar_mul_512(l, a, b);
@@ -716,13 +713,13 @@ SECP256K1_INLINE static void secp256k1_scalar_mul_shift_var(secp256k1_scalar *r,
     r->d[7] = shift < 288 ? (l[7 + shiftlimbs] >> shiftlow) : 0;
     secp256k1_scalar_cadd_bit(r, 0, (l[(shift - 1) >> 5] >> ((shift - 1) & 0x1f)) & 1);
 
-    secp256k1_scalar_verify(r);
+    SECP256K1_SCALAR_VERIFY(r);
 }
 
 static SECP256K1_INLINE void secp256k1_scalar_cmov(secp256k1_scalar *r, const secp256k1_scalar *a, int flag) {
     uint32_t mask0, mask1;
     volatile int vflag = flag;
-    secp256k1_scalar_verify(a);
+    SECP256K1_SCALAR_VERIFY(a);
     SECP256K1_CHECKMEM_CHECK_VERIFY(r->d, sizeof(r->d));
 
     mask0 = vflag + ~((uint32_t)0);
@@ -736,7 +733,7 @@ static SECP256K1_INLINE void secp256k1_scalar_cmov(secp256k1_scalar *r, const se
     r->d[6] = (r->d[6] & mask0) | (a->d[6] & mask1);
     r->d[7] = (r->d[7] & mask0) | (a->d[7] & mask1);
 
-    secp256k1_scalar_verify(r);
+    SECP256K1_SCALAR_VERIFY(r);
 }
 
 static void secp256k1_scalar_from_signed30(secp256k1_scalar *r, const secp256k1_modinv32_signed30 *a) {
@@ -765,14 +762,14 @@ static void secp256k1_scalar_from_signed30(secp256k1_scalar *r, const secp256k1_
     r->d[6] = a6 >> 12 | a7 << 18;
     r->d[7] = a7 >> 14 | a8 << 16;
 
-    secp256k1_scalar_verify(r);
+    SECP256K1_SCALAR_VERIFY(r);
 }
 
 static void secp256k1_scalar_to_signed30(secp256k1_modinv32_signed30 *r, const secp256k1_scalar *a) {
     const uint32_t M30 = UINT32_MAX >> 2;
     const uint32_t a0 = a->d[0], a1 = a->d[1], a2 = a->d[2], a3 = a->d[3],
                    a4 = a->d[4], a5 = a->d[5], a6 = a->d[6], a7 = a->d[7];
-    secp256k1_scalar_verify(a);
+    SECP256K1_SCALAR_VERIFY(a);
 
     r->v[0] = a0 & M30;
     r->v[1] = (a0 >> 30 | a1 << 2) & M30;
@@ -795,16 +792,14 @@ static void secp256k1_scalar_inverse(secp256k1_scalar *r, const secp256k1_scalar
 #ifdef VERIFY
     int zero_in = secp256k1_scalar_is_zero(x);
 #endif
-    secp256k1_scalar_verify(x);
+    SECP256K1_SCALAR_VERIFY(x);
 
     secp256k1_scalar_to_signed30(&s, x);
     secp256k1_modinv32(&s, &secp256k1_const_modinfo_scalar);
     secp256k1_scalar_from_signed30(r, &s);
 
-    secp256k1_scalar_verify(r);
-#ifdef VERIFY
+    SECP256K1_SCALAR_VERIFY(r);
     VERIFY_CHECK(secp256k1_scalar_is_zero(r) == zero_in);
-#endif
 }
 
 static void secp256k1_scalar_inverse_var(secp256k1_scalar *r, const secp256k1_scalar *x) {
@@ -812,20 +807,18 @@ static void secp256k1_scalar_inverse_var(secp256k1_scalar *r, const secp256k1_sc
 #ifdef VERIFY
     int zero_in = secp256k1_scalar_is_zero(x);
 #endif
-    secp256k1_scalar_verify(x);
+    SECP256K1_SCALAR_VERIFY(x);
 
     secp256k1_scalar_to_signed30(&s, x);
     secp256k1_modinv32_var(&s, &secp256k1_const_modinfo_scalar);
     secp256k1_scalar_from_signed30(r, &s);
 
-    secp256k1_scalar_verify(r);
-#ifdef VERIFY
+    SECP256K1_SCALAR_VERIFY(r);
     VERIFY_CHECK(secp256k1_scalar_is_zero(r) == zero_in);
-#endif
 }
 
 SECP256K1_INLINE static int secp256k1_scalar_is_even(const secp256k1_scalar *a) {
-    secp256k1_scalar_verify(a);
+    SECP256K1_SCALAR_VERIFY(a);
 
     return !(a->d[0] & 1);
 }
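For the scalar_half hunks above, the underlying identity is simple: for odd a, a + n is even (n is odd) and (a + n)/2 is the unique scalar whose double equals a mod n; the mask -(uint32_t)(a->d[0] & 1U) selects whether n is folded in at all. The real code spreads n across limbs via the SECP256K1_N_H_* constants; here is a one-limb sketch of the same identity with a toy modulus (not library code):

#include <stdint.h>
#include <stdio.h>

#define TOY_N 13u  /* hypothetical odd stand-in for the group order n */

static uint32_t toy_half(uint32_t a) {
    uint32_t mask = -(uint32_t)(a & 1u);   /* all-ones iff a is odd */
    return (a + (TOY_N & mask)) >> 1;      /* (a + n)/2 if odd, a/2 if even */
}

int main(void) {
    uint32_t h = toy_half(5);              /* 5 is odd: (5 + 13) / 2 == 9 */
    printf("%u\n", (2 * h) % TOY_N);       /* prints 5: doubling h recovers a mod n */
    return 0;
}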
diff --git a/src/scalar_impl.h b/src/scalar_impl.h
index 3eca23b4f9..bbba83e937 100644
--- a/src/scalar_impl.h
+++ b/src/scalar_impl.h
@@ -31,14 +31,12 @@ static int secp256k1_scalar_set_b32_seckey(secp256k1_scalar *r, const unsigned c
     int overflow;
     secp256k1_scalar_set_b32(r, bin, &overflow);
 
-    secp256k1_scalar_verify(r);
+    SECP256K1_SCALAR_VERIFY(r);
     return (!overflow) & (!secp256k1_scalar_is_zero(r));
 }
 
 static void secp256k1_scalar_verify(const secp256k1_scalar *r) {
-#ifdef VERIFY
     VERIFY_CHECK(secp256k1_scalar_check_overflow(r) == 0);
-#endif
 
     (void)r;
 }
@@ -63,7 +61,7 @@ static void secp256k1_scalar_verify(const secp256k1_scalar *r) {
  * (arbitrarily) set r2 = k + 5 (mod n) and r1 = k - r2 * lambda (mod n).
  */
 static void secp256k1_scalar_split_lambda(secp256k1_scalar * SECP256K1_RESTRICT r1, secp256k1_scalar * SECP256K1_RESTRICT r2, const secp256k1_scalar * SECP256K1_RESTRICT k) {
-    secp256k1_scalar_verify(k);
+    SECP256K1_SCALAR_VERIFY(k);
     VERIFY_CHECK(r1 != k);
     VERIFY_CHECK(r2 != k);
     VERIFY_CHECK(r1 != r2);
@@ -71,8 +69,8 @@ static void secp256k1_scalar_split_lambda(secp256k1_scalar * SECP256K1_RESTRICT
     *r2 = (*k + 5) % EXHAUSTIVE_TEST_ORDER;
     *r1 = (*k + (EXHAUSTIVE_TEST_ORDER - *r2) * EXHAUSTIVE_TEST_LAMBDA) % EXHAUSTIVE_TEST_ORDER;
 
-    secp256k1_scalar_verify(r1);
-    secp256k1_scalar_verify(r2);
+    SECP256K1_SCALAR_VERIFY(r1);
+    SECP256K1_SCALAR_VERIFY(r2);
 }
 #else
 /**
@@ -155,7 +153,7 @@ static void secp256k1_scalar_split_lambda(secp256k1_scalar * SECP256K1_RESTRICT
         0xE4437ED6UL, 0x010E8828UL, 0x6F547FA9UL, 0x0ABFE4C4UL,
         0x221208ACUL, 0x9DF506C6UL, 0x1571B4AEUL, 0x8AC47F71UL
     );
-    secp256k1_scalar_verify(k);
+    SECP256K1_SCALAR_VERIFY(k);
     VERIFY_CHECK(r1 != k);
     VERIFY_CHECK(r2 != k);
     VERIFY_CHECK(r1 != r2);
@@ -170,8 +168,8 @@ static void secp256k1_scalar_split_lambda(secp256k1_scalar * SECP256K1_RESTRICT
     secp256k1_scalar_negate(r1, r1);
     secp256k1_scalar_add(r1, r1, k);
 
-    secp256k1_scalar_verify(r1);
-    secp256k1_scalar_verify(r2);
+    SECP256K1_SCALAR_VERIFY(r1);
+    SECP256K1_SCALAR_VERIFY(r2);
 #ifdef VERIFY
     secp256k1_scalar_split_lambda_verify(r1, r2, k);
 #endif
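The exhaustive-test split_lambda above keeps the identity r1 + lambda*r2 == k (mod n) by construction, since r2 = k + 5 and r1 = k - r2*lambda. A toy check with made-up constants (n = 13 and lambda = 3, a cube root of unity mod 13; these are not the real EXHAUSTIVE_TEST_* values):

#include <stdint.h>
#include <stdio.h>

#define TOY_ORDER 13u
#define TOY_LAMBDA 3u  /* 3^3 == 27 == 1 (mod 13) */

int main(void) {
    uint32_t k = 7;
    uint32_t r2 = (k + 5) % TOY_ORDER;                              /* r2 == 12 */
    uint32_t r1 = (k + (TOY_ORDER - r2) * TOY_LAMBDA) % TOY_ORDER;  /* r1 == 10 */
    /* split property: r1 + lambda * r2 == k (mod n) */
    printf("%u\n", (r1 + TOY_LAMBDA * r2) % TOY_ORDER);             /* prints 7 == k */
    return 0;
}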
diff --git a/src/scalar_low_impl.h b/src/scalar_low_impl.h
index 1929b175e0..0895db6a17 100644
--- a/src/scalar_low_impl.h
+++ b/src/scalar_low_impl.h
@@ -14,7 +14,7 @@
 #include <string.h>
 
 SECP256K1_INLINE static int secp256k1_scalar_is_even(const secp256k1_scalar *a) {
-    secp256k1_scalar_verify(a);
+    SECP256K1_SCALAR_VERIFY(a);
 
     return !(*a & 1);
 }
@@ -24,11 +24,11 @@ SECP256K1_INLINE static void secp256k1_scalar_clear(secp256k1_scalar *r) { *r =
 SECP256K1_INLINE static void secp256k1_scalar_set_int(secp256k1_scalar *r, unsigned int v) {
     *r = v % EXHAUSTIVE_TEST_ORDER;
 
-    secp256k1_scalar_verify(r);
+    SECP256K1_SCALAR_VERIFY(r);
 }
 
 SECP256K1_INLINE static unsigned int secp256k1_scalar_get_bits(const secp256k1_scalar *a, unsigned int offset, unsigned int count) {
-    secp256k1_scalar_verify(a);
+    SECP256K1_SCALAR_VERIFY(a);
 
     if (offset < 32)
         return ((*a >> offset) & ((((uint32_t)1) << count) - 1));
@@ -37,7 +37,7 @@ SECP256K1_INLINE static unsigned int secp256k1_scalar_get_bits(const secp256k1_s
 }
 
 SECP256K1_INLINE static unsigned int secp256k1_scalar_get_bits_var(const secp256k1_scalar *a, unsigned int offset, unsigned int count) {
-    secp256k1_scalar_verify(a);
+    SECP256K1_SCALAR_VERIFY(a);
 
     return secp256k1_scalar_get_bits(a, offset, count);
 }
@@ -45,27 +45,25 @@ SECP256K1_INLINE static unsigned int secp256k1_scalar_get_bits_var(const secp256
 SECP256K1_INLINE static int secp256k1_scalar_check_overflow(const secp256k1_scalar *a) { return *a >= EXHAUSTIVE_TEST_ORDER; }
 
 static int secp256k1_scalar_add(secp256k1_scalar *r, const secp256k1_scalar *a, const secp256k1_scalar *b) {
-    secp256k1_scalar_verify(a);
-    secp256k1_scalar_verify(b);
+    SECP256K1_SCALAR_VERIFY(a);
+    SECP256K1_SCALAR_VERIFY(b);
 
     *r = (*a + *b) % EXHAUSTIVE_TEST_ORDER;
 
-    secp256k1_scalar_verify(r);
+    SECP256K1_SCALAR_VERIFY(r);
     return *r < *b;
 }
 
 static void secp256k1_scalar_cadd_bit(secp256k1_scalar *r, unsigned int bit, int flag) {
-    secp256k1_scalar_verify(r);
+    SECP256K1_SCALAR_VERIFY(r);
 
     if (flag && bit < 32)
         *r += ((uint32_t)1 << bit);
 
-    secp256k1_scalar_verify(r);
-#ifdef VERIFY
+    SECP256K1_SCALAR_VERIFY(r);
     VERIFY_CHECK(bit < 32);
     /* Verify that adding (1 << bit) will not overflow any in-range scalar *r by overflowing the underlying uint32_t. */
     VERIFY_CHECK(((uint32_t)1 << bit) - 1 <= UINT32_MAX - EXHAUSTIVE_TEST_ORDER);
-#endif
 }
 
 static void secp256k1_scalar_set_b32(secp256k1_scalar *r, const unsigned char *b32, int *overflow) {
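The overflow VERIFY_CHECK in cadd_bit above encodes a one-line derivation: for any in-range r (i.e. r <= ORDER - 1), r + (1 << bit) fits in a uint32_t exactly when (1 << bit) - 1 <= UINT32_MAX - ORDER. A standalone check of that equivalence, using a toy order rather than the real constant:

#include <stdint.h>
#include <stdio.h>

#define TOY_ORDER 13u  /* hypothetical stand-in for EXHAUSTIVE_TEST_ORDER */

int main(void) {
    unsigned int bit;
    for (bit = 0; bit < 32; bit++) {
        /* worst case: r == TOY_ORDER - 1; compute in 64 bits so the test itself cannot wrap */
        uint64_t worst = (uint64_t)(TOY_ORDER - 1) + ((uint64_t)1 << bit);
        int no_wrap  = worst <= UINT32_MAX;
        int bound_ok = ((uint32_t)1 << bit) - 1 <= UINT32_MAX - TOY_ORDER;
        if (no_wrap != bound_ok) { printf("mismatch at bit %u\n", bit); return 1; }
    }
    printf("bound matches the no-wrap condition for all bit < 32\n");
    return 0;
}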
@@ -81,24 +79,24 @@ static void secp256k1_scalar_set_b32(secp256k1_scalar *r, const unsigned char *b
     }
     if (overflow) *overflow = over;
 
-    secp256k1_scalar_verify(r);
+    SECP256K1_SCALAR_VERIFY(r);
 }
 
 static void secp256k1_scalar_get_b32(unsigned char *bin, const secp256k1_scalar* a) {
-    secp256k1_scalar_verify(a);
+    SECP256K1_SCALAR_VERIFY(a);
 
     memset(bin, 0, 32);
     bin[28] = *a >> 24; bin[29] = *a >> 16; bin[30] = *a >> 8; bin[31] = *a;
 }
 
 SECP256K1_INLINE static int secp256k1_scalar_is_zero(const secp256k1_scalar *a) {
-    secp256k1_scalar_verify(a);
+    SECP256K1_SCALAR_VERIFY(a);
 
     return *a == 0;
 }
 
 static void secp256k1_scalar_negate(secp256k1_scalar *r, const secp256k1_scalar *a) {
-    secp256k1_scalar_verify(a);
+    SECP256K1_SCALAR_VERIFY(a);
 
     if (*a == 0) {
         *r = 0;
@@ -106,52 +104,52 @@ static void secp256k1_scalar_negate(secp256k1_scalar *r, const secp256k1_scalar
         *r = EXHAUSTIVE_TEST_ORDER - *a;
     }
 
-    secp256k1_scalar_verify(r);
+    SECP256K1_SCALAR_VERIFY(r);
 }
 
 SECP256K1_INLINE static int secp256k1_scalar_is_one(const secp256k1_scalar *a) {
-    secp256k1_scalar_verify(a);
+    SECP256K1_SCALAR_VERIFY(a);
 
     return *a == 1;
 }
 
 static int secp256k1_scalar_is_high(const secp256k1_scalar *a) {
-    secp256k1_scalar_verify(a);
+    SECP256K1_SCALAR_VERIFY(a);
 
     return *a > EXHAUSTIVE_TEST_ORDER / 2;
 }
 
 static int secp256k1_scalar_cond_negate(secp256k1_scalar *r, int flag) {
-    secp256k1_scalar_verify(r);
+    SECP256K1_SCALAR_VERIFY(r);
 
     if (flag) secp256k1_scalar_negate(r, r);
 
-    secp256k1_scalar_verify(r);
+    SECP256K1_SCALAR_VERIFY(r);
     return flag ? -1 : 1;
 }
 
 static void secp256k1_scalar_mul(secp256k1_scalar *r, const secp256k1_scalar *a, const secp256k1_scalar *b) {
-    secp256k1_scalar_verify(a);
-    secp256k1_scalar_verify(b);
+    SECP256K1_SCALAR_VERIFY(a);
+    SECP256K1_SCALAR_VERIFY(b);
 
     *r = (*a * *b) % EXHAUSTIVE_TEST_ORDER;
 
-    secp256k1_scalar_verify(r);
+    SECP256K1_SCALAR_VERIFY(r);
 }
 
 static void secp256k1_scalar_split_128(secp256k1_scalar *r1, secp256k1_scalar *r2, const secp256k1_scalar *a) {
-    secp256k1_scalar_verify(a);
+    SECP256K1_SCALAR_VERIFY(a);
 
     *r1 = *a;
     *r2 = 0;
 
-    secp256k1_scalar_verify(r1);
-    secp256k1_scalar_verify(r2);
+    SECP256K1_SCALAR_VERIFY(r1);
+    SECP256K1_SCALAR_VERIFY(r2);
 }
 
 SECP256K1_INLINE static int secp256k1_scalar_eq(const secp256k1_scalar *a, const secp256k1_scalar *b) {
-    secp256k1_scalar_verify(a);
-    secp256k1_scalar_verify(b);
+    SECP256K1_SCALAR_VERIFY(a);
+    SECP256K1_SCALAR_VERIFY(b);
 
     return *a == *b;
 }
@@ -159,45 +157,45 @@ SECP256K1_INLINE static int secp256k1_scalar_eq(const secp256k1_scalar *a, const
 static SECP256K1_INLINE void secp256k1_scalar_cmov(secp256k1_scalar *r, const secp256k1_scalar *a, int flag) {
     uint32_t mask0, mask1;
     volatile int vflag = flag;
-    secp256k1_scalar_verify(a);
+    SECP256K1_SCALAR_VERIFY(a);
     SECP256K1_CHECKMEM_CHECK_VERIFY(r, sizeof(*r));
 
     mask0 = vflag + ~((uint32_t)0);
     mask1 = ~mask0;
     *r = (*r & mask0) | (*a & mask1);
 
-    secp256k1_scalar_verify(r);
+    SECP256K1_SCALAR_VERIFY(r);
 }
 
 static void secp256k1_scalar_inverse(secp256k1_scalar *r, const secp256k1_scalar *x) {
     int i;
     *r = 0;
-    secp256k1_scalar_verify(x);
+    SECP256K1_SCALAR_VERIFY(x);
 
     for (i = 0; i < EXHAUSTIVE_TEST_ORDER; i++)
         if ((i * *x) % EXHAUSTIVE_TEST_ORDER == 1)
             *r = i;
 
-    secp256k1_scalar_verify(r);
+    SECP256K1_SCALAR_VERIFY(r);
     /* If this VERIFY_CHECK triggers we were given a noninvertible scalar (and thus
      * have a composite group order; fix it in exhaustive_tests.c). */
     VERIFY_CHECK(*r != 0);
 }
 
 static void secp256k1_scalar_inverse_var(secp256k1_scalar *r, const secp256k1_scalar *x) {
-    secp256k1_scalar_verify(x);
+    SECP256K1_SCALAR_VERIFY(x);
 
     secp256k1_scalar_inverse(r, x);
 
-    secp256k1_scalar_verify(r);
+    SECP256K1_SCALAR_VERIFY(r);
 }
 
 static void secp256k1_scalar_half(secp256k1_scalar *r, const secp256k1_scalar *a) {
-    secp256k1_scalar_verify(a);
+    SECP256K1_SCALAR_VERIFY(a);
 
     *r = (*a + ((-(uint32_t)(*a & 1)) & EXHAUSTIVE_TEST_ORDER)) >> 1;
 
-    secp256k1_scalar_verify(r);
+    SECP256K1_SCALAR_VERIFY(r);
 }
 
 #endif /* SECP256K1_SCALAR_REPR_IMPL_H */
diff --git a/src/util.h b/src/util.h
index cf7e5d1af5..187bf1c5e0 100644
--- a/src/util.h
+++ b/src/util.h
@@ -132,16 +132,11 @@ static const secp256k1_callback default_error_callback = {
 } while(0)
 #endif
 
-/* Like assert(), but when VERIFY is defined, and side-effect safe. */
-#if defined(COVERAGE)
-#define VERIFY_CHECK(check)
-#define VERIFY_SETUP(stmt)
-#elif defined(VERIFY)
+/* Like assert(), but when VERIFY is defined. */
+#if defined(VERIFY)
 #define VERIFY_CHECK CHECK
-#define VERIFY_SETUP(stmt) do { stmt; } while(0)
 #else
-#define VERIFY_CHECK(cond) do { (void)(cond); } while(0)
-#define VERIFY_SETUP(stmt)
+#define VERIFY_CHECK(cond)
 #endif
 
 static SECP256K1_INLINE void *checked_malloc(const secp256k1_callback* cb, size_t size) {
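One last observation on the util.h hunk above: the removed non-VERIFY fallback do { (void)(cond); } while(0) still evaluated its argument, whereas the new empty expansion does not, so a condition with side effects would now behave differently across builds; this is exactly what motivates the explicit (void)ret branches earlier in the patch. A sketch of the hazard in a non-VERIFY build (toy program, not library code):

#include <stdio.h>

#define VERIFY_CHECK(cond)  /* the new non-VERIFY expansion: nothing at all */

int main(void) {
    int calls = 0;
    VERIFY_CHECK(++calls == 1);  /* WRONG use: the increment never happens here */
    printf("%d\n", calls);       /* prints 0; the old non-VERIFY fallback evaluated
                                  * its argument and would have printed 1 */
    return 0;
}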