diff --git a/src/scalar.h b/src/scalar.h
index 00a390bc5b..98b1287bb5 100644
--- a/src/scalar.h
+++ b/src/scalar.h
@@ -100,5 +100,6 @@ static void secp256k1_scalar_cmov(secp256k1_scalar *r, const secp256k1_scalar *a
 
 /** Check invariants on a scalar (no-op unless VERIFY is enabled). */
 static void secp256k1_scalar_verify(const secp256k1_scalar *r);
+#define SECP256K1_SCALAR_VERIFY(r) secp256k1_scalar_verify(r)
 
 #endif /* SECP256K1_SCALAR_H */
diff --git a/src/scalar_4x64_impl.h b/src/scalar_4x64_impl.h
index 0b6e558c14..7b9c542f07 100644
--- a/src/scalar_4x64_impl.h
+++ b/src/scalar_4x64_impl.h
@@ -42,18 +42,18 @@ SECP256K1_INLINE static void secp256k1_scalar_set_int(secp256k1_scalar *r, unsig
     r->d[2] = 0;
     r->d[3] = 0;
 
-    secp256k1_scalar_verify(r);
+    SECP256K1_SCALAR_VERIFY(r);
 }
 
 SECP256K1_INLINE static unsigned int secp256k1_scalar_get_bits(const secp256k1_scalar *a, unsigned int offset, unsigned int count) {
-    secp256k1_scalar_verify(a);
+    SECP256K1_SCALAR_VERIFY(a);
     VERIFY_CHECK((offset + count - 1) >> 6 == offset >> 6);
 
     return (a->d[offset >> 6] >> (offset & 0x3F)) & ((((uint64_t)1) << count) - 1);
 }
 
 SECP256K1_INLINE static unsigned int secp256k1_scalar_get_bits_var(const secp256k1_scalar *a, unsigned int offset, unsigned int count) {
-    secp256k1_scalar_verify(a);
+    SECP256K1_SCALAR_VERIFY(a);
     VERIFY_CHECK(count < 32);
     VERIFY_CHECK(offset + count <= 256);
 
@@ -93,15 +93,15 @@ SECP256K1_INLINE static int secp256k1_scalar_reduce(secp256k1_scalar *r, unsigne
     secp256k1_u128_accum_u64(&t, r->d[3]);
     r->d[3] = secp256k1_u128_to_u64(&t);
 
-    secp256k1_scalar_verify(r);
+    SECP256K1_SCALAR_VERIFY(r);
     return overflow;
 }
 
 static int secp256k1_scalar_add(secp256k1_scalar *r, const secp256k1_scalar *a, const secp256k1_scalar *b) {
     int overflow;
     secp256k1_uint128 t;
-    secp256k1_scalar_verify(a);
-    secp256k1_scalar_verify(b);
+    SECP256K1_SCALAR_VERIFY(a);
+    SECP256K1_SCALAR_VERIFY(b);
 
     secp256k1_u128_from_u64(&t, a->d[0]);
     secp256k1_u128_accum_u64(&t, b->d[0]);
@@ -119,14 +119,14 @@ static int secp256k1_scalar_add(secp256k1_scalar *r, const secp256k1_scalar *a,
     VERIFY_CHECK(overflow == 0 || overflow == 1);
     secp256k1_scalar_reduce(r, overflow);
 
-    secp256k1_scalar_verify(r);
+    SECP256K1_SCALAR_VERIFY(r);
     return overflow;
 }
 
 static void secp256k1_scalar_cadd_bit(secp256k1_scalar *r, unsigned int bit, int flag) {
     secp256k1_uint128 t;
     volatile int vflag = flag;
-    secp256k1_scalar_verify(r);
+    SECP256K1_SCALAR_VERIFY(r);
     VERIFY_CHECK(bit < 256);
 
     bit += ((uint32_t) vflag - 1) & 0x100;  /* forcing (bit >> 6) > 3 makes this a noop */
@@ -143,7 +143,7 @@ static void secp256k1_scalar_cadd_bit(secp256k1_scalar *r, unsigned int bit, int
     secp256k1_u128_accum_u64(&t, ((uint64_t)((bit >> 6) == 3)) << (bit & 0x3F));
     r->d[3] = secp256k1_u128_to_u64(&t);
 
-    secp256k1_scalar_verify(r);
+    SECP256K1_SCALAR_VERIFY(r);
     VERIFY_CHECK(secp256k1_u128_hi_u64(&t) == 0);
 }
 
@@ -158,11 +158,11 @@ static void secp256k1_scalar_set_b32(secp256k1_scalar *r, const unsigned char *b
         *overflow = over;
     }
 
-    secp256k1_scalar_verify(r);
+    SECP256K1_SCALAR_VERIFY(r);
 }
 
 static void secp256k1_scalar_get_b32(unsigned char *bin, const secp256k1_scalar* a) {
-    secp256k1_scalar_verify(a);
+    SECP256K1_SCALAR_VERIFY(a);
 
     secp256k1_write_be64(&bin[0], a->d[3]);
     secp256k1_write_be64(&bin[8], a->d[2]);
@@ -171,7 +171,7 @@ static void secp256k1_scalar_get_b32(unsigned char *bin, const secp256k1_scalar*
 }
 
 SECP256K1_INLINE static int secp256k1_scalar_is_zero(const secp256k1_scalar *a) {
-    secp256k1_scalar_verify(a);
+    SECP256K1_SCALAR_VERIFY(a);
 
     return (a->d[0] | a->d[1] | a->d[2] | a->d[3]) == 0;
 }
@@ -179,7 +179,7 @@ SECP256K1_INLINE static int secp256k1_scalar_is_zero(const secp256k1_scalar *a)
 static void secp256k1_scalar_negate(secp256k1_scalar *r, const secp256k1_scalar *a) {
     uint64_t nonzero = 0xFFFFFFFFFFFFFFFFULL * (secp256k1_scalar_is_zero(a) == 0);
     secp256k1_uint128 t;
-    secp256k1_scalar_verify(a);
+    SECP256K1_SCALAR_VERIFY(a);
 
     secp256k1_u128_from_u64(&t, ~a->d[0]);
     secp256k1_u128_accum_u64(&t, SECP256K1_N_0 + 1);
@@ -194,7 +194,7 @@ static void secp256k1_scalar_negate(secp256k1_scalar *r, const secp256k1_scalar
     secp256k1_u128_accum_u64(&t, SECP256K1_N_3);
     r->d[3] = secp256k1_u128_to_u64(&t) & nonzero;
 
-    secp256k1_scalar_verify(r);
+    SECP256K1_SCALAR_VERIFY(r);
 }
 
 static void secp256k1_scalar_half(secp256k1_scalar *r, const secp256k1_scalar *a) {
@@ -214,7 +214,7 @@ static void secp256k1_scalar_half(secp256k1_scalar *r, const secp256k1_scalar *a
      */
     uint64_t mask = -(uint64_t)(a->d[0] & 1U);
     secp256k1_uint128 t;
-    secp256k1_scalar_verify(a);
+    SECP256K1_SCALAR_VERIFY(a);
 
     secp256k1_u128_from_u64(&t, (a->d[0] >> 1) | (a->d[1] << 63));
     secp256k1_u128_accum_u64(&t, (SECP256K1_N_H_0 + 1U) & mask);
@@ -234,12 +234,12 @@ static void secp256k1_scalar_half(secp256k1_scalar *r, const secp256k1_scalar *a
     secp256k1_u128_rshift(&t, 64);
     VERIFY_CHECK(secp256k1_u128_to_u64(&t) == 0);
 
-    secp256k1_scalar_verify(r);
+    SECP256K1_SCALAR_VERIFY(r);
 #endif
 }
 
 SECP256K1_INLINE static int secp256k1_scalar_is_one(const secp256k1_scalar *a) {
-    secp256k1_scalar_verify(a);
+    SECP256K1_SCALAR_VERIFY(a);
 
     return ((a->d[0] ^ 1) | a->d[1] | a->d[2] | a->d[3]) == 0;
 }
@@ -247,7 +247,7 @@ SECP256K1_INLINE static int secp256k1_scalar_is_one(const secp256k1_scalar *a) {
 static int secp256k1_scalar_is_high(const secp256k1_scalar *a) {
     int yes = 0;
     int no = 0;
-    secp256k1_scalar_verify(a);
+    SECP256K1_SCALAR_VERIFY(a);
 
     no |= (a->d[3] < SECP256K1_N_H_3);
     yes |= (a->d[3] > SECP256K1_N_H_3) & ~no;
@@ -265,7 +265,7 @@ static int secp256k1_scalar_cond_negate(secp256k1_scalar *r, int flag) {
     uint64_t mask = -vflag;
     uint64_t nonzero = (secp256k1_scalar_is_zero(r) != 0) - 1;
     secp256k1_uint128 t;
-    secp256k1_scalar_verify(r);
+    SECP256K1_SCALAR_VERIFY(r);
 
     secp256k1_u128_from_u64(&t, r->d[0] ^ mask);
     secp256k1_u128_accum_u64(&t, (SECP256K1_N_0 + 1) & mask);
@@ -280,7 +280,7 @@ static int secp256k1_scalar_cond_negate(secp256k1_scalar *r, int flag) {
     secp256k1_u128_accum_u64(&t, SECP256K1_N_3 & mask);
     r->d[3] = secp256k1_u128_to_u64(&t) & nonzero;
 
-    secp256k1_scalar_verify(r);
+    SECP256K1_SCALAR_VERIFY(r);
     return 2 * (mask == 0) - 1;
 }
 
@@ -839,17 +839,17 @@ static void secp256k1_scalar_mul_512(uint64_t l[8], const secp256k1_scalar *a, c
 
 static void secp256k1_scalar_mul(secp256k1_scalar *r, const secp256k1_scalar *a, const secp256k1_scalar *b) {
     uint64_t l[8];
-    secp256k1_scalar_verify(a);
-    secp256k1_scalar_verify(b);
+    SECP256K1_SCALAR_VERIFY(a);
+    SECP256K1_SCALAR_VERIFY(b);
 
     secp256k1_scalar_mul_512(l, a, b);
     secp256k1_scalar_reduce_512(r, l);
 
-    secp256k1_scalar_verify(r);
+    SECP256K1_SCALAR_VERIFY(r);
 }
 
 static void secp256k1_scalar_split_128(secp256k1_scalar *r1, secp256k1_scalar *r2, const secp256k1_scalar *k) {
-    secp256k1_scalar_verify(k);
+    SECP256K1_SCALAR_VERIFY(k);
 
     r1->d[0] = k->d[0];
     r1->d[1] = k->d[1];
@@ -860,13 +860,13 @@ static void secp256k1_scalar_split_128(secp256k1_scalar *r1, secp256k1_scalar *r
     r2->d[2] = 0;
     r2->d[3] = 0;
 
-    secp256k1_scalar_verify(r1);
-    secp256k1_scalar_verify(r2);
+    SECP256K1_SCALAR_VERIFY(r1);
+    SECP256K1_SCALAR_VERIFY(r2);
 }
 
 SECP256K1_INLINE static int secp256k1_scalar_eq(const secp256k1_scalar *a, const secp256k1_scalar *b) {
-    secp256k1_scalar_verify(a);
-    secp256k1_scalar_verify(b);
+    SECP256K1_SCALAR_VERIFY(a);
+    SECP256K1_SCALAR_VERIFY(b);
 
     return ((a->d[0] ^ b->d[0]) | (a->d[1] ^ b->d[1]) | (a->d[2] ^ b->d[2]) | (a->d[3] ^ b->d[3])) == 0;
 }
@@ -876,8 +876,8 @@ SECP256K1_INLINE static void secp256k1_scalar_mul_shift_var(secp256k1_scalar *r,
     unsigned int shiftlimbs;
     unsigned int shiftlow;
     unsigned int shifthigh;
-    secp256k1_scalar_verify(a);
-    secp256k1_scalar_verify(b);
+    SECP256K1_SCALAR_VERIFY(a);
+    SECP256K1_SCALAR_VERIFY(b);
     VERIFY_CHECK(shift >= 256);
 
     secp256k1_scalar_mul_512(l, a, b);
@@ -890,13 +890,13 @@ SECP256K1_INLINE static void secp256k1_scalar_mul_shift_var(secp256k1_scalar *r,
     r->d[3] = shift < 320 ? (l[3 + shiftlimbs] >> shiftlow) : 0;
     secp256k1_scalar_cadd_bit(r, 0, (l[(shift - 1) >> 6] >> ((shift - 1) & 0x3f)) & 1);
 
-    secp256k1_scalar_verify(r);
+    SECP256K1_SCALAR_VERIFY(r);
 }
 
 static SECP256K1_INLINE void secp256k1_scalar_cmov(secp256k1_scalar *r, const secp256k1_scalar *a, int flag) {
     uint64_t mask0, mask1;
     volatile int vflag = flag;
-    secp256k1_scalar_verify(a);
+    SECP256K1_SCALAR_VERIFY(a);
     SECP256K1_CHECKMEM_CHECK_VERIFY(r->d, sizeof(r->d));
 
     mask0 = vflag + ~((uint64_t)0);
@@ -906,7 +906,7 @@ static SECP256K1_INLINE void secp256k1_scalar_cmov(secp256k1_scalar *r, const se
     r->d[2] = (r->d[2] & mask0) | (a->d[2] & mask1);
     r->d[3] = (r->d[3] & mask0) | (a->d[3] & mask1);
 
-    secp256k1_scalar_verify(r);
+    SECP256K1_SCALAR_VERIFY(r);
 }
 
 static void secp256k1_scalar_from_signed62(secp256k1_scalar *r, const secp256k1_modinv64_signed62 *a) {
@@ -926,13 +926,13 @@ static void secp256k1_scalar_from_signed62(secp256k1_scalar *r, const secp256k1_
     r->d[2] = a2 >> 4 | a3 << 58;
     r->d[3] = a3 >> 6 | a4 << 56;
 
-    secp256k1_scalar_verify(r);
+    SECP256K1_SCALAR_VERIFY(r);
 }
 
 static void secp256k1_scalar_to_signed62(secp256k1_modinv64_signed62 *r, const secp256k1_scalar *a) {
     const uint64_t M62 = UINT64_MAX >> 2;
     const uint64_t a0 = a->d[0], a1 = a->d[1], a2 = a->d[2], a3 = a->d[3];
-    secp256k1_scalar_verify(a);
+    SECP256K1_SCALAR_VERIFY(a);
 
     r->v[0] = a0 & M62;
     r->v[1] = (a0 >> 62 | a1 << 2) & M62;
@@ -951,13 +951,13 @@ static void secp256k1_scalar_inverse(secp256k1_scalar *r, const secp256k1_scalar
 #ifdef VERIFY
     int zero_in = secp256k1_scalar_is_zero(x);
 #endif
-    secp256k1_scalar_verify(x);
+    SECP256K1_SCALAR_VERIFY(x);
 
     secp256k1_scalar_to_signed62(&s, x);
     secp256k1_modinv64(&s, &secp256k1_const_modinfo_scalar);
     secp256k1_scalar_from_signed62(r, &s);
 
-    secp256k1_scalar_verify(r);
+    SECP256K1_SCALAR_VERIFY(r);
     VERIFY_CHECK(secp256k1_scalar_is_zero(r) == zero_in);
 }
 
@@ -966,18 +966,18 @@ static void secp256k1_scalar_inverse_var(secp256k1_scalar *r, const secp256k1_sc
 #ifdef VERIFY
     int zero_in = secp256k1_scalar_is_zero(x);
 #endif
-    secp256k1_scalar_verify(x);
+    SECP256K1_SCALAR_VERIFY(x);
 
     secp256k1_scalar_to_signed62(&s, x);
     secp256k1_modinv64_var(&s, &secp256k1_const_modinfo_scalar);
     secp256k1_scalar_from_signed62(r, &s);
 
-    secp256k1_scalar_verify(r);
+    SECP256K1_SCALAR_VERIFY(r);
     VERIFY_CHECK(secp256k1_scalar_is_zero(r) == zero_in);
 }
 
 SECP256K1_INLINE static int secp256k1_scalar_is_even(const secp256k1_scalar *a) {
-    secp256k1_scalar_verify(a);
+    SECP256K1_SCALAR_VERIFY(a);
 
     return !(a->d[0] & 1);
 }
diff --git a/src/scalar_8x32_impl.h b/src/scalar_8x32_impl.h
index faa62089a5..58ae51bc02 100644
--- a/src/scalar_8x32_impl.h
+++ b/src/scalar_8x32_impl.h
@@ -59,18 +59,18 @@ SECP256K1_INLINE static void secp256k1_scalar_set_int(secp256k1_scalar *r, unsig
     r->d[6] = 0;
     r->d[7] = 0;
 
-    secp256k1_scalar_verify(r);
+    SECP256K1_SCALAR_VERIFY(r);
 }
 
 SECP256K1_INLINE static unsigned int secp256k1_scalar_get_bits(const secp256k1_scalar *a, unsigned int offset, unsigned int count) {
-    secp256k1_scalar_verify(a);
+    SECP256K1_SCALAR_VERIFY(a);
     VERIFY_CHECK((offset + count - 1) >> 5 == offset >> 5);
 
     return (a->d[offset >> 5] >> (offset & 0x1F)) & ((1 << count) - 1);
 }
 
 SECP256K1_INLINE static unsigned int secp256k1_scalar_get_bits_var(const secp256k1_scalar *a, unsigned int offset, unsigned int count) {
-    secp256k1_scalar_verify(a);
+    SECP256K1_SCALAR_VERIFY(a);
     VERIFY_CHECK(count < 32);
     VERIFY_CHECK(offset + count <= 256);
 
@@ -121,15 +121,15 @@ SECP256K1_INLINE static int secp256k1_scalar_reduce(secp256k1_scalar *r, uint32_
     t += (uint64_t)r->d[7];
     r->d[7] = t & 0xFFFFFFFFUL;
 
-    secp256k1_scalar_verify(r);
+    SECP256K1_SCALAR_VERIFY(r);
     return overflow;
 }
 
 static int secp256k1_scalar_add(secp256k1_scalar *r, const secp256k1_scalar *a, const secp256k1_scalar *b) {
     int overflow;
     uint64_t t = (uint64_t)a->d[0] + b->d[0];
-    secp256k1_scalar_verify(a);
-    secp256k1_scalar_verify(b);
+    SECP256K1_SCALAR_VERIFY(a);
+    SECP256K1_SCALAR_VERIFY(b);
 
     r->d[0] = t & 0xFFFFFFFFULL; t >>= 32;
     t += (uint64_t)a->d[1] + b->d[1];
@@ -150,14 +150,14 @@ static int secp256k1_scalar_add(secp256k1_scalar *r, const secp256k1_scalar *a,
     VERIFY_CHECK(overflow == 0 || overflow == 1);
     secp256k1_scalar_reduce(r, overflow);
 
-    secp256k1_scalar_verify(r);
+    SECP256K1_SCALAR_VERIFY(r);
     return overflow;
 }
 
 static void secp256k1_scalar_cadd_bit(secp256k1_scalar *r, unsigned int bit, int flag) {
     uint64_t t;
     volatile int vflag = flag;
-    secp256k1_scalar_verify(r);
+    SECP256K1_SCALAR_VERIFY(r);
     VERIFY_CHECK(bit < 256);
 
     bit += ((uint32_t) vflag - 1) & 0x100;  /* forcing (bit >> 5) > 7 makes this a noop */
@@ -178,7 +178,7 @@ static void secp256k1_scalar_cadd_bit(secp256k1_scalar *r, unsigned int bit, int
     t += (uint64_t)r->d[7] + (((uint32_t)((bit >> 5) == 7)) << (bit & 0x1F));
     r->d[7] = t & 0xFFFFFFFFULL;
 
-    secp256k1_scalar_verify(r);
+    SECP256K1_SCALAR_VERIFY(r);
     VERIFY_CHECK((t >> 32) == 0);
 }
 
@@ -197,11 +197,11 @@ static void secp256k1_scalar_set_b32(secp256k1_scalar *r, const unsigned char *b
         *overflow = over;
     }
 
-    secp256k1_scalar_verify(r);
+    SECP256K1_SCALAR_VERIFY(r);
 }
 
 static void secp256k1_scalar_get_b32(unsigned char *bin, const secp256k1_scalar* a) {
-    secp256k1_scalar_verify(a);
+    SECP256K1_SCALAR_VERIFY(a);
 
     secp256k1_write_be32(&bin[0], a->d[7]);
     secp256k1_write_be32(&bin[4], a->d[6]);
@@ -214,7 +214,7 @@ static void secp256k1_scalar_get_b32(unsigned char *bin, const secp256k1_scalar*
 }
 
 SECP256K1_INLINE static int secp256k1_scalar_is_zero(const secp256k1_scalar *a) {
-    secp256k1_scalar_verify(a);
+    SECP256K1_SCALAR_VERIFY(a);
 
     return (a->d[0] | a->d[1] | a->d[2] | a->d[3] | a->d[4] | a->d[5] | a->d[6] | a->d[7]) == 0;
 }
@@ -222,7 +222,7 @@ SECP256K1_INLINE static int secp256k1_scalar_is_zero(const secp256k1_scalar *a)
 static void secp256k1_scalar_negate(secp256k1_scalar *r, const secp256k1_scalar *a) {
     uint32_t nonzero = 0xFFFFFFFFUL * (secp256k1_scalar_is_zero(a) == 0);
     uint64_t t = (uint64_t)(~a->d[0]) + SECP256K1_N_0 + 1;
-    secp256k1_scalar_verify(a);
+    SECP256K1_SCALAR_VERIFY(a);
 
     r->d[0] = t & nonzero; t >>= 32;
     t += (uint64_t)(~a->d[1]) + SECP256K1_N_1;
@@ -240,7 +240,7 @@ static void secp256k1_scalar_negate(secp256k1_scalar *r, const secp256k1_scalar
     t += (uint64_t)(~a->d[7]) + SECP256K1_N_7;
     r->d[7] = t & nonzero;
 
-    secp256k1_scalar_verify(r);
+    SECP256K1_SCALAR_VERIFY(r);
 }
 
 static void secp256k1_scalar_half(secp256k1_scalar *r, const secp256k1_scalar *a) {
@@ -260,7 +260,7 @@ static void secp256k1_scalar_half(secp256k1_scalar *r, const secp256k1_scalar *a
      */
     uint32_t mask = -(uint32_t)(a->d[0] & 1U);
     uint64_t t = (uint32_t)((a->d[0] >> 1) | (a->d[1] << 31));
-    secp256k1_scalar_verify(a);
+    SECP256K1_SCALAR_VERIFY(a);
 
     t += (SECP256K1_N_H_0 + 1U) & mask;
     r->d[0] = t; t >>= 32;
@@ -283,17 +283,16 @@ static void secp256k1_scalar_half(secp256k1_scalar *r, const secp256k1_scalar *a
     t += SECP256K1_N_H_6 & mask;
     r->d[6] = t; t >>= 32;
     r->d[7] = (uint32_t)t + (uint32_t)(a->d[7] >> 1) + (SECP256K1_N_H_7 & mask);
-#ifdef VERIFY
+
     /* The line above only computed the bottom 32 bits of r->d[7]. Redo the computation
      * in full 64 bits to make sure the top 32 bits are indeed zero. */
     VERIFY_CHECK((t + (a->d[7] >> 1) + (SECP256K1_N_H_7 & mask)) >> 32 == 0);
 
-    secp256k1_scalar_verify(r);
-#endif
+    SECP256K1_SCALAR_VERIFY(r);
 }
 
 SECP256K1_INLINE static int secp256k1_scalar_is_one(const secp256k1_scalar *a) {
-    secp256k1_scalar_verify(a);
+    SECP256K1_SCALAR_VERIFY(a);
 
     return ((a->d[0] ^ 1) | a->d[1] | a->d[2] | a->d[3] | a->d[4] | a->d[5] | a->d[6] | a->d[7]) == 0;
 }
@@ -301,7 +300,7 @@ SECP256K1_INLINE static int secp256k1_scalar_is_one(const secp256k1_scalar *a) {
 static int secp256k1_scalar_is_high(const secp256k1_scalar *a) {
     int yes = 0;
     int no = 0;
-    secp256k1_scalar_verify(a);
+    SECP256K1_SCALAR_VERIFY(a);
 
     no |= (a->d[7] < SECP256K1_N_H_7);
     yes |= (a->d[7] > SECP256K1_N_H_7) & ~no;
@@ -325,7 +324,7 @@ static int secp256k1_scalar_cond_negate(secp256k1_scalar *r, int flag) {
     uint32_t mask = -vflag;
     uint32_t nonzero = 0xFFFFFFFFUL * (secp256k1_scalar_is_zero(r) == 0);
     uint64_t t = (uint64_t)(r->d[0] ^ mask) + ((SECP256K1_N_0 + 1) & mask);
-    secp256k1_scalar_verify(r);
+    SECP256K1_SCALAR_VERIFY(r);
 
     r->d[0] = t & nonzero; t >>= 32;
     t += (uint64_t)(r->d[1] ^ mask) + (SECP256K1_N_1 & mask);
@@ -343,7 +342,7 @@ static int secp256k1_scalar_cond_negate(secp256k1_scalar *r, int flag) {
     t += (uint64_t)(r->d[7] ^ mask) + (SECP256K1_N_7 & mask);
     r->d[7] = t & nonzero;
 
-    secp256k1_scalar_verify(r);
+    SECP256K1_SCALAR_VERIFY(r);
     return 2 * (mask == 0) - 1;
 }
 
@@ -651,17 +650,17 @@ static void secp256k1_scalar_mul_512(uint32_t *l, const secp256k1_scalar *a, con
 
 static void secp256k1_scalar_mul(secp256k1_scalar *r, const secp256k1_scalar *a, const secp256k1_scalar *b) {
     uint32_t l[16];
-    secp256k1_scalar_verify(a);
-    secp256k1_scalar_verify(b);
+    SECP256K1_SCALAR_VERIFY(a);
+    SECP256K1_SCALAR_VERIFY(b);
 
     secp256k1_scalar_mul_512(l, a, b);
     secp256k1_scalar_reduce_512(r, l);
 
-    secp256k1_scalar_verify(r);
+    SECP256K1_SCALAR_VERIFY(r);
 }
 
 static void secp256k1_scalar_split_128(secp256k1_scalar *r1, secp256k1_scalar *r2, const secp256k1_scalar *k) {
-    secp256k1_scalar_verify(k);
+    SECP256K1_SCALAR_VERIFY(k);
 
     r1->d[0] = k->d[0];
     r1->d[1] = k->d[1];
@@ -680,13 +679,13 @@ static void secp256k1_scalar_split_128(secp256k1_scalar *r1, secp256k1_scalar *r
     r2->d[6] = 0;
     r2->d[7] = 0;
 
-    secp256k1_scalar_verify(r1);
-    secp256k1_scalar_verify(r2);
+    SECP256K1_SCALAR_VERIFY(r1);
+    SECP256K1_SCALAR_VERIFY(r2);
 }
 
 SECP256K1_INLINE static int secp256k1_scalar_eq(const secp256k1_scalar *a, const secp256k1_scalar *b) {
-    secp256k1_scalar_verify(a);
-    secp256k1_scalar_verify(b);
+    SECP256K1_SCALAR_VERIFY(a);
+    SECP256K1_SCALAR_VERIFY(b);
 
     return ((a->d[0] ^ b->d[0]) | (a->d[1] ^ b->d[1]) | (a->d[2] ^ b->d[2]) | (a->d[3] ^ b->d[3]) | (a->d[4] ^ b->d[4]) | (a->d[5] ^ b->d[5]) | (a->d[6] ^ b->d[6]) | (a->d[7] ^ b->d[7])) == 0;
 }
@@ -696,8 +695,8 @@ SECP256K1_INLINE static void secp256k1_scalar_mul_shift_var(secp256k1_scalar *r,
     unsigned int shiftlimbs;
     unsigned int shiftlow;
     unsigned int shifthigh;
-    secp256k1_scalar_verify(a);
-    secp256k1_scalar_verify(b);
+    SECP256K1_SCALAR_VERIFY(a);
+    SECP256K1_SCALAR_VERIFY(b);
     VERIFY_CHECK(shift >= 256);
 
     secp256k1_scalar_mul_512(l, a, b);
@@ -714,13 +713,13 @@ SECP256K1_INLINE static void secp256k1_scalar_mul_shift_var(secp256k1_scalar *r,
     r->d[7] = shift < 288 ? (l[7 + shiftlimbs] >> shiftlow) : 0;
     secp256k1_scalar_cadd_bit(r, 0, (l[(shift - 1) >> 5] >> ((shift - 1) & 0x1f)) & 1);
 
-    secp256k1_scalar_verify(r);
+    SECP256K1_SCALAR_VERIFY(r);
 }
 
 static SECP256K1_INLINE void secp256k1_scalar_cmov(secp256k1_scalar *r, const secp256k1_scalar *a, int flag) {
     uint32_t mask0, mask1;
     volatile int vflag = flag;
-    secp256k1_scalar_verify(a);
+    SECP256K1_SCALAR_VERIFY(a);
     SECP256K1_CHECKMEM_CHECK_VERIFY(r->d, sizeof(r->d));
 
     mask0 = vflag + ~((uint32_t)0);
@@ -734,7 +733,7 @@ static SECP256K1_INLINE void secp256k1_scalar_cmov(secp256k1_scalar *r, const se
     r->d[6] = (r->d[6] & mask0) | (a->d[6] & mask1);
     r->d[7] = (r->d[7] & mask0) | (a->d[7] & mask1);
 
-    secp256k1_scalar_verify(r);
+    SECP256K1_SCALAR_VERIFY(r);
 }
 
 static void secp256k1_scalar_from_signed30(secp256k1_scalar *r, const secp256k1_modinv32_signed30 *a) {
@@ -763,14 +762,14 @@ static void secp256k1_scalar_from_signed30(secp256k1_scalar *r, const secp256k1_
     r->d[6] = a6 >> 12 | a7 << 18;
     r->d[7] = a7 >> 14 | a8 << 16;
 
-    secp256k1_scalar_verify(r);
+    SECP256K1_SCALAR_VERIFY(r);
 }
 
 static void secp256k1_scalar_to_signed30(secp256k1_modinv32_signed30 *r, const secp256k1_scalar *a) {
     const uint32_t M30 = UINT32_MAX >> 2;
     const uint32_t a0 = a->d[0], a1 = a->d[1], a2 = a->d[2], a3 = a->d[3],
                    a4 = a->d[4], a5 = a->d[5], a6 = a->d[6], a7 = a->d[7];
-    secp256k1_scalar_verify(a);
+    SECP256K1_SCALAR_VERIFY(a);
 
     r->v[0] = a0 & M30;
     r->v[1] = (a0 >> 30 | a1 << 2) & M30;
@@ -793,13 +792,13 @@ static void secp256k1_scalar_inverse(secp256k1_scalar *r, const secp256k1_scalar
 #ifdef VERIFY
     int zero_in = secp256k1_scalar_is_zero(x);
 #endif
-    secp256k1_scalar_verify(x);
+    SECP256K1_SCALAR_VERIFY(x);
 
     secp256k1_scalar_to_signed30(&s, x);
     secp256k1_modinv32(&s, &secp256k1_const_modinfo_scalar);
     secp256k1_scalar_from_signed30(r, &s);
 
-    secp256k1_scalar_verify(r);
+    SECP256K1_SCALAR_VERIFY(r);
     VERIFY_CHECK(secp256k1_scalar_is_zero(r) == zero_in);
 }
 
@@ -808,18 +807,18 @@ static void secp256k1_scalar_inverse_var(secp256k1_scalar *r, const secp256k1_sc
 #ifdef VERIFY
     int zero_in = secp256k1_scalar_is_zero(x);
 #endif
-    secp256k1_scalar_verify(x);
+    SECP256K1_SCALAR_VERIFY(x);
 
     secp256k1_scalar_to_signed30(&s, x);
     secp256k1_modinv32_var(&s, &secp256k1_const_modinfo_scalar);
     secp256k1_scalar_from_signed30(r, &s);
 
-    secp256k1_scalar_verify(r);
+    SECP256K1_SCALAR_VERIFY(r);
     VERIFY_CHECK(secp256k1_scalar_is_zero(r) == zero_in);
 }
 
 SECP256K1_INLINE static int secp256k1_scalar_is_even(const secp256k1_scalar *a) {
-    secp256k1_scalar_verify(a);
+    SECP256K1_SCALAR_VERIFY(a);
 
     return !(a->d[0] & 1);
 }
diff --git a/src/scalar_impl.h b/src/scalar_impl.h
index b09821fd51..bbba83e937 100644
--- a/src/scalar_impl.h
+++ b/src/scalar_impl.h
@@ -31,7 +31,7 @@ static int secp256k1_scalar_set_b32_seckey(secp256k1_scalar *r, const unsigned c
     int overflow;
     secp256k1_scalar_set_b32(r, bin, &overflow);
 
-    secp256k1_scalar_verify(r);
+    SECP256K1_SCALAR_VERIFY(r);
     return (!overflow) & (!secp256k1_scalar_is_zero(r));
 }
 
@@ -61,7 +61,7 @@ static void secp256k1_scalar_verify(const secp256k1_scalar *r) {
  * (arbitrarily) set r2 = k + 5 (mod n) and r1 = k - r2 * lambda (mod n).
  */
 static void secp256k1_scalar_split_lambda(secp256k1_scalar * SECP256K1_RESTRICT r1, secp256k1_scalar * SECP256K1_RESTRICT r2, const secp256k1_scalar * SECP256K1_RESTRICT k) {
-    secp256k1_scalar_verify(k);
+    SECP256K1_SCALAR_VERIFY(k);
     VERIFY_CHECK(r1 != k);
     VERIFY_CHECK(r2 != k);
     VERIFY_CHECK(r1 != r2);
@@ -69,8 +69,8 @@ static void secp256k1_scalar_split_lambda(secp256k1_scalar * SECP256K1_RESTRICT
     *r2 = (*k + 5) % EXHAUSTIVE_TEST_ORDER;
     *r1 = (*k + (EXHAUSTIVE_TEST_ORDER - *r2) * EXHAUSTIVE_TEST_LAMBDA) % EXHAUSTIVE_TEST_ORDER;
 
-    secp256k1_scalar_verify(r1);
-    secp256k1_scalar_verify(r2);
+    SECP256K1_SCALAR_VERIFY(r1);
+    SECP256K1_SCALAR_VERIFY(r2);
 }
 #else
 /**
@@ -153,7 +153,7 @@ static void secp256k1_scalar_split_lambda(secp256k1_scalar * SECP256K1_RESTRICT
         0xE4437ED6UL, 0x010E8828UL, 0x6F547FA9UL, 0x0ABFE4C4UL,
         0x221208ACUL, 0x9DF506C6UL, 0x1571B4AEUL, 0x8AC47F71UL
     );
-    secp256k1_scalar_verify(k);
+    SECP256K1_SCALAR_VERIFY(k);
     VERIFY_CHECK(r1 != k);
     VERIFY_CHECK(r2 != k);
     VERIFY_CHECK(r1 != r2);
@@ -168,8 +168,8 @@ static void secp256k1_scalar_split_lambda(secp256k1_scalar * SECP256K1_RESTRICT
     secp256k1_scalar_negate(r1, r1);
     secp256k1_scalar_add(r1, r1, k);
 
-    secp256k1_scalar_verify(r1);
-    secp256k1_scalar_verify(r2);
+    SECP256K1_SCALAR_VERIFY(r1);
+    SECP256K1_SCALAR_VERIFY(r2);
 #ifdef VERIFY
     secp256k1_scalar_split_lambda_verify(r1, r2, k);
 #endif
diff --git a/src/scalar_low_impl.h b/src/scalar_low_impl.h
index f2f0d3467a..0895db6a17 100644
--- a/src/scalar_low_impl.h
+++ b/src/scalar_low_impl.h
@@ -14,7 +14,7 @@
 #include <string.h>
 
 SECP256K1_INLINE static int secp256k1_scalar_is_even(const secp256k1_scalar *a) {
-    secp256k1_scalar_verify(a);
+    SECP256K1_SCALAR_VERIFY(a);
 
     return !(*a & 1);
 }
@@ -24,11 +24,11 @@ SECP256K1_INLINE static void secp256k1_scalar_clear(secp256k1_scalar *r) { *r =
 SECP256K1_INLINE static void secp256k1_scalar_set_int(secp256k1_scalar *r, unsigned int v) {
     *r = v % EXHAUSTIVE_TEST_ORDER;
 
-    secp256k1_scalar_verify(r);
+    SECP256K1_SCALAR_VERIFY(r);
 }
 
 SECP256K1_INLINE static unsigned int secp256k1_scalar_get_bits(const secp256k1_scalar *a, unsigned int offset, unsigned int count) {
-    secp256k1_scalar_verify(a);
+    SECP256K1_SCALAR_VERIFY(a);
 
     if (offset < 32)
         return ((*a >> offset) & ((((uint32_t)1) << count) - 1));
@@ -37,7 +37,7 @@ SECP256K1_INLINE static unsigned int secp256k1_scalar_get_bits(const secp256k1_s
 }
 
 SECP256K1_INLINE static unsigned int secp256k1_scalar_get_bits_var(const secp256k1_scalar *a, unsigned int offset, unsigned int count) {
-    secp256k1_scalar_verify(a);
+    SECP256K1_SCALAR_VERIFY(a);
 
     return secp256k1_scalar_get_bits(a, offset, count);
 }
@@ -45,22 +45,22 @@ SECP256K1_INLINE static unsigned int secp256k1_scalar_get_bits_var(const secp256
 SECP256K1_INLINE static int secp256k1_scalar_check_overflow(const secp256k1_scalar *a) { return *a >= EXHAUSTIVE_TEST_ORDER; }
 
 static int secp256k1_scalar_add(secp256k1_scalar *r, const secp256k1_scalar *a, const secp256k1_scalar *b) {
-    secp256k1_scalar_verify(a);
-    secp256k1_scalar_verify(b);
+    SECP256K1_SCALAR_VERIFY(a);
+    SECP256K1_SCALAR_VERIFY(b);
 
     *r = (*a + *b) % EXHAUSTIVE_TEST_ORDER;
 
-    secp256k1_scalar_verify(r);
+    SECP256K1_SCALAR_VERIFY(r);
     return *r < *b;
 }
 
 static void secp256k1_scalar_cadd_bit(secp256k1_scalar *r, unsigned int bit, int flag) {
-    secp256k1_scalar_verify(r);
+    SECP256K1_SCALAR_VERIFY(r);
 
     if (flag && bit < 32)
         *r += ((uint32_t)1 << bit);
 
-    secp256k1_scalar_verify(r);
+    SECP256K1_SCALAR_VERIFY(r);
     VERIFY_CHECK(bit < 32);
     /* Verify that adding (1 << bit) will not overflow any in-range scalar *r by overflowing the underlying uint32_t. */
     VERIFY_CHECK(((uint32_t)1 << bit) - 1 <= UINT32_MAX - EXHAUSTIVE_TEST_ORDER);
@@ -79,24 +79,24 @@ static void secp256k1_scalar_set_b32(secp256k1_scalar *r, const unsigned char *b
     }
     if (overflow) *overflow = over;
 
-    secp256k1_scalar_verify(r);
+    SECP256K1_SCALAR_VERIFY(r);
 }
 
 static void secp256k1_scalar_get_b32(unsigned char *bin, const secp256k1_scalar* a) {
-    secp256k1_scalar_verify(a);
+    SECP256K1_SCALAR_VERIFY(a);
 
     memset(bin, 0, 32);
     bin[28] = *a >> 24;
     bin[29] = *a >> 16;
     bin[30] = *a >> 8;
     bin[31] = *a;
 }
 
 SECP256K1_INLINE static int secp256k1_scalar_is_zero(const secp256k1_scalar *a) {
-    secp256k1_scalar_verify(a);
+    SECP256K1_SCALAR_VERIFY(a);
 
     return *a == 0;
 }
 
 static void secp256k1_scalar_negate(secp256k1_scalar *r, const secp256k1_scalar *a) {
-    secp256k1_scalar_verify(a);
+    SECP256K1_SCALAR_VERIFY(a);
 
     if (*a == 0) {
         *r = 0;
@@ -104,52 +104,52 @@ static void secp256k1_scalar_negate(secp256k1_scalar *r, const secp256k1_scalar
         *r = EXHAUSTIVE_TEST_ORDER - *a;
     }
 
-    secp256k1_scalar_verify(r);
+    SECP256K1_SCALAR_VERIFY(r);
 }
 
 SECP256K1_INLINE static int secp256k1_scalar_is_one(const secp256k1_scalar *a) {
-    secp256k1_scalar_verify(a);
+    SECP256K1_SCALAR_VERIFY(a);
 
     return *a == 1;
 }
 
 static int secp256k1_scalar_is_high(const secp256k1_scalar *a) {
-    secp256k1_scalar_verify(a);
+    SECP256K1_SCALAR_VERIFY(a);
 
     return *a > EXHAUSTIVE_TEST_ORDER / 2;
 }
 
 static int secp256k1_scalar_cond_negate(secp256k1_scalar *r, int flag) {
-    secp256k1_scalar_verify(r);
+    SECP256K1_SCALAR_VERIFY(r);
 
     if (flag) secp256k1_scalar_negate(r, r);
 
-    secp256k1_scalar_verify(r);
+    SECP256K1_SCALAR_VERIFY(r);
     return flag ? -1 : 1;
 }
 
 static void secp256k1_scalar_mul(secp256k1_scalar *r, const secp256k1_scalar *a, const secp256k1_scalar *b) {
-    secp256k1_scalar_verify(a);
-    secp256k1_scalar_verify(b);
+    SECP256K1_SCALAR_VERIFY(a);
+    SECP256K1_SCALAR_VERIFY(b);
 
     *r = (*a * *b) % EXHAUSTIVE_TEST_ORDER;
 
-    secp256k1_scalar_verify(r);
+    SECP256K1_SCALAR_VERIFY(r);
 }
 
 static void secp256k1_scalar_split_128(secp256k1_scalar *r1, secp256k1_scalar *r2, const secp256k1_scalar *a) {
-    secp256k1_scalar_verify(a);
+    SECP256K1_SCALAR_VERIFY(a);
 
     *r1 = *a;
     *r2 = 0;
 
-    secp256k1_scalar_verify(r1);
-    secp256k1_scalar_verify(r2);
+    SECP256K1_SCALAR_VERIFY(r1);
+    SECP256K1_SCALAR_VERIFY(r2);
 }
 
 SECP256K1_INLINE static int secp256k1_scalar_eq(const secp256k1_scalar *a, const secp256k1_scalar *b) {
-    secp256k1_scalar_verify(a);
-    secp256k1_scalar_verify(b);
+    SECP256K1_SCALAR_VERIFY(a);
+    SECP256K1_SCALAR_VERIFY(b);
 
     return *a == *b;
 }
@@ -157,45 +157,45 @@ SECP256K1_INLINE static int secp256k1_scalar_eq(const secp256k1_scalar *a, const
 static SECP256K1_INLINE void secp256k1_scalar_cmov(secp256k1_scalar *r, const secp256k1_scalar *a, int flag) {
     uint32_t mask0, mask1;
     volatile int vflag = flag;
-    secp256k1_scalar_verify(a);
+    SECP256K1_SCALAR_VERIFY(a);
     SECP256K1_CHECKMEM_CHECK_VERIFY(r, sizeof(*r));
 
     mask0 = vflag + ~((uint32_t)0);
     mask1 = ~mask0;
     *r = (*r & mask0) | (*a & mask1);
 
-    secp256k1_scalar_verify(r);
+    SECP256K1_SCALAR_VERIFY(r);
 }
 
 static void secp256k1_scalar_inverse(secp256k1_scalar *r, const secp256k1_scalar *x) {
     int i;
     *r = 0;
-    secp256k1_scalar_verify(x);
+    SECP256K1_SCALAR_VERIFY(x);
 
     for (i = 0; i < EXHAUSTIVE_TEST_ORDER; i++)
         if ((i * *x) % EXHAUSTIVE_TEST_ORDER == 1)
             *r = i;
 
-    secp256k1_scalar_verify(r);
+    SECP256K1_SCALAR_VERIFY(r);
     /* If this VERIFY_CHECK triggers we were given a noninvertible scalar (and thus
      * have a composite group order; fix it in exhaustive_tests.c). */
     VERIFY_CHECK(*r != 0);
 }
 
 static void secp256k1_scalar_inverse_var(secp256k1_scalar *r, const secp256k1_scalar *x) {
-    secp256k1_scalar_verify(x);
+    SECP256K1_SCALAR_VERIFY(x);
 
     secp256k1_scalar_inverse(r, x);
 
-    secp256k1_scalar_verify(r);
+    SECP256K1_SCALAR_VERIFY(r);
 }
 
 static void secp256k1_scalar_half(secp256k1_scalar *r, const secp256k1_scalar *a) {
-    secp256k1_scalar_verify(a);
+    SECP256K1_SCALAR_VERIFY(a);
 
     *r = (*a + ((-(uint32_t)(*a & 1)) & EXHAUSTIVE_TEST_ORDER)) >> 1;
 
-    secp256k1_scalar_verify(r);
+    SECP256K1_SCALAR_VERIFY(r);
 }
 
 #endif /* SECP256K1_SCALAR_REPR_IMPL_H */
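
Note on the pattern (not part of the patch): every call site above now goes through the SECP256K1_SCALAR_VERIFY wrapper, which src/scalar.h defines as a plain alias of secp256k1_scalar_verify, itself a no-op unless VERIFY is enabled. The sketch below is a minimal, self-contained C illustration of this convention: verify inputs on function entry and outputs before returning. The demo_* names, the toy order 13, and the #ifdef VERIFY expansion to nothing are assumptions made for the example only; the patch itself keeps a single unconditional definition of the macro.

/* demo_scalar_verify.c -- hypothetical sketch, not library code. */
#include <assert.h>
#include <stdint.h>

typedef struct { uint32_t d; } demo_scalar;  /* stand-in for secp256k1_scalar */
#define DEMO_ORDER 13u                       /* stand-in for the group order n */

/* Invariant checked on every scalar: it is fully reduced modulo the order. */
void demo_scalar_verify(const demo_scalar *r) {
    assert(r->d < DEMO_ORDER);
}

/* Wrapper macro: expands to the check in VERIFY builds, to nothing otherwise.
 * (The patch above instead uses one unconditional alias; this #ifdef variant
 * is only an illustration of what the indirection would permit.) */
#ifdef VERIFY
#define DEMO_SCALAR_VERIFY(r) demo_scalar_verify(r)
#else
#define DEMO_SCALAR_VERIFY(r) ((void)(r))
#endif

static void demo_scalar_add(demo_scalar *r, const demo_scalar *a, const demo_scalar *b) {
    DEMO_SCALAR_VERIFY(a);                   /* check inputs on entry */
    DEMO_SCALAR_VERIFY(b);

    r->d = (a->d + b->d) % DEMO_ORDER;

    DEMO_SCALAR_VERIFY(r);                   /* check output before returning */
}

int main(void) {
    demo_scalar a = {5}, b = {11}, r;
    demo_scalar_add(&r, &a, &b);
    return r.d == (5u + 11u) % DEMO_ORDER ? 0 : 1;  /* expect 3 */
}

Funneling every invariant check through one macro keeps the call sites uniform and greppable, and leaves a single point of change if the expansion ever needs to differ between VERIFY and non-VERIFY builds.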