mirror of https://github.com/bitcoin/bitcoin.git, synced 2025-02-09 10:43:19 -05:00

commit d5e8362ae5: Merge pull request #127

c35ff1e Convert lambda splitter to pure scalar code. (Pieter Wuille)
cc604e9 Avoid division when decomposing scalars (Peter Dettman)
ff8746d Add secp256k1_scalar_mul_shift_var (Pieter Wuille)

10 changed files with 187 additions and 127 deletions

.travis.yml

@@ -19,6 +19,7 @@ env:
   - FIELD=32bit
   - FIELD=32bit ENDOMORPHISM=yes
   - BIGNUM=none
+  - BIGNUM=none ENDOMORPHISM=yes
   - BUILD=distcheck
   - EXTRAFLAGS=CFLAGS=-DDETERMINISTIC
 before_script: ./autogen.sh

configure.ac

@@ -270,10 +270,7 @@ if test x"$set_field" = x"gmp" || test x"$set_bignum" = x"gmp"; then
 fi
 
 if test x"$use_endomorphism" = x"yes"; then
-  if test x"$set_bignum" = x"none"; then
-    AC_MSG_ERROR([Cannot use endomorphism optimization without a bignum implementation])
-  fi
-  AC_DEFINE(USE_ENDOMORPHISM, 1, [Define this symbol to use endomorphism])
+  AC_DEFINE(USE_ENDOMORPHISM, 1, [Define this symbol to use endomorphism optimization])
 fi
 
 AC_MSG_NOTICE([Using field implementation: $set_field])

src/ecmult_impl.h

@@ -168,8 +168,10 @@ static void secp256k1_ecmult(secp256k1_gej_t *r, const secp256k1_gej_t *a, const
     secp256k1_scalar_split_lambda_var(&na_1, &na_lam, na);
 
     /* build wnaf representation for na_1 and na_lam. */
-    int wnaf_na_1[129]; int bits_na_1 = secp256k1_ecmult_wnaf(wnaf_na_1, &na_1, WINDOW_A);
-    int wnaf_na_lam[129]; int bits_na_lam = secp256k1_ecmult_wnaf(wnaf_na_lam, &na_lam, WINDOW_A);
+    int wnaf_na_1[130]; int bits_na_1 = secp256k1_ecmult_wnaf(wnaf_na_1, &na_1, WINDOW_A);
+    int wnaf_na_lam[130]; int bits_na_lam = secp256k1_ecmult_wnaf(wnaf_na_lam, &na_lam, WINDOW_A);
+    VERIFY_CHECK(bits_na_1 <= 130);
+    VERIFY_CHECK(bits_na_lam <= 130);
     int bits = bits_na_1;
     if (bits_na_lam > bits) bits = bits_na_lam;
 #else

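The wnaf arrays grow from 129 to 130 entries and gain explicit bounds checks, presumably because the rounding-based split only guarantees the halves fit in 129 bits rather than 128, and a signed-digit (wNAF) expansion of a b-bit value can in general need b+1 digits. A minimal sketch of that last point, using a naive NAF recoding of a small integer (illustration only, not the library's secp256k1_ecmult_wnaf):

#include <stdio.h>

/* Naive non-adjacent-form recoding of a small unsigned value. Shows that a
 * b-bit input can need b+1 signed digits: 3 is a 2-bit number but its NAF
 * is {-1, 0, 1}, i.e. three digits. */
static int naf_recode(int digits[], unsigned int k) {
    int n = 0;
    while (k) {
        int d = 0;
        if (k & 1) {
            d = (k & 2) ? -1 : 1;         /* the odd digit 2 - (k mod 4) */
            if (d == 1) k -= 1; else k += 1;
        }
        digits[n++] = d;
        k >>= 1;
    }
    return n;
}

int main(void) {
    int digits[40];
    printf("NAF digits for 3: %d\n", naf_recode(digits, 3));     /* prints 3 */
    printf("NAF digits for 255: %d\n", naf_recode(digits, 255)); /* 8-bit input, prints 9 */
    return 0;
}
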
src/num.h

@@ -47,15 +47,12 @@ static void secp256k1_num_sub(secp256k1_num_t *r, const secp256k1_num_t *a, cons
 /** Multiply two (signed) numbers. */
 static void secp256k1_num_mul(secp256k1_num_t *r, const secp256k1_num_t *a, const secp256k1_num_t *b);
 
-/** Divide two (signed) numbers. */
-static void secp256k1_num_div(secp256k1_num_t *r, const secp256k1_num_t *a, const secp256k1_num_t *b);
-
 /** Replace a number by its remainder modulo m. M's sign is ignored. The result is a number between 0 and m-1,
     even if r was negative. */
 static void secp256k1_num_mod(secp256k1_num_t *r, const secp256k1_num_t *m);
 
-/** Right-shift the passed number by bits bits, and return those bits. */
-static int secp256k1_num_shift(secp256k1_num_t *r, int bits);
+/** Right-shift the passed number by bits bits. */
+static void secp256k1_num_shift(secp256k1_num_t *r, int bits);
 
 /** Check whether a number is zero. */
 static int secp256k1_num_is_zero(const secp256k1_num_t *a);

src/num_gmp_impl.h

@@ -206,31 +206,23 @@ static void secp256k1_num_mul(secp256k1_num_t *r, const secp256k1_num_t *a, cons
     memset(tmp, 0, sizeof(tmp));
 }
 
-static void secp256k1_num_div(secp256k1_num_t *r, const secp256k1_num_t *a, const secp256k1_num_t *b) {
-    secp256k1_num_sanity(a);
-    secp256k1_num_sanity(b);
-    if (b->limbs > a->limbs) {
-        r->limbs = 1;
-        r->data[0] = 0;
-        r->neg = 0;
-        return;
+static void secp256k1_num_shift(secp256k1_num_t *r, int bits) {
+    if (bits % GMP_NUMB_BITS) {
+        // Shift within limbs.
+        mpn_rshift(r->data, r->data, r->limbs, bits % GMP_NUMB_BITS);
     }
-
-    mp_limb_t quo[2*NUM_LIMBS+1];
-    mp_limb_t rem[2*NUM_LIMBS+1];
-    mpn_tdiv_qr(quo, rem, 0, a->data, a->limbs, b->data, b->limbs);
-    mpn_copyi(r->data, quo, a->limbs - b->limbs + 1);
-    r->limbs = a->limbs - b->limbs + 1;
-    while (r->limbs > 1 && r->data[r->limbs - 1]==0) r->limbs--;
-    r->neg = a->neg ^ b->neg;
-}
-
-static int secp256k1_num_shift(secp256k1_num_t *r, int bits) {
-    VERIFY_CHECK(bits <= GMP_NUMB_BITS);
-    mp_limb_t ret = mpn_rshift(r->data, r->data, r->limbs, bits);
-    if (r->limbs>1 && r->data[r->limbs-1]==0) r->limbs--;
-    ret >>= (GMP_NUMB_BITS - bits);
-    return ret;
+    if (bits >= GMP_NUMB_BITS) {
+        // Shift full limbs.
+        for (int i = 0; i < r->limbs; i++) {
+            int index = i + (bits / GMP_NUMB_BITS);
+            if (index < r->limbs && index < 2*NUM_LIMBS) {
+                r->data[i] = r->data[index];
+            } else {
+                r->data[i] = 0;
+            }
+        }
+    }
+    while (r->limbs>1 && r->data[r->limbs-1]==0) r->limbs--;
 }
 
 static void secp256k1_num_negate(secp256k1_num_t *r) {

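The rewritten secp256k1_num_shift drops the old one-limb restriction: it first shifts by the sub-limb amount with mpn_rshift, then moves whole limbs down. A self-contained sketch of the same two-phase idea on a plain little-endian array of 64-bit limbs (illustration only; it assumes nothing from GMP or the library):

#include <stdint.h>
#include <stdio.h>

/* Right-shift a little-endian array of 64-bit limbs by an arbitrary bit count:
 * phase 1 shifts within limbs, phase 2 moves whole limbs down, zero-filling the top. */
static void rshift_limbs(uint64_t *d, int n, unsigned int bits) {
    unsigned int small = bits % 64;
    unsigned int whole = bits / 64;
    if (small) {
        for (int i = 0; i < n; i++) {
            d[i] = (d[i] >> small) | (i + 1 < n ? d[i + 1] << (64 - small) : 0);
        }
    }
    if (whole) {
        for (int i = 0; i < n; i++) {
            d[i] = (i + (int)whole < n) ? d[i + whole] : 0;
        }
    }
}

int main(void) {
    uint64_t x[2] = {0x89abcdef01234567ULL, 0xfedcba9876543210ULL};
    rshift_limbs(x, 2, 72);
    printf("%llx\n", (unsigned long long)x[0]); /* prints fedcba98765432 */
    return 0;
}
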
src/scalar.h

@@ -90,4 +90,7 @@ static void secp256k1_scalar_split_128(secp256k1_scalar_t *r1, secp256k1_scalar_
 static void secp256k1_scalar_split_lambda_var(secp256k1_scalar_t *r1, secp256k1_scalar_t *r2, const secp256k1_scalar_t *a);
 #endif
 
+/** Multiply a and b (without taking the modulus!), divide by 2**shift, and round to the nearest integer. Shift must be at least 256. */
+static void secp256k1_scalar_mul_shift_var(secp256k1_scalar_t *r, const secp256k1_scalar_t *a, const secp256k1_scalar_t *b, unsigned int shift);
+
 #endif

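The declaration above defines r = round(a*b / 2^shift). On ordinary integers the same rounding rule is floor((x + 2^(shift-1)) / 2^shift), i.e. add half the divisor before truncating, with halfway cases going up; this matches the add-the-dropped-bit step in the implementations further down. A tiny illustration with 64-bit values (not the library's code, which works on the full 512-bit product):

#include <stdint.h>
#include <stdio.h>

/* round(x / 2^s) for ordinary integers: add 2^(s-1), then truncate. */
static uint64_t round_shift(uint64_t x, unsigned int s) {
    return (x + (UINT64_C(1) << (s - 1))) >> s;
}

int main(void) {
    printf("%llu\n", (unsigned long long)round_shift(100, 3)); /* 100/8 = 12.5   -> 13 */
    printf("%llu\n", (unsigned long long)round_shift(99, 3));  /* 99/8  = 12.375 -> 12 */
    return 0;
}
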
src/scalar_4x64_impl.h

@@ -314,13 +314,11 @@ static void secp256k1_scalar_reduce_512(secp256k1_scalar_t *r, const uint64_t *l
     secp256k1_scalar_reduce(r, c + secp256k1_scalar_check_overflow(r));
 }
 
-static void secp256k1_scalar_mul(secp256k1_scalar_t *r, const secp256k1_scalar_t *a, const secp256k1_scalar_t *b) {
+static void secp256k1_scalar_mul_512(uint64_t l[8], const secp256k1_scalar_t *a, const secp256k1_scalar_t *b) {
     /* 160 bit accumulator. */
     uint64_t c0 = 0, c1 = 0;
     uint32_t c2 = 0;
 
-    uint64_t l[8];
-
     /* l[0..7] = a[0..3] * b[0..3]. */
     muladd_fast(a->d[0], b->d[0]);
     extract_fast(l[0]);

@@ -347,17 +345,13 @@ static void secp256k1_scalar_mul(secp256k1_scalar_t *r, const secp256k1_scalar_t
     extract_fast(l[6]);
     VERIFY_CHECK(c1 <= 0);
     l[7] = c0;
-
-    secp256k1_scalar_reduce_512(r, l);
 }
 
-static void secp256k1_scalar_sqr(secp256k1_scalar_t *r, const secp256k1_scalar_t *a) {
+static void secp256k1_scalar_sqr_512(uint64_t l[8], const secp256k1_scalar_t *a) {
     /* 160 bit accumulator. */
     uint64_t c0 = 0, c1 = 0;
     uint32_t c2 = 0;
 
-    uint64_t l[8];
-
     /* l[0..7] = a[0..3] * b[0..3]. */
     muladd_fast(a->d[0], a->d[0]);
     extract_fast(l[0]);

@@ -378,8 +372,6 @@ static void secp256k1_scalar_sqr(secp256k1_scalar_t *r, const secp256k1_scalar_t
     extract_fast(l[6]);
     VERIFY_CHECK(c1 == 0);
     l[7] = c0;
-
-    secp256k1_scalar_reduce_512(r, l);
 }
 
 #undef sumadd

@@ -390,6 +382,18 @@ static void secp256k1_scalar_sqr(secp256k1_scalar_t *r, const secp256k1_scalar_t
 #undef extract
 #undef extract_fast
 
+static void secp256k1_scalar_mul(secp256k1_scalar_t *r, const secp256k1_scalar_t *a, const secp256k1_scalar_t *b) {
+    uint64_t l[8];
+    secp256k1_scalar_mul_512(l, a, b);
+    secp256k1_scalar_reduce_512(r, l);
+}
+
+static void secp256k1_scalar_sqr(secp256k1_scalar_t *r, const secp256k1_scalar_t *a) {
+    uint64_t l[8];
+    secp256k1_scalar_sqr_512(l, a);
+    secp256k1_scalar_reduce_512(r, l);
+}
+
 static void secp256k1_scalar_split_128(secp256k1_scalar_t *r1, secp256k1_scalar_t *r2, const secp256k1_scalar_t *a) {
     r1->d[0] = a->d[0];
     r1->d[1] = a->d[1];

@@ -405,4 +409,20 @@ SECP256K1_INLINE static int secp256k1_scalar_eq(const secp256k1_scalar_t *a, con
     return ((a->d[0] ^ b->d[0]) | (a->d[1] ^ b->d[1]) | (a->d[2] ^ b->d[2]) | (a->d[3] ^ b->d[3])) == 0;
 }
 
+SECP256K1_INLINE static void secp256k1_scalar_mul_shift_var(secp256k1_scalar_t *r, const secp256k1_scalar_t *a, const secp256k1_scalar_t *b, unsigned int shift) {
+    VERIFY_CHECK(shift >= 256);
+    uint64_t l[8];
+    secp256k1_scalar_mul_512(l, a, b);
+    unsigned int shiftlimbs = shift >> 6;
+    unsigned int shiftlow = shift & 0x3F;
+    unsigned int shifthigh = 64 - shiftlow;
+    r->d[0] = shift < 512 ? (l[0 + shiftlimbs] >> shiftlow | (shift < 448 && shiftlow ? (l[1 + shiftlimbs] << shifthigh) : 0)) : 0;
+    r->d[1] = shift < 448 ? (l[1 + shiftlimbs] >> shiftlow | (shift < 384 && shiftlow ? (l[2 + shiftlimbs] << shifthigh) : 0)) : 0;
+    r->d[2] = shift < 384 ? (l[2 + shiftlimbs] >> shiftlow | (shift < 320 && shiftlow ? (l[3 + shiftlimbs] << shifthigh) : 0)) : 0;
+    r->d[3] = shift < 320 ? (l[3 + shiftlimbs] >> shiftlow) : 0;
+    if ((l[(shift - 1) >> 6] >> ((shift - 1) & 0x3f)) & 1) {
+        secp256k1_scalar_add_bit(r, 0);
+    }
+}
+
 #endif

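Each output limb is a 64-bit window read out of the 512-bit product at bit offset shift: the low part comes from l[shiftlimbs + i] >> shiftlow and, when shiftlow is non-zero, the high part from the next limb shifted left by 64 - shiftlow; the final if adds the highest discarded bit to round to nearest. A minimal stand-alone version of the same pattern on a 128-bit product held in two limbs (illustration only; assumes a compiler with unsigned __int128):

#include <stdint.h>
#include <stdio.h>

/* round(a*b / 2^shift) for 1 <= shift <= 128, mirroring the limb-window extraction
 * and the add-the-dropped-bit rounding used above, but on a 2-limb product. */
static uint64_t mul_shift_round(uint64_t a, uint64_t b, unsigned int shift) {
    unsigned __int128 p = (unsigned __int128)a * b;
    uint64_t l[2] = { (uint64_t)p, (uint64_t)(p >> 64) };
    unsigned int shiftlimbs = shift >> 6;
    unsigned int shiftlow = shift & 0x3F;
    unsigned int shifthigh = 64 - shiftlow;
    uint64_t r = shift < 128 ? (l[shiftlimbs] >> shiftlow |
        (shift < 64 && shiftlow ? (l[1 + shiftlimbs] << shifthigh) : 0)) : 0;
    r += (l[(shift - 1) >> 6] >> ((shift - 1) & 0x3F)) & 1; /* round to nearest */
    return r;
}

int main(void) {
    printf("%llu\n", (unsigned long long)mul_shift_round(3, 5, 64));          /* 15/2^64 -> 0 */
    printf("%llu\n", (unsigned long long)mul_shift_round(1ULL << 63, 3, 64)); /* 1.5     -> 2 */
    return 0;
}
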
src/scalar_8x32_impl.h

@@ -451,12 +451,10 @@ static void secp256k1_scalar_reduce_512(secp256k1_scalar_t *r, const uint32_t *l
     secp256k1_scalar_reduce(r, c + secp256k1_scalar_check_overflow(r));
 }
 
-static void secp256k1_scalar_mul(secp256k1_scalar_t *r, const secp256k1_scalar_t *a, const secp256k1_scalar_t *b) {
+static void secp256k1_scalar_mul_512(uint32_t l[16], const secp256k1_scalar_t *a, const secp256k1_scalar_t *b) {
     /* 96 bit accumulator. */
     uint32_t c0 = 0, c1 = 0, c2 = 0;
 
-    uint32_t l[16];
-
     /* l[0..15] = a[0..7] * b[0..7]. */
     muladd_fast(a->d[0], b->d[0]);
     extract_fast(l[0]);

@@ -539,16 +537,12 @@ static void secp256k1_scalar_mul(secp256k1_scalar_t *r, const secp256k1_scalar_t
     extract_fast(l[14]);
     VERIFY_CHECK(c1 == 0);
     l[15] = c0;
-
-    secp256k1_scalar_reduce_512(r, l);
 }
 
-static void secp256k1_scalar_sqr(secp256k1_scalar_t *r, const secp256k1_scalar_t *a) {
+static void secp256k1_scalar_sqr_512(uint32_t l[16], const secp256k1_scalar_t *a) {
     /* 96 bit accumulator. */
     uint32_t c0 = 0, c1 = 0, c2 = 0;
 
-    uint32_t l[16];
-
     /* l[0..15] = a[0..7]^2. */
     muladd_fast(a->d[0], a->d[0]);
     extract_fast(l[0]);

@@ -603,8 +597,6 @@ static void secp256k1_scalar_sqr(secp256k1_scalar_t *r, const secp256k1_scalar_t
     extract_fast(l[14]);
     VERIFY_CHECK(c1 == 0);
     l[15] = c0;
-
-    secp256k1_scalar_reduce_512(r, l);
 }
 
 #undef sumadd

@@ -615,6 +607,18 @@ static void secp256k1_scalar_sqr(secp256k1_scalar_t *r, const secp256k1_scalar_t
 #undef extract
 #undef extract_fast
 
+static void secp256k1_scalar_mul(secp256k1_scalar_t *r, const secp256k1_scalar_t *a, const secp256k1_scalar_t *b) {
+    uint32_t l[16];
+    secp256k1_scalar_mul_512(l, a, b);
+    secp256k1_scalar_reduce_512(r, l);
+}
+
+static void secp256k1_scalar_sqr(secp256k1_scalar_t *r, const secp256k1_scalar_t *a) {
+    uint32_t l[16];
+    secp256k1_scalar_sqr_512(l, a);
+    secp256k1_scalar_reduce_512(r, l);
+}
+
 static void secp256k1_scalar_split_128(secp256k1_scalar_t *r1, secp256k1_scalar_t *r2, const secp256k1_scalar_t *a) {
     r1->d[0] = a->d[0];
     r1->d[1] = a->d[1];

@@ -638,4 +642,24 @@ SECP256K1_INLINE static int secp256k1_scalar_eq(const secp256k1_scalar_t *a, con
     return ((a->d[0] ^ b->d[0]) | (a->d[1] ^ b->d[1]) | (a->d[2] ^ b->d[2]) | (a->d[3] ^ b->d[3]) | (a->d[4] ^ b->d[4]) | (a->d[5] ^ b->d[5]) | (a->d[6] ^ b->d[6]) | (a->d[7] ^ b->d[7])) == 0;
 }
 
+SECP256K1_INLINE static void secp256k1_scalar_mul_shift_var(secp256k1_scalar_t *r, const secp256k1_scalar_t *a, const secp256k1_scalar_t *b, unsigned int shift) {
+    VERIFY_CHECK(shift >= 256);
+    uint32_t l[16];
+    secp256k1_scalar_mul_512(l, a, b);
+    unsigned int shiftlimbs = shift >> 5;
+    unsigned int shiftlow = shift & 0x1F;
+    unsigned int shifthigh = 32 - shiftlow;
+    r->d[0] = shift < 512 ? (l[0 + shiftlimbs] >> shiftlow | (shift < 480 && shiftlow ? (l[1 + shiftlimbs] << shifthigh) : 0)) : 0;
+    r->d[1] = shift < 480 ? (l[1 + shiftlimbs] >> shiftlow | (shift < 448 && shiftlow ? (l[2 + shiftlimbs] << shifthigh) : 0)) : 0;
+    r->d[2] = shift < 448 ? (l[2 + shiftlimbs] >> shiftlow | (shift < 416 && shiftlow ? (l[3 + shiftlimbs] << shifthigh) : 0)) : 0;
+    r->d[3] = shift < 416 ? (l[3 + shiftlimbs] >> shiftlow | (shift < 384 && shiftlow ? (l[4 + shiftlimbs] << shifthigh) : 0)) : 0;
+    r->d[4] = shift < 384 ? (l[4 + shiftlimbs] >> shiftlow | (shift < 352 && shiftlow ? (l[5 + shiftlimbs] << shifthigh) : 0)) : 0;
+    r->d[5] = shift < 352 ? (l[5 + shiftlimbs] >> shiftlow | (shift < 320 && shiftlow ? (l[6 + shiftlimbs] << shifthigh) : 0)) : 0;
+    r->d[6] = shift < 320 ? (l[6 + shiftlimbs] >> shiftlow | (shift < 288 && shiftlow ? (l[7 + shiftlimbs] << shifthigh) : 0)) : 0;
+    r->d[7] = shift < 288 ? (l[7 + shiftlimbs] >> shiftlow) : 0;
+    if ((l[(shift - 1) >> 5] >> ((shift - 1) & 0x1f)) & 1) {
+        secp256k1_scalar_add_bit(r, 0);
+    }
+}
+
 #endif

src/scalar_impl.h

@@ -29,7 +29,7 @@ typedef struct {
     secp256k1_num_t order;
 #endif
 #ifdef USE_ENDOMORPHISM
-    secp256k1_num_t a1b2, b1, a2;
+    secp256k1_scalar_t minus_lambda, minus_b1, minus_b2, g1, g2;
 #endif
 } secp256k1_scalar_consts_t;
 

@@ -52,23 +52,78 @@ static void secp256k1_scalar_start(void) {
     secp256k1_num_set_bin(&ret->order, secp256k1_scalar_consts_order, sizeof(secp256k1_scalar_consts_order));
 #endif
 #ifdef USE_ENDOMORPHISM
-    static const unsigned char secp256k1_scalar_consts_a1b2[] = {
-        0x30,0x86,0xd2,0x21,0xa7,0xd4,0x6b,0xcd,
-        0xe8,0x6c,0x90,0xe4,0x92,0x84,0xeb,0x15
-    };
-    static const unsigned char secp256k1_scalar_consts_b1[] = {
-        0xe4,0x43,0x7e,0xd6,0x01,0x0e,0x88,0x28,
-        0x6f,0x54,0x7f,0xa9,0x0a,0xbf,0xe4,0xc3
-    };
-    static const unsigned char secp256k1_scalar_consts_a2[] = {
-        0x01,
-        0x14,0xca,0x50,0xf7,0xa8,0xe2,0xf3,0xf6,
-        0x57,0xc1,0x10,0x8d,0x9d,0x44,0xcf,0xd8
-    };
-
-    secp256k1_num_set_bin(&ret->a1b2, secp256k1_scalar_consts_a1b2, sizeof(secp256k1_scalar_consts_a1b2));
-    secp256k1_num_set_bin(&ret->a2, secp256k1_scalar_consts_a2, sizeof(secp256k1_scalar_consts_a2));
-    secp256k1_num_set_bin(&ret->b1, secp256k1_scalar_consts_b1, sizeof(secp256k1_scalar_consts_b1));
+    /**
+     * Lambda is a scalar which has the property for secp256k1 that point multiplication by
+     * it is efficiently computable (see secp256k1_gej_mul_lambda). */
+    static const unsigned char secp256k1_scalar_consts_lambda[32] = {
+        0x53,0x63,0xad,0x4c,0xc0,0x5c,0x30,0xe0,
+        0xa5,0x26,0x1c,0x02,0x88,0x12,0x64,0x5a,
+        0x12,0x2e,0x22,0xea,0x20,0x81,0x66,0x78,
+        0xdf,0x02,0x96,0x7c,0x1b,0x23,0xbd,0x72
+    };
+    /**
+     * "Guide to Elliptic Curve Cryptography" (Hankerson, Menezes, Vanstone) gives an algorithm
+     * (algorithm 3.74) to find k1 and k2 given k, such that k1 + k2 * lambda == k mod n, and k1
+     * and k2 have a small size.
+     * It relies on constants a1, b1, a2, b2. These constants for the value of lambda above are:
+     *
+     * - a1 = {0x30,0x86,0xd2,0x21,0xa7,0xd4,0x6b,0xcd,0xe8,0x6c,0x90,0xe4,0x92,0x84,0xeb,0x15}
+     * - b1 = -{0xe4,0x43,0x7e,0xd6,0x01,0x0e,0x88,0x28,0x6f,0x54,0x7f,0xa9,0x0a,0xbf,0xe4,0xc3}
+     * - a2 = {0x01,0x14,0xca,0x50,0xf7,0xa8,0xe2,0xf3,0xf6,0x57,0xc1,0x10,0x8d,0x9d,0x44,0xcf,0xd8}
+     * - b2 = {0x30,0x86,0xd2,0x21,0xa7,0xd4,0x6b,0xcd,0xe8,0x6c,0x90,0xe4,0x92,0x84,0xeb,0x15}
+     *
+     * The algorithm then computes c1 = round(b1 * k / n) and c2 = round(b2 * k / n), and gives
+     * k1 = k - (c1*a1 + c2*a2) and k2 = -(c1*b1 + c2*b2). Instead, we use modular arithmetic, and
+     * compute k1 as k - k2 * lambda, avoiding the need for constants a1 and a2.
+     *
+     * g1, g2 are precomputed constants used to replace division with a rounded multiplication
+     * when decomposing the scalar for an endomorphism-based point multiplication.
+     *
+     * The possibility of using precomputed estimates is mentioned in "Guide to Elliptic Curve
+     * Cryptography" (Hankerson, Menezes, Vanstone) in section 3.5.
+     *
+     * The derivation is described in the paper "Efficient Software Implementation of Public-Key
+     * Cryptography on Sensor Networks Using the MSP430X Microcontroller" (Gouvea, Oliveira, Lopez),
+     * Section 4.3 (here we use a somewhat higher-precision estimate):
+     * d = a1*b2 - b1*a2
+     * g1 = round((2^272)*b2/d)
+     * g2 = round((2^272)*b1/d)
+     *
+     * (Note that 'd' is also equal to the curve order here because [a1,b1] and [a2,b2] are found
+     * as outputs of the Extended Euclidean Algorithm on inputs 'order' and 'lambda').
+     */
+    static const unsigned char secp256k1_scalar_consts_minus_b1[32] = {
+        0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
+        0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
+        0xe4,0x43,0x7e,0xd6,0x01,0x0e,0x88,0x28,
+        0x6f,0x54,0x7f,0xa9,0x0a,0xbf,0xe4,0xc3
+    };
+    static const unsigned char secp256k1_scalar_consts_b2[32] = {
+        0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
+        0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
+        0x30,0x86,0xd2,0x21,0xa7,0xd4,0x6b,0xcd,
+        0xe8,0x6c,0x90,0xe4,0x92,0x84,0xeb,0x15
+    };
+    static const unsigned char secp256k1_scalar_consts_g1[32] = {
+        0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
+        0x00,0x00,0x00,0x00,0x00,0x00,0x30,0x86,
+        0xd2,0x21,0xa7,0xd4,0x6b,0xcd,0xe8,0x6c,
+        0x90,0xe4,0x92,0x84,0xeb,0x15,0x3d,0xab
+    };
+    static const unsigned char secp256k1_scalar_consts_g2[32] = {
+        0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
+        0x00,0x00,0x00,0x00,0x00,0x00,0xe4,0x43,
+        0x7e,0xd6,0x01,0x0e,0x88,0x28,0x6f,0x54,
+        0x7f,0xa9,0x0a,0xbf,0xe4,0xc4,0x22,0x12
+    };
+
+    secp256k1_scalar_set_b32(&ret->minus_lambda, secp256k1_scalar_consts_lambda, NULL);
+    secp256k1_scalar_negate(&ret->minus_lambda, &ret->minus_lambda);
+    secp256k1_scalar_set_b32(&ret->minus_b1, secp256k1_scalar_consts_minus_b1, NULL);
+    secp256k1_scalar_set_b32(&ret->minus_b2, secp256k1_scalar_consts_b2, NULL);
+    secp256k1_scalar_negate(&ret->minus_b2, &ret->minus_b2);
+    secp256k1_scalar_set_b32(&ret->g1, secp256k1_scalar_consts_g1, NULL);
+    secp256k1_scalar_set_b32(&ret->g2, secp256k1_scalar_consts_g2, NULL);
 #endif
 
     /* Set the global pointer. */

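The g1/g2 comment above describes the core trick: instead of dividing by the group order n at runtime, multiply by a precomputed fixed-point reciprocal with 272 fractional bits and round. A miniature of the same idea with small, hypothetical values and only 16 fractional bits (illustration only; the real code uses 256-bit scalars and secp256k1_scalar_mul_shift_var):

#include <stdint.h>
#include <stdio.h>

int main(void) {
    /* Goal: round(k*b/d) without dividing by d at runtime. */
    uint64_t d = 1000, b = 37, k = 123456;                  /* toy stand-ins for n, b2, k */
    uint64_t g = ((UINT64_C(1) << 16) * b + d / 2) / d;     /* precompute round(2^16*b/d) = 2425 */
    uint64_t exact  = (k * b + d / 2) / d;                  /* round(k*b/d)    = 4568 */
    uint64_t approx = (k * g + (UINT64_C(1) << 15)) >> 16;  /* round(k*g/2^16) = 4568 */
    printf("exact=%llu approx=%llu\n", (unsigned long long)exact, (unsigned long long)approx);
    return 0;
}
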
@@ -265,45 +320,16 @@ static void secp256k1_scalar_inverse_var(secp256k1_scalar_t *r, const secp256k1_
 
 #ifdef USE_ENDOMORPHISM
 static void secp256k1_scalar_split_lambda_var(secp256k1_scalar_t *r1, secp256k1_scalar_t *r2, const secp256k1_scalar_t *a) {
-    unsigned char b[32];
-    secp256k1_scalar_get_b32(b, a);
-    secp256k1_num_t na;
-    secp256k1_num_set_bin(&na, b, 32);
-
-    secp256k1_num_t rn1, rn2;
-
-    const secp256k1_scalar_consts_t *c = secp256k1_scalar_consts;
-    secp256k1_num_t bnc1, bnc2, bnt1, bnt2, bnn2;
-
-    secp256k1_num_copy(&bnn2, &c->order);
-    secp256k1_num_shift(&bnn2, 1);
-
-    secp256k1_num_mul(&bnc1, &na, &c->a1b2);
-    secp256k1_num_add(&bnc1, &bnc1, &bnn2);
-    secp256k1_num_div(&bnc1, &bnc1, &c->order);
-
-    secp256k1_num_mul(&bnc2, &na, &c->b1);
-    secp256k1_num_add(&bnc2, &bnc2, &bnn2);
-    secp256k1_num_div(&bnc2, &bnc2, &c->order);
-
-    secp256k1_num_mul(&bnt1, &bnc1, &c->a1b2);
-    secp256k1_num_mul(&bnt2, &bnc2, &c->a2);
-    secp256k1_num_add(&bnt1, &bnt1, &bnt2);
-    secp256k1_num_sub(&rn1, &na, &bnt1);
-    secp256k1_num_mul(&bnt1, &bnc1, &c->b1);
-    secp256k1_num_mul(&bnt2, &bnc2, &c->a1b2);
-    secp256k1_num_sub(&rn2, &bnt1, &bnt2);
-
-    secp256k1_num_get_bin(b, 32, &rn1);
-    secp256k1_scalar_set_b32(r1, b, NULL);
-    if (secp256k1_num_is_neg(&rn1)) {
-        secp256k1_scalar_negate(r1, r1);
-    }
-    secp256k1_num_get_bin(b, 32, &rn2);
-    secp256k1_scalar_set_b32(r2, b, NULL);
-    if (secp256k1_num_is_neg(&rn2)) {
-        secp256k1_scalar_negate(r2, r2);
-    }
+    VERIFY_CHECK(r1 != a);
+    VERIFY_CHECK(r2 != a);
+    secp256k1_scalar_t c1, c2;
+    secp256k1_scalar_mul_shift_var(&c1, a, &secp256k1_scalar_consts->g1, 272);
+    secp256k1_scalar_mul_shift_var(&c2, a, &secp256k1_scalar_consts->g2, 272);
+    secp256k1_scalar_mul(&c1, &c1, &secp256k1_scalar_consts->minus_b1);
+    secp256k1_scalar_mul(&c2, &c2, &secp256k1_scalar_consts->minus_b2);
+    secp256k1_scalar_add(r2, &c1, &c2);
+    secp256k1_scalar_mul(r1, r2, &secp256k1_scalar_consts->minus_lambda);
+    secp256k1_scalar_add(r1, r1, a);
 }
 #endif
 

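Since r1 is computed as a - r2*lambda (mod n), the decomposition can be checked by recombining the halves. A minimal sketch using only scalar functions that appear in this diff, with r1, r2, a as in split_lambda_var's signature (not part of the change itself):

    secp256k1_scalar_t t;
    secp256k1_scalar_mul(&t, r2, &secp256k1_scalar_consts->minus_lambda); /* t = -r2*lambda      */
    secp256k1_scalar_negate(&t, &t);                                      /* t =  r2*lambda      */
    secp256k1_scalar_add(&t, &t, r1);                                     /* t = r1 + r2*lambda  */
    VERIFY_CHECK(secp256k1_scalar_eq(&t, a));                             /* must equal the input */
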
src/tests.c (40 changed lines)

@@ -109,26 +109,6 @@ void random_num_order(secp256k1_num_t *num) {
     secp256k1_scalar_get_num(num, &sc);
 }
 
-void test_num_get_set_bin(void) {
-    secp256k1_num_t n1,n2;
-    random_num_order_test(&n1);
-    unsigned char c[32];
-    secp256k1_num_get_bin(c, 32, &n1);
-    secp256k1_num_set_bin(&n2, c, 32);
-    CHECK(secp256k1_num_eq(&n1, &n2));
-    for (int i=0; i<32; i++) {
-        /* check whether the lower 8 bits correspond to the last byte */
-        int low1 = secp256k1_num_shift(&n1, 8);
-        int low2 = c[31];
-        CHECK(low1 == low2);
-        /* shift bits off the byte representation, and compare */
-        memmove(c+1, c, 31);
-        c[0] = 0;
-        secp256k1_num_set_bin(&n2, c, 32);
-        CHECK(secp256k1_num_eq(&n1, &n2));
-    }
-}
-
 void test_num_negate(void) {
     secp256k1_num_t n1;
     secp256k1_num_t n2;

@@ -180,7 +160,6 @@ void test_num_add_sub(void) {
 
 void run_num_smalltests(void) {
     for (int i=0; i<100*count; i++) {
-        test_num_get_set_bin();
         test_num_negate();
         test_num_add_sub();
     }

@@ -308,6 +287,24 @@ void scalar_test(void) {
         /* Negating zero should still result in zero. */
         CHECK(secp256k1_scalar_is_zero(&neg));
     }
+
+    {
+        /* Test secp256k1_scalar_mul_shift_var. */
+        secp256k1_scalar_t r;
+        unsigned int shift = 256 + (secp256k1_rand32() % 257);
+        secp256k1_scalar_mul_shift_var(&r, &s1, &s2, shift);
+        secp256k1_num_t rnum;
+        secp256k1_num_mul(&rnum, &s1num, &s2num);
+        secp256k1_num_shift(&rnum, shift - 1);
+        secp256k1_num_t one;
+        unsigned char cone[1] = {0x01};
+        secp256k1_num_set_bin(&one, cone, 1);
+        secp256k1_num_add(&rnum, &rnum, &one);
+        secp256k1_num_shift(&rnum, 1);
+        secp256k1_num_t rnum2;
+        secp256k1_scalar_get_num(&rnum2, &r);
+        CHECK(secp256k1_num_eq(&rnum, &rnum2));
+    }
 #endif
 
     {

@@ -403,6 +400,7 @@ void scalar_test(void) {
         secp256k1_scalar_mul(&r2, &s1, &s1);
         CHECK(secp256k1_scalar_eq(&r1, &r2));
     }
+
 }
 
 void run_scalar_tests(void) {
