From ff8746d457ad20c624fa9cec3e341ff1b723a3aa Mon Sep 17 00:00:00 2001 From: Pieter Wuille Date: Mon, 1 Dec 2014 17:11:59 +0100 Subject: [PATCH 1/3] Add secp256k1_scalar_mul_shift_var --- src/num.h | 4 ++-- src/num_gmp_impl.h | 23 ++++++++++++++++------ src/scalar.h | 3 +++ src/scalar_4x64_impl.h | 40 ++++++++++++++++++++++++++++---------- src/scalar_8x32_impl.h | 44 ++++++++++++++++++++++++++++++++---------- src/tests.c | 40 ++++++++++++++++++-------------------- 6 files changed, 105 insertions(+), 49 deletions(-) diff --git a/src/num.h b/src/num.h index 9e073b4a598..b65443dbcc2 100644 --- a/src/num.h +++ b/src/num.h @@ -54,8 +54,8 @@ static void secp256k1_num_div(secp256k1_num_t *r, const secp256k1_num_t *a, cons even if r was negative. */ static void secp256k1_num_mod(secp256k1_num_t *r, const secp256k1_num_t *m); -/** Right-shift the passed number by bits bits, and return those bits. */ -static int secp256k1_num_shift(secp256k1_num_t *r, int bits); +/** Right-shift the passed number by bits bits. */ +static void secp256k1_num_shift(secp256k1_num_t *r, int bits); /** Check whether a number is zero. */ static int secp256k1_num_is_zero(const secp256k1_num_t *a); diff --git a/src/num_gmp_impl.h b/src/num_gmp_impl.h index 420a42d1b47..7848493ac61 100644 --- a/src/num_gmp_impl.h +++ b/src/num_gmp_impl.h @@ -225,12 +225,23 @@ static void secp256k1_num_div(secp256k1_num_t *r, const secp256k1_num_t *a, cons r->neg = a->neg ^ b->neg; } -static int secp256k1_num_shift(secp256k1_num_t *r, int bits) { - VERIFY_CHECK(bits <= GMP_NUMB_BITS); - mp_limb_t ret = mpn_rshift(r->data, r->data, r->limbs, bits); - if (r->limbs>1 && r->data[r->limbs-1]==0) r->limbs--; - ret >>= (GMP_NUMB_BITS - bits); - return ret; +static void secp256k1_num_shift(secp256k1_num_t *r, int bits) { + if (bits % GMP_NUMB_BITS) { + // Shift within limbs. + mpn_rshift(r->data, r->data, r->limbs, bits % GMP_NUMB_BITS); + } + if (bits >= GMP_NUMB_BITS) { + // Shift full limbs. + for (int i = 0; i < r->limbs; i++) { + int index = i + (bits / GMP_NUMB_BITS); + if (index < r->limbs && index < 2*NUM_LIMBS) { + r->data[i] = r->data[index]; + } else { + r->data[i] = 0; + } + } + } + while (r->limbs>1 && r->data[r->limbs-1]==0) r->limbs--; } static void secp256k1_num_negate(secp256k1_num_t *r) { diff --git a/src/scalar.h b/src/scalar.h index b5c43b45bfe..df0b40ea138 100644 --- a/src/scalar.h +++ b/src/scalar.h @@ -90,4 +90,7 @@ static void secp256k1_scalar_split_128(secp256k1_scalar_t *r1, secp256k1_scalar_ static void secp256k1_scalar_split_lambda_var(secp256k1_scalar_t *r1, secp256k1_scalar_t *r2, const secp256k1_scalar_t *a); #endif +/** Multiply a and b (without taking the modulus!), divide by 2**shift, and round to the nearest integer. Shift must be at least 256. */ +static void secp256k1_scalar_mul_shift_var(secp256k1_scalar_t *r, const secp256k1_scalar_t *a, const secp256k1_scalar_t *b, unsigned int shift); + #endif diff --git a/src/scalar_4x64_impl.h b/src/scalar_4x64_impl.h index afa7a540181..07864917b19 100644 --- a/src/scalar_4x64_impl.h +++ b/src/scalar_4x64_impl.h @@ -314,13 +314,11 @@ static void secp256k1_scalar_reduce_512(secp256k1_scalar_t *r, const uint64_t *l secp256k1_scalar_reduce(r, c + secp256k1_scalar_check_overflow(r)); } -static void secp256k1_scalar_mul(secp256k1_scalar_t *r, const secp256k1_scalar_t *a, const secp256k1_scalar_t *b) { +static void secp256k1_scalar_mul_512(uint64_t l[8], const secp256k1_scalar_t *a, const secp256k1_scalar_t *b) { /* 160 bit accumulator. 
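The accumulator is the triple (c0, c1, c2) declared below: 64 + 64 + 32 bits. Each muladd feeds it one full 128-bit product of two 64-bit limbs, and no output limb receives more than four such products (plus the carry left over from extracting the previous limb), so 160 bits cannot overflow.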
*/ uint64_t c0 = 0, c1 = 0; uint32_t c2 = 0; - uint64_t l[8]; - /* l[0..7] = a[0..3] * b[0..3]. */ muladd_fast(a->d[0], b->d[0]); extract_fast(l[0]); @@ -347,17 +345,13 @@ static void secp256k1_scalar_mul(secp256k1_scalar_t *r, const secp256k1_scalar_t extract_fast(l[6]); VERIFY_CHECK(c1 <= 0); l[7] = c0; - - secp256k1_scalar_reduce_512(r, l); } -static void secp256k1_scalar_sqr(secp256k1_scalar_t *r, const secp256k1_scalar_t *a) { +static void secp256k1_scalar_sqr_512(uint64_t l[8], const secp256k1_scalar_t *a) { /* 160 bit accumulator. */ uint64_t c0 = 0, c1 = 0; uint32_t c2 = 0; - uint64_t l[8]; - /* l[0..7] = a[0..3] * b[0..3]. */ muladd_fast(a->d[0], a->d[0]); extract_fast(l[0]); @@ -378,8 +372,6 @@ static void secp256k1_scalar_sqr(secp256k1_scalar_t *r, const secp256k1_scalar_t extract_fast(l[6]); VERIFY_CHECK(c1 == 0); l[7] = c0; - - secp256k1_scalar_reduce_512(r, l); } #undef sumadd @@ -390,6 +382,18 @@ static void secp256k1_scalar_sqr(secp256k1_scalar_t *r, const secp256k1_scalar_t #undef extract #undef extract_fast +static void secp256k1_scalar_mul(secp256k1_scalar_t *r, const secp256k1_scalar_t *a, const secp256k1_scalar_t *b) { + uint64_t l[8]; + secp256k1_scalar_mul_512(l, a, b); + secp256k1_scalar_reduce_512(r, l); +} + +static void secp256k1_scalar_sqr(secp256k1_scalar_t *r, const secp256k1_scalar_t *a) { + uint64_t l[8]; + secp256k1_scalar_sqr_512(l, a); + secp256k1_scalar_reduce_512(r, l); +} + static void secp256k1_scalar_split_128(secp256k1_scalar_t *r1, secp256k1_scalar_t *r2, const secp256k1_scalar_t *a) { r1->d[0] = a->d[0]; r1->d[1] = a->d[1]; @@ -405,4 +409,20 @@ SECP256K1_INLINE static int secp256k1_scalar_eq(const secp256k1_scalar_t *a, con return ((a->d[0] ^ b->d[0]) | (a->d[1] ^ b->d[1]) | (a->d[2] ^ b->d[2]) | (a->d[3] ^ b->d[3])) == 0; } +SECP256K1_INLINE static void secp256k1_scalar_mul_shift_var(secp256k1_scalar_t *r, const secp256k1_scalar_t *a, const secp256k1_scalar_t *b, unsigned int shift) { + VERIFY_CHECK(shift >= 256); + uint64_t l[8]; + secp256k1_scalar_mul_512(l, a, b); + unsigned int shiftlimbs = shift >> 6; + unsigned int shiftlow = shift & 0x3F; + unsigned int shifthigh = 64 - shiftlow; + r->d[0] = shift < 512 ? (l[0 + shiftlimbs] >> shiftlow | (shift < 448 && shiftlow ? (l[1 + shiftlimbs] << shifthigh) : 0)) : 0; + r->d[1] = shift < 448 ? (l[1 + shiftlimbs] >> shiftlow | (shift < 384 && shiftlow ? (l[2 + shiftlimbs] << shifthigh) : 0)) : 0; + r->d[2] = shift < 384 ? (l[2 + shiftlimbs] >> shiftlow | (shift < 320 && shiftlow ? (l[3 + shiftlimbs] << shifthigh) : 0)) : 0; + r->d[3] = shift < 320 ? (l[3 + shiftlimbs] >> shiftlow) : 0; + if ((l[(shift - 1) >> 6] >> ((shift - 1) & 0x3f)) & 1) { + secp256k1_scalar_add_bit(r, 0); + } +} + #endif diff --git a/src/scalar_8x32_impl.h b/src/scalar_8x32_impl.h index 0f82bfbb0b6..a32abbeb4df 100644 --- a/src/scalar_8x32_impl.h +++ b/src/scalar_8x32_impl.h @@ -451,12 +451,10 @@ static void secp256k1_scalar_reduce_512(secp256k1_scalar_t *r, const uint32_t *l secp256k1_scalar_reduce(r, c + secp256k1_scalar_check_overflow(r)); } -static void secp256k1_scalar_mul(secp256k1_scalar_t *r, const secp256k1_scalar_t *a, const secp256k1_scalar_t *b) { +static void secp256k1_scalar_mul_512(uint32_t l[16], const secp256k1_scalar_t *a, const secp256k1_scalar_t *b) { /* 96 bit accumulator. */ uint32_t c0 = 0, c1 = 0, c2 = 0; - uint32_t l[16]; - /* l[0..15] = a[0..7] * b[0..7]. 
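Each of the 16 product limbs computed below accumulates at most eight 64-bit products of 32-bit limbs plus the carry from the previous limb, which is why the 96-bit accumulator (c0, c1, c2) above is wide enough.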
*/ muladd_fast(a->d[0], b->d[0]); extract_fast(l[0]); @@ -539,16 +537,12 @@ static void secp256k1_scalar_mul(secp256k1_scalar_t *r, const secp256k1_scalar_t extract_fast(l[14]); VERIFY_CHECK(c1 == 0); l[15] = c0; - - secp256k1_scalar_reduce_512(r, l); } -static void secp256k1_scalar_sqr(secp256k1_scalar_t *r, const secp256k1_scalar_t *a) { +static void secp256k1_scalar_sqr_512(uint32_t l[16], const secp256k1_scalar_t *a) { /* 96 bit accumulator. */ uint32_t c0 = 0, c1 = 0, c2 = 0; - uint32_t l[16]; - /* l[0..15] = a[0..7]^2. */ muladd_fast(a->d[0], a->d[0]); extract_fast(l[0]); @@ -603,8 +597,6 @@ static void secp256k1_scalar_sqr(secp256k1_scalar_t *r, const secp256k1_scalar_t extract_fast(l[14]); VERIFY_CHECK(c1 == 0); l[15] = c0; - - secp256k1_scalar_reduce_512(r, l); } #undef sumadd @@ -615,6 +607,18 @@ static void secp256k1_scalar_sqr(secp256k1_scalar_t *r, const secp256k1_scalar_t #undef extract #undef extract_fast +static void secp256k1_scalar_mul(secp256k1_scalar_t *r, const secp256k1_scalar_t *a, const secp256k1_scalar_t *b) { + uint32_t l[16]; + secp256k1_scalar_mul_512(l, a, b); + secp256k1_scalar_reduce_512(r, l); +} + +static void secp256k1_scalar_sqr(secp256k1_scalar_t *r, const secp256k1_scalar_t *a) { + uint32_t l[16]; + secp256k1_scalar_sqr_512(l, a); + secp256k1_scalar_reduce_512(r, l); +} + static void secp256k1_scalar_split_128(secp256k1_scalar_t *r1, secp256k1_scalar_t *r2, const secp256k1_scalar_t *a) { r1->d[0] = a->d[0]; r1->d[1] = a->d[1]; @@ -638,4 +642,24 @@ SECP256K1_INLINE static int secp256k1_scalar_eq(const secp256k1_scalar_t *a, con return ((a->d[0] ^ b->d[0]) | (a->d[1] ^ b->d[1]) | (a->d[2] ^ b->d[2]) | (a->d[3] ^ b->d[3]) | (a->d[4] ^ b->d[4]) | (a->d[5] ^ b->d[5]) | (a->d[6] ^ b->d[6]) | (a->d[7] ^ b->d[7])) == 0; } +SECP256K1_INLINE static void secp256k1_scalar_mul_shift_var(secp256k1_scalar_t *r, const secp256k1_scalar_t *a, const secp256k1_scalar_t *b, unsigned int shift) { + VERIFY_CHECK(shift >= 256); + uint32_t l[16]; + secp256k1_scalar_mul_512(l, a, b); + unsigned int shiftlimbs = shift >> 5; + unsigned int shiftlow = shift & 0x1F; + unsigned int shifthigh = 32 - shiftlow; + r->d[0] = shift < 512 ? (l[0 + shiftlimbs] >> shiftlow | (shift < 480 && shiftlow ? (l[1 + shiftlimbs] << shifthigh) : 0)) : 0; + r->d[1] = shift < 480 ? (l[1 + shiftlimbs] >> shiftlow | (shift < 448 && shiftlow ? (l[2 + shiftlimbs] << shifthigh) : 0)) : 0; + r->d[2] = shift < 448 ? (l[2 + shiftlimbs] >> shiftlow | (shift < 416 && shiftlow ? (l[3 + shiftlimbs] << shifthigh) : 0)) : 0; + r->d[3] = shift < 416 ? (l[3 + shiftlimbs] >> shiftlow | (shift < 384 && shiftlow ? (l[4 + shiftlimbs] << shifthigh) : 0)) : 0; + r->d[4] = shift < 384 ? (l[4 + shiftlimbs] >> shiftlow | (shift < 352 && shiftlow ? (l[5 + shiftlimbs] << shifthigh) : 0)) : 0; + r->d[5] = shift < 352 ? (l[5 + shiftlimbs] >> shiftlow | (shift < 320 && shiftlow ? (l[6 + shiftlimbs] << shifthigh) : 0)) : 0; + r->d[6] = shift < 320 ? (l[6 + shiftlimbs] >> shiftlow | (shift < 288 && shiftlow ? (l[7 + shiftlimbs] << shifthigh) : 0)) : 0; + r->d[7] = shift < 288 ? 
(l[7 + shiftlimbs] >> shiftlow) : 0; + if ((l[(shift - 1) >> 5] >> ((shift - 1) & 0x1f)) & 1) { + secp256k1_scalar_add_bit(r, 0); + } +} + #endif diff --git a/src/tests.c b/src/tests.c index dc0fce2fc69..0836e1cfe04 100644 --- a/src/tests.c +++ b/src/tests.c @@ -109,26 +109,6 @@ void random_num_order(secp256k1_num_t *num) { secp256k1_scalar_get_num(num, &sc); } -void test_num_get_set_bin(void) { - secp256k1_num_t n1,n2; - random_num_order_test(&n1); - unsigned char c[32]; - secp256k1_num_get_bin(c, 32, &n1); - secp256k1_num_set_bin(&n2, c, 32); - CHECK(secp256k1_num_eq(&n1, &n2)); - for (int i=0; i<32; i++) { - /* check whether the lower 8 bits correspond to the last byte */ - int low1 = secp256k1_num_shift(&n1, 8); - int low2 = c[31]; - CHECK(low1 == low2); - /* shift bits off the byte representation, and compare */ - memmove(c+1, c, 31); - c[0] = 0; - secp256k1_num_set_bin(&n2, c, 32); - CHECK(secp256k1_num_eq(&n1, &n2)); - } -} - void test_num_negate(void) { secp256k1_num_t n1; secp256k1_num_t n2; @@ -180,7 +160,6 @@ void test_num_add_sub(void) { void run_num_smalltests(void) { for (int i=0; i<100*count; i++) { - test_num_get_set_bin(); test_num_negate(); test_num_add_sub(); } @@ -308,6 +287,24 @@ void scalar_test(void) { /* Negating zero should still result in zero. */ CHECK(secp256k1_scalar_is_zero(&neg)); } + + { + /* Test secp256k1_scalar_mul_shift_var. */ + secp256k1_scalar_t r; + unsigned int shift = 256 + (secp256k1_rand32() % 257); + secp256k1_scalar_mul_shift_var(&r, &s1, &s2, shift); + secp256k1_num_t rnum; + secp256k1_num_mul(&rnum, &s1num, &s2num); + secp256k1_num_shift(&rnum, shift - 1); + secp256k1_num_t one; + unsigned char cone[1] = {0x01}; + secp256k1_num_set_bin(&one, cone, 1); + secp256k1_num_add(&rnum, &rnum, &one); + secp256k1_num_shift(&rnum, 1); + secp256k1_num_t rnum2; + secp256k1_scalar_get_num(&rnum2, &r); + CHECK(secp256k1_num_eq(&rnum, &rnum2)); + } #endif { @@ -403,6 +400,7 @@ void scalar_test(void) { secp256k1_scalar_mul(&r2, &s1, &s1); CHECK(secp256k1_scalar_eq(&r1, &r2)); } + } void run_scalar_tests(void) { From cc604e984287765e0c616cd907329c1f2dc7f342 Mon Sep 17 00:00:00 2001 From: Peter Dettman Date: Sat, 15 Nov 2014 23:04:02 +0700 Subject: [PATCH 2/3] Avoid division when decomposing scalars - In secp256k1_gej_split_exp, there are two divisions used. Since the denominator is a constant known at compile-time, each can be replaced by a multiplication followed by a right-shift (and rounding). - Add the constants g1, g2 for this purpose and rewrite secp256k1_scalar_split_lambda_var accordingly. - Remove secp256k1_num_div since no longer used Rebased-by: Pieter Wuille --- src/ecmult_impl.h | 2 ++ src/num.h | 3 --- src/num_gmp_impl.h | 19 ------------- src/scalar_impl.h | 66 +++++++++++++++++++++++++++++++++------------- 4 files changed, 50 insertions(+), 40 deletions(-) diff --git a/src/ecmult_impl.h b/src/ecmult_impl.h index 56e46cdf808..bbf731c88a3 100644 --- a/src/ecmult_impl.h +++ b/src/ecmult_impl.h @@ -170,6 +170,8 @@ static void secp256k1_ecmult(secp256k1_gej_t *r, const secp256k1_gej_t *a, const /* build wnaf representation for na_1 and na_lam. 
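Both na_1 and na_lam are roughly 128-bit values produced by the lambda decomposition above, so each wNAF here is about half the length the full 256-bit scalar would need; walking two short wNAFs over the point and its lambda-image is where the endomorphism saves work.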
*/ int wnaf_na_1[129]; int bits_na_1 = secp256k1_ecmult_wnaf(wnaf_na_1, &na_1, WINDOW_A); int wnaf_na_lam[129]; int bits_na_lam = secp256k1_ecmult_wnaf(wnaf_na_lam, &na_lam, WINDOW_A); + VERIFY_CHECK(bits_na_1 <= 129); + VERIFY_CHECK(bits_na_lam <= 129); int bits = bits_na_1; if (bits_na_lam > bits) bits = bits_na_lam; #else diff --git a/src/num.h b/src/num.h index b65443dbcc2..339b6bb6ec2 100644 --- a/src/num.h +++ b/src/num.h @@ -47,9 +47,6 @@ static void secp256k1_num_sub(secp256k1_num_t *r, const secp256k1_num_t *a, cons /** Multiply two (signed) numbers. */ static void secp256k1_num_mul(secp256k1_num_t *r, const secp256k1_num_t *a, const secp256k1_num_t *b); -/** Divide two (signed) numbers. */ -static void secp256k1_num_div(secp256k1_num_t *r, const secp256k1_num_t *a, const secp256k1_num_t *b); - /** Replace a number by its remainder modulo m. M's sign is ignored. The result is a number between 0 and m-1, even if r was negative. */ static void secp256k1_num_mod(secp256k1_num_t *r, const secp256k1_num_t *m); diff --git a/src/num_gmp_impl.h b/src/num_gmp_impl.h index 7848493ac61..19d474e59ff 100644 --- a/src/num_gmp_impl.h +++ b/src/num_gmp_impl.h @@ -206,25 +206,6 @@ static void secp256k1_num_mul(secp256k1_num_t *r, const secp256k1_num_t *a, cons memset(tmp, 0, sizeof(tmp)); } -static void secp256k1_num_div(secp256k1_num_t *r, const secp256k1_num_t *a, const secp256k1_num_t *b) { - secp256k1_num_sanity(a); - secp256k1_num_sanity(b); - if (b->limbs > a->limbs) { - r->limbs = 1; - r->data[0] = 0; - r->neg = 0; - return; - } - - mp_limb_t quo[2*NUM_LIMBS+1]; - mp_limb_t rem[2*NUM_LIMBS+1]; - mpn_tdiv_qr(quo, rem, 0, a->data, a->limbs, b->data, b->limbs); - mpn_copyi(r->data, quo, a->limbs - b->limbs + 1); - r->limbs = a->limbs - b->limbs + 1; - while (r->limbs > 1 && r->data[r->limbs - 1]==0) r->limbs--; - r->neg = a->neg ^ b->neg; -} - static void secp256k1_num_shift(secp256k1_num_t *r, int bits) { if (bits % GMP_NUMB_BITS) { // Shift within limbs. diff --git a/src/scalar_impl.h b/src/scalar_impl.h index 3e8d8b8c56d..df7a24e1f8c 100644 --- a/src/scalar_impl.h +++ b/src/scalar_impl.h @@ -29,7 +29,7 @@ typedef struct { secp256k1_num_t order; #endif #ifdef USE_ENDOMORPHISM - secp256k1_num_t a1b2, b1, a2; + secp256k1_num_t a1b2, b1, a2, g1, g2; #endif } secp256k1_scalar_consts_t; @@ -65,10 +65,38 @@ static void secp256k1_scalar_start(void) { 0x14,0xca,0x50,0xf7,0xa8,0xe2,0xf3,0xf6, 0x57,0xc1,0x10,0x8d,0x9d,0x44,0xcf,0xd8 }; - + /** + * g1, g2 are precomputed constants used to replace division with a rounded multiplication + * when decomposing the scalar for an endomorphism-based point multiplication. + * + * The possibility of using precomputed estimates is mentioned in "Guide to Elliptic Curve + * Cryptography" (Hankerson, Menezes, Vanstone) in section 3.5. + * + * The derivation is described in the paper "Efficient Software Implementation of Public-Key + * Cryptography on Sensor Networks Using the MSP430X Microcontroller" (Gouvea, Oliveira, Lopez), + * Section 4.3 (here we use a somewhat higher-precision estimate): + * d = a1*b2 - b1*a2 + * g1 = round((2^272)*b2/d) + * g2 = round((2^272)*b1/d) + * + * (Note that 'd' is also equal to the curve order here because [a1,b1] and [a2,b2] are found + * as outputs of the Extended Euclidean Algorithm on inputs 'order' and 'lambda'). 
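* Concretely: since d equals the group order n, multiplying the scalar by g1 (resp. g2) and shifting the
* product right by 272 bits with rounding yields round(b2*k/n) (resp. round(b1*k/n)) up to an error of at
* most one. Such an off-by-one only makes the resulting split components slightly larger; it cannot break
* the decomposition itself.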
+ */ + static const unsigned char secp256k1_scalar_consts_g1[] = { + 0x30,0x86, + 0xd2,0x21,0xa7,0xd4,0x6b,0xcd,0xe8,0x6c, + 0x90,0xe4,0x92,0x84,0xeb,0x15,0x3d,0xab + }; + static const unsigned char secp256k1_scalar_consts_g2[] = { + 0xe4,0x43, + 0x7e,0xd6,0x01,0x0e,0x88,0x28,0x6f,0x54, + 0x7f,0xa9,0x0a,0xbf,0xe4,0xc4,0x22,0x12 + }; secp256k1_num_set_bin(&ret->a1b2, secp256k1_scalar_consts_a1b2, sizeof(secp256k1_scalar_consts_a1b2)); secp256k1_num_set_bin(&ret->a2, secp256k1_scalar_consts_a2, sizeof(secp256k1_scalar_consts_a2)); secp256k1_num_set_bin(&ret->b1, secp256k1_scalar_consts_b1, sizeof(secp256k1_scalar_consts_b1)); + secp256k1_num_set_bin(&ret->g1, secp256k1_scalar_consts_g1, sizeof(secp256k1_scalar_consts_g1)); + secp256k1_num_set_bin(&ret->g2, secp256k1_scalar_consts_g2, sizeof(secp256k1_scalar_consts_g2)); #endif /* Set the global pointer. */ @@ -273,26 +301,28 @@ static void secp256k1_scalar_split_lambda_var(secp256k1_scalar_t *r1, secp256k1_ secp256k1_num_t rn1, rn2; const secp256k1_scalar_consts_t *c = secp256k1_scalar_consts; - secp256k1_num_t bnc1, bnc2, bnt1, bnt2, bnn2; + secp256k1_num_t d1, d2, t, one; + unsigned char cone[1] = {0x01}; + secp256k1_num_set_bin(&one, cone, 1); - secp256k1_num_copy(&bnn2, &c->order); - secp256k1_num_shift(&bnn2, 1); + secp256k1_num_mul(&d1, &na, &c->g1); + secp256k1_num_shift(&d1, 271); + secp256k1_num_add(&d1, &d1, &one); + secp256k1_num_shift(&d1, 1); - secp256k1_num_mul(&bnc1, &na, &c->a1b2); - secp256k1_num_add(&bnc1, &bnc1, &bnn2); - secp256k1_num_div(&bnc1, &bnc1, &c->order); + secp256k1_num_mul(&d2, &na, &c->g2); + secp256k1_num_shift(&d2, 271); + secp256k1_num_add(&d2, &d2, &one); + secp256k1_num_shift(&d2, 1); - secp256k1_num_mul(&bnc2, &na, &c->b1); - secp256k1_num_add(&bnc2, &bnc2, &bnn2); - secp256k1_num_div(&bnc2, &bnc2, &c->order); + secp256k1_num_mul(&t, &d1, &c->a1b2); + secp256k1_num_sub(&rn1, &na, &t); + secp256k1_num_mul(&t, &d2, &c->a2); + secp256k1_num_sub(&rn1, &rn1, &t); - secp256k1_num_mul(&bnt1, &bnc1, &c->a1b2); - secp256k1_num_mul(&bnt2, &bnc2, &c->a2); - secp256k1_num_add(&bnt1, &bnt1, &bnt2); - secp256k1_num_sub(&rn1, &na, &bnt1); - secp256k1_num_mul(&bnt1, &bnc1, &c->b1); - secp256k1_num_mul(&bnt2, &bnc2, &c->a1b2); - secp256k1_num_sub(&rn2, &bnt1, &bnt2); + secp256k1_num_mul(&rn2, &d1, &c->b1); + secp256k1_num_mul(&t, &d2, &c->a1b2); + secp256k1_num_sub(&rn2, &rn2, &t); secp256k1_num_get_bin(b, 32, &rn1); secp256k1_scalar_set_b32(r1, b, NULL); From c35ff1ea44ff71d600a18dd79d87aad187f7c5af Mon Sep 17 00:00:00 2001 From: Pieter Wuille Date: Mon, 1 Dec 2014 18:22:04 +0100 Subject: [PATCH 3/3] Convert lambda splitter to pure scalar code. This enables the use of the endomorphism optimization without bignum. 
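As an illustration only (nothing below is part of the diff), the idea that makes both this commit and the
previous one work is replacing a division by a fixed denominator with a multiplication by a precomputed
reciprocal estimate followed by a rounded right shift. The small standalone C program below uses arbitrary
example values for the denominator d and the precision s; as in the scalar decomposition, the estimated
quotient can differ from the exact rounded quotient by at most one, and that slack is harmless.

    #include <assert.h>
    #include <stdint.h>

    int main(void) {
        const uint64_t d = 1000;                      /* fixed example denominator */
        const unsigned s = 32;                        /* precision of the reciprocal estimate */
        const uint64_t g = ((1ULL << s) + d / 2) / d; /* g = round(2^s / d), precomputed */
        for (uint64_t x = 0; x < 1000000; x++) {
            uint64_t q_div   = (x + d / 2) / d;                  /* round(x / d) using a division */
            uint64_t q_shift = (x * g + (1ULL << (s - 1))) >> s; /* multiply + rounded shift */
            uint64_t diff = q_div > q_shift ? q_div - q_shift : q_shift - q_div;
            assert(diff <= 1);                        /* the estimate is off by at most one */
        }
        return 0;
    }
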
--- .travis.yml | 1 + configure.ac | 5 +- src/ecmult_impl.h | 8 +-- src/scalar_impl.h | 122 ++++++++++++++++++++++------------------------ 4 files changed, 65 insertions(+), 71 deletions(-) diff --git a/.travis.yml b/.travis.yml index 12692deafa6..3a85e8cba0d 100644 --- a/.travis.yml +++ b/.travis.yml @@ -19,6 +19,7 @@ env: - FIELD=32bit - FIELD=32bit ENDOMORPHISM=yes - BIGNUM=none + - BIGNUM=none ENDOMORPHISM=yes - BUILD=distcheck - EXTRAFLAGS=CFLAGS=-DDETERMINISTIC before_script: ./autogen.sh diff --git a/configure.ac b/configure.ac index f0d69f93def..6e6fccd7fdd 100644 --- a/configure.ac +++ b/configure.ac @@ -270,10 +270,7 @@ if test x"$set_field" = x"gmp" || test x"$set_bignum" = x"gmp"; then fi if test x"$use_endomorphism" = x"yes"; then - if test x"$set_bignum" = x"none"; then - AC_MSG_ERROR([Cannot use endomorphism optimization without a bignum implementation]) - fi - AC_DEFINE(USE_ENDOMORPHISM, 1, [Define this symbol to use endomorphism]) + AC_DEFINE(USE_ENDOMORPHISM, 1, [Define this symbol to use endomorphism optimization]) fi AC_MSG_NOTICE([Using field implementation: $set_field]) diff --git a/src/ecmult_impl.h b/src/ecmult_impl.h index bbf731c88a3..445b81593f8 100644 --- a/src/ecmult_impl.h +++ b/src/ecmult_impl.h @@ -168,10 +168,10 @@ static void secp256k1_ecmult(secp256k1_gej_t *r, const secp256k1_gej_t *a, const secp256k1_scalar_split_lambda_var(&na_1, &na_lam, na); /* build wnaf representation for na_1 and na_lam. */ - int wnaf_na_1[129]; int bits_na_1 = secp256k1_ecmult_wnaf(wnaf_na_1, &na_1, WINDOW_A); - int wnaf_na_lam[129]; int bits_na_lam = secp256k1_ecmult_wnaf(wnaf_na_lam, &na_lam, WINDOW_A); - VERIFY_CHECK(bits_na_1 <= 129); - VERIFY_CHECK(bits_na_lam <= 129); + int wnaf_na_1[130]; int bits_na_1 = secp256k1_ecmult_wnaf(wnaf_na_1, &na_1, WINDOW_A); + int wnaf_na_lam[130]; int bits_na_lam = secp256k1_ecmult_wnaf(wnaf_na_lam, &na_lam, WINDOW_A); + VERIFY_CHECK(bits_na_1 <= 130); + VERIFY_CHECK(bits_na_lam <= 130); int bits = bits_na_1; if (bits_na_lam > bits) bits = bits_na_lam; #else diff --git a/src/scalar_impl.h b/src/scalar_impl.h index df7a24e1f8c..7fc159df772 100644 --- a/src/scalar_impl.h +++ b/src/scalar_impl.h @@ -29,7 +29,7 @@ typedef struct { secp256k1_num_t order; #endif #ifdef USE_ENDOMORPHISM - secp256k1_num_t a1b2, b1, a2, g1, g2; + secp256k1_scalar_t minus_lambda, minus_b1, minus_b2, g1, g2; #endif } secp256k1_scalar_consts_t; @@ -52,20 +52,30 @@ static void secp256k1_scalar_start(void) { secp256k1_num_set_bin(&ret->order, secp256k1_scalar_consts_order, sizeof(secp256k1_scalar_consts_order)); #endif #ifdef USE_ENDOMORPHISM - static const unsigned char secp256k1_scalar_consts_a1b2[] = { - 0x30,0x86,0xd2,0x21,0xa7,0xd4,0x6b,0xcd, - 0xe8,0x6c,0x90,0xe4,0x92,0x84,0xeb,0x15 - }; - static const unsigned char secp256k1_scalar_consts_b1[] = { - 0xe4,0x43,0x7e,0xd6,0x01,0x0e,0x88,0x28, - 0x6f,0x54,0x7f,0xa9,0x0a,0xbf,0xe4,0xc3 - }; - static const unsigned char secp256k1_scalar_consts_a2[] = { - 0x01, - 0x14,0xca,0x50,0xf7,0xa8,0xe2,0xf3,0xf6, - 0x57,0xc1,0x10,0x8d,0x9d,0x44,0xcf,0xd8 + /** + * Lambda is a scalar which has the property for secp256k1 that point multiplication by + * it is efficiently computable (see secp256k1_gej_mul_lambda). 
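* Concretely, lambda is a nontrivial cube root of unity modulo the group order (lambda^3 == 1 mod n);
* the matching curve endomorphism maps (x, y) to (beta*x, y) for a cube root of unity beta in the field,
* so computing lambda*P costs a single field multiplication.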
*/ + static const unsigned char secp256k1_scalar_consts_lambda[32] = { + 0x53,0x63,0xad,0x4c,0xc0,0x5c,0x30,0xe0, + 0xa5,0x26,0x1c,0x02,0x88,0x12,0x64,0x5a, + 0x12,0x2e,0x22,0xea,0x20,0x81,0x66,0x78, + 0xdf,0x02,0x96,0x7c,0x1b,0x23,0xbd,0x72 }; /** + * "Guide to Elliptic Curve Cryptography" (Hankerson, Menezes, Vanstone) gives an algorithm + * (algorithm 3.74) to find k1 and k2 given k, such that k1 + k2 * lambda == k mod n, and k1 + * and k2 have a small size. + * It relies on constants a1, b1, a2, b2. These constants for the value of lambda above are: + * + * - a1 = {0x30,0x86,0xd2,0x21,0xa7,0xd4,0x6b,0xcd,0xe8,0x6c,0x90,0xe4,0x92,0x84,0xeb,0x15} + * - b1 = -{0xe4,0x43,0x7e,0xd6,0x01,0x0e,0x88,0x28,0x6f,0x54,0x7f,0xa9,0x0a,0xbf,0xe4,0xc3} + * - a2 = {0x01,0x14,0xca,0x50,0xf7,0xa8,0xe2,0xf3,0xf6,0x57,0xc1,0x10,0x8d,0x9d,0x44,0xcf,0xd8} + * - b2 = {0x30,0x86,0xd2,0x21,0xa7,0xd4,0x6b,0xcd,0xe8,0x6c,0x90,0xe4,0x92,0x84,0xeb,0x15} + * + * The algorithm then computes c1 = round(b1 * k / n) and c2 = round(b2 * k / n), and gives + * k1 = k - (c1*a1 + c2*a2) and k2 = -(c1*b1 + c2*b2). Instead, we use modular arithmetic, and + * compute k1 as k - k2 * lambda, avoiding the need for constants a1 and a2. + * * g1, g2 are precomputed constants used to replace division with a rounded multiplication * when decomposing the scalar for an endomorphism-based point multiplication. * @@ -82,21 +92,38 @@ static void secp256k1_scalar_start(void) { * (Note that 'd' is also equal to the curve order here because [a1,b1] and [a2,b2] are found * as outputs of the Extended Euclidean Algorithm on inputs 'order' and 'lambda'). */ - static const unsigned char secp256k1_scalar_consts_g1[] = { - 0x30,0x86, + static const unsigned char secp256k1_scalar_consts_minus_b1[32] = { + 0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00, + 0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00, + 0xe4,0x43,0x7e,0xd6,0x01,0x0e,0x88,0x28, + 0x6f,0x54,0x7f,0xa9,0x0a,0xbf,0xe4,0xc3 + }; + static const unsigned char secp256k1_scalar_consts_b2[32] = { + 0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00, + 0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00, + 0x30,0x86,0xd2,0x21,0xa7,0xd4,0x6b,0xcd, + 0xe8,0x6c,0x90,0xe4,0x92,0x84,0xeb,0x15 + }; + static const unsigned char secp256k1_scalar_consts_g1[32] = { + 0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00, + 0x00,0x00,0x00,0x00,0x00,0x00,0x30,0x86, 0xd2,0x21,0xa7,0xd4,0x6b,0xcd,0xe8,0x6c, 0x90,0xe4,0x92,0x84,0xeb,0x15,0x3d,0xab }; - static const unsigned char secp256k1_scalar_consts_g2[] = { - 0xe4,0x43, + static const unsigned char secp256k1_scalar_consts_g2[32] = { + 0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00, + 0x00,0x00,0x00,0x00,0x00,0x00,0xe4,0x43, 0x7e,0xd6,0x01,0x0e,0x88,0x28,0x6f,0x54, 0x7f,0xa9,0x0a,0xbf,0xe4,0xc4,0x22,0x12 }; - secp256k1_num_set_bin(&ret->a1b2, secp256k1_scalar_consts_a1b2, sizeof(secp256k1_scalar_consts_a1b2)); - secp256k1_num_set_bin(&ret->a2, secp256k1_scalar_consts_a2, sizeof(secp256k1_scalar_consts_a2)); - secp256k1_num_set_bin(&ret->b1, secp256k1_scalar_consts_b1, sizeof(secp256k1_scalar_consts_b1)); - secp256k1_num_set_bin(&ret->g1, secp256k1_scalar_consts_g1, sizeof(secp256k1_scalar_consts_g1)); - secp256k1_num_set_bin(&ret->g2, secp256k1_scalar_consts_g2, sizeof(secp256k1_scalar_consts_g2)); + + secp256k1_scalar_set_b32(&ret->minus_lambda, secp256k1_scalar_consts_lambda, NULL); + secp256k1_scalar_negate(&ret->minus_lambda, &ret->minus_lambda); + secp256k1_scalar_set_b32(&ret->minus_b1, secp256k1_scalar_consts_minus_b1, NULL); + secp256k1_scalar_set_b32(&ret->minus_b2, secp256k1_scalar_consts_b2, NULL); + 
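/* Note the signs: b1 itself is negative, so secp256k1_scalar_consts_minus_b1 above already encodes -b1 and is loaded as-is, whereas b2 is positive and must be negated here to obtain minus_b2. */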
secp256k1_scalar_negate(&ret->minus_b2, &ret->minus_b2); + secp256k1_scalar_set_b32(&ret->g1, secp256k1_scalar_consts_g1, NULL); + secp256k1_scalar_set_b32(&ret->g2, secp256k1_scalar_consts_g2, NULL); #endif /* Set the global pointer. */ @@ -293,47 +320,16 @@ static void secp256k1_scalar_inverse_var(secp256k1_scalar_t *r, const secp256k1_ #ifdef USE_ENDOMORPHISM static void secp256k1_scalar_split_lambda_var(secp256k1_scalar_t *r1, secp256k1_scalar_t *r2, const secp256k1_scalar_t *a) { - unsigned char b[32]; - secp256k1_scalar_get_b32(b, a); - secp256k1_num_t na; - secp256k1_num_set_bin(&na, b, 32); - - secp256k1_num_t rn1, rn2; - - const secp256k1_scalar_consts_t *c = secp256k1_scalar_consts; - secp256k1_num_t d1, d2, t, one; - unsigned char cone[1] = {0x01}; - secp256k1_num_set_bin(&one, cone, 1); - - secp256k1_num_mul(&d1, &na, &c->g1); - secp256k1_num_shift(&d1, 271); - secp256k1_num_add(&d1, &d1, &one); - secp256k1_num_shift(&d1, 1); - - secp256k1_num_mul(&d2, &na, &c->g2); - secp256k1_num_shift(&d2, 271); - secp256k1_num_add(&d2, &d2, &one); - secp256k1_num_shift(&d2, 1); - - secp256k1_num_mul(&t, &d1, &c->a1b2); - secp256k1_num_sub(&rn1, &na, &t); - secp256k1_num_mul(&t, &d2, &c->a2); - secp256k1_num_sub(&rn1, &rn1, &t); - - secp256k1_num_mul(&rn2, &d1, &c->b1); - secp256k1_num_mul(&t, &d2, &c->a1b2); - secp256k1_num_sub(&rn2, &rn2, &t); - - secp256k1_num_get_bin(b, 32, &rn1); - secp256k1_scalar_set_b32(r1, b, NULL); - if (secp256k1_num_is_neg(&rn1)) { - secp256k1_scalar_negate(r1, r1); - } - secp256k1_num_get_bin(b, 32, &rn2); - secp256k1_scalar_set_b32(r2, b, NULL); - if (secp256k1_num_is_neg(&rn2)) { - secp256k1_scalar_negate(r2, r2); - } + VERIFY_CHECK(r1 != a); + VERIFY_CHECK(r2 != a); + secp256k1_scalar_t c1, c2; + secp256k1_scalar_mul_shift_var(&c1, a, &secp256k1_scalar_consts->g1, 272); + secp256k1_scalar_mul_shift_var(&c2, a, &secp256k1_scalar_consts->g2, 272); + secp256k1_scalar_mul(&c1, &c1, &secp256k1_scalar_consts->minus_b1); + secp256k1_scalar_mul(&c2, &c2, &secp256k1_scalar_consts->minus_b2); + secp256k1_scalar_add(r2, &c1, &c2); + secp256k1_scalar_mul(r1, r2, &secp256k1_scalar_consts->minus_lambda); + secp256k1_scalar_add(r1, r1, a); } #endif
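
As a closing note (illustrative only, not part of the patches): because the new splitter computes
r1 = a - r2*lambda with exact scalar arithmetic, the decomposition can be sanity-checked by recombining
its outputs. The helper name check_split_lambda below is made up for this sketch; everything else it
uses (the scalar API, VERIFY_CHECK and the minus_lambda constant) comes from this patch series.

    static void check_split_lambda(const secp256k1_scalar_t *a) {
        secp256k1_scalar_t r1, r2, t;
        secp256k1_scalar_split_lambda_var(&r1, &r2, a);
        /* t = r2 * (-lambda) */
        secp256k1_scalar_mul(&t, &r2, &secp256k1_scalar_consts->minus_lambda);
        /* t = r2 * lambda */
        secp256k1_scalar_negate(&t, &t);
        /* t = r1 + r2 * lambda; this must equal a modulo the group order */
        secp256k1_scalar_add(&t, &t, &r1);
        VERIFY_CHECK(secp256k1_scalar_eq(&t, a));
    }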