mirror of
https://github.com/bitcoin/bitcoin.git
synced 2025-02-04 10:07:27 -05:00
Use constant-time conditional moves instead of byte slicing
This commit is contained in:
parent
d220062ac2
commit
efb7d4b299
7 changed files with 55 additions and 10 deletions
|
@ -23,8 +23,8 @@ typedef struct {
|
|||
* precomputed (call it prec(i, n_i)). The formula now becomes sum(prec(i, n_i), i=0..63).
|
||||
* None of the resulting prec group elements have a known scalar, and neither do any of
|
||||
* the intermediate sums while computing a*G.
|
||||
* To make memory access uniform, the bytes of prec(i, n_i) are sliced per value of n_i. */
|
||||
unsigned char prec[64][sizeof(secp256k1_ge_t)][16]; /* prec[j][k][i] = k'th byte of (16^j * i * G + U_i) */
|
||||
*/
|
||||
secp256k1_fe_t prec[64][16][2]; /* prec[j][i] = (16^j * i * G + U_i).{x,y} */
|
||||
} secp256k1_ecmult_gen_consts_t;
|
||||
|
||||
static const secp256k1_ecmult_gen_consts_t *secp256k1_ecmult_gen_consts = NULL;
|
||||
|
@ -81,9 +81,9 @@ static void secp256k1_ecmult_gen_start(void) {
|
|||
}
|
||||
for (int j=0; j<64; j++) {
|
||||
for (int i=0; i<16; i++) {
|
||||
const unsigned char* raw = (const unsigned char*)(&prec[j*16 + i]);
|
||||
for (size_t k=0; k<sizeof(secp256k1_ge_t); k++)
|
||||
ret->prec[j][k][i] = raw[k];
|
||||
VERIFY_CHECK(!secp256k1_ge_is_infinity(&prec[j*16 + i]));
|
||||
ret->prec[j][i][0] = prec[j*16 + i].x;
|
||||
ret->prec[j][i][1] = prec[j*16 + i].y;
|
||||
}
|
||||
}
|
||||
|
||||
|
@ -104,11 +104,14 @@ static void secp256k1_ecmult_gen(secp256k1_gej_t *r, const secp256k1_scalar_t *g
|
|||
const secp256k1_ecmult_gen_consts_t *c = secp256k1_ecmult_gen_consts;
|
||||
secp256k1_gej_set_infinity(r);
|
||||
secp256k1_ge_t add;
|
||||
add.infinity = 0;
|
||||
int bits;
|
||||
for (int j=0; j<64; j++) {
|
||||
bits = secp256k1_scalar_get_bits(gn, j * 4, 4);
|
||||
for (size_t k=0; k<sizeof(secp256k1_ge_t); k++)
|
||||
((unsigned char*)(&add))[k] = c->prec[j][k][bits];
|
||||
for (int i=0; i<16; i++) {
|
||||
secp256k1_fe_cmov(&add.x, &c->prec[j][i][0], i == bits);
|
||||
secp256k1_fe_cmov(&add.y, &c->prec[j][i][1], i == bits);
|
||||
}
|
||||
secp256k1_gej_add_ge(r, r, &add);
|
||||
}
|
||||
bits = 0;
|
||||
|
|
|
@ -110,11 +110,13 @@ static void secp256k1_fe_inv_all(size_t len, secp256k1_fe_t r[len], const secp25
|
|||
/** Potentially faster version of secp256k1_fe_inv_all, without constant-time guarantee. */
|
||||
static void secp256k1_fe_inv_all_var(size_t len, secp256k1_fe_t r[len], const secp256k1_fe_t a[len]);
|
||||
|
||||
|
||||
/** Convert a field element to a hexadecimal string. */
|
||||
static void secp256k1_fe_get_hex(char *r, int *rlen, const secp256k1_fe_t *a);
|
||||
|
||||
/** Convert a hexadecimal string to a field element. */
|
||||
static int secp256k1_fe_set_hex(secp256k1_fe_t *r, const char *a, int alen);
|
||||
|
||||
/** If flag is true, set *r equal to *a; otherwise leave it. Constant-time. */
|
||||
static void secp256k1_fe_cmov(secp256k1_fe_t *r, const secp256k1_fe_t *a, int flag);
|
||||
|
||||
#endif
|
||||
|
|
|
@ -900,4 +900,24 @@ static void secp256k1_fe_sqr(secp256k1_fe_t *r, const secp256k1_fe_t *a) {
|
|||
#endif
|
||||
}
|
||||
|
||||
/** Conditionally overwrite *r with *a in constant time.
 *
 *  When flag is 1, every limb of *r is replaced by the corresponding limb
 *  of *a; when flag is 0, *r is left unchanged.  Selection is done with
 *  bitmasks instead of a branch, and all ten limbs of both operands are
 *  always read and written, so the execution trace is independent of flag.
 *
 *  NOTE(review): the mask derivation assumes flag is exactly 0 or 1 —
 *  any other value produces garbage masks; confirm callers obey this.
 */
static void secp256k1_fe_cmov(secp256k1_fe_t *r, const secp256k1_fe_t *a, int flag) {
    uint32_t keep = flag + ~((uint32_t)0); /* all-ones if flag == 0, zero if flag == 1 */
    uint32_t take = ~keep;                 /* complementary mask: selects *a's limbs */
    for (int limb = 0; limb < 10; limb++) {
        r->n[limb] = (r->n[limb] & keep) | (a->n[limb] & take);
    }
#ifdef VERIFY
    /* Debug-only bookkeeping; the branch here is acceptable because the
     * magnitude/normalized fields exist only in VERIFY builds. */
    if (flag) {
        r->magnitude = a->magnitude;
        r->normalized = a->normalized;
    }
#endif
}
|
||||
|
||||
#endif
|
||||
|
|
|
@ -276,4 +276,19 @@ static void secp256k1_fe_sqr(secp256k1_fe_t *r, const secp256k1_fe_t *a) {
|
|||
#endif
|
||||
}
|
||||
|
||||
/** Constant-time conditional move for the 5x52 field representation.
 *
 *  If flag is 1, copy all five 64-bit limbs of *a into *r; if flag is 0,
 *  keep *r as-is.  Both possibilities are computed via masking and every
 *  limb is unconditionally loaded and stored, so neither the branch
 *  pattern nor the memory-access pattern depends on flag.
 *
 *  NOTE(review): flag must be 0 or 1 for the mask arithmetic to be valid.
 */
static void secp256k1_fe_cmov(secp256k1_fe_t *r, const secp256k1_fe_t *a, int flag) {
    uint64_t mask_old = flag + ~((uint64_t)0); /* 0xFFFF... when flag == 0 */
    uint64_t mask_new = ~mask_old;             /* 0xFFFF... when flag == 1 */
    for (int i = 0; i < 5; i++) {
        r->n[i] = (r->n[i] & mask_old) | (a->n[i] & mask_new);
    }
#ifdef VERIFY
    /* VERIFY-only metadata propagation; branching is fine in debug builds. */
    if (flag) {
        r->magnitude = a->magnitude;
        r->normalized = a->normalized;
    }
#endif
}
|
||||
|
||||
#endif
|
||||
|
|
|
@ -170,4 +170,11 @@ static void secp256k1_fe_sqr(secp256k1_fe_t *r, const secp256k1_fe_t *a) {
|
|||
secp256k1_fe_reduce(r, tmp);
|
||||
}
|
||||
|
||||
/** Constant-time conditional move for the GMP-backed field representation.
 *
 *  Copies *a over *r when flag is 1; leaves *r untouched when flag is 0.
 *  Each limb is selected by masking rather than branching, so execution
 *  does not depend on the value of flag.
 *
 *  NOTE(review): the loop bound is `<= FIELD_LIMBS` (FIELD_LIMBS + 1 limbs),
 *  presumably because this representation stores one extra limb in n[] —
 *  confirm the array really has FIELD_LIMBS + 1 entries.
 */
static void secp256k1_fe_cmov(secp256k1_fe_t *r, const secp256k1_fe_t *a, int flag) {
    mp_limb_t keep = flag + ~((mp_limb_t)0); /* all-ones if flag == 0 */
    mp_limb_t take = ~keep;                  /* all-ones if flag == 1 */
    int i = 0;
    while (i <= FIELD_LIMBS) {
        r->n[i] = (r->n[i] & keep) | (a->n[i] & take);
        i++;
    }
}
|
||||
|
||||
#endif
|
||||
|
|
|
@ -117,5 +117,4 @@ static void secp256k1_gej_clear(secp256k1_gej_t *r);
|
|||
/** Clear a secp256k1_ge_t to prevent leaking sensitive information. */
|
||||
static void secp256k1_ge_clear(secp256k1_ge_t *r);
|
||||
|
||||
|
||||
#endif
|
||||
|
|
|
@ -411,7 +411,6 @@ static void secp256k1_gej_mul_lambda(secp256k1_gej_t *r, const secp256k1_gej_t *
|
|||
}
|
||||
#endif
|
||||
|
||||
|
||||
static void secp256k1_ge_start(void) {
|
||||
static const unsigned char secp256k1_ge_consts_g_x[] = {
|
||||
0x79,0xBE,0x66,0x7E,0xF9,0xDC,0xBB,0xAC,
|
||||
|
|
Loading…
Add table
Reference in a new issue