b0210a9 Merge pull request #135 ee3eb4b Fix a memory leak and add a number of small tests. 4d879a3 Merge pull request #134 d5e8362 Merge pull request #127 7b92cf6 Merge pull request #132 0bf70a5 Merge pull request #133 29ae131 Make scalar_add_bit test's overflow detection exact 9048def Avoid undefined shift behaviour efb7d4b Use constant-time conditional moves instead of byte slicing d220062 Merge pull request #131 82f9254 Fix typo 601ca04 Merge pull request #129 35399e0 Bugfix: b is restricted, not r c35ff1e Convert lambda splitter to pure scalar code. cc604e9 Avoid division when decomposing scalars ff8746d Add secp256k1_scalar_mul_shift_var bd313f7 Merge pull request #119 276f987 Merge pull request #124 25d125e Merge pull request #126 24b3c65 Add a test case for ECDSA recomputing infinity 32600e5 Add a test for r >= order signature handling 4d4eeea Make secp256k1_fe_mul_inner use the r != property be82e92 Require that r and b are different for field multiplication. 597128d Make num optional 659b554 Make constant initializers independent from num 0af5b47 Merge pull request #120 e2e8a36 Merge pull request #117 c76be9e Remove unused num functions 4285a98 Move lambda-splitting code to scalar. 
f24041d Switch all EC/ECDSA logic from num to scalar 6794be6 Add scalar splitting functions d1502eb Add secp256k1_scalar_inverse_var which delegates to GMP b5c9ee7 Make test_point_times_order test meaningful again 0b73059 Switch wnaf splitting from num-based to scalar-based 1e6c77c Generalize secp256k1_scalar_get_bits 5213207 Add secp256k1_scalar_add_bit 3c0ae43 Merge pull request #122 6e05287 Do signature recovery/verification with 4 possible recid case e3d692f Explain why no y=0 check is necessary for doubling f7dc1c6 Optimize doubling: secp256k1 has no y=0 point 666d3b5 Merge pull request #121 2a54f9b Correct typo in comment 9d64145 Merge pull request #114 99f0728 Fix secp256k1_num_set_bin handling of 0 d907ebc Add bounds checking to field element setters bb2cd94 Merge pull request #116 665775b Don't split the g factor when not using endomorphism 9431d6b Merge pull request #115 e2274c5 build: osx: attempt to work with homebrew keg-only packages git-subtree-dir: src/secp256k1 git-subtree-split: b0210a95da433e048a11d298efbcc14eb423c95f
181 lines
6.5 KiB
C
181 lines
6.5 KiB
C
/**********************************************************************
|
|
* Copyright (c) 2013, 2014 Pieter Wuille *
|
|
* Distributed under the MIT software license, see the accompanying *
|
|
* file COPYING or http://www.opensource.org/licenses/mit-license.php.*
|
|
**********************************************************************/
|
|
|
|
#ifndef _SECP256K1_FIELD_REPR_IMPL_H_
|
|
#define _SECP256K1_FIELD_REPR_IMPL_H_
|
|
|
|
#include <stdio.h>
|
|
#include <string.h>
|
|
#include "num.h"
|
|
#include "field.h"
|
|
|
|
/* The field prime p = 2^256 - 2^32 - 977 in GMP limb form (computed by
 * secp256k1_fe_inner_start as 0 - pc modulo 2^(FIELD_LIMBS*GMP_NUMB_BITS)). */
static mp_limb_t secp256k1_field_p[FIELD_LIMBS];
/* pc = 2^32 + 0x3D1 = 0x1000003D1, the 33-bit complement of p used for
 * reduction; sized to hold 33 bits in GMP limbs. */
static mp_limb_t secp256k1_field_pc[(33+GMP_NUMB_BITS-1)/GMP_NUMB_BITS];
|
|
|
|
/** One-time initialization of the field constants: builds
 *  pc = 2^32 + 0x3D1 and derives p = 0 - pc (mod 2^(FIELD_LIMBS*GMP_NUMB_BITS)). */
static void secp256k1_fe_inner_start(void) {
    for (int i=0; i<(33+GMP_NUMB_BITS-1)/GMP_NUMB_BITS; i++)
        secp256k1_field_pc[i] = 0;
    /* Low part of the constant: 0x3D1 (= 977). */
    secp256k1_field_pc[0] += 0x3D1UL;
    /* High part: set bit 32, placed in whichever limb holds that bit. */
    secp256k1_field_pc[32/GMP_NUMB_BITS] += (((mp_limb_t)1) << (32 % GMP_NUMB_BITS));
    for (int i=0; i<FIELD_LIMBS; i++) {
        secp256k1_field_p[i] = 0;
    }
    /* p = 0 - pc; the borrow out of mpn_sub is discarded, so this computes
     * 2^(FIELD_LIMBS*GMP_NUMB_BITS) - pc. */
    mpn_sub(secp256k1_field_p, secp256k1_field_p, FIELD_LIMBS, secp256k1_field_pc, (33+GMP_NUMB_BITS-1)/GMP_NUMB_BITS);
}
|
|
|
|
/** Counterpart of secp256k1_fe_inner_start; nothing to release for the GMP
 *  backend since the constants live in static storage. */
static void secp256k1_fe_inner_stop(void) {
}
|
|
|
|
/** Bring a field element to its canonical representation in [0, p).
 *  Any value in the overflow limb n[FIELD_LIMBS] is first folded back into
 *  the low limbs using 2^(FIELD_LIMBS*GMP_NUMB_BITS) = pc (mod p) with
 *  pc = 0x1000003D1; then a single conditional subtraction of p finishes. */
static void secp256k1_fe_normalize(secp256k1_fe_t *r) {
    if (r->n[FIELD_LIMBS] != 0) {
#if (GMP_NUMB_BITS >= 40)
        /* The 33-bit constant times the overflow limb fits in one limb:
         * fold it in one addition, then fold the resulting carry likewise. */
        mp_limb_t carry = mpn_add_1(r->n, r->n, FIELD_LIMBS, 0x1000003D1ULL * r->n[FIELD_LIMBS]);
        mpn_add_1(r->n, r->n, FIELD_LIMBS, 0x1000003D1ULL * carry);
#else
        /* Narrow limbs: split the constant as 0x3D1 + 2^32 and add the two
         * pieces separately, the high piece shifted into the right limb. */
        mp_limb_t carry = mpn_add_1(r->n, r->n, FIELD_LIMBS, 0x3D1UL * r->n[FIELD_LIMBS]) +
        mpn_add_1(r->n+(32/GMP_NUMB_BITS), r->n+(32/GMP_NUMB_BITS), FIELD_LIMBS-(32/GMP_NUMB_BITS), r->n[FIELD_LIMBS] << (32 % GMP_NUMB_BITS));
        /* Fold the carries from the first pass the same way. */
        mpn_add_1(r->n, r->n, FIELD_LIMBS, 0x3D1UL * carry);
        mpn_add_1(r->n+(32/GMP_NUMB_BITS), r->n+(32/GMP_NUMB_BITS), FIELD_LIMBS-(32/GMP_NUMB_BITS), carry << (32%GMP_NUMB_BITS));
#endif
        r->n[FIELD_LIMBS] = 0;
    }
    /* At most one subtraction of p is needed to land in [0, p). */
    if (mpn_cmp(r->n, secp256k1_field_p, FIELD_LIMBS) >= 0)
        mpn_sub(r->n, r->n, FIELD_LIMBS, secp256k1_field_p, FIELD_LIMBS);
}
|
|
|
|
/** Set r to the small integer a: value in the lowest limb, all higher limbs
 *  (including the overflow limb n[FIELD_LIMBS]) cleared. */
SECP256K1_INLINE static void secp256k1_fe_set_int(secp256k1_fe_t *r, int a) {
    r->n[0] = a;
    for (int i = FIELD_LIMBS; i >= 1; i--) {
        r->n[i] = 0;
    }
}
|
|
|
|
/** Zero all limbs of r, including the overflow limb n[FIELD_LIMBS]. */
SECP256K1_INLINE static void secp256k1_fe_clear(secp256k1_fe_t *r) {
    int i = 0;
    do {
        r->n[i] = 0;
    } while (++i < FIELD_LIMBS + 1);
}
|
|
|
|
SECP256K1_INLINE static int secp256k1_fe_is_zero(const secp256k1_fe_t *a) {
|
|
int ret = 1;
|
|
for (int i=0; i<FIELD_LIMBS+1; i++)
|
|
ret &= (a->n[i] == 0);
|
|
return ret;
|
|
}
|
|
|
|
SECP256K1_INLINE static int secp256k1_fe_is_odd(const secp256k1_fe_t *a) {
|
|
return a->n[0] & 1;
|
|
}
|
|
|
|
SECP256K1_INLINE static int secp256k1_fe_equal(const secp256k1_fe_t *a, const secp256k1_fe_t *b) {
|
|
int ret = 1;
|
|
for (int i=0; i<FIELD_LIMBS+1; i++)
|
|
ret &= (a->n[i] == b->n[i]);
|
|
return ret;
|
|
}
|
|
|
|
SECP256K1_INLINE static int secp256k1_fe_cmp_var(const secp256k1_fe_t *a, const secp256k1_fe_t *b) {
|
|
for (int i=FIELD_LIMBS; i>=0; i--) {
|
|
if (a->n[i] > b->n[i]) return 1;
|
|
if (a->n[i] < b->n[i]) return -1;
|
|
}
|
|
return 0;
|
|
}
|
|
|
|
/** Load a 32-byte big-endian value into r, bit by bit (robust against any
 *  GMP limb width, including nail builds). Returns 1 if the value is less
 *  than the field prime p, 0 otherwise. */
static int secp256k1_fe_set_b32(secp256k1_fe_t *r, const unsigned char *a) {
    int i;
    for (i = 0; i < FIELD_LIMBS + 1; i++) {
        r->n[i] = 0;
    }
    for (i = 0; i < 256; i++) {
        /* Bit i of the 256-bit value lives in byte a[31 - i/8], bit i%8. */
        mp_limb_t bit = (a[31 - i/8] >> (i % 8)) & 0x1;
        r->n[i / GMP_NUMB_BITS] |= bit << (i % GMP_NUMB_BITS);
    }
    return mpn_cmp(r->n, secp256k1_field_p, FIELD_LIMBS) < 0;
}
|
|
|
|
/** Convert a field element to a 32-byte big endian value. Requires the input to be normalized */
|
|
static void secp256k1_fe_get_b32(unsigned char *r, const secp256k1_fe_t *a) {
|
|
for (int i=0; i<32; i++) {
|
|
int c = 0;
|
|
for (int j=0; j<8; j++) {
|
|
int limb = (8*i+j)/GMP_NUMB_BITS;
|
|
int shift = (8*i+j)%GMP_NUMB_BITS;
|
|
c |= ((a->n[limb] >> shift) & 0x1) << j;
|
|
}
|
|
r[31-i] = c;
|
|
}
|
|
}
|
|
|
|
/** Set r = -a (mod p). The magnitude hint m used by other field backends is
 *  ignored here; the input is fully normalized first instead. */
SECP256K1_INLINE static void secp256k1_fe_negate(secp256k1_fe_t *r, const secp256k1_fe_t *a, int m) {
    (void)m;
    *r = *a;
    secp256k1_fe_normalize(r);
    /* Bitwise complement gives (2^N - 1) - r with N = FIELD_LIMBS*GMP_NUMB_BITS;
     * subtracting (2^N - 1) - p = 0x1000003D0 then yields p - r. */
    for (int i=0; i<FIELD_LIMBS; i++)
        r->n[i] = ~(r->n[i]);
#if (GMP_NUMB_BITS >= 33)
    mpn_sub_1(r->n, r->n, FIELD_LIMBS, 0x1000003D0ULL);
#else
    /* Narrow limbs: subtract the constant split as 0x3D0 + 2^32. */
    mpn_sub_1(r->n, r->n, FIELD_LIMBS, 0x3D0UL);
    mpn_sub_1(r->n+(32/GMP_NUMB_BITS), r->n+(32/GMP_NUMB_BITS), FIELD_LIMBS-(32/GMP_NUMB_BITS), 0x1UL << (32%GMP_NUMB_BITS));
#endif
}
|
|
|
|
/** Multiply r in place by a small integer a; no modular reduction, the
 *  product spills into the overflow limb r->n[FIELD_LIMBS]. The carry out of
 *  the top limb (mpn_mul_1's return value) is discarded.
 *  NOTE(review): a is implicitly converted to mp_limb_t, so callers are
 *  presumably expected to pass small non-negative multipliers — confirm. */
SECP256K1_INLINE static void secp256k1_fe_mul_int(secp256k1_fe_t *r, int a) {
    mpn_mul_1(r->n, r->n, FIELD_LIMBS+1, a);
}
|
|
|
|
/** Add a to r in place; no modular reduction, the sum may use the overflow
 *  limb. The carry out of the top limb (mpn_add's return value) is discarded,
 *  so operands are presumably kept small enough not to overflow it. */
SECP256K1_INLINE static void secp256k1_fe_add(secp256k1_fe_t *r, const secp256k1_fe_t *a) {
    mpn_add(r->n, r->n, FIELD_LIMBS+1, a->n, FIELD_LIMBS+1);
}
|
|
|
|
/** Reduce a double-width (2*FIELD_LIMBS limb) product tmp into r, using
 *  2^(FIELD_LIMBS*GMP_NUMB_BITS) = C (mod p) with C = 0x1000003D1: the high
 *  half is multiplied by C and added onto the low half, and the overflow of
 *  that step is folded once more via C (through the precomputed
 *  secp256k1_field_pc table). The result may still exceed p; a small residue
 *  is left in r->n[FIELD_LIMBS] for secp256k1_fe_normalize to clear. */
static void secp256k1_fe_reduce(secp256k1_fe_t *r, mp_limb_t *tmp) {
    /** <A1 A2 A3 A4> <B1 B2 B3 B4>
     *        B1 B2 B3 B4
     *  + C * A1 A2 A3 A4
     *  (high half A scaled by C and folded onto low half B)
     */

#if (GMP_NUMB_BITS >= 33)
    /* C fits in one limb: fold the high half in a single addmul pass. */
    mp_limb_t o = mpn_addmul_1(tmp, tmp+FIELD_LIMBS, FIELD_LIMBS, 0x1000003D1ULL);
#else
    /* Narrow limbs: fold with C split as 0x3D1 + 2^32, summing both carries. */
    mp_limb_t o = mpn_addmul_1(tmp, tmp+FIELD_LIMBS, FIELD_LIMBS, 0x3D1UL) +
    mpn_addmul_1(tmp+(32/GMP_NUMB_BITS), tmp+FIELD_LIMBS, FIELD_LIMBS-(32/GMP_NUMB_BITS), 0x1UL << (32%GMP_NUMB_BITS));
#endif
    /* q = o * C, computed via the stored limbs of C (secp256k1_field_pc). */
    mp_limb_t q[1+(33+GMP_NUMB_BITS-1)/GMP_NUMB_BITS];
    q[(33+GMP_NUMB_BITS-1)/GMP_NUMB_BITS] = mpn_mul_1(q, secp256k1_field_pc, (33+GMP_NUMB_BITS-1)/GMP_NUMB_BITS, o);
#if (GMP_NUMB_BITS <= 32)
    /* With narrow limbs the shifted addmul above could not reach the very top
     * limb of the high half; account for that remaining piece here. */
    mp_limb_t o2 = tmp[2*FIELD_LIMBS-(32/GMP_NUMB_BITS)] << (32%GMP_NUMB_BITS);
    q[(33+GMP_NUMB_BITS-1)/GMP_NUMB_BITS] += mpn_addmul_1(q, secp256k1_field_pc, (33+GMP_NUMB_BITS-1)/GMP_NUMB_BITS, o2);
#endif
    /* r = low half + q; the carry becomes the overflow limb. */
    r->n[FIELD_LIMBS] = mpn_add(r->n, tmp, FIELD_LIMBS, q, 1+(33+GMP_NUMB_BITS-1)/GMP_NUMB_BITS);
}
|
|
|
|
/** Set r = a * b (mod p). b must not alias r (it is restrict-qualified);
 *  both inputs are normalized into local copies before the full-width
 *  multiplication, whose product is then reduced into r. */
static void secp256k1_fe_mul(secp256k1_fe_t *r, const secp256k1_fe_t *a, const secp256k1_fe_t * SECP256K1_RESTRICT b) {
    VERIFY_CHECK(r != b);
    secp256k1_fe_t an = *a;
    secp256k1_fe_t bn = *b;
    mp_limb_t prod[2*FIELD_LIMBS];
    secp256k1_fe_normalize(&an);
    secp256k1_fe_normalize(&bn);
    mpn_mul_n(prod, an.n, bn.n, FIELD_LIMBS);
    secp256k1_fe_reduce(r, prod);
}
|
|
|
|
/** Set r = a^2 (mod p): normalize a local copy, square it to double width,
 *  then reduce the product into r. */
static void secp256k1_fe_sqr(secp256k1_fe_t *r, const secp256k1_fe_t *a) {
    secp256k1_fe_t an = *a;
    mp_limb_t prod[2*FIELD_LIMBS];
    secp256k1_fe_normalize(&an);
    mpn_sqr(prod, an.n, FIELD_LIMBS);
    secp256k1_fe_reduce(r, prod);
}
|
|
|
|
/** Branch-free conditional move: if flag is nonzero set r = a, otherwise
 *  leave r unchanged. Uses full-width masks so the memory access pattern
 *  does not depend on flag. */
static void secp256k1_fe_cmov(secp256k1_fe_t *r, const secp256k1_fe_t *a, int flag) {
    /* mask_keep is all ones when flag == 0, all zeros when flag == 1. */
    mp_limb_t mask_keep = flag + ~((mp_limb_t)0);
    mp_limb_t mask_take = ~mask_keep;
    int i;
    for (i = 0; i <= FIELD_LIMBS; i++) {
        r->n[i] = (r->n[i] & mask_keep) | (a->n[i] & mask_take);
    }
}
|
|
|
|
#endif
|