VirtualBox

source: vbox/trunk/src/libs/openssl-3.3.2/crypto/ec/ecp_nistp521.c@109193

Last change on this file since 109193 was 108206, checked in by vboxsync, 3 months ago

openssl-3.3.2: Exported all files to OSE and removed .scm-settings bugref:10757

1/*
2 * Copyright 2011-2023 The OpenSSL Project Authors. All Rights Reserved.
3 *
4 * Licensed under the Apache License 2.0 (the "License"). You may not use
5 * this file except in compliance with the License. You can obtain a copy
6 * in the file LICENSE in the source distribution or at
7 * https://www.openssl.org/source/license.html
8 */
9
10/* Copyright 2011 Google Inc.
11 *
12 * Licensed under the Apache License, Version 2.0 (the "License");
13 *
14 * you may not use this file except in compliance with the License.
15 * You may obtain a copy of the License at
16 *
17 * http://www.apache.org/licenses/LICENSE-2.0
18 *
19 * Unless required by applicable law or agreed to in writing, software
20 * distributed under the License is distributed on an "AS IS" BASIS,
21 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
22 * See the License for the specific language governing permissions and
23 * limitations under the License.
24 */
25
26/*
27 * ECDSA low level APIs are deprecated for public use, but still ok for
28 * internal use.
29 */
30#include "internal/deprecated.h"
31
32/*
33 * A 64-bit implementation of the NIST P-521 elliptic curve point multiplication
34 *
35 * OpenSSL integration was taken from Emilia Kasper's work in ecp_nistp224.c.
36 * Otherwise based on Emilia's P224 work, which was inspired by my curve25519
37 * work which got its smarts from Daniel J. Bernstein's work on the same.
38 */
39
40#include <openssl/e_os2.h>
41
42#include <string.h>
43#include <openssl/err.h>
44#include "ec_local.h"
45
46#include "internal/numbers.h"
47
48#ifndef INT128_MAX
49# error "Your compiler doesn't appear to support 128-bit integer types"
50#endif
51
52typedef uint8_t u8;
53typedef uint64_t u64;
54
55/*
56 * The underlying field. P521 operates over GF(2^521-1). We can serialize an
57 * element of this field into 66 bytes where the most significant byte
58 * contains only a single bit. We call this an felem_bytearray.
59 */
60
61typedef u8 felem_bytearray[66];
62
63/*
64 * These are the parameters of P521, taken from FIPS 186-3, section D.1.2.5.
65 * These values are big-endian.
66 */
67static const felem_bytearray nistp521_curve_params[5] = {
68 {0x01, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* p */
69 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
70 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
71 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
72 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
73 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
74 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
75 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
76 0xff, 0xff},
77 {0x01, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* a = -3 */
78 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
79 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
80 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
81 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
82 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
83 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
84 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
85 0xff, 0xfc},
86 {0x00, 0x51, 0x95, 0x3e, 0xb9, 0x61, 0x8e, 0x1c, /* b */
87 0x9a, 0x1f, 0x92, 0x9a, 0x21, 0xa0, 0xb6, 0x85,
88 0x40, 0xee, 0xa2, 0xda, 0x72, 0x5b, 0x99, 0xb3,
89 0x15, 0xf3, 0xb8, 0xb4, 0x89, 0x91, 0x8e, 0xf1,
90 0x09, 0xe1, 0x56, 0x19, 0x39, 0x51, 0xec, 0x7e,
91 0x93, 0x7b, 0x16, 0x52, 0xc0, 0xbd, 0x3b, 0xb1,
92 0xbf, 0x07, 0x35, 0x73, 0xdf, 0x88, 0x3d, 0x2c,
93 0x34, 0xf1, 0xef, 0x45, 0x1f, 0xd4, 0x6b, 0x50,
94 0x3f, 0x00},
95 {0x00, 0xc6, 0x85, 0x8e, 0x06, 0xb7, 0x04, 0x04, /* x */
96 0xe9, 0xcd, 0x9e, 0x3e, 0xcb, 0x66, 0x23, 0x95,
97 0xb4, 0x42, 0x9c, 0x64, 0x81, 0x39, 0x05, 0x3f,
98 0xb5, 0x21, 0xf8, 0x28, 0xaf, 0x60, 0x6b, 0x4d,
99 0x3d, 0xba, 0xa1, 0x4b, 0x5e, 0x77, 0xef, 0xe7,
100 0x59, 0x28, 0xfe, 0x1d, 0xc1, 0x27, 0xa2, 0xff,
101 0xa8, 0xde, 0x33, 0x48, 0xb3, 0xc1, 0x85, 0x6a,
102 0x42, 0x9b, 0xf9, 0x7e, 0x7e, 0x31, 0xc2, 0xe5,
103 0xbd, 0x66},
104 {0x01, 0x18, 0x39, 0x29, 0x6a, 0x78, 0x9a, 0x3b, /* y */
105 0xc0, 0x04, 0x5c, 0x8a, 0x5f, 0xb4, 0x2c, 0x7d,
106 0x1b, 0xd9, 0x98, 0xf5, 0x44, 0x49, 0x57, 0x9b,
107 0x44, 0x68, 0x17, 0xaf, 0xbd, 0x17, 0x27, 0x3e,
108 0x66, 0x2c, 0x97, 0xee, 0x72, 0x99, 0x5e, 0xf4,
109 0x26, 0x40, 0xc5, 0x50, 0xb9, 0x01, 0x3f, 0xad,
110 0x07, 0x61, 0x35, 0x3c, 0x70, 0x86, 0xa2, 0x72,
111 0xc2, 0x40, 0x88, 0xbe, 0x94, 0x76, 0x9f, 0xd1,
112 0x66, 0x50}
113};
114
115/*-
116 * The representation of field elements.
117 * ------------------------------------
118 *
119 * We represent field elements with nine values. These values are either 64 or
120 * 128 bits and the field element represented is:
121 * v[0]*2^0 + v[1]*2^58 + v[2]*2^116 + ... + v[8]*2^464 (mod p)
122 * Each of the nine values is called a 'limb'. Since the limbs are spaced only
123 * 58 bits apart, but are greater than 58 bits in length, the most significant
124 * bits of each limb overlap with the least significant bits of the next.
125 *
126 * A field element with 64-bit limbs is an 'felem'. One with 128-bit limbs is a
127 * 'largefelem' */
128
129#define NLIMBS 9
130
131typedef uint64_t limb;
132typedef limb limb_aX __attribute((__aligned__(1)));
133typedef limb felem[NLIMBS];
134typedef uint128_t largefelem[NLIMBS];
135
136static const limb bottom57bits = 0x1ffffffffffffff;
137static const limb bottom58bits = 0x3ffffffffffffff;
138
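/*-
 * Editor's note (illustrative sketch, not part of the upstream source):
 * a fully reduced element keeps v[0..7] below 2^58 and v[8] below 2^57,
 * which is exactly what the two masks above select. Splitting a wider
 * value x into the part that stays in limb i and the carry into limb i+1
 * therefore looks like
 *
 *     limb keep  = x & bottom58bits;    // weight 2^(58*i)
 *     limb carry = x >> 58;             // weight 2^(58*(i+1))
 *
 * and the number represented is
 *     v[0] + v[1]*2^58 + ... + v[8]*2^464 (mod 2^521 - 1).
 */
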
139/*
140 * bin66_to_felem takes a little-endian byte array and converts it into felem
141 * form. This assumes that the CPU is little-endian.
142 */
143static void bin66_to_felem(felem out, const u8 in[66])
144{
145 out[0] = (*((limb *) & in[0])) & bottom58bits;
146 out[1] = (*((limb_aX *) & in[7]) >> 2) & bottom58bits;
147 out[2] = (*((limb_aX *) & in[14]) >> 4) & bottom58bits;
148 out[3] = (*((limb_aX *) & in[21]) >> 6) & bottom58bits;
149 out[4] = (*((limb_aX *) & in[29])) & bottom58bits;
150 out[5] = (*((limb_aX *) & in[36]) >> 2) & bottom58bits;
151 out[6] = (*((limb_aX *) & in[43]) >> 4) & bottom58bits;
152 out[7] = (*((limb_aX *) & in[50]) >> 6) & bottom58bits;
153 out[8] = (*((limb_aX *) & in[58])) & bottom57bits;
154}
155
156/*
157 * felem_to_bin66 takes an felem and serializes into a little endian, 66 byte
158 * array. This assumes that the CPU is little-endian.
159 */
160static void felem_to_bin66(u8 out[66], const felem in)
161{
162 memset(out, 0, 66);
163 (*((limb *) & out[0])) = in[0];
164 (*((limb_aX *) & out[7])) |= in[1] << 2;
165 (*((limb_aX *) & out[14])) |= in[2] << 4;
166 (*((limb_aX *) & out[21])) |= in[3] << 6;
167 (*((limb_aX *) & out[29])) = in[4];
168 (*((limb_aX *) & out[36])) |= in[5] << 2;
169 (*((limb_aX *) & out[43])) |= in[6] << 4;
170 (*((limb_aX *) & out[50])) |= in[7] << 6;
171 (*((limb_aX *) & out[58])) = in[8];
172}
173
174/* BN_to_felem converts an OpenSSL BIGNUM into an felem */
175static int BN_to_felem(felem out, const BIGNUM *bn)
176{
177 felem_bytearray b_out;
178 int num_bytes;
179
180 if (BN_is_negative(bn)) {
181 ERR_raise(ERR_LIB_EC, EC_R_BIGNUM_OUT_OF_RANGE);
182 return 0;
183 }
184 num_bytes = BN_bn2lebinpad(bn, b_out, sizeof(b_out));
185 if (num_bytes < 0) {
186 ERR_raise(ERR_LIB_EC, EC_R_BIGNUM_OUT_OF_RANGE);
187 return 0;
188 }
189 bin66_to_felem(out, b_out);
190 return 1;
191}
192
193/* felem_to_BN converts an felem into an OpenSSL BIGNUM */
194static BIGNUM *felem_to_BN(BIGNUM *out, const felem in)
195{
196 felem_bytearray b_out;
197 felem_to_bin66(b_out, in);
198 return BN_lebin2bn(b_out, sizeof(b_out), out);
199}
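
/*-
 * Editor's sketch (illustrative only, not part of the upstream source; the
 * BIGNUM |bn| below is a hypothetical caller-supplied value assumed to lie
 * in [0, 2^521)):
 *
 *     felem f;
 *     BIGNUM *round_trip = BN_new();
 *
 *     if (round_trip != NULL && BN_to_felem(f, bn)
 *             && felem_to_BN(round_trip, f) != NULL) {
 *         // round_trip now equals bn: the two conversions are inverses
 *         // for non-negative values below 2^521.
 *     }
 *     BN_free(round_trip);
 */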
200
201/*-
202 * Field operations
203 * ----------------
204 */
205
206static void felem_one(felem out)
207{
208 out[0] = 1;
209 out[1] = 0;
210 out[2] = 0;
211 out[3] = 0;
212 out[4] = 0;
213 out[5] = 0;
214 out[6] = 0;
215 out[7] = 0;
216 out[8] = 0;
217}
218
219static void felem_assign(felem out, const felem in)
220{
221 out[0] = in[0];
222 out[1] = in[1];
223 out[2] = in[2];
224 out[3] = in[3];
225 out[4] = in[4];
226 out[5] = in[5];
227 out[6] = in[6];
228 out[7] = in[7];
229 out[8] = in[8];
230}
231
232/* felem_sum64 sets out = out + in. */
233static void felem_sum64(felem out, const felem in)
234{
235 out[0] += in[0];
236 out[1] += in[1];
237 out[2] += in[2];
238 out[3] += in[3];
239 out[4] += in[4];
240 out[5] += in[5];
241 out[6] += in[6];
242 out[7] += in[7];
243 out[8] += in[8];
244}
245
246/* felem_scalar sets out = in * scalar */
247static void felem_scalar(felem out, const felem in, limb scalar)
248{
249 out[0] = in[0] * scalar;
250 out[1] = in[1] * scalar;
251 out[2] = in[2] * scalar;
252 out[3] = in[3] * scalar;
253 out[4] = in[4] * scalar;
254 out[5] = in[5] * scalar;
255 out[6] = in[6] * scalar;
256 out[7] = in[7] * scalar;
257 out[8] = in[8] * scalar;
258}
259
260/* felem_scalar64 sets out = out * scalar */
261static void felem_scalar64(felem out, limb scalar)
262{
263 out[0] *= scalar;
264 out[1] *= scalar;
265 out[2] *= scalar;
266 out[3] *= scalar;
267 out[4] *= scalar;
268 out[5] *= scalar;
269 out[6] *= scalar;
270 out[7] *= scalar;
271 out[8] *= scalar;
272}
273
274/* felem_scalar128 sets out = out * scalar */
275static void felem_scalar128(largefelem out, limb scalar)
276{
277 out[0] *= scalar;
278 out[1] *= scalar;
279 out[2] *= scalar;
280 out[3] *= scalar;
281 out[4] *= scalar;
282 out[5] *= scalar;
283 out[6] *= scalar;
284 out[7] *= scalar;
285 out[8] *= scalar;
286}
287
288/*-
289 * felem_neg sets |out| to |-in|
290 * On entry:
291 * in[i] < 2^59 + 2^14
292 * On exit:
293 * out[i] < 2^62
294 */
295static void felem_neg(felem out, const felem in)
296{
297 /* In order to prevent underflow, we subtract from 0 mod p. */
298 static const limb two62m3 = (((limb) 1) << 62) - (((limb) 1) << 5);
299 static const limb two62m2 = (((limb) 1) << 62) - (((limb) 1) << 4);
300
301 out[0] = two62m3 - in[0];
302 out[1] = two62m2 - in[1];
303 out[2] = two62m2 - in[2];
304 out[3] = two62m2 - in[3];
305 out[4] = two62m2 - in[4];
306 out[5] = two62m2 - in[5];
307 out[6] = two62m2 - in[6];
308 out[7] = two62m2 - in[7];
309 out[8] = two62m2 - in[8];
310}
311
312/*-
313 * felem_diff64 subtracts |in| from |out|
314 * On entry:
315 * in[i] < 2^59 + 2^14
316 * On exit:
317 * out[i] < out[i] + 2^62
318 */
319static void felem_diff64(felem out, const felem in)
320{
321 /*
322 * In order to prevent underflow, we add 0 mod p before subtracting.
323 */
324 static const limb two62m3 = (((limb) 1) << 62) - (((limb) 1) << 5);
325 static const limb two62m2 = (((limb) 1) << 62) - (((limb) 1) << 4);
326
327 out[0] += two62m3 - in[0];
328 out[1] += two62m2 - in[1];
329 out[2] += two62m2 - in[2];
330 out[3] += two62m2 - in[3];
331 out[4] += two62m2 - in[4];
332 out[5] += two62m2 - in[5];
333 out[6] += two62m2 - in[6];
334 out[7] += two62m2 - in[7];
335 out[8] += two62m2 - in[8];
336}
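
/*-
 * Editor's check (illustrative, not part of the upstream source): the
 * constant added in felem_neg/felem_diff64 is a multiple of p. Writing
 * 16*p limb-wise gives 16*(2^58 - 1) = 2^62 - 2^4 in limbs 0..7 and
 * 16*(2^57 - 1) = 2^61 - 2^4 in limb 8. The constant above differs from
 * that by -2^4 in limb 0 and +2^61 in limb 8, i.e. by
 *
 *     2^61 * 2^464 - 2^4 = 2^525 - 16 = 16*(2^521 - 1) = 16p,
 *
 * so the constant equals 32p = 0 (mod p), and adding it before the
 * subtraction cannot change the result mod p, only prevent underflow.
 */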
337
338/*-
339 * felem_diff_128_64 subtracts |in| from |out|
340 * On entry:
341 * in[i] < 2^62 + 2^17
342 * On exit:
343 * out[i] < out[i] + 2^63
344 */
345static void felem_diff_128_64(largefelem out, const felem in)
346{
347 /*
348 * In order to prevent underflow, we add 64p mod p (which is equivalent
349 * to 0 mod p) before subtracting. p is 2^521 - 1, i.e. in binary a 521
350 * digit number with all bits set to 1. See "The representation of field
351 * elements" comment above for a description of how limbs are used to
352 * represent a number. 64p is represented with 8 limbs containing a number
353 * with 58 bits set and one limb with a number with 57 bits set.
354 */
355 static const limb two63m6 = (((limb) 1) << 63) - (((limb) 1) << 6);
356 static const limb two63m5 = (((limb) 1) << 63) - (((limb) 1) << 5);
357
358 out[0] += two63m6 - in[0];
359 out[1] += two63m5 - in[1];
360 out[2] += two63m5 - in[2];
361 out[3] += two63m5 - in[3];
362 out[4] += two63m5 - in[4];
363 out[5] += two63m5 - in[5];
364 out[6] += two63m5 - in[6];
365 out[7] += two63m5 - in[7];
366 out[8] += two63m5 - in[8];
367}
368
369/*-
370 * felem_diff128 subtracts |in| from |out|
371 * On entry:
372 * in[i] < 2^126
373 * On exit:
374 * out[i] < out[i] + 2^127 - 2^69
375 */
376static void felem_diff128(largefelem out, const largefelem in)
377{
378 /*
379 * In order to prevent underflow, we add 0 mod p before subtracting.
380 */
381 static const uint128_t two127m70 =
382 (((uint128_t) 1) << 127) - (((uint128_t) 1) << 70);
383 static const uint128_t two127m69 =
384 (((uint128_t) 1) << 127) - (((uint128_t) 1) << 69);
385
386 out[0] += (two127m70 - in[0]);
387 out[1] += (two127m69 - in[1]);
388 out[2] += (two127m69 - in[2]);
389 out[3] += (two127m69 - in[3]);
390 out[4] += (two127m69 - in[4]);
391 out[5] += (two127m69 - in[5]);
392 out[6] += (two127m69 - in[6]);
393 out[7] += (two127m69 - in[7]);
394 out[8] += (two127m69 - in[8]);
395}
396
397/*-
398 * felem_square sets |out| = |in|^2
399 * On entry:
400 * in[i] < 2^62
401 * On exit:
402 * out[i] < 17 * max(in[i]) * max(in[i])
403 */
404static void felem_square_ref(largefelem out, const felem in)
405{
406 felem inx2, inx4;
407 felem_scalar(inx2, in, 2);
408 felem_scalar(inx4, in, 4);
409
410 /*-
411 * We have many cases where we want to do
412 * in[x] * in[y] +
413 * in[y] * in[x]
414 * This is obviously just
415 * 2 * in[x] * in[y]
416 * However, rather than do the doubling on the 128 bit result, we
417 * double one of the inputs to the multiplication by reading from
418 * |inx2|
419 */
420
421 out[0] = ((uint128_t) in[0]) * in[0];
422 out[1] = ((uint128_t) in[0]) * inx2[1];
423 out[2] = ((uint128_t) in[0]) * inx2[2] + ((uint128_t) in[1]) * in[1];
424 out[3] = ((uint128_t) in[0]) * inx2[3] + ((uint128_t) in[1]) * inx2[2];
425 out[4] = ((uint128_t) in[0]) * inx2[4] +
426 ((uint128_t) in[1]) * inx2[3] + ((uint128_t) in[2]) * in[2];
427 out[5] = ((uint128_t) in[0]) * inx2[5] +
428 ((uint128_t) in[1]) * inx2[4] + ((uint128_t) in[2]) * inx2[3];
429 out[6] = ((uint128_t) in[0]) * inx2[6] +
430 ((uint128_t) in[1]) * inx2[5] +
431 ((uint128_t) in[2]) * inx2[4] + ((uint128_t) in[3]) * in[3];
432 out[7] = ((uint128_t) in[0]) * inx2[7] +
433 ((uint128_t) in[1]) * inx2[6] +
434 ((uint128_t) in[2]) * inx2[5] + ((uint128_t) in[3]) * inx2[4];
435 out[8] = ((uint128_t) in[0]) * inx2[8] +
436 ((uint128_t) in[1]) * inx2[7] +
437 ((uint128_t) in[2]) * inx2[6] +
438 ((uint128_t) in[3]) * inx2[5] + ((uint128_t) in[4]) * in[4];
439
440 /*
441 * The remaining limbs fall above 2^521, with the first falling at 2^522.
442 * They correspond to locations one bit up from the limbs produced above
443 * so we would have to multiply by two to align them. Again, rather than
444 * operate on the 128-bit result, we double one of the inputs to the
445 * multiplication. If we want to double for both this reason, and the
446 * reason above, then we end up multiplying by four.
447 */
448
449 /* 9 */
450 out[0] += ((uint128_t) in[1]) * inx4[8] +
451 ((uint128_t) in[2]) * inx4[7] +
452 ((uint128_t) in[3]) * inx4[6] + ((uint128_t) in[4]) * inx4[5];
453
454 /* 10 */
455 out[1] += ((uint128_t) in[2]) * inx4[8] +
456 ((uint128_t) in[3]) * inx4[7] +
457 ((uint128_t) in[4]) * inx4[6] + ((uint128_t) in[5]) * inx2[5];
458
459 /* 11 */
460 out[2] += ((uint128_t) in[3]) * inx4[8] +
461 ((uint128_t) in[4]) * inx4[7] + ((uint128_t) in[5]) * inx4[6];
462
463 /* 12 */
464 out[3] += ((uint128_t) in[4]) * inx4[8] +
465 ((uint128_t) in[5]) * inx4[7] + ((uint128_t) in[6]) * inx2[6];
466
467 /* 13 */
468 out[4] += ((uint128_t) in[5]) * inx4[8] + ((uint128_t) in[6]) * inx4[7];
469
470 /* 14 */
471 out[5] += ((uint128_t) in[6]) * inx4[8] + ((uint128_t) in[7]) * inx2[7];
472
473 /* 15 */
474 out[6] += ((uint128_t) in[7]) * inx4[8];
475
476 /* 16 */
477 out[7] += ((uint128_t) in[8]) * inx2[8];
478}
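
/*-
 * Editor's check (illustrative, not part of the upstream source): a term
 * such as in[1]*in[8] has weight 2^58 * 2^464 = 2^522 = 2 (mod 2^521 - 1),
 * so it is folded into out[0] scaled by 2 for the wrap-around and by
 * another 2 for the symmetric in[8]*in[1] term, which is why the |inx4|
 * operand is used. Square terms such as in[5]*in[5] (weight 2^580 =
 * 2*2^58 mod p) only need the wrap-around factor, hence |inx2|.
 */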
479
480/*-
481 * felem_mul sets |out| = |in1| * |in2|
482 * On entry:
483 * in1[i] < 2^64
484 * in2[i] < 2^63
485 * On exit:
486 * out[i] < 17 * max(in1[i]) * max(in2[i])
487 */
488static void felem_mul_ref(largefelem out, const felem in1, const felem in2)
489{
490 felem in2x2;
491 felem_scalar(in2x2, in2, 2);
492
493 out[0] = ((uint128_t) in1[0]) * in2[0];
494
495 out[1] = ((uint128_t) in1[0]) * in2[1] +
496 ((uint128_t) in1[1]) * in2[0];
497
498 out[2] = ((uint128_t) in1[0]) * in2[2] +
499 ((uint128_t) in1[1]) * in2[1] +
500 ((uint128_t) in1[2]) * in2[0];
501
502 out[3] = ((uint128_t) in1[0]) * in2[3] +
503 ((uint128_t) in1[1]) * in2[2] +
504 ((uint128_t) in1[2]) * in2[1] +
505 ((uint128_t) in1[3]) * in2[0];
506
507 out[4] = ((uint128_t) in1[0]) * in2[4] +
508 ((uint128_t) in1[1]) * in2[3] +
509 ((uint128_t) in1[2]) * in2[2] +
510 ((uint128_t) in1[3]) * in2[1] +
511 ((uint128_t) in1[4]) * in2[0];
512
513 out[5] = ((uint128_t) in1[0]) * in2[5] +
514 ((uint128_t) in1[1]) * in2[4] +
515 ((uint128_t) in1[2]) * in2[3] +
516 ((uint128_t) in1[3]) * in2[2] +
517 ((uint128_t) in1[4]) * in2[1] +
518 ((uint128_t) in1[5]) * in2[0];
519
520 out[6] = ((uint128_t) in1[0]) * in2[6] +
521 ((uint128_t) in1[1]) * in2[5] +
522 ((uint128_t) in1[2]) * in2[4] +
523 ((uint128_t) in1[3]) * in2[3] +
524 ((uint128_t) in1[4]) * in2[2] +
525 ((uint128_t) in1[5]) * in2[1] +
526 ((uint128_t) in1[6]) * in2[0];
527
528 out[7] = ((uint128_t) in1[0]) * in2[7] +
529 ((uint128_t) in1[1]) * in2[6] +
530 ((uint128_t) in1[2]) * in2[5] +
531 ((uint128_t) in1[3]) * in2[4] +
532 ((uint128_t) in1[4]) * in2[3] +
533 ((uint128_t) in1[5]) * in2[2] +
534 ((uint128_t) in1[6]) * in2[1] +
535 ((uint128_t) in1[7]) * in2[0];
536
537 out[8] = ((uint128_t) in1[0]) * in2[8] +
538 ((uint128_t) in1[1]) * in2[7] +
539 ((uint128_t) in1[2]) * in2[6] +
540 ((uint128_t) in1[3]) * in2[5] +
541 ((uint128_t) in1[4]) * in2[4] +
542 ((uint128_t) in1[5]) * in2[3] +
543 ((uint128_t) in1[6]) * in2[2] +
544 ((uint128_t) in1[7]) * in2[1] +
545 ((uint128_t) in1[8]) * in2[0];
546
547 /* See comment in felem_square about the use of in2x2 here */
548
549 out[0] += ((uint128_t) in1[1]) * in2x2[8] +
550 ((uint128_t) in1[2]) * in2x2[7] +
551 ((uint128_t) in1[3]) * in2x2[6] +
552 ((uint128_t) in1[4]) * in2x2[5] +
553 ((uint128_t) in1[5]) * in2x2[4] +
554 ((uint128_t) in1[6]) * in2x2[3] +
555 ((uint128_t) in1[7]) * in2x2[2] +
556 ((uint128_t) in1[8]) * in2x2[1];
557
558 out[1] += ((uint128_t) in1[2]) * in2x2[8] +
559 ((uint128_t) in1[3]) * in2x2[7] +
560 ((uint128_t) in1[4]) * in2x2[6] +
561 ((uint128_t) in1[5]) * in2x2[5] +
562 ((uint128_t) in1[6]) * in2x2[4] +
563 ((uint128_t) in1[7]) * in2x2[3] +
564 ((uint128_t) in1[8]) * in2x2[2];
565
566 out[2] += ((uint128_t) in1[3]) * in2x2[8] +
567 ((uint128_t) in1[4]) * in2x2[7] +
568 ((uint128_t) in1[5]) * in2x2[6] +
569 ((uint128_t) in1[6]) * in2x2[5] +
570 ((uint128_t) in1[7]) * in2x2[4] +
571 ((uint128_t) in1[8]) * in2x2[3];
572
573 out[3] += ((uint128_t) in1[4]) * in2x2[8] +
574 ((uint128_t) in1[5]) * in2x2[7] +
575 ((uint128_t) in1[6]) * in2x2[6] +
576 ((uint128_t) in1[7]) * in2x2[5] +
577 ((uint128_t) in1[8]) * in2x2[4];
578
579 out[4] += ((uint128_t) in1[5]) * in2x2[8] +
580 ((uint128_t) in1[6]) * in2x2[7] +
581 ((uint128_t) in1[7]) * in2x2[6] +
582 ((uint128_t) in1[8]) * in2x2[5];
583
584 out[5] += ((uint128_t) in1[6]) * in2x2[8] +
585 ((uint128_t) in1[7]) * in2x2[7] +
586 ((uint128_t) in1[8]) * in2x2[6];
587
588 out[6] += ((uint128_t) in1[7]) * in2x2[8] +
589 ((uint128_t) in1[8]) * in2x2[7];
590
591 out[7] += ((uint128_t) in1[8]) * in2x2[8];
592}
593
594static const limb bottom52bits = 0xfffffffffffff;
595
596/*-
597 * felem_reduce converts a largefelem to an felem.
598 * On entry:
599 * in[i] < 2^128
600 * On exit:
601 * out[i] < 2^59 + 2^14
602 */
603static void felem_reduce(felem out, const largefelem in)
604{
605 u64 overflow1, overflow2;
606
607 out[0] = ((limb) in[0]) & bottom58bits;
608 out[1] = ((limb) in[1]) & bottom58bits;
609 out[2] = ((limb) in[2]) & bottom58bits;
610 out[3] = ((limb) in[3]) & bottom58bits;
611 out[4] = ((limb) in[4]) & bottom58bits;
612 out[5] = ((limb) in[5]) & bottom58bits;
613 out[6] = ((limb) in[6]) & bottom58bits;
614 out[7] = ((limb) in[7]) & bottom58bits;
615 out[8] = ((limb) in[8]) & bottom58bits;
616
617 /* out[i] < 2^58 */
618
619 out[1] += ((limb) in[0]) >> 58;
620 out[1] += (((limb) (in[0] >> 64)) & bottom52bits) << 6;
621 /*-
622 * out[1] < 2^58 + 2^6 + 2^58
623 * = 2^59 + 2^6
624 */
625 out[2] += ((limb) (in[0] >> 64)) >> 52;
626
627 out[2] += ((limb) in[1]) >> 58;
628 out[2] += (((limb) (in[1] >> 64)) & bottom52bits) << 6;
629 out[3] += ((limb) (in[1] >> 64)) >> 52;
630
631 out[3] += ((limb) in[2]) >> 58;
632 out[3] += (((limb) (in[2] >> 64)) & bottom52bits) << 6;
633 out[4] += ((limb) (in[2] >> 64)) >> 52;
634
635 out[4] += ((limb) in[3]) >> 58;
636 out[4] += (((limb) (in[3] >> 64)) & bottom52bits) << 6;
637 out[5] += ((limb) (in[3] >> 64)) >> 52;
638
639 out[5] += ((limb) in[4]) >> 58;
640 out[5] += (((limb) (in[4] >> 64)) & bottom52bits) << 6;
641 out[6] += ((limb) (in[4] >> 64)) >> 52;
642
643 out[6] += ((limb) in[5]) >> 58;
644 out[6] += (((limb) (in[5] >> 64)) & bottom52bits) << 6;
645 out[7] += ((limb) (in[5] >> 64)) >> 52;
646
647 out[7] += ((limb) in[6]) >> 58;
648 out[7] += (((limb) (in[6] >> 64)) & bottom52bits) << 6;
649 out[8] += ((limb) (in[6] >> 64)) >> 52;
650
651 out[8] += ((limb) in[7]) >> 58;
652 out[8] += (((limb) (in[7] >> 64)) & bottom52bits) << 6;
653 /*-
654 * out[x > 1] < 2^58 + 2^6 + 2^58 + 2^12
655 * < 2^59 + 2^13
656 */
657 overflow1 = ((limb) (in[7] >> 64)) >> 52;
658
659 overflow1 += ((limb) in[8]) >> 58;
660 overflow1 += (((limb) (in[8] >> 64)) & bottom52bits) << 6;
661 overflow2 = ((limb) (in[8] >> 64)) >> 52;
662
663 overflow1 <<= 1; /* overflow1 < 2^13 + 2^7 + 2^59 */
664 overflow2 <<= 1; /* overflow2 < 2^13 */
665
666 out[0] += overflow1; /* out[0] < 2^60 */
667 out[1] += overflow2; /* out[1] < 2^59 + 2^6 + 2^13 */
668
669 out[1] += out[0] >> 58;
670 out[0] &= bottom58bits;
671 /*-
672 * out[0] < 2^58
673 * out[1] < 2^59 + 2^6 + 2^13 + 2^2
674 * < 2^59 + 2^14
675 */
676}
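
/*-
 * Editor's note (illustrative, not part of the upstream source): the final
 * carry handling relies on p = 2^521 - 1, so powers of two at or above
 * 2^521 wrap around cheaply:
 *
 *     2^522 = 2 * 2^521 = 2 * (p + 1) = 2        (mod p)
 *     2^580 = 2^59 * 2^521 = 2 * 2^58            (mod p)
 *
 * overflow1 collects the contributions at weight 2^522 and overflow2 the
 * one at weight 2^580, which is why each is shifted left by one bit and
 * then added back at weight 2^0 (out[0]) and 2^58 (out[1]) respectively.
 */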
677
678#if defined(ECP_NISTP521_ASM)
679static void felem_square_wrapper(largefelem out, const felem in);
680static void felem_mul_wrapper(largefelem out, const felem in1, const felem in2);
681
682static void (*felem_square_p)(largefelem out, const felem in) =
683 felem_square_wrapper;
684static void (*felem_mul_p)(largefelem out, const felem in1, const felem in2) =
685 felem_mul_wrapper;
686
687void p521_felem_square(largefelem out, const felem in);
688void p521_felem_mul(largefelem out, const felem in1, const felem in2);
689
690# if defined(_ARCH_PPC64)
691# include "crypto/ppc_arch.h"
692# endif
693
694static void felem_select(void)
695{
696# if defined(_ARCH_PPC64)
697 if ((OPENSSL_ppccap_P & PPC_MADD300) && (OPENSSL_ppccap_P & PPC_ALTIVEC)) {
698 felem_square_p = p521_felem_square;
699 felem_mul_p = p521_felem_mul;
700
701 return;
702 }
703# endif
704
705 /* Default */
706 felem_square_p = felem_square_ref;
707 felem_mul_p = felem_mul_ref;
708}
709
710static void felem_square_wrapper(largefelem out, const felem in)
711{
712 felem_select();
713 felem_square_p(out, in);
714}
715
716static void felem_mul_wrapper(largefelem out, const felem in1, const felem in2)
717{
718 felem_select();
719 felem_mul_p(out, in1, in2);
720}
721
722# define felem_square felem_square_p
723# define felem_mul felem_mul_p
724#else
725# define felem_square felem_square_ref
726# define felem_mul felem_mul_ref
727#endif
728
729static void felem_square_reduce(felem out, const felem in)
730{
731 largefelem tmp;
732 felem_square(tmp, in);
733 felem_reduce(out, tmp);
734}
735
736static void felem_mul_reduce(felem out, const felem in1, const felem in2)
737{
738 largefelem tmp;
739 felem_mul(tmp, in1, in2);
740 felem_reduce(out, tmp);
741}
742
743/*-
744 * felem_inv calculates |out| = |in|^{-1}
745 *
746 * Based on Fermat's Little Theorem:
747 * a^p = a (mod p)
748 * a^{p-1} = 1 (mod p)
749 * a^{p-2} = a^{-1} (mod p)
750 */
751static void felem_inv(felem out, const felem in)
752{
753 felem ftmp, ftmp2, ftmp3, ftmp4;
754 largefelem tmp;
755 unsigned i;
756
757 felem_square(tmp, in);
758 felem_reduce(ftmp, tmp); /* 2^1 */
759 felem_mul(tmp, in, ftmp);
760 felem_reduce(ftmp, tmp); /* 2^2 - 2^0 */
761 felem_assign(ftmp2, ftmp);
762 felem_square(tmp, ftmp);
763 felem_reduce(ftmp, tmp); /* 2^3 - 2^1 */
764 felem_mul(tmp, in, ftmp);
765 felem_reduce(ftmp, tmp); /* 2^3 - 2^0 */
766 felem_square(tmp, ftmp);
767 felem_reduce(ftmp, tmp); /* 2^4 - 2^1 */
768
769 felem_square(tmp, ftmp2);
770 felem_reduce(ftmp3, tmp); /* 2^3 - 2^1 */
771 felem_square(tmp, ftmp3);
772 felem_reduce(ftmp3, tmp); /* 2^4 - 2^2 */
773 felem_mul(tmp, ftmp3, ftmp2);
774 felem_reduce(ftmp3, tmp); /* 2^4 - 2^0 */
775
776 felem_assign(ftmp2, ftmp3);
777 felem_square(tmp, ftmp3);
778 felem_reduce(ftmp3, tmp); /* 2^5 - 2^1 */
779 felem_square(tmp, ftmp3);
780 felem_reduce(ftmp3, tmp); /* 2^6 - 2^2 */
781 felem_square(tmp, ftmp3);
782 felem_reduce(ftmp3, tmp); /* 2^7 - 2^3 */
783 felem_square(tmp, ftmp3);
784 felem_reduce(ftmp3, tmp); /* 2^8 - 2^4 */
785 felem_mul(tmp, ftmp3, ftmp);
786 felem_reduce(ftmp4, tmp); /* 2^8 - 2^1 */
787 felem_square(tmp, ftmp4);
788 felem_reduce(ftmp4, tmp); /* 2^9 - 2^2 */
789 felem_mul(tmp, ftmp3, ftmp2);
790 felem_reduce(ftmp3, tmp); /* 2^8 - 2^0 */
791 felem_assign(ftmp2, ftmp3);
792
793 for (i = 0; i < 8; i++) {
794 felem_square(tmp, ftmp3);
795 felem_reduce(ftmp3, tmp); /* 2^16 - 2^8 */
796 }
797 felem_mul(tmp, ftmp3, ftmp2);
798 felem_reduce(ftmp3, tmp); /* 2^16 - 2^0 */
799 felem_assign(ftmp2, ftmp3);
800
801 for (i = 0; i < 16; i++) {
802 felem_square(tmp, ftmp3);
803 felem_reduce(ftmp3, tmp); /* 2^32 - 2^16 */
804 }
805 felem_mul(tmp, ftmp3, ftmp2);
806 felem_reduce(ftmp3, tmp); /* 2^32 - 2^0 */
807 felem_assign(ftmp2, ftmp3);
808
809 for (i = 0; i < 32; i++) {
810 felem_square(tmp, ftmp3);
811 felem_reduce(ftmp3, tmp); /* 2^64 - 2^32 */
812 }
813 felem_mul(tmp, ftmp3, ftmp2);
814 felem_reduce(ftmp3, tmp); /* 2^64 - 2^0 */
815 felem_assign(ftmp2, ftmp3);
816
817 for (i = 0; i < 64; i++) {
818 felem_square(tmp, ftmp3);
819 felem_reduce(ftmp3, tmp); /* 2^128 - 2^64 */
820 }
821 felem_mul(tmp, ftmp3, ftmp2);
822 felem_reduce(ftmp3, tmp); /* 2^128 - 2^0 */
823 felem_assign(ftmp2, ftmp3);
824
825 for (i = 0; i < 128; i++) {
826 felem_square(tmp, ftmp3);
827 felem_reduce(ftmp3, tmp); /* 2^256 - 2^128 */
828 }
829 felem_mul(tmp, ftmp3, ftmp2);
830 felem_reduce(ftmp3, tmp); /* 2^256 - 2^0 */
831 felem_assign(ftmp2, ftmp3);
832
833 for (i = 0; i < 256; i++) {
834 felem_square(tmp, ftmp3);
835 felem_reduce(ftmp3, tmp); /* 2^512 - 2^256 */
836 }
837 felem_mul(tmp, ftmp3, ftmp2);
838 felem_reduce(ftmp3, tmp); /* 2^512 - 2^0 */
839
840 for (i = 0; i < 9; i++) {
841 felem_square(tmp, ftmp3);
842 felem_reduce(ftmp3, tmp); /* 2^521 - 2^9 */
843 }
844 felem_mul(tmp, ftmp3, ftmp4);
845 felem_reduce(ftmp3, tmp); /* 2^521 - 2^2 */
846 felem_mul(tmp, ftmp3, in);
847 felem_reduce(out, tmp); /* 2^521 - 3 */
848}
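
/*-
 * Editor's check (illustrative, not part of the upstream source): tracking
 * the exponents noted in the comments above, the chain finishes with
 *
 *     (2^521 - 2^9) + (2^9 - 2^2) = 2^521 - 4     (the ftmp3 * ftmp4 step)
 *     (2^521 - 4)   + 1           = 2^521 - 3 = p - 2
 *
 * so the routine computes in^(p-2) = in^(-1) (mod p), as promised by the
 * Fermat's Little Theorem argument in the header comment.
 */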
849
850/* This is 2^521-1, expressed as an felem */
851static const felem kPrime = {
852 0x03ffffffffffffff, 0x03ffffffffffffff, 0x03ffffffffffffff,
853 0x03ffffffffffffff, 0x03ffffffffffffff, 0x03ffffffffffffff,
854 0x03ffffffffffffff, 0x03ffffffffffffff, 0x01ffffffffffffff
855};
856
857/*-
858 * felem_is_zero returns a limb with all bits set if |in| == 0 (mod p) and 0
859 * otherwise.
860 * On entry:
861 * in[i] < 2^59 + 2^14
862 */
863static limb felem_is_zero(const felem in)
864{
865 felem ftmp;
866 limb is_zero, is_p;
867 felem_assign(ftmp, in);
868
869 ftmp[0] += ftmp[8] >> 57;
870 ftmp[8] &= bottom57bits;
871 /* ftmp[8] < 2^57 */
872 ftmp[1] += ftmp[0] >> 58;
873 ftmp[0] &= bottom58bits;
874 ftmp[2] += ftmp[1] >> 58;
875 ftmp[1] &= bottom58bits;
876 ftmp[3] += ftmp[2] >> 58;
877 ftmp[2] &= bottom58bits;
878 ftmp[4] += ftmp[3] >> 58;
879 ftmp[3] &= bottom58bits;
880 ftmp[5] += ftmp[4] >> 58;
881 ftmp[4] &= bottom58bits;
882 ftmp[6] += ftmp[5] >> 58;
883 ftmp[5] &= bottom58bits;
884 ftmp[7] += ftmp[6] >> 58;
885 ftmp[6] &= bottom58bits;
886 ftmp[8] += ftmp[7] >> 58;
887 ftmp[7] &= bottom58bits;
888 /* ftmp[8] < 2^57 + 4 */
889
890 /*
891 * The ninth limb of 2*(2^521-1) is 0x03ffffffffffffff, which is greater
892 * than our bound for ftmp[8]. Therefore we only have to check whether
893 * the value is zero or 2^521-1.
894 */
895
896 is_zero = 0;
897 is_zero |= ftmp[0];
898 is_zero |= ftmp[1];
899 is_zero |= ftmp[2];
900 is_zero |= ftmp[3];
901 is_zero |= ftmp[4];
902 is_zero |= ftmp[5];
903 is_zero |= ftmp[6];
904 is_zero |= ftmp[7];
905 is_zero |= ftmp[8];
906
907 is_zero--;
908 /*
909 * We know that ftmp[i] < 2^63, therefore the only way that the top bit
910 * can be set is if is_zero was 0 before the decrement.
911 */
912 is_zero = 0 - (is_zero >> 63);
913
914 is_p = ftmp[0] ^ kPrime[0];
915 is_p |= ftmp[1] ^ kPrime[1];
916 is_p |= ftmp[2] ^ kPrime[2];
917 is_p |= ftmp[3] ^ kPrime[3];
918 is_p |= ftmp[4] ^ kPrime[4];
919 is_p |= ftmp[5] ^ kPrime[5];
920 is_p |= ftmp[6] ^ kPrime[6];
921 is_p |= ftmp[7] ^ kPrime[7];
922 is_p |= ftmp[8] ^ kPrime[8];
923
924 is_p--;
925 is_p = 0 - (is_p >> 63);
926
927 is_zero |= is_p;
928 return is_zero;
929}
930
931static int felem_is_zero_int(const void *in)
932{
933 return (int)(felem_is_zero(in) & ((limb) 1));
934}
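
/*-
 * Editor's note (illustrative, not part of the upstream source): the
 * "decrement, then sign-extend" idiom above builds a branch-free mask for
 * a 64-bit value x that is known to be below 2^63:
 *
 *     x == 0:  x - 1 = 0xffffffffffffffff, top bit set   -> mask = all ones
 *     x != 0:  x - 1 < 2^63,               top bit clear -> mask = 0
 *
 * so "x--; x = 0 - (x >> 63);" yields all ones exactly when x was zero,
 * without any data-dependent branch.
 */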
935
936/*-
937 * felem_contract converts |in| to its unique, minimal representation.
938 * On entry:
939 * in[i] < 2^59 + 2^14
940 */
941static void felem_contract(felem out, const felem in)
942{
943 limb is_p, is_greater, sign;
944 static const limb two58 = ((limb) 1) << 58;
945
946 felem_assign(out, in);
947
948 out[0] += out[8] >> 57;
949 out[8] &= bottom57bits;
950 /* out[8] < 2^57 */
951 out[1] += out[0] >> 58;
952 out[0] &= bottom58bits;
953 out[2] += out[1] >> 58;
954 out[1] &= bottom58bits;
955 out[3] += out[2] >> 58;
956 out[2] &= bottom58bits;
957 out[4] += out[3] >> 58;
958 out[3] &= bottom58bits;
959 out[5] += out[4] >> 58;
960 out[4] &= bottom58bits;
961 out[6] += out[5] >> 58;
962 out[5] &= bottom58bits;
963 out[7] += out[6] >> 58;
964 out[6] &= bottom58bits;
965 out[8] += out[7] >> 58;
966 out[7] &= bottom58bits;
967 /* out[8] < 2^57 + 4 */
968
969 /*
970 * If the value is greater than 2^521-1 then we have to subtract 2^521-1
971 * out. See the comments in felem_is_zero regarding why we don't test for
972 * other multiples of the prime.
973 */
974
975 /*
976 * First, if |out| is equal to 2^521-1, we subtract it out to get zero.
977 */
978
979 is_p = out[0] ^ kPrime[0];
980 is_p |= out[1] ^ kPrime[1];
981 is_p |= out[2] ^ kPrime[2];
982 is_p |= out[3] ^ kPrime[3];
983 is_p |= out[4] ^ kPrime[4];
984 is_p |= out[5] ^ kPrime[5];
985 is_p |= out[6] ^ kPrime[6];
986 is_p |= out[7] ^ kPrime[7];
987 is_p |= out[8] ^ kPrime[8];
988
989 is_p--;
990 is_p &= is_p << 32;
991 is_p &= is_p << 16;
992 is_p &= is_p << 8;
993 is_p &= is_p << 4;
994 is_p &= is_p << 2;
995 is_p &= is_p << 1;
996 is_p = 0 - (is_p >> 63);
997 is_p = ~is_p;
998
999 /* is_p is 0 iff |out| == 2^521-1 and all ones otherwise */
1000
1001 out[0] &= is_p;
1002 out[1] &= is_p;
1003 out[2] &= is_p;
1004 out[3] &= is_p;
1005 out[4] &= is_p;
1006 out[5] &= is_p;
1007 out[6] &= is_p;
1008 out[7] &= is_p;
1009 out[8] &= is_p;
1010
1011 /*
1012 * In order to test that |out| >= 2^521-1 we need only test if out[8] >>
1013 * 57 is greater than zero as (2^521-1) + x >= 2^522
1014 */
1015 is_greater = out[8] >> 57;
1016 is_greater |= is_greater << 32;
1017 is_greater |= is_greater << 16;
1018 is_greater |= is_greater << 8;
1019 is_greater |= is_greater << 4;
1020 is_greater |= is_greater << 2;
1021 is_greater |= is_greater << 1;
1022 is_greater = 0 - (is_greater >> 63);
1023
1024 out[0] -= kPrime[0] & is_greater;
1025 out[1] -= kPrime[1] & is_greater;
1026 out[2] -= kPrime[2] & is_greater;
1027 out[3] -= kPrime[3] & is_greater;
1028 out[4] -= kPrime[4] & is_greater;
1029 out[5] -= kPrime[5] & is_greater;
1030 out[6] -= kPrime[6] & is_greater;
1031 out[7] -= kPrime[7] & is_greater;
1032 out[8] -= kPrime[8] & is_greater;
1033
1034 /* Eliminate negative coefficients */
1035 sign = -(out[0] >> 63);
1036 out[0] += (two58 & sign);
1037 out[1] -= (1 & sign);
1038 sign = -(out[1] >> 63);
1039 out[1] += (two58 & sign);
1040 out[2] -= (1 & sign);
1041 sign = -(out[2] >> 63);
1042 out[2] += (two58 & sign);
1043 out[3] -= (1 & sign);
1044 sign = -(out[3] >> 63);
1045 out[3] += (two58 & sign);
1046 out[4] -= (1 & sign);
1047 sign = -(out[4] >> 63);
1048 out[4] += (two58 & sign);
1049 out[5] -= (1 & sign);
1050 sign = -(out[0] >> 63);
1051 out[5] += (two58 & sign);
1052 out[6] -= (1 & sign);
1053 sign = -(out[6] >> 63);
1054 out[6] += (two58 & sign);
1055 out[7] -= (1 & sign);
1056 sign = -(out[7] >> 63);
1057 out[7] += (two58 & sign);
1058 out[8] -= (1 & sign);
1059 sign = -(out[5] >> 63);
1060 out[5] += (two58 & sign);
1061 out[6] -= (1 & sign);
1062 sign = -(out[6] >> 63);
1063 out[6] += (two58 & sign);
1064 out[7] -= (1 & sign);
1065 sign = -(out[7] >> 63);
1066 out[7] += (two58 & sign);
1067 out[8] -= (1 & sign);
1068}
1069
1070/*-
1071 * Group operations
1072 * ----------------
1073 *
1074 * Building on top of the field operations we have the operations on the
1075 * elliptic curve group itself. Points on the curve are represented in Jacobian
1076 * coordinates */
1077
1078/*-
1079 * point_double calculates 2*(x_in, y_in, z_in)
1080 *
1081 * The method is taken from:
1082 * http://hyperelliptic.org/EFD/g1p/auto-shortw-jacobian-3.html#doubling-dbl-2001-b
1083 *
1084 * Outputs can equal corresponding inputs, i.e., x_out == x_in is allowed,
1085 * while x_out == y_in is not (maybe this works, but it's not tested). */
1086static void
1087point_double(felem x_out, felem y_out, felem z_out,
1088 const felem x_in, const felem y_in, const felem z_in)
1089{
1090 largefelem tmp, tmp2;
1091 felem delta, gamma, beta, alpha, ftmp, ftmp2;
1092
1093 felem_assign(ftmp, x_in);
1094 felem_assign(ftmp2, x_in);
1095
1096 /* delta = z^2 */
1097 felem_square(tmp, z_in);
1098 felem_reduce(delta, tmp); /* delta[i] < 2^59 + 2^14 */
1099
1100 /* gamma = y^2 */
1101 felem_square(tmp, y_in);
1102 felem_reduce(gamma, tmp); /* gamma[i] < 2^59 + 2^14 */
1103
1104 /* beta = x*gamma */
1105 felem_mul(tmp, x_in, gamma);
1106 felem_reduce(beta, tmp); /* beta[i] < 2^59 + 2^14 */
1107
1108 /* alpha = 3*(x-delta)*(x+delta) */
1109 felem_diff64(ftmp, delta);
1110 /* ftmp[i] < 2^61 */
1111 felem_sum64(ftmp2, delta);
1112 /* ftmp2[i] < 2^60 + 2^15 */
1113 felem_scalar64(ftmp2, 3);
1114 /* ftmp2[i] < 3*2^60 + 3*2^15 */
1115 felem_mul(tmp, ftmp, ftmp2);
1116 /*-
1117 * tmp[i] < 17(3*2^121 + 3*2^76)
1118 * = 51*2^121 + 51*2^76
1119 * < 64*2^121 + 64*2^76
1120 * = 2^127 + 2^82
1121 * < 2^128
1122 */
1123 felem_reduce(alpha, tmp);
1124
1125 /* x' = alpha^2 - 8*beta */
1126 felem_square(tmp, alpha);
1127 /*
1128 * tmp[i] < 17*2^120 < 2^125
1129 */
1130 felem_assign(ftmp, beta);
1131 felem_scalar64(ftmp, 8);
1132 /* ftmp[i] < 2^62 + 2^17 */
1133 felem_diff_128_64(tmp, ftmp);
1134 /* tmp[i] < 2^125 + 2^63 + 2^62 + 2^17 */
1135 felem_reduce(x_out, tmp);
1136
1137 /* z' = (y + z)^2 - gamma - delta */
1138 felem_sum64(delta, gamma);
1139 /* delta[i] < 2^60 + 2^15 */
1140 felem_assign(ftmp, y_in);
1141 felem_sum64(ftmp, z_in);
1142 /* ftmp[i] < 2^60 + 2^15 */
1143 felem_square(tmp, ftmp);
1144 /*
1145 * tmp[i] < 17(2^122) < 2^127
1146 */
1147 felem_diff_128_64(tmp, delta);
1148 /* tmp[i] < 2^127 + 2^63 */
1149 felem_reduce(z_out, tmp);
1150
1151 /* y' = alpha*(4*beta - x') - 8*gamma^2 */
1152 felem_scalar64(beta, 4);
1153 /* beta[i] < 2^61 + 2^16 */
1154 felem_diff64(beta, x_out);
1155 /* beta[i] < 2^61 + 2^60 + 2^16 */
1156 felem_mul(tmp, alpha, beta);
1157 /*-
1158 * tmp[i] < 17*((2^59 + 2^14)(2^61 + 2^60 + 2^16))
1159 * = 17*(2^120 + 2^75 + 2^119 + 2^74 + 2^75 + 2^30)
1160 * = 17*(2^120 + 2^119 + 2^76 + 2^74 + 2^30)
1161 * < 2^128
1162 */
1163 felem_square(tmp2, gamma);
1164 /*-
1165 * tmp2[i] < 17*(2^59 + 2^14)^2
1166 * = 17*(2^118 + 2^74 + 2^28)
1167 */
1168 felem_scalar128(tmp2, 8);
1169 /*-
1170 * tmp2[i] < 8*17*(2^118 + 2^74 + 2^28)
1171 * = 2^125 + 2^121 + 2^81 + 2^77 + 2^35 + 2^31
1172 * < 2^126
1173 */
1174 felem_diff128(tmp, tmp2);
1175 /*-
1176 * tmp[i] < 2^127 - 2^69 + 17(2^120 + 2^119 + 2^76 + 2^74 + 2^30)
1177 * = 2^127 + 2^124 + 2^122 + 2^120 + 2^118 + 2^80 + 2^78 + 2^76 +
1178 * 2^74 + 2^69 + 2^34 + 2^30
1179 * < 2^128
1180 */
1181 felem_reduce(y_out, tmp);
1182}
1183
1184/* copy_conditional copies in to out iff mask is all ones. */
1185static void copy_conditional(felem out, const felem in, limb mask)
1186{
1187 unsigned i;
1188 for (i = 0; i < NLIMBS; ++i) {
1189 const limb tmp = mask & (in[i] ^ out[i]);
1190 out[i] ^= tmp;
1191 }
1192}
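
/*-
 * Editor's note (illustrative, not part of the upstream source): with
 * mask = all ones, tmp = in[i] ^ out[i] and the XOR leaves out[i] equal to
 * in[i]; with mask = 0, tmp = 0 and out[i] is untouched. The copy (or
 * non-copy) therefore takes the same code path either way, which is what
 * the constant-time callers below rely on.
 */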
1193
1194/*-
1195 * point_add calculates (x1, y1, z1) + (x2, y2, z2)
1196 *
1197 * The method is taken from
1198 * http://hyperelliptic.org/EFD/g1p/auto-shortw-jacobian-3.html#addition-add-2007-bl,
1199 * adapted for mixed addition (z2 = 1, or z2 = 0 for the point at infinity).
1200 *
1201 * This function includes a branch for checking whether the two input points
1202 * are equal (while not equal to the point at infinity). See comment below
1203 * on constant-time.
1204 */
1205static void point_add(felem x3, felem y3, felem z3,
1206 const felem x1, const felem y1, const felem z1,
1207 const int mixed, const felem x2, const felem y2,
1208 const felem z2)
1209{
1210 felem ftmp, ftmp2, ftmp3, ftmp4, ftmp5, ftmp6, x_out, y_out, z_out;
1211 largefelem tmp, tmp2;
1212 limb x_equal, y_equal, z1_is_zero, z2_is_zero;
1213 limb points_equal;
1214
1215 z1_is_zero = felem_is_zero(z1);
1216 z2_is_zero = felem_is_zero(z2);
1217
1218 /* ftmp = z1z1 = z1**2 */
1219 felem_square(tmp, z1);
1220 felem_reduce(ftmp, tmp);
1221
1222 if (!mixed) {
1223 /* ftmp2 = z2z2 = z2**2 */
1224 felem_square(tmp, z2);
1225 felem_reduce(ftmp2, tmp);
1226
1227 /* u1 = ftmp3 = x1*z2z2 */
1228 felem_mul(tmp, x1, ftmp2);
1229 felem_reduce(ftmp3, tmp);
1230
1231 /* ftmp5 = z1 + z2 */
1232 felem_assign(ftmp5, z1);
1233 felem_sum64(ftmp5, z2);
1234 /* ftmp5[i] < 2^61 */
1235
1236 /* ftmp5 = (z1 + z2)**2 - z1z1 - z2z2 = 2*z1z2 */
1237 felem_square(tmp, ftmp5);
1238 /* tmp[i] < 17*2^122 */
1239 felem_diff_128_64(tmp, ftmp);
1240 /* tmp[i] < 17*2^122 + 2^63 */
1241 felem_diff_128_64(tmp, ftmp2);
1242 /* tmp[i] < 17*2^122 + 2^64 */
1243 felem_reduce(ftmp5, tmp);
1244
1245 /* ftmp2 = z2 * z2z2 */
1246 felem_mul(tmp, ftmp2, z2);
1247 felem_reduce(ftmp2, tmp);
1248
1249 /* s1 = ftmp6 = y1 * z2**3 */
1250 felem_mul(tmp, y1, ftmp2);
1251 felem_reduce(ftmp6, tmp);
1252 } else {
1253 /*
1254 * We'll assume z2 = 1 (special case z2 = 0 is handled later)
1255 */
1256
1257 /* u1 = ftmp3 = x1*z2z2 */
1258 felem_assign(ftmp3, x1);
1259
1260 /* ftmp5 = 2*z1z2 */
1261 felem_scalar(ftmp5, z1, 2);
1262
1263 /* s1 = ftmp6 = y1 * z2**3 */
1264 felem_assign(ftmp6, y1);
1265 }
1266
1267 /* u2 = x2*z1z1 */
1268 felem_mul(tmp, x2, ftmp);
1269 /* tmp[i] < 17*2^120 */
1270
1271 /* h = ftmp4 = u2 - u1 */
1272 felem_diff_128_64(tmp, ftmp3);
1273 /* tmp[i] < 17*2^120 + 2^63 */
1274 felem_reduce(ftmp4, tmp);
1275
1276 x_equal = felem_is_zero(ftmp4);
1277
1278 /* z_out = ftmp5 * h */
1279 felem_mul(tmp, ftmp5, ftmp4);
1280 felem_reduce(z_out, tmp);
1281
1282 /* ftmp = z1 * z1z1 */
1283 felem_mul(tmp, ftmp, z1);
1284 felem_reduce(ftmp, tmp);
1285
1286 /* s2 = tmp = y2 * z1**3 */
1287 felem_mul(tmp, y2, ftmp);
1288 /* tmp[i] < 17*2^120 */
1289
1290 /* r = ftmp5 = (s2 - s1)*2 */
1291 felem_diff_128_64(tmp, ftmp6);
1292 /* tmp[i] < 17*2^120 + 2^63 */
1293 felem_reduce(ftmp5, tmp);
1294 y_equal = felem_is_zero(ftmp5);
1295 felem_scalar64(ftmp5, 2);
1296 /* ftmp5[i] < 2^61 */
1297
1298 /*
1299 * The formulae are incorrect if the points are equal, in affine coordinates
1300 * (X_1, Y_1) == (X_2, Y_2), so we check for this and do doubling if this
1301 * happens.
1302 *
1303 * We use bitwise operations to avoid potential side-channels introduced by
1304 * the short-circuiting behaviour of boolean operators.
1305 *
1306 * The special case of either point being the point at infinity (z1 and/or
1307 * z2 are zero), is handled separately later on in this function, so we
1308 * avoid jumping to point_double here in those special cases.
1309 *
1310 * Notice the comment below on the implications of this branching for timing
1311 * leaks and why it is considered practically irrelevant.
1312 */
1313 points_equal = (x_equal & y_equal & (~z1_is_zero) & (~z2_is_zero));
1314
1315 if (points_equal) {
1316 /*
1317 * This is obviously not constant-time but it will almost-never happen
1318 * for ECDH / ECDSA. The case where it can happen is during scalar-mult
1319 * where the intermediate value gets very close to the group order.
1320 * Since |ossl_ec_GFp_nistp_recode_scalar_bits| produces signed digits
1321 * for the scalar, it's possible for the intermediate value to be a small
1322 * negative multiple of the base point, and for the final signed digit
1323 * to be the same value. We believe that this only occurs for the scalar
1324 * 1fffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
1325 * ffffffa51868783bf2f966b7fcc0148f709a5d03bb5c9b8899c47aebb6fb
1326 * 71e913863f7, in that case the penultimate intermediate is -9G and
1327 * the final digit is also -9G. Since this only happens for a single
1328 * scalar, the timing leak is irrelevant. (Any attacker who wanted to
1329 * check whether a secret scalar was that exact value, can already do
1330 * so.)
1331 */
1332 point_double(x3, y3, z3, x1, y1, z1);
1333 return;
1334 }
1335
1336 /* I = ftmp = (2h)**2 */
1337 felem_assign(ftmp, ftmp4);
1338 felem_scalar64(ftmp, 2);
1339 /* ftmp[i] < 2^61 */
1340 felem_square(tmp, ftmp);
1341 /* tmp[i] < 17*2^122 */
1342 felem_reduce(ftmp, tmp);
1343
1344 /* J = ftmp2 = h * I */
1345 felem_mul(tmp, ftmp4, ftmp);
1346 felem_reduce(ftmp2, tmp);
1347
1348 /* V = ftmp4 = U1 * I */
1349 felem_mul(tmp, ftmp3, ftmp);
1350 felem_reduce(ftmp4, tmp);
1351
1352 /* x_out = r**2 - J - 2V */
1353 felem_square(tmp, ftmp5);
1354 /* tmp[i] < 17*2^122 */
1355 felem_diff_128_64(tmp, ftmp2);
1356 /* tmp[i] < 17*2^122 + 2^63 */
1357 felem_assign(ftmp3, ftmp4);
1358 felem_scalar64(ftmp4, 2);
1359 /* ftmp4[i] < 2^61 */
1360 felem_diff_128_64(tmp, ftmp4);
1361 /* tmp[i] < 17*2^122 + 2^64 */
1362 felem_reduce(x_out, tmp);
1363
1364 /* y_out = r(V-x_out) - 2 * s1 * J */
1365 felem_diff64(ftmp3, x_out);
1366 /*
1367 * ftmp3[i] < 2^60 + 2^60 = 2^61
1368 */
1369 felem_mul(tmp, ftmp5, ftmp3);
1370 /* tmp[i] < 17*2^122 */
1371 felem_mul(tmp2, ftmp6, ftmp2);
1372 /* tmp2[i] < 17*2^120 */
1373 felem_scalar128(tmp2, 2);
1374 /* tmp2[i] < 17*2^121 */
1375 felem_diff128(tmp, tmp2);
1376 /*-
1377 * tmp[i] < 2^127 - 2^69 + 17*2^122
1378 * < 2^127 + 2^126 + 2^122
1379 * < 2^128
1380 */
1381 felem_reduce(y_out, tmp);
1382
1383 copy_conditional(x_out, x2, z1_is_zero);
1384 copy_conditional(x_out, x1, z2_is_zero);
1385 copy_conditional(y_out, y2, z1_is_zero);
1386 copy_conditional(y_out, y1, z2_is_zero);
1387 copy_conditional(z_out, z2, z1_is_zero);
1388 copy_conditional(z_out, z1, z2_is_zero);
1389 felem_assign(x3, x_out);
1390 felem_assign(y3, y_out);
1391 felem_assign(z3, z_out);
1392}
1393
1394/*-
1395 * Base point pre computation
1396 * --------------------------
1397 *
1398 * Two different sorts of precomputed tables are used in the following code.
1399 * Each contains various points on the curve, where each point is three field
1400 * elements (x, y, z).
1401 *
1402 * For the base point table, z is usually 1 (0 for the point at infinity).
1403 * This table has 16 elements:
1404 * index | bits | point
1405 * ------+---------+------------------------------
1406 * 0 | 0 0 0 0 | 0G
1407 * 1 | 0 0 0 1 | 1G
1408 * 2 | 0 0 1 0 | 2^130G
1409 * 3 | 0 0 1 1 | (2^130 + 1)G
1410 * 4 | 0 1 0 0 | 2^260G
1411 * 5 | 0 1 0 1 | (2^260 + 1)G
1412 * 6 | 0 1 1 0 | (2^260 + 2^130)G
1413 * 7 | 0 1 1 1 | (2^260 + 2^130 + 1)G
1414 * 8 | 1 0 0 0 | 2^390G
1415 * 9 | 1 0 0 1 | (2^390 + 1)G
1416 * 10 | 1 0 1 0 | (2^390 + 2^130)G
1417 * 11 | 1 0 1 1 | (2^390 + 2^130 + 1)G
1418 * 12 | 1 1 0 0 | (2^390 + 2^260)G
1419 * 13 | 1 1 0 1 | (2^390 + 2^260 + 1)G
1420 * 14 | 1 1 1 0 | (2^390 + 2^260 + 2^130)G
1421 * 15 | 1 1 1 1 | (2^390 + 2^260 + 2^130 + 1)G
1422 *
1423 * The reason for this is so that we can clock bits into four different
1424 * locations when doing simple scalar multiplies against the base point.
1425 *
1426 * Tables for other points have table[i] = iG for i in 0 .. 16. */
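
/*-
 * Editor's note (illustrative, not part of the upstream source): "clocking
 * bits into four different locations" means that for bit position i of the
 * base-point scalar, batch_mul below builds the table index from four bits
 * spaced 130 positions apart:
 *
 *     bits  = get_bit(g_scalar, i + 390) << 3;
 *     bits |= get_bit(g_scalar, i + 260) << 2;
 *     bits |= get_bit(g_scalar, i + 130) << 1;
 *     bits |= get_bit(g_scalar, i);
 *
 * so a single lookup in gmul[] adds (b3*2^390 + b2*2^260 + b1*2^130 + b0)*G,
 * matching the index/point table described above.
 */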
1427
1428/* gmul is the table of precomputed base points */
1429static const felem gmul[16][3] = {
1430{{0, 0, 0, 0, 0, 0, 0, 0, 0},
1431 {0, 0, 0, 0, 0, 0, 0, 0, 0},
1432 {0, 0, 0, 0, 0, 0, 0, 0, 0}},
1433{{0x017e7e31c2e5bd66, 0x022cf0615a90a6fe, 0x00127a2ffa8de334,
1434 0x01dfbf9d64a3f877, 0x006b4d3dbaa14b5e, 0x014fed487e0a2bd8,
1435 0x015b4429c6481390, 0x03a73678fb2d988e, 0x00c6858e06b70404},
1436 {0x00be94769fd16650, 0x031c21a89cb09022, 0x039013fad0761353,
1437 0x02657bd099031542, 0x03273e662c97ee72, 0x01e6d11a05ebef45,
1438 0x03d1bd998f544495, 0x03001172297ed0b1, 0x011839296a789a3b},
1439 {1, 0, 0, 0, 0, 0, 0, 0, 0}},
1440{{0x0373faacbc875bae, 0x00f325023721c671, 0x00f666fd3dbde5ad,
1441 0x01a6932363f88ea7, 0x01fc6d9e13f9c47b, 0x03bcbffc2bbf734e,
1442 0x013ee3c3647f3a92, 0x029409fefe75d07d, 0x00ef9199963d85e5},
1443 {0x011173743ad5b178, 0x02499c7c21bf7d46, 0x035beaeabb8b1a58,
1444 0x00f989c4752ea0a3, 0x0101e1de48a9c1a3, 0x01a20076be28ba6c,
1445 0x02f8052e5eb2de95, 0x01bfe8f82dea117c, 0x0160074d3c36ddb7},
1446 {1, 0, 0, 0, 0, 0, 0, 0, 0}},
1447{{0x012f3fc373393b3b, 0x03d3d6172f1419fa, 0x02adc943c0b86873,
1448 0x00d475584177952b, 0x012a4d1673750ee2, 0x00512517a0f13b0c,
1449 0x02b184671a7b1734, 0x0315b84236f1a50a, 0x00a4afc472edbdb9},
1450 {0x00152a7077f385c4, 0x03044007d8d1c2ee, 0x0065829d61d52b52,
1451 0x00494ff6b6631d0d, 0x00a11d94d5f06bcf, 0x02d2f89474d9282e,
1452 0x0241c5727c06eeb9, 0x0386928710fbdb9d, 0x01f883f727b0dfbe},
1453 {1, 0, 0, 0, 0, 0, 0, 0, 0}},
1454{{0x019b0c3c9185544d, 0x006243a37c9d97db, 0x02ee3cbe030a2ad2,
1455 0x00cfdd946bb51e0d, 0x0271c00932606b91, 0x03f817d1ec68c561,
1456 0x03f37009806a369c, 0x03c1f30baf184fd5, 0x01091022d6d2f065},
1457 {0x0292c583514c45ed, 0x0316fca51f9a286c, 0x00300af507c1489a,
1458 0x0295f69008298cf1, 0x02c0ed8274943d7b, 0x016509b9b47a431e,
1459 0x02bc9de9634868ce, 0x005b34929bffcb09, 0x000c1a0121681524},
1460 {1, 0, 0, 0, 0, 0, 0, 0, 0}},
1461{{0x0286abc0292fb9f2, 0x02665eee9805b3f7, 0x01ed7455f17f26d6,
1462 0x0346355b83175d13, 0x006284944cd0a097, 0x0191895bcdec5e51,
1463 0x02e288370afda7d9, 0x03b22312bfefa67a, 0x01d104d3fc0613fe},
1464 {0x0092421a12f7e47f, 0x0077a83fa373c501, 0x03bd25c5f696bd0d,
1465 0x035c41e4d5459761, 0x01ca0d1742b24f53, 0x00aaab27863a509c,
1466 0x018b6de47df73917, 0x025c0b771705cd01, 0x01fd51d566d760a7},
1467 {1, 0, 0, 0, 0, 0, 0, 0, 0}},
1468{{0x01dd92ff6b0d1dbd, 0x039c5e2e8f8afa69, 0x0261ed13242c3b27,
1469 0x0382c6e67026e6a0, 0x01d60b10be2089f9, 0x03c15f3dce86723f,
1470 0x03c764a32d2a062d, 0x017307eac0fad056, 0x018207c0b96c5256},
1471 {0x0196a16d60e13154, 0x03e6ce74c0267030, 0x00ddbf2b4e52a5aa,
1472 0x012738241bbf31c8, 0x00ebe8dc04685a28, 0x024c2ad6d380d4a2,
1473 0x035ee062a6e62d0e, 0x0029ed74af7d3a0f, 0x00eef32aec142ebd},
1474 {1, 0, 0, 0, 0, 0, 0, 0, 0}},
1475{{0x00c31ec398993b39, 0x03a9f45bcda68253, 0x00ac733c24c70890,
1476 0x00872b111401ff01, 0x01d178c23195eafb, 0x03bca2c816b87f74,
1477 0x0261a9af46fbad7a, 0x0324b2a8dd3d28f9, 0x00918121d8f24e23},
1478 {0x032bc8c1ca983cd7, 0x00d869dfb08fc8c6, 0x01693cb61fce1516,
1479 0x012a5ea68f4e88a8, 0x010869cab88d7ae3, 0x009081ad277ceee1,
1480 0x033a77166d064cdc, 0x03955235a1fb3a95, 0x01251a4a9b25b65e},
1481 {1, 0, 0, 0, 0, 0, 0, 0, 0}},
1482{{0x00148a3a1b27f40b, 0x0123186df1b31fdc, 0x00026e7beaad34ce,
1483 0x01db446ac1d3dbba, 0x0299c1a33437eaec, 0x024540610183cbb7,
1484 0x0173bb0e9ce92e46, 0x02b937e43921214b, 0x01ab0436a9bf01b5},
1485 {0x0383381640d46948, 0x008dacbf0e7f330f, 0x03602122bcc3f318,
1486 0x01ee596b200620d6, 0x03bd0585fda430b3, 0x014aed77fd123a83,
1487 0x005ace749e52f742, 0x0390fe041da2b842, 0x0189a8ceb3299242},
1488 {1, 0, 0, 0, 0, 0, 0, 0, 0}},
1489{{0x012a19d6b3282473, 0x00c0915918b423ce, 0x023a954eb94405ae,
1490 0x00529f692be26158, 0x0289fa1b6fa4b2aa, 0x0198ae4ceea346ef,
1491 0x0047d8cdfbdedd49, 0x00cc8c8953f0f6b8, 0x001424abbff49203},
1492 {0x0256732a1115a03a, 0x0351bc38665c6733, 0x03f7b950fb4a6447,
1493 0x000afffa94c22155, 0x025763d0a4dab540, 0x000511e92d4fc283,
1494 0x030a7e9eda0ee96c, 0x004c3cd93a28bf0a, 0x017edb3a8719217f},
1495 {1, 0, 0, 0, 0, 0, 0, 0, 0}},
1496{{0x011de5675a88e673, 0x031d7d0f5e567fbe, 0x0016b2062c970ae5,
1497 0x03f4a2be49d90aa7, 0x03cef0bd13822866, 0x03f0923dcf774a6c,
1498 0x0284bebc4f322f72, 0x016ab2645302bb2c, 0x01793f95dace0e2a},
1499 {0x010646e13527a28f, 0x01ca1babd59dc5e7, 0x01afedfd9a5595df,
1500 0x01f15785212ea6b1, 0x0324e5d64f6ae3f4, 0x02d680f526d00645,
1501 0x0127920fadf627a7, 0x03b383f75df4f684, 0x0089e0057e783b0a},
1502 {1, 0, 0, 0, 0, 0, 0, 0, 0}},
1503{{0x00f334b9eb3c26c6, 0x0298fdaa98568dce, 0x01c2d24843a82292,
1504 0x020bcb24fa1b0711, 0x02cbdb3d2b1875e6, 0x0014907598f89422,
1505 0x03abe3aa43b26664, 0x02cbf47f720bc168, 0x0133b5e73014b79b},
1506 {0x034aab5dab05779d, 0x00cdc5d71fee9abb, 0x0399f16bd4bd9d30,
1507 0x03582fa592d82647, 0x02be1cdfb775b0e9, 0x0034f7cea32e94cb,
1508 0x0335a7f08f56f286, 0x03b707e9565d1c8b, 0x0015c946ea5b614f},
1509 {1, 0, 0, 0, 0, 0, 0, 0, 0}},
1510{{0x024676f6cff72255, 0x00d14625cac96378, 0x00532b6008bc3767,
1511 0x01fc16721b985322, 0x023355ea1b091668, 0x029de7afdc0317c3,
1512 0x02fc8a7ca2da037c, 0x02de1217d74a6f30, 0x013f7173175b73bf},
1513 {0x0344913f441490b5, 0x0200f9e272b61eca, 0x0258a246b1dd55d2,
1514 0x03753db9ea496f36, 0x025e02937a09c5ef, 0x030cbd3d14012692,
1515 0x01793a67e70dc72a, 0x03ec1d37048a662e, 0x006550f700c32a8d},
1516 {1, 0, 0, 0, 0, 0, 0, 0, 0}},
1517{{0x00d3f48a347eba27, 0x008e636649b61bd8, 0x00d3b93716778fb3,
1518 0x004d1915757bd209, 0x019d5311a3da44e0, 0x016d1afcbbe6aade,
1519 0x0241bf5f73265616, 0x0384672e5d50d39b, 0x005009fee522b684},
1520 {0x029b4fab064435fe, 0x018868ee095bbb07, 0x01ea3d6936cc92b8,
1521 0x000608b00f78a2f3, 0x02db911073d1c20f, 0x018205938470100a,
1522 0x01f1e4964cbe6ff2, 0x021a19a29eed4663, 0x01414485f42afa81},
1523 {1, 0, 0, 0, 0, 0, 0, 0, 0}},
1524{{0x01612b3a17f63e34, 0x03813992885428e6, 0x022b3c215b5a9608,
1525 0x029b4057e19f2fcb, 0x0384059a587af7e6, 0x02d6400ace6fe610,
1526 0x029354d896e8e331, 0x00c047ee6dfba65e, 0x0037720542e9d49d},
1527 {0x02ce9eed7c5e9278, 0x0374ed703e79643b, 0x01316c54c4072006,
1528 0x005aaa09054b2ee8, 0x002824000c840d57, 0x03d4eba24771ed86,
1529 0x0189c50aabc3bdae, 0x0338c01541e15510, 0x00466d56e38eed42},
1530 {1, 0, 0, 0, 0, 0, 0, 0, 0}},
1531{{0x007efd8330ad8bd6, 0x02465ed48047710b, 0x0034c6606b215e0c,
1532 0x016ae30c53cbf839, 0x01fa17bd37161216, 0x018ead4e61ce8ab9,
1533 0x005482ed5f5dee46, 0x037543755bba1d7f, 0x005e5ac7e70a9d0f},
1534 {0x0117e1bb2fdcb2a2, 0x03deea36249f40c4, 0x028d09b4a6246cb7,
1535 0x03524b8855bcf756, 0x023d7d109d5ceb58, 0x0178e43e3223ef9c,
1536 0x0154536a0c6e966a, 0x037964d1286ee9fe, 0x0199bcd90e125055},
1537 {1, 0, 0, 0, 0, 0, 0, 0, 0}}
1538};
1539
1540/*
1541 * select_point selects the |idx|th point from a precomputation table and
1542 * copies it to out.
1543 */
1544 /* pre_comp below is of the size provided in |size| */
1545static void select_point(const limb idx, unsigned int size,
1546 const felem pre_comp[][3], felem out[3])
1547{
1548 unsigned i, j;
1549 limb *outlimbs = &out[0][0];
1550
1551 memset(out, 0, sizeof(*out) * 3);
1552
1553 for (i = 0; i < size; i++) {
1554 const limb *inlimbs = &pre_comp[i][0][0];
1555 limb mask = i ^ idx;
1556 mask |= mask >> 4;
1557 mask |= mask >> 2;
1558 mask |= mask >> 1;
1559 mask &= 1;
1560 mask--;
1561 for (j = 0; j < NLIMBS * 3; j++)
1562 outlimbs[j] |= inlimbs[j] & mask;
1563 }
1564}
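
/*-
 * Editor's note (illustrative, not part of the upstream source): the mask
 * computation folds i ^ idx (at most 5 bits here) down to one bit and then
 * stretches it to a full limb:
 *
 *     i == idx: i ^ idx = 0, folded bit = 0, mask = 0 - 1 = all ones
 *     i != idx: some bit set, folded bit = 1, mask = 1 - 1 = 0
 *
 * Every table entry is read, but only the requested one survives the
 * OR-accumulation, keeping the access pattern independent of |idx|.
 */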
1565
1566/* get_bit returns the |i|th bit in |in| */
1567static char get_bit(const felem_bytearray in, int i)
1568{
1569 if (i < 0)
1570 return 0;
1571 return (in[i >> 3] >> (i & 7)) & 1;
1572}
1573
1574/*
1575 * Interleaved point multiplication using precomputed point multiples: The
1576 * small point multiples 0*P, 1*P, ..., 16*P are in pre_comp[], the scalars
1577 * in scalars[]. If g_scalar is non-NULL, we also add this multiple of the
1578 * generator, using certain (large) precomputed multiples in g_pre_comp.
1579 * Output point (X, Y, Z) is stored in x_out, y_out, z_out
1580 */
1581static void batch_mul(felem x_out, felem y_out, felem z_out,
1582 const felem_bytearray scalars[],
1583 const unsigned num_points, const u8 *g_scalar,
1584 const int mixed, const felem pre_comp[][17][3],
1585 const felem g_pre_comp[16][3])
1586{
1587 int i, skip;
1588 unsigned num, gen_mul = (g_scalar != NULL);
1589 felem nq[3], tmp[4];
1590 limb bits;
1591 u8 sign, digit;
1592
1593 /* set nq to the point at infinity */
1594 memset(nq, 0, sizeof(nq));
1595
1596 /*
1597 * Loop over all scalars msb-to-lsb, interleaving additions of multiples
1598 * of the generator (last quarter of rounds) and additions of other
1599 * points multiples (every 5th round).
1600 */
1601 skip = 1; /* save two point operations in the first
1602 * round */
1603 for (i = (num_points ? 520 : 130); i >= 0; --i) {
1604 /* double */
1605 if (!skip)
1606 point_double(nq[0], nq[1], nq[2], nq[0], nq[1], nq[2]);
1607
1608 /* add multiples of the generator */
1609 if (gen_mul && (i <= 130)) {
1610 bits = get_bit(g_scalar, i + 390) << 3;
1611 if (i < 130) {
1612 bits |= get_bit(g_scalar, i + 260) << 2;
1613 bits |= get_bit(g_scalar, i + 130) << 1;
1614 bits |= get_bit(g_scalar, i);
1615 }
1616 /* select the point to add, in constant time */
1617 select_point(bits, 16, g_pre_comp, tmp);
1618 if (!skip) {
1619 /* The 1 argument below is for "mixed" */
1620 point_add(nq[0], nq[1], nq[2],
1621 nq[0], nq[1], nq[2], 1, tmp[0], tmp[1], tmp[2]);
1622 } else {
1623 memcpy(nq, tmp, 3 * sizeof(felem));
1624 skip = 0;
1625 }
1626 }
1627
1628 /* do other additions every 5 doublings */
1629 if (num_points && (i % 5 == 0)) {
1630 /* loop over all scalars */
1631 for (num = 0; num < num_points; ++num) {
1632 bits = get_bit(scalars[num], i + 4) << 5;
1633 bits |= get_bit(scalars[num], i + 3) << 4;
1634 bits |= get_bit(scalars[num], i + 2) << 3;
1635 bits |= get_bit(scalars[num], i + 1) << 2;
1636 bits |= get_bit(scalars[num], i) << 1;
1637 bits |= get_bit(scalars[num], i - 1);
1638 ossl_ec_GFp_nistp_recode_scalar_bits(&sign, &digit, bits);
1639
1640 /*
1641 * select the point to add or subtract, in constant time
1642 */
1643 select_point(digit, 17, pre_comp[num], tmp);
1644 felem_neg(tmp[3], tmp[1]); /* (X, -Y, Z) is the negative
1645 * point */
1646 copy_conditional(tmp[1], tmp[3], (-(limb) sign));
1647
1648 if (!skip) {
1649 point_add(nq[0], nq[1], nq[2],
1650 nq[0], nq[1], nq[2],
1651 mixed, tmp[0], tmp[1], tmp[2]);
1652 } else {
1653 memcpy(nq, tmp, 3 * sizeof(felem));
1654 skip = 0;
1655 }
1656 }
1657 }
1658 }
1659 felem_assign(x_out, nq[0]);
1660 felem_assign(y_out, nq[1]);
1661 felem_assign(z_out, nq[2]);
1662}
1663
1664/* Precomputation for the group generator. */
1665struct nistp521_pre_comp_st {
1666 felem g_pre_comp[16][3];
1667 CRYPTO_REF_COUNT references;
1668};
1669
1670const EC_METHOD *EC_GFp_nistp521_method(void)
1671{
1672 static const EC_METHOD ret = {
1673 EC_FLAGS_DEFAULT_OCT,
1674 NID_X9_62_prime_field,
1675 ossl_ec_GFp_nistp521_group_init,
1676 ossl_ec_GFp_simple_group_finish,
1677 ossl_ec_GFp_simple_group_clear_finish,
1678 ossl_ec_GFp_nist_group_copy,
1679 ossl_ec_GFp_nistp521_group_set_curve,
1680 ossl_ec_GFp_simple_group_get_curve,
1681 ossl_ec_GFp_simple_group_get_degree,
1682 ossl_ec_group_simple_order_bits,
1683 ossl_ec_GFp_simple_group_check_discriminant,
1684 ossl_ec_GFp_simple_point_init,
1685 ossl_ec_GFp_simple_point_finish,
1686 ossl_ec_GFp_simple_point_clear_finish,
1687 ossl_ec_GFp_simple_point_copy,
1688 ossl_ec_GFp_simple_point_set_to_infinity,
1689 ossl_ec_GFp_simple_point_set_affine_coordinates,
1690 ossl_ec_GFp_nistp521_point_get_affine_coordinates,
1691 0 /* point_set_compressed_coordinates */ ,
1692 0 /* point2oct */ ,
1693 0 /* oct2point */ ,
1694 ossl_ec_GFp_simple_add,
1695 ossl_ec_GFp_simple_dbl,
1696 ossl_ec_GFp_simple_invert,
1697 ossl_ec_GFp_simple_is_at_infinity,
1698 ossl_ec_GFp_simple_is_on_curve,
1699 ossl_ec_GFp_simple_cmp,
1700 ossl_ec_GFp_simple_make_affine,
1701 ossl_ec_GFp_simple_points_make_affine,
1702 ossl_ec_GFp_nistp521_points_mul,
1703 ossl_ec_GFp_nistp521_precompute_mult,
1704 ossl_ec_GFp_nistp521_have_precompute_mult,
1705 ossl_ec_GFp_nist_field_mul,
1706 ossl_ec_GFp_nist_field_sqr,
1707 0 /* field_div */ ,
1708 ossl_ec_GFp_simple_field_inv,
1709 0 /* field_encode */ ,
1710 0 /* field_decode */ ,
1711 0, /* field_set_to_one */
1712 ossl_ec_key_simple_priv2oct,
1713 ossl_ec_key_simple_oct2priv,
1714 0, /* set private */
1715 ossl_ec_key_simple_generate_key,
1716 ossl_ec_key_simple_check_key,
1717 ossl_ec_key_simple_generate_public_key,
1718 0, /* keycopy */
1719 0, /* keyfinish */
1720 ossl_ecdh_simple_compute_key,
1721 ossl_ecdsa_simple_sign_setup,
1722 ossl_ecdsa_simple_sign_sig,
1723 ossl_ecdsa_simple_verify_sig,
1724 0, /* field_inverse_mod_ord */
1725 0, /* blind_coordinates */
1726 0, /* ladder_pre */
1727 0, /* ladder_step */
1728 0 /* ladder_post */
1729 };
1730
1731 return &ret;
1732}
1733
1734/******************************************************************************/
1735/*
1736 * FUNCTIONS TO MANAGE PRECOMPUTATION
1737 */
1738
1739static NISTP521_PRE_COMP *nistp521_pre_comp_new(void)
1740{
1741 NISTP521_PRE_COMP *ret = OPENSSL_zalloc(sizeof(*ret));
1742
1743 if (ret == NULL)
1744 return ret;
1745
1746 if (!CRYPTO_NEW_REF(&ret->references, 1)) {
1747 OPENSSL_free(ret);
1748 return NULL;
1749 }
1750 return ret;
1751}
1752
1753NISTP521_PRE_COMP *EC_nistp521_pre_comp_dup(NISTP521_PRE_COMP *p)
1754{
1755 int i;
1756 if (p != NULL)
1757 CRYPTO_UP_REF(&p->references, &i);
1758 return p;
1759}
1760
1761void EC_nistp521_pre_comp_free(NISTP521_PRE_COMP *p)
1762{
1763 int i;
1764
1765 if (p == NULL)
1766 return;
1767
1768 CRYPTO_DOWN_REF(&p->references, &i);
1769 REF_PRINT_COUNT("EC_nistp521", p);
1770 if (i > 0)
1771 return;
1772 REF_ASSERT_ISNT(i < 0);
1773
1774 CRYPTO_FREE_REF(&p->references);
1775 OPENSSL_free(p);
1776}
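/*
 * Editorial note: the helpers above give the precomputation table a plain
 * reference count: nistp521_pre_comp_new() starts it at 1,
 * EC_nistp521_pre_comp_dup() only bumps the count (no deep copy), and
 * EC_nistp521_pre_comp_free() releases the memory once the count reaches 0.
 * A rough usage sketch (illustrative only; the real callers are the
 * EC_GROUP copy and free paths):
 */
#if 0                           /* illustrative sketch, not compiled */
static void pre_comp_lifecycle_example(void)
{
    NISTP521_PRE_COMP *p = nistp521_pre_comp_new();   /* refcount == 1 */
    NISTP521_PRE_COMP *q;

    if (p == NULL)
        return;
    q = EC_nistp521_pre_comp_dup(p);                  /* refcount == 2, q == p */
    EC_nistp521_pre_comp_free(q);                     /* refcount == 1 */
    EC_nistp521_pre_comp_free(p);                     /* refcount == 0, freed */
}
#endif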
1777
1778/******************************************************************************/
1779/*
1780 * OPENSSL EC_METHOD FUNCTIONS
1781 */
1782
1783int ossl_ec_GFp_nistp521_group_init(EC_GROUP *group)
1784{
1785 int ret;
1786 ret = ossl_ec_GFp_simple_group_init(group);
1787 group->a_is_minus3 = 1;
1788 return ret;
1789}
1790
1791int ossl_ec_GFp_nistp521_group_set_curve(EC_GROUP *group, const BIGNUM *p,
1792 const BIGNUM *a, const BIGNUM *b,
1793 BN_CTX *ctx)
1794{
1795 int ret = 0;
1796 BIGNUM *curve_p, *curve_a, *curve_b;
1797#ifndef FIPS_MODULE
1798 BN_CTX *new_ctx = NULL;
1799
1800 if (ctx == NULL)
1801 ctx = new_ctx = BN_CTX_new();
1802#endif
1803 if (ctx == NULL)
1804 return 0;
1805
1806 BN_CTX_start(ctx);
1807 curve_p = BN_CTX_get(ctx);
1808 curve_a = BN_CTX_get(ctx);
1809 curve_b = BN_CTX_get(ctx);
1810 if (curve_b == NULL)
1811 goto err;
1812 BN_bin2bn(nistp521_curve_params[0], sizeof(felem_bytearray), curve_p);
1813 BN_bin2bn(nistp521_curve_params[1], sizeof(felem_bytearray), curve_a);
1814 BN_bin2bn(nistp521_curve_params[2], sizeof(felem_bytearray), curve_b);
1815 if ((BN_cmp(curve_p, p)) || (BN_cmp(curve_a, a)) || (BN_cmp(curve_b, b))) {
1816 ERR_raise(ERR_LIB_EC, EC_R_WRONG_CURVE_PARAMETERS);
1817 goto err;
1818 }
1819 group->field_mod_func = BN_nist_mod_521;
1820 ret = ossl_ec_GFp_simple_group_set_curve(group, p, a, b, ctx);
1821 err:
1822 BN_CTX_end(ctx);
1823#ifndef FIPS_MODULE
1824 BN_CTX_free(new_ctx);
1825#endif
1826 return ret;
1827}
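/*
 * Editorial note: this setter is reached through the public
 * EC_GROUP_set_curve() interface when a group uses this EC_METHOD, and it
 * deliberately rejects any (p, a, b) other than the built-in P-521
 * parameters before installing BN_nist_mod_521 as the fast reduction.  A
 * hedged sketch using the public (deprecated) method API; availability of
 * EC_GFp_nistp521_method() depends on the build configuration:
 */
#if 0                           /* illustrative sketch, not compiled */
static EC_GROUP *nistp521_group_from_params(const BIGNUM *p, const BIGNUM *a,
                                            const BIGNUM *b, BN_CTX *ctx)
{
    EC_GROUP *group = EC_GROUP_new(EC_GFp_nistp521_method());

    /* fails unless (p, a, b) are exactly the P-521 constants */
    if (group != NULL && !EC_GROUP_set_curve(group, p, a, b, ctx)) {
        EC_GROUP_free(group);
        group = NULL;
    }
    return group;
}
#endif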
1828
1829/*
1830 * Takes the Jacobian coordinates (X, Y, Z) of a point and returns (X', Y') =
1831 * (X/Z^2, Y/Z^3)
1832 */
1833int ossl_ec_GFp_nistp521_point_get_affine_coordinates(const EC_GROUP *group,
1834 const EC_POINT *point,
1835 BIGNUM *x, BIGNUM *y,
1836 BN_CTX *ctx)
1837{
1838 felem z1, z2, x_in, y_in, x_out, y_out;
1839 largefelem tmp;
1840
1841 if (EC_POINT_is_at_infinity(group, point)) {
1842 ERR_raise(ERR_LIB_EC, EC_R_POINT_AT_INFINITY);
1843 return 0;
1844 }
1845 if ((!BN_to_felem(x_in, point->X)) || (!BN_to_felem(y_in, point->Y)) ||
1846 (!BN_to_felem(z1, point->Z)))
1847 return 0;
1848 felem_inv(z2, z1);
1849 felem_square(tmp, z2);
1850 felem_reduce(z1, tmp);
1851 felem_mul(tmp, x_in, z1);
1852 felem_reduce(x_in, tmp);
1853 felem_contract(x_out, x_in);
1854 if (x != NULL) {
1855 if (!felem_to_BN(x, x_out)) {
1856 ERR_raise(ERR_LIB_EC, ERR_R_BN_LIB);
1857 return 0;
1858 }
1859 }
1860 felem_mul(tmp, z1, z2);
1861 felem_reduce(z1, tmp);
1862 felem_mul(tmp, y_in, z1);
1863 felem_reduce(y_in, tmp);
1864 felem_contract(y_out, y_in);
1865 if (y != NULL) {
1866 if (!felem_to_BN(y, y_out)) {
1867 ERR_raise(ERR_LIB_EC, ERR_R_BN_LIB);
1868 return 0;
1869 }
1870 }
1871 return 1;
1872}
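/*
 * Editorial note: the function above implements the usual Jacobian-to-affine
 * conversion x = X * Z^-2 mod p, y = Y * Z^-3 mod p with a single field
 * inversion, staying in the felem representation throughout.  The same
 * arithmetic written with public BIGNUM primitives, as an illustration only
 * (not how this file computes it; ctx is assumed to be inside a
 * BN_CTX_start()/BN_CTX_end() pair and Z is assumed nonzero):
 */
#if 0                           /* illustrative sketch, not compiled */
static int jacobian_to_affine_bn(BIGNUM *x, BIGNUM *y, const BIGNUM *X,
                                 const BIGNUM *Y, const BIGNUM *Z,
                                 const BIGNUM *p, BN_CTX *ctx)
{
    BIGNUM *zinv = BN_CTX_get(ctx);
    BIGNUM *zinv2 = BN_CTX_get(ctx);

    if (zinv2 == NULL || BN_mod_inverse(zinv, Z, p, ctx) == NULL)
        return 0;
    return BN_mod_sqr(zinv2, zinv, p, ctx)          /* Z^-2 */
        && BN_mod_mul(x, X, zinv2, p, ctx)          /* x = X * Z^-2 */
        && BN_mod_mul(zinv2, zinv2, zinv, p, ctx)   /* Z^-3 */
        && BN_mod_mul(y, Y, zinv2, p, ctx);         /* y = Y * Z^-3 */
}
#endif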
1873
1874/* points below is of size |num|, and tmp_felems is of size |num+1| */
1875static void make_points_affine(size_t num, felem points[][3],
1876 felem tmp_felems[])
1877{
1878 /*
1879 * Runs in constant time, unless an input is the point at infinity (which
1880 * normally shouldn't happen).
1881 */
1882 ossl_ec_GFp_nistp_points_make_affine_internal(num,
1883 points,
1884 sizeof(felem),
1885 tmp_felems,
1886 (void (*)(void *))felem_one,
1887 felem_is_zero_int,
1888 (void (*)(void *, const void *))
1889 felem_assign,
1890 (void (*)(void *, const void *))
1891 felem_square_reduce, (void (*)
1892 (void *,
1893 const void
1894 *,
1895 const void
1896 *))
1897 felem_mul_reduce,
1898 (void (*)(void *, const void *))
1899 felem_inv,
1900 (void (*)(void *, const void *))
1901 felem_contract);
1902}
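/*
 * Editorial note: the shared helper called above converts many points to
 * affine form at once, which lets the cost of field inversions be shared
 * across the whole batch.  The classic way to do that is Montgomery's
 * batched-inversion trick: multiply the values together, invert the product
 * once, then peel the individual inverses back out.  The sketch below shows
 * that general idea with BIGNUMs; it illustrates the technique, not the
 * helper's exact algorithm or data layout (batch_invert_bn is a made-up
 * name, every z[i] must be nonzero, and ctx is assumed to be inside a
 * BN_CTX_start()/BN_CTX_end() pair).
 */
#if 0                           /* illustrative sketch, not compiled */
static int batch_invert_bn(BIGNUM *inv[], BIGNUM *const z[], size_t n,
                           const BIGNUM *p, BN_CTX *ctx)
{
    size_t i;
    BIGNUM *acc = BN_CTX_get(ctx);
    BIGNUM *tmp = BN_CTX_get(ctx);

    if (tmp == NULL || !BN_one(acc))
        return 0;
    for (i = 0; i < n; i++) {                       /* inv[i] = z[0]*...*z[i-1] */
        if (BN_copy(inv[i], acc) == NULL
            || !BN_mod_mul(acc, acc, z[i], p, ctx))
            return 0;
    }
    if (BN_mod_inverse(acc, acc, p, ctx) == NULL)   /* one inversion for all */
        return 0;
    for (i = n; i-- > 0;) {
        if (!BN_mod_mul(tmp, inv[i], acc, p, ctx)   /* tmp = z[i]^-1 */
            || !BN_mod_mul(acc, acc, z[i], p, ctx)  /* strip z[i] from acc */
            || BN_copy(inv[i], tmp) == NULL)
            return 0;
    }
    return 1;
}
#endif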
1903
1904/*
1905 * Computes scalar*generator + \sum scalars[i]*points[i], ignoring NULL
1906 * values. Result is stored in r (r can equal one of the inputs).
1907 */
1908int ossl_ec_GFp_nistp521_points_mul(const EC_GROUP *group, EC_POINT *r,
1909 const BIGNUM *scalar, size_t num,
1910 const EC_POINT *points[],
1911 const BIGNUM *scalars[], BN_CTX *ctx)
1912{
1913 int ret = 0;
1914 int j;
1915 int mixed = 0;
1916 BIGNUM *x, *y, *z, *tmp_scalar;
1917 felem_bytearray g_secret;
1918 felem_bytearray *secrets = NULL;
1919 felem (*pre_comp)[17][3] = NULL;
1920 felem *tmp_felems = NULL;
1921 unsigned i;
1922 int num_bytes;
1923 int have_pre_comp = 0;
1924 size_t num_points = num;
1925 felem x_in, y_in, z_in, x_out, y_out, z_out;
1926 NISTP521_PRE_COMP *pre = NULL;
1927 felem(*g_pre_comp)[3] = NULL;
1928 EC_POINT *generator = NULL;
1929 const EC_POINT *p = NULL;
1930 const BIGNUM *p_scalar = NULL;
1931
1932 BN_CTX_start(ctx);
1933 x = BN_CTX_get(ctx);
1934 y = BN_CTX_get(ctx);
1935 z = BN_CTX_get(ctx);
1936 tmp_scalar = BN_CTX_get(ctx);
1937 if (tmp_scalar == NULL)
1938 goto err;
1939
1940 if (scalar != NULL) {
1941 pre = group->pre_comp.nistp521;
1942 if (pre)
1943 /* we have precomputation, try to use it */
1944 g_pre_comp = &pre->g_pre_comp[0];
1945 else
1946 /* try to use the standard precomputation */
1947 g_pre_comp = (felem(*)[3]) gmul;
1948 generator = EC_POINT_new(group);
1949 if (generator == NULL)
1950 goto err;
1951 /* get the generator from precomputation */
1952 if (!felem_to_BN(x, g_pre_comp[1][0]) ||
1953 !felem_to_BN(y, g_pre_comp[1][1]) ||
1954 !felem_to_BN(z, g_pre_comp[1][2])) {
1955 ERR_raise(ERR_LIB_EC, ERR_R_BN_LIB);
1956 goto err;
1957 }
1958 if (!ossl_ec_GFp_simple_set_Jprojective_coordinates_GFp(group,
1959 generator,
1960 x, y, z, ctx))
1961 goto err;
1962 if (0 == EC_POINT_cmp(group, generator, group->generator, ctx))
1963 /* precomputation matches generator */
1964 have_pre_comp = 1;
1965 else
1966 /*
1967 * we don't have valid precomputation: treat the generator as a
1968 * random point
1969 */
1970 num_points++;
1971 }
1972
1973 if (num_points > 0) {
1974 if (num_points >= 2) {
1975 /*
1976 * unless we precompute multiples for just one point, converting
1977 * those into affine form is time well spent
1978 */
1979 mixed = 1;
1980 }
1981 secrets = OPENSSL_zalloc(sizeof(*secrets) * num_points);
1982 pre_comp = OPENSSL_zalloc(sizeof(*pre_comp) * num_points);
1983 if (mixed)
1984 tmp_felems =
1985 OPENSSL_malloc(sizeof(*tmp_felems) * (num_points * 17 + 1));
1986 if ((secrets == NULL) || (pre_comp == NULL)
1987 || (mixed && (tmp_felems == NULL)))
1988 goto err;
1989
1990 /*
1991 * we treat NULL scalars as 0, and NULL points as points at infinity,
1992 * i.e., they contribute nothing to the linear combination
1993 */
1994 for (i = 0; i < num_points; ++i) {
1995 if (i == num) {
1996 /*
1997 * we didn't have a valid precomputation, so we pick the
1998 * generator
1999 */
2000 p = EC_GROUP_get0_generator(group);
2001 p_scalar = scalar;
2002 } else {
2003 /* the i^th point */
2004 p = points[i];
2005 p_scalar = scalars[i];
2006 }
2007 if ((p_scalar != NULL) && (p != NULL)) {
2008 /* reduce scalar to 0 <= scalar < 2^521 */
2009 if ((BN_num_bits(p_scalar) > 521)
2010 || (BN_is_negative(p_scalar))) {
2011 /*
2012 * this is an unusual input, and we don't guarantee
2013 * constant-timeness
2014 */
2015 if (!BN_nnmod(tmp_scalar, p_scalar, group->order, ctx)) {
2016 ERR_raise(ERR_LIB_EC, ERR_R_BN_LIB);
2017 goto err;
2018 }
2019 num_bytes = BN_bn2lebinpad(tmp_scalar,
2020 secrets[i], sizeof(secrets[i]));
2021 } else {
2022 num_bytes = BN_bn2lebinpad(p_scalar,
2023 secrets[i], sizeof(secrets[i]));
2024 }
2025 if (num_bytes < 0) {
2026 ERR_raise(ERR_LIB_EC, ERR_R_BN_LIB);
2027 goto err;
2028 }
2029 /* precompute multiples */
2030 if ((!BN_to_felem(x_out, p->X)) ||
2031 (!BN_to_felem(y_out, p->Y)) ||
2032 (!BN_to_felem(z_out, p->Z)))
2033 goto err;
2034 memcpy(pre_comp[i][1][0], x_out, sizeof(felem));
2035 memcpy(pre_comp[i][1][1], y_out, sizeof(felem));
2036 memcpy(pre_comp[i][1][2], z_out, sizeof(felem));
2037 for (j = 2; j <= 16; ++j) {
2038 if (j & 1) {
2039 point_add(pre_comp[i][j][0], pre_comp[i][j][1],
2040 pre_comp[i][j][2], pre_comp[i][1][0],
2041 pre_comp[i][1][1], pre_comp[i][1][2], 0,
2042 pre_comp[i][j - 1][0],
2043 pre_comp[i][j - 1][1],
2044 pre_comp[i][j - 1][2]);
2045 } else {
2046 point_double(pre_comp[i][j][0], pre_comp[i][j][1],
2047 pre_comp[i][j][2], pre_comp[i][j / 2][0],
2048 pre_comp[i][j / 2][1],
2049 pre_comp[i][j / 2][2]);
2050 }
2051 }
2052 }
2053 }
2054 if (mixed)
2055 make_points_affine(num_points * 17, pre_comp[0], tmp_felems);
2056 }
2057
2058 /* the scalar for the generator */
2059 if ((scalar != NULL) && (have_pre_comp)) {
2060 memset(g_secret, 0, sizeof(g_secret));
2061 /* reduce scalar to 0 <= scalar < 2^521 */
2062 if ((BN_num_bits(scalar) > 521) || (BN_is_negative(scalar))) {
2063 /*
2064 * this is an unusual input, and we don't guarantee
2065 * constant-timeness
2066 */
2067 if (!BN_nnmod(tmp_scalar, scalar, group->order, ctx)) {
2068 ERR_raise(ERR_LIB_EC, ERR_R_BN_LIB);
2069 goto err;
2070 }
2071 num_bytes = BN_bn2lebinpad(tmp_scalar, g_secret, sizeof(g_secret));
2072 } else {
2073 num_bytes = BN_bn2lebinpad(scalar, g_secret, sizeof(g_secret));
2074 }
2075 /* do the multiplication with generator precomputation */
2076 batch_mul(x_out, y_out, z_out,
2077 (const felem_bytearray(*))secrets, num_points,
2078 g_secret,
2079 mixed, (const felem(*)[17][3])pre_comp,
2080 (const felem(*)[3])g_pre_comp);
2081 } else {
2082 /* do the multiplication without generator precomputation */
2083 batch_mul(x_out, y_out, z_out,
2084 (const felem_bytearray(*))secrets, num_points,
2085 NULL, mixed, (const felem(*)[17][3])pre_comp, NULL);
2086 }
2087 /* reduce the output to its unique minimal representation */
2088 felem_contract(x_in, x_out);
2089 felem_contract(y_in, y_out);
2090 felem_contract(z_in, z_out);
2091 if ((!felem_to_BN(x, x_in)) || (!felem_to_BN(y, y_in)) ||
2092 (!felem_to_BN(z, z_in))) {
2093 ERR_raise(ERR_LIB_EC, ERR_R_BN_LIB);
2094 goto err;
2095 }
2096 ret = ossl_ec_GFp_simple_set_Jprojective_coordinates_GFp(group, r, x, y, z,
2097 ctx);
2098
2099 err:
2100 BN_CTX_end(ctx);
2101 EC_POINT_free(generator);
2102 OPENSSL_free(secrets);
2103 OPENSSL_free(pre_comp);
2104 OPENSSL_free(tmp_felems);
2105 return ret;
2106}
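/*
 * Editorial note: callers do not invoke this back end directly; it is
 * reached through the public EC_POINT_mul()/EC_POINTs_mul() interface when a
 * group uses the method returned by EC_GFp_nistp521_method() (builds with
 * the 64-bit nistp code enabled).  A hedged sketch of computing
 * scalar*G + k*P on P-521 through the public API (p521_mul_example is a
 * made-up name; P and k must belong to the same curve):
 */
#if 0                           /* illustrative sketch, not compiled */
static int p521_mul_example(BIGNUM *x, BIGNUM *y, const BIGNUM *scalar,
                            const EC_POINT *P, const BIGNUM *k, BN_CTX *ctx)
{
    int ok = 0;
    EC_GROUP *group = EC_GROUP_new_by_curve_name(NID_secp521r1);
    EC_POINT *r = (group != NULL) ? EC_POINT_new(group) : NULL;

    if (r != NULL)
        ok = EC_POINT_mul(group, r, scalar, P, k, ctx)  /* r = scalar*G + k*P */
             && EC_POINT_get_affine_coordinates(group, r, x, y, ctx);
    EC_POINT_free(r);
    EC_GROUP_free(group);
    return ok;
}
#endif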
2107
2108int ossl_ec_GFp_nistp521_precompute_mult(EC_GROUP *group, BN_CTX *ctx)
2109{
2110 int ret = 0;
2111 NISTP521_PRE_COMP *pre = NULL;
2112 int i, j;
2113 BIGNUM *x, *y;
2114 EC_POINT *generator = NULL;
2115 felem tmp_felems[16];
2116#ifndef FIPS_MODULE
2117 BN_CTX *new_ctx = NULL;
2118#endif
2119
2120 /* throw away old precomputation */
2121 EC_pre_comp_free(group);
2122
2123#ifndef FIPS_MODULE
2124 if (ctx == NULL)
2125 ctx = new_ctx = BN_CTX_new();
2126#endif
2127 if (ctx == NULL)
2128 return 0;
2129
2130 BN_CTX_start(ctx);
2131 x = BN_CTX_get(ctx);
2132 y = BN_CTX_get(ctx);
2133 if (y == NULL)
2134 goto err;
2135 /* get the generator */
2136 if (group->generator == NULL)
2137 goto err;
2138 generator = EC_POINT_new(group);
2139 if (generator == NULL)
2140 goto err;
2141 BN_bin2bn(nistp521_curve_params[3], sizeof(felem_bytearray), x);
2142 BN_bin2bn(nistp521_curve_params[4], sizeof(felem_bytearray), y);
2143 if (!EC_POINT_set_affine_coordinates(group, generator, x, y, ctx))
2144 goto err;
2145 if ((pre = nistp521_pre_comp_new()) == NULL)
2146 goto err;
2147 /*
2148 * if the generator is the standard one, use built-in precomputation
2149 */
2150 if (0 == EC_POINT_cmp(group, generator, group->generator, ctx)) {
2151 memcpy(pre->g_pre_comp, gmul, sizeof(pre->g_pre_comp));
2152 goto done;
2153 }
2154 if ((!BN_to_felem(pre->g_pre_comp[1][0], group->generator->X)) ||
2155 (!BN_to_felem(pre->g_pre_comp[1][1], group->generator->Y)) ||
2156 (!BN_to_felem(pre->g_pre_comp[1][2], group->generator->Z)))
2157 goto err;
2158 /* compute 2^130*G, 2^260*G, 2^390*G */
2159 for (i = 1; i <= 4; i <<= 1) {
2160 point_double(pre->g_pre_comp[2 * i][0], pre->g_pre_comp[2 * i][1],
2161 pre->g_pre_comp[2 * i][2], pre->g_pre_comp[i][0],
2162 pre->g_pre_comp[i][1], pre->g_pre_comp[i][2]);
2163 for (j = 0; j < 129; ++j) {
2164 point_double(pre->g_pre_comp[2 * i][0],
2165 pre->g_pre_comp[2 * i][1],
2166 pre->g_pre_comp[2 * i][2],
2167 pre->g_pre_comp[2 * i][0],
2168 pre->g_pre_comp[2 * i][1],
2169 pre->g_pre_comp[2 * i][2]);
2170 }
2171 }
2172 /* g_pre_comp[0] is the point at infinity */
2173 memset(pre->g_pre_comp[0], 0, sizeof(pre->g_pre_comp[0]));
2174 /* the remaining multiples */
2175 /* 2^130*G + 2^260*G */
2176 point_add(pre->g_pre_comp[6][0], pre->g_pre_comp[6][1],
2177 pre->g_pre_comp[6][2], pre->g_pre_comp[4][0],
2178 pre->g_pre_comp[4][1], pre->g_pre_comp[4][2],
2179 0, pre->g_pre_comp[2][0], pre->g_pre_comp[2][1],
2180 pre->g_pre_comp[2][2]);
2181 /* 2^130*G + 2^390*G */
2182 point_add(pre->g_pre_comp[10][0], pre->g_pre_comp[10][1],
2183 pre->g_pre_comp[10][2], pre->g_pre_comp[8][0],
2184 pre->g_pre_comp[8][1], pre->g_pre_comp[8][2],
2185 0, pre->g_pre_comp[2][0], pre->g_pre_comp[2][1],
2186 pre->g_pre_comp[2][2]);
2187 /* 2^260*G + 2^390*G */
2188 point_add(pre->g_pre_comp[12][0], pre->g_pre_comp[12][1],
2189 pre->g_pre_comp[12][2], pre->g_pre_comp[8][0],
2190 pre->g_pre_comp[8][1], pre->g_pre_comp[8][2],
2191 0, pre->g_pre_comp[4][0], pre->g_pre_comp[4][1],
2192 pre->g_pre_comp[4][2]);
2193 /* 2^130*G + 2^260*G + 2^390*G */
2194 point_add(pre->g_pre_comp[14][0], pre->g_pre_comp[14][1],
2195 pre->g_pre_comp[14][2], pre->g_pre_comp[12][0],
2196 pre->g_pre_comp[12][1], pre->g_pre_comp[12][2],
2197 0, pre->g_pre_comp[2][0], pre->g_pre_comp[2][1],
2198 pre->g_pre_comp[2][2]);
2199 for (i = 1; i < 8; ++i) {
2200 /* odd multiples: add G */
2201 point_add(pre->g_pre_comp[2 * i + 1][0],
2202 pre->g_pre_comp[2 * i + 1][1],
2203 pre->g_pre_comp[2 * i + 1][2], pre->g_pre_comp[2 * i][0],
2204 pre->g_pre_comp[2 * i][1], pre->g_pre_comp[2 * i][2], 0,
2205 pre->g_pre_comp[1][0], pre->g_pre_comp[1][1],
2206 pre->g_pre_comp[1][2]);
2207 }
2208 make_points_affine(15, &(pre->g_pre_comp[1]), tmp_felems);
2209
2210 done:
2211 SETPRECOMP(group, nistp521, pre);
2212 ret = 1;
2213 pre = NULL;
2214 err:
2215 BN_CTX_end(ctx);
2216 EC_POINT_free(generator);
2217#ifndef FIPS_MODULE
2218 BN_CTX_free(new_ctx);
2219#endif
2220 EC_nistp521_pre_comp_free(pre);
2221 return ret;
2222}
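/*
 * Editorial note: the 16-entry generator table built above is indexed by a
 * 4-bit comb value: entry b (with bits b3 b2 b1 b0) holds
 * b0*G + b1*(2^130)G + b2*(2^260)G + b3*(2^390)G, entry 0 being the point at
 * infinity.  That matches how batch_mul() assembles its table index from
 * scalar bit positions i, i+130, i+260 and i+390.  A small sketch of that
 * indexing (comb_index is a made-up helper; valid for i < 130, mirroring the
 * guard in batch_mul()):
 */
#if 0                           /* illustrative sketch, not compiled */
static unsigned int comb_index(const felem_bytearray s, int i)
{
    return (get_bit(s, i + 390) << 3) | (get_bit(s, i + 260) << 2)
         | (get_bit(s, i + 130) << 1) | get_bit(s, i);
}
#endif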
2223
2224int ossl_ec_GFp_nistp521_have_precompute_mult(const EC_GROUP *group)
2225{
2226 return HAVEPRECOMP(group, nistp521);
2227}