mbed TLS v2.16.7
include
mbedtls
bn_mul.h
Go to the documentation of this file.
1
6
/*
 *  Copyright (C) 2006-2015, ARM Limited, All Rights Reserved
 *  SPDX-License-Identifier: Apache-2.0 OR GPL-2.0-or-later
 *
 *  This file is provided under the Apache License 2.0, or the
 *  GNU General Public License v2.0 or later.
 *
 *  **********
 *  Apache License 2.0:
 *
 *  Licensed under the Apache License, Version 2.0 (the "License"); you may
 *  not use this file except in compliance with the License.
 *  You may obtain a copy of the License at
 *
 *  http://www.apache.org/licenses/LICENSE-2.0
 *
 *  Unless required by applicable law or agreed to in writing, software
 *  distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
 *  WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 *  See the License for the specific language governing permissions and
 *  limitations under the License.
 *
 *  **********
 *
 *  **********
 *  GNU General Public License v2.0 or later:
 *
 *  This program is free software; you can redistribute it and/or modify
 *  it under the terms of the GNU General Public License as published by
 *  the Free Software Foundation; either version 2 of the License, or
 *  (at your option) any later version.
 *
 *  This program is distributed in the hope that it will be useful,
 *  but WITHOUT ANY WARRANTY; without even the implied warranty of
 *  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 *  GNU General Public License for more details.
 *
 *  You should have received a copy of the GNU General Public License along
 *  with this program; if not, write to the Free Software Foundation, Inc.,
 *  51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 *  **********
 *
 *  This file is part of mbed TLS (https://tls.mbed.org)
 */
/*
 *  Multiply source vector [s] with b, add result
 *  to destination vector [d] and set carry c.
 *
 *  Currently supports:
 *
 *     . IA-32 (386+)         . AMD64 / EM64T
 *     . IA-32 (SSE2)         . Motorola 68000
 *     . PowerPC, 32-bit      . MicroBlaze
 *     . PowerPC, 64-bit      . TriCore
 *     . SPARC v8             . ARM v3+
 *     . Alpha                . MIPS32
 *     . C, longlong          . C, generic
 */
#ifndef MBEDTLS_BN_MUL_H
#define MBEDTLS_BN_MUL_H

/* Pick up the build configuration: the default config.h, or a
 * user-supplied file named by MBEDTLS_CONFIG_FILE. */
#if !defined(MBEDTLS_CONFIG_FILE)
#include "config.h"
#else
#include MBEDTLS_CONFIG_FILE
#endif

/* For mbedtls_mpi_uint and the limb-size constants (biL/biH) used by the
 * C fallback macros below. */
#include "bignum.h"
#if defined(MBEDTLS_HAVE_ASM)

/* Some compilers only accept the __asm spelling of the keyword. */
#ifndef asm
#define asm __asm
#endif

/* armcc5 --gnu defines __GNUC__ but doesn't support GNU's extended asm */
#if defined(__GNUC__) && \
    ( !defined(__ARMCC_VERSION) || __ARMCC_VERSION >= 6000000 )
/*
 * Disable use of the i386 assembly code below if option -O0, to disable all
 * compiler optimisations, is passed, detected with __OPTIMIZE__
 * This is done as the number of registers used in the assembly code doesn't
 * work with the -O0 option.
 */
#if defined(__i386__) && defined(__OPTIMIZE__)

/*
 * Register assignment: esi = s, edi = d, ecx = c, ebx = b.
 * The old value of ebx is spilled to %0 (t) first and restored in
 * MULADDC_STOP, since ebx may be reserved (e.g. as the PIC register).
 */
#define MULADDC_INIT                        \
    asm(                                    \
        "movl   %%ebx, %0           \n\t"   \
        "movl   %5, %%esi           \n\t"   \
        "movl   %6, %%edi           \n\t"   \
        "movl   %7, %%ecx           \n\t"   \
        "movl   %8, %%ebx           \n\t"

/* One limb: *d += (*s++ * b) + c, new carry kept in ecx. */
#define MULADDC_CORE                        \
        "lodsl                      \n\t"   \
        "mull   %%ebx               \n\t"   \
        "addl   %%ecx,   %%eax      \n\t"   \
        "adcl   $0,      %%edx      \n\t"   \
        "addl   (%%edi), %%eax      \n\t"   \
        "adcl   $0,      %%edx      \n\t"   \
        "movl   %%edx,   %%ecx      \n\t"   \
        "stosl                      \n\t"

#if defined(MBEDTLS_HAVE_SSE2)

/* SSE2 variant: processes eight limbs per invocation, carrying the
 * running sum in the upper half of mm1 between limbs. */
#define MULADDC_HUIT                            \
        "movd     %%ecx,     %%mm1      \n\t"   \
        "movd     %%ebx,     %%mm0      \n\t"   \
        "movd     (%%edi),   %%mm3      \n\t"   \
        "paddq    %%mm3,     %%mm1      \n\t"   \
        "movd     (%%esi),   %%mm2      \n\t"   \
        "pmuludq  %%mm0,     %%mm2      \n\t"   \
        "movd     4(%%esi),  %%mm4      \n\t"   \
        "pmuludq  %%mm0,     %%mm4      \n\t"   \
        "movd     8(%%esi),  %%mm6      \n\t"   \
        "pmuludq  %%mm0,     %%mm6      \n\t"   \
        "movd     12(%%esi), %%mm7      \n\t"   \
        "pmuludq  %%mm0,     %%mm7      \n\t"   \
        "paddq    %%mm2,     %%mm1      \n\t"   \
        "movd     4(%%edi),  %%mm3      \n\t"   \
        "paddq    %%mm4,     %%mm3      \n\t"   \
        "movd     8(%%edi),  %%mm5      \n\t"   \
        "paddq    %%mm6,     %%mm5      \n\t"   \
        "movd     12(%%edi), %%mm4      \n\t"   \
        "paddq    %%mm4,     %%mm7      \n\t"   \
        "movd     %%mm1,     (%%edi)    \n\t"   \
        "movd     16(%%esi), %%mm2      \n\t"   \
        "pmuludq  %%mm0,     %%mm2      \n\t"   \
        "psrlq    $32,       %%mm1      \n\t"   \
        "movd     20(%%esi), %%mm4      \n\t"   \
        "pmuludq  %%mm0,     %%mm4      \n\t"   \
        "paddq    %%mm3,     %%mm1      \n\t"   \
        "movd     24(%%esi), %%mm6      \n\t"   \
        "pmuludq  %%mm0,     %%mm6      \n\t"   \
        "movd     %%mm1,     4(%%edi)   \n\t"   \
        "psrlq    $32,       %%mm1      \n\t"   \
        "movd     28(%%esi), %%mm3      \n\t"   \
        "pmuludq  %%mm0,     %%mm3      \n\t"   \
        "paddq    %%mm5,     %%mm1      \n\t"   \
        "movd     16(%%edi), %%mm5      \n\t"   \
        "paddq    %%mm5,     %%mm2      \n\t"   \
        "movd     %%mm1,     8(%%edi)   \n\t"   \
        "psrlq    $32,       %%mm1      \n\t"   \
        "paddq    %%mm7,     %%mm1      \n\t"   \
        "movd     20(%%edi), %%mm5      \n\t"   \
        "paddq    %%mm5,     %%mm4      \n\t"   \
        "movd     %%mm1,     12(%%edi)  \n\t"   \
        "psrlq    $32,       %%mm1      \n\t"   \
        "paddq    %%mm2,     %%mm1      \n\t"   \
        "movd     24(%%edi), %%mm5      \n\t"   \
        "paddq    %%mm5,     %%mm6      \n\t"   \
        "movd     %%mm1,     16(%%edi)  \n\t"   \
        "psrlq    $32,       %%mm1      \n\t"   \
        "paddq    %%mm4,     %%mm1      \n\t"   \
        "movd     28(%%edi), %%mm5      \n\t"   \
        "paddq    %%mm5,     %%mm3      \n\t"   \
        "movd     %%mm1,     20(%%edi)  \n\t"   \
        "psrlq    $32,       %%mm1      \n\t"   \
        "paddq    %%mm6,     %%mm1      \n\t"   \
        "movd     %%mm1,     24(%%edi)  \n\t"   \
        "psrlq    $32,       %%mm1      \n\t"   \
        "paddq    %%mm3,     %%mm1      \n\t"   \
        "movd     %%mm1,     28(%%edi)  \n\t"   \
        "addl     $32,       %%edi      \n\t"   \
        "addl     $32,       %%esi      \n\t"   \
        "psrlq    $32,       %%mm1      \n\t"   \
        "movd     %%mm1,     %%ecx      \n\t"

/* "emms" leaves MMX state clean before any following x87 FPU code. */
#define MULADDC_STOP                    \
        "emms                   \n\t"   \
        "movl   %4, %%ebx       \n\t"   \
        "movl   %%ecx, %1       \n\t"   \
        "movl   %%edi, %2       \n\t"   \
        "movl   %%esi, %3       \n\t"   \
        : "=m" (t), "=m" (c), "=m" (d), "=m" (s)        \
        : "m" (t), "m" (s), "m" (d), "m" (c), "m" (b)   \
        : "eax", "ebx", "ecx", "edx", "esi", "edi"      \
    );

#else

/* Restore ebx and write carry and the advanced pointers back to memory. */
#define MULADDC_STOP                    \
        "movl   %4, %%ebx       \n\t"   \
        "movl   %%ecx, %1       \n\t"   \
        "movl   %%edi, %2       \n\t"   \
        "movl   %%esi, %3       \n\t"   \
        : "=m" (t), "=m" (c), "=m" (d), "=m" (s)        \
        : "m" (t), "m" (s), "m" (d), "m" (c), "m" (b)   \
        : "eax", "ebx", "ecx", "edx", "esi", "edi"      \
    );

#endif /* SSE2 */
#endif /* i386 */
#if defined(__amd64__) || defined (__x86_64__)

/* r8 is zeroed once and reused as a constant 0 source inside the loop. */
#define MULADDC_INIT        \
    asm(                    \
        "xorq   %%r8, %%r8\n"

/* One limb: *d += (*s++ * b) + c; new carry left in rcx. */
#define MULADDC_CORE                        \
        "movq   (%%rsi), %%rax\n"           \
        "mulq   %%rbx\n"                    \
        "addq   $8, %%rsi\n"                \
        "addq   %%rcx, %%rax\n"             \
        "movq   %%r8, %%rcx\n"              \
        "adcq   $0, %%rdx\n"                \
        "nop    \n"                         \
        "addq   %%rax, (%%rdi)\n"           \
        "adcq   %%rdx, %%rcx\n"             \
        "addq   $8, %%rdi\n"

/* c, d, s are bound read-write to rcx/rdi/rsi; b is read-only in rbx. */
#define MULADDC_STOP                        \
        : "+c" (c), "+D" (d), "+S" (s)      \
        : "b" (b)                           \
        : "rax", "rdx", "r8"                \
    );

#endif /* AMD64 */
#if defined(__mc68020__) || defined(__mcpu32__)

/* Register assignment: a2 = s, a3 = d, d3 = c, d2 = b; d0 is kept at
 * zero as a source for add-with-extend (addxl) carry propagation. */
#define MULADDC_INIT                    \
    asm(                                \
        "movl   %3, %%a2    \n\t"       \
        "movl   %4, %%a3    \n\t"       \
        "movl   %5, %%d3    \n\t"       \
        "movl   %6, %%d2    \n\t"       \
        "moveq  #0, %%d0    \n\t"

/* One limb: d4:d1 = *s++ * b; add carry; *d++ += low; carry in d3. */
#define MULADDC_CORE                    \
        "movel  %%a2@+, %%d1        \n\t"   \
        "mulul  %%d2, %%d4:%%d1     \n\t"   \
        "addl   %%d3, %%d1          \n\t"   \
        "addxl  %%d0, %%d4          \n\t"   \
        "moveq  #0,   %%d3          \n\t"   \
        "addl   %%d1, %%a3@+        \n\t"   \
        "addxl  %%d4, %%d3          \n\t"

#define MULADDC_STOP                    \
        "movl   %%d3, %0        \n\t"   \
        "movl   %%a3, %1        \n\t"   \
        "movl   %%a2, %2        \n\t"   \
        : "=m" (c), "=m" (d), "=m" (s)              \
        : "m" (s), "m" (d), "m" (c), "m" (b)        \
        : "d0", "d1", "d2", "d3", "d4", "a2", "a3"  \
    );

/* Unrolled variant: eight limbs per call, alternating d4/d3 as the
 * high-half register so the carry chain needs no extra moves. */
#define MULADDC_HUIT                        \
        "movel  %%a2@+, %%d1        \n\t"   \
        "mulul  %%d2, %%d4:%%d1     \n\t"   \
        "addxl  %%d3, %%d1          \n\t"   \
        "addxl  %%d0, %%d4          \n\t"   \
        "addl   %%d1, %%a3@+        \n\t"   \
        "movel  %%a2@+, %%d1        \n\t"   \
        "mulul  %%d2, %%d3:%%d1     \n\t"   \
        "addxl  %%d4, %%d1          \n\t"   \
        "addxl  %%d0, %%d3          \n\t"   \
        "addl   %%d1, %%a3@+        \n\t"   \
        "movel  %%a2@+, %%d1        \n\t"   \
        "mulul  %%d2, %%d4:%%d1     \n\t"   \
        "addxl  %%d3, %%d1          \n\t"   \
        "addxl  %%d0, %%d4          \n\t"   \
        "addl   %%d1, %%a3@+        \n\t"   \
        "movel  %%a2@+, %%d1        \n\t"   \
        "mulul  %%d2, %%d3:%%d1     \n\t"   \
        "addxl  %%d4, %%d1          \n\t"   \
        "addxl  %%d0, %%d3          \n\t"   \
        "addl   %%d1, %%a3@+        \n\t"   \
        "movel  %%a2@+, %%d1        \n\t"   \
        "mulul  %%d2, %%d4:%%d1     \n\t"   \
        "addxl  %%d3, %%d1          \n\t"   \
        "addxl  %%d0, %%d4          \n\t"   \
        "addl   %%d1, %%a3@+        \n\t"   \
        "movel  %%a2@+, %%d1        \n\t"   \
        "mulul  %%d2, %%d3:%%d1     \n\t"   \
        "addxl  %%d4, %%d1          \n\t"   \
        "addxl  %%d0, %%d3          \n\t"   \
        "addl   %%d1, %%a3@+        \n\t"   \
        "movel  %%a2@+, %%d1        \n\t"   \
        "mulul  %%d2, %%d4:%%d1     \n\t"   \
        "addxl  %%d3, %%d1          \n\t"   \
        "addxl  %%d0, %%d4          \n\t"   \
        "addl   %%d1, %%a3@+        \n\t"   \
        "movel  %%a2@+, %%d1        \n\t"   \
        "mulul  %%d2, %%d3:%%d1     \n\t"   \
        "addxl  %%d4, %%d1          \n\t"   \
        "addxl  %%d0, %%d3          \n\t"   \
        "addl   %%d1, %%a3@+        \n\t"   \
        "addxl  %%d0, %%d3          \n\t"

#endif /* MC68000 */
#if defined(__powerpc64__) || defined(__ppc64__)

/* Apple's assembler wants bare register names (r3); ELF targets want the
 * %%rN spelling — hence the two otherwise-identical variants below.
 * Register assignment (all variants): r3 = s, r4 = d, r5 = c, r6 = b.
 * Pointers are pre-decremented so ldu/stdu (load/store with update) can
 * both advance the pointer and access the limb in one instruction; the
 * trailing "addic ... 0" clears the carry flag before the loop. */
#if defined(__MACH__) && defined(__APPLE__)

#define MULADDC_INIT                    \
    asm(                                \
        "ld     r3, %3      \n\t"       \
        "ld     r4, %4      \n\t"       \
        "ld     r5, %5      \n\t"       \
        "ld     r6, %6      \n\t"       \
        "addi   r3, r3, -8  \n\t"       \
        "addi   r4, r4, -8  \n\t"       \
        "addic  r5, r5,  0  \n\t"

#define MULADDC_CORE                    \
        "ldu    r7, 8(r3)       \n\t"   \
        "mulld  r8, r7, r6      \n\t"   \
        "mulhdu r9, r7, r6      \n\t"   \
        "adde   r8, r8, r5      \n\t"   \
        "ld     r7, 8(r4)       \n\t"   \
        "addze  r5, r9          \n\t"   \
        "addc   r8, r8, r7      \n\t"   \
        "stdu   r8, 8(r4)       \n\t"

#define MULADDC_STOP                    \
        "addze  r5, r5          \n\t"   \
        "addi   r4, r4, 8       \n\t"   \
        "addi   r3, r3, 8       \n\t"   \
        "std    r5, %0          \n\t"   \
        "std    r4, %1          \n\t"   \
        "std    r3, %2          \n\t"   \
        : "=m" (c), "=m" (d), "=m" (s)              \
        : "m" (s), "m" (d), "m" (c), "m" (b)        \
        : "r3", "r4", "r5", "r6", "r7", "r8", "r9"  \
    );


#else /* __MACH__ && __APPLE__ */

#define MULADDC_INIT                        \
    asm(                                    \
        "ld     %%r3, %3        \n\t"       \
        "ld     %%r4, %4        \n\t"       \
        "ld     %%r5, %5        \n\t"       \
        "ld     %%r6, %6        \n\t"       \
        "addi   %%r3, %%r3, -8  \n\t"       \
        "addi   %%r4, %%r4, -8  \n\t"       \
        "addic  %%r5, %%r5,  0  \n\t"

#define MULADDC_CORE                        \
        "ldu    %%r7, 8(%%r3)       \n\t"   \
        "mulld  %%r8, %%r7, %%r6    \n\t"   \
        "mulhdu %%r9, %%r7, %%r6    \n\t"   \
        "adde   %%r8, %%r8, %%r5    \n\t"   \
        "ld     %%r7, 8(%%r4)       \n\t"   \
        "addze  %%r5, %%r9          \n\t"   \
        "addc   %%r8, %%r8, %%r7    \n\t"   \
        "stdu   %%r8, 8(%%r4)       \n\t"

#define MULADDC_STOP                        \
        "addze  %%r5, %%r5          \n\t"   \
        "addi   %%r4, %%r4, 8       \n\t"   \
        "addi   %%r3, %%r3, 8       \n\t"   \
        "std    %%r5, %0            \n\t"   \
        "std    %%r4, %1            \n\t"   \
        "std    %%r3, %2            \n\t"   \
        : "=m" (c), "=m" (d), "=m" (s)              \
        : "m" (s), "m" (d), "m" (c), "m" (b)        \
        : "r3", "r4", "r5", "r6", "r7", "r8", "r9"  \
    );

#endif /* __MACH__ && __APPLE__ */

#elif defined(__powerpc__) || defined(__ppc__) /* end PPC64/begin PPC32 */

/* 32-bit PowerPC: same structure as PPC64 with word (4-byte) limbs. */
#if defined(__MACH__) && defined(__APPLE__)

#define MULADDC_INIT                    \
    asm(                                \
        "lwz    r3, %3      \n\t"       \
        "lwz    r4, %4      \n\t"       \
        "lwz    r5, %5      \n\t"       \
        "lwz    r6, %6      \n\t"       \
        "addi   r3, r3, -4  \n\t"       \
        "addi   r4, r4, -4  \n\t"       \
        "addic  r5, r5,  0  \n\t"

#define MULADDC_CORE                    \
        "lwzu   r7, 4(r3)       \n\t"   \
        "mullw  r8, r7, r6      \n\t"   \
        "mulhwu r9, r7, r6      \n\t"   \
        "adde   r8, r8, r5      \n\t"   \
        "lwz    r7, 4(r4)       \n\t"   \
        "addze  r5, r9          \n\t"   \
        "addc   r8, r8, r7      \n\t"   \
        "stwu   r8, 4(r4)       \n\t"

#define MULADDC_STOP                    \
        "addze  r5, r5          \n\t"   \
        "addi   r4, r4, 4       \n\t"   \
        "addi   r3, r3, 4       \n\t"   \
        "stw    r5, %0          \n\t"   \
        "stw    r4, %1          \n\t"   \
        "stw    r3, %2          \n\t"   \
        : "=m" (c), "=m" (d), "=m" (s)              \
        : "m" (s), "m" (d), "m" (c), "m" (b)        \
        : "r3", "r4", "r5", "r6", "r7", "r8", "r9"  \
    );

#else /* __MACH__ && __APPLE__ */

#define MULADDC_INIT                        \
    asm(                                    \
        "lwz    %%r3, %3        \n\t"       \
        "lwz    %%r4, %4        \n\t"       \
        "lwz    %%r5, %5        \n\t"       \
        "lwz    %%r6, %6        \n\t"       \
        "addi   %%r3, %%r3, -4  \n\t"       \
        "addi   %%r4, %%r4, -4  \n\t"       \
        "addic  %%r5, %%r5,  0  \n\t"

#define MULADDC_CORE                        \
        "lwzu   %%r7, 4(%%r3)       \n\t"   \
        "mullw  %%r8, %%r7, %%r6    \n\t"   \
        "mulhwu %%r9, %%r7, %%r6    \n\t"   \
        "adde   %%r8, %%r8, %%r5    \n\t"   \
        "lwz    %%r7, 4(%%r4)       \n\t"   \
        "addze  %%r5, %%r9          \n\t"   \
        "addc   %%r8, %%r8, %%r7    \n\t"   \
        "stwu   %%r8, 4(%%r4)       \n\t"

#define MULADDC_STOP                        \
        "addze  %%r5, %%r5          \n\t"   \
        "addi   %%r4, %%r4, 4       \n\t"   \
        "addi   %%r3, %%r3, 4       \n\t"   \
        "stw    %%r5, %0            \n\t"   \
        "stw    %%r4, %1            \n\t"   \
        "stw    %%r3, %2            \n\t"   \
        : "=m" (c), "=m" (d), "=m" (s)              \
        : "m" (s), "m" (d), "m" (c), "m" (b)        \
        : "r3", "r4", "r5", "r6", "r7", "r8", "r9"  \
    );

#endif /* __MACH__ && __APPLE__ */

#endif /* PPC32 */
/*
 * The Sparc(64) assembly is reported to be broken.
 * Disable it for now, until we're able to fix it.
 */
#if 0 && defined(__sparc__)
#if defined(__sparc64__)

/* SPARC64 (DISABLED above via "#if 0"): o0 = s, o1 = d, o2 = c, o3 = b.
 * Pointers are 64-bit (ldx/stx); limbs are 32-bit words. */
#define MULADDC_INIT                    \
    asm(                                \
        "ldx %3, %%o0       \n\t"       \
        "ldx %4, %%o1       \n\t"       \
        "ld  %5, %%o2       \n\t"       \
        "ld  %6, %%o3       \n\t"

/* One limb: 32x32 umul, high half read from the %y register. */
#define MULADDC_CORE                        \
        "ld     [%%o0], %%o4        \n\t"   \
        "inc    4, %%o0             \n\t"   \
        "ld     [%%o1], %%o5        \n\t"   \
        "umul   %%o3, %%o4, %%o4    \n\t"   \
        "addcc  %%o4, %%o2, %%o4    \n\t"   \
        "rd     %%y, %%g1           \n\t"   \
        "addx   %%g1, 0, %%g1       \n\t"   \
        "addcc  %%o4, %%o5, %%o4    \n\t"   \
        "st     %%o4, [%%o1]        \n\t"   \
        "addx   %%g1, 0, %%o2       \n\t"   \
        "inc    4, %%o1             \n\t"

#define MULADDC_STOP                    \
        "st     %%o2, %0    \n\t"       \
        "stx    %%o1, %1    \n\t"       \
        "stx    %%o0, %2    \n\t"       \
        : "=m" (c), "=m" (d), "=m" (s)          \
        : "m" (s), "m" (d), "m" (c), "m" (b)    \
        : "g1", "o0", "o1", "o2", "o3", "o4",   \
          "o5"                                  \
    );

#else /* __sparc64__ */

/* SPARC v8, 32-bit: identical loop with 32-bit loads/stores throughout. */
#define MULADDC_INIT                    \
    asm(                                \
        "ld %3, %%o0        \n\t"       \
        "ld %4, %%o1        \n\t"       \
        "ld %5, %%o2        \n\t"       \
        "ld %6, %%o3        \n\t"

#define MULADDC_CORE                        \
        "ld     [%%o0], %%o4        \n\t"   \
        "inc    4, %%o0             \n\t"   \
        "ld     [%%o1], %%o5        \n\t"   \
        "umul   %%o3, %%o4, %%o4    \n\t"   \
        "addcc  %%o4, %%o2, %%o4    \n\t"   \
        "rd     %%y, %%g1           \n\t"   \
        "addx   %%g1, 0, %%g1       \n\t"   \
        "addcc  %%o4, %%o5, %%o4    \n\t"   \
        "st     %%o4, [%%o1]        \n\t"   \
        "addx   %%g1, 0, %%o2       \n\t"   \
        "inc    4, %%o1             \n\t"

#define MULADDC_STOP                    \
        "st     %%o2, %0    \n\t"       \
        "st     %%o1, %1    \n\t"       \
        "st     %%o0, %2    \n\t"       \
        : "=m" (c), "=m" (d), "=m" (s)          \
        : "m" (s), "m" (d), "m" (c), "m" (b)    \
        : "g1", "o0", "o1", "o2", "o3", "o4",   \
          "o5"                                  \
    );

#endif /* __sparc64__ */
#endif /* __sparc__ */
#if defined(__microblaze__) || defined(microblaze)

/* MicroBlaze has only a 32x32->32 multiply, so the multiplier b is split
 * into 16-bit halves (r7 = low, r6 = high) and the full 64-bit product is
 * assembled from four partial products in MULADDC_CORE.
 * Registers: r3 = s, r4 = d, r5 = c. */
#define MULADDC_INIT                    \
    asm(                                \
        "lwi    r3,   %3         \n\t"  \
        "lwi    r4,   %4         \n\t"  \
        "lwi    r5,   %5         \n\t"  \
        "lwi    r6,   %6         \n\t"  \
        "andi   r7,   r6, 0xffff \n\t"  \
        "bsrli  r6,   r6, 16     \n\t"

/* NOTE(review): the limb is read as two 16-bit halfword loads (lhui) —
 * half-ordering assumptions should be checked against the target's
 * endianness before relying on this path. */
#define MULADDC_CORE                    \
        "lhui   r8,   r3,   0    \n\t"  \
        "addi   r3,   r3,   2    \n\t"  \
        "lhui   r9,   r3,   0    \n\t"  \
        "addi   r3,   r3,   2    \n\t"  \
        "mul    r10,  r9,  r6    \n\t"  \
        "mul    r11,  r8,  r7    \n\t"  \
        "mul    r12,  r9,  r7    \n\t"  \
        "mul    r13,  r8,  r6    \n\t"  \
        "bsrli  r8,   r10, 16    \n\t"  \
        "bsrli  r9,   r11, 16    \n\t"  \
        "add    r13,  r13, r8    \n\t"  \
        "add    r13,  r13, r9    \n\t"  \
        "bslli  r10,  r10, 16    \n\t"  \
        "bslli  r11,  r11, 16    \n\t"  \
        "add    r12,  r12, r10   \n\t"  \
        "addc   r13,  r13, r0    \n\t"  \
        "add    r12,  r12, r11   \n\t"  \
        "addc   r13,  r13, r0    \n\t"  \
        "lwi    r10,  r4,   0    \n\t"  \
        "add    r12,  r12, r10   \n\t"  \
        "addc   r13,  r13, r0    \n\t"  \
        "add    r12,  r12, r5    \n\t"  \
        "addc   r5,   r13, r0    \n\t"  \
        "swi    r12,  r4,   0    \n\t"  \
        "addi   r4,   r4,   4    \n\t"

#define MULADDC_STOP                    \
        "swi    r5,   %0        \n\t"   \
        "swi    r4,   %1        \n\t"   \
        "swi    r3,   %2        \n\t"   \
        : "=m" (c), "=m" (d), "=m" (s)          \
        : "m" (s), "m" (d), "m" (c), "m" (b)    \
        : "r3", "r4", "r5", "r6", "r7", "r8",   \
          "r9", "r10", "r11", "r12", "r13"      \
    );

#endif /* MicroBlaze */
#if defined(__tricore__)

/* TriCore: a2 = s, a3 = d, d4 = c, d1 = b; d5 is zeroed (upper half of
 * the e4 register pair used by the 64-bit madd.u accumulate). */
#define MULADDC_INIT                        \
    asm(                                    \
        "ld.a   %%a2, %3        \n\t"       \
        "ld.a   %%a3, %4        \n\t"       \
        "ld.w   %%d4, %5        \n\t"       \
        "ld.w   %%d1, %6        \n\t"       \
        "xor    %%d5, %%d5      \n\t"

/* One limb: e2 (d3:d2) = *s++ * b + c via madd.u, then add *d and store. */
#define MULADDC_CORE                            \
        "ld.w   %%d0,   [%%a2+]         \n\t"   \
        "madd.u %%e2, %%e4, %%d0, %%d1  \n\t"   \
        "ld.w   %%d0,   [%%a3]          \n\t"   \
        "addx   %%d2,   %%d2, %%d0      \n\t"   \
        "addc   %%d3,   %%d3, 0         \n\t"   \
        "mov    %%d4,   %%d3            \n\t"   \
        "st.w  [%%a3+], %%d2            \n\t"

#define MULADDC_STOP                    \
        "st.w   %0, %%d4    \n\t"       \
        "st.a   %1, %%a3    \n\t"       \
        "st.a   %2, %%a2    \n\t"       \
        : "=m" (c), "=m" (d), "=m" (s)          \
        : "m" (s), "m" (d), "m" (c), "m" (b)    \
        : "d0", "d1", "e2", "d4", "a2", "a3"    \
    );

#endif /* TriCore */
/*
 * Note, gcc -O0 by default uses r7 for the frame pointer, so it complains about
 * our use of r7 below, unless -fomit-frame-pointer is passed.
 *
 * On the other hand, -fomit-frame-pointer is implied by any -Ox options with
 * x !=0, which we can detect using __OPTIMIZE__ (which is also defined by
 * clang and armcc5 under the same conditions).
 *
 * So, only use the optimized assembly below for optimized build, which avoids
 * the build error and is pretty reasonable anyway.
 */
#if defined(__GNUC__) && !defined(__OPTIMIZE__)
#define MULADDC_CANNOT_USE_R7
#endif

#if defined(__arm__) && !defined(MULADDC_CANNOT_USE_R7)

#if defined(__thumb__) && !defined(__thumb2__)

/* Thumb-1: no long multiply instruction, so b is split into 16-bit
 * halves (r8 = low, r9 = high, parked in high registers) and the 64-bit
 * product is assembled from four 16x16 partial products per limb.
 * Registers: r0 = s, r1 = d, r2 = c. */
#define MULADDC_INIT                        \
    asm(                                    \
        "ldr    r0, %3              \n\t"   \
        "ldr    r1, %4              \n\t"   \
        "ldr    r2, %5              \n\t"   \
        "ldr    r3, %6              \n\t"   \
        "lsr    r7, r3, #16         \n\t"   \
        "mov    r9, r7              \n\t"   \
        "lsl    r7, r3, #16         \n\t"   \
        "lsr    r7, r7, #16         \n\t"   \
        "mov    r8, r7              \n\t"

#define MULADDC_CORE                        \
        "ldmia  r0!, {r6}           \n\t"   \
        "lsr    r7, r6, #16         \n\t"   \
        "lsl    r6, r6, #16         \n\t"   \
        "lsr    r6, r6, #16         \n\t"   \
        "mov    r4, r8              \n\t"   \
        "mul    r4, r6              \n\t"   \
        "mov    r3, r9              \n\t"   \
        "mul    r6, r3              \n\t"   \
        "mov    r5, r9              \n\t"   \
        "mul    r5, r7              \n\t"   \
        "mov    r3, r8              \n\t"   \
        "mul    r7, r3              \n\t"   \
        "lsr    r3, r6, #16         \n\t"   \
        "add    r5, r5, r3          \n\t"   \
        "lsr    r3, r7, #16         \n\t"   \
        "add    r5, r5, r3          \n\t"   \
        "add    r4, r4, r2          \n\t"   \
        "mov    r2, #0              \n\t"   \
        "adc    r5, r2              \n\t"   \
        "lsl    r3, r6, #16         \n\t"   \
        "add    r4, r4, r3          \n\t"   \
        "adc    r5, r2              \n\t"   \
        "lsl    r3, r7, #16         \n\t"   \
        "add    r4, r4, r3          \n\t"   \
        "adc    r5, r2              \n\t"   \
        "ldr    r3, [r1]            \n\t"   \
        "add    r4, r4, r3          \n\t"   \
        "adc    r2, r5              \n\t"   \
        "stmia  r1!, {r4}           \n\t"

#define MULADDC_STOP                        \
        "str    r2, %0              \n\t"   \
        "str    r1, %1              \n\t"   \
        "str    r0, %2              \n\t"   \
        : "=m" (c), "=m" (d), "=m" (s)          \
        : "m" (s), "m" (d), "m" (c), "m" (b)    \
        : "r0", "r1", "r2", "r3", "r4", "r5",   \
          "r6", "r7", "r8", "r9", "cc"          \
    );

#elif (__ARM_ARCH >= 6) && \
    defined (__ARM_FEATURE_DSP) && (__ARM_FEATURE_DSP == 1)

/* ARMv6+ with DSP: umaal does the whole multiply-accumulate-accumulate
 * (*d + *s * b + c) in one instruction; the loop needs no INIT work. */
#define MULADDC_INIT    \
    asm(

#define MULADDC_CORE                            \
        "ldr    r0, [%0], #4        \n\t"       \
        "ldr    r1, [%1]            \n\t"       \
        "umaal  r1, %2, %3, r0      \n\t"       \
        "str    r1, [%1], #4        \n\t"

/* s, d, c are updated in place via matching input/output constraints. */
#define MULADDC_STOP                            \
        : "=r" (s),  "=r" (d), "=r" (c)         \
        : "r" (b), "0" (s), "1" (d), "2" (c)    \
        : "r0", "r1", "memory"                  \
    );

#else

/* Generic ARM (v3+): umlal provides the 32x32->64 multiply-accumulate.
 * Registers: r0 = s, r1 = d, r2 = c, r3 = b. */
#define MULADDC_INIT                        \
    asm(                                    \
        "ldr    r0, %3              \n\t"   \
        "ldr    r1, %4              \n\t"   \
        "ldr    r2, %5              \n\t"   \
        "ldr    r3, %6              \n\t"

#define MULADDC_CORE                        \
        "ldr    r4, [r0], #4        \n\t"   \
        "mov    r5, #0              \n\t"   \
        "ldr    r6, [r1]            \n\t"   \
        "umlal  r2, r5, r3, r4      \n\t"   \
        "adds   r7, r6, r2          \n\t"   \
        "adc    r2, r5, #0          \n\t"   \
        "str    r7, [r1], #4        \n\t"

#define MULADDC_STOP                        \
        "str    r2, %0              \n\t"   \
        "str    r1, %1              \n\t"   \
        "str    r0, %2              \n\t"   \
        : "=m" (c), "=m" (d), "=m" (s)          \
        : "m" (s), "m" (d), "m" (c), "m" (b)    \
        : "r0", "r1", "r2", "r3", "r4", "r5",   \
          "r6", "r7", "cc"                      \
    );

#endif /* Thumb */

#endif /* ARMv3 */
#if defined(__alpha__)

/* Alpha: $1 = s, $2 = d, $3 = c, $4 = b.  No carry flag on Alpha, so
 * carries are recovered with cmpult (compare-unsigned-less-than). */
#define MULADDC_INIT                    \
    asm(                                \
        "ldq    $1, %3      \n\t"       \
        "ldq    $2, %4      \n\t"       \
        "ldq    $3, %5      \n\t"       \
        "ldq    $4, %6      \n\t"

/* One limb: mulq/umulh give the 128-bit product; cmpult rebuilds carries. */
#define MULADDC_CORE                    \
        "ldq    $6,  0($1)      \n\t"   \
        "addq   $1,  8, $1      \n\t"   \
        "mulq   $6, $4, $7      \n\t"   \
        "umulh  $6, $4, $6      \n\t"   \
        "addq   $7, $3, $7      \n\t"   \
        "cmpult $7, $3, $3      \n\t"   \
        "ldq    $5,  0($2)      \n\t"   \
        "addq   $7, $5, $7      \n\t"   \
        "cmpult $7, $5, $5      \n\t"   \
        "stq    $7,  0($2)      \n\t"   \
        "addq   $2,  8, $2      \n\t"   \
        "addq   $6, $3, $3      \n\t"   \
        "addq   $5, $3, $3      \n\t"

#define MULADDC_STOP                    \
        "stq    $3, %0      \n\t"       \
        "stq    $2, %1      \n\t"       \
        "stq    $1, %2      \n\t"       \
        : "=m" (c), "=m" (d), "=m" (s)              \
        : "m" (s), "m" (d), "m" (c), "m" (b)        \
        : "$1", "$2", "$3", "$4", "$5", "$6", "$7"  \
    );
#endif /* Alpha */
#if defined(__mips__) && !defined(__mips64)

/* MIPS32: $10 = s, $11 = d, $12 = c, $13 = b.  multu puts the 64-bit
 * product in hi:lo; sltu (set-on-less-than-unsigned) rebuilds carries. */
#define MULADDC_INIT                    \
    asm(                                \
        "lw     $10, %3     \n\t"       \
        "lw     $11, %4     \n\t"       \
        "lw     $12, %5     \n\t"       \
        "lw     $13, %6     \n\t"

#define MULADDC_CORE                    \
        "lw     $14, 0($10)     \n\t"   \
        "multu  $13, $14        \n\t"   \
        "addi   $10, $10, 4     \n\t"   \
        "mflo   $14             \n\t"   \
        "mfhi   $9              \n\t"   \
        "addu   $14, $12, $14   \n\t"   \
        "lw     $15, 0($11)     \n\t"   \
        "sltu   $12, $14, $12   \n\t"   \
        "addu   $15, $14, $15   \n\t"   \
        "sltu   $14, $15, $14   \n\t"   \
        "addu   $12, $12, $9    \n\t"   \
        "sw     $15, 0($11)     \n\t"   \
        "addu   $12, $12, $14   \n\t"   \
        "addi   $11, $11, 4     \n\t"

#define MULADDC_STOP                    \
        "sw     $12, %0     \n\t"       \
        "sw     $11, %1     \n\t"       \
        "sw     $10, %2     \n\t"       \
        : "=m" (c), "=m" (d), "=m" (s)                                  \
        : "m" (s), "m" (d), "m" (c), "m" (b)                            \
        : "$9", "$10", "$11", "$12", "$13", "$14", "$15", "lo", "hi"    \
    );

#endif /* MIPS */
#endif /* GNUC */
#if (defined(_MSC_VER) && defined(_M_IX86)) || defined(__WATCOMC__)

/* MSVC / Watcom inline assembly (Intel syntax): esi = s, edi = d,
 * ecx = c, ebx = b — the same register assignment as the GCC i386 path. */
#define MULADDC_INIT                            \
    __asm   mov     esi, s                      \
    __asm   mov     edi, d                      \
    __asm   mov     ecx, c                      \
    __asm   mov     ebx, b

/* One limb: *d += (*s++ * b) + c; carry kept in ecx. */
#define MULADDC_CORE                            \
    __asm   lodsd                               \
    __asm   mul     ebx                         \
    __asm   add     eax, ecx                    \
    __asm   adc     edx, 0                      \
    __asm   add     eax, [edi]                  \
    __asm   adc     edx, 0                      \
    __asm   mov     ecx, edx                    \
    __asm   stosd

#if defined(MBEDTLS_HAVE_SSE2)

/* SSE2 instructions are emitted as raw opcode bytes: old MSVC inline
 * assemblers do not know the mnemonics.  The byte sequences encode the
 * same eight-limb routine as the GCC MULADDC_HUIT above. */
#define EMIT __asm _emit

#define MULADDC_HUIT                            \
    EMIT 0x0F EMIT 0x6E EMIT 0xC9               \
    EMIT 0x0F EMIT 0x6E EMIT 0xC3               \
    EMIT 0x0F EMIT 0x6E EMIT 0x1F               \
    EMIT 0x0F EMIT 0xD4 EMIT 0xCB               \
    EMIT 0x0F EMIT 0x6E EMIT 0x16               \
    EMIT 0x0F EMIT 0xF4 EMIT 0xD0               \
    EMIT 0x0F EMIT 0x6E EMIT 0x66 EMIT 0x04     \
    EMIT 0x0F EMIT 0xF4 EMIT 0xE0               \
    EMIT 0x0F EMIT 0x6E EMIT 0x76 EMIT 0x08     \
    EMIT 0x0F EMIT 0xF4 EMIT 0xF0               \
    EMIT 0x0F EMIT 0x6E EMIT 0x7E EMIT 0x0C     \
    EMIT 0x0F EMIT 0xF4 EMIT 0xF8               \
    EMIT 0x0F EMIT 0xD4 EMIT 0xCA               \
    EMIT 0x0F EMIT 0x6E EMIT 0x5F EMIT 0x04     \
    EMIT 0x0F EMIT 0xD4 EMIT 0xDC               \
    EMIT 0x0F EMIT 0x6E EMIT 0x6F EMIT 0x08     \
    EMIT 0x0F EMIT 0xD4 EMIT 0xEE               \
    EMIT 0x0F EMIT 0x6E EMIT 0x67 EMIT 0x0C     \
    EMIT 0x0F EMIT 0xD4 EMIT 0xFC               \
    EMIT 0x0F EMIT 0x7E EMIT 0x0F               \
    EMIT 0x0F EMIT 0x6E EMIT 0x56 EMIT 0x10     \
    EMIT 0x0F EMIT 0xF4 EMIT 0xD0               \
    EMIT 0x0F EMIT 0x73 EMIT 0xD1 EMIT 0x20     \
    EMIT 0x0F EMIT 0x6E EMIT 0x66 EMIT 0x14     \
    EMIT 0x0F EMIT 0xF4 EMIT 0xE0               \
    EMIT 0x0F EMIT 0xD4 EMIT 0xCB               \
    EMIT 0x0F EMIT 0x6E EMIT 0x76 EMIT 0x18     \
    EMIT 0x0F EMIT 0xF4 EMIT 0xF0               \
    EMIT 0x0F EMIT 0x7E EMIT 0x4F EMIT 0x04     \
    EMIT 0x0F EMIT 0x73 EMIT 0xD1 EMIT 0x20     \
    EMIT 0x0F EMIT 0x6E EMIT 0x5E EMIT 0x1C     \
    EMIT 0x0F EMIT 0xF4 EMIT 0xD8               \
    EMIT 0x0F EMIT 0xD4 EMIT 0xCD               \
    EMIT 0x0F EMIT 0x6E EMIT 0x6F EMIT 0x10     \
    EMIT 0x0F EMIT 0xD4 EMIT 0xD5               \
    EMIT 0x0F EMIT 0x7E EMIT 0x4F EMIT 0x08     \
    EMIT 0x0F EMIT 0x73 EMIT 0xD1 EMIT 0x20     \
    EMIT 0x0F EMIT 0xD4 EMIT 0xCF               \
    EMIT 0x0F EMIT 0x6E EMIT 0x6F EMIT 0x14     \
    EMIT 0x0F EMIT 0xD4 EMIT 0xE5               \
    EMIT 0x0F EMIT 0x7E EMIT 0x4F EMIT 0x0C     \
    EMIT 0x0F EMIT 0x73 EMIT 0xD1 EMIT 0x20     \
    EMIT 0x0F EMIT 0xD4 EMIT 0xCA               \
    EMIT 0x0F EMIT 0x6E EMIT 0x6F EMIT 0x18     \
    EMIT 0x0F EMIT 0xD4 EMIT 0xF5               \
    EMIT 0x0F EMIT 0x7E EMIT 0x4F EMIT 0x10     \
    EMIT 0x0F EMIT 0x73 EMIT 0xD1 EMIT 0x20     \
    EMIT 0x0F EMIT 0xD4 EMIT 0xCC               \
    EMIT 0x0F EMIT 0x6E EMIT 0x6F EMIT 0x1C     \
    EMIT 0x0F EMIT 0xD4 EMIT 0xDD               \
    EMIT 0x0F EMIT 0x7E EMIT 0x4F EMIT 0x14     \
    EMIT 0x0F EMIT 0x73 EMIT 0xD1 EMIT 0x20     \
    EMIT 0x0F EMIT 0xD4 EMIT 0xCE               \
    EMIT 0x0F EMIT 0x7E EMIT 0x4F EMIT 0x18     \
    EMIT 0x0F EMIT 0x73 EMIT 0xD1 EMIT 0x20     \
    EMIT 0x0F EMIT 0xD4 EMIT 0xCB               \
    EMIT 0x0F EMIT 0x7E EMIT 0x4F EMIT 0x1C     \
    EMIT 0x83 EMIT 0xC7 EMIT 0x20               \
    EMIT 0x83 EMIT 0xC6 EMIT 0x20               \
    EMIT 0x0F EMIT 0x73 EMIT 0xD1 EMIT 0x20     \
    EMIT 0x0F EMIT 0x7E EMIT 0xC9

/* 0x0F 0x77 is "emms": clear MMX state before returning to FPU code. */
#define MULADDC_STOP                            \
    EMIT 0x0F EMIT 0x77                         \
    __asm   mov     c, ecx                      \
    __asm   mov     d, edi                      \
    __asm   mov     s, esi                      \

#else

#define MULADDC_STOP                            \
    __asm   mov     c, ecx                      \
    __asm   mov     d, edi                      \
    __asm   mov     s, esi                      \

#endif /* SSE2 */
#endif /* MSVC */

#endif /* MBEDTLS_HAVE_ASM */
#if !defined(MULADDC_CORE)
#if defined(MBEDTLS_HAVE_UDBL)

/* Portable C fallback using a double-width integer type
 * (mbedtls_t_udbl, twice the limb size). */
#define MULADDC_INIT                    \
{                                       \
    mbedtls_t_udbl r;                   \
    mbedtls_mpi_uint r0, r1;

/* One limb: r = *s++ * b; split into halves; add c and *d,
 * propagating each carry into r1; store low half, keep high as c. */
#define MULADDC_CORE                    \
    r   = *(s++) * (mbedtls_t_udbl) b;  \
    r0  = (mbedtls_mpi_uint) r;         \
    r1  = (mbedtls_mpi_uint)( r >> biL );   \
    r0 += c;  r1 += (r0 <  c);          \
    r0 += *d; r1 += (r0 < *d);          \
    c = r1; *(d++) = r0;

#define MULADDC_STOP                    \
}

#else
/* Fully generic C fallback: build the double-width product from four
 * half-limb (biH-bit) partial products, with explicit carry recovery
 * via the (sum < addend) unsigned-overflow test. */
#define MULADDC_INIT                    \
{                                       \
    mbedtls_mpi_uint s0, s1, b0, b1;    \
    mbedtls_mpi_uint r0, r1, rx, ry;    \
    b0 = ( b << biH ) >> biH;           \
    b1 = ( b >> biH );

#define MULADDC_CORE                    \
    s0 = ( *s << biH ) >> biH;          \
    s1 = ( *s >> biH ); s++;            \
    rx = s0 * b1; r0 = s0 * b0;         \
    ry = s1 * b0; r1 = s1 * b1;         \
    r1 += ( rx >> biH );                \
    r1 += ( ry >> biH );                \
    rx <<= biH; ry <<= biH;             \
    r0 += rx; r1 += (r0 < rx);          \
    r0 += ry; r1 += (r0 < ry);          \
    r0 += c;  r1 += (r0 < c);           \
    r0 += *d; r1 += (r0 < *d);          \
    c = r1; *(d++) = r0;

#define MULADDC_STOP                    \
}

#endif /* C (generic) */
#endif /* C (longlong) */

#endif /* bn_mul.h */
bignum.h
Multi-precision integer library.
config.h
Configuration options (set of defines)
Generated on Thu Aug 20 2020 00:00:00 for mbed TLS v2.16.7 by
1.8.18