/*
 * Multiply-accumulate (MULADDC) helper macros for the multi-precision
 * integer (bignum) multiplication loops.  Per-architecture inline
 * assembly versions are provided below, with a portable C fallback.
 *
 * NOTE(review): the original license/header comment was garbled in
 * extraction and has been replaced by this placeholder — restore the
 * project's standard header.
 */
00041 #ifndef POLARSSL_BN_MUL_H
00042 #define POLARSSL_BN_MUL_H
00043
00044 #include "bignum.h"
00045
00046 #if defined(POLARSSL_HAVE_ASM)
00047
#if defined(__GNUC__)
#if defined(__i386__)

/*
 * i386 (GCC inline asm), one limb per MULADDC_CORE.
 *
 * Register mapping loaded by MULADDC_INIT:
 *   %esi = s (source limbs), %edi = d (destination limbs),
 *   %ecx = c (running carry), %ebx = b (multiplier limb).
 * The caller's %ebx is saved into t (%0) and restored by MULADDC_STOP;
 * NOTE(review): %ebx is deliberately absent from the clobber list --
 * presumably because it is reserved (e.g. the PIC base register), so it
 * must be preserved manually; confirm against the build configuration.
 */
#define MULADDC_INIT \
asm( " \
movl %%ebx, %0; \
movl %5, %%esi; \
movl %6, %%edi; \
movl %7, %%ecx; \
movl %8, %%ebx; \
"

/*
 * One step: lodsl fetches *s++ into %eax, mull leaves the 64-bit
 * product b * s[i] in %edx:%eax, the carry c and then *d are added
 * (each overflow bumped into %edx via adcl), the new carry moves to
 * %ecx and stosl stores the low word to *d++.
 */
#define MULADDC_CORE \
" \
lodsl; \
mull %%ebx; \
addl %%ecx, %%eax; \
adcl $0, %%edx; \
addl (%%edi), %%eax; \
adcl $0, %%edx; \
movl %%edx, %%ecx; \
stosl; \
"

#if defined(POLARSSL_HAVE_SSE2)

/*
 * SSE2 variant: "huit" (French for eight) processes eight 32-bit limbs
 * per invocation.  Each pmuludq forms a 64-bit product in an MMX
 * register; partial sums are folded through %mm1, whose high half
 * (psrlq $32) carries into the next limb.  Offsets 0..28 are read from
 * %esi/%edi, both pointers advance by 32 bytes, and the final
 * psrlq/movd leaves the outgoing carry in %ecx.
 */
#define MULADDC_HUIT \
" \
movd %%ecx, %%mm1; \
movd %%ebx, %%mm0; \
movd (%%edi), %%mm3; \
paddq %%mm3, %%mm1; \
movd (%%esi), %%mm2; \
pmuludq %%mm0, %%mm2; \
movd 4(%%esi), %%mm4; \
pmuludq %%mm0, %%mm4; \
movd 8(%%esi), %%mm6; \
pmuludq %%mm0, %%mm6; \
movd 12(%%esi), %%mm7; \
pmuludq %%mm0, %%mm7; \
paddq %%mm2, %%mm1; \
movd 4(%%edi), %%mm3; \
paddq %%mm4, %%mm3; \
movd 8(%%edi), %%mm5; \
paddq %%mm6, %%mm5; \
movd 12(%%edi), %%mm4; \
paddq %%mm4, %%mm7; \
movd %%mm1, (%%edi); \
movd 16(%%esi), %%mm2; \
pmuludq %%mm0, %%mm2; \
psrlq $32, %%mm1; \
movd 20(%%esi), %%mm4; \
pmuludq %%mm0, %%mm4; \
paddq %%mm3, %%mm1; \
movd 24(%%esi), %%mm6; \
pmuludq %%mm0, %%mm6; \
movd %%mm1, 4(%%edi); \
psrlq $32, %%mm1; \
movd 28(%%esi), %%mm3; \
pmuludq %%mm0, %%mm3; \
paddq %%mm5, %%mm1; \
movd 16(%%edi), %%mm5; \
paddq %%mm5, %%mm2; \
movd %%mm1, 8(%%edi); \
psrlq $32, %%mm1; \
paddq %%mm7, %%mm1; \
movd 20(%%edi), %%mm5; \
paddq %%mm5, %%mm4; \
movd %%mm1, 12(%%edi); \
psrlq $32, %%mm1; \
paddq %%mm2, %%mm1; \
movd 24(%%edi), %%mm5; \
paddq %%mm5, %%mm6; \
movd %%mm1, 16(%%edi); \
psrlq $32, %%mm1; \
paddq %%mm4, %%mm1; \
movd 28(%%edi), %%mm5; \
paddq %%mm5, %%mm3; \
movd %%mm1, 20(%%edi); \
psrlq $32, %%mm1; \
paddq %%mm6, %%mm1; \
movd %%mm1, 24(%%edi); \
psrlq $32, %%mm1; \
paddq %%mm3, %%mm1; \
movd %%mm1, 28(%%edi); \
addl $32, %%edi; \
addl $32, %%esi; \
psrlq $32, %%mm1; \
movd %%mm1, %%ecx; \
"

/*
 * SSE2 epilogue: emms resets the MMX/FPU state, %ebx is restored from
 * t (%4), then carry and both pointers are written back to c, d, s.
 */
#define MULADDC_STOP \
" \
emms; \
movl %4, %%ebx; \
movl %%ecx, %1; \
movl %%edi, %2; \
movl %%esi, %3; \
" \
: "=m" (t), "=m" (c), "=m" (d), "=m" (s) \
: "m" (t), "m" (s), "m" (d), "m" (c), "m" (b) \
: "eax", "ecx", "edx", "esi", "edi" \
);

#else

/* Non-SSE2 epilogue: restore %ebx and write back c, d, s. */
#define MULADDC_STOP \
" \
movl %4, %%ebx; \
movl %%ecx, %1; \
movl %%edi, %2; \
movl %%esi, %3; \
" \
: "=m" (t), "=m" (c), "=m" (d), "=m" (s) \
: "m" (t), "m" (s), "m" (d), "m" (c), "m" (b) \
: "eax", "ecx", "edx", "esi", "edi" \
);
#endif
#endif
00167
#if defined(__amd64__) || defined (__x86_64__)

/*
 * x86-64: %rsi = s, %rdi = d, %rcx = c (carry), %rbx = b, %r8 = 0
 * (kept as a zero source so "movq %r8, %rcx" clears the carry).
 *
 * NOTE(review): this section uses many separate asm() statements with
 * the clobber list only on the last one in MULADDC_STOP; it relies on
 * the compiler neither reordering the statements nor reusing these
 * registers in between -- fragile with modern GCC, confirm.
 */
#define MULADDC_INIT \
asm( "movq %0, %%rsi " :: "m" (s)); \
asm( "movq %0, %%rdi " :: "m" (d)); \
asm( "movq %0, %%rcx " :: "m" (c)); \
asm( "movq %0, %%rbx " :: "m" (b)); \
asm( "xorq %r8, %r8 " );

/*
 * One 64-bit limb: mulq leaves b * s[i] in %rdx:%rax; the old carry
 * is added, the result is added into *d in memory, and the new carry
 * (high word plus overflow) accumulates in %rcx.
 */
#define MULADDC_CORE \
asm( "movq (%rsi),%rax " ); \
asm( "mulq %rbx " ); \
asm( "addq $8, %rsi " ); \
asm( "addq %rcx, %rax " ); \
asm( "movq %r8, %rcx " ); \
asm( "adcq $0, %rdx " ); \
asm( "nop " ); \
asm( "addq %rax, (%rdi) " ); \
asm( "adcq %rdx, %rcx " ); \
asm( "addq $8, %rdi " );

/* Write back carry and advanced pointers; declare all clobbers. */
#define MULADDC_STOP \
asm( "movq %%rcx, %0 " : "=m" (c)); \
asm( "movq %%rdi, %0 " : "=m" (d)); \
asm( "movq %%rsi, %0 " : "=m" (s) :: \
"rax", "rcx", "rdx", "rbx", "rsi", "rdi", "r8" );

#endif
00196
#if defined(__mc68020__) || defined(__mcpu32__)

/*
 * m68k (68020+/CPU32): %a2 = s, %a3 = d, %d3 = c (carry), %d2 = b,
 * %d0 = constant 0 (for add-with-extend carry propagation).
 */
#define MULADDC_INIT \
asm( "movl %0, %%a2 " :: "m" (s)); \
asm( "movl %0, %%a3 " :: "m" (d)); \
asm( "movl %0, %%d3 " :: "m" (c)); \
asm( "movl %0, %%d2 " :: "m" (b)); \
asm( "moveq #0, %d0 " );

/*
 * One limb: mulul leaves the 64-bit product in %d4:%d1; the carry is
 * added, the low word is added into *d (post-increment), and the new
 * carry is rebuilt in %d3 via addxl against the zero register.
 */
#define MULADDC_CORE \
asm( "movel %a2@+, %d1 " ); \
asm( "mulul %d2, %d4:%d1 " ); \
asm( "addl %d3, %d1 " ); \
asm( "addxl %d0, %d4 " ); \
asm( "moveq #0, %d3 " ); \
asm( "addl %d1, %a3@+ " ); \
asm( "addxl %d4, %d3 " );

/* Write back carry and advanced pointers; declare clobbers. */
#define MULADDC_STOP \
asm( "movl %%d3, %0 " : "=m" (c)); \
asm( "movl %%a3, %0 " : "=m" (d)); \
asm( "movl %%a2, %0 " : "=m" (s) :: \
"d0", "d1", "d2", "d3", "d4", "a2", "a3" );

/*
 * Eight limbs unrolled, alternating the high-word register between
 * %d4 and %d3 so the carry chains from step to step.
 * NOTE(review): the first addition uses addxl (add with extend) where
 * MULADDC_CORE uses a plain addl at the same position -- verify the
 * extend-flag state on entry makes this equivalent.
 */
#define MULADDC_HUIT \
asm( "movel %a2@+, %d1 " ); \
asm( "mulul %d2, %d4:%d1 " ); \
asm( "addxl %d3, %d1 " ); \
asm( "addxl %d0, %d4 " ); \
asm( "addl %d1, %a3@+ " ); \
asm( "movel %a2@+, %d1 " ); \
asm( "mulul %d2, %d3:%d1 " ); \
asm( "addxl %d4, %d1 " ); \
asm( "addxl %d0, %d3 " ); \
asm( "addl %d1, %a3@+ " ); \
asm( "movel %a2@+, %d1 " ); \
asm( "mulul %d2, %d4:%d1 " ); \
asm( "addxl %d3, %d1 " ); \
asm( "addxl %d0, %d4 " ); \
asm( "addl %d1, %a3@+ " ); \
asm( "movel %a2@+, %d1 " ); \
asm( "mulul %d2, %d3:%d1 " ); \
asm( "addxl %d4, %d1 " ); \
asm( "addxl %d0, %d3 " ); \
asm( "addl %d1, %a3@+ " ); \
asm( "movel %a2@+, %d1 " ); \
asm( "mulul %d2, %d4:%d1 " ); \
asm( "addxl %d3, %d1 " ); \
asm( "addxl %d0, %d4 " ); \
asm( "addl %d1, %a3@+ " ); \
asm( "movel %a2@+, %d1 " ); \
asm( "mulul %d2, %d3:%d1 " ); \
asm( "addxl %d4, %d1 " ); \
asm( "addxl %d0, %d3 " ); \
asm( "addl %d1, %a3@+ " ); \
asm( "movel %a2@+, %d1 " ); \
asm( "mulul %d2, %d4:%d1 " ); \
asm( "addxl %d3, %d1 " ); \
asm( "addxl %d0, %d4 " ); \
asm( "addl %d1, %a3@+ " ); \
asm( "movel %a2@+, %d1 " ); \
asm( "mulul %d2, %d3:%d1 " ); \
asm( "addxl %d4, %d1 " ); \
asm( "addxl %d0, %d3 " ); \
asm( "addl %d1, %a3@+ " ); \
asm( "addxl %d0, %d3 " );

#endif
00265
#if defined(__powerpc__) || defined(__ppc__)
#if defined(__powerpc64__) || defined(__ppc64__)

#if defined(__MACH__) && defined(__APPLE__)

/*
 * PowerPC64, Apple assembler syntax (bare register names):
 * r3 = s, r4 = d, r5 = c (carry), r6 = b.  Pointers are biased by -8
 * so the ldu/stdu update forms pre-increment before each access;
 * addic with 0 clears the carry bit for the adde/addze chain.
 */
#define MULADDC_INIT \
asm( "ld r3, %0 " :: "m" (s)); \
asm( "ld r4, %0 " :: "m" (d)); \
asm( "ld r5, %0 " :: "m" (c)); \
asm( "ld r6, %0 " :: "m" (b)); \
asm( "addi r3, r3, -8 " ); \
asm( "addi r4, r4, -8 " ); \
asm( "addic r5, r5, 0 " );

/*
 * One 64-bit limb: mulld/mulhdu form the 128-bit product r9:r8,
 * carry is folded in with adde/addze, result added to *d via stdu.
 */
#define MULADDC_CORE \
asm( "ldu r7, 8(r3) " ); \
asm( "mulld r8, r7, r6 " ); \
asm( "mulhdu r9, r7, r6 " ); \
asm( "adde r8, r8, r5 " ); \
asm( "ld r7, 8(r4) " ); \
asm( "addze r5, r9 " ); \
asm( "addc r8, r8, r7 " ); \
asm( "stdu r8, 8(r4) " );

/* Fold final carry bit, un-bias the pointers, write back c, d, s. */
#define MULADDC_STOP \
asm( "addze r5, r5 " ); \
asm( "addi r4, r4, 8 " ); \
asm( "addi r3, r3, 8 " ); \
asm( "std r5, %0 " : "=m" (c)); \
asm( "std r4, %0 " : "=m" (d)); \
asm( "std r3, %0 " : "=m" (s) :: \
"r3", "r4", "r5", "r6", "r7", "r8", "r9" );

#else

/* PowerPC64, ELF/GNU syntax (%rN register names); same scheme. */
#define MULADDC_INIT \
asm( "ld %%r3, %0 " :: "m" (s)); \
asm( "ld %%r4, %0 " :: "m" (d)); \
asm( "ld %%r5, %0 " :: "m" (c)); \
asm( "ld %%r6, %0 " :: "m" (b)); \
asm( "addi %r3, %r3, -8 " ); \
asm( "addi %r4, %r4, -8 " ); \
asm( "addic %r5, %r5, 0 " );

#define MULADDC_CORE \
asm( "ldu %r7, 8(%r3) " ); \
asm( "mulld %r8, %r7, %r6 " ); \
asm( "mulhdu %r9, %r7, %r6 " ); \
asm( "adde %r8, %r8, %r5 " ); \
asm( "ld %r7, 8(%r4) " ); \
asm( "addze %r5, %r9 " ); \
asm( "addc %r8, %r8, %r7 " ); \
asm( "stdu %r8, 8(%r4) " );

#define MULADDC_STOP \
asm( "addze %r5, %r5 " ); \
asm( "addi %r4, %r4, 8 " ); \
asm( "addi %r3, %r3, 8 " ); \
asm( "std %%r5, %0 " : "=m" (c)); \
asm( "std %%r4, %0 " : "=m" (d)); \
asm( "std %%r3, %0 " : "=m" (s) :: \
"r3", "r4", "r5", "r6", "r7", "r8", "r9" );

#endif

#else

#if defined(__MACH__) && defined(__APPLE__)

/*
 * PowerPC32, Apple syntax: as above but 32-bit limbs -- lwzu/stwu
 * update forms with a -4 pointer bias, mullw/mulhwu for the product.
 */
#define MULADDC_INIT \
asm( "lwz r3, %0 " :: "m" (s)); \
asm( "lwz r4, %0 " :: "m" (d)); \
asm( "lwz r5, %0 " :: "m" (c)); \
asm( "lwz r6, %0 " :: "m" (b)); \
asm( "addi r3, r3, -4 " ); \
asm( "addi r4, r4, -4 " ); \
asm( "addic r5, r5, 0 " );

#define MULADDC_CORE \
asm( "lwzu r7, 4(r3) " ); \
asm( "mullw r8, r7, r6 " ); \
asm( "mulhwu r9, r7, r6 " ); \
asm( "adde r8, r8, r5 " ); \
asm( "lwz r7, 4(r4) " ); \
asm( "addze r5, r9 " ); \
asm( "addc r8, r8, r7 " ); \
asm( "stwu r8, 4(r4) " );

#define MULADDC_STOP \
asm( "addze r5, r5 " ); \
asm( "addi r4, r4, 4 " ); \
asm( "addi r3, r3, 4 " ); \
asm( "stw r5, %0 " : "=m" (c)); \
asm( "stw r4, %0 " : "=m" (d)); \
asm( "stw r3, %0 " : "=m" (s) :: \
"r3", "r4", "r5", "r6", "r7", "r8", "r9" );

#else

/* PowerPC32, ELF/GNU syntax; same scheme as the Apple 32-bit path. */
#define MULADDC_INIT \
asm( "lwz %%r3, %0 " :: "m" (s)); \
asm( "lwz %%r4, %0 " :: "m" (d)); \
asm( "lwz %%r5, %0 " :: "m" (c)); \
asm( "lwz %%r6, %0 " :: "m" (b)); \
asm( "addi %r3, %r3, -4 " ); \
asm( "addi %r4, %r4, -4 " ); \
asm( "addic %r5, %r5, 0 " );

#define MULADDC_CORE \
asm( "lwzu %r7, 4(%r3) " ); \
asm( "mullw %r8, %r7, %r6 " ); \
asm( "mulhwu %r9, %r7, %r6 " ); \
asm( "adde %r8, %r8, %r5 " ); \
asm( "lwz %r7, 4(%r4) " ); \
asm( "addze %r5, %r9 " ); \
asm( "addc %r8, %r8, %r7 " ); \
asm( "stwu %r8, 4(%r4) " );

#define MULADDC_STOP \
asm( "addze %r5, %r5 " ); \
asm( "addi %r4, %r4, 4 " ); \
asm( "addi %r3, %r3, 4 " ); \
asm( "stw %%r5, %0 " : "=m" (c)); \
asm( "stw %%r4, %0 " : "=m" (d)); \
asm( "stw %%r3, %0 " : "=m" (s) :: \
"r3", "r4", "r5", "r6", "r7", "r8", "r9" );

#endif

#endif
#endif
00397
#if defined(__sparc__) && defined(__sparc64__)

/*
 * SPARC V9: %o0 = s, %o1 = d, %o2 = c (carry), %o3 = b.
 * Pointers are 64-bit (ldx), limbs are 32-bit (ld/st); umul leaves the
 * high 32 bits of the product in the %y register, read back with rd.
 */
#define MULADDC_INIT \
asm( \
" \
ldx %3, %%o0; \
ldx %4, %%o1; \
ld %5, %%o2; \
ld %6, %%o3; \
"

/*
 * One limb: product = b * s[i]; add carry then *d, accumulating the
 * overflow bits into %g1 with addx, and store the new carry in %o2.
 */
#define MULADDC_CORE \
" \
ld [%%o0], %%o4; \
inc 4, %%o0; \
ld [%%o1], %%o5; \
umul %%o3, %%o4, %%o4; \
addcc %%o4, %%o2, %%o4; \
rd %%y, %%g1; \
addx %%g1, 0, %%g1; \
addcc %%o4, %%o5, %%o4; \
st %%o4, [%%o1]; \
addx %%g1, 0, %%o2; \
inc 4, %%o1; \
"

/* Write back carry (32-bit) and both pointers (64-bit). */
#define MULADDC_STOP \
" \
st %%o2, %0; \
stx %%o1, %1; \
stx %%o0, %2; \
" \
: "=m" (c), "=m" (d), "=m" (s) \
: "m" (s), "m" (d), "m" (c), "m" (b) \
: "g1", "o0", "o1", "o2", "o3", "o4", \
"o5" \
);
#endif
00436
#if defined(__sparc__) && !defined(__sparc64__)

/*
 * SPARC V8 (32-bit): identical algorithm to the V9 path above, but
 * pointers are 32-bit so plain ld/st replace ldx/stx.
 * %o0 = s, %o1 = d, %o2 = c (carry), %o3 = b.
 */
#define MULADDC_INIT \
asm( \
" \
ld %3, %%o0; \
ld %4, %%o1; \
ld %5, %%o2; \
ld %6, %%o3; \
"

/* One limb: umul + rd %y for the 64-bit product, addx for carries. */
#define MULADDC_CORE \
" \
ld [%%o0], %%o4; \
inc 4, %%o0; \
ld [%%o1], %%o5; \
umul %%o3, %%o4, %%o4; \
addcc %%o4, %%o2, %%o4; \
rd %%y, %%g1; \
addx %%g1, 0, %%g1; \
addcc %%o4, %%o5, %%o4; \
st %%o4, [%%o1]; \
addx %%g1, 0, %%o2; \
inc 4, %%o1; \
"

/* Write back carry and both (32-bit) pointers. */
#define MULADDC_STOP \
" \
st %%o2, %0; \
st %%o1, %1; \
st %%o0, %2; \
" \
: "=m" (c), "=m" (d), "=m" (s) \
: "m" (s), "m" (d), "m" (c), "m" (b) \
: "g1", "o0", "o1", "o2", "o3", "o4", \
"o5" \
);

#endif
00476
#if defined(__microblaze__) || defined(microblaze)

/*
 * MicroBlaze: r3 = s, r4 = d, r5 = c (carry), r6/r7 = high/low 16-bit
 * halves of b.  The 32x32 product is built from four 16x16 partial
 * products because mul yields only the low 32 bits of a multiply.
 */
#define MULADDC_INIT \
asm( "lwi r3, %0 " :: "m" (s)); \
asm( "lwi r4, %0 " :: "m" (d)); \
asm( "lwi r5, %0 " :: "m" (c)); \
asm( "lwi r6, %0 " :: "m" (b)); \
asm( "andi r7, r6, 0xffff" ); \
asm( "bsrli r6, r6, 16 " );

/*
 * One limb: load the source word as two 16-bit halves (r8, r9), form
 * the four partials, recombine into r12 (low) / r13 (high) with addc
 * against r0 (hardwired zero) catching each overflow, then add *d and
 * the old carry, store the low word and keep the high word as carry.
 * NOTE(review): the half-word load order implies a particular limb
 * byte order in memory -- confirm against the target's endianness.
 */
#define MULADDC_CORE \
asm( "lhui r8, r3, 0 " ); \
asm( "addi r3, r3, 2 " ); \
asm( "lhui r9, r3, 0 " ); \
asm( "addi r3, r3, 2 " ); \
asm( "mul r10, r9, r6 " ); \
asm( "mul r11, r8, r7 " ); \
asm( "mul r12, r9, r7 " ); \
asm( "mul r13, r8, r6 " ); \
asm( "bsrli r8, r10, 16 " ); \
asm( "bsrli r9, r11, 16 " ); \
asm( "add r13, r13, r8 " ); \
asm( "add r13, r13, r9 " ); \
asm( "bslli r10, r10, 16 " ); \
asm( "bslli r11, r11, 16 " ); \
asm( "add r12, r12, r10 " ); \
asm( "addc r13, r13, r0 " ); \
asm( "add r12, r12, r11 " ); \
asm( "addc r13, r13, r0 " ); \
asm( "lwi r10, r4, 0 " ); \
asm( "add r12, r12, r10 " ); \
asm( "addc r13, r13, r0 " ); \
asm( "add r12, r12, r5 " ); \
asm( "addc r5, r13, r0 " ); \
asm( "swi r12, r4, 0 " ); \
asm( "addi r4, r4, 4 " );

/* Write back carry and advanced pointers; declare clobbers. */
#define MULADDC_STOP \
asm( "swi r5, %0 " : "=m" (c)); \
asm( "swi r4, %0 " : "=m" (d)); \
asm( "swi r3, %0 " : "=m" (s) :: \
"r3", "r4" , "r5" , "r6" , "r7" , "r8" , \
"r9", "r10", "r11", "r12", "r13" );

#endif
00522
#if defined(__tricore__)

/*
 * TriCore: %a2 = s, %a3 = d, %d4 = c (carry), %d1 = b, %d5 zeroed.
 * madd.u accumulates a 64-bit result into the extended register pair
 * %e2 (i.e. %d2:%d3): %e2 = %e4 + %d0 * %d1.
 */
#define MULADDC_INIT \
asm( "ld.a %%a2, %0 " :: "m" (s)); \
asm( "ld.a %%a3, %0 " :: "m" (d)); \
asm( "ld.w %%d4, %0 " :: "m" (c)); \
asm( "ld.w %%d1, %0 " :: "m" (b)); \
asm( "xor %d5, %d5 " );

/*
 * One limb: multiply-accumulate b * s[i] + carry into %d2:%d3, add
 * *d with addx/addc carry propagation, keep the high word as the new
 * carry in %d4, and store the low word with post-increment.
 */
#define MULADDC_CORE \
asm( "ld.w %d0, [%a2+] " ); \
asm( "madd.u %e2, %e4, %d0, %d1 " ); \
asm( "ld.w %d0, [%a3] " ); \
asm( "addx %d2, %d2, %d0 " ); \
asm( "addc %d3, %d3, 0 " ); \
asm( "mov %d4, %d3 " ); \
asm( "st.w [%a3+], %d2 " );

/* Write back carry and advanced pointers; declare clobbers. */
#define MULADDC_STOP \
asm( "st.w %0, %%d4 " : "=m" (c)); \
asm( "st.a %0, %%a3 " : "=m" (d)); \
asm( "st.a %0, %%a2 " : "=m" (s) :: \
"d0", "d1", "e2", "d4", "a2", "a3" );

#endif
00548
#if defined(__arm__)

#if defined(__thumb__) && !defined(__thumb2__)

/*
 * Thumb-1: no long-multiply instruction available, so the 32x32
 * product is built from four 16x16 partials with the 32-bit-only
 * "mul".  r0 = s, r1 = d, r2 = c (carry), and b is pre-split into its
 * low half (kept in high register r8) and high half (r9).
 */
#define MULADDC_INIT \
asm( \
" \
ldr r0, %3; \
ldr r1, %4; \
ldr r2, %5; \
ldr r3, %6; \
lsr r7, r3, #16; \
mov r9, r7; \
lsl r7, r3, #16; \
lsr r7, r7, #16; \
mov r8, r7; \
"

/*
 * One limb: split s[i] into halves (r6 low, r7 high), form the four
 * partial products, recombine with shifted adds (adc r5, r2 picks up
 * each overflow since r2 is zeroed), add the old carry and *d, store
 * the low word and leave the new carry in r2.
 */
#define MULADDC_CORE \
" \
ldmia r0!, {r6}; \
lsr r7, r6, #16; \
lsl r6, r6, #16; \
lsr r6, r6, #16; \
mov r4, r8; \
mul r4, r6; \
mov r3, r9; \
mul r6, r3; \
mov r5, r9; \
mul r5, r7; \
mov r3, r8; \
mul r7, r3; \
lsr r3, r6, #16; \
add r5, r5, r3; \
lsr r3, r7, #16; \
add r5, r5, r3; \
add r4, r4, r2; \
mov r2, #0; \
adc r5, r2; \
lsl r3, r6, #16; \
add r4, r4, r3; \
adc r5, r2; \
lsl r3, r7, #16; \
add r4, r4, r3; \
adc r5, r2; \
ldr r3, [r1]; \
add r4, r4, r3; \
adc r2, r5; \
stmia r1!, {r4}; \
"

/* Write back carry and advanced pointers; "cc" flags clobbered. */
#define MULADDC_STOP \
" \
str r2, %0; \
str r1, %1; \
str r0, %2; \
" \
: "=m" (c), "=m" (d), "=m" (s) \
: "m" (s), "m" (d), "m" (c), "m" (b) \
: "r0", "r1", "r2", "r3", "r4", "r5", \
"r6", "r7", "r8", "r9", "cc" \
);

#else

/*
 * ARM / Thumb-2: r0 = s, r1 = d, r2 = c (carry), r3 = b.
 */
#define MULADDC_INIT \
asm( \
" \
ldr r0, %3; \
ldr r1, %4; \
ldr r2, %5; \
ldr r3, %6; \
"

/*
 * One limb: umlal accumulates b * s[i] + carry into r5:r2, *d is
 * added with adds/adc, the low word stored with post-increment and
 * the high word kept as the new carry in r2.
 */
#define MULADDC_CORE \
" \
ldr r4, [r0], #4; \
mov r5, #0; \
ldr r6, [r1]; \
umlal r2, r5, r3, r4; \
adds r7, r6, r2; \
adc r2, r5, #0; \
str r7, [r1], #4; \
"

/* Write back carry and advanced pointers; "cc" flags clobbered. */
#define MULADDC_STOP \
" \
str r2, %0; \
str r1, %1; \
str r0, %2; \
" \
: "=m" (c), "=m" (d), "=m" (s) \
: "m" (s), "m" (d), "m" (c), "m" (b) \
: "r0", "r1", "r2", "r3", "r4", "r5", \
"r6", "r7", "cc" \
);

#endif

#endif
00649
#if defined(__alpha__)

/*
 * Alpha: $1 = s, $2 = d, $3 = c (carry), $4 = b; 64-bit limbs.
 * Alpha has no carry flag, so carries are computed explicitly with
 * cmpult (unsigned "sum < addend" => overflow occurred).
 */
#define MULADDC_INIT \
asm( "ldq $1, %0 " :: "m" (s)); \
asm( "ldq $2, %0 " :: "m" (d)); \
asm( "ldq $3, %0 " :: "m" (c)); \
asm( "ldq $4, %0 " :: "m" (b));

/*
 * One limb: mulq/umulh form the 128-bit product $6:$7; the carry and
 * *d are added with cmpult capturing each overflow bit, and the new
 * carry (high word + overflow bits) accumulates back into $3.
 */
#define MULADDC_CORE \
asm( "ldq $6, 0($1) " ); \
asm( "addq $1, 8, $1 " ); \
asm( "mulq $6, $4, $7 " ); \
asm( "umulh $6, $4, $6 " ); \
asm( "addq $7, $3, $7 " ); \
asm( "cmpult $7, $3, $3 " ); \
asm( "ldq $5, 0($2) " ); \
asm( "addq $7, $5, $7 " ); \
asm( "cmpult $7, $5, $5 " ); \
asm( "stq $7, 0($2) " ); \
asm( "addq $2, 8, $2 " ); \
asm( "addq $6, $3, $3 " ); \
asm( "addq $5, $3, $3 " );

/* Write back carry and advanced pointers; declare clobbers. */
#define MULADDC_STOP \
asm( "stq $3, %0 " : "=m" (c)); \
asm( "stq $2, %0 " : "=m" (d)); \
asm( "stq $1, %0 " : "=m" (s) :: \
"$1", "$2", "$3", "$4", "$5", "$6", "$7" );

#endif
00680
#if defined(__mips__)

/*
 * MIPS32: $10 = s, $11 = d, $12 = c (carry), $13 = b.
 * multu writes the 64-bit product to HI/LO; like Alpha, there is no
 * carry flag, so sltu (set-less-than-unsigned) detects overflow.
 */
#define MULADDC_INIT \
asm( "lw $10, %0 " :: "m" (s)); \
asm( "lw $11, %0 " :: "m" (d)); \
asm( "lw $12, %0 " :: "m" (c)); \
asm( "lw $13, %0 " :: "m" (b));

/*
 * One limb: product in $9 (hi) / $14 (lo); add carry then *d, folding
 * the sltu overflow bits and the high word into the new carry $12.
 */
#define MULADDC_CORE \
asm( "lw $14, 0($10) " ); \
asm( "multu $13, $14 " ); \
asm( "addi $10, $10, 4 " ); \
asm( "mflo $14 " ); \
asm( "mfhi $9 " ); \
asm( "addu $14, $12, $14 " ); \
asm( "lw $15, 0($11) " ); \
asm( "sltu $12, $14, $12 " ); \
asm( "addu $15, $14, $15 " ); \
asm( "sltu $14, $15, $14 " ); \
asm( "addu $12, $12, $9 " ); \
asm( "sw $15, 0($11) " ); \
asm( "addu $12, $12, $14 " ); \
asm( "addi $11, $11, 4 " );

/* Write back carry and advanced pointers; declare clobbers. */
#define MULADDC_STOP \
asm( "sw $12, %0 " : "=m" (c)); \
asm( "sw $11, %0 " : "=m" (d)); \
asm( "sw $10, %0 " : "=m" (s) :: \
"$9", "$10", "$11", "$12", "$13", "$14", "$15" );

#endif
#endif
00713
#if (defined(_MSC_VER) && defined(_M_IX86)) || defined(__WATCOMC__)

/*
 * MSVC / Watcom x86 inline assembler: same register scheme as the
 * GCC i386 path (esi = s, edi = d, ecx = c, ebx = b).
 */
#define MULADDC_INIT \
__asm mov esi, s \
__asm mov edi, d \
__asm mov ecx, c \
__asm mov ebx, b

/*
 * One limb: lodsd/mul/stosd version of the i386 core -- product in
 * edx:eax, add carry then [edi], new carry kept in ecx.
 */
#define MULADDC_CORE \
__asm lodsd \
__asm mul ebx \
__asm add eax, ecx \
__asm adc edx, 0 \
__asm add eax, [edi] \
__asm adc edx, 0 \
__asm mov ecx, edx \
__asm stosd

#if defined(POLARSSL_HAVE_SSE2)

#define EMIT __asm _emit

/*
 * SSE2 eight-limb step, hand-assembled as raw opcode bytes via _emit
 * (NOTE(review): presumably because this inline assembler lacks the
 * SSE2 mnemonics -- confirm).  The byte sequence encodes the same
 * instruction stream as the GCC MULADDC_HUIT above, e.g.
 * 0F F4 = pmuludq, 0F D4 = paddq, 0F 6E/7E = movd, 0F 73 /2 = psrlq.
 */
#define MULADDC_HUIT \
EMIT 0x0F EMIT 0x6E EMIT 0xC9 \
EMIT 0x0F EMIT 0x6E EMIT 0xC3 \
EMIT 0x0F EMIT 0x6E EMIT 0x1F \
EMIT 0x0F EMIT 0xD4 EMIT 0xCB \
EMIT 0x0F EMIT 0x6E EMIT 0x16 \
EMIT 0x0F EMIT 0xF4 EMIT 0xD0 \
EMIT 0x0F EMIT 0x6E EMIT 0x66 EMIT 0x04 \
EMIT 0x0F EMIT 0xF4 EMIT 0xE0 \
EMIT 0x0F EMIT 0x6E EMIT 0x76 EMIT 0x08 \
EMIT 0x0F EMIT 0xF4 EMIT 0xF0 \
EMIT 0x0F EMIT 0x6E EMIT 0x7E EMIT 0x0C \
EMIT 0x0F EMIT 0xF4 EMIT 0xF8 \
EMIT 0x0F EMIT 0xD4 EMIT 0xCA \
EMIT 0x0F EMIT 0x6E EMIT 0x5F EMIT 0x04 \
EMIT 0x0F EMIT 0xD4 EMIT 0xDC \
EMIT 0x0F EMIT 0x6E EMIT 0x6F EMIT 0x08 \
EMIT 0x0F EMIT 0xD4 EMIT 0xEE \
EMIT 0x0F EMIT 0x6E EMIT 0x67 EMIT 0x0C \
EMIT 0x0F EMIT 0xD4 EMIT 0xFC \
EMIT 0x0F EMIT 0x7E EMIT 0x0F \
EMIT 0x0F EMIT 0x6E EMIT 0x56 EMIT 0x10 \
EMIT 0x0F EMIT 0xF4 EMIT 0xD0 \
EMIT 0x0F EMIT 0x73 EMIT 0xD1 EMIT 0x20 \
EMIT 0x0F EMIT 0x6E EMIT 0x66 EMIT 0x14 \
EMIT 0x0F EMIT 0xF4 EMIT 0xE0 \
EMIT 0x0F EMIT 0xD4 EMIT 0xCB \
EMIT 0x0F EMIT 0x6E EMIT 0x76 EMIT 0x18 \
EMIT 0x0F EMIT 0xF4 EMIT 0xF0 \
EMIT 0x0F EMIT 0x7E EMIT 0x4F EMIT 0x04 \
EMIT 0x0F EMIT 0x73 EMIT 0xD1 EMIT 0x20 \
EMIT 0x0F EMIT 0x6E EMIT 0x5E EMIT 0x1C \
EMIT 0x0F EMIT 0xF4 EMIT 0xD8 \
EMIT 0x0F EMIT 0xD4 EMIT 0xCD \
EMIT 0x0F EMIT 0x6E EMIT 0x6F EMIT 0x10 \
EMIT 0x0F EMIT 0xD4 EMIT 0xD5 \
EMIT 0x0F EMIT 0x7E EMIT 0x4F EMIT 0x08 \
EMIT 0x0F EMIT 0x73 EMIT 0xD1 EMIT 0x20 \
EMIT 0x0F EMIT 0xD4 EMIT 0xCF \
EMIT 0x0F EMIT 0x6E EMIT 0x6F EMIT 0x14 \
EMIT 0x0F EMIT 0xD4 EMIT 0xE5 \
EMIT 0x0F EMIT 0x7E EMIT 0x4F EMIT 0x0C \
EMIT 0x0F EMIT 0x73 EMIT 0xD1 EMIT 0x20 \
EMIT 0x0F EMIT 0xD4 EMIT 0xCA \
EMIT 0x0F EMIT 0x6E EMIT 0x6F EMIT 0x18 \
EMIT 0x0F EMIT 0xD4 EMIT 0xF5 \
EMIT 0x0F EMIT 0x7E EMIT 0x4F EMIT 0x10 \
EMIT 0x0F EMIT 0x73 EMIT 0xD1 EMIT 0x20 \
EMIT 0x0F EMIT 0xD4 EMIT 0xCC \
EMIT 0x0F EMIT 0x6E EMIT 0x6F EMIT 0x1C \
EMIT 0x0F EMIT 0xD4 EMIT 0xDD \
EMIT 0x0F EMIT 0x7E EMIT 0x4F EMIT 0x14 \
EMIT 0x0F EMIT 0x73 EMIT 0xD1 EMIT 0x20 \
EMIT 0x0F EMIT 0xD4 EMIT 0xCE \
EMIT 0x0F EMIT 0x7E EMIT 0x4F EMIT 0x18 \
EMIT 0x0F EMIT 0x73 EMIT 0xD1 EMIT 0x20 \
EMIT 0x0F EMIT 0xD4 EMIT 0xCB \
EMIT 0x0F EMIT 0x7E EMIT 0x4F EMIT 0x1C \
EMIT 0x83 EMIT 0xC7 EMIT 0x20 \
EMIT 0x83 EMIT 0xC6 EMIT 0x20 \
EMIT 0x0F EMIT 0x73 EMIT 0xD1 EMIT 0x20 \
EMIT 0x0F EMIT 0x7E EMIT 0xC9

/* SSE2 epilogue: 0F 77 = emms, then write back c, d, s. */
#define MULADDC_STOP \
EMIT 0x0F EMIT 0x77 \
__asm mov c, ecx \
__asm mov d, edi \
__asm mov s, esi \

#else

/* Non-SSE2 epilogue: write back carry and pointers. */
#define MULADDC_STOP \
__asm mov c, ecx \
__asm mov d, edi \
__asm mov s, esi \

#endif
#endif

#endif
00816
#if !defined(MULADDC_CORE)
#if defined(POLARSSL_HAVE_UDBL)

/*
 * Portable C fallback, used when no assembly version matched above.
 * MULADDC_INIT opens a brace and declares temporaries; MULADDC_STOP
 * closes it, so every use must pair them around the CORE steps.
 *
 * Double-width path: t_udbl (presumably defined in bignum.h, included
 * above -- confirm) holds the full product of two t_uint limbs.
 */
#define MULADDC_INIT \
{ \
t_udbl r; \
t_uint r0, r1;

/*
 * One limb: r = s[i] * b; split into low (r0) / high (r1) words, add
 * the carry c and *d -- each "r1 += (r0 < x)" detects unsigned
 * overflow of the preceding addition -- then store and advance.
 */
#define MULADDC_CORE \
r = *(s++) * (t_udbl) b; \
r0 = r; \
r1 = r >> biL; \
r0 += c; r1 += (r0 < c); \
r0 += *d; r1 += (r0 < *d); \
c = r1; *(d++) = r0;

/* Close the scope opened by MULADDC_INIT. */
#define MULADDC_STOP \
}

#else
/*
 * No double-width type available: build the product from four
 * half-limb (biH-bit) partial products.  b is pre-split into its low
 * half b0 and high half b1.
 */
#define MULADDC_INIT \
{ \
t_uint s0, s1, b0, b1; \
t_uint r0, r1, rx, ry; \
b0 = ( b << biH ) >> biH; \
b1 = ( b >> biH );

/*
 * One limb: split s[i] into s0/s1, form the cross products rx/ry and
 * the low/high products r0/r1, fold the cross terms into r1:r0 with
 * the same unsigned-compare carry trick, then add carry c and *d,
 * store the low word and keep the high word as the new carry.
 */
#define MULADDC_CORE \
s0 = ( *s << biH ) >> biH; \
s1 = ( *s >> biH ); s++; \
rx = s0 * b1; r0 = s0 * b0; \
ry = s1 * b0; r1 = s1 * b1; \
r1 += ( rx >> biH ); \
r1 += ( ry >> biH ); \
rx <<= biH; ry <<= biH; \
r0 += rx; r1 += (r0 < rx); \
r0 += ry; r1 += (r0 < ry); \
r0 += c; r1 += (r0 < c); \
r0 += *d; r1 += (r0 < *d); \
c = r1; *(d++) = r0;

/* Close the scope opened by MULADDC_INIT. */
#define MULADDC_STOP \
}

#endif
#endif
00863
00864 #endif