/openssl/crypto/bn/

bn_sparc.c
    17  const BN_ULONG *np, const BN_ULONG *n0, int num)    in bn_mul_mont() [argument]
    33  const BN_ULONG *n0);    in bn_mul_mont()
    36  const BN_ULONG *n0);    in bn_mul_mont()
    39  const BN_ULONG *n0);    in bn_mul_mont()
    42  const BN_ULONG *n0);    in bn_mul_mont()
    45  const BN_ULONG *n0);    in bn_mul_mont()
    52  if ((*worker) (rp, ap, bp, np, n0))    in bn_mul_mont()
    55  if ((*worker) (rp, ap, bp, np, n0))    in bn_mul_mont()
    57  return bn_mul_mont_vis3(rp, ap, bp, np, n0, num);    in bn_mul_mont()
    74  return bn_mul_mont_fpu(rp, ap, bp, np, n0, num);    in bn_mul_mont()
    [all …]

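The bn_sparc.c hits above (and the bn_ppc.c ones below) show the same shape: bn_mul_mont() first tries a size- or ISA-specialised worker, which refuses operand lengths it was not built for, and then falls back to a generic routine that takes the limb count. A minimal sketch of that dispatch pattern; the typedef and function names here (mont_worker_fn, mul_mont_dispatch) are invented for illustration and are not OpenSSL's interfaces:

#include <stdint.h>

/* A specialised worker handles only the operand sizes it supports and
 * returns 0 to signal "not handled", mirroring the (*worker)(...) calls
 * in the bn_sparc.c listing. */
typedef int (*mont_worker_fn)(uint64_t *rp, const uint64_t *ap,
                              const uint64_t *bp, const uint64_t *np,
                              const uint64_t *n0);
typedef int (*mont_generic_fn)(uint64_t *rp, const uint64_t *ap,
                               const uint64_t *bp, const uint64_t *np,
                               const uint64_t *n0, int num);

static int mul_mont_dispatch(uint64_t *rp, const uint64_t *ap,
                             const uint64_t *bp, const uint64_t *np,
                             const uint64_t *n0, int num,
                             mont_worker_fn worker, mont_generic_fn generic)
{
    /* try the specialised worker first; it rejects unsupported sizes */
    if (worker != NULL && (*worker)(rp, ap, bp, np, n0))
        return 1;
    /* otherwise fall through to the length-parameterised generic code */
    return (*generic)(rp, ap, bp, np, n0, num);
}
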
bn_ppc.c
    16  const BN_ULONG *np, const BN_ULONG *n0, int num)    in bn_mul_mont() [argument]
    19  const BN_ULONG *np, const BN_ULONG *n0, int num);    in bn_mul_mont()
    21  const BN_ULONG *np, const BN_ULONG *n0, int num);    in bn_mul_mont()
    24  const BN_ULONG *n0, int num);    in bn_mul_mont()
    27  const BN_ULONG *n0, int num);    in bn_mul_mont()
    33  return bn_mul4x_mont_int(rp, ap, bp, np, n0, num);    in bn_mul_mont()
    46  return bn_mul_mont_300_fixed_n6(rp, ap, bp, np, n0, num);    in bn_mul_mont()
    48  return bn_mul_mont_fixed_n6(rp, ap, bp, np, n0, num);    in bn_mul_mont()
    52  return bn_mul_mont_int(rp, ap, bp, np, n0, num);    in bn_mul_mont()

bn_mont.c
    118  n0 = mont->n0[0];    in bn_from_montgomery_word()
    246  ctx->n0[0] = ctx->n0[1] = 0;    in BN_MONT_CTX_init()
    421  to->n0[0] = from->n0[0];    in BN_MONT_CTX_copy()
    422  to->n0[1] = from->n0[1];    in BN_MONT_CTX_copy()
    478  ctx->n0[0] = nlo;    in ossl_bn_mont_ctx_set()
    479  ctx->n0[1] = nhi;    in ossl_bn_mont_ctx_set()
    481  ctx->n0[0] = nlo;    in ossl_bn_mont_ctx_set()
    482  ctx->n0[1] = 0;    in ossl_bn_mont_ctx_set()
    485  ctx->n0[1] = 0;    in ossl_bn_mont_ctx_set()
    500  if (m1->n0[0] != m2->n0[0])    in ossl_bn_mont_ctx_eq()
    [all …]

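The BN_MONT_CTX hits show mont->n0 being initialised, copied, and compared: it caches the Montgomery constant n0 = -N^(-1) mod 2^BN_BITS2 for the modulus N (two words, n0[0]/n0[1], on configurations where the assembly expects a wider value). A minimal sketch of how such a word inverse can be computed for an odd modulus word, assuming 64-bit limbs; this is the standard Hensel/Newton lifting trick, not a copy of OpenSSL's code:

#include <stdint.h>

/* Compute n0 = -(n^-1) mod 2^64 for an odd modulus word n, so that during
 * Montgomery reduction t + (t * n0 mod 2^64) * N is divisible by 2^64.
 * For odd n, inv = n is already correct mod 2^3; each Newton step doubles
 * the number of correct low bits, so five steps reach >= 64 bits. */
static uint64_t mont_n0_sketch(uint64_t n)
{
    uint64_t inv = n;
    for (int i = 0; i < 5; i++)
        inv *= 2 - n * inv;
    return (uint64_t)0 - inv;    /* negate mod 2^64 */
}
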
bn_exp.c
    693  mont->n0[0]);    in BN_mod_exp_mont_consttime()
    810  const BN_ULONG *n0);    in BN_mod_exp_mont_consttime()
    815  const BN_ULONG *n0);    in BN_mod_exp_mont_consttime()
    818  const BN_ULONG *n0);    in BN_mod_exp_mont_consttime()
    821  const BN_ULONG *n0);    in BN_mod_exp_mont_consttime()
    833  const BN_ULONG *n0, int num);    in BN_mod_exp_mont_consttime()
    843  BN_ULONG *np = mont->N.d, *n0 = mont->n0;    in BN_mod_exp_mont_consttime() [local]
    858  if (!(*mul_worker) (tmp.d, am.d, am.d, np, n0) &&    in BN_mod_exp_mont_consttime()
    859  !(*mul_worker) (tmp.d, am.d, am.d, np, n0))    in BN_mod_exp_mont_consttime()
    954  BN_ULONG *n0 = mont->n0, *np;    in BN_mod_exp_mont_consttime() [local]
    [all …]

bn_div.c
    174  # define bn_div_words(n0,n1,d0) \    [argument]
    178  : "a"(n1), "d"(n0), "r"(d0) \
    188  # define bn_div_words(n0,n1,d0) \    [argument]
    192  : "a"(n1), "d"(n0), "r"(d0) \
    348  BN_ULONG n0, n1, rem = 0;    in bn_div_fixed_top() [local]
    350  n0 = wnumtop[0];    in bn_div_fixed_top()
    352  if (n0 == d0)    in bn_div_fixed_top()
    360  q = (BN_ULONG)(((((BN_ULLONG) n0) << BN_BITS2) | n1) / d0);    in bn_div_fixed_top()
    362  q = bn_div_words(n0, n1, d0);    in bn_div_fixed_top()
    386  q = bn_div_words(n0, n1, d0);    in bn_div_fixed_top()

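In bn_div_fixed_top(), n0 and n1 are the two most significant words of the shifted numerator and d0 is the top divisor word; bn_div_words(n0, n1, d0) is an inline-assembly divide of the double word n0:n1 by d0, with the BN_ULLONG expression at line 360 as the portable fallback. A hedged C rendering of that fallback, assuming a 64-bit BN_ULONG and a 128-bit integer type (div_two_words is an illustrative name, not an OpenSSL symbol):

#include <stdint.h>

/* Divide the double word n0:n1 (n0 is the high word) by d0.  The quotient
 * only fits in a single word when n0 < d0, which the caller ensures by
 * handling the n0 == d0 case separately (see line 352 above). */
static uint64_t div_two_words(uint64_t n0, uint64_t n1, uint64_t d0)
{
    unsigned __int128 n = ((unsigned __int128)n0 << 64) | n1;
    return (uint64_t)(n / d0);
}
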
bn_asm.c
    855  BN_ULONG c0, c1, ml, *tp, n0;    in bn_mul_mont() [local]
    869  n0 = *n0p;    in bn_mul_mont()
    904  ml = (c1 * n0) & BN_MASK2;    in bn_mul_mont()
    948  const BN_ULONG *np, const BN_ULONG *n0, int num)    in bn_mul_mont() [argument]
    996  BN_ULONG c0, c1, *tp, n0 = *n0p;    in bn_mul_mont() [local]
    1011  c0 = bn_mul_add_words(tp, np, num, tp[0] * n0);    in bn_mul_mont()
    1035  const BN_ULONG *np, const BN_ULONG *n0, int num)    in bn_mul_mont() [argument]

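bn_asm.c carries the portable C bn_mul_mont(): for each word of bp it accumulates ap*bp[i], then uses m = t[0]*n0 mod 2^w (the `ml = (c1 * n0) & BN_MASK2` and `tp[0] * n0` hits) to make the accumulator divisible by the word base before dropping the low word. A compact sketch of that word-serial (CIOS-style) interleaving, assuming 64-bit limbs, unsigned __int128 for double-word products, and inputs already reduced mod np; it is illustration only, neither constant-time nor OpenSSL's exact code:

#include <stdint.h>
#include <string.h>

/* rp = ap * bp * 2^(-64*num) mod np, given n0 = -np[0]^(-1) mod 2^64. */
static void mont_mul_sketch(uint64_t *rp, const uint64_t *ap,
                            const uint64_t *bp, const uint64_t *np,
                            uint64_t n0, int num)
{
    uint64_t t[num + 2];                    /* running accumulator */
    memset(t, 0, sizeof(t));

    for (int i = 0; i < num; i++) {
        unsigned __int128 acc = 0;

        /* t += ap * bp[i] */
        for (int j = 0; j < num; j++) {
            acc += (unsigned __int128)ap[j] * bp[i] + t[j];
            t[j] = (uint64_t)acc;
            acc >>= 64;
        }
        acc += t[num];
        t[num] = (uint64_t)acc;
        t[num + 1] = (uint64_t)(acc >> 64);

        /* t += (t[0] * n0 mod 2^64) * np; afterwards t[0] == 0 */
        uint64_t m = t[0] * n0;
        acc = 0;
        for (int j = 0; j < num; j++) {
            acc += (unsigned __int128)m * np[j] + t[j];
            t[j] = (uint64_t)acc;
            acc >>= 64;
        }
        acc += t[num];
        t[num] = (uint64_t)acc;
        t[num + 1] += (uint64_t)(acc >> 64);

        /* divide by 2^64: drop the now-zero low limb */
        memmove(t, t + 1, (num + 1) * sizeof(uint64_t));
        t[num + 1] = 0;
    }

    /* result is < 2*np, so at most one final subtraction is needed */
    uint64_t d[num], borrow = 0;
    for (int j = 0; j < num; j++) {
        unsigned __int128 diff = (unsigned __int128)t[j] - np[j] - borrow;
        d[j] = (uint64_t)diff;
        borrow = (uint64_t)(diff >> 64) & 1;
    }
    if (t[num] >= borrow)                   /* t - np did not go negative */
        memcpy(rp, d, num * sizeof(*rp));
    else
        memcpy(rp, t, num * sizeof(*rp));
}

The per-architecture Perl assembly generators listed under crypto/bn/asm/ below implement this same loop, which is why the `t[0]*n0` multiplication shows up in each of them.
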
/openssl/crypto/bn/asm/

armv8-mont.pl
    110  ldr $n0,[$n0] // *n0
    120  mul $m1,$lo0,$n0 // "tp[0]"*n0
    191  mul $m1,$lo0,$n0
    697  ldr $n0,[$n0] // *n0
    895  mov $n0,$a0
    940  mul $t0,$a0,$n0
    1117  mul $na0,$n0,$acc0 // t[0]*n0
    1163  mul $na0,$n0,$acc0 // next t[0]*n0
    1471  ldr $n0,[$n0] // *n0
    1504  mul $mi,$acc0,$n0 // t[0]*n0
    [all …]

armv4-mont.pl
    82  $n0="r8";
    166  ldr $n0,[$_n0] @ &n0
    170  ldr $n0,[$n0] @ *n0
    175  mul $n0,$alo,$n0 @ "tp[0]"*n0
    177  umlal $alo,$nlo,$nj,$n0 @ np[0]*n0+"t[0]"
    187  umlal $nlo,$nhi,$nj,$n0 @ np[j]*n0
    197  ldr $n0,[$_n0] @ restore n0
    216  mul $n0,$alo,$n0
    218  umlal $alo,$nlo,$nj,$n0 @ np[0]*n0+"tp[0]"
    228  umlal $nlo,$nhi,$nj,$n0 @ np[j]*n0
    [all …]

ppc-mont.pl
    109  $n0="r7";
    172  $LD $n0,0($n0) ; pull n0[0] value
    184  $UMULL $m1,$lo0,$n0 ; "tp[0]"*n0
    248  $UMULL $m1,$lo0,$n0 ; tp[0]*n0
    423  $LD $n0,0($n0) # *n0
    465  $UMULL $mi,$acc0,$n0 # t[0]*n0
    625  $UMULL $mi,$acc0,$n0 # t[0]*n0
    1016  $LD $n0,0($n0) # *n0
    1240  mr $n0,$a0
    1543  $UMULL $na0,$n0,$acc0 # t[0]*n0
    [all …]

s390x-mont.pl
    79  $n0="%r6"; # const BN_ULONG *n0,
    130  lg $n0,0($n0) # pull n0
    131  _dswap $n0
    140  lgr $mn0,$alo # "tp[0]"*n0
    141  msgr $mn0,$n0
    193  msgr $mn0,$n0 # tp[0]*n0

sparcv9-mont.pl
    59  $n0="%i4"; # const BN_ULONG *n0,
    106  ld [$n0],$n0
    129  mulx $n0,$acc0,$mul1 ! "t[0]"*n0
    132  mulx $car1,$mul1,$car1 ! np[0]*"t[0]"*n0
    133  mulx $npj,$mul1,$acc1 !prologue! np[1]*"t[0]"*n0
    200  mulx $n0,$acc0,$mul1
    314  mulx $n0,$acc0,$mul1 ! "t[0]"*n0
    318  mulx $car1,$mul1,$car1 ! np[0]*"t[0]"*n0
    390  mulx $n0,$tmp0,$mul1
    463  mulx $n0,$tmp1,$mul1
    [all …]

ia64-mont.pl
    167  xmpy.lu m0=alo[4],n0 // (ap[0]*bp[0])*n0
    237  xmpy.lu m0=alo[4],n0 // (ap[0]*bp[i]+tp[0])*n0
    531  (p16) xmpy.lu mj[0]=alo[0],n0 // (ap[0]*b[i]+t[0])*n0

x86_64-mont.pl
    180  mov ($n0),$n0 # pull n0[0] value
    187  mov $n0,$m1
    248  mov $n0,$m1
    255  imulq $lo0,$m1 # tp[0]*n0
    427  mov ($n0),$n0 # pull n0[0] value
    434  mov $n0,$m1
    571  mov $n0,$m1
    874  mov ($n0),$n0 # *n0
    913  mov $n0, 32(%rsp)
    1063  mov ($n0),$n0 # *n0
    [all …]

vis3-mont.pl
    47  ($n0,$m0,$m1,$lo0,$hi0, $lo1,$hi1,$aj,$alo,$nj,$nlo,$tj)=
    101  sllx $t1, 32, $n0
    103  or $t0, $n0, $n0
    133  mulx $lo0, $n0, $m1 ! "tp[0]"*n0
    231  mulx $lo0, $n0, $m1 ! tp[0]*n0

sparct4-mont.pl
    168  ld [%i4+0],%f1 ! load *n0
    486  ld [%i2+0],%f1 ! load *n0
    671  ($n0,$m0,$m1,$lo0,$hi0, $lo1,$hi1,$aj,$alo,$nj,$nlo,$tj)=
    713  sllx $t1, 32, $n0
    715  or $t0, $n0, $n0
    726  mulx $lo0, $n0, $m1 ! "tp[0]"*n0
    808  mulx $lo0, $n0, $m1 ! tp[0]*n0
    959  sllx $t1, 32, $n0
    960  or $t0, $n0, $n0
    971  mulx $lo0, $n0, $m1 ! "tp[0]"*n0
    [all …]

alpha-mont.pl
    32  $n0="a4"; # const BN_ULONG *n0,
    93  ldq $n0,0($n0)
    101  mulq $lo0,$n0,$m1
    189  mulq $lo0,$n0,$m1

mips-mont.pl
    104  $n0=$a4; # const BN_ULONG *n0,
    140  lw $n0,16($sp)
    180  $LD $n0,0($n0)
    196  $MULTU ($lo0,$n0)
    197  mflo ($m1,$lo0,$n0)
    280  $MULTU ($lo0,$n0)
    283  mflo ($m1,$lo0,$n0)

sparcv9a-mont.pl
    84  $n0="%i4"; # const BN_ULONG *n0,
    96  $n0="%g4"; # reassigned(!) to "64-bit" register
    150  ld [%i4+0],$n0 ! $n0 reassigned, remember?
    154  or %o0,$n0,$n0 ! $n0=n0[1].n0[0]
    200  mulx $n0,%o0,%o0 ! ap[0]*bp[0]*n0
    222  ! transfer ap[0]*b[0]*n0 to FPU as 4x16-bit values
    530  mulx $n0,%o0,%o0 ! (ap[0]*bp[i]+t[0])*n0
    539  ! transfer (ap[0]*b[i]+t[0])*n0 to FPU as 4x16-bit values

x86_64-mont5.pl
    252  mov ($n0),$n0 # pull n0[0] value
    258  mov $n0,$m1
    348  mov $n0,$m1
    686  mov ($n0),$n0 # pull n0[0] value
    691  mov $n0,$m1
    861  mov $n0,$m1
    1133  mov ($n0),$n0 # *n0
    1186  # +32 saved *n0
    2136  mov ($n0),$n0 # *n0
    2194  mov $n0, 32(%rsp) # save *n0
    [all …]

ppc64-mont.pl
    127  $n0="r7";
    284  ld $n0,0($n0) ; pull n0[0] value
    297  mulld $t7,$t7,$n0 ; tp[0]*n0
    319  mr $n1,$n0
    343  mullw $t0,$t4,$n0 ; mulld tp[0]*n0
    344  mulhwu $t1,$t4,$n0
    345  mullw $t2,$t5,$n0
    921  mulld $t7,$t7,$n0 ; tp[0]*n0
    958  mullw $t0,$t4,$n0 ; mulld tp[0]*n0
    959  mulhwu $t1,$t4,$n0
    [all …]

ppc64-mont-fixed.pl
    73  my $n0 = "r7";
    197  ld $n0,0($n0)
    253  mulld $bpi,$tp[0],$n0

rsaz-x86_64.pl
    124  movq $n0, 128(%rsp)
    746  movq 128(%rsp), %rdx # pull $n0
    834  movq $n0, 128(%rsp)
    873  movq 128(%rsp), %rdx # pull $n0
    1023  movq $n0, 128(%rsp) # off-load arguments
    1209  mov $n0, 128(%rsp) # off-load arguments
    1419  movq $n0, 128(%rsp)
    1458  movq 128(%rsp), %rdx # pull $n0
    1547  movq $n0, 128(%rsp)
    1579  movq 128(%rsp), %rdx # pull $n0
    [all …]

rsaz-avx2.pl
    85  my $n0="%ecx"; # const BN_ULONG n0,
    463  imull $n0, %eax
    483  imull $n0, %eax
    517  imull $n0, %eax
    552  imull $n0, %eax
    609  imull $n0, %eax
    649  imull $n0, %eax
    1012  imull $n0, %eax
    1089  imull $n0, %eax
    1170  imull $n0, %eax
    [all …]

parisc-mont.pl
    124  $n0="%r22"; # passed through stack in 32-bit
    138  $xfer=$n0; # accommodates [-16..15] offset in fld[dw]s
    169  ldw `-$FRAME_MARKER-4`($fp),$n0
    188  fldws 0($n0),${fn0}
    196  fldws 4($n0),${fn0} ; only low part of n0

/openssl/crypto/ec/

ecp_smpl.c
    642  n0 = BN_CTX_get(ctx);    in ossl_ec_GFp_simple_add()
    673  if (!field_mul(group, n0, n0, b->Z, ctx))    in ossl_ec_GFp_simple_add()
    695  if (!field_mul(group, n0, n0, a->Z, ctx))    in ossl_ec_GFp_simple_add()
    769  if (!BN_mod_sub_quick(n0, n3, n0, p))    in ossl_ec_GFp_simple_add()
    774  if (!field_mul(group, n0, n0, n6, ctx))    in ossl_ec_GFp_simple_add()
    780  if (!BN_mod_sub_quick(n0, n0, n1, p))    in ossl_ec_GFp_simple_add()
    782  if (BN_is_odd(n0))    in ossl_ec_GFp_simple_add()
    783  if (!BN_add(n0, n0, p))    in ossl_ec_GFp_simple_add()
    845  if (!BN_mod_add_quick(n0, n0, n1, p))    in ossl_ec_GFp_simple_dbl()
    872  if (!BN_mod_add_quick(n0, n0, n1, p))    in ossl_ec_GFp_simple_dbl()
    [all …]

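In ecp_smpl.c, unlike the bn/ hits, n0 is simply one of several scratch BIGNUMs drawn from the BN_CTX and reused for intermediates of the Jacobian-coordinate group law (the BN_is_odd/BN_add pair at lines 782-783 is the usual "make it even before shifting" step for a modular division by 2 when the modulus p is odd). For orientation only, and not as a line-by-line mapping onto these routines, the common Jacobian doubling formulas that code of this kind evaluates are

$M = 3X_1^2 + aZ_1^4$, $S = 4X_1Y_1^2$, $X_3 = M^2 - 2S$, $Y_3 = M(S - X_3) - 8Y_1^4$, $Z_3 = 2Y_1Z_1$,

with each product and sum carried out mod p via field_mul()/BN_mod_add_quick()/BN_mod_sub_quick() into temporaries such as n0.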