/openssl/crypto/chacha/asm/ |
H A D | chacha-c64xplus.pl |
  127      ADD @X[$b1],@X[$a1],@X[$a1]
  128 ||   ADD @X[$b2],@X[$a2],@X[$a2]
  129 ||   ADD @X[$b0],@X[$a0],@X[$a0]
  130 ||   ADD @X[$b3],@X[$a3],@X[$a3]
  133      XOR @X[$a1],@X[$d1],@X[$d1]
  144      ADD @X[$d1],@X[$c1],@X[$c1]
  150      XOR @X[$c1],@X[$b1],@X[$b1]
  177      XOR @X[$a0],@X[$d0],@X[$d0]
  193      ADD @X[$d1],@X[$c1],@X[$c1]
  199      ADD @X[$d0],@X[$c0],@X[$c0]
  [all …]
|
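The ADD/XOR pairs above are the C64x+ port running four ChaCha quarter-rounds at once over the state columns ($a0..$a3 etc.); the `||` prefix is TI C6000 syntax for instructions issued in the same cycle. A minimal C sketch of one quarter-round (RFC 8439; the rotl32/quarter_round helper names are ours, not OpenSSL's):

    #include <stdint.h>

    /* rotate left by n bits; helper name is ours */
    static uint32_t rotl32(uint32_t v, int n) { return (v << n) | (v >> (32 - n)); }

    /* one ChaCha quarter-round over state words a, b, c, d */
    static void quarter_round(uint32_t x[16], int a, int b, int c, int d)
    {
        x[a] += x[b]; x[d] ^= x[a]; x[d] = rotl32(x[d], 16);
        x[c] += x[d]; x[b] ^= x[c]; x[b] = rotl32(x[b], 12);
        x[a] += x[b]; x[d] ^= x[a]; x[d] = rotl32(x[d], 8);
        x[c] += x[d]; x[b] ^= x[c]; x[b] = rotl32(x[b], 7);
    }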
/openssl/crypto/sha/ |
H A D | sha_local.h |
  246  BODY_16_19(16, C, D, E, T, A, B, X(0), X(0), X(2), X(8), X(13));    in HASH_BLOCK_DATA_ORDER()
  247  BODY_16_19(17, B, C, D, E, T, A, X(1), X(1), X(3), X(9), X(14));    in HASH_BLOCK_DATA_ORDER()
  248  BODY_16_19(18, A, B, C, D, E, T, X(2), X(2), X(4), X(10), X(15));   in HASH_BLOCK_DATA_ORDER()
  249  BODY_16_19(19, T, A, B, C, D, E, X(3), X(3), X(5), X(11), X(0));    in HASH_BLOCK_DATA_ORDER()
  251  BODY_20_31(20, E, T, A, B, C, D, X(4), X(4), X(6), X(12), X(1));    in HASH_BLOCK_DATA_ORDER()
  252  BODY_20_31(21, D, E, T, A, B, C, X(5), X(5), X(7), X(13), X(2));    in HASH_BLOCK_DATA_ORDER()
  253  BODY_20_31(22, C, D, E, T, A, B, X(6), X(6), X(8), X(14), X(3));    in HASH_BLOCK_DATA_ORDER()
  254  BODY_20_31(23, B, C, D, E, T, A, X(7), X(7), X(9), X(15), X(4));    in HASH_BLOCK_DATA_ORDER()
  255  BODY_20_31(24, A, B, C, D, E, T, X(8), X(8), X(10), X(0), X(5));    in HASH_BLOCK_DATA_ORDER()
  256  BODY_20_31(25, T, A, B, C, D, E, X(9), X(9), X(11), X(1), X(6));    in HASH_BLOCK_DATA_ORDER()
  [all …]
|
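The unrolled BODY_16_19/BODY_20_31 calls walk SHA-1's message schedule as a 16-word circular buffer: for round i, the X(…) arguments are slots (i-16), (i-14), (i-8) and (i-3) mod 16, folded back into slot i mod 16. The same recurrence appears in every SHA-1 port below; a hedged C sketch (FIPS 180-4; rotl32/xupdate names are ours):

    #include <stdint.h>

    static uint32_t rotl32(uint32_t v, int n) { return (v << n) | (v >> (32 - n)); }

    /* in-place SHA-1 schedule update for round i >= 16:
     * W[i] = ROTL1(W[i-3] ^ W[i-8] ^ W[i-14] ^ W[i-16]) */
    static uint32_t xupdate(uint32_t X[16], unsigned i)
    {
        uint32_t w = X[(i + 13) & 15] ^ X[(i + 8) & 15] ^
                     X[(i + 2) & 15] ^ X[i & 15];
        return X[i & 15] = rotl32(w, 1);
    }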
H A D | sha256.c |
  216  SHA_LONG X[16], l;                                in sha256_block_data_order() local
  233  T1 = X[i] = l;                                    in sha256_block_data_order()
  252  T1 = X[i & 0xf] += s0 + s1 + X[(i + 9) & 0xf];    in sha256_block_data_order()
  287  T1 = X[(i)&0x0f] += s0 + s1 + X[(i+9)&0x0f]; \
  298  SHA_LONG X[16];                                   in sha256_block_data_order_c() local
  356  T1 = X[0] = l;                                    in sha256_block_data_order_c()
  359  T1 = X[1] = l;                                    in sha256_block_data_order_c()
  362  T1 = X[2] = l;                                    in sha256_block_data_order_c()
  365  T1 = X[3] = l;                                    in sha256_block_data_order_c()
  368  T1 = X[4] = l;                                    in sha256_block_data_order_c()
  [all …]
|
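Line 252's `X[i & 0xf] += s0 + s1 + X[(i + 9) & 0xf]` is the SHA-256 message schedule kept in the same rolling 16-word window: W[i-16] is overwritten in place, W[i-7] sits at (i+9) mod 16. With the σ functions spelled out (FIPS 180-4; rotr32/sha256_xupdate names are ours):

    #include <stdint.h>

    static uint32_t rotr32(uint32_t v, int n) { return (v >> n) | (v << (32 - n)); }

    /* W[i] = sigma1(W[i-2]) + W[i-7] + sigma0(W[i-15]) + W[i-16], in place */
    static uint32_t sha256_xupdate(uint32_t X[16], unsigned i)
    {
        uint32_t s0 = X[(i + 1) & 15];   /* W[i-15] */
        uint32_t s1 = X[(i + 14) & 15];  /* W[i-2]  */
        s0 = rotr32(s0, 7) ^ rotr32(s0, 18) ^ (s0 >> 3);
        s1 = rotr32(s1, 17) ^ rotr32(s1, 19) ^ (s1 >> 10);
        return X[i & 15] += s0 + s1 + X[(i + 9) & 15]; /* W[i-7] */
    }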
/openssl/crypto/sha/asm/ |
H A D | sha1-alpha.pl |
  116  xor @X[($j+2)%16],@X[$j%16],@X[$j%16]
  121  xor @X[($j+8)%16],@X[$j%16],@X[$j%16]
  126  xor @X[($j+13)%16],@X[$j%16],@X[$j%16]
  134  addl @X[$j%16],@X[$j%16],@X[$j%16]
  149  xor @X[($j+2)%16],@X[$j%16],@X[$j%16]
  154  xor @X[($j+8)%16],@X[$j%16],@X[$j%16]
  159  xor @X[($j+13)%16],@X[$j%16],@X[$j%16]
  164  addl @X[$j%16],@X[$j%16],@X[$j%16]
  206  xor @X[($j+2)%16],@X[$j%16],@X[$j%16]
  211  xor @X[($j+8)%16],@X[$j%16],@X[$j%16]
  [all …]
|
H A D | sha1-mips.pl |
  114  rotr @X[$i],@X[$i],16
  119  sll @X[$i],@X[$i],24
  171  rotr @X[$i],@X[$i],16
  176  sll @X[$i],@X[$i],24
  187  xor @X[$j%16],@X[($j+2)%16]
  190  xor @X[$j%16],@X[($j+8)%16]
  196  rotr @X[$j%16],@X[$j%16],31
  200  xor @X[$j%16],@X[($j+2)%16]
  212  addu @X[$j%16],@X[$j%16]
  253  addu @X[$j%16],@X[$j%16]
  [all …]
|
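The rotr/sll pairs at 114/119 and 171/176 appear to be pieces of a big-endian load: MIPS cores without a byte-swap instruction assemble one from rotates, shifts and inserts. The net effect, as a generic C sketch rather than the exact instruction sequence:

    #include <stdint.h>

    /* byte-swap a 32-bit word so big-endian message data reads
     * correctly on a little-endian core */
    static uint32_t bswap32(uint32_t v)
    {
        return (v << 24) | ((v & 0x0000ff00u) << 8) |
               ((v >> 8) & 0x0000ff00u) | (v >> 24);
    }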
H A D | sha1-586.pl |
  1186  &vpshufb(@X[-3&7],@X[-3&7],@X[2]);
  1187  &vpshufb(@X[-2&7],@X[-2&7],@X[2]);
  1189  &vpshufb(@X[-1&7],@X[-1&7],@X[2]);
  1191  &vpaddd (@X[1],@X[-3&7],@X[3]);
  1192  &vpaddd (@X[2],@X[-2&7],@X[3]);
  1213  &vpaddd (@X[3],@X[3],@X[-1&7]);
  1244  &vpaddd (@X[0],@X[0],@X[0]);
  1261  &vpxor (@X[0],@X[0],@X[3]);
  1301  &vpaddd (@X[3],@X[3],@X[-1&7]);
  1328  &vpor (@X[0],@X[0],@X[2]);    # "X[0]"<<<=2
  [all …]
|
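The `&vpaddd(@X[0],@X[0],@X[0])` at 1244 is not a stray doubling: SSE/AVX have no packed rotate, so the schedule's rotate-left-by-1 is built as add-to-self (a logical shift left by 1) plus a shift-right-by-31 and OR, exactly as the `"X[0]"<<<=2` comment at 1328 does for the two-bit case. Scalar equivalent of the idiom (rol1_via_add is our name):

    #include <stdint.h>

    /* rotate-left-by-1 decomposed the way the SIMD code does it:
     * x + x gives x << 1; OR in the bit that fell off the top */
    static uint32_t rol1_via_add(uint32_t x)
    {
        return (uint32_t)(x + x) | (x >> 31);
    }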
H A D | sha1-sparcv9a.pl |
  376  faligndata @X[$m],@X[$m+2],@X[$m]
  459  faligndata @X[0],@X[2],@X[0]
  460  faligndata @X[2],@X[4],@X[2]
  461  faligndata @X[4],@X[6],@X[4]
  462  faligndata @X[6],@X[8],@X[6]
  463  faligndata @X[8],@X[10],@X[8]
  464  faligndata @X[10],@X[12],@X[10]
  465  faligndata @X[12],@X[14],@X[12]
  466  faligndata @X[14],@X[16],@X[14]
  491  fxors @X[13],@X[0],@X[0]
  [all …]
|
H A D | sha1-x86_64.pl |
  1036  vpshufb @X[2],@X[-3&7],@X[-3&7]
  1037  vpshufb @X[2],@X[-2&7],@X[-2&7]
  1038  vpshufb @X[2],@X[-1&7],@X[-1&7]
  1090  &vpaddd (@X[0],@X[0],@X[0]);
  1415  vpshufb @X[2],@X[-4&7],@X[-4&7]
  1417  vpshufb @X[2],@X[-3&7],@X[-3&7]
  1419  vpshufb @X[2],@X[-2&7],@X[-2&7]
  1421  vpshufb @X[2],@X[-1&7],@X[-1&7]
  1443  &vpaddd (@X[0],@X[0],@X[0]);
  1567  &vpaddd (@X[0],@X[0],@X[0]);
  [all …]
|
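The vpshufb runs at 1036-1038 and 1415-1421 apply a byte-shuffle mask (kept in @X[2]) that endian-swaps four big-endian message words per XMM register in a single instruction. A hedged intrinsics sketch of the same idea (SSSE3; mask layout written out by us, not copied from the perlasm):

    #include <tmmintrin.h>   /* SSSE3: _mm_shuffle_epi8 */

    /* byte-swap each 32-bit lane of an XMM register */
    static __m128i bswap_lanes(__m128i v)
    {
        const __m128i mask = _mm_set_epi8(12, 13, 14, 15, 8, 9, 10, 11,
                                          4, 5, 6, 7, 0, 1, 2, 3);
        return _mm_shuffle_epi8(v, mask);
    }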
H A D | sha512-mips.pl |
  141  my ($T1,$tmp0,$tmp1,$tmp2)=(@X[4],@X[5],@X[6],@X[7]);
  153  wsbh @X[0],@X[0]          # byte swap($i)
  154  rotr @X[0],@X[0],16
  159  sll @X[0],@X[0],24
  169  dsbh @X[0],@X[0]          # byte swap($i)
  170  dshd @X[0],@X[0]
  182  dsrl @X[0],8
  263  my ($tmp0,$tmp1,$tmp2,$tmp3)=(@X[4],@X[5],@X[6],@X[7]);
  269  $ADDU @X[0],@X[9]         # +=X[i+9]
  281  $ADDU @X[0],@X[9]         # +=X[i+9]
  [all …]
|
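The `$ADDU @X[0],@X[9]  # +=X[i+9]` lines are the 64-bit analogue of the SHA-256 update above: same rolling 16-word window, different σ rotation counts. Sketch (FIPS 180-4 SHA-512; rotr64/sha512_xupdate names are ours):

    #include <stdint.h>

    static uint64_t rotr64(uint64_t v, int n) { return (v >> n) | (v << (64 - n)); }

    /* W[i] = sigma1(W[i-2]) + W[i-7] + sigma0(W[i-15]) + W[i-16], in place */
    static uint64_t sha512_xupdate(uint64_t X[16], unsigned i)
    {
        uint64_t s0 = X[(i + 1) & 15];   /* W[i-15] */
        uint64_t s1 = X[(i + 14) & 15];  /* W[i-2]  */
        s0 = rotr64(s0, 1) ^ rotr64(s0, 8) ^ (s0 >> 7);
        s1 = rotr64(s1, 19) ^ rotr64(s1, 61) ^ (s1 >> 6);
        return X[i & 15] += s0 + s1 + X[(i + 9) & 15]; /* W[i-7] */
    }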
H A D | sha1-parisc.pl |
  88   xor @X[($j+2)%16],@X[$j%16],@X[$j%16]
  91   xor @X[($j+8)%16],@X[$j%16],@X[$j%16]
  96   xor @X[($j+13)%16],@X[$j%16],@X[$j%16]
  98   shd @X[$j%16],@X[$j%16],31,@X[$j%16]
  109  xor @X[($j+8)%16],@X[$j%16],@X[$j%16]
  112  xor @X[($j+13)%16],@X[$j%16],@X[$j%16]
  116  shd @X[$j%16],@X[$j%16],31,@X[$j%16]
  142  xor @X[($j+2)%16],@X[$j%16],@X[$j%16]
  145  xor @X[($j+8)%16],@X[$j%16],@X[$j%16]
  149  xor @X[($j+13)%16],@X[$j%16],@X[$j%16]
  [all …]
|
H A D | sha512-x86_64.pl |
  1215  vpshufb $t3,@X[0],@X[0]
  1217  vpshufb $t3,@X[1],@X[1]
  1218  vpshufb $t3,@X[2],@X[2]
  1220  vpshufb $t3,@X[3],@X[3]
  1361  vpshufb $t3,@X[0],@X[0]
  1363  vpshufb $t3,@X[1],@X[1]
  1365  vpshufb $t3,@X[2],@X[2]
  1367  vpshufb $t3,@X[3],@X[3]
  1369  vpshufb $t3,@X[4],@X[4]
  1371  vpshufb $t3,@X[5],@X[5]
  [all …]
|
H A D | sha512-armv8.pl |
  106  my ($T0,$T1,$T2)=(@X[($i-8)&15],@X[($i-9)&15],@X[($i-10)&15]);
  111  rev @X[$i],@X[$i]         // $i
  118  ldp @X[14],@X[15],[$inp]
  181  add @X[$j],@X[$j],@X[($j+9)&15]
  185  add @X[$j],@X[$j],$T1
  187  add @X[$j],@X[$j],$T2
  242  ldp @X[0],@X[1],[$inp],#2*$SZ
  257  ldp @X[0],@X[1],[$ctx]
  591  &rev32 (@X[0],@X[0]);
  657  rev32 @X[2],@X[2]
  [all …]
|
H A D | sha1-sparcv9.pl |
  100  xor @X[($j+1)%8],@X[$j%8],@X[$j%8]
  102  xor @X[($j+4)%8],@X[$j%8],@X[$j%8]
  106  xor $Xi,@X[$j%8],@X[$j%8]
  108  add @X[$j%8],@X[$j%8],@X[$j%8]
  110  andn @X[$j%8],$rot1m,@X[$j%8]
  112  or $Xi,@X[$j%8],@X[$j%8]
  325  sllx @X[0],$tmp1,@X[0]
  331  sllx @X[$i+1],$tmp1,@X[$i+1]
  332  or $Xi,@X[$i],@X[$i]
  337  or $tmp64,@X[7],@X[7]
  [all …]
|
H A D | sha1-armv4-large.pl |
  396  &vext_8 (@X[0],@X[-4&7],@X[-3&7],8);    # compose "X[-14]" in "X[0]"
  408  &veor (@X[0],@X[0],@X[-4&7]);           # "X[0]"^="X[-16]"
  437  &veor (@X[0],@X[0],@Tx[0]);
  457  &veor (@X[0],@X[0],@X[-4&7]);           # "X[0]"="X[-32]"^"X[-16]"
  460  &veor (@X[0],@X[0],@X[-7&7]);           # "X[0]"^="X[-28]"
  509  &vrev32_8 (@X[-4&7],@X[-4&7]);
  562  vrev32.8 @X[-2&7],@X[-2&7]
  563  vadd.i32 @X[0],@X[-4&7],$K
  564  vrev32.8 @X[-1&7],@X[-1&7]
  565  vadd.i32 @X[1],@X[-3&7],$K
  [all …]
|
H A D | sha1-ia64.pl |
  62   my $Xn=@X[$j%16];
  69   dep $X[$i]=$X[$i],tmp2,8,8 };;
  78   dep $X[$i]=$X[$i],tmp1,16,16} //;;
  93   mux2 $X[$i]=$X[$i],0x44 } //;;
  101  dep $X[$i]=$X[$i],tmp1,16,16} //;;
  109  xor tmp3=$X[($j+8)%16],$X[($j+13)%16]   // forward Xupdate
  118  mux2 $X[$i]=$X[$i],0x44 };;
  128  my $Xn=@X[$j%16];
  139  xor tmp3=$X[($j+8)%16],$X[($j+13)%16]   // forward Xupdate
  158  my $Xn=@X[$j%16];
  [all …]
|
H A D | sha1-ppc.pl |
  120  xor @X[$j%16],@X[$j%16],@X[($j+2)%16]
  123  xor @X[$j%16],@X[$j%16],@X[($j+8)%16]
  128  xor @X[$j%16],@X[$j%16],@X[($j+13)%16]
  130  rotlwi @X[$j%16],@X[$j%16],1
  141  xor @X[$j%16],@X[$j%16],@X[($j+2)%16]
  144  xor @X[$j%16],@X[$j%16],@X[($j+8)%16]
  147  xor @X[$j%16],@X[$j%16],@X[($j+13)%16]
  149  rotlwi @X[$j%16],@X[$j%16],1
  174  xor @X[$j%16],@X[$j%16],@X[($j+2)%16]
  177  xor @X[$j%16],@X[$j%16],@X[($j+8)%16]
  [all …]
|
H A D | sha512p8-ppc.pl |
  114  vsldoi @X[$i],@X[$i-1],@X[$i-1],$SZ
  117  vperm @X[$i],@X[$i],@X[$i],$lemask
  121  vaddu${sz}m @X[$j],@X[$j],$Sigma
  123  vaddu${sz}m @X[$j],@X[$j],$Sigma
  124  vaddu${sz}m @X[$j],@X[$j],@X[($j+9)%16]
  225  lvx_u @X[0],0,$inp
  270  lvx @X[0],$x20,$idx
  272  lvx @X[1],$x30,$idx
  274  vperm $A,$A,$C,@X[0]
  275  vperm $E,$E,$G,@X[0]
  [all …]
|
H A D | sha1-s390x.pl |
  82   lr $X[0],$X[2]
  87   rllg $X[1],$X[0],32
  94   rll $X[0],$X[0],1
  95   rllg $X[1],$X[0],32
  96   rll $X[1],$X[1],1
  97   rllg $X[0],$X[1],32
  98   lr $X[2],$X[1]            # feedback
  103  unshift(@X,pop(@X));
  108  my $xi=$X[1];
  127  my $xi=$X[1];
  [all …]
|
H A D | sha256-armv4.pl |
  332  &vext_8 ($T0,@X[0],@X[1],4);    # X[1..4]
  336  &vext_8 ($T1,@X[2],@X[3],4);    # X[9..12]
  343  &vadd_i32 (@X[0],@X[0],$T1);    # X[0..3] += X[9..12]
  373  &vadd_i32 (@X[0],@X[0],$T1);    # X[0..3] += sigma0(X[1..4])
  424  push(@X,shift(@X));             # "rotate" X[]
  442  &vrev32_8 (@X[0],@X[0]);
  451  push(@X,shift(@X));             # "rotate" X[]
  508  vrev32.8 @X[0],@X[0]            @ yes, even on
  510  vrev32.8 @X[1],@X[1]            @ big-endian
  513  vrev32.8 @X[2],@X[2]
  [all …]
|
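The vext/vadd sequence at 332-373 produces four schedule words per iteration: `X[0..3] += X[9..12]` and `+= sigma0(X[1..4])` are whole-vector operations. But σ1 feeds on W[i-2], so the last two lanes depend on the first two and have to be finished in a second step. A plain-C sketch of that split (our structuring, not a transcription of the NEON; sig0/sig1/sha256_sched4 names are ours):

    #include <stdint.h>

    static uint32_t rotr32(uint32_t v, int n) { return (v >> n) | (v << (32 - n)); }
    static uint32_t sig0(uint32_t x) { return rotr32(x, 7) ^ rotr32(x, 18) ^ (x >> 3); }
    static uint32_t sig1(uint32_t x) { return rotr32(x, 17) ^ rotr32(x, 19) ^ (x >> 10); }

    /* produce W[16..19] from W[0..15]; lanes 2,3 need lanes 0,1 first */
    static void sha256_sched4(uint32_t W[20])
    {
        for (int t = 16; t < 18; t++)   /* first half */
            W[t] = sig1(W[t-2]) + W[t-7] + sig0(W[t-15]) + W[t-16];
        for (int t = 18; t < 20; t++)   /* second half: W[t-2] is now ready */
            W[t] = sig1(W[t-2]) + W[t-7] + sig0(W[t-15]) + W[t-16];
    }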
H A D | sha512-586.pl |
  484  &pshufb (@X[0],@X[1]);
  488  &movdqa (@X[2],@X[1]) if ($j<7);    # perpetuate byte swap mask
  491  &paddq (@X[3],@X[0]);
  492  &pshufb (@X[1],@X[2]) if ($j<7);
  495  push(@X,shift(@X));                 # rotate(@X)
  627  push(@X,shift(@X));                 # rotate(@X)
  636  &pshufb (@X[0],@X[1]);
  643  &movdqa (@X[2],@X[1]) if ($j<7);    # perpetuate byte swap mask
  646  &paddq (@X[3],@X[0]);
  647  &pshufb (@X[1],@X[2]) if ($j<7);
  [all …]
|
/openssl/crypto/md5/ |
H A D | md5_dgst.c |
  40  # ifdef X
  41  # undef X
  64  X(0) = l;                 in md5_block_data_order()
  66  X(1) = l;                 in md5_block_data_order()
  70  X(2) = l;                 in md5_block_data_order()
  73  X(3) = l;                 in md5_block_data_order()
  76  X(4) = l;                 in md5_block_data_order()
  79  X(5) = l;                 in md5_block_data_order()
  82  X(6) = l;                 in md5_block_data_order()
  85  X(7) = l;                 in md5_block_data_order()
  [all …]
|
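The repeated `X(i) = l` lines are md5_block_data_order() pulling sixteen little-endian 32-bit words out of the input block via OpenSSL's HOST_c2l-style macro (the md4_dgst.c hits below are the same pattern). The equivalent load, as a portable C sketch (the c2l helper name is ours):

    #include <stdint.h>

    /* read a 32-bit little-endian word from a byte stream, advancing *p */
    static uint32_t c2l(const unsigned char **p)
    {
        const unsigned char *d = *p;
        *p = d + 4;
        return (uint32_t)d[0] | ((uint32_t)d[1] << 8) |
               ((uint32_t)d[2] << 16) | ((uint32_t)d[3] << 24);
    }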
/openssl/crypto/md4/ |
H A D | md4_dgst.c |
  40  # ifdef X
  41  # undef X
  64  X(0) = l;                 in md4_block_data_order()
  66  X(1) = l;                 in md4_block_data_order()
  70  X(2) = l;                 in md4_block_data_order()
  73  X(3) = l;                 in md4_block_data_order()
  76  X(4) = l;                 in md4_block_data_order()
  79  X(5) = l;                 in md4_block_data_order()
  82  X(6) = l;                 in md4_block_data_order()
  85  X(7) = l;                 in md4_block_data_order()
  [all …]
|
/openssl/crypto/md5/asm/ |
H A D | md5-586.pl |
  34   $X="esi";
  197  &mov($X, &wparam(1));     # esi
  202  &add($C, $X);             # offset we end at
  214  &R0(-2,$A,$B,$C,$D,$X, 0, 7,0xd76aa478);
  215  &R0( 0,$D,$A,$B,$C,$X, 1,12,0xe8c7b756);
  216  &R0( 0,$C,$D,$A,$B,$X, 2,17,0x242070db);
  217  &R0( 0,$B,$C,$D,$A,$X, 3,22,0xc1bdceee);
  218  &R0( 0,$A,$B,$C,$D,$X, 4, 7,0xf57c0faf);
  219  &R0( 0,$D,$A,$B,$C,$X, 5,12,0x4787c62a);
  220  &R0( 0,$C,$D,$A,$B,$X, 6,17,0xa8304613);
  [all …]
|
/openssl/crypto/aes/asm/ |
H A D | aesni-sha1-x86_64.pl |
  372   &paddd (@X[0],@X[0]);
  1142  vpshufb @X[2],@X[-4&7],@X[-4&7]     # byte swap
  1144  vpshufb @X[2],@X[-3&7],@X[-3&7]
  1145  vpshufb @X[2],@X[-2&7],@X[-2&7]
  1146  vpshufb @X[2],@X[-1&7],@X[-1&7]
  1220  &vpxor (@X[0],@X[0],@X[-4&7]);      # "X[0]"^="X[-16]"
  1244  &vpaddd (@X[0],@X[0],@X[0]);
  1617  vpshufb @X[2],@X[-4&7],@X[-4&7]     # byte swap
  1619  vpshufb @X[2],@X[-3&7],@X[-3&7]
  1620  vpshufb @X[2],@X[-2&7],@X[-2&7]
  [all …]
|
/openssl/crypto/sm3/ |
H A D | sm3_local.h |
  78  # define P0(X) (X ^ ROTATE(X, 9) ^ ROTATE(X, 17))      argument
  81  # define P1(X) (X ^ ROTATE(X, 15) ^ ROTATE(X, 23))     argument
  84  #define FF0(X,Y,Z) (X ^ Y ^ Z)                          argument
  85  #define GG0(X,Y,Z) (X ^ Y ^ Z)                          argument
  87  #define FF1(X,Y,Z) ((X & Y) | ((X | Y) & Z))            argument
  88  #define GG1(X,Y,Z) ((Z ^ (X & (Y ^ Z))))                argument
|
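P0 and P1 are SM3's linear permutations and FF1/GG1 its round booleans: FF1 is the majority function in disguise, and GG1 is the select/chooser `Z ^ (X & (Y ^ Z))`, the same trick SHA-1 and SHA-2 use. P1 drives the message expansion; a sketch using the macros as defined above (GB/T 32905-2016; ROTATE assumed to be a 32-bit rotate-left, as in OpenSSL's hash headers, and sm3_expand is our name):

    #include <stdint.h>

    #define ROTATE(x, n)  (((x) << (n)) | ((x) >> (32 - (n))))
    #define P1(X)         (X ^ ROTATE(X, 15) ^ ROTATE(X, 23))

    /* SM3 message expansion: W[j] for 16 <= j < 68,
     * from the sixteen loaded words W[0..15] */
    static void sm3_expand(uint32_t W[68])
    {
        for (int j = 16; j < 68; j++)
            W[j] = P1(W[j-16] ^ W[j-9] ^ ROTATE(W[j-3], 15))
                   ^ ROTATE(W[j-13], 7) ^ W[j-6];
    }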