1#! /usr/bin/env perl 2# Copyright 2011-2023 The OpenSSL Project Authors. All Rights Reserved. 3# 4# Licensed under the Apache License 2.0 (the "License"). You may not use 5# this file except in compliance with the License. You can obtain a copy 6# in the file LICENSE in the source distribution or at 7# https://www.openssl.org/source/license.html 8 9 10# ==================================================================== 11# Written by Andy Polyakov <appro@openssl.org> for the OpenSSL 12# project. The module is, however, dual licensed under OpenSSL and 13# CRYPTOGAMS licenses depending on where you obtain it. For further 14# details see http://www.openssl.org/~appro/cryptogams/. 15# ==================================================================== 16 17# September 2011 18# 19# Assembler helpers for Padlock engine. Compared to original engine 20# version relying on inline assembler and compiled with gcc 3.4.6 it 21# was measured to provide ~100% improvement on misaligned data in ECB 22# mode and ~75% in CBC mode. For aligned data improvement can be 23# observed for short inputs only, e.g. 45% for 64-byte messages in 24# ECB mode, 20% in CBC. Difference in performance for aligned vs. 25# misaligned data depends on misalignment and is either ~1.8x or 2.9x. 26# These are approximately same factors as for hardware support, so 27# there is little reason to rely on the latter. On the contrary, it 28# might actually hurt performance in mixture of aligned and misaligned 29# buffers, because a) if you choose to flip 'align' flag in control 30# word on per-buffer basis, then you'd have to reload key context, 31# which incurs penalty; b) if you choose to set 'align' flag 32# permanently, it limits performance even for aligned data to ~1/2. 33# All above mentioned results were collected on 1.5GHz C7. Nano on the 34# other hand handles unaligned data more gracefully. Depending on 35# algorithm and how unaligned data is, hardware can be up to 70% more 36# efficient than below software alignment procedures, nor does 'align' 37# flag have affect on aligned performance [if has any meaning at all]. 38# Therefore suggestion is to unconditionally set 'align' flag on Nano 39# for optimal performance. 40 41$0 =~ m/(.*[\/\\])[^\/\\]+$/; $dir=$1; 42push(@INC,"${dir}","${dir}../../crypto/perlasm"); 43require "x86asm.pl"; 44 45$output=pop and open STDOUT,">$output"; 46 47&asm_init($ARGV[0]); 48 49%PADLOCK_PREFETCH=(ecb=>128, cbc=>64); # prefetch errata 50$PADLOCK_CHUNK=512; # Must be a power of 2 larger than 16 51 52$ctx="edx"; 53$out="edi"; 54$inp="esi"; 55$len="ecx"; 56$chunk="ebx"; 57 58&function_begin_B("padlock_capability"); 59 &push ("ebx"); 60 &pushf (); 61 &pop ("eax"); 62 &mov ("ecx","eax"); 63 &xor ("eax",1<<21); 64 &push ("eax"); 65 &popf (); 66 &pushf (); 67 &pop ("eax"); 68 &xor ("ecx","eax"); 69 &xor ("eax","eax"); 70 &bt ("ecx",21); 71 &jnc (&label("noluck")); 72 &cpuid (); 73 &xor ("eax","eax"); 74 &cmp ("ebx","0x".unpack("H*",'tneC')); 75 &jne (&label("zhaoxin")); 76 &cmp ("edx","0x".unpack("H*",'Hrua')); 77 &jne (&label("noluck")); 78 &cmp ("ecx","0x".unpack("H*",'slua')); 79 &jne (&label("noluck")); 80 &jmp (&label("zhaoxinEnd")); 81&set_label("zhaoxin"); 82 &cmp ("ebx","0x".unpack("H*",'hS ')); 83 &jne (&label("noluck")); 84 &cmp ("edx","0x".unpack("H*",'hgna')); 85 &jne (&label("noluck")); 86 &cmp ("ecx","0x".unpack("H*",' ia')); 87 &jne (&label("noluck")); 88&set_label("zhaoxinEnd"); 89 &mov ("eax",0xC0000000); 90 &cpuid (); 91 &mov ("edx","eax"); 92 &xor ("eax","eax"); 93 &cmp ("edx",0xC0000001); 94 &jb (&label("noluck")); 95 &mov ("eax",1); 96 &cpuid (); 97 &or ("eax",0x0f); 98 &xor ("ebx","ebx"); 99 &and ("eax",0x0fff); 100 &cmp ("eax",0x06ff); # check for Nano 101 &sete ("bl"); 102 &mov ("eax",0xC0000001); 103 &push ("ebx"); 104 &cpuid (); 105 &pop ("ebx"); 106 &mov ("eax","edx"); 107 &shl ("ebx",4); # bit#4 denotes Nano 108 &and ("eax",0xffffffef); 109 &or ("eax","ebx") 110&set_label("noluck"); 111 &pop ("ebx"); 112 &ret (); 113&function_end_B("padlock_capability") 114 115&function_begin_B("padlock_key_bswap"); 116 &mov ("edx",&wparam(0)); 117 &mov ("ecx",&DWP(240,"edx")); 118 &inc ("ecx"); 119 &shl ("ecx",2); 120&set_label("bswap_loop"); 121 &mov ("eax",&DWP(0,"edx")); 122 &bswap ("eax"); 123 &mov (&DWP(0,"edx"),"eax"); 124 &lea ("edx",&DWP(4,"edx")); 125 &sub ("ecx",1); 126 &jnz (&label("bswap_loop")); 127 &ret (); 128&function_end_B("padlock_key_bswap"); 129 130# This is heuristic key context tracing. At first one 131# believes that one should use atomic swap instructions, 132# but it's not actually necessary. Point is that if 133# padlock_saved_context was changed by another thread 134# after we've read it and before we compare it with ctx, 135# our key *shall* be reloaded upon thread context switch 136# and we are therefore set in either case... 137&static_label("padlock_saved_context"); 138 139&function_begin_B("padlock_verify_context"); 140 &mov ($ctx,&wparam(0)); 141 &lea ("eax",($::win32 or $::coff) ? &DWP(&label("padlock_saved_context")) : 142 &DWP(&label("padlock_saved_context")."-".&label("verify_pic_point"))); 143 &pushf (); 144 &call ("_padlock_verify_ctx"); 145&set_label("verify_pic_point"); 146 &lea ("esp",&DWP(4,"esp")); 147 &ret (); 148&function_end_B("padlock_verify_context"); 149 150&function_begin_B("_padlock_verify_ctx"); 151 &add ("eax",&DWP(0,"esp")) if(!($::win32 or $::coff));# &padlock_saved_context 152 &bt (&DWP(4,"esp"),30); # eflags 153 &jnc (&label("verified")); 154 &cmp ($ctx,&DWP(0,"eax")); 155 &je (&label("verified")); 156 &pushf (); 157 &popf (); 158&set_label("verified"); 159 &mov (&DWP(0,"eax"),$ctx); 160 &ret (); 161&function_end_B("_padlock_verify_ctx"); 162 163&function_begin_B("padlock_reload_key"); 164 &pushf (); 165 &popf (); 166 &ret (); 167&function_end_B("padlock_reload_key"); 168 169&function_begin_B("padlock_aes_block"); 170 &push ("edi"); 171 &push ("esi"); 172 &push ("ebx"); 173 &mov ($out,&wparam(0)); # must be 16-byte aligned 174 &mov ($inp,&wparam(1)); # must be 16-byte aligned 175 &mov ($ctx,&wparam(2)); 176 &mov ($len,1); 177 &lea ("ebx",&DWP(32,$ctx)); # key 178 &lea ($ctx,&DWP(16,$ctx)); # control word 179 &data_byte(0xf3,0x0f,0xa7,0xc8); # rep xcryptecb 180 &pop ("ebx"); 181 &pop ("esi"); 182 &pop ("edi"); 183 &ret (); 184&function_end_B("padlock_aes_block"); 185 186sub generate_mode { 187my ($mode,$opcode) = @_; 188# int padlock_$mode_encrypt(void *out, const void *inp, 189# struct padlock_cipher_data *ctx, size_t len); 190&function_begin("padlock_${mode}_encrypt"); 191 &mov ($out,&wparam(0)); 192 &mov ($inp,&wparam(1)); 193 &mov ($ctx,&wparam(2)); 194 &mov ($len,&wparam(3)); 195 &test ($ctx,15); 196 &jnz (&label("${mode}_abort")); 197 &test ($len,15); 198 &jnz (&label("${mode}_abort")); 199 &lea ("eax",($::win32 or $::coff) ? &DWP(&label("padlock_saved_context")) : 200 &DWP(&label("padlock_saved_context")."-".&label("${mode}_pic_point"))); 201 &pushf (); 202 &cld (); 203 &call ("_padlock_verify_ctx"); 204&set_label("${mode}_pic_point"); 205 &lea ($ctx,&DWP(16,$ctx)); # control word 206 &xor ("eax","eax"); 207 if ($mode eq "ctr32") { 208 &movq ("mm0",&QWP(-16,$ctx)); # load [upper part of] counter 209 } else { 210 &xor ("ebx","ebx"); 211 &test (&DWP(0,$ctx),1<<5); # align bit in control word 212 &jnz (&label("${mode}_aligned")); 213 &test ($out,0x0f); 214 &setz ("al"); # !out_misaligned 215 &test ($inp,0x0f); 216 &setz ("bl"); # !inp_misaligned 217 &test ("eax","ebx"); 218 &jnz (&label("${mode}_aligned")); 219 &neg ("eax"); 220 } 221 &mov ($chunk,$PADLOCK_CHUNK); 222 ¬ ("eax"); # out_misaligned?-1:0 223 &lea ("ebp",&DWP(-24,"esp")); 224 &cmp ($len,$chunk); 225 &cmovc ($chunk,$len); # chunk=len>PADLOCK_CHUNK?PADLOCK_CHUNK:len 226 &and ("eax",$chunk); # out_misaligned?chunk:0 227 &mov ($chunk,$len); 228 &neg ("eax"); 229 &and ($chunk,$PADLOCK_CHUNK-1); # chunk=len%PADLOCK_CHUNK 230 &lea ("esp",&DWP(0,"eax","ebp")); # alloca 231 &mov ("eax",$PADLOCK_CHUNK); 232 &cmovz ($chunk,"eax"); # chunk=chunk?:PADLOCK_CHUNK 233 &mov ("eax","ebp"); 234 &and ("ebp",-16); 235 &and ("esp",-16); 236 &mov (&DWP(16,"ebp"),"eax"); 237 if ($PADLOCK_PREFETCH{$mode}) { 238 &cmp ($len,$chunk); 239 &ja (&label("${mode}_loop")); 240 &mov ("eax",$inp); # check if prefetch crosses page 241 &cmp ("ebp","esp"); 242 &cmove ("eax",$out); 243 &add ("eax",$len); 244 &neg ("eax"); 245 &and ("eax",0xfff); # distance to page boundary 246 &cmp ("eax",$PADLOCK_PREFETCH{$mode}); 247 &mov ("eax",-$PADLOCK_PREFETCH{$mode}); 248 &cmovae ("eax",$chunk); # mask=distance<prefetch?-prefetch:-1 249 &and ($chunk,"eax"); 250 &jz (&label("${mode}_unaligned_tail")); 251 } 252 &jmp (&label("${mode}_loop")); 253 254&set_label("${mode}_loop",16); 255 &mov (&DWP(0,"ebp"),$out); # save parameters 256 &mov (&DWP(4,"ebp"),$inp); 257 &mov (&DWP(8,"ebp"),$len); 258 &mov ($len,$chunk); 259 &mov (&DWP(12,"ebp"),$chunk); # chunk 260 if ($mode eq "ctr32") { 261 &mov ("ecx",&DWP(-4,$ctx)); 262 &xor ($out,$out); 263 &mov ("eax",&DWP(-8,$ctx)); # borrow $len 264&set_label("${mode}_prepare"); 265 &mov (&DWP(12,"esp",$out),"ecx"); 266 &bswap ("ecx"); 267 &movq (&QWP(0,"esp",$out),"mm0"); 268 &inc ("ecx"); 269 &mov (&DWP(8,"esp",$out),"eax"); 270 &bswap ("ecx"); 271 &lea ($out,&DWP(16,$out)); 272 &cmp ($out,$chunk); 273 &jb (&label("${mode}_prepare")); 274 275 &mov (&DWP(-4,$ctx),"ecx"); 276 &lea ($inp,&DWP(0,"esp")); 277 &lea ($out,&DWP(0,"esp")); 278 &mov ($len,$chunk); 279 } else { 280 &test ($out,0x0f); # out_misaligned 281 &cmovnz ($out,"esp"); 282 &test ($inp,0x0f); # inp_misaligned 283 &jz (&label("${mode}_inp_aligned")); 284 &shr ($len,2); 285 &data_byte(0xf3,0xa5); # rep movsl 286 &sub ($out,$chunk); 287 &mov ($len,$chunk); 288 &mov ($inp,$out); 289&set_label("${mode}_inp_aligned"); 290 } 291 &lea ("eax",&DWP(-16,$ctx)); # ivp 292 &lea ("ebx",&DWP(16,$ctx)); # key 293 &shr ($len,4); # len/=AES_BLOCK_SIZE 294 &data_byte(0xf3,0x0f,0xa7,$opcode); # rep xcrypt* 295 if ($mode !~ /ecb|ctr/) { 296 &movaps ("xmm0",&QWP(0,"eax")); 297 &movaps (&QWP(-16,$ctx),"xmm0"); # copy [or refresh] iv 298 } 299 &mov ($out,&DWP(0,"ebp")); # restore parameters 300 &mov ($chunk,&DWP(12,"ebp")); 301 if ($mode eq "ctr32") { 302 &mov ($inp,&DWP(4,"ebp")); 303 &xor ($len,$len); 304&set_label("${mode}_xor"); 305 &movups ("xmm1",&QWP(0,$inp,$len)); 306 &lea ($len,&DWP(16,$len)); 307 &pxor ("xmm1",&QWP(-16,"esp",$len)); 308 &movups (&QWP(-16,$out,$len),"xmm1"); 309 &cmp ($len,$chunk); 310 &jb (&label("${mode}_xor")); 311 } else { 312 &test ($out,0x0f); 313 &jz (&label("${mode}_out_aligned")); 314 &mov ($len,$chunk); 315 &lea ($inp,&DWP(0,"esp")); 316 &shr ($len,2); 317 &data_byte(0xf3,0xa5); # rep movsl 318 &sub ($out,$chunk); 319&set_label("${mode}_out_aligned"); 320 &mov ($inp,&DWP(4,"ebp")); 321 } 322 &mov ($len,&DWP(8,"ebp")); 323 &add ($out,$chunk); 324 &add ($inp,$chunk); 325 &sub ($len,$chunk); 326 &mov ($chunk,$PADLOCK_CHUNK); 327 if (!$PADLOCK_PREFETCH{$mode}) { 328 &jnz (&label("${mode}_loop")); 329 } else { 330 &jz (&label("${mode}_break")); 331 &cmp ($len,$chunk); 332 &jae (&label("${mode}_loop")); 333 334&set_label("${mode}_unaligned_tail"); 335 &xor ("eax","eax"); 336 &cmp ("esp","ebp"); 337 &cmove ("eax",$len); 338 &sub ("esp","eax"); # alloca 339 &mov ("eax", $out); # save parameters 340 &mov ($chunk,$len); 341 &shr ($len,2); 342 &lea ($out,&DWP(0,"esp")); 343 &data_byte(0xf3,0xa5); # rep movsl 344 &mov ($inp,"esp"); 345 &mov ($out,"eax"); # restore parameters 346 &mov ($len,$chunk); 347 &jmp (&label("${mode}_loop")); 348 349&set_label("${mode}_break",16); 350 } 351 if ($mode ne "ctr32") { 352 &cmp ("esp","ebp"); 353 &je (&label("${mode}_done")); 354 } 355 &pxor ("xmm0","xmm0"); 356 &lea ("eax",&DWP(0,"esp")); 357&set_label("${mode}_bzero"); 358 &movaps (&QWP(0,"eax"),"xmm0"); 359 &lea ("eax",&DWP(16,"eax")); 360 &cmp ("ebp","eax"); 361 &ja (&label("${mode}_bzero")); 362 363&set_label("${mode}_done"); 364 &mov ("ebp",&DWP(16,"ebp")); 365 &lea ("esp",&DWP(24,"ebp")); 366 if ($mode ne "ctr32") { 367 &jmp (&label("${mode}_exit")); 368 369&set_label("${mode}_aligned",16); 370 if ($PADLOCK_PREFETCH{$mode}) { 371 &lea ("ebp",&DWP(0,$inp,$len)); 372 &neg ("ebp"); 373 &and ("ebp",0xfff); # distance to page boundary 374 &xor ("eax","eax"); 375 &cmp ("ebp",$PADLOCK_PREFETCH{$mode}); 376 &mov ("ebp",$PADLOCK_PREFETCH{$mode}-1); 377 &cmovae ("ebp","eax"); 378 &and ("ebp",$len); # remainder 379 &sub ($len,"ebp"); 380 &jz (&label("${mode}_aligned_tail")); 381 } 382 &lea ("eax",&DWP(-16,$ctx)); # ivp 383 &lea ("ebx",&DWP(16,$ctx)); # key 384 &shr ($len,4); # len/=AES_BLOCK_SIZE 385 &data_byte(0xf3,0x0f,0xa7,$opcode); # rep xcrypt* 386 if ($mode ne "ecb") { 387 &movaps ("xmm0",&QWP(0,"eax")); 388 &movaps (&QWP(-16,$ctx),"xmm0"); # copy [or refresh] iv 389 } 390 if ($PADLOCK_PREFETCH{$mode}) { 391 &test ("ebp","ebp"); 392 &jz (&label("${mode}_exit")); 393 394&set_label("${mode}_aligned_tail"); 395 &mov ($len,"ebp"); 396 &lea ("ebp",&DWP(-24,"esp")); 397 &mov ("esp","ebp"); 398 &mov ("eax","ebp"); 399 &sub ("esp",$len); 400 &and ("ebp",-16); 401 &and ("esp",-16); 402 &mov (&DWP(16,"ebp"),"eax"); 403 &mov ("eax", $out); # save parameters 404 &mov ($chunk,$len); 405 &shr ($len,2); 406 &lea ($out,&DWP(0,"esp")); 407 &data_byte(0xf3,0xa5); # rep movsl 408 &mov ($inp,"esp"); 409 &mov ($out,"eax"); # restore parameters 410 &mov ($len,$chunk); 411 &jmp (&label("${mode}_loop")); 412 } 413&set_label("${mode}_exit"); } 414 &mov ("eax",1); 415 &lea ("esp",&DWP(4,"esp")); # popf 416 &emms () if ($mode eq "ctr32"); 417&set_label("${mode}_abort"); 418&function_end("padlock_${mode}_encrypt"); 419} 420 421&generate_mode("ecb",0xc8); 422&generate_mode("cbc",0xd0); 423&generate_mode("cfb",0xe0); 424&generate_mode("ofb",0xe8); 425&generate_mode("ctr32",0xc8); # yes, it implements own CTR with ECB opcode, 426 # because hardware CTR was introduced later 427 # and even has errata on certain C7 stepping. 428 # own implementation *always* works, though 429 # ~15% slower than dedicated hardware... 430 431&function_begin_B("padlock_xstore"); 432 &push ("edi"); 433 &mov ("edi",&wparam(0)); 434 &mov ("edx",&wparam(1)); 435 &data_byte(0x0f,0xa7,0xc0); # xstore 436 &pop ("edi"); 437 &ret (); 438&function_end_B("padlock_xstore"); 439 440&function_begin_B("_win32_segv_handler"); 441 &mov ("eax",1); # ExceptionContinueSearch 442 &mov ("edx",&wparam(0)); # *ExceptionRecord 443 &mov ("ecx",&wparam(2)); # *ContextRecord 444 &cmp (&DWP(0,"edx"),0xC0000005) # ExceptionRecord->ExceptionCode == STATUS_ACCESS_VIOLATION 445 &jne (&label("ret")); 446 &add (&DWP(184,"ecx"),4); # skip over rep sha* 447 &mov ("eax",0); # ExceptionContinueExecution 448&set_label("ret"); 449 &ret (); 450&function_end_B("_win32_segv_handler"); 451&safeseh("_win32_segv_handler") if ($::win32); 452 453&function_begin_B("padlock_sha1_oneshot"); 454 &push ("edi"); 455 &push ("esi"); 456 &xor ("eax","eax"); 457 &mov ("edi",&wparam(0)); 458 &mov ("esi",&wparam(1)); 459 &mov ("ecx",&wparam(2)); 460 if ($::win32 or $::coff) { 461 &push (&::islabel("_win32_segv_handler")); 462 &data_byte(0x64,0xff,0x30); # push %fs:(%eax) 463 &data_byte(0x64,0x89,0x20); # mov %esp,%fs:(%eax) 464 } 465 &mov ("edx","esp"); # put aside %esp 466 &add ("esp",-128); # 32 is enough but spec says 128 467 &movups ("xmm0",&QWP(0,"edi")); # copy-in context 468 &and ("esp",-16); 469 &mov ("eax",&DWP(16,"edi")); 470 &movaps (&QWP(0,"esp"),"xmm0"); 471 &mov ("edi","esp"); 472 &mov (&DWP(16,"esp"),"eax"); 473 &xor ("eax","eax"); 474 &data_byte(0xf3,0x0f,0xa6,0xc8); # rep xsha1 475 &movaps ("xmm0",&QWP(0,"esp")); 476 &mov ("eax",&DWP(16,"esp")); 477 &mov ("esp","edx"); # restore %esp 478 if ($::win32 or $::coff) { 479 &data_byte(0x64,0x8f,0x05,0,0,0,0); # pop %fs:0 480 &lea ("esp",&DWP(4,"esp")); 481 } 482 &mov ("edi",&wparam(0)); 483 &movups (&QWP(0,"edi"),"xmm0"); # copy-out context 484 &mov (&DWP(16,"edi"),"eax"); 485 &pop ("esi"); 486 &pop ("edi"); 487 &ret (); 488&function_end_B("padlock_sha1_oneshot"); 489 490&function_begin_B("padlock_sha1_blocks"); 491 &push ("edi"); 492 &push ("esi"); 493 &mov ("edi",&wparam(0)); 494 &mov ("esi",&wparam(1)); 495 &mov ("edx","esp"); # put aside %esp 496 &mov ("ecx",&wparam(2)); 497 &add ("esp",-128); 498 &movups ("xmm0",&QWP(0,"edi")); # copy-in context 499 &and ("esp",-16); 500 &mov ("eax",&DWP(16,"edi")); 501 &movaps (&QWP(0,"esp"),"xmm0"); 502 &mov ("edi","esp"); 503 &mov (&DWP(16,"esp"),"eax"); 504 &mov ("eax",-1); 505 &data_byte(0xf3,0x0f,0xa6,0xc8); # rep xsha1 506 &movaps ("xmm0",&QWP(0,"esp")); 507 &mov ("eax",&DWP(16,"esp")); 508 &mov ("esp","edx"); # restore %esp 509 &mov ("edi",&wparam(0)); 510 &movups (&QWP(0,"edi"),"xmm0"); # copy-out context 511 &mov (&DWP(16,"edi"),"eax"); 512 &pop ("esi"); 513 &pop ("edi"); 514 &ret (); 515&function_end_B("padlock_sha1_blocks"); 516 517&function_begin_B("padlock_sha256_oneshot"); 518 &push ("edi"); 519 &push ("esi"); 520 &xor ("eax","eax"); 521 &mov ("edi",&wparam(0)); 522 &mov ("esi",&wparam(1)); 523 &mov ("ecx",&wparam(2)); 524 if ($::win32 or $::coff) { 525 &push (&::islabel("_win32_segv_handler")); 526 &data_byte(0x64,0xff,0x30); # push %fs:(%eax) 527 &data_byte(0x64,0x89,0x20); # mov %esp,%fs:(%eax) 528 } 529 &mov ("edx","esp"); # put aside %esp 530 &add ("esp",-128); 531 &movups ("xmm0",&QWP(0,"edi")); # copy-in context 532 &and ("esp",-16); 533 &movups ("xmm1",&QWP(16,"edi")); 534 &movaps (&QWP(0,"esp"),"xmm0"); 535 &mov ("edi","esp"); 536 &movaps (&QWP(16,"esp"),"xmm1"); 537 &xor ("eax","eax"); 538 &data_byte(0xf3,0x0f,0xa6,0xd0); # rep xsha256 539 &movaps ("xmm0",&QWP(0,"esp")); 540 &movaps ("xmm1",&QWP(16,"esp")); 541 &mov ("esp","edx"); # restore %esp 542 if ($::win32 or $::coff) { 543 &data_byte(0x64,0x8f,0x05,0,0,0,0); # pop %fs:0 544 &lea ("esp",&DWP(4,"esp")); 545 } 546 &mov ("edi",&wparam(0)); 547 &movups (&QWP(0,"edi"),"xmm0"); # copy-out context 548 &movups (&QWP(16,"edi"),"xmm1"); 549 &pop ("esi"); 550 &pop ("edi"); 551 &ret (); 552&function_end_B("padlock_sha256_oneshot"); 553 554&function_begin_B("padlock_sha256_blocks"); 555 &push ("edi"); 556 &push ("esi"); 557 &mov ("edi",&wparam(0)); 558 &mov ("esi",&wparam(1)); 559 &mov ("ecx",&wparam(2)); 560 &mov ("edx","esp"); # put aside %esp 561 &add ("esp",-128); 562 &movups ("xmm0",&QWP(0,"edi")); # copy-in context 563 &and ("esp",-16); 564 &movups ("xmm1",&QWP(16,"edi")); 565 &movaps (&QWP(0,"esp"),"xmm0"); 566 &mov ("edi","esp"); 567 &movaps (&QWP(16,"esp"),"xmm1"); 568 &mov ("eax",-1); 569 &data_byte(0xf3,0x0f,0xa6,0xd0); # rep xsha256 570 &movaps ("xmm0",&QWP(0,"esp")); 571 &movaps ("xmm1",&QWP(16,"esp")); 572 &mov ("esp","edx"); # restore %esp 573 &mov ("edi",&wparam(0)); 574 &movups (&QWP(0,"edi"),"xmm0"); # copy-out context 575 &movups (&QWP(16,"edi"),"xmm1"); 576 &pop ("esi"); 577 &pop ("edi"); 578 &ret (); 579&function_end_B("padlock_sha256_blocks"); 580 581&function_begin_B("padlock_sha512_blocks"); 582 &push ("edi"); 583 &push ("esi"); 584 &mov ("edi",&wparam(0)); 585 &mov ("esi",&wparam(1)); 586 &mov ("ecx",&wparam(2)); 587 &mov ("edx","esp"); # put aside %esp 588 &add ("esp",-128); 589 &movups ("xmm0",&QWP(0,"edi")); # copy-in context 590 &and ("esp",-16); 591 &movups ("xmm1",&QWP(16,"edi")); 592 &movups ("xmm2",&QWP(32,"edi")); 593 &movups ("xmm3",&QWP(48,"edi")); 594 &movaps (&QWP(0,"esp"),"xmm0"); 595 &mov ("edi","esp"); 596 &movaps (&QWP(16,"esp"),"xmm1"); 597 &movaps (&QWP(32,"esp"),"xmm2"); 598 &movaps (&QWP(48,"esp"),"xmm3"); 599 &data_byte(0xf3,0x0f,0xa6,0xe0); # rep xsha512 600 &movaps ("xmm0",&QWP(0,"esp")); 601 &movaps ("xmm1",&QWP(16,"esp")); 602 &movaps ("xmm2",&QWP(32,"esp")); 603 &movaps ("xmm3",&QWP(48,"esp")); 604 &mov ("esp","edx"); # restore %esp 605 &mov ("edi",&wparam(0)); 606 &movups (&QWP(0,"edi"),"xmm0"); # copy-out context 607 &movups (&QWP(16,"edi"),"xmm1"); 608 &movups (&QWP(32,"edi"),"xmm2"); 609 &movups (&QWP(48,"edi"),"xmm3"); 610 &pop ("esi"); 611 &pop ("edi"); 612 &ret (); 613&function_end_B("padlock_sha512_blocks"); 614 615&asciz ("VIA Padlock x86 module, CRYPTOGAMS by <appro\@openssl.org>"); 616&align (16); 617 618&dataseg(); 619# Essentially this variable belongs in thread local storage. 620# Having this variable global on the other hand can only cause 621# few bogus key reloads [if any at all on signle-CPU system], 622# so we accept the penalty... 623&set_label("padlock_saved_context",4); 624&data_word(0); 625 626&asm_finish(); 627 628close STDOUT; 629