Lines Matching refs:rsp

283 mov 0(%rsp),%r15
285 mov 8(%rsp),%r14
287 mov 16(%rsp),%r13
289 mov 24(%rsp),%r12
291 mov 32(%rsp),%rbp
293 mov 40(%rsp),%rbx
295 lea 48(%rsp),%rsp
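
The six loads at 0..40(%rsp) followed by "lea 48(%rsp),%rsp" are the epilogue of a function that saved %rbx, %rbp and %r12-%r15 with plain pushes; the pushes themselves never name %rsp, which is why they do not appear in this match list. A minimal sketch of the implied prologue, assuming the push order dictated by the restore offsets:

	push	%rbx			# ends up at 40(%rsp) once all six are pushed
	push	%rbp			# 32(%rsp)
	push	%r12			# 24(%rsp)
	push	%r13			# 16(%rsp)
	push	%r14			# 8(%rsp)
	push	%r15			# 0(%rsp)
	...
	# epilogue (the matches above): reload in the same layout, then drop
	# the 48-byte save area with lea (which, unlike add, does not touch EFLAGS)
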
661 mov 0(%rsp),%r15
663 mov 8(%rsp),%r14
665 mov 16(%rsp),%r13
667 mov 24(%rsp),%r12
669 mov 32(%rsp),%rbp
671 mov 40(%rsp),%rbx
673 lea 48(%rsp),%rsp
753 mov 0(%rsp),%r15
755 mov 8(%rsp),%r14
757 mov 16(%rsp),%r13
759 mov 24(%rsp),%r12
761 mov 32(%rsp),%rbp
763 mov 40(%rsp),%rbx
765 lea 48(%rsp),%rax
766 lea 48(%rsp),%rsp
784 lea -0x58(%rsp),%r11
786 sub \$0x178,%rsp
789 lea -0xf8(%rsp),%r11
790 sub \$0x218,%rsp
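
Lines 784-790 show the frame-setup idiom that pairs with the "lea ...(%r11),%rsp" matches at 1373/1377: a fixed offset from the incoming %rsp is parked in %r11 before the frame is carved out, so the epilogue can rebuild the original %rsp with a single lea no matter what happens to %rsp in between. (Immediates appear as "\$0x178" and so on because this is perlasm source, where "$" is escaped to keep Perl from interpolating it.) A minimal sketch of the pattern, using the constants from the lines above:

	lea	-0x58(%rsp),%r11	# %r11 = incoming %rsp - 0x58
	sub	\$0x178,%rsp		# allocate the local frame
	...
	lea	0x58(%r11),%rsp		# epilogue: %r11 + 0x58 == original %rsp
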
842 vmovdqa $D0,0x00(%rsp)
847 vmovdqa $D1,0x10(%rsp)
852 vmovdqa $D2,0x20(%rsp)
857 vmovdqa $D0,0x30(%rsp)
862 vmovdqa $D1,0x40(%rsp)
867 vmovdqa $D2,0x50(%rsp)
872 vmovdqa $D0,0x60(%rsp)
876 vmovdqa $D1,0x70(%rsp)
878 vmovdqa 0x00(%rsp),$D4 # preload r0^2
881 vmovdqa $D2,0x80(%rsp)
911 vmovdqa 0x10(%rsp),$H2 # r1^2
916 vpmuludq 0x20(%rsp),$T4,$H0 # h4*s1
925 vmovdqa 0x30(%rsp),$H3 # r2^2
932 vmovdqa 0x40(%rsp),$H4 # s2^2
937 vmovdqa 0x50(%rsp),$H2 # r3^2
942 vmovdqa 0x60(%rsp),$H3 # s3^2
945 vmovdqa 0x80(%rsp),$H4 # s4^2
967 vpmuludq 0x70(%rsp),$T0,$T4 # h0*r4
1081 vmovdqa 0x00(%rsp),$D4 # preload r0^2
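
Lines 842-881 spill a table of precomputed multiplier powers (the r values and their folded s counterparts) into nine 16-byte slots at 0x00-0x80(%rsp); lines 878-1081 read them back during the multiply. Going by the comments on the loads, the layout is roughly:

	# 0x00(%rsp)  r0^2      0x10(%rsp)  r1^2         0x20(%rsp)  s1  (h4*s1)
	# 0x30(%rsp)  r2^2      0x40(%rsp)  s2^2         0x50(%rsp)  r3^2
	# 0x60(%rsp)  s3^2      0x70(%rsp)  r4  (h0*r4)  0x80(%rsp)  s4^2

The slot names are read off the comments in this listing; exactly which power lands in each slot is determined by code that does not reference %rsp and hence is not shown here.
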
1373 lea 0xf8(%r11),%rsp
1377 lea 0x58(%r11),%rsp
1378 .cfi_def_cfa %rsp,8
1593 mov 0(%rsp),%r15
1595 mov 8(%rsp),%r14
1597 mov 16(%rsp),%r13
1599 mov 24(%rsp),%r12
1601 mov 32(%rsp),%rbp
1603 mov 40(%rsp),%rbx
1605 lea 48(%rsp),%rsp
1692 mov 0(%rsp),%r15
1694 mov 8(%rsp),%r14
1696 mov 16(%rsp),%r13
1698 mov 24(%rsp),%r12
1700 mov 32(%rsp),%rbp
1702 mov 40(%rsp),%rbx
1704 lea 48(%rsp),%rax
1705 lea 48(%rsp),%rsp
1732 lea -8(%rsp),%r11
1734 sub \$0x128,%rsp
1737 lea -0xf8(%rsp),%r11
1738 sub \$0x1c8,%rsp
1758 and \$-512,%rsp
1764 lea 0x90(%rsp),%rax # size optimization
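
Lines 1732-1764 (and their twins at 2158-2184) are the same %r11 idiom as above with an extra step: after the frame is allocated, "and \$-512,%rsp" rounds %rsp down to a 512-byte boundary. Once that and executes, the distance from %rsp back to the caller's frame is no longer a known constant, which is why the epilogue at 2117/2121 must go through %r11 instead of adjusting %rsp directly. The "lea 0x90(%rsp),%rax" is commented as a size optimization, presumably so that later accesses relative to %rax fit in the short one-byte displacement encoding. A minimal sketch, with constants taken from the lines above:

	lea	-8(%rsp),%r11		# remember where the caller's frame is
	sub	\$0x128,%rsp		# allocate locals
	and	\$-512,%rsp		# clear the low 9 bits: 512-byte alignment
	...
	lea	8(%r11),%rsp		# only %r11 can recover the original %rsp now
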
1771 vmovdqa $T2,0x00(%rsp)
1828 vmovdqa `32*0`(%rsp),$T0 # r0^4
1830 vmovdqa `32*1`(%rsp),$T1 # r1^4
1832 vmovdqa `32*3`(%rsp),$T2 # r2^4
1863 vpmuludq `32*2`(%rsp),$H4,$H2 # h4*s1
1978 vmovdqu `32*0+4`(%rsp),$T0 # r0^4
1980 vmovdqu `32*1+4`(%rsp),$T1 # r1^4
1982 vmovdqu `32*3+4`(%rsp),$T2 # r2^4
1998 vpmuludq `32*2+4`(%rsp),$H4,$H2 # h4*s1
2117 lea 0xf8(%r11),%rsp
2121 lea 8(%r11),%rsp
2122 .cfi_def_cfa %rsp,8
2158 lea -8(%rsp),%r11
2160 sub \$0x128,%rsp
2163 lea -0xf8(%rsp),%r11
2164 sub \$0x1c8,%rsp
2184 and \$-512,%rsp
2199 vmovdqa64 $R0,0x00(%rsp){%k2} # save in case $len%128 != 0
2202 vmovdqu64 $R1,0x00(%rsp,%rax){%k2}
2205 vmovdqa64 $S1,0x40(%rsp){%k2}
2208 vmovdqu64 $R2,0x40(%rsp,%rax){%k2}
2210 vmovdqa64 $S2,0x80(%rsp){%k2}
2211 vmovdqu64 $R3,0x80(%rsp,%rax){%k2}
2212 vmovdqa64 $S3,0xc0(%rsp){%k2}
2213 vmovdqu64 $R4,0xc0(%rsp,%rax){%k2}
2214 vmovdqa64 $S4,0x100(%rsp){%k2}
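
The vmovdqa64/vmovdqu64 stores at 2199-2214 use AVX-512 merge-masking: with a {%k2} write-mask on a store, only the 64-bit lanes whose bit is set in %k2 are written and the rest of the destination memory is left untouched. Per the comment at 2199 they stash the R0-R4/S1-S4 power table on the stack for the case where $len%128 != 0, alternating aligned slots based at %rsp with unaligned slots based at %rsp+%rax. A minimal illustration of the masked-store semantics (register names are placeholders, not taken from the source):

	kmovw	%eax,%k2		# build a write-mask from a GPR
	vmovdqa64	%zmm0,(%rsp){%k2}	# store only the lanes selected by %k2
					# (%rsp assumed 64-byte aligned, as after the and above)
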
2698 lea 0x90(%rsp),%rax # size optimization for .Ltail_avx2
2721 lea 0xf8(%r11),%rsp
2725 lea 8(%r11),%rsp
2726 .cfi_def_cfa %rsp,8
3957 sub \$64,%rsp
4007 sub \$64,%rsp
4055 mov %r10,32(%rsp) # arg5
4056 mov %r11,40(%rsp) # arg6
4057 mov %r12,48(%rsp) # arg7
4058 mov %rcx,56(%rsp) # arg8, (NULL)
4062 add \$64,%rsp
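
Lines 3957-4062 belong to Win64-ABI call sites: the first four integer arguments travel in registers, %rsp must keep a 32-byte shadow area for them, and arguments five and up are stored just above it, which is exactly where the "arg5".."arg8" stores at 32..56(%rsp) land inside the 64-byte area reserved by "sub \$64,%rsp". A minimal sketch of the shape (the callee name is a placeholder, not taken from the source):

	sub	\$64,%rsp		# 32-byte shadow space + room for args 5-8
	mov	%r10,32(%rsp)		# arg5: first stack-passed argument
	mov	%r11,40(%rsp)		# arg6
	mov	%r12,48(%rsp)		# arg7
	mov	%rcx,56(%rsp)		# arg8 (NULL, per the comment above)
	call	win64_callee		# hypothetical callee taking 8 arguments
	add	\$64,%rsp		# release the argument area
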