Lines Matching refs:rsp

166 mov 0(%rsp),%r13
168 mov 8(%rsp),%r12
170 lea 16(%rsp),%rsp
236 mov 0(%rsp),%r13
238 mov 8(%rsp),%r12
240 lea 16(%rsp),%rsp
312 mov 0(%rsp),%r13
314 mov 8(%rsp),%r12
316 lea 16(%rsp),%rsp
368 mov 0(%rsp),%r13
370 mov 8(%rsp),%r12
372 lea 16(%rsp),%rsp
424 mov 0(%rsp),%r13
426 mov 8(%rsp),%r12
428 lea 16(%rsp),%rsp
480 mov 0(%rsp),%r13
482 mov 8(%rsp),%r12
484 lea 16(%rsp),%rsp
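
The mov/mov/lea triplets above are one and the same epilogue idiom, repeated at six exit points: the two callee-saved registers are reloaded from the top of the frame with plain loads, and the 16-byte save area is then released with a single lea (which, unlike add, leaves EFLAGS untouched). A minimal stand-alone sketch of the idiom; demo2 is a hypothetical label, not a symbol from this listing:

	.text
	.globl	demo2
demo2:
	push	%r12			# ends up at 8(%rsp) after the next push
	push	%r13			# ends up at 0(%rsp)
	# ... body that clobbers %r12 and %r13 ...
	mov	0(%rsp),%r13		# reload instead of pop %r13
	mov	8(%rsp),%r12		# reload instead of pop %r12
	lea	16(%rsp),%rsp		# drop both slots in one flag-neutral step
	ret
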
810 mov 0(%rsp),%r15
812 mov 8(%rsp),%r14
814 mov 16(%rsp),%r13
816 mov 24(%rsp),%r12
818 mov 32(%rsp),%rbx
820 mov 40(%rsp),%rbp
822 lea 48(%rsp),%rsp
1110 mov 0(%rsp),%r15
1112 mov 8(%rsp),%r14
1114 mov 16(%rsp),%r13
1116 mov 24(%rsp),%r12
1118 mov 32(%rsp),%rbx
1120 mov 40(%rsp),%rbp
1122 lea 48(%rsp),%rsp
1349 mov 0(%rsp),%r15
1351 mov 8(%rsp),%r14
1353 mov 16(%rsp),%r13
1355 mov 24(%rsp),%r12
1357 mov 32(%rsp),%rbx
1359 mov 40(%rsp),%rbp
1361 lea 48(%rsp),%rsp
1557 mov 0(%rsp),%r15
1559 mov 8(%rsp),%r14
1561 mov 16(%rsp),%r13
1563 mov 24(%rsp),%r12
1565 mov 32(%rsp),%rbx
1567 mov 40(%rsp),%rbp
1569 lea 48(%rsp),%rsp
1661 mov 0(%rsp),%r15
1663 mov 8(%rsp),%r14
1665 mov 16(%rsp),%r13
1667 mov 24(%rsp),%r12
1669 mov 32(%rsp),%rbx
1671 mov 40(%rsp),%rbp
1673 lea 48(%rsp),%rsp
1957 mov 0(%rsp),%r15
1959 mov 8(%rsp),%r14
1961 mov 16(%rsp),%r13
1963 mov 24(%rsp),%r12
1965 mov 32(%rsp),%rbx
1967 mov 40(%rsp),%rbp
1969 lea 48(%rsp),%rsp
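
The six-line restore blocks are the wide version of the same idiom, reloading all six callee-saved general-purpose registers (%r15 at 0(%rsp) up through %rbp at 40(%rsp)) before one lea drops the 48-byte save area. The matching prologue is not captured by this search (it contains no textual %rsp), but given the restore offsets it would have to push in this order; a reconstruction, not a quote:

	push	%rbp			# ends up at 40(%rsp)
	push	%rbx			# 32(%rsp)
	push	%r12			# 24(%rsp)
	push	%r13			# 16(%rsp)
	push	%r14			# 8(%rsp)
	push	%r15			# 0(%rsp)
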
2546 mov 0(%rsp),%r13
2548 mov 8(%rsp),%r12
2550 lea 16(%rsp),%rsp
2605 lea -0x88(%rsp), %rax
2607 .byte 0x48,0x8d,0x60,0xe0 #lea -0x20(%rax), %rsp
2672 movaps (%rsp), %xmm6
2673 movaps 0x10(%rsp), %xmm7
2674 movaps 0x20(%rsp), %xmm8
2675 movaps 0x30(%rsp), %xmm9
2676 movaps 0x40(%rsp), %xmm10
2677 movaps 0x50(%rsp), %xmm11
2678 movaps 0x60(%rsp), %xmm12
2679 movaps 0x70(%rsp), %xmm13
2680 movaps 0x80(%rsp), %xmm14
2681 movaps 0x90(%rsp), %xmm15
2682 lea 0xa8(%rsp), %rsp
2725 lea -0x88(%rsp), %rax
2727 .byte 0x48,0x8d,0x60,0xe0 #lea -0x20(%rax), %rsp
2781 movaps (%rsp), %xmm6
2782 movaps 0x10(%rsp), %xmm7
2783 movaps 0x20(%rsp), %xmm8
2784 movaps 0x30(%rsp), %xmm9
2785 movaps 0x40(%rsp), %xmm10
2786 movaps 0x50(%rsp), %xmm11
2787 movaps 0x60(%rsp), %xmm12
2788 movaps 0x70(%rsp), %xmm13
2789 movaps 0x80(%rsp), %xmm14
2790 movaps 0x90(%rsp), %xmm15
2791 lea 0xa8(%rsp), %rsp
2817 lea -0x88(%rsp), %rax
2818 mov %rsp,%r11
2820 .byte 0x48,0x8d,0x60,0xe0 # lea -0x20(%rax), %rsp
2886 movaps (%rsp), %xmm6
2887 movaps 0x10(%rsp), %xmm7
2888 movaps 0x20(%rsp), %xmm8
2889 movaps 0x30(%rsp), %xmm9
2890 movaps 0x40(%rsp), %xmm10
2891 movaps 0x50(%rsp), %xmm11
2892 movaps 0x60(%rsp), %xmm12
2893 movaps 0x70(%rsp), %xmm13
2894 movaps 0x80(%rsp), %xmm14
2895 movaps 0x90(%rsp), %xmm15
2896 lea (%r11), %rsp
2925 mov %rsp,%r11
2926 lea -0x88(%rsp), %rax
2928 .byte 0x48,0x8d,0x60,0xe0 # lea -0x20(%rax), %rsp
3009 movaps (%rsp), %xmm6
3010 movaps 0x10(%rsp), %xmm7
3011 movaps 0x20(%rsp), %xmm8
3012 movaps 0x30(%rsp), %xmm9
3013 movaps 0x40(%rsp), %xmm10
3014 movaps 0x50(%rsp), %xmm11
3015 movaps 0x60(%rsp), %xmm12
3016 movaps 0x70(%rsp), %xmm13
3017 movaps 0x80(%rsp), %xmm14
3018 movaps 0x90(%rsp), %xmm15
3019 lea (%r11), %rsp
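
These four blocks are Win64-only bookkeeping: the Microsoft x64 ABI treats %xmm6 through %xmm15 as non-volatile, so ten 16-byte slots (offsets 0x00 through 0x90, i.e. 0xa0 bytes plus 8 for alignment = 0xa8) are carved out on entry and reloaded with movaps on exit. The two-step allocation, lea -0x88(%rsp),%rax followed by the hard-coded .byte sequence for lea -0x20(%rax),%rsp, nets out to %rsp -= 0xa8; pinning the second lea's exact encoding with .byte presumably keeps the instruction bytes stable across assemblers (the listing itself does not say why). The later two blocks instead keep the entry %rsp in %r11 and restore it with lea (%r11),%rsp rather than an immediate. A compressed sketch of the save/restore pair, with the middle registers elided:

	sub	$0xa8,%rsp		# 10*16 bytes for %xmm6..%xmm15, +8 alignment
	movaps	%xmm6,0x00(%rsp)	# non-volatile under Win64
	movaps	%xmm7,0x10(%rsp)	# %xmm8..%xmm14 continue at 0x20..0x80
	movaps	%xmm15,0x90(%rsp)
	# ... body ...
	movaps	0x00(%rsp),%xmm6
	movaps	0x10(%rsp),%xmm7
	movaps	0x90(%rsp),%xmm15
	lea	0xa8(%rsp),%rsp		# release the whole area, flags untouched
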
3272 sub \$32*5+8, %rsp
3286 movdqa %xmm0, $in_x(%rsp)
3287 movdqa %xmm1, $in_x+0x10(%rsp)
3294 lea $S(%rsp), $r_ptr
3302 lea $Zsqr(%rsp), $r_ptr
3305 `&load_for_sqr("$S(%rsp)", "$src0")`
3306 lea $S(%rsp), $r_ptr
3320 mov $in_x+8*0(%rsp), $acc4 # "5-4-0-1" order
3321 mov $in_x+8*1(%rsp), $acc5
3322 lea $Zsqr(%rsp), $b_ptr
3323 mov $in_x+8*2(%rsp), $acc0
3324 mov $in_x+8*3(%rsp), $acc1
3325 lea $M(%rsp), $r_ptr
3328 mov $in_x+8*0(%rsp), $acc4 # "5-4-0-1" order
3329 mov $in_x+8*1(%rsp), $acc5
3330 lea $Zsqr(%rsp), $b_ptr
3331 mov $in_x+8*2(%rsp), $acc0
3332 mov $in_x+8*3(%rsp), $acc1
3333 lea $Zsqr(%rsp), $r_ptr
3336 `&load_for_sqr("$S(%rsp)", "$src0")`
3389 `&load_for_mul("$M(%rsp)", "$Zsqr(%rsp)", "$src0")`
3390 lea $M(%rsp), $r_ptr
3393 lea $tmp0(%rsp), $r_ptr
3396 lea $M(%rsp), $b_ptr
3397 lea $M(%rsp), $r_ptr
3400 `&load_for_mul("$S(%rsp)", "$in_x(%rsp)", "$src0")`
3401 lea $S(%rsp), $r_ptr
3404 lea $tmp0(%rsp), $r_ptr
3407 `&load_for_sqr("$M(%rsp)", "$src0")`
3411 lea $tmp0(%rsp), $b_ptr
3418 mov $S+8*0(%rsp), $t0
3419 mov $S+8*1(%rsp), $t1
3420 mov $S+8*2(%rsp), $t2
3421 mov $S+8*3(%rsp), $acc2 # "4-5-0-1" order
3422 lea $S(%rsp), $r_ptr
3425 mov $M(%rsp), $src0
3426 lea $M(%rsp), $b_ptr
3429 mov $acc4, $S+8*0(%rsp) # have to save:-(
3431 mov $acc5, $S+8*1(%rsp)
3433 mov $acc0, $S+8*2(%rsp)
3434 lea $S-$bias(%rsp), $a_ptr
3436 mov $acc1, $S+8*3(%rsp)
3438 lea $S(%rsp), $r_ptr
3445 lea 32*5+56(%rsp), %rsi
3459 lea (%rsi),%rsp
3460 .cfi_def_cfa_register %rsp
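
This run is the body of a point-doubling routine written in perlasm: the source is a Perl here-doc, so \$ escapes the dollar sign (the assembler sees sub $32*5+8,%rsp), names like $S, $Zsqr, $M and $in_x are Perl scalars holding byte offsets into the frame, and the backticked &load_for_sqr/&load_for_mul calls are Perl helpers expanded at generation time. The frame math: 32*5+8 = 168 bytes buys five 32-byte field-element temporaries plus 8 bytes to keep 16-byte alignment, and the exit address 32*5+56(%rsp) adds the 48-byte GPR save area on top, so %rsi lands on the entry stack pointer and lea (%rsi),%rsp unwinds everything at once; .cfi_def_cfa_register %rsp then re-bases the CFA for the unwinder. A sketch of the skeleton with the Perl layer stripped and the offsets spelled out (frame_sketch is a hypothetical label, and the body is elided):

	.text
	.globl	frame_sketch
frame_sketch:
	push	%rbp			# six GPR saves: 48 bytes
	push	%rbx
	push	%r12
	push	%r13
	push	%r14
	push	%r15
	sub	$168,%rsp		# 32*5+8: five 32-byte temporaries + 8 alignment
	# ... field arithmetic using 0(%rsp)..159(%rsp) ...
	lea	216(%rsp),%rsi		# 32*5+56 = frame + 48-byte save area
	mov	-48(%rsi),%r15		# reload saves relative to %rsi
	mov	-40(%rsi),%r14
	mov	-32(%rsi),%r13
	mov	-24(%rsi),%r12
	mov	-16(%rsi),%rbx
	mov	-8(%rsi),%rbp
	lea	(%rsi),%rsp		# single-step unwind to the entry %rsp
	ret
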
3523 sub \$32*18+8, %rsp
3535 movdqa %xmm0, $in1_x(%rsp)
3536 movdqa %xmm1, $in1_x+0x10(%rsp)
3537 movdqa %xmm2, $in1_y(%rsp)
3538 movdqa %xmm3, $in1_y+0x10(%rsp)
3539 movdqa %xmm4, $in1_z(%rsp)
3540 movdqa %xmm5, $in1_z+0x10(%rsp)
3553 movdqa %xmm0, $in2_x(%rsp)
3555 movdqa %xmm1, $in2_x+0x10(%rsp)
3558 movdqa %xmm2, $in2_y(%rsp)
3559 movdqa %xmm3, $in2_y+0x10(%rsp)
3566 mov $src0, $in2_z+8*0(%rsp) # make in2_z copy
3567 mov $acc6, $in2_z+8*1(%rsp)
3568 mov $acc7, $in2_z+8*2(%rsp)
3569 mov $acc0, $in2_z+8*3(%rsp)
3570 lea $Z2sqr(%rsp), $r_ptr # Z2^2
3589 lea $Z1sqr(%rsp), $r_ptr # Z1^2
3592 `&load_for_mul("$Z2sqr(%rsp)", "$in2_z(%rsp)", "$src0")`
3593 lea $S1(%rsp), $r_ptr # S1 = Z2^3
3596 `&load_for_mul("$Z1sqr(%rsp)", "$in1_z(%rsp)", "$src0")`
3597 lea $S2(%rsp), $r_ptr # S2 = Z1^3
3600 `&load_for_mul("$S1(%rsp)", "$in1_y(%rsp)", "$src0")`
3601 lea $S1(%rsp), $r_ptr # S1 = Y1*Z2^3
3604 `&load_for_mul("$S2(%rsp)", "$in2_y(%rsp)", "$src0")`
3605 lea $S2(%rsp), $r_ptr # S2 = Y2*Z1^3
3608 lea $S1(%rsp), $b_ptr
3609 lea $R(%rsp), $r_ptr # R = S2 - S1
3619 `&load_for_mul("$Z2sqr(%rsp)", "$in1_x(%rsp)", "$src0")`
3620 lea $U1(%rsp), $r_ptr # U1 = X1*Z2^2
3623 `&load_for_mul("$Z1sqr(%rsp)", "$in2_x(%rsp)", "$src0")`
3624 lea $U2(%rsp), $r_ptr # U2 = X2*Z1^2
3627 lea $U1(%rsp), $b_ptr
3628 lea $H(%rsp), $r_ptr # H = U2 - U1
3648 add \$`32*(18-5)`, %rsp # difference in frame sizes
3655 `&load_for_sqr("$R(%rsp)", "$src0")`
3656 lea $Rsqr(%rsp), $r_ptr # R^2
3659 `&load_for_mul("$H(%rsp)", "$in1_z(%rsp)", "$src0")`
3660 lea $res_z(%rsp), $r_ptr # Z3 = H*Z1*Z2
3663 `&load_for_sqr("$H(%rsp)", "$src0")`
3664 lea $Hsqr(%rsp), $r_ptr # H^2
3667 `&load_for_mul("$res_z(%rsp)", "$in2_z(%rsp)", "$src0")`
3668 lea $res_z(%rsp), $r_ptr # Z3 = H*Z1*Z2
3671 `&load_for_mul("$Hsqr(%rsp)", "$H(%rsp)", "$src0")`
3672 lea $Hcub(%rsp), $r_ptr # H^3
3675 `&load_for_mul("$Hsqr(%rsp)", "$U1(%rsp)", "$src0")`
3676 lea $U2(%rsp), $r_ptr # U1*H^2
3687 #lea $U2(%rsp), $a_ptr
3688 #lea $Hsqr(%rsp), $r_ptr # 2*U1*H^2
3693 lea $Rsqr(%rsp), $a_ptr
3720 lea $Hcub(%rsp), $b_ptr
3721 lea $res_x(%rsp), $r_ptr
3724 mov $U2+8*0(%rsp), $t0
3725 mov $U2+8*1(%rsp), $t1
3726 mov $U2+8*2(%rsp), $t2
3727 mov $U2+8*3(%rsp), $t3
3728 lea $res_y(%rsp), $r_ptr
3739 `&load_for_mul("$S1(%rsp)", "$Hcub(%rsp)", "$src0")`
3740 lea $S2(%rsp), $r_ptr
3743 `&load_for_mul("$R(%rsp)", "$res_y(%rsp)", "$src0")`
3744 lea $res_y(%rsp), $r_ptr
3747 lea $S2(%rsp), $b_ptr
3748 lea $res_y(%rsp), $r_ptr
3755 pandn $res_z(%rsp), %xmm0
3757 pandn $res_z+0x10(%rsp), %xmm1
3759 pand $in2_z(%rsp), %xmm2
3760 pand $in2_z+0x10(%rsp), %xmm3
3770 pand $in1_z(%rsp), %xmm2
3771 pand $in1_z+0x10(%rsp), %xmm3
3779 pandn $res_x(%rsp), %xmm0
3781 pandn $res_x+0x10(%rsp), %xmm1
3783 pand $in2_x(%rsp), %xmm2
3784 pand $in2_x+0x10(%rsp), %xmm3
3794 pand $in1_x(%rsp), %xmm2
3795 pand $in1_x+0x10(%rsp), %xmm3
3803 pandn $res_y(%rsp), %xmm0
3805 pandn $res_y+0x10(%rsp), %xmm1
3807 pand $in2_y(%rsp), %xmm2
3808 pand $in2_y+0x10(%rsp), %xmm3
3818 pand $in1_y(%rsp), %xmm2
3819 pand $in1_y+0x10(%rsp), %xmm3
3826 lea 32*18+56(%rsp), %rsi
3840 lea (%rsi),%rsp
3841 .cfi_def_cfa_register %rsp
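
This section is the full point addition: sub \$32*18+8,%rsp reserves eighteen 32-byte temporaries plus alignment, and the add \$32*(18-5),%rsp hit shrinks that frame down to the doubler's five-slot layout ("difference in frame sizes", per its own comment) before continuing on the doubling path. The pand/pandn runs near the end are a branchless select, not logic noise: each 128-bit half of the result is computed as (~mask & computed) | (mask & input), with the mask prepared as all-zeros or all-ones from the point-at-infinity checks, so the choice among the possible outputs never takes a data-dependent branch. The core of one such select, as a register-only sketch (the listing's version reads its operands from the frame):

	# %xmm1 = a, %xmm3 = b, %xmm2 = mask (all-ones selects b)
	movdqa	%xmm2,%xmm0
	pandn	%xmm1,%xmm0		# %xmm0 = ~mask & a
	pand	%xmm3,%xmm2		# %xmm2 =  mask & b
	por	%xmm2,%xmm0		# %xmm0 = a or b, chosen without a branch
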
3903 sub \$32*15+8, %rsp
3918 movdqa %xmm0, $in1_x(%rsp)
3919 movdqa %xmm1, $in1_x+0x10(%rsp)
3920 movdqa %xmm2, $in1_y(%rsp)
3921 movdqa %xmm3, $in1_y+0x10(%rsp)
3922 movdqa %xmm4, $in1_z(%rsp)
3923 movdqa %xmm5, $in1_z+0x10(%rsp)
3932 movdqa %xmm0, $in2_x(%rsp)
3934 movdqa %xmm1, $in2_x+0x10(%rsp)
3937 movdqa %xmm2, $in2_y(%rsp)
3938 movdqa %xmm3, $in2_y+0x10(%rsp)
3945 lea $Z1sqr(%rsp), $r_ptr # Z1^2
3963 lea $Z1sqr-$bias(%rsp), $a_ptr
3965 lea $U2(%rsp), $r_ptr # U2 = X2*Z1^2
3968 lea $in1_x(%rsp), $b_ptr
3969 lea $H(%rsp), $r_ptr # H = U2 - U1
3972 `&load_for_mul("$Z1sqr(%rsp)", "$in1_z(%rsp)", "$src0")`
3973 lea $S2(%rsp), $r_ptr # S2 = Z1^3
3976 `&load_for_mul("$H(%rsp)", "$in1_z(%rsp)", "$src0")`
3977 lea $res_z(%rsp), $r_ptr # Z3 = H*Z1*Z2
3980 `&load_for_mul("$S2(%rsp)", "$in2_y(%rsp)", "$src0")`
3981 lea $S2(%rsp), $r_ptr # S2 = Y2*Z1^3
3984 lea $in1_y(%rsp), $b_ptr
3985 lea $R(%rsp), $r_ptr # R = S2 - S1
3988 `&load_for_sqr("$H(%rsp)", "$src0")`
3989 lea $Hsqr(%rsp), $r_ptr # H^2
3992 `&load_for_sqr("$R(%rsp)", "$src0")`
3993 lea $Rsqr(%rsp), $r_ptr # R^2
3996 `&load_for_mul("$H(%rsp)", "$Hsqr(%rsp)", "$src0")`
3997 lea $Hcub(%rsp), $r_ptr # H^3
4000 `&load_for_mul("$Hsqr(%rsp)", "$in1_x(%rsp)", "$src0")`
4001 lea $U2(%rsp), $r_ptr # U1*H^2
4012 #lea $U2(%rsp), $a_ptr
4013 #lea $Hsqr(%rsp), $r_ptr # 2*U1*H^2
4018 lea $Rsqr(%rsp), $a_ptr
4045 lea $Hcub(%rsp), $b_ptr
4046 lea $res_x(%rsp), $r_ptr
4049 mov $U2+8*0(%rsp), $t0
4050 mov $U2+8*1(%rsp), $t1
4051 mov $U2+8*2(%rsp), $t2
4052 mov $U2+8*3(%rsp), $t3
4053 lea $H(%rsp), $r_ptr
4064 `&load_for_mul("$Hcub(%rsp)", "$in1_y(%rsp)", "$src0")`
4065 lea $S2(%rsp), $r_ptr
4068 `&load_for_mul("$H(%rsp)", "$R(%rsp)", "$src0")`
4069 lea $H(%rsp), $r_ptr
4072 lea $S2(%rsp), $b_ptr
4073 lea $res_y(%rsp), $r_ptr
4080 pandn $res_z(%rsp), %xmm0
4082 pandn $res_z+0x10(%rsp), %xmm1
4095 pand $in1_z(%rsp), %xmm2
4096 pand $in1_z+0x10(%rsp), %xmm3
4104 pandn $res_x(%rsp), %xmm0
4106 pandn $res_x+0x10(%rsp), %xmm1
4108 pand $in2_x(%rsp), %xmm2
4109 pand $in2_x+0x10(%rsp), %xmm3
4119 pand $in1_x(%rsp), %xmm2
4120 pand $in1_x+0x10(%rsp), %xmm3
4128 pandn $res_y(%rsp), %xmm0
4130 pandn $res_y+0x10(%rsp), %xmm1
4132 pand $in2_y(%rsp), %xmm2
4133 pand $in2_y+0x10(%rsp), %xmm3
4143 pand $in1_y(%rsp), %xmm2
4144 pand $in1_y+0x10(%rsp), %xmm3
4150 lea 32*15+56(%rsp), %rsi
4164 lea (%rsi),%rsp
4165 .cfi_def_cfa_register %rsp
4350 sub \$64,%rsp
4392 sub \$64,%rsp
4448 mov %r10,32(%rsp) # arg5
4449 mov %r11,40(%rsp) # arg6
4450 mov %r12,48(%rsp) # arg7
4451 mov %rcx,56(%rsp) # arg8, (NULL)
4455 add \$64,%rsp
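
The last group is a Win64 calling-convention shim: the Microsoft x64 ABI passes the first four integer arguments in %rcx, %rdx, %r8, %r9 and makes the caller reserve 32 bytes of shadow ("home") space above any stack arguments, so a call taking eight arguments needs 32 + 4*8 = 64 bytes, with arguments 5 through 8 stored at 32(%rsp) through 56(%rsp), exactly as the mov lines above do before add \$64,%rsp tears the area down. A sketch of such a call site; win64_callee is a hypothetical symbol:

	sub	$64,%rsp		# 32-byte shadow space + 4 qwords for args 5..8
	mov	%r10,32(%rsp)		# arg5 (first stack argument)
	mov	%r11,40(%rsp)		# arg6
	mov	%r12,48(%rsp)		# arg7
	movq	$0,56(%rsp)		# arg8, NULL (matching the listing's comment)
	# args 1..4 go in %rcx, %rdx, %r8, %r9
	call	win64_callee
	add	$64,%rsp		# release shadow space and stack args
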