# xref: /openssl/crypto/md5/asm/md5-x86_64.pl (revision 572e6df7)
1#! /usr/bin/env perl
2# Author: Marc Bevand <bevand_m (at) epita.fr>
3# Copyright 2005-2020 The OpenSSL Project Authors. All Rights Reserved.
4#
5# Licensed under the Apache License 2.0 (the "License").  You may not use
6# this file except in compliance with the License.  You can obtain a copy
7# in the file LICENSE in the source distribution or at
8# https://www.openssl.org/source/license.html
9
10# MD5 optimized for AMD64.
11
12use strict;
13
14my $code;
15
# round1_step() does:
#   dst = x + ((dst + F(x,y,z) + X[k] + T_i) <<< s)
# where F(x,y,z) = (x & y) | (~x & z), computed here in the equivalent
# but cheaper form ((y ^ z) & x) ^ z (one temporary, no NOT).
# It also pre-loads state for the NEXT step:
#   %r10d = X[k_next]
#   %r11d = z' (copy of z for the next step)
# $pos is -1 for the round's first step (emit extra priming code),
# 1 for the last step, 0 otherwise.
# Each round1_step() takes about 5.3 clocks (9 instructions, 1.7 IPC)
sub round1_step
{
    my ($pos, $dst, $x, $y, $z, $k_next, $T_i, $s) = @_;
    # First step only: prime %r10d with X[0] and %r11d with z (= %edx).
    $code .= " mov	0*4(%rsi),	%r10d		/* (NEXT STEP) X[0] */\n" if ($pos == -1);
    $code .= " mov	%edx,		%r11d		/* (NEXT STEP) z' = %edx */\n" if ($pos == -1);
    $code .= <<EOF;
	xor	$y,		%r11d		/* y ^ ... */
	lea	$T_i($dst,%r10d),$dst		/* Const + dst + ... */
	and	$x,		%r11d		/* x & ... */
	mov	$k_next*4(%rsi),%r10d		/* (NEXT STEP) X[$k_next] */
	xor	$z,		%r11d		/* z ^ ... */
	add	%r11d,		$dst		/* dst += ... */
	rol	\$$s,		$dst		/* dst <<< s */
	mov	$y,		%r11d		/* (NEXT STEP) z' = $y */
	add	$x,		$dst		/* dst += x */
EOF
}
38
# round2_step() does:
#   dst = x + ((dst + G(x,y,z) + X[k] + T_i) <<< s)
# where G(x,y,z) = (x & z) | (y & ~z), computed directly using two
# temporaries.  It also pre-loads state for the NEXT step:
#   %r10d = X[k_next]
#   %r11d = z' (copy of z for the next step)
#   %r12d = z' (copy of z for the next step)
# ($pos semantics as in round1_step.)
# Each round2_step() takes about 5.4 clocks (11 instructions, 2.0 IPC)
sub round2_step
{
    my ($pos, $dst, $x, $y, $z, $k_next, $T_i, $s) = @_;
    # First step only: prime both z' copies with z (= %edx).
    $code .= " mov	%edx,		%r11d		/* (NEXT STEP) z' = %edx */\n" if ($pos == -1);
    $code .= " mov	%edx,		%r12d		/* (NEXT STEP) z' = %edx */\n" if ($pos == -1);
    $code .= <<EOF;
	not	%r11d				/* not z */
	and	$x,		%r12d		/* x & z */
	lea	$T_i($dst,%r10d),$dst		/* Const + dst + ... */
	and	$y,		%r11d		/* y & (not z) */
	mov	$k_next*4(%rsi),%r10d		/* (NEXT STEP) X[$k_next] */
	or	%r11d,		%r12d		/* (y & (not z)) | (x & z) */
	mov	$y,		%r11d		/* (NEXT STEP) z' = $y */
	add	%r12d,		$dst		/* dst += ... */
	mov	$y,		%r12d		/* (NEXT STEP) z' = $y */
	rol	\$$s,		$dst		/* dst <<< s */
	add	$x,		$dst		/* dst += x */
EOF
}
64
# round3_step() does:
#   dst = x + ((dst + H(x,y,z) + X[k] + T_i) <<< s)
# where H(x,y,z) = x ^ y ^ z.  It also pre-loads state for the NEXT step:
#   %r10d = X[k_next]
#   %r11d = y' (copy of y for the next step)
# Each round3_step() takes about 4.2 clocks (8 instructions, 1.9 IPC)
#
# $round3_alter (private to this sub via the enclosing bare block) flips
# on every call so that consecutive steps emit the trailing "rol"/"mov"
# pair in alternating order.  The two orderings are architecturally
# equivalent; NOTE(review): presumably an instruction-scheduling aid for
# some microarchitectures -- confirm against upstream commit history.
{ my $round3_alter=0;
sub round3_step
{
    my ($pos, $dst, $x, $y, $z, $k_next, $T_i, $s) = @_;
    # First step only: prime %r11d with y (= %ecx).
    $code .= " mov	%ecx,		%r11d		/* (NEXT STEP) y' = %ecx */\n" if ($pos == -1);
    $code .= <<EOF;
	lea	$T_i($dst,%r10d),$dst		/* Const + dst + ... */
	xor	$z,		%r11d		/* z ^ ... */
	mov	$k_next*4(%rsi),%r10d		/* (NEXT STEP) X[$k_next] */
	xor	$x,		%r11d		/* x ^ ... */
	add	%r11d,		$dst		/* dst += ... */
EOF
    # Emit rol-then-mov on odd steps ...
    $code .= <<EOF if ($round3_alter);
	rol	\$$s,		$dst		/* dst <<< s */
	mov	$x,		%r11d		/* (NEXT STEP) y' = $x */
EOF
    # ... and mov-then-rol on even steps.
    $code .= <<EOF if (!$round3_alter);
	mov	$x,		%r11d		/* (NEXT STEP) y' = $x */
	rol	\$$s,		$dst		/* dst <<< s */
EOF
    $code .= <<EOF;
	add	$x,		$dst		/* dst += x */
EOF
    $round3_alter^=1;
}
}
96
# round4_step() does:
#   dst = x + ((dst + I(x,y,z) + X[k] + T_i) <<< s)
# where I(x,y,z) = y ^ (x | ~z).  "not z" is produced by XOR-ing z with
# 0xffffffff, which (unlike NOT) leaves the value in a fresh register.
# It also pre-loads state for the NEXT step:
#   %r10d = X[k_next]
#   %r11d = not z' (copy of not z for the next step)
# ($pos semantics as in round1_step.)
# Each round4_step() takes about 5.2 clocks (9 instructions, 1.7 IPC)
sub round4_step
{
    my ($pos, $dst, $x, $y, $z, $k_next, $T_i, $s) = @_;
    # First step only: prime %r11d with ~z (z = %edx).
    $code .= " mov	\$0xffffffff,	%r11d\n" if ($pos == -1);
    $code .= " xor	%edx,		%r11d		/* (NEXT STEP) not z' = not %edx*/\n"
    if ($pos == -1);
    $code .= <<EOF;
	lea	$T_i($dst,%r10d),$dst		/* Const + dst + ... */
	or	$x,		%r11d		/* x | ... */
	mov	$k_next*4(%rsi),%r10d		/* (NEXT STEP) X[$k_next] */
	xor	$y,		%r11d		/* y ^ ... */
	add	%r11d,		$dst		/* dst += ... */
	mov	\$0xffffffff,	%r11d
	rol	\$$s,		$dst		/* dst <<< s */
	xor	$y,		%r11d		/* (NEXT STEP) not z' = not $y */
	add	$x,		$dst		/* dst += x */
EOF
}
120
# Command-line handling:
#   $output  is the last argument if it looks like a file (it has an extension)
#   $flavour is the first argument if it doesn't look like a file
# Either may legitimately be absent (undef).  Guard the only places that
# would interpolate/match an undef with explicit defined() checks instead
# of blanket-disabling "uninitialized" warnings for the rest of the file,
# so genuine uninitialized-value bugs elsewhere still warn under -w.
my $output  = $#ARGV >= 0 && $ARGV[$#ARGV] =~ m|\.\w+$| ? pop   : undef;
my $flavour = $#ARGV >= 0 && $ARGV[0] !~ m|\.|          ? shift : undef;

# Win64 ABI output is selected either by flavour (nasm/masm/mingw64) or by
# an .asm output file name.
my $win64 = 0;
$win64 = 1 if ((defined $flavour && $flavour =~ /[nm]asm|mingw64/)
            || (defined $output  && $output  =~ /\.asm$/));

# Locate the perlasm translator relative to this script's own directory.
$0 =~ m/(.*[\/\\])[^\/\\]+$/; my $dir=$1; my $xlate;
( $xlate="${dir}x86_64-xlate.pl" and -f $xlate ) or
( $xlate="${dir}../../perlasm/x86_64-xlate.pl" and -f $xlate) or
die "can't locate x86_64-xlate.pl";

# Pipe everything we print through the translator; STDOUT is redirected so
# the final "print $code" at the bottom of the script goes through it.
open OUT, "| \"$^X\" \"$xlate\" " . (defined $flavour ? $flavour : '')
        . " \"" . (defined $output ? $output : '') . "\""
    or die "can't call $xlate: $!";
*STDOUT=*OUT;
137
# Function prologue for
#   ossl_md5_block_asm_data_order(MD5_CTX *ctx, const void *ptr, size_t nbr)
# Saves five callee-saved registers (rbp, rbx, r12, r14, r15 -- r13 is
# unused, hence the 40-byte frame restored in the epilogue), loads the
# chaining values A..D from the context, and computes the end-of-input
# pointer.  The heredoc text below is emitted verbatim into $code.
$code .= <<EOF;
.text
.align 16

.globl ossl_md5_block_asm_data_order
.type ossl_md5_block_asm_data_order,\@function,3
ossl_md5_block_asm_data_order:
.cfi_startproc
	push	%rbp
.cfi_push	%rbp
	push	%rbx
.cfi_push	%rbx
	push	%r12
.cfi_push	%r12
	push	%r14
.cfi_push	%r14
	push	%r15
.cfi_push	%r15
.Lprologue:

	# rdi = arg #1 (ctx, MD5_CTX pointer)
	# rsi = arg #2 (ptr, data pointer)
	# rdx = arg #3 (nbr, number of 16-word blocks to process)
	mov	%rdi,		%rbp	# rbp = ctx
	shl	\$6,		%rdx	# rdx = nbr in bytes
	lea	(%rsi,%rdx),	%rdi	# rdi = end
	mov	0*4(%rbp),	%eax	# eax = ctx->A
	mov	1*4(%rbp),	%ebx	# ebx = ctx->B
	mov	2*4(%rbp),	%ecx	# ecx = ctx->C
	mov	3*4(%rbp),	%edx	# edx = ctx->D
	# end is 'rdi'
	# ptr is 'rsi'
	# A is 'eax'
	# B is 'ebx'
	# C is 'ecx'
	# D is 'edx'

	cmp	%rdi,		%rsi		# cmp end with ptr
	je	.Lend				# jmp if ptr == end

	# BEGIN of loop over 16-word blocks
.Lloop:	# save old values of A, B, C, D
	mov	%eax,		%r8d
	mov	%ebx,		%r9d
	mov	%ecx,		%r14d
	mov	%edx,		%r15d
EOF
# The 64 MD5 steps (RFC 1321).  Each call appends one step's assembly to
# $code; pos=-1 marks a round's first step (extra register priming) and
# pos=1 its last (k_next then pre-loads the FOLLOWING round's first X[]
# word).  Arguments: (pos, dst, x, y, z, k_next, T_i, s) with the usual
# a/b/c/d register rotation between consecutive steps.

# Round 1: F(x,y,z); X indices 0..15 in order; shifts 7,12,17,22.
round1_step(-1,'%eax','%ebx','%ecx','%edx', '1','0xd76aa478', '7');
round1_step( 0,'%edx','%eax','%ebx','%ecx', '2','0xe8c7b756','12');
round1_step( 0,'%ecx','%edx','%eax','%ebx', '3','0x242070db','17');
round1_step( 0,'%ebx','%ecx','%edx','%eax', '4','0xc1bdceee','22');
round1_step( 0,'%eax','%ebx','%ecx','%edx', '5','0xf57c0faf', '7');
round1_step( 0,'%edx','%eax','%ebx','%ecx', '6','0x4787c62a','12');
round1_step( 0,'%ecx','%edx','%eax','%ebx', '7','0xa8304613','17');
round1_step( 0,'%ebx','%ecx','%edx','%eax', '8','0xfd469501','22');
round1_step( 0,'%eax','%ebx','%ecx','%edx', '9','0x698098d8', '7');
round1_step( 0,'%edx','%eax','%ebx','%ecx','10','0x8b44f7af','12');
round1_step( 0,'%ecx','%edx','%eax','%ebx','11','0xffff5bb1','17');
round1_step( 0,'%ebx','%ecx','%edx','%eax','12','0x895cd7be','22');
round1_step( 0,'%eax','%ebx','%ecx','%edx','13','0x6b901122', '7');
round1_step( 0,'%edx','%eax','%ebx','%ecx','14','0xfd987193','12');
round1_step( 0,'%ecx','%edx','%eax','%ebx','15','0xa679438e','17');
round1_step( 1,'%ebx','%ecx','%edx','%eax', '1','0x49b40821','22');

# Round 2: G(x,y,z); X indices (1+5i) mod 16; shifts 5,9,14,20.
round2_step(-1,'%eax','%ebx','%ecx','%edx', '6','0xf61e2562', '5');
round2_step( 0,'%edx','%eax','%ebx','%ecx','11','0xc040b340', '9');
round2_step( 0,'%ecx','%edx','%eax','%ebx', '0','0x265e5a51','14');
round2_step( 0,'%ebx','%ecx','%edx','%eax', '5','0xe9b6c7aa','20');
round2_step( 0,'%eax','%ebx','%ecx','%edx','10','0xd62f105d', '5');
round2_step( 0,'%edx','%eax','%ebx','%ecx','15', '0x2441453', '9');
round2_step( 0,'%ecx','%edx','%eax','%ebx', '4','0xd8a1e681','14');
round2_step( 0,'%ebx','%ecx','%edx','%eax', '9','0xe7d3fbc8','20');
round2_step( 0,'%eax','%ebx','%ecx','%edx','14','0x21e1cde6', '5');
round2_step( 0,'%edx','%eax','%ebx','%ecx', '3','0xc33707d6', '9');
round2_step( 0,'%ecx','%edx','%eax','%ebx', '8','0xf4d50d87','14');
round2_step( 0,'%ebx','%ecx','%edx','%eax','13','0x455a14ed','20');
round2_step( 0,'%eax','%ebx','%ecx','%edx', '2','0xa9e3e905', '5');
round2_step( 0,'%edx','%eax','%ebx','%ecx', '7','0xfcefa3f8', '9');
round2_step( 0,'%ecx','%edx','%eax','%ebx','12','0x676f02d9','14');
round2_step( 1,'%ebx','%ecx','%edx','%eax', '5','0x8d2a4c8a','20');

# Round 3: H(x,y,z); X indices (5+3i) mod 16; shifts 4,11,16,23.
round3_step(-1,'%eax','%ebx','%ecx','%edx', '8','0xfffa3942', '4');
round3_step( 0,'%edx','%eax','%ebx','%ecx','11','0x8771f681','11');
round3_step( 0,'%ecx','%edx','%eax','%ebx','14','0x6d9d6122','16');
round3_step( 0,'%ebx','%ecx','%edx','%eax', '1','0xfde5380c','23');
round3_step( 0,'%eax','%ebx','%ecx','%edx', '4','0xa4beea44', '4');
round3_step( 0,'%edx','%eax','%ebx','%ecx', '7','0x4bdecfa9','11');
round3_step( 0,'%ecx','%edx','%eax','%ebx','10','0xf6bb4b60','16');
round3_step( 0,'%ebx','%ecx','%edx','%eax','13','0xbebfbc70','23');
round3_step( 0,'%eax','%ebx','%ecx','%edx', '0','0x289b7ec6', '4');
round3_step( 0,'%edx','%eax','%ebx','%ecx', '3','0xeaa127fa','11');
round3_step( 0,'%ecx','%edx','%eax','%ebx', '6','0xd4ef3085','16');
round3_step( 0,'%ebx','%ecx','%edx','%eax', '9', '0x4881d05','23');
round3_step( 0,'%eax','%ebx','%ecx','%edx','12','0xd9d4d039', '4');
round3_step( 0,'%edx','%eax','%ebx','%ecx','15','0xe6db99e5','11');
round3_step( 0,'%ecx','%edx','%eax','%ebx', '2','0x1fa27cf8','16');
round3_step( 1,'%ebx','%ecx','%edx','%eax', '0','0xc4ac5665','23');

# Round 4: I(x,y,z); X indices (7i) mod 16; shifts 6,10,15,21.
round4_step(-1,'%eax','%ebx','%ecx','%edx', '7','0xf4292244', '6');
round4_step( 0,'%edx','%eax','%ebx','%ecx','14','0x432aff97','10');
round4_step( 0,'%ecx','%edx','%eax','%ebx', '5','0xab9423a7','15');
round4_step( 0,'%ebx','%ecx','%edx','%eax','12','0xfc93a039','21');
round4_step( 0,'%eax','%ebx','%ecx','%edx', '3','0x655b59c3', '6');
round4_step( 0,'%edx','%eax','%ebx','%ecx','10','0x8f0ccc92','10');
round4_step( 0,'%ecx','%edx','%eax','%ebx', '1','0xffeff47d','15');
round4_step( 0,'%ebx','%ecx','%edx','%eax', '8','0x85845dd1','21');
round4_step( 0,'%eax','%ebx','%ecx','%edx','15','0x6fa87e4f', '6');
round4_step( 0,'%edx','%eax','%ebx','%ecx', '6','0xfe2ce6e0','10');
round4_step( 0,'%ecx','%edx','%eax','%ebx','13','0xa3014314','15');
round4_step( 0,'%ebx','%ecx','%edx','%eax', '4','0x4e0811a1','21');
round4_step( 0,'%eax','%ebx','%ecx','%edx','11','0xf7537e82', '6');
round4_step( 0,'%edx','%eax','%ebx','%ecx', '2','0xbd3af235','10');
round4_step( 0,'%ecx','%edx','%eax','%ebx', '9','0x2ad7d2bb','15');
round4_step( 1,'%ebx','%ecx','%edx','%eax', '0','0xeb86d391','21');
# Close the compression loop (Davies-Meyer feed-forward, pointer advance,
# loop-back test), store the updated chaining values into the context,
# then restore the five saved registers from the 40-byte frame and
# return.  Appended verbatim to $code.
$code .= <<EOF;
	# add old values of A, B, C, D
	add	%r8d,	%eax
	add	%r9d,	%ebx
	add	%r14d,	%ecx
	add	%r15d,	%edx

	# loop control
	add	\$64,		%rsi		# ptr += 64
	cmp	%rdi,		%rsi		# cmp end with ptr
	jb	.Lloop				# jmp if ptr < end
	# END of loop over 16-word blocks

.Lend:
	mov	%eax,		0*4(%rbp)	# ctx->A = A
	mov	%ebx,		1*4(%rbp)	# ctx->B = B
	mov	%ecx,		2*4(%rbp)	# ctx->C = C
	mov	%edx,		3*4(%rbp)	# ctx->D = D

	mov	(%rsp),%r15
.cfi_restore	%r15
	mov	8(%rsp),%r14
.cfi_restore	%r14
	mov	16(%rsp),%r12
.cfi_restore	%r12
	mov	24(%rsp),%rbx
.cfi_restore	%rbx
	mov	32(%rsp),%rbp
.cfi_restore	%rbp
	add	\$40,%rsp
.cfi_adjust_cfa_offset	-40
.Lepilogue:
	ret
.cfi_endproc
.size ossl_md5_block_asm_data_order,.-ossl_md5_block_asm_data_order
EOF
288
# On Win64, additionally emit a structured-exception handler so the OS
# can unwind through ossl_md5_block_asm_data_order.
# se_handler is an EXCEPTION_DISPOSITION handler (EXCEPTION_RECORD *rec,
# ULONG64 frame, CONTEXT *context, DISPATCHER_CONTEXT *disp): if the
# fault RIP lies between .Lprologue and .Lepilogue it recovers the five
# saved registers from the 40-byte frame into the CONTEXT, then forwards
# to RtlVirtualUnwind.
if ($win64) {
# Win64 ABI registers carrying se_handler's four arguments.  $rec and
# $frame are unused below but kept to document the full signature.
my $rec="%rcx";
my $frame="%rdx";
my $context="%r8";
my $disp="%r9";

$code.=<<___;
.extern	__imp_RtlVirtualUnwind
.type	se_handler,\@abi-omnipotent
.align	16
se_handler:
	push	%rsi
	push	%rdi
	push	%rbx
	push	%rbp
	push	%r12
	push	%r13
	push	%r14
	push	%r15
	pushfq
	sub	\$64,%rsp

	mov	120($context),%rax	# pull context->Rax
	mov	248($context),%rbx	# pull context->Rip

	lea	.Lprologue(%rip),%r10
	cmp	%r10,%rbx		# context->Rip<.Lprologue
	jb	.Lin_prologue

	mov	152($context),%rax	# pull context->Rsp

	lea	.Lepilogue(%rip),%r10
	cmp	%r10,%rbx		# context->Rip>=.Lepilogue
	jae	.Lin_prologue

	lea	40(%rax),%rax

	mov	-8(%rax),%rbp
	mov	-16(%rax),%rbx
	mov	-24(%rax),%r12
	mov	-32(%rax),%r14
	mov	-40(%rax),%r15
	mov	%rbx,144($context)	# restore context->Rbx
	mov	%rbp,160($context)	# restore context->Rbp
	mov	%r12,216($context)	# restore context->R12
	mov	%r14,232($context)	# restore context->R14
	mov	%r15,240($context)	# restore context->R15

.Lin_prologue:
	mov	8(%rax),%rdi
	mov	16(%rax),%rsi
	mov	%rax,152($context)	# restore context->Rsp
	mov	%rsi,168($context)	# restore context->Rsi
	mov	%rdi,176($context)	# restore context->Rdi

	mov	40($disp),%rdi		# disp->ContextRecord
	mov	$context,%rsi		# context
	mov	\$154,%ecx		# sizeof(CONTEXT)
	.long	0xa548f3fc		# cld; rep movsq

	mov	$disp,%rsi
	xor	%rcx,%rcx		# arg1, UNW_FLAG_NHANDLER
	mov	8(%rsi),%rdx		# arg2, disp->ImageBase
	mov	0(%rsi),%r8		# arg3, disp->ControlPc
	mov	16(%rsi),%r9		# arg4, disp->FunctionEntry
	mov	40(%rsi),%r10		# disp->ContextRecord
	lea	56(%rsi),%r11		# &disp->HandlerData
	lea	24(%rsi),%r12		# &disp->EstablisherFrame
	mov	%r10,32(%rsp)		# arg5
	mov	%r11,40(%rsp)		# arg6
	mov	%r12,48(%rsp)		# arg7
	mov	%rcx,56(%rsp)		# arg8, (NULL)
	call	*__imp_RtlVirtualUnwind(%rip)

	mov	\$1,%eax		# ExceptionContinueSearch
	add	\$64,%rsp
	popfq
	pop	%r15
	pop	%r14
	pop	%r13
	pop	%r12
	pop	%rbp
	pop	%rbx
	pop	%rdi
	pop	%rsi
	ret
.size	se_handler,.-se_handler

.section	.pdata
.align	4
	.rva	.LSEH_begin_ossl_md5_block_asm_data_order
	.rva	.LSEH_end_ossl_md5_block_asm_data_order
	.rva	.LSEH_info_ossl_md5_block_asm_data_order

.section	.xdata
.align	8
.LSEH_info_ossl_md5_block_asm_data_order:
	.byte	9,0,0,0
	.rva	se_handler
___
}
392
# Emit the accumulated assembly; STDOUT was redirected into the xlate
# pipe above, so this is what actually drives the translator.
print $code;

# Flush and propagate any write error from the pipe (buffered errors
# only surface at close).
close STDOUT or die "error closing STDOUT: $!";
396