xref: /openssl/crypto/md5/asm/md5-sparcv9.pl (revision 54b40531)
1#! /usr/bin/env perl
2# Copyright 2012-2021 The OpenSSL Project Authors. All Rights Reserved.
3#
4# Licensed under the Apache License 2.0 (the "License").  You may not use
5# this file except in compliance with the License.  You can obtain a copy
6# in the file LICENSE in the source distribution or at
7# https://www.openssl.org/source/license.html
8
9
10# ====================================================================
11# Written by Andy Polyakov <appro@openssl.org> for the OpenSSL
12# project. The module is, however, dual licensed under OpenSSL and
13# CRYPTOGAMS licenses depending on where you obtain it. For further
14# details see http://www.openssl.org/~appro/cryptogams/.
15#
16# Hardware SPARC T4 support by David S. Miller.
17# ====================================================================
18
19# MD5 for SPARCv9, 6.9 cycles per byte on UltraSPARC, >40% faster than
20# code generated by Sun C 5.2.
21
22# SPARC T4 MD5 hardware achieves 3.20 cycles per byte, which is 2.1x
23# faster than software. Multi-process benchmark saturates at 12x
24# single-process result on 8-core processor, or ~11GBps per 2.85GHz
25# socket.
26
# $output is the last argument if it looks like a file (it has an extension)
$output = $#ARGV >= 0 && $ARGV[$#ARGV] =~ m|\.\w+$| ? pop : undef;

$output and open STDOUT,">$output";

# All arithmetic in this generator is integer-only; in particular the
# message-word index math in the round subs relies on truncating division.
use integer;

($ctx,$inp,$len)=("%i0","%i1","%i2");	# input arguments

# 64-bit values
# Eight 64-bit registers, each holding two little-endian 32-bit message
# words; @X[8] is a spare used to look one word ahead when input is
# misaligned (see the annulled ldxa in .Loop).
@X=("%o0","%o1","%o2","%o3","%o4","%o5","%o7","%g1","%g2");
$tx="%g3";				# scratch for extracting/aligning X words
($AB,$CD)=("%g4","%g5");		# packed A:B and C:D for the final accumulate

# 32-bit values
@V=($A,$B,$C,$D)=map("%l$_",(0..3));	# MD5 working variables a,b,c,d
($t1,$t2,$t3,$saved_asi)=map("%l$_",(4..7));
($shr,$shl1,$shl2)=("%i3","%i4","%i5");	# shift counts for re-aligning misaligned input
45
# MD5 additive constants T[1..64] = floor(abs(sin(i)) * 2^32) from RFC 1321,
# one group of 16 per round.  A trailing 0 is appended because every round
# generator prefetches @K[$i+1] for the NEXT step; at i==63 that reads K[64].
my @K=(	0xd76aa478,0xe8c7b756,0x242070db,0xc1bdceee,
	0xf57c0faf,0x4787c62a,0xa8304613,0xfd469501,
	0x698098d8,0x8b44f7af,0xffff5bb1,0x895cd7be,
	0x6b901122,0xfd987193,0xa679438e,0x49b40821,

	0xf61e2562,0xc040b340,0x265e5a51,0xe9b6c7aa,
	0xd62f105d,0x02441453,0xd8a1e681,0xe7d3fbc8,
	0x21e1cde6,0xc33707d6,0xf4d50d87,0x455a14ed,
	0xa9e3e905,0xfcefa3f8,0x676f02d9,0x8d2a4c8a,

	0xfffa3942,0x8771f681,0x6d9d6122,0xfde5380c,
	0xa4beea44,0x4bdecfa9,0xf6bb4b60,0xbebfbc70,
	0x289b7ec6,0xeaa127fa,0xd4ef3085,0x04881d05,
	0xd9d4d039,0xe6db99e5,0x1fa27cf8,0xc4ac5665,

	0xf4292244,0x432aff97,0xab9423a7,0xfc93a039,
	0x655b59c3,0x8f0ccc92,0xffeff47d,0x85845dd1,
	0x6fa87e4f,0xfe2ce6e0,0xa3014314,0x4e0811a1,
	0xf7537e82,0xbd3af235,0x2ad7d2bb,0xeb86d391, 0	);
65
# R0: emit one MD5 round-1 step, F(b,c,d) = (b&c)|(~b&d), for rounds 0..14
# (the last F round, i==15, is handled by R0_1).  On entry $t1 holds c^d and
# $t2 holds X[i]+K[i], both prepared by the previous step (or by the .Loop
# prologue for i==0); F is computed as ((c^d)&b)^d, which is the same value.
# Instructions with a leading space are the interleaved "second stream":
# on odd i they finish shifting the next packed 64-bit input word into
# alignment (misaligned-input case, using $shr/$shl1/$shl2), on even i they
# extract the odd-indexed 32-bit word from the upper half of the packed
# word; both branches also precompute X[i+1]+K[i+1] and b^c for the next step.
sub R0 {
  my ($i,$a,$b,$c,$d) = @_;
  my $rot = (7,12,17,22)[$i%4];	# round-1 rotate amounts (RFC 1321)
  my $j   = ($i+1)/2;		# 64-bit @X word holding X[i+1] ("use integer" truncates)

  if ($i&1) {
    $code.=<<___;
	 srlx	@X[$j],$shr,@X[$j]	! align X[`$i+1`]
	and	$b,$t1,$t1		! round $i
	 sllx	@X[$j+1],$shl1,$tx
	add	$t2,$a,$a
	 sllx	$tx,$shl2,$tx
	xor	$d,$t1,$t1
	 or	$tx,@X[$j],@X[$j]
	 sethi	%hi(@K[$i+1]),$t2
	add	$t1,$a,$a
	 or	$t2,%lo(@K[$i+1]),$t2
	sll	$a,$rot,$t3
	 add	@X[$j],$t2,$t2		! X[`$i+1`]+K[`$i+1`]
	srl	$a,32-$rot,$a
	add	$b,$t3,$t3
	 xor	 $b,$c,$t1
	add	$t3,$a,$a
___
  } else {
    $code.=<<___;
	 srlx	@X[$j],32,$tx		! extract X[`2*$j+1`]
	and	$b,$t1,$t1		! round $i
	add	$t2,$a,$a
	xor	$d,$t1,$t1
	 sethi	%hi(@K[$i+1]),$t2
	add	$t1,$a,$a
	 or	$t2,%lo(@K[$i+1]),$t2
	sll	$a,$rot,$t3
	 add	$tx,$t2,$t2		! X[`2*$j+1`]+K[`$i+1`]
	srl	$a,32-$rot,$a
	add	$b,$t3,$t3
	 xor	 $b,$c,$t1
	add	$t3,$a,$a
___
  }
}
108
# R0_1: the final round-1 (F) step, i==15.  Identical to the even-i branch of
# R0 except that the trailing "xor b,c" is replaced by "andn b,c": after the
# (a,b,c,d)->(d,a,b,c) rotation that value is c&~d, the first half of round
# 2's G(b,c,d) = (b&d)|(c&~d), which R1 then completes by OR-ing in b&d.
# X[1] (the upper half of @X[0]) is the word round 16 needs: (1+5*16)%16 == 1.
sub R0_1 {
  my ($i,$a,$b,$c,$d) = @_;
  my $rot = (7,12,17,22)[$i%4];

$code.=<<___;
	 srlx	@X[0],32,$tx		! extract X[1]
	and	$b,$t1,$t1		! round $i
	add	$t2,$a,$a
	xor	$d,$t1,$t1
	 sethi	%hi(@K[$i+1]),$t2
	add	$t1,$a,$a
	 or	$t2,%lo(@K[$i+1]),$t2
	sll	$a,$rot,$t3
	 add	$tx,$t2,$t2		! X[1]+K[`$i+1`]
	srl	$a,32-$rot,$a
	add	$b,$t3,$t3
	 andn	 $b,$c,$t1
	add	$t3,$a,$a
___
}
129
# R1: one round-2 (G) step, G(b,c,d) = (b&d)|(c&~d), rounds 16..31.  $t1
# arrives holding c&~d (the previous step's "andn b,c", seen before the
# register rotation), so "and b,d" + "or" completes G.  $j is the message
# word index for the NEXT step: (1+5*(i+1))%16 during round 2, switching to
# round 3's (5+3*(i+1))%16 schedule at i==31.  Odd-indexed words live in the
# upper half of a packed 64-bit @X element; the conditional heredoc extracts
# them into $tx, and the "$xi=$tx" side effect in its condition redirects
# $xi to that scratch register.  The trailing op seeds the next step's $t1:
# "andn b,c" for another G step, or "xor b,c" at i==31 for round 3's H.
sub R1 {
  my ($i,$a,$b,$c,$d) = @_;
  my $rot = (5,9,14,20)[$i%4];
  my $j   = $i<31 ? (1+5*($i+1))%16 : (5+3*($i+1))%16;
  my $xi  = @X[$j/2];

$code.=<<___ if ($j&1 && ($xi=$tx));
	 srlx	@X[$j/2],32,$xi		! extract X[$j]
___
$code.=<<___;
	and	$b,$d,$t3		! round $i
	add	$t2,$a,$a
	or	$t3,$t1,$t1
	 sethi	%hi(@K[$i+1]),$t2
	add	$t1,$a,$a
	 or	$t2,%lo(@K[$i+1]),$t2
	sll	$a,$rot,$t3
	 add	$xi,$t2,$t2		! X[$j]+K[`$i+1`]
	srl	$a,32-$rot,$a
	add	$b,$t3,$t3
	 `$i<31?"andn":"xor"`	 $b,$c,$t1
	add	$t3,$a,$a
___
}
154
# R2: one round-3 (H) step, H(b,c,d) = b^c^d, rounds 32..47.  $t1 arrives
# holding c^d (the previous step's "xor b,c" before rotation), so a single
# "xor b,t1" completes H.  $j is the next step's word index: (5+3*(i+1))%16
# during round 3, switching to round 4's (7*(i+1))%16 schedule at i==47.
# As in R1, odd word indices are extracted from the upper 32 bits into $tx
# (with $xi redirected to $tx by the side effect in the heredoc condition).
sub R2 {
  my ($i,$a,$b,$c,$d) = @_;
  my $rot = (4,11,16,23)[$i%4];
  my $j   = $i<47 ? (5+3*($i+1))%16 : (0+7*($i+1))%16;
  my $xi  = @X[$j/2];

$code.=<<___ if ($j&1 && ($xi=$tx));
	 srlx	@X[$j/2],32,$xi		! extract X[$j]
___
$code.=<<___;
	add	$t2,$a,$a		! round $i
	xor	$b,$t1,$t1
	 sethi	%hi(@K[$i+1]),$t2
	add	$t1,$a,$a
	 or	$t2,%lo(@K[$i+1]),$t2
	sll	$a,$rot,$t3
	 add	$xi,$t2,$t2		! X[$j]+K[`$i+1`]
	srl	$a,32-$rot,$a
	add	$b,$t3,$t3
	 xor	 $b,$c,$t1
	add	$t3,$a,$a
___
}
178
# R3: one round-4 (I) step, I(b,c,d) = c^(b|~d), rounds 48..63.  Unlike the
# other rounds, I is evaluated in full here: "orn b,d" gives b|~d, then
# "xor c" completes it.  Next step's word index: (7*(i+1))%16.  The trailing
# 0 element of @K covers the @K[$i+1] reference at i==63.
sub R3 {
  my ($i,$a,$b,$c,$d) = @_;
  my $rot = (6,10,15,21)[$i%4];
  my $j   = (0+7*($i+1))%16;
  my $xi  = @X[$j/2];

$code.=<<___;
	add	$t2,$a,$a		! round $i
___
$code.=<<___ if ($j&1 && ($xi=$tx));
	 srlx	@X[$j/2],32,$xi		! extract X[$j]
___
$code.=<<___;
	orn	$b,$d,$t1
	 sethi	%hi(@K[$i+1]),$t2
	xor	$c,$t1,$t1
	 or	$t2,%lo(@K[$i+1]),$t2
	add	$t1,$a,$a
	sll	$a,$rot,$t3
	 add	$xi,$t2,$t2		! X[$j]+K[`$i+1`]
	srl	$a,32-$rot,$a
	add	$b,$t3,$t3
	add	$t3,$a,$a
___
}
204
# Function prologue and the SPARC T4 hardware path: probe
# OPENSSL_sparcv9cap_P[1] for CFR_MD5 and, if present, run the data through
# the T4 MD5 instruction (.word 0x81b02800), using alignaddr/faligndata for
# misaligned input.  Otherwise fall through to .Lsoftware, which loads the
# context, derives the $shr/$shl1/$shl2 re-alignment shifts, and enters
# .Loop: load one 64-byte block little-endian, pack A:B and C:D, align X[0]
# and precompute X[0]+K[0] and c^d for the first R0 step.
$code.=<<___;
#ifndef __ASSEMBLER__
# define __ASSEMBLER__ 1
#endif
#include "crypto/sparc_arch.h"

#ifdef __arch64__
.register	%g2,#scratch
.register	%g3,#scratch
#endif

.section	".text",#alloc,#execinstr

#ifdef __PIC__
SPARC_PIC_THUNK(%g1)
#endif

.globl	ossl_md5_block_asm_data_order
.align	32
ossl_md5_block_asm_data_order:
	SPARC_LOAD_ADDRESS_LEAF(OPENSSL_sparcv9cap_P,%g1,%g5)
	ld	[%g1+4],%g1		! OPENSSL_sparcv9cap_P[1]

	andcc	%g1, CFR_MD5, %g0
	be	.Lsoftware
	nop

	mov	4, %g1
	andcc	%o1, 0x7, %g0
	lda	[%o0 + %g0]0x88, %f0		! load context
	lda	[%o0 + %g1]0x88, %f1
	add	%o0, 8, %o0
	lda	[%o0 + %g0]0x88, %f2
	lda	[%o0 + %g1]0x88, %f3
	bne,pn	%icc, .Lhwunaligned
	sub	%o0, 8, %o0

.Lhw_loop:
	ldd	[%o1 + 0x00], %f8
	ldd	[%o1 + 0x08], %f10
	ldd	[%o1 + 0x10], %f12
	ldd	[%o1 + 0x18], %f14
	ldd	[%o1 + 0x20], %f16
	ldd	[%o1 + 0x28], %f18
	ldd	[%o1 + 0x30], %f20
	subcc	%o2, 1, %o2		! done yet?
	ldd	[%o1 + 0x38], %f22
	add	%o1, 0x40, %o1
	prefetch [%o1 + 63], 20

	.word	0x81b02800		! MD5

	bne,pt	SIZE_T_CC, .Lhw_loop
	nop

.Lhwfinish:
	sta	%f0, [%o0 + %g0]0x88	! store context
	sta	%f1, [%o0 + %g1]0x88
	add	%o0, 8, %o0
	sta	%f2, [%o0 + %g0]0x88
	sta	%f3, [%o0 + %g1]0x88
	retl
	nop

.align	8
.Lhwunaligned:
	alignaddr %o1, %g0, %o1

	ldd	[%o1 + 0x00], %f10
.Lhwunaligned_loop:
	ldd	[%o1 + 0x08], %f12
	ldd	[%o1 + 0x10], %f14
	ldd	[%o1 + 0x18], %f16
	ldd	[%o1 + 0x20], %f18
	ldd	[%o1 + 0x28], %f20
	ldd	[%o1 + 0x30], %f22
	ldd	[%o1 + 0x38], %f24
	subcc	%o2, 1, %o2		! done yet?
	ldd	[%o1 + 0x40], %f26
	add	%o1, 0x40, %o1
	prefetch [%o1 + 63], 20

	faligndata %f10, %f12, %f8
	faligndata %f12, %f14, %f10
	faligndata %f14, %f16, %f12
	faligndata %f16, %f18, %f14
	faligndata %f18, %f20, %f16
	faligndata %f20, %f22, %f18
	faligndata %f22, %f24, %f20
	faligndata %f24, %f26, %f22

	.word	0x81b02800		! MD5

	bne,pt	SIZE_T_CC, .Lhwunaligned_loop
	for	%f26, %f26, %f10	! %f10=%f26

	ba	.Lhwfinish
	nop

.align	16
.Lsoftware:
	save	%sp,-STACK_FRAME,%sp

	rd	%asi,$saved_asi
	wr	%g0,0x88,%asi		! ASI_PRIMARY_LITTLE
	and	$inp,7,$shr
	andn	$inp,7,$inp

	sll	$shr,3,$shr		! *=8
	mov	56,$shl2
	ld	[$ctx+0],$A
	sub	$shl2,$shr,$shl2
	ld	[$ctx+4],$B
	and	$shl2,32,$shl1
	add	$shl2,8,$shl2
	ld	[$ctx+8],$C
	sub	$shl2,$shl1,$shl2	! shr+shl1+shl2==64
	ld	[$ctx+12],$D
	nop

.Loop:
	 cmp	$shr,0			! was inp aligned?
	ldxa	[$inp+0]%asi,@X[0]	! load little-endian input
	ldxa	[$inp+8]%asi,@X[1]
	ldxa	[$inp+16]%asi,@X[2]
	ldxa	[$inp+24]%asi,@X[3]
	ldxa	[$inp+32]%asi,@X[4]
	 sllx	$A,32,$AB		! pack A,B
	ldxa	[$inp+40]%asi,@X[5]
	 sllx	$C,32,$CD		! pack C,D
	ldxa	[$inp+48]%asi,@X[6]
	 or	$B,$AB,$AB
	ldxa	[$inp+56]%asi,@X[7]
	 or	$D,$CD,$CD
	bnz,a,pn	%icc,.+8
	ldxa	[$inp+64]%asi,@X[8]

	srlx	@X[0],$shr,@X[0]	! align X[0]
	sllx	@X[1],$shl1,$tx
	 sethi	%hi(@K[0]),$t2
	sllx	$tx,$shl2,$tx
	 or	$t2,%lo(@K[0]),$t2
	or	$tx,@X[0],@X[0]
	 xor	$C,$D,$t1
	 add	@X[0],$t2,$t2		! X[0]+K[0]
___
# Generate the 64 software rounds.  unshift(@V,pop(@V)) rotates the working
# variables (a,b,c,d) -> (d,a,b,c) between steps, as in RFC 1321.
	for ($i=0;$i<15;$i++)	{ &R0($i,@V);	unshift(@V,pop(@V)); }	# round 1 (F)
	for (;$i<16;$i++)	{ &R0_1($i,@V);	unshift(@V,pop(@V)); }	# last F step
	for (;$i<32;$i++)	{ &R1($i,@V);	unshift(@V,pop(@V)); }	# round 2 (G)
	for (;$i<48;$i++)	{ &R2($i,@V);	unshift(@V,pop(@V)); }	# round 3 (H)
	for (;$i<64;$i++)	{ &R3($i,@V);	unshift(@V,pop(@V)); }	# round 4 (I)
# Software-path epilogue: unpack the saved A,B from $AB and C,D from $CD,
# accumulate them into the working variables, loop while blocks remain,
# then store the context and return (restoring the caller's %asi).
$code.=<<___;
	srlx	$AB,32,$t1		! unpack A,B,C,D and accumulate
	add	$inp,64,$inp		! advance inp
	srlx	$CD,32,$t2
	add	$t1,$A,$A
	subcc	$len,1,$len		! done yet?
	add	$AB,$B,$B
	add	$t2,$C,$C
	add	$CD,$D,$D
	srl	$B,0,$B			! clruw	$B
	bne	SIZE_T_CC,.Loop
	srl	$D,0,$D			! clruw	$D

	st	$A,[$ctx+0]		! write out ctx
	st	$B,[$ctx+4]
	st	$C,[$ctx+8]
	st	$D,[$ctx+12]

	wr	%g0,$saved_asi,%asi
	ret
	restore
.type	ossl_md5_block_asm_data_order,#function
.size	ossl_md5_block_asm_data_order,(.-ossl_md5_block_asm_data_order)

.asciz	"MD5 block transform for SPARCv9, CRYPTOGAMS by <appro\@openssl.org>"
.align	4
___
383
# The purpose of these subroutines is to encode VIS instructions explicitly,
# so that the module can be compiled without enabling VIS extensions on the
# compiler command line, e.g. with -xarch=v9 instead of -xarch=v9a.  The
# idea is to preserve the option of producing a "universal" binary and to
# let the programmer detect at run-time whether the current CPU is VIS-capable.
# Encode a recognized VIS instruction (faligndata, for) as a raw ".word"
# directive so the module assembles without VIS support.  Arguments are the
# mnemonic and its three %f-register operands as captured by the regex in
# the output loop below; anything unrecognized is returned unchanged.
sub unvis {
my ($mnemonic,$rs1,$rs2,$rd)=@_;
# BUGFIX: was "my $ref,$opf;", which declared only $ref lexically and let
# $opf fall through to an undeclared package global ("parenthesis missing
# around my list").
my ($ref,$opf);
my %visopf = (	"faligndata"	=> 0x048,
		"for"		=> 0x07c	);

    $ref = "$mnemonic\t$rs1,$rs2,$rd";

    if ($opf=$visopf{$mnemonic}) {
	foreach ($rs1,$rs2,$rd) {
	    return $ref if (!/%f([0-9]{1,2})/);
	    $_=$1;
	    if ($1>=32) {
		return $ref if ($1&1);	# odd upper registers are not encodable
		# re-encode for upper double register addressing
		$_=($1|$1>>5)&31;
	    }
	}

	return	sprintf ".word\t0x%08x !%s",
			0x81b00000|$rd<<25|$rs1<<14|$opf<<5|$rs2,
			$ref;
    } else {
	return $ref;
    }
}
# Encode "alignaddr rs1,rs2,rd" (integer registers) as a raw ".word"
# directive.  Register names are mapped to their numbers via the window
# bias table (%g->0, %o->8, %l->16, %i->24); if any operand is not an
# integer register, the instruction text is returned untouched.
sub unalignaddr {
    my ($mnemonic, $rs1, $rs2, $rd) = @_;
    my %bias = (g => 0, o => 8, l => 16, i => 24);
    my $asm  = "$mnemonic\t$rs1,$rs2,$rd";

    my @num;
    for my $operand ($rs1, $rs2, $rd) {
	return $asm unless $operand =~ /%([goli])([0-7])/;
	push @num, $bias{$1} + $2;
    }
    my ($r1, $r2, $r3) = @num;

    return sprintf ".word\t0x%08x !%s",
		   0x81b00300 | $r3 << 25 | $r1 << 14 | $r2,
		   $asm;
}
428
# Post-process and print the generated code line by line:
foreach (split("\n",$code)) {
	# evaluate the `...` expressions embedded in the assembly templates
	s/\`([^\`]*)\`/eval $1/ge;

	# replace VIS instructions with explicit numeric encodings
	s/\b(f[^\s]*)\s+(%f[0-9]{1,2}),\s*(%f[0-9]{1,2}),\s*(%f[0-9]{1,2})/
		&unvis($1,$2,$3,$4)
	 /ge;
	s/\b(alignaddr)\s+(%[goli][0-7]),\s*(%[goli][0-7]),\s*(%[goli][0-7])/
		&unalignaddr($1,$2,$3,$4)
	 /ge;

	print $_,"\n";
}

close STDOUT or die "error closing STDOUT: $!";
442close STDOUT or die "error closing STDOUT: $!";
443