Diffstat (limited to 'openssl/trunk/crypto/md5/asm')
-rw-r--r-- | openssl/trunk/crypto/md5/asm/md5-586.pl    |  306
-rw-r--r-- | openssl/trunk/crypto/md5/asm/md5-sparcv9.S | 1031
-rwxr-xr-x | openssl/trunk/crypto/md5/asm/md5-x86_64.pl |  245
3 files changed, 1582 insertions, 0 deletions
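All three files below unroll the same MD5 compression function: each of the 64 steps combines one of four boolean functions (F, G, H, I) with a message word X[k] and a round constant T_i, rotates the sum left by a per-step amount s, and adds the result into the next state word. As a reading aid, here is a minimal Perl sketch of a single round-1 (F) step; the helper names and the all-zero message block are purely illustrative and not part of the patch, but the per-step formula and the F = d ^ (b & (c ^ d)) shortcut match what the assembly in all three files computes.

#!/usr/bin/perl
# Sketch of one MD5 round-1 step:
#   a = b + ((a + F(b,c,d) + X[k] + T_i) <<< s)
# where F(b,c,d) = (b & c) | (~b & d), computed here (as in the asm below)
# as d ^ (b & (c ^ d)) to save one operation.
use strict;
use warnings;

sub rotl32 {
    my ($v, $s) = @_;
    return (($v << $s) | ($v >> (32 - $s))) & 0xffffffff;
}

sub round1_step {
    my ($a, $b, $c, $d, $xk, $ti, $s) = @_;
    my $f = $d ^ ($b & ($c ^ $d));                       # F(b,c,d)
    return ($b + rotl32(($a + $f + $xk + $ti) & 0xffffffff, $s)) & 0xffffffff;
}

# First step of a block, matching "&R0(-2,$A,$B,$C,$D,$X, 0, 7,0xd76aa478)"
# in md5-586.pl; the message words X[0..15] are zero here only for illustration.
my @X = (0) x 16;
my ($A, $B, $C, $D) = (0x67452301, 0xefcdab89, 0x98badcfe, 0x10325476);
$A = round1_step($A, $B, $C, $D, $X[0], 0xd76aa478, 7);
printf "A after step 1: %08x\n", $A;

Rounds 2-4 differ only in the boolean function (G, H, I) and the order in which message words are consumed; md5-586.pl encodes that order in its @xo table, while the SPARC and x86_64 files hard-code the X(i)/X[k] loads per step.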
diff --git a/openssl/trunk/crypto/md5/asm/md5-586.pl b/openssl/trunk/crypto/md5/asm/md5-586.pl new file mode 100644 index 00000000..fa3fa3be --- /dev/null +++ b/openssl/trunk/crypto/md5/asm/md5-586.pl @@ -0,0 +1,306 @@ +#!/usr/local/bin/perl + +# Normal is the +# md5_block_x86(MD5_CTX *c, ULONG *X); +# version, non-normal is the +# md5_block_x86(MD5_CTX *c, ULONG *X,int blocks); + +$normal=0; + +push(@INC,"perlasm","../../perlasm"); +require "x86asm.pl"; + +&asm_init($ARGV[0],$0); + +$A="eax"; +$B="ebx"; +$C="ecx"; +$D="edx"; +$tmp1="edi"; +$tmp2="ebp"; +$X="esi"; + +# What we need to load into $tmp for the next round +%Ltmp1=("R0",&Np($C), "R1",&Np($C), "R2",&Np($C), "R3",&Np($D)); +@xo=( + 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, # R0 + 1, 6, 11, 0, 5, 10, 15, 4, 9, 14, 3, 8, 13, 2, 7, 12, # R1 + 5, 8, 11, 14, 1, 4, 7, 10, 13, 0, 3, 6, 9, 12, 15, 2, # R2 + 0, 7, 14, 5, 12, 3, 10, 1, 8, 15, 6, 13, 4, 11, 2, 9, # R3 + ); + +&md5_block("md5_block_asm_host_order"); +&asm_finish(); + +sub Np + { + local($p)=@_; + local(%n)=($A,$D,$B,$A,$C,$B,$D,$C); + return($n{$p}); + } + +sub R0 + { + local($pos,$a,$b,$c,$d,$K,$ki,$s,$t)=@_; + + &mov($tmp1,$C) if $pos < 0; + &mov($tmp2,&DWP($xo[$ki]*4,$K,"",0)) if $pos < 0; # very first one + + # body proper + + &comment("R0 $ki"); + &xor($tmp1,$d); # F function - part 2 + + &and($tmp1,$b); # F function - part 3 + &lea($a,&DWP($t,$a,$tmp2,1)); + + &xor($tmp1,$d); # F function - part 4 + + &add($a,$tmp1); + &mov($tmp1,&Np($c)) if $pos < 1; # next tmp1 for R0 + &mov($tmp1,&Np($c)) if $pos == 1; # next tmp1 for R1 + + &rotl($a,$s); + + &mov($tmp2,&DWP($xo[$ki+1]*4,$K,"",0)) if ($pos != 2); + + &add($a,$b); + } + +sub R1 + { + local($pos,$a,$b,$c,$d,$K,$ki,$s,$t)=@_; + + &comment("R1 $ki"); + + &lea($a,&DWP($t,$a,$tmp2,1)); + + &xor($tmp1,$b); # G function - part 2 + &and($tmp1,$d); # G function - part 3 + + &mov($tmp2,&DWP($xo[$ki+1]*4,$K,"",0)) if ($pos != 2); + &xor($tmp1,$c); # G function - part 4 + + &add($a,$tmp1); + &mov($tmp1,&Np($c)) if $pos < 1; # G function - part 1 + &mov($tmp1,&Np($c)) if $pos == 1; # G function - part 1 + + &rotl($a,$s); + + &add($a,$b); + } + +sub R2 + { + local($n,$pos,$a,$b,$c,$d,$K,$ki,$s,$t)=@_; + # This one is different, only 3 logical operations + +if (($n & 1) == 0) + { + &comment("R2 $ki"); + # make sure to do 'D' first, not 'B', else we clash with + # the last add from the previous round. + + &xor($tmp1,$d); # H function - part 2 + + &xor($tmp1,$b); # H function - part 3 + &lea($a,&DWP($t,$a,$tmp2,1)); + + &add($a,$tmp1); + + &rotl($a,$s); + + &mov($tmp2,&DWP($xo[$ki+1]*4,$K,"",0)); + &mov($tmp1,&Np($c)); + } +else + { + &comment("R2 $ki"); + # make sure to do 'D' first, not 'B', else we clash with + # the last add from the previous round. 
+ + &lea($a,&DWP($t,$a,$tmp2,1)); + + &add($b,$c); # MOVED FORWARD + &xor($tmp1,$d); # H function - part 2 + + &xor($tmp1,$b); # H function - part 3 + &mov($tmp2,&DWP($xo[$ki+1]*4,$K,"",0)) if ($pos != 2); + + &add($a,$tmp1); + &mov($tmp1,&Np($c)) if $pos < 1; # H function - part 1 + &mov($tmp1,-1) if $pos == 1; # I function - part 1 + + &rotl($a,$s); + + &add($a,$b); + } + } + +sub R3 + { + local($pos,$a,$b,$c,$d,$K,$ki,$s,$t)=@_; + + &comment("R3 $ki"); + + # ¬($tmp1) + &xor($tmp1,$d) if $pos < 0; # I function - part 2 + + &or($tmp1,$b); # I function - part 3 + &lea($a,&DWP($t,$a,$tmp2,1)); + + &xor($tmp1,$c); # I function - part 4 + &mov($tmp2,&DWP($xo[$ki+1]*4,$K,"",0)) if $pos != 2; # load X/k value + &mov($tmp2,&wparam(0)) if $pos == 2; + + &add($a,$tmp1); + &mov($tmp1,-1) if $pos < 1; # H function - part 1 + &add($K,64) if $pos >=1 && !$normal; + + &rotl($a,$s); + + &xor($tmp1,&Np($d)) if $pos <= 0; # I function - part = first time + &mov($tmp1,&DWP( 0,$tmp2,"",0)) if $pos > 0; + &add($a,$b); + } + + +sub md5_block + { + local($name)=@_; + + &function_begin_B($name,"",3); + + # parameter 1 is the MD5_CTX structure. + # A 0 + # B 4 + # C 8 + # D 12 + + &push("esi"); + &push("edi"); + &mov($tmp1, &wparam(0)); # edi + &mov($X, &wparam(1)); # esi + &mov($C, &wparam(2)); + &push("ebp"); + &shl($C, 6); + &push("ebx"); + &add($C, $X); # offset we end at + &sub($C, 64); + &mov($A, &DWP( 0,$tmp1,"",0)); + &push($C); # Put on the TOS + &mov($B, &DWP( 4,$tmp1,"",0)); + &mov($C, &DWP( 8,$tmp1,"",0)); + &mov($D, &DWP(12,$tmp1,"",0)); + + &set_label("start") unless $normal; + &comment(""); + &comment("R0 section"); + + &R0(-2,$A,$B,$C,$D,$X, 0, 7,0xd76aa478); + &R0( 0,$D,$A,$B,$C,$X, 1,12,0xe8c7b756); + &R0( 0,$C,$D,$A,$B,$X, 2,17,0x242070db); + &R0( 0,$B,$C,$D,$A,$X, 3,22,0xc1bdceee); + &R0( 0,$A,$B,$C,$D,$X, 4, 7,0xf57c0faf); + &R0( 0,$D,$A,$B,$C,$X, 5,12,0x4787c62a); + &R0( 0,$C,$D,$A,$B,$X, 6,17,0xa8304613); + &R0( 0,$B,$C,$D,$A,$X, 7,22,0xfd469501); + &R0( 0,$A,$B,$C,$D,$X, 8, 7,0x698098d8); + &R0( 0,$D,$A,$B,$C,$X, 9,12,0x8b44f7af); + &R0( 0,$C,$D,$A,$B,$X,10,17,0xffff5bb1); + &R0( 0,$B,$C,$D,$A,$X,11,22,0x895cd7be); + &R0( 0,$A,$B,$C,$D,$X,12, 7,0x6b901122); + &R0( 0,$D,$A,$B,$C,$X,13,12,0xfd987193); + &R0( 0,$C,$D,$A,$B,$X,14,17,0xa679438e); + &R0( 1,$B,$C,$D,$A,$X,15,22,0x49b40821); + + &comment(""); + &comment("R1 section"); + &R1(-1,$A,$B,$C,$D,$X,16, 5,0xf61e2562); + &R1( 0,$D,$A,$B,$C,$X,17, 9,0xc040b340); + &R1( 0,$C,$D,$A,$B,$X,18,14,0x265e5a51); + &R1( 0,$B,$C,$D,$A,$X,19,20,0xe9b6c7aa); + &R1( 0,$A,$B,$C,$D,$X,20, 5,0xd62f105d); + &R1( 0,$D,$A,$B,$C,$X,21, 9,0x02441453); + &R1( 0,$C,$D,$A,$B,$X,22,14,0xd8a1e681); + &R1( 0,$B,$C,$D,$A,$X,23,20,0xe7d3fbc8); + &R1( 0,$A,$B,$C,$D,$X,24, 5,0x21e1cde6); + &R1( 0,$D,$A,$B,$C,$X,25, 9,0xc33707d6); + &R1( 0,$C,$D,$A,$B,$X,26,14,0xf4d50d87); + &R1( 0,$B,$C,$D,$A,$X,27,20,0x455a14ed); + &R1( 0,$A,$B,$C,$D,$X,28, 5,0xa9e3e905); + &R1( 0,$D,$A,$B,$C,$X,29, 9,0xfcefa3f8); + &R1( 0,$C,$D,$A,$B,$X,30,14,0x676f02d9); + &R1( 1,$B,$C,$D,$A,$X,31,20,0x8d2a4c8a); + + &comment(""); + &comment("R2 section"); + &R2( 0,-1,$A,$B,$C,$D,$X,32, 4,0xfffa3942); + &R2( 1, 0,$D,$A,$B,$C,$X,33,11,0x8771f681); + &R2( 2, 0,$C,$D,$A,$B,$X,34,16,0x6d9d6122); + &R2( 3, 0,$B,$C,$D,$A,$X,35,23,0xfde5380c); + &R2( 4, 0,$A,$B,$C,$D,$X,36, 4,0xa4beea44); + &R2( 5, 0,$D,$A,$B,$C,$X,37,11,0x4bdecfa9); + &R2( 6, 0,$C,$D,$A,$B,$X,38,16,0xf6bb4b60); + &R2( 7, 0,$B,$C,$D,$A,$X,39,23,0xbebfbc70); + &R2( 8, 0,$A,$B,$C,$D,$X,40, 4,0x289b7ec6); + &R2( 9, 
0,$D,$A,$B,$C,$X,41,11,0xeaa127fa); + &R2(10, 0,$C,$D,$A,$B,$X,42,16,0xd4ef3085); + &R2(11, 0,$B,$C,$D,$A,$X,43,23,0x04881d05); + &R2(12, 0,$A,$B,$C,$D,$X,44, 4,0xd9d4d039); + &R2(13, 0,$D,$A,$B,$C,$X,45,11,0xe6db99e5); + &R2(14, 0,$C,$D,$A,$B,$X,46,16,0x1fa27cf8); + &R2(15, 1,$B,$C,$D,$A,$X,47,23,0xc4ac5665); + + &comment(""); + &comment("R3 section"); + &R3(-1,$A,$B,$C,$D,$X,48, 6,0xf4292244); + &R3( 0,$D,$A,$B,$C,$X,49,10,0x432aff97); + &R3( 0,$C,$D,$A,$B,$X,50,15,0xab9423a7); + &R3( 0,$B,$C,$D,$A,$X,51,21,0xfc93a039); + &R3( 0,$A,$B,$C,$D,$X,52, 6,0x655b59c3); + &R3( 0,$D,$A,$B,$C,$X,53,10,0x8f0ccc92); + &R3( 0,$C,$D,$A,$B,$X,54,15,0xffeff47d); + &R3( 0,$B,$C,$D,$A,$X,55,21,0x85845dd1); + &R3( 0,$A,$B,$C,$D,$X,56, 6,0x6fa87e4f); + &R3( 0,$D,$A,$B,$C,$X,57,10,0xfe2ce6e0); + &R3( 0,$C,$D,$A,$B,$X,58,15,0xa3014314); + &R3( 0,$B,$C,$D,$A,$X,59,21,0x4e0811a1); + &R3( 0,$A,$B,$C,$D,$X,60, 6,0xf7537e82); + &R3( 0,$D,$A,$B,$C,$X,61,10,0xbd3af235); + &R3( 0,$C,$D,$A,$B,$X,62,15,0x2ad7d2bb); + &R3( 2,$B,$C,$D,$A,$X,63,21,0xeb86d391); + + # &mov($tmp2,&wparam(0)); # done in the last R3 + # &mov($tmp1, &DWP( 0,$tmp2,"",0)); # done is the last R3 + + &add($A,$tmp1); + &mov($tmp1, &DWP( 4,$tmp2,"",0)); + + &add($B,$tmp1); + &mov($tmp1, &DWP( 8,$tmp2,"",0)); + + &add($C,$tmp1); + &mov($tmp1, &DWP(12,$tmp2,"",0)); + + &add($D,$tmp1); + &mov(&DWP( 0,$tmp2,"",0),$A); + + &mov(&DWP( 4,$tmp2,"",0),$B); + &mov($tmp1,&swtmp(0)) unless $normal; + + &mov(&DWP( 8,$tmp2,"",0),$C); + &mov(&DWP(12,$tmp2,"",0),$D); + + &cmp($tmp1,$X) unless $normal; # check count + &jae(&label("start")) unless $normal; + + &pop("eax"); # pop the temp variable off the stack + &pop("ebx"); + &pop("ebp"); + &pop("edi"); + &pop("esi"); + &ret(); + &function_end_B($name); + } + diff --git a/openssl/trunk/crypto/md5/asm/md5-sparcv9.S b/openssl/trunk/crypto/md5/asm/md5-sparcv9.S new file mode 100644 index 00000000..db45aa4c --- /dev/null +++ b/openssl/trunk/crypto/md5/asm/md5-sparcv9.S @@ -0,0 +1,1031 @@ +.ident "md5-sparcv9.S, Version 1.0" +.ident "SPARC V9 ISA artwork by Andy Polyakov <appro@fy.chalmers.se>" +.file "md5-sparcv9.S" + +/* + * ==================================================================== + * Copyright (c) 1999 Andy Polyakov <appro@fy.chalmers.se>. + * + * Rights for redistribution and usage in source and binary forms are + * granted as long as above copyright notices are retained. Warranty + * of any kind is (of course:-) disclaimed. + * ==================================================================== + */ + +/* + * This is my modest contribution to OpenSSL project (see + * http://www.openssl.org/ for more information about it) and is an + * assembler implementation of MD5 block hash function. I've hand-coded + * this for the sole reason to reach UltraSPARC-specific "load in + * little-endian byte order" instruction. This gives up to 15% + * performance improvement for cases when input message is aligned at + * 32 bits boundary. The module was tested under both 32 *and* 64 bit + * kernels. For updates see http://fy.chalmers.se/~appro/hpe/. 
+ * + * To compile with SC4.x/SC5.x: + * + * cc -xarch=v[9|8plus] -DOPENSSL_SYSNAME_ULTRASPARC -DMD5_BLOCK_DATA_ORDER \ + * -c md5-sparcv9.S + * + * and with gcc: + * + * gcc -mcpu=ultrasparc -DOPENSSL_SYSNAME_ULTRASPARC -DMD5_BLOCK_DATA_ORDER \ + * -c md5-sparcv9.S + * + * or if above fails (it does if you have gas): + * + * gcc -E -DOPENSSL_SYSNAMEULTRASPARC -DMD5_BLOCK_DATA_ORDER md5_block.sparc.S | \ + * as -xarch=v8plus /dev/fd/0 -o md5-sparcv9.o + */ + +#include <openssl/e_os2.h> + +#define A %o0 +#define B %o1 +#define C %o2 +#define D %o3 +#define T1 %o4 +#define T2 %o5 + +#define R0 %l0 +#define R1 %l1 +#define R2 %l2 +#define R3 %l3 +#define R4 %l4 +#define R5 %l5 +#define R6 %l6 +#define R7 %l7 +#define R8 %i3 +#define R9 %i4 +#define R10 %i5 +#define R11 %g1 +#define R12 %g2 +#define R13 %g3 +#define RX %g4 + +#define Aptr %i0+0 +#define Bptr %i0+4 +#define Cptr %i0+8 +#define Dptr %i0+12 + +#define Aval R5 /* those not used at the end of the last round */ +#define Bval R6 +#define Cval R7 +#define Dval R8 + +#if defined(MD5_BLOCK_DATA_ORDER) +# if defined(OPENSSL_SYSNAME_ULTRASPARC) +# define LOAD lda +# define X(i) [%i1+i*4]%asi +# define md5_block md5_block_asm_data_order_aligned +# define ASI_PRIMARY_LITTLE 0x88 +# else +# error "MD5_BLOCK_DATA_ORDER is supported only on UltraSPARC!" +# endif +#else +# define LOAD ld +# define X(i) [%i1+i*4] +# define md5_block md5_block_asm_host_order +#endif + +.section ".text",#alloc,#execinstr + +#if defined(__SUNPRO_C) && defined(__sparcv9) + /* They've said -xarch=v9 at command line */ + .register %g2,#scratch + .register %g3,#scratch +# define FRAME -192 +#elif defined(__GNUC__) && defined(__arch64__) + /* They've said -m64 at command line */ + .register %g2,#scratch + .register %g3,#scratch +# define FRAME -192 +#else +# define FRAME -96 +#endif + +.align 32 + +.global md5_block +md5_block: + save %sp,FRAME,%sp + + ld [Dptr],D + ld [Cptr],C + ld [Bptr],B + ld [Aptr],A +#ifdef ASI_PRIMARY_LITTLE + rd %asi,%o7 ! How dare I? 
Well, I just do:-) + wr %g0,ASI_PRIMARY_LITTLE,%asi +#endif + LOAD X(0),R0 + +.Lmd5_block_loop: + +!!!!!!!!Round 0 + + xor C,D,T1 + sethi %hi(0xd76aa478),T2 + and T1,B,T1 + or T2,%lo(0xd76aa478),T2 != + xor T1,D,T1 + add T1,R0,T1 + LOAD X(1),R1 + add T1,T2,T1 != + add A,T1,A + sll A,7,T2 + srl A,32-7,A + or A,T2,A != + xor B,C,T1 + add A,B,A + + sethi %hi(0xe8c7b756),T2 + and T1,A,T1 != + or T2,%lo(0xe8c7b756),T2 + xor T1,C,T1 + LOAD X(2),R2 + add T1,R1,T1 != + add T1,T2,T1 + add D,T1,D + sll D,12,T2 + srl D,32-12,D != + or D,T2,D + xor A,B,T1 + add D,A,D + + sethi %hi(0x242070db),T2 != + and T1,D,T1 + or T2,%lo(0x242070db),T2 + xor T1,B,T1 + add T1,R2,T1 != + LOAD X(3),R3 + add T1,T2,T1 + add C,T1,C + sll C,17,T2 != + srl C,32-17,C + or C,T2,C + xor D,A,T1 + add C,D,C != + + sethi %hi(0xc1bdceee),T2 + and T1,C,T1 + or T2,%lo(0xc1bdceee),T2 + xor T1,A,T1 != + add T1,R3,T1 + LOAD X(4),R4 + add T1,T2,T1 + add B,T1,B != + sll B,22,T2 + srl B,32-22,B + or B,T2,B + xor C,D,T1 != + add B,C,B + + sethi %hi(0xf57c0faf),T2 + and T1,B,T1 + or T2,%lo(0xf57c0faf),T2 != + xor T1,D,T1 + add T1,R4,T1 + LOAD X(5),R5 + add T1,T2,T1 != + add A,T1,A + sll A,7,T2 + srl A,32-7,A + or A,T2,A != + xor B,C,T1 + add A,B,A + + sethi %hi(0x4787c62a),T2 + and T1,A,T1 != + or T2,%lo(0x4787c62a),T2 + xor T1,C,T1 + LOAD X(6),R6 + add T1,R5,T1 != + add T1,T2,T1 + add D,T1,D + sll D,12,T2 + srl D,32-12,D != + or D,T2,D + xor A,B,T1 + add D,A,D + + sethi %hi(0xa8304613),T2 != + and T1,D,T1 + or T2,%lo(0xa8304613),T2 + xor T1,B,T1 + add T1,R6,T1 != + LOAD X(7),R7 + add T1,T2,T1 + add C,T1,C + sll C,17,T2 != + srl C,32-17,C + or C,T2,C + xor D,A,T1 + add C,D,C != + + sethi %hi(0xfd469501),T2 + and T1,C,T1 + or T2,%lo(0xfd469501),T2 + xor T1,A,T1 != + add T1,R7,T1 + LOAD X(8),R8 + add T1,T2,T1 + add B,T1,B != + sll B,22,T2 + srl B,32-22,B + or B,T2,B + xor C,D,T1 != + add B,C,B + + sethi %hi(0x698098d8),T2 + and T1,B,T1 + or T2,%lo(0x698098d8),T2 != + xor T1,D,T1 + add T1,R8,T1 + LOAD X(9),R9 + add T1,T2,T1 != + add A,T1,A + sll A,7,T2 + srl A,32-7,A + or A,T2,A != + xor B,C,T1 + add A,B,A + + sethi %hi(0x8b44f7af),T2 + and T1,A,T1 != + or T2,%lo(0x8b44f7af),T2 + xor T1,C,T1 + LOAD X(10),R10 + add T1,R9,T1 != + add T1,T2,T1 + add D,T1,D + sll D,12,T2 + srl D,32-12,D != + or D,T2,D + xor A,B,T1 + add D,A,D + + sethi %hi(0xffff5bb1),T2 != + and T1,D,T1 + or T2,%lo(0xffff5bb1),T2 + xor T1,B,T1 + add T1,R10,T1 != + LOAD X(11),R11 + add T1,T2,T1 + add C,T1,C + sll C,17,T2 != + srl C,32-17,C + or C,T2,C + xor D,A,T1 + add C,D,C != + + sethi %hi(0x895cd7be),T2 + and T1,C,T1 + or T2,%lo(0x895cd7be),T2 + xor T1,A,T1 != + add T1,R11,T1 + LOAD X(12),R12 + add T1,T2,T1 + add B,T1,B != + sll B,22,T2 + srl B,32-22,B + or B,T2,B + xor C,D,T1 != + add B,C,B + + sethi %hi(0x6b901122),T2 + and T1,B,T1 + or T2,%lo(0x6b901122),T2 != + xor T1,D,T1 + add T1,R12,T1 + LOAD X(13),R13 + add T1,T2,T1 != + add A,T1,A + sll A,7,T2 + srl A,32-7,A + or A,T2,A != + xor B,C,T1 + add A,B,A + + sethi %hi(0xfd987193),T2 + and T1,A,T1 != + or T2,%lo(0xfd987193),T2 + xor T1,C,T1 + LOAD X(14),RX + add T1,R13,T1 != + add T1,T2,T1 + add D,T1,D + sll D,12,T2 + srl D,32-12,D != + or D,T2,D + xor A,B,T1 + add D,A,D + + sethi %hi(0xa679438e),T2 != + and T1,D,T1 + or T2,%lo(0xa679438e),T2 + xor T1,B,T1 + add T1,RX,T1 != + LOAD X(15),RX + add T1,T2,T1 + add C,T1,C + sll C,17,T2 != + srl C,32-17,C + or C,T2,C + xor D,A,T1 + add C,D,C != + + sethi %hi(0x49b40821),T2 + and T1,C,T1 + or T2,%lo(0x49b40821),T2 + xor T1,A,T1 != + add T1,RX,T1 + !pre-LOADed X(1),R1 + add T1,T2,T1 
+ add B,T1,B + sll B,22,T2 != + srl B,32-22,B + or B,T2,B + add B,C,B + +!!!!!!!!Round 1 + + xor B,C,T1 != + sethi %hi(0xf61e2562),T2 + and T1,D,T1 + or T2,%lo(0xf61e2562),T2 + xor T1,C,T1 != + add T1,R1,T1 + !pre-LOADed X(6),R6 + add T1,T2,T1 + add A,T1,A + sll A,5,T2 != + srl A,32-5,A + or A,T2,A + add A,B,A + + xor A,B,T1 != + sethi %hi(0xc040b340),T2 + and T1,C,T1 + or T2,%lo(0xc040b340),T2 + xor T1,B,T1 != + add T1,R6,T1 + !pre-LOADed X(11),R11 + add T1,T2,T1 + add D,T1,D + sll D,9,T2 != + srl D,32-9,D + or D,T2,D + add D,A,D + + xor D,A,T1 != + sethi %hi(0x265e5a51),T2 + and T1,B,T1 + or T2,%lo(0x265e5a51),T2 + xor T1,A,T1 != + add T1,R11,T1 + !pre-LOADed X(0),R0 + add T1,T2,T1 + add C,T1,C + sll C,14,T2 != + srl C,32-14,C + or C,T2,C + add C,D,C + + xor C,D,T1 != + sethi %hi(0xe9b6c7aa),T2 + and T1,A,T1 + or T2,%lo(0xe9b6c7aa),T2 + xor T1,D,T1 != + add T1,R0,T1 + !pre-LOADed X(5),R5 + add T1,T2,T1 + add B,T1,B + sll B,20,T2 != + srl B,32-20,B + or B,T2,B + add B,C,B + + xor B,C,T1 != + sethi %hi(0xd62f105d),T2 + and T1,D,T1 + or T2,%lo(0xd62f105d),T2 + xor T1,C,T1 != + add T1,R5,T1 + !pre-LOADed X(10),R10 + add T1,T2,T1 + add A,T1,A + sll A,5,T2 != + srl A,32-5,A + or A,T2,A + add A,B,A + + xor A,B,T1 != + sethi %hi(0x02441453),T2 + and T1,C,T1 + or T2,%lo(0x02441453),T2 + xor T1,B,T1 != + add T1,R10,T1 + LOAD X(15),RX + add T1,T2,T1 + add D,T1,D != + sll D,9,T2 + srl D,32-9,D + or D,T2,D + add D,A,D != + + xor D,A,T1 + sethi %hi(0xd8a1e681),T2 + and T1,B,T1 + or T2,%lo(0xd8a1e681),T2 != + xor T1,A,T1 + add T1,RX,T1 + !pre-LOADed X(4),R4 + add T1,T2,T1 + add C,T1,C != + sll C,14,T2 + srl C,32-14,C + or C,T2,C + add C,D,C != + + xor C,D,T1 + sethi %hi(0xe7d3fbc8),T2 + and T1,A,T1 + or T2,%lo(0xe7d3fbc8),T2 != + xor T1,D,T1 + add T1,R4,T1 + !pre-LOADed X(9),R9 + add T1,T2,T1 + add B,T1,B != + sll B,20,T2 + srl B,32-20,B + or B,T2,B + add B,C,B != + + xor B,C,T1 + sethi %hi(0x21e1cde6),T2 + and T1,D,T1 + or T2,%lo(0x21e1cde6),T2 != + xor T1,C,T1 + add T1,R9,T1 + LOAD X(14),RX + add T1,T2,T1 != + add A,T1,A + sll A,5,T2 + srl A,32-5,A + or A,T2,A != + add A,B,A + + xor A,B,T1 + sethi %hi(0xc33707d6),T2 + and T1,C,T1 != + or T2,%lo(0xc33707d6),T2 + xor T1,B,T1 + add T1,RX,T1 + !pre-LOADed X(3),R3 + add T1,T2,T1 != + add D,T1,D + sll D,9,T2 + srl D,32-9,D + or D,T2,D != + add D,A,D + + xor D,A,T1 + sethi %hi(0xf4d50d87),T2 + and T1,B,T1 != + or T2,%lo(0xf4d50d87),T2 + xor T1,A,T1 + add T1,R3,T1 + !pre-LOADed X(8),R8 + add T1,T2,T1 != + add C,T1,C + sll C,14,T2 + srl C,32-14,C + or C,T2,C != + add C,D,C + + xor C,D,T1 + sethi %hi(0x455a14ed),T2 + and T1,A,T1 != + or T2,%lo(0x455a14ed),T2 + xor T1,D,T1 + add T1,R8,T1 + !pre-LOADed X(13),R13 + add T1,T2,T1 != + add B,T1,B + sll B,20,T2 + srl B,32-20,B + or B,T2,B != + add B,C,B + + xor B,C,T1 + sethi %hi(0xa9e3e905),T2 + and T1,D,T1 != + or T2,%lo(0xa9e3e905),T2 + xor T1,C,T1 + add T1,R13,T1 + !pre-LOADed X(2),R2 + add T1,T2,T1 != + add A,T1,A + sll A,5,T2 + srl A,32-5,A + or A,T2,A != + add A,B,A + + xor A,B,T1 + sethi %hi(0xfcefa3f8),T2 + and T1,C,T1 != + or T2,%lo(0xfcefa3f8),T2 + xor T1,B,T1 + add T1,R2,T1 + !pre-LOADed X(7),R7 + add T1,T2,T1 != + add D,T1,D + sll D,9,T2 + srl D,32-9,D + or D,T2,D != + add D,A,D + + xor D,A,T1 + sethi %hi(0x676f02d9),T2 + and T1,B,T1 != + or T2,%lo(0x676f02d9),T2 + xor T1,A,T1 + add T1,R7,T1 + !pre-LOADed X(12),R12 + add T1,T2,T1 != + add C,T1,C + sll C,14,T2 + srl C,32-14,C + or C,T2,C != + add C,D,C + + xor C,D,T1 + sethi %hi(0x8d2a4c8a),T2 + and T1,A,T1 != + or T2,%lo(0x8d2a4c8a),T2 + xor T1,D,T1 + 
add T1,R12,T1 + !pre-LOADed X(5),R5 + add T1,T2,T1 != + add B,T1,B + sll B,20,T2 + srl B,32-20,B + or B,T2,B != + add B,C,B + +!!!!!!!!Round 2 + + xor B,C,T1 + sethi %hi(0xfffa3942),T2 + xor T1,D,T1 != + or T2,%lo(0xfffa3942),T2 + add T1,R5,T1 + !pre-LOADed X(8),R8 + add T1,T2,T1 + add A,T1,A != + sll A,4,T2 + srl A,32-4,A + or A,T2,A + add A,B,A != + + xor A,B,T1 + sethi %hi(0x8771f681),T2 + xor T1,C,T1 + or T2,%lo(0x8771f681),T2 != + add T1,R8,T1 + !pre-LOADed X(11),R11 + add T1,T2,T1 + add D,T1,D + sll D,11,T2 != + srl D,32-11,D + or D,T2,D + add D,A,D + + xor D,A,T1 != + sethi %hi(0x6d9d6122),T2 + xor T1,B,T1 + or T2,%lo(0x6d9d6122),T2 + add T1,R11,T1 != + LOAD X(14),RX + add T1,T2,T1 + add C,T1,C + sll C,16,T2 != + srl C,32-16,C + or C,T2,C + add C,D,C + + xor C,D,T1 != + sethi %hi(0xfde5380c),T2 + xor T1,A,T1 + or T2,%lo(0xfde5380c),T2 + add T1,RX,T1 != + !pre-LOADed X(1),R1 + add T1,T2,T1 + add B,T1,B + sll B,23,T2 + srl B,32-23,B != + or B,T2,B + add B,C,B + + xor B,C,T1 + sethi %hi(0xa4beea44),T2 != + xor T1,D,T1 + or T2,%lo(0xa4beea44),T2 + add T1,R1,T1 + !pre-LOADed X(4),R4 + add T1,T2,T1 != + add A,T1,A + sll A,4,T2 + srl A,32-4,A + or A,T2,A != + add A,B,A + + xor A,B,T1 + sethi %hi(0x4bdecfa9),T2 + xor T1,C,T1 != + or T2,%lo(0x4bdecfa9),T2 + add T1,R4,T1 + !pre-LOADed X(7),R7 + add T1,T2,T1 + add D,T1,D != + sll D,11,T2 + srl D,32-11,D + or D,T2,D + add D,A,D != + + xor D,A,T1 + sethi %hi(0xf6bb4b60),T2 + xor T1,B,T1 + or T2,%lo(0xf6bb4b60),T2 != + add T1,R7,T1 + !pre-LOADed X(10),R10 + add T1,T2,T1 + add C,T1,C + sll C,16,T2 != + srl C,32-16,C + or C,T2,C + add C,D,C + + xor C,D,T1 != + sethi %hi(0xbebfbc70),T2 + xor T1,A,T1 + or T2,%lo(0xbebfbc70),T2 + add T1,R10,T1 != + !pre-LOADed X(13),R13 + add T1,T2,T1 + add B,T1,B + sll B,23,T2 + srl B,32-23,B != + or B,T2,B + add B,C,B + + xor B,C,T1 + sethi %hi(0x289b7ec6),T2 != + xor T1,D,T1 + or T2,%lo(0x289b7ec6),T2 + add T1,R13,T1 + !pre-LOADed X(0),R0 + add T1,T2,T1 != + add A,T1,A + sll A,4,T2 + srl A,32-4,A + or A,T2,A != + add A,B,A + + xor A,B,T1 + sethi %hi(0xeaa127fa),T2 + xor T1,C,T1 != + or T2,%lo(0xeaa127fa),T2 + add T1,R0,T1 + !pre-LOADed X(3),R3 + add T1,T2,T1 + add D,T1,D != + sll D,11,T2 + srl D,32-11,D + or D,T2,D + add D,A,D != + + xor D,A,T1 + sethi %hi(0xd4ef3085),T2 + xor T1,B,T1 + or T2,%lo(0xd4ef3085),T2 != + add T1,R3,T1 + !pre-LOADed X(6),R6 + add T1,T2,T1 + add C,T1,C + sll C,16,T2 != + srl C,32-16,C + or C,T2,C + add C,D,C + + xor C,D,T1 != + sethi %hi(0x04881d05),T2 + xor T1,A,T1 + or T2,%lo(0x04881d05),T2 + add T1,R6,T1 != + !pre-LOADed X(9),R9 + add T1,T2,T1 + add B,T1,B + sll B,23,T2 + srl B,32-23,B != + or B,T2,B + add B,C,B + + xor B,C,T1 + sethi %hi(0xd9d4d039),T2 != + xor T1,D,T1 + or T2,%lo(0xd9d4d039),T2 + add T1,R9,T1 + !pre-LOADed X(12),R12 + add T1,T2,T1 != + add A,T1,A + sll A,4,T2 + srl A,32-4,A + or A,T2,A != + add A,B,A + + xor A,B,T1 + sethi %hi(0xe6db99e5),T2 + xor T1,C,T1 != + or T2,%lo(0xe6db99e5),T2 + add T1,R12,T1 + LOAD X(15),RX + add T1,T2,T1 != + add D,T1,D + sll D,11,T2 + srl D,32-11,D + or D,T2,D != + add D,A,D + + xor D,A,T1 + sethi %hi(0x1fa27cf8),T2 + xor T1,B,T1 != + or T2,%lo(0x1fa27cf8),T2 + add T1,RX,T1 + !pre-LOADed X(2),R2 + add T1,T2,T1 + add C,T1,C != + sll C,16,T2 + srl C,32-16,C + or C,T2,C + add C,D,C != + + xor C,D,T1 + sethi %hi(0xc4ac5665),T2 + xor T1,A,T1 + or T2,%lo(0xc4ac5665),T2 != + add T1,R2,T1 + !pre-LOADed X(0),R0 + add T1,T2,T1 + add B,T1,B + sll B,23,T2 != + srl B,32-23,B + or B,T2,B + add B,C,B + +!!!!!!!!Round 3 + + orn B,D,T1 != + sethi 
%hi(0xf4292244),T2 + xor T1,C,T1 + or T2,%lo(0xf4292244),T2 + add T1,R0,T1 != + !pre-LOADed X(7),R7 + add T1,T2,T1 + add A,T1,A + sll A,6,T2 + srl A,32-6,A != + or A,T2,A + add A,B,A + + orn A,C,T1 + sethi %hi(0x432aff97),T2 != + xor T1,B,T1 + or T2,%lo(0x432aff97),T2 + LOAD X(14),RX + add T1,R7,T1 != + add T1,T2,T1 + add D,T1,D + sll D,10,T2 + srl D,32-10,D != + or D,T2,D + add D,A,D + + orn D,B,T1 + sethi %hi(0xab9423a7),T2 != + xor T1,A,T1 + or T2,%lo(0xab9423a7),T2 + add T1,RX,T1 + !pre-LOADed X(5),R5 + add T1,T2,T1 != + add C,T1,C + sll C,15,T2 + srl C,32-15,C + or C,T2,C != + add C,D,C + + orn C,A,T1 + sethi %hi(0xfc93a039),T2 + xor T1,D,T1 != + or T2,%lo(0xfc93a039),T2 + add T1,R5,T1 + !pre-LOADed X(12),R12 + add T1,T2,T1 + add B,T1,B != + sll B,21,T2 + srl B,32-21,B + or B,T2,B + add B,C,B != + + orn B,D,T1 + sethi %hi(0x655b59c3),T2 + xor T1,C,T1 + or T2,%lo(0x655b59c3),T2 != + add T1,R12,T1 + !pre-LOADed X(3),R3 + add T1,T2,T1 + add A,T1,A + sll A,6,T2 != + srl A,32-6,A + or A,T2,A + add A,B,A + + orn A,C,T1 != + sethi %hi(0x8f0ccc92),T2 + xor T1,B,T1 + or T2,%lo(0x8f0ccc92),T2 + add T1,R3,T1 != + !pre-LOADed X(10),R10 + add T1,T2,T1 + add D,T1,D + sll D,10,T2 + srl D,32-10,D != + or D,T2,D + add D,A,D + + orn D,B,T1 + sethi %hi(0xffeff47d),T2 != + xor T1,A,T1 + or T2,%lo(0xffeff47d),T2 + add T1,R10,T1 + !pre-LOADed X(1),R1 + add T1,T2,T1 != + add C,T1,C + sll C,15,T2 + srl C,32-15,C + or C,T2,C != + add C,D,C + + orn C,A,T1 + sethi %hi(0x85845dd1),T2 + xor T1,D,T1 != + or T2,%lo(0x85845dd1),T2 + add T1,R1,T1 + !pre-LOADed X(8),R8 + add T1,T2,T1 + add B,T1,B != + sll B,21,T2 + srl B,32-21,B + or B,T2,B + add B,C,B != + + orn B,D,T1 + sethi %hi(0x6fa87e4f),T2 + xor T1,C,T1 + or T2,%lo(0x6fa87e4f),T2 != + add T1,R8,T1 + LOAD X(15),RX + add T1,T2,T1 + add A,T1,A != + sll A,6,T2 + srl A,32-6,A + or A,T2,A + add A,B,A != + + orn A,C,T1 + sethi %hi(0xfe2ce6e0),T2 + xor T1,B,T1 + or T2,%lo(0xfe2ce6e0),T2 != + add T1,RX,T1 + !pre-LOADed X(6),R6 + add T1,T2,T1 + add D,T1,D + sll D,10,T2 != + srl D,32-10,D + or D,T2,D + add D,A,D + + orn D,B,T1 != + sethi %hi(0xa3014314),T2 + xor T1,A,T1 + or T2,%lo(0xa3014314),T2 + add T1,R6,T1 != + !pre-LOADed X(13),R13 + add T1,T2,T1 + add C,T1,C + sll C,15,T2 + srl C,32-15,C != + or C,T2,C + add C,D,C + + orn C,A,T1 + sethi %hi(0x4e0811a1),T2 != + xor T1,D,T1 + or T2,%lo(0x4e0811a1),T2 + !pre-LOADed X(4),R4 + ld [Aptr],Aval + add T1,R13,T1 != + add T1,T2,T1 + add B,T1,B + sll B,21,T2 + srl B,32-21,B != + or B,T2,B + add B,C,B + + orn B,D,T1 + sethi %hi(0xf7537e82),T2 != + xor T1,C,T1 + or T2,%lo(0xf7537e82),T2 + !pre-LOADed X(11),R11 + ld [Dptr],Dval + add T1,R4,T1 != + add T1,T2,T1 + add A,T1,A + sll A,6,T2 + srl A,32-6,A != + or A,T2,A + add A,B,A + + orn A,C,T1 + sethi %hi(0xbd3af235),T2 != + xor T1,B,T1 + or T2,%lo(0xbd3af235),T2 + !pre-LOADed X(2),R2 + ld [Cptr],Cval + add T1,R11,T1 != + add T1,T2,T1 + add D,T1,D + sll D,10,T2 + srl D,32-10,D != + or D,T2,D + add D,A,D + + orn D,B,T1 + sethi %hi(0x2ad7d2bb),T2 != + xor T1,A,T1 + or T2,%lo(0x2ad7d2bb),T2 + !pre-LOADed X(9),R9 + ld [Bptr],Bval + add T1,R2,T1 != + add Aval,A,Aval + add T1,T2,T1 + st Aval,[Aptr] + add C,T1,C != + sll C,15,T2 + add Dval,D,Dval + srl C,32-15,C + or C,T2,C != + st Dval,[Dptr] + add C,D,C + + orn C,A,T1 + sethi %hi(0xeb86d391),T2 != + xor T1,D,T1 + or T2,%lo(0xeb86d391),T2 + add T1,R9,T1 + !pre-LOADed X(0),R0 + mov Aval,A != + add T1,T2,T1 + mov Dval,D + add B,T1,B + sll B,21,T2 != + add Cval,C,Cval + srl B,32-21,B + st Cval,[Cptr] + or B,T2,B != + add B,C,B + + deccc %i2 
+ mov Cval,C + add B,Bval,B != + inc 64,%i1 + nop + st B,[Bptr] + nop != + +#ifdef OPENSSL_SYSNAME_ULTRASPARC + bg,a,pt %icc,.Lmd5_block_loop +#else + bg,a .Lmd5_block_loop +#endif + LOAD X(0),R0 + +#ifdef ASI_PRIMARY_LITTLE + wr %g0,%o7,%asi +#endif + ret + restore %g0,0,%o0 + +.type md5_block,#function +.size md5_block,(.-md5_block) diff --git a/openssl/trunk/crypto/md5/asm/md5-x86_64.pl b/openssl/trunk/crypto/md5/asm/md5-x86_64.pl new file mode 100755 index 00000000..c36a7feb --- /dev/null +++ b/openssl/trunk/crypto/md5/asm/md5-x86_64.pl @@ -0,0 +1,245 @@ +#!/usr/bin/perl -w +# +# MD5 optimized for AMD64. +# +# Author: Marc Bevand <bevand_m (at) epita.fr> +# Licence: I hereby disclaim the copyright on this code and place it +# in the public domain. +# + +use strict; + +my $code; + +# round1_step() does: +# dst = x + ((dst + F(x,y,z) + X[k] + T_i) <<< s) +# %r10d = X[k_next] +# %r11d = z' (copy of z for the next step) +# Each round1_step() takes about 5.71 clocks (9 instructions, 1.58 IPC) +sub round1_step +{ + my ($pos, $dst, $x, $y, $z, $k_next, $T_i, $s) = @_; + $code .= " mov 0*4(%rsi), %r10d /* (NEXT STEP) X[0] */\n" if ($pos == -1); + $code .= " mov %edx, %r11d /* (NEXT STEP) z' = %edx */\n" if ($pos == -1); + $code .= <<EOF; + xor $y, %r11d /* y ^ ... */ + lea $T_i($dst,%r10d),$dst /* Const + dst + ... */ + and $x, %r11d /* x & ... */ + xor $z, %r11d /* z ^ ... */ + mov $k_next*4(%rsi),%r10d /* (NEXT STEP) X[$k_next] */ + add %r11d, $dst /* dst += ... */ + rol \$$s, $dst /* dst <<< s */ + mov $y, %r11d /* (NEXT STEP) z' = $y */ + add $x, $dst /* dst += x */ +EOF +} + +# round2_step() does: +# dst = x + ((dst + G(x,y,z) + X[k] + T_i) <<< s) +# %r10d = X[k_next] +# %r11d = y' (copy of y for the next step) +# Each round2_step() takes about 6.22 clocks (9 instructions, 1.45 IPC) +sub round2_step +{ + my ($pos, $dst, $x, $y, $z, $k_next, $T_i, $s) = @_; + $code .= " mov 1*4(%rsi), %r10d /* (NEXT STEP) X[1] */\n" if ($pos == -1); + $code .= " mov %ecx, %r11d /* (NEXT STEP) y' = %ecx */\n" if ($pos == -1); + $code .= <<EOF; + xor $x, %r11d /* x ^ ... */ + lea $T_i($dst,%r10d),$dst /* Const + dst + ... */ + and $z, %r11d /* z & ... */ + xor $y, %r11d /* y ^ ... */ + mov $k_next*4(%rsi),%r10d /* (NEXT STEP) X[$k_next] */ + add %r11d, $dst /* dst += ... */ + rol \$$s, $dst /* dst <<< s */ + mov $x, %r11d /* (NEXT STEP) y' = $x */ + add $x, $dst /* dst += x */ +EOF +} + +# round3_step() does: +# dst = x + ((dst + H(x,y,z) + X[k] + T_i) <<< s) +# %r10d = X[k_next] +# %r11d = y' (copy of y for the next step) +# Each round3_step() takes about 4.26 clocks (8 instructions, 1.88 IPC) +sub round3_step +{ + my ($pos, $dst, $x, $y, $z, $k_next, $T_i, $s) = @_; + $code .= " mov 5*4(%rsi), %r10d /* (NEXT STEP) X[5] */\n" if ($pos == -1); + $code .= " mov %ecx, %r11d /* (NEXT STEP) y' = %ecx */\n" if ($pos == -1); + $code .= <<EOF; + lea $T_i($dst,%r10d),$dst /* Const + dst + ... */ + mov $k_next*4(%rsi),%r10d /* (NEXT STEP) X[$k_next] */ + xor $z, %r11d /* z ^ ... */ + xor $x, %r11d /* x ^ ... */ + add %r11d, $dst /* dst += ... 
*/ + rol \$$s, $dst /* dst <<< s */ + mov $x, %r11d /* (NEXT STEP) y' = $x */ + add $x, $dst /* dst += x */ +EOF +} + +# round4_step() does: +# dst = x + ((dst + I(x,y,z) + X[k] + T_i) <<< s) +# %r10d = X[k_next] +# %r11d = not z' (copy of not z for the next step) +# Each round4_step() takes about 5.27 clocks (9 instructions, 1.71 IPC) +sub round4_step +{ + my ($pos, $dst, $x, $y, $z, $k_next, $T_i, $s) = @_; + $code .= " mov 0*4(%rsi), %r10d /* (NEXT STEP) X[0] */\n" if ($pos == -1); + $code .= " mov \$0xffffffff, %r11d\n" if ($pos == -1); + $code .= " xor %edx, %r11d /* (NEXT STEP) not z' = not %edx*/\n" + if ($pos == -1); + $code .= <<EOF; + lea $T_i($dst,%r10d),$dst /* Const + dst + ... */ + or $x, %r11d /* x | ... */ + xor $y, %r11d /* y ^ ... */ + add %r11d, $dst /* dst += ... */ + mov $k_next*4(%rsi),%r10d /* (NEXT STEP) X[$k_next] */ + mov \$0xffffffff, %r11d + rol \$$s, $dst /* dst <<< s */ + xor $y, %r11d /* (NEXT STEP) not z' = not $y */ + add $x, $dst /* dst += x */ +EOF +} + +my $output = shift; +open STDOUT,"| $^X ../perlasm/x86_64-xlate.pl $output"; + +$code .= <<EOF; +.text +.align 16 + +.globl md5_block_asm_host_order +.type md5_block_asm_host_order,\@function,3 +md5_block_asm_host_order: + push %rbp + push %rbx + push %r14 + push %r15 + + # rdi = arg #1 (ctx, MD5_CTX pointer) + # rsi = arg #2 (ptr, data pointer) + # rdx = arg #3 (nbr, number of 16-word blocks to process) + mov %rdi, %rbp # rbp = ctx + shl \$6, %rdx # rdx = nbr in bytes + lea (%rsi,%rdx), %rdi # rdi = end + mov 0*4(%rbp), %eax # eax = ctx->A + mov 1*4(%rbp), %ebx # ebx = ctx->B + mov 2*4(%rbp), %ecx # ecx = ctx->C + mov 3*4(%rbp), %edx # edx = ctx->D + # end is 'rdi' + # ptr is 'rsi' + # A is 'eax' + # B is 'ebx' + # C is 'ecx' + # D is 'edx' + + cmp %rdi, %rsi # cmp end with ptr + je .Lend # jmp if ptr == end + + # BEGIN of loop over 16-word blocks +.Lloop: # save old values of A, B, C, D + mov %eax, %r8d + mov %ebx, %r9d + mov %ecx, %r14d + mov %edx, %r15d +EOF +round1_step(-1,'%eax','%ebx','%ecx','%edx', '1','0xd76aa478', '7'); +round1_step( 0,'%edx','%eax','%ebx','%ecx', '2','0xe8c7b756','12'); +round1_step( 0,'%ecx','%edx','%eax','%ebx', '3','0x242070db','17'); +round1_step( 0,'%ebx','%ecx','%edx','%eax', '4','0xc1bdceee','22'); +round1_step( 0,'%eax','%ebx','%ecx','%edx', '5','0xf57c0faf', '7'); +round1_step( 0,'%edx','%eax','%ebx','%ecx', '6','0x4787c62a','12'); +round1_step( 0,'%ecx','%edx','%eax','%ebx', '7','0xa8304613','17'); +round1_step( 0,'%ebx','%ecx','%edx','%eax', '8','0xfd469501','22'); +round1_step( 0,'%eax','%ebx','%ecx','%edx', '9','0x698098d8', '7'); +round1_step( 0,'%edx','%eax','%ebx','%ecx','10','0x8b44f7af','12'); +round1_step( 0,'%ecx','%edx','%eax','%ebx','11','0xffff5bb1','17'); +round1_step( 0,'%ebx','%ecx','%edx','%eax','12','0x895cd7be','22'); +round1_step( 0,'%eax','%ebx','%ecx','%edx','13','0x6b901122', '7'); +round1_step( 0,'%edx','%eax','%ebx','%ecx','14','0xfd987193','12'); +round1_step( 0,'%ecx','%edx','%eax','%ebx','15','0xa679438e','17'); +round1_step( 1,'%ebx','%ecx','%edx','%eax', '0','0x49b40821','22'); + +round2_step(-1,'%eax','%ebx','%ecx','%edx', '6','0xf61e2562', '5'); +round2_step( 0,'%edx','%eax','%ebx','%ecx','11','0xc040b340', '9'); +round2_step( 0,'%ecx','%edx','%eax','%ebx', '0','0x265e5a51','14'); +round2_step( 0,'%ebx','%ecx','%edx','%eax', '5','0xe9b6c7aa','20'); +round2_step( 0,'%eax','%ebx','%ecx','%edx','10','0xd62f105d', '5'); +round2_step( 0,'%edx','%eax','%ebx','%ecx','15', '0x2441453', '9'); +round2_step( 0,'%ecx','%edx','%eax','%ebx', 
'4','0xd8a1e681','14'); +round2_step( 0,'%ebx','%ecx','%edx','%eax', '9','0xe7d3fbc8','20'); +round2_step( 0,'%eax','%ebx','%ecx','%edx','14','0x21e1cde6', '5'); +round2_step( 0,'%edx','%eax','%ebx','%ecx', '3','0xc33707d6', '9'); +round2_step( 0,'%ecx','%edx','%eax','%ebx', '8','0xf4d50d87','14'); +round2_step( 0,'%ebx','%ecx','%edx','%eax','13','0x455a14ed','20'); +round2_step( 0,'%eax','%ebx','%ecx','%edx', '2','0xa9e3e905', '5'); +round2_step( 0,'%edx','%eax','%ebx','%ecx', '7','0xfcefa3f8', '9'); +round2_step( 0,'%ecx','%edx','%eax','%ebx','12','0x676f02d9','14'); +round2_step( 1,'%ebx','%ecx','%edx','%eax', '0','0x8d2a4c8a','20'); + +round3_step(-1,'%eax','%ebx','%ecx','%edx', '8','0xfffa3942', '4'); +round3_step( 0,'%edx','%eax','%ebx','%ecx','11','0x8771f681','11'); +round3_step( 0,'%ecx','%edx','%eax','%ebx','14','0x6d9d6122','16'); +round3_step( 0,'%ebx','%ecx','%edx','%eax', '1','0xfde5380c','23'); +round3_step( 0,'%eax','%ebx','%ecx','%edx', '4','0xa4beea44', '4'); +round3_step( 0,'%edx','%eax','%ebx','%ecx', '7','0x4bdecfa9','11'); +round3_step( 0,'%ecx','%edx','%eax','%ebx','10','0xf6bb4b60','16'); +round3_step( 0,'%ebx','%ecx','%edx','%eax','13','0xbebfbc70','23'); +round3_step( 0,'%eax','%ebx','%ecx','%edx', '0','0x289b7ec6', '4'); +round3_step( 0,'%edx','%eax','%ebx','%ecx', '3','0xeaa127fa','11'); +round3_step( 0,'%ecx','%edx','%eax','%ebx', '6','0xd4ef3085','16'); +round3_step( 0,'%ebx','%ecx','%edx','%eax', '9', '0x4881d05','23'); +round3_step( 0,'%eax','%ebx','%ecx','%edx','12','0xd9d4d039', '4'); +round3_step( 0,'%edx','%eax','%ebx','%ecx','15','0xe6db99e5','11'); +round3_step( 0,'%ecx','%edx','%eax','%ebx', '2','0x1fa27cf8','16'); +round3_step( 1,'%ebx','%ecx','%edx','%eax', '0','0xc4ac5665','23'); + +round4_step(-1,'%eax','%ebx','%ecx','%edx', '7','0xf4292244', '6'); +round4_step( 0,'%edx','%eax','%ebx','%ecx','14','0x432aff97','10'); +round4_step( 0,'%ecx','%edx','%eax','%ebx', '5','0xab9423a7','15'); +round4_step( 0,'%ebx','%ecx','%edx','%eax','12','0xfc93a039','21'); +round4_step( 0,'%eax','%ebx','%ecx','%edx', '3','0x655b59c3', '6'); +round4_step( 0,'%edx','%eax','%ebx','%ecx','10','0x8f0ccc92','10'); +round4_step( 0,'%ecx','%edx','%eax','%ebx', '1','0xffeff47d','15'); +round4_step( 0,'%ebx','%ecx','%edx','%eax', '8','0x85845dd1','21'); +round4_step( 0,'%eax','%ebx','%ecx','%edx','15','0x6fa87e4f', '6'); +round4_step( 0,'%edx','%eax','%ebx','%ecx', '6','0xfe2ce6e0','10'); +round4_step( 0,'%ecx','%edx','%eax','%ebx','13','0xa3014314','15'); +round4_step( 0,'%ebx','%ecx','%edx','%eax', '4','0x4e0811a1','21'); +round4_step( 0,'%eax','%ebx','%ecx','%edx','11','0xf7537e82', '6'); +round4_step( 0,'%edx','%eax','%ebx','%ecx', '2','0xbd3af235','10'); +round4_step( 0,'%ecx','%edx','%eax','%ebx', '9','0x2ad7d2bb','15'); +round4_step( 1,'%ebx','%ecx','%edx','%eax', '0','0xeb86d391','21'); +$code .= <<EOF; + # add old values of A, B, C, D + add %r8d, %eax + add %r9d, %ebx + add %r14d, %ecx + add %r15d, %edx + + # loop control + add \$64, %rsi # ptr += 64 + cmp %rdi, %rsi # cmp end with ptr + jb .Lloop # jmp if ptr < end + # END of loop over 16-word blocks + +.Lend: + mov %eax, 0*4(%rbp) # ctx->A = A + mov %ebx, 1*4(%rbp) # ctx->B = B + mov %ecx, 2*4(%rbp) # ctx->C = C + mov %edx, 3*4(%rbp) # ctx->D = D + + pop %r15 + pop %r14 + pop %rbx + pop %rbp + ret +.size md5_block_asm_host_order,.-md5_block_asm_host_order +EOF + +print $code; + +close STDOUT; |
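For completeness, the two Perl generators are driven the way their own code suggests: md5-586.pl hands its first argument to asm_init() as the assembler flavour, and md5-x86_64.pl takes an output name and pipes everything it prints through ../perlasm/x86_64-xlate.pl. The following is a hedged sketch only; the "elf" flavour, the output file names, and the STDOUT capture for the x86 script are assumptions, since the real OpenSSL build supplies all of this through its Makefile rules.

#!/usr/bin/perl
# Hedged sketch of regenerating both perlasm outputs by hand from this
# directory; names and flavour are assumptions, not taken from the patch.
use strict;
use warnings;

# md5-586.pl: first argument becomes the asm_init() flavour; capture STDOUT
# on the assumption that the x86asm.pl framework prints the assembly there.
system("$^X md5-586.pl elf > md5-586.s") == 0
    or die "md5-586.pl failed: $?";

# md5-x86_64.pl: takes the output name itself and re-opens STDOUT as a pipe
# through ../perlasm/x86_64-xlate.pl, which emits the translated assembly.
system($^X, 'md5-x86_64.pl', 'md5-x86_64.s') == 0
    or die "md5-x86_64.pl failed: $?";

# md5-sparcv9.S needs no generator; it is assembled directly using the
# cc/gcc command lines quoted in its header comment.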