Diffstat (limited to 'openssl/trunk/crypto/rc4/asm')
-rw-r--r--  openssl/trunk/crypto/rc4/asm/rc4-586.pl    | 230 -
-rw-r--r--  openssl/trunk/crypto/rc4/asm/rc4-ia64.S    | 160 -
-rwxr-xr-x  openssl/trunk/crypto/rc4/asm/rc4-x86_64.pl | 240 -
3 files changed, 0 insertions(+), 630 deletions(-)
diff --git a/openssl/trunk/crypto/rc4/asm/rc4-586.pl b/openssl/trunk/crypto/rc4/asm/rc4-586.pl
deleted file mode 100644
index 22bda4b4..00000000
--- a/openssl/trunk/crypto/rc4/asm/rc4-586.pl
+++ /dev/null
@@ -1,230 +0,0 @@
-#!/usr/local/bin/perl
-
-# At some point it became apparent that the original SSLeay RC4
-# assembler implementation performs suboptimally on the latest IA-32
-# microarchitectures. After re-tuning, performance has changed as
-# follows:
-#
-#   Pentium       +0%
-#   Pentium III   +17%
-#   AMD           +52%(*)
-#   P4            +180%(**)
-#
-# (*) This number is actually a trade-off:-) It's possible to
-# achieve +72%, but at the cost of -48% off PIII performance.
-# In other words, code performing a further 13% faster on AMD
-# would perform almost 2 times slower on Intel PIII...
-# For reference! This code delivers ~80% of rc4-amd64.pl
-# performance on the same Opteron machine.
-# (**) This number requires the compressed key schedule set up by
-# RC4_set_key and therefore doesn't apply to 0.9.7 [the option for
-# a compressed key schedule is implemented in 0.9.8 and later;
-# see the commentary section in rc4_skey.c for further details].
-#
-# <appro@fy.chalmers.se>
-
-push(@INC,"perlasm","../../perlasm");
-require "x86asm.pl";
-
-&asm_init($ARGV[0],"rc4-586.pl");
-
-$x="eax";
-$y="ebx";
-$tx="ecx";
-$ty="edx";
-$in="esi";
-$out="edi";
-$d="ebp";
-
-&RC4("RC4");
-
-&asm_finish();
-
-sub RC4_loop
- {
- local($n,$p,$char)=@_;
-
- &comment("Round $n");
-
- if ($char)
- {
- if ($p >= 0)
- {
- &mov($ty, &swtmp(2));
- &cmp($ty, $in);
- &jbe(&label("finished"));
- &inc($in);
- }
- else
- {
- &add($ty, 8);
- &inc($in);
- &cmp($ty, $in);
- &jb(&label("finished"));
- &mov(&swtmp(2), $ty);
- }
- }
- # Moved out
- # &mov( $tx, &DWP(0,$d,$x,4)) if $p < 0;
-
- &add( &LB($y), &LB($tx));
- &mov( $ty, &DWP(0,$d,$y,4));
- # XXX
- &mov( &DWP(0,$d,$x,4),$ty);
- &add( $ty, $tx);
- &mov( &DWP(0,$d,$y,4),$tx);
- &and( $ty, 0xff);
- &inc( &LB($x)); # NEXT ROUND
- &mov( $tx, &DWP(0,$d,$x,4)) if $p < 1; # NEXT ROUND
- &mov( $ty, &DWP(0,$d,$ty,4));
-
- if (!$char)
- {
- # moved up into the last round
- if ($p >= 1)
- {
- &add( $out, 8)
- }
- &movb( &BP($n,"esp","",0), &LB($ty));
- }
- else
- {
- # Note that in+=8 has occurred
- &movb( &HB($ty), &BP(-1,$in,"",0));
- # XXX
- &xorb(&LB($ty), &HB($ty));
- # XXX
- &movb(&BP($n,$out,"",0),&LB($ty));
- }
- }
-
-
-sub RC4
- {
- local($name)=@_;
-
- &function_begin_B($name,"");
-
- &mov($ty,&wparam(1)); # len
- &cmp($ty,0);
- &jne(&label("proceed"));
- &ret();
- &set_label("proceed");
-
- &comment("");
-
- &push("ebp");
- &push("ebx");
- &push("esi");
- &xor( $x, $x); # avoid partial register stalls
- &push("edi");
- &xor( $y, $y); # avoid partial register stalls
- &mov( $d, &wparam(0)); # key
- &mov( $in, &wparam(2));
-
- &movb( &LB($x), &BP(0,$d,"",1));
- &movb( &LB($y), &BP(4,$d,"",1));
-
- &mov( $out, &wparam(3));
- &inc( &LB($x));
-
- &stack_push(3); # 3 temp variables
- &add( $d, 8);
-
- # detect compressed schedule, see the commentary section in rc4_skey.c...
- # in a 0.9.7 context the ~50 bytes below the RC4_CHAR label remain
- # redundant, as the compressed key schedule is only set up in 0.9.8 and later.
- &cmp(&DWP(256,$d),-1);
- &je(&label("RC4_CHAR"));
-
- &lea( $ty, &DWP(-8,$ty,$in));
-
- # check for less than 8 bytes of input
-
- &mov( &swtmp(2), $ty); # this is now address to exit at
- &mov( $tx, &DWP(0,$d,$x,4));
-
- &cmp( $ty, $in);
- &jb( &label("end")); # less than 8 bytes
-
- &set_label("start");
-
- # filling DELAY SLOT
- &add( $in, 8);
-
- &RC4_loop(0,-1,0);
- &RC4_loop(1,0,0);
- &RC4_loop(2,0,0);
- &RC4_loop(3,0,0);
- &RC4_loop(4,0,0);
- &RC4_loop(5,0,0);
- &RC4_loop(6,0,0);
- &RC4_loop(7,1,0);
-
- &comment("apply the cipher text");
- # xor the cipher data with input
-
- #&add( $out, 8); #moved up into last round
-
- &mov( $tx, &swtmp(0));
- &mov( $ty, &DWP(-8,$in,"",0));
- &xor( $tx, $ty);
- &mov( $ty, &DWP(-4,$in,"",0));
- &mov( &DWP(-8,$out,"",0), $tx);
- &mov( $tx, &swtmp(1));
- &xor( $tx, $ty);
- &mov( $ty, &swtmp(2)); # load end ptr;
- &mov( &DWP(-4,$out,"",0), $tx);
- &mov( $tx, &DWP(0,$d,$x,4));
- &cmp($in, $ty);
- &jbe(&label("start"));
-
- &set_label("end");
-
- # There is quite a bit of extra crap in RC4_loop() for this
- # first round
- &RC4_loop(0,-1,1);
- &RC4_loop(1,0,1);
- &RC4_loop(2,0,1);
- &RC4_loop(3,0,1);
- &RC4_loop(4,0,1);
- &RC4_loop(5,0,1);
- &RC4_loop(6,1,1);
-
- &jmp(&label("finished"));
-
- &align(16);
- # this is essentially an Intel P4-specific code path, see rc4_skey.c;
- # it is engaged in a 0.9.8-and-later context...
- &set_label("RC4_CHAR");
-
- &lea ($ty,&DWP(0,$in,$ty));
- &mov (&swtmp(2),$ty);
- &movz ($tx,&BP(0,$d,$x));
-
- # strangely enough the unrolled loop performs over 20% slower...
- &set_label("RC4_CHAR_loop");
- &add (&LB($y),&LB($tx));
- &movz ($ty,&BP(0,$d,$y));
- &movb (&BP(0,$d,$y),&LB($tx));
- &movb (&BP(0,$d,$x),&LB($ty));
- &add (&LB($ty),&LB($tx));
- &movz ($ty,&BP(0,$d,$ty));
- &add (&LB($x),1);
- &xorb (&LB($ty),&BP(0,$in));
- &lea ($in,&BP(1,$in));
- &movz ($tx,&BP(0,$d,$x));
- &cmp ($in,&swtmp(2));
- &movb (&BP(0,$out),&LB($ty));
- &lea ($out,&BP(1,$out));
- &jb (&label("RC4_CHAR_loop"));
-
- &set_label("finished");
- &dec( $x);
- &stack_pop(3);
- &movb( &BP(-4,$d,"",0),&LB($y));
- &movb( &BP(-8,$d,"",0),&LB($x));
-
- &function_end($name);
- }
-
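The eight RC4_loop calls above unroll the standard RC4 PRGA round: $x and $y hold the two schedule indices, $d points at the key schedule, and each round swaps two schedule entries and yields one keystream byte, which the caller XORs against the input. A minimal C sketch of the round each RC4_loop($n,...) encodes (rc4_step is a hypothetical helper for illustration, not part of the deleted file; RC4_INT is the schedule element type discussed in rc4-ia64.S below):

    typedef unsigned int RC4_INT;      /* schedule element, sizeof == SZ  */

    /* one PRGA step, i.e. what a single RC4_loop($n,...) round encodes */
    static unsigned char rc4_step(RC4_INT *key,
                                  unsigned char *x, unsigned char *y)
    {
        RC4_INT tx, ty;
        *x += 1;                       /* inc &LB($x)   - NEXT ROUND      */
        tx  = key[*x];                 /* $tx = key[$x] - NEXT ROUND      */
        *y += (unsigned char)tx;       /* add &LB($y),&LB($tx)            */
        ty  = key[*y];
        key[*x] = ty;                  /* swap key[$x] and key[$y]        */
        key[*y] = tx;
        return (unsigned char)key[(tx + ty) & 0xff];   /* keystream byte  */
    }

The 8-byte path batches eight such bytes in stack temporaries and XORs them against the input as two 32-bit words; the RC4_CHAR path does the same one byte at a time.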
diff --git a/openssl/trunk/crypto/rc4/asm/rc4-ia64.S b/openssl/trunk/crypto/rc4/asm/rc4-ia64.S
deleted file mode 100644
index a322d0c7..00000000
--- a/openssl/trunk/crypto/rc4/asm/rc4-ia64.S
+++ /dev/null
@@ -1,160 +0,0 @@
-// ====================================================================
-// Written by Andy Polyakov <appro@fy.chalmers.se> for the OpenSSL
-// project.
-//
-// Rights for redistribution and usage in source and binary forms are
-// granted according to the OpenSSL license. Warranty of any kind is
-// disclaimed.
-// ====================================================================
-
-.ident "rc4-ia64.S, Version 2.0"
-.ident "IA-64 ISA artwork by Andy Polyakov <appro@fy.chalmers.se>"
-
-// What's wrong with compiler-generated code? Because of the nature of
-// the C language, the compiler doesn't [dare to] reorder loads and
-// stores. But being memory-bound, RC4 should benefit from reordering
-// [on an in-order-execution core such as IA-64]. So what can we
-// reorder? At the very least we can safely reorder references to the
-// key schedule with respect to the input and output streams. Secondly,
-// at first [close] glance it appeared possible to pull up some
-// references to elements of the key schedule itself. The original
-// rationale ["prior loads are not safe only for a "degenerated" key
-// schedule, when some elements equal the same value"] was kind of
-// sloppy. I should have formulated it as it really was: if we assume
-// that pulling up the reference to key[x+1] is not safe, then it would
-// mean that the key schedule would "degenerate," which is never the
-// case. The problem is that this holds true for references to key[x],
-// but not to key[y]. Legitimate "collisions" do occur within every
-// 256^2-byte window. Fortunately there are enough free instruction
-// slots to keep the prior reference to key[x+1], detect the
-// "collision" and compensate for it. All this without sacrificing a
-// single clock cycle:-) Throughput is ~210MBps on a 900MHz CPU, which
-// is >3x faster than gcc-generated code and +30% compared to HP-UX C.
-// Unrolling the loop below should give >30% on top of that...
-
-.text
-.explicit
-
-#if defined(_HPUX_SOURCE) && !defined(_LP64)
-# define ADDP addp4
-#else
-# define ADDP add
-#endif
-
-#ifndef SZ
-#define SZ 4 // this is set to sizeof(RC4_INT)
-#endif
-// SZ==4 seems to be optimal. At least SZ==8 is not any faster, not even
-// for the assembler implementation, while SZ==1 code is ~30% slower.
-#if SZ==1 // RC4_INT is unsigned char
-# define LDKEY ld1
-# define STKEY st1
-# define OFF 0
-#elif SZ==4 // RC4_INT is unsigned int
-# define LDKEY ld4
-# define STKEY st4
-# define OFF 2
-#elif SZ==8 // RC4_INT is unsigned long
-# define LDKEY ld8
-# define STKEY st8
-# define OFF 3
-#endif
-
-out=r8; // [expanded] output pointer
-inp=r9; // [expanded] input pointer
-prsave=r10;
-key=r28; // [expanded] pointer to RC4_KEY
-ksch=r29; // (key->data+255)[&~(sizeof(key->data)-1)]
-xx=r30;
-yy=r31;
-
-// void RC4(RC4_KEY *key,size_t len,const void *inp,void *out);
-.global RC4#
-.proc RC4#
-.align 32
-.skip 16
-RC4:
- .prologue
- .fframe 0
- .save ar.pfs,r2
- .save ar.lc,r3
- .save pr,prsave
-{ .mii; alloc r2=ar.pfs,4,12,0,16
- mov prsave=pr
- ADDP key=0,in0 };;
-{ .mib; cmp.eq p6,p0=0,in1 // len==0?
- mov r3=ar.lc
-(p6) br.ret.spnt.many b0 };; // emergency exit
-
- .body
- .rotr dat[4],key_x[4],tx[2],rnd[2],key_y[2],ty[1];
-
-{ .mib; LDKEY xx=[key],SZ // load key->x
- add in1=-1,in1 // adjust len for loop counter
- nop.b 0 }
-{ .mib; ADDP inp=0,in2
- ADDP out=0,in3
- brp.loop.imp .Ltop,.Lexit-16 };;
-{ .mmi; LDKEY yy=[key] // load key->y
- add ksch=SZ,key
- mov ar.lc=in1 }
-{ .mmi; mov key_y[1]=r0 // guarantee inequality
- // in first iteration
- add xx=1,xx
- mov pr.rot=1<<16 };;
-{ .mii; nop.m 0
- dep key_x[1]=xx,r0,OFF,8
- mov ar.ec=3 };; // note that epilogue counter
- // is off by 1. I compensate
- // for this at exit...
-.Ltop:
-// The loop is scheduled for a 4*(n+2) spin-rate on Itanium 2, which
-// theoretically gives an asymptotic performance of clock frequency
-// divided by 4 in bytes per second, or 400MBps on a 1.6GHz CPU. This
-// is for sizeof(RC4_INT)==4. For smaller RC4_INT, STKEY inadvertently
-// splits the last bundle and you end up with a 5*n spin-rate:-(
-// Originally the loop was scheduled for 3*n and relied on the key
-// schedule being aligned at a 256*sizeof(RC4_INT) boundary. But
-// *(out++)=dat, which maps to st1, had the same effect [an inadvertent
-// bundle split] and held the loop back. Rescheduling for 4*n made it
-// possible to eliminate the dependence on specific alignment and to
-// allow OpenSSH to keep "abusing" our API. Reaching for 3*n would
-// require unrolling, sticking to the variable shift instruction for
-// collecting output [to avoid starving the integer shifter] and
-// copying the key schedule to a controlled place on the stack [so that
-// the deposit instruction can serve as a substitute for the whole
-// key->data+((x&255)<<log2(sizeof(key->data[0])))]...
-{ .mmi; (p19) st1 [out]=dat[3],1 // *(out++)=dat
- (p16) add xx=1,xx // x++
- (p18) dep rnd[1]=rnd[1],r0,OFF,8 } // ((tx+ty)&255)<<OFF
-{ .mmi; (p16) add key_x[1]=ksch,key_x[1] // &key[xx&255]
- (p17) add key_y[1]=ksch,key_y[1] };; // &key[yy&255]
-{ .mmi; (p16) LDKEY tx[0]=[key_x[1]] // tx=key[xx]
- (p17) LDKEY ty[0]=[key_y[1]] // ty=key[yy]
- (p16) dep key_x[0]=xx,r0,OFF,8 } // (xx&255)<<OFF
-{ .mmi; (p18) add rnd[1]=ksch,rnd[1] // &key[(tx+ty)&255]
- (p16) cmp.ne.unc p20,p21=key_x[1],key_y[1] };;
-{ .mmi; (p18) LDKEY rnd[1]=[rnd[1]] // rnd=key[(tx+ty)&255]
- (p16) ld1 dat[0]=[inp],1 } // dat=*(inp++)
-.pred.rel "mutex",p20,p21
-{ .mmi; (p21) add yy=yy,tx[1] // (p16)
- (p20) add yy=yy,tx[0] // (p16) y+=tx
- (p21) mov tx[0]=tx[1] };; // (p16)
-{ .mmi; (p17) STKEY [key_y[1]]=tx[1] // key[yy]=tx
- (p17) STKEY [key_x[2]]=ty[0] // key[xx]=ty
- (p16) dep key_y[0]=yy,r0,OFF,8 } // &key[yy&255]
-{ .mmb; (p17) add rnd[0]=tx[1],ty[0] // tx+=ty
- (p18) xor dat[2]=dat[2],rnd[1] // dat^=rnd
- br.ctop.sptk .Ltop };;
-.Lexit:
-{ .mib; STKEY [key]=yy,-SZ // save key->y
- mov pr=prsave,0x1ffff
- nop.b 0 }
-{ .mib; st1 [out]=dat[3],1 // compensate for truncated
- // epilogue counter
- add xx=-1,xx
- nop.b 0 };;
-{ .mib; STKEY [key]=xx // save key->x
- mov ar.lc=r3
- br.ret.sptk.many b0 };;
-.endp RC4#
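The "collision" handling promised in the header comment is visible in the predicated lines above: key[xx] for the next round is loaded a pipeline stage before the current round's key[yy] store, the cmp.ne.unc on the two addresses sets p20/p21, and on a match the just-stored value is forwarded (the predicated mov tx[0]=tx[1]) instead of the stale load. A hedged C sketch of the same forwarding logic (sequential C cannot reproduce the actual load/store overlap, so the test below is effectively a no-op that merely spells out the compensation; the function name and signature are invented):

    #include <stddef.h>

    typedef unsigned int RC4_INT;

    void rc4_ia64_style(RC4_INT *key, unsigned char *xp, unsigned char *yp,
                        size_t len, const unsigned char *inp,
                        unsigned char *out)
    {
        unsigned char x = *xp, y = *yp;
        /* pulled-up load: issued ahead of the stores in the real loop */
        RC4_INT tx = key[(unsigned char)(x + 1)];

        while (len--) {
            RC4_INT ty, tx_next;
            x += 1;
            y += (unsigned char)tx;
            ty = key[y];
            key[y] = tx;                 /* this store may alias key[x+1] */
            key[x] = ty;
            *out++ = *inp++ ^ (unsigned char)key[(tx + ty) & 0xff];
            tx_next = key[(unsigned char)(x + 1)]; /* early next-round load */
            if ((unsigned char)(x + 1) == y)       /* collision detected:   */
                tx_next = tx;                      /* forward stored value  */
            tx = tx_next;
        }
        *xp = x; *yp = y;
    }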
diff --git a/openssl/trunk/crypto/rc4/asm/rc4-x86_64.pl b/openssl/trunk/crypto/rc4/asm/rc4-x86_64.pl
deleted file mode 100755
index 4b990cba..00000000
--- a/openssl/trunk/crypto/rc4/asm/rc4-x86_64.pl
+++ /dev/null
@@ -1,240 +0,0 @@
-#!/usr/bin/env perl
-#
-# ====================================================================
-# Written by Andy Polyakov <appro@fy.chalmers.se> for the OpenSSL
-# project. Rights for redistribution and usage in source and binary
-# forms are granted according to the OpenSSL license.
-# ====================================================================
-#
-# 2.22x RC4 tune-up:-) It should be noted though that my hand [as in
-# "hand-coded assembler"] doesn't account for the whole improvement
-# coefficient. It turned out that eliminating RC4_CHAR from the config
-# line results in a ~40% improvement (yes, even for the C implementation).
-# Presumably it has everything to do with AMD cache architecture and
-# RAW or whatever penalties. Once again! The module *requires* a config
-# line *without* RC4_CHAR! As for the coding "secret," I bet on partial
-# register arithmetic. For example, instead of 'inc %r8; and $255,%r8'
-# I simply 'inc %r8b'. Even though the optimization manual discourages
-# operating on partial registers, it turned out to be the best bet.
-# At least for AMD... How IA32E would perform remains to be seen...
-
-# As was shown by Marc Bevand, reordering a couple of load operations
-# results in an even higher performance gain of 3.3x:-) At least on
-# Opteron... For reference, 1x in this case is RC4_CHAR C code
-# compiled with gcc 3.3.2, which performs at ~54MBps per 1GHz clock.
-# The latter means that if you want to *estimate* what to expect from
-# *your* Opteron, multiply 54 by 3.3 and the clock frequency in GHz.
-
-# The Intel P4 EM64T core was found to run the AMD64 code really
-# slowly... The only way to achieve comparable performance on P4 was
-# to keep RC4_CHAR. Kind of ironic, huh? As it's apparently impossible
-# to compose blended code that would perform within even a 30% margin
-# on both AMD and Intel platforms, I implement both cases. See
-# rc4_skey.c for further details...
-
-# The P4 EM64T core appears to be "allergic" to 64-bit inc/dec.
-# Replacing those with add/sub results in a 50% performance
-# improvement of the folded loop...
-
-# As was shown by Zou Nanhai, loop unrolling can improve Intel EM64T
-# performance by >30% [unlike the P4 32-bit case, that is]. But this is
-# provided that loads are reordered even more aggressively! Both code
-# paths, AMD64 and EM64T, reorder loads in essentially the same manner
-# as my IA-64 implementation. On Opteron this resulted in a modest 5%
-# improvement [I had to test it], while final Intel P4 performance now
-# achieves a respectable 432MBps on a 2.8GHz processor. For reference:
-# if executed on a Xeon, the current RC4_CHAR code path is 2.7x faster
-# than the RC4_INT code path, while if executed on an Opteron it's only
-# 25% slower than the RC4_INT one [meaning that if CPU µ-arch detection
-# is not implemented, this final RC4_CHAR code path should be preferred,
-# as it provides better *all-round* performance].
-
-$output=shift;
-open STDOUT,"| $^X ../perlasm/x86_64-xlate.pl $output";
-
-$dat="%rdi"; # arg1
-$len="%rsi"; # arg2
-$inp="%rdx"; # arg3
-$out="%rcx"; # arg4
-
-@XX=("%r8","%r10");
-@TX=("%r9","%r11");
-$YY="%r12";
-$TY="%r13";
-
-$code=<<___;
-.text
-
-.globl RC4
-.type RC4,\@function,4
-.align 16
-RC4: or $len,$len
- jne .Lentry
- ret
-.Lentry:
- push %r12
- push %r13
-
- add \$8,$dat
- movl -8($dat),$XX[0]#d
- movl -4($dat),$YY#d
- cmpl \$-1,256($dat)
- je .LRC4_CHAR
- inc $XX[0]#b
- movl ($dat,$XX[0],4),$TX[0]#d
- test \$-8,$len
- jz .Lloop1
- jmp .Lloop8
-.align 16
-.Lloop8:
-___
-for ($i=0;$i<8;$i++) {
-$code.=<<___;
- add $TX[0]#b,$YY#b
- mov $XX[0],$XX[1]
- movl ($dat,$YY,4),$TY#d
- ror \$8,%rax # ror is redundant when $i=0
- inc $XX[1]#b
- movl ($dat,$XX[1],4),$TX[1]#d
- cmp $XX[1],$YY
- movl $TX[0]#d,($dat,$YY,4)
- cmove $TX[0],$TX[1]
- movl $TY#d,($dat,$XX[0],4)
- add $TX[0]#b,$TY#b
- movb ($dat,$TY,4),%al
-___
-push(@TX,shift(@TX)); push(@XX,shift(@XX)); # "rotate" registers
-}
-$code.=<<___;
- ror \$8,%rax
- sub \$8,$len
-
- xor ($inp),%rax
- add \$8,$inp
- mov %rax,($out)
- add \$8,$out
-
- test \$-8,$len
- jnz .Lloop8
- cmp \$0,$len
- jne .Lloop1
-___
-$code.=<<___;
-.Lexit:
- sub \$1,$XX[0]#b
- movl $XX[0]#d,-8($dat)
- movl $YY#d,-4($dat)
-
- pop %r13
- pop %r12
- ret
-.align 16
-.Lloop1:
- add $TX[0]#b,$YY#b
- movl ($dat,$YY,4),$TY#d
- movl $TX[0]#d,($dat,$YY,4)
- movl $TY#d,($dat,$XX[0],4)
- add $TY#b,$TX[0]#b
- inc $XX[0]#b
- movl ($dat,$TX[0],4),$TY#d
- movl ($dat,$XX[0],4),$TX[0]#d
- xorb ($inp),$TY#b
- inc $inp
- movb $TY#b,($out)
- inc $out
- dec $len
- jnz .Lloop1
- jmp .Lexit
-
-.align 16
-.LRC4_CHAR:
- add \$1,$XX[0]#b
- movzb ($dat,$XX[0]),$TX[0]#d
- test \$-8,$len
- jz .Lcloop1
- push %rbx
- jmp .Lcloop8
-.align 16
-.Lcloop8:
- mov ($inp),%eax
- mov 4($inp),%ebx
-___
-# unroll 2x4-wise, because 64-bit rotates kill Intel P4...
-for ($i=0;$i<4;$i++) {
-$code.=<<___;
- add $TX[0]#b,$YY#b
- lea 1($XX[0]),$XX[1]
- movzb ($dat,$YY),$TY#d
- movzb $XX[1]#b,$XX[1]#d
- movzb ($dat,$XX[1]),$TX[1]#d
- movb $TX[0]#b,($dat,$YY)
- cmp $XX[1],$YY
- movb $TY#b,($dat,$XX[0])
- jne .Lcmov$i # Intel cmov is sloooow...
- mov $TX[0],$TX[1]
-.Lcmov$i:
- add $TX[0]#b,$TY#b
- xor ($dat,$TY),%al
- ror \$8,%eax
-___
-push(@TX,shift(@TX)); push(@XX,shift(@XX)); # "rotate" registers
-}
-for ($i=4;$i<8;$i++) {
-$code.=<<___;
- add $TX[0]#b,$YY#b
- lea 1($XX[0]),$XX[1]
- movzb ($dat,$YY),$TY#d
- movzb $XX[1]#b,$XX[1]#d
- movzb ($dat,$XX[1]),$TX[1]#d
- movb $TX[0]#b,($dat,$YY)
- cmp $XX[1],$YY
- movb $TY#b,($dat,$XX[0])
- jne .Lcmov$i # Intel cmov is sloooow...
- mov $TX[0],$TX[1]
-.Lcmov$i:
- add $TX[0]#b,$TY#b
- xor ($dat,$TY),%bl
- ror \$8,%ebx
-___
-push(@TX,shift(@TX)); push(@XX,shift(@XX)); # "rotate" registers
-}
-$code.=<<___;
- lea -8($len),$len
- mov %eax,($out)
- lea 8($inp),$inp
- mov %ebx,4($out)
- lea 8($out),$out
-
- test \$-8,$len
- jnz .Lcloop8
- pop %rbx
- cmp \$0,$len
- jne .Lcloop1
- jmp .Lexit
-___
-$code.=<<___;
-.align 16
-.Lcloop1:
- add $TX[0]#b,$YY#b
- movzb ($dat,$YY),$TY#d
- movb $TX[0]#b,($dat,$YY)
- movb $TY#b,($dat,$XX[0])
- add $TX[0]#b,$TY#b
- add \$1,$XX[0]#b
- movzb ($dat,$TY),$TY#d
- movzb ($dat,$XX[0]),$TX[0]#d
- xorb ($inp),$TY#b
- lea 1($inp),$inp
- movb $TY#b,($out)
- lea 1($out),$out
- sub \$1,$len
- jnz .Lcloop1
- jmp .Lexit
-.size RC4,.-RC4
-___
-
-$code =~ s/#([bwd])/$1/gm;
-
-print $code;
-
-close STDOUT;
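Both x86 modules above choose between the two schedule layouts at run time by probing the 32-bit word at byte offset 256 into the key schedule: that is what cmp(&DWP(256,$d),-1) in rc4-586.pl and cmpl \$-1,256($dat) in rc4-x86_64.pl test. With the word-wide RC4_INT layout that word is a schedule element in 0..255 and can never be -1; when RC4_set_key (0.9.8 and later, see rc4_skey.c) emits the compressed byte-wide schedule, it plants -1 right past the 256 key bytes. A hedged C sketch of the dispatch (the struct mirrors the usual RC4_KEY shape, and the two *_path prototypes are stand-ins for the generated code paths, not real OpenSSL symbols):

    #include <stddef.h>
    #include <stdint.h>

    typedef struct {
        uint32_t x, y;                 /* key->x, key->y                  */
        uint32_t data[256 + 4];        /* RC4_INT schedule, or 256 packed
                                        * bytes followed by a -1 marker   */
    } rc4_key_sketch;

    /* stand-ins for the two assembler code paths */
    void rc4_int_path(uint32_t *sched, uint32_t *x, uint32_t *y, size_t len,
                      const unsigned char *inp, unsigned char *out);
    void rc4_char_path(unsigned char *sched, uint32_t *x, uint32_t *y,
                       size_t len, const unsigned char *inp,
                       unsigned char *out);

    void rc4_dispatch(rc4_key_sketch *key, size_t len,
                      const unsigned char *inp, unsigned char *out)
    {
        /* dword at byte offset 256 == data[64] in the word-wide layout */
        if (key->data[256 / sizeof(uint32_t)] == (uint32_t)-1)
            rc4_char_path((unsigned char *)key->data, &key->x, &key->y,
                          len, inp, out);    /* compressed, P4-friendly  */
        else
            rc4_int_path(key->data, &key->x, &key->y,
                         len, inp, out);     /* classic 8-byte unrolled  */
    }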