# ====================================================================
# Written by Andy Polyakov <appro@openssl.org> for the OpenSSL
# project. The module is, however, dual licensed under OpenSSL and
# CRYPTOGAMS licenses depending on where you obtain it. For further
# details see http://www.openssl.org/~appro/cryptogams/.
# ====================================================================
# Montgomery multiplication routine for x86_64. While it gives a modest
# 9% improvement for rsa4096 sign on Opteron, rsa512 sign runs more
# than twice as fast. The most common case, rsa1024 sign, is improved by
# a respectable 50%. It remains to be seen whether loop unrolling and
# a dedicated squaring routine can provide further improvement...
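#
# For orientation, the word-level algorithm implemented below is the
# classic interleaved multiply-and-reduce Montgomery loop. A hedged,
# illustrative C sketch follows; the names, the unsigned __int128
# accumulator and the loop layout are expository assumptions only,
# not part of this module. n0 denotes -np[0]^-1 mod 2^64, tp[] is a
# num+1 limb temporary that starts out as zero, and a final
# conditional subtraction brings the result into [0,np):
#
#	for (i = 0; i < num; i++) {
#		unsigned __int128 t = (unsigned __int128)ap[0]*bp[i] + tp[0];
#		uint64_t m  = (uint64_t)t*n0;		/* low limb will vanish */
#		uint64_t ca = (uint64_t)(t >> 64);	/* ap[]*bp[i] carry chain */
#		t  = (unsigned __int128)m*np[0] + (uint64_t)t;
#		uint64_t cn = (uint64_t)(t >> 64);	/* m*np[] carry chain */
#		for (j = 1; j < num; j++) {
#			t  = (unsigned __int128)ap[j]*bp[i] + ca + tp[j];
#			ca = (uint64_t)(t >> 64);
#			t  = (unsigned __int128)m*np[j] + cn + (uint64_t)t;
#			cn = (uint64_t)(t >> 64);
#			tp[j-1] = (uint64_t)t;
#		}
#		t = (unsigned __int128)ca + cn + tp[num];
#		tp[num-1] = (uint64_t)t;
#		tp[num]   = (uint64_t)(t >> 64);	/* upmost overflow bit */
#	}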
# Add a dedicated squaring procedure. Performance improvement varies
# from platform to platform, but on average it's ~5%/15%/25%/33%
# for 512-/1024-/2048-/4096-bit RSA *sign* benchmarks respectively.
# Unroll and modulo-schedule the inner loops in such a manner that they
# are "fallen through" for input lengths of 8, which is critical for
# 1024-bit RSA *sign*. The average performance improvement in comparison
# to the *initial* version of this module from 2005 is ~0%/30%/40%/45%
# for 512-/1024-/2048-/4096-bit RSA *sign* benchmarks respectively.
# Optimize the reduction in the squaring procedure and improve 1024+-bit
# RSA sign performance by 10-16% on Intel Sandy Bridge and later
# (virtually the same on non-Intel processors).
# Add MULX/ADOX/ADCX code path.
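#
# For reference, a hedged intrinsics sketch of why MULX/ADCX/ADOX help
# (illustrative only, not part of this module): MULX produces the full
# 128-bit product without touching the flags, while ADCX propagates a
# carry through CF only and ADOX through OF only, so two addition
# chains can be kept live simultaneously between multiplications.
#
#	#include <immintrin.h>
#	/* one multiply-accumulate step feeding two independent carry chains */
#	static void mac_step(unsigned long long a, unsigned long long b,
#	                     unsigned long long *lo_acc, unsigned long long *hi_acc,
#	                     unsigned char *cf, unsigned char *of)
#	{
#		unsigned long long hi, lo = _mulx_u64(a, b, &hi);	/* MULX */
#		*cf = _addcarryx_u64(*cf, lo, *lo_acc, lo_acc);		/* ADCX-style chain */
#		*of = _addcarryx_u64(*of, hi, *hi_acc, hi_acc);		/* ADOX-style chain */
#	}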
if ($flavour =~ /\./) { $output = $flavour; undef $flavour; }
$win64=0; $win64=1 if ($flavour =~ /[nm]asm|mingw64/ || $output =~ /\.asm$/);
$0 =~ m/(.*[\/\\])[^\/\\]+$/; $dir=$1;
( $xlate="${dir}x86_64-xlate.pl" and -f $xlate ) or
( $xlate="${dir}../../perlasm/x86_64-xlate.pl" and -f $xlate) or
die "can't locate x86_64-xlate.pl";
open OUT,"| \"$^X\" $xlate $flavour $output";
if (`$ENV{CC} -Wa,-v -c -o /dev/null -x assembler /dev/null 2>&1`
=~ /GNU assembler version ([2-9]\.[0-9]+)/) {
if (!$addx && $win64 && ($flavour =~ /nasm/ || $ENV{ASM} =~ /nasm/) &&
`nasm -v 2>&1` =~ /NASM version ([2-9]\.[0-9]+)/) {
if (!$addx && $win64 && ($flavour =~ /masm/ || $ENV{ASM} =~ /ml64/) &&
`ml64 2>&1` =~ /Version ([0-9]+)\./) {
if (!$addx && `$ENV{CC} -v 2>&1` =~ /((?:^clang|LLVM) version|.*based on LLVM) ([3-9])\.([0-9]+)/) {
my $ver = $2 + $3/100.0; # 3.1->3.01, 3.10->3.10
$rp="%rdi"; # BN_ULONG *rp,
$ap="%rsi"; # const BN_ULONG *ap,
$bp="%rdx"; # const BN_ULONG *bp,
$np="%rcx"; # const BN_ULONG *np,
$n0="%r8"; # const BN_ULONG *n0,
$num="%r9"; # int num);
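#
# Taken together, the assignments above correspond to a C prototype
# along the lines of the following (return type as used by the
# OpenSSL BN code, shown here for reference only):
#
#	int bn_mul_mont(BN_ULONG *rp, const BN_ULONG *ap, const BN_ULONG *bp,
#	                const BN_ULONG *np, const BN_ULONG *n0, int num);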
.extern OPENSSL_ia32cap_P
.type bn_mul_mont,\@function,6
$code.=<<___ if ($addx);
mov OPENSSL_ia32cap_P+8(%rip),%r11d
lea (%rsp,%r10,8),%rsp # tp=alloca(8*(num+2))
and \$-1024,%rsp # minimize TLB usage
mov %r11,8(%rsp,$num,8) # tp[num+1]=%rsp
# Some OSes, *cough*-dows, insist on the stack being "wired" to
# physical memory in a strictly sequential manner, i.e. if a stack
# allocation spans two pages, then a reference to the farthest one
# can be punished with a SEGV. But page walking does good even on
# other OSes, because it guarantees that a villain thread hits
# the guard page before it can do damage to an innocent one...
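#
# A hedged C sketch of the probing idea (illustrative only; the loop
# below simply touches one word per 4K page of the new allocation):
#
#	for (p = old_sp; (p -= 4096) >= new_sp; )
#		(void)*(volatile unsigned long *)p;	/* touch each page in order */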
.byte 0x66,0x2e # predict non-taken
mov $bp,%r12 # reassign $bp
mov ($n0),$n0 # pull n0[0] value
mov ($bp),$m0 # m0=bp[0]
mulq $m0 # ap[0]*bp[0]
imulq $lo0,$m1 # "tp[0]"*n0
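# n0 is -np[0]^-1 mod 2^64, so with m1 = "tp[0]"*n0 the sum
# tp[0]+m1*np[0] comes out 0 mod 2^64 and the low limb can be
# discarded below (the Montgomery reduction step)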
add %rax,$lo0 # discarded
add $hi0,$hi1 # np[j]*m1+ap[j]*bp[0]
mov $hi1,-16(%rsp,$j,8) # tp[j-1]
mulq $m0 # ap[j]*bp[0]
mov ($ap),%rax # ap[0]
add $hi0,$hi1 # np[j]*m1+ap[j]*bp[0]
mov $hi1,-16(%rsp,$j,8) # tp[j-1]
mov $hi1,-8(%rsp,$num,8)
mov %rdx,(%rsp,$num,8) # store upmost overflow bit
mov ($bp,$i,8),$m0 # m0=bp[i]
mulq $m0 # ap[0]*bp[i]
add %rax,$lo0 # ap[0]*bp[i]+tp[0]
imulq $lo0,$m1 # tp[0]*n0
add %rax,$lo0 # discarded
mov 8(%rsp),$lo0 # tp[1]
add $lo0,$hi1 # np[j]*m1+ap[j]*bp[i]+tp[j]
mov $hi1,-16(%rsp,$j,8) # tp[j-1]
mulq $m0 # ap[j]*bp[i]
add $hi0,$lo0 # ap[j]*bp[i]+tp[j]
mov ($ap),%rax # ap[0]
add $lo0,$hi1 # np[j]*m1+ap[j]*bp[i]+tp[j]
mov $hi1,-16(%rsp,$j,8) # tp[j-1]
add $lo0,$hi1 # pull upmost overflow bit
mov $hi1,-8(%rsp,$num,8)
mov %rdx,(%rsp,$num,8) # store upmost overflow bit
xor $i,$i # i=0 and clear CF!
mov (%rsp),%rax # tp[0]
lea (%rsp),$ap # borrow ap for tp
.Lsub: sbb ($np,$i,8),%rax
mov %rax,($rp,$i,8) # rp[i]=tp[i]-np[i]
mov 8($ap,$i,8),%rax # tp[i+1]
dec $j # doesn't affect CF!
sbb \$0,%rax # handle upmost overflow bit
or $np,$ap # ap=borrow?tp:rp
.Lcopy: # copy or in-place refresh
mov $i,(%rsp,$i,8) # zap temporary vector
mov %rax,($rp,$i,8) # rp[i]=tp[i]
mov 8(%rsp,$num,8),%rsi # restore %rsp
.size bn_mul_mont,.-bn_mul_mont
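#
# The .Lsub/.Lcopy tail above is the final Montgomery step: after num
# outer iterations the temporary value t (num+1 limbs, upmost limb in
# tp[num]) satisfies 0 <= t < 2*np, so one conditional subtraction
# suffices. Hedged C sketch of the decision, with sub_words as a
# hypothetical helper returning the final borrow:
#
#	borrow = sub_words(rp, tp, np, num);	/* rp = tp - np, low limbs */
#	use_rp = (tp[num] >= borrow);		/* i.e. t >= np */
#	/* the copy loop then reads from rp or tp accordingly and zeroes tp */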
my @A=("%r10","%r11");
my @N=("%r13","%rdi");
.type bn_mul4x_mont,\@function,6
$code.=<<___ if ($addx);
lea (%rsp,%r10,8),%rsp # tp=alloca(8*(num+4))
and \$-1024,%rsp # minimize TLB usage
mov %r11,8(%rsp,$num,8) # tp[num+1]=%rsp
.byte 0x2e # predict non-taken
jnc .Lmul4x_page_walk
mov $rp,16(%rsp,$num,8) # tp[num+2]=$rp
mov %rdx,%r12 # reassign $bp
mov ($n0),$n0 # pull n0[0] value
mov ($bp),$m0 # m0=bp[0]
mulq $m0 # ap[0]*bp[0]
imulq $A[0],$m1 # "tp[0]"*n0
add %rax,$A[0] # discarded
mulq $m0 # ap[j]*bp[0]
mov -16($np,$j,8),%rax
mov -8($ap,$j,8),%rax
add $A[0],$N[0] # np[j]*m1+ap[j]*bp[0]
mov $N[0],-24(%rsp,$j,8) # tp[j-1]
mulq $m0 # ap[j]*bp[0]
mov -8($np,$j,8),%rax
add $A[1],$N[1] # np[j]*m1+ap[j]*bp[0]
mov $N[1],-16(%rsp,$j,8) # tp[j-1]
mulq $m0 # ap[j]*bp[0]
add $A[0],$N[0] # np[j]*m1+ap[j]*bp[0]
mov $N[0],-8(%rsp,$j,8) # tp[j-1]
mulq $m0 # ap[j]*bp[0]
mov -16($ap,$j,8),%rax
add $A[1],$N[1] # np[j]*m1+ap[j]*bp[0]
mov $N[1],-32(%rsp,$j,8) # tp[j-1]
mulq $m0 # ap[j]*bp[0]
mov -16($np,$j,8),%rax
mov -8($ap,$j,8),%rax
add $A[0],$N[0] # np[j]*m1+ap[j]*bp[0]
mov $N[0],-24(%rsp,$j,8) # tp[j-1]
mulq $m0 # ap[j]*bp[0]
mov -8($np,$j,8),%rax
mov ($ap),%rax # ap[0]
add $A[1],$N[1] # np[j]*m1+ap[j]*bp[0]
mov $N[1],-16(%rsp,$j,8) # tp[j-1]
mov $N[0],-8(%rsp,$j,8)
mov $N[1],(%rsp,$j,8) # store upmost overflow bit
mov ($bp,$i,8),$m0 # m0=bp[i]
mulq $m0 # ap[0]*bp[i]
add %rax,$A[0] # ap[0]*bp[i]+tp[0]
imulq $A[0],$m1 # tp[0]*n0
add %rax,$A[0] # "$N[0]", discarded
mulq $m0 # ap[j]*bp[i]
add 8(%rsp),$A[1] # +tp[1]
add $A[1],$N[1] # np[j]*m1+ap[j]*bp[i]+tp[j]
mov $N[1],(%rsp) # tp[j-1]
mulq $m0 # ap[j]*bp[i]
mov -16($np,$j,8),%rax
add -16(%rsp,$j,8),$A[0] # ap[j]*bp[i]+tp[j]
mov -8($ap,$j,8),%rax
mov $N[0],-24(%rsp,$j,8) # tp[j-1]
mulq $m0 # ap[j]*bp[i]
mov -8($np,$j,8),%rax
add -8(%rsp,$j,8),$A[1]
mov $N[1],-16(%rsp,$j,8) # tp[j-1]
mulq $m0 # ap[j]*bp[i]
add (%rsp,$j,8),$A[0] # ap[j]*bp[i]+tp[j]
mov $N[0],-8(%rsp,$j,8) # tp[j-1]
mulq $m0 # ap[j]*bp[i]
add 8(%rsp,$j,8),$A[1]
mov -16($ap,$j,8),%rax
mov $N[1],-32(%rsp,$j,8) # tp[j-1]
mulq $m0 # ap[j]*bp[i]
mov -16($np,$j,8),%rax
add -16(%rsp,$j,8),$A[0] # ap[j]*bp[i]+tp[j]
mov -8($ap,$j,8),%rax
mov $N[0],-24(%rsp,$j,8) # tp[j-1]
mulq $m0 # ap[j]*bp[i]
mov -8($np,$j,8),%rax
add -8(%rsp,$j,8),$A[1]
mov ($ap),%rax # ap[0]
mov $N[1],-16(%rsp,$j,8) # tp[j-1]
add (%rsp,$num,8),$N[0] # pull upmost overflow bit
mov $N[0],-8(%rsp,$j,8)
mov $N[1],(%rsp,$j,8) # store upmost overflow bit
my @ri=("%rax","%rdx",$m0,$m1);
mov 16(%rsp,$num,8),$rp # restore $rp
mov 0(%rsp),@ri[0] # tp[0]
mov 8(%rsp),@ri[1] # tp[1]
shr \$2,$num # num/=4
lea (%rsp),$ap # borrow ap for tp
xor $i,$i # i=0 and clear CF!
mov 16($ap),@ri[2] # tp[2]
mov 24($ap),@ri[3] # tp[3]
lea -1($num),$j # j=num/4-1
mov @ri[0],0($rp,$i,8) # rp[i]=tp[i]-np[i]
mov @ri[1],8($rp,$i,8) # rp[i]=tp[i]-np[i]
sbb 16($np,$i,8),@ri[2]
mov 32($ap,$i,8),@ri[0] # tp[i+1]
mov 40($ap,$i,8),@ri[1]
sbb 24($np,$i,8),@ri[3]
mov @ri[2],16($rp,$i,8) # rp[i]=tp[i]-np[i]
mov @ri[3],24($rp,$i,8) # rp[i]=tp[i]-np[i]
sbb 32($np,$i,8),@ri[0]
mov 48($ap,$i,8),@ri[2]
mov 56($ap,$i,8),@ri[3]
sbb 40($np,$i,8),@ri[1]
dec $j # doesn't affect CF!
mov @ri[0],0($rp,$i,8) # rp[i]=tp[i]-np[i]
mov 32($ap,$i,8),@ri[0] # load overflow bit
sbb 16($np,$i,8),@ri[2]
mov @ri[1],8($rp,$i,8) # rp[i]=tp[i]-np[i]
sbb 24($np,$i,8),@ri[3]
mov @ri[2],16($rp,$i,8) # rp[i]=tp[i]-np[i]
sbb \$0,@ri[0] # handle upmost overflow bit
mov @ri[3],24($rp,$i,8) # rp[i]=tp[i]-np[i]
or $np,$ap # ap=borrow?tp:rp
.Lcopy4x: # copy or in-place refresh
movdqu 16($ap,$i),%xmm2
movdqu 32($ap,$i),%xmm1
movdqa %xmm0,16(%rsp,$i)
movdqu %xmm2,16($rp,$i)
movdqa %xmm0,32(%rsp,$i)
movdqu %xmm1,32($rp,$i)
movdqu 16($ap,$i),%xmm2
movdqa %xmm0,16(%rsp,$i)
movdqu %xmm2,16($rp,$i)
mov 8(%rsp,$num,8),%rsi # restore %rsp
.size bn_mul4x_mont,.-bn_mul4x_mont
######################################################################
# void bn_sqr8x_mont(
my $rptr="%rdi"; # BN_ULONG *rptr,
my $aptr="%rsi"; # const BN_ULONG *aptr,
my $bptr="%rdx"; # not used
my $nptr="%rcx"; # const BN_ULONG *nptr,
my $n0 ="%r8"; # const BN_ULONG *n0,
my $num ="%r9"; # int num); has to be divisible by 8
my ($i,$j,$tptr)=("%rbp","%rcx",$rptr);
my @A0=("%r10","%r11");
my @A1=("%r12","%r13");
my ($a0,$a1,$ai)=("%r14","%r15","%rbx");
$code.=<<___ if ($addx);
.extern bn_sqrx8x_internal # see x86_64-mont5 module
.extern bn_sqr8x_internal # see x86_64-mont5 module
.type bn_sqr8x_mont,\@function,6
shl \$3,${num}d # convert $num to bytes
shl \$3+2,%r10 # 4*$num
##############################################################
# ensure that stack frame doesn't alias with $aptr modulo
# 4096. this is done to allow memory disambiguation logic
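# (the reference here is to 4K aliasing: loads and stores whose
# addresses match modulo 4096 can be treated as dependent by the
# memory disambiguation/store-forwarding logic, so keeping the
# temporary frame and the input apart avoids false stalls)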
lea -64(%rsp,$num,2),%r11
sub %r11,%rsp # align with $aptr
lea -64(%rsp,$num,2),%rsp # alloca(frame+2*$num)
lea 4096-64(,$num,2),%r10 # 4096-frame-2*$num
lea -64(%rsp,$num,2),%rsp # alloca(frame+2*$num)
.byte 0x2e # predict non-taken
jnc .Lsqr8x_page_walk
mov %rax, 40(%rsp) # save original %rsp
movq $nptr, %xmm2 # save pointer to modulus
movq $rptr,%xmm1 # save $rptr
movq %r10, %xmm3 # -$num
$code.=<<___ if ($addx);
mov OPENSSL_ia32cap_P+8(%rip),%eax
call bn_sqrx8x_internal # see x86_64-mont5 module
# %rax top-most carry
# %r8 end of tp[2*num]
sar \$3+2,%rcx # %cf=0
call bn_sqr8x_internal # see x86_64-mont5 module
# %rax top-most carry
# %rdi end of tp[2*num]
sar \$3+2,%rcx # %cf=0
inc %rcx # preserves %cf
sbb \$0,%rax # top-most carry
lea (%rbx,$num),%rbx # rewind
lea ($rptr,$num),$rptr # rewind
pshufd \$0,%xmm1,%xmm1
mov 40(%rsp),%rsi # restore %rsp
jmp .Lsqr8x_cond_copy
movdqa 16*0(%rbx),%xmm2
movdqa 16*1(%rbx),%xmm3
movdqu 16*0($rptr),%xmm4
movdqu 16*1($rptr),%xmm5
lea 16*2($rptr),$rptr
movdqa %xmm0,-16*2(%rbx) # zero tp
movdqa %xmm0,-16*1(%rbx)
movdqa %xmm0,-16*2(%rbx,%rdx)
movdqa %xmm0,-16*1(%rbx,%rdx)
movdqu %xmm4,-16*2($rptr)
movdqu %xmm5,-16*1($rptr)
jnz .Lsqr8x_cond_copy
.size bn_sqr8x_mont,.-bn_sqr8x_mont
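#
# .Lsqr8x_cond_copy above selects between the reduced value and its
# np-subtracted form without a data-dependent branch: the top-most
# carry is broadcast into an all-zeros/all-ones xmm mask and the two
# candidates are blended 16 bytes at a time while the temporary vector
# is wiped. A hedged scalar equivalent for one limb (names and mask
# polarity shown for illustration only):
#
#	mask     = (BN_ULONG)0 - carry;		/* 0 or all-ones */
#	out_limb = (tmp_limb & mask) | (sub_limb & ~mask);
#	tmp_limb = 0;				/* zap temporary vector */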
my $bp="%rdx"; # original value
.type bn_mulx4x_mont,\@function,6
shl \$3,${num}d # convert $num to bytes
sub $num,%r10 # -$num
lea -72(%rsp,%r10),%rsp # alloca(frame+$num+8)
.byte 0x66,0x2e # predict non-taken
jnc .Lmulx4x_page_walk
##############################################################
# +8 off-loaded &b[i]
mov $num,0(%rsp) # save $num
mov %r10,16(%rsp) # end of b[num]
mov $n0, 24(%rsp) # save *n0
mov $rp, 32(%rsp) # save $rp
mov %rax,40(%rsp) # save original %rsp
mov $num,48(%rsp) # inner counter
my ($aptr, $bptr, $nptr, $tptr, $mi, $bi, $zero, $num)=
("%rsi","%rdi","%rcx","%rbx","%r8","%r9","%rbp","%rax");
mov ($bp),%rdx # b[0], $bp==%rdx actually
lea 64+32(%rsp),$tptr
mulx 0*8($aptr),$mi,%rax # a[0]*b[0]
mulx 1*8($aptr),%r11,%r14 # a[1]*b[0]
mov $bptr,8(%rsp) # off-load &b[i]
mulx 2*8($aptr),%r12,%r13 # ...
mov $mi,$bptr # borrow $bptr
imulq 24(%rsp),$mi # "t[0]"*n0
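# same reduction trick as in the scalar path: with n0 = -np[0]^-1 mod 2^64,
# mi = "t[0]"*n0 makes t[0]+mi*np[0] come out 0 mod 2^64, so the low
# limb is discarded below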
xor $zero,$zero # cf=0, of=0
mulx 3*8($aptr),%rax,%r14
lea 4*8($aptr),$aptr
adcx $zero,%r14 # cf=0
mulx 0*8($nptr),%rax,%r10
adcx %rax,$bptr # discarded
mulx 1*8($nptr),%rax,%r11
.byte 0xc4,0x62,0xfb,0xf6,0xa1,0x10,0x00,0x00,0x00 # mulx 2*8($nptr),%rax,%r12
mov 48(%rsp),$bptr # counter value
mov %r10,-4*8($tptr)
mulx 3*8($nptr),%rax,%r15
mov %r11,-3*8($tptr)
adox $zero,%r15 # of=0
lea 4*8($nptr),$nptr
mov %r12,-2*8($tptr)
adcx $zero,%r15 # cf=0, modulo-scheduled
mulx 0*8($aptr),%r10,%rax # a[4]*b[0]
mulx 1*8($aptr),%r11,%r14 # a[5]*b[0]
mulx 2*8($aptr),%r12,%rax # ...
mulx 3*8($aptr),%r13,%r14
adcx $zero,%r14 # cf=0
lea 4*8($aptr),$aptr
lea 4*8($tptr),$tptr
mulx 0*8($nptr),%rax,%r15
mulx 1*8($nptr),%rax,%r15
mulx 2*8($nptr),%rax,%r15
mov %r10,-5*8($tptr)
mov %r11,-4*8($tptr)
mulx 3*8($nptr),%rax,%r15
mov %r12,-3*8($tptr)
lea 4*8($nptr),$nptr
mov %r13,-2*8($tptr)
dec $bptr # of=0, pass cf
mov 0(%rsp),$num # load num
mov 8(%rsp),$bptr # re-load &b[i]
adc $zero,%r15 # modulo-scheduled
sbb %r15,%r15 # top-most carry
mov %r14,-1*8($tptr)
mov ($bptr),%rdx # b[i]
lea 8($bptr),$bptr # b++
sub $num,$aptr # rewind $aptr
mov %r15,($tptr) # save top-most carry
lea 64+4*8(%rsp),$tptr
sub $num,$nptr # rewind $nptr
mulx 0*8($aptr),$mi,%r11 # a[0]*b[i]
xor %ebp,%ebp # xor $zero,$zero # cf=0, of=0
mulx 1*8($aptr),%r14,%r12 # a[1]*b[i]
adox -4*8($tptr),$mi
mulx 2*8($aptr),%r15,%r13 # ...
adox -3*8($tptr),%r11
mov $bptr,8(%rsp) # off-load &b[i]
imulq 24(%rsp),$mi # "t[0]"*n0
xor %ebp,%ebp # xor $zero,$zero # cf=0, of=0
mulx 3*8($aptr),%rax,%r14
adox -2*8($tptr),%r12
adox -1*8($tptr),%r13
lea 4*8($aptr),$aptr
mulx 0*8($nptr),%rax,%r10
adcx %rax,%r15 # discarded
mulx 1*8($nptr),%rax,%r11
mulx 2*8($nptr),%rax,%r12
mov %r10,-4*8($tptr)
mulx 3*8($nptr),%rax,%r15
mov %r11,-3*8($tptr)
lea 4*8($nptr),$nptr
adox $zero,%r15 # of=0
mov 48(%rsp),$bptr # counter value
mov %r12,-2*8($tptr)
mulx 0*8($aptr),%r10,%rax # a[4]*b[i]
adcx $zero,%r15 # cf=0, modulo-scheduled
mulx 1*8($aptr),%r11,%r14 # a[5]*b[i]
adcx 0*8($tptr),%r10
mulx 2*8($aptr),%r12,%rax # ...
adcx 1*8($tptr),%r11
mulx 3*8($aptr),%r13,%r14
adcx 2*8($tptr),%r12
adcx 3*8($tptr),%r13
adox $zero,%r14 # of=0
lea 4*8($aptr),$aptr
lea 4*8($tptr),$tptr
adcx $zero,%r14 # cf=0
mulx 0*8($nptr),%rax,%r15
mulx 1*8($nptr),%rax,%r15
mulx 2*8($nptr),%rax,%r15
mov %r10,-5*8($tptr)
mulx 3*8($nptr),%rax,%r15
mov %r11,-4*8($tptr)
mov %r12,-3*8($tptr)
lea 4*8($nptr),$nptr
mov %r13,-2*8($tptr)
dec $bptr # of=0, pass cf
mov 0(%rsp),$num # load num
mov 8(%rsp),$bptr # re-load &b[i]
adc $zero,%r15 # modulo-scheduled
sub 0*8($tptr),$zero # pull top-most carry
sbb %r15,%r15 # top-most carry
mov %r14,-1*8($tptr)
sub $num,$nptr # rewind $nptr
shr \$3+2,$num # %cf=0
mov 32(%rsp),$rptr # restore rp
lea 8*4($tptr),$tptr
lea 8*4($nptr),$nptr
lea 8*4($rptr),$rptr
dec $num # preserves %cf
sbb \$0,%r15 # top-most carry
sub %rdx,$rptr # rewind
pshufd \$0,%xmm1,%xmm1
mov 40(%rsp),%rsi # restore %rsp
jmp .Lmulx4x_cond_copy
movdqa 16*0($tptr),%xmm2
movdqa 16*1($tptr),%xmm3
lea 16*2($tptr),$tptr
movdqu 16*0($rptr),%xmm4
movdqu 16*1($rptr),%xmm5
lea 16*2($rptr),$rptr
movdqa %xmm0,-16*2($tptr) # zero tp
movdqa %xmm0,-16*1($tptr)
movdqu %xmm4,-16*2($rptr)
movdqu %xmm5,-16*1($rptr)
jnz .Lmulx4x_cond_copy
.size bn_mulx4x_mont,.-bn_mulx4x_mont
.asciz "Montgomery Multiplication for x86_64, CRYPTOGAMS by <appro\@openssl.org>"
# EXCEPTION_DISPOSITION handler (EXCEPTION_RECORD *rec,ULONG64 frame,
# CONTEXT *context,DISPATCHER_CONTEXT *disp)
.extern __imp_RtlVirtualUnwind
.type mul_handler,\@abi-omnipotent
mov 120($context),%rax # pull context->Rax
mov 248($context),%rbx # pull context->Rip
mov 8($disp),%rsi # disp->ImageBase
mov 56($disp),%r11 # disp->HandlerData
mov 0(%r11),%r10d # HandlerData[0]
lea (%rsi,%r10),%r10 # end of prologue label
cmp %r10,%rbx # context->Rip<end of prologue label
jb .Lcommon_seh_tail
mov 152($context),%rax # pull context->Rsp
mov 4(%r11),%r10d # HandlerData[1]
lea (%rsi,%r10),%r10 # epilogue label
cmp %r10,%rbx # context->Rip>=epilogue label
jae .Lcommon_seh_tail
mov 192($context),%r10 # pull $num
mov 8(%rax,%r10,8),%rax # pull saved stack pointer
mov %rbx,144($context) # restore context->Rbx
mov %rbp,160($context) # restore context->Rbp
mov %r12,216($context) # restore context->R12
mov %r13,224($context) # restore context->R13
mov %r14,232($context) # restore context->R14
mov %r15,240($context) # restore context->R15
jmp .Lcommon_seh_tail
.size mul_handler,.-mul_handler
.type sqr_handler,\@abi-omnipotent
mov 120($context),%rax # pull context->Rax
mov 248($context),%rbx # pull context->Rip
mov 8($disp),%rsi # disp->ImageBase
mov 56($disp),%r11 # disp->HandlerData
mov 0(%r11),%r10d # HandlerData[0]
lea (%rsi,%r10),%r10 # end of prologue label
cmp %r10,%rbx # context->Rip<.Lsqr_body
jb .Lcommon_seh_tail
mov 152($context),%rax # pull context->Rsp
mov 4(%r11),%r10d # HandlerData[1]
lea (%rsi,%r10),%r10 # epilogue label
cmp %r10,%rbx # context->Rip>=.Lsqr_epilogue
jae .Lcommon_seh_tail
mov 40(%rax),%rax # pull saved stack pointer
mov %rbx,144($context) # restore context->Rbx
mov %rbp,160($context) # restore context->Rbp
mov %r12,216($context) # restore context->R12
mov %r13,224($context) # restore context->R13
mov %r14,232($context) # restore context->R14
mov %r15,240($context) # restore context->R15
mov %rax,152($context) # restore context->Rsp
mov %rsi,168($context) # restore context->Rsi
mov %rdi,176($context) # restore context->Rdi
mov 40($disp),%rdi # disp->ContextRecord
mov $context,%rsi # context
mov \$154,%ecx # sizeof(CONTEXT)
.long 0xa548f3fc # cld; rep movsq
xor %rcx,%rcx # arg1, UNW_FLAG_NHANDLER
mov 8(%rsi),%rdx # arg2, disp->ImageBase
mov 0(%rsi),%r8 # arg3, disp->ControlPc
mov 16(%rsi),%r9 # arg4, disp->FunctionEntry
mov 40(%rsi),%r10 # disp->ContextRecord
lea 56(%rsi),%r11 # &disp->HandlerData
lea 24(%rsi),%r12 # &disp->EstablisherFrame
mov %r10,32(%rsp) # arg5
mov %r11,40(%rsp) # arg6
mov %r12,48(%rsp) # arg7
mov %rcx,56(%rsp) # arg8, (NULL)
call *__imp_RtlVirtualUnwind(%rip)
mov \$1,%eax # ExceptionContinueSearch
.size sqr_handler,.-sqr_handler
.rva .LSEH_begin_bn_mul_mont
.rva .LSEH_end_bn_mul_mont
.rva .LSEH_info_bn_mul_mont
.rva .LSEH_begin_bn_mul4x_mont
.rva .LSEH_end_bn_mul4x_mont
.rva .LSEH_info_bn_mul4x_mont
.rva .LSEH_begin_bn_sqr8x_mont
.rva .LSEH_end_bn_sqr8x_mont
.rva .LSEH_info_bn_sqr8x_mont
$code.=<<___ if ($addx);
.rva .LSEH_begin_bn_mulx4x_mont
.rva .LSEH_end_bn_mulx4x_mont
.rva .LSEH_info_bn_mulx4x_mont
.LSEH_info_bn_mul_mont:
.rva .Lmul_body,.Lmul_epilogue # HandlerData[]
.LSEH_info_bn_mul4x_mont:
.rva .Lmul4x_body,.Lmul4x_epilogue # HandlerData[]
.LSEH_info_bn_sqr8x_mont:
.rva .Lsqr8x_body,.Lsqr8x_epilogue # HandlerData[]
$code.=<<___ if ($addx);
.LSEH_info_bn_mulx4x_mont:
.rva .Lmulx4x_body,.Lmulx4x_epilogue # HandlerData[]