#!/usr/bin/env perl
#
# Copyright (c) 2010-2011 Intel Corp.
#   Author: Vinodh.Gopal@intel.com
#           Jim Guilford
#           Erdinc.Ozturk@intel.com
#           Maxim.Perminov@intel.com
#
# More information about the algorithm used can be found at:
#   http://www.cse.buffalo.edu/srds2009/escs2009_submission_Gopal.pdf
#
# ====================================================================
# Copyright (c) 2011 The OpenSSL Project.  All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions
# are met:
#
# 1. Redistributions of source code must retain the above copyright
#    notice, this list of conditions and the following disclaimer.
#
# 2. Redistributions in binary form must reproduce the above copyright
#    notice, this list of conditions and the following disclaimer in
#    the documentation and/or other materials provided with the
#    distribution.
#
# 3. All advertising materials mentioning features or use of this
#    software must display the following acknowledgment:
#    "This product includes software developed by the OpenSSL Project
#    for use in the OpenSSL Toolkit. (http://www.OpenSSL.org/)"
#
# 4. The names "OpenSSL Toolkit" and "OpenSSL Project" must not be used to
#    endorse or promote products derived from this software without
#    prior written permission. For written permission, please contact
#    licensing@OpenSSL.org.
#
# 5. Products derived from this software may not be called "OpenSSL"
#    nor may "OpenSSL" appear in their names without prior written
#    permission of the OpenSSL Project.
#
# 6. Redistributions of any form whatsoever must retain the following
#    acknowledgment:
#    "This product includes software developed by the OpenSSL Project
#    for use in the OpenSSL Toolkit (http://www.OpenSSL.org/)"
#
# THIS SOFTWARE IS PROVIDED BY THE OpenSSL PROJECT ``AS IS'' AND ANY
# EXPRESSED OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
# PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE OpenSSL PROJECT OR
# ITS CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
# NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
# HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
# STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED
# OF THE POSSIBILITY OF SUCH DAMAGE.
# ====================================================================

$flavour = shift;
$output  = shift;
if ($flavour =~ /\./) { $output = $flavour; undef $flavour; }

my $win64=0; $win64=1 if ($flavour =~ /[nm]asm|mingw64/ || $output =~ /\.asm$/);

$0 =~ m/(.*[\/\\])[^\/\\]+$/; $dir=$1;
( $xlate="${dir}x86_64-xlate.pl" and -f $xlate ) or
( $xlate="${dir}../../perlasm/x86_64-xlate.pl" and -f $xlate) or
die "can't locate x86_64-xlate.pl";

open OUT,"| \"$^X\" $xlate $flavour $output";
*STDOUT=*OUT;

use strict;
my $code=".text\n\n";
my $m=0;

#
# Define x512 macros
#

#MULSTEP_512_ADD	MACRO	x7, x6, x5, x4, x3, x2, x1, x0, dst, src1, src2, add_src, tmp1, tmp2
#
# uses rax, rdx, and args
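#
# Illustrative only (not emitted code): a C-style sketch of what one
# expansion of MULSTEP_512_ADD computes; "UINT128" is just a stand-in for
# a 128-bit accumulator and is not a type used by this file:
#
#	acc = (UINT128)op * src2[0] + add_src[0];
#	*dst = (UINT64)acc;			/* limb 0 goes to memory   */
#	for (i = 1; i < 8; i++) {
#		acc = (acc >> 64) + (UINT128)op * src2[i] + add_src[i];
#		X[i] = (UINT64)acc;		/* limbs 1..7 stay in regs */
#	}
#	X[0] = (UINT64)(acc >> 64);		/* limb 8, the carry out   */
#
# MULSTEP_512 below is the same step without the add_src term.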
sub MULSTEP_512_ADD
{
 my ($x, $DST, $SRC2, $ASRC, $OP, $TMP)=@_;
 my @X=@$x;	# make a copy
$code.=<<___;
	 mov	(+8*0)($SRC2), %rax
	 mul	$OP			# rdx:rax = %OP * [0]
	 mov	($ASRC), $X[0]
	 add	%rax, $X[0]
	 adc	\$0, %rdx
	 mov	$X[0], $DST
___
for(my $i=1;$i<8;$i++) {
$code.=<<___;
	 mov	%rdx, $TMP

	 mov	(+8*$i)($SRC2), %rax
	 mul	$OP			# rdx:rax = %OP * [$i]
	 mov	(+8*$i)($ASRC), $X[$i]
	 add	%rax, $X[$i]
	 adc	\$0, %rdx
	 add	$TMP, $X[$i]
	 adc	\$0, %rdx
___
}
$code.=<<___;
	 mov	%rdx, $X[0]
___
}

#MULSTEP_512	MACRO	x7, x6, x5, x4, x3, x2, x1, x0, dst, src2, src1_val, tmp
#
# uses rax, rdx, and args
sub MULSTEP_512
{
 my ($x, $DST, $SRC2, $OP, $TMP)=@_;
 my @X=@$x;	# make a copy
$code.=<<___;
	 mov	(+8*0)($SRC2), %rax
	 mul	$OP			# rdx:rax = %OP * [0]
	 add	%rax, $X[0]
	 adc	\$0, %rdx
	 mov	$X[0], $DST
___
for(my $i=1;$i<8;$i++) {
$code.=<<___;
	 mov	%rdx, $TMP

	 mov	(+8*$i)($SRC2), %rax
	 mul	$OP			# rdx:rax = %OP * [$i]
	 add	%rax, $X[$i]
	 adc	\$0, %rdx
	 add	$TMP, $X[$i]
	 adc	\$0, %rdx
___
}
$code.=<<___;
	 mov	%rdx, $X[0]
___
}

#
# Swizzle Macros
#

# macro to copy data from flat space to swizzled table
#MACRO swizzle	pDst, pSrc, tmp1, tmp2
# pDst and pSrc are modified
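#
# The table layout this produces, sketched in C (illustrative only; table
# entry number e has base address table + 2*e, see the "pg += 2" stepping
# in the main function):
#
#	for (j = 0; j < 8; j++)			/* qword index       */
#		for (k = 0; k < 4; k++)		/* 16-bit word index */
#			*(UINT16 *)(pDst + 256*j + 64*k) = (UINT16)(src[j] >> 16*k);
#
# Spreading each entry out in 16-bit slices means all 32 table entries share
# the same cache lines, presumably so that table lookups do not leak the
# entry index through the data cache.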
sub swizzle
{
 my ($pDst, $pSrc, $cnt, $d0)=@_;
$code.=<<___;
	 mov	\$8, $cnt
loop_$m:
	 mov	($pSrc), $d0
	 mov	$d0#w, ($pDst)
	 shr	\$16, $d0
	 mov	$d0#w, (+64*1)($pDst)
	 shr	\$16, $d0
	 mov	$d0#w, (+64*2)($pDst)
	 shr	\$16, $d0
	 mov	$d0#w, (+64*3)($pDst)
	 lea	8($pSrc), $pSrc
	 lea	64*4($pDst), $pDst
	 dec	$cnt
	 jnz	loop_$m
___

 $m++;
}

# macro to copy data from swizzled table to flat space
#MACRO unswizzle	pDst, pSrc, tmp*3
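#
# Inverse of swizzle above; in effect (illustrative C only, the generated
# code below gathers two qwords per loop iteration):
#
#	for (j = 0; j < 8; j++) {
#		UINT64 v = 0;
#		for (k = 0; k < 4; k++)
#			v |= (UINT64)*(UINT16 *)(pSrc + 256*j + 64*k) << 16*k;
#		pDst[j] = v;
#	}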
sub unswizzle
{
 my ($pDst, $pSrc, $cnt, $d0, $d1)=@_;
$code.=<<___;
	 mov	\$4, $cnt
loop_$m:
	 movzxw	(+64*3+256*0)($pSrc), $d0
	 movzxw	(+64*3+256*1)($pSrc), $d1
	 shl	\$16, $d0
	 shl	\$16, $d1
	 mov	(+64*2+256*0)($pSrc), $d0#w
	 mov	(+64*2+256*1)($pSrc), $d1#w
	 shl	\$16, $d0
	 shl	\$16, $d1
	 mov	(+64*1+256*0)($pSrc), $d0#w
	 mov	(+64*1+256*1)($pSrc), $d1#w
	 shl	\$16, $d0
	 shl	\$16, $d1
	 mov	(+64*0+256*0)($pSrc), $d0#w
	 mov	(+64*0+256*1)($pSrc), $d1#w
	 mov	$d0, (+8*0)($pDst)
	 mov	$d1, (+8*1)($pDst)
	 lea	256*2($pSrc), $pSrc
	 lea	8*2($pDst), $pDst
	 sub	\$1, $cnt
	 jnz	loop_$m
___

 $m++;
}

#
# Data Structures
#

# Reduce Data
#
#
# Offset  Value
# 0C0     Carries
# 0B8     X2[10]
# 0B0     X2[9]
# 0A8     X2[8]
# 0A0     X2[7]
# 098     X2[6]
# 090     X2[5]
# 088     X2[4]
# 080     X2[3]
# 078     X2[2]
# 070     X2[1]
# 068     X2[0]
# 060     X1[12]  P[10]
# 058     X1[11]  P[9]  Z[8]
# 050     X1[10]  P[8]  Z[7]
# 048     X1[9]   P[7]  Z[6]
# 040     X1[8]   P[6]  Z[5]
# 038     X1[7]   P[5]  Z[4]
# 030     X1[6]   P[4]  Z[3]
# 028     X1[5]   P[3]  Z[2]
# 020     X1[4]   P[2]  Z[1]
# 018     X1[3]   P[1]  Z[0]
# 010     X1[2]   P[0]  Y[2]
# 008     X1[1]   Q[1]  Y[1]
# 000     X1[0]   Q[0]  Y[0]

my $X1_offset           =  0;			# 13 qwords
my $X2_offset           =  $X1_offset + 13*8;			# 11 qwords
my $Carries_offset      =  $X2_offset + 11*8;			# 1 qword
my $Q_offset            =  0;			# 2 qwords
my $P_offset            =  $Q_offset + 2*8;			# 11 qwords
my $Y_offset            =  0;			# 3 qwords
my $Z_offset            =  $Y_offset + 3*8;			# 9 qwords

my $Red_Data_Size       =  $Carries_offset + 1*8;			# (25 qwords)

#
# Stack Frame
#
#
# offset	value
# ...		<old stack contents>
# ...
# 280		Garray

# 278		tmp16[15]
# ...		...
# 200		tmp16[0]

# 1F8		tmp[7]
# ...		...
# 1C0		tmp[0]

# 1B8		GT[7]
# ...		...
# 180		GT[0]

# 178		Reduce Data
# ...		...
# 0B8		Reduce Data
# 0B0		reserved
# 0A8		reserved
# 0A0		reserved
# 098		reserved
# 090		reserved
# 088		reduce result addr
# 080		exp[8]

# ...
# 048		exp[1]
# 040		exp[0]

# 038		reserved
# 030		loop_idx
# 028		pg
# 020		i
# 018		pData	; arg 4
# 010		pG	; arg 2
# 008		pResult	; arg 1
# 000		rsp	; stack pointer before subtract

my $rsp_offset          =  0;
my $pResult_offset      =  8*1 + $rsp_offset;
my $pG_offset           =  8*1 + $pResult_offset;
my $pData_offset        =  8*1 + $pG_offset;
my $i_offset            =  8*1 + $pData_offset;
my $pg_offset           =  8*1 + $i_offset;
my $loop_idx_offset     =  8*1 + $pg_offset;
my $reserved1_offset    =  8*1 + $loop_idx_offset;
my $exp_offset          =  8*1 + $reserved1_offset;
my $red_result_addr_offset=  8*9 + $exp_offset;
my $reserved2_offset    =  8*1 + $red_result_addr_offset;
my $Reduce_Data_offset  =  8*5 + $reserved2_offset;
my $GT_offset           =  $Red_Data_Size + $Reduce_Data_offset;
my $tmp_offset          =  8*8 + $GT_offset;
my $tmp16_offset        =  8*8 + $tmp_offset;
my $garray_offset       =  8*16 + $tmp16_offset;
my $mem_size            =  8*8*32 + $garray_offset;

#
# Offsets within Reduce Data
#
#
#	struct MODF_2FOLD_MONT_512_C1_DATA {
#	UINT64 t[8][8];
#	UINT64 m[8];
#	UINT64 m1[8]; /* 2^768 % m */
#	UINT64 m2[8]; /* 2^640 % m */
#	UINT64 k1[2]; /* (- 1/m) % 2^128 */
#	};

my $T                   =  0;
my $M                   =  512;			# = 8 * 8 * 8
my $M1                  =  576;			# = 8 * 8 * 9 /* += 8 * 8 */
my $M2                  =  640;			# = 8 * 8 * 10 /* += 8 * 8 */
my $K1                  =  704;			# = 8 * 8 * 11 /* += 8 * 8 */

#
#   FUNCTIONS
#

{{{
#
# MULADD_128x512 : Function to multiply 128-bits (2 qwords) by 512-bits (8 qwords)
#                       and add 512-bits (8 qwords)
#                       to get 640 bits (10 qwords)
# Input: 128-bit mul source: [rdi+8*1], rbp
#        512-bit mul source: [rsi+8*n]
#        512-bit add source: r15, r14, ..., r9, r8
# Output: r9, r8, r15, r14, r13, r12, r11, r10, [rcx+8*1], [rcx+8*0]
# Clobbers all regs except: rcx, rsi, rdi
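#
# In effect (illustrative; the two chained MULSTEP_512 expansions below do
# the carry handling):
#
#	result(640) = mul_src(128) * [rsi](512) + add_src(512)
#
# with result limbs 1:0 stored at [rcx] and limbs 9:2 left in the rotated
# register set.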
$code.=<<___;
.type	MULADD_128x512,\@abi-omnipotent
.align	16
MULADD_128x512:
___
	&MULSTEP_512([map("%r$_",(8..15))], "(+8*0)(%rcx)", "%rsi", "%rbp", "%rbx");
$code.=<<___;
	 mov	(+8*1)(%rdi), %rbp
___
	&MULSTEP_512([map("%r$_",(9..15,8))], "(+8*1)(%rcx)", "%rsi", "%rbp", "%rbx");
$code.=<<___;
	 ret
.size	MULADD_128x512,.-MULADD_128x512
___
}}}

{{{
#MULADD_256x512	MACRO	pDst, pA, pB, OP, TMP, X7, X6, X5, X4, X3, X2, X1, X0
#
# Inputs: pDst: Destination  (768 bits, 12 qwords)
#         pA:   Multiplicand (1024 bits, 16 qwords)
#         pB:   Multiplicand (512 bits, 8 qwords)
# Dst = Ah * B + Al
# where Ah is (in qwords) A[15:12] (256 bits) and Al is A[7:0] (512 bits)
# Results in X3 X2 X1 X0 X7 X6 X5 X4 Dst[3:0]
# Uses registers: arguments, RAX, RDX
sub MULADD_256x512
{
 my ($pDst, $pA, $pB, $OP, $TMP, $X)=@_;
$code.=<<___;
	mov	(+8*12)($pA), $OP
___
	&MULSTEP_512_ADD($X, "(+8*0)($pDst)", $pB, $pA, $OP, $TMP);
	push(@$X,shift(@$X));

$code.=<<___;
	 mov	(+8*13)($pA), $OP
___
	&MULSTEP_512($X, "(+8*1)($pDst)", $pB, $OP, $TMP);
	push(@$X,shift(@$X));

$code.=<<___;
	 mov	(+8*14)($pA), $OP
___
	&MULSTEP_512($X, "(+8*2)($pDst)", $pB, $OP, $TMP);
	push(@$X,shift(@$X));

$code.=<<___;
	 mov	(+8*15)($pA), $OP
___
	&MULSTEP_512($X, "(+8*3)($pDst)", $pB, $OP, $TMP);
	push(@$X,shift(@$X));
}

#
# mont_reduce(UINT64 *x,  /* 1024 bits, 16 qwords */
#	       UINT64 *m,  /*  512 bits,  8 qwords */
#	       MODF_2FOLD_MONT_512_C1_DATA *data,
#             UINT64 *r)  /*  512 bits,  8 qwords */
# Input:  x (number to be reduced): tmp16 (Implicit)
#         m (modulus):              [pM]  (Implicit)
#         data (reduce data):       [pData] (Implicit)
# Output: r (result):		     Address in [red_res_addr]
#         result also in: r9, r8, r15, r14, r13, r12, r11, r10
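#
# High-level sketch of the folding reduction (illustrative pseudocode; the
# carry bookkeeping and the T[] overflow-correction table are left out here,
# see the paper referenced in the file header):
#
#	X1 = X[15:12]*M1 + X[11:0]		// M1 = 2^768 mod m, X1 <= 769 bits
#	X2 = X1[11:10]*M2 + X1[9:0]		// M2 = 2^640 mod m, X2 <= 641 bits
#	Q  = (X2[1:0] * K1) mod 2^128		// K1 = -1/m mod 2^128
#	R  = (X2 + Q*m) >> 128			// == x * 2^-128 (mod m)
#	if (R >= m) R -= m			// conditional final subtraction
#
# The carries dropped above (X1's top bit, X2's top bit, ...) are folded back
# in through the T[] table and the masked subtraction near the end.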

my @X=map("%r$_",(8..15));

$code.=<<___;
.type	mont_reduce,\@abi-omnipotent
.align	16
mont_reduce:
___

my $STACK_DEPTH         =  8;
	#
	# X1 = Xh * M1 + Xl
$code.=<<___;
	 lea	(+$Reduce_Data_offset+$X1_offset+$STACK_DEPTH)(%rsp), %rdi			# pX1 (Dst) 769 bits, 13 qwords
	 mov	(+$pData_offset+$STACK_DEPTH)(%rsp), %rsi			# pM1 (Bsrc) 512 bits, 8 qwords
	 add	\$$M1, %rsi
	 lea	(+$tmp16_offset+$STACK_DEPTH)(%rsp), %rcx			# X (Asrc) 1024 bits, 16 qwords

___

	&MULADD_256x512("%rdi", "%rcx", "%rsi", "%rbp", "%rbx", \@X);	# rotates @X 4 times
	# results in r11, r10, r9, r8, r15, r14, r13, r12, X1[3:0]

$code.=<<___;
	 xor	%rax, %rax
	# X1 += xl
	 add	(+8*8)(%rcx), $X[4]
	 adc	(+8*9)(%rcx), $X[5]
	 adc	(+8*10)(%rcx), $X[6]
	 adc	(+8*11)(%rcx), $X[7]
	 adc	\$0, %rax
	# X1 is now rax, r11-r8, r15-r12, tmp16[3:0]

	#
	# check for carry ;; carry stored in rax
	 mov	$X[4], (+8*8)(%rdi)			# rdi points to X1
	 mov	$X[5], (+8*9)(%rdi)
	 mov	$X[6], %rbp
	 mov	$X[7], (+8*11)(%rdi)

	 mov	%rax, (+$Reduce_Data_offset+$Carries_offset+$STACK_DEPTH)(%rsp)

	 mov	(+8*0)(%rdi), $X[4]
	 mov	(+8*1)(%rdi), $X[5]
	 mov	(+8*2)(%rdi), $X[6]
	 mov	(+8*3)(%rdi), $X[7]

	# X1 is now stored in: X1[11], rbp, X1[9:8], r15-r8
	# rdi -> X1
	# rsi -> M1

	#
	# X2 = Xh * M2 + Xl
	# do first part (X2 = Xh * M2)
	 add	\$8*10, %rdi			# rdi -> pXh ; 128 bits, 2 qwords
				#        Xh is actually { [rdi+8*1], rbp }
	 add	\$`$M2-$M1`, %rsi			# rsi -> M2
	 lea	(+$Reduce_Data_offset+$X2_offset+$STACK_DEPTH)(%rsp), %rcx			# rcx -> pX2 ; 641 bits, 11 qwords
___
	unshift(@X,pop(@X));	unshift(@X,pop(@X));
$code.=<<___;

	 call	MULADD_128x512			# args in rcx, rdi / rbp, rsi, r15-r8
	# result in r9, r8, r15, r14, r13, r12, r11, r10, X2[1:0]
	 mov	(+$Reduce_Data_offset+$Carries_offset+$STACK_DEPTH)(%rsp), %rax

	# X2 += Xl
	 add	(+8*8-8*10)(%rdi), $X[6]		# (-8*10) is to adjust rdi -> Xh to Xl
	 adc	(+8*9-8*10)(%rdi), $X[7]
	 mov	$X[6], (+8*8)(%rcx)
	 mov	$X[7], (+8*9)(%rcx)

	 adc	%rax, %rax
	 mov	%rax, (+$Reduce_Data_offset+$Carries_offset+$STACK_DEPTH)(%rsp)

	 lea	(+$Reduce_Data_offset+$Q_offset+$STACK_DEPTH)(%rsp), %rdi			# rdi -> pQ ; 128 bits, 2 qwords
	 add	\$`$K1-$M2`, %rsi			# rsi -> pK1 ; 128 bits, 2 qwords

	# MUL_128x128t128	rdi, rcx, rsi	; Q = X2 * K1 (bottom half)
	# B1:B0 = rsi[1:0] = K1[1:0]
	# A1:A0 = rcx[1:0] = X2[1:0]
	# Result = rdi[1],rbp = Q[1],rbp
	 mov	(%rsi), %r8			# B0
	 mov	(+8*1)(%rsi), %rbx			# B1

	 mov	(%rcx), %rax			# A0
	 mul	%r8			# B0
	 mov	%rax, %rbp
	 mov	%rdx, %r9

	 mov	(+8*1)(%rcx), %rax			# A1
	 mul	%r8			# B0
	 add	%rax, %r9

	 mov	(%rcx), %rax			# A0
	 mul	%rbx			# B1
	 add	%rax, %r9

	 mov	%r9, (+8*1)(%rdi)
	# end MUL_128x128t128

	 sub	\$`$K1-$M`, %rsi

	 mov	(%rcx), $X[6]
	 mov	(+8*1)(%rcx), $X[7]			# r9:r8 = X2[1:0]

	 call	MULADD_128x512			# args in rcx, rdi / rbp, rsi, r15-r8
	# result in r9, r8, r15, r14, r13, r12, r11, r10, X2[1:0]

	# load first half of m to rdx, rdi, rbx, rax
	# moved this here for efficiency
	 mov	(+8*0)(%rsi), %rax
	 mov	(+8*1)(%rsi), %rbx
	 mov	(+8*2)(%rsi), %rdi
	 mov	(+8*3)(%rsi), %rdx

	# continue with reduction
	 mov	(+$Reduce_Data_offset+$Carries_offset+$STACK_DEPTH)(%rsp), %rbp

	 add	(+8*8)(%rcx), $X[6]
	 adc	(+8*9)(%rcx), $X[7]

	#accumulate the final carry to rbp
	 adc	%rbp, %rbp

	# Add in overflow corrections: R = (X2>>128) += T[overflow]
	# R = {r9, r8, r15, r14, ..., r10}
	 shl	\$3, %rbp
	 mov	(+$pData_offset+$STACK_DEPTH)(%rsp), %rcx			# rcx -> Data (and points to T)
	 add	%rcx, %rbp			# pT ; 512 bits, 8 qwords, spread out

	# rsi will be used to generate a mask after the addition
	 xor	%rsi, %rsi

	 add	(+8*8*0)(%rbp), $X[0]
	 adc	(+8*8*1)(%rbp), $X[1]
	 adc	(+8*8*2)(%rbp), $X[2]
	 adc	(+8*8*3)(%rbp), $X[3]
	 adc	(+8*8*4)(%rbp), $X[4]
	 adc	(+8*8*5)(%rbp), $X[5]
	 adc	(+8*8*6)(%rbp), $X[6]
	 adc	(+8*8*7)(%rbp), $X[7]

	# if there is a carry:	rsi = 0xFFFFFFFFFFFFFFFF
	# if carry is clear:	rsi = 0x0000000000000000
	 sbb	\$0, %rsi

	# if carry is clear, subtract 0. Otherwise, subtract 256 bits of m
	 and	%rsi, %rax
	 and	%rsi, %rbx
	 and	%rsi, %rdi
	 and	%rsi, %rdx

	 mov	\$1, %rbp
	 sub	%rax, $X[0]
	 sbb	%rbx, $X[1]
	 sbb	%rdi, $X[2]
	 sbb	%rdx, $X[3]

	# if there is a borrow:		rbp = 0
	# if there is no borrow:	rbp = 1
	# this is used to save the borrows in between the first half and the 2nd half of the subtraction of m
	 sbb	\$0, %rbp

	#load second half of m to rdx, rdi, rbx, rax

	 add	\$$M, %rcx
	 mov	(+8*4)(%rcx), %rax
	 mov	(+8*5)(%rcx), %rbx
	 mov	(+8*6)(%rcx), %rdi
	 mov	(+8*7)(%rcx), %rdx

	# use the rsi mask as before
	# if carry is clear, subtract 0. Otherwise, subtract 256 bits of m
	 and	%rsi, %rax
	 and	%rsi, %rbx
	 and	%rsi, %rdi
	 and	%rsi, %rdx

	# if rbp = 0, there was a borrow before, it is moved to the carry flag
	# if rbp = 1, there was not a borrow before, carry flag is cleared
	 sub	\$1, %rbp

	 sbb	%rax, $X[4]
	 sbb	%rbx, $X[5]
	 sbb	%rdi, $X[6]
	 sbb	%rdx, $X[7]

	# write R back to memory

	 mov	(+$red_result_addr_offset+$STACK_DEPTH)(%rsp), %rsi
	 mov	$X[0], (+8*0)(%rsi)
	 mov	$X[1], (+8*1)(%rsi)
	 mov	$X[2], (+8*2)(%rsi)
	 mov	$X[3], (+8*3)(%rsi)
	 mov	$X[4], (+8*4)(%rsi)
	 mov	$X[5], (+8*5)(%rsi)
	 mov	$X[6], (+8*6)(%rsi)
	 mov	$X[7], (+8*7)(%rsi)

	 ret
.size	mont_reduce,.-mont_reduce
___
}}}

{{{
#MUL_512x512	MACRO	pDst, pA, pB, x7, x6, x5, x4, x3, x2, x1, x0, tmp*2
#
# Inputs: pDst: Destination  (1024 bits, 16 qwords)
#         pA:   Multiplicand (512 bits, 8 qwords)
#         pB:   Multiplicand (512 bits, 8 qwords)
# Uses registers rax, rdx, args
#   B operand in [pB] and also in x7...x0
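#
# Plain schoolbook multiply; an illustrative C equivalent (not the emitted
# schedule, which keeps the middle limbs in the x7...x0 registers):
#
#	UINT128 t;
#	memset(dst, 0, 16*8);
#	for (i = 0; i < 8; i++) {
#		UINT64 carry = 0;
#		for (j = 0; j < 8; j++) {
#			t = (UINT128)A[i]*B[j] + dst[i+j] + carry;
#			dst[i+j] = (UINT64)t;
#			carry = (UINT64)(t >> 64);
#		}
#		dst[i+8] = carry;
#	}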
sub MUL_512x512
{
 my ($pDst, $pA, $pB, $x, $OP, $TMP, $pDst_o)=@_;
 my ($pDst,  $pDst_o) = ($pDst =~ m/([^+]*)\+?(.*)?/);
 my @X=@$x;	# make a copy

$code.=<<___;
	 mov	(+8*0)($pA), $OP

	 mov	$X[0], %rax
	 mul	$OP			# rdx:rax = %OP * [0]
	 mov	%rax, (+$pDst_o+8*0)($pDst)
	 mov	%rdx, $X[0]
___
for(my $i=1;$i<8;$i++) {
$code.=<<___;
	 mov	$X[$i], %rax
	 mul	$OP			# rdx:rax = %OP * [$i]
	 add	%rax, $X[$i-1]
	 adc	\$0, %rdx
	 mov	%rdx, $X[$i]
___
}

for(my $i=1;$i<8;$i++) {
$code.=<<___;
	 mov	(+8*$i)($pA), $OP
___

	&MULSTEP_512(\@X, "(+$pDst_o+8*$i)($pDst)", $pB, $OP, $TMP);
	push(@X,shift(@X));
}

$code.=<<___;
	 mov	$X[0], (+$pDst_o+8*8)($pDst)
	 mov	$X[1], (+$pDst_o+8*9)($pDst)
	 mov	$X[2], (+$pDst_o+8*10)($pDst)
	 mov	$X[3], (+$pDst_o+8*11)($pDst)
	 mov	$X[4], (+$pDst_o+8*12)($pDst)
	 mov	$X[5], (+$pDst_o+8*13)($pDst)
	 mov	$X[6], (+$pDst_o+8*14)($pDst)
	 mov	$X[7], (+$pDst_o+8*15)($pDst)
___
}

#
# mont_mul_a3b : subroutine to compute (Src1 * Src2) % M (all 512-bits)
# Input:  src1: Address of source 1: rdi
#         src2: Address of source 2: rsi
# Output: dst:  Address of destination: [red_res_addr]
#    src2 and result also in: r9, r8, r15, r14, r13, r12, r11, r10
# Temp:   Clobbers [tmp16], all registers
$code.=<<___;
.type	mont_mul_a3b,\@abi-omnipotent
.align	16
mont_mul_a3b:
	#
	# multiply tmp = src1 * src2
	# For multiply: dst = rcx, src1 = rdi, src2 = rsi
	# stack depth is extra 8 from call
___
	&MUL_512x512("%rsp+$tmp16_offset+8", "%rdi", "%rsi", [map("%r$_",(10..15,8..9))], "%rbp", "%rbx");
$code.=<<___;
	#
	# Dst = tmp % m
	# Call reduce(tmp, m, data, dst)

	# tail recursion optimization: jmp to mont_reduce and return from there
	 jmp	mont_reduce
	# call	mont_reduce
	# ret
.size	mont_mul_a3b,.-mont_mul_a3b
___
}}}

{{{
#SQR_512 MACRO pDest, pA, x7, x6, x5, x4, x3, x2, x1, x0, tmp*4
#
# Input in memory [pA] and also in x7...x0
# Uses all argument registers plus rax and rdx
#
# This version computes all of the off-diagonal terms into memory,
# and then it adds in the diagonal terms

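#
# Illustrative pseudocode for that strategy (not the emitted schedule):
#
#	clear dst[0..15]
#	for (i = 0; i < 8; i++)			/* seven off-diagonal passes */
#		for (j = i+1; j < 8; j++)
#			add A[i]*A[j] into dst at limb i+j
#	double dst				/* each cross term counts twice */
#	for (i = 0; i < 8; i++)			/* fold in the diagonal squares */
#		add A[i]*A[i] into dst at limb 2*i
#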
sub SQR_512
{
 my ($pDst, $pA, $x, $A, $tmp, $x7, $x6, $pDst_o)=@_;
 my ($pDst,  $pDst_o) = ($pDst =~ m/([^+]*)\+?(.*)?/);
 my @X=@$x;	# make a copy
$code.=<<___;
	# ------------------
	# first pass 01...07
	# ------------------
	 mov	$X[0], $A

	 mov	$X[1],%rax
	 mul	$A
	 mov	%rax, (+$pDst_o+8*1)($pDst)
___
for(my $i=2;$i<8;$i++) {
$code.=<<___;
	 mov	%rdx, $X[$i-2]
	 mov	$X[$i],%rax
	 mul	$A
	 add	%rax, $X[$i-2]
	 adc	\$0, %rdx
___
}
$code.=<<___;
	 mov	%rdx, $x7

	 mov	$X[0], (+$pDst_o+8*2)($pDst)

	# ------------------
	# second pass 12...17
	# ------------------

	 mov	(+8*1)($pA), $A

	 mov	(+8*2)($pA),%rax
	 mul	$A
	 add	%rax, $X[1]
	 adc	\$0, %rdx
	 mov	$X[1], (+$pDst_o+8*3)($pDst)

	 mov	%rdx, $X[0]
	 mov	(+8*3)($pA),%rax
	 mul	$A
	 add	%rax, $X[2]
	 adc	\$0, %rdx
	 add	$X[0], $X[2]
	 adc	\$0, %rdx
	 mov	$X[2], (+$pDst_o+8*4)($pDst)

	 mov	%rdx, $X[0]
	 mov	(+8*4)($pA),%rax
	 mul	$A
	 add	%rax, $X[3]
	 adc	\$0, %rdx
	 add	$X[0], $X[3]
	 adc	\$0, %rdx

	 mov	%rdx, $X[0]
	 mov	(+8*5)($pA),%rax
	 mul	$A
	 add	%rax, $X[4]
	 adc	\$0, %rdx
	 add	$X[0], $X[4]
	 adc	\$0, %rdx

	 mov	%rdx, $X[0]
	 mov	$X[6],%rax
	 mul	$A
	 add	%rax, $X[5]
	 adc	\$0, %rdx
	 add	$X[0], $X[5]
	 adc	\$0, %rdx

	 mov	%rdx, $X[0]
	 mov	$X[7],%rax
	 mul	$A
	 add	%rax, $x7
	 adc	\$0, %rdx
	 add	$X[0], $x7
	 adc	\$0, %rdx

	 mov	%rdx, $X[1]

	# ------------------
	# third pass 23...27
	# ------------------
	 mov	(+8*2)($pA), $A

	 mov	(+8*3)($pA),%rax
	 mul	$A
	 add	%rax, $X[3]
	 adc	\$0, %rdx
	 mov	$X[3], (+$pDst_o+8*5)($pDst)

	 mov	%rdx, $X[0]
	 mov	(+8*4)($pA),%rax
	 mul	$A
	 add	%rax, $X[4]
	 adc	\$0, %rdx
	 add	$X[0], $X[4]
	 adc	\$0, %rdx
	 mov	$X[4], (+$pDst_o+8*6)($pDst)

	 mov	%rdx, $X[0]
	 mov	(+8*5)($pA),%rax
	 mul	$A
	 add	%rax, $X[5]
	 adc	\$0, %rdx
	 add	$X[0], $X[5]
	 adc	\$0, %rdx

	 mov	%rdx, $X[0]
	 mov	$X[6],%rax
	 mul	$A
	 add	%rax, $x7
	 adc	\$0, %rdx
	 add	$X[0], $x7
	 adc	\$0, %rdx

	 mov	%rdx, $X[0]
	 mov	$X[7],%rax
	 mul	$A
	 add	%rax, $X[1]
	 adc	\$0, %rdx
	 add	$X[0], $X[1]
	 adc	\$0, %rdx

	 mov	%rdx, $X[2]

	# ------------------
	# fourth pass 34...37
	# ------------------

	 mov	(+8*3)($pA), $A

	 mov	(+8*4)($pA),%rax
	 mul	$A
	 add	%rax, $X[5]
	 adc	\$0, %rdx
	 mov	$X[5], (+$pDst_o+8*7)($pDst)

	 mov	%rdx, $X[0]
	 mov	(+8*5)($pA),%rax
	 mul	$A
	 add	%rax, $x7
	 adc	\$0, %rdx
	 add	$X[0], $x7
	 adc	\$0, %rdx
	 mov	$x7, (+$pDst_o+8*8)($pDst)

	 mov	%rdx, $X[0]
	 mov	$X[6],%rax
	 mul	$A
	 add	%rax, $X[1]
	 adc	\$0, %rdx
	 add	$X[0], $X[1]
	 adc	\$0, %rdx

	 mov	%rdx, $X[0]
	 mov	$X[7],%rax
	 mul	$A
	 add	%rax, $X[2]
	 adc	\$0, %rdx
	 add	$X[0], $X[2]
	 adc	\$0, %rdx

	 mov	%rdx, $X[5]

	# ------------------
	# fifth pass 45...47
	# ------------------
	 mov	(+8*4)($pA), $A

	 mov	(+8*5)($pA),%rax
	 mul	$A
	 add	%rax, $X[1]
	 adc	\$0, %rdx
	 mov	$X[1], (+$pDst_o+8*9)($pDst)

	 mov	%rdx, $X[0]
	 mov	$X[6],%rax
	 mul	$A
	 add	%rax, $X[2]
	 adc	\$0, %rdx
	 add	$X[0], $X[2]
	 adc	\$0, %rdx
	 mov	$X[2], (+$pDst_o+8*10)($pDst)

	 mov	%rdx, $X[0]
	 mov	$X[7],%rax
	 mul	$A
	 add	%rax, $X[5]
	 adc	\$0, %rdx
	 add	$X[0], $X[5]
	 adc	\$0, %rdx

	 mov	%rdx, $X[1]

	# ------------------
	# sixth pass 56...57
	# ------------------
	 mov	(+8*5)($pA), $A

	 mov	$X[6],%rax
	 mul	$A
	 add	%rax, $X[5]
	 adc	\$0, %rdx
	 mov	$X[5], (+$pDst_o+8*11)($pDst)

	 mov	%rdx, $X[0]
	 mov	$X[7],%rax
	 mul	$A
	 add	%rax, $X[1]
	 adc	\$0, %rdx
	 add	$X[0], $X[1]
	 adc	\$0, %rdx
	 mov	$X[1], (+$pDst_o+8*12)($pDst)

	 mov	%rdx, $X[2]

	# ------------------
	# seventh pass 67
	# ------------------
	 mov	$X[6], $A

	 mov	$X[7],%rax
	 mul	$A
	 add	%rax, $X[2]
	 adc	\$0, %rdx
	 mov	$X[2], (+$pDst_o+8*13)($pDst)

	 mov	%rdx, (+$pDst_o+8*14)($pDst)

	# start finalize (add in squares, and double off-terms)
	 mov	(+$pDst_o+8*1)($pDst), $X[0]
	 mov	(+$pDst_o+8*2)($pDst), $X[1]
	 mov	(+$pDst_o+8*3)($pDst), $X[2]
	 mov	(+$pDst_o+8*4)($pDst), $X[3]
	 mov	(+$pDst_o+8*5)($pDst), $X[4]
	 mov	(+$pDst_o+8*6)($pDst), $X[5]

	 mov	(+8*3)($pA), %rax
	 mul	%rax
	 mov	%rax, $x6
	 mov	%rdx, $X[6]

	 add	$X[0], $X[0]
	 adc	$X[1], $X[1]
	 adc	$X[2], $X[2]
	 adc	$X[3], $X[3]
	 adc	$X[4], $X[4]
	 adc	$X[5], $X[5]
	 adc	\$0, $X[6]

	 mov	(+8*0)($pA), %rax
	 mul	%rax
	 mov	%rax, (+$pDst_o+8*0)($pDst)
	 mov	%rdx, $A

	 mov	(+8*1)($pA), %rax
	 mul	%rax

	 add	$A, $X[0]
	 adc	%rax, $X[1]
	 adc	\$0, %rdx

	 mov	%rdx, $A
	 mov	$X[0], (+$pDst_o+8*1)($pDst)
	 mov	$X[1], (+$pDst_o+8*2)($pDst)

	 mov	(+8*2)($pA), %rax
	 mul	%rax

	 add	$A, $X[2]
	 adc	%rax, $X[3]
	 adc	\$0, %rdx

	 mov	%rdx, $A

	 mov	$X[2], (+$pDst_o+8*3)($pDst)
	 mov	$X[3], (+$pDst_o+8*4)($pDst)

	 xor	$tmp, $tmp
	 add	$A, $X[4]
	 adc	$x6, $X[5]
	 adc	\$0, $tmp

	 mov	$X[4], (+$pDst_o+8*5)($pDst)
	 mov	$X[5], (+$pDst_o+8*6)($pDst)

	# %%tmp has 0/1 in column 7
	# %%A6 has a full value in column 7

	 mov	(+$pDst_o+8*7)($pDst), $X[0]
	 mov	(+$pDst_o+8*8)($pDst), $X[1]
	 mov	(+$pDst_o+8*9)($pDst), $X[2]
	 mov	(+$pDst_o+8*10)($pDst), $X[3]
	 mov	(+$pDst_o+8*11)($pDst), $X[4]
	 mov	(+$pDst_o+8*12)($pDst), $X[5]
	 mov	(+$pDst_o+8*13)($pDst), $x6
	 mov	(+$pDst_o+8*14)($pDst), $x7

	 mov	$X[7], %rax
	 mul	%rax
	 mov	%rax, $X[7]
	 mov	%rdx, $A

	 add	$X[0], $X[0]
	 adc	$X[1], $X[1]
	 adc	$X[2], $X[2]
	 adc	$X[3], $X[3]
	 adc	$X[4], $X[4]
	 adc	$X[5], $X[5]
	 adc	$x6, $x6
	 adc	$x7, $x7
	 adc	\$0, $A

	 add	$tmp, $X[0]

	 mov	(+8*4)($pA), %rax
	 mul	%rax

	 add	$X[6], $X[0]
	 adc	%rax, $X[1]
	 adc	\$0, %rdx

	 mov	%rdx, $tmp

	 mov	$X[0], (+$pDst_o+8*7)($pDst)
	 mov	$X[1], (+$pDst_o+8*8)($pDst)

	 mov	(+8*5)($pA), %rax
	 mul	%rax

	 add	$tmp, $X[2]
	 adc	%rax, $X[3]
	 adc	\$0, %rdx

	 mov	%rdx, $tmp

	 mov	$X[2], (+$pDst_o+8*9)($pDst)
	 mov	$X[3], (+$pDst_o+8*10)($pDst)

	 mov	(+8*6)($pA), %rax
	 mul	%rax

	 add	$tmp, $X[4]
	 adc	%rax, $X[5]
	 adc	\$0, %rdx

	 mov	$X[4], (+$pDst_o+8*11)($pDst)
	 mov	$X[5], (+$pDst_o+8*12)($pDst)

	 add	%rdx, $x6
	 adc	$X[7], $x7
	 adc	\$0, $A

	 mov	$x6, (+$pDst_o+8*13)($pDst)
	 mov	$x7, (+$pDst_o+8*14)($pDst)
	 mov	$A, (+$pDst_o+8*15)($pDst)
___
}

#
# sqr_reduce: subroutine to compute Result = reduce(Result * Result)
#
# input and result also in: r9, r8, r15, r14, r13, r12, r11, r10
#
$code.=<<___;
.type	sqr_reduce,\@abi-omnipotent
.align	16
sqr_reduce:
	 mov	(+$pResult_offset+8)(%rsp), %rcx
___
	&SQR_512("%rsp+$tmp16_offset+8", "%rcx", [map("%r$_",(10..15,8..9))], "%rbx", "%rbp", "%rsi", "%rdi");
$code.=<<___;
	# tail recursion optimization: jmp to mont_reduce and return from there
	 jmp	mont_reduce
	# call	mont_reduce
	# ret
.size	sqr_reduce,.-sqr_reduce
___
}}}

#
# MAIN FUNCTION
#

#mod_exp_512(UINT64 *result, /* 512 bits, 8 qwords */
#           UINT64 *g,   /* 512 bits, 8 qwords */
#           UINT64 *exp, /* 512 bits, 8 qwords */
#           struct mod_ctx_512 *data)

# window size = 5
# table size = 2^5 = 32
#table_entries	equ	32
#table_size	equ	table_entries * 8
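#
# Rough flow of the routine (illustrative pseudocode; the first window is
# shorter than 5 bits and every table access goes through swizzle/unswizzle):
#
#	gM   = reduce(g * 2^256)		// g in Montgomery space
#	G[0] = Montgomery form of 1
#	G[i] = G[i-1] * gM   for i = 1..31	// stored swizzled
#	r = G[top window of exp]
#	while (exponent windows remain) {
#		r = r^(2^5)			// five modular squarings
#		r = r * G[next 5 bits of exp]
#	}
#	result = reduce(r)			// leave Montgomery space
#	if (result >= m) result -= m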
$code.=<<___;
.globl	mod_exp_512
.type	mod_exp_512,\@function,4
mod_exp_512:
	 push	%rbp
	 push	%rbx
	 push	%r12
	 push	%r13
	 push	%r14
	 push	%r15

	# adjust stack down and then align it with cache boundary
	 mov	%rsp, %r8
	 sub	\$$mem_size, %rsp
	 and	\$-64, %rsp

	# store previous stack pointer and arguments
	 mov	%r8, (+$rsp_offset)(%rsp)
	 mov	%rdi, (+$pResult_offset)(%rsp)
	 mov	%rsi, (+$pG_offset)(%rsp)
	 mov	%rcx, (+$pData_offset)(%rsp)
.Lbody:
	# transform g into montgomery space
	# GT = reduce(g * C2) = reduce(g * (2^256))
	# reduce expects to have the input in [tmp16]
	 pxor	%xmm4, %xmm4
	 movdqu	(+16*0)(%rsi), %xmm0
	 movdqu	(+16*1)(%rsi), %xmm1
	 movdqu	(+16*2)(%rsi), %xmm2
	 movdqu	(+16*3)(%rsi), %xmm3
	 movdqa	%xmm4, (+$tmp16_offset+16*0)(%rsp)
	 movdqa	%xmm4, (+$tmp16_offset+16*1)(%rsp)
	 movdqa	%xmm4, (+$tmp16_offset+16*6)(%rsp)
	 movdqa	%xmm4, (+$tmp16_offset+16*7)(%rsp)
	 movdqa	%xmm0, (+$tmp16_offset+16*2)(%rsp)
	 movdqa	%xmm1, (+$tmp16_offset+16*3)(%rsp)
	 movdqa	%xmm2, (+$tmp16_offset+16*4)(%rsp)
	 movdqa	%xmm3, (+$tmp16_offset+16*5)(%rsp)

	# load pExp before rdx gets blown away
	 movdqu	(+16*0)(%rdx), %xmm0
	 movdqu	(+16*1)(%rdx), %xmm1
	 movdqu	(+16*2)(%rdx), %xmm2
	 movdqu	(+16*3)(%rdx), %xmm3

	 lea	(+$GT_offset)(%rsp), %rbx
	 mov	%rbx, (+$red_result_addr_offset)(%rsp)
	 call	mont_reduce

	# Initialize tmp = C
	 lea	(+$tmp_offset)(%rsp), %rcx
	 xor	%rax, %rax
	 mov	%rax, (+8*0)(%rcx)
	 mov	%rax, (+8*1)(%rcx)
	 mov	%rax, (+8*3)(%rcx)
	 mov	%rax, (+8*4)(%rcx)
	 mov	%rax, (+8*5)(%rcx)
	 mov	%rax, (+8*6)(%rcx)
	 mov	%rax, (+8*7)(%rcx)
	 mov	%rax, (+$exp_offset+8*8)(%rsp)
	 movq	\$1, (+8*2)(%rcx)

	 lea	(+$garray_offset)(%rsp), %rbp
	 mov	%rcx, %rsi			# pTmp
	 mov	%rbp, %rdi			# Garray[][0]
___

	&swizzle("%rdi", "%rcx", "%rax", "%rbx");

	# for (rax = 31; rax != 0; rax--) {
	#     tmp = reduce(tmp * G)
	#     swizzle(pg, tmp);
	#     pg += 2; }
$code.=<<___;
	 mov	\$31, %rax
	 mov	%rax, (+$i_offset)(%rsp)
	 mov	%rbp, (+$pg_offset)(%rsp)
	# rsi -> pTmp
	 mov	%rsi, (+$red_result_addr_offset)(%rsp)
	 mov	(+8*0)(%rsi), %r10
	 mov	(+8*1)(%rsi), %r11
	 mov	(+8*2)(%rsi), %r12
	 mov	(+8*3)(%rsi), %r13
	 mov	(+8*4)(%rsi), %r14
	 mov	(+8*5)(%rsi), %r15
	 mov	(+8*6)(%rsi), %r8
	 mov	(+8*7)(%rsi), %r9
init_loop:
	 lea	(+$GT_offset)(%rsp), %rdi
	 call	mont_mul_a3b
	 lea	(+$tmp_offset)(%rsp), %rsi
	 mov	(+$pg_offset)(%rsp), %rbp
	 add	\$2, %rbp
	 mov	%rbp, (+$pg_offset)(%rsp)
	 mov	%rsi, %rcx			# rcx = rsi = addr of tmp
___

	&swizzle("%rbp", "%rcx", "%rax", "%rbx");
$code.=<<___;
	 mov	(+$i_offset)(%rsp), %rax
	 sub	\$1, %rax
	 mov	%rax, (+$i_offset)(%rsp)
	 jne	init_loop

	#
	# Copy exponent onto stack
	 movdqa	%xmm0, (+$exp_offset+16*0)(%rsp)
	 movdqa	%xmm1, (+$exp_offset+16*1)(%rsp)
	 movdqa	%xmm2, (+$exp_offset+16*2)(%rsp)
	 movdqa	%xmm3, (+$exp_offset+16*3)(%rsp)


	#
	# Do exponentiation
	# Initialize result to G[exp{511:507}]
	 mov	(+$exp_offset+62)(%rsp), %eax
	 mov	%rax, %rdx
	 shr	\$11, %rax
	 and	\$0x07FF, %edx
	 mov	%edx, (+$exp_offset+62)(%rsp)
	 lea	(+$garray_offset)(%rsp,%rax,2), %rsi
	 mov	(+$pResult_offset)(%rsp), %rdx
___

	&unswizzle("%rdx", "%rsi", "%rbp", "%rbx", "%rax");

	#
	# Loop variables
	# rcx = [loop_idx] = index: 510-5 to 0 by 5
$code.=<<___;
	 movq	\$505, (+$loop_idx_offset)(%rsp)

	 mov	(+$pResult_offset)(%rsp), %rcx
	 mov	%rcx, (+$red_result_addr_offset)(%rsp)
	 mov	(+8*0)(%rcx), %r10
	 mov	(+8*1)(%rcx), %r11
	 mov	(+8*2)(%rcx), %r12
	 mov	(+8*3)(%rcx), %r13
	 mov	(+8*4)(%rcx), %r14
	 mov	(+8*5)(%rcx), %r15
	 mov	(+8*6)(%rcx), %r8
	 mov	(+8*7)(%rcx), %r9
	 jmp	sqr_2

main_loop_a3b:
	 call	sqr_reduce
	 call	sqr_reduce
	 call	sqr_reduce
sqr_2:
	 call	sqr_reduce
	 call	sqr_reduce

	#
	# Do multiply, first look up proper value in Garray
	 mov	(+$loop_idx_offset)(%rsp), %rcx			# bit index
	 mov	%rcx, %rax
	 shr	\$4, %rax			# rax is word pointer
	 mov	(+$exp_offset)(%rsp,%rax,2), %edx
	 and	\$15, %rcx
	 shrq	%cl, %rdx
	 and	\$0x1F, %rdx

	 lea	(+$garray_offset)(%rsp,%rdx,2), %rsi
	 lea	(+$tmp_offset)(%rsp), %rdx
	 mov	%rdx, %rdi
___

	&unswizzle("%rdx", "%rsi", "%rbp", "%rbx", "%rax");
	# rdi = tmp = pG

	#
	# Call mod_mul_a1(pDst,  pSrc1, pSrc2, pM, pData)
	#                 result result pG     M   Data
$code.=<<___;
	 mov	(+$pResult_offset)(%rsp), %rsi
	 call	mont_mul_a3b

	#
	# finish loop
	 mov	(+$loop_idx_offset)(%rsp), %rcx
	 sub	\$5, %rcx
	 mov	%rcx, (+$loop_idx_offset)(%rsp)
	 jge	main_loop_a3b

	#

end_main_loop_a3b:
	# transform result out of Montgomery space
	# result = reduce(result)
	 mov	(+$pResult_offset)(%rsp), %rdx
	 pxor	%xmm4, %xmm4
	 movdqu	(+16*0)(%rdx), %xmm0
	 movdqu	(+16*1)(%rdx), %xmm1
	 movdqu	(+16*2)(%rdx), %xmm2
	 movdqu	(+16*3)(%rdx), %xmm3
	 movdqa	%xmm4, (+$tmp16_offset+16*4)(%rsp)
	 movdqa	%xmm4, (+$tmp16_offset+16*5)(%rsp)
	 movdqa	%xmm4, (+$tmp16_offset+16*6)(%rsp)
	 movdqa	%xmm4, (+$tmp16_offset+16*7)(%rsp)
	 movdqa	%xmm0, (+$tmp16_offset+16*0)(%rsp)
	 movdqa	%xmm1, (+$tmp16_offset+16*1)(%rsp)
	 movdqa	%xmm2, (+$tmp16_offset+16*2)(%rsp)
	 movdqa	%xmm3, (+$tmp16_offset+16*3)(%rsp)
	 call	mont_reduce

	# If result > m, subtract m
	# load result into r15:r8
	 mov	(+$pResult_offset)(%rsp), %rax
	 mov	(+8*0)(%rax), %r8
	 mov	(+8*1)(%rax), %r9
	 mov	(+8*2)(%rax), %r10
	 mov	(+8*3)(%rax), %r11
	 mov	(+8*4)(%rax), %r12
	 mov	(+8*5)(%rax), %r13
	 mov	(+8*6)(%rax), %r14
	 mov	(+8*7)(%rax), %r15

	# subtract m
	 mov	(+$pData_offset)(%rsp), %rbx
	 add	\$$M, %rbx

	 sub	(+8*0)(%rbx), %r8
	 sbb	(+8*1)(%rbx), %r9
	 sbb	(+8*2)(%rbx), %r10
	 sbb	(+8*3)(%rbx), %r11
	 sbb	(+8*4)(%rbx), %r12
	 sbb	(+8*5)(%rbx), %r13
	 sbb	(+8*6)(%rbx), %r14
	 sbb	(+8*7)(%rbx), %r15

	# if Carry is clear, replace result with difference
	 mov	(+8*0)(%rax), %rsi
	 mov	(+8*1)(%rax), %rdi
	 mov	(+8*2)(%rax), %rcx
	 mov	(+8*3)(%rax), %rdx
	 cmovnc	%r8, %rsi
	 cmovnc	%r9, %rdi
	 cmovnc	%r10, %rcx
	 cmovnc	%r11, %rdx
	 mov	%rsi, (+8*0)(%rax)
	 mov	%rdi, (+8*1)(%rax)
	 mov	%rcx, (+8*2)(%rax)
	 mov	%rdx, (+8*3)(%rax)

	 mov	(+8*4)(%rax), %rsi
	 mov	(+8*5)(%rax), %rdi
	 mov	(+8*6)(%rax), %rcx
	 mov	(+8*7)(%rax), %rdx
	 cmovnc	%r12, %rsi
	 cmovnc	%r13, %rdi
	 cmovnc	%r14, %rcx
	 cmovnc	%r15, %rdx
	 mov	%rsi, (+8*4)(%rax)
	 mov	%rdi, (+8*5)(%rax)
	 mov	%rcx, (+8*6)(%rax)
	 mov	%rdx, (+8*7)(%rax)

	 mov	(+$rsp_offset)(%rsp), %rsi
	 mov	0(%rsi),%r15
	 mov	8(%rsi),%r14
	 mov	16(%rsi),%r13
	 mov	24(%rsi),%r12
	 mov	32(%rsi),%rbx
	 mov	40(%rsi),%rbp
	 lea	48(%rsi),%rsp
.Lepilogue:
	 ret
.size mod_exp_512, . - mod_exp_512
___

if ($win64) {
# EXCEPTION_DISPOSITION handler (EXCEPTION_RECORD *rec,ULONG64 frame,
#		CONTEXT *context,DISPATCHER_CONTEXT *disp)
my $rec="%rcx";
my $frame="%rdx";
my $context="%r8";
my $disp="%r9";

$code.=<<___;
.extern	__imp_RtlVirtualUnwind
.type	mod_exp_512_se_handler,\@abi-omnipotent
.align	16
mod_exp_512_se_handler:
	push	%rsi
	push	%rdi
	push	%rbx
	push	%rbp
	push	%r12
	push	%r13
	push	%r14
	push	%r15
	pushfq
	sub	\$64,%rsp

	mov	120($context),%rax	# pull context->Rax
	mov	248($context),%rbx	# pull context->Rip

	lea	.Lbody(%rip),%r10
	cmp	%r10,%rbx		# context->Rip<prologue label
	jb	.Lin_prologue

	mov	152($context),%rax	# pull context->Rsp

	lea	.Lepilogue(%rip),%r10
	cmp	%r10,%rbx		# context->Rip>=epilogue label
	jae	.Lin_prologue

	mov	$rsp_offset(%rax),%rax	# pull saved Rsp

	mov	32(%rax),%rbx
	mov	40(%rax),%rbp
	mov	24(%rax),%r12
	mov	16(%rax),%r13
	mov	8(%rax),%r14
	mov	0(%rax),%r15
	lea	48(%rax),%rax
	mov	%rbx,144($context)	# restore context->Rbx
	mov	%rbp,160($context)	# restore context->Rbp
	mov	%r12,216($context)	# restore context->R12
	mov	%r13,224($context)	# restore context->R13
	mov	%r14,232($context)	# restore context->R14
	mov	%r15,240($context)	# restore context->R15

.Lin_prologue:
	mov	8(%rax),%rdi
	mov	16(%rax),%rsi
	mov	%rax,152($context)	# restore context->Rsp
	mov	%rsi,168($context)	# restore context->Rsi
	mov	%rdi,176($context)	# restore context->Rdi

	mov	40($disp),%rdi		# disp->ContextRecord
	mov	$context,%rsi		# context
	mov	\$154,%ecx		# sizeof(CONTEXT)
	.long	0xa548f3fc		# cld; rep movsq

	mov	$disp,%rsi
	xor	%rcx,%rcx		# arg1, UNW_FLAG_NHANDLER
	mov	8(%rsi),%rdx		# arg2, disp->ImageBase
	mov	0(%rsi),%r8		# arg3, disp->ControlPc
	mov	16(%rsi),%r9		# arg4, disp->FunctionEntry
	mov	40(%rsi),%r10		# disp->ContextRecord
	lea	56(%rsi),%r11		# &disp->HandlerData
	lea	24(%rsi),%r12		# &disp->EstablisherFrame
	mov	%r10,32(%rsp)		# arg5
	mov	%r11,40(%rsp)		# arg6
	mov	%r12,48(%rsp)		# arg7
	mov	%rcx,56(%rsp)		# arg8, (NULL)
	call	*__imp_RtlVirtualUnwind(%rip)

	mov	\$1,%eax		# ExceptionContinueSearch
	add	\$64,%rsp
	popfq
	pop	%r15
	pop	%r14
	pop	%r13
	pop	%r12
	pop	%rbp
	pop	%rbx
	pop	%rdi
	pop	%rsi
	ret
.size	mod_exp_512_se_handler,.-mod_exp_512_se_handler

.section	.pdata
.align	4
	.rva	.LSEH_begin_mod_exp_512
	.rva	.LSEH_end_mod_exp_512
	.rva	.LSEH_info_mod_exp_512

.section	.xdata
.align	8
.LSEH_info_mod_exp_512:
	.byte	9,0,0,0
	.rva	mod_exp_512_se_handler
___
}

sub reg_part {
my ($reg,$conv)=@_;
    if ($reg =~ /%r[0-9]+/)	{ $reg .= $conv; }
    elsif ($conv eq "b")	{ $reg =~ s/%[er]([^x]+)x?/%$1l/;	}
    elsif ($conv eq "w")	{ $reg =~ s/%[er](.+)/%$1/;		}
    elsif ($conv eq "d")	{ $reg =~ s/%[er](.+)/%e$1/;		}
    return $reg;
}

$code =~ s/(%[a-z0-9]+)#([bwd])/reg_part($1,$2)/gem;
$code =~ s/\`([^\`]*)\`/eval $1/gem;
$code =~ s/(\(\+[^)]+\))/eval $1/gem;
print $code;
close STDOUT;