1/* Include file for internal GNU MP types and definitions.
2
3   THE CONTENTS OF THIS FILE ARE FOR INTERNAL USE AND ARE ALMOST CERTAIN TO
4   BE SUBJECT TO INCOMPATIBLE CHANGES IN FUTURE GNU MP RELEASES.
5
6Copyright 1991, 1993, 1994, 1995, 1996, 1997, 1999, 2000, 2001, 2002, 2003,
72004, 2005, 2006, 2007, 2008, 2009, 2010, 2011, 2012 Free Software Foundation,
8Inc.
9
10This file is part of the GNU MP Library.
11
12The GNU MP Library is free software; you can redistribute it and/or modify
13it under the terms of the GNU Lesser General Public License as published by
14the Free Software Foundation; either version 3 of the License, or (at your
15option) any later version.
16
17The GNU MP Library is distributed in the hope that it will be useful, but
18WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
19or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU Lesser General Public
20License for more details.
21
22You should have received a copy of the GNU Lesser General Public License
23along with the GNU MP Library.  If not, see http://www.gnu.org/licenses/.  */
24
25
26/* __GMP_DECLSPEC must be given on any global data that will be accessed
27   from outside libgmp, meaning from the test or development programs, or
28   from libgmpxx.  Failing to do this will result in an incorrect address
29   being used for the accesses.  On functions __GMP_DECLSPEC makes calls
30   from outside libgmp more efficient, but they'll still work fine without
31   it.  */
32
33
34#ifndef __GMP_IMPL_H__
35#define __GMP_IMPL_H__
36
37#if defined _CRAY
38#include <intrinsics.h>  /* for _popcnt */
39#endif
40
41/* limits.h is not used in general, since it's an ANSI-ism, and since on
42   solaris gcc 2.95 under -mcpu=ultrasparc in ABI=32 ends up getting wrong
43   values (the ABI=64 values).
44
45   On Cray vector systems, however, we need the system limits.h since sizes
46   of signed and unsigned types can differ there, depending on compiler
47   options (eg. -hnofastmd), making our SHRT_MAX etc expressions fail.  For
48   reference, int can be 46 or 64 bits, whereas uint is always 64 bits; and
49   short can be 24, 32, 46 or 64 bits, and different for ushort.  */
50
51#if defined _CRAY
52#include <limits.h>
53#endif
54
/* For fat.h and other fat binary stuff.
   No need for __GMP_ATTRIBUTE_PURE or __GMP_NOTHROW, since functions
   declared this way are only used to set function pointers in __gmp_cpuvec;
   they're not called directly.  */
59#define DECL_add_n(name) \
60  __GMP_DECLSPEC mp_limb_t name __GMP_PROTO ((mp_ptr, mp_srcptr, mp_srcptr, mp_size_t))
61#define DECL_addmul_1(name) \
62  __GMP_DECLSPEC mp_limb_t name __GMP_PROTO ((mp_ptr, mp_srcptr, mp_size_t, mp_limb_t))
63#define DECL_copyd(name) \
64  __GMP_DECLSPEC void name __GMP_PROTO ((mp_ptr, mp_srcptr, mp_size_t))
65#define DECL_copyi(name) \
66  DECL_copyd (name)
67#define DECL_divexact_1(name) \
68  __GMP_DECLSPEC void name __GMP_PROTO ((mp_ptr, mp_srcptr, mp_size_t, mp_limb_t))
69#define DECL_divexact_by3c(name) \
70  __GMP_DECLSPEC mp_limb_t name __GMP_PROTO ((mp_ptr, mp_srcptr, mp_size_t, mp_limb_t))
71#define DECL_divrem_1(name) \
72  __GMP_DECLSPEC mp_limb_t name __GMP_PROTO ((mp_ptr, mp_size_t, mp_srcptr, mp_size_t, mp_limb_t))
73#define DECL_gcd_1(name) \
74  __GMP_DECLSPEC mp_limb_t name __GMP_PROTO ((mp_srcptr, mp_size_t, mp_limb_t))
75#define DECL_lshift(name) \
76  __GMP_DECLSPEC mp_limb_t name __GMP_PROTO ((mp_ptr, mp_srcptr, mp_size_t, unsigned))
77#define DECL_mod_1(name) \
78  __GMP_DECLSPEC mp_limb_t name __GMP_PROTO ((mp_srcptr, mp_size_t, mp_limb_t))
79#define DECL_mod_34lsub1(name) \
80  __GMP_DECLSPEC mp_limb_t name __GMP_PROTO ((mp_srcptr, mp_size_t))
81#define DECL_modexact_1c_odd(name) \
82  __GMP_DECLSPEC mp_limb_t name __GMP_PROTO ((mp_srcptr, mp_size_t, mp_limb_t, mp_limb_t))
83#define DECL_mul_1(name) \
84  DECL_addmul_1 (name)
85#define DECL_mul_basecase(name) \
86  __GMP_DECLSPEC void name __GMP_PROTO ((mp_ptr, mp_srcptr, mp_size_t, mp_srcptr, mp_size_t))
87#define DECL_preinv_divrem_1(name) \
88  __GMP_DECLSPEC mp_limb_t name __GMP_PROTO ((mp_ptr, mp_size_t, mp_srcptr, mp_size_t, mp_limb_t, mp_limb_t, int))
89#define DECL_preinv_mod_1(name) \
90  __GMP_DECLSPEC mp_limb_t name __GMP_PROTO ((mp_srcptr, mp_size_t, mp_limb_t, mp_limb_t))
91#define DECL_rshift(name) \
92  DECL_lshift (name)
93#define DECL_sqr_basecase(name) \
94  __GMP_DECLSPEC void name __GMP_PROTO ((mp_ptr, mp_srcptr, mp_size_t))
95#define DECL_sub_n(name) \
96  DECL_add_n (name)
97#define DECL_submul_1(name) \
98  DECL_addmul_1 (name)
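
/* Illustrative sketch (not taken from this file): a fat binary can declare
   each CPU-specific variant with one of the DECL_* macros, e.g.

     DECL_add_n (__gmpn_add_n_generic);        hypothetical variant name

   which expands to an ordinary prototype

     __GMP_DECLSPEC mp_limb_t __gmpn_add_n_generic __GMP_PROTO ((mp_ptr, mp_srcptr, mp_srcptr, mp_size_t));

   whose address can then be stored in the matching __gmp_cpuvec slot.  */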
99
100#if ! __GMP_WITHIN_CONFIGURE
101#include "config.h"
102#include "gmp-mparam.h"
103#include "fib_table.h"
104#include "mp_bases.h"
105#if WANT_FAT_BINARY
106#include "fat.h"
107#endif
108#endif
109
110#if HAVE_INTTYPES_H      /* for uint_least32_t */
111# include <inttypes.h>
112#else
113# if HAVE_STDINT_H
114#  include <stdint.h>
115# endif
116#endif
117
118#ifdef __cplusplus
119#include <cstring>  /* for strlen */
120#include <string>   /* for std::string */
121#endif
122
123
124#ifndef WANT_TMP_DEBUG  /* for TMP_ALLOC_LIMBS_2 and others */
125#define WANT_TMP_DEBUG 0
126#endif
127
128/* The following tries to get a good version of alloca.  The tests are
129   adapted from autoconf AC_FUNC_ALLOCA, with a couple of additions.
   Whether this succeeds is tested by GMP_FUNC_ALLOCA, and HAVE_ALLOCA will
   be set up appropriately.
132
133   ifndef alloca - a cpp define might already exist.
134       glibc <stdlib.h> includes <alloca.h> which uses GCC __builtin_alloca.
135       HP cc +Olibcalls adds a #define of alloca to __builtin_alloca.
136
137   GCC __builtin_alloca - preferred whenever available.
138
   _AIX pragma - IBM compilers need a #pragma in "each module that needs to
       use alloca".  The pragma is indented to protect pre-ANSI cpp's.  _IBMR2
       was used in past versions of GMP and is still retained in case it matters.
142
143       The autoconf manual says this pragma needs to be at the start of a C
144       file, apart from comments and preprocessor directives.  Is that true?
145       xlc on aix 4.xxx doesn't seem to mind it being after prototypes etc
146       from gmp.h.
147*/
148
149#ifndef alloca
150# ifdef __GNUC__
151#  define alloca __builtin_alloca
152# else
153#  ifdef __DECC
154#   define alloca(x) __ALLOCA(x)
155#  else
156#   ifdef _MSC_VER
157#    include <malloc.h>
158#    define alloca _alloca
159#   else
160#    if HAVE_ALLOCA_H
161#     include <alloca.h>
162#    else
163#     if defined (_AIX) || defined (_IBMR2)
164 #pragma alloca
165#     else
166       char *alloca ();
167#     endif
168#    endif
169#   endif
170#  endif
171# endif
172#endif
173
174
175/* if not provided by gmp-mparam.h */
176#ifndef BYTES_PER_MP_LIMB
177#define BYTES_PER_MP_LIMB  SIZEOF_MP_LIMB_T
178#endif
179#define GMP_LIMB_BYTES  BYTES_PER_MP_LIMB
180#ifndef GMP_LIMB_BITS
181#define GMP_LIMB_BITS  (8 * SIZEOF_MP_LIMB_T)
182#endif
183
184#define BITS_PER_ULONG  (8 * SIZEOF_UNSIGNED_LONG)
185
186
187/* gmp_uint_least32_t is an unsigned integer type with at least 32 bits. */
188#if HAVE_UINT_LEAST32_T
189typedef uint_least32_t      gmp_uint_least32_t;
190#else
191#if SIZEOF_UNSIGNED_SHORT >= 4
192typedef unsigned short      gmp_uint_least32_t;
193#else
194#if SIZEOF_UNSIGNED >= 4
195typedef unsigned            gmp_uint_least32_t;
196#else
197typedef unsigned long       gmp_uint_least32_t;
198#endif
199#endif
200#endif
201
202
203/* gmp_intptr_t, for pointer to integer casts */
204#if HAVE_INTPTR_T
205typedef intptr_t            gmp_intptr_t;
206#else /* fallback */
207typedef size_t              gmp_intptr_t;
208#endif
209
210
211/* pre-inverse types for truncating division and modulo */
212typedef struct {mp_limb_t inv32;} gmp_pi1_t;
213typedef struct {mp_limb_t inv21, inv32, inv53;} gmp_pi2_t;
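
/* Illustrative calling pattern (an assumption about usage, not taken from
   this file): a divide-and-conquer division precomputes the inverse of the
   divisor's top limbs once and then reuses it, roughly

     gmp_pi1_t dinv;
     invert_pi1 (dinv, dp[dn - 1], dp[dn - 2]);
     qh = mpn_dcpi1_div_qr (qp, np, nn, dp, dn, &dinv);

   with invert_pi1 assumed to be provided elsewhere in GMP's internals, so
   inv32 caches a reciprocal instead of recomputing it on every call
   (mpn_dcpi1_div_qr is declared further down in this file).  */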
214
215
216/* const and signed must match __gmp_const and __gmp_signed, so follow the
217   decision made for those in gmp.h.    */
218#if ! __GMP_HAVE_CONST
219#define const   /* empty */
220#define signed  /* empty */
221#endif
222
223/* "const" basically means a function does nothing but examine its arguments
224   and give a return value, it doesn't read or write any memory (neither
225   global nor pointed to by arguments), and has no other side-effects.  This
226   is more restrictive than "pure".  See info node "(gcc)Function
227   Attributes".  __GMP_NO_ATTRIBUTE_CONST_PURE lets tune/common.c etc turn
228   this off when trying to write timing loops.  */
229#if HAVE_ATTRIBUTE_CONST && ! defined (__GMP_NO_ATTRIBUTE_CONST_PURE)
230#define ATTRIBUTE_CONST  __attribute__ ((const))
231#else
232#define ATTRIBUTE_CONST
233#endif
234
235#if HAVE_ATTRIBUTE_NORETURN
236#define ATTRIBUTE_NORETURN  __attribute__ ((noreturn))
237#else
238#define ATTRIBUTE_NORETURN
239#endif
240
241/* "malloc" means a function behaves like malloc in that the pointer it
242   returns doesn't alias anything.  */
243#if HAVE_ATTRIBUTE_MALLOC
244#define ATTRIBUTE_MALLOC  __attribute__ ((malloc))
245#else
246#define ATTRIBUTE_MALLOC
247#endif
248
249
250#if ! HAVE_STRCHR
251#define strchr(s,c)  index(s,c)
252#endif
253
254#if ! HAVE_MEMSET
255#define memset(p, c, n)                 \
256  do {                                  \
257    ASSERT ((n) >= 0);                  \
258    char *__memset__p = (p);            \
259    int  __i;                           \
260    for (__i = 0; __i < (n); __i++)     \
261      __memset__p[__i] = (c);           \
262  } while (0)
263#endif
264
265/* va_copy is standard in C99, and gcc provides __va_copy when in strict C89
266   mode.  Falling back to a memcpy will give maximum portability, since it
267   works no matter whether va_list is a pointer, struct or array.  */
268#if ! defined (va_copy) && defined (__va_copy)
269#define va_copy(dst,src)  __va_copy(dst,src)
270#endif
271#if ! defined (va_copy)
272#define va_copy(dst,src) \
273  do { memcpy (&(dst), &(src), sizeof (va_list)); } while (0)
274#endif
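
/* Example (illustrative only, not GMP code; assumes <stdarg.h>): va_copy
   lets a varargs routine walk its argument list twice,

     void example (int count, ...)
     {
       va_list ap, ap2;
       int     i, total = 0, total2 = 0;
       va_start (ap, count);
       va_copy (ap2, ap);
       for (i = 0; i < count; i++)
         total += va_arg (ap, int);
       for (i = 0; i < count; i++)
         total2 += va_arg (ap2, int);
       va_end (ap2);
       va_end (ap);
     }

   working whether va_copy is native, __va_copy, or the memcpy fallback.  */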
275
276
277/* HAVE_HOST_CPU_alpha_CIX is 1 on an alpha with the CIX instructions
278   (ie. ctlz, ctpop, cttz).  */
279#if HAVE_HOST_CPU_alphaev67 || HAVE_HOST_CPU_alphaev68  \
280  || HAVE_HOST_CPU_alphaev7
281#define HAVE_HOST_CPU_alpha_CIX 1
282#endif
283
284
285#if defined (__cplusplus)
286extern "C" {
287#endif
288
289
290/* Usage: TMP_DECL;
291          TMP_MARK;
292          ptr = TMP_ALLOC (bytes);
293          TMP_FREE;
294
   Small allocations should use TMP_SALLOC, and big allocations should use
   TMP_BALLOC.  Allocations that might be small or big should use TMP_ALLOC.
297
298   Functions that use just TMP_SALLOC should use TMP_SDECL, TMP_SMARK, and
299   TMP_SFREE.
300
301   TMP_DECL just declares a variable, but might be empty and so must be last
302   in a list of variables.  TMP_MARK must be done before any TMP_ALLOC.
   TMP_ALLOC(0) is not allowed.  TMP_FREE doesn't need to be done if a
   TMP_MARK was made but no TMP_ALLOCs followed.  */
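
/* A sketch (illustrative, not a real GMP routine) of the scheme above,
   with TMP_ALLOC_LIMBS defined further down in this file:

     static void
     foo (mp_ptr rp, mp_srcptr ap, mp_size_t n)
     {
       mp_ptr  scratch;
       TMP_DECL;
       TMP_MARK;
       scratch = TMP_ALLOC_LIMBS (2 * n);
       mpn_sqr (scratch, ap, n);
       mpn_add_n (rp, scratch, scratch + n, n);
       TMP_FREE;
     }
*/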
305
306/* The alignment in bytes, used for TMP_ALLOCed blocks, when alloca or
307   __gmp_allocate_func doesn't already determine it.  Currently TMP_ALLOC
308   isn't used for "double"s, so that's not in the union.  */
309union tmp_align_t {
310  mp_limb_t  l;
311  char       *p;
312};
313#define __TMP_ALIGN  sizeof (union tmp_align_t)
314
315/* Return "a" rounded upwards to a multiple of "m", if it isn't already.
316   "a" must be an unsigned type.
317   This is designed for use with a compile-time constant "m".
   The POW2 case is expected to be usual, and gcc 3.0 and up recognises that
   "(-(8*n))%8" or the like is always zero, which means the rounding up in
   the WANT_TMP_NOTREENTRANT version of TMP_ALLOC below will be a no-op.  */
321#define ROUND_UP_MULTIPLE(a,m)          \
322  (POW2_P(m) ? (a) + (-(a))%(m)         \
323   : (a)+(m)-1 - (((a)+(m)-1) % (m)))
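
/* For example, ROUND_UP_MULTIPLE (13, 8) is 16 through the POW2 branch,
   ROUND_UP_MULTIPLE (13, 12) is 24 through the general branch, and a value
   already a multiple, such as ROUND_UP_MULTIPLE (16, 8), comes back
   unchanged.  */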
324
325#if defined (WANT_TMP_ALLOCA) || defined (WANT_TMP_REENTRANT)
326struct tmp_reentrant_t {
327  struct tmp_reentrant_t  *next;
328  size_t		  size;	  /* bytes, including header */
329};
330__GMP_DECLSPEC void *__gmp_tmp_reentrant_alloc __GMP_PROTO ((struct tmp_reentrant_t **, size_t)) ATTRIBUTE_MALLOC;
331__GMP_DECLSPEC void  __gmp_tmp_reentrant_free __GMP_PROTO ((struct tmp_reentrant_t *));
332#endif
333
334#if WANT_TMP_ALLOCA
335#define TMP_SDECL
336#define TMP_DECL		struct tmp_reentrant_t *__tmp_marker
337#define TMP_SMARK
338#define TMP_MARK		__tmp_marker = 0
339#define TMP_SALLOC(n)		alloca(n)
340#define TMP_BALLOC(n)		__gmp_tmp_reentrant_alloc (&__tmp_marker, n)
341#define TMP_ALLOC(n)							\
342  (LIKELY ((n) < 65536) ? TMP_SALLOC(n) : TMP_BALLOC(n))
343#define TMP_SFREE
344#define TMP_FREE							   \
345  do {									   \
346    if (UNLIKELY (__tmp_marker != 0)) __gmp_tmp_reentrant_free (__tmp_marker); \
347  } while (0)
348#endif
349
350#if WANT_TMP_REENTRANT
351#define TMP_SDECL		TMP_DECL
352#define TMP_DECL		struct tmp_reentrant_t *__tmp_marker
353#define TMP_SMARK		TMP_MARK
354#define TMP_MARK		__tmp_marker = 0
355#define TMP_SALLOC(n)		TMP_ALLOC(n)
356#define TMP_BALLOC(n)		TMP_ALLOC(n)
357#define TMP_ALLOC(n)		__gmp_tmp_reentrant_alloc (&__tmp_marker, n)
358#define TMP_SFREE		TMP_FREE
359#define TMP_FREE		__gmp_tmp_reentrant_free (__tmp_marker)
360#endif
361
362#if WANT_TMP_NOTREENTRANT
363struct tmp_marker
364{
365  struct tmp_stack *which_chunk;
366  void *alloc_point;
367};
368__GMP_DECLSPEC void *__gmp_tmp_alloc __GMP_PROTO ((unsigned long)) ATTRIBUTE_MALLOC;
369__GMP_DECLSPEC void __gmp_tmp_mark __GMP_PROTO ((struct tmp_marker *));
370__GMP_DECLSPEC void __gmp_tmp_free __GMP_PROTO ((struct tmp_marker *));
371#define TMP_SDECL		TMP_DECL
372#define TMP_DECL		struct tmp_marker __tmp_marker
373#define TMP_SMARK		TMP_MARK
374#define TMP_MARK		__gmp_tmp_mark (&__tmp_marker)
375#define TMP_SALLOC(n)		TMP_ALLOC(n)
376#define TMP_BALLOC(n)		TMP_ALLOC(n)
377#define TMP_ALLOC(n)							\
378  __gmp_tmp_alloc (ROUND_UP_MULTIPLE ((unsigned long) (n), __TMP_ALIGN))
379#define TMP_SFREE		TMP_FREE
380#define TMP_FREE		__gmp_tmp_free (&__tmp_marker)
381#endif
382
383#if WANT_TMP_DEBUG
384/* See tal-debug.c for some comments. */
385struct tmp_debug_t {
386  struct tmp_debug_entry_t  *list;
387  const char                *file;
388  int                       line;
389};
390struct tmp_debug_entry_t {
391  struct tmp_debug_entry_t  *next;
392  char                      *block;
393  size_t                    size;
394};
395__GMP_DECLSPEC void  __gmp_tmp_debug_mark  __GMP_PROTO ((const char *, int, struct tmp_debug_t **,
396							 struct tmp_debug_t *,
397							 const char *, const char *));
398__GMP_DECLSPEC void *__gmp_tmp_debug_alloc __GMP_PROTO ((const char *, int, int,
399							 struct tmp_debug_t **, const char *,
400							 size_t)) ATTRIBUTE_MALLOC;
401__GMP_DECLSPEC void  __gmp_tmp_debug_free  __GMP_PROTO ((const char *, int, int,
402							 struct tmp_debug_t **,
403							 const char *, const char *));
404#define TMP_SDECL TMP_DECL_NAME(__tmp_xmarker, "__tmp_marker")
405#define TMP_DECL TMP_DECL_NAME(__tmp_xmarker, "__tmp_marker")
406#define TMP_SMARK TMP_MARK_NAME(__tmp_xmarker, "__tmp_marker")
407#define TMP_MARK TMP_MARK_NAME(__tmp_xmarker, "__tmp_marker")
408#define TMP_SFREE TMP_FREE_NAME(__tmp_xmarker, "__tmp_marker")
409#define TMP_FREE TMP_FREE_NAME(__tmp_xmarker, "__tmp_marker")
410/* The marker variable is designed to provoke an uninitialized variable
411   warning from the compiler if TMP_FREE is used without a TMP_MARK.
412   __tmp_marker_inscope does the same for TMP_ALLOC.  Runtime tests pick
413   these things up too.  */
414#define TMP_DECL_NAME(marker, marker_name)                      \
415  int marker;                                                   \
416  int __tmp_marker_inscope;                                     \
417  const char *__tmp_marker_name = marker_name;                  \
418  struct tmp_debug_t  __tmp_marker_struct;                      \
419  /* don't demand NULL, just cast a zero */                     \
420  struct tmp_debug_t  *__tmp_marker = (struct tmp_debug_t *) 0
421#define TMP_MARK_NAME(marker, marker_name)                      \
422  do {                                                          \
423    marker = 1;                                                 \
424    __tmp_marker_inscope = 1;                                   \
425    __gmp_tmp_debug_mark  (ASSERT_FILE, ASSERT_LINE,            \
426                           &__tmp_marker, &__tmp_marker_struct, \
427                           __tmp_marker_name, marker_name);     \
428  } while (0)
429#define TMP_SALLOC(n)		TMP_ALLOC(n)
430#define TMP_BALLOC(n)		TMP_ALLOC(n)
431#define TMP_ALLOC(size)                                                 \
432  __gmp_tmp_debug_alloc (ASSERT_FILE, ASSERT_LINE,                      \
433                         __tmp_marker_inscope,                          \
434                         &__tmp_marker, __tmp_marker_name, size)
435#define TMP_FREE_NAME(marker, marker_name)                      \
436  do {                                                          \
437    __gmp_tmp_debug_free  (ASSERT_FILE, ASSERT_LINE,            \
438                           marker, &__tmp_marker,               \
439                           __tmp_marker_name, marker_name);     \
440  } while (0)
441#endif /* WANT_TMP_DEBUG */
442
443
444/* Allocating various types. */
445#define TMP_ALLOC_TYPE(n,type)  ((type *) TMP_ALLOC ((n) * sizeof (type)))
446#define TMP_SALLOC_TYPE(n,type) ((type *) TMP_SALLOC ((n) * sizeof (type)))
447#define TMP_BALLOC_TYPE(n,type) ((type *) TMP_BALLOC ((n) * sizeof (type)))
448#define TMP_ALLOC_LIMBS(n)      TMP_ALLOC_TYPE(n,mp_limb_t)
449#define TMP_SALLOC_LIMBS(n)     TMP_SALLOC_TYPE(n,mp_limb_t)
450#define TMP_BALLOC_LIMBS(n)     TMP_BALLOC_TYPE(n,mp_limb_t)
451#define TMP_ALLOC_MP_PTRS(n)    TMP_ALLOC_TYPE(n,mp_ptr)
452#define TMP_SALLOC_MP_PTRS(n)   TMP_SALLOC_TYPE(n,mp_ptr)
453#define TMP_BALLOC_MP_PTRS(n)   TMP_BALLOC_TYPE(n,mp_ptr)
454
455/* It's more efficient to allocate one block than two.  This is certainly
456   true of the malloc methods, but it can even be true of alloca if that
457   involves copying a chunk of stack (various RISCs), or a call to a stack
458   bounds check (mingw).  In any case, when debugging keep separate blocks
459   so a redzoning malloc debugger can protect each individually.  */
460#define TMP_ALLOC_LIMBS_2(xp,xsize, yp,ysize)           \
461  do {                                                  \
462    if (WANT_TMP_DEBUG)                                 \
463      {                                                 \
464        (xp) = TMP_ALLOC_LIMBS (xsize);                 \
465        (yp) = TMP_ALLOC_LIMBS (ysize);                 \
466      }                                                 \
467    else                                                \
468      {                                                 \
469        (xp) = TMP_ALLOC_LIMBS ((xsize) + (ysize));     \
470        (yp) = (xp) + (xsize);                          \
471      }                                                 \
472  } while (0)
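
/* For instance (illustrative), TMP_ALLOC_LIMBS_2 (tp,n, sp,2*n+1) gives two
   limb blocks carved from a single underlying allocation in a normal build,
   but two separate allocations under WANT_TMP_DEBUG so each can be redzoned
   individually.  */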
473
474
475/* From gmp.h, nicer names for internal use. */
476#define CRAY_Pragma(str)               __GMP_CRAY_Pragma(str)
477#define MPN_CMP(result, xp, yp, size)  __GMPN_CMP(result, xp, yp, size)
478#define LIKELY(cond)                   __GMP_LIKELY(cond)
479#define UNLIKELY(cond)                 __GMP_UNLIKELY(cond)
480
481#define ABS(x) ((x) >= 0 ? (x) : -(x))
482#define ABS_CAST(T,x) ((x) >= 0 ? (T)(x) : -((T)((x) + 1) - 1))
483#undef MIN
484#define MIN(l,o) ((l) < (o) ? (l) : (o))
485#undef MAX
486#define MAX(h,i) ((h) > (i) ? (h) : (i))
487#define numberof(x)  (sizeof (x) / sizeof ((x)[0]))
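
/* ABS_CAST avoids evaluating -x on the most negative value: for example
   ABS_CAST (unsigned long, LONG_MIN) adds 1 while still signed, casts to the
   unsigned type, then subtracts 1 and negates in unsigned arithmetic, giving
   0x80000000 with 32-bit longs, whereas ABS (LONG_MIN) would overflow in the
   signed negation.  */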
488
489/* Field access macros.  */
490#define SIZ(x) ((x)->_mp_size)
491#define ABSIZ(x) ABS (SIZ (x))
492#define PTR(x) ((x)->_mp_d)
493#define LIMBS(x) ((x)->_mp_d)
494#define EXP(x) ((x)->_mp_exp)
495#define PREC(x) ((x)->_mp_prec)
496#define ALLOC(x) ((x)->_mp_alloc)
497
498/* n-1 inverts any low zeros and the lowest one bit.  If n&(n-1) leaves zero
499   then that lowest one bit must have been the only bit set.  n==0 will
500   return true though, so avoid that.  */
501#define POW2_P(n)  (((n) & ((n) - 1)) == 0)
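
/* For example POW2_P(1) and POW2_P(8) are non-zero, POW2_P(12) is zero since
   12 & 11 is 8, and POW2_P(0) is (misleadingly) non-zero, which is the n==0
   case warned about above.  */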
502
503
504/* The "short" defines are a bit different because shorts are promoted to
505   ints by ~ or >> etc.
506
507   #ifndef's are used since on some systems (HP?) header files other than
508   limits.h setup these defines.  We could forcibly #undef in that case, but
509   there seems no need to worry about that.  */
510
511#ifndef ULONG_MAX
512#define ULONG_MAX   __GMP_ULONG_MAX
513#endif
514#ifndef UINT_MAX
515#define UINT_MAX    __GMP_UINT_MAX
516#endif
517#ifndef USHRT_MAX
518#define USHRT_MAX   __GMP_USHRT_MAX
519#endif
520#define MP_LIMB_T_MAX      (~ (mp_limb_t) 0)
521
522/* Must cast ULONG_MAX etc to unsigned long etc, since they might not be
523   unsigned on a K&R compiler.  In particular the HP-UX 10 bundled K&R cc
524   treats the plain decimal values in <limits.h> as signed.  */
525#define ULONG_HIGHBIT      (ULONG_MAX ^ ((unsigned long) ULONG_MAX >> 1))
526#define UINT_HIGHBIT       (UINT_MAX ^ ((unsigned) UINT_MAX >> 1))
527#define USHRT_HIGHBIT      ((unsigned short) (USHRT_MAX ^ ((unsigned short) USHRT_MAX >> 1)))
528#define GMP_LIMB_HIGHBIT  (MP_LIMB_T_MAX ^ (MP_LIMB_T_MAX >> 1))
529
530#ifndef LONG_MIN
531#define LONG_MIN           ((long) ULONG_HIGHBIT)
532#endif
533#ifndef LONG_MAX
534#define LONG_MAX           (-(LONG_MIN+1))
535#endif
536
537#ifndef INT_MIN
538#define INT_MIN            ((int) UINT_HIGHBIT)
539#endif
540#ifndef INT_MAX
541#define INT_MAX            (-(INT_MIN+1))
542#endif
543
544#ifndef SHRT_MIN
545#define SHRT_MIN           ((short) USHRT_HIGHBIT)
546#endif
547#ifndef SHRT_MAX
548#define SHRT_MAX           ((short) (-(SHRT_MIN+1)))
549#endif
550
551#if __GMP_MP_SIZE_T_INT
552#define MP_SIZE_T_MAX      INT_MAX
553#define MP_SIZE_T_MIN      INT_MIN
554#else
555#define MP_SIZE_T_MAX      LONG_MAX
556#define MP_SIZE_T_MIN      LONG_MIN
557#endif
558
559/* mp_exp_t is the same as mp_size_t */
560#define MP_EXP_T_MAX   MP_SIZE_T_MAX
561#define MP_EXP_T_MIN   MP_SIZE_T_MIN
562
563#define LONG_HIGHBIT       LONG_MIN
564#define INT_HIGHBIT        INT_MIN
565#define SHRT_HIGHBIT       SHRT_MIN
566
567
568#define GMP_NUMB_HIGHBIT  (CNST_LIMB(1) << (GMP_NUMB_BITS-1))
569
570#if GMP_NAIL_BITS == 0
571#define GMP_NAIL_LOWBIT   CNST_LIMB(0)
572#else
573#define GMP_NAIL_LOWBIT   (CNST_LIMB(1) << GMP_NUMB_BITS)
574#endif
575
576#if GMP_NAIL_BITS != 0
577/* Set various *_THRESHOLD values to be used for nails.  Thus we avoid using
578   code that has not yet been qualified.  */
579
580#undef  DC_DIV_QR_THRESHOLD
581#define DC_DIV_QR_THRESHOLD              50
582
583#undef DIVREM_1_NORM_THRESHOLD
584#undef DIVREM_1_UNNORM_THRESHOLD
585#undef MOD_1_NORM_THRESHOLD
586#undef MOD_1_UNNORM_THRESHOLD
587#undef USE_PREINV_DIVREM_1
588#undef DIVREM_2_THRESHOLD
589#undef DIVEXACT_1_THRESHOLD
590#define DIVREM_1_NORM_THRESHOLD           MP_SIZE_T_MAX  /* no preinv */
591#define DIVREM_1_UNNORM_THRESHOLD         MP_SIZE_T_MAX  /* no preinv */
592#define MOD_1_NORM_THRESHOLD              MP_SIZE_T_MAX  /* no preinv */
593#define MOD_1_UNNORM_THRESHOLD            MP_SIZE_T_MAX  /* no preinv */
594#define USE_PREINV_DIVREM_1               0  /* no preinv */
595#define DIVREM_2_THRESHOLD                MP_SIZE_T_MAX  /* no preinv */
596
597/* mpn/generic/mul_fft.c is not nails-capable. */
598#undef  MUL_FFT_THRESHOLD
599#undef  SQR_FFT_THRESHOLD
600#define MUL_FFT_THRESHOLD                MP_SIZE_T_MAX
601#define SQR_FFT_THRESHOLD                MP_SIZE_T_MAX
602#endif
603
604/* Swap macros. */
605
606#define MP_LIMB_T_SWAP(x, y)                    \
607  do {                                          \
608    mp_limb_t __mp_limb_t_swap__tmp = (x);      \
609    (x) = (y);                                  \
610    (y) = __mp_limb_t_swap__tmp;                \
611  } while (0)
612#define MP_SIZE_T_SWAP(x, y)                    \
613  do {                                          \
614    mp_size_t __mp_size_t_swap__tmp = (x);      \
615    (x) = (y);                                  \
616    (y) = __mp_size_t_swap__tmp;                \
617  } while (0)
618
619#define MP_PTR_SWAP(x, y)               \
620  do {                                  \
621    mp_ptr __mp_ptr_swap__tmp = (x);    \
622    (x) = (y);                          \
623    (y) = __mp_ptr_swap__tmp;           \
624  } while (0)
625#define MP_SRCPTR_SWAP(x, y)                    \
626  do {                                          \
627    mp_srcptr __mp_srcptr_swap__tmp = (x);      \
628    (x) = (y);                                  \
629    (y) = __mp_srcptr_swap__tmp;                \
630  } while (0)
631
632#define MPN_PTR_SWAP(xp,xs, yp,ys)      \
633  do {                                  \
634    MP_PTR_SWAP (xp, yp);               \
635    MP_SIZE_T_SWAP (xs, ys);            \
636  } while(0)
637#define MPN_SRCPTR_SWAP(xp,xs, yp,ys)   \
638  do {                                  \
639    MP_SRCPTR_SWAP (xp, yp);            \
640    MP_SIZE_T_SWAP (xs, ys);            \
641  } while(0)
642
643#define MPZ_PTR_SWAP(x, y)              \
644  do {                                  \
645    mpz_ptr __mpz_ptr_swap__tmp = (x);  \
646    (x) = (y);                          \
647    (y) = __mpz_ptr_swap__tmp;          \
648  } while (0)
649#define MPZ_SRCPTR_SWAP(x, y)                   \
650  do {                                          \
651    mpz_srcptr __mpz_srcptr_swap__tmp = (x);    \
652    (x) = (y);                                  \
653    (y) = __mpz_srcptr_swap__tmp;               \
654  } while (0)
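
/* Typical use (illustrative): enforcing an operand-size convention such as
   "first operand at least as long as the second",

     if (an < bn)
       MPN_SRCPTR_SWAP (ap,an, bp,bn);

   which swaps the pointers and their sizes together.  */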
655
656
657/* Enhancement: __gmp_allocate_func could have "__attribute__ ((malloc))",
658   but current gcc (3.0) doesn't seem to support that.  */
659__GMP_DECLSPEC extern void * (*__gmp_allocate_func) __GMP_PROTO ((size_t));
660__GMP_DECLSPEC extern void * (*__gmp_reallocate_func) __GMP_PROTO ((void *, size_t, size_t));
661__GMP_DECLSPEC extern void   (*__gmp_free_func) __GMP_PROTO ((void *, size_t));
662
663__GMP_DECLSPEC void *__gmp_default_allocate __GMP_PROTO ((size_t));
664__GMP_DECLSPEC void *__gmp_default_reallocate __GMP_PROTO ((void *, size_t, size_t));
665__GMP_DECLSPEC void __gmp_default_free __GMP_PROTO ((void *, size_t));
666
667#define __GMP_ALLOCATE_FUNC_TYPE(n,type) \
668  ((type *) (*__gmp_allocate_func) ((n) * sizeof (type)))
669#define __GMP_ALLOCATE_FUNC_LIMBS(n)   __GMP_ALLOCATE_FUNC_TYPE (n, mp_limb_t)
670
671#define __GMP_REALLOCATE_FUNC_TYPE(p, old_size, new_size, type) \
672  ((type *) (*__gmp_reallocate_func)                            \
673   (p, (old_size) * sizeof (type), (new_size) * sizeof (type)))
674#define __GMP_REALLOCATE_FUNC_LIMBS(p, old_size, new_size) \
675  __GMP_REALLOCATE_FUNC_TYPE(p, old_size, new_size, mp_limb_t)
676
677#define __GMP_FREE_FUNC_TYPE(p,n,type) (*__gmp_free_func) (p, (n) * sizeof (type))
678#define __GMP_FREE_FUNC_LIMBS(p,n)     __GMP_FREE_FUNC_TYPE (p, n, mp_limb_t)
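
/* Illustrative use of the typed wrappers (not a real GMP routine): sizes are
   given in limbs and the wrappers do the sizeof scaling,

     mp_ptr p = __GMP_ALLOCATE_FUNC_LIMBS (n);
     ...
     p = __GMP_REALLOCATE_FUNC_LIMBS (p, n, 2 * n);
     ...
     __GMP_FREE_FUNC_LIMBS (p, 2 * n);
*/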
679
680#define __GMP_REALLOCATE_FUNC_MAYBE(ptr, oldsize, newsize)      \
681  do {                                                          \
682    if ((oldsize) != (newsize))                                 \
683      (ptr) = (*__gmp_reallocate_func) (ptr, oldsize, newsize); \
684  } while (0)
685
686#define __GMP_REALLOCATE_FUNC_MAYBE_TYPE(ptr, oldsize, newsize, type)   \
687  do {                                                                  \
688    if ((oldsize) != (newsize))                                         \
689      (ptr) = (type *) (*__gmp_reallocate_func)                         \
690        (ptr, (oldsize) * sizeof (type), (newsize) * sizeof (type));    \
691  } while (0)
692
693
694/* Dummy for non-gcc, code involving it will go dead. */
695#if ! defined (__GNUC__) || __GNUC__ < 2
696#define __builtin_constant_p(x)   0
697#endif
698
699
700/* In gcc 2.96 and up on i386, tail calls are optimized to jumps if the
701   stack usage is compatible.  __attribute__ ((regparm (N))) helps by
702   putting leading parameters in registers, avoiding extra stack.
703
704   regparm cannot be used with calls going through the PLT, because the
705   binding code there may clobber the registers (%eax, %edx, %ecx) used for
706   the regparm parameters.  Calls to local (ie. static) functions could
707   still use this, if we cared to differentiate locals and globals.
708
709   On athlon-unknown-freebsd4.9 with gcc 3.3.3, regparm cannot be used with
710   -p or -pg profiling, since that version of gcc doesn't realize the
711   .mcount calls will clobber the parameter registers.  Other systems are
712   ok, like debian with glibc 2.3.2 (mcount doesn't clobber), but we don't
713   bother to try to detect this.  regparm is only an optimization so we just
714   disable it when profiling (profiling being a slowdown anyway).  */
715
716#if HAVE_HOST_CPU_FAMILY_x86 && __GMP_GNUC_PREREQ (2,96) && ! defined (PIC) \
717  && ! WANT_PROFILING_PROF && ! WANT_PROFILING_GPROF
718#define USE_LEADING_REGPARM 1
719#else
720#define USE_LEADING_REGPARM 0
721#endif
722
723/* Macros for altering parameter order according to regparm usage. */
724#if USE_LEADING_REGPARM
725#define REGPARM_2_1(a,b,x)    x,a,b
726#define REGPARM_3_1(a,b,c,x)  x,a,b,c
727#define REGPARM_ATTR(n) __attribute__ ((regparm (n)))
728#else
729#define REGPARM_2_1(a,b,x)    a,b,x
730#define REGPARM_3_1(a,b,c,x)  a,b,c,x
731#define REGPARM_ATTR(n)
732#endif
733
734
735/* ASM_L gives a local label for a gcc asm block, for use when temporary
736   local labels like "1:" might not be available, which is the case for
737   instance on the x86s (the SCO assembler doesn't support them).
738
739   The label generated is made unique by including "%=" which is a unique
740   number for each insn.  This ensures the same name can be used in multiple
741   asm blocks, perhaps via a macro.  Since jumps between asm blocks are not
742   allowed there's no need for a label to be usable outside a single
743   block.  */
744
745#define ASM_L(name)  LSYM_PREFIX "asm_%=_" #name
746
747
748#if defined (__GNUC__) && HAVE_HOST_CPU_FAMILY_x86
749#if 0
750/* FIXME: Check that these actually improve things.
751   FIXME: Need a cld after each std.
752   FIXME: Can't have inputs in clobbered registers, must describe them as
753   dummy outputs, and add volatile. */
754#define MPN_COPY_INCR(DST, SRC, N)					\
755  __asm__ ("cld\n\trep\n\tmovsl" : :					\
756	   "D" (DST), "S" (SRC), "c" (N) :				\
757	   "cx", "di", "si", "memory")
758#define MPN_COPY_DECR(DST, SRC, N)					\
759  __asm__ ("std\n\trep\n\tmovsl" : :					\
760	   "D" ((DST) + (N) - 1), "S" ((SRC) + (N) - 1), "c" (N) :	\
761	   "cx", "di", "si", "memory")
762#endif
763#endif
764
765
766__GMP_DECLSPEC void __gmpz_aorsmul_1 __GMP_PROTO ((REGPARM_3_1 (mpz_ptr, mpz_srcptr, mp_limb_t, mp_size_t))) REGPARM_ATTR(1);
767#define mpz_aorsmul_1(w,u,v,sub)  __gmpz_aorsmul_1 (REGPARM_3_1 (w, u, v, sub))
768
769#define mpz_n_pow_ui __gmpz_n_pow_ui
770__GMP_DECLSPEC void    mpz_n_pow_ui __GMP_PROTO ((mpz_ptr, mp_srcptr, mp_size_t, unsigned long));
771
772
773#define mpn_addmul_1c __MPN(addmul_1c)
774__GMP_DECLSPEC mp_limb_t mpn_addmul_1c __GMP_PROTO ((mp_ptr, mp_srcptr, mp_size_t, mp_limb_t, mp_limb_t));
775
776#define mpn_addmul_2 __MPN(addmul_2)
777__GMP_DECLSPEC mp_limb_t mpn_addmul_2 __GMP_PROTO ((mp_ptr, mp_srcptr, mp_size_t, mp_srcptr));
778
779#define mpn_addmul_3 __MPN(addmul_3)
780__GMP_DECLSPEC mp_limb_t mpn_addmul_3 __GMP_PROTO ((mp_ptr, mp_srcptr, mp_size_t, mp_srcptr));
781
782#define mpn_addmul_4 __MPN(addmul_4)
783__GMP_DECLSPEC mp_limb_t mpn_addmul_4 __GMP_PROTO ((mp_ptr, mp_srcptr, mp_size_t, mp_srcptr));
784
785#define mpn_addmul_5 __MPN(addmul_5)
786__GMP_DECLSPEC mp_limb_t mpn_addmul_5 __GMP_PROTO ((mp_ptr, mp_srcptr, mp_size_t, mp_srcptr));
787
788#define mpn_addmul_6 __MPN(addmul_6)
789__GMP_DECLSPEC mp_limb_t mpn_addmul_6 __GMP_PROTO ((mp_ptr, mp_srcptr, mp_size_t, mp_srcptr));
790
791#define mpn_addmul_7 __MPN(addmul_7)
792__GMP_DECLSPEC mp_limb_t mpn_addmul_7 __GMP_PROTO ((mp_ptr, mp_srcptr, mp_size_t, mp_srcptr));
793
794#define mpn_addmul_8 __MPN(addmul_8)
795__GMP_DECLSPEC mp_limb_t mpn_addmul_8 __GMP_PROTO ((mp_ptr, mp_srcptr, mp_size_t, mp_srcptr));
796
797/* mpn_addlsh1_n(c,a,b,n), when it exists, sets {c,n} to {a,n}+2*{b,n}, and
798   returns the carry out (0, 1 or 2).  */
799#define mpn_addlsh1_n __MPN(addlsh1_n)
800__GMP_DECLSPEC mp_limb_t mpn_addlsh1_n __GMP_PROTO ((mp_ptr, mp_srcptr, mp_srcptr, mp_size_t));
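
/* For instance, with n == 1 and a[0] == b[0] == GMP_NUMB_MAX the sum is
   3*GMP_NUMB_MAX == 2*B + (GMP_NUMB_MAX - 2) with B = 2^GMP_NUMB_BITS, so
   c[0] becomes GMP_NUMB_MAX - 2 and the returned carry is 2, the maximum.  */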
801
802/* mpn_addlsh2_n(c,a,b,n), when it exists, sets {c,n} to {a,n}+4*{b,n}, and
803   returns the carry out (0, ..., 4).  */
804#define mpn_addlsh2_n __MPN(addlsh2_n)
805__GMP_DECLSPEC mp_limb_t mpn_addlsh2_n __GMP_PROTO ((mp_ptr, mp_srcptr, mp_srcptr, mp_size_t));
806
807/* mpn_addlsh_n(c,a,b,n,k), when it exists, sets {c,n} to {a,n}+2^k*{b,n}, and
808   returns the carry out (0, ..., 2^k).  */
809#define mpn_addlsh_n __MPN(addlsh_n)
810  __GMP_DECLSPEC mp_limb_t mpn_addlsh_n __GMP_PROTO ((mp_ptr, mp_srcptr, mp_srcptr, mp_size_t, unsigned int));
811
812/* mpn_sublsh1_n(c,a,b,n), when it exists, sets {c,n} to {a,n}-2*{b,n}, and
813   returns the borrow out (0, 1 or 2).  */
814#define mpn_sublsh1_n __MPN(sublsh1_n)
815__GMP_DECLSPEC mp_limb_t mpn_sublsh1_n __GMP_PROTO ((mp_ptr, mp_srcptr, mp_srcptr, mp_size_t));
816
817/* mpn_rsblsh1_n(c,a,b,n), when it exists, sets {c,n} to 2*{b,n}-{a,n}, and
818   returns the carry out (-1, 0, 1).  */
819#define mpn_rsblsh1_n __MPN(rsblsh1_n)
820__GMP_DECLSPEC mp_limb_signed_t mpn_rsblsh1_n __GMP_PROTO ((mp_ptr, mp_srcptr, mp_srcptr, mp_size_t));
821
822/* mpn_sublsh2_n(c,a,b,n), when it exists, sets {c,n} to {a,n}-4*{b,n}, and
823   returns the borrow out (FIXME 0, 1, 2 or 3).  */
824#define mpn_sublsh2_n __MPN(sublsh2_n)
825__GMP_DECLSPEC mp_limb_t mpn_sublsh2_n __GMP_PROTO ((mp_ptr, mp_srcptr, mp_srcptr, mp_size_t));
826
827/* mpn_rsblsh2_n(c,a,b,n), when it exists, sets {c,n} to 4*{b,n}-{a,n}, and
828   returns the carry out (-1, ..., 3).  */
829#define mpn_rsblsh2_n __MPN(rsblsh2_n)
830__GMP_DECLSPEC mp_limb_signed_t mpn_rsblsh2_n __GMP_PROTO ((mp_ptr, mp_srcptr, mp_srcptr, mp_size_t));
831
832/* mpn_rsblsh_n(c,a,b,n,k), when it exists, sets {c,n} to 2^k*{b,n}-{a,n}, and
833   returns the carry out (-1, 0, ..., 2^k-1).  */
834#define mpn_rsblsh_n __MPN(rsblsh_n)
835__GMP_DECLSPEC mp_limb_signed_t mpn_rsblsh_n __GMP_PROTO ((mp_ptr, mp_srcptr, mp_srcptr, mp_size_t, unsigned int));
836
837/* mpn_rsh1add_n(c,a,b,n), when it exists, sets {c,n} to ({a,n} + {b,n}) >> 1,
838   and returns the bit rshifted out (0 or 1).  */
839#define mpn_rsh1add_n __MPN(rsh1add_n)
840__GMP_DECLSPEC mp_limb_t mpn_rsh1add_n __GMP_PROTO ((mp_ptr, mp_srcptr, mp_srcptr, mp_size_t));
841#define mpn_rsh1add_nc __MPN(rsh1add_nc)
842__GMP_DECLSPEC mp_limb_t mpn_rsh1add_nc __GMP_PROTO ((mp_ptr, mp_srcptr, mp_srcptr, mp_size_t, mp_limb_t));
843
844/* mpn_rsh1sub_n(c,a,b,n), when it exists, sets {c,n} to ({a,n} - {b,n}) >> 1,
845   and returns the bit rshifted out (0 or 1).  If there's a borrow from the
846   subtract, it's stored as a 1 in the high bit of c[n-1], like a twos
847   complement negative.  */
848#define mpn_rsh1sub_n __MPN(rsh1sub_n)
849__GMP_DECLSPEC mp_limb_t mpn_rsh1sub_n __GMP_PROTO ((mp_ptr, mp_srcptr, mp_srcptr, mp_size_t));
850#define mpn_rsh1sub_nc __MPN(rsh1sub_nc)
851__GMP_DECLSPEC mp_limb_t mpn_rsh1sub_nc __GMP_PROTO ((mp_ptr, mp_srcptr, mp_srcptr, mp_size_t, mp_limb_t));
852
853#define mpn_lshiftc __MPN(lshiftc)
854__GMP_DECLSPEC mp_limb_t mpn_lshiftc __GMP_PROTO ((mp_ptr, mp_srcptr, mp_size_t, unsigned int));
855
856#define mpn_add_n_sub_n __MPN(add_n_sub_n)
857__GMP_DECLSPEC mp_limb_t mpn_add_n_sub_n __GMP_PROTO ((mp_ptr, mp_ptr, mp_srcptr, mp_srcptr, mp_size_t));
858
859#define mpn_add_n_sub_nc __MPN(add_n_sub_nc)
860__GMP_DECLSPEC mp_limb_t mpn_add_n_sub_nc __GMP_PROTO ((mp_ptr, mp_ptr, mp_srcptr, mp_srcptr, mp_size_t, mp_limb_t));
861
862#define mpn_addaddmul_1msb0 __MPN(addaddmul_1msb0)
863__GMP_DECLSPEC mp_limb_t mpn_addaddmul_1msb0 __GMP_PROTO ((mp_ptr, mp_srcptr, mp_srcptr, mp_size_t, mp_limb_t, mp_limb_t));
864
865#define mpn_divrem_1c __MPN(divrem_1c)
866__GMP_DECLSPEC mp_limb_t mpn_divrem_1c __GMP_PROTO ((mp_ptr, mp_size_t, mp_srcptr, mp_size_t, mp_limb_t, mp_limb_t));
867
868#define mpn_dump __MPN(dump)
869__GMP_DECLSPEC void mpn_dump __GMP_PROTO ((mp_srcptr, mp_size_t));
870
871#define mpn_fib2_ui __MPN(fib2_ui)
872__GMP_DECLSPEC mp_size_t mpn_fib2_ui __GMP_PROTO ((mp_ptr, mp_ptr, unsigned long));
873
874/* Remap names of internal mpn functions.  */
875#define __clz_tab               __MPN(clz_tab)
876#define mpn_udiv_w_sdiv		__MPN(udiv_w_sdiv)
877
878#define mpn_jacobi_base __MPN(jacobi_base)
879__GMP_DECLSPEC int mpn_jacobi_base __GMP_PROTO ((mp_limb_t, mp_limb_t, int)) ATTRIBUTE_CONST;
880
881#define mpn_mod_1c __MPN(mod_1c)
882__GMP_DECLSPEC mp_limb_t mpn_mod_1c __GMP_PROTO ((mp_srcptr, mp_size_t, mp_limb_t, mp_limb_t)) __GMP_ATTRIBUTE_PURE;
883
884#define mpn_mul_1c __MPN(mul_1c)
885__GMP_DECLSPEC mp_limb_t mpn_mul_1c __GMP_PROTO ((mp_ptr, mp_srcptr, mp_size_t, mp_limb_t, mp_limb_t));
886
887#define mpn_mul_2 __MPN(mul_2)
888__GMP_DECLSPEC mp_limb_t mpn_mul_2 __GMP_PROTO ((mp_ptr, mp_srcptr, mp_size_t, mp_srcptr));
889
890#define mpn_mul_3 __MPN(mul_3)
891__GMP_DECLSPEC mp_limb_t mpn_mul_3 __GMP_PROTO ((mp_ptr, mp_srcptr, mp_size_t, mp_srcptr));
892
893#define mpn_mul_4 __MPN(mul_4)
894__GMP_DECLSPEC mp_limb_t mpn_mul_4 __GMP_PROTO ((mp_ptr, mp_srcptr, mp_size_t, mp_srcptr));
895
896#ifndef mpn_mul_basecase  /* if not done with cpuvec in a fat binary */
897#define mpn_mul_basecase __MPN(mul_basecase)
898__GMP_DECLSPEC void mpn_mul_basecase __GMP_PROTO ((mp_ptr, mp_srcptr, mp_size_t, mp_srcptr, mp_size_t));
899#endif
900
901#define mpn_mullo_n __MPN(mullo_n)
902__GMP_DECLSPEC void mpn_mullo_n __GMP_PROTO ((mp_ptr, mp_srcptr, mp_srcptr, mp_size_t));
903
904#define mpn_mullo_basecase __MPN(mullo_basecase)
905__GMP_DECLSPEC void mpn_mullo_basecase __GMP_PROTO ((mp_ptr, mp_srcptr, mp_srcptr, mp_size_t));
906
907#define mpn_sqr __MPN(sqr)
908__GMP_DECLSPEC void mpn_sqr __GMP_PROTO ((mp_ptr, mp_srcptr, mp_size_t));
909
910#ifndef mpn_sqr_basecase  /* if not done with cpuvec in a fat binary */
911#define mpn_sqr_basecase __MPN(sqr_basecase)
912__GMP_DECLSPEC void mpn_sqr_basecase __GMP_PROTO ((mp_ptr, mp_srcptr, mp_size_t));
913#endif
914
915#define mpn_submul_1c __MPN(submul_1c)
916__GMP_DECLSPEC mp_limb_t mpn_submul_1c __GMP_PROTO ((mp_ptr, mp_srcptr, mp_size_t, mp_limb_t, mp_limb_t));
917
918#define mpn_redc_1 __MPN(redc_1)
919__GMP_DECLSPEC void mpn_redc_1 __GMP_PROTO ((mp_ptr, mp_ptr, mp_srcptr, mp_size_t, mp_limb_t));
920
921#define mpn_redc_2 __MPN(redc_2)
922__GMP_DECLSPEC void mpn_redc_2 __GMP_PROTO ((mp_ptr, mp_ptr, mp_srcptr, mp_size_t, mp_srcptr));
923#define mpn_redc_n __MPN(redc_n)
924__GMP_DECLSPEC void mpn_redc_n __GMP_PROTO ((mp_ptr, mp_ptr, mp_srcptr, mp_size_t, mp_srcptr));
925
926
927#define mpn_mod_1_1p_cps __MPN(mod_1_1p_cps)
928__GMP_DECLSPEC void mpn_mod_1_1p_cps __GMP_PROTO ((mp_limb_t [4], mp_limb_t));
929#define mpn_mod_1_1p __MPN(mod_1_1p)
930__GMP_DECLSPEC mp_limb_t mpn_mod_1_1p __GMP_PROTO ((mp_srcptr, mp_size_t, mp_limb_t, mp_limb_t [4])) __GMP_ATTRIBUTE_PURE;
931
932#define mpn_mod_1s_2p_cps __MPN(mod_1s_2p_cps)
933__GMP_DECLSPEC void mpn_mod_1s_2p_cps __GMP_PROTO ((mp_limb_t [5], mp_limb_t));
934#define mpn_mod_1s_2p __MPN(mod_1s_2p)
935__GMP_DECLSPEC mp_limb_t mpn_mod_1s_2p __GMP_PROTO ((mp_srcptr, mp_size_t, mp_limb_t, mp_limb_t [5])) __GMP_ATTRIBUTE_PURE;
936
937#define mpn_mod_1s_3p_cps __MPN(mod_1s_3p_cps)
938__GMP_DECLSPEC void mpn_mod_1s_3p_cps __GMP_PROTO ((mp_limb_t [6], mp_limb_t));
939#define mpn_mod_1s_3p __MPN(mod_1s_3p)
940__GMP_DECLSPEC mp_limb_t mpn_mod_1s_3p __GMP_PROTO ((mp_srcptr, mp_size_t, mp_limb_t, mp_limb_t [6])) __GMP_ATTRIBUTE_PURE;
941
942#define mpn_mod_1s_4p_cps __MPN(mod_1s_4p_cps)
943__GMP_DECLSPEC void mpn_mod_1s_4p_cps __GMP_PROTO ((mp_limb_t [7], mp_limb_t));
944#define mpn_mod_1s_4p __MPN(mod_1s_4p)
945__GMP_DECLSPEC mp_limb_t mpn_mod_1s_4p __GMP_PROTO ((mp_srcptr, mp_size_t, mp_limb_t, mp_limb_t [7])) __GMP_ATTRIBUTE_PURE;
946
947#define mpn_bc_mulmod_bnm1 __MPN(bc_mulmod_bnm1)
948__GMP_DECLSPEC void mpn_bc_mulmod_bnm1 __GMP_PROTO ((mp_ptr, mp_srcptr, mp_srcptr, mp_size_t, mp_ptr));
949#define mpn_mulmod_bnm1 __MPN(mulmod_bnm1)
950__GMP_DECLSPEC void mpn_mulmod_bnm1 __GMP_PROTO ((mp_ptr, mp_size_t, mp_srcptr, mp_size_t, mp_srcptr, mp_size_t, mp_ptr));
951#define mpn_mulmod_bnm1_next_size __MPN(mulmod_bnm1_next_size)
952__GMP_DECLSPEC mp_size_t mpn_mulmod_bnm1_next_size __GMP_PROTO ((mp_size_t)) ATTRIBUTE_CONST;
953static inline mp_size_t
954mpn_mulmod_bnm1_itch (mp_size_t rn, mp_size_t an, mp_size_t bn) {
955  mp_size_t n, itch;
956  n = rn >> 1;
957  itch = rn + 4 +
958    (an > n ? (bn > n ? rn : n) : 0);
959  return itch;
960}
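
/* Illustrative calling pattern for the mulmod_bnm1 functions (an assumption
   about usage, with B the limb base): to get {ap,an}*{bp,bn} mod (B^rn - 1),

     rn = mpn_mulmod_bnm1_next_size (n);
     tp = TMP_ALLOC_LIMBS (mpn_mulmod_bnm1_itch (rn, an, bn));
     mpn_mulmod_bnm1 (rp, rn, ap, an, bp, bn, tp);

   with mpn_mulmod_bnm1_next_size rounding n up to a size the algorithm
   handles efficiently.  */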
961
962#define mpn_sqrmod_bnm1 __MPN(sqrmod_bnm1)
963__GMP_DECLSPEC void mpn_sqrmod_bnm1 __GMP_PROTO ((mp_ptr, mp_size_t, mp_srcptr, mp_size_t, mp_ptr));
964#define mpn_sqrmod_bnm1_next_size __MPN(sqrmod_bnm1_next_size)
965__GMP_DECLSPEC mp_size_t mpn_sqrmod_bnm1_next_size __GMP_PROTO ((mp_size_t)) ATTRIBUTE_CONST;
966static inline mp_size_t
967mpn_sqrmod_bnm1_itch (mp_size_t rn, mp_size_t an) {
968  mp_size_t n, itch;
969  n = rn >> 1;
970  itch = rn + 3 +
971    (an > n ? an : 0);
972  return itch;
973}
974
975typedef __gmp_randstate_struct *gmp_randstate_ptr;
976typedef const __gmp_randstate_struct *gmp_randstate_srcptr;
977
978/* Pseudo-random number generator function pointers structure.  */
979typedef struct {
980  void (*randseed_fn) __GMP_PROTO ((gmp_randstate_t, mpz_srcptr));
981  void (*randget_fn) __GMP_PROTO ((gmp_randstate_t, mp_ptr, unsigned long int));
982  void (*randclear_fn) __GMP_PROTO ((gmp_randstate_t));
983  void (*randiset_fn) __GMP_PROTO ((gmp_randstate_ptr, gmp_randstate_srcptr));
984} gmp_randfnptr_t;
985
986/* Macro to obtain a void pointer to the function pointers structure.  */
987#define RNG_FNPTR(rstate) ((rstate)->_mp_algdata._mp_lc)
988
989/* Macro to obtain a pointer to the generator's state.
   When used as an lvalue the rvalue needs to be cast to mp_ptr.  */
991#define RNG_STATE(rstate) ((rstate)->_mp_seed->_mp_d)
992
993/* Write a given number of random bits to rp.  */
994#define _gmp_rand(rp, state, bits)                              \
995  do {                                                          \
996    gmp_randstate_ptr  __rstate = (state);                      \
997    (*((gmp_randfnptr_t *) RNG_FNPTR (__rstate))->randget_fn)   \
998       (__rstate, rp, bits);                                    \
999  } while (0)
1000
1001__GMP_DECLSPEC void __gmp_randinit_mt_noseed __GMP_PROTO ((gmp_randstate_t));
1002
1003
1004/* __gmp_rands is the global state for the old-style random functions, and
1005   is also used in the test programs (hence the __GMP_DECLSPEC).
1006
1007   There's no seeding here, so mpz_random etc will generate the same
1008   sequence every time.  This is not unlike the C library random functions
1009   if you don't seed them, so perhaps it's acceptable.  Digging up a seed
1010   from /dev/random or the like would work on many systems, but might
1011   encourage a false confidence, since it'd be pretty much impossible to do
1012   something that would work reliably everywhere.  In any case the new style
1013   functions are recommended to applications which care about randomness, so
1014   the old functions aren't too important.  */
1015
1016__GMP_DECLSPEC extern char             __gmp_rands_initialized;
1017__GMP_DECLSPEC extern gmp_randstate_t  __gmp_rands;
1018
1019#define RANDS                                       \
1020  ((__gmp_rands_initialized ? 0                     \
1021    : (__gmp_rands_initialized = 1,                 \
1022       __gmp_randinit_mt_noseed (__gmp_rands), 0)), \
1023   __gmp_rands)
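
/* Illustrative use (roughly what the test programs do):

     mp_limb_t  r[2];
     _gmp_rand (r, RANDS, 2 * GMP_NUMB_BITS);

   the first RANDS reference initializing __gmp_rands on the fly.  */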
1024
1025/* this is used by the test programs, to free memory */
1026#define RANDS_CLEAR()                   \
1027  do {                                  \
1028    if (__gmp_rands_initialized)        \
1029      {                                 \
1030        __gmp_rands_initialized = 0;    \
1031        gmp_randclear (__gmp_rands);    \
1032      }                                 \
1033  } while (0)
1034
1035
1036/* For a threshold between algorithms A and B, size>=thresh is where B
   should be used.  The special value MP_SIZE_T_MAX means only ever use A,
   and value 0 means only ever use B.  The tests for these special values will
1039   be compile-time constants, so the compiler should be able to eliminate
1040   the code for the unwanted algorithm.  */
1041
1042#define ABOVE_THRESHOLD(size,thresh)    \
1043  ((thresh) == 0                        \
1044   || ((thresh) != MP_SIZE_T_MAX        \
1045       && (size) >= (thresh)))
1046#define BELOW_THRESHOLD(size,thresh)  (! ABOVE_THRESHOLD (size, thresh))
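
/* Typical use (illustrative; MUL_TOOM22_THRESHOLD comes from gmp-mparam.h):

     if (BELOW_THRESHOLD (n, MUL_TOOM22_THRESHOLD))
       mpn_mul_basecase (pp, ap, n, bp, n);
     else
       mpn_toom22_mul (pp, ap, n, bp, n, scratch);

   and because 0 and MP_SIZE_T_MAX are compile-time constants the unwanted
   branch can be removed entirely by the compiler.  */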
1047
1048#define MPN_TOOM22_MUL_MINSIZE    4
1049#define MPN_TOOM2_SQR_MINSIZE     4
1050
1051#define MPN_TOOM33_MUL_MINSIZE   17
1052#define MPN_TOOM3_SQR_MINSIZE    17
1053
1054#define MPN_TOOM44_MUL_MINSIZE   30
1055#define MPN_TOOM4_SQR_MINSIZE    30
1056
1057#define MPN_TOOM6H_MUL_MINSIZE   46
1058#define MPN_TOOM6_SQR_MINSIZE    46
1059
1060#define MPN_TOOM8H_MUL_MINSIZE   86
1061#define MPN_TOOM8_SQR_MINSIZE    86
1062
1063#define MPN_TOOM32_MUL_MINSIZE   10
1064#define MPN_TOOM42_MUL_MINSIZE   10
1065#define MPN_TOOM43_MUL_MINSIZE   49 /* ??? */
1066#define MPN_TOOM53_MUL_MINSIZE   49 /* ??? */
1067#define MPN_TOOM63_MUL_MINSIZE   49
1068
1069#define   mpn_sqr_diagonal __MPN(sqr_diagonal)
1070__GMP_DECLSPEC void      mpn_sqr_diagonal __GMP_PROTO ((mp_ptr, mp_srcptr, mp_size_t));
1071
1072#define   mpn_toom_interpolate_5pts __MPN(toom_interpolate_5pts)
1073__GMP_DECLSPEC void      mpn_toom_interpolate_5pts __GMP_PROTO ((mp_ptr, mp_ptr, mp_ptr, mp_size_t, mp_size_t, int, mp_limb_t));
1074
1075enum toom6_flags {toom6_all_pos = 0, toom6_vm1_neg = 1, toom6_vm2_neg = 2};
1076#define   mpn_toom_interpolate_6pts __MPN(toom_interpolate_6pts)
1077__GMP_DECLSPEC void      mpn_toom_interpolate_6pts __GMP_PROTO ((mp_ptr, mp_size_t, enum toom6_flags, mp_ptr, mp_ptr, mp_ptr, mp_size_t));
1078
1079enum toom7_flags { toom7_w1_neg = 1, toom7_w3_neg = 2 };
1080#define   mpn_toom_interpolate_7pts __MPN(toom_interpolate_7pts)
1081__GMP_DECLSPEC void      mpn_toom_interpolate_7pts __GMP_PROTO ((mp_ptr, mp_size_t, enum toom7_flags, mp_ptr, mp_ptr, mp_ptr, mp_ptr, mp_size_t, mp_ptr));
1082
1083#define mpn_toom_interpolate_8pts __MPN(toom_interpolate_8pts)
1084__GMP_DECLSPEC void      mpn_toom_interpolate_8pts __GMP_PROTO ((mp_ptr, mp_size_t, mp_ptr, mp_ptr, mp_size_t, mp_ptr));
1085
1086#define mpn_toom_interpolate_12pts __MPN(toom_interpolate_12pts)
1087__GMP_DECLSPEC void      mpn_toom_interpolate_12pts __GMP_PROTO ((mp_ptr, mp_ptr, mp_ptr, mp_ptr, mp_size_t, mp_size_t, int, mp_ptr));
1088
1089#define mpn_toom_interpolate_16pts __MPN(toom_interpolate_16pts)
1090__GMP_DECLSPEC void      mpn_toom_interpolate_16pts __GMP_PROTO ((mp_ptr, mp_ptr, mp_ptr, mp_ptr, mp_ptr, mp_size_t, mp_size_t, int, mp_ptr));
1091
1092#define   mpn_toom_couple_handling __MPN(toom_couple_handling)
1093__GMP_DECLSPEC void mpn_toom_couple_handling __GMP_PROTO ((mp_ptr, mp_size_t, mp_ptr, int, mp_size_t, int, int));
1094
1095#define   mpn_toom_eval_dgr3_pm1 __MPN(toom_eval_dgr3_pm1)
1096__GMP_DECLSPEC int mpn_toom_eval_dgr3_pm1 __GMP_PROTO ((mp_ptr, mp_ptr, mp_srcptr, mp_size_t, mp_size_t, mp_ptr));
1097
1098#define   mpn_toom_eval_dgr3_pm2 __MPN(toom_eval_dgr3_pm2)
1099__GMP_DECLSPEC int mpn_toom_eval_dgr3_pm2 __GMP_PROTO ((mp_ptr, mp_ptr, mp_srcptr, mp_size_t, mp_size_t, mp_ptr));
1100
1101#define   mpn_toom_eval_pm1 __MPN(toom_eval_pm1)
1102__GMP_DECLSPEC int mpn_toom_eval_pm1 __GMP_PROTO ((mp_ptr, mp_ptr, unsigned, mp_srcptr, mp_size_t, mp_size_t, mp_ptr));
1103
1104#define   mpn_toom_eval_pm2 __MPN(toom_eval_pm2)
1105__GMP_DECLSPEC int mpn_toom_eval_pm2 __GMP_PROTO ((mp_ptr, mp_ptr, unsigned, mp_srcptr, mp_size_t, mp_size_t, mp_ptr));
1106
1107#define   mpn_toom_eval_pm2exp __MPN(toom_eval_pm2exp)
1108__GMP_DECLSPEC int mpn_toom_eval_pm2exp __GMP_PROTO ((mp_ptr, mp_ptr, unsigned, mp_srcptr, mp_size_t, mp_size_t, unsigned, mp_ptr));
1109
1110#define   mpn_toom_eval_pm2rexp __MPN(toom_eval_pm2rexp)
1111__GMP_DECLSPEC int mpn_toom_eval_pm2rexp __GMP_PROTO ((mp_ptr, mp_ptr, unsigned, mp_srcptr, mp_size_t, mp_size_t, unsigned, mp_ptr));
1112
1113#define   mpn_toom22_mul __MPN(toom22_mul)
1114__GMP_DECLSPEC void      mpn_toom22_mul __GMP_PROTO ((mp_ptr, mp_srcptr, mp_size_t, mp_srcptr, mp_size_t, mp_ptr));
1115
1116#define   mpn_toom32_mul __MPN(toom32_mul)
1117__GMP_DECLSPEC void      mpn_toom32_mul __GMP_PROTO ((mp_ptr, mp_srcptr, mp_size_t, mp_srcptr, mp_size_t, mp_ptr));
1118
1119#define   mpn_toom42_mul __MPN(toom42_mul)
1120__GMP_DECLSPEC void      mpn_toom42_mul __GMP_PROTO ((mp_ptr, mp_srcptr, mp_size_t, mp_srcptr, mp_size_t, mp_ptr));
1121
1122#define   mpn_toom52_mul __MPN(toom52_mul)
1123__GMP_DECLSPEC void      mpn_toom52_mul __GMP_PROTO ((mp_ptr, mp_srcptr, mp_size_t, mp_srcptr, mp_size_t, mp_ptr));
1124
1125#define   mpn_toom62_mul __MPN(toom62_mul)
1126__GMP_DECLSPEC void      mpn_toom62_mul __GMP_PROTO ((mp_ptr, mp_srcptr, mp_size_t, mp_srcptr, mp_size_t, mp_ptr));
1127
1128#define   mpn_toom2_sqr __MPN(toom2_sqr)
1129__GMP_DECLSPEC void      mpn_toom2_sqr __GMP_PROTO ((mp_ptr, mp_srcptr, mp_size_t, mp_ptr));
1130
1131#define   mpn_toom33_mul __MPN(toom33_mul)
1132__GMP_DECLSPEC void      mpn_toom33_mul __GMP_PROTO ((mp_ptr, mp_srcptr, mp_size_t, mp_srcptr, mp_size_t, mp_ptr));
1133
1134#define   mpn_toom43_mul __MPN(toom43_mul)
1135__GMP_DECLSPEC void      mpn_toom43_mul __GMP_PROTO ((mp_ptr, mp_srcptr, mp_size_t, mp_srcptr, mp_size_t, mp_ptr));
1136
1137#define   mpn_toom53_mul __MPN(toom53_mul)
1138__GMP_DECLSPEC void      mpn_toom53_mul __GMP_PROTO ((mp_ptr, mp_srcptr, mp_size_t, mp_srcptr, mp_size_t, mp_ptr));
1139
1140#define   mpn_toom63_mul __MPN(toom63_mul)
1141__GMP_DECLSPEC void      mpn_toom63_mul __GMP_PROTO ((mp_ptr, mp_srcptr, mp_size_t, mp_srcptr, mp_size_t, mp_ptr));
1142
1143#define   mpn_toom3_sqr __MPN(toom3_sqr)
1144__GMP_DECLSPEC void      mpn_toom3_sqr __GMP_PROTO ((mp_ptr, mp_srcptr, mp_size_t, mp_ptr));
1145
1146#define   mpn_toom44_mul __MPN(toom44_mul)
1147__GMP_DECLSPEC void      mpn_toom44_mul __GMP_PROTO ((mp_ptr, mp_srcptr, mp_size_t, mp_srcptr, mp_size_t, mp_ptr));
1148
1149#define   mpn_toom4_sqr __MPN(toom4_sqr)
1150__GMP_DECLSPEC void      mpn_toom4_sqr __GMP_PROTO ((mp_ptr, mp_srcptr, mp_size_t, mp_ptr));
1151
1152#define   mpn_toom6h_mul __MPN(toom6h_mul)
1153__GMP_DECLSPEC void      mpn_toom6h_mul __GMP_PROTO ((mp_ptr, mp_srcptr, mp_size_t, mp_srcptr, mp_size_t, mp_ptr));
1154
1155#define   mpn_toom6_sqr __MPN(toom6_sqr)
1156__GMP_DECLSPEC void      mpn_toom6_sqr __GMP_PROTO ((mp_ptr, mp_srcptr, mp_size_t, mp_ptr));
1157
1158#define   mpn_toom8h_mul __MPN(toom8h_mul)
1159__GMP_DECLSPEC void      mpn_toom8h_mul __GMP_PROTO ((mp_ptr, mp_srcptr, mp_size_t, mp_srcptr, mp_size_t, mp_ptr));
1160
1161#define   mpn_toom8_sqr __MPN(toom8_sqr)
1162__GMP_DECLSPEC void      mpn_toom8_sqr __GMP_PROTO ((mp_ptr, mp_srcptr, mp_size_t, mp_ptr));
1163
1164#define   mpn_fft_best_k __MPN(fft_best_k)
1165__GMP_DECLSPEC int       mpn_fft_best_k __GMP_PROTO ((mp_size_t, int)) ATTRIBUTE_CONST;
1166
1167#define   mpn_mul_fft __MPN(mul_fft)
1168__GMP_DECLSPEC mp_limb_t mpn_mul_fft __GMP_PROTO ((mp_ptr, mp_size_t, mp_srcptr, mp_size_t, mp_srcptr, mp_size_t, int));
1169
1170#define   mpn_mul_fft_full __MPN(mul_fft_full)
1171__GMP_DECLSPEC void      mpn_mul_fft_full __GMP_PROTO ((mp_ptr, mp_srcptr, mp_size_t, mp_srcptr, mp_size_t));
1172
1173#define   mpn_nussbaumer_mul __MPN(nussbaumer_mul)
1174__GMP_DECLSPEC void      mpn_nussbaumer_mul __GMP_PROTO ((mp_ptr, mp_srcptr, mp_size_t, mp_srcptr, mp_size_t));
1175
1176#define   mpn_fft_next_size __MPN(fft_next_size)
1177__GMP_DECLSPEC mp_size_t mpn_fft_next_size __GMP_PROTO ((mp_size_t, int)) ATTRIBUTE_CONST;
1178
1179#define   mpn_sbpi1_div_qr __MPN(sbpi1_div_qr)
1180__GMP_DECLSPEC mp_limb_t mpn_sbpi1_div_qr __GMP_PROTO ((mp_ptr, mp_ptr, mp_size_t, mp_srcptr, mp_size_t, mp_limb_t));
1181
1182#define   mpn_sbpi1_div_q __MPN(sbpi1_div_q)
1183__GMP_DECLSPEC mp_limb_t mpn_sbpi1_div_q __GMP_PROTO ((mp_ptr, mp_ptr, mp_size_t, mp_srcptr, mp_size_t, mp_limb_t));
1184
1185#define   mpn_sbpi1_divappr_q __MPN(sbpi1_divappr_q)
1186__GMP_DECLSPEC mp_limb_t mpn_sbpi1_divappr_q __GMP_PROTO ((mp_ptr, mp_ptr, mp_size_t, mp_srcptr, mp_size_t, mp_limb_t));
1187
1188#define   mpn_dcpi1_div_qr __MPN(dcpi1_div_qr)
1189__GMP_DECLSPEC mp_limb_t mpn_dcpi1_div_qr __GMP_PROTO ((mp_ptr, mp_ptr, mp_size_t, mp_srcptr, mp_size_t, gmp_pi1_t *));
1190#define   mpn_dcpi1_div_qr_n __MPN(dcpi1_div_qr_n)
1191__GMP_DECLSPEC mp_limb_t mpn_dcpi1_div_qr_n __GMP_PROTO ((mp_ptr, mp_ptr, mp_srcptr, mp_size_t, gmp_pi1_t *, mp_ptr));
1192
1193#define   mpn_dcpi1_div_q __MPN(dcpi1_div_q)
1194__GMP_DECLSPEC mp_limb_t mpn_dcpi1_div_q __GMP_PROTO ((mp_ptr, mp_ptr, mp_size_t, mp_srcptr, mp_size_t, gmp_pi1_t *));
1195
1196#define   mpn_dcpi1_divappr_q __MPN(dcpi1_divappr_q)
1197__GMP_DECLSPEC mp_limb_t mpn_dcpi1_divappr_q __GMP_PROTO ((mp_ptr, mp_ptr, mp_size_t, mp_srcptr, mp_size_t, gmp_pi1_t *));
1198#define   mpn_dcpi1_divappr_q_n __MPN(dcpi1_divappr_q_n)
1199__GMP_DECLSPEC mp_limb_t mpn_dcpi1_divappr_q_n __GMP_PROTO ((mp_ptr, mp_ptr, mp_srcptr, mp_size_t, gmp_pi1_t *, mp_ptr));
1200
1201#define   mpn_mu_div_qr __MPN(mu_div_qr)
1202__GMP_DECLSPEC mp_limb_t mpn_mu_div_qr __GMP_PROTO ((mp_ptr, mp_ptr, mp_srcptr, mp_size_t, mp_srcptr, mp_size_t, mp_ptr));
1203#define   mpn_mu_div_qr_itch __MPN(mu_div_qr_itch)
1204__GMP_DECLSPEC mp_size_t mpn_mu_div_qr_itch __GMP_PROTO ((mp_size_t, mp_size_t, int));
1205#define   mpn_mu_div_qr_choose_in __MPN(mu_div_qr_choose_in)
1206__GMP_DECLSPEC mp_size_t mpn_mu_div_qr_choose_in __GMP_PROTO ((mp_size_t, mp_size_t, int));
1207
1208#define   mpn_preinv_mu_div_qr __MPN(preinv_mu_div_qr)
1209__GMP_DECLSPEC mp_limb_t mpn_preinv_mu_div_qr __GMP_PROTO ((mp_ptr, mp_ptr, mp_srcptr, mp_size_t, mp_srcptr, mp_size_t, mp_srcptr, mp_size_t, mp_ptr));
1210#define   mpn_preinv_mu_div_qr_itch __MPN(preinv_mu_div_qr_itch)
1211__GMP_DECLSPEC mp_size_t mpn_preinv_mu_div_qr_itch __GMP_PROTO ((mp_size_t, mp_size_t, mp_size_t));
1212
1213#define   mpn_mu_divappr_q __MPN(mu_divappr_q)
1214__GMP_DECLSPEC mp_limb_t mpn_mu_divappr_q __GMP_PROTO ((mp_ptr, mp_srcptr, mp_size_t, mp_srcptr, mp_size_t, mp_ptr));
1215#define   mpn_mu_divappr_q_itch __MPN(mu_divappr_q_itch)
1216__GMP_DECLSPEC mp_size_t mpn_mu_divappr_q_itch __GMP_PROTO ((mp_size_t, mp_size_t, int));
1217#define   mpn_mu_divappr_q_choose_in __MPN(mu_divappr_q_choose_in)
1218__GMP_DECLSPEC mp_size_t mpn_mu_divappr_q_choose_in __GMP_PROTO ((mp_size_t, mp_size_t, int));
1219
1220#define   mpn_preinv_mu_divappr_q __MPN(preinv_mu_divappr_q)
1221__GMP_DECLSPEC mp_limb_t mpn_preinv_mu_divappr_q __GMP_PROTO ((mp_ptr, mp_srcptr, mp_size_t, mp_srcptr, mp_size_t, mp_srcptr, mp_size_t, mp_ptr));
1222
1223#define   mpn_mu_div_q __MPN(mu_div_q)
1224__GMP_DECLSPEC mp_limb_t mpn_mu_div_q __GMP_PROTO ((mp_ptr, mp_srcptr, mp_size_t, mp_srcptr, mp_size_t, mp_ptr));
1225#define   mpn_mu_div_q_itch __MPN(mu_div_q_itch)
1226__GMP_DECLSPEC mp_size_t mpn_mu_div_q_itch __GMP_PROTO ((mp_size_t, mp_size_t, int));
1227
1228#define  mpn_div_q __MPN(div_q)
1229__GMP_DECLSPEC void mpn_div_q __GMP_PROTO ((mp_ptr, mp_srcptr, mp_size_t, mp_srcptr, mp_size_t, mp_ptr));
1230
1231#define   mpn_invert __MPN(invert)
1232__GMP_DECLSPEC void      mpn_invert __GMP_PROTO ((mp_ptr, mp_srcptr, mp_size_t, mp_ptr));
1233#define mpn_invert_itch(n)  mpn_invertappr_itch(n)
1234
1235#define   mpn_ni_invertappr __MPN(ni_invertappr)
1236__GMP_DECLSPEC mp_limb_t mpn_ni_invertappr __GMP_PROTO ((mp_ptr, mp_srcptr, mp_size_t, mp_ptr));
1237#define   mpn_invertappr __MPN(invertappr)
1238__GMP_DECLSPEC mp_limb_t mpn_invertappr __GMP_PROTO ((mp_ptr, mp_srcptr, mp_size_t, mp_ptr));
1239#define mpn_invertappr_itch(n)  (3 * (n) + 2)
1240
1241#define   mpn_binvert __MPN(binvert)
1242__GMP_DECLSPEC void      mpn_binvert __GMP_PROTO ((mp_ptr, mp_srcptr, mp_size_t, mp_ptr));
1243#define   mpn_binvert_itch __MPN(binvert_itch)
1244__GMP_DECLSPEC mp_size_t mpn_binvert_itch __GMP_PROTO ((mp_size_t));
1245
1246#define mpn_bdiv_q_1 __MPN(bdiv_q_1)
1247__GMP_DECLSPEC mp_limb_t mpn_bdiv_q_1 __GMP_PROTO ((mp_ptr, mp_srcptr, mp_size_t, mp_limb_t));
1248
1249#define mpn_pi1_bdiv_q_1 __MPN(pi1_bdiv_q_1)
1250__GMP_DECLSPEC mp_limb_t mpn_pi1_bdiv_q_1 __GMP_PROTO ((mp_ptr, mp_srcptr, mp_size_t, mp_limb_t, mp_limb_t, int));
1251
1252#define   mpn_sbpi1_bdiv_qr __MPN(sbpi1_bdiv_qr)
1253__GMP_DECLSPEC mp_limb_t mpn_sbpi1_bdiv_qr __GMP_PROTO ((mp_ptr, mp_ptr, mp_size_t, mp_srcptr, mp_size_t, mp_limb_t));
1254
1255#define   mpn_sbpi1_bdiv_q __MPN(sbpi1_bdiv_q)
1256__GMP_DECLSPEC void      mpn_sbpi1_bdiv_q __GMP_PROTO ((mp_ptr, mp_ptr, mp_size_t, mp_srcptr, mp_size_t, mp_limb_t));
1257
1258#define   mpn_dcpi1_bdiv_qr __MPN(dcpi1_bdiv_qr)
1259__GMP_DECLSPEC mp_limb_t mpn_dcpi1_bdiv_qr __GMP_PROTO ((mp_ptr, mp_ptr, mp_size_t, mp_srcptr, mp_size_t, mp_limb_t));
1260#define   mpn_dcpi1_bdiv_qr_n_itch __MPN(dcpi1_bdiv_qr_n_itch)
1261__GMP_DECLSPEC mp_size_t mpn_dcpi1_bdiv_qr_n_itch __GMP_PROTO ((mp_size_t));
1262
1263#define   mpn_dcpi1_bdiv_qr_n __MPN(dcpi1_bdiv_qr_n)
1264__GMP_DECLSPEC mp_limb_t mpn_dcpi1_bdiv_qr_n __GMP_PROTO ((mp_ptr, mp_ptr, mp_srcptr, mp_size_t, mp_limb_t, mp_ptr));
1265#define   mpn_dcpi1_bdiv_q __MPN(dcpi1_bdiv_q)
1266__GMP_DECLSPEC void      mpn_dcpi1_bdiv_q __GMP_PROTO ((mp_ptr, mp_ptr, mp_size_t, mp_srcptr, mp_size_t, mp_limb_t));
1267
1268#define   mpn_dcpi1_bdiv_q_n_itch __MPN(dcpi1_bdiv_q_n_itch)
1269__GMP_DECLSPEC mp_size_t mpn_dcpi1_bdiv_q_n_itch __GMP_PROTO ((mp_size_t));
1270#define   mpn_dcpi1_bdiv_q_n __MPN(dcpi1_bdiv_q_n)
1271__GMP_DECLSPEC void      mpn_dcpi1_bdiv_q_n __GMP_PROTO ((mp_ptr, mp_ptr, mp_srcptr, mp_size_t, mp_limb_t, mp_ptr));
1272
1273#define   mpn_mu_bdiv_qr __MPN(mu_bdiv_qr)
1274__GMP_DECLSPEC mp_limb_t mpn_mu_bdiv_qr __GMP_PROTO ((mp_ptr, mp_ptr, mp_srcptr, mp_size_t, mp_srcptr, mp_size_t, mp_ptr));
1275#define   mpn_mu_bdiv_qr_itch __MPN(mu_bdiv_qr_itch)
1276__GMP_DECLSPEC mp_size_t mpn_mu_bdiv_qr_itch __GMP_PROTO ((mp_size_t, mp_size_t));
1277
1278#define   mpn_mu_bdiv_q __MPN(mu_bdiv_q)
1279__GMP_DECLSPEC void      mpn_mu_bdiv_q __GMP_PROTO ((mp_ptr, mp_srcptr, mp_size_t, mp_srcptr, mp_size_t, mp_ptr));
1280#define   mpn_mu_bdiv_q_itch __MPN(mu_bdiv_q_itch)
1281__GMP_DECLSPEC mp_size_t mpn_mu_bdiv_q_itch __GMP_PROTO ((mp_size_t, mp_size_t));
1282
1283#define   mpn_bdiv_qr __MPN(bdiv_qr)
1284__GMP_DECLSPEC mp_limb_t mpn_bdiv_qr __GMP_PROTO ((mp_ptr, mp_ptr, mp_srcptr, mp_size_t, mp_srcptr, mp_size_t, mp_ptr));
1285#define   mpn_bdiv_qr_itch __MPN(bdiv_qr_itch)
1286__GMP_DECLSPEC mp_size_t mpn_bdiv_qr_itch __GMP_PROTO ((mp_size_t, mp_size_t));
1287
1288#define   mpn_bdiv_q __MPN(bdiv_q)
1289__GMP_DECLSPEC void      mpn_bdiv_q __GMP_PROTO ((mp_ptr, mp_srcptr, mp_size_t, mp_srcptr, mp_size_t, mp_ptr));
1290#define   mpn_bdiv_q_itch __MPN(bdiv_q_itch)
1291__GMP_DECLSPEC mp_size_t mpn_bdiv_q_itch __GMP_PROTO ((mp_size_t, mp_size_t));
1292
1293#define   mpn_divexact __MPN(divexact)
1294__GMP_DECLSPEC void      mpn_divexact __GMP_PROTO ((mp_ptr, mp_srcptr, mp_size_t, mp_srcptr, mp_size_t));
1295#define   mpn_divexact_itch __MPN(divexact_itch)
1296__GMP_DECLSPEC mp_size_t mpn_divexact_itch __GMP_PROTO ((mp_size_t, mp_size_t));
1297
1298#define   mpn_bdiv_dbm1c __MPN(bdiv_dbm1c)
1299__GMP_DECLSPEC mp_limb_t mpn_bdiv_dbm1c __GMP_PROTO ((mp_ptr, mp_srcptr, mp_size_t, mp_limb_t, mp_limb_t));
1300#define   mpn_bdiv_dbm1(dst, src, size, divisor) \
1301  mpn_bdiv_dbm1c (dst, src, size, divisor, __GMP_CAST (mp_limb_t, 0))
1302
1303#define   mpn_powm __MPN(powm)
1304__GMP_DECLSPEC void      mpn_powm __GMP_PROTO ((mp_ptr, mp_srcptr, mp_size_t, mp_srcptr, mp_size_t, mp_srcptr, mp_size_t, mp_ptr));
1305#define   mpn_powlo __MPN(powlo)
1306__GMP_DECLSPEC void      mpn_powlo __GMP_PROTO ((mp_ptr, mp_srcptr, mp_srcptr, mp_size_t, mp_size_t, mp_ptr));
1307#define   mpn_powm_sec __MPN(powm_sec)
1308__GMP_DECLSPEC void      mpn_powm_sec __GMP_PROTO ((mp_ptr, mp_srcptr, mp_size_t, mp_srcptr, mp_size_t, mp_srcptr, mp_size_t, mp_ptr));
1309#define   mpn_powm_sec_itch __MPN(powm_sec_itch)
1310__GMP_DECLSPEC mp_size_t mpn_powm_sec_itch __GMP_PROTO ((mp_size_t, mp_size_t, mp_size_t));
1311#define   mpn_subcnd_n __MPN(subcnd_n)
1312__GMP_DECLSPEC mp_limb_t mpn_subcnd_n __GMP_PROTO ((mp_ptr, mp_srcptr, mp_srcptr, mp_size_t, mp_limb_t));
1313#define   mpn_tabselect __MPN(tabselect)
1314__GMP_DECLSPEC void      mpn_tabselect __GMP_PROTO ((volatile mp_limb_t *, volatile mp_limb_t *, mp_size_t, mp_size_t, mp_size_t));
1315#define mpn_redc_1_sec __MPN(redc_1_sec)
1316__GMP_DECLSPEC void mpn_redc_1_sec __GMP_PROTO ((mp_ptr, mp_ptr, mp_srcptr, mp_size_t, mp_limb_t));
1317
1318#ifndef DIVEXACT_BY3_METHOD
1319#if GMP_NUMB_BITS % 2 == 0 && ! defined (HAVE_NATIVE_mpn_divexact_by3c)
1320#define DIVEXACT_BY3_METHOD 0	/* default to using mpn_bdiv_dbm1c */
1321#else
1322#define DIVEXACT_BY3_METHOD 1
1323#endif
1324#endif
1325
1326#if DIVEXACT_BY3_METHOD == 0
1327#undef mpn_divexact_by3
1328#define mpn_divexact_by3(dst,src,size) \
1329  (3 & mpn_bdiv_dbm1 (dst, src, size, __GMP_CAST (mp_limb_t, GMP_NUMB_MASK / 3)))
1330/* override mpn_divexact_by3c defined in gmp.h */
1331/*
1332#undef mpn_divexact_by3c
1333#define mpn_divexact_by3c(dst,src,size,cy) \
  (3 & mpn_bdiv_dbm1c (dst, src, size, __GMP_CAST (mp_limb_t, GMP_NUMB_MASK / 3), GMP_NUMB_MASK / 3 * cy))
1335*/
1336#endif
1337
1338#if GMP_NUMB_BITS % 4 == 0
1339#define mpn_divexact_by5(dst,src,size) \
1340  (7 & 3 * mpn_bdiv_dbm1 (dst, src, size, __GMP_CAST (mp_limb_t, GMP_NUMB_MASK / 5)))
1341#endif
1342
1343#if GMP_NUMB_BITS % 6 == 0
1344#define mpn_divexact_by7(dst,src,size) \
1345  (7 & 1 * mpn_bdiv_dbm1 (dst, src, size, __GMP_CAST (mp_limb_t, GMP_NUMB_MASK / 7)))
1346#endif
1347
1348#if GMP_NUMB_BITS % 6 == 0
1349#define mpn_divexact_by9(dst,src,size) \
1350  (15 & 7 * mpn_bdiv_dbm1 (dst, src, size, __GMP_CAST (mp_limb_t, GMP_NUMB_MASK / 9)))
1351#endif
1352
1353#if GMP_NUMB_BITS % 10 == 0
1354#define mpn_divexact_by11(dst,src,size) \
1355  (15 & 5 * mpn_bdiv_dbm1 (dst, src, size, __GMP_CAST (mp_limb_t, GMP_NUMB_MASK / 11)))
1356#endif
1357
1358#if GMP_NUMB_BITS % 12 == 0
1359#define mpn_divexact_by13(dst,src,size) \
1360  (15 & 3 * mpn_bdiv_dbm1 (dst, src, size, __GMP_CAST (mp_limb_t, GMP_NUMB_MASK / 13)))
1361#endif
1362
1363#if GMP_NUMB_BITS % 4 == 0
1364#define mpn_divexact_by15(dst,src,size) \
1365  (15 & 1 * mpn_bdiv_dbm1 (dst, src, size, __GMP_CAST (mp_limb_t, GMP_NUMB_MASK / 15)))
1366#endif
1367
1368#define mpz_divexact_gcd  __gmpz_divexact_gcd
1369__GMP_DECLSPEC void    mpz_divexact_gcd __GMP_PROTO ((mpz_ptr, mpz_srcptr, mpz_srcptr));
1370
1371#define mpz_inp_str_nowhite __gmpz_inp_str_nowhite
1372#ifdef _GMP_H_HAVE_FILE
1373__GMP_DECLSPEC size_t  mpz_inp_str_nowhite __GMP_PROTO ((mpz_ptr, FILE *, int, int, size_t));
1374#endif
1375
1376#define mpn_divisible_p __MPN(divisible_p)
1377__GMP_DECLSPEC int     mpn_divisible_p __GMP_PROTO ((mp_srcptr, mp_size_t, mp_srcptr, mp_size_t)) __GMP_ATTRIBUTE_PURE;
1378
1379#define   mpn_rootrem __MPN(rootrem)
1380__GMP_DECLSPEC mp_size_t mpn_rootrem __GMP_PROTO ((mp_ptr, mp_ptr, mp_srcptr, mp_size_t, mp_limb_t));
1381
1382
1383#if defined (_CRAY)
1384#define MPN_COPY_INCR(dst, src, n)					\
1385  do {									\
1386    int __i;		/* Faster on some Crays with plain int */	\
1387    _Pragma ("_CRI ivdep");						\
1388    for (__i = 0; __i < (n); __i++)					\
1389      (dst)[__i] = (src)[__i];						\
1390  } while (0)
1391#endif
1392
1393/* used by test programs, hence __GMP_DECLSPEC */
1394#ifndef mpn_copyi  /* if not done with cpuvec in a fat binary */
1395#define mpn_copyi __MPN(copyi)
1396__GMP_DECLSPEC void mpn_copyi __GMP_PROTO ((mp_ptr, mp_srcptr, mp_size_t));
1397#endif
1398
1399#if ! defined (MPN_COPY_INCR) && HAVE_NATIVE_mpn_copyi
1400#define MPN_COPY_INCR(dst, src, size)                   \
1401  do {                                                  \
1402    ASSERT ((size) >= 0);                               \
1403    ASSERT (MPN_SAME_OR_INCR_P (dst, src, size));       \
1404    mpn_copyi (dst, src, size);                         \
1405  } while (0)
1406#endif
1407
1408/* Copy N limbs from SRC to DST incrementing, N==0 allowed.  */
1409#if ! defined (MPN_COPY_INCR)
1410#define MPN_COPY_INCR(dst, src, n)                      \
1411  do {                                                  \
1412    ASSERT ((n) >= 0);                                  \
1413    ASSERT (MPN_SAME_OR_INCR_P (dst, src, n));          \
1414    if ((n) != 0)                                       \
1415      {                                                 \
1416	mp_size_t __n = (n) - 1;                        \
1417	mp_ptr __dst = (dst);                           \
1418	mp_srcptr __src = (src);                        \
1419	mp_limb_t __x;                                  \
1420	__x = *__src++;                                 \
1421	if (__n != 0)                                   \
1422	  {                                             \
1423	    do                                          \
1424	      {                                         \
1425		*__dst++ = __x;                         \
1426		__x = *__src++;                         \
1427	      }                                         \
1428	    while (--__n);                              \
1429	  }                                             \
1430	*__dst++ = __x;                                 \
1431      }                                                 \
1432  } while (0)
1433#endif
1434
1435
1436#if defined (_CRAY)
1437#define MPN_COPY_DECR(dst, src, n)					\
1438  do {									\
1439    int __i;		/* Faster on some Crays with plain int */	\
1440    _Pragma ("_CRI ivdep");						\
1441    for (__i = (n) - 1; __i >= 0; __i--)				\
1442      (dst)[__i] = (src)[__i];						\
1443  } while (0)
1444#endif
1445
1446/* used by test programs, hence __GMP_DECLSPEC */
1447#ifndef mpn_copyd  /* if not done with cpuvec in a fat binary */
1448#define mpn_copyd __MPN(copyd)
1449__GMP_DECLSPEC void mpn_copyd __GMP_PROTO ((mp_ptr, mp_srcptr, mp_size_t));
1450#endif
1451
1452#if ! defined (MPN_COPY_DECR) && HAVE_NATIVE_mpn_copyd
1453#define MPN_COPY_DECR(dst, src, size)                   \
1454  do {                                                  \
1455    ASSERT ((size) >= 0);                               \
1456    ASSERT (MPN_SAME_OR_DECR_P (dst, src, size));       \
1457    mpn_copyd (dst, src, size);                         \
1458  } while (0)
1459#endif
1460
1461/* Copy N limbs from SRC to DST decrementing, N==0 allowed.  */
1462#if ! defined (MPN_COPY_DECR)
1463#define MPN_COPY_DECR(dst, src, n)                      \
1464  do {                                                  \
1465    ASSERT ((n) >= 0);                                  \
1466    ASSERT (MPN_SAME_OR_DECR_P (dst, src, n));          \
1467    if ((n) != 0)                                       \
1468      {                                                 \
1469	mp_size_t __n = (n) - 1;                        \
1470	mp_ptr __dst = (dst) + __n;                     \
1471	mp_srcptr __src = (src) + __n;                  \
1472	mp_limb_t __x;                                  \
1473	__x = *__src--;                                 \
1474	if (__n != 0)                                   \
1475	  {                                             \
1476	    do                                          \
1477	      {                                         \
1478		*__dst-- = __x;                         \
1479		__x = *__src--;                         \
1480	      }                                         \
1481	    while (--__n);                              \
1482	  }                                             \
1483	*__dst-- = __x;                                 \
1484      }                                                 \
1485  } while (0)
1486#endif
1487
1488
1489#ifndef MPN_COPY
1490#define MPN_COPY(d,s,n)                         \
1491  do {                                          \
1492    ASSERT (MPN_SAME_OR_SEPARATE_P (d, s, n));  \
1493    MPN_COPY_INCR (d, s, n);                    \
1494  } while (0)
1495#endif
1496
1497
1498/* Set {dst,size} to the limbs of {src,size} in reverse order. */
1499#define MPN_REVERSE(dst, src, size)                     \
1500  do {                                                  \
1501    mp_ptr     __dst = (dst);                           \
1502    mp_size_t  __size = (size);                         \
1503    mp_srcptr  __src = (src) + __size - 1;              \
1504    mp_size_t  __i;                                     \
1505    ASSERT ((size) >= 0);                               \
1506    ASSERT (! MPN_OVERLAP_P (dst, size, src, size));    \
1507    CRAY_Pragma ("_CRI ivdep");                         \
1508    for (__i = 0; __i < __size; __i++)                  \
1509      {                                                 \
1510        *__dst = *__src;                                \
1511        __dst++;                                        \
1512        __src--;                                        \
1513      }                                                 \
1514  } while (0)
1515
1516
1517/* Zero n limbs at dst.
1518
1519   For power and powerpc we want an inline stu/bdnz loop for zeroing.  On
1520   ppc630 for instance this is optimal since it can sustain only 1 store per
1521   cycle.
1522
1523   gcc 2.95.x (for powerpc64 -maix64, or powerpc32) doesn't recognise the
1524   "for" loop in the generic code below can become stu/bdnz.  The do/while
1525   here helps it get to that.  The same caveat about plain -mpowerpc64 mode
1526   applies here as to __GMPN_COPY_INCR in gmp.h.
1527
1528   xlc 3.1 already generates stu/bdnz from the generic C, and does so from
1529   this loop too.
1530
1531   Enhancement: GLIBC does some trickery with dcbz to zero whole cache lines
1532   at a time.  MPN_ZERO isn't all that important in GMP, so it might be more
1533   trouble than it's worth to do the same, though perhaps a call to memset
1534   would be good when on a GNU system.  */
1535
1536#if HAVE_HOST_CPU_FAMILY_power || HAVE_HOST_CPU_FAMILY_powerpc
1537#define MPN_ZERO(dst, n)			\
1538  do {						\
1539    ASSERT ((n) >= 0);				\
1540    if ((n) != 0)				\
1541      {						\
1542	mp_ptr __dst = (dst) - 1;		\
1543	mp_size_t __n = (n);			\
1544	do					\
1545	  *++__dst = 0;				\
1546	while (--__n);				\
1547      }						\
1548  } while (0)
1549#endif
1550
1551#ifndef MPN_ZERO
1552#define MPN_ZERO(dst, n)			\
1553  do {						\
1554    ASSERT ((n) >= 0);				\
1555    if ((n) != 0)				\
1556      {						\
1557	mp_ptr __dst = (dst);			\
1558	mp_size_t __n = (n);			\
1559	do					\
1560	  *__dst++ = 0;				\
1561	while (--__n);				\
1562      }						\
1563  } while (0)
1564#endif
1565
1566
1567/* On the x86s repe/scasl doesn't seem useful, since it takes many cycles to
1568   start up and would need to strip a lot of zeros before it'd be faster
1569   than a simple cmpl loop.  Here are some times in cycles for
1570   std/repe/scasl/cld and cld/repe/scasl (the latter would be for stripping
1571   low zeros).
1572
1573                std   cld
1574           P5    18    16
1575           P6    46    38
1576           K6    36    13
1577           K7    21    20
1578*/
1579#ifndef MPN_NORMALIZE
1580#define MPN_NORMALIZE(DST, NLIMBS) \
1581  do {									\
1582    while ((NLIMBS) > 0)                                                \
1583      {									\
1584	if ((DST)[(NLIMBS) - 1] != 0)					\
1585	  break;							\
1586	(NLIMBS)--;							\
1587      }									\
1588  } while (0)
1589#endif
1590#ifndef MPN_NORMALIZE_NOT_ZERO
1591#define MPN_NORMALIZE_NOT_ZERO(DST, NLIMBS)     \
1592  do {                                          \
1593    ASSERT ((NLIMBS) >= 1);                     \
1594    while (1)                                   \
1595      {                                         \
1596	if ((DST)[(NLIMBS) - 1] != 0)           \
1597	  break;                                \
1598	(NLIMBS)--;                             \
1599      }                                         \
1600  } while (0)
1601#endif
1602
1603/* Strip least significant zero limbs from {ptr,size} by incrementing ptr
1604   and decrementing size.  low should be ptr[0], and will be the new ptr[0]
1605   on returning.  The number in {ptr,size} must be non-zero, ie. size!=0 and
1606   somewhere a non-zero limb.  */
1607#define MPN_STRIP_LOW_ZEROS_NOT_ZERO(ptr, size, low)    \
1608  do {                                                  \
1609    ASSERT ((size) >= 1);                               \
1610    ASSERT ((low) == (ptr)[0]);                         \
1611                                                        \
1612    while ((low) == 0)                                  \
1613      {                                                 \
1614        (size)--;                                       \
1615        ASSERT ((size) >= 1);                           \
1616        (ptr)++;                                        \
1617        (low) = *(ptr);                                 \
1618      }                                                 \
1619  } while (0)
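
/* Illustrative sketch (not from the library sources): for a positive mpz_t z,
   stripping its low zero limbs before further processing might look like

     mp_srcptr  p = PTR (z);
     mp_size_t  n = SIZ (z);
     mp_limb_t  low = p[0];
     MPN_STRIP_LOW_ZEROS_NOT_ZERO (p, n, low);

   afterwards p points at the first non-zero limb, n is the reduced size,
   and low == p[0].  */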
1620
1621/* Initialize X of type mpz_t with space for NLIMBS limbs.  X should be a
1622   temporary variable; it will be automatically cleared out at function
1623   return.  We use __x here to make it possible to accept both mpz_ptr and
1624   mpz_t arguments.  */
1625#define MPZ_TMP_INIT(X, NLIMBS)                                         \
1626  do {                                                                  \
1627    mpz_ptr __x = (X);                                                  \
1628    ASSERT ((NLIMBS) >= 1);                                             \
1629    __x->_mp_alloc = (NLIMBS);                                          \
1630    __x->_mp_d = TMP_ALLOC_LIMBS (NLIMBS);				\
1631  } while (0)
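
/* Usage sketch (illustrative only, for some already-initialized mpz_t a):
   together with the TMP allocation scheme declared earlier,

     mpz_t t;
     TMP_DECL;
     TMP_MARK;
     MPZ_TMP_INIT (t, ABSIZ (a) + 1);
     mpz_add_ui (t, a, 1);
     TMP_FREE;

   The limb count must be large enough that t never needs reallocating;
   t is not passed to mpz_clear, its limbs are released by TMP_FREE.  */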
1632
1633/* Realloc for an mpz_t WHAT if it has less than NEEDED limbs.  */
1634#define MPZ_REALLOC(z,n) (UNLIKELY ((n) > ALLOC(z))     \
1635                          ? (mp_ptr) _mpz_realloc(z,n)  \
1636                          : PTR(z))
1637
1638#define MPZ_EQUAL_1_P(z)  (SIZ(z)==1 && PTR(z)[0] == 1)
1639
1640
1641/* MPN_FIB2_SIZE(n) is the size in limbs required by mpn_fib2_ui for fp and
1642   f1p.
1643
1644   From Knuth vol 1 section 1.2.8, F[n] = phi^n/sqrt(5) rounded to the
1645   nearest integer, where phi=(1+sqrt(5))/2 is the golden ratio.  So the
1646   number of bits required is n*log_2((1+sqrt(5))/2) = n*0.6942419.
1647
1648   The multiplier used is 23/32=0.71875 for efficient calculation on CPUs
1649   without good floating point.  There's +2 for rounding up, and a further
1650   +2 since at the last step x limbs are doubled into a 2x+1 limb region
1651   whereas the actual F[2k] value might be only 2x-1 limbs.
1652
1653   Note that a division is done first, since on a 32-bit system it's at
1654   least conceivable to go right up to n==ULONG_MAX.  (F[2^32-1] would be
1655   about 380Mbytes, plus temporary workspace of about 1.2Gbytes here and
1656   whatever a multiply of two 190Mbyte numbers takes.)
1657
1658   Enhancement: When GMP_NUMB_BITS is not a power of 2 the division could be
1659   worked into the multiplier.  */
1660
1661#define MPN_FIB2_SIZE(n) \
1662  ((mp_size_t) ((n) / 32 * 23 / GMP_NUMB_BITS) + 4)
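
/* Worked example (illustrative arithmetic only): with GMP_NUMB_BITS == 64
   and n == 1000, F[1000] needs about 1000*0.6942419 = 694 bits, i.e. 11
   limbs, whereas MPN_FIB2_SIZE(1000) = (1000/32)*23/64 + 4 = 11 + 4 = 15
   limbs, comfortably covering the doubling step described above.  */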
1663
1664
1665/* FIB_TABLE(n) returns the Fibonacci number F[n].  Must have n in the range
1666   -1 <= n <= FIB_TABLE_LIMIT (that constant in fib_table.h).
1667
1668   FIB_TABLE_LUCNUM_LIMIT (in fib_table.h) is the largest n for which L[n] =
1669   F[n] + 2*F[n-1] fits in a limb.  */
1670
1671__GMP_DECLSPEC extern const mp_limb_t __gmp_fib_table[];
1672#define FIB_TABLE(n)  (__gmp_fib_table[(n)+1])
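
/* So for instance FIB_TABLE(-1) == 1, FIB_TABLE(0) == 0, FIB_TABLE(1) == 1
   and FIB_TABLE(10) == 55, the table being offset by one entry to make room
   for F[-1] at index 0.  */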
1673
1674#define SIEVESIZE 512		/* FIXME: Allow gmp_init_primesieve to choose */
1675typedef struct
1676{
1677  unsigned long d;		   /* current index in s[] */
1678  unsigned long s0;		   /* number corresponding to s[0] */
1679  unsigned long sqrt_s0;	   /* misnomer for sqrt(s[SIEVESIZE-1]) */
1680  unsigned char s[SIEVESIZE + 1];  /* sieve table */
1681} gmp_primesieve_t;
1682
1683#define gmp_init_primesieve __gmp_init_primesieve
1684__GMP_DECLSPEC void gmp_init_primesieve (gmp_primesieve_t *);
1685
1686#define gmp_nextprime __gmp_nextprime
1687__GMP_DECLSPEC unsigned long int gmp_nextprime (gmp_primesieve_t *);
1688
1689
1690#ifndef MUL_TOOM22_THRESHOLD
1691#define MUL_TOOM22_THRESHOLD             30
1692#endif
1693
1694#ifndef MUL_TOOM33_THRESHOLD
1695#define MUL_TOOM33_THRESHOLD            100
1696#endif
1697
1698#ifndef MUL_TOOM44_THRESHOLD
1699#define MUL_TOOM44_THRESHOLD            300
1700#endif
1701
1702#ifndef MUL_TOOM6H_THRESHOLD
1703#define MUL_TOOM6H_THRESHOLD            350
1704#endif
1705
1706#ifndef SQR_TOOM6_THRESHOLD
1707#define SQR_TOOM6_THRESHOLD MUL_TOOM6H_THRESHOLD
1708#endif
1709
1710#ifndef MUL_TOOM8H_THRESHOLD
1711#define MUL_TOOM8H_THRESHOLD            450
1712#endif
1713
1714#ifndef SQR_TOOM8_THRESHOLD
1715#define SQR_TOOM8_THRESHOLD MUL_TOOM8H_THRESHOLD
1716#endif
1717
1718#ifndef MUL_TOOM32_TO_TOOM43_THRESHOLD
1719#define MUL_TOOM32_TO_TOOM43_THRESHOLD  100
1720#endif
1721
1722#ifndef MUL_TOOM32_TO_TOOM53_THRESHOLD
1723#define MUL_TOOM32_TO_TOOM53_THRESHOLD  110
1724#endif
1725
1726#ifndef MUL_TOOM42_TO_TOOM53_THRESHOLD
1727#define MUL_TOOM42_TO_TOOM53_THRESHOLD  100
1728#endif
1729
1730#ifndef MUL_TOOM42_TO_TOOM63_THRESHOLD
1731#define MUL_TOOM42_TO_TOOM63_THRESHOLD  110
1732#endif
1733
1734/* MUL_TOOM22_THRESHOLD_LIMIT is the maximum for MUL_TOOM22_THRESHOLD.  In a
1735   normal build MUL_TOOM22_THRESHOLD is a constant and we use that.  In a fat
1736   binary or tune program build MUL_TOOM22_THRESHOLD is a variable and a
1737   separate hard limit will have been defined.  Similarly for TOOM3.  */
1738#ifndef MUL_TOOM22_THRESHOLD_LIMIT
1739#define MUL_TOOM22_THRESHOLD_LIMIT  MUL_TOOM22_THRESHOLD
1740#endif
1741#ifndef MUL_TOOM33_THRESHOLD_LIMIT
1742#define MUL_TOOM33_THRESHOLD_LIMIT  MUL_TOOM33_THRESHOLD
1743#endif
1744#ifndef MULLO_BASECASE_THRESHOLD_LIMIT
1745#define MULLO_BASECASE_THRESHOLD_LIMIT  MULLO_BASECASE_THRESHOLD
1746#endif
1747
1748/* SQR_BASECASE_THRESHOLD is where mpn_sqr_basecase should take over from
1749   mpn_mul_basecase.  Default is to use mpn_sqr_basecase from 0.  (Note that we
1750   certainly always want it if there's a native assembler mpn_sqr_basecase.)
1751
1752   If it turns out that mpn_toom2_sqr becomes faster than mpn_mul_basecase
1753   before mpn_sqr_basecase does, then SQR_BASECASE_THRESHOLD is the toom2
1754   threshold and SQR_TOOM2_THRESHOLD is 0.  This oddity arises more or less
1755   because SQR_TOOM2_THRESHOLD represents the size up to which mpn_sqr_basecase
1756   should be used, and that may be never.  */
1757
1758#ifndef SQR_BASECASE_THRESHOLD
1759#define SQR_BASECASE_THRESHOLD            0
1760#endif
1761
1762#ifndef SQR_TOOM2_THRESHOLD
1763#define SQR_TOOM2_THRESHOLD              50
1764#endif
1765
1766#ifndef SQR_TOOM3_THRESHOLD
1767#define SQR_TOOM3_THRESHOLD             120
1768#endif
1769
1770#ifndef SQR_TOOM4_THRESHOLD
1771#define SQR_TOOM4_THRESHOLD             400
1772#endif
1773
1774/* See comments above about MUL_TOOM33_THRESHOLD_LIMIT.  */
1775#ifndef SQR_TOOM3_THRESHOLD_LIMIT
1776#define SQR_TOOM3_THRESHOLD_LIMIT  SQR_TOOM3_THRESHOLD
1777#endif
1778
1779#ifndef DC_DIV_QR_THRESHOLD
1780#define DC_DIV_QR_THRESHOLD              50
1781#endif
1782
1783#ifndef DC_DIVAPPR_Q_THRESHOLD
1784#define DC_DIVAPPR_Q_THRESHOLD          200
1785#endif
1786
1787#ifndef DC_BDIV_QR_THRESHOLD
1788#define DC_BDIV_QR_THRESHOLD             50
1789#endif
1790
1791#ifndef DC_BDIV_Q_THRESHOLD
1792#define DC_BDIV_Q_THRESHOLD             180
1793#endif
1794
1795#ifndef DIVEXACT_JEB_THRESHOLD
1796#define DIVEXACT_JEB_THRESHOLD           25
1797#endif
1798
1799#ifndef INV_MULMOD_BNM1_THRESHOLD
1800#define INV_MULMOD_BNM1_THRESHOLD  (5*MULMOD_BNM1_THRESHOLD)
1801#endif
1802
1803#ifndef INV_APPR_THRESHOLD
1804#define INV_APPR_THRESHOLD         INV_NEWTON_THRESHOLD
1805#endif
1806
1807#ifndef INV_NEWTON_THRESHOLD
1808#define INV_NEWTON_THRESHOLD            200
1809#endif
1810
1811#ifndef BINV_NEWTON_THRESHOLD
1812#define BINV_NEWTON_THRESHOLD           300
1813#endif
1814
1815#ifndef MU_DIVAPPR_Q_THRESHOLD
1816#define MU_DIVAPPR_Q_THRESHOLD         2000
1817#endif
1818
1819#ifndef MU_DIV_QR_THRESHOLD
1820#define MU_DIV_QR_THRESHOLD            2000
1821#endif
1822
1823#ifndef MUPI_DIV_QR_THRESHOLD
1824#define MUPI_DIV_QR_THRESHOLD           200
1825#endif
1826
1827#ifndef MU_BDIV_Q_THRESHOLD
1828#define MU_BDIV_Q_THRESHOLD            2000
1829#endif
1830
1831#ifndef MU_BDIV_QR_THRESHOLD
1832#define MU_BDIV_QR_THRESHOLD           2000
1833#endif
1834
1835#ifndef MULMOD_BNM1_THRESHOLD
1836#define MULMOD_BNM1_THRESHOLD            16
1837#endif
1838
1839#ifndef SQRMOD_BNM1_THRESHOLD
1840#define SQRMOD_BNM1_THRESHOLD            16
1841#endif
1842
1843#ifndef MUL_TO_MULMOD_BNM1_FOR_2NXN_THRESHOLD
1844#define MUL_TO_MULMOD_BNM1_FOR_2NXN_THRESHOLD  (INV_MULMOD_BNM1_THRESHOLD/2)
1845#endif
1846
1847#if HAVE_NATIVE_mpn_addmul_2 || HAVE_NATIVE_mpn_redc_2
1848
1849#ifndef REDC_1_TO_REDC_2_THRESHOLD
1850#define REDC_1_TO_REDC_2_THRESHOLD       15
1851#endif
1852#ifndef REDC_2_TO_REDC_N_THRESHOLD
1853#define REDC_2_TO_REDC_N_THRESHOLD      100
1854#endif
1855
1856#else
1857
1858#ifndef REDC_1_TO_REDC_N_THRESHOLD
1859#define REDC_1_TO_REDC_N_THRESHOLD      100
1860#endif
1861
1862#endif /* HAVE_NATIVE_mpn_addmul_2 || HAVE_NATIVE_mpn_redc_2 */
1863
1864
1865/* First k to use for an FFT modF multiply.  A modF FFT is an order
1866   log(2^k)/log(2^(k-1)) algorithm, so k=3 is merely 1.5 like karatsuba,
1867   whereas k=4 is 1.33 which is faster than toom3 at 1.485.    */
1868#define FFT_FIRST_K  4
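
/* The exponent log(2^k)/log(2^(k-1)) simplifies to k/(k-1), so each larger
   k gives an asymptotically better multiply (5/4 = 1.25 for k=5, 6/5 = 1.2
   for k=6, etc) at the price of larger constant factors, hence the per-k
   threshold tables below.  */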
1869
1870/* Threshold at which FFT should be used to do a modF NxN -> N multiply. */
1871#ifndef MUL_FFT_MODF_THRESHOLD
1872#define MUL_FFT_MODF_THRESHOLD   (MUL_TOOM33_THRESHOLD * 3)
1873#endif
1874#ifndef SQR_FFT_MODF_THRESHOLD
1875#define SQR_FFT_MODF_THRESHOLD   (SQR_TOOM3_THRESHOLD * 3)
1876#endif
1877
1878/* Threshold at which FFT should be used to do an NxN -> 2N multiply.  This
1879   will be a size where FFT is using k=7 or k=8, since an FFT-k used for an
1880   NxN->2N multiply and not recursing into itself is an order
1881   log(2^k)/log(2^(k-2)) algorithm, so it'll be at least k=7 at 1.39 which
1882   is the first better than toom3.  */
1883#ifndef MUL_FFT_THRESHOLD
1884#define MUL_FFT_THRESHOLD   (MUL_FFT_MODF_THRESHOLD * 10)
1885#endif
1886#ifndef SQR_FFT_THRESHOLD
1887#define SQR_FFT_THRESHOLD   (SQR_FFT_MODF_THRESHOLD * 10)
1888#endif
1889
1890/* Table of thresholds for successive modF FFT "k"s.  The first entry is
1891   where FFT_FIRST_K+1 should be used, the second FFT_FIRST_K+2,
1892   etc.  See mpn_fft_best_k(). */
1893#ifndef MUL_FFT_TABLE
1894#define MUL_FFT_TABLE                           \
1895  { MUL_TOOM33_THRESHOLD * 4,   /* k=5 */        \
1896    MUL_TOOM33_THRESHOLD * 8,   /* k=6 */        \
1897    MUL_TOOM33_THRESHOLD * 16,  /* k=7 */        \
1898    MUL_TOOM33_THRESHOLD * 32,  /* k=8 */        \
1899    MUL_TOOM33_THRESHOLD * 96,  /* k=9 */        \
1900    MUL_TOOM33_THRESHOLD * 288, /* k=10 */       \
1901    0 }
1902#endif
1903#ifndef SQR_FFT_TABLE
1904#define SQR_FFT_TABLE                           \
1905  { SQR_TOOM3_THRESHOLD * 4,   /* k=5 */        \
1906    SQR_TOOM3_THRESHOLD * 8,   /* k=6 */        \
1907    SQR_TOOM3_THRESHOLD * 16,  /* k=7 */        \
1908    SQR_TOOM3_THRESHOLD * 32,  /* k=8 */        \
1909    SQR_TOOM3_THRESHOLD * 96,  /* k=9 */        \
1910    SQR_TOOM3_THRESHOLD * 288, /* k=10 */       \
1911    0 }
1912#endif
1913
1914struct fft_table_nk
1915{
1916  unsigned int n:27;
1917  unsigned int k:5;
1918};
1919
1920#ifndef FFT_TABLE_ATTRS
1921#define FFT_TABLE_ATTRS   static const
1922#endif
1923
1924#define MPN_FFT_TABLE_SIZE  16
1925
1926
1927#ifndef DC_DIV_QR_THRESHOLD
1928#define DC_DIV_QR_THRESHOLD    (3 * MUL_TOOM22_THRESHOLD)
1929#endif
1930
1931#ifndef GET_STR_DC_THRESHOLD
1932#define GET_STR_DC_THRESHOLD             18
1933#endif
1934
1935#ifndef GET_STR_PRECOMPUTE_THRESHOLD
1936#define GET_STR_PRECOMPUTE_THRESHOLD     35
1937#endif
1938
1939#ifndef SET_STR_DC_THRESHOLD
1940#define SET_STR_DC_THRESHOLD            750
1941#endif
1942
1943#ifndef SET_STR_PRECOMPUTE_THRESHOLD
1944#define SET_STR_PRECOMPUTE_THRESHOLD   2000
1945#endif
1946
1947/* Return non-zero if xp,xsize and yp,ysize overlap.
1948   If xp+xsize<=yp there's no overlap, or if yp+ysize<=xp there's no
1949   overlap.  If both these are false, there's an overlap. */
1950#define MPN_OVERLAP_P(xp, xsize, yp, ysize) \
1951  ((xp) + (xsize) > (yp) && (yp) + (ysize) > (xp))
1952#define MEM_OVERLAP_P(xp, xsize, yp, ysize)     \
1953  (   (char *) (xp) + (xsize) > (char *) (yp)   \
1954   && (char *) (yp) + (ysize) > (char *) (xp))
1955
1956/* Return non-zero if xp,xsize and yp,ysize are either identical or not
1957   overlapping.  Return zero if they're partially overlapping. */
1958#define MPN_SAME_OR_SEPARATE_P(xp, yp, size)    \
1959  MPN_SAME_OR_SEPARATE2_P(xp, size, yp, size)
1960#define MPN_SAME_OR_SEPARATE2_P(xp, xsize, yp, ysize)           \
1961  ((xp) == (yp) || ! MPN_OVERLAP_P (xp, xsize, yp, ysize))
1962
1963/* Return non-zero if dst,dsize and src,ssize are either identical or
1964   overlapping in a way suitable for an incrementing/decrementing algorithm.
1965   Return zero if they're partially overlapping in an unsuitable fashion. */
1966#define MPN_SAME_OR_INCR2_P(dst, dsize, src, ssize)             \
1967  ((dst) <= (src) || ! MPN_OVERLAP_P (dst, dsize, src, ssize))
1968#define MPN_SAME_OR_INCR_P(dst, src, size)      \
1969  MPN_SAME_OR_INCR2_P(dst, size, src, size)
1970#define MPN_SAME_OR_DECR2_P(dst, dsize, src, ssize)             \
1971  ((dst) >= (src) || ! MPN_OVERLAP_P (dst, dsize, src, ssize))
1972#define MPN_SAME_OR_DECR_P(dst, src, size)      \
1973  MPN_SAME_OR_DECR2_P(dst, size, src, size)
1974
1975
1976/* ASSERT() is a private assertion checking scheme, similar to <assert.h>.
1977   ASSERT() does the check only if WANT_ASSERT is selected, ASSERT_ALWAYS()
1978   does it always.  Generally assertions are meant for development, but
1979   might help when looking for a problem later too.
1980
1981   Note that strings shouldn't be used within the ASSERT expression,
1982   eg. ASSERT(strcmp(s,"notgood")!=0), since the quotes upset the "expr"
1983   used in the !HAVE_STRINGIZE case (ie. K&R).  */
1984
1985#ifdef __LINE__
1986#define ASSERT_LINE  __LINE__
1987#else
1988#define ASSERT_LINE  -1
1989#endif
1990
1991#ifdef __FILE__
1992#define ASSERT_FILE  __FILE__
1993#else
1994#define ASSERT_FILE  ""
1995#endif
1996
1997__GMP_DECLSPEC void __gmp_assert_header __GMP_PROTO ((const char *, int));
1998__GMP_DECLSPEC void __gmp_assert_fail __GMP_PROTO ((const char *, int, const char *)) ATTRIBUTE_NORETURN;
1999
2000#if HAVE_STRINGIZE
2001#define ASSERT_FAIL(expr)  __gmp_assert_fail (ASSERT_FILE, ASSERT_LINE, #expr)
2002#else
2003#define ASSERT_FAIL(expr)  __gmp_assert_fail (ASSERT_FILE, ASSERT_LINE, "expr")
2004#endif
2005
2006#define ASSERT_ALWAYS(expr)     \
2007  do {                          \
2008    if (!(expr))                \
2009      ASSERT_FAIL (expr);       \
2010  } while (0)
2011
2012#if WANT_ASSERT
2013#define ASSERT(expr)   ASSERT_ALWAYS (expr)
2014#else
2015#define ASSERT(expr)   do {} while (0)
2016#endif
2017
2018
2019/* ASSERT_CARRY checks the expression is non-zero, and ASSERT_NOCARRY checks
2020   that it's zero.  In both cases if assertion checking is disabled the
2021   expression is still evaluated.  These macros are meant for use with
2022   routines like mpn_add_n() where the return value represents a carry or
2023   whatever that should or shouldn't occur in some context.  For example,
2024   ASSERT_NOCARRY (mpn_add_n (rp, s1p, s2p, size)); */
2025#if WANT_ASSERT
2026#define ASSERT_CARRY(expr)     ASSERT_ALWAYS ((expr) != 0)
2027#define ASSERT_NOCARRY(expr)   ASSERT_ALWAYS ((expr) == 0)
2028#else
2029#define ASSERT_CARRY(expr)     (expr)
2030#define ASSERT_NOCARRY(expr)   (expr)
2031#endif
2032
2033
2034/* ASSERT_CODE includes code when assertion checking is wanted.  This is the
2035   same as writing "#if WANT_ASSERT", but more compact.  */
2036#if WANT_ASSERT
2037#define ASSERT_CODE(expr)  expr
2038#else
2039#define ASSERT_CODE(expr)
2040#endif
2041
2042
2043/* Test that an mpq_t is in fully canonical form.  This can be used as
2044   protection on routines like mpq_equal which give wrong results on
2045   non-canonical inputs.  */
2046#if WANT_ASSERT
2047#define ASSERT_MPQ_CANONICAL(q)                         \
2048  do {                                                  \
2049    ASSERT (q->_mp_den._mp_size > 0);                   \
2050    if (q->_mp_num._mp_size == 0)                       \
2051      {                                                 \
2052        /* zero should be 0/1 */                        \
2053        ASSERT (mpz_cmp_ui (mpq_denref(q), 1L) == 0);   \
2054      }                                                 \
2055    else                                                \
2056      {                                                 \
2057        /* no common factors */                         \
2058        mpz_t  __g;                                     \
2059        mpz_init (__g);                                 \
2060        mpz_gcd (__g, mpq_numref(q), mpq_denref(q));    \
2061        ASSERT (mpz_cmp_ui (__g, 1) == 0);              \
2062        mpz_clear (__g);                                \
2063      }                                                 \
2064  } while (0)
2065#else
2066#define ASSERT_MPQ_CANONICAL(q)  do {} while (0)
2067#endif
2068
2069/* Check that the nail parts are zero. */
2070#define ASSERT_ALWAYS_LIMB(limb)                \
2071  do {                                          \
2072    mp_limb_t  __nail = (limb) & GMP_NAIL_MASK; \
2073    ASSERT_ALWAYS (__nail == 0);                \
2074  } while (0)
2075#define ASSERT_ALWAYS_MPN(ptr, size)            \
2076  do {                                          \
2077    /* let whole loop go dead when no nails */  \
2078    if (GMP_NAIL_BITS != 0)                     \
2079      {                                         \
2080        mp_size_t  __i;                         \
2081        for (__i = 0; __i < (size); __i++)      \
2082          ASSERT_ALWAYS_LIMB ((ptr)[__i]);      \
2083      }                                         \
2084  } while (0)
2085#if WANT_ASSERT
2086#define ASSERT_LIMB(limb)       ASSERT_ALWAYS_LIMB (limb)
2087#define ASSERT_MPN(ptr, size)   ASSERT_ALWAYS_MPN (ptr, size)
2088#else
2089#define ASSERT_LIMB(limb)       do {} while (0)
2090#define ASSERT_MPN(ptr, size)   do {} while (0)
2091#endif
2092
2093
2094/* Assert that an mpn region {ptr,size} is zero, or non-zero.
   size==0 is allowed, and in that case {ptr,size} is considered to be zero.  */
2096#if WANT_ASSERT
2097#define ASSERT_MPN_ZERO_P(ptr,size)     \
2098  do {                                  \
2099    mp_size_t  __i;                     \
2100    ASSERT ((size) >= 0);               \
2101    for (__i = 0; __i < (size); __i++)  \
2102      ASSERT ((ptr)[__i] == 0);         \
2103  } while (0)
2104#define ASSERT_MPN_NONZERO_P(ptr,size)  \
2105  do {                                  \
2106    mp_size_t  __i;                     \
2107    int        __nonzero = 0;           \
2108    ASSERT ((size) >= 0);               \
2109    for (__i = 0; __i < (size); __i++)  \
2110      if ((ptr)[__i] != 0)              \
2111        {                               \
2112          __nonzero = 1;                \
2113          break;                        \
2114        }                               \
2115    ASSERT (__nonzero);                 \
2116  } while (0)
2117#else
2118#define ASSERT_MPN_ZERO_P(ptr,size)     do {} while (0)
2119#define ASSERT_MPN_NONZERO_P(ptr,size)  do {} while (0)
2120#endif
2121
2122
2123#if ! HAVE_NATIVE_mpn_com
2124#undef mpn_com
2125#define mpn_com(d,s,n)                                  \
2126  do {                                                  \
2127    mp_ptr     __d = (d);                               \
2128    mp_srcptr  __s = (s);                               \
2129    mp_size_t  __n = (n);                               \
2130    ASSERT (__n >= 1);                                  \
2131    ASSERT (MPN_SAME_OR_SEPARATE_P (__d, __s, __n));    \
2132    do                                                  \
2133      *__d++ = (~ *__s++) & GMP_NUMB_MASK;              \
2134    while (--__n);                                      \
2135  } while (0)
2136#endif
2137
2138#define MPN_LOGOPS_N_INLINE(rp, up, vp, n, operation)			\
2139  do {									\
2140    mp_srcptr	__up = (up);						\
2141    mp_srcptr	__vp = (vp);						\
2142    mp_ptr	__rp = (rp);						\
2143    mp_size_t	__n = (n);						\
2144    mp_limb_t __a, __b;							\
2145    ASSERT (__n > 0);							\
2146    ASSERT (MPN_SAME_OR_SEPARATE_P (__rp, __up, __n));			\
2147    ASSERT (MPN_SAME_OR_SEPARATE_P (__rp, __vp, __n));			\
2148    __up += __n;							\
2149    __vp += __n;							\
2150    __rp += __n;							\
2151    __n = -__n;								\
2152    do {								\
2153      __a = __up[__n];							\
2154      __b = __vp[__n];							\
2155      __rp[__n] = operation;						\
2156    } while (++__n);							\
2157  } while (0)
2158
2159
2160#if ! HAVE_NATIVE_mpn_and_n
2161#undef mpn_and_n
2162#define mpn_and_n(rp, up, vp, n) \
2163  MPN_LOGOPS_N_INLINE (rp, up, vp, n, __a & __b)
2164#endif
2165
2166#if ! HAVE_NATIVE_mpn_andn_n
2167#undef mpn_andn_n
2168#define mpn_andn_n(rp, up, vp, n) \
2169  MPN_LOGOPS_N_INLINE (rp, up, vp, n, __a & ~__b)
2170#endif
2171
2172#if ! HAVE_NATIVE_mpn_nand_n
2173#undef mpn_nand_n
2174#define mpn_nand_n(rp, up, vp, n) \
2175  MPN_LOGOPS_N_INLINE (rp, up, vp, n, ~(__a & __b) & GMP_NUMB_MASK)
2176#endif
2177
2178#if ! HAVE_NATIVE_mpn_ior_n
2179#undef mpn_ior_n
2180#define mpn_ior_n(rp, up, vp, n) \
2181  MPN_LOGOPS_N_INLINE (rp, up, vp, n, __a | __b)
2182#endif
2183
2184#if ! HAVE_NATIVE_mpn_iorn_n
2185#undef mpn_iorn_n
2186#define mpn_iorn_n(rp, up, vp, n) \
2187  MPN_LOGOPS_N_INLINE (rp, up, vp, n, (__a | ~__b) & GMP_NUMB_MASK)
2188#endif
2189
2190#if ! HAVE_NATIVE_mpn_nior_n
2191#undef mpn_nior_n
2192#define mpn_nior_n(rp, up, vp, n) \
2193  MPN_LOGOPS_N_INLINE (rp, up, vp, n, ~(__a | __b) & GMP_NUMB_MASK)
2194#endif
2195
2196#if ! HAVE_NATIVE_mpn_xor_n
2197#undef mpn_xor_n
2198#define mpn_xor_n(rp, up, vp, n) \
2199  MPN_LOGOPS_N_INLINE (rp, up, vp, n, __a ^ __b)
2200#endif
2201
2202#if ! HAVE_NATIVE_mpn_xnor_n
2203#undef mpn_xnor_n
2204#define mpn_xnor_n(rp, up, vp, n) \
2205  MPN_LOGOPS_N_INLINE (rp, up, vp, n, ~(__a ^ __b) & GMP_NUMB_MASK)
2206#endif
2207
2208#define mpn_trialdiv __MPN(trialdiv)
2209__GMP_DECLSPEC mp_limb_t mpn_trialdiv __GMP_PROTO ((mp_srcptr, mp_size_t, mp_size_t, int *));
2210
2211#define mpn_remove __MPN(remove)
2212__GMP_DECLSPEC mp_bitcnt_t mpn_remove __GMP_PROTO ((mp_ptr, mp_size_t *, mp_ptr, mp_size_t, mp_ptr, mp_size_t, mp_bitcnt_t));
2213
2214
2215/* ADDC_LIMB sets w=x+y and cout to 0 or 1 for a carry from that addition. */
2216#if GMP_NAIL_BITS == 0
2217#define ADDC_LIMB(cout, w, x, y)        \
2218  do {                                  \
2219    mp_limb_t  __x = (x);               \
2220    mp_limb_t  __y = (y);               \
2221    mp_limb_t  __w = __x + __y;         \
2222    (w) = __w;                          \
2223    (cout) = __w < __x;                 \
2224  } while (0)
2225#else
2226#define ADDC_LIMB(cout, w, x, y)        \
2227  do {                                  \
2228    mp_limb_t  __w;                     \
2229    ASSERT_LIMB (x);                    \
2230    ASSERT_LIMB (y);                    \
2231    __w = (x) + (y);                    \
2232    (w) = __w & GMP_NUMB_MASK;          \
2233    (cout) = __w >> GMP_NUMB_BITS;      \
2234  } while (0)
2235#endif
2236
2237/* SUBC_LIMB sets w=x-y and cout to 0 or 1 for a borrow from that
2238   subtract.  */
2239#if GMP_NAIL_BITS == 0
2240#define SUBC_LIMB(cout, w, x, y)        \
2241  do {                                  \
2242    mp_limb_t  __x = (x);               \
2243    mp_limb_t  __y = (y);               \
2244    mp_limb_t  __w = __x - __y;         \
2245    (w) = __w;                          \
2246    (cout) = __w > __x;                 \
2247  } while (0)
2248#else
2249#define SUBC_LIMB(cout, w, x, y)        \
2250  do {                                  \
2251    mp_limb_t  __w = (x) - (y);         \
2252    (w) = __w & GMP_NUMB_MASK;          \
2253    (cout) = __w >> (GMP_LIMB_BITS-1);  \
2254  } while (0)
2255#endif
2256
2257
2258/* MPN_INCR_U does {ptr,size} += n, MPN_DECR_U does {ptr,size} -= n, both
2259   expecting no carry (or borrow) from that.
2260
2261   The size parameter is only for the benefit of assertion checking.  In a
2262   normal build it's unused and the carry/borrow is just propagated as far
2263   as it needs to go.
2264
2265   On random data, usually only one or two limbs of {ptr,size} get updated,
2266   so there's no need for any sophisticated looping, just something compact
2267   and sensible.
2268
2269   FIXME: Switch all code from mpn_{incr,decr}_u to MPN_{INCR,DECR}_U,
2270   declaring their operand sizes, then remove the former.  This is purely
2271   for the benefit of assertion checking.  */
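
/* A typical use (illustrative sketch only) is propagating a carry limb cy
   into the high part of a destination known to be able to absorb it, e.g.

     cy = mpn_add_n (rp, rp, xp, n);
     MPN_INCR_U (rp + n, rsize - n, cy);

   where {rp,rsize} was arranged so the increment cannot carry out of the
   region.  */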
2272
2273#if defined (__GNUC__) && HAVE_HOST_CPU_FAMILY_x86 && GMP_NAIL_BITS == 0      \
2274  && GMP_LIMB_BITS == 32 && ! defined (NO_ASM) && ! WANT_ASSERT
2275/* Better flags handling than the generic C gives on i386, saving a few
2276   bytes of code and maybe a cycle or two.  */
2277
2278#define MPN_IORD_U(ptr, incr, aors)					\
2279  do {									\
2280    mp_ptr  __ptr_dummy;						\
2281    if (__builtin_constant_p (incr) && (incr) == 1)			\
2282      {									\
2283        __asm__ __volatile__						\
2284          ("\n" ASM_L(top) ":\n"					\
2285           "\t" aors " $1, (%0)\n"					\
2286           "\tleal 4(%0),%0\n"						\
2287           "\tjc " ASM_L(top)						\
2288           : "=r" (__ptr_dummy)						\
2289           : "0"  (ptr)							\
2290           : "memory");							\
2291      }									\
2292    else								\
2293      {									\
2294        __asm__ __volatile__						\
2295          (   aors  " %2,(%0)\n"					\
2296           "\tjnc " ASM_L(done) "\n"					\
2297           ASM_L(top) ":\n"						\
2298           "\t" aors " $1,4(%0)\n"					\
2299           "\tleal 4(%0),%0\n"						\
2300           "\tjc " ASM_L(top) "\n"					\
2301           ASM_L(done) ":\n"						\
2302           : "=r" (__ptr_dummy)						\
2303           : "0"  (ptr),						\
2304             "ri" (incr)						\
2305           : "memory");							\
2306      }									\
2307  } while (0)
2308
2309#define MPN_INCR_U(ptr, size, incr)  MPN_IORD_U (ptr, incr, "addl")
2310#define MPN_DECR_U(ptr, size, incr)  MPN_IORD_U (ptr, incr, "subl")
2311#define mpn_incr_u(ptr, incr)  MPN_INCR_U (ptr, 0, incr)
2312#define mpn_decr_u(ptr, incr)  MPN_DECR_U (ptr, 0, incr)
2313#endif
2314
2315#if GMP_NAIL_BITS == 0
2316#ifndef mpn_incr_u
2317#define mpn_incr_u(p,incr)                              \
2318  do {                                                  \
2319    mp_limb_t __x;                                      \
2320    mp_ptr __p = (p);                                   \
2321    if (__builtin_constant_p (incr) && (incr) == 1)     \
2322      {                                                 \
2323        while (++(*(__p++)) == 0)                       \
2324          ;                                             \
2325      }                                                 \
2326    else                                                \
2327      {                                                 \
2328        __x = *__p + (incr);                            \
2329        *__p = __x;                                     \
2330        if (__x < (incr))                               \
2331          while (++(*(++__p)) == 0)                     \
2332            ;                                           \
2333      }                                                 \
2334  } while (0)
2335#endif
2336#ifndef mpn_decr_u
2337#define mpn_decr_u(p,incr)                              \
2338  do {                                                  \
2339    mp_limb_t __x;                                      \
2340    mp_ptr __p = (p);                                   \
2341    if (__builtin_constant_p (incr) && (incr) == 1)     \
2342      {                                                 \
2343        while ((*(__p++))-- == 0)                       \
2344          ;                                             \
2345      }                                                 \
2346    else                                                \
2347      {                                                 \
2348        __x = *__p;                                     \
2349        *__p = __x - (incr);                            \
2350        if (__x < (incr))                               \
2351          while ((*(++__p))-- == 0)                     \
2352            ;                                           \
2353      }                                                 \
2354  } while (0)
2355#endif
2356#endif
2357
2358#if GMP_NAIL_BITS >= 1
2359#ifndef mpn_incr_u
2360#define mpn_incr_u(p,incr)                              \
2361  do {							\
2362    mp_limb_t __x;					\
2363    mp_ptr __p = (p);					\
2364    if (__builtin_constant_p (incr) && (incr) == 1)	\
2365      {							\
2366	do						\
2367	  {						\
2368	    __x = (*__p + 1) & GMP_NUMB_MASK;		\
2369	    *__p++ = __x;				\
2370	  }						\
2371	while (__x == 0);				\
2372      }							\
2373    else						\
2374      {							\
2375	__x = (*__p + (incr));				\
2376	*__p++ = __x & GMP_NUMB_MASK;			\
2377	if (__x >> GMP_NUMB_BITS != 0)			\
2378	  {						\
2379	    do						\
2380	      {						\
2381		__x = (*__p + 1) & GMP_NUMB_MASK;	\
2382		*__p++ = __x;				\
2383	      }						\
2384	    while (__x == 0);				\
2385	  }						\
2386      }							\
2387  } while (0)
2388#endif
2389#ifndef mpn_decr_u
2390#define mpn_decr_u(p,incr)				\
2391  do {							\
2392    mp_limb_t __x;					\
2393    mp_ptr __p = (p);					\
2394    if (__builtin_constant_p (incr) && (incr) == 1)	\
2395      {							\
2396	do						\
2397	  {						\
2398	    __x = *__p;					\
2399	    *__p++ = (__x - 1) & GMP_NUMB_MASK;		\
2400	  }						\
2401	while (__x == 0);				\
2402      }							\
2403    else						\
2404      {							\
2405	__x = *__p - (incr);				\
2406	*__p++ = __x & GMP_NUMB_MASK;			\
2407	if (__x >> GMP_NUMB_BITS != 0)			\
2408	  {						\
2409	    do						\
2410	      {						\
2411		__x = *__p;				\
2412		*__p++ = (__x - 1) & GMP_NUMB_MASK;	\
2413	      }						\
2414	    while (__x == 0);				\
2415	  }						\
2416      }							\
2417  } while (0)
2418#endif
2419#endif
2420
2421#ifndef MPN_INCR_U
2422#if WANT_ASSERT
2423#define MPN_INCR_U(ptr, size, n)                        \
2424  do {                                                  \
2425    ASSERT ((size) >= 1);                               \
2426    ASSERT_NOCARRY (mpn_add_1 (ptr, ptr, size, n));     \
2427  } while (0)
2428#else
2429#define MPN_INCR_U(ptr, size, n)   mpn_incr_u (ptr, n)
2430#endif
2431#endif
2432
2433#ifndef MPN_DECR_U
2434#if WANT_ASSERT
2435#define MPN_DECR_U(ptr, size, n)                        \
2436  do {                                                  \
2437    ASSERT ((size) >= 1);                               \
2438    ASSERT_NOCARRY (mpn_sub_1 (ptr, ptr, size, n));     \
2439  } while (0)
2440#else
2441#define MPN_DECR_U(ptr, size, n)   mpn_decr_u (ptr, n)
2442#endif
2443#endif
2444
2445
2446/* Structure for conversion between internal binary format and
2447   strings in base 2..36.  */
2448struct bases
2449{
2450  /* Number of digits in the conversion base that always fits in an mp_limb_t.
2451     For example, for base 10 on a machine where a mp_limb_t has 32 bits this
2452     is 9, since 10**9 is the largest number that fits into a mp_limb_t.  */
2453  int chars_per_limb;
2454
2455  /* log(2)/log(conversion_base) */
2456  double chars_per_bit_exactly;
2457
2458  /* base**chars_per_limb, i.e. the biggest number that fits a word, built by
2459     factors of base.  Exception: For 2, 4, 8, etc, big_base is log2(base),
2460     i.e. the number of bits used to represent each digit in the base.  */
2461  mp_limb_t big_base;
2462
2463  /* A GMP_LIMB_BITS bit approximation to 1/big_base, represented as a
2464     fixed-point number.  Instead of dividing by big_base an application can
2465     choose to multiply by big_base_inverted.  */
2466  mp_limb_t big_base_inverted;
2467};
2468
2469#define   mp_bases __MPN(bases)
2470__GMP_DECLSPEC extern const struct bases mp_bases[257];
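
/* For instance with a 32-bit mp_limb_t and no nails, mp_bases[10].chars_per_limb
   is 9, mp_bases[10].big_base is 10**9 = 1000000000 (the largest power of 10
   fitting in a limb), and mp_bases[10].chars_per_bit_exactly is
   log(2)/log(10) = 0.30103...  (Illustrative values.)  */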
2471
2472
2473/* For power of 2 bases this is exact.  For other bases the result is either
2474   exact or one too big.
2475
2476   To be exact always it'd be necessary to examine all the limbs of the
2477   operand, since numbers like 100..000 and 99...999 generally differ only
2478   in the lowest limb.  It'd be possible to examine just a couple of high
2479   limbs to increase the probability of being exact, but that doesn't seem
2480   worth bothering with.  */
2481
2482#define MPN_SIZEINBASE(result, ptr, size, base)                         \
2483  do {                                                                  \
2484    int       __lb_base, __cnt;                                         \
2485    size_t __totbits;                                                   \
2486                                                                        \
2487    ASSERT ((size) >= 0);                                               \
2488    ASSERT ((base) >= 2);                                               \
2489    ASSERT ((base) < numberof (mp_bases));                              \
2490                                                                        \
2491    /* Special case for X == 0.  */                                     \
2492    if ((size) == 0)                                                    \
2493      (result) = 1;                                                     \
2494    else                                                                \
2495      {                                                                 \
2496        /* Calculate the total number of significant bits of X.  */     \
2497        count_leading_zeros (__cnt, (ptr)[(size)-1]);                   \
2498        __totbits = (size_t) (size) * GMP_NUMB_BITS - (__cnt - GMP_NAIL_BITS);\
2499                                                                        \
2500        if (POW2_P (base))                                              \
2501          {                                                             \
2502            __lb_base = mp_bases[base].big_base;                        \
2503            (result) = (__totbits + __lb_base - 1) / __lb_base;         \
2504          }                                                             \
2505        else                                                            \
2506          (result) = (size_t)                                           \
2507            (__totbits * mp_bases[base].chars_per_bit_exactly) + 1;     \
2508      }                                                                 \
2509  } while (0)
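
/* Worked example (illustrative): for a 2-limb operand with GMP_NUMB_BITS ==
   32, no nails, and one leading zero bit in the high limb, __totbits is
   2*32-1 = 63.  In base 16 the result is (63+3)/4 = 16 digits exactly; in
   base 10 it is (size_t) (63 * 0.30103) + 1 = 18 + 1 = 19 digits, exact in
   this case (2^63 has 19 decimal digits) but in general possibly one too
   big, as noted above.  */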
2510
2511/* eliminate mp_bases lookups for base==16 */
2512#define MPN_SIZEINBASE_16(result, ptr, size)                            \
2513  do {                                                                  \
2514    int       __cnt;                                                    \
2515    mp_size_t __totbits;                                                \
2516                                                                        \
2517    ASSERT ((size) >= 0);                                               \
2518                                                                        \
2519    /* Special case for X == 0.  */                                     \
2520    if ((size) == 0)                                                    \
2521      (result) = 1;                                                     \
2522    else                                                                \
2523      {                                                                 \
2524        /* Calculate the total number of significant bits of X.  */     \
2525        count_leading_zeros (__cnt, (ptr)[(size)-1]);                   \
2526        __totbits = (size_t) (size) * GMP_NUMB_BITS - (__cnt - GMP_NAIL_BITS);\
2527        (result) = (__totbits + 4 - 1) / 4;                             \
2528      }                                                                 \
2529  } while (0)
2530
2531/* bit count to limb count, rounding up */
2532#define BITS_TO_LIMBS(n)  (((n) + (GMP_NUMB_BITS - 1)) / GMP_NUMB_BITS)
2533
/* MPN_SET_UI sets an mpn (ptr, cnt) to a given ui.  MPZ_FAKE_UI creates a
   fake mpz_t from a ui.  The zp argument must have room for LIMBS_PER_ULONG
   limbs in both cases (LIMBS_PER_ULONG is also defined here.) */
2537#if BITS_PER_ULONG <= GMP_NUMB_BITS /* need one limb per ulong */
2538
2539#define LIMBS_PER_ULONG 1
2540#define MPN_SET_UI(zp, zn, u)   \
2541  (zp)[0] = (u);                \
2542  (zn) = ((zp)[0] != 0);
2543#define MPZ_FAKE_UI(z, zp, u)   \
2544  (zp)[0] = (u);                \
2545  PTR (z) = (zp);               \
2546  SIZ (z) = ((zp)[0] != 0);     \
2547  ASSERT_CODE (ALLOC (z) = 1);
2548
2549#else /* need two limbs per ulong */
2550
2551#define LIMBS_PER_ULONG 2
2552#define MPN_SET_UI(zp, zn, u)                          \
2553  (zp)[0] = (u) & GMP_NUMB_MASK;                       \
2554  (zp)[1] = (u) >> GMP_NUMB_BITS;                      \
2555  (zn) = ((zp)[1] != 0 ? 2 : (zp)[0] != 0 ? 1 : 0);
2556#define MPZ_FAKE_UI(z, zp, u)                          \
2557  (zp)[0] = (u) & GMP_NUMB_MASK;                       \
2558  (zp)[1] = (u) >> GMP_NUMB_BITS;                      \
2559  SIZ (z) = ((zp)[1] != 0 ? 2 : (zp)[0] != 0 ? 1 : 0); \
2560  PTR (z) = (zp);                                      \
2561  ASSERT_CODE (ALLOC (z) = 2);
2562
2563#endif
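
/* Usage sketch (illustrative only): both macros let an unsigned long u be
   handed to mpn/mpz code without heap allocation, e.g.

     mp_limb_t  ulimbs[LIMBS_PER_ULONG];
     mpz_t      uz;
     MPZ_FAKE_UI (uz, ulimbs, u);
     mpz_gcd (g, a, uz);

   uz is read-only: it must not be resized, written through, or passed to
   mpz_clear.  */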
2564
2565
2566#if HAVE_HOST_CPU_FAMILY_x86
2567#define TARGET_REGISTER_STARVED 1
2568#else
2569#define TARGET_REGISTER_STARVED 0
2570#endif
2571
2572
2573/* LIMB_HIGHBIT_TO_MASK(n) examines the high bit of a limb value and turns 1
2574   or 0 there into a limb 0xFF..FF or 0 respectively.
2575
2576   On most CPUs this is just an arithmetic right shift by GMP_LIMB_BITS-1,
2577   but C99 doesn't guarantee signed right shifts are arithmetic, so we have
2578   a little compile-time test and a fallback to a "? :" form.  The latter is
2579   necessary for instance on Cray vector systems.
2580
2581   Recent versions of gcc (eg. 3.3) will in fact optimize a "? :" like this
2582   to an arithmetic right shift anyway, but it's good to get the desired
2583   shift on past versions too (in particular since an important use of
2584   LIMB_HIGHBIT_TO_MASK is in udiv_qrnnd_preinv).  */
2585
2586#define LIMB_HIGHBIT_TO_MASK(n)                                 \
2587  (((mp_limb_signed_t) -1 >> 1) < 0                             \
2588   ? (mp_limb_signed_t) (n) >> (GMP_LIMB_BITS - 1)              \
2589   : (n) & GMP_LIMB_HIGHBIT ? MP_LIMB_T_MAX : CNST_LIMB(0))
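
/* For example, with 32-bit limbs LIMB_HIGHBIT_TO_MASK (0x80000000) is
   0xFFFFFFFF and LIMB_HIGHBIT_TO_MASK (0x7FFFFFFF) is 0, whichever of the
   two forms the compile-time test selects.  */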
2590
2591
2592/* Use a library function for invert_limb, if available. */
2593#define   mpn_invert_limb __MPN(invert_limb)
2594__GMP_DECLSPEC mp_limb_t mpn_invert_limb __GMP_PROTO ((mp_limb_t)) ATTRIBUTE_CONST;
2595#if ! defined (invert_limb) && HAVE_NATIVE_mpn_invert_limb
2596#define invert_limb(invxl,xl)           \
2597  do {                                  \
2598    (invxl) = mpn_invert_limb (xl);     \
2599  } while (0)
2600#endif
2601
2602#ifndef invert_limb
2603#define invert_limb(invxl,xl)                   \
2604  do {                                          \
2605    mp_limb_t dummy;                            \
2606    ASSERT ((xl) != 0);                         \
2607    udiv_qrnnd (invxl, dummy, ~(xl), ~CNST_LIMB(0), xl);  \
2608  } while (0)
2609#endif
2610
2611#define invert_pi1(dinv, d1, d0)				\
2612  do {								\
2613    mp_limb_t v, p, t1, t0, mask;				\
2614    invert_limb (v, d1);					\
2615    p = d1 * v;							\
2616    p += d0;							\
2617    if (p < d0)							\
2618      {								\
2619	v--;							\
2620	mask = -(p >= d1);					\
2621	p -= d1;						\
2622	v += mask;						\
2623	p -= mask & d1;						\
2624      }								\
2625    umul_ppmm (t1, t0, d0, v);					\
2626    p += t1;							\
2627    if (p < t1)							\
2628      {								\
2629        v--;							\
2630	if (UNLIKELY (p >= d1))					\
2631	  {							\
2632	    if (p > d1 || t0 >= d0)				\
2633	      v--;						\
2634	  }							\
2635      }								\
2636    (dinv).inv32 = v;						\
2637  } while (0)
2638
2639
2640#ifndef udiv_qrnnd_preinv
2641#define udiv_qrnnd_preinv udiv_qrnnd_preinv3
2642#endif
2643
2644/* Divide the two-limb number in (NH,,NL) by D, with DI being the largest
2645   limb not larger than (2**(2*GMP_LIMB_BITS))/D - (2**GMP_LIMB_BITS).
2646   If this would yield overflow, DI should be the largest possible number
2647   (i.e., only ones).  For correct operation, the most significant bit of D
2648   has to be set.  Put the quotient in Q and the remainder in R.  */
2649#define udiv_qrnnd_preinv1(q, r, nh, nl, d, di)				\
2650  do {									\
2651    mp_limb_t _q, _ql, _r;						\
2652    mp_limb_t _xh, _xl;							\
2653    ASSERT ((d) != 0);							\
2654    umul_ppmm (_q, _ql, (nh), (di));					\
2655    _q += (nh);	/* Compensate, di is 2**GMP_LIMB_BITS too small */	\
2656    umul_ppmm (_xh, _xl, _q, (d));					\
2657    sub_ddmmss (_xh, _r, (nh), (nl), _xh, _xl);				\
2658    if (_xh != 0)							\
2659      {									\
2660	sub_ddmmss (_xh, _r, _xh, _r, 0, (d));				\
2661	_q += 1;							\
2662	if (_xh != 0)							\
2663	  {								\
2664	    _r -= (d);							\
2665	    _q += 1;							\
2666	  }								\
2667      }									\
2668    if (_r >= (d))							\
2669      {									\
2670	_r -= (d);							\
2671	_q += 1;							\
2672      }									\
2673    (r) = _r;								\
2674    (q) = _q;								\
2675  } while (0)
2676
2677/* Like udiv_qrnnd_preinv, but branch-free. */
2678#define udiv_qrnnd_preinv2(q, r, nh, nl, d, di)				\
2679  do {									\
2680    mp_limb_t _n2, _n10, _nmask, _nadj, _q1;				\
2681    mp_limb_t _xh, _xl;							\
2682    _n2 = (nh);								\
2683    _n10 = (nl);							\
2684    _nmask = LIMB_HIGHBIT_TO_MASK (_n10);				\
2685    _nadj = _n10 + (_nmask & (d));					\
2686    umul_ppmm (_xh, _xl, di, _n2 - _nmask);				\
2687    add_ssaaaa (_xh, _xl, _xh, _xl, _n2, _nadj);			\
2688    _q1 = ~_xh;								\
2689    umul_ppmm (_xh, _xl, _q1, d);					\
2690    add_ssaaaa (_xh, _xl, _xh, _xl, nh, nl);				\
2691    _xh -= (d);					/* xh = 0 or -1 */	\
2692    (r) = _xl + ((d) & _xh);						\
2693    (q) = _xh - _q1;							\
2694  } while (0)
2695
2696/* Like udiv_qrnnd_preinv2, but for any value D.  DNORM is D shifted left
2697   so that its most significant bit is set.  LGUP is ceil(log2(D)).  */
2698#define udiv_qrnnd_preinv2gen(q, r, nh, nl, d, di, dnorm, lgup) \
2699  do {									\
2700    mp_limb_t _n2, _n10, _nmask, _nadj, _q1;				\
2701    mp_limb_t _xh, _xl;							\
2702    _n2 = ((nh) << (GMP_LIMB_BITS - (lgup))) + ((nl) >> 1 >> ((lgup) - 1)); \
2703    _n10 = (nl) << (GMP_LIMB_BITS - (lgup));				\
2704    _nmask = LIMB_HIGHBIT_TO_MASK (_n10);				\
2705    _nadj = _n10 + (_nmask & (dnorm));					\
2706    umul_ppmm (_xh, _xl, di, _n2 - _nmask);				\
2707    add_ssaaaa (_xh, _xl, _xh, _xl, _n2, _nadj);			\
2708    _q1 = ~_xh;								\
2709    umul_ppmm (_xh, _xl, _q1, d);					\
2710    add_ssaaaa (_xh, _xl, _xh, _xl, nh, nl);				\
2711    _xh -= (d);								\
2712    (r) = _xl + ((d) & _xh);						\
2713    (q) = _xh - _q1;							\
2714  } while (0)
2715
2716/* udiv_qrnnd_preinv3 -- Based on work by Niels Möller and Torbjörn Granlund.
2717
2718   We write things strangely below, to help gcc.  A more straightforward
2719   version:
2720
2721   _r = (nl) - _qh * (d);
2722   _t = _r + (d);
2723   if (_r >= _ql)
2724     {
2725       _qh--;
2726       _r = _t;
2727     }
2728
2729   To shorten the critical path by one operation, one may want to use this form:
2730
2731   _p = _qh * (d)
2732   _s = (nl) + (d);
2733   _r = (nl) - _p;
2734   _t = _s - _p;
2735   if (_r >= _ql)
2736     {
2737       _qh--;
2738       _r = _t;
2739     }
2740*/
2741#define udiv_qrnnd_preinv3(q, r, nh, nl, d, di)				\
2742  do {									\
2743    mp_limb_t _qh, _ql, _r;						\
2744    umul_ppmm (_qh, _ql, (nh), (di));					\
2745    if (__builtin_constant_p (nl) && (nl) == 0)				\
2746      _qh += (nh) + 1;							\
2747    else								\
2748      add_ssaaaa (_qh, _ql, _qh, _ql, (nh) + 1, (nl));			\
2749    _r = (nl) - _qh * (d);						\
2750    if (_r > _ql)	/* both > and >= should be OK */		\
2751      {									\
2752	_r += (d);							\
2753	_qh--;								\
2754      }									\
2755    if (UNLIKELY (_r >= (d)))						\
2756      {									\
2757	_r -= (d);							\
2758	_qh++;								\
2759      }									\
2760    (r) = _r;								\
2761    (q) = _qh;								\
2762  } while (0)
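
/* Sketch of the intended usage pattern (illustrative only; nails and the
   unnormalized case are ignored): divide {np,n} by a single limb d whose
   high bit is set, computing the inverse once and reusing it per limb.

     mp_limb_t  d, dinv, r;
     mp_size_t  i;
     invert_limb (dinv, d);
     r = 0;
     for (i = n - 1; i >= 0; i--)
       udiv_qrnnd_preinv (qp[i], r, r, np[i], d, dinv);

   On exit r is the remainder and {qp,n} the quotient, which is in essence
   what mpn_preinv_divrem_1 below does for the normalized case.  */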
2763
2764/* Compute r = nh*B mod d, where di is the inverse of d.  */
2765#define udiv_rnd_preinv(r, nh, d, di)					\
2766  do {									\
2767    mp_limb_t _qh, _ql, _r;						\
2768    umul_ppmm (_qh, _ql, (nh), (di));					\
2769    _qh += (nh) + 1;							\
2770    _r = - _qh * (d);							\
2771    if (_r > _ql)							\
2772      _r += (d);							\
2773    (r) = _r;								\
2774  } while (0)
2775
2776/* Compute the quotient and remainder for n / d.  Requires d >= B^2 / 2
2777   and n < d B.  dinv is the inverse
2778
2779     floor ((B^3 - 1) / (d0 + d1 B)) - B.
2780
2781   NOTE: Output variables are updated multiple times. Only some inputs
2782   and outputs may overlap.
2783*/
2784#define udiv_qr_3by2(q, r1, r0, n2, n1, n0, d1, d0, dinv)		\
2785  do {									\
2786    mp_limb_t _q0, _t1, _t0, _mask;					\
2787    umul_ppmm ((q), _q0, (n2), (dinv));					\
2788    add_ssaaaa ((q), _q0, (q), _q0, (n2), (n1));			\
2789									\
2790    /* Compute the two most significant limbs of n - q'd */		\
2791    (r1) = (n1) - (d1) * (q);						\
2792    (r0) = (n0);							\
2793    sub_ddmmss ((r1), (r0), (r1), (r0), (d1), (d0));			\
2794    umul_ppmm (_t1, _t0, (d0), (q));					\
2795    sub_ddmmss ((r1), (r0), (r1), (r0), _t1, _t0);			\
2796    (q)++;								\
2797									\
2798    /* Conditionally adjust q and the remainders */			\
2799    _mask = - (mp_limb_t) ((r1) >= _q0);				\
2800    (q) += _mask;							\
2801    add_ssaaaa ((r1), (r0), (r1), (r0), _mask & (d1), _mask & (d0));	\
2802    if (UNLIKELY ((r1) >= (d1)))					\
2803      {									\
2804	if ((r1) > (d1) || (r0) >= (d0))				\
2805	  {								\
2806	    (q)++;							\
2807	    sub_ddmmss ((r1), (r0), (r1), (r0), (d1), (d0));		\
2808	  }								\
2809      }									\
2810  } while (0)
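
/* Sketch (illustrative only): one step of schoolbook division by a
   normalized two-limb divisor {d1,d0} (high bit of d1 set), using the
   3/2 inverse from invert_pi1 above.  The struct type holding the inv32
   field filled in by invert_pi1 is assumed to be gmp_pi1_t.

     gmp_pi1_t  dinv;
     mp_limb_t  q, r1, r0;
     invert_pi1 (dinv, d1, d0);
     udiv_qr_3by2 (q, r1, r0, n2, n1, n0, d1, d0, dinv.inv32);

   q is then one quotient limb and (r1,r0) the two-limb remainder,
   provided the three-limb numerator (n2,n1,n0) is below d*B.  */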
2811
2812#ifndef mpn_preinv_divrem_1  /* if not done with cpuvec in a fat binary */
2813#define   mpn_preinv_divrem_1 __MPN(preinv_divrem_1)
2814__GMP_DECLSPEC mp_limb_t mpn_preinv_divrem_1 __GMP_PROTO ((mp_ptr, mp_size_t, mp_srcptr, mp_size_t, mp_limb_t, mp_limb_t, int));
2815#endif
2816
2817
2818/* USE_PREINV_DIVREM_1 is whether to use mpn_preinv_divrem_1, as opposed to the
2819   plain mpn_divrem_1.  The default is yes, since the few CISC chips where
2820   preinv is not good have defines saying so.  */
2821#ifndef USE_PREINV_DIVREM_1
2822#define USE_PREINV_DIVREM_1   1
2823#endif
2824
2825#if USE_PREINV_DIVREM_1
2826#define MPN_DIVREM_OR_PREINV_DIVREM_1(qp,xsize,ap,size,d,dinv,shift)    \
2827  mpn_preinv_divrem_1 (qp, xsize, ap, size, d, dinv, shift)
2828#else
2829#define MPN_DIVREM_OR_PREINV_DIVREM_1(qp,xsize,ap,size,d,dinv,shift)    \
2830  mpn_divrem_1 (qp, xsize, ap, size, d)
2831#endif
2832
2833#ifndef PREINV_MOD_1_TO_MOD_1_THRESHOLD
2834#define PREINV_MOD_1_TO_MOD_1_THRESHOLD 10
2835#endif
2836
2837/* This selection may seem backwards.  The reason mpn_mod_1 typically takes
2838   over for larger sizes is that it uses the mod_1_1 function.  */
2839#define MPN_MOD_OR_PREINV_MOD_1(src,size,divisor,inverse)       	\
2840  (BELOW_THRESHOLD (size, PREINV_MOD_1_TO_MOD_1_THRESHOLD)		\
2841   ? mpn_preinv_mod_1 (src, size, divisor, inverse)			\
2842   : mpn_mod_1 (src, size, divisor))
2843
2844
2845#ifndef mpn_mod_34lsub1  /* if not done with cpuvec in a fat binary */
2846#define   mpn_mod_34lsub1 __MPN(mod_34lsub1)
2847__GMP_DECLSPEC mp_limb_t mpn_mod_34lsub1 __GMP_PROTO ((mp_srcptr, mp_size_t)) __GMP_ATTRIBUTE_PURE;
2848#endif
2849
2850
2851/* DIVEXACT_1_THRESHOLD is at what size to use mpn_divexact_1, as opposed to
2852   plain mpn_divrem_1.  Likewise BMOD_1_TO_MOD_1_THRESHOLD for
2853   mpn_modexact_1_odd against plain mpn_mod_1.  On most CPUs divexact and
2854   modexact are faster at all sizes, so the defaults are 0.  Those CPUs
2855   where this is not right have a tuned threshold.  */
2856#ifndef DIVEXACT_1_THRESHOLD
2857#define DIVEXACT_1_THRESHOLD  0
2858#endif
2859#ifndef BMOD_1_TO_MOD_1_THRESHOLD
2860#define BMOD_1_TO_MOD_1_THRESHOLD  10
2861#endif
2862
2863#ifndef mpn_divexact_1  /* if not done with cpuvec in a fat binary */
2864#define mpn_divexact_1 __MPN(divexact_1)
2865__GMP_DECLSPEC void    mpn_divexact_1 __GMP_PROTO ((mp_ptr, mp_srcptr, mp_size_t, mp_limb_t));
2866#endif
2867
2868#define MPN_DIVREM_OR_DIVEXACT_1(dst, src, size, divisor)                     \
2869  do {                                                                        \
2870    if (BELOW_THRESHOLD (size, DIVEXACT_1_THRESHOLD))                         \
2871      ASSERT_NOCARRY (mpn_divrem_1 (dst, (mp_size_t) 0, src, size, divisor)); \
2872    else                                                                      \
2873      {                                                                       \
2874        ASSERT (mpn_mod_1 (src, size, divisor) == 0);                         \
2875        mpn_divexact_1 (dst, src, size, divisor);                             \
2876      }                                                                       \
2877  } while (0)
2878
2879#ifndef mpn_modexact_1c_odd  /* if not done with cpuvec in a fat binary */
2880#define   mpn_modexact_1c_odd __MPN(modexact_1c_odd)
2881__GMP_DECLSPEC mp_limb_t mpn_modexact_1c_odd __GMP_PROTO ((mp_srcptr, mp_size_t, mp_limb_t, mp_limb_t)) __GMP_ATTRIBUTE_PURE;
2882#endif
2883
2884#if HAVE_NATIVE_mpn_modexact_1_odd
2885#define   mpn_modexact_1_odd  __MPN(modexact_1_odd)
2886__GMP_DECLSPEC mp_limb_t mpn_modexact_1_odd __GMP_PROTO ((mp_srcptr, mp_size_t, mp_limb_t)) __GMP_ATTRIBUTE_PURE;
2887#else
2888#define mpn_modexact_1_odd(src,size,divisor) \
2889  mpn_modexact_1c_odd (src, size, divisor, CNST_LIMB(0))
2890#endif
2891
2892#define MPN_MOD_OR_MODEXACT_1_ODD(src,size,divisor)			\
2893  (BELOW_THRESHOLD (size, BMOD_1_TO_MOD_1_THRESHOLD)			\
2894   ? mpn_modexact_1_odd (src, size, divisor)				\
2895   : mpn_mod_1 (src, size, divisor))
2896
2897/* binvert_limb() sets inv to the multiplicative inverse of n modulo
2898   2^GMP_NUMB_BITS, ie. satisfying inv*n == 1 mod 2^GMP_NUMB_BITS.
2899   n must be odd (otherwise such an inverse doesn't exist).
2900
2901   This is not to be confused with invert_limb(), which is completely
2902   different.
2903
2904   The table lookup gives an inverse with the low 8 bits valid, and each
2905   multiply step doubles the number of bits.  See Jebelean "An algorithm for
2906   exact division" end of section 4 (reference in gmp.texi).
2907
2908   Possible enhancement: Could use UHWtype until the last step, if half-size
2909   multiplies are faster (might help under _LONG_LONG_LIMB).
2910
2911   Alternative: As noted in Granlund and Montgomery "Division by Invariant
2912   Integers using Multiplication" (reference in gmp.texi), n itself gives a
2913   3-bit inverse immediately, and could be used instead of a table lookup.
2914   A 4-bit inverse can be obtained effectively from xoring bits 1 and 2 into
2915   bit 3, for instance with (((n + 2) & 4) << 1) ^ n.  */
2916
2917#define binvert_limb_table  __gmp_binvert_limb_table
2918__GMP_DECLSPEC extern const unsigned char  binvert_limb_table[128];
2919
2920#define binvert_limb(inv,n)						\
2921  do {									\
2922    mp_limb_t  __n = (n);						\
2923    mp_limb_t  __inv;							\
2924    ASSERT ((__n & 1) == 1);						\
2925									\
2926    __inv = binvert_limb_table[(__n/2) & 0x7F]; /*  8 */		\
2927    if (GMP_NUMB_BITS > 8)   __inv = 2 * __inv - __inv * __inv * __n;	\
2928    if (GMP_NUMB_BITS > 16)  __inv = 2 * __inv - __inv * __inv * __n;	\
2929    if (GMP_NUMB_BITS > 32)  __inv = 2 * __inv - __inv * __inv * __n;	\
2930									\
2931    if (GMP_NUMB_BITS > 64)						\
2932      {									\
2933	int  __invbits = 64;						\
2934	do {								\
2935	  __inv = 2 * __inv - __inv * __inv * __n;			\
2936	  __invbits *= 2;						\
2937	} while (__invbits < GMP_NUMB_BITS);				\
2938      }									\
2939									\
2940    ASSERT ((__inv * __n & GMP_NUMB_MASK) == 1);			\
2941    (inv) = __inv & GMP_NUMB_MASK;					\
2942  } while (0)
2943#define modlimb_invert binvert_limb  /* backward compatibility */
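
/* Worked example (illustrative): with GMP_NUMB_BITS == 32 and __n == 3,
   the table lookup yields the 8-bit inverse 0xAB (3 * 0xAB == 0x201, ie. 1
   mod 2^8); the two Newton steps __inv = 2*__inv - __inv*__inv*__n then
   give 0xAAAB (valid to 16 bits) and 0xAAAAAAAB (valid to 32 bits), in
   agreement with MODLIMB_INVERSE_3 below.  */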
2944
2945/* Multiplicative inverse of 3, modulo 2^GMP_NUMB_BITS.
2946   Eg. 0xAAAAAAAB for 32 bits, 0xAAAAAAAAAAAAAAAB for 64 bits.
2947   GMP_NUMB_MAX/3*2+1 is right when GMP_NUMB_BITS is even, but when it's odd
2948   we need to start from GMP_NUMB_MAX>>1. */
2949#define MODLIMB_INVERSE_3 (((GMP_NUMB_MAX >> (GMP_NUMB_BITS % 2)) / 3) * 2 + 1)
2950
2951/* ceil(GMP_NUMB_MAX/3) and ceil(2*GMP_NUMB_MAX/3).
2952   These expressions work because GMP_NUMB_MAX%3 != 0 for all GMP_NUMB_BITS. */
2953#define GMP_NUMB_CEIL_MAX_DIV3   (GMP_NUMB_MAX / 3 + 1)
2954#define GMP_NUMB_CEIL_2MAX_DIV3  ((GMP_NUMB_MAX>>1) / 3 + 1 + GMP_NUMB_HIGHBIT)
2955
2956
2957/* Set r to -a mod d.  a>=d is allowed.  Can give r>d.  All should be limbs.
2958
2959   It's not clear whether this is the best way to do this calculation.
2960   Anything congruent to -a would be fine for the one limb congruence
2961   tests.  */
2962
2963#define NEG_MOD(r, a, d)						\
2964  do {									\
2965    ASSERT ((d) != 0);							\
2966    ASSERT_LIMB (a);							\
2967    ASSERT_LIMB (d);							\
2968									\
2969    if ((a) <= (d))							\
2970      {									\
2971        /* small a is reasonably likely */				\
2972        (r) = (d) - (a);						\
2973      }									\
2974    else								\
2975      {									\
2976        unsigned   __twos;						\
2977        mp_limb_t  __dnorm;						\
2978        count_leading_zeros (__twos, d);				\
2979        __twos -= GMP_NAIL_BITS;					\
2980        __dnorm = (d) << __twos;					\
2981        (r) = ((a) <= __dnorm ? __dnorm : 2*__dnorm) - (a);		\
2982      }									\
2983									\
2984    ASSERT_LIMB (r);							\
2985  } while (0)
2986
2987/* A bit mask of all the least significant zero bits of n, or -1 if n==0. */
2988#define LOW_ZEROS_MASK(n)  (((n) & -(n)) - 1)
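
/* Eg. for n = binary ...xyz1000, n & -n isolates the lowest set bit and
   subtracting 1 leaves the mask 111 covering the low zero bits; for n == 0
   the result is 0 - 1, ie. all ones.  */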
2989
2990
2991/* ULONG_PARITY sets "p" to 1 if there's an odd number of 1 bits in "n", or
2992   to 0 if there's an even number.  "n" should be an unsigned long and "p"
2993   an int.  */
2994
2995#if defined (__GNUC__) && ! defined (NO_ASM) && HAVE_HOST_CPU_alpha_CIX
2996#define ULONG_PARITY(p, n)						\
2997  do {									\
2998    int __p;								\
2999    __asm__ ("ctpop %1, %0" : "=r" (__p) : "r" (n));			\
3000    (p) = __p & 1;							\
3001  } while (0)
3002#endif
3003
3004/* Cray intrinsic _popcnt. */
3005#ifdef _CRAY
3006#define ULONG_PARITY(p, n)      \
3007  do {                          \
3008    (p) = _popcnt (n) & 1;      \
3009  } while (0)
3010#endif
3011
3012#if defined (__GNUC__) && ! defined (__INTEL_COMPILER)			\
3013    && ! defined (NO_ASM) && defined (__ia64)
3014/* unsigned long is either 32 or 64 bits depending on the ABI; zero extend
3015   it to a 64 bit unsigned long long for popcnt.  */
3016#define ULONG_PARITY(p, n)						\
3017  do {									\
3018    unsigned long long  __n = (unsigned long) (n);			\
3019    int  __p;								\
3020    __asm__ ("popcnt %0 = %1" : "=r" (__p) : "r" (__n));		\
3021    (p) = __p & 1;							\
3022  } while (0)
3023#endif
3024
3025#if defined (__GNUC__) && ! defined (__INTEL_COMPILER)			\
3026    && ! defined (NO_ASM) && HAVE_HOST_CPU_FAMILY_x86
3027#if __GMP_GNUC_PREREQ (3,1)
3028#define __GMP_qm "=Qm"
3029#define __GMP_q "=Q"
3030#else
3031#define __GMP_qm "=qm"
3032#define __GMP_q "=q"
3033#endif
3034#define ULONG_PARITY(p, n)						\
3035  do {									\
3036    char	   __p;							\
3037    unsigned long  __n = (n);						\
3038    __n ^= (__n >> 16);							\
3039    __asm__ ("xorb %h1, %b1\n\t"					\
3040	     "setpo %0"							\
3041	 : __GMP_qm (__p), __GMP_q (__n)				\
3042	 : "1" (__n));							\
3043    (p) = __p;								\
3044  } while (0)
3045#endif
3046
3047#if ! defined (ULONG_PARITY)
3048#define ULONG_PARITY(p, n)						\
3049  do {									\
3050    unsigned long  __n = (n);						\
3051    int  __p = 0;							\
3052    do									\
3053      {									\
3054        __p ^= 0x96696996L >> (__n & 0x1F);				\
3055        __n >>= 5;							\
3056      }									\
3057    while (__n != 0);							\
3058									\
3059    (p) = __p & 1;							\
3060  } while (0)
3061#endif
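
/* In the generic version, bit i of the constant 0x96696996 is the parity
   of i for 0 <= i < 32, so each pass XORs the parity of the low five bits
   of __n into bit 0 of __p (the higher bits of __p are noise, masked off
   at the end).  Eg. __n == 7 selects bit 7, which is 1, matching the three
   set bits of 7.  */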
3062
3063
3064/* 3 cycles on 604 or 750 since shifts and rlwimi's can pair.  gcc (as of
3065   version 3.1 at least) doesn't seem to know how to generate rlwimi for
3066   anything other than bit-fields, so use "asm".  */
3067#if defined (__GNUC__) && ! defined (NO_ASM)                    \
3068  && HAVE_HOST_CPU_FAMILY_powerpc && GMP_LIMB_BITS == 32
3069#define BSWAP_LIMB(dst, src)						\
3070  do {									\
3071    mp_limb_t  __bswapl_src = (src);					\
3072    mp_limb_t  __tmp1 = __bswapl_src >> 24;		/* low byte */	\
3073    mp_limb_t  __tmp2 = __bswapl_src << 24;		/* high byte */	\
3074    __asm__ ("rlwimi %0, %2, 24, 16, 23"		/* 2nd low */	\
3075	 : "=r" (__tmp1) : "0" (__tmp1), "r" (__bswapl_src));		\
3076    __asm__ ("rlwimi %0, %2,  8,  8, 15"		/* 3nd high */	\
3077	 : "=r" (__tmp2) : "0" (__tmp2), "r" (__bswapl_src));		\
3078    (dst) = __tmp1 | __tmp2;				/* whole */	\
3079  } while (0)
3080#endif
3081
3082/* bswap is available on i486 and up and is fast.  A combination rorw $8 /
3083   roll $16 / rorw $8 is used in glibc for plain i386 (and in the linux
3084   kernel with xchgb instead of rorw), but this is not done here, because
3085   i386 means generic x86 and mixing word and dword operations will cause
3086   partial register stalls on P6 chips.  */
3087#if defined (__GNUC__) && ! defined (NO_ASM)            \
3088  && HAVE_HOST_CPU_FAMILY_x86 && ! HAVE_HOST_CPU_i386   \
3089  && GMP_LIMB_BITS == 32
3090#define BSWAP_LIMB(dst, src)						\
3091  do {									\
3092    __asm__ ("bswap %0" : "=r" (dst) : "0" (src));			\
3093  } while (0)
3094#endif
3095
3096#if defined (__GNUC__) && ! defined (NO_ASM)            \
3097  && defined (__amd64__) && GMP_LIMB_BITS == 64
3098#define BSWAP_LIMB(dst, src)						\
3099  do {									\
3100    __asm__ ("bswap %q0" : "=r" (dst) : "0" (src));			\
3101  } while (0)
3102#endif
3103
3104#if defined (__GNUC__) && ! defined (__INTEL_COMPILER)			\
3105    && ! defined (NO_ASM) && defined (__ia64) && GMP_LIMB_BITS == 64
3106#define BSWAP_LIMB(dst, src)						\
3107  do {									\
3108    __asm__ ("mux1 %0 = %1, @rev" : "=r" (dst) :  "r" (src));		\
3109  } while (0)
3110#endif
3111
3112/* As per glibc. */
3113#if defined (__GNUC__) && ! defined (NO_ASM)                    \
3114  && HAVE_HOST_CPU_FAMILY_m68k && GMP_LIMB_BITS == 32
3115#define BSWAP_LIMB(dst, src)						\
3116  do {									\
3117    mp_limb_t  __bswapl_src = (src);					\
3118    __asm__ ("ror%.w %#8, %0\n\t"					\
3119	     "swap   %0\n\t"						\
3120	     "ror%.w %#8, %0"						\
3121	     : "=d" (dst)						\
3122	     : "0" (__bswapl_src));					\
3123  } while (0)
3124#endif
3125
3126#if ! defined (BSWAP_LIMB)
3127#if GMP_LIMB_BITS == 8
3128#define BSWAP_LIMB(dst, src)            \
3129  do { (dst) = (src); } while (0)
3130#endif
3131#if GMP_LIMB_BITS == 16
3132#define BSWAP_LIMB(dst, src)                    \
3133  do {                                          \
3134    (dst) = ((src) << 8) + ((src) >> 8);        \
3135  } while (0)
3136#endif
3137#if GMP_LIMB_BITS == 32
3138#define BSWAP_LIMB(dst, src)    \
3139  do {                          \
3140    (dst) =                     \
3141      ((src) << 24)             \
3142      + (((src) & 0xFF00) << 8) \
3143      + (((src) >> 8) & 0xFF00) \
3144      + ((src) >> 24);          \
3145  } while (0)
3146#endif
3147#if GMP_LIMB_BITS == 64
3148#define BSWAP_LIMB(dst, src)            \
3149  do {                                  \
3150    (dst) =                             \
3151      ((src) << 56)                     \
3152      + (((src) & 0xFF00) << 40)        \
3153      + (((src) & 0xFF0000) << 24)      \
3154      + (((src) & 0xFF000000) << 8)     \
3155      + (((src) >> 8) & 0xFF000000)     \
3156      + (((src) >> 24) & 0xFF0000)      \
3157      + (((src) >> 40) & 0xFF00)        \
3158      + ((src) >> 56);                  \
3159  } while (0)
3160#endif
3161#endif
3162
3163#if ! defined (BSWAP_LIMB)
3164#define BSWAP_LIMB(dst, src)                            \
3165  do {                                                  \
3166    mp_limb_t  __bswapl_src = (src);                    \
3167    mp_limb_t  __dst = 0;                               \
3168    int        __i;                                     \
3169    for (__i = 0; __i < BYTES_PER_MP_LIMB; __i++)       \
3170      {                                                 \
3171        __dst = (__dst << 8) | (__bswapl_src & 0xFF);   \
3172        __bswapl_src >>= 8;                             \
3173      }                                                 \
3174    (dst) = __dst;                                      \
3175  } while (0)
3176#endif
3177
3178
3179/* Apparently lwbrx might be slow on some PowerPC chips, so restrict it to
3180   those we know are fast.  */
3181#if defined (__GNUC__) && ! defined (NO_ASM)                            \
3182  && GMP_LIMB_BITS == 32 && HAVE_LIMB_BIG_ENDIAN                        \
3183  && (HAVE_HOST_CPU_powerpc604                                          \
3184      || HAVE_HOST_CPU_powerpc604e                                      \
3185      || HAVE_HOST_CPU_powerpc750                                       \
3186      || HAVE_HOST_CPU_powerpc7400)
3187#define BSWAP_LIMB_FETCH(limb, src)					\
3188  do {									\
3189    mp_srcptr  __blf_src = (src);					\
3190    mp_limb_t  __limb;							\
3191    __asm__ ("lwbrx %0, 0, %1"						\
3192	     : "=r" (__limb)						\
3193	     : "r" (__blf_src),						\
3194	       "m" (*__blf_src));					\
3195    (limb) = __limb;							\
3196  } while (0)
3197#endif
3198
3199#if ! defined (BSWAP_LIMB_FETCH)
3200#define BSWAP_LIMB_FETCH(limb, src)  BSWAP_LIMB (limb, *(src))
3201#endif
3202
3203
3204/* On the same basis that lwbrx might be slow, restrict stwbrx to those we
3205   know are fast.  FIXME: Is this necessary?  */
3206#if defined (__GNUC__) && ! defined (NO_ASM)                            \
3207  && GMP_LIMB_BITS == 32 && HAVE_LIMB_BIG_ENDIAN                        \
3208  && (HAVE_HOST_CPU_powerpc604                                          \
3209      || HAVE_HOST_CPU_powerpc604e                                      \
3210      || HAVE_HOST_CPU_powerpc750                                       \
3211      || HAVE_HOST_CPU_powerpc7400)
3212#define BSWAP_LIMB_STORE(dst, limb)					\
3213  do {									\
3214    mp_ptr     __dst = (dst);						\
3215    mp_limb_t  __limb = (limb);						\
3216    __asm__ ("stwbrx %1, 0, %2"						\
3217	     : "=m" (*__dst)						\
3218	     : "r" (__limb),						\
3219	       "r" (__dst));						\
3220  } while (0)
3221#endif
3222
3223#if ! defined (BSWAP_LIMB_STORE)
3224#define BSWAP_LIMB_STORE(dst, limb)  BSWAP_LIMB (*(dst), limb)
3225#endif
3226
3227
3228/* Byte swap limbs from {src,size} and store at {dst,size}. */
3229#define MPN_BSWAP(dst, src, size)                       \
3230  do {                                                  \
3231    mp_ptr     __dst = (dst);                           \
3232    mp_srcptr  __src = (src);                           \
3233    mp_size_t  __size = (size);                         \
3234    mp_size_t  __i;                                     \
3235    ASSERT ((size) >= 0);                               \
3236    ASSERT (MPN_SAME_OR_SEPARATE_P (dst, src, size));   \
3237    CRAY_Pragma ("_CRI ivdep");                         \
3238    for (__i = 0; __i < __size; __i++)                  \
3239      {                                                 \
3240        BSWAP_LIMB_FETCH (*__dst, __src);               \
3241        __dst++;                                        \
3242        __src++;                                        \
3243      }                                                 \
3244  } while (0)
3245
3246/* Byte swap limbs from {src,size} and store in reverse order at {dst,size}. */
3247#define MPN_BSWAP_REVERSE(dst, src, size)               \
3248  do {                                                  \
3249    mp_ptr     __dst = (dst);                           \
3250    mp_size_t  __size = (size);                         \
3251    mp_srcptr  __src = (src) + __size - 1;              \
3252    mp_size_t  __i;                                     \
3253    ASSERT ((size) >= 0);                               \
3254    ASSERT (! MPN_OVERLAP_P (dst, size, src, size));    \
3255    CRAY_Pragma ("_CRI ivdep");                         \
3256    for (__i = 0; __i < __size; __i++)                  \
3257      {                                                 \
3258        BSWAP_LIMB_FETCH (*__dst, __src);               \
3259        __dst++;                                        \
3260        __src--;                                        \
3261      }                                                 \
3262  } while (0)
3263
3264
3265/* No processor claiming to be SPARC v9 compliant seems to
3266   implement the POPC instruction.  Disable pattern for now.  */
3267#if 0
3268#if defined __GNUC__ && defined __sparc_v9__ && GMP_LIMB_BITS == 64
3269#define popc_limb(result, input)					\
3270  do {									\
3271    DItype __res;							\
3272    __asm__ ("popc %1,%0" : "=r" (result) : "rI" (input));		\
3273  } while (0)
3274#endif
3275#endif
3276
3277#if defined (__GNUC__) && ! defined (NO_ASM) && HAVE_HOST_CPU_alpha_CIX
3278#define popc_limb(result, input)					\
3279  do {									\
3280    __asm__ ("ctpop %1, %0" : "=r" (result) : "r" (input));		\
3281  } while (0)
3282#endif
3283
3284/* Cray intrinsic. */
3285#ifdef _CRAY
3286#define popc_limb(result, input)        \
3287  do {                                  \
3288    (result) = _popcnt (input);         \
3289  } while (0)
3290#endif
3291
3292#if defined (__GNUC__) && ! defined (__INTEL_COMPILER)			\
3293    && ! defined (NO_ASM) && defined (__ia64) && GMP_LIMB_BITS == 64
3294#define popc_limb(result, input)					\
3295  do {									\
3296    __asm__ ("popcnt %0 = %1" : "=r" (result) : "r" (input));		\
3297  } while (0)
3298#endif
3299
3300/* Cool population count of an mp_limb_t.
3301   You have to figure out how this works; we won't tell you!
3302
3303   The constants could also be expressed as:
3304     0x55... = [2^N / 3]     = [(2^N-1)/3]
3305     0x33... = [2^N / 5]     = [(2^N-1)/5]
3306     0x0f... = [2^N / 17]    = [(2^N-1)/17]
3307     (N is GMP_LIMB_BITS, [] denotes truncation.) */
3308
3309#if ! defined (popc_limb) && GMP_LIMB_BITS == 8
3310#define popc_limb(result, input)					\
3311  do {									\
3312    mp_limb_t  __x = (input);						\
3313    __x -= (__x >> 1) & MP_LIMB_T_MAX/3;				\
3314    __x = ((__x >> 2) & MP_LIMB_T_MAX/5) + (__x & MP_LIMB_T_MAX/5);	\
3315    __x = ((__x >> 4) + __x) & MP_LIMB_T_MAX/17;			\
3316    (result) = __x & 0xff;						\
3317  } while (0)
3318#endif
3319
3320#if ! defined (popc_limb) && GMP_LIMB_BITS == 16
3321#define popc_limb(result, input)					\
3322  do {									\
3323    mp_limb_t  __x = (input);						\
3324    __x -= (__x >> 1) & MP_LIMB_T_MAX/3;				\
3325    __x = ((__x >> 2) & MP_LIMB_T_MAX/5) + (__x & MP_LIMB_T_MAX/5);	\
3326    __x = ((__x >> 4) + __x) & MP_LIMB_T_MAX/17;			\
3327    __x = ((__x >> 8) + __x);						\
3328    (result) = __x & 0xff;						\
3329  } while (0)
3330#endif
3331
3332#if ! defined (popc_limb) && GMP_LIMB_BITS == 32
3333#define popc_limb(result, input)					\
3334  do {									\
3335    mp_limb_t  __x = (input);						\
3336    __x -= (__x >> 1) & MP_LIMB_T_MAX/3;				\
3337    __x = ((__x >> 2) & MP_LIMB_T_MAX/5) + (__x & MP_LIMB_T_MAX/5);	\
3338    __x = ((__x >> 4) + __x) & MP_LIMB_T_MAX/17;			\
3339    __x = ((__x >> 8) + __x);						\
3340    __x = ((__x >> 16) + __x);						\
3341    (result) = __x & 0xff;						\
3342  } while (0)
3343#endif
3344
3345#if ! defined (popc_limb) && GMP_LIMB_BITS == 64
3346#define popc_limb(result, input)					\
3347  do {									\
3348    mp_limb_t  __x = (input);						\
3349    __x -= (__x >> 1) & MP_LIMB_T_MAX/3;				\
3350    __x = ((__x >> 2) & MP_LIMB_T_MAX/5) + (__x & MP_LIMB_T_MAX/5);	\
3351    __x = ((__x >> 4) + __x) & MP_LIMB_T_MAX/17;			\
3352    __x = ((__x >> 8) + __x);						\
3353    __x = ((__x >> 16) + __x);						\
3354    __x = ((__x >> 32) + __x);						\
3355    (result) = __x & 0xff;						\
3356  } while (0)
3357#endif
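
/* Worked example (32-bit limbs): the constants are 0x55555555, 0x33333333
   and 0x0F0F0F0F.  Starting from __x = 0xFFFFFFFF the steps give
   0xAAAAAAAA (2 per 2-bit field), 0x44444444 (4 per nibble), 0x08080808
   (8 per byte), then the byte sums 0x08101010 and 0x08101820, whose low
   byte is 0x20, the expected count of 32.  */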
3358
3359
3360/* Define stuff for longlong.h.  */
3361#if HAVE_ATTRIBUTE_MODE
3362typedef unsigned int UQItype	__attribute__ ((mode (QI)));
3363typedef		 int SItype	__attribute__ ((mode (SI)));
3364typedef unsigned int USItype	__attribute__ ((mode (SI)));
3365typedef		 int DItype	__attribute__ ((mode (DI)));
3366typedef unsigned int UDItype	__attribute__ ((mode (DI)));
3367#else
3368typedef unsigned char UQItype;
3369typedef		 long SItype;
3370typedef unsigned long USItype;
3371#if HAVE_LONG_LONG
3372typedef	long long int DItype;
3373typedef unsigned long long int UDItype;
3374#else /* Assume `long' gives us a wide enough type.  Needed for hppa2.0w.  */
3375typedef long int DItype;
3376typedef unsigned long int UDItype;
3377#endif
3378#endif
3379
3380typedef mp_limb_t UWtype;
3381typedef unsigned int UHWtype;
3382#define W_TYPE_SIZE GMP_LIMB_BITS
3383
3384/* Define ieee_double_extract and _GMP_IEEE_FLOATS.
3385
3386   Bit field packing is "implementation defined" according to C99, which
3387   leaves us at the compiler's mercy here.  For some systems packing is
3388   defined in the ABI (eg. x86).  In any case so far it seems universal that
3389   little endian systems pack from low to high, and big endian from high to
3390   low within the given type.
3391
3392   Within the fields we rely on the integer endianness being the same as the
3393   float endianness, this is true everywhere we know of and it'd be a fairly
3394   strange system that did anything else.  */
3395
3396#if HAVE_DOUBLE_IEEE_LITTLE_SWAPPED
3397#define _GMP_IEEE_FLOATS 1
3398union ieee_double_extract
3399{
3400  struct
3401    {
3402      gmp_uint_least32_t manh:20;
3403      gmp_uint_least32_t exp:11;
3404      gmp_uint_least32_t sig:1;
3405      gmp_uint_least32_t manl:32;
3406    } s;
3407  double d;
3408};
3409#endif
3410
3411#if HAVE_DOUBLE_IEEE_LITTLE_ENDIAN
3412#define _GMP_IEEE_FLOATS 1
3413union ieee_double_extract
3414{
3415  struct
3416    {
3417      gmp_uint_least32_t manl:32;
3418      gmp_uint_least32_t manh:20;
3419      gmp_uint_least32_t exp:11;
3420      gmp_uint_least32_t sig:1;
3421    } s;
3422  double d;
3423};
3424#endif
3425
3426#if HAVE_DOUBLE_IEEE_BIG_ENDIAN
3427#define _GMP_IEEE_FLOATS 1
3428union ieee_double_extract
3429{
3430  struct
3431    {
3432      gmp_uint_least32_t sig:1;
3433      gmp_uint_least32_t exp:11;
3434      gmp_uint_least32_t manh:20;
3435      gmp_uint_least32_t manl:32;
3436    } s;
3437  double d;
3438};
3439#endif
3440
3441
3442/* Use (4.0 * ...) instead of (2.0 * ...) to work around buggy compilers
3443   that don't convert ulong->double correctly (eg. SunOS 4 native cc).  */
3444#define MP_BASE_AS_DOUBLE (4.0 * ((mp_limb_t) 1 << (GMP_NUMB_BITS - 2)))
3445/* Maximum number of limbs it will take to store any `double'.
3446   We assume doubles have 53 mantissa bits.  */
3447#define LIMBS_PER_DOUBLE ((53 + GMP_NUMB_BITS - 2) / GMP_NUMB_BITS + 1)
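
/* Eg. with 64-bit limbs and no nails LIMBS_PER_DOUBLE is (53+62)/64 + 1 = 2,
   and with 32-bit limbs (53+30)/32 + 1 = 3.  */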
3448
3449__GMP_DECLSPEC int __gmp_extract_double __GMP_PROTO ((mp_ptr, double));
3450
3451#define mpn_get_d __gmpn_get_d
3452__GMP_DECLSPEC double mpn_get_d __GMP_PROTO ((mp_srcptr, mp_size_t, mp_size_t, long)) __GMP_ATTRIBUTE_PURE;
3453
3454
3455/* DOUBLE_NAN_INF_ACTION executes code a_nan if x is a NaN, or executes
3456   a_inf if x is an infinity.  Both are considered unlikely values, for
3457   branch prediction.  */
3458
3459#if _GMP_IEEE_FLOATS
3460#define DOUBLE_NAN_INF_ACTION(x, a_nan, a_inf)  \
3461  do {                                          \
3462    union ieee_double_extract  u;               \
3463    u.d = (x);                                  \
3464    if (UNLIKELY (u.s.exp == 0x7FF))            \
3465      {                                         \
3466        if (u.s.manl == 0 && u.s.manh == 0)     \
3467          { a_inf; }                            \
3468        else                                    \
3469          { a_nan; }                            \
3470      }                                         \
3471  } while (0)
3472#endif
3473
3474#if HAVE_DOUBLE_VAX_D || HAVE_DOUBLE_VAX_G || HAVE_DOUBLE_CRAY_CFP
3475/* no nans or infs in these formats */
3476#define DOUBLE_NAN_INF_ACTION(x, a_nan, a_inf)  \
3477  do { } while (0)
3478#endif
3479
3480#ifndef DOUBLE_NAN_INF_ACTION
3481/* Unknown format, try something generic.
3482   NaN should be "unordered", so x!=x.
3483   Inf should be bigger than DBL_MAX.  */
3484#define DOUBLE_NAN_INF_ACTION(x, a_nan, a_inf)                  \
3485  do {                                                          \
3486    {                                                           \
3487      if (UNLIKELY ((x) != (x)))                                \
3488        { a_nan; }                                              \
3489      else if (UNLIKELY ((x) > DBL_MAX || (x) < -DBL_MAX))      \
3490        { a_inf; }                                              \
3491    }                                                           \
3492  } while (0)
3493#endif
3494
3495/* On m68k, x86 and amd64, gcc (and maybe other compilers) can hold doubles
3496   in the coprocessor, which means a bigger exponent range than normal, and
3497   depending on the rounding mode, a bigger mantissa than normal.  (See
3498   "Disappointments" in the gcc manual.)  FORCE_DOUBLE stores and fetches
3499   "d" through memory to force any rounding and overflows to occur.
3500
3501   On amd64, and on x86s with SSE2, gcc (depending on options) uses the xmm
3502   registers, where there's no such extra precision and no need for the
3503   FORCE_DOUBLE.  We don't bother to detect this since the present uses for
3504   FORCE_DOUBLE are only in test programs and default generic C code.
3505
3506   Not quite sure that an "automatic volatile" will use memory, but it does
3507   in gcc.  An asm("":"=m"(d):"0"(d)) can't be used to trick gcc, since
3508   apparently matching operands like "0" are only allowed on a register
3509   output.  gcc 3.4 warns about this, though in fact it and past versions
3510   seem to put the operand through memory as hoped.  */
3511
3512#if (HAVE_HOST_CPU_FAMILY_m68k || HAVE_HOST_CPU_FAMILY_x86      \
3513     || defined (__amd64__))
3514#define FORCE_DOUBLE(d) \
3515  do { volatile double __gmp_force = (d); (d) = __gmp_force; } while (0)
3516#else
3517#define FORCE_DOUBLE(d)  do { } while (0)
3518#endif
3519
3520
3521__GMP_DECLSPEC extern int __gmp_junk;
3522__GMP_DECLSPEC extern const int __gmp_0;
3523__GMP_DECLSPEC void __gmp_exception __GMP_PROTO ((int)) ATTRIBUTE_NORETURN;
3524__GMP_DECLSPEC void __gmp_divide_by_zero __GMP_PROTO ((void)) ATTRIBUTE_NORETURN;
3525__GMP_DECLSPEC void __gmp_sqrt_of_negative __GMP_PROTO ((void)) ATTRIBUTE_NORETURN;
3526__GMP_DECLSPEC void __gmp_invalid_operation __GMP_PROTO ((void)) ATTRIBUTE_NORETURN;
3527#define GMP_ERROR(code)   __gmp_exception (code)
3528#define DIVIDE_BY_ZERO    __gmp_divide_by_zero ()
3529#define SQRT_OF_NEGATIVE  __gmp_sqrt_of_negative ()
3530
3531#if defined _LONG_LONG_LIMB
3532#if __GMP_HAVE_TOKEN_PASTE
3533#define CNST_LIMB(C) ((mp_limb_t) C##LL)
3534#else
3535#define CNST_LIMB(C) ((mp_limb_t) C/**/LL)
3536#endif
3537#else /* not _LONG_LONG_LIMB */
3538#if __GMP_HAVE_TOKEN_PASTE
3539#define CNST_LIMB(C) ((mp_limb_t) C##L)
3540#else
3541#define CNST_LIMB(C) ((mp_limb_t) C/**/L)
3542#endif
3543#endif /* _LONG_LONG_LIMB */
3544
3545/* Stuff used by mpn/generic/perfsqr.c and mpz/prime_p.c */
3546#if GMP_NUMB_BITS == 2
3547#define PP 0x3					/* 3 */
3548#define PP_FIRST_OMITTED 5
3549#endif
3550#if GMP_NUMB_BITS == 4
3551#define PP 0xF					/* 3 x 5 */
3552#define PP_FIRST_OMITTED 7
3553#endif
3554#if GMP_NUMB_BITS == 8
3555#define PP 0x69					/* 3 x 5 x 7 */
3556#define PP_FIRST_OMITTED 11
3557#endif
3558#if GMP_NUMB_BITS == 16
3559#define PP 0x3AA7				/* 3 x 5 x 7 x 11 x 13 */
3560#define PP_FIRST_OMITTED 17
3561#endif
3562#if GMP_NUMB_BITS == 32
3563#define PP 0xC0CFD797L				/* 3 x 5 x 7 x 11 x ... x 29 */
3564#define PP_INVERTED 0x53E5645CL
3565#define PP_FIRST_OMITTED 31
3566#endif
3567#if GMP_NUMB_BITS == 64
3568#define PP CNST_LIMB(0xE221F97C30E94E1D)	/* 3 x 5 x 7 x 11 x ... x 53 */
3569#define PP_INVERTED CNST_LIMB(0x21CFE6CFC938B36B)
3570#define PP_FIRST_OMITTED 59
3571#endif
3572#ifndef PP_FIRST_OMITTED
3573#define PP_FIRST_OMITTED 3
3574#endif
3575
3576
3577
3578/* BIT1 means a result value in bit 1 (second least significant bit), with a
3579   zero bit representing +1 and a one bit representing -1.  Bits other than
3580   bit 1 are garbage.  These are meant to be kept in "int"s, and casts are
3581   used to ensure the expressions are "int"s even if a and/or b might be
3582   other types.
3583
3584   JACOBI_TWOS_U_BIT1 and JACOBI_RECIP_UU_BIT1 are used in mpn_jacobi_base
3585   and their speed is important.  Expressions are used rather than
3586   conditionals to accumulate sign changes, which effectively means XORs
3587   instead of conditional JUMPs. */
3588
3589/* (a/0), with a signed; is 1 if a=+/-1, 0 otherwise */
3590#define JACOBI_S0(a)   (((a) == 1) | ((a) == -1))
3591
3592/* (a/0), with a unsigned; is 1 if a=+/-1, 0 otherwise */
3593#define JACOBI_U0(a)   ((a) == 1)
3594
3595/* (a/0), with a given by low and size;
3596   is 1 if a=+/-1, 0 otherwise */
3597#define JACOBI_LS0(alow,asize) \
3598  (((asize) == 1 || (asize) == -1) && (alow) == 1)
3599
3600/* (a/0), with a an mpz_t;
3601   fetch of low limb always valid, even if size is zero */
3602#define JACOBI_Z0(a)   JACOBI_LS0 (PTR(a)[0], SIZ(a))
3603
3604/* (0/b), with b unsigned; is 1 if b=1, 0 otherwise */
3605#define JACOBI_0U(b)   ((b) == 1)
3606
3607/* (0/b), with b unsigned; is 1 if b=+/-1, 0 otherwise */
3608#define JACOBI_0S(b)   ((b) == 1 || (b) == -1)
3609
3610/* (0/b), with b given by low and size; is 1 if b=+/-1, 0 otherwise */
3611#define JACOBI_0LS(blow,bsize) \
3612  (((bsize) == 1 || (bsize) == -1) && (blow) == 1)
3613
3614/* Convert a bit1 to +1 or -1. */
3615#define JACOBI_BIT1_TO_PN(result_bit1) \
3616  (1 - ((int) (result_bit1) & 2))
3617
3618/* (2/b), with b unsigned and odd;
3619   is (-1)^((b^2-1)/8) which is 1 if b==1,7mod8 or -1 if b==3,5mod8 and
3620   hence obtained from (b>>1)^b */
3621#define JACOBI_TWO_U_BIT1(b) \
3622  ((int) (((b) >> 1) ^ (b)))
3623
3624/* (2/b)^twos, with b unsigned and odd */
3625#define JACOBI_TWOS_U_BIT1(twos, b) \
3626  ((int) ((twos) << 1) & JACOBI_TWO_U_BIT1 (b))
3627
3628/* (2/b)^twos, with b unsigned and odd */
3629#define JACOBI_TWOS_U(twos, b) \
3630  (JACOBI_BIT1_TO_PN (JACOBI_TWOS_U_BIT1 (twos, b)))
3631
3632/* (-1/b), with b odd (signed or unsigned);
3633   is (-1)^((b-1)/2) */
3634#define JACOBI_N1B_BIT1(b) \
3635  ((int) (b))
3636
3637/* (a/b) effect due to sign of a: signed/unsigned, b odd;
3638   is (-1/b) if a<0, or +1 if a>=0 */
3639#define JACOBI_ASGN_SU_BIT1(a, b) \
3640  ((((a) < 0) << 1) & JACOBI_N1B_BIT1(b))
3641
3642/* (a/b) effect due to sign of b: signed/signed;
3643   is -1 if a and b both negative, +1 otherwise */
3644#define JACOBI_BSGN_SS_BIT1(a, b) \
3645  ((((a)<0) & ((b)<0)) << 1)
3646
3647/* (a/b) effect due to sign of b: signed/mpz;
3648   is -1 if a and b both negative, +1 otherwise */
3649#define JACOBI_BSGN_SZ_BIT1(a, b) \
3650  JACOBI_BSGN_SS_BIT1 (a, SIZ(b))
3651
3652/* (a/b) effect due to sign of b: mpz/signed;
3653   is -1 if a and b both negative, +1 otherwise */
3654#define JACOBI_BSGN_ZS_BIT1(a, b) \
3655  JACOBI_BSGN_SZ_BIT1 (b, a)
3656
3657/* (a/b) reciprocity to switch to (b/a), a,b both unsigned and odd;
3658   is (-1)^((a-1)*(b-1)/4), which means +1 if either a,b==1mod4, or -1 if
3659   both a,b==3mod4, achieved in bit 1 by a&b.  No ASSERT()s about a,b odd
3660   because this is used in a couple of places with only bit 1 of a or b
3661   valid. */
3662#define JACOBI_RECIP_UU_BIT1(a, b) \
3663  ((int) ((a) & (b)))
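
/* Worked example (illustrative): for a=3, b=7 both are 3 mod 4, so the
   reciprocity factor is -1; JACOBI_RECIP_UU_BIT1 (3, 7) is 3, which has
   bit 1 set, and JACOBI_BIT1_TO_PN (3) is 1 - (3 & 2) = -1.  For a=5, b=7
   only b is 3 mod 4, the factor is +1, and 5 & 7 = 5 has bit 1 clear.  */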
3664
3665/* Strip low zero limbs from {b_ptr,b_size} by incrementing b_ptr and
3666   decrementing b_size.  b_low should be b_ptr[0] on entry, and will be
3667   updated for the new b_ptr.  result_bit1 is updated according to the
3668   factors of 2 stripped, as per (a/2).  */
3669#define JACOBI_STRIP_LOW_ZEROS(result_bit1, a, b_ptr, b_size, b_low)    \
3670  do {                                                                  \
3671    ASSERT ((b_size) >= 1);                                             \
3672    ASSERT ((b_low) == (b_ptr)[0]);                                     \
3673                                                                        \
3674    while (UNLIKELY ((b_low) == 0))                                     \
3675      {                                                                 \
3676        (b_size)--;                                                     \
3677        ASSERT ((b_size) >= 1);                                         \
3678        (b_ptr)++;                                                      \
3679        (b_low) = *(b_ptr);                                             \
3680                                                                        \
3681        ASSERT (((a) & 1) != 0);                                        \
3682        if ((GMP_NUMB_BITS % 2) == 1)                                   \
3683          (result_bit1) ^= JACOBI_TWO_U_BIT1(a);                        \
3684      }                                                                 \
3685  } while (0)
3686
3687/* Set a_rem to {a_ptr,a_size} reduced modulo b, either using mod_1 or
3688   modexact_1_odd, but in either case leaving a_rem<b.  b must be odd and
3689   unsigned.  modexact_1_odd effectively calculates -a mod b, and
3690   result_bit1 is adjusted for the factor of -1.
3691
3692   The way mpn_modexact_1_odd sometimes bases its remainder on a_size and
3693   sometimes on a_size-1 means if GMP_NUMB_BITS is odd we can't know what
3694   factor to introduce into result_bit1, so for that case use mpn_mod_1
3695   unconditionally.
3696
3697   FIXME: mpn_modexact_1_odd is more efficient, so some way to get it used
3698   for odd GMP_NUMB_BITS would be good.  Perhaps it could mung its result,
3699   or not skip a divide step, or something. */
3700
3701#define JACOBI_MOD_OR_MODEXACT_1_ODD(result_bit1, a_rem, a_ptr, a_size, b) \
3702  do {                                                                     \
3703    mp_srcptr  __a_ptr  = (a_ptr);                                         \
3704    mp_size_t  __a_size = (a_size);                                        \
3705    mp_limb_t  __b      = (b);                                             \
3706                                                                           \
3707    ASSERT (__a_size >= 1);                                                \
3708    ASSERT (__b & 1);                                                      \
3709                                                                           \
3710    if ((GMP_NUMB_BITS % 2) != 0                                           \
3711        || ABOVE_THRESHOLD (__a_size, BMOD_1_TO_MOD_1_THRESHOLD))          \
3712      {                                                                    \
3713        (a_rem) = mpn_mod_1 (__a_ptr, __a_size, __b);                      \
3714      }                                                                    \
3715    else                                                                   \
3716      {                                                                    \
3717        (result_bit1) ^= JACOBI_N1B_BIT1 (__b);                            \
3718        (a_rem) = mpn_modexact_1_odd (__a_ptr, __a_size, __b);             \
3719      }                                                                    \
3720  } while (0)
3721
3722/* Matrix multiplication */
3723#define   mpn_matrix22_mul __MPN(matrix22_mul)
3724__GMP_DECLSPEC void      mpn_matrix22_mul __GMP_PROTO ((mp_ptr, mp_ptr, mp_ptr, mp_ptr, mp_size_t, mp_srcptr, mp_srcptr, mp_srcptr, mp_srcptr, mp_size_t, mp_ptr));
3725#define   mpn_matrix22_mul_strassen __MPN(matrix22_mul_strassen)
3726__GMP_DECLSPEC void      mpn_matrix22_mul_strassen __GMP_PROTO ((mp_ptr, mp_ptr, mp_ptr, mp_ptr, mp_size_t, mp_srcptr, mp_srcptr, mp_srcptr, mp_srcptr, mp_size_t, mp_ptr));
3727#define   mpn_matrix22_mul_itch __MPN(matrix22_mul_itch)
3728__GMP_DECLSPEC mp_size_t mpn_matrix22_mul_itch __GMP_PROTO ((mp_size_t, mp_size_t));
3729
3730#ifndef MATRIX22_STRASSEN_THRESHOLD
3731#define MATRIX22_STRASSEN_THRESHOLD 30
3732#endif
3733
3734/* HGCD definitions */
3735
3736/* Extract one numb, shifting count bits left
3737    ________  ________
3738   |___xh___||___xl___|
3739	  |____r____|
3740   >count <
3741
3742   The count includes any nail bits, so it should work fine if count
3743   is computed using count_leading_zeros. If GMP_NAIL_BITS > 0, all of
3744   xh, xl and r include nail bits. Must have 0 < count < GMP_LIMB_BITS.
3745
3746   FIXME: Omit masking with GMP_NUMB_MASK, and let callers do that for
3747   those calls where the count high bits of xh may be non-zero.
3748*/
3749
3750#define MPN_EXTRACT_NUMB(count, xh, xl)				\
3751  ((((xh) << ((count) - GMP_NAIL_BITS)) & GMP_NUMB_MASK) |	\
3752   ((xl) >> (GMP_LIMB_BITS - (count))))
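
/* Worked example (no nails, 32-bit limbs): with xh = 0x000000AB,
   xl = 0xCDEF0123 and count = 24 (the leading zero count of xh),
   MPN_EXTRACT_NUMB gives (0xAB << 24) | (0xCDEF0123 >> 8) = 0xABCDEF01,
   ie. the 32 most significant bits of the two-limb value xh:xl.  */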
3753
3754
3755/* The non-negative matrix M = (u, u'; v, v') keeps track of the
3756   reduction (a;b) = M (alpha; beta) where alpha, beta are smaller
3757   than a, b. The determinant must always be one, so that M has an
3758   inverse (v', -u'; -v, u). Elements always fit in GMP_NUMB_BITS - 1
3759   bits. */
3760struct hgcd_matrix1
3761{
3762  mp_limb_t u[2][2];
3763};
3764
3765#define mpn_hgcd2 __MPN (hgcd2)
3766__GMP_DECLSPEC int mpn_hgcd2 __GMP_PROTO ((mp_limb_t, mp_limb_t, mp_limb_t, mp_limb_t,	struct hgcd_matrix1 *));
3767
3768#define mpn_hgcd_mul_matrix1_vector __MPN (hgcd_mul_matrix1_vector)
3769__GMP_DECLSPEC mp_size_t mpn_hgcd_mul_matrix1_vector __GMP_PROTO ((const struct hgcd_matrix1 *, mp_ptr, mp_srcptr, mp_ptr, mp_size_t));
3770
3771#define mpn_hgcd_mul_matrix1_inverse_vector __MPN (hgcd_mul_matrix1_inverse_vector)
3772__GMP_DECLSPEC mp_size_t mpn_hgcd_mul_matrix1_inverse_vector __GMP_PROTO ((const struct hgcd_matrix1 *, mp_ptr, mp_srcptr, mp_ptr, mp_size_t));
3773
3774struct hgcd_matrix
3775{
3776  mp_size_t alloc;		/* for sanity checking only */
3777  mp_size_t n;
3778  mp_ptr p[2][2];
3779};
3780
3781#define MPN_HGCD_MATRIX_INIT_ITCH(n) (4 * ((n+1)/2 + 1))
3782
3783#define mpn_hgcd_matrix_init __MPN (hgcd_matrix_init)
3784__GMP_DECLSPEC void mpn_hgcd_matrix_init __GMP_PROTO ((struct hgcd_matrix *, mp_size_t, mp_ptr));
3785
3786#define mpn_hgcd_matrix_mul __MPN (hgcd_matrix_mul)
3787__GMP_DECLSPEC void mpn_hgcd_matrix_mul __GMP_PROTO ((struct hgcd_matrix *, const struct hgcd_matrix *, mp_ptr));
3788
3789#define mpn_hgcd_matrix_adjust __MPN (hgcd_matrix_adjust)
3790__GMP_DECLSPEC mp_size_t mpn_hgcd_matrix_adjust __GMP_PROTO ((struct hgcd_matrix *, mp_size_t, mp_ptr, mp_ptr, mp_size_t, mp_ptr));
3791
3792#define mpn_hgcd_itch __MPN (hgcd_itch)
3793__GMP_DECLSPEC mp_size_t mpn_hgcd_itch __GMP_PROTO ((mp_size_t));
3794
3795#define mpn_hgcd __MPN (hgcd)
3796__GMP_DECLSPEC mp_size_t mpn_hgcd __GMP_PROTO ((mp_ptr, mp_ptr, mp_size_t, struct hgcd_matrix *, mp_ptr));
3797
3798#define MPN_HGCD_LEHMER_ITCH(n) (n)
3799
3800#define mpn_hgcd_lehmer __MPN (hgcd_lehmer)
3801__GMP_DECLSPEC mp_size_t mpn_hgcd_lehmer __GMP_PROTO ((mp_ptr, mp_ptr, mp_size_t, struct hgcd_matrix *, mp_ptr));
3802
3803/* Needs storage for the quotient */
3804#define MPN_GCD_SUBDIV_STEP_ITCH(n) (n)
3805
3806#define mpn_gcd_subdiv_step __MPN(gcd_subdiv_step)
3807__GMP_DECLSPEC mp_size_t mpn_gcd_subdiv_step __GMP_PROTO ((mp_ptr, mp_size_t *, mp_ptr, mp_ptr, mp_size_t, mp_ptr));
3808
3809#define MPN_GCD_LEHMER_N_ITCH(n) (n)
3810
3811#define mpn_gcd_lehmer_n __MPN(gcd_lehmer_n)
3812__GMP_DECLSPEC mp_size_t mpn_gcd_lehmer_n __GMP_PROTO ((mp_ptr, mp_ptr, mp_ptr, mp_size_t, mp_ptr));
3813
3814#define mpn_gcdext_subdiv_step __MPN(gcdext_subdiv_step)
3815__GMP_DECLSPEC mp_size_t mpn_gcdext_subdiv_step __GMP_PROTO ((mp_ptr, mp_size_t *, mp_ptr, mp_size_t *, mp_ptr, mp_ptr, mp_size_t, mp_ptr, mp_ptr, mp_size_t *, mp_ptr, mp_ptr));
3816
3817#define MPN_GCDEXT_LEHMER_N_ITCH(n) (4*(n) + 3)
3818
3819#define mpn_gcdext_lehmer_n __MPN(gcdext_lehmer_n)
3820__GMP_DECLSPEC mp_size_t mpn_gcdext_lehmer_n __GMP_PROTO ((mp_ptr, mp_ptr, mp_size_t *, mp_ptr, mp_ptr, mp_size_t, mp_ptr));
3821
3822/* 4*(an + 1) + 4*(bn + 1) + an */
3823#define MPN_GCDEXT_LEHMER_ITCH(an, bn) (5*(an) + 4*(bn) + 8)
3824
3825#ifndef HGCD_THRESHOLD
3826#define HGCD_THRESHOLD 400
3827#endif
3828
3829#ifndef GCD_DC_THRESHOLD
3830#define GCD_DC_THRESHOLD 1000
3831#endif
3832
3833#ifndef GCDEXT_DC_THRESHOLD
3834#define GCDEXT_DC_THRESHOLD 600
3835#endif
3836
3837/* Definitions for mpn_set_str and mpn_get_str */
3838struct powers
3839{
3840  mp_ptr p;			/* actual power value */
3841  mp_size_t n;			/* # of limbs at p */
3842  mp_size_t shift;		/* weight of lowest limb, in limb base B */
3843  size_t digits_in_base;	/* number of corresponding digits */
3844  int base;
3845};
3846typedef struct powers powers_t;
3847#define mpn_dc_set_str_powtab_alloc(n) ((n) + GMP_LIMB_BITS)
3848#define mpn_dc_set_str_itch(n) ((n) + GMP_LIMB_BITS)
3849#define mpn_dc_get_str_powtab_alloc(n) ((n) + 2 * GMP_LIMB_BITS)
3850#define mpn_dc_get_str_itch(n) ((n) + GMP_LIMB_BITS)
3851
3852#define   mpn_dc_set_str __MPN(dc_set_str)
3853__GMP_DECLSPEC mp_size_t mpn_dc_set_str __GMP_PROTO ((mp_ptr, const unsigned char *, size_t, const powers_t *, mp_ptr));
3854#define   mpn_bc_set_str __MPN(bc_set_str)
3855__GMP_DECLSPEC mp_size_t mpn_bc_set_str __GMP_PROTO ((mp_ptr, const unsigned char *, size_t, int));
3856#define   mpn_set_str_compute_powtab __MPN(set_str_compute_powtab)
3857__GMP_DECLSPEC void      mpn_set_str_compute_powtab __GMP_PROTO ((powers_t *, mp_ptr, mp_size_t, int));
3858
3859
3860/* __GMPF_BITS_TO_PREC applies a minimum 53 bits, rounds upwards to a whole
3861   limb and adds an extra limb.  __GMPF_PREC_TO_BITS drops that extra limb,
3862   hence giving back the user's size in bits rounded up.  Notice that
3863   converting prec->bits->prec gives an unchanged value.  */
3864#define __GMPF_BITS_TO_PREC(n)						\
3865  ((mp_size_t) ((__GMP_MAX (53, n) + 2 * GMP_NUMB_BITS - 1) / GMP_NUMB_BITS))
3866#define __GMPF_PREC_TO_BITS(n) \
3867  ((mp_bitcnt_t) (n) * GMP_NUMB_BITS - GMP_NUMB_BITS)
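
/* Worked example (64-bit limbs, no nails): __GMPF_BITS_TO_PREC (100) is
   (100 + 127) / 64 = 3 limbs, __GMPF_PREC_TO_BITS (3) is 3*64 - 64 = 128
   bits, and __GMPF_BITS_TO_PREC (128) is (128 + 127) / 64 = 3 again, so
   the prec -> bits -> prec round trip is indeed unchanged.  */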
3868
3869__GMP_DECLSPEC extern mp_size_t __gmp_default_fp_limb_precision;
3870
3871
3872/* Set n to the number of significant digits an mpf with the given _mp_prec
3873   field can hold in the given base.  This is a rounded up value, designed to
3874   ensure there are enough digits to reproduce all the guaranteed part of the value.
3875
3876   There are prec many limbs, but the high might be only "1" so forget it
3877   and just count prec-1 limbs into chars.  +1 rounds that upwards, and a
3878   further +1 is because the limbs usually won't fall on digit boundaries.
3879
3880   FIXME: If base is a power of 2 and the bits per digit divides
3881   GMP_LIMB_BITS then the +2 is unnecessary.  This happens always for
3882   base==2, and in base==16 with the current 32 or 64 bit limb sizes. */
3883
3884#define MPF_SIGNIFICANT_DIGITS(n, base, prec)                           \
3885  do {                                                                  \
3886    ASSERT (base >= 2 && base < numberof (mp_bases));                   \
3887    (n) = 2 + (size_t) ((((size_t) (prec) - 1) * GMP_NUMB_BITS)         \
3888                        * mp_bases[(base)].chars_per_bit_exactly);      \
3889  } while (0)
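
/* Worked example (illustrative): with 64-bit limbs, prec == 3 and
   base == 10, chars_per_bit_exactly is log(2)/log(10), about 0.30103, so
   (3-1)*64 = 128 bits give n = 2 + trunc(128 * 0.30103) = 2 + 38 = 40
   digits.  */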
3890
3891
3892/* Decimal point string, from the current C locale.  Needs <langinfo.h> for
3893   nl_langinfo and constants, preferably with _GNU_SOURCE defined to get
3894   DECIMAL_POINT from glibc, and needs <locale.h> for localeconv, each under
3895   their respective #if HAVE_FOO_H.
3896
3897   GLIBC recommends nl_langinfo because getting only one facet can be
3898   faster, apparently. */
3899
3900/* DECIMAL_POINT seems to need _GNU_SOURCE defined to get it from glibc. */
3901#if HAVE_NL_LANGINFO && defined (DECIMAL_POINT)
3902#define GMP_DECIMAL_POINT  (nl_langinfo (DECIMAL_POINT))
3903#endif
3904/* RADIXCHAR is deprecated, still in unix98 or some such. */
3905#if HAVE_NL_LANGINFO && defined (RADIXCHAR) && ! defined (GMP_DECIMAL_POINT)
3906#define GMP_DECIMAL_POINT  (nl_langinfo (RADIXCHAR))
3907#endif
3908/* localeconv is slower since it returns all locale stuff */
3909#if HAVE_LOCALECONV && ! defined (GMP_DECIMAL_POINT)
3910#define GMP_DECIMAL_POINT  (localeconv()->decimal_point)
3911#endif
3912#if ! defined (GMP_DECIMAL_POINT)
3913#define GMP_DECIMAL_POINT  (".")
3914#endif
3915
3916
3917#define DOPRNT_CONV_FIXED        1
3918#define DOPRNT_CONV_SCIENTIFIC   2
3919#define DOPRNT_CONV_GENERAL      3
3920
3921#define DOPRNT_JUSTIFY_NONE      0
3922#define DOPRNT_JUSTIFY_LEFT      1
3923#define DOPRNT_JUSTIFY_RIGHT     2
3924#define DOPRNT_JUSTIFY_INTERNAL  3
3925
3926#define DOPRNT_SHOWBASE_YES      1
3927#define DOPRNT_SHOWBASE_NO       2
3928#define DOPRNT_SHOWBASE_NONZERO  3
3929
3930struct doprnt_params_t {
3931  int         base;          /* negative for upper case */
3932  int         conv;          /* choices above */
3933  const char  *expfmt;       /* exponent format */
3934  int         exptimes4;     /* exponent multiply by 4 */
3935  char        fill;          /* character */
3936  int         justify;       /* choices above */
3937  int         prec;          /* prec field, or -1 for all digits */
3938  int         showbase;      /* choices above */
3939  int         showpoint;     /* if radix point always shown */
3940  int         showtrailing;  /* if trailing zeros wanted */
3941  char        sign;          /* '+', ' ', or '\0' */
3942  int         width;         /* width field */
3943};
3944
3945#if _GMP_H_HAVE_VA_LIST
3946
3947__GMP_DECLSPEC typedef int (*doprnt_format_t) __GMP_PROTO ((void *, const char *, va_list));
3948__GMP_DECLSPEC typedef int (*doprnt_memory_t) __GMP_PROTO ((void *, const char *, size_t));
3949__GMP_DECLSPEC typedef int (*doprnt_reps_t)   __GMP_PROTO ((void *, int, int));
3950__GMP_DECLSPEC typedef int (*doprnt_final_t)  __GMP_PROTO ((void *));
3951
3952struct doprnt_funs_t {
3953  doprnt_format_t  format;
3954  doprnt_memory_t  memory;
3955  doprnt_reps_t    reps;
3956  doprnt_final_t   final;   /* NULL if not required */
3957};
3958
3959extern const struct doprnt_funs_t  __gmp_fprintf_funs;
3960extern const struct doprnt_funs_t  __gmp_sprintf_funs;
3961extern const struct doprnt_funs_t  __gmp_snprintf_funs;
3962extern const struct doprnt_funs_t  __gmp_obstack_printf_funs;
3963extern const struct doprnt_funs_t  __gmp_ostream_funs;
3964
3965/* "buf" is a __gmp_allocate_func block of "alloc" many bytes.  The first
3966   "size" of these have been written.  "alloc > size" is maintained, so
3967   there's room to store a '\0' at the end.  "result" is where the
3968   application wants the final block pointer.  */
3969struct gmp_asprintf_t {
3970  char    **result;
3971  char    *buf;
3972  size_t  size;
3973  size_t  alloc;
3974};
3975
3976#define GMP_ASPRINTF_T_INIT(d, output)                          \
3977  do {                                                          \
3978    (d).result = (output);                                      \
3979    (d).alloc = 256;                                            \
3980    (d).buf = (char *) (*__gmp_allocate_func) ((d).alloc);      \
3981    (d).size = 0;                                               \
3982  } while (0)
3983
3984/* If a realloc is necessary, use twice the size actually required, so as to
3985   avoid repeated small reallocs.  */
3986#define GMP_ASPRINTF_T_NEED(d, n)                                       \
3987  do {                                                                  \
3988    size_t  alloc, newsize, newalloc;                                   \
3989    ASSERT ((d)->alloc >= (d)->size + 1);                               \
3990                                                                        \
3991    alloc = (d)->alloc;                                                 \
3992    newsize = (d)->size + (n);                                          \
3993    if (alloc <= newsize)                                               \
3994      {                                                                 \
3995        newalloc = 2*newsize;                                           \
3996        (d)->alloc = newalloc;                                          \
3997        (d)->buf = __GMP_REALLOCATE_FUNC_TYPE ((d)->buf,                \
3998                                               alloc, newalloc, char);  \
3999      }                                                                 \
4000  } while (0)
4001
4002__GMP_DECLSPEC int __gmp_asprintf_memory __GMP_PROTO ((struct gmp_asprintf_t *, const char *, size_t));
4003__GMP_DECLSPEC int __gmp_asprintf_reps __GMP_PROTO ((struct gmp_asprintf_t *, int, int));
4004__GMP_DECLSPEC int __gmp_asprintf_final __GMP_PROTO ((struct gmp_asprintf_t *));
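
/* A usage sketch, illustrative only: the exact behaviour of the three
   functions above (appending bytes, appending repeated characters, and
   '\0'-terminating and storing the block through "result") is assumed from
   their names and from the struct comment, not restated from their
   definitions.

       char                   *result;
       struct gmp_asprintf_t  d;
       GMP_ASPRINTF_T_INIT (d, &result);
       __gmp_asprintf_memory (&d, "abc", 3);     append 3 bytes
       __gmp_asprintf_reps (&d, '0', 5);         append five '0' characters
       __gmp_asprintf_final (&d);                '\0' terminate, set *result

   GMP_ASPRINTF_T_NEED grows "buf" ahead of a write while preserving the
   "alloc > size" invariant described above.  */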
4005
4006/* buf is where to write the next output, and size is how much space is left
4007   there.  If the application passed size==0 then that's what we'll have
4008   here, and nothing at all should be written.  */
4009struct gmp_snprintf_t {
4010  char    *buf;
4011  size_t  size;
4012};
4013
4014/* Add the bytes printed by the call to the total retval, or bail out on an
4015   error.  */
4016#define DOPRNT_ACCUMULATE(call) \
4017  do {                          \
4018    int  __ret;                 \
4019    __ret = call;               \
4020    if (__ret == -1)            \
4021      goto error;               \
4022    retval += __ret;            \
4023  } while (0)
4024#define DOPRNT_ACCUMULATE_FUN(fun, params)      \
4025  do {                                          \
4026    ASSERT ((fun) != NULL);                     \
4027    DOPRNT_ACCUMULATE ((*(fun)) params);        \
4028  } while (0)
4029
4030#define DOPRNT_FORMAT(fmt, ap)                          \
4031  DOPRNT_ACCUMULATE_FUN (funs->format, (data, fmt, ap))
4032#define DOPRNT_MEMORY(ptr, len)                                 \
4033  DOPRNT_ACCUMULATE_FUN (funs->memory, (data, ptr, len))
4034#define DOPRNT_REPS(c, n)                               \
4035  DOPRNT_ACCUMULATE_FUN (funs->reps, (data, c, n))
4036
4037#define DOPRNT_STRING(str)      DOPRNT_MEMORY (str, strlen (str))
4038
4039#define DOPRNT_REPS_MAYBE(c, n) \
4040  do {                          \
4041    if ((n) != 0)               \
4042      DOPRNT_REPS (c, n);       \
4043  } while (0)
4044#define DOPRNT_MEMORY_MAYBE(ptr, len)   \
4045  do {                                  \
4046    if ((len) != 0)                     \
4047      DOPRNT_MEMORY (ptr, len);         \
4048  } while (0)
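
/* These macros rely on the enclosing function providing "funs" and "data"
   pointers, an int "retval" accumulator and an "error" label; the sketch
   below only illustrates that convention, it is not taken from any actual
   output routine.

       int retval = 0;
       ...
       DOPRNT_STRING ("0x");                  prefix, adds 2 to retval
       DOPRNT_REPS_MAYBE (fill, pad);         padding, only if pad != 0
       ...
       return retval;
     error:
       return -1;
 */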
4049
4050__GMP_DECLSPEC int __gmp_doprnt __GMP_PROTO ((const struct doprnt_funs_t *, void *, const char *, va_list));
4051__GMP_DECLSPEC int __gmp_doprnt_integer __GMP_PROTO ((const struct doprnt_funs_t *, void *, const struct doprnt_params_t *, const char *));
4052
4053#define __gmp_doprnt_mpf __gmp_doprnt_mpf2
4054__GMP_DECLSPEC int __gmp_doprnt_mpf __GMP_PROTO ((const struct doprnt_funs_t *, void *, const struct doprnt_params_t *, const char *, mpf_srcptr));
4055
4056__GMP_DECLSPEC int __gmp_replacement_vsnprintf __GMP_PROTO ((char *, size_t, const char *, va_list));
4057#endif /* _GMP_H_HAVE_VA_LIST */
4058
4059
4060typedef int (*gmp_doscan_scan_t)  __GMP_PROTO ((void *, const char *, ...));
4061typedef void *(*gmp_doscan_step_t) __GMP_PROTO ((void *, int));
4062typedef int (*gmp_doscan_get_t)   __GMP_PROTO ((void *));
4063typedef int (*gmp_doscan_unget_t) __GMP_PROTO ((int, void *));
4064
4065struct gmp_doscan_funs_t {
4066  gmp_doscan_scan_t   scan;
4067  gmp_doscan_step_t   step;
4068  gmp_doscan_get_t    get;
4069  gmp_doscan_unget_t  unget;
4070};
4071extern const struct gmp_doscan_funs_t  __gmp_fscanf_funs;
4072extern const struct gmp_doscan_funs_t  __gmp_sscanf_funs;
4073
4074#if _GMP_H_HAVE_VA_LIST
4075__GMP_DECLSPEC int __gmp_doscan __GMP_PROTO ((const struct gmp_doscan_funs_t *, void *, const char *, va_list));
4076#endif
4077
4078
4079/* For testing and debugging.  */
4080#define MPZ_CHECK_FORMAT(z)					\
4081  do {                                                          \
4082    ASSERT_ALWAYS (SIZ(z) == 0 || PTR(z)[ABSIZ(z) - 1] != 0);	\
4083    ASSERT_ALWAYS (ALLOC(z) >= ABSIZ(z));			\
4084    ASSERT_ALWAYS_MPN (PTR(z), ABSIZ(z));                       \
4085  } while (0)
4086
4087#define MPQ_CHECK_FORMAT(q)                             \
4088  do {                                                  \
4089    MPZ_CHECK_FORMAT (mpq_numref (q));                  \
4090    MPZ_CHECK_FORMAT (mpq_denref (q));                  \
4091    ASSERT_ALWAYS (SIZ(mpq_denref(q)) >= 1);            \
4092                                                        \
4093    if (SIZ(mpq_numref(q)) == 0)                        \
4094      {                                                 \
4095        /* should have zero as 0/1 */                   \
4096        ASSERT_ALWAYS (SIZ(mpq_denref(q)) == 1          \
4097                       && PTR(mpq_denref(q))[0] == 1);  \
4098      }                                                 \
4099    else                                                \
4100      {                                                 \
4101        /* should have no common factors */             \
4102        mpz_t  g;                                       \
4103        mpz_init (g);                                   \
4104        mpz_gcd (g, mpq_numref(q), mpq_denref(q));      \
4105        ASSERT_ALWAYS (mpz_cmp_ui (g, 1) == 0);         \
4106        mpz_clear (g);                                  \
4107      }                                                 \
4108  } while (0)
4109
4110#define MPF_CHECK_FORMAT(f)                             \
4111  do {                                                  \
4112    ASSERT_ALWAYS (PREC(f) >= __GMPF_BITS_TO_PREC(53)); \
4113    ASSERT_ALWAYS (ABSIZ(f) <= PREC(f)+1);              \
4114    if (SIZ(f) == 0)                                    \
4115      ASSERT_ALWAYS (EXP(f) == 0);                      \
4116    if (SIZ(f) != 0)                                    \
4117      ASSERT_ALWAYS (PTR(f)[ABSIZ(f) - 1] != 0);        \
4118  } while (0)
4119
4120
4121#define MPZ_PROVOKE_REALLOC(z)					\
4122  do { ALLOC(z) = ABSIZ(z); } while (0)
4123
4124
4125/* Enhancement: The "mod" and "gcd_1" functions below could have
4126   __GMP_ATTRIBUTE_PURE, but currently (gcc 3.3) that's not supported on
4127   function pointers, only actual functions.  It probably doesn't make much
4128   difference to the gmp code, since hopefully we arrange calls so there's
4129   no great need for the compiler to move things around.  */
4130
4131#if WANT_FAT_BINARY && (HAVE_HOST_CPU_FAMILY_x86 || HAVE_HOST_CPU_FAMILY_x86_64)
4132/* NOTE: The function pointers in this struct are also in CPUVEC_FUNCS_LIST
4133   in mpn/x86/x86-defs.m4.  Be sure to update that when changing here.  */
4134struct cpuvec_t {
4135  DECL_add_n           ((*add_n));
4136  DECL_addmul_1        ((*addmul_1));
4137  DECL_copyd           ((*copyd));
4138  DECL_copyi           ((*copyi));
4139  DECL_divexact_1      ((*divexact_1));
4140  DECL_divexact_by3c   ((*divexact_by3c));
4141  DECL_divrem_1        ((*divrem_1));
4142  DECL_gcd_1           ((*gcd_1));
4143  DECL_lshift          ((*lshift));
4144  DECL_mod_1           ((*mod_1));
4145  DECL_mod_34lsub1     ((*mod_34lsub1));
4146  DECL_modexact_1c_odd ((*modexact_1c_odd));
4147  DECL_mul_1           ((*mul_1));
4148  DECL_mul_basecase    ((*mul_basecase));
4149  DECL_preinv_divrem_1 ((*preinv_divrem_1));
4150  DECL_preinv_mod_1    ((*preinv_mod_1));
4151  DECL_rshift          ((*rshift));
4152  DECL_sqr_basecase    ((*sqr_basecase));
4153  DECL_sub_n           ((*sub_n));
4154  DECL_submul_1        ((*submul_1));
4155  int                  initialized;
4156  mp_size_t            mul_toom22_threshold;
4157  mp_size_t            mul_toom33_threshold;
4158  mp_size_t            sqr_toom2_threshold;
4159  mp_size_t            sqr_toom3_threshold;
4160};
4161__GMP_DECLSPEC extern struct cpuvec_t __gmpn_cpuvec;
4162#endif /* x86 fat binary */
4163
4164__GMP_DECLSPEC void __gmpn_cpuvec_init __GMP_PROTO ((void));
4165
4166/* Get a threshold "field" from __gmpn_cpuvec, running __gmpn_cpuvec_init()
4167   if that hasn't yet been done (to establish the right values).  */
4168#define CPUVEC_THRESHOLD(field)                                               \
4169  ((LIKELY (__gmpn_cpuvec.initialized) ? 0 : (__gmpn_cpuvec_init (), 0)),     \
4170   __gmpn_cpuvec.field)
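
/* For example, a fat binary build can route a tuned threshold through the
   run-time selected value roughly like

       #define MUL_TOOM22_THRESHOLD  CPUVEC_THRESHOLD (mul_toom22_threshold)

   (the real definitions are in the fat binary support, eg. fat.h), so the
   first use runs __gmpn_cpuvec_init and later uses just read the field.  */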
4171
4172
4173#if HAVE_NATIVE_mpn_add_nc
4174#define mpn_add_nc __MPN(add_nc)
4175__GMP_DECLSPEC mp_limb_t mpn_add_nc __GMP_PROTO ((mp_ptr, mp_srcptr, mp_srcptr, mp_size_t, mp_limb_t));
4176#else
4177static inline
4178mp_limb_t
4179mpn_add_nc (mp_ptr rp, mp_srcptr up, mp_srcptr vp, mp_size_t n, mp_limb_t ci)
4180{
4181  mp_limb_t co;
4182  co = mpn_add_n (rp, up, vp, n);
4183  co += mpn_add_1 (rp, rp, n, ci);
4184  return co;
4185}
4186#endif
4187
4188#if HAVE_NATIVE_mpn_sub_nc
4189#define mpn_sub_nc __MPN(sub_nc)
4190__GMP_DECLSPEC mp_limb_t mpn_sub_nc __GMP_PROTO ((mp_ptr, mp_srcptr, mp_srcptr, mp_size_t, mp_limb_t));
4191#else
4192static inline mp_limb_t
4193mpn_sub_nc (mp_ptr rp, mp_srcptr up, mp_srcptr vp, mp_size_t n, mp_limb_t ci)
4194{
4195  mp_limb_t co;
4196  co = mpn_sub_n (rp, up, vp, n);
4197  co += mpn_sub_1 (rp, rp, n, ci);
4198  return co;
4199}
4200#endif
4201
4202static inline int
4203mpn_zero_p (mp_srcptr ap, mp_size_t n)
4204{
4205  mp_size_t i;
4206  for (i = n - 1; i >= 0; i--)
4207    {
4208      if (ap[i] != 0)
4209	return 0;
4210    }
4211  return 1;
4212}
4213
4214#if TUNE_PROGRAM_BUILD
4215/* Some extras wanted when recompiling some .c files for use by the tune
4216   program.  Not part of a normal build.
4217
4218   It's necessary to keep these thresholds as #defines (each merely
4219   expanding to an identically named variable), since various defaults are
4220   established by #ifdef in the .c files.  For some this is not so (their
4221   defaults are instead established above), but all are done this way for
   consistency. */
4222
4223#undef	MUL_TOOM22_THRESHOLD
4224#define MUL_TOOM22_THRESHOLD		mul_toom22_threshold
4225extern mp_size_t			mul_toom22_threshold;
4226
4227#undef	MUL_TOOM33_THRESHOLD
4228#define MUL_TOOM33_THRESHOLD		mul_toom33_threshold
4229extern mp_size_t			mul_toom33_threshold;
4230
4231#undef	MUL_TOOM44_THRESHOLD
4232#define MUL_TOOM44_THRESHOLD		mul_toom44_threshold
4233extern mp_size_t			mul_toom44_threshold;
4234
4235#undef	MUL_TOOM6H_THRESHOLD
4236#define MUL_TOOM6H_THRESHOLD		mul_toom6h_threshold
4237extern mp_size_t			mul_toom6h_threshold;
4238
4239#undef	MUL_TOOM8H_THRESHOLD
4240#define MUL_TOOM8H_THRESHOLD		mul_toom8h_threshold
4241extern mp_size_t			mul_toom8h_threshold;
4242
4243#undef	MUL_TOOM32_TO_TOOM43_THRESHOLD
4244#define MUL_TOOM32_TO_TOOM43_THRESHOLD	mul_toom32_to_toom43_threshold
4245extern mp_size_t			mul_toom32_to_toom43_threshold;
4246
4247#undef	MUL_TOOM32_TO_TOOM53_THRESHOLD
4248#define MUL_TOOM32_TO_TOOM53_THRESHOLD	mul_toom32_to_toom53_threshold
4249extern mp_size_t			mul_toom32_to_toom53_threshold;
4250
4251#undef	MUL_TOOM42_TO_TOOM53_THRESHOLD
4252#define MUL_TOOM42_TO_TOOM53_THRESHOLD	mul_toom42_to_toom53_threshold
4253extern mp_size_t			mul_toom42_to_toom53_threshold;
4254
4255#undef	MUL_TOOM42_TO_TOOM63_THRESHOLD
4256#define MUL_TOOM42_TO_TOOM63_THRESHOLD	mul_toom42_to_toom63_threshold
4257extern mp_size_t			mul_toom42_to_toom63_threshold;
4258
4259#undef	MUL_FFT_THRESHOLD
4260#define MUL_FFT_THRESHOLD		mul_fft_threshold
4261extern mp_size_t			mul_fft_threshold;
4262
4263#undef	MUL_FFT_MODF_THRESHOLD
4264#define MUL_FFT_MODF_THRESHOLD		mul_fft_modf_threshold
4265extern mp_size_t			mul_fft_modf_threshold;
4266
4267#undef	MUL_FFT_TABLE
4268#define MUL_FFT_TABLE			{ 0 }
4269
4270#undef	MUL_FFT_TABLE3
4271#define MUL_FFT_TABLE3			{ {0,0} }
4272
4273/* A native mpn_sqr_basecase is not tuned and SQR_BASECASE_THRESHOLD should
4274   remain as zero (always use it). */
4275#if ! HAVE_NATIVE_mpn_sqr_basecase
4276#undef	SQR_BASECASE_THRESHOLD
4277#define SQR_BASECASE_THRESHOLD		sqr_basecase_threshold
4278extern mp_size_t			sqr_basecase_threshold;
4279#endif
4280
4281#if TUNE_PROGRAM_BUILD_SQR
4282#undef	SQR_TOOM2_THRESHOLD
4283#define SQR_TOOM2_THRESHOLD		SQR_TOOM2_MAX_GENERIC
4284#else
4285#undef	SQR_TOOM2_THRESHOLD
4286#define SQR_TOOM2_THRESHOLD		sqr_toom2_threshold
4287extern mp_size_t			sqr_toom2_threshold;
4288#endif
4289
4290#undef	SQR_TOOM3_THRESHOLD
4291#define SQR_TOOM3_THRESHOLD		sqr_toom3_threshold
4292extern mp_size_t			sqr_toom3_threshold;
4293
4294#undef	SQR_TOOM4_THRESHOLD
4295#define SQR_TOOM4_THRESHOLD		sqr_toom4_threshold
4296extern mp_size_t			sqr_toom4_threshold;
4297
4298#undef	SQR_TOOM6_THRESHOLD
4299#define SQR_TOOM6_THRESHOLD		sqr_toom6_threshold
4300extern mp_size_t			sqr_toom6_threshold;
4301
4302#undef	SQR_TOOM8_THRESHOLD
4303#define SQR_TOOM8_THRESHOLD		sqr_toom8_threshold
4304extern mp_size_t			sqr_toom8_threshold;
4305
4306#undef  SQR_FFT_THRESHOLD
4307#define SQR_FFT_THRESHOLD		sqr_fft_threshold
4308extern mp_size_t			sqr_fft_threshold;
4309
4310#undef  SQR_FFT_MODF_THRESHOLD
4311#define SQR_FFT_MODF_THRESHOLD		sqr_fft_modf_threshold
4312extern mp_size_t			sqr_fft_modf_threshold;
4313
4314#undef	SQR_FFT_TABLE
4315#define SQR_FFT_TABLE			{ 0 }
4316
4317#undef	SQR_FFT_TABLE3
4318#define SQR_FFT_TABLE3			{ {0,0} }
4319
4320#undef	MULLO_BASECASE_THRESHOLD
4321#define MULLO_BASECASE_THRESHOLD	mullo_basecase_threshold
4322extern mp_size_t			mullo_basecase_threshold;
4323
4324#undef	MULLO_DC_THRESHOLD
4325#define MULLO_DC_THRESHOLD		mullo_dc_threshold
4326extern mp_size_t			mullo_dc_threshold;
4327
4328#undef	MULLO_MUL_N_THRESHOLD
4329#define MULLO_MUL_N_THRESHOLD		mullo_mul_n_threshold
4330extern mp_size_t			mullo_mul_n_threshold;
4331
4332#undef	DC_DIV_QR_THRESHOLD
4333#define DC_DIV_QR_THRESHOLD		dc_div_qr_threshold
4334extern mp_size_t			dc_div_qr_threshold;
4335
4336#undef	DC_DIVAPPR_Q_THRESHOLD
4337#define DC_DIVAPPR_Q_THRESHOLD		dc_divappr_q_threshold
4338extern mp_size_t			dc_divappr_q_threshold;
4339
4340#undef	DC_BDIV_Q_THRESHOLD
4341#define DC_BDIV_Q_THRESHOLD		dc_bdiv_q_threshold
4342extern mp_size_t			dc_bdiv_q_threshold;
4343
4344#undef	DC_BDIV_QR_THRESHOLD
4345#define DC_BDIV_QR_THRESHOLD		dc_bdiv_qr_threshold
4346extern mp_size_t			dc_bdiv_qr_threshold;
4347
4348#undef	MU_DIV_QR_THRESHOLD
4349#define MU_DIV_QR_THRESHOLD		mu_div_qr_threshold
4350extern mp_size_t			mu_div_qr_threshold;
4351
4352#undef	MU_DIVAPPR_Q_THRESHOLD
4353#define MU_DIVAPPR_Q_THRESHOLD		mu_divappr_q_threshold
4354extern mp_size_t			mu_divappr_q_threshold;
4355
4356#undef	MUPI_DIV_QR_THRESHOLD
4357#define MUPI_DIV_QR_THRESHOLD		mupi_div_qr_threshold
4358extern mp_size_t			mupi_div_qr_threshold;
4359
4360#undef	MU_BDIV_QR_THRESHOLD
4361#define MU_BDIV_QR_THRESHOLD		mu_bdiv_qr_threshold
4362extern mp_size_t			mu_bdiv_qr_threshold;
4363
4364#undef	MU_BDIV_Q_THRESHOLD
4365#define MU_BDIV_Q_THRESHOLD		mu_bdiv_q_threshold
4366extern mp_size_t			mu_bdiv_q_threshold;
4367
4368#undef	INV_MULMOD_BNM1_THRESHOLD
4369#define INV_MULMOD_BNM1_THRESHOLD	inv_mulmod_bnm1_threshold
4370extern mp_size_t			inv_mulmod_bnm1_threshold;
4371
4372#undef	INV_NEWTON_THRESHOLD
4373#define INV_NEWTON_THRESHOLD		inv_newton_threshold
4374extern mp_size_t			inv_newton_threshold;
4375
4376#undef	INV_APPR_THRESHOLD
4377#define INV_APPR_THRESHOLD		inv_appr_threshold
4378extern mp_size_t			inv_appr_threshold;
4379
4380#undef	BINV_NEWTON_THRESHOLD
4381#define BINV_NEWTON_THRESHOLD		binv_newton_threshold
4382extern mp_size_t			binv_newton_threshold;
4383
4384#undef	REDC_1_TO_REDC_2_THRESHOLD
4385#define REDC_1_TO_REDC_2_THRESHOLD	redc_1_to_redc_2_threshold
4386extern mp_size_t			redc_1_to_redc_2_threshold;
4387
4388#undef	REDC_2_TO_REDC_N_THRESHOLD
4389#define REDC_2_TO_REDC_N_THRESHOLD	redc_2_to_redc_n_threshold
4390extern mp_size_t			redc_2_to_redc_n_threshold;
4391
4392#undef	REDC_1_TO_REDC_N_THRESHOLD
4393#define REDC_1_TO_REDC_N_THRESHOLD	redc_1_to_redc_n_threshold
4394extern mp_size_t			redc_1_to_redc_n_threshold;
4395
4396#undef	MATRIX22_STRASSEN_THRESHOLD
4397#define MATRIX22_STRASSEN_THRESHOLD	matrix22_strassen_threshold
4398extern mp_size_t			matrix22_strassen_threshold;
4399
4400#undef	HGCD_THRESHOLD
4401#define HGCD_THRESHOLD			hgcd_threshold
4402extern mp_size_t			hgcd_threshold;
4403
4404#undef	GCD_DC_THRESHOLD
4405#define GCD_DC_THRESHOLD		gcd_dc_threshold
4406extern mp_size_t			gcd_dc_threshold;
4407
4408#undef  GCDEXT_DC_THRESHOLD
4409#define GCDEXT_DC_THRESHOLD		gcdext_dc_threshold
4410extern mp_size_t			gcdext_dc_threshold;
4411
4412#undef  DIVREM_1_NORM_THRESHOLD
4413#define DIVREM_1_NORM_THRESHOLD		divrem_1_norm_threshold
4414extern mp_size_t			divrem_1_norm_threshold;
4415
4416#undef  DIVREM_1_UNNORM_THRESHOLD
4417#define DIVREM_1_UNNORM_THRESHOLD	divrem_1_unnorm_threshold
4418extern mp_size_t			divrem_1_unnorm_threshold;
4419
4420#undef	MOD_1_NORM_THRESHOLD
4421#define MOD_1_NORM_THRESHOLD		mod_1_norm_threshold
4422extern mp_size_t			mod_1_norm_threshold;
4423
4424#undef	MOD_1_UNNORM_THRESHOLD
4425#define MOD_1_UNNORM_THRESHOLD		mod_1_unnorm_threshold
4426extern mp_size_t			mod_1_unnorm_threshold;
4427
4428#undef	MOD_1N_TO_MOD_1_1_THRESHOLD
4429#define MOD_1N_TO_MOD_1_1_THRESHOLD	mod_1n_to_mod_1_1_threshold
4430extern mp_size_t			mod_1n_to_mod_1_1_threshold;
4431
4432#undef	MOD_1U_TO_MOD_1_1_THRESHOLD
4433#define MOD_1U_TO_MOD_1_1_THRESHOLD	mod_1u_to_mod_1_1_threshold
4434extern mp_size_t			mod_1u_to_mod_1_1_threshold;
4435
4436#undef	MOD_1_1_TO_MOD_1_2_THRESHOLD
4437#define MOD_1_1_TO_MOD_1_2_THRESHOLD	mod_1_1_to_mod_1_2_threshold
4438extern mp_size_t			mod_1_1_to_mod_1_2_threshold;
4439
4440#undef	MOD_1_2_TO_MOD_1_4_THRESHOLD
4441#define MOD_1_2_TO_MOD_1_4_THRESHOLD	mod_1_2_to_mod_1_4_threshold
4442extern mp_size_t			mod_1_2_to_mod_1_4_threshold;
4443
4444#undef	PREINV_MOD_1_TO_MOD_1_THRESHOLD
4445#define PREINV_MOD_1_TO_MOD_1_THRESHOLD	preinv_mod_1_to_mod_1_threshold
4446extern mp_size_t			preinv_mod_1_to_mod_1_threshold;
4447
4448#if ! UDIV_PREINV_ALWAYS
4449#undef	DIVREM_2_THRESHOLD
4450#define DIVREM_2_THRESHOLD		divrem_2_threshold
4451extern mp_size_t			divrem_2_threshold;
4452#endif
4453
4454#undef	MULMOD_BNM1_THRESHOLD
4455#define MULMOD_BNM1_THRESHOLD		mulmod_bnm1_threshold
4456extern mp_size_t			mulmod_bnm1_threshold;
4457
4458#undef	SQRMOD_BNM1_THRESHOLD
4459#define SQRMOD_BNM1_THRESHOLD		sqrmod_bnm1_threshold
4460extern mp_size_t			sqrmod_bnm1_threshold;
4461
4462#undef	GET_STR_DC_THRESHOLD
4463#define GET_STR_DC_THRESHOLD		get_str_dc_threshold
4464extern mp_size_t			get_str_dc_threshold;
4465
4466#undef  GET_STR_PRECOMPUTE_THRESHOLD
4467#define GET_STR_PRECOMPUTE_THRESHOLD	get_str_precompute_threshold
4468extern mp_size_t			get_str_precompute_threshold;
4469
4470#undef	SET_STR_DC_THRESHOLD
4471#define SET_STR_DC_THRESHOLD		set_str_dc_threshold
4472extern mp_size_t			set_str_dc_threshold;
4473
4474#undef  SET_STR_PRECOMPUTE_THRESHOLD
4475#define SET_STR_PRECOMPUTE_THRESHOLD	set_str_precompute_threshold
4476extern mp_size_t			set_str_precompute_threshold;
4477
4478#undef  FFT_TABLE_ATTRS
4479#define FFT_TABLE_ATTRS
4480extern mp_size_t  mpn_fft_table[2][MPN_FFT_TABLE_SIZE];
4481#define FFT_TABLE3_SIZE 2000	/* generous space for tuning */
4482extern struct fft_table_nk mpn_fft_table3[2][FFT_TABLE3_SIZE];
4483
4484/* Sizes the tune program tests up to, used in a couple of recompilations. */
4485#undef MUL_TOOM22_THRESHOLD_LIMIT
4486#undef MUL_TOOM33_THRESHOLD_LIMIT
4487#undef MULLO_BASECASE_THRESHOLD_LIMIT
4488#undef SQR_TOOM3_THRESHOLD_LIMIT
4489#define SQR_TOOM2_MAX_GENERIC           200
4490#define MUL_TOOM22_THRESHOLD_LIMIT      700
4491#define MUL_TOOM33_THRESHOLD_LIMIT      700
4492#define SQR_TOOM3_THRESHOLD_LIMIT       400
4493#define MUL_TOOM44_THRESHOLD_LIMIT     1000
4494#define SQR_TOOM4_THRESHOLD_LIMIT      1000
4495#define MUL_TOOM6H_THRESHOLD_LIMIT     1100
4496#define SQR_TOOM6_THRESHOLD_LIMIT      1100
4497#define MUL_TOOM8H_THRESHOLD_LIMIT     1200
4498#define SQR_TOOM8_THRESHOLD_LIMIT      1200
4499#define MULLO_BASECASE_THRESHOLD_LIMIT  200
4500#define GET_STR_THRESHOLD_LIMIT         150
4501
4502#endif /* TUNE_PROGRAM_BUILD */
4503
4504#if defined (__cplusplus)
4505}
4506#endif
4507
4508/* FIXME: Make these itch functions less conservative.  Also consider making
4509   them dependent on just 'an', and compute the allocation directly from 'an'
4510   instead of via n.  */
4511
4512/* toom22/toom2: Scratch need is 2*(an + k), k is the recursion depth.
4513   k is the smallest k such that
4514     ceil(an/2^k) < MUL_TOOM22_THRESHOLD,
4515   which implies that
4516     k = bitsize of floor ((an-1)/(MUL_TOOM22_THRESHOLD-1))
4517       = 1 + floor (log_2 (floor ((an-1)/(MUL_TOOM22_THRESHOLD-1))))
4518*/
4519#define mpn_toom22_mul_itch(an, bn) \
4520  (2 * ((an) + GMP_NUMB_BITS))
4521#define mpn_toom2_sqr_itch(an) \
4522  (2 * ((an) + GMP_NUMB_BITS))
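
/* Worked example of the bound above, using a hypothetical
   MUL_TOOM22_THRESHOLD of 30 and an = 1000:
     floor ((an-1)/(MUL_TOOM22_THRESHOLD-1)) = floor (999/29) = 34,
   whose bitsize is 6, so k = 6; indeed ceil(1000/2^6) = 16 < 30 whereas
   ceil(1000/2^5) = 32 is not.  The true need is then 2*(1000+6) = 2012
   limbs, and since k cannot exceed GMP_NUMB_BITS the macros' allocation of
   2*(an + GMP_NUMB_BITS) limbs is a safe overestimate.  */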
4523
4524/* toom33/toom3: Scratch need is 5an/2 + 10k, k is the recursion depth.
4525   We use 3an + C, so that we can use a smaller constant.
4526 */
4527#define mpn_toom33_mul_itch(an, bn) \
4528  (3 * (an) + GMP_NUMB_BITS)
4529#define mpn_toom3_sqr_itch(an) \
4530  (3 * (an) + GMP_NUMB_BITS)
4531
4532/* toom44/toom4: Scratch need is 8an/3 + 13k, k is the recursion depth.
4533   We use 3an + C, so that we can use a smaller constant.
4534 */
4535#define mpn_toom44_mul_itch(an, bn) \
4536  (3 * (an) + GMP_NUMB_BITS)
4537#define mpn_toom4_sqr_itch(an) \
4538  (3 * (an) + GMP_NUMB_BITS)
4539
4540#define mpn_toom6_sqr_itch(n)						\
4541( ((n) - SQR_TOOM6_THRESHOLD)*2 +					\
4542   MAX(SQR_TOOM6_THRESHOLD*2 + GMP_NUMB_BITS*6,			\
4543       mpn_toom4_sqr_itch(SQR_TOOM6_THRESHOLD)) )
4544
4545#define mpn_toom6_mul_n_itch(n)						\
4546( ((n) - MUL_TOOM6H_THRESHOLD)*2 +					\
4547   MAX(MUL_TOOM6H_THRESHOLD*2 + GMP_NUMB_BITS*6,			\
4548       mpn_toom44_mul_itch(MUL_TOOM6H_THRESHOLD,MUL_TOOM6H_THRESHOLD)) )
4549
4550static inline mp_size_t
4551mpn_toom6h_mul_itch (mp_size_t an, mp_size_t bn) {
4552  mp_size_t estimatedN;
4553  estimatedN = (an + bn) / (size_t) 10 + 1;
4554  return mpn_toom6_mul_n_itch (estimatedN * 6);
4555}
4556
4557#define mpn_toom8_sqr_itch(n)						\
4558( (((n)*15)>>3) - ((SQR_TOOM8_THRESHOLD*15)>>3) +			\
4559   MAX(((SQR_TOOM8_THRESHOLD*15)>>3) + GMP_NUMB_BITS*6,		\
4560       mpn_toom6_sqr_itch(SQR_TOOM8_THRESHOLD)) )
4561
4562#define mpn_toom8_mul_n_itch(n)						\
4563( (((n)*15)>>3) - ((MUL_TOOM8H_THRESHOLD*15)>>3) +			\
4564   MAX(((MUL_TOOM8H_THRESHOLD*15)>>3) + GMP_NUMB_BITS*6,		\
4565       mpn_toom6_mul_n_itch(MUL_TOOM8H_THRESHOLD)) )
4566
4567static inline mp_size_t
4568mpn_toom8h_mul_itch (mp_size_t an, mp_size_t bn) {
4569  mp_size_t estimatedN;
4570  estimatedN = (an + bn) / (size_t) 14 + 1;
4571  return mpn_toom8_mul_n_itch (estimatedN * 8);
4572}
4573
4574static inline mp_size_t
4575mpn_toom32_mul_itch (mp_size_t an, mp_size_t bn)
4576{
4577  mp_size_t n = 1 + (2 * an >= 3 * bn ? (an - 1) / (size_t) 3 : (bn - 1) >> 1);
4578  mp_size_t itch = 2 * n + 1;
4579
4580  return itch;
4581}
4582
4583static inline mp_size_t
4584mpn_toom42_mul_itch (mp_size_t an, mp_size_t bn)
4585{
4586  mp_size_t n = an >= 2 * bn ? (an + 3) >> 2 : (bn + 1) >> 1;
4587  return 6 * n + 3;
4588}
4589
4590static inline mp_size_t
4591mpn_toom43_mul_itch (mp_size_t an, mp_size_t bn)
4592{
4593  mp_size_t n = 1 + (3 * an >= 4 * bn ? (an - 1) >> 2 : (bn - 1) / (size_t) 3);
4594
4595  return 6*n + 4;
4596}
4597
4598static inline mp_size_t
4599mpn_toom52_mul_itch (mp_size_t an, mp_size_t bn)
4600{
4601  mp_size_t n = 1 + (2 * an >= 5 * bn ? (an - 1) / (size_t) 5 : (bn - 1) >> 1);
4602  return 6*n + 4;
4603}
4604
4605static inline mp_size_t
4606mpn_toom53_mul_itch (mp_size_t an, mp_size_t bn)
4607{
4608  mp_size_t n = 1 + (3 * an >= 5 * bn ? (an - 1) / (size_t) 5 : (bn - 1) / (size_t) 3);
4609  return 10 * n + 10;
4610}
4611
4612static inline mp_size_t
4613mpn_toom62_mul_itch (mp_size_t an, mp_size_t bn)
4614{
4615  mp_size_t n = 1 + (an >= 3 * bn ? (an - 1) / (size_t) 6 : (bn - 1) >> 1);
4616  return 10 * n + 10;
4617}
4618
4619static inline mp_size_t
4620mpn_toom63_mul_itch (mp_size_t an, mp_size_t bn)
4621{
4622  mp_size_t n = 1 + (an >= 2 * bn ? (an - 1) / (size_t) 6 : (bn - 1) / (size_t) 3);
4623  return 9 * n + 3;
4624}
4625
4626#if 0
4627#define mpn_fft_mul mpn_mul_fft_full
4628#else
4629#define mpn_fft_mul mpn_nussbaumer_mul
4630#endif
4631
4632#ifdef __cplusplus
4633
4634/* A little helper for a null-terminated __gmp_allocate_func string.
4635   The destructor ensures it's freed even if an exception is thrown.
4636   The len field is needed by the destructor, and can be used by anyone else
4637   to avoid a second strlen pass over the data.
4638
4639   Since our input is a C string, using strlen is correct.  Perhaps it'd be
4640   more C++-ish style to use std::char_traits<char>::length, but char_traits
4641   isn't available in gcc 2.95.4.  */
4642
4643class gmp_allocated_string {
4644 public:
4645  char *str;
4646  size_t len;
4647  gmp_allocated_string(char *arg)
4648  {
4649    str = arg;
4650    len = std::strlen (str);
4651  }
4652  ~gmp_allocated_string()
4653  {
4654    (*__gmp_free_func) (str, len+1);
4655  }
4656};
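
/* A usage sketch (illustrative only; "o" standing for a std::ostream and
   "z" for an mpz_t are placeholders): wrap a string obtained from the GMP
   allocation functions, eg. from mpz_get_str, so it's released via
   __gmp_free_func even if the stream write throws.

       gmp_allocated_string  t (mpz_get_str (NULL, 10, z));
       o.write (t.str, t.len);
*/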
4657
4658std::istream &__gmpz_operator_in_nowhite (std::istream &, mpz_ptr, char);
4659int __gmp_istream_set_base (std::istream &, char &, bool &, bool &);
4660void __gmp_istream_set_digits (std::string &, std::istream &, char &, bool &, int);
4661void __gmp_doprnt_params_from_ios (struct doprnt_params_t *p, std::ios &o);
4662std::ostream& __gmp_doprnt_integer_ostream (std::ostream &o, struct doprnt_params_t *p, char *s);
4663extern const struct doprnt_funs_t  __gmp_asprintf_funs_noformat;
4664
4665#endif /* __cplusplus */
4666
4667#endif /* __GMP_IMPL_H__ */
4668