Merge branch 'vendor/GMP' into gcc441
[dragonfly.git] / contrib / gmp / longlong.h
1 /* longlong.h -- definitions for mixed size 32/64 bit arithmetic.
2
3 Copyright 1991, 1992, 1993, 1994, 1996, 1997, 1999, 2000, 2001, 2002, 2003,
4 2004, 2005, 2007, 2008, 2009 Free Software Foundation, Inc.
5
6 This file is free software; you can redistribute it and/or modify it under the
7 terms of the GNU Lesser General Public License as published by the Free
8 Software Foundation; either version 3 of the License, or (at your option) any
9 later version.
10
11 This file is distributed in the hope that it will be useful, but WITHOUT ANY
12 WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A
13 PARTICULAR PURPOSE.  See the GNU Lesser General Public License for more
14 details.
15
16 You should have received a copy of the GNU Lesser General Public License
17 along with this file.  If not, see http://www.gnu.org/licenses/.  */
18
19 /* You have to define the following before including this file:
20
21    UWtype -- An unsigned type, default type for operations (typically a "word")
22    UHWtype -- An unsigned type, at least half the size of UWtype.
23    UDWtype -- An unsigned type, at least twice as large a UWtype
24    W_TYPE_SIZE -- size in bits of UWtype
25
26    SItype, USItype -- Signed and unsigned 32 bit types.
27    DItype, UDItype -- Signed and unsigned 64 bit types.
28
29    On a 32 bit machine UWtype should typically be USItype;
30    on a 64 bit machine, UWtype should typically be UDItype.
31
32    CAUTION!  Using this file outside of GMP is not safe.  You need to include
33    gmp.h and gmp-impl.h, or certain things might not work as expected.
34 */
35
36 #define __BITS4 (W_TYPE_SIZE / 4)
37 #define __ll_B ((UWtype) 1 << (W_TYPE_SIZE / 2))
38 #define __ll_lowpart(t) ((UWtype) (t) & (__ll_B - 1))
39 #define __ll_highpart(t) ((UWtype) (t) >> (W_TYPE_SIZE / 2))
40
41 /* This is used to make sure no undesirable sharing between different libraries
42    that use this file takes place.  */
43 #ifndef __MPN
44 #define __MPN(x) __##x
45 #endif
46
47 #ifndef _PROTO
48 #if (__STDC__-0) || defined (__cplusplus)
49 #define _PROTO(x) x
50 #else
51 #define _PROTO(x) ()
52 #endif
53 #endif
54
55 /* Define auxiliary asm macros.
56
57    1) umul_ppmm(high_prod, low_prod, multiplier, multiplicand) multiplies two
58    UWtype integers MULTIPLIER and MULTIPLICAND, and generates a two UWtype
59    word product in HIGH_PROD and LOW_PROD.
60
61    2) __umulsidi3(a,b) multiplies two UWtype integers A and B, and returns a
62    UDWtype product.  This is just a variant of umul_ppmm.
63
64    3) udiv_qrnnd(quotient, remainder, high_numerator, low_numerator,
65    denominator) divides a UDWtype, composed by the UWtype integers
66    HIGH_NUMERATOR and LOW_NUMERATOR, by DENOMINATOR and places the quotient
67    in QUOTIENT and the remainder in REMAINDER.  HIGH_NUMERATOR must be less
68    than DENOMINATOR for correct operation.  If, in addition, the most
69    significant bit of DENOMINATOR must be 1, then the pre-processor symbol
70    UDIV_NEEDS_NORMALIZATION is defined to 1.
71
72    4) sdiv_qrnnd(quotient, remainder, high_numerator, low_numerator,
73    denominator).  Like udiv_qrnnd but the numbers are signed.  The quotient
74    is rounded towards 0.
75
76    5) count_leading_zeros(count, x) counts the number of zero-bits from the
77    msb to the first non-zero bit in the UWtype X.  This is the number of
78    steps X needs to be shifted left to set the msb.  Undefined for X == 0,
79    unless the symbol COUNT_LEADING_ZEROS_0 is defined to some value.
80
81    6) count_trailing_zeros(count, x) like count_leading_zeros, but counts
82    from the least significant end.
83
84    7) add_ssaaaa(high_sum, low_sum, high_addend_1, low_addend_1,
85    high_addend_2, low_addend_2) adds two UWtype integers, composed by
86    HIGH_ADDEND_1 and LOW_ADDEND_1, and HIGH_ADDEND_2 and LOW_ADDEND_2
87    respectively.  The result is placed in HIGH_SUM and LOW_SUM.  Overflow
88    (i.e. carry out) is not stored anywhere, and is lost.
89
90    8) sub_ddmmss(high_difference, low_difference, high_minuend, low_minuend,
91    high_subtrahend, low_subtrahend) subtracts two two-word UWtype integers,
92    composed by HIGH_MINUEND_1 and LOW_MINUEND_1, and HIGH_SUBTRAHEND_2 and
93    LOW_SUBTRAHEND_2 respectively.  The result is placed in HIGH_DIFFERENCE
94    and LOW_DIFFERENCE.  Overflow (i.e. carry out) is not stored anywhere,
95    and is lost.
96
97    If any of these macros are left undefined for a particular CPU,
98    C macros are used.
99
100
101    Notes:
102
103    For add_ssaaaa the two high and two low addends can both commute, but
104    unfortunately gcc only supports one "%" commutative in each asm block.
105    This has always been so but is only documented in recent versions
106    (eg. pre-release 3.3).  Having two or more "%"s can cause an internal
107    compiler error in certain rare circumstances.
108
109    Apparently it was only the last "%" that was ever actually respected, so
110    the code has been updated to leave just that.  Clearly there's a free
111    choice whether high or low should get it, if there's a reason to favour
112    one over the other.  Also obviously when the constraints on the two
113    operands are identical there's no benefit to the reloader in any "%" at
114    all.
115
116    */
117
118 /* The CPUs come in alphabetical order below.
119
120    Please add support for more CPUs here, or improve the current support
121    for the CPUs below!  */
122
123
124 /* count_leading_zeros_gcc_clz is count_leading_zeros implemented with gcc
125    3.4 __builtin_clzl or __builtin_clzll, according to our limb size.
126    Similarly count_trailing_zeros_gcc_ctz using __builtin_ctzl or
127    __builtin_ctzll.
128
129    These builtins are only used when we check what code comes out, on some
130    chips they're merely libgcc calls, where we will instead want an inline
131    in that case (either asm or generic C).
132
133    These builtins are better than an asm block of the same insn, since an
134    asm block doesn't give gcc any information about scheduling or resource
135    usage.  We keep an asm block for use on prior versions of gcc though.
136
137    For reference, __builtin_ffs existed in gcc prior to __builtin_clz, but
138    it's not used (for count_leading_zeros) because it generally gives extra
139    code to ensure the result is 0 when the input is 0, which we don't need
140    or want.  */
141
142 #ifdef _LONG_LONG_LIMB
143 #define count_leading_zeros_gcc_clz(count,x)    \
144   do {                                          \
145     ASSERT ((x) != 0);                          \
146     (count) = __builtin_clzll (x);              \
147   } while (0)
148 #else
149 #define count_leading_zeros_gcc_clz(count,x)    \
150   do {                                          \
151     ASSERT ((x) != 0);                          \
152     (count) = __builtin_clzl (x);               \
153   } while (0)
154 #endif
155
156 #ifdef _LONG_LONG_LIMB
157 #define count_trailing_zeros_gcc_ctz(count,x)   \
158   do {                                          \
159     ASSERT ((x) != 0);                          \
160     (count) = __builtin_ctzll (x);              \
161   } while (0)
162 #else
163 #define count_trailing_zeros_gcc_ctz(count,x)   \
164   do {                                          \
165     ASSERT ((x) != 0);                          \
166     (count) = __builtin_ctzl (x);               \
167   } while (0)
168 #endif
169
170
171 /* FIXME: The macros using external routines like __MPN(count_leading_zeros)
172    don't need to be under !NO_ASM */
173 #if ! defined (NO_ASM)
174
175 #if defined (__alpha) && W_TYPE_SIZE == 64
176 /* Most alpha-based machines, except Cray systems. */
177 #if defined (__GNUC__)
178 #if __GMP_GNUC_PREREQ (3,3)
179 #define umul_ppmm(ph, pl, m0, m1) \
180   do {                                                                  \
181     UDItype __m0 = (m0), __m1 = (m1);                                   \
182     (ph) = __builtin_alpha_umulh (__m0, __m1);                          \
183     (pl) = __m0 * __m1;                                                 \
184   } while (0)
185 #else
186 #define umul_ppmm(ph, pl, m0, m1) \
187   do {                                                                  \
188     UDItype __m0 = (m0), __m1 = (m1);                                   \
189     __asm__ ("umulh %r1,%2,%0"                                          \
190              : "=r" (ph)                                                \
191              : "%rJ" (m0), "rI" (m1));                                  \
192     (pl) = __m0 * __m1;                                                 \
193   } while (0)
194 #endif
195 #define UMUL_TIME 18
196 #else /* ! __GNUC__ */
197 #include <machine/builtins.h>
198 #define umul_ppmm(ph, pl, m0, m1) \
199   do {                                                                  \
200     UDItype __m0 = (m0), __m1 = (m1);                                   \
201     (ph) = __UMULH (m0, m1);                                            \
202     (pl) = __m0 * __m1;                                                 \
203   } while (0)
204 #endif
205 #ifndef LONGLONG_STANDALONE
206 #define udiv_qrnnd(q, r, n1, n0, d) \
207   do { UWtype __di;                                                     \
208     __di = __MPN(invert_limb) (d);                                      \
209     udiv_qrnnd_preinv (q, r, n1, n0, d, __di);                          \
210   } while (0)
211 #define UDIV_PREINV_ALWAYS  1
212 #define UDIV_NEEDS_NORMALIZATION 1
213 #define UDIV_TIME 220
214 #endif /* LONGLONG_STANDALONE */
215
216 /* clz_tab is required in all configurations, since mpn/alpha/cntlz.asm
217    always goes into libgmp.so, even when not actually used.  */
218 #define COUNT_LEADING_ZEROS_NEED_CLZ_TAB
219
220 #if defined (__GNUC__) && HAVE_HOST_CPU_alpha_CIX
221 #define count_leading_zeros(COUNT,X) \
222   __asm__("ctlz %1,%0" : "=r"(COUNT) : "r"(X))
223 #define count_trailing_zeros(COUNT,X) \
224   __asm__("cttz %1,%0" : "=r"(COUNT) : "r"(X))
225 #endif /* clz/ctz using cix */
226
227 #if ! defined (count_leading_zeros)                             \
228   && defined (__GNUC__) && ! defined (LONGLONG_STANDALONE)
229 /* ALPHA_CMPBGE_0 gives "cmpbge $31,src,dst", ie. test src bytes == 0.
230    "$31" is written explicitly in the asm, since an "r" constraint won't
231    select reg 31.  There seems no need to worry about "r31" syntax for cray,
232    since gcc itself (pre-release 3.4) emits just $31 in various places.  */
233 #define ALPHA_CMPBGE_0(dst, src)                                        \
234   do { asm ("cmpbge $31, %1, %0" : "=r" (dst) : "r" (src)); } while (0)
235 /* Zero bytes are turned into bits with cmpbge, a __clz_tab lookup counts
236    them, locating the highest non-zero byte.  A second __clz_tab lookup
237    counts the leading zero bits in that byte, giving the result.  */
238 #define count_leading_zeros(count, x)                                   \
239   do {                                                                  \
240     UWtype  __clz__b, __clz__c, __clz__x = (x);                         \
241     ALPHA_CMPBGE_0 (__clz__b,  __clz__x);           /* zero bytes */    \
242     __clz__b = __clz_tab [(__clz__b >> 1) ^ 0x7F];  /* 8 to 1 byte */   \
243     __clz__b = __clz__b * 8 - 7;                    /* 57 to 1 shift */ \
244     __clz__x >>= __clz__b;                                              \
245     __clz__c = __clz_tab [__clz__x];                /* 8 to 1 bit */    \
246     __clz__b = 65 - __clz__b;                                           \
247     (count) = __clz__b - __clz__c;                                      \
248   } while (0)
249 #define COUNT_LEADING_ZEROS_NEED_CLZ_TAB
250 #endif /* clz using cmpbge */
251
252 #if ! defined (count_leading_zeros) && ! defined (LONGLONG_STANDALONE)
253 #if HAVE_ATTRIBUTE_CONST
254 long __MPN(count_leading_zeros) _PROTO ((UDItype)) __attribute__ ((const));
255 #else
256 long __MPN(count_leading_zeros) _PROTO ((UDItype));
257 #endif
258 #define count_leading_zeros(count, x) \
259   ((count) = __MPN(count_leading_zeros) (x))
260 #endif /* clz using mpn */
261 #endif /* __alpha */
262
263 #if defined (_CRAY) && W_TYPE_SIZE == 64
264 #include <intrinsics.h>
265 #define UDIV_PREINV_ALWAYS  1
266 #define UDIV_NEEDS_NORMALIZATION 1
267 #define UDIV_TIME 220
268 long __MPN(count_leading_zeros) _PROTO ((UDItype));
269 #define count_leading_zeros(count, x) \
270   ((count) = _leadz ((UWtype) (x)))
271 #if defined (_CRAYIEEE)         /* I.e., Cray T90/ieee, T3D, and T3E */
272 #define umul_ppmm(ph, pl, m0, m1) \
273   do {                                                                  \
274     UDItype __m0 = (m0), __m1 = (m1);                                   \
275     (ph) = _int_mult_upper (m0, m1);                                    \
276     (pl) = __m0 * __m1;                                                 \
277   } while (0)
278 #ifndef LONGLONG_STANDALONE
279 #define udiv_qrnnd(q, r, n1, n0, d) \
280   do { UWtype __di;                                                     \
281     __di = __MPN(invert_limb) (d);                                      \
282     udiv_qrnnd_preinv (q, r, n1, n0, d, __di);                          \
283   } while (0)
284 #endif /* LONGLONG_STANDALONE */
285 #endif /* _CRAYIEEE */
286 #endif /* _CRAY */
287
288 #if defined (__ia64) && W_TYPE_SIZE == 64
289 /* This form encourages gcc (pre-release 3.4 at least) to emit predicated
290    "sub r=r,r" and "sub r=r,r,1", giving a 2 cycle latency.  The generic
291    code using "al<bl" arithmetically comes out making an actual 0 or 1 in a
292    register, which takes an extra cycle.  */
293 #define sub_ddmmss(sh, sl, ah, al, bh, bl)      \
294   do {                                          \
295     UWtype __x;                                 \
296     __x = (al) - (bl);                          \
297     if ((al) < (bl))                            \
298       (sh) = (ah) - (bh) - 1;                   \
299     else                                        \
300       (sh) = (ah) - (bh);                       \
301     (sl) = __x;                                 \
302   } while (0)
303 #if defined (__GNUC__) && ! defined (__INTEL_COMPILER)
304 /* Do both product parts in assembly, since that gives better code with
305    all gcc versions.  Some callers will just use the upper part, and in
306    that situation we waste an instruction, but not any cycles.  */
307 #define umul_ppmm(ph, pl, m0, m1) \
308     __asm__ ("xma.hu %0 = %2, %3, f0\n\txma.l %1 = %2, %3, f0"          \
309              : "=&f" (ph), "=f" (pl)                                    \
310              : "f" (m0), "f" (m1))
311 #define UMUL_TIME 14
312 #define count_leading_zeros(count, x) \
313   do {                                                                  \
314     UWtype _x = (x), _y, _a, _c;                                        \
315     __asm__ ("mux1 %0 = %1, @rev" : "=r" (_y) : "r" (_x));              \
316     __asm__ ("czx1.l %0 = %1" : "=r" (_a) : "r" (-_y | _y));            \
317     _c = (_a - 1) << 3;                                                 \
318     _x >>= _c;                                                          \
319     if (_x >= 1 << 4)                                                   \
320       _x >>= 4, _c += 4;                                                \
321     if (_x >= 1 << 2)                                                   \
322       _x >>= 2, _c += 2;                                                \
323     _c += _x >> 1;                                                      \
324     (count) =  W_TYPE_SIZE - 1 - _c;                                    \
325   } while (0)
326 /* similar to what gcc does for __builtin_ffs, but 0 based rather than 1
327    based, and we don't need a special case for x==0 here */
328 #define count_trailing_zeros(count, x)                                  \
329   do {                                                                  \
330     UWtype __ctz_x = (x);                                               \
331     __asm__ ("popcnt %0 = %1"                                           \
332              : "=r" (count)                                             \
333              : "r" ((__ctz_x-1) & ~__ctz_x));                           \
334   } while (0)
335 #endif
336 #if defined (__INTEL_COMPILER)
337 #include <ia64intrin.h>
338 #define umul_ppmm(ph, pl, m0, m1)                                       \
339   do {                                                                  \
340     UWtype _m0 = (m0), _m1 = (m1);                                      \
341     ph = _m64_xmahu (_m0, _m1, 0);                                      \
342     pl = _m0 * _m1;                                                     \
343   } while (0)
344 #endif
345 #ifndef LONGLONG_STANDALONE
346 #define udiv_qrnnd(q, r, n1, n0, d) \
347   do { UWtype __di;                                                     \
348     __di = __MPN(invert_limb) (d);                                      \
349     udiv_qrnnd_preinv (q, r, n1, n0, d, __di);                          \
350   } while (0)
351 #define UDIV_PREINV_ALWAYS  1
352 #define UDIV_NEEDS_NORMALIZATION 1
353 #endif
354 #define UDIV_TIME 220
355 #endif
356
357
358 #if defined (__GNUC__)
359
360 /* We sometimes need to clobber "cc" with gcc2, but that would not be
361    understood by gcc1.  Use cpp to avoid major code duplication.  */
362 #if __GNUC__ < 2
363 #define __CLOBBER_CC
364 #define __AND_CLOBBER_CC
365 #else /* __GNUC__ >= 2 */
366 #define __CLOBBER_CC : "cc"
367 #define __AND_CLOBBER_CC , "cc"
368 #endif /* __GNUC__ < 2 */
369
370 #if (defined (__a29k__) || defined (_AM29K)) && W_TYPE_SIZE == 32
371 #define add_ssaaaa(sh, sl, ah, al, bh, bl) \
372   __asm__ ("add %1,%4,%5\n\taddc %0,%2,%3"                              \
373            : "=r" (sh), "=&r" (sl)                                      \
374            : "r" (ah), "rI" (bh), "%r" (al), "rI" (bl))
375 #define sub_ddmmss(sh, sl, ah, al, bh, bl) \
376   __asm__ ("sub %1,%4,%5\n\tsubc %0,%2,%3"                              \
377            : "=r" (sh), "=&r" (sl)                                      \
378            : "r" (ah), "rI" (bh), "r" (al), "rI" (bl))
379 #define umul_ppmm(xh, xl, m0, m1) \
380   do {                                                                  \
381     USItype __m0 = (m0), __m1 = (m1);                                   \
382     __asm__ ("multiplu %0,%1,%2"                                        \
383              : "=r" (xl)                                                \
384              : "r" (__m0), "r" (__m1));                                 \
385     __asm__ ("multmu %0,%1,%2"                                          \
386              : "=r" (xh)                                                \
387              : "r" (__m0), "r" (__m1));                                 \
388   } while (0)
389 #define udiv_qrnnd(q, r, n1, n0, d) \
390   __asm__ ("dividu %0,%3,%4"                                            \
391            : "=r" (q), "=q" (r)                                         \
392            : "1" (n1), "r" (n0), "r" (d))
393 #define count_leading_zeros(count, x) \
394     __asm__ ("clz %0,%1"                                                \
395              : "=r" (count)                                             \
396              : "r" (x))
397 #define COUNT_LEADING_ZEROS_0 32
398 #endif /* __a29k__ */
399
400 #if defined (__arc__)
401 #define add_ssaaaa(sh, sl, ah, al, bh, bl) \
402   __asm__ ("add.f\t%1, %4, %5\n\tadc\t%0, %2, %3"                       \
403            : "=r" (sh),                                                 \
404              "=&r" (sl)                                                 \
405            : "r"  ((USItype) (ah)),                                     \
406              "rIJ" ((USItype) (bh)),                                    \
407              "%r" ((USItype) (al)),                                     \
408              "rIJ" ((USItype) (bl)))
409 #define sub_ddmmss(sh, sl, ah, al, bh, bl) \
410   __asm__ ("sub.f\t%1, %4, %5\n\tsbc\t%0, %2, %3"                       \
411            : "=r" (sh),                                                 \
412              "=&r" (sl)                                                 \
413            : "r" ((USItype) (ah)),                                      \
414              "rIJ" ((USItype) (bh)),                                    \
415              "r" ((USItype) (al)),                                      \
416              "rIJ" ((USItype) (bl)))
417 #endif
418
419 #if defined (__arm__) && W_TYPE_SIZE == 32
420 #define add_ssaaaa(sh, sl, ah, al, bh, bl) \
421   __asm__ ("adds\t%1, %4, %5\n\tadc\t%0, %2, %3"                        \
422            : "=r" (sh), "=&r" (sl)                                      \
423            : "r" (ah), "rI" (bh), "%r" (al), "rI" (bl) __CLOBBER_CC)
424 #define sub_ddmmss(sh, sl, ah, al, bh, bl) \
425   do {                                                                  \
426     if (__builtin_constant_p (al))                                      \
427       {                                                                 \
428         if (__builtin_constant_p (ah))                                  \
429           __asm__ ("rsbs\t%1, %5, %4\n\trsc\t%0, %3, %2"                \
430                    : "=r" (sh), "=&r" (sl)                              \
431                    : "rI" (ah), "r" (bh), "rI" (al), "r" (bl) __CLOBBER_CC); \
432         else                                                            \
433           __asm__ ("rsbs\t%1, %5, %4\n\tsbc\t%0, %2, %3"                \
434                    : "=r" (sh), "=&r" (sl)                              \
435                    : "r" (ah), "rI" (bh), "rI" (al), "r" (bl) __CLOBBER_CC); \
436       }                                                                 \
437     else if (__builtin_constant_p (ah))                                 \
438       {                                                                 \
439         if (__builtin_constant_p (bl))                                  \
440           __asm__ ("subs\t%1, %4, %5\n\trsc\t%0, %3, %2"                \
441                    : "=r" (sh), "=&r" (sl)                              \
442                    : "rI" (ah), "r" (bh), "r" (al), "rI" (bl) __CLOBBER_CC); \
443         else                                                            \
444           __asm__ ("rsbs\t%1, %5, %4\n\trsc\t%0, %3, %2"                \
445                    : "=r" (sh), "=&r" (sl)                              \
446                    : "rI" (ah), "r" (bh), "rI" (al), "r" (bl) __CLOBBER_CC); \
447       }                                                                 \
448     else if (__builtin_constant_p (bl))                                 \
449       {                                                                 \
450         if (__builtin_constant_p (bh))                                  \
451           __asm__ ("subs\t%1, %4, %5\n\tsbc\t%0, %2, %3"                \
452                    : "=r" (sh), "=&r" (sl)                              \
453                    : "r" (ah), "rI" (bh), "r" (al), "rI" (bl) __CLOBBER_CC); \
454         else                                                            \
455           __asm__ ("subs\t%1, %4, %5\n\trsc\t%0, %3, %2"                \
456                    : "=r" (sh), "=&r" (sl)                              \
457                    : "rI" (ah), "r" (bh), "r" (al), "rI" (bl) __CLOBBER_CC); \
458       }                                                                 \
459     else /* only bh might be a constant */                              \
460       __asm__ ("subs\t%1, %4, %5\n\tsbc\t%0, %2, %3"                    \
461                : "=r" (sh), "=&r" (sl)                                  \
462                : "r" (ah), "rI" (bh), "r" (al), "rI" (bl) __CLOBBER_CC);\
463     } while (0)
464 #if 1 || defined (__arm_m__)    /* `M' series has widening multiply support */
465 #define umul_ppmm(xh, xl, a, b) \
466   __asm__ ("umull %0,%1,%2,%3" : "=&r" (xl), "=&r" (xh) : "r" (a), "r" (b))
467 #define UMUL_TIME 5
468 #define smul_ppmm(xh, xl, a, b) \
469   __asm__ ("smull %0,%1,%2,%3" : "=&r" (xl), "=&r" (xh) : "r" (a), "r" (b))
470 #ifndef LONGLONG_STANDALONE
471 #define udiv_qrnnd(q, r, n1, n0, d) \
472   do { UWtype __di;                                                     \
473     __di = __MPN(invert_limb) (d);                                      \
474     udiv_qrnnd_preinv (q, r, n1, n0, d, __di);                          \
475   } while (0)
476 #define UDIV_PREINV_ALWAYS  1
477 #define UDIV_NEEDS_NORMALIZATION 1
478 #define UDIV_TIME 70
479 #endif /* LONGLONG_STANDALONE */
480 #else
481 #define umul_ppmm(xh, xl, a, b) \
482   __asm__ ("%@ Inlined umul_ppmm\n"                                     \
483 "       mov     %|r0, %2, lsr #16\n"                                    \
484 "       mov     %|r2, %3, lsr #16\n"                                    \
485 "       bic     %|r1, %2, %|r0, lsl #16\n"                              \
486 "       bic     %|r2, %3, %|r2, lsl #16\n"                              \
487 "       mul     %1, %|r1, %|r2\n"                                       \
488 "       mul     %|r2, %|r0, %|r2\n"                                     \
489 "       mul     %|r1, %0, %|r1\n"                                       \
490 "       mul     %0, %|r0, %0\n"                                         \
491 "       adds    %|r1, %|r2, %|r1\n"                                     \
492 "       addcs   %0, %0, #65536\n"                                       \
493 "       adds    %1, %1, %|r1, lsl #16\n"                                \
494 "       adc     %0, %0, %|r1, lsr #16"                                  \
495            : "=&r" (xh), "=r" (xl)                                      \
496            : "r" (a), "r" (b)                                           \
497            : "r0", "r1", "r2")
498 #define UMUL_TIME 20
499 #ifndef LONGLONG_STANDALONE
500 #define udiv_qrnnd(q, r, n1, n0, d) \
501   do { UWtype __r;                                                      \
502     (q) = __MPN(udiv_qrnnd) (&__r, (n1), (n0), (d));                    \
503     (r) = __r;                                                          \
504   } while (0)
505 extern UWtype __MPN(udiv_qrnnd) _PROTO ((UWtype *, UWtype, UWtype, UWtype));
506 #define UDIV_TIME 200
507 #endif /* LONGLONG_STANDALONE */
508 #endif
509 #endif /* __arm__ */
510
511 #if defined (__clipper__) && W_TYPE_SIZE == 32
512 #define umul_ppmm(w1, w0, u, v) \
513   ({union {UDItype __ll;                                                \
514            struct {USItype __l, __h;} __i;                              \
515           } __x;                                                        \
516   __asm__ ("mulwux %2,%0"                                               \
517            : "=r" (__x.__ll)                                            \
518            : "%0" ((USItype)(u)), "r" ((USItype)(v)));                  \
519   (w1) = __x.__i.__h; (w0) = __x.__i.__l;})
520 #define smul_ppmm(w1, w0, u, v) \
521   ({union {DItype __ll;                                                 \
522            struct {SItype __l, __h;} __i;                               \
523           } __x;                                                        \
524   __asm__ ("mulwx %2,%0"                                                \
525            : "=r" (__x.__ll)                                            \
526            : "%0" ((SItype)(u)), "r" ((SItype)(v)));                    \
527   (w1) = __x.__i.__h; (w0) = __x.__i.__l;})
528 #define __umulsidi3(u, v) \
529   ({UDItype __w;                                                        \
530     __asm__ ("mulwux %2,%0"                                             \
531              : "=r" (__w) : "%0" ((USItype)(u)), "r" ((USItype)(v)));   \
532     __w; })
533 #endif /* __clipper__ */
534
535 /* Fujitsu vector computers.  */
536 #if defined (__uxp__) && W_TYPE_SIZE == 32
537 #define umul_ppmm(ph, pl, u, v) \
538   do {                                                                  \
539     union {UDItype __ll;                                                \
540            struct {USItype __h, __l;} __i;                              \
541           } __x;                                                        \
542     __asm__ ("mult.lu %1,%2,%0" : "=r" (__x.__ll) : "%r" (u), "rK" (v));\
543     (ph) = __x.__i.__h;                                                 \
544     (pl) = __x.__i.__l;                                                 \
545   } while (0)
546 #define smul_ppmm(ph, pl, u, v) \
547   do {                                                                  \
548     union {UDItype __ll;                                                \
549            struct {USItype __h, __l;} __i;                              \
550           } __x;                                                        \
551     __asm__ ("mult.l %1,%2,%0" : "=r" (__x.__ll) : "%r" (u), "rK" (v)); \
552     (ph) = __x.__i.__h;                                                 \
553     (pl) = __x.__i.__l;                                                 \
554   } while (0)
555 #endif
556
557 #if defined (__gmicro__) && W_TYPE_SIZE == 32
558 #define add_ssaaaa(sh, sl, ah, al, bh, bl) \
559   __asm__ ("add.w %5,%1\n\taddx %3,%0"                                  \
560            : "=g" (sh), "=&g" (sl)                                      \
561            : "0"  ((USItype)(ah)), "g" ((USItype)(bh)),                 \
562              "%1" ((USItype)(al)), "g" ((USItype)(bl)))
563 #define sub_ddmmss(sh, sl, ah, al, bh, bl) \
564   __asm__ ("sub.w %5,%1\n\tsubx %3,%0"                                  \
565            : "=g" (sh), "=&g" (sl)                                      \
566            : "0" ((USItype)(ah)), "g" ((USItype)(bh)),                  \
567              "1" ((USItype)(al)), "g" ((USItype)(bl)))
568 #define umul_ppmm(ph, pl, m0, m1) \
569   __asm__ ("mulx %3,%0,%1"                                              \
570            : "=g" (ph), "=r" (pl)                                       \
571            : "%0" ((USItype)(m0)), "g" ((USItype)(m1)))
572 #define udiv_qrnnd(q, r, nh, nl, d) \
573   __asm__ ("divx %4,%0,%1"                                              \
574            : "=g" (q), "=r" (r)                                         \
575            : "1" ((USItype)(nh)), "0" ((USItype)(nl)), "g" ((USItype)(d)))
576 #define count_leading_zeros(count, x) \
577   __asm__ ("bsch/1 %1,%0"                                               \
578            : "=g" (count) : "g" ((USItype)(x)), "0" ((USItype)0))
579 #endif
580
581 #if defined (__hppa) && W_TYPE_SIZE == 32
582 #define add_ssaaaa(sh, sl, ah, al, bh, bl) \
583   __asm__ ("add%I5 %5,%r4,%1\n\taddc %r2,%r3,%0"                        \
584            : "=r" (sh), "=&r" (sl)                                      \
585            : "rM" (ah), "rM" (bh), "%rM" (al), "rI" (bl))
586 #define sub_ddmmss(sh, sl, ah, al, bh, bl) \
587   __asm__ ("sub%I4 %4,%r5,%1\n\tsubb %r2,%r3,%0"                        \
588            : "=r" (sh), "=&r" (sl)                                      \
589            : "rM" (ah), "rM" (bh), "rI" (al), "rM" (bl))
590 #if defined (_PA_RISC1_1)
591 #define umul_ppmm(wh, wl, u, v) \
592   do {                                                                  \
593     union {UDItype __ll;                                                \
594            struct {USItype __h, __l;} __i;                              \
595           } __x;                                                        \
596     __asm__ ("xmpyu %1,%2,%0" : "=*f" (__x.__ll) : "*f" (u), "*f" (v)); \
597     (wh) = __x.__i.__h;                                                 \
598     (wl) = __x.__i.__l;                                                 \
599   } while (0)
600 #define UMUL_TIME 8
601 #define UDIV_TIME 60
602 #else
603 #define UMUL_TIME 40
604 #define UDIV_TIME 80
605 #endif
606 #define count_leading_zeros(count, x) \
607   do {                                                                  \
608     USItype __tmp;                                                      \
609     __asm__ (                                                           \
610        "ldi             1,%0\n"                                         \
611 "       extru,=         %1,15,16,%%r0   ; Bits 31..16 zero?\n"          \
612 "       extru,tr        %1,15,16,%1     ; No.  Shift down, skip add.\n" \
613 "       ldo             16(%0),%0       ; Yes.  Perform add.\n"         \
614 "       extru,=         %1,23,8,%%r0    ; Bits 15..8 zero?\n"           \
615 "       extru,tr        %1,23,8,%1      ; No.  Shift down, skip add.\n" \
616 "       ldo             8(%0),%0        ; Yes.  Perform add.\n"         \
617 "       extru,=         %1,27,4,%%r0    ; Bits 7..4 zero?\n"            \
618 "       extru,tr        %1,27,4,%1      ; No.  Shift down, skip add.\n" \
619 "       ldo             4(%0),%0        ; Yes.  Perform add.\n"         \
620 "       extru,=         %1,29,2,%%r0    ; Bits 3..2 zero?\n"            \
621 "       extru,tr        %1,29,2,%1      ; No.  Shift down, skip add.\n" \
622 "       ldo             2(%0),%0        ; Yes.  Perform add.\n"         \
623 "       extru           %1,30,1,%1      ; Extract bit 1.\n"             \
624 "       sub             %0,%1,%0        ; Subtract it.\n"               \
625         : "=r" (count), "=r" (__tmp) : "1" (x));                        \
626   } while (0)
627 #endif /* hppa */
628
629 /* These macros are for ABI=2.0w.  In ABI=2.0n they can't be used, since GCC
630    (3.2) puts longlong into two adjacent 32-bit registers.  Presumably this
631    is just a case of no direct support for 2.0n but treating it like 1.0. */
632 #if defined (__hppa) && W_TYPE_SIZE == 64 && ! defined (_LONG_LONG_LIMB)
633 #define add_ssaaaa(sh, sl, ah, al, bh, bl) \
634   __asm__ ("add%I5 %5,%r4,%1\n\tadd,dc %r2,%r3,%0"                      \
635            : "=r" (sh), "=&r" (sl)                                      \
636            : "rM" (ah), "rM" (bh), "%rM" (al), "rI" (bl))
637 #define sub_ddmmss(sh, sl, ah, al, bh, bl) \
638   __asm__ ("sub%I4 %4,%r5,%1\n\tsub,db %r2,%r3,%0"                      \
639            : "=r" (sh), "=&r" (sl)                                      \
640            : "rM" (ah), "rM" (bh), "rI" (al), "rM" (bl))
641 #endif /* hppa */
642
643 #if (defined (__i370__) || defined (__s390__) || defined (__mvs__)) && W_TYPE_SIZE == 32
644 #define smul_ppmm(xh, xl, m0, m1) \
645   do {                                                                  \
646     union {DItype __ll;                                                 \
647            struct {USItype __h, __l;} __i;                              \
648           } __x;                                                        \
649     __asm__ ("lr %N0,%1\n\tmr %0,%2"                                    \
650              : "=&r" (__x.__ll)                                         \
651              : "r" (m0), "r" (m1));                                     \
652     (xh) = __x.__i.__h; (xl) = __x.__i.__l;                             \
653   } while (0)
654 #define sdiv_qrnnd(q, r, n1, n0, d) \
655   do {                                                                  \
656     union {DItype __ll;                                                 \
657            struct {USItype __h, __l;} __i;                              \
658           } __x;                                                        \
659     __x.__i.__h = n1; __x.__i.__l = n0;                                 \
660     __asm__ ("dr %0,%2"                                                 \
661              : "=r" (__x.__ll)                                          \
662              : "0" (__x.__ll), "r" (d));                                \
663     (q) = __x.__i.__l; (r) = __x.__i.__h;                               \
664   } while (0)
665 #endif
666
667 #if (defined (__i386__) || defined (__i486__)) && W_TYPE_SIZE == 32
668 #define add_ssaaaa(sh, sl, ah, al, bh, bl) \
669   __asm__ ("addl %5,%k1\n\tadcl %3,%k0"                                 \
670            : "=r" (sh), "=&r" (sl)                                      \
671            : "0"  ((USItype)(ah)), "g" ((USItype)(bh)),                 \
672              "%1" ((USItype)(al)), "g" ((USItype)(bl)))
673 #define sub_ddmmss(sh, sl, ah, al, bh, bl) \
674   __asm__ ("subl %5,%k1\n\tsbbl %3,%k0"                                 \
675            : "=r" (sh), "=&r" (sl)                                      \
676            : "0" ((USItype)(ah)), "g" ((USItype)(bh)),                  \
677              "1" ((USItype)(al)), "g" ((USItype)(bl)))
678 #define umul_ppmm(w1, w0, u, v) \
679   __asm__ ("mull %3"                                                    \
680            : "=a" (w0), "=d" (w1)                                       \
681            : "%0" ((USItype)(u)), "rm" ((USItype)(v)))
682 #define udiv_qrnnd(q, r, n1, n0, dx) /* d renamed to dx avoiding "=d" */\
683   __asm__ ("divl %4"                 /* stringification in K&R C */     \
684            : "=a" (q), "=d" (r)                                         \
685            : "0" ((USItype)(n0)), "1" ((USItype)(n1)), "rm" ((USItype)(dx)))
686
687 #if HAVE_HOST_CPU_i586 || HAVE_HOST_CPU_pentium || HAVE_HOST_CPU_pentiummmx
688 /* Pentium bsrl takes between 10 and 72 cycles depending where the most
689    significant 1 bit is, hence the use of the following alternatives.  bsfl
690    is slow too, between 18 and 42 depending where the least significant 1
691    bit is, so let the generic count_trailing_zeros below make use of the
692    count_leading_zeros here too.  */
693
694 #if HAVE_HOST_CPU_pentiummmx && ! defined (LONGLONG_STANDALONE)
695 /* The following should be a fixed 14 or 15 cycles, but possibly plus an L1
696    cache miss reading from __clz_tab.  For P55 it's favoured over the float
697    below so as to avoid mixing MMX and x87, since the penalty for switching
698    between the two is about 100 cycles.
699
700    The asm block sets __shift to -3 if the high 24 bits are clear, -2 for
701    16, -1 for 8, or 0 otherwise.  This could be written equivalently as
702    follows, but as of gcc 2.95.2 it results in conditional jumps.
703
704        __shift = -(__n < 0x1000000);
705        __shift -= (__n < 0x10000);
706        __shift -= (__n < 0x100);
707
708    The middle two sbbl and cmpl's pair, and with luck something gcc
709    generates might pair with the first cmpl and the last sbbl.  The "32+1"
710    constant could be folded into __clz_tab[], but it doesn't seem worth
711    making a different table just for that.  */
712
713 #define count_leading_zeros(c,n)                                        \
714   do {                                                                  \
715     USItype  __n = (n);                                                 \
716     USItype  __shift;                                                   \
717     __asm__ ("cmpl  $0x1000000, %1\n"                                   \
718              "sbbl  %0, %0\n"                                           \
719              "cmpl  $0x10000, %1\n"                                     \
720              "sbbl  $0, %0\n"                                           \
721              "cmpl  $0x100, %1\n"                                       \
722              "sbbl  $0, %0\n"                                           \
723              : "=&r" (__shift) : "r"  (__n));                           \
724     __shift = __shift*8 + 24 + 1;                                       \
725     (c) = 32 + 1 - __shift - __clz_tab[__n >> __shift];                 \
726   } while (0)
727 #define COUNT_LEADING_ZEROS_NEED_CLZ_TAB
728 #define COUNT_LEADING_ZEROS_0   31   /* n==0 indistinguishable from n==1 */
729
730 #else /* ! pentiummmx || LONGLONG_STANDALONE */
731 /* The following should be a fixed 14 cycles or so.  Some scheduling
732    opportunities should be available between the float load/store too.  This
733    sort of code is used in gcc 3 for __builtin_ffs (with "n&-n") and is
734    apparently suggested by the Intel optimizing manual (don't know exactly
735    where).  gcc 2.95 or up will be best for this, so the "double" is
736    correctly aligned on the stack.  */
737 #define count_leading_zeros(c,n)                                        \
738   do {                                                                  \
739     union {                                                             \
740       double    d;                                                      \
741       unsigned  a[2];                                                   \
742     } __u;                                                              \
743     ASSERT ((n) != 0);                                                  \
744     __u.d = (UWtype) (n);                                               \
745     (c) = 0x3FF + 31 - (__u.a[1] >> 20);                                \
746   } while (0)
747 #define COUNT_LEADING_ZEROS_0   (0x3FF + 31)
748 #endif /* pentiummx */
749
750 #else /* ! pentium */
751
752 #if __GMP_GNUC_PREREQ (3,4)  /* using bsrl */
753 #define count_leading_zeros(count,x)  count_leading_zeros_gcc_clz(count,x)
754 #endif /* gcc clz */
755
756 /* On P6, gcc prior to 3.0 generates a partial register stall for
757    __cbtmp^31, due to using "xorb $31" instead of "xorl $31", the former
758    being 1 code byte smaller.  "31-__cbtmp" is a workaround, probably at the
759    cost of one extra instruction.  Do this for "i386" too, since that means
760    generic x86.  */
761 #if ! defined (count_leading_zeros) && __GNUC__ < 3                     \
762   && (HAVE_HOST_CPU_i386                                                \
763       || HAVE_HOST_CPU_i686                                             \
764       || HAVE_HOST_CPU_pentiumpro                                       \
765       || HAVE_HOST_CPU_pentium2                                         \
766       || HAVE_HOST_CPU_pentium3)
767 #define count_leading_zeros(count, x)                                   \
768   do {                                                                  \
769     USItype __cbtmp;                                                    \
770     ASSERT ((x) != 0);                                                  \
771     __asm__ ("bsrl %1,%0" : "=r" (__cbtmp) : "rm" ((USItype)(x)));      \
772     (count) = 31 - __cbtmp;                                             \
773   } while (0)
774 #endif /* gcc<3 asm bsrl */
775
776 #ifndef count_leading_zeros
777 #define count_leading_zeros(count, x)                                   \
778   do {                                                                  \
779     USItype __cbtmp;                                                    \
780     ASSERT ((x) != 0);                                                  \
781     __asm__ ("bsrl %1,%0" : "=r" (__cbtmp) : "rm" ((USItype)(x)));      \
782     (count) = __cbtmp ^ 31;                                             \
783   } while (0)
784 #endif /* asm bsrl */
785
786 #if __GMP_GNUC_PREREQ (3,4)  /* using bsfl */
787 #define count_trailing_zeros(count,x)  count_trailing_zeros_gcc_ctz(count,x)
788 #endif /* gcc ctz */
789
790 #ifndef count_trailing_zeros
791 #define count_trailing_zeros(count, x)                                  \
792   do {                                                                  \
793     ASSERT ((x) != 0);                                                  \
794     __asm__ ("bsfl %1,%k0" : "=r" (count) : "rm" ((USItype)(x)));       \
795   } while (0)
796 #endif /* asm bsfl */
797
798 #endif /* ! pentium */
799
800 #ifndef UMUL_TIME
801 #define UMUL_TIME 10
802 #endif
803 #ifndef UDIV_TIME
804 #define UDIV_TIME 40
805 #endif
806 #endif /* 80x86 */
807
808 #if defined (__amd64__) && W_TYPE_SIZE == 64
809 #define add_ssaaaa(sh, sl, ah, al, bh, bl) \
810   __asm__ ("addq %5,%q1\n\tadcq %3,%q0"                                 \
811            : "=r" (sh), "=&r" (sl)                                      \
812            : "0"  ((UDItype)(ah)), "rme" ((UDItype)(bh)),               \
813              "%1" ((UDItype)(al)), "rme" ((UDItype)(bl)))
814 #define sub_ddmmss(sh, sl, ah, al, bh, bl) \
815   __asm__ ("subq %5,%q1\n\tsbbq %3,%q0"                                 \
816            : "=r" (sh), "=&r" (sl)                                      \
817            : "0" ((UDItype)(ah)), "rme" ((UDItype)(bh)),                \
818              "1" ((UDItype)(al)), "rme" ((UDItype)(bl)))
819 #define umul_ppmm(w1, w0, u, v) \
820   __asm__ ("mulq %3"                                                    \
821            : "=a" (w0), "=d" (w1)                                       \
822            : "%0" ((UDItype)(u)), "rm" ((UDItype)(v)))
823 #define udiv_qrnnd(q, r, n1, n0, dx) /* d renamed to dx avoiding "=d" */\
824   __asm__ ("divq %4"                 /* stringification in K&R C */     \
825            : "=a" (q), "=d" (r)                                         \
826            : "0" ((UDItype)(n0)), "1" ((UDItype)(n1)), "rm" ((UDItype)(dx)))
827 /* bsrq destination must be a 64-bit register, hence UDItype for __cbtmp. */
828 #define count_leading_zeros(count, x)                                   \
829   do {                                                                  \
830     UDItype __cbtmp;                                                    \
831     ASSERT ((x) != 0);                                                  \
832     __asm__ ("bsrq %1,%0" : "=r" (__cbtmp) : "rm" ((UDItype)(x)));      \
833     (count) = __cbtmp ^ 63;                                             \
834   } while (0)
835 /* bsfq destination must be a 64-bit register, "%q0" forces this in case
836    count is only an int. */
837 #define count_trailing_zeros(count, x)                                  \
838   do {                                                                  \
839     ASSERT ((x) != 0);                                                  \
840     __asm__ ("bsfq %1,%q0" : "=r" (count) : "rm" ((UDItype)(x)));       \
841   } while (0)
842 #endif /* x86_64 */
843
844 #if defined (__i860__) && W_TYPE_SIZE == 32
845 #define rshift_rhlc(r,h,l,c) \
846   __asm__ ("shr %3,r0,r0\;shrd %1,%2,%0"                                \
847            "=r" (r) : "r" (h), "r" (l), "rn" (c))
848 #endif /* i860 */
849
850 #if defined (__i960__) && W_TYPE_SIZE == 32
851 #define add_ssaaaa(sh, sl, ah, al, bh, bl) \
852   __asm__ ("cmpo 1,0\;addc %5,%4,%1\;addc %3,%2,%0"                     \
853            : "=r" (sh), "=&r" (sl)                                      \
854            : "dI" (ah), "dI" (bh), "%dI" (al), "dI" (bl))
855 #define sub_ddmmss(sh, sl, ah, al, bh, bl) \
856   __asm__ ("cmpo 0,0\;subc %5,%4,%1\;subc %3,%2,%0"                     \
857            : "=r" (sh), "=&r" (sl)                                      \
858            : "dI" (ah), "dI" (bh), "dI" (al), "dI" (bl))
859 #define umul_ppmm(w1, w0, u, v) \
860   ({union {UDItype __ll;                                                \
861            struct {USItype __l, __h;} __i;                              \
862           } __x;                                                        \
863   __asm__ ("emul %2,%1,%0"                                              \
864            : "=d" (__x.__ll) : "%dI" (u), "dI" (v));                    \
865   (w1) = __x.__i.__h; (w0) = __x.__i.__l;})
866 #define __umulsidi3(u, v) \
867   ({UDItype __w;                                                        \
868     __asm__ ("emul %2,%1,%0" : "=d" (__w) : "%dI" (u), "dI" (v));       \
869     __w; })
870 #define udiv_qrnnd(q, r, nh, nl, d) \
871   do {                                                                  \
872     union {UDItype __ll;                                                \
873            struct {USItype __l, __h;} __i;                              \
874           } __nn;                                                       \
875     __nn.__i.__h = (nh); __nn.__i.__l = (nl);                           \
876     __asm__ ("ediv %d,%n,%0"                                            \
877            : "=d" (__rq.__ll) : "dI" (__nn.__ll), "dI" (d));            \
878     (r) = __rq.__i.__l; (q) = __rq.__i.__h;                             \
879   } while (0)
880 #define count_leading_zeros(count, x) \
881   do {                                                                  \
882     USItype __cbtmp;                                                    \
883     __asm__ ("scanbit %1,%0" : "=r" (__cbtmp) : "r" (x));               \
884     (count) = __cbtmp ^ 31;                                             \
885   } while (0)
886 #define COUNT_LEADING_ZEROS_0 (-32) /* sic */
887 #if defined (__i960mx)          /* what is the proper symbol to test??? */
888 #define rshift_rhlc(r,h,l,c) \
889   do {                                                                  \
890     union {UDItype __ll;                                                \
891            struct {USItype __l, __h;} __i;                              \
892           } __nn;                                                       \
893     __nn.__i.__h = (h); __nn.__i.__l = (l);                             \
894     __asm__ ("shre %2,%1,%0" : "=d" (r) : "dI" (__nn.__ll), "dI" (c));  \
895   }
896 #endif /* i960mx */
897 #endif /* i960 */
898
899 #if (defined (__mc68000__) || defined (__mc68020__) || defined(mc68020) \
900      || defined (__m68k__) || defined (__mc5200__) || defined (__mc5206e__) \
901      || defined (__mc5307__)) && W_TYPE_SIZE == 32
902 #define add_ssaaaa(sh, sl, ah, al, bh, bl) \
903   __asm__ ("add%.l %5,%1\n\taddx%.l %3,%0"                              \
904            : "=d" (sh), "=&d" (sl)                                      \
905            : "0"  ((USItype)(ah)), "d" ((USItype)(bh)),                 \
906              "%1" ((USItype)(al)), "g" ((USItype)(bl)))
907 #define sub_ddmmss(sh, sl, ah, al, bh, bl) \
908   __asm__ ("sub%.l %5,%1\n\tsubx%.l %3,%0"                              \
909            : "=d" (sh), "=&d" (sl)                                      \
910            : "0" ((USItype)(ah)), "d" ((USItype)(bh)),                  \
911              "1" ((USItype)(al)), "g" ((USItype)(bl)))
912 /* The '020, '030, '040 and CPU32 have 32x32->64 and 64/32->32q-32r.  */
913 #if defined (__mc68020__) || defined(mc68020) \
914      || defined (__mc68030__) || defined (mc68030) \
915      || defined (__mc68040__) || defined (mc68040) \
916      || defined (__mcpu32__) || defined (mcpu32) \
917      || defined (__NeXT__)
918 #define umul_ppmm(w1, w0, u, v) \
919   __asm__ ("mulu%.l %3,%1:%0"                                           \
920            : "=d" (w0), "=d" (w1)                                       \
921            : "%0" ((USItype)(u)), "dmi" ((USItype)(v)))
922 #define UMUL_TIME 45
923 #define udiv_qrnnd(q, r, n1, n0, d) \
924   __asm__ ("divu%.l %4,%1:%0"                                           \
925            : "=d" (q), "=d" (r)                                         \
926            : "0" ((USItype)(n0)), "1" ((USItype)(n1)), "dmi" ((USItype)(d)))
927 #define UDIV_TIME 90
928 #define sdiv_qrnnd(q, r, n1, n0, d) \
929   __asm__ ("divs%.l %4,%1:%0"                                           \
930            : "=d" (q), "=d" (r)                                         \
931            : "0" ((USItype)(n0)), "1" ((USItype)(n1)), "dmi" ((USItype)(d)))
932 #else /* for other 68k family members use 16x16->32 multiplication */
933 #define umul_ppmm(xh, xl, a, b) \
934   do { USItype __umul_tmp1, __umul_tmp2;                                \
935         __asm__ ("| Inlined umul_ppmm\n"                                \
936 "       move%.l %5,%3\n"                                                \
937 "       move%.l %2,%0\n"                                                \
938 "       move%.w %3,%1\n"                                                \
939 "       swap    %3\n"                                                   \
940 "       swap    %0\n"                                                   \
941 "       mulu%.w %2,%1\n"                                                \
942 "       mulu%.w %3,%0\n"                                                \
943 "       mulu%.w %2,%3\n"                                                \
944 "       swap    %2\n"                                                   \
945 "       mulu%.w %5,%2\n"                                                \
946 "       add%.l  %3,%2\n"                                                \
947 "       jcc     1f\n"                                                   \
948 "       add%.l  %#0x10000,%0\n"                                         \
949 "1:     move%.l %2,%3\n"                                                \
950 "       clr%.w  %2\n"                                                   \
951 "       swap    %2\n"                                                   \
952 "       swap    %3\n"                                                   \
953 "       clr%.w  %3\n"                                                   \
954 "       add%.l  %3,%1\n"                                                \
955 "       addx%.l %2,%0\n"                                                \
956 "       | End inlined umul_ppmm"                                        \
957               : "=&d" (xh), "=&d" (xl),                                 \
958                 "=d" (__umul_tmp1), "=&d" (__umul_tmp2)                 \
959               : "%2" ((USItype)(a)), "d" ((USItype)(b)));               \
960   } while (0)
961 #define UMUL_TIME 100
962 #define UDIV_TIME 400
963 #endif /* not mc68020 */
964 /* The '020, '030, '040 and '060 have bitfield insns.
965    GCC 3.4 defines __mc68020__ when in CPU32 mode, check for __mcpu32__ to
966    exclude bfffo on that chip (bitfield insns not available).  */
967 #if (defined (__mc68020__) || defined (mc68020)    \
968      || defined (__mc68030__) || defined (mc68030) \
969      || defined (__mc68040__) || defined (mc68040) \
970      || defined (__mc68060__) || defined (mc68060) \
971      || defined (__NeXT__))                        \
972   && ! defined (__mcpu32__)
973 #define count_leading_zeros(count, x) \
974   __asm__ ("bfffo %1{%b2:%b2},%0"                                       \
975            : "=d" (count)                                               \
976            : "od" ((USItype) (x)), "n" (0))
977 #define COUNT_LEADING_ZEROS_0 32
978 #endif
979 #endif /* mc68000 */
980
981 #if defined (__m88000__) && W_TYPE_SIZE == 32
982 #define add_ssaaaa(sh, sl, ah, al, bh, bl) \
983   __asm__ ("addu.co %1,%r4,%r5\n\taddu.ci %0,%r2,%r3"                   \
984            : "=r" (sh), "=&r" (sl)                                      \
985            : "rJ" (ah), "rJ" (bh), "%rJ" (al), "rJ" (bl))
986 #define sub_ddmmss(sh, sl, ah, al, bh, bl) \
987   __asm__ ("subu.co %1,%r4,%r5\n\tsubu.ci %0,%r2,%r3"                   \
988            : "=r" (sh), "=&r" (sl)                                      \
989            : "rJ" (ah), "rJ" (bh), "rJ" (al), "rJ" (bl))
990 #define count_leading_zeros(count, x) \
991   do {                                                                  \
992     USItype __cbtmp;                                                    \
993     __asm__ ("ff1 %0,%1" : "=r" (__cbtmp) : "r" (x));                   \
994     (count) = __cbtmp ^ 31;                                             \
995   } while (0)
996 #define COUNT_LEADING_ZEROS_0 63 /* sic */
997 #if defined (__m88110__)
998 #define umul_ppmm(wh, wl, u, v) \
999   do {                                                                  \
1000     union {UDItype __ll;                                                \
1001            struct {USItype __h, __l;} __i;                              \
1002           } __x;                                                        \
1003     __asm__ ("mulu.d %0,%1,%2" : "=r" (__x.__ll) : "r" (u), "r" (v));   \
1004     (wh) = __x.__i.__h;                                                 \
1005     (wl) = __x.__i.__l;                                                 \
1006   } while (0)
1007 #define udiv_qrnnd(q, r, n1, n0, d) \
1008   ({union {UDItype __ll;                                                \
1009            struct {USItype __h, __l;} __i;                              \
1010           } __x, __q;                                                   \
1011   __x.__i.__h = (n1); __x.__i.__l = (n0);                               \
1012   __asm__ ("divu.d %0,%1,%2"                                            \
1013            : "=r" (__q.__ll) : "r" (__x.__ll), "r" (d));                \
1014   (r) = (n0) - __q.__l * (d); (q) = __q.__l; })
1015 #define UMUL_TIME 5
1016 #define UDIV_TIME 25
1017 #else
1018 #define UMUL_TIME 17
1019 #define UDIV_TIME 150
1020 #endif /* __m88110__ */
1021 #endif /* __m88000__ */
1022
1023 #if defined (__mips) && W_TYPE_SIZE == 32
1024 #if __GMP_GNUC_PREREQ (4,4)
1025 #define umul_ppmm(w1, w0, u, v) \
1026   do {                                                                  \
1027     UDItype __ll = (UDItype)(u) * (v);                                  \
1028     w1 = __ll >> 32;                                                    \
1029     w0 = __ll;                                                          \
1030   } while (0)
1031 #endif
1032 #if !defined (umul_ppmm) && __GMP_GNUC_PREREQ (2,7)
1033 #define umul_ppmm(w1, w0, u, v) \
1034   __asm__ ("multu %2,%3" : "=l" (w0), "=h" (w1) : "d" (u), "d" (v))
1035 #endif
1036 #if !defined (umul_ppmm)
1037 #define umul_ppmm(w1, w0, u, v) \
1038   __asm__ ("multu %2,%3\n\tmflo %0\n\tmfhi %1"                          \
1039            : "=d" (w0), "=d" (w1) : "d" (u), "d" (v))
1040 #endif
1041 #define UMUL_TIME 10
1042 #define UDIV_TIME 100
1043 #endif /* __mips */
1044
1045 #if (defined (__mips) && __mips >= 3) && W_TYPE_SIZE == 64
1046 #if __GMP_GNUC_PREREQ (4,4)
1047 #define umul_ppmm(w1, w0, u, v) \
1048   do {                                                                  \
1049     typedef unsigned int __ll_UTItype __attribute__((mode(TI)));        \
1050     __ll_UTItype __ll = (__ll_UTItype)(u) * (v);                        \
1051     w1 = __ll >> 64;                                                    \
1052     w0 = __ll;                                                          \
1053   } while (0)
1054 #endif
1055 #if !defined (umul_ppmm) && __GMP_GNUC_PREREQ (2,7)
1056 #define umul_ppmm(w1, w0, u, v) \
1057   __asm__ ("dmultu %2,%3" : "=l" (w0), "=h" (w1) : "d" (u), "d" (v))
1058 #endif
1059 #if !defined (umul_ppmm)
1060 #define umul_ppmm(w1, w0, u, v) \
1061   __asm__ ("dmultu %2,%3\n\tmflo %0\n\tmfhi %1"                         \
1062            : "=d" (w0), "=d" (w1) : "d" (u), "d" (v))
1063 #endif
1064 #define UMUL_TIME 20
1065 #define UDIV_TIME 140
1066 #endif /* __mips */
1067
1068 #if defined (__mmix__) && W_TYPE_SIZE == 64
1069 #define umul_ppmm(w1, w0, u, v) \
1070   __asm__ ("MULU %0,%2,%3" : "=r" (w0), "=z" (w1) : "r" (u), "r" (v))
1071 #endif
1072
1073 #if defined (__ns32000__) && W_TYPE_SIZE == 32
1074 #define umul_ppmm(w1, w0, u, v) \
1075   ({union {UDItype __ll;                                                \
1076            struct {USItype __l, __h;} __i;                              \
1077           } __x;                                                        \
1078   __asm__ ("meid %2,%0"                                                 \
1079            : "=g" (__x.__ll)                                            \
1080            : "%0" ((USItype)(u)), "g" ((USItype)(v)));                  \
1081   (w1) = __x.__i.__h; (w0) = __x.__i.__l;})
1082 #define __umulsidi3(u, v) \
1083   ({UDItype __w;                                                        \
1084     __asm__ ("meid %2,%0"                                               \
1085              : "=g" (__w)                                               \
1086              : "%0" ((USItype)(u)), "g" ((USItype)(v)));                \
1087     __w; })
1088 #define udiv_qrnnd(q, r, n1, n0, d) \
1089   ({union {UDItype __ll;                                                \
1090            struct {USItype __l, __h;} __i;                              \
1091           } __x;                                                        \
1092   __x.__i.__h = (n1); __x.__i.__l = (n0);                               \
1093   __asm__ ("deid %2,%0"                                                 \
1094            : "=g" (__x.__ll)                                            \
1095            : "0" (__x.__ll), "g" ((USItype)(d)));                       \
1096   (r) = __x.__i.__l; (q) = __x.__i.__h; })
1097 #define count_trailing_zeros(count,x) \
1098   do {                                                                  \
1099     __asm__ ("ffsd      %2,%0"                                          \
1100              : "=r" (count)                                             \
1101              : "0" ((USItype) 0), "r" ((USItype) (x)));                 \
1102   } while (0)
1103 #endif /* __ns32000__ */
1104
1105 /* In the past we had a block of various #defines tested
1106        _ARCH_PPC    - AIX
1107        _ARCH_PWR    - AIX
1108        __powerpc__  - gcc
1109        __POWERPC__  - BEOS
1110        __ppc__      - Darwin
1111        PPC          - old gcc, GNU/Linux, SysV
1112    The plain PPC test was not good for vxWorks, since PPC is defined on all
1113    CPUs there (eg. m68k too), as a constant one is expected to compare
1114    CPU_FAMILY against.
1115
1116    At any rate, this was pretty unattractive and a bit fragile.  The use of
1117    HAVE_HOST_CPU_FAMILY is designed to cut through it all and be sure of
1118    getting the desired effect.
1119
1120    ENHANCE-ME: We should test _IBMR2 here when we add assembly support for
1121    the system vendor compilers.  (Is that vendor compilers with inline asm,
1122    or what?)  */
1123
1124 #if (HAVE_HOST_CPU_FAMILY_power || HAVE_HOST_CPU_FAMILY_powerpc)        \
1125   && W_TYPE_SIZE == 32
1126 #define add_ssaaaa(sh, sl, ah, al, bh, bl) \
1127   do {                                                                  \
1128     if (__builtin_constant_p (bh) && (bh) == 0)                         \
1129       __asm__ ("{a%I4|add%I4c} %1,%3,%4\n\t{aze|addze} %0,%2"           \
1130              : "=r" (sh), "=&r" (sl) : "r" (ah), "%r" (al), "rI" (bl));\
1131     else if (__builtin_constant_p (bh) && (bh) == ~(USItype) 0)         \
1132       __asm__ ("{a%I4|add%I4c} %1,%3,%4\n\t{ame|addme} %0,%2"           \
1133              : "=r" (sh), "=&r" (sl) : "r" (ah), "%r" (al), "rI" (bl));\
1134     else                                                                \
1135       __asm__ ("{a%I5|add%I5c} %1,%4,%5\n\t{ae|adde} %0,%2,%3"          \
1136              : "=r" (sh), "=&r" (sl)                                    \
1137              : "r" (ah), "r" (bh), "%r" (al), "rI" (bl));               \
1138   } while (0)
1139 #define sub_ddmmss(sh, sl, ah, al, bh, bl) \
1140   do {                                                                  \
1141     if (__builtin_constant_p (ah) && (ah) == 0)                         \
1142       __asm__ ("{sf%I3|subf%I3c} %1,%4,%3\n\t{sfze|subfze} %0,%2"       \
1143                : "=r" (sh), "=&r" (sl) : "r" (bh), "rI" (al), "r" (bl));\
1144     else if (__builtin_constant_p (ah) && (ah) == ~(USItype) 0)         \
1145       __asm__ ("{sf%I3|subf%I3c} %1,%4,%3\n\t{sfme|subfme} %0,%2"       \
1146                : "=r" (sh), "=&r" (sl) : "r" (bh), "rI" (al), "r" (bl));\
1147     else if (__builtin_constant_p (bh) && (bh) == 0)                    \
1148       __asm__ ("{sf%I3|subf%I3c} %1,%4,%3\n\t{ame|addme} %0,%2"         \
1149                : "=r" (sh), "=&r" (sl) : "r" (ah), "rI" (al), "r" (bl));\
1150     else if (__builtin_constant_p (bh) && (bh) == ~(USItype) 0)         \
1151       __asm__ ("{sf%I3|subf%I3c} %1,%4,%3\n\t{aze|addze} %0,%2"         \
1152                : "=r" (sh), "=&r" (sl) : "r" (ah), "rI" (al), "r" (bl));\
1153     else                                                                \
1154       __asm__ ("{sf%I4|subf%I4c} %1,%5,%4\n\t{sfe|subfe} %0,%3,%2"      \
1155                : "=r" (sh), "=&r" (sl)                                  \
1156                : "r" (ah), "r" (bh), "rI" (al), "r" (bl));              \
1157   } while (0)
1158 #define count_leading_zeros(count, x) \
1159   __asm__ ("{cntlz|cntlzw} %0,%1" : "=r" (count) : "r" (x))
1160 #define COUNT_LEADING_ZEROS_0 32
1161 #if HAVE_HOST_CPU_FAMILY_powerpc
1162 #if __GMP_GNUC_PREREQ (4,4)
1163 #define umul_ppmm(w1, w0, u, v) \
1164   do {                                                                  \
1165     UDItype __ll = (UDItype)(u) * (v);                                  \
1166     w1 = __ll >> 32;                                                    \
1167     w0 = __ll;                                                          \
1168   } while (0)
1169 #endif
1170 #if !defined (umul_ppmm)
1171 #define umul_ppmm(ph, pl, m0, m1) \
1172   do {                                                                  \
1173     USItype __m0 = (m0), __m1 = (m1);                                   \
1174     __asm__ ("mulhwu %0,%1,%2" : "=r" (ph) : "%r" (m0), "r" (m1));      \
1175     (pl) = __m0 * __m1;                                                 \
1176   } while (0)
1177 #endif
1178 #define UMUL_TIME 15
1179 #define smul_ppmm(ph, pl, m0, m1) \
1180   do {                                                                  \
1181     SItype __m0 = (m0), __m1 = (m1);                                    \
1182     __asm__ ("mulhw %0,%1,%2" : "=r" (ph) : "%r" (m0), "r" (m1));       \
1183     (pl) = __m0 * __m1;                                                 \
1184   } while (0)
1185 #define SMUL_TIME 14
1186 #define UDIV_TIME 120
1187 #else
1188 #define UMUL_TIME 8
1189 #define smul_ppmm(xh, xl, m0, m1) \
1190   __asm__ ("mul %0,%2,%3" : "=r" (xh), "=q" (xl) : "r" (m0), "r" (m1))
1191 #define SMUL_TIME 4
1192 #define sdiv_qrnnd(q, r, nh, nl, d) \
1193   __asm__ ("div %0,%2,%4" : "=r" (q), "=q" (r) : "r" (nh), "1" (nl), "r" (d))
1194 #define UDIV_TIME 100
1195 #endif
1196 #endif /* 32-bit POWER architecture variants.  */
1197
1198 /* We should test _IBMR2 here when we add assembly support for the system
1199    vendor compilers.  */
1200 #if HAVE_HOST_CPU_FAMILY_powerpc && W_TYPE_SIZE == 64
1201 #if !defined (_LONG_LONG_LIMB)
1202 /* _LONG_LONG_LIMB is ABI=mode32 where adde operates on 32-bit values.  So
1203    use adde etc only when not _LONG_LONG_LIMB.  */
1204 #define add_ssaaaa(sh, sl, ah, al, bh, bl) \
1205   do {                                                                  \
1206     if (__builtin_constant_p (bh) && (bh) == 0)                         \
1207       __asm__ ("{a%I4|add%I4c} %1,%3,%4\n\t{aze|addze} %0,%2"           \
1208              : "=r" (sh), "=&r" (sl) : "r" (ah), "%r" (al), "rI" (bl));\
1209     else if (__builtin_constant_p (bh) && (bh) == ~(UDItype) 0)         \
1210       __asm__ ("{a%I4|add%I4c} %1,%3,%4\n\t{ame|addme} %0,%2"           \
1211              : "=r" (sh), "=&r" (sl) : "r" (ah), "%r" (al), "rI" (bl));\
1212     else                                                                \
1213       __asm__ ("{a%I5|add%I5c} %1,%4,%5\n\t{ae|adde} %0,%2,%3"          \
1214              : "=r" (sh), "=&r" (sl)                                    \
1215              : "r" (ah), "r" (bh), "%r" (al), "rI" (bl));               \
1216   } while (0)
1217 /* We use "*rI" for the constant operand here, since with just "I", gcc barfs.
1218    This might seem strange, but gcc folds away the dead code late.  */
1219 #define sub_ddmmss(sh, sl, ah, al, bh, bl) \
1220   do {                                                                        \
1221     if (__builtin_constant_p (bl) && bl > -0x8000 && bl <= 0x8000) {          \
1222         if (__builtin_constant_p (ah) && (ah) == 0)                           \
1223           __asm__ ("{ai|addic} %1,%3,%4\n\t{sfze|subfze} %0,%2"               \
1224                    : "=r" (sh), "=&r" (sl) : "r" (bh), "rI" (al), "*rI" (-bl)); \
1225         else if (__builtin_constant_p (ah) && (ah) == ~(UDItype) 0)           \
1226           __asm__ ("{ai|addic} %1,%3,%4\n\t{sfme|subfme} %0,%2"               \
1227                    : "=r" (sh), "=&r" (sl) : "r" (bh), "rI" (al), "*rI" (-bl)); \
1228         else if (__builtin_constant_p (bh) && (bh) == 0)                      \
1229           __asm__ ("{ai|addic} %1,%3,%4\n\t{ame|addme} %0,%2"                 \
1230                    : "=r" (sh), "=&r" (sl) : "r" (ah), "rI" (al), "*rI" (-bl)); \
1231         else if (__builtin_constant_p (bh) && (bh) == ~(UDItype) 0)           \
1232           __asm__ ("{ai|addic} %1,%3,%4\n\t{aze|addze} %0,%2"                 \
1233                    : "=r" (sh), "=&r" (sl) : "r" (ah), "rI" (al), "*rI" (-bl)); \
1234         else                                                                  \
1235           __asm__ ("{ai|addic} %1,%4,%5\n\t{sfe|subfe} %0,%3,%2"              \
1236                    : "=r" (sh), "=&r" (sl)                                    \
1237                    : "r" (ah), "r" (bh), "rI" (al), "*rI" (-bl));             \
1238       } else {                                                                \
1239         if (__builtin_constant_p (ah) && (ah) == 0)                           \
1240           __asm__ ("{sf%I3|subf%I3c} %1,%4,%3\n\t{sfze|subfze} %0,%2"         \
1241                    : "=r" (sh), "=&r" (sl) : "r" (bh), "rI" (al), "r" (bl));  \
1242         else if (__builtin_constant_p (ah) && (ah) == ~(UDItype) 0)           \
1243           __asm__ ("{sf%I3|subf%I3c} %1,%4,%3\n\t{sfme|subfme} %0,%2"         \
1244                    : "=r" (sh), "=&r" (sl) : "r" (bh), "rI" (al), "r" (bl));  \
1245         else if (__builtin_constant_p (bh) && (bh) == 0)                      \
1246           __asm__ ("{sf%I3|subf%I3c} %1,%4,%3\n\t{ame|addme} %0,%2"           \
1247                    : "=r" (sh), "=&r" (sl) : "r" (ah), "rI" (al), "r" (bl));  \
1248         else if (__builtin_constant_p (bh) && (bh) == ~(UDItype) 0)           \
1249           __asm__ ("{sf%I3|subf%I3c} %1,%4,%3\n\t{aze|addze} %0,%2"           \
1250                    : "=r" (sh), "=&r" (sl) : "r" (ah), "rI" (al), "r" (bl));  \
1251         else                                                                  \
1252           __asm__ ("{sf%I4|subf%I4c} %1,%5,%4\n\t{sfe|subfe} %0,%3,%2"        \
1253                    : "=r" (sh), "=&r" (sl)                                    \
1254                    : "r" (ah), "r" (bh), "rI" (al), "r" (bl));                \
1255       }                                                                       \
1256   } while (0)
1257 #endif /* ! _LONG_LONG_LIMB */
1258 #define count_leading_zeros(count, x) \
1259   __asm__ ("cntlzd %0,%1" : "=r" (count) : "r" (x))
1260 #define COUNT_LEADING_ZEROS_0 64
1261 #if __GMP_GNUC_PREREQ (4,4)
1262 #define umul_ppmm(w1, w0, u, v) \
1263   do {                                                                  \
1264     typedef unsigned int __ll_UTItype __attribute__((mode(TI)));        \
1265     __ll_UTItype __ll = (__ll_UTItype)(u) * (v);                        \
1266     w1 = __ll >> 64;                                                    \
1267     w0 = __ll;                                                          \
1268   } while (0)
1269 #endif
1270 #if !defined (umul_ppmm)
1271 #define umul_ppmm(ph, pl, m0, m1) \
1272   do {                                                                  \
1273     UDItype __m0 = (m0), __m1 = (m1);                                   \
1274     __asm__ ("mulhdu %0,%1,%2" : "=r" (ph) : "%r" (m0), "r" (m1));      \
1275     (pl) = __m0 * __m1;                                                 \
1276   } while (0)
1277 #endif
1278 #define UMUL_TIME 15
1279 #define smul_ppmm(ph, pl, m0, m1) \
1280   do {                                                                  \
1281     DItype __m0 = (m0), __m1 = (m1);                                    \
1282     __asm__ ("mulhd %0,%1,%2" : "=r" (ph) : "%r" (m0), "r" (m1));       \
1283     (pl) = __m0 * __m1;                                                 \
1284   } while (0)
1285 #define SMUL_TIME 14  /* ??? */
1286 #define UDIV_TIME 120 /* ??? */
1287 #endif /* 64-bit PowerPC.  */
1288
1289 #if defined (__pyr__) && W_TYPE_SIZE == 32
1290 #define add_ssaaaa(sh, sl, ah, al, bh, bl) \
1291   __asm__ ("addw %5,%1\n\taddwc %3,%0"                                  \
1292            : "=r" (sh), "=&r" (sl)                                      \
1293            : "0"  ((USItype)(ah)), "g" ((USItype)(bh)),                 \
1294              "%1" ((USItype)(al)), "g" ((USItype)(bl)))
1295 #define sub_ddmmss(sh, sl, ah, al, bh, bl) \
1296   __asm__ ("subw %5,%1\n\tsubwb %3,%0"                                  \
1297            : "=r" (sh), "=&r" (sl)                                      \
1298            : "0" ((USItype)(ah)), "g" ((USItype)(bh)),                  \
1299              "1" ((USItype)(al)), "g" ((USItype)(bl)))
1300 /* This insn works on Pyramids with AP, XP, or MI CPUs, but not with SP.  */
1301 #define umul_ppmm(w1, w0, u, v) \
1302   ({union {UDItype __ll;                                                \
1303            struct {USItype __h, __l;} __i;                              \
1304           } __x;                                                        \
1305   __asm__ ("movw %1,%R0\n\tuemul %2,%0"                                 \
1306            : "=&r" (__x.__ll)                                           \
1307            : "g" ((USItype) (u)), "g" ((USItype)(v)));                  \
1308   (w1) = __x.__i.__h; (w0) = __x.__i.__l;})
1309 #endif /* __pyr__ */
1310
1311 #if defined (__ibm032__) /* RT/ROMP */  && W_TYPE_SIZE == 32
1312 #define add_ssaaaa(sh, sl, ah, al, bh, bl) \
1313   __asm__ ("a %1,%5\n\tae %0,%3"                                        \
1314            : "=r" (sh), "=&r" (sl)                                      \
1315            : "0"  ((USItype)(ah)), "r" ((USItype)(bh)),                 \
1316              "%1" ((USItype)(al)), "r" ((USItype)(bl)))
1317 #define sub_ddmmss(sh, sl, ah, al, bh, bl) \
1318   __asm__ ("s %1,%5\n\tse %0,%3"                                        \
1319            : "=r" (sh), "=&r" (sl)                                      \
1320            : "0" ((USItype)(ah)), "r" ((USItype)(bh)),                  \
1321              "1" ((USItype)(al)), "r" ((USItype)(bl)))
1322 #define smul_ppmm(ph, pl, m0, m1) \
1323   __asm__ (                                                             \
1324        "s       r2,r2\n"                                                \
1325 "       mts r10,%2\n"                                                   \
1326 "       m       r2,%3\n"                                                \
1327 "       m       r2,%3\n"                                                \
1328 "       m       r2,%3\n"                                                \
1329 "       m       r2,%3\n"                                                \
1330 "       m       r2,%3\n"                                                \
1331 "       m       r2,%3\n"                                                \
1332 "       m       r2,%3\n"                                                \
1333 "       m       r2,%3\n"                                                \
1334 "       m       r2,%3\n"                                                \
1335 "       m       r2,%3\n"                                                \
1336 "       m       r2,%3\n"                                                \
1337 "       m       r2,%3\n"                                                \
1338 "       m       r2,%3\n"                                                \
1339 "       m       r2,%3\n"                                                \
1340 "       m       r2,%3\n"                                                \
1341 "       m       r2,%3\n"                                                \
1342 "       cas     %0,r2,r0\n"                                             \
1343 "       mfs     r10,%1"                                                 \
1344            : "=r" (ph), "=r" (pl)                                       \
1345            : "%r" ((USItype)(m0)), "r" ((USItype)(m1))                  \
1346            : "r2")
1347 #define UMUL_TIME 20
1348 #define UDIV_TIME 200
1349 #define count_leading_zeros(count, x) \
1350   do {                                                                  \
1351     if ((x) >= 0x10000)                                                 \
1352       __asm__ ("clz     %0,%1"                                          \
1353                : "=r" (count) : "r" ((USItype)(x) >> 16));              \
1354     else                                                                \
1355       {                                                                 \
1356         __asm__ ("clz   %0,%1"                                          \
1357                  : "=r" (count) : "r" ((USItype)(x)));                  \
1358         (count) += 16;                                                  \
1359       }                                                                 \
1360   } while (0)
1361 #endif /* RT/ROMP */
1362
1363 #if defined (__sh2__) && W_TYPE_SIZE == 32
1364 #define umul_ppmm(w1, w0, u, v) \
1365   __asm__ ("dmulu.l %2,%3\n\tsts macl,%1\n\tsts mach,%0"                \
1366            : "=r" (w1), "=r" (w0) : "r" (u), "r" (v) : "macl", "mach")
1367 #define UMUL_TIME 5
1368 #endif
1369
1370 #if defined (__sparc__) && W_TYPE_SIZE == 32
1371 #define add_ssaaaa(sh, sl, ah, al, bh, bl) \
1372   __asm__ ("addcc %r4,%5,%1\n\taddx %r2,%3,%0"                          \
1373            : "=r" (sh), "=&r" (sl)                                      \
1374            : "rJ" (ah), "rI" (bh),"%rJ" (al), "rI" (bl)                 \
1375            __CLOBBER_CC)
1376 #define sub_ddmmss(sh, sl, ah, al, bh, bl) \
1377   __asm__ ("subcc %r4,%5,%1\n\tsubx %r2,%3,%0"                          \
1378            : "=r" (sh), "=&r" (sl)                                      \
1379            : "rJ" (ah), "rI" (bh), "rJ" (al), "rI" (bl) \
1380            __CLOBBER_CC)
1381 /* FIXME: When gcc -mcpu=v9 is used on solaris, gcc/config/sol2-sld-64.h
1382    doesn't define anything to indicate that to us, it only sets __sparcv8. */
1383 #if defined (__sparc_v9__) || defined (__sparcv9)
1384 /* Perhaps we should use floating-point operations here?  */
1385 #if 0
1386 /* Triggers a bug making mpz/tests/t-gcd.c fail.
1387    Perhaps we simply need explicitly zero-extend the inputs?  */
1388 #define umul_ppmm(w1, w0, u, v) \
1389   __asm__ ("mulx %2,%3,%%g1; srl %%g1,0,%1; srlx %%g1,32,%0" :          \
1390            "=r" (w1), "=r" (w0) : "r" (u), "r" (v) : "g1")
1391 #else
1392 /* Use v8 umul until above bug is fixed.  */
1393 #define umul_ppmm(w1, w0, u, v) \
1394   __asm__ ("umul %2,%3,%1;rd %%y,%0" : "=r" (w1), "=r" (w0) : "r" (u), "r" (v))
1395 #endif
1396 /* Use a plain v8 divide for v9.  */
1397 #define udiv_qrnnd(q, r, n1, n0, d) \
1398   do {                                                                  \
1399     USItype __q;                                                        \
1400     __asm__ ("mov %1,%%y;nop;nop;nop;udiv %2,%3,%0"                     \
1401              : "=r" (__q) : "r" (n1), "r" (n0), "r" (d));               \
1402     (r) = (n0) - __q * (d);                                             \
1403     (q) = __q;                                                          \
1404   } while (0)
1405 #else
1406 #if defined (__sparc_v8__)   /* gcc normal */                           \
1407   || defined (__sparcv8)     /* gcc solaris */                          \
1408   || HAVE_HOST_CPU_supersparc
1409 /* Don't match immediate range because, 1) it is not often useful,
1410    2) the 'I' flag thinks of the range as a 13 bit signed interval,
1411    while we want to match a 13 bit interval, sign extended to 32 bits,
1412    but INTERPRETED AS UNSIGNED.  */
1413 #define umul_ppmm(w1, w0, u, v) \
1414   __asm__ ("umul %2,%3,%1;rd %%y,%0" : "=r" (w1), "=r" (w0) : "r" (u), "r" (v))
1415 #define UMUL_TIME 5
1416
1417 #if HAVE_HOST_CPU_supersparc
1418 #define UDIV_TIME 60            /* SuperSPARC timing */
1419 #else
1420 /* Don't use this on SuperSPARC because its udiv only handles 53 bit
1421    dividends and will trap to the kernel for the rest. */
1422 #define udiv_qrnnd(q, r, n1, n0, d) \
1423   do {                                                                  \
1424     USItype __q;                                                        \
1425     __asm__ ("mov %1,%%y;nop;nop;nop;udiv %2,%3,%0"                     \
1426              : "=r" (__q) : "r" (n1), "r" (n0), "r" (d));               \
1427     (r) = (n0) - __q * (d);                                             \
1428     (q) = __q;                                                          \
1429   } while (0)
1430 #define UDIV_TIME 25
1431 #endif /* HAVE_HOST_CPU_supersparc */
1432
1433 #else /* ! __sparc_v8__ */
1434 #if defined (__sparclite__)
1435 /* This has hardware multiply but not divide.  It also has two additional
1436    instructions scan (ffs from high bit) and divscc.  */
1437 #define umul_ppmm(w1, w0, u, v) \
1438   __asm__ ("umul %2,%3,%1;rd %%y,%0" : "=r" (w1), "=r" (w0) : "r" (u), "r" (v))
1439 #define UMUL_TIME 5
1440 #define udiv_qrnnd(q, r, n1, n0, d) \
1441   __asm__ ("! Inlined udiv_qrnnd\n"                                     \
1442 "       wr      %%g0,%2,%%y     ! Not a delayed write for sparclite\n"  \
1443 "       tst     %%g0\n"                                                 \
1444 "       divscc  %3,%4,%%g1\n"                                           \
1445 "       divscc  %%g1,%4,%%g1\n"                                         \
1446 "       divscc  %%g1,%4,%%g1\n"                                         \
1447 "       divscc  %%g1,%4,%%g1\n"                                         \
1448 "       divscc  %%g1,%4,%%g1\n"                                         \
1449 "       divscc  %%g1,%4,%%g1\n"                                         \
1450 "       divscc  %%g1,%4,%%g1\n"                                         \
1451 "       divscc  %%g1,%4,%%g1\n"                                         \
1452 "       divscc  %%g1,%4,%%g1\n"                                         \
1453 "       divscc  %%g1,%4,%%g1\n"                                         \
1454 "       divscc  %%g1,%4,%%g1\n"                                         \
1455 "       divscc  %%g1,%4,%%g1\n"                                         \
1456 "       divscc  %%g1,%4,%%g1\n"                                         \
1457 "       divscc  %%g1,%4,%%g1\n"                                         \
1458 "       divscc  %%g1,%4,%%g1\n"                                         \
1459 "       divscc  %%g1,%4,%%g1\n"                                         \
1460 "       divscc  %%g1,%4,%%g1\n"                                         \
1461 "       divscc  %%g1,%4,%%g1\n"                                         \
1462 "       divscc  %%g1,%4,%%g1\n"                                         \
1463 "       divscc  %%g1,%4,%%g1\n"                                         \
1464 "       divscc  %%g1,%4,%%g1\n"                                         \
1465 "       divscc  %%g1,%4,%%g1\n"                                         \
1466 "       divscc  %%g1,%4,%%g1\n"                                         \
1467 "       divscc  %%g1,%4,%%g1\n"                                         \
1468 "       divscc  %%g1,%4,%%g1\n"                                         \
1469 "       divscc  %%g1,%4,%%g1\n"                                         \
1470 "       divscc  %%g1,%4,%%g1\n"                                         \
1471 "       divscc  %%g1,%4,%%g1\n"                                         \
1472 "       divscc  %%g1,%4,%%g1\n"                                         \
1473 "       divscc  %%g1,%4,%%g1\n"                                         \
1474 "       divscc  %%g1,%4,%%g1\n"                                         \
1475 "       divscc  %%g1,%4,%0\n"                                           \
1476 "       rd      %%y,%1\n"                                               \
1477 "       bl,a 1f\n"                                                      \
1478 "       add     %1,%4,%1\n"                                             \
1479 "1:     ! End of inline udiv_qrnnd"                                     \
1480            : "=r" (q), "=r" (r) : "r" (n1), "r" (n0), "rI" (d)          \
1481            : "%g1" __AND_CLOBBER_CC)
1482 #define UDIV_TIME 37
1483 #define count_leading_zeros(count, x) \
1484   __asm__ ("scan %1,1,%0" : "=r" (count) : "r" (x))
1485 /* Early sparclites return 63 for an argument of 0, but they warn that future
1486    implementations might change this.  Therefore, leave COUNT_LEADING_ZEROS_0
1487    undefined.  */
1488 #endif /* __sparclite__ */
1489 #endif /* __sparc_v8__ */
1490 #endif /* __sparc_v9__ */
1491 /* Default to sparc v7 versions of umul_ppmm and udiv_qrnnd.  */
1492 #ifndef umul_ppmm
1493 #define umul_ppmm(w1, w0, u, v) \
1494   __asm__ ("! Inlined umul_ppmm\n"                                      \
1495 "       wr      %%g0,%2,%%y     ! SPARC has 0-3 delay insn after a wr\n" \
1496 "       sra     %3,31,%%g2      ! Don't move this insn\n"               \
1497 "       and     %2,%%g2,%%g2    ! Don't move this insn\n"               \
1498 "       andcc   %%g0,0,%%g1     ! Don't move this insn\n"               \
1499 "       mulscc  %%g1,%3,%%g1\n"                                         \
1500 "       mulscc  %%g1,%3,%%g1\n"                                         \
1501 "       mulscc  %%g1,%3,%%g1\n"                                         \
1502 "       mulscc  %%g1,%3,%%g1\n"                                         \
1503 "       mulscc  %%g1,%3,%%g1\n"                                         \
1504 "       mulscc  %%g1,%3,%%g1\n"                                         \
1505 "       mulscc  %%g1,%3,%%g1\n"                                         \
1506 "       mulscc  %%g1,%3,%%g1\n"                                         \
1507 "       mulscc  %%g1,%3,%%g1\n"                                         \
1508 "       mulscc  %%g1,%3,%%g1\n"                                         \
1509 "       mulscc  %%g1,%3,%%g1\n"                                         \
1510 "       mulscc  %%g1,%3,%%g1\n"                                         \
1511 "       mulscc  %%g1,%3,%%g1\n"                                         \
1512 "       mulscc  %%g1,%3,%%g1\n"                                         \
1513 "       mulscc  %%g1,%3,%%g1\n"                                         \
1514 "       mulscc  %%g1,%3,%%g1\n"                                         \
1515 "       mulscc  %%g1,%3,%%g1\n"                                         \
1516 "       mulscc  %%g1,%3,%%g1\n"                                         \
1517 "       mulscc  %%g1,%3,%%g1\n"                                         \
1518 "       mulscc  %%g1,%3,%%g1\n"                                         \
1519 "       mulscc  %%g1,%3,%%g1\n"                                         \
1520 "       mulscc  %%g1,%3,%%g1\n"                                         \
1521 "       mulscc  %%g1,%3,%%g1\n"                                         \
1522 "       mulscc  %%g1,%3,%%g1\n"                                         \
1523 "       mulscc  %%g1,%3,%%g1\n"                                         \
1524 "       mulscc  %%g1,%3,%%g1\n"                                         \
1525 "       mulscc  %%g1,%3,%%g1\n"                                         \
1526 "       mulscc  %%g1,%3,%%g1\n"                                         \
1527 "       mulscc  %%g1,%3,%%g1\n"                                         \
1528 "       mulscc  %%g1,%3,%%g1\n"                                         \
1529 "       mulscc  %%g1,%3,%%g1\n"                                         \
1530 "       mulscc  %%g1,%3,%%g1\n"                                         \
1531 "       mulscc  %%g1,0,%%g1\n"                                          \
1532 "       add     %%g1,%%g2,%0\n"                                         \
1533 "       rd      %%y,%1"                                                 \
1534            : "=r" (w1), "=r" (w0) : "%rI" (u), "r" (v)                  \
1535            : "%g1", "%g2" __AND_CLOBBER_CC)
1536 #define UMUL_TIME 39            /* 39 instructions */
1537 #endif
1538 #ifndef udiv_qrnnd
1539 #ifndef LONGLONG_STANDALONE
1540 #define udiv_qrnnd(q, r, n1, n0, d) \
1541   do { UWtype __r;                                                      \
1542     (q) = __MPN(udiv_qrnnd) (&__r, (n1), (n0), (d));                    \
1543     (r) = __r;                                                          \
1544   } while (0)
1545 extern UWtype __MPN(udiv_qrnnd) _PROTO ((UWtype *, UWtype, UWtype, UWtype));
1546 #ifndef UDIV_TIME
1547 #define UDIV_TIME 140
1548 #endif
1549 #endif /* LONGLONG_STANDALONE */
1550 #endif /* udiv_qrnnd */
1551 #endif /* __sparc__ */
1552
1553 #if defined (__sparc__) && W_TYPE_SIZE == 64
1554 #define add_ssaaaa(sh, sl, ah, al, bh, bl) \
1555   __asm__ (                                                             \
1556        "addcc   %r4,%5,%1\n"                                            \
1557       " addccc  %r6,%7,%%g0\n"                                          \
1558       " addc    %r2,%3,%0"                                              \
1559           : "=r" (sh), "=&r" (sl)                                       \
1560           : "rJ" (ah), "rI" (bh), "%rJ" (al), "rI" (bl),                \
1561             "%rJ" ((al) >> 32), "rI" ((bl) >> 32)                       \
1562            __CLOBBER_CC)
1563 #define sub_ddmmss(sh, sl, ah, al, bh, bl) \
1564   __asm__ (                                                             \
1565        "subcc   %r4,%5,%1\n"                                            \
1566       " subccc  %r6,%7,%%g0\n"                                          \
1567       " subc    %r2,%3,%0"                                              \
1568           : "=r" (sh), "=&r" (sl)                                       \
1569           : "rJ" (ah), "rI" (bh), "rJ" (al), "rI" (bl),         \
1570             "rJ" ((al) >> 32), "rI" ((bl) >> 32)                        \
1571            __CLOBBER_CC)
1572 #endif
1573
1574 #if defined (__vax__) && W_TYPE_SIZE == 32
1575 #define add_ssaaaa(sh, sl, ah, al, bh, bl) \
1576   __asm__ ("addl2 %5,%1\n\tadwc %3,%0"                                  \
1577            : "=g" (sh), "=&g" (sl)                                      \
1578            : "0"  ((USItype)(ah)), "g" ((USItype)(bh)),                 \
1579              "%1" ((USItype)(al)), "g" ((USItype)(bl)))
1580 #define sub_ddmmss(sh, sl, ah, al, bh, bl) \
1581   __asm__ ("subl2 %5,%1\n\tsbwc %3,%0"                                  \
1582            : "=g" (sh), "=&g" (sl)                                      \
1583            : "0" ((USItype)(ah)), "g" ((USItype)(bh)),                  \
1584              "1" ((USItype)(al)), "g" ((USItype)(bl)))
1585 #define smul_ppmm(xh, xl, m0, m1) \
1586   do {                                                                  \
1587     union {UDItype __ll;                                                \
1588            struct {USItype __l, __h;} __i;                              \
1589           } __x;                                                        \
1590     USItype __m0 = (m0), __m1 = (m1);                                   \
1591     __asm__ ("emul %1,%2,$0,%0"                                         \
1592              : "=g" (__x.__ll) : "g" (__m0), "g" (__m1));               \
1593     (xh) = __x.__i.__h; (xl) = __x.__i.__l;                             \
1594   } while (0)
1595 #define sdiv_qrnnd(q, r, n1, n0, d) \
1596   do {                                                                  \
1597     union {DItype __ll;                                                 \
1598            struct {SItype __l, __h;} __i;                               \
1599           } __x;                                                        \
1600     __x.__i.__h = n1; __x.__i.__l = n0;                                 \
1601     __asm__ ("ediv %3,%2,%0,%1"                                         \
1602              : "=g" (q), "=g" (r) : "g" (__x.__ll), "g" (d));           \
1603   } while (0)
1604 #if 0
1605 /* FIXME: This instruction appears to be unimplemented on some systems (vax
1606    8800 maybe). */
1607 #define count_trailing_zeros(count,x)                                   \
1608   do {                                                                  \
1609     __asm__ ("ffs 0, 31, %1, %0"                                        \
1610              : "=g" (count)                                             \
1611              : "g" ((USItype) (x)));                                    \
1612   } while (0)
1613 #endif
1614 #endif /* __vax__ */
1615
1616 #if defined (__z8000__) && W_TYPE_SIZE == 16
1617 #define add_ssaaaa(sh, sl, ah, al, bh, bl) \
1618   __asm__ ("add %H1,%H5\n\tadc  %H0,%H3"                                \
1619            : "=r" (sh), "=&r" (sl)                                      \
1620            : "0"  ((unsigned int)(ah)), "r" ((unsigned int)(bh)),       \
1621              "%1" ((unsigned int)(al)), "rQR" ((unsigned int)(bl)))
1622 #define sub_ddmmss(sh, sl, ah, al, bh, bl) \
1623   __asm__ ("sub %H1,%H5\n\tsbc  %H0,%H3"                                \
1624            : "=r" (sh), "=&r" (sl)                                      \
1625            : "0" ((unsigned int)(ah)), "r" ((unsigned int)(bh)),        \
1626              "1" ((unsigned int)(al)), "rQR" ((unsigned int)(bl)))
1627 #define umul_ppmm(xh, xl, m0, m1) \
1628   do {                                                                  \
1629     union {long int __ll;                                               \
1630            struct {unsigned int __h, __l;} __i;                         \
1631           } __x;                                                        \
1632     unsigned int __m0 = (m0), __m1 = (m1);                              \
1633     __asm__ ("mult      %S0,%H3"                                        \
1634              : "=r" (__x.__i.__h), "=r" (__x.__i.__l)                   \
1635              : "%1" (m0), "rQR" (m1));                                  \
1636     (xh) = __x.__i.__h; (xl) = __x.__i.__l;                             \
1637     (xh) += ((((signed int) __m0 >> 15) & __m1)                         \
1638              + (((signed int) __m1 >> 15) & __m0));                     \
1639   } while (0)
1640 #endif /* __z8000__ */
1641
1642 #endif /* __GNUC__ */
1643
1644 #endif /* NO_ASM */
1645
1646
1647 #if !defined (umul_ppmm) && defined (__umulsidi3)
1648 #define umul_ppmm(ph, pl, m0, m1) \
1649   {                                                                     \
1650     UDWtype __ll = __umulsidi3 (m0, m1);                                \
1651     ph = (UWtype) (__ll >> W_TYPE_SIZE);                                \
1652     pl = (UWtype) __ll;                                                 \
1653   }
1654 #endif
1655
1656 #if !defined (__umulsidi3)
1657 #define __umulsidi3(u, v) \
1658   ({UWtype __hi, __lo;                                                  \
1659     umul_ppmm (__hi, __lo, u, v);                                       \
1660     ((UDWtype) __hi << W_TYPE_SIZE) | __lo; })
1661 #endif
1662
1663
1664 /* Use mpn_umul_ppmm or mpn_udiv_qrnnd functions, if they exist.  The "_r"
1665    forms have "reversed" arguments, meaning the pointer is last, which
1666    sometimes allows better parameter passing, in particular on 64-bit
1667    hppa. */
1668
1669 #define mpn_umul_ppmm  __MPN(umul_ppmm)
1670 extern UWtype mpn_umul_ppmm _PROTO ((UWtype *, UWtype, UWtype));
1671
1672 #if ! defined (umul_ppmm) && HAVE_NATIVE_mpn_umul_ppmm  \
1673   && ! defined (LONGLONG_STANDALONE)
1674 #define umul_ppmm(wh, wl, u, v)                                               \
1675   do {                                                                        \
1676     UWtype __umul_ppmm__p0;                                                   \
1677     (wh) = mpn_umul_ppmm (&__umul_ppmm__p0, (UWtype) (u), (UWtype) (v));      \
1678     (wl) = __umul_ppmm__p0;                                                   \
1679   } while (0)
1680 #endif
1681
1682 #define mpn_umul_ppmm_r  __MPN(umul_ppmm_r)
1683 extern UWtype mpn_umul_ppmm_r _PROTO ((UWtype, UWtype, UWtype *));
1684
1685 #if ! defined (umul_ppmm) && HAVE_NATIVE_mpn_umul_ppmm_r        \
1686   && ! defined (LONGLONG_STANDALONE)
1687 #define umul_ppmm(wh, wl, u, v)                                               \
1688   do {                                                                        \
1689     UWtype __umul_ppmm__p0;                                                   \
1690     (wh) = mpn_umul_ppmm_r ((UWtype) (u), (UWtype) (v), &__umul_ppmm__p0);    \
1691     (wl) = __umul_ppmm__p0;                                                   \
1692   } while (0)
1693 #endif
1694
1695 #define mpn_udiv_qrnnd  __MPN(udiv_qrnnd)
1696 extern UWtype mpn_udiv_qrnnd _PROTO ((UWtype *, UWtype, UWtype, UWtype));
1697
1698 #if ! defined (udiv_qrnnd) && HAVE_NATIVE_mpn_udiv_qrnnd        \
1699   && ! defined (LONGLONG_STANDALONE)
1700 #define udiv_qrnnd(q, r, n1, n0, d)                                     \
1701   do {                                                                  \
1702     UWtype __udiv_qrnnd__r;                                             \
1703     (q) = mpn_udiv_qrnnd (&__udiv_qrnnd__r,                             \
1704                           (UWtype) (n1), (UWtype) (n0), (UWtype) d);    \
1705     (r) = __udiv_qrnnd__r;                                              \
1706   } while (0)
1707 #endif
1708
1709 #define mpn_udiv_qrnnd_r  __MPN(udiv_qrnnd_r)
1710 extern UWtype mpn_udiv_qrnnd_r _PROTO ((UWtype, UWtype, UWtype, UWtype *));
1711
1712 #if ! defined (udiv_qrnnd) && HAVE_NATIVE_mpn_udiv_qrnnd_r      \
1713   && ! defined (LONGLONG_STANDALONE)
1714 #define udiv_qrnnd(q, r, n1, n0, d)                                     \
1715   do {                                                                  \
1716     UWtype __udiv_qrnnd__r;                                             \
1717     (q) = mpn_udiv_qrnnd_r ((UWtype) (n1), (UWtype) (n0), (UWtype) d,   \
1718                             &__udiv_qrnnd__r);                          \
1719     (r) = __udiv_qrnnd__r;                                              \
1720   } while (0)
1721 #endif
1722
1723
1724 /* If this machine has no inline assembler, use C macros.  */
1725
1726 #if !defined (add_ssaaaa)
1727 #define add_ssaaaa(sh, sl, ah, al, bh, bl) \
1728   do {                                                                  \
1729     UWtype __x;                                                         \
1730     __x = (al) + (bl);                                                  \
1731     (sh) = (ah) + (bh) + (__x < (al));                                  \
1732     (sl) = __x;                                                         \
1733   } while (0)
1734 #endif
1735
1736 #if !defined (sub_ddmmss)
1737 #define sub_ddmmss(sh, sl, ah, al, bh, bl) \
1738   do {                                                                  \
1739     UWtype __x;                                                         \
1740     __x = (al) - (bl);                                                  \
1741     (sh) = (ah) - (bh) - ((al) < (bl));                                 \
1742     (sl) = __x;                                                         \
1743   } while (0)
1744 #endif
1745
1746 /* If we lack umul_ppmm but have smul_ppmm, define umul_ppmm in terms of
1747    smul_ppmm.  */
1748 #if !defined (umul_ppmm) && defined (smul_ppmm)
1749 #define umul_ppmm(w1, w0, u, v)                                         \
1750   do {                                                                  \
1751     UWtype __w1;                                                        \
1752     UWtype __xm0 = (u), __xm1 = (v);                                    \
1753     smul_ppmm (__w1, w0, __xm0, __xm1);                                 \
1754     (w1) = __w1 + (-(__xm0 >> (W_TYPE_SIZE - 1)) & __xm1)               \
1755                 + (-(__xm1 >> (W_TYPE_SIZE - 1)) & __xm0);              \
1756   } while (0)
1757 #endif
1758
1759 /* If we still don't have umul_ppmm, define it using plain C.
1760
1761    For reference, when this code is used for squaring (ie. u and v identical
1762    expressions), gcc recognises __x1 and __x2 are the same and generates 3
1763    multiplies, not 4.  The subsequent additions could be optimized a bit,
1764    but the only place GMP currently uses such a square is mpn_sqr_basecase,
1765    and chips obliged to use this generic C umul will have plenty of worse
1766    performance problems than a couple of extra instructions on the diagonal
1767    of sqr_basecase.  */
1768
1769 #if !defined (umul_ppmm)
1770 #define umul_ppmm(w1, w0, u, v)                                         \
1771   do {                                                                  \
1772     UWtype __x0, __x1, __x2, __x3;                                      \
1773     UHWtype __ul, __vl, __uh, __vh;                                     \
1774     UWtype __u = (u), __v = (v);                                        \
1775                                                                         \
1776     __ul = __ll_lowpart (__u);                                          \
1777     __uh = __ll_highpart (__u);                                         \
1778     __vl = __ll_lowpart (__v);                                          \
1779     __vh = __ll_highpart (__v);                                         \
1780                                                                         \
1781     __x0 = (UWtype) __ul * __vl;                                        \
1782     __x1 = (UWtype) __ul * __vh;                                        \
1783     __x2 = (UWtype) __uh * __vl;                                        \
1784     __x3 = (UWtype) __uh * __vh;                                        \
1785                                                                         \
1786     __x1 += __ll_highpart (__x0);/* this can't give carry */            \
1787     __x1 += __x2;               /* but this indeed can */               \
1788     if (__x1 < __x2)            /* did we get it? */                    \
1789       __x3 += __ll_B;           /* yes, add it in the proper pos. */    \
1790                                                                         \
1791     (w1) = __x3 + __ll_highpart (__x1);                                 \
1792     (w0) = (__x1 << W_TYPE_SIZE/2) + __ll_lowpart (__x0);               \
1793   } while (0)
1794 #endif
1795
1796 /* If we don't have smul_ppmm, define it using umul_ppmm (which surely will
1797    exist in one form or another.  */
1798 #if !defined (smul_ppmm)
1799 #define smul_ppmm(w1, w0, u, v)                                         \
1800   do {                                                                  \
1801     UWtype __w1;                                                        \
1802     UWtype __xm0 = (u), __xm1 = (v);                                    \
1803     umul_ppmm (__w1, w0, __xm0, __xm1);                                 \
1804     (w1) = __w1 - (-(__xm0 >> (W_TYPE_SIZE - 1)) & __xm1)               \
1805                 - (-(__xm1 >> (W_TYPE_SIZE - 1)) & __xm0);              \
1806   } while (0)
1807 #endif
1808
1809 /* Define this unconditionally, so it can be used for debugging.  */
1810 #define __udiv_qrnnd_c(q, r, n1, n0, d) \
1811   do {                                                                  \
1812     UWtype __d1, __d0, __q1, __q0, __r1, __r0, __m;                     \
1813                                                                         \
1814     ASSERT ((d) != 0);                                                  \
1815     ASSERT ((n1) < (d));                                                \
1816                                                                         \
1817     __d1 = __ll_highpart (d);                                           \
1818     __d0 = __ll_lowpart (d);                                            \
1819                                                                         \
1820     __q1 = (n1) / __d1;                                                 \
1821     __r1 = (n1) - __q1 * __d1;                                          \
1822     __m = __q1 * __d0;                                                  \
1823     __r1 = __r1 * __ll_B | __ll_highpart (n0);                          \
1824     if (__r1 < __m)                                                     \
1825       {                                                                 \
1826         __q1--, __r1 += (d);                                            \
1827         if (__r1 >= (d)) /* i.e. we didn't get carry when adding to __r1 */\
1828           if (__r1 < __m)                                               \
1829             __q1--, __r1 += (d);                                        \
1830       }                                                                 \
1831     __r1 -= __m;                                                        \
1832                                                                         \
1833     __q0 = __r1 / __d1;                                                 \
1834     __r0 = __r1  - __q0 * __d1;                                         \
1835     __m = __q0 * __d0;                                                  \
1836     __r0 = __r0 * __ll_B | __ll_lowpart (n0);                           \
1837     if (__r0 < __m)                                                     \
1838       {                                                                 \
1839         __q0--, __r0 += (d);                                            \
1840         if (__r0 >= (d))                                                \
1841           if (__r0 < __m)                                               \
1842             __q0--, __r0 += (d);                                        \
1843       }                                                                 \
1844     __r0 -= __m;                                                        \
1845                                                                         \
1846     (q) = __q1 * __ll_B | __q0;                                         \
1847     (r) = __r0;                                                         \
1848   } while (0)
1849
1850 /* If the processor has no udiv_qrnnd but sdiv_qrnnd, go through
1851    __udiv_w_sdiv (defined in libgcc or elsewhere).  */
1852 #if !defined (udiv_qrnnd) && defined (sdiv_qrnnd)
1853 #define udiv_qrnnd(q, r, nh, nl, d) \
1854   do {                                                                  \
1855     UWtype __r;                                                         \
1856     (q) = __MPN(udiv_w_sdiv) (&__r, nh, nl, d);                         \
1857     (r) = __r;                                                          \
1858   } while (0)
1859 #endif
1860
1861 /* If udiv_qrnnd was not defined for this processor, use __udiv_qrnnd_c.  */
1862 #if !defined (udiv_qrnnd)
1863 #define UDIV_NEEDS_NORMALIZATION 1
1864 #define udiv_qrnnd __udiv_qrnnd_c
1865 #endif
1866
1867 #if !defined (count_leading_zeros)
1868 #define count_leading_zeros(count, x) \
1869   do {                                                                  \
1870     UWtype __xr = (x);                                                  \
1871     UWtype __a;                                                         \
1872                                                                         \
1873     if (W_TYPE_SIZE == 32)                                              \
1874       {                                                                 \
1875         __a = __xr < ((UWtype) 1 << 2*__BITS4)                          \
1876           ? (__xr < ((UWtype) 1 << __BITS4) ? 1 : __BITS4 + 1)          \
1877           : (__xr < ((UWtype) 1 << 3*__BITS4) ? 2*__BITS4 + 1           \
1878           : 3*__BITS4 + 1);                                             \
1879       }                                                                 \
1880     else                                                                \
1881       {                                                                 \
1882         for (__a = W_TYPE_SIZE - 8; __a > 0; __a -= 8)                  \
1883           if (((__xr >> __a) & 0xff) != 0)                              \
1884             break;                                                      \
1885         ++__a;                                                          \
1886       }                                                                 \
1887                                                                         \
1888     (count) = W_TYPE_SIZE + 1 - __a - __clz_tab[__xr >> __a];           \
1889   } while (0)
1890 /* This version gives a well-defined value for zero. */
1891 #define COUNT_LEADING_ZEROS_0 (W_TYPE_SIZE - 1)
1892 #define COUNT_LEADING_ZEROS_NEED_CLZ_TAB
1893 #endif
1894
1895 /* clz_tab needed by mpn/x86/pentium/mod_1.asm in a fat binary */
1896 #if HAVE_HOST_CPU_FAMILY_x86 && WANT_FAT_BINARY
1897 #define COUNT_LEADING_ZEROS_NEED_CLZ_TAB
1898 #endif
1899
1900 #ifdef COUNT_LEADING_ZEROS_NEED_CLZ_TAB
1901 extern const unsigned char __GMP_DECLSPEC __clz_tab[128];
1902 #endif
1903
1904 #if !defined (count_trailing_zeros)
1905 /* Define count_trailing_zeros using count_leading_zeros.  The latter might be
1906    defined in asm, but if it is not, the C version above is good enough.  */
1907 #define count_trailing_zeros(count, x) \
1908   do {                                                                  \
1909     UWtype __ctz_x = (x);                                               \
1910     UWtype __ctz_c;                                                     \
1911     ASSERT (__ctz_x != 0);                                              \
1912     count_leading_zeros (__ctz_c, __ctz_x & -__ctz_x);                  \
1913     (count) = W_TYPE_SIZE - 1 - __ctz_c;                                \
1914   } while (0)
1915 #endif
1916
1917 #ifndef UDIV_NEEDS_NORMALIZATION
1918 #define UDIV_NEEDS_NORMALIZATION 0
1919 #endif
1920
1921 /* Whether udiv_qrnnd is actually implemented with udiv_qrnnd_preinv, and
1922    that hence the latter should always be used.  */
1923 #ifndef UDIV_PREINV_ALWAYS
1924 #define UDIV_PREINV_ALWAYS 0
1925 #endif
1926
1927 /* Give defaults for UMUL_TIME and UDIV_TIME.  */
1928 #ifndef UMUL_TIME
1929 #define UMUL_TIME 1
1930 #endif
1931
1932 #ifndef UDIV_TIME
1933 #define UDIV_TIME UMUL_TIME
1934 #endif