/* mpn_preinv_divrem_1 -- mpn by limb division with pre-inverted divisor.

   THE FUNCTIONS IN THIS FILE ARE FOR INTERNAL USE ONLY.  THEY'RE ALMOST
   CERTAIN TO BE SUBJECT TO INCOMPATIBLE CHANGES OR DISAPPEAR COMPLETELY IN
   FUTURE GNU MP RELEASES.

Copyright 2000, 2001, 2002, 2003 Free Software Foundation, Inc.

This file is part of the GNU MP Library.

The GNU MP Library is free software; you can redistribute it and/or modify
it under the terms of the GNU Lesser General Public License as published by
the Free Software Foundation; either version 3 of the License, or (at your
option) any later version.

The GNU MP Library is distributed in the hope that it will be useful, but
WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU Lesser General Public
License for more details.

You should have received a copy of the GNU Lesser General Public License
along with the GNU MP Library.  If not, see http://www.gnu.org/licenses/. */
29 /* Don't bloat a shared library with unused code. */
30 #if USE_PREINV_DIVREM_1
32 /* Same test here for skipping one divide step as in mpn_divrem_1.
34 The main reason for a separate shift==0 case is that not all CPUs give
35 zero for "n0 >> GMP_LIMB_BITS" which would arise in the general case
36 code used on shift==0. shift==0 is also reasonably common in mp_bases
37 big_base, for instance base==10 on a 64-bit limb.
39 Under shift!=0 it would be possible to call mpn_lshift to adjust the
40 dividend all in one go (into the quotient space say), rather than
41 limb-by-limb in the loop. This might help if mpn_lshift is a lot faster
42 than what the compiler can generate for EXTRACT. But this is left to CPU
43 specific implementations to consider, especially since EXTRACT isn't on
46 If size==0 then the result is simply xsize limbs of zeros, but nothing
47 special is done for that, since it wouldn't be a usual call, and
48 certainly never arises from mpn_get_str which is our main caller. */
51 mpn_preinv_divrem_1 (mp_ptr qp, mp_size_t xsize,
52 mp_srcptr ap, mp_size_t size, mp_limb_t d_unnorm,
53 mp_limb_t dinv, int shift)
55 mp_limb_t ahigh, qhigh, r;
62 ASSERT (d_unnorm != 0);
67 count_leading_zeros (want_shift, d_unnorm);
68 ASSERT (shift == want_shift);
69 invert_limb (want_dinv, d_unnorm << shift);
70 ASSERT (dinv == want_dinv);
73 /* FIXME: What's the correct overlap rule when xsize!=0? */
74 ASSERT (MPN_SAME_OR_SEPARATE_P (qp+xsize, ap, size));
77 d = d_unnorm << shift;
78 qp += (size + xsize - 1); /* dest high limb */
82 /* High quotient limb is 0 or 1, and skip a divide step. */
85 r = (qhigh ? r-d : r);
89 for (i = size-1; i >= 0; i--)
92 udiv_qrnnd_preinv (*qp, r, r, n0, d, dinv);
109 r |= n1 >> (GMP_LIMB_BITS - shift);
111 for (i = size-2; i >= 0; i--)
115 udiv_qrnnd_preinv (*qp, r, r,
116 ((n1 << shift) | (n0 >> (GMP_LIMB_BITS - shift))),
121 udiv_qrnnd_preinv (*qp, r, r, n1 << shift, d, dinv);
126 for (i = 0; i < xsize; i++)
128 udiv_qrnnd_preinv (*qp, r, r, CNST_LIMB(0), d, dinv);
135 #endif /* USE_PREINV_DIVREM_1 */