1 /* mpn_sb_div_q -- schoolbook division with 2-limb sloppy non-greater
2 precomputed inverse, returning an accurate quotient.
4 Contributed to the GNU project by Torbjörn Granlund.
6 THE FUNCTIONS IN THIS FILE ARE INTERNAL WITH A MUTABLE INTERFACE. IT IS
7 ONLY SAFE TO REACH THEM THROUGH DOCUMENTED INTERFACES. IN FACT, IT IS
8 ALMOST GUARANTEED THAT THEY WILL CHANGE OR DISAPPEAR IN A FUTURE GNU MP RELEASE.
11 Copyright 2006, 2007 Free Software Foundation, Inc.
13 This file is part of the GNU MP Library.
15 The GNU MP Library is free software; you can redistribute it and/or modify
16 it under the terms of the GNU Lesser General Public License as published by
17 the Free Software Foundation; either version 3 of the License, or (at your
18 option) any later version.
20 The GNU MP Library is distributed in the hope that it will be useful, but
21 WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
22 or FITNESS FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public
23 License for more details.
25 You should have received a copy of the GNU Lesser General Public License
26 along with the GNU MP Library. If not, see http://www.gnu.org/licenses/. */
34 1. Should it demand normalized operands like now, or normalize on-the-fly?
35 2. Overwrites {np,nn}.
36 3. Uses mpn_submul_1. It would be nice to somehow make it use mpn_addmul_1
37 instead. (That would open for mpn_addmul_2 straightforwardly.)
41 mpn_sb_div_q (mp_ptr qp,
42 mp_ptr np, mp_size_t nn,
43 mp_srcptr dp, mp_size_t dn,
46 mp_limb_t q, q10, q01a, q00a, q01b, q00b;
53 mp_size_t dn_orig = dn;
54 mp_srcptr dp_orig = dp;
59 ASSERT ((dp[dn-1] & GMP_NUMB_HIGHBIT) != 0);
60 ASSERT (! MPN_OVERLAP_P (np, nn, dp, dn));
61 ASSERT (! MPN_OVERLAP_P (qp, nn-dn, dp, dn));
62 ASSERT (! MPN_OVERLAP_P (qp, nn-dn, np, nn) || qp+dn >= np);
74 qh = mpn_cmp (np - dn, dp, dn) >= 0;
76 mpn_sub_n (np - dn, np - dn, dp, dn);
79 di1 = dip[1]; di0 = dip[0];
80 for (i = qn; i >= dn; i--)
83 umul_ppmm (q, q10, np[0], di1);
84 umul_ppmm (q01a, q00a, np[-1], di1);
85 add_ssaaaa (q, q10, q, q10, np[0], q01a);
86 umul_ppmm (q01b, q00b, np[0], di0);
87 add_ssaaaa (q, q10, q, q10, 0, q01b);
88 add_ssaaaa (q, q10, q, q10, 0, np[-1]);
90 cy = mpn_submul_1 (np - dn, dp, dn, q);
92 if (UNLIKELY (np[0] > cy || mpn_cmp (np - dn, dp, dn) >= 0))
95 mpn_sub_n (np - dn, np - dn, dp, dn);
101 for (i = dn - 1; i > 0; i--)
104 umul_ppmm (q, q10, np[0], di1);
105 umul_ppmm (q01a, q00a, np[-1], di1);
106 add_ssaaaa (q, q10, q, q10, np[0], q01a);
107 umul_ppmm (q01b, q00b, np[0], di0);
108 add_ssaaaa (q, q10, q, q10, 0, q01b);
109 add_ssaaaa (q, q10, q, q10, 0, np[-1]);
111 cy = mpn_submul_1 (np - dn, dp, dn, q);
113 if (UNLIKELY (np[0] > cy || mpn_cmp (np - dn, dp, dn) >= 0))
119 mpn_sub_n (np - dn, np - dn, dp, dn);
124 /* Truncate operands. */
128 /* The partial remainder might be equal to the truncated divisor,
129 thus non-canonical. When that happens, the rest of the quotient
130 should be all ones. */
131 if (UNLIKELY (mpn_cmp (np - dn, dp, dn) == 0))
134 *--qp = GMP_NUMB_MAX;
140 if (UNLIKELY (np[-1] < dn))
144 /* The quotient may be too large if the remainder is small. Recompute
145 for above ignored operand parts, until the remainder spills.
147 FIXME: The quality of this code isn't the same as the code above.
148 1. We don't compute things in an optimal order, high-to-low, in order
149 to terminate as quickly as possible.
150 2. We mess with pointers and sizes, adding and subtracting and
151 adjusting to get things right. It surely could be streamlined.
152 3. The only termination criteria are that we determine that the
153 quotient needs to be adjusted, or that we have recomputed
154 everything. We should stop when the remainder is so large
155 that no additional subtracting could make it spill.
156 4. If nothing else, we should not do two loops of submul_1 over the
157 data, instead handle both the triangularization and chopping at
164 /* Compensate for triangularization. */
176 for (i = dn - 3; i >= 0; i--)
179 cy = mpn_submul_1 (np - (dn - i), dp, dn - i - 2, q);
185 cy = mpn_sub_1 (qp, qp, qn, 1);
186 ASSERT_ALWAYS (cy == 0);
199 /* Compensate for ignored dividend and divisor tails. */
209 cy = mpn_sub_n (np + qn, np + qn, dp, dn - (qn + 1));
214 cy = mpn_sub_1 (qp, qp, qn, 1);
221 for (i = dn - qn - 2; i >= 0; i--)
223 cy = mpn_submul_1 (np + i, qp, qn, dp[i]);
224 cy = mpn_sub_1 (np + qn + i, np + qn + i, dn - qn - i - 1, cy);
229 cy = mpn_sub_1 (qp, qp, qn, 1);
230 ASSERT_ALWAYS (cy == 0);