/* mpn_mu_div_qr, mpn_preinv_mu_div_qr.
   Compute Q = floor(N / D) and R = N - Q*D.  N is nn limbs and D is dn limbs
   and must be normalized, and Q must be nn-dn limbs.  The requirement that Q
   is nn-dn limbs (and not nn-dn+1 limbs) allows N to be left unmodified
   during the operation.  (An illustrative usage sketch follows this header
   comment.)
   Contributed to the GNU project by Torbjorn Granlund.
   THE FUNCTIONS IN THIS FILE ARE INTERNAL WITH MUTABLE INTERFACES.  IT IS ONLY
   SAFE TO REACH THEM THROUGH DOCUMENTED INTERFACES.  IN FACT, IT IS ALMOST
   GUARANTEED THAT THEY WILL CHANGE OR DISAPPEAR IN A FUTURE GMP RELEASE.
   Copyright 2005, 2006, 2007, 2009, 2010 Free Software Foundation, Inc.
   This file is part of the GNU MP Library.

   The GNU MP Library is free software; you can redistribute it and/or modify
   it under the terms of the GNU Lesser General Public License as published by
   the Free Software Foundation; either version 3 of the License, or (at your
   option) any later version.

   The GNU MP Library is distributed in the hope that it will be useful, but
   WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
   or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU Lesser General Public
   License for more details.

   You should have received a copy of the GNU Lesser General Public License
   along with the GNU MP Library.  If not, see http://www.gnu.org/licenses/.  */
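
/* Illustrative usage sketch (not part of the library; the TMP_* allocation
   shown is just one possibility): the caller supplies qp with room for nn-dn
   limbs, rp with room for dn limbs, and a scratch area sized by
   mpn_mu_div_qr_itch.

     mp_ptr scratch;
     mp_limb_t qh;
     TMP_DECL;

     TMP_MARK;
     scratch = TMP_ALLOC_LIMBS (mpn_mu_div_qr_itch (nn, dn, 0));
     qh = mpn_mu_div_qr (qp, rp, np, nn, dp, dn, scratch);
     TMP_FREE;

   The return value qh is the most significant quotient limb, 0 or 1 when D is
   normalized.  */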

/*
   The idea of the algorithm used herein is to compute a smaller inverted value
   than used in the standard Barrett algorithm, and thus save time in the
   Newton iterations, and pay just a small price when using the inverted value
   for developing quotient bits.  This algorithm was presented at ICMS 2006.
*/
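
/* A hedged sketch of the resulting iteration, in the notation used below (not
   authoritative): write B = 2^GMP_NUMB_BITS and let I be the in-limb stored
   inverse, chosen so that B^in + I is roughly B^(2*in) divided by the most
   significant limbs of D.  Each pass develops the next in quotient limbs from
   the top in limbs R_hi of the current partial remainder,

     q = floor (R_hi * (B^in + I) / B^in) = R_hi + floor (R_hi * I / B^in),

   possibly off by a unit or two, then forms the new partial remainder from
   the next in dividend limbs n_next,

     R <- R * B^in + n_next - q * D,

   and finally adds or subtracts D a few times (adjusting q accordingly) so
   that 0 <= R < D holds again.  */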

/* CAUTION: This code and the code in mu_divappr_q.c should be edited in sync.

  * This isn't optimal when the quotient isn't needed, as the quotient then
    takes up a lot of space that is never used.  The quotient still has to be
    computed in order to form the remainder, though, so there is no time to
    be saved with special code.

  * The itch/scratch scheme perhaps isn't as good an idea as it once seemed,
    as demonstrated by the fact that the mpn_invertappr function's scratch
    needs force us to keep a large allocation long after it is needed.
    Things are made worse by mpn_mul_fft not accepting any scratch parameter,
    which means we'll have a large memory hole while in mpn_mul_fft.  In
    general, a peak scratch need at the beginning of a function isn't
    well-handled by the itch/scratch scheme.
*/

#include <stdlib.h>		/* for NULL */
#include "gmp.h"
#include "gmp-impl.h"

/* FIXME: The MU_DIV_QR_SKEW_THRESHOLD was not analysed properly.  It gives a
   speedup according to old measurements, but does the decision mechanism
   really make sense?  It seems like the ratio between dn and qn might be
   what we really should be checking.  */

#ifndef MU_DIV_QR_SKEW_THRESHOLD
#define MU_DIV_QR_SKEW_THRESHOLD 100
#endif

#ifdef CHECK				/* FIXME: Enable in minithres */
#undef MU_DIV_QR_SKEW_THRESHOLD
#define MU_DIV_QR_SKEW_THRESHOLD 1
#endif

static mp_limb_t mpn_mu_div_qr2 (mp_ptr, mp_ptr, mp_srcptr, mp_size_t, mp_srcptr, mp_size_t, mp_ptr);
static mp_size_t mpn_mu_div_qr_choose_in (mp_size_t, mp_size_t, int);

mp_limb_t
mpn_mu_div_qr (mp_ptr qp, mp_ptr rp, mp_srcptr np, mp_size_t nn,
               mp_srcptr dp, mp_size_t dn, mp_ptr scratch)
{
  mp_size_t qn;
  mp_limb_t cy, qh;

  qn = nn - dn;
  if (qn + MU_DIV_QR_SKEW_THRESHOLD < dn)
    {
      /* |______________|_ign_first__|   dividend                          nn
                |_______|_ign_first__|   divisor                           dn

                |______|                 quotient (prel)                   qn

                 |___________________|   quotient * ignored-divisor-part   dn-1
      */
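
      /* An illustrative instance of the picture above: with nn = 1000 and
         dn = 900 we get qn = 100, so the top 2*qn+1 = 201 limbs of N are
         divided by the top qn+1 = 101 limbs of D to obtain the preliminary
         quotient, which is then multiplied by the dn-(qn+1) = 799 ignored
         divisor limbs, giving a dn-1 = 899 limb product to subtract.  */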

      /* Compute a preliminary quotient and a partial remainder by dividing the
         most significant limbs of each operand.  */
      qh = mpn_mu_div_qr2 (qp, rp + nn - (2 * qn + 1),
                           np + nn - (2 * qn + 1), 2 * qn + 1,
                           dp + dn - (qn + 1), qn + 1,
                           scratch);

      /* Multiply the quotient by the divisor limbs ignored above.  */
      if (dn - (qn + 1) > qn)
        mpn_mul (scratch, dp, dn - (qn + 1), qp, qn);	/* prod is dn-1 limbs */
      else
        mpn_mul (scratch, qp, qn, dp, dn - (qn + 1));	/* prod is dn-1 limbs */

      cy = 0;
      if (qh)		/* the high (implicit) quotient limb also multiplies D's ignored part */
        cy = mpn_add_n (scratch + qn, scratch + qn, dp, dn - (qn + 1));
      scratch[dn - 1] = cy;

      cy = mpn_sub_n (rp, np, scratch, nn - (2 * qn + 1));
      cy = mpn_sub_nc (rp + nn - (2 * qn + 1),
                       rp + nn - (2 * qn + 1),
                       scratch + nn - (2 * qn + 1),
                       qn + 1, cy);
      if (cy != 0)
        {
          /* The preliminary quotient was one too large; adjust it and R.  */
          qh -= mpn_sub_1 (qp, qp, qn, 1);
          mpn_add_n (rp, rp, dp, dn);
        }
    }
  else
    qh = mpn_mu_div_qr2 (qp, rp, np, nn, dp, dn, scratch);

  return qh;
}

static mp_limb_t
mpn_mu_div_qr2 (mp_ptr qp, mp_ptr rp, mp_srcptr np, mp_size_t nn,
                mp_srcptr dp, mp_size_t dn, mp_ptr scratch)
{
  mp_size_t qn, in;
  mp_limb_t cy, qh;
  mp_ptr ip, tp;

  qn = nn - dn;

  /* Compute the inverse size.  */
  in = mpn_mu_div_qr_choose_in (qn, dn, 0);

#if 1
  /* This alternative inverse computation method gets slightly more accurate
     results.  FIXMEs: (1) Temp allocation needs not analysed (2) itch function
     not adapted (3) mpn_invertappr scratch needs not met.  */
  ip = scratch;
  tp = scratch + in + 1;

  /* compute an approximate inverse on (in+1) limbs */
  if (dn == in)
    {
      MPN_COPY (tp + 1, dp, in);
      tp[0] = 1;
      mpn_invertappr (ip, tp, in + 1, NULL);
      MPN_COPY_INCR (ip, ip + 1, in);
    }
  else
    {
      cy = mpn_add_1 (tp, dp + dn - (in + 1), in + 1, 1);
      if (UNLIKELY (cy != 0))
        MPN_ZERO (ip, in);
      else
        {
          mpn_invertappr (ip, tp, in + 1, NULL);
          MPN_COPY_INCR (ip, ip + 1, in);
        }
    }
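
  /* Hedged note on the branch above: when dn > in only the top in+1 limbs of
     D are inverted, and adding 1 to them first stands in for the unseen low
     limbs, so the computed inverse corresponds to a divisor at least as large
     as D.  If that increment overflows, those limbs were all ones and the
     correct approximate inverse is exactly B^in, i.e. an all-zero I plus the
     implicit leading limb.  */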
#else
  /* This older inverse computation method gets slightly worse results than
     the one above.  */
  ip = scratch;
  tp = scratch + in + 1;

  /* Compute inverse of D to in+1 limbs, then round to 'in' limbs.  Ideally the
     inversion function should do this automatically.  */
  if (dn == in)
    {
      tp[in + 1] = 0;
      MPN_COPY (tp + in + 2, dp, in);
      mpn_invertappr (tp, tp + in + 1, in + 1, NULL);
    }
  else
    mpn_invertappr (tp, dp + dn - (in + 1), in + 1, NULL);

  cy = mpn_sub_1 (tp, tp, in + 1, GMP_NUMB_HIGHBIT);
  if (UNLIKELY (cy != 0))
    MPN_ZERO (tp + 1, in);
  MPN_COPY (ip, tp + 1, in);
#endif

  qh = mpn_preinv_mu_div_qr (qp, rp, np, nn, dp, dn, ip, in, scratch + in);

  return qh;
}

mp_limb_t
mpn_preinv_mu_div_qr (mp_ptr qp, mp_ptr rp, mp_srcptr np, mp_size_t nn,
                      mp_srcptr dp, mp_size_t dn, mp_srcptr ip, mp_size_t in,
                      mp_ptr scratch)
{
  mp_size_t qn, tn, wn;
  mp_limb_t cy, cx, qh, r;

#define tp           scratch		/* product area within scratch */
#define scratch_out  (scratch + tn)

  qn = nn - dn;
  np += qn;
  qp += qn;

  qh = mpn_cmp (np, dp, dn) >= 0;
  if (qh != 0)
    mpn_sub_n (rp, np, dp, dn);
  else
    MPN_COPY (rp, np, dn);

  if (qn == 0)
    return qh;			/* Degenerate use.  Should we allow this? */
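
  /* From here on qn > 0: rp holds the initial dn-limb partial remainder (the
     top dn limbs of N, reduced once by D), qh is the quotient limb above the
     qn limbs still to be produced, and np and qp have been advanced so the
     remaining quotient blocks are generated from the most significant end
     downwards.  */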

      /* Compute the next block of quotient limbs by multiplying the inverse I
         by the upper part of the partial remainder R.  */
      mpn_mul_n (tp, rp + dn - in, ip, in);		/* mulhi */
      cy = mpn_add_n (qp, tp + in, rp + dn - in, in);	/* I's msb implicit */
      ASSERT_ALWAYS (cy == 0);
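
      /* Spelling out the two calls above (a hedged reading): the stored
         inverse I has an implicit most significant limb of 1, so the desired
         quotient block is roughly

           q = floor (R_hi * (B^in + I) / B^in) = R_hi + floor (R_hi * I / B^in)

         with R_hi = rp[dn-in .. dn-1].  mpn_mul_n forms R_hi * I, whose high
         half tp[in .. 2*in-1] is floor (R_hi * I / B^in), and mpn_add_n then
         adds R_hi for the implicit limb.  The block must fit in 'in' limbs,
         which is what the assertion checks.  */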

      /* Compute the product of the quotient block and the divisor D, to be
         subtracted from the partial remainder combined with new limbs from the
         dividend N.  We only really need the low dn+1 limbs.  */
      if (BELOW_THRESHOLD (in, MUL_TO_MULMOD_BNM1_FOR_2NXN_THRESHOLD))
        mpn_mul (tp, dp, dn, qp, in);		/* dn+in limbs, high 'in' cancels */
      else
        {
          tn = mpn_mulmod_bnm1_next_size (dn + 1);
          mpn_mulmod_bnm1 (tp, tn, dp, dn, qp, in, scratch_out);
          wn = dn + in - tn;			/* number of wrapped limbs */
          if (wn > 0)
            {
              cy = mpn_sub_n (tp, tp, rp + dn - wn, wn);
              cy = mpn_sub_1 (tp + wn, tp + wn, tn - wn, cy);
              cx = mpn_cmp (rp + dn - in, tp + dn, tn - dn) < 0;
              ASSERT_ALWAYS (cx >= cy);
              mpn_incr_u (tp, cx - cy);
            }
        }
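
      /* Hedged note on the fixup above: mpn_mulmod_bnm1 returns
         T = D*Q mod (B^tn - 1), i.e. the dn+in limb product with its top wn
         limbs wrapped around into the bottom.  Those wrapped limbs essentially
         duplicate the top wn limbs of the partial remainder (they are what the
         coming subtraction cancels), so they are subtracted back out, with cx
         and cy patching the borrow cases, leaving the needed low limbs of the
         true product in tp.  */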

      r = rp[dn - in] - tp[dn];

      /* Subtract the product from the partial remainder combined with new
         limbs from the dividend N, generating a new partial remainder R.  */
      if (dn != in)
        {
          cy = mpn_sub_n (tp, np, tp, in);	/* get next 'in' limbs from N */
          cy = mpn_sub_nc (tp + in, rp, tp + in, dn - in, cy);
          MPN_COPY (rp, tp, dn);		/* FIXME: try to avoid this */
        }
      else
        cy = mpn_sub_n (rp, np, tp, in);	/* get next 'in' limbs from N */

      STAT (int i; int err = 0;
            static int errarr[5]; static int err_rec; static int tot);

      /* Check the remainder R and adjust the quotient as needed.  */
      /* We loop 0 times with about 69% probability, 1 time with about 31%
         probability, 2 times with about 0.6% probability, if inverse is
         computed as recommended.  */
      cy = mpn_sub_n (rp, rp, dp, dn);

      if (mpn_cmp (rp, dp, dn) >= 0)
        /* This is executed with about 76% probability.  */
        cy = mpn_sub_n (rp, rp, dp, dn);

      if (tot % 0x10000 == 0)
        for (i = 0; i <= err_rec; i++)
          printf (" %d(%.1f%%)", errarr[i], 100.0*errarr[i]/tot);

/* In case k=0 (automatic choice), we distinguish 3 cases:
   (a) dn < qn:           in = ceil(qn / ceil(qn/dn))
   (b) dn/3 < qn <= dn:   in = ceil(qn / 2)
   (c) qn < dn/3:         in = qn
   In all cases we have in <= dn.  */
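
/* Worked example of the automatic choice (illustrative numbers only): with
   qn = 1000, dn = 300 we are in case (a), so b = ceil(1000/300) = 4 and
   in = ceil(1000/4) = 250, four equal blocks covering the quotient exactly.
   With qn = 200, dn = 300 we are in case (b) and in = 100; with qn = 80,
   dn = 300 we are in case (c) and in = 80.  */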

static mp_size_t
mpn_mu_div_qr_choose_in (mp_size_t qn, mp_size_t dn, int k)
{
  mp_size_t in;

  if (k == 0)
    {
      mp_size_t b;

      if (qn > dn)
        {
          /* Compute an inverse size that is a nice partition of the quotient.  */
          b = (qn - 1) / dn + 1;	/* ceil(qn/dn), number of blocks */
          in = (qn - 1) / b + 1;	/* ceil(qn/b) = ceil(qn / ceil(qn/dn)) */
        }
      else if (3 * qn > dn)
        in = (qn - 1) / 2 + 1;		/* b = 2 */
      else
        in = (qn - 1) / 1 + 1;		/* b = 1 */
    }
  else
    {
      mp_size_t xn = MIN (dn, qn);
      in = (xn - 1) / k + 1;
    }

  return in;
}

mp_size_t
mpn_mu_div_qr_itch (mp_size_t nn, mp_size_t dn, int mua_k)
{
  mp_size_t itch_local = mpn_mulmod_bnm1_next_size (dn + 1);
  mp_size_t in = mpn_mu_div_qr_choose_in (nn - dn, dn, mua_k);
  mp_size_t itch_out = mpn_mulmod_bnm1_itch (itch_local, dn, in);

  return in + itch_local + itch_out;
}
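
/* The estimate above decomposes as follows (a reading of the code; cf. the
   FIXME on the itch/scratch scheme at the top of the file): 'in' limbs that
   mpn_mu_div_qr2 keeps for the approximate inverse, followed by the
   mpn_preinv_mu_div_qr scratch, i.e. itch_local limbs for the (possibly
   wrapped) product and itch_out limbs for mpn_mulmod_bnm1.  */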

mp_size_t
mpn_preinv_mu_div_qr_itch (mp_size_t nn, mp_size_t dn, mp_size_t in)
{
  mp_size_t itch_local = mpn_mulmod_bnm1_next_size (dn + 1);
  mp_size_t itch_out = mpn_mulmod_bnm1_itch (itch_local, dn, in);

  return itch_local + itch_out;
}