1 /* mpn_mullow_n -- multiply two n-limb numbers and return the low n limbs
4 THIS IS (FOR NOW) AN INTERNAL FUNCTION. IT IS ONLY SAFE TO REACH THIS
5 FUNCTION THROUGH DOCUMENTED INTERFACES. IN FACT, IT IS ALMOST GUARANTEED
6 THAT IT'LL CHANGE OR DISAPPEAR IN A FUTURE GNU MP RELEASE.
8 Copyright 2004, 2005 Free Software Foundation, Inc.
10 This file is part of the GNU MP Library.
12 The GNU MP Library is free software; you can redistribute it and/or modify
13 it under the terms of the GNU Lesser General Public License as published by
14 the Free Software Foundation; either version 3 of the License, or (at your
15 option) any later version.
17 The GNU MP Library is distributed in the hope that it will be useful, but
18 WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
19 or FITNESS FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public
20 License for more details.
22 You should have received a copy of the GNU Lesser General Public License
23 along with the GNU MP Library. If not, see http://www.gnu.org/licenses/. */
29 #ifndef MULLOW_BASECASE_THRESHOLD
30 #define MULLOW_BASECASE_THRESHOLD 0 /* never use mpn_mul_basecase */
33 #ifndef MULLOW_DC_THRESHOLD
34 #define MULLOW_DC_THRESHOLD 3*MUL_KARATSUBA_THRESHOLD
37 #ifndef MULLOW_MUL_N_THRESHOLD
38 #define MULLOW_MUL_N_THRESHOLD 10*MULLOW_DC_THRESHOLD
41 /* Avoid zero allocations when MULLOW_BASECASE_THRESHOLD is 0. */
42 #define MUL_BASECASE_ALLOC \
43 (MULLOW_BASECASE_THRESHOLD_LIMIT == 0 ? 1 : 2*MULLOW_BASECASE_THRESHOLD_LIMIT)
46 FIXME: This function should accept a temporary area.
47 FIXME: Perhaps call mpn_kara_mul_n instead of mpn_mul_n?
48 THINK: If mpn_mul_basecase is always faster than mpn_mullow_basecase
49 (typically thanks to mpn_addmul_2) should we unconditionally use
mpn_mul_basecase?
51 FIXME: The recursive calls to mpn_mullow_n use sizes n/2 (one uses floor(n/2)
52 and the other ceil(n/2)). Depending on the values of the various
53 _THRESHOLDs, this may never trigger MULLOW_BASECASE_THRESHOLD.
54 Should we worry about this overhead?
58 mpn_mullow_n (mp_ptr rp, mp_srcptr xp, mp_srcptr yp, mp_size_t n)
60 if (BELOW_THRESHOLD (n, MULLOW_BASECASE_THRESHOLD))
62 /* Allocate workspace of fixed size on stack: fast! */
63 mp_limb_t ws[MUL_BASECASE_ALLOC];
64 mpn_mul_basecase (ws, xp, n, yp, n);
67 else if (BELOW_THRESHOLD (n, MULLOW_DC_THRESHOLD))
69 mpn_mullow_basecase (rp, xp, yp, n);
71 else if (BELOW_THRESHOLD (n, MULLOW_MUL_N_THRESHOLD))
73 /* Divide-and-conquer */
74 mp_size_t n2 = n >> 1; /* floor(n/2) */
75 mp_size_t n1 = n - n2; /* ceil(n/2) */
79 tp = TMP_SALLOC_LIMBS (n1);
81 /* Split as x = x1 2^(n1 GMP_NUMB_BITS) + x0,
82 y = y1 2^(n2 GMP_NUMB_BITS) + y0 */
85 mpn_mul_n (rp, xp, yp, n2);
87 rp[2 * n2] = mpn_addmul_1 (rp + n2, yp, n2, xp[n2]);
89 /* x1 * y0 * 2^(n1 GMP_NUMB_BITS) */
90 mpn_mullow_n (tp, xp + n1, yp, n2);
91 mpn_add_n (rp + n1, rp + n1, tp, n2);
93 /* x0 * y1 * 2^(n2 GMP_NUMB_BITS) */
94 mpn_mullow_n (tp, yp + n2, xp, n1);
95 mpn_add_n (rp + n2, rp + n2, tp, n1);
100 /* For really large operands, use plain mpn_mul_n but throw away upper n
105 tp = TMP_ALLOC_LIMBS (2 * n);
107 mpn_mul_n (tp, xp, yp, n);
108 MPN_COPY (rp, tp, n);