1 /* mpn_addmul_1 -- multiply the N long limb vector pointed to by UP by VL,
2 add the N least significant limbs of the product to the limb vector
3 pointed to by RP. Return the most significant limb of the product,
4 adjusted for carry-out from the addition.
6 Copyright 1992, 1993, 1994, 1996, 2000, 2002, 2004 Free Software Foundation,
9 This file is part of the GNU MP Library.
11 The GNU MP Library is free software; you can redistribute it and/or modify
12 it under the terms of the GNU Lesser General Public License as published by
13 the Free Software Foundation; either version 3 of the License, or (at your
14 option) any later version.
16 The GNU MP Library is distributed in the hope that it will be useful, but
17 WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
18 or FITNESS FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public
19 License for more details.
21 You should have received a copy of the GNU Lesser General Public License
22 along with the GNU MP Library. If not, see http://www.gnu.org/licenses/. */
29 #if GMP_NAIL_BITS == 0
/* mpn_addmul_1, nail-free variant: multiply the n-limb vector at up by the
   single limb vl, add the n low limbs of the product into rp, and return the
   carry-out limb (see the file header, L1-L4).
   NOTE(review): this excerpt is elided — the embedded line numbers jump
   32->34, 37->43, 43->46, so the opening brace, the accumulation loop, the
   final return and the closing brace are not visible here.  The comments
   below describe only the visible lines. */
32 mpn_addmul_1 (mp_ptr rp, mp_srcptr up, mp_size_t n, mp_limb_t vl)
34   mp_limb_t ul, cl, hpl, lpl, rl;
/* rp and up must be identical or fully disjoint; partial overlap would
   corrupt the in-place accumulation. */
37   ASSERT (MPN_SAME_OR_SEPARATE_P (rp, up, n));
/* umul_ppmm presumably forms the double-limb product ul * vl as
   hpl (high limb) : lpl (low limb) — confirm against GMP's longlong.h. */
43       umul_ppmm (hpl, lpl, ul, vl);
/* Carry propagation: the previous carry cl was (in elided code, presumably)
   added into lpl; (lpl < cl) recovers the carry-out of that addition, and
   adding it to hpl yields the carry into the next limb.  Cannot overflow:
   hpl < 2^limb-1 when vl > 0 — TODO confirm against the elided loop body. */
46       cl = (lpl < cl) + hpl;
60 #if GMP_NAIL_BITS == 1
/* mpn_addmul_1, single-nail-bit variant: same contract as the nail-free
   version above (rp[0..n-1] += up[0..n-1] * vl, return carry-out), but each
   limb keeps its top bit (the nail) clear.
   NOTE(review): elided excerpt — the embedded numbering jumps 63->65,
   68->73, 73->80 and stops at 84, so the loop structure, the nail-masked
   store into rp, the carry recombination and the return are not visible. */
63 mpn_addmul_1 (mp_ptr rp, mp_srcptr up, mp_size_t n, mp_limb_t vl)
65   mp_limb_t shifted_vl, ul, rl, lpl, hpl, prev_hpl, cl, xl, c1, c2, c3;
/* rp and up must be identical or fully disjoint. */
68   ASSERT (MPN_SAME_OR_SEPARATE_P (rp, up, n));
/* Pre-shift vl so the product's numb bits land in the high limb aligned
   for nail extraction — presumably pairs with the lpl >>= below. */
73   shifted_vl = vl << GMP_NAIL_BITS;
/* Double-limb product of the current up limb with the shifted multiplier:
   hpl:lpl (assuming umul_ppmm's high:low convention from longlong.h). */
80       umul_ppmm (hpl, lpl, ul, shifted_vl);
/* Undo the pre-shift on the low half so lpl holds numb-aligned bits. */
81       lpl >>= GMP_NAIL_BITS;
/* Three-way addition of the previous product's high half, the existing rp
   limb rl, and the running carry cl, collecting one carry bit from each
   step (c1, c2, c3).  ADDC_LIMB presumably yields sum and carry-out —
   confirm against gmp-impl.h.  How c1..c3 are folded into the next carry
   is in the elided lines. */
82       ADDC_LIMB (c1, xl, prev_hpl, lpl);
83       ADDC_LIMB (c2, xl, xl, rl);
84       ADDC_LIMB (c3, xl, xl, cl);
96 #if GMP_NAIL_BITS >= 2
/* mpn_addmul_1, wide-nail variant (>= 2 nail bits): same contract as the
   variants above.  With at least two nail bits free per limb, the sum
   prev_hpl + lpl + rl + cl fits in one machine word, so the carry chain
   collapses to plain word arithmetic instead of per-step ADDC_LIMBs.
   NOTE(review): elided excerpt — numbering jumps 99->101, 104->109,
   109->116 and 120->126, so the loop, the store of xl into rp, and the
   update of prev_hpl are not visible here. */
99 mpn_addmul_1 (mp_ptr rp, mp_srcptr up, mp_size_t n, mp_limb_t vl)
101   mp_limb_t shifted_vl, ul, rl, lpl, hpl, prev_hpl, xw, cl, xl;
/* rp and up must be identical or fully disjoint. */
104   ASSERT (MPN_SAME_OR_SEPARATE_P (rp, up, n));
/* Pre-shift the multiplier; pairs with lpl >>= GMP_NAIL_BITS below. */
109   shifted_vl = vl << GMP_NAIL_BITS;
/* Double-limb product hpl:lpl of the current up limb (umul_ppmm high:low
   convention — confirm against longlong.h). */
116       umul_ppmm (hpl, lpl, ul, shifted_vl);
117       lpl >>= GMP_NAIL_BITS;
/* Single-word accumulation: previous high half + current low half +
   existing rp limb + carry.  Safe without overflow only because the nail
   bits leave headroom — that is the point of this >= 2 variant. */
118       xw = prev_hpl + lpl + rl + cl;
/* Split xw into the new carry (bits above the numb field)... */
119       cl = xw >> GMP_NUMB_BITS;
/* ...and the numb-masked limb destined for rp (store is elided). */
120       xl = xw & GMP_NUMB_MASK;
/* Final carry-out: last product's high half plus remaining carry. */
126   return prev_hpl + cl;