1 /* mpc_mul -- Multiply two complex numbers
3 Copyright (C) 2002, 2004, 2005, 2008, 2009, 2010, 2011, 2012 INRIA
5 This file is part of GNU MPC.
7 GNU MPC is free software; you can redistribute it and/or modify it under
8 the terms of the GNU Lesser General Public License as published by the
9 Free Software Foundation; either version 3 of the License, or (at your
10 option) any later version.
12 GNU MPC is distributed in the hope that it will be useful, but WITHOUT ANY
13 WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
14 FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License for
more details.
17 You should have received a copy of the GNU Lesser General Public License
18 along with this program. If not, see http://www.gnu.org/licenses/ .
21 #include <stdio.h> /* for MPC_ASSERT */
24 #define mpz_add_si(z,x,y) do { \
26 mpz_add_ui (z, x, (long int) y); \
28 mpz_sub_ui (z, x, (long int) (-y)); \
31 /* compute z=x*y when x has an infinite part */
33 mul_infinite (mpc_ptr z, mpc_srcptr x, mpc_srcptr y)
35 /* Let x=xr+i*xi and y=yr+i*yi; extract the signs of the operands */
36 int xrs = mpfr_signbit (mpc_realref (x)) ? -1 : 1;
37 int xis = mpfr_signbit (mpc_imagref (x)) ? -1 : 1;
38 int yrs = mpfr_signbit (mpc_realref (y)) ? -1 : 1;
39 int yis = mpfr_signbit (mpc_imagref (y)) ? -1 : 1;
43 /* compute the sign of
44 u = xrs * yrs * xr * yr - xis * yis * xi * yi
45 v = xrs * yis * xr * yi + xis * yrs * xi * yr
46 +1 if positive, -1 if negatiye, 0 if NaN */
47 if ( mpfr_nan_p (mpc_realref (x)) || mpfr_nan_p (mpc_imagref (x))
48 || mpfr_nan_p (mpc_realref (y)) || mpfr_nan_p (mpc_imagref (y))) {
52 else if (mpfr_inf_p (mpc_realref (x))) {
53 /* x = (+/-inf) xr + i*xi */
54 u = ( mpfr_zero_p (mpc_realref (y))
55 || (mpfr_inf_p (mpc_imagref (x)) && mpfr_zero_p (mpc_imagref (y)))
56 || (mpfr_zero_p (mpc_imagref (x)) && mpfr_inf_p (mpc_imagref (y)))
57 || ( (mpfr_inf_p (mpc_imagref (x)) || mpfr_inf_p (mpc_imagref (y)))
58 && xrs*yrs == xis*yis)
60 v = ( mpfr_zero_p (mpc_imagref (y))
61 || (mpfr_inf_p (mpc_imagref (x)) && mpfr_zero_p (mpc_realref (y)))
62 || (mpfr_zero_p (mpc_imagref (x)) && mpfr_inf_p (mpc_realref (y)))
63 || ( (mpfr_inf_p (mpc_imagref (x)) || mpfr_inf_p (mpc_imagref (x)))
64 && xrs*yis != xis*yrs)
68 /* x = xr + i*(+/-inf) with |xr| != inf */
69 u = ( mpfr_zero_p (mpc_imagref (y))
70 || (mpfr_zero_p (mpc_realref (x)) && mpfr_inf_p (mpc_realref (y)))
71 || (mpfr_inf_p (mpc_realref (y)) && xrs*yrs == xis*yis)
73 v = ( mpfr_zero_p (mpc_realref (y))
74 || (mpfr_zero_p (mpc_realref (x)) && mpfr_inf_p (mpc_imagref (y)))
75 || (mpfr_inf_p (mpc_imagref (y)) && xrs*yis != xis*yrs)
79 if (u == 0 && v == 0) {
80 /* Naive result is NaN+i*NaN. Obtain an infinity using the algorithm
81 given in Annex G.5.1 of the ISO C99 standard */
82 int xr = (mpfr_zero_p (mpc_realref (x)) || mpfr_nan_p (mpc_realref (x)) ? 0
83 : (mpfr_inf_p (mpc_realref (x)) ? 1 : 0));
84 int xi = (mpfr_zero_p (mpc_imagref (x)) || mpfr_nan_p (mpc_imagref (x)) ? 0
85 : (mpfr_inf_p (mpc_imagref (x)) ? 1 : 0));
86 int yr = (mpfr_zero_p (mpc_realref (y)) || mpfr_nan_p (mpc_realref (y)) ? 0 : 1);
87 int yi = (mpfr_zero_p (mpc_imagref (y)) || mpfr_nan_p (mpc_imagref (y)) ? 0 : 1);
89 yr = mpfr_inf_p (mpc_realref (y)) ? 1 : 0;
90 yi = mpfr_inf_p (mpc_imagref (y)) ? 1 : 0;
93 u = xrs * xr * yrs * yr - xis * xi * yis * yi;
94 v = xrs * xr * yis * yi + xis * xi * yrs * yr;
98 mpfr_set_nan (mpc_realref (z));
100 mpfr_set_inf (mpc_realref (z), u);
103 mpfr_set_nan (mpc_imagref (z));
105 mpfr_set_inf (mpc_imagref (z), v);
107 return MPC_INEX (0, 0); /* exact */
111 /* compute z = x*y for Im(y) == 0 */
113 mul_real (mpc_ptr z, mpc_srcptr x, mpc_srcptr y, mpc_rnd_t rnd)
115 int xrs, xis, yrs, yis;
118 /* save signs of operands */
119 xrs = MPFR_SIGNBIT (mpc_realref (x));
120 xis = MPFR_SIGNBIT (mpc_imagref (x));
121 yrs = MPFR_SIGNBIT (mpc_realref (y));
122 yis = MPFR_SIGNBIT (mpc_imagref (y));
124 inex = mpc_mul_fr (z, x, mpc_realref (y), rnd);
125 /* Signs of zeroes may be wrong. Their correction does not change the
127 if (mpfr_zero_p (mpc_realref (z)))
128 mpfr_setsign (mpc_realref (z), mpc_realref (z), MPC_RND_RE(rnd) == GMP_RNDD
129 || (xrs != yrs && xis == yis), GMP_RNDN);
130 if (mpfr_zero_p (mpc_imagref (z)))
131 mpfr_setsign (mpc_imagref (z), mpc_imagref (z), MPC_RND_IM (rnd) == GMP_RNDD
132 || (xrs != yis && xis != yrs), GMP_RNDN);
138 /* compute z = x*y for Re(y) == 0, and Im(x) != 0 and Im(y) != 0 */
140 mul_imag (mpc_ptr z, mpc_srcptr x, mpc_srcptr y, mpc_rnd_t rnd)
143 int inex_re, inex_im;
144 int overlap = z == x || z == y;
148 mpc_init3 (rop, MPC_PREC_RE (z), MPC_PREC_IM (z));
152 sign = (MPFR_SIGNBIT (mpc_realref (y)) != MPFR_SIGNBIT (mpc_imagref (x)))
153 && (MPFR_SIGNBIT (mpc_imagref (y)) != MPFR_SIGNBIT (mpc_realref (x)));
155 inex_re = -mpfr_mul (mpc_realref (rop), mpc_imagref (x), mpc_imagref (y),
156 INV_RND (MPC_RND_RE (rnd)));
157 mpfr_neg (mpc_realref (rop), mpc_realref (rop), GMP_RNDN); /* exact */
158 inex_im = mpfr_mul (mpc_imagref (rop), mpc_realref (x), mpc_imagref (y),
160 mpc_set (z, rop, MPC_RNDNN);
162 /* Sign of zeroes may be wrong (note that Re(z) cannot be zero) */
163 if (mpfr_zero_p (mpc_imagref (z)))
164 mpfr_setsign (mpc_imagref (z), mpc_imagref (z), MPC_RND_IM (rnd) == GMP_RNDD
170 return MPC_INEX (inex_re, inex_im);
175 mpfr_fmma (mpfr_ptr z, mpfr_srcptr a, mpfr_srcptr b, mpfr_srcptr c,
176 mpfr_srcptr d, int sign, mpfr_rnd_t rnd)
178 /* Computes z = ab+cd if sign >= 0, or z = ab-cd if sign < 0.
179 Assumes that a, b, c, d are finite and non-zero; so any multiplication
180 of two of them yielding an infinity is an overflow, and a
181 multiplication yielding 0 is an underflow.
182 Assumes further that z is distinct from a, b, c, d. */
187 /* u=a*b, v=sign*c*d exactly */
188 mpfr_init2 (u, mpfr_get_prec (a) + mpfr_get_prec (b));
189 mpfr_init2 (v, mpfr_get_prec (c) + mpfr_get_prec (d));
190 mpfr_mul (u, a, b, GMP_RNDN);
191 mpfr_mul (v, c, d, GMP_RNDN);
193 mpfr_neg (v, v, GMP_RNDN);
195 /* tentatively compute z as u+v; here we need z to be distinct
196 from a, b, c, d to not lose the latter */
197 inex = mpfr_add (z, u, v, rnd);
199 if (mpfr_inf_p (z)) {
200 /* replace by "correctly rounded overflow" */
201 mpfr_set_si (z, (mpfr_signbit (z) ? -1 : 1), GMP_RNDN);
202 inex = mpfr_mul_2ui (z, z, mpfr_get_emax (), rnd);
204 else if (mpfr_zero_p (u) && !mpfr_zero_p (v)) {
205 /* exactly u underflowed, determine inexact flag */
206 inex = (mpfr_signbit (u) ? 1 : -1);
208 else if (mpfr_zero_p (v) && !mpfr_zero_p (u)) {
209 /* exactly v underflowed, determine inexact flag */
210 inex = (mpfr_signbit (v) ? 1 : -1);
212 else if (mpfr_nan_p (z) || (mpfr_zero_p (u) && mpfr_zero_p (v))) {
213 /* In the first case, u and v are infinities with opposite signs.
214 In the second case, u and v are zeroes; their sum may be 0 or the
215 least representable number, with a sign to be determined.
216 Redo the computations with mpz_t exponents */
217 mpfr_exp_t ea, eb, ec, ed;
219 /* cheat to work around the const qualifiers */
221 /* Normalise the input by shifting and keep track of the shifts in
222 the exponents of u and v */
223 ea = mpfr_get_exp (a);
224 eb = mpfr_get_exp (b);
225 ec = mpfr_get_exp (c);
226 ed = mpfr_get_exp (d);
228 mpfr_set_exp ((mpfr_ptr) a, (mpfr_prec_t) 0);
229 mpfr_set_exp ((mpfr_ptr) b, (mpfr_prec_t) 0);
230 mpfr_set_exp ((mpfr_ptr) c, (mpfr_prec_t) 0);
231 mpfr_set_exp ((mpfr_ptr) d, (mpfr_prec_t) 0);
235 mpz_set_si (eu, (long int) ea);
236 mpz_add_si (eu, eu, (long int) eb);
237 mpz_set_si (ev, (long int) ec);
238 mpz_add_si (ev, ev, (long int) ed);
240 /* recompute u and v and move exponents to eu and ev */
241 mpfr_mul (u, a, b, GMP_RNDN);
242 /* exponent of u is non-positive */
243 mpz_sub_ui (eu, eu, (unsigned long int) (-mpfr_get_exp (u)));
244 mpfr_set_exp (u, (mpfr_prec_t) 0);
245 mpfr_mul (v, c, d, GMP_RNDN);
247 mpfr_neg (v, v, GMP_RNDN);
248 mpz_sub_ui (ev, ev, (unsigned long int) (-mpfr_get_exp (v)));
249 mpfr_set_exp (v, (mpfr_prec_t) 0);
251 if (mpfr_nan_p (z)) {
252 mpfr_exp_t emax = mpfr_get_emax ();
254 /* We have a = ma * 2^ea with 1/2 <= |ma| < 1 and ea <= emax, and
255 analogously for b. So eu <= 2*emax, and eu > emax since we have
256 an overflow. The same holds for ev. Shift u and v by as much as
257 possible so that one of them has exponent emax and the
258 remaining exponents in eu and ev are the same. Then carry out
259 the addition. Shifting u and v prevents an underflow. */
260 if (mpz_cmp (eu, ev) >= 0) {
261 mpfr_set_exp (u, emax);
262 mpz_sub_ui (eu, eu, (long int) emax);
263 mpz_sub (ev, ev, eu);
264 mpfr_set_exp (v, (mpfr_exp_t) mpz_get_ui (ev));
265 /* remaining common exponent is now in eu */
268 mpfr_set_exp (v, emax);
269 mpz_sub_ui (ev, ev, (long int) emax);
270 mpz_sub (eu, eu, ev);
271 mpfr_set_exp (u, (mpfr_exp_t) mpz_get_ui (eu));
273 /* remaining common exponent is now also in eu */
275 inex = mpfr_add (z, u, v, rnd);
276 /* Result is finite since u and v have different signs. */
277 overflow = mpfr_mul_2ui (z, z, mpz_get_ui (eu), rnd);
283 /* Addition of two zeroes with same sign. We have a = ma * 2^ea
284 with 1/2 <= |ma| < 1 and ea >= emin and similarly for b.
285 So 2*emin < 2*emin+1 <= eu < emin < 0, and analogously for v. */
286 mpfr_exp_t emin = mpfr_get_emin ();
287 if (mpz_cmp (eu, ev) <= 0) {
288 mpfr_set_exp (u, emin);
289 mpz_add_ui (eu, eu, (unsigned long int) (-emin));
290 mpz_sub (ev, ev, eu);
291 mpfr_set_exp (v, (mpfr_exp_t) mpz_get_si (ev));
294 mpfr_set_exp (v, emin);
295 mpz_add_ui (ev, ev, (unsigned long int) (-emin));
296 mpz_sub (eu, eu, ev);
297 mpfr_set_exp (u, (mpfr_exp_t) mpz_get_si (eu));
300 inex = mpfr_add (z, u, v, rnd);
302 underflow = mpfr_div_2ui (z, z, mpz_get_ui (eu), rnd);
310 mpfr_set_exp ((mpfr_ptr) a, ea);
311 mpfr_set_exp ((mpfr_ptr) b, eb);
312 mpfr_set_exp ((mpfr_ptr) c, ec);
313 mpfr_set_exp ((mpfr_ptr) d, ed);
314 /* works also when some of a, b, c, d are not all distinct */
325 mpc_mul_naive (mpc_ptr z, mpc_srcptr x, mpc_srcptr y, mpc_rnd_t rnd)
327 /* computes z=x*y by the schoolbook method, where x and y are assumed
328 to be finite and without zero parts */
332 MPC_ASSERT ( mpfr_regular_p (mpc_realref (x)) && mpfr_regular_p (mpc_imagref (x))
333 && mpfr_regular_p (mpc_realref (y)) && mpfr_regular_p (mpc_imagref (y)));
334 overlap = (z == x) || (z == y);
336 mpc_init3 (rop, MPC_PREC_RE (z), MPC_PREC_IM (z));
340 inex = MPC_INEX (mpfr_fmma (mpc_realref (rop), mpc_realref (x), mpc_realref (y), mpc_imagref (x),
341 mpc_imagref (y), -1, MPC_RND_RE (rnd)),
342 mpfr_fmma (mpc_imagref (rop), mpc_realref (x), mpc_imagref (y), mpc_imagref (x),
343 mpc_realref (y), +1, MPC_RND_IM (rnd)));
345 mpc_set (z, rop, MPC_RNDNN);
354 mpc_mul_karatsuba (mpc_ptr rop, mpc_srcptr op1, mpc_srcptr op2, mpc_rnd_t rnd)
356 /* computes rop=op1*op2 by a Karatsuba algorithm, where op1 and op2
357 are assumed to be finite and without zero parts */
358 mpfr_srcptr a, b, c, d;
359 int mul_i, ok, inexact, mul_a, mul_c, inex_re = 0, inex_im = 0, sign_x, sign_u;
361 mpfr_prec_t prec, prec_re, prec_u, prec_v, prec_w;
362 mpfr_rnd_t rnd_re, rnd_u;
364 /* true if rop == op1 or rop == op2 */
366 /* overlap is quite difficult to handle, because we have to tentatively
367 round the variable u in the end to either the real or the imaginary
368 part of rop (it is not possible to tell now whether the real or
369 imaginary part is used). If this fails, we have to start again and
370 need the correct values of op1 and op2.
371 So we just create a new variable for the result in this case. */
373 const int MAX_MUL_LOOP = 1;
375 overlap = (rop == op1) || (rop == op2);
377 mpc_init3 (result, MPC_PREC_RE (rop), MPC_PREC_IM (rop));
379 result [0] = rop [0];
381 a = mpc_realref(op1);
382 b = mpc_imagref(op1);
383 c = mpc_realref(op2);
384 d = mpc_imagref(op2);
386 /* (a + i*b) * (c + i*d) = [ac - bd] + i*[ad + bc] */
388 mul_i = 0; /* number of multiplications by i */
389 mul_a = 1; /* implicit factor for a */
390 mul_c = 1; /* implicit factor for c */
392 if (mpfr_cmp_abs (a, b) < 0)
396 mul_a = -1; /* consider i * (a+i*b) = -b + i*a */
399 if (mpfr_cmp_abs (c, d) < 0)
403 mul_c = -1; /* consider -d + i*c instead of c + i*d */
406 /* find the precision and rounding mode for the new real part */
409 prec_re = MPC_PREC_IM(rop);
410 rnd_re = MPC_RND_IM(rnd);
412 else /* mul_i = 0 or 2 */
414 prec_re = MPC_PREC_RE(rop);
415 rnd_re = MPC_RND_RE(rnd);
419 rnd_re = INV_RND(rnd_re);
421 /* now |a| >= |b| and |c| >= |d| */
422 prec = MPC_MAX_PREC(rop);
424 mpfr_init2 (v, prec_v = mpfr_get_prec (a) + mpfr_get_prec (d));
425 mpfr_init2 (w, prec_w = mpfr_get_prec (b) + mpfr_get_prec (c));
429 inexact = mpfr_mul (v, a, d, GMP_RNDN);
431 /* over- or underflow */
436 mpfr_neg (v, v, GMP_RNDN);
438 inexact = mpfr_mul (w, b, c, GMP_RNDN);
440 /* over- or underflow */
445 mpfr_neg (w, w, GMP_RNDN);
447 /* compute sign(v-w) */
448 sign_x = mpfr_cmp_abs (v, w);
450 sign_x = 2 * mpfr_sgn (v) - mpfr_sgn (w);
451 else if (sign_x == 0)
452 sign_x = mpfr_sgn (v) - mpfr_sgn (w);
454 sign_x = mpfr_sgn (v) - 2 * mpfr_sgn (w);
456 sign_u = mul_a * mpfr_sgn (a) * mul_c * mpfr_sgn (c);
458 if (sign_x * sign_u < 0)
463 { int tmp; tmp = mul_a; mul_a = mul_c; mul_c = tmp; }
467 /* now sign_x * sign_u >= 0 */
472 /* the following should give failures with prob. <= 1/prec */
473 prec += mpc_ceil_log2 (prec) + 3;
475 mpfr_set_prec (u, prec_u = prec);
476 mpfr_set_prec (x, prec);
478 /* first compute away(b +/- a) and store it in u */
479 inexact = (mul_a == -1 ?
480 ROUND_AWAY (mpfr_sub (u, b, a, MPFR_RNDA), u) :
481 ROUND_AWAY (mpfr_add (u, b, a, MPFR_RNDA), u));
483 /* then compute away(+/-c - d) and store it in x */
484 inexact |= (mul_c == -1 ?
485 ROUND_AWAY (mpfr_add (x, c, d, MPFR_RNDA), x) :
486 ROUND_AWAY (mpfr_sub (x, c, d, MPFR_RNDA), x));
488 mpfr_neg (x, x, GMP_RNDN);
491 mpfr_prec_round (u, prec_u = 2 * prec, GMP_RNDN);
493 /* compute away(u*x) and store it in u */
494 inexact |= ROUND_AWAY (mpfr_mul (u, u, x, MPFR_RNDA), u);
497 /* if all computations are exact up to here, it may be that
498 the real part is exact, thus we need if possible to
499 compute v - w exactly */
503 /* v and w are different from 0, so mpfr_get_exp is safe to use */
504 prec_x = SAFE_ABS (mpfr_exp_t, mpfr_get_exp (v) - mpfr_get_exp (w))
505 + MPC_MAX (prec_v, prec_w) + 1;
506 /* +1 is necessary for a potential carry */
507 /* ensure we do not use a too large precision */
511 mpfr_prec_round (x, prec_x, GMP_RNDN);
514 rnd_u = (sign_u > 0) ? GMP_RNDU : GMP_RNDD;
515 inexact |= mpfr_sub (x, v, w, rnd_u); /* ad - bc */
517 /* in case u=0, ensure that rnd_u rounds x away from zero */
518 if (mpfr_sgn (u) == 0)
519 rnd_u = (mpfr_sgn (x) > 0) ? GMP_RNDU : GMP_RNDD;
520 inexact |= mpfr_add (u, u, x, rnd_u); /* ac - bd */
523 mpfr_can_round (u, prec_u - 3, rnd_u, GMP_RNDZ,
524 prec_re + (rnd_re == GMP_RNDN));
525 /* this ensures both we can round correctly and determine the correct
526 inexact flag (for rounding to nearest) */
528 while (!ok && loop <= MAX_MUL_LOOP);
529 /* after MAX_MUL_LOOP rounds, use mpc_naive instead */
532 /* if inexact is zero, then u is exactly ac-bd, otherwise fix the sign
533 of the inexact flag for u, which was rounded away from ac-bd */
535 inexact = mpfr_sgn (u);
539 inex_re = mpfr_set (mpc_realref(result), u, MPC_RND_RE(rnd));
542 inex_re = inexact; /* u is rounded away from 0 */
543 inex_im = mpfr_add (mpc_imagref(result), v, w, MPC_RND_IM(rnd));
546 inex_im = mpfr_add (mpc_imagref(result), v, w, MPC_RND_IM(rnd));
548 else if (mul_i == 1) /* (x+i*y)/i = y - i*x */
550 inex_im = mpfr_neg (mpc_imagref(result), u, MPC_RND_IM(rnd));
553 inex_im = -inexact; /* u is rounded away from 0 */
554 inex_re = mpfr_add (mpc_realref(result), v, w, MPC_RND_RE(rnd));
557 inex_re = mpfr_add (mpc_realref(result), v, w, MPC_RND_RE(rnd));
559 else /* mul_i = 2, z/i^2 = -z */
561 inex_re = mpfr_neg (mpc_realref(result), u, MPC_RND_RE(rnd));
564 inex_re = -inexact; /* u is rounded away from 0 */
565 inex_im = -mpfr_add (mpc_imagref(result), v, w,
566 INV_RND(MPC_RND_IM(rnd)));
567 mpfr_neg (mpc_imagref(result), mpc_imagref(result), MPC_RND_IM(rnd));
571 inex_im = -mpfr_add (mpc_imagref(result), v, w,
572 INV_RND(MPC_RND_IM(rnd)));
573 mpfr_neg (mpc_imagref(result), mpc_imagref(result), MPC_RND_IM(rnd));
577 mpc_set (rop, result, MPC_RNDNN);
589 return MPC_INEX(inex_re, inex_im);
591 return mpc_mul_naive (rop, op1, op2, rnd);
596 mpc_mul (mpc_ptr a, mpc_srcptr b, mpc_srcptr c, mpc_rnd_t rnd)
598 /* Conforming to ISO C99 standard (G.5.1 multiplicative operators),
599 infinities are treated specially if both parts are NaN when computed
602 return mul_infinite (a, b, c);
604 return mul_infinite (a, c, b);
606 /* NaN contamination of both parts in result */
607 if (mpfr_nan_p (mpc_realref (b)) || mpfr_nan_p (mpc_imagref (b))
608 || mpfr_nan_p (mpc_realref (c)) || mpfr_nan_p (mpc_imagref (c))) {
609 mpfr_set_nan (mpc_realref (a));
610 mpfr_set_nan (mpc_imagref (a));
611 return MPC_INEX (0, 0);
614 /* check for real multiplication */
615 if (mpfr_zero_p (mpc_imagref (b)))
616 return mul_real (a, c, b, rnd);
617 if (mpfr_zero_p (mpc_imagref (c)))
618 return mul_real (a, b, c, rnd);
620 /* check for purely imaginary multiplication */
621 if (mpfr_zero_p (mpc_realref (b)))
622 return mul_imag (a, c, b, rnd);
623 if (mpfr_zero_p (mpc_realref (c)))
624 return mul_imag (a, b, c, rnd);
626 /* If the real and imaginary part of one argument have a very different */
627 /* exponent, it is not reasonable to use Karatsuba multiplication. */
628 if ( SAFE_ABS (mpfr_exp_t,
629 mpfr_get_exp (mpc_realref (b)) - mpfr_get_exp (mpc_imagref (b)))
630 > (mpfr_exp_t) MPC_MAX_PREC (b) / 2
631 || SAFE_ABS (mpfr_exp_t,
632 mpfr_get_exp (mpc_realref (c)) - mpfr_get_exp (mpc_imagref (c)))
633 > (mpfr_exp_t) MPC_MAX_PREC (c) / 2)
634 return mpc_mul_naive (a, b, c, rnd);
636 return ((MPC_MAX_PREC(a)
637 <= (mpfr_prec_t) MUL_KARATSUBA_THRESHOLD * BITS_PER_MP_LIMB)
638 ? mpc_mul_naive : mpc_mul_karatsuba) (a, b, c, rnd);