Update gcc-50 to SVN version 221572
[dragonfly.git] / contrib / gcc-5.0 / gcc / expmed.c
1/* Medium-level subroutines: convert bit-field store and extract
2 and shifts, multiplies and divides to rtl instructions.
3 Copyright (C) 1987-2015 Free Software Foundation, Inc.
4
5This file is part of GCC.
6
7GCC is free software; you can redistribute it and/or modify it under
8the terms of the GNU General Public License as published by the Free
9Software Foundation; either version 3, or (at your option) any later
10version.
11
12GCC is distributed in the hope that it will be useful, but WITHOUT ANY
13WARRANTY; without even the implied warranty of MERCHANTABILITY or
14FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
15for more details.
16
17You should have received a copy of the GNU General Public License
18along with GCC; see the file COPYING3. If not see
19<http://www.gnu.org/licenses/>. */
20
21
22#include "config.h"
23#include "system.h"
24#include "coretypes.h"
25#include "tm.h"
26#include "diagnostic-core.h"
27#include "rtl.h"
28#include "hash-set.h"
29#include "machmode.h"
30#include "vec.h"
31#include "double-int.h"
32#include "input.h"
33#include "alias.h"
34#include "symtab.h"
35#include "wide-int.h"
36#include "inchash.h"
37#include "tree.h"
38#include "fold-const.h"
39#include "stor-layout.h"
40#include "tm_p.h"
41#include "flags.h"
42#include "insn-config.h"
43#include "hashtab.h"
44#include "hard-reg-set.h"
45#include "function.h"
46#include "statistics.h"
47#include "real.h"
48#include "fixed-value.h"
49#include "expmed.h"
50#include "dojump.h"
51#include "explow.h"
52#include "calls.h"
53#include "emit-rtl.h"
54#include "varasm.h"
55#include "stmt.h"
56#include "expr.h"
57#include "insn-codes.h"
58#include "optabs.h"
59#include "recog.h"
60#include "langhooks.h"
61#include "predict.h"
62#include "basic-block.h"
63#include "df.h"
64#include "target.h"
65
66struct target_expmed default_target_expmed;
67#if SWITCHABLE_TARGET
68struct target_expmed *this_target_expmed = &default_target_expmed;
69#endif
70
71static void store_fixed_bit_field (rtx, unsigned HOST_WIDE_INT,
72 unsigned HOST_WIDE_INT,
73 unsigned HOST_WIDE_INT,
74 unsigned HOST_WIDE_INT,
75 rtx);
76static void store_fixed_bit_field_1 (rtx, unsigned HOST_WIDE_INT,
77 unsigned HOST_WIDE_INT,
78 rtx);
79static void store_split_bit_field (rtx, unsigned HOST_WIDE_INT,
80 unsigned HOST_WIDE_INT,
81 unsigned HOST_WIDE_INT,
82 unsigned HOST_WIDE_INT,
83 rtx);
84static rtx extract_fixed_bit_field (machine_mode, rtx,
85 unsigned HOST_WIDE_INT,
86 unsigned HOST_WIDE_INT, rtx, int);
87static rtx extract_fixed_bit_field_1 (machine_mode, rtx,
88 unsigned HOST_WIDE_INT,
89 unsigned HOST_WIDE_INT, rtx, int);
90static rtx lshift_value (machine_mode, unsigned HOST_WIDE_INT, int);
91static rtx extract_split_bit_field (rtx, unsigned HOST_WIDE_INT,
92 unsigned HOST_WIDE_INT, int);
93static void do_cmp_and_jump (rtx, rtx, enum rtx_code, machine_mode, rtx_code_label *);
94static rtx expand_smod_pow2 (machine_mode, rtx, HOST_WIDE_INT);
95static rtx expand_sdiv_pow2 (machine_mode, rtx, HOST_WIDE_INT);
96
97/* Return a constant integer mask value of mode MODE with BITSIZE ones
98 followed by BITPOS zeros, or the complement of that if COMPLEMENT.
99 The mask is truncated if necessary to the width of mode MODE. The
100 mask is zero-extended if BITSIZE+BITPOS is too small for MODE. */
101
102static inline rtx
103mask_rtx (machine_mode mode, int bitpos, int bitsize, bool complement)
104{
105 return immed_wide_int_const
106 (wi::shifted_mask (bitpos, bitsize, complement,
107 GET_MODE_PRECISION (mode)), mode);
108}
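/* For illustration (assuming SImode is 32 bits wide):
   mask_rtx (SImode, 8, 4, false) yields a constant with bits 8..11 set,
   i.e. 0x00000f00, and mask_rtx (SImode, 8, 4, true) yields the
   complement 0xfffff0ff.  */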
109
110/* Test whether a value is zero or a power of two. */
111#define EXACT_POWER_OF_2_OR_ZERO_P(x) \
112 (((x) & ((x) - (unsigned HOST_WIDE_INT) 1)) == 0)
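/* A nonzero power of two has exactly one bit set, so clearing its lowest
   set bit with x & (x - 1) yields zero: 8 & 7 == 0, whereas 12 & 11 == 8.
   Zero trivially passes the test as well, hence the name.  */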
113
114struct init_expmed_rtl
115{
116 rtx reg;
117 rtx plus;
118 rtx neg;
119 rtx mult;
120 rtx sdiv;
121 rtx udiv;
122 rtx sdiv_32;
123 rtx smod_32;
124 rtx wide_mult;
125 rtx wide_lshr;
126 rtx wide_trunc;
127 rtx shift;
128 rtx shift_mult;
129 rtx shift_add;
130 rtx shift_sub0;
131 rtx shift_sub1;
132 rtx zext;
133 rtx trunc;
134
135 rtx pow2[MAX_BITS_PER_WORD];
136 rtx cint[MAX_BITS_PER_WORD];
137};
138
139static void
140init_expmed_one_conv (struct init_expmed_rtl *all, machine_mode to_mode,
141 machine_mode from_mode, bool speed)
142{
143 int to_size, from_size;
144 rtx which;
145
146 to_size = GET_MODE_PRECISION (to_mode);
147 from_size = GET_MODE_PRECISION (from_mode);
148
 149 /* Most partial integers have a precision less than the "full"
 150 integer they require for storage. In case one doesn't, reduce
 151 its bit size by one for the purposes of the comparison
 152 made here. */
153 if (GET_MODE_CLASS (to_mode) == MODE_PARTIAL_INT
154 && exact_log2 (to_size) != -1)
155 to_size --;
156 if (GET_MODE_CLASS (from_mode) == MODE_PARTIAL_INT
157 && exact_log2 (from_size) != -1)
158 from_size --;
159
160 /* Assume cost of zero-extend and sign-extend is the same. */
161 which = (to_size < from_size ? all->trunc : all->zext);
162
163 PUT_MODE (all->reg, from_mode);
164 set_convert_cost (to_mode, from_mode, speed, set_src_cost (which, speed));
165}
166
167static void
168init_expmed_one_mode (struct init_expmed_rtl *all,
169 machine_mode mode, int speed)
170{
171 int m, n, mode_bitsize;
172 machine_mode mode_from;
173
174 mode_bitsize = GET_MODE_UNIT_BITSIZE (mode);
175
176 PUT_MODE (all->reg, mode);
177 PUT_MODE (all->plus, mode);
178 PUT_MODE (all->neg, mode);
179 PUT_MODE (all->mult, mode);
180 PUT_MODE (all->sdiv, mode);
181 PUT_MODE (all->udiv, mode);
182 PUT_MODE (all->sdiv_32, mode);
183 PUT_MODE (all->smod_32, mode);
184 PUT_MODE (all->wide_trunc, mode);
185 PUT_MODE (all->shift, mode);
186 PUT_MODE (all->shift_mult, mode);
187 PUT_MODE (all->shift_add, mode);
188 PUT_MODE (all->shift_sub0, mode);
189 PUT_MODE (all->shift_sub1, mode);
190 PUT_MODE (all->zext, mode);
191 PUT_MODE (all->trunc, mode);
192
193 set_add_cost (speed, mode, set_src_cost (all->plus, speed));
194 set_neg_cost (speed, mode, set_src_cost (all->neg, speed));
195 set_mul_cost (speed, mode, set_src_cost (all->mult, speed));
196 set_sdiv_cost (speed, mode, set_src_cost (all->sdiv, speed));
197 set_udiv_cost (speed, mode, set_src_cost (all->udiv, speed));
198
199 set_sdiv_pow2_cheap (speed, mode, (set_src_cost (all->sdiv_32, speed)
200 <= 2 * add_cost (speed, mode)));
201 set_smod_pow2_cheap (speed, mode, (set_src_cost (all->smod_32, speed)
202 <= 4 * add_cost (speed, mode)));
203
204 set_shift_cost (speed, mode, 0, 0);
205 {
206 int cost = add_cost (speed, mode);
207 set_shiftadd_cost (speed, mode, 0, cost);
208 set_shiftsub0_cost (speed, mode, 0, cost);
209 set_shiftsub1_cost (speed, mode, 0, cost);
210 }
211
212 n = MIN (MAX_BITS_PER_WORD, mode_bitsize);
213 for (m = 1; m < n; m++)
214 {
215 XEXP (all->shift, 1) = all->cint[m];
216 XEXP (all->shift_mult, 1) = all->pow2[m];
217
218 set_shift_cost (speed, mode, m, set_src_cost (all->shift, speed));
219 set_shiftadd_cost (speed, mode, m, set_src_cost (all->shift_add, speed));
220 set_shiftsub0_cost (speed, mode, m, set_src_cost (all->shift_sub0, speed));
221 set_shiftsub1_cost (speed, mode, m, set_src_cost (all->shift_sub1, speed));
222 }
223
224 if (SCALAR_INT_MODE_P (mode))
225 {
226 for (mode_from = MIN_MODE_INT; mode_from <= MAX_MODE_INT;
227 mode_from = (machine_mode)(mode_from + 1))
228 init_expmed_one_conv (all, mode, mode_from, speed);
229 }
230 if (GET_MODE_CLASS (mode) == MODE_INT)
231 {
232 machine_mode wider_mode = GET_MODE_WIDER_MODE (mode);
233 if (wider_mode != VOIDmode)
234 {
235 PUT_MODE (all->zext, wider_mode);
236 PUT_MODE (all->wide_mult, wider_mode);
237 PUT_MODE (all->wide_lshr, wider_mode);
238 XEXP (all->wide_lshr, 1) = GEN_INT (mode_bitsize);
239
240 set_mul_widen_cost (speed, wider_mode,
241 set_src_cost (all->wide_mult, speed));
242 set_mul_highpart_cost (speed, mode,
243 set_src_cost (all->wide_trunc, speed));
244 }
245 }
246}
247
248void
249init_expmed (void)
250{
251 struct init_expmed_rtl all;
252 machine_mode mode = QImode;
253 int m, speed;
254
255 memset (&all, 0, sizeof all);
256 for (m = 1; m < MAX_BITS_PER_WORD; m++)
257 {
258 all.pow2[m] = GEN_INT ((HOST_WIDE_INT) 1 << m);
259 all.cint[m] = GEN_INT (m);
260 }
261
262 /* Avoid using hard regs in ways which may be unsupported. */
263 all.reg = gen_rtx_raw_REG (mode, LAST_VIRTUAL_REGISTER + 1);
264 all.plus = gen_rtx_PLUS (mode, all.reg, all.reg);
265 all.neg = gen_rtx_NEG (mode, all.reg);
266 all.mult = gen_rtx_MULT (mode, all.reg, all.reg);
267 all.sdiv = gen_rtx_DIV (mode, all.reg, all.reg);
268 all.udiv = gen_rtx_UDIV (mode, all.reg, all.reg);
269 all.sdiv_32 = gen_rtx_DIV (mode, all.reg, all.pow2[5]);
270 all.smod_32 = gen_rtx_MOD (mode, all.reg, all.pow2[5]);
271 all.zext = gen_rtx_ZERO_EXTEND (mode, all.reg);
272 all.wide_mult = gen_rtx_MULT (mode, all.zext, all.zext);
273 all.wide_lshr = gen_rtx_LSHIFTRT (mode, all.wide_mult, all.reg);
274 all.wide_trunc = gen_rtx_TRUNCATE (mode, all.wide_lshr);
275 all.shift = gen_rtx_ASHIFT (mode, all.reg, all.reg);
276 all.shift_mult = gen_rtx_MULT (mode, all.reg, all.reg);
277 all.shift_add = gen_rtx_PLUS (mode, all.shift_mult, all.reg);
278 all.shift_sub0 = gen_rtx_MINUS (mode, all.shift_mult, all.reg);
279 all.shift_sub1 = gen_rtx_MINUS (mode, all.reg, all.shift_mult);
280 all.trunc = gen_rtx_TRUNCATE (mode, all.reg);
281
282 for (speed = 0; speed < 2; speed++)
283 {
284 crtl->maybe_hot_insn_p = speed;
285 set_zero_cost (speed, set_src_cost (const0_rtx, speed));
286
287 for (mode = MIN_MODE_INT; mode <= MAX_MODE_INT;
288 mode = (machine_mode)(mode + 1))
289 init_expmed_one_mode (&all, mode, speed);
290
291 if (MIN_MODE_PARTIAL_INT != VOIDmode)
292 for (mode = MIN_MODE_PARTIAL_INT; mode <= MAX_MODE_PARTIAL_INT;
293 mode = (machine_mode)(mode + 1))
294 init_expmed_one_mode (&all, mode, speed);
295
296 if (MIN_MODE_VECTOR_INT != VOIDmode)
297 for (mode = MIN_MODE_VECTOR_INT; mode <= MAX_MODE_VECTOR_INT;
298 mode = (machine_mode)(mode + 1))
299 init_expmed_one_mode (&all, mode, speed);
300 }
301
302 if (alg_hash_used_p ())
303 {
304 struct alg_hash_entry *p = alg_hash_entry_ptr (0);
305 memset (p, 0, sizeof (*p) * NUM_ALG_HASH_ENTRIES);
306 }
307 else
308 set_alg_hash_used_p (true);
309 default_rtl_profile ();
310
311 ggc_free (all.trunc);
312 ggc_free (all.shift_sub1);
313 ggc_free (all.shift_sub0);
314 ggc_free (all.shift_add);
315 ggc_free (all.shift_mult);
316 ggc_free (all.shift);
317 ggc_free (all.wide_trunc);
318 ggc_free (all.wide_lshr);
319 ggc_free (all.wide_mult);
320 ggc_free (all.zext);
321 ggc_free (all.smod_32);
322 ggc_free (all.sdiv_32);
323 ggc_free (all.udiv);
324 ggc_free (all.sdiv);
325 ggc_free (all.mult);
326 ggc_free (all.neg);
327 ggc_free (all.plus);
328 ggc_free (all.reg);
329}
330
331/* Return an rtx representing minus the value of X.
332 MODE is the intended mode of the result,
333 useful if X is a CONST_INT. */
334
335rtx
336negate_rtx (machine_mode mode, rtx x)
337{
338 rtx result = simplify_unary_operation (NEG, mode, x, mode);
339
340 if (result == 0)
341 result = expand_unop (mode, neg_optab, x, NULL_RTX, 0);
342
343 return result;
344}
345
346/* Adjust bitfield memory MEM so that it points to the first unit of mode
347 MODE that contains a bitfield of size BITSIZE at bit position BITNUM.
348 If MODE is BLKmode, return a reference to every byte in the bitfield.
349 Set *NEW_BITNUM to the bit position of the field within the new memory. */
350
351static rtx
352narrow_bit_field_mem (rtx mem, machine_mode mode,
353 unsigned HOST_WIDE_INT bitsize,
354 unsigned HOST_WIDE_INT bitnum,
355 unsigned HOST_WIDE_INT *new_bitnum)
356{
357 if (mode == BLKmode)
358 {
359 *new_bitnum = bitnum % BITS_PER_UNIT;
360 HOST_WIDE_INT offset = bitnum / BITS_PER_UNIT;
361 HOST_WIDE_INT size = ((*new_bitnum + bitsize + BITS_PER_UNIT - 1)
362 / BITS_PER_UNIT);
363 return adjust_bitfield_address_size (mem, mode, offset, size);
364 }
365 else
366 {
367 unsigned int unit = GET_MODE_BITSIZE (mode);
368 *new_bitnum = bitnum % unit;
369 HOST_WIDE_INT offset = (bitnum - *new_bitnum) / BITS_PER_UNIT;
370 return adjust_bitfield_address (mem, mode, offset);
371 }
372}
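/* For example (non-BLKmode case), narrowing to 16-bit HImode for a field
   at bit 37 gives *NEW_BITNUM = 37 % 16 = 5 and a byte offset of
   (37 - 5) / 8 = 4, assuming BITS_PER_UNIT == 8.  */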
373
374/* The caller wants to perform insertion or extraction PATTERN on a
375 bitfield of size BITSIZE at BITNUM bits into memory operand OP0.
376 BITREGION_START and BITREGION_END are as for store_bit_field
377 and FIELDMODE is the natural mode of the field.
378
379 Search for a mode that is compatible with the memory access
380 restrictions and (where applicable) with a register insertion or
381 extraction. Return the new memory on success, storing the adjusted
382 bit position in *NEW_BITNUM. Return null otherwise. */
383
384static rtx
385adjust_bit_field_mem_for_reg (enum extraction_pattern pattern,
386 rtx op0, HOST_WIDE_INT bitsize,
387 HOST_WIDE_INT bitnum,
388 unsigned HOST_WIDE_INT bitregion_start,
389 unsigned HOST_WIDE_INT bitregion_end,
390 machine_mode fieldmode,
391 unsigned HOST_WIDE_INT *new_bitnum)
392{
393 bit_field_mode_iterator iter (bitsize, bitnum, bitregion_start,
394 bitregion_end, MEM_ALIGN (op0),
395 MEM_VOLATILE_P (op0));
396 machine_mode best_mode;
397 if (iter.next_mode (&best_mode))
398 {
399 /* We can use a memory in BEST_MODE. See whether this is true for
400 any wider modes. All other things being equal, we prefer to
401 use the widest mode possible because it tends to expose more
402 CSE opportunities. */
403 if (!iter.prefer_smaller_modes ())
404 {
405 /* Limit the search to the mode required by the corresponding
406 register insertion or extraction instruction, if any. */
407 machine_mode limit_mode = word_mode;
408 extraction_insn insn;
409 if (get_best_reg_extraction_insn (&insn, pattern,
410 GET_MODE_BITSIZE (best_mode),
411 fieldmode))
412 limit_mode = insn.field_mode;
413
414 machine_mode wider_mode;
415 while (iter.next_mode (&wider_mode)
416 && GET_MODE_SIZE (wider_mode) <= GET_MODE_SIZE (limit_mode))
417 best_mode = wider_mode;
418 }
419 return narrow_bit_field_mem (op0, best_mode, bitsize, bitnum,
420 new_bitnum);
421 }
422 return NULL_RTX;
423}
424
425/* Return true if a bitfield of size BITSIZE at bit number BITNUM within
426 a structure of mode STRUCT_MODE represents a lowpart subreg. The subreg
427 offset is then BITNUM / BITS_PER_UNIT. */
428
429static bool
430lowpart_bit_field_p (unsigned HOST_WIDE_INT bitnum,
431 unsigned HOST_WIDE_INT bitsize,
432 machine_mode struct_mode)
433{
434 if (BYTES_BIG_ENDIAN)
435 return (bitnum % BITS_PER_UNIT == 0
436 && (bitnum + bitsize == GET_MODE_BITSIZE (struct_mode)
437 || (bitnum + bitsize) % BITS_PER_WORD == 0));
438 else
439 return bitnum % BITS_PER_WORD == 0;
440}
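/* For example, on a !BYTES_BIG_ENDIAN target with BITS_PER_WORD == 32,
   a 16-bit field at bit 32 of a DImode value qualifies (32 % 32 == 0),
   and the subreg offset described above is 32 / BITS_PER_UNIT == 4.  */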
441
442/* Return true if -fstrict-volatile-bitfields applies to an access of OP0
443 containing BITSIZE bits starting at BITNUM, with field mode FIELDMODE.
444 Return false if the access would touch memory outside the range
445 BITREGION_START to BITREGION_END for conformance to the C++ memory
446 model. */
447
448static bool
449strict_volatile_bitfield_p (rtx op0, unsigned HOST_WIDE_INT bitsize,
450 unsigned HOST_WIDE_INT bitnum,
451 machine_mode fieldmode,
452 unsigned HOST_WIDE_INT bitregion_start,
453 unsigned HOST_WIDE_INT bitregion_end)
454{
455 unsigned HOST_WIDE_INT modesize = GET_MODE_BITSIZE (fieldmode);
456
457 /* -fstrict-volatile-bitfields must be enabled and we must have a
458 volatile MEM. */
459 if (!MEM_P (op0)
460 || !MEM_VOLATILE_P (op0)
461 || flag_strict_volatile_bitfields <= 0)
462 return false;
463
464 /* Non-integral modes likely only happen with packed structures.
465 Punt. */
466 if (!SCALAR_INT_MODE_P (fieldmode))
467 return false;
468
469 /* The bit size must not be larger than the field mode, and
470 the field mode must not be larger than a word. */
471 if (bitsize > modesize || modesize > BITS_PER_WORD)
472 return false;
473
474 /* Check for cases of unaligned fields that must be split. */
475 if (bitnum % BITS_PER_UNIT + bitsize > modesize
476 || (STRICT_ALIGNMENT
477 && bitnum % GET_MODE_ALIGNMENT (fieldmode) + bitsize > modesize))
478 return false;
479
480 /* Check for cases where the C++ memory model applies. */
481 if (bitregion_end != 0
482 && (bitnum - bitnum % modesize < bitregion_start
483 || bitnum - bitnum % modesize + modesize - 1 > bitregion_end))
484 return false;
485
486 return true;
487}
488
 489 /* Return true if OP0 is a memory and if a bitfield of size BITSIZE at
 490 bit number BITNUM can be treated as a simple value of mode MODE. */
491
492static bool
493simple_mem_bitfield_p (rtx op0, unsigned HOST_WIDE_INT bitsize,
494 unsigned HOST_WIDE_INT bitnum, machine_mode mode)
495{
496 return (MEM_P (op0)
497 && bitnum % BITS_PER_UNIT == 0
498 && bitsize == GET_MODE_BITSIZE (mode)
499 && (!SLOW_UNALIGNED_ACCESS (mode, MEM_ALIGN (op0))
500 || (bitnum % GET_MODE_ALIGNMENT (mode) == 0
501 && MEM_ALIGN (op0) >= GET_MODE_ALIGNMENT (mode))));
502}
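/* In other words, a field that starts on a byte boundary and spans exactly
   one MODE-sized value can be accessed with a single MODE move, provided
   unaligned accesses are cheap or the field is sufficiently aligned.  */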
503\f
504/* Try to use instruction INSV to store VALUE into a field of OP0.
505 BITSIZE and BITNUM are as for store_bit_field. */
506
507static bool
508store_bit_field_using_insv (const extraction_insn *insv, rtx op0,
509 unsigned HOST_WIDE_INT bitsize,
510 unsigned HOST_WIDE_INT bitnum,
511 rtx value)
512{
513 struct expand_operand ops[4];
514 rtx value1;
515 rtx xop0 = op0;
516 rtx_insn *last = get_last_insn ();
517 bool copy_back = false;
518
519 machine_mode op_mode = insv->field_mode;
520 unsigned int unit = GET_MODE_BITSIZE (op_mode);
521 if (bitsize == 0 || bitsize > unit)
522 return false;
523
524 if (MEM_P (xop0))
525 /* Get a reference to the first byte of the field. */
526 xop0 = narrow_bit_field_mem (xop0, insv->struct_mode, bitsize, bitnum,
527 &bitnum);
528 else
529 {
530 /* Convert from counting within OP0 to counting in OP_MODE. */
531 if (BYTES_BIG_ENDIAN)
532 bitnum += unit - GET_MODE_BITSIZE (GET_MODE (op0));
533
534 /* If xop0 is a register, we need it in OP_MODE
535 to make it acceptable to the format of insv. */
536 if (GET_CODE (xop0) == SUBREG)
537 /* We can't just change the mode, because this might clobber op0,
538 and we will need the original value of op0 if insv fails. */
539 xop0 = gen_rtx_SUBREG (op_mode, SUBREG_REG (xop0), SUBREG_BYTE (xop0));
540 if (REG_P (xop0) && GET_MODE (xop0) != op_mode)
541 xop0 = gen_lowpart_SUBREG (op_mode, xop0);
542 }
543
544 /* If the destination is a paradoxical subreg such that we need a
545 truncate to the inner mode, perform the insertion on a temporary and
546 truncate the result to the original destination. Note that we can't
547 just truncate the paradoxical subreg as (truncate:N (subreg:W (reg:N
548 X) 0)) is (reg:N X). */
549 if (GET_CODE (xop0) == SUBREG
550 && REG_P (SUBREG_REG (xop0))
551 && !TRULY_NOOP_TRUNCATION_MODES_P (GET_MODE (SUBREG_REG (xop0)),
552 op_mode))
553 {
554 rtx tem = gen_reg_rtx (op_mode);
555 emit_move_insn (tem, xop0);
556 xop0 = tem;
557 copy_back = true;
558 }
559
 560 /* There is a similar overflow check at the start of store_bit_field_1,
 561 but it only handles the case where the field lies completely
 562 outside the register. There are also cases where the field lies
 563 partially in the register, and we need to adjust BITSIZE for that
 564 partial overflow. Without this fix, pr48335-2.c is broken on
 565 big-endian targets that have a bit-insert instruction, such as
 566 arm and aarch64. */
567 if (bitsize + bitnum > unit && bitnum < unit)
568 {
569 warning (OPT_Wextra, "write of %wu-bit data outside the bound of "
570 "destination object, data truncated into %wu-bit",
571 bitsize, unit - bitnum);
572 bitsize = unit - bitnum;
573 }
574
575 /* If BITS_BIG_ENDIAN is zero on a BYTES_BIG_ENDIAN machine, we count
576 "backwards" from the size of the unit we are inserting into.
577 Otherwise, we count bits from the most significant on a
578 BYTES/BITS_BIG_ENDIAN machine. */
579
580 if (BITS_BIG_ENDIAN != BYTES_BIG_ENDIAN)
581 bitnum = unit - bitsize - bitnum;
582
583 /* Convert VALUE to op_mode (which insv insn wants) in VALUE1. */
584 value1 = value;
585 if (GET_MODE (value) != op_mode)
586 {
587 if (GET_MODE_BITSIZE (GET_MODE (value)) >= bitsize)
588 {
589 /* Optimization: Don't bother really extending VALUE
590 if it has all the bits we will actually use. However,
591 if we must narrow it, be sure we do it correctly. */
592
593 if (GET_MODE_SIZE (GET_MODE (value)) < GET_MODE_SIZE (op_mode))
594 {
595 rtx tmp;
596
597 tmp = simplify_subreg (op_mode, value1, GET_MODE (value), 0);
598 if (! tmp)
599 tmp = simplify_gen_subreg (op_mode,
600 force_reg (GET_MODE (value),
601 value1),
602 GET_MODE (value), 0);
603 value1 = tmp;
604 }
605 else
606 value1 = gen_lowpart (op_mode, value1);
607 }
608 else if (CONST_INT_P (value))
609 value1 = gen_int_mode (INTVAL (value), op_mode);
610 else
611 /* Parse phase is supposed to make VALUE's data type
612 match that of the component reference, which is a type
613 at least as wide as the field; so VALUE should have
614 a mode that corresponds to that type. */
615 gcc_assert (CONSTANT_P (value));
616 }
617
618 create_fixed_operand (&ops[0], xop0);
619 create_integer_operand (&ops[1], bitsize);
620 create_integer_operand (&ops[2], bitnum);
621 create_input_operand (&ops[3], value1, op_mode);
622 if (maybe_expand_insn (insv->icode, 4, ops))
623 {
624 if (copy_back)
625 convert_move (op0, xop0, true);
626 return true;
627 }
628 delete_insns_since (last);
629 return false;
630}
631
632/* A subroutine of store_bit_field, with the same arguments. Return true
633 if the operation could be implemented.
634
635 If FALLBACK_P is true, fall back to store_fixed_bit_field if we have
636 no other way of implementing the operation. If FALLBACK_P is false,
637 return false instead. */
638
639static bool
640store_bit_field_1 (rtx str_rtx, unsigned HOST_WIDE_INT bitsize,
641 unsigned HOST_WIDE_INT bitnum,
642 unsigned HOST_WIDE_INT bitregion_start,
643 unsigned HOST_WIDE_INT bitregion_end,
644 machine_mode fieldmode,
645 rtx value, bool fallback_p)
646{
647 rtx op0 = str_rtx;
648 rtx orig_value;
649
650 while (GET_CODE (op0) == SUBREG)
651 {
652 /* The following line once was done only if WORDS_BIG_ENDIAN,
653 but I think that is a mistake. WORDS_BIG_ENDIAN is
654 meaningful at a much higher level; when structures are copied
655 between memory and regs, the higher-numbered regs
656 always get higher addresses. */
657 int inner_mode_size = GET_MODE_SIZE (GET_MODE (SUBREG_REG (op0)));
658 int outer_mode_size = GET_MODE_SIZE (GET_MODE (op0));
659 int byte_offset = 0;
660
661 /* Paradoxical subregs need special handling on big endian machines. */
662 if (SUBREG_BYTE (op0) == 0 && inner_mode_size < outer_mode_size)
663 {
664 int difference = inner_mode_size - outer_mode_size;
665
666 if (WORDS_BIG_ENDIAN)
667 byte_offset += (difference / UNITS_PER_WORD) * UNITS_PER_WORD;
668 if (BYTES_BIG_ENDIAN)
669 byte_offset += difference % UNITS_PER_WORD;
670 }
671 else
672 byte_offset = SUBREG_BYTE (op0);
673
674 bitnum += byte_offset * BITS_PER_UNIT;
675 op0 = SUBREG_REG (op0);
676 }
677
678 /* No action is needed if the target is a register and if the field
679 lies completely outside that register. This can occur if the source
680 code contains an out-of-bounds access to a small array. */
681 if (REG_P (op0) && bitnum >= GET_MODE_BITSIZE (GET_MODE (op0)))
682 return true;
683
684 /* Use vec_set patterns for inserting parts of vectors whenever
685 available. */
686 if (VECTOR_MODE_P (GET_MODE (op0))
687 && !MEM_P (op0)
688 && optab_handler (vec_set_optab, GET_MODE (op0)) != CODE_FOR_nothing
689 && fieldmode == GET_MODE_INNER (GET_MODE (op0))
690 && bitsize == GET_MODE_BITSIZE (GET_MODE_INNER (GET_MODE (op0)))
691 && !(bitnum % GET_MODE_BITSIZE (GET_MODE_INNER (GET_MODE (op0)))))
692 {
693 struct expand_operand ops[3];
694 machine_mode outermode = GET_MODE (op0);
695 machine_mode innermode = GET_MODE_INNER (outermode);
696 enum insn_code icode = optab_handler (vec_set_optab, outermode);
697 int pos = bitnum / GET_MODE_BITSIZE (innermode);
698
699 create_fixed_operand (&ops[0], op0);
700 create_input_operand (&ops[1], value, innermode);
701 create_integer_operand (&ops[2], pos);
702 if (maybe_expand_insn (icode, 3, ops))
703 return true;
704 }
705
706 /* If the target is a register, overwriting the entire object, or storing
707 a full-word or multi-word field can be done with just a SUBREG. */
708 if (!MEM_P (op0)
709 && bitsize == GET_MODE_BITSIZE (fieldmode)
710 && ((bitsize == GET_MODE_BITSIZE (GET_MODE (op0)) && bitnum == 0)
711 || (bitsize % BITS_PER_WORD == 0 && bitnum % BITS_PER_WORD == 0)))
712 {
713 /* Use the subreg machinery either to narrow OP0 to the required
714 words or to cope with mode punning between equal-sized modes.
715 In the latter case, use subreg on the rhs side, not lhs. */
716 rtx sub;
717
718 if (bitsize == GET_MODE_BITSIZE (GET_MODE (op0)))
719 {
720 sub = simplify_gen_subreg (GET_MODE (op0), value, fieldmode, 0);
721 if (sub)
722 {
723 emit_move_insn (op0, sub);
724 return true;
725 }
726 }
727 else
728 {
729 sub = simplify_gen_subreg (fieldmode, op0, GET_MODE (op0),
730 bitnum / BITS_PER_UNIT);
731 if (sub)
732 {
733 emit_move_insn (sub, value);
734 return true;
735 }
736 }
737 }
738
739 /* If the target is memory, storing any naturally aligned field can be
740 done with a simple store. For targets that support fast unaligned
741 memory, any naturally sized, unit aligned field can be done directly. */
742 if (simple_mem_bitfield_p (op0, bitsize, bitnum, fieldmode))
743 {
744 op0 = adjust_bitfield_address (op0, fieldmode, bitnum / BITS_PER_UNIT);
745 emit_move_insn (op0, value);
746 return true;
747 }
748
749 /* Make sure we are playing with integral modes. Pun with subregs
750 if we aren't. This must come after the entire register case above,
751 since that case is valid for any mode. The following cases are only
752 valid for integral modes. */
753 {
754 machine_mode imode = int_mode_for_mode (GET_MODE (op0));
755 if (imode != GET_MODE (op0))
756 {
757 if (MEM_P (op0))
758 op0 = adjust_bitfield_address_size (op0, imode, 0, MEM_SIZE (op0));
759 else
760 {
761 gcc_assert (imode != BLKmode);
762 op0 = gen_lowpart (imode, op0);
763 }
764 }
765 }
766
767 /* Storing an lsb-aligned field in a register
768 can be done with a movstrict instruction. */
769
770 if (!MEM_P (op0)
771 && lowpart_bit_field_p (bitnum, bitsize, GET_MODE (op0))
772 && bitsize == GET_MODE_BITSIZE (fieldmode)
773 && optab_handler (movstrict_optab, fieldmode) != CODE_FOR_nothing)
774 {
775 struct expand_operand ops[2];
776 enum insn_code icode = optab_handler (movstrict_optab, fieldmode);
777 rtx arg0 = op0;
778 unsigned HOST_WIDE_INT subreg_off;
779
780 if (GET_CODE (arg0) == SUBREG)
781 {
782 /* Else we've got some float mode source being extracted into
783 a different float mode destination -- this combination of
784 subregs results in Severe Tire Damage. */
785 gcc_assert (GET_MODE (SUBREG_REG (arg0)) == fieldmode
786 || GET_MODE_CLASS (fieldmode) == MODE_INT
787 || GET_MODE_CLASS (fieldmode) == MODE_PARTIAL_INT);
788 arg0 = SUBREG_REG (arg0);
789 }
790
791 subreg_off = bitnum / BITS_PER_UNIT;
792 if (validate_subreg (fieldmode, GET_MODE (arg0), arg0, subreg_off))
793 {
794 arg0 = gen_rtx_SUBREG (fieldmode, arg0, subreg_off);
795
796 create_fixed_operand (&ops[0], arg0);
797 /* Shrink the source operand to FIELDMODE. */
798 create_convert_operand_to (&ops[1], value, fieldmode, false);
799 if (maybe_expand_insn (icode, 2, ops))
800 return true;
801 }
802 }
803
804 /* Handle fields bigger than a word. */
805
806 if (bitsize > BITS_PER_WORD)
807 {
808 /* Here we transfer the words of the field
809 in the order least significant first.
810 This is because the most significant word is the one which may
811 be less than full.
812 However, only do that if the value is not BLKmode. */
813
814 unsigned int backwards = WORDS_BIG_ENDIAN && fieldmode != BLKmode;
815 unsigned int nwords = (bitsize + (BITS_PER_WORD - 1)) / BITS_PER_WORD;
816 unsigned int i;
817 rtx_insn *last;
818
819 /* This is the mode we must force value to, so that there will be enough
820 subwords to extract. Note that fieldmode will often (always?) be
821 VOIDmode, because that is what store_field uses to indicate that this
822 is a bit field, but passing VOIDmode to operand_subword_force
823 is not allowed. */
824 fieldmode = GET_MODE (value);
825 if (fieldmode == VOIDmode)
826 fieldmode = smallest_mode_for_size (nwords * BITS_PER_WORD, MODE_INT);
827
828 last = get_last_insn ();
829 for (i = 0; i < nwords; i++)
830 {
831 /* If I is 0, use the low-order word in both field and target;
832 if I is 1, use the next to lowest word; and so on. */
833 unsigned int wordnum = (backwards
834 ? GET_MODE_SIZE (fieldmode) / UNITS_PER_WORD
835 - i - 1
836 : i);
837 unsigned int bit_offset = (backwards
838 ? MAX ((int) bitsize - ((int) i + 1)
839 * BITS_PER_WORD,
840 0)
841 : (int) i * BITS_PER_WORD);
842 rtx value_word = operand_subword_force (value, wordnum, fieldmode);
843 unsigned HOST_WIDE_INT new_bitsize =
844 MIN (BITS_PER_WORD, bitsize - i * BITS_PER_WORD);
845
846 /* If the remaining chunk doesn't have full wordsize we have
847 to make sure that for big endian machines the higher order
848 bits are used. */
849 if (new_bitsize < BITS_PER_WORD && BYTES_BIG_ENDIAN && !backwards)
850 value_word = simplify_expand_binop (word_mode, lshr_optab,
851 value_word,
852 GEN_INT (BITS_PER_WORD
853 - new_bitsize),
854 NULL_RTX, true,
855 OPTAB_LIB_WIDEN);
856
857 if (!store_bit_field_1 (op0, new_bitsize,
858 bitnum + bit_offset,
859 bitregion_start, bitregion_end,
860 word_mode,
861 value_word, fallback_p))
862 {
863 delete_insns_since (last);
864 return false;
865 }
866 }
867 return true;
868 }
869
870 /* If VALUE has a floating-point or complex mode, access it as an
871 integer of the corresponding size. This can occur on a machine
872 with 64 bit registers that uses SFmode for float. It can also
873 occur for unaligned float or complex fields. */
874 orig_value = value;
875 if (GET_MODE (value) != VOIDmode
876 && GET_MODE_CLASS (GET_MODE (value)) != MODE_INT
877 && GET_MODE_CLASS (GET_MODE (value)) != MODE_PARTIAL_INT)
878 {
879 value = gen_reg_rtx (int_mode_for_mode (GET_MODE (value)));
880 emit_move_insn (gen_lowpart (GET_MODE (orig_value), value), orig_value);
881 }
882
883 /* If OP0 is a multi-word register, narrow it to the affected word.
884 If the region spans two words, defer to store_split_bit_field. */
885 if (!MEM_P (op0) && GET_MODE_SIZE (GET_MODE (op0)) > UNITS_PER_WORD)
886 {
887 op0 = simplify_gen_subreg (word_mode, op0, GET_MODE (op0),
888 bitnum / BITS_PER_WORD * UNITS_PER_WORD);
889 gcc_assert (op0);
890 bitnum %= BITS_PER_WORD;
891 if (bitnum + bitsize > BITS_PER_WORD)
892 {
893 if (!fallback_p)
894 return false;
895
896 store_split_bit_field (op0, bitsize, bitnum, bitregion_start,
897 bitregion_end, value);
898 return true;
899 }
900 }
901
902 /* From here on we can assume that the field to be stored in fits
903 within a word. If the destination is a register, it too fits
904 in a word. */
905
906 extraction_insn insv;
907 if (!MEM_P (op0)
908 && get_best_reg_extraction_insn (&insv, EP_insv,
909 GET_MODE_BITSIZE (GET_MODE (op0)),
910 fieldmode)
911 && store_bit_field_using_insv (&insv, op0, bitsize, bitnum, value))
912 return true;
913
914 /* If OP0 is a memory, try copying it to a register and seeing if a
915 cheap register alternative is available. */
916 if (MEM_P (op0))
917 {
918 if (get_best_mem_extraction_insn (&insv, EP_insv, bitsize, bitnum,
919 fieldmode)
920 && store_bit_field_using_insv (&insv, op0, bitsize, bitnum, value))
921 return true;
922
923 rtx_insn *last = get_last_insn ();
924
925 /* Try loading part of OP0 into a register, inserting the bitfield
926 into that, and then copying the result back to OP0. */
927 unsigned HOST_WIDE_INT bitpos;
928 rtx xop0 = adjust_bit_field_mem_for_reg (EP_insv, op0, bitsize, bitnum,
929 bitregion_start, bitregion_end,
930 fieldmode, &bitpos);
931 if (xop0)
932 {
933 rtx tempreg = copy_to_reg (xop0);
934 if (store_bit_field_1 (tempreg, bitsize, bitpos,
935 bitregion_start, bitregion_end,
936 fieldmode, orig_value, false))
937 {
938 emit_move_insn (xop0, tempreg);
939 return true;
940 }
941 delete_insns_since (last);
942 }
943 }
944
945 if (!fallback_p)
946 return false;
947
948 store_fixed_bit_field (op0, bitsize, bitnum, bitregion_start,
949 bitregion_end, value);
950 return true;
951}
952
953/* Generate code to store value from rtx VALUE
954 into a bit-field within structure STR_RTX
955 containing BITSIZE bits starting at bit BITNUM.
956
957 BITREGION_START is bitpos of the first bitfield in this region.
958 BITREGION_END is the bitpos of the ending bitfield in this region.
959 These two fields are 0, if the C++ memory model does not apply,
960 or we are not interested in keeping track of bitfield regions.
961
962 FIELDMODE is the machine-mode of the FIELD_DECL node for this field. */
963
964void
965store_bit_field (rtx str_rtx, unsigned HOST_WIDE_INT bitsize,
966 unsigned HOST_WIDE_INT bitnum,
967 unsigned HOST_WIDE_INT bitregion_start,
968 unsigned HOST_WIDE_INT bitregion_end,
969 machine_mode fieldmode,
970 rtx value)
971{
972 /* Handle -fstrict-volatile-bitfields in the cases where it applies. */
973 if (strict_volatile_bitfield_p (str_rtx, bitsize, bitnum, fieldmode,
974 bitregion_start, bitregion_end))
975 {
976 /* Storing any naturally aligned field can be done with a simple
977 store. For targets that support fast unaligned memory, any
978 naturally sized, unit aligned field can be done directly. */
 979 if (bitsize == GET_MODE_BITSIZE (fieldmode))
980 {
981 str_rtx = adjust_bitfield_address (str_rtx, fieldmode,
982 bitnum / BITS_PER_UNIT);
983 emit_move_insn (str_rtx, value);
984 }
985 else
986 {
987 rtx temp;
988
989 str_rtx = narrow_bit_field_mem (str_rtx, fieldmode, bitsize, bitnum,
990 &bitnum);
991 temp = copy_to_reg (str_rtx);
992 if (!store_bit_field_1 (temp, bitsize, bitnum, 0, 0,
993 fieldmode, value, true))
994 gcc_unreachable ();
995
996 emit_move_insn (str_rtx, temp);
997 }
998
999 return;
1000 }
1001
1002 /* Under the C++0x memory model, we must not touch bits outside the
1003 bit region. Adjust the address to start at the beginning of the
1004 bit region. */
1005 if (MEM_P (str_rtx) && bitregion_start > 0)
1006 {
1007 machine_mode bestmode;
1008 HOST_WIDE_INT offset, size;
1009
1010 gcc_assert ((bitregion_start % BITS_PER_UNIT) == 0);
1011
1012 offset = bitregion_start / BITS_PER_UNIT;
1013 bitnum -= bitregion_start;
1014 size = (bitnum + bitsize + BITS_PER_UNIT - 1) / BITS_PER_UNIT;
1015 bitregion_end -= bitregion_start;
1016 bitregion_start = 0;
1017 bestmode = get_best_mode (bitsize, bitnum,
1018 bitregion_start, bitregion_end,
1019 MEM_ALIGN (str_rtx), VOIDmode,
1020 MEM_VOLATILE_P (str_rtx));
1021 str_rtx = adjust_bitfield_address_size (str_rtx, bestmode, offset, size);
1022 }
1023
1024 if (!store_bit_field_1 (str_rtx, bitsize, bitnum,
1025 bitregion_start, bitregion_end,
1026 fieldmode, value, true))
1027 gcc_unreachable ();
1028}
1029\f
1030/* Use shifts and boolean operations to store VALUE into a bit field of
1031 width BITSIZE in OP0, starting at bit BITNUM. */
1032
1033static void
1034store_fixed_bit_field (rtx op0, unsigned HOST_WIDE_INT bitsize,
1035 unsigned HOST_WIDE_INT bitnum,
1036 unsigned HOST_WIDE_INT bitregion_start,
1037 unsigned HOST_WIDE_INT bitregion_end,
1038 rtx value)
1039{
1040 /* There is a case not handled here:
1041 a structure with a known alignment of just a halfword
1042 and a field split across two aligned halfwords within the structure.
1043 Or likewise a structure with a known alignment of just a byte
1044 and a field split across two bytes.
1045 Such cases are not supposed to be able to occur. */
1046
1047 if (MEM_P (op0))
1048 {
1049 machine_mode mode = GET_MODE (op0);
1050 if (GET_MODE_BITSIZE (mode) == 0
1051 || GET_MODE_BITSIZE (mode) > GET_MODE_BITSIZE (word_mode))
1052 mode = word_mode;
1053 mode = get_best_mode (bitsize, bitnum, bitregion_start, bitregion_end,
1054 MEM_ALIGN (op0), mode, MEM_VOLATILE_P (op0));
1055
1056 if (mode == VOIDmode)
1057 {
1058 /* The only way this should occur is if the field spans word
1059 boundaries. */
1060 store_split_bit_field (op0, bitsize, bitnum, bitregion_start,
1061 bitregion_end, value);
1062 return;
1063 }
1064
1065 op0 = narrow_bit_field_mem (op0, mode, bitsize, bitnum, &bitnum);
1066 }
1067
1068 store_fixed_bit_field_1 (op0, bitsize, bitnum, value);
1069}
1070
1071/* Helper function for store_fixed_bit_field, stores
1072 the bit field always using the MODE of OP0. */
1073
1074static void
1075store_fixed_bit_field_1 (rtx op0, unsigned HOST_WIDE_INT bitsize,
1076 unsigned HOST_WIDE_INT bitnum,
1077 rtx value)
1078{
1079 machine_mode mode;
1080 rtx temp;
1081 int all_zero = 0;
1082 int all_one = 0;
1083
1084 mode = GET_MODE (op0);
1085 gcc_assert (SCALAR_INT_MODE_P (mode));
1086
1087 /* Note that bitsize + bitnum can be greater than GET_MODE_BITSIZE (mode)
1088 for invalid input, such as f5 from gcc.dg/pr48335-2.c. */
1089
1090 if (BYTES_BIG_ENDIAN)
1091 /* BITNUM is the distance between our msb
1092 and that of the containing datum.
1093 Convert it to the distance from the lsb. */
1094 bitnum = GET_MODE_BITSIZE (mode) - bitsize - bitnum;
1095
1096 /* Now BITNUM is always the distance between our lsb
1097 and that of OP0. */
1098
1099 /* Shift VALUE left by BITNUM bits. If VALUE is not constant,
1100 we must first convert its mode to MODE. */
1101
1102 if (CONST_INT_P (value))
1103 {
1104 unsigned HOST_WIDE_INT v = UINTVAL (value);
1105
1106 if (bitsize < HOST_BITS_PER_WIDE_INT)
1107 v &= ((unsigned HOST_WIDE_INT) 1 << bitsize) - 1;
1108
1109 if (v == 0)
1110 all_zero = 1;
1111 else if ((bitsize < HOST_BITS_PER_WIDE_INT
1112 && v == ((unsigned HOST_WIDE_INT) 1 << bitsize) - 1)
1113 || (bitsize == HOST_BITS_PER_WIDE_INT
1114 && v == (unsigned HOST_WIDE_INT) -1))
1115 all_one = 1;
1116
1117 value = lshift_value (mode, v, bitnum);
1118 }
1119 else
1120 {
1121 int must_and = (GET_MODE_BITSIZE (GET_MODE (value)) != bitsize
1122 && bitnum + bitsize != GET_MODE_BITSIZE (mode));
1123
1124 if (GET_MODE (value) != mode)
1125 value = convert_to_mode (mode, value, 1);
1126
1127 if (must_and)
1128 value = expand_binop (mode, and_optab, value,
1129 mask_rtx (mode, 0, bitsize, 0),
1130 NULL_RTX, 1, OPTAB_LIB_WIDEN);
1131 if (bitnum > 0)
1132 value = expand_shift (LSHIFT_EXPR, mode, value,
1133 bitnum, NULL_RTX, 1);
1134 }
1135
1136 /* Now clear the chosen bits in OP0,
1137 except that if VALUE is -1 we need not bother. */
1138 /* We keep the intermediates in registers to allow CSE to combine
1139 consecutive bitfield assignments. */
1140
1141 temp = force_reg (mode, op0);
1142
1143 if (! all_one)
1144 {
1145 temp = expand_binop (mode, and_optab, temp,
1146 mask_rtx (mode, bitnum, bitsize, 1),
1147 NULL_RTX, 1, OPTAB_LIB_WIDEN);
1148 temp = force_reg (mode, temp);
1149 }
1150
1151 /* Now logical-or VALUE into OP0, unless it is zero. */
1152
1153 if (! all_zero)
1154 {
1155 temp = expand_binop (mode, ior_optab, temp, value,
1156 NULL_RTX, 1, OPTAB_LIB_WIDEN);
1157 temp = force_reg (mode, temp);
1158 }
1159
1160 if (op0 != temp)
1161 {
1162 op0 = copy_rtx (op0);
1163 emit_move_insn (op0, temp);
1164 }
1165}
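/* Worked example (QImode, !BYTES_BIG_ENDIAN): storing the constant 5 into
   a 3-bit field at BITNUM 4 gives VALUE = (5 & 7) << 4 = 0x50 and the
   clearing mask mask_rtx (QImode, 4, 3, 1) = 0x8f, so OP0 becomes
   (OP0 & 0x8f) | 0x50.  */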
1166\f
1167/* Store a bit field that is split across multiple accessible memory objects.
1168
1169 OP0 is the REG, SUBREG or MEM rtx for the first of the objects.
1170 BITSIZE is the field width; BITPOS the position of its first bit
1171 (within the word).
1172 VALUE is the value to store.
1173
1174 This does not yet handle fields wider than BITS_PER_WORD. */
1175
1176static void
1177store_split_bit_field (rtx op0, unsigned HOST_WIDE_INT bitsize,
1178 unsigned HOST_WIDE_INT bitpos,
1179 unsigned HOST_WIDE_INT bitregion_start,
1180 unsigned HOST_WIDE_INT bitregion_end,
1181 rtx value)
1182{
1183 unsigned int unit;
1184 unsigned int bitsdone = 0;
1185
1186 /* Make sure UNIT isn't larger than BITS_PER_WORD, we can only handle that
1187 much at a time. */
1188 if (REG_P (op0) || GET_CODE (op0) == SUBREG)
1189 unit = BITS_PER_WORD;
1190 else
1191 unit = MIN (MEM_ALIGN (op0), BITS_PER_WORD);
1192
1193 /* If OP0 is a memory with a mode, then UNIT must not be larger than
1194 OP0's mode as well. Otherwise, store_fixed_bit_field will call us
1195 again, and we will mutually recurse forever. */
1196 if (MEM_P (op0) && GET_MODE_BITSIZE (GET_MODE (op0)) > 0)
1197 unit = MIN (unit, GET_MODE_BITSIZE (GET_MODE (op0)));
1198
1199 /* If VALUE is a constant other than a CONST_INT, get it into a register in
1200 WORD_MODE. If we can do this using gen_lowpart_common, do so. Note
1201 that VALUE might be a floating-point constant. */
1202 if (CONSTANT_P (value) && !CONST_INT_P (value))
1203 {
1204 rtx word = gen_lowpart_common (word_mode, value);
1205
1206 if (word && (value != word))
1207 value = word;
1208 else
1209 value = gen_lowpart_common (word_mode,
1210 force_reg (GET_MODE (value) != VOIDmode
1211 ? GET_MODE (value)
1212 : word_mode, value));
1213 }
1214
1215 while (bitsdone < bitsize)
1216 {
1217 unsigned HOST_WIDE_INT thissize;
1218 rtx part, word;
1219 unsigned HOST_WIDE_INT thispos;
1220 unsigned HOST_WIDE_INT offset;
1221
1222 offset = (bitpos + bitsdone) / unit;
1223 thispos = (bitpos + bitsdone) % unit;
1224
 1225 /* When the region of bytes we can touch is restricted, decrease
1226 UNIT close to the end of the region as needed. If op0 is a REG
1227 or SUBREG of REG, don't do this, as there can't be data races
1228 on a register and we can expand shorter code in some cases. */
1229 if (bitregion_end
1230 && unit > BITS_PER_UNIT
1231 && bitpos + bitsdone - thispos + unit > bitregion_end + 1
1232 && !REG_P (op0)
1233 && (GET_CODE (op0) != SUBREG || !REG_P (SUBREG_REG (op0))))
1234 {
1235 unit = unit / 2;
1236 continue;
1237 }
1238
1239 /* THISSIZE must not overrun a word boundary. Otherwise,
1240 store_fixed_bit_field will call us again, and we will mutually
1241 recurse forever. */
1242 thissize = MIN (bitsize - bitsdone, BITS_PER_WORD);
1243 thissize = MIN (thissize, unit - thispos);
1244
1245 if (BYTES_BIG_ENDIAN)
1246 {
1247 /* Fetch successively less significant portions. */
1248 if (CONST_INT_P (value))
1249 part = GEN_INT (((unsigned HOST_WIDE_INT) (INTVAL (value))
1250 >> (bitsize - bitsdone - thissize))
1251 & (((HOST_WIDE_INT) 1 << thissize) - 1));
1252 else
1253 {
1254 int total_bits = GET_MODE_BITSIZE (GET_MODE (value));
1255 /* The args are chosen so that the last part includes the
1256 lsb. Give extract_bit_field the value it needs (with
1257 endianness compensation) to fetch the piece we want. */
1258 part = extract_fixed_bit_field (word_mode, value, thissize,
1259 total_bits - bitsize + bitsdone,
1260 NULL_RTX, 1);
1261 }
1262 }
1263 else
1264 {
1265 /* Fetch successively more significant portions. */
1266 if (CONST_INT_P (value))
1267 part = GEN_INT (((unsigned HOST_WIDE_INT) (INTVAL (value))
1268 >> bitsdone)
1269 & (((HOST_WIDE_INT) 1 << thissize) - 1));
1270 else
1271 part = extract_fixed_bit_field (word_mode, value, thissize,
1272 bitsdone, NULL_RTX, 1);
1273 }
1274
1275 /* If OP0 is a register, then handle OFFSET here.
1276
1277 When handling multiword bitfields, extract_bit_field may pass
1278 down a word_mode SUBREG of a larger REG for a bitfield that actually
1279 crosses a word boundary. Thus, for a SUBREG, we must find
1280 the current word starting from the base register. */
1281 if (GET_CODE (op0) == SUBREG)
1282 {
1283 int word_offset = (SUBREG_BYTE (op0) / UNITS_PER_WORD)
1284 + (offset * unit / BITS_PER_WORD);
1285 machine_mode sub_mode = GET_MODE (SUBREG_REG (op0));
1286 if (sub_mode != BLKmode && GET_MODE_SIZE (sub_mode) < UNITS_PER_WORD)
1287 word = word_offset ? const0_rtx : op0;
1288 else
1289 word = operand_subword_force (SUBREG_REG (op0), word_offset,
1290 GET_MODE (SUBREG_REG (op0)));
1291 offset &= BITS_PER_WORD / unit - 1;
1292 }
1293 else if (REG_P (op0))
1294 {
1295 machine_mode op0_mode = GET_MODE (op0);
1296 if (op0_mode != BLKmode && GET_MODE_SIZE (op0_mode) < UNITS_PER_WORD)
1297 word = offset ? const0_rtx : op0;
1298 else
1299 word = operand_subword_force (op0, offset * unit / BITS_PER_WORD,
1300 GET_MODE (op0));
1301 offset &= BITS_PER_WORD / unit - 1;
1302 }
1303 else
1304 word = op0;
1305
1306 /* OFFSET is in UNITs, and UNIT is in bits. If WORD is const0_rtx,
1307 it is just an out-of-bounds access. Ignore it. */
1308 if (word != const0_rtx)
1309 store_fixed_bit_field (word, thissize, offset * unit + thispos,
1310 bitregion_start, bitregion_end, part);
1311 bitsdone += thissize;
1312 }
1313}
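/* Worked example (REG operand, UNIT == 32, !BYTES_BIG_ENDIAN): storing a
   10-bit field at BITPOS 28 takes two iterations: THISSIZE = 4 bits of
   VALUE (bits 0..3) go at THISPOS 28 of word 0, then THISSIZE = 6 bits
   (bits 4..9) go at THISPOS 0 of word 1.  */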
1314\f
1315/* A subroutine of extract_bit_field_1 that converts return value X
1316 to either MODE or TMODE. MODE, TMODE and UNSIGNEDP are arguments
1317 to extract_bit_field. */
1318
1319static rtx
1320convert_extracted_bit_field (rtx x, machine_mode mode,
1321 machine_mode tmode, bool unsignedp)
1322{
1323 if (GET_MODE (x) == tmode || GET_MODE (x) == mode)
1324 return x;
1325
 1326 /* If the target mode is not a scalar integral mode, first convert
 1327 X to the integer mode of that size and then access the result in
 1328 the target mode (e.g. a floating-point value) via a SUBREG. */
1329 if (!SCALAR_INT_MODE_P (tmode))
1330 {
1331 machine_mode smode;
1332
1333 smode = mode_for_size (GET_MODE_BITSIZE (tmode), MODE_INT, 0);
1334 x = convert_to_mode (smode, x, unsignedp);
1335 x = force_reg (smode, x);
1336 return gen_lowpart (tmode, x);
1337 }
1338
1339 return convert_to_mode (tmode, x, unsignedp);
1340}
1341
1342/* Try to use an ext(z)v pattern to extract a field from OP0.
1343 Return the extracted value on success, otherwise return null.
1344 EXT_MODE is the mode of the extraction and the other arguments
1345 are as for extract_bit_field. */
1346
1347static rtx
1348extract_bit_field_using_extv (const extraction_insn *extv, rtx op0,
1349 unsigned HOST_WIDE_INT bitsize,
1350 unsigned HOST_WIDE_INT bitnum,
1351 int unsignedp, rtx target,
1352 machine_mode mode, machine_mode tmode)
1353{
1354 struct expand_operand ops[4];
1355 rtx spec_target = target;
1356 rtx spec_target_subreg = 0;
1357 machine_mode ext_mode = extv->field_mode;
1358 unsigned unit = GET_MODE_BITSIZE (ext_mode);
1359
1360 if (bitsize == 0 || unit < bitsize)
1361 return NULL_RTX;
1362
1363 if (MEM_P (op0))
1364 /* Get a reference to the first byte of the field. */
1365 op0 = narrow_bit_field_mem (op0, extv->struct_mode, bitsize, bitnum,
1366 &bitnum);
1367 else
1368 {
1369 /* Convert from counting within OP0 to counting in EXT_MODE. */
1370 if (BYTES_BIG_ENDIAN)
1371 bitnum += unit - GET_MODE_BITSIZE (GET_MODE (op0));
1372
1373 /* If op0 is a register, we need it in EXT_MODE to make it
1374 acceptable to the format of ext(z)v. */
1375 if (GET_CODE (op0) == SUBREG && GET_MODE (op0) != ext_mode)
1376 return NULL_RTX;
1377 if (REG_P (op0) && GET_MODE (op0) != ext_mode)
1378 op0 = gen_lowpart_SUBREG (ext_mode, op0);
1379 }
1380
1381 /* If BITS_BIG_ENDIAN is zero on a BYTES_BIG_ENDIAN machine, we count
1382 "backwards" from the size of the unit we are extracting from.
1383 Otherwise, we count bits from the most significant on a
1384 BYTES/BITS_BIG_ENDIAN machine. */
1385
1386 if (BITS_BIG_ENDIAN != BYTES_BIG_ENDIAN)
1387 bitnum = unit - bitsize - bitnum;
1388
1389 if (target == 0)
1390 target = spec_target = gen_reg_rtx (tmode);
1391
1392 if (GET_MODE (target) != ext_mode)
1393 {
1394 /* Don't use LHS paradoxical subreg if explicit truncation is needed
1395 between the mode of the extraction (word_mode) and the target
1396 mode. Instead, create a temporary and use convert_move to set
1397 the target. */
1398 if (REG_P (target)
1399 && TRULY_NOOP_TRUNCATION_MODES_P (GET_MODE (target), ext_mode))
1400 {
1401 target = gen_lowpart (ext_mode, target);
1402 if (GET_MODE_PRECISION (ext_mode)
1403 > GET_MODE_PRECISION (GET_MODE (spec_target)))
1404 spec_target_subreg = target;
1405 }
1406 else
1407 target = gen_reg_rtx (ext_mode);
1408 }
1409
1410 create_output_operand (&ops[0], target, ext_mode);
1411 create_fixed_operand (&ops[1], op0);
1412 create_integer_operand (&ops[2], bitsize);
1413 create_integer_operand (&ops[3], bitnum);
1414 if (maybe_expand_insn (extv->icode, 4, ops))
1415 {
1416 target = ops[0].value;
1417 if (target == spec_target)
1418 return target;
1419 if (target == spec_target_subreg)
1420 return spec_target;
1421 return convert_extracted_bit_field (target, mode, tmode, unsignedp);
1422 }
1423 return NULL_RTX;
1424}
1425
1426/* A subroutine of extract_bit_field, with the same arguments.
1427 If FALLBACK_P is true, fall back to extract_fixed_bit_field
1428 if we can find no other means of implementing the operation.
 1429 If FALLBACK_P is false, return NULL instead. */
1430
1431static rtx
1432extract_bit_field_1 (rtx str_rtx, unsigned HOST_WIDE_INT bitsize,
1433 unsigned HOST_WIDE_INT bitnum, int unsignedp, rtx target,
1434 machine_mode mode, machine_mode tmode,
1435 bool fallback_p)
1436{
1437 rtx op0 = str_rtx;
1438 machine_mode int_mode;
1439 machine_mode mode1;
1440
1441 if (tmode == VOIDmode)
1442 tmode = mode;
1443
1444 while (GET_CODE (op0) == SUBREG)
1445 {
1446 bitnum += SUBREG_BYTE (op0) * BITS_PER_UNIT;
1447 op0 = SUBREG_REG (op0);
1448 }
1449
1450 /* If we have an out-of-bounds access to a register, just return an
1451 uninitialized register of the required mode. This can occur if the
1452 source code contains an out-of-bounds access to a small array. */
1453 if (REG_P (op0) && bitnum >= GET_MODE_BITSIZE (GET_MODE (op0)))
1454 return gen_reg_rtx (tmode);
1455
1456 if (REG_P (op0)
1457 && mode == GET_MODE (op0)
1458 && bitnum == 0
1459 && bitsize == GET_MODE_BITSIZE (GET_MODE (op0)))
1460 {
1461 /* We're trying to extract a full register from itself. */
1462 return op0;
1463 }
1464
1465 /* See if we can get a better vector mode before extracting. */
1466 if (VECTOR_MODE_P (GET_MODE (op0))
1467 && !MEM_P (op0)
1468 && GET_MODE_INNER (GET_MODE (op0)) != tmode)
1469 {
1470 machine_mode new_mode;
1471
1472 if (GET_MODE_CLASS (tmode) == MODE_FLOAT)
1473 new_mode = MIN_MODE_VECTOR_FLOAT;
1474 else if (GET_MODE_CLASS (tmode) == MODE_FRACT)
1475 new_mode = MIN_MODE_VECTOR_FRACT;
1476 else if (GET_MODE_CLASS (tmode) == MODE_UFRACT)
1477 new_mode = MIN_MODE_VECTOR_UFRACT;
1478 else if (GET_MODE_CLASS (tmode) == MODE_ACCUM)
1479 new_mode = MIN_MODE_VECTOR_ACCUM;
1480 else if (GET_MODE_CLASS (tmode) == MODE_UACCUM)
1481 new_mode = MIN_MODE_VECTOR_UACCUM;
1482 else
1483 new_mode = MIN_MODE_VECTOR_INT;
1484
1485 for (; new_mode != VOIDmode ; new_mode = GET_MODE_WIDER_MODE (new_mode))
1486 if (GET_MODE_SIZE (new_mode) == GET_MODE_SIZE (GET_MODE (op0))
1487 && targetm.vector_mode_supported_p (new_mode))
1488 break;
1489 if (new_mode != VOIDmode)
1490 op0 = gen_lowpart (new_mode, op0);
1491 }
1492
1493 /* Use vec_extract patterns for extracting parts of vectors whenever
1494 available. */
1495 if (VECTOR_MODE_P (GET_MODE (op0))
1496 && !MEM_P (op0)
1497 && optab_handler (vec_extract_optab, GET_MODE (op0)) != CODE_FOR_nothing
1498 && ((bitnum + bitsize - 1) / GET_MODE_BITSIZE (GET_MODE_INNER (GET_MODE (op0)))
1499 == bitnum / GET_MODE_BITSIZE (GET_MODE_INNER (GET_MODE (op0)))))
1500 {
1501 struct expand_operand ops[3];
1502 machine_mode outermode = GET_MODE (op0);
1503 machine_mode innermode = GET_MODE_INNER (outermode);
1504 enum insn_code icode = optab_handler (vec_extract_optab, outermode);
1505 unsigned HOST_WIDE_INT pos = bitnum / GET_MODE_BITSIZE (innermode);
1506
1507 create_output_operand (&ops[0], target, innermode);
1508 create_input_operand (&ops[1], op0, outermode);
1509 create_integer_operand (&ops[2], pos);
1510 if (maybe_expand_insn (icode, 3, ops))
1511 {
1512 target = ops[0].value;
1513 if (GET_MODE (target) != mode)
1514 return gen_lowpart (tmode, target);
1515 return target;
1516 }
1517 }
1518
1519 /* Make sure we are playing with integral modes. Pun with subregs
1520 if we aren't. */
1521 {
1522 machine_mode imode = int_mode_for_mode (GET_MODE (op0));
1523 if (imode != GET_MODE (op0))
1524 {
1525 if (MEM_P (op0))
1526 op0 = adjust_bitfield_address_size (op0, imode, 0, MEM_SIZE (op0));
1527 else if (imode != BLKmode)
1528 {
1529 op0 = gen_lowpart (imode, op0);
1530
1531 /* If we got a SUBREG, force it into a register since we
1532 aren't going to be able to do another SUBREG on it. */
1533 if (GET_CODE (op0) == SUBREG)
1534 op0 = force_reg (imode, op0);
1535 }
1536 else if (REG_P (op0))
1537 {
1538 rtx reg, subreg;
1539 imode = smallest_mode_for_size (GET_MODE_BITSIZE (GET_MODE (op0)),
1540 MODE_INT);
1541 reg = gen_reg_rtx (imode);
1542 subreg = gen_lowpart_SUBREG (GET_MODE (op0), reg);
1543 emit_move_insn (subreg, op0);
1544 op0 = reg;
1545 bitnum += SUBREG_BYTE (subreg) * BITS_PER_UNIT;
1546 }
1547 else
1548 {
1549 HOST_WIDE_INT size = GET_MODE_SIZE (GET_MODE (op0));
1550 rtx mem = assign_stack_temp (GET_MODE (op0), size);
1551 emit_move_insn (mem, op0);
1552 op0 = adjust_bitfield_address_size (mem, BLKmode, 0, size);
1553 }
1554 }
1555 }
1556
1557 /* ??? We currently assume TARGET is at least as big as BITSIZE.
1558 If that's wrong, the solution is to test for it and set TARGET to 0
1559 if needed. */
1560
1561 /* Get the mode of the field to use for atomic access or subreg
1562 conversion. */
1563 mode1 = mode;
1564 if (SCALAR_INT_MODE_P (tmode))
1565 {
1566 machine_mode try_mode = mode_for_size (bitsize,
1567 GET_MODE_CLASS (tmode), 0);
1568 if (try_mode != BLKmode)
1569 mode1 = try_mode;
1570 }
1571 gcc_assert (mode1 != BLKmode);
1572
1573 /* Extraction of a full MODE1 value can be done with a subreg as long
1574 as the least significant bit of the value is the least significant
1575 bit of either OP0 or a word of OP0. */
1576 if (!MEM_P (op0)
1577 && lowpart_bit_field_p (bitnum, bitsize, GET_MODE (op0))
1578 && bitsize == GET_MODE_BITSIZE (mode1)
1579 && TRULY_NOOP_TRUNCATION_MODES_P (mode1, GET_MODE (op0)))
1580 {
1581 rtx sub = simplify_gen_subreg (mode1, op0, GET_MODE (op0),
1582 bitnum / BITS_PER_UNIT);
1583 if (sub)
1584 return convert_extracted_bit_field (sub, mode, tmode, unsignedp);
1585 }
1586
1587 /* Extraction of a full MODE1 value can be done with a load as long as
1588 the field is on a byte boundary and is sufficiently aligned. */
1589 if (simple_mem_bitfield_p (op0, bitsize, bitnum, mode1))
1590 {
1591 op0 = adjust_bitfield_address (op0, mode1, bitnum / BITS_PER_UNIT);
1592 return convert_extracted_bit_field (op0, mode, tmode, unsignedp);
1593 }
1594
1595 /* Handle fields bigger than a word. */
1596
1597 if (bitsize > BITS_PER_WORD)
1598 {
1599 /* Here we transfer the words of the field
1600 in the order least significant first.
1601 This is because the most significant word is the one which may
1602 be less than full. */
1603
1604 unsigned int backwards = WORDS_BIG_ENDIAN;
1605 unsigned int nwords = (bitsize + (BITS_PER_WORD - 1)) / BITS_PER_WORD;
1606 unsigned int i;
1607 rtx_insn *last;
1608
1609 if (target == 0 || !REG_P (target) || !valid_multiword_target_p (target))
1610 target = gen_reg_rtx (mode);
1611
1612 /* Indicate for flow that the entire target reg is being set. */
1613 emit_clobber (target);
1614
1615 last = get_last_insn ();
1616 for (i = 0; i < nwords; i++)
1617 {
1618 /* If I is 0, use the low-order word in both field and target;
1619 if I is 1, use the next to lowest word; and so on. */
1620 /* Word number in TARGET to use. */
1621 unsigned int wordnum
1622 = (backwards
1623 ? GET_MODE_SIZE (GET_MODE (target)) / UNITS_PER_WORD - i - 1
1624 : i);
1625 /* Offset from start of field in OP0. */
1626 unsigned int bit_offset = (backwards
1627 ? MAX ((int) bitsize - ((int) i + 1)
1628 * BITS_PER_WORD,
1629 0)
1630 : (int) i * BITS_PER_WORD);
1631 rtx target_part = operand_subword (target, wordnum, 1, VOIDmode);
1632 rtx result_part
1633 = extract_bit_field_1 (op0, MIN (BITS_PER_WORD,
1634 bitsize - i * BITS_PER_WORD),
1635 bitnum + bit_offset, 1, target_part,
1636 mode, word_mode, fallback_p);
1637
1638 gcc_assert (target_part);
1639 if (!result_part)
1640 {
1641 delete_insns_since (last);
1642 return NULL;
1643 }
1644
1645 if (result_part != target_part)
1646 emit_move_insn (target_part, result_part);
1647 }
1648
1649 if (unsignedp)
1650 {
1651 /* Unless we've filled TARGET, the upper regs in a multi-reg value
1652 need to be zero'd out. */
1653 if (GET_MODE_SIZE (GET_MODE (target)) > nwords * UNITS_PER_WORD)
1654 {
1655 unsigned int i, total_words;
1656
1657 total_words = GET_MODE_SIZE (GET_MODE (target)) / UNITS_PER_WORD;
1658 for (i = nwords; i < total_words; i++)
1659 emit_move_insn
1660 (operand_subword (target,
1661 backwards ? total_words - i - 1 : i,
1662 1, VOIDmode),
1663 const0_rtx);
1664 }
1665 return target;
1666 }
1667
1668 /* Signed bit field: sign-extend with two arithmetic shifts. */
1669 target = expand_shift (LSHIFT_EXPR, mode, target,
1670 GET_MODE_BITSIZE (mode) - bitsize, NULL_RTX, 0);
1671 return expand_shift (RSHIFT_EXPR, mode, target,
1672 GET_MODE_BITSIZE (mode) - bitsize, NULL_RTX, 0);
1673 }
1674
1675 /* If OP0 is a multi-word register, narrow it to the affected word.
1676 If the region spans two words, defer to extract_split_bit_field. */
1677 if (!MEM_P (op0) && GET_MODE_SIZE (GET_MODE (op0)) > UNITS_PER_WORD)
1678 {
1679 op0 = simplify_gen_subreg (word_mode, op0, GET_MODE (op0),
1680 bitnum / BITS_PER_WORD * UNITS_PER_WORD);
1681 bitnum %= BITS_PER_WORD;
1682 if (bitnum + bitsize > BITS_PER_WORD)
1683 {
1684 if (!fallback_p)
1685 return NULL_RTX;
1686 target = extract_split_bit_field (op0, bitsize, bitnum, unsignedp);
1687 return convert_extracted_bit_field (target, mode, tmode, unsignedp);
1688 }
1689 }
1690
1691 /* From here on we know the desired field is smaller than a word.
1692 If OP0 is a register, it too fits within a word. */
1693 enum extraction_pattern pattern = unsignedp ? EP_extzv : EP_extv;
1694 extraction_insn extv;
1695 if (!MEM_P (op0)
1696 /* ??? We could limit the structure size to the part of OP0 that
1697 contains the field, with appropriate checks for endianness
1698 and TRULY_NOOP_TRUNCATION. */
1699 && get_best_reg_extraction_insn (&extv, pattern,
1700 GET_MODE_BITSIZE (GET_MODE (op0)),
1701 tmode))
1702 {
1703 rtx result = extract_bit_field_using_extv (&extv, op0, bitsize, bitnum,
1704 unsignedp, target, mode,
1705 tmode);
1706 if (result)
1707 return result;
1708 }
1709
1710 /* If OP0 is a memory, try copying it to a register and seeing if a
1711 cheap register alternative is available. */
1712 if (MEM_P (op0))
1713 {
1714 if (get_best_mem_extraction_insn (&extv, pattern, bitsize, bitnum,
1715 tmode))
1716 {
1717 rtx result = extract_bit_field_using_extv (&extv, op0, bitsize,
1718 bitnum, unsignedp,
1719 target, mode,
1720 tmode);
1721 if (result)
1722 return result;
1723 }
1724
1725 rtx_insn *last = get_last_insn ();
1726
1727 /* Try loading part of OP0 into a register and extracting the
1728 bitfield from that. */
1729 unsigned HOST_WIDE_INT bitpos;
1730 rtx xop0 = adjust_bit_field_mem_for_reg (pattern, op0, bitsize, bitnum,
1731 0, 0, tmode, &bitpos);
1732 if (xop0)
1733 {
1734 xop0 = copy_to_reg (xop0);
1735 rtx result = extract_bit_field_1 (xop0, bitsize, bitpos,
1736 unsignedp, target,
1737 mode, tmode, false);
1738 if (result)
1739 return result;
1740 delete_insns_since (last);
1741 }
1742 }
1743
1744 if (!fallback_p)
1745 return NULL;
1746
1747 /* Find a correspondingly-sized integer field, so we can apply
1748 shifts and masks to it. */
1749 int_mode = int_mode_for_mode (tmode);
1750 if (int_mode == BLKmode)
1751 int_mode = int_mode_for_mode (mode);
1752 /* Should probably push op0 out to memory and then do a load. */
1753 gcc_assert (int_mode != BLKmode);
1754
1755 target = extract_fixed_bit_field (int_mode, op0, bitsize, bitnum,
1756 target, unsignedp);
1757 return convert_extracted_bit_field (target, mode, tmode, unsignedp);
1758}
1759
1760/* Generate code to extract a byte-field from STR_RTX
1761 containing BITSIZE bits, starting at BITNUM,
1762 and put it in TARGET if possible (if TARGET is nonzero).
1763 Regardless of TARGET, we return the rtx for where the value is placed.
1764
1765 STR_RTX is the structure containing the byte (a REG or MEM).
1766 UNSIGNEDP is nonzero if this is an unsigned bit field.
1767 MODE is the natural mode of the field value once extracted.
1768 TMODE is the mode the caller would like the value to have;
1769 but the value may be returned with type MODE instead.
1770
1771 If a TARGET is specified and we can store in it at no extra cost,
1772 we do so, and return TARGET.
1773 Otherwise, we return a REG of mode TMODE or MODE, with TMODE preferred
1774 if they are equally easy. */
1775
1776rtx
1777extract_bit_field (rtx str_rtx, unsigned HOST_WIDE_INT bitsize,
1778 unsigned HOST_WIDE_INT bitnum, int unsignedp, rtx target,
1779 machine_mode mode, machine_mode tmode)
1780{
1781 machine_mode mode1;
1782
1783 /* Handle -fstrict-volatile-bitfields in the cases where it applies. */
1784 if (GET_MODE_BITSIZE (GET_MODE (str_rtx)) > 0)
1785 mode1 = GET_MODE (str_rtx);
1786 else if (target && GET_MODE_BITSIZE (GET_MODE (target)) > 0)
1787 mode1 = GET_MODE (target);
1788 else
1789 mode1 = tmode;
1790
1791 if (strict_volatile_bitfield_p (str_rtx, bitsize, bitnum, mode1, 0, 0))
1792 {
1793 /* Extraction of a full MODE1 value can be done with a load as long as
1794 the field is on a byte boundary and is sufficiently aligned. */
 1795       if (bitsize == GET_MODE_BITSIZE (mode1))
 1796 	{
1797 rtx result = adjust_bitfield_address (str_rtx, mode1,
1798 bitnum / BITS_PER_UNIT);
1799 return convert_extracted_bit_field (result, mode, tmode, unsignedp);
1800 }
1801
1802 str_rtx = narrow_bit_field_mem (str_rtx, mode1, bitsize, bitnum,
1803 &bitnum);
1804 str_rtx = copy_to_reg (str_rtx);
 1805     }
 1806
1807 return extract_bit_field_1 (str_rtx, bitsize, bitnum, unsignedp,
1808 target, mode, tmode, true);
1809}
1810\f
1811/* Use shifts and boolean operations to extract a field of BITSIZE bits
1812 from bit BITNUM of OP0.
1813
1814 UNSIGNEDP is nonzero for an unsigned bit field (don't sign-extend value).
1815 If TARGET is nonzero, attempts to store the value there
1816 and return TARGET, but this is not guaranteed.
1817 If TARGET is not used, create a pseudo-reg of mode TMODE for the value. */
1818
1819static rtx
1820extract_fixed_bit_field (machine_mode tmode, rtx op0,
1821 unsigned HOST_WIDE_INT bitsize,
1822 unsigned HOST_WIDE_INT bitnum, rtx target,
1823 int unsignedp)
1824{
1825 if (MEM_P (op0))
1826 {
1827 machine_mode mode
1828 = get_best_mode (bitsize, bitnum, 0, 0, MEM_ALIGN (op0), word_mode,
1829 MEM_VOLATILE_P (op0));
1830
1831 if (mode == VOIDmode)
1832 /* The only way this should occur is if the field spans word
1833 boundaries. */
1834 return extract_split_bit_field (op0, bitsize, bitnum, unsignedp);
1835
1836 op0 = narrow_bit_field_mem (op0, mode, bitsize, bitnum, &bitnum);
1837 }
1838
1839 return extract_fixed_bit_field_1 (tmode, op0, bitsize, bitnum,
1840 target, unsignedp);
1841}
1842
1843/* Helper function for extract_fixed_bit_field, extracts
1844 the bit field always using the MODE of OP0. */
1845
1846static rtx
1847extract_fixed_bit_field_1 (machine_mode tmode, rtx op0,
1848 unsigned HOST_WIDE_INT bitsize,
1849 unsigned HOST_WIDE_INT bitnum, rtx target,
1850 int unsignedp)
1851{
1852 machine_mode mode = GET_MODE (op0);
1853 gcc_assert (SCALAR_INT_MODE_P (mode));
1854
1855 /* Note that bitsize + bitnum can be greater than GET_MODE_BITSIZE (mode)
1856 for invalid input, such as extract equivalent of f5 from
1857 gcc.dg/pr48335-2.c. */
1858
1859 if (BYTES_BIG_ENDIAN)
1860 /* BITNUM is the distance between our msb and that of OP0.
1861 Convert it to the distance from the lsb. */
1862 bitnum = GET_MODE_BITSIZE (mode) - bitsize - bitnum;
1863
1864 /* Now BITNUM is always the distance between the field's lsb and that of OP0.
1865 We have reduced the big-endian case to the little-endian case. */
1866
1867 if (unsignedp)
1868 {
1869 if (bitnum)
1870 {
1871 /* If the field does not already start at the lsb,
1872 shift it so it does. */
1873 /* Maybe propagate the target for the shift. */
1874 rtx subtarget = (target != 0 && REG_P (target) ? target : 0);
1875 if (tmode != mode)
1876 subtarget = 0;
1877 op0 = expand_shift (RSHIFT_EXPR, mode, op0, bitnum, subtarget, 1);
1878 }
1879 /* Convert the value to the desired mode. */
1880 if (mode != tmode)
1881 op0 = convert_to_mode (tmode, op0, 1);
1882
1883 /* Unless the msb of the field used to be the msb when we shifted,
1884 mask out the upper bits. */
1885
1886 if (GET_MODE_BITSIZE (mode) != bitnum + bitsize)
1887 return expand_binop (GET_MODE (op0), and_optab, op0,
1888 mask_rtx (GET_MODE (op0), 0, bitsize, 0),
1889 target, 1, OPTAB_LIB_WIDEN);
1890 return op0;
1891 }
1892
1893 /* To extract a signed bit-field, first shift its msb to the msb of the word,
1894 then arithmetic-shift its lsb to the lsb of the word. */
1895 op0 = force_reg (mode, op0);
1896
1897 /* Find the narrowest integer mode that contains the field. */
1898
1899 for (mode = GET_CLASS_NARROWEST_MODE (MODE_INT); mode != VOIDmode;
1900 mode = GET_MODE_WIDER_MODE (mode))
1901 if (GET_MODE_BITSIZE (mode) >= bitsize + bitnum)
1902 {
1903 op0 = convert_to_mode (mode, op0, 0);
1904 break;
1905 }
1906
1907 if (mode != tmode)
1908 target = 0;
1909
1910 if (GET_MODE_BITSIZE (mode) != (bitsize + bitnum))
1911 {
1912 int amount = GET_MODE_BITSIZE (mode) - (bitsize + bitnum);
1913 /* Maybe propagate the target for the shift. */
1914 rtx subtarget = (target != 0 && REG_P (target) ? target : 0);
1915 op0 = expand_shift (LSHIFT_EXPR, mode, op0, amount, subtarget, 1);
1916 }
1917
1918 return expand_shift (RSHIFT_EXPR, mode, op0,
1919 GET_MODE_BITSIZE (mode) - bitsize, target, 0);
1920}
1921
1922/* Return a constant integer (CONST_INT or CONST_DOUBLE) rtx with the value
1923 VALUE << BITPOS. */
1924
1925static rtx
1926lshift_value (machine_mode mode, unsigned HOST_WIDE_INT value,
1927 int bitpos)
1928{
1929 return immed_wide_int_const (wi::lshift (value, bitpos), mode);
1930}
1931\f
1932/* Extract a bit field that is split across two words
1933 and return an RTX for the result.
1934
1935 OP0 is the REG, SUBREG or MEM rtx for the first of the two words.
1936 BITSIZE is the field width; BITPOS, position of its first bit, in the word.
 1937    UNSIGNEDP is 1 if we should zero-extend the contents; else sign-extend.  */
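/* For example, with a 32-bit word size, a 16-bit field starting at bit 24
   is read as 8 bits from the first word and 8 bits from the second, and
   the two parts are then recombined with shifts and an IOR.  */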
1938
1939static rtx
1940extract_split_bit_field (rtx op0, unsigned HOST_WIDE_INT bitsize,
1941 unsigned HOST_WIDE_INT bitpos, int unsignedp)
1942{
1943 unsigned int unit;
1944 unsigned int bitsdone = 0;
1945 rtx result = NULL_RTX;
1946 int first = 1;
1947
1948 /* Make sure UNIT isn't larger than BITS_PER_WORD, we can only handle that
1949 much at a time. */
1950 if (REG_P (op0) || GET_CODE (op0) == SUBREG)
1951 unit = BITS_PER_WORD;
1952 else
1953 unit = MIN (MEM_ALIGN (op0), BITS_PER_WORD);
1954
1955 while (bitsdone < bitsize)
1956 {
1957 unsigned HOST_WIDE_INT thissize;
1958 rtx part, word;
1959 unsigned HOST_WIDE_INT thispos;
1960 unsigned HOST_WIDE_INT offset;
1961
1962 offset = (bitpos + bitsdone) / unit;
1963 thispos = (bitpos + bitsdone) % unit;
1964
1965 /* THISSIZE must not overrun a word boundary. Otherwise,
1966 extract_fixed_bit_field will call us again, and we will mutually
1967 recurse forever. */
1968 thissize = MIN (bitsize - bitsdone, BITS_PER_WORD);
1969 thissize = MIN (thissize, unit - thispos);
1970
1971 /* If OP0 is a register, then handle OFFSET here.
1972
1973 When handling multiword bitfields, extract_bit_field may pass
1974 down a word_mode SUBREG of a larger REG for a bitfield that actually
1975 crosses a word boundary. Thus, for a SUBREG, we must find
1976 the current word starting from the base register. */
1977 if (GET_CODE (op0) == SUBREG)
1978 {
1979 int word_offset = (SUBREG_BYTE (op0) / UNITS_PER_WORD) + offset;
1980 word = operand_subword_force (SUBREG_REG (op0), word_offset,
1981 GET_MODE (SUBREG_REG (op0)));
1982 offset = 0;
1983 }
1984 else if (REG_P (op0))
1985 {
1986 word = operand_subword_force (op0, offset, GET_MODE (op0));
1987 offset = 0;
1988 }
1989 else
1990 word = op0;
1991
1992 /* Extract the parts in bit-counting order,
1993 whose meaning is determined by BYTES_PER_UNIT.
1994 OFFSET is in UNITs, and UNIT is in bits. */
1995 part = extract_fixed_bit_field (word_mode, word, thissize,
1996 offset * unit + thispos, 0, 1);
1997 bitsdone += thissize;
1998
1999 /* Shift this part into place for the result. */
2000 if (BYTES_BIG_ENDIAN)
2001 {
2002 if (bitsize != bitsdone)
2003 part = expand_shift (LSHIFT_EXPR, word_mode, part,
2004 bitsize - bitsdone, 0, 1);
2005 }
2006 else
2007 {
2008 if (bitsdone != thissize)
2009 part = expand_shift (LSHIFT_EXPR, word_mode, part,
2010 bitsdone - thissize, 0, 1);
2011 }
2012
2013 if (first)
2014 result = part;
2015 else
2016 /* Combine the parts with bitwise or. This works
2017 because we extracted each part as an unsigned bit field. */
2018 result = expand_binop (word_mode, ior_optab, part, result, NULL_RTX, 1,
2019 OPTAB_LIB_WIDEN);
2020
2021 first = 0;
2022 }
2023
2024 /* Unsigned bit field: we are done. */
2025 if (unsignedp)
2026 return result;
2027 /* Signed bit field: sign-extend with two arithmetic shifts. */
2028 result = expand_shift (LSHIFT_EXPR, word_mode, result,
2029 BITS_PER_WORD - bitsize, NULL_RTX, 0);
2030 return expand_shift (RSHIFT_EXPR, word_mode, result,
2031 BITS_PER_WORD - bitsize, NULL_RTX, 0);
2032}
2033\f
2034/* Try to read the low bits of SRC as an rvalue of mode MODE, preserving
2035 the bit pattern. SRC_MODE is the mode of SRC; if this is smaller than
2036 MODE, fill the upper bits with zeros. Fail if the layout of either
2037 mode is unknown (as for CC modes) or if the extraction would involve
2038 unprofitable mode punning. Return the value on success, otherwise
2039 return null.
2040
2041 This is different from gen_lowpart* in these respects:
2042
2043 - the returned value must always be considered an rvalue
2044
2045 - when MODE is wider than SRC_MODE, the extraction involves
2046 a zero extension
2047
2048 - when MODE is smaller than SRC_MODE, the extraction involves
2049 a truncation (and is thus subject to TRULY_NOOP_TRUNCATION).
2050
2051 In other words, this routine performs a computation, whereas the
2052 gen_lowpart* routines are conceptually lvalue or rvalue subreg
2053 operations. */
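/* For instance, extract_low_bits (SFmode, DImode, x) would, on a target
   where SFmode is tieable with SImode, truncate X to SImode and then
   reinterpret those 32 bits as an SFmode value.  */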
2054
2055rtx
2056extract_low_bits (machine_mode mode, machine_mode src_mode, rtx src)
2057{
2058 machine_mode int_mode, src_int_mode;
2059
2060 if (mode == src_mode)
2061 return src;
2062
2063 if (CONSTANT_P (src))
2064 {
2065 /* simplify_gen_subreg can't be used here, as if simplify_subreg
2066 fails, it will happily create (subreg (symbol_ref)) or similar
2067 invalid SUBREGs. */
2068 unsigned int byte = subreg_lowpart_offset (mode, src_mode);
2069 rtx ret = simplify_subreg (mode, src, src_mode, byte);
2070 if (ret)
2071 return ret;
2072
2073 if (GET_MODE (src) == VOIDmode
2074 || !validate_subreg (mode, src_mode, src, byte))
2075 return NULL_RTX;
2076
2077 src = force_reg (GET_MODE (src), src);
2078 return gen_rtx_SUBREG (mode, src, byte);
2079 }
2080
2081 if (GET_MODE_CLASS (mode) == MODE_CC || GET_MODE_CLASS (src_mode) == MODE_CC)
2082 return NULL_RTX;
2083
2084 if (GET_MODE_BITSIZE (mode) == GET_MODE_BITSIZE (src_mode)
2085 && MODES_TIEABLE_P (mode, src_mode))
2086 {
2087 rtx x = gen_lowpart_common (mode, src);
2088 if (x)
2089 return x;
2090 }
2091
2092 src_int_mode = int_mode_for_mode (src_mode);
2093 int_mode = int_mode_for_mode (mode);
2094 if (src_int_mode == BLKmode || int_mode == BLKmode)
2095 return NULL_RTX;
2096
2097 if (!MODES_TIEABLE_P (src_int_mode, src_mode))
2098 return NULL_RTX;
2099 if (!MODES_TIEABLE_P (int_mode, mode))
2100 return NULL_RTX;
2101
2102 src = gen_lowpart (src_int_mode, src);
2103 src = convert_modes (int_mode, src_int_mode, src, true);
2104 src = gen_lowpart (mode, src);
2105 return src;
2106}
2107\f
2108/* Add INC into TARGET. */
2109
2110void
2111expand_inc (rtx target, rtx inc)
2112{
2113 rtx value = expand_binop (GET_MODE (target), add_optab,
2114 target, inc,
2115 target, 0, OPTAB_LIB_WIDEN);
2116 if (value != target)
2117 emit_move_insn (target, value);
2118}
2119
2120/* Subtract DEC from TARGET. */
2121
2122void
2123expand_dec (rtx target, rtx dec)
2124{
2125 rtx value = expand_binop (GET_MODE (target), sub_optab,
2126 target, dec,
2127 target, 0, OPTAB_LIB_WIDEN);
2128 if (value != target)
2129 emit_move_insn (target, value);
2130}
2131\f
2132/* Output a shift instruction for expression code CODE,
2133 with SHIFTED being the rtx for the value to shift,
2134 and AMOUNT the rtx for the amount to shift by.
2135 Store the result in the rtx TARGET, if that is convenient.
2136 If UNSIGNEDP is nonzero, do a logical shift; otherwise, arithmetic.
2137 Return the rtx for where the value is. */
2138
2139static rtx
2140expand_shift_1 (enum tree_code code, machine_mode mode, rtx shifted,
2141 rtx amount, rtx target, int unsignedp)
2142{
2143 rtx op1, temp = 0;
2144 int left = (code == LSHIFT_EXPR || code == LROTATE_EXPR);
2145 int rotate = (code == LROTATE_EXPR || code == RROTATE_EXPR);
2146 optab lshift_optab = ashl_optab;
2147 optab rshift_arith_optab = ashr_optab;
2148 optab rshift_uns_optab = lshr_optab;
2149 optab lrotate_optab = rotl_optab;
2150 optab rrotate_optab = rotr_optab;
2151 machine_mode op1_mode;
2152 machine_mode scalar_mode = mode;
2153 int attempt;
2154 bool speed = optimize_insn_for_speed_p ();
2155
2156 if (VECTOR_MODE_P (mode))
2157 scalar_mode = GET_MODE_INNER (mode);
2158 op1 = amount;
2159 op1_mode = GET_MODE (op1);
2160
2161 /* Determine whether the shift/rotate amount is a vector, or scalar. If the
2162 shift amount is a vector, use the vector/vector shift patterns. */
2163 if (VECTOR_MODE_P (mode) && VECTOR_MODE_P (op1_mode))
2164 {
2165 lshift_optab = vashl_optab;
2166 rshift_arith_optab = vashr_optab;
2167 rshift_uns_optab = vlshr_optab;
2168 lrotate_optab = vrotl_optab;
2169 rrotate_optab = vrotr_optab;
2170 }
2171
 2172   /* We previously detected shift counts computed by NEGATE_EXPR
 2173      and shifted in the other direction, but that does not work
 2174      on all machines.  */
2175
2176 if (SHIFT_COUNT_TRUNCATED)
2177 {
2178 if (CONST_INT_P (op1)
2179 && ((unsigned HOST_WIDE_INT) INTVAL (op1) >=
2180 (unsigned HOST_WIDE_INT) GET_MODE_BITSIZE (scalar_mode)))
2181 op1 = GEN_INT ((unsigned HOST_WIDE_INT) INTVAL (op1)
2182 % GET_MODE_BITSIZE (scalar_mode));
2183 else if (GET_CODE (op1) == SUBREG
2184 && subreg_lowpart_p (op1)
2185 && SCALAR_INT_MODE_P (GET_MODE (SUBREG_REG (op1)))
2186 && SCALAR_INT_MODE_P (GET_MODE (op1)))
2187 op1 = SUBREG_REG (op1);
2188 }
2189
2190 /* Canonicalize rotates by constant amount. If op1 is bitsize / 2,
2191 prefer left rotation, if op1 is from bitsize / 2 + 1 to
2192 bitsize - 1, use other direction of rotate with 1 .. bitsize / 2 - 1
2193 amount instead. */
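  /* For example, in a 32-bit mode a rotate left by 31 is rewritten here as
     a rotate right by 1.  */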
2194 if (rotate
2195 && CONST_INT_P (op1)
2196 && IN_RANGE (INTVAL (op1), GET_MODE_BITSIZE (scalar_mode) / 2 + left,
2197 GET_MODE_BITSIZE (scalar_mode) - 1))
2198 {
2199 op1 = GEN_INT (GET_MODE_BITSIZE (scalar_mode) - INTVAL (op1));
2200 left = !left;
2201 code = left ? LROTATE_EXPR : RROTATE_EXPR;
2202 }
2203
 2204   /* Rotation of 16-bit values by 8 bits is effectively equivalent to a bswaphi.
2205 Note that this is not the case for bigger values. For instance a rotation
2206 of 0x01020304 by 16 bits gives 0x03040102 which is different from
2207 0x04030201 (bswapsi). */
2208 if (rotate
2209 && CONST_INT_P (op1)
2210 && INTVAL (op1) == BITS_PER_UNIT
2211 && GET_MODE_SIZE (scalar_mode) == 2
2212 && optab_handler (bswap_optab, HImode) != CODE_FOR_nothing)
2213 return expand_unop (HImode, bswap_optab, shifted, NULL_RTX,
2214 unsignedp);
2215
2216 if (op1 == const0_rtx)
2217 return shifted;
2218
 2219   /* Check whether it's cheaper to implement a left shift by a constant
2220 bit count by a sequence of additions. */
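  /* For example, x << 2 becomes two successive doublings (x + x, then that
     sum added to itself) when two additions cost less than the shift.  */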
2221 if (code == LSHIFT_EXPR
2222 && CONST_INT_P (op1)
2223 && INTVAL (op1) > 0
2224 && INTVAL (op1) < GET_MODE_PRECISION (scalar_mode)
2225 && INTVAL (op1) < MAX_BITS_PER_WORD
2226 && (shift_cost (speed, mode, INTVAL (op1))
2227 > INTVAL (op1) * add_cost (speed, mode))
2228 && shift_cost (speed, mode, INTVAL (op1)) != MAX_COST)
2229 {
2230 int i;
2231 for (i = 0; i < INTVAL (op1); i++)
2232 {
2233 temp = force_reg (mode, shifted);
2234 shifted = expand_binop (mode, add_optab, temp, temp, NULL_RTX,
2235 unsignedp, OPTAB_LIB_WIDEN);
2236 }
2237 return shifted;
2238 }
2239
2240 for (attempt = 0; temp == 0 && attempt < 3; attempt++)
2241 {
2242 enum optab_methods methods;
2243
2244 if (attempt == 0)
2245 methods = OPTAB_DIRECT;
2246 else if (attempt == 1)
2247 methods = OPTAB_WIDEN;
2248 else
2249 methods = OPTAB_LIB_WIDEN;
2250
2251 if (rotate)
2252 {
2253 /* Widening does not work for rotation. */
2254 if (methods == OPTAB_WIDEN)
2255 continue;
2256 else if (methods == OPTAB_LIB_WIDEN)
2257 {
2258 /* If we have been unable to open-code this by a rotation,
2259 do it as the IOR of two shifts. I.e., to rotate A
2260 by N bits, compute
2261 (A << N) | ((unsigned) A >> ((-N) & (C - 1)))
2262 where C is the bitsize of A.
2263
2264 It is theoretically possible that the target machine might
2265 not be able to perform either shift and hence we would
2266 be making two libcalls rather than just the one for the
2267 shift (similarly if IOR could not be done). We will allow
2268 this extremely unlikely lossage to avoid complicating the
2269 code below. */
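	      /* For a variable amount N, the (-N) & (C - 1) form, rather
		 than C - N, keeps the second shift amount within
		 [0, C - 1] even when N is zero at run time.  */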
2270
2271 rtx subtarget = target == shifted ? 0 : target;
2272 rtx new_amount, other_amount;
2273 rtx temp1;
2274
2275 new_amount = op1;
2276 if (op1 == const0_rtx)
2277 return shifted;
2278 else if (CONST_INT_P (op1))
2279 other_amount = GEN_INT (GET_MODE_BITSIZE (scalar_mode)
2280 - INTVAL (op1));
2281 else
2282 {
2283 other_amount
2284 = simplify_gen_unary (NEG, GET_MODE (op1),
2285 op1, GET_MODE (op1));
2286 HOST_WIDE_INT mask = GET_MODE_PRECISION (scalar_mode) - 1;
2287 other_amount
2288 = simplify_gen_binary (AND, GET_MODE (op1), other_amount,
2289 gen_int_mode (mask, GET_MODE (op1)));
2290 }
2291
2292 shifted = force_reg (mode, shifted);
2293
2294 temp = expand_shift_1 (left ? LSHIFT_EXPR : RSHIFT_EXPR,
2295 mode, shifted, new_amount, 0, 1);
2296 temp1 = expand_shift_1 (left ? RSHIFT_EXPR : LSHIFT_EXPR,
2297 mode, shifted, other_amount,
2298 subtarget, 1);
2299 return expand_binop (mode, ior_optab, temp, temp1, target,
2300 unsignedp, methods);
2301 }
2302
2303 temp = expand_binop (mode,
2304 left ? lrotate_optab : rrotate_optab,
2305 shifted, op1, target, unsignedp, methods);
2306 }
2307 else if (unsignedp)
2308 temp = expand_binop (mode,
2309 left ? lshift_optab : rshift_uns_optab,
2310 shifted, op1, target, unsignedp, methods);
2311
2312 /* Do arithmetic shifts.
2313 Also, if we are going to widen the operand, we can just as well
2314 use an arithmetic right-shift instead of a logical one. */
2315 if (temp == 0 && ! rotate
2316 && (! unsignedp || (! left && methods == OPTAB_WIDEN)))
2317 {
2318 enum optab_methods methods1 = methods;
2319
2320 /* If trying to widen a log shift to an arithmetic shift,
2321 don't accept an arithmetic shift of the same size. */
2322 if (unsignedp)
2323 methods1 = OPTAB_MUST_WIDEN;
2324
2325 /* Arithmetic shift */
2326
2327 temp = expand_binop (mode,
2328 left ? lshift_optab : rshift_arith_optab,
2329 shifted, op1, target, unsignedp, methods1);
2330 }
2331
2332 /* We used to try extzv here for logical right shifts, but that was
2333 only useful for one machine, the VAX, and caused poor code
2334 generation there for lshrdi3, so the code was deleted and a
2335 define_expand for lshrsi3 was added to vax.md. */
2336 }
2337
2338 gcc_assert (temp);
2339 return temp;
2340}
2341
2342/* Output a shift instruction for expression code CODE,
2343 with SHIFTED being the rtx for the value to shift,
2344 and AMOUNT the amount to shift by.
2345 Store the result in the rtx TARGET, if that is convenient.
2346 If UNSIGNEDP is nonzero, do a logical shift; otherwise, arithmetic.
2347 Return the rtx for where the value is. */
2348
2349rtx
2350expand_shift (enum tree_code code, machine_mode mode, rtx shifted,
2351 int amount, rtx target, int unsignedp)
2352{
2353 return expand_shift_1 (code, mode,
2354 shifted, GEN_INT (amount), target, unsignedp);
2355}
2356
2357/* Output a shift instruction for expression code CODE,
2358 with SHIFTED being the rtx for the value to shift,
2359 and AMOUNT the tree for the amount to shift by.
2360 Store the result in the rtx TARGET, if that is convenient.
2361 If UNSIGNEDP is nonzero, do a logical shift; otherwise, arithmetic.
2362 Return the rtx for where the value is. */
2363
2364rtx
2365expand_variable_shift (enum tree_code code, machine_mode mode, rtx shifted,
2366 tree amount, rtx target, int unsignedp)
2367{
2368 return expand_shift_1 (code, mode,
2369 shifted, expand_normal (amount), target, unsignedp);
2370}
2371
2372\f
2373/* Indicates the type of fixup needed after a constant multiplication.
2374 BASIC_VARIANT means no fixup is needed, NEGATE_VARIANT means that
2375 the result should be negated, and ADD_VARIANT means that the
2376 multiplicand should be added to the result. */
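/* For example, a multiplication by -7 may be synthesized as the sequence
   for 7 (x * 8 - x) followed by a negation, i.e. negate_variant.  */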
2377enum mult_variant {basic_variant, negate_variant, add_variant};
2378
2379static void synth_mult (struct algorithm *, unsigned HOST_WIDE_INT,
2380 const struct mult_cost *, machine_mode mode);
2381static bool choose_mult_variant (machine_mode, HOST_WIDE_INT,
2382 struct algorithm *, enum mult_variant *, int);
2383static rtx expand_mult_const (machine_mode, rtx, HOST_WIDE_INT, rtx,
2384 const struct algorithm *, enum mult_variant);
2385static unsigned HOST_WIDE_INT invert_mod2n (unsigned HOST_WIDE_INT, int);
2386static rtx extract_high_half (machine_mode, rtx);
2387static rtx expmed_mult_highpart (machine_mode, rtx, rtx, rtx, int, int);
2388static rtx expmed_mult_highpart_optab (machine_mode, rtx, rtx, rtx,
2389 int, int);
2390/* Compute and return the best algorithm for multiplying by T.
 2391    The algorithm must cost less than cost_limit.
 2392    If retval.cost >= COST_LIMIT, no algorithm was found and all
 2393    other fields of the returned struct are undefined.
2394 MODE is the machine mode of the multiplication. */
2395
2396static void
2397synth_mult (struct algorithm *alg_out, unsigned HOST_WIDE_INT t,
2398 const struct mult_cost *cost_limit, machine_mode mode)
2399{
2400 int m;
2401 struct algorithm *alg_in, *best_alg;
2402 struct mult_cost best_cost;
2403 struct mult_cost new_limit;
2404 int op_cost, op_latency;
2405 unsigned HOST_WIDE_INT orig_t = t;
2406 unsigned HOST_WIDE_INT q;
2407 int maxm, hash_index;
2408 bool cache_hit = false;
2409 enum alg_code cache_alg = alg_zero;
2410 bool speed = optimize_insn_for_speed_p ();
2411 machine_mode imode;
2412 struct alg_hash_entry *entry_ptr;
2413
2414 /* Indicate that no algorithm is yet found. If no algorithm
2415 is found, this value will be returned and indicate failure. */
2416 alg_out->cost.cost = cost_limit->cost + 1;
2417 alg_out->cost.latency = cost_limit->latency + 1;
2418
2419 if (cost_limit->cost < 0
2420 || (cost_limit->cost == 0 && cost_limit->latency <= 0))
2421 return;
2422
2423 /* Be prepared for vector modes. */
2424 imode = GET_MODE_INNER (mode);
2425 if (imode == VOIDmode)
2426 imode = mode;
2427
2428 maxm = MIN (BITS_PER_WORD, GET_MODE_BITSIZE (imode));
2429
2430 /* Restrict the bits of "t" to the multiplication's mode. */
2431 t &= GET_MODE_MASK (imode);
2432
2433 /* t == 1 can be done in zero cost. */
2434 if (t == 1)
2435 {
2436 alg_out->ops = 1;
2437 alg_out->cost.cost = 0;
2438 alg_out->cost.latency = 0;
2439 alg_out->op[0] = alg_m;
2440 return;
2441 }
2442
2443 /* t == 0 sometimes has a cost. If it does and it exceeds our limit,
2444 fail now. */
2445 if (t == 0)
2446 {
2447 if (MULT_COST_LESS (cost_limit, zero_cost (speed)))
2448 return;
2449 else
2450 {
2451 alg_out->ops = 1;
2452 alg_out->cost.cost = zero_cost (speed);
2453 alg_out->cost.latency = zero_cost (speed);
2454 alg_out->op[0] = alg_zero;
2455 return;
2456 }
2457 }
2458
2459 /* We'll be needing a couple extra algorithm structures now. */
2460
2461 alg_in = XALLOCA (struct algorithm);
2462 best_alg = XALLOCA (struct algorithm);
2463 best_cost = *cost_limit;
2464
2465 /* Compute the hash index. */
2466 hash_index = (t ^ (unsigned int) mode ^ (speed * 256)) % NUM_ALG_HASH_ENTRIES;
2467
2468 /* See if we already know what to do for T. */
2469 entry_ptr = alg_hash_entry_ptr (hash_index);
2470 if (entry_ptr->t == t
 2471       && entry_ptr->mode == mode
2473 && entry_ptr->speed == speed
2474 && entry_ptr->alg != alg_unknown)
2475 {
2476 cache_alg = entry_ptr->alg;
2477
2478 if (cache_alg == alg_impossible)
2479 {
2480 /* The cache tells us that it's impossible to synthesize
2481 multiplication by T within entry_ptr->cost. */
2482 if (!CHEAPER_MULT_COST (&entry_ptr->cost, cost_limit))
2483 /* COST_LIMIT is at least as restrictive as the one
2484 recorded in the hash table, in which case we have no
2485 hope of synthesizing a multiplication. Just
2486 return. */
2487 return;
2488
2489 /* If we get here, COST_LIMIT is less restrictive than the
2490 one recorded in the hash table, so we may be able to
2491 synthesize a multiplication. Proceed as if we didn't
2492 have the cache entry. */
2493 }
2494 else
2495 {
2496 if (CHEAPER_MULT_COST (cost_limit, &entry_ptr->cost))
2497 /* The cached algorithm shows that this multiplication
2498 requires more cost than COST_LIMIT. Just return. This
2499 way, we don't clobber this cache entry with
2500 alg_impossible but retain useful information. */
2501 return;
2502
2503 cache_hit = true;
2504
2505 switch (cache_alg)
2506 {
2507 case alg_shift:
2508 goto do_alg_shift;
2509
2510 case alg_add_t_m2:
2511 case alg_sub_t_m2:
2512 goto do_alg_addsub_t_m2;
2513
2514 case alg_add_factor:
2515 case alg_sub_factor:
2516 goto do_alg_addsub_factor;
2517
2518 case alg_add_t2_m:
2519 goto do_alg_add_t2_m;
2520
2521 case alg_sub_t2_m:
2522 goto do_alg_sub_t2_m;
2523
2524 default:
2525 gcc_unreachable ();
2526 }
2527 }
2528 }
2529
2530 /* If we have a group of zero bits at the low-order part of T, try
2531 multiplying by the remaining bits and then doing a shift. */
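  /* For example, T = 20 is 5 << 2: synthesize the multiplication by 5 and
     then shift the result left by 2.  */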
2532
2533 if ((t & 1) == 0)
2534 {
2535 do_alg_shift:
2536 m = floor_log2 (t & -t); /* m = number of low zero bits */
2537 if (m < maxm)
2538 {
2539 q = t >> m;
2540 /* The function expand_shift will choose between a shift and
2541 a sequence of additions, so the observed cost is given as
2542 MIN (m * add_cost(speed, mode), shift_cost(speed, mode, m)). */
2543 op_cost = m * add_cost (speed, mode);
2544 if (shift_cost (speed, mode, m) < op_cost)
2545 op_cost = shift_cost (speed, mode, m);
2546 new_limit.cost = best_cost.cost - op_cost;
2547 new_limit.latency = best_cost.latency - op_cost;
2548 synth_mult (alg_in, q, &new_limit, mode);
2549
2550 alg_in->cost.cost += op_cost;
2551 alg_in->cost.latency += op_cost;
2552 if (CHEAPER_MULT_COST (&alg_in->cost, &best_cost))
2553 {
 2554 	  best_cost = alg_in->cost;
 2555 	  std::swap (alg_in, best_alg);
2556 best_alg->log[best_alg->ops] = m;
2557 best_alg->op[best_alg->ops] = alg_shift;
2558 }
2559
2560 /* See if treating ORIG_T as a signed number yields a better
2561 sequence. Try this sequence only for a negative ORIG_T
2562 as it would be useless for a non-negative ORIG_T. */
2563 if ((HOST_WIDE_INT) orig_t < 0)
2564 {
2565 /* Shift ORIG_T as follows because a right shift of a
2566 negative-valued signed type is implementation
2567 defined. */
2568 q = ~(~orig_t >> m);
2569 /* The function expand_shift will choose between a shift
2570 and a sequence of additions, so the observed cost is
2571 given as MIN (m * add_cost(speed, mode),
2572 shift_cost(speed, mode, m)). */
2573 op_cost = m * add_cost (speed, mode);
2574 if (shift_cost (speed, mode, m) < op_cost)
2575 op_cost = shift_cost (speed, mode, m);
2576 new_limit.cost = best_cost.cost - op_cost;
2577 new_limit.latency = best_cost.latency - op_cost;
2578 synth_mult (alg_in, q, &new_limit, mode);
2579
2580 alg_in->cost.cost += op_cost;
2581 alg_in->cost.latency += op_cost;
2582 if (CHEAPER_MULT_COST (&alg_in->cost, &best_cost))
2583 {
 2584 	      best_cost = alg_in->cost;
 2585 	      std::swap (alg_in, best_alg);
2586 best_alg->log[best_alg->ops] = m;
2587 best_alg->op[best_alg->ops] = alg_shift;
2588 }
2589 }
2590 }
2591 if (cache_hit)
2592 goto done;
2593 }
2594
2595 /* If we have an odd number, add or subtract one. */
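  /* For example, T = 7 ends in ...111, so multiply by 8 and subtract the
     operand once; T = 5 ends in ...01, so multiply by 4 and add the operand
     once.  */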
2596 if ((t & 1) != 0)
2597 {
2598 unsigned HOST_WIDE_INT w;
2599
2600 do_alg_addsub_t_m2:
2601 for (w = 1; (w & t) != 0; w <<= 1)
2602 ;
2603 /* If T was -1, then W will be zero after the loop. This is another
 2604 	 case where T ends with ...111.  Handling it by multiplying by (T + 1)
 2605 	 and subtracting 1 produces slightly better code and makes algorithm
 2606 	 selection much faster than treating it like the ...0111 case
2607 below. */
2608 if (w == 0
2609 || (w > 2
2610 /* Reject the case where t is 3.
2611 Thus we prefer addition in that case. */
2612 && t != 3))
2613 {
2614 /* T ends with ...111. Multiply by (T + 1) and subtract 1. */
2615
2616 op_cost = add_cost (speed, mode);
2617 new_limit.cost = best_cost.cost - op_cost;
2618 new_limit.latency = best_cost.latency - op_cost;
2619 synth_mult (alg_in, t + 1, &new_limit, mode);
2620
2621 alg_in->cost.cost += op_cost;
2622 alg_in->cost.latency += op_cost;
2623 if (CHEAPER_MULT_COST (&alg_in->cost, &best_cost))
2624 {
 2625 	      best_cost = alg_in->cost;
 2626 	      std::swap (alg_in, best_alg);
2627 best_alg->log[best_alg->ops] = 0;
2628 best_alg->op[best_alg->ops] = alg_sub_t_m2;
2629 }
2630 }
2631 else
2632 {
2633 /* T ends with ...01 or ...011. Multiply by (T - 1) and add 1. */
2634
2635 op_cost = add_cost (speed, mode);
2636 new_limit.cost = best_cost.cost - op_cost;
2637 new_limit.latency = best_cost.latency - op_cost;
2638 synth_mult (alg_in, t - 1, &new_limit, mode);
2639
2640 alg_in->cost.cost += op_cost;
2641 alg_in->cost.latency += op_cost;
2642 if (CHEAPER_MULT_COST (&alg_in->cost, &best_cost))
2643 {
 2644 	      best_cost = alg_in->cost;
 2645 	      std::swap (alg_in, best_alg);
2646 best_alg->log[best_alg->ops] = 0;
2647 best_alg->op[best_alg->ops] = alg_add_t_m2;
2648 }
2649 }
2650
2651 /* We may be able to calculate a * -7, a * -15, a * -31, etc
2652 quickly with a - a * n for some appropriate constant n. */
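      /* For example, a * -7 can be computed as a - (a << 3).  */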
2653 m = exact_log2 (-orig_t + 1);
2654 if (m >= 0 && m < maxm)
2655 {
2656 op_cost = shiftsub1_cost (speed, mode, m);
2657 new_limit.cost = best_cost.cost - op_cost;
2658 new_limit.latency = best_cost.latency - op_cost;
2659 synth_mult (alg_in, (unsigned HOST_WIDE_INT) (-orig_t + 1) >> m,
2660 &new_limit, mode);
2661
2662 alg_in->cost.cost += op_cost;
2663 alg_in->cost.latency += op_cost;
2664 if (CHEAPER_MULT_COST (&alg_in->cost, &best_cost))
2665 {
 2666 	      best_cost = alg_in->cost;
 2667 	      std::swap (alg_in, best_alg);
2668 best_alg->log[best_alg->ops] = m;
2669 best_alg->op[best_alg->ops] = alg_sub_t_m2;
2670 }
2671 }
2672
2673 if (cache_hit)
2674 goto done;
2675 }
2676
2677 /* Look for factors of t of the form
2678 t = q(2**m +- 1), 2 <= m <= floor(log2(t - 1)).
2679 If we find such a factor, we can multiply by t using an algorithm that
2680 multiplies by q, shift the result by m and add/subtract it to itself.
2681
2682 We search for large factors first and loop down, even if large factors
2683 are less probable than small; if we find a large factor we will find a
2684 good sequence quickly, and therefore be able to prune (by decreasing
2685 COST_LIMIT) the search. */
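  /* For example, T = 45 factors as 3 * (2**4 - 1): synthesize x * 3, then
     compute ((x * 3) << 4) - (x * 3).  */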
2686
2687 do_alg_addsub_factor:
2688 for (m = floor_log2 (t - 1); m >= 2; m--)
2689 {
2690 unsigned HOST_WIDE_INT d;
2691
2692 d = ((unsigned HOST_WIDE_INT) 1 << m) + 1;
2693 if (t % d == 0 && t > d && m < maxm
2694 && (!cache_hit || cache_alg == alg_add_factor))
2695 {
2696 /* If the target has a cheap shift-and-add instruction use
2697 that in preference to a shift insn followed by an add insn.
2698 Assume that the shift-and-add is "atomic" with a latency
2699 equal to its cost, otherwise assume that on superscalar
2700 hardware the shift may be executed concurrently with the
2701 earlier steps in the algorithm. */
2702 op_cost = add_cost (speed, mode) + shift_cost (speed, mode, m);
2703 if (shiftadd_cost (speed, mode, m) < op_cost)
2704 {
2705 op_cost = shiftadd_cost (speed, mode, m);
2706 op_latency = op_cost;
2707 }
2708 else
2709 op_latency = add_cost (speed, mode);
2710
2711 new_limit.cost = best_cost.cost - op_cost;
2712 new_limit.latency = best_cost.latency - op_latency;
2713 synth_mult (alg_in, t / d, &new_limit, mode);
2714
2715 alg_in->cost.cost += op_cost;
2716 alg_in->cost.latency += op_latency;
2717 if (alg_in->cost.latency < op_cost)
2718 alg_in->cost.latency = op_cost;
2719 if (CHEAPER_MULT_COST (&alg_in->cost, &best_cost))
2720 {
 2721 	      best_cost = alg_in->cost;
 2722 	      std::swap (alg_in, best_alg);
2723 best_alg->log[best_alg->ops] = m;
2724 best_alg->op[best_alg->ops] = alg_add_factor;
2725 }
2726 /* Other factors will have been taken care of in the recursion. */
2727 break;
2728 }
2729
2730 d = ((unsigned HOST_WIDE_INT) 1 << m) - 1;
2731 if (t % d == 0 && t > d && m < maxm
2732 && (!cache_hit || cache_alg == alg_sub_factor))
2733 {
2734 /* If the target has a cheap shift-and-subtract insn use
2735 that in preference to a shift insn followed by a sub insn.
2736 Assume that the shift-and-sub is "atomic" with a latency
 2737 	     equal to its cost, otherwise assume that on superscalar
2738 hardware the shift may be executed concurrently with the
2739 earlier steps in the algorithm. */
2740 op_cost = add_cost (speed, mode) + shift_cost (speed, mode, m);
2741 if (shiftsub0_cost (speed, mode, m) < op_cost)
2742 {
2743 op_cost = shiftsub0_cost (speed, mode, m);
2744 op_latency = op_cost;
2745 }
2746 else
2747 op_latency = add_cost (speed, mode);
2748
2749 new_limit.cost = best_cost.cost - op_cost;
2750 new_limit.latency = best_cost.latency - op_latency;
2751 synth_mult (alg_in, t / d, &new_limit, mode);
2752
2753 alg_in->cost.cost += op_cost;
2754 alg_in->cost.latency += op_latency;
2755 if (alg_in->cost.latency < op_cost)
2756 alg_in->cost.latency = op_cost;
2757 if (CHEAPER_MULT_COST (&alg_in->cost, &best_cost))
2758 {
 2759 	      best_cost = alg_in->cost;
 2760 	      std::swap (alg_in, best_alg);
2761 best_alg->log[best_alg->ops] = m;
2762 best_alg->op[best_alg->ops] = alg_sub_factor;
2763 }
2764 break;
2765 }
2766 }
2767 if (cache_hit)
2768 goto done;
2769
2770 /* Try shift-and-add (load effective address) instructions,
2771 i.e. do a*3, a*5, a*9. */
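  /* For example, T = 9 can be handled by a single shift-and-add: shift the
     accumulated value left by 3 and add the operand (alg_add_t2_m with
     m = 3).  */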
2772 if ((t & 1) != 0)
2773 {
2774 do_alg_add_t2_m:
2775 q = t - 1;
2776 q = q & -q;
2777 m = exact_log2 (q);
2778 if (m >= 0 && m < maxm)
2779 {
2780 op_cost = shiftadd_cost (speed, mode, m);
2781 new_limit.cost = best_cost.cost - op_cost;
2782 new_limit.latency = best_cost.latency - op_cost;
2783 synth_mult (alg_in, (t - 1) >> m, &new_limit, mode);
2784
2785 alg_in->cost.cost += op_cost;
2786 alg_in->cost.latency += op_cost;
2787 if (CHEAPER_MULT_COST (&alg_in->cost, &best_cost))
2788 {
 2789 	      best_cost = alg_in->cost;
 2790 	      std::swap (alg_in, best_alg);
2791 best_alg->log[best_alg->ops] = m;
2792 best_alg->op[best_alg->ops] = alg_add_t2_m;
2793 }
2794 }
2795 if (cache_hit)
2796 goto done;
2797
2798 do_alg_sub_t2_m:
2799 q = t + 1;
2800 q = q & -q;
2801 m = exact_log2 (q);
2802 if (m >= 0 && m < maxm)
2803 {
2804 op_cost = shiftsub0_cost (speed, mode, m);
2805 new_limit.cost = best_cost.cost - op_cost;
2806 new_limit.latency = best_cost.latency - op_cost;
2807 synth_mult (alg_in, (t + 1) >> m, &new_limit, mode);
2808
2809 alg_in->cost.cost += op_cost;
2810 alg_in->cost.latency += op_cost;
2811 if (CHEAPER_MULT_COST (&alg_in->cost, &best_cost))
2812 {
 2813 	      best_cost = alg_in->cost;
 2814 	      std::swap (alg_in, best_alg);
2815 best_alg->log[best_alg->ops] = m;
2816 best_alg->op[best_alg->ops] = alg_sub_t2_m;
2817 }
2818 }
2819 if (cache_hit)
2820 goto done;
2821 }
2822
2823 done:
2824 /* If best_cost has not decreased, we have not found any algorithm. */
2825 if (!CHEAPER_MULT_COST (&best_cost, cost_limit))
2826 {
2827 /* We failed to find an algorithm. Record alg_impossible for
2828 this case (that is, <T, MODE, COST_LIMIT>) so that next time
2829 we are asked to find an algorithm for T within the same or
2830 lower COST_LIMIT, we can immediately return to the
2831 caller. */
2832 entry_ptr->t = t;
2833 entry_ptr->mode = mode;
2834 entry_ptr->speed = speed;
2835 entry_ptr->alg = alg_impossible;
2836 entry_ptr->cost = *cost_limit;
2837 return;
2838 }
2839
2840 /* Cache the result. */
2841 if (!cache_hit)
2842 {
2843 entry_ptr->t = t;
2844 entry_ptr->mode = mode;
2845 entry_ptr->speed = speed;
2846 entry_ptr->alg = best_alg->op[best_alg->ops];
2847 entry_ptr->cost.cost = best_cost.cost;
2848 entry_ptr->cost.latency = best_cost.latency;
2849 }
2850
2851 /* If we are getting a too long sequence for `struct algorithm'
2852 to record, make this search fail. */
2853 if (best_alg->ops == MAX_BITS_PER_WORD)
2854 return;
2855
2856 /* Copy the algorithm from temporary space to the space at alg_out.
2857 We avoid using structure assignment because the majority of
2858 best_alg is normally undefined, and this is a critical function. */
2859 alg_out->ops = best_alg->ops + 1;
2860 alg_out->cost = best_cost;
2861 memcpy (alg_out->op, best_alg->op,
2862 alg_out->ops * sizeof *alg_out->op);
2863 memcpy (alg_out->log, best_alg->log,
2864 alg_out->ops * sizeof *alg_out->log);
2865}
2866\f
2867/* Find the cheapest way of multiplying a value of mode MODE by VAL.
2868 Try three variations:
2869
2870 - a shift/add sequence based on VAL itself
2871 - a shift/add sequence based on -VAL, followed by a negation
2872 - a shift/add sequence based on VAL - 1, followed by an addition.
2873
2874 Return true if the cheapest of these cost less than MULT_COST,
2875 describing the algorithm in *ALG and final fixup in *VARIANT. */
2876
2877static bool
2878choose_mult_variant (machine_mode mode, HOST_WIDE_INT val,
2879 struct algorithm *alg, enum mult_variant *variant,
2880 int mult_cost)
2881{
2882 struct algorithm alg2;
2883 struct mult_cost limit;
2884 int op_cost;
2885 bool speed = optimize_insn_for_speed_p ();
2886
2887 /* Fail quickly for impossible bounds. */
2888 if (mult_cost < 0)
2889 return false;
2890
2891 /* Ensure that mult_cost provides a reasonable upper bound.
2892 Any constant multiplication can be performed with less
2893 than 2 * bits additions. */
2894 op_cost = 2 * GET_MODE_UNIT_BITSIZE (mode) * add_cost (speed, mode);
2895 if (mult_cost > op_cost)
2896 mult_cost = op_cost;
2897
2898 *variant = basic_variant;
2899 limit.cost = mult_cost;
2900 limit.latency = mult_cost;
2901 synth_mult (alg, val, &limit, mode);
2902
2903 /* This works only if the inverted value actually fits in an
2904 `unsigned int' */
2905 if (HOST_BITS_PER_INT >= GET_MODE_UNIT_BITSIZE (mode))
2906 {
2907 op_cost = neg_cost (speed, mode);
2908 if (MULT_COST_LESS (&alg->cost, mult_cost))
2909 {
2910 limit.cost = alg->cost.cost - op_cost;
2911 limit.latency = alg->cost.latency - op_cost;
2912 }
2913 else
2914 {
2915 limit.cost = mult_cost - op_cost;
2916 limit.latency = mult_cost - op_cost;
2917 }
2918
2919 synth_mult (&alg2, -val, &limit, mode);
2920 alg2.cost.cost += op_cost;
2921 alg2.cost.latency += op_cost;
2922 if (CHEAPER_MULT_COST (&alg2.cost, &alg->cost))
2923 *alg = alg2, *variant = negate_variant;
2924 }
2925
2926 /* This proves very useful for division-by-constant. */
2927 op_cost = add_cost (speed, mode);
2928 if (MULT_COST_LESS (&alg->cost, mult_cost))
2929 {
2930 limit.cost = alg->cost.cost - op_cost;
2931 limit.latency = alg->cost.latency - op_cost;
2932 }
2933 else
2934 {
2935 limit.cost = mult_cost - op_cost;
2936 limit.latency = mult_cost - op_cost;
2937 }
2938
2939 synth_mult (&alg2, val - 1, &limit, mode);
2940 alg2.cost.cost += op_cost;
2941 alg2.cost.latency += op_cost;
2942 if (CHEAPER_MULT_COST (&alg2.cost, &alg->cost))
2943 *alg = alg2, *variant = add_variant;
2944
2945 return MULT_COST_LESS (&alg->cost, mult_cost);
2946}
2947
2948/* A subroutine of expand_mult, used for constant multiplications.
2949 Multiply OP0 by VAL in mode MODE, storing the result in TARGET if
2950 convenient. Use the shift/add sequence described by ALG and apply
2951 the final fixup specified by VARIANT. */
2952
2953static rtx
2954expand_mult_const (machine_mode mode, rtx op0, HOST_WIDE_INT val,
2955 rtx target, const struct algorithm *alg,
2956 enum mult_variant variant)
2957{
2958 HOST_WIDE_INT val_so_far;
2959 rtx_insn *insn;
2960 rtx accum, tem;
2961 int opno;
2962 machine_mode nmode;
2963
2964 /* Avoid referencing memory over and over and invalid sharing
2965 on SUBREGs. */
2966 op0 = force_reg (mode, op0);
2967
2968 /* ACCUM starts out either as OP0 or as a zero, depending on
2969 the first operation. */
2970
2971 if (alg->op[0] == alg_zero)
2972 {
2973 accum = copy_to_mode_reg (mode, CONST0_RTX (mode));
2974 val_so_far = 0;
2975 }
2976 else if (alg->op[0] == alg_m)
2977 {
2978 accum = copy_to_mode_reg (mode, op0);
2979 val_so_far = 1;
2980 }
2981 else
2982 gcc_unreachable ();
2983
2984 for (opno = 1; opno < alg->ops; opno++)
2985 {
2986 int log = alg->log[opno];
2987 rtx shift_subtarget = optimize ? 0 : accum;
2988 rtx add_target
2989 = (opno == alg->ops - 1 && target != 0 && variant != add_variant
2990 && !optimize)
2991 ? target : 0;
2992 rtx accum_target = optimize ? 0 : accum;
2993 rtx accum_inner;
2994
2995 switch (alg->op[opno])
2996 {
2997 case alg_shift:
2998 tem = expand_shift (LSHIFT_EXPR, mode, accum, log, NULL_RTX, 0);
2999 /* REG_EQUAL note will be attached to the following insn. */
3000 emit_move_insn (accum, tem);
3001 val_so_far <<= log;
3002 break;
3003
3004 case alg_add_t_m2:
3005 tem = expand_shift (LSHIFT_EXPR, mode, op0, log, NULL_RTX, 0);
3006 accum = force_operand (gen_rtx_PLUS (mode, accum, tem),
3007 add_target ? add_target : accum_target);
3008 val_so_far += (HOST_WIDE_INT) 1 << log;
3009 break;
3010
3011 case alg_sub_t_m2:
3012 tem = expand_shift (LSHIFT_EXPR, mode, op0, log, NULL_RTX, 0);
3013 accum = force_operand (gen_rtx_MINUS (mode, accum, tem),
3014 add_target ? add_target : accum_target);
3015 val_so_far -= (HOST_WIDE_INT) 1 << log;
3016 break;
3017
3018 case alg_add_t2_m:
3019 accum = expand_shift (LSHIFT_EXPR, mode, accum,
3020 log, shift_subtarget, 0);
3021 accum = force_operand (gen_rtx_PLUS (mode, accum, op0),
3022 add_target ? add_target : accum_target);
3023 val_so_far = (val_so_far << log) + 1;
3024 break;
3025
3026 case alg_sub_t2_m:
3027 accum = expand_shift (LSHIFT_EXPR, mode, accum,
3028 log, shift_subtarget, 0);
3029 accum = force_operand (gen_rtx_MINUS (mode, accum, op0),
3030 add_target ? add_target : accum_target);
3031 val_so_far = (val_so_far << log) - 1;
3032 break;
3033
3034 case alg_add_factor:
3035 tem = expand_shift (LSHIFT_EXPR, mode, accum, log, NULL_RTX, 0);
3036 accum = force_operand (gen_rtx_PLUS (mode, accum, tem),
3037 add_target ? add_target : accum_target);
3038 val_so_far += val_so_far << log;
3039 break;
3040
3041 case alg_sub_factor:
3042 tem = expand_shift (LSHIFT_EXPR, mode, accum, log, NULL_RTX, 0);
3043 accum = force_operand (gen_rtx_MINUS (mode, tem, accum),
3044 (add_target
3045 ? add_target : (optimize ? 0 : tem)));
3046 val_so_far = (val_so_far << log) - val_so_far;
3047 break;
3048
3049 default:
3050 gcc_unreachable ();
3051 }
3052
3053 if (SCALAR_INT_MODE_P (mode))
3054 {
3055 /* Write a REG_EQUAL note on the last insn so that we can cse
3056 multiplication sequences. Note that if ACCUM is a SUBREG,
3057 we've set the inner register and must properly indicate that. */
3058 tem = op0, nmode = mode;
3059 accum_inner = accum;
3060 if (GET_CODE (accum) == SUBREG)
3061 {
3062 accum_inner = SUBREG_REG (accum);
3063 nmode = GET_MODE (accum_inner);
3064 tem = gen_lowpart (nmode, op0);
3065 }
3066
3067 insn = get_last_insn ();
3068 set_dst_reg_note (insn, REG_EQUAL,
3069 gen_rtx_MULT (nmode, tem,
3070 gen_int_mode (val_so_far, nmode)),
3071 accum_inner);
3072 }
3073 }
3074
3075 if (variant == negate_variant)
3076 {
3077 val_so_far = -val_so_far;
3078 accum = expand_unop (mode, neg_optab, accum, target, 0);
3079 }
3080 else if (variant == add_variant)
3081 {
3082 val_so_far = val_so_far + 1;
3083 accum = force_operand (gen_rtx_PLUS (mode, accum, op0), target);
3084 }
3085
3086 /* Compare only the bits of val and val_so_far that are significant
3087 in the result mode, to avoid sign-/zero-extension confusion. */
3088 nmode = GET_MODE_INNER (mode);
3089 if (nmode == VOIDmode)
3090 nmode = mode;
3091 val &= GET_MODE_MASK (nmode);
3092 val_so_far &= GET_MODE_MASK (nmode);
3093 gcc_assert (val == val_so_far);
3094
3095 return accum;
3096}
3097
3098/* Perform a multiplication and return an rtx for the result.
3099 MODE is mode of value; OP0 and OP1 are what to multiply (rtx's);
3100 TARGET is a suggestion for where to store the result (an rtx).
3101
3102 We check specially for a constant integer as OP1.
3103 If you want this check for OP0 as well, then before calling
3104 you should swap the two operands if OP0 would be constant. */
3105
3106rtx
3107expand_mult (machine_mode mode, rtx op0, rtx op1, rtx target,
3108 int unsignedp)
3109{
3110 enum mult_variant variant;
3111 struct algorithm algorithm;
3112 rtx scalar_op1;
3113 int max_cost;
3114 bool speed = optimize_insn_for_speed_p ();
3115 bool do_trapv = flag_trapv && SCALAR_INT_MODE_P (mode) && !unsignedp;
3116
3117 if (CONSTANT_P (op0))
3118 std::swap (op0, op1);
3119
3120 /* For vectors, there are several simplifications that can be made if
3121 all elements of the vector constant are identical. */
3122 scalar_op1 = op1;
3123 if (GET_CODE (op1) == CONST_VECTOR)
3124 {
3125 int i, n = CONST_VECTOR_NUNITS (op1);
3126 scalar_op1 = CONST_VECTOR_ELT (op1, 0);
3127 for (i = 1; i < n; ++i)
3128 if (!rtx_equal_p (scalar_op1, CONST_VECTOR_ELT (op1, i)))
3129 goto skip_scalar;
3130 }
3131
3132 if (INTEGRAL_MODE_P (mode))
3133 {
3134 rtx fake_reg;
3135 HOST_WIDE_INT coeff;
3136 bool is_neg;
3137 int mode_bitsize;
3138
3139 if (op1 == CONST0_RTX (mode))
3140 return op1;
3141 if (op1 == CONST1_RTX (mode))
3142 return op0;
3143 if (op1 == CONSTM1_RTX (mode))
3144 return expand_unop (mode, do_trapv ? negv_optab : neg_optab,
3145 op0, target, 0);
3146
3147 if (do_trapv)
3148 goto skip_synth;
3149
3150 /* If mode is integer vector mode, check if the backend supports
3151 vector lshift (by scalar or vector) at all. If not, we can't use
 3152 	 synthesized multiply.  */
3153 if (GET_MODE_CLASS (mode) == MODE_VECTOR_INT
3154 && optab_handler (vashl_optab, mode) == CODE_FOR_nothing
3155 && optab_handler (ashl_optab, mode) == CODE_FOR_nothing)
3156 goto skip_synth;
3157
3158 /* These are the operations that are potentially turned into
3159 a sequence of shifts and additions. */
3160 mode_bitsize = GET_MODE_UNIT_BITSIZE (mode);
3161
3162 /* synth_mult does an `unsigned int' multiply. As long as the mode is
3163 less than or equal in size to `unsigned int' this doesn't matter.
3164 If the mode is larger than `unsigned int', then synth_mult works
3165 only if the constant value exactly fits in an `unsigned int' without
3166 any truncation. This means that multiplying by negative values does
3167 not work; results are off by 2^32 on a 32 bit machine. */
3168 if (CONST_INT_P (scalar_op1))
3169 {
3170 coeff = INTVAL (scalar_op1);
3171 is_neg = coeff < 0;
3172 }
3173#if TARGET_SUPPORTS_WIDE_INT
3174 else if (CONST_WIDE_INT_P (scalar_op1))
3175#else
3176 else if (CONST_DOUBLE_AS_INT_P (scalar_op1))
3177#endif
3178 {
3179 int shift = wi::exact_log2 (std::make_pair (scalar_op1, mode));
3180 /* Perfect power of 2 (other than 1, which is handled above). */
3181 if (shift > 0)
3182 return expand_shift (LSHIFT_EXPR, mode, op0,
3183 shift, target, unsignedp);
3184 else
3185 goto skip_synth;
3186 }
3187 else
3188 goto skip_synth;
3189
3190 /* We used to test optimize here, on the grounds that it's better to
3191 produce a smaller program when -O is not used. But this causes
3192 such a terrible slowdown sometimes that it seems better to always
3193 use synth_mult. */
3194
3195 /* Special case powers of two. */
3196 if (EXACT_POWER_OF_2_OR_ZERO_P (coeff)
3197 && !(is_neg && mode_bitsize > HOST_BITS_PER_WIDE_INT))
3198 return expand_shift (LSHIFT_EXPR, mode, op0,
3199 floor_log2 (coeff), target, unsignedp);
3200
3201 fake_reg = gen_raw_REG (mode, LAST_VIRTUAL_REGISTER + 1);
3202
3203 /* Attempt to handle multiplication of DImode values by negative
3204 coefficients, by performing the multiplication by a positive
3205 multiplier and then inverting the result. */
3206 if (is_neg && mode_bitsize > HOST_BITS_PER_WIDE_INT)
3207 {
 3208 	  /* It's safe to use -coeff even for INT_MIN, as the
3209 result is interpreted as an unsigned coefficient.
3210 Exclude cost of op0 from max_cost to match the cost
3211 calculation of the synth_mult. */
3212 coeff = -(unsigned HOST_WIDE_INT) coeff;
3213 max_cost = (set_src_cost (gen_rtx_MULT (mode, fake_reg, op1), speed)
3214 - neg_cost (speed, mode));
3215 if (max_cost <= 0)
3216 goto skip_synth;
3217
3218 /* Special case powers of two. */
3219 if (EXACT_POWER_OF_2_OR_ZERO_P (coeff))
3220 {
3221 rtx temp = expand_shift (LSHIFT_EXPR, mode, op0,
3222 floor_log2 (coeff), target, unsignedp);
3223 return expand_unop (mode, neg_optab, temp, target, 0);
3224 }
3225
3226 if (choose_mult_variant (mode, coeff, &algorithm, &variant,
3227 max_cost))
3228 {
3229 rtx temp = expand_mult_const (mode, op0, coeff, NULL_RTX,
3230 &algorithm, variant);
3231 return expand_unop (mode, neg_optab, temp, target, 0);
3232 }
3233 goto skip_synth;
3234 }
3235
3236 /* Exclude cost of op0 from max_cost to match the cost
3237 calculation of the synth_mult. */
3238 max_cost = set_src_cost (gen_rtx_MULT (mode, fake_reg, op1), speed);
3239 if (choose_mult_variant (mode, coeff, &algorithm, &variant, max_cost))
3240 return expand_mult_const (mode, op0, coeff, target,
3241 &algorithm, variant);
3242 }
3243 skip_synth:
3244
3245 /* Expand x*2.0 as x+x. */
3246 if (CONST_DOUBLE_AS_FLOAT_P (scalar_op1))
3247 {
3248 REAL_VALUE_TYPE d;
3249 REAL_VALUE_FROM_CONST_DOUBLE (d, scalar_op1);
3250
3251 if (REAL_VALUES_EQUAL (d, dconst2))
3252 {
3253 op0 = force_reg (GET_MODE (op0), op0);
3254 return expand_binop (mode, add_optab, op0, op0,
3255 target, unsignedp, OPTAB_LIB_WIDEN);
3256 }
3257 }
3258 skip_scalar:
3259
3260 /* This used to use umul_optab if unsigned, but for non-widening multiply
3261 there is no difference between signed and unsigned. */
3262 op0 = expand_binop (mode, do_trapv ? smulv_optab : smul_optab,
3263 op0, op1, target, unsignedp, OPTAB_LIB_WIDEN);
3264 gcc_assert (op0);
3265 return op0;
3266}
3267
3268/* Return a cost estimate for multiplying a register by the given
3269 COEFFicient in the given MODE and SPEED. */
3270
3271int
3272mult_by_coeff_cost (HOST_WIDE_INT coeff, machine_mode mode, bool speed)
3273{
3274 int max_cost;
3275 struct algorithm algorithm;
3276 enum mult_variant variant;
3277
3278 rtx fake_reg = gen_raw_REG (mode, LAST_VIRTUAL_REGISTER + 1);
3279 max_cost = set_src_cost (gen_rtx_MULT (mode, fake_reg, fake_reg), speed);
3280 if (choose_mult_variant (mode, coeff, &algorithm, &variant, max_cost))
3281 return algorithm.cost.cost;
3282 else
3283 return max_cost;
3284}
3285
3286/* Perform a widening multiplication and return an rtx for the result.
3287 MODE is mode of value; OP0 and OP1 are what to multiply (rtx's);
3288 TARGET is a suggestion for where to store the result (an rtx).
3289 THIS_OPTAB is the optab we should use, it must be either umul_widen_optab
3290 or smul_widen_optab.
3291
3292 We check specially for a constant integer as OP1, comparing the
3293 cost of a widening multiply against the cost of a sequence of shifts
3294 and adds. */
3295
3296rtx
3297expand_widening_mult (machine_mode mode, rtx op0, rtx op1, rtx target,
3298 int unsignedp, optab this_optab)
3299{
3300 bool speed = optimize_insn_for_speed_p ();
3301 rtx cop1;
3302
3303 if (CONST_INT_P (op1)
3304 && GET_MODE (op0) != VOIDmode
3305 && (cop1 = convert_modes (mode, GET_MODE (op0), op1,
3306 this_optab == umul_widen_optab))
3307 && CONST_INT_P (cop1)
3308 && (INTVAL (cop1) >= 0
3309 || HWI_COMPUTABLE_MODE_P (mode)))
3310 {
3311 HOST_WIDE_INT coeff = INTVAL (cop1);
3312 int max_cost;
3313 enum mult_variant variant;
3314 struct algorithm algorithm;
3315
3316 if (coeff == 0)
3317 return CONST0_RTX (mode);
3318
3319 /* Special case powers of two. */
3320 if (EXACT_POWER_OF_2_OR_ZERO_P (coeff))
3321 {
3322 op0 = convert_to_mode (mode, op0, this_optab == umul_widen_optab);
3323 return expand_shift (LSHIFT_EXPR, mode, op0,
3324 floor_log2 (coeff), target, unsignedp);
3325 }
3326
3327 /* Exclude cost of op0 from max_cost to match the cost
3328 calculation of the synth_mult. */
3329 max_cost = mul_widen_cost (speed, mode);
3330 if (choose_mult_variant (mode, coeff, &algorithm, &variant,
3331 max_cost))
3332 {
3333 op0 = convert_to_mode (mode, op0, this_optab == umul_widen_optab);
3334 return expand_mult_const (mode, op0, coeff, target,
3335 &algorithm, variant);
3336 }
3337 }
3338 return expand_binop (mode, this_optab, op0, op1, target,
3339 unsignedp, OPTAB_LIB_WIDEN);
3340}
3341\f
3342/* Choose a minimal N + 1 bit approximation to 1/D that can be used to
3343 replace division by D, and put the least significant N bits of the result
3344 in *MULTIPLIER_PTR and return the most significant bit.
3345
3346 The width of operations is N (should be <= HOST_BITS_PER_WIDE_INT), the
3347 needed precision is in PRECISION (should be <= N).
3348
3349 PRECISION should be as small as possible so this function can choose
3350 the multiplier more freely.
3351
3352 The rounded-up logarithm of D is placed in *LGUP_PTR. A shift count that
3353 is to be used for a final right shift is placed in *POST_SHIFT_PTR.
3354
3355 Using this function, x/D will be equal to (x * m) >> (*POST_SHIFT_PTR),
3356 where m is the full HOST_BITS_PER_WIDE_INT + 1 bit multiplier. */
3357
3358unsigned HOST_WIDE_INT
3359choose_multiplier (unsigned HOST_WIDE_INT d, int n, int precision,
3360 unsigned HOST_WIDE_INT *multiplier_ptr,
3361 int *post_shift_ptr, int *lgup_ptr)
3362{
3363 int lgup, post_shift;
3364 int pow, pow2;
3365
3366 /* lgup = ceil(log2(divisor)); */
3367 lgup = ceil_log2 (d);
3368
3369 gcc_assert (lgup <= n);
3370
3371 pow = n + lgup;
3372 pow2 = n + lgup - precision;
3373
3374 /* mlow = 2^(N + lgup)/d */
3375 wide_int val = wi::set_bit_in_zero (pow, HOST_BITS_PER_DOUBLE_INT);
3376 wide_int mlow = wi::udiv_trunc (val, d);
3377
3378 /* mhigh = (2^(N + lgup) + 2^(N + lgup - precision))/d */
3379 val |= wi::set_bit_in_zero (pow2, HOST_BITS_PER_DOUBLE_INT);
3380 wide_int mhigh = wi::udiv_trunc (val, d);
3381
3382 /* If precision == N, then mlow, mhigh exceed 2^N
3383 (but they do not exceed 2^(N+1)). */
3384
3385 /* Reduce to lowest terms. */
3386 for (post_shift = lgup; post_shift > 0; post_shift--)
3387 {
3388 unsigned HOST_WIDE_INT ml_lo = wi::extract_uhwi (mlow, 1,
3389 HOST_BITS_PER_WIDE_INT);
3390 unsigned HOST_WIDE_INT mh_lo = wi::extract_uhwi (mhigh, 1,
3391 HOST_BITS_PER_WIDE_INT);
3392 if (ml_lo >= mh_lo)
3393 break;
3394
3395 mlow = wi::uhwi (ml_lo, HOST_BITS_PER_DOUBLE_INT);
3396 mhigh = wi::uhwi (mh_lo, HOST_BITS_PER_DOUBLE_INT);
3397 }
3398
3399 *post_shift_ptr = post_shift;
3400 *lgup_ptr = lgup;
3401 if (n < HOST_BITS_PER_WIDE_INT)
3402 {
3403 unsigned HOST_WIDE_INT mask = ((unsigned HOST_WIDE_INT) 1 << n) - 1;
3404 *multiplier_ptr = mhigh.to_uhwi () & mask;
3405 return mhigh.to_uhwi () >= mask;
3406 }
3407 else
3408 {
3409 *multiplier_ptr = mhigh.to_uhwi ();
3410 return wi::extract_uhwi (mhigh, HOST_BITS_PER_WIDE_INT, 1);
3411 }
3412}
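/* Worked example (assuming HOST_BITS_PER_WIDE_INT == 64): for d = 5 with
   n = precision = 32 we get lgup = 3 and

       mlow  = 2^35 / 5         = 6871947673  (0x199999999)
       mhigh = (2^35 + 2^3) / 5 = 6871947675  (0x19999999B)

   The reduction loop halves both values once (their halves still differ),
   leaving mhigh = 0xCCCCCCCD and post_shift = 2, and the returned "extra"
   bit is 0 because the multiplier fits in 32 bits.  An unsigned 32-bit
   division x / 5 can therefore be replaced by taking the high half of the
   64-bit product x * 0xCCCCCCCD and shifting it right by 2, the familiar
   reciprocal-multiplication sequence used when expanding division by a
   constant.  */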
3413
3414/* Compute the inverse of X mod 2**N, i.e., find Y such that X * Y is
3415 congruent to 1 (mod 2**N). */
3416
3417static unsigned HOST_WIDE_INT
3418invert_mod2n (unsigned HOST_WIDE_INT x, int n)
3419{
3420 /* Solve x*y == 1 (mod 2^n), where x is odd. Return y. */
3421
3422 /* The algorithm notes that the choice y = x satisfies
3423 x*y == 1 mod 2^3, since x is assumed odd.
3424 Each iteration doubles the number of bits of significance in y. */
3425
3426 unsigned HOST_WIDE_INT mask;
3427 unsigned HOST_WIDE_INT y = x;
3428 int nbit = 3;
3429
3430 mask = (n == HOST_BITS_PER_WIDE_INT
3431 ? ~(unsigned HOST_WIDE_INT) 0
3432 : ((unsigned HOST_WIDE_INT) 1 << n) - 1);
3433
3434 while (nbit < n)
3435 {
3436 y = y * (2 - x*y) & mask; /* Modulo 2^N */
3437 nbit *= 2;
3438 }
3439 return y;
3440}
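/* Worked example: x = 7, n = 8.  The starting guess y = 7 already
   satisfies 7 * 7 == 49 == 1 (mod 8).  One Newton step gives

       y = 7 * (2 - 7 * 7) & 0xff = (7 * -47) & 0xff = 0xb7 = 183

   and 7 * 183 == 1281 == 1 (mod 64); the next step leaves y at 183, which
   in fact already satisfies 7 * 183 == 1 (mod 256), so the inverse of 7
   modulo 2^8 is 183.  (The intermediate products wrap modulo
   2^HOST_BITS_PER_WIDE_INT before mask is applied, which cannot disturb
   the low n bits.)  */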
3441
3442/* Emit code to adjust ADJ_OPERAND after a multiplication of OP0 and OP1
3443 that used the wrong signedness.  ADJ_OPERAND is already the high half of
3444 the product OP0 x OP1.  If UNSIGNEDP is nonzero, adjust the signed product
3445 to become unsigned; if UNSIGNEDP is zero, adjust the unsigned product to
3446 become signed.
3447
3448 The result is put in TARGET if that is convenient.
3449
3450 MODE is the mode of operation. */
3451
3452rtx
3453expand_mult_highpart_adjust (machine_mode mode, rtx adj_operand, rtx op0,
3454 rtx op1, rtx target, int unsignedp)
3455{
3456 rtx tem;
3457 enum rtx_code adj_code = unsignedp ? PLUS : MINUS;
3458
3459 tem = expand_shift (RSHIFT_EXPR, mode, op0,
3460 GET_MODE_BITSIZE (mode) - 1, NULL_RTX, 0);
3461 tem = expand_and (mode, tem, op1, NULL_RTX);
3462 adj_operand
3463 = force_operand (gen_rtx_fmt_ee (adj_code, mode, adj_operand, tem),
3464 adj_operand);
3465
3466 tem = expand_shift (RSHIFT_EXPR, mode, op1,
3467 GET_MODE_BITSIZE (mode) - 1, NULL_RTX, 0);
3468 tem = expand_and (mode, tem, op0, NULL_RTX);
3469 target = force_operand (gen_rtx_fmt_ee (adj_code, mode, adj_operand, tem),
3470 target);
3471
3472 return target;
3473}
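/* The identity behind the adjustment above, as a sketch (N is the bit
   width of MODE, and U/S denote the unsigned/signed readings of an N-bit
   value, so U = S + 2^N exactly when the value is negative):

       U0 * U1 = S0 * S1 + 2^N * (S1 * [S0 < 0] + S0 * [S1 < 0])
                         + 2^(2N) * [S0 < 0] * [S1 < 0]

   Modulo 2^N the unsigned high half therefore equals the signed high half
   plus OP1 when OP0 is negative and plus OP0 when OP1 is negative.  The
   arithmetic right shifts by N - 1 build all-ones/all-zeros masks, the
   ANDs select those correction terms, and ADJ_CODE applies them with PLUS
   when turning a signed highpart into an unsigned one (UNSIGNEDP nonzero)
   or with MINUS for the converse.  */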
3474
3475/* Subroutine of expmed_mult_highpart. Return the MODE high part of OP. */
3476
3477static rtx
3478extract_high_half (machine_mode mode, rtx op)
3479{
3480 machine_mode wider_mode;
3481
3482 if (mode == word_mode)
3483 return gen_highpart (mode, op);
3484
3485 gcc_assert (!SCALAR_FLOAT_MODE_P (mode));
3486
3487 wider_mode = GET_MODE_WIDER_MODE (mode);
3488 op = expand_shift (RSHIFT_EXPR, wider_mode, op,
3489 GET_MODE_BITSIZE (mode), 0, 1);
3490 return convert_modes (mode, wider_mode, op, 0);
3491}
3492
3493/* Like expmed_mult_highpart, but only consider using a multiplication
3494 optab. OP1 is an rtx for the constant operand. */
3495
3496static rtx
3497expmed_mult_highpart_optab (machine_mode mode, rtx op0, rtx op1,
3498 rtx target, int unsignedp, int max_cost)
3499{
3500 rtx narrow_op1 = gen_int_mode (INTVAL (op1), mode);
3501 machine_mode wider_mode;
3502 optab moptab;
3503 rtx tem;
3504 int size;
3505 bool speed = optimize_insn_for_speed_p ();
3506
3507 gcc_assert (!SCALAR_FLOAT_MODE_P (mode));
3508
3509 wider_mode = GET_MODE_WIDER_MODE (mode);
3510 size = GET_MODE_BITSIZE (mode);
3511
3512 /* First, try using a multiplication insn that generates only the needed
3513 high part of the product, with the signedness given by UNSIGNEDP. */
3514 if (mul_highpart_cost (speed, mode) < max_cost)
3515 {
3516 moptab = unsignedp ? umul_highpart_optab : smul_highpart_optab;
3517 tem = expand_binop (mode, moptab, op0, narrow_op1, target,
3518 unsignedp, OPTAB_DIRECT);
3519 if (tem)
3520 return tem;
3521 }
3522
3523 /* Second, the same as above, but with the opposite signedness from
3524 UNSIGNEDP; the result then needs adjusting after the multiplication. */
3525 if (size - 1 < BITS_PER_WORD
3526 && (mul_highpart_cost (speed, mode)
3527 + 2 * shift_cost (speed, mode, size-1)
3528 + 4 * add_cost (speed, mode) < max_cost))
3529 {
3530 moptab = unsignedp ? smul_highpart_optab : umul_highpart_optab;
3531 tem = expand_binop (mode, moptab, op0, narrow_op1, target,
3532 unsignedp, OPTAB_DIRECT);
3533 if (tem)
3534 /* We used the wrong signedness. Adjust the result. */
3535 return expand_mult_highpart_adjust (mode, tem, op0, narrow_op1,
3536 tem, unsignedp);
3537 }
3538
3539 /* Try widening multiplication. */
3540 moptab = unsignedp ? umul_widen_optab : smul_widen_optab;
3541 if (widening_optab_handler (moptab, wider_mode, mode) != CODE_FOR_nothing
3542 && mul_widen_cost (speed, wider_mode) < max_cost)
3543 {
3544 tem = expand_binop (wider_mode, moptab, op0, narrow_op1, 0,
3545 unsignedp, OPTAB_WIDEN);
3546 if (tem)
3547 return extract_high_half (mode, tem);
3548 }
3549
3550 /* Try widening the mode and performing a non-widening multiplication. */
3551 if (optab_handler (smul_optab, wider_mode) != CODE_FOR_nothing
3552 && size - 1 < BITS_PER_WORD
3553 && (mul_cost (speed, wider_mode) + shift_cost (speed, mode, size-1)
3554 < max_cost))
3555 {
3556 rtx_insn *insns;
3557 rtx wop0, wop1;
3558
3559 /* We need to widen the operands, for example to ensure the
3560 constant multiplier is correctly sign- or zero-extended.
3561 Use a sequence so that any instructions emitted by the
3562 conversions can be cleaned up if things don't work out. */
3563 start_sequence ();
3564 wop0 = convert_modes (wider_mode, mode, op0, unsignedp);
3565 wop1 = convert_modes (wider_mode, mode, op1, unsignedp);
3566 tem = expand_binop (wider_mode, smul_optab, wop0, wop1, 0,
3567 unsignedp, OPTAB_WIDEN);
3568 insns = get_insns ();
3569 end_sequence ();
3570
3571 if (tem)
3572 {
3573 emit_insn (insns);
3574 return extract_high_half (mode, tem);
3575 }
3576 }
3577
3578 /* Try widening multiplication of opposite signedness, and adjust. */
3579 moptab = unsignedp ? smul_widen_optab : umul_widen_optab;
3580 if (widening_optab_handler (moptab, wider_mode, mode) != CODE_FOR_nothing
3581 && size - 1 < BITS_PER_WORD
3582 && (mul_widen_cost (speed, wider_mode)
3583 + 2 * shift_cost (speed, mode, size-1)
3584 + 4 * add_cost (speed, mode) < max_cost))
3585 {
3586 tem = expand_binop (wider_mode, moptab, op0, narrow_op1,
3587 NULL_RTX, ! unsignedp, OPTAB_WIDEN);
3588 if (tem != 0)
3589 {
3590 tem = extract_high_half (mode, tem);
3591 /* We used the wrong signedness. Adjust the result. */
3592 return expand_mult_highpart_adjust (mode, tem, op0, narrow_op1,
3593 target, unsignedp);
3594 }
3595 }
3596
3597 return 0;
3598}
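/* A note on the cost guards above (an editorial reading of the code, not a
   statement about any particular target): the two "wrong signedness" paths
   budget an extra 2 * shift_cost (speed, mode, size - 1)
   + 4 * add_cost (speed, mode) on top of the multiply because the fix-up
   in expand_mult_highpart_adjust emits two sign-extracting shifts, two
   ANDs and two add/subtract corrections, the AND operations being
   approximated here at add_cost.  */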
3599
3600/* Emit code to multiply OP0 and OP1 (where OP1 is an integer constant),
3601 putting the high half of the result in TARGET if that is convenient,
3602 and return where the result is. If the operation cannot be performed,
3603 0 is returned.
3604
3605 MODE is the mode of operation and result.
3606
3607 UNSIGNEDP nonzero means unsigned multiply.
3608
3609 MAX_COST is the total allowed cost for the expanded RTL. */
3610
3611static rtx
3612expmed_mult_highpart (machine_mode mode, rtx op0, rtx op1,
3613 rtx target, int unsignedp, int max_cost)
3614{
3615 machine_mode wider_mode = GET_MODE_WIDER_MODE (mode);
3616 unsigned HOST_WIDE_INT cnst1;
3617 int extra_cost;
3618 bool sign_adjust = false;
3619 enum mult_variant variant;
3620 struct algorithm alg;
3621 rtx tem;
3622 bool speed = optimize_insn_for_speed_p ();
3623
3624 gcc_assert (!SCALAR_FLOAT_MODE_P (mode));
3625 /* We can't support modes wider than HOST_BITS_PER_WIDE_INT. */
3626 gcc_assert (HWI_COMPUTABLE_MODE_P (mode));
3627
3628 cnst1 = INTVAL (op1) & GET_MODE_MASK (mode);
3629
3630 /* We can't optimize modes wider than BITS_PER_WORD.
3631 ??? We might be able to perform double-word arithmetic if
3632 mode == word_mode; however, all the cost calculations in
3633 synth_mult etc. assume single-word operations. */
3634 if (GET_MODE_BITSIZE (wider_mode) > BITS_PER_WORD)
3635 return expmed_mult_highpart_optab (mode, op0, op1, target,
3636 unsignedp, max_cost);
3637
3638 extra_cost = shift_cost (speed, mode, GET_MODE_BITSIZE (mode) - 1);
3639
3640 /* Check whether we are about to multiply by a negative constant. */
3641 if (!unsignedp && ((cnst1 >> (GET_MODE_BITSIZE (mode) - 1)) & 1))
3642 {
3643 sign_adjust = true;
3644 extra_cost += add_cost (speed, mode);
3645 }
3646
3647 /* See whether shift/add multiplication is cheap enough. */
3648 if (choose_mult_variant (wider_mode, cnst1, &alg, &variant,
3649 max_cost - extra_cost))
3650 {
3651 /* See whether the specialized multiplication optabs are
3652 cheaper than the shift/add version. */
3653 tem = expmed_mult_highpart_optab (mode, op0, op1, target, unsignedp,
3654 alg.cost.cost + extra_cost);
3655 if (tem)
3656 return tem;
3657
3658 tem = convert_to_mode (wider_mode, op0, unsignedp);
3659 tem = expand_mult_const (wider_mode, tem, cnst1, 0, &alg, variant);
3660 tem = extract_high_half (mode, tem);
3661
3662 /* Adjust result for signedness. */
3663 if (sign_adjust)
3664 tem = force_operand (gen_rtx_MINUS (mode, tem, op0), tem);
3665
3666 return tem;
3667 }
3668 return expmed_mult_highpart_optab (mode, op0, op1, target,
3669 unsignedp, max_cost);
3670}
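/* A sketch of the sign_adjust correction above: for a signed multiply by a
   negative constant, cnst1 is that constant's N-bit unsigned image c + 2^N
   (with c < 0 the real value), so the product formed by expand_mult_const
   in the wider mode is

       op0 * (c + 2^N) = op0 * c + op0 * 2^N

   whose upper N bits are the desired high half plus op0.  The final MINUS
   removes that extra term, and its single add_cost is what extra_cost
   accounts for when sign_adjust is set.  */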
3671
3672
3673/* Expand signed modulus of OP0 by a power of two D in mode MODE. */
3674
3675static rtx
3676expand_smod_pow2 (machine_mode mode, rtx op0, HOST_WIDE_INT d)
3677{
3678 rtx result, temp, shift;
3679 rtx_code_label *label;
3680 int logd;
3681 int prec = GET_MODE_PRECISION (mode);
3682
3683 logd = floor_log2 (d);
3684 result = gen_reg_rtx (mode);
3685
3686 /* Avoid conditional branches when they're expensive. */
3687 if (BRANCH_COST (optimize_insn_for_speed_p (), false) >= 2
3688 && optimize_insn_for_speed_p ())
3689 {
3690 rtx signmask = emit_store_flag (result, LT, op0, const0_rtx,
3691 mode, 0, -1);
3692 if (signmask)
3693 {
3694 HOST_WIDE_INT masklow = ((HOST_WIDE_INT) 1 << logd) - 1;
3695 signmask = force_reg (mode, signmask);
3696 shift = GEN_INT (GET_MODE_BITSIZE (mode) - logd);
3697
3698 /* Use the rtx_cost of a LSHIFTRT instruction to determine
3699 which instruction sequence to use. If logical right shifts
3700 are expensive then use 2 XORs, 2 SUBs and an AND, otherwise
3701 use a LSHIFTRT, 1 ADD, 1 SUB and an AND. */
3702
3703 temp = gen_rtx_LSHIFTRT (mode, result, shift);
3704 if (optab_handler (lshr_optab, mode) == CODE_FOR_nothing
3705 || (set_src_cost (temp, optimize_insn_for_speed_p ())
3706 > COSTS_N_INSNS (2)))
3707 {
3708 temp = expand_binop (mode, xor_optab, op0, signmask,
3709 NULL_RTX, 1, OPTAB_LIB_WIDEN);
3710 temp = expand_binop (mode, sub_optab, temp, signmask,
3711 NULL_RTX, 1, OPTAB_LIB_WIDEN);
3712 temp = expand_binop (mode, and_optab, temp,
3713 gen_int_mode (masklow, mode),
3714 NULL_RTX, 1, OPTAB_LIB_WIDEN);
3715 temp = expand_binop (mode, xor_optab, temp, signmask,
3716 NULL_RTX, 1, OPTAB_LIB_WIDEN);
3717 temp = expand_binop (mode, sub_optab, temp, signmask,
3718 NULL_RTX, 1, OPTAB_LIB_WIDEN);
3719 }
3720 else
3721 {
3722 signmask = expand_binop (mode, lshr_optab, signmask, shift,
3723 NULL_RTX, 1, OPTAB_LIB_WIDEN);
3724 signmask = force_reg (mode, signmask);
3725
3726 temp = expand_binop (mode, add_optab, op0, signmask,
3727 NULL_RTX, 1, OPTAB_LIB_WIDEN);
3728 temp = expand_binop (mode, and_optab, temp,
3729 gen_int_mode (masklow, mode),
3730 NULL_RTX, 1, OPTAB_LIB_WIDEN);
3731 temp = expand_binop (mode, sub_optab, temp, signmask,
3732 NULL_RTX, 1, OPTAB_LIB_WIDEN);
3733 }
3734 return temp;
3735 }
3736 }
3737
3738 /* Mask contains the mode's signbit and the significant bits of the
3739 modulus. By including the signbit in the operation, many targets
3740 can avoid an explicit compare operation in the following comparison
3741 against zero. */
3742 wide_int mask = wi::mask (logd, false, prec);
3743 mask = wi::set_bit (mask, prec - 1);
3744
3745 temp = expand_binop (mode, and_optab, op0,
3746 immed_wide_int_const (mask, mode),
3747 result, 1, OPTAB_LIB_WIDEN);
3748 if (temp != result)
3749 emit_move_insn (result, temp);
3750
3751 label = gen_label_rtx ();
3752 do_cmp_and_jump (result, const0_rtx, GE, mode, label);
3753
3754 temp = expand_binop (mode, sub_optab, result, const1_rtx, result,
3755 0, OPTAB_LIB_WIDEN);
3756
3757 mask = wi::mask (logd, true, prec);
3758 temp = expand_binop (mode, ior_optab, temp,
3759 immed_wide_int_const (mask, mode),
3760 result, 1, OPTAB_LIB_WIDEN);
3761 temp = expand_binop (mode, add_optab, temp, const1_rtx, result,
3762 0, OPTAB_LIB_WIDEN);
3763 if (temp != result)
3764 emit_move_insn (result, temp);
3765 emit_label (label);
3766 return result;
3767}
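/* Worked example of the two branchless sequences above (illustrative),
   for d = 8 (logd = 3) and x = -13, where signmask is the all-ones value
   produced by emit_store_flag for a negative operand:

       XOR/SUB form:  ((-13 ^ -1) - -1) & 7 = 13 & 7 = 5;
                      (5 ^ -1) - -1 = -5
       shift form:    signmask >> (bitsize - 3) = 7;
                      ((-13 + 7) & 7) - 7 = 2 - 7 = -5

   both of which match the truncating C remainder -13 % 8 == -5; for a
   non-negative x, signmask is zero and both collapse to x & 7.  The
   branching fallback at the end of the function instead ANDs op0 with a
   mask that also keeps the sign bit, so that the following comparison
   against zero needs no separate compare on many targets, and then
   sign-extends the low bits when the operand was negative.  */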
3768
3769/* Expand signed division of OP0 by a power of two D in mode MODE.
3770 This routine is only called for positive values of D. */
3771
3772static rtx
3773expand_sdiv_pow2 (machine_mode mode, rtx op0, HOST_WIDE_INT d)
3774{
3775 rtx temp;
3776 rtx_code_label *label;
3777 int logd;
3778
3779 logd = floor_log2 (d);