1 /* Data References Analysis and Manipulation Utilities for Vectorization.
2 Copyright (C) 2003-2015 Free Software Foundation, Inc.
3 Contributed by Dorit Naishlos <dorit@il.ibm.com>
4 and Ira Rosen <irar@il.ibm.com>
6 This file is part of GCC.
8 GCC is free software; you can redistribute it and/or modify it under
9 the terms of the GNU General Public License as published by the Free
Software Foundation; either version 3, or (at your option) any later
version.
13 GCC is distributed in the hope that it will be useful, but WITHOUT ANY
14 WARRANTY; without even the implied warranty of MERCHANTABILITY or
FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
for more details.
18 You should have received a copy of the GNU General Public License
19 along with GCC; see the file COPYING3. If not see
20 <http://www.gnu.org/licenses/>. */
24 #include "coretypes.h"
30 #include "double-int.h"
37 #include "fold-const.h"
38 #include "stor-layout.h"
42 #include "hard-reg-set.h"
44 #include "dominance.h"
46 #include "basic-block.h"
47 #include "gimple-pretty-print.h"
48 #include "tree-ssa-alias.h"
49 #include "internal-fn.h"
51 #include "gimple-expr.h"
55 #include "gimple-iterator.h"
56 #include "gimplify-me.h"
57 #include "gimple-ssa.h"
58 #include "tree-phinodes.h"
59 #include "ssa-iterators.h"
60 #include "stringpool.h"
61 #include "tree-ssanames.h"
62 #include "tree-ssa-loop-ivopts.h"
63 #include "tree-ssa-loop-manip.h"
64 #include "tree-ssa-loop.h"
66 #include "tree-chrec.h"
67 #include "tree-scalar-evolution.h"
68 #include "tree-vectorizer.h"
69 #include "diagnostic-core.h"
71 #include "plugin-api.h"
74 /* Need to include rtl.h, expr.h, etc. for optabs. */
78 #include "statistics.h"
80 #include "fixed-value.h"
81 #include "insn-config.h"
90 #include "insn-codes.h"
94 /* Return true if load- or store-lanes optab OPTAB is implemented for
95 COUNT vectors of type VECTYPE. NAME is the name of OPTAB. */
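/* For example, on a target with 128-bit V4SImode vectors and COUNT == 2,
   the code below asks mode_for_size for an integer mode of
   2 * 128 == 256 bits and then checks whether
     convert_optab_handler (optab, <256-bit mode>, V4SImode)
   is implemented, i.e. whether the group of two interleaved V4SI accesses
   can be done with a single load-lanes or store-lanes operation.  The
   concrete modes are only illustrative and depend on the target.  */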
98 vect_lanes_optab_supported_p (const char *name, convert_optab optab,
99 tree vectype, unsigned HOST_WIDE_INT count)
101 machine_mode mode, array_mode;
104 mode = TYPE_MODE (vectype);
105 limit_p = !targetm.array_mode_supported_p (mode, count);
  array_mode = mode_for_size (count * GET_MODE_BITSIZE (mode),
                              MODE_INT, limit_p);
109 if (array_mode == BLKmode)
111 if (dump_enabled_p ())
112 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
113 "no array mode for %s[" HOST_WIDE_INT_PRINT_DEC "]\n",
114 GET_MODE_NAME (mode), count);
118 if (convert_optab_handler (optab, array_mode, mode) == CODE_FOR_nothing)
120 if (dump_enabled_p ())
121 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
122 "cannot use %s<%s><%s>\n", name,
123 GET_MODE_NAME (array_mode), GET_MODE_NAME (mode));
127 if (dump_enabled_p ())
128 dump_printf_loc (MSG_NOTE, vect_location,
129 "can use %s<%s><%s>\n", name, GET_MODE_NAME (array_mode),
130 GET_MODE_NAME (mode));
136 /* Return the smallest scalar part of STMT.
137 This is used to determine the vectype of the stmt. We generally set the
138 vectype according to the type of the result (lhs). For stmts whose
139 result-type is different than the type of the arguments (e.g., demotion,
140 promotion), vectype will be reset appropriately (later). Note that we have
141 to visit the smallest datatype in this function, because that determines the
142 VF. If the smallest datatype in the loop is present only as the rhs of a
143 promotion operation - we'd miss it.
144 Such a case, where a variable of this datatype does not appear in the lhs
145 anywhere in the loop, can only occur if it's an invariant: e.g.:
146 'int_x = (int) short_inv', which we'd expect to have been optimized away by
147 invariant motion. However, we cannot rely on invariant motion to always
148 take invariants out of the loop, and so in the case of promotion we also
149 have to check the rhs.
   LHS_SIZE_UNIT and RHS_SIZE_UNIT contain the sizes of the corresponding
   types.  */
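/* For instance, for a loop whose only promotion is

     int_x = (int) short_a[i];

   the smallest scalar type is the 2-byte type of short_a rather than the
   4-byte type of int_x, so with 128-bit vectors the vectorization factor
   becomes 8 instead of 4.  (Variable names are only illustrative.)  */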
154 vect_get_smallest_scalar_type (gimple stmt, HOST_WIDE_INT *lhs_size_unit,
155 HOST_WIDE_INT *rhs_size_unit)
157 tree scalar_type = gimple_expr_type (stmt);
158 HOST_WIDE_INT lhs, rhs;
160 lhs = rhs = TREE_INT_CST_LOW (TYPE_SIZE_UNIT (scalar_type));
162 if (is_gimple_assign (stmt)
163 && (gimple_assign_cast_p (stmt)
164 || gimple_assign_rhs_code (stmt) == WIDEN_MULT_EXPR
165 || gimple_assign_rhs_code (stmt) == WIDEN_LSHIFT_EXPR
166 || gimple_assign_rhs_code (stmt) == FLOAT_EXPR))
168 tree rhs_type = TREE_TYPE (gimple_assign_rhs1 (stmt));
170 rhs = TREE_INT_CST_LOW (TYPE_SIZE_UNIT (rhs_type));
      if (rhs < lhs)
        scalar_type = rhs_type;
175 *lhs_size_unit = lhs;
176 *rhs_size_unit = rhs;
181 /* Insert DDR into LOOP_VINFO list of ddrs that may alias and need to be
182 tested at run-time. Return TRUE if DDR was successfully inserted.
183 Return false if versioning is not supported. */
186 vect_mark_for_runtime_alias_test (ddr_p ddr, loop_vec_info loop_vinfo)
188 struct loop *loop = LOOP_VINFO_LOOP (loop_vinfo);
190 if ((unsigned) PARAM_VALUE (PARAM_VECT_MAX_VERSION_FOR_ALIAS_CHECKS) == 0)
193 if (dump_enabled_p ())
195 dump_printf_loc (MSG_NOTE, vect_location,
196 "mark for run-time aliasing test between ");
197 dump_generic_expr (MSG_NOTE, TDF_SLIM, DR_REF (DDR_A (ddr)));
198 dump_printf (MSG_NOTE, " and ");
199 dump_generic_expr (MSG_NOTE, TDF_SLIM, DR_REF (DDR_B (ddr)));
200 dump_printf (MSG_NOTE, "\n");
203 if (optimize_loop_nest_for_size_p (loop))
205 if (dump_enabled_p ())
206 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
207 "versioning not supported when optimizing"
212 /* FORNOW: We don't support versioning with outer-loop vectorization. */
215 if (dump_enabled_p ())
216 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
217 "versioning not yet supported for outer-loops.\n");
  /* FORNOW: We don't support creating runtime alias tests for non-constant
     step.  */
223 if (TREE_CODE (DR_STEP (DDR_A (ddr))) != INTEGER_CST
224 || TREE_CODE (DR_STEP (DDR_B (ddr))) != INTEGER_CST)
226 if (dump_enabled_p ())
227 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
228 "versioning not yet supported for non-constant "
233 LOOP_VINFO_MAY_ALIAS_DDRS (loop_vinfo).safe_push (ddr);
238 /* Function vect_analyze_data_ref_dependence.
240 Return TRUE if there (might) exist a dependence between a memory-reference
241 DRA and a memory-reference DRB. When versioning for alias may check a
242 dependence at run-time, return FALSE. Adjust *MAX_VF according to
243 the data dependence. */
246 vect_analyze_data_ref_dependence (struct data_dependence_relation *ddr,
247 loop_vec_info loop_vinfo, int *max_vf)
250 struct loop *loop = LOOP_VINFO_LOOP (loop_vinfo);
251 struct data_reference *dra = DDR_A (ddr);
252 struct data_reference *drb = DDR_B (ddr);
253 stmt_vec_info stmtinfo_a = vinfo_for_stmt (DR_STMT (dra));
254 stmt_vec_info stmtinfo_b = vinfo_for_stmt (DR_STMT (drb));
255 lambda_vector dist_v;
256 unsigned int loop_depth;
258 /* In loop analysis all data references should be vectorizable. */
259 if (!STMT_VINFO_VECTORIZABLE (stmtinfo_a)
260 || !STMT_VINFO_VECTORIZABLE (stmtinfo_b))
263 /* Independent data accesses. */
  if (DDR_ARE_DEPENDENT (ddr) == chrec_known)
    return false;

  if (dra == drb
      || (DR_IS_READ (dra) && DR_IS_READ (drb)))
    return false;
271 /* Even if we have an anti-dependence then, as the vectorized loop covers at
272 least two scalar iterations, there is always also a true dependence.
273 As the vectorizer does not re-order loads and stores we can ignore
274 the anti-dependence if TBAA can disambiguate both DRs similar to the
275 case with known negative distance anti-dependences (positive
276 distance anti-dependences would violate TBAA constraints). */
277 if (((DR_IS_READ (dra) && DR_IS_WRITE (drb))
278 || (DR_IS_WRITE (dra) && DR_IS_READ (drb)))
279 && !alias_sets_conflict_p (get_alias_set (DR_REF (dra)),
                                 get_alias_set (DR_REF (drb))))
    return false;
283 /* Unknown data dependence. */
284 if (DDR_ARE_DEPENDENT (ddr) == chrec_dont_know)
286 /* If user asserted safelen consecutive iterations can be
287 executed concurrently, assume independence. */
288 if (loop->safelen >= 2)
290 if (loop->safelen < *max_vf)
291 *max_vf = loop->safelen;
292 LOOP_VINFO_NO_DATA_DEPENDENCIES (loop_vinfo) = false;
296 if (STMT_VINFO_GATHER_P (stmtinfo_a)
297 || STMT_VINFO_GATHER_P (stmtinfo_b))
299 if (dump_enabled_p ())
301 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
302 "versioning for alias not supported for: "
303 "can't determine dependence between ");
304 dump_generic_expr (MSG_MISSED_OPTIMIZATION, TDF_SLIM,
306 dump_printf (MSG_MISSED_OPTIMIZATION, " and ");
307 dump_generic_expr (MSG_MISSED_OPTIMIZATION, TDF_SLIM,
309 dump_printf (MSG_MISSED_OPTIMIZATION, "\n");
314 if (dump_enabled_p ())
316 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
317 "versioning for alias required: "
318 "can't determine dependence between ");
319 dump_generic_expr (MSG_MISSED_OPTIMIZATION, TDF_SLIM,
321 dump_printf (MSG_MISSED_OPTIMIZATION, " and ");
322 dump_generic_expr (MSG_MISSED_OPTIMIZATION, TDF_SLIM,
324 dump_printf (MSG_MISSED_OPTIMIZATION, "\n");
327 /* Add to list of ddrs that need to be tested at run-time. */
328 return !vect_mark_for_runtime_alias_test (ddr, loop_vinfo);
331 /* Known data dependence. */
332 if (DDR_NUM_DIST_VECTS (ddr) == 0)
334 /* If user asserted safelen consecutive iterations can be
335 executed concurrently, assume independence. */
336 if (loop->safelen >= 2)
338 if (loop->safelen < *max_vf)
339 *max_vf = loop->safelen;
340 LOOP_VINFO_NO_DATA_DEPENDENCIES (loop_vinfo) = false;
344 if (STMT_VINFO_GATHER_P (stmtinfo_a)
345 || STMT_VINFO_GATHER_P (stmtinfo_b))
347 if (dump_enabled_p ())
349 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
350 "versioning for alias not supported for: "
351 "bad dist vector for ");
352 dump_generic_expr (MSG_MISSED_OPTIMIZATION, TDF_SLIM,
354 dump_printf (MSG_MISSED_OPTIMIZATION, " and ");
355 dump_generic_expr (MSG_MISSED_OPTIMIZATION, TDF_SLIM,
357 dump_printf (MSG_MISSED_OPTIMIZATION, "\n");
362 if (dump_enabled_p ())
364 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
365 "versioning for alias required: "
366 "bad dist vector for ");
367 dump_generic_expr (MSG_MISSED_OPTIMIZATION, TDF_SLIM, DR_REF (dra));
368 dump_printf (MSG_MISSED_OPTIMIZATION, " and ");
369 dump_generic_expr (MSG_MISSED_OPTIMIZATION, TDF_SLIM, DR_REF (drb));
370 dump_printf (MSG_MISSED_OPTIMIZATION, "\n");
372 /* Add to list of ddrs that need to be tested at run-time. */
373 return !vect_mark_for_runtime_alias_test (ddr, loop_vinfo);
376 loop_depth = index_in_loop_nest (loop->num, DDR_LOOP_NEST (ddr));
377 FOR_EACH_VEC_ELT (DDR_DIST_VECTS (ddr), i, dist_v)
379 int dist = dist_v[loop_depth];
381 if (dump_enabled_p ())
382 dump_printf_loc (MSG_NOTE, vect_location,
383 "dependence distance = %d.\n", dist);
387 if (dump_enabled_p ())
389 dump_printf_loc (MSG_NOTE, vect_location,
390 "dependence distance == 0 between ");
391 dump_generic_expr (MSG_NOTE, TDF_SLIM, DR_REF (dra));
392 dump_printf (MSG_NOTE, " and ");
393 dump_generic_expr (MSG_NOTE, TDF_SLIM, DR_REF (drb));
	      dump_printf (MSG_NOTE, "\n");
397 /* When we perform grouped accesses and perform implicit CSE
398 by detecting equal accesses and doing disambiguation with
399 runtime alias tests like for
407 where we will end up loading { a[i], a[i+1] } once, make
408 sure that inserting group loads before the first load and
409 stores after the last store will do the right thing.
410 Similar for groups like
414 where loads from the group interleave with the store. */
415 if (STMT_VINFO_GROUPED_ACCESS (stmtinfo_a)
416 || STMT_VINFO_GROUPED_ACCESS (stmtinfo_b))
	      gimple earlier_stmt
		= get_earlier_stmt (DR_STMT (dra), DR_STMT (drb));
	      if (DR_IS_WRITE
		    (STMT_VINFO_DATA_REF (vinfo_for_stmt (earlier_stmt))))
423 if (dump_enabled_p ())
424 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
425 "READ_WRITE dependence in interleaving."
434 if (dist > 0 && DDR_REVERSED_P (ddr))
436 /* If DDR_REVERSED_P the order of the data-refs in DDR was
437 reversed (to make distance vector positive), and the actual
438 distance is negative. */
439 if (dump_enabled_p ())
440 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
441 "dependence distance negative.\n");
442 /* Record a negative dependence distance to later limit the
443 amount of stmt copying / unrolling we can perform.
444 Only need to handle read-after-write dependence. */
	  if (DR_IS_READ (drb)
	      && (STMT_VINFO_MIN_NEG_DIST (stmtinfo_b) == 0
447 || STMT_VINFO_MIN_NEG_DIST (stmtinfo_b) > (unsigned)dist))
448 STMT_VINFO_MIN_NEG_DIST (stmtinfo_b) = dist;
	  if (abs (dist) >= 2
	      && abs (dist) < *max_vf)
455 /* The dependence distance requires reduction of the maximal
456 vectorization factor. */
457 *max_vf = abs (dist);
458 if (dump_enabled_p ())
459 dump_printf_loc (MSG_NOTE, vect_location,
460 "adjusting maximal vectorization factor to %i\n",
464 if (abs (dist) >= *max_vf)
466 /* Dependence distance does not create dependence, as far as
467 vectorization is concerned, in this case. */
468 if (dump_enabled_p ())
469 dump_printf_loc (MSG_NOTE, vect_location,
470 "dependence distance >= VF.\n");
474 if (dump_enabled_p ())
476 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
477 "not vectorized, possible dependence "
478 "between data-refs ");
      dump_generic_expr (MSG_MISSED_OPTIMIZATION, TDF_SLIM, DR_REF (dra));
      dump_printf (MSG_MISSED_OPTIMIZATION, " and ");
      dump_generic_expr (MSG_MISSED_OPTIMIZATION, TDF_SLIM, DR_REF (drb));
      dump_printf (MSG_MISSED_OPTIMIZATION, "\n");
491 /* Function vect_analyze_data_ref_dependences.
493 Examine all the data references in the loop, and make sure there do not
494 exist any data dependences between them. Set *MAX_VF according to
495 the maximum vectorization factor the data dependences allow. */
498 vect_analyze_data_ref_dependences (loop_vec_info loop_vinfo, int *max_vf)
501 struct data_dependence_relation *ddr;
503 if (dump_enabled_p ())
504 dump_printf_loc (MSG_NOTE, vect_location,
505 "=== vect_analyze_data_ref_dependences ===\n");
507 LOOP_VINFO_NO_DATA_DEPENDENCIES (loop_vinfo) = true;
508 if (!compute_all_dependences (LOOP_VINFO_DATAREFS (loop_vinfo),
509 &LOOP_VINFO_DDRS (loop_vinfo),
510 LOOP_VINFO_LOOP_NEST (loop_vinfo), true))
513 FOR_EACH_VEC_ELT (LOOP_VINFO_DDRS (loop_vinfo), i, ddr)
514 if (vect_analyze_data_ref_dependence (ddr, loop_vinfo, max_vf))
521 /* Function vect_slp_analyze_data_ref_dependence.
   Return TRUE if there (might) exist a dependence between a memory-reference
   DRA and a memory-reference DRB that prevents vectorization of the
   basic block.  Return FALSE otherwise.  */
529 vect_slp_analyze_data_ref_dependence (struct data_dependence_relation *ddr)
531 struct data_reference *dra = DDR_A (ddr);
532 struct data_reference *drb = DDR_B (ddr);
534 /* We need to check dependences of statements marked as unvectorizable
535 as well, they still can prohibit vectorization. */
537 /* Independent data accesses. */
538 if (DDR_ARE_DEPENDENT (ddr) == chrec_known)
544 /* Read-read is OK. */
545 if (DR_IS_READ (dra) && DR_IS_READ (drb))
548 /* If dra and drb are part of the same interleaving chain consider
550 if (STMT_VINFO_GROUPED_ACCESS (vinfo_for_stmt (DR_STMT (dra)))
551 && (GROUP_FIRST_ELEMENT (vinfo_for_stmt (DR_STMT (dra)))
552 == GROUP_FIRST_ELEMENT (vinfo_for_stmt (DR_STMT (drb)))))
555 /* Unknown data dependence. */
556 if (DDR_ARE_DEPENDENT (ddr) == chrec_dont_know)
558 if (dump_enabled_p ())
560 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
561 "can't determine dependence between ");
562 dump_generic_expr (MSG_MISSED_OPTIMIZATION, TDF_SLIM, DR_REF (dra));
563 dump_printf (MSG_MISSED_OPTIMIZATION, " and ");
564 dump_generic_expr (MSG_MISSED_OPTIMIZATION, TDF_SLIM, DR_REF (drb));
565 dump_printf (MSG_MISSED_OPTIMIZATION, "\n");
568 else if (dump_enabled_p ())
570 dump_printf_loc (MSG_NOTE, vect_location,
571 "determined dependence between ");
572 dump_generic_expr (MSG_NOTE, TDF_SLIM, DR_REF (dra));
573 dump_printf (MSG_NOTE, " and ");
574 dump_generic_expr (MSG_NOTE, TDF_SLIM, DR_REF (drb));
575 dump_printf (MSG_NOTE, "\n");
578 /* We do not vectorize basic blocks with write-write dependencies. */
579 if (DR_IS_WRITE (dra) && DR_IS_WRITE (drb))
582 /* If we have a read-write dependence check that the load is before the store.
583 When we vectorize basic blocks, vector load can be only before
584 corresponding scalar load, and vector store can be only after its
   corresponding scalar store.  So the order of the accesses is preserved in
586 case the load is before the store. */
587 gimple earlier_stmt = get_earlier_stmt (DR_STMT (dra), DR_STMT (drb));
588 if (DR_IS_READ (STMT_VINFO_DATA_REF (vinfo_for_stmt (earlier_stmt))))
590 /* That only holds for load-store pairs taking part in vectorization. */
591 if (STMT_VINFO_VECTORIZABLE (vinfo_for_stmt (DR_STMT (dra)))
592 && STMT_VINFO_VECTORIZABLE (vinfo_for_stmt (DR_STMT (drb))))
/* Function vect_slp_analyze_data_ref_dependences.

   Examine all the data references in the basic-block, and make sure there
   do not exist any data dependences between them.  */
607 vect_slp_analyze_data_ref_dependences (bb_vec_info bb_vinfo)
609 struct data_dependence_relation *ddr;
612 if (dump_enabled_p ())
613 dump_printf_loc (MSG_NOTE, vect_location,
614 "=== vect_slp_analyze_data_ref_dependences ===\n");
616 if (!compute_all_dependences (BB_VINFO_DATAREFS (bb_vinfo),
617 &BB_VINFO_DDRS (bb_vinfo),
621 FOR_EACH_VEC_ELT (BB_VINFO_DDRS (bb_vinfo), i, ddr)
622 if (vect_slp_analyze_data_ref_dependence (ddr))
629 /* Function vect_compute_data_ref_alignment
631 Compute the misalignment of the data reference DR.
634 1. If during the misalignment computation it is found that the data reference
635 cannot be vectorized then false is returned.
636 2. DR_MISALIGNMENT (DR) is defined.
638 FOR NOW: No analysis is actually performed. Misalignment is calculated
639 only for trivial cases. TODO. */
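/* As an illustration: for a 16-byte aligned array "int a[N]" accessed as
   a[i+1] in the loop and a 16-byte vector type, the computed misalignment
   is 4 bytes; if the alignment of the base cannot be determined,
   DR_MISALIGNMENT stays -1 (unknown).  The names are only illustrative.  */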
642 vect_compute_data_ref_alignment (struct data_reference *dr)
644 gimple stmt = DR_STMT (dr);
645 stmt_vec_info stmt_info = vinfo_for_stmt (stmt);
646 loop_vec_info loop_vinfo = STMT_VINFO_LOOP_VINFO (stmt_info);
647 struct loop *loop = NULL;
648 tree ref = DR_REF (dr);
650 tree base, base_addr;
653 unsigned HOST_WIDE_INT alignment;
655 if (dump_enabled_p ())
656 dump_printf_loc (MSG_NOTE, vect_location,
657 "vect_compute_data_ref_alignment:\n");
660 loop = LOOP_VINFO_LOOP (loop_vinfo);
662 /* Initialize misalignment to unknown. */
663 SET_DR_MISALIGNMENT (dr, -1);
665 /* Strided loads perform only component accesses, misalignment information
666 is irrelevant for them. */
667 if (STMT_VINFO_STRIDE_LOAD_P (stmt_info))
670 misalign = DR_INIT (dr);
671 aligned_to = DR_ALIGNED_TO (dr);
672 base_addr = DR_BASE_ADDRESS (dr);
673 vectype = STMT_VINFO_VECTYPE (stmt_info);
675 /* In case the dataref is in an inner-loop of the loop that is being
676 vectorized (LOOP), we use the base and misalignment information
677 relative to the outer-loop (LOOP). This is ok only if the misalignment
678 stays the same throughout the execution of the inner-loop, which is why
     we have to check that the stride of the dataref in the inner-loop is
     a multiple of the vector size.  */
681 if (loop && nested_in_vect_loop_p (loop, stmt))
683 tree step = DR_STEP (dr);
684 HOST_WIDE_INT dr_step = TREE_INT_CST_LOW (step);
686 if (dr_step % GET_MODE_SIZE (TYPE_MODE (vectype)) == 0)
688 if (dump_enabled_p ())
689 dump_printf_loc (MSG_NOTE, vect_location,
690 "inner step divides the vector-size.\n");
691 misalign = STMT_VINFO_DR_INIT (stmt_info);
692 aligned_to = STMT_VINFO_DR_ALIGNED_TO (stmt_info);
693 base_addr = STMT_VINFO_DR_BASE_ADDRESS (stmt_info);
697 if (dump_enabled_p ())
698 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
699 "inner step doesn't divide the vector-size.\n");
700 misalign = NULL_TREE;
704 /* Similarly, if we're doing basic-block vectorization, we can only use
705 base and misalignment information relative to an innermost loop if the
706 misalignment stays the same throughout the execution of the loop.
     As above, this is the case if the stride of the dataref is a multiple
     of the vector size.  */
711 tree step = DR_STEP (dr);
712 HOST_WIDE_INT dr_step = TREE_INT_CST_LOW (step);
714 if (dr_step % GET_MODE_SIZE (TYPE_MODE (vectype)) != 0)
716 if (dump_enabled_p ())
717 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
718 "SLP: step doesn't divide the vector-size.\n");
719 misalign = NULL_TREE;
723 /* To look at alignment of the base we have to preserve an inner MEM_REF
724 as that carries alignment information of the actual access. */
726 while (handled_component_p (base))
727 base = TREE_OPERAND (base, 0);
728 if (TREE_CODE (base) == MEM_REF)
729 base = build2 (MEM_REF, TREE_TYPE (base), base_addr,
730 build_int_cst (TREE_TYPE (TREE_OPERAND (base, 1)), 0));
731 unsigned int base_alignment = get_object_alignment (base);
733 if (base_alignment >= TYPE_ALIGN (TREE_TYPE (vectype)))
734 DR_VECT_AUX (dr)->base_element_aligned = true;
736 alignment = TYPE_ALIGN_UNIT (vectype);
  if ((compare_tree_int (aligned_to, alignment) < 0)
      || !misalign)
741 if (dump_enabled_p ())
743 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
744 "Unknown alignment for access: ");
745 dump_generic_expr (MSG_MISSED_OPTIMIZATION, TDF_SLIM, ref);
746 dump_printf (MSG_MISSED_OPTIMIZATION, "\n");
751 if (base_alignment < TYPE_ALIGN (vectype))
753 /* Strip an inner MEM_REF to a bare decl if possible. */
754 if (TREE_CODE (base) == MEM_REF
755 && integer_zerop (TREE_OPERAND (base, 1))
756 && TREE_CODE (TREE_OPERAND (base, 0)) == ADDR_EXPR)
757 base = TREE_OPERAND (TREE_OPERAND (base, 0), 0);
759 if (!vect_can_force_dr_alignment_p (base, TYPE_ALIGN (vectype)))
761 if (dump_enabled_p ())
763 dump_printf_loc (MSG_NOTE, vect_location,
764 "can't force alignment of ref: ");
765 dump_generic_expr (MSG_NOTE, TDF_SLIM, ref);
766 dump_printf (MSG_NOTE, "\n");
771 /* Force the alignment of the decl.
772 NOTE: This is the only change to the code we make during
773 the analysis phase, before deciding to vectorize the loop. */
774 if (dump_enabled_p ())
776 dump_printf_loc (MSG_NOTE, vect_location, "force alignment of ");
777 dump_generic_expr (MSG_NOTE, TDF_SLIM, ref);
778 dump_printf (MSG_NOTE, "\n");
781 DR_VECT_AUX (dr)->base_decl = base;
782 DR_VECT_AUX (dr)->base_misaligned = true;
783 DR_VECT_AUX (dr)->base_element_aligned = true;
786 /* If this is a backward running DR then first access in the larger
787 vectype actually is N-1 elements before the address in the DR.
788 Adjust misalign accordingly. */
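  /* For instance, with a V4SI vector type and DR_STEP == -4, the offset
     computed below is 3 * -4 == -12 bytes, so a reference whose DR_INIT
     misalignment was 0 ends up with misalignment floor-mod 16 == 4.  */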
789 if (tree_int_cst_sgn (DR_STEP (dr)) < 0)
791 tree offset = ssize_int (TYPE_VECTOR_SUBPARTS (vectype) - 1);
792 /* DR_STEP(dr) is the same as -TYPE_SIZE of the scalar type,
793 otherwise we wouldn't be here. */
794 offset = fold_build2 (MULT_EXPR, ssizetype, offset, DR_STEP (dr));
795 /* PLUS because DR_STEP was negative. */
796 misalign = size_binop (PLUS_EXPR, misalign, offset);
799 SET_DR_MISALIGNMENT (dr,
800 wi::mod_floor (misalign, alignment, SIGNED).to_uhwi ());
802 if (dump_enabled_p ())
804 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
805 "misalign = %d bytes of ref ", DR_MISALIGNMENT (dr));
806 dump_generic_expr (MSG_MISSED_OPTIMIZATION, TDF_SLIM, ref);
807 dump_printf (MSG_MISSED_OPTIMIZATION, "\n");
814 /* Function vect_compute_data_refs_alignment
816 Compute the misalignment of data references in the loop.
817 Return FALSE if a data reference is found that cannot be vectorized. */
820 vect_compute_data_refs_alignment (loop_vec_info loop_vinfo,
821 bb_vec_info bb_vinfo)
823 vec<data_reference_p> datarefs;
824 struct data_reference *dr;
828 datarefs = LOOP_VINFO_DATAREFS (loop_vinfo);
830 datarefs = BB_VINFO_DATAREFS (bb_vinfo);
832 FOR_EACH_VEC_ELT (datarefs, i, dr)
833 if (STMT_VINFO_VECTORIZABLE (vinfo_for_stmt (DR_STMT (dr)))
834 && !vect_compute_data_ref_alignment (dr))
838 /* Mark unsupported statement as unvectorizable. */
839 STMT_VINFO_VECTORIZABLE (vinfo_for_stmt (DR_STMT (dr))) = false;
850 /* Function vect_update_misalignment_for_peel
852 DR - the data reference whose misalignment is to be adjusted.
853 DR_PEEL - the data reference whose misalignment is being made
854 zero in the vector loop by the peel.
855 NPEEL - the number of iterations in the peel loop if the misalignment
856 of DR_PEEL is known at compile time. */
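/* For instance, if NPEEL is 2 and both DR and DR_PEEL access 4-byte
   elements with a forward step, a DR whose misalignment was 4 bytes
   becomes (4 + 2 * 4) & 15 == 12 with respect to a 16-byte vector
   alignment; the numbers only illustrate the arithmetic done below.  */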
859 vect_update_misalignment_for_peel (struct data_reference *dr,
860 struct data_reference *dr_peel, int npeel)
863 vec<dr_p> same_align_drs;
864 struct data_reference *current_dr;
865 int dr_size = GET_MODE_SIZE (TYPE_MODE (TREE_TYPE (DR_REF (dr))));
866 int dr_peel_size = GET_MODE_SIZE (TYPE_MODE (TREE_TYPE (DR_REF (dr_peel))));
867 stmt_vec_info stmt_info = vinfo_for_stmt (DR_STMT (dr));
868 stmt_vec_info peel_stmt_info = vinfo_for_stmt (DR_STMT (dr_peel));
870 /* For interleaved data accesses the step in the loop must be multiplied by
871 the size of the interleaving group. */
872 if (STMT_VINFO_GROUPED_ACCESS (stmt_info))
873 dr_size *= GROUP_SIZE (vinfo_for_stmt (GROUP_FIRST_ELEMENT (stmt_info)));
874 if (STMT_VINFO_GROUPED_ACCESS (peel_stmt_info))
875 dr_peel_size *= GROUP_SIZE (peel_stmt_info);
877 /* It can be assumed that the data refs with the same alignment as dr_peel
878 are aligned in the vector loop. */
  same_align_drs
    = STMT_VINFO_SAME_ALIGN_REFS (vinfo_for_stmt (DR_STMT (dr_peel)));
881 FOR_EACH_VEC_ELT (same_align_drs, i, current_dr)
883 if (current_dr != dr)
885 gcc_assert (DR_MISALIGNMENT (dr) / dr_size ==
886 DR_MISALIGNMENT (dr_peel) / dr_peel_size);
887 SET_DR_MISALIGNMENT (dr, 0);
891 if (known_alignment_for_access_p (dr)
892 && known_alignment_for_access_p (dr_peel))
894 bool negative = tree_int_cst_compare (DR_STEP (dr), size_zero_node) < 0;
895 int misal = DR_MISALIGNMENT (dr);
896 tree vectype = STMT_VINFO_VECTYPE (stmt_info);
897 misal += negative ? -npeel * dr_size : npeel * dr_size;
898 misal &= (TYPE_ALIGN (vectype) / BITS_PER_UNIT) - 1;
899 SET_DR_MISALIGNMENT (dr, misal);
903 if (dump_enabled_p ())
904 dump_printf_loc (MSG_NOTE, vect_location, "Setting misalignment to -1.\n");
905 SET_DR_MISALIGNMENT (dr, -1);
909 /* Function vect_verify_datarefs_alignment
911 Return TRUE if all data references in the loop can be
912 handled with respect to alignment. */
915 vect_verify_datarefs_alignment (loop_vec_info loop_vinfo, bb_vec_info bb_vinfo)
917 vec<data_reference_p> datarefs;
918 struct data_reference *dr;
919 enum dr_alignment_support supportable_dr_alignment;
923 datarefs = LOOP_VINFO_DATAREFS (loop_vinfo);
925 datarefs = BB_VINFO_DATAREFS (bb_vinfo);
927 FOR_EACH_VEC_ELT (datarefs, i, dr)
929 gimple stmt = DR_STMT (dr);
930 stmt_vec_info stmt_info = vinfo_for_stmt (stmt);
932 if (!STMT_VINFO_RELEVANT_P (stmt_info))
935 /* For interleaving, only the alignment of the first access matters.
936 Skip statements marked as not vectorizable. */
937 if ((STMT_VINFO_GROUPED_ACCESS (stmt_info)
938 && GROUP_FIRST_ELEMENT (stmt_info) != stmt)
939 || !STMT_VINFO_VECTORIZABLE (stmt_info))
942 /* Strided loads perform only component accesses, alignment is
943 irrelevant for them. */
944 if (STMT_VINFO_STRIDE_LOAD_P (stmt_info))
947 supportable_dr_alignment = vect_supportable_dr_alignment (dr, false);
948 if (!supportable_dr_alignment)
950 if (dump_enabled_p ())
	      if (DR_IS_READ (dr))
		dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
				 "not vectorized: unsupported unaligned load.");
	      else
		dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
				 "not vectorized: unsupported unaligned "
				 "store.");
960 dump_generic_expr (MSG_MISSED_OPTIMIZATION, TDF_SLIM,
962 dump_printf (MSG_MISSED_OPTIMIZATION, "\n");
966 if (supportable_dr_alignment != dr_aligned && dump_enabled_p ())
967 dump_printf_loc (MSG_NOTE, vect_location,
968 "Vectorizing an unaligned access.\n");
/* Given a memory reference EXP return whether its alignment is less
   than its size.  */
977 not_size_aligned (tree exp)
979 if (!tree_fits_uhwi_p (TYPE_SIZE (TREE_TYPE (exp))))
982 return (tree_to_uhwi (TYPE_SIZE (TREE_TYPE (exp)))
983 > get_object_alignment (exp));
986 /* Function vector_alignment_reachable_p
988 Return true if vector alignment for DR is reachable by peeling
989 a few loop iterations. Return false otherwise. */
992 vector_alignment_reachable_p (struct data_reference *dr)
994 gimple stmt = DR_STMT (dr);
995 stmt_vec_info stmt_info = vinfo_for_stmt (stmt);
996 tree vectype = STMT_VINFO_VECTYPE (stmt_info);
998 if (STMT_VINFO_GROUPED_ACCESS (stmt_info))
      /* For interleaved accesses we peel only if the number of iterations
	 in the prolog loop ({VF - misalignment}) is a multiple of the
	 number of interleaved accesses (the group size).  */
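      /* E.g. with V4SI vectors (4 elements), a misalignment of one element
	 and a group of size 2: the prolog would run 4 - 1 == 3 iterations,
	 which is not a multiple of 2, so peeling is not attempted.  */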
1003 int elem_size, mis_in_elements;
1004 int nelements = TYPE_VECTOR_SUBPARTS (vectype);
1006 /* FORNOW: handle only known alignment. */
1007 if (!known_alignment_for_access_p (dr))
1010 elem_size = GET_MODE_SIZE (TYPE_MODE (vectype)) / nelements;
1011 mis_in_elements = DR_MISALIGNMENT (dr) / elem_size;
1013 if ((nelements - mis_in_elements) % GROUP_SIZE (stmt_info))
  /* If misalignment is known at compile time then allow peeling
1018 only if natural alignment is reachable through peeling. */
1019 if (known_alignment_for_access_p (dr) && !aligned_access_p (dr))
1021 HOST_WIDE_INT elmsize =
1022 int_cst_value (TYPE_SIZE_UNIT (TREE_TYPE (vectype)));
1023 if (dump_enabled_p ())
1025 dump_printf_loc (MSG_NOTE, vect_location,
1026 "data size =" HOST_WIDE_INT_PRINT_DEC, elmsize);
1027 dump_printf (MSG_NOTE,
1028 ". misalignment = %d.\n", DR_MISALIGNMENT (dr));
1030 if (DR_MISALIGNMENT (dr) % elmsize)
1032 if (dump_enabled_p ())
1033 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
1034 "data size does not divide the misalignment.\n");
1039 if (!known_alignment_for_access_p (dr))
1041 tree type = TREE_TYPE (DR_REF (dr));
1042 bool is_packed = not_size_aligned (DR_REF (dr));
1043 if (dump_enabled_p ())
1044 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
1045 "Unknown misalignment, is_packed = %d\n",is_packed);
1046 if ((TYPE_USER_ALIGN (type) && !is_packed)
1047 || targetm.vectorize.vector_alignment_reachable (type, is_packed))
1057 /* Calculate the cost of the memory access represented by DR. */
1060 vect_get_data_access_cost (struct data_reference *dr,
1061 unsigned int *inside_cost,
1062 unsigned int *outside_cost,
1063 stmt_vector_for_cost *body_cost_vec)
1065 gimple stmt = DR_STMT (dr);
1066 stmt_vec_info stmt_info = vinfo_for_stmt (stmt);
1067 int nunits = TYPE_VECTOR_SUBPARTS (STMT_VINFO_VECTYPE (stmt_info));
1068 loop_vec_info loop_vinfo = STMT_VINFO_LOOP_VINFO (stmt_info);
1069 int vf = LOOP_VINFO_VECT_FACTOR (loop_vinfo);
1070 int ncopies = vf / nunits;
1072 if (DR_IS_READ (dr))
1073 vect_get_load_cost (dr, ncopies, true, inside_cost, outside_cost,
1074 NULL, body_cost_vec, false);
1076 vect_get_store_cost (dr, ncopies, inside_cost, body_cost_vec);
1078 if (dump_enabled_p ())
1079 dump_printf_loc (MSG_NOTE, vect_location,
1080 "vect_get_data_access_cost: inside_cost = %d, "
1081 "outside_cost = %d.\n", *inside_cost, *outside_cost);
1085 /* Insert DR into peeling hash table with NPEEL as key. */
1088 vect_peeling_hash_insert (loop_vec_info loop_vinfo, struct data_reference *dr,
1091 struct _vect_peel_info elem, *slot;
1092 _vect_peel_info **new_slot;
1093 bool supportable_dr_alignment = vect_supportable_dr_alignment (dr, true);
1096 slot = LOOP_VINFO_PEELING_HTAB (loop_vinfo)->find (&elem);
1101 slot = XNEW (struct _vect_peel_info);
1102 slot->npeel = npeel;
      new_slot
	= LOOP_VINFO_PEELING_HTAB (loop_vinfo)->find_slot (slot, INSERT);
      *new_slot = slot;
1110 if (!supportable_dr_alignment
1111 && unlimited_cost_model (LOOP_VINFO_LOOP (loop_vinfo)))
1112 slot->count += VECT_MAX_COST;
1116 /* Traverse peeling hash table to find peeling option that aligns maximum
1117 number of data accesses. */
1120 vect_peeling_hash_get_most_frequent (_vect_peel_info **slot,
1121 _vect_peel_extended_info *max)
1123 vect_peel_info elem = *slot;
1125 if (elem->count > max->peel_info.count
1126 || (elem->count == max->peel_info.count
1127 && max->peel_info.npeel > elem->npeel))
1129 max->peel_info.npeel = elem->npeel;
1130 max->peel_info.count = elem->count;
1131 max->peel_info.dr = elem->dr;
1138 /* Traverse peeling hash table and calculate cost for each peeling option.
1139 Find the one with the lowest cost. */
1142 vect_peeling_hash_get_lowest_cost (_vect_peel_info **slot,
1143 _vect_peel_extended_info *min)
1145 vect_peel_info elem = *slot;
1146 int save_misalignment, dummy;
1147 unsigned int inside_cost = 0, outside_cost = 0, i;
1148 gimple stmt = DR_STMT (elem->dr);
1149 stmt_vec_info stmt_info = vinfo_for_stmt (stmt);
1150 loop_vec_info loop_vinfo = STMT_VINFO_LOOP_VINFO (stmt_info);
1151 vec<data_reference_p> datarefs = LOOP_VINFO_DATAREFS (loop_vinfo);
1152 struct data_reference *dr;
1153 stmt_vector_for_cost prologue_cost_vec, body_cost_vec, epilogue_cost_vec;
1155 prologue_cost_vec.create (2);
1156 body_cost_vec.create (2);
1157 epilogue_cost_vec.create (2);
1159 FOR_EACH_VEC_ELT (datarefs, i, dr)
1161 stmt = DR_STMT (dr);
1162 stmt_info = vinfo_for_stmt (stmt);
1163 /* For interleaving, only the alignment of the first access
1165 if (STMT_VINFO_GROUPED_ACCESS (stmt_info)
1166 && GROUP_FIRST_ELEMENT (stmt_info) != stmt)
1169 save_misalignment = DR_MISALIGNMENT (dr);
1170 vect_update_misalignment_for_peel (dr, elem->dr, elem->npeel);
1171 vect_get_data_access_cost (dr, &inside_cost, &outside_cost,
1173 SET_DR_MISALIGNMENT (dr, save_misalignment);
1176 auto_vec<stmt_info_for_cost> scalar_cost_vec;
1177 vect_get_single_scalar_iteration_cost (loop_vinfo, &scalar_cost_vec);
1178 outside_cost += vect_get_known_peeling_cost
1179 (loop_vinfo, elem->npeel, &dummy,
1180 &scalar_cost_vec, &prologue_cost_vec, &epilogue_cost_vec);
1182 /* Prologue and epilogue costs are added to the target model later.
1183 These costs depend only on the scalar iteration cost, the
1184 number of peeling iterations finally chosen, and the number of
1185 misaligned statements. So discard the information found here. */
1186 prologue_cost_vec.release ();
1187 epilogue_cost_vec.release ();
1189 if (inside_cost < min->inside_cost
1190 || (inside_cost == min->inside_cost && outside_cost < min->outside_cost))
1192 min->inside_cost = inside_cost;
1193 min->outside_cost = outside_cost;
1194 min->body_cost_vec.release ();
1195 min->body_cost_vec = body_cost_vec;
1196 min->peel_info.dr = elem->dr;
1197 min->peel_info.npeel = elem->npeel;
1200 body_cost_vec.release ();
1206 /* Choose best peeling option by traversing peeling hash table and either
1207 choosing an option with the lowest cost (if cost model is enabled) or the
1208 option that aligns as many accesses as possible. */
1210 static struct data_reference *
1211 vect_peeling_hash_choose_best_peeling (loop_vec_info loop_vinfo,
1212 unsigned int *npeel,
1213 stmt_vector_for_cost *body_cost_vec)
1215 struct _vect_peel_extended_info res;
1217 res.peel_info.dr = NULL;
1218 res.body_cost_vec = stmt_vector_for_cost ();
1220 if (!unlimited_cost_model (LOOP_VINFO_LOOP (loop_vinfo)))
1222 res.inside_cost = INT_MAX;
1223 res.outside_cost = INT_MAX;
1224 LOOP_VINFO_PEELING_HTAB (loop_vinfo)
1225 ->traverse <_vect_peel_extended_info *,
1226 vect_peeling_hash_get_lowest_cost> (&res);
1230 res.peel_info.count = 0;
1231 LOOP_VINFO_PEELING_HTAB (loop_vinfo)
1232 ->traverse <_vect_peel_extended_info *,
1233 vect_peeling_hash_get_most_frequent> (&res);
1236 *npeel = res.peel_info.npeel;
1237 *body_cost_vec = res.body_cost_vec;
1238 return res.peel_info.dr;
1242 /* Function vect_enhance_data_refs_alignment
1244 This pass will use loop versioning and loop peeling in order to enhance
1245 the alignment of data references in the loop.
1247 FOR NOW: we assume that whatever versioning/peeling takes place, only the
1248 original loop is to be vectorized. Any other loops that are created by
1249 the transformations performed in this pass - are not supposed to be
1250 vectorized. This restriction will be relaxed.
1252 This pass will require a cost model to guide it whether to apply peeling
1253 or versioning or a combination of the two. For example, the scheme that
   Intel uses when given a loop with several memory accesses, is as follows:
   choose one memory access ('p') whose alignment you want to force by doing
1256 peeling. Then, either (1) generate a loop in which 'p' is aligned and all
1257 other accesses are not necessarily aligned, or (2) use loop versioning to
1258 generate one loop in which all accesses are aligned, and another loop in
1259 which only 'p' is necessarily aligned.
1261 ("Automatic Intra-Register Vectorization for the Intel Architecture",
   Aart J.C. Bik, Milind Girkar, Paul M. Grey and Xinmin Tian, International
1263 Journal of Parallel Programming, Vol. 30, No. 2, April 2002.)
1265 Devising a cost model is the most critical aspect of this work. It will
1266 guide us on which access to peel for, whether to use loop versioning, how
1267 many versions to create, etc. The cost model will probably consist of
1268 generic considerations as well as target specific considerations (on
   powerpc for example, misaligned stores are more painful than misaligned
   loads).
1272 Here are the general steps involved in alignment enhancements:
1274 -- original loop, before alignment analysis:
1275 for (i=0; i<N; i++){
1276 x = q[i]; # DR_MISALIGNMENT(q) = unknown
1277 p[i] = y; # DR_MISALIGNMENT(p) = unknown
1280 -- After vect_compute_data_refs_alignment:
1281 for (i=0; i<N; i++){
1282 x = q[i]; # DR_MISALIGNMENT(q) = 3
1283 p[i] = y; # DR_MISALIGNMENT(p) = unknown
1286 -- Possibility 1: we do loop versioning:
1288 for (i=0; i<N; i++){ # loop 1A
1289 x = q[i]; # DR_MISALIGNMENT(q) = 3
1290 p[i] = y; # DR_MISALIGNMENT(p) = 0
1294 for (i=0; i<N; i++){ # loop 1B
1295 x = q[i]; # DR_MISALIGNMENT(q) = 3
1296 p[i] = y; # DR_MISALIGNMENT(p) = unaligned
1300 -- Possibility 2: we do loop peeling:
1301 for (i = 0; i < 3; i++){ # (scalar loop, not to be vectorized).
1305 for (i = 3; i < N; i++){ # loop 2A
1306 x = q[i]; # DR_MISALIGNMENT(q) = 0
1307 p[i] = y; # DR_MISALIGNMENT(p) = unknown
1310 -- Possibility 3: combination of loop peeling and versioning:
1311 for (i = 0; i < 3; i++){ # (scalar loop, not to be vectorized).
1316 for (i = 3; i<N; i++){ # loop 3A
1317 x = q[i]; # DR_MISALIGNMENT(q) = 0
1318 p[i] = y; # DR_MISALIGNMENT(p) = 0
1322 for (i = 3; i<N; i++){ # loop 3B
1323 x = q[i]; # DR_MISALIGNMENT(q) = 0
1324 p[i] = y; # DR_MISALIGNMENT(p) = unaligned
1328 These loops are later passed to loop_transform to be vectorized. The
1329 vectorizer will use the alignment information to guide the transformation
   (whether to generate regular loads/stores, or with special handling for
   misalignment).  */

bool
1334 vect_enhance_data_refs_alignment (loop_vec_info loop_vinfo)
1336 vec<data_reference_p> datarefs = LOOP_VINFO_DATAREFS (loop_vinfo);
1337 struct loop *loop = LOOP_VINFO_LOOP (loop_vinfo);
1338 enum dr_alignment_support supportable_dr_alignment;
1339 struct data_reference *dr0 = NULL, *first_store = NULL;
1340 struct data_reference *dr;
1342 bool do_peeling = false;
1343 bool do_versioning = false;
1346 stmt_vec_info stmt_info;
1347 unsigned int npeel = 0;
1348 bool all_misalignments_unknown = true;
1349 unsigned int vf = LOOP_VINFO_VECT_FACTOR (loop_vinfo);
1350 unsigned possible_npeel_number = 1;
1352 unsigned int nelements, mis, same_align_drs_max = 0;
1353 stmt_vector_for_cost body_cost_vec = stmt_vector_for_cost ();
1355 if (dump_enabled_p ())
1356 dump_printf_loc (MSG_NOTE, vect_location,
1357 "=== vect_enhance_data_refs_alignment ===\n");
1359 /* While cost model enhancements are expected in the future, the high level
1360 view of the code at this time is as follows:
1362 A) If there is a misaligned access then see if peeling to align
1363 this access can make all data references satisfy
1364 vect_supportable_dr_alignment. If so, update data structures
1365 as needed and return true.
1367 B) If peeling wasn't possible and there is a data reference with an
1368 unknown misalignment that does not satisfy vect_supportable_dr_alignment
1369 then see if loop versioning checks can be used to make all data
1370 references satisfy vect_supportable_dr_alignment. If so, update
1371 data structures as needed and return true.
1373 C) If neither peeling nor versioning were successful then return false if
1374 any data reference does not satisfy vect_supportable_dr_alignment.
1376 D) Return true (all data references satisfy vect_supportable_dr_alignment).
1378 Note, Possibility 3 above (which is peeling and versioning together) is not
1379 being done at this time. */
1381 /* (1) Peeling to force alignment. */
1383 /* (1.1) Decide whether to perform peeling, and how many iterations to peel:
1385 + How many accesses will become aligned due to the peeling
1386 - How many accesses will become unaligned due to the peeling,
1387 and the cost of misaligned accesses.
     - The cost of peeling (the extra runtime checks, the increase
       in code size).  */
1391 FOR_EACH_VEC_ELT (datarefs, i, dr)
1393 stmt = DR_STMT (dr);
1394 stmt_info = vinfo_for_stmt (stmt);
1396 if (!STMT_VINFO_RELEVANT_P (stmt_info))
1399 /* For interleaving, only the alignment of the first access
1401 if (STMT_VINFO_GROUPED_ACCESS (stmt_info)
1402 && GROUP_FIRST_ELEMENT (stmt_info) != stmt)
1405 /* For invariant accesses there is nothing to enhance. */
1406 if (integer_zerop (DR_STEP (dr)))
1409 /* Strided loads perform only component accesses, alignment is
1410 irrelevant for them. */
1411 if (STMT_VINFO_STRIDE_LOAD_P (stmt_info))
1414 supportable_dr_alignment = vect_supportable_dr_alignment (dr, true);
1415 do_peeling = vector_alignment_reachable_p (dr);
1418 if (known_alignment_for_access_p (dr))
1420 unsigned int npeel_tmp;
1421 bool negative = tree_int_cst_compare (DR_STEP (dr),
1422 size_zero_node) < 0;
1424 /* Save info about DR in the hash table. */
1425 if (!LOOP_VINFO_PEELING_HTAB (loop_vinfo))
1426 LOOP_VINFO_PEELING_HTAB (loop_vinfo)
1427 = new hash_table<peel_info_hasher> (1);
1429 vectype = STMT_VINFO_VECTYPE (stmt_info);
1430 nelements = TYPE_VECTOR_SUBPARTS (vectype);
1431 mis = DR_MISALIGNMENT (dr) / GET_MODE_SIZE (TYPE_MODE (
1432 TREE_TYPE (DR_REF (dr))));
1433 npeel_tmp = (negative
1434 ? (mis - nelements) : (nelements - mis))
1437 /* For multiple types, it is possible that the bigger type access
1438 will have more than one peeling option. E.g., a loop with two
1439 types: one of size (vector size / 4), and the other one of
	 size (vector size / 8).  The vectorization factor will be 8.  If both
	 accesses are misaligned by 3, the first one needs one scalar
	 iteration to be aligned, and the second one needs 5.  But the
	 first one will also be aligned by peeling 5 scalar
1444 iterations, and in that case both accesses will be aligned.
1445 Hence, except for the immediate peeling amount, we also want
1446 to try to add full vector size, while we don't exceed
1447 vectorization factor.
	 We do this automatically for the cost model, since we calculate the cost
1449 for every peeling option. */
1450 if (unlimited_cost_model (LOOP_VINFO_LOOP (loop_vinfo)))
	possible_npeel_number = vf / nelements;
1453 /* Handle the aligned case. We may decide to align some other
1454 access, making DR unaligned. */
1455 if (DR_MISALIGNMENT (dr) == 0)
1458 if (unlimited_cost_model (LOOP_VINFO_LOOP (loop_vinfo)))
1459 possible_npeel_number++;
1462 for (j = 0; j < possible_npeel_number; j++)
1464 gcc_assert (npeel_tmp <= vf);
1465 vect_peeling_hash_insert (loop_vinfo, dr, npeel_tmp);
1466 npeel_tmp += nelements;
1469 all_misalignments_unknown = false;
1470 /* Data-ref that was chosen for the case that all the
1471 misalignments are unknown is not relevant anymore, since we
1472 have a data-ref with known alignment. */
1477 /* If we don't know any misalignment values, we prefer
1478 peeling for data-ref that has the maximum number of data-refs
1479 with the same alignment, unless the target prefers to align
	 stores over loads.  */
1481 if (all_misalignments_unknown)
1483 unsigned same_align_drs
1484 = STMT_VINFO_SAME_ALIGN_REFS (stmt_info).length ();
	  if (!dr0
	      || same_align_drs_max < same_align_drs)
	      same_align_drs_max = same_align_drs;
	      dr0 = dr;
1491 /* For data-refs with the same number of related
1492 accesses prefer the one where the misalign
1493 computation will be invariant in the outermost loop. */
1494 else if (same_align_drs_max == same_align_drs)
1496 struct loop *ivloop0, *ivloop;
1497 ivloop0 = outermost_invariant_loop_for_expr
1498 (loop, DR_BASE_ADDRESS (dr0));
1499 ivloop = outermost_invariant_loop_for_expr
1500 (loop, DR_BASE_ADDRESS (dr));
1501 if ((ivloop && !ivloop0)
1502 || (ivloop && ivloop0
1503 && flow_loop_nested_p (ivloop, ivloop0)))
1507 if (!first_store && DR_IS_WRITE (dr))
1511 /* If there are both known and unknown misaligned accesses in the
	 loop, we choose peeling amount according to the known
	 misaligned accesses.  */
1514 if (!supportable_dr_alignment)
1517 if (!first_store && DR_IS_WRITE (dr))
1524 if (!aligned_access_p (dr))
1526 if (dump_enabled_p ())
1527 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
1528 "vector alignment may not be reachable\n");
1534 /* Check if we can possibly peel the loop. */
1535 if (!vect_can_advance_ivs_p (loop_vinfo)
      || !slpeel_can_duplicate_loop_p (loop, single_exit (loop)))
    do_peeling = false;
1539 /* If we don't know how many times the peeling loop will run
1540 assume it will run VF-1 times and disable peeling if the remaining
1541 iters are less than the vectorization factor. */
  if (do_peeling
      && all_misalignments_unknown
1544 && LOOP_VINFO_NITERS_KNOWN_P (loop_vinfo)
1545 && (LOOP_VINFO_INT_NITERS (loop_vinfo)
1546 < 2 * (unsigned) LOOP_VINFO_VECT_FACTOR (loop_vinfo) - 1))
    do_peeling = false;

  if (do_peeling
      && all_misalignments_unknown
1551 && vect_supportable_dr_alignment (dr0, false))
      /* Check whether the target prefers aligning stores over loads, i.e., if
1554 misaligned stores are more expensive than misaligned loads (taking
1555 drs with same alignment into account). */
1556 if (first_store && DR_IS_READ (dr0))
1558 unsigned int load_inside_cost = 0, load_outside_cost = 0;
1559 unsigned int store_inside_cost = 0, store_outside_cost = 0;
1560 unsigned int load_inside_penalty = 0, load_outside_penalty = 0;
1561 unsigned int store_inside_penalty = 0, store_outside_penalty = 0;
1562 stmt_vector_for_cost dummy;
1565 vect_get_data_access_cost (dr0, &load_inside_cost, &load_outside_cost,
1567 vect_get_data_access_cost (first_store, &store_inside_cost,
1568 &store_outside_cost, &dummy);
1572 /* Calculate the penalty for leaving FIRST_STORE unaligned (by
1573 aligning the load DR0). */
1574 load_inside_penalty = store_inside_cost;
1575 load_outside_penalty = store_outside_cost;
1577 STMT_VINFO_SAME_ALIGN_REFS (vinfo_for_stmt (
1578 DR_STMT (first_store))).iterate (i, &dr);
1580 if (DR_IS_READ (dr))
1582 load_inside_penalty += load_inside_cost;
1583 load_outside_penalty += load_outside_cost;
1587 load_inside_penalty += store_inside_cost;
1588 load_outside_penalty += store_outside_cost;
1591 /* Calculate the penalty for leaving DR0 unaligned (by
1592 aligning the FIRST_STORE). */
1593 store_inside_penalty = load_inside_cost;
1594 store_outside_penalty = load_outside_cost;
1596 STMT_VINFO_SAME_ALIGN_REFS (vinfo_for_stmt (
1597 DR_STMT (dr0))).iterate (i, &dr);
1599 if (DR_IS_READ (dr))
1601 store_inside_penalty += load_inside_cost;
1602 store_outside_penalty += load_outside_cost;
1606 store_inside_penalty += store_inside_cost;
1607 store_outside_penalty += store_outside_cost;
1610 if (load_inside_penalty > store_inside_penalty
1611 || (load_inside_penalty == store_inside_penalty
1612 && load_outside_penalty > store_outside_penalty))
1616 /* In case there are only loads with different unknown misalignments, use
1617 peeling only if it may help to align other accesses in the loop. */
  if (!first_store
      && !STMT_VINFO_SAME_ALIGN_REFS (
1620 vinfo_for_stmt (DR_STMT (dr0))).length ()
1621 && vect_supportable_dr_alignment (dr0, false)
1622 != dr_unaligned_supported)
1626 if (do_peeling && !dr0)
      /* Peeling is possible, but no data access strictly requires alignment
	 to be vectorizable.  So we try to choose the best possible peeling.  */
1631 /* We should get here only if there are drs with known misalignment. */
1632 gcc_assert (!all_misalignments_unknown);
1634 /* Choose the best peeling from the hash table. */
1635 dr0 = vect_peeling_hash_choose_best_peeling (loop_vinfo, &npeel,
1640 /* If peeling by npeel will result in a remaining loop not iterating
1641 enough to be vectorized then do not peel. */
1643 && LOOP_VINFO_NITERS_KNOWN_P (loop_vinfo)
1644 && (LOOP_VINFO_INT_NITERS (loop_vinfo)
1645 < LOOP_VINFO_VECT_FACTOR (loop_vinfo) + npeel))
1651 stmt = DR_STMT (dr0);
1652 stmt_info = vinfo_for_stmt (stmt);
1653 vectype = STMT_VINFO_VECTYPE (stmt_info);
1654 nelements = TYPE_VECTOR_SUBPARTS (vectype);
1656 if (known_alignment_for_access_p (dr0))
1658 bool negative = tree_int_cst_compare (DR_STEP (dr0),
1659 size_zero_node) < 0;
1662 /* Since it's known at compile time, compute the number of
1663 iterations in the peeled loop (the peeling factor) for use in
1664 updating DR_MISALIGNMENT values. The peeling factor is the
	     vectorization factor minus the misalignment as an element
	     count.  */
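	  /* For instance, with V4SI vectors (4 elements of 4 bytes) and
	     DR_MISALIGNMENT (dr0) == 8 for a forward access, mis is
	     8 / 4 == 2 elements and npeel is (4 - 2) & 3 == 2 scalar
	     iterations; these numbers only illustrate the computation.  */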
1667 mis = DR_MISALIGNMENT (dr0);
1668 mis /= GET_MODE_SIZE (TYPE_MODE (TREE_TYPE (DR_REF (dr0))));
	  npeel = ((negative ? mis - nelements : nelements - mis)
		   & (nelements - 1));
1673 /* For interleaved data access every iteration accesses all the
1674 members of the group, therefore we divide the number of iterations
1675 by the group size. */
1676 stmt_info = vinfo_for_stmt (DR_STMT (dr0));
1677 if (STMT_VINFO_GROUPED_ACCESS (stmt_info))
1678 npeel /= GROUP_SIZE (stmt_info);
1680 if (dump_enabled_p ())
1681 dump_printf_loc (MSG_NOTE, vect_location,
1682 "Try peeling by %d\n", npeel);
1685 /* Ensure that all data refs can be vectorized after the peel. */
1686 FOR_EACH_VEC_ELT (datarefs, i, dr)
1688 int save_misalignment;
1693 stmt = DR_STMT (dr);
1694 stmt_info = vinfo_for_stmt (stmt);
1695 /* For interleaving, only the alignment of the first access
1697 if (STMT_VINFO_GROUPED_ACCESS (stmt_info)
1698 && GROUP_FIRST_ELEMENT (stmt_info) != stmt)
1701 /* Strided loads perform only component accesses, alignment is
1702 irrelevant for them. */
1703 if (STMT_VINFO_STRIDE_LOAD_P (stmt_info))
1706 save_misalignment = DR_MISALIGNMENT (dr);
1707 vect_update_misalignment_for_peel (dr, dr0, npeel);
1708 supportable_dr_alignment = vect_supportable_dr_alignment (dr, false);
1709 SET_DR_MISALIGNMENT (dr, save_misalignment);
1711 if (!supportable_dr_alignment)
1718 if (do_peeling && known_alignment_for_access_p (dr0) && npeel == 0)
1720 stat = vect_verify_datarefs_alignment (loop_vinfo, NULL);
1725 body_cost_vec.release ();
1732 unsigned max_allowed_peel
1733 = PARAM_VALUE (PARAM_VECT_MAX_PEELING_FOR_ALIGNMENT);
1734 if (max_allowed_peel != (unsigned)-1)
1736 unsigned max_peel = npeel;
1739 gimple dr_stmt = DR_STMT (dr0);
1740 stmt_vec_info vinfo = vinfo_for_stmt (dr_stmt);
1741 tree vtype = STMT_VINFO_VECTYPE (vinfo);
1742 max_peel = TYPE_VECTOR_SUBPARTS (vtype) - 1;
1744 if (max_peel > max_allowed_peel)
1747 if (dump_enabled_p ())
1748 dump_printf_loc (MSG_NOTE, vect_location,
1749 "Disable peeling, max peels reached: %d\n", max_peel);
1756 /* (1.2) Update the DR_MISALIGNMENT of each data reference DR_i.
1757 If the misalignment of DR_i is identical to that of dr0 then set
1758 DR_MISALIGNMENT (DR_i) to zero. If the misalignment of DR_i and
1759 dr0 are known at compile time then increment DR_MISALIGNMENT (DR_i)
1760 by the peeling factor times the element size of DR_i (MOD the
1761 vectorization factor times the size). Otherwise, the
1762 misalignment of DR_i must be set to unknown. */
1763 FOR_EACH_VEC_ELT (datarefs, i, dr)
1765 vect_update_misalignment_for_peel (dr, dr0, npeel);
1767 LOOP_VINFO_UNALIGNED_DR (loop_vinfo) = dr0;
1769 LOOP_VINFO_PEELING_FOR_ALIGNMENT (loop_vinfo) = npeel;
1771 LOOP_VINFO_PEELING_FOR_ALIGNMENT (loop_vinfo)
1772 = DR_MISALIGNMENT (dr0);
1773 SET_DR_MISALIGNMENT (dr0, 0);
1774 if (dump_enabled_p ())
1776 dump_printf_loc (MSG_NOTE, vect_location,
1777 "Alignment of access forced using peeling.\n");
1778 dump_printf_loc (MSG_NOTE, vect_location,
1779 "Peeling for alignment will be applied.\n");
1781 /* The inside-loop cost will be accounted for in vectorizable_load
1782 and vectorizable_store correctly with adjusted alignments.
	 Drop the body_cost_vec on the floor here.  */
1784 body_cost_vec.release ();
1786 stat = vect_verify_datarefs_alignment (loop_vinfo, NULL);
1792 body_cost_vec.release ();
1794 /* (2) Versioning to force alignment. */
1796 /* Try versioning if:
1797 1) optimize loop for speed
     2) there is at least one unsupported misaligned data ref with an unknown
        misalignment, and
1800 3) all misaligned data refs with a known misalignment are supported, and
1801 4) the number of runtime alignment checks is within reason. */
  do_versioning =
	optimize_loop_nest_for_speed_p (loop)
1805 && (!loop->inner); /* FORNOW */
1809 FOR_EACH_VEC_ELT (datarefs, i, dr)
1811 stmt = DR_STMT (dr);
1812 stmt_info = vinfo_for_stmt (stmt);
1814 /* For interleaving, only the alignment of the first access
1816 if (aligned_access_p (dr)
1817 || (STMT_VINFO_GROUPED_ACCESS (stmt_info)
1818 && GROUP_FIRST_ELEMENT (stmt_info) != stmt))
1821 /* Strided loads perform only component accesses, alignment is
1822 irrelevant for them. */
1823 if (STMT_VINFO_STRIDE_LOAD_P (stmt_info))
1826 supportable_dr_alignment = vect_supportable_dr_alignment (dr, false);
1828 if (!supportable_dr_alignment)
1834 if (known_alignment_for_access_p (dr)
1835 || LOOP_VINFO_MAY_MISALIGN_STMTS (loop_vinfo).length ()
1836 >= (unsigned) PARAM_VALUE (PARAM_VECT_MAX_VERSION_FOR_ALIGNMENT_CHECKS))
1838 do_versioning = false;
1842 stmt = DR_STMT (dr);
1843 vectype = STMT_VINFO_VECTYPE (vinfo_for_stmt (stmt));
1844 gcc_assert (vectype);
1846 /* The rightmost bits of an aligned address must be zeros.
1847 Construct the mask needed for this test. For example,
1848 GET_MODE_SIZE for the vector mode V4SI is 16 bytes so the
1849 mask must be 15 = 0xf. */
1850 mask = GET_MODE_SIZE (TYPE_MODE (vectype)) - 1;
1852 /* FORNOW: use the same mask to test all potentially unaligned
1853 references in the loop. The vectorizer currently supports
1854 a single vector size, see the reference to
1855 GET_MODE_NUNITS (TYPE_MODE (vectype)) where the
1856 vectorization factor is computed. */
1857 gcc_assert (!LOOP_VINFO_PTR_MASK (loop_vinfo)
1858 || LOOP_VINFO_PTR_MASK (loop_vinfo) == mask);
1859 LOOP_VINFO_PTR_MASK (loop_vinfo) = mask;
          LOOP_VINFO_MAY_MISALIGN_STMTS (loop_vinfo).safe_push (
			  DR_STMT (dr));
1865 /* Versioning requires at least one misaligned data reference. */
1866 if (!LOOP_REQUIRES_VERSIONING_FOR_ALIGNMENT (loop_vinfo))
1867 do_versioning = false;
1868 else if (!do_versioning)
1869 LOOP_VINFO_MAY_MISALIGN_STMTS (loop_vinfo).truncate (0);
1874 vec<gimple> may_misalign_stmts
1875 = LOOP_VINFO_MAY_MISALIGN_STMTS (loop_vinfo);
1878 /* It can now be assumed that the data references in the statements
1879 in LOOP_VINFO_MAY_MISALIGN_STMTS will be aligned in the version
1880 of the loop being vectorized. */
1881 FOR_EACH_VEC_ELT (may_misalign_stmts, i, stmt)
1883 stmt_vec_info stmt_info = vinfo_for_stmt (stmt);
1884 dr = STMT_VINFO_DATA_REF (stmt_info);
1885 SET_DR_MISALIGNMENT (dr, 0);
1886 if (dump_enabled_p ())
1887 dump_printf_loc (MSG_NOTE, vect_location,
1888 "Alignment of access forced using versioning.\n");
1891 if (dump_enabled_p ())
1892 dump_printf_loc (MSG_NOTE, vect_location,
1893 "Versioning for alignment will be applied.\n");
1895 /* Peeling and versioning can't be done together at this time. */
1896 gcc_assert (! (do_peeling && do_versioning));
1898 stat = vect_verify_datarefs_alignment (loop_vinfo, NULL);
1903 /* This point is reached if neither peeling nor versioning is being done. */
1904 gcc_assert (! (do_peeling || do_versioning));
1906 stat = vect_verify_datarefs_alignment (loop_vinfo, NULL);
1911 /* Function vect_find_same_alignment_drs.
1913 Update group and alignment relations according to the chosen
1914 vectorization factor. */
1917 vect_find_same_alignment_drs (struct data_dependence_relation *ddr,
1918 loop_vec_info loop_vinfo)
1921 struct loop *loop = LOOP_VINFO_LOOP (loop_vinfo);
1922 int vectorization_factor = LOOP_VINFO_VECT_FACTOR (loop_vinfo);
1923 struct data_reference *dra = DDR_A (ddr);
1924 struct data_reference *drb = DDR_B (ddr);
1925 stmt_vec_info stmtinfo_a = vinfo_for_stmt (DR_STMT (dra));
1926 stmt_vec_info stmtinfo_b = vinfo_for_stmt (DR_STMT (drb));
1927 int dra_size = GET_MODE_SIZE (TYPE_MODE (TREE_TYPE (DR_REF (dra))));
1928 int drb_size = GET_MODE_SIZE (TYPE_MODE (TREE_TYPE (DR_REF (drb))));
1929 lambda_vector dist_v;
1930 unsigned int loop_depth;
1932 if (DDR_ARE_DEPENDENT (ddr) == chrec_known)
1938 if (DDR_ARE_DEPENDENT (ddr) == chrec_dont_know)
1941 /* Loop-based vectorization and known data dependence. */
1942 if (DDR_NUM_DIST_VECTS (ddr) == 0)
1945 /* Data-dependence analysis reports a distance vector of zero
1946 for data-references that overlap only in the first iteration
1947 but have steps of different sign (see PR45764).
1948 So as a sanity check require equal DR_STEP. */
1949 if (!operand_equal_p (DR_STEP (dra), DR_STEP (drb), 0))
1952 loop_depth = index_in_loop_nest (loop->num, DDR_LOOP_NEST (ddr));
1953 FOR_EACH_VEC_ELT (DDR_DIST_VECTS (ddr), i, dist_v)
1955 int dist = dist_v[loop_depth];
1957 if (dump_enabled_p ())
1958 dump_printf_loc (MSG_NOTE, vect_location,
1959 "dependence distance = %d.\n", dist);
1961 /* Same loop iteration. */
1963 || (dist % vectorization_factor == 0 && dra_size == drb_size))
1965 /* Two references with distance zero have the same alignment. */
1966 STMT_VINFO_SAME_ALIGN_REFS (stmtinfo_a).safe_push (drb);
1967 STMT_VINFO_SAME_ALIGN_REFS (stmtinfo_b).safe_push (dra);
1968 if (dump_enabled_p ())
1970 dump_printf_loc (MSG_NOTE, vect_location,
1971 "accesses have the same alignment.\n");
1972 dump_printf (MSG_NOTE,
1973 "dependence distance modulo vf == 0 between ");
1974 dump_generic_expr (MSG_NOTE, TDF_SLIM, DR_REF (dra));
1975 dump_printf (MSG_NOTE, " and ");
1976 dump_generic_expr (MSG_NOTE, TDF_SLIM, DR_REF (drb));
1977 dump_printf (MSG_NOTE, "\n");
1984 /* Function vect_analyze_data_refs_alignment
1986 Analyze the alignment of the data-references in the loop.
1987 Return FALSE if a data reference is found that cannot be vectorized. */
1990 vect_analyze_data_refs_alignment (loop_vec_info loop_vinfo,
1991 bb_vec_info bb_vinfo)
1993 if (dump_enabled_p ())
1994 dump_printf_loc (MSG_NOTE, vect_location,
1995 "=== vect_analyze_data_refs_alignment ===\n");
1997 /* Mark groups of data references with the same alignment using
1998 data dependence information. */
2001 vec<ddr_p> ddrs = LOOP_VINFO_DDRS (loop_vinfo);
2002 struct data_dependence_relation *ddr;
2005 FOR_EACH_VEC_ELT (ddrs, i, ddr)
2006 vect_find_same_alignment_drs (ddr, loop_vinfo);
2009 if (!vect_compute_data_refs_alignment (loop_vinfo, bb_vinfo))
2011 if (dump_enabled_p ())
2012 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
2013 "not vectorized: can't calculate alignment "
2022 /* Analyze groups of accesses: check that DR belongs to a group of
2023 accesses of legal size, step, etc. Detect gaps, single element
2024 interleaving, and other special cases. Set grouped access info.
2025 Collect groups of strided stores for further use in SLP analysis. */
2028 vect_analyze_group_access (struct data_reference *dr)
2030 tree step = DR_STEP (dr);
2031 tree scalar_type = TREE_TYPE (DR_REF (dr));
2032 HOST_WIDE_INT type_size = TREE_INT_CST_LOW (TYPE_SIZE_UNIT (scalar_type));
2033 gimple stmt = DR_STMT (dr);
2034 stmt_vec_info stmt_info = vinfo_for_stmt (stmt);
2035 loop_vec_info loop_vinfo = STMT_VINFO_LOOP_VINFO (stmt_info);
2036 bb_vec_info bb_vinfo = STMT_VINFO_BB_VINFO (stmt_info);
2037 HOST_WIDE_INT dr_step = TREE_INT_CST_LOW (step);
2038 HOST_WIDE_INT groupsize, last_accessed_element = 1;
2039 bool slp_impossible = false;
2040 struct loop *loop = NULL;
2043 loop = LOOP_VINFO_LOOP (loop_vinfo);
2045 /* For interleaving, GROUPSIZE is STEP counted in elements, i.e., the
2046 size of the interleaving group (including gaps). */
2047 groupsize = absu_hwi (dr_step) / type_size;
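/* For instance, accesses to a[4*i], a[4*i+1] and a[4*i+3] of 4-byte
   elements have DR_STEP 16 and TYPE_SIZE 4, so the groupsize is 4, with
   one element of the group being a gap.  */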
2049 /* A non-consecutive access is possible only if it is part of an interleaving group. */
2050 if (!GROUP_FIRST_ELEMENT (vinfo_for_stmt (stmt)))
2052 /* Check if this DR is a part of interleaving, and is a single
2053 element of the group that is accessed in the loop. */
2055 /* Gaps are supported only for loads. STEP must be a multiple of the type
2056 size. The size of the group must be a power of 2. */
2058 && (dr_step % type_size) == 0
2060 && exact_log2 (groupsize) != -1)
2062 GROUP_FIRST_ELEMENT (vinfo_for_stmt (stmt)) = stmt;
2063 GROUP_SIZE (vinfo_for_stmt (stmt)) = groupsize;
2064 if (dump_enabled_p ())
2066 dump_printf_loc (MSG_NOTE, vect_location,
2067 "Detected single element interleaving ");
2068 dump_generic_expr (MSG_NOTE, TDF_SLIM, DR_REF (dr));
2069 dump_printf (MSG_NOTE, " step ");
2070 dump_generic_expr (MSG_NOTE, TDF_SLIM, step);
2071 dump_printf (MSG_NOTE, "\n");
2076 if (dump_enabled_p ())
2077 dump_printf_loc (MSG_NOTE, vect_location,
2078 "Data access with gaps requires scalar "
2082 if (dump_enabled_p ())
2083 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
2084 "Peeling for outer loop is not"
2089 LOOP_VINFO_PEELING_FOR_GAPS (loop_vinfo) = true;
2095 if (dump_enabled_p ())
2097 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
2098 "not consecutive access ");
2099 dump_gimple_stmt (MSG_MISSED_OPTIMIZATION, TDF_SLIM, stmt, 0);
2100 dump_printf (MSG_MISSED_OPTIMIZATION, "\n");
2105 /* Mark the statement as unvectorizable. */
2106 STMT_VINFO_VECTORIZABLE (vinfo_for_stmt (DR_STMT (dr))) = false;
2113 if (GROUP_FIRST_ELEMENT (vinfo_for_stmt (stmt)) == stmt)
2115 /* First stmt in the interleaving chain. Check the chain. */
2116 gimple next = GROUP_NEXT_ELEMENT (vinfo_for_stmt (stmt));
2117 struct data_reference *data_ref = dr;
2118 unsigned int count = 1;
2119 tree prev_init = DR_INIT (data_ref);
2121 HOST_WIDE_INT diff, gaps = 0;
2122 unsigned HOST_WIDE_INT count_in_bytes;
2126 /* Skip same data-refs. In case that two or more stmts share
2127 data-ref (supported only for loads), we vectorize only the first
2128 stmt, and the rest get their vectorized loads from the first
2130 if (!tree_int_cst_compare (DR_INIT (data_ref),
2131 DR_INIT (STMT_VINFO_DATA_REF (
2132 vinfo_for_stmt (next)))))
2134 if (DR_IS_WRITE (data_ref))
2136 if (dump_enabled_p ())
2137 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
2138 "Two store stmts share the same dr.\n");
2143 /* For loads, use the same data-ref load. */
2143 GROUP_SAME_DR_STMT (vinfo_for_stmt (next)) = prev;
2146 next = GROUP_NEXT_ELEMENT (vinfo_for_stmt (next));
2151 data_ref = STMT_VINFO_DATA_REF (vinfo_for_stmt (next));
2153 /* All group members have the same STEP by construction. */
2154 gcc_checking_assert (operand_equal_p (DR_STEP (data_ref), step, 0));
2156 /* Check that the distance between two accesses is equal to the type
2157 size. Otherwise, we have gaps. */
2158 diff = (TREE_INT_CST_LOW (DR_INIT (data_ref))
2159 - TREE_INT_CST_LOW (prev_init)) / type_size;
2162 /* FORNOW: SLP of accesses with gaps is not supported. */
2163 slp_impossible = true;
2164 if (DR_IS_WRITE (data_ref))
2166 if (dump_enabled_p ())
2167 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
2168 "interleaved store with gaps\n");
2175 last_accessed_element += diff;
2177 /* Store the gap from the previous member of the group. If there is no
2178 gap in the access, GROUP_GAP is always 1. */
2179 GROUP_GAP (vinfo_for_stmt (next)) = diff;
2181 prev_init = DR_INIT (data_ref);
2182 next = GROUP_NEXT_ELEMENT (vinfo_for_stmt (next));
2183 /* Count the number of data-refs in the chain. */
2187 /* COUNT is the number of accesses found; we multiply it by the size of
2188 the type to get COUNT_IN_BYTES. */
2189 count_in_bytes = type_size * count;
2191 /* Check that the size of the interleaving (including gaps) is not
2192 greater than STEP. */
2194 && absu_hwi (dr_step) < count_in_bytes + gaps * type_size)
2196 if (dump_enabled_p ())
2198 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
2199 "interleaving size is greater than step for ");
2200 dump_generic_expr (MSG_MISSED_OPTIMIZATION, TDF_SLIM,
2202 dump_printf (MSG_MISSED_OPTIMIZATION, "\n");
2207 /* Check that the size of the interleaving is equal to STEP for stores,
2208 i.e., that there are no gaps. */
2210 && absu_hwi (dr_step) != count_in_bytes)
2212 if (DR_IS_READ (dr))
2214 slp_impossible = true;
2215 /* There is a gap after the last load in the group. This gap is the
2216 difference between the groupsize and the number of elements.
2217 When there is no gap, this difference should be 0. */
2218 GROUP_GAP (vinfo_for_stmt (stmt)) = groupsize - count;
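/* For example, a load group of size 4 with only 3 distinct members
   recorded gets GROUP_GAP 1 here, i.e. one trailing element of each
   group iteration is not loaded.  */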
2222 if (dump_enabled_p ())
2223 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
2224 "interleaved store with gaps\n");
2229 /* Check that STEP is a multiple of type size. */
2231 && (dr_step % type_size) != 0)
2233 if (dump_enabled_p ())
2235 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
2236 "step is not a multiple of type size: step ");
2237 dump_generic_expr (MSG_MISSED_OPTIMIZATION, TDF_SLIM, step);
2238 dump_printf (MSG_MISSED_OPTIMIZATION, " size ");
2239 dump_generic_expr (MSG_MISSED_OPTIMIZATION, TDF_SLIM,
2240 TYPE_SIZE_UNIT (scalar_type));
2241 dump_printf (MSG_MISSED_OPTIMIZATION, "\n");
2249 GROUP_SIZE (vinfo_for_stmt (stmt)) = groupsize;
2250 if (dump_enabled_p ())
2251 dump_printf_loc (MSG_NOTE, vect_location,
2252 "Detected interleaving of size %d\n", (int)groupsize);
2254 /* SLP: create an SLP data structure for every interleaving group of
2255 stores for further analysis in vect_analyze_slp. */
2256 if (DR_IS_WRITE (dr) && !slp_impossible)
2259 LOOP_VINFO_GROUPED_STORES (loop_vinfo).safe_push (stmt);
2261 BB_VINFO_GROUPED_STORES (bb_vinfo).safe_push (stmt);
2264 /* There is a gap at the end of the group. */
2265 if (groupsize - last_accessed_element > 0 && loop_vinfo)
2267 if (dump_enabled_p ())
2268 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
2269 "Data access with gaps requires scalar "
2273 if (dump_enabled_p ())
2274 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
2275 "Peeling for outer loop is not supported\n");
2279 LOOP_VINFO_PEELING_FOR_GAPS (loop_vinfo) = true;
2287 /* Analyze the access pattern of the data-reference DR.
2288 In case of non-consecutive accesses call vect_analyze_group_access() to
2289 analyze groups of accesses. */
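/* In outline: a zero step is treated as an invariant access, a constant
   step equal to the scalar type size (up to sign) is a simple consecutive
   access, a non-constant step is only acceptable for strided loads, and
   everything else is checked by vect_analyze_group_access.  */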
2292 vect_analyze_data_ref_access (struct data_reference *dr)
2294 tree step = DR_STEP (dr);
2295 tree scalar_type = TREE_TYPE (DR_REF (dr));
2296 gimple stmt = DR_STMT (dr);
2297 stmt_vec_info stmt_info = vinfo_for_stmt (stmt);
2298 loop_vec_info loop_vinfo = STMT_VINFO_LOOP_VINFO (stmt_info);
2299 struct loop *loop = NULL;
2302 loop = LOOP_VINFO_LOOP (loop_vinfo);
2304 if (loop_vinfo && !step)
2306 if (dump_enabled_p ())
2307 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
2308 "bad data-ref access in loop\n");
2312 /* Allow invariant loads in loops that are not nested. */
2313 if (loop_vinfo && integer_zerop (step))
2315 GROUP_FIRST_ELEMENT (vinfo_for_stmt (stmt)) = NULL;
2316 if (nested_in_vect_loop_p (loop, stmt))
2318 if (dump_enabled_p ())
2319 dump_printf_loc (MSG_NOTE, vect_location,
2320 "zero step in inner loop of nest\n");
2323 return DR_IS_READ (dr);
2326 if (loop && nested_in_vect_loop_p (loop, stmt))
2328 /* Interleaved accesses are not yet supported within outer-loop
2329 vectorization for references in the inner-loop. */
2330 GROUP_FIRST_ELEMENT (vinfo_for_stmt (stmt)) = NULL;
2332 /* For the rest of the analysis we use the outer-loop step. */
2333 step = STMT_VINFO_DR_STEP (stmt_info);
2334 if (integer_zerop (step))
2336 if (dump_enabled_p ())
2337 dump_printf_loc (MSG_NOTE, vect_location,
2338 "zero step in outer loop.\n");
2339 if (DR_IS_READ (dr))
2347 if (TREE_CODE (step) == INTEGER_CST)
2349 HOST_WIDE_INT dr_step = TREE_INT_CST_LOW (step);
2350 if (!tree_int_cst_compare (step, TYPE_SIZE_UNIT (scalar_type))
2352 && !compare_tree_int (TYPE_SIZE_UNIT (scalar_type), -dr_step)))
2354 /* Mark that it is not interleaving. */
2355 GROUP_FIRST_ELEMENT (vinfo_for_stmt (stmt)) = NULL;
2360 if (loop && nested_in_vect_loop_p (loop, stmt))
2362 if (dump_enabled_p ())
2363 dump_printf_loc (MSG_NOTE, vect_location,
2364 "grouped access in outer loop.\n");
2368 /* Assume this is a DR handled by the non-constant strided load case. */
2369 if (TREE_CODE (step) != INTEGER_CST)
2370 return STMT_VINFO_STRIDE_LOAD_P (stmt_info);
2372 /* Not a consecutive access - check if it's part of an interleaving group. */
2373 return vect_analyze_group_access (dr);
2378 /* A helper function used in the comparator function to sort data
2379 references. T1 and T2 are two data references to be compared.
2380 The function returns -1, 0, or 1. */
2383 compare_tree (tree t1, tree t2)
2386 enum tree_code code;
2397 if (TREE_CODE (t1) != TREE_CODE (t2))
2398 return TREE_CODE (t1) < TREE_CODE (t2) ? -1 : 1;
2400 code = TREE_CODE (t1);
2403 /* For const values, we can just use hash values for comparisons. */
2411 hashval_t h1 = iterative_hash_expr (t1, 0);
2412 hashval_t h2 = iterative_hash_expr (t2, 0);
2414 return h1 < h2 ? -1 : 1;
2419 cmp = compare_tree (SSA_NAME_VAR (t1), SSA_NAME_VAR (t2));
2423 if (SSA_NAME_VERSION (t1) != SSA_NAME_VERSION (t2))
2424 return SSA_NAME_VERSION (t1) < SSA_NAME_VERSION (t2) ? -1 : 1;
2428 tclass = TREE_CODE_CLASS (code);
2430 /* For var-decl, we could compare their UIDs. */
2431 if (tclass == tcc_declaration)
2433 if (DECL_UID (t1) != DECL_UID (t2))
2434 return DECL_UID (t1) < DECL_UID (t2) ? -1 : 1;
2438 /* For expressions with operands, compare their operands recursively. */
2439 for (i = TREE_OPERAND_LENGTH (t1) - 1; i >= 0; --i)
2441 cmp = compare_tree (TREE_OPERAND (t1, i), TREE_OPERAND (t2, i));
2451 /* Compare two data-references DRA and DRB to group them into chunks
2452 suitable for grouping. */
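/* The resulting order is: by base address, then DR_OFFSET, reads before
   writes, access size, step, DR_INIT and finally stmt UID, so data-refs
   that can form an interleaving group end up adjacent after sorting.  */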
2455 dr_group_sort_cmp (const void *dra_, const void *drb_)
2457 data_reference_p dra = *(data_reference_p *)const_cast<void *>(dra_);
2458 data_reference_p drb = *(data_reference_p *)const_cast<void *>(drb_);
2461 /* Stabilize sort. */
2465 /* Ordering of DRs according to base. */
2466 if (!operand_equal_p (DR_BASE_ADDRESS (dra), DR_BASE_ADDRESS (drb), 0))
2468 cmp = compare_tree (DR_BASE_ADDRESS (dra), DR_BASE_ADDRESS (drb));
2473 /* And according to DR_OFFSET. */
2474 if (!dr_equal_offsets_p (dra, drb))
2476 cmp = compare_tree (DR_OFFSET (dra), DR_OFFSET (drb));
2481 /* Put reads before writes. */
2482 if (DR_IS_READ (dra) != DR_IS_READ (drb))
2483 return DR_IS_READ (dra) ? -1 : 1;
2485 /* Then sort after access size. */
2486 if (!operand_equal_p (TYPE_SIZE_UNIT (TREE_TYPE (DR_REF (dra))),
2487 TYPE_SIZE_UNIT (TREE_TYPE (DR_REF (drb))), 0))
2489 cmp = compare_tree (TYPE_SIZE_UNIT (TREE_TYPE (DR_REF (dra))),
2490 TYPE_SIZE_UNIT (TREE_TYPE (DR_REF (drb))));
2495 /* And after step. */
2496 if (!operand_equal_p (DR_STEP (dra), DR_STEP (drb), 0))
2498 cmp = compare_tree (DR_STEP (dra), DR_STEP (drb));
2503 /* Then sort after DR_INIT. In case of identical DRs sort after stmt UID. */
2504 cmp = tree_int_cst_compare (DR_INIT (dra), DR_INIT (drb));
2506 return gimple_uid (DR_STMT (dra)) < gimple_uid (DR_STMT (drb)) ? -1 : 1;
2510 /* Function vect_analyze_data_ref_accesses.
2512 Analyze the access pattern of all the data references in the loop.
2514 FORNOW: the only access pattern that is considered vectorizable is a
2515 simple step 1 (consecutive) access.
2517 FORNOW: handle only arrays and pointer accesses. */
2520 vect_analyze_data_ref_accesses (loop_vec_info loop_vinfo, bb_vec_info bb_vinfo)
2523 vec<data_reference_p> datarefs;
2524 struct data_reference *dr;
2526 if (dump_enabled_p ())
2527 dump_printf_loc (MSG_NOTE, vect_location,
2528 "=== vect_analyze_data_ref_accesses ===\n");
2531 datarefs = LOOP_VINFO_DATAREFS (loop_vinfo);
2533 datarefs = BB_VINFO_DATAREFS (bb_vinfo);
2535 if (datarefs.is_empty ())
2538 /* Sort the array of datarefs to make building the interleaving chains
2539 linear. Don't modify the original vector's order; it is needed for
2540 determining what dependencies are reversed. */
2541 vec<data_reference_p> datarefs_copy = datarefs.copy ();
2542 datarefs_copy.qsort (dr_group_sort_cmp);
2544 /* Build the interleaving chains. */
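/* A group is represented by its first statement: GROUP_FIRST_ELEMENT of
   every member points to DR_STMT (dra), and the members are chained
   through GROUP_NEXT_ELEMENT in increasing DR_INIT order.  */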
2545 for (i = 0; i < datarefs_copy.length () - 1;)
2547 data_reference_p dra = datarefs_copy[i];
2548 stmt_vec_info stmtinfo_a = vinfo_for_stmt (DR_STMT (dra));
2549 stmt_vec_info lastinfo = NULL;
2550 for (i = i + 1; i < datarefs_copy.length (); ++i)
2552 data_reference_p drb = datarefs_copy[i];
2553 stmt_vec_info stmtinfo_b = vinfo_for_stmt (DR_STMT (drb));
2555 /* ??? Imperfect sorting (non-compatible types, non-modulo
2556 accesses, same accesses) can lead to a group being artificially
2557 split here as we don't just skip over those. If it really
2558 matters we can push those to a worklist and re-iterate
2559 over them. Then we can just skip ahead to the next DR here. */
2561 /* Check that the data-refs have the same first location (except init)
2562 and they are both either store or load (not load and store,
2563 not masked loads or stores). */
2564 if (DR_IS_READ (dra) != DR_IS_READ (drb)
2565 || !operand_equal_p (DR_BASE_ADDRESS (dra),
2566 DR_BASE_ADDRESS (drb), 0)
2567 || !dr_equal_offsets_p (dra, drb)
2568 || !gimple_assign_single_p (DR_STMT (dra))
2569 || !gimple_assign_single_p (DR_STMT (drb)))
2572 /* Check that the data-refs have the same constant size and step. */
2573 tree sza = TYPE_SIZE_UNIT (TREE_TYPE (DR_REF (dra)));
2574 tree szb = TYPE_SIZE_UNIT (TREE_TYPE (DR_REF (drb)));
2575 if (!tree_fits_uhwi_p (sza)
2576 || !tree_fits_uhwi_p (szb)
2577 || !tree_int_cst_equal (sza, szb)
2578 || !tree_fits_shwi_p (DR_STEP (dra))
2579 || !tree_fits_shwi_p (DR_STEP (drb))
2580 || !tree_int_cst_equal (DR_STEP (dra), DR_STEP (drb)))
2583 /* Do not place the same access in the interleaving chain twice. */
2584 if (tree_int_cst_compare (DR_INIT (dra), DR_INIT (drb)) == 0)
2587 /* Check the types are compatible.
2588 ??? We don't distinguish this during sorting. */
2589 if (!types_compatible_p (TREE_TYPE (DR_REF (dra)),
2590 TREE_TYPE (DR_REF (drb))))
2593 /* Sorting has ensured that DR_INIT (dra) <= DR_INIT (drb). */
2594 HOST_WIDE_INT init_a = TREE_INT_CST_LOW (DR_INIT (dra));
2595 HOST_WIDE_INT init_b = TREE_INT_CST_LOW (DR_INIT (drb));
2596 gcc_assert (init_a < init_b);
2598 /* If init_b == init_a + the size of the type * k, we have an
2599 interleaving, and DRA is accessed before DRB. */
2600 HOST_WIDE_INT type_size_a = tree_to_uhwi (sza);
2601 if ((init_b - init_a) % type_size_a != 0)
2604 /* Stop the group here unless the step (if not zero) is greater than the
2605 difference between the data-refs' inits; this splits groups into suitable sizes. */
2606 HOST_WIDE_INT step = tree_to_shwi (DR_STEP (dra));
2607 if (step != 0 && step <= (init_b - init_a))
2610 if (dump_enabled_p ())
2612 dump_printf_loc (MSG_NOTE, vect_location,
2613 "Detected interleaving ");
2614 dump_generic_expr (MSG_NOTE, TDF_SLIM, DR_REF (dra));
2615 dump_printf (MSG_NOTE, " and ");
2616 dump_generic_expr (MSG_NOTE, TDF_SLIM, DR_REF (drb));
2617 dump_printf (MSG_NOTE, "\n");
2620 /* Link the found element into the group list. */
2621 if (!GROUP_FIRST_ELEMENT (stmtinfo_a))
2623 GROUP_FIRST_ELEMENT (stmtinfo_a) = DR_STMT (dra);
2624 lastinfo = stmtinfo_a;
2626 GROUP_FIRST_ELEMENT (stmtinfo_b) = DR_STMT (dra);
2627 GROUP_NEXT_ELEMENT (lastinfo) = DR_STMT (drb);
2628 lastinfo = stmtinfo_b;
2632 FOR_EACH_VEC_ELT (datarefs_copy, i, dr)
2633 if (STMT_VINFO_VECTORIZABLE (vinfo_for_stmt (DR_STMT (dr)))
2634 && !vect_analyze_data_ref_access (dr))
2636 if (dump_enabled_p ())
2637 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
2638 "not vectorized: complicated access pattern.\n");
2642 /* Mark the statement as not vectorizable. */
2643 STMT_VINFO_VECTORIZABLE (vinfo_for_stmt (DR_STMT (dr))) = false;
2648 datarefs_copy.release ();
2653 datarefs_copy.release ();
2658 /* Operator == between two dr_with_seg_len objects.
2660 This equality operator is used to make sure two data refs
2661 are the same one so that we will consider combining the
2662 aliasing checks of those two pairs of data dependent data
2666 operator == (const dr_with_seg_len& d1,
2667 const dr_with_seg_len& d2)
2669 return operand_equal_p (DR_BASE_ADDRESS (d1.dr),
2670 DR_BASE_ADDRESS (d2.dr), 0)
2671 && compare_tree (d1.offset, d2.offset) == 0
2672 && compare_tree (d1.seg_len, d2.seg_len) == 0;
2675 /* Function comp_dr_with_seg_len_pair.
2677 Comparison function for sorting objects of dr_with_seg_len_pair_t
2678 so that we can combine aliasing checks in one scan. */
2681 comp_dr_with_seg_len_pair (const void *p1_, const void *p2_)
2683 const dr_with_seg_len_pair_t* p1 = (const dr_with_seg_len_pair_t *) p1_;
2684 const dr_with_seg_len_pair_t* p2 = (const dr_with_seg_len_pair_t *) p2_;
2686 const dr_with_seg_len &p11 = p1->first,
2691 /* For DR pairs (a, b) and (c, d), we only consider merging the alias checks
2692 if a and c have the same base address and step, and b and d have the same
2693 address and step. Therefore, if either a & c or b & d do not have the same
2694 address and step, we don't care about the order of those two pairs after sorting. */
2697 if ((comp_res = compare_tree (DR_BASE_ADDRESS (p11.dr),
2698 DR_BASE_ADDRESS (p21.dr))) != 0)
2700 if ((comp_res = compare_tree (DR_BASE_ADDRESS (p12.dr),
2701 DR_BASE_ADDRESS (p22.dr))) != 0)
2703 if ((comp_res = compare_tree (DR_STEP (p11.dr), DR_STEP (p21.dr))) != 0)
2705 if ((comp_res = compare_tree (DR_STEP (p12.dr), DR_STEP (p22.dr))) != 0)
2707 if ((comp_res = compare_tree (p11.offset, p21.offset)) != 0)
2709 if ((comp_res = compare_tree (p12.offset, p22.offset)) != 0)
2715 /* Function vect_vfa_segment_size.
2717 Create an expression that computes the size of segment
2718 that will be accessed for a data reference. The function takes into
2719 account that realignment loads may access one more vector.
2722 DR: The data reference.
2723 LENGTH_FACTOR: segment length to consider.
2725 Return an expression whose value is the size of the segment which will be
2729 vect_vfa_segment_size (struct data_reference *dr, tree length_factor)
2731 tree segment_length;
2733 if (integer_zerop (DR_STEP (dr)))
2734 segment_length = TYPE_SIZE_UNIT (TREE_TYPE (DR_REF (dr)));
2736 segment_length = size_binop (MULT_EXPR,
2737 fold_convert (sizetype, DR_STEP (dr)),
2738 fold_convert (sizetype, length_factor));
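/* For example, a data-ref with a 4-byte DR_STEP checked over a LENGTH_FACTOR
   of 8 iterations yields a 32-byte segment; realignment-optimized accesses
   get one extra vector added below.  */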
2740 if (vect_supportable_dr_alignment (dr, false)
2741 == dr_explicit_realign_optimized)
2743 tree vector_size = TYPE_SIZE_UNIT
2744 (STMT_VINFO_VECTYPE (vinfo_for_stmt (DR_STMT (dr))));
2746 segment_length = size_binop (PLUS_EXPR, segment_length, vector_size);
2748 return segment_length;
2751 /* Function vect_prune_runtime_alias_test_list.
2753 Prune a list of ddrs to be tested at run-time by versioning for alias.
2754 Merge several alias checks into one if possible.
2755 Return FALSE if the resulting list of ddrs is longer than allowed by
2756 PARAM_VECT_MAX_VERSION_FOR_ALIAS_CHECKS, otherwise return TRUE. */
2759 vect_prune_runtime_alias_test_list (loop_vec_info loop_vinfo)
2761 vec<ddr_p> may_alias_ddrs =
2762 LOOP_VINFO_MAY_ALIAS_DDRS (loop_vinfo);
2763 vec<dr_with_seg_len_pair_t>& comp_alias_ddrs =
2764 LOOP_VINFO_COMP_ALIAS_DDRS (loop_vinfo);
2765 int vect_factor = LOOP_VINFO_VECT_FACTOR (loop_vinfo);
2766 tree scalar_loop_iters = LOOP_VINFO_NITERS (loop_vinfo);
2772 if (dump_enabled_p ())
2773 dump_printf_loc (MSG_NOTE, vect_location,
2774 "=== vect_prune_runtime_alias_test_list ===\n");
2776 if (may_alias_ddrs.is_empty ())
2779 /* Basically, for each pair of dependent data refs store_ptr_0
2780 and load_ptr_0, we create an expression:
2782 ((store_ptr_0 + store_segment_length_0) <= load_ptr_0)
2783 || (load_ptr_0 + load_segment_length_0) <= store_ptr_0))
2785 for aliasing checks. However, in some cases we can decrease
2786 the number of checks by combining two checks into one. For
2787 example, suppose we have another pair of data refs store_ptr_0
2788 and load_ptr_1, and if the following condition is satisfied:
2790 load_ptr_0 < load_ptr_1 &&
2791 load_ptr_1 - load_ptr_0 - load_segment_length_0 < store_segment_length_0
2793 (this condition means, in each iteration of the vectorized loop,
2794 the accessed memory of store_ptr_0 cannot be between the memory
2795 of load_ptr_0 and load_ptr_1.)
2797 we then can use only the following expression to finish the
2798 aliasing checks between store_ptr_0 & load_ptr_0 and
2799 store_ptr_0 & load_ptr_1:
2801 ((store_ptr_0 + store_segment_length_0) <= load_ptr_0)
2802 || (load_ptr_1 + load_segment_length_1 <= store_ptr_0))
2804 Note that we only consider the case where load_ptr_0 and load_ptr_1 have
2805 the same base address. */
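/* As an illustrative example: checking a store to a[i] against loads from
   b[i] and b[i+1], with 4-byte elements and 16-byte segment lengths, both
   checks share the base of b and their offsets differ by 4 <= 16, so they
   are merged into one check covering a 20-byte segment of b.  */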
2807 comp_alias_ddrs.create (may_alias_ddrs.length ());
2809 /* First, we collect all data ref pairs for aliasing checks. */
2810 FOR_EACH_VEC_ELT (may_alias_ddrs, i, ddr)
2812 struct data_reference *dr_a, *dr_b;
2813 gimple dr_group_first_a, dr_group_first_b;
2814 tree segment_length_a, segment_length_b;
2815 gimple stmt_a, stmt_b;
2818 stmt_a = DR_STMT (DDR_A (ddr));
2819 dr_group_first_a = GROUP_FIRST_ELEMENT (vinfo_for_stmt (stmt_a));
2820 if (dr_group_first_a)
2822 stmt_a = dr_group_first_a;
2823 dr_a = STMT_VINFO_DATA_REF (vinfo_for_stmt (stmt_a));
2827 stmt_b = DR_STMT (DDR_B (ddr));
2828 dr_group_first_b = GROUP_FIRST_ELEMENT (vinfo_for_stmt (stmt_b));
2829 if (dr_group_first_b)
2831 stmt_b = dr_group_first_b;
2832 dr_b = STMT_VINFO_DATA_REF (vinfo_for_stmt (stmt_b));
2835 if (!operand_equal_p (DR_STEP (dr_a), DR_STEP (dr_b), 0))
2836 length_factor = scalar_loop_iters;
2838 length_factor = size_int (vect_factor);
2839 segment_length_a = vect_vfa_segment_size (dr_a, length_factor);
2840 segment_length_b = vect_vfa_segment_size (dr_b, length_factor);
2842 dr_with_seg_len_pair_t dr_with_seg_len_pair
2843 (dr_with_seg_len (dr_a, segment_length_a),
2844 dr_with_seg_len (dr_b, segment_length_b));
2846 if (compare_tree (DR_BASE_ADDRESS (dr_a), DR_BASE_ADDRESS (dr_b)) > 0)
2847 std::swap (dr_with_seg_len_pair.first, dr_with_seg_len_pair.second);
2849 comp_alias_ddrs.safe_push (dr_with_seg_len_pair);
2852 /* Second, we sort the collected data ref pairs so that we can scan
2853 them once to combine all possible aliasing checks. */
2854 comp_alias_ddrs.qsort (comp_dr_with_seg_len_pair);
2856 /* Third, we scan the sorted dr pairs and check if we can combine
2857 alias checks of two neighbouring dr pairs. */
2858 for (size_t i = 1; i < comp_alias_ddrs.length (); ++i)
2860 /* Deal with two ddrs (dr_a1, dr_b1) and (dr_a2, dr_b2). */
2861 dr_with_seg_len *dr_a1 = &comp_alias_ddrs[i-1].first,
2862 *dr_b1 = &comp_alias_ddrs[i-1].second,
2863 *dr_a2 = &comp_alias_ddrs[i].first,
2864 *dr_b2 = &comp_alias_ddrs[i].second;
2866 /* Remove duplicate data ref pairs. */
2867 if (*dr_a1 == *dr_a2 && *dr_b1 == *dr_b2)
2869 if (dump_enabled_p ())
2871 dump_printf_loc (MSG_NOTE, vect_location,
2872 "found equal ranges ");
2873 dump_generic_expr (MSG_NOTE, TDF_SLIM,
2874 DR_REF (dr_a1->dr));
2875 dump_printf (MSG_NOTE, ", ");
2876 dump_generic_expr (MSG_NOTE, TDF_SLIM,
2877 DR_REF (dr_b1->dr));
2878 dump_printf (MSG_NOTE, " and ");
2879 dump_generic_expr (MSG_NOTE, TDF_SLIM,
2880 DR_REF (dr_a2->dr));
2881 dump_printf (MSG_NOTE, ", ");
2882 dump_generic_expr (MSG_NOTE, TDF_SLIM,
2883 DR_REF (dr_b2->dr));
2884 dump_printf (MSG_NOTE, "\n");
2887 comp_alias_ddrs.ordered_remove (i--);
2891 if (*dr_a1 == *dr_a2 || *dr_b1 == *dr_b2)
2893 /* We consider the case that DR_B1 and DR_B2 are the same memrefs,
2894 and DR_A1 and DR_A2 are two consecutive memrefs. */
2895 if (*dr_a1 == *dr_a2)
2897 std::swap (dr_a1, dr_b1);
2898 std::swap (dr_a2, dr_b2);
2901 if (!operand_equal_p (DR_BASE_ADDRESS (dr_a1->dr),
2902 DR_BASE_ADDRESS (dr_a2->dr),
2904 || !tree_fits_shwi_p (dr_a1->offset)
2905 || !tree_fits_shwi_p (dr_a2->offset))
2908 HOST_WIDE_INT diff = (tree_to_shwi (dr_a2->offset)
2909 - tree_to_shwi (dr_a1->offset));
2912 /* Now we check if the following condition is satisfied:
2914 DIFF - SEGMENT_LENGTH_A < SEGMENT_LENGTH_B
2916 where DIFF = DR_A2->OFFSET - DR_A1->OFFSET. However,
2917 SEGMENT_LENGTH_A or SEGMENT_LENGTH_B may not be constant so we
2918 have to make a best estimate. We can get the minimum value
2919 of SEGMENT_LENGTH_B as a constant, represented by MIN_SEG_LEN_B,
2920 then either of the following two conditions can guarantee the
2923 1: DIFF <= MIN_SEG_LEN_B
2924 2: DIFF - SEGMENT_LENGTH_A < MIN_SEG_LEN_B
2928 HOST_WIDE_INT min_seg_len_b = (tree_fits_shwi_p (dr_b1->seg_len)
2929 ? tree_to_shwi (dr_b1->seg_len)
2932 if (diff <= min_seg_len_b
2933 || (tree_fits_shwi_p (dr_a1->seg_len)
2934 && diff - tree_to_shwi (dr_a1->seg_len) < min_seg_len_b))
2936 if (dump_enabled_p ())
2938 dump_printf_loc (MSG_NOTE, vect_location,
2939 "merging ranges for ");
2940 dump_generic_expr (MSG_NOTE, TDF_SLIM,
2941 DR_REF (dr_a1->dr));
2942 dump_printf (MSG_NOTE, ", ");
2943 dump_generic_expr (MSG_NOTE, TDF_SLIM,
2944 DR_REF (dr_b1->dr));
2945 dump_printf (MSG_NOTE, " and ");
2946 dump_generic_expr (MSG_NOTE, TDF_SLIM,
2947 DR_REF (dr_a2->dr));
2948 dump_printf (MSG_NOTE, ", ");
2949 dump_generic_expr (MSG_NOTE, TDF_SLIM,
2950 DR_REF (dr_b2->dr));
2951 dump_printf (MSG_NOTE, "\n");
2954 dr_a1->seg_len = size_binop (PLUS_EXPR,
2955 dr_a2->seg_len, size_int (diff));
2956 comp_alias_ddrs.ordered_remove (i--);
2961 dump_printf_loc (MSG_NOTE, vect_location,
2962 "improved number of alias checks from %d to %d\n",
2963 may_alias_ddrs.length (), comp_alias_ddrs.length ());
2964 if ((int) comp_alias_ddrs.length () >
2965 PARAM_VALUE (PARAM_VECT_MAX_VERSION_FOR_ALIAS_CHECKS))
2971 /* Check whether a non-affine read in stmt is suitable for gather load
2972 and if so, return a builtin decl for that operation. */
2975 vect_check_gather (gimple stmt, loop_vec_info loop_vinfo, tree *basep,
2976 tree *offp, int *scalep)
2978 HOST_WIDE_INT scale = 1, pbitpos, pbitsize;
2979 struct loop *loop = LOOP_VINFO_LOOP (loop_vinfo);
2980 stmt_vec_info stmt_info = vinfo_for_stmt (stmt);
2981 struct data_reference *dr = STMT_VINFO_DATA_REF (stmt_info);
2982 tree offtype = NULL_TREE;
2983 tree decl, base, off;
2985 int punsignedp, pvolatilep;
2988 /* For masked loads/stores, DR_REF (dr) is an artificial MEM_REF,
2989 see if we can use the def stmt of the address. */
2990 if (is_gimple_call (stmt)
2991 && gimple_call_internal_p (stmt)
2992 && (gimple_call_internal_fn (stmt) == IFN_MASK_LOAD
2993 || gimple_call_internal_fn (stmt) == IFN_MASK_STORE)
2994 && TREE_CODE (base) == MEM_REF
2995 && TREE_CODE (TREE_OPERAND (base, 0)) == SSA_NAME
2996 && integer_zerop (TREE_OPERAND (base, 1))
2997 && !expr_invariant_in_loop_p (loop, TREE_OPERAND (base, 0)))
2999 gimple def_stmt = SSA_NAME_DEF_STMT (TREE_OPERAND (base, 0));
3000 if (is_gimple_assign (def_stmt)
3001 && gimple_assign_rhs_code (def_stmt) == ADDR_EXPR)
3002 base = TREE_OPERAND (gimple_assign_rhs1 (def_stmt), 0);
3005 /* The gather builtins need address of the form
3006 loop_invariant + vector * {1, 2, 4, 8}
3008 loop_invariant + sign_extend (vector) * { 1, 2, 4, 8 }.
3009 Unfortunately DR_BASE_ADDRESS/DR_OFFSET can be a mixture
3010 of loop invariants/SSA_NAMEs defined in the loop, with casts,
3011 multiplications and additions in it. To get a vector, we need
3012 a single SSA_NAME that will be defined in the loop and will
3013 contain everything that is not loop invariant and that can be
3014 vectorized. The following code attempts to find such a preexisting
3015 SSA_NAME OFF and put the loop invariants into a tree BASE
3016 that can be gimplified before the loop. */
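/* For instance (illustrative), for a read a[idx[i]] the loop-invariant &a
   ends up in BASE, the SSA_NAME holding idx[i] (possibly behind a widening
   cast) becomes OFF, and SCALE is picked up from the multiplication by the
   element size.  */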
3017 base = get_inner_reference (base, &pbitsize, &pbitpos, &off,
3018 &pmode, &punsignedp, &pvolatilep, false);
3019 gcc_assert (base != NULL_TREE && (pbitpos % BITS_PER_UNIT) == 0);
3021 if (TREE_CODE (base) == MEM_REF)
3023 if (!integer_zerop (TREE_OPERAND (base, 1)))
3025 if (off == NULL_TREE)
3027 offset_int moff = mem_ref_offset (base);
3028 off = wide_int_to_tree (sizetype, moff);
3031 off = size_binop (PLUS_EXPR, off,
3032 fold_convert (sizetype, TREE_OPERAND (base, 1)));
3034 base = TREE_OPERAND (base, 0);
3037 base = build_fold_addr_expr (base);
3039 if (off == NULL_TREE)
3040 off = size_zero_node;
3042 /* If base is not loop invariant, then either off is 0, in which case we
3043 start with just the constant offset in the loop invariant BASE and continue
3044 with base as OFF, or we give up.
3045 We could handle that case by gimplifying the addition of base + off
3046 into some SSA_NAME and using that as off, but for now punt. */
3047 if (!expr_invariant_in_loop_p (loop, base))
3049 if (!integer_zerop (off))
3052 base = size_int (pbitpos / BITS_PER_UNIT);
3054 /* Otherwise put base + constant offset into the loop invariant BASE
3055 and continue with OFF. */
3058 base = fold_convert (sizetype, base);
3059 base = size_binop (PLUS_EXPR, base, size_int (pbitpos / BITS_PER_UNIT));
3062 /* OFF at this point may be either a SSA_NAME or some tree expression
3063 from get_inner_reference. Try to peel off loop invariants from it
3064 into BASE as long as possible. */
3066 while (offtype == NULL_TREE)
3068 enum tree_code code;
3069 tree op0, op1, add = NULL_TREE;
3071 if (TREE_CODE (off) == SSA_NAME)
3073 gimple def_stmt = SSA_NAME_DEF_STMT (off);
3075 if (expr_invariant_in_loop_p (loop, off))
3078 if (gimple_code (def_stmt) != GIMPLE_ASSIGN)
3081 op0 = gimple_assign_rhs1 (def_stmt);
3082 code = gimple_assign_rhs_code (def_stmt);
3083 op1 = gimple_assign_rhs2 (def_stmt);
3087 if (get_gimple_rhs_class (TREE_CODE (off)) == GIMPLE_TERNARY_RHS)
3089 code = TREE_CODE (off);
3090 extract_ops_from_tree (off, &code, &op0, &op1);
3094 case POINTER_PLUS_EXPR:
3096 if (expr_invariant_in_loop_p (loop, op0))
3101 add = fold_convert (sizetype, add);
3103 add = size_binop (MULT_EXPR, add, size_int (scale));
3104 base = size_binop (PLUS_EXPR, base, add);
3107 if (expr_invariant_in_loop_p (loop, op1))
3115 if (expr_invariant_in_loop_p (loop, op1))
3117 add = fold_convert (sizetype, op1);
3118 add = size_binop (MINUS_EXPR, size_zero_node, add);
3124 if (scale == 1 && tree_fits_shwi_p (op1))
3126 scale = tree_to_shwi (op1);
3135 if (!POINTER_TYPE_P (TREE_TYPE (op0))
3136 && !INTEGRAL_TYPE_P (TREE_TYPE (op0)))
3138 if (TYPE_PRECISION (TREE_TYPE (op0))
3139 == TYPE_PRECISION (TREE_TYPE (off)))
3144 if (TYPE_PRECISION (TREE_TYPE (op0))
3145 < TYPE_PRECISION (TREE_TYPE (off)))
3148 offtype = TREE_TYPE (off);
3159 /* If at the end OFF still isn't a SSA_NAME or isn't
3160 defined in the loop, punt. */
3161 if (TREE_CODE (off) != SSA_NAME
3162 || expr_invariant_in_loop_p (loop, off))
3165 if (offtype == NULL_TREE)
3166 offtype = TREE_TYPE (off);
3168 decl = targetm.vectorize.builtin_gather (STMT_VINFO_VECTYPE (stmt_info),
3170 if (decl == NULL_TREE)
3182 /* Function vect_analyze_data_refs.
3184 Find all the data references in the loop or basic block.
3186 The general structure of the analysis of data refs in the vectorizer is as follows:
3188 1- vect_analyze_data_refs(loop/bb): call
3189 compute_data_dependences_for_loop/bb to find and analyze all data-refs
3190 in the loop/bb and their dependences.
3191 2- vect_analyze_dependences(): apply dependence testing using ddrs.
3192 3- vect_analyze_drs_alignment(): check that ref_stmt.alignment is ok.
3193 4- vect_analyze_drs_access(): check that ref_stmt.step is ok.
3198 vect_analyze_data_refs (loop_vec_info loop_vinfo,
3199 bb_vec_info bb_vinfo,
3200 int *min_vf, unsigned *n_stmts)
3202 struct loop *loop = NULL;
3203 basic_block bb = NULL;
3205 vec<data_reference_p> datarefs;
3206 struct data_reference *dr;
3209 if (dump_enabled_p ())
3210 dump_printf_loc (MSG_NOTE, vect_location,
3211 "=== vect_analyze_data_refs ===\n");
3215 basic_block *bbs = LOOP_VINFO_BBS (loop_vinfo);
3217 loop = LOOP_VINFO_LOOP (loop_vinfo);
3218 datarefs = LOOP_VINFO_DATAREFS (loop_vinfo);
3219 if (!find_loop_nest (loop, &LOOP_VINFO_LOOP_NEST (loop_vinfo)))
3221 if (dump_enabled_p ())
3222 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
3223 "not vectorized: loop contains function calls"
3224 " or data references that cannot be analyzed\n");
3228 for (i = 0; i < loop->num_nodes; i++)
3230 gimple_stmt_iterator gsi;
3232 for (gsi = gsi_start_bb (bbs[i]); !gsi_end_p (gsi); gsi_next (&gsi))
3234 gimple stmt = gsi_stmt (gsi);
3235 if (is_gimple_debug (stmt))
3238 if (!find_data_references_in_stmt (loop, stmt, &datarefs))
3240 if (is_gimple_call (stmt) && loop->safelen)
3242 tree fndecl = gimple_call_fndecl (stmt), op;
3243 if (fndecl != NULL_TREE)
3245 struct cgraph_node *node = cgraph_node::get (fndecl);
3246 if (node != NULL && node->simd_clones != NULL)
3248 unsigned int j, n = gimple_call_num_args (stmt);
3249 for (j = 0; j < n; j++)
3251 op = gimple_call_arg (stmt, j);
3253 || (REFERENCE_CLASS_P (op)
3254 && get_base_address (op)))
3257 op = gimple_call_lhs (stmt);
3258 /* Ignore #pragma omp declare simd functions
3259 if they don't have data references in the
3260 call stmt itself. */
3264 || (REFERENCE_CLASS_P (op)
3265 && get_base_address (op)))))
3270 LOOP_VINFO_DATAREFS (loop_vinfo) = datarefs;
3271 if (dump_enabled_p ())
3272 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
3273 "not vectorized: loop contains function "
3274 "calls or data references that cannot "
3281 LOOP_VINFO_DATAREFS (loop_vinfo) = datarefs;
3285 gimple_stmt_iterator gsi;
3287 bb = BB_VINFO_BB (bb_vinfo);
3288 for (gsi = gsi_start_bb (bb); !gsi_end_p (gsi); gsi_next (&gsi))
3290 gimple stmt = gsi_stmt (gsi);
3291 if (is_gimple_debug (stmt))
3294 if (!find_data_references_in_stmt (NULL, stmt,
3295 &BB_VINFO_DATAREFS (bb_vinfo)))
3297 /* Mark the rest of the basic-block as unvectorizable. */
3298 for (; !gsi_end_p (gsi); gsi_next (&gsi))
3300 stmt = gsi_stmt (gsi);
3301 STMT_VINFO_VECTORIZABLE (vinfo_for_stmt (stmt)) = false;
3307 datarefs = BB_VINFO_DATAREFS (bb_vinfo);
3310 /* Go through the data-refs, check that the analysis succeeded. Update
3311 pointer from stmt_vec_info struct to DR and vectype. */
3313 FOR_EACH_VEC_ELT (datarefs, i, dr)
3316 stmt_vec_info stmt_info;
3317 tree base, offset, init;
3318 bool gather = false;
3319 bool simd_lane_access = false;
3323 if (!dr || !DR_REF (dr))
3325 if (dump_enabled_p ())
3326 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
3327 "not vectorized: unhandled data-ref\n");
3331 stmt = DR_STMT (dr);
3332 stmt_info = vinfo_for_stmt (stmt);
3334 /* Discard clobbers from the dataref vector. We will remove
3335 clobber stmts during vectorization. */
3336 if (gimple_clobber_p (stmt))
3339 if (i == datarefs.length () - 1)
3344 datarefs.ordered_remove (i);
3349 /* Check that analysis of the data-ref succeeded. */
3350 if (!DR_BASE_ADDRESS (dr) || !DR_OFFSET (dr) || !DR_INIT (dr)
3355 && !TREE_THIS_VOLATILE (DR_REF (dr))
3356 && targetm.vectorize.builtin_gather != NULL;
3357 bool maybe_simd_lane_access
3358 = loop_vinfo && loop->simduid;
3360 /* If target supports vector gather loads, or if this might be
3361 a SIMD lane access, see if they can be used. */
3363 && (maybe_gather || maybe_simd_lane_access)
3364 && !nested_in_vect_loop_p (loop, stmt))
3366 struct data_reference *newdr
3367 = create_data_ref (NULL, loop_containing_stmt (stmt),
3368 DR_REF (dr), stmt, true);
3369 gcc_assert (newdr != NULL && DR_REF (newdr));
3370 if (DR_BASE_ADDRESS (newdr)
3371 && DR_OFFSET (newdr)
3374 && integer_zerop (DR_STEP (newdr)))
3376 if (maybe_simd_lane_access)
3378 tree off = DR_OFFSET (newdr);
3380 if (TREE_CODE (DR_INIT (newdr)) == INTEGER_CST
3381 && TREE_CODE (off) == MULT_EXPR
3382 && tree_fits_uhwi_p (TREE_OPERAND (off, 1)))
3384 tree step = TREE_OPERAND (off, 1);
3385 off = TREE_OPERAND (off, 0);
3387 if (CONVERT_EXPR_P (off)
3388 && TYPE_PRECISION (TREE_TYPE (TREE_OPERAND (off,
3390 < TYPE_PRECISION (TREE_TYPE (off)))
3391 off = TREE_OPERAND (off, 0);
3392 if (TREE_CODE (off) == SSA_NAME)
3394 gimple def = SSA_NAME_DEF_STMT (off);
3395 tree reft = TREE_TYPE (DR_REF (newdr));
3396 if (is_gimple_call (def)
3397 && gimple_call_internal_p (def)
3398 && (gimple_call_internal_fn (def)
3399 == IFN_GOMP_SIMD_LANE))
3401 tree arg = gimple_call_arg (def, 0);
3402 gcc_assert (TREE_CODE (arg) == SSA_NAME);
3403 arg = SSA_NAME_VAR (arg);
3404 if (arg == loop->simduid
3406 && tree_int_cst_equal
3407 (TYPE_SIZE_UNIT (reft),
3410 DR_OFFSET (newdr) = ssize_int (0);
3411 DR_STEP (newdr) = step;
3412 DR_ALIGNED_TO (newdr)
3413 = size_int (BIGGEST_ALIGNMENT);
3415 simd_lane_access = true;
3421 if (!simd_lane_access && maybe_gather)
3427 if (!gather && !simd_lane_access)
3428 free_data_ref (newdr);
3431 if (!gather && !simd_lane_access)
3433 if (dump_enabled_p ())
3435 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
3436 "not vectorized: data ref analysis "
3438 dump_gimple_stmt (MSG_MISSED_OPTIMIZATION, TDF_SLIM, stmt, 0);
3439 dump_printf (MSG_MISSED_OPTIMIZATION, "\n");
3449 if (TREE_CODE (DR_BASE_ADDRESS (dr)) == INTEGER_CST)
3451 if (dump_enabled_p ())
3452 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
3453 "not vectorized: base addr of dr is a "
3459 if (gather || simd_lane_access)
3464 if (TREE_THIS_VOLATILE (DR_REF (dr)))
3466 if (dump_enabled_p ())
3468 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
3469 "not vectorized: volatile type ");
3470 dump_gimple_stmt (MSG_MISSED_OPTIMIZATION, TDF_SLIM, stmt, 0);
3471 dump_printf (MSG_MISSED_OPTIMIZATION, "\n");
3480 if (stmt_can_throw_internal (stmt))
3482 if (dump_enabled_p ())
3484 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
3485 "not vectorized: statement can throw an "
3487 dump_gimple_stmt (MSG_MISSED_OPTIMIZATION, TDF_SLIM, stmt, 0);
3488 dump_printf (MSG_MISSED_OPTIMIZATION, "\n");
3494 if (gather || simd_lane_access)
3499 if (TREE_CODE (DR_REF (dr)) == COMPONENT_REF
3500 && DECL_BIT_FIELD (TREE_OPERAND (DR_REF (dr), 1)))
3502 if (dump_enabled_p ())
3504 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
3505 "not vectorized: statement is bitfield "
3507 dump_gimple_stmt (MSG_MISSED_OPTIMIZATION, TDF_SLIM, stmt, 0);
3508 dump_printf (MSG_MISSED_OPTIMIZATION, "\n");
3514 if (gather || simd_lane_access)
3519 base = unshare_expr (DR_BASE_ADDRESS (dr));
3520 offset = unshare_expr (DR_OFFSET (dr));
3521 init = unshare_expr (DR_INIT (dr));
3523 if (is_gimple_call (stmt)
3524 && (!gimple_call_internal_p (stmt)
3525 || (gimple_call_internal_fn (stmt) != IFN_MASK_LOAD
3526 && gimple_call_internal_fn (stmt) != IFN_MASK_STORE)))
3528 if (dump_enabled_p ())
3530 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
3531 "not vectorized: dr in a call ");
3532 dump_gimple_stmt (MSG_MISSED_OPTIMIZATION, TDF_SLIM, stmt, 0);
3533 dump_printf (MSG_MISSED_OPTIMIZATION, "\n");
3539 if (gather || simd_lane_access)
3544 /* Update DR field in stmt_vec_info struct. */
3546 /* If the dataref is in an inner-loop of the loop that is considered
3547 for vectorization, we also want to analyze the access relative to
3548 the outer-loop (DR contains information only relative to the
3549 inner-most enclosing loop). We do that by building a reference to the
3550 first location accessed by the inner-loop, and analyze it relative to
3552 if (loop && nested_in_vect_loop_p (loop, stmt))
3554 tree outer_step, outer_base, outer_init;
3555 HOST_WIDE_INT pbitsize, pbitpos;
3558 int punsignedp, pvolatilep;
3559 affine_iv base_iv, offset_iv;
3562 /* Build a reference to the first location accessed by the
3563 inner-loop: *(BASE+INIT). (The first location is actually
3564 BASE+INIT+OFFSET, but we add OFFSET separately later). */
3565 tree inner_base = build_fold_indirect_ref
3566 (fold_build_pointer_plus (base, init));
3568 if (dump_enabled_p ())
3570 dump_printf_loc (MSG_NOTE, vect_location,
3571 "analyze in outer-loop: ");
3572 dump_generic_expr (MSG_NOTE, TDF_SLIM, inner_base);
3573 dump_printf (MSG_NOTE, "\n");
3576 outer_base = get_inner_reference (inner_base, &pbitsize, &pbitpos,
3577 &poffset, &pmode, &punsignedp, &pvolatilep, false);
3578 gcc_assert (outer_base != NULL_TREE);
3580 if (pbitpos % BITS_PER_UNIT != 0)
3582 if (dump_enabled_p ())
3583 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
3584 "failed: bit offset alignment.\n");
3588 outer_base = build_fold_addr_expr (outer_base);
3589 if (!simple_iv (loop, loop_containing_stmt (stmt), outer_base,
3592 if (dump_enabled_p ())
3593 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
3594 "failed: evolution of base is not affine.\n");
3601 poffset = fold_build2 (PLUS_EXPR, TREE_TYPE (offset), offset,
3609 offset_iv.base = ssize_int (0);
3610 offset_iv.step = ssize_int (0);
3612 else if (!simple_iv (loop, loop_containing_stmt (stmt), poffset,
3615 if (dump_enabled_p ())
3616 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
3617 "evolution of offset is not affine.\n");
3621 outer_init = ssize_int (pbitpos / BITS_PER_UNIT);
3622 split_constant_offset (base_iv.base, &base_iv.base, &dinit);
3623 outer_init = size_binop (PLUS_EXPR, outer_init, dinit);
3624 split_constant_offset (offset_iv.base, &offset_iv.base, &dinit);
3625 outer_init = size_binop (PLUS_EXPR, outer_init, dinit);
3627 outer_step = size_binop (PLUS_EXPR,
3628 fold_convert (ssizetype, base_iv.step),
3629 fold_convert (ssizetype, offset_iv.step));
3631 STMT_VINFO_DR_STEP (stmt_info) = outer_step;
3632 /* FIXME: Use canonicalize_base_object_address (base_iv.base); */
3633 STMT_VINFO_DR_BASE_ADDRESS (stmt_info) = base_iv.base;
3634 STMT_VINFO_DR_INIT (stmt_info) = outer_init;
3635 STMT_VINFO_DR_OFFSET (stmt_info) =
3636 fold_convert (ssizetype, offset_iv.base);
3637 STMT_VINFO_DR_ALIGNED_TO (stmt_info) =
3638 size_int (highest_pow2_factor (offset_iv.base));
3640 if (dump_enabled_p ())
3642 dump_printf_loc (MSG_NOTE, vect_location,
3643 "\touter base_address: ");
3644 dump_generic_expr (MSG_NOTE, TDF_SLIM,
3645 STMT_VINFO_DR_BASE_ADDRESS (stmt_info));
3646 dump_printf (MSG_NOTE, "\n\touter offset from base address: ");
3647 dump_generic_expr (MSG_NOTE, TDF_SLIM,
3648 STMT_VINFO_DR_OFFSET (stmt_info));
3649 dump_printf (MSG_NOTE,
3650 "\n\touter constant offset from base address: ");
3651 dump_generic_expr (MSG_NOTE, TDF_SLIM,
3652 STMT_VINFO_DR_INIT (stmt_info));
3653 dump_printf (MSG_NOTE, "\n\touter step: ");
3654 dump_generic_expr (MSG_NOTE, TDF_SLIM,
3655 STMT_VINFO_DR_STEP (stmt_info));
3656 dump_printf (MSG_NOTE, "\n\touter aligned to: ");
3657 dump_generic_expr (MSG_NOTE, TDF_SLIM,
3658 STMT_VINFO_DR_ALIGNED_TO (stmt_info));
3659 dump_printf (MSG_NOTE, "\n");
3663 if (STMT_VINFO_DATA_REF (stmt_info))
3665 if (dump_enabled_p ())
3667 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
3668 "not vectorized: more than one data ref "
3670 dump_gimple_stmt (MSG_MISSED_OPTIMIZATION, TDF_SLIM, stmt, 0);
3671 dump_printf (MSG_MISSED_OPTIMIZATION, "\n");
3677 if (gather || simd_lane_access)
3682 STMT_VINFO_DATA_REF (stmt_info) = dr;
3683 if (simd_lane_access)
3685 STMT_VINFO_SIMD_LANE_ACCESS_P (stmt_info) = true;
3686 free_data_ref (datarefs[i]);
3690 /* Set vectype for STMT. */
3691 scalar_type = TREE_TYPE (DR_REF (dr));
3692 STMT_VINFO_VECTYPE (stmt_info)
3693 = get_vectype_for_scalar_type (scalar_type);
3694 if (!STMT_VINFO_VECTYPE (stmt_info))
3696 if (dump_enabled_p ())
3698 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
3699 "not vectorized: no vectype for stmt: ");
3700 dump_gimple_stmt (MSG_MISSED_OPTIMIZATION, TDF_SLIM, stmt, 0);
3701 dump_printf (MSG_MISSED_OPTIMIZATION, " scalar_type: ");
3702 dump_generic_expr (MSG_MISSED_OPTIMIZATION, TDF_DETAILS,
3704 dump_printf (MSG_MISSED_OPTIMIZATION, "\n");
3710 if (gather || simd_lane_access)
3712 STMT_VINFO_DATA_REF (stmt_info) = NULL;
3720 if (dump_enabled_p ())
3722 dump_printf_loc (MSG_NOTE, vect_location,
3723 "got vectype for stmt: ");
3724 dump_gimple_stmt (MSG_NOTE, TDF_SLIM, stmt, 0);
3725 dump_generic_expr (MSG_NOTE, TDF_SLIM,
3726 STMT_VINFO_VECTYPE (stmt_info));
3727 dump_printf (MSG_NOTE, "\n");
3731 /* Adjust the minimal vectorization factor according to the
3733 vf = TYPE_VECTOR_SUBPARTS (STMT_VINFO_VECTYPE (stmt_info));
3741 gather = 0 != vect_check_gather (stmt, loop_vinfo, NULL, &off, NULL);
3743 && get_vectype_for_scalar_type (TREE_TYPE (off)) == NULL_TREE)
3747 STMT_VINFO_DATA_REF (stmt_info) = NULL;
3749 if (dump_enabled_p ())
3751 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
3752 "not vectorized: not suitable for gather "
3754 dump_gimple_stmt (MSG_MISSED_OPTIMIZATION, TDF_SLIM, stmt, 0);
3755 dump_printf (MSG_MISSED_OPTIMIZATION, "\n");
3761 STMT_VINFO_GATHER_P (stmt_info) = true;
3764 && TREE_CODE (DR_STEP (dr)) != INTEGER_CST)
3766 if (nested_in_vect_loop_p (loop, stmt)
3767 || !DR_IS_READ (dr))
3769 if (dump_enabled_p ())
3771 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
3772 "not vectorized: not suitable for strided "
3774 dump_gimple_stmt (MSG_MISSED_OPTIMIZATION, TDF_SLIM, stmt, 0);
3775 dump_printf (MSG_MISSED_OPTIMIZATION, "\n");
3779 STMT_VINFO_STRIDE_LOAD_P (stmt_info) = true;
3783 /* If we stopped analysis at the first dataref we could not analyze
3784 when trying to vectorize a basic-block, mark the rest of the datarefs
3785 as not vectorizable and truncate the vector of datarefs. That
3786 avoids spending useless time in analyzing their dependence. */
3787 if (i != datarefs.length ())
3789 gcc_assert (bb_vinfo != NULL);
3790 for (unsigned j = i; j < datarefs.length (); ++j)
3792 data_reference_p dr = datarefs[j];
3793 STMT_VINFO_VECTORIZABLE (vinfo_for_stmt (DR_STMT (dr))) = false;
3796 datarefs.truncate (i);
3803 /* Function vect_get_new_vect_var.
3805 Returns a name for a new variable. The current naming scheme appends the
3806 prefix "vect_" or "vect_p" (depending on the value of VAR_KIND) to
3807 the name of vectorizer generated variables, and appends that to NAME if
3811 vect_get_new_vect_var (tree type, enum vect_var_kind var_kind, const char *name)
3818 case vect_simple_var:
3821 case vect_scalar_var:
3824 case vect_pointer_var:
3833 char* tmp = concat (prefix, "_", name, NULL);
3834 new_vect_var = create_tmp_reg (type, tmp);
3838 new_vect_var = create_tmp_reg (type, prefix);
3840 return new_vect_var;
3843 /* Duplicate ptr info and set alignment/misalignment on NAME from DR. */
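/* If DR's misalignment is unknown, the alignment info on NAME is reset to
   unknown as well; otherwise NAME is marked as aligned to the vector type's
   alignment (in bytes) with DR's recorded misalignment.  */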
3846 vect_duplicate_ssa_name_ptr_info (tree name, data_reference *dr,
3847 stmt_vec_info stmt_info)
3849 duplicate_ssa_name_ptr_info (name, DR_PTR_INFO (dr));
3850 unsigned int align = TYPE_ALIGN_UNIT (STMT_VINFO_VECTYPE (stmt_info));
3851 int misalign = DR_MISALIGNMENT (dr);
3853 mark_ptr_info_alignment_unknown (SSA_NAME_PTR_INFO (name));
3855 set_ptr_info_alignment (SSA_NAME_PTR_INFO (name), align, misalign);
3858 /* Function vect_create_addr_base_for_vector_ref.
3860 Create an expression that computes the address of the first memory location
3861 that will be accessed for a data reference.
3864 STMT: The statement containing the data reference.
3865 NEW_STMT_LIST: Must be initialized to NULL_TREE or a statement list.
3866 OFFSET: Optional. If supplied, it is added to the initial address.
3867 LOOP: Specify relative to which loop-nest the address should be computed.
3868 For example, when the dataref is in an inner-loop nested in an
3869 outer-loop that is now being vectorized, LOOP can be either the
3870 outer-loop, or the inner-loop. The first memory location accessed
3871 by the following dataref ('in' points to short):
3878 if LOOP=i_loop: &in (relative to i_loop)
3879 if LOOP=j_loop: &in+i*2B (relative to j_loop)
3880 BYTE_OFFSET: Optional, defaulted to NULL. If supplied, it is added to the
3881 initial address. Unlike OFFSET, which is the number of elements to
3882 be added, BYTE_OFFSET is measured in bytes.
3885 1. Return an SSA_NAME whose value is the address of the memory location of
3886 the first vector of the data reference.
3887 2. If new_stmt_list is not NULL_TREE after return then the caller must insert
3888 these statement(s) which define the returned SSA_NAME.
3890 FORNOW: We are only handling array accesses with step 1. */
3893 vect_create_addr_base_for_vector_ref (gimple stmt,
3894 gimple_seq *new_stmt_list,
3899 stmt_vec_info stmt_info = vinfo_for_stmt (stmt);
3900 struct data_reference *dr = STMT_VINFO_DATA_REF (stmt_info);
3902 const char *base_name;
3905 gimple_seq seq = NULL;
3909 tree step = TYPE_SIZE_UNIT (TREE_TYPE (DR_REF (dr)));
3910 loop_vec_info loop_vinfo = STMT_VINFO_LOOP_VINFO (stmt_info);
3912 if (loop_vinfo && loop && loop != (gimple_bb (stmt))->loop_father)
3914 struct loop *outer_loop = LOOP_VINFO_LOOP (loop_vinfo);
3916 gcc_assert (nested_in_vect_loop_p (outer_loop, stmt));
3918 data_ref_base = unshare_expr (STMT_VINFO_DR_BASE_ADDRESS (stmt_info));
3919 base_offset = unshare_expr (STMT_VINFO_DR_OFFSET (stmt_info));
3920 init = unshare_expr (STMT_VINFO_DR_INIT (stmt_info));
3924 data_ref_base = unshare_expr (DR_BASE_ADDRESS (dr));
3925 base_offset = unshare_expr (DR_OFFSET (dr));
3926 init = unshare_expr (DR_INIT (dr));
3930 base_name = get_name (data_ref_base);
3933 base_offset = ssize_int (0);
3934 init = ssize_int (0);
3935 base_name = get_name (DR_REF (dr));
3938 /* Create base_offset */
3939 base_offset = size_binop (PLUS_EXPR,
3940 fold_convert (sizetype, base_offset),
3941 fold_convert (sizetype, init));
3945 offset = fold_build2 (MULT_EXPR, sizetype,
3946 fold_convert (sizetype, offset), step);
3947 base_offset = fold_build2 (PLUS_EXPR, sizetype,
3948 base_offset, offset);
3952 byte_offset = fold_convert (sizetype, byte_offset);
3953 base_offset = fold_build2 (PLUS_EXPR, sizetype,
3954 base_offset, byte_offset);
3957 /* base + base_offset */
3959 addr_base = fold_build_pointer_plus (data_ref_base, base_offset);
3962 addr_base = build1 (ADDR_EXPR,
3963 build_pointer_type (TREE_TYPE (DR_REF (dr))),
3964 unshare_expr (DR_REF (dr)));
3967 vect_ptr_type = build_pointer_type (STMT_VINFO_VECTYPE (stmt_info));
3968 addr_base = fold_convert (vect_ptr_type, addr_base);
3969 dest = vect_get_new_vect_var (vect_ptr_type, vect_pointer_var, base_name);
3970 addr_base = force_gimple_operand (addr_base, &seq, false, dest);
3971 gimple_seq_add_seq (new_stmt_list, seq);
3973 if (DR_PTR_INFO (dr)
3974 && TREE_CODE (addr_base) == SSA_NAME)
3976 vect_duplicate_ssa_name_ptr_info (addr_base, dr, stmt_info);
3977 if (offset || byte_offset)
3978 mark_ptr_info_alignment_unknown (SSA_NAME_PTR_INFO (addr_base));
3981 if (dump_enabled_p ())
3983 dump_printf_loc (MSG_NOTE, vect_location, "created ");
3984 dump_generic_expr (MSG_NOTE, TDF_SLIM, addr_base);
3985 dump_printf (MSG_NOTE, "\n");
3992 /* Function vect_create_data_ref_ptr.
3994 Create a new pointer-to-AGGR_TYPE variable (ap), that points to the first
3995 location accessed in the loop by STMT, along with the def-use update
3996 chain to appropriately advance the pointer through the loop iterations.
3997 Also set aliasing information for the pointer. This pointer is used by
3998 the callers to this function to create a memory reference expression for
3999 vector load/store access.
4002 1. STMT: a stmt that references memory. Expected to be of the form
4003 GIMPLE_ASSIGN <name, data-ref> or
4004 GIMPLE_ASSIGN <data-ref, name>.
4005 2. AGGR_TYPE: the type of the reference, which should be either a vector
4007 3. AT_LOOP: the loop where the vector memref is to be created.
4008 4. OFFSET (optional): an offset to be added to the initial address accessed
4009 by the data-ref in STMT.
4010 5. BSI: location where the new stmts are to be placed if there is no loop
4011 6. ONLY_INIT: indicate if ap is to be updated in the loop, or remain
4012 pointing to the initial address.
4013 7. BYTE_OFFSET (optional, defaults to NULL): a byte offset to be added
4014 to the initial address accessed by the data-ref in STMT. This is
4015 similar to OFFSET, but OFFSET is counted in elements, while BYTE_OFFSET
4019 1. Declare a new ptr to vector_type, and have it point to the base of the
4020 data reference (initial address accessed by the data reference).
4021 For example, for vector of type V8HI, the following code is generated:
4024 ap = (v8hi *)initial_address;
4026 if OFFSET is not supplied:
4027 initial_address = &a[init];
4028 if OFFSET is supplied:
4029 initial_address = &a[init + OFFSET];
4030 if BYTE_OFFSET is supplied:
4031 initial_address = &a[init] + BYTE_OFFSET;
4033 Return the initial_address in INITIAL_ADDRESS.
4035 2. If ONLY_INIT is true, just return the initial pointer. Otherwise, also
4036 update the pointer in each iteration of the loop.
4038 Return the increment stmt that updates the pointer in PTR_INCR.
4040 3. Set INV_P to true if the access pattern of the data reference in the
4041 vectorized loop is invariant. Set it to false otherwise.
4043 4. Return the pointer. */
4046 vect_create_data_ref_ptr (gimple stmt, tree aggr_type, struct loop *at_loop,
4047 tree offset, tree *initial_address,
4048 gimple_stmt_iterator *gsi, gimple *ptr_incr,
4049 bool only_init, bool *inv_p, tree byte_offset)
4051 const char *base_name;
4052 stmt_vec_info stmt_info = vinfo_for_stmt (stmt);
4053 loop_vec_info loop_vinfo = STMT_VINFO_LOOP_VINFO (stmt_info);
4054 struct loop *loop = NULL;
4055 bool nested_in_vect_loop = false;
4056 struct loop *containing_loop = NULL;
4061 gimple_seq new_stmt_list = NULL;
4065 struct data_reference *dr = STMT_VINFO_DATA_REF (stmt_info);
4067 gimple_stmt_iterator incr_gsi;
4069 tree indx_before_incr, indx_after_incr;
4072 bb_vec_info bb_vinfo = STMT_VINFO_BB_VINFO (stmt_info);
4074 gcc_assert (TREE_CODE (aggr_type) == ARRAY_TYPE
4075 || TREE_CODE (aggr_type) == VECTOR_TYPE);
4079 loop = LOOP_VINFO_LOOP (loop_vinfo);
4080 nested_in_vect_loop = nested_in_vect_loop_p (loop, stmt);
4081 containing_loop = (gimple_bb (stmt))->loop_father;
4082 pe = loop_preheader_edge (loop);
4086 gcc_assert (bb_vinfo);
4091 /* Check the step (evolution) of the load in LOOP, and record
4092 whether it's invariant. */
4093 if (nested_in_vect_loop)
4094 step = STMT_VINFO_DR_STEP (stmt_info);
4096 step = DR_STEP (STMT_VINFO_DATA_REF (stmt_info));
4098 if (integer_zerop (step))
4103 /* Create an expression for the first address accessed by this load in LOOP. */
4105 base_name = get_name (DR_BASE_ADDRESS (dr));
4107 if (dump_enabled_p ())
4109 tree dr_base_type = TREE_TYPE (DR_BASE_OBJECT (dr));
4110 dump_printf_loc (MSG_NOTE, vect_location,
4111 "create %s-pointer variable to type: ",
4112 get_tree_code_name (TREE_CODE (aggr_type)));
4113 dump_generic_expr (MSG_NOTE, TDF_SLIM, aggr_type);
4114 if (TREE_CODE (dr_base_type) == ARRAY_TYPE)
4115 dump_printf (MSG_NOTE, " vectorizing an array ref: ");
4116 else if (TREE_CODE (dr_base_type) == VECTOR_TYPE)
4117 dump_printf (MSG_NOTE, " vectorizing a vector ref: ");
4118 else if (TREE_CODE (dr_base_type) == RECORD_TYPE)
4119 dump_printf (MSG_NOTE, " vectorizing a record based array ref: ");
4121 dump_printf (MSG_NOTE, " vectorizing a pointer ref: ");
4122 dump_generic_expr (MSG_NOTE, TDF_SLIM, DR_BASE_OBJECT (dr));
4123 dump_printf (MSG_NOTE, "\n");
4126 /* (1) Create the new aggregate-pointer variable.
4127 Vector and array types inherit the alias set of their component
4128 type by default so we need to use a ref-all pointer if the data
4129 reference does not conflict with the created aggregated data
4130 reference because it is not addressable. */
4131 bool need_ref_all = false;
4132 if (!alias_sets_conflict_p (get_alias_set (aggr_type),
4133 get_alias_set (DR_REF (dr))))
4134 need_ref_all = true;
4135 /* Likewise for any of the data references in the stmt group. */
4136 else if (STMT_VINFO_GROUP_SIZE (stmt_info) > 1)
4138 gimple orig_stmt = STMT_VINFO_GROUP_FIRST_ELEMENT (stmt_info);
4141 stmt_vec_info sinfo = vinfo_for_stmt (orig_stmt);
4142 struct data_reference *sdr = STMT_VINFO_DATA_REF (sinfo);
4143 if (!alias_sets_conflict_p (get_alias_set (aggr_type),
4144 get_alias_set (DR_REF (sdr))))
4146 need_ref_all = true;
4149 orig_stmt = STMT_VINFO_GROUP_NEXT_ELEMENT (sinfo);
4153 aggr_ptr_type = build_pointer_type_for_mode (aggr_type, ptr_mode,
4155 aggr_ptr = vect_get_new_vect_var (aggr_ptr_type, vect_pointer_var, base_name);
4158 /* Note: If the dataref is in an inner-loop nested in LOOP, and we are
4159 vectorizing LOOP (i.e., outer-loop vectorization), we need to create two
4160 def-use update cycles for the pointer: one relative to the outer-loop
4161 (LOOP), which is what steps (3) and (4) below do. The other is relative
4162 to the inner-loop (which is the inner-most loop containing the dataref),
4163 and this is done by step (5) below.
4165 When vectorizing inner-most loops, the vectorized loop (LOOP) is also the
4166 inner-most loop, and so steps (3),(4) work the same, and step (5) is
4167 redundant. Steps (3),(4) create the following:
4170 LOOP: vp1 = phi(vp0,vp2)
4176 If there is an inner-loop nested in loop, then step (5) will also be
4177 applied, and an additional update in the inner-loop will be created:
4180 LOOP: vp1 = phi(vp0,vp2)
4182 inner: vp3 = phi(vp1,vp4)
4183 vp4 = vp3 + inner_step
4189 /* (2) Calculate the initial address of the aggregate-pointer, and set
4190 the aggregate-pointer to point to it before the loop. */
4192 /* Create: (&(base[init_val+offset]+byte_offset) in the loop preheader. */
4194 new_temp = vect_create_addr_base_for_vector_ref (stmt, &new_stmt_list,
4195 offset, loop, byte_offset);
4200 new_bb = gsi_insert_seq_on_edge_immediate (pe, new_stmt_list);
4201 gcc_assert (!new_bb);
4204 gsi_insert_seq_before (gsi, new_stmt_list, GSI_SAME_STMT);
4207 *initial_address = new_temp;
4209 /* Create: p = (aggr_type *) initial_base */
4210 if (TREE_CODE (new_temp) != SSA_NAME
4211 || !useless_type_conversion_p (aggr_ptr_type, TREE_TYPE (new_temp)))
4213 vec_stmt = gimple_build_assign (aggr_ptr,
4214 fold_convert (aggr_ptr_type, new_temp));
4215 aggr_ptr_init = make_ssa_name (aggr_ptr, vec_stmt);
4216 /* Copy the points-to information if it exists. */
4217 if (DR_PTR_INFO (dr))
4218 vect_duplicate_ssa_name_ptr_info (aggr_ptr_init, dr, stmt_info);
4219 gimple_assign_set_lhs (vec_stmt, aggr_ptr_init);
4222 new_bb = gsi_insert_on_edge_immediate (pe, vec_stmt);
4223 gcc_assert (!new_bb);
4226 gsi_insert_before (gsi, vec_stmt, GSI_SAME_STMT);
4229 aggr_ptr_init = new_temp;
4231 /* (3) Handle the updating of the aggregate-pointer inside the loop.
4232 This is needed when ONLY_INIT is false, and also when AT_LOOP is the
4233 inner-loop nested in LOOP (during outer-loop vectorization). */
4235 /* No update in loop is required. */
4236 if (only_init && (!loop_vinfo || at_loop == loop))
4237 aptr = aggr_ptr_init;
4240 /* The step of the aggregate pointer is the type size. */
4241 tree iv_step = TYPE_SIZE_UNIT (aggr_type);
4242 /* One exception to the above is when the scalar step of the load in
4243 LOOP is zero. In this case the step here is also zero. */
4245 iv_step = size_zero_node;
4246 else if (tree_int_cst_sgn (step) == -1)
4247 iv_step = fold_build1 (NEGATE_EXPR, TREE_TYPE (iv_step), iv_step);
4249 standard_iv_increment_position (loop, &incr_gsi, &insert_after);
4251 create_iv (aggr_ptr_init,
4252 fold_convert (aggr_ptr_type, iv_step),
4253 aggr_ptr, loop, &incr_gsi, insert_after,
4254 &indx_before_incr, &indx_after_incr);
4255 incr = gsi_stmt (incr_gsi);
4256 set_vinfo_for_stmt (incr, new_stmt_vec_info (incr, loop_vinfo, NULL));
4258 /* Copy the points-to information if it exists. */
4259 if (DR_PTR_INFO (dr))
4261 vect_duplicate_ssa_name_ptr_info (indx_before_incr, dr, stmt_info);
4262 vect_duplicate_ssa_name_ptr_info (indx_after_incr, dr, stmt_info);
4267 aptr = indx_before_incr;
4270 if (!nested_in_vect_loop || only_init)
4274 /* (4) Handle the updating of the aggregate-pointer inside the inner-loop
4275 nested in LOOP, if it exists. */
4277 gcc_assert (nested_in_vect_loop);
4280 standard_iv_increment_position (containing_loop, &incr_gsi,
4282 create_iv (aptr, fold_convert (aggr_ptr_type, DR_STEP (dr)), aggr_ptr,
4283 containing_loop, &incr_gsi, insert_after, &indx_before_incr,
4285 incr = gsi_stmt (incr_gsi);
4286 set_vinfo_for_stmt (incr, new_stmt_vec_info (incr, loop_vinfo, NULL));
4288 /* Copy the points-to information if it exists. */
4289 if (DR_PTR_INFO (dr))
4291 vect_duplicate_ssa_name_ptr_info (indx_before_incr, dr, stmt_info);
4292 vect_duplicate_ssa_name_ptr_info (indx_after_incr, dr, stmt_info);
4297 return indx_before_incr;
4304 /* Function bump_vector_ptr
4306 Increment a pointer (to a vector type) by vector-size. If requested,
4307 i.e. if PTR-INCR is given, then also connect the new increment stmt
4308 to the existing def-use update-chain of the pointer, by modifying
4309 the PTR_INCR as illustrated below:
4311 The pointer def-use update-chain before this function:
4312 DATAREF_PTR = phi (p_0, p_2)
4314 PTR_INCR: p_2 = DATAREF_PTR + step
4316 The pointer def-use update-chain after this function:
4317 DATAREF_PTR = phi (p_0, p_2)
4319 NEW_DATAREF_PTR = DATAREF_PTR + BUMP
4321 PTR_INCR: p_2 = NEW_DATAREF_PTR + step
4324 DATAREF_PTR - ssa_name of a pointer (to vector type) that is being updated
4326 PTR_INCR - optional. The stmt that updates the pointer in each iteration of
4327 the loop. The increment amount across iterations is expected to be vector_size.
4329 BSI - location where the new update stmt is to be placed.
4330 STMT - the original scalar memory-access stmt that is being vectorized.
4331 BUMP - optional. The offset by which to bump the pointer. If not given,
4332 the offset is assumed to be vector_size.
4334 Output: Return NEW_DATAREF_PTR as illustrated above. */
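/* A hedged usage sketch (the surrounding variables are assumed, not taken from
   a specific caller): a typical invocation is

     new_ptr = bump_vector_ptr (dataref_ptr, ptr_incr, gsi, stmt, NULL_TREE);

   With BUMP omitted the pointer is advanced by TYPE_SIZE_UNIT (vectype),
   e.g. 16 bytes for a V8HI access, and PTR_INCR (if given) is rewired to use
   NEW_DATAREF_PTR as illustrated above.  */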
4339 bump_vector_ptr (tree dataref_ptr, gimple ptr_incr, gimple_stmt_iterator *gsi,
4340 gimple stmt, tree bump)
4342 stmt_vec_info stmt_info = vinfo_for_stmt (stmt);
4343 struct data_reference *dr = STMT_VINFO_DATA_REF (stmt_info);
4344 tree vectype = STMT_VINFO_VECTYPE (stmt_info);
4345 tree update = TYPE_SIZE_UNIT (vectype);
4348 use_operand_p use_p;
4349 tree new_dataref_ptr;
4354 new_dataref_ptr = copy_ssa_name (dataref_ptr);
4355 incr_stmt = gimple_build_assign (new_dataref_ptr, POINTER_PLUS_EXPR,
4356 dataref_ptr, update);
4357 vect_finish_stmt_generation (stmt, incr_stmt, gsi);
4359 /* Copy the points-to information if it exists. */
4360 if (DR_PTR_INFO (dr))
4362 duplicate_ssa_name_ptr_info (new_dataref_ptr, DR_PTR_INFO (dr));
4363 mark_ptr_info_alignment_unknown (SSA_NAME_PTR_INFO (new_dataref_ptr));
4367 return new_dataref_ptr;
4369 /* Update the vector-pointer's cross-iteration increment. */
4370 FOR_EACH_SSA_USE_OPERAND (use_p, ptr_incr, iter, SSA_OP_USE)
4372 tree use = USE_FROM_PTR (use_p);
4374 if (use == dataref_ptr)
4375 SET_USE (use_p, new_dataref_ptr);
4377 gcc_assert (tree_int_cst_compare (use, update) == 0);
4380 return new_dataref_ptr;
4384 /* Function vect_create_destination_var.
4386 Create a new temporary of type VECTYPE. */
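/* For illustration (the names here are assumed): given a scalar destination
   "x_7" and a V4SF vectype,

     vec_dest = vect_create_destination_var (scalar_dest, vectype);

   returns a fresh vector variable of type V4SF whose name is derived from
   "x_7"; passing NULL_TREE for VECTYPE yields a scalar temporary of
   TREE_TYPE (scalar_dest) instead.  */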
4389 vect_create_destination_var (tree scalar_dest, tree vectype)
4395 enum vect_var_kind kind;
4397 kind = vectype ? vect_simple_var : vect_scalar_var;
4398 type = vectype ? vectype : TREE_TYPE (scalar_dest);
4400 gcc_assert (TREE_CODE (scalar_dest) == SSA_NAME);
4402 name = get_name (scalar_dest);
4404 new_name = xasprintf ("%s_%u", name, SSA_NAME_VERSION (scalar_dest));
4406 new_name = xasprintf ("_%u", SSA_NAME_VERSION (scalar_dest));
4407 vec_dest = vect_get_new_vect_var (type, kind, new_name);
4413 /* Function vect_grouped_store_supported.
4415 Returns TRUE if interleave high and interleave low permutations
4416 are supported, and FALSE otherwise. */
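/* A hedged usage sketch: for a group of 4 interleaved V8HI stores,

     vect_grouped_store_supported (vectype, 4)

   only requires the interleave-high/low permutations to be supported by the
   target; for a group of 3 it instead checks the two 3-way shuffle masks that
   are constructed below.  */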
4419 vect_grouped_store_supported (tree vectype, unsigned HOST_WIDE_INT count)
4421 machine_mode mode = TYPE_MODE (vectype);
4423 /* vect_permute_store_chain requires the group size to be equal to 3 or
4424 be a power of two. */
4425 if (count != 3 && exact_log2 (count) == -1)
4427 if (dump_enabled_p ())
4428 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
4429 "the size of the group of accesses"
4430 " is not a power of 2 or not equal to 3\n");
4434 /* Check that the permutation is supported. */
4435 if (VECTOR_MODE_P (mode))
4437 unsigned int i, nelt = GET_MODE_NUNITS (mode);
4438 unsigned char *sel = XALLOCAVEC (unsigned char, nelt);
4442 unsigned int j0 = 0, j1 = 0, j2 = 0;
4445 for (j = 0; j < 3; j++)
4447 int nelt0 = ((3 - j) * nelt) % 3;
4448 int nelt1 = ((3 - j) * nelt + 1) % 3;
4449 int nelt2 = ((3 - j) * nelt + 2) % 3;
4450 for (i = 0; i < nelt; i++)
4452 if (3 * i + nelt0 < nelt)
4453 sel[3 * i + nelt0] = j0++;
4454 if (3 * i + nelt1 < nelt)
4455 sel[3 * i + nelt1] = nelt + j1++;
4456 if (3 * i + nelt2 < nelt)
4457 sel[3 * i + nelt2] = 0;
4459 if (!can_vec_perm_p (mode, false, sel))
4461 if (dump_enabled_p ())
4462 dump_printf (MSG_MISSED_OPTIMIZATION,
4463 "permutation op not supported by target.\n");
4467 for (i = 0; i < nelt; i++)
4469 if (3 * i + nelt0 < nelt)
4470 sel[3 * i + nelt0] = 3 * i + nelt0;
4471 if (3 * i + nelt1 < nelt)
4472 sel[3 * i + nelt1] = 3 * i + nelt1;
4473 if (3 * i + nelt2 < nelt)
4474 sel[3 * i + nelt2] = nelt + j2++;
4476 if (!can_vec_perm_p (mode, false, sel))
4478 if (dump_enabled_p ())
4479 dump_printf (MSG_MISSED_OPTIMIZATION,
4480 "permutation op not supported by target.\n");
4488 /* If length is not equal to 3 then only power of 2 is supported. */
4489 gcc_assert (exact_log2 (count) != -1);
4491 for (i = 0; i < nelt / 2; i++)
4494 sel[i * 2 + 1] = i + nelt;
4496 if (can_vec_perm_p (mode, false, sel))
4498 for (i = 0; i < nelt; i++)
4500 if (can_vec_perm_p (mode, false, sel))
4506 if (dump_enabled_p ())
4507 dump_printf (MSG_MISSED_OPTIMIZATION,
4508 "permutation op not supported by target.\n");
4513 /* Return TRUE if vec_store_lanes is available for COUNT vectors of
4517 vect_store_lanes_supported (tree vectype, unsigned HOST_WIDE_INT count)
4519 return vect_lanes_optab_supported_p ("vec_store_lanes",
4520 vec_store_lanes_optab,
4525 /* Function vect_permute_store_chain.
4527 Given a chain of interleaved stores in DR_CHAIN of LENGTH that must be
4528 a power of 2 or equal to 3, generate interleave_high/low stmts to reorder
4529 the data correctly for the stores. Return the final references for stores
4532 E.g., LENGTH is 4 and the scalar type is short, i.e., VF is 8.
4533 The input is 4 vectors each containing 8 elements. We assign a number to
4534 each element, the input sequence is:
4536 1st vec: 0 1 2 3 4 5 6 7
4537 2nd vec: 8 9 10 11 12 13 14 15
4538 3rd vec: 16 17 18 19 20 21 22 23
4539 4th vec: 24 25 26 27 28 29 30 31
4541 The output sequence should be:
4543 1st vec: 0 8 16 24 1 9 17 25
4544 2nd vec: 2 10 18 26 3 11 19 27
4545 3rd vec: 4 12 20 28 5 13 21 29
4546 4th vec: 6 14 22 30 7 15 23 31
4548 i.e., we interleave the contents of the four vectors in their order.
4550 We use interleave_high/low instructions to create such output. The input of
4551 each interleave_high/low operation is two vectors:
4554 the even elements of the result vector are obtained left-to-right from the
4555 high/low elements of the first vector. The odd elements of the result are
4556 obtained left-to-right from the high/low elements of the second vector.
4557 The output of interleave_high will be: 0 4 1 5
4558 and of interleave_low: 2 6 3 7
4561 The permutation is done in log LENGTH stages. In each stage interleave_high
4562 and interleave_low stmts are created for each pair of vectors in DR_CHAIN,
4563 where the first argument is taken from the first half of DR_CHAIN and the
4564 second argument from its second half.
4567 I1: interleave_high (1st vec, 3rd vec)
4568 I2: interleave_low (1st vec, 3rd vec)
4569 I3: interleave_high (2nd vec, 4th vec)
4570 I4: interleave_low (2nd vec, 4th vec)
4572 The output for the first stage is:
4574 I1: 0 16 1 17 2 18 3 19
4575 I2: 4 20 5 21 6 22 7 23
4576 I3: 8 24 9 25 10 26 11 27
4577 I4: 12 28 13 29 14 30 15 31
4579 The output of the second stage, i.e. the final result is:
4581 I1: 0 8 16 24 1 9 17 25
4582 I2: 2 10 18 26 3 11 19 27
4583 I3: 4 12 20 28 5 13 21 29
4584 I4: 6 14 22 30 7 15 23 31. */
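/* For illustration (derived from the mask construction below, assuming V8HI,
   i.e. NELT == 8, and LENGTH a power of two): the interleave-high selector is
   {0, 8, 1, 9, 2, 10, 3, 11} and the interleave-low selector is
   {4, 12, 5, 13, 6, 14, 7, 15} -- the 8-element analogue of the
   "0 4 1 5" / "2 6 3 7" example above.  */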
4587 vect_permute_store_chain (vec<tree> dr_chain,
4588 unsigned int length,
4590 gimple_stmt_iterator *gsi,
4591 vec<tree> *result_chain)
4593 tree vect1, vect2, high, low;
4595 tree vectype = STMT_VINFO_VECTYPE (vinfo_for_stmt (stmt));
4596 tree perm_mask_low, perm_mask_high;
4598 tree perm3_mask_low, perm3_mask_high;
4599 unsigned int i, n, log_length = exact_log2 (length);
4600 unsigned int j, nelt = TYPE_VECTOR_SUBPARTS (vectype);
4601 unsigned char *sel = XALLOCAVEC (unsigned char, nelt);
4603 result_chain->quick_grow (length);
4604 memcpy (result_chain->address (), dr_chain.address (),
4605 length * sizeof (tree));
4609 unsigned int j0 = 0, j1 = 0, j2 = 0;
4611 for (j = 0; j < 3; j++)
4613 int nelt0 = ((3 - j) * nelt) % 3;
4614 int nelt1 = ((3 - j) * nelt + 1) % 3;
4615 int nelt2 = ((3 - j) * nelt + 2) % 3;
4617 for (i = 0; i < nelt; i++)
4619 if (3 * i + nelt0 < nelt)
4620 sel[3 * i + nelt0] = j0++;
4621 if (3 * i + nelt1 < nelt)
4622 sel[3 * i + nelt1] = nelt + j1++;
4623 if (3 * i + nelt2 < nelt)
4624 sel[3 * i + nelt2] = 0;
4626 perm3_mask_low = vect_gen_perm_mask_checked (vectype, sel);
4628 for (i = 0; i < nelt; i++)
4630 if (3 * i + nelt0 < nelt)
4631 sel[3 * i + nelt0] = 3 * i + nelt0;
4632 if (3 * i + nelt1 < nelt)
4633 sel[3 * i + nelt1] = 3 * i + nelt1;
4634 if (3 * i + nelt2 < nelt)
4635 sel[3 * i + nelt2] = nelt + j2++;
4637 perm3_mask_high = vect_gen_perm_mask_checked (vectype, sel);
4639 vect1 = dr_chain[0];
4640 vect2 = dr_chain[1];
4642 /* Create interleaving stmt:
4643 low = VEC_PERM_EXPR <vect1, vect2,
4644 {j, nelt, *, j + 1, nelt + j + 1, *,
4645 j + 2, nelt + j + 2, *, ...}> */
4646 data_ref = make_temp_ssa_name (vectype, NULL, "vect_shuffle3_low");
4647 perm_stmt = gimple_build_assign (data_ref, VEC_PERM_EXPR, vect1,
4648 vect2, perm3_mask_low);
4649 vect_finish_stmt_generation (stmt, perm_stmt, gsi);
4652 vect2 = dr_chain[2];
4653 /* Create interleaving stmt:
4654 low = VEC_PERM_EXPR <vect1, vect2,
4655 {0, 1, nelt + j, 3, 4, nelt + j + 1,
4656 6, 7, nelt + j + 2, ...}> */
4657 data_ref = make_temp_ssa_name (vectype, NULL, "vect_shuffle3_high");
4658 perm_stmt = gimple_build_assign (data_ref, VEC_PERM_EXPR, vect1,
4659 vect2, perm3_mask_high);
4660 vect_finish_stmt_generation (stmt, perm_stmt, gsi);
4661 (*result_chain)[j] = data_ref;
4666 /* If length is not equal to 3 then only power of 2 is supported. */
4667 gcc_assert (exact_log2 (length) != -1);
4669 for (i = 0, n = nelt / 2; i < n; i++)
4672 sel[i * 2 + 1] = i + nelt;
4674 perm_mask_high = vect_gen_perm_mask_checked (vectype, sel);
4676 for (i = 0; i < nelt; i++)
4678 perm_mask_low = vect_gen_perm_mask_checked (vectype, sel);
4680 for (i = 0, n = log_length; i < n; i++)
4682 for (j = 0; j < length/2; j++)
4684 vect1 = dr_chain[j];
4685 vect2 = dr_chain[j+length/2];
4687 /* Create interleaving stmt:
4688 high = VEC_PERM_EXPR <vect1, vect2, {0, nelt, 1, nelt+1,
4690 high = make_temp_ssa_name (vectype, NULL, "vect_inter_high");
4691 perm_stmt = gimple_build_assign (high, VEC_PERM_EXPR, vect1,
4692 vect2, perm_mask_high);
4693 vect_finish_stmt_generation (stmt, perm_stmt, gsi);
4694 (*result_chain)[2*j] = high;
4696 /* Create interleaving stmt:
4697 low = VEC_PERM_EXPR <vect1, vect2,
4698 {nelt/2, nelt*3/2, nelt/2+1, nelt*3/2+1,
4700 low = make_temp_ssa_name (vectype, NULL, "vect_inter_low");
4701 perm_stmt = gimple_build_assign (low, VEC_PERM_EXPR, vect1,
4702 vect2, perm_mask_low);
4703 vect_finish_stmt_generation (stmt, perm_stmt, gsi);
4704 (*result_chain)[2*j+1] = low;
4706 memcpy (dr_chain.address (), result_chain->address (),
4707 length * sizeof (tree));
4712 /* Function vect_setup_realignment
4714 This function is called when vectorizing an unaligned load using
4715 the dr_explicit_realign[_optimized] scheme.
4716 This function generates the following code at the loop prolog:
4719 x msq_init = *(floor(p)); # prolog load
4720 realignment_token = call target_builtin;
4722 x msq = phi (msq_init, ---)
4724 The stmts marked with x are generated only for the case of
4725 dr_explicit_realign_optimized.
4727 The code above sets up a new (vector) pointer, pointing to the first
4728 location accessed by STMT, and a "floor-aligned" load using that pointer.
4729 It also generates code to compute the "realignment-token" (if the relevant
4730 target hook was defined), and creates a phi-node at the loop-header bb
4731 whose arguments are the result of the prolog-load (created by this
4732 function) and the result of a load that takes place in the loop (to be
4733 created by the caller to this function).
4735 For the case of dr_explicit_realign_optimized:
4736 The caller to this function uses the phi-result (msq) to create the
4737 realignment code inside the loop, and sets up the missing phi argument,
4740 msq = phi (msq_init, lsq)
4741 lsq = *(floor(p')); # load in loop
4742 result = realign_load (msq, lsq, realignment_token);
4744 For the case of dr_explicit_realign:
4746 msq = *(floor(p)); # load in loop
4748 lsq = *(floor(p')); # load in loop
4749 result = realign_load (msq, lsq, realignment_token);
4752 STMT - (scalar) load stmt to be vectorized. This load accesses
4753 a memory location that may be unaligned.
4754 BSI - place where new code is to be inserted.
4755 ALIGNMENT_SUPPORT_SCHEME - which of the two misalignment handling schemes
4759 REALIGNMENT_TOKEN - the result of a call to the builtin_mask_for_load
4760 target hook, if defined.
4761 Return value - the result of the loop-header phi node. */
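/* An illustrative example (the addresses are assumed): for a V4SI access whose
   initial address p is 0x1006, on a target where such vectors are 16-byte
   aligned, floor(p) is computed as p & -16 (the BIT_AND_EXPR below), so the
   prolog load msq_init reads from 0x1000; the in-loop load lsq reads the next
   aligned chunk and REALIGN_LOAD <msq, lsq, realignment_token> reassembles the
   requested unaligned vector.  */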
4764 vect_setup_realignment (gimple stmt, gimple_stmt_iterator *gsi,
4765 tree *realignment_token,
4766 enum dr_alignment_support alignment_support_scheme,
4768 struct loop **at_loop)
4770 stmt_vec_info stmt_info = vinfo_for_stmt (stmt);
4771 tree vectype = STMT_VINFO_VECTYPE (stmt_info);
4772 loop_vec_info loop_vinfo = STMT_VINFO_LOOP_VINFO (stmt_info);
4773 struct data_reference *dr = STMT_VINFO_DATA_REF (stmt_info);
4774 struct loop *loop = NULL;
4776 tree scalar_dest = gimple_assign_lhs (stmt);
4782 tree msq_init = NULL_TREE;
4785 tree msq = NULL_TREE;
4786 gimple_seq stmts = NULL;
4788 bool compute_in_loop = false;
4789 bool nested_in_vect_loop = false;
4790 struct loop *containing_loop = (gimple_bb (stmt))->loop_father;
4791 struct loop *loop_for_initial_load = NULL;
4795 loop = LOOP_VINFO_LOOP (loop_vinfo);
4796 nested_in_vect_loop = nested_in_vect_loop_p (loop, stmt);
4799 gcc_assert (alignment_support_scheme == dr_explicit_realign
4800 || alignment_support_scheme == dr_explicit_realign_optimized);
4802 /* We need to generate three things:
4803 1. the misalignment computation
4804 2. the extra vector load (for the optimized realignment scheme).
4805 3. the phi node for the two vectors from which the realignment is
4806 done (for the optimized realignment scheme). */
4808 /* 1. Determine where to generate the misalignment computation.
4810 If INIT_ADDR is NULL_TREE, this indicates that the misalignment
4811 calculation will be generated by this function, outside the loop (in the
4812 preheader). Otherwise, INIT_ADDR had already been computed for us by the
4813 caller, inside the loop.
4815 Background: If the misalignment remains fixed throughout the iterations of
4816 the loop, then both realignment schemes are applicable, and also the
4817 misalignment computation can be done outside LOOP. This is because we are
4818 vectorizing LOOP, and so the memory accesses in LOOP advance in steps that
4819 are a multiple of VS (the Vector Size), and therefore the misalignment in
4820 different vectorized LOOP iterations is always the same.
4821 The problem arises only if the memory access is in an inner-loop nested
4822 inside LOOP, which is now being vectorized using outer-loop vectorization.
4823 This is the only case when the misalignment of the memory access may not
4824 remain fixed throughout the iterations of the inner-loop (as explained in
4825 detail in vect_supportable_dr_alignment). In this case, not only is the
4826 optimized realignment scheme not applicable, but also the misalignment
4827 computation (and generation of the realignment token that is passed to
4828 REALIGN_LOAD) have to be done inside the loop.
4830 In short, INIT_ADDR indicates whether we are in a COMPUTE_IN_LOOP mode
4831 or not, which in turn determines if the misalignment is computed inside
4832 the inner-loop, or outside LOOP. */
4834 if (init_addr != NULL_TREE || !loop_vinfo)
4836 compute_in_loop = true;
4837 gcc_assert (alignment_support_scheme == dr_explicit_realign);
4841 /* 2. Determine where to generate the extra vector load.
4843 For the optimized realignment scheme, instead of generating two vector
4844 loads in each iteration, we generate a single extra vector load in the
4845 preheader of the loop, and in each iteration reuse the result of the
4846 vector load from the previous iteration. In case the memory access is in
4847 an inner-loop nested inside LOOP, which is now being vectorized using
4848 outer-loop vectorization, we need to determine whether this initial vector
4849 load should be generated at the preheader of the inner-loop, or can be
4850 generated at the preheader of LOOP. If the memory access has no evolution
4851 in LOOP, it can be generated in the preheader of LOOP. Otherwise, it has
4852 to be generated inside LOOP (in the preheader of the inner-loop). */
4854 if (nested_in_vect_loop)
4856 tree outerloop_step = STMT_VINFO_DR_STEP (stmt_info);
4857 bool invariant_in_outerloop =
4858 (tree_int_cst_compare (outerloop_step, size_zero_node) == 0);
4859 loop_for_initial_load = (invariant_in_outerloop ? loop : loop->inner);
4862 loop_for_initial_load = loop;
4864 *at_loop = loop_for_initial_load;
4866 if (loop_for_initial_load)
4867 pe = loop_preheader_edge (loop_for_initial_load);
4869 /* 3. For the case of the optimized realignment, create the first vector
4870 load at the loop preheader. */
4872 if (alignment_support_scheme == dr_explicit_realign_optimized)
4874 /* Create msq_init = *(floor(p1)) in the loop preheader */
4877 gcc_assert (!compute_in_loop);
4878 vec_dest = vect_create_destination_var (scalar_dest, vectype);
4879 ptr = vect_create_data_ref_ptr (stmt, vectype, loop_for_initial_load,
4880 NULL_TREE, &init_addr, NULL, &inc,
4882 new_temp = copy_ssa_name (ptr);
4883 new_stmt = gimple_build_assign
4884 (new_temp, BIT_AND_EXPR, ptr,
4885 build_int_cst (TREE_TYPE (ptr),
4886 -(HOST_WIDE_INT)TYPE_ALIGN_UNIT (vectype)));
4887 new_bb = gsi_insert_on_edge_immediate (pe, new_stmt);
4888 gcc_assert (!new_bb);
4890 = build2 (MEM_REF, TREE_TYPE (vec_dest), new_temp,
4891 build_int_cst (reference_alias_ptr_type (DR_REF (dr)), 0));
4892 new_stmt = gimple_build_assign (vec_dest, data_ref);
4893 new_temp = make_ssa_name (vec_dest, new_stmt);
4894 gimple_assign_set_lhs (new_stmt, new_temp);
4897 new_bb = gsi_insert_on_edge_immediate (pe, new_stmt);
4898 gcc_assert (!new_bb);
4901 gsi_insert_before (gsi, new_stmt, GSI_SAME_STMT);
4903 msq_init = gimple_assign_lhs (new_stmt);
4906 /* 4. Create realignment token using a target builtin, if available.
4907 It is done either inside the containing loop, or before LOOP (as
4908 determined above). */
4910 if (targetm.vectorize.builtin_mask_for_load)
4915 /* Compute INIT_ADDR - the initial address accessed by this memref. */
4918 /* Generate the INIT_ADDR computation outside LOOP. */
4919 init_addr = vect_create_addr_base_for_vector_ref (stmt, &stmts,
4923 pe = loop_preheader_edge (loop);
4924 new_bb = gsi_insert_seq_on_edge_immediate (pe, stmts);
4925 gcc_assert (!new_bb);
4928 gsi_insert_seq_before (gsi, stmts, GSI_SAME_STMT);
4931 builtin_decl = targetm.vectorize.builtin_mask_for_load ();
4932 new_stmt = gimple_build_call (builtin_decl, 1, init_addr);
4934 vect_create_destination_var (scalar_dest,
4935 gimple_call_return_type (new_stmt));
4936 new_temp = make_ssa_name (vec_dest, new_stmt);
4937 gimple_call_set_lhs (new_stmt, new_temp);
4939 if (compute_in_loop)
4940 gsi_insert_before (gsi, new_stmt, GSI_SAME_STMT);
4943 /* Generate the misalignment computation outside LOOP. */
4944 pe = loop_preheader_edge (loop);
4945 new_bb = gsi_insert_on_edge_immediate (pe, new_stmt);
4946 gcc_assert (!new_bb);
4949 *realignment_token = gimple_call_lhs (new_stmt);
4951 /* The result of the CALL_EXPR to this builtin is determined from
4952 the value of the parameter and no global variables are touched
4953 which makes the builtin a "const" function. Requiring the
4954 builtin to have the "const" attribute makes it unnecessary
4955 to call mark_call_clobbered. */
4956 gcc_assert (TREE_READONLY (builtin_decl));
4959 if (alignment_support_scheme == dr_explicit_realign)
4962 gcc_assert (!compute_in_loop);
4963 gcc_assert (alignment_support_scheme == dr_explicit_realign_optimized);
4966 /* 5. Create msq = phi <msq_init, lsq> in loop */
4968 pe = loop_preheader_edge (containing_loop);
4969 vec_dest = vect_create_destination_var (scalar_dest, vectype);
4970 msq = make_ssa_name (vec_dest);
4971 phi_stmt = create_phi_node (msq, containing_loop->header);
4972 add_phi_arg (phi_stmt, msq_init, pe, UNKNOWN_LOCATION);
4978 /* Function vect_grouped_load_supported.
4980 Returns TRUE if even and odd permutations are supported,
4981 and FALSE otherwise. */
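/* A hedged sketch of what is checked: for a group of 4 loads the target must
   support the extract-even/odd permutations (for V8HI the selectors
   {0, 2, 4, ...} and {1, 3, 5, ...}); for a group of 3 it must support the
   pair of 3-way shuffle masks built below, the first of which selects
   elements {k, 3 + k, 6 + k, ...} of the concatenated input vectors.  */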
4984 vect_grouped_load_supported (tree vectype, unsigned HOST_WIDE_INT count)
4986 machine_mode mode = TYPE_MODE (vectype);
4988 /* vect_permute_load_chain requires the group size to be equal to 3 or
4989 be a power of two. */
4990 if (count != 3 && exact_log2 (count) == -1)
4992 if (dump_enabled_p ())
4993 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
4994 "the size of the group of accesses"
4995 " is not a power of 2 or not equal to 3\n");
4999 /* Check that the permutation is supported. */
5000 if (VECTOR_MODE_P (mode))
5002 unsigned int i, j, nelt = GET_MODE_NUNITS (mode);
5003 unsigned char *sel = XALLOCAVEC (unsigned char, nelt);
5008 for (k = 0; k < 3; k++)
5010 for (i = 0; i < nelt; i++)
5011 if (3 * i + k < 2 * nelt)
5015 if (!can_vec_perm_p (mode, false, sel))
5017 if (dump_enabled_p ())
5018 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
5019 "shuffle of 3 loads is not supported by"
5023 for (i = 0, j = 0; i < nelt; i++)
5024 if (3 * i + k < 2 * nelt)
5027 sel[i] = nelt + ((nelt + k) % 3) + 3 * (j++);
5028 if (!can_vec_perm_p (mode, false, sel))
5030 if (dump_enabled_p ())
5031 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
5032 "shuffle of 3 loads is not supported by"
5041 /* If length is not equal to 3 then only power of 2 is supported. */
5042 gcc_assert (exact_log2 (count) != -1);
5043 for (i = 0; i < nelt; i++)
5045 if (can_vec_perm_p (mode, false, sel))
5047 for (i = 0; i < nelt; i++)
5049 if (can_vec_perm_p (mode, false, sel))
5055 if (dump_enabled_p ())
5056 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
5057 "extract even/odd not supported by target\n");
5061 /* Return TRUE if vec_load_lanes is available for COUNT vectors of
5065 vect_load_lanes_supported (tree vectype, unsigned HOST_WIDE_INT count)
5067 return vect_lanes_optab_supported_p ("vec_load_lanes",
5068 vec_load_lanes_optab,
5072 /* Function vect_permute_load_chain.
5074 Given a chain of interleaved loads in DR_CHAIN of LENGTH that must be
5075 a power of 2 or equal to 3, generate extract_even/odd stmts to reorder
5076 the input data correctly. Return the final references for loads in
5079 E.g., LENGTH is 4 and the scalar type is short, i.e., VF is 8.
5080 The input is 4 vectors each containing 8 elements. We assign a number to each
5081 element, the input sequence is:
5083 1st vec: 0 1 2 3 4 5 6 7
5084 2nd vec: 8 9 10 11 12 13 14 15
5085 3rd vec: 16 17 18 19 20 21 22 23
5086 4th vec: 24 25 26 27 28 29 30 31
5088 The output sequence should be:
5090 1st vec: 0 4 8 12 16 20 24 28
5091 2nd vec: 1 5 9 13 17 21 25 29
5092 3rd vec: 2 6 10 14 18 22 26 30
5093 4th vec: 3 7 11 15 19 23 27 31
5095 i.e., the first output vector should contain the first elements of each
5096 interleaving group, etc.
5098 We use extract_even/odd instructions to create such output. The input of
5099 each extract_even/odd operation is two vectors
5103 and the output is the vector of extracted even/odd elements. The output of
5104 extract_even will be: 0 2 4 6
5105 and of extract_odd: 1 3 5 7
5108 The permutation is done in log LENGTH stages. In each stage extract_even
5109 and extract_odd stmts are created for each pair of vectors in DR_CHAIN in
5110 their order. In our example,
5112 E1: extract_even (1st vec, 2nd vec)
5113 E2: extract_odd (1st vec, 2nd vec)
5114 E3: extract_even (3rd vec, 4th vec)
5115 E4: extract_odd (3rd vec, 4th vec)
5117 The output for the first stage will be:
5119 E1: 0 2 4 6 8 10 12 14
5120 E2: 1 3 5 7 9 11 13 15
5121 E3: 16 18 20 22 24 26 28 30
5122 E4: 17 19 21 23 25 27 29 31
5124 In order to proceed and create the correct sequence for the next stage (or
5125 for the correct output, if the second stage is the last one, as in our
5126 example), we first put the output of extract_even operation and then the
5127 output of extract_odd in RESULT_CHAIN (which is then copied to DR_CHAIN).
5128 The input for the second stage is:
5130 1st vec (E1): 0 2 4 6 8 10 12 14
5131 2nd vec (E3): 16 18 20 22 24 26 28 30
5132 3rd vec (E2): 1 3 5 7 9 11 13 15
5133 4th vec (E4): 17 19 21 23 25 27 29 31
5135 The output of the second stage:
5137 E1: 0 4 8 12 16 20 24 28
5138 E2: 2 6 10 14 18 22 26 30
5139 E3: 1 5 9 13 17 21 25 29
5140 E4: 3 7 11 15 19 23 27 31
5142 And RESULT_CHAIN after reordering:
5144 1st vec (E1): 0 4 8 12 16 20 24 28
5145 2nd vec (E3): 1 5 9 13 17 21 25 29
5146 3rd vec (E2): 2 6 10 14 18 22 26 30
5147 4th vec (E4): 3 7 11 15 19 23 27 31. */
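/* For illustration (derived from the mask construction below, assuming V8HI,
   i.e. NELT == 8, and LENGTH a power of two): the extract-even selector is
   {0, 2, 4, 6, 8, 10, 12, 14} and the extract-odd selector is
   {1, 3, 5, 7, 9, 11, 13, 15} -- the 8-element analogue of the
   "0 2 4 6" / "1 3 5 7" example above.  */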
5150 vect_permute_load_chain (vec<tree> dr_chain,
5151 unsigned int length,
5153 gimple_stmt_iterator *gsi,
5154 vec<tree> *result_chain)
5156 tree data_ref, first_vect, second_vect;
5157 tree perm_mask_even, perm_mask_odd;
5158 tree perm3_mask_low, perm3_mask_high;
5160 tree vectype = STMT_VINFO_VECTYPE (vinfo_for_stmt (stmt));
5161 unsigned int i, j, log_length = exact_log2 (length);
5162 unsigned nelt = TYPE_VECTOR_SUBPARTS (vectype);
5163 unsigned char *sel = XALLOCAVEC (unsigned char, nelt);
5165 result_chain->quick_grow (length);
5166 memcpy (result_chain->address (), dr_chain.address (),
5167 length * sizeof (tree));
5173 for (k = 0; k < 3; k++)
5175 for (i = 0; i < nelt; i++)
5176 if (3 * i + k < 2 * nelt)
5180 perm3_mask_low = vect_gen_perm_mask_checked (vectype, sel);
5182 for (i = 0, j = 0; i < nelt; i++)
5183 if (3 * i + k < 2 * nelt)
5186 sel[i] = nelt + ((nelt + k) % 3) + 3 * (j++);
5188 perm3_mask_high = vect_gen_perm_mask_checked (vectype, sel);
5190 first_vect = dr_chain[0];
5191 second_vect = dr_chain[1];
5193 /* Create interleaving stmt (low part of):
5194 low = VEC_PERM_EXPR <first_vect, second_vect2, {k, 3 + k, 6 + k,
5196 data_ref = make_temp_ssa_name (vectype, NULL, "vect_shuffle3_low");
5197 perm_stmt = gimple_build_assign (data_ref, VEC_PERM_EXPR, first_vect,
5198 second_vect, perm3_mask_low);
5199 vect_finish_stmt_generation (stmt, perm_stmt, gsi);
5201 /* Create interleaving stmt (high part of):
5202 high = VEC_PERM_EXPR <first_vect, second_vect2, {k, 3 + k, 6 + k,
5204 first_vect = data_ref;
5205 second_vect = dr_chain[2];
5206 data_ref = make_temp_ssa_name (vectype, NULL, "vect_shuffle3_high");
5207 perm_stmt = gimple_build_assign (data_ref, VEC_PERM_EXPR, first_vect,
5208 second_vect, perm3_mask_high);
5209 vect_finish_stmt_generation (stmt, perm_stmt, gsi);
5210 (*result_chain)[k] = data_ref;
5215 /* If length is not equal to 3 then only power of 2 is supported. */
5216 gcc_assert (exact_log2 (length) != -1);
5218 for (i = 0; i < nelt; ++i)
5220 perm_mask_even = vect_gen_perm_mask_checked (vectype, sel);
5222 for (i = 0; i < nelt; ++i)
5224 perm_mask_odd = vect_gen_perm_mask_checked (vectype, sel);
5226 for (i = 0; i < log_length; i++)
5228 for (j = 0; j < length; j += 2)
5230 first_vect = dr_chain[j];
5231 second_vect = dr_chain[j+1];
5233 /* data_ref = permute_even (first_data_ref, second_data_ref); */
5234 data_ref = make_temp_ssa_name (vectype, NULL, "vect_perm_even");
5235 perm_stmt = gimple_build_assign (data_ref, VEC_PERM_EXPR,
5236 first_vect, second_vect,
5238 vect_finish_stmt_generation (stmt, perm_stmt, gsi);
5239 (*result_chain)[j/2] = data_ref;
5241 /* data_ref = permute_odd (first_data_ref, second_data_ref); */
5242 data_ref = make_temp_ssa_name (vectype, NULL, "vect_perm_odd");
5243 perm_stmt = gimple_build_assign (data_ref, VEC_PERM_EXPR,
5244 first_vect, second_vect,
5246 vect_finish_stmt_generation (stmt, perm_stmt, gsi);
5247 (*result_chain)[j/2+length/2] = data_ref;
5249 memcpy (dr_chain.address (), result_chain->address (),
5250 length * sizeof (tree));
5255 /* Function vect_shift_permute_load_chain.
5257 Given a chain of loads in DR_CHAIN of LENGTH 2 or 3, generate
5258 sequence of stmts to reorder the input data accordingly.
5259 Return the final references for loads in RESULT_CHAIN.
5260 Return true if successful, false otherwise.
5262 E.g., LENGTH is 3 and the scalar type is short, i.e., VF is 8.
5263 The input is 3 vectors each containing 8 elements. We assign a
5264 number to each element, the input sequence is:
5266 1st vec: 0 1 2 3 4 5 6 7
5267 2nd vec: 8 9 10 11 12 13 14 15
5268 3rd vec: 16 17 18 19 20 21 22 23
5270 The output sequence should be:
5272 1st vec: 0 3 6 9 12 15 18 21
5273 2nd vec: 1 4 7 10 13 16 19 22
5274 3rd vec: 2 5 8 11 14 17 20 23
5276 We use 3 shuffle instructions and 3 * 3 - 1 shifts to create such output.
5278 First we shuffle all 3 vectors to get correct elements order:
5280 1st vec: ( 0 3 6) ( 1 4 7) ( 2 5)
5281 2nd vec: ( 8 11 14) ( 9 12 15) (10 13)
5282 3rd vec: (16 19 22) (17 20 23) (18 21)
5284 Next we unite and shift vector 3 times:
5287 shift right by 6 the concatenation of:
5288 "1st vec" and "2nd vec"
5289 ( 0 3 6) ( 1 4 7) |( 2 5) _ ( 8 11 14) ( 9 12 15)| (10 13)
5290 "2nd vec" and "3rd vec"
5291 ( 8 11 14) ( 9 12 15) |(10 13) _ (16 19 22) (17 20 23)| (18 21)
5292 "3rd vec" and "1st vec"
5293 (16 19 22) (17 20 23) |(18 21) _ ( 0 3 6) ( 1 4 7)| ( 2 5)
5296 So that now new vectors are:
5298 1st vec: ( 2 5) ( 8 11 14) ( 9 12 15)
5299 2nd vec: (10 13) (16 19 22) (17 20 23)
5300 3rd vec: (18 21) ( 0 3 6) ( 1 4 7)
5303 shift right by 5 the concatenation of:
5304 "1st vec" and "3rd vec"
5305 ( 2 5) ( 8 11 14) |( 9 12 15) _ (18 21) ( 0 3 6)| ( 1 4 7)
5306 "2nd vec" and "1st vec"
5307 (10 13) (16 19 22) |(17 20 23) _ ( 2 5) ( 8 11 14)| ( 9 12 15)
5308 "3rd vec" and "2nd vec"
5309 (18 21) ( 0 3 6) |( 1 4 7) _ (10 13) (16 19 22)| (17 20 23)
5312 So that now new vectors are:
5314 1st vec: ( 9 12 15) (18 21) ( 0 3 6)
5315 2nd vec: (17 20 23) ( 2 5) ( 8 11 14)
5316 3rd vec: ( 1 4 7) (10 13) (16 19 22) READY
5319 shift right by 5 the concatenation of:
5320 "1st vec" and "1st vec"
5321 ( 9 12 15) (18 21) |( 0 3 6) _ ( 9 12 15) (18 21)| ( 0 3 6)
5322 shift right by 3 the concatenation of:
5323 "2nd vec" and "2nd vec"
5324 (17 20 23) |( 2 5) ( 8 11 14) _ (17 20 23)| ( 2 5) ( 8 11 14)
5327 So that now all vectors are READY:
5328 1st vec: ( 0 3 6) ( 9 12 15) (18 21)
5329 2nd vec: ( 2 5) ( 8 11 14) (17 20 23)
5330 3rd vec: ( 1 4 7) (10 13) (16 19 22)
5332 This algorithm is faster than the one in vect_permute_load_chain if:
5333 1. "shift of a concatenation" is faster than general permutation.
5335 2. The TARGET machine can't execute vector instructions in parallel.
5336 This is because each step of the algorithm depends on the previous one.
5337 The algorithm in vect_permute_load_chain is much more parallel.
5339 The algorithm is applicable only for LOAD CHAIN LENGTH less than VF. */
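/* A hedged sketch of the LENGTH == 2 path handled first below (V8HI assumed):
   each input vector is first shuffled so that its even elements form one half
   and its odd elements the other; a shift permutation with selector
   {4, 5, 6, 7, 8, 9, 10, 11} applied to the shuffled pair then collects all
   odd-indexed elements, and a select with {0, 1, 2, 3, 12, 13, 14, 15}
   collects all even-indexed ones, giving the two de-interleaved results.  */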
5343 vect_shift_permute_load_chain (vec<tree> dr_chain,
5344 unsigned int length,
5346 gimple_stmt_iterator *gsi,
5347 vec<tree> *result_chain)
5349 tree vect[3], vect_shift[3], data_ref, first_vect, second_vect;
5350 tree perm2_mask1, perm2_mask2, perm3_mask;
5351 tree select_mask, shift1_mask, shift2_mask, shift3_mask, shift4_mask;
5354 tree vectype = STMT_VINFO_VECTYPE (vinfo_for_stmt (stmt));
5356 unsigned nelt = TYPE_VECTOR_SUBPARTS (vectype);
5357 unsigned char *sel = XALLOCAVEC (unsigned char, nelt);
5358 stmt_vec_info stmt_info = vinfo_for_stmt (stmt);
5359 loop_vec_info loop_vinfo = STMT_VINFO_LOOP_VINFO (stmt_info);
5361 result_chain->quick_grow (length);
5362 memcpy (result_chain->address (), dr_chain.address (),
5363 length * sizeof (tree));
5365 if (exact_log2 (length) != -1 && LOOP_VINFO_VECT_FACTOR (loop_vinfo) > 4)
5367 unsigned int j, log_length = exact_log2 (length);
5368 for (i = 0; i < nelt / 2; ++i)
5370 for (i = 0; i < nelt / 2; ++i)
5371 sel[nelt / 2 + i] = i * 2 + 1;
5372 if (!can_vec_perm_p (TYPE_MODE (vectype), false, sel))
5374 if (dump_enabled_p ())
5375 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
5376 "shuffle of 2 fields structure is not \
5377 supported by target\n");
5380 perm2_mask1 = vect_gen_perm_mask_checked (vectype, sel);
5382 for (i = 0; i < nelt / 2; ++i)
5384 for (i = 0; i < nelt / 2; ++i)
5385 sel[nelt / 2 + i] = i * 2;
5386 if (!can_vec_perm_p (TYPE_MODE (vectype), false, sel))
5388 if (dump_enabled_p ())
5389 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
5390 "shuffle of 2 fields structure is not \
5391 supported by target\n");
5394 perm2_mask2 = vect_gen_perm_mask_checked (vectype, sel);
5396 /* Generating permutation constant to shift all elements.
5397 For vector length 8 it is {4 5 6 7 8 9 10 11}. */
5398 for (i = 0; i < nelt; i++)
5399 sel[i] = nelt / 2 + i;
5400 if (!can_vec_perm_p (TYPE_MODE (vectype), false, sel))
5402 if (dump_enabled_p ())
5403 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
5404 "shift permutation is not supported by target\n");
5407 shift1_mask = vect_gen_perm_mask_checked (vectype, sel);
5409 /* Generating permutation constant to select vector from 2.
5410 For vector length 8 it is {0 1 2 3 12 13 14 15}. */
5411 for (i = 0; i < nelt / 2; i++)
5413 for (i = nelt / 2; i < nelt; i++)
5415 if (!can_vec_perm_p (TYPE_MODE (vectype), false, sel))
5417 if (dump_enabled_p ())
5418 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
5419 "select is not supported by target\n");
5422 select_mask = vect_gen_perm_mask_checked (vectype, sel);
5424 for (i = 0; i < log_length; i++)
5426 for (j = 0; j < length; j += 2)
5428 first_vect = dr_chain[j];
5429 second_vect = dr_chain[j + 1];
5431 data_ref = make_temp_ssa_name (vectype, NULL, "vect_shuffle2");
5432 perm_stmt = gimple_build_assign (data_ref, VEC_PERM_EXPR,
5433 first_vect, first_vect,
5435 vect_finish_stmt_generation (stmt, perm_stmt, gsi);
5438 data_ref = make_temp_ssa_name (vectype, NULL, "vect_shuffle2");
5439 perm_stmt = gimple_build_assign (data_ref, VEC_PERM_EXPR,
5440 second_vect, second_vect,
5442 vect_finish_stmt_generation (stmt, perm_stmt, gsi);
5445 data_ref = make_temp_ssa_name (vectype, NULL, "vect_shift");
5446 perm_stmt = gimple_build_assign (data_ref, VEC_PERM_EXPR,
5447 vect[0], vect[1], shift1_mask);
5448 vect_finish_stmt_generation (stmt, perm_stmt, gsi);
5449 (*result_chain)[j/2 + length/2] = data_ref;
5451 data_ref = make_temp_ssa_name (vectype, NULL, "vect_select");
5452 perm_stmt = gimple_build_assign (data_ref, VEC_PERM_EXPR,
5453 vect[0], vect[1], select_mask);
5454 vect_finish_stmt_generation (stmt, perm_stmt, gsi);
5455 (*result_chain)[j/2] = data_ref;
5457 memcpy (dr_chain.address (), result_chain->address (),
5458 length * sizeof (tree));
5462 if (length == 3 && LOOP_VINFO_VECT_FACTOR (loop_vinfo) > 2)
5464 unsigned int k = 0, l = 0;
5466 /* Generating permutation constant to get all elements in right order.
5467 For vector length 8 it is {0 3 6 1 4 7 2 5}. */
5468 for (i = 0; i < nelt; i++)
5470 if (3 * k + (l % 3) >= nelt)
5473 l += (3 - (nelt % 3));
5475 sel[i] = 3 * k + (l % 3);
5478 if (!can_vec_perm_p (TYPE_MODE (vectype), false, sel))
5480 if (dump_enabled_p ())
5481 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
5482 "shuffle of 3 fields structure is not \
5483 supported by target\n");
5486 perm3_mask = vect_gen_perm_mask_checked (vectype, sel);
5488 /* Generating permutation constant to shift all elements.
5489 For vector length 8 it is {6 7 8 9 10 11 12 13}. */
5490 for (i = 0; i < nelt; i++)
5491 sel[i] = 2 * (nelt / 3) + (nelt % 3) + i;
5492 if (!can_vec_perm_p (TYPE_MODE (vectype), false, sel))
5494 if (dump_enabled_p ())
5495 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
5496 "shift permutation is not supported by target\n");
5499 shift1_mask = vect_gen_perm_mask_checked (vectype, sel);
5501 /* Generating permutation constant to shift all elements.
5502 For vector length 8 it is {5 6 7 8 9 10 11 12}. */
5503 for (i = 0; i < nelt; i++)
5504 sel[i] = 2 * (nelt / 3) + 1 + i;
5505 if (!can_vec_perm_p (TYPE_MODE (vectype), false, sel))
5507 if (dump_enabled_p ())
5508 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
5509 "shift permutation is not supported by target\n");
5512 shift2_mask = vect_gen_perm_mask_checked (vectype, sel);
5514 /* Generating permutation constant to shift all elements.
5515 For vector length 8 it is {3 4 5 6 7 8 9 10}. */
5516 for (i = 0; i < nelt; i++)
5517 sel[i] = (nelt / 3) + (nelt % 3) / 2 + i;
5518 if (!can_vec_perm_p (TYPE_MODE (vectype), false, sel))
5520 if (dump_enabled_p ())
5521 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
5522 "shift permutation is not supported by target\n");
5525 shift3_mask = vect_gen_perm_mask_checked (vectype, sel);
5527 /* Generating permutation constant to shift all elements.
5528 For vector length 8 it is {5 6 7 8 9 10 11 12}. */
5529 for (i = 0; i < nelt; i++)
5530 sel[i] = 2 * (nelt / 3) + (nelt % 3) / 2 + i;
5531 if (!can_vec_perm_p (TYPE_MODE (vectype), false, sel))
5533 if (dump_enabled_p ())
5534 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
5535 "shift permutation is not supported by target\n");
5538 shift4_mask = vect_gen_perm_mask_checked (vectype, sel);
5540 for (k = 0; k < 3; k++)
5542 data_ref = make_temp_ssa_name (vectype, NULL, "vect_shuffle3");
5543 perm_stmt = gimple_build_assign (data_ref, VEC_PERM_EXPR,
5544 dr_chain[k], dr_chain[k],
5546 vect_finish_stmt_generation (stmt, perm_stmt, gsi);
5550 for (k = 0; k < 3; k++)
5552 data_ref = make_temp_ssa_name (vectype, NULL, "vect_shift1");
5553 perm_stmt = gimple_build_assign (data_ref, VEC_PERM_EXPR,
5554 vect[k % 3], vect[(k + 1) % 3],
5556 vect_finish_stmt_generation (stmt, perm_stmt, gsi);
5557 vect_shift[k] = data_ref;
5560 for (k = 0; k < 3; k++)
5562 data_ref = make_temp_ssa_name (vectype, NULL, "vect_shift2");
5563 perm_stmt = gimple_build_assign (data_ref, VEC_PERM_EXPR,
5564 vect_shift[(4 - k) % 3],
5565 vect_shift[(3 - k) % 3],
5567 vect_finish_stmt_generation (stmt, perm_stmt, gsi);
5571 (*result_chain)[3 - (nelt % 3)] = vect[2];
5573 data_ref = make_temp_ssa_name (vectype, NULL, "vect_shift3");
5574 perm_stmt = gimple_build_assign (data_ref, VEC_PERM_EXPR, vect[0],
5575 vect[0], shift3_mask);
5576 vect_finish_stmt_generation (stmt, perm_stmt, gsi);
5577 (*result_chain)[nelt % 3] = data_ref;
5579 data_ref = make_temp_ssa_name (vectype, NULL, "vect_shift4");
5580 perm_stmt = gimple_build_assign (data_ref, VEC_PERM_EXPR, vect[1],
5581 vect[1], shift4_mask);
5582 vect_finish_stmt_generation (stmt, perm_stmt, gsi);
5583 (*result_chain)[0] = data_ref;
5589 /* Function vect_transform_grouped_load.
5591 Given a chain of input interleaved data-refs (in DR_CHAIN), build statements
5592 to perform their permutation and ascribe the result vectorized statements to
5593 the scalar statements. */
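/* A hedged usage sketch (DR_CHAIN and GROUP_SIZE are whatever the caller has
   built): after emitting the group's vector loads into DR_CHAIN the caller
   simply does

     vect_transform_grouped_load (stmt, dr_chain, group_size, gsi);

   which permutes the chain (via vect_permute_load_chain or
   vect_shift_permute_load_chain) and records each permuted vector as the
   vectorized statement of the corresponding scalar load.  */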
5597 vect_transform_grouped_load (gimple stmt, vec<tree> dr_chain, int size,
5598 gimple_stmt_iterator *gsi)
5601 vec<tree> result_chain = vNULL;
5603 /* DR_CHAIN contains input data-refs that are a part of the interleaving.
5604 RESULT_CHAIN is the output of vect_permute_load_chain, it contains permuted
5605 vectors, that are ready for vector computation. */
5606 result_chain.create (size);
5608 /* If the reassociation width for the vector type is 2 or greater, the target
5609 machine can execute 2 or more vector instructions in parallel. Otherwise
5610 try to get the chain for the load group using vect_shift_permute_load_chain. */
5611 mode = TYPE_MODE (STMT_VINFO_VECTYPE (vinfo_for_stmt (stmt)));
5612 if (targetm.sched.reassociation_width (VEC_PERM_EXPR, mode) > 1
5613 || exact_log2 (size) != -1
5614 || !vect_shift_permute_load_chain (dr_chain, size, stmt,
5615 gsi, &result_chain))
5616 vect_permute_load_chain (dr_chain, size, stmt, gsi, &result_chain);
5617 vect_record_grouped_load_vectors (stmt, result_chain);
5618 result_chain.release ();
5621 /* RESULT_CHAIN contains the output of a group of grouped loads that were
5622 generated as part of the vectorization of STMT. Assign the statement
5623 for each vector to the associated scalar statement. */
5626 vect_record_grouped_load_vectors (gimple stmt, vec<tree> result_chain)
5628 gimple first_stmt = GROUP_FIRST_ELEMENT (vinfo_for_stmt (stmt));
5629 gimple next_stmt, new_stmt;
5630 unsigned int i, gap_count;
5633 /* Put a permuted data-ref in the VECTORIZED_STMT field.
5634 Since we scan the chain starting from its first node, their order
5635 corresponds to the order of data-refs in RESULT_CHAIN.
5636 next_stmt = first_stmt;
5638 FOR_EACH_VEC_ELT (result_chain, i, tmp_data_ref)
5643 /* Skip the gaps. Loads created for the gaps will be removed by dead
5644 code elimination pass later. No need to check for the first stmt in
5645 the group, since it always exists.
5646 GROUP_GAP is the number of steps in elements from the previous
5647 access (if there is no gap GROUP_GAP is 1). We skip loads that
5648 correspond to the gaps. */
5649 if (next_stmt != first_stmt
5650 && gap_count < GROUP_GAP (vinfo_for_stmt (next_stmt)))
5658 new_stmt = SSA_NAME_DEF_STMT (tmp_data_ref);
5659 /* We assume that if VEC_STMT is not NULL, this is a case of multiple
5660 copies, and we put the new vector statement in the first available RELATED_STMT field. */
5662 if (!STMT_VINFO_VEC_STMT (vinfo_for_stmt (next_stmt)))
5663 STMT_VINFO_VEC_STMT (vinfo_for_stmt (next_stmt)) = new_stmt;
5666 if (!GROUP_SAME_DR_STMT (vinfo_for_stmt (next_stmt)))
5669 STMT_VINFO_VEC_STMT (vinfo_for_stmt (next_stmt));
5671 STMT_VINFO_RELATED_STMT (vinfo_for_stmt (prev_stmt));
5674 prev_stmt = rel_stmt;
5676 STMT_VINFO_RELATED_STMT (vinfo_for_stmt (rel_stmt));
5679 STMT_VINFO_RELATED_STMT (vinfo_for_stmt (prev_stmt)) =
5684 next_stmt = GROUP_NEXT_ELEMENT (vinfo_for_stmt (next_stmt));
5686 /* If NEXT_STMT accesses the same DR as the previous statement,
5687 put the same TMP_DATA_REF as its vectorized statement; otherwise
5688 get the next data-ref from RESULT_CHAIN. */
5689 if (!next_stmt || !GROUP_SAME_DR_STMT (vinfo_for_stmt (next_stmt)))
5695 /* Function vect_can_force_dr_alignment_p.
5697 Returns whether the alignment of a DECL can be forced to be aligned
5698 on an ALIGNMENT-bit boundary. */
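/* For example (illustrative, not taken from real input): for a file-scope
   "static float a[1024];" the vectorizer may ask

     vect_can_force_dr_alignment_p (a_decl, 256)

   which succeeds when the symbol's alignment may still be increased and 256
   does not exceed MAX_OFILE_ALIGNMENT; for an automatic (stack) variable the
   limit is MAX_STACK_ALIGNMENT instead.  */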
5701 vect_can_force_dr_alignment_p (const_tree decl, unsigned int alignment)
5703 if (TREE_CODE (decl) != VAR_DECL)
5706 if (decl_in_symtab_p (decl)
5707 && !symtab_node::get (decl)->can_increase_alignment_p ())
5710 if (TREE_STATIC (decl))
5711 return (alignment <= MAX_OFILE_ALIGNMENT);
5713 return (alignment <= MAX_STACK_ALIGNMENT);
5717 /* Return whether the data reference DR is supported with respect to its alignment.
5719 If CHECK_ALIGNED_ACCESSES is TRUE, check if the access is supported even if
5720 it is aligned, i.e., check if it is possible to vectorize it with a different alignment. */
5723 enum dr_alignment_support
5724 vect_supportable_dr_alignment (struct data_reference *dr,
5725 bool check_aligned_accesses)
5727 gimple stmt = DR_STMT (dr);
5728 stmt_vec_info stmt_info = vinfo_for_stmt (stmt);
5729 tree vectype = STMT_VINFO_VECTYPE (stmt_info);
5730 machine_mode mode = TYPE_MODE (vectype);
5731 loop_vec_info loop_vinfo = STMT_VINFO_LOOP_VINFO (stmt_info);
5732 struct loop *vect_loop = NULL;
5733 bool nested_in_vect_loop = false;
5735 if (aligned_access_p (dr) && !check_aligned_accesses)
5738 /* For now assume all conditional loads/stores support unaligned
5739 access without any special code. */
5740 if (is_gimple_call (stmt)
5741 && gimple_call_internal_p (stmt)
5742 && (gimple_call_internal_fn (stmt) == IFN_MASK_LOAD
5743 || gimple_call_internal_fn (stmt) == IFN_MASK_STORE))
5744 return dr_unaligned_supported;
5748 vect_loop = LOOP_VINFO_LOOP (loop_vinfo);
5749 nested_in_vect_loop = nested_in_vect_loop_p (vect_loop, stmt);
5752 /* Possibly unaligned access. */
5754 /* We can choose between using the implicit realignment scheme (generating
5755 a misaligned_move stmt) and the explicit realignment scheme (generating
5756 aligned loads with a REALIGN_LOAD). There are two variants to the
5757 explicit realignment scheme: optimized, and unoptimized.
5758 We can optimize the realignment only if the step between consecutive
5759 vector loads is equal to the vector size. Since the vector memory
5760 accesses advance in steps of VS (Vector Size) in the vectorized loop, it
5761 is guaranteed that the misalignment amount remains the same throughout the
5762 execution of the vectorized loop. Therefore, we can create the
5763 "realignment token" (the permutation mask that is passed to REALIGN_LOAD)
5764 at the loop preheader.
5766 However, in the case of outer-loop vectorization, when vectorizing a
5767 memory access in the inner-loop nested within the LOOP that is now being
5768 vectorized, while it is guaranteed that the misalignment of the
5769 vectorized memory access will remain the same in different outer-loop
5770 iterations, it is *not* guaranteed that it will remain the same throughout
5771 the execution of the inner-loop. This is because the inner-loop advances
5772 with the original scalar step (and not in steps of VS). If the inner-loop
5773 step happens to be a multiple of VS, then the misalignment remains fixed
5774 and we can use the optimized realignment scheme. For example:
5780 When vectorizing the i-loop in the above example, the step between
5781 consecutive vector loads is 1, and so the misalignment does not remain
5782 fixed across the execution of the inner-loop, and the realignment cannot
5783 be optimized (as illustrated in the following pseudo vectorized loop):
5785 for (i=0; i<N; i+=4)
5786 for (j=0; j<M; j++){
5787 vs += vp[i+j]; // misalignment of &vp[i+j] is {0,1,2,3,0,1,2,3,...}
5788 // when j is {0,1,2,3,4,5,6,7,...} respectively.
5789 // (assuming that we start from an aligned address).
5792 We therefore have to use the unoptimized realignment scheme:
5794 for (i=0; i<N; i+=4)
5795 for (j=k; j<M; j+=4)
5796 vs += vp[i+j]; // misalignment of &vp[i+j] is always k (assuming
5797 // that the misalignment of the initial address is
5800 The loop can then be vectorized as follows:
5802 for (k=0; k<4; k++){
5803 rt = get_realignment_token (&vp[k]);
5804 for (i=0; i<N; i+=4){
5806 for (j=k; j<M; j+=4){
5808 va = REALIGN_LOAD <v1,v2,rt>;
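/* In summary (a paraphrase of the code below, not an additional rule): an
   already-aligned access is accepted right away; masked loads/stores are
   assumed to tolerate misalignment (dr_unaligned_supported); misaligned reads
   prefer the REALIGN_LOAD schemes when the vec_realign_load optab and the
   mask-for-load builtin are available, falling back to the non-optimized
   variant for inner-loop accesses whose step differs from the vector size;
   otherwise targetm.vectorize.support_vector_misalignment decides between
   dr_unaligned_supported and dr_unaligned_unsupported.  */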
5815 if (DR_IS_READ (dr))
5817 bool is_packed = false;
5818 tree type = (TREE_TYPE (DR_REF (dr)));
5820 if (optab_handler (vec_realign_load_optab, mode) != CODE_FOR_nothing
5821 && (!targetm.vectorize.builtin_mask_for_load
5822 || targetm.vectorize.builtin_mask_for_load ()))
5824 tree vectype = STMT_VINFO_VECTYPE (stmt_info);
5825 if ((nested_in_vect_loop
5826 && (TREE_INT_CST_LOW (DR_STEP (dr))
5827 != GET_MODE_SIZE (TYPE_MODE (vectype))))
5829 return dr_explicit_realign;
5831 return dr_explicit_realign_optimized;
5833 if (!known_alignment_for_access_p (dr))
5834 is_packed = not_size_aligned (DR_REF (dr));
5836 if ((TYPE_USER_ALIGN (type) && !is_packed)
5837 || targetm.vectorize.
5838 support_vector_misalignment (mode, type,
5839 DR_MISALIGNMENT (dr), is_packed))
5840 /* Can't software pipeline the loads, but can at least do them. */
5841 return dr_unaligned_supported;
5845 bool is_packed = false;
5846 tree type = (TREE_TYPE (DR_REF (dr)));
5848 if (!known_alignment_for_access_p (dr))
5849 is_packed = not_size_aligned (DR_REF (dr));
5851 if ((TYPE_USER_ALIGN (type) && !is_packed)
5852 || targetm.vectorize.
5853 support_vector_misalignment (mode, type,
5854 DR_MISALIGNMENT (dr), is_packed))
5855 return dr_unaligned_supported;
5859 return dr_unaligned_unsupported;