/* Inlining decision heuristics.
   Copyright (C) 2003-2015 Free Software Foundation, Inc.
   Contributed by Jan Hubicka

This file is part of GCC.

GCC is free software; you can redistribute it and/or modify it under
the terms of the GNU General Public License as published by the Free
Software Foundation; either version 3, or (at your option) any later
version.

GCC is distributed in the hope that it will be useful, but WITHOUT ANY
WARRANTY; without even the implied warranty of MERCHANTABILITY or
FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
for more details.

You should have received a copy of the GNU General Public License
along with GCC; see the file COPYING3.  If not see
<http://www.gnu.org/licenses/>.  */
/* Inlining decision heuristics

   The implementation of the inliner is organized as follows:

   inlining heuristics limits

     can_inline_edge_p allows checking that a particular inlining is allowed
     by the limits specified by the user (allowed function growth, growth and
     so on).

     Functions are inlined when it is obvious the result is profitable (such
     as functions called once or when inlining reduces code size).
     In addition to that we perform inlining of small functions and recursive
     inlining.

   inlining heuristics

     The inliner itself is split into two passes:

     pass_early_inlining

       Simple local inlining pass inlining callees into the current function.
       This pass makes no use of whole unit analysis and thus it can do only
       very simple decisions based on local properties.

       The strength of the pass is that it is run in topological order
       (reverse postorder) on the callgraph.  Functions are converted into SSA
       form just before this pass and optimized subsequently.  As a result,
       the callees of the function seen by the early inliner were already
       optimized and the results of early inlining add a lot of optimization
       opportunities for the local optimization.

       The pass handles the obvious inlining decisions within the compilation
       unit - inlining auto inline functions, inlining for size and
       flattening.

       The main strength of the pass is the ability to eliminate abstraction
       penalty in C++ code (via a combination of inlining and early
       optimization) and thus improve quality of analysis done by the real IPA
       optimizers.

       Because of the lack of whole unit knowledge, the pass cannot really
       make good code size/performance tradeoffs.  It however does very simple
       speculative inlining allowing code size to grow by
       EARLY_INLINING_INSNS when the callee is a leaf function.  In this case
       the optimizations performed later are very likely to eliminate the
       cost.

     pass_ipa_inline

       This is the real inliner able to handle inlining with whole program
       knowledge.  It performs the following steps:

       1) inlining of small functions.  This is implemented by a greedy
       algorithm ordering all inlinable cgraph edges by their badness and
       inlining them in this order as long as the inline limits allow doing
       so.

       This heuristic is not very good at inlining recursive calls.  Recursive
       calls can be inlined with results similar to loop unrolling.  To do so,
       a special purpose recursive inliner is executed on the function when a
       recursive edge is met as a viable candidate.

       2) Unreachable functions are removed from the callgraph.  Inlining
       leads to devirtualization and other modification of the callgraph, so
       functions may become unreachable during the process.  Also functions
       declared as extern inline or virtual functions are removed, since after
       inlining we no longer need the offline bodies.

       3) Functions called once and not exported from the unit are inlined.
       This should almost always lead to a reduction of code size by
       eliminating the need for an offline copy of the function.  */
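/* For orientation only: the greedy step 1) above can be pictured as the
   following simplified sketch.  It is illustrative, not the actual
   implementation, and elides the cache resets, key updates and the
   recursive-inlining special case that inline_small_functions performs:

     for each inlinable edge E:  heap.insert (edge_badness (E), E);
     while (!heap.empty ())
       {
	 E = heap.extract_min ();
	 if (E's cached badness is out of date)
	   { heap.insert (recomputed badness, E); continue; }
	 if (can_inline_edge_p (E) && want_inline_small_function_p (E)
	     && overall_size + growth <= compute_max_insns (...))
	   inline_call (E, ...);   // then update keys of affected edges
       }  */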
#include "coretypes.h"
#include "double-int.h"
#include "wide-int.h"
#include "fold-const.h"
#include "trans-mem.h"
#include "tree-inline.h"
#include "langhooks.h"
#include "diagnostic.h"
#include "gimple-pretty-print.h"
#include "tree-pass.h"
#include "coverage.h"
#include "hard-reg-set.h"
#include "function.h"
#include "basic-block.h"
#include "tree-ssa-alias.h"
#include "internal-fn.h"
#include "gimple-expr.h"
#include "gimple-ssa.h"
#include "hash-map.h"
#include "plugin-api.h"
#include "alloc-pool.h"
#include "symbol-summary.h"
#include "ipa-prop.h"
#include "ipa-inline.h"
#include "ipa-utils.h"
#include "auto-profile.h"
#include "builtins.h"
#include "fibonacci_heap.h"
#include "lto-streamer.h"
typedef fibonacci_heap <sreal, cgraph_edge> edge_heap_t;
typedef fibonacci_node <sreal, cgraph_edge> edge_heap_node_t;

/* Statistics we collect about the inlining algorithm.  */
static int overall_size;
static gcov_type max_count;
static gcov_type spec_rem;

/* Pre-computed constants 1/CGRAPH_FREQ_BASE and 1/100.  */
static sreal cgraph_freq_base_rec, percent_rec;
/* Return false when inlining edge E would lead to violating
   limits on function unit growth or stack usage growth.

   The relative function body growth limit is present generally
   to avoid problems with non-linear behavior of the compiler.
   To allow inlining huge functions into tiny wrappers, the limit
   is always based on the bigger of the two functions considered.

   For stack growth limits we always base the growth on the stack usage
   of the callers.  We want to prevent applications from segfaulting
   on stack overflow when functions with huge stack frames get
   inlined.  */
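/* A worked example of the body-growth limit (illustrative numbers,
   assuming --param large-function-growth=100 and
   --param large-function-insns=2700): when a 3000-insn callee is inlined
   into a 400-insn wrapper, the bigger self size, 3000, is the base, so
   the combined body may reach 3000 + 100% = 6000 insns and the 3400-insn
   result is accepted.  The growth limit only bites once the new size also
   exceeds the 2700-insn large-function threshold.  */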
caller_growth_limits (struct cgraph_edge *e)
  struct cgraph_node *to = e->caller;
  struct cgraph_node *what = e->callee->ultimate_alias_target ();
  HOST_WIDE_INT stack_size_limit = 0, inlined_stack;
  inline_summary *info, *what_info, *outer_info = inline_summaries->get (to);

  /* Look for the function e->caller is inlined into.  While doing
     so, work out the largest function body on the way.  As
     described above, we want to base our function growth
     limits on that.  Not on the self size of the
     outer function, not on the self size of the inline code
     we immediately inline into.  This is the most relaxed
     interpretation of the rule "do not grow large functions
     too much in order to prevent compiler from exploding".  */
  info = inline_summaries->get (to);
  if (limit < info->self_size)
    limit = info->self_size;
  if (stack_size_limit < info->estimated_self_stack_size)
    stack_size_limit = info->estimated_self_stack_size;
  if (to->global.inlined_to)
    to = to->callers->caller;
  what_info = inline_summaries->get (what);

  if (limit < what_info->self_size)
    limit = what_info->self_size;

  limit += limit * PARAM_VALUE (PARAM_LARGE_FUNCTION_GROWTH) / 100;

  /* Check the size after inlining against the function limits.  But allow
     the function to shrink if it went over the limits by forced inlining.  */
  newsize = estimate_size_after_inlining (to, e);
  if (newsize >= info->size
      && newsize > PARAM_VALUE (PARAM_LARGE_FUNCTION_INSNS)
      e->inline_failed = CIF_LARGE_FUNCTION_GROWTH_LIMIT;
  if (!what_info->estimated_stack_size)
    return true;

  /* FIXME: Stack size limit often prevents inlining in Fortran programs
     due to large i/o data structures used by the Fortran front-end.
     We ought to ignore this limit when we know that the edge is executed
     on every invocation of the caller (i.e. its call statement dominates
     the exit block).  We do not track this information, yet.  */
  stack_size_limit += ((gcov_type)stack_size_limit
		       * PARAM_VALUE (PARAM_STACK_FRAME_GROWTH) / 100);

  inlined_stack = (outer_info->stack_frame_offset
		   + outer_info->estimated_self_stack_size
		   + what_info->estimated_stack_size);
  /* Check new stack consumption with stack consumption at the place
     stack is used.  */
  if (inlined_stack > stack_size_limit
      /* If function already has large stack usage from sibling
	 inline call, we can inline, too.
	 This bit overoptimistically assumes that we are good at stack
	 packing.  */
      && inlined_stack > info->estimated_stack_size
      && inlined_stack > PARAM_VALUE (PARAM_LARGE_STACK_FRAME))
      e->inline_failed = CIF_LARGE_STACK_FRAME_GROWTH_LIMIT;
/* Dump info about why inlining has failed.  */

report_inline_failed_reason (struct cgraph_edge *e)
      fprintf (dump_file, " not inlinable: %s/%i -> %s/%i, %s\n",
	       xstrdup_for_dump (e->caller->name ()), e->caller->order,
	       xstrdup_for_dump (e->callee->name ()), e->callee->order,
	       cgraph_inline_failed_string (e->inline_failed));
      if ((e->inline_failed == CIF_TARGET_OPTION_MISMATCH
	   || e->inline_failed == CIF_OPTIMIZATION_MISMATCH)
	  && e->caller->lto_file_data
	  && e->callee->function_symbol ()->lto_file_data)
	fprintf (dump_file, " LTO objects: %s, %s\n",
		 e->caller->lto_file_data->file_name,
		 e->callee->function_symbol ()->lto_file_data->file_name);
      if (e->inline_failed == CIF_TARGET_OPTION_MISMATCH)
	cl_target_option_print_diff
	  (dump_file, 2, target_opts_for_fn (e->caller->decl),
	   target_opts_for_fn (e->callee->ultimate_alias_target ()->decl));
      if (e->inline_failed == CIF_OPTIMIZATION_MISMATCH)
	cl_optimization_print_diff
	  (dump_file, 2, opts_for_fn (e->caller->decl),
	   opts_for_fn (e->callee->ultimate_alias_target ()->decl));
/* Decide whether sanitizer-related attributes allow inlining.  */

sanitize_attrs_match_for_inline_p (const_tree caller, const_tree callee)
  /* Don't care if sanitizer is disabled.  */
  if (!(flag_sanitize & SANITIZE_ADDRESS))
    return true;

  if (!caller || !callee)
    return true;

  return !!lookup_attribute ("no_sanitize_address",
			     DECL_ATTRIBUTES (caller)) ==
	 !!lookup_attribute ("no_sanitize_address",
			     DECL_ATTRIBUTES (callee));
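/* For example (illustrative only): with -fsanitize=address, a callee
   compiled with instrumentation must not be inlined into a caller marked
   __attribute__ ((no_sanitize_address)), and vice versa, since that would
   mix instrumented and uninstrumented code within one body.  When both or
   neither carry the attribute, the predicate above returns true.  */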
/* Decide if we can inline the edge and possibly update
   inline_failed reason.
   We check whether inlining is possible at all and whether
   caller growth limits allow doing so.

   If REPORT is true, output reason to the dump file.

   If DISREGARD_LIMITS is true, ignore size limits.  */

can_inline_edge_p (struct cgraph_edge *e, bool report,
		   bool disregard_limits = false, bool early = false)
  bool inlinable = true;
  enum availability avail;
  cgraph_node *callee = e->callee->ultimate_alias_target (&avail);
  cgraph_node *caller = e->caller->global.inlined_to
			? e->caller->global.inlined_to : e->caller;
  tree caller_tree = DECL_FUNCTION_SPECIFIC_OPTIMIZATION (caller->decl);
  tree callee_tree
    = callee ? DECL_FUNCTION_SPECIFIC_OPTIMIZATION (callee->decl) : NULL;
  struct function *caller_fun = caller->get_fun ();
  struct function *callee_fun = callee ? callee->get_fun () : NULL;

  gcc_assert (e->inline_failed);
  if (!callee || !callee->definition)
      e->inline_failed = CIF_BODY_NOT_AVAILABLE;
  else if (callee->calls_comdat_local)
      e->inline_failed = CIF_USES_COMDAT_LOCAL;
  else if (!inline_summaries->get (callee)->inlinable
	   || (caller_fun && fn_contains_cilk_spawn_p (caller_fun)))
      e->inline_failed = CIF_FUNCTION_NOT_INLINABLE;
  else if (avail <= AVAIL_INTERPOSABLE)
      e->inline_failed = CIF_OVERWRITABLE;
  else if (e->call_stmt_cannot_inline_p)
      if (e->inline_failed != CIF_FUNCTION_NOT_OPTIMIZED)
	e->inline_failed = CIF_MISMATCHED_ARGUMENTS;
  /* Don't inline if the functions have different EH personalities.  */
  else if (DECL_FUNCTION_PERSONALITY (caller->decl)
	   && DECL_FUNCTION_PERSONALITY (callee->decl)
	   && (DECL_FUNCTION_PERSONALITY (caller->decl)
	       != DECL_FUNCTION_PERSONALITY (callee->decl)))
      e->inline_failed = CIF_EH_PERSONALITY;
  /* TM pure functions should not be inlined into non-TM_pure
     wrappers.  */
  else if (is_tm_pure (callee->decl)
	   && !is_tm_pure (caller->decl))
      e->inline_failed = CIF_UNSPECIFIED;
  /* Don't inline if the callee can throw non-call exceptions but the
     caller cannot.
     FIXME: this is obviously wrong for LTO where STRUCT_FUNCTION is missing.
     Move the flag into cgraph node or mirror it in the inline summary.  */
  else if (callee_fun && callee_fun->can_throw_non_call_exceptions
	   && !(caller_fun && caller_fun->can_throw_non_call_exceptions))
      e->inline_failed = CIF_NON_CALL_EXCEPTIONS;
  /* Check compatibility of target optimization options.  */
  else if (!targetm.target_option.can_inline_p (caller->decl,
						callee->decl))
      e->inline_failed = CIF_TARGET_OPTION_MISMATCH;
  /* Don't inline a function with mismatched sanitization attributes.  */
  else if (!sanitize_attrs_match_for_inline_p (caller->decl, callee->decl))
      e->inline_failed = CIF_ATTRIBUTE_MISMATCH;
  /* Check if caller growth allows the inlining.  */
  else if (!DECL_DISREGARD_INLINE_LIMITS (callee->decl)
	   && !disregard_limits
	   && !lookup_attribute ("flatten",
				 DECL_ATTRIBUTES (caller->decl))
	   && !caller_growth_limits (e))
  /* Don't inline a function with a higher optimization level than the
     caller.  FIXME: this is really just the tip of the iceberg of handling
     the optimization attribute.  */
  else if (caller_tree != callee_tree)
    {
      /* There are some options that change IL semantics which means
	 we cannot inline in these cases for correctness reasons.
	 Not even for always_inline declared functions.  */
      /* Strictly speaking only when the callee contains signed integer
	 math where overflow is undefined.  */
      if ((opt_for_fn (caller->decl, flag_strict_overflow)
	   != opt_for_fn (callee->decl, flag_strict_overflow))
	  || (opt_for_fn (caller->decl, flag_wrapv)
	      != opt_for_fn (callee->decl, flag_wrapv))
	  || (opt_for_fn (caller->decl, flag_trapv)
	      != opt_for_fn (callee->decl, flag_trapv))
	  /* Strictly speaking only when the callee contains memory
	     accesses that are not using alias-set zero anyway.  */
	  || (opt_for_fn (caller->decl, flag_strict_aliasing)
	      != opt_for_fn (callee->decl, flag_strict_aliasing))
	  /* Strictly speaking only when the callee uses FP math.  */
	  || (opt_for_fn (caller->decl, flag_rounding_math)
	      != opt_for_fn (callee->decl, flag_rounding_math))
	  || (opt_for_fn (caller->decl, flag_trapping_math)
	      != opt_for_fn (callee->decl, flag_trapping_math))
	  || (opt_for_fn (caller->decl, flag_unsafe_math_optimizations)
	      != opt_for_fn (callee->decl, flag_unsafe_math_optimizations))
	  || (opt_for_fn (caller->decl, flag_finite_math_only)
	      != opt_for_fn (callee->decl, flag_finite_math_only))
	  || (opt_for_fn (caller->decl, flag_signaling_nans)
	      != opt_for_fn (callee->decl, flag_signaling_nans))
	  || (opt_for_fn (caller->decl, flag_cx_limited_range)
	      != opt_for_fn (callee->decl, flag_cx_limited_range))
	  || (opt_for_fn (caller->decl, flag_signed_zeros)
	      != opt_for_fn (callee->decl, flag_signed_zeros))
	  || (opt_for_fn (caller->decl, flag_associative_math)
	      != opt_for_fn (callee->decl, flag_associative_math))
	  || (opt_for_fn (caller->decl, flag_reciprocal_math)
	      != opt_for_fn (callee->decl, flag_reciprocal_math))
	  /* Strictly speaking only when the callee contains function
	     calls that may end up setting errno.  */
	  || (opt_for_fn (caller->decl, flag_errno_math)
	      != opt_for_fn (callee->decl, flag_errno_math))
	  /* When devirtualization is disabled for the callee, it is not safe
	     to inline it as we possibly mangled the type info.
	     Allow early inlining of always inlines.  */
	  || (opt_for_fn (caller->decl, flag_devirtualize)
	      && !opt_for_fn (callee->decl, flag_devirtualize)
	      && (!early
		  || (!DECL_DISREGARD_INLINE_LIMITS (callee->decl)
		      || !lookup_attribute ("always_inline",
					    DECL_ATTRIBUTES (callee->decl))))))
	e->inline_failed = CIF_OPTIMIZATION_MISMATCH;
      /* gcc.dg/pr43564.c.  Apply user-forced inline even at -O0.  */
      else if (DECL_DISREGARD_INLINE_LIMITS (callee->decl)
	       && lookup_attribute ("always_inline",
				    DECL_ATTRIBUTES (callee->decl)))
	;
      /* When the user added an attribute to the callee honor it.  */
      else if (lookup_attribute ("optimize", DECL_ATTRIBUTES (callee->decl))
	       && opts_for_fn (caller->decl) != opts_for_fn (callee->decl))
	  e->inline_failed = CIF_OPTIMIZATION_MISMATCH;
      /* If the mismatch is caused by merging two LTO units with different
	 optimization flags we want to be a bit nicer.  However never inline
	 if one of the functions is not optimized at all.  */
      else if (!opt_for_fn (callee->decl, optimize)
	       || !opt_for_fn (caller->decl, optimize))
	  e->inline_failed = CIF_OPTIMIZATION_MISMATCH;
      /* If callee is optimized for size and caller is not, allow inlining if
	 code shrinks or we are in MAX_INLINE_INSNS_SINGLE limit and callee
	 is inline (and thus likely a unified comdat).  This will allow the
	 caller to be optimized for size.  */
      else if (opt_for_fn (callee->decl, optimize_size)
	       > opt_for_fn (caller->decl, optimize_size))
	{
	  int growth = estimate_edge_growth (e);
	  if (growth > 0
	      && (!DECL_DECLARED_INLINE_P (callee->decl)
		  && growth >= MAX (MAX_INLINE_INSNS_SINGLE,
				    MAX_INLINE_INSNS_AUTO)))
	      e->inline_failed = CIF_OPTIMIZATION_MISMATCH;
      /* If callee is more aggressively optimized for performance than the
	 caller, we generally want to inline only cheap (runtime wise)
	 functions.  */
      else if (opt_for_fn (callee->decl, optimize_size)
	       < opt_for_fn (caller->decl, optimize_size)
	       || (opt_for_fn (callee->decl, optimize)
		   >= opt_for_fn (caller->decl, optimize)))
	  if (estimate_edge_time (e)
	      >= 20 + inline_edge_summary (e)->call_stmt_time)
	      e->inline_failed = CIF_OPTIMIZATION_MISMATCH;

  if (!inlinable && report)
    report_inline_failed_reason (e);
/* Return true if the edge E is inlinable during early inlining.  */

can_early_inline_edge_p (struct cgraph_edge *e)
  struct cgraph_node *callee = e->callee->ultimate_alias_target ();
  /* The early inliner might get called at WPA stage when an IPA pass adds a
     new function.  In this case we cannot really do any early inlining
     because function bodies are missing.  */
  if (!gimple_has_body_p (callee->decl))
      e->inline_failed = CIF_BODY_NOT_AVAILABLE;
  /* In the early inliner some of the callees may not be in SSA form yet
     (i.e. the callgraph is cyclic and we did not process
     the callee by the early inliner, yet).  We don't have CIF code for this
     case; later we will re-do the decision in the real inliner.  */
  if (!gimple_in_ssa_p (DECL_STRUCT_FUNCTION (e->caller->decl))
      || !gimple_in_ssa_p (DECL_STRUCT_FUNCTION (callee->decl)))
	fprintf (dump_file, " edge not inlinable: not in SSA form\n");
  if (!can_inline_edge_p (e, true, false, true))
/* Return number of calls in N.  Ignore cheap builtins.  */

num_calls (struct cgraph_node *n)
  struct cgraph_edge *e;

  for (e = n->callees; e; e = e->next_callee)
    if (!is_inexpensive_builtin (e->callee->decl))
/* Return true if we are interested in inlining the small function.  */

want_early_inline_function_p (struct cgraph_edge *e)
  bool want_inline = true;
  struct cgraph_node *callee = e->callee->ultimate_alias_target ();

  if (DECL_DISREGARD_INLINE_LIMITS (callee->decl))
    ;
  /* For AutoFDO, we need to make sure that before profile summary, all
     hot paths' IR look exactly the same as in the profiled binary.  As a
     result, in the early inliner we will disregard the size limit and inline
     those callsites that are:
       * inlined in the profiled binary, and
       * the cloned callee has enough samples to be considered "hot".  */
  else if (flag_auto_profile && afdo_callsite_hot_enough_for_early_inline (e))
    ;
  else if (!DECL_DECLARED_INLINE_P (callee->decl)
	   && !opt_for_fn (e->caller->decl, flag_inline_small_functions))
      e->inline_failed = CIF_FUNCTION_NOT_INLINE_CANDIDATE;
      report_inline_failed_reason (e);
      int growth = estimate_edge_growth (e);
      else if (!e->maybe_hot_p ()
	    fprintf (dump_file, " will not early inline: %s/%i->%s/%i, "
		     "call is cold and code would grow by %i\n",
		     xstrdup_for_dump (e->caller->name ()),
		     xstrdup_for_dump (callee->name ()), callee->order,
      else if (growth > PARAM_VALUE (PARAM_EARLY_INLINING_INSNS))
	    fprintf (dump_file, " will not early inline: %s/%i->%s/%i, "
		     "growth %i exceeds --param early-inlining-insns\n",
		     xstrdup_for_dump (e->caller->name ()),
		     xstrdup_for_dump (callee->name ()), callee->order,
      else if ((n = num_calls (callee)) != 0
	       && growth * (n + 1) > PARAM_VALUE (PARAM_EARLY_INLINING_INSNS))
	    fprintf (dump_file, " will not early inline: %s/%i->%s/%i, "
		     "growth %i exceeds --param early-inlining-insns "
		     "divided by number of calls\n",
		     xstrdup_for_dump (e->caller->name ()),
		     xstrdup_for_dump (callee->name ()), callee->order,
/* Compute time of the edge->caller + edge->callee execution when inlining
   does not happen.  */

compute_uninlined_call_time (struct inline_summary *callee_info,
			     struct cgraph_edge *edge)
  sreal uninlined_call_time = (sreal)callee_info->time;
  cgraph_node *caller = (edge->caller->global.inlined_to
			 ? edge->caller->global.inlined_to
			 : edge->caller);

  if (edge->count && caller->count)
    uninlined_call_time *= (sreal)edge->count / caller->count;
  if (edge->frequency)
    uninlined_call_time *= cgraph_freq_base_rec * edge->frequency;
  else
    uninlined_call_time = uninlined_call_time >> 11;

  int caller_time = inline_summaries->get (caller)->time;
  return uninlined_call_time + caller_time;
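/* A worked example of the estimate above (illustrative numbers): for a
   callee summary time of 500, edge->count 50, caller->count 100 and
   edge->frequency equal to CGRAPH_FREQ_BASE, the uninlined call costs
   500 * (50/100) * 1 = 250 units on top of the caller's own time; the
   >> 11 scaling is only the fallback used when no frequency information
   is available.  */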
/* Same as compute_uninlined_call_time but compute time when inlining
   does happen.  */

compute_inlined_call_time (struct cgraph_edge *edge,
			   int edge_time)
  cgraph_node *caller = (edge->caller->global.inlined_to
			 ? edge->caller->global.inlined_to
			 : edge->caller);
  int caller_time = inline_summaries->get (caller)->time;
  sreal time = edge_time;

  if (edge->count && caller->count)
    time *= (sreal)edge->count / caller->count;
  if (edge->frequency)
    time *= cgraph_freq_base_rec * edge->frequency;

  /* This calculation should match the one in ipa-inline-analysis.
     FIXME: Once ipa-inline-analysis is converted to sreal this can be
     simplified.  */
  time -= (sreal) ((gcov_type) edge->frequency
		   * inline_edge_summary (edge)->call_stmt_time
		   * (INLINE_TIME_SCALE / CGRAPH_FREQ_BASE)) / INLINE_TIME_SCALE;
  if (time <= 0)
    time = ((sreal) 1) >> 8;
  gcc_checking_assert (time >= 0);
/* Return true if the speedup for inlining E is bigger than
   PARAM_INLINE_MIN_SPEEDUP.  */
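/* A worked example (illustrative numbers, assuming
   --param inline-min-speedup=10): if the combined caller+callee time
   without inlining is 200 and the estimated time with inlining is 170,
   the saving of 30 exceeds 200 * 10% = 20, so the edge qualifies as a
   big speedup.  */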
big_speedup_p (struct cgraph_edge *e)
  sreal time = compute_uninlined_call_time (inline_summaries->get (e->callee),
					    e);
  sreal inlined_time = compute_inlined_call_time (e, estimate_edge_time (e));

  if (time - inlined_time
      > (sreal) time * PARAM_VALUE (PARAM_INLINE_MIN_SPEEDUP)
/* Return true if we are interested in inlining the small function.
   When REPORT is true, report the reason to the dump file.  */

want_inline_small_function_p (struct cgraph_edge *e, bool report)
  bool want_inline = true;
  struct cgraph_node *callee = e->callee->ultimate_alias_target ();

  if (DECL_DISREGARD_INLINE_LIMITS (callee->decl))
    ;
  else if (!DECL_DECLARED_INLINE_P (callee->decl)
	   && !opt_for_fn (e->caller->decl, flag_inline_small_functions))
      e->inline_failed = CIF_FUNCTION_NOT_INLINE_CANDIDATE;
  /* Do fast and conservative check if the function can be a good
     inline candidate.  At the moment we allow inline hints to
     promote non-inline functions to inline and we increase
     MAX_INLINE_INSNS_SINGLE 16-fold for inline functions.  */
  else if ((!DECL_DECLARED_INLINE_P (callee->decl)
	    && (!e->count || !e->maybe_hot_p ()))
	   && inline_summaries->get (callee)->min_size
	      - inline_edge_summary (e)->call_stmt_size
	      > MAX (MAX_INLINE_INSNS_SINGLE, MAX_INLINE_INSNS_AUTO))
      e->inline_failed = CIF_MAX_INLINE_INSNS_AUTO_LIMIT;
  else if ((DECL_DECLARED_INLINE_P (callee->decl) || e->count)
	   && inline_summaries->get (callee)->min_size
	      - inline_edge_summary (e)->call_stmt_size
	      > 16 * MAX_INLINE_INSNS_SINGLE)
      e->inline_failed = (DECL_DECLARED_INLINE_P (callee->decl)
			  ? CIF_MAX_INLINE_INSNS_SINGLE_LIMIT
			  : CIF_MAX_INLINE_INSNS_AUTO_LIMIT);
      int growth = estimate_edge_growth (e);
      inline_hints hints = estimate_edge_hints (e);
      bool big_speedup = big_speedup_p (e);

      if (growth <= 0)
	;
      /* Apply MAX_INLINE_INSNS_SINGLE limit.  Do not do so when
	 hints suggest that inlining the given function is very
	 profitable.  */
      else if (DECL_DECLARED_INLINE_P (callee->decl)
	       && growth >= MAX_INLINE_INSNS_SINGLE
	       && ((!big_speedup
		    && !(hints & (INLINE_HINT_indirect_call
				  | INLINE_HINT_known_hot
				  | INLINE_HINT_loop_iterations
				  | INLINE_HINT_array_index
				  | INLINE_HINT_loop_stride)))
		   || growth >= MAX_INLINE_INSNS_SINGLE * 16))
	  e->inline_failed = CIF_MAX_INLINE_INSNS_SINGLE_LIMIT;
      else if (!DECL_DECLARED_INLINE_P (callee->decl)
	       && !opt_for_fn (e->caller->decl, flag_inline_functions))
	  /* growth_likely_positive is expensive, always test it last.  */
	  if (growth >= MAX_INLINE_INSNS_SINGLE
	      || growth_likely_positive (callee, growth))
	      e->inline_failed = CIF_NOT_DECLARED_INLINED;
      /* Apply MAX_INLINE_INSNS_AUTO limit for functions not declared inline.
	 Upgrade it to MAX_INLINE_INSNS_SINGLE when hints suggest that
	 inlining the given function is very profitable.  */
      else if (!DECL_DECLARED_INLINE_P (callee->decl)
	       && !big_speedup
	       && !(hints & INLINE_HINT_known_hot)
	       && growth >= ((hints & (INLINE_HINT_indirect_call
				       | INLINE_HINT_loop_iterations
				       | INLINE_HINT_array_index
				       | INLINE_HINT_loop_stride))
			     ? MAX (MAX_INLINE_INSNS_AUTO,
				    MAX_INLINE_INSNS_SINGLE)
			     : MAX_INLINE_INSNS_AUTO))
	  /* growth_likely_positive is expensive, always test it last.  */
	  if (growth >= MAX_INLINE_INSNS_SINGLE
	      || growth_likely_positive (callee, growth))
	      e->inline_failed = CIF_MAX_INLINE_INSNS_AUTO_LIMIT;
      /* If the call is cold, do not inline when the function body would
	 grow.  */
      else if (!e->maybe_hot_p ()
	       && (growth >= MAX_INLINE_INSNS_SINGLE
		   || growth_likely_positive (callee, growth)))
	  e->inline_failed = CIF_UNLIKELY_CALL;

  if (!want_inline && report)
    report_inline_failed_reason (e);
/* EDGE is a self recursive edge.
   We handle two cases - when function A is inlined into itself
   or when function A is being inlined into another inlined copy of function
   A within function B.

   In the first case OUTER_NODE points to the toplevel copy of A, while
   in the second case OUTER_NODE points to the outermost copy of A in B.

   In both cases we want to be extra selective since
   inlining the call will just introduce new recursive calls.  */
want_inline_self_recursive_call_p (struct cgraph_edge *edge,
				   struct cgraph_node *outer_node,
				   bool peeling,
				   int depth)
  char const *reason = NULL;
  bool want_inline = true;
  int caller_freq = CGRAPH_FREQ_BASE;
  int max_depth = PARAM_VALUE (PARAM_MAX_INLINE_RECURSIVE_DEPTH_AUTO);

  if (DECL_DECLARED_INLINE_P (edge->caller->decl))
    max_depth = PARAM_VALUE (PARAM_MAX_INLINE_RECURSIVE_DEPTH);

  if (!edge->maybe_hot_p ())
      reason = "recursive call is cold";
  else if (max_count && !outer_node->count)
      reason = "not executed in profile";
  else if (depth > max_depth)
      reason = "--param max-inline-recursive-depth exceeded.";

  if (outer_node->global.inlined_to)
    caller_freq = outer_node->callers->frequency;

      reason = "function is inlined and unlikely";
  /* Inlining of a self recursive function into a copy of itself within
     another function is a transformation similar to loop peeling.

     Peeling is profitable if we can inline enough copies to make the
     probability of an actual call to the self recursive function very small.
     Be sure that the probability of recursion is small.

     We ensure that the frequency of recursing is at most 1 - (1/max_depth).
     This way the expected number of recursions is at most max_depth.  */
      int max_prob = CGRAPH_FREQ_BASE - ((CGRAPH_FREQ_BASE + max_depth - 1)
					 / max_depth);
      for (i = 1; i < depth; i++)
	max_prob = max_prob * max_prob / CGRAPH_FREQ_BASE;
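      /* A worked example (illustrative numbers): with CGRAPH_FREQ_BASE
	 1000 and max_depth 8, max_prob starts at 1000 - ceil (1000 / 8)
	 = 875, i.e. a recursion probability of at most 87.5%; squaring it
	 once per level already inlined tightens the bound further, which
	 keeps the expected recursion depth at or below max_depth.  */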
	  && (edge->count * CGRAPH_FREQ_BASE / outer_node->count
	  reason = "profile of recursive call is too large";
	  && (edge->frequency * CGRAPH_FREQ_BASE / caller_freq
	  reason = "frequency of recursive call is too large";
  /* Recursive inlining, i.e. the equivalent of unrolling, is profitable if
     the recursion depth is large.  We reduce function call overhead and
     increase chances that things fit in hardware return predictor.

     Recursive inlining might however increase cost of stack frame setup
     actually slowing down functions whose recursion tree is wide rather than
     deep.

     Deciding reliably on when to do recursive inlining without profile
     feedback is tricky.  For now we disable recursive inlining when the
     probability of self recursion is too small.

     Recursive inlining of a self recursive call within a loop also results in
     large loop depths that generally optimize badly.  We may want to throttle
     down inlining in those cases.  In particular this seems to happen in one
     of the libstdc++ rb tree methods.  */
	  && (edge->count * 100 / outer_node->count
	      <= PARAM_VALUE (PARAM_MIN_INLINE_RECURSIVE_PROBABILITY)))
	  reason = "profile of recursive call is too small";
	  && (edge->frequency * 100 / caller_freq
	      <= PARAM_VALUE (PARAM_MIN_INLINE_RECURSIVE_PROBABILITY)))
	  reason = "frequency of recursive call is too small";

  if (!want_inline && dump_file)
    fprintf (dump_file, " not inlining recursively: %s\n", reason);
/* Return true when NODE has an uninlinable caller;
   set HAS_HOT_CALL if it has a hot call.
   Worker for cgraph_for_node_and_aliases.  */

check_callers (struct cgraph_node *node, void *has_hot_call)
  struct cgraph_edge *e;
  for (e = node->callers; e; e = e->next_caller)
    {
      if (!opt_for_fn (e->caller->decl, flag_inline_functions_called_once))
	return true;
      if (!can_inline_edge_p (e, true))
	return true;
      if (e->recursive_p ())
	return true;
      if (!(*(bool *)has_hot_call) && e->maybe_hot_p ())
	*(bool *)has_hot_call = true;
    }
  return false;

/* If NODE has a caller, return true.  */

has_caller_p (struct cgraph_node *node, void *data ATTRIBUTE_UNUSED)
/* Decide if inlining NODE would reduce unit size by eliminating
   the offline copy of the function.
   When COLD is true the cold calls are considered, too.  */

want_inline_function_to_all_callers_p (struct cgraph_node *node, bool cold)
  bool has_hot_call = false;

  /* Aliases get inlined along with the function they alias.  */
  if (node->alias)
    return false;
  /* Already inlined?  */
  if (node->global.inlined_to)
    return false;
  /* Does it have callers?  */
  if (!node->call_for_symbol_and_aliases (has_caller_p, NULL, true))
    return false;
  /* Inlining into all callers would increase size?  */
  if (estimate_growth (node) > 0)
    return false;
  /* All inlines must be possible.  */
  if (node->call_for_symbol_and_aliases (check_callers, &has_hot_call,
    return false;
  if (!cold && !has_hot_call)
    return false;
/* A cost model driving the inlining heuristics in a way so that the edges
   with the smallest badness are inlined first.  After each inlining is
   performed the costs of all caller edges of the nodes affected are
   recomputed so the metrics may accurately depend on values such as the
   number of inlinable callers of the function or the function body size.  */
edge_badness (struct cgraph_edge *edge, bool dump)
  int growth, edge_time;
  struct cgraph_node *callee = edge->callee->ultimate_alias_target ();
  struct inline_summary *callee_info = inline_summaries->get (callee);
  cgraph_node *caller = (edge->caller->global.inlined_to
			 ? edge->caller->global.inlined_to
			 : edge->caller);

  growth = estimate_edge_growth (edge);
  edge_time = estimate_edge_time (edge);
  hints = estimate_edge_hints (edge);
  gcc_checking_assert (edge_time >= 0);
  gcc_checking_assert (edge_time <= callee_info->time);
  gcc_checking_assert (growth <= callee_info->size);

      fprintf (dump_file, " Badness calculation for %s/%i -> %s/%i\n",
	       xstrdup_for_dump (edge->caller->name ()),
	       edge->caller->order,
	       xstrdup_for_dump (callee->name ()),
	       edge->callee->order);
      fprintf (dump_file, " size growth %i, time %i ",
      dump_inline_hints (dump_file, hints);
      if (big_speedup_p (edge))
	fprintf (dump_file, " big_speedup");
      fprintf (dump_file, "\n");
  /* Always prefer inlining that saves code size.  */
  if (growth <= 0)
    {
      badness = (sreal) (-SREAL_MIN_SIG + growth) << (SREAL_MAX_EXP / 256);
      if (dump)
	fprintf (dump_file, " %f: Growth %d <= 0\n", badness.to_double (),
		 growth);
    }
  /* Inlining into EXTERNAL functions is not going to change anything unless
     they are themselves inlined.  */
  else if (DECL_EXTERNAL (caller->decl))
    {
      if (dump)
	fprintf (dump_file, " max: function is external\n");
      return sreal::max ();
    }
  /* When profile is available, compute badness as:

		    time_saved * caller_count
     goodness = ---------------------------------
		growth_of_caller * overall_growth

     badness = - goodness

     Again use a negative value to make calls with profile appear hotter
     than calls without.  */
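  /* A worked example of the formula above (made-up numbers): saving 10
     time units on an edge whose caller count is 100 while growing the
     caller by 20 insns with an overall growth of 5 gives
     goodness = 10 * 100 / (20 * 5) = 10 and hence badness -10; a colder
     or more code-bloating edge ends up closer to zero and is therefore
     popped from the min-heap later.  */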
  else if (opt_for_fn (caller->decl, flag_guess_branch_prob) || caller->count)
    {
      sreal numerator, denominator;

      numerator = (compute_uninlined_call_time (callee_info, edge)
		   - compute_inlined_call_time (edge, edge_time));
	numerator = ((sreal) 1 >> 8);
      if (caller->count)
	numerator *= caller->count;
      else if (opt_for_fn (caller->decl, flag_branch_probabilities))
	numerator = numerator >> 11;
      denominator = growth;
      if (callee_info->growth > 0)
	denominator *= callee_info->growth;

      badness = - numerator / denominator;

      if (dump)
	fprintf (dump_file,
		 " %f: guessed profile. frequency %f, count %" PRId64
		 " caller count %" PRId64
		 " time w/o inlining %f, time w inlining %f"
		 " overall growth %i (current) %i (original)\n",
		 badness.to_double (), (double)edge->frequency / CGRAPH_FREQ_BASE,
		 edge->count, caller->count,
		 compute_uninlined_call_time (callee_info, edge).to_double (),
		 compute_inlined_call_time (edge, edge_time).to_double (),
		 estimate_growth (callee),
		 callee_info->growth);
    }
  /* When the function local profile is not available or it does not give
     useful information (i.e. frequency is zero), base the cost on
     loop nest and overall size growth, so we optimize for the overall number
     of functions fully inlined in the program.  */
  else
    {
      int nest = MIN (inline_edge_summary (edge)->loop_depth, 8);
      badness = growth;

      /* Decrease badness if call is nested.  */
      if (badness > 0)
	badness = badness >> nest;
      else
	badness = badness << nest;
      if (dump)
	fprintf (dump_file, " %f: no profile. nest %i\n", badness.to_double (),
		 nest);
    }
  gcc_checking_assert (badness != 0);
  if (edge->recursive_p ())
    badness = badness.shift (badness > 0 ? 4 : -4);
  if ((hints & (INLINE_HINT_indirect_call
		| INLINE_HINT_loop_iterations
		| INLINE_HINT_array_index
		| INLINE_HINT_loop_stride))
      || callee_info->growth <= 0)
    badness = badness.shift (badness > 0 ? -2 : 2);
  if (hints & (INLINE_HINT_same_scc))
    badness = badness.shift (badness > 0 ? 3 : -3);
  else if (hints & (INLINE_HINT_in_scc))
    badness = badness.shift (badness > 0 ? 2 : -2);
  else if (hints & (INLINE_HINT_cross_module))
    badness = badness.shift (badness > 0 ? 1 : -1);
  if (DECL_DISREGARD_INLINE_LIMITS (callee->decl))
    badness = badness.shift (badness > 0 ? -4 : 4);
  else if ((hints & INLINE_HINT_declared_inline))
    badness = badness.shift (badness > 0 ? -3 : 3);
  if (dump)
    fprintf (dump_file, " Adjusted by hints %f\n", badness.to_double ());
/* Recompute badness of EDGE and update its key in HEAP if needed.  */

update_edge_key (edge_heap_t *heap, struct cgraph_edge *edge)
  sreal badness = edge_badness (edge, false);
  if (edge->aux)
    {
      edge_heap_node_t *n = (edge_heap_node_t *) edge->aux;
      gcc_checking_assert (n->get_data () == edge);

      /* fibonacci_heap::replace_key does busy updating of the
	 heap that is unnecessarily expensive.
	 We do lazy increases: after extracting the minimum, if the key
	 turns out to be out of date, it is re-inserted into the heap
	 with the correct value.  */
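      /* A hypothetical trace of the lazy scheme: an edge queued with key
	 -8 whose recomputed badness worsens to -2 is left in place here;
	 when it is later extracted, inline_small_functions notices the
	 stale key and re-inserts the edge with -2.  Only genuine
	 decreases (say -8 to -12) pay for an eager decrease_key below.  */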
      if (badness < n->get_key ())
	{
	  if (dump_file && (dump_flags & TDF_DETAILS))
	    fprintf (dump_file,
		     " decreasing badness %s/%i -> %s/%i, %f"
		     xstrdup_for_dump (edge->caller->name ()),
		     edge->caller->order,
		     xstrdup_for_dump (edge->callee->name ()),
		     edge->callee->order,
		     n->get_key ().to_double (),
		     badness.to_double ());
	  heap->decrease_key (n, badness);
	}
  else
    {
      if (dump_file && (dump_flags & TDF_DETAILS))
	fprintf (dump_file,
		 " enqueuing call %s/%i -> %s/%i, badness %f\n",
		 xstrdup_for_dump (edge->caller->name ()),
		 edge->caller->order,
		 xstrdup_for_dump (edge->callee->name ()),
		 edge->callee->order,
		 badness.to_double ());
      edge->aux = heap->insert (badness, edge);
    }
/* NODE was inlined.
   All caller edges need to be reset because
   size estimates change.  Similarly callees need to be reset
   because a better context may be known.  */

reset_edge_caches (struct cgraph_node *node)
  struct cgraph_edge *edge;
  struct cgraph_edge *e = node->callees;
  struct cgraph_node *where = node;
  struct ipa_ref *ref;

  if (where->global.inlined_to)
    where = where->global.inlined_to;

  for (edge = where->callers; edge; edge = edge->next_caller)
    if (edge->inline_failed)
      reset_edge_growth_cache (edge);

  FOR_EACH_ALIAS (where, ref)
    reset_edge_caches (dyn_cast <cgraph_node *> (ref->referring));

      if (!e->inline_failed && e->callee->callees)
	e = e->callee->callees;
	  if (e->inline_failed)
	    reset_edge_growth_cache (e);
	      if (e->caller == node)
		return;
	      e = e->caller->callers;
	  while (!e->next_callee);
/* Recompute HEAP nodes for each caller of NODE.
   UPDATED_NODES tracks nodes we already visited, to avoid redundant work.
   When CHECK_INLINABLITY_FOR is set, re-check for the specified edge that
   it is inlinable.  Otherwise check all edges.  */

update_caller_keys (edge_heap_t *heap, struct cgraph_node *node,
		    bitmap updated_nodes,
		    struct cgraph_edge *check_inlinablity_for)
  struct cgraph_edge *edge;
  struct ipa_ref *ref;

  if ((!node->alias && !inline_summaries->get (node)->inlinable)
      || node->global.inlined_to)
    return;
  if (!bitmap_set_bit (updated_nodes, node->uid))
    return;

  FOR_EACH_ALIAS (node, ref)
    {
      struct cgraph_node *alias = dyn_cast <cgraph_node *> (ref->referring);
      update_caller_keys (heap, alias, updated_nodes, check_inlinablity_for);
    }

  for (edge = node->callers; edge; edge = edge->next_caller)
    if (edge->inline_failed)
      {
	if (!check_inlinablity_for
	    || check_inlinablity_for == edge)
	  {
	    if (can_inline_edge_p (edge, false)
		&& want_inline_small_function_p (edge, false))
	      update_edge_key (heap, edge);
		report_inline_failed_reason (edge);
		heap->delete_node ((edge_heap_node_t *) edge->aux);
	  update_edge_key (heap, edge);
/* Recompute HEAP nodes for each uninlined call in NODE.
   This is used when we know that edge badnesses are going only to increase
   (we introduced a new call site) and thus all we need is to insert newly
   created edges into the heap.  */

update_callee_keys (edge_heap_t *heap, struct cgraph_node *node,
		    bitmap updated_nodes)
  struct cgraph_edge *e = node->callees;

      if (!e->inline_failed && e->callee->callees)
	e = e->callee->callees;
	  enum availability avail;
	  struct cgraph_node *callee;
	  /* We do not reset the callee growth cache here.  Since we added a
	     new call, growth could have just increased and consequently the
	     badness metric doesn't need updating.  */
	  if (e->inline_failed
	      && (callee = e->callee->ultimate_alias_target (&avail))
	      && inline_summaries->get (callee)->inlinable
	      && avail >= AVAIL_AVAILABLE
	      && !bitmap_bit_p (updated_nodes, callee->uid))
	    {
	      if (can_inline_edge_p (e, false)
		  && want_inline_small_function_p (e, false))
		update_edge_key (heap, e);
		  report_inline_failed_reason (e);
		  heap->delete_node ((edge_heap_node_t *) e->aux);
	      if (e->caller == node)
		return;
	      e = e->caller->callers;
	  while (!e->next_callee);
/* Enqueue all recursive calls from NODE into the priority queue depending on
   how likely we want to recursively inline the call.  */

lookup_recursive_calls (struct cgraph_node *node, struct cgraph_node *where,
			edge_heap_t *heap)
  struct cgraph_edge *e;
  enum availability avail;

  for (e = where->callees; e; e = e->next_callee)
    if (e->callee == node
	|| (e->callee->ultimate_alias_target (&avail) == node
	    && avail > AVAIL_INTERPOSABLE))
      {
	/* When profile feedback is available, prioritize by expected number
	   of calls.  */
	heap->insert (!max_count ? -e->frequency
		      : -(e->count / ((max_count + (1<<24) - 1) / (1<<24))),
		      e);
      }
  for (e = where->callees; e; e = e->next_callee)
    if (!e->inline_failed)
      lookup_recursive_calls (node, e->callee, heap);
/* Decide on recursive inlining: in the case the function has recursive
   calls, inline until body size reaches the given argument.  If any new
   indirect edges are discovered in the process, add them to *NEW_EDGES,
   unless NEW_EDGES is NULL.  */

recursive_inlining (struct cgraph_edge *edge,
		    vec<cgraph_edge *> *new_edges)
  int limit = PARAM_VALUE (PARAM_MAX_INLINE_INSNS_RECURSIVE_AUTO);
  edge_heap_t heap (sreal::min ());
  struct cgraph_node *node;
  struct cgraph_edge *e;
  struct cgraph_node *master_clone = NULL, *next;

  node = edge->caller;
  if (node->global.inlined_to)
    node = node->global.inlined_to;

  if (DECL_DECLARED_INLINE_P (node->decl))
    limit = PARAM_VALUE (PARAM_MAX_INLINE_INSNS_RECURSIVE);

  /* Make sure that function is small enough to be considered for inlining.  */
  if (estimate_size_after_inlining (node, edge) >= limit)
    return false;
  lookup_recursive_calls (node, node, &heap);

  if (dump_file)
    fprintf (dump_file,
	     " Performing recursive inlining on %s\n",
  /* Do the inlining and update the list of recursive calls during the
     process.  */
  while (!heap.empty ())
    {
      struct cgraph_edge *curr = heap.extract_min ();
      struct cgraph_node *cnode, *dest = curr->callee;

      if (!can_inline_edge_p (curr, true))
	continue;

      /* MASTER_CLONE is produced in the case we already started modifying
	 the function.  Be sure to redirect the edge to the original body
	 before estimating growths; otherwise we will be seeing growths after
	 inlining the already modified body.  */
      if (master_clone)
	{
	  curr->redirect_callee (master_clone);
	  reset_edge_growth_cache (curr);
	}

      if (estimate_size_after_inlining (node, curr) > limit)
	{
	  curr->redirect_callee (dest);
	  reset_edge_growth_cache (curr);

      for (cnode = curr->caller;
	   cnode->global.inlined_to; cnode = cnode->callers->caller)
	    == curr->callee->ultimate_alias_target ()->decl)

      if (!want_inline_self_recursive_call_p (curr, node, false, depth))
	{
	  curr->redirect_callee (dest);
	  reset_edge_growth_cache (curr);
	  continue;
	}

      if (dump_file)
	{
	  fprintf (dump_file,
		   " Inlining call of depth %i", depth);
	  if (node->count)
	    fprintf (dump_file, " called approx. %.2f times per call",
		     (double)curr->count / node->count);
	  fprintf (dump_file, "\n");
	}
      if (!master_clone)
	{
	  /* We need the original clone to copy around.  */
	  master_clone = node->create_clone (node->decl, node->count,
					     CGRAPH_FREQ_BASE, false, vNULL,
	  for (e = master_clone->callees; e; e = e->next_callee)
	    if (!e->inline_failed)
	      clone_inlined_nodes (e, true, false, NULL, CGRAPH_FREQ_BASE);
	  curr->redirect_callee (master_clone);
	  reset_edge_growth_cache (curr);
	}

      inline_call (curr, false, new_edges, &overall_size, true);
      lookup_recursive_calls (node, curr->callee, &heap);
    }

  if (!heap.empty () && dump_file)
    fprintf (dump_file, " Recursive inlining growth limit met.\n");

  if (dump_file)
    fprintf (dump_file,
	     "\n Inlined %i times, "
	     "body grown from size %i to %i, time %i to %i\n", n,
	     inline_summaries->get (master_clone)->size,
	     inline_summaries->get (node)->size,
	     inline_summaries->get (master_clone)->time,
	     inline_summaries->get (node)->time);

  /* Remove the master clone we used for inlining.  We rely on the fact that
     clones inlined into the master clone get queued just before the master
     clone so we don't need recursive search.  */
  for (node = symtab->first_function (); node != master_clone;
       node = next)
    {
      next = symtab->next_function (node);
      if (node->global.inlined_to == master_clone)
	node->remove ();
    }
  master_clone->remove ();
/* Given whole compilation unit estimate of INSNS, compute how large we can
   allow the unit to grow.  */

compute_max_insns (int insns)
  int max_insns = insns;
  if (max_insns < PARAM_VALUE (PARAM_LARGE_UNIT_INSNS))
    max_insns = PARAM_VALUE (PARAM_LARGE_UNIT_INSNS);

  return ((int64_t) max_insns
	  * (100 + PARAM_VALUE (PARAM_INLINE_UNIT_GROWTH)) / 100);
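/* A worked example (illustrative, assuming --param large-unit-insns=10000
   and --param inline-unit-growth=20): a small unit of 4000 insns is first
   lifted to the 10000 floor and may grow to 10000 * 120 / 100 = 12000
   insns, so tiny units get generous headroom; a 50000-insn unit is capped
   at 50000 * 120 / 100 = 60000.  */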
/* Compute badness of all edges in NEW_EDGES and add them to the HEAP.  */

add_new_edges_to_heap (edge_heap_t *heap, vec<cgraph_edge *> new_edges)
  while (new_edges.length () > 0)
    {
      struct cgraph_edge *edge = new_edges.pop ();

      gcc_assert (!edge->aux);
      if (edge->inline_failed
	  && can_inline_edge_p (edge, true)
	  && want_inline_small_function_p (edge, true))
	edge->aux = heap->insert (edge_badness (edge, false), edge);
    }

/* Remove EDGE from the fibheap.  */

heap_edge_removal_hook (struct cgraph_edge *e, void *data)
  if (e->aux)
    {
      ((edge_heap_t *)data)->delete_node ((edge_heap_node_t *)e->aux);
      e->aux = NULL;
    }
/* Return true if speculation of edge E seems useful.
   If ANTICIPATE_INLINING is true, be conservative and hope that E
   may get inlined.  */

speculation_useful_p (struct cgraph_edge *e, bool anticipate_inlining)
  enum availability avail;
  struct cgraph_node *target = e->callee->ultimate_alias_target (&avail);
  struct cgraph_edge *direct, *indirect;
  struct ipa_ref *ref;

  gcc_assert (e->speculative && !e->indirect_unknown_callee);

  if (!e->maybe_hot_p ())
    return false;

  /* See if IP optimizations found something potentially useful about the
     function.  For now we look only for CONST/PURE flags.  Almost everything
     else we propagate is useless.  */
  if (avail >= AVAIL_AVAILABLE)
    {
      int ecf_flags = flags_from_decl_or_type (target->decl);
      if (ecf_flags & ECF_CONST)
	{
	  e->speculative_call_info (direct, indirect, ref);
	  if (!(indirect->indirect_info->ecf_flags & ECF_CONST))
	    return true;
	}
      else if (ecf_flags & ECF_PURE)
	{
	  e->speculative_call_info (direct, indirect, ref);
	  if (!(indirect->indirect_info->ecf_flags & ECF_PURE))
	    return true;
	}
    }
  /* If we did not manage to inline the function nor redirect
     to an ipa-cp clone (that are seen by having the local flag set),
     it is probably pointless to inline it unless hardware is missing
     an indirect call predictor.  */
  if (!anticipate_inlining && e->inline_failed && !target->local.local)
    return false;
  /* For overwritable targets there is not much to do.  */
  if (e->inline_failed && !can_inline_edge_p (e, false, true))
    return false;
  /* OK, speculation seems interesting.  */
  return true;
/* We know that EDGE is not going to be inlined.
   See if we can remove speculation.  */

resolve_noninline_speculation (edge_heap_t *edge_heap, struct cgraph_edge *edge)
  if (edge->speculative && !speculation_useful_p (edge, false))
    {
      struct cgraph_node *node = edge->caller;
      struct cgraph_node *where = node->global.inlined_to
				  ? node->global.inlined_to : node;
      bitmap updated_nodes = BITMAP_ALLOC (NULL);

      spec_rem += edge->count;
      edge->resolve_speculation ();
      reset_edge_caches (where);
      inline_update_overall_summary (where);
      update_caller_keys (edge_heap, where,
			  updated_nodes, NULL);
      update_callee_keys (edge_heap, where,
			  updated_nodes);
      BITMAP_FREE (updated_nodes);
    }
/* Return true if NODE should be accounted for overall size estimate.
   Skip all nodes optimized for size so we can measure the growth of the hot
   part of the program regardless of the padding.  */

inline_account_function_p (struct cgraph_node *node)
  return (!DECL_EXTERNAL (node->decl)
	  && !opt_for_fn (node->decl, optimize_size)
	  && node->frequency != NODE_FREQUENCY_UNLIKELY_EXECUTED);
/* We use a greedy algorithm for inlining of small functions:
   All inline candidates are put into a prioritized heap ordered in
   increasing badness.

   The inlining of small functions is bounded by unit growth parameters.  */
inline_small_functions (void)
  struct cgraph_node *node;
  struct cgraph_edge *edge;
  edge_heap_t edge_heap (sreal::min ());
  bitmap updated_nodes = BITMAP_ALLOC (NULL);
  int min_size, max_size;
  auto_vec<cgraph_edge *> new_indirect_edges;
  int initial_size = 0;
  struct cgraph_node **order = XCNEWVEC (cgraph_node *, symtab->cgraph_count);
  struct cgraph_edge_hook_list *edge_removal_hook_holder;
  new_indirect_edges.create (8);

  edge_removal_hook_holder
    = symtab->add_edge_removal_hook (&heap_edge_removal_hook, &edge_heap);

  /* Compute overall unit size and other global parameters used by badness
     metrics.  */
  ipa_reduced_postorder (order, true, true, NULL);
  free (order);

  FOR_EACH_DEFINED_FUNCTION (node)
    if (!node->global.inlined_to)
      {
	if (!node->alias && node->analyzed
	    && (node->has_gimple_body_p () || node->thunk.thunk_p))
	  {
	    struct inline_summary *info = inline_summaries->get (node);
	    struct ipa_dfs_info *dfs = (struct ipa_dfs_info *) node->aux;

	    /* Do not account external functions, they will be optimized out
	       if not inlined.  Also only count the non-cold portion of the
	       program.  */
	    if (inline_account_function_p (node))
	      initial_size += info->size;
	    info->growth = estimate_growth (node);
	    if (dfs && dfs->next_cycle)
	      {
		struct cgraph_node *n2;
		int id = dfs->scc_no + 1;
		for (n2 = node; n2;
		     n2 = ((struct ipa_dfs_info *) node->aux)->next_cycle)
		  {
		    struct inline_summary *info2 = inline_summaries->get (n2);

	for (edge = node->callers; edge; edge = edge->next_caller)
	  if (max_count < edge->count)
	    max_count = edge->count;
      }
  ipa_free_postorder_info ();
  initialize_growth_caches ();

  if (dump_file)
    fprintf (dump_file,
	     "\nDeciding on inlining of small functions.  Starting with size %i.\n",
	     initial_size);

  overall_size = initial_size;
  max_size = compute_max_insns (overall_size);
  min_size = overall_size;
  /* Populate the heap with all edges we might inline.  */

  FOR_EACH_DEFINED_FUNCTION (node)
    {
      bool update = false;
      struct cgraph_edge *next = NULL;
      bool has_speculative = false;

      if (dump_file)
	fprintf (dump_file, "Enqueueing calls in %s/%i.\n",
		 node->name (), node->order);

      for (edge = node->callees; edge; edge = next)
	{
	  next = edge->next_callee;
	  if (edge->inline_failed
	      && !edge->aux
	      && can_inline_edge_p (edge, true)
	      && want_inline_small_function_p (edge, true)
	      && edge->inline_failed)
	    {
	      gcc_assert (!edge->aux);
	      update_edge_key (&edge_heap, edge);
	    }
	  if (edge->speculative)
	    has_speculative = true;
	}
      if (has_speculative)
	for (edge = node->callees; edge; edge = next)
	  if (edge->speculative && !speculation_useful_p (edge,
	      edge->resolve_speculation ();

	  struct cgraph_node *where = node->global.inlined_to
				      ? node->global.inlined_to : node;
	  inline_update_overall_summary (where);
	  reset_edge_caches (where);
	  update_caller_keys (&edge_heap, where,
			      updated_nodes, NULL);
	  update_callee_keys (&edge_heap, where,
	  bitmap_clear (updated_nodes);
    }

  gcc_assert (in_lto_p
	      || !max_count
	      || (profile_info && flag_branch_probabilities));
  while (!edge_heap.empty ())
    {
      int old_size = overall_size;
      struct cgraph_node *where, *callee;
      sreal badness = edge_heap.min_key ();
      sreal current_badness;

      edge = edge_heap.extract_min ();
      gcc_assert (edge->aux);
      edge->aux = NULL;
      if (!edge->inline_failed || !edge->callee->analyzed)
	continue;

#ifdef ENABLE_CHECKING
      /* Be sure that caches are kept consistent.  */
      sreal cached_badness = edge_badness (edge, false);

      int old_size_est = estimate_edge_size (edge);
      int old_time_est = estimate_edge_time (edge);
      int old_hints_est = estimate_edge_hints (edge);

      reset_edge_growth_cache (edge);
      gcc_assert (old_size_est == estimate_edge_size (edge));
      gcc_assert (old_time_est == estimate_edge_time (edge));
      /* FIXME:

	 gcc_assert (old_hints_est == estimate_edge_hints (edge));

	 fails with profile feedback because some hints depend on the
	 maybe_hot_edge_p predicate and because the callee gets inlined into
	 other calls, the edge may become cold.
	 This ought to be fixed by computing relative probabilities
	 for the given invocation but that will be better done once the whole
	 code is converted to sreals.  Disable for now and revert to "wrong"
	 value so enable/disable checking paths agree.  */
      edge_growth_cache[edge->uid].hints = old_hints_est + 1;
      /* When updating the edge costs, we only decrease badness in the keys.
	 Increases of badness are handled lazily; when we see a key with an
	 out-of-date value on it, we re-insert it now.  */
      current_badness = edge_badness (edge, false);
      /* Disable checking for profile because roundoff errors may cause slight
	 deviations in the order.  */
      gcc_assert (max_count || cached_badness == current_badness);
      gcc_assert (current_badness >= badness);
#else
      current_badness = edge_badness (edge, false);
#endif
      if (current_badness != badness)
	{
	  if (edge_heap.min () && current_badness > edge_heap.min_key ())
	    {
	      edge->aux = edge_heap.insert (current_badness, edge);
	      continue;
	    }
	  else
	    badness = current_badness;
	}
      if (!can_inline_edge_p (edge, true))
	{
	  resolve_noninline_speculation (&edge_heap, edge);
	  continue;
	}

      callee = edge->callee->ultimate_alias_target ();
      growth = estimate_edge_growth (edge);
      if (dump_file)
	{
	  fprintf (dump_file,
		   "\nConsidering %s/%i with %i size\n",
		   callee->name (), callee->order,
		   inline_summaries->get (callee)->size);
	  fprintf (dump_file,
		   " to be inlined into %s/%i in %s:%i\n"
		   " Estimated badness is %f, frequency %.2f.\n",
		   edge->caller->name (), edge->caller->order,
		   edge->call_stmt
		   && (LOCATION_LOCUS (gimple_location ((const_gimple)
							edge->call_stmt))
		       > BUILTINS_LOCATION)
		   ? gimple_filename ((const_gimple) edge->call_stmt)
		   : "unknown",
		   edge->call_stmt
		   ? gimple_lineno ((const_gimple) edge->call_stmt)
		   : -1,
		   badness.to_double (),
		   edge->frequency / (double)CGRAPH_FREQ_BASE);
	  if (edge->count)
	    fprintf (dump_file, " Called %" PRId64 "x\n",
		     edge->count);
	  if (dump_flags & TDF_DETAILS)
	    edge_badness (edge, true);
	}

      if (overall_size + growth > max_size
	  && !DECL_DISREGARD_INLINE_LIMITS (callee->decl))
	{
	  edge->inline_failed = CIF_INLINE_UNIT_GROWTH_LIMIT;
	  report_inline_failed_reason (edge);
	  resolve_noninline_speculation (&edge_heap, edge);
	  continue;
	}

      if (!want_inline_small_function_p (edge, true))
	{
	  resolve_noninline_speculation (&edge_heap, edge);
	  continue;
	}
      /* Heuristics for inlining small functions work poorly for
	 recursive calls where we do effects similar to loop unrolling.
	 When inlining such an edge seems profitable, leave the decision to
	 the special-purpose recursive inliner.  */
      if (edge->recursive_p ())
	{
	  where = edge->caller;
	  if (where->global.inlined_to)
	    where = where->global.inlined_to;
	  if (!recursive_inlining (edge,
				   opt_for_fn (edge->caller->decl,
					       flag_indirect_inlining)
				   ? &new_indirect_edges : NULL))
	    {
	      edge->inline_failed = CIF_RECURSIVE_INLINING;
	      resolve_noninline_speculation (&edge_heap, edge);
	      continue;
	    }
	  reset_edge_caches (where);
	  /* The recursive inliner inlines all recursive calls of the function
	     at once.  Consequently we need to update all callee keys.  */
	  if (opt_for_fn (edge->caller->decl, flag_indirect_inlining))
	    add_new_edges_to_heap (&edge_heap, new_indirect_edges);
	  update_callee_keys (&edge_heap, where, updated_nodes);
	  bitmap_clear (updated_nodes);
	}
	  struct cgraph_node *outer_node = NULL;
	  int depth = 0;

	  /* Consider the case where a self recursive function A is inlined
	     into B.  This is a desired optimization in some cases, since it
	     leads to an effect similar to loop peeling and we might completely
	     optimize out the recursive call.  However we must be extra
	     selective.  */

	  where = edge->caller;
	  while (where->global.inlined_to)
	      if (where->decl == callee->decl)
		outer_node = where, depth++;
	      where = where->callers->caller;
	  if (outer_node
	      && !want_inline_self_recursive_call_p (edge, outer_node,
	      edge->inline_failed
		= (DECL_DISREGARD_INLINE_LIMITS (edge->callee->decl)
		   ? CIF_RECURSIVE_INLINING : CIF_UNSPECIFIED);
	      resolve_noninline_speculation (&edge_heap, edge);
	  else if (depth && dump_file)
	    fprintf (dump_file, " Peeling recursion with depth %i\n", depth);

	  gcc_checking_assert (!callee->global.inlined_to);
	  inline_call (edge, true, &new_indirect_edges, &overall_size, true);
	  add_new_edges_to_heap (&edge_heap, new_indirect_edges);

	  reset_edge_caches (edge->callee->function_symbol ());

	  update_callee_keys (&edge_heap, where, updated_nodes);

      where = edge->caller;
      if (where->global.inlined_to)
	where = where->global.inlined_to;
1943 /* Our profitability metric can depend on local properties
1944 such as number of inlinable calls and size of the function body.
1945 After inlining these properties might change for the function we
1946 inlined into (since it's body size changed) and for the functions
1947 called by function we inlined (since number of it inlinable callers
1949 update_caller_keys (&edge_heap, where, updated_nodes, NULL);
1950 /* Offline copy count has possibly changed, recompute if profile is
1954 struct cgraph_node *n = cgraph_node::get (edge->callee->decl);
1955 if (n != edge->callee && n->analyzed)
1956 update_callee_keys (&edge_heap, n, updated_nodes);
1958 bitmap_clear (updated_nodes);
1963 " Inlined into %s which now has time %i and size %i,"
1964 "net change of %+i.\n",
1965 edge->caller->name (),
1966 inline_summaries->get (edge->caller)->time,
1967 inline_summaries->get (edge->caller)->size,
1968 overall_size - old_size);
1970 if (min_size > overall_size)
1972 min_size = overall_size;
1973 max_size = compute_max_insns (min_size);
1976 fprintf (dump_file, "New minimal size reached: %i\n", min_size);
1980 free_growth_caches ();
1983 "Unit growth for small function inlining: %i->%i (%i%%)\n",
1984 initial_size, overall_size,
1985 initial_size ? overall_size * 100 / (initial_size) - 100: 0);
1986 BITMAP_FREE (updated_nodes);
1987 symtab->remove_edge_removal_hook (edge_removal_hook_holder);
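/* A worked example of the growth accounting above, with illustrative
   numbers only: if INITIAL_SIZE was 1000 instructions and inlining brought
   OVERALL_SIZE to 1150, the dump line reads "1000->1150 (15%)" since
   1150 * 100 / 1000 - 100 == 15.  MAX_SIZE is recomputed from MIN_SIZE
   whenever a new minimal size is reached, so the unit growth limit is
   always measured against the smallest unit size seen so far.  */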
/* Flatten NODE.  Performed both during early inlining and
   at IPA inlining time.  */

static void
flatten_function (struct cgraph_node *node, bool early)
{
  struct cgraph_edge *e;

  /* We shouldn't be called recursively when we are being processed.  */
  gcc_assert (node->aux == NULL);

  node->aux = (void *) node;

  for (e = node->callees; e; e = e->next_callee)
    {
      struct cgraph_node *orig_callee;
      struct cgraph_node *callee = e->callee->ultimate_alias_target ();

      /* Have we hit a cycle?  Then it is time to give up.  */
      if (callee->aux)
        {
          if (dump_file)
            fprintf (dump_file,
                     "Not inlining %s into %s to avoid cycle.\n",
                     xstrdup_for_dump (callee->name ()),
                     xstrdup_for_dump (e->caller->name ()));
          e->inline_failed = CIF_RECURSIVE_INLINING;
          continue;
        }

      /* When the edge is already inlined, we just need to recurse into
         it in order to fully flatten the leaves.  */
      if (!e->inline_failed)
        {
          flatten_function (callee, early);
          continue;
        }

      /* The flatten attribute needs to be processed during late inlining.
         For extra code quality we however do flattening during early
         optimization, too.  */
      if (!early
          ? !can_inline_edge_p (e, true)
          : !can_early_inline_edge_p (e))
        continue;

      if (e->recursive_p ())
        {
          if (dump_file)
            fprintf (dump_file, "Not inlining: recursive call.\n");
          continue;
        }

      if (gimple_in_ssa_p (DECL_STRUCT_FUNCTION (node->decl))
          != gimple_in_ssa_p (DECL_STRUCT_FUNCTION (callee->decl)))
        {
          if (dump_file)
            fprintf (dump_file, "Not inlining: SSA form does not match.\n");
          continue;
        }

      /* Inline the edge and flatten the inline clone.  Avoid
         recursing through the original node if the node was cloned.  */
      if (dump_file)
        fprintf (dump_file, " Inlining %s into %s.\n",
                 xstrdup_for_dump (callee->name ()),
                 xstrdup_for_dump (e->caller->name ()));
      orig_callee = callee;
      inline_call (e, true, NULL, NULL, false);
      if (e->callee != orig_callee)
        orig_callee->aux = (void *) node;
      flatten_function (e->callee, early);
      if (e->callee != orig_callee)
        orig_callee->aux = NULL;
    }

  node->aux = NULL;
  if (!node->global.inlined_to)
    inline_update_overall_summary (node);
}
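/* Illustrative only: the user-level trigger for flatten_function is the
   GCC "flatten" function attribute, e.g.

     __attribute__ ((flatten)) void hot_path (void)
     {
       helper_a ();   (inlined, and helper_a's own calls too)
       helper_b ();
     }

   Every call reachable from the body is inlined recursively, except where
   a cycle, a recursive call or a non-inlinable edge stops us, as handled
   above.  */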
/* Count the number of callers of NODE and store it into DATA (which
   points to an int).  Worker for cgraph_for_node_and_aliases.  */

static bool
sum_callers (struct cgraph_node *node, void *data)
{
  struct cgraph_edge *e;
  int *num_calls = (int *)data;

  for (e = node->callers; e; e = e->next_caller)
    (*num_calls)++;
  return false;
}
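/* Example usage, as done in ipa_inline below: walk NODE and all its
   aliases, accumulating the caller count:

     int num_calls = 0;
     node->call_for_symbol_and_aliases (sum_callers, &num_calls, true);

   Returning false above means "keep walking", so after the call NUM_CALLS
   holds the number of call edges to NODE and its aliases.  */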
/* Inline NODE to all callers.  Worker for cgraph_for_node_and_aliases.
   DATA points to the number of calls originally found so we avoid
   infinite recursion.  */

static bool
inline_to_all_callers (struct cgraph_node *node, void *data)
{
  int *num_calls = (int *)data;
  bool callee_removed = false;

  while (node->callers && !node->global.inlined_to)
    {
      struct cgraph_node *caller = node->callers->caller;

      if (!can_inline_edge_p (node->callers, true)
          || node->callers->recursive_p ())
        {
          if (dump_file)
            fprintf (dump_file, "Uninlinable call found; giving up.\n");
          *num_calls = 0;
          return false;
        }
      if (dump_file)
        {
          fprintf (dump_file, "\nInlining %s size %i.\n",
                   node->name (), inline_summaries->get (node)->size);
          fprintf (dump_file, " Called once from %s %i insns.\n",
                   node->callers->caller->name (),
                   inline_summaries->get (node->callers->caller)->size);
        }
      inline_call (node->callers, true, NULL, NULL, true, &callee_removed);
      if (dump_file)
        fprintf (dump_file, " Inlined into %s which now has %i size\n",
                 caller->name (), inline_summaries->get (caller)->size);
      if (!(*num_calls)--)
        {
          if (dump_file)
            fprintf (dump_file, "New calls found; giving up.\n");
          return callee_removed;
        }
      if (callee_removed)
        return true;
    }
  return false;
}
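/* Note the interplay with sum_callers above: *NUM_CALLS was seeded with
   the number of calls found before inlining started.  If inlining keeps
   exposing new calls to NODE faster than we consume them (for example
   when an inlined body itself contains a call to NODE), the decrement
   above reaches zero and we give up instead of looping forever.  */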
/* Output overall time estimate.  */

static void
dump_overall_stats (void)
{
  int64_t sum_weighted = 0, sum = 0;
  struct cgraph_node *node;

  FOR_EACH_DEFINED_FUNCTION (node)
    if (!node->global.inlined_to
        && !node->alias)
      {
        int time = inline_summaries->get (node)->time;
        sum += time;
        sum_weighted += time * node->count;
      }
  fprintf (dump_file, "Overall time estimate: "
           "%"PRId64" weighted by profile: "
           "%"PRId64"\n", sum, sum_weighted);
}
/* Output some useful stats about inlining.  */

static void
dump_inline_stats (void)
{
  int64_t inlined_cnt = 0, inlined_indir_cnt = 0;
  int64_t inlined_virt_cnt = 0, inlined_virt_indir_cnt = 0;
  int64_t noninlined_cnt = 0, noninlined_indir_cnt = 0;
  int64_t noninlined_virt_cnt = 0, noninlined_virt_indir_cnt = 0;
  int64_t inlined_speculative = 0, inlined_speculative_ply = 0;
  int64_t indirect_poly_cnt = 0, indirect_cnt = 0;
  int64_t reason[CIF_N_REASONS][3];
  int i;
  struct cgraph_node *node;

  memset (reason, 0, sizeof (reason));
  FOR_EACH_DEFINED_FUNCTION (node)
    {
      struct cgraph_edge *e;
      for (e = node->callees; e; e = e->next_callee)
        {
          if (e->inline_failed)
            {
              reason[(int) e->inline_failed][0] += e->count;
              reason[(int) e->inline_failed][1] += e->frequency;
              reason[(int) e->inline_failed][2] ++;
              if (DECL_VIRTUAL_P (e->callee->decl))
                {
                  if (e->indirect_inlining_edge)
                    noninlined_virt_indir_cnt += e->count;
                  else
                    noninlined_virt_cnt += e->count;
                }
              else
                {
                  if (e->indirect_inlining_edge)
                    noninlined_indir_cnt += e->count;
                  else
                    noninlined_cnt += e->count;
                }
            }
          else if (e->speculative)
            {
              if (DECL_VIRTUAL_P (e->callee->decl))
                inlined_speculative_ply += e->count;
              else
                inlined_speculative += e->count;
            }
          else if (DECL_VIRTUAL_P (e->callee->decl))
            {
              if (e->indirect_inlining_edge)
                inlined_virt_indir_cnt += e->count;
              else
                inlined_virt_cnt += e->count;
            }
          else
            {
              if (e->indirect_inlining_edge)
                inlined_indir_cnt += e->count;
              else
                inlined_cnt += e->count;
            }
        }
      for (e = node->indirect_calls; e; e = e->next_callee)
        if (e->indirect_info->polymorphic)
          indirect_poly_cnt += e->count;
        else
          indirect_cnt += e->count;
    }
  if (max_count)
    {
      fprintf (dump_file,
               "Inlined %"PRId64 " + speculative "
               "%"PRId64 " + speculative polymorphic "
               "%"PRId64 " + previously indirect "
               "%"PRId64 " + virtual "
               "%"PRId64 " + virtual and previously indirect "
               "%"PRId64 "\n" "Not inlined "
               "%"PRId64 " + previously indirect "
               "%"PRId64 " + virtual "
               "%"PRId64 " + virtual and previously indirect "
               "%"PRId64 " + still indirect "
               "%"PRId64 " + still indirect polymorphic "
               "%"PRId64 "\n", inlined_cnt,
               inlined_speculative, inlined_speculative_ply,
               inlined_indir_cnt, inlined_virt_cnt, inlined_virt_indir_cnt,
               noninlined_cnt, noninlined_indir_cnt, noninlined_virt_cnt,
               noninlined_virt_indir_cnt, indirect_cnt, indirect_poly_cnt);
      fprintf (dump_file,
               "Removed speculations %"PRId64 "\n",
               spec_rem);
    }
  dump_overall_stats ();
  fprintf (dump_file, "\nWhy inlining failed?\n");
  for (i = 0; i < CIF_N_REASONS; i++)
    if (reason[i][2])
      fprintf (dump_file, "%-50s: %8i calls, %8i freq, %"PRId64" count\n",
               cgraph_inline_failed_string ((cgraph_inline_failed_t) i),
               (int) reason[i][2], (int) reason[i][1], reason[i][0]);
}
/* Decide on the inlining.  We do so in the topological order to avoid
   expenses on updating data structures.  */

static unsigned int
ipa_inline (void)
{
  struct cgraph_node *node;
  int nnodes;
  struct cgraph_node **order;
  int i;
  int cold;
  bool remove_functions = false;

  if (!optimize)
    return 0;

  cgraph_freq_base_rec = (sreal) 1 / (sreal) CGRAPH_FREQ_BASE;
  percent_rec = (sreal) 1 / (sreal) 100;

  order = XCNEWVEC (struct cgraph_node *, symtab->cgraph_count);

  if (in_lto_p && optimize)
    ipa_update_after_lto_read ();

  if (dump_file)
    dump_inline_summaries (dump_file);

  nnodes = ipa_reverse_postorder (order);

  FOR_EACH_FUNCTION (node)
    node->aux = 0;

  if (dump_file)
    fprintf (dump_file, "\nFlattening functions:\n");

  /* In the first pass handle functions to be flattened.  Do this with
     a priority so none of our later choices will make this impossible.  */
  for (i = nnodes - 1; i >= 0; i--)
    {
      node = order[i];

      /* Handle nodes to be flattened.
         Ideally when processing callees we stop inlining at the
         entry of cycles, possibly cloning that entry point and
         trying to flatten itself, turning it into a self-recursive
         function.  */
      if (lookup_attribute ("flatten",
                            DECL_ATTRIBUTES (node->decl)) != NULL)
        {
          if (dump_file)
            fprintf (dump_file,
                     "Flattening %s\n", node->name ());
          flatten_function (node, false);
        }
    }
  if (dump_file)
    dump_overall_stats ();

  inline_small_functions ();

  gcc_assert (symtab->state == IPA_SSA);
  symtab->state = IPA_SSA_AFTER_INLINING;
  /* Do first after-inlining removal.  We want to remove all "stale" extern
     inline functions and virtual functions so we really know what is called
     once.  */
  symtab->remove_unreachable_nodes (dump_file);
  free (order);

  /* Inline functions with a property that after inlining into all callers
     the code size will shrink because the out-of-line copy is eliminated.
     We do this regardless of the callee size as long as function growth
     limits are met.  */
  if (dump_file)
    fprintf (dump_file,
             "\nDeciding on functions to be inlined into all callers and "
             "removing useless speculations:\n");

  /* Inlining one function called once has a good chance of preventing
     inlining of other functions into the same callee.  Ideally we should
     work in priority order, but probably inlining hot functions first
     is a good cut without the extra pain of maintaining the queue.

     ??? this is not really fitting the bill perfectly: inlining a function
     into a callee often leads to better optimization of the callee due to
     increased context for optimization.
     For example if main() calls a function that outputs help
     and then a function that does the main optimization, we should inline
     the second with priority even if both calls are cold by themselves.

     We probably want to implement a new predicate replacing our use of
     maybe_hot_edge, interpreted as maybe_hot_edge || callee is known
     to be hot.  */
  for (cold = 0; cold <= 1; cold ++)
    {
      FOR_EACH_DEFINED_FUNCTION (node)
        {
          struct cgraph_edge *edge, *next;
          bool update = false;

          for (edge = node->callees; edge; edge = next)
            {
              next = edge->next_callee;
              if (edge->speculative && !speculation_useful_p (edge, false))
                {
                  edge->resolve_speculation ();
                  spec_rem += edge->count;
                  update = true;
                  remove_functions = true;
                }
            }
          if (update)
            {
              struct cgraph_node *where = node->global.inlined_to
                                          ? node->global.inlined_to : node;
              reset_edge_caches (where);
              inline_update_overall_summary (where);
            }
          if (want_inline_function_to_all_callers_p (node, cold))
            {
              int num_calls = 0;
              node->call_for_symbol_and_aliases (sum_callers, &num_calls,
                                                 true);
              while (node->call_for_symbol_and_aliases
                       (inline_to_all_callers, &num_calls, true))
                ;
              remove_functions = true;
            }
        }
    }

  /* Free ipa-prop structures if they are no longer needed.  */
  if (optimize)
    ipa_free_all_structures_after_iinln ();

  if (dump_file)
    {
      fprintf (dump_file,
               "\nInlined %i calls, eliminated %i functions\n\n",
               ncalls_inlined, nfunctions_inlined);
      dump_inline_stats ();
    }

  if (dump_file)
    dump_inline_summaries (dump_file);
  /* In WPA we use inline summaries for the partitioning process.  */
  if (!flag_wpa)
    inline_free_summary ();
  return remove_functions ? TODO_remove_functions : 0;
}
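/* Illustrative only: the "inline into all callers" phase above fires,
   for instance, for a static function with a single call site:

     static int helper (int x) { return x * 2; }
     int user (int x) { return helper (x) + 1; }

   Inlining helper into user leaves the offline copy unreachable, so it
   is removed and overall code size shrinks no matter how large helper
   is, as long as the function growth limits are met.  */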
/* Inline always-inline function calls in NODE.  */

static bool
inline_always_inline_functions (struct cgraph_node *node)
{
  struct cgraph_edge *e;
  bool inlined = false;

  for (e = node->callees; e; e = e->next_callee)
    {
      struct cgraph_node *callee = e->callee->ultimate_alias_target ();
      if (!DECL_DISREGARD_INLINE_LIMITS (callee->decl))
        continue;
      if (e->recursive_p ())
        {
          if (dump_file)
            fprintf (dump_file, "  Not inlining recursive call to %s.\n",
                     e->callee->name ());
          e->inline_failed = CIF_RECURSIVE_INLINING;
          continue;
        }
      if (!can_early_inline_edge_p (e))
        {
          /* Set inlined to true if the callee is marked "always_inline" but
             is not inlinable.  This will allow flagging an error later in
             expand_call_inline in tree-inline.c.  */
          if (lookup_attribute ("always_inline",
                                DECL_ATTRIBUTES (callee->decl)) != NULL)
            inlined = true;
          continue;
        }
      if (dump_file)
        fprintf (dump_file, "  Inlining %s into %s (always_inline).\n",
                 xstrdup_for_dump (e->callee->name ()),
                 xstrdup_for_dump (e->caller->name ()));
      inline_call (e, true, NULL, NULL, false);
      inlined = true;
    }
  if (inlined)
    inline_update_overall_summary (node);

  return inlined;
}
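/* Illustrative only: the edges handled here come from declarations such
   as

     static inline int add1 (int x) __attribute__ ((always_inline));

   for which the front end sets DECL_DISREGARD_INLINE_LIMITS.  Such calls
   are inlined regardless of size limits, even when not optimizing; if one
   cannot be inlined at all, the INLINED flag set above ensures the error
   is reported later from expand_call_inline in tree-inline.c.  */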
/* Do early inlining of small functions called by NODE.  Return true if
   anything was inlined.  */

static bool
early_inline_small_functions (struct cgraph_node *node)
{
  struct cgraph_edge *e;
  bool inlined = false;

  for (e = node->callees; e; e = e->next_callee)
    {
      struct cgraph_node *callee = e->callee->ultimate_alias_target ();
      if (!inline_summaries->get (callee)->inlinable
          || !e->inline_failed)
        continue;

      /* Do not consider functions not declared inline.  */
      if (!DECL_DECLARED_INLINE_P (callee->decl)
          && !opt_for_fn (node->decl, flag_inline_small_functions)
          && !opt_for_fn (node->decl, flag_inline_functions))
        continue;

      if (dump_file)
        fprintf (dump_file, "Considering inline candidate %s.\n",
                 callee->name ());
      if (!can_early_inline_edge_p (e))
        continue;
      if (e->recursive_p ())
        {
          if (dump_file)
            fprintf (dump_file, "  Not inlining: recursive call.\n");
          continue;
        }
      if (!want_early_inline_function_p (e))
        continue;

      if (dump_file)
        fprintf (dump_file, " Inlining %s into %s.\n",
                 xstrdup_for_dump (callee->name ()),
                 xstrdup_for_dump (e->caller->name ()));
      inline_call (e, true, NULL, NULL, true);
      inlined = true;
    }

  return inlined;
}
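/* Illustrative only: with -fno-inline-small-functions -fno-inline-functions
   only callees explicitly declared inline survive the filter above:

     inline int incr (int x) { return x + 1; }   (still considered)
     static int decr (int x) { return x - 1; }   (skipped)

   -finline-small-functions (enabled at -O2) or -finline-functions (enabled
   at -O3) make undeclared functions candidates as well.  */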
unsigned int
early_inliner (function *fun)
{
  struct cgraph_node *node = cgraph_node::get (current_function_decl);
  struct cgraph_edge *edge;
  unsigned int todo = 0;
  int iterations = 0;
  bool inlined = false;

  if (seen_error ())
    return 0;

  /* Do nothing if data structures for the ipa inliner are already computed.
     This happens when some pass decides to construct a new function and
     cgraph_add_new_function calls lowering passes and early optimization on
     it.  This may confuse us when the early inliner decides to inline a
     call to a function clone, because function clones don't have a
     parameter list in ipa-prop matching their signature.  */
  if (ipa_node_params_sum)
    return 0;

#ifdef ENABLE_CHECKING
  node->verify ();
#endif
  node->remove_all_references ();

  /* Rebuild this reference because it doesn't depend on the
     function's body and it's required to pass cgraph_node
     verification.  */
  if (node->instrumented_version
      && !node->instrumentation_clone)
    node->create_reference (node->instrumented_version, IPA_REF_CHKP, NULL);

  /* Even when not optimizing or not inlining, inline always-inline
     functions.  */
  inlined = inline_always_inline_functions (node);

  if (!optimize
      || flag_no_inline
      || !flag_early_inlining
      /* Never inline regular functions into always-inline functions
         during incremental inlining.  This sucks as functions calling
         always inline functions will get less optimized, but at the
         same time inlining of functions calling always inline
         functions into an always inline function might introduce
         cycles of edges to be always inlined in the callgraph.

         We might want to be smarter and just avoid this type of inlining.  */
      || (DECL_DISREGARD_INLINE_LIMITS (node->decl)
          && lookup_attribute ("always_inline",
                               DECL_ATTRIBUTES (node->decl))))
    ;
  else if (lookup_attribute ("flatten",
                             DECL_ATTRIBUTES (node->decl)) != NULL)
    {
      /* When the function is marked to be flattened, recursively inline
         all calls in it.  */
      if (dump_file)
        fprintf (dump_file,
                 "Flattening %s\n", node->name ());
      flatten_function (node, true);
      inlined = true;
    }
  else
    {
      /* If some always_inline functions were inlined, apply the changes.
         This way we will not account always inline into growth limits and
         moreover we will inline calls from always inlines that we skipped
         previously because of the conditional above.  */
      if (inlined)
        {
          timevar_push (TV_INTEGRATION);
          todo |= optimize_inline_calls (current_function_decl);
          /* The optimize_inline_calls call above might have introduced new
             statements that don't have inline parameters computed.  */
          for (edge = node->callees; edge; edge = edge->next_callee)
            {
              if (inline_edge_summary_vec.length () > (unsigned) edge->uid)
                {
                  struct inline_edge_summary *es = inline_edge_summary (edge);
                  es->call_stmt_size
                    = estimate_num_insns (edge->call_stmt, &eni_size_weights);
                  es->call_stmt_time
                    = estimate_num_insns (edge->call_stmt, &eni_time_weights);
                }
            }
          inline_update_overall_summary (node);
          inlined = false;
          timevar_pop (TV_INTEGRATION);
        }
      /* We iterate incremental inlining to get trivial cases of indirect
         inlining.  */
      while (iterations < PARAM_VALUE (PARAM_EARLY_INLINER_MAX_ITERATIONS)
             && early_inline_small_functions (node))
        {
          timevar_push (TV_INTEGRATION);
          todo |= optimize_inline_calls (current_function_decl);

          /* Technically we ought to recompute inline parameters so the new
             iteration of early inliner works as expected.  We however have
             values approximately right and thus we only need to update edge
             info that might be cleared out for newly discovered edges.  */
          for (edge = node->callees; edge; edge = edge->next_callee)
            {
              /* We have no summary for new bound store calls yet.  */
              if (inline_edge_summary_vec.length () > (unsigned) edge->uid)
                {
                  struct inline_edge_summary *es = inline_edge_summary (edge);
                  es->call_stmt_size
                    = estimate_num_insns (edge->call_stmt, &eni_size_weights);
                  es->call_stmt_time
                    = estimate_num_insns (edge->call_stmt, &eni_time_weights);
                }
              if (edge->callee->decl
                  && !gimple_check_call_matching_types (
                      edge->call_stmt, edge->callee->decl, false))
                edge->call_stmt_cannot_inline_p = true;
            }
          if (iterations < PARAM_VALUE (PARAM_EARLY_INLINER_MAX_ITERATIONS) - 1)
            inline_update_overall_summary (node);
          timevar_pop (TV_INTEGRATION);
          iterations++;
          inlined = false;
        }
      if (dump_file)
        fprintf (dump_file, "Iterations: %i\n", iterations);
    }

  if (inlined)
    {
      timevar_push (TV_INTEGRATION);
      todo |= optimize_inline_calls (current_function_decl);
      timevar_pop (TV_INTEGRATION);
    }

  fun->always_inline_functions_inlined = true;

  return todo;
}
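/* Illustrative only: the iteration count of the incremental loop above
   can be tuned from the command line, e.g.

     gcc -O2 --param early-inliner-max-iterations=3 file.c

   Each extra iteration re-runs early_inline_small_functions after
   optimize_inline_calls, which is what catches the trivial indirect
   inlining cases mentioned above.  */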
/* Do inlining of small functions.  Doing so early helps profiling and other
   passes to be somewhat more effective and avoids some code duplication in
   the later real inlining pass for testcases with very many function calls.  */

namespace {

const pass_data pass_data_early_inline =
{
  GIMPLE_PASS, /* type */
  "einline", /* name */
  OPTGROUP_INLINE, /* optinfo_flags */
  TV_EARLY_INLINING, /* tv_id */
  PROP_ssa, /* properties_required */
  0, /* properties_provided */
  0, /* properties_destroyed */
  0, /* todo_flags_start */
  0, /* todo_flags_finish */
};

class pass_early_inline : public gimple_opt_pass
{
public:
  pass_early_inline (gcc::context *ctxt)
    : gimple_opt_pass (pass_data_early_inline, ctxt)
  {}

  /* opt_pass methods: */
  virtual unsigned int execute (function *);

}; // class pass_early_inline

unsigned int
pass_early_inline::execute (function *fun)
{
  return early_inliner (fun);
}

} // anon namespace

gimple_opt_pass *
make_pass_early_inline (gcc::context *ctxt)
{
  return new pass_early_inline (ctxt);
}
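/* Illustrative only: like other gimple passes, "einline" is instantiated
   through the factory above by the pass manager (see passes.def), and its
   per-function work can be inspected with the tree dump switch, e.g.

     gcc -O2 -fdump-tree-einline file.c

   which writes the early-inliner dump for each function to a
   file.c.*.einline dump file.  */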
namespace {

const pass_data pass_data_ipa_inline =
{
  IPA_PASS, /* type */
  "inline", /* name */
  OPTGROUP_INLINE, /* optinfo_flags */
  TV_IPA_INLINING, /* tv_id */
  0, /* properties_required */
  0, /* properties_provided */
  0, /* properties_destroyed */
  0, /* todo_flags_start */
  ( TODO_dump_symtab ), /* todo_flags_finish */
};

class pass_ipa_inline : public ipa_opt_pass_d
{
public:
  pass_ipa_inline (gcc::context *ctxt)
    : ipa_opt_pass_d (pass_data_ipa_inline, ctxt,
                      inline_generate_summary, /* generate_summary */
                      inline_write_summary, /* write_summary */
                      inline_read_summary, /* read_summary */
                      NULL, /* write_optimization_summary */
                      NULL, /* read_optimization_summary */
                      NULL, /* stmt_fixup */
                      0, /* function_transform_todo_flags_start */
                      inline_transform, /* function_transform */
                      NULL) /* variable_transform */
  {}

  /* opt_pass methods: */
  virtual unsigned int execute (function *) { return ipa_inline (); }

}; // class pass_ipa_inline

} // anon namespace

ipa_opt_pass_d *
make_pass_ipa_inline (gcc::context *ctxt)
{
  return new pass_ipa_inline (ctxt);
}
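/* Illustrative only: the whole-program decisions made by this pass can be
   inspected with the IPA dump switch, e.g.

     gcc -O2 -fdump-ipa-inline file.c

   which records the flattening, small-function and called-once phases
   (including the statistics printed by dump_inline_stats above) in a
   file.c.*.inline dump file.  */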