forked from riscvarchive/riscv-gcc
-
Notifications
You must be signed in to change notification settings - Fork 0
/
Copy pathtree-cfg.c
10114 lines (8623 loc) · 277 KB
/
tree-cfg.c
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
165
166
167
168
169
170
171
172
173
174
175
176
177
178
179
180
181
182
183
184
185
186
187
188
189
190
191
192
193
194
195
196
197
198
199
200
201
202
203
204
205
206
207
208
209
210
211
212
213
214
215
216
217
218
219
220
221
222
223
224
225
226
227
228
229
230
231
232
233
234
235
236
237
238
239
240
241
242
243
244
245
246
247
248
249
250
251
252
253
254
255
256
257
258
259
260
261
262
263
264
265
266
267
268
269
270
271
272
273
274
275
276
277
278
279
280
281
282
283
284
285
286
287
288
289
290
291
292
293
294
295
296
297
298
299
300
301
302
303
304
305
306
307
308
309
310
311
312
313
314
315
316
317
318
319
320
321
322
323
324
325
326
327
328
329
330
331
332
333
334
335
336
337
338
339
340
341
342
343
344
345
346
347
348
349
350
351
352
353
354
355
356
357
358
359
360
361
362
363
364
365
366
367
368
369
370
371
372
373
374
375
376
377
378
379
380
381
382
383
384
385
386
387
388
389
390
391
392
393
394
395
396
397
398
399
400
401
402
403
404
405
406
407
408
409
410
411
412
413
414
415
416
417
418
419
420
421
422
423
424
425
426
427
428
429
430
431
432
433
434
435
436
437
438
439
440
441
442
443
444
445
446
447
448
449
450
451
452
453
454
455
456
457
458
459
460
461
462
463
464
465
466
467
468
469
470
471
472
473
474
475
476
477
478
479
480
481
482
483
484
485
486
487
488
489
490
491
492
493
494
495
496
497
498
499
500
501
502
503
504
505
506
507
508
509
510
511
512
513
514
515
516
517
518
519
520
521
522
523
524
525
526
527
528
529
530
531
532
533
534
535
536
537
538
539
540
541
542
543
544
545
546
547
548
549
550
551
552
553
554
555
556
557
558
559
560
561
562
563
564
565
566
567
568
569
570
571
572
573
574
575
576
577
578
579
580
581
582
583
584
585
586
587
588
589
590
591
592
593
594
595
596
597
598
599
600
601
602
603
604
605
606
607
608
609
610
611
612
613
614
615
616
617
618
619
620
621
622
623
624
625
626
627
628
629
630
631
632
633
634
635
636
637
638
639
640
641
642
643
644
645
646
647
648
649
650
651
652
653
654
655
656
657
658
659
660
661
662
663
664
665
666
667
668
669
670
671
672
673
674
675
676
677
678
679
680
681
682
683
684
685
686
687
688
689
690
691
692
693
694
695
696
697
698
699
700
701
702
703
704
705
706
707
708
709
710
711
712
713
714
715
716
717
718
719
720
721
722
723
724
725
726
727
728
729
730
731
732
733
734
735
736
737
738
739
740
741
742
743
744
745
746
747
748
749
750
751
752
753
754
755
756
757
758
759
760
761
762
763
764
765
766
767
768
769
770
771
772
773
774
775
776
777
778
779
780
781
782
783
784
785
786
787
788
789
790
791
792
793
794
795
796
797
798
799
800
801
802
803
804
805
806
807
808
809
810
811
812
813
814
815
816
817
818
819
820
821
822
823
824
825
826
827
828
829
830
831
832
833
834
835
836
837
838
839
840
841
842
843
844
845
846
847
848
849
850
851
852
853
854
855
856
857
858
859
860
861
862
863
864
865
866
867
868
869
870
871
872
873
874
875
876
877
878
879
880
881
882
883
884
885
886
887
888
889
890
891
892
893
894
895
896
897
898
899
900
901
902
903
904
905
906
907
908
909
910
911
912
913
914
915
916
917
918
919
920
921
922
923
924
925
926
927
928
929
930
931
932
933
934
935
936
937
938
939
940
941
942
943
944
945
946
947
948
949
950
951
952
953
954
955
956
957
958
959
960
961
962
963
964
965
966
967
968
969
970
971
972
973
974
975
976
977
978
979
980
981
982
983
984
985
986
987
988
989
990
991
992
993
994
995
996
997
998
999
1000
/* Control flow functions for trees.
Copyright (C) 2001-2020 Free Software Foundation, Inc.
Contributed by Diego Novillo <[email protected]>
This file is part of GCC.
GCC is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation; either version 3, or (at your option)
any later version.
GCC is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with GCC; see the file COPYING3. If not see
<http://www.gnu.org/licenses/>. */
#include "config.h"
#include "system.h"
#include "coretypes.h"
#include "backend.h"
#include "target.h"
#include "rtl.h"
#include "tree.h"
#include "gimple.h"
#include "cfghooks.h"
#include "tree-pass.h"
#include "ssa.h"
#include "cgraph.h"
#include "gimple-pretty-print.h"
#include "diagnostic-core.h"
#include "fold-const.h"
#include "trans-mem.h"
#include "stor-layout.h"
#include "print-tree.h"
#include "cfganal.h"
#include "gimple-fold.h"
#include "tree-eh.h"
#include "gimple-iterator.h"
#include "gimplify-me.h"
#include "gimple-walk.h"
#include "tree-cfg.h"
#include "tree-ssa-loop-manip.h"
#include "tree-ssa-loop-niter.h"
#include "tree-into-ssa.h"
#include "tree-dfa.h"
#include "tree-ssa.h"
#include "except.h"
#include "cfgloop.h"
#include "tree-ssa-propagate.h"
#include "value-prof.h"
#include "tree-inline.h"
#include "tree-ssa-live.h"
#include "omp-general.h"
#include "omp-expand.h"
#include "tree-cfgcleanup.h"
#include "gimplify.h"
#include "attribs.h"
#include "selftest.h"
#include "opts.h"
#include "asan.h"
#include "profile.h"
/* This file contains functions for building the Control Flow Graph (CFG)
for a function tree. */
/* Local declarations. */
/* Initial capacity for the basic block array. */
static const int initial_cfg_capacity = 20;
/* This hash table allows us to efficiently lookup all CASE_LABEL_EXPRs
which use a particular edge. The CASE_LABEL_EXPRs are chained together
via their CASE_CHAIN field, which we clear after we're done with the
hash table to prevent problems with duplication of GIMPLE_SWITCHes.
Access to this list of CASE_LABEL_EXPRs allows us to efficiently
update the case vector in response to edge redirections.
Right now this table is set up and torn down at key points in the
compilation process. It would be nice if we could make the table
more persistent. The key is getting notification of changes to
the CFG (particularly edge removal, creation and redirection). */
static hash_map<edge, tree> *edge_to_cases;
/* If we record edge_to_cases, this bitmap will hold indexes
of basic blocks that end in a GIMPLE_SWITCH which we touched
due to edge manipulations. */
static bitmap touched_switch_bbs;
/* CFG statistics.  */
struct cfg_stats_d
{
  /* Number of labels merged together while building/cleaning the CFG.  */
  long num_merged_labels;
};

/* File-scope accumulator, zeroed at the start of build_gimple_cfg.  */
static struct cfg_stats_d cfg_stats;
/* Data to pass to replace_block_vars_by_duplicates_1.  */
struct replace_decls_d
{
  /* Map from original variables to their duplicates.  */
  hash_map<tree, tree> *vars_map;
  /* Destination context the duplicates belong to.  */
  tree to_context;
};
/* Hash table to store last discriminator assigned for each locus.  */
struct locus_discrim_map
{
  /* Source line number; used as the hash key.  */
  int location_line;
  /* Last discriminator value handed out for that line.  */
  int discriminator;
};
/* Hashtable helpers for the locus-to-discriminator map.  Entries are
   plain malloc'd structs, hence free_ptr_hash.  */
struct locus_discrim_hasher : free_ptr_hash <locus_discrim_map>
{
  static inline hashval_t hash (const locus_discrim_map *);
  static inline bool equal (const locus_discrim_map *,
			    const locus_discrim_map *);
};
/* Trivial hash function for a location_t.  ITEM is a pointer to
   a hash table entry that maps a location_t to a discriminator.  */

inline hashval_t
locus_discrim_hasher::hash (const locus_discrim_map *item)
{
  /* The line number itself is a good enough hash.  */
  return item->location_line;
}
/* Equality function for the locus-to-discriminator map.  A and B
   point to the two hash table entries to compare.  Two entries are
   equal iff they refer to the same source line.  */

inline bool
locus_discrim_hasher::equal (const locus_discrim_map *a,
			     const locus_discrim_map *b)
{
  return a->location_line == b->location_line;
}
static hash_table<locus_discrim_hasher> *discriminator_per_locus;
/* Basic blocks and flowgraphs. */
static void make_blocks (gimple_seq);
/* Edges. */
static void make_edges (void);
static void assign_discriminators (void);
static void make_cond_expr_edges (basic_block);
static void make_gimple_switch_edges (gswitch *, basic_block);
static bool make_goto_expr_edges (basic_block);
static void make_gimple_asm_edges (basic_block);
static edge gimple_redirect_edge_and_branch (edge, basic_block);
static edge gimple_try_redirect_by_replacing_jump (edge, basic_block);
/* Various helpers. */
static inline bool stmt_starts_bb_p (gimple *, gimple *);
static int gimple_verify_flow_info (void);
static void gimple_make_forwarder_block (edge);
static gimple *first_non_label_stmt (basic_block);
static bool verify_gimple_transaction (gtransaction *);
static bool call_can_make_abnormal_goto (gimple *);
/* Flowgraph optimization and cleanup. */
static void gimple_merge_blocks (basic_block, basic_block);
static bool gimple_can_merge_blocks_p (basic_block, basic_block);
static void remove_bb (basic_block);
static edge find_taken_edge_computed_goto (basic_block, tree);
static edge find_taken_edge_cond_expr (const gcond *, tree);
/* Initialize an empty CFG for function FN: allocate the basic-block
   and label-to-block arrays, register the fixed ENTRY/EXIT blocks and
   chain them together.  */

void
init_empty_tree_cfg_for_function (struct function *fn)
{
  /* Initialize the basic block array.  */
  init_flow (fn);
  profile_status_for_fn (fn) = PROFILE_ABSENT;
  n_basic_blocks_for_fn (fn) = NUM_FIXED_BLOCKS;
  last_basic_block_for_fn (fn) = NUM_FIXED_BLOCKS;
  vec_alloc (basic_block_info_for_fn (fn), initial_cfg_capacity);
  vec_safe_grow_cleared (basic_block_info_for_fn (fn),
			 initial_cfg_capacity);

  /* Build a mapping of labels to their associated blocks.  */
  vec_alloc (label_to_block_map_for_fn (fn), initial_cfg_capacity);
  vec_safe_grow_cleared (label_to_block_map_for_fn (fn),
			 initial_cfg_capacity);

  SET_BASIC_BLOCK_FOR_FN (fn, ENTRY_BLOCK, ENTRY_BLOCK_PTR_FOR_FN (fn));
  SET_BASIC_BLOCK_FOR_FN (fn, EXIT_BLOCK, EXIT_BLOCK_PTR_FOR_FN (fn));

  /* Link ENTRY and EXIT directly in the block chain; there are no
     other blocks yet.  */
  ENTRY_BLOCK_PTR_FOR_FN (fn)->next_bb
    = EXIT_BLOCK_PTR_FOR_FN (fn);
  EXIT_BLOCK_PTR_FOR_FN (fn)->prev_bb
    = ENTRY_BLOCK_PTR_FOR_FN (fn);
}
/* Initialize an empty CFG for the current function (cfun).  */

void
init_empty_tree_cfg (void)
{
  init_empty_tree_cfg_for_function (cfun);
}
/*---------------------------------------------------------------------------
Create basic blocks
---------------------------------------------------------------------------*/
/* Entry point to the CFG builder for trees.  SEQ is the sequence of
   statements to be added to the flowgraph.  */

static void
build_gimple_cfg (gimple_seq seq)
{
  /* Register specific gimple functions.  */
  gimple_register_cfg_hooks ();

  memset ((void *) &cfg_stats, 0, sizeof (cfg_stats));

  init_empty_tree_cfg ();

  make_blocks (seq);

  /* Make sure there is always at least one block, even if it's empty.  */
  if (n_basic_blocks_for_fn (cfun) == NUM_FIXED_BLOCKS)
    create_empty_bb (ENTRY_BLOCK_PTR_FOR_FN (cfun));

  /* Adjust the size of the array.  */
  if (basic_block_info_for_fn (cfun)->length ()
      < (size_t) n_basic_blocks_for_fn (cfun))
    vec_safe_grow_cleared (basic_block_info_for_fn (cfun),
			   n_basic_blocks_for_fn (cfun));

  /* To speed up statement iterator walks, we first purge dead labels.  */
  cleanup_dead_labels ();

  /* Group case nodes to reduce the number of edges.
     We do this after cleaning up dead labels because otherwise we miss
     a lot of obvious case merging opportunities.  */
  group_case_labels ();

  /* Create the edges of the flowgraph.  The discriminator table only
     lives for the duration of edge creation.  */
  discriminator_per_locus = new hash_table<locus_discrim_hasher> (13);
  make_edges ();
  assign_discriminators ();
  cleanup_dead_labels ();
  delete discriminator_per_locus;
  discriminator_per_locus = NULL;
}
/* Look for ANNOTATE calls with loop annotation kind in BB; if found, remove
   them and propagate the information to LOOP.  We assume that the annotations
   come immediately before the condition in BB, if any.  */

static void
replace_loop_annotate_in_block (basic_block bb, class loop *loop)
{
  gimple_stmt_iterator gsi = gsi_last_bb (bb);
  gimple *stmt = gsi_stmt (gsi);

  /* Annotations are only expected right before a loop condition.  */
  if (!(stmt && gimple_code (stmt) == GIMPLE_COND))
    return;

  /* Walk backwards over the run of IFN_ANNOTATE calls that precede
     the condition; stop at the first statement that is not one.  */
  for (gsi_prev_nondebug (&gsi); !gsi_end_p (gsi); gsi_prev (&gsi))
    {
      stmt = gsi_stmt (gsi);
      if (gimple_code (stmt) != GIMPLE_CALL)
	break;
      if (!gimple_call_internal_p (stmt)
	  || gimple_call_internal_fn (stmt) != IFN_ANNOTATE)
	break;

      /* Argument 1 of the ANNOTATE call encodes the annotation kind.  */
      switch ((annot_expr_kind) tree_to_shwi (gimple_call_arg (stmt, 1)))
	{
	case annot_expr_ivdep_kind:
	  loop->safelen = INT_MAX;
	  break;
	case annot_expr_unroll_kind:
	  loop->unroll
	    = (unsigned short) tree_to_shwi (gimple_call_arg (stmt, 2));
	  cfun->has_unroll = true;
	  break;
	case annot_expr_no_vector_kind:
	  loop->dont_vectorize = true;
	  break;
	case annot_expr_vector_kind:
	  loop->force_vectorize = true;
	  cfun->has_force_vectorize_loops = true;
	  break;
	case annot_expr_parallel_kind:
	  loop->can_be_parallel = true;
	  loop->safelen = INT_MAX;
	  break;
	default:
	  gcc_unreachable ();
	}

      /* The annotation has been consumed; replace the call with a
	 plain copy of its first argument into the original LHS.  */
      stmt = gimple_build_assign (gimple_call_lhs (stmt),
				  gimple_call_arg (stmt, 0));
      gsi_replace (&gsi, stmt, true);
    }
}
/* Look for ANNOTATE calls with loop annotation kind; if found, remove
   them and propagate the information to the loop.  We assume that the
   annotations come immediately before the condition of the loop.  */

static void
replace_loop_annotate (void)
{
  class loop *loop;
  basic_block bb;
  gimple_stmt_iterator gsi;
  gimple *stmt;

  FOR_EACH_LOOP (loop, 0)
    {
      /* First look into the header.  */
      replace_loop_annotate_in_block (loop->header, loop);

      /* Then look into the latch, if any.  */
      if (loop->latch)
	replace_loop_annotate_in_block (loop->latch, loop);

      /* Push the global flag_finite_loops state down to individual loops.  */
      loop->finite_p = flag_finite_loops;
    }

  /* Remove IFN_ANNOTATE.  Safeguard for the case loop->latch == NULL.  */
  FOR_EACH_BB_FN (bb, cfun)
    {
      for (gsi = gsi_last_bb (bb); !gsi_end_p (gsi); gsi_prev (&gsi))
	{
	  stmt = gsi_stmt (gsi);
	  if (gimple_code (stmt) != GIMPLE_CALL)
	    continue;
	  if (!gimple_call_internal_p (stmt)
	      || gimple_call_internal_fn (stmt) != IFN_ANNOTATE)
	    continue;

	  /* Verify the annotation kind is one we know about.  */
	  switch ((annot_expr_kind) tree_to_shwi (gimple_call_arg (stmt, 1)))
	    {
	    case annot_expr_ivdep_kind:
	    case annot_expr_unroll_kind:
	    case annot_expr_no_vector_kind:
	    case annot_expr_vector_kind:
	    case annot_expr_parallel_kind:
	      break;
	    default:
	      gcc_unreachable ();
	    }

	  /* Any annotation reached here was not consumed by the loop
	     walk above; warn and strip it.  */
	  warning_at (gimple_location (stmt), 0, "ignoring loop annotation");
	  stmt = gimple_build_assign (gimple_call_lhs (stmt),
				      gimple_call_arg (stmt, 0));
	  gsi_replace (&gsi, stmt, true);
	}
    }
}
/* Build the CFG for the current function's GIMPLE body, release the
   linear body representation, clean up the fresh CFG and initialize
   the loop structures.  Entry point of the "cfg" pass.  */

static unsigned int
execute_build_cfg (void)
{
  gimple_seq body = gimple_body (current_function_decl);

  build_gimple_cfg (body);

  /* The statements now live in basic blocks; drop the linear body.  */
  gimple_set_body (current_function_decl, NULL);
  if (dump_file && (dump_flags & TDF_DETAILS))
    {
      fprintf (dump_file, "Scope blocks:\n");
      dump_scope_blocks (dump_file, dump_flags);
    }
  cleanup_tree_cfg ();
  loop_optimizer_init (AVOID_CFG_MODIFICATIONS);
  replace_loop_annotate ();
  return 0;
}
namespace {

const pass_data pass_data_build_cfg =
{
  GIMPLE_PASS, /* type */
  "cfg", /* name */
  OPTGROUP_NONE, /* optinfo_flags */
  TV_TREE_CFG, /* tv_id */
  PROP_gimple_leh, /* properties_required */
  ( PROP_cfg | PROP_loops ), /* properties_provided */
  0, /* properties_destroyed */
  0, /* todo_flags_start */
  0, /* todo_flags_finish */
};

/* Thin opt_pass wrapper dispatching to execute_build_cfg.  */

class pass_build_cfg : public gimple_opt_pass
{
public:
  pass_build_cfg (gcc::context *ctxt)
    : gimple_opt_pass (pass_data_build_cfg, ctxt)
  {}

  /* opt_pass methods: */
  virtual unsigned int execute (function *) { return execute_build_cfg (); }

}; // class pass_build_cfg

} // anon namespace
/* Factory for the CFG construction pass; caller owns the result.  */

gimple_opt_pass *
make_pass_build_cfg (gcc::context *ctxt)
{
  return new pass_build_cfg (ctxt);
}
/* Return true if T is a computed goto.  */

bool
computed_goto_p (gimple *t)
{
  /* A goto whose destination is anything but a literal LABEL_DECL
     must have been computed at run time.  */
  return (gimple_code (t) == GIMPLE_GOTO
	  && TREE_CODE (gimple_goto_dest (t)) != LABEL_DECL);
}
/* Returns true if the sequence of statements STMTS only contains
   a call to __builtin_unreachable ().  */

bool
gimple_seq_unreachable_p (gimple_seq stmts)
{
  if (stmts == NULL
      /* Return false if -fsanitize=unreachable, we don't want to
	 optimize away those calls, but rather turn them into
	 __ubsan_handle_builtin_unreachable () or __builtin_trap ()
	 later.  */
      || sanitize_flags_p (SANITIZE_UNREACHABLE))
    return false;

  gimple_stmt_iterator gsi = gsi_last (stmts);

  if (!gimple_call_builtin_p (gsi_stmt (gsi), BUILT_IN_UNREACHABLE))
    return false;

  /* Labels, debug stmts and clobbers before the call are harmless;
     anything else disqualifies the sequence.  */
  for (gsi_prev (&gsi); !gsi_end_p (gsi); gsi_prev (&gsi))
    {
      gimple *stmt = gsi_stmt (gsi);
      if (gimple_code (stmt) != GIMPLE_LABEL
	  && !is_gimple_debug (stmt)
	  && !gimple_clobber_p (stmt))
	return false;
    }
  return true;
}
/* Returns true for edge E where e->src ends with a GIMPLE_COND and
   the other edge points to a bb with just __builtin_unreachable ().
   I.e. return true for C->M edge in:
   <bb C>:
   ...
   if (something)
     goto <bb N>;
   else
     goto <bb M>;
   <bb N>:
   __builtin_unreachable ();
   <bb M>:  */

bool
assert_unreachable_fallthru_edge_p (edge e)
{
  basic_block pred_bb = e->src;
  gimple *last = last_stmt (pred_bb);

  if (last && gimple_code (last) == GIMPLE_COND)
    {
      /* A GIMPLE_COND block has exactly two successors; pick the one
	 that is not E's destination.  */
      basic_block other_bb = EDGE_SUCC (pred_bb, 0)->dest;
      if (other_bb == e->dest)
	other_bb = EDGE_SUCC (pred_bb, 1)->dest;
      if (EDGE_COUNT (other_bb->succs) == 0)
	return gimple_seq_unreachable_p (bb_seq (other_bb));
    }
  return false;
}
/* Initialize GF_CALL_CTRL_ALTERING flag, which indicates the call
   could alter control flow except via eh.  We initialize the flag at
   CFG build time and only ever clear it later.  */

static void
gimple_call_initialize_ctrl_altering (gimple *stmt)
{
  int flags = gimple_call_flags (stmt);

  /* A call alters control flow if it can make an abnormal goto.  */
  if (call_can_make_abnormal_goto (stmt)
      /* A call also alters control flow if it does not return.  */
      || flags & ECF_NORETURN
      /* TM ending statements have backedges out of the transaction.
	 Return true so we split the basic block containing them.
	 Note that the TM_BUILTIN test is merely an optimization.  */
      || ((flags & ECF_TM_BUILTIN)
	  && is_tm_ending_fndecl (gimple_call_fndecl (stmt)))
      /* BUILT_IN_RETURN call is same as return statement.  */
      || gimple_call_builtin_p (stmt, BUILT_IN_RETURN)
      /* IFN_UNIQUE should be the last insn, to make checking for it
	 as cheap as possible.  */
      || (gimple_call_internal_p (stmt)
	  && gimple_call_internal_unique_p (stmt)))
    gimple_call_set_ctrl_altering (stmt, true);
  else
    gimple_call_set_ctrl_altering (stmt, false);
}
/* Insert SEQ after BB and build a flowgraph.  Returns the last basic
   block created, so a caller can chain further sequences after it.  */

static basic_block
make_blocks_1 (gimple_seq seq, basic_block bb)
{
  gimple_stmt_iterator i = gsi_start (seq);
  gimple *stmt = NULL;
  gimple *prev_stmt = NULL;
  bool start_new_block = true;
  bool first_stmt_of_seq = true;

  while (!gsi_end_p (i))
    {
      /* PREV_STMT should only be set to a debug stmt if the debug
	 stmt is before nondebug stmts.  Once stmt reaches a nondebug
	 nonlabel, prev_stmt will be set to it, so that
	 stmt_starts_bb_p will know to start a new block if a label is
	 found.  However, if stmt was a label after debug stmts only,
	 keep the label in prev_stmt even if we find further debug
	 stmts, for there may be other labels after them, and they
	 should land in the same block.  */
      if (!prev_stmt || !stmt || !is_gimple_debug (stmt))
	prev_stmt = stmt;
      stmt = gsi_stmt (i);

      if (stmt && is_gimple_call (stmt))
	gimple_call_initialize_ctrl_altering (stmt);

      /* If the statement starts a new basic block or if we have determined
	 in a previous pass that we need to create a new block for STMT, do
	 so now.  */
      if (start_new_block || stmt_starts_bb_p (stmt, prev_stmt))
	{
	  if (!first_stmt_of_seq)
	    gsi_split_seq_before (&i, &seq);
	  bb = create_basic_block (seq, bb);
	  start_new_block = false;
	  prev_stmt = NULL;
	}

      /* Now add STMT to BB and create the subgraphs for special statement
	 codes.  */
      gimple_set_bb (stmt, bb);

      /* If STMT is a basic block terminator, set START_NEW_BLOCK for the
	 next iteration.  */
      if (stmt_ends_bb_p (stmt))
	{
	  /* If the stmt can make abnormal goto use a new temporary
	     for the assignment to the LHS.  This makes sure the old value
	     of the LHS is available on the abnormal edge.  Otherwise
	     we will end up with overlapping life-ranges for abnormal
	     SSA names.  */
	  if (gimple_has_lhs (stmt)
	      && stmt_can_make_abnormal_goto (stmt)
	      && is_gimple_reg_type (TREE_TYPE (gimple_get_lhs (stmt))))
	    {
	      tree lhs = gimple_get_lhs (stmt);
	      tree tmp = create_tmp_var (TREE_TYPE (lhs));
	      gimple *s = gimple_build_assign (lhs, tmp);
	      gimple_set_location (s, gimple_location (stmt));
	      gimple_set_block (s, gimple_block (stmt));
	      gimple_set_lhs (stmt, tmp);
	      if (TREE_CODE (TREE_TYPE (tmp)) == COMPLEX_TYPE
		  || TREE_CODE (TREE_TYPE (tmp)) == VECTOR_TYPE)
		DECL_GIMPLE_REG_P (tmp) = 1;
	      gsi_insert_after (&i, s, GSI_SAME_STMT);
	    }
	  start_new_block = true;
	}

      gsi_next (&i);
      first_stmt_of_seq = false;
    }
  return bb;
}
/* Build a flowgraph for the sequence of stmts SEQ.  First normalizes
   the placement of debug markers relative to labels (see below), then
   delegates block creation to make_blocks_1, starting after the
   ENTRY block of the current function.  */

static void
make_blocks (gimple_seq seq)
{
  /* Look for debug markers right before labels, and move the debug
     stmts after the labels.  Accepting labels among debug markers
     adds no value, just complexity; if we wanted to annotate labels
     with view numbers (so sequencing among markers would matter) or
     somesuch, we're probably better off still moving the labels, but
     adding other debug annotations in their original positions or
     emitting nonbind or bind markers associated with the labels in
     the original position of the labels.

     Moving labels would probably be simpler, but we can't do that:
     moving labels assigns label ids to them, and doing so because of
     debug markers makes for -fcompare-debug and possibly even codegen
     differences.  So, we have to move the debug stmts instead.  To
     that end, we scan SEQ backwards, marking the position of the
     latest (earliest we find) label, and moving debug stmts that are
     not separated from it by nondebug nonlabel stmts after the
     label.  */
  if (MAY_HAVE_DEBUG_MARKER_STMTS)
    {
      gimple_stmt_iterator label = gsi_none ();

      for (gimple_stmt_iterator i = gsi_last (seq); !gsi_end_p (i); gsi_prev (&i))
	{
	  gimple *stmt = gsi_stmt (i);

	  /* If this is the first label we encounter (latest in SEQ)
	     before nondebug stmts, record its position.  */
	  if (is_a <glabel *> (stmt))
	    {
	      if (gsi_end_p (label))
		label = i;
	      continue;
	    }

	  /* Without a recorded label position to move debug stmts to,
	     there's nothing to do.  */
	  if (gsi_end_p (label))
	    continue;

	  /* Move the debug stmt at I after LABEL.  */
	  if (is_gimple_debug (stmt))
	    {
	      gcc_assert (gimple_debug_nonbind_marker_p (stmt));
	      /* As STMT is removed, I advances to the stmt after
		 STMT, so the gsi_prev in the for "increment"
		 expression gets us to the stmt we're to visit after
		 STMT.  LABEL, however, would advance to the moved
		 stmt if we passed it to gsi_move_after, so pass it a
		 copy instead, so as to keep LABEL pointing to the
		 LABEL.  */
	      gimple_stmt_iterator copy = label;
	      gsi_move_after (&i, &copy);
	      continue;
	    }

	  /* There aren't any (more?) debug stmts before label, so
	     there isn't anything else to move after it.  */
	  label = gsi_none ();
	}
    }

  make_blocks_1 (seq, ENTRY_BLOCK_PTR_FOR_FN (cfun));
}
/* Create and return a new empty basic block after bb AFTER.  H, if
   non-null, is the gimple_seq to install as the block's statement
   list; E must be null (edges are not supported by this hook).  */

static basic_block
create_bb (void *h, void *e, basic_block after)
{
  basic_block bb;

  gcc_assert (!e);

  /* Create and initialize a new basic block.  Since alloc_block uses
     GC allocation that clears memory to allocate a basic block, we do
     not have to clear the newly allocated basic block here.  */
  bb = alloc_block ();

  bb->index = last_basic_block_for_fn (cfun);
  bb->flags = BB_NEW;
  set_bb_seq (bb, h ? (gimple_seq) h : NULL);

  /* Add the new block to the linked list of blocks.  */
  link_block (bb, after);

  /* Grow the basic block array if needed.  */
  if ((size_t) last_basic_block_for_fn (cfun)
      == basic_block_info_for_fn (cfun)->length ())
    {
      /* Grow by roughly 25% to amortize reallocation cost.  */
      size_t new_size =
	(last_basic_block_for_fn (cfun)
	 + (last_basic_block_for_fn (cfun) + 3) / 4);
      vec_safe_grow_cleared (basic_block_info_for_fn (cfun), new_size);
    }

  /* Add the newly created block to the array.  */
  SET_BASIC_BLOCK_FOR_FN (cfun, last_basic_block_for_fn (cfun), bb);

  n_basic_blocks_for_fn (cfun)++;
  last_basic_block_for_fn (cfun)++;

  return bb;
}
/*---------------------------------------------------------------------------
Edge creation
---------------------------------------------------------------------------*/
/* If basic block BB has an abnormal edge to a basic block
   containing IFN_ABNORMAL_DISPATCHER internal call, return
   the dispatcher's basic block, otherwise return NULL.  */

basic_block
get_abnormal_succ_dispatcher (basic_block bb)
{
  edge e;
  edge_iterator ei;

  /* Look only at abnormal, non-EH successor edges.  */
  FOR_EACH_EDGE (e, ei, bb->succs)
    if ((e->flags & (EDGE_ABNORMAL | EDGE_EH)) == EDGE_ABNORMAL)
      {
	/* The dispatcher call is the first real statement of its
	   block, past any labels and debug stmts.  */
	gimple_stmt_iterator gsi
	  = gsi_start_nondebug_after_labels_bb (e->dest);
	gimple *g = gsi_stmt (gsi);
	if (g && gimple_call_internal_p (g, IFN_ABNORMAL_DISPATCHER))
	  return e->dest;
      }
  return NULL;
}
/* Helper function for make_edges.  Create a basic block with
   ABNORMAL_DISPATCHER internal call in it if needed, and
   create abnormal edges from BBS to it and from it to FOR_BB
   if COMPUTED_GOTO is false, otherwise factor the computed gotos.
   DISPATCHER_BBS caches already-created dispatchers (one slot for
   abnormal calls, one for computed gotos, replicated per OMP region
   when BB_TO_OMP_IDX is non-null).  */

static void
handle_abnormal_edges (basic_block *dispatcher_bbs,
		       basic_block for_bb, int *bb_to_omp_idx,
		       auto_vec<basic_block> *bbs, bool computed_goto)
{
  /* Slot 0 holds the abnormal-call dispatcher, slot 1 the
     computed-goto dispatcher.  */
  basic_block *dispatcher = dispatcher_bbs + (computed_goto ? 1 : 0);
  unsigned int idx = 0;
  basic_block bb;
  bool inner = false;

  if (bb_to_omp_idx)
    {
      /* Each OMP region gets its own pair of dispatcher slots.  */
      dispatcher = dispatcher_bbs + 2 * bb_to_omp_idx[for_bb->index];
      if (bb_to_omp_idx[for_bb->index] != 0)
	inner = true;
    }

  /* If the dispatcher has been created already, then there are basic
     blocks with abnormal edges to it, so just make a new edge to
     for_bb.  */
  if (*dispatcher == NULL)
    {
      /* Check if there are any basic blocks that need to have
	 abnormal edges to this dispatcher.  If there are none, return
	 early.  */
      if (bb_to_omp_idx == NULL)
	{
	  if (bbs->is_empty ())
	    return;
	}
      else
	{
	  /* Only sources in the same OMP region as FOR_BB count.  */
	  FOR_EACH_VEC_ELT (*bbs, idx, bb)
	    if (bb_to_omp_idx[bb->index] == bb_to_omp_idx[for_bb->index])
	      break;
	  if (bb == NULL)
	    return;
	}

      /* Create the dispatcher bb.  */
      *dispatcher = create_basic_block (NULL, for_bb);
      if (computed_goto)
	{
	  /* Factor computed gotos into a common computed goto site.  Also
	     record the location of that site so that we can un-factor the
	     gotos after we have converted back to normal form.  */
	  gimple_stmt_iterator gsi = gsi_start_bb (*dispatcher);

	  /* Create the destination of the factored goto.  Each original
	     computed goto will put its desired destination into this
	     variable and jump to the label we create immediately below.  */
	  tree var = create_tmp_var (ptr_type_node, "gotovar");

	  /* Build a label for the new block which will contain the
	     factored computed goto.  */
	  tree factored_label_decl
	    = create_artificial_label (UNKNOWN_LOCATION);
	  gimple *factored_computed_goto_label
	    = gimple_build_label (factored_label_decl);
	  gsi_insert_after (&gsi, factored_computed_goto_label, GSI_NEW_STMT);

	  /* Build our new computed goto.  */
	  gimple *factored_computed_goto = gimple_build_goto (var);
	  gsi_insert_after (&gsi, factored_computed_goto, GSI_NEW_STMT);

	  FOR_EACH_VEC_ELT (*bbs, idx, bb)
	    {
	      if (bb_to_omp_idx
		  && bb_to_omp_idx[bb->index] != bb_to_omp_idx[for_bb->index])
		continue;

	      gsi = gsi_last_bb (bb);
	      gimple *last = gsi_stmt (gsi);

	      gcc_assert (computed_goto_p (last));

	      /* Copy the original computed goto's destination into VAR.  */
	      gimple *assignment
		= gimple_build_assign (var, gimple_goto_dest (last));
	      gsi_insert_before (&gsi, assignment, GSI_SAME_STMT);

	      /* The original goto is replaced by a fallthru into the
		 factored dispatcher.  */
	      edge e = make_edge (bb, *dispatcher, EDGE_FALLTHRU);
	      e->goto_locus = gimple_location (last);
	      gsi_remove (&gsi, true);
	    }
	}
      else
	{
	  /* The dispatcher call records whether it sits inside a
	     nested OMP region.  */
	  tree arg = inner ? boolean_true_node : boolean_false_node;
	  gimple *g = gimple_build_call_internal (IFN_ABNORMAL_DISPATCHER,
						  1, arg);
	  gimple_stmt_iterator gsi = gsi_after_labels (*dispatcher);
	  gsi_insert_after (&gsi, g, GSI_NEW_STMT);

	  /* Create predecessor edges of the dispatcher.  */
	  FOR_EACH_VEC_ELT (*bbs, idx, bb)
	    {
	      if (bb_to_omp_idx
		  && bb_to_omp_idx[bb->index] != bb_to_omp_idx[for_bb->index])
		continue;
	      make_edge (bb, *dispatcher, EDGE_ABNORMAL);
	    }
	}
    }

  make_edge (*dispatcher, for_bb, EDGE_ABNORMAL);
}
/* Creates outgoing edges for BB.  Returns 1 when it ends with an
   computed goto, returns 2 when it ends with a statement that
   might return to this function via an nonlocal goto, otherwise
   return 0.  Updates *PCUR_REGION with the OMP region this BB is in.  */

static int
make_edges_bb (basic_block bb, struct omp_region **pcur_region, int *pomp_index)
{
  gimple *last = last_stmt (bb);
  bool fallthru = false;
  int ret = 0;

  if (!last)
    return ret;

  switch (gimple_code (last))
    {
    case GIMPLE_GOTO:
      if (make_goto_expr_edges (bb))
	ret = 1;
      fallthru = false;
      break;
    case GIMPLE_RETURN:
      {
	edge e = make_edge (bb, EXIT_BLOCK_PTR_FOR_FN (cfun), 0);
	e->goto_locus = gimple_location (last);
	fallthru = false;
      }
      break;
    case GIMPLE_COND:
      make_cond_expr_edges (bb);
      fallthru = false;
      break;
    case GIMPLE_SWITCH:
      make_gimple_switch_edges (as_a <gswitch *> (last), bb);
      fallthru = false;
      break;
    case GIMPLE_RESX:
      make_eh_edges (last);
      fallthru = false;
      break;
    case GIMPLE_EH_DISPATCH:
      fallthru = make_eh_dispatch_edges (as_a <geh_dispatch *> (last));
      break;

    case GIMPLE_CALL:
      /* If this function receives a nonlocal goto, then we need to
	 make edges from this call site to all the nonlocal goto
	 handlers.  */
      if (stmt_can_make_abnormal_goto (last))
	ret = 2;

      /* If this statement has reachable exception handlers, then
	 create abnormal edges to them.  */
      make_eh_edges (last);

      /* BUILTIN_RETURN is really a return statement.  */
      if (gimple_call_builtin_p (last, BUILT_IN_RETURN))
	{
	  make_edge (bb, EXIT_BLOCK_PTR_FOR_FN (cfun), 0);
	  fallthru = false;
	}
      /* Some calls are known not to return.  */
      else
	fallthru = !gimple_call_noreturn_p (last);
      break;

    case GIMPLE_ASSIGN:
      /* A GIMPLE_ASSIGN may throw internally and thus be considered
	 control-altering.  */
      if (is_ctrl_altering_stmt (last))
	make_eh_edges (last);
      fallthru = true;
      break;

    case GIMPLE_ASM:
      make_gimple_asm_edges (bb);
      fallthru = true;
      break;

    CASE_GIMPLE_OMP:
      /* OMP directives get their edges (and region bookkeeping) from
	 the OMP expander.  */
      fallthru = omp_make_gimple_edges (bb, pcur_region, pomp_index);
      break;

    case GIMPLE_TRANSACTION:
      {
	gtransaction *txn = as_a <gtransaction *> (last);
	tree label1 = gimple_transaction_label_norm (txn);
	tree label2 = gimple_transaction_label_uninst (txn);

	if (label1)
	  make_edge (bb, label_to_block (cfun, label1), EDGE_FALLTHRU);
	if (label2)
	  make_edge (bb, label_to_block (cfun, label2),
		     EDGE_TM_UNINSTRUMENTED | (label1 ? 0 : EDGE_FALLTHRU));

	tree label3 = gimple_transaction_label_over (txn);
	if (gimple_transaction_subcode (txn)
	    & (GTMA_HAVE_ABORT | GTMA_IS_OUTER))
	  make_edge (bb, label_to_block (cfun, label3), EDGE_TM_ABORT);

	fallthru = false;
      }
      break;

    default:
      gcc_assert (!stmt_ends_bb_p (last));
      fallthru = true;
      break;
    }

  if (fallthru)
    make_edge (bb, bb->next_bb, EDGE_FALLTHRU);

  return ret;
}
/* Join all the blocks in the flowgraph. */
static void
make_edges (void)
{
basic_block bb;
struct omp_region *cur_region = NULL;
auto_vec<basic_block> ab_edge_goto;
auto_vec<basic_block> ab_edge_call;
int *bb_to_omp_idx = NULL;
int cur_omp_region_idx = 0;
/* Create an edge from entry to the first block with executable
statements in it. */
make_edge (ENTRY_BLOCK_PTR_FOR_FN (cfun),
BASIC_BLOCK_FOR_FN (cfun, NUM_FIXED_BLOCKS),
EDGE_FALLTHRU);
/* Traverse the basic block array placing edges. */
FOR_EACH_BB_FN (bb, cfun)
{
int mer;
if (bb_to_omp_idx)
bb_to_omp_idx[bb->index] = cur_omp_region_idx;
mer = make_edges_bb (bb, &cur_region, &cur_omp_region_idx);
if (mer == 1)
ab_edge_goto.safe_push (bb);
else if (mer == 2)
ab_edge_call.safe_push (bb);
if (cur_region && bb_to_omp_idx == NULL)
bb_to_omp_idx = XCNEWVEC (int, n_basic_blocks_for_fn (cfun));
}
/* Computed gotos are hell to deal with, especially if there are
lots of them with a large number of destinations. So we factor
them to a common computed goto location before we build the
edge list. After we convert back to normal form, we will un-factor
the computed gotos since factoring introduces an unwanted jump.