forked from riscvarchive/riscv-gcc
-
Notifications
You must be signed in to change notification settings - Fork 0
/
Copy pathira-build.c
3519 lines (3170 loc) · 101 KB
/
ira-build.c
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
165
166
167
168
169
170
171
172
173
174
175
176
177
178
179
180
181
182
183
184
185
186
187
188
189
190
191
192
193
194
195
196
197
198
199
200
201
202
203
204
205
206
207
208
209
210
211
212
213
214
215
216
217
218
219
220
221
222
223
224
225
226
227
228
229
230
231
232
233
234
235
236
237
238
239
240
241
242
243
244
245
246
247
248
249
250
251
252
253
254
255
256
257
258
259
260
261
262
263
264
265
266
267
268
269
270
271
272
273
274
275
276
277
278
279
280
281
282
283
284
285
286
287
288
289
290
291
292
293
294
295
296
297
298
299
300
301
302
303
304
305
306
307
308
309
310
311
312
313
314
315
316
317
318
319
320
321
322
323
324
325
326
327
328
329
330
331
332
333
334
335
336
337
338
339
340
341
342
343
344
345
346
347
348
349
350
351
352
353
354
355
356
357
358
359
360
361
362
363
364
365
366
367
368
369
370
371
372
373
374
375
376
377
378
379
380
381
382
383
384
385
386
387
388
389
390
391
392
393
394
395
396
397
398
399
400
401
402
403
404
405
406
407
408
409
410
411
412
413
414
415
416
417
418
419
420
421
422
423
424
425
426
427
428
429
430
431
432
433
434
435
436
437
438
439
440
441
442
443
444
445
446
447
448
449
450
451
452
453
454
455
456
457
458
459
460
461
462
463
464
465
466
467
468
469
470
471
472
473
474
475
476
477
478
479
480
481
482
483
484
485
486
487
488
489
490
491
492
493
494
495
496
497
498
499
500
501
502
503
504
505
506
507
508
509
510
511
512
513
514
515
516
517
518
519
520
521
522
523
524
525
526
527
528
529
530
531
532
533
534
535
536
537
538
539
540
541
542
543
544
545
546
547
548
549
550
551
552
553
554
555
556
557
558
559
560
561
562
563
564
565
566
567
568
569
570
571
572
573
574
575
576
577
578
579
580
581
582
583
584
585
586
587
588
589
590
591
592
593
594
595
596
597
598
599
600
601
602
603
604
605
606
607
608
609
610
611
612
613
614
615
616
617
618
619
620
621
622
623
624
625
626
627
628
629
630
631
632
633
634
635
636
637
638
639
640
641
642
643
644
645
646
647
648
649
650
651
652
653
654
655
656
657
658
659
660
661
662
663
664
665
666
667
668
669
670
671
672
673
674
675
676
677
678
679
680
681
682
683
684
685
686
687
688
689
690
691
692
693
694
695
696
697
698
699
700
701
702
703
704
705
706
707
708
709
710
711
712
713
714
715
716
717
718
719
720
721
722
723
724
725
726
727
728
729
730
731
732
733
734
735
736
737
738
739
740
741
742
743
744
745
746
747
748
749
750
751
752
753
754
755
756
757
758
759
760
761
762
763
764
765
766
767
768
769
770
771
772
773
774
775
776
777
778
779
780
781
782
783
784
785
786
787
788
789
790
791
792
793
794
795
796
797
798
799
800
801
802
803
804
805
806
807
808
809
810
811
812
813
814
815
816
817
818
819
820
821
822
823
824
825
826
827
828
829
830
831
832
833
834
835
836
837
838
839
840
841
842
843
844
845
846
847
848
849
850
851
852
853
854
855
856
857
858
859
860
861
862
863
864
865
866
867
868
869
870
871
872
873
874
875
876
877
878
879
880
881
882
883
884
885
886
887
888
889
890
891
892
893
894
895
896
897
898
899
900
901
902
903
904
905
906
907
908
909
910
911
912
913
914
915
916
917
918
919
920
921
922
923
924
925
926
927
928
929
930
931
932
933
934
935
936
937
938
939
940
941
942
943
944
945
946
947
948
949
950
951
952
953
954
955
956
957
958
959
960
961
962
963
964
965
966
967
968
969
970
971
972
973
974
975
976
977
978
979
980
981
982
983
984
985
986
987
988
989
990
991
992
993
994
995
996
997
998
999
1000
/* Building internal representation for IRA.
Copyright (C) 2006-2020 Free Software Foundation, Inc.
Contributed by Vladimir Makarov <[email protected]>.
This file is part of GCC.
GCC is free software; you can redistribute it and/or modify it under
the terms of the GNU General Public License as published by the Free
Software Foundation; either version 3, or (at your option) any later
version.
GCC is distributed in the hope that it will be useful, but WITHOUT ANY
WARRANTY; without even the implied warranty of MERCHANTABILITY or
FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
for more details.
You should have received a copy of the GNU General Public License
along with GCC; see the file COPYING3. If not see
<http://www.gnu.org/licenses/>. */
#include "config.h"
#include "system.h"
#include "coretypes.h"
#include "backend.h"
#include "target.h"
#include "rtl.h"
#include "predict.h"
#include "df.h"
#include "insn-config.h"
#include "regs.h"
#include "memmodel.h"
#include "ira.h"
#include "ira-int.h"
#include "sparseset.h"
#include "cfgloop.h"
/* Forward declaration; defined later in this file.  Looks up an
   existing copy between two allocnos for an insn in a loop tree
   node.  */
static ira_copy_t find_allocno_copy (ira_allocno_t, ira_allocno_t, rtx_insn *,
				     ira_loop_tree_node_t);

/* The root of the loop tree corresponding to the all function.  */
ira_loop_tree_node_t ira_loop_tree_root;

/* Height of the loop tree.  */
int ira_loop_tree_height;

/* All nodes representing basic blocks are referred through the
   following array.  We cannot use basic block member `aux' for this
   because it is used for insertion of insns on edges.  */
ira_loop_tree_node_t ira_bb_nodes;

/* All nodes representing loops are referred through the following
   array.  */
ira_loop_tree_node_t ira_loop_nodes;

/* And size of the ira_loop_nodes array.  */
unsigned int ira_loop_nodes_count;

/* Map regno -> allocnos with given regno (see comments for
   allocno member `next_regno_allocno').  */
ira_allocno_t *ira_regno_allocno_map;

/* Array of references to all allocnos.  The order number of the
   allocno corresponds to the index in the array.  Removed allocnos
   have NULL element value.  */
ira_allocno_t *ira_allocnos;

/* Size of the previous array.  */
int ira_allocnos_num;

/* Count of conflict record structures we've created, used when creating
   a new conflict id.  */
int ira_objects_num;

/* Map a conflict id to its conflict record.  */
ira_object_t *ira_object_id_map;

/* Array of references to all allocno preferences.  The order number
   of the preference corresponds to the index in the array.  */
ira_pref_t *ira_prefs;

/* Size of the previous array.  */
int ira_prefs_num;

/* Array of references to all copies.  The order number of the copy
   corresponds to the index in the array.  Removed copies have NULL
   element value.  */
ira_copy_t *ira_copies;

/* Size of the previous array.  */
int ira_copies_num;

/* LAST_BASIC_BLOCK before generating additional insns because of live
   range splitting.  Emitting insns on a critical edge creates a new
   basic block.  */
static int last_basic_block_before_change;
/* Initialize some members in loop tree node NODE. Use LOOP_NUM for
the member loop_num. */
static void
init_loop_tree_node (struct ira_loop_tree_node *node, int loop_num)
{
int max_regno = max_reg_num ();
node->regno_allocno_map
= (ira_allocno_t *) ira_allocate (sizeof (ira_allocno_t) * max_regno);
memset (node->regno_allocno_map, 0, sizeof (ira_allocno_t) * max_regno);
memset (node->reg_pressure, 0, sizeof (node->reg_pressure));
node->all_allocnos = ira_allocate_bitmap ();
node->modified_regnos = ira_allocate_bitmap ();
node->border_allocnos = ira_allocate_bitmap ();
node->local_copies = ira_allocate_bitmap ();
node->loop_num = loop_num;
node->children = NULL;
node->subloops = NULL;
}
/* The following function allocates the loop tree nodes.  If
   CURRENT_LOOPS is NULL, the nodes corresponding to the loops (except
   the root which corresponds the all function) will be not allocated
   but nodes will still be allocated for basic blocks.  */
static void
create_loop_tree_nodes (void)
{
  unsigned int i, j;
  bool skip_p;
  edge_iterator ei;
  edge e;
  vec<edge> edges;
  loop_p loop;

  /* One node per basic block.  Only the NULL/zero initialization is
     done here; maps and bitmaps are filled in lazily elsewhere.  */
  ira_bb_nodes
    = ((struct ira_loop_tree_node *)
       ira_allocate (sizeof (struct ira_loop_tree_node)
		     * last_basic_block_for_fn (cfun)));
  last_basic_block_before_change = last_basic_block_for_fn (cfun);
  for (i = 0; i < (unsigned int) last_basic_block_for_fn (cfun); i++)
    {
      ira_bb_nodes[i].regno_allocno_map = NULL;
      memset (ira_bb_nodes[i].reg_pressure, 0,
	      sizeof (ira_bb_nodes[i].reg_pressure));
      ira_bb_nodes[i].all_allocnos = NULL;
      ira_bb_nodes[i].modified_regnos = NULL;
      ira_bb_nodes[i].border_allocnos = NULL;
      ira_bb_nodes[i].local_copies = NULL;
    }
  if (current_loops == NULL)
    {
      /* No CFG loop info: a single node representing the whole
	 function.  */
      ira_loop_nodes_count = 1;
      ira_loop_nodes = ((struct ira_loop_tree_node *)
			ira_allocate (sizeof (struct ira_loop_tree_node)));
      init_loop_tree_node (ira_loop_nodes, 0);
      return;
    }
  ira_loop_nodes_count = number_of_loops (cfun);
  ira_loop_nodes = ((struct ira_loop_tree_node *)
		    ira_allocate (sizeof (struct ira_loop_tree_node)
				  * ira_loop_nodes_count));
  FOR_EACH_VEC_SAFE_ELT (get_loops (cfun), i, loop)
    {
      if (loop_outer (loop) != NULL)
	{
	  /* A non-root loop is skipped (marked by a NULL regno map)
	     when it is entered through a non-latch abnormal critical
	     edge or exited through an abnormal critical edge.  */
	  ira_loop_nodes[i].regno_allocno_map = NULL;
	  skip_p = false;
	  FOR_EACH_EDGE (e, ei, loop->header->preds)
	    if (e->src != loop->latch
		&& (e->flags & EDGE_ABNORMAL) && EDGE_CRITICAL_P (e))
	      {
		skip_p = true;
		break;
	      }
	  if (skip_p)
	    continue;
	  edges = get_loop_exit_edges (loop);
	  FOR_EACH_VEC_ELT (edges, j, e)
	    if ((e->flags & EDGE_ABNORMAL) && EDGE_CRITICAL_P (e))
	      {
		skip_p = true;
		break;
	      }
	  edges.release ();
	  if (skip_p)
	    continue;
	}
      init_loop_tree_node (&ira_loop_nodes[i], loop->num);
    }
}
/* The function returns TRUE if there are more one allocation
region. */
static bool
more_one_region_p (void)
{
unsigned int i;
loop_p loop;
if (current_loops != NULL)
FOR_EACH_VEC_SAFE_ELT (get_loops (cfun), i, loop)
if (ira_loop_nodes[i].regno_allocno_map != NULL
&& ira_loop_tree_root != &ira_loop_nodes[i])
return true;
return false;
}
/* Release all memory owned by loop tree node LOOP.  Nodes that were
   never initialized (NULL regno_allocno_map) are left untouched.  */
static void
finish_loop_tree_node (ira_loop_tree_node_t loop)
{
  if (loop->regno_allocno_map == NULL)
    return;
  ira_assert (loop->bb == NULL);
  ira_free_bitmap (loop->local_copies);
  ira_free_bitmap (loop->border_allocnos);
  ira_free_bitmap (loop->modified_regnos);
  ira_free_bitmap (loop->all_allocnos);
  ira_free (loop->regno_allocno_map);
  /* Mark the node as uninitialized so a second call is a no-op.  */
  loop->regno_allocno_map = NULL;
}
/* Free the loop tree nodes. */
static void
finish_loop_tree_nodes (void)
{
unsigned int i;
for (i = 0; i < ira_loop_nodes_count; i++)
finish_loop_tree_node (&ira_loop_nodes[i]);
ira_free (ira_loop_nodes);
for (i = 0; i < (unsigned int) last_basic_block_before_change; i++)
{
if (ira_bb_nodes[i].local_copies != NULL)
ira_free_bitmap (ira_bb_nodes[i].local_copies);
if (ira_bb_nodes[i].border_allocnos != NULL)
ira_free_bitmap (ira_bb_nodes[i].border_allocnos);
if (ira_bb_nodes[i].modified_regnos != NULL)
ira_free_bitmap (ira_bb_nodes[i].modified_regnos);
if (ira_bb_nodes[i].all_allocnos != NULL)
ira_free_bitmap (ira_bb_nodes[i].all_allocnos);
if (ira_bb_nodes[i].regno_allocno_map != NULL)
ira_free (ira_bb_nodes[i].regno_allocno_map);
}
ira_free (ira_bb_nodes);
}
/* The following recursive function adds LOOP to the loop tree
   hierarchy.  LOOP is added only once.  If LOOP is NULL we adding
   loop designating the whole function when CFG loops are not
   built.  */
static void
add_loop_to_tree (class loop *loop)
{
  int loop_num;
  class loop *parent;
  ira_loop_tree_node_t loop_node, parent_node;

  /* We cannot use loop node access macros here because of potential
     checking and because the nodes are not initialized enough
     yet.  */
  /* Make sure every ancestor is in the tree before LOOP itself.  */
  if (loop != NULL && loop_outer (loop) != NULL)
    add_loop_to_tree (loop_outer (loop));
  /* Index 0 designates the whole-function node.  */
  loop_num = loop != NULL ? loop->num : 0;
  /* Only nodes that got a regno map in create_loop_tree_nodes take
     part; a non-NULL children field marks a node already linked.  */
  if (ira_loop_nodes[loop_num].regno_allocno_map != NULL
      && ira_loop_nodes[loop_num].children == NULL)
    {
      /* We have not added loop node to the tree yet.  */
      loop_node = &ira_loop_nodes[loop_num];
      loop_node->loop = loop;
      loop_node->bb = NULL;
      if (loop == NULL)
	parent = NULL;
      else
	{
	  /* Find the nearest enclosing loop that has its own region
	     node (skipped loops have a NULL regno map).  */
	  for (parent = loop_outer (loop);
	       parent != NULL;
	       parent = loop_outer (parent))
	    if (ira_loop_nodes[parent->num].regno_allocno_map != NULL)
	      break;
	}
      if (parent == NULL)
	{
	  /* LOOP becomes the root of the tree.  */
	  loop_node->next = NULL;
	  loop_node->subloop_next = NULL;
	  loop_node->parent = NULL;
	}
      else
	{
	  /* Push LOOP onto the head of the parent's children and
	     subloops chains.  */
	  parent_node = &ira_loop_nodes[parent->num];
	  loop_node->next = parent_node->children;
	  parent_node->children = loop_node;
	  loop_node->subloop_next = parent_node->subloops;
	  parent_node->subloops = loop_node;
	  loop_node->parent = parent_node;
	}
    }
}
/* Recursively assign level numbers to the loop tree rooted at
   LOOP_NODE, starting with LEVEL.  Return the maximal level found in
   the subtree plus one (i.e. the subtree height measured from
   LEVEL).  */
static int
setup_loop_tree_level (ira_loop_tree_node_t loop_node, int level)
{
  ira_loop_tree_node_t child;
  int max_height = level + 1;

  ira_assert (loop_node->bb == NULL);
  loop_node->level = level;
  for (child = loop_node->subloops;
       child != NULL;
       child = child->subloop_next)
    {
      ira_assert (child->bb == NULL);
      int h = setup_loop_tree_level (child, level + 1);
      if (max_height < h)
	max_height = h;
    }
  return max_height;
}
/* Create the loop tree.  The algorithm is designed to provide correct
   order of loops (they are ordered by their last loop BB) and basic
   blocks in the chain formed by member next.  */
static void
form_loop_tree (void)
{
  basic_block bb;
  class loop *parent;
  ira_loop_tree_node_t bb_node, loop_node;

  /* We cannot use loop/bb node access macros because of potential
     checking and because the nodes are not initialized enough
     yet.  */
  FOR_EACH_BB_FN (bb, cfun)
    {
      /* Set up the block's node and push it onto the children chain
	 of its nearest enclosing region node.  */
      bb_node = &ira_bb_nodes[bb->index];
      bb_node->bb = bb;
      bb_node->loop = NULL;
      bb_node->subloops = NULL;
      bb_node->children = NULL;
      bb_node->subloop_next = NULL;
      bb_node->next = NULL;
      if (current_loops == NULL)
	parent = NULL;
      else
	{
	  /* Walk outward to the nearest loop that has its own region
	     node (skipped loops have a NULL regno map).  */
	  for (parent = bb->loop_father;
	       parent != NULL;
	       parent = loop_outer (parent))
	    if (ira_loop_nodes[parent->num].regno_allocno_map != NULL)
	      break;
	}
      /* Ensure the enclosing loop (or the root, if PARENT is NULL)
	 is linked into the tree before attaching the block.  */
      add_loop_to_tree (parent);
      loop_node = &ira_loop_nodes[parent == NULL ? 0 : parent->num];
      bb_node->next = loop_node->children;
      bb_node->parent = loop_node;
      loop_node->children = bb_node;
    }
  ira_loop_tree_root = IRA_LOOP_NODE_BY_INDEX (0);
  ira_loop_tree_height = setup_loop_tree_level (ira_loop_tree_root, 0);
  ira_assert (ira_loop_tree_root->regno_allocno_map != NULL);
}
/* Rebuild IRA_REGNO_ALLOCNO_MAP and REGNO_ALLOCNO_MAPs of the loop
   tree nodes.  */
static void
rebuild_regno_allocno_maps (void)
{
  unsigned int l;
  int max_regno, regno;
  ira_allocno_t a;
  ira_loop_tree_node_t loop_tree_node;
  loop_p loop;
  ira_allocno_iterator ai;

  ira_assert (current_loops != NULL);
  max_regno = max_reg_num ();
  /* Reallocate each initialized loop node's map at the (possibly
     grown) register count and clear it.  */
  FOR_EACH_VEC_SAFE_ELT (get_loops (cfun), l, loop)
    if (ira_loop_nodes[l].regno_allocno_map != NULL)
      {
	ira_free (ira_loop_nodes[l].regno_allocno_map);
	ira_loop_nodes[l].regno_allocno_map
	  = (ira_allocno_t *) ira_allocate (sizeof (ira_allocno_t)
					    * max_regno);
	memset (ira_loop_nodes[l].regno_allocno_map, 0,
		sizeof (ira_allocno_t) * max_regno);
      }
  ira_free (ira_regno_allocno_map);
  ira_regno_allocno_map
    = (ira_allocno_t *) ira_allocate (max_regno * sizeof (ira_allocno_t));
  memset (ira_regno_allocno_map, 0, max_regno * sizeof (ira_allocno_t));
  /* Repopulate both the global map (as a linked list threaded through
     ALLOCNO_NEXT_REGNO_ALLOCNO) and the per-node maps.  */
  FOR_EACH_ALLOCNO (a, ai)
    {
      if (ALLOCNO_CAP_MEMBER (a) != NULL)
	/* Caps are not in the regno allocno maps.  */
	continue;
      regno = ALLOCNO_REGNO (a);
      loop_tree_node = ALLOCNO_LOOP_TREE_NODE (a);
      ALLOCNO_NEXT_REGNO_ALLOCNO (a) = ira_regno_allocno_map[regno];
      ira_regno_allocno_map[regno] = a;
      if (loop_tree_node->regno_allocno_map[regno] == NULL)
	/* Remember that we can create temporary allocnos to break
	   cycles in register shuffle.  */
	loop_tree_node->regno_allocno_map[regno] = a;
    }
}
/* Pools for allocnos, allocno live ranges and objects.  */
static object_allocator<live_range> live_range_pool ("live ranges");
static object_allocator<ira_allocno> allocno_pool ("allocnos");
static object_allocator<ira_object> object_pool ("objects");

/* Vec containing references to all created allocnos.  It is a
   container of array ira_allocnos.  */
static vec<ira_allocno_t> allocno_vec;

/* Vec containing references to all created ira_objects.  It is a
   container of ira_object_id_map.  */
static vec<ira_object_t> ira_object_id_map_vec;
/* Initialize all file-scope data concerning allocnos and objects:
   the containers, the counters, and the zeroed regno -> allocno
   map.  */
static void
initiate_allocnos (void)
{
  /* Reserve twice the current register count as an initial guess.  */
  int reserve = max_reg_num () * 2;
  size_t map_size = max_reg_num () * sizeof (ira_allocno_t);

  allocno_vec.create (reserve);
  ira_allocnos = NULL;
  ira_allocnos_num = 0;
  ira_objects_num = 0;
  ira_object_id_map_vec.create (reserve);
  ira_object_id_map = NULL;
  ira_regno_allocno_map = (ira_allocno_t *) ira_allocate (map_size);
  memset (ira_regno_allocno_map, 0, map_size);
}
/* Create and return an object corresponding to a new allocno A.
   SUBWORD is the word index for multi-object allocnos (see
   ira_create_allocno_objects).  */
static ira_object_t
ira_create_object (ira_allocno_t a, int subword)
{
  enum reg_class aclass = ALLOCNO_CLASS (a);
  ira_object_t obj = object_pool.allocate ();

  OBJECT_ALLOCNO (obj) = a;
  OBJECT_SUBWORD (obj) = subword;
  OBJECT_CONFLICT_ID (obj) = ira_objects_num;
  OBJECT_CONFLICT_VEC_P (obj) = false;
  OBJECT_CONFLICT_ARRAY (obj) = NULL;
  OBJECT_NUM_CONFLICTS (obj) = 0;
  /* Start with the non-allocatable registers plus everything outside
     the allocno's class as conflicts.  */
  OBJECT_CONFLICT_HARD_REGS (obj) = ira_no_alloc_regs;
  OBJECT_TOTAL_CONFLICT_HARD_REGS (obj) = ira_no_alloc_regs;
  OBJECT_CONFLICT_HARD_REGS (obj) |= ~reg_class_contents[aclass];
  OBJECT_TOTAL_CONFLICT_HARD_REGS (obj) |= ~reg_class_contents[aclass];
  /* Empty live range: MIN > MAX.  */
  OBJECT_MIN (obj) = INT_MAX;
  OBJECT_MAX (obj) = -1;
  OBJECT_LIVE_RANGES (obj) = NULL;
  /* Register the object; the vec backs ira_object_id_map, so refresh
     the raw pointer and count after the push.  */
  ira_object_id_map_vec.safe_push (obj);
  ira_object_id_map
    = ira_object_id_map_vec.address ();
  ira_objects_num = ira_object_id_map_vec.length ();
  return obj;
}
/* Create and return the allocno corresponding to REGNO in
   LOOP_TREE_NODE.  Add the allocno to the list of allocnos with the
   same regno if CAP_P is FALSE.  */
ira_allocno_t
ira_create_allocno (int regno, bool cap_p,
		    ira_loop_tree_node_t loop_tree_node)
{
  ira_allocno_t a;

  a = allocno_pool.allocate ();
  ALLOCNO_REGNO (a) = regno;
  ALLOCNO_LOOP_TREE_NODE (a) = loop_tree_node;
  if (! cap_p)
    {
      /* Thread the new allocno onto the per-regno lists (global and
	 per-node); caps are kept out of these maps.  */
      ALLOCNO_NEXT_REGNO_ALLOCNO (a) = ira_regno_allocno_map[regno];
      ira_regno_allocno_map[regno] = a;
      if (loop_tree_node->regno_allocno_map[regno] == NULL)
	/* Remember that we can create temporary allocnos to break
	   cycles in register shuffle on region borders (see
	   ira-emit.c).  */
	loop_tree_node->regno_allocno_map[regno] = a;
    }
  ALLOCNO_CAP (a) = NULL;
  ALLOCNO_CAP_MEMBER (a) = NULL;
  ALLOCNO_NUM (a) = ira_allocnos_num;
  bitmap_set_bit (loop_tree_node->all_allocnos, ALLOCNO_NUM (a));
  /* Default-initialize the remaining attributes; costs, class and
     objects are filled in by later passes.  */
  ALLOCNO_NREFS (a) = 0;
  ALLOCNO_FREQ (a) = 0;
  ALLOCNO_HARD_REGNO (a) = -1;
  ALLOCNO_CALL_FREQ (a) = 0;
  ALLOCNO_CALLS_CROSSED_NUM (a) = 0;
  ALLOCNO_CHEAP_CALLS_CROSSED_NUM (a) = 0;
  ALLOCNO_CROSSED_CALLS_ABIS (a) = 0;
  CLEAR_HARD_REG_SET (ALLOCNO_CROSSED_CALLS_CLOBBERED_REGS (a));
#ifdef STACK_REGS
  ALLOCNO_NO_STACK_REG_P (a) = false;
  ALLOCNO_TOTAL_NO_STACK_REG_P (a) = false;
#endif
  ALLOCNO_DONT_REASSIGN_P (a) = false;
  ALLOCNO_BAD_SPILL_P (a) = false;
  ALLOCNO_ASSIGNED_P (a) = false;
  ALLOCNO_MODE (a) = (regno < 0 ? VOIDmode : PSEUDO_REGNO_MODE (regno));
  ALLOCNO_WMODE (a) = ALLOCNO_MODE (a);
  ALLOCNO_PREFS (a) = NULL;
  ALLOCNO_COPIES (a) = NULL;
  ALLOCNO_HARD_REG_COSTS (a) = NULL;
  ALLOCNO_CONFLICT_HARD_REG_COSTS (a) = NULL;
  ALLOCNO_UPDATED_HARD_REG_COSTS (a) = NULL;
  ALLOCNO_UPDATED_CONFLICT_HARD_REG_COSTS (a) = NULL;
  ALLOCNO_CLASS (a) = NO_REGS;
  ALLOCNO_UPDATED_CLASS_COST (a) = 0;
  ALLOCNO_CLASS_COST (a) = 0;
  ALLOCNO_MEMORY_COST (a) = 0;
  ALLOCNO_UPDATED_MEMORY_COST (a) = 0;
  ALLOCNO_EXCESS_PRESSURE_POINTS_NUM (a) = 0;
  ALLOCNO_NUM_OBJECTS (a) = 0;
  ALLOCNO_ADD_DATA (a) = NULL;
  /* The vec backs ira_allocnos, so refresh the raw pointer and count
     after the push.  */
  allocno_vec.safe_push (a);
  ira_allocnos = allocno_vec.address ();
  ira_allocnos_num = allocno_vec.length ();
  return a;
}
/* Set up register class for A and update its conflict hard
   registers: every hard register outside ACLASS becomes a conflict
   for all of A's objects.  */
void
ira_set_allocno_class (ira_allocno_t a, enum reg_class aclass)
{
  ira_object_t obj;
  ira_allocno_object_iterator oi;

  ALLOCNO_CLASS (a) = aclass;
  FOR_EACH_ALLOCNO_OBJECT (a, obj, oi)
    {
      OBJECT_CONFLICT_HARD_REGS (obj) |= ~reg_class_contents[aclass];
      OBJECT_TOTAL_CONFLICT_HARD_REGS (obj) |= ~reg_class_contents[aclass];
    }
}
/* Determine the number of objects we should associate with allocno A
   and allocate them.  Only a two-register value that exactly fills
   two words is tracked per subword; everything else gets a single
   object.  */
void
ira_create_allocno_objects (ira_allocno_t a)
{
  machine_mode mode = ALLOCNO_MODE (a);
  int nobj = ira_reg_class_max_nregs[ALLOCNO_CLASS (a)][mode];

  if (nobj != 2 || maybe_ne (GET_MODE_SIZE (mode), nobj * UNITS_PER_WORD))
    nobj = 1;
  ALLOCNO_NUM_OBJECTS (a) = nobj;
  for (int i = 0; i < nobj; i++)
    ALLOCNO_OBJECT (a, i) = ira_create_object (a, i);
}
/* For each allocno, set ALLOCNO_NUM_OBJECTS and create the
ALLOCNO_OBJECT structures. This must be called after the allocno
classes are known. */
static void
create_allocno_objects (void)
{
ira_allocno_t a;
ira_allocno_iterator ai;
FOR_EACH_ALLOCNO (a, ai)
ira_create_allocno_objects (a);
}
/* Merge hard register conflict information of allocno FROM into the
   corresponding objects of allocno TO, word by word.  When TOTAL_ONLY
   is true only the OBJECT_TOTAL_CONFLICT_HARD_REGS sets (and, with
   STACK_REGS, the total no-stack-reg flag) are merged.  */
static void
merge_hard_reg_conflicts (ira_allocno_t from, ira_allocno_t to,
			  bool total_only)
{
  int nobj = ALLOCNO_NUM_OBJECTS (to);

  gcc_assert (nobj == ALLOCNO_NUM_OBJECTS (from));
  for (int w = 0; w < nobj; w++)
    {
      ira_object_t src = ALLOCNO_OBJECT (from, w);
      ira_object_t dst = ALLOCNO_OBJECT (to, w);

      if (!total_only)
	OBJECT_CONFLICT_HARD_REGS (dst)
	  |= OBJECT_CONFLICT_HARD_REGS (src);
      OBJECT_TOTAL_CONFLICT_HARD_REGS (dst)
	|= OBJECT_TOTAL_CONFLICT_HARD_REGS (src);
    }
#ifdef STACK_REGS
  if (!total_only && ALLOCNO_NO_STACK_REG_P (from))
    ALLOCNO_NO_STACK_REG_P (to) = true;
  if (ALLOCNO_TOTAL_NO_STACK_REG_P (from))
    ALLOCNO_TOTAL_NO_STACK_REG_P (to) = true;
#endif
}
/* Add the registers in SET to the conflict sets (both current and
   total) of every object associated with allocno A.  */
void
ior_hard_reg_conflicts (ira_allocno_t a, const_hard_reg_set set)
{
  ira_object_t obj;
  ira_allocno_object_iterator it;

  FOR_EACH_ALLOCNO_OBJECT (a, obj, it)
    {
      OBJECT_CONFLICT_HARD_REGS (obj) |= set;
      OBJECT_TOTAL_CONFLICT_HARD_REGS (obj) |= set;
    }
}
/* Return TRUE if a pointer vector of NUM conflicts would use less
   memory (with slack) than a bit vector spanning OBJ's conflict id
   range.  */
bool
ira_conflict_vector_profitable_p (ira_object_t obj, int num)
{
  int lo = OBJECT_MIN (obj);
  int hi = OBJECT_MAX (obj);

  /* An empty id range: prefer the bit vector, which then needs no
     allocation at all.  */
  if (hi < lo)
    return false;
  int words = (hi - lo + IRA_INT_BITS) / IRA_INT_BITS;
  return (2 * sizeof (ira_object_t) * (num + 1)
	  < 3 * words * sizeof (IRA_INT_TYPE));
}
/* Allocate and initialize OBJ's conflict vector with room for NUM
   conflicting objects plus the NULL terminator, and mark the object
   as using vector representation.  */
void
ira_allocate_conflict_vec (ira_object_t obj, int num)
{
  ira_assert (OBJECT_CONFLICT_ARRAY (obj) == NULL);
  /* One extra slot for the NULL end marker.  */
  int size = sizeof (ira_object_t) * (num + 1);
  ira_object_t *vec = (ira_object_t *) ira_allocate (size);

  vec[0] = NULL;
  OBJECT_CONFLICT_ARRAY (obj) = vec;
  OBJECT_NUM_CONFLICTS (obj) = 0;
  OBJECT_CONFLICT_ARRAY_SIZE (obj) = size;
  OBJECT_CONFLICT_VEC_P (obj) = true;
}
/* Allocate and zero OBJ's conflict bit vector, sized to cover the
   conflict ids from OBJECT_MIN to OBJECT_MAX, and mark the object as
   using bit-vector representation.  */
static void
allocate_conflict_bit_vec (ira_object_t obj)
{
  ira_assert (OBJECT_CONFLICT_ARRAY (obj) == NULL);
  unsigned int words
    = (OBJECT_MAX (obj) - OBJECT_MIN (obj) + IRA_INT_BITS) / IRA_INT_BITS;
  unsigned int size = words * sizeof (IRA_INT_TYPE);

  OBJECT_CONFLICT_ARRAY (obj) = ira_allocate (size);
  memset (OBJECT_CONFLICT_ARRAY (obj), 0, size);
  OBJECT_CONFLICT_ARRAY_SIZE (obj) = size;
  OBJECT_CONFLICT_VEC_P (obj) = false;
}
/* Allocate OBJ's conflict storage for NUM conflicting allocnos,
   choosing whichever representation (vector or bit vector) is more
   profitable.  */
void
ira_allocate_object_conflicts (ira_object_t obj, int num)
{
  if (!ira_conflict_vector_profitable_p (obj, num))
    allocate_conflict_bit_vec (obj);
  else
    ira_allocate_conflict_vec (obj, num);
}
/* Add OBJ2 to the conflicts of OBJ1, growing OBJ1's conflict storage
   as needed.  The vector representation keeps a NULL-terminated array
   of object pointers; the bit-vector representation keeps one bit per
   conflict id in [OBJECT_MIN, OBJECT_MAX], extending that window at
   either end when ID falls outside it.  */
static void
add_to_conflicts (ira_object_t obj1, ira_object_t obj2)
{
  int num;
  unsigned int size;

  if (OBJECT_CONFLICT_VEC_P (obj1))
    {
      ira_object_t *vec = OBJECT_CONFLICT_VEC (obj1);
      int curr_num = OBJECT_NUM_CONFLICTS (obj1);

      /* NUM counts the new entry plus the NULL terminator.  */
      num = curr_num + 2;
      if (OBJECT_CONFLICT_ARRAY_SIZE (obj1) < num * sizeof (ira_object_t))
	{
	  ira_object_t *newvec;

	  /* Grow by half with slack.  Fixed: size the elements as
	     ira_object_t, not ira_allocno_t (same size, both are
	     pointers, but the vector holds objects).  */
	  size = (3 * num / 2 + 1) * sizeof (ira_object_t);
	  newvec = (ira_object_t *) ira_allocate (size);
	  memcpy (newvec, vec, curr_num * sizeof (ira_object_t));
	  ira_free (vec);
	  vec = newvec;
	  OBJECT_CONFLICT_ARRAY (obj1) = vec;
	  OBJECT_CONFLICT_ARRAY_SIZE (obj1) = size;
	}
      vec[num - 2] = obj2;
      vec[num - 1] = NULL;
      OBJECT_NUM_CONFLICTS (obj1)++;
    }
  else
    {
      int nw, added_head_nw, id;
      IRA_INT_TYPE *vec = OBJECT_CONFLICT_BITVEC (obj1);

      id = OBJECT_CONFLICT_ID (obj2);
      if (OBJECT_MIN (obj1) > id)
	{
	  /* Expand head of the bit vector.  */
	  added_head_nw = (OBJECT_MIN (obj1) - id - 1) / IRA_INT_BITS + 1;
	  nw = (OBJECT_MAX (obj1) - OBJECT_MIN (obj1)) / IRA_INT_BITS + 1;
	  size = (nw + added_head_nw) * sizeof (IRA_INT_TYPE);
	  if (OBJECT_CONFLICT_ARRAY_SIZE (obj1) >= size)
	    {
	      /* Enough room already: shift the words up and zero the
		 new head.  */
	      memmove ((char *) vec + added_head_nw * sizeof (IRA_INT_TYPE),
		       vec, nw * sizeof (IRA_INT_TYPE));
	      memset (vec, 0, added_head_nw * sizeof (IRA_INT_TYPE));
	    }
	  else
	    {
	      /* Reallocate with slack; copy old words after the new
		 head and zero both the head and the tail slack.  */
	      size
		= (3 * (nw + added_head_nw) / 2 + 1) * sizeof (IRA_INT_TYPE);
	      vec = (IRA_INT_TYPE *) ira_allocate (size);
	      memcpy ((char *) vec + added_head_nw * sizeof (IRA_INT_TYPE),
		      OBJECT_CONFLICT_ARRAY (obj1), nw * sizeof (IRA_INT_TYPE));
	      memset (vec, 0, added_head_nw * sizeof (IRA_INT_TYPE));
	      memset ((char *) vec
		      + (nw + added_head_nw) * sizeof (IRA_INT_TYPE),
		      0, size - (nw + added_head_nw) * sizeof (IRA_INT_TYPE));
	      ira_free (OBJECT_CONFLICT_ARRAY (obj1));
	      OBJECT_CONFLICT_ARRAY (obj1) = vec;
	      OBJECT_CONFLICT_ARRAY_SIZE (obj1) = size;
	    }
	  OBJECT_MIN (obj1) -= added_head_nw * IRA_INT_BITS;
	}
      else if (OBJECT_MAX (obj1) < id)
	{
	  nw = (id - OBJECT_MIN (obj1)) / IRA_INT_BITS + 1;
	  size = nw * sizeof (IRA_INT_TYPE);
	  if (OBJECT_CONFLICT_ARRAY_SIZE (obj1) < size)
	    {
	      /* Expand tail of the bit vector.  */
	      size = (3 * nw / 2 + 1) * sizeof (IRA_INT_TYPE);
	      vec = (IRA_INT_TYPE *) ira_allocate (size);
	      memcpy (vec, OBJECT_CONFLICT_ARRAY (obj1),
		      OBJECT_CONFLICT_ARRAY_SIZE (obj1));
	      memset ((char *) vec + OBJECT_CONFLICT_ARRAY_SIZE (obj1),
		      0, size - OBJECT_CONFLICT_ARRAY_SIZE (obj1));
	      ira_free (OBJECT_CONFLICT_ARRAY (obj1));
	      OBJECT_CONFLICT_ARRAY (obj1) = vec;
	      OBJECT_CONFLICT_ARRAY_SIZE (obj1) = size;
	    }
	  OBJECT_MAX (obj1) = id;
	}
      SET_MINMAX_SET_BIT (vec, id, OBJECT_MIN (obj1), OBJECT_MAX (obj1));
    }
}
/* Record a symmetric conflict: add OBJ1 to the conflicts of OBJ2 and
   vice versa.  */
static void
ira_add_conflict (ira_object_t obj1, ira_object_t obj2)
{
  add_to_conflicts (obj1, obj2);
  add_to_conflicts (obj2, obj1);
}
/* Clear all conflicts of OBJ, leaving its conflict storage allocated
   for reuse.  */
static void
clear_conflicts (ira_object_t obj)
{
  if (OBJECT_CONFLICT_VEC_P (obj))
    {
      /* Vector form: reset the count and restore the NULL
	 terminator.  */
      OBJECT_NUM_CONFLICTS (obj) = 0;
      OBJECT_CONFLICT_VEC (obj)[0] = NULL;
      return;
    }
  if (OBJECT_CONFLICT_ARRAY_SIZE (obj) == 0)
    return;
  /* Bit-vector form: zero the words covering [MIN, MAX].  */
  int nw = (OBJECT_MAX (obj) - OBJECT_MIN (obj)) / IRA_INT_BITS + 1;
  memset (OBJECT_CONFLICT_BITVEC (obj), 0, nw * sizeof (IRA_INT_TYPE));
}
/* Scratch array (indexed by conflict id) used to find duplications in
   conflict vectors of allocnos.  */
static int *conflict_check;

/* The tick value used to mark presence in the conflict vector of the
   current allocno; bumping it invalidates all previous marks at
   once.  */
static int curr_conflict_check_tick;
/* Remove duplicate entries from the conflict vector of OBJ in place,
   using conflict_check / curr_conflict_check_tick to detect ids seen
   before.  */
static void
compress_conflict_vec (ira_object_t obj)
{
  ira_assert (OBJECT_CONFLICT_VEC_P (obj));
  ira_object_t *vec = OBJECT_CONFLICT_VEC (obj);
  int out = 0;

  /* A fresh tick makes every previous mark stale.  */
  curr_conflict_check_tick++;
  for (int in = 0; vec[in] != NULL; in++)
    {
      ira_object_t c = vec[in];
      int id = OBJECT_CONFLICT_ID (c);

      if (conflict_check[id] == curr_conflict_check_tick)
	continue;
      conflict_check[id] = curr_conflict_check_tick;
      vec[out++] = c;
    }
  OBJECT_NUM_CONFLICTS (obj) = out;
  vec[out] = NULL;
}
/* Remove duplications in conflict vectors of all allocnos. */
static void
compress_conflict_vecs (void)
{
ira_object_t obj;
ira_object_iterator oi;
conflict_check = (int *) ira_allocate (sizeof (int) * ira_objects_num);
memset (conflict_check, 0, sizeof (int) * ira_objects_num);
curr_conflict_check_tick = 0;
FOR_EACH_OBJECT (obj, oi)
{
if (OBJECT_CONFLICT_VEC_P (obj))
compress_conflict_vec (obj);
}
ira_free (conflict_check);
}
/* Print allocno A to ira_dump_file as "a<num>(r<regno>,b<bb>|l<loop>)";
   if A is a cap, recursively print its member after a ':'.  */
void
ira_print_expanded_allocno (ira_allocno_t a)
{
  basic_block bb = ALLOCNO_LOOP_TREE_NODE (a)->bb;
  ira_allocno_t member = ALLOCNO_CAP_MEMBER (a);

  fprintf (ira_dump_file, " a%d(r%d", ALLOCNO_NUM (a), ALLOCNO_REGNO (a));
  if (bb != NULL)
    fprintf (ira_dump_file, ",b%d", bb->index);
  else
    fprintf (ira_dump_file, ",l%d", ALLOCNO_LOOP_TREE_NODE (a)->loop_num);
  if (member != NULL)
    {
      fprintf (ira_dump_file, ":");
      ira_print_expanded_allocno (member);
    }
  fprintf (ira_dump_file, ")");
}
/* Create and return the cap representing allocno A in the
   parent loop.  */
static ira_allocno_t
create_cap_allocno (ira_allocno_t a)
{
  ira_allocno_t cap;
  ira_loop_tree_node_t parent;
  enum reg_class aclass;

  /* The cap lives in the parent node; CAP_P is true so it stays out
     of the regno allocno maps.  */
  parent = ALLOCNO_LOOP_TREE_NODE (a)->parent;
  cap = ira_create_allocno (ALLOCNO_REGNO (a), true, parent);
  ALLOCNO_MODE (cap) = ALLOCNO_MODE (a);
  ALLOCNO_WMODE (cap) = ALLOCNO_WMODE (a);
  aclass = ALLOCNO_CLASS (a);
  ira_set_allocno_class (cap, aclass);
  ira_create_allocno_objects (cap);
  /* Link the cap and its member both ways.  */
  ALLOCNO_CAP_MEMBER (cap) = a;
  ALLOCNO_CAP (a) = cap;
  /* Copy the member's costs, frequencies, call info and hard register
     conflicts to the cap.  */
  ALLOCNO_CLASS_COST (cap) = ALLOCNO_CLASS_COST (a);
  ALLOCNO_MEMORY_COST (cap) = ALLOCNO_MEMORY_COST (a);
  ira_allocate_and_copy_costs
    (&ALLOCNO_HARD_REG_COSTS (cap), aclass, ALLOCNO_HARD_REG_COSTS (a));
  ira_allocate_and_copy_costs
    (&ALLOCNO_CONFLICT_HARD_REG_COSTS (cap), aclass,
     ALLOCNO_CONFLICT_HARD_REG_COSTS (a));
  ALLOCNO_BAD_SPILL_P (cap) = ALLOCNO_BAD_SPILL_P (a);
  ALLOCNO_NREFS (cap) = ALLOCNO_NREFS (a);
  ALLOCNO_FREQ (cap) = ALLOCNO_FREQ (a);
  ALLOCNO_CALL_FREQ (cap) = ALLOCNO_CALL_FREQ (a);

  merge_hard_reg_conflicts (a, cap, false);

  ALLOCNO_CALLS_CROSSED_NUM (cap) = ALLOCNO_CALLS_CROSSED_NUM (a);
  ALLOCNO_CHEAP_CALLS_CROSSED_NUM (cap) = ALLOCNO_CHEAP_CALLS_CROSSED_NUM (a);
  ALLOCNO_CROSSED_CALLS_ABIS (cap) = ALLOCNO_CROSSED_CALLS_ABIS (a);
  ALLOCNO_CROSSED_CALLS_CLOBBERED_REGS (cap)
    = ALLOCNO_CROSSED_CALLS_CLOBBERED_REGS (a);
  if (internal_flag_ira_verbose > 2 && ira_dump_file != NULL)
    {
      fprintf (ira_dump_file, "    Creating cap ");
      ira_print_expanded_allocno (cap);
      fprintf (ira_dump_file, "\n");
    }
  return cap;
}
/* Allocate a live range for OBJ covering [START, FINISH] with
   successor NEXT, and return it.  */
live_range_t
ira_create_live_range (ira_object_t obj, int start, int finish,
		       live_range_t next)
{
  live_range_t r = live_range_pool.allocate ();

  r->object = obj;
  r->start = start;
  r->finish = finish;
  r->next = next;
  return r;
}
/* Prepend a new [START, FINISH] live range to OBJECT's live range
   list.  */
void
ira_add_live_range_to_object (ira_object_t object, int start, int finish)
{
  live_range_t head = OBJECT_LIVE_RANGES (object);

  OBJECT_LIVE_RANGES (object)
    = ira_create_live_range (object, start, finish, head);
}
/* Return a freshly allocated copy of live range R (the `next' field
   is copied as-is).  */
static live_range_t
copy_live_range (live_range_t r)
{
  live_range_t clone = live_range_pool.allocate ();

  *clone = *r;
  return clone;
}
/* Deep-copy the live range list headed by R, preserving order, and
   return the head of the copy (NULL for an empty list).  */
live_range_t
ira_copy_live_range_list (live_range_t r)
{
  live_range_t head = NULL, tail = NULL;

  for (; r != NULL; r = r->next)
    {
      live_range_t clone = copy_live_range (r);

      if (tail == NULL)
	head = clone;
      else
	tail->next = clone;
      tail = clone;
    }
  return head;
}
/* Merge ranges R1 and R2 and returns the result. The function
maintains the order of ranges and tries to minimize number of the
result ranges. */
live_range_t
ira_merge_live_ranges (live_range_t r1, live_range_t r2)
{
live_range_t first, last;
if (r1 == NULL)
return r2;
if (r2 == NULL)
return r1;
for (first = last = NULL; r1 != NULL && r2 != NULL;)
{
if (r1->start < r2->start)
std::swap (r1, r2);
if (r1->start <= r2->finish + 1)
{
/* Intersected ranges: merge r1 and r2 into r1. */
r1->start = r2->start;
if (r1->finish < r2->finish)
r1->finish = r2->finish;
live_range_t temp = r2;
r2 = r2->next;
ira_finish_live_range (temp);