php-internal-docs 8.4.8
Unofficial docs for php/php-src
ir_ra.c
1/*
2 * IR - Lightweight JIT Compilation Framework
3 * (RA - Register Allocation, Liveness, Coalescing, SSA Deconstruction)
4 * Copyright (C) 2022 Zend by Perforce.
5 * Authors: Dmitry Stogov <dmitry@php.net>
6 *
7 * See: "Linear Scan Register Allocation on SSA Form", Christian Wimmer and
8 * Michael Franz, CGO'10 (2010)
9 * See: "Optimized Interval Splitting in a Linear Scan Register Allocator",
10 * Christian Wimmer VEE'10 (2005)
11 */
12
13#ifndef _GNU_SOURCE
14# define _GNU_SOURCE
15#endif
16
17#include <stdlib.h>
18#include "ir.h"
19
20#if defined(IR_TARGET_X86) || defined(IR_TARGET_X64)
21# include "ir_x86.h"
22#elif defined(IR_TARGET_AARCH64)
23# include "ir_aarch64.h"
24#else
25# error "Unknown IR target"
26#endif
27
28#include "ir_private.h"
29
30int ir_regs_number(void)
31{
32 return IR_REG_NUM;
33}
34
35bool ir_reg_is_int(int32_t reg)
36{
37 IR_ASSERT(reg >= 0 && reg < IR_REG_NUM);
38 return reg >= IR_REG_GP_FIRST && reg <= IR_REG_GP_LAST;
39}
40
41static int ir_assign_virtual_registers_slow(ir_ctx *ctx)
42{
43 uint32_t *vregs;
44 uint32_t vregs_count = 0;
45 uint32_t b;
46 ir_ref i, n;
47 ir_block *bb;
48 ir_insn *insn;
49 uint32_t flags;
50
51 /* Assign unique virtual register to each data node */
52 vregs = ir_mem_calloc(ctx->insns_count, sizeof(ir_ref));
53 n = 1;
54 for (b = 1, bb = ctx->cfg_blocks + b; b <= ctx->cfg_blocks_count; b++, bb++) {
55 IR_ASSERT(!(bb->flags & IR_BB_UNREACHABLE));
56 i = bb->start;
57
58 /* skip first instruction */
59 insn = ctx->ir_base + i;
60 n = ir_insn_len(insn);
61 i += n;
62 insn += n;
63 while (i < bb->end) {
64 flags = ir_op_flags[insn->op];
65 if (((flags & IR_OP_FLAG_DATA) && insn->op != IR_VAR && (insn->op != IR_PARAM || ctx->use_lists[i].count > 0))
66 || ((flags & IR_OP_FLAG_MEM) && ctx->use_lists[i].count > 1)) {
67 if (!ctx->rules || !(ctx->rules[i] & (IR_FUSED|IR_SKIPPED))) {
68 vregs[i] = ++vregs_count;
69 }
70 }
71 n = ir_insn_len(insn);
72 i += n;
73 insn += n;
74 }
75 }
76 ctx->vregs_count = vregs_count;
77 ctx->vregs = vregs;
78
79 return 1;
80}
81
82int ir_assign_virtual_registers(ir_ctx *ctx)
83{
84 uint32_t *vregs;
85 uint32_t vregs_count = 0;
86 ir_ref i;
87 ir_insn *insn;
88
89 if (!ctx->rules) {
90 return ir_assign_virtual_registers_slow(ctx);
91 }
92
93 /* Assign unique virtual register to each rule that needs it */
94 vregs = ir_mem_malloc(ctx->insns_count * sizeof(ir_ref));
95
96 for (i = 1, insn = &ctx->ir_base[1]; i < ctx->insns_count; i++, insn++) {
97 uint32_t v = 0;
98
99 if (ctx->rules[i] && !(ctx->rules[i] & (IR_FUSED|IR_SKIPPED))) {
100 uint32_t flags = ir_op_flags[insn->op];
101
102 if ((flags & IR_OP_FLAG_DATA)
103 || ((flags & IR_OP_FLAG_MEM) && ctx->use_lists[i].count > 1)) {
104 v = ++vregs_count;
105 }
106 }
107 vregs[i] = v;
108 }
109
110 ctx->vregs_count = vregs_count;
111 ctx->vregs = vregs;
112
113 return 1;
114}
115
116/* Lifetime intervals construction */
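/*
 * Note on live positions: each IR instruction (ir_ref) is expanded into
 * several consecutive "sub-positions" so that inputs can die and results
 * can be born at different points of the same instruction. The helpers
 * used throughout this file (see ir_private.h) follow this scheme, roughly:
 *
 *   IR_START_LIVE_POS_FROM_REF(ref) - beginning of the instruction
 *   IR_LOAD_LIVE_POS_FROM_REF(ref)  - inputs are loaded into registers
 *   IR_USE_LIVE_POS_FROM_REF(ref)   - inputs are consumed
 *   IR_DEF_LIVE_POS_FROM_REF(ref)   - the result is defined
 *   IR_SAVE_LIVE_POS_FROM_REF(ref)  - the result may be stored
 *   IR_END_LIVE_POS_FROM_REF(ref)   - start of the next instruction
 */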
117
118static ir_live_interval *ir_new_live_range(ir_ctx *ctx, int v, ir_live_pos start, ir_live_pos end)
119{
120 ir_live_interval *ival = ir_arena_alloc(&ctx->arena, sizeof(ir_live_interval));
121
122 ival->type = IR_VOID;
123 ival->reg = IR_REG_NONE;
124 ival->flags = 0;
125 ival->vreg = v;
126 ival->stack_spill_pos = -1; // not allocated
127 ival->range.start = start;
128 ival->range.end = ival->end = end;
129 ival->range.next = NULL;
130 ival->use_pos = NULL;
131 ival->next = NULL;
132
133 ctx->live_intervals[v] = ival;
134 return ival;
135}
136
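/*
 * ir_add_live_range() keeps the ranges of an interval sorted by start
 * position and merges overlapping or touching ranges in place. E.g. adding
 * the range 8..12 to an interval holding 2..4 and 10..16 leaves 2..4 and
 * 8..16; ranges swallowed by a merge are recycled through
 * ctx->unused_ranges instead of being freed.
 */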
137static ir_live_interval *ir_add_live_range(ir_ctx *ctx, int v, ir_live_pos start, ir_live_pos end)
138{
139 ir_live_interval *ival = ctx->live_intervals[v];
140 ir_live_range *p, *q;
141
142 if (!ival) {
143 return ir_new_live_range(ctx, v, start, end);
144 }
145
146 p = &ival->range;
147 if (end >= p->start) {
148 ir_live_range *prev = NULL;
149
150 do {
151 if (p->end >= start) {
152 if (start < p->start) {
153 p->start = start;
154 }
155 if (end > p->end) {
156 /* merge with next */
157 ir_live_range *next = p->next;
158
159 p->end = end;
160 while (next && p->end >= next->start) {
161 if (next->end > p->end) {
162 p->end = next->end;
163 }
164 p->next = next->next;
165 /* remember in the "unused_ranges" list */
166 next->next = ctx->unused_ranges;
167 ctx->unused_ranges = next;
168 next = p->next;
169 }
170 if (!p->next) {
171 ival->end = p->end;
172 }
173 }
174 return ival;
175 }
176 prev = p;
177 p = prev->next;
178 } while (p && end >= p->start);
179 if (!p) {
180 ival->end = end;
181 }
182 if (prev) {
183 if (ctx->unused_ranges) {
184 /* reuse */
185 q = ctx->unused_ranges;
186 ctx->unused_ranges = q->next;
187 } else {
188 q = ir_arena_alloc(&ctx->arena, sizeof(ir_live_range));
189 }
190 prev->next = q;
191 q->start = start;
192 q->end = end;
193 q->next = p;
194 return ival;
195 }
196 }
197
198 if (ctx->unused_ranges) {
199 /* reuse */
200 q = ctx->unused_ranges;
201 ctx->unused_ranges = q->next;
202 } else {
203 q = ir_arena_alloc(&ctx->arena, sizeof(ir_live_range));
204 }
205 q->start = p->start;
206 q->end = p->end;
207 q->next = p->next;
208 p->start = start;
209 p->end = end;
210 p->next = q;
211 return ival;
212}
213
214static ir_live_interval *ir_add_prev_live_range(ir_ctx *ctx, int v, ir_live_pos start, ir_live_pos end)
215{
216 ir_live_interval *ival = ctx->live_intervals[v];
217
218 if (ival && ival->range.start == end) {
219 ival->range.start = start;
220 return ival;
221 }
222 return ir_add_live_range(ctx, v, start, end);
223}
224
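/*
 * Fixed intervals model physical register constraints (clobbers, calling
 * conventions). They are stored after the virtual register intervals:
 * physical register 'reg' lives in slot ctx->vregs_count + 1 + reg, which
 * is why ctx->live_intervals is allocated with IR_REG_NUM + 2 extra
 * entries (the fixed registers plus the SCRATCH and ALL pseudo-registers).
 */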
225static void ir_add_fixed_live_range(ir_ctx *ctx, ir_reg reg, ir_live_pos start, ir_live_pos end)
226{
227 int v = ctx->vregs_count + 1 + reg;
228 ir_live_interval *ival = ctx->live_intervals[v];
229 ir_live_range *q;
230
231 if (!ival) {
232 ival = ir_arena_alloc(&ctx->arena, sizeof(ir_live_interval));
233 ival->type = IR_VOID;
234 ival->reg = reg;
235 ival->flags = IR_LIVE_INTERVAL_FIXED;
236 ival->vreg = v;
237 ival->stack_spill_pos = -1; // not allocated
238 ival->range.start = start;
239 ival->range.end = ival->end = end;
240 ival->range.next = NULL;
241 ival->use_pos = NULL;
242 ival->next = NULL;
243
244 ctx->live_intervals[v] = ival;
245 } else if (EXPECTED(end < ival->range.start)) {
246 if (ctx->unused_ranges) {
247 /* reuse */
248 q = ctx->unused_ranges;
249 ctx->unused_ranges = q->next;
250 } else {
251 q = ir_arena_alloc(&ctx->arena, sizeof(ir_live_range));
252 }
253
254 q->start = ival->range.start;
255 q->end = ival->range.end;
256 q->next = ival->range.next;
257 ival->range.start = start;
258 ival->range.end = end;
259 ival->range.next = q;
260 } else if (end == ival->range.start) {
261 ival->range.start = start;
262 } else {
263 ir_add_live_range(ctx, v, start, end);
264 }
265}
266
267static void ir_add_tmp(ir_ctx *ctx, ir_ref ref, ir_ref tmp_ref, int32_t tmp_op_num, ir_tmp_reg tmp_reg)
268{
269 ir_live_interval *ival = ir_arena_alloc(&ctx->arena, sizeof(ir_live_interval));
270
271 ival->type = tmp_reg.type;
272 ival->reg = IR_REG_NONE;
273 ival->flags = IR_LIVE_INTERVAL_TEMP;
274 ival->tmp_ref = tmp_ref;
275 ival->tmp_op_num = tmp_op_num;
276 ival->range.start = IR_START_LIVE_POS_FROM_REF(ref) + tmp_reg.start;
277 ival->range.end = ival->end = IR_START_LIVE_POS_FROM_REF(ref) + tmp_reg.end;
278 ival->range.next = NULL;
279 ival->use_pos = NULL;
280
281 if (!ctx->live_intervals[0]) {
282 ival->next = NULL;
283 ctx->live_intervals[0] = ival;
284 } else if (ival->range.start >= ctx->live_intervals[0]->range.start) {
285 ir_live_interval *prev = ctx->live_intervals[0];
286
287 while (prev->next && ival->range.start >= prev->next->range.start) {
288 prev = prev->next;
289 }
290 ival->next = prev->next;
291 prev->next = ival;
292 } else {
293 ir_live_interval *next = ctx->live_intervals[0];
294
295 ival->next = next;
296 ctx->live_intervals[0] = ival;
297 }
298 return;
299}
300
301static bool ir_has_tmp(ir_ctx *ctx, ir_ref ref, int32_t op_num)
302{
303 ir_live_interval *ival = ctx->live_intervals[0];
304
305 if (ival) {
306 while (ival && IR_LIVE_POS_TO_REF(ival->range.start) <= ref) {
307 if (ival->tmp_ref == ref && ival->tmp_op_num == op_num) {
308 return 1;
309 }
310 ival = ival->next;
311 }
312 }
313 return 0;
314}
315
316static ir_live_interval *ir_fix_live_range(ir_ctx *ctx, int v, ir_live_pos old_start, ir_live_pos new_start)
317{
318 ir_live_interval *ival = ctx->live_intervals[v];
319 ir_live_range *p = &ival->range;
320
321#if 0
322 while (p && p->start < old_start) {
323 p = p->next;
324 }
325#endif
326 IR_ASSERT(ival && p->start == old_start);
327 p->start = new_start;
328 return ival;
329}
330
331static void ir_add_use_pos(ir_ctx *ctx, ir_live_interval *ival, ir_use_pos *use_pos)
332{
333 ir_use_pos *p = ival->use_pos;
334
335 if (EXPECTED(!p || p->pos > use_pos->pos)) {
336 use_pos->next = p;
337 ival->use_pos = use_pos;
338 } else {
339 ir_use_pos *prev;
340
341 do {
342 prev = p;
343 p = p->next;
344 } while (p && p->pos < use_pos->pos);
345
346 use_pos->next = prev->next;
347 prev->next = use_pos;
348 }
349}
350
351
352IR_ALWAYS_INLINE void ir_add_use(ir_ctx *ctx, ir_live_interval *ival, int op_num, ir_live_pos pos, ir_reg hint, uint8_t use_flags, ir_ref hint_ref)
353{
354 ir_use_pos *use_pos;
355
356 use_pos = ir_arena_alloc(&ctx->arena, sizeof(ir_use_pos));
357 use_pos->op_num = op_num;
358 use_pos->hint = hint;
359 use_pos->flags = use_flags;
360 use_pos->hint_ref = hint_ref;
361 use_pos->pos = pos;
362
363 if (hint != IR_REG_NONE) {
364 ival->flags |= IR_LIVE_INTERVAL_HAS_HINT_REGS;
365 }
366 if (hint_ref > 0) {
367 ival->flags |= IR_LIVE_INTERVAL_HAS_HINT_REFS;
368 }
369
370 ir_add_use_pos(ctx, ival, use_pos);
371}
372
373static void ir_add_phi_use(ir_ctx *ctx, ir_live_interval *ival, int op_num, ir_live_pos pos, ir_ref phi_ref)
374{
375 ir_use_pos *use_pos;
376
377 IR_ASSERT(phi_ref > 0);
378 use_pos = ir_arena_alloc(&ctx->arena, sizeof(ir_use_pos));
379 use_pos->op_num = op_num;
380 use_pos->hint = IR_REG_NONE;
381 use_pos->flags = IR_PHI_USE | IR_USE_SHOULD_BE_IN_REG; // TODO: ???
382 use_pos->hint_ref = -phi_ref;
383 use_pos->pos = pos;
384
385 ir_add_use_pos(ctx, ival, use_pos);
386}
387
388static void ir_add_hint(ir_ctx *ctx, ir_ref ref, ir_live_pos pos, ir_reg hint)
389{
390 ir_live_interval *ival = ctx->live_intervals[ctx->vregs[ref]];
391
392 if (!(ival->flags & IR_LIVE_INTERVAL_HAS_HINT_REGS)) {
393 ir_use_pos *use_pos = ival->use_pos;
394
395 while (use_pos) {
396 if (use_pos->pos == pos) {
397 if (use_pos->hint == IR_REG_NONE) {
398 use_pos->hint = hint;
399 ival->flags |= IR_LIVE_INTERVAL_HAS_HINT_REGS;
400 }
401 }
402 use_pos = use_pos->next;
403 }
404 }
405}
406
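/*
 * Hint propagation walks intervals from the last virtual register to the
 * first and moves register hints backwards through copy-like chains: if
 * the definition of an interval is linked to another value via hint_ref
 * (op_num == 0) and a later use carries a physical register hint, that
 * hint is replayed onto the referenced definition, so e.g. a value that
 * must eventually end up in the return register can be allocated there
 * from the start.
 */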
407static void ir_hint_propagation(ir_ctx *ctx)
408{
409 int i;
410 ir_live_interval *ival;
411 ir_use_pos *use_pos;
412 ir_use_pos *hint_use_pos;
413
414 for (i = ctx->vregs_count; i > 0; i--) {
415 ival = ctx->live_intervals[i];
416 if (ival
417 && (ival->flags & (IR_LIVE_INTERVAL_HAS_HINT_REGS|IR_LIVE_INTERVAL_HAS_HINT_REFS)) == (IR_LIVE_INTERVAL_HAS_HINT_REGS|IR_LIVE_INTERVAL_HAS_HINT_REFS)) {
418 use_pos = ival->use_pos;
419 hint_use_pos = NULL;
420 while (use_pos) {
421 if (use_pos->op_num == 0) {
422 if (use_pos->hint_ref > 0) {
423 hint_use_pos = use_pos;
424 }
425 } else if (use_pos->hint != IR_REG_NONE) {
426 if (hint_use_pos) {
427 ir_add_hint(ctx, hint_use_pos->hint_ref, hint_use_pos->pos, use_pos->hint);
428 hint_use_pos = NULL;
429 }
430 }
431 use_pos = use_pos->next;
432 }
433 }
434 }
435}
436
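/*
 * Two alternative implementations of liveness analysis follow. When
 * IR_BITSET_LIVENESS is defined, live sets are computed with per-block
 * bitsets (classic backward data-flow over the linearized CFG); otherwise
 * the default path-exploration algorithm after the #else is used, which
 * walks def-use chains and represents live-out sets as linked lists.
 */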
437#ifdef IR_BITSET_LIVENESS
438/* DFS + Loop-Forest liveness for SSA using bitset(s) */
439static void ir_add_osr_entry_loads(ir_ctx *ctx, ir_block *bb, ir_bitset live, uint32_t len, uint32_t b)
440{
441 bool ok = 1;
442 int count = 0;
443 ir_list *list = (ir_list*)ctx->osr_entry_loads;
444 ir_ref i;
445
446 IR_BITSET_FOREACH(live, len, i) {
447 /* Skip live references from ENTRY to PARAM. TODO: duplicate PARAM in each ENTRY ??? */
448 ir_use_pos *use_pos = ctx->live_intervals[i]->use_pos;
449 ir_ref ref = (use_pos->hint_ref < 0) ? -use_pos->hint_ref : IR_LIVE_POS_TO_REF(use_pos->pos);
450
451 if (use_pos->op_num) {
452 ir_ref *ops = ctx->ir_base[ref].ops;
453 ref = ops[use_pos->op_num];
454 }
455
456 if (ctx->ir_base[ref].op == IR_PARAM) {
457 continue;
458 }
459 if (ctx->binding) {
460 ir_ref var = ir_binding_find(ctx, ref);
461 if (var < 0) {
462 /* We may load the value at OSR entry-point */
463 if (!count) {
464 bb->flags &= ~IR_BB_EMPTY;
465 bb->flags |= IR_BB_OSR_ENTRY_LOADS;
466 if (!ctx->osr_entry_loads) {
467 list = ctx->osr_entry_loads = ir_mem_malloc(sizeof(ir_list));
468 ir_list_init(list, 16);
469 }
470 ir_list_push(list, b);
471 ir_list_push(list, 0);
472 }
473 ir_list_push(list, ref);
474 count++;
475 continue;
476 }
477 }
478 fprintf(stderr, "ENTRY %d (block %d start %d) - live var %d\n", ctx->ir_base[bb->start].op2, b, bb->start, ref);
479 ok = 0;
480 } IR_BITSET_FOREACH_END();
481
482 if (!ok) {
483 IR_ASSERT(0);
484 }
485 if (count) {
486 ir_list_set(list, ir_list_len(ctx->osr_entry_loads) - (count + 1), count);
487
488#if 0
489 /* ENTRY "clobbers" all registers */
490 ir_ref ref = ctx->ir_base[bb->start].op1;
491 ir_add_fixed_live_range(ctx, IR_REG_ALL,
492 IR_DEF_LIVE_POS_FROM_REF(ref),
493 IR_SAVE_LIVE_POS_FROM_REF(ref));
494#endif
495 }
496}
497
498static void ir_add_fusion_ranges(ir_ctx *ctx, ir_ref ref, ir_ref input, ir_block *bb, ir_bitset live)
499{
500 ir_ref stack[4];
501 int stack_pos = 0;
502 ir_target_constraints constraints;
503 ir_insn *insn;
504 uint32_t j, n, flags, def_flags;
505 ir_ref *p, child;
506 uint8_t use_flags;
507 ir_reg reg;
508 ir_live_pos use_pos;
509 ir_live_interval *ival;
510
511 while (1) {
512 IR_ASSERT(input > 0 && ctx->rules[input] & IR_FUSED);
513
514 if (!(ctx->rules[input] & IR_SIMPLE)) {
515 def_flags = ir_get_target_constraints(ctx, input, &constraints);
516 n = constraints.tmps_count;
517 while (n > 0) {
518 n--;
519 if (constraints.tmp_regs[n].type) {
520 ir_add_tmp(ctx, ref, input, constraints.tmp_regs[n].num, constraints.tmp_regs[n]);
521 } else {
522 /* CPU specific constraints */
523 ir_add_fixed_live_range(ctx, constraints.tmp_regs[n].reg,
524 IR_START_LIVE_POS_FROM_REF(ref) + constraints.tmp_regs[n].start,
525 IR_START_LIVE_POS_FROM_REF(ref) + constraints.tmp_regs[n].end);
526 }
527 }
528 } else {
529 def_flags = IR_OP1_MUST_BE_IN_REG | IR_OP2_MUST_BE_IN_REG | IR_OP3_MUST_BE_IN_REG;
530 constraints.hints_count = 0;
531 }
532
533 insn = &ctx->ir_base[input];
534 flags = ir_op_flags[insn->op];
535 n = IR_INPUT_EDGES_COUNT(flags);
536 j = 1;
537 p = insn->ops + j;
538 if (flags & IR_OP_FLAG_CONTROL) {
539 j++;
540 p++;
541 }
542 for (; j <= n; j++, p++) {
543 IR_ASSERT(IR_OPND_KIND(flags, j) == IR_OPND_DATA);
544 child = *p;
545 if (child > 0) {
546 uint32_t v = ctx->vregs[child];
547
548 if (v) {
549 use_flags = IR_FUSED_USE | IR_USE_FLAGS(def_flags, j);
550 reg = (j < constraints.hints_count) ? constraints.hints[j] : IR_REG_NONE;
551 use_pos = IR_LOAD_LIVE_POS_FROM_REF(ref);
552 if (EXPECTED(reg == IR_REG_NONE)) {
553 use_pos += IR_USE_SUB_REF;
554 }
555
556 if (!ir_bitset_in(live, v)) {
557 /* live.add(opd) */
558 ir_bitset_incl(live, v);
559 /* intervals[opd].addRange(b.from, op.id) */
560 ival = ir_add_live_range(ctx, v,
561 IR_START_LIVE_POS_FROM_REF(bb->start), use_pos);
562 } else {
563 ival = ctx->live_intervals[v];
564 }
565 ir_add_use(ctx, ival, j, use_pos, reg, use_flags, -input);
566 } else if (ctx->rules[child] & IR_FUSED) {
567 IR_ASSERT(stack_pos < (int)(sizeof(stack)/sizeof(stack_pos)));
568 stack[stack_pos++] = child;
569 } else if (ctx->rules[child] == (IR_SKIPPED|IR_RLOAD)) {
570 ir_set_alocated_reg(ctx, input, j, ctx->ir_base[child].op2);
571 }
572 }
573 }
574 if (!stack_pos) {
575 break;
576 }
577 input = stack[--stack_pos];
578 }
579}
580
581int ir_compute_live_ranges(ir_ctx *ctx)
582{
583 uint32_t b, i, j, k, n, succ, *p;
584 ir_ref ref;
585 uint32_t len;
586 ir_insn *insn;
587 ir_block *bb, *succ_bb;
588#ifdef IR_DEBUG
589 ir_bitset visited;
590#endif
591 ir_bitset live, bb_live;
592 ir_bitset loops = NULL;
593 ir_bitqueue queue;
594 ir_live_interval *ival;
595
596 if (!(ctx->flags2 & IR_LINEAR) || !ctx->vregs) {
597 return 0;
598 }
599
600 if (ctx->rules) {
601 ctx->regs = ir_mem_malloc(sizeof(ir_regs) * ctx->insns_count);
602 memset(ctx->regs, IR_REG_NONE, sizeof(ir_regs) * ctx->insns_count);
603 }
604
605 /* Root of the list of IR_VARs */
606 ctx->vars = IR_UNUSED;
607
608 /* Compute Live Ranges */
609 ctx->flags2 &= ~IR_LR_HAVE_DESSA_MOVES;
610 len = ir_bitset_len(ctx->vregs_count + 1);
611 bb_live = ir_mem_malloc((ctx->cfg_blocks_count + 1) * len * sizeof(ir_bitset_base_t));
612
613 /* vregs + tmp + fixed + SCRATCH + ALL */
614 ctx->live_intervals = ir_mem_calloc(ctx->vregs_count + 1 + IR_REG_NUM + 2, sizeof(ir_live_interval*));
615
616#ifdef IR_DEBUG
617 visited = ir_bitset_malloc(ctx->cfg_blocks_count + 1);
618#endif
619
620 if (!ctx->arena) {
621 ctx->arena = ir_arena_create(16 * 1024);
622 }
623
624 /* for each basic block in reverse order */
625 for (b = ctx->cfg_blocks_count; b > 0; b--) {
626 bb = &ctx->cfg_blocks[b];
627 IR_ASSERT(!(bb->flags & IR_BB_UNREACHABLE));
628 /* for each successor of b */
629
630#ifdef IR_DEBUG
631 ir_bitset_incl(visited, b);
632#endif
633 live = bb_live + (len * b);
634 n = bb->successors_count;
635 if (n == 0) {
636 ir_bitset_clear(live, len);
637 } else {
638 p = &ctx->cfg_edges[bb->successors];
639 succ = *p;
640
641#ifdef IR_DEBUG
642 /* blocks must be ordered so that all dominators of a block come before it */
643 IR_ASSERT(ir_bitset_in(visited, succ) || bb->loop_header == succ);
644#endif
645
646 /* live = union of successors.liveIn */
647 if (EXPECTED(succ > b) && EXPECTED(!(ctx->cfg_blocks[succ].flags & IR_BB_ENTRY))) {
648 ir_bitset_copy(live, bb_live + (len * succ), len);
649 } else {
650 IR_ASSERT(succ > b || (ctx->cfg_blocks[succ].flags & IR_BB_LOOP_HEADER));
651 ir_bitset_clear(live, len);
652 }
653 if (n > 1) {
654 for (p++, n--; n > 0; p++, n--) {
655 succ = *p;
656 if (EXPECTED(succ > b) && EXPECTED(!(ctx->cfg_blocks[succ].flags & IR_BB_ENTRY))) {
657 ir_bitset_union(live, bb_live + (len * succ), len);
658 } else {
659 IR_ASSERT(succ > b || (ctx->cfg_blocks[succ].flags & IR_BB_LOOP_HEADER));
660 }
661 }
662 }
663
664 /* for each opd in live */
665 IR_BITSET_FOREACH(live, len, i) {
666 /* intervals[opd].addRange(b.from, b.to) */
667 ir_add_prev_live_range(ctx, i,
668 IR_START_LIVE_POS_FROM_REF(bb->start),
669 IR_END_LIVE_POS_FROM_REF(bb->end));
670 } IR_BITSET_FOREACH_END();
671 }
672
673 if (bb->successors_count == 1) {
674 /* for each phi function phi of successor */
675 succ = ctx->cfg_edges[bb->successors];
676 succ_bb = &ctx->cfg_blocks[succ];
677 if (succ_bb->flags & IR_BB_HAS_PHI) {
678 ir_use_list *use_list = &ctx->use_lists[succ_bb->start];
679
680 k = ir_phi_input_number(ctx, succ_bb, b);
681 IR_ASSERT(k != 0);
682 for (ref = 0; ref < use_list->count; ref++) {
683 ir_ref use = ctx->use_edges[use_list->refs + ref];
684 insn = &ctx->ir_base[use];
685 if (insn->op == IR_PHI) {
686 ir_ref input = ir_insn_op(insn, k);
687 if (input > 0) {
688 uint32_t v = ctx->vregs[input];
689
690 /* live.add(phi.inputOf(b)) */
691 IR_ASSERT(v);
692 ir_bitset_incl(live, v);
693 /* intervals[phi.inputOf(b)].addRange(b.from, b.to) */
694 ival = ir_add_prev_live_range(ctx, v,
695 IR_START_LIVE_POS_FROM_REF(bb->start),
696 IR_END_LIVE_POS_FROM_REF(bb->end));
697 ir_add_phi_use(ctx, ival, k, IR_DEF_LIVE_POS_FROM_REF(bb->end), use);
698 }
699 }
700 }
701 }
702 }
703
704 /* for each operation op of b in reverse order */
705 ref = bb->end;
706 insn = &ctx->ir_base[ref];
707 if (insn->op == IR_END || insn->op == IR_LOOP_END) {
708 ref = ctx->prev_ref[ref];
709 }
710 for (; ref > bb->start; ref = ctx->prev_ref[ref]) {
711 uint32_t def_flags;
712 uint32_t flags;
713 ir_ref *p;
714 ir_target_constraints constraints;
715 uint32_t v;
716
717 if (ctx->rules) {
718 int n;
719
720 if (ctx->rules[ref] & (IR_FUSED|IR_SKIPPED)) {
721 if (((ctx->rules[ref] & IR_RULE_MASK) == IR_VAR
722 || (ctx->rules[ref] & IR_RULE_MASK) == IR_ALLOCA)
723 && ctx->use_lists[ref].count > 0) {
724 insn = &ctx->ir_base[ref];
725 if (insn->op != IR_VADDR) {
726 insn->op3 = ctx->vars;
727 ctx->vars = ref;
728 }
729 }
730 continue;
731 }
732
733 def_flags = ir_get_target_constraints(ctx, ref, &constraints);
734 n = constraints.tmps_count;
735 while (n > 0) {
736 n--;
737 if (constraints.tmp_regs[n].type) {
738 ir_add_tmp(ctx, ref, ref, constraints.tmp_regs[n].num, constraints.tmp_regs[n]);
739 } else {
740 /* CPU specific constraints */
741 ir_add_fixed_live_range(ctx, constraints.tmp_regs[n].reg,
742 IR_START_LIVE_POS_FROM_REF(ref) + constraints.tmp_regs[n].start,
743 IR_START_LIVE_POS_FROM_REF(ref) + constraints.tmp_regs[n].end);
744 }
745 }
746 } else {
747 def_flags = 0;
748 constraints.def_reg = IR_REG_NONE;
749 constraints.hints_count = 0;
750 }
751
752 insn = &ctx->ir_base[ref];
753 v = ctx->vregs[ref];
754 if (v) {
755 IR_ASSERT(ir_bitset_in(live, v));
756
757 if (insn->op != IR_PHI) {
758 ir_live_pos def_pos;
759 ir_ref hint_ref = 0;
760 ir_reg reg = constraints.def_reg;
761
762 if (reg != IR_REG_NONE) {
763 def_pos = IR_SAVE_LIVE_POS_FROM_REF(ref);
764 if (insn->op == IR_PARAM || insn->op == IR_RLOAD) {
765 /* parameter register must be kept before it's copied */
766 ir_add_fixed_live_range(ctx, reg, IR_START_LIVE_POS_FROM_REF(bb->start), def_pos);
767 }
768 } else if (def_flags & IR_DEF_REUSES_OP1_REG) {
769 if (!IR_IS_CONST_REF(insn->op1) && ctx->vregs[insn->op1]) {
770 hint_ref = insn->op1;
771 }
772 def_pos = IR_LOAD_LIVE_POS_FROM_REF(ref);
773 } else if (def_flags & IR_DEF_CONFLICTS_WITH_INPUT_REGS) {
774 def_pos = IR_LOAD_LIVE_POS_FROM_REF(ref);
775 } else {
776 if (insn->op == IR_PARAM) {
777 /* We may reuse parameter stack slot for spilling */
778 ctx->live_intervals[v]->flags |= IR_LIVE_INTERVAL_MEM_PARAM;
779 } else if (insn->op == IR_VLOAD) {
780 /* Load may be fused into the usage instruction */
781 ctx->live_intervals[v]->flags |= IR_LIVE_INTERVAL_MEM_LOAD;
782 }
783 def_pos = IR_DEF_LIVE_POS_FROM_REF(ref);
784 }
785 /* live.remove(opd) */
786 ir_bitset_excl(live, v);
787 /* intervals[opd].setFrom(op.id) */
788 ival = ir_fix_live_range(ctx, v,
789 IR_START_LIVE_POS_FROM_REF(bb->start), def_pos);
790 ival->type = insn->type;
791 ir_add_use(ctx, ival, 0, def_pos, reg, def_flags, hint_ref);
792 } else {
793 /* live.remove(opd) */
794 ir_bitset_excl(live, v);
795 /* PHIs inputs must not be processed */
796 ival = ctx->live_intervals[v];
797 if (UNEXPECTED(!ival)) {
798 /* Dead PHI */
799 ival = ir_add_live_range(ctx, v, IR_DEF_LIVE_POS_FROM_REF(ref), IR_USE_LIVE_POS_FROM_REF(ref));
800 }
801 ival->type = insn->type;
802 ir_add_use(ctx, ival, 0, IR_DEF_LIVE_POS_FROM_REF(ref), IR_REG_NONE, IR_USE_SHOULD_BE_IN_REG, 0);
803 continue;
804 }
805 }
806
807 IR_ASSERT(insn->op != IR_PHI && (!ctx->rules || !(ctx->rules[ref] & (IR_FUSED|IR_SKIPPED))));
808 flags = ir_op_flags[insn->op];
809 j = 1;
810 p = insn->ops + 1;
811 if (flags & (IR_OP_FLAG_CONTROL|IR_OP_FLAG_MEM|IR_OP_FLAG_PINNED)) {
812 j++;
813 p++;
814 }
815 for (; j <= insn->inputs_count; j++, p++) {
816 ir_ref input = *p;
817 ir_reg reg = (j < constraints.hints_count) ? constraints.hints[j] : IR_REG_NONE;
818 ir_live_pos use_pos;
819 ir_ref hint_ref = 0;
820 uint32_t v;
821
822 if (input > 0) {
823 v = ctx->vregs[input];
824 if (v) {
825 use_pos = IR_USE_LIVE_POS_FROM_REF(ref);
826 if (reg != IR_REG_NONE) {
827 use_pos = IR_LOAD_LIVE_POS_FROM_REF(ref);
828 ir_add_fixed_live_range(ctx, reg, use_pos, use_pos + IR_USE_SUB_REF);
829 } else if (def_flags & IR_DEF_REUSES_OP1_REG) {
830 if (j == 1) {
831 use_pos = IR_LOAD_LIVE_POS_FROM_REF(ref);
832 IR_ASSERT(ctx->vregs[ref]);
833 hint_ref = ref;
834 } else if (input == insn->op1) {
835 /* Input is the same as "op1" */
836 use_pos = IR_LOAD_LIVE_POS_FROM_REF(ref);
837 }
838 }
839 if (!ir_bitset_in(live, v)) {
840 /* live.add(opd) */
841 ir_bitset_incl(live, v);
842 /* intervals[opd].addRange(b.from, op.id) */
843 ival = ir_add_live_range(ctx, v, IR_START_LIVE_POS_FROM_REF(bb->start), use_pos);
844 } else {
845 ival = ctx->live_intervals[v];
846 }
847 ir_add_use(ctx, ival, j, use_pos, reg, IR_USE_FLAGS(def_flags, j), hint_ref);
848 } else if (ctx->rules) {
849 if (ctx->rules[input] & IR_FUSED) {
850 ir_add_fusion_ranges(ctx, ref, input, bb, live);
851 } else if (ctx->rules[input] == (IR_SKIPPED|IR_RLOAD)) {
852 ir_set_alocated_reg(ctx, ref, j, ctx->ir_base[input].op2);
853 }
854 }
855 } else if (reg != IR_REG_NONE) {
856 use_pos = IR_LOAD_LIVE_POS_FROM_REF(ref);
857 ir_add_fixed_live_range(ctx, reg, use_pos, use_pos + IR_USE_SUB_REF);
858 }
859 }
860 }
861
862 /* if b is loop header */
863 if ((bb->flags & IR_BB_LOOP_HEADER)
864 && !ir_bitset_empty(live, len)) {
865 /* variables live at loop header are alive at the whole loop body */
866 uint32_t bb_set_len = ir_bitset_len(ctx->cfg_blocks_count + 1);
867 uint32_t child;
868 ir_block *child_bb;
869 ir_bitset child_live_in;
870
871 if (!loops) {
872 loops = ir_bitset_malloc(ctx->cfg_blocks_count + 1);
873 ir_bitqueue_init(&queue, ctx->cfg_blocks_count + 1);
874 } else {
875 ir_bitset_clear(loops, bb_set_len);
876 ir_bitqueue_clear(&queue);
877 }
878 ir_bitset_incl(loops, b);
879 child = b;
880 do {
881 child_bb = &ctx->cfg_blocks[child];
882 child_live_in = bb_live + (len * child);
883
884 IR_BITSET_FOREACH(live, len, i) {
885 ir_bitset_incl(child_live_in, i);
886 ir_add_live_range(ctx, i,
887 IR_START_LIVE_POS_FROM_REF(child_bb->start),
888 IR_END_LIVE_POS_FROM_REF(child_bb->end));
889 } IR_BITSET_FOREACH_END();
890
891 child = child_bb->dom_child;
892 while (child) {
893 child_bb = &ctx->cfg_blocks[child];
894 if (child_bb->loop_header && ir_bitset_in(loops, child_bb->loop_header)) {
895 ir_bitqueue_add(&queue, child);
896 if (child_bb->flags & IR_BB_LOOP_HEADER) {
897 ir_bitset_incl(loops, child);
898 }
899 }
900 child = child_bb->dom_next_child;
901 }
902 } while ((child = ir_bitqueue_pop(&queue)) != (uint32_t)-1);
903 }
904 }
905
906 if (ctx->entries) {
907 for (i = 0; i < ctx->entries_count; i++) {
908 b = ctx->entries[i];
909 bb = &ctx->cfg_blocks[b];
910 live = bb_live + (len * b);
911 ir_add_osr_entry_loads(ctx, bb, live, len, b);
912 }
913 if (ctx->osr_entry_loads) {
914 ir_list_push((ir_list*)ctx->osr_entry_loads, 0);
915 }
916 }
917
918 if (loops) {
919 ir_mem_free(loops);
920 ir_bitqueue_free(&queue);
921 }
922
923 ir_mem_free(bb_live);
924#ifdef IR_DEBUG
925 ir_mem_free(visited);
926#endif
927
928 return 1;
929}
930
931#else
932/* Path exploration by definition liveness for SSA using sets represented by linked lists */
933
934#define IS_LIVE_IN_BLOCK(v, b) \
935 (live_in_block[v] == b)
936#define SET_LIVE_IN_BLOCK(v, b) do { \
937 live_in_block[v] = b; \
938 } while (0)
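/*
 * live_in_block[] is a cheap replacement for per-block live-in bitsets:
 * it records, for every virtual register, the last block that inserted it
 * into its live-in set, so IS_LIVE_IN_BLOCK(v, b) is a single compare.
 */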
939
940/* Returns the last virtual register alive at the end of the block (it is used as an already-visited marker) */
941IR_ALWAYS_INLINE uint32_t ir_live_out_top(ir_ctx *ctx, uint32_t *live_outs, ir_list *live_lists, uint32_t b)
942{
943#if 0
944 return live_outs[b];
945#else
946 if (!live_outs[b]) {
947 return -1;
948 }
949 return ir_list_at(live_lists, live_outs[b]);
950#endif
951}
952
953/* Remember a virtual register alive at the end of the block */
954IR_ALWAYS_INLINE void ir_live_out_push(ir_ctx *ctx, uint32_t *live_outs, ir_list *live_lists, uint32_t b, uint32_t v)
955{
956#if 0
957 ir_block *bb = &ctx->cfg_blocks[b];
958 live_outs[b] = v;
959 ir_add_prev_live_range(ctx, v,
960 IR_START_LIVE_POS_FROM_REF(bb->start),
961 IR_END_LIVE_POS_FROM_REF(bb->end));
962#else
963 if (live_lists->len >= live_lists->a.size) {
964 ir_array_grow(&live_lists->a, live_lists->a.size + 1024);
965 }
966 /* Form a linked list of virtual registers live at the end of the block */
967 ir_list_push_unchecked(live_lists, live_outs[b]); /* push old root of the list (previous element of the list) */
968 live_outs[b] = ir_list_len(live_lists); /* remember the new root */
969 ir_list_push_unchecked(live_lists, v); /* push a virtual register */
970#endif
971}
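/*
 * The live-out sets are encoded as linked lists packed into a single
 * ir_list: live_outs[b] is the index of the head cell for block b, and
 * each cell is a pair (index of the previous cell, vreg). E.g. after
 * pushing v1 and then v2 for block b, ir_list_at(live_lists, live_outs[b])
 * yields v2, and the cell just before it links back to the v1 entry.
 */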
972
973/*
974 * Computes live-out sets for each basic-block per variable using def-use chains.
975 *
976 * The implementation is based on algorithms 6 and 7 described in
977 * "Computing Liveness Sets for SSA-Form Programs", Florian Brandner, Benoit Boissinot,
978 * Alain Darte, Benoit Dupont de Dinechin, Fabrice Rastello. TR Inria RR-7503, 2011
979 */
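/*
 * In short: for every definition the uses are visited; every block where
 * the value is live-in (a use block, or a phi predecessor) is pushed on a
 * queue, and predecessors are then walked backwards ("UP_AND_MARK") until
 * the defining block is reached, recording the value in each visited
 * block's live-out list exactly once.
 */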
980static void ir_compute_live_sets(ir_ctx *ctx, uint32_t *live_outs, ir_list *live_lists)
981{
982 ir_list block_queue, fuse_queue;
983 ir_ref i;
984
985 ir_list_init(&fuse_queue, 16);
986 ir_list_init(&block_queue, 256);
987
988 /* For each virtual register explore paths from all uses to definition */
989 for (i = ctx->insns_count - 1; i > 0; i--) {
990 uint32_t v = ctx->vregs[i];
991
992 if (v) {
993 uint32_t def_block = ctx->cfg_map[i];
994 ir_use_list *use_list = &ctx->use_lists[i];
995 ir_ref *p, n = use_list->count;
996
997 /* Collect all blocks where 'v' is used into a 'block_queue' */
998 for (p = &ctx->use_edges[use_list->refs]; n > 0; p++, n--) {
999 ir_ref use = *p;
1000 ir_insn *insn = &ctx->ir_base[use];
1001
1002 if (UNEXPECTED(insn->op == IR_PHI)) {
1003 ir_ref n = insn->inputs_count - 1;
1004 ir_ref *p = insn->ops + 2; /* PHI data inputs */
1005 ir_ref *q = ctx->ir_base[insn->op1].ops + 1; /* MERGE inputs */
1006
1007 for (;n > 0; p++, q++, n--) {
1008 if (*p == i) {
1009 uint32_t pred_block = ctx->cfg_map[*q];
1010
1011 if (ir_live_out_top(ctx, live_outs, live_lists, pred_block) != v) {
1012 ir_live_out_push(ctx, live_outs, live_lists, pred_block, v);
1013 if (pred_block != def_block) {
1014 ir_list_push(&block_queue, pred_block);
1015 }
1016 }
1017 }
1018 }
1019 } else if (ctx->rules && UNEXPECTED(ctx->rules[use] & IR_FUSED)) {
1020 while (1) {
1021 ir_use_list *use_list = &ctx->use_lists[use];
1022 ir_ref *p, n = use_list->count;
1023
1024 for (p = &ctx->use_edges[use_list->refs]; n > 0; p++, n--) {
1025 ir_ref use = *p;
1026
1027 if (ctx->rules[use] & IR_FUSED) {
1028 ir_list_push(&fuse_queue, use);
1029 } else {
1030 uint32_t use_block = ctx->cfg_map[use];
1031
1032 if (def_block != use_block && ir_live_out_top(ctx, live_outs, live_lists, use_block) != v) {
1033 ir_list_push(&block_queue, use_block);
1034 }
1035 }
1036 }
1037 if (!ir_list_len(&fuse_queue)) {
1038 break;
1039 }
1040 use = ir_list_pop(&fuse_queue);
1041 }
1042 } else {
1043 uint32_t use_block = ctx->cfg_map[use];
1044
1045 /* Check if the virtual register is alive at the start of 'use_block' */
1046 if (def_block != use_block && ir_live_out_top(ctx, live_outs, live_lists, use_block) != v) {
1047 ir_list_push(&block_queue, use_block);
1048 }
1049 }
1050 }
1051
1052 /* UP_AND_MARK: Traverse through predecessor blocks until we reach the block where 'v' is defined */
1053 while (ir_list_len(&block_queue)) {
1054 uint32_t b = ir_list_pop(&block_queue);
1055 ir_block *bb = &ctx->cfg_blocks[b];
1056 uint32_t *p, n = bb->predecessors_count;
1057
1058 if (bb->flags & IR_BB_ENTRY) {
1059 /* live_in_push(ENTRY, v) */
1060 ir_insn *insn = &ctx->ir_base[bb->start];
1061
1062 IR_ASSERT(insn->op == IR_ENTRY);
1063 IR_ASSERT(insn->op3 >= 0 && insn->op3 < (ir_ref)ctx->entries_count);
1064 if (live_lists->len >= live_lists->a.size) {
1065 ir_array_grow(&live_lists->a, live_lists->a.size + 1024);
1066 }
1067 ir_list_push_unchecked(live_lists, live_outs[ctx->cfg_blocks_count + 1 + insn->op3]);
1068 ir_list_push_unchecked(live_lists, v);
1069 live_outs[ctx->cfg_blocks_count + 1 + insn->op3] = ir_list_len(live_lists) - 1;
1070 continue;
1071 }
1072 for (p = &ctx->cfg_edges[bb->predecessors]; n > 0; p++, n--) {
1073 uint32_t pred_block = *p;
1074
1075 /* Check if 'pred_block' wasn't traversed before */
1076 if (ir_live_out_top(ctx, live_outs, live_lists, pred_block) != v) {
1077 /* Mark a virtual register 'v' alive at the end of 'pred_block' */
1078 ir_live_out_push(ctx, live_outs, live_lists, pred_block, v);
1079 if (pred_block != def_block) {
1080 ir_list_push(&block_queue, pred_block);
1081 }
1082 }
1083 }
1084 }
1085 }
1086 }
1087
1088 ir_list_free(&block_queue);
1089 ir_list_free(&fuse_queue);
1090}
1091
1092static void ir_add_osr_entry_loads(ir_ctx *ctx, ir_block *bb, uint32_t pos, ir_list *live_lists, uint32_t b)
1093{
1094 bool ok = 1;
1095 int count = 0;
1096 ir_list *list = (ir_list*)ctx->osr_entry_loads;
1097 ir_ref i;
1098
1099 while (pos) {
1100 i = ir_list_at(live_lists, pos);
1101 pos = ir_list_at(live_lists, pos - 1);
1102
1103 /* Skip live references from ENTRY to PARAM. TODO: duplicate PARAM in each ENTRY ??? */
1104 ir_use_pos *use_pos = ctx->live_intervals[i]->use_pos;
1105 ir_ref ref = (use_pos->hint_ref < 0) ? -use_pos->hint_ref : IR_LIVE_POS_TO_REF(use_pos->pos);
1106
1107 if (use_pos->op_num) {
1108 ir_ref *ops = ctx->ir_base[ref].ops;
1109 ref = ops[use_pos->op_num];
1110 }
1111
1112 if (ctx->ir_base[ref].op == IR_PARAM) {
1113 continue;
1114 }
1115 if (ctx->binding) {
1116 ir_ref var = ir_binding_find(ctx, ref);
1117 if (var < 0) {
1118 /* We may load the value at OSR entry-point */
1119 if (!count) {
1120 bb->flags &= ~IR_BB_EMPTY;
1121 bb->flags |= IR_BB_OSR_ENTRY_LOADS;
1122 if (!ctx->osr_entry_loads) {
1123 list = ctx->osr_entry_loads = ir_mem_malloc(sizeof(ir_list));
1124 ir_list_init(list, 16);
1125 }
1126 ir_list_push(list, b);
1127 ir_list_push(list, 0);
1128 }
1129 ir_list_push(list, ref);
1130 count++;
1131 continue;
1132 }
1133 }
1134 fprintf(stderr, "ENTRY %d (block %d start %d) - live var %d\n", ctx->ir_base[bb->start].op2, b, bb->start, ref);
1135 ok = 0;
1136 }
1137
1138 if (!ok) {
1139 IR_ASSERT(0);
1140 }
1141 if (count) {
1142 ir_list_set(list, ir_list_len(ctx->osr_entry_loads) - (count + 1), count);
1143
1144#if 0
1145 /* ENTRY "clobbers" all registers */
1146 ir_ref ref = ctx->ir_base[bb->start].op1;
1147 ir_add_fixed_live_range(ctx, IR_REG_ALL,
1150#endif
1151 }
1152}
1153
1154static void ir_add_fusion_ranges(ir_ctx *ctx, ir_ref ref, ir_ref input, ir_block *bb, uint32_t *live_in_block, uint32_t b)
1155{
1156 ir_ref stack[4];
1157 int stack_pos = 0;
1158 ir_target_constraints constraints;
1159 ir_insn *insn;
1160 uint32_t j, n, flags, def_flags;
1161 ir_ref *p, child;
1162 uint8_t use_flags;
1163 ir_reg reg;
1164 ir_live_pos pos = IR_START_LIVE_POS_FROM_REF(ref);
1165 ir_live_pos use_pos;
1166 ir_live_interval *ival;
1167
1168 while (1) {
1169 IR_ASSERT(input > 0 && ctx->rules[input] & IR_FUSED);
1170
1171 if (!(ctx->rules[input] & IR_SIMPLE)) {
1172 def_flags = ir_get_target_constraints(ctx, input, &constraints);
1173 n = constraints.tmps_count;
1174 while (n > 0) {
1175 n--;
1176 if (constraints.tmp_regs[n].type) {
1177 ir_add_tmp(ctx, ref, input, constraints.tmp_regs[n].num, constraints.tmp_regs[n]);
1178 } else {
1179 /* CPU specific constraints */
1180 ir_add_fixed_live_range(ctx, constraints.tmp_regs[n].reg,
1181 pos + constraints.tmp_regs[n].start,
1182 pos + constraints.tmp_regs[n].end);
1183 }
1184 }
1185 } else {
1186 def_flags = IR_OP1_MUST_BE_IN_REG | IR_OP2_MUST_BE_IN_REG | IR_OP3_MUST_BE_IN_REG;
1187 constraints.hints_count = 0;
1188 }
1189
1190 insn = &ctx->ir_base[input];
1191 flags = ir_op_flags[insn->op];
1192 IR_ASSERT(!IR_OP_HAS_VAR_INPUTS(flags));
1193 n = IR_INPUT_EDGES_COUNT(flags);
1194 j = 1;
1195 p = insn->ops + j;
1196 if (flags & IR_OP_FLAG_CONTROL) {
1197 j++;
1198 p++;
1199 }
1200 for (; j <= n; j++, p++) {
1201 IR_ASSERT(IR_OPND_KIND(flags, j) == IR_OPND_DATA);
1202 child = *p;
1203 if (child > 0) {
1204 uint32_t v = ctx->vregs[child];
1205
1206 if (v) {
1207 reg = (j < constraints.hints_count) ? constraints.hints[j] : IR_REG_NONE;
1208 use_pos = pos;
1209 if (EXPECTED(reg == IR_REG_NONE)) {
1210 use_pos += IR_USE_SUB_REF;
1211 }
1212
1213 if (!IS_LIVE_IN_BLOCK(v, b)) {
1214 /* live.add(opd) */
1215 SET_LIVE_IN_BLOCK(v, b);
1216 /* intervals[opd].addRange(b.from, op.id) */
1217 ival = ir_add_live_range(ctx, v,
1218 IR_START_LIVE_POS_FROM_REF(bb->start), use_pos);
1219 } else {
1220 ival = ctx->live_intervals[v];
1221 }
1222 use_flags = IR_FUSED_USE | IR_USE_FLAGS(def_flags, j);
1223 ir_add_use(ctx, ival, j, use_pos, reg, use_flags, -input);
1224 } else if (ctx->rules[child] & IR_FUSED) {
1225 IR_ASSERT(stack_pos < (int)(sizeof(stack)/sizeof(stack_pos)));
1226 stack[stack_pos++] = child;
1227 } else if (ctx->rules[child] == (IR_SKIPPED|IR_RLOAD)) {
1228 ir_set_alocated_reg(ctx, input, j, ctx->ir_base[child].op2);
1229 }
1230 }
1231 }
1232 if (!stack_pos) {
1233 break;
1234 }
1235 input = stack[--stack_pos];
1236 }
1237}
1238
1239int ir_compute_live_ranges(ir_ctx *ctx)
1240{
1241 uint32_t b, i, j, k, n, succ;
1242 ir_ref ref;
1243 ir_insn *insn;
1244 ir_block *bb, *succ_bb;
1245 uint32_t *live_outs;
1246 uint32_t *live_in_block;
1247 ir_list live_lists;
1248 ir_live_interval *ival;
1249
1250 if (!(ctx->flags2 & IR_LINEAR) || !ctx->vregs) {
1251 return 0;
1252 }
1253
1254 if (ctx->rules) {
1255 ctx->regs = ir_mem_malloc(sizeof(ir_regs) * ctx->insns_count);
1256 memset(ctx->regs, IR_REG_NONE, sizeof(ir_regs) * ctx->insns_count);
1257 }
1258
1259 /* Root of the list of IR_VARs */
1260 ctx->vars = IR_UNUSED;
1261
1262 /* Compute Live Ranges */
1263 ctx->flags2 &= ~IR_LR_HAVE_DESSA_MOVES;
1264
1265 /* vregs + tmp + fixed + SCRATCH + ALL */
1266 ctx->live_intervals = ir_mem_calloc(ctx->vregs_count + 1 + IR_REG_NUM + 2, sizeof(ir_live_interval*));
1267
1268 if (!ctx->arena) {
1269 ctx->arena = ir_arena_create(16 * 1024);
1270 }
1271
1272 live_outs = ir_mem_calloc(ctx->cfg_blocks_count + 1 + ctx->entries_count, sizeof(uint32_t));
1273 ir_list_init(&live_lists, 1024);
1274 ir_compute_live_sets(ctx, live_outs, &live_lists);
1275 live_in_block = ir_mem_calloc(ctx->vregs_count + 1, sizeof(uint32_t));
1276
1277 /* for each basic block in reverse order */
1278 for (b = ctx->cfg_blocks_count; b > 0; b--) {
1279 bb = &ctx->cfg_blocks[b];
1280 IR_ASSERT(!(bb->flags & IR_BB_UNREACHABLE));
1281
1282 /* For all virtual registers alive at the end of the block */
1283 n = live_outs[b];
1284 while (n != 0) {
1285 i = ir_list_at(&live_lists, n);
1286 SET_LIVE_IN_BLOCK(i, b);
1287 ir_add_prev_live_range(ctx, i,
1288 IR_START_LIVE_POS_FROM_REF(bb->start),
1289 IR_END_LIVE_POS_FROM_REF(bb->end));
1290 n = ir_list_at(&live_lists, n - 1);
1291 }
1292
1293 if (bb->successors_count == 1) {
1294 /* for each phi function of the successor */
1295 succ = ctx->cfg_edges[bb->successors];
1296 succ_bb = &ctx->cfg_blocks[succ];
1297 if (succ_bb->flags & IR_BB_HAS_PHI) {
1298 ir_use_list *use_list = &ctx->use_lists[succ_bb->start];
1299 ir_ref n, *p;
1300
1301 k = ir_phi_input_number(ctx, succ_bb, b);
1302 IR_ASSERT(k != 0);
1303 n = use_list->count;
1304 for (p = &ctx->use_edges[use_list->refs]; n > 0; p++, n--) {
1305 ir_ref use = *p;
1306 insn = &ctx->ir_base[use];
1307 if (insn->op == IR_PHI) {
1308 ir_ref input = ir_insn_op(insn, k);
1309 if (input > 0) {
1310 uint32_t v = ctx->vregs[input];
1311
1312 if (v) {
1313 ival = ctx->live_intervals[v];
1314 ir_add_phi_use(ctx, ival, k, IR_DEF_LIVE_POS_FROM_REF(bb->end), use);
1315 }
1316 }
1317 }
1318 }
1319 }
1320 }
1321
1322 /* for each operation of the block in reverse order */
1323 ref = bb->end;
1324 insn = &ctx->ir_base[ref];
1325 if (insn->op == IR_END || insn->op == IR_LOOP_END) {
1326 ref = ctx->prev_ref[ref];
1327 }
1328 for (; ref > bb->start; ref = ctx->prev_ref[ref]) {
1329 uint32_t def_flags;
1330 uint32_t flags;
1331 ir_ref *p;
1332 ir_target_constraints constraints;
1333 uint32_t v;
1334
1335 if (ctx->rules) {
1336 int n;
1337
1338 if (ctx->rules[ref] & (IR_FUSED|IR_SKIPPED)) {
1339 if (((ctx->rules[ref] & IR_RULE_MASK) == IR_VAR
1340 || (ctx->rules[ref] & IR_RULE_MASK) == IR_ALLOCA)
1341 && ctx->use_lists[ref].count > 0) {
1342 insn = &ctx->ir_base[ref];
1343 if (insn->op != IR_VADDR) {
1344 insn->op3 = ctx->vars;
1345 ctx->vars = ref;
1346 }
1347 }
1348 continue;
1349 }
1350
1351 def_flags = ir_get_target_constraints(ctx, ref, &constraints);
1352 n = constraints.tmps_count;
1353 while (n > 0) {
1354 n--;
1355 if (constraints.tmp_regs[n].type) {
1356 ir_add_tmp(ctx, ref, ref, constraints.tmp_regs[n].num, constraints.tmp_regs[n]);
1357 } else {
1358 /* CPU specific constraints */
1359 ir_add_fixed_live_range(ctx, constraints.tmp_regs[n].reg,
1360 IR_START_LIVE_POS_FROM_REF(ref) + constraints.tmp_regs[n].start,
1361 IR_START_LIVE_POS_FROM_REF(ref) + constraints.tmp_regs[n].end);
1362 }
1363 }
1364 } else {
1365 def_flags = 0;
1366 constraints.def_reg = IR_REG_NONE;
1367 constraints.hints_count = 0;
1368 }
1369
1370 insn = &ctx->ir_base[ref];
1371 v = ctx->vregs[ref];
1372 if (v) {
1373 if (insn->op != IR_PHI) {
1374 ir_live_pos def_pos;
1375 ir_ref hint_ref = 0;
1376 ir_reg reg = constraints.def_reg;
1377
1378 if (reg != IR_REG_NONE) {
1379 def_pos = IR_SAVE_LIVE_POS_FROM_REF(ref);
1380 if (insn->op == IR_PARAM || insn->op == IR_RLOAD) {
1381 /* parameter register must be kept before it's copied */
1382 ir_add_fixed_live_range(ctx, reg, IR_START_LIVE_POS_FROM_REF(bb->start), def_pos);
1383 }
1384 } else if (def_flags & IR_DEF_REUSES_OP1_REG) {
1385 if (!IR_IS_CONST_REF(insn->op1) && ctx->vregs[insn->op1]) {
1386 hint_ref = insn->op1;
1387 }
1388 if (def_flags & IR_DEF_CONFLICTS_WITH_INPUT_REGS) {
1389 def_pos = IR_USE_LIVE_POS_FROM_REF(ref);
1390 } else {
1391 def_pos = IR_LOAD_LIVE_POS_FROM_REF(ref);
1392 }
1393 } else if (def_flags & IR_DEF_CONFLICTS_WITH_INPUT_REGS) {
1394 def_pos = IR_LOAD_LIVE_POS_FROM_REF(ref);
1395 } else {
1396 if (insn->op == IR_PARAM) {
1397 /* We may reuse parameter stack slot for spilling */
1398 ctx->live_intervals[v]->flags |= IR_LIVE_INTERVAL_MEM_PARAM;
1399 } else if (insn->op == IR_VLOAD) {
1400 /* Load may be fused into the usage instruction */
1401 ctx->live_intervals[v]->flags |= IR_LIVE_INTERVAL_MEM_LOAD;
1402 }
1403 def_pos = IR_DEF_LIVE_POS_FROM_REF(ref);
1404 }
1405 /* intervals[opd].setFrom(op.id) */
1406 ival = ir_fix_live_range(ctx, v,
1407 IR_START_LIVE_POS_FROM_REF(bb->start), def_pos);
1408 ival->type = insn->type;
1409 ir_add_use(ctx, ival, 0, def_pos, reg, def_flags, hint_ref);
1410 } else {
1411 /* PHIs inputs must not be processed */
1412 ival = ctx->live_intervals[v];
1413 if (UNEXPECTED(!ival)) {
1414 /* Dead PHI */
1415 ival = ir_add_live_range(ctx, v, IR_DEF_LIVE_POS_FROM_REF(ref), IR_USE_LIVE_POS_FROM_REF(ref));
1416 }
1417 ival->type = insn->type;
1418 ir_add_use(ctx, ival, 0, IR_DEF_LIVE_POS_FROM_REF(ref), IR_REG_NONE, IR_USE_SHOULD_BE_IN_REG, 0);
1419 continue;
1420 }
1421 }
1422
1423 IR_ASSERT(insn->op != IR_PHI && (!ctx->rules || !(ctx->rules[ref] & (IR_FUSED|IR_SKIPPED))));
1424 flags = ir_op_flags[insn->op];
1425 j = 1;
1426 p = insn->ops + 1;
1427 if (flags & (IR_OP_FLAG_CONTROL|IR_OP_FLAG_MEM|IR_OP_FLAG_PINNED)) {
1428 j++;
1429 p++;
1430 }
1431 for (; j <= insn->inputs_count; j++, p++) {
1432 ir_ref input = *p;
1433 ir_reg reg = (j < constraints.hints_count) ? constraints.hints[j] : IR_REG_NONE;
1434 ir_live_pos use_pos;
1435 ir_ref hint_ref = 0;
1436 uint32_t v;
1437
1438 if (input > 0) {
1439 v = ctx->vregs[input];
1440 if (v) {
1441 use_pos = IR_USE_LIVE_POS_FROM_REF(ref);
1442 if (reg != IR_REG_NONE) {
1443 use_pos = IR_LOAD_LIVE_POS_FROM_REF(ref);
1444 ir_add_fixed_live_range(ctx, reg, use_pos, use_pos + IR_USE_SUB_REF);
1445 } else if (def_flags & IR_DEF_REUSES_OP1_REG) {
1446 if (j == 1) {
1447 if (def_flags & IR_DEF_CONFLICTS_WITH_INPUT_REGS) {
1448 use_pos = IR_USE_LIVE_POS_FROM_REF(ref);
1449 } else {
1450 use_pos = IR_LOAD_LIVE_POS_FROM_REF(ref);
1451 }
1452 IR_ASSERT(ctx->vregs[ref]);
1453 hint_ref = ref;
1454 } else if (input == insn->op1) {
1455 /* Input is the same as "op1" */
1456 use_pos = IR_LOAD_LIVE_POS_FROM_REF(ref);
1457 }
1458 }
1459 if (!IS_LIVE_IN_BLOCK(v, b)) {
1460 /* live.add(opd) */
1461 SET_LIVE_IN_BLOCK(v, b);
1462 /* intervals[opd].addRange(b.from, op.id) */
1463 ival = ir_add_live_range(ctx, v, IR_START_LIVE_POS_FROM_REF(bb->start), use_pos);
1464 } else {
1465 ival = ctx->live_intervals[v];
1466 }
1467 ir_add_use(ctx, ival, j, use_pos, reg, IR_USE_FLAGS(def_flags, j), hint_ref);
1468 } else if (ctx->rules) {
1469 if (ctx->rules[input] & IR_FUSED) {
1470 ir_add_fusion_ranges(ctx, ref, input, bb, live_in_block, b);
1471 } else {
1472 if (ctx->rules[input] == (IR_SKIPPED|IR_RLOAD)) {
1473 ir_set_alocated_reg(ctx, ref, j, ctx->ir_base[input].op2);
1474 }
1475 if (reg != IR_REG_NONE) {
1476 use_pos = IR_LOAD_LIVE_POS_FROM_REF(ref);
1477 ir_add_fixed_live_range(ctx, reg, use_pos, use_pos + IR_USE_SUB_REF);
1478 }
1479 }
1480 }
1481 } else if (reg != IR_REG_NONE) {
1482 use_pos = IR_LOAD_LIVE_POS_FROM_REF(ref);
1483 ir_add_fixed_live_range(ctx, reg, use_pos, use_pos + IR_USE_SUB_REF);
1484 }
1485 }
1486 }
1487 }
1488
1489 if (ctx->entries) {
1490 for (i = 0; i < ctx->entries_count; i++) {
1491 b = ctx->entries[i];
1492 bb = &ctx->cfg_blocks[b];
1493 IR_ASSERT(bb->predecessors_count == 1);
1494 ir_add_osr_entry_loads(ctx, bb, live_outs[ctx->cfg_blocks_count + 1 + i], &live_lists, b);
1495 }
1496 if (ctx->osr_entry_loads) {
1497 ir_list_push((ir_list*)ctx->osr_entry_loads, 0);
1498 }
1499 }
1500
1501 ir_list_free(&live_lists);
1502 ir_mem_free(live_outs);
1503 ir_mem_free(live_in_block);
1504
1505 return 1;
1506}
1507
1508#endif
1509
1510/* Live Ranges coalescing */
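/*
 * Coalescing merges the intervals of a PHI result and its inputs (and of
 * IR_MAY_REUSE results with their op1) whenever the intervals do not
 * overlap, so that no move is needed at block boundaries; for the
 * remaining conflicting PHI inputs ir_add_phi_move() records that the
 * predecessor block needs an explicit DESSA move.
 */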
1511
1512static ir_live_pos ir_ivals_overlap(ir_live_range *lrg1, ir_live_range *lrg2)
1513{
1514 while (1) {
1515 if (lrg2->start < lrg1->end) {
1516 if (lrg1->start < lrg2->end) {
1517 return IR_MAX(lrg1->start, lrg2->start);
1518 } else {
1519 lrg2 = lrg2->next;
1520 if (!lrg2) {
1521 return 0;
1522 }
1523 }
1524 } else {
1525 lrg1 = lrg1->next;
1526 if (!lrg1) {
1527 return 0;
1528 }
1529 }
1530 }
1531}
1532
1533static ir_live_pos ir_vregs_overlap(ir_ctx *ctx, uint32_t r1, uint32_t r2)
1534{
1535 ir_live_interval *ival1 = ctx->live_intervals[r1];
1536 ir_live_interval *ival2 = ctx->live_intervals[r2];
1537
1538#if 0
1539 if (ival2->range.start >= ival1->end
1540 || ival1->range.start >= ival2->end) {
1541 return 0;
1542 }
1543#endif
1544 return ir_ivals_overlap(&ival1->range, &ival2->range);
1545}
1546
1547static bool ir_ivals_inside(ir_live_range *parent, ir_live_range *child)
1548{
1549 do {
1550 while (parent && parent->end < child->start) {
1551 parent = parent->next;
1552 }
1553 if (!parent || parent->start > child->start || parent->end < child->end) {
1554 return 0;
1555 }
1556 child = child->next;
1557 } while (child);
1558 return 1;
1559}
1560
1561static bool ir_vregs_inside(ir_ctx *ctx, uint32_t parent, uint32_t child)
1562{
1563 ir_live_interval *child_ival = ctx->live_intervals[child];
1564 ir_live_interval *parent_ival = ctx->live_intervals[parent];
1565
1566 if ((child_ival->flags | parent_ival->flags) & IR_LIVE_INTERVAL_COALESCED) {
1567 // TODO: Support valid cases with already coalesced "parent_ival"
1568 return 0;
1569 }
1570#if 0
1571 if (child_ival->end >= parent_ival->end) {
1572 return 0;
1573 }
1574#endif
1575 return ir_ivals_inside(&parent_ival->range, &child_ival->range);
1576}
1577
1578static void ir_vregs_join(ir_ctx *ctx, uint32_t r1, uint32_t r2)
1579{
1580 ir_live_interval *ival = ctx->live_intervals[r2];
1581 ir_live_range *live_range = &ival->range;
1582 ir_live_range *next;
1583 ir_use_pos *use_pos, *next_pos, **prev;
1584
1585#if 0
1586 fprintf(stderr, "COALESCE %d -> %d\n", r2, r1);
1587#endif
1588
1589 ir_add_live_range(ctx, r1, live_range->start, live_range->end);
1590 live_range = live_range->next;
1591 while (live_range) {
1592 next = live_range->next;
1593 live_range->next = ctx->unused_ranges;
1594 ctx->unused_ranges = live_range;
1595 ir_add_live_range(ctx, r1, live_range->start, live_range->end);
1596 live_range = next;
1597 }
1598
1599 /* merge sorted use_pos lists */
1600 prev = &ctx->live_intervals[r1]->use_pos;
1601 use_pos = ival->use_pos;
1602 while (use_pos) {
1603 if (use_pos->hint_ref > 0 && ctx->vregs[use_pos->hint_ref] == r1) {
1604 use_pos->hint_ref = 0;
1605 }
1606 while (*prev && ((*prev)->pos < use_pos->pos ||
1607 ((*prev)->pos == use_pos->pos &&
1608 (use_pos->op_num == 0 || (*prev)->op_num < use_pos->op_num)))) {
1609 if ((*prev)->hint_ref > 0 && ctx->vregs[(*prev)->hint_ref] == r2) {
1610 (*prev)->hint_ref = 0;
1611 }
1612 prev = &(*prev)->next;
1613 }
1614 next_pos = use_pos->next;
1615 use_pos->next = *prev;
1616 *prev = use_pos;
1617 prev = &use_pos->next;
1618 use_pos = next_pos;
1619 }
1620 use_pos = *prev;
1621 while (use_pos) {
1622 if (use_pos->hint_ref > 0 && ctx->vregs[use_pos->hint_ref] == r2) {
1623 use_pos->hint_ref = 0;
1624 }
1625 use_pos = use_pos->next;
1626 }
1627
1628 ctx->live_intervals[r1]->flags |=
1629 IR_LIVE_INTERVAL_COALESCED | (ival->flags & (IR_LIVE_INTERVAL_HAS_HINT_REGS|IR_LIVE_INTERVAL_HAS_HINT_REFS));
1630 if (ctx->ir_base[IR_LIVE_POS_TO_REF(ctx->live_intervals[r1]->use_pos->pos)].op != IR_VLOAD) {
1631 ctx->live_intervals[r1]->flags &= ~IR_LIVE_INTERVAL_MEM_LOAD;
1632 }
1633 ctx->live_intervals[r2] = NULL;
1634
1635 // TODO: remember to reuse ???
1636 //ir_mem_free(ival);
1637}
1638
1639static void ir_vregs_coalesce(ir_ctx *ctx, uint32_t v1, uint32_t v2, ir_ref from, ir_ref to)
1640{
1641 ir_ref i;
1642 uint16_t f1 = ctx->live_intervals[v1]->flags;
1643 uint16_t f2 = ctx->live_intervals[v2]->flags;
1644
1645#if 0
1646 if (ctx->binding) {
1647 ir_ref b1 = ir_binding_find(ctx, from);
1648 ir_ref b2 = ir_binding_find(ctx, to);
1649 IR_ASSERT(b1 == b2);
1650 }
1651#endif
1652 if ((f1 & IR_LIVE_INTERVAL_COALESCED) && !(f2 & IR_LIVE_INTERVAL_COALESCED)) {
1653 ir_vregs_join(ctx, v1, v2);
1654 ctx->vregs[to] = v1;
1655 } else if ((f2 & IR_LIVE_INTERVAL_COALESCED) && !(f1 & IR_LIVE_INTERVAL_COALESCED)) {
1656 ir_vregs_join(ctx, v2, v1);
1657 ctx->vregs[from] = v2;
1658 } else if (from < to) {
1659 ir_vregs_join(ctx, v1, v2);
1660 if (f2 & IR_LIVE_INTERVAL_COALESCED) {
1661 for (i = 1; i < ctx->insns_count; i++) {
1662 if (ctx->vregs[i] == v2) {
1663 ctx->vregs[i] = v1;
1664 }
1665 }
1666 } else {
1667 ctx->vregs[to] = v1;
1668 }
1669 } else {
1670 ir_vregs_join(ctx, v2, v1);
1671 if (f1 & IR_LIVE_INTERVAL_COALESCED) {
1672 for (i = 1; i < ctx->insns_count; i++) {
1673 if (ctx->vregs[i] == v1) {
1674 ctx->vregs[i] = v2;
1675 }
1676 }
1677 } else {
1678 ctx->vregs[from] = v2;
1679 }
1680 }
1681}
1682
1683static void ir_add_phi_move(ir_ctx *ctx, uint32_t b, ir_ref from, ir_ref to)
1684{
1685 if (IR_IS_CONST_REF(from) || ctx->vregs[from] != ctx->vregs[to]) {
1686 ctx->cfg_blocks[b].flags &= ~IR_BB_EMPTY;
1687 ctx->cfg_blocks[b].flags |= IR_BB_DESSA_MOVES;
1688 ctx->flags2 |= IR_LR_HAVE_DESSA_MOVES;
1689#if 0
1690 fprintf(stderr, "BB%d: MOV %d -> %d\n", b, from, to);
1691#endif
1692 }
1693}
1694
1695typedef struct _ir_coalesce_block {
1696 uint32_t b;
1697 uint32_t loop_depth;
1698} ir_coalesce_block;
1699
1700static int ir_block_cmp(const void *b1, const void *b2)
1701{
1702 ir_coalesce_block *d1 = (ir_coalesce_block*)b1;
1703 ir_coalesce_block *d2 = (ir_coalesce_block*)b2;
1704
1705 if (d1->loop_depth > d2->loop_depth) {
1706 return -1;
1707 } else if (d1->loop_depth == d2->loop_depth) {
1708 if (d1->b < d2->b) {
1709 return -1;
1710 } else {
1711 return 1;
1712 }
1713 } else {
1714 return 1;
1715 }
1716}
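/*
 * Sorting note: ir_block_cmp() orders blocks by decreasing loop depth and
 * breaks ties by increasing block number, so coalescing decisions are made
 * for the hottest (innermost) blocks first, and the qsort() result is
 * deterministic even though qsort() itself is not stable.
 */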
1717
1718static void ir_swap_operands(ir_ctx *ctx, ir_ref i, ir_insn *insn)
1719{
1720 ir_live_pos pos = IR_USE_LIVE_POS_FROM_REF(i);
1721 ir_live_pos load_pos = IR_LOAD_LIVE_POS_FROM_REF(i);
1722 ir_live_interval *ival;
1723 ir_live_range *r;
1724 ir_use_pos *p, *p1 = NULL, *p2 = NULL;
1725 ir_ref tmp;
1726
1727 tmp = insn->op1;
1728 insn->op1 = insn->op2;
1729 insn->op2 = tmp;
1730
1731 ival = ctx->live_intervals[ctx->vregs[insn->op1]];
1732 p = ival->use_pos;
1733 while (p) {
1734 if (p->pos == pos) {
1735 p->pos = load_pos;
1736 p->op_num = 1;
1737 p1 = p;
1738 break;
1739 }
1740 p = p->next;
1741 }
1742
1743 ival = ctx->live_intervals[ctx->vregs[i]];
1744 p = ival->use_pos;
1745 while (p) {
1746 if (p->pos == load_pos) {
1747 p->hint_ref = insn->op1;
1748 break;
1749 }
1750 p = p->next;
1751 }
1752
1753 if (insn->op2 > 0 && ctx->vregs[insn->op2]) {
1754 ival = ctx->live_intervals[ctx->vregs[insn->op2]];
1755 r = &ival->range;
1756 while (r) {
1757 if (r->end == load_pos) {
1758 r->end = pos;
1759 if (!r->next) {
1760 ival->end = pos;
1761 }
1762 break;
1763 }
1764 r = r->next;
1765 }
1766 p = ival->use_pos;
1767 while (p) {
1768 if (p->pos == load_pos) {
1769 p->pos = pos;
1770 p->op_num = 2;
1771 p2 = p;
1772 break;
1773 }
1774 p = p->next;
1775 }
1776 }
1777 if (p1 && p2) {
1778 uint8_t tmp = p1->flags;
1779 p1->flags = p2->flags;
1780 p2->flags = tmp;
1781 }
1782}
1783
1784static int ir_hint_conflict(ir_ctx *ctx, ir_ref ref, int use, int def)
1785{
1786 ir_use_pos *p;
1787 ir_reg r1 = IR_REG_NONE;
1788 ir_reg r2 = IR_REG_NONE;
1789
1790 p = ctx->live_intervals[use]->use_pos;
1791 while (p) {
1792 if (IR_LIVE_POS_TO_REF(p->pos) == ref) {
1793 break;
1794 }
1795 if (p->hint != IR_REG_NONE) {
1796 r1 = p->hint;
1797 }
1798 p = p->next;
1799 }
1800
1801 p = ctx->live_intervals[def]->use_pos;
1802 while (p) {
1803 if (IR_LIVE_POS_TO_REF(p->pos) > ref) {
1804 if (p->hint != IR_REG_NONE) {
1805 r2 = p->hint;
1806 break;
1807 }
1808 }
1809 p = p->next;
1810 }
1811 return r1 != r2 && r1 != IR_REG_NONE && r2 != IR_REG_NONE;
1812}
1813
1814static int ir_try_swap_operands(ir_ctx *ctx, ir_ref i, ir_insn *insn)
1815{
1816 if (ctx->vregs[insn->op1]
1817 && ctx->vregs[insn->op1] != ctx->vregs[i]
1818 && !ir_vregs_overlap(ctx, ctx->vregs[insn->op1], ctx->vregs[i])
1819 && !ir_hint_conflict(ctx, i, ctx->vregs[insn->op1], ctx->vregs[i])) {
1820 /* pass */
1821 } else {
1822 if (ctx->vregs[insn->op2] && ctx->vregs[insn->op2] != ctx->vregs[i]) {
1823 ir_live_pos pos = IR_USE_LIVE_POS_FROM_REF(i);
1824 ir_live_pos load_pos = IR_LOAD_LIVE_POS_FROM_REF(i);
1825 ir_live_interval *ival = ctx->live_intervals[ctx->vregs[insn->op2]];
1826 ir_live_range *r = &ival->range;
1827
1828 if ((ival->flags & IR_LIVE_INTERVAL_MEM_PARAM) && ctx->use_lists[insn->op2].count == 1) {
1829 return 0;
1830 }
1831 while (r) {
1832 if (r->end == pos) {
1833 r->end = load_pos;
1834 if (!r->next) {
1835 ival->end = load_pos;
1836 }
1837 if (!ir_vregs_overlap(ctx, ctx->vregs[insn->op2], ctx->vregs[i])
1838 && !ir_hint_conflict(ctx, i, ctx->vregs[insn->op2], ctx->vregs[i])) {
1839 ir_swap_operands(ctx, i, insn);
1840 return 1;
1841 } else {
1842 r->end = pos;
1843 if (!r->next) {
1844 ival->end = pos;
1845 }
1846 }
1847 break;
1848 }
1849 r = r->next;
1850 }
1851 }
1852 }
1853 return 0;
1854}
1855
1856int ir_coalesce(ir_ctx *ctx)
1857{
1858 uint32_t b, n, succ, pred_b, count = 0;
1859 ir_ref *p, use, input, k, j;
1860 ir_block *bb, *succ_bb;
1861 ir_use_list *use_list;
1862 ir_insn *insn;
1863 ir_bitset visited;
1864 ir_coalesce_block *list;
1865 bool compact = 0;
1866
1867 /* Collect a list of blocks which are predecessors of blocks with phi functions */
1868 list = ir_mem_malloc(sizeof(ir_coalesce_block) * ctx->cfg_blocks_count);
1869 visited = ir_bitset_malloc(ctx->cfg_blocks_count + 1);
1870 for (b = 1, bb = &ctx->cfg_blocks[1]; b <= ctx->cfg_blocks_count; b++, bb++) {
1871 IR_ASSERT(!(bb->flags & IR_BB_UNREACHABLE));
1872 if (bb->flags & IR_BB_HAS_PHI) {
1873 k = bb->predecessors_count;
1874 if (k > 1) {
1875 use_list = &ctx->use_lists[bb->start];
1876 n = use_list->count;
1877 IR_ASSERT(k == ctx->ir_base[bb->start].inputs_count);
1878 for (p = &ctx->use_edges[use_list->refs]; n > 0; p++, n--) {
1879 use = *p;
1880 insn = &ctx->ir_base[use];
1881 if (insn->op == IR_PHI) {
1882 do {
1883 k--;
1884 pred_b = ctx->cfg_edges[bb->predecessors + k];
1885 if (!ir_bitset_in(visited, pred_b)) {
1886 ir_bitset_incl(visited, pred_b);
1887 list[count].b = pred_b;
1888 list[count].loop_depth = ctx->cfg_blocks[pred_b].loop_depth;
1889 count++;
1890 }
1891 } while (k > 0);
1892 break;
1893 }
1894 }
1895 }
1896 }
1897 }
1898 ir_mem_free(visited);
1899
1900 /* Sort blocks according to their loop depth */
1901 qsort(list, count, sizeof(ir_coalesce_block), ir_block_cmp);
1902
1903 while (count > 0) {
1904
1905 count--;
1906 b = list[count].b;
1907 bb = &ctx->cfg_blocks[b];
1908 IR_ASSERT(bb->successors_count == 1);
1909 succ = ctx->cfg_edges[bb->successors];
1910 succ_bb = &ctx->cfg_blocks[succ];
1911 IR_ASSERT(succ_bb->predecessors_count > 1);
1912 k = ir_phi_input_number(ctx, succ_bb, b);
1913 use_list = &ctx->use_lists[succ_bb->start];
1914 n = use_list->count;
1915 for (p = &ctx->use_edges[use_list->refs]; n > 0; p++, n--) {
1916 use = *p;
1917 insn = &ctx->ir_base[use];
1918 if (insn->op == IR_PHI) {
1919 input = ir_insn_op(insn, k);
1920 if (input > 0 && ctx->vregs[input]) {
1921 uint32_t v1 = ctx->vregs[input];
1922 uint32_t v2 = ctx->vregs[use];
1923
1924 if (v1 == v2) {
1925 /* already coalesced */
1926 } else {
1927 if (!ir_vregs_overlap(ctx, v1, v2)) {
1928 ir_vregs_coalesce(ctx, v1, v2, input, use);
1929 compact = 1;
1930 } else {
1931#if 1
1932 if (ctx->rules && (ctx->rules[input] & IR_MAY_SWAP)) {
1933 ir_insn *input_insn = &ctx->ir_base[input];
1934
1935 IR_ASSERT(ir_op_flags[input_insn->op] & IR_OP_FLAG_COMMUTATIVE);
1936 if (input_insn->op2 == use
1937 && input_insn->op1 != use
1938 && (ctx->live_intervals[v1]->use_pos->flags & IR_DEF_REUSES_OP1_REG)) {
1939 ir_live_range *r = &ctx->live_intervals[v2]->range;
1940
1941 do {
1942 if (r->end == IR_USE_LIVE_POS_FROM_REF(input)) {
1943 break;
1944 }
1945 r = r->next;
1946 } while (r);
1947 if (r) {
1948 r->end = IR_LOAD_LIVE_POS_FROM_REF(input);
1949 if (!r->next) {
1950 ctx->live_intervals[v2]->end = IR_LOAD_LIVE_POS_FROM_REF(input);
1951 }
1952 if (ir_vregs_overlap(ctx, v1, v2)) {
1953 r->end = IR_USE_LIVE_POS_FROM_REF(input);
1954 if (!r->next) {
1955 ctx->live_intervals[v2]->end = IR_USE_LIVE_POS_FROM_REF(input);
1956 }
1957 } else {
1958 ir_swap_operands(ctx, input, input_insn);
1959 IR_ASSERT(!ir_vregs_overlap(ctx, v1, v2));
1960 ir_vregs_coalesce(ctx, v1, v2, input, use);
1961 compact = 1;
1962 continue;
1963 }
1964 }
1965 }
1966 }
1967#endif
1968 ir_add_phi_move(ctx, b, input, use);
1969 }
1970 }
1971 } else {
1972 /* Move for constant input */
1973 ir_add_phi_move(ctx, b, input, use);
1974 }
1975 }
1976 }
1977 }
1978 ir_mem_free(list);
1979
1980 ir_hint_propagation(ctx);
1981
1982 if (ctx->rules) {
1983 /* try to swap operands of commutative instructions for better register allocation */
1984 uint32_t *rule = ctx->rules + 1;
1985 ir_ref i;
1986
1987 for (i = 1; i < ctx->insns_count; rule++, i++) {
1988 if ((*rule) & (IR_MAY_SWAP|IR_MAY_REUSE)) {
1989 insn = &ctx->ir_base[i];
1990 IR_ASSERT(ctx->vregs[i]);
1991 if ((*rule) & IR_MAY_SWAP) {
1992 IR_ASSERT(ir_op_flags[insn->op] & IR_OP_FLAG_COMMUTATIVE);
1993 if (ctx->live_intervals[ctx->vregs[i]]->use_pos
1994 && (ctx->live_intervals[ctx->vregs[i]]->use_pos->flags & IR_DEF_REUSES_OP1_REG)
1995 && insn->op2 > 0
1996 && insn->op1 > 0
1997 && insn->op1 != insn->op2) {
1998 ir_try_swap_operands(ctx, i, insn);
1999 }
2000 } else {
2001 IR_ASSERT((*rule) & IR_MAY_REUSE);
2002 if (insn->op1 > 0
2003 && ctx->vregs[insn->op1]
2004 && ctx->vregs[i] != ctx->vregs[insn->op1]) {
2005 if (ir_vregs_inside(ctx, ctx->vregs[insn->op1], ctx->vregs[i])) {
2006 if (ctx->binding) {
2007 ir_ref b1 = ir_binding_find(ctx, i);
2008 ir_ref b2 = ir_binding_find(ctx, insn->op1);
2009 if (b1 && b1 != b2) {
2010 continue;
2011 }
2012 }
2013 ir_vregs_coalesce(ctx, ctx->vregs[i], ctx->vregs[insn->op1], i, insn->op1);
2014 compact = 1;
2015 }
2016 }
2017 }
2018 }
2019 }
2020 }
2021
2022 if (compact) {
2023 ir_ref i, n;
2024 uint32_t *xlat = ir_mem_malloc((ctx->vregs_count + 1) * sizeof(uint32_t));
2025
2026 for (i = 1, n = 1; i <= ctx->vregs_count; i++) {
2027 if (ctx->live_intervals[i]) {
2028 xlat[i] = n;
2029 if (i != n) {
2030 ctx->live_intervals[n] = ctx->live_intervals[i];
2031 ctx->live_intervals[n]->vreg = n;
2032 }
2033 n++;
2034 }
2035 }
2036 n--;
2037 if (n != ctx->vregs_count) {
2038 j = ctx->vregs_count - n;
2039 /* vregs + tmp + fixed + SCRATCH + ALL */
2040 for (i = n + 1; i <= n + IR_REG_NUM + 2; i++) {
2041 ctx->live_intervals[i] = ctx->live_intervals[i + j];
2042 if (ctx->live_intervals[i]) {
2043 ctx->live_intervals[i]->vreg = i;
2044 }
2045 }
2046 for (j = 1; j < ctx->insns_count; j++) {
2047 if (ctx->vregs[j]) {
2048 ctx->vregs[j] = xlat[ctx->vregs[j]];
2049 }
2050 }
2051 ctx->vregs_count = n;
2052 }
2053 ir_mem_free(xlat);
2054 }
2055
2056 return 1;
2057}
2058
2059/* SSA Deconstruction */
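/*
 * ir_compute_dessa_moves() only marks the predecessor blocks that will
 * need copies (IR_BB_DESSA_MOVES); the copies themselves are ordered and
 * emitted later by ir_gen_dessa_moves(). The assertions in
 * ir_gen_dessa_moves() rely on every PHI predecessor having a single
 * successor, i.e. critical edges are expected to have been split during
 * CFG construction.
 */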
2060
2061int ir_compute_dessa_moves(ir_ctx *ctx)
2062{
2063 uint32_t b, n;
2064 ir_ref j, k, *p, use;
2065 ir_block *bb;
2066 ir_use_list *use_list;
2067 ir_insn *insn;
2068
2069 for (b = 1, bb = &ctx->cfg_blocks[1]; b <= ctx->cfg_blocks_count; b++, bb++) {
2070 IR_ASSERT(!(bb->flags & IR_BB_UNREACHABLE));
2071 k = bb->predecessors_count;
2072 if (k > 1) {
2073 use_list = &ctx->use_lists[bb->start];
2074 n = use_list->count;
2075 if (n > 1) {
2076 IR_ASSERT(k == ctx->ir_base[bb->start].inputs_count);
2077 k++;
2078 for (p = &ctx->use_edges[use_list->refs]; n > 0; p++, n--) {
2079 use = *p;
2080 insn = &ctx->ir_base[use];
2081 if (insn->op == IR_PHI) {
2082 for (j = 2; j <= k; j++) {
2083 if (IR_IS_CONST_REF(ir_insn_op(insn, j)) || ctx->vregs[ir_insn_op(insn, j)] != ctx->vregs[use]) {
2084 int pred = ctx->cfg_edges[bb->predecessors + (j-2)];
2085 ctx->cfg_blocks[pred].flags &= ~IR_BB_EMPTY;
2086 						ctx->cfg_blocks[pred].flags |= IR_BB_DESSA_MOVES;
2087 						ctx->flags2 |= IR_LR_HAVE_DESSA_MOVES;
2088 					}
2089 }
2090 }
2091 }
2092 }
2093 }
2094 }
2095 return 1;
2096}
2097
2098/*
2099 * Parallel copy sequentialization algorithm
2100 *
2101  * The implementation is based on algorithm 1 described in
2102 * "Revisiting Out-of-SSA Translation for Correctness, Code Quality and Efficiency",
2103 * Benoit Boissinot, Alain Darte, Fabrice Rastello, Benoit Dupont de Dinechin, Christophe Guillon.
2104 * 2009 International Symposium on Code Generation and Optimization, Seattle, WA, USA, 2009,
2105 * pp. 114-125, doi: 10.1109/CGO.2009.19.
2106 */
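/*
 * For illustration (an editorial sketch of the algorithm above, not part of
 * the referenced paper's text): a two-element copy cycle produced by PHIs,
 * {R1 <- R2, R2 <- R1}, has every destination also acting as a source, so
 * "ready" starts empty and one copy is broken through the temporary
 * (dst == 0), e.g.:
 *
 *   tmp <- R1        ; break the cycle (emit_copy with destination 0)
 *   R1  <- R2        ; R1 may now be overwritten
 *   R2  <- tmp
 *
 * An acyclic chain such as {R3 <- R2, R2 <- R1} needs no temporary: R3 <- R2
 * is emitted first, which makes R2 "ready" to receive R1.
 */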
2107int ir_gen_dessa_moves(ir_ctx *ctx, uint32_t b, emit_copy_t emit_copy)
2108{
2109 uint32_t succ, k, n = 0;
2110 ir_block *bb, *succ_bb;
2111 ir_use_list *use_list;
2112 ir_ref *loc, *pred, *src, *dst, i, *p, ref, input;
2113 ir_ref s, d;
2114 ir_insn *insn;
2115 uint32_t len;
2116 ir_bitset todo, ready;
2117 bool have_constants_or_addresses = 0;
2118
2119 bb = &ctx->cfg_blocks[b];
2120 if (!(bb->flags & IR_BB_DESSA_MOVES)) {
2121 return 0;
2122 }
2123 IR_ASSERT(bb->successors_count == 1);
2124 succ = ctx->cfg_edges[bb->successors];
2125 succ_bb = &ctx->cfg_blocks[succ];
2126 IR_ASSERT(succ_bb->predecessors_count > 1);
2127 use_list = &ctx->use_lists[succ_bb->start];
2128
2129 k = ir_phi_input_number(ctx, succ_bb, b);
2130
2131 loc = ir_mem_malloc((ctx->vregs_count + 1) * 4 * sizeof(ir_ref));
2132 pred = loc + ctx->vregs_count + 1;
2133 src = pred + ctx->vregs_count + 1;
2134 dst = src + ctx->vregs_count + 1;
2135 len = ir_bitset_len(ctx->vregs_count + 1);
2136 todo = ir_bitset_malloc(ctx->vregs_count + 1);
2137
2138 for (i = use_list->count, p = &ctx->use_edges[use_list->refs]; i > 0; p++, i--) {
2139 ref = *p;
2140 insn = &ctx->ir_base[ref];
2141 if (insn->op == IR_PHI) {
2142 input = ir_insn_op(insn, k);
2143 if (IR_IS_CONST_REF(input) || !ctx->vregs[input]) {
2144 have_constants_or_addresses = 1;
2145 } else if (ctx->vregs[input] != ctx->vregs[ref]) {
2146 s = ctx->vregs[input];
2147 d = ctx->vregs[ref];
2148 src[s] = input;
2149 dst[d] = ref;
2150 loc[d] = pred[s] = 0;
2151 ir_bitset_incl(todo, d);
2152 n++;
2153 }
2154 }
2155 }
2156
2157 if (n > 0) {
2158 src[0] = dst[0] = 0;
2159 ready = ir_bitset_malloc(ctx->vregs_count + 1);
2160 IR_BITSET_FOREACH(todo, len, d) {
2161 ref = dst[d];
2162 insn = &ctx->ir_base[ref];
2163 IR_ASSERT(insn->op == IR_PHI);
2164 input = ir_insn_op(insn, k);
2165 s = ctx->vregs[input];
2166 loc[s] = s;
2167 			pred[d] = s;
2168 		} IR_BITSET_FOREACH_END();
2169 
2170 		IR_BITSET_FOREACH(todo, len, i) {
2171 if (!loc[i]) {
2172 ir_bitset_incl(ready, i);
2173 			}
2174 		} IR_BITSET_FOREACH_END();
2175 
2176 while (1) {
2177 ir_ref a, b, c;
2178
2179 while ((b = ir_bitset_pop_first(ready, len)) >= 0) {
2180 a = pred[b];
2181 c = loc[a];
2182 emit_copy(ctx, ctx->ir_base[dst[b]].type, src[c], dst[b]);
2183 ir_bitset_excl(todo, b);
2184 loc[a] = b;
2185 src[b] = dst[b];
2186 if (a == c && pred[a]) {
2187 ir_bitset_incl(ready, a);
2188 }
2189 }
2190 b = ir_bitset_pop_first(todo, len);
2191 if (b < 0) {
2192 break;
2193 }
2194 IR_ASSERT(b != loc[pred[b]]);
2195 emit_copy(ctx, ctx->ir_base[src[b]].type, src[b], 0);
2196 loc[b] = 0;
2197 ir_bitset_incl(ready, b);
2198 }
2199
2200 ir_mem_free(ready);
2201 }
2202
2203 ir_mem_free(todo);
2204 ir_mem_free(loc);
2205
2206 if (have_constants_or_addresses) {
2207 for (i = use_list->count, p = &ctx->use_edges[use_list->refs]; i > 0; p++, i--) {
2208 ref = *p;
2209 insn = &ctx->ir_base[ref];
2210 if (insn->op == IR_PHI) {
2211 input = ir_insn_op(insn, k);
2212 if (IR_IS_CONST_REF(input) || !ctx->vregs[input]) {
2213 emit_copy(ctx, insn->type, input, ref);
2214 }
2215 }
2216 }
2217 }
2218
2219 return 1;
2220}
2221
2222/* Linear Scan Register Allocation */
2223
2224#ifdef IR_DEBUG
2225# define IR_LOG_LSRA(action, ival, comment) do { \
2226 if (ctx->flags & IR_DEBUG_RA) { \
2227 ir_live_interval *_ival = (ival); \
2228 ir_live_pos _start = _ival->range.start; \
2229 ir_live_pos _end = _ival->end; \
2230 fprintf(stderr, action " R%d [%d.%d...%d.%d)" comment "\n", \
2231 (_ival->flags & IR_LIVE_INTERVAL_TEMP) ? 0 : _ival->vreg, \
2232 IR_LIVE_POS_TO_REF(_start), IR_LIVE_POS_TO_SUB_REF(_start), \
2233 IR_LIVE_POS_TO_REF(_end), IR_LIVE_POS_TO_SUB_REF(_end)); \
2234 } \
2235 } while (0)
2236# define IR_LOG_LSRA_ASSIGN(action, ival, comment) do { \
2237 if (ctx->flags & IR_DEBUG_RA) { \
2238 ir_live_interval *_ival = (ival); \
2239 ir_live_pos _start = _ival->range.start; \
2240 ir_live_pos _end = _ival->end; \
2241 fprintf(stderr, action " R%d [%d.%d...%d.%d) to %s" comment "\n", \
2242 (_ival->flags & IR_LIVE_INTERVAL_TEMP) ? 0 : _ival->vreg, \
2243 IR_LIVE_POS_TO_REF(_start), IR_LIVE_POS_TO_SUB_REF(_start), \
2244 IR_LIVE_POS_TO_REF(_end), IR_LIVE_POS_TO_SUB_REF(_end), \
2245 ir_reg_name(_ival->reg, _ival->type)); \
2246 } \
2247 } while (0)
2248# define IR_LOG_LSRA_SPLIT(ival, pos) do { \
2249 if (ctx->flags & IR_DEBUG_RA) { \
2250 ir_live_interval *_ival = (ival); \
2251 ir_live_pos _start = _ival->range.start; \
2252 ir_live_pos _end = _ival->end; \
2253 ir_live_pos _pos = (pos); \
2254 fprintf(stderr, " ---- Split R%d [%d.%d...%d.%d) at %d.%d\n", \
2255 (_ival->flags & IR_LIVE_INTERVAL_TEMP) ? 0 : _ival->vreg, \
2256 IR_LIVE_POS_TO_REF(_start), IR_LIVE_POS_TO_SUB_REF(_start), \
2257 IR_LIVE_POS_TO_REF(_end), IR_LIVE_POS_TO_SUB_REF(_end), \
2258 IR_LIVE_POS_TO_REF(_pos), IR_LIVE_POS_TO_SUB_REF(_pos)); \
2259 } \
2260 } while (0)
2261# define IR_LOG_LSRA_CONFLICT(action, ival, pos) do { \
2262 if (ctx->flags & IR_DEBUG_RA) { \
2263 ir_live_interval *_ival = (ival); \
2264 ir_live_pos _start = _ival->range.start; \
2265 ir_live_pos _end = _ival->end; \
2266 ir_live_pos _pos = (pos); \
2267 fprintf(stderr, action " R%d [%d.%d...%d.%d) assigned to %s at %d.%d\n", \
2268 (_ival->flags & IR_LIVE_INTERVAL_TEMP) ? 0 : _ival->vreg, \
2269 IR_LIVE_POS_TO_REF(_start), IR_LIVE_POS_TO_SUB_REF(_start), \
2270 IR_LIVE_POS_TO_REF(_end), IR_LIVE_POS_TO_SUB_REF(_end), \
2271 ir_reg_name(_ival->reg, _ival->type), \
2272 IR_LIVE_POS_TO_REF(_pos), IR_LIVE_POS_TO_SUB_REF(_pos)); \
2273 } \
2274 } while (0)
2275#else
2276# define IR_LOG_LSRA(action, ival, comment)
2277# define IR_LOG_LSRA_ASSIGN(action, ival, comment)
2278# define IR_LOG_LSRA_SPLIT(ival, pos)
2279# define IR_LOG_LSRA_CONFLICT(action, ival, pos)
2280#endif
2281
2282static bool ir_ival_covers(ir_live_interval *ival, ir_live_pos position)
2283{
2284 ir_live_range *live_range = &ival->range;
2285
2286 do {
2287 if (position < live_range->end) {
2288 return position >= live_range->start;
2289 }
2290 live_range = live_range->next;
2291 } while (live_range);
2292
2293 return 0;
2294}
2295
2296static bool ir_ival_has_hole_between(ir_live_interval *ival, ir_live_pos from, ir_live_pos to)
2297{
2298 ir_live_range *r = &ival->range;
2299
2300 while (r) {
2301 if (from < r->start) {
2302 return 1;
2303 } else if (to <= r->end) {
2304 return 0;
2305 }
2306 r = r->next;
2307 }
2308 return 0;
2309}
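/*
 * Example: for an interval with the ranges [8..16) and [24..40),
 * ir_ival_covers() is true at position 10 and false at 20, and
 * ir_ival_has_hole_between(ival, 10, 30) returns 1 because the lifetime
 * hole [16..24) lies between the two positions.
 */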
2310
2311
2312static ir_live_pos ir_last_use_pos_before(ir_live_interval *ival, ir_live_pos pos, uint8_t flags)
2313{
2314 ir_live_pos ret = 0;
2315 ir_use_pos *p = ival->use_pos;
2316
2317 while (p && p->pos <= pos) {
2318 if (p->flags & flags) {
2319 ret = p->pos;
2320 }
2321 p = p->next;
2322 }
2323 return ret;
2324}
2325
2326static ir_live_pos ir_first_use_pos_after(ir_live_interval *ival, ir_live_pos pos, uint8_t flags)
2327{
2328 ir_use_pos *p = ival->use_pos;
2329
2330 while (p && p->pos < pos) {
2331 p = p->next;
2332 }
2333 if (p && p->pos == pos && p->op_num != 0) {
2334 p = p->next;
2335 }
2336 while (p && !(p->flags & flags)) {
2337 p = p->next;
2338 }
2339 return p ? p->pos : 0x7fffffff;
2340}
2341
2342static ir_live_pos ir_first_use_pos(ir_live_interval *ival, uint8_t flags)
2343{
2344 ir_use_pos *p = ival->use_pos;
2345
2346 while (p && !(p->flags & flags)) {
2347 p = p->next;
2348 }
2349 return p ? p->pos : 0x7fffffff;
2350}
2351
2352static ir_block *ir_block_from_live_pos(ir_ctx *ctx, ir_live_pos pos)
2353{
2354 	ir_ref ref = IR_LIVE_POS_TO_REF(pos);
2355 	uint32_t b = ctx->cfg_map[ref];
2356
2357 while (!b) {
2358 ref--;
2359 IR_ASSERT(ref > 0);
2360 b = ctx->cfg_map[ref];
2361 }
2362 IR_ASSERT(b <= ctx->cfg_blocks_count);
2363 return &ctx->cfg_blocks[b];
2364}
2365
2366static ir_live_pos ir_find_optimal_split_position(ir_ctx *ctx, ir_live_interval *ival, ir_live_pos min_pos, ir_live_pos max_pos, bool prefer_max)
2367{
2368 ir_block *min_bb, *max_bb;
2369
2370 if (min_pos == max_pos) {
2371 return max_pos;
2372 }
2373
2374 IR_ASSERT(min_pos < max_pos);
2375 IR_ASSERT(min_pos >= ival->range.start);
2376 IR_ASSERT(max_pos < ival->end);
2377
2378 min_bb = ir_block_from_live_pos(ctx, min_pos);
2379 max_bb = ir_block_from_live_pos(ctx, max_pos);
2380
2381 if (min_bb == max_bb
2382 || ir_ival_has_hole_between(ival, min_pos, max_pos)) { // TODO: ???
2383 return (prefer_max) ? max_pos : min_pos;
2384 }
2385
2386 if (max_bb->loop_depth > 0) {
2387 /* Split at the end of the loop entry */
2388 do {
2389 ir_block *bb;
2390
2391 if (max_bb->flags & IR_BB_LOOP_HEADER) {
2392 bb = max_bb;
2393 } else {
2394 IR_ASSERT(max_bb->loop_header);
2395 bb = &ctx->cfg_blocks[max_bb->loop_header];
2396 }
2397 bb = &ctx->cfg_blocks[bb->idom];
2398 if (IR_DEF_LIVE_POS_FROM_REF(bb->end) < min_pos) {
2399 break;
2400 }
2401 max_bb = bb;
2402 } while (max_bb->loop_depth > 0);
2403
2404 if (IR_DEF_LIVE_POS_FROM_REF(max_bb->end) < max_pos) {
2405 return IR_DEF_LIVE_POS_FROM_REF(max_bb->end);
2406 }
2407 }
2408
2409 if (IR_LOAD_LIVE_POS_FROM_REF(max_bb->start) > min_pos) {
2410 return IR_LOAD_LIVE_POS_FROM_REF(max_bb->start);
2411 } else {
2412 // TODO: "min_bb" is in a deeper loop than "max_bb" ???
2413 return max_pos;
2414 }
2415}
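/*
 * Heuristic note: when the split may land anywhere in [min_pos..max_pos],
 * the code above prefers a position outside of loops, so that the move the
 * split implies executes as rarely as possible. Roughly: starting from the
 * block of max_pos it climbs out of the loop nest through the loop header's
 * immediate dominator while that block still ends after min_pos, and splits
 * at that block's end; otherwise it falls back to the start of max_pos's
 * block, or to max_pos itself.
 */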
2416
2417static ir_live_interval *ir_split_interval_at(ir_ctx *ctx, ir_live_interval *ival, ir_live_pos pos)
2418{
2419 ir_live_interval *child;
2420 ir_live_range *p, *prev;
2421 ir_use_pos *use_pos, *prev_use_pos;
2422
2423 IR_LOG_LSRA_SPLIT(ival, pos);
2424 IR_ASSERT(pos > ival->range.start);
2425 ctx->flags2 |= IR_RA_HAVE_SPLITS;
2426
2427 p = &ival->range;
2428 prev = NULL;
2429 while (p && pos >= p->end) {
2430 prev = p;
2431 p = prev->next;
2432 }
2433 IR_ASSERT(p);
2434
2435 if (pos < p->start) {
2436 /* split between ranges */
2437 pos = p->start;
2438 }
2439
2440 use_pos = ival->use_pos;
2441 prev_use_pos = NULL;
2442 
2443 	ival->flags &= ~(IR_LIVE_INTERVAL_HAS_HINT_REGS|IR_LIVE_INTERVAL_HAS_HINT_REFS);
2444 	if (p->start == pos) {
2445 while (use_pos && pos > use_pos->pos) {
2446 			if (use_pos->hint != IR_REG_NONE) {
2447 				ival->flags |= IR_LIVE_INTERVAL_HAS_HINT_REGS;
2448 			}
2449 			if (use_pos->hint_ref > 0) {
2450 				ival->flags |= IR_LIVE_INTERVAL_HAS_HINT_REFS;
2451 			}
2452 prev_use_pos = use_pos;
2453 use_pos = use_pos->next;
2454 }
2455 } else {
2456 while (use_pos && pos >= use_pos->pos) {
2457 			if (use_pos->hint != IR_REG_NONE) {
2458 				ival->flags |= IR_LIVE_INTERVAL_HAS_HINT_REGS;
2459 			}
2460 			if (use_pos->hint_ref > 0) {
2461 				ival->flags |= IR_LIVE_INTERVAL_HAS_HINT_REFS;
2462 			}
2463 prev_use_pos = use_pos;
2464 use_pos = use_pos->next;
2465 }
2466 }
2467
2468 child = ir_arena_alloc(&ctx->arena, sizeof(ir_live_interval));
2469 child->type = ival->type;
2470 	child->reg = IR_REG_NONE;
2471 	child->flags = IR_LIVE_INTERVAL_SPLIT_CHILD;
2472 	child->vreg = ival->vreg;
2473 child->stack_spill_pos = -1; // not allocated
2474 child->range.start = pos;
2475 child->range.end = p->end;
2476 child->range.next = p->next;
2477 child->end = ival->end;
2478 child->use_pos = prev_use_pos ? prev_use_pos->next : use_pos;
2479
2480 child->next = ival->next;
2481 ival->next = child;
2482
2483 if (pos == p->start) {
2484 prev->next = NULL;
2485 ival->end = prev->end;
2486 /* Cache to reuse */
2487 p->next = ctx->unused_ranges;
2488 ctx->unused_ranges = p;
2489 } else {
2490 p->end = ival->end = pos;
2491 p->next = NULL;
2492 }
2493 if (prev_use_pos) {
2494 prev_use_pos->next = NULL;
2495 } else {
2496 ival->use_pos = NULL;
2497 }
2498
2499 use_pos = child->use_pos;
2500 while (use_pos) {
2501 		if (use_pos->hint != IR_REG_NONE) {
2502 			child->flags |= IR_LIVE_INTERVAL_HAS_HINT_REGS;
2503 		}
2504 		if (use_pos->hint_ref > 0) {
2505 			child->flags |= IR_LIVE_INTERVAL_HAS_HINT_REFS;
2506 		}
2507 use_pos = use_pos->next;
2508 }
2509
2510 return child;
2511}
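/*
 * Example: splitting a single-range interval [10..50) at 30 shrinks the
 * parent to [10..30) and creates a child [30..50) that keeps the same vreg
 * and is linked through ival->next; the use positions are divided at the
 * split point (a use exactly at the split position stays with the parent
 * when the split falls inside a range, but moves to the child when the
 * split lands on a range boundary, which is why the two loops above compare
 * differently).
 */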
2512
2513static int32_t ir_allocate_small_spill_slot(ir_ctx *ctx, size_t size, ir_reg_alloc_data *data)
2514{
2515 int32_t ret;
2516
2517 IR_ASSERT(size == 0 || size == 1 || size == 2 || size == 4 || size == 8);
2518 if (data->handled && data->handled[size]) {
2519 ret = data->handled[size]->stack_spill_pos;
2520 data->handled[size] = data->handled[size]->list_next;
2521 } else if (size == 8) {
2522 ret = ctx->stack_frame_size;
2523 ctx->stack_frame_size += 8;
2524 } else if (size == 4) {
2525 if (data->unused_slot_4) {
2526 ret = data->unused_slot_4;
2527 data->unused_slot_4 = 0;
2528 } else if (data->handled && data->handled[8]) {
2529 ret = data->handled[8]->stack_spill_pos;
2530 data->handled[8] = data->handled[8]->list_next;
2531 data->unused_slot_4 = ret + 4;
2532 } else {
2533 ret = ctx->stack_frame_size;
2534 if (sizeof(void*) == 8) {
2535 data->unused_slot_4 = ctx->stack_frame_size + 4;
2536 ctx->stack_frame_size += 8;
2537 } else {
2538 ctx->stack_frame_size += 4;
2539 }
2540 }
2541 } else if (size == 2) {
2542 if (data->unused_slot_2) {
2543 ret = data->unused_slot_2;
2544 data->unused_slot_2 = 0;
2545 } else if (data->unused_slot_4) {
2546 ret = data->unused_slot_4;
2547 data->unused_slot_2 = data->unused_slot_4 + 2;
2548 data->unused_slot_4 = 0;
2549 } else if (data->handled && data->handled[4]) {
2550 ret = data->handled[4]->stack_spill_pos;
2551 data->handled[4] = data->handled[4]->list_next;
2552 data->unused_slot_2 = ret + 2;
2553 } else if (data->handled && data->handled[8]) {
2554 ret = data->handled[8]->stack_spill_pos;
2555 data->handled[8] = data->handled[8]->list_next;
2556 data->unused_slot_2 = ret + 2;
2557 data->unused_slot_4 = ret + 4;
2558 } else {
2559 ret = ctx->stack_frame_size;
2560 data->unused_slot_2 = ctx->stack_frame_size + 2;
2561 if (sizeof(void*) == 8) {
2562 data->unused_slot_4 = ctx->stack_frame_size + 4;
2563 ctx->stack_frame_size += 8;
2564 } else {
2565 ctx->stack_frame_size += 4;
2566 }
2567 }
2568 } else if (size == 1) {
2569 if (data->unused_slot_1) {
2570 ret = data->unused_slot_1;
2571 data->unused_slot_1 = 0;
2572 } else if (data->unused_slot_2) {
2573 ret = data->unused_slot_2;
2574 data->unused_slot_1 = data->unused_slot_2 + 1;
2575 data->unused_slot_2 = 0;
2576 } else if (data->unused_slot_4) {
2577 ret = data->unused_slot_4;
2578 data->unused_slot_1 = data->unused_slot_4 + 1;
2579 data->unused_slot_2 = data->unused_slot_4 + 2;
2580 data->unused_slot_4 = 0;
2581 } else if (data->handled && data->handled[2]) {
2582 ret = data->handled[2]->stack_spill_pos;
2583 data->handled[2] = data->handled[2]->list_next;
2584 data->unused_slot_1 = ret + 1;
2585 } else if (data->handled && data->handled[4]) {
2586 ret = data->handled[4]->stack_spill_pos;
2587 data->handled[4] = data->handled[4]->list_next;
2588 data->unused_slot_1 = ret + 1;
2589 data->unused_slot_2 = ret + 2;
2590 } else if (data->handled && data->handled[8]) {
2591 ret = data->handled[8]->stack_spill_pos;
2592 data->handled[8] = data->handled[8]->list_next;
2593 data->unused_slot_1 = ret + 1;
2594 data->unused_slot_2 = ret + 2;
2595 data->unused_slot_4 = ret + 4;
2596 } else {
2597 ret = ctx->stack_frame_size;
2598 data->unused_slot_1 = ctx->stack_frame_size + 1;
2599 data->unused_slot_2 = ctx->stack_frame_size + 2;
2600 if (sizeof(void*) == 8) {
2601 data->unused_slot_4 = ctx->stack_frame_size + 4;
2602 ctx->stack_frame_size += 8;
2603 } else {
2604 ctx->stack_frame_size += 4;
2605 }
2606 }
2607 } else {
2608 ret = IR_NULL;
2609 }
2610 return ret;
2611}
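/*
 * Example of the slot-carving scheme above: with an empty frame on a 64-bit
 * target, a 1-byte spill takes offset 0 and records offsets 1, 2 and 4 as
 * unused sub-slots; a following 2-byte spill then reuses offset 2 and a
 * 4-byte spill reuses offset 4, so all three values share a single 8-byte
 * stack-frame slot.
 */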
2612 
2613 int32_t ir_allocate_spill_slot(ir_ctx *ctx, ir_type type, ir_reg_alloc_data *data)
2614{
2615 return ir_allocate_small_spill_slot(ctx, ir_type_size[type], data);
2616}
2617
2618static int32_t ir_allocate_big_spill_slot(ir_ctx *ctx, int32_t size, ir_reg_alloc_data *data)
2619{
2620 int32_t ret;
2621
2622 if (size <= 8) {
2623 if (size == 3) {
2624 size = 4;
2625 } else if (size > 4 && size < 8) {
2626 size = 8;
2627 }
2628 return ir_allocate_small_spill_slot(ctx, size, data);
2629 }
2630
2631 	/* Align stack-allocated data to 16 bytes */
2632 	ret = IR_ALIGNED_SIZE(ctx->stack_frame_size, 16);
2633 	size = IR_ALIGNED_SIZE(size, 8);
2635 	ctx->stack_frame_size = ret + size;
2636
2637 return ret;
2638}
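/*
 * Sizes up to 8 bytes are rounded up (3 -> 4, 5..7 -> 8) and served by the
 * small-slot allocator; anything larger gets its own 16-byte-aligned region
 * at the end of the frame. E.g. a 20-byte ALLOCA in a 12-byte frame is
 * placed at offset 16 and grows the frame to 40 bytes (assuming the
 * IR_ALIGNED_SIZE computation restored above).
 */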
2639
2640static ir_reg ir_get_first_reg_hint(ir_ctx *ctx, ir_live_interval *ival, ir_regset available)
2641{
2642 ir_use_pos *use_pos;
2643 ir_reg reg;
2644
2645 use_pos = ival->use_pos;
2646 while (use_pos) {
2647 reg = use_pos->hint;
2648 if (reg >= 0 && IR_REGSET_IN(available, reg)) {
2649 return reg;
2650 }
2651 use_pos = use_pos->next;
2652 }
2653
2654 return IR_REG_NONE;
2655}
2656
2657static ir_reg ir_try_allocate_preferred_reg(ir_ctx *ctx, ir_live_interval *ival, ir_regset available, ir_live_pos *freeUntilPos)
2658{
2659 ir_use_pos *use_pos;
2660 ir_reg reg;
2661 
2662 	if (ival->flags & IR_LIVE_INTERVAL_HAS_HINT_REGS) {
2663 		use_pos = ival->use_pos;
2664 while (use_pos) {
2665 reg = use_pos->hint;
2666 if (reg >= 0 && IR_REGSET_IN(available, reg)) {
2667 if (ival->end <= freeUntilPos[reg]) {
2668 /* register available for the whole interval */
2669 return reg;
2670 }
2671 }
2672 use_pos = use_pos->next;
2673 }
2674 }
2675 
2676 	if (ival->flags & IR_LIVE_INTERVAL_HAS_HINT_REFS) {
2677 		use_pos = ival->use_pos;
2678 while (use_pos) {
2679 if (use_pos->hint_ref > 0) {
2680 reg = ctx->live_intervals[ctx->vregs[use_pos->hint_ref]]->reg;
2681 if (reg >= 0 && IR_REGSET_IN(available, reg)) {
2682 if (ival->end <= freeUntilPos[reg]) {
2683 /* register available for the whole interval */
2684 return reg;
2685 }
2686 }
2687 }
2688 use_pos = use_pos->next;
2689 }
2690 }
2691
2692 return IR_REG_NONE;
2693}
2694
2695static ir_reg ir_get_preferred_reg(ir_ctx *ctx, ir_live_interval *ival, ir_regset available)
2696{
2697 ir_use_pos *use_pos;
2698 ir_reg reg;
2699
2700 use_pos = ival->use_pos;
2701 while (use_pos) {
2702 reg = use_pos->hint;
2703 if (reg >= 0 && IR_REGSET_IN(available, reg)) {
2704 return reg;
2705 } else if (use_pos->hint_ref > 0) {
2706 reg = ctx->live_intervals[ctx->vregs[use_pos->hint_ref]]->reg;
2707 if (reg >= 0 && IR_REGSET_IN(available, reg)) {
2708 return reg;
2709 }
2710 }
2711 use_pos = use_pos->next;
2712 }
2713
2714 return IR_REG_NONE;
2715}
2716
2717static void ir_add_to_unhandled(ir_live_interval **unhandled, ir_live_interval *ival)
2718{
2719 ir_live_pos pos = ival->range.start;
2720
2721 if (*unhandled == NULL
2722 || pos < (*unhandled)->range.start
2723 	 || (pos == (*unhandled)->range.start
2724 	  && (ival->flags & IR_LIVE_INTERVAL_HAS_HINT_REGS)
2725 	  && !((*unhandled)->flags & IR_LIVE_INTERVAL_HAS_HINT_REGS))
2726 	 || (pos == (*unhandled)->range.start
2727 	  && ival->vreg > (*unhandled)->vreg)) {
2728 ival->list_next = *unhandled;
2729 *unhandled = ival;
2730 } else {
2731 ir_live_interval *prev = *unhandled;
2732
2733 while (prev->list_next) {
2734 			if (pos < prev->list_next->range.start
2735 			 || (pos == prev->list_next->range.start
2736 			  && (ival->flags & IR_LIVE_INTERVAL_HAS_HINT_REGS)
2737 			  && !(prev->list_next->flags & IR_LIVE_INTERVAL_HAS_HINT_REGS))
2738 			 || (pos == prev->list_next->range.start
2739 			  && ival->vreg > prev->list_next->vreg)) {
2740 break;
2741 }
2742 prev = prev->list_next;
2743 }
2744 ival->list_next = prev->list_next;
2745 prev->list_next = ival;
2746 }
2747}
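/*
 * "unhandled" is kept sorted by range.start so the main linear-scan loop can
 * simply pop the head; the tie-breaking clauses above keep the processing
 * order deterministic for intervals that start at the same position.
 */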
2748
2749/* merge sorted lists */
2750static void ir_merge_to_unhandled(ir_live_interval **unhandled, ir_live_interval *ival)
2751{
2752 	ir_live_interval **prev;
2753 	ir_live_pos pos;
2754 
2755 if (*unhandled == NULL) {
2756 *unhandled = ival;
2757 while (ival) {
2758 ival = ival->list_next = ival->next;
2759 }
2760 } else {
2761 prev = unhandled;
2762 while (ival) {
2763 pos = ival->range.start;
2764 while (*prev && pos >= (*prev)->range.start) {
2765 prev = &(*prev)->list_next;
2766 }
2767 ival->list_next = *prev;
2768 *prev = ival;
2769 prev = &ival->list_next;
2770 ival = ival->next;
2771 }
2772 }
2773#ifdef IR_DEBUG
2774 ival = *unhandled;
2775 pos = 0;
2776
2777 while (ival) {
2778 IR_ASSERT(ival->range.start >= pos);
2779 pos = ival->range.start;
2780 ival = ival->list_next;
2781 }
2782#endif
2783}
2784
2785static void ir_add_to_unhandled_spill(ir_live_interval **unhandled, ir_live_interval *ival)
2786{
2787 ir_live_pos pos = ival->range.start;
2788
2789 if (*unhandled == NULL
2790 || pos <= (*unhandled)->range.start) {
2791 ival->list_next = *unhandled;
2792 *unhandled = ival;
2793 } else {
2794 ir_live_interval *prev = *unhandled;
2795
2796 while (prev->list_next) {
2797 if (pos <= prev->list_next->range.start) {
2798 break;
2799 }
2800 prev = prev->list_next;
2801 }
2802 ival->list_next = prev->list_next;
2803 prev->list_next = ival;
2804 }
2805}
2806
2807static ir_reg ir_try_allocate_free_reg(ir_ctx *ctx, ir_live_interval *ival, ir_live_interval **active, ir_live_interval *inactive, ir_live_interval **unhandled)
2808{
2809 ir_live_pos freeUntilPos[IR_REG_NUM];
2810 	int i, reg;
2811 	ir_live_pos pos, next;
2812 	ir_live_interval *other;
2813 ir_regset available, overlapped, scratch;
2814
2815 if (IR_IS_TYPE_FP(ival->type)) {
2816 available = IR_REGSET_FP;
2817 /* set freeUntilPos of all physical registers to maxInt */
2818 for (i = IR_REG_FP_FIRST; i <= IR_REG_FP_LAST; i++) {
2819 freeUntilPos[i] = 0x7fffffff;
2820 }
2821 } else {
2822 available = IR_REGSET_GP;
2823 if (ctx->flags & IR_USE_FRAME_POINTER) {
2824 IR_REGSET_EXCL(available, IR_REG_FRAME_POINTER);
2825 }
2826#if defined(IR_TARGET_X86)
2827 if (ir_type_size[ival->type] == 1) {
2828 		/* TODO: if no registers are available, we may reuse one of these registers for an already allocated interval ??? */
2829 IR_REGSET_EXCL(available, IR_REG_RBP);
2830 IR_REGSET_EXCL(available, IR_REG_RSI);
2831 IR_REGSET_EXCL(available, IR_REG_RDI);
2832 }
2833#endif
2834 /* set freeUntilPos of all physical registers to maxInt */
2835 for (i = IR_REG_GP_FIRST; i <= IR_REG_GP_LAST; i++) {
2836 freeUntilPos[i] = 0x7fffffff;
2837 }
2838 }
2839
2840 available = IR_REGSET_DIFFERENCE(available, (ir_regset)ctx->fixed_regset);
2841
2842 /* for each interval it in active */
2843 other = *active;
2844 while (other) {
2845 /* freeUntilPos[it.reg] = 0 */
2846 reg = other->reg;
2847 IR_ASSERT(reg >= 0);
2848 if (reg >= IR_REG_SCRATCH) {
2849 if (reg == IR_REG_SCRATCH) {
2850 available = IR_REGSET_DIFFERENCE(available, IR_REGSET_SCRATCH);
2851 } else {
2852 IR_ASSERT(reg == IR_REG_ALL);
2853 available = IR_REGSET_EMPTY;
2854 }
2855 } else {
2856 IR_REGSET_EXCL(available, reg);
2857 }
2858 other = other->list_next;
2859 }
2860
2861 /* for each interval it in inactive intersecting with current
2862 *
2863 	 * This loop is not necessary for a program in SSA form (see LSRA on SSA fig. 6),
2864 * but it is still necessary after coalescing and splitting
2865 */
2866 overlapped = IR_REGSET_EMPTY;
2867 other = inactive;
2868 pos = ival->end;
2869 while (other) {
2870 /* freeUntilPos[it.reg] = next intersection of it with current */
2871 if (other->current_range->start < pos) {
2872 next = ir_ivals_overlap(&ival->range, other->current_range);
2873 if (next) {
2874 reg = other->reg;
2875 IR_ASSERT(reg >= 0);
2876 if (reg >= IR_REG_SCRATCH) {
2877 ir_regset regset;
2878
2879 if (reg == IR_REG_SCRATCH) {
2880 regset = IR_REGSET_INTERSECTION(available, IR_REGSET_SCRATCH);
2881 } else {
2882 IR_ASSERT(reg == IR_REG_ALL);
2883 regset = available;
2884 }
2885 overlapped = IR_REGSET_UNION(overlapped, regset);
2886 IR_REGSET_FOREACH(regset, reg) {
2887 if (next < freeUntilPos[reg]) {
2888 freeUntilPos[reg] = next;
2889 }
2890 } IR_REGSET_FOREACH_END();
2891 } else if (IR_REGSET_IN(available, reg)) {
2892 IR_REGSET_INCL(overlapped, reg);
2893 if (next < freeUntilPos[reg]) {
2894 freeUntilPos[reg] = next;
2895 }
2896 }
2897 }
2898 }
2899 other = other->list_next;
2900 }
2901
2902 available = IR_REGSET_DIFFERENCE(available, overlapped);
2903 if (available != IR_REGSET_EMPTY) {
2904 
2905 		if (ival->flags & (IR_LIVE_INTERVAL_HAS_HINT_REGS|IR_LIVE_INTERVAL_HAS_HINT_REFS)) {
2906 			/* Try to use hint */
2907 reg = ir_try_allocate_preferred_reg(ctx, ival, available, freeUntilPos);
2908 if (reg != IR_REG_NONE) {
2909 ival->reg = reg;
2910 IR_LOG_LSRA_ASSIGN(" ---- Assign", ival, " (hint available without spilling)");
2911 if (*unhandled && ival->end > (*unhandled)->range.start) {
2912 ival->list_next = *active;
2913 *active = ival;
2914 }
2915 return reg;
2916 }
2917 }
2918
2919 if (ival->flags & IR_LIVE_INTERVAL_SPLIT_CHILD) {
2920 		/* Try to reuse the register previously allocated for the split interval */
2921 reg = ctx->live_intervals[ival->vreg]->reg;
2922 if (reg >= 0 && IR_REGSET_IN(available, reg)) {
2923 ival->reg = reg;
2924 IR_LOG_LSRA_ASSIGN(" ---- Assign", ival, " (available without spilling)");
2925 if (*unhandled && ival->end > (*unhandled)->range.start) {
2926 ival->list_next = *active;
2927 *active = ival;
2928 }
2929 return reg;
2930 }
2931 }
2932
2933 /* prefer caller-saved registers to avoid save/restore in prologue/epilogue */
2934 scratch = IR_REGSET_INTERSECTION(available, IR_REGSET_SCRATCH);
2935 if (scratch != IR_REGSET_EMPTY) {
2936 /* prefer registers that don't conflict with the hints for the following unhandled intervals */
2937 if (1) {
2938 ir_regset non_conflicting = scratch;
2939
2940 other = *unhandled;
2941 			while (other && other->range.start < ival->range.end) {
2942 				if (other->flags & IR_LIVE_INTERVAL_HAS_HINT_REGS) {
2943 					reg = ir_get_first_reg_hint(ctx, other, non_conflicting);
2944
2945 if (reg >= 0) {
2946 IR_REGSET_EXCL(non_conflicting, reg);
2947 if (non_conflicting == IR_REGSET_EMPTY) {
2948 break;
2949 }
2950 }
2951 }
2952 other = other->list_next;
2953 }
2954 if (non_conflicting != IR_REGSET_EMPTY) {
2955 reg = IR_REGSET_FIRST(non_conflicting);
2956 } else {
2957 reg = IR_REGSET_FIRST(scratch);
2958 }
2959 } else {
2960 reg = IR_REGSET_FIRST(scratch);
2961 }
2962 } else {
2963 reg = IR_REGSET_FIRST(available);
2964 }
2965 ival->reg = reg;
2966 IR_LOG_LSRA_ASSIGN(" ---- Assign", ival, " (available without spilling)");
2967 if (*unhandled && ival->end > (*unhandled)->range.start) {
2968 ival->list_next = *active;
2969 *active = ival;
2970 }
2971 return reg;
2972 }
2973
2974 /* reg = register with highest freeUntilPos */
2975 reg = IR_REG_NONE;
2976 pos = 0;
2977 IR_REGSET_FOREACH(overlapped, i) {
2978 if (freeUntilPos[i] > pos) {
2979 pos = freeUntilPos[i];
2980 reg = i;
2981 } else if (freeUntilPos[i] == pos
2982 && !IR_REGSET_IN(IR_REGSET_SCRATCH, reg)
2983 && IR_REGSET_IN(IR_REGSET_SCRATCH, i)) {
2984 /* prefer caller-saved registers to avoid save/restore in prologue/epilogue */
2985 pos = freeUntilPos[i];
2986 reg = i;
2987 }
2988 } IR_REGSET_FOREACH_END();
2989
2990 if (pos > ival->range.start) {
2991 /* register available for the first part of the interval */
2992 /* split current before freeUntilPos[reg] */
2993 		ir_live_pos split_pos = ir_last_use_pos_before(ival, pos,
2994 			IR_USE_MUST_BE_IN_REG | IR_USE_SHOULD_BE_IN_REG);
2995 		if (split_pos > ival->range.start) {
2996 split_pos = ir_find_optimal_split_position(ctx, ival, split_pos, pos, 0);
2997 			other = ir_split_interval_at(ctx, ival, split_pos);
2998 			if (ival->flags & (IR_LIVE_INTERVAL_HAS_HINT_REGS|IR_LIVE_INTERVAL_HAS_HINT_REFS)) {
2999 				ir_reg pref_reg = ir_try_allocate_preferred_reg(ctx, ival, IR_REGSET_UNION(available, overlapped), freeUntilPos);
3000
3001 if (pref_reg != IR_REG_NONE) {
3002 ival->reg = pref_reg;
3003 } else {
3004 ival->reg = reg;
3005 }
3006 } else {
3007 ival->reg = reg;
3008 }
3009 IR_LOG_LSRA_ASSIGN(" ---- Assign", ival, " (available without spilling for the first part)");
3010 if (*unhandled && ival->end > (*unhandled)->range.start) {
3011 ival->list_next = *active;
3012 *active = ival;
3013 }
3014 ir_add_to_unhandled(unhandled, other);
3015 IR_LOG_LSRA(" ---- Queue", other, "");
3016 return reg;
3017 }
3018 }
3019 return IR_REG_NONE;
3020}
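/*
 * A small worked example of the freeUntilPos logic above: suppose the
 * current interval is [10..40), R1 is taken by an active interval
 * (freeUntilPos 0), R2 is free until 25 because of an inactive interval,
 * and R3 is completely free. R3 is preferred, being available for the whole
 * interval; if only R1 and R2 existed, the interval would be split before
 * position 25, R2 assigned to the first part, and the remainder re-queued
 * on "unhandled".
 */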
3021
3022static ir_reg ir_allocate_blocked_reg(ir_ctx *ctx, ir_live_interval *ival, ir_live_interval **active, ir_live_interval **inactive, ir_live_interval **unhandled)
3023{
3024 ir_live_pos nextUsePos[IR_REG_NUM];
3025 ir_live_pos blockPos[IR_REG_NUM];
3026 int i, reg;
3027 ir_live_pos pos, next_use_pos;
3028 ir_live_interval *other, *prev;
3029 ir_use_pos *use_pos;
3030 ir_regset available, tmp_regset;
3031
3032 if (!(ival->flags & IR_LIVE_INTERVAL_TEMP)) {
3033 use_pos = ival->use_pos;
3034 while (use_pos && !(use_pos->flags & IR_USE_MUST_BE_IN_REG)) {
3035 use_pos = use_pos->next;
3036 }
3037 if (!use_pos) {
3038 /* spill */
3039 IR_LOG_LSRA(" ---- Spill", ival, " (no use pos that must be in reg)");
3040 ctx->flags2 |= IR_RA_HAVE_SPILLS;
3041 return IR_REG_NONE;
3042 }
3043 next_use_pos = use_pos->pos;
3044 } else {
3045 next_use_pos = ival->range.end;
3046 }
3047
3048 if (IR_IS_TYPE_FP(ival->type)) {
3049 available = IR_REGSET_FP;
3050 /* set nextUsePos of all physical registers to maxInt */
3051 for (i = IR_REG_FP_FIRST; i <= IR_REG_FP_LAST; i++) {
3052 nextUsePos[i] = 0x7fffffff;
3053 blockPos[i] = 0x7fffffff;
3054 }
3055 } else {
3056 available = IR_REGSET_GP;
3057 if (ctx->flags & IR_USE_FRAME_POINTER) {
3058 IR_REGSET_EXCL(available, IR_REG_FRAME_POINTER);
3059 }
3060#if defined(IR_TARGET_X86)
3061 if (ir_type_size[ival->type] == 1) {
3062 		/* TODO: if no registers are available, we may reuse one of these registers for an already allocated interval ??? */
3063 IR_REGSET_EXCL(available, IR_REG_RBP);
3064 IR_REGSET_EXCL(available, IR_REG_RSI);
3065 IR_REGSET_EXCL(available, IR_REG_RDI);
3066 }
3067#endif
3068 /* set nextUsePos of all physical registers to maxInt */
3069 for (i = IR_REG_GP_FIRST; i <= IR_REG_GP_LAST; i++) {
3070 nextUsePos[i] = 0x7fffffff;
3071 blockPos[i] = 0x7fffffff;
3072 }
3073 }
3074
3075 available = IR_REGSET_DIFFERENCE(available, (ir_regset)ctx->fixed_regset);
3076
3077 if (IR_REGSET_IS_EMPTY(available)) {
3078 fprintf(stderr, "LSRA Internal Error: No registers available. Allocation is not possible\n");
3079 IR_ASSERT(0);
3080 exit(-1);
3081 }
3082
3083 /* for each interval it in active */
3084 other = *active;
3085 while (other) {
3086 /* nextUsePos[it.reg] = next use of it after start of current */
3087 reg = other->reg;
3088 IR_ASSERT(reg >= 0);
3089 if (reg >= IR_REG_SCRATCH) {
3090 ir_regset regset;
3091
3092 if (reg == IR_REG_SCRATCH) {
3093 regset = IR_REGSET_INTERSECTION(available, IR_REGSET_SCRATCH);
3094 } else {
3095 IR_ASSERT(reg == IR_REG_ALL);
3096 regset = available;
3097 }
3098 IR_REGSET_FOREACH(regset, reg) {
3099 blockPos[reg] = nextUsePos[reg] = 0;
3100 } IR_REGSET_FOREACH_END();
3101 		} else if (IR_REGSET_IN(available, reg)) {
3102 			if (other->flags & (IR_LIVE_INTERVAL_FIXED|IR_LIVE_INTERVAL_TEMP)) {
3103 				blockPos[reg] = nextUsePos[reg] = 0;
3104 			} else {
3105 				pos = ir_first_use_pos_after(other, ival->range.start,
3106 					IR_USE_MUST_BE_IN_REG | IR_USE_SHOULD_BE_IN_REG);
3107 				if (pos < nextUsePos[reg]) {
3108 nextUsePos[reg] = pos;
3109 }
3110 }
3111 }
3112 other = other->list_next;
3113 }
3114
3115 /* for each interval it in inactive intersecting with current */
3116 other = *inactive;
3117 while (other) {
3118 /* freeUntilPos[it.reg] = next intersection of it with current */
3119 reg = other->reg;
3120 IR_ASSERT(reg >= 0);
3121 if (reg >= IR_REG_SCRATCH) {
3122 ir_live_pos overlap = ir_ivals_overlap(&ival->range, other->current_range);
3123
3124 if (overlap) {
3125 ir_regset regset;
3126
3127 if (reg == IR_REG_SCRATCH) {
3128 regset = IR_REGSET_INTERSECTION(available, IR_REGSET_SCRATCH);
3129 } else {
3130 IR_ASSERT(reg == IR_REG_ALL);
3131 regset = available;
3132 }
3133 IR_REGSET_FOREACH(regset, reg) {
3134 if (overlap < nextUsePos[reg]) {
3135 nextUsePos[reg] = overlap;
3136 }
3137 if (overlap < blockPos[reg]) {
3138 blockPos[reg] = overlap;
3139 }
3140 } IR_REGSET_FOREACH_END();
3141 }
3142 } else if (IR_REGSET_IN(available, reg)) {
3143 ir_live_pos overlap = ir_ivals_overlap(&ival->range, other->current_range);
3144
3145 if (overlap) {
3147 if (overlap < nextUsePos[reg]) {
3148 nextUsePos[reg] = overlap;
3149 }
3150 if (overlap < blockPos[reg]) {
3151 blockPos[reg] = overlap;
3152 }
3153 } else {
3154 				pos = ir_first_use_pos_after(other, ival->range.start,
3155 					IR_USE_MUST_BE_IN_REG | IR_USE_SHOULD_BE_IN_REG);
3156 				if (pos < nextUsePos[reg]) {
3157 nextUsePos[reg] = pos;
3158 }
3159 }
3160 }
3161 }
3162 other = other->list_next;
3163 }
3164
3165 /* register hinting */
3166 	reg = IR_REG_NONE;
3167 	if (ival->flags & (IR_LIVE_INTERVAL_HAS_HINT_REGS|IR_LIVE_INTERVAL_HAS_HINT_REFS)) {
3168 		reg = ir_get_preferred_reg(ctx, ival, available);
3169 }
3170 if (reg == IR_REG_NONE) {
3171select_register:
3172 reg = IR_REGSET_FIRST(available);
3173 }
3174
3175 /* reg = register with highest nextUsePos */
3176 pos = nextUsePos[reg];
3177 tmp_regset = available;
3178 IR_REGSET_EXCL(tmp_regset, reg);
3179 IR_REGSET_FOREACH(tmp_regset, i) {
3180 if (nextUsePos[i] > pos) {
3181 pos = nextUsePos[i];
3182 reg = i;
3183 }
3184 } IR_REGSET_FOREACH_END();
3185
3186 /* if first usage of current is after nextUsePos[reg] then */
3187 if (next_use_pos > pos && !(ival->flags & IR_LIVE_INTERVAL_TEMP)) {
3188 /* all other intervals are used before current, so it is best to spill current itself */
3189 /* assign spill slot to current */
3190 /* split current before its first use position that requires a register */
3191 ir_live_pos split_pos;
3192
3193spill_current:
3194 if (next_use_pos == ival->range.start) {
3195 IR_ASSERT(ival->use_pos && ival->use_pos->op_num == 0);
3196 /* split right after definition */
3197 split_pos = next_use_pos + 1;
3198 } else {
3199 split_pos = ir_find_optimal_split_position(ctx, ival, ival->range.start, next_use_pos - 1, 1);
3200 }
3201
3202 if (split_pos > ival->range.start) {
3203 IR_LOG_LSRA(" ---- Conflict with others", ival, " (all others are used before)");
3204 other = ir_split_interval_at(ctx, ival, split_pos);
3205 IR_LOG_LSRA(" ---- Spill", ival, "");
3206 ir_add_to_unhandled(unhandled, other);
3207 IR_LOG_LSRA(" ---- Queue", other, "");
3208 return IR_REG_NONE;
3209 }
3210 }
3211
3212 	if (ival->end > blockPos[reg]) {
3213 		/* spilling makes a register free only for the first part of current */
3214 		IR_LOG_LSRA("   ---- Conflict with others", ival, " (spilling makes a register free only for the first part)");
3215 /* split current at optimal position before block_pos[reg] */
3216 		ir_live_pos split_pos = ir_last_use_pos_before(ival, blockPos[reg] + 1,
3217 			IR_USE_MUST_BE_IN_REG | IR_USE_SHOULD_BE_IN_REG);
3218 		if (split_pos == 0) {
3219 			split_pos = ir_first_use_pos_after(ival, blockPos[reg],
3220 				IR_USE_MUST_BE_IN_REG | IR_USE_SHOULD_BE_IN_REG) - 1; // TODO: ???
3221 			other = ir_split_interval_at(ctx, ival, split_pos);
3222 ir_add_to_unhandled(unhandled, other);
3223 IR_LOG_LSRA(" ---- Queue", other, "");
3224 return IR_REG_NONE;
3225 }
3226 if (split_pos >= blockPos[reg]) {
3227try_next_available_register:
3228 IR_REGSET_EXCL(available, reg);
3229 if (IR_REGSET_IS_EMPTY(available)) {
3230 fprintf(stderr, "LSRA Internal Error: Unsolvable conflict. Allocation is not possible\n");
3231 IR_ASSERT(0);
3232 exit(-1);
3233 }
3234 IR_LOG_LSRA(" ---- Restart", ival, "");
3235 goto select_register;
3236 }
3237 split_pos = ir_find_optimal_split_position(ctx, ival, split_pos, blockPos[reg], 1);
3238 other = ir_split_interval_at(ctx, ival, split_pos);
3239 ir_add_to_unhandled(unhandled, other);
3240 IR_LOG_LSRA(" ---- Queue", other, "");
3241 }
3242
3243 /* spill intervals that currently block reg */
3244 prev = NULL;
3245 other = *active;
3246 while (other) {
3247 ir_live_pos split_pos;
3248
3249 if (reg == other->reg) {
3250 /* split active interval for reg at position */
3251 ir_live_pos overlap = ir_ivals_overlap(&ival->range, other->current_range);
3252
3253 if (overlap) {
3254 ir_live_interval *child, *child2;
3255
3256 IR_ASSERT(other->type != IR_VOID);
3257 IR_LOG_LSRA_CONFLICT(" ---- Conflict with active", other, overlap);
3258
3259 split_pos = ir_last_use_pos_before(other, ival->range.start, IR_USE_MUST_BE_IN_REG | IR_USE_SHOULD_BE_IN_REG);
3260 if (split_pos == 0) {
3261 split_pos = ival->range.start;
3262 }
3263 split_pos = ir_find_optimal_split_position(ctx, other, split_pos, ival->range.start, 1);
3264 if (split_pos > other->range.start) {
3265 child = ir_split_interval_at(ctx, other, split_pos);
3266 if (prev) {
3267 prev->list_next = other->list_next;
3268 } else {
3269 *active = other->list_next;
3270 }
3271 IR_LOG_LSRA(" ---- Finish", other, "");
3272 } else {
3273 if (ir_first_use_pos(other, IR_USE_MUST_BE_IN_REG) <= other->end) {
3274 if (!(ival->flags & IR_LIVE_INTERVAL_TEMP)) {
3275 next_use_pos = ir_first_use_pos(ival, IR_USE_MUST_BE_IN_REG);
3276 if (next_use_pos == ival->range.start) {
3277 IR_ASSERT(ival->use_pos && ival->use_pos->op_num == 0);
3278 /* split right after definition */
3279 split_pos = next_use_pos + 1;
3280 } else {
3281 split_pos = ir_find_optimal_split_position(ctx, ival, ival->range.start, next_use_pos - 1, 1);
3282 }
3283
3284 if (split_pos > ival->range.start) {
3285 goto spill_current;
3286 }
3287 }
3288 goto try_next_available_register;
3289 }
3290 child = other;
3291 other->reg = IR_REG_NONE;
3292 if (prev) {
3293 prev->list_next = other->list_next;
3294 } else {
3295 *active = other->list_next;
3296 }
3297 IR_LOG_LSRA(" ---- Spill and Finish", other, " (it must not be in reg)");
3298 }
3299
3300 split_pos = ir_first_use_pos_after(child, ival->range.start, IR_USE_MUST_BE_IN_REG | IR_USE_SHOULD_BE_IN_REG) - 1; // TODO: ???
3301 if (split_pos > child->range.start && split_pos < child->end) {
3302 ir_live_pos opt_split_pos = ir_find_optimal_split_position(ctx, child, ival->range.start, split_pos, 1);
3303 if (opt_split_pos > child->range.start) {
3304 split_pos = opt_split_pos;
3305 }
3306 child2 = ir_split_interval_at(ctx, child, split_pos);
3307 IR_LOG_LSRA(" ---- Spill", child, "");
3308 ir_add_to_unhandled(unhandled, child2);
3309 IR_LOG_LSRA(" ---- Queue", child2, "");
3310 } else if (child != other) {
3311 // TODO: this may cause endless loop
3312 ir_add_to_unhandled(unhandled, child);
3313 IR_LOG_LSRA(" ---- Queue", child, "");
3314 }
3315 }
3316 break;
3317 }
3318 prev = other;
3319 other = other->list_next;
3320 }
3321
3322 /* split any inactive interval for reg at the end of its lifetime hole */
3323 other = *inactive;
3324 while (other) {
3325 /* freeUntilPos[it.reg] = next intersection of it with current */
3326 if (reg == other->reg) {
3327 ir_live_pos overlap = ir_ivals_overlap(&ival->range, other->current_range);
3328
3329 if (overlap) {
3330 ir_live_interval *child;
3331
3332 IR_ASSERT(other->type != IR_VOID);
3333 IR_LOG_LSRA_CONFLICT(" ---- Conflict with inactive", other, overlap);
3334 // TODO: optimal split position (this case is not tested)
3335 child = ir_split_interval_at(ctx, other, overlap);
3336 /* reset range cache */
3337 other->current_range = &other->range;
3338 ir_add_to_unhandled(unhandled, child);
3339 IR_LOG_LSRA(" ---- Queue", child, "");
3340 }
3341 }
3342 other = other->list_next;
3343 }
3344
3345 /* current.reg = reg */
3346 ival->reg = reg;
3347 IR_LOG_LSRA_ASSIGN(" ---- Assign", ival, " (after splitting others)");
3348
3349 if (*unhandled && ival->end > (*unhandled)->range.start) {
3350 ival->list_next = *active;
3351 *active = ival;
3352 }
3353 return reg;
3354}
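/*
 * Blocked-register example: when every register is occupied and the current
 * interval's first use that must be in a register comes later than the next
 * uses of all active intervals (next_use_pos > pos), the current interval
 * itself is spilled and split just before that use. Otherwise the register
 * whose holder's next use is farthest away is stolen: the active holder is
 * split and spilled around the current interval, while blockPos prevents
 * stealing a register that a fixed interval requires inside the current
 * range.
 */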
3355
3356static int ir_fix_dessa_tmps(ir_ctx *ctx, uint8_t type, ir_ref from, ir_ref to)
3357{
3358 ir_block *bb = ctx->data;
3359 ir_tmp_reg tmp_reg;
3360
3361 if (to == 0) {
3362 if (IR_IS_TYPE_INT(type)) {
3363 tmp_reg.num = 0;
3364 tmp_reg.type = type;
3365 tmp_reg.start = IR_USE_SUB_REF;
3366 tmp_reg.end = IR_SAVE_SUB_REF;
3367 		} else {
3368 			IR_ASSERT(IR_IS_TYPE_FP(type));
3369 			tmp_reg.num = 1;
3370 tmp_reg.type = type;
3371 tmp_reg.start = IR_USE_SUB_REF;
3372 tmp_reg.end = IR_SAVE_SUB_REF;
3373 }
3374 } else if (from != 0) {
3375 if (IR_IS_TYPE_INT(type)) {
3376 tmp_reg.num = 0;
3377 tmp_reg.type = type;
3378 tmp_reg.start = IR_USE_SUB_REF;
3379 tmp_reg.end = IR_SAVE_SUB_REF;
3380 		} else {
3381 			IR_ASSERT(IR_IS_TYPE_FP(type));
3382 			tmp_reg.num = 1;
3383 tmp_reg.type = type;
3384 tmp_reg.start = IR_USE_SUB_REF;
3385 tmp_reg.end = IR_SAVE_SUB_REF;
3386 }
3387 } else {
3388 return 1;
3389 }
3390 if (!ir_has_tmp(ctx, bb->end, tmp_reg.num)) {
3391 ir_add_tmp(ctx, bb->end, bb->end, tmp_reg.num, tmp_reg);
3392 }
3393 return 1;
3394}
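/*
 * Rationale: ir_gen_dessa_moves() may emit copies into a temporary
 * (to == 0) to break copy cycles, and ordinary moves (from != 0) may turn
 * into memory-to-memory copies once operands are spilled, so a temporary
 * register per class (num 0 for GP, num 1 for FP) is reserved at the end of
 * the predecessor block; copies out of the temporary (from == 0) need no
 * extra register.
 */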
3395
3396static bool ir_ival_spill_for_fuse_load(ir_ctx *ctx, ir_live_interval *ival, ir_reg_alloc_data *data)
3397{
3398 ir_use_pos *use_pos = ival->use_pos;
3399 ir_insn *insn;
3400
3401 if (ival->flags & IR_LIVE_INTERVAL_MEM_PARAM) {
3402 IR_ASSERT(!ival->next && use_pos && use_pos->op_num == 0);
3403 insn = &ctx->ir_base[IR_LIVE_POS_TO_REF(use_pos->pos)];
3404 IR_ASSERT(insn->op == IR_PARAM);
3405 use_pos = use_pos->next;
3406 if (use_pos && (use_pos->next || (use_pos->flags & IR_USE_MUST_BE_IN_REG))) {
3407 return 0;
3408 }
3409
3410 if (use_pos) {
3411 ir_block *bb = ir_block_from_live_pos(ctx, use_pos->pos);
3412 if (bb->loop_depth) {
3413 return 0;
3414 }
3415 }
3416
3417 return 1;
3418 } else if (ival->flags & IR_LIVE_INTERVAL_MEM_LOAD) {
3419 insn = &ctx->ir_base[IR_LIVE_POS_TO_REF(use_pos->pos)];
3420 IR_ASSERT(insn->op == IR_VLOAD);
3421 IR_ASSERT(ctx->ir_base[insn->op2].op == IR_VAR);
3422 use_pos = use_pos->next;
3423 if (use_pos && (use_pos->next || (use_pos->flags & IR_USE_MUST_BE_IN_REG))) {
3424 return 0;
3425 }
3426
3427 if (use_pos) {
3428 ir_block *bb = ir_block_from_live_pos(ctx, use_pos->pos);
3429 if (bb->loop_depth && bb != ir_block_from_live_pos(ctx, ival->use_pos->pos)) {
3430 return 0;
3431 }
3432 /* check if VAR may be clobbered between VLOAD and use */
3433 ir_use_list *use_list = &ctx->use_lists[insn->op2];
3434 ir_ref n = use_list->count;
3435 ir_ref *p = &ctx->use_edges[use_list->refs];
3436 for (; n > 0; p++, n--) {
3437 ir_ref use = *p;
3438 if (ctx->ir_base[use].op == IR_VSTORE) {
3439 if (use > IR_LIVE_POS_TO_REF(ival->use_pos->pos) && use < IR_LIVE_POS_TO_REF(use_pos->pos)) {
3440 return 0;
3441 }
3442 } else if (ctx->ir_base[use].op == IR_VADDR) {
3443 return 0;
3444 }
3445 }
3446 }
3447 ival->stack_spill_pos = ctx->ir_base[insn->op2].op3;
3448
3449 return 1;
3450 }
3451 return 0;
3452}
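/*
 * In short: if the interval's value already lives in memory (a stack-passed
 * PARAM, or a VLOAD from a VAR that is neither stored to nor address-taken
 * between the load and its last use) and the remaining uses tolerate a
 * memory operand, the interval is spilled "for free" to its home location;
 * in the VLOAD case the VAR's own stack slot (op3) is reused.
 */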
3453
3454static void ir_assign_bound_spill_slots(ir_ctx *ctx)
3455{
3456 ir_hashtab_bucket *b = ctx->binding->data;
3457 uint32_t n = ctx->binding->count;
3458 uint32_t v;
3459 ir_live_interval *ival;
3460
3461 while (n > 0) {
3462 v = ctx->vregs[b->key];
3463 if (v) {
3464 ival = ctx->live_intervals[v];
3465 if (ival
3466 && ival->stack_spill_pos == -1
3467 && (ival->next || ival->reg == IR_REG_NONE)) {
3468 IR_ASSERT(b->val < 0);
3469 /* special spill slot */
3470 				ival->stack_spill_pos = -b->val;
3471 				ival->flags |= IR_LIVE_INTERVAL_SPILLED | IR_LIVE_INTERVAL_SPILL_SPECIAL;
3472 			}
3473 }
3474 b++;
3475 n--;
3476 }
3477}
3478
3479static int ir_linear_scan(ir_ctx *ctx)
3480{
3481 uint32_t b;
3482 ir_block *bb;
3483 	ir_live_interval *unhandled = NULL;
3484 	ir_live_interval *active = NULL;
3485 	ir_live_interval *inactive = NULL;
3486 ir_live_interval *ival, *other, *prev;
3487 int j;
3488 ir_live_pos position;
3489 	ir_reg reg;
3490 	ir_reg_alloc_data data;
3491 	ir_ref vars = ctx->vars;
3492
3493 if (!ctx->live_intervals) {
3494 return 0;
3495 }
3496
3497 if (ctx->flags2 & IR_LR_HAVE_DESSA_MOVES) {
3498 /* Add fixed intervals for temporary registers used for DESSA moves */
3499 		for (b = 1, bb = &ctx->cfg_blocks[1]; b <= ctx->cfg_blocks_count; b++, bb++) {
3500 			IR_ASSERT(!(bb->flags & IR_BB_UNREACHABLE));
3501 			if (bb->flags & IR_BB_DESSA_MOVES) {
3502 ctx->data = bb;
3503 ir_gen_dessa_moves(ctx, b, ir_fix_dessa_tmps);
3504 }
3505 }
3506 }
3507
3508 ctx->data = &data;
3509 ctx->stack_frame_size = 0;
3510 data.unused_slot_4 = 0;
3511 data.unused_slot_2 = 0;
3512 data.unused_slot_1 = 0;
3513 data.handled = NULL;
3514
3515 while (vars) {
3516 ir_ref var = vars;
3517 ir_insn *insn = &ctx->ir_base[var];
3518
3519 IR_ASSERT(insn->op == IR_VAR || insn->op == IR_ALLOCA);
3520 vars = insn->op3; /* list next */
3521
3522 if (insn->op == IR_VAR) {
3523 			ir_ref slot = ir_allocate_spill_slot(ctx, insn->type, &data);
3524 ir_use_list *use_list;
3525 ir_ref n, *p;
3526
3527 insn->op3 = slot;
3528 use_list = &ctx->use_lists[var];
3529 n = use_list->count;
3530 p = &ctx->use_edges[use_list->refs];
3531 for (; n > 0; p++, n--) {
3532 insn = &ctx->ir_base[*p];
3533 if (insn->op == IR_VADDR) {
3534 insn->op3 = slot;
3535 }
3536 }
3537 } else {
3538 ir_insn *val = &ctx->ir_base[insn->op2];
3539
3540 			IR_ASSERT(IR_IS_CONST_REF(insn->op2));
3541 			IR_ASSERT(!IR_IS_SYM_CONST(val->op));
3542 			IR_ASSERT(IR_IS_TYPE_INT(val->type));
3543 			IR_ASSERT(IR_IS_TYPE_UNSIGNED(val->type) || val->val.i64 >= 0);
3544 IR_ASSERT(val->val.i64 < 0x7fffffff);
3545
3546 insn->op3 = ir_allocate_big_spill_slot(ctx, val->val.i32, &data);
3547 }
3548 }
3549
3550 for (j = ctx->vregs_count; j != 0; j--) {
3551 ival = ctx->live_intervals[j];
3552 		if (ival) {
3553 			if (!(ival->flags & (IR_LIVE_INTERVAL_MEM_PARAM|IR_LIVE_INTERVAL_MEM_LOAD))
3554 			 || !ir_ival_spill_for_fuse_load(ctx, ival, &data)) {
3555 ir_add_to_unhandled(&unhandled, ival);
3556 }
3557 }
3558 }
3559
3560 ival = ctx->live_intervals[0];
3561 if (ival) {
3562 ir_merge_to_unhandled(&unhandled, ival);
3563 }
3564
3565 	/* vregs + tmp + fixed + SCRATCH + ALL */
3566 for (j = ctx->vregs_count + 1; j <= ctx->vregs_count + IR_REG_NUM + 2; j++) {
3567 ival = ctx->live_intervals[j];
3568 if (ival) {
3569 ival->current_range = &ival->range;
3570 ival->list_next = inactive;
3571 inactive = ival;
3572 }
3573 }
3574
3576
3577#ifdef IR_DEBUG
3578 if (ctx->flags & IR_DEBUG_RA) {
3579 fprintf(stderr, "----\n");
3580 ir_dump_live_ranges(ctx, stderr);
3581 fprintf(stderr, "---- Start LSRA\n");
3582 }
3583#endif
3584
3585 while (unhandled) {
3586 ival = unhandled;
3587 ival->current_range = &ival->range;
3588 unhandled = ival->list_next;
3589 position = ival->range.start;
3590
3591 IR_LOG_LSRA(" ---- Processing", ival, "...");
3592
3593 /* for each interval i in active */
3594 other = active;
3595 prev = NULL;
3596 while (other) {
3597 ir_live_range *r = other->current_range;
3598
3599 IR_ASSERT(r);
3600 if (r->end <= position) {
3601 do {
3602 r = r->next;
3603 } while (r && r->end <= position);
3604 if (!r) {
3605 /* move i from active to handled */
3606 other = other->list_next;
3607 if (prev) {
3608 prev->list_next = other;
3609 } else {
3610 active = other;
3611 }
3612 continue;
3613 }
3614 other->current_range = r;
3615 }
3616 if (position < r->start) {
3617 /* move i from active to inactive */
3618 if (prev) {
3619 prev->list_next = other->list_next;
3620 } else {
3621 active = other->list_next;
3622 }
3623 other->list_next = inactive;
3624 inactive = other;
3625 } else {
3626 prev = other;
3627 }
3628 other = prev ? prev->list_next : active;
3629 }
3630
3631 /* for each interval i in inactive */
3632 other = inactive;
3633 prev = NULL;
3634 while (other) {
3635 ir_live_range *r = other->current_range;
3636
3637 IR_ASSERT(r);
3638 if (r->end <= position) {
3639 do {
3640 r = r->next;
3641 } while (r && r->end <= position);
3642 if (!r) {
3643 /* move i from inactive to handled */
3644 other = other->list_next;
3645 if (prev) {
3646 prev->list_next = other;
3647 } else {
3648 inactive = other;
3649 }
3650 continue;
3651 }
3652 other->current_range = r;
3653 }
3654 if (position >= r->start) {
3655 /* move i from inactive to active */
3656 if (prev) {
3657 prev->list_next = other->list_next;
3658 } else {
3659 inactive = other->list_next;
3660 }
3661 other->list_next = active;
3662 active = other;
3663 } else {
3664 prev = other;
3665 }
3666 other = prev ? prev->list_next : inactive;
3667 }
3668
3669 reg = ir_try_allocate_free_reg(ctx, ival, &active, inactive, &unhandled);
3670 if (reg == IR_REG_NONE) {
3671 reg = ir_allocate_blocked_reg(ctx, ival, &active, &inactive, &unhandled);
3672 }
3673 }
3674
3675#if 0 //def IR_DEBUG
3676 /* all intervals must be processed */
3677 ival = active;
3678 while (ival) {
3679 IR_ASSERT(!ival->next);
3680 ival = ival->list_next;
3681 }
3682 ival = inactive;
3683 while (ival) {
3684 IR_ASSERT(!ival->next);
3685 ival = ival->list_next;
3686 }
3687#endif
3688 
3689 	if (ctx->flags2 & IR_RA_HAVE_SPILLS) {
3690 
3691 if (ctx->binding) {
3692 ir_assign_bound_spill_slots(ctx);
3693 }
3694
3695 /* Use simple linear-scan (without holes) to allocate and reuse spill slots */
3696 unhandled = NULL;
3697 for (j = ctx->vregs_count; j != 0; j--) {
3698 ival = ctx->live_intervals[j];
3699 if (ival
3700 && (ival->next || ival->reg == IR_REG_NONE)
3701 && ival->stack_spill_pos == -1) {
3703 if (!(ival->flags & IR_LIVE_INTERVAL_MEM_PARAM)) {
3704 ir_live_range *r;
3705
3706 other = ival;
3707 while (other->next) {
3708 other = other->next;
3709 }
3710 r = &other->range;
3711 while (r->next) {
3712 r = r->next;
3713 }
3714 ival->end = r->end;
3715 ir_add_to_unhandled_spill(&unhandled, ival);
3716 }
3717 }
3718 }
3719
3720 if (unhandled) {
3721 uint8_t size;
3722 ir_live_interval *handled[9] = {NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL};
3723 ir_live_interval *old;
3724
3725 data.handled = handled;
3726 active = NULL;
3727 while (unhandled) {
3728 ival = unhandled;
3729 ival->current_range = &ival->range;
3730 unhandled = ival->list_next;
3731 position = ival->range.start;
3732
3733 /* for each interval i in active */
3734 other = active;
3735 prev = NULL;
3736 while (other) {
3737 if (other->end <= position) {
3738 /* move i from active to handled */
3739 if (prev) {
3740 prev->list_next = other->list_next;
3741 } else {
3742 active = other->list_next;
3743 }
3744 size = ir_type_size[other->type];
3745 IR_ASSERT(size == 1 || size == 2 || size == 4 || size == 8);
3746 old = handled[size];
3747 while (old) {
3748 if (old->stack_spill_pos == other->stack_spill_pos) {
3749 break;
3750 }
3751 old = old->list_next;
3752 }
3753 if (!old) {
3754 other->list_next = handled[size];
3755 handled[size] = other;
3756 }
3757 } else {
3758 prev = other;
3759 }
3760 other = prev ? prev->list_next : active;
3761 }
3762
3763 ival->stack_spill_pos = ir_allocate_spill_slot(ctx, ival->type, &data);
3764 if (unhandled && ival->end > unhandled->range.start) {
3765 ival->list_next = active;
3766 active = ival;
3767 } else {
3768 size = ir_type_size[ival->type];
3769 IR_ASSERT(size == 1 || size == 2 || size == 4 || size == 8);
3770 old = handled[size];
3771 while (old) {
3772 if (old->stack_spill_pos == ival->stack_spill_pos) {
3773 break;
3774 }
3775 old = old->list_next;
3776 }
3777 if (!old) {
3778 ival->list_next = handled[size];
3779 handled[size] = ival;
3780 }
3781 }
3782 }
3783 data.handled = NULL;
3784 }
3785 }
3786
3787#ifdef IR_TARGET_X86
3788 if (ctx->flags2 & IR_HAS_FP_RET_SLOT) {
3789 ctx->ret_slot = ir_allocate_spill_slot(ctx, IR_DOUBLE, &data);
3790 } else if (ctx->ret_type == IR_FLOAT || ctx->ret_type == IR_DOUBLE) {
3791 ctx->ret_slot = ir_allocate_spill_slot(ctx, ctx->ret_type, &data);
3792 } else {
3793 ctx->ret_slot = -1;
3794 }
3795#endif
3796
3797#ifdef IR_DEBUG
3798 if (ctx->flags & IR_DEBUG_RA) {
3799 fprintf(stderr, "---- Finish LSRA\n");
3800 ir_dump_live_ranges(ctx, stderr);
3801 fprintf(stderr, "----\n");
3802 }
3803#endif
3804
3805 return 1;
3806}
3807
3808static bool needs_spill_reload(ir_ctx *ctx, ir_live_interval *ival, uint32_t b0, ir_bitset available)
3809{
3810 ir_worklist worklist;
3811 ir_block *bb;
3812 uint32_t b, *p, n;
3813
3814 ir_worklist_init(&worklist, ctx->cfg_blocks_count + 1);
3815 ir_worklist_push(&worklist, b0);
3816 while (ir_worklist_len(&worklist) != 0) {
3817 b = ir_worklist_pop(&worklist);
3818 bb = &ctx->cfg_blocks[b];
3819 if (bb->flags & (IR_BB_ENTRY|IR_BB_START)) {
3820 ir_worklist_free(&worklist);
3821 return 1;
3822 }
3823 n = bb->predecessors_count;
3824 for (p = &ctx->cfg_edges[bb->predecessors]; n > 0; p++, n--) {
3825 b = *p;
3826 bb = &ctx->cfg_blocks[b];
3827
3828 if (!ir_ival_covers(ival, IR_SAVE_LIVE_POS_FROM_REF(bb->end))) {
3829 ir_worklist_free(&worklist);
3830 return 1;
3831 } else if (!ir_bitset_in(available, b)) {
3832 ir_worklist_push(&worklist, b);
3833 }
3834 }
3835 }
3836 ir_worklist_free(&worklist);
3837 return 0;
3838}
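/*
 * Rationale: after splitting, a value may be in its assigned register on one
 * incoming path of a block but only in the spill slot on another. The walk
 * above explores predecessors; "available" marks blocks already known to
 * hold the value in the register at their end. Reaching the function entry,
 * or a predecessor whose block end the interval does not cover, means the
 * register contents cannot be relied upon, so a spill load is required
 * before the use.
 */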
3839
3840static bool needs_spill_load(ir_ctx *ctx, ir_live_interval *ival, ir_use_pos *use_pos)
3841{
3842 if (use_pos->next
3843 && use_pos->op_num == 1
3844 && use_pos->next->pos == use_pos->pos
3845 && !(use_pos->next->flags & IR_USE_MUST_BE_IN_REG)) {
3846 /* Support for R2 = ADD(R1, R1) */
3847 use_pos = use_pos->next;
3848 }
3849 return use_pos->next && use_pos->next->op_num != 0;
3850}
3851
3852static void ir_set_fused_reg(ir_ctx *ctx, ir_ref root, ir_ref ref_and_op, int8_t reg)
3853{
3854 char key[10];
3855
3856 IR_ASSERT(reg != IR_REG_NONE);
3857 if (!ctx->fused_regs) {
3858 ctx->fused_regs = ir_mem_malloc(sizeof(ir_strtab));
3859 ir_strtab_init(ctx->fused_regs, 8, 128);
3860 }
3861 memcpy(key, &root, sizeof(ir_ref));
3862 memcpy(key + 4, &ref_and_op, sizeof(ir_ref));
3863 ir_strtab_lookup(ctx->fused_regs, key, 8, 0x10000000 | reg);
3864}
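/*
 * The string-table key packs two 32-bit values: the root instruction ref and
 * "ref_and_op" (the fused ref scaled by sizeof(ir_ref) plus the operand
 * number, as built by the callers in assign_regs()), giving a unique 8-byte
 * key per fused operand; the assigned register is stored as the interned
 * value.
 */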
3865
3866static void assign_regs(ir_ctx *ctx)
3867{
3868 ir_ref i;
3869 ir_live_interval *ival, *top_ival;
3870 ir_use_pos *use_pos;
3871 int8_t reg, old_reg;
3872 ir_ref ref;
3873 ir_regset used_regs = 0;
3874
3875 if (!ctx->regs) {
3876 ctx->regs = ir_mem_malloc(sizeof(ir_regs) * ctx->insns_count);
3877 memset(ctx->regs, IR_REG_NONE, sizeof(ir_regs) * ctx->insns_count);
3878 }
3879
3880 if (!(ctx->flags2 & (IR_RA_HAVE_SPLITS|IR_RA_HAVE_SPILLS))) {
3881 for (i = 1; i <= ctx->vregs_count; i++) {
3882 ival = ctx->live_intervals[i];
3883 if (ival) {
3884 do {
3885 if (ival->reg != IR_REG_NONE) {
3886 reg = ival->reg;
3887 IR_REGSET_INCL(used_regs, reg);
3888 use_pos = ival->use_pos;
3889 while (use_pos) {
3890 ref = (use_pos->hint_ref < 0) ? -use_pos->hint_ref : IR_LIVE_POS_TO_REF(use_pos->pos);
3891 ir_set_alocated_reg(ctx, ref, use_pos->op_num, reg);
3892 use_pos = use_pos->next;
3893 }
3894 }
3895 ival = ival->next;
3896 } while (ival);
3897 }
3898 }
3899 } else {
3900 ir_bitset available = ir_bitset_malloc(ctx->cfg_blocks_count + 1);
3901
3902 for (i = 1; i <= ctx->vregs_count; i++) {
3903 top_ival = ival = ctx->live_intervals[i];
3904 if (ival) {
3905 if (!(ival->flags & IR_LIVE_INTERVAL_SPILLED)) {
3906 do {
3907 if (ival->reg != IR_REG_NONE) {
3908 IR_REGSET_INCL(used_regs, ival->reg);
3909 use_pos = ival->use_pos;
3910 while (use_pos) {
3911 reg = ival->reg;
3912 ref = IR_LIVE_POS_TO_REF(use_pos->pos);
3913 if (use_pos->hint_ref < 0) {
3914 ref = -use_pos->hint_ref;
3915 }
3916 ir_set_alocated_reg(ctx, ref, use_pos->op_num, reg);
3917
3918 use_pos = use_pos->next;
3919 }
3920 }
3921 ival = ival->next;
3922 } while (ival);
3923 } else {
3924 do {
3925 if (ival->reg != IR_REG_NONE) {
3926 ir_ref prev_use_ref = IR_UNUSED;
3927
3928 ir_bitset_clear(available, ir_bitset_len(ctx->cfg_blocks_count + 1));
3929 IR_REGSET_INCL(used_regs, ival->reg);
3930 use_pos = ival->use_pos;
3931 while (use_pos) {
3932 reg = ival->reg;
3933 ref = IR_LIVE_POS_TO_REF(use_pos->pos);
3934 // TODO: Insert spill loads and stores in optimal positions (resolution)
3935 if (use_pos->op_num == 0) {
3936 if ((ctx->ir_base[ref].op == IR_COPY
3937 || ctx->ir_base[ref].op == IR_BITCAST
3938 || ctx->ir_base[ref].op == IR_TRUNC)
3939 && !IR_IS_CONST_REF(ctx->ir_base[ref].op1)
3940 && ctx->vregs[ctx->ir_base[ref].op1] == (uint32_t)i) {
3941 /* register reuse */
3942 ir_set_alocated_reg(ctx, ref, use_pos->op_num, reg);
3943 prev_use_ref = ref;
3944 use_pos = use_pos->next;
3945 continue;
3946 }
3947 ir_bitset_clear(available, ir_bitset_len(ctx->cfg_blocks_count + 1));
3948 if (ctx->ir_base[ref].op == IR_PHI) {
3949 /* Spilled PHI var is passed through memory */
3950 reg = IR_REG_NONE;
3951 prev_use_ref = IR_UNUSED;
3952 } else if (ctx->ir_base[ref].op == IR_PARAM
3953 && (ival->flags & IR_LIVE_INTERVAL_MEM_PARAM)) {
3954 /* Stack PARAM var is passed through memory */
3955 reg = IR_REG_NONE;
3956 } else {
3957 uint32_t use_b = ctx->cfg_map[ref];
3958
3959 if (ir_ival_covers(ival, IR_SAVE_LIVE_POS_FROM_REF(ctx->cfg_blocks[use_b].end))) {
3960 ir_bitset_incl(available, use_b);
3961 }
3962 if (top_ival->flags & IR_LIVE_INTERVAL_SPILL_SPECIAL) {
3963 reg |= IR_REG_SPILL_SPECIAL;
3964 } else {
3965 reg |= IR_REG_SPILL_STORE;
3966 }
3967 prev_use_ref = ref;
3968 }
3969 } else if ((!prev_use_ref || ctx->cfg_map[prev_use_ref] != ctx->cfg_map[ref])
3970 && needs_spill_reload(ctx, ival, ctx->cfg_map[ref], available)) {
3971 if (!(use_pos->flags & IR_USE_MUST_BE_IN_REG)
3972 && use_pos->hint != reg
3973// && ctx->ir_base[ref].op != IR_CALL
3974// && ctx->ir_base[ref].op != IR_TAILCALL) {
3975 && ctx->ir_base[ref].op != IR_SNAPSHOT
3976 && !needs_spill_load(ctx, ival, use_pos)) {
3977 /* fuse spill load (valid only when register is not reused) */
3978 reg = IR_REG_NONE;
3979 if (use_pos->next
3980 && use_pos->op_num == 1
3981 && use_pos->next->pos == use_pos->pos
3982 && !(use_pos->next->flags & IR_USE_MUST_BE_IN_REG)) {
3983 /* Support for R2 = BINOP(R1, R1) */
3984 if (use_pos->hint_ref < 0) {
3985 ref = -use_pos->hint_ref;
3986 }
3987 ir_set_alocated_reg(ctx, ref, use_pos->op_num, reg);
3988 use_pos = use_pos->next;
3989 }
3990 } else {
3991 if (top_ival->flags & IR_LIVE_INTERVAL_SPILL_SPECIAL) {
3992 reg |= IR_REG_SPILL_SPECIAL;
3993 } else {
3994 reg |= IR_REG_SPILL_LOAD;
3995 }
3996 if (ctx->ir_base[ref].op != IR_SNAPSHOT && !(use_pos->flags & IR_PHI_USE)) {
3997 uint32_t use_b = ctx->cfg_map[ref];
3998
3999 if (ir_ival_covers(ival, IR_SAVE_LIVE_POS_FROM_REF(ctx->cfg_blocks[use_b].end))) {
4000 ir_bitset_incl(available, use_b);
4001 }
4002 prev_use_ref = ref;
4003 }
4004 }
4005 if (use_pos->hint_ref < 0
4006 && (old_reg = ir_get_alocated_reg(ctx, -use_pos->hint_ref, use_pos->op_num)) != IR_REG_NONE) {
4007 if (top_ival->flags & IR_LIVE_INTERVAL_SPILL_SPECIAL) {
4008 reg |= IR_REG_SPILL_SPECIAL;
4009 } else {
4010 reg |= IR_REG_SPILL_LOAD;
4011 }
4012 if (reg != old_reg) {
4013 IR_ASSERT(ctx->rules[-use_pos->hint_ref] & IR_FUSED);
4014 ctx->rules[-use_pos->hint_ref] |= IR_FUSED_REG;
4015 ir_set_fused_reg(ctx, ref, -use_pos->hint_ref * sizeof(ir_ref) + use_pos->op_num, reg);
4016 use_pos = use_pos->next;
4017 continue;
4018 }
4019 }
4020 } else if (use_pos->flags & IR_PHI_USE) {
4021 IR_ASSERT(use_pos->hint_ref < 0);
4022 IR_ASSERT(ctx->vregs[-use_pos->hint_ref]);
4023 IR_ASSERT(ctx->live_intervals[ctx->vregs[-use_pos->hint_ref]]);
4024 if (ctx->live_intervals[ctx->vregs[-use_pos->hint_ref]]->flags & IR_LIVE_INTERVAL_SPILLED) {
4025 /* Spilled PHI var is passed through memory */
4026 reg = IR_REG_NONE;
4027 }
4028 } else if (use_pos->hint_ref < 0
4029 && (old_reg = ir_get_alocated_reg(ctx, -use_pos->hint_ref, use_pos->op_num)) != IR_REG_NONE) {
4030 if (reg != old_reg) {
4031 IR_ASSERT(ctx->rules[-use_pos->hint_ref] & IR_FUSED);
4032 ctx->rules[-use_pos->hint_ref] |= IR_FUSED_REG;
4033 ir_set_fused_reg(ctx, ref, -use_pos->hint_ref * sizeof(ir_ref) + use_pos->op_num, reg);
4034 use_pos = use_pos->next;
4035 continue;
4036 }
4037 } else {
4038 /* reuse register without spill load */
4039 }
4040 if (use_pos->hint_ref < 0) {
4041 ref = -use_pos->hint_ref;
4042 }
4043 ir_set_alocated_reg(ctx, ref, use_pos->op_num, reg);
4044
4045 use_pos = use_pos->next;
4046 }
4047 } else if (!(top_ival->flags & IR_LIVE_INTERVAL_SPILL_SPECIAL)) {
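/* This part of the interval has no register at all: the value lives in its
 * stack spill slot. Only SNAPSHOT uses need a record here, so that
 * deoptimization metadata can locate the value on the stack. */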
4048 use_pos = ival->use_pos;
4049 while (use_pos) {
4050 ref = IR_LIVE_POS_TO_REF(use_pos->pos);
4051 if (ctx->ir_base[ref].op == IR_SNAPSHOT) {
4052 IR_ASSERT(use_pos->hint_ref >= 0);
 4053 /* A reference to a CPU spill slot */
 4054 reg = IR_REG_SPILL_STORE | IR_REG_STACK_POINTER;
 4055 ir_set_alocated_reg(ctx, ref, use_pos->op_num, reg);
4056 }
4057 use_pos = use_pos->next;
4058 }
4059 }
4060 ival = ival->next;
4061 } while (ival);
4062 }
4063 }
4064 }
4065 ir_mem_free(available);
4066 }
4067
4068 /* Temporary registers */
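/* live_intervals[0] chains the temporary intervals requested by the backend
 * via ir_get_target_constraints(); each one already carries a register.
 * Constant operands and local addresses (ALLOCA/VADDR results) are
 * rematerialized into the temporary rather than loaded from a spill slot;
 * the IR_REG_SPILL_LOAD bit below tells the emitter to do so. */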
4069 ival = ctx->live_intervals[0];
4070 if (ival) {
4071 do {
4072 IR_ASSERT(ival->reg != IR_REG_NONE);
4073 IR_REGSET_INCL(used_regs, ival->reg);
4074 reg = ival->reg;
4075 if (ival->tmp_op_num > 0) {
4076 ir_insn *insn = &ctx->ir_base[ival->tmp_ref];
4077
4078 if (ival->tmp_op_num <= insn->inputs_count) {
4079 ir_ref *ops = insn->ops;
4080 if (IR_IS_CONST_REF(ops[ival->tmp_op_num])) {
4081 /* constant rematerialization */
4082 reg |= IR_REG_SPILL_LOAD;
4083 } else if (ctx->ir_base[ops[ival->tmp_op_num]].op == IR_ALLOCA
4084 || ctx->ir_base[ops[ival->tmp_op_num]].op == IR_VADDR) {
4085 /* local address rematerialization */
4086 reg |= IR_REG_SPILL_LOAD;
4087 }
4088 }
4089 }
4090 ir_set_alocated_reg(ctx, ival->tmp_ref, ival->tmp_op_num, reg);
4091 ival = ival->next;
4092 } while (ival);
4093 }
4094
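/* Callee-saved bookkeeping: every preserved register that was actually
 * handed out must be saved/restored by the prologue/epilogue. With a fixed
 * frame the save set comes from the embedder (fixed_save_regset) and cannot
 * be grown here (see the TODO below). */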
4095 if (ctx->fixed_stack_frame_size != -1) {
4096 ctx->used_preserved_regs = (ir_regset)ctx->fixed_save_regset;
4097 if (IR_REGSET_DIFFERENCE(IR_REGSET_INTERSECTION(used_regs, IR_REGSET_PRESERVED),
4098 ctx->used_preserved_regs)) {
4099 // TODO: Preserved reg and fixed frame conflict ???
4100 // IR_ASSERT(0 && "Preserved reg and fixed frame conflict");
4101 }
4102 } else {
4103 ctx->used_preserved_regs = IR_REGSET_UNION((ir_regset)ctx->fixed_save_regset,
4104 IR_REGSET_DIFFERENCE(IR_REGSET_INTERSECTION(used_regs, IR_REGSET_PRESERVED),
4105 (ctx->flags & IR_FUNCTION) ? (ir_regset)ctx->fixed_regset : IR_REGSET_PRESERVED));
4106 }
4107
4108 ir_fix_stack_frame(ctx);
4109}
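/*
 * Note: the per-operand byte stored by ir_set_alocated_reg() above packs a
 * physical register number together with the spill flags (IR_REG_SPILL_LOAD /
 * IR_REG_SPILL_STORE / IR_REG_SPILL_SPECIAL). A backend consumer unpacks it
 * roughly like this (illustrative sketch only; use_physical_reg() is a
 * hypothetical helper):
 *
 *     int8_t r = ir_get_alocated_reg(ctx, ref, op_num);
 *     if (r != IR_REG_NONE) {
 *         if (IR_REG_SPILLED(r)) {
 *             // reload from / store to the interval's spill slot first
 *         }
 *         use_physical_reg(IR_REG_NUM(r));
 *     }
 */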
4110
 4111int ir_reg_alloc(ir_ctx *ctx)
 4112{
4113 if (ir_linear_scan(ctx)) {
4114 assign_regs(ctx);
4115 return 1;
4116 }
4117 return 0;
4118}
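/*
 * Typical driver sequence around this entry point (a sketch; the exact order
 * is chosen by the embedding compiler, not by this file):
 *
 *     ir_assign_virtual_registers(ctx);
 *     ir_compute_live_ranges(ctx);
 *     ir_coalesce(ctx);
 *     if (!ir_reg_alloc(ctx)) {
 *         // linear scan failed; registers remain unassigned
 *     }
 *
 * On success, linear scan has run, assign_regs() has recorded the packed
 * register bytes, and ir_fix_stack_frame() has finalized the frame layout.
 */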