php-internal-docs 8.4.8
Unofficial docs for php/php-src
Loading...
Searching...
No Matches
ir_emit.c
Go to the documentation of this file.
1/*
2 * IR - Lightweight JIT Compilation Framework
3 * (Native code generator based on DynAsm)
4 * Copyright (C) 2022 Zend by Perforce.
5 * Authors: Dmitry Stogov <dmitry@php.net>
6 */
7
8#include "ir.h"
9
10#if defined(IR_TARGET_X86) || defined(IR_TARGET_X64)
11# include "ir_x86.h"
12#elif defined(IR_TARGET_AARCH64)
13# include "ir_aarch64.h"
14#else
15# error "Unknown IR target"
16#endif
17
18#include "ir_private.h"
19#ifndef _WIN32
20# include <dlfcn.h>
21#else
22# define WIN32_LEAN_AND_MEAN
23# include <windows.h>
24# include <psapi.h>
25#endif
26
27#if defined(__linux__) || defined(__sun)
28# include <alloca.h>
29#endif
30
/*
 * DynAsm buffer-growth hook: doubles the buffer size (starting from a
 * 16-byte minimum) until it can hold `need` bytes, reallocating through
 * the IR allocator. `ctx` and `t` follow the DynAsm DASM_M_GROW contract.
 */
#define DASM_M_GROW(ctx, t, p, sz, need) \
	do { \
		size_t _sz = (sz), _need = (need); \
		if (_sz < _need) { \
			if (_sz < 16) _sz = 16; \
			while (_sz < _need) _sz += _sz; \
			(p) = (t *)ir_mem_realloc((p), _sz); \
			(sz) = _sz; \
		} \
	} while(0)

/* DynAsm memory-release hook: delegate to the IR allocator (ctx/sz unused). */
#define DASM_M_FREE(ctx, p, sz) ir_mem_free(p)
43
44#ifdef IR_DEBUG
45# define DASM_CHECKS
46#endif
47
48typedef struct _ir_copy {
50 ir_reg from;
51 ir_reg to;
53
54typedef struct _ir_dessa_copy {
56 int32_t from; /* negative - constant ref, [0..IR_REG_NUM) - CPU reg, [IR_REG_NUM...) - virtual reg */
57 int32_t to; /* [0..IR_REG_NUM) - CPU reg, [IR_REG_NUM...) - virtual reg */
59
60#if IR_REG_INT_ARGS
61static const int8_t _ir_int_reg_params[IR_REG_INT_ARGS];
62#else
63static const int8_t *_ir_int_reg_params;
64#endif
65#if IR_REG_FP_ARGS
66static const int8_t _ir_fp_reg_params[IR_REG_FP_ARGS];
67#else
68static const int8_t *_ir_fp_reg_params;
69#endif
70
71static const ir_proto_t *ir_call_proto(const ir_ctx *ctx, ir_insn *insn)
72{
73 if (IR_IS_CONST_REF(insn->op2)) {
74 const ir_insn *func = &ctx->ir_base[insn->op2];
75
76 if (func->op == IR_FUNC || func->op == IR_FUNC_ADDR) {
77 if (func->proto) {
78 return (const ir_proto_t *)ir_get_str(ctx, func->proto);
79 }
80 }
81 } else if (ctx->ir_base[insn->op2].op == IR_PROTO) {
82 return (const ir_proto_t *)ir_get_str(ctx, ctx->ir_base[insn->op2].op2);
83 }
84 return NULL;
85}
86
87#ifdef IR_HAVE_FASTCALL
88static const int8_t _ir_int_fc_reg_params[IR_REG_INT_FCARGS];
89static const int8_t *_ir_fp_fc_reg_params;
90
91bool ir_is_fastcall(const ir_ctx *ctx, const ir_insn *insn)
92{
93 if (sizeof(void*) == 4) {
94 if (IR_IS_CONST_REF(insn->op2)) {
95 const ir_insn *func = &ctx->ir_base[insn->op2];
96
97 if (func->op == IR_FUNC || func->op == IR_FUNC_ADDR) {
98 if (func->proto) {
99 const ir_proto_t *proto = (const ir_proto_t *)ir_get_str(ctx, func->proto);
100
101 return (proto->flags & IR_FASTCALL_FUNC) != 0;
102 }
103 }
104 } else if (ctx->ir_base[insn->op2].op == IR_PROTO) {
105 const ir_proto_t *proto = (const ir_proto_t *)ir_get_str(ctx, ctx->ir_base[insn->op2].op2);
106
107 return (proto->flags & IR_FASTCALL_FUNC) != 0;
108 }
109 return 0;
110 }
111 return 0;
112}
113#else
/* Fastcall is not supported on this target: all calls use the default ABI. */
bool ir_is_fastcall(const ir_ctx *ctx, const ir_insn *insn)
{
	return 0;
}
118#endif
119
120bool ir_is_vararg(const ir_ctx *ctx, ir_insn *insn)
121{
122 const ir_proto_t *proto = ir_call_proto(ctx, insn);
123
124 if (proto) {
125 return (proto->flags & IR_VARARG_FUNC) != 0;
126 }
127 return 0;
128}
129
/* Return the matcher rule previously assigned to instruction `ref`.
 * NOTE(review): one line (orig. 132) is missing from this extraction just
 * before the return — presumably an assert that `ref` is not a constant
 * reference; confirm against upstream. */
IR_ALWAYS_INLINE uint32_t ir_rule(const ir_ctx *ctx, ir_ref ref)
{
	return ctx->rules[ref];
}
135
137{
138 return ref > ctx->bb_start;
139}
140
141
/*
 * Return the CPU register that carries function parameter `ref` on entry,
 * or IR_REG_NONE when that parameter is passed on the stack.
 *
 * Walks the uses of the start instruction (ref 1); IR_PARAM uses appear in
 * declaration order, so counting int/fp parameters up to `ref` gives its
 * position within the register-passed arguments of the active ABI.
 */
static ir_reg ir_get_param_reg(const ir_ctx *ctx, ir_ref ref)
{
	ir_use_list *use_list = &ctx->use_lists[1];
	int i;
	ir_ref use, *p;
	ir_insn *insn;
	int int_param = 0;
	int fp_param = 0;
	int int_reg_params_count = IR_REG_INT_ARGS;
	int fp_reg_params_count = IR_REG_FP_ARGS;
	const int8_t *int_reg_params = _ir_int_reg_params;
	const int8_t *fp_reg_params = _ir_fp_reg_params;

#ifdef IR_HAVE_FASTCALL
	/* 32-bit fastcall functions use a different (smaller) register set. */
	if (sizeof(void*) == 4 && (ctx->flags & IR_FASTCALL_FUNC)) {
		int_reg_params_count = IR_REG_INT_FCARGS;
		fp_reg_params_count = IR_REG_FP_FCARGS;
		int_reg_params = _ir_int_fc_reg_params;
		fp_reg_params = _ir_fp_fc_reg_params;
	}
#endif

	for (i = use_list->count, p = &ctx->use_edges[use_list->refs]; i > 0; p++, i--) {
		use = *p;
		insn = &ctx->ir_base[use];
		if (insn->op == IR_PARAM) {
			if (IR_IS_TYPE_INT(insn->type)) {
				if (use == ref) {
					if (int_param < int_reg_params_count) {
						return int_reg_params[int_param];
					} else {
						return IR_REG_NONE;
					}
				}
				int_param++;
#ifdef _WIN64
				/* WIN64 calling convention uses a common counter for int and fp registers */
				fp_param++;
#endif
			} else {
				IR_ASSERT(IR_IS_TYPE_FP(insn->type));
				if (use == ref) {
					if (fp_param < fp_reg_params_count) {
						return fp_reg_params[fp_param];
					} else {
						return IR_REG_NONE;
					}
				}
				fp_param++;
#ifdef _WIN64
				/* WIN64 calling convention uses a common counter for int and fp registers */
				int_param++;
#endif
			}
		}
	}
	return IR_REG_NONE;
}
200
201static int ir_get_args_regs(const ir_ctx *ctx, const ir_insn *insn, int8_t *regs)
202{
203 int j, n;
205 int int_param = 0;
206 int fp_param = 0;
207 int count = 0;
208 int int_reg_params_count = IR_REG_INT_ARGS;
209 int fp_reg_params_count = IR_REG_FP_ARGS;
210 const int8_t *int_reg_params = _ir_int_reg_params;
211 const int8_t *fp_reg_params = _ir_fp_reg_params;
212
213#ifdef IR_HAVE_FASTCALL
214 if (sizeof(void*) == 4 && ir_is_fastcall(ctx, insn)) {
215 int_reg_params_count = IR_REG_INT_FCARGS;
216 fp_reg_params_count = IR_REG_FP_FCARGS;
217 int_reg_params = _ir_int_fc_reg_params;
218 fp_reg_params = _ir_fp_fc_reg_params;
219 }
220#endif
221
222 n = insn->inputs_count;
223 n = IR_MIN(n, IR_MAX_REG_ARGS + 2);
224 for (j = 3; j <= n; j++) {
225 type = ctx->ir_base[ir_insn_op(insn, j)].type;
226 if (IR_IS_TYPE_INT(type)) {
227 if (int_param < int_reg_params_count) {
228 regs[j] = int_reg_params[int_param];
229 count = j + 1;
230 } else {
231 regs[j] = IR_REG_NONE;
232 }
233 int_param++;
234#ifdef _WIN64
235 /* WIN64 calling convention use common couter for int and fp registers */
236 fp_param++;
237#endif
238 } else {
240 if (fp_param < fp_reg_params_count) {
241 regs[j] = fp_reg_params[fp_param];
242 count = j + 1;
243 } else {
244 regs[j] = IR_REG_NONE;
245 }
246 fp_param++;
247#ifdef _WIN64
248 /* WIN64 calling convention use common couter for int and fp registers */
249 int_param++;
250#endif
251 }
252 }
253 return count;
254}
255
256static bool ir_is_same_mem_var(const ir_ctx *ctx, ir_ref r1, int32_t offset)
257{
258 ir_live_interval *ival1;
259 int32_t o1;
260
261 if (IR_IS_CONST_REF(r1)) {
262 return 0;
263 }
264
265 IR_ASSERT(ctx->vregs[r1]);
266 ival1 = ctx->live_intervals[ctx->vregs[r1]];
267 IR_ASSERT(ival1);
268 o1 = ival1->stack_spill_pos;
269 IR_ASSERT(o1 != -1);
270 return o1 == offset;
271}
272
273void *ir_resolve_sym_name(const char *name)
274{
275 void *addr;
276
277#ifndef _WIN32
278 void *handle = NULL;
279# ifdef RTLD_DEFAULT
281# endif
282 addr = dlsym(handle, name);
283#else
284 HMODULE mods[256];
285 DWORD cbNeeded;
286 uint32_t i = 0;
287
288 addr = NULL;
289
290 EnumProcessModules(GetCurrentProcess(), mods, sizeof(mods), &cbNeeded);
291
292 while(i < (cbNeeded / sizeof(HMODULE))) {
293 addr = GetProcAddress(mods[i], name);
294 if (addr) {
295 return addr;
296 }
297 i++;
298 }
299#endif
300 return addr;
301}
302
303#ifdef IR_SNAPSHOT_HANDLER_DCL
305#endif
306
307#if defined(IR_TARGET_X86) || defined(IR_TARGET_X64)
308static void* ir_sym_addr(ir_ctx *ctx, const ir_insn *addr_insn)
309{
310 const char *name = ir_get_str(ctx, addr_insn->val.name);
311 void *addr = (ctx->loader && ctx->loader->resolve_sym_name) ?
312 ctx->loader->resolve_sym_name(ctx->loader, name, 0) :
314
315 return addr;
316}
317#endif
318
319static void* ir_sym_val(ir_ctx *ctx, const ir_insn *addr_insn)
320{
321 const char *name = ir_get_str(ctx, addr_insn->val.name);
322 void *addr = (ctx->loader && ctx->loader->resolve_sym_name) ?
323 ctx->loader->resolve_sym_name(ctx->loader, name, addr_insn->op == IR_FUNC) :
325
327 return addr;
328}
329
330static void *ir_call_addr(ir_ctx *ctx, ir_insn *insn, ir_insn *addr_insn)
331{
332 void *addr;
333
334 IR_ASSERT(addr_insn->type == IR_ADDR);
335 if (addr_insn->op == IR_FUNC) {
336 addr = ir_sym_val(ctx, addr_insn);
337 } else {
338 IR_ASSERT(addr_insn->op == IR_ADDR || addr_insn->op == IR_FUNC_ADDR);
339 addr = (void*)addr_insn->val.addr;
340 }
341 return addr;
342}
343
/* Resolve the target of a tail-call/jump. When the jump is dominated by a
 * SNAPSHOT instruction and the embedder defines IR_SNAPSHOT_HANDLER, the
 * handler may substitute the target address (e.g. for deoptimization). */
static void *ir_jmp_addr(ir_ctx *ctx, ir_insn *insn, ir_insn *addr_insn)
{
	void *addr = ir_call_addr(ctx, insn, addr_insn);

#ifdef IR_SNAPSHOT_HANDLER
	if (ctx->ir_base[insn->op1].op == IR_SNAPSHOT) {
		addr = IR_SNAPSHOT_HANDLER(ctx, insn->op1, &ctx->ir_base[insn->op1], addr);
	}
#endif
	return addr;
}
355
/*
 * Return the register allocated to operand `ref_and_op` of a fused
 * instruction tree rooted at `root`. If the register allocator recorded an
 * override in ctx->fused_regs — keyed by the 8-byte pair {root, ref_and_op}
 * (ir_ref is int32_t, hence the offset 4 and length 8) — that value wins;
 * otherwise fall back to the flat ctx->regs table.
 */
static int8_t ir_get_fused_reg(ir_ctx *ctx, ir_ref root, ir_ref ref_and_op)
{
	if (ctx->fused_regs) {
		char key[10];
		ir_ref val;

		memcpy(key, &root, sizeof(ir_ref));
		memcpy(key + 4, &ref_and_op, sizeof(ir_ref));

		val = ir_strtab_find(ctx->fused_regs, key, 8);
		if (val) {
			return val;
		}
	}
	return ((int8_t*)ctx->regs)[ref_and_op];
}
372
373#if defined(__GNUC__)
374# pragma GCC diagnostic push
375# pragma GCC diagnostic ignored "-Warray-bounds"
376# pragma GCC diagnostic ignored "-Wimplicit-fallthrough"
377#endif
378
379#if defined(IR_TARGET_X86) || defined(IR_TARGET_X64)
380# include "dynasm/dasm_proto.h"
381# include "dynasm/dasm_x86.h"
382#elif defined(IR_TARGET_AARCH64)
383# include "dynasm/dasm_proto.h"
384static int ir_add_veneer(dasm_State *Dst, void *buffer, uint32_t ins, int *b, uint32_t *cp, ptrdiff_t offset);
385# define DASM_ADD_VENEER ir_add_veneer
386# include "dynasm/dasm_arm64.h"
387#else
388# error "Unknown IR target"
389#endif
390
391#if defined(__GNUC__)
392# pragma GCC diagnostic pop
393#endif
394
395/* Forward Declarations */
396static void ir_emit_osr_entry_loads(ir_ctx *ctx, int b, ir_block *bb);
397static int ir_parallel_copy(ir_ctx *ctx, ir_copy *copies, int count, ir_reg tmp_reg, ir_reg tmp_fp_reg);
398static void ir_emit_dessa_moves(ir_ctx *ctx, int b, ir_block *bb);
399
406
407static int ir_const_label(ir_ctx *ctx, ir_ref ref)
408{
410 int label = ctx->cfg_blocks_count - ref;
411
413 ir_bitset_incl(data->emit_constants, -ref);
414 return label;
415}
416
417#if defined(IR_TARGET_X86) || defined(IR_TARGET_X64)
418# include "ir_emit_x86.h"
419#elif defined(IR_TARGET_AARCH64)
420# include "ir_emit_aarch64.h"
421#else
422# error "Unknown IR target"
423#endif
424
/*
 * Emit register reloads for an OSR (on-stack replacement) entry block.
 * ctx->osr_entry_loads is a flat list of records {block_id, count, ref...}
 * terminated by a 0 block id. Locate the record for block `b`, then reload
 * every listed value that was NOT spilled from its binding stack slot into
 * its allocated register.
 */
static IR_NEVER_INLINE void ir_emit_osr_entry_loads(ir_ctx *ctx, int b, ir_block *bb)
{
	ir_list *list = (ir_list*)ctx->osr_entry_loads;
	int pos = 0, count, i;
	ir_ref ref;

	IR_ASSERT(ctx->binding);
	IR_ASSERT(list);
	/* Scan the records until the one for block `b` is found. */
	while (1) {
		i = ir_list_at(list, pos);
		if (b == i) {
			break;
		}
		IR_ASSERT(i != 0); /* end marker */
		pos++;
		count = ir_list_at(list, pos);
		pos += count + 1;
	}
	pos++;
	count = ir_list_at(list, pos);
	pos++;

	for (i = 0; i < count; i++, pos++) {
		ref = ir_list_at(list, pos);
		IR_ASSERT(ref >= 0 && ctx->vregs[ref] && ctx->live_intervals[ctx->vregs[ref]]);
		if (!(ctx->live_intervals[ctx->vregs[ref]]->flags & IR_LIVE_INTERVAL_SPILLED)) {
			/* not spilled */
			ir_reg reg = ctx->live_intervals[ctx->vregs[ref]]->reg;
			ir_type type = ctx->ir_base[ref].type;
			int32_t offset = -ir_binding_find(ctx, ref);

			IR_ASSERT(offset > 0);
			ir_emit_load_mem(ctx, type, reg, IR_MEM_BO(ctx->spill_base, offset));
		} else {
			/* spilled: the value already lives in memory, nothing to load.
			 * NOTE(review): one line is missing here in the extraction
			 * (orig. line 459) — presumably a comment or assert; confirm
			 * against upstream. */
		}
	}
}
463
464/*
465 * Parallel copy sequentialization algorithm
466 *
 * The implementation is based on algorithm 1 described in
468 * "Revisiting Out-of-SSA Translation for Correctness, Code Quality and Efficiency",
469 * Benoit Boissinot, Alain Darte, Fabrice Rastello, Benoit Dupont de Dinechin, Christophe Guillon.
470 * 2009 International Symposium on Code Generation and Optimization, Seattle, WA, USA, 2009,
471 * pp. 114-125, doi: 10.1109/CGO.2009.19.
472 */
473static int ir_parallel_copy(ir_ctx *ctx, ir_copy *copies, int count, ir_reg tmp_reg, ir_reg tmp_fp_reg)
474{
475 int i;
476 int8_t *pred, *loc, *types;
477 ir_reg to, from;
479 ir_regset todo, ready, srcs;
480
481 if (count == 1) {
482 to = copies[0].to;
483 from = copies[0].from;
484 IR_ASSERT(from != to);
485 type = copies[0].type;
486 if (IR_IS_TYPE_INT(type)) {
487 ir_emit_mov(ctx, type, to, from);
488 } else {
489 ir_emit_fp_mov(ctx, type, to, from);
490 }
491 return 1;
492 }
493
494 loc = alloca(IR_REG_NUM * 3 * sizeof(int8_t));
495 pred = loc + IR_REG_NUM;
496 types = pred + IR_REG_NUM;
497 todo = IR_REGSET_EMPTY;
498 srcs = IR_REGSET_EMPTY;
499
500 for (i = 0; i < count; i++) {
501 from = copies[i].from;
502 to = copies[i].to;
503 IR_ASSERT(from != to);
504 IR_REGSET_INCL(srcs, from);
505 loc[from] = from;
506 pred[to] = from;
507 types[from] = copies[i].type;
508 IR_ASSERT(!IR_REGSET_IN(todo, to));
509 IR_REGSET_INCL(todo, to);
510 }
511
512 ready = IR_REGSET_DIFFERENCE(todo, srcs);
513
514 if (ready == todo) {
515 for (i = 0; i < count; i++) {
516 from = copies[i].from;
517 to = copies[i].to;
518 IR_ASSERT(from != to);
519 type = copies[i].type;
520 if (IR_IS_TYPE_INT(type)) {
521 ir_emit_mov(ctx, type, to, from);
522 } else {
523 ir_emit_fp_mov(ctx, type, to, from);
524 }
525 }
526 return 1;
527 }
528
529 /* temporary registers can't be the same as some of the destinations */
530 IR_ASSERT(tmp_reg == IR_REG_NONE || !IR_REGSET_IN(todo, tmp_reg));
531 IR_ASSERT(tmp_fp_reg == IR_REG_NONE || !IR_REGSET_IN(todo, tmp_fp_reg));
532
533 /* first we resolve all "windmill blades" - trees (this doesn't requre temporary registers) */
534 while (ready != IR_REGSET_EMPTY) {
535 ir_reg r;
536
537 to = ir_regset_pop_first(&ready);
538 from = pred[to];
539 r = loc[from];
540 type = types[from];
541 if (IR_IS_TYPE_INT(type)) {
542 ir_emit_mov_ext(ctx, type, to, r);
543 } else {
544 ir_emit_fp_mov(ctx, type, to, r);
545 }
546 IR_REGSET_EXCL(todo, to);
547 loc[from] = to;
548 if (from == r && IR_REGSET_IN(todo, from)) {
549 IR_REGSET_INCL(ready, from);
550 }
551 }
552 if (todo == IR_REGSET_EMPTY) {
553 return 1;
554 }
555
556 /* at this point the sources that are the same as temoraries are already moved */
557 IR_ASSERT(tmp_reg == IR_REG_NONE || !IR_REGSET_IN(srcs, tmp_reg) || pred[loc[tmp_reg]] == tmp_reg);
558 IR_ASSERT(tmp_fp_reg == IR_REG_NONE || !IR_REGSET_IN(srcs, tmp_fp_reg) || pred[loc[tmp_fp_reg]] == tmp_fp_reg);
559
560 /* now we resolve all "windmill axles" - cycles (this reuires temporary registers) */
561 while (todo != IR_REGSET_EMPTY) {
562 to = ir_regset_pop_first(&todo);
563 from = pred[to];
564 IR_ASSERT(to != loc[from]);
565 type = types[from];
566 if (IR_IS_TYPE_INT(type)) {
567#ifdef IR_HAVE_SWAP_INT
568 if (pred[from] == to) {
569 if (ir_type_size[types[to]] > ir_type_size[type]) {
570 type = types[to];
571 }
572 ir_emit_swap(ctx, type, to, from);
573 IR_REGSET_EXCL(todo, from);
574 loc[to] = from;
575 loc[from] = to;
576 continue;
577 }
578#endif
579 IR_ASSERT(tmp_reg != IR_REG_NONE);
580 IR_ASSERT(tmp_reg >= IR_REG_GP_FIRST && tmp_reg <= IR_REG_GP_LAST);
581 ir_emit_mov(ctx, type, tmp_reg, to);
582 loc[to] = tmp_reg;
583 } else {
584#ifdef IR_HAVE_SWAP_FP
585 if (pred[from] == to && types[to] == type) {
586 ir_emit_swap_fp(ctx, type, to, from);
587 IR_REGSET_EXCL(todo, from);
588 loc[to] = from;
589 loc[from] = to;
590 continue;
591 }
592#endif
593 IR_ASSERT(tmp_fp_reg != IR_REG_NONE);
594 IR_ASSERT(tmp_fp_reg >= IR_REG_FP_FIRST && tmp_fp_reg <= IR_REG_FP_LAST);
595 ir_emit_fp_mov(ctx, type, tmp_fp_reg, to);
596 loc[to] = tmp_fp_reg;
597 }
598 while (1) {
599 ir_reg r;
600
601 from = pred[to];
602 r = loc[from];
603 type = types[from];
604 if (IR_IS_TYPE_INT(type)) {
605 ir_emit_mov_ext(ctx, type, to, r);
606 } else {
607 ir_emit_fp_mov(ctx, type, to, r);
608 }
609 IR_REGSET_EXCL(todo, to);
610 loc[from] = to;
611 if (from == r && IR_REGSET_IN(todo, from)) {
612 to = from;
613 } else {
614 break;
615 }
616 }
617 }
618
619 return 1;
620}
621
622static void ir_emit_dessa_move(ir_ctx *ctx, ir_type type, ir_ref to, ir_ref from, ir_reg tmp_reg, ir_reg tmp_fp_reg)
623{
624 ir_mem mem_from, mem_to;
625
626 IR_ASSERT(from != to);
627 if (to < IR_REG_NUM) {
628 if (IR_IS_CONST_REF(from)) {
629 if (-from < ctx->consts_count) {
630 /* constant reference */
631 ir_emit_load(ctx, type, to, from);
632 } else {
633 /* local variable address */
634 ir_load_local_addr(ctx, to, -from - ctx->consts_count);
635 }
636 } else if (from < IR_REG_NUM) {
637 if (IR_IS_TYPE_INT(type)) {
638 ir_emit_mov(ctx, type, to, from);
639 } else {
640 ir_emit_fp_mov(ctx, type, to, from);
641 }
642 } else {
643 mem_from = ir_vreg_spill_slot(ctx, from - IR_REG_NUM);
644 ir_emit_load_mem(ctx, type, to, mem_from);
645 }
646 } else {
647 mem_to = ir_vreg_spill_slot(ctx, to - IR_REG_NUM);
648 if (IR_IS_CONST_REF(from)) {
649 if (-from < ctx->consts_count) {
650 /* constant reference */
651#if defined(IR_TARGET_X86) || defined(IR_TARGET_X64)
653 && !IR_IS_SYM_CONST(ctx->ir_base[from].op)
654 && (ir_type_size[type] != 8 || IR_IS_SIGNED_32BIT(ctx->ir_base[from].val.i64))) {
655 ir_emit_store_mem_imm(ctx, type, mem_to, ctx->ir_base[from].val.i32);
656 return;
657 }
658#endif
659 ir_reg tmp = IR_IS_TYPE_INT(type) ? tmp_reg : tmp_fp_reg;
660 IR_ASSERT(tmp != IR_REG_NONE);
661 ir_emit_load(ctx, type, tmp, from);
662 ir_emit_store_mem(ctx, type, mem_to, tmp);
663 } else {
664 /* local variable address */
666 IR_ASSERT(tmp_reg != IR_REG_NONE);
667 ir_load_local_addr(ctx, tmp_reg, -from - ctx->consts_count);
668 ir_emit_store_mem(ctx, type, mem_to, tmp_reg);
669 }
670 } else if (from < IR_REG_NUM) {
671 ir_emit_store_mem(ctx, type, mem_to, from);
672 } else {
673 mem_from = ir_vreg_spill_slot(ctx, from - IR_REG_NUM);
674 IR_ASSERT(IR_MEM_VAL(mem_to) != IR_MEM_VAL(mem_from));
675 ir_reg tmp = IR_IS_TYPE_INT(type) ? tmp_reg : tmp_fp_reg;
676 IR_ASSERT(tmp != IR_REG_NONE);
677 ir_emit_load_mem(ctx, type, tmp, mem_from);
678 ir_emit_store_mem(ctx, type, mem_to, tmp);
679 }
680 }
681}
682
683IR_ALWAYS_INLINE void ir_dessa_resolve_cycle(ir_ctx *ctx, int32_t *pred, int32_t *loc, int8_t *types, ir_bitset todo, int32_t to, ir_reg tmp_reg, ir_reg tmp_fp_reg)
684{
685 ir_ref from;
686 ir_mem tmp_spill_slot;
688
689 IR_MEM_VAL(tmp_spill_slot) = 0;
691 from = pred[to];
692 type = types[from];
694 IR_ASSERT(from != to);
695 IR_ASSERT(loc[from] == from);
696
697 if (IR_IS_TYPE_INT(type)) {
698#ifdef IR_HAVE_SWAP_INT
699 if (pred[from] == to && to < IR_REG_NUM && from < IR_REG_NUM) {
700 /* a simple cycle from 2 elements */
701 if (ir_type_size[types[to]] > ir_type_size[type]) {
702 type = types[to];
703 }
704 ir_emit_swap(ctx, type, to, from);
705 ir_bitset_excl(todo, from);
706 ir_bitset_excl(todo, to);
707 loc[to] = from;
708 loc[from] = to;
709 return;
710 }
711#endif
712 IR_ASSERT(tmp_reg != IR_REG_NONE);
713 IR_ASSERT(tmp_reg >= IR_REG_GP_FIRST && tmp_reg <= IR_REG_GP_LAST);
714 loc[to] = tmp_reg;
715 if (to < IR_REG_NUM) {
716 ir_emit_mov(ctx, type, tmp_reg, to);
717 } else {
718 ir_emit_load_mem_int(ctx, type, tmp_reg, ir_vreg_spill_slot(ctx, to - IR_REG_NUM));
719 }
720 } else {
721#ifdef IR_HAVE_SWAP_FP
722 if (pred[from] == to && to < IR_REG_NUM && from < IR_REG_NUM && types[to] == type) {
723 /* a simple cycle from 2 elements */
724 ir_emit_swap_fp(ctx, type, to, from);
725 IR_REGSET_EXCL(todo, from);
726 IR_REGSET_EXCL(todo, to);
727 loc[to] = from;
728 loc[from] = to;
729 return;
730 }
731#endif
732 IR_ASSERT(tmp_fp_reg != IR_REG_NONE);
733 IR_ASSERT(tmp_fp_reg >= IR_REG_FP_FIRST && tmp_fp_reg <= IR_REG_FP_LAST);
734 loc[to] = tmp_fp_reg;
735 if (to < IR_REG_NUM) {
736 ir_emit_fp_mov(ctx, type, tmp_fp_reg, to);
737 } else {
738 ir_emit_load_mem_fp(ctx, type, tmp_fp_reg, ir_vreg_spill_slot(ctx, to - IR_REG_NUM));
739 }
740 }
741
742 while (1) {
743 int32_t r;
744
745 from = pred[to];
746 r = loc[from];
747 type = types[to];
748
749 if (from == r && ir_bitset_in(todo, from)) {
750 /* Memory to memory move inside an isolated or "blocked" cycle requres an additional temporary register */
751 if (to >= IR_REG_NUM && r >= IR_REG_NUM) {
752 ir_reg tmp = IR_IS_TYPE_INT(type) ? tmp_reg : tmp_fp_reg;
753
754 if (!IR_MEM_VAL(tmp_spill_slot)) {
755 /* Free a register, saving it in a temporary spill slot */
756 tmp_spill_slot = IR_MEM_BO(IR_REG_STACK_POINTER, -16);
757 ir_emit_store_mem(ctx, type, tmp_spill_slot, tmp);
758 }
759 ir_emit_dessa_move(ctx, type, to, r, tmp_reg, tmp_fp_reg);
760 } else {
761 ir_emit_dessa_move(ctx, type, to, r, IR_REG_NONE, IR_REG_NONE);
762 }
763 ir_bitset_excl(todo, to);
764 loc[from] = to;
765 to = from;
766 } else {
767 break;
768 }
769 }
770
771 type = types[to];
772 if (IR_MEM_VAL(tmp_spill_slot)) {
773 ir_emit_load_mem(ctx, type, IR_IS_TYPE_INT(type) ? tmp_reg : tmp_fp_reg, tmp_spill_slot);
774 }
775 ir_emit_dessa_move(ctx, type, to, loc[from], IR_REG_NONE, IR_REG_NONE);
776 ir_bitset_excl(todo, to);
777 loc[from] = to;
778}
779
780static int ir_dessa_parallel_copy(ir_ctx *ctx, ir_dessa_copy *copies, int count, ir_reg tmp_reg, ir_reg tmp_fp_reg)
781{
782 int i;
783 int32_t *pred, *loc, to, from;
784 int8_t *types;
786 uint32_t len;
787 ir_bitset todo, ready, srcs, visited;
788
789 if (count == 1) {
790 to = copies[0].to;
791 from = copies[0].from;
792 IR_ASSERT(from != to);
793 type = copies[0].type;
794 ir_emit_dessa_move(ctx, type, to, from, tmp_reg, tmp_fp_reg);
795 return 1;
796 }
797
798 len = IR_REG_NUM + ctx->vregs_count + 1;
799 todo = ir_bitset_malloc(len);
800 srcs = ir_bitset_malloc(len);
801 loc = ir_mem_malloc(len * 2 * sizeof(int32_t) + len * sizeof(int8_t));
802 pred = loc + len;
803 types = (int8_t*)(pred + len);
804
805 for (i = 0; i < count; i++) {
806 from = copies[i].from;
807 to = copies[i].to;
808 IR_ASSERT(from != to);
809 if (!IR_IS_CONST_REF(from)) {
810 ir_bitset_incl(srcs, from);
811 loc[from] = from;
812 }
813 pred[to] = from;
814 types[to] = copies[i].type;
815 IR_ASSERT(!ir_bitset_in(todo, to));
816 ir_bitset_incl(todo, to);
817 }
818
819 /* temporary registers can't be the same as some of the sources */
820 IR_ASSERT(tmp_reg == IR_REG_NONE || !ir_bitset_in(srcs, tmp_reg));
821 IR_ASSERT(tmp_fp_reg == IR_REG_NONE || !ir_bitset_in(srcs, tmp_fp_reg));
822
823 /* first we resolve all "windmill blades" - trees, that don't set temporary registers */
824 ready = ir_bitset_malloc(len);
825 ir_bitset_copy(ready, todo, ir_bitset_len(len));
827 if (tmp_reg != IR_REG_NONE) {
828 ir_bitset_excl(ready, tmp_reg);
829 }
830 if (tmp_fp_reg != IR_REG_NONE) {
831 ir_bitset_excl(ready, tmp_fp_reg);
832 }
833 while ((to = ir_bitset_pop_first(ready, ir_bitset_len(len))) >= 0) {
834 ir_bitset_excl(todo, to);
835 type = types[to];
836 from = pred[to];
837 if (IR_IS_CONST_REF(from)) {
838 ir_emit_dessa_move(ctx, type, to, from, tmp_reg, tmp_fp_reg);
839 } else {
840 int32_t r = loc[from];
841 ir_emit_dessa_move(ctx, type, to, r, tmp_reg, tmp_fp_reg);
842 loc[from] = to;
843 if (from == r && ir_bitset_in(todo, from) && from != tmp_reg && from != tmp_fp_reg) {
844 ir_bitset_incl(ready, from);
845 }
846 }
847 }
848
849 /* then we resolve all "windmill axles" - cycles (this requres temporary registers) */
850 visited = ir_bitset_malloc(len);
851 ir_bitset_copy(ready, todo, ir_bitset_len(len));
853 while ((to = ir_bitset_first(ready, ir_bitset_len(len))) >= 0) {
855 ir_bitset_incl(visited, to);
856 to = pred[to];
857 while (!IR_IS_CONST_REF(to) && ir_bitset_in(ready, to)) {
858 to = pred[to];
859 if (IR_IS_CONST_REF(to)) {
860 break;
861 } else if (ir_bitset_in(visited, to)) {
862 /* We found a cycle. Resolve it. */
863 ir_bitset_incl(visited, to);
864 ir_dessa_resolve_cycle(ctx, pred, loc, types, todo, to, tmp_reg, tmp_fp_reg);
865 break;
866 }
867 ir_bitset_incl(visited, to);
868 }
869 ir_bitset_difference(ready, visited, ir_bitset_len(len));
870 }
871
872 /* finally we resolve remaining "windmill blades" - trees that set temporary registers */
873 ir_bitset_copy(ready, todo, ir_bitset_len(len));
875 while ((to = ir_bitset_pop_first(ready, ir_bitset_len(len))) >= 0) {
876 ir_bitset_excl(todo, to);
877 type = types[to];
878 from = pred[to];
879 if (IR_IS_CONST_REF(from)) {
880 ir_emit_dessa_move(ctx, type, to, from, tmp_reg, tmp_fp_reg);
881 } else {
882 int32_t r = loc[from];
883 ir_emit_dessa_move(ctx, type, to, r, tmp_reg, tmp_fp_reg);
884 loc[from] = to;
885 if (from == r && ir_bitset_in(todo, from)) {
886 ir_bitset_incl(ready, from);
887 }
888 }
889 }
890
892
893 ir_mem_free(visited);
894 ir_mem_free(ready);
895 ir_mem_free(loc);
896 ir_mem_free(srcs);
897 ir_mem_free(todo);
898 return 1;
899}
900
/*
 * Emit the de-SSA moves for the edge from block `b` to its single successor:
 * for each PHI in the successor, move this block's PHI input into the PHI's
 * location. Locations are encoded for ir_dessa_parallel_copy: negative -
 * constant ref (below -consts_count: a local variable address),
 * [0..IR_REG_NUM) - CPU register, IR_REG_NUM+ - virtual register.
 */
static void ir_emit_dessa_moves(ir_ctx *ctx, int b, ir_block *bb)
{
	uint32_t succ, k, n = 0;
	ir_block *succ_bb;
	ir_use_list *use_list;
	ir_ref i, *p;
	ir_dessa_copy *copies;
	/* scratch registers reserved on the block-end instruction */
	ir_reg tmp_reg = ctx->regs[bb->end][0];
	ir_reg tmp_fp_reg = ctx->regs[bb->end][1];

	IR_ASSERT(bb->successors_count == 1);
	succ = ctx->cfg_edges[bb->successors];
	succ_bb = &ctx->cfg_blocks[succ];
	IR_ASSERT(succ_bb->predecessors_count > 1);
	use_list = &ctx->use_lists[succ_bb->start];
	/* which PHI operand corresponds to the edge from block b */
	k = ir_phi_input_number(ctx, succ_bb, b);

	copies = alloca(use_list->count * sizeof(ir_dessa_copy));

	for (i = use_list->count, p = &ctx->use_edges[use_list->refs]; i > 0; p++, i--) {
		ir_ref ref = *p;
		ir_insn *insn = &ctx->ir_base[ref];

		if (insn->op == IR_PHI) {
			ir_ref input = ir_insn_op(insn, k);
			ir_reg src = ir_get_alocated_reg(ctx, ref, k);
			ir_reg dst = ctx->regs[ref][0];
			ir_ref from, to;

			IR_ASSERT(dst == IR_REG_NONE || !IR_REG_SPILLED(dst));
			if (IR_IS_CONST_REF(input)) {
				from = input;
			} else if (ir_rule(ctx, input) == IR_STATIC_ALLOCA) {
				/* encode local variable address */
				from = -(ctx->consts_count + input);
			} else {
				from = (src != IR_REG_NONE && !IR_REG_SPILLED(src)) ?
					(ir_ref)src : (ir_ref)(IR_REG_NUM + ctx->vregs[input]);
			}
			to = (dst != IR_REG_NONE) ?
				(ir_ref)dst : (ir_ref)(IR_REG_NUM + ctx->vregs[ref]);
			if (to != from) {
				if (to >= IR_REG_NUM
				 && from >= IR_REG_NUM
				 && IR_MEM_VAL(ir_vreg_spill_slot(ctx, from - IR_REG_NUM)) ==
						IR_MEM_VAL(ir_vreg_spill_slot(ctx, to - IR_REG_NUM))) {
					/* It's possible that different virtual registers share the same special spill slot */
					// TODO: See ext/opcache/tests/jit/gh11917.phpt failure on Linux 32-bit
					continue;
				}
				copies[n].type = insn->type;
				copies[n].from = from;
				copies[n].to = to;
				n++;
			}
		}
	}

	if (n > 0) {
		ir_dessa_parallel_copy(ctx, copies, n, tmp_reg, tmp_fp_reg);
	}
}
963
965{
966 uint32_t b;
967 ir_ref start, ref, *prev_ref;
968 ir_block *bb;
969 ir_insn *insn;
970 uint32_t entries_count = 0;
971
972 ctx->rules = ir_mem_calloc(ctx->insns_count, sizeof(uint32_t));
973
974 prev_ref = ctx->prev_ref;
975 if (!prev_ref) {
977 prev_ref = ctx->prev_ref;
978 }
979
980 if (ctx->entries_count) {
981 ctx->entries = ir_mem_malloc(ctx->entries_count * sizeof(ir_ref));
982 }
983
984 for (b = ctx->cfg_blocks_count, bb = ctx->cfg_blocks + b; b > 0; b--, bb--) {
986 start = bb->start;
987 if (UNEXPECTED(bb->flags & IR_BB_ENTRY)) {
988 IR_ASSERT(entries_count < ctx->entries_count);
989 insn = &ctx->ir_base[start];
990 IR_ASSERT(insn->op == IR_ENTRY);
991 insn->op3 = entries_count;
992 ctx->entries[entries_count] = b;
993 entries_count++;
994 }
995 ctx->rules[start] = IR_SKIPPED | IR_NOP;
996 ref = bb->end;
997 if (bb->successors_count == 1) {
998 insn = &ctx->ir_base[ref];
999 if (insn->op == IR_END || insn->op == IR_LOOP_END) {
1000 ctx->rules[ref] = insn->op;
1001 ref = prev_ref[ref];
1002 if (ref == start && ctx->cfg_edges[bb->successors] != b) {
1003 if (EXPECTED(!(bb->flags & IR_BB_ENTRY))) {
1004 bb->flags |= IR_BB_EMPTY;
1005 } else if (ctx->flags & IR_MERGE_EMPTY_ENTRIES) {
1006 bb->flags |= IR_BB_EMPTY;
1007 if (ctx->cfg_edges[bb->successors] == b + 1) {
1008 (bb + 1)->flags |= IR_BB_PREV_EMPTY_ENTRY;
1009 }
1010 }
1011 continue;
1012 }
1013 }
1014 }
1015
1016 ctx->bb_start = start; /* bb_start is used by matcher to avoid fusion of insns from different blocks */
1017
1018 while (ref != start) {
1019 uint32_t rule = ctx->rules[ref];
1020
1021 if (!rule) {
1022 ctx->rules[ref] = rule = ir_match_insn(ctx, ref);
1023 }
1024 ir_match_insn2(ctx, ref, rule);
1025 ref = prev_ref[ref];
1026 }
1027 }
1028
1029 if (ctx->entries_count) {
1030 ctx->entries_count = entries_count;
1031 if (!entries_count) {
1032 ir_mem_free(ctx->entries);
1033 ctx->entries = NULL;
1034 }
1035 }
1036
1037 return 1;
1038}
1039
1041{
1042 int32_t offset;
1043
1044 IR_ASSERT(ref >= 0 && ctx->vregs[ref] && ctx->live_intervals[ctx->vregs[ref]]);
1045 offset = ctx->live_intervals[ctx->vregs[ref]]->stack_spill_pos;
1046 IR_ASSERT(offset != -1);
1047 return IR_SPILL_POS_TO_OFFSET(offset);
1048}
size_t len
Definition apprentice.c:174
count(Countable|array $value, int $mode=COUNT_NORMAL)
zend_long ptrdiff_t
#define DWORD
Definition exif.c:1762
zend_ffi_type * type
Definition ffi.c:3812
DL_HANDLE handle
Definition ffi.c:3028
zend_long n
Definition ffi.c:4979
memcpy(ptr1, ptr2, size)
zval * val
Definition ffi.c:4262
buf start
Definition ffi.c:4687
zend_long offset
#define NULL
Definition gdcache.h:45
again j
ir_ref ir_binding_find(const ir_ctx *ctx, ir_ref ref)
Definition ir.c:1161
const char * ir_get_str(const ir_ctx *ctx, ir_ref idx)
Definition ir.c:709
const uint8_t ir_type_size[IR_LAST_TYPE]
Definition ir.c:61
#define IR_REG_SPILLED(r)
Definition ir.h:790
ir_ref ir_strtab_find(const ir_strtab *strtab, const char *str, uint32_t len)
Definition ir_strtab.c:115
enum _ir_type ir_type
#define IR_IS_TYPE_INT(t)
Definition ir.h:145
struct _ir_live_interval ir_live_interval
Definition ir.h:554
#define IR_NEVER_INLINE
Definition ir.h:111
#define IR_VARARG_FUNC
Definition ir.h:512
#define ir_mem_calloc
Definition ir.h:1009
int32_t ir_ref
Definition ir.h:390
IR_ALWAYS_INLINE ir_ref ir_insn_op(const ir_insn *insn, int32_t n)
Definition ir.h:727
struct _ir_proto_t ir_proto_t
#define IR_IS_CONST_REF(ref)
Definition ir.h:392
#define ir_mem_malloc
Definition ir.h:1006
#define IR_FASTCALL_FUNC
Definition ir.h:511
#define IR_REG_NUM(r)
Definition ir.h:792
#define ir_mem_free
Definition ir.h:1015
#define IR_REG_NONE
Definition ir.h:786
struct _ir_ctx ir_ctx
Definition ir.h:550
#define IR_IS_TYPE_FP(t)
Definition ir.h:146
#define IR_ALWAYS_INLINE
Definition ir.h:108
struct _ir_use_list ir_use_list
Definition ir.h:551
struct _ir_insn ir_insn
#define IR_MERGE_EMPTY_ENTRIES
Definition ir.h:528
struct _ir_block ir_block
Definition ir.h:552
#define IR_MAX_REG_ARGS
Definition ir_aarch64.h:149
#define IR_REG_INT_ARGS
Definition ir_aarch64.h:131
#define IR_REG_STACK_POINTER
Definition ir_aarch64.h:103
#define IR_REG_FP_ARGS
Definition ir_aarch64.h:132
#define IR_REG_GP_LAST
Definition ir_aarch64.h:94
#define IR_REG_FP_FIRST
Definition ir_aarch64.h:93
#define IR_REG_FP_LAST
Definition ir_aarch64.h:95
#define IR_REG_GP_FIRST
Definition ir_aarch64.h:92
IR_ALWAYS_INLINE uint32_t ir_rule(const ir_ctx *ctx, ir_ref ref)
Definition ir_emit.c:130
int ir_match(ir_ctx *ctx)
Definition ir_emit.c:964
struct _ir_copy ir_copy
struct _ir_common_backend_data ir_common_backend_data
bool ir_is_fastcall(const ir_ctx *ctx, const ir_insn *insn)
Definition ir_emit.c:114
bool ir_is_vararg(const ir_ctx *ctx, ir_insn *insn)
Definition ir_emit.c:120
int32_t ir_get_spill_slot_offset(ir_ctx *ctx, ir_ref ref)
Definition ir_emit.c:1040
IR_ALWAYS_INLINE bool ir_in_same_block(ir_ctx *ctx, ir_ref ref)
Definition ir_emit.c:136
void * ir_resolve_sym_name(const char *name)
Definition ir_emit.c:273
struct _ir_dessa_copy ir_dessa_copy
IR_ALWAYS_INLINE void ir_dessa_resolve_cycle(ir_ctx *ctx, int32_t *pred, int32_t *loc, int8_t *types, ir_bitset todo, int32_t to, ir_reg tmp_reg, ir_reg tmp_fp_reg)
Definition ir_emit.c:683
void ir_build_prev_refs(ir_ctx *ctx)
Definition ir_gcm.c:1347
#define IR_SNAPSHOT_HANDLER(ctx, ref, insn, addr)
Definition ir_php.h:17
#define IR_SNAPSHOT_HANDLER_DCL()
Definition ir_php.h:14
struct _ir_list ir_list
ir_bitset_base_t * ir_bitset
Definition ir_private.h:317
IR_ALWAYS_INLINE bool ir_bitset_in(const ir_bitset set, uint32_t n)
Definition ir_private.h:339
#define IR_BB_EMPTY
#define IR_MIN(a, b)
Definition ir_private.h:63
IR_ALWAYS_INLINE void ir_bitset_copy(ir_bitset set1, const ir_bitset set2, uint32_t len)
Definition ir_private.h:370
IR_ALWAYS_INLINE uint32_t ir_phi_input_number(const ir_ctx *ctx, const ir_block *bb, uint32_t from)
IR_ALWAYS_INLINE void ir_bitset_intersection(ir_bitset set1, const ir_bitset set2, uint32_t len)
Definition ir_private.h:375
IR_ALWAYS_INLINE ir_bitset ir_bitset_malloc(uint32_t n)
Definition ir_private.h:324
IR_ALWAYS_INLINE bool ir_bitset_empty(const ir_bitset set, uint32_t len)
Definition ir_private.h:354
IR_ALWAYS_INLINE void ir_bitset_incl(ir_bitset set, uint32_t n)
Definition ir_private.h:329
#define IR_IS_SYM_CONST(op)
Definition ir_private.h:889
#define IR_ASSERT(x)
Definition ir_private.h:17
IR_ALWAYS_INLINE int8_t ir_get_alocated_reg(const ir_ctx *ctx, ir_ref ref, int op_num)
IR_ALWAYS_INLINE uint32_t ir_bitset_len(uint32_t n)
Definition ir_private.h:319
IR_ALWAYS_INLINE int ir_bitset_first(const ir_bitset set, uint32_t len)
Definition ir_private.h:414
#define IR_BB_ENTRY
IR_ALWAYS_INLINE ir_ref ir_list_at(const ir_list *l, uint32_t i)
Definition ir_private.h:760
#define IR_LIVE_INTERVAL_SPILL_SPECIAL
#define IR_BB_PREV_EMPTY_ENTRY
struct _ir_reg_alloc_data ir_reg_alloc_data
IR_ALWAYS_INLINE void ir_bitset_difference(ir_bitset set1, const ir_bitset set2, uint32_t len)
Definition ir_private.h:393
#define IR_SKIPPED
#define IR_BB_UNREACHABLE
IR_ALWAYS_INLINE int ir_bitset_pop_first(ir_bitset set, uint32_t len)
Definition ir_private.h:445
IR_ALWAYS_INLINE void ir_bitset_clear(ir_bitset set, uint32_t len)
Definition ir_private.h:344
#define IR_LIVE_INTERVAL_SPILLED
IR_ALWAYS_INLINE void ir_bitset_excl(ir_bitset set, uint32_t n)
Definition ir_private.h:334
HashTable types
Definition php_ffi.h:36
unsigned const char * pos
Definition php_ffi.h:52
unsigned char key[REFLECTION_KEY_LEN]
zend_constant * data
#define RTLD_DEFAULT
p
Definition session.c:1105
ir_ref end
ir_ref start
uint32_t flags
uint32_t successors
uint32_t successors_count
uint32_t predecessors_count
dasm_State * dasm_state
Definition ir_emit.c:403
ir_bitset emit_constants
Definition ir_emit.c:404
ir_reg_alloc_data ra_data
Definition ir_emit.c:401
uint32_t dessa_from_block
Definition ir_emit.c:402
ir_reg from
Definition ir_emit.c:50
ir_type type
Definition ir_emit.c:49
ir_reg to
Definition ir_emit.c:51
uint32_t * cfg_edges
Definition ir.h:593
ir_live_interval ** live_intervals
Definition ir.h:609
ir_hashtab * binding
Definition ir.h:586
ir_ref consts_count
Definition ir.h:577
uint32_t * entries
Definition ir.h:632
uint32_t entries_count
Definition ir.h:631
ir_ref bb_start
Definition ir.h:618
ir_ref * prev_ref
Definition ir.h:614
ir_strtab * fused_regs
Definition ir.h:613
void * data
Definition ir.h:616
ir_ref vregs_count
Definition ir.h:598
ir_block * cfg_blocks
Definition ir.h:592
uint32_t * vregs
Definition ir.h:597
ir_loader * loader
Definition ir.h:642
void * osr_entry_loads
Definition ir.h:633
ir_use_list * use_lists
Definition ir.h:587
ir_regs * regs
Definition ir.h:612
ir_insn * ir_base
Definition ir.h:574
uint32_t flags
Definition ir.h:579
uint32_t cfg_blocks_count
Definition ir.h:590
ir_ref insns_count
Definition ir.h:575
uint32_t * rules
Definition ir.h:596
ir_ref * use_edges
Definition ir.h:588
int32_t spill_base
Definition ir.h:599
int32_t from
Definition ir_emit.c:56
ir_type type
Definition ir_emit.c:55
int32_t to
Definition ir_emit.c:57
ir_val val
Definition ir.h:477
int32_t stack_spill_pos
void *(* resolve_sym_name)(ir_loader *loader, const char *name, bool add_thunk)
Definition ir.h:861
uint8_t flags
Definition ir.h:685
Definition file.h:177
int64_t i64
Definition ir.h:412
execute_data func
char * alloca()
#define EXPECTED(condition)
#define UNEXPECTED(condition)
zend_string * name