2 * emit-x86.c: Support functions for emitting x86 code
5 * Dietmar Maurer (dietmar@ximian.com)
6 * Miguel de Icaza (miguel@ximian.com)
8 * (C) 2001 Ximian, Inc.
14 #include <mono/metadata/assembly.h>
15 #include <mono/metadata/loader.h>
16 #include <mono/metadata/cil-coff.h>
17 #include <mono/metadata/tabledefs.h>
18 #include <mono/metadata/class.h>
19 #include <mono/metadata/debug-helpers.h>
20 #include <mono/metadata/mono-endian.h>
21 #include <mono/arch/x86/x86-codegen.h>
22 #include <mono/metadata/profiler-private.h>
30 //#define DEBUG_REGALLOC
31 //#define DEBUG_SPILLS
/*
 * arch_get_reg_name:
 * Maps an x86 register number to a printable name.
 * NOTE(review): this view omits the function's interior lines (the
 * per-register switch/table); only the unreachable-default assert is
 * visible — confirm against the full source.
 */
34 arch_get_reg_name (int regnum)
/* regnum did not match any known register */
55 g_assert_not_reached ();
61 * we may want a x86-specific header or we
62 * can just declare it extern in x86.brg.
/* Set to 1 by mono_cpu_detect () when CPUID reports CMOV support. */
64 int mono_x86_have_cmov = 0;
/*
 * cpuid:
 * Executes the CPUID instruction for function @id and returns the
 * resulting EAX/EBX/ECX/EDX through the out parameters.  The first
 * asm block toggles EFLAGS bit 0x200000 (the ID bit) to probe whether
 * the CPUID instruction exists at all on this CPU.
 * NOTE(review): interior lines (pushf/popf sequence, return paths) are
 * missing from this fragment — verify against the full source.
 */
67 cpuid (int id, int* p_eax, int* p_ebx, int* p_ecx, int* p_edx)
70 __asm__ __volatile__ (
/* flip the ID bit in the copy of EFLAGS held in EAX */
74 "xorl $0x200000, %%eax\n"
/* isolate the ID bit: non-zero here means the flip stuck, so CPUID exists */
80 "andl $0x200000, %%eax\n"
/* CPUID clobbers/returns all four GPRs; bind each to an out parameter */
88 __asm__ __volatile__ ("cpuid"
89 : "=a" (*p_eax), "=b" (*p_ebx), "=c" (*p_ecx), "=d" (*p_edx)
/*
 * mono_cpu_detect:
 * Queries CPUID function 1 and records the CPU features the code
 * generator cares about; currently only CMOV (feature flags EDX bit 15).
 */
97 mono_cpu_detect (void) {
98 int eax, ebx, ecx, edx;
100 /* Feature Flags function, flags returned in EDX. */
101 if (cpuid(1, &eax, &ebx, &ecx, &edx)) {
/* EDX bit 15 == CMOV (and FCMOV with FPU) support */
102 if (edx & (1U << 15)) {
103 mono_x86_have_cmov = 1;
/*
 * enter_method:
 * Call-trace helper invoked from JITted prologues (see the
 * mono_jit_trace_calls path in arch_emit_prologue).  Prints the method
 * name and each argument by walking the caller's stack frame upward
 * from @ebp.  @ebp points at the first argument slot, not the saved
 * frame pointer.
 * NOTE(review): many interior lines (declarations, braces, breaks) are
 * missing from this fragment.
 */
109 enter_method (MonoMethod *method, char *ebp)
116 fname = mono_method_full_name (method, TRUE);
117 printf ("ENTER: %s\n(", fname);
/* x86 requires at least 4-byte stack alignment; a misaligned frame
 * indicates a corrupted or mis-set-up call */
120 if (((int)ebp & 3) != 0) {
121 g_error ("unaligned stack detected (%p)", ebp);
/* valuetype returns pass a hidden result pointer as the first slot */
126 if (ISSTRUCT (method->signature->ret)) {
129 g_assert (!method->signature->ret->byref);
131 size = mono_type_stack_size (method->signature->ret, &align);
133 printf ("VALUERET:%p, ", *((gpointer *)ebp));
134 ebp += sizeof (gpointer);
/* instance methods: print the `this' slot before the declared args */
137 if (method->signature->hasthis) {
138 if (method->klass->valuetype) {
139 printf ("value:%p, ", *((gpointer *)ebp));
141 o = *((MonoObject **)ebp);
144 class = o->vtable->klass;
146 if (class == mono_defaults.string_class) {
147 printf ("this:[STRING:%p:%s], ", o, mono_string_to_utf8 ((MonoString *)o));
149 printf ("this:%p[%s.%s], ", o, class->name_space, class->name);
152 printf ("this:NULL, ");
154 ebp += sizeof (gpointer);
/* walk each declared parameter, formatting by its CIL type */
157 for (i = 0; i < method->signature->param_count; ++i) {
158 MonoType *type = method->signature->params [i];
160 size = mono_type_stack_size (type, &align);
163 printf ("[BYREF:%p], ", *((gpointer *)ebp));
164 } else switch (type->type) {
166 case MONO_TYPE_BOOLEAN:
176 printf ("%d, ", *((int *)(ebp)));
178 case MONO_TYPE_STRING: {
179 MonoString *s = *((MonoString **)ebp);
/* sanity check: the slot really holds a string object */
181 g_assert (((MonoObject *)s)->vtable->klass == mono_defaults.string_class);
182 printf ("[STRING:%p:%s], ", s, mono_string_to_utf8 (s));
184 printf ("[STRING:null], ");
187 case MONO_TYPE_CLASS:
188 case MONO_TYPE_OBJECT: {
189 o = *((MonoObject **)ebp);
191 class = o->vtable->klass;
193 if (class == mono_defaults.string_class) {
194 printf ("[STRING:%p:%s], ", o, mono_string_to_utf8 ((MonoString *)o));
195 } else if (class == mono_defaults.int32_class) {
/* boxed int32: payload sits right after the MonoObject header */
196 printf ("[INT32:%p:%d], ", o, *(gint32 *)((char *)o + sizeof (MonoObject)));
198 printf ("[%s.%s:%p], ", class->name_space, class->name, o);
200 printf ("%p, ", *((gpointer *)(ebp)));
205 case MONO_TYPE_FNPTR:
206 case MONO_TYPE_ARRAY:
207 case MONO_TYPE_SZARRAY:
208 printf ("%p, ", *((gpointer *)(ebp)));
211 printf ("%lld, ", *((gint64 *)(ebp)));
214 printf ("%f, ", *((float *)(ebp)));
217 printf ("%f, ", *((double *)(ebp)));
/* valuetypes: dump the raw bytes */
219 case MONO_TYPE_VALUETYPE:
221 for (j = 0; j < size; j++)
222 printf ("%02x,", *((guint8*)ebp +j));
/* advance to the next argument slot, rounding up to its alignment */
229 g_assert (align == 4 || align == 8);
230 ebp += size + align - 1;
231 ebp = (gpointer)((unsigned)ebp & ~(align - 1));
/*
 * leave_method:
 * Call-trace helper invoked from JITted epilogues (see the
 * mono_jit_trace_calls path in arch_emit_epilogue).  Prints the return
 * value, which arrives in the x86 return registers: EAX (int/pointer),
 * EAX:EDX (64-bit, reassembled below), or the FP stack top passed in
 * as @test.
 * NOTE(review): interior lines (declarations, breaks, braces) are
 * missing from this fragment.
 */
238 leave_method (MonoMethod *method, int edx, int eax, double test)
243 fname = mono_method_full_name (method, TRUE);
244 printf ("LEAVE: %s", fname);
/* format the return value according to the declared return type */
247 switch (method->signature->ret->type) {
250 case MONO_TYPE_BOOLEAN:
252 printf ("TRUE:%d", eax);
266 printf ("EAX=%d", eax);
268 case MONO_TYPE_STRING: {
269 MonoString *s = (MonoString *)eax;
/* sanity check: EAX really holds a string object */
272 g_assert (((MonoObject *)s)->vtable->klass == mono_defaults.string_class);
273 printf ("[STRING:%p:%s]", s, mono_string_to_utf8 (s));
275 printf ("[STRING:null], ");
278 case MONO_TYPE_OBJECT: {
279 MonoObject *o = (MonoObject *)eax;
/* unwrap common boxed primitives for readability */
282 if (o->vtable->klass == mono_defaults.boolean_class) {
283 printf ("[BOOLEAN:%p:%d]", o, *((guint8 *)o + sizeof (MonoObject)));
284 } else if (o->vtable->klass == mono_defaults.int32_class) {
285 printf ("[INT32:%p:%d]", o, *((gint32 *)((char *)o + sizeof (MonoObject))));
287 printf ("[%s.%s:%p]", o->vtable->klass->name_space, o->vtable->klass->name, o);
289 printf ("[OBJECT:%p]", o);
293 case MONO_TYPE_CLASS:
295 case MONO_TYPE_FNPTR:
296 case MONO_TYPE_ARRAY:
297 case MONO_TYPE_SZARRAY:
298 printf ("EAX=%p", (gpointer)eax);
/* reassemble a 64-bit result from the EAX (low) / EDX (high) pair */
301 *((gint32 *)&l) = eax;
302 *((gint32 *)&l + 1) = edx;
303 printf ("EAX/EDX=%lld", l);
306 printf ("FP=%f\n", test);
309 printf ("(unknown return type)");
316 * arch_emit_prologue:
317 * @cfg: pointer to status information
319 * Emits the function prolog.
/*
 * Sets up the standard EBP frame, optionally builds and links a MonoLMF
 * (Last Managed Frame) record, saves used callee-saved registers,
 * reserves locals space, emits trace/profile hooks, and zero-initializes
 * locals when required.
 * NOTE(review): many interior lines are missing from this fragment.
 */
322 arch_emit_prologue (MonoFlowGraph *cfg)
324 MonoMethod *method = cfg->method;
325 MonoMethodHeader *header = ((MonoMethodNormal *)method)->header;
326 int i, j, k, alloc_size, pos;
/* standard frame: push ebp; mov ebp, esp */
328 x86_push_reg (cfg->code, X86_EBP);
329 x86_mov_reg_reg (cfg->code, X86_EBP, X86_ESP, 4);
331 alloc_size = cfg->locals_size;
/* build the LMF record on the stack and link it into the per-thread list */
334 if (method->save_lmf) {
336 pos += sizeof (MonoLMF);
338 /* save the current IP */
/* remember where the IP immediate lives so mono_compute_branches can
 * patch the real address in later (the +1 skips the push opcode byte) */
339 cfg->lmfip_offset = cfg->code + 1 - cfg->start;
340 x86_push_imm (cfg->code, 0);
341 /* save all caller saved regs */
342 x86_push_reg (cfg->code, X86_EBX);
343 x86_push_reg (cfg->code, X86_EDI);
344 x86_push_reg (cfg->code, X86_ESI);
345 x86_push_reg (cfg->code, X86_EBP);
347 /* save method info */
348 x86_push_imm (cfg->code, method);
350 /* get the address of lmf for the current thread */
351 x86_call_code (cfg->code, mono_get_lmf_addr);
353 x86_push_reg (cfg->code, X86_EAX);
354 /* push *lfm (previous_lmf) */
355 x86_push_membase (cfg->code, X86_EAX, 0);
/* *lmf_addr = esp: make this new LMF the thread's current one */
357 x86_mov_membase_reg (cfg->code, X86_EAX, 0, X86_ESP, 4);
/* save only the callee-saved registers this method actually uses */
360 if (mono_regset_reg_used (cfg->rs, X86_EBX)) {
361 x86_push_reg (cfg->code, X86_EBX);
365 if (mono_regset_reg_used (cfg->rs, X86_EDI)) {
366 x86_push_reg (cfg->code, X86_EDI);
370 if (mono_regset_reg_used (cfg->rs, X86_ESI)) {
371 x86_push_reg (cfg->code, X86_ESI);
/* reserve stack space for local variables */
378 x86_alu_reg_imm (cfg->code, X86_SUB, X86_ESP, alloc_size);
/* trace hook: call enter_method (method, ebp) */
380 if (mono_jit_trace_calls) {
381 x86_push_reg (cfg->code, X86_EBP);
382 x86_push_imm (cfg->code, cfg->method);
383 x86_mov_reg_imm (cfg->code, X86_EAX, enter_method);
384 x86_call_reg (cfg->code, X86_EAX);
385 x86_alu_reg_imm (cfg->code, X86_ADD, X86_ESP, 8);
/* profiler hook: call mono_profiler_method_enter (method) */
387 if (mono_jit_profile) {
388 x86_push_imm (cfg->code, cfg->method);
389 x86_mov_reg_imm (cfg->code, X86_EAX, mono_profiler_method_enter);
390 x86_call_reg (cfg->code, X86_EAX);
391 x86_alu_reg_imm (cfg->code, X86_ADD, X86_ESP, 4);
394 /* initialize local vars */
395 if (header->num_locals) {
396 gboolean unassigned_locals = TRUE;
/* use liveness info of the entry block to skip the clearing when no
 * local is read before being written */
398 if (cfg->bblocks [0].live_in_set) {
399 i = mono_bitset_find_first (cfg->bblocks [0].live_in_set,
400 cfg->locals_start_index - 1);
401 unassigned_locals = (i >= 0 && i < cfg->locals_start_index +
405 if (unassigned_locals && header->init_locals) {
406 MonoVarInfo *vi = &VARINFO (cfg, cfg->locals_start_index + header->num_locals - 1);
407 int offset = vi->offset;
411 /* do not clear caller saved registers */
414 for (i = 0; i < header->num_locals; ++i) {
415 MonoVarInfo *rv = &VARINFO (cfg, cfg->locals_start_index + i);
418 int ind = 1 << rv->reg;
/* register-allocated local: clear with xor reg, reg */
420 x86_alu_reg_reg (cfg->code, X86_XOR, rv->reg, rv->reg);
/* small stack local: a single mov [ebp+off], 0 suffices */
425 if (size == 1 || size == 2 || size == 4) {
426 x86_mov_membase_imm (cfg->code, X86_EBP, offset, 0, size);
/* medium block: unrolled dword stores */
434 for (k = 0; k < i; k++) {
435 x86_mov_membase_imm (cfg->code, X86_EBP, offset, 0, 4);
440 x86_mov_membase_imm (cfg->code, X86_EBP, offset, 0, 2);
444 x86_mov_membase_imm (cfg->code, X86_EBP, offset, 0, 1);
/* large block: rep stosl (clobbers EDI/EAX/ECX, so preserve EDI if
 * the register allocator did not already save it above) */
449 if (!mono_regset_reg_used (cfg->rs, X86_EDI))
450 x86_push_reg (cfg->code, X86_EDI);
451 x86_lea_membase (cfg->code, X86_EDI, X86_EBP, offset);
452 x86_alu_reg_reg (cfg->code, X86_XOR, X86_EAX, X86_EAX);
453 x86_mov_reg_imm (cfg->code, X86_ECX, i);
455 x86_prefix (cfg->code, X86_REP_PREFIX);
456 x86_stosl (cfg->code);
/* clear the trailing j bytes that rep stosl did not cover */
457 for (i = 0; i < j; i++)
458 x86_stosb (cfg->code);
459 if (!mono_regset_reg_used (cfg->rs, X86_EDI))
460 x86_pop_reg (cfg->code, X86_EDI);
464 x86_mov_membase_imm (cfg->code, X86_EBP, offset, 0, 2);
465 x86_mov_membase_imm (cfg->code, X86_EBP, offset + 2, 0, 1);
470 /* we always need to initialize object pointers */
/* (even when init_locals is off, the GC must not see garbage refs) */
472 for (i = 0; i < header->num_locals; ++i) {
473 MonoType *t = header->locals [i];
474 int offset = VARINFO (cfg, cfg->locals_start_index + i).offset;
477 x86_mov_membase_imm (cfg->code, X86_EBP, offset, 0, 4);
482 case MONO_TYPE_STRING:
483 case MONO_TYPE_CLASS:
484 case MONO_TYPE_ARRAY:
485 case MONO_TYPE_SZARRAY:
486 case MONO_TYPE_OBJECT:
487 x86_mov_membase_imm (cfg->code, X86_EBP, offset, 0, 4);
497 * arch_emit_epilogue:
498 * @cfg: pointer to status information
500 * Emits the function epilog.
/*
 * Emits trace/profile leave hooks (preserving the EAX/EDX/FP return
 * value around the calls), restores callee-saved registers, unlinks the
 * MonoLMF record if one was pushed in the prologue, and tears down the
 * frame.  NOTE(review): interior lines are missing from this fragment.
 */
503 arch_emit_epilogue (MonoFlowGraph *cfg)
507 * note: with trace and profiling the value on the FP stack may get clobbered.
509 if (mono_jit_trace_calls) {
/* spill the FP return value to the stack so leave_method receives it,
 * and save the EAX:EDX integer return pair around the call */
510 x86_fld_reg (cfg->code, 0);
511 x86_alu_reg_imm (cfg->code, X86_SUB, X86_ESP, 8);
512 x86_fst_membase (cfg->code, X86_ESP, 0, TRUE, TRUE);
513 x86_push_reg (cfg->code, X86_EAX);
514 x86_push_reg (cfg->code, X86_EDX);
515 x86_push_imm (cfg->code, cfg->method);
516 x86_mov_reg_imm (cfg->code, X86_EAX, leave_method);
517 x86_call_reg (cfg->code, X86_EAX);
518 x86_alu_reg_imm (cfg->code, X86_ADD, X86_ESP, 4);
519 x86_pop_reg (cfg->code, X86_EDX);
520 x86_pop_reg (cfg->code, X86_EAX);
521 x86_alu_reg_imm (cfg->code, X86_ADD, X86_ESP, 8);
/* profiler hook, likewise preserving the return registers */
523 if (mono_jit_profile) {
524 x86_push_reg (cfg->code, X86_EAX);
525 x86_push_reg (cfg->code, X86_EDX);
526 x86_push_imm (cfg->code, cfg->method);
527 x86_mov_reg_imm (cfg->code, X86_EAX, mono_profiler_method_leave);
528 x86_call_reg (cfg->code, X86_EAX);
529 x86_alu_reg_imm (cfg->code, X86_ADD, X86_ESP, 4);
530 x86_pop_reg (cfg->code, X86_EDX);
531 x86_pop_reg (cfg->code, X86_EAX);
/* callee-saved registers were pushed below the LMF record, so start
 * restoring from past it */
534 if (cfg->method->save_lmf) {
535 pos = -sizeof (MonoLMF) - 4;
539 if (mono_regset_reg_used (cfg->rs, X86_EBX)) {
540 x86_mov_reg_membase (cfg->code, X86_EBX, X86_EBP, pos, 4);
543 if (mono_regset_reg_used (cfg->rs, X86_EDI)) {
544 x86_mov_reg_membase (cfg->code, X86_EDI, X86_EBP, pos, 4);
547 if (mono_regset_reg_used (cfg->rs, X86_ESI)) {
548 x86_mov_reg_membase (cfg->code, X86_ESI, X86_EBP, pos, 4);
/* unlink the LMF: pop the fields back off in push order (see prologue) */
552 if (cfg->method->save_lmf) {
553 pos = -sizeof (MonoLMF);
/* point esp at the LMF record so the pops below read its fields */
555 x86_lea_membase (cfg->code, X86_ESP, X86_EBP, pos);
557 /* ebx = previous_lmf */
558 x86_pop_reg (cfg->code, X86_EBX);
/* edi = lmf address */
560 x86_pop_reg (cfg->code, X86_EDI);
561 /* *(lmf) = previous_lmf */
562 x86_mov_membase_reg (cfg->code, X86_EDI, 0, X86_EBX, 4);
564 /* discard method info */
565 x86_pop_reg (cfg->code, X86_ESI);
567 /* restore caller saved regs */
568 x86_pop_reg (cfg->code, X86_EBP);
569 x86_pop_reg (cfg->code, X86_ESI);
570 x86_pop_reg (cfg->code, X86_EDI);
571 x86_pop_reg (cfg->code, X86_EBX);
/* mov esp, ebp; pop ebp */
575 x86_leave (cfg->code);
/*
 * arch_allocate_var:
 * Assigns a stack slot to a variable: locals grow downward from EBP
 * (negative offsets), arguments live above the saved EBP/return address
 * (positive offsets).  Appends a MonoVarInfo to cfg->varinfo and
 * returns its index.
 * NOTE(review): interior lines (the switch header, other vartype cases,
 * braces) are missing from this fragment.
 */
580 arch_allocate_var (MonoFlowGraph *cfg, int size, int align, MonoVarType vartype, MonoValueType type)
584 mono_jit_stats.allocate_var++;
/* initialize the live-range to "not yet used" */
586 vi.range.last_use.abs_pos = 0;
587 vi.range.first_use.pos.bid = 0xffff;
588 vi.range.first_use.pos.tid = 0;
591 vi.varnum = cfg->varinfo->len;
593 if (size != sizeof (gpointer))
598 case MONO_LOCALVAR: {
/* grow the frame and round locals_size up to the requested alignment
 * (align must be a power of two for this mask trick) */
599 cfg->locals_size += size;
600 cfg->locals_size += align - 1;
601 cfg->locals_size &= ~(align - 1);
/* locals are addressed at negative offsets from EBP */
603 SET_VARINFO (vi, type, vartype, - cfg->locals_size, size);
604 g_array_append_val (cfg->varinfo, vi);
/* args start at ebp+8 (saved ebp + return address), plus one slot for
 * a hidden valuetype-return pointer when present */
608 int arg_start = 8 + cfg->has_vtarg*4;
610 g_assert ((align & 3) == 0);
612 SET_VARINFO (vi, type, vartype, cfg->args_size + arg_start, size);
613 g_array_append_val (cfg->varinfo, vi);
615 cfg->args_size += size;
617 cfg->args_size &= ~3;
621 g_assert_not_reached ();
/* index of the variable just added */
624 return cfg->varinfo->len - 1;
/*
 * mono_label_cfg:
 * Runs the BURG labeller over every tree of every reachable basic
 * block, computing instruction-selection state.  Aborts loudly when a
 * tree fails to match any rule.
 * NOTE(review): interior lines (declarations, `top`, return paths) are
 * missing from this fragment.
 */
628 mono_label_cfg (MonoFlowGraph *cfg)
632 for (i = 0; i < cfg->block_count; i++) {
633 GPtrArray *forest = cfg->bblocks [i].forest;
636 if (!cfg->bblocks [i].reached) /* unreachable code */
641 for (j = 0; j < top; j++) {
642 MBTree *t1 = (MBTree *) g_ptr_array_index (forest, j);
645 mbstate = mono_burg_label (t1, cfg);
/* no matching rule: dump diagnostics before asserting */
648 if (mono_debug_format != MONO_DEBUG_FORMAT_NONE)
650 g_warning ("tree does not match in %s",
651 mono_method_full_name (cfg->method, TRUE));
652 mono_print_ctree (cfg, t1); printf ("\n\n");
654 mono_print_forest (cfg, forest);
655 g_assert_not_reached ();
/*
 * tree_allocate_regs:
 * Recursively allocates registers for @tree and its kids according to
 * the BURG rule chosen for @goal.  @exclude_mask lists registers the
 * caller forbids; *spillcount is bumped when a kid's registers must be
 * spilled to let the other kid allocate.  Returns non-zero on success
 * (return statements not visible in this fragment — confirm).
 * NOTE(review): many interior lines (case labels, braces, returns) are
 * missing from this fragment.
 */
664 tree_allocate_regs (MonoFlowGraph *cfg, MBTree *tree, int goal, MonoRegSet *rs,
665 guint8 exclude_mask, int *spillcount)
668 int ern = mono_burg_rule (tree->state, goal);
669 const guint16 *nts = mono_burg_nts [ern];
670 guint8 left_exclude_mask = 0, right_exclude_mask = 0;
673 #ifdef DEBUG_REGALLOC
674 printf ("tree_allocate_regs start %d %08x %d %d\n", tree->op, rs->free_mask, goal,
675 (nts [0] && kids [0] == tree));
678 mono_burg_kids (tree, ern, kids);
/* some ops have fixed-register requirements: shifts need ECX, ... */
684 exclude_mask |= (1 << X86_ECX);
685 left_exclude_mask |= (1 << X86_ECX);
688 case MB_TERM_MUL_OVF:
689 case MB_TERM_MUL_OVF_UN:
/* ... and mul/div variants claim EAX/EDX, so keep the kids off them */
694 if (goal == MB_NTERM_reg) {
695 left_exclude_mask |= (1 << X86_EDX);
696 right_exclude_mask |= (1 << X86_EDX) | (1 << X86_EAX);
/* chain rule: the "kid" is the tree itself, recurse with the new goal */
703 if (nts [0] && kids [0] == tree) {
705 if (!tree_allocate_regs (cfg, kids [0], nts [0], rs, exclude_mask, spillcount))
726 if (nts [1]) { /* two kids */
729 if (!tree_allocate_regs (cfg, kids [0], nts [0], rs, left_exclude_mask, spillcount))
/* first attempt: allocate the right kid with the left kid's regs live */
734 if (!tree_allocate_regs (cfg, kids [1], nts [1], rs, right_exclude_mask, spillcount)) {
736 #ifdef DEBUG_REGALLOC
737 printf ("tree_allocate_regs try 1 failed %d %d %d %d\n",
738 nts [1], kids [1]->reg1,
739 kids [1]->reg2,kids [1]->reg3);
/* retry: forbid the right kid from taking the left kid's registers,
 * free them, and mark the left kid spilled so its values get saved
 * and reloaded around the right kid's evaluation */
743 if (kids [0]->reg1 != -1) {
744 right_exclude_mask |= 1 << kids [0]->reg1;
747 if (kids [0]->reg2 != -1) {
748 right_exclude_mask |= 1 << kids [0]->reg2;
751 if (kids [0]->reg3 != -1) {
752 right_exclude_mask |= 1 << kids [0]->reg3;
756 mono_regset_free_reg (rs, kids [0]->reg1);
757 mono_regset_free_reg (rs, kids [0]->reg2);
758 mono_regset_free_reg (rs, kids [0]->reg3);
760 kids [0]->spilled = 1;
762 if (!tree_allocate_regs (cfg, kids [1], nts [1], rs, right_exclude_mask, spillcount)) {
763 #ifdef DEBUG_REGALLOC
764 printf ("tree_allocate_regs try 2 failed\n");
768 #ifdef DEBUG_REGALLOC
769 printf ("tree_allocate_regs try 2 succesfull\n");
/* at most three kids are supported */
774 if (nts [3]) /* we cant handle four kids */
775 g_assert_not_reached ();
777 if (!tree_allocate_regs (cfg, kids [2], nts [2], rs, right_exclude_mask, spillcount))
782 } else { /* one kid */
783 if (!tree_allocate_regs (cfg, kids [0], nts [0], rs, left_exclude_mask, spillcount))
/* the kids' result registers are consumed here; release them before
 * allocating this node's own result registers */
789 for (i = 0; nts [i]; i++) {
790 mono_regset_free_reg (rs, kids [i]->reg1);
791 mono_regset_free_reg (rs, kids [i]->reg2);
792 mono_regset_free_reg (rs, kids [i]->reg3);
795 tree->emit = mono_burg_func [ern];
/* fixed-register results: calls return in EAX(:EDX), ECX is clobbered */
798 case MB_TERM_CALL_I4:
799 case MB_TERM_CALL_I8:
800 case MB_TERM_CALL_R8:
801 // case MB_TERM_CALL_VOID :
802 if ((tree->reg1 = mono_regset_alloc_reg (rs, X86_EAX, exclude_mask)) == -1)
804 if ((tree->reg2 = mono_regset_alloc_reg (rs, X86_EDX, exclude_mask)) == -1)
806 if ((tree->reg3 = mono_regset_alloc_reg (rs, X86_ECX, exclude_mask)) == -1)
814 case MB_TERM_MUL_OVF_UN:
/* mul/div produce their result in EAX with EDX clobbered/extended */
819 if ((tree->reg1 = mono_regset_alloc_reg (rs, X86_EAX, exclude_mask)) == -1)
821 if ((tree->reg2 = mono_regset_alloc_reg (rs, X86_EDX, exclude_mask)) == -1)
/* otherwise any free register will do (-1 = no preference) */
825 if ((tree->reg1 = mono_regset_alloc_reg (rs, -1, exclude_mask)) == -1)
833 case MB_TERM_MUL_OVF:
834 case MB_TERM_MUL_OVF_UN:
839 if ((tree->reg1 = mono_regset_alloc_reg (rs, X86_EAX, exclude_mask)) == -1)
841 if ((tree->reg2 = mono_regset_alloc_reg (rs, X86_EDX, exclude_mask)) == -1)
845 if ((tree->reg1 = mono_regset_alloc_reg (rs, -1, exclude_mask)) == -1)
847 if ((tree->reg2 = mono_regset_alloc_reg (rs, -1, exclude_mask)) == -1)
853 /* fixme: allocate floating point registers */
/* for ADD/SHL/MUL prefer reusing an operand's register (two-address
 * x86 forms overwrite the left operand) */
857 if (tree->op == MB_TERM_ADD) {
858 if ((tree->reg1 = mono_regset_alloc_reg (rs, tree->left->reg1, exclude_mask)) == -1)
860 if ((tree->reg2 = mono_regset_alloc_reg (rs, tree->right->reg1, exclude_mask)) == -1)
866 if (tree->op == MB_TERM_ADD) {
867 if ((tree->reg1 = mono_regset_alloc_reg (rs, tree->left->reg1, exclude_mask)) == -1)
873 if (tree->op == MB_TERM_SHL ||
874 tree->op == MB_TERM_MUL) {
875 if ((tree->reg1 = mono_regset_alloc_reg (rs, tree->left->reg1, exclude_mask)) == -1)
885 #ifdef DEBUG_REGALLOC
886 printf ("tree_allocate_regs end %d %08x\n", tree->op, rs->free_mask);
/*
 * arch_allocate_regs:
 * Drives tree_allocate_regs over every tree in every reachable basic
 * block, tracking the worst-case spill count, then reserves one
 * pointer-sized temp var per potential spill slot.
 * NOTE(review): interior lines (`top`, per-tree spillcount reset,
 * braces) are missing from this fragment.
 */
892 arch_allocate_regs (MonoFlowGraph *cfg)
894 int i, j, max_spillcount = 0;
896 for (i = 0; i < cfg->block_count; i++) {
897 GPtrArray *forest = cfg->bblocks [i].forest;
900 if (!cfg->bblocks [i].reached) /* unreachable code */
905 for (j = 0; j < top; j++) {
906 MBTree *t1 = (MBTree *) g_ptr_array_index (forest, j);
908 #ifdef DEBUG_REGALLOC
909 printf ("arch_allocate_regs start %d:%d %08x\n", i, j, cfg->rs->free_mask);
911 if (!tree_allocate_regs (cfg, t1, 1, cfg->rs, 0, &spillcount)) {
912 mono_print_ctree (cfg, t1);
914 g_error ("register allocation failed");
/* remember the deepest spill requirement across all trees */
917 max_spillcount = MAX (max_spillcount, spillcount);
919 #ifdef DEBUG_REGALLOC
920 printf ("arch_allocate_regs end %d:%d %08x\n", i, j, cfg->rs->free_mask);
/* every register must have been released again after each tree */
922 g_assert (cfg->rs->free_mask == 0xffffffff);
926 /* allocate space for spilled regs */
928 cfg->spillvars = mono_mempool_alloc0 (cfg->mp, sizeof (gint) * max_spillcount);
929 cfg->spillcount = max_spillcount;
931 for (i = 0; i < max_spillcount; i++) {
933 spillvar = arch_allocate_var (cfg, sizeof (gpointer), sizeof (gpointer),
934 MONO_TEMPVAR, VAL_I32);
/* cache the EBP-relative offset of each spill slot */
935 cfg->spillvars [i] = VARINFO (cfg, spillvar).offset;
/*
 * tree_emit:
 * Recursively emits machine code for @tree: emits the kids first,
 * inserting spill stores/reloads around the second kid when the
 * register allocator marked the first kid spilled, then grows the code
 * buffer if needed and invokes the node's BURG emit function.
 * NOTE(review): interior lines (declarations, debug-print guards,
 * braces) are missing from this fragment.
 */
940 tree_emit (int goal, MonoFlowGraph *cfg, MBTree *tree, int *spillcount)
943 int ern = mono_burg_rule (tree->state, goal);
944 const guint16 *nts = mono_burg_nts [ern];
948 mono_burg_kids (tree, ern, kids);
952 int spilloffset1, spilloffset2, spilloffset3;
/* emit left kid first */
954 tree_emit (nts [0], cfg, kids [0], spillcount);
956 if (kids [0]->spilled) {
958 printf ("SPILL_REGS %d %03x %s.%s:%s\n",
959 nts [0], cfg->code - cfg->start,
960 cfg->method->klass->name_space,
961 cfg->method->klass->name, cfg->method->name);
963 mono_print_ctree (cfg, kids [0]);printf ("\n\n");
/* store the left kid's registers into the preallocated spill slots
 * (see arch_allocate_regs) before evaluating the right kid */
969 if (kids [0]->reg1 != -1) {
970 spilloffset1 = cfg->spillvars [(*spillcount)++];
971 x86_mov_membase_reg (cfg->code, X86_EBP, spilloffset1,
974 if (kids [0]->reg2 != -1) {
975 spilloffset2 = cfg->spillvars [(*spillcount)++];
976 x86_mov_membase_reg (cfg->code, X86_EBP, spilloffset2,
979 if (kids [0]->reg3 != -1) {
980 spilloffset3 = cfg->spillvars [(*spillcount)++];
981 x86_mov_membase_reg (cfg->code, X86_EBP, spilloffset3,
/* emit right kid */
986 tree_emit (nts [1], cfg, kids [1], spillcount);
988 if (kids [0]->spilled) {
991 printf ("RELOAD_REGS %03x %s.%s:%s\n",
992 cfg->code - cfg->start,
993 cfg->method->klass->name_space,
994 cfg->method->klass->name, cfg->method->name);
/* reload in reverse order of the stores above */
997 if (kids [0]->reg3 != -1)
998 x86_mov_reg_membase (cfg->code, kids [0]->reg3, X86_EBP,
1000 if (kids [0]->reg2 != -1)
1001 x86_mov_reg_membase (cfg->code, kids [0]->reg2, X86_EBP,
1003 if (kids [0]->reg1 != -1)
1004 x86_mov_reg_membase (cfg->code, kids [0]->reg1, X86_EBP,
/* a third kid is the maximum supported */
1009 g_assert (!nts [3]);
1010 tree_emit (nts [2], cfg, kids [2], spillcount);
1013 tree_emit (nts [0], cfg, kids [0], spillcount);
1017 g_assert ((*spillcount) <= cfg->spillcount);
/* record where this node's code starts (offset into the buffer) */
1019 tree->addr = offset = cfg->code - cfg->start;
1021 /* we assume an instruction uses a maximum of 128 bytes */
1022 if ((cfg->code_size - offset) <= 128) {
1023 int add = MIN (cfg->code_size, 128);
1024 cfg->code_size += add;
1025 mono_jit_stats.code_reallocs++;
/* the buffer may move; re-derive cfg->code from the saved offset */
1026 cfg->start = g_realloc (cfg->start, cfg->code_size);
1027 g_assert (cfg->start);
1028 cfg->code = cfg->start + offset;
1031 if ((emit = mono_burg_func [ern]))
1034 g_assert ((cfg->code - cfg->start) < cfg->code_size);
/*
 * mono_emit_cfg:
 * Emits code for every reachable basic block in order, recording each
 * block's start offset (bb->addr) and finally the epilogue offset.
 * NOTE(review): interior lines (`top`, spillcount reset, braces) are
 * missing from this fragment.
 */
1038 mono_emit_cfg (MonoFlowGraph *cfg)
1040 int i, j, spillcount;
1042 for (i = 0; i < cfg->block_count; i++) {
1043 MonoBBlock *bb = &cfg->bblocks [i];
1044 GPtrArray *forest = bb->forest;
1047 if (!bb->reached) /* unreachable code */
/* offset of this block's first instruction, used later for branches */
1052 bb->addr = cfg->code - cfg->start;
1054 for (j = 0; j < top; j++) {
1055 MBTree *t1 = (MBTree *) g_ptr_array_index (forest, j);
1058 tree_emit (1, cfg, t1, &spillcount);
1062 cfg->epilog = cfg->code - cfg->start;
/*
 * mono_compute_branches:
 * Backpatching pass run after code emission: fills in switch jump
 * tables with absolute addresses, patches every recorded jump (block
 * targets, absolute targets, epilogue, own-IP), and fixes up the IP
 * immediate in the LMF-saving prologue code.
 * NOTE(review): interior lines (declarations, `top`, switch/break
 * structure) are missing from this fragment.
 */
1066 mono_compute_branches (MonoFlowGraph *cfg)
1074 for (j = 0; j < cfg->block_count; j++) {
1075 MonoBBlock *bb = &cfg->bblocks [j];
1076 GPtrArray *forest = bb->forest;
1079 if (!bb->reached) /* unreachable code */
1084 for (i = 0; i < top; i++) {
1085 MBTree *t1 = (MBTree *) g_ptr_array_index (forest, i);
1087 if (t1->op == MB_TERM_SWITCH) {
/* t1->data.p holds the entry count followed by MonoBBlock* slots;
 * rewrite each slot in place as an absolute code address */
1088 MonoBBlock **jt = (MonoBBlock **)t1->data.p;
1089 guint32 *rt = (guint32 *)t1->data.p;
1090 int m = *((guint32 *)t1->data.p) + 1;
1093 for (k = 1; k <= m; k++)
1094 rt [k] = (int)(jt [k]->addr + cfg->start);
1096 /* emit the switch instruction again to update addresses */
1097 cfg->code = cfg->start + t1->addr;
1098 ((MBEmitFunc)t1->emit) (t1, cfg);
/* resolve every queued jump fixup (ji->ip is buffer-relative) */
1105 for (ji = cfg->jump_info; ji; ji = ji->next) {
1106 unsigned char *ip = GUINT_TO_POINTER (GPOINTER_TO_UINT (ji->ip) + cfg->start);
1107 unsigned char *target;
1110 case MONO_JUMP_INFO_BB:
1111 target = ji->data.bb->addr + cfg->start;
1113 case MONO_JUMP_INFO_ABS:
1114 target = ji->data.target;
1116 case MONO_JUMP_INFO_EPILOG:
1117 target = cfg->epilog + cfg->start;
1119 case MONO_JUMP_INFO_IP:
/* store the instruction's own absolute address at the patch site */
1120 *(unsigned char**)ip = ip;
1123 g_assert_not_reached ();
1125 x86_patch (ip, target);
1128 /* patch the IP in the LMF saving code */
1129 if (cfg->lmfip_offset) {
1130 *((guint32 *)(cfg->start + cfg->lmfip_offset)) =
1131 (gint32)(cfg->start + cfg->lmfip_offset);
/*
 * mono_add_jump_info:
 * Queues a jump fixup for mono_compute_branches.  @ip is converted to
 * a buffer-relative offset so it stays valid if the code buffer is
 * reallocated (see tree_emit); the new record is prepended to
 * cfg->jump_info.
 */
1136 mono_add_jump_info (MonoFlowGraph *cfg, gpointer ip, MonoJumpInfoType type, gpointer target)
1138 MonoJumpInfo *ji = mono_mempool_alloc (cfg->mp, sizeof (MonoJumpInfo));
1141 ji->ip = GUINT_TO_POINTER (GPOINTER_TO_UINT (ip) - GPOINTER_TO_UINT (cfg->start));
1142 ji->data.target = target;
1143 ji->next = cfg->jump_info;
1145 cfg->jump_info = ji;
1149 arch_jit_compile_cfg (MonoDomain *target_domain, MonoFlowGraph *cfg)
1152 guint32 ls_used_mask = 0;
1153 MonoMethod *method = cfg->method;
1155 ji = mono_mempool_alloc0 (target_domain->mp, sizeof (MonoJitInfo));
1157 cfg->rs = mono_regset_new (X86_NREG);
1158 mono_regset_reserve_reg (cfg->rs, X86_ESP);
1159 mono_regset_reserve_reg (cfg->rs, X86_EBP);
1161 /* we can use this regs for global register allocation */
1162 mono_regset_reserve_reg (cfg->rs, X86_EBX);
1163 mono_regset_reserve_reg (cfg->rs, X86_ESI);
1165 if (mono_use_linear_scan) {
1166 mono_linear_scan (cfg, &ls_used_mask);
1167 cfg->rs->used_mask |= ls_used_mask;
1170 if (mono_jit_dump_forest) {
1172 printf ("FOREST %s\n", mono_method_full_name (method, TRUE));
1173 for (i = 0; i < cfg->block_count; i++) {
1174 printf ("BLOCK %d:\n", i);
1175 mono_print_forest (cfg, cfg->bblocks [i].forest);
1179 if (!mono_label_cfg (cfg))
1182 arch_allocate_regs (cfg);
1184 /* align to 8 byte boundary */
1185 cfg->locals_size += 7;
1186 cfg->locals_size &= ~7;
1188 arch_emit_prologue (cfg);
1189 cfg->prologue_end = cfg->code - cfg->start;
1190 mono_emit_cfg (cfg);
1191 arch_emit_epilogue (cfg);
1192 cfg->epilogue_end = cfg->code - cfg->start;
1194 mono_compute_branches (cfg);
1196 ji->code_size = cfg->code - cfg->start;
1197 ji->used_regs = cfg->rs->used_mask;
1198 ji->method = method;
1199 ji->code_start = cfg->start;