2 * method-to-ir.c: Convert CIL to the JIT internal representation
5 * Paolo Molaro (lupus@ximian.com)
6 * Dietmar Maurer (dietmar@ximian.com)
8 * (C) 2002 Ximian, Inc.
22 #ifdef HAVE_SYS_TIME_H
30 #include <mono/utils/memcheck.h>
32 #include <mono/metadata/assembly.h>
33 #include <mono/metadata/attrdefs.h>
34 #include <mono/metadata/loader.h>
35 #include <mono/metadata/tabledefs.h>
36 #include <mono/metadata/class.h>
37 #include <mono/metadata/object.h>
38 #include <mono/metadata/exception.h>
39 #include <mono/metadata/opcodes.h>
40 #include <mono/metadata/mono-endian.h>
41 #include <mono/metadata/tokentype.h>
42 #include <mono/metadata/tabledefs.h>
43 #include <mono/metadata/marshal.h>
44 #include <mono/metadata/debug-helpers.h>
45 #include <mono/metadata/mono-debug.h>
46 #include <mono/metadata/gc-internal.h>
47 #include <mono/metadata/security-manager.h>
48 #include <mono/metadata/threads-types.h>
49 #include <mono/metadata/security-core-clr.h>
50 #include <mono/metadata/monitor.h>
51 #include <mono/metadata/profiler-private.h>
52 #include <mono/metadata/profiler.h>
53 #include <mono/utils/mono-compiler.h>
54 #include <mono/utils/mono-memory-model.h>
55 #include <mono/metadata/mono-basic-block.h>
62 #include "jit-icalls.h"
64 #include "debugger-agent.h"
/* Inlining heuristics: BRANCH_COST is the cost charged per branch when
 * sizing a method; INLINE_LENGTH_LIMIT caps the CIL length of inline
 * candidates. */
66 #define BRANCH_COST 10
67 #define INLINE_LENGTH_LIMIT 20
/* Abort inlining when we are compiling an inlined body (cfg->method !=
 * method) of a non-wrapper method.
 * NOTE(review): the macro body is truncated in this excerpt; the action
 * taken on failure is not visible here. */
68 #define INLINE_FAILURE do {\
69 if ((cfg->method != method) && (method->wrapper_type == MONO_WRAPPER_NONE))\
/* Bail out of the current function if an exception has already been
 * recorded on the cfg.
 * NOTE(review): the macro body is truncated in this excerpt. */
72 #define CHECK_CFG_EXCEPTION do {\
73 if (cfg->exception_type != MONO_EXCEPTION_NONE)\
/* Record a method-access violation: set MONO_EXCEPTION_METHOD_ACCESS on
 * the cfg with a message naming the inaccessible method and its caller,
 * free the temporary name strings and jump to exception_exit. */
76 #define METHOD_ACCESS_FAILURE do { \
77 char *method_fname = mono_method_full_name (method, TRUE); \
78 char *cil_method_fname = mono_method_full_name (cil_method, TRUE); \
79 mono_cfg_set_exception (cfg, MONO_EXCEPTION_METHOD_ACCESS); \
80 cfg->exception_message = g_strdup_printf ("Method `%s' is inaccessible from method `%s'\n", cil_method_fname, method_fname); \
81 g_free (method_fname); \
82 g_free (cil_method_fname); \
83 goto exception_exit; \
/* Same as METHOD_ACCESS_FAILURE, but for an inaccessible field:
 * records MONO_EXCEPTION_FIELD_ACCESS and jumps to exception_exit. */
85 #define FIELD_ACCESS_FAILURE do { \
86 char *method_fname = mono_method_full_name (method, TRUE); \
87 char *field_fname = mono_field_full_name (field); \
88 mono_cfg_set_exception (cfg, MONO_EXCEPTION_FIELD_ACCESS); \
89 cfg->exception_message = g_strdup_printf ("Field `%s' is inaccessible from method `%s'\n", field_fname, method_fname); \
90 g_free (method_fname); \
91 g_free (field_fname); \
92 goto exception_exit; \
/* Give up on generic sharing for the current method: optionally trace
 * the failing opcode (verbose_level > 2), flag
 * MONO_EXCEPTION_GENERIC_SHARING_FAILED and jump to exception_exit.
 * Only acts when a generic sharing context is present. */
94 #define GENERIC_SHARING_FAILURE(opcode) do { \
95 if (cfg->generic_sharing_context) { \
96 if (cfg->verbose_level > 2) \
97 printf ("sharing failed for method %s.%s.%s/%d opcode %s line %d\n", method->klass->name_space, method->klass->name, method->name, method->signature->param_count, mono_opcode_name ((opcode)), __LINE__); \
98 mono_cfg_set_exception (cfg, MONO_EXCEPTION_GENERIC_SHARING_FAILED); \
99 goto exception_exit; \
/* Flag an out-of-memory condition on the cfg and bail out through
 * exception_exit. */
102 #define OUT_OF_MEMORY_FAILURE do { \
103 mono_cfg_set_exception (cfg, MONO_EXCEPTION_OUT_OF_MEMORY); \
104 goto exception_exit; \
106 /* Determine whether 'ins' represents a load of the 'this' argument:
 * the method must have a 'this' parameter and 'ins' must be a plain
 * OP_MOVE reading the vreg of argument 0. */
107 #define MONO_CHECK_THIS(ins) (mono_method_signature (cfg->method)->hasthis && ((ins)->opcode == OP_MOVE) && ((ins)->sreg1 == cfg->args [0]->dreg))
/* Forward declarations: opcode translation helpers and shared emitters
 * defined later in this file (or in other mini translation units). */
109 static int ldind_to_load_membase (int opcode);
110 static int stind_to_store_membase (int opcode);
112 int mono_op_to_op_imm (int opcode);
113 int mono_op_to_op_imm_noemul (int opcode);
115 MonoInst* mono_emit_native_call (MonoCompile *cfg, gconstpointer func, MonoMethodSignature *sig, MonoInst **args);
116 void mini_emit_stobj (MonoCompile *cfg, MonoInst *dest, MonoInst *src, MonoClass *klass, gboolean native);
117 void mini_emit_initobj (MonoCompile *cfg, MonoInst *dest, const guchar *ip, MonoClass *klass);
119 /* helper methods signatures */
/* Cached icall signatures for calls into runtime trampolines;
 * initialized by mono_create_helper_signatures () below. */
120 static MonoMethodSignature *helper_sig_class_init_trampoline = NULL;
121 static MonoMethodSignature *helper_sig_domain_get = NULL;
122 static MonoMethodSignature *helper_sig_generic_class_init_trampoline = NULL;
123 static MonoMethodSignature *helper_sig_generic_class_init_trampoline_llvm = NULL;
124 static MonoMethodSignature *helper_sig_rgctx_lazy_fetch_trampoline = NULL;
125 static MonoMethodSignature *helper_sig_monitor_enter_exit_trampoline = NULL;
126 static MonoMethodSignature *helper_sig_monitor_enter_exit_trampoline_llvm = NULL;
129 * Instruction metadata
/* Expands mini-ops.h twice: first to build the per-opcode
 * dreg/sreg-kind table, then (with redefined MINI_OP/MINI_OP3) to build
 * ins_sreg_counts. NOTE(review): several lines of these expansions are
 * missing from this excerpt. */
137 #define MINI_OP(a,b,dest,src1,src2) dest, src1, src2, ' ',
138 #define MINI_OP3(a,b,dest,src1,src2,src3) dest, src1, src2, src3,
144 #if SIZEOF_REGISTER == 8
149 /* keep in sync with the enum in mini.h */
152 #include "mini-ops.h"
157 #define MINI_OP(a,b,dest,src1,src2) ((src2) != NONE ? 2 : ((src1) != NONE ? 1 : 0)),
158 #define MINI_OP3(a,b,dest,src1,src2,src3) ((src3) != NONE ? 3 : ((src2) != NONE ? 2 : ((src1) != NONE ? 1 : 0))),
160 * This should contain the index of the last sreg + 1. This is not the same
161 * as the number of sregs for opcodes like IA64_CMP_EQ_IMM.
163 const gint8 ins_sreg_counts[] = {
164 #include "mini-ops.h"
/* Initialize liveness bookkeeping for a variable; 0xffff marks the
 * first-use bblock id as "not seen yet".
 * NOTE(review): the macro body is truncated in this excerpt. */
169 #define MONO_INIT_VARINFO(vi,id) do { \
170 (vi)->range.first_use.pos.bid = 0xffff; \
/* Copy the three source registers of INS from the REGS array. */
176 mono_inst_set_src_registers (MonoInst *ins, int *regs)
178 ins->sreg1 = regs [0];
179 ins->sreg2 = regs [1];
180 ins->sreg3 = regs [2];
/* Public wrappers around the vreg allocators: integer, float, pointer
 * and stack-type-directed destination registers respectively. */
184 mono_alloc_ireg (MonoCompile *cfg)
186 return alloc_ireg (cfg);
190 mono_alloc_freg (MonoCompile *cfg)
192 return alloc_freg (cfg);
196 mono_alloc_preg (MonoCompile *cfg)
198 return alloc_preg (cfg);
202 mono_alloc_dreg (MonoCompile *cfg, MonoStackType stack_type)
204 return alloc_dreg (cfg, stack_type);
208 * mono_alloc_ireg_ref:
210 * Allocate an IREG, and mark it as holding a GC ref.
213 mono_alloc_ireg_ref (MonoCompile *cfg)
215 return alloc_ireg_ref (cfg);
219 * mono_alloc_ireg_mp:
221 * Allocate an IREG, and mark it as holding a managed pointer.
/* Unlike mono_alloc_ireg (), these two record the GC kind of the new
 * vreg (ref vs. managed pointer). */
224 mono_alloc_ireg_mp (MonoCompile *cfg)
226 return alloc_ireg_mp (cfg);
230 * mono_alloc_ireg_copy:
232 * Allocate an IREG with the same GC type as VREG.
/* Falls back to a plain (untracked) IREG when VREG is neither a GC ref
 * nor a managed pointer. */
235 mono_alloc_ireg_copy (MonoCompile *cfg, guint32 vreg)
237 if (vreg_is_ref (cfg, vreg))
238 return alloc_ireg_ref (cfg);
239 else if (vreg_is_mp (cfg, vreg))
240 return alloc_ireg_mp (cfg);
242 return alloc_ireg (cfg);
/*
 * mono_type_to_regmove:
 *
 * Map TYPE to the kind of move opcode used to copy values of that type
 * between registers. Enums and generic instances are unwrapped to their
 * underlying/container type before dispatching.
 * NOTE(review): the return statements are missing from this excerpt, so
 * the concrete opcode chosen per case cannot be confirmed here.
 */
246 mono_type_to_regmove (MonoCompile *cfg, MonoType *type)
252 switch (type->type) {
255 case MONO_TYPE_BOOLEAN:
267 case MONO_TYPE_FNPTR:
269 case MONO_TYPE_CLASS:
270 case MONO_TYPE_STRING:
271 case MONO_TYPE_OBJECT:
272 case MONO_TYPE_SZARRAY:
273 case MONO_TYPE_ARRAY:
277 #if SIZEOF_REGISTER == 8
286 case MONO_TYPE_VALUETYPE:
287 if (type->data.klass->enumtype) {
288 type = mono_class_enum_basetype (type->data.klass);
291 if (MONO_CLASS_IS_SIMD (cfg, mono_class_from_mono_type (type)))
294 case MONO_TYPE_TYPEDBYREF:
296 case MONO_TYPE_GENERICINST:
297 type = &type->data.generic_class->container_class->byval_arg;
301 g_assert (cfg->generic_sharing_context);
304 g_error ("unknown type 0x%02x in type_to_regstore", type->type);
/* Debug helper: print a basic block's number, its incoming and outgoing
 * edges (with depth-first numbers) and all of its instructions,
 * prefixed with MSG. */
310 mono_print_bb (MonoBasicBlock *bb, const char *msg)
315 printf ("\n%s %d: [IN: ", msg, bb->block_num);
316 for (i = 0; i < bb->in_count; ++i)
317 printf (" BB%d(%d)", bb->in_bb [i]->block_num, bb->in_bb [i]->dfn);
319 for (i = 0; i < bb->out_count; ++i)
320 printf (" BB%d(%d)", bb->out_bb [i]->block_num, bb->out_bb [i]->dfn);
322 for (tree = bb->code; tree; tree = tree->next)
323 mono_print_ins_index (-1, tree);
/* Initialize the helper_sig_* signature cache declared near the top of
 * this file. The strings describe icall signatures: "<ret> <args...>". */
327 mono_create_helper_signatures (void)
329 helper_sig_domain_get = mono_create_icall_signature ("ptr");
330 helper_sig_class_init_trampoline = mono_create_icall_signature ("void");
331 helper_sig_generic_class_init_trampoline = mono_create_icall_signature ("void");
332 helper_sig_generic_class_init_trampoline_llvm = mono_create_icall_signature ("void ptr");
333 helper_sig_rgctx_lazy_fetch_trampoline = mono_create_icall_signature ("ptr ptr");
334 helper_sig_monitor_enter_exit_trampoline = mono_create_icall_signature ("void");
335 helper_sig_monitor_enter_exit_trampoline_llvm = mono_create_icall_signature ("void object");
339 * Can't put this at the beginning, since other files reference stuff from this
/* UNVERIFIED / LOAD_ERROR: abort IL processing, either breaking into
 * the debugger (when break_on_unverified is set) or jumping to the
 * corresponding error label in the translating function. */
344 #define UNVERIFIED do { if (mini_get_debug_options ()->break_on_unverified) G_BREAKPOINT (); else goto unverified; } while (0)
346 #define LOAD_ERROR do { if (mini_get_debug_options ()->break_on_unverified) G_BREAKPOINT (); else goto load_error; } while (0)
/* GET_BBLOCK: look up the basic block starting at IL address IP,
 * lazily creating and registering it on first use; validates that IP
 * lies inside the method body. */
348 #define GET_BBLOCK(cfg,tblock,ip) do { \
349 (tblock) = cfg->cil_offset_to_bb [(ip) - cfg->cil_start]; \
351 if ((ip) >= end || (ip) < header->code) UNVERIFIED; \
352 NEW_BBLOCK (cfg, (tblock)); \
353 (tblock)->cil_code = (ip); \
354 ADD_BBLOCK (cfg, (tblock)); \
358 #if defined(TARGET_X86) || defined(TARGET_AMD64)
/* x86/amd64 only: emit an OP_X86_LEA computing an address from SR1,
 * SR2 scaled by SHIFT, plus IMM (x86 lea addressing), into a freshly
 * allocated managed-pointer register. */
359 #define EMIT_NEW_X86_LEA(cfg,dest,sr1,sr2,shift,imm) do { \
360 MONO_INST_NEW (cfg, dest, OP_X86_LEA); \
361 (dest)->dreg = alloc_ireg_mp ((cfg)); \
362 (dest)->sreg1 = (sr1); \
363 (dest)->sreg2 = (sr2); \
364 (dest)->inst_imm = (imm); \
365 (dest)->backend.shift_amount = (shift); \
366 MONO_ADD_INS ((cfg)->cbb, (dest)); \
370 #if SIZEOF_REGISTER == 8
/* On 64-bit targets, sign-extend a 32-bit operand that is combined with
 * a pointer-sized one so the binop operates on same-width values; the
 * visible case widens ARG2 when ARG1 is a pointer (other cases appear
 * to be truncated from this excerpt). No-op on 32-bit targets. */
371 #define ADD_WIDEN_OP(ins, arg1, arg2) do { \
372 /* FIXME: Need to add many more cases */ \
373 if ((arg1)->type == STACK_PTR && (arg2)->type == STACK_I4) { \
375 int dr = alloc_preg (cfg); \
376 EMIT_NEW_UNALU (cfg, widen, OP_SEXT_I4, dr, (arg2)->dreg); \
377 (ins)->sreg2 = widen->dreg; \
381 #define ADD_WIDEN_OP(ins, arg1, arg2)
/* Emit the binary opcode OP on the two top-of-stack values: infer the
 * result type via type_from_op (), insert a widening op if needed,
 * allocate the destination register and push the (possibly decomposed)
 * result back onto the evaluation stack. */
384 #define ADD_BINOP(op) do { \
385 MONO_INST_NEW (cfg, ins, (op)); \
387 ins->sreg1 = sp [0]->dreg; \
388 ins->sreg2 = sp [1]->dreg; \
389 type_from_op (ins, sp [0], sp [1]); \
391 /* Have to insert a widening op */ \
392 ADD_WIDEN_OP (ins, sp [0], sp [1]); \
393 ins->dreg = alloc_dreg ((cfg), (ins)->type); \
394 MONO_ADD_INS ((cfg)->cbb, (ins)); \
395 *sp++ = mono_decompose_opcode ((cfg), (ins)); \
/* Unary counterpart of ADD_BINOP: emit OP on the top-of-stack value,
 * type it, allocate the dreg and push the result back. */
398 #define ADD_UNOP(op) do { \
399 MONO_INST_NEW (cfg, ins, (op)); \
401 ins->sreg1 = sp [0]->dreg; \
402 type_from_op (ins, sp [0], NULL); \
404 (ins)->dreg = alloc_dreg ((cfg), (ins)->type); \
405 MONO_ADD_INS ((cfg)->cbb, (ins)); \
406 *sp++ = mono_decompose_opcode (cfg, ins); \
/* Emit a compare followed by a conditional branch: links the current
 * bblock to the branch target and to the fall-through block (NEXT_BLOCK
 * if given, otherwise the bblock at the next IL offset), flushing any
 * values left on the evaluation stack via handle_stack_args () before
 * adding the compare/branch pair. */
409 #define ADD_BINCOND(next_block) do { \
412 MONO_INST_NEW(cfg, cmp, OP_COMPARE); \
413 cmp->sreg1 = sp [0]->dreg; \
414 cmp->sreg2 = sp [1]->dreg; \
415 type_from_op (cmp, sp [0], sp [1]); \
417 type_from_op (ins, sp [0], sp [1]); \
418 ins->inst_many_bb = mono_mempool_alloc (cfg->mempool, sizeof(gpointer)*2); \
419 GET_BBLOCK (cfg, tblock, target); \
420 link_bblock (cfg, bblock, tblock); \
421 ins->inst_true_bb = tblock; \
422 if ((next_block)) { \
423 link_bblock (cfg, bblock, (next_block)); \
424 ins->inst_false_bb = (next_block); \
425 start_new_bblock = 1; \
427 GET_BBLOCK (cfg, tblock, ip); \
428 link_bblock (cfg, bblock, tblock); \
429 ins->inst_false_bb = tblock; \
430 start_new_bblock = 2; \
432 if (sp != stack_start) { \
433 handle_stack_args (cfg, stack_start, sp - stack_start); \
434 CHECK_UNVERIFIABLE (cfg); \
436 MONO_ADD_INS (bblock, cmp); \
437 MONO_ADD_INS (bblock, ins); \
441 * link_bblock: Links two basic blocks
443 * links two basic blocks in the control flow graph, the 'from'
444 * argument is the starting block and the 'to' argument is the block
445 * the control flow ends to after 'from'.
/* The edge is added to both from->out_bb and to->in_bb; a scan first
 * checks whether the edge already exists, and the arrays are regrown
 * from the cfg mempool (old arrays are abandoned, which is fine for
 * mempool-backed storage). Entry/exit edges are only traced.
 * NOTE(review): the tracing guard and array-store lines are truncated
 * in this excerpt. */
448 link_bblock (MonoCompile *cfg, MonoBasicBlock *from, MonoBasicBlock* to)
450 MonoBasicBlock **newa;
454 if (from->cil_code) {
456 printf ("edge from IL%04x to IL_%04x\n", from->cil_code - cfg->cil_code, to->cil_code - cfg->cil_code);
458 printf ("edge from IL%04x to exit\n", from->cil_code - cfg->cil_code);
461 printf ("edge from entry to IL_%04x\n", to->cil_code - cfg->cil_code);
463 printf ("edge from entry to exit\n");
468 for (i = 0; i < from->out_count; ++i) {
469 if (to == from->out_bb [i]) {
475 newa = mono_mempool_alloc (cfg->mempool, sizeof (gpointer) * (from->out_count + 1));
476 for (i = 0; i < from->out_count; ++i) {
477 newa [i] = from->out_bb [i];
485 for (i = 0; i < to->in_count; ++i) {
486 if (from == to->in_bb [i]) {
492 newa = mono_mempool_alloc (cfg->mempool, sizeof (gpointer) * (to->in_count + 1));
493 for (i = 0; i < to->in_count; ++i) {
494 newa [i] = to->in_bb [i];
/* Public wrapper around the static link_bblock (). */
503 mono_link_bblock (MonoCompile *cfg, MonoBasicBlock *from, MonoBasicBlock* to)
505 link_bblock (cfg, from, to);
509 * mono_find_block_region:
511 * We mark each basic block with a region ID. We use that to avoid BB
512 * optimizations when blocks are in different regions.
515 * A region token that encodes where this region is, and information
516 * about the clause owner for this block.
518 * The region encodes the try/catch/filter clause that owns this block
519 * as well as the type. -1 is a special value that represents a block
520 * that is in none of try/catch/filter.
/* Token layout: (clause index + 1) << 8, OR'ed with a MONO_REGION_*
 * kind and the clause flags. Filter regions are matched first since a
 * filter's offset range is separate from its handler range. */
523 mono_find_block_region (MonoCompile *cfg, int offset)
525 MonoMethodHeader *header = cfg->header;
526 MonoExceptionClause *clause;
529 for (i = 0; i < header->num_clauses; ++i) {
530 clause = &header->clauses [i];
531 if ((clause->flags == MONO_EXCEPTION_CLAUSE_FILTER) && (offset >= clause->data.filter_offset) &&
532 (offset < (clause->handler_offset)))
533 return ((i + 1) << 8) | MONO_REGION_FILTER | clause->flags;
535 if (MONO_OFFSET_IN_HANDLER (clause, offset)) {
536 if (clause->flags == MONO_EXCEPTION_CLAUSE_FINALLY)
537 return ((i + 1) << 8) | MONO_REGION_FINALLY | clause->flags;
538 else if (clause->flags == MONO_EXCEPTION_CLAUSE_FAULT)
539 return ((i + 1) << 8) | MONO_REGION_FAULT | clause->flags;
541 return ((i + 1) << 8) | MONO_REGION_CATCH | clause->flags;
544 if (MONO_OFFSET_IN_CLAUSE (clause, offset))
545 return ((i + 1) << 8) | clause->flags;
/* Return a GList of the exception clauses of kind TYPE whose protected
 * range contains IP but not TARGET, i.e. the clauses a branch from IP
 * to TARGET would leave. */
552 mono_find_final_block (MonoCompile *cfg, unsigned char *ip, unsigned char *target, int type)
554 MonoMethodHeader *header = cfg->header;
555 MonoExceptionClause *clause;
559 for (i = 0; i < header->num_clauses; ++i) {
560 clause = &header->clauses [i];
561 if (MONO_OFFSET_IN_CLAUSE (clause, (ip - header->code)) &&
562 (!MONO_OFFSET_IN_CLAUSE (clause, (target - header->code)))) {
563 if (clause->flags == type)
564 res = g_list_append (res, clause);
/* Return the spvar (pointer-sized local, presumably holding a saved
 * stack pointer for the handler region -- confirm against callers) for
 * REGION, creating and caching it in cfg->spvars on first use.
 * MONO_INST_INDIRECT keeps it out of the register allocator. */
571 mono_create_spvar_for_region (MonoCompile *cfg, int region)
575 var = g_hash_table_lookup (cfg->spvars, GINT_TO_POINTER (region));
579 var = mono_compile_create_var (cfg, &mono_defaults.int_class->byval_arg, OP_LOCAL);
580 /* prevent it from being register allocated */
581 var->flags |= MONO_INST_INDIRECT;
583 g_hash_table_insert (cfg->spvars, GINT_TO_POINTER (region), var);
/* Look up the exception variable previously created for OFFSET;
 * returns NULL if none exists yet. */
587 mono_find_exvar_for_offset (MonoCompile *cfg, int offset)
589 return g_hash_table_lookup (cfg->exvars, GINT_TO_POINTER (offset));
/* Return the object-typed local holding the in-flight exception for the
 * handler at OFFSET, creating and caching it in cfg->exvars on first
 * use. Like the spvar, it is kept off the register allocator. */
593 mono_create_exvar_for_offset (MonoCompile *cfg, int offset)
597 var = g_hash_table_lookup (cfg->exvars, GINT_TO_POINTER (offset));
601 var = mono_compile_create_var (cfg, &mono_defaults.object_class->byval_arg, OP_LOCAL);
602 /* prevent it from being register allocated */
603 var->flags |= MONO_INST_INDIRECT;
605 g_hash_table_insert (cfg->exvars, GINT_TO_POINTER (offset), var);
611 * Returns the type used in the eval stack when @type is loaded.
612 * FIXME: return a MonoType/MonoClass for the byref and VALUETYPE cases.
/* Sets both inst->type (STACK_* kind) and inst->klass. Byref types map
 * to STACK_MP; enums and generic instances are unwrapped and
 * re-dispatched. NOTE(review): the switch fall-throughs/returns and the
 * byref guard are truncated in this excerpt. */
615 type_to_eval_stack_type (MonoCompile *cfg, MonoType *type, MonoInst *inst)
619 inst->klass = klass = mono_class_from_mono_type (type);
621 inst->type = STACK_MP;
626 switch (type->type) {
628 inst->type = STACK_INV;
632 case MONO_TYPE_BOOLEAN:
638 inst->type = STACK_I4;
643 case MONO_TYPE_FNPTR:
644 inst->type = STACK_PTR;
646 case MONO_TYPE_CLASS:
647 case MONO_TYPE_STRING:
648 case MONO_TYPE_OBJECT:
649 case MONO_TYPE_SZARRAY:
650 case MONO_TYPE_ARRAY:
651 inst->type = STACK_OBJ;
655 inst->type = STACK_I8;
659 inst->type = STACK_R8;
661 case MONO_TYPE_VALUETYPE:
662 if (type->data.klass->enumtype) {
663 type = mono_class_enum_basetype (type->data.klass);
667 inst->type = STACK_VTYPE;
670 case MONO_TYPE_TYPEDBYREF:
671 inst->klass = mono_defaults.typed_reference_class;
672 inst->type = STACK_VTYPE;
674 case MONO_TYPE_GENERICINST:
675 type = &type->data.generic_class->container_class->byval_arg;
678 case MONO_TYPE_MVAR :
679 /* FIXME: all the arguments must be references for now,
680 * later look inside cfg and see if the arg num is
683 g_assert (cfg->generic_sharing_context);
684 inst->type = STACK_OBJ;
687 g_error ("unknown type 0x%02x in eval stack type", type->type);
692 * The following tables are used to quickly validate the IL code in type_from_op ().
/* The bin_*/shift tables are indexed by the STACK_* kinds of the two
 * operands and yield the result kind (STACK_INV marks invalid IL). The
 * *_op_map tables are indexed by a STACK_* kind and hold the offset to
 * add to the generic CIL opcode to obtain the type-specific IR opcode
 * (see type_from_op ()). */
695 bin_num_table [STACK_MAX] [STACK_MAX] = {
696 {STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
697 {STACK_INV, STACK_I4, STACK_INV, STACK_PTR, STACK_INV, STACK_MP, STACK_INV, STACK_INV},
698 {STACK_INV, STACK_INV, STACK_I8, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
699 {STACK_INV, STACK_PTR, STACK_INV, STACK_PTR, STACK_INV, STACK_MP, STACK_INV, STACK_INV},
700 {STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_R8, STACK_INV, STACK_INV, STACK_INV},
701 {STACK_INV, STACK_MP, STACK_INV, STACK_MP, STACK_INV, STACK_PTR, STACK_INV, STACK_INV},
702 {STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
703 {STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV}
708 STACK_INV, STACK_I4, STACK_I8, STACK_PTR, STACK_R8, STACK_INV, STACK_INV, STACK_INV
711 /* reduce the size of this table */
713 bin_int_table [STACK_MAX] [STACK_MAX] = {
714 {STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
715 {STACK_INV, STACK_I4, STACK_INV, STACK_PTR, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
716 {STACK_INV, STACK_INV, STACK_I8, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
717 {STACK_INV, STACK_PTR, STACK_INV, STACK_PTR, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
718 {STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
719 {STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
720 {STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
721 {STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV}
/* Comparison compatibility: 0 = invalid, non-zero = allowed; values > 1
 * appear to distinguish special cases (e.g. ptr vs. &, O vs. ptr) --
 * confirm against the users of this table. */
725 bin_comp_table [STACK_MAX] [STACK_MAX] = {
726 /* Inv i L p F & O vt */
728 {0, 1, 0, 1, 0, 0, 0, 0}, /* i, int32 */
729 {0, 0, 1, 0, 0, 0, 0, 0}, /* L, int64 */
730 {0, 1, 0, 1, 0, 2, 4, 0}, /* p, ptr */
731 {0, 0, 0, 0, 1, 0, 0, 0}, /* F, R8 */
732 {0, 0, 0, 2, 0, 1, 0, 0}, /* &, managed pointer */
733 {0, 0, 0, 4, 0, 0, 3, 0}, /* O, reference */
734 {0, 0, 0, 0, 0, 0, 0, 0}, /* vt value type */
737 /* reduce the size of this table */
739 shift_table [STACK_MAX] [STACK_MAX] = {
740 {STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
741 {STACK_INV, STACK_I4, STACK_INV, STACK_I4, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
742 {STACK_INV, STACK_I8, STACK_INV, STACK_I8, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
743 {STACK_INV, STACK_PTR, STACK_INV, STACK_PTR, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
744 {STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
745 {STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
746 {STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
747 {STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV}
751 * Tables to map from the non-specific opcode to the matching
752 * type-specific opcode.
754 /* handles from CEE_ADD to CEE_SHR_UN (CEE_REM_UN for floats) */
756 binops_op_map [STACK_MAX] = {
757 0, OP_IADD-CEE_ADD, OP_LADD-CEE_ADD, OP_PADD-CEE_ADD, OP_FADD-CEE_ADD, OP_PADD-CEE_ADD
760 /* handles from CEE_NEG to CEE_CONV_U8 */
762 unops_op_map [STACK_MAX] = {
763 0, OP_INEG-CEE_NEG, OP_LNEG-CEE_NEG, OP_PNEG-CEE_NEG, OP_FNEG-CEE_NEG, OP_PNEG-CEE_NEG
766 /* handles from CEE_CONV_U2 to CEE_SUB_OVF_UN */
768 ovfops_op_map [STACK_MAX] = {
769 0, OP_ICONV_TO_U2-CEE_CONV_U2, OP_LCONV_TO_U2-CEE_CONV_U2, OP_PCONV_TO_U2-CEE_CONV_U2, OP_FCONV_TO_U2-CEE_CONV_U2, OP_PCONV_TO_U2-CEE_CONV_U2, OP_PCONV_TO_U2-CEE_CONV_U2
772 /* handles from CEE_CONV_OVF_I1_UN to CEE_CONV_OVF_U_UN */
774 ovf2ops_op_map [STACK_MAX] = {
775 0, OP_ICONV_TO_OVF_I1_UN-CEE_CONV_OVF_I1_UN, OP_LCONV_TO_OVF_I1_UN-CEE_CONV_OVF_I1_UN, OP_PCONV_TO_OVF_I1_UN-CEE_CONV_OVF_I1_UN, OP_FCONV_TO_OVF_I1_UN-CEE_CONV_OVF_I1_UN, OP_PCONV_TO_OVF_I1_UN-CEE_CONV_OVF_I1_UN
778 /* handles from CEE_CONV_OVF_I1 to CEE_CONV_OVF_U8 */
780 ovf3ops_op_map [STACK_MAX] = {
781 0, OP_ICONV_TO_OVF_I1-CEE_CONV_OVF_I1, OP_LCONV_TO_OVF_I1-CEE_CONV_OVF_I1, OP_PCONV_TO_OVF_I1-CEE_CONV_OVF_I1, OP_FCONV_TO_OVF_I1-CEE_CONV_OVF_I1, OP_PCONV_TO_OVF_I1-CEE_CONV_OVF_I1
784 /* handles from CEE_BEQ to CEE_BLT_UN */
786 beqops_op_map [STACK_MAX] = {
787 0, OP_IBEQ-CEE_BEQ, OP_LBEQ-CEE_BEQ, OP_PBEQ-CEE_BEQ, OP_FBEQ-CEE_BEQ, OP_PBEQ-CEE_BEQ, OP_PBEQ-CEE_BEQ
790 /* handles from CEE_CEQ to CEE_CLT_UN */
792 ceqops_op_map [STACK_MAX] = {
793 0, OP_ICEQ-OP_CEQ, OP_LCEQ-OP_CEQ, OP_PCEQ-OP_CEQ, OP_FCEQ-OP_CEQ, OP_PCEQ-OP_CEQ, OP_PCEQ-OP_CEQ
797 * Sets ins->type (the type on the eval stack) according to the
798 * type of the opcode and the arguments to it.
799 * Invalid IL code is marked by setting ins->type to the invalid value STACK_INV.
801 * FIXME: this function sets ins->type unconditionally in some cases, but
802 * it should set it to invalid for some types (a conv.x on an object)
/* Besides typing the result, this also rewrites ins->opcode from the
 * generic CIL opcode to the type-specific IR opcode by adding the
 * offsets from the *_op_map tables above, or by picking OP_[ILF]COMPARE
 * variants directly for compares.
 * NOTE(review): many case labels and break statements of this switch
 * are missing from this excerpt; the groupings below are partial. */
805 type_from_op (MonoInst *ins, MonoInst *src1, MonoInst *src2) {
807 switch (ins->opcode) {
814 /* FIXME: check unverifiable args for STACK_MP */
815 ins->type = bin_num_table [src1->type] [src2->type];
816 ins->opcode += binops_op_map [ins->type];
823 ins->type = bin_int_table [src1->type] [src2->type];
824 ins->opcode += binops_op_map [ins->type];
829 ins->type = shift_table [src1->type] [src2->type];
830 ins->opcode += binops_op_map [ins->type];
/* Compares: pick the 64-bit variant for I8 (and pointer-sized operands
 * on 64-bit), the float variant for R8, else the 32-bit one. */
835 ins->type = bin_comp_table [src1->type] [src2->type] ? STACK_I4: STACK_INV;
836 if ((src1->type == STACK_I8) || ((SIZEOF_VOID_P == 8) && ((src1->type == STACK_PTR) || (src1->type == STACK_OBJ) || (src1->type == STACK_MP))))
837 ins->opcode = OP_LCOMPARE;
838 else if (src1->type == STACK_R8)
839 ins->opcode = OP_FCOMPARE;
841 ins->opcode = OP_ICOMPARE;
843 case OP_ICOMPARE_IMM:
844 ins->type = bin_comp_table [src1->type] [src1->type] ? STACK_I4 : STACK_INV;
845 if ((src1->type == STACK_I8) || ((SIZEOF_VOID_P == 8) && ((src1->type == STACK_PTR) || (src1->type == STACK_OBJ) || (src1->type == STACK_MP))))
846 ins->opcode = OP_LCOMPARE_IMM;
858 ins->opcode += beqops_op_map [src1->type];
861 ins->type = bin_comp_table [src1->type] [src2->type] ? STACK_I4: STACK_INV;
862 ins->opcode += ceqops_op_map [src1->type];
/* Unsigned compares: bit 0 of bin_comp_table gates validity. */
868 ins->type = (bin_comp_table [src1->type] [src2->type] & 1) ? STACK_I4: STACK_INV;
869 ins->opcode += ceqops_op_map [src1->type];
873 ins->type = neg_table [src1->type];
874 ins->opcode += unops_op_map [ins->type];
877 if (src1->type >= STACK_I4 && src1->type <= STACK_PTR)
878 ins->type = src1->type;
880 ins->type = STACK_INV;
881 ins->opcode += unops_op_map [ins->type];
887 ins->type = STACK_I4;
888 ins->opcode += unops_op_map [src1->type];
/* conv.r.un: result is R8 regardless of the integer source width. */
891 ins->type = STACK_R8;
892 switch (src1->type) {
895 ins->opcode = OP_ICONV_TO_R_UN;
898 ins->opcode = OP_LCONV_TO_R_UN;
902 case CEE_CONV_OVF_I1:
903 case CEE_CONV_OVF_U1:
904 case CEE_CONV_OVF_I2:
905 case CEE_CONV_OVF_U2:
906 case CEE_CONV_OVF_I4:
907 case CEE_CONV_OVF_U4:
908 ins->type = STACK_I4;
909 ins->opcode += ovf3ops_op_map [src1->type];
911 case CEE_CONV_OVF_I_UN:
912 case CEE_CONV_OVF_U_UN:
913 ins->type = STACK_PTR;
914 ins->opcode += ovf2ops_op_map [src1->type];
916 case CEE_CONV_OVF_I1_UN:
917 case CEE_CONV_OVF_I2_UN:
918 case CEE_CONV_OVF_I4_UN:
919 case CEE_CONV_OVF_U1_UN:
920 case CEE_CONV_OVF_U2_UN:
921 case CEE_CONV_OVF_U4_UN:
922 ins->type = STACK_I4;
923 ins->opcode += ovf2ops_op_map [src1->type];
/* conv.u: pointer-sized result; on 64-bit a ptr source is already the
 * right width so a plain move suffices. */
926 ins->type = STACK_PTR;
927 switch (src1->type) {
929 ins->opcode = OP_ICONV_TO_U;
933 #if SIZEOF_VOID_P == 8
934 ins->opcode = OP_LCONV_TO_U;
936 ins->opcode = OP_MOVE;
940 ins->opcode = OP_LCONV_TO_U;
943 ins->opcode = OP_FCONV_TO_U;
949 ins->type = STACK_I8;
950 ins->opcode += unops_op_map [src1->type];
952 case CEE_CONV_OVF_I8:
953 case CEE_CONV_OVF_U8:
954 ins->type = STACK_I8;
955 ins->opcode += ovf3ops_op_map [src1->type];
957 case CEE_CONV_OVF_U8_UN:
958 case CEE_CONV_OVF_I8_UN:
959 ins->type = STACK_I8;
960 ins->opcode += ovf2ops_op_map [src1->type];
964 ins->type = STACK_R8;
965 ins->opcode += unops_op_map [src1->type];
968 ins->type = STACK_R8;
972 ins->type = STACK_I4;
973 ins->opcode += ovfops_op_map [src1->type];
978 ins->type = STACK_PTR;
979 ins->opcode += ovfops_op_map [src1->type];
/* Overflow-checked arithmetic is not defined on floats. */
987 ins->type = bin_num_table [src1->type] [src2->type];
988 ins->opcode += ovfops_op_map [src1->type];
989 if (ins->type == STACK_R8)
990 ins->type = STACK_INV;
992 case OP_LOAD_MEMBASE:
993 ins->type = STACK_PTR;
995 case OP_LOADI1_MEMBASE:
996 case OP_LOADU1_MEMBASE:
997 case OP_LOADI2_MEMBASE:
998 case OP_LOADU2_MEMBASE:
999 case OP_LOADI4_MEMBASE:
1000 case OP_LOADU4_MEMBASE:
1001 ins->type = STACK_PTR;
1003 case OP_LOADI8_MEMBASE:
1004 ins->type = STACK_I8;
1006 case OP_LOADR4_MEMBASE:
1007 case OP_LOADR8_MEMBASE:
1008 ins->type = STACK_R8;
1011 g_error ("opcode 0x%04x not handled in type from op", ins->opcode);
/* Managed pointers carry object_class as a default klass. */
1015 if (ins->type == STACK_MP)
1016 ins->klass = mono_defaults.object_class;
/* Eval-stack kinds produced per load width (row truncated in this
 * excerpt), and the parameter compatibility table referenced (only in
 * commented-out code) by check_values_to_signature () below. */
1021 STACK_I4, STACK_I4, STACK_I4, STACK_I4, STACK_I4, STACK_I4, STACK_I8, STACK_PTR, STACK_R8, STACK_R8, STACK_OBJ
1027 param_table [STACK_MAX] [STACK_MAX] = {
/* Check that the values in ARGS are compatible with SIG's parameter
 * types (byref-ness, reference kinds, float kinds).
 * NOTE(review): the return statements are truncated in this excerpt --
 * presumably FALSE on mismatch, TRUE otherwise; confirm in the full
 * source. */
1032 check_values_to_signature (MonoInst *args, MonoType *this, MonoMethodSignature *sig) {
1036 switch (args->type) {
1046 for (i = 0; i < sig->param_count; ++i) {
1047 switch (args [i].type) {
1051 if (!sig->params [i]->byref)
1055 if (sig->params [i]->byref)
1057 switch (sig->params [i]->type) {
1058 case MONO_TYPE_CLASS:
1059 case MONO_TYPE_STRING:
1060 case MONO_TYPE_OBJECT:
1061 case MONO_TYPE_SZARRAY:
1062 case MONO_TYPE_ARRAY:
1069 if (sig->params [i]->byref)
1071 if (sig->params [i]->type != MONO_TYPE_R4 && sig->params [i]->type != MONO_TYPE_R8)
1080 /*if (!param_table [args [i].type] [sig->params [i]->type])
1088 * When we need a pointer to the current domain many times in a method, we
1089 * call mono_domain_get() once and we store the result in a local variable.
1090 * This function returns the variable that represents the MonoDomain*.
1092 inline static MonoInst *
1093 mono_get_domainvar (MonoCompile *cfg)
/* Lazily create the pointer-sized local on first request. */
1095 if (!cfg->domainvar)
1096 cfg->domainvar = mono_compile_create_var (cfg, &mono_defaults.int_class->byval_arg, OP_LOCAL);
1097 return cfg->domainvar;
1101 * The got_var contains the address of the Global Offset Table when AOT
/* Only meaningful on arches defining MONO_ARCH_NEED_GOT_VAR and when
 * compiling AOT; the non-AOT return and the non-GOT-arch branch are
 * truncated in this excerpt (presumably return NULL -- confirm). */
1105 mono_get_got_var (MonoCompile *cfg)
1107 #ifdef MONO_ARCH_NEED_GOT_VAR
1108 if (!cfg->compile_aot)
1110 if (!cfg->got_var) {
1111 cfg->got_var = mono_compile_create_var (cfg, &mono_defaults.int_class->byval_arg, OP_LOCAL);
1113 return cfg->got_var;
/* Return (lazily creating) the rgctx variable used under generic
 * sharing; MONO_INST_INDIRECT forces it to be stack allocated. Only
 * valid when a generic sharing context is present (asserted). */
1120 mono_get_vtable_var (MonoCompile *cfg)
1122 g_assert (cfg->generic_sharing_context);
1124 if (!cfg->rgctx_var) {
1125 cfg->rgctx_var = mono_compile_create_var (cfg, &mono_defaults.int_class->byval_arg, OP_LOCAL);
1126 /* force the var to be stack allocated */
1127 cfg->rgctx_var->flags |= MONO_INST_INDIRECT;
1130 return cfg->rgctx_var;
/* Map an eval-stack STACK_* kind back to a MonoType; managed pointers
 * and value types use the klass recorded on the instruction. */
1134 type_from_stack_type (MonoInst *ins) {
1135 switch (ins->type) {
1136 case STACK_I4: return &mono_defaults.int32_class->byval_arg;
1137 case STACK_I8: return &mono_defaults.int64_class->byval_arg;
1138 case STACK_PTR: return &mono_defaults.int_class->byval_arg;
1139 case STACK_R8: return &mono_defaults.double_class->byval_arg;
1141 return &ins->klass->this_arg;
1142 case STACK_OBJ: return &mono_defaults.object_class->byval_arg;
1143 case STACK_VTYPE: return &ins->klass->byval_arg;
1145 g_error ("stack type %d to monotype not handled\n", ins->type);
/* Map a MonoType to the STACK_* kind it occupies on the eval stack,
 * after stripping enum wrappers via mono_type_get_underlying_type ().
 * NOTE(review): the return statements are truncated in this excerpt. */
1150 static G_GNUC_UNUSED int
1151 type_to_stack_type (MonoType *t)
1153 t = mono_type_get_underlying_type (t);
1157 case MONO_TYPE_BOOLEAN:
1160 case MONO_TYPE_CHAR:
1167 case MONO_TYPE_FNPTR:
1169 case MONO_TYPE_CLASS:
1170 case MONO_TYPE_STRING:
1171 case MONO_TYPE_OBJECT:
1172 case MONO_TYPE_SZARRAY:
1173 case MONO_TYPE_ARRAY:
1181 case MONO_TYPE_VALUETYPE:
1182 case MONO_TYPE_TYPEDBYREF:
1184 case MONO_TYPE_GENERICINST:
1185 if (mono_type_generic_inst_is_valuetype (t))
1191 g_assert_not_reached ();
/* Return the element class accessed by a ldelem/stelem-style CIL
 * opcode. NOTE(review): the case labels for most returns are truncated
 * in this excerpt; only the LDELEM_REF/STELEM_REF pair is visible. */
1198 array_access_to_klass (int opcode)
1202 return mono_defaults.byte_class;
1204 return mono_defaults.uint16_class;
1207 return mono_defaults.int_class;
1210 return mono_defaults.sbyte_class;
1213 return mono_defaults.int16_class;
1216 return mono_defaults.int32_class;
1218 return mono_defaults.uint32_class;
1221 return mono_defaults.int64_class;
1224 return mono_defaults.single_class;
1227 return mono_defaults.double_class;
1228 case CEE_LDELEM_REF:
1229 case CEE_STELEM_REF:
1230 return mono_defaults.object_class;
1232 g_assert_not_reached ();
1238 * We try to share variables when possible
/* Returns the interface variable for stack SLOT holding a value of
 * INS's stack type: cached in cfg->intvars keyed by (type, slot) for
 * sharable kinds, freshly created otherwise (and always fresh for
 * slots beyond max_stack, which inlining can produce). */
1241 mono_compile_get_interface_var (MonoCompile *cfg, int slot, MonoInst *ins)
1246 /* inlining can result in deeper stacks */
1247 if (slot >= cfg->header->max_stack)
1248 return mono_compile_create_var (cfg, type_from_stack_type (ins), OP_LOCAL);
1250 pos = ins->type - 1 + slot * STACK_MAX;
1252 switch (ins->type) {
1259 if ((vnum = cfg->intvars [pos]))
1260 return cfg->varinfo [vnum];
1261 res = mono_compile_create_var (cfg, type_from_stack_type (ins), OP_LOCAL);
1262 cfg->intvars [pos] = res->inst_c0;
1265 res = mono_compile_create_var (cfg, type_from_stack_type (ins), OP_LOCAL);
/* For AOT: remember the image+token that produced the runtime object
 * KEY, so the AOT compiler can emit a token reference instead of a raw
 * pointer. */
1271 mono_save_token_info (MonoCompile *cfg, MonoImage *image, guint32 token, gpointer key)
1274 * Don't use this if a generic_context is set, since that means AOT can't
1275 * look up the method using just the image+token.
1276 * table == 0 means this is a reference made from a wrapper.
1278 if (cfg->compile_aot && !cfg->generic_context && (mono_metadata_token_table (token) > 0)) {
1279 MonoJumpInfoToken *jump_info_token = mono_mempool_alloc0 (cfg->mempool, sizeof (MonoJumpInfoToken));
1280 jump_info_token->image = image;
1281 jump_info_token->token = token;
1282 g_hash_table_insert (cfg->token_info_hash, key, jump_info_token);
1287 * This function is called to handle items that are left on the evaluation stack
1288 * at basic block boundaries. What happens is that we save the values to local variables
1289 * and we reload them later when first entering the target basic block (with the
1290 * handle_loaded_temps () function).
1291 * A single joint point will use the same variables (stored in the array bb->out_stack or
1292 * bb->in_stack, if the basic block is before or after the joint point).
1294 * This function needs to be called _before_ emitting the last instruction of
1295 * the bb (i.e. before emitting a branch).
1296 * If the stack merge fails at a join point, cfg->unverifiable is set.
/* NOTE(review): some lines of this function are missing from this
 * excerpt (loop closers, a few conditionals). */
1299 handle_stack_args (MonoCompile *cfg, MonoInst **sp, int count)
1302 MonoBasicBlock *bb = cfg->cbb;
1303 MonoBasicBlock *outb;
1304 MonoInst *inst, **locals;
1309 if (cfg->verbose_level > 3)
1310 printf ("%d item(s) on exit from B%d\n", count, bb->block_num);
/* First exit from this bb: pick (or build) the out_stack variables,
 * preferring the in_stack of an already-processed successor. */
1311 if (!bb->out_scount) {
1312 bb->out_scount = count;
1313 //printf ("bblock %d has out:", bb->block_num);
1315 for (i = 0; i < bb->out_count; ++i) {
1316 outb = bb->out_bb [i];
1317 /* exception handlers are linked, but they should not be considered for stack args */
1318 if (outb->flags & BB_EXCEPTION_HANDLER)
1320 //printf (" %d", outb->block_num);
1321 if (outb->in_stack) {
1323 bb->out_stack = outb->in_stack;
1329 bb->out_stack = mono_mempool_alloc (cfg->mempool, sizeof (MonoInst*) * count);
1330 for (i = 0; i < count; ++i) {
1332 * try to reuse temps already allocated for this purpouse, if they occupy the same
1333 * stack slot and if they are of the same type.
1334 * This won't cause conflicts since if 'local' is used to
1335 * store one of the values in the in_stack of a bblock, then
1336 * the same variable will be used for the same outgoing stack
1338 * This doesn't work when inlining methods, since the bblocks
1339 * in the inlined methods do not inherit their in_stack from
1340 * the bblock they are inlined to. See bug #58863 for an
1343 if (cfg->inlined_method)
1344 bb->out_stack [i] = mono_compile_create_var (cfg, type_from_stack_type (sp [i]), OP_LOCAL);
1346 bb->out_stack [i] = mono_compile_get_interface_var (cfg, i, sp [i]);
/* Propagate our out_stack to successors as their in_stack; a depth
 * mismatch at a join point makes the method unverifiable. */
1351 for (i = 0; i < bb->out_count; ++i) {
1352 outb = bb->out_bb [i];
1353 /* exception handlers are linked, but they should not be considered for stack args */
1354 if (outb->flags & BB_EXCEPTION_HANDLER)
1356 if (outb->in_scount) {
1357 if (outb->in_scount != bb->out_scount) {
1358 cfg->unverifiable = TRUE;
1361 continue; /* check they are the same locals */
1363 outb->in_scount = count;
1364 outb->in_stack = bb->out_stack;
1367 locals = bb->out_stack;
1369 for (i = 0; i < count; ++i) {
1370 EMIT_NEW_TEMPSTORE (cfg, inst, locals [i]->inst_c0, sp [i]);
1371 inst->cil_code = sp [i]->cil_code;
1372 sp [i] = locals [i];
1373 if (cfg->verbose_level > 3)
1374 printf ("storing %d to temp %d\n", i, (int)locals [i]->inst_c0);
1378 * It is possible that the out bblocks already have in_stack assigned, and
1379 * the in_stacks differ. In this case, we will store to all the different
1386 /* Find a bblock which has a different in_stack */
1388 while (bindex < bb->out_count) {
1389 outb = bb->out_bb [bindex];
1390 /* exception handlers are linked, but they should not be considered for stack args */
1391 if (outb->flags & BB_EXCEPTION_HANDLER) {
1395 if (outb->in_stack != locals) {
1396 for (i = 0; i < count; ++i) {
1397 EMIT_NEW_TEMPSTORE (cfg, inst, outb->in_stack [i]->inst_c0, sp [i]);
1398 inst->cil_code = sp [i]->cil_code;
1399 sp [i] = locals [i];
1400 if (cfg->verbose_level > 3)
1401 printf ("storing %d to temp %d\n", i, (int)outb->in_stack [i]->inst_c0);
1403 locals = outb->in_stack;
1412 /* Emit code which loads interface_offsets [klass->interface_id]
1413 * The array is stored in memory before vtable.
 * AOT: the adjusted IID is resolved via a patch; JIT: the slot is addressed
 * directly at a negative offset from the vtable pointer.
 */
1416 mini_emit_load_intf_reg_vtable (MonoCompile *cfg, int intf_reg, int vtable_reg, MonoClass *klass)
1418 if (cfg->compile_aot) {
1419 int ioffset_reg = alloc_preg (cfg);
1420 int iid_reg = alloc_preg (cfg);
/* ADJUSTED_IID is pre-scaled/negated by the AOT patch resolver — TODO confirm. */
1422 MONO_EMIT_NEW_AOTCONST (cfg, iid_reg, klass, MONO_PATCH_INFO_ADJUSTED_IID);
1423 MONO_EMIT_NEW_BIALU (cfg, OP_PADD, ioffset_reg, iid_reg, vtable_reg);
1424 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, intf_reg, ioffset_reg, 0);
/* Non-AOT path: slot sits (interface_id + 1) pointers before the vtable. */
1427 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, intf_reg, vtable_reg, -((klass->interface_id + 1) * SIZEOF_VOID_P));
/*
 * mini_emit_interface_bitmap_check:
 *   Emit IR that loads into 'intf_bit_reg' a nonzero value iff bit
 *   klass->interface_id is set in the interface bitmap found at
 *   base_reg + offset. Three strategies: a runtime icall when the bitmap is
 *   compressed, bit arithmetic on a patched IID under AOT, and a direct
 *   byte load + constant mask otherwise.
 */
1432 mini_emit_interface_bitmap_check (MonoCompile *cfg, int intf_bit_reg, int base_reg, int offset, MonoClass *klass)
1434 int ibitmap_reg = alloc_preg (cfg);
1435 #ifdef COMPRESSED_INTERFACE_BITMAP
1437 MonoInst *res, *ins;
1438 NEW_LOAD_MEMBASE (cfg, ins, OP_LOAD_MEMBASE, ibitmap_reg, base_reg, offset);
1439 MONO_ADD_INS (cfg->cbb, ins);
/* Compressed bitmaps cannot be indexed inline; defer to an icall. */
1441 if (cfg->compile_aot)
1442 EMIT_NEW_AOTCONST (cfg, args [1], MONO_PATCH_INFO_IID, klass);
1444 EMIT_NEW_ICONST (cfg, args [1], klass->interface_id);
1445 res = mono_emit_jit_icall (cfg, mono_class_interface_match, args);
1446 MONO_EMIT_NEW_UNALU (cfg, OP_MOVE, intf_bit_reg, res->dreg);
1448 int ibitmap_byte_reg = alloc_preg (cfg);
1450 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, ibitmap_reg, base_reg, offset);
1452 if (cfg->compile_aot) {
/* AOT: the IID is only known at load time, so compute byte index (iid >> 3)
 * and bit mask (1 << (iid & 7)) in registers. */
1453 int iid_reg = alloc_preg (cfg);
1454 int shifted_iid_reg = alloc_preg (cfg);
1455 int ibitmap_byte_address_reg = alloc_preg (cfg);
1456 int masked_iid_reg = alloc_preg (cfg);
1457 int iid_one_bit_reg = alloc_preg (cfg);
1458 int iid_bit_reg = alloc_preg (cfg);
1459 MONO_EMIT_NEW_AOTCONST (cfg, iid_reg, klass, MONO_PATCH_INFO_IID);
1460 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_SHR_IMM, shifted_iid_reg, iid_reg, 3);
1461 MONO_EMIT_NEW_BIALU (cfg, OP_PADD, ibitmap_byte_address_reg, ibitmap_reg, shifted_iid_reg);
1462 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADU1_MEMBASE, ibitmap_byte_reg, ibitmap_byte_address_reg, 0);
1463 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_IAND_IMM, masked_iid_reg, iid_reg, 7);
1464 MONO_EMIT_NEW_ICONST (cfg, iid_one_bit_reg, 1);
1465 MONO_EMIT_NEW_BIALU (cfg, OP_ISHL, iid_bit_reg, iid_one_bit_reg, masked_iid_reg);
1466 MONO_EMIT_NEW_BIALU (cfg, OP_IAND, intf_bit_reg, ibitmap_byte_reg, iid_bit_reg);
/* JIT: interface_id is a compile-time constant, so fold index and mask. */
1468 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADI1_MEMBASE, ibitmap_byte_reg, ibitmap_reg, klass->interface_id >> 3);
1469 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_AND_IMM, intf_bit_reg, ibitmap_byte_reg, 1 << (klass->interface_id & 7));
1475 * Emit code which loads into "intf_bit_reg" a nonzero value if the MonoClass
1476 * stored in "klass_reg" implements the interface "klass".
 * Thin wrapper: checks the bitmap embedded in MonoClass.
 */
1479 mini_emit_load_intf_bit_reg_class (MonoCompile *cfg, int intf_bit_reg, int klass_reg, MonoClass *klass)
1481 mini_emit_interface_bitmap_check (cfg, intf_bit_reg, klass_reg, G_STRUCT_OFFSET (MonoClass, interface_bitmap), klass);
1485 * Emit code which loads into "intf_bit_reg" a nonzero value if the MonoVTable
1486 * stored in "vtable_reg" implements the interface "klass".
 * Thin wrapper: checks the bitmap embedded in MonoVTable.
 */
1489 mini_emit_load_intf_bit_reg_vtable (MonoCompile *cfg, int intf_bit_reg, int vtable_reg, MonoClass *klass)
1491 mini_emit_interface_bitmap_check (cfg, intf_bit_reg, vtable_reg, G_STRUCT_OFFSET (MonoVTable, interface_bitmap), klass);
1495 * Emit code which checks whenever the interface id of @klass is smaller than
1496 * than the value given by max_iid_reg.
 * On failure either branches to 'false_target' (when given) or throws
 * InvalidCastException (the conditional-exception path).
 */
1499 mini_emit_max_iid_check (MonoCompile *cfg, int max_iid_reg, MonoClass *klass,
1500 MonoBasicBlock *false_target)
1502 if (cfg->compile_aot) {
/* AOT: the IID is patched in at load time, so compare against a register. */
1503 int iid_reg = alloc_preg (cfg);
1504 MONO_EMIT_NEW_AOTCONST (cfg, iid_reg, klass, MONO_PATCH_INFO_IID);
1505 MONO_EMIT_NEW_BIALU (cfg, OP_COMPARE, -1, max_iid_reg, iid_reg);
1508 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, max_iid_reg, klass->interface_id);
1510 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBLT_UN, false_target);
1512 MONO_EMIT_NEW_COND_EXC (cfg, LT_UN, "InvalidCastException");
1515 /* Same as above, but obtains max_iid from a vtable */
1517 mini_emit_max_iid_check_vtable (MonoCompile *cfg, int vtable_reg, MonoClass *klass,
1518 MonoBasicBlock *false_target)
1520 int max_iid_reg = alloc_preg (cfg);
/* max_interface_id is a 16-bit field, hence the unsigned 2-byte load. */
1522 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADU2_MEMBASE, max_iid_reg, vtable_reg, G_STRUCT_OFFSET (MonoVTable, max_interface_id));
1523 mini_emit_max_iid_check (cfg, max_iid_reg, klass, false_target);
1526 /* Same as above, but obtains max_iid from a klass */
1528 mini_emit_max_iid_check_class (MonoCompile *cfg, int klass_reg, MonoClass *klass,
1529 MonoBasicBlock *false_target)
1531 int max_iid_reg = alloc_preg (cfg);
/* max_interface_id is a 16-bit field, hence the unsigned 2-byte load. */
1533 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADU2_MEMBASE, max_iid_reg, klass_reg, G_STRUCT_OFFSET (MonoClass, max_interface_id));
1534 mini_emit_max_iid_check (cfg, max_iid_reg, klass, false_target);
/*
 * mini_emit_isninst_cast_inst:
 *   Emit an isinst-style subtype test: walk the supertypes table of the class
 *   in 'klass_reg' and branch to 'true_target' when the entry at depth
 *   klass->idepth-1 matches 'klass' (or the runtime class in 'klass_ins').
 *   Branches to 'false_target' when the hierarchy is too shallow.
 */
1538 mini_emit_isninst_cast_inst (MonoCompile *cfg, int klass_reg, MonoClass *klass, MonoInst *klass_ins, MonoBasicBlock *false_target, MonoBasicBlock *true_target)
1540 int idepth_reg = alloc_preg (cfg);
1541 int stypes_reg = alloc_preg (cfg);
1542 int stype = alloc_preg (cfg);
/* Only emit the depth check when the target depth exceeds the inline
 * supertable size; shallower classes always have the slot available. */
1544 if (klass->idepth > MONO_DEFAULT_SUPERTABLE_SIZE) {
1545 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADU2_MEMBASE, idepth_reg, klass_reg, G_STRUCT_OFFSET (MonoClass, idepth));
1546 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, idepth_reg, klass->idepth);
1547 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBLT_UN, false_target);
1549 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, stypes_reg, klass_reg, G_STRUCT_OFFSET (MonoClass, supertypes));
1550 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, stype, stypes_reg, ((klass->idepth - 1) * SIZEOF_VOID_P));
1552 MONO_EMIT_NEW_BIALU (cfg, OP_COMPARE, -1, stype, klass_ins->dreg);
1553 } else if (cfg->compile_aot) {
1554 int const_reg = alloc_preg (cfg);
1555 MONO_EMIT_NEW_CLASSCONST (cfg, const_reg, klass);
1556 MONO_EMIT_NEW_BIALU (cfg, OP_COMPARE, -1, stype, const_reg);
/* JIT: the MonoClass* pointer itself is embedded as an immediate. */
1558 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, stype, klass);
1560 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBEQ, true_target);
/* Convenience wrapper: isinst test with the class known at compile time. */
1564 mini_emit_isninst_cast (MonoCompile *cfg, int klass_reg, MonoClass *klass, MonoBasicBlock *false_target, MonoBasicBlock *true_target)
1566 mini_emit_isninst_cast_inst (cfg, klass_reg, klass, NULL, false_target, true_target);
/*
 * mini_emit_iface_cast:
 *   Emit an interface cast check against the vtable in 'vtable_reg':
 *   first bound-check the interface id, then test the interface bitmap.
 *   With a 'true_target' this acts as isinst; otherwise failure throws
 *   InvalidCastException.
 */
1570 mini_emit_iface_cast (MonoCompile *cfg, int vtable_reg, MonoClass *klass, MonoBasicBlock *false_target, MonoBasicBlock *true_target)
1572 int intf_reg = alloc_preg (cfg);
1574 mini_emit_max_iid_check_vtable (cfg, vtable_reg, klass, false_target);
1575 mini_emit_load_intf_bit_reg_vtable (cfg, intf_reg, vtable_reg, klass);
1576 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, intf_reg, 0);
1578 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBNE_UN, true_target);
1580 MONO_EMIT_NEW_COND_EXC (cfg, EQ, "InvalidCastException");
1584 * Variant of the above that takes a register to the class, not the vtable.
 */
1587 mini_emit_iface_class_cast (MonoCompile *cfg, int klass_reg, MonoClass *klass, MonoBasicBlock *false_target, MonoBasicBlock *true_target)
1589 int intf_bit_reg = alloc_preg (cfg);
1591 mini_emit_max_iid_check_class (cfg, klass_reg, klass, false_target);
1592 mini_emit_load_intf_bit_reg_class (cfg, intf_bit_reg, klass_reg, klass);
1593 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, intf_bit_reg, 0);
/* Nonzero bit => interface implemented; otherwise fall through to throw. */
1595 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBNE_UN, true_target);
1597 MONO_EMIT_NEW_COND_EXC (cfg, EQ, "InvalidCastException");
/*
 * mini_emit_class_check_inst:
 *   Emit an exact-class equality check: compare the class in 'klass_reg'
 *   against 'klass_inst' (runtime class), an AOT class constant, or the
 *   compile-time MonoClass* immediate, throwing InvalidCastException on
 *   mismatch.
 */
1601 mini_emit_class_check_inst (MonoCompile *cfg, int klass_reg, MonoClass *klass, MonoInst *klass_inst)
1604 MONO_EMIT_NEW_BIALU (cfg, OP_COMPARE, -1, klass_reg, klass_inst->dreg);
1605 } else if (cfg->compile_aot) {
1606 int const_reg = alloc_preg (cfg);
1607 MONO_EMIT_NEW_CLASSCONST (cfg, const_reg, klass);
1608 MONO_EMIT_NEW_BIALU (cfg, OP_COMPARE, -1, klass_reg, const_reg);
1610 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, klass_reg, klass);
1612 MONO_EMIT_NEW_COND_EXC (cfg, NE_UN, "InvalidCastException");
/* Convenience wrapper: exact-class check with a compile-time class only. */
1616 mini_emit_class_check (MonoCompile *cfg, int klass_reg, MonoClass *klass)
1618 mini_emit_class_check_inst (cfg, klass_reg, klass, NULL);
/*
 * mini_emit_class_check_branch:
 *   Compare the class in 'klass_reg' against 'klass' and branch to 'target'
 *   using the caller-supplied conditional branch opcode 'branch_op'.
 */
1622 mini_emit_class_check_branch (MonoCompile *cfg, int klass_reg, MonoClass *klass, int branch_op, MonoBasicBlock *target)
1624 if (cfg->compile_aot) {
1625 int const_reg = alloc_preg (cfg);
1626 MONO_EMIT_NEW_CLASSCONST (cfg, const_reg, klass);
1627 MONO_EMIT_NEW_BIALU (cfg, OP_COMPARE, -1, klass_reg, const_reg);
1629 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, klass_reg, klass);
1631 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, branch_op, target);
/* Forward declaration: mini_emit_castclass_inst below recurses through it
 * for the array element-class case. */
1635 mini_emit_castclass (MonoCompile *cfg, int obj_reg, int klass_reg, MonoClass *klass, MonoBasicBlock *object_is_null);
/*
 * mini_emit_castclass_inst:
 *   Emit a castclass check for the object in 'obj_reg' whose class is in
 *   'klass_reg'. Arrays are handled by rank + element-class checks (with
 *   special-casing for object/Enum element types and SZARRAY vectors);
 *   non-arrays use the supertypes-table subtype check. Throws
 *   InvalidCastException on failure.
 */
1638 mini_emit_castclass_inst (MonoCompile *cfg, int obj_reg, int klass_reg, MonoClass *klass, MonoInst *klass_inst, MonoBasicBlock *object_is_null)
1641 int rank_reg = alloc_preg (cfg);
1642 int eclass_reg = alloc_preg (cfg);
1644 g_assert (!klass_inst);
/* Array path: ranks must match exactly. */
1645 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADU1_MEMBASE, rank_reg, klass_reg, G_STRUCT_OFFSET (MonoClass, rank));
1646 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, rank_reg, klass->rank);
1647 MONO_EMIT_NEW_COND_EXC (cfg, NE_UN, "InvalidCastException");
1648 // MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, vtable_reg, G_STRUCT_OFFSET (MonoVTable, klass));
1649 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, eclass_reg, klass_reg, G_STRUCT_OFFSET (MonoClass, cast_class));
/* Element-type dispatch on the *target* cast_class; the object/Enum cases
 * presumably reflect array covariance rules for enums — TODO confirm. */
1650 if (klass->cast_class == mono_defaults.object_class) {
1651 int parent_reg = alloc_preg (cfg);
1652 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, parent_reg, eclass_reg, G_STRUCT_OFFSET (MonoClass, parent));
1653 mini_emit_class_check_branch (cfg, parent_reg, mono_defaults.enum_class->parent, OP_PBNE_UN, object_is_null);
1654 mini_emit_class_check (cfg, eclass_reg, mono_defaults.enum_class);
1655 } else if (klass->cast_class == mono_defaults.enum_class->parent) {
1656 mini_emit_class_check_branch (cfg, eclass_reg, mono_defaults.enum_class->parent, OP_PBEQ, object_is_null);
1657 mini_emit_class_check (cfg, eclass_reg, mono_defaults.enum_class);
1658 } else if (klass->cast_class == mono_defaults.enum_class) {
1659 mini_emit_class_check (cfg, eclass_reg, mono_defaults.enum_class);
1660 } else if (klass->cast_class->flags & TYPE_ATTRIBUTE_INTERFACE) {
1661 mini_emit_iface_class_cast (cfg, eclass_reg, klass->cast_class, NULL, NULL);
1663 // Pass -1 as obj_reg to skip the check below for arrays of arrays
1664 mini_emit_castclass (cfg, -1, eclass_reg, klass->cast_class, object_is_null);
/* A rank-1 SZARRAY target must also reject arrays with explicit bounds. */
1667 if ((klass->rank == 1) && (klass->byval_arg.type == MONO_TYPE_SZARRAY) && (obj_reg != -1)) {
1668 /* Check that the object is a vector too */
1669 int bounds_reg = alloc_preg (cfg);
1670 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, bounds_reg, obj_reg, G_STRUCT_OFFSET (MonoArray, bounds));
1671 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, bounds_reg, 0);
1672 MONO_EMIT_NEW_COND_EXC (cfg, NE_UN, "InvalidCastException");
/* Non-array path: supertypes-table subtype check, throwing on failure. */
1675 int idepth_reg = alloc_preg (cfg);
1676 int stypes_reg = alloc_preg (cfg);
1677 int stype = alloc_preg (cfg);
1679 if (klass->idepth > MONO_DEFAULT_SUPERTABLE_SIZE) {
1680 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADU2_MEMBASE, idepth_reg, klass_reg, G_STRUCT_OFFSET (MonoClass, idepth));
1681 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, idepth_reg, klass->idepth);
1682 MONO_EMIT_NEW_COND_EXC (cfg, LT_UN, "InvalidCastException");
1684 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, stypes_reg, klass_reg, G_STRUCT_OFFSET (MonoClass, supertypes));
1685 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, stype, stypes_reg, ((klass->idepth - 1) * SIZEOF_VOID_P));
1686 mini_emit_class_check_inst (cfg, stype, klass, klass_inst);
/* Convenience wrapper: castclass with the class known at compile time. */
1691 mini_emit_castclass (MonoCompile *cfg, int obj_reg, int klass_reg, MonoClass *klass, MonoBasicBlock *object_is_null)
1693 mini_emit_castclass_inst (cfg, obj_reg, klass_reg, klass, NULL, object_is_null);
/*
 * mini_emit_memset:
 *   Emit inline stores that fill 'size' bytes at destreg+offset with 'val',
 *   using the widest store allowed by 'align'. Larger fills assert val == 0
 *   (zero-init is the only supported non-trivial case here).
 */
1697 mini_emit_memset (MonoCompile *cfg, int destreg, int offset, int size, int val, int align)
1701 g_assert (val == 0);
/* Small, naturally aligned sizes get a single immediate store. */
1706 if ((size <= 4) && (size <= align)) {
1709 MONO_EMIT_NEW_STORE_MEMBASE_IMM (cfg, OP_STOREI1_MEMBASE_IMM, destreg, offset, val);
1712 MONO_EMIT_NEW_STORE_MEMBASE_IMM (cfg, OP_STOREI2_MEMBASE_IMM, destreg, offset, val);
1715 MONO_EMIT_NEW_STORE_MEMBASE_IMM (cfg, OP_STOREI4_MEMBASE_IMM, destreg, offset, val);
1717 #if SIZEOF_REGISTER == 8
1719 MONO_EMIT_NEW_STORE_MEMBASE_IMM (cfg, OP_STOREI8_MEMBASE_IMM, destreg, offset, val);
/* General case: materialize val in a register once, then store repeatedly. */
1725 val_reg = alloc_preg (cfg);
1727 if (SIZEOF_REGISTER == 8)
1728 MONO_EMIT_NEW_I8CONST (cfg, val_reg, val);
1730 MONO_EMIT_NEW_ICONST (cfg, val_reg, val);
1733 /* This could be optimized further if necessary */
1735 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI1_MEMBASE_REG, destreg, offset, val_reg);
/* Aligned-access path: use the widest store width first, then narrower
 * stores for the remainder. */
1742 #if !NO_UNALIGNED_ACCESS
1743 if (SIZEOF_REGISTER == 8) {
1745 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI4_MEMBASE_REG, destreg, offset, val_reg);
1750 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI8_MEMBASE_REG, destreg, offset, val_reg);
1758 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI4_MEMBASE_REG, destreg, offset, val_reg);
1763 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI2_MEMBASE_REG, destreg, offset, val_reg);
1768 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI1_MEMBASE_REG, destreg, offset, val_reg);
/*
 * mini_emit_memcpy:
 *   Emit inline load/store pairs that copy 'size' bytes from srcreg+soffset
 *   to destreg+doffset, widest-first according to 'align'. Bounded by an
 *   assert to prevent unbounded code expansion.
 */
1775 mini_emit_memcpy (MonoCompile *cfg, int destreg, int doffset, int srcreg, int soffset, int size, int align)
1782 /*FIXME arbitrary hack to avoid unbound code expansion.*/
1783 g_assert (size < 10000);
1786 /* This could be optimized further if necessary */
/* Byte-at-a-time fallback (unaligned / align == 1 path — TODO confirm). */
1788 cur_reg = alloc_preg (cfg);
1789 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADI1_MEMBASE, cur_reg, srcreg, soffset);
1790 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI1_MEMBASE_REG, destreg, doffset, cur_reg);
/* Aligned-access path: copy with the widest width first, then narrower. */
1797 #if !NO_UNALIGNED_ACCESS
1798 if (SIZEOF_REGISTER == 8) {
1800 cur_reg = alloc_preg (cfg);
1801 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADI8_MEMBASE, cur_reg, srcreg, soffset);
1802 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI8_MEMBASE_REG, destreg, doffset, cur_reg);
1811 cur_reg = alloc_preg (cfg);
1812 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADI4_MEMBASE, cur_reg, srcreg, soffset);
1813 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI4_MEMBASE_REG, destreg, doffset, cur_reg);
1819 cur_reg = alloc_preg (cfg);
1820 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADI2_MEMBASE, cur_reg, srcreg, soffset);
1821 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI2_MEMBASE_REG, destreg, doffset, cur_reg);
1827 cur_reg = alloc_preg (cfg);
1828 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADI1_MEMBASE, cur_reg, srcreg, soffset);
1829 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI1_MEMBASE_REG, destreg, doffset, cur_reg);
/*
 * ret_type_to_call_opcode:
 *   Map a method's return MonoType to the matching call opcode variant:
 *   plain/indirect (calli) /virtual, and void/int/long/float/vtype-returning
 *   flavours. Enum and generic-instance types are unwrapped and re-dispatched.
 */
1837 ret_type_to_call_opcode (MonoType *type, int calli, int virt, MonoGenericSharingContext *gsctx)
1840 return calli? OP_CALL_REG: virt? OP_CALLVIRT: OP_CALL;
/* Resolve shared-generic type parameters to their basic types first. */
1843 type = mini_get_basic_type_from_generic (gsctx, type);
1844 switch (type->type) {
1845 case MONO_TYPE_VOID:
1846 return calli? OP_VOIDCALL_REG: virt? OP_VOIDCALLVIRT: OP_VOIDCALL;
1849 case MONO_TYPE_BOOLEAN:
1852 case MONO_TYPE_CHAR:
1855 return calli? OP_CALL_REG: virt? OP_CALLVIRT: OP_CALL;
1859 case MONO_TYPE_FNPTR:
1860 return calli? OP_CALL_REG: virt? OP_CALLVIRT: OP_CALL;
1861 case MONO_TYPE_CLASS:
1862 case MONO_TYPE_STRING:
1863 case MONO_TYPE_OBJECT:
1864 case MONO_TYPE_SZARRAY:
1865 case MONO_TYPE_ARRAY:
1866 return calli? OP_CALL_REG: virt? OP_CALLVIRT: OP_CALL;
1869 return calli? OP_LCALL_REG: virt? OP_LCALLVIRT: OP_LCALL;
1872 return calli? OP_FCALL_REG: virt? OP_FCALLVIRT: OP_FCALL;
1873 case MONO_TYPE_VALUETYPE:
/* Enums re-dispatch on their underlying integral type. */
1874 if (type->data.klass->enumtype) {
1875 type = mono_class_enum_basetype (type->data.klass);
1878 return calli? OP_VCALL_REG: virt? OP_VCALLVIRT: OP_VCALL;
1879 case MONO_TYPE_TYPEDBYREF:
1880 return calli? OP_VCALL_REG: virt? OP_VCALLVIRT: OP_VCALL;
1881 case MONO_TYPE_GENERICINST:
/* Generic instances re-dispatch on the open container class's type. */
1882 type = &type->data.generic_class->container_class->byval_arg;
1885 g_error ("unknown type 0x%02x in ret_type_to_call_opcode", type->type);
1891 * target_type_is_incompatible:
1892 * @cfg: MonoCompile context
1894 * Check that the item @arg on the evaluation stack can be stored
1895 * in the target type (can be a local, or field, etc).
1896 * The cfg arg can be used to check if we need verification or just
1899 * Returns: non-0 value if arg can't be stored on a target.
 */
1902 target_type_is_incompatible (MonoCompile *cfg, MonoType *target, MonoInst *arg)
1904 MonoType *simple_type;
/* Byref targets accept managed pointers to the same class, or raw pointers. */
1907 if (target->byref) {
1908 /* FIXME: check that the pointed to types match */
1909 if (arg->type == STACK_MP)
1910 return arg->klass != mono_class_from_mono_type (target);
1911 if (arg->type == STACK_PTR)
/* Strip enum/modifier wrappers before dispatching on the element type. */
1916 simple_type = mono_type_get_underlying_type (target);
1917 switch (simple_type->type) {
1918 case MONO_TYPE_VOID:
1922 case MONO_TYPE_BOOLEAN:
1925 case MONO_TYPE_CHAR:
1928 if (arg->type != STACK_I4 && arg->type != STACK_PTR)
1932 /* STACK_MP is needed when setting pinned locals */
1933 if (arg->type != STACK_I4 && arg->type != STACK_PTR && arg->type != STACK_MP)
1938 case MONO_TYPE_FNPTR:
1940 * Some opcodes like ldloca returns 'transient pointers' which can be stored in
1941 * in native int. (#688008).
1943 if (arg->type != STACK_I4 && arg->type != STACK_PTR && arg->type != STACK_MP)
1946 case MONO_TYPE_CLASS:
1947 case MONO_TYPE_STRING:
1948 case MONO_TYPE_OBJECT:
1949 case MONO_TYPE_SZARRAY:
1950 case MONO_TYPE_ARRAY:
1951 if (arg->type != STACK_OBJ)
1953 /* FIXME: check type compatibility */
1957 if (arg->type != STACK_I8)
1962 if (arg->type != STACK_R8)
1965 case MONO_TYPE_VALUETYPE:
/* Value types must match the exact class, not just the stack kind. */
1966 if (arg->type != STACK_VTYPE)
1968 klass = mono_class_from_mono_type (simple_type);
1969 if (klass != arg->klass)
1972 case MONO_TYPE_TYPEDBYREF:
1973 if (arg->type != STACK_VTYPE)
1975 klass = mono_class_from_mono_type (simple_type);
1976 if (klass != arg->klass)
1979 case MONO_TYPE_GENERICINST:
1980 if (mono_type_generic_inst_is_valuetype (simple_type)) {
1981 if (arg->type != STACK_VTYPE)
1983 klass = mono_class_from_mono_type (simple_type);
1984 if (klass != arg->klass)
1988 if (arg->type != STACK_OBJ)
1990 /* FIXME: check type compatibility */
1994 case MONO_TYPE_MVAR:
1995 /* FIXME: all the arguments must be references for now,
1996 * later look inside cfg and see if the arg num is
1997 * really a reference
1999 g_assert (cfg->generic_sharing_context);
2000 if (arg->type != STACK_OBJ)
2004 g_error ("unknown type 0x%02x in target_type_is_incompatible", simple_type->type);
2010 * Prepare arguments for passing to a function call.
2011 * Return a non-zero value if the arguments can't be passed to the given
2013 * The type checks are not yet complete and some conversions may need
2014 * casts on 32 or 64 bit architectures.
2016 * FIXME: implement this using target_type_is_incompatible ()
 */
2019 check_call_signature (MonoCompile *cfg, MonoMethodSignature *sig, MonoInst **args)
2021 MonoType *simple_type;
/* The implicit 'this' argument must be an object or pointer-like value. */
2025 if (args [0]->type != STACK_OBJ && args [0]->type != STACK_MP && args [0]->type != STACK_PTR)
2029 for (i = 0; i < sig->param_count; ++i) {
2030 if (sig->params [i]->byref) {
2031 if (args [i]->type != STACK_MP && args [i]->type != STACK_PTR)
2035 simple_type = sig->params [i];
2036 simple_type = mini_get_basic_type_from_generic (cfg->generic_sharing_context, simple_type);
2038 switch (simple_type->type) {
2039 case MONO_TYPE_VOID:
2044 case MONO_TYPE_BOOLEAN:
2047 case MONO_TYPE_CHAR:
2050 if (args [i]->type != STACK_I4 && args [i]->type != STACK_PTR)
2056 case MONO_TYPE_FNPTR:
2057 if (args [i]->type != STACK_I4 && args [i]->type != STACK_PTR && args [i]->type != STACK_MP && args [i]->type != STACK_OBJ)
2060 case MONO_TYPE_CLASS:
2061 case MONO_TYPE_STRING:
2062 case MONO_TYPE_OBJECT:
2063 case MONO_TYPE_SZARRAY:
2064 case MONO_TYPE_ARRAY:
2065 if (args [i]->type != STACK_OBJ)
2070 if (args [i]->type != STACK_I8)
2075 if (args [i]->type != STACK_R8)
2078 case MONO_TYPE_VALUETYPE:
/* Enums re-dispatch on their underlying integral type. */
2079 if (simple_type->data.klass->enumtype) {
2080 simple_type = mono_class_enum_basetype (simple_type->data.klass);
2083 if (args [i]->type != STACK_VTYPE)
2086 case MONO_TYPE_TYPEDBYREF:
2087 if (args [i]->type != STACK_VTYPE)
2090 case MONO_TYPE_GENERICINST:
2091 simple_type = &simple_type->data.generic_class->container_class->byval_arg;
2095 g_error ("unknown type 0x%02x in check_call_signature",
/*
 * callvirt_to_call:
 *   Map an OP_*CALLVIRT opcode to its direct-call OP_*CALL counterpart
 *   (used when a virtual call can be statically devirtualized).
 */
2103 callvirt_to_call (int opcode)
2108 case OP_VOIDCALLVIRT:
2117 g_assert_not_reached ();
/*
 * callvirt_to_call_membase:
 *   Map an OP_*CALLVIRT opcode to its OP_*CALL_MEMBASE counterpart, i.e. a
 *   call through an address loaded from [basereg + offset] (vtable/IMT slot).
 */
2124 callvirt_to_call_membase (int opcode)
2128 return OP_CALL_MEMBASE;
2129 case OP_VOIDCALLVIRT:
2130 return OP_VOIDCALL_MEMBASE;
2132 return OP_FCALL_MEMBASE;
2134 return OP_LCALL_MEMBASE;
2136 return OP_VCALL_MEMBASE;
2138 g_assert_not_reached ();
2144 #ifdef MONO_ARCH_HAVE_IMT
/*
 * emit_imt_argument:
 *   Materialize the hidden IMT argument for an interface call: either the
 *   provided 'imt_arg', or the call's target method as an AOT/pointer
 *   constant, and bind it to the architecture's IMT register (or hand it to
 *   the arch back end when no fixed MONO_ARCH_IMT_REG exists).
 */
2146 emit_imt_argument (MonoCompile *cfg, MonoCallInst *call, MonoInst *imt_arg)
/* LLVM back end: pass the value via imt_arg_reg instead of a fixed register. */
2150 if (COMPILE_LLVM (cfg)) {
2151 method_reg = alloc_preg (cfg);
2154 MONO_EMIT_NEW_UNALU (cfg, OP_MOVE, method_reg, imt_arg->dreg);
2155 } else if (cfg->compile_aot) {
2156 MONO_EMIT_NEW_AOTCONST (cfg, method_reg, call->method, MONO_PATCH_INFO_METHODCONST);
2159 MONO_INST_NEW (cfg, ins, OP_PCONST);
2160 ins->inst_p0 = call->method;
2161 ins->dreg = method_reg;
2162 MONO_ADD_INS (cfg->cbb, ins);
2166 call->imt_arg_reg = method_reg;
2168 #ifdef MONO_ARCH_IMT_REG
2169 mono_call_inst_add_outarg_reg (cfg, call, method_reg, MONO_ARCH_IMT_REG, FALSE);
2171 /* Need this to keep the IMT arg alive */
2172 mono_call_inst_add_outarg_reg (cfg, call, method_reg, 0, FALSE);
/* Non-LLVM path: same materialization, then pin to MONO_ARCH_IMT_REG. */
2177 #ifdef MONO_ARCH_IMT_REG
2178 method_reg = alloc_preg (cfg);
2181 MONO_EMIT_NEW_UNALU (cfg, OP_MOVE, method_reg, imt_arg->dreg);
2182 } else if (cfg->compile_aot) {
2183 MONO_EMIT_NEW_AOTCONST (cfg, method_reg, call->method, MONO_PATCH_INFO_METHODCONST);
2186 MONO_INST_NEW (cfg, ins, OP_PCONST);
2187 ins->inst_p0 = call->method;
2188 ins->dreg = method_reg;
2189 MONO_ADD_INS (cfg->cbb, ins);
2192 mono_call_inst_add_outarg_reg (cfg, call, method_reg, MONO_ARCH_IMT_REG, FALSE);
/* Fallback when the arch has no dedicated IMT register. */
2194 mono_arch_emit_imt_argument (cfg, call, imt_arg);
/*
 * mono_patch_info_new:
 *   Allocate a MonoJumpInfo from mempool 'mp' describing a patch of the
 *   given type/target at IL offset 'ip'. Lifetime is tied to the mempool.
 */
2199 static MonoJumpInfo *
2200 mono_patch_info_new (MonoMemPool *mp, int ip, MonoJumpInfoType type, gconstpointer target)
2202 MonoJumpInfo *ji = mono_mempool_alloc (mp, sizeof (MonoJumpInfo));
2206 ji->data.target = target;
/*
 * mono_emit_call_args:
 *   Build a MonoCallInst for a call with signature 'sig' and arguments
 *   'args'. Picks the opcode from the return type (or OP_TAILCALL), sets up
 *   the value-type return buffer (vret_var / OP_OUTARG_VTRETADDR), performs
 *   the soft-float r8->r4 argument conversions, and lowers the call via the
 *   LLVM or native arch back end. Does NOT add the call to the bblock —
 *   callers do that.
 */
2211 inline static MonoCallInst *
2212 mono_emit_call_args (MonoCompile *cfg, MonoMethodSignature *sig,
2213 MonoInst **args, int calli, int virtual, int tail, int rgctx)
2216 #ifdef MONO_ARCH_SOFT_FLOAT
2221 MONO_INST_NEW_CALL (cfg, call, OP_TAILCALL);
2223 MONO_INST_NEW_CALL (cfg, call, ret_type_to_call_opcode (sig->ret, calli, virtual, cfg->generic_sharing_context));
2226 call->signature = sig;
2227 call->rgctx_reg = rgctx;
2229 type_to_eval_stack_type ((cfg), sig->ret, &call->inst);
/* Struct returns: either reuse cfg->vret_addr or create a temp whose
 * address is produced lazily via OP_OUTARG_VTRETADDR. */
2232 if (MONO_TYPE_ISSTRUCT (sig->ret)) {
2233 call->vret_var = cfg->vret_addr;
2234 //g_assert_not_reached ();
2236 } else if (MONO_TYPE_ISSTRUCT (sig->ret)) {
2237 MonoInst *temp = mono_compile_create_var (cfg, sig->ret, OP_LOCAL);
2240 temp->backend.is_pinvoke = sig->pinvoke;
2243 * We use a new opcode OP_OUTARG_VTRETADDR instead of LDADDR for emitting the
2244 * address of return value to increase optimization opportunities.
2245 * Before vtype decomposition, the dreg of the call ins itself represents the
2246 * fact the call modifies the return value. After decomposition, the call will
2247 * be transformed into one of the OP_VOIDCALL opcodes, and the VTRETADDR opcode
2248 * will be transformed into an LDADDR.
2250 MONO_INST_NEW (cfg, loada, OP_OUTARG_VTRETADDR);
2251 loada->dreg = alloc_preg (cfg);
2252 loada->inst_p0 = temp;
2253 /* We reference the call too since call->dreg could change during optimization */
2254 loada->inst_p1 = call;
2255 MONO_ADD_INS (cfg->cbb, loada);
2257 call->inst.dreg = temp->dreg;
2259 call->vret_var = loada;
2260 } else if (!MONO_TYPE_IS_VOID (sig->ret))
2261 call->inst.dreg = alloc_dreg (cfg, call->inst.type);
2263 #ifdef MONO_ARCH_SOFT_FLOAT
2264 if (COMPILE_SOFT_FLOAT (cfg)) {
2266 * If the call has a float argument, we would need to do an r8->r4 conversion using
2267 * an icall, but that cannot be done during the call sequence since it would clobber
2268 * the call registers + the stack. So we do it before emitting the call.
2270 for (i = 0; i < sig->param_count + sig->hasthis; ++i) {
2272 MonoInst *in = call->args [i];
2274 if (i >= sig->hasthis)
2275 t = sig->params [i - sig->hasthis];
2277 t = &mono_defaults.int_class->byval_arg;
2278 t = mono_type_get_underlying_type (t);
2280 if (!t->byref && t->type == MONO_TYPE_R4) {
2281 MonoInst *iargs [1];
2285 conv = mono_emit_jit_icall (cfg, mono_fload_r4_arg, iargs);
2287 /* The result will be in an int vreg */
2288 call->args [i] = conv;
/* Lower the call through the appropriate back end. */
2295 if (COMPILE_LLVM (cfg))
2296 mono_llvm_emit_call (cfg, call);
2298 mono_arch_emit_call (cfg, call);
2300 mono_arch_emit_call (cfg, call);
/* Track the largest outgoing-argument area needed by any call. */
2303 cfg->param_area = MAX (cfg->param_area, call->stack_usage);
2304 cfg->flags |= MONO_CFG_HAS_CALLS;
/*
 * set_rgctx_arg:
 *   Bind the runtime-generic-context argument in 'rgctx_reg' to the call —
 *   via the dedicated MONO_ARCH_RGCTX_REG when the arch defines one,
 *   otherwise by recording it in call->rgctx_arg_reg.
 */
2310 set_rgctx_arg (MonoCompile *cfg, MonoCallInst *call, int rgctx_reg, MonoInst *rgctx_arg)
2312 #ifdef MONO_ARCH_RGCTX_REG
2313 mono_call_inst_add_outarg_reg (cfg, call, rgctx_reg, MONO_ARCH_RGCTX_REG, FALSE);
2314 cfg->uses_rgctx_reg = TRUE;
2315 call->rgctx_reg = TRUE;
2317 call->rgctx_arg_reg = rgctx_reg;
/*
 * mono_emit_calli:
 *   Emit an indirect call through the address in 'addr', optionally passing
 *   a runtime-generic-context argument. Returns the call instruction.
 */
2324 inline static MonoInst*
2325 mono_emit_calli (MonoCompile *cfg, MonoMethodSignature *sig, MonoInst **args, MonoInst *addr, MonoInst *rgctx_arg)
/* Copy the rgctx value aside first so argument setup cannot clobber it. */
2331 rgctx_reg = mono_alloc_preg (cfg);
2332 MONO_EMIT_NEW_UNALU (cfg, OP_MOVE, rgctx_reg, rgctx_arg->dreg);
2335 call = mono_emit_call_args (cfg, sig, args, TRUE, FALSE, FALSE, rgctx_arg ? TRUE : FALSE);
2337 call->inst.sreg1 = addr->dreg;
2339 MONO_ADD_INS (cfg->cbb, (MonoInst*)call);
2342 set_rgctx_arg (cfg, call, rgctx_reg, rgctx_arg);
2344 return (MonoInst*)call;
2348 emit_get_rgctx_method (MonoCompile *cfg, int context_used, MonoMethod *cmethod, int rgctx_type);
2350 emit_get_rgctx_klass (MonoCompile *cfg, int context_used, MonoClass *klass, int rgctx_type);
/*
 * mono_emit_method_call_full:
 *   Emit a managed method call, choosing among: remoting-wrapper dispatch
 *   for marshal-by-ref targets, the delegate Invoke fast path, direct calls
 *   (non-virtual or sealed targets), and vtable/IMT-based virtual dispatch.
 *   'this' non-NULL selects virtual call handling; imt_arg/rgctx_arg are the
 *   optional hidden interface/generic-context arguments.
 */
2353 mono_emit_method_call_full (MonoCompile *cfg, MonoMethod *method, MonoMethodSignature *sig,
2354 MonoInst **args, MonoInst *this, MonoInst *imt_arg, MonoInst *rgctx_arg)
2356 gboolean might_be_remote;
2357 gboolean virtual = this != NULL;
2358 gboolean enable_for_aot = TRUE;
/* Copy the rgctx value aside first so argument setup cannot clobber it. */
2364 rgctx_reg = mono_alloc_preg (cfg);
2365 MONO_EMIT_NEW_UNALU (cfg, OP_MOVE, rgctx_reg, rgctx_arg->dreg);
2368 if (method->string_ctor) {
2369 /* Create the real signature */
2370 /* FIXME: Cache these */
2371 MonoMethodSignature *ctor_sig = mono_metadata_signature_dup_mempool (cfg->mempool, sig);
2372 ctor_sig->ret = &mono_defaults.string_class->byval_arg;
2377 context_used = mono_method_check_context_used (method);
/* Non-virtual calls on MarshalByRef (or object) targets may cross a
 * remoting boundary, unless 'this' is provably local. */
2379 might_be_remote = this && sig->hasthis &&
2380 (method->klass->marshalbyref || method->klass == mono_defaults.object_class) &&
2381 !(method->flags & METHOD_ATTRIBUTE_VIRTUAL) && (!MONO_CHECK_THIS (this) || context_used);
2383 if (might_be_remote && context_used) {
2386 g_assert (cfg->generic_sharing_context);
/* Shared generic code: fetch the remoting-check wrapper address from the
 * rgctx and call indirectly. */
2388 addr = emit_get_rgctx_method (cfg, context_used, method, MONO_RGCTX_INFO_REMOTING_INVOKE_WITH_CHECK);
2390 return mono_emit_calli (cfg, sig, args, addr, NULL);
2393 call = mono_emit_call_args (cfg, sig, args, FALSE, virtual, FALSE, rgctx_arg ? TRUE : FALSE);
2395 if (might_be_remote)
2396 call->method = mono_marshal_get_remoting_invoke_with_check (method);
2398 call->method = method;
2399 call->inst.flags |= MONO_INST_HAS_METHOD;
2400 call->inst.inst_left = this;
2403 int vtable_reg, slot_reg, this_reg;
2405 this_reg = this->dreg;
2407 #ifdef MONO_ARCH_HAVE_CREATE_DELEGATE_TRAMPOLINE
/* Delegate Invoke fast path: call through delegate->invoke_impl. */
2408 if ((method->klass->parent == mono_defaults.multicastdelegate_class) && (!strcmp (method->name, "Invoke"))) {
2409 MonoInst *dummy_use;
2411 MONO_EMIT_NULL_CHECK (cfg, this_reg);
2413 /* Make a call to delegate->invoke_impl */
2414 call->inst.opcode = callvirt_to_call_membase (call->inst.opcode);
2415 call->inst.inst_basereg = this_reg;
2416 call->inst.inst_offset = G_STRUCT_OFFSET (MonoDelegate, invoke_impl);
2417 MONO_ADD_INS (cfg->cbb, (MonoInst*)call);
2419 /* We must emit a dummy use here because the delegate trampoline will
2420 replace the 'this' argument with the delegate target making this activation
2421 no longer a root for the delegate.
2422 This is an issue for delegates that target collectible code such as dynamic
2423 methods of GC'able assemblies.
2425 For a test case look into #667921.
2427 FIXME: a dummy use is not the best way to do it as the local register allocator
2428 will put it on a caller save register and spil it around the call.
2429 Ideally, we would either put it on a callee save register or only do the store part.
2431 EMIT_NEW_DUMMY_USE (cfg, dummy_use, args [0]);
2433 return (MonoInst*)call;
/* Direct-call path: target is non-virtual, or final and not a remoting
 * wrapper; just null-check 'this' and devirtualize. */
2437 if ((!cfg->compile_aot || enable_for_aot) &&
2438 (!(method->flags & METHOD_ATTRIBUTE_VIRTUAL) ||
2439 (MONO_METHOD_IS_FINAL (method) &&
2440 method->wrapper_type != MONO_WRAPPER_REMOTING_INVOKE_WITH_CHECK)) &&
2441 !(method->klass->marshalbyref && context_used)) {
2443 * the method is not virtual, we just need to ensure this is not null
2444 * and then we can call the method directly.
2446 if (method->klass->marshalbyref || method->klass == mono_defaults.object_class) {
2448 * The check above ensures method is not gshared, this is needed since
2449 * gshared methods can't have wrappers.
2451 method = call->method = mono_marshal_get_remoting_invoke_with_check (method);
2454 if (!method->string_ctor)
2455 MONO_EMIT_NEW_CHECK_THIS (cfg, this_reg);
2457 call->inst.opcode = callvirt_to_call (call->inst.opcode);
2458 } else if ((method->flags & METHOD_ATTRIBUTE_VIRTUAL) && MONO_METHOD_IS_FINAL (method)) {
2460 * the method is virtual, but we can statically dispatch since either
2461 * it's class or the method itself are sealed.
2462 * But first we need to ensure it's not a null reference.
2464 MONO_EMIT_NEW_CHECK_THIS (cfg, this_reg);
2466 call->inst.opcode = callvirt_to_call (call->inst.opcode);
/* True virtual dispatch: load the vtable, then either an IMT slot (for
 * interfaces) or a vtable slot, and call through memory. */
2468 call->inst.opcode = callvirt_to_call_membase (call->inst.opcode);
2470 vtable_reg = alloc_preg (cfg);
2471 MONO_EMIT_NEW_LOAD_MEMBASE_FAULT (cfg, vtable_reg, this_reg, G_STRUCT_OFFSET (MonoObject, vtable));
2472 if (method->klass->flags & TYPE_ATTRIBUTE_INTERFACE) {
2474 #ifdef MONO_ARCH_HAVE_IMT
/* IMT slots live before the vtable, hence the negative offset. */
2476 guint32 imt_slot = mono_method_get_imt_slot (method);
2477 emit_imt_argument (cfg, call, imt_arg);
2478 slot_reg = vtable_reg;
2479 call->inst.inst_offset = ((gint32)imt_slot - MONO_IMT_SIZE) * SIZEOF_VOID_P;
2482 if (slot_reg == -1) {
/* No IMT: index into the per-interface offsets table instead. */
2483 slot_reg = alloc_preg (cfg);
2484 mini_emit_load_intf_reg_vtable (cfg, slot_reg, vtable_reg, method->klass);
2485 call->inst.inst_offset = mono_method_get_vtable_index (method) * SIZEOF_VOID_P;
2488 slot_reg = vtable_reg;
2489 call->inst.inst_offset = G_STRUCT_OFFSET (MonoVTable, vtable) +
2490 ((mono_method_get_vtable_index (method)) * (SIZEOF_VOID_P));
2491 #ifdef MONO_ARCH_HAVE_IMT
/* Generic virtual methods also go through the IMT mechanism. */
2493 g_assert (mono_method_signature (method)->generic_param_count);
2494 emit_imt_argument (cfg, call, imt_arg);
2499 call->inst.sreg1 = slot_reg;
2500 call->virtual = TRUE;
2504 MONO_ADD_INS (cfg->cbb, (MonoInst*)call);
2507 set_rgctx_arg (cfg, call, rgctx_reg, rgctx_arg);
2509 return (MonoInst*)call;
/*
 * mono_emit_method_call:
 *   Convenience wrapper: emit a call to METHOD using its own signature and
 *   no extra imt/rgctx arguments (the two trailing NULLs).
 */
2513 mono_emit_method_call (MonoCompile *cfg, MonoMethod *method, MonoInst **args, MonoInst *this)
2515 return mono_emit_method_call_full (cfg, method, mono_method_signature (method), args, this, NULL, NULL);
/*
 * mono_emit_native_call:
 *   Emit a direct call to the native function FUNC with signature SIG and
 *   append it to the current basic block.
 *   NOTE(review): intermediate source lines are missing from this excerpt.
 */
2519 mono_emit_native_call (MonoCompile *cfg, gconstpointer func, MonoMethodSignature *sig,
/* The four FALSE arguments select the plain call path -- see mono_emit_call_args. */
2526 call = mono_emit_call_args (cfg, sig, args, FALSE, FALSE, FALSE, FALSE);
2529 MONO_ADD_INS (cfg->cbb, (MonoInst*)call);
2531 return (MonoInst*)call;
/*
 * mono_emit_jit_icall:
 *   Emit a call to the JIT icall registered for the address FUNC; the call
 *   goes through the icall's wrapper, using the registered signature.
 */
2535 mono_emit_jit_icall (MonoCompile *cfg, gconstpointer func, MonoInst **args)
2537 MonoJitICallInfo *info = mono_find_jit_icall_by_addr (func);
2541 return mono_emit_native_call (cfg, mono_icall_get_wrapper (info), info->sig, args);
2545 * mono_emit_abs_call:
2547 * Emit a call to the runtime function described by PATCH_TYPE and DATA.
2549 inline static MonoInst*
2550 mono_emit_abs_call (MonoCompile *cfg, MonoJumpInfoType patch_type, gconstpointer data,
2551 MonoMethodSignature *sig, MonoInst **args)
2553 MonoJumpInfo *ji = mono_patch_info_new (cfg->mempool, 0, patch_type, data);
2557 * We pass ji as the call address, the PATCH_INFO_ABS resolving code will
/* Lazily create the table used to recognize these fake JI "addresses" at patch time. */
2560 if (cfg->abs_patches == NULL)
2561 cfg->abs_patches = g_hash_table_new (NULL, NULL);
2562 g_hash_table_insert (cfg->abs_patches, ji, ji);
/* The MonoJumpInfo itself is passed as the call target and resolved later. */
2563 ins = mono_emit_native_call (cfg, ji, sig, args);
2564 ((MonoCallInst*)ins)->fptr_is_patch = TRUE;
/*
 * mono_emit_widen_call_res:
 *   Widen the result of a call when needed: pinvoke (and, per the LLVM_ENABLED
 *   check, LLVM-compiled) callees may return small integers without the upper
 *   bits initialized, so an explicit sign/zero-extension is emitted.
 */
2569 mono_emit_widen_call_res (MonoCompile *cfg, MonoInst *ins, MonoMethodSignature *fsig)
2571 if (!MONO_TYPE_IS_VOID (fsig->ret)) {
2572 if ((fsig->pinvoke || LLVM_ENABLED) && !fsig->ret->byref) {
2576 * Native code might return non register sized integers
2577 * without initializing the upper bits.
/* Map the return type's load opcode to the matching widening conversion. */
2579 switch (mono_type_to_load_membase (cfg, fsig->ret)) {
2580 case OP_LOADI1_MEMBASE:
2581 widen_op = OP_ICONV_TO_I1;
2583 case OP_LOADU1_MEMBASE:
2584 widen_op = OP_ICONV_TO_U1;
2586 case OP_LOADI2_MEMBASE:
2587 widen_op = OP_ICONV_TO_I2;
2589 case OP_LOADU2_MEMBASE:
2590 widen_op = OP_ICONV_TO_U2;
/* widen_op == -1 means the return type needs no widening. */
2596 if (widen_op != -1) {
2597 int dreg = alloc_preg (cfg);
2600 EMIT_NEW_UNALU (cfg, widen, widen_op, dreg, ins->dreg);
2601 widen->type = ins->type;
/*
 * get_memcpy_method:
 *   Return (caching in a static) the managed String.memcpy(3 args) helper
 *   from corlib; aborts via g_error if the corlib is too old to provide it.
 */
2611 get_memcpy_method (void)
2613 static MonoMethod *memcpy_method = NULL;
2614 if (!memcpy_method) {
2615 memcpy_method = mono_class_get_method_from_name (mono_defaults.string_class, "memcpy", 3);
2617 g_error ("Old corlib found. Install a new one");
2619 return memcpy_method;
/*
 * create_write_barrier_bitmap:
 *   Build a bitmap (one bit per pointer-sized slot, relative to OFFSET) of
 *   the reference fields of KLASS, recursing into embedded value types that
 *   themselves contain references. Static fields are skipped.
 */
2623 create_write_barrier_bitmap (MonoCompile *cfg, MonoClass *klass, unsigned *wb_bitmap, int offset)
2625 MonoClassField *field;
2626 gpointer iter = NULL;
2628 while ((field = mono_class_get_fields (klass, &iter))) {
2631 if (field->type->attrs & FIELD_ATTRIBUTE_STATIC)
/* Valuetype field offsets include the (absent) MonoObject header; subtract it. */
2633 foffset = klass->valuetype ? field->offset - sizeof (MonoObject): field->offset;
2634 if (mini_type_is_reference (cfg, mono_field_get_type (field))) {
/* Reference fields must be pointer aligned for the per-slot bitmap to work. */
2635 g_assert ((foffset % SIZEOF_VOID_P) == 0);
2636 *wb_bitmap |= 1 << ((offset + foffset) / SIZEOF_VOID_P);
2638 MonoClass *field_class = mono_class_from_mono_type (field->type);
2639 if (field_class->has_references)
2640 create_write_barrier_bitmap (cfg, field_class, wb_bitmap, offset + foffset);
/*
 * emit_write_barrier:
 *   Emit a GC write barrier for a store of VALUE (or VALUE_REG when VALUE is
 *   NULL) through PTR. Strategies, in order of preference: an arch-lowered
 *   card table barrier opcode, inline card-table marking, or a call to the
 *   GC-provided write barrier method. No-op unless cfg->gen_write_barriers.
 */
2646 emit_write_barrier (MonoCompile *cfg, MonoInst *ptr, MonoInst *value, int value_reg)
2648 int card_table_shift_bits;
2649 gpointer card_table_mask;
2651 MonoInst *dummy_use;
2652 int nursery_shift_bits;
2653 size_t nursery_size;
2654 gboolean has_card_table_wb = FALSE;
2656 if (!cfg->gen_write_barriers)
2659 card_table = mono_gc_get_card_table (&card_table_shift_bits, &card_table_mask);
2661 mono_gc_get_nursery (&nursery_shift_bits, &nursery_size);
2663 #ifdef MONO_ARCH_HAVE_CARD_TABLE_WBARRIER
2664 has_card_table_wb = TRUE;
/* Fast path: a single architecture-specific card table barrier instruction. */
2667 if (has_card_table_wb && !cfg->compile_aot && card_table && nursery_shift_bits > 0) {
2670 MONO_INST_NEW (cfg, wbarrier, OP_CARD_TABLE_WBARRIER);
2671 wbarrier->sreg1 = ptr->dreg;
2673 wbarrier->sreg2 = value->dreg;
2675 wbarrier->sreg2 = value_reg;
2676 MONO_ADD_INS (cfg->cbb, wbarrier);
/* Inline card marking: card = card_table + (ptr >> shift); *card = 1. */
2677 } else if (card_table) {
2678 int offset_reg = alloc_preg (cfg);
2679 int card_reg = alloc_preg (cfg);
2682 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_SHR_UN_IMM, offset_reg, ptr->dreg, card_table_shift_bits);
2683 if (card_table_mask)
2684 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_PAND_IMM, offset_reg, offset_reg, card_table_mask);
2686 /*We can't use PADD_IMM since the cardtable might end up in high addresses and amd64 doesn't support
2687 * IMM's larger than 32bits.
2689 if (cfg->compile_aot) {
2690 MONO_EMIT_NEW_AOTCONST (cfg, card_reg, NULL, MONO_PATCH_INFO_GC_CARD_TABLE_ADDR);
2692 MONO_INST_NEW (cfg, ins, OP_PCONST);
2693 ins->inst_p0 = card_table;
2694 ins->dreg = card_reg;
2695 MONO_ADD_INS (cfg->cbb, ins);
2698 MONO_EMIT_NEW_BIALU (cfg, OP_PADD, offset_reg, offset_reg, card_reg);
2699 MONO_EMIT_NEW_STORE_MEMBASE_IMM (cfg, OP_STOREI1_MEMBASE_IMM, offset_reg, 0, 1);
/* Fallback: call the write barrier method supplied by the GC. */
2701 MonoMethod *write_barrier = mono_gc_get_write_barrier ();
2702 mono_emit_method_call (cfg, write_barrier, &ptr, NULL);
/* Dummy use keeps the stored value live across the barrier. */
2706 EMIT_NEW_DUMMY_USE (cfg, dummy_use, value);
2708 MONO_INST_NEW (cfg, dummy_use, OP_DUMMY_USE);
2709 dummy_use->sreg1 = value_reg;
2710 MONO_ADD_INS (cfg->cbb, dummy_use);
/*
 * mono_emit_wb_aware_memcpy:
 *   Try to emit an inlined, write-barrier-aware copy of a KLASS value from
 *   iargs[1] to iargs[0]. Bails out (caller falls back to a generic copy)
 *   when alignment is below pointer size or SIZE exceeds the 32-slot bitmap;
 *   for sizes above 5 pointers it calls the bitmap-based icall, otherwise it
 *   unrolls pointer-sized loads/stores with barriers on reference slots.
 *   NOTE(review): the return statements are not visible in this excerpt.
 */
2715 mono_emit_wb_aware_memcpy (MonoCompile *cfg, MonoClass *klass, MonoInst *iargs[4], int size, int align)
2717 int dest_ptr_reg, tmp_reg, destreg, srcreg, offset;
2718 unsigned need_wb = 0;
2723 /*types with references can't have alignment smaller than sizeof(void*) */
2724 if (align < SIZEOF_VOID_P)
2727 /*This value cannot be bigger than 32 due to the way we calculate the required wb bitmap.*/
2728 if (size > 32 * SIZEOF_VOID_P)
2731 create_write_barrier_bitmap (cfg, klass, &need_wb, 0);
2733 /* We don't unroll more than 5 stores to avoid code bloat. */
2734 if (size > 5 * SIZEOF_VOID_P) {
2735 /*This is harmless and simplify mono_gc_wbarrier_value_copy_bitmap */
2736 size += (SIZEOF_VOID_P - 1);
2737 size &= ~(SIZEOF_VOID_P - 1);
2739 EMIT_NEW_ICONST (cfg, iargs [2], size);
2740 EMIT_NEW_ICONST (cfg, iargs [3], need_wb);
2741 mono_emit_jit_icall (cfg, mono_gc_wbarrier_value_copy_bitmap, iargs);
2745 destreg = iargs [0]->dreg;
2746 srcreg = iargs [1]->dreg;
2749 dest_ptr_reg = alloc_preg (cfg);
2750 tmp_reg = alloc_preg (cfg);
2753 EMIT_NEW_UNALU (cfg, iargs [0], OP_MOVE, dest_ptr_reg, destreg);
/* Unrolled copy: one pointer-sized slot per iteration, with a write
 * barrier on the slots flagged in the bitmap. */
2755 while (size >= SIZEOF_VOID_P) {
2756 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOAD_MEMBASE, tmp_reg, srcreg, offset);
2757 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREP_MEMBASE_REG, dest_ptr_reg, 0, tmp_reg);
2760 emit_write_barrier (cfg, iargs [0], NULL, tmp_reg);
2762 offset += SIZEOF_VOID_P;
2763 size -= SIZEOF_VOID_P;
2766 /*tmp += sizeof (void*)*/
2767 if (size >= SIZEOF_VOID_P) {
2768 NEW_BIALU_IMM (cfg, iargs [0], OP_PADD_IMM, dest_ptr_reg, dest_ptr_reg, SIZEOF_VOID_P);
2769 MONO_ADD_INS (cfg->cbb, iargs [0]);
2773 /* Those cannot be references since size < sizeof (void*) */
2775 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADI4_MEMBASE, tmp_reg, srcreg, offset);
2776 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI4_MEMBASE_REG, destreg, offset, tmp_reg);
2782 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADI2_MEMBASE, tmp_reg, srcreg, offset);
2783 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI2_MEMBASE_REG, destreg, offset, tmp_reg);
2789 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADI1_MEMBASE, tmp_reg, srcreg, offset);
2790 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI1_MEMBASE_REG, destreg, offset, tmp_reg);
2799 * Emit code to copy a valuetype of type @klass whose address is stored in
2800 * @src->dreg to memory whose address is stored at @dest->dreg.
/*
 * When write barriers are needed (heap destination, type with references),
 * prefers the inlined wb-aware copy, else falls back to the mono_value_copy
 * icall; otherwise uses an inline memcpy for small types or String.memcpy.
 */
2803 mini_emit_stobj (MonoCompile *cfg, MonoInst *dest, MonoInst *src, MonoClass *klass, gboolean native)
2805 MonoInst *iargs [4];
2808 MonoMethod *memcpy_method;
2812 * This check breaks with spilled vars... need to handle it during verification anyway.
2813 * g_assert (klass && klass == src->klass && klass == dest->klass);
2817 n = mono_class_native_size (klass, &align);
2819 n = mono_class_value_size (klass, &align);
2821 /* if native is true there should be no references in the struct */
2822 if (cfg->gen_write_barriers && klass->has_references && !native) {
2823 /* Avoid barriers when storing to the stack */
2824 if (!((dest->opcode == OP_ADD_IMM && dest->sreg1 == cfg->frame_reg) ||
2825 (dest->opcode == OP_LDADDR))) {
2826 int context_used = 0;
2831 if (cfg->generic_sharing_context)
2832 context_used = mono_class_check_context_used (klass);
2834 /* It's ok to intrinsify under gsharing since shared code types are layout stable. */
2835 if ((cfg->opt & MONO_OPT_INTRINS) && mono_emit_wb_aware_memcpy (cfg, klass, iargs, n, align)) {
2837 } else if (context_used) {
2838 iargs [2] = emit_get_rgctx_klass (cfg, context_used, klass, MONO_RGCTX_INFO_KLASS);
2840 if (cfg->compile_aot) {
2841 EMIT_NEW_CLASSCONST (cfg, iargs [2], klass);
2843 EMIT_NEW_PCONST (cfg, iargs [2], klass);
2844 mono_class_compute_gc_descriptor (klass);
2848 mono_emit_jit_icall (cfg, mono_value_copy, iargs);
/* No barriers needed: small copies are fully inlined. */
2853 if ((cfg->opt & MONO_OPT_INTRINS) && n <= sizeof (gpointer) * 5) {
2854 /* FIXME: Optimize the case when src/dest is OP_LDADDR */
2855 mini_emit_memcpy (cfg, dest->dreg, 0, src->dreg, 0, n, align);
2859 EMIT_NEW_ICONST (cfg, iargs [2], n);
2861 memcpy_method = get_memcpy_method ();
2862 mono_emit_method_call (cfg, memcpy_method, iargs, NULL);
/*
 * get_memset_method:
 *   Return (caching in a static) the managed String.memset(3 args) helper
 *   from corlib; aborts via g_error if the corlib is too old to provide it.
 */
2867 get_memset_method (void)
2869 static MonoMethod *memset_method = NULL;
2870 if (!memset_method) {
2871 memset_method = mono_class_get_method_from_name (mono_defaults.string_class, "memset", 3);
2873 g_error ("Old corlib found. Install a new one");
2875 return memset_method;
/*
 * mini_emit_initobj:
 *   Emit code that zero-initializes a valuetype of type KLASS at DEST->dreg:
 *   an inline memset for small types, otherwise a call to String.memset.
 */
2879 mini_emit_initobj (MonoCompile *cfg, MonoInst *dest, const guchar *ip, MonoClass *klass)
2881 MonoInst *iargs [3];
2884 MonoMethod *memset_method;
2886 /* FIXME: Optimize this for the case when dest is an LDADDR */
2888 mono_class_init (klass);
2889 n = mono_class_value_size (klass, &align);
/* Small types (up to 5 pointers) are cleared inline. */
2891 if (n <= sizeof (gpointer) * 5) {
2892 mini_emit_memset (cfg, dest->dreg, 0, n, 0, align);
2895 memset_method = get_memset_method ();
2897 EMIT_NEW_ICONST (cfg, iargs [1], 0);
2898 EMIT_NEW_ICONST (cfg, iargs [2], n);
2899 mono_emit_method_call (cfg, memset_method, iargs, NULL);
/*
 * emit_get_rgctx:
 *   Emit IR that loads the runtime generic context for METHOD: the method's
 *   MRGCTX from the hidden vtable var when the method context is used, the
 *   class vtable for static/valuetype methods, or a value derived from
 *   `this`'s vtable for instance methods on reference types.
 *   NOTE(review): intermediate source lines are missing from this excerpt.
 */
2904 emit_get_rgctx (MonoCompile *cfg, MonoMethod *method, int context_used)
2906 MonoInst *this = NULL;
2908 g_assert (cfg->generic_sharing_context);
/* Load `this` only for instance methods on reference types that
 * don't carry a method context. */
2910 if (!(method->flags & METHOD_ATTRIBUTE_STATIC) &&
2911 !(context_used & MONO_GENERIC_CONTEXT_USED_METHOD) &&
2912 !method->klass->valuetype)
2913 EMIT_NEW_ARGLOAD (cfg, this, 0);
2915 if (context_used & MONO_GENERIC_CONTEXT_USED_METHOD) {
2916 MonoInst *mrgctx_loc, *mrgctx_var;
2919 g_assert (method->is_inflated && mono_method_get_context (method)->method_inst);
2921 mrgctx_loc = mono_get_vtable_var (cfg);
2922 EMIT_NEW_TEMPLOAD (cfg, mrgctx_var, mrgctx_loc->inst_c0);
2925 } else if (method->flags & METHOD_ATTRIBUTE_STATIC || method->klass->valuetype) {
2926 MonoInst *vtable_loc, *vtable_var;
2930 vtable_loc = mono_get_vtable_var (cfg);
2931 EMIT_NEW_TEMPLOAD (cfg, vtable_var, vtable_loc->inst_c0);
2933 if (method->is_inflated && mono_method_get_context (method)->method_inst) {
2934 MonoInst *mrgctx_var = vtable_var;
/* The hidden var holds an MRGCTX here; load the class vtable out of it. */
2937 vtable_reg = alloc_preg (cfg);
2938 EMIT_NEW_LOAD_MEMBASE (cfg, vtable_var, OP_LOAD_MEMBASE, vtable_reg, mrgctx_var->dreg, G_STRUCT_OFFSET (MonoMethodRuntimeGenericContext, class_vtable));
2939 vtable_var->type = STACK_PTR;
/* Instance method on a reference type: read the vtable from `this`. */
2947 vtable_reg = alloc_preg (cfg);
2948 EMIT_NEW_LOAD_MEMBASE (cfg, ins, OP_LOAD_MEMBASE, vtable_reg, this->dreg, G_STRUCT_OFFSET (MonoObject, vtable));
/*
 * mono_patch_info_rgctx_entry_new:
 *   Allocate (from the mempool MP) and fill a MonoJumpInfoRgctxEntry that
 *   describes an rgctx fetch of INFO_TYPE for METHOD, wrapping
 *   PATCH_TYPE/PATCH_DATA as the embedded jump info.
 */
2953 static MonoJumpInfoRgctxEntry *
2954 mono_patch_info_rgctx_entry_new (MonoMemPool *mp, MonoMethod *method, gboolean in_mrgctx, MonoJumpInfoType patch_type, gconstpointer patch_data, int info_type)
2956 MonoJumpInfoRgctxEntry *res = mono_mempool_alloc0 (mp, sizeof (MonoJumpInfoRgctxEntry));
2957 res->method = method;
2958 res->in_mrgctx = in_mrgctx;
2959 res->data = mono_mempool_alloc0 (mp, sizeof (MonoJumpInfo));
2960 res->data->type = patch_type;
2961 res->data->data.target = patch_data;
2962 res->info_type = info_type;
/*
 * emit_rgctx_fetch:
 *   Emit a call to the lazy rgctx fetch trampoline that resolves ENTRY,
 *   passing the rgctx instruction as the single argument.
 */
2967 static inline MonoInst*
2968 emit_rgctx_fetch (MonoCompile *cfg, MonoInst *rgctx, MonoJumpInfoRgctxEntry *entry)
2970 return mono_emit_abs_call (cfg, MONO_PATCH_INFO_RGCTX_FETCH, entry, helper_sig_rgctx_lazy_fetch_trampoline, &rgctx);
/*
 * emit_get_rgctx_klass:
 *   Emit IR that fetches the RGCTX_TYPE info slot for KLASS from the
 *   runtime generic context of the current method.
 */
2974 emit_get_rgctx_klass (MonoCompile *cfg, int context_used,
2975 MonoClass *klass, int rgctx_type)
2977 MonoJumpInfoRgctxEntry *entry = mono_patch_info_rgctx_entry_new (cfg->mempool, cfg->current_method, context_used & MONO_GENERIC_CONTEXT_USED_METHOD, MONO_PATCH_INFO_CLASS, klass, rgctx_type);
2978 MonoInst *rgctx = emit_get_rgctx (cfg, cfg->current_method, context_used);
2980 return emit_rgctx_fetch (cfg, rgctx, entry);
2984 * emit_get_rgctx_method:
2986 * Emit IR to load the property RGCTX_TYPE of CMETHOD. If context_used is 0, emit
2987 * normal constants, else emit a load from the rgctx.
2990 emit_get_rgctx_method (MonoCompile *cfg, int context_used,
2991 MonoMethod *cmethod, int rgctx_type)
/* Non-shared code: the method is known at compile time, emit a constant. */
2993 if (!context_used) {
2996 switch (rgctx_type) {
2997 case MONO_RGCTX_INFO_METHOD:
2998 EMIT_NEW_METHODCONST (cfg, ins, cmethod);
3000 case MONO_RGCTX_INFO_METHOD_RGCTX:
3001 EMIT_NEW_METHOD_RGCTX_CONST (cfg, ins, cmethod);
3004 g_assert_not_reached ();
/* Shared code: fetch the info lazily from the rgctx. */
3007 MonoJumpInfoRgctxEntry *entry = mono_patch_info_rgctx_entry_new (cfg->mempool, cfg->current_method, context_used & MONO_GENERIC_CONTEXT_USED_METHOD, MONO_PATCH_INFO_METHODCONST, cmethod, rgctx_type);
3008 MonoInst *rgctx = emit_get_rgctx (cfg, cfg->current_method, context_used);
3010 return emit_rgctx_fetch (cfg, rgctx, entry);
/*
 * emit_get_rgctx_field:
 *   Emit IR that fetches the RGCTX_TYPE info slot for FIELD from the
 *   runtime generic context of the current method.
 */
3015 emit_get_rgctx_field (MonoCompile *cfg, int context_used,
3016 MonoClassField *field, int rgctx_type)
3018 MonoJumpInfoRgctxEntry *entry = mono_patch_info_rgctx_entry_new (cfg->mempool, cfg->current_method, context_used & MONO_GENERIC_CONTEXT_USED_METHOD, MONO_PATCH_INFO_FIELD, field, rgctx_type);
3019 MonoInst *rgctx = emit_get_rgctx (cfg, cfg->current_method, context_used);
3021 return emit_rgctx_fetch (cfg, rgctx, entry);
3025 * On return the caller must check @klass for load errors.
/*
 * emit_generic_class_init:
 *   Emit a call to the generic class-init trampoline for KLASS. The vtable
 *   (an rgctx fetch under generic sharing, otherwise a constant) is the
 *   argument, passed in MONO_ARCH_VTABLE_REG where the arch defines one.
 */
3028 emit_generic_class_init (MonoCompile *cfg, MonoClass *klass)
3030 MonoInst *vtable_arg;
3032 int context_used = 0;
3034 if (cfg->generic_sharing_context)
3035 context_used = mono_class_check_context_used (klass);
3038 vtable_arg = emit_get_rgctx_klass (cfg, context_used,
3039 klass, MONO_RGCTX_INFO_VTABLE);
3041 MonoVTable *vtable = mono_class_vtable (cfg->domain, klass);
3045 EMIT_NEW_VTABLECONST (cfg, vtable_arg, vtable);
/* LLVM uses a different signature for the class init trampoline. */
3048 if (COMPILE_LLVM (cfg))
3049 call = (MonoCallInst*)mono_emit_abs_call (cfg, MONO_PATCH_INFO_GENERIC_CLASS_INIT, NULL, helper_sig_generic_class_init_trampoline_llvm, &vtable_arg);
3051 call = (MonoCallInst*)mono_emit_abs_call (cfg, MONO_PATCH_INFO_GENERIC_CLASS_INIT, NULL, helper_sig_generic_class_init_trampoline, &vtable_arg);
3052 #ifdef MONO_ARCH_VTABLE_REG
3053 mono_call_inst_add_outarg_reg (cfg, call, vtable_arg->dreg, MONO_ARCH_VTABLE_REG, FALSE);
3054 cfg->uses_vtable_reg = TRUE;
/*
 * save_cast_details:
 *   When --debug=casts is enabled, record the source class (read from
 *   OBJ_REG's object vtable) and the target KLASS into the JIT TLS data so
 *   a failing cast can produce a detailed error message. On platforms
 *   without the TLS intrinsic an error is printed instead.
 *   NOTE(review): intermediate source lines are missing from this excerpt.
 */
3061 save_cast_details (MonoCompile *cfg, MonoClass *klass, int obj_reg)
3063 if (mini_get_debug_options ()->better_cast_details) {
3064 int to_klass_reg = alloc_preg (cfg);
3065 int vtable_reg = alloc_preg (cfg);
3066 int klass_reg = alloc_preg (cfg);
3067 MonoInst *tls_get = mono_get_jit_tls_intrinsic (cfg);
/* Fixed: dropped the stray '.' that followed the newline in the message. */
3070 fprintf (stderr, "error: --debug=casts not supported on this platform.\n");
3074 MONO_ADD_INS (cfg->cbb, tls_get);
3075 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, vtable_reg, obj_reg, G_STRUCT_OFFSET (MonoObject, vtable));
3076 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, vtable_reg, G_STRUCT_OFFSET (MonoVTable, klass));
3078 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORE_MEMBASE_REG, tls_get->dreg, G_STRUCT_OFFSET (MonoJitTlsData, class_cast_from), klass_reg);
3079 MONO_EMIT_NEW_PCONST (cfg, to_klass_reg, klass);
3080 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORE_MEMBASE_REG, tls_get->dreg, G_STRUCT_OFFSET (MonoJitTlsData, class_cast_to), to_klass_reg);
/*
 * reset_cast_details:
 *   Clear the saved cast-failure details in the JIT TLS data; only the
 *   class_cast_from field needs to be zeroed.
 */
3085 reset_cast_details (MonoCompile *cfg)
3087 /* Reset the variables holding the cast details */
3088 if (mini_get_debug_options ()->better_cast_details) {
3089 MonoInst *tls_get = mono_get_jit_tls_intrinsic (cfg);
3091 MONO_ADD_INS (cfg->cbb, tls_get);
3092 /* It is enough to reset the from field */
3093 MONO_EMIT_NEW_STORE_MEMBASE_IMM (cfg, OP_STORE_MEMBASE_IMM, tls_get->dreg, G_STRUCT_OFFSET (MonoJitTlsData, class_cast_from), 0);
3098 * On return the caller must check @array_class for load errors
/*
 * mini_emit_check_array_type:
 *   Emit a check that OBJ is an instance of exactly ARRAY_CLASS (compared
 *   via class or vtable pointer, depending on AOT/shared/gshared mode),
 *   throwing ArrayTypeMismatchException on mismatch.
 */
3101 mini_emit_check_array_type (MonoCompile *cfg, MonoInst *obj, MonoClass *array_class)
3103 int vtable_reg = alloc_preg (cfg);
3104 int context_used = 0;
3106 if (cfg->generic_sharing_context)
3107 context_used = mono_class_check_context_used (array_class);
3109 save_cast_details (cfg, array_class, obj->dreg);
/* Faulting load doubles as the null check. */
3111 MONO_EMIT_NEW_LOAD_MEMBASE_FAULT (cfg, vtable_reg, obj->dreg, G_STRUCT_OFFSET (MonoObject, vtable));
3113 if (cfg->opt & MONO_OPT_SHARED) {
3114 int class_reg = alloc_preg (cfg);
3115 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, class_reg, vtable_reg, G_STRUCT_OFFSET (MonoVTable, klass));
3116 if (cfg->compile_aot) {
3117 int klass_reg = alloc_preg (cfg);
3118 MONO_EMIT_NEW_CLASSCONST (cfg, klass_reg, array_class);
3119 MONO_EMIT_NEW_BIALU (cfg, OP_COMPARE, -1, class_reg, klass_reg);
3121 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, class_reg, array_class);
/* Shared generic code fetches the expected vtable from the rgctx. */
3123 } else if (context_used) {
3124 MonoInst *vtable_ins;
3126 vtable_ins = emit_get_rgctx_klass (cfg, context_used, array_class, MONO_RGCTX_INFO_VTABLE);
3127 MONO_EMIT_NEW_BIALU (cfg, OP_COMPARE, -1, vtable_reg, vtable_ins->dreg);
3129 if (cfg->compile_aot) {
3133 if (!(vtable = mono_class_vtable (cfg->domain, array_class)))
3135 vt_reg = alloc_preg (cfg);
3136 MONO_EMIT_NEW_VTABLECONST (cfg, vt_reg, vtable);
3137 MONO_EMIT_NEW_BIALU (cfg, OP_COMPARE, -1, vtable_reg, vt_reg);
3140 if (!(vtable = mono_class_vtable (cfg->domain, array_class)))
3142 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, vtable_reg, vtable);
3146 MONO_EMIT_NEW_COND_EXC (cfg, NE_UN, "ArrayTypeMismatchException");
3148 reset_cast_details (cfg);
3152 * Handles unbox of a Nullable<T>. If context_used is non zero, then shared
3153 * generic code is generated.
3156 handle_unbox_nullable (MonoCompile* cfg, MonoInst* val, MonoClass* klass, int context_used)
/* Nullable unbox is implemented by calling the managed Nullable<T>.Unbox. */
3158 MonoMethod* method = mono_class_get_method_from_name (klass, "Unbox", 1);
3161 MonoInst *rgctx, *addr;
3163 /* FIXME: What if the class is shared? We might not
3164 have to get the address of the method from the
/* Shared code: resolve the method address through the rgctx and
 * call it indirectly. */
3166 addr = emit_get_rgctx_method (cfg, context_used, method,
3167 MONO_RGCTX_INFO_GENERIC_METHOD_CODE);
3169 rgctx = emit_get_rgctx (cfg, cfg->current_method, context_used);
3171 return mono_emit_calli (cfg, mono_method_signature (method), &val, addr, rgctx);
3173 return mono_emit_method_call (cfg, method, &val, NULL);
/*
 * handle_unbox:
 *   Emit the IR for unboxing sp[0] to valuetype KLASS: null/type checks
 *   (InvalidCastException on mismatch) followed by computing the address of
 *   the boxed payload (obj + sizeof (MonoObject)), returned as STACK_MP.
 */
3178 handle_unbox (MonoCompile *cfg, MonoClass *klass, MonoInst **sp, int context_used)
3182 int vtable_reg = alloc_dreg (cfg ,STACK_PTR);
3183 int klass_reg = alloc_dreg (cfg ,STACK_PTR);
3184 int eclass_reg = alloc_dreg (cfg ,STACK_PTR);
3185 int rank_reg = alloc_dreg (cfg ,STACK_I4);
3187 obj_reg = sp [0]->dreg;
/* Faulting load doubles as the null check. */
3188 MONO_EMIT_NEW_LOAD_MEMBASE_FAULT (cfg, vtable_reg, obj_reg, G_STRUCT_OFFSET (MonoObject, vtable));
3189 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADU1_MEMBASE, rank_reg, vtable_reg, G_STRUCT_OFFSET (MonoVTable, rank));
3191 /* FIXME: generics */
3192 g_assert (klass->rank == 0);
/* Arrays can never be unboxed to a valuetype. */
3195 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, rank_reg, 0);
3196 MONO_EMIT_NEW_COND_EXC (cfg, NE_UN, "InvalidCastException");
3198 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, vtable_reg, G_STRUCT_OFFSET (MonoVTable, klass));
3199 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, eclass_reg, klass_reg, G_STRUCT_OFFSET (MonoClass, element_class));
3202 MonoInst *element_class;
3204 /* This assertion is from the unboxcast insn */
3205 g_assert (klass->rank == 0);
/* Shared code: compare against the element class fetched from the rgctx. */
3207 element_class = emit_get_rgctx_klass (cfg, context_used,
3208 klass->element_class, MONO_RGCTX_INFO_KLASS);
3210 MONO_EMIT_NEW_BIALU (cfg, OP_COMPARE, -1, eclass_reg, element_class->dreg);
3211 MONO_EMIT_NEW_COND_EXC (cfg, NE_UN, "InvalidCastException");
3213 save_cast_details (cfg, klass->element_class, obj_reg);
3214 mini_emit_class_check (cfg, eclass_reg, klass->element_class);
3215 reset_cast_details (cfg);
/* Result: pointer to the value payload right after the object header. */
3218 NEW_BIALU_IMM (cfg, add, OP_ADD_IMM, alloc_dreg (cfg, STACK_MP), obj_reg, sizeof (MonoObject));
3219 MONO_ADD_INS (cfg->cbb, add);
3220 add->type = STACK_MP;
3227 * Returns NULL and set the cfg exception on error.
/*
 * handle_alloc:
 *   Emit IR that allocates an instance of KLASS (FOR_BOX when used for
 *   boxing). Picks between mono_object_new (shared/appdomain-aware),
 *   mono_object_new_specific, a corlib-specialized AOT helper, a GC managed
 *   allocator method, or the per-vtable allocation function.
 *   NOTE(review): several intermediate lines are missing from this excerpt.
 */
3230 handle_alloc (MonoCompile *cfg, MonoClass *klass, gboolean for_box, int context_used)
3232 MonoInst *iargs [2];
3238 MonoInst *iargs [2];
3241 FIXME: we cannot get managed_alloc here because we can't get
3242 the class's vtable (because it's not a closed class)
3244 MonoVTable *vtable = mono_class_vtable (cfg->domain, klass);
3245 MonoMethod *managed_alloc = mono_gc_get_managed_allocator (vtable, for_box);
/* Under MONO_OPT_SHARED the icall takes a klass, otherwise a vtable. */
3248 if (cfg->opt & MONO_OPT_SHARED)
3249 rgctx_info = MONO_RGCTX_INFO_KLASS;
3251 rgctx_info = MONO_RGCTX_INFO_VTABLE;
3252 data = emit_get_rgctx_klass (cfg, context_used, klass, rgctx_info);
3254 if (cfg->opt & MONO_OPT_SHARED) {
3255 EMIT_NEW_DOMAINCONST (cfg, iargs [0]);
3257 alloc_ftn = mono_object_new;
3260 alloc_ftn = mono_object_new_specific;
3263 return mono_emit_jit_icall (cfg, alloc_ftn, iargs);
/* Non-gshared paths below. */
3266 if (cfg->opt & MONO_OPT_SHARED) {
3267 EMIT_NEW_DOMAINCONST (cfg, iargs [0]);
3268 EMIT_NEW_CLASSCONST (cfg, iargs [1], klass);
3270 alloc_ftn = mono_object_new;
3271 } else if (cfg->compile_aot && cfg->cbb->out_of_line && klass->type_token && klass->image == mono_defaults.corlib && !klass->generic_class) {
3272 /* This happens often in argument checking code, eg. throw new FooException... */
3273 /* Avoid relocations and save some space by calling a helper function specialized to mscorlib */
3274 EMIT_NEW_ICONST (cfg, iargs [0], mono_metadata_token_index (klass->type_token));
3275 return mono_emit_jit_icall (cfg, mono_helper_newobj_mscorlib, iargs);
3277 MonoVTable *vtable = mono_class_vtable (cfg->domain, klass);
3278 MonoMethod *managed_alloc = NULL;
/* vtable lookup failed: report a TypeLoadException through the cfg. */
3282 mono_cfg_set_exception (cfg, MONO_EXCEPTION_TYPE_LOAD);
3283 cfg->exception_ptr = klass;
3287 #ifndef MONO_CROSS_COMPILE
3288 managed_alloc = mono_gc_get_managed_allocator (vtable, for_box);
3291 if (managed_alloc) {
3292 EMIT_NEW_VTABLECONST (cfg, iargs [0], vtable);
3293 return mono_emit_method_call (cfg, managed_alloc, iargs, NULL);
3295 alloc_ftn = mono_class_get_allocation_ftn (vtable, for_box, &pass_lw);
/* pass_lw: the allocator wants the instance size in pointer words. */
3297 guint32 lw = vtable->klass->instance_size;
3298 lw = ((lw + (sizeof (gpointer) - 1)) & ~(sizeof (gpointer) - 1)) / sizeof (gpointer);
3299 EMIT_NEW_ICONST (cfg, iargs [0], lw);
3300 EMIT_NEW_VTABLECONST (cfg, iargs [1], vtable);
3303 EMIT_NEW_VTABLECONST (cfg, iargs [0], vtable);
3307 return mono_emit_jit_icall (cfg, alloc_ftn, iargs);
3311 * Returns NULL and set the cfg exception on error.
/*
 * handle_box:
 *   Emit IR that boxes VAL of type KLASS. Nullable types go through the
 *   managed Nullable<T>.Box (via rgctx-resolved calli under sharing);
 *   otherwise allocate the object and store the value after the header.
 */
3314 handle_box (MonoCompile *cfg, MonoInst *val, MonoClass *klass, int context_used)
3316 MonoInst *alloc, *ins;
3318 if (mono_class_is_nullable (klass)) {
3319 MonoMethod* method = mono_class_get_method_from_name (klass, "Box", 1);
3322 /* FIXME: What if the class is shared? We might not
3323 have to get the method address from the RGCTX. */
3324 MonoInst *addr = emit_get_rgctx_method (cfg, context_used, method,
3325 MONO_RGCTX_INFO_GENERIC_METHOD_CODE);
3326 MonoInst *rgctx = emit_get_rgctx (cfg, cfg->current_method, context_used);
3328 return mono_emit_calli (cfg, mono_method_signature (method), &val, addr, rgctx);
3330 return mono_emit_method_call (cfg, method, &val, NULL);
3334 alloc = handle_alloc (cfg, klass, TRUE, context_used);
/* Copy the value into the freshly allocated object, past the header. */
3338 EMIT_NEW_STORE_MEMBASE_TYPE (cfg, ins, &klass->byval_arg, alloc->dreg, sizeof (MonoObject), val->dreg);
/*
 * mini_class_has_reference_variant_generic_argument:
 *   Return whether KLASS has at least one (co/contra)variant generic
 *   parameter instantiated with a reference type -- such casts need the
 *   variance-aware cast helpers instead of the inline fast paths.
 */
3345 mini_class_has_reference_variant_generic_argument (MonoCompile *cfg, MonoClass *klass, int context_used)
3348 MonoGenericContainer *container;
3349 MonoGenericInst *ginst;
3351 if (klass->generic_class) {
3352 container = klass->generic_class->container_class->generic_container;
3353 ginst = klass->generic_class->context.class_inst;
/* Open generic class inside shared code: inspect its own container. */
3354 } else if (klass->generic_container && context_used) {
3355 container = klass->generic_container;
3356 ginst = container->context.class_inst;
3361 for (i = 0; i < container->type_argc; ++i) {
/* Only variant (in/out) parameters matter. */
3363 if (!(mono_generic_container_get_param_info (container, i)->flags & (MONO_GEN_PARAM_VARIANT|MONO_GEN_PARAM_COVARIANT)))
3365 type = ginst->type_argv [i];
3366 if (mini_type_is_reference (cfg, type))
3372 // FIXME: This doesn't work yet (class libs tests fail?)
3373 #define is_complex_isinst(klass) (TRUE || (klass->flags & TYPE_ATTRIBUTE_INTERFACE) || klass->rank || mono_class_is_nullable (klass) || klass->marshalbyref || (klass->flags & TYPE_ATTRIBUTE_SEALED) || klass->byval_arg.type == MONO_TYPE_VAR || klass->byval_arg.type == MONO_TYPE_MVAR)
3376 * Returns NULL and set the cfg exception on error.
/*
 * handle_castclass:
 *   Emit the IR for a castclass of SRC to KLASS. Variant generic interfaces
 *   go through the cached castclass wrapper, "complex" casts through the
 *   mono_object_castclass icall, and simple casts are expanded inline
 *   (null check plus vtable/class compare, throwing InvalidCastException).
 */
3379 handle_castclass (MonoCompile *cfg, MonoClass *klass, MonoInst *src, int context_used)
3381 MonoBasicBlock *is_null_bb;
3382 int obj_reg = src->dreg;
3383 int vtable_reg = alloc_preg (cfg);
3384 MonoInst *klass_inst = NULL;
3389 if(mini_class_has_reference_variant_generic_argument (cfg, klass, context_used)) {
3390 MonoMethod *mono_castclass = mono_marshal_get_castclass_with_cache ();
3391 MonoInst *cache_ins;
3393 cache_ins = emit_get_rgctx_klass (cfg, context_used, klass, MONO_RGCTX_INFO_CAST_CACHE);
3398 /* klass - it's the second element of the cache entry*/
3399 EMIT_NEW_LOAD_MEMBASE (cfg, args [1], OP_LOAD_MEMBASE, alloc_preg (cfg), cache_ins->dreg, sizeof (gpointer));
3402 args [2] = cache_ins;
3404 return mono_emit_method_call (cfg, mono_castclass, args, NULL);
3407 klass_inst = emit_get_rgctx_klass (cfg, context_used, klass, MONO_RGCTX_INFO_KLASS);
3409 if (is_complex_isinst (klass)) {
3410 /* Complex case, handle by an icall */
3416 args [1] = klass_inst;
3418 return mono_emit_jit_icall (cfg, mono_object_castclass, args);
3420 /* Simple case, handled by the code below */
/* Null references always pass a castclass. */
3424 NEW_BBLOCK (cfg, is_null_bb);
3426 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, obj_reg, 0);
3427 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBEQ, is_null_bb);
3429 save_cast_details (cfg, klass, obj_reg);
3431 if (klass->flags & TYPE_ATTRIBUTE_INTERFACE) {
3432 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, vtable_reg, obj_reg, G_STRUCT_OFFSET (MonoObject, vtable));
3433 mini_emit_iface_cast (cfg, vtable_reg, klass, NULL, NULL);
3435 int klass_reg = alloc_preg (cfg);
3437 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, vtable_reg, obj_reg, G_STRUCT_OFFSET (MonoObject, vtable));
/* Sealed non-array class in JIT mode: a single class-pointer compare suffices. */
3439 if (!klass->rank && !cfg->compile_aot && !(cfg->opt & MONO_OPT_SHARED) && (klass->flags & TYPE_ATTRIBUTE_SEALED)) {
3440 /* the remoting code is broken, access the class for now */
3441 if (0) { /*FIXME what exactly is broken? This change refers to r39380 from 2005 and mention some remoting fixes were due.*/
3442 MonoVTable *vt = mono_class_vtable (cfg->domain, klass);
3444 mono_cfg_set_exception (cfg, MONO_EXCEPTION_TYPE_LOAD);
3445 cfg->exception_ptr = klass;
3448 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, vtable_reg, vt);
3450 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, vtable_reg, G_STRUCT_OFFSET (MonoVTable, klass));
3451 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, klass_reg, klass);
3453 MONO_EMIT_NEW_COND_EXC (cfg, NE_UN, "InvalidCastException");
3455 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, vtable_reg, G_STRUCT_OFFSET (MonoVTable, klass));
3456 mini_emit_castclass_inst (cfg, obj_reg, klass_reg, klass, klass_inst, is_null_bb);
3460 MONO_START_BB (cfg, is_null_bb);
3462 reset_cast_details (cfg);
3468 * Returns NULL and set the cfg exception on error.
3471 handle_isinst (MonoCompile *cfg, MonoClass *klass, MonoInst *src, int context_used)
3474 MonoBasicBlock *is_null_bb, *false_bb, *end_bb;
3475 int obj_reg = src->dreg;
3476 int vtable_reg = alloc_preg (cfg);
3477 int res_reg = alloc_ireg_ref (cfg);
3478 MonoInst *klass_inst = NULL;
3483 if(mini_class_has_reference_variant_generic_argument (cfg, klass, context_used)) {
3484 MonoMethod *mono_isinst = mono_marshal_get_isinst_with_cache ();
3485 MonoInst *cache_ins;
3487 cache_ins = emit_get_rgctx_klass (cfg, context_used, klass, MONO_RGCTX_INFO_CAST_CACHE);
3492 /* klass - it's the second element of the cache entry*/
3493 EMIT_NEW_LOAD_MEMBASE (cfg, args [1], OP_LOAD_MEMBASE, alloc_preg (cfg), cache_ins->dreg, sizeof (gpointer));
3496 args [2] = cache_ins;
3498 return mono_emit_method_call (cfg, mono_isinst, args, NULL);
3501 klass_inst = emit_get_rgctx_klass (cfg, context_used, klass, MONO_RGCTX_INFO_KLASS);
3503 if (is_complex_isinst (klass)) {
3504 /* Complex case, handle by an icall */
3510 args [1] = klass_inst;
3512 return mono_emit_jit_icall (cfg, mono_object_isinst, args);
3514 /* Simple case, the code below can handle it */
3518 NEW_BBLOCK (cfg, is_null_bb);
3519 NEW_BBLOCK (cfg, false_bb);
3520 NEW_BBLOCK (cfg, end_bb);
3522 /* Do the assignment at the beginning, so the other assignment can be if converted */
3523 EMIT_NEW_UNALU (cfg, ins, OP_MOVE, res_reg, obj_reg);
3524 ins->type = STACK_OBJ;
3527 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, obj_reg, 0);
3528 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_IBEQ, is_null_bb);
3530 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, vtable_reg, obj_reg, G_STRUCT_OFFSET (MonoObject, vtable));
3532 if (klass->flags & TYPE_ATTRIBUTE_INTERFACE) {
3533 g_assert (!context_used);
3534 /* the is_null_bb target simply copies the input register to the output */
3535 mini_emit_iface_cast (cfg, vtable_reg, klass, false_bb, is_null_bb);
3537 int klass_reg = alloc_preg (cfg);
3540 int rank_reg = alloc_preg (cfg);
3541 int eclass_reg = alloc_preg (cfg);
3543 g_assert (!context_used);
3544 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADU1_MEMBASE, rank_reg, vtable_reg, G_STRUCT_OFFSET (MonoVTable, rank));
3545 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, rank_reg, klass->rank);
3546 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBNE_UN, false_bb);
3547 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, vtable_reg, G_STRUCT_OFFSET (MonoVTable, klass));
3548 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, eclass_reg, klass_reg, G_STRUCT_OFFSET (MonoClass, cast_class));
3549 if (klass->cast_class == mono_defaults.object_class) {
3550 int parent_reg = alloc_preg (cfg);
3551 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, parent_reg, eclass_reg, G_STRUCT_OFFSET (MonoClass, parent));
3552 mini_emit_class_check_branch (cfg, parent_reg, mono_defaults.enum_class->parent, OP_PBNE_UN, is_null_bb);
3553 mini_emit_class_check_branch (cfg, eclass_reg, mono_defaults.enum_class, OP_PBEQ, is_null_bb);
3554 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, false_bb);
3555 } else if (klass->cast_class == mono_defaults.enum_class->parent) {
3556 mini_emit_class_check_branch (cfg, eclass_reg, mono_defaults.enum_class->parent, OP_PBEQ, is_null_bb);
3557 mini_emit_class_check_branch (cfg, eclass_reg, mono_defaults.enum_class, OP_PBEQ, is_null_bb);
3558 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, false_bb);
3559 } else if (klass->cast_class == mono_defaults.enum_class) {
3560 mini_emit_class_check_branch (cfg, eclass_reg, mono_defaults.enum_class, OP_PBEQ, is_null_bb);
3561 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, false_bb);
3562 } else if (klass->cast_class->flags & TYPE_ATTRIBUTE_INTERFACE) {
3563 mini_emit_iface_class_cast (cfg, eclass_reg, klass->cast_class, false_bb, is_null_bb);
3565 if ((klass->rank == 1) && (klass->byval_arg.type == MONO_TYPE_SZARRAY)) {
3566 /* Check that the object is a vector too */
3567 int bounds_reg = alloc_preg (cfg);
3568 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, bounds_reg, obj_reg, G_STRUCT_OFFSET (MonoArray, bounds));
3569 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, bounds_reg, 0);
3570 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBNE_UN, false_bb);
3573 /* the is_null_bb target simply copies the input register to the output */
3574 mini_emit_isninst_cast (cfg, eclass_reg, klass->cast_class, false_bb, is_null_bb);
3576 } else if (mono_class_is_nullable (klass)) {
3577 g_assert (!context_used);
3578 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, vtable_reg, G_STRUCT_OFFSET (MonoVTable, klass));
3579 /* the is_null_bb target simply copies the input register to the output */
3580 mini_emit_isninst_cast (cfg, klass_reg, klass->cast_class, false_bb, is_null_bb);
3582 if (!cfg->compile_aot && !(cfg->opt & MONO_OPT_SHARED) && (klass->flags & TYPE_ATTRIBUTE_SEALED)) {
3583 g_assert (!context_used);
3584 /* the remoting code is broken, access the class for now */
3585 if (0) {/*FIXME what exactly is broken? This change refers to r39380 from 2005 and mention some remoting fixes were due.*/
3586 MonoVTable *vt = mono_class_vtable (cfg->domain, klass);
3588 mono_cfg_set_exception (cfg, MONO_EXCEPTION_TYPE_LOAD);
3589 cfg->exception_ptr = klass;
3592 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, vtable_reg, vt);
3594 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, vtable_reg, G_STRUCT_OFFSET (MonoVTable, klass));
3595 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, klass_reg, klass);
3597 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBNE_UN, false_bb);
3598 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, is_null_bb);
3600 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, vtable_reg, G_STRUCT_OFFSET (MonoVTable, klass));
3601 /* the is_null_bb target simply copies the input register to the output */
3602 mini_emit_isninst_cast_inst (cfg, klass_reg, klass, klass_inst, false_bb, is_null_bb);
3607 MONO_START_BB (cfg, false_bb);
3609 MONO_EMIT_NEW_PCONST (cfg, res_reg, 0);
3610 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, end_bb);
3612 MONO_START_BB (cfg, is_null_bb);
3614 MONO_START_BB (cfg, end_bb);
/*
 * handle_cisinst:
 *
 * Expand a "cisinst" (checked isinst with remoting/proxy awareness) into IR.
 * The integer result (0/1/2, see the contract below) is materialized into a
 * fresh int register via OP_ICONST at the join block.
 */
3620 handle_cisinst (MonoCompile *cfg, MonoClass *klass, MonoInst *src)
3622 /* This opcode takes as input an object reference and a class, and returns:
3623 0) if the object is an instance of the class,
3624 1) if the object is not instance of the class,
3625 2) if the object is a proxy whose type cannot be determined */
3628 MonoBasicBlock *true_bb, *false_bb, *false2_bb, *end_bb, *no_proxy_bb, *interface_fail_bb;
3629 int obj_reg = src->dreg;
3630 int dreg = alloc_ireg (cfg);
/* Blocks: true_bb -> result 0, false_bb -> result 1, false2_bb -> result 2,
 * no_proxy_bb -> plain (non-proxy) class check, end_bb -> join point. */
3632 int klass_reg = alloc_preg (cfg);
3634 NEW_BBLOCK (cfg, true_bb);
3635 NEW_BBLOCK (cfg, false_bb);
3636 NEW_BBLOCK (cfg, false2_bb);
3637 NEW_BBLOCK (cfg, end_bb);
3638 NEW_BBLOCK (cfg, no_proxy_bb);
/* A null object is "not an instance": branch straight to the result-1 block. */
3640 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, obj_reg, 0);
3641 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBEQ, false_bb);
3643 if (klass->flags & TYPE_ATTRIBUTE_INTERFACE) {
/* Interface target: run the interface cast check first; on failure fall
 * through to interface_fail_bb for the transparent-proxy special case. */
3644 NEW_BBLOCK (cfg, interface_fail_bb);
3646 tmp_reg = alloc_preg (cfg);
3647 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, tmp_reg, obj_reg, G_STRUCT_OFFSET (MonoObject, vtable));
3648 mini_emit_iface_cast (cfg, tmp_reg, klass, interface_fail_bb, true_bb);
3649 MONO_START_BB (cfg, interface_fail_bb);
3650 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, tmp_reg, G_STRUCT_OFFSET (MonoVTable, klass));
/* Not a transparent proxy -> definitely not an instance (result 1). */
3652 mini_emit_class_check_branch (cfg, klass_reg, mono_defaults.transparent_proxy_class, OP_PBNE_UN, false_bb);
/* A proxy carrying custom type info cannot be decided here: result 2. */
3654 tmp_reg = alloc_preg (cfg);
3655 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, tmp_reg, obj_reg, G_STRUCT_OFFSET (MonoTransparentProxy, custom_type_info));
3656 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, tmp_reg, 0);
3657 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBNE_UN, false2_bb);
/* Non-interface target: load vtable->klass and test for a proxy. */
3659 tmp_reg = alloc_preg (cfg);
3660 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, tmp_reg, obj_reg, G_STRUCT_OFFSET (MonoObject, vtable));
3661 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, tmp_reg, G_STRUCT_OFFSET (MonoVTable, klass));
3663 mini_emit_class_check_branch (cfg, klass_reg, mono_defaults.transparent_proxy_class, OP_PBNE_UN, no_proxy_bb);
/* Transparent proxy: check against the remote class's proxy_class instead. */
3664 tmp_reg = alloc_preg (cfg);
3665 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, tmp_reg, obj_reg, G_STRUCT_OFFSET (MonoTransparentProxy, remote_class));
3666 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, tmp_reg, G_STRUCT_OFFSET (MonoRemoteClass, proxy_class));
/* Without custom type info, treat the proxy like a normal object. */
3668 tmp_reg = alloc_preg (cfg);
3669 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, tmp_reg, obj_reg, G_STRUCT_OFFSET (MonoTransparentProxy, custom_type_info));
3670 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, tmp_reg, 0);
3671 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBEQ, no_proxy_bb);
/* Proxy with custom info: failure means "cannot be determined" (result 2). */
3673 mini_emit_isninst_cast (cfg, klass_reg, klass, false2_bb, true_bb);
3674 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, false2_bb);
3676 MONO_START_BB (cfg, no_proxy_bb);
/* Ordinary subtype test: failure is a plain "not an instance" (result 1). */
3678 mini_emit_isninst_cast (cfg, klass_reg, klass, false_bb, true_bb);
/* Result blocks: load the constant result and jump to the join block. */
3681 MONO_START_BB (cfg, false_bb);
3683 MONO_EMIT_NEW_ICONST (cfg, dreg, 1);
3684 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, end_bb);
3686 MONO_START_BB (cfg, false2_bb);
3688 MONO_EMIT_NEW_ICONST (cfg, dreg, 2);
3689 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, end_bb);
3691 MONO_START_BB (cfg, true_bb);
3693 MONO_EMIT_NEW_ICONST (cfg, dreg, 0);
3695 MONO_START_BB (cfg, end_bb);
/* Wrap the result register in an OP_ICONST instruction of stack type I4. */
3698 MONO_INST_NEW (cfg, ins, OP_ICONST);
3700 ins->type = STACK_I4;
/*
 * handle_ccastclass:
 *
 * Expand a "ccastclass" (checked castclass with remoting/proxy awareness)
 * into IR. On failure an InvalidCastException is raised via the emitted
 * checks; otherwise a 0/1 result is produced (see contract below).
 */
3706 handle_ccastclass (MonoCompile *cfg, MonoClass *klass, MonoInst *src)
3708 /* This opcode takes as input an object reference and a class, and returns:
3709 0) if the object is an instance of the class,
3710 1) if the object is a proxy whose type cannot be determined
3711 an InvalidCastException exception is thrown otherwise*/
3714 MonoBasicBlock *end_bb, *ok_result_bb, *no_proxy_bb, *interface_fail_bb, *fail_1_bb;
3715 int obj_reg = src->dreg;
3716 int dreg = alloc_ireg (cfg);
3717 int tmp_reg = alloc_preg (cfg);
3718 int klass_reg = alloc_preg (cfg);
3720 NEW_BBLOCK (cfg, end_bb);
3721 NEW_BBLOCK (cfg, ok_result_bb);
/* Casting null always succeeds: jump straight to the result-0 block. */
3723 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, obj_reg, 0);
3724 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBEQ, ok_result_bb);
/* Record class/object for a descriptive InvalidCastException message. */
3726 save_cast_details (cfg, klass, obj_reg);
3728 if (klass->flags & TYPE_ATTRIBUTE_INTERFACE) {
/* Interface target: try the interface cast; failure falls through to the
 * transparent-proxy handling below. */
3729 NEW_BBLOCK (cfg, interface_fail_bb);
3731 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, tmp_reg, obj_reg, G_STRUCT_OFFSET (MonoObject, vtable));
3732 mini_emit_iface_cast (cfg, tmp_reg, klass, interface_fail_bb, ok_result_bb);
3733 MONO_START_BB (cfg, interface_fail_bb);
3734 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, tmp_reg, G_STRUCT_OFFSET (MonoVTable, klass));
/* Non-proxy object that failed the interface check: this throws. */
3736 mini_emit_class_check (cfg, klass_reg, mono_defaults.transparent_proxy_class);
/* Proxy without custom type info cannot satisfy the cast: throw. */
3738 tmp_reg = alloc_preg (cfg);
3739 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, tmp_reg, obj_reg, G_STRUCT_OFFSET (MonoTransparentProxy, custom_type_info));
3740 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, tmp_reg, 0);
3741 MONO_EMIT_NEW_COND_EXC (cfg, EQ, "InvalidCastException");
/* Proxy with custom type info: undecidable here, result 1. */
3743 MONO_EMIT_NEW_ICONST (cfg, dreg, 1);
3744 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, end_bb);
/* Non-interface target: detect transparent proxies first. */
3747 NEW_BBLOCK (cfg, no_proxy_bb);
3749 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, tmp_reg, obj_reg, G_STRUCT_OFFSET (MonoObject, vtable));
3750 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, tmp_reg, G_STRUCT_OFFSET (MonoVTable, klass));
3751 mini_emit_class_check_branch (cfg, klass_reg, mono_defaults.transparent_proxy_class, OP_PBNE_UN, no_proxy_bb);
/* Proxy: test against the remote class's proxy_class. */
3753 tmp_reg = alloc_preg (cfg);
3754 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, tmp_reg, obj_reg, G_STRUCT_OFFSET (MonoTransparentProxy, remote_class));
3755 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, tmp_reg, G_STRUCT_OFFSET (MonoRemoteClass, proxy_class));
/* Without custom type info, fall back to the plain castclass path. */
3757 tmp_reg = alloc_preg (cfg);
3758 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, tmp_reg, obj_reg, G_STRUCT_OFFSET (MonoTransparentProxy, custom_type_info));
3759 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, tmp_reg, 0);
3760 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBEQ, no_proxy_bb);
/* Proxy subtype test: failure yields result 1 instead of throwing. */
3762 NEW_BBLOCK (cfg, fail_1_bb);
3764 mini_emit_isninst_cast (cfg, klass_reg, klass, fail_1_bb, ok_result_bb);
3766 MONO_START_BB (cfg, fail_1_bb);
3768 MONO_EMIT_NEW_ICONST (cfg, dreg, 1);
3769 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, end_bb);
3771 MONO_START_BB (cfg, no_proxy_bb);
/* Ordinary castclass: throws InvalidCastException on failure. */
3773 mini_emit_castclass (cfg, obj_reg, klass_reg, klass, ok_result_bb);
3776 MONO_START_BB (cfg, ok_result_bb);
3778 MONO_EMIT_NEW_ICONST (cfg, dreg, 0);
3780 MONO_START_BB (cfg, end_bb);
/* Wrap the result register in an OP_ICONST instruction of stack type I4. */
3783 MONO_INST_NEW (cfg, ins, OP_ICONST);
3785 ins->type = STACK_I4;
/*
 * handle_delegate_ctor:
 *
 * Inline the body of mono_delegate_ctor (): allocate the delegate object of
 * type KLASS and initialize its target, method, method_code and invoke_impl
 * fields, binding it to METHOD with TARGET as the first argument.
 * CONTEXT_USED is forwarded to the rgctx lookups for generic sharing.
 */
3791 * Returns NULL and set the cfg exception on error.
3793 static G_GNUC_UNUSED MonoInst*
3794 handle_delegate_ctor (MonoCompile *cfg, MonoClass *klass, MonoInst *target, MonoMethod *method, int context_used)
3798 gpointer *trampoline;
3799 MonoInst *obj, *method_ins, *tramp_ins;
/* Allocate the delegate instance itself. */
3803 obj = handle_alloc (cfg, klass, FALSE, 0);
3807 /* Inline the contents of mono_delegate_ctor */
3809 /* Set target field */
3810 /* Optimize away setting of NULL target */
3811 if (!(target->opcode == OP_PCONST && target->inst_p0 == 0)) {
3812 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORE_MEMBASE_REG, obj->dreg, G_STRUCT_OFFSET (MonoDelegate, target), target->dreg);
/* target is a reference stored into the heap object: notify the GC. */
3813 if (cfg->gen_write_barriers) {
3814 dreg = alloc_preg (cfg);
3815 EMIT_NEW_BIALU_IMM (cfg, ptr, OP_PADD_IMM, dreg, obj->dreg, G_STRUCT_OFFSET (MonoDelegate, target));
3816 emit_write_barrier (cfg, ptr, target, 0);
3820 /* Set method field */
3821 method_ins = emit_get_rgctx_method (cfg, context_used, method, MONO_RGCTX_INFO_METHOD);
3822 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORE_MEMBASE_REG, obj->dreg, G_STRUCT_OFFSET (MonoDelegate, method), method_ins->dreg);
3823 if (cfg->gen_write_barriers) {
3824 dreg = alloc_preg (cfg);
3825 EMIT_NEW_BIALU_IMM (cfg, ptr, OP_PADD_IMM, dreg, obj->dreg, G_STRUCT_OFFSET (MonoDelegate, method));
3826 emit_write_barrier (cfg, ptr, method_ins, 0);
3829 * To avoid looking up the compiled code belonging to the target method
3830 * in mono_delegate_trampoline (), we allocate a per-domain memory slot to
3831 * store it, and we fill it after the method has been compiled.
/* Skipped for AOT (no runtime slot address available) and for dynamic
 * methods (their code can be freed). */
3833 if (!cfg->compile_aot && !method->dynamic) {
3834 MonoInst *code_slot_ins;
3837 code_slot_ins = emit_get_rgctx_method (cfg, context_used, method, MONO_RGCTX_INFO_METHOD_DELEGATE_CODE);
/* Lazily create the per-domain method -> code-slot hash; the domain lock
 * guards both the hash creation and the lookup/insert pair. */
3839 domain = mono_domain_get ();
3840 mono_domain_lock (domain);
3841 if (!domain_jit_info (domain)->method_code_hash)
3842 domain_jit_info (domain)->method_code_hash = g_hash_table_new (NULL, NULL);
3843 code_slot = g_hash_table_lookup (domain_jit_info (domain)->method_code_hash, method);
3845 code_slot = mono_domain_alloc0 (domain, sizeof (gpointer));
3846 g_hash_table_insert (domain_jit_info (domain)->method_code_hash, method, code_slot);
3848 mono_domain_unlock (domain);
3850 EMIT_NEW_PCONST (cfg, code_slot_ins, code_slot);
3852 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORE_MEMBASE_REG, obj->dreg, G_STRUCT_OFFSET (MonoDelegate, method_code), code_slot_ins->dreg);
3855 /* Set invoke_impl field */
/* Under AOT the trampoline address is patched at load time; otherwise it
 * can be created and embedded as a constant right now. */
3856 if (cfg->compile_aot) {
3857 EMIT_NEW_AOTCONST (cfg, tramp_ins, MONO_PATCH_INFO_DELEGATE_TRAMPOLINE, klass);
3859 trampoline = mono_create_delegate_trampoline (klass);
3860 EMIT_NEW_PCONST (cfg, tramp_ins, trampoline);
3862 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORE_MEMBASE_REG, obj->dreg, G_STRUCT_OFFSET (MonoDelegate, invoke_impl), tramp_ins->dreg);
3864 /* All the checks which are in mono_delegate_ctor () are done by the delegate trampoline */
/*
 * handle_array_new:
 *
 * Emit a call to the mono_array_new_va () icall for a RANK-dimensional
 * array allocation, with the dimension arguments in SP. Returns the call
 * instruction produced by mono_emit_native_call ().
 */
3870 handle_array_new (MonoCompile *cfg, int rank, MonoInst **sp, unsigned char *ip)
3872 MonoJitICallInfo *info;
3874 /* Need to register the icall so it gets an icall wrapper */
3875 info = mono_get_array_new_va_icall (rank);
3877 cfg->flags |= MONO_CFG_HAS_VARARGS;
3879 /* mono_array_new_va () needs a vararg calling convention */
/* LLVM cannot compile this method because of the vararg convention. */
3880 cfg->disable_llvm = TRUE;
3882 /* FIXME: This uses info->sig, but it should use the signature of the wrapper */
3883 return mono_emit_native_call (cfg, mono_icall_get_wrapper (info), info->sig, sp);
/*
 * mono_emit_load_got_addr:
 *
 * If a GOT variable exists and has not been materialized yet, emit an
 * OP_LOAD_GOTADDR at the very start of the entry basic block so the GOT
 * address is available before any instruction that needs it, and add a
 * dummy use in the exit block to keep the variable alive.
 */
3887 mono_emit_load_got_addr (MonoCompile *cfg)
3889 MonoInst *getaddr, *dummy_use;
/* Nothing to do without a got_var, or if it was already allocated. */
3891 if (!cfg->got_var || cfg->got_var_allocated)
3894 MONO_INST_NEW (cfg, getaddr, OP_LOAD_GOTADDR);
3895 getaddr->dreg = cfg->got_var->dreg;
3897 /* Add it to the start of the first bblock */
/* Prepend manually if the entry block already has code; otherwise a
 * plain append places it first anyway. */
3898 if (cfg->bb_entry->code) {
3899 getaddr->next = cfg->bb_entry->code;
3900 cfg->bb_entry->code = getaddr;
3903 MONO_ADD_INS (cfg->bb_entry, getaddr);
3905 cfg->got_var_allocated = TRUE;
3908 * Add a dummy use to keep the got_var alive, since real uses might
3909 * only be generated by the back ends.
3910 * Add it to end_bblock, so the variable's lifetime covers the whole
3912 * It would be better to make the usage of the got var explicit in all
3913 * cases when the backend needs it (i.e. calls, throw etc.), so this
3914 * wouldn't be needed.
3916 NEW_DUMMY_USE (cfg, dummy_use, cfg->got_var);
3917 MONO_ADD_INS (cfg->bb_exit, dummy_use);
/* Cached inline size limit, read once from MONO_INLINELIMIT (defaults to
 * INLINE_LENGTH_LIMIT). */
3920 static int inline_limit;
3921 static gboolean inline_limit_inited;
/*
 * mono_method_check_inlining:
 *
 * Decide whether METHOD is a candidate for inlining into the method being
 * compiled in CFG. Rejections cover generic sharing, excessive inline
 * depth, special method flags, IL size, class-initialization constraints
 * and declarative security; the elided branch bodies carry the
 * corresponding TRUE/FALSE returns.
 */
3924 mono_method_check_inlining (MonoCompile *cfg, MonoMethod *method)
3926 MonoMethodHeaderSummary header;
3928 #ifdef MONO_ARCH_SOFT_FLOAT
3929 MonoMethodSignature *sig = mono_method_signature (method);
/* No inlining while generic sharing is active in this compilation. */
3933 if (cfg->generic_sharing_context)
/* Hard cap on nested inlining. */
3936 if (cfg->inline_depth > 10)
3939 #ifdef MONO_ARCH_HAVE_LMF_OPS
/* Archs with LMF ops can inline some icall/pinvoke wrappers directly. */
3940 if (((method->iflags & METHOD_IMPL_ATTRIBUTE_INTERNAL_CALL) ||
3941 (method->flags & METHOD_ATTRIBUTE_PINVOKE_IMPL)) &&
3942 !MONO_TYPE_ISSTRUCT (signature->ret) && !mini_class_is_system_array (method->klass))
/* Summary fetch fails for methods whose header cannot be parsed. */
3947 if (!mono_method_get_header_summary (method, &header))
3950 /*runtime, icall and pinvoke are checked by summary call*/
3951 if ((method->iflags & METHOD_IMPL_ATTRIBUTE_NOINLINING) ||
3952 (method->iflags & METHOD_IMPL_ATTRIBUTE_SYNCHRONIZED) ||
3953 (method->klass->marshalbyref) ||
3957 /* also consider num_locals? */
3958 /* Do the size check early to avoid creating vtables */
3959 if (!inline_limit_inited) {
3960 if (getenv ("MONO_INLINELIMIT"))
3961 inline_limit = atoi (getenv ("MONO_INLINELIMIT"))
3963 inline_limit = INLINE_LENGTH_LIMIT;
3964 inline_limit_inited = TRUE;
3966 if (header.code_size >= inline_limit)
3970 * if we can initialize the class of the method right away, we do,
3971 * otherwise we don't allow inlining if the class needs initialization,
3972 * since it would mean inserting a call to mono_runtime_class_init()
3973 * inside the inlined code
3975 if (!(cfg->opt & MONO_OPT_SHARED)) {
3976 if (method->klass->flags & TYPE_ATTRIBUTE_BEFORE_FIELD_INIT) {
3977 if (cfg->run_cctors && method->klass->has_cctor) {
3978 /*FIXME it would easier and lazier to just use mono_class_try_get_vtable */
3979 if (!method->klass->runtime_info)
3980 /* No vtable created yet */
3982 vtable = mono_class_vtable (cfg->domain, method->klass);
3985 /* This makes so that inline cannot trigger */
3986 /* .cctors: too many apps depend on them */
3987 /* running with a specific order... */
3988 if (! vtable->initialized)
/* BeforeFieldInit cctors may be run eagerly at compile time. */
3990 mono_runtime_class_init (vtable);
3992 } else if (mono_class_needs_cctor_run (method->klass, NULL)) {
3993 if (!method->klass->runtime_info)
3994 /* No vtable created yet */
3996 vtable = mono_class_vtable (cfg->domain, method->klass);
3999 if (!vtable->initialized)
4004 * If we're compiling for shared code
4005 * the cctor will need to be run at aot method load time, for example,
4006 * or at the end of the compilation of the inlining method.
4008 if (mono_class_needs_cctor_run (method->klass, NULL) && !((method->klass->flags & TYPE_ATTRIBUTE_BEFORE_FIELD_INIT)))
4013 * CAS - do not inline methods with declarative security
4014 * Note: this has to be before any possible return TRUE;
4016 if (mono_method_has_declsec (method))
4019 #ifdef MONO_ARCH_SOFT_FLOAT
/* Soft-float targets cannot inline methods with R4 returns/arguments. */
4021 if (sig->ret && sig->ret->type == MONO_TYPE_R4)
4023 for (i = 0; i < sig->param_count; ++i)
4024 if (!sig->params [i]->byref && sig->params [i]->type == MONO_TYPE_R4)
/*
 * mini_field_access_needs_cctor_run:
 *
 * Decide whether a static field access on the class of VTABLE requires the
 * class constructor to be triggered at the access site. The elided branch
 * bodies carry the corresponding TRUE/FALSE returns.
 */
4032 mini_field_access_needs_cctor_run (MonoCompile *cfg, MonoMethod *method, MonoVTable *vtable)
/* Already initialized (and not AOT, where init state can't be trusted). */
4034 if (vtable->initialized && !cfg->compile_aot)
/* BeforeFieldInit classes don't require init at field access. */
4037 if (vtable->klass->flags & TYPE_ATTRIBUTE_BEFORE_FIELD_INIT)
4040 if (!mono_class_needs_cctor_run (vtable->klass, method))
/* Instance methods of the same class: init happened at instance creation. */
4043 if (! (method->flags & METHOD_ATTRIBUTE_STATIC) && (vtable->klass == method->klass))
4044 /* The initialization is already done before the method is called */
/*
 * mini_emit_ldelema_1_ins:
 *
 * Emit IR computing the address of element INDEX of the one-dimensional
 * array ARR with element class KLASS, optionally emitting a bounds check
 * when BCHECK is set. Returns the address instruction (stack type MP).
 */
4051 mini_emit_ldelema_1_ins (MonoCompile *cfg, MonoClass *klass, MonoInst *arr, MonoInst *index, gboolean bcheck)
4055 int mult_reg, add_reg, array_reg, index_reg, index2_reg;
4057 mono_class_init (klass);
4058 size = mono_class_array_element_size (klass);
4060 mult_reg = alloc_preg (cfg);
4061 array_reg = arr->dreg;
4062 index_reg = index->dreg;
4064 #if SIZEOF_REGISTER == 8
4065 /* The array reg is 64 bits but the index reg is only 32 */
/* LLVM keeps the index as-is; otherwise sign-extend it to 64 bits. */
4066 if (COMPILE_LLVM (cfg)) {
4068 index2_reg = index_reg;
4070 index2_reg = alloc_preg (cfg);
4071 MONO_EMIT_NEW_UNALU (cfg, OP_SEXT_I4, index2_reg, index_reg);
/* 32-bit targets: narrow an I8 index down to I4 first. */
4074 if (index->type == STACK_I8) {
4075 index2_reg = alloc_preg (cfg);
4076 MONO_EMIT_NEW_UNALU (cfg, OP_LCONV_TO_I4, index2_reg, index_reg);
4078 index2_reg = index_reg;
/* Bounds check against MonoArray.max_length (guarded by bcheck; the
 * enclosing condition is elided in this view). */
4083 MONO_EMIT_BOUNDS_CHECK (cfg, array_reg, MonoArray, max_length, index2_reg);
4085 #if defined(TARGET_X86) || defined(TARGET_AMD64)
/* Fast path: power-of-two element sizes map onto a single LEA with the
 * scale taken from fast_log2. */
4086 if (size == 1 || size == 2 || size == 4 || size == 8) {
4087 static const int fast_log2 [] = { 1, 0, 1, -1, 2, -1, -1, -1, 3 };
4089 EMIT_NEW_X86_LEA (cfg, ins, array_reg, index2_reg, fast_log2 [size], G_STRUCT_OFFSET (MonoArray, vector));
4090 ins->klass = mono_class_get_element_class (klass);
4091 ins->type = STACK_MP;
/* Generic path: addr = arr + index * size + offsetof (MonoArray, vector). */
4097 add_reg = alloc_ireg_mp (cfg);
4099 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_MUL_IMM, mult_reg, index2_reg, size);
4100 MONO_EMIT_NEW_BIALU (cfg, OP_PADD, add_reg, array_reg, mult_reg);
4101 NEW_BIALU_IMM (cfg, ins, OP_PADD_IMM, add_reg, add_reg, G_STRUCT_OFFSET (MonoArray, vector));
4102 ins->klass = mono_class_get_element_class (klass);
4103 ins->type = STACK_MP;
4104 MONO_ADD_INS (cfg->cbb, ins);
/* Only compiled when the arch has a real multiply (depends on OP_PMUL). */
4109 #ifndef MONO_ARCH_EMULATE_MUL_DIV
/*
 * mini_emit_ldelema_2_ins:
 *
 * Emit IR computing the address of element [index1, index2] of the
 * two-dimensional array ARR with element class KLASS, including
 * lower-bound adjustment and range checks for both dimensions.
 * Returns the address instruction (stack type MP).
 */
4111 mini_emit_ldelema_2_ins (MonoCompile *cfg, MonoClass *klass, MonoInst *arr, MonoInst *index_ins1, MonoInst *index_ins2)
4113 int bounds_reg = alloc_preg (cfg);
4114 int add_reg = alloc_ireg_mp (cfg);
4115 int mult_reg = alloc_preg (cfg);
4116 int mult2_reg = alloc_preg (cfg);
4117 int low1_reg = alloc_preg (cfg);
4118 int low2_reg = alloc_preg (cfg);
4119 int high1_reg = alloc_preg (cfg);
4120 int high2_reg = alloc_preg (cfg);
4121 int realidx1_reg = alloc_preg (cfg);
4122 int realidx2_reg = alloc_preg (cfg);
4123 int sum_reg = alloc_preg (cfg);
4128 mono_class_init (klass);
4129 size = mono_class_array_element_size (klass);
4131 index1 = index_ins1->dreg;
4132 index2 = index_ins2->dreg;
4134 /* range checking */
4135 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, bounds_reg,
4136 arr->dreg, G_STRUCT_OFFSET (MonoArray, bounds));
/* Dimension 1: realidx = index - lower_bound; unsigned compare against the
 * length so a negative result also triggers the exception. */
4138 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADI4_MEMBASE, low1_reg,
4139 bounds_reg, G_STRUCT_OFFSET (MonoArrayBounds, lower_bound));
4140 MONO_EMIT_NEW_BIALU (cfg, OP_PSUB, realidx1_reg, index1, low1_reg);
4141 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADI4_MEMBASE, high1_reg,
4142 bounds_reg, G_STRUCT_OFFSET (MonoArrayBounds, length));
4143 MONO_EMIT_NEW_BIALU (cfg, OP_COMPARE, -1, high1_reg, realidx1_reg);
4144 MONO_EMIT_NEW_COND_EXC (cfg, LE_UN, "IndexOutOfRangeException");
/* Dimension 2: same check, reading the second MonoArrayBounds entry. */
4146 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADI4_MEMBASE, low2_reg,
4147 bounds_reg, sizeof (MonoArrayBounds) + G_STRUCT_OFFSET (MonoArrayBounds, lower_bound));
4148 MONO_EMIT_NEW_BIALU (cfg, OP_PSUB, realidx2_reg, index2, low2_reg);
4149 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADI4_MEMBASE, high2_reg,
4150 bounds_reg, sizeof (MonoArrayBounds) + G_STRUCT_OFFSET (MonoArrayBounds, length));
4151 MONO_EMIT_NEW_BIALU (cfg, OP_COMPARE, -1, high2_reg, realidx2_reg);
4152 MONO_EMIT_NEW_COND_EXC (cfg, LE_UN, "IndexOutOfRangeException");
/* addr = arr + (realidx1 * len2 + realidx2) * size + offsetof (vector). */
4154 MONO_EMIT_NEW_BIALU (cfg, OP_PMUL, mult_reg, high2_reg, realidx1_reg);
4155 MONO_EMIT_NEW_BIALU (cfg, OP_PADD, sum_reg, mult_reg, realidx2_reg);
4156 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_PMUL_IMM, mult2_reg, sum_reg, size);
4157 MONO_EMIT_NEW_BIALU (cfg, OP_PADD, add_reg, mult2_reg, arr->dreg);
4158 NEW_BIALU_IMM (cfg, ins, OP_PADD_IMM, add_reg, add_reg, G_STRUCT_OFFSET (MonoArray, vector));
4160 ins->type = STACK_MP;
4162 MONO_ADD_INS (cfg->cbb, ins);
/*
 * mini_emit_ldelema_ins:
 *
 * Emit the address computation for an Address () call on an array: fast
 * inline paths for rank 1 and (optionally) rank 2, otherwise a call to the
 * generic array-address marshalling wrapper. IS_SET indicates a setter, in
 * which case the value argument is not counted as an index.
 */
4169 mini_emit_ldelema_ins (MonoCompile *cfg, MonoMethod *cmethod, MonoInst **sp, unsigned char *ip, gboolean is_set)
4173 MonoMethod *addr_method;
/* Rank = index parameter count (setters carry one extra value argument). */
4176 rank = mono_method_signature (cmethod)->param_count - (is_set? 1: 0);
/* Rank 1 fast path, with bounds check. */
4179 return mini_emit_ldelema_1_ins (cfg, cmethod->klass->element_class, sp [0], sp [1], TRUE);
4181 #ifndef MONO_ARCH_EMULATE_MUL_DIV
4182 /* emit_ldelema_2 depends on OP_LMUL */
4183 if (rank == 2 && (cfg->opt & MONO_OPT_INTRINS)) {
4184 return mini_emit_ldelema_2_ins (cfg, cmethod->klass->element_class, sp [0], sp [1], sp [2]);
/* General case: call the runtime-generated array address helper. */
4188 element_size = mono_class_array_element_size (cmethod->klass->element_class);
4189 addr_method = mono_marshal_get_array_address (rank, element_size);
4190 addr = mono_emit_method_call (cfg, addr_method, sp, NULL);
/* Default break policy: honor every breakpoint. */
4195 static MonoBreakPolicy
4196 always_insert_breakpoint (MonoMethod *method)
4198 return MONO_BREAK_POLICY_ALWAYS;
/* Current policy callback; replaced via mono_set_break_policy (). */
4201 static MonoBreakPolicyFunc break_policy_func = always_insert_breakpoint;
4204 * mono_set_break_policy:
4205 * policy_callback: the new callback function
4207 * Allow embedders to decide whether to actually obey breakpoint instructions
4208 * (both break IL instructions and Debugger.Break () method calls), for example
4209 * to not allow an app to be aborted by a perfectly valid IL opcode when executing
4210 * untrusted or semi-trusted code.
4212 * @policy_callback will be called every time a break point instruction needs to
4213 * be inserted with the method argument being the method that calls Debugger.Break()
4214 * or has the IL break instruction. The callback should return #MONO_BREAK_POLICY_NEVER
4215 * if it wants the breakpoint to not be effective in the given method.
4216 * #MONO_BREAK_POLICY_ALWAYS is the default.
4219 mono_set_break_policy (MonoBreakPolicyFunc policy_callback)
/* NULL resets the policy back to the always-break default. */
4221 if (policy_callback)
4222 break_policy_func = policy_callback;
4224 break_policy_func = always_insert_breakpoint;
/*
 * should_insert_brekpoint: (sic — name kept for existing callers)
 *
 * Consult the registered break policy to decide whether a breakpoint for
 * METHOD should actually be emitted; the elided case bodies carry the
 * TRUE/FALSE returns.
 */
4228 should_insert_brekpoint (MonoMethod *method) {
4229 switch (break_policy_func (method)) {
4230 case MONO_BREAK_POLICY_ALWAYS:
4232 case MONO_BREAK_POLICY_NEVER:
/* ON_DBG: only effective when running under the Mono debugger. */
4234 case MONO_BREAK_POLICY_ON_DBG:
4235 return mono_debug_using_mono_debugger ();
/* Unknown value from the embedder's callback: warn (default elided). */
4237 g_warning ("Incorrect value returned from break policy callback");
4242 /* optimize the simple GetGenericValueImpl/SetGenericValueImpl generic icalls */
/*
 * emit_array_generic_access:
 *
 * Emit an inline element load (IS_SET == 0) or store (IS_SET != 0) for
 * the array in args [0] at index args [1], moving the value through the
 * location referenced by args [2]. The element type comes from the third
 * signature parameter.
 */
4244 emit_array_generic_access (MonoCompile *cfg, MonoMethodSignature *fsig, MonoInst **args, int is_set)
4246 MonoInst *addr, *store, *load;
4247 MonoClass *eklass = mono_class_from_mono_type (fsig->params [2]);
4249 /* the bounds check is already done by the callers */
4250 addr = mini_emit_ldelema_1_ins (cfg, eklass, args [0], args [1], FALSE);
/* Set: *element = *args [2]. */
4252 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, load, &eklass->byval_arg, args [2]->dreg, 0);
4253 EMIT_NEW_STORE_MEMBASE_TYPE (cfg, store, &eklass->byval_arg, addr->dreg, 0, load->dreg);
/* Get: *args [2] = *element. */
4255 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, load, &eklass->byval_arg, addr->dreg, 0);
4256 EMIT_NEW_STORE_MEMBASE_TYPE (cfg, store, &eklass->byval_arg, args [2]->dreg, 0, load->dreg);
/*
 * mini_emit_inst_for_ctor:
 *
 * Intrinsic expansion hook for constructor calls: currently only tries the
 * SIMD intrinsics when MONO_OPT_SIMD is enabled; NULL (elided return) means
 * no intrinsic applies and the ctor is compiled as a normal call.
 */
4262 mini_emit_inst_for_ctor (MonoCompile *cfg, MonoMethod *cmethod, MonoMethodSignature *fsig, MonoInst **args)
4264 MonoInst *ins = NULL;
4265 #ifdef MONO_ARCH_SIMD_INTRINSICS
4266 if (cfg->opt & MONO_OPT_SIMD) {
4267 ins = mono_emit_simd_intrinsics (cfg, cmethod, fsig, args);
/*
 * emit_memory_barrier:
 *
 * Append an OP_MEMORY_BARRIER instruction of the given KIND (backend
 * memory-barrier kind) to the current basic block and return it.
 */
4277 emit_memory_barrier (MonoCompile *cfg, int kind)
4279 MonoInst *ins = NULL;
4280 MONO_INST_NEW (cfg, ins, OP_MEMORY_BARRIER);
4281 MONO_ADD_INS (cfg->cbb, ins);
4282 ins->backend.memory_barrier_kind = kind;
4288 mini_emit_inst_for_method (MonoCompile *cfg, MonoMethod *cmethod, MonoMethodSignature *fsig, MonoInst **args)
4290 MonoInst *ins = NULL;
4292 static MonoClass *runtime_helpers_class = NULL;
4293 if (! runtime_helpers_class)
4294 runtime_helpers_class = mono_class_from_name (mono_defaults.corlib,
4295 "System.Runtime.CompilerServices", "RuntimeHelpers");
4297 if (cmethod->klass == mono_defaults.string_class) {
4298 if (strcmp (cmethod->name, "get_Chars") == 0) {
4299 int dreg = alloc_ireg (cfg);
4300 int index_reg = alloc_preg (cfg);
4301 int mult_reg = alloc_preg (cfg);
4302 int add_reg = alloc_preg (cfg);
4304 #if SIZEOF_REGISTER == 8
4305 /* The array reg is 64 bits but the index reg is only 32 */
4306 MONO_EMIT_NEW_UNALU (cfg, OP_SEXT_I4, index_reg, args [1]->dreg);
4308 index_reg = args [1]->dreg;
4310 MONO_EMIT_BOUNDS_CHECK (cfg, args [0]->dreg, MonoString, length, index_reg);
4312 #if defined(TARGET_X86) || defined(TARGET_AMD64)
4313 EMIT_NEW_X86_LEA (cfg, ins, args [0]->dreg, index_reg, 1, G_STRUCT_OFFSET (MonoString, chars));
4314 add_reg = ins->dreg;
4315 /* Avoid a warning */
4317 EMIT_NEW_LOAD_MEMBASE (cfg, ins, OP_LOADU2_MEMBASE, dreg,
4320 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_SHL_IMM, mult_reg, index_reg, 1);
4321 MONO_EMIT_NEW_BIALU (cfg, OP_PADD, add_reg, mult_reg, args [0]->dreg);
4322 EMIT_NEW_LOAD_MEMBASE (cfg, ins, OP_LOADU2_MEMBASE, dreg,
4323 add_reg, G_STRUCT_OFFSET (MonoString, chars));
4325 type_from_op (ins, NULL, NULL);
4327 } else if (strcmp (cmethod->name, "get_Length") == 0) {
4328 int dreg = alloc_ireg (cfg);
4329 /* Decompose later to allow more optimizations */
4330 EMIT_NEW_UNALU (cfg, ins, OP_STRLEN, dreg, args [0]->dreg);
4331 ins->type = STACK_I4;
4332 ins->flags |= MONO_INST_FAULT;
4333 cfg->cbb->has_array_access = TRUE;
4334 cfg->flags |= MONO_CFG_HAS_ARRAY_ACCESS;
4337 } else if (strcmp (cmethod->name, "InternalSetChar") == 0) {
4338 int mult_reg = alloc_preg (cfg);
4339 int add_reg = alloc_preg (cfg);
4341 /* The corlib functions check for oob already. */
4342 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_SHL_IMM, mult_reg, args [1]->dreg, 1);
4343 MONO_EMIT_NEW_BIALU (cfg, OP_PADD, add_reg, mult_reg, args [0]->dreg);
4344 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI2_MEMBASE_REG, add_reg, G_STRUCT_OFFSET (MonoString, chars), args [2]->dreg);
4345 return cfg->cbb->last_ins;
4348 } else if (cmethod->klass == mono_defaults.object_class) {
4350 if (strcmp (cmethod->name, "GetType") == 0) {
4351 int dreg = alloc_ireg_ref (cfg);
4352 int vt_reg = alloc_preg (cfg);
4353 MONO_EMIT_NEW_LOAD_MEMBASE_FAULT (cfg, vt_reg, args [0]->dreg, G_STRUCT_OFFSET (MonoObject, vtable));
4354 EMIT_NEW_LOAD_MEMBASE (cfg, ins, OP_LOAD_MEMBASE, dreg, vt_reg, G_STRUCT_OFFSET (MonoVTable, type));
4355 type_from_op (ins, NULL, NULL);
4358 #if !defined(MONO_ARCH_EMULATE_MUL_DIV)
4359 } else if (strcmp (cmethod->name, "InternalGetHashCode") == 0 && !mono_gc_is_moving ()) {
4360 int dreg = alloc_ireg (cfg);
4361 int t1 = alloc_ireg (cfg);
4363 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_SHL_IMM, t1, args [0]->dreg, 3);
4364 EMIT_NEW_BIALU_IMM (cfg, ins, OP_MUL_IMM, dreg, t1, 2654435761u);
4365 ins->type = STACK_I4;
4369 } else if (strcmp (cmethod->name, ".ctor") == 0) {
4370 MONO_INST_NEW (cfg, ins, OP_NOP);
4371 MONO_ADD_INS (cfg->cbb, ins);
4375 } else if (cmethod->klass == mono_defaults.array_class) {
4376 if (strcmp (cmethod->name + 1, "etGenericValueImpl") == 0)
4377 return emit_array_generic_access (cfg, fsig, args, *cmethod->name == 'S');
4379 #ifndef MONO_BIG_ARRAYS
4381 * This is an inline version of GetLength/GetLowerBound(0) used frequently in
4384 if ((strcmp (cmethod->name, "GetLength") == 0 || strcmp (cmethod->name, "GetLowerBound") == 0) && args [1]->opcode == OP_ICONST && args [1]->inst_c0 == 0) {
4385 int dreg = alloc_ireg (cfg);
4386 int bounds_reg = alloc_ireg_mp (cfg);
4387 MonoBasicBlock *end_bb, *szarray_bb;
4388 gboolean get_length = strcmp (cmethod->name, "GetLength") == 0;
4390 NEW_BBLOCK (cfg, end_bb);
4391 NEW_BBLOCK (cfg, szarray_bb);
4393 EMIT_NEW_LOAD_MEMBASE_FAULT (cfg, ins, OP_LOAD_MEMBASE, bounds_reg,
4394 args [0]->dreg, G_STRUCT_OFFSET (MonoArray, bounds));
4395 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, bounds_reg, 0);
4396 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_IBEQ, szarray_bb);
4397 /* Non-szarray case */
4399 EMIT_NEW_LOAD_MEMBASE (cfg, ins, OP_LOADI4_MEMBASE, dreg,
4400 bounds_reg, G_STRUCT_OFFSET (MonoArrayBounds, length));
4402 EMIT_NEW_LOAD_MEMBASE (cfg, ins, OP_LOADI4_MEMBASE, dreg,
4403 bounds_reg, G_STRUCT_OFFSET (MonoArrayBounds, lower_bound));
4404 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, end_bb);
4405 MONO_START_BB (cfg, szarray_bb);
4408 EMIT_NEW_LOAD_MEMBASE (cfg, ins, OP_LOADI4_MEMBASE, dreg,
4409 args [0]->dreg, G_STRUCT_OFFSET (MonoArray, max_length));
4411 MONO_EMIT_NEW_ICONST (cfg, dreg, 0);
4412 MONO_START_BB (cfg, end_bb);
4414 EMIT_NEW_UNALU (cfg, ins, OP_MOVE, dreg, dreg);
4415 ins->type = STACK_I4;
4421 if (cmethod->name [0] != 'g')
4424 if (strcmp (cmethod->name, "get_Rank") == 0) {
4425 int dreg = alloc_ireg (cfg);
4426 int vtable_reg = alloc_preg (cfg);
4427 MONO_EMIT_NEW_LOAD_MEMBASE_OP_FAULT (cfg, OP_LOAD_MEMBASE, vtable_reg,
4428 args [0]->dreg, G_STRUCT_OFFSET (MonoObject, vtable));
4429 EMIT_NEW_LOAD_MEMBASE (cfg, ins, OP_LOADU1_MEMBASE, dreg,
4430 vtable_reg, G_STRUCT_OFFSET (MonoVTable, rank));
4431 type_from_op (ins, NULL, NULL);
4434 } else if (strcmp (cmethod->name, "get_Length") == 0) {
4435 int dreg = alloc_ireg (cfg);
4437 EMIT_NEW_LOAD_MEMBASE_FAULT (cfg, ins, OP_LOADI4_MEMBASE, dreg,
4438 args [0]->dreg, G_STRUCT_OFFSET (MonoArray, max_length));
4439 type_from_op (ins, NULL, NULL);
4444 } else if (cmethod->klass == runtime_helpers_class) {
4446 if (strcmp (cmethod->name, "get_OffsetToStringData") == 0) {
4447 EMIT_NEW_ICONST (cfg, ins, G_STRUCT_OFFSET (MonoString, chars));
4451 } else if (cmethod->klass == mono_defaults.thread_class) {
4452 if (strcmp (cmethod->name, "SpinWait_nop") == 0) {
4453 MONO_INST_NEW (cfg, ins, OP_RELAXED_NOP);
4454 MONO_ADD_INS (cfg->cbb, ins);
4456 } else if (strcmp (cmethod->name, "MemoryBarrier") == 0) {
4457 return emit_memory_barrier (cfg, FullBarrier);
4459 } else if (cmethod->klass == mono_defaults.monitor_class) {
4461 /* FIXME this should be integrated to the check below once we support the trampoline version */
4462 #if defined(MONO_ARCH_ENABLE_MONITOR_IL_FASTPATH)
4463 if (strcmp (cmethod->name, "Enter") == 0 && fsig->param_count == 2) {
4464 MonoMethod *fast_method = NULL;
4466 /* Avoid infinite recursion */
4467 if (cfg->method->wrapper_type == MONO_WRAPPER_UNKNOWN && !strcmp (cfg->method->name, "FastMonitorEnterV4"))
4470 fast_method = mono_monitor_get_fast_path (cmethod);
4474 return (MonoInst*)mono_emit_method_call (cfg, fast_method, args, NULL);
4478 #if defined(MONO_ARCH_MONITOR_OBJECT_REG)
4479 if (strcmp (cmethod->name, "Enter") == 0 && fsig->param_count == 1) {
4482 if (COMPILE_LLVM (cfg)) {
4484 * Pass the argument normally, the LLVM backend will handle the
4485 * calling convention problems.
4487 call = (MonoCallInst*)mono_emit_abs_call (cfg, MONO_PATCH_INFO_MONITOR_ENTER, NULL, helper_sig_monitor_enter_exit_trampoline_llvm, args);
4489 call = (MonoCallInst*)mono_emit_abs_call (cfg, MONO_PATCH_INFO_MONITOR_ENTER,
4490 NULL, helper_sig_monitor_enter_exit_trampoline, NULL);
4491 mono_call_inst_add_outarg_reg (cfg, call, args [0]->dreg,
4492 MONO_ARCH_MONITOR_OBJECT_REG, FALSE);
4495 return (MonoInst*)call;
4496 } else if (strcmp (cmethod->name, "Exit") == 0) {
4499 if (COMPILE_LLVM (cfg)) {
4500 call = (MonoCallInst*)mono_emit_abs_call (cfg, MONO_PATCH_INFO_MONITOR_EXIT, NULL, helper_sig_monitor_enter_exit_trampoline_llvm, args);
4502 call = (MonoCallInst*)mono_emit_abs_call (cfg, MONO_PATCH_INFO_MONITOR_EXIT,
4503 NULL, helper_sig_monitor_enter_exit_trampoline, NULL);
4504 mono_call_inst_add_outarg_reg (cfg, call, args [0]->dreg,
4505 MONO_ARCH_MONITOR_OBJECT_REG, FALSE);
4508 return (MonoInst*)call;
4510 #elif defined(MONO_ARCH_ENABLE_MONITOR_IL_FASTPATH)
4512 MonoMethod *fast_method = NULL;
4514 /* Avoid infinite recursion */
4515 if (cfg->method->wrapper_type == MONO_WRAPPER_UNKNOWN &&
4516 (strcmp (cfg->method->name, "FastMonitorEnter") == 0 ||
4517 strcmp (cfg->method->name, "FastMonitorExit") == 0))
4520 if ((strcmp (cmethod->name, "Enter") == 0 && fsig->param_count == 1) ||
4521 strcmp (cmethod->name, "Exit") == 0)
4522 fast_method = mono_monitor_get_fast_path (cmethod);
4526 return (MonoInst*)mono_emit_method_call (cfg, fast_method, args, NULL);
4529 } else if (cmethod->klass->image == mono_defaults.corlib &&
4530 (strcmp (cmethod->klass->name_space, "System.Threading") == 0) &&
4531 (strcmp (cmethod->klass->name, "Interlocked") == 0)) {
4534 #if SIZEOF_REGISTER == 8
4535 if (strcmp (cmethod->name, "Read") == 0 && (fsig->params [0]->type == MONO_TYPE_I8)) {
4536 /* 64 bit reads are already atomic */
4537 MONO_INST_NEW (cfg, ins, OP_LOADI8_MEMBASE);
4538 ins->dreg = mono_alloc_preg (cfg);
4539 ins->inst_basereg = args [0]->dreg;
4540 ins->inst_offset = 0;
4541 MONO_ADD_INS (cfg->cbb, ins);
4545 #ifdef MONO_ARCH_HAVE_ATOMIC_ADD
4546 if (strcmp (cmethod->name, "Increment") == 0) {
4547 MonoInst *ins_iconst;
4550 if (fsig->params [0]->type == MONO_TYPE_I4)
4551 opcode = OP_ATOMIC_ADD_NEW_I4;
4552 #if SIZEOF_REGISTER == 8
4553 else if (fsig->params [0]->type == MONO_TYPE_I8)
4554 opcode = OP_ATOMIC_ADD_NEW_I8;
4557 MONO_INST_NEW (cfg, ins_iconst, OP_ICONST);
4558 ins_iconst->inst_c0 = 1;
4559 ins_iconst->dreg = mono_alloc_ireg (cfg);
4560 MONO_ADD_INS (cfg->cbb, ins_iconst);
4562 MONO_INST_NEW (cfg, ins, opcode);
4563 ins->dreg = mono_alloc_ireg (cfg);
4564 ins->inst_basereg = args [0]->dreg;
4565 ins->inst_offset = 0;
4566 ins->sreg2 = ins_iconst->dreg;
4567 ins->type = (opcode == OP_ATOMIC_ADD_NEW_I4) ? STACK_I4 : STACK_I8;
4568 MONO_ADD_INS (cfg->cbb, ins);
4570 } else if (strcmp (cmethod->name, "Decrement") == 0) {
4571 MonoInst *ins_iconst;
4574 if (fsig->params [0]->type == MONO_TYPE_I4)
4575 opcode = OP_ATOMIC_ADD_NEW_I4;
4576 #if SIZEOF_REGISTER == 8
4577 else if (fsig->params [0]->type == MONO_TYPE_I8)
4578 opcode = OP_ATOMIC_ADD_NEW_I8;
4581 MONO_INST_NEW (cfg, ins_iconst, OP_ICONST);
4582 ins_iconst->inst_c0 = -1;
4583 ins_iconst->dreg = mono_alloc_ireg (cfg);
4584 MONO_ADD_INS (cfg->cbb, ins_iconst);
4586 MONO_INST_NEW (cfg, ins, opcode);
4587 ins->dreg = mono_alloc_ireg (cfg);
4588 ins->inst_basereg = args [0]->dreg;
4589 ins->inst_offset = 0;
4590 ins->sreg2 = ins_iconst->dreg;
4591 ins->type = (opcode == OP_ATOMIC_ADD_NEW_I4) ? STACK_I4 : STACK_I8;
4592 MONO_ADD_INS (cfg->cbb, ins);
4594 } else if (strcmp (cmethod->name, "Add") == 0) {
4597 if (fsig->params [0]->type == MONO_TYPE_I4)
4598 opcode = OP_ATOMIC_ADD_NEW_I4;
4599 #if SIZEOF_REGISTER == 8
4600 else if (fsig->params [0]->type == MONO_TYPE_I8)
4601 opcode = OP_ATOMIC_ADD_NEW_I8;
4605 MONO_INST_NEW (cfg, ins, opcode);
4606 ins->dreg = mono_alloc_ireg (cfg);
4607 ins->inst_basereg = args [0]->dreg;
4608 ins->inst_offset = 0;
4609 ins->sreg2 = args [1]->dreg;
4610 ins->type = (opcode == OP_ATOMIC_ADD_NEW_I4) ? STACK_I4 : STACK_I8;
4611 MONO_ADD_INS (cfg->cbb, ins);
4614 #endif /* MONO_ARCH_HAVE_ATOMIC_ADD */
4616 #ifdef MONO_ARCH_HAVE_ATOMIC_EXCHANGE
4617 if (strcmp (cmethod->name, "Exchange") == 0) {
4619 gboolean is_ref = fsig->params [0]->type == MONO_TYPE_OBJECT;
4621 if (fsig->params [0]->type == MONO_TYPE_I4)
4622 opcode = OP_ATOMIC_EXCHANGE_I4;
4623 #if SIZEOF_REGISTER == 8
4624 else if (is_ref || (fsig->params [0]->type == MONO_TYPE_I8) ||
4625 (fsig->params [0]->type == MONO_TYPE_I))
4626 opcode = OP_ATOMIC_EXCHANGE_I8;
4628 else if (is_ref || (fsig->params [0]->type == MONO_TYPE_I))
4629 opcode = OP_ATOMIC_EXCHANGE_I4;
4634 MONO_INST_NEW (cfg, ins, opcode);
4635 ins->dreg = is_ref ? mono_alloc_ireg_ref (cfg) : mono_alloc_ireg (cfg);
4636 ins->inst_basereg = args [0]->dreg;
4637 ins->inst_offset = 0;
4638 ins->sreg2 = args [1]->dreg;
4639 MONO_ADD_INS (cfg->cbb, ins);
4641 switch (fsig->params [0]->type) {
4643 ins->type = STACK_I4;
4647 ins->type = STACK_I8;
4649 case MONO_TYPE_OBJECT:
4650 ins->type = STACK_OBJ;
4653 g_assert_not_reached ();
4656 if (cfg->gen_write_barriers && is_ref)
4657 emit_write_barrier (cfg, args [0], args [1], -1);
4659 #endif /* MONO_ARCH_HAVE_ATOMIC_EXCHANGE */
4661 #ifdef MONO_ARCH_HAVE_ATOMIC_CAS
4662 if ((strcmp (cmethod->name, "CompareExchange") == 0)) {
4664 gboolean is_ref = mini_type_is_reference (cfg, fsig->params [1]);
4665 if (fsig->params [1]->type == MONO_TYPE_I4)
4667 else if (is_ref || fsig->params [1]->type == MONO_TYPE_I)
4668 size = sizeof (gpointer);
4669 else if (sizeof (gpointer) == 8 && fsig->params [1]->type == MONO_TYPE_I8)
4672 MONO_INST_NEW (cfg, ins, OP_ATOMIC_CAS_I4);
4673 ins->dreg = is_ref ? alloc_ireg_ref (cfg) : alloc_ireg (cfg);
4674 ins->sreg1 = args [0]->dreg;
4675 ins->sreg2 = args [1]->dreg;
4676 ins->sreg3 = args [2]->dreg;
4677 ins->type = STACK_I4;
4678 MONO_ADD_INS (cfg->cbb, ins);
4679 } else if (size == 8) {
4680 MONO_INST_NEW (cfg, ins, OP_ATOMIC_CAS_I8);
4681 ins->dreg = is_ref ? alloc_ireg_ref (cfg) : alloc_ireg (cfg);
4682 ins->sreg1 = args [0]->dreg;
4683 ins->sreg2 = args [1]->dreg;
4684 ins->sreg3 = args [2]->dreg;
4685 ins->type = STACK_I8;
4686 MONO_ADD_INS (cfg->cbb, ins);
4688 /* g_assert_not_reached (); */
4690 if (cfg->gen_write_barriers && is_ref)
4691 emit_write_barrier (cfg, args [0], args [1], -1);
4693 #endif /* MONO_ARCH_HAVE_ATOMIC_CAS */
4697 } else if (cmethod->klass->image == mono_defaults.corlib) {
4698 if (cmethod->name [0] == 'B' && strcmp (cmethod->name, "Break") == 0
4699 && strcmp (cmethod->klass->name, "Debugger") == 0) {
4700 if (should_insert_brekpoint (cfg->method)) {
4701 ins = mono_emit_jit_icall (cfg, mono_debugger_agent_user_break, NULL);
4703 MONO_INST_NEW (cfg, ins, OP_NOP);
4704 MONO_ADD_INS (cfg->cbb, ins);
4708 if (cmethod->name [0] == 'g' && strcmp (cmethod->name, "get_IsRunningOnWindows") == 0
4709 && strcmp (cmethod->klass->name, "Environment") == 0) {
4711 EMIT_NEW_ICONST (cfg, ins, 1);
4713 EMIT_NEW_ICONST (cfg, ins, 0);
4717 } else if (cmethod->klass == mono_defaults.math_class) {
4719 * There is general branches code for Min/Max, but it does not work for
4721 * http://everything2.com/?node_id=1051618
4725 #ifdef MONO_ARCH_SIMD_INTRINSICS
4726 if (cfg->opt & MONO_OPT_SIMD) {
4727 ins = mono_emit_simd_intrinsics (cfg, cmethod, fsig, args);
4733 return mono_arch_emit_inst_for_method (cfg, cmethod, fsig, args);
4737 * This entry point could be used later for arbitrary method
/*
 * mini_redirect_call:
 *   Redirect selected runtime-internal calls to faster implementations.
 *   Visible here: String.InternalAllocateStr is rerouted to the GC's
 *   managed allocator when allocation profiling and MONO_OPT_SHARED are
 *   both off.
 *   NOTE(review): this listing elides some lines (fallthrough paths and
 *   closing braces are not visible); code kept byte-identical.
 */
4740 inline static MonoInst*
4741 mini_redirect_call (MonoCompile *cfg, MonoMethod *method,
4742 MonoMethodSignature *signature, MonoInst **args, MonoInst *this)
4744 if (method->klass == mono_defaults.string_class) {
4745 /* managed string allocation support */
4746 if (strcmp (method->name, "InternalAllocateStr") == 0 && !(mono_profiler_events & MONO_PROFILE_ALLOCATIONS) && !(cfg->opt & MONO_OPT_SHARED)) {
4747 MonoInst *iargs [2];
4748 MonoVTable *vtable = mono_class_vtable (cfg->domain, method->klass);
4749 MonoMethod *managed_alloc = NULL;
4751 g_assert (vtable); /* Should not fail since it is System.String */
/* The managed allocator is unavailable when cross-compiling. */
4752 #ifndef MONO_CROSS_COMPILE
4753 managed_alloc = mono_gc_get_managed_allocator (vtable, FALSE);
/* Call the managed allocator with (vtable, length). */
4757 EMIT_NEW_VTABLECONST (cfg, iargs [0], vtable);
4758 iargs [1] = args [0];
4759 return mono_emit_method_call (cfg, managed_alloc, iargs, this);
/*
 * mono_save_args:
 *   Create an OP_LOCAL variable for each argument (including the implicit
 *   "this") of an inlined callee and store the stack values SP into them,
 *   so the callee's body can reference them as normal arguments.
 */
4766 mono_save_args (MonoCompile *cfg, MonoMethodSignature *sig, MonoInst **sp)
4768 MonoInst *store, *temp;
4771 for (i = 0; i < sig->param_count + sig->hasthis; ++i) {
/* For "this" the type comes from the stack entry, not the signature. */
4772 MonoType *argtype = (sig->hasthis && (i == 0)) ? type_from_stack_type (*sp) : sig->params [i - sig->hasthis];
4775 * FIXME: We should use *args++ = sp [0], but that would mean the arg
4776 * would be different than the MonoInst's used to represent arguments, and
4777 * the ldelema implementation can't deal with that.
4778 * Solution: When ldelema is used on an inline argument, create a var for
4779 * it, emit ldelema on that var, and emit the saving code below in
4780 * inline_method () if needed.
4782 temp = mono_compile_create_var (cfg, argtype, OP_LOCAL);
4783 cfg->args [i] = temp;
4784 /* This uses cfg->args [i] which is set by the preceding line */
4785 EMIT_NEW_ARGSTORE (cfg, store, i, *sp);
4786 store->cil_code = sp [0]->cil_code;
/* Debug knobs: restrict inlining by callee/caller name prefix via env vars. */
4791 #define MONO_INLINE_CALLED_LIMITED_METHODS 1
4792 #define MONO_INLINE_CALLER_LIMITED_METHODS 1
4794 #if (MONO_INLINE_CALLED_LIMITED_METHODS)
/*
 * check_inline_called_method_name_limit:
 *   Return TRUE iff the callee's full name starts with the prefix given in
 *   MONO_INLINE_CALLED_METHOD_NAME_LIMIT (or the env var is unset/empty).
 *   The result of getenv() is cached in a function-local static on first use.
 */
4796 check_inline_called_method_name_limit (MonoMethod *called_method)
4799 static char *limit = NULL;
4801 if (limit == NULL) {
4802 char *limit_string = getenv ("MONO_INLINE_CALLED_METHOD_NAME_LIMIT");
4804 if (limit_string != NULL)
4805 limit = limit_string;
/* No env var: empty limit means "no restriction". */
4807 limit = (char *) "";
4810 if (limit [0] != '\0') {
4811 char *called_method_name = mono_method_full_name (called_method, TRUE);
/* Prefix comparison only: match the first strlen (limit) chars. */
4813 strncmp_result = strncmp (called_method_name, limit, strlen (limit));
4814 g_free (called_method_name);
4816 //return (strncmp_result <= 0);
4817 return (strncmp_result == 0);
4824 #if (MONO_INLINE_CALLER_LIMITED_METHODS)
/*
 * check_inline_caller_method_name_limit:
 *   Mirror of check_inline_called_method_name_limit for the CALLER:
 *   return TRUE iff the caller's full name starts with the prefix in
 *   MONO_INLINE_CALLER_METHOD_NAME_LIMIT (or the env var is unset/empty).
 */
4826 check_inline_caller_method_name_limit (MonoMethod *caller_method)
4829 static char *limit = NULL;
4831 if (limit == NULL) {
4832 char *limit_string = getenv ("MONO_INLINE_CALLER_METHOD_NAME_LIMIT");
4833 if (limit_string != NULL) {
4834 limit = limit_string;
/* No env var: empty limit means "no restriction". */
4836 limit = (char *) "";
4840 if (limit [0] != '\0') {
4841 char *caller_method_name = mono_method_full_name (caller_method, TRUE);
/* Prefix comparison only. */
4843 strncmp_result = strncmp (caller_method_name, limit, strlen (limit));
4844 g_free (caller_method_name);
4846 //return (strncmp_result <= 0);
4847 return (strncmp_result == 0);
/*
 * inline_method:
 *   Try to inline CMETHOD's IL into the current compile at IP.  Saves the
 *   cfg state (locals, args, bblock maps, generic context, ...), runs
 *   mono_method_to_ir () on the callee between fresh start/end bblocks,
 *   then either commits (merging the new bblocks into the CFG) or rolls
 *   everything back if the callee was too costly or failed to convert.
 *   Returns via elided code paths not visible in this listing.
 */
4855 inline_method (MonoCompile *cfg, MonoMethod *cmethod, MonoMethodSignature *fsig, MonoInst **sp,
4856 guchar *ip, guint real_offset, GList *dont_inline, gboolean inline_always)
4858 MonoInst *ins, *rvar = NULL;
4859 MonoMethodHeader *cheader;
4860 MonoBasicBlock *ebblock, *sbblock;
4862 MonoMethod *prev_inlined_method;
4863 MonoInst **prev_locals, **prev_args;
4864 MonoType **prev_arg_types;
4865 guint prev_real_offset;
4866 GHashTable *prev_cbb_hash;
4867 MonoBasicBlock **prev_cil_offset_to_bb;
4868 MonoBasicBlock *prev_cbb;
4869 unsigned char* prev_cil_start;
4870 guint32 prev_cil_offset_to_bb_len;
4871 MonoMethod *prev_current_method;
4872 MonoGenericContext *prev_generic_context;
4873 gboolean ret_var_set, prev_ret_var_set, virtual = FALSE;
4875 g_assert (cfg->exception_type == MONO_EXCEPTION_NONE);
/* Optional debug filters on callee/caller names (see env vars above). */
4877 #if (MONO_INLINE_CALLED_LIMITED_METHODS)
4878 if ((! inline_always) && ! check_inline_called_method_name_limit (cmethod))
4881 #if (MONO_INLINE_CALLER_LIMITED_METHODS)
4882 if ((! inline_always) && ! check_inline_caller_method_name_limit (cfg->method))
4886 if (cfg->verbose_level > 2)
4887 printf ("INLINE START %p %s -> %s\n", cmethod, mono_method_full_name (cfg->method, TRUE), mono_method_full_name (cmethod, TRUE));
/* Count each method as inlineable only once. */
4889 if (!cmethod->inline_info) {
4890 cfg->stat_inlineable_methods++;
4891 cmethod->inline_info = 1;
4894 /* allocate local variables */
4895 cheader = mono_method_get_header (cmethod);
4897 if (cheader == NULL || mono_loader_get_last_error ()) {
4898 MonoLoaderError *error = mono_loader_get_last_error ();
4901 mono_metadata_free_mh (cheader);
/* Only surface the loader error if inlining was mandatory. */
4902 if (inline_always && error)
4903 mono_cfg_set_exception (cfg, error->exception_type);
4905 mono_loader_clear_error ();
4909 /*Must verify before creating locals as it can cause the JIT to assert.*/
4910 if (mono_compile_is_broken (cfg, cmethod, FALSE)) {
4911 mono_metadata_free_mh (cheader);
4915 /* allocate space to store the return value */
4916 if (!MONO_TYPE_IS_VOID (fsig->ret)) {
4917 rvar = mono_compile_create_var (cfg, fsig->ret, OP_LOCAL);
/* Swap in the callee's locals; the caller's are restored below. */
4920 prev_locals = cfg->locals;
4921 cfg->locals = mono_mempool_alloc0 (cfg->mempool, cheader->num_locals * sizeof (MonoInst*));
4922 for (i = 0; i < cheader->num_locals; ++i)
4923 cfg->locals [i] = mono_compile_create_var (cfg, cheader->locals [i], OP_LOCAL);
4925 /* allocate start and end blocks */
4926 /* This is needed so if the inline is aborted, we can clean up */
4927 NEW_BBLOCK (cfg, sbblock);
4928 sbblock->real_offset = real_offset;
4930 NEW_BBLOCK (cfg, ebblock);
4931 ebblock->block_num = cfg->num_bblocks++;
4932 ebblock->real_offset = real_offset;
/* Snapshot the remaining per-method cfg state before recursing. */
4934 prev_args = cfg->args;
4935 prev_arg_types = cfg->arg_types;
4936 prev_inlined_method = cfg->inlined_method;
4937 cfg->inlined_method = cmethod;
4938 cfg->ret_var_set = FALSE;
4939 cfg->inline_depth ++;
4940 prev_real_offset = cfg->real_offset;
4941 prev_cbb_hash = cfg->cbb_hash;
4942 prev_cil_offset_to_bb = cfg->cil_offset_to_bb;
4943 prev_cil_offset_to_bb_len = cfg->cil_offset_to_bb_len;
4944 prev_cil_start = cfg->cil_start;
4945 prev_cbb = cfg->cbb;
4946 prev_current_method = cfg->current_method;
4947 prev_generic_context = cfg->generic_context;
4948 prev_ret_var_set = cfg->ret_var_set;
/* A non-static callvirt still needs a virtual-call null check. */
4950 if (*ip == CEE_CALLVIRT && !(cmethod->flags & METHOD_ATTRIBUTE_STATIC))
/* Recursively convert the callee's IL; costs < 0 means failure. */
4953 costs = mono_method_to_ir (cfg, cmethod, sbblock, ebblock, rvar, dont_inline, sp, real_offset, virtual);
4955 ret_var_set = cfg->ret_var_set;
/* Restore the caller's cfg state saved above. */
4957 cfg->inlined_method = prev_inlined_method;
4958 cfg->real_offset = prev_real_offset;
4959 cfg->cbb_hash = prev_cbb_hash;
4960 cfg->cil_offset_to_bb = prev_cil_offset_to_bb;
4961 cfg->cil_offset_to_bb_len = prev_cil_offset_to_bb_len;
4962 cfg->cil_start = prev_cil_start;
4963 cfg->locals = prev_locals;
4964 cfg->args = prev_args;
4965 cfg->arg_types = prev_arg_types;
4966 cfg->current_method = prev_current_method;
4967 cfg->generic_context = prev_generic_context;
4968 cfg->ret_var_set = prev_ret_var_set;
4969 cfg->inline_depth --;
/* Commit threshold: 60 is a cost budget — TODO confirm its relation to INLINE_LENGTH_LIMIT. */
4971 if ((costs >= 0 && costs < 60) || inline_always) {
4972 if (cfg->verbose_level > 2)
4973 printf ("INLINE END %s -> %s\n", mono_method_full_name (cfg->method, TRUE), mono_method_full_name (cmethod, TRUE));
4975 cfg->stat_inlined_methods++;
4977 /* always add some code to avoid block split failures */
4978 MONO_INST_NEW (cfg, ins, OP_NOP);
4979 MONO_ADD_INS (prev_cbb, ins);
4981 prev_cbb->next_bb = sbblock;
4982 link_bblock (cfg, prev_cbb, sbblock);
4985 * Get rid of the begin and end bblocks if possible to aid local
4988 mono_merge_basic_blocks (cfg, prev_cbb, sbblock);
4990 if ((prev_cbb->out_count == 1) && (prev_cbb->out_bb [0]->in_count == 1) && (prev_cbb->out_bb [0] != ebblock))
4991 mono_merge_basic_blocks (cfg, prev_cbb, prev_cbb->out_bb [0]);
4993 if ((ebblock->in_count == 1) && ebblock->in_bb [0]->out_count == 1) {
4994 MonoBasicBlock *prev = ebblock->in_bb [0];
4995 mono_merge_basic_blocks (cfg, prev, ebblock);
4997 if ((prev_cbb->out_count == 1) && (prev_cbb->out_bb [0]->in_count == 1) && (prev_cbb->out_bb [0] == prev)) {
4998 mono_merge_basic_blocks (cfg, prev_cbb, prev);
4999 cfg->cbb = prev_cbb;
5007 * If the inlined method contains only a throw, then the ret var is not
5008 * set, so set it to a dummy value.
5011 static double r8_0 = 0.0;
5013 switch (rvar->type) {
5015 MONO_EMIT_NEW_ICONST (cfg, rvar->dreg, 0);
5018 MONO_EMIT_NEW_I8CONST (cfg, rvar->dreg, 0);
5023 MONO_EMIT_NEW_PCONST (cfg, rvar->dreg, 0);
5026 MONO_INST_NEW (cfg, ins, OP_R8CONST);
5027 ins->type = STACK_R8;
5028 ins->inst_p0 = (void*)&r8_0;
5029 ins->dreg = rvar->dreg;
5030 MONO_ADD_INS (cfg->cbb, ins);
5033 MONO_EMIT_NEW_VZERO (cfg, rvar->dreg, mono_class_from_mono_type (fsig->ret));
5036 g_assert_not_reached ();
5040 EMIT_NEW_TEMPLOAD (cfg, ins, rvar->inst_c0);
/* Header is freed lazily when the cfg is destroyed. */
5043 cfg->headers_to_free = g_slist_prepend_mempool (cfg->mempool, cfg->headers_to_free, cheader);
/* Abort path: undo the inline attempt and clear any pending error. */
5046 if (cfg->verbose_level > 2)
5047 printf ("INLINE ABORTED %s\n", mono_method_full_name (cmethod, TRUE));
5048 cfg->exception_type = MONO_EXCEPTION_NONE;
5049 mono_loader_clear_error ();
5051 /* This gets rid of the newly added bblocks */
5052 cfg->cbb = prev_cbb;
5054 cfg->headers_to_free = g_slist_prepend_mempool (cfg->mempool, cfg->headers_to_free, cheader);
5059 * Some of these comments may well be out-of-date.
5060 * Design decisions: we do a single pass over the IL code (and we do bblock
5061 * splitting/merging in the few cases when it's required: a back jump to an IL
5062 * address that was not already seen as bblock starting point).
5063 * Code is validated as we go (full verification is still better left to metadata/verify.c).
5064 * Complex operations are decomposed in simpler ones right away. We need to let the
5065 * arch-specific code peek and poke inside this process somehow (except when the
5066 * optimizations can take advantage of the full semantic info of coarse opcodes).
5067 * All the opcodes of the form opcode.s are 'normalized' to opcode.
5068 * MonoInst->opcode initially is the IL opcode or some simplification of that
5069 * (OP_LOAD, OP_STORE). The arch-specific code may rearrange it to an arch-specific
5070 * opcode with value bigger than OP_LAST.
5071 * At this point the IR can be handed over to an interpreter, a dumb code generator
5072 * or to the optimizing code generator that will translate it to SSA form.
5074 * Profiling directed optimizations.
5075 * We may compile by default with few or no optimizations and instrument the code
5076 * or the user may indicate what methods to optimize the most either in a config file
5077 * or through repeated runs where the compiler applies offline the optimizations to
5078 * each method and then decides if it was worth it.
/*
 * Verification helpers used by the big IL-conversion loop.  They rely on
 * locals of that loop (sp, stack_start, header, num_args, ip, end) and on
 * UNVERIFIED / LOAD_ERROR labels defined elsewhere in this file.
 */
5081 #define CHECK_TYPE(ins) if (!(ins)->type) UNVERIFIED
5082 #define CHECK_STACK(num) if ((sp - stack_start) < (num)) UNVERIFIED
5083 #define CHECK_STACK_OVF(num) if (((sp - stack_start) + (num)) > header->max_stack) UNVERIFIED
5084 #define CHECK_ARG(num) if ((unsigned)(num) >= (unsigned)num_args) UNVERIFIED
5085 #define CHECK_LOCAL(num) if ((unsigned)(num) >= (unsigned)header->num_locals) UNVERIFIED
5086 #define CHECK_OPSIZE(size) if (ip + size > end) UNVERIFIED
5087 #define CHECK_UNVERIFIABLE(cfg) if (cfg->unverifiable) UNVERIFIED
5088 #define CHECK_TYPELOAD(klass) if (!(klass) || (klass)->exception_type) {cfg->exception_ptr = klass; LOAD_ERROR;}
5090 /* offset from br.s -> br like opcodes */
5091 #define BIG_BRANCH_OFFSET 13
/*
 * ip_in_bb:
 *   TRUE if the IL offset IP does not start a different basic block than BB,
 *   i.e. no bblock boundary lies at IP (or IP starts BB itself).
 */
5094 ip_in_bb (MonoCompile *cfg, MonoBasicBlock *bb, const guint8* ip)
5096 MonoBasicBlock *b = cfg->cil_offset_to_bb [ip - cfg->cil_start];
5098 return b == NULL || b == bb;
/*
 * get_basic_blocks:
 *   First pass over the IL in [start, end): decode each opcode, create a
 *   basic block (GET_BBLOCK) at every branch target and at the instruction
 *   following a branch/switch, and mark bblocks ending in CEE_THROW as
 *   out-of-line so they can be moved to cold code.
 *   NOTE(review): several case arms/advance steps are elided in this listing.
 */
5102 get_basic_blocks (MonoCompile *cfg, MonoMethodHeader* header, guint real_offset, unsigned char *start, unsigned char *end, unsigned char **pos)
5104 unsigned char *ip = start;
5105 unsigned char *target;
5108 MonoBasicBlock *bblock;
5109 const MonoOpcode *opcode;
5112 cli_addr = ip - start;
5113 i = mono_opcode_value ((const guint8 **)&ip, end);
5116 opcode = &mono_opcodes [i];
/* Advance ip by the operand size of this opcode's argument kind. */
5117 switch (opcode->argument) {
5118 case MonoInlineNone:
5121 case MonoInlineString:
5122 case MonoInlineType:
5123 case MonoInlineField:
5124 case MonoInlineMethod:
5127 case MonoShortInlineR:
5134 case MonoShortInlineVar:
5135 case MonoShortInlineI:
5138 case MonoShortInlineBrTarget:
/* 8-bit signed branch: target is relative to the next instruction. */
5139 target = start + cli_addr + 2 + (signed char)ip [1];
5140 GET_BBLOCK (cfg, bblock, target);
/* Fall-through successor also starts a bblock. */
5143 GET_BBLOCK (cfg, bblock, ip);
5145 case MonoInlineBrTarget:
/* 32-bit branch: 5-byte instruction, signed displacement. */
5146 target = start + cli_addr + 5 + (gint32)read32 (ip + 1);
5147 GET_BBLOCK (cfg, bblock, target);
5150 GET_BBLOCK (cfg, bblock, ip);
5152 case MonoInlineSwitch: {
5153 guint32 n = read32 (ip + 1);
/* Default (fall-through) target follows the jump table. */
5156 cli_addr += 5 + 4 * n;
5157 target = start + cli_addr;
5158 GET_BBLOCK (cfg, bblock, target);
5160 for (j = 0; j < n; ++j) {
5161 target = start + cli_addr + (gint32)read32 (ip);
5162 GET_BBLOCK (cfg, bblock, target);
5172 g_assert_not_reached ();
5175 if (i == CEE_THROW) {
5176 unsigned char *bb_start = ip - 1;
5178 /* Find the start of the bblock containing the throw */
5180 while ((bb_start >= start) && !bblock) {
5181 bblock = cfg->cil_offset_to_bb [(bb_start) - start];
/* Throw-only blocks are cold; keep them out of the hot path. */
5185 bblock->out_of_line = 1;
/*
 * mini_get_method_allow_open:
 *   Resolve TOKEN to a MonoMethod in the context of method M.  For wrapper
 *   methods the token indexes the wrapper's own data table instead of
 *   metadata.  "allow_open": open constructed types are not rejected here.
 */
5194 static inline MonoMethod *
5195 mini_get_method_allow_open (MonoMethod *m, guint32 token, MonoClass *klass, MonoGenericContext *context)
5199 if (m->wrapper_type != MONO_WRAPPER_NONE)
5200 return mono_method_get_wrapper_data (m, token);
5202 method = mono_get_method_full (m->klass->image, token, klass, context);
/*
 * mini_get_method:
 *   Like mini_get_method_allow_open, but when not compiling with generic
 *   sharing, reject methods on open constructed types (handled in the
 *   elided branch).
 */
5207 static inline MonoMethod *
5208 mini_get_method (MonoCompile *cfg, MonoMethod *m, guint32 token, MonoClass *klass, MonoGenericContext *context)
5210 MonoMethod *method = mini_get_method_allow_open (m, token, klass, context);
5212 if (method && cfg && !cfg->generic_sharing_context && mono_class_is_open_constructed_type (&method->klass->byval_arg))
/*
 * mini_get_class:
 *   Resolve TOKEN to a MonoClass (wrapper data table for wrappers,
 *   metadata otherwise) and initialize it before returning.
 */
5218 static inline MonoClass*
5219 mini_get_class (MonoMethod *method, guint32 token, MonoGenericContext *context)
5223 if (method->wrapper_type != MONO_WRAPPER_NONE)
5224 klass = mono_method_get_wrapper_data (method, token);
5226 klass = mono_class_get_full (method->klass->image, token, context);
5228 mono_class_init (klass);
5233 * Returns TRUE if the JIT should abort inlining because "callee"
5234 * is influenced by security attributes.
/*
 * check_linkdemand:
 *   Evaluate CAS LinkDemand security for a caller→callee edge.  On an ECMA
 *   link demand, emit code that throws a SecurityException before the call;
 *   otherwise record the failure on the cfg (without clobbering an earlier
 *   exception).
 */
5237 gboolean check_linkdemand (MonoCompile *cfg, MonoMethod *caller, MonoMethod *callee)
/* Only re-check declarative security when inlining across methods. */
5241 if ((cfg->method != caller) && mono_method_has_declsec (callee)) {
5245 result = mono_declsec_linkdemand (cfg->domain, caller, callee);
5246 if (result == MONO_JIT_SECURITY_OK)
5249 if (result == MONO_JIT_LINKDEMAND_ECMA) {
5250 /* Generate code to throw a SecurityException before the actual call/link */
5251 MonoSecurityManager *secman = mono_security_manager_get_methods ();
5254 NEW_ICONST (cfg, args [0], 4);
5255 NEW_METHODCONST (cfg, args [1], caller);
5256 mono_emit_method_call (cfg, secman->linkdemandsecurityexception, args, NULL);
5257 } else if (cfg->exception_type == MONO_EXCEPTION_NONE) {
5258 /* don't hide previous results */
5259 mono_cfg_set_exception (cfg, MONO_EXCEPTION_SECURITY_LINKDEMAND);
5260 cfg->exception_data = result;
/*
 * throw_exception:
 *   Return SecurityManager.ThrowException(exception), resolved once and
 *   cached in a function-local static.
 */
5268 throw_exception (void)
5270 static MonoMethod *method = NULL;
5273 MonoSecurityManager *secman = mono_security_manager_get_methods ();
5274 method = mono_class_get_method_from_name (secman->securitymanager, "ThrowException", 1);
/*
 * emit_throw_exception:
 *   Emit a call to SecurityManager.ThrowException (EX) at the current
 *   insertion point.
 */
5281 emit_throw_exception (MonoCompile *cfg, MonoException *ex)
5283 MonoMethod *thrower = throw_exception ();
5286 EMIT_NEW_PCONST (cfg, args [0], ex);
5287 mono_emit_method_call (cfg, thrower, args, NULL);
5291 * Return the original method if a wrapper is specified. We can only access
5292 * the custom attributes from the original method.
5295 get_original_method (MonoMethod *method)
5297 if (method->wrapper_type == MONO_WRAPPER_NONE)
5300 /* native code (which is like Critical) can call any managed method XXX FIXME XXX to validate all usages */
5301 if (method->wrapper_type == MONO_WRAPPER_NATIVE_TO_MANAGED)
5304 /* in other cases we need to find the original method */
5305 return mono_marshal_method_from_wrapper (method);
/*
 * ensure_method_is_allowed_to_access_field:
 *   CoreCLR security check for a field access; emits a throw if the access
 *   is denied.  The check runs on the unwrapped caller, since wrappers
 *   carry no security attributes.
 */
5309 ensure_method_is_allowed_to_access_field (MonoCompile *cfg, MonoMethod *caller, MonoClassField *field,
5310 MonoBasicBlock *bblock, unsigned char *ip)
5312 /* we can't get the coreclr security level on wrappers since they don't have the attributes */
5313 MonoException *ex = mono_security_core_clr_is_field_access_allowed (get_original_method (caller), field);
5315 emit_throw_exception (cfg, ex);
/*
 * ensure_method_is_allowed_to_call_method:
 *   CoreCLR security check for a caller→callee call; emits a throw if the
 *   call is denied.  Runs on the unwrapped caller (wrappers carry no
 *   attributes).
 */
5319 ensure_method_is_allowed_to_call_method (MonoCompile *cfg, MonoMethod *caller, MonoMethod *callee,
5320 MonoBasicBlock *bblock, unsigned char *ip)
5322 /* we can't get the coreclr security level on wrappers since they don't have the attributes */
5323 MonoException *ex = mono_security_core_clr_is_call_allowed (get_original_method (caller), callee);
5325 emit_throw_exception (cfg, ex);
5329 * Check that the IL instructions at ip are the array initialization
5330 * sequence and return the pointer to the data and the size.
/*
 * initialize_array_data:
 *   Pattern-match the canonical "newarr; dup; ldtoken <field>; call
 *   RuntimeHelpers::InitializeArray" sequence so the JIT can copy the
 *   static blob directly instead of calling the helper.  Returns the data
 *   pointer (or the RVA when AOT-compiling), or NULL (elided paths) when
 *   the pattern does not apply.
 */
5333 initialize_array_data (MonoMethod *method, gboolean aot, unsigned char *ip, MonoClass *klass, guint32 len, int *out_size, guint32 *out_field_token)
5336 * newarr[System.Int32]
5338 * ldtoken field valuetype ...
5339 * call void class [mscorlib]System.Runtime.CompilerServices.RuntimeHelpers::InitializeArray(class [mscorlib]System.Array, valuetype [mscorlib]System.RuntimeFieldHandle)
/* ip[5] == 0x4 checks the ldtoken token's table byte (field token). */
5341 if (ip [0] == CEE_DUP && ip [1] == CEE_LDTOKEN && ip [5] == 0x4 && ip [6] == CEE_CALL) {
5342 guint32 token = read32 (ip + 7);
5343 guint32 field_token = read32 (ip + 2);
5344 guint32 field_index = field_token & 0xffffff;
5346 const char *data_ptr;
5348 MonoMethod *cmethod;
5349 MonoClass *dummy_class;
5350 MonoClassField *field = mono_field_from_token (method->klass->image, field_token, &dummy_class, NULL);
5356 *out_field_token = field_token;
5358 cmethod = mini_get_method (NULL, method, token, NULL, NULL);
/* The called method must be exactly corlib's RuntimeHelpers.InitializeArray. */
5361 if (strcmp (cmethod->name, "InitializeArray") || strcmp (cmethod->klass->name, "RuntimeHelpers") || cmethod->klass->image != mono_defaults.corlib)
/* Determine the element size from the array element type. */
5363 switch (mono_type_get_underlying_type (&klass->byval_arg)->type) {
5364 case MONO_TYPE_BOOLEAN:
5368 /* we need to swap on big endian, so punt. Should we handle R4 and R8 as well? */
5369 #if TARGET_BYTE_ORDER == G_LITTLE_ENDIAN
5370 case MONO_TYPE_CHAR:
5380 return NULL; /* stupid ARM FP swapped format */
/* The blob must be at least as large as the requested array. */
5390 if (size > mono_type_size (field->type, &dummy_align))
5393 /*g_print ("optimized in %s: size: %d, numelems: %d\n", method->name, size, newarr->inst_newa_len->inst_c0);*/
5394 if (!method->klass->image->dynamic) {
5395 field_index = read32 (ip + 2) & 0xffffff;
5396 mono_metadata_field_info (method->klass->image, field_index - 1, NULL, &rva, NULL);
5397 data_ptr = mono_image_rva_map (method->klass->image, rva);
5398 /*g_print ("field: 0x%08x, rva: %d, rva_ptr: %p\n", read32 (ip + 2), rva, data_ptr);*/
5399 /* for aot code we do the lookup on load */
5400 if (aot && data_ptr)
5401 return GUINT_TO_POINTER (rva);
5403 /*FIXME is it possible to AOT a SRE assembly not meant to be saved? */
/* Dynamic (SRE) image: read the data from the field directly. */
5405 data_ptr = mono_field_get_data (field);
/*
 * set_exception_type_from_invalid_il:
 *   Record an InvalidProgramException on the cfg, with a message naming the
 *   method and disassembling the offending instruction at IP (or noting an
 *   empty body).  The header is queued for deferred freeing.
 */
5413 set_exception_type_from_invalid_il (MonoCompile *cfg, MonoMethod *method, unsigned char *ip)
5415 char *method_fname = mono_method_full_name (method, TRUE);
5417 MonoMethodHeader *header = mono_method_get_header (method);
5419 if (header->code_size == 0)
5420 method_code = g_strdup ("method body is empty.");
5422 method_code = mono_disasm_code_one (NULL, method, ip, NULL);
5423 mono_cfg_set_exception (cfg, MONO_EXCEPTION_INVALID_PROGRAM);
5424 cfg->exception_message = g_strdup_printf ("Invalid IL code in %s: %s\n", method_fname, method_code);
5425 g_free (method_fname);
5426 g_free (method_code);
5427 cfg->headers_to_free = g_slist_prepend_mempool (cfg->mempool, cfg->headers_to_free, header);
/*
 * set_exception_object:
 *   Record a pre-built exception object on the cfg; exception_ptr is GC-
 *   registered so the object survives until the compile fails over.
 */
5431 set_exception_object (MonoCompile *cfg, MonoException *exception)
5433 mono_cfg_set_exception (cfg, MONO_EXCEPTION_OBJECT_SUPPLIED);
5434 MONO_GC_REGISTER_ROOT_SINGLE (cfg->exception_ptr);
5435 cfg->exception_ptr = exception;
/* TRUE if KLASS is a reference type, honoring generic-sharing rules. */
5439 generic_class_is_reference_type (MonoCompile *cfg, MonoClass *klass)
5441 return mini_type_is_reference (cfg, &klass->byval_arg);
/*
 * emit_stloc_ir:
 *   Emit a store of *SP into local N.  When the value on top of the stack
 *   is a freshly-emitted constant, retarget its dreg to the local instead
 *   of emitting a separate move.
 */
5445 emit_stloc_ir (MonoCompile *cfg, MonoInst **sp, MonoMethodHeader *header, int n)
5448 guint32 opcode = mono_type_to_regmove (cfg, header->locals [n]);
5449 if ((opcode == OP_MOVE) && cfg->cbb->last_ins == sp [0] &&
5450 ((sp [0]->opcode == OP_ICONST) || (sp [0]->opcode == OP_I8CONST))) {
5451 /* Optimize reg-reg moves away */
5453 * Can't optimize other opcodes, since sp[0] might point to
5454 * the last ins of a decomposed opcode.
5456 sp [0]->dreg = (cfg)->locals [n]->dreg;
5458 EMIT_NEW_LOCSTORE (cfg, ins, n, *sp);
5463 * ldloca inhibits many optimizations so try to get rid of it in common
/*
 * emit_optimized_ldloca_ir:
 *   Peephole: when "ldloca N" is immediately followed by "initobj T" in the
 *   same bblock, emit the zero-initialization of local N directly (NULL
 *   store for references, VZERO for structs) and skip the ldloca entirely.
 *   Returns the advanced ip on success (return paths elided here).
 */
5466 static inline unsigned char *
5467 emit_optimized_ldloca_ir (MonoCompile *cfg, unsigned char *ip, unsigned char *end, int size)
5476 local = read16 (ip + 2);
5480 if (ip + 6 < end && (ip [0] == CEE_PREFIX1) && (ip [1] == CEE_INITOBJ) && ip_in_bb (cfg, cfg->cbb, ip + 1)) {
5481 gboolean skip = FALSE;
5483 /* From the INITOBJ case */
5484 token = read32 (ip + 2);
5485 klass = mini_get_class (cfg->current_method, token, cfg->generic_context);
5486 CHECK_TYPELOAD (klass);
5487 if (mini_type_is_reference (cfg, &klass->byval_arg)) {
5488 MONO_EMIT_NEW_PCONST (cfg, cfg->locals [local]->dreg, NULL);
5489 } else if (MONO_TYPE_ISSTRUCT (&klass->byval_arg)) {
5490 MONO_EMIT_NEW_VZERO (cfg, cfg->locals [local]->dreg, klass);
/* TRUE if CLASS derives from (or is) System.Exception; walks up parents. */
5503 is_exception_class (MonoClass *class)
5506 if (class == mono_defaults.exception_class)
5508 class = class->parent;
5514 * is_jit_optimizer_disabled:
5516 * Determine whether M's assembly has a DebuggableAttribute with the
5517 * IsJITOptimizerDisabled flag set.
5520 is_jit_optimizer_disabled (MonoMethod *m)
5522 MonoAssembly *ass = m->klass->image->assembly;
5523 MonoCustomAttrInfo* attrs;
5524 static MonoClass *klass;
5526 gboolean val = FALSE;
/* Fast path: the answer is cached per-assembly after the first call. */
5529 if (ass->jit_optimizer_disabled_inited)
5530 return ass->jit_optimizer_disabled;
5533 klass = mono_class_from_name (mono_defaults.corlib, "System.Diagnostics", "DebuggableAttribute");
/* No DebuggableAttribute type at all: cache FALSE (barrier orders the
 * value write before the inited flag for concurrent readers). */
5536 ass->jit_optimizer_disabled = FALSE;
5537 mono_memory_barrier ();
5538 ass->jit_optimizer_disabled_inited = TRUE;
5542 attrs = mono_custom_attrs_from_assembly (ass);
5544 for (i = 0; i < attrs->num_attrs; ++i) {
5545 MonoCustomAttrEntry *attr = &attrs->attrs [i];
5548 MonoMethodSignature *sig;
5550 if (!attr->ctor || attr->ctor->klass != klass)
5552 /* Decode the attribute. See reflection.c */
5553 len = attr->data_size;
5554 p = (const char*)attr->data;
/* Custom-attribute blobs start with the 0x0001 prolog. */
5555 g_assert (read16 (p) == 0x0001);
5558 // FIXME: Support named parameters
5559 sig = mono_method_signature (attr->ctor);
/* Only the (bool, bool) constructor overload is understood. */
5560 if (sig->param_count != 2 || sig->params [0]->type != MONO_TYPE_BOOLEAN || sig->params [1]->type != MONO_TYPE_BOOLEAN)
5562 /* Two boolean arguments */
5566 mono_custom_attrs_free (attrs);
5569 ass->jit_optimizer_disabled = val;
5570 mono_memory_barrier ();
5571 ass->jit_optimizer_disabled_inited = TRUE;
/*
 * is_supported_tail_call:
 *   Decide whether a tail. call from METHOD to CMETHOD can actually be
 *   emitted as a tail call: signatures must be compatible, no argument may
 *   point into the caller's stack frame, and LMF-saving / pinvoke / most
 *   wrapper callees are excluded.
 */
5577 is_supported_tail_call (MonoCompile *cfg, MonoMethod *method, MonoMethod *cmethod, MonoMethodSignature *fsig)
5579 gboolean supported_tail_call;
5582 #ifdef MONO_ARCH_USE_OP_TAIL_CALL
5583 supported_tail_call = MONO_ARCH_USE_OP_TAIL_CALL (mono_method_signature (method), mono_method_signature (cmethod));
5585 supported_tail_call = mono_metadata_signature_equal (mono_method_signature (method), mono_method_signature (cmethod)) && !MONO_TYPE_ISSTRUCT (mono_method_signature (cmethod)->ret);
5588 for (i = 0; i < fsig->param_count; ++i) {
5589 if (fsig->params [i]->byref || fsig->params [i]->type == MONO_TYPE_PTR || fsig->params [i]->type == MONO_TYPE_FNPTR)
5590 /* These can point to the current method's stack */
5591 supported_tail_call = FALSE;
5593 if (fsig->hasthis && cmethod->klass->valuetype)
5594 /* this might point to the current method's stack */
5595 supported_tail_call = FALSE;
5596 if (cmethod->flags & METHOD_ATTRIBUTE_PINVOKE_IMPL)
5597 supported_tail_call = FALSE;
5598 if (cfg->method->save_lmf)
5599 supported_tail_call = FALSE;
5600 if (cmethod->wrapper_type && cmethod->wrapper_type != MONO_WRAPPER_DYNAMIC_METHOD)
5601 supported_tail_call = FALSE;
5603 /* Debugging support */
/* COUNT env var lets a developer bisect which tail call breaks. */
5605 if (supported_tail_call) {
5606 static int count = 0;
5608 if (getenv ("COUNT")) {
5609 if (count == atoi (getenv ("COUNT")))
5610 printf ("LAST: %s\n", mono_method_full_name (cmethod, TRUE));
5611 if (count > atoi (getenv ("COUNT")))
5612 supported_tail_call = FALSE;
5617 return supported_tail_call;
5620 /* the JIT intercepts ldflda instructions to the tlsdata field in ThreadLocal<T> and redirects
5621 * it to the thread local value based on the tls_offset field. Every other kind of access to
5622 * the field causes an assert.
/* TRUE iff FIELD is corlib's ThreadLocal`1.tlsdata (all three checks must pass). */
5625 is_magic_tls_access (MonoClassField *field)
5627 if (strcmp (field->name, "tlsdata"))
5629 if (strcmp (field->parent->name, "ThreadLocal`1"))
5631 return field->parent->image == mono_defaults.corlib;
5634 /* emits the code needed to access a managed tls var (like ThreadStatic)
5635 * with the value of the tls offset in offset_reg. thread_ins represents the MonoInternalThread
5636 * pointer for the current thread.
5637 * Returns the MonoInst* representing the address of the tls var.
5640 emit_managed_static_data_access (MonoCompile *cfg, MonoInst *thread_ins, int offset_reg)
5643 int static_data_reg, array_reg, dreg;
5644 int offset2_reg, idx_reg;
5645 // inlined access to the tls data
5646 // idx = (offset >> 24) - 1;
5647 // return ((char*) thread->static_data [idx]) + (offset & 0xffffff);
5648 static_data_reg = alloc_ireg (cfg);
/* static_data_reg = thread->static_data (array of per-chunk pointers). */
5649 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, static_data_reg, thread_ins->dreg, G_STRUCT_OFFSET (MonoInternalThread, static_data));
5650 idx_reg = alloc_ireg (cfg);
/* idx = (offset >> 24) - 1, then scale by pointer size for indexing. */
5651 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_ISHR_IMM, idx_reg, offset_reg, 24);
5652 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_ISUB_IMM, idx_reg, idx_reg, 1);
5653 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_ISHL_IMM, idx_reg, idx_reg, sizeof (gpointer) == 8 ? 3 : 2);
5654 MONO_EMIT_NEW_BIALU (cfg, OP_PADD, static_data_reg, static_data_reg, idx_reg);
5655 array_reg = alloc_ireg (cfg);
/* array_reg = static_data [idx]. */
5656 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, array_reg, static_data_reg, 0);
5657 offset2_reg = alloc_ireg (cfg);
/* offset2 = offset & 0xffffff (byte offset within the chunk). */
5658 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_IAND_IMM, offset2_reg, offset_reg, 0xffffff);
5659 dreg = alloc_ireg (cfg);
5660 EMIT_NEW_BIALU (cfg, addr, OP_PADD, dreg, array_reg, offset2_reg);
5665 * redirect access to the tlsdata field to the tls var given by the tls_offset field.
5666 * this address is cached per-method in cached_tls_addr.
5669 create_magic_tls_access (MonoCompile *cfg, MonoClassField *tls_field, MonoInst **cached_tls_addr, MonoInst *thread_local)
5671 MonoInst *load, *addr, *temp, *store, *thread_ins;
5672 MonoClassField *offset_field;
5674 if (*cached_tls_addr) {
5675 EMIT_NEW_TEMPLOAD (cfg, addr, (*cached_tls_addr)->inst_c0);
5678 thread_ins = mono_get_thread_intrinsic (cfg);
5679 offset_field = mono_class_get_field_from_name (tls_field->parent, "tls_offset");
5681 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, load, offset_field->type, thread_local->dreg, offset_field->offset);
5683 MONO_ADD_INS (cfg->cbb, thread_ins);
5685 MonoMethod *thread_method;
5686 thread_method = mono_class_get_method_from_name (mono_get_thread_class(), "CurrentInternalThread_internal", 0);
5687 thread_ins = mono_emit_method_call (cfg, thread_method, NULL, NULL);
5689 addr = emit_managed_static_data_access (cfg, thread_ins, load->dreg);
5690 addr->klass = mono_class_from_mono_type (tls_field->type);
5691 addr->type = STACK_MP;
5692 *cached_tls_addr = temp = mono_compile_create_var (cfg, type_from_stack_type (addr), OP_LOCAL);
5693 EMIT_NEW_TEMPSTORE (cfg, store, temp->inst_c0, addr);
5695 EMIT_NEW_TEMPLOAD (cfg, addr, temp->inst_c0);
5700 * mono_method_to_ir:
5702 * Translate the .net IL into linear IR.
5705 mono_method_to_ir (MonoCompile *cfg, MonoMethod *method, MonoBasicBlock *start_bblock, MonoBasicBlock *end_bblock,
5706 MonoInst *return_var, GList *dont_inline, MonoInst **inline_args,
5707 guint inline_offset, gboolean is_virtual_call)
5710 MonoInst *ins, **sp, **stack_start;
5711 MonoBasicBlock *bblock, *tblock = NULL, *init_localsbb = NULL;
5712 MonoSimpleBasicBlock *bb = NULL, *original_bb = NULL;
5713 MonoMethod *cmethod, *method_definition;
5714 MonoInst **arg_array;
5715 MonoMethodHeader *header;
5717 guint32 token, ins_flag;
5719 MonoClass *constrained_call = NULL;
5720 unsigned char *ip, *end, *target, *err_pos;
5721 static double r8_0 = 0.0;
5722 MonoMethodSignature *sig;
5723 MonoGenericContext *generic_context = NULL;
5724 MonoGenericContainer *generic_container = NULL;
5725 MonoType **param_types;
5726 int i, n, start_new_bblock, dreg;
5727 int num_calls = 0, inline_costs = 0;
5728 int breakpoint_id = 0;
5730 MonoBoolean security, pinvoke;
5731 MonoSecurityManager* secman = NULL;
5732 MonoDeclSecurityActions actions;
5733 GSList *class_inits = NULL;
5734 gboolean dont_verify, dont_verify_stloc, readonly = FALSE;
5736 gboolean init_locals, seq_points, skip_dead_blocks;
5737 gboolean disable_inline;
5738 MonoInst *cached_tls_addr = NULL;
5740 disable_inline = is_jit_optimizer_disabled (method);
5742 /* serialization and xdomain stuff may need access to private fields and methods */
5743 dont_verify = method->klass->image->assembly->corlib_internal? TRUE: FALSE;
5744 dont_verify |= method->wrapper_type == MONO_WRAPPER_XDOMAIN_INVOKE;
5745 dont_verify |= method->wrapper_type == MONO_WRAPPER_XDOMAIN_DISPATCH;
5746 dont_verify |= method->wrapper_type == MONO_WRAPPER_MANAGED_TO_NATIVE; /* bug #77896 */
5747 dont_verify |= method->wrapper_type == MONO_WRAPPER_COMINTEROP;
5748 dont_verify |= method->wrapper_type == MONO_WRAPPER_COMINTEROP_INVOKE;
5750 dont_verify |= mono_security_get_mode () == MONO_SECURITY_MODE_SMCS_HACK;
5752 /* still some type unsafety issues in marshal wrappers... (unknown is PtrToStructure) */
5753 dont_verify_stloc = method->wrapper_type == MONO_WRAPPER_MANAGED_TO_NATIVE;
5754 dont_verify_stloc |= method->wrapper_type == MONO_WRAPPER_UNKNOWN;
5755 dont_verify_stloc |= method->wrapper_type == MONO_WRAPPER_NATIVE_TO_MANAGED;
5756 dont_verify_stloc |= method->wrapper_type == MONO_WRAPPER_STELEMREF;
5758 image = method->klass->image;
5759 header = mono_method_get_header (method);
5761 MonoLoaderError *error;
5763 if ((error = mono_loader_get_last_error ())) {
5764 mono_cfg_set_exception (cfg, error->exception_type);
5766 mono_cfg_set_exception (cfg, MONO_EXCEPTION_INVALID_PROGRAM);
5767 cfg->exception_message = g_strdup_printf ("Missing or incorrect header for method %s", cfg->method->name);
5769 goto exception_exit;
5771 generic_container = mono_method_get_generic_container (method);
5772 sig = mono_method_signature (method);
5773 num_args = sig->hasthis + sig->param_count;
5774 ip = (unsigned char*)header->code;
5775 cfg->cil_start = ip;
5776 end = ip + header->code_size;
5777 cfg->stat_cil_code_size += header->code_size;
5778 init_locals = header->init_locals;
5780 seq_points = cfg->gen_seq_points && cfg->method == method;
5783 * Methods without init_locals set could cause asserts in various passes
5788 method_definition = method;
5789 while (method_definition->is_inflated) {
5790 MonoMethodInflated *imethod = (MonoMethodInflated *) method_definition;
5791 method_definition = imethod->declaring;
5794 /* SkipVerification is not allowed if core-clr is enabled */
5795 if (!dont_verify && mini_assembly_can_skip_verification (cfg->domain, method)) {
5797 dont_verify_stloc = TRUE;
5800 if (mono_debug_using_mono_debugger ())
5801 cfg->keep_cil_nops = TRUE;
5803 if (sig->is_inflated)
5804 generic_context = mono_method_get_context (method);
5805 else if (generic_container)
5806 generic_context = &generic_container->context;
5807 cfg->generic_context = generic_context;
5809 if (!cfg->generic_sharing_context)
5810 g_assert (!sig->has_type_parameters);
5812 if (sig->generic_param_count && method->wrapper_type == MONO_WRAPPER_NONE) {
5813 g_assert (method->is_inflated);
5814 g_assert (mono_method_get_context (method)->method_inst);
5816 if (method->is_inflated && mono_method_get_context (method)->method_inst)
5817 g_assert (sig->generic_param_count);
5819 if (cfg->method == method) {
5820 cfg->real_offset = 0;
5822 cfg->real_offset = inline_offset;
5825 cfg->cil_offset_to_bb = mono_mempool_alloc0 (cfg->mempool, sizeof (MonoBasicBlock*) * header->code_size);
5826 cfg->cil_offset_to_bb_len = header->code_size;
5828 cfg->current_method = method;
5830 if (cfg->verbose_level > 2)
5831 printf ("method to IR %s\n", mono_method_full_name (method, TRUE));
5833 param_types = mono_mempool_alloc (cfg->mempool, sizeof (MonoType*) * num_args);
5835 param_types [0] = method->klass->valuetype?&method->klass->this_arg:&method->klass->byval_arg;
5836 for (n = 0; n < sig->param_count; ++n)
5837 param_types [n + sig->hasthis] = sig->params [n];
5838 cfg->arg_types = param_types;
5840 dont_inline = g_list_prepend (dont_inline, method);
5841 if (cfg->method == method) {
5843 if (cfg->prof_options & MONO_PROFILE_INS_COVERAGE)
5844 cfg->coverage_info = mono_profiler_coverage_alloc (cfg->method, header->code_size);
5847 NEW_BBLOCK (cfg, start_bblock);
5848 cfg->bb_entry = start_bblock;
5849 start_bblock->cil_code = NULL;
5850 start_bblock->cil_length = 0;
5851 #if defined(__native_client_codegen__)
5852 MONO_INST_NEW (cfg, ins, OP_NACL_GC_SAFE_POINT);
5853 ins->dreg = alloc_dreg (cfg, STACK_I4);
5854 MONO_ADD_INS (start_bblock, ins);
5858 NEW_BBLOCK (cfg, end_bblock);
5859 cfg->bb_exit = end_bblock;
5860 end_bblock->cil_code = NULL;
5861 end_bblock->cil_length = 0;
5862 end_bblock->flags |= BB_INDIRECT_JUMP_TARGET;
5863 g_assert (cfg->num_bblocks == 2);
5865 arg_array = cfg->args;
5867 if (header->num_clauses) {
5868 cfg->spvars = g_hash_table_new (NULL, NULL);
5869 cfg->exvars = g_hash_table_new (NULL, NULL);
5871 /* handle exception clauses */
5872 for (i = 0; i < header->num_clauses; ++i) {
5873 MonoBasicBlock *try_bb;
5874 MonoExceptionClause *clause = &header->clauses [i];
5875 GET_BBLOCK (cfg, try_bb, ip + clause->try_offset);
5876 try_bb->real_offset = clause->try_offset;
5877 try_bb->try_start = TRUE;
5878 try_bb->region = ((i + 1) << 8) | clause->flags;
5879 GET_BBLOCK (cfg, tblock, ip + clause->handler_offset);
5880 tblock->real_offset = clause->handler_offset;
5881 tblock->flags |= BB_EXCEPTION_HANDLER;
5883 link_bblock (cfg, try_bb, tblock);
5885 if (*(ip + clause->handler_offset) == CEE_POP)
5886 tblock->flags |= BB_EXCEPTION_DEAD_OBJ;
5888 if (clause->flags == MONO_EXCEPTION_CLAUSE_FINALLY ||
5889 clause->flags == MONO_EXCEPTION_CLAUSE_FILTER ||
5890 clause->flags == MONO_EXCEPTION_CLAUSE_FAULT) {
5891 MONO_INST_NEW (cfg, ins, OP_START_HANDLER);
5892 MONO_ADD_INS (tblock, ins);
5894 if (seq_points && clause->flags != MONO_EXCEPTION_CLAUSE_FINALLY) {
5895 /* finally clauses already have a seq point */
5896 NEW_SEQ_POINT (cfg, ins, clause->handler_offset, TRUE);
5897 MONO_ADD_INS (tblock, ins);
5900 /* todo: is a fault block unsafe to optimize? */
5901 if (clause->flags == MONO_EXCEPTION_CLAUSE_FAULT)
5902 tblock->flags |= BB_EXCEPTION_UNSAFE;
5906 /*printf ("clause try IL_%04x to IL_%04x handler %d at IL_%04x to IL_%04x\n", clause->try_offset, clause->try_offset + clause->try_len, clause->flags, clause->handler_offset, clause->handler_offset + clause->handler_len);
5908 printf ("%s", mono_disasm_code_one (NULL, method, p, &p));
5910 /* catch and filter blocks get the exception object on the stack */
5911 if (clause->flags == MONO_EXCEPTION_CLAUSE_NONE ||
5912 clause->flags == MONO_EXCEPTION_CLAUSE_FILTER) {
5913 MonoInst *dummy_use;
5915 /* mostly like handle_stack_args (), but just sets the input args */
5916 /* printf ("handling clause at IL_%04x\n", clause->handler_offset); */
5917 tblock->in_scount = 1;
5918 tblock->in_stack = mono_mempool_alloc (cfg->mempool, sizeof (MonoInst*));
5919 tblock->in_stack [0] = mono_create_exvar_for_offset (cfg, clause->handler_offset);
5922 * Add a dummy use for the exvar so its liveness info will be
5926 EMIT_NEW_DUMMY_USE (cfg, dummy_use, tblock->in_stack [0]);
5928 if (clause->flags == MONO_EXCEPTION_CLAUSE_FILTER) {
5929 GET_BBLOCK (cfg, tblock, ip + clause->data.filter_offset);
5930 tblock->flags |= BB_EXCEPTION_HANDLER;
5931 tblock->real_offset = clause->data.filter_offset;
5932 tblock->in_scount = 1;
5933 tblock->in_stack = mono_mempool_alloc (cfg->mempool, sizeof (MonoInst*));
5934 /* The filter block shares the exvar with the handler block */
5935 tblock->in_stack [0] = mono_create_exvar_for_offset (cfg, clause->handler_offset);
5936 MONO_INST_NEW (cfg, ins, OP_START_HANDLER);
5937 MONO_ADD_INS (tblock, ins);
5941 if (clause->flags != MONO_EXCEPTION_CLAUSE_FILTER &&
5942 clause->data.catch_class &&
5943 cfg->generic_sharing_context &&
5944 mono_class_check_context_used (clause->data.catch_class)) {
5946 * In shared generic code with catch
5947 * clauses containing type variables
5948 * the exception handling code has to
5949 * be able to get to the rgctx.
5950 * Therefore we have to make sure that
5951 * the vtable/mrgctx argument (for
5952 * static or generic methods) or the
5953 * "this" argument (for non-static
5954 * methods) are live.
5956 if ((method->flags & METHOD_ATTRIBUTE_STATIC) ||
5957 mini_method_get_context (method)->method_inst ||
5958 method->klass->valuetype) {
5959 mono_get_vtable_var (cfg);
5961 MonoInst *dummy_use;
5963 EMIT_NEW_DUMMY_USE (cfg, dummy_use, arg_array [0]);
5968 arg_array = (MonoInst **) alloca (sizeof (MonoInst *) * num_args);
5969 cfg->cbb = start_bblock;
5970 cfg->args = arg_array;
5971 mono_save_args (cfg, sig, inline_args);
5974 /* FIRST CODE BLOCK */
5975 NEW_BBLOCK (cfg, bblock);
5976 bblock->cil_code = ip;
5980 ADD_BBLOCK (cfg, bblock);
5982 if (cfg->method == method) {
5983 breakpoint_id = mono_debugger_method_has_breakpoint (method);
5984 if (breakpoint_id && (mono_debug_format != MONO_DEBUG_FORMAT_DEBUGGER)) {
5985 MONO_INST_NEW (cfg, ins, OP_BREAK);
5986 MONO_ADD_INS (bblock, ins);
5990 if (mono_security_get_mode () == MONO_SECURITY_MODE_CAS)
5991 secman = mono_security_manager_get_methods ();
5993 security = (secman && mono_method_has_declsec (method));
5994 /* at this point having security doesn't mean we have any code to generate */
5995 if (security && (cfg->method == method)) {
5996 /* Only Demand, NonCasDemand and DemandChoice requires code generation.
5997 * And we do not want to enter the next section (with allocation) if we
5998 * have nothing to generate */
5999 security = mono_declsec_get_demands (method, &actions);
6002 /* we must Demand SecurityPermission.Unmanaged before P/Invoking */
6003 pinvoke = (secman && (method->wrapper_type == MONO_WRAPPER_MANAGED_TO_NATIVE));
6005 MonoMethod *wrapped = mono_marshal_method_from_wrapper (method);
6006 if (wrapped && (wrapped->flags & METHOD_ATTRIBUTE_PINVOKE_IMPL)) {
6007 MonoCustomAttrInfo* custom = mono_custom_attrs_from_method (wrapped);
6009 /* unless the method or it's class has the [SuppressUnmanagedCodeSecurity] attribute */
6010 if (custom && mono_custom_attrs_has_attr (custom, secman->suppressunmanagedcodesecurity)) {
6014 mono_custom_attrs_free (custom);
6017 custom = mono_custom_attrs_from_class (wrapped->klass);
6018 if (custom && mono_custom_attrs_has_attr (custom, secman->suppressunmanagedcodesecurity)) {
6022 mono_custom_attrs_free (custom);
6025 /* not a P/Invoke after all */
6030 if ((init_locals || (cfg->method == method && (cfg->opt & MONO_OPT_SHARED))) || cfg->compile_aot || security || pinvoke) {
6031 /* we use a separate basic block for the initialization code */
6032 NEW_BBLOCK (cfg, init_localsbb);
6033 cfg->bb_init = init_localsbb;
6034 init_localsbb->real_offset = cfg->real_offset;
6035 start_bblock->next_bb = init_localsbb;
6036 init_localsbb->next_bb = bblock;
6037 link_bblock (cfg, start_bblock, init_localsbb);
6038 link_bblock (cfg, init_localsbb, bblock);
6040 cfg->cbb = init_localsbb;
6042 start_bblock->next_bb = bblock;
6043 link_bblock (cfg, start_bblock, bblock);
6046 /* at this point we know, if security is TRUE, that some code needs to be generated */
6047 if (security && (cfg->method == method)) {
6050 cfg->stat_cas_demand_generation++;
6052 if (actions.demand.blob) {
6053 /* Add code for SecurityAction.Demand */
6054 EMIT_NEW_DECLSECCONST (cfg, args[0], image, actions.demand);
6055 EMIT_NEW_ICONST (cfg, args [1], actions.demand.size);
6056 /* Calls static void SecurityManager.InternalDemand (byte* permissions, int size); */
6057 mono_emit_method_call (cfg, secman->demand, args, NULL);
6059 if (actions.noncasdemand.blob) {
6060 /* CLR 1.x uses a .noncasdemand (but 2.x doesn't) */
6061 /* For Mono we re-route non-CAS Demand to Demand (as the managed code must deal with it anyway) */
6062 EMIT_NEW_DECLSECCONST (cfg, args[0], image, actions.noncasdemand);
6063 EMIT_NEW_ICONST (cfg, args [1], actions.noncasdemand.size);
6064 /* Calls static void SecurityManager.InternalDemand (byte* permissions, int size); */
6065 mono_emit_method_call (cfg, secman->demand, args, NULL);
6067 if (actions.demandchoice.blob) {
6068 /* New in 2.0, Demand must succeed for one of the permissions (i.e. not all) */
6069 EMIT_NEW_DECLSECCONST (cfg, args[0], image, actions.demandchoice);
6070 EMIT_NEW_ICONST (cfg, args [1], actions.demandchoice.size);
6071 /* Calls static void SecurityManager.InternalDemandChoice (byte* permissions, int size); */
6072 mono_emit_method_call (cfg, secman->demandchoice, args, NULL);
6076 /* we must Demand SecurityPermission.Unmanaged before p/invoking */
6078 mono_emit_method_call (cfg, secman->demandunmanaged, NULL, NULL);
6081 if (mono_security_get_mode () == MONO_SECURITY_MODE_CORE_CLR) {
6082 /* check if this is native code, e.g. an icall or a p/invoke */
6083 if (method->wrapper_type == MONO_WRAPPER_MANAGED_TO_NATIVE) {
6084 MonoMethod *wrapped = mono_marshal_method_from_wrapper (method);
6086 gboolean pinvk = (wrapped->flags & METHOD_ATTRIBUTE_PINVOKE_IMPL);
6087 gboolean icall = (wrapped->iflags & METHOD_IMPL_ATTRIBUTE_INTERNAL_CALL);
6089 /* if this ia a native call then it can only be JITted from platform code */
6090 if ((icall || pinvk) && method->klass && method->klass->image) {
6091 if (!mono_security_core_clr_is_platform_image (method->klass->image)) {
6092 MonoException *ex = icall ? mono_get_exception_security () :
6093 mono_get_exception_method_access ();
6094 emit_throw_exception (cfg, ex);
6101 if (header->code_size == 0)
6104 if (get_basic_blocks (cfg, header, cfg->real_offset, ip, end, &err_pos)) {
6109 if (cfg->method == method)
6110 mono_debug_init_method (cfg, bblock, breakpoint_id);
6112 for (n = 0; n < header->num_locals; ++n) {
6113 if (header->locals [n]->type == MONO_TYPE_VOID && !header->locals [n]->byref)
6118 /* We force the vtable variable here for all shared methods
6119 for the possibility that they might show up in a stack
6120 trace where their exact instantiation is needed. */
6121 if (cfg->generic_sharing_context && method == cfg->method) {
6122 if ((method->flags & METHOD_ATTRIBUTE_STATIC) ||
6123 mini_method_get_context (method)->method_inst ||
6124 method->klass->valuetype) {
6125 mono_get_vtable_var (cfg);
6127 /* FIXME: Is there a better way to do this?
6128 We need the variable live for the duration
6129 of the whole method. */
6130 cfg->args [0]->flags |= MONO_INST_INDIRECT;
6134 /* add a check for this != NULL to inlined methods */
6135 if (is_virtual_call) {
6138 NEW_ARGLOAD (cfg, arg_ins, 0);
6139 MONO_ADD_INS (cfg->cbb, arg_ins);
6140 MONO_EMIT_NEW_CHECK_THIS (cfg, arg_ins->dreg);
6143 skip_dead_blocks = !dont_verify;
6144 if (skip_dead_blocks) {
6145 original_bb = bb = mono_basic_block_split (method, &error);
6146 if (!mono_error_ok (&error)) {
6147 mono_error_cleanup (&error);
6153 /* we use a spare stack slot in SWITCH and NEWOBJ and others */
6154 stack_start = sp = mono_mempool_alloc0 (cfg->mempool, sizeof (MonoInst*) * (header->max_stack + 1));
6157 start_new_bblock = 0;
6160 if (cfg->method == method)
6161 cfg->real_offset = ip - header->code;
6163 cfg->real_offset = inline_offset;
6168 if (start_new_bblock) {
6169 bblock->cil_length = ip - bblock->cil_code;
6170 if (start_new_bblock == 2) {
6171 g_assert (ip == tblock->cil_code);
6173 GET_BBLOCK (cfg, tblock, ip);
6175 bblock->next_bb = tblock;
6178 start_new_bblock = 0;
6179 for (i = 0; i < bblock->in_scount; ++i) {
6180 if (cfg->verbose_level > 3)
6181 printf ("loading %d from temp %d\n", i, (int)bblock->in_stack [i]->inst_c0);
6182 EMIT_NEW_TEMPLOAD (cfg, ins, bblock->in_stack [i]->inst_c0);
6186 g_slist_free (class_inits);
6189 if ((tblock = cfg->cil_offset_to_bb [ip - cfg->cil_start]) && (tblock != bblock)) {
6190 link_bblock (cfg, bblock, tblock);
6191 if (sp != stack_start) {
6192 handle_stack_args (cfg, stack_start, sp - stack_start);
6194 CHECK_UNVERIFIABLE (cfg);
6196 bblock->next_bb = tblock;
6199 for (i = 0; i < bblock->in_scount; ++i) {
6200 if (cfg->verbose_level > 3)
6201 printf ("loading %d from temp %d\n", i, (int)bblock->in_stack [i]->inst_c0);
6202 EMIT_NEW_TEMPLOAD (cfg, ins, bblock->in_stack [i]->inst_c0);
6205 g_slist_free (class_inits);
6210 if (skip_dead_blocks) {
6211 int ip_offset = ip - header->code;
6213 if (ip_offset == bb->end)
6217 int op_size = mono_opcode_size (ip, end);
6218 g_assert (op_size > 0); /*The BB formation pass must catch all bad ops*/
6220 if (cfg->verbose_level > 3) printf ("SKIPPING DEAD OP at %x\n", ip_offset);
6222 if (ip_offset + op_size == bb->end) {
6223 MONO_INST_NEW (cfg, ins, OP_NOP);
6224 MONO_ADD_INS (bblock, ins);
6225 start_new_bblock = 1;
6233 * Sequence points are points where the debugger can place a breakpoint.
6234 * Currently, we generate these automatically at points where the IL
6237 if (seq_points && sp == stack_start) {
6238 NEW_SEQ_POINT (cfg, ins, ip - header->code, TRUE);
6239 MONO_ADD_INS (cfg->cbb, ins);
6242 bblock->real_offset = cfg->real_offset;
6244 if ((cfg->method == method) && cfg->coverage_info) {
6245 guint32 cil_offset = ip - header->code;
6246 cfg->coverage_info->data [cil_offset].cil_code = ip;
6248 /* TODO: Use an increment here */
6249 #if defined(TARGET_X86)
6250 MONO_INST_NEW (cfg, ins, OP_STORE_MEM_IMM);
6251 ins->inst_p0 = &(cfg->coverage_info->data [cil_offset].count);
6253 MONO_ADD_INS (cfg->cbb, ins);
6255 EMIT_NEW_PCONST (cfg, ins, &(cfg->coverage_info->data [cil_offset].count));
6256 MONO_EMIT_NEW_STORE_MEMBASE_IMM (cfg, OP_STORE_MEMBASE_IMM, ins->dreg, 0, 1);
6260 if (cfg->verbose_level > 3)
6261 printf ("converting (in B%d: stack: %d) %s", bblock->block_num, (int)(sp - stack_start), mono_disasm_code_one (NULL, method, ip, NULL));
6265 if (cfg->keep_cil_nops)
6266 MONO_INST_NEW (cfg, ins, OP_HARD_NOP);
6268 MONO_INST_NEW (cfg, ins, OP_NOP);
6270 MONO_ADD_INS (bblock, ins);
6273 if (should_insert_brekpoint (cfg->method)) {
6274 ins = mono_emit_jit_icall (cfg, mono_debugger_agent_user_break, NULL);
6276 MONO_INST_NEW (cfg, ins, OP_NOP);
6279 MONO_ADD_INS (bblock, ins);
6285 CHECK_STACK_OVF (1);
6286 n = (*ip)-CEE_LDARG_0;
6288 EMIT_NEW_ARGLOAD (cfg, ins, n);
6296 CHECK_STACK_OVF (1);
6297 n = (*ip)-CEE_LDLOC_0;
6299 EMIT_NEW_LOCLOAD (cfg, ins, n);
6308 n = (*ip)-CEE_STLOC_0;
6311 if (!dont_verify_stloc && target_type_is_incompatible (cfg, header->locals [n], *sp))
6313 emit_stloc_ir (cfg, sp, header, n);
6320 CHECK_STACK_OVF (1);
6323 EMIT_NEW_ARGLOAD (cfg, ins, n);
6329 CHECK_STACK_OVF (1);
6332 NEW_ARGLOADA (cfg, ins, n);
6333 MONO_ADD_INS (cfg->cbb, ins);
6343 if (!dont_verify_stloc && target_type_is_incompatible (cfg, param_types [ip [1]], *sp))
6345 EMIT_NEW_ARGSTORE (cfg, ins, n, *sp);
6350 CHECK_STACK_OVF (1);
6353 EMIT_NEW_LOCLOAD (cfg, ins, n);
6357 case CEE_LDLOCA_S: {
6358 unsigned char *tmp_ip;
6360 CHECK_STACK_OVF (1);
6361 CHECK_LOCAL (ip [1]);
6363 if ((tmp_ip = emit_optimized_ldloca_ir (cfg, ip, end, 1))) {
6369 EMIT_NEW_LOCLOADA (cfg, ins, ip [1]);
6378 CHECK_LOCAL (ip [1]);
6379 if (!dont_verify_stloc && target_type_is_incompatible (cfg, header->locals [ip [1]], *sp))
6381 emit_stloc_ir (cfg, sp, header, ip [1]);
6386 CHECK_STACK_OVF (1);
6387 EMIT_NEW_PCONST (cfg, ins, NULL);
6388 ins->type = STACK_OBJ;
6393 CHECK_STACK_OVF (1);
6394 EMIT_NEW_ICONST (cfg, ins, -1);
6407 CHECK_STACK_OVF (1);
6408 EMIT_NEW_ICONST (cfg, ins, (*ip) - CEE_LDC_I4_0);
6414 CHECK_STACK_OVF (1);
6416 EMIT_NEW_ICONST (cfg, ins, *((signed char*)ip));
6422 CHECK_STACK_OVF (1);
6423 EMIT_NEW_ICONST (cfg, ins, (gint32)read32 (ip + 1));
6429 CHECK_STACK_OVF (1);
6430 MONO_INST_NEW (cfg, ins, OP_I8CONST);
6431 ins->type = STACK_I8;
6432 ins->dreg = alloc_dreg (cfg, STACK_I8);
6434 ins->inst_l = (gint64)read64 (ip);
6435 MONO_ADD_INS (bblock, ins);
6441 gboolean use_aotconst = FALSE;
6443 #ifdef TARGET_POWERPC
6444 /* FIXME: Clean this up */
6445 if (cfg->compile_aot)
6446 use_aotconst = TRUE;
6449 /* FIXME: we should really allocate this only late in the compilation process */
6450 f = mono_domain_alloc (cfg->domain, sizeof (float));
6452 CHECK_STACK_OVF (1);
6458 EMIT_NEW_AOTCONST (cfg, cons, MONO_PATCH_INFO_R4, f);
6460 dreg = alloc_freg (cfg);
6461 EMIT_NEW_LOAD_MEMBASE (cfg, ins, OP_LOADR4_MEMBASE, dreg, cons->dreg, 0);
6462 ins->type = STACK_R8;
6464 MONO_INST_NEW (cfg, ins, OP_R4CONST);
6465 ins->type = STACK_R8;
6466 ins->dreg = alloc_dreg (cfg, STACK_R8);
6468 MONO_ADD_INS (bblock, ins);
6478 gboolean use_aotconst = FALSE;
6480 #ifdef TARGET_POWERPC
6481 /* FIXME: Clean this up */
6482 if (cfg->compile_aot)
6483 use_aotconst = TRUE;
6486 /* FIXME: we should really allocate this only late in the compilation process */
6487 d = mono_domain_alloc (cfg->domain, sizeof (double));
6489 CHECK_STACK_OVF (1);
6495 EMIT_NEW_AOTCONST (cfg, cons, MONO_PATCH_INFO_R8, d);
6497 dreg = alloc_freg (cfg);
6498 EMIT_NEW_LOAD_MEMBASE (cfg, ins, OP_LOADR8_MEMBASE, dreg, cons->dreg, 0);
6499 ins->type = STACK_R8;
6501 MONO_INST_NEW (cfg, ins, OP_R8CONST);
6502 ins->type = STACK_R8;
6503 ins->dreg = alloc_dreg (cfg, STACK_R8);
6505 MONO_ADD_INS (bblock, ins);
6514 MonoInst *temp, *store;
6516 CHECK_STACK_OVF (1);
6520 temp = mono_compile_create_var (cfg, type_from_stack_type (ins), OP_LOCAL);
6521 EMIT_NEW_TEMPSTORE (cfg, store, temp->inst_c0, ins);
6523 EMIT_NEW_TEMPLOAD (cfg, ins, temp->inst_c0);
6526 EMIT_NEW_TEMPLOAD (cfg, ins, temp->inst_c0);
6539 if (sp [0]->type == STACK_R8)
6540 /* we need to pop the value from the x86 FP stack */
6541 MONO_EMIT_NEW_UNALU (cfg, OP_X86_FPOP, -1, sp [0]->dreg);
6550 if (stack_start != sp)
6552 token = read32 (ip + 1);
6553 /* FIXME: check the signature matches */
6554 cmethod = mini_get_method (cfg, method, token, NULL, generic_context);
6556 if (!cmethod || mono_loader_get_last_error ())
6559 if (cfg->generic_sharing_context && mono_method_check_context_used (cmethod))
6560 GENERIC_SHARING_FAILURE (CEE_JMP);
6562 if (mono_security_get_mode () == MONO_SECURITY_MODE_CAS)
6563 CHECK_CFG_EXCEPTION;
6565 #ifdef MONO_ARCH_USE_OP_TAIL_CALL
6567 MonoMethodSignature *fsig = mono_method_signature (cmethod);
6570 /* Handle tail calls similarly to calls */
6571 n = fsig->param_count + fsig->hasthis;
6573 MONO_INST_NEW_CALL (cfg, call, OP_TAILCALL);
6574 call->method = cmethod;
6575 call->tail_call = TRUE;
6576 call->signature = mono_method_signature (cmethod);
6577 call->args = mono_mempool_alloc (cfg->mempool, sizeof (MonoInst*) * n);
6578 call->inst.inst_p0 = cmethod;
6579 for (i = 0; i < n; ++i)
6580 EMIT_NEW_ARGLOAD (cfg, call->args [i], i);
6582 mono_arch_emit_call (cfg, call);
6583 MONO_ADD_INS (bblock, (MonoInst*)call);
6586 for (i = 0; i < num_args; ++i)
6587 /* Prevent arguments from being optimized away */
6588 arg_array [i]->flags |= MONO_INST_VOLATILE;
6590 MONO_INST_NEW_CALL (cfg, call, OP_JMP);
6591 ins = (MonoInst*)call;
6592 ins->inst_p0 = cmethod;
6593 MONO_ADD_INS (bblock, ins);
6597 start_new_bblock = 1;
6602 case CEE_CALLVIRT: {
6603 MonoInst *addr = NULL;
6604 MonoMethodSignature *fsig = NULL;
6606 int virtual = *ip == CEE_CALLVIRT;
6607 int calli = *ip == CEE_CALLI;
6608 gboolean pass_imt_from_rgctx = FALSE;
6609 MonoInst *imt_arg = NULL;
6610 gboolean pass_vtable = FALSE;
6611 gboolean pass_mrgctx = FALSE;
6612 MonoInst *vtable_arg = NULL;
6613 gboolean check_this = FALSE;
6614 gboolean supported_tail_call = FALSE;
6617 token = read32 (ip + 1);
6624 if (method->wrapper_type != MONO_WRAPPER_NONE)
6625 fsig = (MonoMethodSignature *)mono_method_get_wrapper_data (method, token);
6627 fsig = mono_metadata_parse_signature (image, token);
6629 n = fsig->param_count + fsig->hasthis;
6631 if (method->dynamic && fsig->pinvoke) {
6635 * This is a call through a function pointer using a pinvoke
6636 * signature. Have to create a wrapper and call that instead.
6637 * FIXME: This is very slow, need to create a wrapper at JIT time
6638 * instead based on the signature.
6640 EMIT_NEW_IMAGECONST (cfg, args [0], method->klass->image);
6641 EMIT_NEW_PCONST (cfg, args [1], fsig);
6643 addr = mono_emit_jit_icall (cfg, mono_get_native_calli_wrapper, args);
6646 MonoMethod *cil_method;
6648 if (method->wrapper_type != MONO_WRAPPER_NONE) {
6649 if (constrained_call && cfg->verbose_level > 2)
6650 printf ("DM Constrained call to %s\n", mono_type_get_full_name (constrained_call));
6651 cmethod = (MonoMethod *)mono_method_get_wrapper_data (method, token);
6652 cil_method = cmethod;
6653 if (constrained_call && !((constrained_call->byval_arg.type == MONO_TYPE_VAR ||
6654 constrained_call->byval_arg.type == MONO_TYPE_MVAR) &&
6655 cfg->generic_sharing_context)) {
6656 cmethod = mono_get_method_constrained_with_method (image, cil_method, constrained_call, generic_context);
6658 } else if (constrained_call) {
6659 if (cfg->verbose_level > 2)
6660 printf ("Constrained call to %s\n", mono_type_get_full_name (constrained_call));
6662 if ((constrained_call->byval_arg.type == MONO_TYPE_VAR || constrained_call->byval_arg.type == MONO_TYPE_MVAR) && cfg->generic_sharing_context) {
6664 * This is needed since get_method_constrained can't find
6665 * the method in klass representing a type var.
6666 * The type var is guaranteed to be a reference type in this
6669 cmethod = mini_get_method (cfg, method, token, NULL, generic_context);
6670 cil_method = cmethod;
6671 g_assert (!cmethod->klass->valuetype);
6673 cmethod = mono_get_method_constrained (image, token, constrained_call, generic_context, &cil_method);
6676 cmethod = mini_get_method (cfg, method, token, NULL, generic_context);
6677 cil_method = cmethod;
6680 if (!cmethod || mono_loader_get_last_error ())
6682 if (!dont_verify && !cfg->skip_visibility) {
6683 MonoMethod *target_method = cil_method;
6684 if (method->is_inflated) {
6685 target_method = mini_get_method_allow_open (method, token, NULL, &(mono_method_get_generic_container (method_definition)->context));
6687 if (!mono_method_can_access_method (method_definition, target_method) &&
6688 !mono_method_can_access_method (method, cil_method))
6689 METHOD_ACCESS_FAILURE;
6692 if (mono_security_get_mode () == MONO_SECURITY_MODE_CORE_CLR)
6693 ensure_method_is_allowed_to_call_method (cfg, method, cil_method, bblock, ip);
6695 if (!virtual && (cmethod->flags & METHOD_ATTRIBUTE_ABSTRACT))
6696 /* MS.NET seems to silently convert this to a callvirt */
6701 * MS.NET accepts non virtual calls to virtual final methods of transparent proxy classes and
6702 * converts to a callvirt.
6704 * tests/bug-515884.il is an example of this behavior
6706 const int test_flags = METHOD_ATTRIBUTE_VIRTUAL | METHOD_ATTRIBUTE_FINAL | METHOD_ATTRIBUTE_STATIC;
6707 const int expected_flags = METHOD_ATTRIBUTE_VIRTUAL | METHOD_ATTRIBUTE_FINAL;
6708 if (!virtual && cmethod->klass->marshalbyref && (cmethod->flags & test_flags) == expected_flags && cfg->method->wrapper_type == MONO_WRAPPER_NONE)
6712 if (!cmethod->klass->inited)
6713 if (!mono_class_init (cmethod->klass))
6716 if (cmethod->iflags & METHOD_IMPL_ATTRIBUTE_INTERNAL_CALL &&
6717 mini_class_is_system_array (cmethod->klass)) {
6718 array_rank = cmethod->klass->rank;
6719 fsig = mono_method_signature (cmethod);
6721 fsig = mono_method_signature (cmethod);
6726 if (fsig->pinvoke) {
6727 MonoMethod *wrapper = mono_marshal_get_native_wrapper (cmethod,
6728 check_for_pending_exc, FALSE);
6729 fsig = mono_method_signature (wrapper);
6730 } else if (constrained_call) {
6731 fsig = mono_method_signature (cmethod);
6733 fsig = mono_method_get_signature_full (cmethod, image, token, generic_context);
6737 mono_save_token_info (cfg, image, token, cil_method);
6739 n = fsig->param_count + fsig->hasthis;
6741 if (mono_security_get_mode () == MONO_SECURITY_MODE_CAS) {
6742 if (check_linkdemand (cfg, method, cmethod))
6744 CHECK_CFG_EXCEPTION;
6747 if (cmethod->string_ctor && method->wrapper_type != MONO_WRAPPER_RUNTIME_INVOKE)
6748 g_assert_not_reached ();
6751 if (!cfg->generic_sharing_context && cmethod && cmethod->klass->generic_container)
6754 if (!cfg->generic_sharing_context && cmethod)
6755 g_assert (!mono_method_check_context_used (cmethod));
6759 //g_assert (!virtual || fsig->hasthis);
6763 if (constrained_call) {
6765 * We have the `constrained.' prefix opcode.
6767 if (constrained_call->valuetype && !cmethod->klass->valuetype) {
6769 * The type parameter is instantiated as a valuetype,
6770 * but that type doesn't override the method we're
6771 * calling, so we need to box `this'.
6773 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, ins, &constrained_call->byval_arg, sp [0]->dreg, 0);
6774 ins->klass = constrained_call;
6775 sp [0] = handle_box (cfg, ins, constrained_call, mono_class_check_context_used (constrained_call));
6776 CHECK_CFG_EXCEPTION;
6777 } else if (!constrained_call->valuetype) {
6778 int dreg = alloc_ireg_ref (cfg);
6781 * The type parameter is instantiated as a reference
6782 * type. We have a managed pointer on the stack, so
6783 * we need to dereference it here.
6785 EMIT_NEW_LOAD_MEMBASE (cfg, ins, OP_LOAD_MEMBASE, dreg, sp [0]->dreg, 0);
6786 ins->type = STACK_OBJ;
6788 } else if (cmethod->klass->valuetype)
6790 constrained_call = NULL;
6793 if (*ip != CEE_CALLI && check_call_signature (cfg, fsig, sp))
6797 * If the callee is a shared method, then its static cctor
6798 * might not get called after the call was patched.
6800 if (cfg->generic_sharing_context && cmethod && cmethod->klass != method->klass && cmethod->klass->generic_class && mono_method_is_generic_sharable_impl (cmethod, TRUE) && mono_class_needs_cctor_run (cmethod->klass, method)) {
6801 emit_generic_class_init (cfg, cmethod->klass);
6802 CHECK_TYPELOAD (cmethod->klass);
6805 if (cmethod && ((cmethod->flags & METHOD_ATTRIBUTE_STATIC) || cmethod->klass->valuetype) &&
6806 (cmethod->klass->generic_class || cmethod->klass->generic_container)) {
6807 gboolean sharing_enabled = mono_class_generic_sharing_enabled (cmethod->klass);
6808 MonoGenericContext *context = mini_class_get_context (cmethod->klass);
6809 gboolean context_sharable = mono_generic_context_is_sharable (context, TRUE);
6812 * Pass vtable iff target method might
6813 * be shared, which means that sharing
6814 * is enabled for its class and its
6815 * context is sharable (and it's not a
6818 if (sharing_enabled && context_sharable &&
6819 !(mini_method_get_context (cmethod) && mini_method_get_context (cmethod)->method_inst))
6823 if (cmethod && mini_method_get_context (cmethod) &&
6824 mini_method_get_context (cmethod)->method_inst) {
6825 gboolean sharing_enabled = mono_class_generic_sharing_enabled (cmethod->klass);
6826 MonoGenericContext *context = mini_method_get_context (cmethod);
6827 gboolean context_sharable = mono_generic_context_is_sharable (context, TRUE);
6829 g_assert (!pass_vtable);
6831 if (sharing_enabled && context_sharable)
6835 if (cfg->generic_sharing_context && cmethod) {
6836 MonoGenericContext *cmethod_context = mono_method_get_context (cmethod);
6838 context_used = mono_method_check_context_used (cmethod);
6840 if (context_used && (cmethod->klass->flags & TYPE_ATTRIBUTE_INTERFACE)) {
6841 /* Generic method interface
6842 calls are resolved via a
6843 helper function and don't
6845 if (!cmethod_context || !cmethod_context->method_inst)
6846 pass_imt_from_rgctx = TRUE;
6850 * If a shared method calls another
6851 * shared method then the caller must
6852 * have a generic sharing context
6853 * because the magic trampoline
6854 * requires it. FIXME: We shouldn't
6855 * have to force the vtable/mrgctx
6856 * variable here. Instead there
6857 * should be a flag in the cfg to
6858 * request a generic sharing context.
6861 ((method->flags & METHOD_ATTRIBUTE_STATIC) || method->klass->valuetype))
6862 mono_get_vtable_var (cfg);
6867 vtable_arg = emit_get_rgctx_klass (cfg, context_used, cmethod->klass, MONO_RGCTX_INFO_VTABLE);
6869 MonoVTable *vtable = mono_class_vtable (cfg->domain, cmethod->klass);
6871 CHECK_TYPELOAD (cmethod->klass);
6872 EMIT_NEW_VTABLECONST (cfg, vtable_arg, vtable);
6877 g_assert (!vtable_arg);
6879 if (!cfg->compile_aot) {
6881 * emit_get_rgctx_method () calls mono_class_vtable () so check
6882 * for type load errors before.
6884 mono_class_setup_vtable (cmethod->klass);
6885 CHECK_TYPELOAD (cmethod->klass);
6888 vtable_arg = emit_get_rgctx_method (cfg, context_used, cmethod, MONO_RGCTX_INFO_METHOD_RGCTX);
6890 /* !marshalbyref is needed to properly handle generic methods + remoting */
6891 if ((!(cmethod->flags & METHOD_ATTRIBUTE_VIRTUAL) ||
6892 MONO_METHOD_IS_FINAL (cmethod)) &&
6893 !cmethod->klass->marshalbyref) {
6900 if (pass_imt_from_rgctx) {
6901 g_assert (!pass_vtable);
6904 imt_arg = emit_get_rgctx_method (cfg, context_used,
6905 cmethod, MONO_RGCTX_INFO_METHOD);
6909 MONO_EMIT_NEW_CHECK_THIS (cfg, sp [0]->dreg);
6911 /* Calling virtual generic methods */
6912 if (cmethod && virtual &&
6913 (cmethod->flags & METHOD_ATTRIBUTE_VIRTUAL) &&
6914 !(MONO_METHOD_IS_FINAL (cmethod) &&
6915 cmethod->wrapper_type != MONO_WRAPPER_REMOTING_INVOKE_WITH_CHECK) &&
6916 mono_method_signature (cmethod)->generic_param_count) {
6917 MonoInst *this_temp, *this_arg_temp, *store;
6918 MonoInst *iargs [4];
6920 g_assert (mono_method_signature (cmethod)->is_inflated);
6922 /* Prevent inlining of methods that contain indirect calls */
6925 #if MONO_ARCH_HAVE_GENERALIZED_IMT_THUNK && defined(MONO_ARCH_GSHARED_SUPPORTED)
6926 if (cmethod->wrapper_type == MONO_WRAPPER_NONE && mono_use_imt) {
6927 g_assert (!imt_arg);
6929 g_assert (cmethod->is_inflated);
6930 imt_arg = emit_get_rgctx_method (cfg, context_used,
6931 cmethod, MONO_RGCTX_INFO_METHOD);
6932 ins = mono_emit_method_call_full (cfg, cmethod, fsig, sp, sp [0], imt_arg, NULL);
6936 this_temp = mono_compile_create_var (cfg, type_from_stack_type (sp [0]), OP_LOCAL);
6937 NEW_TEMPSTORE (cfg, store, this_temp->inst_c0, sp [0]);
6938 MONO_ADD_INS (bblock, store);
6940 /* FIXME: This should be a managed pointer */
6941 this_arg_temp = mono_compile_create_var (cfg, &mono_defaults.int_class->byval_arg, OP_LOCAL);
6943 EMIT_NEW_TEMPLOAD (cfg, iargs [0], this_temp->inst_c0);
6944 iargs [1] = emit_get_rgctx_method (cfg, context_used,
6945 cmethod, MONO_RGCTX_INFO_METHOD);
6946 EMIT_NEW_TEMPLOADA (cfg, iargs [2], this_arg_temp->inst_c0);
6947 addr = mono_emit_jit_icall (cfg,
6948 mono_helper_compile_generic_method, iargs);
6950 EMIT_NEW_TEMPLOAD (cfg, sp [0], this_arg_temp->inst_c0);
6952 ins = (MonoInst*)mono_emit_calli (cfg, fsig, sp, addr, NULL);
6955 if (!MONO_TYPE_IS_VOID (fsig->ret))
6956 *sp++ = mono_emit_widen_call_res (cfg, ins, fsig);
6958 CHECK_CFG_EXCEPTION;
6966 * Implement a workaround for the inherent races involved in locking:
6972 * If a thread abort happens between the call to Monitor.Enter () and the start of the
6973 * try block, the Exit () won't be executed, see:
6974 * http://www.bluebytesoftware.com/blog/2007/01/30/MonitorEnterThreadAbortsAndOrphanedLocks.aspx
6975 * To work around this, we extend such try blocks to include the last x bytes
6976 * of the Monitor.Enter () call.
6978 if (cmethod && cmethod->klass == mono_defaults.monitor_class && !strcmp (cmethod->name, "Enter") && mono_method_signature (cmethod)->param_count == 1) {
6979 MonoBasicBlock *tbb;
6981 GET_BBLOCK (cfg, tbb, ip + 5);
6983 * Only extend try blocks with a finally, to avoid catching exceptions thrown
6984 * from Monitor.Enter like ArgumentNullException.
6986 if (tbb->try_start && MONO_REGION_FLAGS(tbb->region) == MONO_EXCEPTION_CLAUSE_FINALLY) {
6987 /* Mark this bblock as needing to be extended */
6988 tbb->extend_try_block = TRUE;
6992 /* Conversion to a JIT intrinsic */
6993 if (cmethod && (cfg->opt & MONO_OPT_INTRINS) && (ins = mini_emit_inst_for_method (cfg, cmethod, fsig, sp))) {
6995 if (!MONO_TYPE_IS_VOID (fsig->ret)) {
6996 type_to_eval_stack_type ((cfg), fsig->ret, ins);
7001 CHECK_CFG_EXCEPTION;
7009 if ((cfg->opt & MONO_OPT_INLINE) && cmethod &&
7010 (!virtual || !(cmethod->flags & METHOD_ATTRIBUTE_VIRTUAL) || MONO_METHOD_IS_FINAL (cmethod)) &&
7011 !disable_inline && mono_method_check_inlining (cfg, cmethod) &&
7012 !g_list_find (dont_inline, cmethod)) {
7014 gboolean always = FALSE;
7016 if ((cmethod->iflags & METHOD_IMPL_ATTRIBUTE_INTERNAL_CALL) ||
7017 (cmethod->flags & METHOD_ATTRIBUTE_PINVOKE_IMPL)) {
7018 /* Prevent inlining of methods that call wrappers */
7020 cmethod = mono_marshal_get_native_wrapper (cmethod, check_for_pending_exc, FALSE);
7024 if ((costs = inline_method (cfg, cmethod, fsig, sp, ip, cfg->real_offset, dont_inline, always))) {
7026 cfg->real_offset += 5;
7029 if (!MONO_TYPE_IS_VOID (fsig->ret))
7030 /* *sp is already set by inline_method */
7033 inline_costs += costs;
7039 inline_costs += 10 * num_calls++;
7041 /* Tail recursion elimination */
7042 if ((cfg->opt & MONO_OPT_TAILC) && *ip == CEE_CALL && cmethod == method && ip [5] == CEE_RET && !vtable_arg) {
7043 gboolean has_vtargs = FALSE;
7046 /* Prevent inlining of methods with tail calls (the call stack would be altered) */
7049 /* keep it simple */
7050 for (i = fsig->param_count - 1; i >= 0; i--) {
7051 if (MONO_TYPE_ISSTRUCT (mono_method_signature (cmethod)->params [i]))
7056 for (i = 0; i < n; ++i)
7057 EMIT_NEW_ARGSTORE (cfg, ins, i, sp [i]);
7058 MONO_INST_NEW (cfg, ins, OP_BR);
7059 MONO_ADD_INS (bblock, ins);
7060 tblock = start_bblock->out_bb [0];
7061 link_bblock (cfg, bblock, tblock);
7062 ins->inst_target_bb = tblock;
7063 start_new_bblock = 1;
7065 /* skip the CEE_RET, too */
7066 if (ip_in_bb (cfg, bblock, ip + 5))
7076 /* Generic sharing */
7077 /* FIXME: only do this for generic methods if
7078 they are not shared! */
7079 if (context_used && !imt_arg && !array_rank &&
7080 (!mono_method_is_generic_sharable_impl (cmethod, TRUE) ||
7081 !mono_class_generic_sharing_enabled (cmethod->klass)) &&
7082 (!virtual || MONO_METHOD_IS_FINAL (cmethod) ||
7083 !(cmethod->flags & METHOD_ATTRIBUTE_VIRTUAL))) {
7086 g_assert (cfg->generic_sharing_context && cmethod);
7090 * We are compiling a call to a
7091 * generic method from shared code,
7092 * which means that we have to look up
7093 * the method in the rgctx and do an
7096 addr = emit_get_rgctx_method (cfg, context_used, cmethod, MONO_RGCTX_INFO_GENERIC_METHOD_CODE);
7099 /* Indirect calls */
7101 g_assert (!imt_arg);
7103 if (*ip == CEE_CALL)
7104 g_assert (context_used);
7105 else if (*ip == CEE_CALLI)
7106 g_assert (!vtable_arg);
7108 /* FIXME: what the hell is this??? */
7109 g_assert (cmethod->flags & METHOD_ATTRIBUTE_FINAL ||
7110 !(cmethod->flags & METHOD_ATTRIBUTE_FINAL));
7112 /* Prevent inlining of methods with indirect calls */
7116 ins = (MonoInst*)mono_emit_calli (cfg, fsig, sp, addr, vtable_arg);
7118 if (addr->opcode == OP_AOTCONST && addr->inst_c1 == MONO_PATCH_INFO_ICALL_ADDR) {
7120 * Instead of emitting an indirect call, emit a direct call
7121 * with the contents of the aotconst as the patch info.
7123 ins = (MonoInst*)mono_emit_abs_call (cfg, MONO_PATCH_INFO_ICALL_ADDR, addr->inst_p0, fsig, sp);
7125 } else if (addr->opcode == OP_GOT_ENTRY && addr->inst_right->inst_c1 == MONO_PATCH_INFO_ICALL_ADDR) {
7126 ins = (MonoInst*)mono_emit_abs_call (cfg, MONO_PATCH_INFO_ICALL_ADDR, addr->inst_right->inst_left, fsig, sp);
7129 ins = (MonoInst*)mono_emit_calli (cfg, fsig, sp, addr, NULL);
7132 if (!MONO_TYPE_IS_VOID (fsig->ret))
7133 *sp++ = mono_emit_widen_call_res (cfg, ins, fsig);
7135 CHECK_CFG_EXCEPTION;
7146 if (strcmp (cmethod->name, "Set") == 0) { /* array Set */
7147 MonoInst *val = sp [fsig->param_count];
7149 if (val->type == STACK_OBJ) {
7150 MonoInst *iargs [2];
7155 mono_emit_jit_icall (cfg, mono_helper_stelem_ref_check, iargs);
7158 addr = mini_emit_ldelema_ins (cfg, cmethod, sp, ip, TRUE);
7159 EMIT_NEW_STORE_MEMBASE_TYPE (cfg, ins, fsig->params [fsig->param_count - 1], addr->dreg, 0, val->dreg);
7160 if (cfg->gen_write_barriers && val->type == STACK_OBJ && !(val->opcode == OP_PCONST && val->inst_c0 == 0))
7161 emit_write_barrier (cfg, addr, val, 0);
7162 } else if (strcmp (cmethod->name, "Get") == 0) { /* array Get */
7163 addr = mini_emit_ldelema_ins (cfg, cmethod, sp, ip, FALSE);
7165 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, ins, fsig->ret, addr->dreg, 0);
7168 } else if (strcmp (cmethod->name, "Address") == 0) { /* array Address */
7169 if (!cmethod->klass->element_class->valuetype && !readonly)
7170 mini_emit_check_array_type (cfg, sp [0], cmethod->klass);
7171 CHECK_TYPELOAD (cmethod->klass);
7174 addr = mini_emit_ldelema_ins (cfg, cmethod, sp, ip, FALSE);
7177 g_assert_not_reached ();
7180 CHECK_CFG_EXCEPTION;
7187 ins = mini_redirect_call (cfg, cmethod, fsig, sp, virtual ? sp [0] : NULL);
7189 if (!MONO_TYPE_IS_VOID (fsig->ret))
7190 *sp++ = mono_emit_widen_call_res (cfg, ins, fsig);
7192 CHECK_CFG_EXCEPTION;
7199 /* Tail prefix / tail call optimization */
7201 /* FIXME: Enabling TAILC breaks some inlining/stack trace/etc tests */
7202 /* FIXME: runtime generic context pointer for jumps? */
7203 /* FIXME: handle this for generic sharing eventually */
7204 supported_tail_call = cmethod &&
7205 ((((ins_flag & MONO_INST_TAILCALL) && (*ip == CEE_CALL))
7206 ))//|| ((cfg->opt & MONO_OPT_TAILC) && *ip == CEE_CALL && ip [5] == CEE_RET))
7207 && !vtable_arg && !cfg->generic_sharing_context && is_supported_tail_call (cfg, method, cmethod, fsig);
7209 if (supported_tail_call) {
7212 /* Prevent inlining of methods with tail calls (the call stack would be altered) */
7215 //printf ("HIT: %s -> %s\n", mono_method_full_name (cfg->method, TRUE), mono_method_full_name (cmethod, TRUE));
7217 #ifdef MONO_ARCH_USE_OP_TAIL_CALL
7218 /* Handle tail calls similarly to calls */
7219 call = mono_emit_call_args (cfg, mono_method_signature (cmethod), sp, FALSE, FALSE, TRUE, FALSE);
7221 MONO_INST_NEW_CALL (cfg, call, OP_JMP);
7222 call->tail_call = TRUE;
7223 call->method = cmethod;
7224 call->signature = mono_method_signature (cmethod);
7227 * We implement tail calls by storing the actual arguments into the
7228 * argument variables, then emitting a CEE_JMP.
7230 for (i = 0; i < n; ++i) {
7231 /* Prevent argument from being register allocated */
7232 arg_array [i]->flags |= MONO_INST_VOLATILE;
7233 EMIT_NEW_ARGSTORE (cfg, ins, i, sp [i]);
7237 ins = (MonoInst*)call;
7238 ins->inst_p0 = cmethod;
7239 ins->inst_p1 = arg_array [0];
7240 MONO_ADD_INS (bblock, ins);
7241 link_bblock (cfg, bblock, end_bblock);
7242 start_new_bblock = 1;
7244 CHECK_CFG_EXCEPTION;
7249 // FIXME: Eliminate unreachable epilogs
7252 * OP_TAILCALL has no return value, so skip the CEE_RET if it is
7253 * only reachable from this call.
7255 GET_BBLOCK (cfg, tblock, ip);
7256 if (tblock == bblock || tblock->in_count == 0)
7263 ins = mono_emit_method_call_full (cfg, cmethod, fsig, sp, virtual ? sp [0] : NULL,
7264 imt_arg, vtable_arg);
7266 if (!MONO_TYPE_IS_VOID (fsig->ret))
7267 *sp++ = mono_emit_widen_call_res (cfg, ins, fsig);
7269 CHECK_CFG_EXCEPTION;
7276 if (cfg->method != method) {
7277 /* return from inlined method */
7279 * If in_count == 0, that means the ret is unreachable due to
7280 * being preceded by a throw. In that case, inline_method () will
7281 * handle setting the return value
7282 * (test case: test_0_inline_throw ()).
7284 if (return_var && cfg->cbb->in_count) {
7288 //g_assert (returnvar != -1);
7289 EMIT_NEW_TEMPSTORE (cfg, store, return_var->inst_c0, *sp);
7290 cfg->ret_var_set = TRUE;
7294 MonoType *ret_type = mono_method_signature (method)->ret;
7298 * Place a seq point here too even though the IL stack is not
7299 * empty, so a step over on
7302 * will work correctly.
7304 NEW_SEQ_POINT (cfg, ins, ip - header->code, TRUE);
7305 MONO_ADD_INS (cfg->cbb, ins);
7308 g_assert (!return_var);
7312 if ((method->wrapper_type == MONO_WRAPPER_DYNAMIC_METHOD || method->wrapper_type == MONO_WRAPPER_NONE) && target_type_is_incompatible (cfg, ret_type, *sp))
7315 if (mini_type_to_stind (cfg, ret_type) == CEE_STOBJ) {
7318 if (!cfg->vret_addr) {
7321 EMIT_NEW_VARSTORE (cfg, ins, cfg->ret, ret_type, (*sp));
7323 EMIT_NEW_RETLOADA (cfg, ret_addr);
7325 EMIT_NEW_STORE_MEMBASE (cfg, ins, OP_STOREV_MEMBASE, ret_addr->dreg, 0, (*sp)->dreg);
7326 ins->klass = mono_class_from_mono_type (ret_type);
7329 #ifdef MONO_ARCH_SOFT_FLOAT
7330 if (COMPILE_SOFT_FLOAT (cfg) && !ret_type->byref && ret_type->type == MONO_TYPE_R4) {
7331 MonoInst *iargs [1];
7335 conv = mono_emit_jit_icall (cfg, mono_fload_r4_arg, iargs);
7336 mono_arch_emit_setret (cfg, method, conv);
7338 mono_arch_emit_setret (cfg, method, *sp);
7341 mono_arch_emit_setret (cfg, method, *sp);
7346 if (sp != stack_start)
7348 MONO_INST_NEW (cfg, ins, OP_BR);
7350 ins->inst_target_bb = end_bblock;
7351 MONO_ADD_INS (bblock, ins);
7352 link_bblock (cfg, bblock, end_bblock);
7353 start_new_bblock = 1;
7357 MONO_INST_NEW (cfg, ins, OP_BR);
7359 target = ip + 1 + (signed char)(*ip);
7361 GET_BBLOCK (cfg, tblock, target);
7362 link_bblock (cfg, bblock, tblock);
7363 ins->inst_target_bb = tblock;
7364 if (sp != stack_start) {
7365 handle_stack_args (cfg, stack_start, sp - stack_start);
7367 CHECK_UNVERIFIABLE (cfg);
7369 MONO_ADD_INS (bblock, ins);
7370 start_new_bblock = 1;
7371 inline_costs += BRANCH_COST;
7385 MONO_INST_NEW (cfg, ins, *ip + BIG_BRANCH_OFFSET);
7387 target = ip + 1 + *(signed char*)ip;
7393 inline_costs += BRANCH_COST;
7397 MONO_INST_NEW (cfg, ins, OP_BR);
7400 target = ip + 4 + (gint32)read32(ip);
7402 GET_BBLOCK (cfg, tblock, target);
7403 link_bblock (cfg, bblock, tblock);
7404 ins->inst_target_bb = tblock;
7405 if (sp != stack_start) {
7406 handle_stack_args (cfg, stack_start, sp - stack_start);
7408 CHECK_UNVERIFIABLE (cfg);
7411 MONO_ADD_INS (bblock, ins);
7413 start_new_bblock = 1;
7414 inline_costs += BRANCH_COST;
7421 gboolean is_short = ((*ip) == CEE_BRFALSE_S) || ((*ip) == CEE_BRTRUE_S);
7422 gboolean is_true = ((*ip) == CEE_BRTRUE_S) || ((*ip) == CEE_BRTRUE);
7423 guint32 opsize = is_short ? 1 : 4;
7425 CHECK_OPSIZE (opsize);
7427 if (sp [-1]->type == STACK_VTYPE || sp [-1]->type == STACK_R8)
7430 target = ip + opsize + (is_short ? *(signed char*)ip : (gint32)read32(ip));
7435 GET_BBLOCK (cfg, tblock, target);
7436 link_bblock (cfg, bblock, tblock);
7437 GET_BBLOCK (cfg, tblock, ip);
7438 link_bblock (cfg, bblock, tblock);
7440 if (sp != stack_start) {
7441 handle_stack_args (cfg, stack_start, sp - stack_start);
7442 CHECK_UNVERIFIABLE (cfg);
7445 MONO_INST_NEW(cfg, cmp, OP_ICOMPARE_IMM);
7446 cmp->sreg1 = sp [0]->dreg;
7447 type_from_op (cmp, sp [0], NULL);
7450 #if SIZEOF_REGISTER == 4
7451 if (cmp->opcode == OP_LCOMPARE_IMM) {
7452 /* Convert it to OP_LCOMPARE */
7453 MONO_INST_NEW (cfg, ins, OP_I8CONST);
7454 ins->type = STACK_I8;
7455 ins->dreg = alloc_dreg (cfg, STACK_I8);
7457 MONO_ADD_INS (bblock, ins);
7458 cmp->opcode = OP_LCOMPARE;
7459 cmp->sreg2 = ins->dreg;
7462 MONO_ADD_INS (bblock, cmp);
7464 MONO_INST_NEW (cfg, ins, is_true ? CEE_BNE_UN : CEE_BEQ);
7465 type_from_op (ins, sp [0], NULL);
7466 MONO_ADD_INS (bblock, ins);
7467 ins->inst_many_bb = mono_mempool_alloc (cfg->mempool, sizeof(gpointer)*2);
7468 GET_BBLOCK (cfg, tblock, target);
7469 ins->inst_true_bb = tblock;
7470 GET_BBLOCK (cfg, tblock, ip);
7471 ins->inst_false_bb = tblock;
7472 start_new_bblock = 2;
7475 inline_costs += BRANCH_COST;
7490 MONO_INST_NEW (cfg, ins, *ip);
7492 target = ip + 4 + (gint32)read32(ip);
7498 inline_costs += BRANCH_COST;
7502 MonoBasicBlock **targets;
7503 MonoBasicBlock *default_bblock;
7504 MonoJumpInfoBBTable *table;
7505 int offset_reg = alloc_preg (cfg);
7506 int target_reg = alloc_preg (cfg);
7507 int table_reg = alloc_preg (cfg);
7508 int sum_reg = alloc_preg (cfg);
7509 gboolean use_op_switch;
7513 n = read32 (ip + 1);
7516 if ((src1->type != STACK_I4) && (src1->type != STACK_PTR))
7520 CHECK_OPSIZE (n * sizeof (guint32));
7521 target = ip + n * sizeof (guint32);
7523 GET_BBLOCK (cfg, default_bblock, target);
7524 default_bblock->flags |= BB_INDIRECT_JUMP_TARGET;
7526 targets = mono_mempool_alloc (cfg->mempool, sizeof (MonoBasicBlock*) * n);
7527 for (i = 0; i < n; ++i) {
7528 GET_BBLOCK (cfg, tblock, target + (gint32)read32(ip));
7529 targets [i] = tblock;
7530 targets [i]->flags |= BB_INDIRECT_JUMP_TARGET;
7534 if (sp != stack_start) {
7536 * Link the current bb with the targets as well, so handle_stack_args
7537 * will set their in_stack correctly.
7539 link_bblock (cfg, bblock, default_bblock);
7540 for (i = 0; i < n; ++i)
7541 link_bblock (cfg, bblock, targets [i]);
7543 handle_stack_args (cfg, stack_start, sp - stack_start);
7545 CHECK_UNVERIFIABLE (cfg);
7548 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_ICOMPARE_IMM, -1, src1->dreg, n);
7549 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_IBGE_UN, default_bblock);
7552 for (i = 0; i < n; ++i)
7553 link_bblock (cfg, bblock, targets [i]);
7555 table = mono_mempool_alloc (cfg->mempool, sizeof (MonoJumpInfoBBTable));
7556 table->table = targets;
7557 table->table_size = n;
7559 use_op_switch = FALSE;
7561 /* ARM implements SWITCH statements differently */
7562 /* FIXME: Make it use the generic implementation */
7563 if (!cfg->compile_aot)
7564 use_op_switch = TRUE;
7567 if (COMPILE_LLVM (cfg))
7568 use_op_switch = TRUE;
7570 cfg->cbb->has_jump_table = 1;
7572 if (use_op_switch) {
7573 MONO_INST_NEW (cfg, ins, OP_SWITCH);
7574 ins->sreg1 = src1->dreg;
7575 ins->inst_p0 = table;
7576 ins->inst_many_bb = targets;
7577 ins->klass = GUINT_TO_POINTER (n);
7578 MONO_ADD_INS (cfg->cbb, ins);
7580 if (sizeof (gpointer) == 8)
7581 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_SHL_IMM, offset_reg, src1->dreg, 3);
7583 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_SHL_IMM, offset_reg, src1->dreg, 2);
7585 #if SIZEOF_REGISTER == 8
7586 /* The upper word might not be zero, and we add it to a 64 bit address later */
7587 MONO_EMIT_NEW_UNALU (cfg, OP_ZEXT_I4, offset_reg, offset_reg);
7590 if (cfg->compile_aot) {
7591 MONO_EMIT_NEW_AOTCONST (cfg, table_reg, table, MONO_PATCH_INFO_SWITCH);
7593 MONO_INST_NEW (cfg, ins, OP_JUMP_TABLE);
7594 ins->inst_c1 = MONO_PATCH_INFO_SWITCH;
7595 ins->inst_p0 = table;
7596 ins->dreg = table_reg;
7597 MONO_ADD_INS (cfg->cbb, ins);
7600 /* FIXME: Use load_memindex */
7601 MONO_EMIT_NEW_BIALU (cfg, OP_PADD, sum_reg, table_reg, offset_reg);
7602 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, target_reg, sum_reg, 0);
7603 MONO_EMIT_NEW_UNALU (cfg, OP_BR_REG, -1, target_reg);
7605 start_new_bblock = 1;
7606 inline_costs += (BRANCH_COST * 2);
7626 dreg = alloc_freg (cfg);
7629 dreg = alloc_lreg (cfg);
7632 dreg = alloc_ireg_ref (cfg);
7635 dreg = alloc_preg (cfg);
7638 NEW_LOAD_MEMBASE (cfg, ins, ldind_to_load_membase (*ip), dreg, sp [0]->dreg, 0);
7639 ins->type = ldind_type [*ip - CEE_LDIND_I1];
7640 ins->flags |= ins_flag;
7642 MONO_ADD_INS (bblock, ins);
7644 if (ins->flags & MONO_INST_VOLATILE) {
7645 /* Volatile loads have acquire semantics, see 12.6.7 in Ecma 335 */
7646 /* FIXME it's questionable if acquire semantics require full barrier or just LoadLoad*/
7647 emit_memory_barrier (cfg, FullBarrier);
7662 NEW_STORE_MEMBASE (cfg, ins, stind_to_store_membase (*ip), sp [0]->dreg, 0, sp [1]->dreg);
7663 ins->flags |= ins_flag;
7666 if (ins->flags & MONO_INST_VOLATILE) {
7667 /* Volatile stores have release semantics, see 12.6.7 in Ecma 335 */
7668 /* FIXME it's questionable if release semantics require full barrier or just StoreStore */
7669 emit_memory_barrier (cfg, FullBarrier);
7672 MONO_ADD_INS (bblock, ins);
7674 if (cfg->gen_write_barriers && *ip == CEE_STIND_REF && method->wrapper_type != MONO_WRAPPER_WRITE_BARRIER && !((sp [1]->opcode == OP_PCONST) && (sp [1]->inst_p0 == 0)))
7675 emit_write_barrier (cfg, sp [0], sp [1], -1);
7684 MONO_INST_NEW (cfg, ins, (*ip));
7686 ins->sreg1 = sp [0]->dreg;
7687 ins->sreg2 = sp [1]->dreg;
7688 type_from_op (ins, sp [0], sp [1]);
7690 ins->dreg = alloc_dreg ((cfg), (ins)->type);
7692 /* Use the immediate opcodes if possible */
7693 if ((sp [1]->opcode == OP_ICONST) && mono_arch_is_inst_imm (sp [1]->inst_c0)) {
7694 int imm_opcode = mono_op_to_op_imm (ins->opcode);
7695 if (imm_opcode != -1) {
7696 ins->opcode = imm_opcode;
7697 ins->inst_p1 = (gpointer)(gssize)(sp [1]->inst_c0);
7700 sp [1]->opcode = OP_NOP;
7704 MONO_ADD_INS ((cfg)->cbb, (ins));
7706 *sp++ = mono_decompose_opcode (cfg, ins);
7723 MONO_INST_NEW (cfg, ins, (*ip));
7725 ins->sreg1 = sp [0]->dreg;
7726 ins->sreg2 = sp [1]->dreg;
7727 type_from_op (ins, sp [0], sp [1]);
7729 ADD_WIDEN_OP (ins, sp [0], sp [1]);
7730 ins->dreg = alloc_dreg ((cfg), (ins)->type);
7732 /* FIXME: Pass opcode to is_inst_imm */
7734 /* Use the immediate opcodes if possible */
7735 if (((sp [1]->opcode == OP_ICONST) || (sp [1]->opcode == OP_I8CONST)) && mono_arch_is_inst_imm (sp [1]->opcode == OP_ICONST ? sp [1]->inst_c0 : sp [1]->inst_l)) {
7738 imm_opcode = mono_op_to_op_imm_noemul (ins->opcode);
7739 if (imm_opcode != -1) {
7740 ins->opcode = imm_opcode;
7741 if (sp [1]->opcode == OP_I8CONST) {
7742 #if SIZEOF_REGISTER == 8
7743 ins->inst_imm = sp [1]->inst_l;
7745 ins->inst_ls_word = sp [1]->inst_ls_word;
7746 ins->inst_ms_word = sp [1]->inst_ms_word;
7750 ins->inst_imm = (gssize)(sp [1]->inst_c0);
7753 /* Might be followed by an instruction added by ADD_WIDEN_OP */
7754 if (sp [1]->next == NULL)
7755 sp [1]->opcode = OP_NOP;
7758 MONO_ADD_INS ((cfg)->cbb, (ins));
7760 *sp++ = mono_decompose_opcode (cfg, ins);
7773 case CEE_CONV_OVF_I8:
7774 case CEE_CONV_OVF_U8:
7778 /* Special case this earlier so we have long constants in the IR */
7779 if ((((*ip) == CEE_CONV_I8) || ((*ip) == CEE_CONV_U8)) && (sp [-1]->opcode == OP_ICONST)) {
7780 int data = sp [-1]->inst_c0;
7781 sp [-1]->opcode = OP_I8CONST;
7782 sp [-1]->type = STACK_I8;
7783 #if SIZEOF_REGISTER == 8
7784 if ((*ip) == CEE_CONV_U8)
7785 sp [-1]->inst_c0 = (guint32)data;
7787 sp [-1]->inst_c0 = data;
7789 sp [-1]->inst_ls_word = data;
7790 if ((*ip) == CEE_CONV_U8)
7791 sp [-1]->inst_ms_word = 0;
7793 sp [-1]->inst_ms_word = (data < 0) ? -1 : 0;
7795 sp [-1]->dreg = alloc_dreg (cfg, STACK_I8);
7802 case CEE_CONV_OVF_I4:
7803 case CEE_CONV_OVF_I1:
7804 case CEE_CONV_OVF_I2:
7805 case CEE_CONV_OVF_I:
7806 case CEE_CONV_OVF_U:
7809 if (sp [-1]->type == STACK_R8) {
7810 ADD_UNOP (CEE_CONV_OVF_I8);
7817 case CEE_CONV_OVF_U1:
7818 case CEE_CONV_OVF_U2:
7819 case CEE_CONV_OVF_U4:
7822 if (sp [-1]->type == STACK_R8) {
7823 ADD_UNOP (CEE_CONV_OVF_U8);
7830 case CEE_CONV_OVF_I1_UN:
7831 case CEE_CONV_OVF_I2_UN:
7832 case CEE_CONV_OVF_I4_UN:
7833 case CEE_CONV_OVF_I8_UN:
7834 case CEE_CONV_OVF_U1_UN:
7835 case CEE_CONV_OVF_U2_UN:
7836 case CEE_CONV_OVF_U4_UN:
7837 case CEE_CONV_OVF_U8_UN:
7838 case CEE_CONV_OVF_I_UN:
7839 case CEE_CONV_OVF_U_UN:
7846 CHECK_CFG_EXCEPTION;
7850 case CEE_ADD_OVF_UN:
7852 case CEE_MUL_OVF_UN:
7854 case CEE_SUB_OVF_UN:
7862 token = read32 (ip + 1);
7863 klass = mini_get_class (method, token, generic_context);
7864 CHECK_TYPELOAD (klass);
7866 if (generic_class_is_reference_type (cfg, klass)) {
7867 MonoInst *store, *load;
7868 int dreg = alloc_ireg_ref (cfg);
7870 NEW_LOAD_MEMBASE (cfg, load, OP_LOAD_MEMBASE, dreg, sp [1]->dreg, 0);
7871 load->flags |= ins_flag;
7872 MONO_ADD_INS (cfg->cbb, load);
7874 NEW_STORE_MEMBASE (cfg, store, OP_STORE_MEMBASE_REG, sp [0]->dreg, 0, dreg);
7875 store->flags |= ins_flag;
7876 MONO_ADD_INS (cfg->cbb, store);
7878 if (cfg->gen_write_barriers && cfg->method->wrapper_type != MONO_WRAPPER_WRITE_BARRIER)
7879 emit_write_barrier (cfg, sp [0], sp [1], -1);
7881 mini_emit_stobj (cfg, sp [0], sp [1], klass, FALSE);
7893 token = read32 (ip + 1);
7894 klass = mini_get_class (method, token, generic_context);
7895 CHECK_TYPELOAD (klass);
7897 /* Optimize the common ldobj+stloc combination */
7907 loc_index = ip [5] - CEE_STLOC_0;
7914 if ((loc_index != -1) && ip_in_bb (cfg, bblock, ip + 5)) {
7915 CHECK_LOCAL (loc_index);
7917 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, ins, &klass->byval_arg, sp [0]->dreg, 0);
7918 ins->dreg = cfg->locals [loc_index]->dreg;
7924 /* Optimize the ldobj+stobj combination */
7925 /* The reference case ends up being a load+store anyway */
7926 if (((ip [5] == CEE_STOBJ) && ip_in_bb (cfg, bblock, ip + 5) && read32 (ip + 6) == token) && !generic_class_is_reference_type (cfg, klass)) {
7931 mini_emit_stobj (cfg, sp [0], sp [1], klass, FALSE);
7938 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, ins, &klass->byval_arg, sp [0]->dreg, 0);
7947 CHECK_STACK_OVF (1);
7949 n = read32 (ip + 1);
7951 if (method->wrapper_type == MONO_WRAPPER_DYNAMIC_METHOD) {
7952 EMIT_NEW_PCONST (cfg, ins, mono_method_get_wrapper_data (method, n));
7953 ins->type = STACK_OBJ;
7956 else if (method->wrapper_type != MONO_WRAPPER_NONE) {
7957 MonoInst *iargs [1];
7959 EMIT_NEW_PCONST (cfg, iargs [0], mono_method_get_wrapper_data (method, n));
7960 *sp = mono_emit_jit_icall (cfg, mono_string_new_wrapper, iargs);
7962 if (cfg->opt & MONO_OPT_SHARED) {
7963 MonoInst *iargs [3];
7965 if (cfg->compile_aot) {
7966 cfg->ldstr_list = g_list_prepend (cfg->ldstr_list, GINT_TO_POINTER (n));
7968 EMIT_NEW_DOMAINCONST (cfg, iargs [0]);
7969 EMIT_NEW_IMAGECONST (cfg, iargs [1], image);
7970 EMIT_NEW_ICONST (cfg, iargs [2], mono_metadata_token_index (n));
7971 *sp = mono_emit_jit_icall (cfg, mono_ldstr, iargs);
7972 mono_ldstr (cfg->domain, image, mono_metadata_token_index (n));
7974 if (bblock->out_of_line) {
7975 MonoInst *iargs [2];
7977 if (image == mono_defaults.corlib) {
7979 * Avoid relocations in AOT and save some space by using a
7980 * version of helper_ldstr specialized to mscorlib.
7982 EMIT_NEW_ICONST (cfg, iargs [0], mono_metadata_token_index (n));
7983 *sp = mono_emit_jit_icall (cfg, mono_helper_ldstr_mscorlib, iargs);
7985 /* Avoid creating the string object */
7986 EMIT_NEW_IMAGECONST (cfg, iargs [0], image);
7987 EMIT_NEW_ICONST (cfg, iargs [1], mono_metadata_token_index (n));
7988 *sp = mono_emit_jit_icall (cfg, mono_helper_ldstr, iargs);
7992 if (cfg->compile_aot) {
7993 NEW_LDSTRCONST (cfg, ins, image, n);
7995 MONO_ADD_INS (bblock, ins);
7998 NEW_PCONST (cfg, ins, NULL);
7999 ins->type = STACK_OBJ;
8000 ins->inst_p0 = mono_ldstr (cfg->domain, image, mono_metadata_token_index (n));
8002 OUT_OF_MEMORY_FAILURE;
8005 MONO_ADD_INS (bblock, ins);
8014 MonoInst *iargs [2];
8015 MonoMethodSignature *fsig;
8018 MonoInst *vtable_arg = NULL;
8021 token = read32 (ip + 1);
8022 cmethod = mini_get_method (cfg, method, token, NULL, generic_context);
8023 if (!cmethod || mono_loader_get_last_error ())
8025 fsig = mono_method_get_signature (cmethod, image, token);
8029 mono_save_token_info (cfg, image, token, cmethod);
8031 if (!mono_class_init (cmethod->klass))
8034 if (cfg->generic_sharing_context)
8035 context_used = mono_method_check_context_used (cmethod);
8037 if (mono_security_get_mode () == MONO_SECURITY_MODE_CAS) {
8038 if (check_linkdemand (cfg, method, cmethod))
8040 CHECK_CFG_EXCEPTION;
8041 } else if (mono_security_get_mode () == MONO_SECURITY_MODE_CORE_CLR) {
8042 ensure_method_is_allowed_to_call_method (cfg, method, cmethod, bblock, ip);
8045 if (cfg->generic_sharing_context && cmethod && cmethod->klass != method->klass && cmethod->klass->generic_class && mono_method_is_generic_sharable_impl (cmethod, TRUE) && mono_class_needs_cctor_run (cmethod->klass, method)) {
8046 emit_generic_class_init (cfg, cmethod->klass);
8047 CHECK_TYPELOAD (cmethod->klass);
8050 if (cmethod->klass->valuetype && mono_class_generic_sharing_enabled (cmethod->klass) &&
8051 mono_method_is_generic_sharable_impl (cmethod, TRUE)) {
8052 if (cmethod->is_inflated && mono_method_get_context (cmethod)->method_inst) {
8053 mono_class_vtable (cfg->domain, cmethod->klass);
8054 CHECK_TYPELOAD (cmethod->klass);
8056 vtable_arg = emit_get_rgctx_method (cfg, context_used,
8057 cmethod, MONO_RGCTX_INFO_METHOD_RGCTX);
8060 vtable_arg = emit_get_rgctx_klass (cfg, context_used,
8061 cmethod->klass, MONO_RGCTX_INFO_VTABLE);
8063 MonoVTable *vtable = mono_class_vtable (cfg->domain, cmethod->klass);
8065 CHECK_TYPELOAD (cmethod->klass);
8066 EMIT_NEW_VTABLECONST (cfg, vtable_arg, vtable);
8071 n = fsig->param_count;
8075 * Generate smaller code for the common newobj <exception> instruction in
8076 * argument checking code.
8078 if (bblock->out_of_line && cmethod->klass->image == mono_defaults.corlib &&
8079 is_exception_class (cmethod->klass) && n <= 2 &&
8080 ((n < 1) || (!fsig->params [0]->byref && fsig->params [0]->type == MONO_TYPE_STRING)) &&
8081 ((n < 2) || (!fsig->params [1]->byref && fsig->params [1]->type == MONO_TYPE_STRING))) {
8082 MonoInst *iargs [3];
8084 g_assert (!vtable_arg);
8088 EMIT_NEW_ICONST (cfg, iargs [0], cmethod->klass->type_token);
8091 *sp ++ = mono_emit_jit_icall (cfg, mono_create_corlib_exception_0, iargs);
8095 *sp ++ = mono_emit_jit_icall (cfg, mono_create_corlib_exception_1, iargs);
8100 *sp ++ = mono_emit_jit_icall (cfg, mono_create_corlib_exception_2, iargs);
8103 g_assert_not_reached ();
8111 /* move the args to allow room for 'this' in the first position */
8117 /* check_call_signature () requires sp[0] to be set */
8118 this_ins.type = STACK_OBJ;
8120 if (check_call_signature (cfg, fsig, sp))
8125 if (mini_class_is_system_array (cmethod->klass)) {
8126 g_assert (!vtable_arg);
8128 *sp = emit_get_rgctx_method (cfg, context_used,
8129 cmethod, MONO_RGCTX_INFO_METHOD);
8131 /* Avoid varargs in the common case */
8132 if (fsig->param_count == 1)
8133 alloc = mono_emit_jit_icall (cfg, mono_array_new_1, sp);
8134 else if (fsig->param_count == 2)
8135 alloc = mono_emit_jit_icall (cfg, mono_array_new_2, sp);
8136 else if (fsig->param_count == 3)
8137 alloc = mono_emit_jit_icall (cfg, mono_array_new_3, sp);
8139 alloc = handle_array_new (cfg, fsig->param_count, sp, ip);
8140 } else if (cmethod->string_ctor) {
8141 g_assert (!context_used);
8142 g_assert (!vtable_arg);
8143 /* we simply pass a null pointer */
8144 EMIT_NEW_PCONST (cfg, *sp, NULL);
8145 /* now call the string ctor */
8146 alloc = mono_emit_method_call_full (cfg, cmethod, fsig, sp, NULL, NULL, NULL);
8148 MonoInst* callvirt_this_arg = NULL;
8150 if (cmethod->klass->valuetype) {
8151 iargs [0] = mono_compile_create_var (cfg, &cmethod->klass->byval_arg, OP_LOCAL);
8152 MONO_EMIT_NEW_VZERO (cfg, iargs [0]->dreg, cmethod->klass);
8153 EMIT_NEW_TEMPLOADA (cfg, *sp, iargs [0]->inst_c0);
8158 * The code generated by mini_emit_virtual_call () expects
8159 * iargs [0] to be a boxed instance, but luckily the vcall
8160 * will be transformed into a normal call there.
8162 } else if (context_used) {
8163 alloc = handle_alloc (cfg, cmethod->klass, FALSE, context_used);
8166 MonoVTable *vtable = mono_class_vtable (cfg->domain, cmethod->klass);
8168 CHECK_TYPELOAD (cmethod->klass);
8171 * TypeInitializationExceptions thrown from the mono_runtime_class_init
8172 * call in mono_jit_runtime_invoke () can abort the finalizer thread.
8173 * As a workaround, we call class cctors before allocating objects.
8175 if (mini_field_access_needs_cctor_run (cfg, method, vtable) && !(g_slist_find (class_inits, vtable))) {
8176 mono_emit_abs_call (cfg, MONO_PATCH_INFO_CLASS_INIT, vtable->klass, helper_sig_class_init_trampoline, NULL);
8177 if (cfg->verbose_level > 2)
8178 printf ("class %s.%s needs init call for ctor\n", cmethod->klass->name_space, cmethod->klass->name);
8179 class_inits = g_slist_prepend (class_inits, vtable);
8182 alloc = handle_alloc (cfg, cmethod->klass, FALSE, 0);
8185 CHECK_CFG_EXCEPTION; /*for handle_alloc*/
8188 MONO_EMIT_NEW_UNALU (cfg, OP_NOT_NULL, -1, alloc->dreg);
8190 /* Now call the actual ctor */
8191 /* Avoid virtual calls to ctors if possible */
8192 if (cmethod->klass->marshalbyref)
8193 callvirt_this_arg = sp [0];
8196 if (cmethod && (cfg->opt & MONO_OPT_INTRINS) && (ins = mini_emit_inst_for_ctor (cfg, cmethod, fsig, sp))) {
8197 if (!MONO_TYPE_IS_VOID (fsig->ret)) {
8198 type_to_eval_stack_type ((cfg), fsig->ret, ins);
8203 CHECK_CFG_EXCEPTION;
8204 } else if ((cfg->opt & MONO_OPT_INLINE) && cmethod && !context_used && !vtable_arg &&
8205 !disable_inline && mono_method_check_inlining (cfg, cmethod) &&
8206 !mono_class_is_subclass_of (cmethod->klass, mono_defaults.exception_class, FALSE) &&
8207 !g_list_find (dont_inline, cmethod)) {
8210 if ((costs = inline_method (cfg, cmethod, fsig, sp, ip, cfg->real_offset, dont_inline, FALSE))) {
8211 cfg->real_offset += 5;
8214 inline_costs += costs - 5;
8217 mono_emit_method_call_full (cfg, cmethod, fsig, sp, callvirt_this_arg, NULL, NULL);
8219 } else if (context_used &&
8220 (!mono_method_is_generic_sharable_impl (cmethod, TRUE) ||
8221 !mono_class_generic_sharing_enabled (cmethod->klass))) {
8222 MonoInst *cmethod_addr;
8224 cmethod_addr = emit_get_rgctx_method (cfg, context_used,
8225 cmethod, MONO_RGCTX_INFO_GENERIC_METHOD_CODE);
8227 mono_emit_calli (cfg, fsig, sp, cmethod_addr, vtable_arg);
8230 ins = mono_emit_method_call_full (cfg, cmethod, fsig, sp,
8231 callvirt_this_arg, NULL, vtable_arg);
8235 if (alloc == NULL) {
8237 EMIT_NEW_TEMPLOAD (cfg, ins, iargs [0]->inst_c0);
8238 type_to_eval_stack_type (cfg, &ins->klass->byval_arg, ins);
8252 token = read32 (ip + 1);
8253 klass = mini_get_class (method, token, generic_context);
8254 CHECK_TYPELOAD (klass);
8255 if (sp [0]->type != STACK_OBJ)
8258 if (cfg->generic_sharing_context)
8259 context_used = mono_class_check_context_used (klass);
8261 if (!context_used && mini_class_has_reference_variant_generic_argument (cfg, klass, context_used)) {
8262 MonoMethod *mono_castclass = mono_marshal_get_castclass_with_cache ();
8269 EMIT_NEW_CLASSCONST (cfg, args [1], klass);
8272 if (cfg->compile_aot)
8273 EMIT_NEW_AOTCONST (cfg, args [2], MONO_PATCH_INFO_CASTCLASS_CACHE, NULL);
8275 EMIT_NEW_PCONST (cfg, args [2], mono_domain_alloc0 (cfg->domain, sizeof (gpointer)));
8277 /*The wrapper doesn't inline well so the bloat of inlining doesn't pay off.*/
8278 *sp++ = mono_emit_method_call (cfg, mono_castclass, args, NULL);
8281 } else if (!context_used && (klass->marshalbyref || klass->flags & TYPE_ATTRIBUTE_INTERFACE)) {
8282 MonoMethod *mono_castclass;
8283 MonoInst *iargs [1];
8286 mono_castclass = mono_marshal_get_castclass (klass);
8289 costs = inline_method (cfg, mono_castclass, mono_method_signature (mono_castclass),
8290 iargs, ip, cfg->real_offset, dont_inline, TRUE);
8291 CHECK_CFG_EXCEPTION;
8292 g_assert (costs > 0);
8295 cfg->real_offset += 5;
8300 inline_costs += costs;
8303 ins = handle_castclass (cfg, klass, *sp, context_used);
8304 CHECK_CFG_EXCEPTION;
8314 token = read32 (ip + 1);
8315 klass = mini_get_class (method, token, generic_context);
8316 CHECK_TYPELOAD (klass);
8317 if (sp [0]->type != STACK_OBJ)
8320 if (cfg->generic_sharing_context)
8321 context_used = mono_class_check_context_used (klass);
8323 if (!context_used && mini_class_has_reference_variant_generic_argument (cfg, klass, context_used)) {
8324 MonoMethod *mono_isinst = mono_marshal_get_isinst_with_cache ();
8331 EMIT_NEW_CLASSCONST (cfg, args [1], klass);
8334 if (cfg->compile_aot)
8335 EMIT_NEW_AOTCONST (cfg, args [2], MONO_PATCH_INFO_CASTCLASS_CACHE, NULL);
8337 EMIT_NEW_PCONST (cfg, args [2], mono_domain_alloc0 (cfg->domain, sizeof (gpointer)));
8339 *sp++ = mono_emit_method_call (cfg, mono_isinst, args, NULL);
8342 } else if (!context_used && (klass->marshalbyref || klass->flags & TYPE_ATTRIBUTE_INTERFACE)) {
8343 MonoMethod *mono_isinst;
8344 MonoInst *iargs [1];
8347 mono_isinst = mono_marshal_get_isinst (klass);
8350 costs = inline_method (cfg, mono_isinst, mono_method_signature (mono_isinst),
8351 iargs, ip, cfg->real_offset, dont_inline, TRUE);
8352 CHECK_CFG_EXCEPTION;
8353 g_assert (costs > 0);
8356 cfg->real_offset += 5;
8361 inline_costs += costs;
8364 ins = handle_isinst (cfg, klass, *sp, context_used);
8365 CHECK_CFG_EXCEPTION;
8372 case CEE_UNBOX_ANY: {
8376 token = read32 (ip + 1);
8377 klass = mini_get_class (method, token, generic_context);
8378 CHECK_TYPELOAD (klass);
8380 mono_save_token_info (cfg, image, token, klass);
8382 if (cfg->generic_sharing_context)
8383 context_used = mono_class_check_context_used (klass);
8385 if (generic_class_is_reference_type (cfg, klass)) {
8386 /* CASTCLASS FIXME kill this huge slice of duplicated code*/
8387 if (!context_used && mini_class_has_reference_variant_generic_argument (cfg, klass, context_used)) {
8388 MonoMethod *mono_castclass = mono_marshal_get_castclass_with_cache ();
8395 EMIT_NEW_CLASSCONST (cfg, args [1], klass);
8398 /*FIXME AOT support*/
8399 if (cfg->compile_aot)
8400 EMIT_NEW_AOTCONST (cfg, args [2], MONO_PATCH_INFO_CASTCLASS_CACHE, NULL);
8402 EMIT_NEW_PCONST (cfg, args [2], mono_domain_alloc0 (cfg->domain, sizeof (gpointer)));
8404 /*The wrapper doesn't inline well so the bloat of inlining doesn't pay off.*/
8405 *sp++ = mono_emit_method_call (cfg, mono_castclass, args, NULL);
8408 } else if (!context_used && (klass->marshalbyref || klass->flags & TYPE_ATTRIBUTE_INTERFACE)) {
8409 MonoMethod *mono_castclass;
8410 MonoInst *iargs [1];
8413 mono_castclass = mono_marshal_get_castclass (klass);
8416 costs = inline_method (cfg, mono_castclass, mono_method_signature (mono_castclass),
8417 iargs, ip, cfg->real_offset, dont_inline, TRUE);
8418 CHECK_CFG_EXCEPTION;
8419 g_assert (costs > 0);
8422 cfg->real_offset += 5;
8426 inline_costs += costs;
8428 ins = handle_castclass (cfg, klass, *sp, context_used);
8429 CHECK_CFG_EXCEPTION;
8437 if (mono_class_is_nullable (klass)) {
8438 ins = handle_unbox_nullable (cfg, *sp, klass, context_used);
8445 ins = handle_unbox (cfg, klass, sp, context_used);
8451 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, ins, &klass->byval_arg, sp [0]->dreg, 0);
8464 token = read32 (ip + 1);
8465 klass = mini_get_class (method, token, generic_context);
8466 CHECK_TYPELOAD (klass);
8468 mono_save_token_info (cfg, image, token, klass);
8470 if (cfg->generic_sharing_context)
8471 context_used = mono_class_check_context_used (klass);
8473 if (generic_class_is_reference_type (cfg, klass)) {
8479 if (klass == mono_defaults.void_class)
8481 if (target_type_is_incompatible (cfg, &klass->byval_arg, *sp))
8483 /* frequent check in generic code: box (struct), brtrue */
8485 // FIXME: LLVM can't handle the inconsistent bb linking
8486 if (!mono_class_is_nullable (klass) &&
8487 ip + 5 < end && ip_in_bb (cfg, bblock, ip + 5) &&
8488 (ip [5] == CEE_BRTRUE ||
8489 ip [5] == CEE_BRTRUE_S ||
8490 ip [5] == CEE_BRFALSE ||
8491 ip [5] == CEE_BRFALSE_S)) {
8492 gboolean is_true = ip [5] == CEE_BRTRUE || ip [5] == CEE_BRTRUE_S;
8494 MonoBasicBlock *true_bb, *false_bb;
8498 if (cfg->verbose_level > 3) {
8499 printf ("converting (in B%d: stack: %d) %s", bblock->block_num, (int)(sp - stack_start), mono_disasm_code_one (NULL, method, ip, NULL));
8500 printf ("<box+brtrue opt>\n");
8508 target = ip + 1 + (signed char)(*ip);
8515 target = ip + 4 + (gint)(read32 (ip));
8519 g_assert_not_reached ();
8523 * We need to link both bblocks, since it is needed for handling stack
8524 * arguments correctly (See test_0_box_brtrue_opt_regress_81102).
8525 * Branching to only one of them would lead to inconsistencies, so
8526 * generate an ICONST+BRTRUE, the branch opts will get rid of them.
8528 GET_BBLOCK (cfg, true_bb, target);
8529 GET_BBLOCK (cfg, false_bb, ip);
8531 mono_link_bblock (cfg, cfg->cbb, true_bb);
8532 mono_link_bblock (cfg, cfg->cbb, false_bb);
8534 if (sp != stack_start) {
8535 handle_stack_args (cfg, stack_start, sp - stack_start);
8537 CHECK_UNVERIFIABLE (cfg);
8540 if (COMPILE_LLVM (cfg)) {
8541 dreg = alloc_ireg (cfg);
8542 MONO_EMIT_NEW_ICONST (cfg, dreg, 0);
8543 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, dreg, is_true ? 0 : 1);
8545 MONO_EMIT_NEW_BRANCH_BLOCK2 (cfg, OP_IBEQ, true_bb, false_bb);
8547 /* The JIT can't eliminate the iconst+compare */
8548 MONO_INST_NEW (cfg, ins, OP_BR);
8549 ins->inst_target_bb = is_true ? true_bb : false_bb;
8550 MONO_ADD_INS (cfg->cbb, ins);
8553 start_new_bblock = 1;
8557 *sp++ = handle_box (cfg, val, klass, context_used);
8559 CHECK_CFG_EXCEPTION;
8568 token = read32 (ip + 1);
8569 klass = mini_get_class (method, token, generic_context);
8570 CHECK_TYPELOAD (klass);
8572 mono_save_token_info (cfg, image, token, klass);
8574 if (cfg->generic_sharing_context)
8575 context_used = mono_class_check_context_used (klass);
8577 if (mono_class_is_nullable (klass)) {
8580 val = handle_unbox_nullable (cfg, *sp, klass, context_used);
8581 EMIT_NEW_VARLOADA (cfg, ins, get_vreg_to_inst (cfg, val->dreg), &val->klass->byval_arg);
8585 ins = handle_unbox (cfg, klass, sp, context_used);
8595 MonoClassField *field;
8599 if (*ip == CEE_STFLD) {
8606 if (sp [0]->type == STACK_I4 || sp [0]->type == STACK_I8 || sp [0]->type == STACK_R8)
8608 if (*ip != CEE_LDFLD && sp [0]->type == STACK_VTYPE)
8611 token = read32 (ip + 1);
8612 if (method->wrapper_type != MONO_WRAPPER_NONE) {
8613 field = mono_method_get_wrapper_data (method, token);
8614 klass = field->parent;
8617 field = mono_field_from_token (image, token, &klass, generic_context);
8621 if (!dont_verify && !cfg->skip_visibility && !mono_method_can_access_field (method, field))
8622 FIELD_ACCESS_FAILURE;
8623 mono_class_init (klass);
8625 if (*ip != CEE_LDFLDA && is_magic_tls_access (field))
8627 /* XXX this is technically required but, so far (SL2), no [SecurityCritical] types (not many exists) have
8628 any visible *instance* field (in fact there's a single case for a static field in Marshal) XXX
8629 if (mono_security_get_mode () == MONO_SECURITY_MODE_CORE_CLR)
8630 ensure_method_is_allowed_to_access_field (cfg, method, field, bblock, ip);
8633 foffset = klass->valuetype? field->offset - sizeof (MonoObject): field->offset;
8634 if (*ip == CEE_STFLD) {
8635 if (target_type_is_incompatible (cfg, field->type, sp [1]))
8637 if ((klass->marshalbyref && !MONO_CHECK_THIS (sp [0])) || klass->contextbound || klass == mono_defaults.marshalbyrefobject_class) {
8638 MonoMethod *stfld_wrapper = mono_marshal_get_stfld_wrapper (field->type);
8639 MonoInst *iargs [5];
8642 EMIT_NEW_CLASSCONST (cfg, iargs [1], klass);
8643 EMIT_NEW_FIELDCONST (cfg, iargs [2], field);
8644 EMIT_NEW_ICONST (cfg, iargs [3], klass->valuetype ? field->offset - sizeof (MonoObject) :
8648 if (cfg->opt & MONO_OPT_INLINE || cfg->compile_aot) {
8649 costs = inline_method (cfg, stfld_wrapper, mono_method_signature (stfld_wrapper),
8650 iargs, ip, cfg->real_offset, dont_inline, TRUE);
8651 CHECK_CFG_EXCEPTION;
8652 g_assert (costs > 0);
8654 cfg->real_offset += 5;
8657 inline_costs += costs;
8659 mono_emit_method_call (cfg, stfld_wrapper, iargs, NULL);
8664 MONO_EMIT_NULL_CHECK (cfg, sp [0]->dreg);
8666 EMIT_NEW_STORE_MEMBASE_TYPE (cfg, store, field->type, sp [0]->dreg, foffset, sp [1]->dreg);
8667 if (sp [0]->opcode != OP_LDADDR)
8668 store->flags |= MONO_INST_FAULT;
8670 if (cfg->gen_write_barriers && mini_type_to_stind (cfg, field->type) == CEE_STIND_REF && !(sp [1]->opcode == OP_PCONST && sp [1]->inst_c0 == 0)) {
8671 /* insert call to write barrier */
8675 dreg = alloc_ireg_mp (cfg);
8676 EMIT_NEW_BIALU_IMM (cfg, ptr, OP_PADD_IMM, dreg, sp [0]->dreg, foffset);
8677 emit_write_barrier (cfg, ptr, sp [1], -1);
8680 store->flags |= ins_flag;
8687 if ((klass->marshalbyref && !MONO_CHECK_THIS (sp [0])) || klass->contextbound || klass == mono_defaults.marshalbyrefobject_class) {
8688 MonoMethod *wrapper = (*ip == CEE_LDFLDA) ? mono_marshal_get_ldflda_wrapper (field->type) : mono_marshal_get_ldfld_wrapper (field->type);
8689 MonoInst *iargs [4];
8692 EMIT_NEW_CLASSCONST (cfg, iargs [1], klass);
8693 EMIT_NEW_FIELDCONST (cfg, iargs [2], field);
8694 EMIT_NEW_ICONST (cfg, iargs [3], klass->valuetype ? field->offset - sizeof (MonoObject) : field->offset);
8695 if (cfg->opt & MONO_OPT_INLINE || cfg->compile_aot) {
8696 costs = inline_method (cfg, wrapper, mono_method_signature (wrapper),
8697 iargs, ip, cfg->real_offset, dont_inline, TRUE);
8698 CHECK_CFG_EXCEPTION;
8700 g_assert (costs > 0);
8702 cfg->real_offset += 5;
8706 inline_costs += costs;
8708 ins = mono_emit_method_call (cfg, wrapper, iargs, NULL);
8712 if (sp [0]->type == STACK_VTYPE) {
8715 /* Have to compute the address of the variable */
8717 var = get_vreg_to_inst (cfg, sp [0]->dreg);
8719 var = mono_compile_create_var_for_vreg (cfg, &klass->byval_arg, OP_LOCAL, sp [0]->dreg);
8721 g_assert (var->klass == klass);
8723 EMIT_NEW_VARLOADA (cfg, ins, var, &var->klass->byval_arg);
8727 if (*ip == CEE_LDFLDA) {
8728 if (is_magic_tls_access (field)) {
8730 *sp++ = create_magic_tls_access (cfg, field, &cached_tls_addr, ins);
8732 if (sp [0]->type == STACK_OBJ) {
8733 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, sp [0]->dreg, 0);
8734 MONO_EMIT_NEW_COND_EXC (cfg, EQ, "NullReferenceException");
8737 dreg = alloc_ireg_mp (cfg);
8739 EMIT_NEW_BIALU_IMM (cfg, ins, OP_PADD_IMM, dreg, sp [0]->dreg, foffset);
8740 ins->klass = mono_class_from_mono_type (field->type);
8741 ins->type = STACK_MP;
8747 MONO_EMIT_NULL_CHECK (cfg, sp [0]->dreg);
8749 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, load, field->type, sp [0]->dreg, foffset);
8750 load->flags |= ins_flag;
8751 if (sp [0]->opcode != OP_LDADDR)
8752 load->flags |= MONO_INST_FAULT;
8763 MonoClassField *field;
8764 gpointer addr = NULL;
8765 gboolean is_special_static;
8769 token = read32 (ip + 1);
8771 if (method->wrapper_type != MONO_WRAPPER_NONE) {
8772 field = mono_method_get_wrapper_data (method, token);
8773 klass = field->parent;
8776 field = mono_field_from_token (image, token, &klass, generic_context);
8779 mono_class_init (klass);
8780 if (!dont_verify && !cfg->skip_visibility && !mono_method_can_access_field (method, field))
8781 FIELD_ACCESS_FAILURE;
8783 /* if the class is Critical then transparent code cannot access it's fields */
8784 if (mono_security_get_mode () == MONO_SECURITY_MODE_CORE_CLR)
8785 ensure_method_is_allowed_to_access_field (cfg, method, field, bblock, ip);
8788 * We can only support shared generic static
8789 * field access on architectures where the
8790 * trampoline code has been extended to handle
8791 * the generic class init.
8793 #ifndef MONO_ARCH_VTABLE_REG
8794 GENERIC_SHARING_FAILURE (*ip);
8797 if (cfg->generic_sharing_context)
8798 context_used = mono_class_check_context_used (klass);
8800 ftype = mono_field_get_type (field);
8802 g_assert (!(ftype->attrs & FIELD_ATTRIBUTE_LITERAL));
8804 /* The special_static_fields field is init'd in mono_class_vtable, so it needs
8805 * to be called here.
8807 if (!context_used && !(cfg->opt & MONO_OPT_SHARED)) {
8808 mono_class_vtable (cfg->domain, klass);
8809 CHECK_TYPELOAD (klass);
8811 mono_domain_lock (cfg->domain);
8812 if (cfg->domain->special_static_fields)
8813 addr = g_hash_table_lookup (cfg->domain->special_static_fields, field);
8814 mono_domain_unlock (cfg->domain);
8816 is_special_static = mono_class_field_is_special_static (field);
8818 /* Generate IR to compute the field address */
8819 if (is_special_static && ((gsize)addr & 0x80000000) == 0 && mono_get_thread_intrinsic (cfg) && !(cfg->opt & MONO_OPT_SHARED) && !context_used) {
8821 * Fast access to TLS data
8822 * Inline version of get_thread_static_data () in
8826 int idx, static_data_reg, array_reg, dreg;
8827 MonoInst *thread_ins;
8829 // offset &= 0x7fffffff;
8830 // idx = (offset >> 24) - 1;
8831 // return ((char*) thread->static_data [idx]) + (offset & 0xffffff);
8833 thread_ins = mono_get_thread_intrinsic (cfg);
8834 MONO_ADD_INS (cfg->cbb, thread_ins);
8835 static_data_reg = alloc_ireg (cfg);
8836 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, static_data_reg, thread_ins->dreg, G_STRUCT_OFFSET (MonoInternalThread, static_data));
8838 if (cfg->compile_aot) {
8839 int offset_reg, offset2_reg, idx_reg;
8841 /* For TLS variables, this will return the TLS offset */
8842 EMIT_NEW_SFLDACONST (cfg, ins, field);
8843 offset_reg = ins->dreg;
8844 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_IAND_IMM, offset_reg, offset_reg, 0x7fffffff);
8845 idx_reg = alloc_ireg (cfg);
8846 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_ISHR_IMM, idx_reg, offset_reg, 24);
8847 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_ISUB_IMM, idx_reg, idx_reg, 1);
8848 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_ISHL_IMM, idx_reg, idx_reg, sizeof (gpointer) == 8 ? 3 : 2);
8849 MONO_EMIT_NEW_BIALU (cfg, OP_PADD, static_data_reg, static_data_reg, idx_reg);
8850 array_reg = alloc_ireg (cfg);
8851 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, array_reg, static_data_reg, 0);
8852 offset2_reg = alloc_ireg (cfg);
8853 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_IAND_IMM, offset2_reg, offset_reg, 0xffffff);
8854 dreg = alloc_ireg (cfg);
8855 EMIT_NEW_BIALU (cfg, ins, OP_PADD, dreg, array_reg, offset2_reg);
8857 offset = (gsize)addr & 0x7fffffff;
8858 idx = (offset >> 24) - 1;
8860 array_reg = alloc_ireg (cfg);
8861 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, array_reg, static_data_reg, idx * sizeof (gpointer));
8862 dreg = alloc_ireg (cfg);
8863 EMIT_NEW_BIALU_IMM (cfg, ins, OP_ADD_IMM, dreg, array_reg, (offset & 0xffffff));
8865 } else if ((cfg->opt & MONO_OPT_SHARED) ||
8866 (cfg->compile_aot && is_special_static) ||
8867 (context_used && is_special_static)) {
8868 MonoInst *iargs [2];
8870 g_assert (field->parent);
8871 EMIT_NEW_DOMAINCONST (cfg, iargs [0]);
8873 iargs [1] = emit_get_rgctx_field (cfg, context_used,
8874 field, MONO_RGCTX_INFO_CLASS_FIELD);
8876 EMIT_NEW_FIELDCONST (cfg, iargs [1], field);
8878 ins = mono_emit_jit_icall (cfg, mono_class_static_field_address, iargs);
8879 } else if (context_used) {
8880 MonoInst *static_data;
8883 g_print ("sharing static field access in %s.%s.%s - depth %d offset %d\n",
8884 method->klass->name_space, method->klass->name, method->name,
8885 depth, field->offset);
8888 if (mono_class_needs_cctor_run (klass, method))
8889 emit_generic_class_init (cfg, klass);
8892 * The pointer we're computing here is
8894 * super_info.static_data + field->offset
8896 static_data = emit_get_rgctx_klass (cfg, context_used,
8897 klass, MONO_RGCTX_INFO_STATIC_DATA);
8899 if (field->offset == 0) {
8902 int addr_reg = mono_alloc_preg (cfg);
8903 EMIT_NEW_BIALU_IMM (cfg, ins, OP_PADD_IMM, addr_reg, static_data->dreg, field->offset);
8905 } else if ((cfg->opt & MONO_OPT_SHARED) || (cfg->compile_aot && addr)) {
8906 MonoInst *iargs [2];
8908 g_assert (field->parent);
8909 EMIT_NEW_DOMAINCONST (cfg, iargs [0]);
8910 EMIT_NEW_FIELDCONST (cfg, iargs [1], field);
8911 ins = mono_emit_jit_icall (cfg, mono_class_static_field_address, iargs);
8913 MonoVTable *vtable = mono_class_vtable (cfg->domain, klass);
8915 CHECK_TYPELOAD (klass);
8917 if (mini_field_access_needs_cctor_run (cfg, method, vtable)) {
8918 if (!(g_slist_find (class_inits, vtable))) {
8919 mono_emit_abs_call (cfg, MONO_PATCH_INFO_CLASS_INIT, vtable->klass, helper_sig_class_init_trampoline, NULL);
8920 if (cfg->verbose_level > 2)
8921 printf ("class %s.%s needs init call for %s\n", klass->name_space, klass->name, mono_field_get_name (field));
8922 class_inits = g_slist_prepend (class_inits, vtable);
8925 if (cfg->run_cctors) {
8927 /* This makes so that inline cannot trigger */
8928 /* .cctors: too many apps depend on them */
8929 /* running with a specific order... */
8930 if (! vtable->initialized)
8932 ex = mono_runtime_class_init_full (vtable, FALSE);
8934 set_exception_object (cfg, ex);
8935 goto exception_exit;
8939 addr = (char*)vtable->data + field->offset;
8941 if (cfg->compile_aot)
8942 EMIT_NEW_SFLDACONST (cfg, ins, field);
8944 EMIT_NEW_PCONST (cfg, ins, addr);
8946 MonoInst *iargs [1];
8947 EMIT_NEW_ICONST (cfg, iargs [0], GPOINTER_TO_UINT (addr));
8948 ins = mono_emit_jit_icall (cfg, mono_get_special_static_data, iargs);
8952 /* Generate IR to do the actual load/store operation */
8954 if (*ip == CEE_LDSFLDA) {
8955 ins->klass = mono_class_from_mono_type (ftype);
8956 ins->type = STACK_PTR;
8958 } else if (*ip == CEE_STSFLD) {
8963 EMIT_NEW_STORE_MEMBASE_TYPE (cfg, store, ftype, ins->dreg, 0, sp [0]->dreg);
8964 store->flags |= ins_flag;
8966 gboolean is_const = FALSE;
8967 MonoVTable *vtable = NULL;
8969 if (!context_used) {
8970 vtable = mono_class_vtable (cfg->domain, klass);
8971 CHECK_TYPELOAD (klass);
8973 if (!context_used && !((cfg->opt & MONO_OPT_SHARED) || cfg->compile_aot) &&
8974 vtable->initialized && (ftype->attrs & FIELD_ATTRIBUTE_INIT_ONLY)) {
8975 gpointer addr = (char*)vtable->data + field->offset;
8976 int ro_type = ftype->type;
8977 if (ro_type == MONO_TYPE_VALUETYPE && ftype->data.klass->enumtype) {
8978 ro_type = mono_class_enum_basetype (ftype->data.klass)->type;
8980 /* printf ("RO-FIELD %s.%s:%s\n", klass->name_space, klass->name, mono_field_get_name (field));*/
8983 case MONO_TYPE_BOOLEAN:
8985 EMIT_NEW_ICONST (cfg, *sp, *((guint8 *)addr));
8989 EMIT_NEW_ICONST (cfg, *sp, *((gint8 *)addr));
8992 case MONO_TYPE_CHAR:
8994 EMIT_NEW_ICONST (cfg, *sp, *((guint16 *)addr));
8998 EMIT_NEW_ICONST (cfg, *sp, *((gint16 *)addr));
9003 EMIT_NEW_ICONST (cfg, *sp, *((gint32 *)addr));
9007 EMIT_NEW_ICONST (cfg, *sp, *((guint32 *)addr));
9013 case MONO_TYPE_FNPTR:
9014 EMIT_NEW_PCONST (cfg, *sp, *((gpointer *)addr));
9015 type_to_eval_stack_type ((cfg), field->type, *sp);
9018 case MONO_TYPE_STRING:
9019 case MONO_TYPE_OBJECT:
9020 case MONO_TYPE_CLASS:
9021 case MONO_TYPE_SZARRAY:
9022 case MONO_TYPE_ARRAY:
9023 if (!mono_gc_is_moving ()) {
9024 EMIT_NEW_PCONST (cfg, *sp, *((gpointer *)addr));
9025 type_to_eval_stack_type ((cfg), field->type, *sp);
9033 EMIT_NEW_I8CONST (cfg, *sp, *((gint64 *)addr));
9038 case MONO_TYPE_VALUETYPE:
9048 CHECK_STACK_OVF (1);
9050 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, load, field->type, ins->dreg, 0);
9051 load->flags |= ins_flag;
9064 token = read32 (ip + 1);
9065 klass = mini_get_class (method, token, generic_context);
9066 CHECK_TYPELOAD (klass);
9067 /* FIXME: should check item at sp [1] is compatible with the type of the store. */
9068 EMIT_NEW_STORE_MEMBASE_TYPE (cfg, ins, &klass->byval_arg, sp [0]->dreg, 0, sp [1]->dreg);
9069 if (cfg->gen_write_barriers && cfg->method->wrapper_type != MONO_WRAPPER_WRITE_BARRIER &&
9070 generic_class_is_reference_type (cfg, klass)) {
9071 /* insert call to write barrier */
9072 emit_write_barrier (cfg, sp [0], sp [1], -1);
9084 const char *data_ptr;
9086 guint32 field_token;
9092 token = read32 (ip + 1);
9094 klass = mini_get_class (method, token, generic_context);
9095 CHECK_TYPELOAD (klass);
9097 if (cfg->generic_sharing_context)
9098 context_used = mono_class_check_context_used (klass);
9100 if (sp [0]->type == STACK_I8 || (SIZEOF_VOID_P == 8 && sp [0]->type == STACK_PTR)) {
9101 MONO_INST_NEW (cfg, ins, OP_LCONV_TO_I4);
9102 ins->sreg1 = sp [0]->dreg;
9103 ins->type = STACK_I4;
9104 ins->dreg = alloc_ireg (cfg);
9105 MONO_ADD_INS (cfg->cbb, ins);
9106 *sp = mono_decompose_opcode (cfg, ins);
9111 MonoClass *array_class = mono_array_class_get (klass, 1);
9112 /* FIXME: we cannot get a managed
9113 allocator because we can't get the
9114 open generic class's vtable. We
9115 have the same problem in
9116 handle_alloc(). This
9117 needs to be solved so that we can
9118 have managed allocs of shared
9121 MonoVTable *array_class_vtable = mono_class_vtable (cfg->domain, array_class);
9122 MonoMethod *managed_alloc = mono_gc_get_managed_array_allocator (array_class_vtable, 1);
9124 MonoMethod *managed_alloc = NULL;
9126 /* FIXME: Decompose later to help abcrem */
9129 args [0] = emit_get_rgctx_klass (cfg, context_used,
9130 array_class, MONO_RGCTX_INFO_VTABLE);
9135 ins = mono_emit_method_call (cfg, managed_alloc, args, NULL);
9137 ins = mono_emit_jit_icall (cfg, mono_array_new_specific, args);
9139 if (cfg->opt & MONO_OPT_SHARED) {
9140 /* Decompose now to avoid problems with references to the domainvar */
9141 MonoInst *iargs [3];
9143 EMIT_NEW_DOMAINCONST (cfg, iargs [0]);
9144 EMIT_NEW_CLASSCONST (cfg, iargs [1], klass);
9147 ins = mono_emit_jit_icall (cfg, mono_array_new, iargs);
9149 /* Decompose later since it is needed by abcrem */
9150 MonoClass *array_type = mono_array_class_get (klass, 1);
9151 mono_class_vtable (cfg->domain, array_type);
9152 CHECK_TYPELOAD (array_type);
9154 MONO_INST_NEW (cfg, ins, OP_NEWARR);
9155 ins->dreg = alloc_ireg_ref (cfg);
9156 ins->sreg1 = sp [0]->dreg;
9157 ins->inst_newa_class = klass;
9158 ins->type = STACK_OBJ;
9160 MONO_ADD_INS (cfg->cbb, ins);
9161 cfg->flags |= MONO_CFG_HAS_ARRAY_ACCESS;
9162 cfg->cbb->has_array_access = TRUE;
9164 /* Needed so mono_emit_load_get_addr () gets called */
9165 mono_get_got_var (cfg);
9175 * we inline/optimize the initialization sequence if possible.
9176 * we should also allocate the array as not cleared, since we spend as much time clearing to 0 as initializing
9177 * for small sizes open code the memcpy
9178 * ensure the rva field is big enough
9180 if ((cfg->opt & MONO_OPT_INTRINS) && ip + 6 < end && ip_in_bb (cfg, bblock, ip + 6) && (len_ins->opcode == OP_ICONST) && (data_ptr = initialize_array_data (method, cfg->compile_aot, ip, klass, len_ins->inst_c0, &data_size, &field_token))) {
9181 MonoMethod *memcpy_method = get_memcpy_method ();
9182 MonoInst *iargs [3];
9183 int add_reg = alloc_ireg_mp (cfg);
9185 EMIT_NEW_BIALU_IMM (cfg, iargs [0], OP_PADD_IMM, add_reg, ins->dreg, G_STRUCT_OFFSET (MonoArray, vector));
9186 if (cfg->compile_aot) {
9187 EMIT_NEW_AOTCONST_TOKEN (cfg, iargs [1], MONO_PATCH_INFO_RVA, method->klass->image, GPOINTER_TO_UINT(field_token), STACK_PTR, NULL);
9189 EMIT_NEW_PCONST (cfg, iargs [1], (char*)data_ptr);
9191 EMIT_NEW_ICONST (cfg, iargs [2], data_size);
9192 mono_emit_method_call (cfg, memcpy_method, iargs, NULL);
9201 if (sp [0]->type != STACK_OBJ)
9204 MONO_INST_NEW (cfg, ins, OP_LDLEN);
9205 ins->dreg = alloc_preg (cfg);
9206 ins->sreg1 = sp [0]->dreg;
9207 ins->type = STACK_I4;
9208 /* This flag will be inherited by the decomposition */
9209 ins->flags |= MONO_INST_FAULT;
9210 MONO_ADD_INS (cfg->cbb, ins);
9211 cfg->flags |= MONO_CFG_HAS_ARRAY_ACCESS;
9212 cfg->cbb->has_array_access = TRUE;
9220 if (sp [0]->type != STACK_OBJ)
9223 cfg->flags |= MONO_CFG_HAS_LDELEMA;
9225 klass = mini_get_class (method, read32 (ip + 1), generic_context);
9226 CHECK_TYPELOAD (klass);
9227 /* we need to make sure that this array is exactly the type it needs
9228 * to be for correctness. the wrappers are lax with their usage
9229 * so we need to ignore them here
9231 if (!klass->valuetype && method->wrapper_type == MONO_WRAPPER_NONE && !readonly) {
9232 MonoClass *array_class = mono_array_class_get (klass, 1);
9233 mini_emit_check_array_type (cfg, sp [0], array_class);
9234 CHECK_TYPELOAD (array_class);
9238 ins = mini_emit_ldelema_1_ins (cfg, klass, sp [0], sp [1], TRUE);
9253 case CEE_LDELEM_REF: {
9259 if (*ip == CEE_LDELEM) {
9261 token = read32 (ip + 1);
9262 klass = mini_get_class (method, token, generic_context);
9263 CHECK_TYPELOAD (klass);
9264 mono_class_init (klass);
9267 klass = array_access_to_klass (*ip);
9269 if (sp [0]->type != STACK_OBJ)
9272 cfg->flags |= MONO_CFG_HAS_LDELEMA;
9274 if (sp [1]->opcode == OP_ICONST) {
9275 int array_reg = sp [0]->dreg;
9276 int index_reg = sp [1]->dreg;
9277 int offset = (mono_class_array_element_size (klass) * sp [1]->inst_c0) + G_STRUCT_OFFSET (MonoArray, vector);
9279 MONO_EMIT_BOUNDS_CHECK (cfg, array_reg, MonoArray, max_length, index_reg);
9280 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, ins, &klass->byval_arg, array_reg, offset);
9282 addr = mini_emit_ldelema_1_ins (cfg, klass, sp [0], sp [1], TRUE);
9283 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, ins, &klass->byval_arg, addr->dreg, 0);
9286 if (*ip == CEE_LDELEM)
9299 case CEE_STELEM_REF:
9306 cfg->flags |= MONO_CFG_HAS_LDELEMA;
9308 if (*ip == CEE_STELEM) {
9310 token = read32 (ip + 1);
9311 klass = mini_get_class (method, token, generic_context);
9312 CHECK_TYPELOAD (klass);
9313 mono_class_init (klass);
9316 klass = array_access_to_klass (*ip);
9318 if (sp [0]->type != STACK_OBJ)
9321 /* storing a NULL doesn't need any of the complex checks in stelemref */
9322 if (generic_class_is_reference_type (cfg, klass) &&
9323 !(sp [2]->opcode == OP_PCONST && sp [2]->inst_p0 == NULL)) {
9324 MonoClass *obj_array = mono_array_class_get_cached (mono_defaults.object_class, 1);
9325 MonoMethod *helper = mono_marshal_get_virtual_stelemref (obj_array);
9326 MonoInst *iargs [3];
9329 mono_class_setup_vtable (obj_array);
9330 g_assert (helper->slot);
9332 if (sp [0]->type != STACK_OBJ)
9334 if (sp [2]->type != STACK_OBJ)
9341 mono_emit_method_call (cfg, helper, iargs, sp [0]);
9343 if (sp [1]->opcode == OP_ICONST) {
9344 int array_reg = sp [0]->dreg;
9345 int index_reg = sp [1]->dreg;
9346 int offset = (mono_class_array_element_size (klass) * sp [1]->inst_c0) + G_STRUCT_OFFSET (MonoArray, vector);
9348 MONO_EMIT_BOUNDS_CHECK (cfg, array_reg, MonoArray, max_length, index_reg);
9349 EMIT_NEW_STORE_MEMBASE_TYPE (cfg, ins, &klass->byval_arg, array_reg, offset, sp [2]->dreg);
9351 addr = mini_emit_ldelema_1_ins (cfg, klass, sp [0], sp [1], TRUE);
9352 EMIT_NEW_STORE_MEMBASE_TYPE (cfg, ins, &klass->byval_arg, addr->dreg, 0, sp [2]->dreg);
9356 if (*ip == CEE_STELEM)
9363 case CEE_CKFINITE: {
9367 MONO_INST_NEW (cfg, ins, OP_CKFINITE);
9368 ins->sreg1 = sp [0]->dreg;
9369 ins->dreg = alloc_freg (cfg);
9370 ins->type = STACK_R8;
9371 MONO_ADD_INS (bblock, ins);
9373 *sp++ = mono_decompose_opcode (cfg, ins);
9378 case CEE_REFANYVAL: {
9379 MonoInst *src_var, *src;
9381 int klass_reg = alloc_preg (cfg);
9382 int dreg = alloc_preg (cfg);
9385 MONO_INST_NEW (cfg, ins, *ip);
9388 klass = mono_class_get_full (image, read32 (ip + 1), generic_context);
9389 CHECK_TYPELOAD (klass);
9390 mono_class_init (klass);
9392 if (cfg->generic_sharing_context)
9393 context_used = mono_class_check_context_used (klass);
9396 src_var = get_vreg_to_inst (cfg, sp [0]->dreg);
9398 src_var = mono_compile_create_var_for_vreg (cfg, &mono_defaults.typed_reference_class->byval_arg, OP_LOCAL, sp [0]->dreg);
9399 EMIT_NEW_VARLOADA (cfg, src, src_var, src_var->inst_vtype);
9400 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, src->dreg, G_STRUCT_OFFSET (MonoTypedRef, klass));
9403 MonoInst *klass_ins;
9405 klass_ins = emit_get_rgctx_klass (cfg, context_used,
9406 klass, MONO_RGCTX_INFO_KLASS);
9409 MONO_EMIT_NEW_BIALU (cfg, OP_COMPARE, -1, klass_reg, klass_ins->dreg);
9410 MONO_EMIT_NEW_COND_EXC (cfg, NE_UN, "InvalidCastException");
9412 mini_emit_class_check (cfg, klass_reg, klass);
9414 EMIT_NEW_LOAD_MEMBASE (cfg, ins, OP_LOAD_MEMBASE, dreg, src->dreg, G_STRUCT_OFFSET (MonoTypedRef, value));
9415 ins->type = STACK_MP;
9420 case CEE_MKREFANY: {
9421 MonoInst *loc, *addr;
9424 MONO_INST_NEW (cfg, ins, *ip);
9427 klass = mono_class_get_full (image, read32 (ip + 1), generic_context);
9428 CHECK_TYPELOAD (klass);
9429 mono_class_init (klass);
9431 if (cfg->generic_sharing_context)
9432 context_used = mono_class_check_context_used (klass);
9434 loc = mono_compile_create_var (cfg, &mono_defaults.typed_reference_class->byval_arg, OP_LOCAL);
9435 EMIT_NEW_TEMPLOADA (cfg, addr, loc->inst_c0);
9438 MonoInst *const_ins;
9439 int type_reg = alloc_preg (cfg);
9441 const_ins = emit_get_rgctx_klass (cfg, context_used, klass, MONO_RGCTX_INFO_KLASS);
9442 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREP_MEMBASE_REG, addr->dreg, G_STRUCT_OFFSET (MonoTypedRef, klass), const_ins->dreg);
9443 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_ADD_IMM, type_reg, const_ins->dreg, G_STRUCT_OFFSET (MonoClass, byval_arg));
9444 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREP_MEMBASE_REG, addr->dreg, G_STRUCT_OFFSET (MonoTypedRef, type), type_reg);
9445 } else if (cfg->compile_aot) {
9446 int const_reg = alloc_preg (cfg);
9447 int type_reg = alloc_preg (cfg);
9449 MONO_EMIT_NEW_CLASSCONST (cfg, const_reg, klass);
9450 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREP_MEMBASE_REG, addr->dreg, G_STRUCT_OFFSET (MonoTypedRef, klass), const_reg);
9451 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_ADD_IMM, type_reg, const_reg, G_STRUCT_OFFSET (MonoClass, byval_arg));
9452 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREP_MEMBASE_REG, addr->dreg, G_STRUCT_OFFSET (MonoTypedRef, type), type_reg);
9454 MONO_EMIT_NEW_STORE_MEMBASE_IMM (cfg, OP_STOREP_MEMBASE_IMM, addr->dreg, G_STRUCT_OFFSET (MonoTypedRef, type), &klass->byval_arg);
9455 MONO_EMIT_NEW_STORE_MEMBASE_IMM (cfg, OP_STOREP_MEMBASE_IMM, addr->dreg, G_STRUCT_OFFSET (MonoTypedRef, klass), klass);
9457 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREP_MEMBASE_REG, addr->dreg, G_STRUCT_OFFSET (MonoTypedRef, value), sp [0]->dreg);
9459 EMIT_NEW_TEMPLOAD (cfg, ins, loc->inst_c0);
9460 ins->type = STACK_VTYPE;
9461 ins->klass = mono_defaults.typed_reference_class;
9468 MonoClass *handle_class;
9470 CHECK_STACK_OVF (1);
9473 n = read32 (ip + 1);
9475 if (method->wrapper_type == MONO_WRAPPER_DYNAMIC_METHOD ||
9476 method->wrapper_type == MONO_WRAPPER_SYNCHRONIZED) {
9477 handle = mono_method_get_wrapper_data (method, n);
9478 handle_class = mono_method_get_wrapper_data (method, n + 1);
9479 if (handle_class == mono_defaults.typehandle_class)
9480 handle = &((MonoClass*)handle)->byval_arg;
9483 handle = mono_ldtoken (image, n, &handle_class, generic_context);
9487 mono_class_init (handle_class);
9488 if (cfg->generic_sharing_context) {
9489 if (mono_metadata_token_table (n) == MONO_TABLE_TYPEDEF ||
9490 mono_metadata_token_table (n) == MONO_TABLE_TYPEREF) {
9491 /* This case handles ldtoken
9492 of an open type, like for
9495 } else if (handle_class == mono_defaults.typehandle_class) {
9496 /* If we get a MONO_TYPE_CLASS
9497 then we need to provide the
9499 instantiation of it. */
9500 if (mono_type_get_type (handle) == MONO_TYPE_CLASS)
9503 context_used = mono_class_check_context_used (mono_class_from_mono_type (handle));
9504 } else if (handle_class == mono_defaults.fieldhandle_class)
9505 context_used = mono_class_check_context_used (((MonoClassField*)handle)->parent);
9506 else if (handle_class == mono_defaults.methodhandle_class)
9507 context_used = mono_method_check_context_used (handle);
9509 g_assert_not_reached ();
9512 if ((cfg->opt & MONO_OPT_SHARED) &&
9513 method->wrapper_type != MONO_WRAPPER_DYNAMIC_METHOD &&
9514 method->wrapper_type != MONO_WRAPPER_SYNCHRONIZED) {
9515 MonoInst *addr, *vtvar, *iargs [3];
9516 int method_context_used;
9518 if (cfg->generic_sharing_context)
9519 method_context_used = mono_method_check_context_used (method);
9521 method_context_used = 0;
9523 vtvar = mono_compile_create_var (cfg, &handle_class->byval_arg, OP_LOCAL);
9525 EMIT_NEW_IMAGECONST (cfg, iargs [0], image);
9526 EMIT_NEW_ICONST (cfg, iargs [1], n);
9527 if (method_context_used) {
9528 iargs [2] = emit_get_rgctx_method (cfg, method_context_used,
9529 method, MONO_RGCTX_INFO_METHOD);
9530 ins = mono_emit_jit_icall (cfg, mono_ldtoken_wrapper_generic_shared, iargs);
9532 EMIT_NEW_PCONST (cfg, iargs [2], generic_context);
9533 ins = mono_emit_jit_icall (cfg, mono_ldtoken_wrapper, iargs);
9535 EMIT_NEW_TEMPLOADA (cfg, addr, vtvar->inst_c0);
9537 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORE_MEMBASE_REG, addr->dreg, 0, ins->dreg);
9539 EMIT_NEW_TEMPLOAD (cfg, ins, vtvar->inst_c0);
9541 if ((ip + 5 < end) && ip_in_bb (cfg, bblock, ip + 5) &&
9542 ((ip [5] == CEE_CALL) || (ip [5] == CEE_CALLVIRT)) &&
9543 (cmethod = mini_get_method (cfg, method, read32 (ip + 6), NULL, generic_context)) &&
9544 (cmethod->klass == mono_defaults.monotype_class->parent) &&
9545 (strcmp (cmethod->name, "GetTypeFromHandle") == 0)) {
9546 MonoClass *tclass = mono_class_from_mono_type (handle);
9548 mono_class_init (tclass);
9550 ins = emit_get_rgctx_klass (cfg, context_used,
9551 tclass, MONO_RGCTX_INFO_REFLECTION_TYPE);
9552 } else if (cfg->compile_aot) {
9553 if (method->wrapper_type) {
9554 if (mono_class_get (tclass->image, tclass->type_token) == tclass && !generic_context) {
9555 /* Special case for static synchronized wrappers */
9556 EMIT_NEW_TYPE_FROM_HANDLE_CONST (cfg, ins, tclass->image, tclass->type_token, generic_context);
9558 /* FIXME: n is not a normal token */
9559 cfg->disable_aot = TRUE;
9560 EMIT_NEW_PCONST (cfg, ins, NULL);
9563 EMIT_NEW_TYPE_FROM_HANDLE_CONST (cfg, ins, image, n, generic_context);
9566 EMIT_NEW_PCONST (cfg, ins, mono_type_get_object (cfg->domain, handle));
9568 ins->type = STACK_OBJ;
9569 ins->klass = cmethod->klass;
9572 MonoInst *addr, *vtvar;
9574 vtvar = mono_compile_create_var (cfg, &handle_class->byval_arg, OP_LOCAL);
9577 if (handle_class == mono_defaults.typehandle_class) {
9578 ins = emit_get_rgctx_klass (cfg, context_used,
9579 mono_class_from_mono_type (handle),
9580 MONO_RGCTX_INFO_TYPE);
9581 } else if (handle_class == mono_defaults.methodhandle_class) {
9582 ins = emit_get_rgctx_method (cfg, context_used,
9583 handle, MONO_RGCTX_INFO_METHOD);
9584 } else if (handle_class == mono_defaults.fieldhandle_class) {
9585 ins = emit_get_rgctx_field (cfg, context_used,
9586 handle, MONO_RGCTX_INFO_CLASS_FIELD);
9588 g_assert_not_reached ();
9590 } else if (cfg->compile_aot) {
9591 EMIT_NEW_LDTOKENCONST (cfg, ins, image, n);
9593 EMIT_NEW_PCONST (cfg, ins, handle);
9595 EMIT_NEW_TEMPLOADA (cfg, addr, vtvar->inst_c0);
9596 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORE_MEMBASE_REG, addr->dreg, 0, ins->dreg);
9597 EMIT_NEW_TEMPLOAD (cfg, ins, vtvar->inst_c0);
9607 MONO_INST_NEW (cfg, ins, OP_THROW);
9609 ins->sreg1 = sp [0]->dreg;
9611 bblock->out_of_line = TRUE;
9612 MONO_ADD_INS (bblock, ins);
9613 MONO_INST_NEW (cfg, ins, OP_NOT_REACHED);
9614 MONO_ADD_INS (bblock, ins);
9617 link_bblock (cfg, bblock, end_bblock);
9618 start_new_bblock = 1;
9620 case CEE_ENDFINALLY:
9621 MONO_INST_NEW (cfg, ins, OP_ENDFINALLY);
9622 MONO_ADD_INS (bblock, ins);
9624 start_new_bblock = 1;
9627 * Control will leave the method so empty the stack, otherwise
9628 * the next basic block will start with a nonempty stack.
9630 while (sp != stack_start) {
9638 if (*ip == CEE_LEAVE) {
9640 target = ip + 5 + (gint32)read32(ip + 1);
9643 target = ip + 2 + (signed char)(ip [1]);
9646 /* empty the stack */
9647 while (sp != stack_start) {
9652 * If this leave statement is in a catch block, check for a
9653 * pending exception, and rethrow it if necessary.
9654 * We avoid doing this in runtime invoke wrappers, since those are called
9655 * by native code which expects the wrapper to catch all exceptions.
9657 for (i = 0; i < header->num_clauses; ++i) {
9658 MonoExceptionClause *clause = &header->clauses [i];
9661 * Use <= in the final comparison to handle clauses with multiple
9662 * leave statements, like in bug #78024.
9663 * The ordering of the exception clauses guarantees that we find the
9666 if (MONO_OFFSET_IN_HANDLER (clause, ip - header->code) && (clause->flags == MONO_EXCEPTION_CLAUSE_NONE) && (ip - header->code + ((*ip == CEE_LEAVE) ? 5 : 2)) <= (clause->handler_offset + clause->handler_len) && method->wrapper_type != MONO_WRAPPER_RUNTIME_INVOKE) {
9668 MonoBasicBlock *dont_throw;
9673 NEW_TEMPLOAD (cfg, load, mono_find_exvar_for_offset (cfg, clause->handler_offset)->inst_c0);
9676 exc_ins = mono_emit_jit_icall (cfg, mono_thread_get_undeniable_exception, NULL);
9678 NEW_BBLOCK (cfg, dont_throw);
9681 * Currently, we always rethrow the abort exception, despite the
9682 * fact that this is not correct. See thread6.cs for an example.
9683 * But propagating the abort exception is more important than
9684 * getting the semantics right.
9686 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, exc_ins->dreg, 0);
9687 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBEQ, dont_throw);
9688 MONO_EMIT_NEW_UNALU (cfg, OP_THROW, -1, exc_ins->dreg);
9690 MONO_START_BB (cfg, dont_throw);
9695 if ((handlers = mono_find_final_block (cfg, ip, target, MONO_EXCEPTION_CLAUSE_FINALLY))) {
9697 MonoExceptionClause *clause;
9699 for (tmp = handlers; tmp; tmp = tmp->next) {
9701 tblock = cfg->cil_offset_to_bb [clause->handler_offset];
9703 link_bblock (cfg, bblock, tblock);
9704 MONO_INST_NEW (cfg, ins, OP_CALL_HANDLER);
9705 ins->inst_target_bb = tblock;
9706 ins->inst_eh_block = clause;
9707 MONO_ADD_INS (bblock, ins);
9708 bblock->has_call_handler = 1;
9709 if (COMPILE_LLVM (cfg)) {
9710 MonoBasicBlock *target_bb;
9713 * Link the finally bblock with the target, since it will
9714 * conceptually branch there.
9715 * FIXME: Have to link the bblock containing the endfinally.
9717 GET_BBLOCK (cfg, target_bb, target);
9718 link_bblock (cfg, tblock, target_bb);
9721 g_list_free (handlers);
9724 MONO_INST_NEW (cfg, ins, OP_BR);
9725 MONO_ADD_INS (bblock, ins);
9726 GET_BBLOCK (cfg, tblock, target);
9727 link_bblock (cfg, bblock, tblock);
9728 ins->inst_target_bb = tblock;
9729 start_new_bblock = 1;
9731 if (*ip == CEE_LEAVE)
9740 * Mono specific opcodes
9742 case MONO_CUSTOM_PREFIX: {
9744 g_assert (method->wrapper_type != MONO_WRAPPER_NONE);
9748 case CEE_MONO_ICALL: {
9750 MonoJitICallInfo *info;
9752 token = read32 (ip + 2);
9753 func = mono_method_get_wrapper_data (method, token);
9754 info = mono_find_jit_icall_by_addr (func);
9757 CHECK_STACK (info->sig->param_count);
9758 sp -= info->sig->param_count;
9760 ins = mono_emit_jit_icall (cfg, info->func, sp);
9761 if (!MONO_TYPE_IS_VOID (info->sig->ret))
9765 inline_costs += 10 * num_calls++;
9769 case CEE_MONO_LDPTR: {
9772 CHECK_STACK_OVF (1);
9774 token = read32 (ip + 2);
9776 ptr = mono_method_get_wrapper_data (method, token);
9777 if (cfg->compile_aot && (method->wrapper_type == MONO_WRAPPER_MANAGED_TO_NATIVE) && (strstr (method->name, "__icall_wrapper_") == method->name)) {
9778 MonoJitICallInfo *callinfo;
9779 const char *icall_name;
9781 icall_name = method->name + strlen ("__icall_wrapper_");
9782 g_assert (icall_name);
9783 callinfo = mono_find_jit_icall_by_name (icall_name);
9784 g_assert (callinfo);
9786 if (ptr == callinfo->func) {
9787 /* Will be transformed into an AOTCONST later */
9788 EMIT_NEW_PCONST (cfg, ins, ptr);
9794 /* FIXME: Generalize this */
9795 if (cfg->compile_aot && ptr == mono_thread_interruption_request_flag ()) {
9796 EMIT_NEW_AOTCONST (cfg, ins, MONO_PATCH_INFO_INTERRUPTION_REQUEST_FLAG, NULL);
9801 EMIT_NEW_PCONST (cfg, ins, ptr);
9804 inline_costs += 10 * num_calls++;
9805 /* Can't embed random pointers into AOT code */
9806 cfg->disable_aot = 1;
9809 case CEE_MONO_ICALL_ADDR: {
9810 MonoMethod *cmethod;
9813 CHECK_STACK_OVF (1);
9815 token = read32 (ip + 2);
9817 cmethod = mono_method_get_wrapper_data (method, token);
9819 if (cfg->compile_aot) {
9820 EMIT_NEW_AOTCONST (cfg, ins, MONO_PATCH_INFO_ICALL_ADDR, cmethod);
9822 ptr = mono_lookup_internal_call (cmethod);
9824 EMIT_NEW_PCONST (cfg, ins, ptr);
9830 case CEE_MONO_VTADDR: {
9831 MonoInst *src_var, *src;
9837 src_var = get_vreg_to_inst (cfg, sp [0]->dreg);
9838 EMIT_NEW_VARLOADA ((cfg), (src), src_var, src_var->inst_vtype);
9843 case CEE_MONO_NEWOBJ: {
9844 MonoInst *iargs [2];
9846 CHECK_STACK_OVF (1);
9848 token = read32 (ip + 2);
9849 klass = (MonoClass *)mono_method_get_wrapper_data (method, token);
9850 mono_class_init (klass);
9851 NEW_DOMAINCONST (cfg, iargs [0]);
9852 MONO_ADD_INS (cfg->cbb, iargs [0]);
9853 NEW_CLASSCONST (cfg, iargs [1], klass);
9854 MONO_ADD_INS (cfg->cbb, iargs [1]);
9855 *sp++ = mono_emit_jit_icall (cfg, mono_object_new, iargs);
9857 inline_costs += 10 * num_calls++;
9860 case CEE_MONO_OBJADDR:
9863 MONO_INST_NEW (cfg, ins, OP_MOVE);
9864 ins->dreg = alloc_ireg_mp (cfg);
9865 ins->sreg1 = sp [0]->dreg;
9866 ins->type = STACK_MP;
9867 MONO_ADD_INS (cfg->cbb, ins);
9871 case CEE_MONO_LDNATIVEOBJ:
9873 * Similar to LDOBJ, but instead load the unmanaged
9874 * representation of the vtype to the stack.
9879 token = read32 (ip + 2);
9880 klass = mono_method_get_wrapper_data (method, token);
9881 g_assert (klass->valuetype);
9882 mono_class_init (klass);
9885 MonoInst *src, *dest, *temp;
9888 temp = mono_compile_create_var (cfg, &klass->byval_arg, OP_LOCAL);
9889 temp->backend.is_pinvoke = 1;
9890 EMIT_NEW_TEMPLOADA (cfg, dest, temp->inst_c0);
9891 mini_emit_stobj (cfg, dest, src, klass, TRUE);
9893 EMIT_NEW_TEMPLOAD (cfg, dest, temp->inst_c0);
9894 dest->type = STACK_VTYPE;
9895 dest->klass = klass;
9901 case CEE_MONO_RETOBJ: {
9903 * Same as RET, but return the native representation of a vtype
9906 g_assert (cfg->ret);
9907 g_assert (mono_method_signature (method)->pinvoke);
9912 token = read32 (ip + 2);
9913 klass = (MonoClass *)mono_method_get_wrapper_data (method, token);
9915 if (!cfg->vret_addr) {
9916 g_assert (cfg->ret_var_is_local);
9918 EMIT_NEW_VARLOADA (cfg, ins, cfg->ret, cfg->ret->inst_vtype);
9920 EMIT_NEW_RETLOADA (cfg, ins);
9922 mini_emit_stobj (cfg, ins, sp [0], klass, TRUE);
9924 if (sp != stack_start)
9927 MONO_INST_NEW (cfg, ins, OP_BR);
9928 ins->inst_target_bb = end_bblock;
9929 MONO_ADD_INS (bblock, ins);
9930 link_bblock (cfg, bblock, end_bblock);
9931 start_new_bblock = 1;
9935 case CEE_MONO_CISINST:
9936 case CEE_MONO_CCASTCLASS: {
9941 token = read32 (ip + 2);
9942 klass = (MonoClass *)mono_method_get_wrapper_data (method, token);
9943 if (ip [1] == CEE_MONO_CISINST)
9944 ins = handle_cisinst (cfg, klass, sp [0]);
9946 ins = handle_ccastclass (cfg, klass, sp [0]);
9952 case CEE_MONO_SAVE_LMF:
9953 case CEE_MONO_RESTORE_LMF:
9954 #ifdef MONO_ARCH_HAVE_LMF_OPS
9955 MONO_INST_NEW (cfg, ins, (ip [1] == CEE_MONO_SAVE_LMF) ? OP_SAVE_LMF : OP_RESTORE_LMF);
9956 MONO_ADD_INS (bblock, ins);
9957 cfg->need_lmf_area = TRUE;
9961 case CEE_MONO_CLASSCONST:
9962 CHECK_STACK_OVF (1);
9964 token = read32 (ip + 2);
9965 EMIT_NEW_CLASSCONST (cfg, ins, mono_method_get_wrapper_data (method, token));
9968 inline_costs += 10 * num_calls++;
9970 case CEE_MONO_NOT_TAKEN:
9971 bblock->out_of_line = TRUE;
9975 CHECK_STACK_OVF (1);
9977 MONO_INST_NEW (cfg, ins, OP_TLS_GET);
9978 ins->dreg = alloc_preg (cfg);
9979 ins->inst_offset = (gint32)read32 (ip + 2);
9980 ins->type = STACK_PTR;
9981 MONO_ADD_INS (bblock, ins);
9985 case CEE_MONO_DYN_CALL: {
9988 /* It would be easier to call a trampoline, but that would put an
9989 * extra frame on the stack, confusing exception handling. So
9990 * implement it inline using an opcode for now.
9993 if (!cfg->dyn_call_var) {
9994 cfg->dyn_call_var = mono_compile_create_var (cfg, &mono_defaults.int_class->byval_arg, OP_LOCAL);
9995 /* prevent it from being register allocated */
9996 cfg->dyn_call_var->flags |= MONO_INST_INDIRECT;
9999 /* Has to use a call inst since the local regalloc expects it */
10000 MONO_INST_NEW_CALL (cfg, call, OP_DYN_CALL);
10001 ins = (MonoInst*)call;
10003 ins->sreg1 = sp [0]->dreg;
10004 ins->sreg2 = sp [1]->dreg;
10005 MONO_ADD_INS (bblock, ins);
10007 #ifdef MONO_ARCH_DYN_CALL_PARAM_AREA
10008 cfg->param_area = MAX (cfg->param_area, MONO_ARCH_DYN_CALL_PARAM_AREA);
10012 inline_costs += 10 * num_calls++;
10016 case CEE_MONO_MEMORY_BARRIER: {
10018 emit_memory_barrier (cfg, (int)read32 (ip + 1));
10023 g_error ("opcode 0x%02x 0x%02x not handled", MONO_CUSTOM_PREFIX, ip [1]);
10029 case CEE_PREFIX1: {
10032 case CEE_ARGLIST: {
10033 /* somewhat similar to LDTOKEN */
10034 MonoInst *addr, *vtvar;
10035 CHECK_STACK_OVF (1);
10036 vtvar = mono_compile_create_var (cfg, &mono_defaults.argumenthandle_class->byval_arg, OP_LOCAL);
10038 EMIT_NEW_TEMPLOADA (cfg, addr, vtvar->inst_c0);
10039 EMIT_NEW_UNALU (cfg, ins, OP_ARGLIST, -1, addr->dreg);
10041 EMIT_NEW_TEMPLOAD (cfg, ins, vtvar->inst_c0);
10042 ins->type = STACK_VTYPE;
10043 ins->klass = mono_defaults.argumenthandle_class;
10056 * The following transforms:
10057 * CEE_CEQ into OP_CEQ
10058 * CEE_CGT into OP_CGT
10059 * CEE_CGT_UN into OP_CGT_UN
10060 * CEE_CLT into OP_CLT
10061 * CEE_CLT_UN into OP_CLT_UN
10063 MONO_INST_NEW (cfg, cmp, (OP_CEQ - CEE_CEQ) + ip [1]);
10065 MONO_INST_NEW (cfg, ins, cmp->opcode);
10067 cmp->sreg1 = sp [0]->dreg;
10068 cmp->sreg2 = sp [1]->dreg;
10069 type_from_op (cmp, sp [0], sp [1]);
10071 if ((sp [0]->type == STACK_I8) || ((SIZEOF_VOID_P == 8) && ((sp [0]->type == STACK_PTR) || (sp [0]->type == STACK_OBJ) || (sp [0]->type == STACK_MP))))
10072 cmp->opcode = OP_LCOMPARE;
10073 else if (sp [0]->type == STACK_R8)
10074 cmp->opcode = OP_FCOMPARE;
10076 cmp->opcode = OP_ICOMPARE;
10077 MONO_ADD_INS (bblock, cmp);
10078 ins->type = STACK_I4;
10079 ins->dreg = alloc_dreg (cfg, ins->type);
10080 type_from_op (ins, sp [0], sp [1]);
10082 if (cmp->opcode == OP_FCOMPARE) {
10084 * The backends expect the fceq opcodes to do the
10087 cmp->opcode = OP_NOP;
10088 ins->sreg1 = cmp->sreg1;
10089 ins->sreg2 = cmp->sreg2;
10091 MONO_ADD_INS (bblock, ins);
10097 MonoInst *argconst;
10098 MonoMethod *cil_method;
10100 CHECK_STACK_OVF (1);
10102 n = read32 (ip + 2);
10103 cmethod = mini_get_method (cfg, method, n, NULL, generic_context);
10104 if (!cmethod || mono_loader_get_last_error ())
10106 mono_class_init (cmethod->klass);
10108 mono_save_token_info (cfg, image, n, cmethod);
10110 if (cfg->generic_sharing_context)
10111 context_used = mono_method_check_context_used (cmethod);
10113 cil_method = cmethod;
10114 if (!dont_verify && !cfg->skip_visibility && !mono_method_can_access_method (method, cmethod))
10115 METHOD_ACCESS_FAILURE;
10117 if (mono_security_get_mode () == MONO_SECURITY_MODE_CAS) {
10118 if (check_linkdemand (cfg, method, cmethod))
10120 CHECK_CFG_EXCEPTION;
10121 } else if (mono_security_get_mode () == MONO_SECURITY_MODE_CORE_CLR) {
10122 ensure_method_is_allowed_to_call_method (cfg, method, cmethod, bblock, ip);
10126 * Optimize the common case of ldftn+delegate creation
10128 if ((sp > stack_start) && (ip + 6 + 5 < end) && ip_in_bb (cfg, bblock, ip + 6) && (ip [6] == CEE_NEWOBJ)) {
10129 MonoMethod *ctor_method = mini_get_method (cfg, method, read32 (ip + 7), NULL, generic_context);
10130 if (ctor_method && (ctor_method->klass->parent == mono_defaults.multicastdelegate_class)) {
10131 MonoInst *target_ins;
10132 MonoMethod *invoke;
10133 int invoke_context_used = 0;
10135 invoke = mono_get_delegate_invoke (ctor_method->klass);
10136 if (!invoke || !mono_method_signature (invoke))
10139 if (cfg->generic_sharing_context)
10140 invoke_context_used = mono_method_check_context_used (invoke);
10142 target_ins = sp [-1];
10144 if (mono_security_get_mode () == MONO_SECURITY_MODE_CORE_CLR)
10145 ensure_method_is_allowed_to_call_method (cfg, method, ctor_method, bblock, ip);
10147 if (!(cmethod->flags & METHOD_ATTRIBUTE_STATIC)) {
10148 /*LAME IMPL: We must not add a null check for virtual invoke delegates.*/
10149 if (mono_method_signature (invoke)->param_count == mono_method_signature (cmethod)->param_count) {
10150 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, target_ins->dreg, 0);
10151 MONO_EMIT_NEW_COND_EXC (cfg, EQ, "ArgumentException");
10155 #if defined(MONO_ARCH_HAVE_CREATE_DELEGATE_TRAMPOLINE)
10156 /* FIXME: SGEN support */
10157 if (invoke_context_used == 0) {
10159 if (cfg->verbose_level > 3)
10160 g_print ("converting (in B%d: stack: %d) %s", bblock->block_num, (int)(sp - stack_start), mono_disasm_code_one (NULL, method, ip, NULL));
10162 *sp = handle_delegate_ctor (cfg, ctor_method->klass, target_ins, cmethod, context_used);
10163 CHECK_CFG_EXCEPTION;
10172 argconst = emit_get_rgctx_method (cfg, context_used, cmethod, MONO_RGCTX_INFO_METHOD);
10173 ins = mono_emit_jit_icall (cfg, mono_ldftn, &argconst);
10177 inline_costs += 10 * num_calls++;
10180 case CEE_LDVIRTFTN: {
10181 MonoInst *args [2];
10185 n = read32 (ip + 2);
10186 cmethod = mini_get_method (cfg, method, n, NULL, generic_context);
10187 if (!cmethod || mono_loader_get_last_error ())
10189 mono_class_init (cmethod->klass);
10191 if (cfg->generic_sharing_context)
10192 context_used = mono_method_check_context_used (cmethod);
10194 if (mono_security_get_mode () == MONO_SECURITY_MODE_CAS) {
10195 if (check_linkdemand (cfg, method, cmethod))
10197 CHECK_CFG_EXCEPTION;
10198 } else if (mono_security_get_mode () == MONO_SECURITY_MODE_CORE_CLR) {
10199 ensure_method_is_allowed_to_call_method (cfg, method, cmethod, bblock, ip);
10205 args [1] = emit_get_rgctx_method (cfg, context_used,
10206 cmethod, MONO_RGCTX_INFO_METHOD);
10209 *sp++ = mono_emit_jit_icall (cfg, mono_ldvirtfn_gshared, args);
10211 *sp++ = mono_emit_jit_icall (cfg, mono_ldvirtfn, args);
10214 inline_costs += 10 * num_calls++;
10218 CHECK_STACK_OVF (1);
10220 n = read16 (ip + 2);
10222 EMIT_NEW_ARGLOAD (cfg, ins, n);
10227 CHECK_STACK_OVF (1);
10229 n = read16 (ip + 2);
10231 NEW_ARGLOADA (cfg, ins, n);
10232 MONO_ADD_INS (cfg->cbb, ins);
10240 n = read16 (ip + 2);
10242 if (!dont_verify_stloc && target_type_is_incompatible (cfg, param_types [n], *sp))
10244 EMIT_NEW_ARGSTORE (cfg, ins, n, *sp);
10248 CHECK_STACK_OVF (1);
10250 n = read16 (ip + 2);
10252 EMIT_NEW_LOCLOAD (cfg, ins, n);
10257 unsigned char *tmp_ip;
10258 CHECK_STACK_OVF (1);
10260 n = read16 (ip + 2);
10263 if ((tmp_ip = emit_optimized_ldloca_ir (cfg, ip, end, 2))) {
10269 EMIT_NEW_LOCLOADA (cfg, ins, n);
10278 n = read16 (ip + 2);
10280 if (!dont_verify_stloc && target_type_is_incompatible (cfg, header->locals [n], *sp))
10282 emit_stloc_ir (cfg, sp, header, n);
10289 if (sp != stack_start)
10291 if (cfg->method != method)
10293 * Inlining this into a loop in a parent could lead to
10294 * stack overflows which is different behavior than the
10295 * non-inlined case, thus disable inlining in this case.
10297 goto inline_failure;
10299 MONO_INST_NEW (cfg, ins, OP_LOCALLOC);
10300 ins->dreg = alloc_preg (cfg);
10301 ins->sreg1 = sp [0]->dreg;
10302 ins->type = STACK_PTR;
10303 MONO_ADD_INS (cfg->cbb, ins);
10305 cfg->flags |= MONO_CFG_HAS_ALLOCA;
10307 ins->flags |= MONO_INST_INIT;
10312 case CEE_ENDFILTER: {
10313 MonoExceptionClause *clause, *nearest;
10314 int cc, nearest_num;
10318 if ((sp != stack_start) || (sp [0]->type != STACK_I4))
10320 MONO_INST_NEW (cfg, ins, OP_ENDFILTER);
10321 ins->sreg1 = (*sp)->dreg;
10322 MONO_ADD_INS (bblock, ins);
10323 start_new_bblock = 1;
10328 for (cc = 0; cc < header->num_clauses; ++cc) {
10329 clause = &header->clauses [cc];
10330 if ((clause->flags & MONO_EXCEPTION_CLAUSE_FILTER) &&
10331 ((ip - header->code) > clause->data.filter_offset && (ip - header->code) <= clause->handler_offset) &&
10332 (!nearest || (clause->data.filter_offset < nearest->data.filter_offset))) {
10337 g_assert (nearest);
10338 if ((ip - header->code) != nearest->handler_offset)
10343 case CEE_UNALIGNED_:
10344 ins_flag |= MONO_INST_UNALIGNED;
10345 /* FIXME: record alignment? we can assume 1 for now */
10349 case CEE_VOLATILE_:
10350 ins_flag |= MONO_INST_VOLATILE;
10354 ins_flag |= MONO_INST_TAILCALL;
10355 cfg->flags |= MONO_CFG_HAS_TAIL;
10356 /* Can't inline tail calls at this time */
10357 inline_costs += 100000;
10364 token = read32 (ip + 2);
10365 klass = mini_get_class (method, token, generic_context);
10366 CHECK_TYPELOAD (klass);
10367 if (generic_class_is_reference_type (cfg, klass))
10368 MONO_EMIT_NEW_STORE_MEMBASE_IMM (cfg, OP_STORE_MEMBASE_IMM, sp [0]->dreg, 0, 0);
10370 mini_emit_initobj (cfg, *sp, NULL, klass);
10374 case CEE_CONSTRAINED_:
10376 token = read32 (ip + 2);
10377 if (method->wrapper_type != MONO_WRAPPER_NONE)
10378 constrained_call = (MonoClass *)mono_method_get_wrapper_data (method, token);
10380 constrained_call = mono_class_get_full (image, token, generic_context);
10381 CHECK_TYPELOAD (constrained_call);
10385 case CEE_INITBLK: {
10386 MonoInst *iargs [3];
10390 if ((ip [1] == CEE_CPBLK) && (cfg->opt & MONO_OPT_INTRINS) && (sp [2]->opcode == OP_ICONST) && ((n = sp [2]->inst_c0) <= sizeof (gpointer) * 5)) {
10391 mini_emit_memcpy (cfg, sp [0]->dreg, 0, sp [1]->dreg, 0, sp [2]->inst_c0, 0);
10392 } else if ((ip [1] == CEE_INITBLK) && (cfg->opt & MONO_OPT_INTRINS) && (sp [2]->opcode == OP_ICONST) && ((n = sp [2]->inst_c0) <= sizeof (gpointer) * 5) && (sp [1]->opcode == OP_ICONST) && (sp [1]->inst_c0 == 0)) {
10393 /* emit_memset only works when val == 0 */
10394 mini_emit_memset (cfg, sp [0]->dreg, 0, sp [2]->inst_c0, sp [1]->inst_c0, 0);
10396 iargs [0] = sp [0];
10397 iargs [1] = sp [1];
10398 iargs [2] = sp [2];
10399 if (ip [1] == CEE_CPBLK) {
10400 MonoMethod *memcpy_method = get_memcpy_method ();
10401 mono_emit_method_call (cfg, memcpy_method, iargs, NULL);
10403 MonoMethod *memset_method = get_memset_method ();
10404 mono_emit_method_call (cfg, memset_method, iargs, NULL);
10414 ins_flag |= MONO_INST_NOTYPECHECK;
10416 ins_flag |= MONO_INST_NORANGECHECK;
10417 /* we ignore the no-nullcheck for now since we
10418 * really do it explicitly only when doing callvirt->call
10422 case CEE_RETHROW: {
10424 int handler_offset = -1;
10426 for (i = 0; i < header->num_clauses; ++i) {
10427 MonoExceptionClause *clause = &header->clauses [i];
10428 if (MONO_OFFSET_IN_HANDLER (clause, ip - header->code) && !(clause->flags & MONO_EXCEPTION_CLAUSE_FINALLY)) {
10429 handler_offset = clause->handler_offset;
10434 bblock->flags |= BB_EXCEPTION_UNSAFE;
10436 g_assert (handler_offset != -1);
10438 EMIT_NEW_TEMPLOAD (cfg, load, mono_find_exvar_for_offset (cfg, handler_offset)->inst_c0);
10439 MONO_INST_NEW (cfg, ins, OP_RETHROW);
10440 ins->sreg1 = load->dreg;
10441 MONO_ADD_INS (bblock, ins);
10443 MONO_INST_NEW (cfg, ins, OP_NOT_REACHED);
10444 MONO_ADD_INS (bblock, ins);
10447 link_bblock (cfg, bblock, end_bblock);
10448 start_new_bblock = 1;
10456 CHECK_STACK_OVF (1);
10458 token = read32 (ip + 2);
10459 if (mono_metadata_token_table (token) == MONO_TABLE_TYPESPEC && !method->klass->image->dynamic && !generic_context) {
10460 MonoType *type = mono_type_create_from_typespec (image, token);
10461 token = mono_type_size (type, &ialign);
10463 MonoClass *klass = mono_class_get_full (image, token, generic_context);
10464 CHECK_TYPELOAD (klass);
10465 mono_class_init (klass);
10466 token = mono_class_value_size (klass, &align);
10468 EMIT_NEW_ICONST (cfg, ins, token);
10473 case CEE_REFANYTYPE: {
10474 MonoInst *src_var, *src;
10480 src_var = get_vreg_to_inst (cfg, sp [0]->dreg);
10482 src_var = mono_compile_create_var_for_vreg (cfg, &mono_defaults.typed_reference_class->byval_arg, OP_LOCAL, sp [0]->dreg);
10483 EMIT_NEW_VARLOADA (cfg, src, src_var, src_var->inst_vtype);
10484 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, ins, &mono_defaults.typehandle_class->byval_arg, src->dreg, G_STRUCT_OFFSET (MonoTypedRef, type));
10489 case CEE_READONLY_:
10502 g_warning ("opcode 0xfe 0x%02x not handled", ip [1]);
10512 g_warning ("opcode 0x%02x not handled", *ip);
10516 if (start_new_bblock != 1)
10519 bblock->cil_length = ip - bblock->cil_code;
10520 if (bblock->next_bb) {
10521 /* This could already be set because of inlining, #693905 */
10522 MonoBasicBlock *bb = bblock;
10524 while (bb->next_bb)
10526 bb->next_bb = end_bblock;
10528 bblock->next_bb = end_bblock;
10531 if (cfg->method == method && cfg->domainvar) {
10533 MonoInst *get_domain;
10535 cfg->cbb = init_localsbb;
10537 if (! (get_domain = mono_arch_get_domain_intrinsic (cfg))) {
10538 get_domain = mono_emit_jit_icall (cfg, mono_domain_get, NULL);
10541 get_domain->dreg = alloc_preg (cfg);
10542 MONO_ADD_INS (cfg->cbb, get_domain);
10544 NEW_TEMPSTORE (cfg, store, cfg->domainvar->inst_c0, get_domain);
10545 MONO_ADD_INS (cfg->cbb, store);
10548 #if defined(TARGET_POWERPC) || defined(TARGET_X86)
10549 if (cfg->compile_aot)
10550 /* FIXME: The plt slots require a GOT var even if the method doesn't use it */
10551 mono_get_got_var (cfg);
10554 if (cfg->method == method && cfg->got_var)
10555 mono_emit_load_got_addr (cfg);
10560 cfg->cbb = init_localsbb;
10562 for (i = 0; i < header->num_locals; ++i) {
10563 MonoType *ptype = header->locals [i];
10564 int t = ptype->type;
10565 dreg = cfg->locals [i]->dreg;
10567 if (t == MONO_TYPE_VALUETYPE && ptype->data.klass->enumtype)
10568 t = mono_class_enum_basetype (ptype->data.klass)->type;
10569 if (ptype->byref) {
10570 MONO_EMIT_NEW_PCONST (cfg, dreg, NULL);
10571 } else if (t >= MONO_TYPE_BOOLEAN && t <= MONO_TYPE_U4) {
10572 MONO_EMIT_NEW_ICONST (cfg, cfg->locals [i]->dreg, 0);
10573 } else if (t == MONO_TYPE_I8 || t == MONO_TYPE_U8) {
10574 MONO_EMIT_NEW_I8CONST (cfg, cfg->locals [i]->dreg, 0);
10575 } else if (t == MONO_TYPE_R4 || t == MONO_TYPE_R8) {
10576 MONO_INST_NEW (cfg, ins, OP_R8CONST);
10577 ins->type = STACK_R8;
10578 ins->inst_p0 = (void*)&r8_0;
10579 ins->dreg = alloc_dreg (cfg, STACK_R8);
10580 MONO_ADD_INS (init_localsbb, ins);
10581 EMIT_NEW_LOCSTORE (cfg, store, i, ins);
10582 } else if ((t == MONO_TYPE_VALUETYPE) || (t == MONO_TYPE_TYPEDBYREF) ||
10583 ((t == MONO_TYPE_GENERICINST) && mono_type_generic_inst_is_valuetype (ptype))) {
10584 MONO_EMIT_NEW_VZERO (cfg, dreg, mono_class_from_mono_type (ptype));
10586 MONO_EMIT_NEW_PCONST (cfg, dreg, NULL);
10591 if (cfg->init_ref_vars && cfg->method == method) {
10592 /* Emit initialization for ref vars */
10593 // FIXME: Avoid duplication initialization for IL locals.
10594 for (i = 0; i < cfg->num_varinfo; ++i) {
10595 MonoInst *ins = cfg->varinfo [i];
10597 if (ins->opcode == OP_LOCAL && ins->type == STACK_OBJ)
10598 MONO_EMIT_NEW_PCONST (cfg, ins->dreg, NULL);
10602 /* Add a sequence point for method entry/exit events */
10604 NEW_SEQ_POINT (cfg, ins, METHOD_ENTRY_IL_OFFSET, FALSE);
10605 MONO_ADD_INS (init_localsbb, ins);
10606 NEW_SEQ_POINT (cfg, ins, METHOD_EXIT_IL_OFFSET, FALSE);
10607 MONO_ADD_INS (cfg->bb_exit, ins);
10612 if (cfg->method == method) {
10613 MonoBasicBlock *bb;
10614 for (bb = cfg->bb_entry; bb; bb = bb->next_bb) {
10615 bb->region = mono_find_block_region (cfg, bb->real_offset);
10617 mono_create_spvar_for_region (cfg, bb->region);
10618 if (cfg->verbose_level > 2)
10619 printf ("REGION BB%d IL_%04x ID_%08X\n", bb->block_num, bb->real_offset, bb->region);
10623 g_slist_free (class_inits);
10624 dont_inline = g_list_remove (dont_inline, method);
10626 if (inline_costs < 0) {
10629 /* Method is too large */
10630 mname = mono_method_full_name (method, TRUE);
10631 mono_cfg_set_exception (cfg, MONO_EXCEPTION_INVALID_PROGRAM);
10632 cfg->exception_message = g_strdup_printf ("Method %s is too complex.", mname);
10634 cfg->headers_to_free = g_slist_prepend_mempool (cfg->mempool, cfg->headers_to_free, header);
10635 mono_basic_block_free (original_bb);
10639 if ((cfg->verbose_level > 2) && (cfg->method == method))
10640 mono_print_code (cfg, "AFTER METHOD-TO-IR");
10642 cfg->headers_to_free = g_slist_prepend_mempool (cfg->mempool, cfg->headers_to_free, header);
10643 mono_basic_block_free (original_bb);
10644 return inline_costs;
10647 g_assert (cfg->exception_type != MONO_EXCEPTION_NONE);
10654 mono_cfg_set_exception (cfg, MONO_EXCEPTION_TYPE_LOAD);
10658 set_exception_type_from_invalid_il (cfg, method, ip);
10662 g_slist_free (class_inits);
10663 mono_basic_block_free (original_bb);
10664 dont_inline = g_list_remove (dont_inline, method);
10665 cfg->headers_to_free = g_slist_prepend_mempool (cfg->mempool, cfg->headers_to_free, header);
/*
 * store_membase_reg_to_store_membase_imm:
 *
 *   Map a *_MEMBASE_REG store opcode to its *_MEMBASE_IMM counterpart, used
 * when the value being stored is a constant and can be fused into the store.
 * NOTE(review): this chunk is missing interleaved lines (return type,
 * switch header, braces, default arm) — do not restructure without the
 * full original file.
 */
10670 store_membase_reg_to_store_membase_imm (int opcode)
10673 case OP_STORE_MEMBASE_REG:
10674 return OP_STORE_MEMBASE_IMM;
10675 case OP_STOREI1_MEMBASE_REG:
10676 return OP_STOREI1_MEMBASE_IMM;
10677 case OP_STOREI2_MEMBASE_REG:
10678 return OP_STOREI2_MEMBASE_IMM;
10679 case OP_STOREI4_MEMBASE_REG:
10680 return OP_STOREI4_MEMBASE_IMM;
10681 case OP_STOREI8_MEMBASE_REG:
10682 return OP_STOREI8_MEMBASE_IMM;
/* Any other opcode is a caller bug: only REG-form stores may be passed in. */
10684 g_assert_not_reached ();
10690 #endif /* DISABLE_JIT */
/*
 * mono_op_to_op_imm:
 *
 *   Map a two-operand opcode to the variant taking an immediate second
 * operand, or -1 if no such variant exists (the -1 path is on elided lines).
 * Groups visible below: 32-bit int ALU/shift ops, 64-bit long ops, compares,
 * membase stores, then x86/amd64-specific opcodes.
 * NOTE(review): the case labels for most returns are on elided lines —
 * treat this fragment as read-only context.
 */
10693 mono_op_to_op_imm (int opcode)
/* 32 bit (int) arithmetic, logical and shift ops */
10697 return OP_IADD_IMM;
10699 return OP_ISUB_IMM;
10701 return OP_IDIV_IMM;
10703 return OP_IDIV_UN_IMM;
10705 return OP_IREM_IMM;
10707 return OP_IREM_UN_IMM;
10709 return OP_IMUL_IMM;
10711 return OP_IAND_IMM;
10715 return OP_IXOR_IMM;
10717 return OP_ISHL_IMM;
10719 return OP_ISHR_IMM;
10721 return OP_ISHR_UN_IMM;
/* 64 bit (long) ops */
10724 return OP_LADD_IMM;
10726 return OP_LSUB_IMM;
10728 return OP_LAND_IMM;
10732 return OP_LXOR_IMM;
10734 return OP_LSHL_IMM;
10736 return OP_LSHR_IMM;
10738 return OP_LSHR_UN_IMM;
/* Compares */
10741 return OP_COMPARE_IMM;
10743 return OP_ICOMPARE_IMM;
10745 return OP_LCOMPARE_IMM;
/* Membase stores: same mapping as store_membase_reg_to_store_membase_imm () */
10747 case OP_STORE_MEMBASE_REG:
10748 return OP_STORE_MEMBASE_IMM;
10749 case OP_STOREI1_MEMBASE_REG:
10750 return OP_STOREI1_MEMBASE_IMM;
10751 case OP_STOREI2_MEMBASE_REG:
10752 return OP_STOREI2_MEMBASE_IMM;
10753 case OP_STOREI4_MEMBASE_REG:
10754 return OP_STOREI4_MEMBASE_IMM;
/* Architecture-specific opcodes */
10756 #if defined(TARGET_X86) || defined (TARGET_AMD64)
10758 return OP_X86_PUSH_IMM;
10759 case OP_X86_COMPARE_MEMBASE_REG:
10760 return OP_X86_COMPARE_MEMBASE_IMM;
10762 #if defined(TARGET_AMD64)
10763 case OP_AMD64_ICOMPARE_MEMBASE_REG:
10764 return OP_AMD64_ICOMPARE_MEMBASE_IMM;
10766 case OP_VOIDCALL_REG:
10767 return OP_VOIDCALL;
10775 return OP_LOCALLOC_IMM;
/*
 * ldind_to_load_membase:
 *
 *   Map a CIL CEE_LDIND_* (load indirect) opcode to the corresponding
 * OP_LOAD*_MEMBASE IR opcode. CEE_LDIND_I and CEE_LDIND_REF both map to
 * the pointer-sized OP_LOAD_MEMBASE.
 * NOTE(review): most case labels are on elided lines of this chunk.
 */
10782 ldind_to_load_membase (int opcode)
10786 return OP_LOADI1_MEMBASE;
10788 return OP_LOADU1_MEMBASE;
10790 return OP_LOADI2_MEMBASE;
10792 return OP_LOADU2_MEMBASE;
10794 return OP_LOADI4_MEMBASE;
10796 return OP_LOADU4_MEMBASE;
10798 return OP_LOAD_MEMBASE;
10799 case CEE_LDIND_REF:
10800 return OP_LOAD_MEMBASE;
10802 return OP_LOADI8_MEMBASE;
10804 return OP_LOADR4_MEMBASE;
10806 return OP_LOADR8_MEMBASE;
/* Non-LDIND opcodes are a caller error */
10808 g_assert_not_reached ();
/*
 * stind_to_store_membase:
 *
 *   Map a CIL CEE_STIND_* (store indirect) opcode to the corresponding
 * OP_STORE*_MEMBASE_REG IR opcode; CEE_STIND_REF uses the pointer-sized
 * OP_STORE_MEMBASE_REG.
 * NOTE(review): most case labels are on elided lines of this chunk.
 */
10815 stind_to_store_membase (int opcode)
10819 return OP_STOREI1_MEMBASE_REG;
10821 return OP_STOREI2_MEMBASE_REG;
10823 return OP_STOREI4_MEMBASE_REG;
10825 case CEE_STIND_REF:
10826 return OP_STORE_MEMBASE_REG;
10828 return OP_STOREI8_MEMBASE_REG;
10830 return OP_STORER4_MEMBASE_REG;
10832 return OP_STORER8_MEMBASE_REG;
/* Non-STIND opcodes are a caller error */
10834 g_assert_not_reached ();
/*
 * mono_load_membase_to_load_mem:
 *
 *   Map an OP_LOAD*_MEMBASE opcode (base register + offset) to the
 * OP_LOAD*_MEM variant that loads from an absolute address. Only
 * implemented on x86/amd64; the fall-through for other targets is on
 * elided lines.
 */
10841 mono_load_membase_to_load_mem (int opcode)
10843 // FIXME: Add a MONO_ARCH_HAVE_LOAD_MEM macro
10844 #if defined(TARGET_X86) || defined(TARGET_AMD64)
10846 case OP_LOAD_MEMBASE:
10847 return OP_LOAD_MEM;
10848 case OP_LOADU1_MEMBASE:
10849 return OP_LOADU1_MEM;
10850 case OP_LOADU2_MEMBASE:
10851 return OP_LOADU2_MEM;
10852 case OP_LOADI4_MEMBASE:
10853 return OP_LOADI4_MEM;
10854 case OP_LOADU4_MEMBASE:
10855 return OP_LOADU4_MEM;
/* 64 bit loads only exist as a single instruction on 64 bit registers */
10856 #if SIZEOF_REGISTER == 8
10857 case OP_LOADI8_MEMBASE:
10858 return OP_LOADI8_MEM;
/*
 * op_to_op_dest_membase:
 *
 *   Given an ALU opcode whose destination is being spilled through
 * STORE_OPCODE, return a fused *_MEMBASE opcode which writes its result
 * directly to memory, avoiding a separate load+store pair. Used by
 * mono_spill_global_vars (). The -1 "no fusion possible" returns are on
 * elided lines, as are most case labels.
 */
10867 op_to_op_dest_membase (int store_opcode, int opcode)
10869 #if defined(TARGET_X86)
/* x86: only pointer-sized/int stores can be fused */
10870 if (!((store_opcode == OP_STORE_MEMBASE_REG) || (store_opcode == OP_STOREI4_MEMBASE_REG)))
10875 return OP_X86_ADD_MEMBASE_REG;
10877 return OP_X86_SUB_MEMBASE_REG;
10879 return OP_X86_AND_MEMBASE_REG;
10881 return OP_X86_OR_MEMBASE_REG;
10883 return OP_X86_XOR_MEMBASE_REG;
10886 return OP_X86_ADD_MEMBASE_IMM;
10889 return OP_X86_SUB_MEMBASE_IMM;
10892 return OP_X86_AND_MEMBASE_IMM;
10895 return OP_X86_OR_MEMBASE_IMM;
10898 return OP_X86_XOR_MEMBASE_IMM;
10904 #if defined(TARGET_AMD64)
/* amd64: 8-byte stores can be fused too */
10905 if (!((store_opcode == OP_STORE_MEMBASE_REG) || (store_opcode == OP_STOREI4_MEMBASE_REG) || (store_opcode == OP_STOREI8_MEMBASE_REG)))
10910 return OP_X86_ADD_MEMBASE_REG;
10912 return OP_X86_SUB_MEMBASE_REG;
10914 return OP_X86_AND_MEMBASE_REG;
10916 return OP_X86_OR_MEMBASE_REG;
10918 return OP_X86_XOR_MEMBASE_REG;
10920 return OP_X86_ADD_MEMBASE_IMM;
10922 return OP_X86_SUB_MEMBASE_IMM;
10924 return OP_X86_AND_MEMBASE_IMM;
10926 return OP_X86_OR_MEMBASE_IMM;
10928 return OP_X86_XOR_MEMBASE_IMM;
/* 64 bit (long) variants */
10930 return OP_AMD64_ADD_MEMBASE_REG;
10932 return OP_AMD64_SUB_MEMBASE_REG;
10934 return OP_AMD64_AND_MEMBASE_REG;
10936 return OP_AMD64_OR_MEMBASE_REG;
10938 return OP_AMD64_XOR_MEMBASE_REG;
10941 return OP_AMD64_ADD_MEMBASE_IMM;
10944 return OP_AMD64_SUB_MEMBASE_IMM;
10947 return OP_AMD64_AND_MEMBASE_IMM;
10950 return OP_AMD64_OR_MEMBASE_IMM;
10953 return OP_AMD64_XOR_MEMBASE_IMM;
/*
 * op_to_op_store_membase:
 *
 *   Fuse a SETcc-style result directly into a 1-byte memory store on
 * x86/amd64 (SETEQ/SETNE written straight to the spill slot); returns -1
 * on elided lines when no fusion applies.
 */
10963 op_to_op_store_membase (int store_opcode, int opcode)
10965 #if defined(TARGET_X86) || defined(TARGET_AMD64)
10968 if (store_opcode == OP_STOREI1_MEMBASE_REG)
10969 return OP_X86_SETEQ_MEMBASE;
10971 if (store_opcode == OP_STOREI1_MEMBASE_REG)
10972 return OP_X86_SETNE_MEMBASE;
/*
 * op_to_op_src1_membase:
 *
 *   Given an opcode whose first source operand would be filled by a load
 * with LOAD_OPCODE, return a fused variant which reads that operand
 * directly from memory (e.g. push/compare with a membase operand).
 * Returns -1 (on elided lines) when fusion is not possible. Used by
 * mono_spill_global_vars ().
 */
10980 op_to_op_src1_membase (int load_opcode, int opcode)
10983 /* FIXME: This has sign extension issues */
10985 if ((opcode == OP_ICOMPARE_IMM) && (load_opcode == OP_LOADU1_MEMBASE))
10986 return OP_X86_COMPARE_MEMBASE8_IMM;
/* x86: only pointer/int sized loads can be fused below */
10989 if (!((load_opcode == OP_LOAD_MEMBASE) || (load_opcode == OP_LOADI4_MEMBASE) || (load_opcode == OP_LOADU4_MEMBASE)))
10994 return OP_X86_PUSH_MEMBASE;
10995 case OP_COMPARE_IMM:
10996 case OP_ICOMPARE_IMM:
10997 return OP_X86_COMPARE_MEMBASE_IMM;
11000 return OP_X86_COMPARE_MEMBASE_REG;
11004 #ifdef TARGET_AMD64
11005 /* FIXME: This has sign extension issues */
11007 if ((opcode == OP_ICOMPARE_IMM) && (load_opcode == OP_LOADU1_MEMBASE))
11008 return OP_X86_COMPARE_MEMBASE8_IMM;
/* Under ILP32 (x32) pointers are 4 bytes, so an 8 byte load can't be fused */
11013 #ifdef __mono_ilp32__
11014 if (load_opcode == OP_LOADI8_MEMBASE)
11016 if ((load_opcode == OP_LOAD_MEMBASE) || (load_opcode == OP_LOADI8_MEMBASE))
11018 return OP_X86_PUSH_MEMBASE;
11020 /* FIXME: This only works for 32 bit immediates
11021 case OP_COMPARE_IMM:
11022 case OP_LCOMPARE_IMM:
11023 if ((load_opcode == OP_LOAD_MEMBASE) || (load_opcode == OP_LOADI8_MEMBASE))
11024 return OP_AMD64_COMPARE_MEMBASE_IMM;
11026 case OP_ICOMPARE_IMM:
11027 if ((load_opcode == OP_LOADI4_MEMBASE) || (load_opcode == OP_LOADU4_MEMBASE))
11028 return OP_AMD64_ICOMPARE_MEMBASE_IMM;
11032 #ifdef __mono_ilp32__
11033 if (load_opcode == OP_LOAD_MEMBASE)
11034 return OP_AMD64_ICOMPARE_MEMBASE_REG;
11035 if (load_opcode == OP_LOADI8_MEMBASE)
11037 if ((load_opcode == OP_LOAD_MEMBASE) || (load_opcode == OP_LOADI8_MEMBASE))
11039 return OP_AMD64_COMPARE_MEMBASE_REG;
11042 if ((load_opcode == OP_LOADI4_MEMBASE) || (load_opcode == OP_LOADU4_MEMBASE))
11043 return OP_AMD64_ICOMPARE_MEMBASE_REG;
/*
 * op_to_op_src2_membase:
 *
 *   Like op_to_op_src1_membase (), but fuses a load feeding the SECOND
 * source operand into a reg,membase form of the instruction. Returns -1
 * (on elided lines) when no fused form exists.
 */
11052 op_to_op_src2_membase (int load_opcode, int opcode)
/* x86: only pointer/int sized loads can be fused */
11055 if (!((load_opcode == OP_LOAD_MEMBASE) || (load_opcode == OP_LOADI4_MEMBASE) || (load_opcode == OP_LOADU4_MEMBASE)))
11061 return OP_X86_COMPARE_REG_MEMBASE;
11063 return OP_X86_ADD_REG_MEMBASE;
11065 return OP_X86_SUB_REG_MEMBASE;
11067 return OP_X86_AND_REG_MEMBASE;
11069 return OP_X86_OR_REG_MEMBASE;
11071 return OP_X86_XOR_REG_MEMBASE;
11075 #ifdef TARGET_AMD64
11076 #ifdef __mono_ilp32__
11077 if ((load_opcode == OP_LOADI4_MEMBASE) || (load_opcode == OP_LOADU4_MEMBASE) || (load_opcode == OP_LOAD_MEMBASE) ) {
11079 if ((load_opcode == OP_LOADI4_MEMBASE) || (load_opcode == OP_LOADU4_MEMBASE)) {
/* 32 bit operand: use the x86 reg,membase forms */
11083 return OP_AMD64_ICOMPARE_REG_MEMBASE;
11085 return OP_X86_ADD_REG_MEMBASE;
11087 return OP_X86_SUB_REG_MEMBASE;
11089 return OP_X86_AND_REG_MEMBASE;
11091 return OP_X86_OR_REG_MEMBASE;
11093 return OP_X86_XOR_REG_MEMBASE;
11095 #ifdef __mono_ilp32__
11096 } else if (load_opcode == OP_LOADI8_MEMBASE) {
11098 } else if ((load_opcode == OP_LOADI8_MEMBASE) || (load_opcode == OP_LOAD_MEMBASE)) {
/* 64 bit operand: use the amd64 reg,membase forms */
11103 return OP_AMD64_COMPARE_REG_MEMBASE;
11105 return OP_AMD64_ADD_REG_MEMBASE;
11107 return OP_AMD64_SUB_REG_MEMBASE;
11109 return OP_AMD64_AND_REG_MEMBASE;
11111 return OP_AMD64_OR_REG_MEMBASE;
11113 return OP_AMD64_XOR_REG_MEMBASE;
/*
 * mono_op_to_op_imm_noemul:
 *
 *   Like mono_op_to_op_imm (), but — judging from the #if guards visible
 * here — it first filters out opcodes that are software-emulated on this
 * target (long shifts on 32 bit registers, mul/div where
 * MONO_ARCH_EMULATE_MUL_DIV / MONO_ARCH_EMULATE_DIV is defined); the
 * filtered cases are on elided lines. TODO(review): confirm against the
 * full source.
 */
11122 mono_op_to_op_imm_noemul (int opcode)
11125 #if SIZEOF_REGISTER == 4 && !defined(MONO_ARCH_NO_EMULATE_LONG_SHIFT_OPS)
11131 #if defined(MONO_ARCH_EMULATE_MUL_DIV) || defined(MONO_ARCH_EMULATE_DIV)
11139 return mono_op_to_op_imm (opcode);
11143 #ifndef DISABLE_JIT
11146 * mono_handle_global_vregs:
11148 * Make vregs used in more than one bblock 'global', i.e. allocate a variable
11152 mono_handle_global_vregs (MonoCompile *cfg)
11154 gint32 *vreg_to_bb;
11155 MonoBasicBlock *bb;
/* Map vreg -> (block_num + 1) of the single bb using it, or -1 if used in several */
11158 vreg_to_bb = mono_mempool_alloc0 (cfg->mempool, sizeof (gint32*) * cfg->next_vreg + 1);
11160 #ifdef MONO_ARCH_SIMD_INTRINSICS
11161 if (cfg->uses_simd_intrinsics)
11162 mono_simd_simplify_indirection (cfg);
11165 /* Find local vregs used in more than one bb */
11166 for (bb = cfg->bb_entry; bb; bb = bb->next_bb) {
11167 MonoInst *ins = bb->code;
11168 int block_num = bb->block_num;
11170 if (cfg->verbose_level > 2)
11171 printf ("\nHANDLE-GLOBAL-VREGS BLOCK %d:\n", bb->block_num);
11174 for (; ins; ins = ins->next) {
11175 const char *spec = INS_INFO (ins->opcode);
11176 int regtype = 0, regindex;
11179 if (G_UNLIKELY (cfg->verbose_level > 2))
11180 mono_print_ins (ins);
11182 g_assert (ins->opcode >= MONO_CEE_LAST);
/* Examine dreg, sreg1, sreg2, sreg3 in turn; blank spec means "no such reg" */
11184 for (regindex = 0; regindex < 4; regindex ++) {
11187 if (regindex == 0) {
11188 regtype = spec [MONO_INST_DEST];
11189 if (regtype == ' ')
11192 } else if (regindex == 1) {
11193 regtype = spec [MONO_INST_SRC1];
11194 if (regtype == ' ')
11197 } else if (regindex == 2) {
11198 regtype = spec [MONO_INST_SRC2];
11199 if (regtype == ' ')
11202 } else if (regindex == 3) {
11203 regtype = spec [MONO_INST_SRC3];
11204 if (regtype == ' ')
11209 #if SIZEOF_REGISTER == 4
11210 /* In the LLVM case, the long opcodes are not decomposed */
11211 if (regtype == 'l' && !COMPILE_LLVM (cfg)) {
11213 * Since some instructions reference the original long vreg,
11214 * and some reference the two component vregs, it is quite hard
11215 * to determine when it needs to be global. So be conservative.
11217 if (!get_vreg_to_inst (cfg, vreg)) {
11218 mono_compile_create_var_for_vreg (cfg, &mono_defaults.int64_class->byval_arg, OP_LOCAL, vreg);
11220 if (cfg->verbose_level > 2)
11221 printf ("LONG VREG R%d made global.\n", vreg);
11225 * Make the component vregs volatile since the optimizations can
11226 * get confused otherwise.
11228 get_vreg_to_inst (cfg, vreg + 1)->flags |= MONO_INST_VOLATILE;
11229 get_vreg_to_inst (cfg, vreg + 2)->flags |= MONO_INST_VOLATILE;
11233 g_assert (vreg != -1);
11235 prev_bb = vreg_to_bb [vreg];
11236 if (prev_bb == 0) {
11237 /* 0 is a valid block num */
11238 vreg_to_bb [vreg] = block_num + 1;
11239 } else if ((prev_bb != block_num + 1) && (prev_bb != -1)) {
11240 if (((regtype == 'i' && (vreg < MONO_MAX_IREGS))) || (regtype == 'f' && (vreg < MONO_MAX_FREGS)))
/* Second distinct bb using this vreg: promote it to a variable */
11243 if (!get_vreg_to_inst (cfg, vreg)) {
11244 if (G_UNLIKELY (cfg->verbose_level > 2))
11245 printf ("VREG R%d used in BB%d and BB%d made global.\n", vreg, vreg_to_bb [vreg], block_num);
11249 if (vreg_is_ref (cfg, vreg))
11250 mono_compile_create_var_for_vreg (cfg, &mono_defaults.object_class->byval_arg, OP_LOCAL, vreg);
11252 mono_compile_create_var_for_vreg (cfg, &mono_defaults.int_class->byval_arg, OP_LOCAL, vreg);
11255 mono_compile_create_var_for_vreg (cfg, &mono_defaults.int64_class->byval_arg, OP_LOCAL, vreg);
11258 mono_compile_create_var_for_vreg (cfg, &mono_defaults.double_class->byval_arg, OP_LOCAL, vreg);
11261 mono_compile_create_var_for_vreg (cfg, &ins->klass->byval_arg, OP_LOCAL, vreg);
11264 g_assert_not_reached ();
11268 /* Flag as having been used in more than one bb */
11269 vreg_to_bb [vreg] = -1;
11275 /* If a variable is used in only one bblock, convert it into a local vreg */
11276 for (i = 0; i < cfg->num_varinfo; i++) {
11277 MonoInst *var = cfg->varinfo [i];
11278 MonoMethodVar *vmv = MONO_VARINFO (cfg, i);
11280 switch (var->type) {
11286 #if SIZEOF_REGISTER == 8
11289 #if !defined(TARGET_X86) && !defined(MONO_ARCH_SOFT_FLOAT)
11290 /* Enabling this screws up the fp stack on x86 */
11293 /* Arguments are implicitly global */
11294 /* Putting R4 vars into registers doesn't work currently */
11295 if ((var->opcode != OP_ARG) && (var != cfg->ret) && !(var->flags & (MONO_INST_VOLATILE|MONO_INST_INDIRECT)) && (vreg_to_bb [var->dreg] != -1) && (var->klass->byval_arg.type != MONO_TYPE_R4) && !cfg->disable_vreg_to_lvreg) {
11297 * Make that the variable's liveness interval doesn't contain a call, since
11298 * that would cause the lvreg to be spilled, making the whole optimization
11301 /* This is too slow for JIT compilation */
11303 if (cfg->compile_aot && vreg_to_bb [var->dreg]) {
11305 int def_index, call_index, ins_index;
11306 gboolean spilled = FALSE;
11311 for (ins = vreg_to_bb [var->dreg]->code; ins; ins = ins->next) {
11312 const char *spec = INS_INFO (ins->opcode);
11314 if ((spec [MONO_INST_DEST] != ' ') && (ins->dreg == var->dreg))
11315 def_index = ins_index;
/* FIX: the second disjunct used to duplicate the SRC1/sreg1 test; it must
 * check the SECOND source register, otherwise uses through sreg2 never
 * trigger the spilled-across-a-call detection. */
11317 if (((spec [MONO_INST_SRC1] != ' ') && (ins->sreg1 == var->dreg)) ||
11318 ((spec [MONO_INST_SRC2] != ' ') && (ins->sreg2 == var->dreg))) {
11319 if (call_index > def_index) {
11325 if (MONO_IS_CALL (ins))
11326 call_index = ins_index;
11336 if (G_UNLIKELY (cfg->verbose_level > 2))
11337 printf ("CONVERTED R%d(%d) TO VREG.\n", var->dreg, vmv->idx);
11338 var->flags |= MONO_INST_IS_DEAD;
11339 cfg->vreg_to_inst [var->dreg] = NULL;
11346 * Compress the varinfo and vars tables so the liveness computation is faster and
11347 * takes up less space.
11350 for (i = 0; i < cfg->num_varinfo; ++i) {
11351 MonoInst *var = cfg->varinfo [i];
11352 if (pos < i && cfg->locals_start == i)
11353 cfg->locals_start = pos;
11354 if (!(var->flags & MONO_INST_IS_DEAD)) {
11356 cfg->varinfo [pos] = cfg->varinfo [i];
11357 cfg->varinfo [pos]->inst_c0 = pos;
11358 memcpy (&cfg->vars [pos], &cfg->vars [i], sizeof (MonoMethodVar));
11359 cfg->vars [pos].idx = pos;
11360 #if SIZEOF_REGISTER == 4
11361 if (cfg->varinfo [pos]->type == STACK_I8) {
11362 /* Modify the two component vars too */
11365 var1 = get_vreg_to_inst (cfg, cfg->varinfo [pos]->dreg + 1);
11366 var1->inst_c0 = pos;
11367 var1 = get_vreg_to_inst (cfg, cfg->varinfo [pos]->dreg + 2);
11368 var1->inst_c0 = pos;
11375 cfg->num_varinfo = pos;
11376 if (cfg->locals_start > cfg->num_varinfo)
11377 cfg->locals_start = cfg->num_varinfo;
11381 * mono_spill_global_vars:
11383 * Generate spill code for variables which are not allocated to registers,
11384 * and replace vregs with their allocated hregs. *need_local_opts is set to TRUE if
11385 * code is generated which could be optimized by the local optimization passes.
11388 mono_spill_global_vars (MonoCompile *cfg, gboolean *need_local_opts)
11390 MonoBasicBlock *bb;
11392 int orig_next_vreg;
11393 guint32 *vreg_to_lvreg;
11395 guint32 i, lvregs_len;
11396 gboolean dest_has_lvreg = FALSE;
11397 guint32 stacktypes [128];
11398 MonoInst **live_range_start, **live_range_end;
11399 MonoBasicBlock **live_range_start_bb, **live_range_end_bb;
11401 *need_local_opts = FALSE;
11403 memset (spec2, 0, sizeof (spec2));
/* Map spec regtype characters ('i'/'l'/'f'/'x') to stack types for alloc_dreg () */
11405 /* FIXME: Move this function to mini.c */
11406 stacktypes ['i'] = STACK_PTR;
11407 stacktypes ['l'] = STACK_I8;
11408 stacktypes ['f'] = STACK_R8;
11409 #ifdef MONO_ARCH_SIMD_INTRINSICS
11410 stacktypes ['x'] = STACK_VTYPE;
11413 #if SIZEOF_REGISTER == 4
11414 /* Create MonoInsts for longs */
11415 for (i = 0; i < cfg->num_varinfo; i++) {
11416 MonoInst *ins = cfg->varinfo [i];
11418 if ((ins->opcode != OP_REGVAR) && !(ins->flags & MONO_INST_IS_DEAD)) {
11419 switch (ins->type) {
11424 if (ins->type == STACK_R8 && !COMPILE_SOFT_FLOAT (cfg))
11427 g_assert (ins->opcode == OP_REGOFFSET);
/* The two component vars (dreg+1 = low word, dreg+2 = high word) share the
 * 64 bit slot at the ls/ms word offsets */
11429 tree = get_vreg_to_inst (cfg, ins->dreg + 1);
11431 tree->opcode = OP_REGOFFSET;
11432 tree->inst_basereg = ins->inst_basereg;
11433 tree->inst_offset = ins->inst_offset + MINI_LS_WORD_OFFSET;
11435 tree = get_vreg_to_inst (cfg, ins->dreg + 2);
11437 tree->opcode = OP_REGOFFSET;
11438 tree->inst_basereg = ins->inst_basereg;
11439 tree->inst_offset = ins->inst_offset + MINI_MS_WORD_OFFSET;
11449 if (cfg->compute_gc_maps) {
11450 /* registers need liveness info even for !non refs */
11451 for (i = 0; i < cfg->num_varinfo; i++) {
11452 MonoInst *ins = cfg->varinfo [i];
11454 if (ins->opcode == OP_REGVAR)
11455 ins->flags |= MONO_INST_GC_TRACK;
11459 /* FIXME: widening and truncation */
11462 * As an optimization, when a variable allocated to the stack is first loaded into
11463 * an lvreg, we will remember the lvreg and use it the next time instead of loading
11464 * the variable again.
11466 orig_next_vreg = cfg->next_vreg;
11467 vreg_to_lvreg = mono_mempool_alloc0 (cfg->mempool, sizeof (guint32) * cfg->next_vreg);
11468 lvregs = mono_mempool_alloc (cfg->mempool, sizeof (guint32) * 1024);
11472 * These arrays contain the first and last instructions accessing a given
11474 * Since we emit bblocks in the same order we process them here, and we
11475 * don't split live ranges, these will precisely describe the live range of
11476 * the variable, i.e. the instruction range where a valid value can be found
11477 * in the variables location.
11478 * The live range is computed using the liveness info computed by the liveness pass.
11479 * We can't use vmv->range, since that is an abstract live range, and we need
11480 * one which is instruction precise.
11481 * FIXME: Variables used in out-of-line bblocks have a hole in their live range.
11483 /* FIXME: Only do this if debugging info is requested */
11484 live_range_start = g_new0 (MonoInst*, cfg->next_vreg);
11485 live_range_end = g_new0 (MonoInst*, cfg->next_vreg);
11486 live_range_start_bb = g_new (MonoBasicBlock*, cfg->next_vreg);
11487 live_range_end_bb = g_new (MonoBasicBlock*, cfg->next_vreg);
11489 /* Add spill loads/stores */
11490 for (bb = cfg->bb_entry; bb; bb = bb->next_bb) {
11493 if (cfg->verbose_level > 2)
11494 printf ("\nSPILL BLOCK %d:\n", bb->block_num);
/* lvreg caching is per-bblock: reset at the start of every block */
11496 /* Clear vreg_to_lvreg array */
11497 for (i = 0; i < lvregs_len; i++)
11498 vreg_to_lvreg [lvregs [i]] = 0;
11502 MONO_BB_FOR_EACH_INS (bb, ins) {
11503 const char *spec = INS_INFO (ins->opcode);
11504 int regtype, srcindex, sreg, tmp_reg, prev_dreg, num_sregs;
11505 gboolean store, no_lvreg;
11506 int sregs [MONO_MAX_SRC_REGS];
11508 if (G_UNLIKELY (cfg->verbose_level > 2))
11509 mono_print_ins (ins);
11511 if (ins->opcode == OP_NOP)
11515 * We handle LDADDR here as well, since it can only be decomposed
11516 * when variable addresses are known.
11518 if (ins->opcode == OP_LDADDR) {
11519 MonoInst *var = ins->inst_p0;
11521 if (var->opcode == OP_VTARG_ADDR) {
11522 /* Happens on SPARC/S390 where vtypes are passed by reference */
11523 MonoInst *vtaddr = var->inst_left;
11524 if (vtaddr->opcode == OP_REGVAR) {
11525 ins->opcode = OP_MOVE;
11526 ins->sreg1 = vtaddr->dreg;
11528 else if (var->inst_left->opcode == OP_REGOFFSET) {
11529 ins->opcode = OP_LOAD_MEMBASE;
11530 ins->inst_basereg = vtaddr->inst_basereg;
11531 ins->inst_offset = vtaddr->inst_offset;
11535 g_assert (var->opcode == OP_REGOFFSET);
/* LDADDR of a stack slot becomes basereg + offset */
11537 ins->opcode = OP_ADD_IMM;
11538 ins->sreg1 = var->inst_basereg;
11539 ins->inst_imm = var->inst_offset;
11542 *need_local_opts = TRUE;
11543 spec = INS_INFO (ins->opcode);
/* At this point all CIL-level opcodes must have been lowered to machine IR */
11546 if (ins->opcode < MONO_CEE_LAST) {
11547 mono_print_ins (ins);
11548 g_assert_not_reached ();
11552 * Store opcodes have destbasereg in the dreg, but in reality, it is an
11556 if (MONO_IS_STORE_MEMBASE (ins)) {
/* Swap dreg/sreg2 so the base reg is processed as a source, and build a
 * matching spec (spec2) with no destination */
11557 tmp_reg = ins->dreg;
11558 ins->dreg = ins->sreg2;
11559 ins->sreg2 = tmp_reg;
11562 spec2 [MONO_INST_DEST] = ' ';
11563 spec2 [MONO_INST_SRC1] = spec [MONO_INST_SRC1];
11564 spec2 [MONO_INST_SRC2] = spec [MONO_INST_DEST];
11565 spec2 [MONO_INST_SRC3] = ' ';
11567 } else if (MONO_IS_STORE_MEMINDEX (ins))
11568 g_assert_not_reached ();
11573 if (G_UNLIKELY (cfg->verbose_level > 2)) {
11574 printf ("\t %.3s %d", spec, ins->dreg);
11575 num_sregs = mono_inst_get_src_registers (ins, sregs);
11576 for (srcindex = 0; srcindex < num_sregs; ++srcindex)
11577 printf (" %d", sregs [srcindex]);
/***************/
/*    DREG     */
/***************/
11584 regtype = spec [MONO_INST_DEST];
11585 g_assert (((ins->dreg == -1) && (regtype == ' ')) || ((ins->dreg != -1) && (regtype != ' ')));
11588 if ((ins->dreg != -1) && get_vreg_to_inst (cfg, ins->dreg)) {
11589 MonoInst *var = get_vreg_to_inst (cfg, ins->dreg);
11590 MonoInst *store_ins;
11592 MonoInst *def_ins = ins;
11593 int dreg = ins->dreg; /* The original vreg */
11595 store_opcode = mono_type_to_store_membase (cfg, var->inst_vtype);
11597 if (var->opcode == OP_REGVAR) {
11598 ins->dreg = var->dreg;
11599 } else if ((ins->dreg == ins->sreg1) && (spec [MONO_INST_DEST] == 'i') && (spec [MONO_INST_SRC1] == 'i') && !vreg_to_lvreg [ins->dreg] && (op_to_op_dest_membase (store_opcode, ins->opcode) != -1)) {
11601 * Instead of emitting a load+store, use a _membase opcode.
11603 g_assert (var->opcode == OP_REGOFFSET);
11604 if (ins->opcode == OP_MOVE) {
11608 ins->opcode = op_to_op_dest_membase (store_opcode, ins->opcode);
11609 ins->inst_basereg = var->inst_basereg;
11610 ins->inst_offset = var->inst_offset;
11613 spec = INS_INFO (ins->opcode);
11617 g_assert (var->opcode == OP_REGOFFSET);
11619 prev_dreg = ins->dreg;
11621 /* Invalidate any previous lvreg for this vreg */
11622 vreg_to_lvreg [ins->dreg] = 0;
11626 if (COMPILE_SOFT_FLOAT (cfg) && store_opcode == OP_STORER8_MEMBASE_REG) {
11628 store_opcode = OP_STOREI8_MEMBASE_REG;
/* Redirect the def to a fresh lvreg and store it back to the stack slot */
11631 ins->dreg = alloc_dreg (cfg, stacktypes [regtype]);
11633 if (regtype == 'l') {
11634 NEW_STORE_MEMBASE (cfg, store_ins, OP_STOREI4_MEMBASE_REG, var->inst_basereg, var->inst_offset + MINI_LS_WORD_OFFSET, ins->dreg + 1);
11635 mono_bblock_insert_after_ins (bb, ins, store_ins);
11636 NEW_STORE_MEMBASE (cfg, store_ins, OP_STOREI4_MEMBASE_REG, var->inst_basereg, var->inst_offset + MINI_MS_WORD_OFFSET, ins->dreg + 2);
11637 mono_bblock_insert_after_ins (bb, ins, store_ins);
11638 def_ins = store_ins;
11641 g_assert (store_opcode != OP_STOREV_MEMBASE);
11643 /* Try to fuse the store into the instruction itself */
11644 /* FIXME: Add more instructions */
11645 if (!lvreg && ((ins->opcode == OP_ICONST) || ((ins->opcode == OP_I8CONST) && (ins->inst_c0 == 0)))) {
11646 ins->opcode = store_membase_reg_to_store_membase_imm (store_opcode);
11647 ins->inst_imm = ins->inst_c0;
11648 ins->inst_destbasereg = var->inst_basereg;
11649 ins->inst_offset = var->inst_offset;
11650 spec = INS_INFO (ins->opcode);
11651 } else if (!lvreg && ((ins->opcode == OP_MOVE) || (ins->opcode == OP_FMOVE) || (ins->opcode == OP_LMOVE))) {
11652 ins->opcode = store_opcode;
11653 ins->inst_destbasereg = var->inst_basereg;
11654 ins->inst_offset = var->inst_offset;
/* Same dreg/sreg2 swap + spec2 construction as for store opcodes above */
11658 tmp_reg = ins->dreg;
11659 ins->dreg = ins->sreg2;
11660 ins->sreg2 = tmp_reg;
11663 spec2 [MONO_INST_DEST] = ' ';
11664 spec2 [MONO_INST_SRC1] = spec [MONO_INST_SRC1];
11665 spec2 [MONO_INST_SRC2] = spec [MONO_INST_DEST];
11666 spec2 [MONO_INST_SRC3] = ' ';
11668 } else if (!lvreg && (op_to_op_store_membase (store_opcode, ins->opcode) != -1)) {
11669 // FIXME: The backends expect the base reg to be in inst_basereg
11670 ins->opcode = op_to_op_store_membase (store_opcode, ins->opcode);
11672 ins->inst_basereg = var->inst_basereg;
11673 ins->inst_offset = var->inst_offset;
11674 spec = INS_INFO (ins->opcode);
11676 /* printf ("INS: "); mono_print_ins (ins); */
11677 /* Create a store instruction */
11678 NEW_STORE_MEMBASE (cfg, store_ins, store_opcode, var->inst_basereg, var->inst_offset, ins->dreg);
11680 /* Insert it after the instruction */
11681 mono_bblock_insert_after_ins (bb, ins, store_ins);
11683 def_ins = store_ins;
11686 * We can't assign ins->dreg to var->dreg here, since the
11687 * sregs could use it. So set a flag, and do it after
11690 if ((!MONO_ARCH_USE_FPSTACK || ((store_opcode != OP_STORER8_MEMBASE_REG) && (store_opcode != OP_STORER4_MEMBASE_REG))) && !((var)->flags & (MONO_INST_VOLATILE|MONO_INST_INDIRECT)))
11691 dest_has_lvreg = TRUE;
11696 if (def_ins && !live_range_start [dreg]) {
11697 live_range_start [dreg] = def_ins;
11698 live_range_start_bb [dreg] = bb;
11701 if (cfg->compute_gc_maps && def_ins && (var->flags & MONO_INST_GC_TRACK)) {
11704 MONO_INST_NEW (cfg, tmp, OP_GC_LIVENESS_DEF);
11705 tmp->inst_c1 = dreg;
11706 mono_bblock_insert_after_ins (bb, def_ins, tmp);
/************/
/*  SREGS   */
/************/
11713 num_sregs = mono_inst_get_src_registers (ins, sregs);
11714 for (srcindex = 0; srcindex < 3; ++srcindex) {
11715 regtype = spec [MONO_INST_SRC1 + srcindex];
11716 sreg = sregs [srcindex];
11718 g_assert (((sreg == -1) && (regtype == ' ')) || ((sreg != -1) && (regtype != ' ')));
11719 if ((sreg != -1) && get_vreg_to_inst (cfg, sreg)) {
11720 MonoInst *var = get_vreg_to_inst (cfg, sreg);
11721 MonoInst *use_ins = ins;
11722 MonoInst *load_ins;
11723 guint32 load_opcode;
11725 if (var->opcode == OP_REGVAR) {
11726 sregs [srcindex] = var->dreg;
11727 //mono_inst_set_src_registers (ins, sregs);
11728 live_range_end [sreg] = use_ins;
11729 live_range_end_bb [sreg] = bb;
11731 if (cfg->compute_gc_maps && var->dreg < orig_next_vreg && (var->flags & MONO_INST_GC_TRACK)) {
11734 MONO_INST_NEW (cfg, tmp, OP_GC_LIVENESS_USE);
11735 /* var->dreg is a hreg */
11736 tmp->inst_c1 = sreg;
11737 mono_bblock_insert_after_ins (bb, ins, tmp);
11743 g_assert (var->opcode == OP_REGOFFSET);
11745 load_opcode = mono_type_to_load_membase (cfg, var->inst_vtype);
11747 g_assert (load_opcode != OP_LOADV_MEMBASE);
11749 if (vreg_to_lvreg [sreg]) {
11750 g_assert (vreg_to_lvreg [sreg] != -1);
11752 /* The variable is already loaded to an lvreg */
11753 if (G_UNLIKELY (cfg->verbose_level > 2))
11754 printf ("\t\tUse lvreg R%d for R%d.\n", vreg_to_lvreg [sreg], sreg);
11755 sregs [srcindex] = vreg_to_lvreg [sreg];
11756 //mono_inst_set_src_registers (ins, sregs);
11760 /* Try to fuse the load into the instruction */
11761 if ((srcindex == 0) && (op_to_op_src1_membase (load_opcode, ins->opcode) != -1)) {
11762 ins->opcode = op_to_op_src1_membase (load_opcode, ins->opcode);
11763 sregs [0] = var->inst_basereg;
11764 //mono_inst_set_src_registers (ins, sregs);
11765 ins->inst_offset = var->inst_offset;
11766 } else if ((srcindex == 1) && (op_to_op_src2_membase (load_opcode, ins->opcode) != -1)) {
11767 ins->opcode = op_to_op_src2_membase (load_opcode, ins->opcode);
11768 sregs [1] = var->inst_basereg;
11769 //mono_inst_set_src_registers (ins, sregs);
11770 ins->inst_offset = var->inst_offset;
11772 if (MONO_IS_REAL_MOVE (ins)) {
11773 ins->opcode = OP_NOP;
11776 //printf ("%d ", srcindex); mono_print_ins (ins);
/* Load the variable into a fresh lvreg and remember it for later uses */
11778 sreg = alloc_dreg (cfg, stacktypes [regtype]);
11780 if ((!MONO_ARCH_USE_FPSTACK || ((load_opcode != OP_LOADR8_MEMBASE) && (load_opcode != OP_LOADR4_MEMBASE))) && !((var)->flags & (MONO_INST_VOLATILE|MONO_INST_INDIRECT)) && !no_lvreg) {
11781 if (var->dreg == prev_dreg) {
11783 * sreg refers to the value loaded by the load
11784 * emitted below, but we need to use ins->dreg
11785 * since it refers to the store emitted earlier.
11789 g_assert (sreg != -1);
11790 vreg_to_lvreg [var->dreg] = sreg;
11791 g_assert (lvregs_len < 1024);
11792 lvregs [lvregs_len ++] = var->dreg;
11796 sregs [srcindex] = sreg;
11797 //mono_inst_set_src_registers (ins, sregs);
11799 if (regtype == 'l') {
11800 NEW_LOAD_MEMBASE (cfg, load_ins, OP_LOADI4_MEMBASE, sreg + 2, var->inst_basereg, var->inst_offset + MINI_MS_WORD_OFFSET);
11801 mono_bblock_insert_before_ins (bb, ins, load_ins);
11802 NEW_LOAD_MEMBASE (cfg, load_ins, OP_LOADI4_MEMBASE, sreg + 1, var->inst_basereg, var->inst_offset + MINI_LS_WORD_OFFSET);
11803 mono_bblock_insert_before_ins (bb, ins, load_ins);
11804 use_ins = load_ins;
11807 #if SIZEOF_REGISTER == 4
11808 g_assert (load_opcode != OP_LOADI8_MEMBASE);
11810 NEW_LOAD_MEMBASE (cfg, load_ins, load_opcode, sreg, var->inst_basereg, var->inst_offset);
11811 mono_bblock_insert_before_ins (bb, ins, load_ins);
11812 use_ins = load_ins;
11816 if (var->dreg < orig_next_vreg) {
11817 live_range_end [var->dreg] = use_ins;
11818 live_range_end_bb [var->dreg] = bb;
11821 if (cfg->compute_gc_maps && var->dreg < orig_next_vreg && (var->flags & MONO_INST_GC_TRACK)) {
11824 MONO_INST_NEW (cfg, tmp, OP_GC_LIVENESS_USE);
11825 tmp->inst_c1 = var->dreg;
11826 mono_bblock_insert_after_ins (bb, ins, tmp);
11830 mono_inst_set_src_registers (ins, sregs);
/* Deferred from the DREG section: now that the sregs are processed, the
 * lvreg produced by the def can be cached for this vreg */
11832 if (dest_has_lvreg) {
11833 g_assert (ins->dreg != -1);
11834 vreg_to_lvreg [prev_dreg] = ins->dreg;
11835 g_assert (lvregs_len < 1024);
11836 lvregs [lvregs_len ++] = prev_dreg;
11837 dest_has_lvreg = FALSE;
/* Undo the dreg/sreg2 swap done for store opcodes */
11841 tmp_reg = ins->dreg;
11842 ins->dreg = ins->sreg2;
11843 ins->sreg2 = tmp_reg;
11846 if (MONO_IS_CALL (ins)) {
11847 /* Clear vreg_to_lvreg array */
11848 for (i = 0; i < lvregs_len; i++)
11849 vreg_to_lvreg [lvregs [i]] = 0;
11851 } else if (ins->opcode == OP_NOP) {
11853 MONO_INST_NULLIFY_SREGS (ins);
11856 if (cfg->verbose_level > 2)
11857 mono_print_ins_index (1, ins);
11860 /* Extend the live range based on the liveness info */
11861 if (cfg->compute_precise_live_ranges && bb->live_out_set && bb->code) {
11862 for (i = 0; i < cfg->num_varinfo; i ++) {
11863 MonoMethodVar *vi = MONO_VARINFO (cfg, i);
11865 if (vreg_is_volatile (cfg, vi->vreg))
11866 /* The liveness info is incomplete */
11869 if (mono_bitset_test_fast (bb->live_in_set, i) && !live_range_start [vi->vreg]) {
11870 /* Live from at least the first ins of this bb */
11871 live_range_start [vi->vreg] = bb->code;
11872 live_range_start_bb [vi->vreg] = bb;
11875 if (mono_bitset_test_fast (bb->live_out_set, i)) {
11876 /* Live at least until the last ins of this bb */
11877 live_range_end [vi->vreg] = bb->last_ins;
11878 live_range_end_bb [vi->vreg] = bb;
11884 #ifdef MONO_ARCH_HAVE_LIVERANGE_OPS
11886 * Emit LIVERANGE_START/LIVERANGE_END opcodes, the backend will implement them
11887 * by storing the current native offset into MonoMethodVar->live_range_start/end.
11889 if (cfg->compute_precise_live_ranges && cfg->comp_done & MONO_COMP_LIVENESS) {
11890 for (i = 0; i < cfg->num_varinfo; ++i) {
11891 int vreg = MONO_VARINFO (cfg, i)->vreg;
11894 if (live_range_start [vreg]) {
11895 MONO_INST_NEW (cfg, ins, OP_LIVERANGE_START);
11897 ins->inst_c1 = vreg;
11898 mono_bblock_insert_after_ins (live_range_start_bb [vreg], live_range_start [vreg], ins);
11900 if (live_range_end [vreg]) {
11901 MONO_INST_NEW (cfg, ins, OP_LIVERANGE_END);
11903 ins->inst_c1 = vreg;
11904 if (live_range_end [vreg] == live_range_end_bb [vreg]->last_ins)
11905 mono_add_ins_to_end (live_range_end_bb [vreg], ins);
11907 mono_bblock_insert_after_ins (live_range_end_bb [vreg], live_range_end [vreg], ins);
/* The live range arrays were g_new0'ed (not mempool) above, so free them here */
11913 g_free (live_range_start);
11914 g_free (live_range_end);
11915 g_free (live_range_start_bb);
11916 g_free (live_range_end_bb);
11921 * - use 'iadd' instead of 'int_add'
11922 * - handling ovf opcodes: decompose in method_to_ir.
11923 * - unify iregs/fregs
11924 * -> partly done, the missing parts are:
11925 * - a more complete unification would involve unifying the hregs as well, so
11926 * code wouldn't need if (fp) all over the place. but that would mean the hregs
11927 * would no longer map to the machine hregs, so the code generators would need to
11928 * be modified. Also, on ia64 for example, niregs + nfregs > 256 -> bitmasks
11929 * wouldn't work any more. Duplicating the code in mono_local_regalloc () into
11930 * fp/non-fp branches speeds it up by about 15%.
11931 * - use sext/zext opcodes instead of shifts
11933 * - get rid of TEMPLOADs if possible and use vregs instead
11934 * - clean up usage of OP_P/OP_ opcodes
11935 * - cleanup usage of DUMMY_USE
11936 * - cleanup the setting of ins->type for MonoInst's which are pushed on the
11938 * - set the stack type and allocate a dreg in the EMIT_NEW macros
11939 * - get rid of all the <foo>2 stuff when the new JIT is ready.
11940 * - make sure handle_stack_args () is called before the branch is emitted
11941 * - when the new IR is done, get rid of all unused stuff
11942 * - COMPARE/BEQ as separate instructions or unify them ?
11943 * - keeping them separate allows specialized compare instructions like
11944 * compare_imm, compare_membase
11945 * - most back ends unify fp compare+branch, fp compare+ceq
11946 * - integrate mono_save_args into inline_method
11947 * - get rid of the empty bblocks created by MONO_EMIT_NEW_BRANCH_BLOCK2
11948 * - handle long shift opts on 32 bit platforms somehow: they require
11949 * 3 sregs (2 for arg1 and 1 for arg2)
11950 * - make byref a 'normal' type.
11951 * - use vregs for bb->out_stacks if possible, handle_global_vreg will make them a
11952 * variable if needed.
11953 * - do not start a new IL level bblock when cfg->cbb is changed by a function call
11954 * like inline_method.
11955 * - remove inlining restrictions
11956 * - fix LNEG and enable cfold of INEG
11957 * - generalize x86 optimizations like ldelema as a peephole optimization
11958 * - add store_mem_imm for amd64
11959 * - optimize the loading of the interruption flag in the managed->native wrappers
11960 * - avoid special handling of OP_NOP in passes
11961 * - move code inserting instructions into one function/macro.
11962 * - try a coalescing phase after liveness analysis
11963 * - add float -> vreg conversion + local optimizations on !x86
11964 * - figure out how to handle decomposed branches during optimizations, ie.
11965 * compare+branch, op_jump_table+op_br etc.
11966 * - promote RuntimeXHandles to vregs
11967 * - vtype cleanups:
11968 * - add a NEW_VARLOADA_VREG macro
11969 * - the vtype optimizations are blocked by the LDADDR opcodes generated for
11970 * accessing vtype fields.
11971 * - get rid of I8CONST on 64 bit platforms
11972 * - dealing with the increase in code size due to branches created during opcode
11974 * - use extended basic blocks
11975 * - all parts of the JIT
11976 * - handle_global_vregs () && local regalloc
11977 * - avoid introducing global vregs during decomposition, like 'vtable' in isinst
11978 * - sources of increase in code size:
11981 * - isinst and castclass
11982 * - lvregs not allocated to global registers even if used multiple times
11983 * - call cctors outside the JIT, to make -v output more readable and JIT timings more
11985 * - check for fp stack leakage in other opcodes too. (-> 'exceptions' optimization)
11986 * - add all micro optimizations from the old JIT
11987 * - put tree optimizations into the deadce pass
11988 * - decompose op_start_handler/op_endfilter/op_endfinally earlier using an arch
11989 * specific function.
11990 * - unify the float comparison opcodes with the other comparison opcodes, i.e.
11991 * fcompare + branchCC.
11992 * - create a helper function for allocating a stack slot, taking into account
11993 * MONO_CFG_HAS_SPILLUP.
11995 * - merge the ia64 switch changes.
11996 * - optimize mono_regstate2_alloc_int/float.
11997 * - fix the pessimistic handling of variables accessed in exception handler blocks.
11998 * - need to write a tree optimization pass, but the creation of trees is difficult, i.e.
11999 * parts of the tree could be separated by other instructions, killing the tree
12000 * arguments, or stores killing loads etc. Also, should we fold loads into other
12001 * instructions if the result of the load is used multiple times ?
12002 * - make the REM_IMM optimization in mini-x86.c arch-independent.
12003 * - LAST MERGE: 108395.
12004 * - when returning vtypes in registers, generate IR and append it to the end of the
12005 * last bb instead of doing it in the epilog.
12006 * - change the store opcodes so they use sreg1 instead of dreg to store the base register.
12014 - When to decompose opcodes:
12015 - earlier: this makes some optimizations hard to implement, since the low level IR
12016 no longer contains the necessary information. But it is easier to do.
12017 - later: harder to implement, enables more optimizations.
12018 - Branches inside bblocks:
12019 - created when decomposing complex opcodes.
12020 - branches to another bblock: harmless, but not tracked by the branch
12021 optimizations, so need to branch to a label at the start of the bblock.
12022 - branches to inside the same bblock: very problematic, trips up the local
12023 reg allocator. Can be fixed by splitting the current bblock, but that is a
12024 complex operation, since some local vregs can become global vregs etc.
12025 - Local/global vregs:
12026 - local vregs: temporary vregs used inside one bblock. Assigned to hregs by the
12027 local register allocator.
12028 - global vregs: used in more than one bblock. Have an associated MonoMethodVar
12029 structure, created by mono_create_var (). Assigned to hregs or the stack by
12030 the global register allocator.
12031 - When to do optimizations like alu->alu_imm:
12032 - earlier -> saves work later on since the IR will be smaller/simpler
12033 - later -> can work on more instructions
12034 - Handling of valuetypes:
12035 - When a vtype is pushed on the stack, a new temporary is created, an
12036 instruction computing its address (LDADDR) is emitted and pushed on
12037 the stack. Need to optimize cases when the vtype is used immediately as in
12038 argument passing, stloc etc.
12039 - Instead of the to_end stuff in the old JIT, simply call the function handling
12040 the values on the stack before emitting the last instruction of the bb.
12043 #endif /* DISABLE_JIT */