2 * method-to-ir.c: Convert CIL to the JIT internal representation
5 * Paolo Molaro (lupus@ximian.com)
6 * Dietmar Maurer (dietmar@ximian.com)
8 * (C) 2002 Ximian, Inc.
9 * Copyright 2003-2010 Novell, Inc (http://www.novell.com)
10 * Copyright 2011 Xamarin, Inc (http://www.xamarin.com)
27 #ifdef HAVE_SYS_TIME_H
35 #include <mono/utils/memcheck.h>
37 #include <mono/metadata/abi-details.h>
38 #include <mono/metadata/assembly.h>
39 #include <mono/metadata/attrdefs.h>
40 #include <mono/metadata/loader.h>
41 #include <mono/metadata/tabledefs.h>
42 #include <mono/metadata/class.h>
43 #include <mono/metadata/object.h>
44 #include <mono/metadata/exception.h>
45 #include <mono/metadata/opcodes.h>
46 #include <mono/metadata/mono-endian.h>
47 #include <mono/metadata/tokentype.h>
48 #include <mono/metadata/tabledefs.h>
49 #include <mono/metadata/marshal.h>
50 #include <mono/metadata/debug-helpers.h>
51 #include <mono/metadata/mono-debug.h>
52 #include <mono/metadata/gc-internal.h>
53 #include <mono/metadata/security-manager.h>
54 #include <mono/metadata/threads-types.h>
55 #include <mono/metadata/security-core-clr.h>
56 #include <mono/metadata/monitor.h>
57 #include <mono/metadata/profiler-private.h>
58 #include <mono/metadata/profiler.h>
59 #include <mono/metadata/debug-mono-symfile.h>
60 #include <mono/utils/mono-compiler.h>
61 #include <mono/utils/mono-memory-model.h>
62 #include <mono/metadata/mono-basic-block.h>
68 #include "jit-icalls.h"
70 #include "debugger-agent.h"
72 #define BRANCH_COST 10
73 #define INLINE_LENGTH_LIMIT 20
75 /* These have 'cfg' as an implicit argument */
76 #define INLINE_FAILURE(msg) do { \
77 if ((cfg->method != cfg->current_method) && (cfg->current_method->wrapper_type == MONO_WRAPPER_NONE)) { \
78 inline_failure (cfg, msg); \
79 goto exception_exit; \
82 #define CHECK_CFG_EXCEPTION do {\
83 if (cfg->exception_type != MONO_EXCEPTION_NONE) \
84 goto exception_exit; \
86 #define METHOD_ACCESS_FAILURE(method, cmethod) do { \
87 method_access_failure ((cfg), (method), (cmethod)); \
88 goto exception_exit; \
90 #define FIELD_ACCESS_FAILURE(method, field) do { \
91 field_access_failure ((cfg), (method), (field)); \
92 goto exception_exit; \
94 #define GENERIC_SHARING_FAILURE(opcode) do { \
96 gshared_failure (cfg, opcode, __FILE__, __LINE__); \
97 goto exception_exit; \
100 #define GSHAREDVT_FAILURE(opcode) do { \
101 if (cfg->gsharedvt) { \
102 gsharedvt_failure (cfg, opcode, __FILE__, __LINE__); \
103 goto exception_exit; \
106 #define OUT_OF_MEMORY_FAILURE do { \
107 mono_cfg_set_exception (cfg, MONO_EXCEPTION_OUT_OF_MEMORY); \
108 goto exception_exit; \
110 #define DISABLE_AOT(cfg) do { \
111 if ((cfg)->verbose_level >= 2) \
112 printf ("AOT disabled: %s:%d\n", __FILE__, __LINE__); \
113 (cfg)->disable_aot = TRUE; \
115 #define LOAD_ERROR do { \
116 break_on_unverified (); \
117 mono_cfg_set_exception (cfg, MONO_EXCEPTION_TYPE_LOAD); \
118 goto exception_exit; \
121 #define TYPE_LOAD_ERROR(klass) do { \
122 cfg->exception_ptr = klass; \
126 #define CHECK_CFG_ERROR do {\
127 if (!mono_error_ok (&cfg->error)) { \
128 mono_cfg_set_exception (cfg, MONO_EXCEPTION_MONO_ERROR); \
129 goto mono_error_exit; \
133 /* Determine whenever 'ins' represents a load of the 'this' argument */
134 #define MONO_CHECK_THIS(ins) (mono_method_signature (cfg->method)->hasthis && ((ins)->opcode == OP_MOVE) && ((ins)->sreg1 == cfg->args [0]->dreg))
136 static int ldind_to_load_membase (int opcode);
137 static int stind_to_store_membase (int opcode);
139 int mono_op_to_op_imm (int opcode);
140 int mono_op_to_op_imm_noemul (int opcode);
142 MONO_API MonoInst* mono_emit_native_call (MonoCompile *cfg, gconstpointer func, MonoMethodSignature *sig, MonoInst **args);
144 static int inline_method (MonoCompile *cfg, MonoMethod *cmethod, MonoMethodSignature *fsig, MonoInst **sp,
145 guchar *ip, guint real_offset, gboolean inline_always, MonoBasicBlock **out_cbb);
147 /* helper methods signatures */
148 static MonoMethodSignature *helper_sig_class_init_trampoline;
149 static MonoMethodSignature *helper_sig_domain_get;
150 static MonoMethodSignature *helper_sig_generic_class_init_trampoline;
151 static MonoMethodSignature *helper_sig_generic_class_init_trampoline_llvm;
152 static MonoMethodSignature *helper_sig_rgctx_lazy_fetch_trampoline;
153 static MonoMethodSignature *helper_sig_monitor_enter_exit_trampoline;
154 static MonoMethodSignature *helper_sig_monitor_enter_exit_trampoline_llvm;
157 * Instruction metadata
165 #define MINI_OP(a,b,dest,src1,src2) dest, src1, src2, ' ',
166 #define MINI_OP3(a,b,dest,src1,src2,src3) dest, src1, src2, src3,
172 #if SIZEOF_REGISTER == 8 && SIZEOF_REGISTER == SIZEOF_VOID_P
177 /* keep in sync with the enum in mini.h */
180 #include "mini-ops.h"
185 #define MINI_OP(a,b,dest,src1,src2) ((src2) != NONE ? 2 : ((src1) != NONE ? 1 : 0)),
186 #define MINI_OP3(a,b,dest,src1,src2,src3) ((src3) != NONE ? 3 : ((src2) != NONE ? 2 : ((src1) != NONE ? 1 : 0))),
188 * This should contain the index of the last sreg + 1. This is not the same
189 * as the number of sregs for opcodes like IA64_CMP_EQ_IMM.
191 const gint8 ins_sreg_counts[] = {
192 #include "mini-ops.h"
197 #define MONO_INIT_VARINFO(vi,id) do { \
198 (vi)->range.first_use.pos.bid = 0xffff; \
204 mono_alloc_ireg (MonoCompile *cfg)
206 return alloc_ireg (cfg);
210 mono_alloc_lreg (MonoCompile *cfg)
212 return alloc_lreg (cfg);
216 mono_alloc_freg (MonoCompile *cfg)
218 return alloc_freg (cfg);
222 mono_alloc_preg (MonoCompile *cfg)
224 return alloc_preg (cfg);
228 mono_alloc_dreg (MonoCompile *cfg, MonoStackType stack_type)
230 return alloc_dreg (cfg, stack_type);
234 * mono_alloc_ireg_ref:
236 * Allocate an IREG, and mark it as holding a GC ref.
239 mono_alloc_ireg_ref (MonoCompile *cfg)
241 return alloc_ireg_ref (cfg);
245 * mono_alloc_ireg_mp:
247 * Allocate an IREG, and mark it as holding a managed pointer.
250 mono_alloc_ireg_mp (MonoCompile *cfg)
252 return alloc_ireg_mp (cfg);
256 * mono_alloc_ireg_copy:
258 * Allocate an IREG with the same GC type as VREG.
261 mono_alloc_ireg_copy (MonoCompile *cfg, guint32 vreg)
263 if (vreg_is_ref (cfg, vreg))
264 return alloc_ireg_ref (cfg);
265 else if (vreg_is_mp (cfg, vreg))
266 return alloc_ireg_mp (cfg);
268 return alloc_ireg (cfg);
272 mono_type_to_regmove (MonoCompile *cfg, MonoType *type)
277 type = mini_replace_type (type);
279 switch (type->type) {
282 case MONO_TYPE_BOOLEAN:
294 case MONO_TYPE_FNPTR:
296 case MONO_TYPE_CLASS:
297 case MONO_TYPE_STRING:
298 case MONO_TYPE_OBJECT:
299 case MONO_TYPE_SZARRAY:
300 case MONO_TYPE_ARRAY:
304 #if SIZEOF_REGISTER == 8
313 case MONO_TYPE_VALUETYPE:
314 if (type->data.klass->enumtype) {
315 type = mono_class_enum_basetype (type->data.klass);
318 if (MONO_CLASS_IS_SIMD (cfg, mono_class_from_mono_type (type)))
321 case MONO_TYPE_TYPEDBYREF:
323 case MONO_TYPE_GENERICINST:
324 type = &type->data.generic_class->container_class->byval_arg;
328 g_assert (cfg->generic_sharing_context);
329 if (mini_type_var_is_vt (cfg, type))
334 g_error ("unknown type 0x%02x in type_to_regstore", type->type);
340 mono_print_bb (MonoBasicBlock *bb, const char *msg)
345 printf ("\n%s %d: [IN: ", msg, bb->block_num);
346 for (i = 0; i < bb->in_count; ++i)
347 printf (" BB%d(%d)", bb->in_bb [i]->block_num, bb->in_bb [i]->dfn);
349 for (i = 0; i < bb->out_count; ++i)
350 printf (" BB%d(%d)", bb->out_bb [i]->block_num, bb->out_bb [i]->dfn);
352 for (tree = bb->code; tree; tree = tree->next)
353 mono_print_ins_index (-1, tree);
/*
 * mono_create_helper_signatures:
 * Pre-builds the MonoMethodSignature objects used when emitting calls to the
 * JIT helper trampolines (class init, rgctx lazy fetch, monitor enter/exit).
 * NOTE(review): this excerpt is an elided listing — the return type line and
 * the function's braces are missing from view; code left byte-identical.
 */
357 mono_create_helper_signatures (void)
359 helper_sig_domain_get = mono_create_icall_signature ("ptr");
360 helper_sig_class_init_trampoline = mono_create_icall_signature ("void");
361 helper_sig_generic_class_init_trampoline = mono_create_icall_signature ("void");
/* the _llvm variants take an explicit argument instead of using a trampoline-specific calling convention */
362 helper_sig_generic_class_init_trampoline_llvm = mono_create_icall_signature ("void ptr");
363 helper_sig_rgctx_lazy_fetch_trampoline = mono_create_icall_signature ("ptr ptr");
364 helper_sig_monitor_enter_exit_trampoline = mono_create_icall_signature ("void");
365 helper_sig_monitor_enter_exit_trampoline_llvm = mono_create_icall_signature ("void object");
/*
 * break_on_unverified:
 * Debugging hook: when the --break-on-unverified debug option is set, stop in
 * the debugger at the point unverifiable IL is detected.
 * NOTE(review): the if-body is elided in this excerpt (presumably a breakpoint
 * trap such as G_BREAKPOINT ()) — TODO confirm against the full source.
 */
368 static MONO_NEVER_INLINE void
369 break_on_unverified (void)
371 if (mini_get_debug_options ()->break_on_unverified)
/*
 * method_access_failure:
 * Record a MethodAccessException on CFG: METHOD tried to call the
 * inaccessible CIL_METHOD. Sets cfg->exception_message to a descriptive
 * string; compilation is aborted by the caller (see METHOD_ACCESS_FAILURE).
 */
375 static MONO_NEVER_INLINE void
376 method_access_failure (MonoCompile *cfg, MonoMethod *method, MonoMethod *cil_method)
378 char *method_fname = mono_method_full_name (method, TRUE);
379 char *cil_method_fname = mono_method_full_name (cil_method, TRUE);
380 mono_cfg_set_exception (cfg, MONO_EXCEPTION_METHOD_ACCESS);
/* g_strdup_printf copies the names, so the temporaries can be freed here */
381 cfg->exception_message = g_strdup_printf ("Method `%s' is inaccessible from method `%s'\n", cil_method_fname, method_fname);
382 g_free (method_fname);
383 g_free (cil_method_fname);
/*
 * field_access_failure:
 * Record a FieldAccessException on CFG: METHOD tried to access the
 * inaccessible FIELD. Mirrors method_access_failure () for fields.
 */
386 static MONO_NEVER_INLINE void
387 field_access_failure (MonoCompile *cfg, MonoMethod *method, MonoClassField *field)
389 char *method_fname = mono_method_full_name (method, TRUE);
390 char *field_fname = mono_field_full_name (field);
391 mono_cfg_set_exception (cfg, MONO_EXCEPTION_FIELD_ACCESS);
/* message owns a fresh copy; free the two temporary name strings */
392 cfg->exception_message = g_strdup_printf ("Field `%s' is inaccessible from method `%s'\n", field_fname, method_fname);
393 g_free (method_fname);
394 g_free (field_fname);
/*
 * inline_failure:
 * Mark the current compilation as failed due to inlining (MSG describes why).
 * Called from the INLINE_FAILURE macro; the caller then aborts via
 * exception_exit. Out of line (MONO_NEVER_INLINE) to keep the fast path small.
 */
397 static MONO_NEVER_INLINE void
398 inline_failure (MonoCompile *cfg, const char *msg)
400 if (cfg->verbose_level >= 2)
401 printf ("inline failed: %s\n", msg);
402 mono_cfg_set_exception (cfg, MONO_EXCEPTION_INLINE_FAILED);
/*
 * gshared_failure:
 * Mark the compilation as failed for generic sharing at OPCODE.
 * NOTE(review): several defects visible even in this fragment, apparently
 * left over from when this code lived inside the GENERIC_SHARING_FAILURE
 * macro: (1) the if-line ends with a stray line-continuation backslash;
 * (2) the printf uses __LINE__ (a line inside this helper) instead of the
 * LINE parameter passed by the caller; (3) the FILE/LINE parameters are
 * otherwise unused. Should print `file`/`line` — TODO confirm and fix
 * against the full source.
 */
405 static MONO_NEVER_INLINE void
406 gshared_failure (MonoCompile *cfg, int opcode, const char *file, int line)
408 if (cfg->verbose_level > 2) \
409 printf ("sharing failed for method %s.%s.%s/%d opcode %s line %d\n", cfg->current_method->klass->name_space, cfg->current_method->klass->name, cfg->current_method->name, cfg->current_method->signature->param_count, mono_opcode_name ((opcode)), __LINE__);
410 mono_cfg_set_exception (cfg, MONO_EXCEPTION_GENERIC_SHARING_FAILED);
/*
 * gsharedvt_failure:
 * Mark the compilation as failed for gsharedvt (valuetype generic sharing)
 * at OPCODE, recording FILE:LINE of the failing call site in the exception
 * message. Unlike gshared_failure () above, this one correctly uses its
 * file/line parameters.
 */
413 static MONO_NEVER_INLINE void
414 gsharedvt_failure (MonoCompile *cfg, int opcode, const char *file, int line)
416 cfg->exception_message = g_strdup_printf ("gsharedvt failed for method %s.%s.%s/%d opcode %s %s:%d", cfg->current_method->klass->name_space, cfg->current_method->klass->name, cfg->current_method->name, cfg->current_method->signature->param_count, mono_opcode_name ((opcode)), file, line);
417 if (cfg->verbose_level >= 2)
418 printf ("%s\n", cfg->exception_message);
419 mono_cfg_set_exception (cfg, MONO_EXCEPTION_GENERIC_SHARING_FAILED);
423 * When using gsharedvt, some instatiations might be verifiable, and some might be not. i.e.
424 * foo<T> (int i) { ldarg.0; box T; }
426 #define UNVERIFIED do { \
427 if (cfg->gsharedvt) { \
428 if (cfg->verbose_level > 2) \
429 printf ("gsharedvt method failed to verify, falling back to instantiation.\n"); \
430 mono_cfg_set_exception (cfg, MONO_EXCEPTION_GENERIC_SHARING_FAILED); \
431 goto exception_exit; \
433 break_on_unverified (); \
437 #define GET_BBLOCK(cfg,tblock,ip) do { \
438 (tblock) = cfg->cil_offset_to_bb [(ip) - cfg->cil_start]; \
440 if ((ip) >= end || (ip) < header->code) UNVERIFIED; \
441 NEW_BBLOCK (cfg, (tblock)); \
442 (tblock)->cil_code = (ip); \
443 ADD_BBLOCK (cfg, (tblock)); \
447 #if defined(TARGET_X86) || defined(TARGET_AMD64)
448 #define EMIT_NEW_X86_LEA(cfg,dest,sr1,sr2,shift,imm) do { \
449 MONO_INST_NEW (cfg, dest, OP_X86_LEA); \
450 (dest)->dreg = alloc_ireg_mp ((cfg)); \
451 (dest)->sreg1 = (sr1); \
452 (dest)->sreg2 = (sr2); \
453 (dest)->inst_imm = (imm); \
454 (dest)->backend.shift_amount = (shift); \
455 MONO_ADD_INS ((cfg)->cbb, (dest)); \
459 #if SIZEOF_REGISTER == 8
460 #define ADD_WIDEN_OP(ins, arg1, arg2) do { \
461 /* FIXME: Need to add many more cases */ \
462 if ((arg1)->type == STACK_PTR && (arg2)->type == STACK_I4) { \
464 int dr = alloc_preg (cfg); \
465 EMIT_NEW_UNALU (cfg, widen, OP_SEXT_I4, dr, (arg2)->dreg); \
466 (ins)->sreg2 = widen->dreg; \
470 #define ADD_WIDEN_OP(ins, arg1, arg2)
473 #define ADD_BINOP(op) do { \
474 MONO_INST_NEW (cfg, ins, (op)); \
476 ins->sreg1 = sp [0]->dreg; \
477 ins->sreg2 = sp [1]->dreg; \
478 type_from_op (ins, sp [0], sp [1]); \
480 /* Have to insert a widening op */ \
481 ADD_WIDEN_OP (ins, sp [0], sp [1]); \
482 ins->dreg = alloc_dreg ((cfg), (ins)->type); \
483 MONO_ADD_INS ((cfg)->cbb, (ins)); \
484 *sp++ = mono_decompose_opcode ((cfg), (ins)); \
487 #define ADD_UNOP(op) do { \
488 MONO_INST_NEW (cfg, ins, (op)); \
490 ins->sreg1 = sp [0]->dreg; \
491 type_from_op (ins, sp [0], NULL); \
493 (ins)->dreg = alloc_dreg ((cfg), (ins)->type); \
494 MONO_ADD_INS ((cfg)->cbb, (ins)); \
495 *sp++ = mono_decompose_opcode (cfg, ins); \
498 #define ADD_BINCOND(next_block) do { \
501 MONO_INST_NEW(cfg, cmp, OP_COMPARE); \
502 cmp->sreg1 = sp [0]->dreg; \
503 cmp->sreg2 = sp [1]->dreg; \
504 type_from_op (cmp, sp [0], sp [1]); \
506 type_from_op (ins, sp [0], sp [1]); \
507 ins->inst_many_bb = mono_mempool_alloc (cfg->mempool, sizeof(gpointer)*2); \
508 GET_BBLOCK (cfg, tblock, target); \
509 link_bblock (cfg, bblock, tblock); \
510 ins->inst_true_bb = tblock; \
511 if ((next_block)) { \
512 link_bblock (cfg, bblock, (next_block)); \
513 ins->inst_false_bb = (next_block); \
514 start_new_bblock = 1; \
516 GET_BBLOCK (cfg, tblock, ip); \
517 link_bblock (cfg, bblock, tblock); \
518 ins->inst_false_bb = tblock; \
519 start_new_bblock = 2; \
521 if (sp != stack_start) { \
522 handle_stack_args (cfg, stack_start, sp - stack_start); \
523 CHECK_UNVERIFIABLE (cfg); \
525 MONO_ADD_INS (bblock, cmp); \
526 MONO_ADD_INS (bblock, ins); \
530 * link_bblock: Links two basic blocks
532 * links two basic blocks in the control flow graph, the 'from'
533 * argument is the starting block and the 'to' argument is the block
534 * the control flow ends to after 'from'.
/*
 * NOTE(review): this excerpt is an elided listing — the return type, braces,
 * loop-exit logic and the final out_bb/in_bb reassignment lines are missing
 * from view. Code left byte-identical; comments only.
 */
537 link_bblock (MonoCompile *cfg, MonoBasicBlock *from, MonoBasicBlock* to)
539 MonoBasicBlock **newa;
/* verbose tracing of the new CFG edge, in terms of IL offsets */
543 if (from->cil_code) {
545 printf ("edge from IL%04x to IL_%04x\n", from->cil_code - cfg->cil_code, to->cil_code - cfg->cil_code);
547 printf ("edge from IL%04x to exit\n", from->cil_code - cfg->cil_code);
550 printf ("edge from entry to IL_%04x\n", to->cil_code - cfg->cil_code);
552 printf ("edge from entry to exit\n");
/* avoid duplicate edges: bail if TO is already a successor of FROM */
557 for (i = 0; i < from->out_count; ++i) {
558 if (to == from->out_bb [i]) {
/* grow the successor array by one; arrays live in the compile mempool */
564 newa = mono_mempool_alloc (cfg->mempool, sizeof (gpointer) * (from->out_count + 1));
565 for (i = 0; i < from->out_count; ++i) {
566 newa [i] = from->out_bb [i];
/* symmetric update of TO's predecessor list */
574 for (i = 0; i < to->in_count; ++i) {
575 if (from == to->in_bb [i]) {
581 newa = mono_mempool_alloc (cfg->mempool, sizeof (gpointer) * (to->in_count + 1));
582 for (i = 0; i < to->in_count; ++i) {
583 newa [i] = to->in_bb [i];
592 mono_link_bblock (MonoCompile *cfg, MonoBasicBlock *from, MonoBasicBlock* to)
594 link_bblock (cfg, from, to);
598 * mono_find_block_region:
600 * We mark each basic block with a region ID. We use that to avoid BB
601 * optimizations when blocks are in different regions.
604 * A region token that encodes where this region is, and information
605 * about the clause owner for this block.
607 * The region encodes the try/catch/filter clause that owns this block
608 * as well as the type. -1 is a special value that represents a block
609 * that is in none of try/catch/filter.
/*
 * mono_find_block_region:
 * Compute the region token for the basic block at IL OFFSET: the 1-based
 * clause index shifted left 8 bits, OR'd with a MONO_REGION_* kind and the
 * clause flags. Clauses are scanned in order; filter and handler ranges are
 * checked before the protected (try) range. Falls through (in elided lines)
 * to a "no region" value when OFFSET is outside every clause.
 */
612 mono_find_block_region (MonoCompile *cfg, int offset)
614 MonoMethodHeader *header = cfg->header;
615 MonoExceptionClause *clause;
618 for (i = 0; i < header->num_clauses; ++i) {
619 clause = &header->clauses [i];
/* a filter region runs from filter_offset up to (not including) handler_offset */
620 if ((clause->flags == MONO_EXCEPTION_CLAUSE_FILTER) && (offset >= clause->data.filter_offset) &&
621 (offset < (clause->handler_offset)))
622 return ((i + 1) << 8) | MONO_REGION_FILTER | clause->flags;
624 if (MONO_OFFSET_IN_HANDLER (clause, offset)) {
625 if (clause->flags == MONO_EXCEPTION_CLAUSE_FINALLY)
626 return ((i + 1) << 8) | MONO_REGION_FINALLY | clause->flags;
627 else if (clause->flags == MONO_EXCEPTION_CLAUSE_FAULT)
628 return ((i + 1) << 8) | MONO_REGION_FAULT | clause->flags;
/* neither finally nor fault: treat the handler as a catch region */
630 return ((i + 1) << 8) | MONO_REGION_CATCH | clause->flags;
/* inside the protected (try) range but not the handler */
633 if (MONO_OFFSET_IN_CLAUSE (clause, offset))
634 return ((i + 1) << 8) | clause->flags;
/*
 * mono_find_final_block:
 * Collect (as a GList) the exception clauses of kind TYPE whose protected
 * range contains IP but not TARGET — i.e. the clauses a branch from IP to
 * TARGET would leave, and whose handlers (e.g. finally) must therefore be
 * run. NOTE(review): res initialization/return lines are elided in this
 * excerpt; code left byte-identical.
 */
641 mono_find_final_block (MonoCompile *cfg, unsigned char *ip, unsigned char *target, int type)
643 MonoMethodHeader *header = cfg->header;
644 MonoExceptionClause *clause;
648 for (i = 0; i < header->num_clauses; ++i) {
649 clause = &header->clauses [i];
/* the branch exits this clause: IP inside the try range, TARGET outside */
650 if (MONO_OFFSET_IN_CLAUSE (clause, (ip - header->code)) &&
651 (!MONO_OFFSET_IN_CLAUSE (clause, (target - header->code)))) {
652 if (clause->flags == type)
653 res = g_list_append (res, clause);
660 mono_create_spvar_for_region (MonoCompile *cfg, int region)
664 var = g_hash_table_lookup (cfg->spvars, GINT_TO_POINTER (region));
668 var = mono_compile_create_var (cfg, &mono_defaults.int_class->byval_arg, OP_LOCAL);
669 /* prevent it from being register allocated */
670 var->flags |= MONO_INST_VOLATILE;
672 g_hash_table_insert (cfg->spvars, GINT_TO_POINTER (region), var);
676 mono_find_exvar_for_offset (MonoCompile *cfg, int offset)
678 return g_hash_table_lookup (cfg->exvars, GINT_TO_POINTER (offset));
682 mono_create_exvar_for_offset (MonoCompile *cfg, int offset)
686 var = g_hash_table_lookup (cfg->exvars, GINT_TO_POINTER (offset));
690 var = mono_compile_create_var (cfg, &mono_defaults.object_class->byval_arg, OP_LOCAL);
691 /* prevent it from being register allocated */
692 var->flags |= MONO_INST_VOLATILE;
694 g_hash_table_insert (cfg->exvars, GINT_TO_POINTER (offset), var);
700 * Returns the type used in the eval stack when @type is loaded.
701 * FIXME: return a MonoType/MonoClass for the byref and VALUETYPE cases.
704 type_to_eval_stack_type (MonoCompile *cfg, MonoType *type, MonoInst *inst)
708 type = mini_replace_type (type);
709 inst->klass = klass = mono_class_from_mono_type (type);
711 inst->type = STACK_MP;
716 switch (type->type) {
718 inst->type = STACK_INV;
722 case MONO_TYPE_BOOLEAN:
728 inst->type = STACK_I4;
733 case MONO_TYPE_FNPTR:
734 inst->type = STACK_PTR;
736 case MONO_TYPE_CLASS:
737 case MONO_TYPE_STRING:
738 case MONO_TYPE_OBJECT:
739 case MONO_TYPE_SZARRAY:
740 case MONO_TYPE_ARRAY:
741 inst->type = STACK_OBJ;
745 inst->type = STACK_I8;
749 inst->type = STACK_R8;
751 case MONO_TYPE_VALUETYPE:
752 if (type->data.klass->enumtype) {
753 type = mono_class_enum_basetype (type->data.klass);
757 inst->type = STACK_VTYPE;
760 case MONO_TYPE_TYPEDBYREF:
761 inst->klass = mono_defaults.typed_reference_class;
762 inst->type = STACK_VTYPE;
764 case MONO_TYPE_GENERICINST:
765 type = &type->data.generic_class->container_class->byval_arg;
769 g_assert (cfg->generic_sharing_context);
770 if (mini_is_gsharedvt_type (cfg, type)) {
771 g_assert (cfg->gsharedvt);
772 inst->type = STACK_VTYPE;
774 inst->type = STACK_OBJ;
778 g_error ("unknown type 0x%02x in eval stack type", type->type);
783 * The following tables are used to quickly validate the IL code in type_from_op ().
786 bin_num_table [STACK_MAX] [STACK_MAX] = {
787 {STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
788 {STACK_INV, STACK_I4, STACK_INV, STACK_PTR, STACK_INV, STACK_MP, STACK_INV, STACK_INV},
789 {STACK_INV, STACK_INV, STACK_I8, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
790 {STACK_INV, STACK_PTR, STACK_INV, STACK_PTR, STACK_INV, STACK_MP, STACK_INV, STACK_INV},
791 {STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_R8, STACK_INV, STACK_INV, STACK_INV},
792 {STACK_INV, STACK_MP, STACK_INV, STACK_MP, STACK_INV, STACK_PTR, STACK_INV, STACK_INV},
793 {STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
794 {STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV}
799 STACK_INV, STACK_I4, STACK_I8, STACK_PTR, STACK_R8, STACK_INV, STACK_INV, STACK_INV
802 /* reduce the size of this table */
804 bin_int_table [STACK_MAX] [STACK_MAX] = {
805 {STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
806 {STACK_INV, STACK_I4, STACK_INV, STACK_PTR, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
807 {STACK_INV, STACK_INV, STACK_I8, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
808 {STACK_INV, STACK_PTR, STACK_INV, STACK_PTR, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
809 {STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
810 {STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
811 {STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
812 {STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV}
816 bin_comp_table [STACK_MAX] [STACK_MAX] = {
817 /* Inv i L p F & O vt */
819 {0, 1, 0, 1, 0, 0, 0, 0}, /* i, int32 */
820 {0, 0, 1, 0, 0, 0, 0, 0}, /* L, int64 */
821 {0, 1, 0, 1, 0, 2, 4, 0}, /* p, ptr */
822 {0, 0, 0, 0, 1, 0, 0, 0}, /* F, R8 */
823 {0, 0, 0, 2, 0, 1, 0, 0}, /* &, managed pointer */
824 {0, 0, 0, 4, 0, 0, 3, 0}, /* O, reference */
825 {0, 0, 0, 0, 0, 0, 0, 0}, /* vt value type */
828 /* reduce the size of this table */
830 shift_table [STACK_MAX] [STACK_MAX] = {
831 {STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
832 {STACK_INV, STACK_I4, STACK_INV, STACK_I4, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
833 {STACK_INV, STACK_I8, STACK_INV, STACK_I8, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
834 {STACK_INV, STACK_PTR, STACK_INV, STACK_PTR, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
835 {STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
836 {STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
837 {STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
838 {STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV}
842 * Tables to map from the non-specific opcode to the matching
843 * type-specific opcode.
845 /* handles from CEE_ADD to CEE_SHR_UN (CEE_REM_UN for floats) */
847 binops_op_map [STACK_MAX] = {
848 0, OP_IADD-CEE_ADD, OP_LADD-CEE_ADD, OP_PADD-CEE_ADD, OP_FADD-CEE_ADD, OP_PADD-CEE_ADD
851 /* handles from CEE_NEG to CEE_CONV_U8 */
853 unops_op_map [STACK_MAX] = {
854 0, OP_INEG-CEE_NEG, OP_LNEG-CEE_NEG, OP_PNEG-CEE_NEG, OP_FNEG-CEE_NEG, OP_PNEG-CEE_NEG
857 /* handles from CEE_CONV_U2 to CEE_SUB_OVF_UN */
859 ovfops_op_map [STACK_MAX] = {
860 0, OP_ICONV_TO_U2-CEE_CONV_U2, OP_LCONV_TO_U2-CEE_CONV_U2, OP_PCONV_TO_U2-CEE_CONV_U2, OP_FCONV_TO_U2-CEE_CONV_U2, OP_PCONV_TO_U2-CEE_CONV_U2, OP_PCONV_TO_U2-CEE_CONV_U2
863 /* handles from CEE_CONV_OVF_I1_UN to CEE_CONV_OVF_U_UN */
865 ovf2ops_op_map [STACK_MAX] = {
866 0, OP_ICONV_TO_OVF_I1_UN-CEE_CONV_OVF_I1_UN, OP_LCONV_TO_OVF_I1_UN-CEE_CONV_OVF_I1_UN, OP_PCONV_TO_OVF_I1_UN-CEE_CONV_OVF_I1_UN, OP_FCONV_TO_OVF_I1_UN-CEE_CONV_OVF_I1_UN, OP_PCONV_TO_OVF_I1_UN-CEE_CONV_OVF_I1_UN
869 /* handles from CEE_CONV_OVF_I1 to CEE_CONV_OVF_U8 */
871 ovf3ops_op_map [STACK_MAX] = {
872 0, OP_ICONV_TO_OVF_I1-CEE_CONV_OVF_I1, OP_LCONV_TO_OVF_I1-CEE_CONV_OVF_I1, OP_PCONV_TO_OVF_I1-CEE_CONV_OVF_I1, OP_FCONV_TO_OVF_I1-CEE_CONV_OVF_I1, OP_PCONV_TO_OVF_I1-CEE_CONV_OVF_I1
875 /* handles from CEE_BEQ to CEE_BLT_UN */
877 beqops_op_map [STACK_MAX] = {
878 0, OP_IBEQ-CEE_BEQ, OP_LBEQ-CEE_BEQ, OP_PBEQ-CEE_BEQ, OP_FBEQ-CEE_BEQ, OP_PBEQ-CEE_BEQ, OP_PBEQ-CEE_BEQ
881 /* handles from CEE_CEQ to CEE_CLT_UN */
883 ceqops_op_map [STACK_MAX] = {
884 0, OP_ICEQ-OP_CEQ, OP_LCEQ-OP_CEQ, OP_PCEQ-OP_CEQ, OP_FCEQ-OP_CEQ, OP_PCEQ-OP_CEQ, OP_PCEQ-OP_CEQ
888 * Sets ins->type (the type on the eval stack) according to the
889 * type of the opcode and the arguments to it.
890 * Invalid IL code is marked by setting ins->type to the invalid value STACK_INV.
892 * FIXME: this function sets ins->type unconditionally in some cases, but
893 * it should set it to invalid for some types (a conv.x on an object)
896 type_from_op (MonoInst *ins, MonoInst *src1, MonoInst *src2) {
898 switch (ins->opcode) {
905 /* FIXME: check unverifiable args for STACK_MP */
906 ins->type = bin_num_table [src1->type] [src2->type];
907 ins->opcode += binops_op_map [ins->type];
914 ins->type = bin_int_table [src1->type] [src2->type];
915 ins->opcode += binops_op_map [ins->type];
920 ins->type = shift_table [src1->type] [src2->type];
921 ins->opcode += binops_op_map [ins->type];
926 ins->type = bin_comp_table [src1->type] [src2->type] ? STACK_I4: STACK_INV;
927 if ((src1->type == STACK_I8) || ((SIZEOF_VOID_P == 8) && ((src1->type == STACK_PTR) || (src1->type == STACK_OBJ) || (src1->type == STACK_MP))))
928 ins->opcode = OP_LCOMPARE;
929 else if (src1->type == STACK_R8)
930 ins->opcode = OP_FCOMPARE;
932 ins->opcode = OP_ICOMPARE;
934 case OP_ICOMPARE_IMM:
935 ins->type = bin_comp_table [src1->type] [src1->type] ? STACK_I4 : STACK_INV;
936 if ((src1->type == STACK_I8) || ((SIZEOF_VOID_P == 8) && ((src1->type == STACK_PTR) || (src1->type == STACK_OBJ) || (src1->type == STACK_MP))))
937 ins->opcode = OP_LCOMPARE_IMM;
949 ins->opcode += beqops_op_map [src1->type];
952 ins->type = bin_comp_table [src1->type] [src2->type] ? STACK_I4: STACK_INV;
953 ins->opcode += ceqops_op_map [src1->type];
959 ins->type = (bin_comp_table [src1->type] [src2->type] & 1) ? STACK_I4: STACK_INV;
960 ins->opcode += ceqops_op_map [src1->type];
964 ins->type = neg_table [src1->type];
965 ins->opcode += unops_op_map [ins->type];
968 if (src1->type >= STACK_I4 && src1->type <= STACK_PTR)
969 ins->type = src1->type;
971 ins->type = STACK_INV;
972 ins->opcode += unops_op_map [ins->type];
978 ins->type = STACK_I4;
979 ins->opcode += unops_op_map [src1->type];
982 ins->type = STACK_R8;
983 switch (src1->type) {
986 ins->opcode = OP_ICONV_TO_R_UN;
989 ins->opcode = OP_LCONV_TO_R_UN;
993 case CEE_CONV_OVF_I1:
994 case CEE_CONV_OVF_U1:
995 case CEE_CONV_OVF_I2:
996 case CEE_CONV_OVF_U2:
997 case CEE_CONV_OVF_I4:
998 case CEE_CONV_OVF_U4:
999 ins->type = STACK_I4;
1000 ins->opcode += ovf3ops_op_map [src1->type];
1002 case CEE_CONV_OVF_I_UN:
1003 case CEE_CONV_OVF_U_UN:
1004 ins->type = STACK_PTR;
1005 ins->opcode += ovf2ops_op_map [src1->type];
1007 case CEE_CONV_OVF_I1_UN:
1008 case CEE_CONV_OVF_I2_UN:
1009 case CEE_CONV_OVF_I4_UN:
1010 case CEE_CONV_OVF_U1_UN:
1011 case CEE_CONV_OVF_U2_UN:
1012 case CEE_CONV_OVF_U4_UN:
1013 ins->type = STACK_I4;
1014 ins->opcode += ovf2ops_op_map [src1->type];
1017 ins->type = STACK_PTR;
1018 switch (src1->type) {
1020 ins->opcode = OP_ICONV_TO_U;
1024 #if SIZEOF_VOID_P == 8
1025 ins->opcode = OP_LCONV_TO_U;
1027 ins->opcode = OP_MOVE;
1031 ins->opcode = OP_LCONV_TO_U;
1034 ins->opcode = OP_FCONV_TO_U;
1040 ins->type = STACK_I8;
1041 ins->opcode += unops_op_map [src1->type];
1043 case CEE_CONV_OVF_I8:
1044 case CEE_CONV_OVF_U8:
1045 ins->type = STACK_I8;
1046 ins->opcode += ovf3ops_op_map [src1->type];
1048 case CEE_CONV_OVF_U8_UN:
1049 case CEE_CONV_OVF_I8_UN:
1050 ins->type = STACK_I8;
1051 ins->opcode += ovf2ops_op_map [src1->type];
1055 ins->type = STACK_R8;
1056 ins->opcode += unops_op_map [src1->type];
1059 ins->type = STACK_R8;
1063 ins->type = STACK_I4;
1064 ins->opcode += ovfops_op_map [src1->type];
1067 case CEE_CONV_OVF_I:
1068 case CEE_CONV_OVF_U:
1069 ins->type = STACK_PTR;
1070 ins->opcode += ovfops_op_map [src1->type];
1073 case CEE_ADD_OVF_UN:
1075 case CEE_MUL_OVF_UN:
1077 case CEE_SUB_OVF_UN:
1078 ins->type = bin_num_table [src1->type] [src2->type];
1079 ins->opcode += ovfops_op_map [src1->type];
1080 if (ins->type == STACK_R8)
1081 ins->type = STACK_INV;
1083 case OP_LOAD_MEMBASE:
1084 ins->type = STACK_PTR;
1086 case OP_LOADI1_MEMBASE:
1087 case OP_LOADU1_MEMBASE:
1088 case OP_LOADI2_MEMBASE:
1089 case OP_LOADU2_MEMBASE:
1090 case OP_LOADI4_MEMBASE:
1091 case OP_LOADU4_MEMBASE:
1092 ins->type = STACK_PTR;
1094 case OP_LOADI8_MEMBASE:
1095 ins->type = STACK_I8;
1097 case OP_LOADR4_MEMBASE:
1098 case OP_LOADR8_MEMBASE:
1099 ins->type = STACK_R8;
1102 g_error ("opcode 0x%04x not handled in type from op", ins->opcode);
1106 if (ins->type == STACK_MP)
1107 ins->klass = mono_defaults.object_class;
1112 STACK_I4, STACK_I4, STACK_I4, STACK_I4, STACK_I4, STACK_I4, STACK_I8, STACK_PTR, STACK_R8, STACK_R8, STACK_OBJ
1118 param_table [STACK_MAX] [STACK_MAX] = {
1123 check_values_to_signature (MonoInst *args, MonoType *this, MonoMethodSignature *sig) {
1127 switch (args->type) {
1137 for (i = 0; i < sig->param_count; ++i) {
1138 switch (args [i].type) {
1142 if (!sig->params [i]->byref)
1146 if (sig->params [i]->byref)
1148 switch (sig->params [i]->type) {
1149 case MONO_TYPE_CLASS:
1150 case MONO_TYPE_STRING:
1151 case MONO_TYPE_OBJECT:
1152 case MONO_TYPE_SZARRAY:
1153 case MONO_TYPE_ARRAY:
1160 if (sig->params [i]->byref)
1162 if (sig->params [i]->type != MONO_TYPE_R4 && sig->params [i]->type != MONO_TYPE_R8)
1171 /*if (!param_table [args [i].type] [sig->params [i]->type])
1179 * When we need a pointer to the current domain many times in a method, we
1180 * call mono_domain_get() once and we store the result in a local variable.
1181 * This function returns the variable that represents the MonoDomain*.
1183 inline static MonoInst *
1184 mono_get_domainvar (MonoCompile *cfg)
1186 if (!cfg->domainvar)
1187 cfg->domainvar = mono_compile_create_var (cfg, &mono_defaults.int_class->byval_arg, OP_LOCAL);
1188 return cfg->domainvar;
1192 * The got_var contains the address of the Global Offset Table when AOT
1196 mono_get_got_var (MonoCompile *cfg)
1198 #ifdef MONO_ARCH_NEED_GOT_VAR
1199 if (!cfg->compile_aot)
1201 if (!cfg->got_var) {
1202 cfg->got_var = mono_compile_create_var (cfg, &mono_defaults.int_class->byval_arg, OP_LOCAL);
1204 return cfg->got_var;
1211 mono_get_vtable_var (MonoCompile *cfg)
1213 g_assert (cfg->generic_sharing_context);
1215 if (!cfg->rgctx_var) {
1216 cfg->rgctx_var = mono_compile_create_var (cfg, &mono_defaults.int_class->byval_arg, OP_LOCAL);
1217 /* force the var to be stack allocated */
1218 cfg->rgctx_var->flags |= MONO_INST_VOLATILE;
1221 return cfg->rgctx_var;
1225 type_from_stack_type (MonoInst *ins) {
1226 switch (ins->type) {
1227 case STACK_I4: return &mono_defaults.int32_class->byval_arg;
1228 case STACK_I8: return &mono_defaults.int64_class->byval_arg;
1229 case STACK_PTR: return &mono_defaults.int_class->byval_arg;
1230 case STACK_R8: return &mono_defaults.double_class->byval_arg;
1232 return &ins->klass->this_arg;
1233 case STACK_OBJ: return &mono_defaults.object_class->byval_arg;
1234 case STACK_VTYPE: return &ins->klass->byval_arg;
1236 g_error ("stack type %d to monotype not handled\n", ins->type);
1241 static G_GNUC_UNUSED int
1242 type_to_stack_type (MonoType *t)
1244 t = mono_type_get_underlying_type (t);
1248 case MONO_TYPE_BOOLEAN:
1251 case MONO_TYPE_CHAR:
1258 case MONO_TYPE_FNPTR:
1260 case MONO_TYPE_CLASS:
1261 case MONO_TYPE_STRING:
1262 case MONO_TYPE_OBJECT:
1263 case MONO_TYPE_SZARRAY:
1264 case MONO_TYPE_ARRAY:
1272 case MONO_TYPE_VALUETYPE:
1273 case MONO_TYPE_TYPEDBYREF:
1275 case MONO_TYPE_GENERICINST:
1276 if (mono_type_generic_inst_is_valuetype (t))
1282 g_assert_not_reached ();
1289 array_access_to_klass (int opcode)
1293 return mono_defaults.byte_class;
1295 return mono_defaults.uint16_class;
1298 return mono_defaults.int_class;
1301 return mono_defaults.sbyte_class;
1304 return mono_defaults.int16_class;
1307 return mono_defaults.int32_class;
1309 return mono_defaults.uint32_class;
1312 return mono_defaults.int64_class;
1315 return mono_defaults.single_class;
1318 return mono_defaults.double_class;
1319 case CEE_LDELEM_REF:
1320 case CEE_STELEM_REF:
1321 return mono_defaults.object_class;
1323 g_assert_not_reached ();
/*
 * mono_compile_get_interface_var:
 * Return a method-local variable used to carry the value in evaluation-stack
 * slot 'slot' (of the stack type of 'ins') across basic-block boundaries.
 * Variables are cached in cfg->intvars keyed by (stack type, slot) so the
 * same slot/type pair reuses the same local.
 */
1329 * We try to share variables when possible
1332 mono_compile_get_interface_var (MonoCompile *cfg, int slot, MonoInst *ins)
1337 /* inlining can result in deeper stacks */
1338 if (slot >= cfg->header->max_stack)
1339 return mono_compile_create_var (cfg, type_from_stack_type (ins), OP_LOCAL)
/* Cache index: one entry per (stack type, slot) pair. */
1341 pos = ins->type - 1 + slot * STACK_MAX;
1343 switch (ins->type) {
/* Cache hit: reuse the previously created variable. */
1350 if ((vnum = cfg->intvars [pos]))
1351 return cfg->varinfo [vnum];
1352 res = mono_compile_create_var (cfg, type_from_stack_type (ins), OP_LOCAL);
1353 cfg->intvars [pos] = res->inst_c0;
/* Non-cacheable stack types always get a fresh local. */
1356 res = mono_compile_create_var (cfg, type_from_stack_type (ins), OP_LOCAL);
/*
 * mono_save_token_info:
 * When compiling AOT, record the (image, token) pair under 'key' in
 * cfg->token_info_hash so the AOT compiler can later reconstruct the
 * metadata reference for the item identified by 'key'.
 */
1362 mono_save_token_info (MonoCompile *cfg, MonoImage *image, guint32 token, gpointer key)
1365 * Don't use this if a generic_context is set, since that means AOT can't
1366 * look up the method using just the image+token.
1367 * table == 0 means this is a reference made from a wrapper.
1369 if (cfg->compile_aot && !cfg->generic_context && (mono_metadata_token_table (token) > 0)) {
1370 MonoJumpInfoToken *jump_info_token = mono_mempool_alloc0 (cfg->mempool, sizeof (MonoJumpInfoToken));
1371 jump_info_token->image = image;
1372 jump_info_token->token = token;
1373 g_hash_table_insert (cfg->token_info_hash, key, jump_info_token);
1378 * This function is called to handle items that are left on the evaluation stack
1379 * at basic block boundaries. What happens is that we save the values to local variables
1380 * and we reload them later when first entering the target basic block (with the
1381 * handle_loaded_temps () function).
1382 * A single join point will use the same variables (stored in the array bb->out_stack or
1383 * bb->in_stack, if the basic block is before or after the join point).
1385 * This function needs to be called _before_ emitting the last instruction of
1386 * the bb (i.e. before emitting a branch).
1387 * If the stack merge fails at a join point, cfg->unverifiable is set.
1390 handle_stack_args (MonoCompile *cfg, MonoInst **sp, int count)
1393 MonoBasicBlock *bb = cfg->cbb;
1394 MonoBasicBlock *outb;
1395 MonoInst *inst, **locals;
1400 if (cfg->verbose_level > 3)
1401 printf ("%d item(s) on exit from B%d\n", count, bb->block_num);
/* First exit from this bblock: decide which variables carry the stack. */
1402 if (!bb->out_scount) {
1403 bb->out_scount = count;
1404 //printf ("bblock %d has out:", bb->block_num);
/* Prefer reusing a successor's already-assigned in_stack. */
1406 for (i = 0; i < bb->out_count; ++i) {
1407 outb = bb->out_bb [i];
1408 /* exception handlers are linked, but they should not be considered for stack args */
1409 if (outb->flags & BB_EXCEPTION_HANDLER)
1411 //printf (" %d", outb->block_num);
1412 if (outb->in_stack) {
1414 bb->out_stack = outb->in_stack;
/* No successor had one: allocate a fresh out_stack array. */
1420 bb->out_stack = mono_mempool_alloc (cfg->mempool, sizeof (MonoInst*) * count);
1421 for (i = 0; i < count; ++i) {
1423 * try to reuse temps already allocated for this purpose, if they occupy the same
1424 * stack slot and if they are of the same type.
1425 * This won't cause conflicts since if 'local' is used to
1426 * store one of the values in the in_stack of a bblock, then
1427 * the same variable will be used for the same outgoing stack
1429 * This doesn't work when inlining methods, since the bblocks
1430 * in the inlined methods do not inherit their in_stack from
1431 * the bblock they are inlined to. See bug #58863 for an
1434 if (cfg->inlined_method)
1435 bb->out_stack [i] = mono_compile_create_var (cfg, type_from_stack_type (sp [i]), OP_LOCAL);
1437 bb->out_stack [i] = mono_compile_get_interface_var (cfg, i, sp [i]);
/* Propagate our out_stack to successors that have no in_stack yet;
 * a depth mismatch at a join point makes the method unverifiable. */
1442 for (i = 0; i < bb->out_count; ++i) {
1443 outb = bb->out_bb [i];
1444 /* exception handlers are linked, but they should not be considered for stack args */
1445 if (outb->flags & BB_EXCEPTION_HANDLER)
1447 if (outb->in_scount) {
1448 if (outb->in_scount != bb->out_scount) {
1449 cfg->unverifiable = TRUE;
1452 continue; /* check they are the same locals */
1454 outb->in_scount = count;
1455 outb->in_stack = bb->out_stack;
1458 locals = bb->out_stack;
/* Spill the stack items into the chosen locals and replace the stack
 * entries with loads from those locals. */
1460 for (i = 0; i < count; ++i) {
1461 EMIT_NEW_TEMPSTORE (cfg, inst, locals [i]->inst_c0, sp [i]);
1462 inst->cil_code = sp [i]->cil_code;
1463 sp [i] = locals [i];
1464 if (cfg->verbose_level > 3)
1465 printf ("storing %d to temp %d\n", i, (int)locals [i]->inst_c0);
1469 * It is possible that the out bblocks already have in_stack assigned, and
1470 * the in_stacks differ. In this case, we will store to all the different
1477 /* Find a bblock which has a different in_stack */
1479 while (bindex < bb->out_count) {
1480 outb = bb->out_bb [bindex];
1481 /* exception handlers are linked, but they should not be considered for stack args */
1482 if (outb->flags & BB_EXCEPTION_HANDLER) {
1486 if (outb->in_stack != locals) {
/* Duplicate the stores for this successor's distinct variable set. */
1487 for (i = 0; i < count; ++i) {
1488 EMIT_NEW_TEMPSTORE (cfg, inst, outb->in_stack [i]->inst_c0, sp [i]);
1489 inst->cil_code = sp [i]->cil_code;
1490 sp [i] = locals [i];
1491 if (cfg->verbose_level > 3)
1492 printf ("storing %d to temp %d\n", i, (int)outb->in_stack [i]->inst_c0);
1494 locals = outb->in_stack;
1503 /* Emit code which loads interface_offsets [klass->interface_id]
1504 * The array is stored in memory before vtable.
/* The array grows downwards from the vtable pointer, hence the negative
 * offset -((iid + 1) * SIZEOF_VOID_P) in the JIT path. */
1507 mini_emit_load_intf_reg_vtable (MonoCompile *cfg, int intf_reg, int vtable_reg, MonoClass *klass)
/* AOT: the interface id is not known at compile time, so load it via an
 * ADJUSTED_IID patch and index relative to the vtable at runtime.
 * NOTE(review): ADJUSTED_IID presumably encodes the pre-scaled/negated
 * offset form of the iid — confirm against the AOT compiler. */
1509 if (cfg->compile_aot) {
1510 int ioffset_reg = alloc_preg (cfg);
1511 int iid_reg = alloc_preg (cfg);
1513 MONO_EMIT_NEW_AOTCONST (cfg, iid_reg, klass, MONO_PATCH_INFO_ADJUSTED_IID);
1514 MONO_EMIT_NEW_BIALU (cfg, OP_PADD, ioffset_reg, iid_reg, vtable_reg);
1515 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, intf_reg, ioffset_reg, 0);
/* JIT: iid is a compile-time constant, fold it into the load offset. */
1518 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, intf_reg, vtable_reg, -((klass->interface_id + 1) * SIZEOF_VOID_P));
/*
 * mini_emit_interface_bitmap_check:
 * Emit IR which sets 'intf_bit_reg' to a nonzero value iff bit
 * klass->interface_id is set in the interface bitmap located at
 * [base_reg + offset].  Three strategies are visible below:
 * a compressed bitmap (decoded via the mono_class_interface_match icall),
 * an AOT path that computes the byte/bit position at runtime from an IID
 * patch, and a JIT path that folds the constant iid into a byte load+mask.
 */
1523 mini_emit_interface_bitmap_check (MonoCompile *cfg, int intf_bit_reg, int base_reg, int offset, MonoClass *klass)
1525 int ibitmap_reg = alloc_preg (cfg);
1526 #ifdef COMPRESSED_INTERFACE_BITMAP
1528 MonoInst *res, *ins;
1529 NEW_LOAD_MEMBASE (cfg, ins, OP_LOAD_MEMBASE, ibitmap_reg, base_reg, offset);
1530 MONO_ADD_INS (cfg->cbb, ins);
1532 if (cfg->compile_aot)
1533 EMIT_NEW_AOTCONST (cfg, args [1], MONO_PATCH_INFO_IID, klass);
1535 EMIT_NEW_ICONST (cfg, args [1], klass->interface_id);
/* Compressed bitmaps are decoded by a runtime helper call. */
1536 res = mono_emit_jit_icall (cfg, mono_class_interface_match, args);
1537 MONO_EMIT_NEW_UNALU (cfg, OP_MOVE, intf_bit_reg, res->dreg);
1539 int ibitmap_byte_reg = alloc_preg (cfg);
1541 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, ibitmap_reg, base_reg, offset);
1543 if (cfg->compile_aot) {
/* AOT: iid is only known at load time; compute byte index (iid >> 3)
 * and bit mask (1 << (iid & 7)) in emitted code. */
1544 int iid_reg = alloc_preg (cfg);
1545 int shifted_iid_reg = alloc_preg (cfg);
1546 int ibitmap_byte_address_reg = alloc_preg (cfg);
1547 int masked_iid_reg = alloc_preg (cfg);
1548 int iid_one_bit_reg = alloc_preg (cfg);
1549 int iid_bit_reg = alloc_preg (cfg);
1550 MONO_EMIT_NEW_AOTCONST (cfg, iid_reg, klass, MONO_PATCH_INFO_IID);
1551 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_SHR_IMM, shifted_iid_reg, iid_reg, 3);
1552 MONO_EMIT_NEW_BIALU (cfg, OP_PADD, ibitmap_byte_address_reg, ibitmap_reg, shifted_iid_reg);
1553 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADU1_MEMBASE, ibitmap_byte_reg, ibitmap_byte_address_reg, 0);
1554 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_IAND_IMM, masked_iid_reg, iid_reg, 7);
1555 MONO_EMIT_NEW_ICONST (cfg, iid_one_bit_reg, 1);
1556 MONO_EMIT_NEW_BIALU (cfg, OP_ISHL, iid_bit_reg, iid_one_bit_reg, masked_iid_reg);
1557 MONO_EMIT_NEW_BIALU (cfg, OP_IAND, intf_bit_reg, ibitmap_byte_reg, iid_bit_reg);
/* JIT: constant iid lets both the byte offset and the mask fold away. */
1559 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADI1_MEMBASE, ibitmap_byte_reg, ibitmap_reg, klass->interface_id >> 3);
1560 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_AND_IMM, intf_bit_reg, ibitmap_byte_reg, 1 << (klass->interface_id & 7));
1566 * Emit code which loads into "intf_bit_reg" a nonzero value if the MonoClass
1567 * stored in "klass_reg" implements the interface "klass".
/* Thin wrapper: point the bitmap check at MonoClass.interface_bitmap. */
1570 mini_emit_load_intf_bit_reg_class (MonoCompile *cfg, int intf_bit_reg, int klass_reg, MonoClass *klass)
1572 mini_emit_interface_bitmap_check (cfg, intf_bit_reg, klass_reg, MONO_STRUCT_OFFSET (MonoClass, interface_bitmap), klass);
1576 * Emit code which loads into "intf_bit_reg" a nonzero value if the MonoVTable
1577 * stored in "vtable_reg" implements the interface "klass".
/* Thin wrapper: point the bitmap check at MonoVTable.interface_bitmap. */
1580 mini_emit_load_intf_bit_reg_vtable (MonoCompile *cfg, int intf_bit_reg, int vtable_reg, MonoClass *klass)
1582 mini_emit_interface_bitmap_check (cfg, intf_bit_reg, vtable_reg, MONO_STRUCT_OFFSET (MonoVTable, interface_bitmap), klass);
1586 * Emit code which checks whether the interface id of @klass is smaller
1587 * than the value given by max_iid_reg.
/* On failure (max_iid < iid, unsigned): branch to 'false_target' if one
 * was supplied, otherwise throw InvalidCastException. */
1590 mini_emit_max_iid_check (MonoCompile *cfg, int max_iid_reg, MonoClass *klass,
1591 MonoBasicBlock *false_target)
/* AOT: iid comes from a runtime patch; JIT: it is an immediate. */
1593 if (cfg->compile_aot) {
1594 int iid_reg = alloc_preg (cfg);
1595 MONO_EMIT_NEW_AOTCONST (cfg, iid_reg, klass, MONO_PATCH_INFO_IID);
1596 MONO_EMIT_NEW_BIALU (cfg, OP_COMPARE, -1, max_iid_reg, iid_reg);
1599 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, max_iid_reg, klass->interface_id);
1601 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBLT_UN, false_target);
1603 MONO_EMIT_NEW_COND_EXC (cfg, LT_UN, "InvalidCastException");
1606 /* Same as above, but obtains max_iid from a vtable */
1608 mini_emit_max_iid_check_vtable (MonoCompile *cfg, int vtable_reg, MonoClass *klass,
1609 MonoBasicBlock *false_target)
1611 int max_iid_reg = alloc_preg (cfg);
/* max_interface_id is a 16-bit field, hence the unsigned 2-byte load. */
1613 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADU2_MEMBASE, max_iid_reg, vtable_reg, MONO_STRUCT_OFFSET (MonoVTable, max_interface_id));
1614 mini_emit_max_iid_check (cfg, max_iid_reg, klass, false_target);
1617 /* Same as above, but obtains max_iid from a klass */
1619 mini_emit_max_iid_check_class (MonoCompile *cfg, int klass_reg, MonoClass *klass,
1620 MonoBasicBlock *false_target)
1622 int max_iid_reg = alloc_preg (cfg);
/* max_interface_id is a 16-bit field, hence the unsigned 2-byte load. */
1624 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADU2_MEMBASE, max_iid_reg, klass_reg, MONO_STRUCT_OFFSET (MonoClass, max_interface_id));
1625 mini_emit_max_iid_check (cfg, max_iid_reg, klass, false_target);
/*
 * mini_emit_isninst_cast_inst:
 * Emit an isinst-style subclass test: check whether the class in
 * 'klass_reg' derives from 'klass' by comparing the supertypes table
 * entry at depth klass->idepth - 1 against klass (or klass_ins / a
 * CLASSCONST in the shared/AOT cases).  Branches to 'true_target' on
 * match; the idepth guard falls through to 'false_target'.
 * NOTE(review): sampled extract — some lines between the visible ones
 * are missing.
 */
1629 mini_emit_isninst_cast_inst (MonoCompile *cfg, int klass_reg, MonoClass *klass, MonoInst *klass_ins, MonoBasicBlock *false_target, MonoBasicBlock *true_target)
1631 int idepth_reg = alloc_preg (cfg);
1632 int stypes_reg = alloc_preg (cfg);
1633 int stype = alloc_preg (cfg);
/* Make sure klass->supertypes/idepth are initialized before we use them. */
1635 mono_class_setup_supertypes (klass);
/* Deep hierarchies: verify the candidate's supertypes table is long
 * enough before indexing it. */
1637 if (klass->idepth > MONO_DEFAULT_SUPERTABLE_SIZE) {
1638 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADU2_MEMBASE, idepth_reg, klass_reg, MONO_STRUCT_OFFSET (MonoClass, idepth));
1639 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, idepth_reg, klass->idepth);
1640 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBLT_UN, false_target);
1642 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, stypes_reg, klass_reg, MONO_STRUCT_OFFSET (MonoClass, supertypes));
1643 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, stype, stypes_reg, ((klass->idepth - 1) * SIZEOF_VOID_P));
/* Compare against a runtime-provided class, an AOT constant, or the
 * literal pointer, in that order of preference. */
1645 MONO_EMIT_NEW_BIALU (cfg, OP_COMPARE, -1, stype, klass_ins->dreg);
1646 } else if (cfg->compile_aot) {
1647 int const_reg = alloc_preg (cfg);
1648 MONO_EMIT_NEW_CLASSCONST (cfg, const_reg, klass);
1649 MONO_EMIT_NEW_BIALU (cfg, OP_COMPARE, -1, stype, const_reg);
1651 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, stype, klass);
1653 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBEQ, true_target);
/* Convenience wrapper: subclass test with no runtime class instruction. */
1657 mini_emit_isninst_cast (MonoCompile *cfg, int klass_reg, MonoClass *klass, MonoBasicBlock *false_target, MonoBasicBlock *true_target)
1659 mini_emit_isninst_cast_inst (cfg, klass_reg, klass, NULL, false_target, true_target);
/*
 * mini_emit_iface_cast:
 * Emit an interface cast check against the MonoVTable in 'vtable_reg':
 * first range-check the interface id, then test the interface bitmap.
 * Branches to true_target on success if given, otherwise throws
 * InvalidCastException when the bit is clear.
 */
1663 mini_emit_iface_cast (MonoCompile *cfg, int vtable_reg, MonoClass *klass, MonoBasicBlock *false_target, MonoBasicBlock *true_target)
1665 int intf_reg = alloc_preg (cfg);
1667 mini_emit_max_iid_check_vtable (cfg, vtable_reg, klass, false_target);
1668 mini_emit_load_intf_bit_reg_vtable (cfg, intf_reg, vtable_reg, klass);
1669 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, intf_reg, 0);
1671 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBNE_UN, true_target);
1673 MONO_EMIT_NEW_COND_EXC (cfg, EQ, "InvalidCastException");
1677 * Variant of the above that takes a register to the class, not the vtable.
1680 mini_emit_iface_class_cast (MonoCompile *cfg, int klass_reg, MonoClass *klass, MonoBasicBlock *false_target, MonoBasicBlock *true_target)
1682 int intf_bit_reg = alloc_preg (cfg);
/* Range-check the iid, then test the MonoClass interface bitmap bit. */
1684 mini_emit_max_iid_check_class (cfg, klass_reg, klass, false_target);
1685 mini_emit_load_intf_bit_reg_class (cfg, intf_bit_reg, klass_reg, klass);
1686 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, intf_bit_reg, 0);
1688 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBNE_UN, true_target);
/* Bit clear and no explicit target: the cast fails with an exception. */
1690 MONO_EMIT_NEW_COND_EXC (cfg, EQ, "InvalidCastException");
/*
 * mini_emit_class_check_inst:
 * Emit an exact class equality check: the class in 'klass_reg' must equal
 * 'klass' (or the runtime class in 'klass_inst' when given), otherwise an
 * InvalidCastException is thrown.
 */
1694 mini_emit_class_check_inst (MonoCompile *cfg, int klass_reg, MonoClass *klass, MonoInst *klass_inst)
/* Prefer a runtime-supplied class; fall back to AOT class constant or
 * the literal class pointer under the JIT. */
1697 MONO_EMIT_NEW_BIALU (cfg, OP_COMPARE, -1, klass_reg, klass_inst->dreg);
1698 } else if (cfg->compile_aot) {
1699 int const_reg = alloc_preg (cfg);
1700 MONO_EMIT_NEW_CLASSCONST (cfg, const_reg, klass);
1701 MONO_EMIT_NEW_BIALU (cfg, OP_COMPARE, -1, klass_reg, const_reg);
1703 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, klass_reg, klass);
1705 MONO_EMIT_NEW_COND_EXC (cfg, NE_UN, "InvalidCastException");
/* Convenience wrapper: exact class check with no runtime class instruction. */
1709 mini_emit_class_check (MonoCompile *cfg, int klass_reg, MonoClass *klass)
1711 mini_emit_class_check_inst (cfg, klass_reg, klass, NULL);
/*
 * mini_emit_class_check_branch:
 * Compare the class in 'klass_reg' against 'klass' and branch to 'target'
 * using 'branch_op' (e.g. OP_PBEQ / OP_PBNE_UN) based on the comparison.
 */
1715 mini_emit_class_check_branch (MonoCompile *cfg, int klass_reg, MonoClass *klass, int branch_op, MonoBasicBlock *target)
/* AOT cannot embed raw class pointers, so load one via a patch first. */
1717 if (cfg->compile_aot) {
1718 int const_reg = alloc_preg (cfg);
1719 MONO_EMIT_NEW_CLASSCONST (cfg, const_reg, klass);
1720 MONO_EMIT_NEW_BIALU (cfg, OP_COMPARE, -1, klass_reg, const_reg);
1722 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, klass_reg, klass);
1724 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, branch_op, target);
/* Forward declaration: mini_emit_castclass_inst below recurses through it
 * for arrays of arrays. */
1728 mini_emit_castclass (MonoCompile *cfg, int obj_reg, int klass_reg, MonoClass *klass, MonoBasicBlock *object_is_null);
/*
 * mini_emit_castclass_inst:
 * Emit a castclass check for the class in 'klass_reg' against 'klass',
 * throwing InvalidCastException on failure.  The visible array branch
 * checks rank and then the element (cast_class); the non-array branch
 * walks the supertypes table.  'object_is_null' is the block to branch to
 * when the check is satisfied trivially.
 * NOTE(review): sampled extract — some lines between the visible ones are
 * missing; only visible behavior is documented.
 */
1731 mini_emit_castclass_inst (MonoCompile *cfg, int obj_reg, int klass_reg, MonoClass *klass, MonoInst *klass_inst, MonoBasicBlock *object_is_null)
1734 int rank_reg = alloc_preg (cfg);
1735 int eclass_reg = alloc_preg (cfg);
1737 g_assert (!klass_inst);
/* Ranks must match exactly for an array cast. */
1738 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADU1_MEMBASE, rank_reg, klass_reg, MONO_STRUCT_OFFSET (MonoClass, rank));
1739 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, rank_reg, klass->rank);
1740 MONO_EMIT_NEW_COND_EXC (cfg, NE_UN, "InvalidCastException");
1741 // MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, vtable_reg, MONO_STRUCT_OFFSET (MonoVTable, klass));
1742 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, eclass_reg, klass_reg, MONO_STRUCT_OFFSET (MonoClass, cast_class));
/* Special-case element types involving System.Object / System.Enum,
 * which have extra assignability rules for enums. */
1743 if (klass->cast_class == mono_defaults.object_class) {
1744 int parent_reg = alloc_preg (cfg);
1745 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, parent_reg, eclass_reg, MONO_STRUCT_OFFSET (MonoClass, parent));
1746 mini_emit_class_check_branch (cfg, parent_reg, mono_defaults.enum_class->parent, OP_PBNE_UN, object_is_null);
1747 mini_emit_class_check (cfg, eclass_reg, mono_defaults.enum_class);
1748 } else if (klass->cast_class == mono_defaults.enum_class->parent) {
1749 mini_emit_class_check_branch (cfg, eclass_reg, mono_defaults.enum_class->parent, OP_PBEQ, object_is_null);
1750 mini_emit_class_check (cfg, eclass_reg, mono_defaults.enum_class);
1751 } else if (klass->cast_class == mono_defaults.enum_class) {
1752 mini_emit_class_check (cfg, eclass_reg, mono_defaults.enum_class);
1753 } else if (klass->cast_class->flags & TYPE_ATTRIBUTE_INTERFACE) {
1754 mini_emit_iface_class_cast (cfg, eclass_reg, klass->cast_class, NULL, NULL);
1756 // Pass -1 as obj_reg to skip the check below for arrays of arrays
1757 mini_emit_castclass (cfg, -1, eclass_reg, klass->cast_class, object_is_null);
/* SZARRAY means vector (rank 1, zero lower bound): reject arrays that
 * carry a bounds descriptor. */
1760 if ((klass->rank == 1) && (klass->byval_arg.type == MONO_TYPE_SZARRAY) && (obj_reg != -1)) {
1761 /* Check that the object is a vector too */
1762 int bounds_reg = alloc_preg (cfg);
1763 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, bounds_reg, obj_reg, MONO_STRUCT_OFFSET (MonoArray, bounds));
1764 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, bounds_reg, 0);
1765 MONO_EMIT_NEW_COND_EXC (cfg, NE_UN, "InvalidCastException");
/* Non-array path: supertypes-table subclass check, throwing on failure. */
1768 int idepth_reg = alloc_preg (cfg);
1769 int stypes_reg = alloc_preg (cfg);
1770 int stype = alloc_preg (cfg);
1772 mono_class_setup_supertypes (klass);
1774 if (klass->idepth > MONO_DEFAULT_SUPERTABLE_SIZE) {
1775 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADU2_MEMBASE, idepth_reg, klass_reg, MONO_STRUCT_OFFSET (MonoClass, idepth));
1776 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, idepth_reg, klass->idepth);
1777 MONO_EMIT_NEW_COND_EXC (cfg, LT_UN, "InvalidCastException");
1779 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, stypes_reg, klass_reg, MONO_STRUCT_OFFSET (MonoClass, supertypes));
1780 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, stype, stypes_reg, ((klass->idepth - 1) * SIZEOF_VOID_P));
1781 mini_emit_class_check_inst (cfg, stype, klass, klass_inst);
/* Convenience wrapper: castclass with no runtime class instruction. */
1786 mini_emit_castclass (MonoCompile *cfg, int obj_reg, int klass_reg, MonoClass *klass, MonoBasicBlock *object_is_null)
1788 mini_emit_castclass_inst (cfg, obj_reg, klass_reg, klass, NULL, object_is_null);
/*
 * mini_emit_memset:
 * Emit inline IR which sets 'size' bytes at [destreg + offset] to 'val'.
 * Small aligned sizes use a single store-immediate; larger regions store
 * a value register in register-sized chunks (when unaligned access is
 * allowed), then 2-byte and 1-byte tails.
 * NOTE(review): sampled extract — loop headers/closers between the
 * visible lines are missing.
 */
1792 mini_emit_memset (MonoCompile *cfg, int destreg, int offset, int size, int val, int align)
/* Only zero-fill is supported on the general path. */
1796 g_assert (val == 0);
/* Fast path: the whole region fits one aligned store-immediate. */
1801 if ((size <= SIZEOF_REGISTER) && (size <= align)) {
1804 MONO_EMIT_NEW_STORE_MEMBASE_IMM (cfg, OP_STOREI1_MEMBASE_IMM, destreg, offset, val);
1807 MONO_EMIT_NEW_STORE_MEMBASE_IMM (cfg, OP_STOREI2_MEMBASE_IMM, destreg, offset, val);
1810 MONO_EMIT_NEW_STORE_MEMBASE_IMM (cfg, OP_STOREI4_MEMBASE_IMM, destreg, offset, val);
1812 #if SIZEOF_REGISTER == 8
1814 MONO_EMIT_NEW_STORE_MEMBASE_IMM (cfg, OP_STOREI8_MEMBASE_IMM, destreg, offset, val);
/* General path: materialize 'val' in a register once, then store it. */
1820 val_reg = alloc_preg (cfg);
1822 if (SIZEOF_REGISTER == 8)
1823 MONO_EMIT_NEW_I8CONST (cfg, val_reg, val);
1825 MONO_EMIT_NEW_ICONST (cfg, val_reg, val);
/* Byte stores until the destination is aligned. */
1828 /* This could be optimized further if necessary */
1830 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI1_MEMBASE_REG, destreg, offset, val_reg);
1837 #if !NO_UNALIGNED_ACCESS
1838 if (SIZEOF_REGISTER == 8) {
1840 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI4_MEMBASE_REG, destreg, offset, val_reg);
1845 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI8_MEMBASE_REG, destreg, offset, val_reg);
/* Remaining 4/2/1-byte tails. */
1853 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI4_MEMBASE_REG, destreg, offset, val_reg);
1858 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI2_MEMBASE_REG, destreg, offset, val_reg);
1863 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI1_MEMBASE_REG, destreg, offset, val_reg);
/*
 * mini_emit_memcpy:
 * Emit inline IR copying 'size' bytes from [srcreg + soffset] to
 * [destreg + doffset]: byte copies until aligned, then register-sized
 * (8/4-byte) chunks where unaligned access permits, then 2/1-byte tails.
 * NOTE(review): sampled extract — loop headers/closers between the
 * visible lines are missing.
 */
1870 mini_emit_memcpy (MonoCompile *cfg, int destreg, int doffset, int srcreg, int soffset, int size, int align)
1877 /*FIXME arbitrary hack to avoid unbound code expansion.*/
1878 g_assert (size < 10000);
/* Byte copies until the pointers are aligned. */
1881 /* This could be optimized further if necessary */
1883 cur_reg = alloc_preg (cfg);
1884 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADI1_MEMBASE, cur_reg, srcreg, soffset);
1885 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI1_MEMBASE_REG, destreg, doffset, cur_reg);
1892 #if !NO_UNALIGNED_ACCESS
1893 if (SIZEOF_REGISTER == 8) {
1895 cur_reg = alloc_preg (cfg);
1896 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADI8_MEMBASE, cur_reg, srcreg, soffset);
1897 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI8_MEMBASE_REG, destreg, doffset, cur_reg);
/* 4-byte chunks, then 2-byte and 1-byte tails. */
1906 cur_reg = alloc_preg (cfg);
1907 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADI4_MEMBASE, cur_reg, srcreg, soffset);
1908 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI4_MEMBASE_REG, destreg, doffset, cur_reg);
1914 cur_reg = alloc_preg (cfg);
1915 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADI2_MEMBASE, cur_reg, srcreg, soffset);
1916 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI2_MEMBASE_REG, destreg, doffset, cur_reg);
1922 cur_reg = alloc_preg (cfg);
1923 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADI1_MEMBASE, cur_reg, srcreg, soffset);
1924 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI1_MEMBASE_REG, destreg, doffset, cur_reg);
/*
 * emit_tls_set:
 * Emit IR which stores the value in 'sreg1' into the TLS slot identified
 * by 'tls_key'.  Under AOT the slot offset is loaded via a patch
 * (OP_TLS_SET_REG); under the JIT it is an immediate (OP_TLS_SET).
 */
1932 emit_tls_set (MonoCompile *cfg, int sreg1, int tls_key)
1936 if (cfg->compile_aot) {
/* Offset resolved at load time through a TLS-offset constant. */
1937 EMIT_NEW_TLS_OFFSETCONST (cfg, c, tls_key);
1938 MONO_INST_NEW (cfg, ins, OP_TLS_SET_REG);
1940 ins->sreg2 = c->dreg;
1941 MONO_ADD_INS (cfg->cbb, ins);
/* JIT: the TLS offset is known now and embedded in the instruction. */
1943 MONO_INST_NEW (cfg, ins, OP_TLS_SET);
1945 ins->inst_offset = mini_get_tls_offset (tls_key);
1946 MONO_ADD_INS (cfg->cbb, ins);
1953 * Emit IR to push the current LMF onto the LMF stack.
/* NOTE(review): sampled extract — some branch/closing lines between the
 * visible ones are missing; only visible behavior is documented. */
1956 emit_push_lmf (MonoCompile *cfg)
1959 * Emit IR to push the LMF:
1960 * lmf_addr = <lmf_addr from tls>
1961 * lmf->lmf_addr = lmf_addr
1962 * lmf->prev_lmf = *lmf_addr
1965 int lmf_reg, prev_lmf_reg;
1966 MonoInst *ins, *lmf_ins;
/* Fast path: the whole MonoLMF lives in TLS; link the previous LMF and
 * install ours with a single TLS store. */
1971 if (cfg->lmf_ir_mono_lmf && mini_tls_get_supported (cfg, TLS_KEY_LMF)) {
1972 /* Load current lmf */
1973 lmf_ins = mono_get_lmf_intrinsic (cfg);
1975 MONO_ADD_INS (cfg->cbb, lmf_ins);
1976 EMIT_NEW_VARLOADA (cfg, ins, cfg->lmf_var, NULL);
1977 lmf_reg = ins->dreg;
1978 /* Save previous_lmf */
1979 EMIT_NEW_STORE_MEMBASE (cfg, ins, OP_STORE_MEMBASE_REG, lmf_reg, MONO_STRUCT_OFFSET (MonoLMF, previous_lmf), lmf_ins->dreg);
1981 emit_tls_set (cfg, lmf_reg, TLS_KEY_LMF);
1984 * Store lmf_addr in a variable, so it can be allocated to a global register.
1986 if (!cfg->lmf_addr_var)
1987 cfg->lmf_addr_var = mono_compile_create_var (cfg, &mono_defaults.int_class->byval_arg, OP_LOCAL);
/* Obtain lmf_addr, preferring intrinsics over icalls. */
1990 ins = mono_get_jit_tls_intrinsic (cfg);
1992 int jit_tls_dreg = ins->dreg;
1994 MONO_ADD_INS (cfg->cbb, ins);
1995 lmf_reg = alloc_preg (cfg);
1996 EMIT_NEW_BIALU_IMM (cfg, lmf_ins, OP_PADD_IMM, lmf_reg, jit_tls_dreg, MONO_STRUCT_OFFSET (MonoJitTlsData, lmf));
1998 lmf_ins = mono_emit_jit_icall (cfg, mono_get_lmf_addr, NULL);
2001 lmf_ins = mono_get_lmf_addr_intrinsic (cfg);
2003 MONO_ADD_INS (cfg->cbb, lmf_ins);
2006 MonoInst *args [16], *jit_tls_ins, *ins;
2008 /* Inline mono_get_lmf_addr () */
2009 /* jit_tls = pthread_getspecific (mono_jit_tls_id); lmf_addr = &jit_tls->lmf; */
2011 /* Load mono_jit_tls_id */
2012 EMIT_NEW_AOTCONST (cfg, args [0], MONO_PATCH_INFO_JIT_TLS_ID, NULL);
2013 /* call pthread_getspecific () */
2014 jit_tls_ins = mono_emit_jit_icall (cfg, pthread_getspecific, args);
2015 /* lmf_addr = &jit_tls->lmf */
2016 EMIT_NEW_BIALU_IMM (cfg, ins, OP_PADD_IMM, cfg->lmf_addr_var->dreg, jit_tls_ins->dreg, MONO_STRUCT_OFFSET (MonoJitTlsData, lmf));
2019 lmf_ins = mono_emit_jit_icall (cfg, mono_get_lmf_addr, NULL);
2023 lmf_ins->dreg = cfg->lmf_addr_var->dreg;
2025 EMIT_NEW_VARLOADA (cfg, ins, cfg->lmf_var, NULL);
2026 lmf_reg = ins->dreg;
2028 prev_lmf_reg = alloc_preg (cfg);
2029 /* Save previous_lmf */
2030 EMIT_NEW_LOAD_MEMBASE (cfg, ins, OP_LOAD_MEMBASE, prev_lmf_reg, cfg->lmf_addr_var->dreg, 0);
2031 EMIT_NEW_STORE_MEMBASE (cfg, ins, OP_STORE_MEMBASE_REG, lmf_reg, MONO_STRUCT_OFFSET (MonoLMF, previous_lmf), prev_lmf_reg);
/* Set new lmf: *lmf_addr = &lmf */
2033 EMIT_NEW_STORE_MEMBASE (cfg, ins, OP_STORE_MEMBASE_REG, cfg->lmf_addr_var->dreg, 0, lmf_reg);
2040 * Emit IR to pop the current LMF from the LMF stack.
2043 emit_pop_lmf (MonoCompile *cfg)
2045 int lmf_reg, lmf_addr_reg, prev_lmf_reg;
2051 EMIT_NEW_VARLOADA (cfg, ins, cfg->lmf_var, NULL);
2052 lmf_reg = ins->dreg;
/* Fast path (LMF in TLS): restore previous_lmf with a TLS store. */
2054 if (cfg->lmf_ir_mono_lmf && mini_tls_get_supported (cfg, TLS_KEY_LMF)) {
2055 /* Load previous_lmf */
2056 prev_lmf_reg = alloc_preg (cfg);
2057 EMIT_NEW_LOAD_MEMBASE (cfg, ins, OP_LOAD_MEMBASE, prev_lmf_reg, lmf_reg, MONO_STRUCT_OFFSET (MonoLMF, previous_lmf));
2059 emit_tls_set (cfg, prev_lmf_reg, TLS_KEY_LMF);
2062 * Emit IR to pop the LMF:
2063 * *(lmf->lmf_addr) = lmf->prev_lmf
2065 /* This could be called before emit_push_lmf () */
2066 if (!cfg->lmf_addr_var)
2067 cfg->lmf_addr_var = mono_compile_create_var (cfg, &mono_defaults.int_class->byval_arg, OP_LOCAL);
2068 lmf_addr_reg = cfg->lmf_addr_var->dreg;
2070 prev_lmf_reg = alloc_preg (cfg);
2071 EMIT_NEW_LOAD_MEMBASE (cfg, ins, OP_LOAD_MEMBASE, prev_lmf_reg, lmf_reg, MONO_STRUCT_OFFSET (MonoLMF, previous_lmf));
2072 EMIT_NEW_STORE_MEMBASE (cfg, ins, OP_STORE_MEMBASE_REG, lmf_addr_reg, 0, prev_lmf_reg);
/*
 * emit_instrumentation_call:
 * When enter/leave profiling is enabled, emit an icall to 'func' passing
 * the current method as its single argument.
 */
2077 emit_instrumentation_call (MonoCompile *cfg, void *func)
2079 MonoInst *iargs [1];
2082 * Avoid instrumenting inlined methods since it can
2083 * distort profiling results.
2085 if (cfg->method != cfg->current_method)
2088 if (cfg->prof_options & MONO_PROFILE_ENTER_LEAVE) {
2089 EMIT_NEW_METHODCONST (cfg, iargs [0], cfg->method);
2090 mono_emit_jit_icall (cfg, func, iargs);
/*
 * ret_type_to_call_opcode:
 * Pick the call IR opcode for a method whose return type is 'type':
 * OP_*CALL / OP_*CALL_REG (calli) / OP_*CALL_MEMBASE (virtual), with the
 * prefix (VOID/F/L/V or none) chosen by the return kind.
 * NOTE(review): sampled extract — some case labels and goto/handle_enum
 * lines between the visible ones are missing.
 */
2095 ret_type_to_call_opcode (MonoType *type, int calli, int virt, MonoGenericSharingContext *gsctx)
2098 return calli? OP_CALL_REG: virt? OP_CALL_MEMBASE: OP_CALL;
/* Resolve shared generic parameters before classifying. */
2101 type = mini_get_basic_type_from_generic (gsctx, type);
2102 type = mini_replace_type (type);
2103 switch (type->type) {
2104 case MONO_TYPE_VOID:
2105 return calli? OP_VOIDCALL_REG: virt? OP_VOIDCALL_MEMBASE: OP_VOIDCALL;
2108 case MONO_TYPE_BOOLEAN:
2111 case MONO_TYPE_CHAR:
2114 return calli? OP_CALL_REG: virt? OP_CALL_MEMBASE: OP_CALL;
2118 case MONO_TYPE_FNPTR:
2119 return calli? OP_CALL_REG: virt? OP_CALL_MEMBASE: OP_CALL;
2120 case MONO_TYPE_CLASS:
2121 case MONO_TYPE_STRING:
2122 case MONO_TYPE_OBJECT:
2123 case MONO_TYPE_SZARRAY:
2124 case MONO_TYPE_ARRAY:
2125 return calli? OP_CALL_REG: virt? OP_CALL_MEMBASE: OP_CALL;
2128 return calli? OP_LCALL_REG: virt? OP_LCALL_MEMBASE: OP_LCALL;
2131 return calli? OP_FCALL_REG: virt? OP_FCALL_MEMBASE: OP_FCALL;
2132 case MONO_TYPE_VALUETYPE:
/* Enums are classified by their underlying integral type. */
2133 if (type->data.klass->enumtype) {
2134 type = mono_class_enum_basetype (type->data.klass);
2137 return calli? OP_VCALL_REG: virt? OP_VCALL_MEMBASE: OP_VCALL;
2138 case MONO_TYPE_TYPEDBYREF:
2139 return calli? OP_VCALL_REG: virt? OP_VCALL_MEMBASE: OP_VCALL;
2140 case MONO_TYPE_GENERICINST:
/* Re-classify using the generic container's open definition. */
2141 type = &type->data.generic_class->container_class->byval_arg;
2144 case MONO_TYPE_MVAR:
2146 return calli? OP_VCALL_REG: virt? OP_VCALL_MEMBASE: OP_VCALL;
2148 g_error ("unknown type 0x%02x in ret_type_to_call_opcode", type->type);
2154 * target_type_is_incompatible:
2155 * @cfg: MonoCompile context
2157 * Check that the item @arg on the evaluation stack can be stored
2158 * in the target type (can be a local, or field, etc).
2159 * The cfg arg can be used to check if we need verification or just
2162 * Returns: non-0 value if arg can't be stored on a target.
/* NOTE(review): sampled extract — some case labels/returns between the
 * visible lines are missing. */
2165 target_type_is_incompatible (MonoCompile *cfg, MonoType *target, MonoInst *arg)
2167 MonoType *simple_type;
2170 target = mini_replace_type (target);
2171 if (target->byref) {
2172 /* FIXME: check that the pointed to types match */
2173 if (arg->type == STACK_MP)
2174 return arg->klass != mono_class_from_mono_type (target);
2175 if (arg->type == STACK_PTR)
2180 simple_type = mono_type_get_underlying_type (target);
2181 switch (simple_type->type) {
2182 case MONO_TYPE_VOID:
2186 case MONO_TYPE_BOOLEAN:
2189 case MONO_TYPE_CHAR:
/* Small integers are held as I4 (or native-int) on the stack. */
2192 if (arg->type != STACK_I4 && arg->type != STACK_PTR)
2196 /* STACK_MP is needed when setting pinned locals */
2197 if (arg->type != STACK_I4 && arg->type != STACK_PTR && arg->type != STACK_MP)
2202 case MONO_TYPE_FNPTR:
2204 * Some opcodes like ldloca returns 'transient pointers' which can be stored in
2205 * in native int. (#688008).
2207 if (arg->type != STACK_I4 && arg->type != STACK_PTR && arg->type != STACK_MP)
2210 case MONO_TYPE_CLASS:
2211 case MONO_TYPE_STRING:
2212 case MONO_TYPE_OBJECT:
2213 case MONO_TYPE_SZARRAY:
2214 case MONO_TYPE_ARRAY:
2215 if (arg->type != STACK_OBJ)
2217 /* FIXME: check type compatibility */
2221 if (arg->type != STACK_I8)
2226 if (arg->type != STACK_R8)
/* Valuetype targets require the exact MonoClass to match too. */
2229 case MONO_TYPE_VALUETYPE:
2230 if (arg->type != STACK_VTYPE)
2232 klass = mono_class_from_mono_type (simple_type);
2233 if (klass != arg->klass)
2236 case MONO_TYPE_TYPEDBYREF:
2237 if (arg->type != STACK_VTYPE)
2239 klass = mono_class_from_mono_type (simple_type);
2240 if (klass != arg->klass)
2243 case MONO_TYPE_GENERICINST:
2244 if (mono_type_generic_inst_is_valuetype (simple_type)) {
2245 if (arg->type != STACK_VTYPE)
2247 klass = mono_class_from_mono_type (simple_type);
2248 if (klass != arg->klass)
2252 if (arg->type != STACK_OBJ)
2254 /* FIXME: check type compatibility */
/* Shared generic parameters: valuetype-constrained vars behave like
 * vtypes, others like object references. */
2258 case MONO_TYPE_MVAR:
2259 g_assert (cfg->generic_sharing_context);
2260 if (mini_type_var_is_vt (cfg, simple_type)) {
2261 if (arg->type != STACK_VTYPE)
2264 if (arg->type != STACK_OBJ)
2269 g_error ("unknown type 0x%02x in target_type_is_incompatible", simple_type->type);
2275 * Prepare arguments for passing to a function call.
2276 * Return a non-zero value if the arguments can't be passed to the given
2278 * The type checks are not yet complete and some conversions may need
2279 * casts on 32 or 64 bit architectures.
2281 * FIXME: implement this using target_type_is_incompatible ()
/* NOTE(review): sampled extract — some case labels/returns between the
 * visible lines are missing. */
2284 check_call_signature (MonoCompile *cfg, MonoMethodSignature *sig, MonoInst **args)
2286 MonoType *simple_type;
/* 'this' must look like an object or a (managed) pointer. */
2290 if (args [0]->type != STACK_OBJ && args [0]->type != STACK_MP && args [0]->type != STACK_PTR)
2294 for (i = 0; i < sig->param_count; ++i) {
2295 if (sig->params [i]->byref) {
2296 if (args [i]->type != STACK_MP && args [i]->type != STACK_PTR)
2300 simple_type = sig->params [i];
2301 simple_type = mini_get_basic_type_from_generic (cfg->generic_sharing_context, simple_type);
2303 switch (simple_type->type) {
2304 case MONO_TYPE_VOID:
2309 case MONO_TYPE_BOOLEAN:
2312 case MONO_TYPE_CHAR:
2315 if (args [i]->type != STACK_I4 && args [i]->type != STACK_PTR)
2321 case MONO_TYPE_FNPTR:
2322 if (args [i]->type != STACK_I4 && args [i]->type != STACK_PTR && args [i]->type != STACK_MP && args [i]->type != STACK_OBJ)
2325 case MONO_TYPE_CLASS:
2326 case MONO_TYPE_STRING:
2327 case MONO_TYPE_OBJECT:
2328 case MONO_TYPE_SZARRAY:
2329 case MONO_TYPE_ARRAY:
2330 if (args [i]->type != STACK_OBJ)
2335 if (args [i]->type != STACK_I8)
2340 if (args [i]->type != STACK_R8)
2343 case MONO_TYPE_VALUETYPE:
/* Enums are validated as their underlying integral type. */
2344 if (simple_type->data.klass->enumtype) {
2345 simple_type = mono_class_enum_basetype (simple_type->data.klass);
2348 if (args [i]->type != STACK_VTYPE)
2351 case MONO_TYPE_TYPEDBYREF:
2352 if (args [i]->type != STACK_VTYPE)
2355 case MONO_TYPE_GENERICINST:
2356 simple_type = &simple_type->data.generic_class->container_class->byval_arg;
2359 case MONO_TYPE_MVAR:
2361 if (args [i]->type != STACK_VTYPE)
2365 g_error ("unknown type 0x%02x in check_call_signature",
/*
 * callvirt_to_call:
 * Map an OP_*CALL_MEMBASE (virtual-call) opcode to its direct-call
 * counterpart.  NOTE(review): sampled extract — the switch header and
 * the return statements between the case labels are not visible here.
 */
2373 callvirt_to_call (int opcode)
2376 case OP_CALL_MEMBASE:
2378 case OP_VOIDCALL_MEMBASE:
2380 case OP_FCALL_MEMBASE:
2382 case OP_VCALL_MEMBASE:
2384 case OP_LCALL_MEMBASE:
/* Anything that is not a *_MEMBASE call is a caller bug. */
2387 g_assert_not_reached ();
2393 /* Either METHOD or IMT_ARG needs to be set */
/* Emit the hidden IMT/interface-method argument for an interface call:
 * either copy an existing 'imt_arg', load an AOT method constant, or
 * embed the MonoMethod pointer, then attach it to the call. */
2395 emit_imt_argument (MonoCompile *cfg, MonoCallInst *call, MonoMethod *method, MonoInst *imt_arg)
/* LLVM backend: always materialize the argument in a vreg and register
 * it as an outgoing arg so LLVM keeps it alive. */
2399 if (COMPILE_LLVM (cfg)) {
2400 method_reg = alloc_preg (cfg);
2403 MONO_EMIT_NEW_UNALU (cfg, OP_MOVE, method_reg, imt_arg->dreg);
2404 } else if (cfg->compile_aot) {
2405 MONO_EMIT_NEW_AOTCONST (cfg, method_reg, method, MONO_PATCH_INFO_METHODCONST);
2408 MONO_INST_NEW (cfg, ins, OP_PCONST);
2409 ins->inst_p0 = method;
2410 ins->dreg = method_reg;
2411 MONO_ADD_INS (cfg->cbb, ins);
2415 call->imt_arg_reg = method_reg;
2417 #ifdef MONO_ARCH_IMT_REG
2418 mono_call_inst_add_outarg_reg (cfg, call, method_reg, MONO_ARCH_IMT_REG, FALSE);
2420 /* Need this to keep the IMT arg alive */
2421 mono_call_inst_add_outarg_reg (cfg, call, method_reg, 0, FALSE);
/* Non-LLVM path: pass the argument in the architecture's IMT register
 * when one exists, else defer to the backend hook. */
2426 #ifdef MONO_ARCH_IMT_REG
2427 method_reg = alloc_preg (cfg);
2430 MONO_EMIT_NEW_UNALU (cfg, OP_MOVE, method_reg, imt_arg->dreg);
2431 } else if (cfg->compile_aot) {
2432 MONO_EMIT_NEW_AOTCONST (cfg, method_reg, method, MONO_PATCH_INFO_METHODCONST);
2435 MONO_INST_NEW (cfg, ins, OP_PCONST);
2436 ins->inst_p0 = method;
2437 ins->dreg = method_reg;
2438 MONO_ADD_INS (cfg->cbb, ins);
2441 mono_call_inst_add_outarg_reg (cfg, call, method_reg, MONO_ARCH_IMT_REG, FALSE);
2443 mono_arch_emit_imt_argument (cfg, call, imt_arg);
/* Allocate a MonoJumpInfo patch record from 'mp' and fill in its target.
 * NOTE(review): the lines assigning ip/type are not visible in this extract. */
2447 static MonoJumpInfo *
2448 mono_patch_info_new (MonoMemPool *mp, int ip, MonoJumpInfoType type, gconstpointer target)
2450 MonoJumpInfo *ji = mono_mempool_alloc (mp, sizeof (MonoJumpInfo));
2454 ji->data.target = target;
/* Return the generic context usage of 'klass', but only when compiling
 * with generic sharing enabled (otherwise the visible code falls through). */
2460 mini_class_check_context_used (MonoCompile *cfg, MonoClass *klass)
2462 if (cfg->generic_sharing_context)
2463 return mono_class_check_context_used (klass);
/* Same as mini_class_check_context_used (), for a MonoMethod. */
2469 mini_method_check_context_used (MonoCompile *cfg, MonoMethod *method)
2471 if (cfg->generic_sharing_context)
2472 return mono_method_check_context_used (method);
2478 * check_method_sharing:
2480 * Check whether the vtable or an mrgctx needs to be passed when calling CMETHOD.
/* Outputs: *out_pass_vtable / *out_pass_mrgctx (either pointer may be NULL). */
2483 check_method_sharing (MonoCompile *cfg, MonoMethod *cmethod, gboolean *out_pass_vtable, gboolean *out_pass_mrgctx)
2485 gboolean pass_vtable = FALSE;
2486 gboolean pass_mrgctx = FALSE;
/* Static or valuetype methods on generic classes may need the vtable as
 * the hidden rgctx argument. */
2488 if (((cmethod->flags & METHOD_ATTRIBUTE_STATIC) || cmethod->klass->valuetype) &&
2489 (cmethod->klass->generic_class || cmethod->klass->generic_container)) {
2490 gboolean sharable = FALSE;
2492 if (mono_method_is_generic_sharable (cmethod, TRUE)) {
2495 gboolean sharing_enabled = mono_class_generic_sharing_enabled (cmethod->klass);
2496 MonoGenericContext *context = mini_class_get_context (cmethod->klass);
2497 gboolean context_sharable = mono_generic_context_is_sharable (context, TRUE);
2499 sharable = sharing_enabled && context_sharable;
2503 * Pass vtable iff target method might
2504 * be shared, which means that sharing
2505 * is enabled for its class and its
2506 * context is sharable (and it's not a
2509 if (sharable && !(mini_method_get_context (cmethod) && mini_method_get_context (cmethod)->method_inst))
/* Generic methods (with a method_inst) take an mrgctx instead; the two
 * are mutually exclusive. */
2513 if (mini_method_get_context (cmethod) &&
2514 mini_method_get_context (cmethod)->method_inst) {
2515 g_assert (!pass_vtable);
2517 if (mono_method_is_generic_sharable (cmethod, TRUE)) {
2520 gboolean sharing_enabled = mono_class_generic_sharing_enabled (cmethod->klass);
2521 MonoGenericContext *context = mini_method_get_context (cmethod);
2522 gboolean context_sharable = mono_generic_context_is_sharable (context, TRUE);
2524 if (sharing_enabled && context_sharable)
/* gsharedvt signatures always require the mrgctx. */
2526 if (cfg->gsharedvt && mini_is_gsharedvt_signature (cfg, mono_method_signature (cmethod)))
2531 if (out_pass_vtable)
2532 *out_pass_vtable = pass_vtable;
2533 if (out_pass_mrgctx)
2534 *out_pass_mrgctx = pass_mrgctx;
/*
 * mono_emit_call_args:
 * Create a MonoCallInst for a call with signature SIG and arguments ARGS,
 * set up its return-value handling (valuetype returns go through vret_var /
 * OP_OUTARG_VTRETADDR), run the arch-specific outarg lowering, and return it.
 * Flags: CALLI (indirect), VIRTUAL, TAIL, RGCTX, UNBOX_TRAMPOLINE.
 */
2537 inline static MonoCallInst *
2538 mono_emit_call_args (MonoCompile *cfg, MonoMethodSignature *sig,
2539 MonoInst **args, int calli, int virtual, int tail, int rgctx, int unbox_trampoline)
2543 #ifdef MONO_ARCH_SOFT_FLOAT_FALLBACK
/* Tail calls leave the method, so the leave-instrumentation hook must fire here. */
2548 emit_instrumentation_call (cfg, mono_profiler_method_leave);
2550 MONO_INST_NEW_CALL (cfg, call, OP_TAILCALL);
2552 MONO_INST_NEW_CALL (cfg, call, ret_type_to_call_opcode (sig->ret, calli, virtual, cfg->generic_sharing_context));
2555 call->signature = sig;
2556 call->rgctx_reg = rgctx;
2557 sig_ret = mini_replace_type (sig->ret);
2559 type_to_eval_stack_type ((cfg), sig_ret, &call->inst);
/* NOTE(review): both this branch and the one at 2566 test mini_type_is_vtype;
 * the guard distinguishing them (likely the tail-call path) is in a line not
 * visible in this chunk — confirm against the full source. */
2562 if (mini_type_is_vtype (cfg, sig_ret)) {
2563 call->vret_var = cfg->vret_addr;
2564 //g_assert_not_reached ();
2566 } else if (mini_type_is_vtype (cfg, sig_ret)) {
2567 MonoInst *temp = mono_compile_create_var (cfg, sig_ret, OP_LOCAL);
2570 temp->backend.is_pinvoke = sig->pinvoke;
2573 * We use a new opcode OP_OUTARG_VTRETADDR instead of LDADDR for emitting the
2574 * address of return value to increase optimization opportunities.
2575 * Before vtype decomposition, the dreg of the call ins itself represents the
2576 * fact the call modifies the return value. After decomposition, the call will
2577 * be transformed into one of the OP_VOIDCALL opcodes, and the VTRETADDR opcode
2578 * will be transformed into an LDADDR.
2580 MONO_INST_NEW (cfg, loada, OP_OUTARG_VTRETADDR);
2581 loada->dreg = alloc_preg (cfg);
2582 loada->inst_p0 = temp;
2583 /* We reference the call too since call->dreg could change during optimization */
2584 loada->inst_p1 = call;
2585 MONO_ADD_INS (cfg->cbb, loada);
2587 call->inst.dreg = temp->dreg;
2589 call->vret_var = loada;
2590 } else if (!MONO_TYPE_IS_VOID (sig_ret))
2591 call->inst.dreg = alloc_dreg (cfg, call->inst.type);
2593 #ifdef MONO_ARCH_SOFT_FLOAT_FALLBACK
2594 if (COMPILE_SOFT_FLOAT (cfg)) {
2596 * If the call has a float argument, we would need to do an r8->r4 conversion using
2597 * an icall, but that cannot be done during the call sequence since it would clobber
2598 * the call registers + the stack. So we do it before emitting the call.
2600 for (i = 0; i < sig->param_count + sig->hasthis; ++i) {
2602 MonoInst *in = call->args [i];
/* The implicit 'this' argument (i == 0 when hasthis) is pointer-sized, never an R4. */
2604 if (i >= sig->hasthis)
2605 t = sig->params [i - sig->hasthis];
2607 t = &mono_defaults.int_class->byval_arg;
2608 t = mono_type_get_underlying_type (t);
2610 if (!t->byref && t->type == MONO_TYPE_R4) {
2611 MonoInst *iargs [1];
2615 conv = mono_emit_jit_icall (cfg, mono_fload_r4_arg, iargs);
2617 /* The result will be in an int vreg */
2618 call->args [i] = conv;
2624 call->need_unbox_trampoline = unbox_trampoline;
/* Arch/back-end specific lowering of the argument-passing sequence. */
2627 if (COMPILE_LLVM (cfg))
2628 mono_llvm_emit_call (cfg, call);
2630 mono_arch_emit_call (cfg, call);
2632 mono_arch_emit_call (cfg, call);
/* Track the largest outgoing-argument area needed by any call in this method. */
2635 cfg->param_area = MAX (cfg->param_area, call->stack_usage);
2636 cfg->flags |= MONO_CFG_HAS_CALLS;
/*
 * set_rgctx_arg:
 * Attach the rgctx argument to CALL: either in the dedicated arch register
 * (MONO_ARCH_RGCTX_REG) or, on arches without one, via call->rgctx_arg_reg.
 */
2642 set_rgctx_arg (MonoCompile *cfg, MonoCallInst *call, int rgctx_reg, MonoInst *rgctx_arg)
2644 #ifdef MONO_ARCH_RGCTX_REG
2645 mono_call_inst_add_outarg_reg (cfg, call, rgctx_reg, MONO_ARCH_RGCTX_REG, FALSE);
2646 cfg->uses_rgctx_reg = TRUE;
2647 call->rgctx_reg = TRUE;
2649 call->rgctx_arg_reg = rgctx_reg;
/*
 * mono_emit_calli:
 * Emit an indirect call through ADDR with signature SIG. Optionally passes an
 * IMT argument and an rgctx argument. When pinvoke-callconv checking is on,
 * brackets the call with OP_GET_SP/OP_SET_SP to detect callee stack imbalance
 * (cdecl vs stdcall mismatches) and throws ExecutionEngineException on mismatch.
 */
2656 inline static MonoInst*
2657 mono_emit_calli (MonoCompile *cfg, MonoMethodSignature *sig, MonoInst **args, MonoInst *addr, MonoInst *imt_arg, MonoInst *rgctx_arg)
2662 gboolean check_sp = FALSE;
2664 if (cfg->check_pinvoke_callconv && cfg->method->wrapper_type == MONO_WRAPPER_MANAGED_TO_NATIVE) {
2665 WrapperInfo *info = mono_marshal_get_wrapper_info (cfg->method);
2667 if (info && info->subtype == WRAPPER_SUBTYPE_PINVOKE)
/* Copy the rgctx value to a fresh vreg so later code can't clobber the original. */
2672 rgctx_reg = mono_alloc_preg (cfg);
2673 MONO_EMIT_NEW_UNALU (cfg, OP_MOVE, rgctx_reg, rgctx_arg->dreg);
2677 if (!cfg->stack_inbalance_var)
2678 cfg->stack_inbalance_var = mono_compile_create_var (cfg, &mono_defaults.int_class->byval_arg, OP_LOCAL);
/* Snapshot the stack pointer before the call. */
2680 MONO_INST_NEW (cfg, ins, OP_GET_SP);
2681 ins->dreg = cfg->stack_inbalance_var->dreg;
2682 MONO_ADD_INS (cfg->cbb, ins);
2685 call = mono_emit_call_args (cfg, sig, args, TRUE, FALSE, FALSE, rgctx_arg ? TRUE : FALSE, FALSE);
2687 call->inst.sreg1 = addr->dreg;
2690 emit_imt_argument (cfg, call, NULL, imt_arg);
2692 MONO_ADD_INS (cfg->cbb, (MonoInst*)call);
/* Re-read SP after the call and compare with the pre-call snapshot. */
2697 sp_reg = mono_alloc_preg (cfg);
2699 MONO_INST_NEW (cfg, ins, OP_GET_SP);
2701 MONO_ADD_INS (cfg->cbb, ins);
2703 /* Restore the stack so we don't crash when throwing the exception */
2704 MONO_INST_NEW (cfg, ins, OP_SET_SP);
2705 ins->sreg1 = cfg->stack_inbalance_var->dreg;
2706 MONO_ADD_INS (cfg->cbb, ins);
2708 MONO_EMIT_NEW_BIALU (cfg, OP_COMPARE, -1, cfg->stack_inbalance_var->dreg, sp_reg);
2709 MONO_EMIT_NEW_COND_EXC (cfg, NE_UN, "ExecutionEngineException");
2713 set_rgctx_arg (cfg, call, rgctx_reg, rgctx_arg);
2715 return (MonoInst*)call;
2719 emit_get_gsharedvt_info_klass (MonoCompile *cfg, MonoClass *klass, MonoRgctxInfoType rgctx_type);
2722 emit_get_rgctx_method (MonoCompile *cfg, int context_used, MonoMethod *cmethod, MonoRgctxInfoType rgctx_type);
2724 emit_get_rgctx_klass (MonoCompile *cfg, int context_used, MonoClass *klass, MonoRgctxInfoType rgctx_type);
/*
 * mono_emit_method_call_full:
 * Emit a (possibly virtual) call to METHOD. Handles: string ctor signature
 * rewriting, remoting wrappers, delegate Invoke via delegate->invoke_impl,
 * devirtualization of non-virtual/final methods, and virtual dispatch
 * through the vtable or the IMT for interface/generic-virtual calls.
 * THIS == NULL means a non-virtual (static-style) call.
 */
2727 mono_emit_method_call_full (MonoCompile *cfg, MonoMethod *method, MonoMethodSignature *sig, gboolean tail,
2728 MonoInst **args, MonoInst *this, MonoInst *imt_arg, MonoInst *rgctx_arg)
2730 #ifndef DISABLE_REMOTING
2731 gboolean might_be_remote = FALSE;
2733 gboolean virtual = this != NULL;
2734 gboolean enable_for_aot = TRUE;
2738 gboolean need_unbox_trampoline;
2741 sig = mono_method_signature (method);
2744 rgctx_reg = mono_alloc_preg (cfg);
2745 MONO_EMIT_NEW_UNALU (cfg, OP_MOVE, rgctx_reg, rgctx_arg->dreg);
/* String ctors are declared void but actually return the new string. */
2748 if (method->string_ctor) {
2749 /* Create the real signature */
2750 /* FIXME: Cache these */
2751 MonoMethodSignature *ctor_sig = mono_metadata_signature_dup_mempool (cfg->mempool, sig);
2752 ctor_sig->ret = &mono_defaults.string_class->byval_arg;
2757 context_used = mini_method_check_context_used (cfg, method);
2759 #ifndef DISABLE_REMOTING
2760 might_be_remote = this && sig->hasthis &&
2761 (mono_class_is_marshalbyref (method->klass) || method->klass == mono_defaults.object_class) &&
2762 !(method->flags & METHOD_ATTRIBUTE_VIRTUAL) && (!MONO_CHECK_THIS (this) || context_used);
/* Under generic sharing the remoting-check wrapper must be looked up via the rgctx. */
2764 if (might_be_remote && context_used) {
2767 g_assert (cfg->generic_sharing_context);
2769 addr = emit_get_rgctx_method (cfg, context_used, method, MONO_RGCTX_INFO_REMOTING_INVOKE_WITH_CHECK);
2771 return mono_emit_calli (cfg, sig, args, addr, NULL, NULL);
2775 need_unbox_trampoline = method->klass == mono_defaults.object_class || (method->klass->flags & TYPE_ATTRIBUTE_INTERFACE);
2777 call = mono_emit_call_args (cfg, sig, args, FALSE, virtual, tail, rgctx_arg ? TRUE : FALSE, need_unbox_trampoline);
2779 #ifndef DISABLE_REMOTING
2780 if (might_be_remote)
2781 call->method = mono_marshal_get_remoting_invoke_with_check (method);
2784 call->method = method;
2785 call->inst.flags |= MONO_INST_HAS_METHOD;
2786 call->inst.inst_left = this;
2787 call->tail_call = tail;
2790 int vtable_reg, slot_reg, this_reg;
2793 this_reg = this->dreg;
/* Fast path: delegate Invoke jumps straight through delegate->invoke_impl. */
2795 if (ARCH_HAVE_DELEGATE_TRAMPOLINES && (method->klass->parent == mono_defaults.multicastdelegate_class) && !strcmp (method->name, "Invoke")) {
2796 MonoInst *dummy_use;
2798 MONO_EMIT_NULL_CHECK (cfg, this_reg);
2800 /* Make a call to delegate->invoke_impl */
2801 call->inst.inst_basereg = this_reg;
2802 call->inst.inst_offset = MONO_STRUCT_OFFSET (MonoDelegate, invoke_impl);
2803 MONO_ADD_INS (cfg->cbb, (MonoInst*)call);
2805 /* We must emit a dummy use here because the delegate trampoline will
2806 replace the 'this' argument with the delegate target making this activation
2807 no longer a root for the delegate.
2808 This is an issue for delegates that target collectible code such as dynamic
2809 methods of GC'able assemblies.
2811 For a test case look into #667921.
2813 FIXME: a dummy use is not the best way to do it as the local register allocator
2814 will put it on a caller save register and spill it around the call.
2815 Ideally, we would either put it on a callee save register or only do the store part.
2817 EMIT_NEW_DUMMY_USE (cfg, dummy_use, args [0]);
2819 return (MonoInst*)call;
2822 if ((!cfg->compile_aot || enable_for_aot) &&
2823 (!(method->flags & METHOD_ATTRIBUTE_VIRTUAL) ||
2824 (MONO_METHOD_IS_FINAL (method) &&
2825 method->wrapper_type != MONO_WRAPPER_REMOTING_INVOKE_WITH_CHECK)) &&
2826 !(mono_class_is_marshalbyref (method->klass) && context_used)) {
2828 * the method is not virtual, we just need to ensure this is not null
2829 * and then we can call the method directly.
2831 #ifndef DISABLE_REMOTING
2832 if (mono_class_is_marshalbyref (method->klass) || method->klass == mono_defaults.object_class) {
2834 * The check above ensures method is not gshared, this is needed since
2835 * gshared methods can't have wrappers.
2837 method = call->method = mono_marshal_get_remoting_invoke_with_check (method);
2841 if (!method->string_ctor)
2842 MONO_EMIT_NEW_CHECK_THIS (cfg, this_reg);
/* Devirtualize: turn the callvirt-style opcode into a direct call. */
2844 call->inst.opcode = callvirt_to_call (call->inst.opcode);
2845 } else if ((method->flags & METHOD_ATTRIBUTE_VIRTUAL) && MONO_METHOD_IS_FINAL (method)) {
2847 * the method is virtual, but we can statically dispatch since either
2848 * its class or the method itself is sealed.
2849 * But first we need to ensure it's not a null reference.
2851 MONO_EMIT_NEW_CHECK_THIS (cfg, this_reg);
2853 call->inst.opcode = callvirt_to_call (call->inst.opcode);
/* True virtual dispatch: load the vtable (faulting on null 'this'). */
2855 vtable_reg = alloc_preg (cfg);
2856 MONO_EMIT_NEW_LOAD_MEMBASE_FAULT (cfg, vtable_reg, this_reg, MONO_STRUCT_OFFSET (MonoObject, vtable));
2857 if (method->klass->flags & TYPE_ATTRIBUTE_INTERFACE) {
2860 guint32 imt_slot = mono_method_get_imt_slot (method);
2861 emit_imt_argument (cfg, call, call->method, imt_arg);
/* IMT entries live at negative offsets from the vtable pointer. */
2862 slot_reg = vtable_reg;
2863 offset = ((gint32)imt_slot - MONO_IMT_SIZE) * SIZEOF_VOID_P;
2865 if (slot_reg == -1) {
2866 slot_reg = alloc_preg (cfg);
2867 mini_emit_load_intf_reg_vtable (cfg, slot_reg, vtable_reg, method->klass);
2868 offset = mono_method_get_vtable_index (method) * SIZEOF_VOID_P;
2871 slot_reg = vtable_reg;
2872 offset = MONO_STRUCT_OFFSET (MonoVTable, vtable) +
2873 ((mono_method_get_vtable_index (method)) * (SIZEOF_VOID_P));
/* Generic virtual methods also dispatch through the IMT machinery. */
2875 g_assert (mono_method_signature (method)->generic_param_count);
2876 emit_imt_argument (cfg, call, call->method, imt_arg);
2880 call->inst.sreg1 = slot_reg;
2881 call->inst.inst_offset = offset;
2882 call->virtual = TRUE;
2886 MONO_ADD_INS (cfg->cbb, (MonoInst*)call);
2889 set_rgctx_arg (cfg, call, rgctx_reg, rgctx_arg);
2891 return (MonoInst*)call;
/* Convenience wrapper: non-tail call to METHOD with its own signature and no IMT/rgctx argument. */
2895 mono_emit_method_call (MonoCompile *cfg, MonoMethod *method, MonoInst **args, MonoInst *this)
2897 return mono_emit_method_call_full (cfg, method, mono_method_signature (method), FALSE, args, this, NULL, NULL);
/*
 * mono_emit_native_call:
 * Emit a direct call to the native function FUNC with signature SIG.
 * The assignment of FUNC to the call (fptr) falls in lines not visible here.
 */
2901 mono_emit_native_call (MonoCompile *cfg, gconstpointer func, MonoMethodSignature *sig,
2908 call = mono_emit_call_args (cfg, sig, args, FALSE, FALSE, FALSE, FALSE, FALSE);
2911 MONO_ADD_INS (cfg->cbb, (MonoInst*)call);
2913 return (MonoInst*)call;
/*
 * mono_emit_jit_icall:
 * Look up the registered JIT icall for FUNC and emit a call to its wrapper.
 */
2917 mono_emit_jit_icall (MonoCompile *cfg, gconstpointer func, MonoInst **args)
2919 MonoJitICallInfo *info = mono_find_jit_icall_by_addr (func);
2923 return mono_emit_native_call (cfg, mono_icall_get_wrapper (info), info->sig, args);
2927 * mono_emit_abs_call:
2929 * Emit a call to the runtime function described by PATCH_TYPE and DATA.
2931 inline static MonoInst*
2932 mono_emit_abs_call (MonoCompile *cfg, MonoJumpInfoType patch_type, gconstpointer data,
2933 MonoMethodSignature *sig, MonoInst **args)
/* Allocate the patch descriptor; ip 0 because the call site is patched by address, not IL offset. */
2935 MonoJumpInfo *ji = mono_patch_info_new (cfg->mempool, 0, patch_type, data);
2939 * We pass ji as the call address, the PATCH_INFO_ABS resolving code will
/* Register ji so the backend can recognize the "address" as a patch, not a real pointer. */
2942 if (cfg->abs_patches == NULL)
2943 cfg->abs_patches = g_hash_table_new (NULL, NULL);
2944 g_hash_table_insert (cfg->abs_patches, ji, ji);
2945 ins = mono_emit_native_call (cfg, ji, sig, args);
2946 ((MonoCallInst*)ins)->fptr_is_patch = TRUE;
/*
 * mono_emit_widen_call_res:
 * Sign/zero-extend small integer return values of pinvoke (or LLVM) calls,
 * since native code may leave the upper bits of the register undefined.
 * Returns the (possibly replaced) result instruction.
 */
2951 mono_emit_widen_call_res (MonoCompile *cfg, MonoInst *ins, MonoMethodSignature *fsig)
2953 if (!MONO_TYPE_IS_VOID (fsig->ret)) {
2954 if ((fsig->pinvoke || LLVM_ENABLED) && !fsig->ret->byref {
2958 * Native code might return non register sized integers
2959 * without initializing the upper bits.
/* Map the return type to the load opcode it would use, then pick the matching widening conversion. */
2961 switch (mono_type_to_load_membase (cfg, fsig->ret)) {
2962 case OP_LOADI1_MEMBASE:
2963 widen_op = OP_ICONV_TO_I1;
2965 case OP_LOADU1_MEMBASE:
2966 widen_op = OP_ICONV_TO_U1;
2968 case OP_LOADI2_MEMBASE:
2969 widen_op = OP_ICONV_TO_I2;
2971 case OP_LOADU2_MEMBASE:
2972 widen_op = OP_ICONV_TO_U2;
2978 if (widen_op != -1) {
2979 int dreg = alloc_preg (cfg);
2982 EMIT_NEW_UNALU (cfg, widen, widen_op, dreg, ins->dreg);
2983 widen->type = ins->type;
/*
 * get_memcpy_method:
 * Return (and lazily cache) the managed String.memcpy helper from corlib.
 * Aborts if a corlib without the helper is installed.
 */
2993 get_memcpy_method (void)
2995 static MonoMethod *memcpy_method = NULL;
2996 if (!memcpy_method) {
2997 memcpy_method = mono_class_get_method_from_name (mono_defaults.string_class, "memcpy", 3);
2999 g_error ("Old corlib found. Install a new one");
3001 return memcpy_method;
/*
 * create_write_barrier_bitmap:
 * Recursively set one bit per pointer-sized slot of KLASS (at byte OFFSET)
 * that holds a GC reference, into *WB_BITMAP. Used to emit precise write
 * barriers when copying valuetypes.
 */
3005 create_write_barrier_bitmap (MonoCompile *cfg, MonoClass *klass, unsigned *wb_bitmap, int offset)
3007 MonoClassField *field;
3008 gpointer iter = NULL;
3010 while ((field = mono_class_get_fields (klass, &iter))) {
3013 if (field->type->attrs & FIELD_ATTRIBUTE_STATIC)
/* Valuetype field offsets include the (absent) object header; strip it. */
3015 foffset = klass->valuetype ? field->offset - sizeof (MonoObject): field->offset;
3016 if (mini_type_is_reference (cfg, mono_field_get_type (field))) {
3017 g_assert ((foffset % SIZEOF_VOID_P) == 0);
3018 *wb_bitmap |= 1 << ((offset + foffset) / SIZEOF_VOID_P);
/* Nested valuetypes with references contribute their own bits recursively. */
3020 MonoClass *field_class = mono_class_from_mono_type (field->type);
3021 if (field_class->has_references)
3022 create_write_barrier_bitmap (cfg, field_class, wb_bitmap, offset + foffset);
/*
 * emit_write_barrier:
 * Emit a GC write barrier for storing VALUE through PTR. Prefers, in order:
 * the arch-specific card-table barrier opcode, an inline card-table mark,
 * and finally a call to the generic write-barrier method.
 */
3028 emit_write_barrier (MonoCompile *cfg, MonoInst *ptr, MonoInst *value)
3030 int card_table_shift_bits;
3031 gpointer card_table_mask;
3033 MonoInst *dummy_use;
3034 int nursery_shift_bits;
3035 size_t nursery_size;
3036 gboolean has_card_table_wb = FALSE;
3038 if (!cfg->gen_write_barriers)
3041 card_table = mono_gc_get_card_table (&card_table_shift_bits, &card_table_mask);
3043 mono_gc_get_nursery (&nursery_shift_bits, &nursery_size);
3045 #ifdef MONO_ARCH_HAVE_CARD_TABLE_WBARRIER
3046 has_card_table_wb = TRUE;
3049 if (has_card_table_wb && !cfg->compile_aot && card_table && nursery_shift_bits > 0 && !COMPILE_LLVM (cfg)) {
3052 MONO_INST_NEW (cfg, wbarrier, OP_CARD_TABLE_WBARRIER);
3053 wbarrier->sreg1 = ptr->dreg;
3054 wbarrier->sreg2 = value->dreg;
3055 MONO_ADD_INS (cfg->cbb, wbarrier);
3056 } else if (card_table) {
3057 int offset_reg = alloc_preg (cfg);
3058 int card_reg = alloc_preg (cfg);
/* Inline card marking: card index = (ptr >> shift) [& mask], then card_table[index] = 1. */
3061 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_SHR_UN_IMM, offset_reg, ptr->dreg, card_table_shift_bits);
3062 if (card_table_mask)
3063 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_PAND_IMM, offset_reg, offset_reg, card_table_mask);
3065 /*We can't use PADD_IMM since the cardtable might end up in high addresses and amd64 doesn't support
3066 * IMM's larger than 32bits.
3068 if (cfg->compile_aot) {
3069 MONO_EMIT_NEW_AOTCONST (cfg, card_reg, NULL, MONO_PATCH_INFO_GC_CARD_TABLE_ADDR);
3071 MONO_INST_NEW (cfg, ins, OP_PCONST);
3072 ins->inst_p0 = card_table;
3073 ins->dreg = card_reg;
3074 MONO_ADD_INS (cfg->cbb, ins);
3077 MONO_EMIT_NEW_BIALU (cfg, OP_PADD, offset_reg, offset_reg, card_reg);
3078 MONO_EMIT_NEW_STORE_MEMBASE_IMM (cfg, OP_STOREI1_MEMBASE_IMM, offset_reg, 0, 1);
/* Fallback: call the runtime's generic write-barrier routine. */
3080 MonoMethod *write_barrier = mono_gc_get_write_barrier ();
3081 mono_emit_method_call (cfg, write_barrier, &ptr, NULL);
/* Keep VALUE alive across the barrier for the GC. */
3084 EMIT_NEW_DUMMY_USE (cfg, dummy_use, value);
/*
 * mono_emit_wb_aware_memcpy:
 * Copy a valuetype of KLASS with GC references, emitting write barriers for
 * the reference slots. Small copies (<= 5 pointer words) are unrolled inline;
 * larger ones go through the mono_gc_wbarrier_value_copy_bitmap icall.
 * Returns (in missing lines) whether the copy was emitted; bails out when
 * alignment or size make the bitmap approach invalid.
 */
3088 mono_emit_wb_aware_memcpy (MonoCompile *cfg, MonoClass *klass, MonoInst *iargs[4], int size, int align)
3090 int dest_ptr_reg, tmp_reg, destreg, srcreg, offset;
3091 unsigned need_wb = 0;
3096 /*types with references can't have alignment smaller than sizeof(void*) */
3097 if (align < SIZEOF_VOID_P)
3100 /*This value cannot be bigger than 32 due to the way we calculate the required wb bitmap.*/
3101 if (size > 32 * SIZEOF_VOID_P)
3104 create_write_barrier_bitmap (cfg, klass, &need_wb, 0);
3106 /* We don't unroll more than 5 stores to avoid code bloat. */
3107 if (size > 5 * SIZEOF_VOID_P) {
3108 /*This is harmless and simplify mono_gc_wbarrier_value_copy_bitmap */
/* Round SIZE up to a whole number of pointer words. */
3109 size += (SIZEOF_VOID_P - 1);
3110 size &= ~(SIZEOF_VOID_P - 1);
3112 EMIT_NEW_ICONST (cfg, iargs [2], size);
3113 EMIT_NEW_ICONST (cfg, iargs [3], need_wb);
3114 mono_emit_jit_icall (cfg, mono_gc_wbarrier_value_copy_bitmap, iargs);
3118 destreg = iargs [0]->dreg;
3119 srcreg = iargs [1]->dreg;
3122 dest_ptr_reg = alloc_preg (cfg);
3123 tmp_reg = alloc_preg (cfg);
/* Running destination pointer, advanced one word per iteration. */
3126 EMIT_NEW_UNALU (cfg, iargs [0], OP_MOVE, dest_ptr_reg, destreg);
3128 while (size >= SIZEOF_VOID_P) {
3129 MonoInst *load_inst;
3130 MONO_INST_NEW (cfg, load_inst, OP_LOAD_MEMBASE);
3131 load_inst->dreg = tmp_reg;
3132 load_inst->inst_basereg = srcreg;
3133 load_inst->inst_offset = offset;
3134 MONO_ADD_INS (cfg->cbb, load_inst);
3136 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREP_MEMBASE_REG, dest_ptr_reg, 0, tmp_reg);
/* A barrier is emitted only for slots flagged in the bitmap (guard in a missing line). */
3139 emit_write_barrier (cfg, iargs [0], load_inst);
3141 offset += SIZEOF_VOID_P;
3142 size -= SIZEOF_VOID_P;
3145 /*tmp += sizeof (void*)*/
3146 if (size >= SIZEOF_VOID_P) {
3147 NEW_BIALU_IMM (cfg, iargs [0], OP_PADD_IMM, dest_ptr_reg, dest_ptr_reg, SIZEOF_VOID_P);
3148 MONO_ADD_INS (cfg->cbb, iargs [0]);
3152 /* Those cannot be references since size < sizeof (void*) */
3154 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADI4_MEMBASE, tmp_reg, srcreg, offset);
3155 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI4_MEMBASE_REG, destreg, offset, tmp_reg);
3161 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADI2_MEMBASE, tmp_reg, srcreg, offset);
3162 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI2_MEMBASE_REG, destreg, offset, tmp_reg);
3168 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADI1_MEMBASE, tmp_reg, srcreg, offset);
3169 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI1_MEMBASE_REG, destreg, offset, tmp_reg);
3178 * Emit code to copy a valuetype of type @klass whose address is stored in
3179 * @src->dreg to memory whose address is stored at @dest->dreg.
/*
 * mini_emit_stobj:
 * Copy a valuetype of KLASS from *SRC->dreg to *DEST->dreg. Uses gsharedvt
 * runtime size/memcpy info for open types, value_copy icalls when write
 * barriers are needed, inline memcpy for small types, and the managed
 * memcpy helper otherwise. NATIVE selects the native (marshaled) layout.
 */
3182 mini_emit_stobj (MonoCompile *cfg, MonoInst *dest, MonoInst *src, MonoClass *klass, gboolean native)
3184 MonoInst *iargs [4];
3185 int context_used, n;
3187 MonoMethod *memcpy_method;
3188 MonoInst *size_ins = NULL;
3189 MonoInst *memcpy_ins = NULL;
3193 * This check breaks with spilled vars... need to handle it during verification anyway.
3194 * g_assert (klass && klass == src->klass && klass == dest->klass);
/* For gsharedvt the size and copy routine are only known at runtime, via the rgctx. */
3197 if (mini_is_gsharedvt_klass (cfg, klass)) {
3199 context_used = mini_class_check_context_used (cfg, klass);
3200 size_ins = emit_get_gsharedvt_info_klass (cfg, klass, MONO_RGCTX_INFO_VALUE_SIZE);
3201 memcpy_ins = emit_get_gsharedvt_info_klass (cfg, klass, MONO_RGCTX_INFO_MEMCPY);
3205 n = mono_class_native_size (klass, &align);
3207 n = mono_class_value_size (klass, &align);
3209 /* if native is true there should be no references in the struct */
3210 if (cfg->gen_write_barriers && (klass->has_references || size_ins) && !native) {
3211 /* Avoid barriers when storing to the stack */
3212 if (!((dest->opcode == OP_ADD_IMM && dest->sreg1 == cfg->frame_reg) ||
3213 (dest->opcode == OP_LDADDR))) {
3219 context_used = mini_class_check_context_used (cfg, klass);
3221 /* It's ok to intrinsify under gsharing since shared code types are layout stable. */
3222 if (!size_ins && (cfg->opt & MONO_OPT_INTRINS) && mono_emit_wb_aware_memcpy (cfg, klass, iargs, n, align)) {
3224 } else if (context_used) {
3225 iargs [2] = emit_get_rgctx_klass (cfg, context_used, klass, MONO_RGCTX_INFO_KLASS);
3227 if (cfg->compile_aot) {
3228 EMIT_NEW_CLASSCONST (cfg, iargs [2], klass);
3230 EMIT_NEW_PCONST (cfg, iargs [2], klass);
/* JIT-time path can compute the GC descriptor eagerly; AOT cannot. */
3231 mono_class_compute_gc_descriptor (klass);
3236 mono_emit_jit_icall (cfg, mono_gsharedvt_value_copy, iargs);
3238 mono_emit_jit_icall (cfg, mono_value_copy, iargs);
/* No references / no barriers needed: plain memory copy. */
3243 if (!size_ins && (cfg->opt & MONO_OPT_INTRINS) && n <= sizeof (gpointer) * 5) {
3244 /* FIXME: Optimize the case when src/dest is OP_LDADDR */
3245 mini_emit_memcpy (cfg, dest->dreg, 0, src->dreg, 0, n, align);
3250 iargs [2] = size_ins;
3252 EMIT_NEW_ICONST (cfg, iargs [2], n);
3254 memcpy_method = get_memcpy_method ();
3256 mono_emit_calli (cfg, mono_method_signature (memcpy_method), iargs, memcpy_ins, NULL, NULL);
3258 mono_emit_method_call (cfg, memcpy_method, iargs, NULL);
/*
 * get_memset_method:
 * Return (and lazily cache) the managed String.memset helper from corlib.
 * Aborts if a corlib without the helper is installed.
 */
3263 get_memset_method (void)
3265 static MonoMethod *memset_method = NULL;
3266 if (!memset_method) {
3267 memset_method = mono_class_get_method_from_name (mono_defaults.string_class, "memset", 3);
3269 g_error ("Old corlib found. Install a new one");
3271 return memset_method;
/*
 * mini_emit_initobj:
 * Zero-initialize a valuetype of KLASS at *DEST->dreg (the initobj opcode).
 * gsharedvt types use a runtime size + bzero helper from the rgctx; small
 * fixed-size types are memset inline; larger ones call the managed memset.
 */
3275 mini_emit_initobj (MonoCompile *cfg, MonoInst *dest, const guchar *ip, MonoClass *klass)
3277 MonoInst *iargs [3];
3278 int n, context_used;
3280 MonoMethod *memset_method;
3281 MonoInst *size_ins = NULL;
3282 MonoInst *bzero_ins = NULL;
3283 static MonoMethod *bzero_method;
3285 /* FIXME: Optimize this for the case when dest is an LDADDR */
3287 mono_class_init (klass);
3288 if (mini_is_gsharedvt_klass (cfg, klass)) {
3289 context_used = mini_class_check_context_used (cfg, klass);
3290 size_ins = emit_get_gsharedvt_info_klass (cfg, klass, MONO_RGCTX_INFO_VALUE_SIZE);
3291 bzero_ins = emit_get_gsharedvt_info_klass (cfg, klass, MONO_RGCTX_INFO_BZERO);
/* The indirect bzero target is dispatched through the corlib helper's signature. */
3293 bzero_method = mono_class_get_method_from_name (mono_defaults.string_class, "bzero_aligned_1", 2);
3294 g_assert (bzero_method);
3296 iargs [1] = size_ins;
3297 mono_emit_calli (cfg, mono_method_signature (bzero_method), iargs, bzero_ins, NULL, NULL);
3301 n = mono_class_value_size (klass, &align);
/* Small types: inline memset is cheaper than a call. */
3303 if (n <= sizeof (gpointer) * 5) {
3304 mini_emit_memset (cfg, dest->dreg, 0, n, 0, align);
3307 memset_method = get_memset_method ();
3309 EMIT_NEW_ICONST (cfg, iargs [1], 0);
3310 EMIT_NEW_ICONST (cfg, iargs [2], n);
3311 mono_emit_method_call (cfg, memset_method, iargs, NULL);
/*
 * emit_get_rgctx:
 * Emit IR that produces the runtime generic context for METHOD: the mrgctx
 * variable for method-level sharing, the saved vtable variable for
 * static/valuetype methods, or the vtable loaded from 'this' otherwise.
 */
3316 emit_get_rgctx (MonoCompile *cfg, MonoMethod *method, int context_used)
3318 MonoInst *this = NULL;
3320 g_assert (cfg->generic_sharing_context);
/* Only instance methods on reference types can recover the context from 'this'. */
3322 if (!(method->flags & METHOD_ATTRIBUTE_STATIC) &&
3323 !(context_used & MONO_GENERIC_CONTEXT_USED_METHOD) &&
3324 !method->klass->valuetype)
3325 EMIT_NEW_ARGLOAD (cfg, this, 0);
3327 if (context_used & MONO_GENERIC_CONTEXT_USED_METHOD) {
3328 MonoInst *mrgctx_loc, *mrgctx_var;
3331 g_assert (method->is_inflated && mono_method_get_context (method)->method_inst);
3333 mrgctx_loc = mono_get_vtable_var (cfg);
3334 EMIT_NEW_TEMPLOAD (cfg, mrgctx_var, mrgctx_loc->inst_c0);
3337 } else if (method->flags & METHOD_ATTRIBUTE_STATIC || method->klass->valuetype) {
3338 MonoInst *vtable_loc, *vtable_var;
3342 vtable_loc = mono_get_vtable_var (cfg);
3343 EMIT_NEW_TEMPLOAD (cfg, vtable_var, vtable_loc->inst_c0);
/* If the saved context is actually an mrgctx, dereference it to reach the class vtable. */
3345 if (method->is_inflated && mono_method_get_context (method)->method_inst) {
3346 MonoInst *mrgctx_var = vtable_var;
3349 vtable_reg = alloc_preg (cfg);
3350 EMIT_NEW_LOAD_MEMBASE (cfg, vtable_var, OP_LOAD_MEMBASE, vtable_reg, mrgctx_var->dreg, MONO_STRUCT_OFFSET (MonoMethodRuntimeGenericContext, class_vtable));
3351 vtable_var->type = STACK_PTR;
/* Default: load the vtable straight out of the 'this' object header. */
3359 vtable_reg = alloc_preg (cfg);
3360 EMIT_NEW_LOAD_MEMBASE (cfg, ins, OP_LOAD_MEMBASE, vtable_reg, this->dreg, MONO_STRUCT_OFFSET (MonoObject, vtable));
/*
 * mono_patch_info_rgctx_entry_new:
 * Allocate and fill an rgctx-entry patch descriptor: which method's context,
 * whether the lookup goes through an mrgctx, and the (patch_type, patch_data,
 * info_type) triple identifying the slot being fetched.
 */
3365 static MonoJumpInfoRgctxEntry *
3366 mono_patch_info_rgctx_entry_new (MonoMemPool *mp, MonoMethod *method, gboolean in_mrgctx, MonoJumpInfoType patch_type, gconstpointer patch_data, MonoRgctxInfoType info_type)
3368 MonoJumpInfoRgctxEntry *res = mono_mempool_alloc0 (mp, sizeof (MonoJumpInfoRgctxEntry));
3369 res->method = method;
3370 res->in_mrgctx = in_mrgctx;
3371 res->data = mono_mempool_alloc0 (mp, sizeof (MonoJumpInfo));
3372 res->data->type = patch_type;
3373 res->data->data.target = patch_data;
3374 res->info_type = info_type;
/* Emit a call to the rgctx lazy-fetch trampoline that resolves ENTRY against RGCTX at runtime. */
3379 static inline MonoInst*
3380 emit_rgctx_fetch (MonoCompile *cfg, MonoInst *rgctx, MonoJumpInfoRgctxEntry *entry)
3382 return mono_emit_abs_call (cfg, MONO_PATCH_INFO_RGCTX_FETCH, entry, helper_sig_rgctx_lazy_fetch_trampoline, &rgctx);
/* Emit IR to load the rgctx slot RGCTX_TYPE keyed by KLASS (e.g. its vtable or runtime size). */
3386 emit_get_rgctx_klass (MonoCompile *cfg, int context_used,
3387 MonoClass *klass, MonoRgctxInfoType rgctx_type)
3389 MonoJumpInfoRgctxEntry *entry = mono_patch_info_rgctx_entry_new (cfg->mempool, cfg->current_method, context_used & MONO_GENERIC_CONTEXT_USED_METHOD, MONO_PATCH_INFO_CLASS, klass, rgctx_type);
3390 MonoInst *rgctx = emit_get_rgctx (cfg, cfg->current_method, context_used);
3392 return emit_rgctx_fetch (cfg, rgctx, entry);
/* Emit IR to load the rgctx slot RGCTX_TYPE keyed by the signature SIG. */
3396 emit_get_rgctx_sig (MonoCompile *cfg, int context_used,
3397 MonoMethodSignature *sig, MonoRgctxInfoType rgctx_type)
3399 MonoJumpInfoRgctxEntry *entry = mono_patch_info_rgctx_entry_new (cfg->mempool, cfg->current_method, context_used & MONO_GENERIC_CONTEXT_USED_METHOD, MONO_PATCH_INFO_SIGNATURE, sig, rgctx_type);
3400 MonoInst *rgctx = emit_get_rgctx (cfg, cfg->current_method, context_used);
3402 return emit_rgctx_fetch (cfg, rgctx, entry);
/*
 * emit_get_rgctx_gsharedvt_call:
 * Emit IR to load the rgctx slot RGCTX_TYPE keyed by the (SIG, CMETHOD) pair
 * describing a gsharedvt call site.
 */
3406 emit_get_rgctx_gsharedvt_call (MonoCompile *cfg, int context_used,
3407 MonoMethodSignature *sig, MonoMethod *cmethod, MonoRgctxInfoType rgctx_type)
3409 MonoJumpInfoGSharedVtCall *call_info;
3410 MonoJumpInfoRgctxEntry *entry;
3413 call_info = mono_mempool_alloc0 (cfg->mempool, sizeof (MonoJumpInfoGSharedVtCall));
3414 call_info->sig = sig;
3415 call_info->method = cmethod;
3417 entry = mono_patch_info_rgctx_entry_new (cfg->mempool, cfg->current_method, context_used & MONO_GENERIC_CONTEXT_USED_METHOD, MONO_PATCH_INFO_GSHAREDVT_CALL, call_info, rgctx_type);
3418 rgctx = emit_get_rgctx (cfg, cfg->current_method, context_used);
3420 return emit_rgctx_fetch (cfg, rgctx, entry);
/* Emit IR to load the MonoGSharedVtMethodRuntimeInfo for CMETHOD from the rgctx. */
3425 emit_get_rgctx_gsharedvt_method (MonoCompile *cfg, int context_used,
3426 MonoMethod *cmethod, MonoGSharedVtMethodInfo *info)
3428 MonoJumpInfoRgctxEntry *entry;
3431 entry = mono_patch_info_rgctx_entry_new (cfg->mempool, cfg->current_method, context_used & MONO_GENERIC_CONTEXT_USED_METHOD, MONO_PATCH_INFO_GSHAREDVT_METHOD, info, MONO_RGCTX_INFO_METHOD_GSHAREDVT_INFO);
3432 rgctx = emit_get_rgctx (cfg, cfg->current_method, context_used);
3434 return emit_rgctx_fetch (cfg, rgctx, entry);
3438 * emit_get_rgctx_method:
3440 * Emit IR to load the property RGCTX_TYPE of CMETHOD. If context_used is 0, emit
3441 * normal constants, else emit a load from the rgctx.
3444 emit_get_rgctx_method (MonoCompile *cfg, int context_used,
3445 MonoMethod *cmethod, MonoRgctxInfoType rgctx_type)
/* Closed (non-shared) context: the value is a compile-time constant. */
3447 if (!context_used) {
3450 switch (rgctx_type) {
3451 case MONO_RGCTX_INFO_METHOD:
3452 EMIT_NEW_METHODCONST (cfg, ins, cmethod);
3454 case MONO_RGCTX_INFO_METHOD_RGCTX:
3455 EMIT_NEW_METHOD_RGCTX_CONST (cfg, ins, cmethod);
3458 g_assert_not_reached ();
/* Shared context: fetch the value from the rgctx at runtime. */
3461 MonoJumpInfoRgctxEntry *entry = mono_patch_info_rgctx_entry_new (cfg->mempool, cfg->current_method, context_used & MONO_GENERIC_CONTEXT_USED_METHOD, MONO_PATCH_INFO_METHODCONST, cmethod, rgctx_type);
3462 MonoInst *rgctx = emit_get_rgctx (cfg, cfg->current_method, context_used);
3464 return emit_rgctx_fetch (cfg, rgctx, entry);
/* Emit IR to load the rgctx slot RGCTX_TYPE keyed by FIELD. */
3469 emit_get_rgctx_field (MonoCompile *cfg, int context_used,
3470 MonoClassField *field, MonoRgctxInfoType rgctx_type)
3472 MonoJumpInfoRgctxEntry *entry = mono_patch_info_rgctx_entry_new (cfg->mempool, cfg->current_method, context_used & MONO_GENERIC_CONTEXT_USED_METHOD, MONO_PATCH_INFO_FIELD, field, rgctx_type);
3473 MonoInst *rgctx = emit_get_rgctx (cfg, cfg->current_method, context_used);
3475 return emit_rgctx_fetch (cfg, rgctx, entry);
/*
 * get_gsharedvt_info_slot:
 * Return the index of the (DATA, RGCTX_TYPE) entry in this method's gsharedvt
 * info template, reusing an existing entry when possible (except for
 * LOCAL_OFFSET entries, which are never shared) and growing the entry array
 * on demand.
 */
3479 get_gsharedvt_info_slot (MonoCompile *cfg, gpointer data, MonoRgctxInfoType rgctx_type)
3481 MonoGSharedVtMethodInfo *info = cfg->gsharedvt_info;
3482 MonoRuntimeGenericContextInfoTemplate *template;
3487 for (i = 0; i < info->num_entries; ++i) {
3488 MonoRuntimeGenericContextInfoTemplate *otemplate = &info->entries [i];
3490 if (otemplate->info_type == rgctx_type && otemplate->data == data && rgctx_type != MONO_RGCTX_INFO_LOCAL_OFFSET)
/* Grow by doubling (16 initially) when the entry array is full. */
3494 if (info->num_entries == info->count_entries) {
3495 MonoRuntimeGenericContextInfoTemplate *new_entries;
3496 int new_count_entries = info->count_entries ? info->count_entries * 2 : 16;
3498 new_entries = mono_mempool_alloc0 (cfg->mempool, sizeof (MonoRuntimeGenericContextInfoTemplate) * new_count_entries);
3500 memcpy (new_entries, info->entries, sizeof (MonoRuntimeGenericContextInfoTemplate) * info->count_entries);
3501 info->entries = new_entries;
3502 info->count_entries = new_count_entries;
3505 idx = info->num_entries;
3506 template = &info->entries [idx];
3507 template->info_type = rgctx_type;
3508 template->data = data;
3510 info->num_entries ++;
3516 * emit_get_gsharedvt_info:
3518 * This is similar to emit_get_rgctx_.., but loads the data from the gsharedvt info var instead of calling an rgctx fetch trampoline.
3521 emit_get_gsharedvt_info (MonoCompile *cfg, gpointer data, MonoRgctxInfoType rgctx_type)
/* Resolve (data, rgctx_type) to a slot index, then load that slot from the gsharedvt info var. */
3526 idx = get_gsharedvt_info_slot (cfg, data, rgctx_type);
3527 /* Load info->entries [idx] */
3528 dreg = alloc_preg (cfg);
3529 EMIT_NEW_LOAD_MEMBASE (cfg, ins, OP_LOAD_MEMBASE, dreg, cfg->gsharedvt_info_var->dreg, MONO_STRUCT_OFFSET (MonoGSharedVtMethodRuntimeInfo, entries) + (idx * sizeof (gpointer)));
/* Convenience wrapper: fetch a gsharedvt info slot keyed by KLASS's by-value type. */
3535 emit_get_gsharedvt_info_klass (MonoCompile *cfg, MonoClass *klass, MonoRgctxInfoType rgctx_type)
3537 return emit_get_gsharedvt_info (cfg, &klass->byval_arg, rgctx_type);
3541 * On return the caller must check @klass for load errors.
/*
 * emit_generic_class_init:
 * Emit a call to the generic class-init trampoline for KLASS, passing the
 * vtable either as an rgctx-fetched value (shared code) or as a constant.
 */
3544 emit_generic_class_init (MonoCompile *cfg, MonoClass *klass)
3546 MonoInst *vtable_arg;
3550 context_used = mini_class_check_context_used (cfg, klass);
3553 vtable_arg = emit_get_rgctx_klass (cfg, context_used,
3554 klass, MONO_RGCTX_INFO_VTABLE);
3556 MonoVTable *vtable = mono_class_vtable (cfg->domain, klass);
3560 EMIT_NEW_VTABLECONST (cfg, vtable_arg, vtable);
/* LLVM uses a variant trampoline signature; native codegen passes the vtable in a fixed register. */
3563 if (COMPILE_LLVM (cfg))
3564 call = (MonoCallInst*)mono_emit_abs_call (cfg, MONO_PATCH_INFO_GENERIC_CLASS_INIT, NULL, helper_sig_generic_class_init_trampoline_llvm, &vtable_arg);
3566 call = (MonoCallInst*)mono_emit_abs_call (cfg, MONO_PATCH_INFO_GENERIC_CLASS_INIT, NULL, helper_sig_generic_class_init_trampoline, &vtable_arg);
3567 #ifdef MONO_ARCH_VTABLE_REG
3568 mono_call_inst_add_outarg_reg (cfg, call, vtable_arg->dreg, MONO_ARCH_VTABLE_REG, FALSE);
3569 cfg->uses_vtable_reg = TRUE;
/*
 * emit_seq_point:
 * Emit a debugger sequence point at IL offset (ip - header->code), but only
 * when sequence points are enabled and we are compiling METHOD itself (not
 * an inlined callee). INTR_LOC marks an interruptible location.
 */
3576 emit_seq_point (MonoCompile *cfg, MonoMethod *method, guint8* ip, gboolean intr_loc, gboolean nonempty_stack)
3580 if (cfg->gen_seq_points && cfg->method == method) {
3581 NEW_SEQ_POINT (cfg, ins, ip - cfg->header->code, intr_loc);
3583 ins->flags |= MONO_INST_NONEMPTY_STACK;
3584 MONO_ADD_INS (cfg->cbb, ins);
/* When --debug=casts is enabled, record the source and destination classes
 * of an upcoming cast into the JIT TLS area (class_cast_from/class_cast_to)
 * so a failing cast can produce a detailed error message. OBJ_REG is skipped
 * when it holds NULL (null objects always pass casts). With a non-NULL
 * OUT_BBLOCK the caller is handed the current bblock, since a new one is
 * started here. No-op when better_cast_details is off. */
3589 save_cast_details (MonoCompile *cfg, MonoClass *klass, int obj_reg, gboolean null_check, MonoBasicBlock **out_bblock)
3591 if (mini_get_debug_options ()->better_cast_details) {
3592 int vtable_reg = alloc_preg (cfg);
3593 int klass_reg = alloc_preg (cfg);
3594 MonoBasicBlock *is_null_bb = NULL;
3596 int to_klass_reg, context_used;
3599 NEW_BBLOCK (cfg, is_null_bb);
/* skip the bookkeeping entirely for a null object */
3601 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, obj_reg, 0);
3602 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBEQ, is_null_bb);
3605 tls_get = mono_get_jit_tls_intrinsic (cfg);
3607 fprintf (stderr, "error: --debug=casts not supported on this platform.\n.");
3611 MONO_ADD_INS (cfg->cbb, tls_get);
/* class_cast_from := obj->vtable->klass */
3612 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, vtable_reg, obj_reg, MONO_STRUCT_OFFSET (MonoObject, vtable));
3613 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, vtable_reg, MONO_STRUCT_OFFSET (MonoVTable, klass));
3615 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORE_MEMBASE_REG, tls_get->dreg, MONO_STRUCT_OFFSET (MonoJitTlsData, class_cast_from), klass_reg);
3617 context_used = mini_class_check_context_used (cfg, klass);
3619 MonoInst *class_ins;
/* target class: RGCTX lookup under generic sharing, constant otherwise */
3621 class_ins = emit_get_rgctx_klass (cfg, context_used, klass, MONO_RGCTX_INFO_KLASS);
3622 to_klass_reg = class_ins->dreg;
3624 to_klass_reg = alloc_preg (cfg);
3625 MONO_EMIT_NEW_CLASSCONST (cfg, to_klass_reg, klass);
3627 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORE_MEMBASE_REG, tls_get->dreg, MONO_STRUCT_OFFSET (MonoJitTlsData, class_cast_to), to_klass_reg);
3630 MONO_START_BB (cfg, is_null_bb);
3632 *out_bblock = cfg->cbb;
/* Undo save_cast_details after a cast succeeded: zero the 'from' field in
 * JIT TLS so stale data cannot leak into a later cast's error message. */
3638 reset_cast_details (MonoCompile *cfg)
3640 /* Reset the variables holding the cast details */
3641 if (mini_get_debug_options ()->better_cast_details) {
3642 MonoInst *tls_get = mono_get_jit_tls_intrinsic (cfg);
3644 MONO_ADD_INS (cfg->cbb, tls_get);
3645 /* It is enough to reset the from field */
3646 MONO_EMIT_NEW_STORE_MEMBASE_IMM (cfg, OP_STORE_MEMBASE_IMM, tls_get->dreg, MONO_STRUCT_OFFSET (MonoJitTlsData, class_cast_from), 0);
3651 * On return the caller must check @array_class for load errors
/* Emit a runtime check that OBJ is exactly an instance of ARRAY_CLASS,
 * throwing ArrayTypeMismatchException otherwise (used for stelem etc.).
 * Three strategies, by compilation mode:
 *  - MONO_OPT_SHARED: compare obj->vtable->klass against the class
 *    (AOT uses a class constant in a register, JIT an immediate);
 *  - shared generics (context_used): compare vtables, target vtable
 *    fetched through the RGCTX;
 *  - otherwise: compare vtables directly against the known MonoVTable. */
3654 mini_emit_check_array_type (MonoCompile *cfg, MonoInst *obj, MonoClass *array_class)
3656 int vtable_reg = alloc_preg (cfg);
3659 context_used = mini_class_check_context_used (cfg, array_class);
3661 save_cast_details (cfg, array_class, obj->dreg, FALSE, NULL);
/* faulting load: also acts as the null check on obj */
3663 MONO_EMIT_NEW_LOAD_MEMBASE_FAULT (cfg, vtable_reg, obj->dreg, MONO_STRUCT_OFFSET (MonoObject, vtable));
3665 if (cfg->opt & MONO_OPT_SHARED) {
3666 int class_reg = alloc_preg (cfg);
3667 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, class_reg, vtable_reg, MONO_STRUCT_OFFSET (MonoVTable, klass));
3668 if (cfg->compile_aot) {
3669 int klass_reg = alloc_preg (cfg);
3670 MONO_EMIT_NEW_CLASSCONST (cfg, klass_reg, array_class);
3671 MONO_EMIT_NEW_BIALU (cfg, OP_COMPARE, -1, class_reg, klass_reg);
3673 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, class_reg, array_class);
3675 } else if (context_used) {
3676 MonoInst *vtable_ins;
3678 vtable_ins = emit_get_rgctx_klass (cfg, context_used, array_class, MONO_RGCTX_INFO_VTABLE);
3679 MONO_EMIT_NEW_BIALU (cfg, OP_COMPARE, -1, vtable_reg, vtable_ins->dreg);
3681 if (cfg->compile_aot) {
/* bail out if the vtable cannot be created; caller checks array_class */
3685 if (!(vtable = mono_class_vtable (cfg->domain, array_class)))
3687 vt_reg = alloc_preg (cfg);
3688 MONO_EMIT_NEW_VTABLECONST (cfg, vt_reg, vtable);
3689 MONO_EMIT_NEW_BIALU (cfg, OP_COMPARE, -1, vtable_reg, vt_reg);
3692 if (!(vtable = mono_class_vtable (cfg->domain, array_class)))
3694 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, vtable_reg, vtable);
3698 MONO_EMIT_NEW_COND_EXC (cfg, NE_UN, "ArrayTypeMismatchException");
3700 reset_cast_details (cfg);
3704 * Handles unbox of a Nullable<T>. If context_used is non zero, then shared
3705 * generic code is generated.
/* Unbox VAL into a Nullable<T> by calling Nullable<T>.Unbox:
 *  - shared generics: indirect call through an RGCTX-resolved method
 *    address, passing the rgctx of the current method;
 *  - otherwise: a direct call, with the vtable passed as an extra arg
 *    when check_method_sharing says the method needs it. */
3708 handle_unbox_nullable (MonoCompile* cfg, MonoInst* val, MonoClass* klass, int context_used)
3710 MonoMethod* method = mono_class_get_method_from_name (klass, "Unbox", 1);
3713 MonoInst *rgctx, *addr;
3715 /* FIXME: What if the class is shared? We might not
3716 have to get the address of the method from the
3718 addr = emit_get_rgctx_method (cfg, context_used, method,
3719 MONO_RGCTX_INFO_GENERIC_METHOD_CODE);
3721 rgctx = emit_get_rgctx (cfg, cfg->current_method, context_used);
3723 return mono_emit_calli (cfg, mono_method_signature (method), &val, addr, NULL, rgctx);
3725 gboolean pass_vtable, pass_mrgctx;
3726 MonoInst *rgctx_arg = NULL;
3728 check_method_sharing (cfg, method, &pass_vtable, &pass_mrgctx);
3729 g_assert (!pass_mrgctx);
3732 MonoVTable *vtable = mono_class_vtable (cfg->domain, method->klass);
3735 EMIT_NEW_VTABLECONST (cfg, rgctx_arg, vtable);
3738 return mono_emit_method_call_full (cfg, method, NULL, FALSE, &val, NULL, NULL, rgctx_arg);
/* Emit the standard unbox sequence for sp[0]: verify the object's runtime
 * type (rank must be 0, element class must match), throwing
 * InvalidCastException on mismatch, then return the address of the boxed
 * payload (obj + sizeof (MonoObject)) as a STACK_MP value.
 * Under shared generics the expected element class is fetched from the
 * RGCTX and compared at runtime. */
3743 handle_unbox (MonoCompile *cfg, MonoClass *klass, MonoInst **sp, int context_used)
3747 int vtable_reg = alloc_dreg (cfg ,STACK_PTR);
3748 int klass_reg = alloc_dreg (cfg ,STACK_PTR);
3749 int eclass_reg = alloc_dreg (cfg ,STACK_PTR);
3750 int rank_reg = alloc_dreg (cfg ,STACK_I4);
3752 obj_reg = sp [0]->dreg;
/* faulting load doubles as the null check */
3753 MONO_EMIT_NEW_LOAD_MEMBASE_FAULT (cfg, vtable_reg, obj_reg, MONO_STRUCT_OFFSET (MonoObject, vtable));
3754 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADU1_MEMBASE, rank_reg, vtable_reg, MONO_STRUCT_OFFSET (MonoVTable, rank));
3756 /* FIXME: generics */
3757 g_assert (klass->rank == 0);
/* reject arrays: a boxed value type always has rank 0 */
3760 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, rank_reg, 0);
3761 MONO_EMIT_NEW_COND_EXC (cfg, NE_UN, "InvalidCastException");
3763 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, vtable_reg, MONO_STRUCT_OFFSET (MonoVTable, klass));
3764 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, eclass_reg, klass_reg, MONO_STRUCT_OFFSET (MonoClass, element_class));
3767 MonoInst *element_class;
3769 /* This assertion is from the unboxcast insn */
3770 g_assert (klass->rank == 0);
3772 element_class = emit_get_rgctx_klass (cfg, context_used,
3773 klass->element_class, MONO_RGCTX_INFO_KLASS);
3775 MONO_EMIT_NEW_BIALU (cfg, OP_COMPARE, -1, eclass_reg, element_class->dreg);
3776 MONO_EMIT_NEW_COND_EXC (cfg, NE_UN, "InvalidCastException");
3778 save_cast_details (cfg, klass->element_class, obj_reg, FALSE, NULL);
3779 mini_emit_class_check (cfg, eclass_reg, klass->element_class);
3780 reset_cast_details (cfg);
/* result: pointer just past the MonoObject header, i.e. the value itself */
3783 NEW_BIALU_IMM (cfg, add, OP_ADD_IMM, alloc_dreg (cfg, STACK_MP), obj_reg, sizeof (MonoObject));
3784 MONO_ADD_INS (cfg->cbb, add);
3785 add->type = STACK_MP;
/* gsharedvt unbox: KLASS's concrete identity is only known at runtime, so
 * branch on the RGCTX-provided CLASS_BOX_TYPE discriminator:
 *   1 => reference type: store the ref into a temporary and take its address;
 *   2 => Nullable<T>: call the RGCTX-resolved Nullable unbox routine through
 *        a hand-built one-arg signature (the real method cannot be
 *        constructed at JIT time);
 *   otherwise => plain value type: address is obj + sizeof (MonoObject).
 * All paths leave an address in addr_reg; the merged block loads the value
 * from it. *out_cbb receives the bblock current after the merge. */
3792 handle_unbox_gsharedvt (MonoCompile *cfg, MonoClass *klass, MonoInst *obj, MonoBasicBlock **out_cbb)
3794 MonoInst *addr, *klass_inst, *is_ref, *args[16];
3795 MonoBasicBlock *is_ref_bb, *is_nullable_bb, *end_bb;
3799 klass_inst = emit_get_gsharedvt_info_klass (cfg, klass, MONO_RGCTX_INFO_KLASS);
3805 args [1] = klass_inst;
/* dynamic castclass first: throws on type mismatch */
3808 obj = mono_emit_jit_icall (cfg, mono_object_castclass_unbox, args);
3810 NEW_BBLOCK (cfg, is_ref_bb);
3811 NEW_BBLOCK (cfg, is_nullable_bb);
3812 NEW_BBLOCK (cfg, end_bb);
3813 is_ref = emit_get_gsharedvt_info_klass (cfg, klass, MONO_RGCTX_INFO_CLASS_BOX_TYPE);
3814 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, is_ref->dreg, 1);
3815 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_IBEQ, is_ref_bb);
3817 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, is_ref->dreg, 2);
3818 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_IBEQ, is_nullable_bb);
3820 /* This will contain either the address of the unboxed vtype, or an address of the temporary where the ref is stored */
3821 addr_reg = alloc_dreg (cfg, STACK_MP);
/* value-type fallthrough path */
3825 NEW_BIALU_IMM (cfg, addr, OP_ADD_IMM, addr_reg, obj->dreg, sizeof (MonoObject));
3826 MONO_ADD_INS (cfg->cbb, addr);
3828 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, end_bb);
3831 MONO_START_BB (cfg, is_ref_bb);
3833 /* Save the ref to a temporary */
3834 dreg = alloc_ireg (cfg);
3835 EMIT_NEW_VARLOADA_VREG (cfg, addr, dreg, &klass->byval_arg);
3836 addr->dreg = addr_reg;
3837 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORE_MEMBASE_REG, addr->dreg, 0, obj->dreg);
3838 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, end_bb);
3841 MONO_START_BB (cfg, is_nullable_bb);
3844 MonoInst *addr = emit_get_gsharedvt_info_klass (cfg, klass, MONO_RGCTX_INFO_NULLABLE_CLASS_UNBOX);
3845 MonoInst *unbox_call;
3846 MonoMethodSignature *unbox_sig;
3849 var = mono_compile_create_var (cfg, &klass->byval_arg, OP_LOCAL);
/* build the signature 'T (object)' by hand */
3851 unbox_sig = mono_mempool_alloc0 (cfg->mempool, MONO_SIZEOF_METHOD_SIGNATURE + (1 * sizeof (MonoType *)));
3852 unbox_sig->ret = &klass->byval_arg;
3853 unbox_sig->param_count = 1;
3854 unbox_sig->params [0] = &mono_defaults.object_class->byval_arg;
3855 unbox_call = mono_emit_calli (cfg, unbox_sig, &obj, addr, NULL, NULL);
3857 EMIT_NEW_VARLOADA_VREG (cfg, addr, unbox_call->dreg, &klass->byval_arg);
3858 addr->dreg = addr_reg;
3861 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, end_bb);
3864 MONO_START_BB (cfg, end_bb);
/* materialize the unboxed value from whichever address path was taken */
3867 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, ins, &klass->byval_arg, addr_reg, 0);
3869 *out_cbb = cfg->cbb;
3875 * Returns NULL and set the cfg exception on error.
/* Emit object allocation for KLASS (FOR_BOX distinguishes box allocations,
 * which can use a specialized managed allocator). Strategy selection:
 *  - shared generics: klass/vtable comes from the RGCTX; the icall is
 *    mono_object_new (with domain arg) under MONO_OPT_SHARED, else
 *    mono_object_new_specific; a managed allocator is preferred when
 *    available and not MONO_OPT_SHARED;
 *  - MONO_OPT_SHARED: mono_object_new (domain, klass) icall;
 *  - AOT out-of-line corlib classes: compact mono_helper_newobj_mscorlib
 *    helper keyed by type-token index, to avoid relocations;
 *  - default: managed allocator if the GC provides one, else the
 *    allocation function chosen by mono_class_get_allocation_ftn
 *    (pass_lw callers get the instance size in gpointer words first). */
3878 handle_alloc (MonoCompile *cfg, MonoClass *klass, gboolean for_box, int context_used)
3880 MonoInst *iargs [2];
3886 MonoInst *iargs [2];
3888 MonoMethod *managed_alloc = mono_gc_get_managed_allocator (klass, for_box);
3890 if (cfg->opt & MONO_OPT_SHARED)
3891 rgctx_info = MONO_RGCTX_INFO_KLASS;
3893 rgctx_info = MONO_RGCTX_INFO_VTABLE;
3894 data = emit_get_rgctx_klass (cfg, context_used, klass, rgctx_info);
3896 if (cfg->opt & MONO_OPT_SHARED) {
3897 EMIT_NEW_DOMAINCONST (cfg, iargs [0]);
3899 alloc_ftn = mono_object_new;
3902 alloc_ftn = mono_object_new_specific;
3905 if (managed_alloc && !(cfg->opt & MONO_OPT_SHARED))
3906 return mono_emit_method_call (cfg, managed_alloc, iargs, NULL);
3908 return mono_emit_jit_icall (cfg, alloc_ftn, iargs);
3911 if (cfg->opt & MONO_OPT_SHARED) {
3912 EMIT_NEW_DOMAINCONST (cfg, iargs [0]);
3913 EMIT_NEW_CLASSCONST (cfg, iargs [1], klass);
3915 alloc_ftn = mono_object_new;
3916 } else if (cfg->compile_aot && cfg->cbb->out_of_line && klass->type_token && klass->image == mono_defaults.corlib && !klass->generic_class) {
3917 /* This happens often in argument checking code, eg. throw new FooException... */
3918 /* Avoid relocations and save some space by calling a helper function specialized to mscorlib */
3919 EMIT_NEW_ICONST (cfg, iargs [0], mono_metadata_token_index (klass->type_token));
3920 return mono_emit_jit_icall (cfg, mono_helper_newobj_mscorlib, iargs);
3922 MonoVTable *vtable = mono_class_vtable (cfg->domain, klass);
3923 MonoMethod *managed_alloc = NULL;
/* vtable creation failed: surface a TypeLoadException via the cfg */
3927 mono_cfg_set_exception (cfg, MONO_EXCEPTION_TYPE_LOAD);
3928 cfg->exception_ptr = klass;
3932 #ifndef MONO_CROSS_COMPILE
3933 managed_alloc = mono_gc_get_managed_allocator (klass, for_box);
3936 if (managed_alloc) {
3937 EMIT_NEW_VTABLECONST (cfg, iargs [0], vtable);
3938 return mono_emit_method_call (cfg, managed_alloc, iargs, NULL);
3940 alloc_ftn = mono_class_get_allocation_ftn (vtable, for_box, &pass_lw);
/* pass_lw: allocator wants the rounded-up size in gpointer words */
3942 guint32 lw = vtable->klass->instance_size;
3943 lw = ((lw + (sizeof (gpointer) - 1)) & ~(sizeof (gpointer) - 1)) / sizeof (gpointer);
3944 EMIT_NEW_ICONST (cfg, iargs [0], lw);
3945 EMIT_NEW_VTABLECONST (cfg, iargs [1], vtable);
3948 EMIT_NEW_VTABLECONST (cfg, iargs [0], vtable);
3952 return mono_emit_jit_icall (cfg, alloc_ftn, iargs);
3956 * Returns NULL and set the cfg exception on error.
/* Emit box of VAL into an object of KLASS.
 *  - Nullable<T>: delegate to Nullable<T>.Box (indirect via RGCTX when
 *    generic sharing is active, direct call otherwise — mirrors
 *    handle_unbox_nullable);
 *  - gsharedvt classes: branch at runtime on CLASS_BOX_TYPE
 *    (1 = ref: load the value through its address, no allocation;
 *     2 = Nullable: call the RGCTX-resolved Box through a hand-built
 *     'object (T)' signature; otherwise allocate and OP_STOREV the vtype);
 *  - plain case: handle_alloc + store the value after the object header.
 * *out_cbb is updated to the bblock current on exit. */
3959 handle_box (MonoCompile *cfg, MonoInst *val, MonoClass *klass, int context_used, MonoBasicBlock **out_cbb)
3961 MonoInst *alloc, *ins;
3963 *out_cbb = cfg->cbb;
3965 if (mono_class_is_nullable (klass)) {
3966 MonoMethod* method = mono_class_get_method_from_name (klass, "Box", 1);
3969 /* FIXME: What if the class is shared? We might not
3970 have to get the method address from the RGCTX. */
3971 MonoInst *addr = emit_get_rgctx_method (cfg, context_used, method,
3972 MONO_RGCTX_INFO_GENERIC_METHOD_CODE);
3973 MonoInst *rgctx = emit_get_rgctx (cfg, cfg->current_method, context_used);
3975 return mono_emit_calli (cfg, mono_method_signature (method), &val, addr, NULL, rgctx);
3977 gboolean pass_vtable, pass_mrgctx;
3978 MonoInst *rgctx_arg = NULL;
3980 check_method_sharing (cfg, method, &pass_vtable, &pass_mrgctx);
3981 g_assert (!pass_mrgctx);
3984 MonoVTable *vtable = mono_class_vtable (cfg->domain, method->klass);
3987 EMIT_NEW_VTABLECONST (cfg, rgctx_arg, vtable);
3990 return mono_emit_method_call_full (cfg, method, NULL, FALSE, &val, NULL, NULL, rgctx_arg);
3994 if (mini_is_gsharedvt_klass (cfg, klass)) {
3995 MonoBasicBlock *is_ref_bb, *is_nullable_bb, *end_bb;
3996 MonoInst *res, *is_ref, *src_var, *addr;
3999 dreg = alloc_ireg (cfg);
4001 NEW_BBLOCK (cfg, is_ref_bb);
4002 NEW_BBLOCK (cfg, is_nullable_bb);
4003 NEW_BBLOCK (cfg, end_bb);
4004 is_ref = emit_get_gsharedvt_info_klass (cfg, klass, MONO_RGCTX_INFO_CLASS_BOX_TYPE);
4005 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, is_ref->dreg, 1);
4006 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_IBEQ, is_ref_bb);
4008 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, is_ref->dreg, 2);
4009 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_IBEQ, is_nullable_bb);
/* vtype fallthrough: allocate and copy the value after the header */
4012 alloc = handle_alloc (cfg, klass, TRUE, context_used);
4015 EMIT_NEW_STORE_MEMBASE_TYPE (cfg, ins, &klass->byval_arg, alloc->dreg, sizeof (MonoObject), val->dreg);
4016 ins->opcode = OP_STOREV_MEMBASE;
4018 EMIT_NEW_UNALU (cfg, res, OP_MOVE, dreg, alloc->dreg);
4019 res->type = STACK_OBJ;
4021 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, end_bb);
/* ref path: boxing a reference is the identity — load it through its address */
4024 MONO_START_BB (cfg, is_ref_bb);
4025 addr_reg = alloc_ireg (cfg);
4027 /* val is a vtype, so has to load the value manually */
4028 src_var = get_vreg_to_inst (cfg, val->dreg);
4030 src_var = mono_compile_create_var_for_vreg (cfg, &klass->byval_arg, OP_LOCAL, val->dreg);
4031 EMIT_NEW_VARLOADA (cfg, addr, src_var, src_var->inst_vtype);
4032 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, dreg, addr->dreg, 0);
4033 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, end_bb);
4036 MONO_START_BB (cfg, is_nullable_bb);
4039 MonoInst *addr = emit_get_gsharedvt_info_klass (cfg, klass,
4040 MONO_RGCTX_INFO_NULLABLE_CLASS_BOX);
4042 MonoMethodSignature *box_sig;
4045 * klass is Nullable<T>, need to call Nullable<T>.Box () using a gsharedvt signature, but we cannot
4046 * construct that method at JIT time, so have to do things by hand.
4048 box_sig = mono_mempool_alloc0 (cfg->mempool, MONO_SIZEOF_METHOD_SIGNATURE + (1 * sizeof (MonoType *)));
4049 box_sig->ret = &mono_defaults.object_class->byval_arg;
4050 box_sig->param_count = 1;
4051 box_sig->params [0] = &klass->byval_arg;
4052 box_call = mono_emit_calli (cfg, box_sig, &val, addr, NULL, NULL);
4053 EMIT_NEW_UNALU (cfg, res, OP_MOVE, dreg, box_call->dreg);
4054 res->type = STACK_OBJ;
4058 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, end_bb);
4060 MONO_START_BB (cfg, end_bb);
4062 *out_cbb = cfg->cbb;
/* non-gsharedvt plain box */
4066 alloc = handle_alloc (cfg, klass, TRUE, context_used);
4070 EMIT_NEW_STORE_MEMBASE_TYPE (cfg, ins, &klass->byval_arg, alloc->dreg, sizeof (MonoObject), val->dreg);
/* Return whether KLASS has at least one covariant/contravariant generic
 * parameter instantiated with a reference type. Such classes need the
 * slow (cached) cast path, since variance makes a simple klass compare
 * insufficient. Open generic containers are only considered when
 * context_used is set (shared generic code). */
4077 mini_class_has_reference_variant_generic_argument (MonoCompile *cfg, MonoClass *klass, int context_used)
4080 MonoGenericContainer *container;
4081 MonoGenericInst *ginst;
4083 if (klass->generic_class) {
4084 container = klass->generic_class->container_class->generic_container;
4085 ginst = klass->generic_class->context.class_inst;
4086 } else if (klass->generic_container && context_used) {
4087 container = klass->generic_container;
4088 ginst = container->context.class_inst;
4093 for (i = 0; i < container->type_argc; ++i) {
/* skip invariant parameters */
4095 if (!(mono_generic_container_get_param_info (container, i)->flags & (MONO_GEN_PARAM_VARIANT|MONO_GEN_PARAM_COVARIANT)))
4097 type = ginst->type_argv [i];
4098 if (mini_type_is_reference (cfg, type))
/* TRUE when an isinst/castclass against KLASS cannot be decided by a simple
 * inline klass/vtable comparison: interfaces, arrays (rank != 0),
 * Nullable<T>, MarshalByRef classes, sealed classes, and generic type
 * variables (VAR/MVAR) all require the slower cached-cast machinery. */
4104 #define is_complex_isinst(klass) ((klass->flags & TYPE_ATTRIBUTE_INTERFACE) || klass->rank || mono_class_is_nullable (klass) || mono_class_is_marshalbyref (klass) || (klass->flags & TYPE_ATTRIBUTE_SEALED) || klass->byval_arg.type == MONO_TYPE_VAR || klass->byval_arg.type == MONO_TYPE_MVAR)
/* Emit a call to the castclass-with-cache marshal wrapper for KLASS.
 * Cast details are saved/reset around the call so --debug=casts still
 * reports the right classes. *out_bblock tracks the current bblock,
 * which save_cast_details may change. */
4107 emit_castclass_with_cache (MonoCompile *cfg, MonoClass *klass, MonoInst **args, MonoBasicBlock **out_bblock)
4109 MonoMethod *mono_castclass;
4112 mono_castclass = mono_marshal_get_castclass_with_cache ();
4114 save_cast_details (cfg, klass, args [0]->dreg, TRUE, out_bblock);
4115 res = mono_emit_method_call (cfg, mono_castclass, args, NULL);
4116 reset_cast_details (cfg);
4117 *out_bblock = cfg->cbb;
/* Non-shared-generics front end for the cached castclass wrapper: build the
 * (obj, klass, cache) argument triple and delegate to
 * emit_castclass_with_cache. The cache slot is a process-lifetime gpointer:
 * AOT uses a CASTCLASS_CACHE patch keyed by (method_index << 16 | counter)
 * so each call site gets a unique slot; JIT allocates one from the domain. */
4123 emit_castclass_with_cache_nonshared (MonoCompile *cfg, MonoInst *obj, MonoClass *klass, MonoBasicBlock **out_bblock)
4132 EMIT_NEW_CLASSCONST (cfg, args [1], klass);
4135 if (cfg->compile_aot) {
4136 /* Each CASTCLASS_CACHE patch needs a unique index which identifies the call site */
4137 cfg->castclass_cache_index ++;
4138 idx = (cfg->method_index << 16) | cfg->castclass_cache_index;
4139 EMIT_NEW_AOTCONST (cfg, args [2], MONO_PATCH_INFO_CASTCLASS_CACHE, GINT_TO_POINTER (idx));
4141 EMIT_NEW_PCONST (cfg, args [2], mono_domain_alloc0 (cfg->domain, sizeof (gpointer)));
4144 /*The wrapper doesn't inline well so the bloat of inlining doesn't pay off.*/
4146 return emit_castclass_with_cache (cfg, klass, args, out_bblock);
4150 * Returns NULL and set the cfg exception on error.
/* Emit the castclass opcode for SRC. Path selection, fastest first:
 *  - non-shared + variant generic args: cached castclass wrapper;
 *  - non-shared + MarshalByRef/interface: inline the per-class castclass
 *    marshal wrapper (inline_method), charging *inline_costs;
 *  - shared or complex classes: cached wrapper fed the (obj, klass, cache)
 *    triple from the RGCTX;
 *  - otherwise inline checks: null short-circuits to is_null_bb; interfaces
 *    go through mini_emit_iface_cast; sealed classes could use a single
 *    vtable compare (currently disabled — see the remoting FIXME) and fall
 *    back to a klass compare; everything else uses mini_emit_castclass_inst.
 * Failures throw InvalidCastException. */
4153 handle_castclass (MonoCompile *cfg, MonoClass *klass, MonoInst *src, guint8 *ip, MonoBasicBlock **out_bb, int *inline_costs)
4155 MonoBasicBlock *is_null_bb;
4156 int obj_reg = src->dreg;
4157 int vtable_reg = alloc_preg (cfg);
4159 MonoInst *klass_inst = NULL, *res;
4160 MonoBasicBlock *bblock;
4164 context_used = mini_class_check_context_used (cfg, klass);
4166 if (!context_used && mini_class_has_reference_variant_generic_argument (cfg, klass, context_used)) {
4167 res = emit_castclass_with_cache_nonshared (cfg, src, klass, &bblock);
4168 (*inline_costs) += 2;
4171 } else if (!context_used && (mono_class_is_marshalbyref (klass) || klass->flags & TYPE_ATTRIBUTE_INTERFACE)) {
4172 MonoMethod *mono_castclass;
4173 MonoInst *iargs [1];
4176 mono_castclass = mono_marshal_get_castclass (klass);
4179 save_cast_details (cfg, klass, src->dreg, TRUE, &bblock);
4180 costs = inline_method (cfg, mono_castclass, mono_method_signature (mono_castclass),
4181 iargs, ip, cfg->real_offset, TRUE, &bblock);
4182 reset_cast_details (cfg);
4183 CHECK_CFG_EXCEPTION;
4184 g_assert (costs > 0);
4186 cfg->real_offset += 5;
4188 (*inline_costs) += costs;
4197 if(mini_class_has_reference_variant_generic_argument (cfg, klass, context_used) || is_complex_isinst (klass)) {
4198 MonoInst *cache_ins;
4200 cache_ins = emit_get_rgctx_klass (cfg, context_used, klass, MONO_RGCTX_INFO_CAST_CACHE);
4205 /* klass - it's the second element of the cache entry*/
4206 EMIT_NEW_LOAD_MEMBASE (cfg, args [1], OP_LOAD_MEMBASE, alloc_preg (cfg), cache_ins->dreg, sizeof (gpointer));
4209 args [2] = cache_ins;
4211 return emit_castclass_with_cache (cfg, klass, args, out_bb);
4214 klass_inst = emit_get_rgctx_klass (cfg, context_used, klass, MONO_RGCTX_INFO_KLASS);
4217 NEW_BBLOCK (cfg, is_null_bb);
/* null always passes castclass */
4219 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, obj_reg, 0);
4220 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBEQ, is_null_bb);
4222 save_cast_details (cfg, klass, obj_reg, FALSE, NULL);
4224 if (klass->flags & TYPE_ATTRIBUTE_INTERFACE) {
4225 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, vtable_reg, obj_reg, MONO_STRUCT_OFFSET (MonoObject, vtable));
4226 mini_emit_iface_cast (cfg, vtable_reg, klass, NULL, NULL);
4228 int klass_reg = alloc_preg (cfg);
4230 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, vtable_reg, obj_reg, MONO_STRUCT_OFFSET (MonoObject, vtable));
4232 if (!klass->rank && !cfg->compile_aot && !(cfg->opt & MONO_OPT_SHARED) && (klass->flags & TYPE_ATTRIBUTE_SEALED)) {
4233 /* the remoting code is broken, access the class for now */
4234 if (0) { /*FIXME what exactly is broken? This change refers to r39380 from 2005 and mention some remoting fixes were due.*/
4235 MonoVTable *vt = mono_class_vtable (cfg->domain, klass);
4237 mono_cfg_set_exception (cfg, MONO_EXCEPTION_TYPE_LOAD);
4238 cfg->exception_ptr = klass;
4241 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, vtable_reg, vt);
4243 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, vtable_reg, MONO_STRUCT_OFFSET (MonoVTable, klass));
4244 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, klass_reg, klass);
4246 MONO_EMIT_NEW_COND_EXC (cfg, NE_UN, "InvalidCastException");
4248 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, vtable_reg, MONO_STRUCT_OFFSET (MonoVTable, klass));
4249 mini_emit_castclass_inst (cfg, obj_reg, klass_reg, klass, klass_inst, is_null_bb);
4253 MONO_START_BB (cfg, is_null_bb);
4255 reset_cast_details (cfg);
4266 * Returns NULL and set the cfg exception on error.
/* Emit isinst for SRC: yields SRC on success, NULL (0) on failure.
 * Complex/variant classes go through the isinst-with-cache wrapper with an
 * RGCTX-provided cache slot. Otherwise an inline three-block structure is
 * built (false_bb -> result 0, is_null_bb -> pass-through, end_bb) with
 * specialized checks for interfaces, arrays (rank + cast_class, including
 * the enum/object special cases and the SZARRAY bounds==NULL vector check),
 * Nullable, sealed classes (vtable compare path currently disabled — see
 * the remoting FIXME, falls back to klass compare), and the generic
 * mini_emit_isninst_cast_inst fallback. */
4269 handle_isinst (MonoCompile *cfg, MonoClass *klass, MonoInst *src, int context_used)
4272 MonoBasicBlock *is_null_bb, *false_bb, *end_bb;
4273 int obj_reg = src->dreg;
4274 int vtable_reg = alloc_preg (cfg);
4275 int res_reg = alloc_ireg_ref (cfg);
4276 MonoInst *klass_inst = NULL;
4281 if(mini_class_has_reference_variant_generic_argument (cfg, klass, context_used) || is_complex_isinst (klass)) {
4282 MonoMethod *mono_isinst = mono_marshal_get_isinst_with_cache ();
4283 MonoInst *cache_ins;
4285 cache_ins = emit_get_rgctx_klass (cfg, context_used, klass, MONO_RGCTX_INFO_CAST_CACHE);
4290 /* klass - it's the second element of the cache entry*/
4291 EMIT_NEW_LOAD_MEMBASE (cfg, args [1], OP_LOAD_MEMBASE, alloc_preg (cfg), cache_ins->dreg, sizeof (gpointer));
4294 args [2] = cache_ins;
4296 return mono_emit_method_call (cfg, mono_isinst, args, NULL);
4299 klass_inst = emit_get_rgctx_klass (cfg, context_used, klass, MONO_RGCTX_INFO_KLASS);
4302 NEW_BBLOCK (cfg, is_null_bb);
4303 NEW_BBLOCK (cfg, false_bb);
4304 NEW_BBLOCK (cfg, end_bb);
4306 /* Do the assignment at the beginning, so the other assignment can be if converted */
4307 EMIT_NEW_UNALU (cfg, ins, OP_MOVE, res_reg, obj_reg);
4308 ins->type = STACK_OBJ;
/* isinst (null) yields null, which is the pre-loaded result */
4311 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, obj_reg, 0);
4312 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_IBEQ, is_null_bb);
4314 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, vtable_reg, obj_reg, MONO_STRUCT_OFFSET (MonoObject, vtable));
4316 if (klass->flags & TYPE_ATTRIBUTE_INTERFACE) {
4317 g_assert (!context_used);
4318 /* the is_null_bb target simply copies the input register to the output */
4319 mini_emit_iface_cast (cfg, vtable_reg, klass, false_bb, is_null_bb);
4321 int klass_reg = alloc_preg (cfg);
4324 int rank_reg = alloc_preg (cfg);
4325 int eclass_reg = alloc_preg (cfg);
4327 g_assert (!context_used);
/* array path: rank must match, then compare element (cast) classes */
4328 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADU1_MEMBASE, rank_reg, vtable_reg, MONO_STRUCT_OFFSET (MonoVTable, rank));
4329 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, rank_reg, klass->rank);
4330 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBNE_UN, false_bb);
4331 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, vtable_reg, MONO_STRUCT_OFFSET (MonoVTable, klass));
4332 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, eclass_reg, klass_reg, MONO_STRUCT_OFFSET (MonoClass, cast_class));
4333 if (klass->cast_class == mono_defaults.object_class) {
4334 int parent_reg = alloc_preg (cfg);
4335 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, parent_reg, eclass_reg, MONO_STRUCT_OFFSET (MonoClass, parent));
4336 mini_emit_class_check_branch (cfg, parent_reg, mono_defaults.enum_class->parent, OP_PBNE_UN, is_null_bb);
4337 mini_emit_class_check_branch (cfg, eclass_reg, mono_defaults.enum_class, OP_PBEQ, is_null_bb);
4338 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, false_bb);
4339 } else if (klass->cast_class == mono_defaults.enum_class->parent) {
4340 mini_emit_class_check_branch (cfg, eclass_reg, mono_defaults.enum_class->parent, OP_PBEQ, is_null_bb);
4341 mini_emit_class_check_branch (cfg, eclass_reg, mono_defaults.enum_class, OP_PBEQ, is_null_bb);
4342 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, false_bb);
4343 } else if (klass->cast_class == mono_defaults.enum_class) {
4344 mini_emit_class_check_branch (cfg, eclass_reg, mono_defaults.enum_class, OP_PBEQ, is_null_bb);
4345 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, false_bb);
4346 } else if (klass->cast_class->flags & TYPE_ATTRIBUTE_INTERFACE) {
4347 mini_emit_iface_class_cast (cfg, eclass_reg, klass->cast_class, false_bb, is_null_bb);
4349 if ((klass->rank == 1) && (klass->byval_arg.type == MONO_TYPE_SZARRAY)) {
4350 /* Check that the object is a vector too */
4351 int bounds_reg = alloc_preg (cfg);
4352 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, bounds_reg, obj_reg, MONO_STRUCT_OFFSET (MonoArray, bounds));
4353 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, bounds_reg, 0);
4354 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBNE_UN, false_bb);
4357 /* the is_null_bb target simply copies the input register to the output */
4358 mini_emit_isninst_cast (cfg, eclass_reg, klass->cast_class, false_bb, is_null_bb);
4360 } else if (mono_class_is_nullable (klass)) {
4361 g_assert (!context_used);
4362 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, vtable_reg, MONO_STRUCT_OFFSET (MonoVTable, klass));
4363 /* the is_null_bb target simply copies the input register to the output */
4364 mini_emit_isninst_cast (cfg, klass_reg, klass->cast_class, false_bb, is_null_bb);
4366 if (!cfg->compile_aot && !(cfg->opt & MONO_OPT_SHARED) && (klass->flags & TYPE_ATTRIBUTE_SEALED)) {
4367 g_assert (!context_used);
4368 /* the remoting code is broken, access the class for now */
4369 if (0) {/*FIXME what exactly is broken? This change refers to r39380 from 2005 and mention some remoting fixes were due.*/
4370 MonoVTable *vt = mono_class_vtable (cfg->domain, klass);
4372 mono_cfg_set_exception (cfg, MONO_EXCEPTION_TYPE_LOAD);
4373 cfg->exception_ptr = klass;
4376 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, vtable_reg, vt);
4378 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, vtable_reg, MONO_STRUCT_OFFSET (MonoVTable, klass));
4379 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, klass_reg, klass);
4381 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBNE_UN, false_bb);
4382 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, is_null_bb);
4384 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, vtable_reg, MONO_STRUCT_OFFSET (MonoVTable, klass));
4385 /* the is_null_bb target simply copies the input register to the output */
4386 mini_emit_isninst_cast_inst (cfg, klass_reg, klass, klass_inst, false_bb, is_null_bb);
/* failure: result is NULL */
4391 MONO_START_BB (cfg, false_bb);
4393 MONO_EMIT_NEW_PCONST (cfg, res_reg, 0);
4394 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, end_bb);
4396 MONO_START_BB (cfg, is_null_bb);
4398 MONO_START_BB (cfg, end_bb);
/* Emit the (remoting-aware) CISINST opcode: a three-valued isinst used by
 * transparent-proxy code. Result in an I4: 0 = instance of KLASS,
 * 1 = not an instance, 2 = proxy whose type cannot be determined.
 * With DISABLE_REMOTING the proxy paths (false2_bb/no_proxy_bb) are compiled
 * out and reaching them aborts via g_error. */
4404 handle_cisinst (MonoCompile *cfg, MonoClass *klass, MonoInst *src)
4406 /* This opcode takes as input an object reference and a class, and returns:
4407 0) if the object is an instance of the class,
4408 1) if the object is not instance of the class,
4409 2) if the object is a proxy whose type cannot be determined */
4412 #ifndef DISABLE_REMOTING
4413 MonoBasicBlock *true_bb, *false_bb, *false2_bb, *end_bb, *no_proxy_bb, *interface_fail_bb;
4415 MonoBasicBlock *true_bb, *false_bb, *end_bb;
4417 int obj_reg = src->dreg;
4418 int dreg = alloc_ireg (cfg);
4420 #ifndef DISABLE_REMOTING
4421 int klass_reg = alloc_preg (cfg);
4424 NEW_BBLOCK (cfg, true_bb);
4425 NEW_BBLOCK (cfg, false_bb);
4426 NEW_BBLOCK (cfg, end_bb);
4427 #ifndef DISABLE_REMOTING
4428 NEW_BBLOCK (cfg, false2_bb);
4429 NEW_BBLOCK (cfg, no_proxy_bb);
/* null is "not an instance" (result 1) */
4432 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, obj_reg, 0);
4433 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBEQ, false_bb);
4435 if (klass->flags & TYPE_ATTRIBUTE_INTERFACE) {
4436 #ifndef DISABLE_REMOTING
4437 NEW_BBLOCK (cfg, interface_fail_bb);
4440 tmp_reg = alloc_preg (cfg);
4441 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, tmp_reg, obj_reg, MONO_STRUCT_OFFSET (MonoObject, vtable));
4442 #ifndef DISABLE_REMOTING
/* interface check failed: maybe it is a proxy with custom type info */
4443 mini_emit_iface_cast (cfg, tmp_reg, klass, interface_fail_bb, true_bb);
4444 MONO_START_BB (cfg, interface_fail_bb);
4445 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, tmp_reg, MONO_STRUCT_OFFSET (MonoVTable, klass));
4447 mini_emit_class_check_branch (cfg, klass_reg, mono_defaults.transparent_proxy_class, OP_PBNE_UN, false_bb);
4449 tmp_reg = alloc_preg (cfg);
4450 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, tmp_reg, obj_reg, MONO_STRUCT_OFFSET (MonoTransparentProxy, custom_type_info));
4451 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, tmp_reg, 0);
4452 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBNE_UN, false2_bb);
4454 mini_emit_iface_cast (cfg, tmp_reg, klass, false_bb, true_bb);
4457 #ifndef DISABLE_REMOTING
/* non-interface: distinguish proxies from ordinary objects */
4458 tmp_reg = alloc_preg (cfg);
4459 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, tmp_reg, obj_reg, MONO_STRUCT_OFFSET (MonoObject, vtable));
4460 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, tmp_reg, MONO_STRUCT_OFFSET (MonoVTable, klass));
4462 mini_emit_class_check_branch (cfg, klass_reg, mono_defaults.transparent_proxy_class, OP_PBNE_UN, no_proxy_bb);
4463 tmp_reg = alloc_preg (cfg);
4464 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, tmp_reg, obj_reg, MONO_STRUCT_OFFSET (MonoTransparentProxy, remote_class));
4465 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, tmp_reg, MONO_STRUCT_OFFSET (MonoRemoteClass, proxy_class));
4467 tmp_reg = alloc_preg (cfg);
4468 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, tmp_reg, obj_reg, MONO_STRUCT_OFFSET (MonoTransparentProxy, custom_type_info));
4469 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, tmp_reg, 0);
4470 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBEQ, no_proxy_bb);
4472 mini_emit_isninst_cast (cfg, klass_reg, klass, false2_bb, true_bb);
4473 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, false2_bb);
4475 MONO_START_BB (cfg, no_proxy_bb);
4477 mini_emit_isninst_cast (cfg, klass_reg, klass, false_bb, true_bb);
4479 g_error ("transparent proxy support is disabled while trying to JIT code that uses it");
4483 MONO_START_BB (cfg, false_bb);
4485 MONO_EMIT_NEW_ICONST (cfg, dreg, 1);
4486 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, end_bb);
4488 #ifndef DISABLE_REMOTING
4489 MONO_START_BB (cfg, false2_bb);
4491 MONO_EMIT_NEW_ICONST (cfg, dreg, 2);
4492 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, end_bb);
4495 MONO_START_BB (cfg, true_bb);
4497 MONO_EMIT_NEW_ICONST (cfg, dreg, 0);
4499 MONO_START_BB (cfg, end_bb);
/* materialize the result as an I4 instruction for the evaluation stack */
4502 MONO_INST_NEW (cfg, ins, OP_ICONST);
4504 ins->type = STACK_I4;
/* Emit the (remoting-aware) CCASTCLASS opcode: castclass that tolerates
 * proxies whose type cannot be decided at runtime. Result in an I4:
 * 0 = instance of KLASS, 1 = undecidable proxy; any other object that is
 * not an instance throws InvalidCastException. Cast details are saved for
 * --debug=casts. With DISABLE_REMOTING the proxy machinery is compiled out
 * and reaching it aborts via g_error. */
4510 handle_ccastclass (MonoCompile *cfg, MonoClass *klass, MonoInst *src)
4512 /* This opcode takes as input an object reference and a class, and returns:
4513 0) if the object is an instance of the class,
4514 1) if the object is a proxy whose type cannot be determined
4515 an InvalidCastException exception is thrown otherwhise*/
4518 #ifndef DISABLE_REMOTING
4519 MonoBasicBlock *end_bb, *ok_result_bb, *no_proxy_bb, *interface_fail_bb, *fail_1_bb;
4521 MonoBasicBlock *ok_result_bb;
4523 int obj_reg = src->dreg;
4524 int dreg = alloc_ireg (cfg);
4525 int tmp_reg = alloc_preg (cfg);
4527 #ifndef DISABLE_REMOTING
4528 int klass_reg = alloc_preg (cfg);
4529 NEW_BBLOCK (cfg, end_bb);
4532 NEW_BBLOCK (cfg, ok_result_bb);
/* null always casts successfully (result 0) */
4534 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, obj_reg, 0);
4535 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBEQ, ok_result_bb);
4537 save_cast_details (cfg, klass, obj_reg, FALSE, NULL);
4539 if (klass->flags & TYPE_ATTRIBUTE_INTERFACE) {
4540 #ifndef DISABLE_REMOTING
4541 NEW_BBLOCK (cfg, interface_fail_bb);
4543 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, tmp_reg, obj_reg, MONO_STRUCT_OFFSET (MonoObject, vtable));
/* interface check failed: only a proxy with custom type info survives */
4544 mini_emit_iface_cast (cfg, tmp_reg, klass, interface_fail_bb, ok_result_bb);
4545 MONO_START_BB (cfg, interface_fail_bb);
4546 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, tmp_reg, MONO_STRUCT_OFFSET (MonoVTable, klass));
4548 mini_emit_class_check (cfg, klass_reg, mono_defaults.transparent_proxy_class);
4550 tmp_reg = alloc_preg (cfg);
4551 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, tmp_reg, obj_reg, MONO_STRUCT_OFFSET (MonoTransparentProxy, custom_type_info));
4552 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, tmp_reg, 0);
4553 MONO_EMIT_NEW_COND_EXC (cfg, EQ, "InvalidCastException");
4555 MONO_EMIT_NEW_ICONST (cfg, dreg, 1);
4556 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, end_bb);
4558 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, tmp_reg, obj_reg, MONO_STRUCT_OFFSET (MonoObject, vtable));
4559 mini_emit_iface_cast (cfg, tmp_reg, klass, NULL, NULL);
4560 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, ok_result_bb);
4563 #ifndef DISABLE_REMOTING
4564 NEW_BBLOCK (cfg, no_proxy_bb);
/* non-interface: route proxies with custom type info to the isinst check */
4566 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, tmp_reg, obj_reg, MONO_STRUCT_OFFSET (MonoObject, vtable));
4567 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, tmp_reg, MONO_STRUCT_OFFSET (MonoVTable, klass));
4568 mini_emit_class_check_branch (cfg, klass_reg, mono_defaults.transparent_proxy_class, OP_PBNE_UN, no_proxy_bb);
4570 tmp_reg = alloc_preg (cfg);
4571 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, tmp_reg, obj_reg, MONO_STRUCT_OFFSET (MonoTransparentProxy, remote_class));
4572 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, tmp_reg, MONO_STRUCT_OFFSET (MonoRemoteClass, proxy_class));
4574 tmp_reg = alloc_preg (cfg);
4575 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, tmp_reg, obj_reg, MONO_STRUCT_OFFSET (MonoTransparentProxy, custom_type_info));
4576 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, tmp_reg, 0);
4577 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBEQ, no_proxy_bb);
4579 NEW_BBLOCK (cfg, fail_1_bb);
4581 mini_emit_isninst_cast (cfg, klass_reg, klass, fail_1_bb, ok_result_bb);
4583 MONO_START_BB (cfg, fail_1_bb);
4585 MONO_EMIT_NEW_ICONST (cfg, dreg, 1);
4586 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, end_bb);
4588 MONO_START_BB (cfg, no_proxy_bb);
/* ordinary object: full castclass, throws on mismatch */
4590 mini_emit_castclass (cfg, obj_reg, klass_reg, klass, ok_result_bb);
4592 g_error ("Transparent proxy support is disabled while trying to JIT code that uses it");
4596 MONO_START_BB (cfg, ok_result_bb);
4598 MONO_EMIT_NEW_ICONST (cfg, dreg, 0);
4600 #ifndef DISABLE_REMOTING
4601 MONO_START_BB (cfg, end_bb);
/* materialize the result as an I4 instruction for the evaluation stack */
4605 MONO_INST_NEW (cfg, ins, OP_ICONST);
4607 ins->type = STACK_I4;
/*
 * handle_delegate_ctor:
 *
 *   Inline the work of mono_delegate_ctor: allocate the delegate object, fill in
 * its target, method, method_code and invoke_impl/method_ptr fields, and arrange
 * for a delegate trampoline (AOT patch or JIT-created) to resolve the actual
 * invoke implementation lazily.
 *
 * NOTE(review): several lines (return type, returns, some braces/#else arms) are
 * not visible in this view; comments describe only the code shown.
 */
4613 * Returns NULL and set the cfg exception on error.
4615 static G_GNUC_UNUSED MonoInst*
4616 handle_delegate_ctor (MonoCompile *cfg, MonoClass *klass, MonoInst *target, MonoMethod *method, int context_used, gboolean virtual)
4620 gpointer trampoline;
4621 MonoInst *obj, *method_ins, *tramp_ins;
4625 // FIXME reenable optimisation for virtual case
4630 MonoMethod *invoke = mono_get_delegate_invoke (klass);
4633 if (!mono_get_delegate_virtual_invoke_impl (mono_method_signature (invoke), context_used ? NULL : method))
/* Allocate the delegate object itself. */
4637 obj = handle_alloc (cfg, klass, FALSE, 0);
4641 /* Inline the contents of mono_delegate_ctor */
4643 /* Set target field */
4644 /* Optimize away setting of NULL target */
4645 if (!(target->opcode == OP_PCONST && target->inst_p0 == 0)) {
4646 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORE_MEMBASE_REG, obj->dreg, MONO_STRUCT_OFFSET (MonoDelegate, target), target->dreg);
/* Storing an object reference into a heap object: notify the GC if write barriers are in use. */
4647 if (cfg->gen_write_barriers) {
4648 dreg = alloc_preg (cfg);
4649 EMIT_NEW_BIALU_IMM (cfg, ptr, OP_PADD_IMM, dreg, obj->dreg, MONO_STRUCT_OFFSET (MonoDelegate, target));
4650 emit_write_barrier (cfg, ptr, target);
4654 /* Set method field */
4655 method_ins = emit_get_rgctx_method (cfg, context_used, method, MONO_RGCTX_INFO_METHOD);
4656 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORE_MEMBASE_REG, obj->dreg, MONO_STRUCT_OFFSET (MonoDelegate, method), method_ins->dreg);
4659 * To avoid looking up the compiled code belonging to the target method
4660 * in mono_delegate_trampoline (), we allocate a per-domain memory slot to
4661 * store it, and we fill it after the method has been compiled.
4663 if (!method->dynamic && !(cfg->opt & MONO_OPT_SHARED)) {
4664 MonoInst *code_slot_ins;
4667 code_slot_ins = emit_get_rgctx_method (cfg, context_used, method, MONO_RGCTX_INFO_METHOD_DELEGATE_CODE);
/* Look up (or lazily create) the per-domain method -> code-slot hash under the domain lock. */
4669 domain = mono_domain_get ();
4670 mono_domain_lock (domain);
4671 if (!domain_jit_info (domain)->method_code_hash)
4672 domain_jit_info (domain)->method_code_hash = g_hash_table_new (NULL, NULL);
4673 code_slot = g_hash_table_lookup (domain_jit_info (domain)->method_code_hash, method);
4675 code_slot = mono_domain_alloc0 (domain, sizeof (gpointer));
4676 g_hash_table_insert (domain_jit_info (domain)->method_code_hash, method, code_slot);
4678 mono_domain_unlock (domain);
4680 if (cfg->compile_aot)
4681 EMIT_NEW_AOTCONST (cfg, code_slot_ins, MONO_PATCH_INFO_METHOD_CODE_SLOT, method);
4683 EMIT_NEW_PCONST (cfg, code_slot_ins, code_slot);
4685 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORE_MEMBASE_REG, obj->dreg, MONO_STRUCT_OFFSET (MonoDelegate, method_code), code_slot_ins->dreg);
/* AOT: emit a patchable delegate-trampoline constant describing (klass, method, virtual). */
4688 if (cfg->compile_aot) {
4689 MonoDelegateClassMethodPair *del_tramp;
4691 del_tramp = mono_mempool_alloc0 (cfg->mempool, sizeof (MonoDelegateClassMethodPair));
4692 del_tramp->klass = klass;
4693 del_tramp->method = context_used ? NULL : method;
4694 del_tramp->virtual = virtual;
4695 EMIT_NEW_AOTCONST (cfg, tramp_ins, MONO_PATCH_INFO_DELEGATE_TRAMPOLINE, del_tramp);
/* JIT: create the trampoline (virtual or normal) eagerly. */
4698 trampoline = mono_create_delegate_virtual_trampoline (cfg->domain, klass, context_used ? NULL : method);
4700 trampoline = mono_create_delegate_trampoline_info (cfg->domain, klass, context_used ? NULL : method);
4701 EMIT_NEW_PCONST (cfg, tramp_ins, trampoline);
4704 /* Set invoke_impl field */
4706 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORE_MEMBASE_REG, obj->dreg, MONO_STRUCT_OFFSET (MonoDelegate, invoke_impl), tramp_ins->dreg);
/* Non-AOT: tramp_ins points at a MonoDelegateTrampInfo; copy its fields into the delegate. */
4708 dreg = alloc_preg (cfg);
4709 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, dreg, tramp_ins->dreg, MONO_STRUCT_OFFSET (MonoDelegateTrampInfo, invoke_impl));
4710 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORE_MEMBASE_REG, obj->dreg, MONO_STRUCT_OFFSET (MonoDelegate, invoke_impl), dreg);
4712 dreg = alloc_preg (cfg);
4713 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, dreg, tramp_ins->dreg, MONO_STRUCT_OFFSET (MonoDelegateTrampInfo, method_ptr));
4714 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORE_MEMBASE_REG, obj->dreg, MONO_STRUCT_OFFSET (MonoDelegate, method_ptr), dreg);
4717 /* All the checks which are in mono_delegate_ctor () are done by the delegate trampoline */
/*
 * handle_array_new:
 *
 *   Emit a call to the rank-specific mono_array_new_va icall to allocate a
 * multi-dimensional array.  SP holds the dimension arguments, IP is the IL
 * location (presumably for debug info — not visible in this view).
 * Marks the method as using varargs, which also disables LLVM compilation.
 */
4723 handle_array_new (MonoCompile *cfg, int rank, MonoInst **sp, unsigned char *ip)
4725 MonoJitICallInfo *info;
4727 /* Need to register the icall so it gets an icall wrapper */
4728 info = mono_get_array_new_va_icall (rank);
4730 cfg->flags |= MONO_CFG_HAS_VARARGS;
4732 /* mono_array_new_va () needs a vararg calling convention */
4733 cfg->disable_llvm = TRUE;
4735 /* FIXME: This uses info->sig, but it should use the signature of the wrapper */
4736 return mono_emit_native_call (cfg, mono_icall_get_wrapper (info), info->sig, sp);
/*
 * mono_emit_load_got_addr:
 *
 *   Emit an OP_LOAD_GOTADDR instruction at the very start of the entry basic
 * block to initialize cfg->got_var, and a dummy use in the exit block so the
 * variable stays live for the whole method.  No-op if there is no got_var or
 * it was already allocated.
 */
4740 mono_emit_load_got_addr (MonoCompile *cfg)
4742 MonoInst *getaddr, *dummy_use;
4744 if (!cfg->got_var || cfg->got_var_allocated)
4747 MONO_INST_NEW (cfg, getaddr, OP_LOAD_GOTADDR);
4748 getaddr->cil_code = cfg->header->code;
4749 getaddr->dreg = cfg->got_var->dreg;
4751 /* Add it to the start of the first bblock */
/* Prepend manually when the entry block already has code; otherwise a plain append suffices. */
4752 if (cfg->bb_entry->code) {
4753 getaddr->next = cfg->bb_entry->code;
4754 cfg->bb_entry->code = getaddr;
4757 MONO_ADD_INS (cfg->bb_entry, getaddr);
4759 cfg->got_var_allocated = TRUE;
4762 * Add a dummy use to keep the got_var alive, since real uses might
4763 * only be generated by the back ends.
4764 * Add it to end_bblock, so the variable's lifetime covers the whole
4766 * It would be better to make the usage of the got var explicit in all
4767 * cases when the backend needs it (i.e. calls, throw etc.), so this
4768 * wouldn't be needed.
4770 NEW_DUMMY_USE (cfg, dummy_use, cfg->got_var);
4771 MONO_ADD_INS (cfg->bb_exit, dummy_use);
/* Inline size limit (IL bytes), read once from MONO_INLINELIMIT or defaulted
 * to INLINE_LENGTH_LIMIT; see mono_method_check_inlining (). */
4774 static int inline_limit;
4775 static gboolean inline_limit_inited;
/*
 * mono_method_check_inlining:
 *
 *   Decide whether METHOD may be inlined into the method being compiled in CFG.
 * Rejects on: inlining disabled, generic sharing, deep inline nesting, noinline/
 * synchronized/MBR methods, IL size over the (env-tunable) limit, classes whose
 * cctor cannot safely be run or proven run, declarative security, and (on soft
 * float targets) R4 parameters or return values.
 *
 * NOTE(review): the return statements and some braces are not visible in this
 * view; comments describe only the conditions shown.
 */
4778 mono_method_check_inlining (MonoCompile *cfg, MonoMethod *method)
4780 MonoMethodHeaderSummary header;
4782 #ifdef MONO_ARCH_SOFT_FLOAT_FALLBACK
4783 MonoMethodSignature *sig = mono_method_signature (method);
4787 if (cfg->disable_inline)
4789 if (cfg->generic_sharing_context)
/* Cap recursion depth of nested inlining. */
4792 if (cfg->inline_depth > 10)
4795 #ifdef MONO_ARCH_HAVE_LMF_OPS
4796 if (((method->iflags & METHOD_IMPL_ATTRIBUTE_INTERNAL_CALL) ||
4797 (method->flags & METHOD_ATTRIBUTE_PINVOKE_IMPL)) &&
4798 !MONO_TYPE_ISSTRUCT (signature->ret) && !mini_class_is_system_array (method->klass))
/* Cheap header summary; avoids decoding the full method header. */
4803 if (!mono_method_get_header_summary (method, &header))
4806 /*runtime, icall and pinvoke are checked by summary call*/
4807 if ((method->iflags & METHOD_IMPL_ATTRIBUTE_NOINLINING) ||
4808 (method->iflags & METHOD_IMPL_ATTRIBUTE_SYNCHRONIZED) ||
4809 (mono_class_is_marshalbyref (method->klass)) ||
4813 /* also consider num_locals? */
4814 /* Do the size check early to avoid creating vtables */
4815 if (!inline_limit_inited) {
4816 if (g_getenv ("MONO_INLINELIMIT"))
4817 inline_limit = atoi (g_getenv ("MONO_INLINELIMIT"));
4819 inline_limit = INLINE_LENGTH_LIMIT;
4820 inline_limit_inited = TRUE;
/* AggressiveInlining bypasses the size limit. */
4822 if (header.code_size >= inline_limit && !(method->iflags & METHOD_IMPL_ATTRIBUTE_AGGRESSIVE_INLINING))
4826 * if we can initialize the class of the method right away, we do,
4827 * otherwise we don't allow inlining if the class needs initialization,
4828 * since it would mean inserting a call to mono_runtime_class_init()
4829 * inside the inlined code
4831 if (!(cfg->opt & MONO_OPT_SHARED)) {
4832 /* The AggressiveInlining hint is a good excuse to force that cctor to run. */
4833 if (method->iflags & METHOD_IMPL_ATTRIBUTE_AGGRESSIVE_INLINING) {
4834 vtable = mono_class_vtable (cfg->domain, method->klass);
4837 if (!cfg->compile_aot)
4838 mono_runtime_class_init (vtable);
4839 } else if (method->klass->flags & TYPE_ATTRIBUTE_BEFORE_FIELD_INIT) {
4840 if (cfg->run_cctors && method->klass->has_cctor) {
4841 /*FIXME it would easier and lazier to just use mono_class_try_get_vtable */
4842 if (!method->klass->runtime_info)
4843 /* No vtable created yet */
4845 vtable = mono_class_vtable (cfg->domain, method->klass);
4848 /* This makes so that inline cannot trigger */
4849 /* .cctors: too many apps depend on them */
4850 /* running with a specific order... */
4851 if (! vtable->initialized)
4853 mono_runtime_class_init (vtable);
4855 } else if (mono_class_needs_cctor_run (method->klass, NULL)) {
4856 if (!method->klass->runtime_info)
4857 /* No vtable created yet */
4859 vtable = mono_class_vtable (cfg->domain, method->klass);
4862 if (!vtable->initialized)
4867 * If we're compiling for shared code
4868 * the cctor will need to be run at aot method load time, for example,
4869 * or at the end of the compilation of the inlining method.
4871 if (mono_class_needs_cctor_run (method->klass, NULL) && !((method->klass->flags & TYPE_ATTRIBUTE_BEFORE_FIELD_INIT)))
4876 * CAS - do not inline methods with declarative security
4877 * Note: this has to be before any possible return TRUE;
4879 if (mono_security_method_has_declsec (method))
4882 #ifdef MONO_ARCH_SOFT_FLOAT_FALLBACK
/* Soft float: R4 arguments/returns would need fallback handling, so refuse. */
4883 if (mono_arch_is_soft_float ()) {
4885 if (sig->ret && sig->ret->type == MONO_TYPE_R4)
4887 for (i = 0; i < sig->param_count; ++i)
4888 if (!sig->params [i]->byref && sig->params [i]->type == MONO_TYPE_R4)
/* Explicit per-compile blacklist. */
4893 if (g_list_find (cfg->dont_inline, method))
/*
 * mini_field_access_needs_cctor_run:
 *
 *   Return whether a static-field access from METHOD to a field of KLASS
 * requires emitting a class-initialization check.  JIT compiles can skip it
 * when the vtable is already initialized; beforefieldinit classes accessed
 * from their own compile, classes with no cctor to run, and instance methods
 * of the same class (the cctor already ran before the call) also skip it.
 *
 * NOTE(review): return statements are not visible in this view.
 */
4900 mini_field_access_needs_cctor_run (MonoCompile *cfg, MonoMethod *method, MonoClass *klass, MonoVTable *vtable)
4902 if (!cfg->compile_aot) {
4904 if (vtable->initialized)
4908 if (klass->flags & TYPE_ATTRIBUTE_BEFORE_FIELD_INIT) {
4909 if (cfg->method == method)
4913 if (!mono_class_needs_cctor_run (klass, method))
4916 if (! (method->flags & METHOD_ATTRIBUTE_STATIC) && (klass == method->klass))
4917 /* The initialization is already done before the method is called */
/*
 * mini_emit_ldelema_1_ins:
 *
 *   Emit IR computing the address of ARR [INDEX] for a one-dimensional array of
 * KLASS elements, with an optional bounds check (BCHECK).  Uses an x86/amd64
 * LEA fast path for power-of-two element sizes; for gsharedvt variable-size
 * classes the element size comes from the RGCTX at runtime.
 *
 * NOTE(review): the returns, #else arms and closing braces are not visible in
 * this view; comments describe only the code shown.
 */
4924 mini_emit_ldelema_1_ins (MonoCompile *cfg, MonoClass *klass, MonoInst *arr, MonoInst *index, gboolean bcheck)
4928 int mult_reg, add_reg, array_reg, index_reg, index2_reg;
4931 if (mini_is_gsharedvt_variable_klass (cfg, klass)) {
4934 mono_class_init (klass);
4935 size = mono_class_array_element_size (klass);
4938 mult_reg = alloc_preg (cfg);
4939 array_reg = arr->dreg;
4940 index_reg = index->dreg;
4942 #if SIZEOF_REGISTER == 8
4943 /* The array reg is 64 bits but the index reg is only 32 */
4944 if (COMPILE_LLVM (cfg)) {
4946 index2_reg = index_reg;
4948 index2_reg = alloc_preg (cfg);
/* Sign-extend the 32-bit index before 64-bit address arithmetic. */
4949 MONO_EMIT_NEW_UNALU (cfg, OP_SEXT_I4, index2_reg, index_reg);
4952 if (index->type == STACK_I8) {
4953 index2_reg = alloc_preg (cfg);
4954 MONO_EMIT_NEW_UNALU (cfg, OP_LCONV_TO_I4, index2_reg, index_reg);
4956 index2_reg = index_reg;
4961 MONO_EMIT_BOUNDS_CHECK (cfg, array_reg, MonoArray, max_length, index2_reg);
4963 #if defined(TARGET_X86) || defined(TARGET_AMD64)
/* Fast path: fold the scale into a single LEA when the element size is 1/2/4/8. */
4964 if (size == 1 || size == 2 || size == 4 || size == 8) {
4965 static const int fast_log2 [] = { 1, 0, 1, -1, 2, -1, -1, -1, 3 };
4967 EMIT_NEW_X86_LEA (cfg, ins, array_reg, index2_reg, fast_log2 [size], MONO_STRUCT_OFFSET (MonoArray, vector));
4968 ins->klass = mono_class_get_element_class (klass);
4969 ins->type = STACK_MP;
4975 add_reg = alloc_ireg_mp (cfg);
4978 MonoInst *rgctx_ins;
/* gsharedvt: element size is only known at runtime; fetch it via the RGCTX. */
4981 g_assert (cfg->generic_sharing_context);
4982 context_used = mini_class_check_context_used (cfg, klass);
4983 g_assert (context_used);
4984 rgctx_ins = emit_get_gsharedvt_info (cfg, &klass->byval_arg, MONO_RGCTX_INFO_ARRAY_ELEMENT_SIZE);
4985 MONO_EMIT_NEW_BIALU (cfg, OP_IMUL, mult_reg, index2_reg, rgctx_ins->dreg);
4987 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_MUL_IMM, mult_reg, index2_reg, size);
/* addr = array + index*size + offsetof (MonoArray, vector) */
4989 MONO_EMIT_NEW_BIALU (cfg, OP_PADD, add_reg, array_reg, mult_reg);
4990 NEW_BIALU_IMM (cfg, ins, OP_PADD_IMM, add_reg, add_reg, MONO_STRUCT_OFFSET (MonoArray, vector));
4991 ins->klass = mono_class_get_element_class (klass);
4992 ins->type = STACK_MP;
4993 MONO_ADD_INS (cfg->cbb, ins);
4998 #ifndef MONO_ARCH_EMULATE_MUL_DIV
/*
 * mini_emit_ldelema_2_ins:
 *
 *   Emit IR computing the address of a two-dimensional array element
 * ARR [INDEX1, INDEX2], including per-dimension lower-bound adjustment and
 * range checks against the array's bounds descriptors.  Only compiled when
 * the architecture has native multiply (guard above).
 *
 * NOTE(review): some lines (returns, #else arms, braces) are not visible in
 * this view.
 */
5000 mini_emit_ldelema_2_ins (MonoCompile *cfg, MonoClass *klass, MonoInst *arr, MonoInst *index_ins1, MonoInst *index_ins2)
5002 int bounds_reg = alloc_preg (cfg);
5003 int add_reg = alloc_ireg_mp (cfg);
5004 int mult_reg = alloc_preg (cfg);
5005 int mult2_reg = alloc_preg (cfg);
5006 int low1_reg = alloc_preg (cfg);
5007 int low2_reg = alloc_preg (cfg);
5008 int high1_reg = alloc_preg (cfg);
5009 int high2_reg = alloc_preg (cfg);
5010 int realidx1_reg = alloc_preg (cfg);
5011 int realidx2_reg = alloc_preg (cfg);
5012 int sum_reg = alloc_preg (cfg);
5013 int index1, index2, tmpreg;
5017 mono_class_init (klass);
5018 size = mono_class_array_element_size (klass);
5020 index1 = index_ins1->dreg;
5021 index2 = index_ins2->dreg;
5023 #if SIZEOF_REGISTER == 8
5024 /* The array reg is 64 bits but the index reg is only 32 */
5025 if (COMPILE_LLVM (cfg)) {
/* Sign-extend both 32-bit indexes into fresh registers for 64-bit arithmetic. */
5028 tmpreg = alloc_preg (cfg);
5029 MONO_EMIT_NEW_UNALU (cfg, OP_SEXT_I4, tmpreg, index1);
5031 tmpreg = alloc_preg (cfg);
5032 MONO_EMIT_NEW_UNALU (cfg, OP_SEXT_I4, tmpreg, index2);
5036 // FIXME: Do we need to do something here for i8 indexes, like in ldelema_1_ins ?
5040 /* range checking */
5041 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, bounds_reg,
5042 arr->dreg, MONO_STRUCT_OFFSET (MonoArray, bounds));
/* Dimension 0: realidx1 = index1 - lower_bound; unsigned compare vs length. */
5044 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADI4_MEMBASE, low1_reg,
5045 bounds_reg, MONO_STRUCT_OFFSET (MonoArrayBounds, lower_bound));
5046 MONO_EMIT_NEW_BIALU (cfg, OP_PSUB, realidx1_reg, index1, low1_reg);
5047 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADI4_MEMBASE, high1_reg,
5048 bounds_reg, MONO_STRUCT_OFFSET (MonoArrayBounds, length));
5049 MONO_EMIT_NEW_BIALU (cfg, OP_COMPARE, -1, high1_reg, realidx1_reg);
5050 MONO_EMIT_NEW_COND_EXC (cfg, LE_UN, "IndexOutOfRangeException");
/* Dimension 1: second MonoArrayBounds entry lives sizeof (MonoArrayBounds) further in. */
5052 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADI4_MEMBASE, low2_reg,
5053 bounds_reg, sizeof (MonoArrayBounds) + MONO_STRUCT_OFFSET (MonoArrayBounds, lower_bound));
5054 MONO_EMIT_NEW_BIALU (cfg, OP_PSUB, realidx2_reg, index2, low2_reg);
5055 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADI4_MEMBASE, high2_reg,
5056 bounds_reg, sizeof (MonoArrayBounds) + MONO_STRUCT_OFFSET (MonoArrayBounds, length));
5057 MONO_EMIT_NEW_BIALU (cfg, OP_COMPARE, -1, high2_reg, realidx2_reg);
5058 MONO_EMIT_NEW_COND_EXC (cfg, LE_UN, "IndexOutOfRangeException");
/* addr = arr + (realidx1 * dim2_length + realidx2) * size + offsetof (MonoArray, vector) */
5060 MONO_EMIT_NEW_BIALU (cfg, OP_PMUL, mult_reg, high2_reg, realidx1_reg);
5061 MONO_EMIT_NEW_BIALU (cfg, OP_PADD, sum_reg, mult_reg, realidx2_reg);
5062 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_PMUL_IMM, mult2_reg, sum_reg, size);
5063 MONO_EMIT_NEW_BIALU (cfg, OP_PADD, add_reg, mult2_reg, arr->dreg);
5064 NEW_BIALU_IMM (cfg, ins, OP_PADD_IMM, add_reg, add_reg, MONO_STRUCT_OFFSET (MonoArray, vector));
5066 ins->type = STACK_MP;
5068 MONO_ADD_INS (cfg->cbb, ins);
/*
 * mini_emit_ldelema_ins:
 *
 *   Emit IR computing an element address for the Get/Set/Address methods of a
 * multi-dimensional array CMETHOD.  Rank 1 and (with native mul) rank 2 get
 * inline address computation; higher ranks call a marshalling helper.
 * IS_SET excludes the trailing value argument from the rank count.
 */
5075 mini_emit_ldelema_ins (MonoCompile *cfg, MonoMethod *cmethod, MonoInst **sp, unsigned char *ip, gboolean is_set)
5079 MonoMethod *addr_method;
5082 rank = mono_method_signature (cmethod)->param_count - (is_set? 1: 0);
5085 return mini_emit_ldelema_1_ins (cfg, cmethod->klass->element_class, sp [0], sp [1], TRUE);
5087 #ifndef MONO_ARCH_EMULATE_MUL_DIV
5088 /* emit_ldelema_2 depends on OP_LMUL */
5089 if (rank == 2 && (cfg->opt & MONO_OPT_INTRINS)) {
5090 return mini_emit_ldelema_2_ins (cfg, cmethod->klass->element_class, sp [0], sp [1], sp [2]);
/* General case: call the generated Address helper for this rank/element size. */
5094 element_size = mono_class_array_element_size (cmethod->klass->element_class);
5095 addr_method = mono_marshal_get_array_address (rank, element_size);
5096 addr = mono_emit_method_call (cfg, addr_method, sp, NULL);
/*
 * always_insert_breakpoint:
 *
 *   Default break policy: honor every break instruction / Debugger.Break ().
 */
5101 static MonoBreakPolicy
5102 always_insert_breakpoint (MonoMethod *method)
5104 return MONO_BREAK_POLICY_ALWAYS;
/* Current policy callback; replaced via mono_set_break_policy (). */
5107 static MonoBreakPolicyFunc break_policy_func = always_insert_breakpoint;
5110 * mono_set_break_policy:
5111 * policy_callback: the new callback function
5113 * Allow embedders to decide whether to actually obey breakpoint instructions
5114 * (both break IL instructions and Debugger.Break () method calls), for example
5115 * to not allow an app to be aborted by a perfectly valid IL opcode when executing
5116 * untrusted or semi-trusted code.
5118 * @policy_callback will be called every time a break point instruction needs to
5119 * be inserted with the method argument being the method that calls Debugger.Break()
5120 * or has the IL break instruction. The callback should return #MONO_BREAK_POLICY_NEVER
5121 * if it wants the breakpoint to not be effective in the given method.
5122 * #MONO_BREAK_POLICY_ALWAYS is the default.
5125 mono_set_break_policy (MonoBreakPolicyFunc policy_callback)
5127 if (policy_callback)
5128 break_policy_func = policy_callback;
/* NULL resets to the default always-break policy. */
5130 break_policy_func = always_insert_breakpoint;
/*
 * should_insert_brekpoint:
 *
 *   Ask the installed break policy whether a breakpoint should be emitted for
 * METHOD.  (NOTE(review): the identifier's "brekpoint" spelling is a long-
 * standing typo; renaming would break internal callers, so it is kept.)
 */
5134 should_insert_brekpoint (MonoMethod *method) {
5135 switch (break_policy_func (method)) {
5136 case MONO_BREAK_POLICY_ALWAYS:
5138 case MONO_BREAK_POLICY_NEVER:
5140 case MONO_BREAK_POLICY_ON_DBG:
/* mdb (the old Mono debugger) was removed; ON_DBG degrades with a warning. */
5141 g_warning ("mdb no longer supported");
5144 g_warning ("Incorrect value returned from break policy callback");
5149 /* optimize the simple GetGenericValueImpl/SetGenericValueImpl generic icalls */
/*
 * emit_array_generic_access:
 *
 *   Inline Array.GetGenericValueImpl / SetGenericValueImpl: compute the element
 * address (bounds already checked by the managed callers) and copy between it
 * and the by-ref value argument.  Stores of reference types get a write barrier.
 */
5151 emit_array_generic_access (MonoCompile *cfg, MonoMethodSignature *fsig, MonoInst **args, int is_set)
5153 MonoInst *addr, *store, *load;
5154 MonoClass *eklass = mono_class_from_mono_type (fsig->params [2]);
5156 /* the bounds check is already done by the callers */
5157 addr = mini_emit_ldelema_1_ins (cfg, eklass, args [0], args [1], FALSE);
/* is_set: value -> array element; otherwise array element -> value. */
5159 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, load, &eklass->byval_arg, args [2]->dreg, 0);
5160 EMIT_NEW_STORE_MEMBASE_TYPE (cfg, store, &eklass->byval_arg, addr->dreg, 0, load->dreg);
5161 if (mini_type_is_reference (cfg, fsig->params [2]))
5162 emit_write_barrier (cfg, addr, load);
5164 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, load, &eklass->byval_arg, addr->dreg, 0);
5165 EMIT_NEW_STORE_MEMBASE_TYPE (cfg, store, &eklass->byval_arg, args [2]->dreg, 0, load->dreg);
/*
 * generic_class_is_reference_type:
 *
 *   Return whether KLASS is a reference type, resolving generic sharing via
 * mini_type_is_reference ().
 */
5172 generic_class_is_reference_type (MonoCompile *cfg, MonoClass *klass)
5174 return mini_type_is_reference (cfg, &klass->byval_arg);
/*
 * emit_array_store:
 *
 *   Emit IR for storing SP [2] into SP [0] [SP [1]].  Reference-type stores
 * with safety checks go through the virtual stelemref helper (which performs
 * the array covariance check); value types and null stores are emitted inline,
 * with a constant-index fast path and a write barrier where needed.
 */
5178 emit_array_store (MonoCompile *cfg, MonoClass *klass, MonoInst **sp, gboolean safety_checks)
/* Storing a non-null reference with checks enabled: use the stelemref helper. */
5180 if (safety_checks && generic_class_is_reference_type (cfg, klass) &&
5181 !(sp [2]->opcode == OP_PCONST && sp [2]->inst_p0 == NULL)) {
5182 MonoClass *obj_array = mono_array_class_get_cached (mono_defaults.object_class, 1);
5183 MonoMethod *helper = mono_marshal_get_virtual_stelemref (obj_array);
5184 MonoInst *iargs [3];
5187 mono_class_setup_vtable (obj_array);
5188 g_assert (helper->slot);
5190 if (sp [0]->type != STACK_OBJ)
5192 if (sp [2]->type != STACK_OBJ)
5199 return mono_emit_method_call (cfg, helper, iargs, sp [0]);
/* gsharedvt variable-size element: store through a computed address. */
5203 if (mini_is_gsharedvt_variable_klass (cfg, klass)) {
5206 // FIXME-VT: OP_ICONST optimization
5207 addr = mini_emit_ldelema_1_ins (cfg, klass, sp [0], sp [1], TRUE);
5208 EMIT_NEW_STORE_MEMBASE_TYPE (cfg, ins, &klass->byval_arg, addr->dreg, 0, sp [2]->dreg);
5209 ins->opcode = OP_STOREV_MEMBASE;
/* Constant index: fold the offset into the store instruction. */
5210 } else if (sp [1]->opcode == OP_ICONST) {
5211 int array_reg = sp [0]->dreg;
5212 int index_reg = sp [1]->dreg;
5213 int offset = (mono_class_array_element_size (klass) * sp [1]->inst_c0) + MONO_STRUCT_OFFSET (MonoArray, vector);
5216 MONO_EMIT_BOUNDS_CHECK (cfg, array_reg, MonoArray, max_length, index_reg);
5217 EMIT_NEW_STORE_MEMBASE_TYPE (cfg, ins, &klass->byval_arg, array_reg, offset, sp [2]->dreg);
5219 MonoInst *addr = mini_emit_ldelema_1_ins (cfg, klass, sp [0], sp [1], safety_checks);
5220 EMIT_NEW_STORE_MEMBASE_TYPE (cfg, ins, &klass->byval_arg, addr->dreg, 0, sp [2]->dreg);
5221 if (generic_class_is_reference_type (cfg, klass))
5222 emit_write_barrier (cfg, addr, sp [2]);
/*
 * emit_array_unsafe_access:
 *
 *   Inline Array.UnsafeStore / UnsafeLoad: element access with no bounds or
 * type-safety checks.  The element class comes from the value parameter for
 * stores and from the return type for loads.
 */
5229 emit_array_unsafe_access (MonoCompile *cfg, MonoMethodSignature *fsig, MonoInst **args, int is_set)
5234 eklass = mono_class_from_mono_type (fsig->params [2]);
5236 eklass = mono_class_from_mono_type (fsig->ret);
5239 return emit_array_store (cfg, eklass, args, FALSE);
5241 MonoInst *ins, *addr = mini_emit_ldelema_1_ins (cfg, eklass, args [0], args [1], FALSE);
5242 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, ins, &eklass->byval_arg, addr->dreg, 0);
/*
 * is_unsafe_mov_compatible:
 *
 *   Decide whether a value of PARAM_KLASS may be reinterpreted as RETURN_KLASS
 * by Array.UnsafeMov without conversion: both must be valuetypes with no GC
 * references, both structs or both primitives/enums (the JIT handles the two
 * shapes differently), neither R4/R8 (floats live in different registers),
 * and both must have the same value size.
 */
5248 is_unsafe_mov_compatible (MonoClass *param_klass, MonoClass *return_klass)
5252 //Only allow for valuetypes
5253 if (!param_klass->valuetype || !return_klass->valuetype)
5257 if (param_klass->has_references || return_klass->has_references)
5260 /* Avoid mixing structs and primitive types/enums, they need to be handled differently in the JIT */
/* FIX: "&param_klass" had been mangled into "¶m_klass" ("&para" collapsed into the
 * pilcrow HTML entity); restored the address-of expressions on the next two lines. */
5261 if ((MONO_TYPE_ISSTRUCT (&param_klass->byval_arg) && !MONO_TYPE_ISSTRUCT (&return_klass->byval_arg)) ||
5262 (!MONO_TYPE_ISSTRUCT (&param_klass->byval_arg) && MONO_TYPE_ISSTRUCT (&return_klass->byval_arg)))
5265 if (param_klass->byval_arg.type == MONO_TYPE_R4 || param_klass->byval_arg.type == MONO_TYPE_R8 ||
5266 return_klass->byval_arg.type == MONO_TYPE_R4 || return_klass->byval_arg.type == MONO_TYPE_R8)
5269 //And have the same size
5270 if (mono_class_value_size (param_klass, &align) != mono_class_value_size (return_klass, &align))
/*
 * emit_array_unsafe_mov:
 *
 *   Inline Array.UnsafeMov when the parameter and return types (or their
 * element classes, for rank-1 arrays) are layout-compatible valuetypes per
 * is_unsafe_mov_compatible ().
 */
5276 emit_array_unsafe_mov (MonoCompile *cfg, MonoMethodSignature *fsig, MonoInst **args)
5278 MonoClass *param_klass = mono_class_from_mono_type (fsig->params [0]);
5279 MonoClass *return_klass = mono_class_from_mono_type (fsig->ret);
5281 //Valuetypes that are semantically equivalent
5282 if (is_unsafe_mov_compatible (param_klass, return_klass))
5285 //Arrays of valuetypes that are semantically equivalent
5286 if (param_klass->rank == 1 && return_klass->rank == 1 && is_unsafe_mov_compatible (param_klass->element_class, return_klass->element_class))
/*
 * mini_emit_inst_for_ctor:
 *
 *   Try to replace a constructor call with intrinsic IR: SIMD intrinsics first
 * (when enabled), then native-types intrinsics.  Returns the emitted
 * instruction, or falls through to the generic path when no intrinsic applies.
 */
5293 mini_emit_inst_for_ctor (MonoCompile *cfg, MonoMethod *cmethod, MonoMethodSignature *fsig, MonoInst **args)
5295 #ifdef MONO_ARCH_SIMD_INTRINSICS
5296 MonoInst *ins = NULL;
5298 if (cfg->opt & MONO_OPT_SIMD) {
5299 ins = mono_emit_simd_intrinsics (cfg, cmethod, fsig, args);
5305 return mono_emit_native_types_intrinsics (cfg, cmethod, fsig, args);
/*
 * emit_memory_barrier:
 *
 *   Append an OP_MEMORY_BARRIER of the given KIND (e.g. FullBarrier) to the
 * current basic block and return it.
 */
5309 emit_memory_barrier (MonoCompile *cfg, int kind)
5311 MonoInst *ins = NULL;
5312 MONO_INST_NEW (cfg, ins, OP_MEMORY_BARRIER);
5313 MONO_ADD_INS (cfg->cbb, ins);
5314 ins->backend.memory_barrier_kind = kind;
/*
 * llvm_emit_inst_for_method:
 *
 *   LLVM-backend-specific intrinsics: map Math.Sin/Cos/Sqrt/Abs(double) to
 * single float opcodes, and Math.Min/Max on I4/U4/I8/U8 to conditional-move
 * min/max opcodes when MONO_OPT_CMOV is enabled.  Returns the emitted
 * instruction or (presumably) NULL when nothing matches.
 */
5320 llvm_emit_inst_for_method (MonoCompile *cfg, MonoMethod *cmethod, MonoMethodSignature *fsig, MonoInst **args)
5322 MonoInst *ins = NULL;
5325 /* The LLVM backend supports these intrinsics */
5326 if (cmethod->klass == mono_defaults.math_class) {
5327 if (strcmp (cmethod->name, "Sin") == 0) {
5329 } else if (strcmp (cmethod->name, "Cos") == 0) {
5331 } else if (strcmp (cmethod->name, "Sqrt") == 0) {
5333 } else if (strcmp (cmethod->name, "Abs") == 0 && fsig->params [0]->type == MONO_TYPE_R8) {
/* Unary double intrinsic: one opcode, R8 result in a fresh float register. */
5338 MONO_INST_NEW (cfg, ins, opcode);
5339 ins->type = STACK_R8;
5340 ins->dreg = mono_alloc_freg (cfg);
5341 ins->sreg1 = args [0]->dreg;
5342 MONO_ADD_INS (cfg->cbb, ins);
5346 if (cfg->opt & MONO_OPT_CMOV) {
5347 if (strcmp (cmethod->name, "Min") == 0) {
5348 if (fsig->params [0]->type == MONO_TYPE_I4)
5350 if (fsig->params [0]->type == MONO_TYPE_U4)
5351 opcode = OP_IMIN_UN;
5352 else if (fsig->params [0]->type == MONO_TYPE_I8)
5354 else if (fsig->params [0]->type == MONO_TYPE_U8)
5355 opcode = OP_LMIN_UN;
5356 } else if (strcmp (cmethod->name, "Max") == 0) {
5357 if (fsig->params [0]->type == MONO_TYPE_I4)
5359 if (fsig->params [0]->type == MONO_TYPE_U4)
5360 opcode = OP_IMAX_UN;
5361 else if (fsig->params [0]->type == MONO_TYPE_I8)
5363 else if (fsig->params [0]->type == MONO_TYPE_U8)
5364 opcode = OP_LMAX_UN;
/* Binary min/max intrinsic: stack type follows the operand width. */
5369 MONO_INST_NEW (cfg, ins, opcode);
5370 ins->type = fsig->params [0]->type == MONO_TYPE_I4 ? STACK_I4 : STACK_I8;
5371 ins->dreg = mono_alloc_ireg (cfg);
5372 ins->sreg1 = args [0]->dreg;
5373 ins->sreg2 = args [1]->dreg;
5374 MONO_ADD_INS (cfg->cbb, ins);
/*
 * mini_emit_inst_for_sharable_method:
 *
 *   Intrinsics that are safe under generic sharing: the Array.UnsafeStore/
 * UnsafeLoad/UnsafeMov internal helpers.  Returns the emitted instruction,
 * or falls through when the method is not one of them.
 */
5382 mini_emit_inst_for_sharable_method (MonoCompile *cfg, MonoMethod *cmethod, MonoMethodSignature *fsig, MonoInst **args)
5384 if (cmethod->klass == mono_defaults.array_class) {
5385 if (strcmp (cmethod->name, "UnsafeStore") == 0)
5386 return emit_array_unsafe_access (cfg, fsig, args, TRUE);
5387 else if (strcmp (cmethod->name, "UnsafeLoad") == 0)
5388 return emit_array_unsafe_access (cfg, fsig, args, FALSE);
5389 else if (strcmp (cmethod->name, "UnsafeMov") == 0)
5390 return emit_array_unsafe_mov (cfg, fsig, args);
5397 mini_emit_inst_for_method (MonoCompile *cfg, MonoMethod *cmethod, MonoMethodSignature *fsig, MonoInst **args)
5399 MonoInst *ins = NULL;
5401 static MonoClass *runtime_helpers_class = NULL;
5402 if (! runtime_helpers_class)
5403 runtime_helpers_class = mono_class_from_name (mono_defaults.corlib,
5404 "System.Runtime.CompilerServices", "RuntimeHelpers");
5406 if (cmethod->klass == mono_defaults.string_class) {
5407 if (strcmp (cmethod->name, "get_Chars") == 0) {
5408 int dreg = alloc_ireg (cfg);
5409 int index_reg = alloc_preg (cfg);
5410 int mult_reg = alloc_preg (cfg);
5411 int add_reg = alloc_preg (cfg);
5413 #if SIZEOF_REGISTER == 8
5414 /* The array reg is 64 bits but the index reg is only 32 */
5415 MONO_EMIT_NEW_UNALU (cfg, OP_SEXT_I4, index_reg, args [1]->dreg);
5417 index_reg = args [1]->dreg;
5419 MONO_EMIT_BOUNDS_CHECK (cfg, args [0]->dreg, MonoString, length, index_reg);
5421 #if defined(TARGET_X86) || defined(TARGET_AMD64)
5422 EMIT_NEW_X86_LEA (cfg, ins, args [0]->dreg, index_reg, 1, MONO_STRUCT_OFFSET (MonoString, chars));
5423 add_reg = ins->dreg;
5424 /* Avoid a warning */
5426 EMIT_NEW_LOAD_MEMBASE (cfg, ins, OP_LOADU2_MEMBASE, dreg,
5429 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_SHL_IMM, mult_reg, index_reg, 1);
5430 MONO_EMIT_NEW_BIALU (cfg, OP_PADD, add_reg, mult_reg, args [0]->dreg);
5431 EMIT_NEW_LOAD_MEMBASE (cfg, ins, OP_LOADU2_MEMBASE, dreg,
5432 add_reg, MONO_STRUCT_OFFSET (MonoString, chars));
5434 type_from_op (ins, NULL, NULL);
5436 } else if (strcmp (cmethod->name, "get_Length") == 0) {
5437 int dreg = alloc_ireg (cfg);
5438 /* Decompose later to allow more optimizations */
5439 EMIT_NEW_UNALU (cfg, ins, OP_STRLEN, dreg, args [0]->dreg);
5440 ins->type = STACK_I4;
5441 ins->flags |= MONO_INST_FAULT;
5442 cfg->cbb->has_array_access = TRUE;
5443 cfg->flags |= MONO_CFG_HAS_ARRAY_ACCESS;
5446 } else if (strcmp (cmethod->name, "InternalSetChar") == 0) {
5447 int mult_reg = alloc_preg (cfg);
5448 int add_reg = alloc_preg (cfg);
5450 /* The corlib functions check for oob already. */
5451 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_SHL_IMM, mult_reg, args [1]->dreg, 1);
5452 MONO_EMIT_NEW_BIALU (cfg, OP_PADD, add_reg, mult_reg, args [0]->dreg);
5453 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI2_MEMBASE_REG, add_reg, MONO_STRUCT_OFFSET (MonoString, chars), args [2]->dreg);
5454 return cfg->cbb->last_ins;
5457 } else if (cmethod->klass == mono_defaults.object_class) {
5459 if (strcmp (cmethod->name, "GetType") == 0) {
5460 int dreg = alloc_ireg_ref (cfg);
5461 int vt_reg = alloc_preg (cfg);
5462 MONO_EMIT_NEW_LOAD_MEMBASE_FAULT (cfg, vt_reg, args [0]->dreg, MONO_STRUCT_OFFSET (MonoObject, vtable));
5463 EMIT_NEW_LOAD_MEMBASE (cfg, ins, OP_LOAD_MEMBASE, dreg, vt_reg, MONO_STRUCT_OFFSET (MonoVTable, type));
5464 type_from_op (ins, NULL, NULL);
5467 #if !defined(MONO_ARCH_EMULATE_MUL_DIV)
5468 } else if (strcmp (cmethod->name, "InternalGetHashCode") == 0 && !mono_gc_is_moving ()) {
5469 int dreg = alloc_ireg (cfg);
5470 int t1 = alloc_ireg (cfg);
5472 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_SHL_IMM, t1, args [0]->dreg, 3);
5473 EMIT_NEW_BIALU_IMM (cfg, ins, OP_MUL_IMM, dreg, t1, 2654435761u);
5474 ins->type = STACK_I4;
5478 } else if (strcmp (cmethod->name, ".ctor") == 0) {
5479 MONO_INST_NEW (cfg, ins, OP_NOP);
5480 MONO_ADD_INS (cfg->cbb, ins);
5484 } else if (cmethod->klass == mono_defaults.array_class) {
5485 if (!cfg->gsharedvt && strcmp (cmethod->name + 1, "etGenericValueImpl") == 0)
5486 return emit_array_generic_access (cfg, fsig, args, *cmethod->name == 'S');
5488 #ifndef MONO_BIG_ARRAYS
5490 * This is an inline version of GetLength/GetLowerBound(0) used frequently in
5493 if ((strcmp (cmethod->name, "GetLength") == 0 || strcmp (cmethod->name, "GetLowerBound") == 0) && args [1]->opcode == OP_ICONST && args [1]->inst_c0 == 0) {
5494 int dreg = alloc_ireg (cfg);
5495 int bounds_reg = alloc_ireg_mp (cfg);
5496 MonoBasicBlock *end_bb, *szarray_bb;
5497 gboolean get_length = strcmp (cmethod->name, "GetLength") == 0;
5499 NEW_BBLOCK (cfg, end_bb);
5500 NEW_BBLOCK (cfg, szarray_bb);
5502 EMIT_NEW_LOAD_MEMBASE_FAULT (cfg, ins, OP_LOAD_MEMBASE, bounds_reg,
5503 args [0]->dreg, MONO_STRUCT_OFFSET (MonoArray, bounds));
5504 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, bounds_reg, 0);
5505 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_IBEQ, szarray_bb);
5506 /* Non-szarray case */
5508 EMIT_NEW_LOAD_MEMBASE (cfg, ins, OP_LOADI4_MEMBASE, dreg,
5509 bounds_reg, MONO_STRUCT_OFFSET (MonoArrayBounds, length));
5511 EMIT_NEW_LOAD_MEMBASE (cfg, ins, OP_LOADI4_MEMBASE, dreg,
5512 bounds_reg, MONO_STRUCT_OFFSET (MonoArrayBounds, lower_bound));
5513 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, end_bb);
5514 MONO_START_BB (cfg, szarray_bb);
5517 EMIT_NEW_LOAD_MEMBASE (cfg, ins, OP_LOADI4_MEMBASE, dreg,
5518 args [0]->dreg, MONO_STRUCT_OFFSET (MonoArray, max_length));
5520 MONO_EMIT_NEW_ICONST (cfg, dreg, 0);
5521 MONO_START_BB (cfg, end_bb);
5523 EMIT_NEW_UNALU (cfg, ins, OP_MOVE, dreg, dreg);
5524 ins->type = STACK_I4;
5530 if (cmethod->name [0] != 'g')
5533 if (strcmp (cmethod->name, "get_Rank") == 0) {
5534 int dreg = alloc_ireg (cfg);
5535 int vtable_reg = alloc_preg (cfg);
5536 MONO_EMIT_NEW_LOAD_MEMBASE_OP_FAULT (cfg, OP_LOAD_MEMBASE, vtable_reg,
5537 args [0]->dreg, MONO_STRUCT_OFFSET (MonoObject, vtable));
5538 EMIT_NEW_LOAD_MEMBASE (cfg, ins, OP_LOADU1_MEMBASE, dreg,
5539 vtable_reg, MONO_STRUCT_OFFSET (MonoVTable, rank));
5540 type_from_op (ins, NULL, NULL);
5543 } else if (strcmp (cmethod->name, "get_Length") == 0) {
5544 int dreg = alloc_ireg (cfg);
5546 EMIT_NEW_LOAD_MEMBASE_FAULT (cfg, ins, OP_LOADI4_MEMBASE, dreg,
5547 args [0]->dreg, MONO_STRUCT_OFFSET (MonoArray, max_length));
5548 type_from_op (ins, NULL, NULL);
5553 } else if (cmethod->klass == runtime_helpers_class) {
5555 if (strcmp (cmethod->name, "get_OffsetToStringData") == 0) {
5556 EMIT_NEW_ICONST (cfg, ins, MONO_STRUCT_OFFSET (MonoString, chars));
5560 } else if (cmethod->klass == mono_defaults.thread_class) {
5561 if (strcmp (cmethod->name, "SpinWait_nop") == 0) {
5562 MONO_INST_NEW (cfg, ins, OP_RELAXED_NOP);
5563 MONO_ADD_INS (cfg->cbb, ins);
5565 } else if (strcmp (cmethod->name, "MemoryBarrier") == 0) {
5566 return emit_memory_barrier (cfg, FullBarrier);
5568 } else if (cmethod->klass == mono_defaults.monitor_class) {
5570 /* FIXME this should be integrated to the check below once we support the trampoline version */
5571 #if defined(MONO_ARCH_ENABLE_MONITOR_IL_FASTPATH)
5572 if (strcmp (cmethod->name, "Enter") == 0 && fsig->param_count == 2) {
5573 MonoMethod *fast_method = NULL;
5575 /* Avoid infinite recursion */
5576 if (cfg->method->wrapper_type == MONO_WRAPPER_UNKNOWN && !strcmp (cfg->method->name, "FastMonitorEnterV4"))
5579 fast_method = mono_monitor_get_fast_path (cmethod);
5583 return (MonoInst*)mono_emit_method_call (cfg, fast_method, args, NULL);
5587 #if defined(MONO_ARCH_MONITOR_OBJECT_REG)
5588 if (strcmp (cmethod->name, "Enter") == 0 && fsig->param_count == 1) {
5591 if (COMPILE_LLVM (cfg)) {
5593 * Pass the argument normally, the LLVM backend will handle the
5594 * calling convention problems.
5596 call = (MonoCallInst*)mono_emit_abs_call (cfg, MONO_PATCH_INFO_MONITOR_ENTER, NULL, helper_sig_monitor_enter_exit_trampoline_llvm, args);
5598 call = (MonoCallInst*)mono_emit_abs_call (cfg, MONO_PATCH_INFO_MONITOR_ENTER,
5599 NULL, helper_sig_monitor_enter_exit_trampoline, NULL);
5600 mono_call_inst_add_outarg_reg (cfg, call, args [0]->dreg,
5601 MONO_ARCH_MONITOR_OBJECT_REG, FALSE);
5604 return (MonoInst*)call;
5605 } else if (strcmp (cmethod->name, "Exit") == 0) {
5608 if (COMPILE_LLVM (cfg)) {
5609 call = (MonoCallInst*)mono_emit_abs_call (cfg, MONO_PATCH_INFO_MONITOR_EXIT, NULL, helper_sig_monitor_enter_exit_trampoline_llvm, args);
5611 call = (MonoCallInst*)mono_emit_abs_call (cfg, MONO_PATCH_INFO_MONITOR_EXIT,
5612 NULL, helper_sig_monitor_enter_exit_trampoline, NULL);
5613 mono_call_inst_add_outarg_reg (cfg, call, args [0]->dreg,
5614 MONO_ARCH_MONITOR_OBJECT_REG, FALSE);
5617 return (MonoInst*)call;
5619 #elif defined(MONO_ARCH_ENABLE_MONITOR_IL_FASTPATH)
5621 MonoMethod *fast_method = NULL;
5623 /* Avoid infinite recursion */
5624 if (cfg->method->wrapper_type == MONO_WRAPPER_UNKNOWN &&
5625 (strcmp (cfg->method->name, "FastMonitorEnter") == 0 ||
5626 strcmp (cfg->method->name, "FastMonitorExit") == 0))
5629 if ((strcmp (cmethod->name, "Enter") == 0 && fsig->param_count == 2) ||
5630 strcmp (cmethod->name, "Exit") == 0)
5631 fast_method = mono_monitor_get_fast_path (cmethod);
5635 return (MonoInst*)mono_emit_method_call (cfg, fast_method, args, NULL);
5638 } else if (cmethod->klass->image == mono_defaults.corlib &&
5639 (strcmp (cmethod->klass->name_space, "System.Threading") == 0) &&
5640 (strcmp (cmethod->klass->name, "Interlocked") == 0)) {
5643 #if SIZEOF_REGISTER == 8
5644 if (strcmp (cmethod->name, "Read") == 0 && (fsig->params [0]->type == MONO_TYPE_I8)) {
5647 emit_memory_barrier (cfg, FullBarrier);
5649 /* 64 bit reads are already atomic */
5650 MONO_INST_NEW (cfg, load_ins, OP_LOADI8_MEMBASE);
5651 load_ins->dreg = mono_alloc_preg (cfg);
5652 load_ins->inst_basereg = args [0]->dreg;
5653 load_ins->inst_offset = 0;
5654 MONO_ADD_INS (cfg->cbb, load_ins);
5656 emit_memory_barrier (cfg, FullBarrier);
5662 if (strcmp (cmethod->name, "Increment") == 0) {
5663 MonoInst *ins_iconst;
5666 if (fsig->params [0]->type == MONO_TYPE_I4) {
5667 opcode = OP_ATOMIC_ADD_I4;
5668 cfg->has_atomic_add_i4 = TRUE;
5670 #if SIZEOF_REGISTER == 8
5671 else if (fsig->params [0]->type == MONO_TYPE_I8)
5672 opcode = OP_ATOMIC_ADD_I8;
5675 if (!mono_arch_opcode_supported (opcode))
5677 MONO_INST_NEW (cfg, ins_iconst, OP_ICONST);
5678 ins_iconst->inst_c0 = 1;
5679 ins_iconst->dreg = mono_alloc_ireg (cfg);
5680 MONO_ADD_INS (cfg->cbb, ins_iconst);
5682 MONO_INST_NEW (cfg, ins, opcode);
5683 ins->dreg = mono_alloc_ireg (cfg);
5684 ins->inst_basereg = args [0]->dreg;
5685 ins->inst_offset = 0;
5686 ins->sreg2 = ins_iconst->dreg;
5687 ins->type = (opcode == OP_ATOMIC_ADD_I4) ? STACK_I4 : STACK_I8;
5688 MONO_ADD_INS (cfg->cbb, ins);
5690 } else if (strcmp (cmethod->name, "Decrement") == 0) {
5691 MonoInst *ins_iconst;
5694 if (fsig->params [0]->type == MONO_TYPE_I4) {
5695 opcode = OP_ATOMIC_ADD_I4;
5696 cfg->has_atomic_add_i4 = TRUE;
5698 #if SIZEOF_REGISTER == 8
5699 else if (fsig->params [0]->type == MONO_TYPE_I8)
5700 opcode = OP_ATOMIC_ADD_I8;
5703 if (!mono_arch_opcode_supported (opcode))
5705 MONO_INST_NEW (cfg, ins_iconst, OP_ICONST);
5706 ins_iconst->inst_c0 = -1;
5707 ins_iconst->dreg = mono_alloc_ireg (cfg);
5708 MONO_ADD_INS (cfg->cbb, ins_iconst);
5710 MONO_INST_NEW (cfg, ins, opcode);
5711 ins->dreg = mono_alloc_ireg (cfg);
5712 ins->inst_basereg = args [0]->dreg;
5713 ins->inst_offset = 0;
5714 ins->sreg2 = ins_iconst->dreg;
5715 ins->type = (opcode == OP_ATOMIC_ADD_I4) ? STACK_I4 : STACK_I8;
5716 MONO_ADD_INS (cfg->cbb, ins);
5718 } else if (strcmp (cmethod->name, "Add") == 0) {
5721 if (fsig->params [0]->type == MONO_TYPE_I4) {
5722 opcode = OP_ATOMIC_ADD_I4;
5723 cfg->has_atomic_add_i4 = TRUE;
5725 #if SIZEOF_REGISTER == 8
5726 else if (fsig->params [0]->type == MONO_TYPE_I8)
5727 opcode = OP_ATOMIC_ADD_I8;
5730 if (!mono_arch_opcode_supported (opcode))
5732 MONO_INST_NEW (cfg, ins, opcode);
5733 ins->dreg = mono_alloc_ireg (cfg);
5734 ins->inst_basereg = args [0]->dreg;
5735 ins->inst_offset = 0;
5736 ins->sreg2 = args [1]->dreg;
5737 ins->type = (opcode == OP_ATOMIC_ADD_I4) ? STACK_I4 : STACK_I8;
5738 MONO_ADD_INS (cfg->cbb, ins);
5742 if (strcmp (cmethod->name, "Exchange") == 0) {
5744 gboolean is_ref = fsig->params [0]->type == MONO_TYPE_OBJECT;
5746 if (fsig->params [0]->type == MONO_TYPE_I4) {
5747 opcode = OP_ATOMIC_EXCHANGE_I4;
5748 cfg->has_atomic_exchange_i4 = TRUE;
5750 #if SIZEOF_REGISTER == 8
5751 else if (is_ref || (fsig->params [0]->type == MONO_TYPE_I8) ||
5752 (fsig->params [0]->type == MONO_TYPE_I))
5753 opcode = OP_ATOMIC_EXCHANGE_I8;
5755 else if (is_ref || (fsig->params [0]->type == MONO_TYPE_I)) {
5756 opcode = OP_ATOMIC_EXCHANGE_I4;
5757 cfg->has_atomic_exchange_i4 = TRUE;
5763 if (!mono_arch_opcode_supported (opcode))
5766 MONO_INST_NEW (cfg, ins, opcode);
5767 ins->dreg = is_ref ? mono_alloc_ireg_ref (cfg) : mono_alloc_ireg (cfg);
5768 ins->inst_basereg = args [0]->dreg;
5769 ins->inst_offset = 0;
5770 ins->sreg2 = args [1]->dreg;
5771 MONO_ADD_INS (cfg->cbb, ins);
5773 switch (fsig->params [0]->type) {
5775 ins->type = STACK_I4;
5779 ins->type = STACK_I8;
5781 case MONO_TYPE_OBJECT:
5782 ins->type = STACK_OBJ;
5785 g_assert_not_reached ();
5788 if (cfg->gen_write_barriers && is_ref)
5789 emit_write_barrier (cfg, args [0], args [1]);
5792 if ((strcmp (cmethod->name, "CompareExchange") == 0)) {
5794 gboolean is_ref = mini_type_is_reference (cfg, fsig->params [1]);
5795 if (fsig->params [1]->type == MONO_TYPE_I4)
5797 else if (is_ref || fsig->params [1]->type == MONO_TYPE_I)
5798 size = sizeof (gpointer);
5799 else if (sizeof (gpointer) == 8 && fsig->params [1]->type == MONO_TYPE_I8)
5802 if (!mono_arch_opcode_supported (OP_ATOMIC_CAS_I4))
5804 MONO_INST_NEW (cfg, ins, OP_ATOMIC_CAS_I4);
5805 ins->dreg = is_ref ? alloc_ireg_ref (cfg) : alloc_ireg (cfg);
5806 ins->sreg1 = args [0]->dreg;
5807 ins->sreg2 = args [1]->dreg;
5808 ins->sreg3 = args [2]->dreg;
5809 ins->type = STACK_I4;
5810 MONO_ADD_INS (cfg->cbb, ins);
5811 cfg->has_atomic_cas_i4 = TRUE;
5812 } else if (size == 8) {
5813 if (!mono_arch_opcode_supported (OP_ATOMIC_CAS_I8))
5815 MONO_INST_NEW (cfg, ins, OP_ATOMIC_CAS_I8);
5816 ins->dreg = is_ref ? alloc_ireg_ref (cfg) : alloc_ireg (cfg);
5817 ins->sreg1 = args [0]->dreg;
5818 ins->sreg2 = args [1]->dreg;
5819 ins->sreg3 = args [2]->dreg;
5820 ins->type = STACK_I8;
5821 MONO_ADD_INS (cfg->cbb, ins);
5823 /* g_assert_not_reached (); */
5825 if (cfg->gen_write_barriers && is_ref)
5826 emit_write_barrier (cfg, args [0], args [1]);
5829 if (strcmp (cmethod->name, "MemoryBarrier") == 0)
5830 ins = emit_memory_barrier (cfg, FullBarrier);
5834 } else if (cmethod->klass->image == mono_defaults.corlib) {
5835 if (cmethod->name [0] == 'B' && strcmp (cmethod->name, "Break") == 0
5836 && strcmp (cmethod->klass->name, "Debugger") == 0) {
5837 if (should_insert_brekpoint (cfg->method)) {
5838 ins = mono_emit_jit_icall (cfg, mono_debugger_agent_user_break, NULL);
5840 MONO_INST_NEW (cfg, ins, OP_NOP);
5841 MONO_ADD_INS (cfg->cbb, ins);
5845 if (cmethod->name [0] == 'g' && strcmp (cmethod->name, "get_IsRunningOnWindows") == 0
5846 && strcmp (cmethod->klass->name, "Environment") == 0) {
5848 EMIT_NEW_ICONST (cfg, ins, 1);
5850 EMIT_NEW_ICONST (cfg, ins, 0);
5854 } else if (cmethod->klass == mono_defaults.math_class) {
5856 * There is general branches code for Min/Max, but it does not work for
5858 * http://everything2.com/?node_id=1051618
5860 } else if ((!strcmp (cmethod->klass->image->assembly->aname.name, "MonoMac") || !strcmp (cmethod->klass->image->assembly->aname.name, "monotouch")) && !strcmp (cmethod->klass->name, "Selector") && !strcmp (cmethod->name, "GetHandle") && cfg->compile_aot && (args [0]->opcode == OP_GOT_ENTRY || args[0]->opcode == OP_AOTCONST)) {
5861 #ifdef MONO_ARCH_HAVE_OBJC_GET_SELECTOR
5863 MonoJumpInfoToken *ji;
5866 cfg->disable_llvm = TRUE;
5868 if (args [0]->opcode == OP_GOT_ENTRY) {
5869 pi = args [0]->inst_p1;
5870 g_assert (pi->opcode == OP_PATCH_INFO);
5871 g_assert (GPOINTER_TO_INT (pi->inst_p1) == MONO_PATCH_INFO_LDSTR);
5874 g_assert (GPOINTER_TO_INT (args [0]->inst_p1) == MONO_PATCH_INFO_LDSTR);
5875 ji = args [0]->inst_p0;
5878 NULLIFY_INS (args [0]);
5881 s = mono_ldstr (cfg->domain, ji->image, mono_metadata_token_index (ji->token));
5882 MONO_INST_NEW (cfg, ins, OP_OBJC_GET_SELECTOR);
5883 ins->dreg = mono_alloc_ireg (cfg);
5885 ins->inst_p0 = mono_string_to_utf8 (s);
5886 MONO_ADD_INS (cfg->cbb, ins);
5891 #ifdef MONO_ARCH_SIMD_INTRINSICS
5892 if (cfg->opt & MONO_OPT_SIMD) {
5893 ins = mono_emit_simd_intrinsics (cfg, cmethod, fsig, args);
5899 ins = mono_emit_native_types_intrinsics (cfg, cmethod, fsig, args);
5903 if (COMPILE_LLVM (cfg)) {
5904 ins = llvm_emit_inst_for_method (cfg, cmethod, fsig, args);
5909 return mono_arch_emit_inst_for_method (cfg, cmethod, fsig, args);
5913 * This entry point could be used later for arbitrary method
5916 inline static MonoInst*
5917 mini_redirect_call (MonoCompile *cfg, MonoMethod *method,
5918 MonoMethodSignature *signature, MonoInst **args, MonoInst *this)
5920 if (method->klass == mono_defaults.string_class) {
5921 /* managed string allocation support */
5922 if (strcmp (method->name, "InternalAllocateStr") == 0 && !(mono_profiler_events & MONO_PROFILE_ALLOCATIONS) && !(cfg->opt & MONO_OPT_SHARED)) {
5923 MonoInst *iargs [2];
5924 MonoVTable *vtable = mono_class_vtable (cfg->domain, method->klass);
5925 MonoMethod *managed_alloc = NULL;
5927 g_assert (vtable); /*Should not fail since it System.String*/
5928 #ifndef MONO_CROSS_COMPILE
5929 managed_alloc = mono_gc_get_managed_allocator (method->klass, FALSE);
5933 EMIT_NEW_VTABLECONST (cfg, iargs [0], vtable);
5934 iargs [1] = args [0];
5935 return mono_emit_method_call (cfg, managed_alloc, iargs, this);
5942 mono_save_args (MonoCompile *cfg, MonoMethodSignature *sig, MonoInst **sp)
5944 MonoInst *store, *temp;
5947 for (i = 0; i < sig->param_count + sig->hasthis; ++i) {
5948 MonoType *argtype = (sig->hasthis && (i == 0)) ? type_from_stack_type (*sp) : sig->params [i - sig->hasthis];
5951 * FIXME: We should use *args++ = sp [0], but that would mean the arg
5952 * would be different than the MonoInst's used to represent arguments, and
5953 * the ldelema implementation can't deal with that.
5954 * Solution: When ldelema is used on an inline argument, create a var for
5955 * it, emit ldelema on that var, and emit the saving code below in
5956 * inline_method () if needed.
5958 temp = mono_compile_create_var (cfg, argtype, OP_LOCAL);
5959 cfg->args [i] = temp;
5960 /* This uses cfg->args [i] which is set by the preceeding line */
5961 EMIT_NEW_ARGSTORE (cfg, store, i, *sp);
5962 store->cil_code = sp [0]->cil_code;
5967 #define MONO_INLINE_CALLED_LIMITED_METHODS 1
5968 #define MONO_INLINE_CALLER_LIMITED_METHODS 1
5970 #if (MONO_INLINE_CALLED_LIMITED_METHODS)
5972 check_inline_called_method_name_limit (MonoMethod *called_method)
5975 static const char *limit = NULL;
5977 if (limit == NULL) {
5978 const char *limit_string = g_getenv ("MONO_INLINE_CALLED_METHOD_NAME_LIMIT");
5980 if (limit_string != NULL)
5981 limit = limit_string;
5986 if (limit [0] != '\0') {
5987 char *called_method_name = mono_method_full_name (called_method, TRUE);
5989 strncmp_result = strncmp (called_method_name, limit, strlen (limit));
5990 g_free (called_method_name);
5992 //return (strncmp_result <= 0);
5993 return (strncmp_result == 0);
6000 #if (MONO_INLINE_CALLER_LIMITED_METHODS)
6002 check_inline_caller_method_name_limit (MonoMethod *caller_method)
6005 static const char *limit = NULL;
6007 if (limit == NULL) {
6008 const char *limit_string = g_getenv ("MONO_INLINE_CALLER_METHOD_NAME_LIMIT");
6009 if (limit_string != NULL) {
6010 limit = limit_string;
6016 if (limit [0] != '\0') {
6017 char *caller_method_name = mono_method_full_name (caller_method, TRUE);
6019 strncmp_result = strncmp (caller_method_name, limit, strlen (limit));
6020 g_free (caller_method_name);
6022 //return (strncmp_result <= 0);
6023 return (strncmp_result == 0);
6031 emit_init_rvar (MonoCompile *cfg, int dreg, MonoType *rtype)
6033 static double r8_0 = 0.0;
6037 rtype = mini_replace_type (rtype);
6041 MONO_EMIT_NEW_PCONST (cfg, dreg, NULL);
6042 } else if (t >= MONO_TYPE_BOOLEAN && t <= MONO_TYPE_U4) {
6043 MONO_EMIT_NEW_ICONST (cfg, dreg, 0);
6044 } else if (t == MONO_TYPE_I8 || t == MONO_TYPE_U8) {
6045 MONO_EMIT_NEW_I8CONST (cfg, dreg, 0);
6046 } else if (t == MONO_TYPE_R4 || t == MONO_TYPE_R8) {
6047 MONO_INST_NEW (cfg, ins, OP_R8CONST);
6048 ins->type = STACK_R8;
6049 ins->inst_p0 = (void*)&r8_0;
6051 MONO_ADD_INS (cfg->cbb, ins);
6052 } else if ((t == MONO_TYPE_VALUETYPE) || (t == MONO_TYPE_TYPEDBYREF) ||
6053 ((t == MONO_TYPE_GENERICINST) && mono_type_generic_inst_is_valuetype (rtype))) {
6054 MONO_EMIT_NEW_VZERO (cfg, dreg, mono_class_from_mono_type (rtype));
6055 } else if (((t == MONO_TYPE_VAR) || (t == MONO_TYPE_MVAR)) && mini_type_var_is_vt (cfg, rtype)) {
6056 MONO_EMIT_NEW_VZERO (cfg, dreg, mono_class_from_mono_type (rtype));
6058 MONO_EMIT_NEW_PCONST (cfg, dreg, NULL);
6063 emit_dummy_init_rvar (MonoCompile *cfg, int dreg, MonoType *rtype)
6067 rtype = mini_replace_type (rtype);
6071 MONO_EMIT_NEW_DUMMY_INIT (cfg, dreg, OP_DUMMY_PCONST);
6072 } else if (t >= MONO_TYPE_BOOLEAN && t <= MONO_TYPE_U4) {
6073 MONO_EMIT_NEW_DUMMY_INIT (cfg, dreg, OP_DUMMY_ICONST);
6074 } else if (t == MONO_TYPE_I8 || t == MONO_TYPE_U8) {
6075 MONO_EMIT_NEW_DUMMY_INIT (cfg, dreg, OP_DUMMY_I8CONST);
6076 } else if (t == MONO_TYPE_R4 || t == MONO_TYPE_R8) {
6077 MONO_EMIT_NEW_DUMMY_INIT (cfg, dreg, OP_DUMMY_R8CONST);
6078 } else if ((t == MONO_TYPE_VALUETYPE) || (t == MONO_TYPE_TYPEDBYREF) ||
6079 ((t == MONO_TYPE_GENERICINST) && mono_type_generic_inst_is_valuetype (rtype))) {
6080 MONO_EMIT_NEW_DUMMY_INIT (cfg, dreg, OP_DUMMY_VZERO);
6081 } else if (((t == MONO_TYPE_VAR) || (t == MONO_TYPE_MVAR)) && mini_type_var_is_vt (cfg, rtype)) {
6082 MONO_EMIT_NEW_DUMMY_INIT (cfg, dreg, OP_DUMMY_VZERO);
6084 emit_init_rvar (cfg, dreg, rtype);
6088 /* If INIT is FALSE, emit dummy initialization statements to keep the IR valid */
6090 emit_init_local (MonoCompile *cfg, int local, MonoType *type, gboolean init)
6092 MonoInst *var = cfg->locals [local];
6093 if (COMPILE_SOFT_FLOAT (cfg)) {
6095 int reg = alloc_dreg (cfg, var->type);
6096 emit_init_rvar (cfg, reg, type);
6097 EMIT_NEW_LOCSTORE (cfg, store, local, cfg->cbb->last_ins);
6100 emit_init_rvar (cfg, var->dreg, type);
6102 emit_dummy_init_rvar (cfg, var->dreg, type);
6109 * Return the cost of inlining CMETHOD.
6112 inline_method (MonoCompile *cfg, MonoMethod *cmethod, MonoMethodSignature *fsig, MonoInst **sp,
6113 guchar *ip, guint real_offset, gboolean inline_always, MonoBasicBlock **out_cbb)
6115 MonoInst *ins, *rvar = NULL;
6116 MonoMethodHeader *cheader;
6117 MonoBasicBlock *ebblock, *sbblock;
6119 MonoMethod *prev_inlined_method;
6120 MonoInst **prev_locals, **prev_args;
6121 MonoType **prev_arg_types;
6122 guint prev_real_offset;
6123 GHashTable *prev_cbb_hash;
6124 MonoBasicBlock **prev_cil_offset_to_bb;
6125 MonoBasicBlock *prev_cbb;
6126 unsigned char* prev_cil_start;
6127 guint32 prev_cil_offset_to_bb_len;
6128 MonoMethod *prev_current_method;
6129 MonoGenericContext *prev_generic_context;
6130 gboolean ret_var_set, prev_ret_var_set, prev_disable_inline, virtual = FALSE;
6132 g_assert (cfg->exception_type == MONO_EXCEPTION_NONE);
6134 #if (MONO_INLINE_CALLED_LIMITED_METHODS)
6135 if ((! inline_always) && ! check_inline_called_method_name_limit (cmethod))
6138 #if (MONO_INLINE_CALLER_LIMITED_METHODS)
6139 if ((! inline_always) && ! check_inline_caller_method_name_limit (cfg->method))
6143 if (cfg->verbose_level > 2)
6144 printf ("INLINE START %p %s -> %s\n", cmethod, mono_method_full_name (cfg->method, TRUE), mono_method_full_name (cmethod, TRUE));
6146 if (!cmethod->inline_info) {
6147 cfg->stat_inlineable_methods++;
6148 cmethod->inline_info = 1;
6151 /* allocate local variables */
6152 cheader = mono_method_get_header (cmethod);
6154 if (cheader == NULL || mono_loader_get_last_error ()) {
6155 MonoLoaderError *error = mono_loader_get_last_error ();
6158 mono_metadata_free_mh (cheader);
6159 if (inline_always && error)
6160 mono_cfg_set_exception (cfg, error->exception_type);
6162 mono_loader_clear_error ();
6166 /*Must verify before creating locals as it can cause the JIT to assert.*/
6167 if (mono_compile_is_broken (cfg, cmethod, FALSE)) {
6168 mono_metadata_free_mh (cheader);
6172 /* allocate space to store the return value */
6173 if (!MONO_TYPE_IS_VOID (fsig->ret)) {
6174 rvar = mono_compile_create_var (cfg, fsig->ret, OP_LOCAL);
6177 prev_locals = cfg->locals;
6178 cfg->locals = mono_mempool_alloc0 (cfg->mempool, cheader->num_locals * sizeof (MonoInst*));
6179 for (i = 0; i < cheader->num_locals; ++i)
6180 cfg->locals [i] = mono_compile_create_var (cfg, cheader->locals [i], OP_LOCAL);
6182 /* allocate start and end blocks */
6183 /* This is needed so if the inline is aborted, we can clean up */
6184 NEW_BBLOCK (cfg, sbblock);
6185 sbblock->real_offset = real_offset;
6187 NEW_BBLOCK (cfg, ebblock);
6188 ebblock->block_num = cfg->num_bblocks++;
6189 ebblock->real_offset = real_offset;
6191 prev_args = cfg->args;
6192 prev_arg_types = cfg->arg_types;
6193 prev_inlined_method = cfg->inlined_method;
6194 cfg->inlined_method = cmethod;
6195 cfg->ret_var_set = FALSE;
6196 cfg->inline_depth ++;
6197 prev_real_offset = cfg->real_offset;
6198 prev_cbb_hash = cfg->cbb_hash;
6199 prev_cil_offset_to_bb = cfg->cil_offset_to_bb;
6200 prev_cil_offset_to_bb_len = cfg->cil_offset_to_bb_len;
6201 prev_cil_start = cfg->cil_start;
6202 prev_cbb = cfg->cbb;
6203 prev_current_method = cfg->current_method;
6204 prev_generic_context = cfg->generic_context;
6205 prev_ret_var_set = cfg->ret_var_set;
6206 prev_disable_inline = cfg->disable_inline;
6208 if (*ip == CEE_CALLVIRT && !(cmethod->flags & METHOD_ATTRIBUTE_STATIC))
6211 costs = mono_method_to_ir (cfg, cmethod, sbblock, ebblock, rvar, sp, real_offset, virtual);
6213 ret_var_set = cfg->ret_var_set;
6215 cfg->inlined_method = prev_inlined_method;
6216 cfg->real_offset = prev_real_offset;
6217 cfg->cbb_hash = prev_cbb_hash;
6218 cfg->cil_offset_to_bb = prev_cil_offset_to_bb;
6219 cfg->cil_offset_to_bb_len = prev_cil_offset_to_bb_len;
6220 cfg->cil_start = prev_cil_start;
6221 cfg->locals = prev_locals;
6222 cfg->args = prev_args;
6223 cfg->arg_types = prev_arg_types;
6224 cfg->current_method = prev_current_method;
6225 cfg->generic_context = prev_generic_context;
6226 cfg->ret_var_set = prev_ret_var_set;
6227 cfg->disable_inline = prev_disable_inline;
6228 cfg->inline_depth --;
6230 if ((costs >= 0 && costs < 60) || inline_always) {
6231 if (cfg->verbose_level > 2)
6232 printf ("INLINE END %s -> %s\n", mono_method_full_name (cfg->method, TRUE), mono_method_full_name (cmethod, TRUE));
6234 cfg->stat_inlined_methods++;
6236 /* always add some code to avoid block split failures */
6237 MONO_INST_NEW (cfg, ins, OP_NOP);
6238 MONO_ADD_INS (prev_cbb, ins);
6240 prev_cbb->next_bb = sbblock;
6241 link_bblock (cfg, prev_cbb, sbblock);
6244 * Get rid of the begin and end bblocks if possible to aid local
6247 mono_merge_basic_blocks (cfg, prev_cbb, sbblock);
6249 if ((prev_cbb->out_count == 1) && (prev_cbb->out_bb [0]->in_count == 1) && (prev_cbb->out_bb [0] != ebblock))
6250 mono_merge_basic_blocks (cfg, prev_cbb, prev_cbb->out_bb [0]);
6252 if ((ebblock->in_count == 1) && ebblock->in_bb [0]->out_count == 1) {
6253 MonoBasicBlock *prev = ebblock->in_bb [0];
6254 mono_merge_basic_blocks (cfg, prev, ebblock);
6256 if ((prev_cbb->out_count == 1) && (prev_cbb->out_bb [0]->in_count == 1) && (prev_cbb->out_bb [0] == prev)) {
6257 mono_merge_basic_blocks (cfg, prev_cbb, prev);
6258 cfg->cbb = prev_cbb;
6262 * Its possible that the rvar is set in some prev bblock, but not in others.
6268 for (i = 0; i < ebblock->in_count; ++i) {
6269 bb = ebblock->in_bb [i];
6271 if (bb->last_ins && bb->last_ins->opcode == OP_NOT_REACHED) {
6274 emit_init_rvar (cfg, rvar->dreg, fsig->ret);
6282 *out_cbb = cfg->cbb;
6286 * If the inlined method contains only a throw, then the ret var is not
6287 * set, so set it to a dummy value.
6290 emit_init_rvar (cfg, rvar->dreg, fsig->ret);
6292 EMIT_NEW_TEMPLOAD (cfg, ins, rvar->inst_c0);
6295 cfg->headers_to_free = g_slist_prepend_mempool (cfg->mempool, cfg->headers_to_free, cheader);
6298 if (cfg->verbose_level > 2)
6299 printf ("INLINE ABORTED %s (cost %d)\n", mono_method_full_name (cmethod, TRUE), costs);
6300 cfg->exception_type = MONO_EXCEPTION_NONE;
6301 mono_loader_clear_error ();
6303 /* This gets rid of the newly added bblocks */
6304 cfg->cbb = prev_cbb;
6306 cfg->headers_to_free = g_slist_prepend_mempool (cfg->mempool, cfg->headers_to_free, cheader);
6311 * Some of these comments may well be out-of-date.
6312 * Design decisions: we do a single pass over the IL code (and we do bblock
6313 * splitting/merging in the few cases when it's required: a back jump to an IL
6314 * address that was not already seen as bblock starting point).
6315 * Code is validated as we go (full verification is still better left to metadata/verify.c).
6316 * Complex operations are decomposed in simpler ones right away. We need to let the
6317 * arch-specific code peek and poke inside this process somehow (except when the
6318 * optimizations can take advantage of the full semantic info of coarse opcodes).
6319 * All the opcodes of the form opcode.s are 'normalized' to opcode.
6320 * MonoInst->opcode initially is the IL opcode or some simplification of that
6321 * (OP_LOAD, OP_STORE). The arch-specific code may rearrange it to an arch-specific
6322 * opcode with value bigger than OP_LAST.
6323 * At this point the IR can be handed over to an interpreter, a dumb code generator
6324 * or to the optimizing code generator that will translate it to SSA form.
6326 * Profiling directed optimizations.
6327 * We may compile by default with few or no optimizations and instrument the code
6328 * or the user may indicate what methods to optimize the most either in a config file
6329 * or through repeated runs where the compiler applies offline the optimizations to
6330 * each method and then decides if it was worth it.
6333 #define CHECK_TYPE(ins) if (!(ins)->type) UNVERIFIED
6334 #define CHECK_STACK(num) if ((sp - stack_start) < (num)) UNVERIFIED
6335 #define CHECK_STACK_OVF(num) if (((sp - stack_start) + (num)) > header->max_stack) UNVERIFIED
6336 #define CHECK_ARG(num) if ((unsigned)(num) >= (unsigned)num_args) UNVERIFIED
6337 #define CHECK_LOCAL(num) if ((unsigned)(num) >= (unsigned)header->num_locals) UNVERIFIED
6338 #define CHECK_OPSIZE(size) if (ip + size > end) UNVERIFIED
6339 #define CHECK_UNVERIFIABLE(cfg) if (cfg->unverifiable) UNVERIFIED
6340 #define CHECK_TYPELOAD(klass) if (!(klass) || (klass)->exception_type) TYPE_LOAD_ERROR ((klass))
6342 /* offset from br.s -> br like opcodes */
6343 #define BIG_BRANCH_OFFSET 13
6346 ip_in_bb (MonoCompile *cfg, MonoBasicBlock *bb, const guint8* ip)
6348 MonoBasicBlock *b = cfg->cil_offset_to_bb [ip - cfg->cil_start];
6350 return b == NULL || b == bb;
6354 get_basic_blocks (MonoCompile *cfg, MonoMethodHeader* header, guint real_offset, unsigned char *start, unsigned char *end, unsigned char **pos)
6356 unsigned char *ip = start;
6357 unsigned char *target;
6360 MonoBasicBlock *bblock;
6361 const MonoOpcode *opcode;
6364 cli_addr = ip - start;
6365 i = mono_opcode_value ((const guint8 **)&ip, end);
6368 opcode = &mono_opcodes [i];
6369 switch (opcode->argument) {
6370 case MonoInlineNone:
6373 case MonoInlineString:
6374 case MonoInlineType:
6375 case MonoInlineField:
6376 case MonoInlineMethod:
6379 case MonoShortInlineR:
6386 case MonoShortInlineVar:
6387 case MonoShortInlineI:
6390 case MonoShortInlineBrTarget:
6391 target = start + cli_addr + 2 + (signed char)ip [1];
6392 GET_BBLOCK (cfg, bblock, target);
6395 GET_BBLOCK (cfg, bblock, ip);
6397 case MonoInlineBrTarget:
6398 target = start + cli_addr + 5 + (gint32)read32 (ip + 1);
6399 GET_BBLOCK (cfg, bblock, target);
6402 GET_BBLOCK (cfg, bblock, ip);
6404 case MonoInlineSwitch: {
6405 guint32 n = read32 (ip + 1);
6408 cli_addr += 5 + 4 * n;
6409 target = start + cli_addr;
6410 GET_BBLOCK (cfg, bblock, target);
6412 for (j = 0; j < n; ++j) {
6413 target = start + cli_addr + (gint32)read32 (ip);
6414 GET_BBLOCK (cfg, bblock, target);
6424 g_assert_not_reached ();
6427 if (i == CEE_THROW) {
6428 unsigned char *bb_start = ip - 1;
6430 /* Find the start of the bblock containing the throw */
6432 while ((bb_start >= start) && !bblock) {
6433 bblock = cfg->cil_offset_to_bb [(bb_start) - start];
6437 bblock->out_of_line = 1;
6447 static inline MonoMethod *
6448 mini_get_method_allow_open (MonoMethod *m, guint32 token, MonoClass *klass, MonoGenericContext *context)
6452 if (m->wrapper_type != MONO_WRAPPER_NONE) {
6453 method = mono_method_get_wrapper_data (m, token);
6455 method = mono_class_inflate_generic_method (method, context);
6457 method = mono_get_method_full (m->klass->image, token, klass, context);
6463 static inline MonoMethod *
6464 mini_get_method (MonoCompile *cfg, MonoMethod *m, guint32 token, MonoClass *klass, MonoGenericContext *context)
6466 MonoMethod *method = mini_get_method_allow_open (m, token, klass, context);
6468 if (method && cfg && !cfg->generic_sharing_context && mono_class_is_open_constructed_type (&method->klass->byval_arg))
6474 static inline MonoClass*
6475 mini_get_class (MonoMethod *method, guint32 token, MonoGenericContext *context)
6480 if (method->wrapper_type != MONO_WRAPPER_NONE) {
6481 klass = mono_method_get_wrapper_data (method, token);
6483 klass = mono_class_inflate_generic_class (klass, context);
6485 klass = mono_class_get_and_inflate_typespec_checked (method->klass->image, token, context, &error);
6486 mono_error_cleanup (&error); /* FIXME don't swallow the error */
6489 mono_class_init (klass);
6493 static inline MonoMethodSignature*
6494 mini_get_signature (MonoMethod *method, guint32 token, MonoGenericContext *context)
6496 MonoMethodSignature *fsig;
6498 if (method->wrapper_type != MONO_WRAPPER_NONE) {
6501 fsig = (MonoMethodSignature *)mono_method_get_wrapper_data (method, token);
6503 fsig = mono_inflate_generic_signature (fsig, context, &error);
6505 g_assert (mono_error_ok (&error));
6508 fsig = mono_metadata_parse_signature (method->klass->image, token);
6514 * Returns TRUE if the JIT should abort inlining because "callee"
6515 * is influenced by security attributes.
6518 gboolean check_linkdemand (MonoCompile *cfg, MonoMethod *caller, MonoMethod *callee)
6522 if ((cfg->method != caller) && mono_security_method_has_declsec (callee)) {
6526 result = mono_declsec_linkdemand (cfg->domain, caller, callee);
6527 if (result == MONO_JIT_SECURITY_OK)
6530 if (result == MONO_JIT_LINKDEMAND_ECMA) {
6531 /* Generate code to throw a SecurityException before the actual call/link */
6532 MonoSecurityManager *secman = mono_security_manager_get_methods ();
6535 NEW_ICONST (cfg, args [0], 4);
6536 NEW_METHODCONST (cfg, args [1], caller);
6537 mono_emit_method_call (cfg, secman->linkdemandsecurityexception, args, NULL);
6538 } else if (cfg->exception_type == MONO_EXCEPTION_NONE) {
6539 /* don't hide previous results */
6540 mono_cfg_set_exception (cfg, MONO_EXCEPTION_SECURITY_LINKDEMAND);
6541 cfg->exception_data = result;
6549 throw_exception (void)
6551 static MonoMethod *method = NULL;
6554 MonoSecurityManager *secman = mono_security_manager_get_methods ();
6555 method = mono_class_get_method_from_name (secman->securitymanager, "ThrowException", 1);
6562 emit_throw_exception (MonoCompile *cfg, MonoException *ex)
6564 MonoMethod *thrower = throw_exception ();
6567 EMIT_NEW_PCONST (cfg, args [0], ex);
6568 mono_emit_method_call (cfg, thrower, args, NULL);
6572 * Return the original method is a wrapper is specified. We can only access
6573 * the custom attributes from the original method.
6576 get_original_method (MonoMethod *method)
6578 if (method->wrapper_type == MONO_WRAPPER_NONE)
6581 /* native code (which is like Critical) can call any managed method XXX FIXME XXX to validate all usages */
6582 if (method->wrapper_type == MONO_WRAPPER_NATIVE_TO_MANAGED)
6585 /* in other cases we need to find the original method */
6586 return mono_marshal_method_from_wrapper (method);
6590 ensure_method_is_allowed_to_access_field (MonoCompile *cfg, MonoMethod *caller, MonoClassField *field,
6591 MonoBasicBlock *bblock, unsigned char *ip)
6593 /* we can't get the coreclr security level on wrappers since they don't have the attributes */
6594 MonoException *ex = mono_security_core_clr_is_field_access_allowed (get_original_method (caller), field);
6596 emit_throw_exception (cfg, ex);
6600 ensure_method_is_allowed_to_call_method (MonoCompile *cfg, MonoMethod *caller, MonoMethod *callee,
6601 MonoBasicBlock *bblock, unsigned char *ip)
6603 /* we can't get the coreclr security level on wrappers since they don't have the attributes */
6604 MonoException *ex = mono_security_core_clr_is_call_allowed (get_original_method (caller), callee);
6606 emit_throw_exception (cfg, ex);
6610 * Check that the IL instructions at ip are the array initialization
6611 * sequence and return the pointer to the data and the size.
/*
 * NOTE(review): the listing is non-contiguous here (lines are missing between
 * the numbered fragments). From what is visible: on a match the function
 * reports the initializer field token via *out_field_token, and returns either
 * the RVA (for AOT, resolved at load time) or the raw field data pointer.
 */
6614 initialize_array_data (MonoMethod *method, gboolean aot, unsigned char *ip, MonoClass *klass, guint32 len, int *out_size, guint32 *out_field_token)
/* Expected IL shape being pattern-matched (dup; ldtoken <field>; call InitializeArray): */
6617 * newarr[System.Int32]
6619 * ldtoken field valuetype ...
6620 * call void class [mscorlib]System.Runtime.CompilerServices.RuntimeHelpers::InitializeArray(class [mscorlib]System.Array, valuetype [mscorlib]System.RuntimeFieldHandle)
/* ip[5] == 0x4 checks the ldtoken operand's token-table byte (field token); see field_token below. */
6622 if (ip [0] == CEE_DUP && ip [1] == CEE_LDTOKEN && ip [5] == 0x4 && ip [6] == CEE_CALL) {
/* token = operand of the CALL; field_token = operand of the LDTOKEN. */
6624 guint32 token = read32 (ip + 7);
6625 guint32 field_token = read32 (ip + 2);
6626 guint32 field_index = field_token & 0xffffff;
6628 const char *data_ptr;
6630 MonoMethod *cmethod;
6631 MonoClass *dummy_class;
6632 MonoClassField *field = mono_field_from_token_checked (method->klass->image, field_token, &dummy_class, NULL, &error);
6636 mono_error_cleanup (&error); /* FIXME don't swallow the error */
6640 *out_field_token = field_token;
6642 cmethod = mini_get_method (NULL, method, token, NULL, NULL);
/* Bail out unless the callee really is corlib's RuntimeHelpers.InitializeArray. */
6645 if (strcmp (cmethod->name, "InitializeArray") || strcmp (cmethod->klass->name, "RuntimeHelpers") || cmethod->klass->image != mono_defaults.corlib)
/* Only primitive element types with a stable memory layout are optimizable. */
6647 switch (mono_type_get_underlying_type (&klass->byval_arg)->type) {
6648 case MONO_TYPE_BOOLEAN:
6652 /* we need to swap on big endian, so punt. Should we handle R4 and R8 as well? */
6653 #if TARGET_BYTE_ORDER == G_LITTLE_ENDIAN
6654 case MONO_TYPE_CHAR:
/* The computed blob size must not exceed the initializer field's own size. */
6671 if (size > mono_type_size (field->type, &dummy_align))
6674 /*g_print ("optimized in %s: size: %d, numelems: %d\n", method->name, size, newarr->inst_newa_len->inst_c0);*/
6675 if (!image_is_dynamic (method->klass->image)) {
6676 field_index = read32 (ip + 2) & 0xffffff;
6677 mono_metadata_field_info (method->klass->image, field_index - 1, NULL, &rva, NULL);
6678 data_ptr = mono_image_rva_map (method->klass->image, rva);
6679 /*g_print ("field: 0x%08x, rva: %d, rva_ptr: %p\n", read32 (ip + 2), rva, data_ptr);*/
6680 /* for aot code we do the lookup on load */
6681 if (aot && data_ptr)
6682 return GUINT_TO_POINTER (rva);
6684 /*FIXME is it possible to AOT a SRE assembly not meant to be saved? */
/* Dynamic (SRE) image: read the data directly from the field. */
6686 data_ptr = mono_field_get_data (field);
/*
 * set_exception_type_from_invalid_il:
 *
 *   Record an InvalidProgramException on CFG, with a message naming METHOD and
 * disassembling the offending instruction at IP. The method header is queued on
 * cfg->headers_to_free so it is released with the compile's mempool.
 */
6694 set_exception_type_from_invalid_il (MonoCompile *cfg, MonoMethod *method, unsigned char *ip)
6696 char *method_fname = mono_method_full_name (method, TRUE);
6698 MonoMethodHeader *header = mono_method_get_header (method);
/* An empty body has nothing to disassemble; use a fixed message instead. */
6700 if (header->code_size == 0)
6701 method_code = g_strdup ("method body is empty.");
6703 method_code = mono_disasm_code_one (NULL, method, ip, NULL);
6704 mono_cfg_set_exception (cfg, MONO_EXCEPTION_INVALID_PROGRAM);
6705 cfg->exception_message = g_strdup_printf ("Invalid IL code in %s: %s\n", method_fname, method_code);
6706 g_free (method_fname);
6707 g_free (method_code);
6708 cfg->headers_to_free = g_slist_prepend_mempool (cfg->mempool, cfg->headers_to_free, header);
/*
 * set_exception_object:
 *
 *   Abort the compile with a pre-built exception object: register
 * cfg->exception_ptr as a GC root (it holds a managed reference) and stash
 * EXCEPTION there.
 */
6712 set_exception_object (MonoCompile *cfg, MonoException *exception)
6714 mono_cfg_set_exception (cfg, MONO_EXCEPTION_OBJECT_SUPPLIED);
6715 MONO_GC_REGISTER_ROOT_SINGLE (cfg->exception_ptr);
6716 cfg->exception_ptr = exception;
/*
 * emit_stloc_ir:
 *
 *   Emit the store of the value on top of the stack (*sp) into local N.
 * When the value is a constant that was the very last instruction emitted,
 * retarget its destination register to the local's register instead of
 * emitting a separate move.
 */
6720 emit_stloc_ir (MonoCompile *cfg, MonoInst **sp, MonoMethodHeader *header, int n)
6723 guint32 opcode = mono_type_to_regmove (cfg, header->locals [n]);
6724 if ((opcode == OP_MOVE) && cfg->cbb->last_ins == sp [0] &&
6725 ((sp [0]->opcode == OP_ICONST) || (sp [0]->opcode == OP_I8CONST))) {
6726 /* Optimize reg-reg moves away */
6728 * Can't optimize other opcodes, since sp[0] might point to
6729 * the last ins of a decomposed opcode.
6731 sp [0]->dreg = (cfg)->locals [n]->dreg;
/* General case: emit an explicit local store. */
6733 EMIT_NEW_LOCSTORE (cfg, ins, n, *sp);
6738 * ldloca inhibits many optimizations so try to get rid of it in common
/* cases: here, LDLOCA immediately followed by INITOBJ is turned into a direct
 * initialization of the local, avoiding taking its address.
 * NOTE(review): returns the new ip past the consumed sequence, or presumably
 * NULL when no pattern matched — the return paths are outside this listing. */
6741 static inline unsigned char *
6742 emit_optimized_ldloca_ir (MonoCompile *cfg, unsigned char *ip, unsigned char *end, int size)
6752 local = read16 (ip + 2);
/* ldloca.s <local>; initobj <token> within the same bblock => init the local in place. */
6756 if (ip + 6 < end && (ip [0] == CEE_PREFIX1) && (ip [1] == CEE_INITOBJ) && ip_in_bb (cfg, cfg->cbb, ip + 1)) {
6757 /* From the INITOBJ case */
6758 token = read32 (ip + 2);
6759 klass = mini_get_class (cfg->current_method, token, cfg->generic_context);
6760 CHECK_TYPELOAD (klass);
6761 type = mini_replace_type (&klass->byval_arg);
6762 emit_init_local (cfg, local, type, TRUE);
/*
 * is_exception_class:
 *
 *   Return whether CLASS is System.Exception or derives from it, by walking
 * the parent chain.
 */
6770 is_exception_class (MonoClass *class)
6773 if (class == mono_defaults.exception_class)
6775 class = class->parent;
6781 * is_jit_optimizer_disabled:
6783 * Determine whenever M's assembly has a DebuggableAttribute with the
6784 * IsJITOptimizerDisabled flag set.
6787 is_jit_optimizer_disabled (MonoMethod *m)
6789 MonoAssembly *ass = m->klass->image->assembly;
6790 MonoCustomAttrInfo* attrs;
/* Cached lazily; shared across calls for all methods of the runtime. */
6791 static MonoClass *klass;
6793 gboolean val = FALSE;
/* Fast path: the per-assembly result is cached after the first computation. */
6796 if (ass->jit_optimizer_disabled_inited)
6797 return ass->jit_optimizer_disabled;
6800 klass = mono_class_from_name (mono_defaults.corlib, "System.Diagnostics", "DebuggableAttribute");
/* NOTE(review): this path records FALSE; presumably taken when the attribute
 * class cannot be found — the branch structure is outside this listing. */
6803 ass->jit_optimizer_disabled = FALSE;
/* Publish the value before the inited flag so racing readers never see
 * inited==TRUE with a stale value. */
6804 mono_memory_barrier ();
6805 ass->jit_optimizer_disabled_inited = TRUE;
6809 attrs = mono_custom_attrs_from_assembly (ass);
6811 for (i = 0; i < attrs->num_attrs; ++i) {
6812 MonoCustomAttrEntry *attr = &attrs->attrs [i];
6815 MonoMethodSignature *sig;
/* Skip anything that is not a DebuggableAttribute constructor. */
6817 if (!attr->ctor || attr->ctor->klass != klass)
6819 /* Decode the attribute. See reflection.c */
6820 len = attr->data_size;
6821 p = (const char*)attr->data;
/* Custom attribute blobs start with the 0x0001 prolog. */
6822 g_assert (read16 (p) == 0x0001);
6825 // FIXME: Support named parameters
6826 sig = mono_method_signature (attr->ctor);
/* Only the (bool, bool) ctor overload is decoded here. */
6827 if (sig->param_count != 2 || sig->params [0]->type != MONO_TYPE_BOOLEAN || sig->params [1]->type != MONO_TYPE_BOOLEAN)
6829 /* Two boolean arguments */
6833 mono_custom_attrs_free (attrs);
6836 ass->jit_optimizer_disabled = val;
6837 mono_memory_barrier ();
6838 ass->jit_optimizer_disabled_inited = TRUE;
/*
 * is_supported_tail_call:
 *
 *   Decide whether a call from METHOD to CMETHOD with signature FSIG and IL
 * opcode CALL_OPCODE can be compiled as a real tail call. Starts from an
 * arch/signature compatibility check, then vetoes cases where the callee
 * could observe pointers into the caller's soon-to-be-dead frame.
 */
6844 is_supported_tail_call (MonoCompile *cfg, MonoMethod *method, MonoMethod *cmethod, MonoMethodSignature *fsig, int call_opcode)
6846 gboolean supported_tail_call;
6849 #ifdef MONO_ARCH_HAVE_OP_TAIL_CALL
6850 supported_tail_call = mono_arch_tail_call_supported (cfg, mono_method_signature (method), mono_method_signature (cmethod));
/* Fallback without arch support: signatures must match exactly and the return
 * must not be a struct. */
6852 supported_tail_call = mono_metadata_signature_equal (mono_method_signature (method), mono_method_signature (cmethod)) && !MONO_TYPE_ISSTRUCT (mono_method_signature (cmethod)->ret);
6855 for (i = 0; i < fsig->param_count; ++i) {
6856 if (fsig->params [i]->byref || fsig->params [i]->type == MONO_TYPE_PTR || fsig->params [i]->type == MONO_TYPE_FNPTR)
6857 /* These can point to the current method's stack */
6858 supported_tail_call = FALSE;
6860 if (fsig->hasthis && cmethod->klass->valuetype)
6861 /* this might point to the current method's stack */
6862 supported_tail_call = FALSE;
6863 if (cmethod->flags & METHOD_ATTRIBUTE_PINVOKE_IMPL)
6864 supported_tail_call = FALSE;
6865 if (cfg->method->save_lmf)
6866 supported_tail_call = FALSE;
6867 if (cmethod->wrapper_type && cmethod->wrapper_type != MONO_WRAPPER_DYNAMIC_METHOD)
6868 supported_tail_call = FALSE;
/* Only plain CALL (not CALLVIRT/CALLI) is eligible. */
6869 if (call_opcode != CEE_CALL)
6870 supported_tail_call = FALSE;
6872 /* Debugging support */
/* mono_debug_count () lets developers bisect tail-call-related bugs by
 * disabling the optimization past a counter threshold. */
6874 if (supported_tail_call) {
6875 if (!mono_debug_count ())
6876 supported_tail_call = FALSE;
6880 return supported_tail_call;
6883 /* the JIT intercepts ldflda instructions to the tlsdata field in ThreadLocal<T> and redirects
6884 * it to the thread local value based on the tls_offset field. Every other kind of access to
6885 * the field causes an assert.
/* Returns TRUE only for the corlib ThreadLocal`1.tlsdata field. */
6888 is_magic_tls_access (MonoClassField *field)
6890 if (strcmp (field->name, "tlsdata"))
6892 if (strcmp (field->parent->name, "ThreadLocal`1"))
6894 return field->parent->image == mono_defaults.corlib;
6897 /* emits the code needed to access a managed tls var (like ThreadStatic)
6898 * with the value of the tls offset in offset_reg. thread_ins represents the MonoInternalThread
6899 * pointer for the current thread.
6900 * Returns the MonoInst* representing the address of the tls var.
6903 emit_managed_static_data_access (MonoCompile *cfg, MonoInst *thread_ins, int offset_reg)
6906 int static_data_reg, array_reg, dreg;
6907 int offset2_reg, idx_reg;
6908 // inlined access to the tls data
6909 // idx = (offset >> 24) - 1;
6910 // return ((char*) thread->static_data [idx]) + (offset & 0xffffff);
/* static_data_reg = thread->static_data */
6911 static_data_reg = alloc_ireg (cfg);
6912 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, static_data_reg, thread_ins->dreg, MONO_STRUCT_OFFSET (MonoInternalThread, static_data));
/* idx_reg = ((offset >> 24) - 1) scaled to a pointer-array byte offset. */
6913 idx_reg = alloc_ireg (cfg);
6914 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_ISHR_IMM, idx_reg, offset_reg, 24);
6915 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_ISUB_IMM, idx_reg, idx_reg, 1);
6916 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_ISHL_IMM, idx_reg, idx_reg, sizeof (gpointer) == 8 ? 3 : 2);
6917 MONO_EMIT_NEW_BIALU (cfg, OP_PADD, static_data_reg, static_data_reg, idx_reg);
/* array_reg = static_data [idx] */
6918 array_reg = alloc_ireg (cfg);
6919 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, array_reg, static_data_reg, 0);
/* offset2_reg = low 24 bits of the tls offset (byte offset within the block). */
6920 offset2_reg = alloc_ireg (cfg);
6921 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_IAND_IMM, offset2_reg, offset_reg, 0xffffff);
/* addr = array + offset2 */
6922 dreg = alloc_ireg (cfg);
6923 EMIT_NEW_BIALU (cfg, addr, OP_PADD, dreg, array_reg, offset2_reg);
6928 * redirect access to the tlsdata field to the tls var given by the tls_offset field.
6929 * this address is cached per-method in cached_tls_addr.
6932 create_magic_tls_access (MonoCompile *cfg, MonoClassField *tls_field, MonoInst **cached_tls_addr, MonoInst *thread_local)
6934 MonoInst *load, *addr, *temp, *store, *thread_ins;
6935 MonoClassField *offset_field;
/* Fast path: reuse the address already computed earlier in this method. */
6937 if (*cached_tls_addr) {
6938 EMIT_NEW_TEMPLOAD (cfg, addr, (*cached_tls_addr)->inst_c0);
6941 thread_ins = mono_get_thread_intrinsic (cfg);
6942 offset_field = mono_class_get_field_from_name (tls_field->parent, "tls_offset");
/* Load the tls_offset field off the ThreadLocal<T> instance. */
6944 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, load, offset_field->type, thread_local->dreg, offset_field->offset);
6946 MONO_ADD_INS (cfg->cbb, thread_ins);
/* No thread intrinsic on this arch: fall back to an icall to get the
 * current MonoInternalThread. */
6948 MonoMethod *thread_method;
6949 thread_method = mono_class_get_method_from_name (mono_get_thread_class(), "CurrentInternalThread_internal", 0);
6950 thread_ins = mono_emit_method_call (cfg, thread_method, NULL, NULL);
6952 addr = emit_managed_static_data_access (cfg, thread_ins, load->dreg);
6953 addr->klass = mono_class_from_mono_type (tls_field->type);
6954 addr->type = STACK_MP;
/* Cache the computed address in a temp so later accesses in this method
 * reload it instead of recomputing. */
6955 *cached_tls_addr = temp = mono_compile_create_var (cfg, type_from_stack_type (addr), OP_LOCAL);
6956 EMIT_NEW_TEMPSTORE (cfg, store, temp->inst_c0, addr);
6958 EMIT_NEW_TEMPLOAD (cfg, addr, temp->inst_c0);
6965 * Handle calls made to ctors from NEWOBJ opcodes.
6967 * REF_BBLOCK will point to the current bblock after the call.
/* sp[0] is the freshly allocated object; *inline_costs is charged when the
 * ctor body is inlined. */
6970 handle_ctor_call (MonoCompile *cfg, MonoMethod *cmethod, MonoMethodSignature *fsig, int context_used,
6971 MonoInst **sp, guint8 *ip, MonoBasicBlock **ref_bblock, int *inline_costs)
6973 MonoInst *vtable_arg = NULL, *callvirt_this_arg = NULL, *ins;
6974 MonoBasicBlock *bblock = *ref_bblock;
/* Shared valuetype ctors need an explicit vtable/mrgctx argument. */
6976 if (cmethod->klass->valuetype && mono_class_generic_sharing_enabled (cmethod->klass) &&
6977 mono_method_is_generic_sharable (cmethod, TRUE)) {
6978 if (cmethod->is_inflated && mono_method_get_context (cmethod)->method_inst) {
6979 mono_class_vtable (cfg->domain, cmethod->klass);
6980 CHECK_TYPELOAD (cmethod->klass);
/* Method-level generic sharing: pass the method's RGCTX. */
6982 vtable_arg = emit_get_rgctx_method (cfg, context_used,
6983 cmethod, MONO_RGCTX_INFO_METHOD_RGCTX);
/* Class-level sharing: fetch the vtable through the RGCTX. */
6986 vtable_arg = emit_get_rgctx_klass (cfg, context_used,
6987 cmethod->klass, MONO_RGCTX_INFO_VTABLE);
/* Non-shared: the vtable is known at compile time. */
6989 MonoVTable *vtable = mono_class_vtable (cfg->domain, cmethod->klass);
6991 CHECK_TYPELOAD (cmethod->klass);
6992 EMIT_NEW_VTABLECONST (cfg, vtable_arg, vtable);
6997 /* Avoid virtual calls to ctors if possible */
6998 if (mono_class_is_marshalbyref (cmethod->klass))
6999 callvirt_this_arg = sp [0];
/* Intrinsic ctors (e.g. recognized by mini_emit_inst_for_ctor) replace the
 * call entirely; intrinsic ctors must return void. */
7001 if (cmethod && (cfg->opt & MONO_OPT_INTRINS) && (ins = mini_emit_inst_for_ctor (cfg, cmethod, fsig, sp))) {
7002 g_assert (MONO_TYPE_IS_VOID (fsig->ret));
7003 CHECK_CFG_EXCEPTION;
/* Try inlining the ctor body; exception-class ctors are excluded. */
7004 } else if ((cfg->opt & MONO_OPT_INLINE) && cmethod && !context_used && !vtable_arg &&
7005 mono_method_check_inlining (cfg, cmethod) &&
7006 !mono_class_is_subclass_of (cmethod->klass, mono_defaults.exception_class, FALSE)) {
7009 if ((costs = inline_method (cfg, cmethod, fsig, sp, ip, cfg->real_offset, FALSE, &bblock))) {
/* 5 = size of the CALL IL instruction consumed by the inline. */
7010 cfg->real_offset += 5;
7012 *inline_costs += costs - 5;
7013 *ref_bblock = bblock;
7015 INLINE_FAILURE ("inline failure");
7016 // FIXME-VT: Clean this up
7017 if (cfg->gsharedvt && mini_is_gsharedvt_signature (cfg, fsig))
7018 GSHAREDVT_FAILURE(*ip);
7019 mono_emit_method_call_full (cfg, cmethod, fsig, FALSE, sp, callvirt_this_arg, NULL, NULL);
/* gsharedvt signature: go through the gsharedvt out trampoline. */
7021 } else if (cfg->gsharedvt && mini_is_gsharedvt_signature (cfg, fsig)) {
7024 addr = emit_get_rgctx_gsharedvt_call (cfg, context_used, fsig, cmethod, MONO_RGCTX_INFO_METHOD_GSHAREDVT_OUT_TRAMPOLINE);
7025 mono_emit_calli (cfg, fsig, sp, addr, NULL, vtable_arg);
7026 } else if (context_used &&
7027 ((!mono_method_is_generic_sharable_full (cmethod, TRUE, FALSE, FALSE) ||
7028 !mono_class_generic_sharing_enabled (cmethod->klass)) || cfg->gsharedvt)) {
7029 MonoInst *cmethod_addr;
7031 /* Generic calls made out of gsharedvt methods cannot be patched, so use an indirect call */
7033 cmethod_addr = emit_get_rgctx_method (cfg, context_used,
7034 cmethod, MONO_RGCTX_INFO_GENERIC_METHOD_CODE);
7036 mono_emit_calli (cfg, fsig, sp, cmethod_addr, NULL, vtable_arg);
/* Default: a plain direct call to the ctor. */
7038 INLINE_FAILURE ("ctor call");
7039 ins = mono_emit_method_call_full (cfg, cmethod, fsig, FALSE, sp,
7040 callvirt_this_arg, NULL, vtable_arg);
7047 * mono_method_to_ir:
7049 * Translate the .net IL into linear IR.
7052 mono_method_to_ir (MonoCompile *cfg, MonoMethod *method, MonoBasicBlock *start_bblock, MonoBasicBlock *end_bblock,
7053 MonoInst *return_var, MonoInst **inline_args,
7054 guint inline_offset, gboolean is_virtual_call)
7057 MonoInst *ins, **sp, **stack_start;
7058 MonoBasicBlock *bblock, *tblock = NULL, *init_localsbb = NULL;
7059 MonoSimpleBasicBlock *bb = NULL, *original_bb = NULL;
7060 MonoMethod *cmethod, *method_definition;
7061 MonoInst **arg_array;
7062 MonoMethodHeader *header;
7064 guint32 token, ins_flag;
7066 MonoClass *constrained_call = NULL;
7067 unsigned char *ip, *end, *target, *err_pos;
7068 MonoMethodSignature *sig;
7069 MonoGenericContext *generic_context = NULL;
7070 MonoGenericContainer *generic_container = NULL;
7071 MonoType **param_types;
7072 int i, n, start_new_bblock, dreg;
7073 int num_calls = 0, inline_costs = 0;
7074 int breakpoint_id = 0;
7076 MonoBoolean security, pinvoke;
7077 MonoSecurityManager* secman = NULL;
7078 MonoDeclSecurityActions actions;
7079 GSList *class_inits = NULL;
7080 gboolean dont_verify, dont_verify_stloc, readonly = FALSE;
7082 gboolean init_locals, seq_points, skip_dead_blocks;
7083 gboolean sym_seq_points = FALSE;
7084 MonoInst *cached_tls_addr = NULL;
7085 MonoDebugMethodInfo *minfo;
7086 MonoBitSet *seq_point_locs = NULL;
7087 MonoBitSet *seq_point_set_locs = NULL;
7089 cfg->disable_inline = is_jit_optimizer_disabled (method);
7091 /* serialization and xdomain stuff may need access to private fields and methods */
7092 dont_verify = method->klass->image->assembly->corlib_internal? TRUE: FALSE;
7093 dont_verify |= method->wrapper_type == MONO_WRAPPER_XDOMAIN_INVOKE;
7094 dont_verify |= method->wrapper_type == MONO_WRAPPER_XDOMAIN_DISPATCH;
7095 dont_verify |= method->wrapper_type == MONO_WRAPPER_MANAGED_TO_NATIVE; /* bug #77896 */
7096 dont_verify |= method->wrapper_type == MONO_WRAPPER_COMINTEROP;
7097 dont_verify |= method->wrapper_type == MONO_WRAPPER_COMINTEROP_INVOKE;
7099 dont_verify |= mono_security_smcs_hack_enabled ();
7101 /* still some type unsafety issues in marshal wrappers... (unknown is PtrToStructure) */
7102 dont_verify_stloc = method->wrapper_type == MONO_WRAPPER_MANAGED_TO_NATIVE;
7103 dont_verify_stloc |= method->wrapper_type == MONO_WRAPPER_UNKNOWN;
7104 dont_verify_stloc |= method->wrapper_type == MONO_WRAPPER_NATIVE_TO_MANAGED;
7105 dont_verify_stloc |= method->wrapper_type == MONO_WRAPPER_STELEMREF;
7107 image = method->klass->image;
7108 header = mono_method_get_header (method);
7110 MonoLoaderError *error;
7112 if ((error = mono_loader_get_last_error ())) {
7113 mono_cfg_set_exception (cfg, error->exception_type);
7115 mono_cfg_set_exception (cfg, MONO_EXCEPTION_INVALID_PROGRAM);
7116 cfg->exception_message = g_strdup_printf ("Missing or incorrect header for method %s", cfg->method->name);
7118 goto exception_exit;
7120 generic_container = mono_method_get_generic_container (method);
7121 sig = mono_method_signature (method);
7122 num_args = sig->hasthis + sig->param_count;
7123 ip = (unsigned char*)header->code;
7124 cfg->cil_start = ip;
7125 end = ip + header->code_size;
7126 cfg->stat_cil_code_size += header->code_size;
7128 seq_points = cfg->gen_seq_points && cfg->method == method;
7129 #ifdef PLATFORM_ANDROID
7130 seq_points &= cfg->method->wrapper_type == MONO_WRAPPER_NONE;
7133 if (method->wrapper_type == MONO_WRAPPER_NATIVE_TO_MANAGED) {
7134 /* We could hit a seq point before attaching to the JIT (#8338) */
7138 if (cfg->gen_seq_points && cfg->method == method) {
7139 minfo = mono_debug_lookup_method (method);
7141 int i, n_il_offsets;
7145 mono_debug_symfile_get_line_numbers_full (minfo, NULL, NULL, &n_il_offsets, &il_offsets, &line_numbers, NULL, NULL, NULL, NULL);
7146 seq_point_locs = mono_bitset_mem_new (mono_mempool_alloc0 (cfg->mempool, mono_bitset_alloc_size (header->code_size, 0)), header->code_size, 0);
7147 seq_point_set_locs = mono_bitset_mem_new (mono_mempool_alloc0 (cfg->mempool, mono_bitset_alloc_size (header->code_size, 0)), header->code_size, 0);
7148 sym_seq_points = TRUE;
7149 for (i = 0; i < n_il_offsets; ++i) {
7150 if (il_offsets [i] < header->code_size)
7151 mono_bitset_set_fast (seq_point_locs, il_offsets [i]);
7153 g_free (il_offsets);
7154 g_free (line_numbers);
7159 * Methods without init_locals set could cause asserts in various passes
7160 * (#497220). To work around this, we emit dummy initialization opcodes
7161 * (OP_DUMMY_ICONST etc.) which generate no code. These are only supported
7162 * on some platforms.
7164 if ((cfg->opt & MONO_OPT_UNSAFE) && ARCH_HAVE_DUMMY_INIT)
7165 init_locals = header->init_locals;
7169 method_definition = method;
7170 while (method_definition->is_inflated) {
7171 MonoMethodInflated *imethod = (MonoMethodInflated *) method_definition;
7172 method_definition = imethod->declaring;
7175 /* SkipVerification is not allowed if core-clr is enabled */
7176 if (!dont_verify && mini_assembly_can_skip_verification (cfg->domain, method)) {
7178 dont_verify_stloc = TRUE;
7181 if (sig->is_inflated)
7182 generic_context = mono_method_get_context (method);
7183 else if (generic_container)
7184 generic_context = &generic_container->context;
7185 cfg->generic_context = generic_context;
7187 if (!cfg->generic_sharing_context)
7188 g_assert (!sig->has_type_parameters);
7190 if (sig->generic_param_count && method->wrapper_type == MONO_WRAPPER_NONE) {
7191 g_assert (method->is_inflated);
7192 g_assert (mono_method_get_context (method)->method_inst);
7194 if (method->is_inflated && mono_method_get_context (method)->method_inst)
7195 g_assert (sig->generic_param_count);
7197 if (cfg->method == method) {
7198 cfg->real_offset = 0;
7200 cfg->real_offset = inline_offset;
7203 cfg->cil_offset_to_bb = mono_mempool_alloc0 (cfg->mempool, sizeof (MonoBasicBlock*) * header->code_size);
7204 cfg->cil_offset_to_bb_len = header->code_size;
7206 cfg->current_method = method;
7208 if (cfg->verbose_level > 2)
7209 printf ("method to IR %s\n", mono_method_full_name (method, TRUE));
7211 param_types = mono_mempool_alloc (cfg->mempool, sizeof (MonoType*) * num_args);
7213 param_types [0] = method->klass->valuetype?&method->klass->this_arg:&method->klass->byval_arg;
7214 for (n = 0; n < sig->param_count; ++n)
7215 param_types [n + sig->hasthis] = sig->params [n];
7216 cfg->arg_types = param_types;
7218 cfg->dont_inline = g_list_prepend (cfg->dont_inline, method);
7219 if (cfg->method == method) {
7221 if (cfg->prof_options & MONO_PROFILE_INS_COVERAGE)
7222 cfg->coverage_info = mono_profiler_coverage_alloc (cfg->method, header->code_size);
7225 NEW_BBLOCK (cfg, start_bblock);
7226 cfg->bb_entry = start_bblock;
7227 start_bblock->cil_code = NULL;
7228 start_bblock->cil_length = 0;
7229 #if defined(__native_client_codegen__)
7230 MONO_INST_NEW (cfg, ins, OP_NACL_GC_SAFE_POINT);
7231 ins->dreg = alloc_dreg (cfg, STACK_I4);
7232 MONO_ADD_INS (start_bblock, ins);
7236 NEW_BBLOCK (cfg, end_bblock);
7237 cfg->bb_exit = end_bblock;
7238 end_bblock->cil_code = NULL;
7239 end_bblock->cil_length = 0;
7240 end_bblock->flags |= BB_INDIRECT_JUMP_TARGET;
7241 g_assert (cfg->num_bblocks == 2);
7243 arg_array = cfg->args;
7245 if (header->num_clauses) {
7246 cfg->spvars = g_hash_table_new (NULL, NULL);
7247 cfg->exvars = g_hash_table_new (NULL, NULL);
7249 /* handle exception clauses */
7250 for (i = 0; i < header->num_clauses; ++i) {
7251 MonoBasicBlock *try_bb;
7252 MonoExceptionClause *clause = &header->clauses [i];
7253 GET_BBLOCK (cfg, try_bb, ip + clause->try_offset);
7254 try_bb->real_offset = clause->try_offset;
7255 try_bb->try_start = TRUE;
7256 try_bb->region = ((i + 1) << 8) | clause->flags;
7257 GET_BBLOCK (cfg, tblock, ip + clause->handler_offset);
7258 tblock->real_offset = clause->handler_offset;
7259 tblock->flags |= BB_EXCEPTION_HANDLER;
7262 * Linking the try block with the EH block hinders inlining as we won't be able to
7263 * merge the bblocks from inlining and produce an artificial hole for no good reason.
7265 if (COMPILE_LLVM (cfg))
7266 link_bblock (cfg, try_bb, tblock);
7268 if (*(ip + clause->handler_offset) == CEE_POP)
7269 tblock->flags |= BB_EXCEPTION_DEAD_OBJ;
7271 if (clause->flags == MONO_EXCEPTION_CLAUSE_FINALLY ||
7272 clause->flags == MONO_EXCEPTION_CLAUSE_FILTER ||
7273 clause->flags == MONO_EXCEPTION_CLAUSE_FAULT) {
7274 MONO_INST_NEW (cfg, ins, OP_START_HANDLER);
7275 MONO_ADD_INS (tblock, ins);
7277 if (seq_points && clause->flags != MONO_EXCEPTION_CLAUSE_FINALLY) {
7278 /* finally clauses already have a seq point */
7279 NEW_SEQ_POINT (cfg, ins, clause->handler_offset, TRUE);
7280 MONO_ADD_INS (tblock, ins);
7283 /* todo: is a fault block unsafe to optimize? */
7284 if (clause->flags == MONO_EXCEPTION_CLAUSE_FAULT)
7285 tblock->flags |= BB_EXCEPTION_UNSAFE;
7289 /*printf ("clause try IL_%04x to IL_%04x handler %d at IL_%04x to IL_%04x\n", clause->try_offset, clause->try_offset + clause->try_len, clause->flags, clause->handler_offset, clause->handler_offset + clause->handler_len);
7291 printf ("%s", mono_disasm_code_one (NULL, method, p, &p));
7293 /* catch and filter blocks get the exception object on the stack */
7294 if (clause->flags == MONO_EXCEPTION_CLAUSE_NONE ||
7295 clause->flags == MONO_EXCEPTION_CLAUSE_FILTER) {
7296 MonoInst *dummy_use;
7298 /* mostly like handle_stack_args (), but just sets the input args */
7299 /* printf ("handling clause at IL_%04x\n", clause->handler_offset); */
7300 tblock->in_scount = 1;
7301 tblock->in_stack = mono_mempool_alloc (cfg->mempool, sizeof (MonoInst*));
7302 tblock->in_stack [0] = mono_create_exvar_for_offset (cfg, clause->handler_offset);
7305 * Add a dummy use for the exvar so its liveness info will be
7309 EMIT_NEW_DUMMY_USE (cfg, dummy_use, tblock->in_stack [0]);
7311 if (clause->flags == MONO_EXCEPTION_CLAUSE_FILTER) {
7312 GET_BBLOCK (cfg, tblock, ip + clause->data.filter_offset);
7313 tblock->flags |= BB_EXCEPTION_HANDLER;
7314 tblock->real_offset = clause->data.filter_offset;
7315 tblock->in_scount = 1;
7316 tblock->in_stack = mono_mempool_alloc (cfg->mempool, sizeof (MonoInst*));
7317 /* The filter block shares the exvar with the handler block */
7318 tblock->in_stack [0] = mono_create_exvar_for_offset (cfg, clause->handler_offset);
7319 MONO_INST_NEW (cfg, ins, OP_START_HANDLER);
7320 MONO_ADD_INS (tblock, ins);
7324 if (clause->flags != MONO_EXCEPTION_CLAUSE_FILTER &&
7325 clause->data.catch_class &&
7326 cfg->generic_sharing_context &&
7327 mono_class_check_context_used (clause->data.catch_class)) {
7329 * In shared generic code with catch
7330 * clauses containing type variables
7331 * the exception handling code has to
7332 * be able to get to the rgctx.
7333 * Therefore we have to make sure that
7334 * the vtable/mrgctx argument (for
7335 * static or generic methods) or the
7336 * "this" argument (for non-static
7337 * methods) are live.
7339 if ((method->flags & METHOD_ATTRIBUTE_STATIC) ||
7340 mini_method_get_context (method)->method_inst ||
7341 method->klass->valuetype) {
7342 mono_get_vtable_var (cfg);
7344 MonoInst *dummy_use;
7346 EMIT_NEW_DUMMY_USE (cfg, dummy_use, arg_array [0]);
7351 arg_array = (MonoInst **) alloca (sizeof (MonoInst *) * num_args);
7352 cfg->cbb = start_bblock;
7353 cfg->args = arg_array;
7354 mono_save_args (cfg, sig, inline_args);
7357 /* FIRST CODE BLOCK */
7358 NEW_BBLOCK (cfg, bblock);
7359 bblock->cil_code = ip;
7363 ADD_BBLOCK (cfg, bblock);
7365 if (cfg->method == method) {
7366 breakpoint_id = mono_debugger_method_has_breakpoint (method);
7367 if (breakpoint_id) {
7368 MONO_INST_NEW (cfg, ins, OP_BREAK);
7369 MONO_ADD_INS (bblock, ins);
7373 if (mono_security_cas_enabled ())
7374 secman = mono_security_manager_get_methods ();
7376 security = (secman && mono_security_method_has_declsec (method));
7377 /* at this point having security doesn't mean we have any code to generate */
7378 if (security && (cfg->method == method)) {
7379 /* Only Demand, NonCasDemand and DemandChoice requires code generation.
7380 * And we do not want to enter the next section (with allocation) if we
7381 * have nothing to generate */
7382 security = mono_declsec_get_demands (method, &actions);
7385 /* we must Demand SecurityPermission.Unmanaged before P/Invoking */
7386 pinvoke = (secman && (method->wrapper_type == MONO_WRAPPER_MANAGED_TO_NATIVE));
7388 MonoMethod *wrapped = mono_marshal_method_from_wrapper (method);
7389 if (wrapped && (wrapped->flags & METHOD_ATTRIBUTE_PINVOKE_IMPL)) {
7390 MonoCustomAttrInfo* custom = mono_custom_attrs_from_method (wrapped);
7392 /* unless the method or it's class has the [SuppressUnmanagedCodeSecurity] attribute */
7393 if (custom && mono_custom_attrs_has_attr (custom, secman->suppressunmanagedcodesecurity)) {
7397 mono_custom_attrs_free (custom);
7400 custom = mono_custom_attrs_from_class (wrapped->klass);
7401 if (custom && mono_custom_attrs_has_attr (custom, secman->suppressunmanagedcodesecurity)) {
7405 mono_custom_attrs_free (custom);
7408 /* not a P/Invoke after all */
7413 /* we use a separate basic block for the initialization code */
7414 NEW_BBLOCK (cfg, init_localsbb);
7415 cfg->bb_init = init_localsbb;
7416 init_localsbb->real_offset = cfg->real_offset;
7417 start_bblock->next_bb = init_localsbb;
7418 init_localsbb->next_bb = bblock;
7419 link_bblock (cfg, start_bblock, init_localsbb);
7420 link_bblock (cfg, init_localsbb, bblock);
7422 cfg->cbb = init_localsbb;
7424 if (cfg->gsharedvt && cfg->method == method) {
7425 MonoGSharedVtMethodInfo *info;
7426 MonoInst *var, *locals_var;
7429 info = mono_mempool_alloc0 (cfg->mempool, sizeof (MonoGSharedVtMethodInfo));
7430 info->method = cfg->method;
7431 info->count_entries = 16;
7432 info->entries = mono_mempool_alloc0 (cfg->mempool, sizeof (MonoRuntimeGenericContextInfoTemplate) * info->count_entries);
7433 cfg->gsharedvt_info = info;
7435 var = mono_compile_create_var (cfg, &mono_defaults.int_class->byval_arg, OP_LOCAL);
7436 /* prevent it from being register allocated */
7437 //var->flags |= MONO_INST_VOLATILE;
7438 cfg->gsharedvt_info_var = var;
7440 ins = emit_get_rgctx_gsharedvt_method (cfg, mini_method_check_context_used (cfg, method), method, info);
7441 MONO_EMIT_NEW_UNALU (cfg, OP_MOVE, var->dreg, ins->dreg);
7443 /* Allocate locals */
7444 locals_var = mono_compile_create_var (cfg, &mono_defaults.int_class->byval_arg, OP_LOCAL);
7445 /* prevent it from being register allocated */
7446 //locals_var->flags |= MONO_INST_VOLATILE;
7447 cfg->gsharedvt_locals_var = locals_var;
7449 dreg = alloc_ireg (cfg);
7450 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADI4_MEMBASE, dreg, var->dreg, MONO_STRUCT_OFFSET (MonoGSharedVtMethodRuntimeInfo, locals_size));
7452 MONO_INST_NEW (cfg, ins, OP_LOCALLOC);
7453 ins->dreg = locals_var->dreg;
7455 MONO_ADD_INS (cfg->cbb, ins);
7456 cfg->gsharedvt_locals_var_ins = ins;
7458 cfg->flags |= MONO_CFG_HAS_ALLOCA;
7461 ins->flags |= MONO_INST_INIT;
7465 /* at this point we know, if security is TRUE, that some code needs to be generated */
7466 if (security && (cfg->method == method)) {
7469 cfg->stat_cas_demand_generation++;
7471 if (actions.demand.blob) {
7472 /* Add code for SecurityAction.Demand */
7473 EMIT_NEW_DECLSECCONST (cfg, args[0], image, actions.demand);
7474 EMIT_NEW_ICONST (cfg, args [1], actions.demand.size);
7475 /* Calls static void SecurityManager.InternalDemand (byte* permissions, int size); */
7476 mono_emit_method_call (cfg, secman->demand, args, NULL);
7478 if (actions.noncasdemand.blob) {
7479 /* CLR 1.x uses a .noncasdemand (but 2.x doesn't) */
7480 /* For Mono we re-route non-CAS Demand to Demand (as the managed code must deal with it anyway) */
7481 EMIT_NEW_DECLSECCONST (cfg, args[0], image, actions.noncasdemand);
7482 EMIT_NEW_ICONST (cfg, args [1], actions.noncasdemand.size);
7483 /* Calls static void SecurityManager.InternalDemand (byte* permissions, int size); */
7484 mono_emit_method_call (cfg, secman->demand, args, NULL);
7486 if (actions.demandchoice.blob) {
7487 /* New in 2.0, Demand must succeed for one of the permissions (i.e. not all) */
7488 EMIT_NEW_DECLSECCONST (cfg, args[0], image, actions.demandchoice);
7489 EMIT_NEW_ICONST (cfg, args [1], actions.demandchoice.size);
7490 /* Calls static void SecurityManager.InternalDemandChoice (byte* permissions, int size); */
7491 mono_emit_method_call (cfg, secman->demandchoice, args, NULL);
7495 /* we must Demand SecurityPermission.Unmanaged before p/invoking */
7497 mono_emit_method_call (cfg, secman->demandunmanaged, NULL, NULL);
7500 if (mono_security_core_clr_enabled ()) {
7501 /* check if this is native code, e.g. an icall or a p/invoke */
7502 if (method->wrapper_type == MONO_WRAPPER_MANAGED_TO_NATIVE) {
7503 MonoMethod *wrapped = mono_marshal_method_from_wrapper (method);
7505 gboolean pinvk = (wrapped->flags & METHOD_ATTRIBUTE_PINVOKE_IMPL);
7506 gboolean icall = (wrapped->iflags & METHOD_IMPL_ATTRIBUTE_INTERNAL_CALL);
7508 /* if this ia a native call then it can only be JITted from platform code */
7509 if ((icall || pinvk) && method->klass && method->klass->image) {
7510 if (!mono_security_core_clr_is_platform_image (method->klass->image)) {
7511 MonoException *ex = icall ? mono_get_exception_security () :
7512 mono_get_exception_method_access ();
7513 emit_throw_exception (cfg, ex);
7520 CHECK_CFG_EXCEPTION;
7522 if (header->code_size == 0)
7525 if (get_basic_blocks (cfg, header, cfg->real_offset, ip, end, &err_pos)) {
7530 if (cfg->method == method)
7531 mono_debug_init_method (cfg, bblock, breakpoint_id);
7533 for (n = 0; n < header->num_locals; ++n) {
7534 if (header->locals [n]->type == MONO_TYPE_VOID && !header->locals [n]->byref)
7539 /* We force the vtable variable here for all shared methods
7540 for the possibility that they might show up in a stack
7541 trace where their exact instantiation is needed. */
7542 if (cfg->generic_sharing_context && method == cfg->method) {
7543 if ((method->flags & METHOD_ATTRIBUTE_STATIC) ||
7544 mini_method_get_context (method)->method_inst ||
7545 method->klass->valuetype) {
7546 mono_get_vtable_var (cfg);
7548 /* FIXME: Is there a better way to do this?
7549 We need the variable live for the duration
7550 of the whole method. */
7551 cfg->args [0]->flags |= MONO_INST_VOLATILE;
7555 /* add a check for this != NULL to inlined methods */
7556 if (is_virtual_call) {
7559 NEW_ARGLOAD (cfg, arg_ins, 0);
7560 MONO_ADD_INS (cfg->cbb, arg_ins);
7561 MONO_EMIT_NEW_CHECK_THIS (cfg, arg_ins->dreg);
7564 skip_dead_blocks = !dont_verify;
7565 if (skip_dead_blocks) {
7566 original_bb = bb = mono_basic_block_split (method, &cfg->error);
7571 /* we use a spare stack slot in SWITCH and NEWOBJ and others */
7572 stack_start = sp = mono_mempool_alloc0 (cfg->mempool, sizeof (MonoInst*) * (header->max_stack + 1));
7575 start_new_bblock = 0;
7578 if (cfg->method == method)
7579 cfg->real_offset = ip - header->code;
7581 cfg->real_offset = inline_offset;
7586 if (start_new_bblock) {
7587 bblock->cil_length = ip - bblock->cil_code;
7588 if (start_new_bblock == 2) {
7589 g_assert (ip == tblock->cil_code);
7591 GET_BBLOCK (cfg, tblock, ip);
7593 bblock->next_bb = tblock;
7596 start_new_bblock = 0;
7597 for (i = 0; i < bblock->in_scount; ++i) {
7598 if (cfg->verbose_level > 3)
7599 printf ("loading %d from temp %d\n", i, (int)bblock->in_stack [i]->inst_c0);
7600 EMIT_NEW_TEMPLOAD (cfg, ins, bblock->in_stack [i]->inst_c0);
7604 g_slist_free (class_inits);
7607 if ((tblock = cfg->cil_offset_to_bb [ip - cfg->cil_start]) && (tblock != bblock)) {
7608 link_bblock (cfg, bblock, tblock);
7609 if (sp != stack_start) {
7610 handle_stack_args (cfg, stack_start, sp - stack_start);
7612 CHECK_UNVERIFIABLE (cfg);
7614 bblock->next_bb = tblock;
7617 for (i = 0; i < bblock->in_scount; ++i) {
7618 if (cfg->verbose_level > 3)
7619 printf ("loading %d from temp %d\n", i, (int)bblock->in_stack [i]->inst_c0);
7620 EMIT_NEW_TEMPLOAD (cfg, ins, bblock->in_stack [i]->inst_c0);
7623 g_slist_free (class_inits);
7628 if (skip_dead_blocks) {
7629 int ip_offset = ip - header->code;
7631 if (ip_offset == bb->end)
7635 int op_size = mono_opcode_size (ip, end);
7636 g_assert (op_size > 0); /*The BB formation pass must catch all bad ops*/
7638 if (cfg->verbose_level > 3) printf ("SKIPPING DEAD OP at %x\n", ip_offset);
7640 if (ip_offset + op_size == bb->end) {
7641 MONO_INST_NEW (cfg, ins, OP_NOP);
7642 MONO_ADD_INS (bblock, ins);
7643 start_new_bblock = 1;
7651 * Sequence points are points where the debugger can place a breakpoint.
7652 * Currently, we generate these automatically at points where the IL
7655 if (seq_points && ((!sym_seq_points && (sp == stack_start)) || (sym_seq_points && mono_bitset_test_fast (seq_point_locs, ip - header->code)))) {
7657 * Make methods interruptable at the beginning, and at the targets of
7658 * backward branches.
7659 * Also, do this at the start of every bblock in methods with clauses too,
7660 * to be able to handle instructions with imprecise control flow like
7662 * Backward branches are handled at the end of method-to-ir ().
7664 gboolean intr_loc = ip == header->code || (!cfg->cbb->last_ins && cfg->header->num_clauses);
7666 /* Avoid sequence points on empty IL like .volatile */
7667 // FIXME: Enable this
7668 //if (!(cfg->cbb->last_ins && cfg->cbb->last_ins->opcode == OP_SEQ_POINT)) {
7669 NEW_SEQ_POINT (cfg, ins, ip - header->code, intr_loc);
7670 if (sp != stack_start)
7671 ins->flags |= MONO_INST_NONEMPTY_STACK;
7672 MONO_ADD_INS (cfg->cbb, ins);
7675 mono_bitset_set_fast (seq_point_set_locs, ip - header->code);
7678 bblock->real_offset = cfg->real_offset;
7680 if ((cfg->method == method) && cfg->coverage_info) {
7681 guint32 cil_offset = ip - header->code;
7682 cfg->coverage_info->data [cil_offset].cil_code = ip;
7684 /* TODO: Use an increment here */
7685 #if defined(TARGET_X86)
7686 MONO_INST_NEW (cfg, ins, OP_STORE_MEM_IMM);
7687 ins->inst_p0 = &(cfg->coverage_info->data [cil_offset].count);
7689 MONO_ADD_INS (cfg->cbb, ins);
7691 EMIT_NEW_PCONST (cfg, ins, &(cfg->coverage_info->data [cil_offset].count));
7692 MONO_EMIT_NEW_STORE_MEMBASE_IMM (cfg, OP_STORE_MEMBASE_IMM, ins->dreg, 0, 1);
7696 if (cfg->verbose_level > 3)
7697 printf ("converting (in B%d: stack: %d) %s", bblock->block_num, (int)(sp - stack_start), mono_disasm_code_one (NULL, method, ip, NULL));
7701 if (seq_points && !sym_seq_points && sp != stack_start) {
7703 * The C# compiler uses these nops to notify the JIT that it should
7704 * insert seq points.
7706 NEW_SEQ_POINT (cfg, ins, ip - header->code, FALSE);
7707 MONO_ADD_INS (cfg->cbb, ins);
7709 if (cfg->keep_cil_nops)
7710 MONO_INST_NEW (cfg, ins, OP_HARD_NOP);
7712 MONO_INST_NEW (cfg, ins, OP_NOP);
7714 MONO_ADD_INS (bblock, ins);
7717 if (should_insert_brekpoint (cfg->method)) {
7718 ins = mono_emit_jit_icall (cfg, mono_debugger_agent_user_break, NULL);
7720 MONO_INST_NEW (cfg, ins, OP_NOP);
7723 MONO_ADD_INS (bblock, ins);
7729 CHECK_STACK_OVF (1);
7730 n = (*ip)-CEE_LDARG_0;
7732 EMIT_NEW_ARGLOAD (cfg, ins, n);
7740 CHECK_STACK_OVF (1);
7741 n = (*ip)-CEE_LDLOC_0;
7743 EMIT_NEW_LOCLOAD (cfg, ins, n);
7752 n = (*ip)-CEE_STLOC_0;
7755 if (!dont_verify_stloc && target_type_is_incompatible (cfg, header->locals [n], *sp))
7757 emit_stloc_ir (cfg, sp, header, n);
7764 CHECK_STACK_OVF (1);
7767 EMIT_NEW_ARGLOAD (cfg, ins, n);
7773 CHECK_STACK_OVF (1);
7776 NEW_ARGLOADA (cfg, ins, n);
7777 MONO_ADD_INS (cfg->cbb, ins);
7787 if (!dont_verify_stloc && target_type_is_incompatible (cfg, param_types [ip [1]], *sp))
7789 EMIT_NEW_ARGSTORE (cfg, ins, n, *sp);
7794 CHECK_STACK_OVF (1);
7797 EMIT_NEW_LOCLOAD (cfg, ins, n);
7801 case CEE_LDLOCA_S: {
7802 unsigned char *tmp_ip;
7804 CHECK_STACK_OVF (1);
7805 CHECK_LOCAL (ip [1]);
7807 if ((tmp_ip = emit_optimized_ldloca_ir (cfg, ip, end, 1))) {
7813 EMIT_NEW_LOCLOADA (cfg, ins, ip [1]);
7822 CHECK_LOCAL (ip [1]);
7823 if (!dont_verify_stloc && target_type_is_incompatible (cfg, header->locals [ip [1]], *sp))
7825 emit_stloc_ir (cfg, sp, header, ip [1]);
7830 CHECK_STACK_OVF (1);
7831 EMIT_NEW_PCONST (cfg, ins, NULL);
7832 ins->type = STACK_OBJ;
7837 CHECK_STACK_OVF (1);
7838 EMIT_NEW_ICONST (cfg, ins, -1);
7851 CHECK_STACK_OVF (1);
7852 EMIT_NEW_ICONST (cfg, ins, (*ip) - CEE_LDC_I4_0);
7858 CHECK_STACK_OVF (1);
7860 EMIT_NEW_ICONST (cfg, ins, *((signed char*)ip));
7866 CHECK_STACK_OVF (1);
7867 EMIT_NEW_ICONST (cfg, ins, (gint32)read32 (ip + 1));
7873 CHECK_STACK_OVF (1);
7874 MONO_INST_NEW (cfg, ins, OP_I8CONST);
7875 ins->type = STACK_I8;
7876 ins->dreg = alloc_dreg (cfg, STACK_I8);
7878 ins->inst_l = (gint64)read64 (ip);
7879 MONO_ADD_INS (bblock, ins);
7885 gboolean use_aotconst = FALSE;
7887 #ifdef TARGET_POWERPC
7888 /* FIXME: Clean this up */
7889 if (cfg->compile_aot)
7890 use_aotconst = TRUE;
7893 /* FIXME: we should really allocate this only late in the compilation process */
7894 f = mono_domain_alloc (cfg->domain, sizeof (float));
7896 CHECK_STACK_OVF (1);
7902 EMIT_NEW_AOTCONST (cfg, cons, MONO_PATCH_INFO_R4, f);
7904 dreg = alloc_freg (cfg);
7905 EMIT_NEW_LOAD_MEMBASE (cfg, ins, OP_LOADR4_MEMBASE, dreg, cons->dreg, 0);
7906 ins->type = STACK_R8;
7908 MONO_INST_NEW (cfg, ins, OP_R4CONST);
7909 ins->type = STACK_R8;
7910 ins->dreg = alloc_dreg (cfg, STACK_R8);
7912 MONO_ADD_INS (bblock, ins);
7922 gboolean use_aotconst = FALSE;
7924 #ifdef TARGET_POWERPC
7925 /* FIXME: Clean this up */
7926 if (cfg->compile_aot)
7927 use_aotconst = TRUE;
7930 /* FIXME: we should really allocate this only late in the compilation process */
7931 d = mono_domain_alloc (cfg->domain, sizeof (double));
7933 CHECK_STACK_OVF (1);
7939 EMIT_NEW_AOTCONST (cfg, cons, MONO_PATCH_INFO_R8, d);
7941 dreg = alloc_freg (cfg);
7942 EMIT_NEW_LOAD_MEMBASE (cfg, ins, OP_LOADR8_MEMBASE, dreg, cons->dreg, 0);
7943 ins->type = STACK_R8;
7945 MONO_INST_NEW (cfg, ins, OP_R8CONST);
7946 ins->type = STACK_R8;
7947 ins->dreg = alloc_dreg (cfg, STACK_R8);
7949 MONO_ADD_INS (bblock, ins);
7958 MonoInst *temp, *store;
7960 CHECK_STACK_OVF (1);
7964 temp = mono_compile_create_var (cfg, type_from_stack_type (ins), OP_LOCAL);
7965 EMIT_NEW_TEMPSTORE (cfg, store, temp->inst_c0, ins);
7967 EMIT_NEW_TEMPLOAD (cfg, ins, temp->inst_c0);
7970 EMIT_NEW_TEMPLOAD (cfg, ins, temp->inst_c0);
7983 if (sp [0]->type == STACK_R8)
7984 /* we need to pop the value from the x86 FP stack */
7985 MONO_EMIT_NEW_UNALU (cfg, OP_X86_FPOP, -1, sp [0]->dreg);
7991 INLINE_FAILURE ("jmp");
7992 GSHAREDVT_FAILURE (*ip);
7995 if (stack_start != sp)
7997 token = read32 (ip + 1);
7998 /* FIXME: check the signature matches */
7999 cmethod = mini_get_method (cfg, method, token, NULL, generic_context);
8001 if (!cmethod || mono_loader_get_last_error ())
8004 if (cfg->generic_sharing_context && mono_method_check_context_used (cmethod))
8005 GENERIC_SHARING_FAILURE (CEE_JMP);
8007 if (mono_security_cas_enabled ())
8008 CHECK_CFG_EXCEPTION;
8010 emit_instrumentation_call (cfg, mono_profiler_method_leave);
8012 if (ARCH_HAVE_OP_TAIL_CALL) {
8013 MonoMethodSignature *fsig = mono_method_signature (cmethod);
8016 /* Handle tail calls similarly to calls */
8017 n = fsig->param_count + fsig->hasthis;
8021 MONO_INST_NEW_CALL (cfg, call, OP_TAILCALL);
8022 call->method = cmethod;
8023 call->tail_call = TRUE;
8024 call->signature = mono_method_signature (cmethod);
8025 call->args = mono_mempool_alloc (cfg->mempool, sizeof (MonoInst*) * n);
8026 call->inst.inst_p0 = cmethod;
8027 for (i = 0; i < n; ++i)
8028 EMIT_NEW_ARGLOAD (cfg, call->args [i], i);
8030 mono_arch_emit_call (cfg, call);
8031 cfg->param_area = MAX(cfg->param_area, call->stack_usage);
8032 MONO_ADD_INS (bblock, (MonoInst*)call);
8034 for (i = 0; i < num_args; ++i)
8035 /* Prevent arguments from being optimized away */
8036 arg_array [i]->flags |= MONO_INST_VOLATILE;
8038 MONO_INST_NEW_CALL (cfg, call, OP_JMP);
8039 ins = (MonoInst*)call;
8040 ins->inst_p0 = cmethod;
8041 MONO_ADD_INS (bblock, ins);
8045 start_new_bblock = 1;
8050 case CEE_CALLVIRT: {
8051 MonoInst *addr = NULL;
8052 MonoMethodSignature *fsig = NULL;
8054 int virtual = *ip == CEE_CALLVIRT;
8055 int calli = *ip == CEE_CALLI;
8056 gboolean pass_imt_from_rgctx = FALSE;
8057 MonoInst *imt_arg = NULL;
8058 MonoInst *keep_this_alive = NULL;
8059 gboolean pass_vtable = FALSE;
8060 gboolean pass_mrgctx = FALSE;
8061 MonoInst *vtable_arg = NULL;
8062 gboolean check_this = FALSE;
8063 gboolean supported_tail_call = FALSE;
8064 gboolean tail_call = FALSE;
8065 gboolean need_seq_point = FALSE;
8066 guint32 call_opcode = *ip;
8067 gboolean emit_widen = TRUE;
8068 gboolean push_res = TRUE;
8069 gboolean skip_ret = FALSE;
8070 gboolean delegate_invoke = FALSE;
8073 token = read32 (ip + 1);
8078 //GSHAREDVT_FAILURE (*ip);
8083 fsig = mini_get_signature (method, token, generic_context);
8084 n = fsig->param_count + fsig->hasthis;
8086 if (method->dynamic && fsig->pinvoke) {
8090 * This is a call through a function pointer using a pinvoke
8091 * signature. Have to create a wrapper and call that instead.
8092 * FIXME: This is very slow, need to create a wrapper at JIT time
8093 * instead based on the signature.
8095 EMIT_NEW_IMAGECONST (cfg, args [0], method->klass->image);
8096 EMIT_NEW_PCONST (cfg, args [1], fsig);
8098 addr = mono_emit_jit_icall (cfg, mono_get_native_calli_wrapper, args);
8101 MonoMethod *cil_method;
8103 cmethod = mini_get_method (cfg, method, token, NULL, generic_context);
8104 cil_method = cmethod;
8106 if (constrained_call) {
8107 if (method->wrapper_type != MONO_WRAPPER_NONE) {
8108 if (cfg->verbose_level > 2)
8109 printf ("DM Constrained call to %s\n", mono_type_get_full_name (constrained_call));
8110 if (!((constrained_call->byval_arg.type == MONO_TYPE_VAR ||
8111 constrained_call->byval_arg.type == MONO_TYPE_MVAR) &&
8112 cfg->generic_sharing_context)) {
8113 cmethod = mono_get_method_constrained_with_method (image, cil_method, constrained_call, generic_context);
8116 if (cfg->verbose_level > 2)
8117 printf ("Constrained call to %s\n", mono_type_get_full_name (constrained_call));
8119 if ((constrained_call->byval_arg.type == MONO_TYPE_VAR || constrained_call->byval_arg.type == MONO_TYPE_MVAR) && cfg->generic_sharing_context) {
8121 * This is needed since get_method_constrained can't find
8122 * the method in klass representing a type var.
8123 * The type var is guaranteed to be a reference type in this
8126 if (!mini_is_gsharedvt_klass (cfg, constrained_call))
8127 g_assert (!cmethod->klass->valuetype);
8129 cmethod = mono_get_method_constrained (image, token, constrained_call, generic_context, &cil_method);
8134 if (!cmethod || mono_loader_get_last_error ())
8136 if (!dont_verify && !cfg->skip_visibility) {
8137 MonoMethod *target_method = cil_method;
8138 if (method->is_inflated) {
8139 target_method = mini_get_method_allow_open (method, token, NULL, &(mono_method_get_generic_container (method_definition)->context));
8141 if (!mono_method_can_access_method (method_definition, target_method) &&
8142 !mono_method_can_access_method (method, cil_method))
8143 METHOD_ACCESS_FAILURE (method, cil_method);
8146 if (mono_security_core_clr_enabled ())
8147 ensure_method_is_allowed_to_call_method (cfg, method, cil_method, bblock, ip);
8149 if (!virtual && (cmethod->flags & METHOD_ATTRIBUTE_ABSTRACT))
8150 /* MS.NET seems to silently convert this to a callvirt */
8155 * MS.NET accepts non virtual calls to virtual final methods of transparent proxy classes and
8156 * converts to a callvirt.
8158 * tests/bug-515884.il is an example of this behavior
8160 const int test_flags = METHOD_ATTRIBUTE_VIRTUAL | METHOD_ATTRIBUTE_FINAL | METHOD_ATTRIBUTE_STATIC;
8161 const int expected_flags = METHOD_ATTRIBUTE_VIRTUAL | METHOD_ATTRIBUTE_FINAL;
8162 if (!virtual && mono_class_is_marshalbyref (cmethod->klass) && (cmethod->flags & test_flags) == expected_flags && cfg->method->wrapper_type == MONO_WRAPPER_NONE)
8166 if (!cmethod->klass->inited)
8167 if (!mono_class_init (cmethod->klass))
8168 TYPE_LOAD_ERROR (cmethod->klass);
8170 if (cmethod->iflags & METHOD_IMPL_ATTRIBUTE_INTERNAL_CALL &&
8171 mini_class_is_system_array (cmethod->klass)) {
8172 array_rank = cmethod->klass->rank;
8173 fsig = mono_method_signature (cmethod);
8175 fsig = mono_method_signature (cmethod);
8180 if (fsig->pinvoke) {
8181 MonoMethod *wrapper = mono_marshal_get_native_wrapper (cmethod,
8182 check_for_pending_exc, cfg->compile_aot);
8183 fsig = mono_method_signature (wrapper);
8184 } else if (constrained_call) {
8185 fsig = mono_method_signature (cmethod);
8187 fsig = mono_method_get_signature_checked (cmethod, image, token, generic_context, &cfg->error);
8192 mono_save_token_info (cfg, image, token, cil_method);
8194 if (!MONO_TYPE_IS_VOID (fsig->ret)) {
8196 * Need to emit an implicit seq point after every non-void call so single stepping through nested calls like
8197 * foo (bar (), baz ())
8198 * works correctly. MS does this also:
8199 * http://stackoverflow.com/questions/6937198/making-your-net-language-step-correctly-in-the-debugger
8200 * The problem with this approach is that the debugger will stop after all calls returning a value,
8201 * even for simple cases, like:
8204 /* Special case a few common successor opcodes */
8205 if (!(ip + 5 < end && (ip [5] == CEE_POP || ip [5] == CEE_NOP)) && !(seq_point_locs && mono_bitset_test_fast (seq_point_locs, ip + 5 - header->code)))
8206 need_seq_point = TRUE;
8209 n = fsig->param_count + fsig->hasthis;
8211 /* Don't support calls made using type arguments for now */
8213 if (cfg->gsharedvt) {
8214 if (mini_is_gsharedvt_signature (cfg, fsig))
8215 GSHAREDVT_FAILURE (*ip);
8219 if (mono_security_cas_enabled ()) {
8220 if (check_linkdemand (cfg, method, cmethod))
8221 INLINE_FAILURE ("linkdemand");
8222 CHECK_CFG_EXCEPTION;
8225 if (cmethod->string_ctor && method->wrapper_type != MONO_WRAPPER_RUNTIME_INVOKE)
8226 g_assert_not_reached ();
8229 if (!cfg->generic_sharing_context && cmethod && cmethod->klass->generic_container)
8232 if (!cfg->generic_sharing_context && cmethod)
8233 g_assert (!mono_method_check_context_used (cmethod));
8237 //g_assert (!virtual || fsig->hasthis);
8241 if (constrained_call) {
8242 if (mini_is_gsharedvt_klass (cfg, constrained_call)) {
8244 * Constrained calls need to behave differently at runtime depending on whether the receiver is instantiated as a ref type or as a vtype.
8246 if ((cmethod->klass != mono_defaults.object_class) && constrained_call->valuetype && cmethod->klass->valuetype) {
8247 /* The 'Own method' case below */
8248 } else if (cmethod->klass->image != mono_defaults.corlib && !(cmethod->klass->flags & TYPE_ATTRIBUTE_INTERFACE) && !cmethod->klass->valuetype) {
8249 /* 'The type parameter is instantiated as a reference type' case below. */
8250 } else if (((cmethod->klass == mono_defaults.object_class) || (cmethod->klass->flags & TYPE_ATTRIBUTE_INTERFACE) || (!cmethod->klass->valuetype && cmethod->klass->image != mono_defaults.corlib)) &&
8251 (MONO_TYPE_IS_VOID (fsig->ret) || MONO_TYPE_IS_PRIMITIVE (fsig->ret) || MONO_TYPE_IS_REFERENCE (fsig->ret) || MONO_TYPE_ISSTRUCT (fsig->ret) || mini_is_gsharedvt_type (cfg, fsig->ret)) &&
8252 (fsig->param_count == 0 || (!fsig->hasthis && fsig->param_count == 1) || (fsig->param_count == 1 && (MONO_TYPE_IS_REFERENCE (fsig->params [0]) || mini_is_gsharedvt_type (cfg, fsig->params [0]))))) {
8253 MonoInst *args [16];
8256 * This case handles calls to
8257 * - object:ToString()/Equals()/GetHashCode(),
8258 * - System.IComparable<T>:CompareTo()
8259 * - System.IEquatable<T>:Equals ()
8260 * plus some simple interface calls enough to support AsyncTaskMethodBuilder.
8264 if (mono_method_check_context_used (cmethod))
8265 args [1] = emit_get_rgctx_method (cfg, mono_method_check_context_used (cmethod), cmethod, MONO_RGCTX_INFO_METHOD);
8267 EMIT_NEW_METHODCONST (cfg, args [1], cmethod);
8268 args [2] = emit_get_rgctx_klass (cfg, mono_class_check_context_used (constrained_call), constrained_call, MONO_RGCTX_INFO_KLASS);
8270 /* !fsig->hasthis is for the wrapper for the Object.GetType () icall */
8271 if (fsig->hasthis && fsig->param_count) {
8272 /* Pass the arguments using a localloc-ed array using the format expected by runtime_invoke () */
8273 MONO_INST_NEW (cfg, ins, OP_LOCALLOC_IMM);
8274 ins->dreg = alloc_preg (cfg);
8275 ins->inst_imm = fsig->param_count * sizeof (mgreg_t);
8276 MONO_ADD_INS (cfg->cbb, ins);
8279 if (mini_is_gsharedvt_type (cfg, fsig->params [0])) {
8282 args [3] = emit_get_gsharedvt_info_klass (cfg, mono_class_from_mono_type (fsig->params [0]), MONO_RGCTX_INFO_CLASS_BOX_TYPE);
8284 EMIT_NEW_VARLOADA_VREG (cfg, ins, sp [1]->dreg, fsig->params [0]);
8285 addr_reg = ins->dreg;
8286 EMIT_NEW_STORE_MEMBASE (cfg, ins, OP_STORE_MEMBASE_REG, args [4]->dreg, 0, addr_reg);
8288 EMIT_NEW_ICONST (cfg, args [3], 0);
8289 EMIT_NEW_STORE_MEMBASE (cfg, ins, OP_STORE_MEMBASE_REG, args [4]->dreg, 0, sp [1]->dreg);
8292 EMIT_NEW_ICONST (cfg, args [3], 0);
8293 EMIT_NEW_ICONST (cfg, args [4], 0);
8295 ins = mono_emit_jit_icall (cfg, mono_gsharedvt_constrained_call, args);
8298 if (mini_is_gsharedvt_type (cfg, fsig->ret)) {
8299 ins = handle_unbox_gsharedvt (cfg, mono_class_from_mono_type (fsig->ret), ins, &bblock);
8300 } else if (MONO_TYPE_IS_PRIMITIVE (fsig->ret) || MONO_TYPE_ISSTRUCT (fsig->ret)) {
8304 NEW_BIALU_IMM (cfg, add, OP_ADD_IMM, alloc_dreg (cfg, STACK_MP), ins->dreg, sizeof (MonoObject));
8305 MONO_ADD_INS (cfg->cbb, add);
8307 NEW_LOAD_MEMBASE_TYPE (cfg, ins, fsig->ret, add->dreg, 0);
8308 MONO_ADD_INS (cfg->cbb, ins);
8309 /* ins represents the call result */
8314 GSHAREDVT_FAILURE (*ip);
8318 * We have the `constrained.' prefix opcode.
8320 if (constrained_call->valuetype && (cmethod->klass == mono_defaults.object_class || cmethod->klass == mono_defaults.enum_class->parent || cmethod->klass == mono_defaults.enum_class)) {
8322 * The type parameter is instantiated as a valuetype,
8323 * but that type doesn't override the method we're
8324 * calling, so we need to box `this'.
8326 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, ins, &constrained_call->byval_arg, sp [0]->dreg, 0);
8327 ins->klass = constrained_call;
8328 sp [0] = handle_box (cfg, ins, constrained_call, mono_class_check_context_used (constrained_call), &bblock);
8329 CHECK_CFG_EXCEPTION;
8330 } else if (!constrained_call->valuetype) {
8331 int dreg = alloc_ireg_ref (cfg);
8334 * The type parameter is instantiated as a reference
8335 * type. We have a managed pointer on the stack, so
8336 * we need to dereference it here.
8338 EMIT_NEW_LOAD_MEMBASE (cfg, ins, OP_LOAD_MEMBASE, dreg, sp [0]->dreg, 0);
8339 ins->type = STACK_OBJ;
8342 if (cmethod->klass->valuetype) {
8345 /* Interface method */
8348 mono_class_setup_vtable (constrained_call);
8349 CHECK_TYPELOAD (constrained_call);
8350 ioffset = mono_class_interface_offset (constrained_call, cmethod->klass);
8352 TYPE_LOAD_ERROR (constrained_call);
8353 slot = mono_method_get_vtable_slot (cmethod);
8355 TYPE_LOAD_ERROR (cmethod->klass);
8356 cmethod = constrained_call->vtable [ioffset + slot];
8358 if (cmethod->klass == mono_defaults.enum_class) {
8359 /* Enum implements some interfaces, so treat this as the first case */
8360 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, ins, &constrained_call->byval_arg, sp [0]->dreg, 0);
8361 ins->klass = constrained_call;
8362 sp [0] = handle_box (cfg, ins, constrained_call, mono_class_check_context_used (constrained_call), &bblock);
8363 CHECK_CFG_EXCEPTION;
8368 constrained_call = NULL;
8371 if (!calli && check_call_signature (cfg, fsig, sp))
8374 #ifdef MONO_ARCH_HAVE_CREATE_DELEGATE_TRAMPOLINE
8375 if (cmethod && (cmethod->klass->parent == mono_defaults.multicastdelegate_class) && !strcmp (cmethod->name, "Invoke"))
8376 delegate_invoke = TRUE;
8379 if (cmethod && (cfg->opt & MONO_OPT_INTRINS) && (ins = mini_emit_inst_for_sharable_method (cfg, cmethod, fsig, sp))) {
8381 if (!MONO_TYPE_IS_VOID (fsig->ret)) {
8382 type_to_eval_stack_type ((cfg), fsig->ret, ins);
8390 * If the callee is a shared method, then its static cctor
8391 * might not get called after the call was patched.
8393 if (cfg->generic_sharing_context && cmethod && cmethod->klass != method->klass && cmethod->klass->generic_class && mono_method_is_generic_sharable (cmethod, TRUE) && mono_class_needs_cctor_run (cmethod->klass, method)) {
8394 emit_generic_class_init (cfg, cmethod->klass);
8395 CHECK_TYPELOAD (cmethod->klass);
8399 check_method_sharing (cfg, cmethod, &pass_vtable, &pass_mrgctx);
8401 if (cfg->generic_sharing_context && cmethod) {
8402 MonoGenericContext *cmethod_context = mono_method_get_context (cmethod);
8404 context_used = mini_method_check_context_used (cfg, cmethod);
8406 if (context_used && (cmethod->klass->flags & TYPE_ATTRIBUTE_INTERFACE)) {
8407 /* Generic method interface
8408 calls are resolved via a
8409 helper function and don't
8411 if (!cmethod_context || !cmethod_context->method_inst)
8412 pass_imt_from_rgctx = TRUE;
8416 * If a shared method calls another
8417 * shared method then the caller must
8418 * have a generic sharing context
8419 * because the magic trampoline
8420 * requires it. FIXME: We shouldn't
8421 * have to force the vtable/mrgctx
8422 * variable here. Instead there
8423 * should be a flag in the cfg to
8424 * request a generic sharing context.
8427 ((method->flags & METHOD_ATTRIBUTE_STATIC) || method->klass->valuetype))
8428 mono_get_vtable_var (cfg);
8433 vtable_arg = emit_get_rgctx_klass (cfg, context_used, cmethod->klass, MONO_RGCTX_INFO_VTABLE);
8435 MonoVTable *vtable = mono_class_vtable (cfg->domain, cmethod->klass);
8437 CHECK_TYPELOAD (cmethod->klass);
8438 EMIT_NEW_VTABLECONST (cfg, vtable_arg, vtable);
8443 g_assert (!vtable_arg);
8445 if (!cfg->compile_aot) {
8447 * emit_get_rgctx_method () calls mono_class_vtable () so check
8448 * for type load errors before.
8450 mono_class_setup_vtable (cmethod->klass);
8451 CHECK_TYPELOAD (cmethod->klass);
8454 vtable_arg = emit_get_rgctx_method (cfg, context_used, cmethod, MONO_RGCTX_INFO_METHOD_RGCTX);
8456 /* !marshalbyref is needed to properly handle generic methods + remoting */
8457 if ((!(cmethod->flags & METHOD_ATTRIBUTE_VIRTUAL) ||
8458 MONO_METHOD_IS_FINAL (cmethod)) &&
8459 !mono_class_is_marshalbyref (cmethod->klass)) {
8466 if (pass_imt_from_rgctx) {
8467 g_assert (!pass_vtable);
8470 imt_arg = emit_get_rgctx_method (cfg, context_used,
8471 cmethod, MONO_RGCTX_INFO_METHOD);
8475 MONO_EMIT_NEW_CHECK_THIS (cfg, sp [0]->dreg);
8477 /* Calling virtual generic methods */
8478 if (cmethod && virtual &&
8479 (cmethod->flags & METHOD_ATTRIBUTE_VIRTUAL) &&
8480 !(MONO_METHOD_IS_FINAL (cmethod) &&
8481 cmethod->wrapper_type != MONO_WRAPPER_REMOTING_INVOKE_WITH_CHECK) &&
8482 fsig->generic_param_count &&
8483 !(cfg->gsharedvt && mini_is_gsharedvt_signature (cfg, fsig))) {
8484 MonoInst *this_temp, *this_arg_temp, *store;
8485 MonoInst *iargs [4];
8486 gboolean use_imt = FALSE;
8488 g_assert (fsig->is_inflated);
8490 /* Prevent inlining of methods that contain indirect calls */
8491 INLINE_FAILURE ("virtual generic call");
8493 if (cfg->gsharedvt && mini_is_gsharedvt_signature (cfg, fsig))
8494 GSHAREDVT_FAILURE (*ip);
8496 #if MONO_ARCH_HAVE_GENERALIZED_IMT_THUNK && defined(MONO_ARCH_GSHARED_SUPPORTED)
8497 if (cmethod->wrapper_type == MONO_WRAPPER_NONE && mono_use_imt)
8502 g_assert (!imt_arg);
8504 g_assert (cmethod->is_inflated);
8505 imt_arg = emit_get_rgctx_method (cfg, context_used,
8506 cmethod, MONO_RGCTX_INFO_METHOD);
8507 ins = mono_emit_method_call_full (cfg, cmethod, fsig, FALSE, sp, sp [0], imt_arg, NULL);
8509 this_temp = mono_compile_create_var (cfg, type_from_stack_type (sp [0]), OP_LOCAL);
8510 NEW_TEMPSTORE (cfg, store, this_temp->inst_c0, sp [0]);
8511 MONO_ADD_INS (bblock, store);
8513 /* FIXME: This should be a managed pointer */
8514 this_arg_temp = mono_compile_create_var (cfg, &mono_defaults.int_class->byval_arg, OP_LOCAL);
8516 EMIT_NEW_TEMPLOAD (cfg, iargs [0], this_temp->inst_c0);
8517 iargs [1] = emit_get_rgctx_method (cfg, context_used,
8518 cmethod, MONO_RGCTX_INFO_METHOD);
8519 EMIT_NEW_TEMPLOADA (cfg, iargs [2], this_arg_temp->inst_c0);
8520 addr = mono_emit_jit_icall (cfg,
8521 mono_helper_compile_generic_method, iargs);
8523 EMIT_NEW_TEMPLOAD (cfg, sp [0], this_arg_temp->inst_c0);
8525 ins = (MonoInst*)mono_emit_calli (cfg, fsig, sp, addr, NULL, NULL);
8532 * Implement a workaround for the inherent races involved in locking:
8538 * If a thread abort happens between the call to Monitor.Enter () and the start of the
8539 * try block, the Exit () won't be executed, see:
8540 * http://www.bluebytesoftware.com/blog/2007/01/30/MonitorEnterThreadAbortsAndOrphanedLocks.aspx
8541 * To work around this, we extend such try blocks to include the last x bytes
8542 * of the Monitor.Enter () call.
8544 if (cmethod && cmethod->klass == mono_defaults.monitor_class && !strcmp (cmethod->name, "Enter") && mono_method_signature (cmethod)->param_count == 1) {
8545 MonoBasicBlock *tbb;
8547 GET_BBLOCK (cfg, tbb, ip + 5);
8549 * Only extend try blocks with a finally, to avoid catching exceptions thrown
8550 * from Monitor.Enter like ArgumentNullException.
8552 if (tbb->try_start && MONO_REGION_FLAGS(tbb->region) == MONO_EXCEPTION_CLAUSE_FINALLY) {
8553 /* Mark this bblock as needing to be extended */
8554 tbb->extend_try_block = TRUE;
8558 /* Conversion to a JIT intrinsic */
8559 if (cmethod && (cfg->opt & MONO_OPT_INTRINS) && (ins = mini_emit_inst_for_method (cfg, cmethod, fsig, sp))) {
8561 if (!MONO_TYPE_IS_VOID (fsig->ret)) {
8562 type_to_eval_stack_type ((cfg), fsig->ret, ins);
8569 if (cmethod && (cfg->opt & MONO_OPT_INLINE) &&
8570 (!virtual || !(cmethod->flags & METHOD_ATTRIBUTE_VIRTUAL) || MONO_METHOD_IS_FINAL (cmethod)) &&
8571 mono_method_check_inlining (cfg, cmethod)) {
8573 gboolean always = FALSE;
8575 if ((cmethod->iflags & METHOD_IMPL_ATTRIBUTE_INTERNAL_CALL) ||
8576 (cmethod->flags & METHOD_ATTRIBUTE_PINVOKE_IMPL)) {
8577 /* Prevent inlining of methods that call wrappers */
8578 INLINE_FAILURE ("wrapper call");
8579 cmethod = mono_marshal_get_native_wrapper (cmethod, check_for_pending_exc, FALSE);
8583 costs = inline_method (cfg, cmethod, fsig, sp, ip, cfg->real_offset, always, &bblock);
8585 cfg->real_offset += 5;
8587 if (!MONO_TYPE_IS_VOID (fsig->ret)) {
8588 /* *sp is already set by inline_method */
8593 inline_costs += costs;
8599 /* Tail recursion elimination */
8600 if ((cfg->opt & MONO_OPT_TAILC) && call_opcode == CEE_CALL && cmethod == method && ip [5] == CEE_RET && !vtable_arg) {
8601 gboolean has_vtargs = FALSE;
8604 /* Prevent inlining of methods with tail calls (the call stack would be altered) */
8605 INLINE_FAILURE ("tail call");
8607 /* keep it simple */
8608 for (i = fsig->param_count - 1; i >= 0; i--) {
8609 if (MONO_TYPE_ISSTRUCT (mono_method_signature (cmethod)->params [i]))
8614 for (i = 0; i < n; ++i)
8615 EMIT_NEW_ARGSTORE (cfg, ins, i, sp [i]);
8616 MONO_INST_NEW (cfg, ins, OP_BR);
8617 MONO_ADD_INS (bblock, ins);
8618 tblock = start_bblock->out_bb [0];
8619 link_bblock (cfg, bblock, tblock);
8620 ins->inst_target_bb = tblock;
8621 start_new_bblock = 1;
8623 /* skip the CEE_RET, too */
8624 if (ip_in_bb (cfg, bblock, ip + 5))
8631 inline_costs += 10 * num_calls++;
8634 * Making generic calls out of gsharedvt methods.
8635 * This needs to be used for all generic calls, not just ones with a gsharedvt signature, to avoid
8636 * patching gshared method addresses into a gsharedvt method.
8638 if (cmethod && cfg->gsharedvt && (mini_is_gsharedvt_signature (cfg, fsig) || cmethod->is_inflated || cmethod->klass->generic_class)) {
8639 MonoRgctxInfoType info_type;
8642 //if (cmethod->klass->flags & TYPE_ATTRIBUTE_INTERFACE)
8643 //GSHAREDVT_FAILURE (*ip);
8644 // disable for possible remoting calls
8645 if (fsig->hasthis && (mono_class_is_marshalbyref (method->klass) || method->klass == mono_defaults.object_class))
8646 GSHAREDVT_FAILURE (*ip);
8647 if (fsig->generic_param_count) {
8648 /* virtual generic call */
8649 g_assert (mono_use_imt);
8650 g_assert (!imt_arg);
8651 /* Same as the virtual generic case above */
8652 imt_arg = emit_get_rgctx_method (cfg, context_used,
8653 cmethod, MONO_RGCTX_INFO_METHOD);
8654 /* This is not needed, as the trampoline code will pass one, and it might be passed in the same reg as the imt arg */
8656 } else if ((cmethod->klass->flags & TYPE_ATTRIBUTE_INTERFACE) && !imt_arg) {
8657 /* This can happen when we call a fully instantiated iface method */
8658 imt_arg = emit_get_rgctx_method (cfg, context_used,
8659 cmethod, MONO_RGCTX_INFO_METHOD);
8664 if (cmethod->klass->rank && cmethod->klass->byval_arg.type != MONO_TYPE_SZARRAY)
8665 /* test_0_multi_dim_arrays () in gshared.cs */
8666 GSHAREDVT_FAILURE (*ip);
8668 if ((cmethod->klass->parent == mono_defaults.multicastdelegate_class) && (!strcmp (cmethod->name, "Invoke")))
8669 keep_this_alive = sp [0];
8671 if (virtual && (cmethod->flags & METHOD_ATTRIBUTE_VIRTUAL))
8672 info_type = MONO_RGCTX_INFO_METHOD_GSHAREDVT_OUT_TRAMPOLINE_VIRT;
8674 info_type = MONO_RGCTX_INFO_METHOD_GSHAREDVT_OUT_TRAMPOLINE;
8675 addr = emit_get_rgctx_gsharedvt_call (cfg, context_used, fsig, cmethod, info_type);
8677 ins = (MonoInst*)mono_emit_calli (cfg, fsig, sp, addr, imt_arg, vtable_arg);
8679 } else if (calli && cfg->gsharedvt && mini_is_gsharedvt_signature (cfg, fsig)) {
8681 * We pass the address to the gsharedvt trampoline in the rgctx reg
8683 MonoInst *callee = addr;
8685 if (method->wrapper_type != MONO_WRAPPER_DELEGATE_INVOKE)
8687 GSHAREDVT_FAILURE (*ip);
8689 addr = emit_get_rgctx_sig (cfg, context_used,
8690 fsig, MONO_RGCTX_INFO_SIG_GSHAREDVT_OUT_TRAMPOLINE_CALLI);
8691 ins = (MonoInst*)mono_emit_calli (cfg, fsig, sp, addr, NULL, callee);
8695 /* Generic sharing */
8698 * Use this if the callee is gsharedvt sharable too, since
8699 * at runtime we might find an instantiation so the call cannot
8700 * be patched (the 'no_patch' code path in mini-trampolines.c).
8702 if (context_used && !imt_arg && !array_rank && !delegate_invoke &&
8703 (!mono_method_is_generic_sharable_full (cmethod, TRUE, FALSE, FALSE) ||
8704 !mono_class_generic_sharing_enabled (cmethod->klass)) &&
8705 (!virtual || MONO_METHOD_IS_FINAL (cmethod) ||
8706 !(cmethod->flags & METHOD_ATTRIBUTE_VIRTUAL))) {
8707 INLINE_FAILURE ("gshared");
8709 g_assert (cfg->generic_sharing_context && cmethod);
8713 * We are compiling a call to a
8714 * generic method from shared code,
8715 * which means that we have to look up
8716 * the method in the rgctx and do an
8720 MONO_EMIT_NEW_CHECK_THIS (cfg, sp [0]->dreg);
8722 addr = emit_get_rgctx_method (cfg, context_used, cmethod, MONO_RGCTX_INFO_GENERIC_METHOD_CODE);
8723 ins = (MonoInst*)mono_emit_calli (cfg, fsig, sp, addr, imt_arg, vtable_arg);
8727 /* Indirect calls */
8729 if (call_opcode == CEE_CALL)
8730 g_assert (context_used);
8731 else if (call_opcode == CEE_CALLI)
8732 g_assert (!vtable_arg);
8734 /* FIXME: what the hell is this??? */
8735 g_assert (cmethod->flags & METHOD_ATTRIBUTE_FINAL ||
8736 !(cmethod->flags & METHOD_ATTRIBUTE_FINAL));
8738 /* Prevent inlining of methods with indirect calls */
8739 INLINE_FAILURE ("indirect call");
8741 if (addr->opcode == OP_PCONST || addr->opcode == OP_AOTCONST || addr->opcode == OP_GOT_ENTRY) {
8746 * Instead of emitting an indirect call, emit a direct call
8747 * with the contents of the aotconst as the patch info.
8749 if (addr->opcode == OP_PCONST || addr->opcode == OP_AOTCONST) {
8750 info_type = addr->inst_c1;
8751 info_data = addr->inst_p0;
8753 info_type = addr->inst_right->inst_c1;
8754 info_data = addr->inst_right->inst_left;
8757 if (info_type == MONO_PATCH_INFO_ICALL_ADDR || info_type == MONO_PATCH_INFO_JIT_ICALL_ADDR) {
8758 ins = (MonoInst*)mono_emit_abs_call (cfg, info_type, info_data, fsig, sp);
8763 ins = (MonoInst*)mono_emit_calli (cfg, fsig, sp, addr, imt_arg, vtable_arg);
8771 if (strcmp (cmethod->name, "Set") == 0) { /* array Set */
8772 MonoInst *val = sp [fsig->param_count];
8774 if (val->type == STACK_OBJ) {
8775 MonoInst *iargs [2];
8780 mono_emit_jit_icall (cfg, mono_helper_stelem_ref_check, iargs);
8783 addr = mini_emit_ldelema_ins (cfg, cmethod, sp, ip, TRUE);
8784 EMIT_NEW_STORE_MEMBASE_TYPE (cfg, ins, fsig->params [fsig->param_count - 1], addr->dreg, 0, val->dreg);
8785 if (cfg->gen_write_barriers && val->type == STACK_OBJ && !(val->opcode == OP_PCONST && val->inst_c0 == 0))
8786 emit_write_barrier (cfg, addr, val);
8787 } else if (strcmp (cmethod->name, "Get") == 0) { /* array Get */
8788 addr = mini_emit_ldelema_ins (cfg, cmethod, sp, ip, FALSE);
8790 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, ins, fsig->ret, addr->dreg, 0);
8791 } else if (strcmp (cmethod->name, "Address") == 0) { /* array Address */
8792 if (!cmethod->klass->element_class->valuetype && !readonly)
8793 mini_emit_check_array_type (cfg, sp [0], cmethod->klass);
8794 CHECK_TYPELOAD (cmethod->klass);
8797 addr = mini_emit_ldelema_ins (cfg, cmethod, sp, ip, FALSE);
8800 g_assert_not_reached ();
8807 ins = mini_redirect_call (cfg, cmethod, fsig, sp, virtual ? sp [0] : NULL);
8811 /* Tail prefix / tail call optimization */
8813 /* FIXME: Enabling TAILC breaks some inlining/stack trace/etc tests */
8814 /* FIXME: runtime generic context pointer for jumps? */
8815 /* FIXME: handle this for generic sharing eventually */
8816 if (cmethod && (ins_flag & MONO_INST_TAILCALL) &&
8817 !vtable_arg && !cfg->generic_sharing_context && is_supported_tail_call (cfg, method, cmethod, fsig, call_opcode))
8818 supported_tail_call = TRUE;
8820 if (supported_tail_call) {
8823 /* Prevent inlining of methods with tail calls (the call stack would be altered) */
8824 INLINE_FAILURE ("tail call");
8826 //printf ("HIT: %s -> %s\n", mono_method_full_name (cfg->method, TRUE), mono_method_full_name (cmethod, TRUE));
8828 if (ARCH_HAVE_OP_TAIL_CALL) {
8829 /* Handle tail calls similarly to normal calls */
8832 emit_instrumentation_call (cfg, mono_profiler_method_leave);
8834 MONO_INST_NEW_CALL (cfg, call, OP_JMP);
8835 call->tail_call = TRUE;
8836 call->method = cmethod;
8837 call->signature = mono_method_signature (cmethod);
8840 * We implement tail calls by storing the actual arguments into the
8841 * argument variables, then emitting a CEE_JMP.
8843 for (i = 0; i < n; ++i) {
8844 /* Prevent argument from being register allocated */
8845 arg_array [i]->flags |= MONO_INST_VOLATILE;
8846 EMIT_NEW_ARGSTORE (cfg, ins, i, sp [i]);
8848 ins = (MonoInst*)call;
8849 ins->inst_p0 = cmethod;
8850 ins->inst_p1 = arg_array [0];
8851 MONO_ADD_INS (bblock, ins);
8852 link_bblock (cfg, bblock, end_bblock);
8853 start_new_bblock = 1;
8855 // FIXME: Eliminate unreachable epilogs
8858 * OP_TAILCALL has no return value, so skip the CEE_RET if it is
8859 * only reachable from this call.
8861 GET_BBLOCK (cfg, tblock, ip + 5);
8862 if (tblock == bblock || tblock->in_count == 0)
8871 * Synchronized wrappers.
8872 * It's hard to determine where to replace a method with its synchronized
8873 * wrapper without causing an infinite recursion. The current solution is
8874 * to add the synchronized wrapper in the trampolines, and to
8875 * change the called method to a dummy wrapper, and resolve that wrapper
8876 * to the real method in mono_jit_compile_method ().
8878 if (cfg->method->wrapper_type == MONO_WRAPPER_SYNCHRONIZED) {
8879 MonoMethod *orig = mono_marshal_method_from_wrapper (cfg->method);
8880 if (cmethod == orig || (cmethod->is_inflated && mono_method_get_declaring_generic_method (cmethod) == orig))
8881 cmethod = mono_marshal_get_synchronized_inner_wrapper (cmethod);
8885 INLINE_FAILURE ("call");
8886 ins = mono_emit_method_call_full (cfg, cmethod, fsig, tail_call, sp, virtual ? sp [0] : NULL,
8887 imt_arg, vtable_arg);
8890 link_bblock (cfg, bblock, end_bblock);
8891 start_new_bblock = 1;
8893 // FIXME: Eliminate unreachable epilogs
8896 * OP_TAILCALL has no return value, so skip the CEE_RET if it is
8897 * only reachable from this call.
8899 GET_BBLOCK (cfg, tblock, ip + 5);
8900 if (tblock == bblock || tblock->in_count == 0)
8907 /* End of call, INS should contain the result of the call, if any */
8909 if (push_res && !MONO_TYPE_IS_VOID (fsig->ret)) {
8912 *sp++ = mono_emit_widen_call_res (cfg, ins, fsig);
8917 if (keep_this_alive) {
8918 MonoInst *dummy_use;
8920 /* See mono_emit_method_call_full () */
8921 EMIT_NEW_DUMMY_USE (cfg, dummy_use, keep_this_alive);
8924 CHECK_CFG_EXCEPTION;
8928 g_assert (*ip == CEE_RET);
8932 constrained_call = NULL;
8934 emit_seq_point (cfg, method, ip, FALSE, TRUE);
8938 if (cfg->method != method) {
8939 /* return from inlined method */
8941 * If in_count == 0, that means the ret is unreachable due to
8942 * being preceded by a throw. In that case, inline_method () will
8943 * handle setting the return value
8944 * (test case: test_0_inline_throw ()).
8946 if (return_var && cfg->cbb->in_count) {
8947 MonoType *ret_type = mono_method_signature (method)->ret;
8953 if ((method->wrapper_type == MONO_WRAPPER_DYNAMIC_METHOD || method->wrapper_type == MONO_WRAPPER_NONE) && target_type_is_incompatible (cfg, ret_type, *sp))
8956 //g_assert (returnvar != -1);
8957 EMIT_NEW_TEMPSTORE (cfg, store, return_var->inst_c0, *sp);
8958 cfg->ret_var_set = TRUE;
8961 emit_instrumentation_call (cfg, mono_profiler_method_leave);
8963 if (cfg->lmf_var && cfg->cbb->in_count)
8967 MonoType *ret_type = mini_replace_type (mono_method_signature (method)->ret);
8969 if (seq_points && !sym_seq_points) {
8971 * Place a seq point here too even though the IL stack is not
8972 * empty, so a step over on
8975 * will work correctly.
8977 NEW_SEQ_POINT (cfg, ins, ip - header->code, TRUE);
8978 MONO_ADD_INS (cfg->cbb, ins);
8981 g_assert (!return_var);
8985 if ((method->wrapper_type == MONO_WRAPPER_DYNAMIC_METHOD || method->wrapper_type == MONO_WRAPPER_NONE) && target_type_is_incompatible (cfg, ret_type, *sp))
8988 if (mini_type_to_stind (cfg, ret_type) == CEE_STOBJ) {
8991 if (!cfg->vret_addr) {
8994 EMIT_NEW_VARSTORE (cfg, ins, cfg->ret, ret_type, (*sp));
8996 EMIT_NEW_RETLOADA (cfg, ret_addr);
8998 EMIT_NEW_STORE_MEMBASE (cfg, ins, OP_STOREV_MEMBASE, ret_addr->dreg, 0, (*sp)->dreg);
8999 ins->klass = mono_class_from_mono_type (ret_type);
9002 #ifdef MONO_ARCH_SOFT_FLOAT_FALLBACK
9003 if (COMPILE_SOFT_FLOAT (cfg) && !ret_type->byref && ret_type->type == MONO_TYPE_R4) {
9004 MonoInst *iargs [1];
9008 conv = mono_emit_jit_icall (cfg, mono_fload_r4_arg, iargs);
9009 mono_arch_emit_setret (cfg, method, conv);
9011 mono_arch_emit_setret (cfg, method, *sp);
9014 mono_arch_emit_setret (cfg, method, *sp);
9019 if (sp != stack_start)
9021 MONO_INST_NEW (cfg, ins, OP_BR);
9023 ins->inst_target_bb = end_bblock;
9024 MONO_ADD_INS (bblock, ins);
9025 link_bblock (cfg, bblock, end_bblock);
9026 start_new_bblock = 1;
9030 MONO_INST_NEW (cfg, ins, OP_BR);
9032 target = ip + 1 + (signed char)(*ip);
9034 GET_BBLOCK (cfg, tblock, target);
9035 link_bblock (cfg, bblock, tblock);
9036 ins->inst_target_bb = tblock;
9037 if (sp != stack_start) {
9038 handle_stack_args (cfg, stack_start, sp - stack_start);
9040 CHECK_UNVERIFIABLE (cfg);
9042 MONO_ADD_INS (bblock, ins);
9043 start_new_bblock = 1;
9044 inline_costs += BRANCH_COST;
9058 MONO_INST_NEW (cfg, ins, *ip + BIG_BRANCH_OFFSET);
9060 target = ip + 1 + *(signed char*)ip;
9066 inline_costs += BRANCH_COST;
9070 MONO_INST_NEW (cfg, ins, OP_BR);
9073 target = ip + 4 + (gint32)read32(ip);
9075 GET_BBLOCK (cfg, tblock, target);
9076 link_bblock (cfg, bblock, tblock);
9077 ins->inst_target_bb = tblock;
9078 if (sp != stack_start) {
9079 handle_stack_args (cfg, stack_start, sp - stack_start);
9081 CHECK_UNVERIFIABLE (cfg);
9084 MONO_ADD_INS (bblock, ins);
9086 start_new_bblock = 1;
9087 inline_costs += BRANCH_COST;
9094 gboolean is_short = ((*ip) == CEE_BRFALSE_S) || ((*ip) == CEE_BRTRUE_S);
9095 gboolean is_true = ((*ip) == CEE_BRTRUE_S) || ((*ip) == CEE_BRTRUE);
9096 guint32 opsize = is_short ? 1 : 4;
9098 CHECK_OPSIZE (opsize);
9100 if (sp [-1]->type == STACK_VTYPE || sp [-1]->type == STACK_R8)
9103 target = ip + opsize + (is_short ? *(signed char*)ip : (gint32)read32(ip));
9108 GET_BBLOCK (cfg, tblock, target);
9109 link_bblock (cfg, bblock, tblock);
9110 GET_BBLOCK (cfg, tblock, ip);
9111 link_bblock (cfg, bblock, tblock);
9113 if (sp != stack_start) {
9114 handle_stack_args (cfg, stack_start, sp - stack_start);
9115 CHECK_UNVERIFIABLE (cfg);
9118 MONO_INST_NEW(cfg, cmp, OP_ICOMPARE_IMM);
9119 cmp->sreg1 = sp [0]->dreg;
9120 type_from_op (cmp, sp [0], NULL);
9123 #if SIZEOF_REGISTER == 4
9124 if (cmp->opcode == OP_LCOMPARE_IMM) {
9125 /* Convert it to OP_LCOMPARE */
9126 MONO_INST_NEW (cfg, ins, OP_I8CONST);
9127 ins->type = STACK_I8;
9128 ins->dreg = alloc_dreg (cfg, STACK_I8);
9130 MONO_ADD_INS (bblock, ins);
9131 cmp->opcode = OP_LCOMPARE;
9132 cmp->sreg2 = ins->dreg;
9135 MONO_ADD_INS (bblock, cmp);
9137 MONO_INST_NEW (cfg, ins, is_true ? CEE_BNE_UN : CEE_BEQ);
9138 type_from_op (ins, sp [0], NULL);
9139 MONO_ADD_INS (bblock, ins);
9140 ins->inst_many_bb = mono_mempool_alloc (cfg->mempool, sizeof(gpointer)*2);
9141 GET_BBLOCK (cfg, tblock, target);
9142 ins->inst_true_bb = tblock;
9143 GET_BBLOCK (cfg, tblock, ip);
9144 ins->inst_false_bb = tblock;
9145 start_new_bblock = 2;
9148 inline_costs += BRANCH_COST;
9163 MONO_INST_NEW (cfg, ins, *ip);
9165 target = ip + 4 + (gint32)read32(ip);
9171 inline_costs += BRANCH_COST;
9175 MonoBasicBlock **targets;
9176 MonoBasicBlock *default_bblock;
9177 MonoJumpInfoBBTable *table;
9178 int offset_reg = alloc_preg (cfg);
9179 int target_reg = alloc_preg (cfg);
9180 int table_reg = alloc_preg (cfg);
9181 int sum_reg = alloc_preg (cfg);
9182 gboolean use_op_switch;
9186 n = read32 (ip + 1);
9189 if ((src1->type != STACK_I4) && (src1->type != STACK_PTR))
9193 CHECK_OPSIZE (n * sizeof (guint32));
9194 target = ip + n * sizeof (guint32);
9196 GET_BBLOCK (cfg, default_bblock, target);
9197 default_bblock->flags |= BB_INDIRECT_JUMP_TARGET;
9199 targets = mono_mempool_alloc (cfg->mempool, sizeof (MonoBasicBlock*) * n);
9200 for (i = 0; i < n; ++i) {
9201 GET_BBLOCK (cfg, tblock, target + (gint32)read32(ip));
9202 targets [i] = tblock;
9203 targets [i]->flags |= BB_INDIRECT_JUMP_TARGET;
9207 if (sp != stack_start) {
9209 * Link the current bb with the targets as well, so handle_stack_args
9210 * will set their in_stack correctly.
9212 link_bblock (cfg, bblock, default_bblock);
9213 for (i = 0; i < n; ++i)
9214 link_bblock (cfg, bblock, targets [i]);
9216 handle_stack_args (cfg, stack_start, sp - stack_start);
9218 CHECK_UNVERIFIABLE (cfg);
9221 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_ICOMPARE_IMM, -1, src1->dreg, n);
9222 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_IBGE_UN, default_bblock);
9225 for (i = 0; i < n; ++i)
9226 link_bblock (cfg, bblock, targets [i]);
9228 table = mono_mempool_alloc (cfg->mempool, sizeof (MonoJumpInfoBBTable));
9229 table->table = targets;
9230 table->table_size = n;
9232 use_op_switch = FALSE;
9234 /* ARM implements SWITCH statements differently */
9235 /* FIXME: Make it use the generic implementation */
9236 if (!cfg->compile_aot)
9237 use_op_switch = TRUE;
9240 if (COMPILE_LLVM (cfg))
9241 use_op_switch = TRUE;
9243 cfg->cbb->has_jump_table = 1;
9245 if (use_op_switch) {
9246 MONO_INST_NEW (cfg, ins, OP_SWITCH);
9247 ins->sreg1 = src1->dreg;
9248 ins->inst_p0 = table;
9249 ins->inst_many_bb = targets;
9250 ins->klass = GUINT_TO_POINTER (n);
9251 MONO_ADD_INS (cfg->cbb, ins);
9253 if (sizeof (gpointer) == 8)
9254 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_SHL_IMM, offset_reg, src1->dreg, 3);
9256 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_SHL_IMM, offset_reg, src1->dreg, 2);
9258 #if SIZEOF_REGISTER == 8
9259 /* The upper word might not be zero, and we add it to a 64 bit address later */
9260 MONO_EMIT_NEW_UNALU (cfg, OP_ZEXT_I4, offset_reg, offset_reg);
9263 if (cfg->compile_aot) {
9264 MONO_EMIT_NEW_AOTCONST (cfg, table_reg, table, MONO_PATCH_INFO_SWITCH);
9266 MONO_INST_NEW (cfg, ins, OP_JUMP_TABLE);
9267 ins->inst_c1 = MONO_PATCH_INFO_SWITCH;
9268 ins->inst_p0 = table;
9269 ins->dreg = table_reg;
9270 MONO_ADD_INS (cfg->cbb, ins);
9273 /* FIXME: Use load_memindex */
9274 MONO_EMIT_NEW_BIALU (cfg, OP_PADD, sum_reg, table_reg, offset_reg);
9275 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, target_reg, sum_reg, 0);
9276 MONO_EMIT_NEW_UNALU (cfg, OP_BR_REG, -1, target_reg);
9278 start_new_bblock = 1;
9279 inline_costs += (BRANCH_COST * 2);
9299 dreg = alloc_freg (cfg);
9302 dreg = alloc_lreg (cfg);
9305 dreg = alloc_ireg_ref (cfg);
9308 dreg = alloc_preg (cfg);
9311 NEW_LOAD_MEMBASE (cfg, ins, ldind_to_load_membase (*ip), dreg, sp [0]->dreg, 0);
9312 ins->type = ldind_type [*ip - CEE_LDIND_I1];
9313 ins->flags |= ins_flag;
9314 MONO_ADD_INS (bblock, ins);
9316 if (ins_flag & MONO_INST_VOLATILE) {
9317 /* Volatile loads have acquire semantics, see 12.6.7 in Ecma 335 */
9318 /* FIXME it's questionable if acquire semantics require full barrier or just LoadLoad*/
9319 emit_memory_barrier (cfg, FullBarrier);
9335 if (ins_flag & MONO_INST_VOLATILE) {
9336 /* Volatile stores have release semantics, see 12.6.7 in Ecma 335 */
9337 /* FIXME it's questionable if release semantics require a full barrier or just StoreStore */
9338 emit_memory_barrier (cfg, FullBarrier);
9341 NEW_STORE_MEMBASE (cfg, ins, stind_to_store_membase (*ip), sp [0]->dreg, 0, sp [1]->dreg);
9342 ins->flags |= ins_flag;
9345 MONO_ADD_INS (bblock, ins);
9347 if (cfg->gen_write_barriers && *ip == CEE_STIND_REF && method->wrapper_type != MONO_WRAPPER_WRITE_BARRIER && !((sp [1]->opcode == OP_PCONST) && (sp [1]->inst_p0 == 0)))
9348 emit_write_barrier (cfg, sp [0], sp [1]);
9357 MONO_INST_NEW (cfg, ins, (*ip));
9359 ins->sreg1 = sp [0]->dreg;
9360 ins->sreg2 = sp [1]->dreg;
9361 type_from_op (ins, sp [0], sp [1]);
9363 ins->dreg = alloc_dreg ((cfg), (ins)->type);
9365 /* Use the immediate opcodes if possible */
9366 if ((sp [1]->opcode == OP_ICONST) && mono_arch_is_inst_imm (sp [1]->inst_c0)) {
9367 int imm_opcode = mono_op_to_op_imm_noemul (ins->opcode);
9368 if (imm_opcode != -1) {
9369 ins->opcode = imm_opcode;
9370 ins->inst_p1 = (gpointer)(gssize)(sp [1]->inst_c0);
9373 NULLIFY_INS (sp [1]);
9377 MONO_ADD_INS ((cfg)->cbb, (ins));
9379 *sp++ = mono_decompose_opcode (cfg, ins);
9396 MONO_INST_NEW (cfg, ins, (*ip));
9398 ins->sreg1 = sp [0]->dreg;
9399 ins->sreg2 = sp [1]->dreg;
9400 type_from_op (ins, sp [0], sp [1]);
9402 ADD_WIDEN_OP (ins, sp [0], sp [1]);
9403 ins->dreg = alloc_dreg ((cfg), (ins)->type);
9405 /* FIXME: Pass opcode to is_inst_imm */
9407 /* Use the immediate opcodes if possible */
9408 if (((sp [1]->opcode == OP_ICONST) || (sp [1]->opcode == OP_I8CONST)) && mono_arch_is_inst_imm (sp [1]->opcode == OP_ICONST ? sp [1]->inst_c0 : sp [1]->inst_l)) {
9411 imm_opcode = mono_op_to_op_imm_noemul (ins->opcode);
9412 #if defined(MONO_ARCH_EMULATE_MUL_DIV) || defined(MONO_ARCH_EMULATE_DIV)
9413 /* Keep emulated opcodes which are optimized away later */
9414 if ((ins->opcode == OP_IREM_UN || ins->opcode == OP_IDIV_UN_IMM) && (cfg->opt & (MONO_OPT_CONSPROP | MONO_OPT_COPYPROP)) && sp [1]->opcode == OP_ICONST && mono_is_power_of_two (sp [1]->inst_c0) >= 0) {
9415 imm_opcode = mono_op_to_op_imm (ins->opcode);
9418 if (imm_opcode != -1) {
9419 ins->opcode = imm_opcode;
9420 if (sp [1]->opcode == OP_I8CONST) {
9421 #if SIZEOF_REGISTER == 8
9422 ins->inst_imm = sp [1]->inst_l;
9424 ins->inst_ls_word = sp [1]->inst_ls_word;
9425 ins->inst_ms_word = sp [1]->inst_ms_word;
9429 ins->inst_imm = (gssize)(sp [1]->inst_c0);
9432 /* Might be followed by an instruction added by ADD_WIDEN_OP */
9433 if (sp [1]->next == NULL)
9434 NULLIFY_INS (sp [1]);
9437 MONO_ADD_INS ((cfg)->cbb, (ins));
9439 *sp++ = mono_decompose_opcode (cfg, ins);
9452 case CEE_CONV_OVF_I8:
9453 case CEE_CONV_OVF_U8:
9457 /* Special case this earlier so we have long constants in the IR */
9458 if ((((*ip) == CEE_CONV_I8) || ((*ip) == CEE_CONV_U8)) && (sp [-1]->opcode == OP_ICONST)) {
9459 int data = sp [-1]->inst_c0;
9460 sp [-1]->opcode = OP_I8CONST;
9461 sp [-1]->type = STACK_I8;
9462 #if SIZEOF_REGISTER == 8
9463 if ((*ip) == CEE_CONV_U8)
9464 sp [-1]->inst_c0 = (guint32)data;
9466 sp [-1]->inst_c0 = data;
9468 sp [-1]->inst_ls_word = data;
9469 if ((*ip) == CEE_CONV_U8)
9470 sp [-1]->inst_ms_word = 0;
9472 sp [-1]->inst_ms_word = (data < 0) ? -1 : 0;
9474 sp [-1]->dreg = alloc_dreg (cfg, STACK_I8);
9481 case CEE_CONV_OVF_I4:
9482 case CEE_CONV_OVF_I1:
9483 case CEE_CONV_OVF_I2:
9484 case CEE_CONV_OVF_I:
9485 case CEE_CONV_OVF_U:
9488 if (sp [-1]->type == STACK_R8) {
9489 ADD_UNOP (CEE_CONV_OVF_I8);
9496 case CEE_CONV_OVF_U1:
9497 case CEE_CONV_OVF_U2:
9498 case CEE_CONV_OVF_U4:
9501 if (sp [-1]->type == STACK_R8) {
9502 ADD_UNOP (CEE_CONV_OVF_U8);
9509 case CEE_CONV_OVF_I1_UN:
9510 case CEE_CONV_OVF_I2_UN:
9511 case CEE_CONV_OVF_I4_UN:
9512 case CEE_CONV_OVF_I8_UN:
9513 case CEE_CONV_OVF_U1_UN:
9514 case CEE_CONV_OVF_U2_UN:
9515 case CEE_CONV_OVF_U4_UN:
9516 case CEE_CONV_OVF_U8_UN:
9517 case CEE_CONV_OVF_I_UN:
9518 case CEE_CONV_OVF_U_UN:
9525 CHECK_CFG_EXCEPTION;
9529 case CEE_ADD_OVF_UN:
9531 case CEE_MUL_OVF_UN:
9533 case CEE_SUB_OVF_UN:
9539 GSHAREDVT_FAILURE (*ip);
9542 token = read32 (ip + 1);
9543 klass = mini_get_class (method, token, generic_context);
9544 CHECK_TYPELOAD (klass);
9546 if (generic_class_is_reference_type (cfg, klass)) {
9547 MonoInst *store, *load;
9548 int dreg = alloc_ireg_ref (cfg);
9550 NEW_LOAD_MEMBASE (cfg, load, OP_LOAD_MEMBASE, dreg, sp [1]->dreg, 0);
9551 load->flags |= ins_flag;
9552 MONO_ADD_INS (cfg->cbb, load);
9554 NEW_STORE_MEMBASE (cfg, store, OP_STORE_MEMBASE_REG, sp [0]->dreg, 0, dreg);
9555 store->flags |= ins_flag;
9556 MONO_ADD_INS (cfg->cbb, store);
9558 if (cfg->gen_write_barriers && cfg->method->wrapper_type != MONO_WRAPPER_WRITE_BARRIER)
9559 emit_write_barrier (cfg, sp [0], sp [1]);
9561 mini_emit_stobj (cfg, sp [0], sp [1], klass, FALSE);
9573 token = read32 (ip + 1);
9574 klass = mini_get_class (method, token, generic_context);
9575 CHECK_TYPELOAD (klass);
9577 /* Optimize the common ldobj+stloc combination */
9587 loc_index = ip [5] - CEE_STLOC_0;
9594 if ((loc_index != -1) && ip_in_bb (cfg, bblock, ip + 5)) {
9595 CHECK_LOCAL (loc_index);
9597 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, ins, &klass->byval_arg, sp [0]->dreg, 0);
9598 ins->dreg = cfg->locals [loc_index]->dreg;
9599 ins->flags |= ins_flag;
9602 if (ins_flag & MONO_INST_VOLATILE) {
9603 /* Volatile loads have acquire semantics, see 12.6.7 in Ecma 335 */
9604 /* FIXME it's questionable if acquire semantics require full barrier or just LoadLoad*/
9605 emit_memory_barrier (cfg, FullBarrier);
9611 /* Optimize the ldobj+stobj combination */
9612 /* The reference case ends up being a load+store anyway */
9613 /* Skip this if the operation is volatile. */
9614 if (((ip [5] == CEE_STOBJ) && ip_in_bb (cfg, bblock, ip + 5) && read32 (ip + 6) == token) && !generic_class_is_reference_type (cfg, klass) && !(ins_flag & MONO_INST_VOLATILE)) {
9619 mini_emit_stobj (cfg, sp [0], sp [1], klass, FALSE);
9626 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, ins, &klass->byval_arg, sp [0]->dreg, 0);
9627 ins->flags |= ins_flag;
9630 if (ins_flag & MONO_INST_VOLATILE) {
9631 /* Volatile loads have acquire semantics, see 12.6.7 in Ecma 335 */
9632 /* FIXME it's questionable if acquire semantics require full barrier or just LoadLoad*/
9633 emit_memory_barrier (cfg, FullBarrier);
9642 CHECK_STACK_OVF (1);
9644 n = read32 (ip + 1);
9646 if (method->wrapper_type == MONO_WRAPPER_DYNAMIC_METHOD) {
9647 EMIT_NEW_PCONST (cfg, ins, mono_method_get_wrapper_data (method, n));
9648 ins->type = STACK_OBJ;
9651 else if (method->wrapper_type != MONO_WRAPPER_NONE) {
9652 MonoInst *iargs [1];
9654 EMIT_NEW_PCONST (cfg, iargs [0], mono_method_get_wrapper_data (method, n));
9655 *sp = mono_emit_jit_icall (cfg, mono_string_new_wrapper, iargs);
9657 if (cfg->opt & MONO_OPT_SHARED) {
9658 MonoInst *iargs [3];
9660 if (cfg->compile_aot) {
9661 cfg->ldstr_list = g_list_prepend (cfg->ldstr_list, GINT_TO_POINTER (n));
9663 EMIT_NEW_DOMAINCONST (cfg, iargs [0]);
9664 EMIT_NEW_IMAGECONST (cfg, iargs [1], image);
9665 EMIT_NEW_ICONST (cfg, iargs [2], mono_metadata_token_index (n));
9666 *sp = mono_emit_jit_icall (cfg, mono_ldstr, iargs);
9667 mono_ldstr (cfg->domain, image, mono_metadata_token_index (n));
9669 if (bblock->out_of_line) {
9670 MonoInst *iargs [2];
9672 if (image == mono_defaults.corlib) {
9674 * Avoid relocations in AOT and save some space by using a
9675 * version of helper_ldstr specialized to mscorlib.
9677 EMIT_NEW_ICONST (cfg, iargs [0], mono_metadata_token_index (n));
9678 *sp = mono_emit_jit_icall (cfg, mono_helper_ldstr_mscorlib, iargs);
9680 /* Avoid creating the string object */
9681 EMIT_NEW_IMAGECONST (cfg, iargs [0], image);
9682 EMIT_NEW_ICONST (cfg, iargs [1], mono_metadata_token_index (n));
9683 *sp = mono_emit_jit_icall (cfg, mono_helper_ldstr, iargs);
9687 if (cfg->compile_aot) {
9688 NEW_LDSTRCONST (cfg, ins, image, n);
9690 MONO_ADD_INS (bblock, ins);
9693 NEW_PCONST (cfg, ins, NULL);
9694 ins->type = STACK_OBJ;
9695 ins->inst_p0 = mono_ldstr (cfg->domain, image, mono_metadata_token_index (n));
9697 OUT_OF_MEMORY_FAILURE;
9700 MONO_ADD_INS (bblock, ins);
9709 MonoInst *iargs [2];
9710 MonoMethodSignature *fsig;
9713 MonoInst *vtable_arg = NULL;
9716 token = read32 (ip + 1);
9717 cmethod = mini_get_method (cfg, method, token, NULL, generic_context);
9718 if (!cmethod || mono_loader_get_last_error ())
9720 fsig = mono_method_get_signature_checked (cmethod, image, token, NULL, &cfg->error);
9723 mono_save_token_info (cfg, image, token, cmethod);
9725 if (!mono_class_init (cmethod->klass))
9726 TYPE_LOAD_ERROR (cmethod->klass);
9728 context_used = mini_method_check_context_used (cfg, cmethod);
9730 if (mono_security_cas_enabled ()) {
9731 if (check_linkdemand (cfg, method, cmethod))
9732 INLINE_FAILURE ("linkdemand");
9733 CHECK_CFG_EXCEPTION;
9734 } else if (mono_security_core_clr_enabled ()) {
9735 ensure_method_is_allowed_to_call_method (cfg, method, cmethod, bblock, ip);
9738 if (cfg->generic_sharing_context && cmethod && cmethod->klass != method->klass && cmethod->klass->generic_class && mono_method_is_generic_sharable (cmethod, TRUE) && mono_class_needs_cctor_run (cmethod->klass, method)) {
9739 emit_generic_class_init (cfg, cmethod->klass);
9740 CHECK_TYPELOAD (cmethod->klass);
9744 if (cfg->gsharedvt) {
9745 if (mini_is_gsharedvt_variable_signature (sig))
9746 GSHAREDVT_FAILURE (*ip);
9750 n = fsig->param_count;
9754 * Generate smaller code for the common newobj <exception> instruction in
9755 * argument checking code.
9757 if (bblock->out_of_line && cmethod->klass->image == mono_defaults.corlib &&
9758 is_exception_class (cmethod->klass) && n <= 2 &&
9759 ((n < 1) || (!fsig->params [0]->byref && fsig->params [0]->type == MONO_TYPE_STRING)) &&
9760 ((n < 2) || (!fsig->params [1]->byref && fsig->params [1]->type == MONO_TYPE_STRING))) {
9761 MonoInst *iargs [3];
9765 EMIT_NEW_ICONST (cfg, iargs [0], cmethod->klass->type_token);
9768 *sp ++ = mono_emit_jit_icall (cfg, mono_create_corlib_exception_0, iargs);
9772 *sp ++ = mono_emit_jit_icall (cfg, mono_create_corlib_exception_1, iargs);
9777 *sp ++ = mono_emit_jit_icall (cfg, mono_create_corlib_exception_2, iargs);
9780 g_assert_not_reached ();
9788 /* move the args to allow room for 'this' in the first position */
9794 /* check_call_signature () requires sp[0] to be set */
9795 this_ins.type = STACK_OBJ;
9797 if (check_call_signature (cfg, fsig, sp))
9802 if (mini_class_is_system_array (cmethod->klass)) {
9803 *sp = emit_get_rgctx_method (cfg, context_used,
9804 cmethod, MONO_RGCTX_INFO_METHOD);
9806 /* Avoid varargs in the common case */
9807 if (fsig->param_count == 1)
9808 alloc = mono_emit_jit_icall (cfg, mono_array_new_1, sp);
9809 else if (fsig->param_count == 2)
9810 alloc = mono_emit_jit_icall (cfg, mono_array_new_2, sp);
9811 else if (fsig->param_count == 3)
9812 alloc = mono_emit_jit_icall (cfg, mono_array_new_3, sp);
9813 else if (fsig->param_count == 4)
9814 alloc = mono_emit_jit_icall (cfg, mono_array_new_4, sp);
9816 alloc = handle_array_new (cfg, fsig->param_count, sp, ip);
9817 } else if (cmethod->string_ctor) {
9818 g_assert (!context_used);
9819 g_assert (!vtable_arg);
9820 /* we simply pass a null pointer */
9821 EMIT_NEW_PCONST (cfg, *sp, NULL);
9822 /* now call the string ctor */
9823 alloc = mono_emit_method_call_full (cfg, cmethod, fsig, FALSE, sp, NULL, NULL, NULL);
9825 if (cmethod->klass->valuetype) {
9826 iargs [0] = mono_compile_create_var (cfg, &cmethod->klass->byval_arg, OP_LOCAL);
9827 emit_init_rvar (cfg, iargs [0]->dreg, &cmethod->klass->byval_arg);
9828 EMIT_NEW_TEMPLOADA (cfg, *sp, iargs [0]->inst_c0);
9833 * The code generated by mini_emit_virtual_call () expects
9834 * iargs [0] to be a boxed instance, but luckily the vcall
9835 * will be transformed into a normal call there.
9837 } else if (context_used) {
9838 alloc = handle_alloc (cfg, cmethod->klass, FALSE, context_used);
9841 MonoVTable *vtable = NULL;
9843 if (!cfg->compile_aot)
9844 vtable = mono_class_vtable (cfg->domain, cmethod->klass);
9845 CHECK_TYPELOAD (cmethod->klass);
9848 * TypeInitializationExceptions thrown from the mono_runtime_class_init
9849 * call in mono_jit_runtime_invoke () can abort the finalizer thread.
9850 * As a workaround, we call class cctors before allocating objects.
9852 if (mini_field_access_needs_cctor_run (cfg, method, cmethod->klass, vtable) && !(g_slist_find (class_inits, cmethod->klass))) {
9853 mono_emit_abs_call (cfg, MONO_PATCH_INFO_CLASS_INIT, cmethod->klass, helper_sig_class_init_trampoline, NULL);
9854 if (cfg->verbose_level > 2)
9855 printf ("class %s.%s needs init call for ctor\n", cmethod->klass->name_space, cmethod->klass->name);
9856 class_inits = g_slist_prepend (class_inits, cmethod->klass);
9859 alloc = handle_alloc (cfg, cmethod->klass, FALSE, 0);
9862 CHECK_CFG_EXCEPTION; /*for handle_alloc*/
9865 MONO_EMIT_NEW_UNALU (cfg, OP_NOT_NULL, -1, alloc->dreg);
9867 /* Now call the actual ctor */
9868 handle_ctor_call (cfg, cmethod, fsig, context_used, sp, ip, &bblock, &inline_costs);
9869 CHECK_CFG_EXCEPTION;
9872 if (alloc == NULL) {
9874 EMIT_NEW_TEMPLOAD (cfg, ins, iargs [0]->inst_c0);
9875 type_to_eval_stack_type (cfg, &ins->klass->byval_arg, ins);
9889 token = read32 (ip + 1);
9890 klass = mini_get_class (method, token, generic_context);
9891 CHECK_TYPELOAD (klass);
9892 if (sp [0]->type != STACK_OBJ)
9895 ins = handle_castclass (cfg, klass, *sp, ip, &bblock, &inline_costs);
9896 CHECK_CFG_EXCEPTION;
9905 token = read32 (ip + 1);
9906 klass = mini_get_class (method, token, generic_context);
9907 CHECK_TYPELOAD (klass);
9908 if (sp [0]->type != STACK_OBJ)
9911 context_used = mini_class_check_context_used (cfg, klass);
9913 if (!context_used && mini_class_has_reference_variant_generic_argument (cfg, klass, context_used)) {
9914 MonoMethod *mono_isinst = mono_marshal_get_isinst_with_cache ();
9921 EMIT_NEW_CLASSCONST (cfg, args [1], klass);
9924 if (cfg->compile_aot)
9925 EMIT_NEW_AOTCONST (cfg, args [2], MONO_PATCH_INFO_CASTCLASS_CACHE, NULL);
9927 EMIT_NEW_PCONST (cfg, args [2], mono_domain_alloc0 (cfg->domain, sizeof (gpointer)));
9929 *sp++ = mono_emit_method_call (cfg, mono_isinst, args, NULL);
9932 } else if (!context_used && (mono_class_is_marshalbyref (klass) || klass->flags & TYPE_ATTRIBUTE_INTERFACE)) {
9933 MonoMethod *mono_isinst;
9934 MonoInst *iargs [1];
9937 mono_isinst = mono_marshal_get_isinst (klass);
9940 costs = inline_method (cfg, mono_isinst, mono_method_signature (mono_isinst),
9941 iargs, ip, cfg->real_offset, TRUE, &bblock);
9942 CHECK_CFG_EXCEPTION;
9943 g_assert (costs > 0);
9946 cfg->real_offset += 5;
9950 inline_costs += costs;
9953 ins = handle_isinst (cfg, klass, *sp, context_used);
9954 CHECK_CFG_EXCEPTION;
9961 case CEE_UNBOX_ANY: {
9962 MonoInst *res, *addr;
9967 token = read32 (ip + 1);
9968 klass = mini_get_class (method, token, generic_context);
9969 CHECK_TYPELOAD (klass);
9971 mono_save_token_info (cfg, image, token, klass);
9973 context_used = mini_class_check_context_used (cfg, klass);
9975 if (mini_is_gsharedvt_klass (cfg, klass)) {
9976 res = handle_unbox_gsharedvt (cfg, klass, *sp, &bblock);
9978 } else if (generic_class_is_reference_type (cfg, klass)) {
9979 res = handle_castclass (cfg, klass, *sp, ip, &bblock, &inline_costs);
9980 CHECK_CFG_EXCEPTION;
9981 } else if (mono_class_is_nullable (klass)) {
9982 res = handle_unbox_nullable (cfg, *sp, klass, context_used);
9984 addr = handle_unbox (cfg, klass, sp, context_used);
9986 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, ins, &klass->byval_arg, addr->dreg, 0);
10002 token = read32 (ip + 1);
10003 klass = mini_get_class (method, token, generic_context);
10004 CHECK_TYPELOAD (klass);
10006 mono_save_token_info (cfg, image, token, klass);
10008 context_used = mini_class_check_context_used (cfg, klass);
10010 if (generic_class_is_reference_type (cfg, klass)) {
10016 if (klass == mono_defaults.void_class)
10018 if (target_type_is_incompatible (cfg, &klass->byval_arg, *sp))
10020 /* frequent check in generic code: box (struct), brtrue */
10022 // FIXME: LLVM can't handle the inconsistent bb linking
10023 if (!mono_class_is_nullable (klass) &&
10024 ip + 5 < end && ip_in_bb (cfg, bblock, ip + 5) &&
10025 (ip [5] == CEE_BRTRUE ||
10026 ip [5] == CEE_BRTRUE_S ||
10027 ip [5] == CEE_BRFALSE ||
10028 ip [5] == CEE_BRFALSE_S)) {
10029 gboolean is_true = ip [5] == CEE_BRTRUE || ip [5] == CEE_BRTRUE_S;
10031 MonoBasicBlock *true_bb, *false_bb;
10035 if (cfg->verbose_level > 3) {
10036 printf ("converting (in B%d: stack: %d) %s", bblock->block_num, (int)(sp - stack_start), mono_disasm_code_one (NULL, method, ip, NULL));
10037 printf ("<box+brtrue opt>\n");
10042 case CEE_BRFALSE_S:
10045 target = ip + 1 + (signed char)(*ip);
10052 target = ip + 4 + (gint)(read32 (ip));
10056 g_assert_not_reached ();
10060 * We need to link both bblocks, since it is needed for handling stack
10061 * arguments correctly (See test_0_box_brtrue_opt_regress_81102).
10062 * Branching to only one of them would lead to inconsistencies, so
10063 * generate an ICONST+BRTRUE, the branch opts will get rid of them.
10065 GET_BBLOCK (cfg, true_bb, target);
10066 GET_BBLOCK (cfg, false_bb, ip);
10068 mono_link_bblock (cfg, cfg->cbb, true_bb);
10069 mono_link_bblock (cfg, cfg->cbb, false_bb);
10071 if (sp != stack_start) {
10072 handle_stack_args (cfg, stack_start, sp - stack_start);
10074 CHECK_UNVERIFIABLE (cfg);
10077 if (COMPILE_LLVM (cfg)) {
10078 dreg = alloc_ireg (cfg);
10079 MONO_EMIT_NEW_ICONST (cfg, dreg, 0);
10080 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, dreg, is_true ? 0 : 1);
10082 MONO_EMIT_NEW_BRANCH_BLOCK2 (cfg, OP_IBEQ, true_bb, false_bb);
10084 /* The JIT can't eliminate the iconst+compare */
10085 MONO_INST_NEW (cfg, ins, OP_BR);
10086 ins->inst_target_bb = is_true ? true_bb : false_bb;
10087 MONO_ADD_INS (cfg->cbb, ins);
10090 start_new_bblock = 1;
10094 *sp++ = handle_box (cfg, val, klass, context_used, &bblock);
10096 CHECK_CFG_EXCEPTION;
10105 token = read32 (ip + 1);
10106 klass = mini_get_class (method, token, generic_context);
10107 CHECK_TYPELOAD (klass);
10109 mono_save_token_info (cfg, image, token, klass);
10111 context_used = mini_class_check_context_used (cfg, klass);
10113 if (mono_class_is_nullable (klass)) {
10116 val = handle_unbox_nullable (cfg, *sp, klass, context_used);
10117 EMIT_NEW_VARLOADA (cfg, ins, get_vreg_to_inst (cfg, val->dreg), &val->klass->byval_arg);
10121 ins = handle_unbox (cfg, klass, sp, context_used);
10134 MonoClassField *field;
10135 #ifndef DISABLE_REMOTING
10139 gboolean is_instance;
10141 gpointer addr = NULL;
10142 gboolean is_special_static;
10144 MonoInst *store_val = NULL;
10145 MonoInst *thread_ins;
10148 is_instance = (op == CEE_LDFLD || op == CEE_LDFLDA || op == CEE_STFLD);
10150 if (op == CEE_STFLD) {
10153 store_val = sp [1];
10158 if (sp [0]->type == STACK_I4 || sp [0]->type == STACK_I8 || sp [0]->type == STACK_R8)
10160 if (*ip != CEE_LDFLD && sp [0]->type == STACK_VTYPE)
10163 if (op == CEE_STSFLD) {
10166 store_val = sp [0];
10171 token = read32 (ip + 1);
10172 if (method->wrapper_type != MONO_WRAPPER_NONE) {
10173 field = mono_method_get_wrapper_data (method, token);
10174 klass = field->parent;
10177 field = mono_field_from_token_checked (image, token, &klass, generic_context, &cfg->error);
10180 if (!dont_verify && !cfg->skip_visibility && !mono_method_can_access_field (method, field))
10181 FIELD_ACCESS_FAILURE (method, field);
10182 mono_class_init (klass);
10184 if (is_instance && *ip != CEE_LDFLDA && is_magic_tls_access (field))
10187 /* if the class is Critical then transparent code cannot access its fields */
10188 if (!is_instance && mono_security_core_clr_enabled ())
10189 ensure_method_is_allowed_to_access_field (cfg, method, field, bblock, ip);
10191 /* XXX this is technically required but, so far (SL2), no [SecurityCritical] types (not many exists) have
10192 any visible *instance* field (in fact there's a single case for a static field in Marshal) XXX
10193 if (mono_security_core_clr_enabled ())
10194 ensure_method_is_allowed_to_access_field (cfg, method, field, bblock, ip);
10198 * LDFLD etc. is usable on static fields as well, so convert those cases to
10201 if (is_instance && field->type->attrs & FIELD_ATTRIBUTE_STATIC) {
10213 g_assert_not_reached ();
10215 is_instance = FALSE;
10218 context_used = mini_class_check_context_used (cfg, klass);
10220 /* INSTANCE CASE */
10222 foffset = klass->valuetype? field->offset - sizeof (MonoObject): field->offset;
10223 if (op == CEE_STFLD) {
10224 if (target_type_is_incompatible (cfg, field->type, sp [1]))
10226 #ifndef DISABLE_REMOTING
10227 if ((mono_class_is_marshalbyref (klass) && !MONO_CHECK_THIS (sp [0])) || mono_class_is_contextbound (klass) || klass == mono_defaults.marshalbyrefobject_class) {
10228 MonoMethod *stfld_wrapper = mono_marshal_get_stfld_wrapper (field->type);
10229 MonoInst *iargs [5];
10231 GSHAREDVT_FAILURE (op);
10233 iargs [0] = sp [0];
10234 EMIT_NEW_CLASSCONST (cfg, iargs [1], klass);
10235 EMIT_NEW_FIELDCONST (cfg, iargs [2], field);
10236 EMIT_NEW_ICONST (cfg, iargs [3], klass->valuetype ? field->offset - sizeof (MonoObject) :
10238 iargs [4] = sp [1];
10240 if (cfg->opt & MONO_OPT_INLINE || cfg->compile_aot) {
10241 costs = inline_method (cfg, stfld_wrapper, mono_method_signature (stfld_wrapper),
10242 iargs, ip, cfg->real_offset, TRUE, &bblock);
10243 CHECK_CFG_EXCEPTION;
10244 g_assert (costs > 0);
10246 cfg->real_offset += 5;
10248 inline_costs += costs;
10250 mono_emit_method_call (cfg, stfld_wrapper, iargs, NULL);
10257 MONO_EMIT_NULL_CHECK (cfg, sp [0]->dreg);
10259 if (mini_is_gsharedvt_klass (cfg, klass)) {
10260 MonoInst *offset_ins;
10262 context_used = mini_class_check_context_used (cfg, klass);
10264 offset_ins = emit_get_gsharedvt_info (cfg, field, MONO_RGCTX_INFO_FIELD_OFFSET);
10265 dreg = alloc_ireg_mp (cfg);
10266 EMIT_NEW_BIALU (cfg, ins, OP_PADD, dreg, sp [0]->dreg, offset_ins->dreg);
10267 /* The decomposition will call mini_emit_stobj () which will emit a wbarrier if needed */
10268 EMIT_NEW_STORE_MEMBASE_TYPE (cfg, store, field->type, dreg, 0, sp [1]->dreg);
10270 EMIT_NEW_STORE_MEMBASE_TYPE (cfg, store, field->type, sp [0]->dreg, foffset, sp [1]->dreg);
10272 if (sp [0]->opcode != OP_LDADDR)
10273 store->flags |= MONO_INST_FAULT;
10275 if (cfg->gen_write_barriers && mini_type_to_stind (cfg, field->type) == CEE_STIND_REF && !(sp [1]->opcode == OP_PCONST && sp [1]->inst_c0 == 0)) {
10276 /* insert call to write barrier */
10280 dreg = alloc_ireg_mp (cfg);
10281 EMIT_NEW_BIALU_IMM (cfg, ptr, OP_PADD_IMM, dreg, sp [0]->dreg, foffset);
10282 emit_write_barrier (cfg, ptr, sp [1]);
10285 store->flags |= ins_flag;
10292 #ifndef DISABLE_REMOTING
10293 if (is_instance && ((mono_class_is_marshalbyref (klass) && !MONO_CHECK_THIS (sp [0])) || mono_class_is_contextbound (klass) || klass == mono_defaults.marshalbyrefobject_class)) {
10294 MonoMethod *wrapper = (op == CEE_LDFLDA) ? mono_marshal_get_ldflda_wrapper (field->type) : mono_marshal_get_ldfld_wrapper (field->type);
10295 MonoInst *iargs [4];
10297 GSHAREDVT_FAILURE (op);
10299 iargs [0] = sp [0];
10300 EMIT_NEW_CLASSCONST (cfg, iargs [1], klass);
10301 EMIT_NEW_FIELDCONST (cfg, iargs [2], field);
10302 EMIT_NEW_ICONST (cfg, iargs [3], klass->valuetype ? field->offset - sizeof (MonoObject) : field->offset);
10303 if (cfg->opt & MONO_OPT_INLINE || cfg->compile_aot) {
10304 costs = inline_method (cfg, wrapper, mono_method_signature (wrapper),
10305 iargs, ip, cfg->real_offset, TRUE, &bblock);
10306 CHECK_CFG_EXCEPTION;
10307 g_assert (costs > 0);
10309 cfg->real_offset += 5;
10313 inline_costs += costs;
10315 ins = mono_emit_method_call (cfg, wrapper, iargs, NULL);
10321 if (sp [0]->type == STACK_VTYPE) {
10324 /* Have to compute the address of the variable */
10326 var = get_vreg_to_inst (cfg, sp [0]->dreg);
10328 var = mono_compile_create_var_for_vreg (cfg, &klass->byval_arg, OP_LOCAL, sp [0]->dreg);
10330 g_assert (var->klass == klass);
10332 EMIT_NEW_VARLOADA (cfg, ins, var, &var->klass->byval_arg);
10336 if (op == CEE_LDFLDA) {
10337 if (is_magic_tls_access (field)) {
10338 GSHAREDVT_FAILURE (*ip);
10340 *sp++ = create_magic_tls_access (cfg, field, &cached_tls_addr, ins);
10342 if (sp [0]->type == STACK_OBJ) {
10343 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, sp [0]->dreg, 0);
10344 MONO_EMIT_NEW_COND_EXC (cfg, EQ, "NullReferenceException");
10347 dreg = alloc_ireg_mp (cfg);
10349 if (mini_is_gsharedvt_klass (cfg, klass)) {
10350 MonoInst *offset_ins;
10352 offset_ins = emit_get_gsharedvt_info (cfg, field, MONO_RGCTX_INFO_FIELD_OFFSET);
10353 EMIT_NEW_BIALU (cfg, ins, OP_PADD, dreg, sp [0]->dreg, offset_ins->dreg);
10355 EMIT_NEW_BIALU_IMM (cfg, ins, OP_PADD_IMM, dreg, sp [0]->dreg, foffset);
10357 ins->klass = mono_class_from_mono_type (field->type);
10358 ins->type = STACK_MP;
10364 MONO_EMIT_NULL_CHECK (cfg, sp [0]->dreg);
10366 if (mini_is_gsharedvt_klass (cfg, klass)) {
10367 MonoInst *offset_ins;
10369 offset_ins = emit_get_gsharedvt_info (cfg, field, MONO_RGCTX_INFO_FIELD_OFFSET);
10370 dreg = alloc_ireg_mp (cfg);
10371 EMIT_NEW_BIALU (cfg, ins, OP_PADD, dreg, sp [0]->dreg, offset_ins->dreg);
10372 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, load, field->type, dreg, 0);
10374 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, load, field->type, sp [0]->dreg, foffset);
10376 load->flags |= ins_flag;
10377 if (sp [0]->opcode != OP_LDADDR)
10378 load->flags |= MONO_INST_FAULT;
10392 * We can only support shared generic static
10393 * field access on architectures where the
10394 * trampoline code has been extended to handle
10395 * the generic class init.
10397 #ifndef MONO_ARCH_VTABLE_REG
10398 GENERIC_SHARING_FAILURE (op);
10401 context_used = mini_class_check_context_used (cfg, klass);
10403 ftype = mono_field_get_type (field);
10405 if (ftype->attrs & FIELD_ATTRIBUTE_LITERAL)
10408 /* The special_static_fields field is init'd in mono_class_vtable, so it needs
10409 * to be called here.
10411 if (!context_used && !(cfg->opt & MONO_OPT_SHARED)) {
10412 mono_class_vtable (cfg->domain, klass);
10413 CHECK_TYPELOAD (klass);
10415 mono_domain_lock (cfg->domain);
10416 if (cfg->domain->special_static_fields)
10417 addr = g_hash_table_lookup (cfg->domain->special_static_fields, field);
10418 mono_domain_unlock (cfg->domain);
10420 is_special_static = mono_class_field_is_special_static (field);
10422 if (is_special_static && ((gsize)addr & 0x80000000) == 0)
10423 thread_ins = mono_get_thread_intrinsic (cfg);
10427 /* Generate IR to compute the field address */
10428 if (is_special_static && ((gsize)addr & 0x80000000) == 0 && thread_ins && !(cfg->opt & MONO_OPT_SHARED) && !context_used) {
10430 * Fast access to TLS data
10431 * Inline version of get_thread_static_data () in
10435 int idx, static_data_reg, array_reg, dreg;
10437 GSHAREDVT_FAILURE (op);
10439 // offset &= 0x7fffffff;
10440 // idx = (offset >> 24) - 1;
10441 // return ((char*) thread->static_data [idx]) + (offset & 0xffffff);
10442 MONO_ADD_INS (cfg->cbb, thread_ins);
10443 static_data_reg = alloc_ireg (cfg);
10444 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, static_data_reg, thread_ins->dreg, MONO_STRUCT_OFFSET (MonoInternalThread, static_data));
10446 if (cfg->compile_aot) {
10447 int offset_reg, offset2_reg, idx_reg;
10449 /* For TLS variables, this will return the TLS offset */
10450 EMIT_NEW_SFLDACONST (cfg, ins, field);
10451 offset_reg = ins->dreg;
10452 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_IAND_IMM, offset_reg, offset_reg, 0x7fffffff);
10453 idx_reg = alloc_ireg (cfg);
10454 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_ISHR_IMM, idx_reg, offset_reg, 24);
10455 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_ISUB_IMM, idx_reg, idx_reg, 1);
10456 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_ISHL_IMM, idx_reg, idx_reg, sizeof (gpointer) == 8 ? 3 : 2);
10457 MONO_EMIT_NEW_BIALU (cfg, OP_PADD, static_data_reg, static_data_reg, idx_reg);
10458 array_reg = alloc_ireg (cfg);
10459 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, array_reg, static_data_reg, 0);
10460 offset2_reg = alloc_ireg (cfg);
10461 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_IAND_IMM, offset2_reg, offset_reg, 0xffffff);
10462 dreg = alloc_ireg (cfg);
10463 EMIT_NEW_BIALU (cfg, ins, OP_PADD, dreg, array_reg, offset2_reg);
10465 offset = (gsize)addr & 0x7fffffff;
10466 idx = (offset >> 24) - 1;
10468 array_reg = alloc_ireg (cfg);
10469 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, array_reg, static_data_reg, idx * sizeof (gpointer));
10470 dreg = alloc_ireg (cfg);
10471 EMIT_NEW_BIALU_IMM (cfg, ins, OP_ADD_IMM, dreg, array_reg, (offset & 0xffffff));
10473 } else if ((cfg->opt & MONO_OPT_SHARED) ||
10474 (cfg->compile_aot && is_special_static) ||
10475 (context_used && is_special_static)) {
10476 MonoInst *iargs [2];
10478 g_assert (field->parent);
10479 EMIT_NEW_DOMAINCONST (cfg, iargs [0]);
10480 if (context_used) {
10481 iargs [1] = emit_get_rgctx_field (cfg, context_used,
10482 field, MONO_RGCTX_INFO_CLASS_FIELD);
10484 EMIT_NEW_FIELDCONST (cfg, iargs [1], field);
10486 ins = mono_emit_jit_icall (cfg, mono_class_static_field_address, iargs);
10487 } else if (context_used) {
10488 MonoInst *static_data;
10491 g_print ("sharing static field access in %s.%s.%s - depth %d offset %d\n",
10492 method->klass->name_space, method->klass->name, method->name,
10493 depth, field->offset);
10496 if (mono_class_needs_cctor_run (klass, method))
10497 emit_generic_class_init (cfg, klass);
10500 * The pointer we're computing here is
10502 * super_info.static_data + field->offset
10504 static_data = emit_get_rgctx_klass (cfg, context_used,
10505 klass, MONO_RGCTX_INFO_STATIC_DATA);
10507 if (mini_is_gsharedvt_klass (cfg, klass)) {
10508 MonoInst *offset_ins;
10510 offset_ins = emit_get_rgctx_field (cfg, context_used, field, MONO_RGCTX_INFO_FIELD_OFFSET);
10511 dreg = alloc_ireg_mp (cfg);
10512 EMIT_NEW_BIALU (cfg, ins, OP_PADD, dreg, static_data->dreg, offset_ins->dreg);
10513 } else if (field->offset == 0) {
10516 int addr_reg = mono_alloc_preg (cfg);
10517 EMIT_NEW_BIALU_IMM (cfg, ins, OP_PADD_IMM, addr_reg, static_data->dreg, field->offset);
10519 } else if ((cfg->opt & MONO_OPT_SHARED) || (cfg->compile_aot && addr)) {
10520 MonoInst *iargs [2];
10522 g_assert (field->parent);
10523 EMIT_NEW_DOMAINCONST (cfg, iargs [0]);
10524 EMIT_NEW_FIELDCONST (cfg, iargs [1], field);
10525 ins = mono_emit_jit_icall (cfg, mono_class_static_field_address, iargs);
10527 MonoVTable *vtable = NULL;
10529 if (!cfg->compile_aot)
10530 vtable = mono_class_vtable (cfg->domain, klass);
10531 CHECK_TYPELOAD (klass);
10534 if (mini_field_access_needs_cctor_run (cfg, method, klass, vtable)) {
10535 if (!(g_slist_find (class_inits, klass))) {
10536 mono_emit_abs_call (cfg, MONO_PATCH_INFO_CLASS_INIT, klass, helper_sig_class_init_trampoline, NULL);
10537 if (cfg->verbose_level > 2)
10538 printf ("class %s.%s needs init call for %s\n", klass->name_space, klass->name, mono_field_get_name (field));
10539 class_inits = g_slist_prepend (class_inits, klass);
10542 if (cfg->run_cctors) {
10544 /* This makes so that inline cannot trigger */
10545 /* .cctors: too many apps depend on them */
10546 /* running with a specific order... */
10548 if (! vtable->initialized)
10549 INLINE_FAILURE ("class init");
10550 ex = mono_runtime_class_init_full (vtable, FALSE);
10552 set_exception_object (cfg, ex);
10553 goto exception_exit;
10557 if (cfg->compile_aot)
10558 EMIT_NEW_SFLDACONST (cfg, ins, field);
10561 addr = (char*)mono_vtable_get_static_field_data (vtable) + field->offset;
10563 EMIT_NEW_PCONST (cfg, ins, addr);
10566 MonoInst *iargs [1];
10567 EMIT_NEW_ICONST (cfg, iargs [0], GPOINTER_TO_UINT (addr));
10568 ins = mono_emit_jit_icall (cfg, mono_get_special_static_data, iargs);
10572 /* Generate IR to do the actual load/store operation */
10574 if ((op == CEE_STFLD || op == CEE_STSFLD) && (ins_flag & MONO_INST_VOLATILE)) {
10575 /* Volatile stores have release semantics, see 12.6.7 in Ecma 335 */
10576 /* FIXME it's questionable if release semantics require a full barrier or just StoreStore */
10577 emit_memory_barrier (cfg, FullBarrier);
10580 if (op == CEE_LDSFLDA) {
10581 ins->klass = mono_class_from_mono_type (ftype);
10582 ins->type = STACK_PTR;
10584 } else if (op == CEE_STSFLD) {
10587 EMIT_NEW_STORE_MEMBASE_TYPE (cfg, store, ftype, ins->dreg, 0, store_val->dreg);
10588 store->flags |= ins_flag;
10590 gboolean is_const = FALSE;
10591 MonoVTable *vtable = NULL;
10592 gpointer addr = NULL;
10594 if (!context_used) {
10595 vtable = mono_class_vtable (cfg->domain, klass);
10596 CHECK_TYPELOAD (klass);
10598 if ((ftype->attrs & FIELD_ATTRIBUTE_INIT_ONLY) && (((addr = mono_aot_readonly_field_override (field)) != NULL) ||
10599 (!context_used && !((cfg->opt & MONO_OPT_SHARED) || cfg->compile_aot) && vtable->initialized))) {
10600 int ro_type = ftype->type;
10602 addr = (char*)mono_vtable_get_static_field_data (vtable) + field->offset;
10603 if (ro_type == MONO_TYPE_VALUETYPE && ftype->data.klass->enumtype) {
10604 ro_type = mono_class_enum_basetype (ftype->data.klass)->type;
10607 GSHAREDVT_FAILURE (op);
10609 /* printf ("RO-FIELD %s.%s:%s\n", klass->name_space, klass->name, mono_field_get_name (field));*/
10612 case MONO_TYPE_BOOLEAN:
10614 EMIT_NEW_ICONST (cfg, *sp, *((guint8 *)addr));
10618 EMIT_NEW_ICONST (cfg, *sp, *((gint8 *)addr));
10621 case MONO_TYPE_CHAR:
10623 EMIT_NEW_ICONST (cfg, *sp, *((guint16 *)addr));
10627 EMIT_NEW_ICONST (cfg, *sp, *((gint16 *)addr));
10632 EMIT_NEW_ICONST (cfg, *sp, *((gint32 *)addr));
10636 EMIT_NEW_ICONST (cfg, *sp, *((guint32 *)addr));
10641 case MONO_TYPE_PTR:
10642 case MONO_TYPE_FNPTR:
10643 EMIT_NEW_PCONST (cfg, *sp, *((gpointer *)addr));
10644 type_to_eval_stack_type ((cfg), field->type, *sp);
10647 case MONO_TYPE_STRING:
10648 case MONO_TYPE_OBJECT:
10649 case MONO_TYPE_CLASS:
10650 case MONO_TYPE_SZARRAY:
10651 case MONO_TYPE_ARRAY:
10652 if (!mono_gc_is_moving ()) {
10653 EMIT_NEW_PCONST (cfg, *sp, *((gpointer *)addr));
10654 type_to_eval_stack_type ((cfg), field->type, *sp);
10662 EMIT_NEW_I8CONST (cfg, *sp, *((gint64 *)addr));
10667 case MONO_TYPE_VALUETYPE:
10677 CHECK_STACK_OVF (1);
10679 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, load, field->type, ins->dreg, 0);
10680 load->flags |= ins_flag;
10686 if ((op == CEE_LDFLD || op == CEE_LDSFLD) && (ins_flag & MONO_INST_VOLATILE)) {
10687 /* Volatile loads have acquire semantics, see 12.6.7 in Ecma 335 */
10688 /* FIXME it's questionable if acquire semantics require full barrier or just LoadLoad*/
10689 emit_memory_barrier (cfg, FullBarrier);
10700 token = read32 (ip + 1);
10701 klass = mini_get_class (method, token, generic_context);
10702 CHECK_TYPELOAD (klass);
10703 if (ins_flag & MONO_INST_VOLATILE) {
10704 /* Volatile stores have release semantics, see 12.6.7 in Ecma 335 */
10705 /* FIXME it's questionable if release semantics require a full barrier or just StoreStore */
10706 emit_memory_barrier (cfg, FullBarrier);
10708 /* FIXME: should check item at sp [1] is compatible with the type of the store. */
10709 EMIT_NEW_STORE_MEMBASE_TYPE (cfg, ins, &klass->byval_arg, sp [0]->dreg, 0, sp [1]->dreg);
10710 ins->flags |= ins_flag;
10711 if (cfg->gen_write_barriers && cfg->method->wrapper_type != MONO_WRAPPER_WRITE_BARRIER &&
10712 generic_class_is_reference_type (cfg, klass)) {
10713 /* insert call to write barrier */
10714 emit_write_barrier (cfg, sp [0], sp [1]);
10726 const char *data_ptr;
10728 guint32 field_token;
10734 token = read32 (ip + 1);
10736 klass = mini_get_class (method, token, generic_context);
10737 CHECK_TYPELOAD (klass);
10739 context_used = mini_class_check_context_used (cfg, klass);
10741 if (sp [0]->type == STACK_I8 || (SIZEOF_VOID_P == 8 && sp [0]->type == STACK_PTR)) {
10742 MONO_INST_NEW (cfg, ins, OP_LCONV_TO_OVF_U4);
10743 ins->sreg1 = sp [0]->dreg;
10744 ins->type = STACK_I4;
10745 ins->dreg = alloc_ireg (cfg);
10746 MONO_ADD_INS (cfg->cbb, ins);
10747 *sp = mono_decompose_opcode (cfg, ins);
10750 if (context_used) {
10751 MonoInst *args [3];
10752 MonoClass *array_class = mono_array_class_get (klass, 1);
10753 MonoMethod *managed_alloc = mono_gc_get_managed_array_allocator (array_class);
10755 /* FIXME: Use OP_NEWARR and decompose later to help abcrem */
10758 args [0] = emit_get_rgctx_klass (cfg, context_used,
10759 array_class, MONO_RGCTX_INFO_VTABLE);
10764 ins = mono_emit_method_call (cfg, managed_alloc, args, NULL);
10766 ins = mono_emit_jit_icall (cfg, mono_array_new_specific, args);
10768 if (cfg->opt & MONO_OPT_SHARED) {
10769 /* Decompose now to avoid problems with references to the domainvar */
10770 MonoInst *iargs [3];
10772 EMIT_NEW_DOMAINCONST (cfg, iargs [0]);
10773 EMIT_NEW_CLASSCONST (cfg, iargs [1], klass);
10774 iargs [2] = sp [0];
10776 ins = mono_emit_jit_icall (cfg, mono_array_new, iargs);
10778 /* Decompose later since it is needed by abcrem */
10779 MonoClass *array_type = mono_array_class_get (klass, 1);
10780 mono_class_vtable (cfg->domain, array_type);
10781 CHECK_TYPELOAD (array_type);
10783 MONO_INST_NEW (cfg, ins, OP_NEWARR);
10784 ins->dreg = alloc_ireg_ref (cfg);
10785 ins->sreg1 = sp [0]->dreg;
10786 ins->inst_newa_class = klass;
10787 ins->type = STACK_OBJ;
10788 ins->klass = array_type;
10789 MONO_ADD_INS (cfg->cbb, ins);
10790 cfg->flags |= MONO_CFG_HAS_ARRAY_ACCESS;
10791 cfg->cbb->has_array_access = TRUE;
10793 /* Needed so mono_emit_load_get_addr () gets called */
10794 mono_get_got_var (cfg);
10804 * we inline/optimize the initialization sequence if possible.
10805 * we should also allocate the array as not cleared, since we spend as much time clearing to 0 as initializing
10806 * for small sizes open code the memcpy
10807 * ensure the rva field is big enough
10809 if ((cfg->opt & MONO_OPT_INTRINS) && ip + 6 < end && ip_in_bb (cfg, bblock, ip + 6) && (len_ins->opcode == OP_ICONST) && (data_ptr = initialize_array_data (method, cfg->compile_aot, ip, klass, len_ins->inst_c0, &data_size, &field_token))) {
10810 MonoMethod *memcpy_method = get_memcpy_method ();
10811 MonoInst *iargs [3];
10812 int add_reg = alloc_ireg_mp (cfg);
10814 EMIT_NEW_BIALU_IMM (cfg, iargs [0], OP_PADD_IMM, add_reg, ins->dreg, MONO_STRUCT_OFFSET (MonoArray, vector));
10815 if (cfg->compile_aot) {
10816 EMIT_NEW_AOTCONST_TOKEN (cfg, iargs [1], MONO_PATCH_INFO_RVA, method->klass->image, GPOINTER_TO_UINT(field_token), STACK_PTR, NULL);
10818 EMIT_NEW_PCONST (cfg, iargs [1], (char*)data_ptr);
10820 EMIT_NEW_ICONST (cfg, iargs [2], data_size);
10821 mono_emit_method_call (cfg, memcpy_method, iargs, NULL);
10830 if (sp [0]->type != STACK_OBJ)
10833 MONO_INST_NEW (cfg, ins, OP_LDLEN);
10834 ins->dreg = alloc_preg (cfg);
10835 ins->sreg1 = sp [0]->dreg;
10836 ins->type = STACK_I4;
10837 /* This flag will be inherited by the decomposition */
10838 ins->flags |= MONO_INST_FAULT;
10839 MONO_ADD_INS (cfg->cbb, ins);
10840 cfg->flags |= MONO_CFG_HAS_ARRAY_ACCESS;
10841 cfg->cbb->has_array_access = TRUE;
10849 if (sp [0]->type != STACK_OBJ)
10852 cfg->flags |= MONO_CFG_HAS_LDELEMA;
10854 klass = mini_get_class (method, read32 (ip + 1), generic_context);
10855 CHECK_TYPELOAD (klass);
10856 /* we need to make sure that this array is exactly the type it needs
10857 * to be for correctness. the wrappers are lax with their usage
10858 * so we need to ignore them here
10860 if (!klass->valuetype && method->wrapper_type == MONO_WRAPPER_NONE && !readonly) {
10861 MonoClass *array_class = mono_array_class_get (klass, 1);
10862 mini_emit_check_array_type (cfg, sp [0], array_class);
10863 CHECK_TYPELOAD (array_class);
10867 ins = mini_emit_ldelema_1_ins (cfg, klass, sp [0], sp [1], TRUE);
10872 case CEE_LDELEM_I1:
10873 case CEE_LDELEM_U1:
10874 case CEE_LDELEM_I2:
10875 case CEE_LDELEM_U2:
10876 case CEE_LDELEM_I4:
10877 case CEE_LDELEM_U4:
10878 case CEE_LDELEM_I8:
10880 case CEE_LDELEM_R4:
10881 case CEE_LDELEM_R8:
10882 case CEE_LDELEM_REF: {
10888 if (*ip == CEE_LDELEM) {
10890 token = read32 (ip + 1);
10891 klass = mini_get_class (method, token, generic_context);
10892 CHECK_TYPELOAD (klass);
10893 mono_class_init (klass);
10896 klass = array_access_to_klass (*ip);
10898 if (sp [0]->type != STACK_OBJ)
10901 cfg->flags |= MONO_CFG_HAS_LDELEMA;
10903 if (mini_is_gsharedvt_variable_klass (cfg, klass)) {
10904 // FIXME-VT: OP_ICONST optimization
10905 addr = mini_emit_ldelema_1_ins (cfg, klass, sp [0], sp [1], TRUE);
10906 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, ins, &klass->byval_arg, addr->dreg, 0);
10907 ins->opcode = OP_LOADV_MEMBASE;
10908 } else if (sp [1]->opcode == OP_ICONST) {
10909 int array_reg = sp [0]->dreg;
10910 int index_reg = sp [1]->dreg;
10911 int offset = (mono_class_array_element_size (klass) * sp [1]->inst_c0) + MONO_STRUCT_OFFSET (MonoArray, vector);
10913 MONO_EMIT_BOUNDS_CHECK (cfg, array_reg, MonoArray, max_length, index_reg);
10914 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, ins, &klass->byval_arg, array_reg, offset);
10916 addr = mini_emit_ldelema_1_ins (cfg, klass, sp [0], sp [1], TRUE);
10917 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, ins, &klass->byval_arg, addr->dreg, 0);
10920 if (*ip == CEE_LDELEM)
10927 case CEE_STELEM_I1:
10928 case CEE_STELEM_I2:
10929 case CEE_STELEM_I4:
10930 case CEE_STELEM_I8:
10931 case CEE_STELEM_R4:
10932 case CEE_STELEM_R8:
10933 case CEE_STELEM_REF:
10938 cfg->flags |= MONO_CFG_HAS_LDELEMA;
10940 if (*ip == CEE_STELEM) {
10942 token = read32 (ip + 1);
10943 klass = mini_get_class (method, token, generic_context);
10944 CHECK_TYPELOAD (klass);
10945 mono_class_init (klass);
10948 klass = array_access_to_klass (*ip);
10950 if (sp [0]->type != STACK_OBJ)
10953 emit_array_store (cfg, klass, sp, TRUE);
10955 if (*ip == CEE_STELEM)
10962 case CEE_CKFINITE: {
10966 MONO_INST_NEW (cfg, ins, OP_CKFINITE);
10967 ins->sreg1 = sp [0]->dreg;
10968 ins->dreg = alloc_freg (cfg);
10969 ins->type = STACK_R8;
10970 MONO_ADD_INS (bblock, ins);
10972 *sp++ = mono_decompose_opcode (cfg, ins);
10977 case CEE_REFANYVAL: {
10978 MonoInst *src_var, *src;
10980 int klass_reg = alloc_preg (cfg);
10981 int dreg = alloc_preg (cfg);
10983 GSHAREDVT_FAILURE (*ip);
10986 MONO_INST_NEW (cfg, ins, *ip);
10989 klass = mini_get_class (method, read32 (ip + 1), generic_context);
10990 CHECK_TYPELOAD (klass);
10992 context_used = mini_class_check_context_used (cfg, klass);
10995 src_var = get_vreg_to_inst (cfg, sp [0]->dreg);
10997 src_var = mono_compile_create_var_for_vreg (cfg, &mono_defaults.typed_reference_class->byval_arg, OP_LOCAL, sp [0]->dreg);
10998 EMIT_NEW_VARLOADA (cfg, src, src_var, src_var->inst_vtype);
10999 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, src->dreg, MONO_STRUCT_OFFSET (MonoTypedRef, klass));
11001 if (context_used) {
11002 MonoInst *klass_ins;
11004 klass_ins = emit_get_rgctx_klass (cfg, context_used,
11005 klass, MONO_RGCTX_INFO_KLASS);
11008 MONO_EMIT_NEW_BIALU (cfg, OP_COMPARE, -1, klass_reg, klass_ins->dreg);
11009 MONO_EMIT_NEW_COND_EXC (cfg, NE_UN, "InvalidCastException");
11011 mini_emit_class_check (cfg, klass_reg, klass);
11013 EMIT_NEW_LOAD_MEMBASE (cfg, ins, OP_LOAD_MEMBASE, dreg, src->dreg, MONO_STRUCT_OFFSET (MonoTypedRef, value));
11014 ins->type = STACK_MP;
11019 case CEE_MKREFANY: {
11020 MonoInst *loc, *addr;
11022 GSHAREDVT_FAILURE (*ip);
11025 MONO_INST_NEW (cfg, ins, *ip);
11028 klass = mini_get_class (method, read32 (ip + 1), generic_context);
11029 CHECK_TYPELOAD (klass);
11031 context_used = mini_class_check_context_used (cfg, klass);
11033 loc = mono_compile_create_var (cfg, &mono_defaults.typed_reference_class->byval_arg, OP_LOCAL);
11034 EMIT_NEW_TEMPLOADA (cfg, addr, loc->inst_c0);
11036 if (context_used) {
11037 MonoInst *const_ins;
11038 int type_reg = alloc_preg (cfg);
11040 const_ins = emit_get_rgctx_klass (cfg, context_used, klass, MONO_RGCTX_INFO_KLASS);
11041 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREP_MEMBASE_REG, addr->dreg, MONO_STRUCT_OFFSET (MonoTypedRef, klass), const_ins->dreg);
11042 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_ADD_IMM, type_reg, const_ins->dreg, MONO_STRUCT_OFFSET (MonoClass, byval_arg));
11043 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREP_MEMBASE_REG, addr->dreg, MONO_STRUCT_OFFSET (MonoTypedRef, type), type_reg);
11044 } else if (cfg->compile_aot) {
11045 int const_reg = alloc_preg (cfg);
11046 int type_reg = alloc_preg (cfg);
11048 MONO_EMIT_NEW_CLASSCONST (cfg, const_reg, klass);
11049 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREP_MEMBASE_REG, addr->dreg, MONO_STRUCT_OFFSET (MonoTypedRef, klass), const_reg);
11050 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_ADD_IMM, type_reg, const_reg, MONO_STRUCT_OFFSET (MonoClass, byval_arg));
11051 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREP_MEMBASE_REG, addr->dreg, MONO_STRUCT_OFFSET (MonoTypedRef, type), type_reg);
11053 MONO_EMIT_NEW_STORE_MEMBASE_IMM (cfg, OP_STOREP_MEMBASE_IMM, addr->dreg, MONO_STRUCT_OFFSET (MonoTypedRef, type), &klass->byval_arg);
11054 MONO_EMIT_NEW_STORE_MEMBASE_IMM (cfg, OP_STOREP_MEMBASE_IMM, addr->dreg, MONO_STRUCT_OFFSET (MonoTypedRef, klass), klass);
11056 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREP_MEMBASE_REG, addr->dreg, MONO_STRUCT_OFFSET (MonoTypedRef, value), sp [0]->dreg);
11058 EMIT_NEW_TEMPLOAD (cfg, ins, loc->inst_c0);
11059 ins->type = STACK_VTYPE;
11060 ins->klass = mono_defaults.typed_reference_class;
11065 case CEE_LDTOKEN: {
11067 MonoClass *handle_class;
11069 CHECK_STACK_OVF (1);
11072 n = read32 (ip + 1);
11074 if (method->wrapper_type == MONO_WRAPPER_DYNAMIC_METHOD ||
11075 method->wrapper_type == MONO_WRAPPER_SYNCHRONIZED) {
11076 handle = mono_method_get_wrapper_data (method, n);
11077 handle_class = mono_method_get_wrapper_data (method, n + 1);
11078 if (handle_class == mono_defaults.typehandle_class)
11079 handle = &((MonoClass*)handle)->byval_arg;
11082 handle = mono_ldtoken (image, n, &handle_class, generic_context);
11086 mono_class_init (handle_class);
11087 if (cfg->generic_sharing_context) {
11088 if (mono_metadata_token_table (n) == MONO_TABLE_TYPEDEF ||
11089 mono_metadata_token_table (n) == MONO_TABLE_TYPEREF) {
11090 /* This case handles ldtoken
11091 of an open type, like for
11094 } else if (handle_class == mono_defaults.typehandle_class) {
11095 context_used = mini_class_check_context_used (cfg, mono_class_from_mono_type (handle));
11096 } else if (handle_class == mono_defaults.fieldhandle_class)
11097 context_used = mini_class_check_context_used (cfg, ((MonoClassField*)handle)->parent);
11098 else if (handle_class == mono_defaults.methodhandle_class)
11099 context_used = mini_method_check_context_used (cfg, handle);
11101 g_assert_not_reached ();
11104 if ((cfg->opt & MONO_OPT_SHARED) &&
11105 method->wrapper_type != MONO_WRAPPER_DYNAMIC_METHOD &&
11106 method->wrapper_type != MONO_WRAPPER_SYNCHRONIZED) {
11107 MonoInst *addr, *vtvar, *iargs [3];
11108 int method_context_used;
11110 method_context_used = mini_method_check_context_used (cfg, method);
11112 vtvar = mono_compile_create_var (cfg, &handle_class->byval_arg, OP_LOCAL);
11114 EMIT_NEW_IMAGECONST (cfg, iargs [0], image);
11115 EMIT_NEW_ICONST (cfg, iargs [1], n);
11116 if (method_context_used) {
11117 iargs [2] = emit_get_rgctx_method (cfg, method_context_used,
11118 method, MONO_RGCTX_INFO_METHOD);
11119 ins = mono_emit_jit_icall (cfg, mono_ldtoken_wrapper_generic_shared, iargs);
11121 EMIT_NEW_PCONST (cfg, iargs [2], generic_context);
11122 ins = mono_emit_jit_icall (cfg, mono_ldtoken_wrapper, iargs);
11124 EMIT_NEW_TEMPLOADA (cfg, addr, vtvar->inst_c0);
11126 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORE_MEMBASE_REG, addr->dreg, 0, ins->dreg);
11128 EMIT_NEW_TEMPLOAD (cfg, ins, vtvar->inst_c0);
11130 if ((ip + 5 < end) && ip_in_bb (cfg, bblock, ip + 5) &&
11131 ((ip [5] == CEE_CALL) || (ip [5] == CEE_CALLVIRT)) &&
11132 (cmethod = mini_get_method (cfg, method, read32 (ip + 6), NULL, generic_context)) &&
11133 (cmethod->klass == mono_defaults.systemtype_class) &&
11134 (strcmp (cmethod->name, "GetTypeFromHandle") == 0)) {
11135 MonoClass *tclass = mono_class_from_mono_type (handle);
11137 mono_class_init (tclass);
11138 if (context_used) {
11139 ins = emit_get_rgctx_klass (cfg, context_used,
11140 tclass, MONO_RGCTX_INFO_REFLECTION_TYPE);
11141 } else if (cfg->compile_aot) {
11142 if (method->wrapper_type) {
11143 mono_error_init (&error); //got to do it since there are multiple conditionals below
11144 if (mono_class_get_checked (tclass->image, tclass->type_token, &error) == tclass && !generic_context) {
11145 /* Special case for static synchronized wrappers */
11146 EMIT_NEW_TYPE_FROM_HANDLE_CONST (cfg, ins, tclass->image, tclass->type_token, generic_context);
11148 mono_error_cleanup (&error); /* FIXME don't swallow the error */
11149 /* FIXME: n is not a normal token */
11151 EMIT_NEW_PCONST (cfg, ins, NULL);
11154 EMIT_NEW_TYPE_FROM_HANDLE_CONST (cfg, ins, image, n, generic_context);
11157 EMIT_NEW_PCONST (cfg, ins, mono_type_get_object (cfg->domain, handle));
11159 ins->type = STACK_OBJ;
11160 ins->klass = cmethod->klass;
11163 MonoInst *addr, *vtvar;
11165 vtvar = mono_compile_create_var (cfg, &handle_class->byval_arg, OP_LOCAL);
11167 if (context_used) {
11168 if (handle_class == mono_defaults.typehandle_class) {
11169 ins = emit_get_rgctx_klass (cfg, context_used,
11170 mono_class_from_mono_type (handle),
11171 MONO_RGCTX_INFO_TYPE);
11172 } else if (handle_class == mono_defaults.methodhandle_class) {
11173 ins = emit_get_rgctx_method (cfg, context_used,
11174 handle, MONO_RGCTX_INFO_METHOD);
11175 } else if (handle_class == mono_defaults.fieldhandle_class) {
11176 ins = emit_get_rgctx_field (cfg, context_used,
11177 handle, MONO_RGCTX_INFO_CLASS_FIELD);
11179 g_assert_not_reached ();
11181 } else if (cfg->compile_aot) {
11182 EMIT_NEW_LDTOKENCONST (cfg, ins, image, n, generic_context);
11184 EMIT_NEW_PCONST (cfg, ins, handle);
11186 EMIT_NEW_TEMPLOADA (cfg, addr, vtvar->inst_c0);
11187 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORE_MEMBASE_REG, addr->dreg, 0, ins->dreg);
11188 EMIT_NEW_TEMPLOAD (cfg, ins, vtvar->inst_c0);
11198 MONO_INST_NEW (cfg, ins, OP_THROW);
11200 ins->sreg1 = sp [0]->dreg;
11202 bblock->out_of_line = TRUE;
11203 MONO_ADD_INS (bblock, ins);
11204 MONO_INST_NEW (cfg, ins, OP_NOT_REACHED);
11205 MONO_ADD_INS (bblock, ins);
11208 link_bblock (cfg, bblock, end_bblock);
11209 start_new_bblock = 1;
11211 case CEE_ENDFINALLY:
11212 /* mono_save_seq_point_info () depends on this */
11213 if (sp != stack_start)
11214 emit_seq_point (cfg, method, ip, FALSE, FALSE);
11215 MONO_INST_NEW (cfg, ins, OP_ENDFINALLY);
11216 MONO_ADD_INS (bblock, ins);
11218 start_new_bblock = 1;
11221 * Control will leave the method so empty the stack, otherwise
11222 * the next basic block will start with a nonempty stack.
11224 while (sp != stack_start) {
11229 case CEE_LEAVE_S: {
11232 if (*ip == CEE_LEAVE) {
11234 target = ip + 5 + (gint32)read32(ip + 1);
11237 target = ip + 2 + (signed char)(ip [1]);
11240 /* empty the stack */
11241 while (sp != stack_start) {
11246 * If this leave statement is in a catch block, check for a
11247 * pending exception, and rethrow it if necessary.
11248 * We avoid doing this in runtime invoke wrappers, since those are called
11249 * by native code which expects the wrapper to catch all exceptions.
11251 for (i = 0; i < header->num_clauses; ++i) {
11252 MonoExceptionClause *clause = &header->clauses [i];
11255 * Use <= in the final comparison to handle clauses with multiple
11256 * leave statements, like in bug #78024.
11257 * The ordering of the exception clauses guarantees that we find the
11258 * innermost clause.
11260 if (MONO_OFFSET_IN_HANDLER (clause, ip - header->code) && (clause->flags == MONO_EXCEPTION_CLAUSE_NONE) && (ip - header->code + ((*ip == CEE_LEAVE) ? 5 : 2)) <= (clause->handler_offset + clause->handler_len) && method->wrapper_type != MONO_WRAPPER_RUNTIME_INVOKE) {
11262 MonoBasicBlock *dont_throw;
11267 NEW_TEMPLOAD (cfg, load, mono_find_exvar_for_offset (cfg, clause->handler_offset)->inst_c0);
11270 exc_ins = mono_emit_jit_icall (cfg, mono_thread_get_undeniable_exception, NULL);
11272 NEW_BBLOCK (cfg, dont_throw);
11275 * Currently, we always rethrow the abort exception, despite the
11276 * fact that this is not correct. See thread6.cs for an example.
11277 * But propagating the abort exception is more important than
11278 * getting the semantics right.
11280 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, exc_ins->dreg, 0);
11281 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBEQ, dont_throw);
11282 MONO_EMIT_NEW_UNALU (cfg, OP_THROW, -1, exc_ins->dreg);
11284 MONO_START_BB (cfg, dont_throw);
11289 if ((handlers = mono_find_final_block (cfg, ip, target, MONO_EXCEPTION_CLAUSE_FINALLY))) {
11291 MonoExceptionClause *clause;
11293 for (tmp = handlers; tmp; tmp = tmp->next) {
11294 clause = tmp->data;
11295 tblock = cfg->cil_offset_to_bb [clause->handler_offset];
11297 link_bblock (cfg, bblock, tblock);
11298 MONO_INST_NEW (cfg, ins, OP_CALL_HANDLER);
11299 ins->inst_target_bb = tblock;
11300 ins->inst_eh_block = clause;
11301 MONO_ADD_INS (bblock, ins);
11302 bblock->has_call_handler = 1;
11303 if (COMPILE_LLVM (cfg)) {
11304 MonoBasicBlock *target_bb;
11307 * Link the finally bblock with the target, since it will
11308 * conceptually branch there.
11309 * FIXME: Have to link the bblock containing the endfinally.
11311 GET_BBLOCK (cfg, target_bb, target);
11312 link_bblock (cfg, tblock, target_bb);
11315 g_list_free (handlers);
11318 MONO_INST_NEW (cfg, ins, OP_BR);
11319 MONO_ADD_INS (bblock, ins);
11320 GET_BBLOCK (cfg, tblock, target);
11321 link_bblock (cfg, bblock, tblock);
11322 ins->inst_target_bb = tblock;
11323 start_new_bblock = 1;
11325 if (*ip == CEE_LEAVE)
11334 * Mono specific opcodes
11336 case MONO_CUSTOM_PREFIX: {
11338 g_assert (method->wrapper_type != MONO_WRAPPER_NONE);
11342 case CEE_MONO_ICALL: {
11344 MonoJitICallInfo *info;
11346 token = read32 (ip + 2);
11347 func = mono_method_get_wrapper_data (method, token);
11348 info = mono_find_jit_icall_by_addr (func);
11350 g_error ("Could not find icall address in wrapper %s", mono_method_full_name (method, 1));
11353 CHECK_STACK (info->sig->param_count);
11354 sp -= info->sig->param_count;
11356 ins = mono_emit_jit_icall (cfg, info->func, sp);
11357 if (!MONO_TYPE_IS_VOID (info->sig->ret))
11361 inline_costs += 10 * num_calls++;
11365 case CEE_MONO_LDPTR: {
11368 CHECK_STACK_OVF (1);
11370 token = read32 (ip + 2);
11372 ptr = mono_method_get_wrapper_data (method, token);
11373 /* FIXME: Generalize this */
11374 if (cfg->compile_aot && ptr == mono_thread_interruption_request_flag ()) {
11375 EMIT_NEW_AOTCONST (cfg, ins, MONO_PATCH_INFO_INTERRUPTION_REQUEST_FLAG, NULL);
11380 EMIT_NEW_PCONST (cfg, ins, ptr);
11383 inline_costs += 10 * num_calls++;
11384 /* Can't embed random pointers into AOT code */
11388 case CEE_MONO_JIT_ICALL_ADDR: {
11389 MonoJitICallInfo *callinfo;
11392 CHECK_STACK_OVF (1);
11394 token = read32 (ip + 2);
11396 ptr = mono_method_get_wrapper_data (method, token);
11397 callinfo = mono_find_jit_icall_by_addr (ptr);
11398 g_assert (callinfo);
11399 EMIT_NEW_JIT_ICALL_ADDRCONST (cfg, ins, (char*)callinfo->name);
11402 inline_costs += 10 * num_calls++;
11405 case CEE_MONO_ICALL_ADDR: {
11406 MonoMethod *cmethod;
11409 CHECK_STACK_OVF (1);
11411 token = read32 (ip + 2);
11413 cmethod = mono_method_get_wrapper_data (method, token);
11415 if (cfg->compile_aot) {
11416 EMIT_NEW_AOTCONST (cfg, ins, MONO_PATCH_INFO_ICALL_ADDR, cmethod);
11418 ptr = mono_lookup_internal_call (cmethod);
11420 EMIT_NEW_PCONST (cfg, ins, ptr);
11426 case CEE_MONO_VTADDR: {
11427 MonoInst *src_var, *src;
11433 src_var = get_vreg_to_inst (cfg, sp [0]->dreg);
11434 EMIT_NEW_VARLOADA ((cfg), (src), src_var, src_var->inst_vtype);
11439 case CEE_MONO_NEWOBJ: {
11440 MonoInst *iargs [2];
11442 CHECK_STACK_OVF (1);
11444 token = read32 (ip + 2);
11445 klass = (MonoClass *)mono_method_get_wrapper_data (method, token);
11446 mono_class_init (klass);
11447 NEW_DOMAINCONST (cfg, iargs [0]);
11448 MONO_ADD_INS (cfg->cbb, iargs [0]);
11449 NEW_CLASSCONST (cfg, iargs [1], klass);
11450 MONO_ADD_INS (cfg->cbb, iargs [1]);
11451 *sp++ = mono_emit_jit_icall (cfg, mono_object_new, iargs);
11453 inline_costs += 10 * num_calls++;
11456 case CEE_MONO_OBJADDR:
11459 MONO_INST_NEW (cfg, ins, OP_MOVE);
11460 ins->dreg = alloc_ireg_mp (cfg);
11461 ins->sreg1 = sp [0]->dreg;
11462 ins->type = STACK_MP;
11463 MONO_ADD_INS (cfg->cbb, ins);
11467 case CEE_MONO_LDNATIVEOBJ:
11469 * Similar to LDOBJ, but instead load the unmanaged
11470 * representation of the vtype to the stack.
11475 token = read32 (ip + 2);
11476 klass = mono_method_get_wrapper_data (method, token);
11477 g_assert (klass->valuetype);
11478 mono_class_init (klass);
11481 MonoInst *src, *dest, *temp;
11484 temp = mono_compile_create_var (cfg, &klass->byval_arg, OP_LOCAL);
11485 temp->backend.is_pinvoke = 1;
11486 EMIT_NEW_TEMPLOADA (cfg, dest, temp->inst_c0);
11487 mini_emit_stobj (cfg, dest, src, klass, TRUE);
11489 EMIT_NEW_TEMPLOAD (cfg, dest, temp->inst_c0);
11490 dest->type = STACK_VTYPE;
11491 dest->klass = klass;
11497 case CEE_MONO_RETOBJ: {
11499 * Same as RET, but return the native representation of a vtype
11502 g_assert (cfg->ret);
11503 g_assert (mono_method_signature (method)->pinvoke);
11508 token = read32 (ip + 2);
11509 klass = (MonoClass *)mono_method_get_wrapper_data (method, token);
11511 if (!cfg->vret_addr) {
11512 g_assert (cfg->ret_var_is_local);
11514 EMIT_NEW_VARLOADA (cfg, ins, cfg->ret, cfg->ret->inst_vtype);
11516 EMIT_NEW_RETLOADA (cfg, ins);
11518 mini_emit_stobj (cfg, ins, sp [0], klass, TRUE);
11520 if (sp != stack_start)
11523 MONO_INST_NEW (cfg, ins, OP_BR);
11524 ins->inst_target_bb = end_bblock;
11525 MONO_ADD_INS (bblock, ins);
11526 link_bblock (cfg, bblock, end_bblock);
11527 start_new_bblock = 1;
11531 case CEE_MONO_CISINST:
11532 case CEE_MONO_CCASTCLASS: {
11537 token = read32 (ip + 2);
11538 klass = (MonoClass *)mono_method_get_wrapper_data (method, token);
11539 if (ip [1] == CEE_MONO_CISINST)
11540 ins = handle_cisinst (cfg, klass, sp [0]);
11542 ins = handle_ccastclass (cfg, klass, sp [0]);
11548 case CEE_MONO_SAVE_LMF:
11549 case CEE_MONO_RESTORE_LMF:
11550 #ifdef MONO_ARCH_HAVE_LMF_OPS
11551 MONO_INST_NEW (cfg, ins, (ip [1] == CEE_MONO_SAVE_LMF) ? OP_SAVE_LMF : OP_RESTORE_LMF);
11552 MONO_ADD_INS (bblock, ins);
11553 cfg->need_lmf_area = TRUE;
11557 case CEE_MONO_CLASSCONST:
11558 CHECK_STACK_OVF (1);
11560 token = read32 (ip + 2);
11561 EMIT_NEW_CLASSCONST (cfg, ins, mono_method_get_wrapper_data (method, token));
11564 inline_costs += 10 * num_calls++;
11566 case CEE_MONO_NOT_TAKEN:
11567 bblock->out_of_line = TRUE;
11570 case CEE_MONO_TLS: {
11573 CHECK_STACK_OVF (1);
11575 key = (gint32)read32 (ip + 2);
11576 g_assert (key < TLS_KEY_NUM);
11578 ins = mono_create_tls_get (cfg, key);
11580 if (cfg->compile_aot) {
11582 MONO_INST_NEW (cfg, ins, OP_TLS_GET);
11583 ins->dreg = alloc_preg (cfg);
11584 ins->type = STACK_PTR;
11586 g_assert_not_reached ();
11589 ins->type = STACK_PTR;
11590 MONO_ADD_INS (bblock, ins);
11595 case CEE_MONO_DYN_CALL: {
11596 MonoCallInst *call;
11598 /* It would be easier to call a trampoline, but that would put an
11599 * extra frame on the stack, confusing exception handling. So
11600 * implement it inline using an opcode for now.
11603 if (!cfg->dyn_call_var) {
11604 cfg->dyn_call_var = mono_compile_create_var (cfg, &mono_defaults.int_class->byval_arg, OP_LOCAL);
11605 /* prevent it from being register allocated */
11606 cfg->dyn_call_var->flags |= MONO_INST_VOLATILE;
11609 /* Has to use a call inst since it local regalloc expects it */
11610 MONO_INST_NEW_CALL (cfg, call, OP_DYN_CALL);
11611 ins = (MonoInst*)call;
11613 ins->sreg1 = sp [0]->dreg;
11614 ins->sreg2 = sp [1]->dreg;
11615 MONO_ADD_INS (bblock, ins);
11617 cfg->param_area = MAX (cfg->param_area, MONO_ARCH_DYN_CALL_PARAM_AREA);
11620 inline_costs += 10 * num_calls++;
11624 case CEE_MONO_MEMORY_BARRIER: {
11626 emit_memory_barrier (cfg, (int)read32 (ip + 1));
11630 case CEE_MONO_JIT_ATTACH: {
11631 MonoInst *args [16], *domain_ins;
11632 MonoInst *ad_ins, *jit_tls_ins;
11633 MonoBasicBlock *next_bb = NULL, *call_bb = NULL;
11635 cfg->orig_domain_var = mono_compile_create_var (cfg, &mono_defaults.int_class->byval_arg, OP_LOCAL);
11637 EMIT_NEW_PCONST (cfg, ins, NULL);
11638 MONO_EMIT_NEW_UNALU (cfg, OP_MOVE, cfg->orig_domain_var->dreg, ins->dreg);
11640 ad_ins = mono_get_domain_intrinsic (cfg);
11641 jit_tls_ins = mono_get_jit_tls_intrinsic (cfg);
11643 if (MONO_ARCH_HAVE_TLS_GET && ad_ins && jit_tls_ins) {
11644 NEW_BBLOCK (cfg, next_bb);
11645 NEW_BBLOCK (cfg, call_bb);
11647 if (cfg->compile_aot) {
11648 /* AOT code is only used in the root domain */
11649 EMIT_NEW_PCONST (cfg, domain_ins, NULL);
11651 EMIT_NEW_PCONST (cfg, domain_ins, cfg->domain);
11653 MONO_ADD_INS (cfg->cbb, ad_ins);
11654 MONO_EMIT_NEW_BIALU (cfg, OP_COMPARE, -1, ad_ins->dreg, domain_ins->dreg);
11655 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBNE_UN, call_bb);
11657 MONO_ADD_INS (cfg->cbb, jit_tls_ins);
11658 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, jit_tls_ins->dreg, 0);
11659 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBEQ, call_bb);
11661 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, next_bb);
11662 MONO_START_BB (cfg, call_bb);
11665 if (cfg->compile_aot) {
11666 /* AOT code is only used in the root domain */
11667 EMIT_NEW_PCONST (cfg, args [0], NULL);
11669 EMIT_NEW_PCONST (cfg, args [0], cfg->domain);
11671 ins = mono_emit_jit_icall (cfg, mono_jit_thread_attach, args);
11672 MONO_EMIT_NEW_UNALU (cfg, OP_MOVE, cfg->orig_domain_var->dreg, ins->dreg);
11675 MONO_START_BB (cfg, next_bb);
11681 case CEE_MONO_JIT_DETACH: {
11682 MonoInst *args [16];
11684 /* Restore the original domain */
11685 dreg = alloc_ireg (cfg);
11686 EMIT_NEW_UNALU (cfg, args [0], OP_MOVE, dreg, cfg->orig_domain_var->dreg);
11687 mono_emit_jit_icall (cfg, mono_jit_set_domain, args);
11692 g_error ("opcode 0x%02x 0x%02x not handled", MONO_CUSTOM_PREFIX, ip [1]);
11698 case CEE_PREFIX1: {
11701 case CEE_ARGLIST: {
11702 /* somewhat similar to LDTOKEN */
11703 MonoInst *addr, *vtvar;
11704 CHECK_STACK_OVF (1);
11705 vtvar = mono_compile_create_var (cfg, &mono_defaults.argumenthandle_class->byval_arg, OP_LOCAL);
11707 EMIT_NEW_TEMPLOADA (cfg, addr, vtvar->inst_c0);
11708 EMIT_NEW_UNALU (cfg, ins, OP_ARGLIST, -1, addr->dreg);
11710 EMIT_NEW_TEMPLOAD (cfg, ins, vtvar->inst_c0);
11711 ins->type = STACK_VTYPE;
11712 ins->klass = mono_defaults.argumenthandle_class;
11725 * The following transforms:
11726 * CEE_CEQ into OP_CEQ
11727 * CEE_CGT into OP_CGT
11728 * CEE_CGT_UN into OP_CGT_UN
11729 * CEE_CLT into OP_CLT
11730 * CEE_CLT_UN into OP_CLT_UN
11732 MONO_INST_NEW (cfg, cmp, (OP_CEQ - CEE_CEQ) + ip [1]);
11734 MONO_INST_NEW (cfg, ins, cmp->opcode);
11736 cmp->sreg1 = sp [0]->dreg;
11737 cmp->sreg2 = sp [1]->dreg;
11738 type_from_op (cmp, sp [0], sp [1]);
11740 if ((sp [0]->type == STACK_I8) || ((SIZEOF_VOID_P == 8) && ((sp [0]->type == STACK_PTR) || (sp [0]->type == STACK_OBJ) || (sp [0]->type == STACK_MP))))
11741 cmp->opcode = OP_LCOMPARE;
11742 else if (sp [0]->type == STACK_R8)
11743 cmp->opcode = OP_FCOMPARE;
11745 cmp->opcode = OP_ICOMPARE;
11746 MONO_ADD_INS (bblock, cmp);
11747 ins->type = STACK_I4;
11748 ins->dreg = alloc_dreg (cfg, ins->type);
11749 type_from_op (ins, sp [0], sp [1]);
11751 if (cmp->opcode == OP_FCOMPARE) {
11753 * The backends expect the fceq opcodes to do the
11756 ins->sreg1 = cmp->sreg1;
11757 ins->sreg2 = cmp->sreg2;
11760 MONO_ADD_INS (bblock, ins);
11766 MonoInst *argconst;
11767 MonoMethod *cil_method;
11769 CHECK_STACK_OVF (1);
11771 n = read32 (ip + 2);
11772 cmethod = mini_get_method (cfg, method, n, NULL, generic_context);
11773 if (!cmethod || mono_loader_get_last_error ())
11775 mono_class_init (cmethod->klass);
11777 mono_save_token_info (cfg, image, n, cmethod);
11779 context_used = mini_method_check_context_used (cfg, cmethod);
11781 cil_method = cmethod;
11782 if (!dont_verify && !cfg->skip_visibility && !mono_method_can_access_method (method, cmethod))
11783 METHOD_ACCESS_FAILURE (method, cil_method);
11785 if (mono_security_cas_enabled ()) {
11786 if (check_linkdemand (cfg, method, cmethod))
11787 INLINE_FAILURE ("linkdemand");
11788 CHECK_CFG_EXCEPTION;
11789 } else if (mono_security_core_clr_enabled ()) {
11790 ensure_method_is_allowed_to_call_method (cfg, method, cmethod, bblock, ip);
11794 * Optimize the common case of ldftn+delegate creation
11796 if ((sp > stack_start) && (ip + 6 + 5 < end) && ip_in_bb (cfg, bblock, ip + 6) && (ip [6] == CEE_NEWOBJ)) {
11797 MonoMethod *ctor_method = mini_get_method (cfg, method, read32 (ip + 7), NULL, generic_context);
11798 if (ctor_method && (ctor_method->klass->parent == mono_defaults.multicastdelegate_class)) {
11799 MonoInst *target_ins, *handle_ins;
11800 MonoMethod *invoke;
11801 int invoke_context_used;
11803 invoke = mono_get_delegate_invoke (ctor_method->klass);
11804 if (!invoke || !mono_method_signature (invoke))
11807 invoke_context_used = mini_method_check_context_used (cfg, invoke);
11809 target_ins = sp [-1];
11811 if (mono_security_core_clr_enabled ())
11812 ensure_method_is_allowed_to_call_method (cfg, method, ctor_method, bblock, ip);
11814 if (!(cmethod->flags & METHOD_ATTRIBUTE_STATIC)) {
11815 /*LAME IMPL: We must not add a null check for virtual invoke delegates.*/
11816 if (mono_method_signature (invoke)->param_count == mono_method_signature (cmethod)->param_count) {
11817 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, target_ins->dreg, 0);
11818 MONO_EMIT_NEW_COND_EXC (cfg, EQ, "ArgumentException");
11822 #if defined(MONO_ARCH_HAVE_CREATE_DELEGATE_TRAMPOLINE)
11823 /* FIXME: SGEN support */
11824 if (invoke_context_used == 0) {
11826 if (cfg->verbose_level > 3)
11827 g_print ("converting (in B%d: stack: %d) %s", bblock->block_num, (int)(sp - stack_start), mono_disasm_code_one (NULL, method, ip, NULL));
11828 if ((handle_ins = handle_delegate_ctor (cfg, ctor_method->klass, target_ins, cmethod, context_used, FALSE))) {
11831 CHECK_CFG_EXCEPTION;
11842 argconst = emit_get_rgctx_method (cfg, context_used, cmethod, MONO_RGCTX_INFO_METHOD);
11843 ins = mono_emit_jit_icall (cfg, mono_ldftn, &argconst);
11847 inline_costs += 10 * num_calls++;
11850 case CEE_LDVIRTFTN: {
11851 MonoInst *args [2];
11855 n = read32 (ip + 2);
11856 cmethod = mini_get_method (cfg, method, n, NULL, generic_context);
11857 if (!cmethod || mono_loader_get_last_error ())
11859 mono_class_init (cmethod->klass);
11861 context_used = mini_method_check_context_used (cfg, cmethod);
11863 if (mono_security_cas_enabled ()) {
11864 if (check_linkdemand (cfg, method, cmethod))
11865 INLINE_FAILURE ("linkdemand");
11866 CHECK_CFG_EXCEPTION;
11867 } else if (mono_security_core_clr_enabled ()) {
11868 ensure_method_is_allowed_to_call_method (cfg, method, cmethod, bblock, ip);
11872 * Optimize the common case of ldvirtftn+delegate creation
11874 if ((sp > stack_start) && (ip + 6 + 5 < end) && ip_in_bb (cfg, bblock, ip + 6) && (ip [6] == CEE_NEWOBJ) && (ip > header->code && ip [-1] == CEE_DUP)) {
11875 MonoMethod *ctor_method = mini_get_method (cfg, method, read32 (ip + 7), NULL, generic_context);
11876 if (ctor_method && (ctor_method->klass->parent == mono_defaults.multicastdelegate_class)) {
11877 MonoInst *target_ins, *handle_ins;
11878 MonoMethod *invoke;
11879 int invoke_context_used;
11881 invoke = mono_get_delegate_invoke (ctor_method->klass);
11882 if (!invoke || !mono_method_signature (invoke))
11885 invoke_context_used = mini_method_check_context_used (cfg, invoke);
11887 target_ins = sp [-1];
11889 if (mono_security_core_clr_enabled ())
11890 ensure_method_is_allowed_to_call_method (cfg, method, ctor_method, bblock, ip);
11892 #if defined(MONO_ARCH_HAVE_CREATE_DELEGATE_TRAMPOLINE)
11893 /* FIXME: SGEN support */
11894 if (invoke_context_used == 0) {
11896 if (cfg->verbose_level > 3)
11897 g_print ("converting (in B%d: stack: %d) %s", bblock->block_num, (int)(sp - stack_start), mono_disasm_code_one (NULL, method, ip, NULL));
11898 if ((handle_ins = handle_delegate_ctor (cfg, ctor_method->klass, target_ins, cmethod, context_used, TRUE))) {
11901 CHECK_CFG_EXCEPTION;
11915 args [1] = emit_get_rgctx_method (cfg, context_used,
11916 cmethod, MONO_RGCTX_INFO_METHOD);
11919 *sp++ = mono_emit_jit_icall (cfg, mono_ldvirtfn_gshared, args);
11921 *sp++ = mono_emit_jit_icall (cfg, mono_ldvirtfn, args);
11924 inline_costs += 10 * num_calls++;
11928 CHECK_STACK_OVF (1);
11930 n = read16 (ip + 2);
11932 EMIT_NEW_ARGLOAD (cfg, ins, n);
11937 CHECK_STACK_OVF (1);
11939 n = read16 (ip + 2);
11941 NEW_ARGLOADA (cfg, ins, n);
11942 MONO_ADD_INS (cfg->cbb, ins);
11950 n = read16 (ip + 2);
11952 if (!dont_verify_stloc && target_type_is_incompatible (cfg, param_types [n], *sp))
11954 EMIT_NEW_ARGSTORE (cfg, ins, n, *sp);
11958 CHECK_STACK_OVF (1);
11960 n = read16 (ip + 2);
11962 EMIT_NEW_LOCLOAD (cfg, ins, n);
11967 unsigned char *tmp_ip;
11968 CHECK_STACK_OVF (1);
11970 n = read16 (ip + 2);
11973 if ((tmp_ip = emit_optimized_ldloca_ir (cfg, ip, end, 2))) {
11979 EMIT_NEW_LOCLOADA (cfg, ins, n);
11988 n = read16 (ip + 2);
11990 if (!dont_verify_stloc && target_type_is_incompatible (cfg, header->locals [n], *sp))
11992 emit_stloc_ir (cfg, sp, header, n);
11999 if (sp != stack_start)
12001 if (cfg->method != method)
12003 * Inlining this into a loop in a parent could lead to
12004 * stack overflows which is different behavior than the
12005 * non-inlined case, thus disable inlining in this case.
12007 INLINE_FAILURE("localloc");
12009 MONO_INST_NEW (cfg, ins, OP_LOCALLOC);
12010 ins->dreg = alloc_preg (cfg);
12011 ins->sreg1 = sp [0]->dreg;
12012 ins->type = STACK_PTR;
12013 MONO_ADD_INS (cfg->cbb, ins);
12015 cfg->flags |= MONO_CFG_HAS_ALLOCA;
12017 ins->flags |= MONO_INST_INIT;
12022 case CEE_ENDFILTER: {
12023 MonoExceptionClause *clause, *nearest;
12024 int cc, nearest_num;
12028 if ((sp != stack_start) || (sp [0]->type != STACK_I4))
12030 MONO_INST_NEW (cfg, ins, OP_ENDFILTER);
12031 ins->sreg1 = (*sp)->dreg;
12032 MONO_ADD_INS (bblock, ins);
12033 start_new_bblock = 1;
12038 for (cc = 0; cc < header->num_clauses; ++cc) {
12039 clause = &header->clauses [cc];
12040 if ((clause->flags & MONO_EXCEPTION_CLAUSE_FILTER) &&
12041 ((ip - header->code) > clause->data.filter_offset && (ip - header->code) <= clause->handler_offset) &&
12042 (!nearest || (clause->data.filter_offset < nearest->data.filter_offset))) {
12047 g_assert (nearest);
12048 if ((ip - header->code) != nearest->handler_offset)
12053 case CEE_UNALIGNED_:
12054 ins_flag |= MONO_INST_UNALIGNED;
12055 /* FIXME: record alignment? we can assume 1 for now */
12059 case CEE_VOLATILE_:
12060 ins_flag |= MONO_INST_VOLATILE;
12064 ins_flag |= MONO_INST_TAILCALL;
12065 cfg->flags |= MONO_CFG_HAS_TAIL;
12066 /* Can't inline tail calls at this time */
12067 inline_costs += 100000;
12074 token = read32 (ip + 2);
12075 klass = mini_get_class (method, token, generic_context);
12076 CHECK_TYPELOAD (klass);
12077 if (generic_class_is_reference_type (cfg, klass))
12078 MONO_EMIT_NEW_STORE_MEMBASE_IMM (cfg, OP_STORE_MEMBASE_IMM, sp [0]->dreg, 0, 0);
12080 mini_emit_initobj (cfg, *sp, NULL, klass);
12084 case CEE_CONSTRAINED_:
12086 token = read32 (ip + 2);
12087 constrained_call = mini_get_class (method, token, generic_context);
12088 CHECK_TYPELOAD (constrained_call);
12092 case CEE_INITBLK: {
12093 MonoInst *iargs [3];
12097 /* Skip optimized paths for volatile operations. */
12098 if ((ip [1] == CEE_CPBLK) && !(ins_flag & MONO_INST_VOLATILE) && (cfg->opt & MONO_OPT_INTRINS) && (sp [2]->opcode == OP_ICONST) && ((n = sp [2]->inst_c0) <= sizeof (gpointer) * 5)) {
12099 mini_emit_memcpy (cfg, sp [0]->dreg, 0, sp [1]->dreg, 0, sp [2]->inst_c0, 0);
12100 } else if ((ip [1] == CEE_INITBLK) && !(ins_flag & MONO_INST_VOLATILE) && (cfg->opt & MONO_OPT_INTRINS) && (sp [2]->opcode == OP_ICONST) && ((n = sp [2]->inst_c0) <= sizeof (gpointer) * 5) && (sp [1]->opcode == OP_ICONST) && (sp [1]->inst_c0 == 0)) {
12101 /* emit_memset only works when val == 0 */
12102 mini_emit_memset (cfg, sp [0]->dreg, 0, sp [2]->inst_c0, sp [1]->inst_c0, 0);
12105 iargs [0] = sp [0];
12106 iargs [1] = sp [1];
12107 iargs [2] = sp [2];
12108 if (ip [1] == CEE_CPBLK) {
12110 * FIXME: It's unclear whether we should be emitting both the acquire
12111 * and release barriers for cpblk. It is technically both a load and
12112 * store operation, so it seems like that's the sensible thing to do.
12114 MonoMethod *memcpy_method = get_memcpy_method ();
12115 if (ins_flag & MONO_INST_VOLATILE) {
12116 /* Volatile stores have release semantics, see 12.6.7 in Ecma 335 */
12117 /* FIXME it's questionable if acquire semantics require full barrier or just LoadLoad*/
12118 emit_memory_barrier (cfg, FullBarrier);
12120 call = mono_emit_method_call (cfg, memcpy_method, iargs, NULL);
12121 call->flags |= ins_flag;
12122 if (ins_flag & MONO_INST_VOLATILE) {
12123 /* Volatile loads have acquire semantics, see 12.6.7 in Ecma 335 */
12124 /* FIXME it's questionable if acquire semantics require full barrier or just LoadLoad*/
12125 emit_memory_barrier (cfg, FullBarrier);
12128 MonoMethod *memset_method = get_memset_method ();
12129 if (ins_flag & MONO_INST_VOLATILE) {
12130 /* Volatile stores have release semantics, see 12.6.7 in Ecma 335 */
12131 /* FIXME it's questionable if acquire semantics require full barrier or just LoadLoad*/
12132 emit_memory_barrier (cfg, FullBarrier);
12134 call = mono_emit_method_call (cfg, memset_method, iargs, NULL);
12135 call->flags |= ins_flag;
12146 ins_flag |= MONO_INST_NOTYPECHECK;
12148 ins_flag |= MONO_INST_NORANGECHECK;
12149 /* we ignore the no-nullcheck for now since we
12150 * really do it explicitly only when doing callvirt->call
12154 case CEE_RETHROW: {
12156 int handler_offset = -1;
12158 for (i = 0; i < header->num_clauses; ++i) {
12159 MonoExceptionClause *clause = &header->clauses [i];
12160 if (MONO_OFFSET_IN_HANDLER (clause, ip - header->code) && !(clause->flags & MONO_EXCEPTION_CLAUSE_FINALLY)) {
12161 handler_offset = clause->handler_offset;
12166 bblock->flags |= BB_EXCEPTION_UNSAFE;
12168 if (handler_offset == -1)
12171 EMIT_NEW_TEMPLOAD (cfg, load, mono_find_exvar_for_offset (cfg, handler_offset)->inst_c0);
12172 MONO_INST_NEW (cfg, ins, OP_RETHROW);
12173 ins->sreg1 = load->dreg;
12174 MONO_ADD_INS (bblock, ins);
12176 MONO_INST_NEW (cfg, ins, OP_NOT_REACHED);
12177 MONO_ADD_INS (bblock, ins);
12180 link_bblock (cfg, bblock, end_bblock);
12181 start_new_bblock = 1;
12189 CHECK_STACK_OVF (1);
12191 token = read32 (ip + 2);
12192 if (mono_metadata_token_table (token) == MONO_TABLE_TYPESPEC && !image_is_dynamic (method->klass->image) && !generic_context) {
12193 MonoType *type = mono_type_create_from_typespec_checked (image, token, &cfg->error);
12196 val = mono_type_size (type, &ialign);
12198 MonoClass *klass = mini_get_class (method, token, generic_context);
12199 CHECK_TYPELOAD (klass);
12201 val = mono_type_size (&klass->byval_arg, &ialign);
12203 if (mini_is_gsharedvt_klass (cfg, klass))
12204 GSHAREDVT_FAILURE (*ip);
12206 EMIT_NEW_ICONST (cfg, ins, val);
12211 case CEE_REFANYTYPE: {
12212 MonoInst *src_var, *src;
12214 GSHAREDVT_FAILURE (*ip);
12220 src_var = get_vreg_to_inst (cfg, sp [0]->dreg);
12222 src_var = mono_compile_create_var_for_vreg (cfg, &mono_defaults.typed_reference_class->byval_arg, OP_LOCAL, sp [0]->dreg);
12223 EMIT_NEW_VARLOADA (cfg, src, src_var, src_var->inst_vtype);
12224 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, ins, &mono_defaults.typehandle_class->byval_arg, src->dreg, MONO_STRUCT_OFFSET (MonoTypedRef, type));
12229 case CEE_READONLY_:
12242 g_warning ("opcode 0xfe 0x%02x not handled", ip [1]);
12252 g_warning ("opcode 0x%02x not handled", *ip);
12256 if (start_new_bblock != 1)
12259 bblock->cil_length = ip - bblock->cil_code;
12260 if (bblock->next_bb) {
12261 /* This could already be set because of inlining, #693905 */
12262 MonoBasicBlock *bb = bblock;
12264 while (bb->next_bb)
12266 bb->next_bb = end_bblock;
12268 bblock->next_bb = end_bblock;
12271 if (cfg->method == method && cfg->domainvar) {
12273 MonoInst *get_domain;
12275 cfg->cbb = init_localsbb;
12277 if ((get_domain = mono_get_domain_intrinsic (cfg))) {
12278 MONO_ADD_INS (cfg->cbb, get_domain);
12280 get_domain = mono_emit_jit_icall (cfg, mono_domain_get, NULL);
12282 NEW_TEMPSTORE (cfg, store, cfg->domainvar->inst_c0, get_domain);
12283 MONO_ADD_INS (cfg->cbb, store);
12286 #if defined(TARGET_POWERPC) || defined(TARGET_X86)
12287 if (cfg->compile_aot)
12288 /* FIXME: The plt slots require a GOT var even if the method doesn't use it */
12289 mono_get_got_var (cfg);
12292 if (cfg->method == method && cfg->got_var)
12293 mono_emit_load_got_addr (cfg);
12295 if (init_localsbb) {
12296 cfg->cbb = init_localsbb;
12298 for (i = 0; i < header->num_locals; ++i) {
12299 emit_init_local (cfg, i, header->locals [i], init_locals);
12303 if (cfg->init_ref_vars && cfg->method == method) {
12304 /* Emit initialization for ref vars */
12305 // FIXME: Avoid duplication initialization for IL locals.
12306 for (i = 0; i < cfg->num_varinfo; ++i) {
12307 MonoInst *ins = cfg->varinfo [i];
12309 if (ins->opcode == OP_LOCAL && ins->type == STACK_OBJ)
12310 MONO_EMIT_NEW_PCONST (cfg, ins->dreg, NULL);
12314 if (cfg->lmf_var && cfg->method == method) {
12315 cfg->cbb = init_localsbb;
12316 emit_push_lmf (cfg);
12319 cfg->cbb = init_localsbb;
12320 emit_instrumentation_call (cfg, mono_profiler_method_enter);
12323 MonoBasicBlock *bb;
12326 * Make seq points at backward branch targets interruptable.
12328 for (bb = cfg->bb_entry; bb; bb = bb->next_bb)
12329 if (bb->code && bb->in_count > 1 && bb->code->opcode == OP_SEQ_POINT)
12330 bb->code->flags |= MONO_INST_SINGLE_STEP_LOC;
12333 /* Add a sequence point for method entry/exit events */
12335 NEW_SEQ_POINT (cfg, ins, METHOD_ENTRY_IL_OFFSET, FALSE);
12336 MONO_ADD_INS (init_localsbb, ins);
12337 NEW_SEQ_POINT (cfg, ins, METHOD_EXIT_IL_OFFSET, FALSE);
12338 MONO_ADD_INS (cfg->bb_exit, ins);
12342 * Add seq points for IL offsets which have line number info but for which no seq point was generated during JITting because
12343 * the code they refer to was dead (#11880).
12345 if (sym_seq_points) {
12346 for (i = 0; i < header->code_size; ++i) {
12347 if (mono_bitset_test_fast (seq_point_locs, i) && !mono_bitset_test_fast (seq_point_set_locs, i)) {
12350 NEW_SEQ_POINT (cfg, ins, i, FALSE);
12351 mono_add_seq_point (cfg, NULL, ins, SEQ_POINT_NATIVE_OFFSET_DEAD_CODE);
12358 if (cfg->method == method) {
12359 MonoBasicBlock *bb;
12360 for (bb = cfg->bb_entry; bb; bb = bb->next_bb) {
12361 bb->region = mono_find_block_region (cfg, bb->real_offset);
12363 mono_create_spvar_for_region (cfg, bb->region);
12364 if (cfg->verbose_level > 2)
12365 printf ("REGION BB%d IL_%04x ID_%08X\n", bb->block_num, bb->real_offset, bb->region);
12369 if (inline_costs < 0) {
12372 /* Method is too large */
12373 mname = mono_method_full_name (method, TRUE);
12374 mono_cfg_set_exception (cfg, MONO_EXCEPTION_INVALID_PROGRAM);
12375 cfg->exception_message = g_strdup_printf ("Method %s is too complex.", mname);
12379 if ((cfg->verbose_level > 2) && (cfg->method == method))
12380 mono_print_code (cfg, "AFTER METHOD-TO-IR");
12385 g_assert (!mono_error_ok (&cfg->error));
12389 g_assert (cfg->exception_type != MONO_EXCEPTION_NONE);
12393 set_exception_type_from_invalid_il (cfg, method, ip);
12397 g_slist_free (class_inits);
12398 mono_basic_block_free (original_bb);
12399 cfg->dont_inline = g_list_remove (cfg->dont_inline, method);
12400 cfg->headers_to_free = g_slist_prepend_mempool (cfg->mempool, cfg->headers_to_free, header);
12401 if (cfg->exception_type)
12404 return inline_costs;
12408 store_membase_reg_to_store_membase_imm (int opcode)
12411 case OP_STORE_MEMBASE_REG:
12412 return OP_STORE_MEMBASE_IMM;
12413 case OP_STOREI1_MEMBASE_REG:
12414 return OP_STOREI1_MEMBASE_IMM;
12415 case OP_STOREI2_MEMBASE_REG:
12416 return OP_STOREI2_MEMBASE_IMM;
12417 case OP_STOREI4_MEMBASE_REG:
12418 return OP_STOREI4_MEMBASE_IMM;
12419 case OP_STOREI8_MEMBASE_REG:
12420 return OP_STOREI8_MEMBASE_IMM;
12422 g_assert_not_reached ();
/*
 * mono_op_to_op_imm:
 *
 *   Map an IR opcode to the variant which takes an immediate as its
 * second operand (e.g. OP_IADD -> OP_IADD_IMM).
 * NOTE(review): this excerpt omits the switch scaffolding and the
 * matching 'case' labels; only the returned opcodes are visible below.
 * Presumably there is a fallback return for opcodes without an
 * immediate form -- confirm against the full source.
 */
12429 mono_op_to_op_imm (int opcode)
/* 32 bit integer arithmetic/logic/shift ops -> their *_IMM forms */
12433 return OP_IADD_IMM;
12435 return OP_ISUB_IMM;
12437 return OP_IDIV_IMM;
12439 return OP_IDIV_UN_IMM;
12441 return OP_IREM_IMM;
12443 return OP_IREM_UN_IMM;
12445 return OP_IMUL_IMM;
12447 return OP_IAND_IMM;
12451 return OP_IXOR_IMM;
12453 return OP_ISHL_IMM;
12455 return OP_ISHR_IMM;
12457 return OP_ISHR_UN_IMM;
/* 64 bit integer ops -> their *_IMM forms */
12460 return OP_LADD_IMM;
12462 return OP_LSUB_IMM;
12464 return OP_LAND_IMM;
12468 return OP_LXOR_IMM;
12470 return OP_LSHL_IMM;
12472 return OP_LSHR_IMM;
12474 return OP_LSHR_UN_IMM;
/* the 64 bit remainder immediate form is only available on 64 bit registers */
12475 #if SIZEOF_REGISTER == 8
12477 return OP_LREM_IMM;
/* compares */
12481 return OP_COMPARE_IMM;
12483 return OP_ICOMPARE_IMM;
12485 return OP_LCOMPARE_IMM;
/* membase stores: register source -> immediate source */
12487 case OP_STORE_MEMBASE_REG:
12488 return OP_STORE_MEMBASE_IMM;
12489 case OP_STOREI1_MEMBASE_REG:
12490 return OP_STOREI1_MEMBASE_IMM;
12491 case OP_STOREI2_MEMBASE_REG:
12492 return OP_STOREI2_MEMBASE_IMM;
12493 case OP_STOREI4_MEMBASE_REG:
12494 return OP_STOREI4_MEMBASE_IMM;
/* x86/amd64-only opcodes */
12496 #if defined(TARGET_X86) || defined (TARGET_AMD64)
12498 return OP_X86_PUSH_IMM;
12499 case OP_X86_COMPARE_MEMBASE_REG:
12500 return OP_X86_COMPARE_MEMBASE_IMM;
12502 #if defined(TARGET_AMD64)
12503 case OP_AMD64_ICOMPARE_MEMBASE_REG:
12504 return OP_AMD64_ICOMPARE_MEMBASE_IMM;
/* an indirect call through a register degenerates to a direct call */
12506 case OP_VOIDCALL_REG:
12507 return OP_VOIDCALL;
/* localloc with a constant size */
12515 return OP_LOCALLOC_IMM;
12522 ldind_to_load_membase (int opcode)
12526 return OP_LOADI1_MEMBASE;
12528 return OP_LOADU1_MEMBASE;
12530 return OP_LOADI2_MEMBASE;
12532 return OP_LOADU2_MEMBASE;
12534 return OP_LOADI4_MEMBASE;
12536 return OP_LOADU4_MEMBASE;
12538 return OP_LOAD_MEMBASE;
12539 case CEE_LDIND_REF:
12540 return OP_LOAD_MEMBASE;
12542 return OP_LOADI8_MEMBASE;
12544 return OP_LOADR4_MEMBASE;
12546 return OP_LOADR8_MEMBASE;
12548 g_assert_not_reached ();
12555 stind_to_store_membase (int opcode)
12559 return OP_STOREI1_MEMBASE_REG;
12561 return OP_STOREI2_MEMBASE_REG;
12563 return OP_STOREI4_MEMBASE_REG;
12565 case CEE_STIND_REF:
12566 return OP_STORE_MEMBASE_REG;
12568 return OP_STOREI8_MEMBASE_REG;
12570 return OP_STORER4_MEMBASE_REG;
12572 return OP_STORER8_MEMBASE_REG;
12574 g_assert_not_reached ();
/*
 * mono_load_membase_to_load_mem:
 *
 *   Map a *_MEMBASE load (base register + displacement) to the
 * corresponding *_MEM variant, which only x86/amd64 provide.
 * NOTE(review): the switch scaffolding and the fallback return are not
 * visible in this excerpt; presumably the *_MEM forms load from an
 * absolute address -- confirm against mini-ops definitions.
 */
12581 mono_load_membase_to_load_mem (int opcode)
12583 // FIXME: Add a MONO_ARCH_HAVE_LOAD_MEM macro
12584 #if defined(TARGET_X86) || defined(TARGET_AMD64)
12586 case OP_LOAD_MEMBASE:
12587 return OP_LOAD_MEM;
12588 case OP_LOADU1_MEMBASE:
12589 return OP_LOADU1_MEM;
12590 case OP_LOADU2_MEMBASE:
12591 return OP_LOADU2_MEM;
12592 case OP_LOADI4_MEMBASE:
12593 return OP_LOADI4_MEM;
12594 case OP_LOADU4_MEMBASE:
12595 return OP_LOADU4_MEM;
/* 64 bit loads only exist when the registers are 64 bits wide */
12596 #if SIZEOF_REGISTER == 8
12597 case OP_LOADI8_MEMBASE:
12598 return OP_LOADI8_MEM;
/*
 * op_to_op_dest_membase:
 *
 *   Given an ALU opcode and the store opcode which consumes its result,
 * return an x86/amd64 opcode performing the operation directly on the
 * memory destination (a read-modify-write instruction), so the separate
 * ALU op + store pair can be fused.
 * NOTE(review): most 'case' labels and the fallback return are elided
 * in this excerpt; only the store-opcode guards and the returned
 * opcodes are visible.
 */
12607 op_to_op_dest_membase (int store_opcode, int opcode)
12609 #if defined(TARGET_X86)
/* only pointer-sized / 32 bit stores can be fused on x86 */
12610 if (!((store_opcode == OP_STORE_MEMBASE_REG) || (store_opcode == OP_STOREI4_MEMBASE_REG)))
/* register-source ALU ops */
12615 return OP_X86_ADD_MEMBASE_REG;
12617 return OP_X86_SUB_MEMBASE_REG;
12619 return OP_X86_AND_MEMBASE_REG;
12621 return OP_X86_OR_MEMBASE_REG;
12623 return OP_X86_XOR_MEMBASE_REG;
/* immediate-source ALU ops */
12626 return OP_X86_ADD_MEMBASE_IMM;
12629 return OP_X86_SUB_MEMBASE_IMM;
12632 return OP_X86_AND_MEMBASE_IMM;
12635 return OP_X86_OR_MEMBASE_IMM;
12638 return OP_X86_XOR_MEMBASE_IMM;
12644 #if defined(TARGET_AMD64)
/* amd64 additionally allows 64 bit stores to be fused */
12645 if (!((store_opcode == OP_STORE_MEMBASE_REG) || (store_opcode == OP_STOREI4_MEMBASE_REG) || (store_opcode == OP_STOREI8_MEMBASE_REG)))
/* 32 bit register-source ops */
12650 return OP_X86_ADD_MEMBASE_REG;
12652 return OP_X86_SUB_MEMBASE_REG;
12654 return OP_X86_AND_MEMBASE_REG;
12656 return OP_X86_OR_MEMBASE_REG;
12658 return OP_X86_XOR_MEMBASE_REG;
/* 32 bit immediate-source ops */
12660 return OP_X86_ADD_MEMBASE_IMM;
12662 return OP_X86_SUB_MEMBASE_IMM;
12664 return OP_X86_AND_MEMBASE_IMM;
12666 return OP_X86_OR_MEMBASE_IMM;
12668 return OP_X86_XOR_MEMBASE_IMM;
/* 64 bit register-source ops */
12670 return OP_AMD64_ADD_MEMBASE_REG;
12672 return OP_AMD64_SUB_MEMBASE_REG;
12674 return OP_AMD64_AND_MEMBASE_REG;
12676 return OP_AMD64_OR_MEMBASE_REG;
12678 return OP_AMD64_XOR_MEMBASE_REG;
/* 64 bit immediate-source ops */
12681 return OP_AMD64_ADD_MEMBASE_IMM;
12684 return OP_AMD64_SUB_MEMBASE_IMM;
12687 return OP_AMD64_AND_MEMBASE_IMM;
12690 return OP_AMD64_OR_MEMBASE_IMM;
12693 return OP_AMD64_XOR_MEMBASE_IMM;
/*
 * op_to_op_store_membase:
 *
 *   Fuse a compare-result opcode with the byte store that would follow it:
 * when the result is stored with OP_STOREI1_MEMBASE_REG, the x86/amd64
 * SETcc-to-memory forms (OP_X86_SETEQ_MEMBASE / OP_X86_SETNE_MEMBASE) can
 * write the flag byte directly to memory.
 * NOTE(review): the case labels selecting between the two returns (presumably
 * CEQ-like vs its negation) and the -1 fallback are not visible here.
 */
12703 op_to_op_store_membase (int store_opcode, int opcode)
12705 #if defined(TARGET_X86) || defined(TARGET_AMD64)
12708 if (store_opcode == OP_STOREI1_MEMBASE_REG)
12709 return OP_X86_SETEQ_MEMBASE;
12711 if (store_opcode == OP_STOREI1_MEMBASE_REG)
12712 return OP_X86_SETNE_MEMBASE;
/*
 * op_to_op_src1_membase:
 *
 *   Given an instruction whose first source would be produced by LOAD_OPCODE,
 * return a variant that reads that operand directly from memory (push/compare
 * with a membase source on x86/amd64), avoiding a separate load.
 *   The guards only allow loads whose in-register width matches what the
 * fused instruction would read (pointer-sized / i4 / u4, plus i8 on amd64);
 * the __mono_ilp32__ branches account for OP_LOAD_MEMBASE being 4 bytes
 * under ILP32 but 8 bytes otherwise.
 *   The "sign extension issues" FIXMEs refer to fusing an OP_ICOMPARE_IMM
 * against a u1 memory operand: the 8-bit memory compare form does not widen
 * the way a separate zero-extending load would.
 * NOTE(review): several early-out returns, case labels, and the final -1
 * fallback are not visible in this extract.
 */
12720 op_to_op_src1_membase (int load_opcode, int opcode)
12723 /* FIXME: This has sign extension issues */
12725 if ((opcode == OP_ICOMPARE_IMM) && (load_opcode == OP_LOADU1_MEMBASE))
12726 return OP_X86_COMPARE_MEMBASE8_IMM;
12729 if (!((load_opcode == OP_LOAD_MEMBASE) || (load_opcode == OP_LOADI4_MEMBASE) || (load_opcode == OP_LOADU4_MEMBASE)))
12734 return OP_X86_PUSH_MEMBASE;
12735 case OP_COMPARE_IMM:
12736 case OP_ICOMPARE_IMM:
12737 return OP_X86_COMPARE_MEMBASE_IMM;
12740 return OP_X86_COMPARE_MEMBASE_REG;
12744 #ifdef TARGET_AMD64
12745 /* FIXME: This has sign extension issues */
12747 if ((opcode == OP_ICOMPARE_IMM) && (load_opcode == OP_LOADU1_MEMBASE))
12748 return OP_X86_COMPARE_MEMBASE8_IMM;
12753 #ifdef __mono_ilp32__
12754 if (load_opcode == OP_LOADI8_MEMBASE)
12756 if ((load_opcode == OP_LOAD_MEMBASE) || (load_opcode == OP_LOADI8_MEMBASE))
12758 return OP_X86_PUSH_MEMBASE;
12760 /* FIXME: This only works for 32 bit immediates
12761 case OP_COMPARE_IMM:
12762 case OP_LCOMPARE_IMM:
12763 if ((load_opcode == OP_LOAD_MEMBASE) || (load_opcode == OP_LOADI8_MEMBASE))
12764 return OP_AMD64_COMPARE_MEMBASE_IMM;
12766 case OP_ICOMPARE_IMM:
12767 if ((load_opcode == OP_LOADI4_MEMBASE) || (load_opcode == OP_LOADU4_MEMBASE))
12768 return OP_AMD64_ICOMPARE_MEMBASE_IMM;
12772 #ifdef __mono_ilp32__
12773 if (load_opcode == OP_LOAD_MEMBASE)
12774 return OP_AMD64_ICOMPARE_MEMBASE_REG;
12775 if (load_opcode == OP_LOADI8_MEMBASE)
12777 if ((load_opcode == OP_LOAD_MEMBASE) || (load_opcode == OP_LOADI8_MEMBASE))
12779 return OP_AMD64_COMPARE_MEMBASE_REG;
12782 if ((load_opcode == OP_LOADI4_MEMBASE) || (load_opcode == OP_LOADU4_MEMBASE))
12783 return OP_AMD64_ICOMPARE_MEMBASE_REG;
/*
 * op_to_op_src2_membase:
 *
 *   Counterpart of op_to_op_src1_membase for the *second* source operand:
 * return a reg,[mem] form of compare/add/sub/and/or/xor so the operand
 * loaded by LOAD_OPCODE can be read straight from memory on x86/amd64.
 *   On amd64 the 32-bit (OP_X86_* / OP_AMD64_ICOMPARE_*) and 64-bit
 * (OP_AMD64_*) families are selected by the width of the load; under
 * __mono_ilp32__ OP_LOAD_MEMBASE counts as 32-bit, otherwise as 64-bit.
 * NOTE(review): the switch headers, case labels and the -1 fallback are not
 * visible in this extract.
 */
12792 op_to_op_src2_membase (int load_opcode, int opcode)
12795 if (!((load_opcode == OP_LOAD_MEMBASE) || (load_opcode == OP_LOADI4_MEMBASE) || (load_opcode == OP_LOADU4_MEMBASE)))
12801 return OP_X86_COMPARE_REG_MEMBASE;
12803 return OP_X86_ADD_REG_MEMBASE;
12805 return OP_X86_SUB_REG_MEMBASE;
12807 return OP_X86_AND_REG_MEMBASE;
12809 return OP_X86_OR_REG_MEMBASE;
12811 return OP_X86_XOR_REG_MEMBASE;
12815 #ifdef TARGET_AMD64
12816 #ifdef __mono_ilp32__
12817 if ((load_opcode == OP_LOADI4_MEMBASE) || (load_opcode == OP_LOADU4_MEMBASE) || (load_opcode == OP_LOAD_MEMBASE) ) {
12819 if ((load_opcode == OP_LOADI4_MEMBASE) || (load_opcode == OP_LOADU4_MEMBASE)) {
12823 return OP_AMD64_ICOMPARE_REG_MEMBASE;
12825 return OP_X86_ADD_REG_MEMBASE;
12827 return OP_X86_SUB_REG_MEMBASE;
12829 return OP_X86_AND_REG_MEMBASE;
12831 return OP_X86_OR_REG_MEMBASE;
12833 return OP_X86_XOR_REG_MEMBASE;
12835 #ifdef __mono_ilp32__
12836 } else if (load_opcode == OP_LOADI8_MEMBASE) {
12838 } else if ((load_opcode == OP_LOADI8_MEMBASE) || (load_opcode == OP_LOAD_MEMBASE)) {
12843 return OP_AMD64_COMPARE_REG_MEMBASE;
12845 return OP_AMD64_ADD_REG_MEMBASE;
12847 return OP_AMD64_SUB_REG_MEMBASE;
12849 return OP_AMD64_AND_REG_MEMBASE;
12851 return OP_AMD64_OR_REG_MEMBASE;
12853 return OP_AMD64_XOR_REG_MEMBASE;
/*
 * mono_op_to_op_imm_noemul:
 *
 *   Like mono_op_to_op_imm (), but refuses to convert opcodes whose
 * immediate form would be software-emulated on this target (long shifts on
 * 32-bit registers, and mul/div when MONO_ARCH_EMULATE_* is defined) —
 * for those the plain reg/reg form is kept so the emulation path applies.
 * NOTE(review): the opcode lists guarded by each #if, and the early returns
 * for the excluded opcodes, are not visible in this extract.
 */
12862 mono_op_to_op_imm_noemul (int opcode)
12865 #if SIZEOF_REGISTER == 4 && !defined(MONO_ARCH_NO_EMULATE_LONG_SHIFT_OPS)
12871 #if defined(MONO_ARCH_EMULATE_MUL_DIV) || defined(MONO_ARCH_EMULATE_DIV)
12878 #if defined(MONO_ARCH_EMULATE_MUL_DIV)
12883 return mono_op_to_op_imm (opcode);
12888 * mono_handle_global_vregs:
12890 * Make vregs used in more than one bblock 'global', i.e. allocate a variable
12894 mono_handle_global_vregs (MonoCompile *cfg)
12896 gint32 *vreg_to_bb;
12897 MonoBasicBlock *bb;
/*
 * vreg_to_bb [vreg] holds 0 (unseen), block_num + 1 (seen in exactly one
 * bblock) or -1 (seen in several bblocks).
 * NOTE(review): `sizeof (gint32*)` looks wrong for an array of gint32 —
 * should presumably be `sizeof (gint32)` — and `+ 1` binds outside the
 * multiplication, so this adds one *byte*, not one element.  Harmless
 * over-allocation as written, but worth confirming against upstream.
 */
12900 vreg_to_bb = mono_mempool_alloc0 (cfg->mempool, sizeof (gint32*) * cfg->next_vreg + 1);
12902 #ifdef MONO_ARCH_SIMD_INTRINSICS
12903 if (cfg->uses_simd_intrinsics)
12904 mono_simd_simplify_indirection (cfg);
12907 /* Find local vregs used in more than one bb */
12908 for (bb = cfg->bb_entry; bb; bb = bb->next_bb) {
12909 MonoInst *ins = bb->code;
12910 int block_num = bb->block_num;
12912 if (cfg->verbose_level > 2)
12913 printf ("\nHANDLE-GLOBAL-VREGS BLOCK %d:\n", bb->block_num);
12916 for (; ins; ins = ins->next) {
12917 const char *spec = INS_INFO (ins->opcode);
12918 int regtype = 0, regindex;
12921 if (G_UNLIKELY (cfg->verbose_level > 2))
12922 mono_print_ins (ins);
/* Only decomposed (machine-level) opcodes are expected at this point */
12924 g_assert (ins->opcode >= MONO_CEE_LAST);
/* Walk dreg, sreg1, sreg2, sreg3 of this instruction, in that order */
12926 for (regindex = 0; regindex < 4; regindex ++) {
12929 if (regindex == 0) {
12930 regtype = spec [MONO_INST_DEST];
12931 if (regtype == ' ')
12934 } else if (regindex == 1) {
12935 regtype = spec [MONO_INST_SRC1];
12936 if (regtype == ' ')
12939 } else if (regindex == 2) {
12940 regtype = spec [MONO_INST_SRC2];
12941 if (regtype == ' ')
12944 } else if (regindex == 3) {
12945 regtype = spec [MONO_INST_SRC3];
12946 if (regtype == ' ')
12951 #if SIZEOF_REGISTER == 4
12952 /* In the LLVM case, the long opcodes are not decomposed */
12953 if (regtype == 'l' && !COMPILE_LLVM (cfg)) {
12955 * Since some instructions reference the original long vreg,
12956 * and some reference the two component vregs, it is quite hard
12957 * to determine when it needs to be global. So be conservative.
12959 if (!get_vreg_to_inst (cfg, vreg)) {
12960 mono_compile_create_var_for_vreg (cfg, &mono_defaults.int64_class->byval_arg, OP_LOCAL, vreg);
12962 if (cfg->verbose_level > 2)
12963 printf ("LONG VREG R%d made global.\n", vreg);
12967 * Make the component vregs volatile since the optimizations can
12968 * get confused otherwise.
/* vreg + 1 / vreg + 2 are the low/high 32-bit halves of the long */
12970 get_vreg_to_inst (cfg, vreg + 1)->flags |= MONO_INST_VOLATILE;
12971 get_vreg_to_inst (cfg, vreg + 2)->flags |= MONO_INST_VOLATILE;
12975 g_assert (vreg != -1);
12977 prev_bb = vreg_to_bb [vreg];
12978 if (prev_bb == 0) {
12979 /* 0 is a valid block num */
12980 vreg_to_bb [vreg] = block_num + 1;
12981 } else if ((prev_bb != block_num + 1) && (prev_bb != -1)) {
/* Hardware registers are never turned into variables */
12982 if (((regtype == 'i' && (vreg < MONO_MAX_IREGS))) || (regtype == 'f' && (vreg < MONO_MAX_FREGS)))
12985 if (!get_vreg_to_inst (cfg, vreg)) {
12986 if (G_UNLIKELY (cfg->verbose_level > 2))
12987 printf ("VREG R%d used in BB%d and BB%d made global.\n", vreg, vreg_to_bb [vreg], block_num);
/* Create the variable with a type matching the regtype spec character */
12991 if (vreg_is_ref (cfg, vreg))
12992 mono_compile_create_var_for_vreg (cfg, &mono_defaults.object_class->byval_arg, OP_LOCAL, vreg);
12994 mono_compile_create_var_for_vreg (cfg, &mono_defaults.int_class->byval_arg, OP_LOCAL, vreg);
12997 mono_compile_create_var_for_vreg (cfg, &mono_defaults.int64_class->byval_arg, OP_LOCAL, vreg);
13000 mono_compile_create_var_for_vreg (cfg, &mono_defaults.double_class->byval_arg, OP_LOCAL, vreg);
13003 mono_compile_create_var_for_vreg (cfg, &ins->klass->byval_arg, OP_LOCAL, vreg);
13006 g_assert_not_reached ();
13010 /* Flag as having been used in more than one bb */
13011 vreg_to_bb [vreg] = -1;
13017 /* If a variable is used in only one bblock, convert it into a local vreg */
13018 for (i = 0; i < cfg->num_varinfo; i++) {
13019 MonoInst *var = cfg->varinfo [i];
13020 MonoMethodVar *vmv = MONO_VARINFO (cfg, i);
13022 switch (var->type) {
13028 #if SIZEOF_REGISTER == 8
13031 #if !defined(TARGET_X86)
13032 /* Enabling this screws up the fp stack on x86 */
13035 if (mono_arch_is_soft_float ())
13038 /* Arguments are implicitly global */
13039 /* Putting R4 vars into registers doesn't work currently */
13040 /* The gsharedvt vars are implicitly referenced by ldaddr opcodes, but those opcodes are only generated later */
13041 if ((var->opcode != OP_ARG) && (var != cfg->ret) && !(var->flags & (MONO_INST_VOLATILE|MONO_INST_INDIRECT)) && (vreg_to_bb [var->dreg] != -1) && (var->klass->byval_arg.type != MONO_TYPE_R4) && !cfg->disable_vreg_to_lvreg && var != cfg->gsharedvt_info_var && var != cfg->gsharedvt_locals_var && var != cfg->lmf_addr_var) {
13043 * Make sure that the variable's liveness interval doesn't contain a call, since
13044 * that would cause the lvreg to be spilled, making the whole optimization
13047 /* This is too slow for JIT compilation */
13049 if (cfg->compile_aot && vreg_to_bb [var->dreg]) {
13051 int def_index, call_index, ins_index;
13052 gboolean spilled = FALSE;
13057 for (ins = vreg_to_bb [var->dreg]->code; ins; ins = ins->next) {
13058 const char *spec = INS_INFO (ins->opcode);
13060 if ((spec [MONO_INST_DEST] != ' ') && (ins->dreg == var->dreg))
13061 def_index = ins_index;
/*
 * NOTE(review): the second clause of this || duplicates the first
 * (SRC1/sreg1 twice).  It almost certainly was meant to test
 * spec [MONO_INST_SRC2] / ins->sreg2 — as written, uses of the
 * variable through sreg2 escape the call-interval check.  Compare
 * against upstream before changing.
 */
13063 if (((spec [MONO_INST_SRC1] != ' ') && (ins->sreg1 == var->dreg)) ||
13064 ((spec [MONO_INST_SRC1] != ' ') && (ins->sreg1 == var->dreg))) {
13065 if (call_index > def_index) {
13071 if (MONO_IS_CALL (ins))
13072 call_index = ins_index;
13082 if (G_UNLIKELY (cfg->verbose_level > 2))
13083 printf ("CONVERTED R%d(%d) TO VREG.\n", var->dreg, vmv->idx);
/* The variable record is dead; the raw vreg will be used directly */
13084 var->flags |= MONO_INST_IS_DEAD;
13085 cfg->vreg_to_inst [var->dreg] = NULL;
13092 * Compress the varinfo and vars tables so the liveness computation is faster and
13093 * takes up less space.
13096 for (i = 0; i < cfg->num_varinfo; ++i) {
13097 MonoInst *var = cfg->varinfo [i];
13098 if (pos < i && cfg->locals_start == i)
13099 cfg->locals_start = pos;
13100 if (!(var->flags & MONO_INST_IS_DEAD)) {
13102 cfg->varinfo [pos] = cfg->varinfo [i];
13103 cfg->varinfo [pos]->inst_c0 = pos;
13104 memcpy (&cfg->vars [pos], &cfg->vars [i], sizeof (MonoMethodVar));
13105 cfg->vars [pos].idx = pos;
13106 #if SIZEOF_REGISTER == 4
13107 if (cfg->varinfo [pos]->type == STACK_I8) {
13108 /* Modify the two component vars too */
13111 var1 = get_vreg_to_inst (cfg, cfg->varinfo [pos]->dreg + 1);
13112 var1->inst_c0 = pos;
13113 var1 = get_vreg_to_inst (cfg, cfg->varinfo [pos]->dreg + 2);
13114 var1->inst_c0 = pos;
13121 cfg->num_varinfo = pos;
13122 if (cfg->locals_start > cfg->num_varinfo)
13123 cfg->locals_start = cfg->num_varinfo;
13127 * mono_spill_global_vars:
13129 * Generate spill code for variables which are not allocated to registers,
13130 * and replace vregs with their allocated hregs. *need_local_opts is set to TRUE if
13131 * code is generated which could be optimized by the local optimization passes.
13134 mono_spill_global_vars (MonoCompile *cfg, gboolean *need_local_opts)
13136 MonoBasicBlock *bb;
13138 int orig_next_vreg;
13139 guint32 *vreg_to_lvreg;
13141 guint32 i, lvregs_len;
13142 gboolean dest_has_lvreg = FALSE;
13143 guint32 stacktypes [128];
13144 MonoInst **live_range_start, **live_range_end;
13145 MonoBasicBlock **live_range_start_bb, **live_range_end_bb;
13146 int *gsharedvt_vreg_to_idx = NULL;
13148 *need_local_opts = FALSE;
13150 memset (spec2, 0, sizeof (spec2));
13152 /* FIXME: Move this function to mini.c */
/* Map a spec character ('i'/'l'/'f'/'x') to the stack type used by alloc_dreg () */
13153 stacktypes ['i'] = STACK_PTR;
13154 stacktypes ['l'] = STACK_I8;
13155 stacktypes ['f'] = STACK_R8;
13156 #ifdef MONO_ARCH_SIMD_INTRINSICS
13157 stacktypes ['x'] = STACK_VTYPE;
13160 #if SIZEOF_REGISTER == 4
13161 /* Create MonoInsts for longs */
13162 for (i = 0; i < cfg->num_varinfo; i++) {
13163 MonoInst *ins = cfg->varinfo [i];
13165 if ((ins->opcode != OP_REGVAR) && !(ins->flags & MONO_INST_IS_DEAD)) {
13166 switch (ins->type) {
13171 if (ins->type == STACK_R8 && !COMPILE_SOFT_FLOAT (cfg))
13174 g_assert (ins->opcode == OP_REGOFFSET);
/* Give the low/high half component vregs (dreg + 1 / dreg + 2) stack slots
 * inside the parent's slot, at the LS/MS word offsets respectively. */
13176 tree = get_vreg_to_inst (cfg, ins->dreg + 1);
13178 tree->opcode = OP_REGOFFSET;
13179 tree->inst_basereg = ins->inst_basereg;
13180 tree->inst_offset = ins->inst_offset + MINI_LS_WORD_OFFSET;
13182 tree = get_vreg_to_inst (cfg, ins->dreg + 2);
13184 tree->opcode = OP_REGOFFSET;
13185 tree->inst_basereg = ins->inst_basereg;
13186 tree->inst_offset = ins->inst_offset + MINI_MS_WORD_OFFSET;
13196 if (cfg->compute_gc_maps) {
13197 /* registers need liveness info even for !non refs */
13198 for (i = 0; i < cfg->num_varinfo; i++) {
13199 MonoInst *ins = cfg->varinfo [i];
13201 if (ins->opcode == OP_REGVAR)
13202 ins->flags |= MONO_INST_GC_TRACK;
/*
 * gsharedvt (generic sharing for valuetypes): variables whose size is only
 * known at runtime.  gsharedvt_vreg_to_idx [vreg] is 0 (not gsharedvt),
 * idx + 1 (a gsharedvt local, offset looked up at runtime), or -1
 * (a gsharedvt argument passed by ref).
 */
13206 if (cfg->gsharedvt) {
13207 gsharedvt_vreg_to_idx = mono_mempool_alloc0 (cfg->mempool, sizeof (int) * cfg->next_vreg);
13209 for (i = 0; i < cfg->num_varinfo; ++i) {
13210 MonoInst *ins = cfg->varinfo [i];
13213 if (mini_is_gsharedvt_variable_type (cfg, ins->inst_vtype)) {
13214 if (i >= cfg->locals_start) {
13216 idx = get_gsharedvt_info_slot (cfg, ins->inst_vtype, MONO_RGCTX_INFO_LOCAL_OFFSET);
13217 gsharedvt_vreg_to_idx [ins->dreg] = idx + 1;
13218 ins->opcode = OP_GSHAREDVT_LOCAL;
13219 ins->inst_imm = idx;
13222 gsharedvt_vreg_to_idx [ins->dreg] = -1;
13223 ins->opcode = OP_GSHAREDVT_ARG_REGOFFSET;
13229 /* FIXME: widening and truncation */
13232 * As an optimization, when a variable allocated to the stack is first loaded into
13233 * an lvreg, we will remember the lvreg and use it the next time instead of loading
13234 * the variable again.
13236 orig_next_vreg = cfg->next_vreg;
13237 vreg_to_lvreg = mono_mempool_alloc0 (cfg->mempool, sizeof (guint32) * cfg->next_vreg);
/* Fixed-capacity list of vregs with a cached lvreg; asserts below enforce < 1024 */
13238 lvregs = mono_mempool_alloc (cfg->mempool, sizeof (guint32) * 1024);
13242 * These arrays contain the first and last instructions accessing a given
13244 * Since we emit bblocks in the same order we process them here, and we
13245 * don't split live ranges, these will precisely describe the live range of
13246 * the variable, i.e. the instruction range where a valid value can be found
13247 * in the variables location.
13248 * The live range is computed using the liveness info computed by the liveness pass.
13249 * We can't use vmv->range, since that is an abstract live range, and we need
13250 * one which is instruction precise.
13251 * FIXME: Variables used in out-of-line bblocks have a hole in their live range.
13253 /* FIXME: Only do this if debugging info is requested */
13254 live_range_start = g_new0 (MonoInst*, cfg->next_vreg);
13255 live_range_end = g_new0 (MonoInst*, cfg->next_vreg);
13256 live_range_start_bb = g_new (MonoBasicBlock*, cfg->next_vreg);
13257 live_range_end_bb = g_new (MonoBasicBlock*, cfg->next_vreg);
13259 /* Add spill loads/stores */
13260 for (bb = cfg->bb_entry; bb; bb = bb->next_bb) {
13263 if (cfg->verbose_level > 2)
13264 printf ("\nSPILL BLOCK %d:\n", bb->block_num);
/* Cached lvregs are only valid within one bblock */
13266 /* Clear vreg_to_lvreg array */
13267 for (i = 0; i < lvregs_len; i++)
13268 vreg_to_lvreg [lvregs [i]] = 0;
13272 MONO_BB_FOR_EACH_INS (bb, ins) {
13273 const char *spec = INS_INFO (ins->opcode);
13274 int regtype, srcindex, sreg, tmp_reg, prev_dreg, num_sregs;
13275 gboolean store, no_lvreg;
13276 int sregs [MONO_MAX_SRC_REGS];
13278 if (G_UNLIKELY (cfg->verbose_level > 2))
13279 mono_print_ins (ins);
13281 if (ins->opcode == OP_NOP)
13285 * We handle LDADDR here as well, since it can only be decomposed
13286 * when variable addresses are known.
13288 if (ins->opcode == OP_LDADDR) {
13289 MonoInst *var = ins->inst_p0;
13291 if (var->opcode == OP_VTARG_ADDR) {
13292 /* Happens on SPARC/S390 where vtypes are passed by reference */
13293 MonoInst *vtaddr = var->inst_left;
13294 if (vtaddr->opcode == OP_REGVAR) {
13295 ins->opcode = OP_MOVE;
13296 ins->sreg1 = vtaddr->dreg;
13298 else if (var->inst_left->opcode == OP_REGOFFSET) {
13299 ins->opcode = OP_LOAD_MEMBASE;
13300 ins->inst_basereg = vtaddr->inst_basereg;
13301 ins->inst_offset = vtaddr->inst_offset;
13304 } else if (cfg->gsharedvt && gsharedvt_vreg_to_idx [var->dreg] < 0) {
13305 /* gsharedvt arg passed by ref */
13306 g_assert (var->opcode == OP_GSHAREDVT_ARG_REGOFFSET);
13308 ins->opcode = OP_LOAD_MEMBASE;
13309 ins->inst_basereg = var->inst_basereg;
13310 ins->inst_offset = var->inst_offset;
13311 } else if (cfg->gsharedvt && gsharedvt_vreg_to_idx [var->dreg]) {
13312 MonoInst *load, *load2, *load3;
13313 int idx = gsharedvt_vreg_to_idx [var->dreg] - 1;
13314 int reg1, reg2, reg3;
13315 MonoInst *info_var = cfg->gsharedvt_info_var;
13316 MonoInst *locals_var = cfg->gsharedvt_locals_var;
13320 * Compute the address of the local as gsharedvt_locals_var + gsharedvt_info_var->locals_offsets [idx].
13323 g_assert (var->opcode == OP_GSHAREDVT_LOCAL);
13325 g_assert (info_var);
13326 g_assert (locals_var);
13328 /* Mark the instruction used to compute the locals var as used */
13329 cfg->gsharedvt_locals_var_ins = NULL;
13331 /* Load the offset */
13332 if (info_var->opcode == OP_REGOFFSET) {
13333 reg1 = alloc_ireg (cfg);
13334 NEW_LOAD_MEMBASE (cfg, load, OP_LOAD_MEMBASE, reg1, info_var->inst_basereg, info_var->inst_offset);
13335 } else if (info_var->opcode == OP_REGVAR) {
13337 reg1 = info_var->dreg;
13339 g_assert_not_reached ();
13341 reg2 = alloc_ireg (cfg);
13342 NEW_LOAD_MEMBASE (cfg, load2, OP_LOADI4_MEMBASE, reg2, reg1, MONO_STRUCT_OFFSET (MonoGSharedVtMethodRuntimeInfo, entries) + (idx * sizeof (gpointer)));
13343 /* Load the locals area address */
13344 reg3 = alloc_ireg (cfg);
13345 if (locals_var->opcode == OP_REGOFFSET) {
13346 NEW_LOAD_MEMBASE (cfg, load3, OP_LOAD_MEMBASE, reg3, locals_var->inst_basereg, locals_var->inst_offset);
13347 } else if (locals_var->opcode == OP_REGVAR) {
13348 NEW_UNALU (cfg, load3, OP_MOVE, reg3, locals_var->dreg);
13350 g_assert_not_reached ();
13352 /* Compute the address */
13353 ins->opcode = OP_PADD;
/* Insert the loads in dependency order before the PADD */
13357 mono_bblock_insert_before_ins (bb, ins, load3);
13358 mono_bblock_insert_before_ins (bb, load3, load2);
13360 mono_bblock_insert_before_ins (bb, load2, load);
13362 g_assert (var->opcode == OP_REGOFFSET);
13364 ins->opcode = OP_ADD_IMM;
13365 ins->sreg1 = var->inst_basereg;
13366 ins->inst_imm = var->inst_offset;
13369 *need_local_opts = TRUE;
13370 spec = INS_INFO (ins->opcode);
13373 if (ins->opcode < MONO_CEE_LAST) {
13374 mono_print_ins (ins);
13375 g_assert_not_reached ();
13379 * Store opcodes have destbasereg in the dreg, but in reality, it is an
13383 if (MONO_IS_STORE_MEMBASE (ins)) {
/* Temporarily swap dreg/sreg2 so the generic dreg/sreg handling below works */
13384 tmp_reg = ins->dreg;
13385 ins->dreg = ins->sreg2;
13386 ins->sreg2 = tmp_reg;
13389 spec2 [MONO_INST_DEST] = ' ';
13390 spec2 [MONO_INST_SRC1] = spec [MONO_INST_SRC1];
13391 spec2 [MONO_INST_SRC2] = spec [MONO_INST_DEST];
13392 spec2 [MONO_INST_SRC3] = ' ';
13394 } else if (MONO_IS_STORE_MEMINDEX (ins))
13395 g_assert_not_reached ();
13400 if (G_UNLIKELY (cfg->verbose_level > 2)) {
13401 printf ("\t %.3s %d", spec, ins->dreg);
13402 num_sregs = mono_inst_get_src_registers (ins, sregs);
13403 for (srcindex = 0; srcindex < num_sregs; ++srcindex)
13404 printf (" %d", sregs [srcindex]);
/***************/
/*    DREG     */
/***************/
13411 regtype = spec [MONO_INST_DEST];
13412 g_assert (((ins->dreg == -1) && (regtype == ' ')) || ((ins->dreg != -1) && (regtype != ' ')));
13415 if ((ins->dreg != -1) && get_vreg_to_inst (cfg, ins->dreg)) {
13416 MonoInst *var = get_vreg_to_inst (cfg, ins->dreg);
13417 MonoInst *store_ins;
13419 MonoInst *def_ins = ins;
13420 int dreg = ins->dreg; /* The original vreg */
13422 store_opcode = mono_type_to_store_membase (cfg, var->inst_vtype);
13424 if (var->opcode == OP_REGVAR) {
/* Variable lives in a hardware register: just substitute it */
13425 ins->dreg = var->dreg;
13426 } else if ((ins->dreg == ins->sreg1) && (spec [MONO_INST_DEST] == 'i') && (spec [MONO_INST_SRC1] == 'i') && !vreg_to_lvreg [ins->dreg] && (op_to_op_dest_membase (store_opcode, ins->opcode) != -1)) {
13428 * Instead of emitting a load+store, use a _membase opcode.
13430 g_assert (var->opcode == OP_REGOFFSET);
13431 if (ins->opcode == OP_MOVE) {
13435 ins->opcode = op_to_op_dest_membase (store_opcode, ins->opcode);
13436 ins->inst_basereg = var->inst_basereg;
13437 ins->inst_offset = var->inst_offset;
13440 spec = INS_INFO (ins->opcode);
13444 g_assert (var->opcode == OP_REGOFFSET);
13446 prev_dreg = ins->dreg;
13448 /* Invalidate any previous lvreg for this vreg */
13449 vreg_to_lvreg [ins->dreg] = 0;
13453 if (COMPILE_SOFT_FLOAT (cfg) && store_opcode == OP_STORER8_MEMBASE_REG) {
/* On soft-float, r8 values live in integer registers */
13455 store_opcode = OP_STOREI8_MEMBASE_REG;
/* Redirect the def into a fresh lvreg; a store spills it below */
13458 ins->dreg = alloc_dreg (cfg, stacktypes [regtype]);
13460 #if SIZEOF_REGISTER != 8
13461 if (regtype == 'l') {
13462 NEW_STORE_MEMBASE (cfg, store_ins, OP_STOREI4_MEMBASE_REG, var->inst_basereg, var->inst_offset + MINI_LS_WORD_OFFSET, ins->dreg + 1);
13463 mono_bblock_insert_after_ins (bb, ins, store_ins);
13464 NEW_STORE_MEMBASE (cfg, store_ins, OP_STOREI4_MEMBASE_REG, var->inst_basereg, var->inst_offset + MINI_MS_WORD_OFFSET, ins->dreg + 2);
13465 mono_bblock_insert_after_ins (bb, ins, store_ins);
13466 def_ins = store_ins;
13471 g_assert (store_opcode != OP_STOREV_MEMBASE);
13473 /* Try to fuse the store into the instruction itself */
13474 /* FIXME: Add more instructions */
13475 if (!lvreg && ((ins->opcode == OP_ICONST) || ((ins->opcode == OP_I8CONST) && (ins->inst_c0 == 0)))) {
13476 ins->opcode = store_membase_reg_to_store_membase_imm (store_opcode);
13477 ins->inst_imm = ins->inst_c0;
13478 ins->inst_destbasereg = var->inst_basereg;
13479 ins->inst_offset = var->inst_offset;
13480 spec = INS_INFO (ins->opcode);
13481 } else if (!lvreg && ((ins->opcode == OP_MOVE) || (ins->opcode == OP_FMOVE) || (ins->opcode == OP_LMOVE))) {
/* Turn the move into a direct store of its source */
13482 ins->opcode = store_opcode;
13483 ins->inst_destbasereg = var->inst_basereg;
13484 ins->inst_offset = var->inst_offset;
/* The instruction became a store: swap dreg/sreg2 back (see above) */
13488 tmp_reg = ins->dreg;
13489 ins->dreg = ins->sreg2;
13490 ins->sreg2 = tmp_reg;
13493 spec2 [MONO_INST_DEST] = ' ';
13494 spec2 [MONO_INST_SRC1] = spec [MONO_INST_SRC1];
13495 spec2 [MONO_INST_SRC2] = spec [MONO_INST_DEST];
13496 spec2 [MONO_INST_SRC3] = ' ';
13498 } else if (!lvreg && (op_to_op_store_membase (store_opcode, ins->opcode) != -1)) {
13499 // FIXME: The backends expect the base reg to be in inst_basereg
13500 ins->opcode = op_to_op_store_membase (store_opcode, ins->opcode);
13502 ins->inst_basereg = var->inst_basereg;
13503 ins->inst_offset = var->inst_offset;
13504 spec = INS_INFO (ins->opcode);
13506 /* printf ("INS: "); mono_print_ins (ins); */
13507 /* Create a store instruction */
13508 NEW_STORE_MEMBASE (cfg, store_ins, store_opcode, var->inst_basereg, var->inst_offset, ins->dreg);
13510 /* Insert it after the instruction */
13511 mono_bblock_insert_after_ins (bb, ins, store_ins);
13513 def_ins = store_ins;
13516 * We can't assign ins->dreg to var->dreg here, since the
13517 * sregs could use it. So set a flag, and do it after
13520 if ((!MONO_ARCH_USE_FPSTACK || ((store_opcode != OP_STORER8_MEMBASE_REG) && (store_opcode != OP_STORER4_MEMBASE_REG))) && !((var)->flags & (MONO_INST_VOLATILE|MONO_INST_INDIRECT)))
13521 dest_has_lvreg = TRUE;
13526 if (def_ins && !live_range_start [dreg]) {
13527 live_range_start [dreg] = def_ins;
13528 live_range_start_bb [dreg] = bb;
13531 if (cfg->compute_gc_maps && def_ins && (var->flags & MONO_INST_GC_TRACK)) {
13534 MONO_INST_NEW (cfg, tmp, OP_GC_LIVENESS_DEF);
13535 tmp->inst_c1 = dreg;
13536 mono_bblock_insert_after_ins (bb, def_ins, tmp);
/************/
/*  SREGS   */
/************/
13543 num_sregs = mono_inst_get_src_registers (ins, sregs);
/* NOTE(review): iterates a fixed 3 slots rather than num_sregs —
 * presumably relies on unused sregs being -1; confirm MONO_MAX_SRC_REGS == 3 */
13544 for (srcindex = 0; srcindex < 3; ++srcindex) {
13545 regtype = spec [MONO_INST_SRC1 + srcindex];
13546 sreg = sregs [srcindex];
13548 g_assert (((sreg == -1) && (regtype == ' ')) || ((sreg != -1) && (regtype != ' ')));
13549 if ((sreg != -1) && get_vreg_to_inst (cfg, sreg)) {
13550 MonoInst *var = get_vreg_to_inst (cfg, sreg);
13551 MonoInst *use_ins = ins;
13552 MonoInst *load_ins;
13553 guint32 load_opcode;
13555 if (var->opcode == OP_REGVAR) {
13556 sregs [srcindex] = var->dreg;
13557 //mono_inst_set_src_registers (ins, sregs);
13558 live_range_end [sreg] = use_ins;
13559 live_range_end_bb [sreg] = bb;
13561 if (cfg->compute_gc_maps && var->dreg < orig_next_vreg && (var->flags & MONO_INST_GC_TRACK)) {
13564 MONO_INST_NEW (cfg, tmp, OP_GC_LIVENESS_USE);
13565 /* var->dreg is a hreg */
13566 tmp->inst_c1 = sreg;
13567 mono_bblock_insert_after_ins (bb, ins, tmp);
13573 g_assert (var->opcode == OP_REGOFFSET);
13575 load_opcode = mono_type_to_load_membase (cfg, var->inst_vtype);
13577 g_assert (load_opcode != OP_LOADV_MEMBASE);
13579 if (vreg_to_lvreg [sreg]) {
13580 g_assert (vreg_to_lvreg [sreg] != -1);
13582 /* The variable is already loaded to an lvreg */
13583 if (G_UNLIKELY (cfg->verbose_level > 2))
13584 printf ("\t\tUse lvreg R%d for R%d.\n", vreg_to_lvreg [sreg], sreg);
13585 sregs [srcindex] = vreg_to_lvreg [sreg];
13586 //mono_inst_set_src_registers (ins, sregs);
13590 /* Try to fuse the load into the instruction */
13591 if ((srcindex == 0) && (op_to_op_src1_membase (load_opcode, ins->opcode) != -1)) {
13592 ins->opcode = op_to_op_src1_membase (load_opcode, ins->opcode);
13593 sregs [0] = var->inst_basereg;
13594 //mono_inst_set_src_registers (ins, sregs);
13595 ins->inst_offset = var->inst_offset;
13596 } else if ((srcindex == 1) && (op_to_op_src2_membase (load_opcode, ins->opcode) != -1)) {
13597 ins->opcode = op_to_op_src2_membase (load_opcode, ins->opcode);
13598 sregs [1] = var->inst_basereg;
13599 //mono_inst_set_src_registers (ins, sregs);
13600 ins->inst_offset = var->inst_offset;
13602 if (MONO_IS_REAL_MOVE (ins)) {
13603 ins->opcode = OP_NOP;
13606 //printf ("%d ", srcindex); mono_print_ins (ins);
13608 sreg = alloc_dreg (cfg, stacktypes [regtype]);
/* Cache the lvreg unless the arch keeps fp values on a stack
 * (x87) or the variable is volatile/indirect */
13610 if ((!MONO_ARCH_USE_FPSTACK || ((load_opcode != OP_LOADR8_MEMBASE) && (load_opcode != OP_LOADR4_MEMBASE))) && !((var)->flags & (MONO_INST_VOLATILE|MONO_INST_INDIRECT)) && !no_lvreg) {
13611 if (var->dreg == prev_dreg) {
13613 * sreg refers to the value loaded by the load
13614 * emitted below, but we need to use ins->dreg
13615 * since it refers to the store emitted earlier.
13619 g_assert (sreg != -1);
13620 vreg_to_lvreg [var->dreg] = sreg;
13621 g_assert (lvregs_len < 1024);
13622 lvregs [lvregs_len ++] = var->dreg;
13626 sregs [srcindex] = sreg;
13627 //mono_inst_set_src_registers (ins, sregs);
13629 #if SIZEOF_REGISTER != 8
13630 if (regtype == 'l') {
/* Two i4 loads for the MS/LS halves on 32-bit targets */
13631 NEW_LOAD_MEMBASE (cfg, load_ins, OP_LOADI4_MEMBASE, sreg + 2, var->inst_basereg, var->inst_offset + MINI_MS_WORD_OFFSET);
13632 mono_bblock_insert_before_ins (bb, ins, load_ins);
13633 NEW_LOAD_MEMBASE (cfg, load_ins, OP_LOADI4_MEMBASE, sreg + 1, var->inst_basereg, var->inst_offset + MINI_LS_WORD_OFFSET);
13634 mono_bblock_insert_before_ins (bb, ins, load_ins);
13635 use_ins = load_ins;
13640 #if SIZEOF_REGISTER == 4
13641 g_assert (load_opcode != OP_LOADI8_MEMBASE);
13643 NEW_LOAD_MEMBASE (cfg, load_ins, load_opcode, sreg, var->inst_basereg, var->inst_offset);
13644 mono_bblock_insert_before_ins (bb, ins, load_ins);
13645 use_ins = load_ins;
13649 if (var->dreg < orig_next_vreg) {
13650 live_range_end [var->dreg] = use_ins;
13651 live_range_end_bb [var->dreg] = bb;
13654 if (cfg->compute_gc_maps && var->dreg < orig_next_vreg && (var->flags & MONO_INST_GC_TRACK)) {
13657 MONO_INST_NEW (cfg, tmp, OP_GC_LIVENESS_USE);
13658 tmp->inst_c1 = var->dreg;
13659 mono_bblock_insert_after_ins (bb, ins, tmp);
13663 mono_inst_set_src_registers (ins, sregs);
/* Deferred from the DREG section: safe to cache now that sregs are done */
13665 if (dest_has_lvreg) {
13666 g_assert (ins->dreg != -1);
13667 vreg_to_lvreg [prev_dreg] = ins->dreg;
13668 g_assert (lvregs_len < 1024);
13669 lvregs [lvregs_len ++] = prev_dreg;
13670 dest_has_lvreg = FALSE;
/* Undo the store dreg/sreg2 swap performed earlier */
13674 tmp_reg = ins->dreg;
13675 ins->dreg = ins->sreg2;
13676 ins->sreg2 = tmp_reg;
13679 if (MONO_IS_CALL (ins)) {
/* Calls clobber the lvregs, so forget all cached loads */
13680 /* Clear vreg_to_lvreg array */
13681 for (i = 0; i < lvregs_len; i++)
13682 vreg_to_lvreg [lvregs [i]] = 0;
13684 } else if (ins->opcode == OP_NOP) {
13686 MONO_INST_NULLIFY_SREGS (ins);
13689 if (cfg->verbose_level > 2)
13690 mono_print_ins_index (1, ins);
13693 /* Extend the live range based on the liveness info */
13694 if (cfg->compute_precise_live_ranges && bb->live_out_set && bb->code) {
13695 for (i = 0; i < cfg->num_varinfo; i ++) {
13696 MonoMethodVar *vi = MONO_VARINFO (cfg, i);
13698 if (vreg_is_volatile (cfg, vi->vreg))
13699 /* The liveness info is incomplete */
13702 if (mono_bitset_test_fast (bb->live_in_set, i) && !live_range_start [vi->vreg]) {
13703 /* Live from at least the first ins of this bb */
13704 live_range_start [vi->vreg] = bb->code;
13705 live_range_start_bb [vi->vreg] = bb;
13708 if (mono_bitset_test_fast (bb->live_out_set, i)) {
13709 /* Live at least until the last ins of this bb */
13710 live_range_end [vi->vreg] = bb->last_ins;
13711 live_range_end_bb [vi->vreg] = bb;
13717 #ifdef MONO_ARCH_HAVE_LIVERANGE_OPS
13719 * Emit LIVERANGE_START/LIVERANGE_END opcodes, the backend will implement them
13720 * by storing the current native offset into MonoMethodVar->live_range_start/end.
13722 if (cfg->compute_precise_live_ranges && cfg->comp_done & MONO_COMP_LIVENESS) {
13723 for (i = 0; i < cfg->num_varinfo; ++i) {
13724 int vreg = MONO_VARINFO (cfg, i)->vreg;
13727 if (live_range_start [vreg]) {
13728 MONO_INST_NEW (cfg, ins, OP_LIVERANGE_START);
13730 ins->inst_c1 = vreg;
13731 mono_bblock_insert_after_ins (live_range_start_bb [vreg], live_range_start [vreg], ins);
13733 if (live_range_end [vreg]) {
13734 MONO_INST_NEW (cfg, ins, OP_LIVERANGE_END);
13736 ins->inst_c1 = vreg;
13737 if (live_range_end [vreg] == live_range_end_bb [vreg]->last_ins)
13738 mono_add_ins_to_end (live_range_end_bb [vreg], ins);
13740 mono_bblock_insert_after_ins (live_range_end_bb [vreg], live_range_end [vreg], ins);
13746 if (cfg->gsharedvt_locals_var_ins) {
13747 /* Nullify if unused */
13748 cfg->gsharedvt_locals_var_ins->opcode = OP_PCONST;
13749 cfg->gsharedvt_locals_var_ins->inst_imm = 0;
13752 g_free (live_range_start);
13753 g_free (live_range_end);
13754 g_free (live_range_start_bb);
13755 g_free (live_range_end_bb);
13760 * - use 'iadd' instead of 'int_add'
13761 * - handling ovf opcodes: decompose in method_to_ir.
13762 * - unify iregs/fregs
13763 * -> partly done, the missing parts are:
13764 * - a more complete unification would involve unifying the hregs as well, so
13765 * code wouldn't need if (fp) all over the place. but that would mean the hregs
13766 * would no longer map to the machine hregs, so the code generators would need to
13767 * be modified. Also, on ia64 for example, niregs + nfregs > 256 -> bitmasks
13768 * wouldn't work any more. Duplicating the code in mono_local_regalloc () into
13769 * fp/non-fp branches speeds it up by about 15%.
13770 * - use sext/zext opcodes instead of shifts
13772 * - get rid of TEMPLOADs if possible and use vregs instead
13773 * - clean up usage of OP_P/OP_ opcodes
13774 * - cleanup usage of DUMMY_USE
13775 * - cleanup the setting of ins->type for MonoInst's which are pushed on the
13777 * - set the stack type and allocate a dreg in the EMIT_NEW macros
13778 * - get rid of all the <foo>2 stuff when the new JIT is ready.
13779 * - make sure handle_stack_args () is called before the branch is emitted
13780 * - when the new IR is done, get rid of all unused stuff
13781 * - COMPARE/BEQ as separate instructions or unify them ?
13782 * - keeping them separate allows specialized compare instructions like
13783 * compare_imm, compare_membase
13784 * - most back ends unify fp compare+branch, fp compare+ceq
13785 * - integrate mono_save_args into inline_method
13786 * - get rid of the empty bblocks created by MONO_EMIT_NEW_BRACH_BLOCK2
13787 * - handle long shift opts on 32 bit platforms somehow: they require
13788 * 3 sregs (2 for arg1 and 1 for arg2)
13789 * - make byref a 'normal' type.
13790 * - use vregs for bb->out_stacks if possible, handle_global_vreg will make them a
13791 * variable if needed.
13792 * - do not start a new IL level bblock when cfg->cbb is changed by a function call
13793 * like inline_method.
13794 * - remove inlining restrictions
13795 * - fix LNEG and enable cfold of INEG
13796 * - generalize x86 optimizations like ldelema as a peephole optimization
13797 * - add store_mem_imm for amd64
13798 * - optimize the loading of the interruption flag in the managed->native wrappers
13799 * - avoid special handling of OP_NOP in passes
13800 * - move code inserting instructions into one function/macro.
13801 * - try a coalescing phase after liveness analysis
13802 * - add float -> vreg conversion + local optimizations on !x86
13803 * - figure out how to handle decomposed branches during optimizations, ie.
13804 * compare+branch, op_jump_table+op_br etc.
13805 * - promote RuntimeXHandles to vregs
13806 * - vtype cleanups:
13807 * - add a NEW_VARLOADA_VREG macro
13808 * - the vtype optimizations are blocked by the LDADDR opcodes generated for
13809 * accessing vtype fields.
13810 * - get rid of I8CONST on 64 bit platforms
13811 * - dealing with the increase in code size due to branches created during opcode
 *   decomposition.
13813 * - use extended basic blocks
13814 * - all parts of the JIT
13815 * - handle_global_vregs () && local regalloc
13816 * - avoid introducing global vregs during decomposition, like 'vtable' in isinst
13817 * - sources of increase in code size:
13820 * - isinst and castclass
13821 * - lvregs not allocated to global registers even if used multiple times
13822 * - call cctors outside the JIT, to make -v output more readable and JIT timings more
 *   accurate.
13824 * - check for fp stack leakage in other opcodes too. (-> 'exceptions' optimization)
13825 * - add all micro optimizations from the old JIT
13826 * - put tree optimizations into the deadce pass
13827 * - decompose op_start_handler/op_endfilter/op_endfinally earlier using an arch
13828 * specific function.
13829 * - unify the float comparison opcodes with the other comparison opcodes, i.e.
13830 * fcompare + branchCC.
13831 * - create a helper function for allocating a stack slot, taking into account
13832 * MONO_CFG_HAS_SPILLUP.
13834 * - merge the ia64 switch changes.
13835 * - optimize mono_regstate2_alloc_int/float.
13836 * - fix the pessimistic handling of variables accessed in exception handler blocks.
13837 * - need to write a tree optimization pass, but the creation of trees is difficult, i.e.
13838 * parts of the tree could be separated by other instructions, killing the tree
13839 * arguments, or stores killing loads etc. Also, should we fold loads into other
13840 * instructions if the result of the load is used multiple times ?
13841 * - make the REM_IMM optimization in mini-x86.c arch-independent.
13842 * - LAST MERGE: 108395.
13843 * - when returning vtypes in registers, generate IR and append it to the end of the
13844 * last bb instead of doing it in the epilog.
13845 * - change the store opcodes so they use sreg1 instead of dreg to store the base register.
13853 - When to decompose opcodes:
13854 - earlier: this makes some optimizations hard to implement, since the low level IR
13855 no longer contains the necessary information. But it is easier to do.
13856 - later: harder to implement, enables more optimizations.
13857 - Branches inside bblocks:
13858 - created when decomposing complex opcodes.
13859 - branches to another bblock: harmless, but not tracked by the branch
13860 optimizations, so need to branch to a label at the start of the bblock.
13861 - branches to inside the same bblock: very problematic, trips up the local
13862 reg allocator. Can be fixed by splitting the current bblock, but that is a
13863 complex operation, since some local vregs can become global vregs etc.
13864 - Local/global vregs:
13865 - local vregs: temporary vregs used inside one bblock. Assigned to hregs by the
13866 local register allocator.
13867 - global vregs: used in more than one bblock. Have an associated MonoMethodVar
13868 structure, created by mono_create_var (). Assigned to hregs or the stack by
13869 the global register allocator.
13870 - When to do optimizations like alu->alu_imm:
13871 - earlier -> saves work later on since the IR will be smaller/simpler
13872 - later -> can work on more instructions
13873 - Handling of valuetypes:
13874 - When a vtype is pushed on the stack, a new temporary is created, an
13875 instruction computing its address (LDADDR) is emitted and pushed on
13876 the stack. Need to optimize cases when the vtype is used immediately as in
13877 argument passing, stloc etc.
13878 - Instead of the to_end stuff in the old JIT, simply call the function handling
13879 the values on the stack before emitting the last instruction of the bb.
13882 #endif /* DISABLE_JIT */