/*
 * method-to-ir.c: Convert CIL to the JIT internal representation
 *
 * Authors:
 *   Paolo Molaro (lupus@ximian.com)
 *   Dietmar Maurer (dietmar@ximian.com)
 *
 * (C) 2002 Ximian, Inc.
 * Copyright 2003-2010 Novell, Inc (http://www.novell.com)
 * Copyright 2011 Xamarin, Inc (http://www.xamarin.com)
 */
27 #ifdef HAVE_SYS_TIME_H
35 #include <mono/utils/memcheck.h>
37 #include <mono/metadata/abi-details.h>
38 #include <mono/metadata/assembly.h>
39 #include <mono/metadata/attrdefs.h>
40 #include <mono/metadata/loader.h>
41 #include <mono/metadata/tabledefs.h>
42 #include <mono/metadata/class.h>
43 #include <mono/metadata/object.h>
44 #include <mono/metadata/exception.h>
45 #include <mono/metadata/opcodes.h>
46 #include <mono/metadata/mono-endian.h>
47 #include <mono/metadata/tokentype.h>
48 #include <mono/metadata/tabledefs.h>
49 #include <mono/metadata/marshal.h>
50 #include <mono/metadata/debug-helpers.h>
51 #include <mono/metadata/mono-debug.h>
52 #include <mono/metadata/gc-internal.h>
53 #include <mono/metadata/security-manager.h>
54 #include <mono/metadata/threads-types.h>
55 #include <mono/metadata/security-core-clr.h>
56 #include <mono/metadata/monitor.h>
57 #include <mono/metadata/profiler-private.h>
58 #include <mono/metadata/profiler.h>
59 #include <mono/metadata/debug-mono-symfile.h>
60 #include <mono/utils/mono-compiler.h>
61 #include <mono/utils/mono-memory-model.h>
62 #include <mono/metadata/mono-basic-block.h>
68 #include "jit-icalls.h"
70 #include "debugger-agent.h"
71 #include "seq-points.h"
73 #define BRANCH_COST 10
74 #define INLINE_LENGTH_LIMIT 20
76 /* These have 'cfg' as an implicit argument */
77 #define INLINE_FAILURE(msg) do { \
78 if ((cfg->method != cfg->current_method) && (cfg->current_method->wrapper_type == MONO_WRAPPER_NONE)) { \
79 inline_failure (cfg, msg); \
80 goto exception_exit; \
83 #define CHECK_CFG_EXCEPTION do {\
84 if (cfg->exception_type != MONO_EXCEPTION_NONE) \
85 goto exception_exit; \
87 #define METHOD_ACCESS_FAILURE(method, cmethod) do { \
88 method_access_failure ((cfg), (method), (cmethod)); \
89 goto exception_exit; \
91 #define FIELD_ACCESS_FAILURE(method, field) do { \
92 field_access_failure ((cfg), (method), (field)); \
93 goto exception_exit; \
95 #define GENERIC_SHARING_FAILURE(opcode) do { \
97 gshared_failure (cfg, opcode, __FILE__, __LINE__); \
98 goto exception_exit; \
101 #define GSHAREDVT_FAILURE(opcode) do { \
102 if (cfg->gsharedvt) { \
103 gsharedvt_failure (cfg, opcode, __FILE__, __LINE__); \
104 goto exception_exit; \
107 #define OUT_OF_MEMORY_FAILURE do { \
108 mono_cfg_set_exception (cfg, MONO_EXCEPTION_OUT_OF_MEMORY); \
109 goto exception_exit; \
111 #define DISABLE_AOT(cfg) do { \
112 if ((cfg)->verbose_level >= 2) \
113 printf ("AOT disabled: %s:%d\n", __FILE__, __LINE__); \
114 (cfg)->disable_aot = TRUE; \
116 #define LOAD_ERROR do { \
117 break_on_unverified (); \
118 mono_cfg_set_exception (cfg, MONO_EXCEPTION_TYPE_LOAD); \
119 goto exception_exit; \
122 #define TYPE_LOAD_ERROR(klass) do { \
123 cfg->exception_ptr = klass; \
127 #define CHECK_CFG_ERROR do {\
128 if (!mono_error_ok (&cfg->error)) { \
129 mono_cfg_set_exception (cfg, MONO_EXCEPTION_MONO_ERROR); \
130 goto mono_error_exit; \
134 /* Determine whenever 'ins' represents a load of the 'this' argument */
135 #define MONO_CHECK_THIS(ins) (mono_method_signature (cfg->method)->hasthis && ((ins)->opcode == OP_MOVE) && ((ins)->sreg1 == cfg->args [0]->dreg))
137 static int ldind_to_load_membase (int opcode);
138 static int stind_to_store_membase (int opcode);
140 int mono_op_to_op_imm (int opcode);
141 int mono_op_to_op_imm_noemul (int opcode);
143 MONO_API MonoInst* mono_emit_native_call (MonoCompile *cfg, gconstpointer func, MonoMethodSignature *sig, MonoInst **args);
145 static int inline_method (MonoCompile *cfg, MonoMethod *cmethod, MonoMethodSignature *fsig, MonoInst **sp,
146 guchar *ip, guint real_offset, gboolean inline_always, MonoBasicBlock **out_cbb);
148 /* helper methods signatures */
149 static MonoMethodSignature *helper_sig_class_init_trampoline;
150 static MonoMethodSignature *helper_sig_domain_get;
151 static MonoMethodSignature *helper_sig_generic_class_init_trampoline;
152 static MonoMethodSignature *helper_sig_generic_class_init_trampoline_llvm;
153 static MonoMethodSignature *helper_sig_rgctx_lazy_fetch_trampoline;
154 static MonoMethodSignature *helper_sig_monitor_enter_exit_trampoline;
155 static MonoMethodSignature *helper_sig_monitor_enter_exit_trampoline_llvm;
156 static MonoMethodSignature *helper_sig_monitor_enter_v4_trampoline_llvm;
159 * Instruction metadata
167 #define MINI_OP(a,b,dest,src1,src2) dest, src1, src2, ' ',
168 #define MINI_OP3(a,b,dest,src1,src2,src3) dest, src1, src2, src3,
174 #if SIZEOF_REGISTER == 8 && SIZEOF_REGISTER == SIZEOF_VOID_P
179 /* keep in sync with the enum in mini.h */
182 #include "mini-ops.h"
187 #define MINI_OP(a,b,dest,src1,src2) ((src2) != NONE ? 2 : ((src1) != NONE ? 1 : 0)),
188 #define MINI_OP3(a,b,dest,src1,src2,src3) ((src3) != NONE ? 3 : ((src2) != NONE ? 2 : ((src1) != NONE ? 1 : 0))),
190 * This should contain the index of the last sreg + 1. This is not the same
191 * as the number of sregs for opcodes like IA64_CMP_EQ_IMM.
193 const gint8 ins_sreg_counts[] = {
194 #include "mini-ops.h"
199 #define MONO_INIT_VARINFO(vi,id) do { \
200 (vi)->range.first_use.pos.bid = 0xffff; \
206 mono_alloc_ireg (MonoCompile *cfg)
208 return alloc_ireg (cfg);
212 mono_alloc_lreg (MonoCompile *cfg)
214 return alloc_lreg (cfg);
218 mono_alloc_freg (MonoCompile *cfg)
220 return alloc_freg (cfg);
224 mono_alloc_preg (MonoCompile *cfg)
226 return alloc_preg (cfg);
230 mono_alloc_dreg (MonoCompile *cfg, MonoStackType stack_type)
232 return alloc_dreg (cfg, stack_type);
236 * mono_alloc_ireg_ref:
238 * Allocate an IREG, and mark it as holding a GC ref.
241 mono_alloc_ireg_ref (MonoCompile *cfg)
243 return alloc_ireg_ref (cfg);
247 * mono_alloc_ireg_mp:
249 * Allocate an IREG, and mark it as holding a managed pointer.
252 mono_alloc_ireg_mp (MonoCompile *cfg)
254 return alloc_ireg_mp (cfg);
258 * mono_alloc_ireg_copy:
260 * Allocate an IREG with the same GC type as VREG.
263 mono_alloc_ireg_copy (MonoCompile *cfg, guint32 vreg)
265 if (vreg_is_ref (cfg, vreg))
266 return alloc_ireg_ref (cfg);
267 else if (vreg_is_mp (cfg, vreg))
268 return alloc_ireg_mp (cfg);
270 return alloc_ireg (cfg);
274 mono_type_to_regmove (MonoCompile *cfg, MonoType *type)
279 type = mini_get_underlying_type (cfg, type);
281 switch (type->type) {
294 case MONO_TYPE_FNPTR:
296 case MONO_TYPE_CLASS:
297 case MONO_TYPE_STRING:
298 case MONO_TYPE_OBJECT:
299 case MONO_TYPE_SZARRAY:
300 case MONO_TYPE_ARRAY:
304 #if SIZEOF_REGISTER == 8
310 return cfg->r4fp ? OP_RMOVE : OP_FMOVE;
313 case MONO_TYPE_VALUETYPE:
314 if (type->data.klass->enumtype) {
315 type = mono_class_enum_basetype (type->data.klass);
318 if (MONO_CLASS_IS_SIMD (cfg, mono_class_from_mono_type (type)))
321 case MONO_TYPE_TYPEDBYREF:
323 case MONO_TYPE_GENERICINST:
324 type = &type->data.generic_class->container_class->byval_arg;
328 g_assert (cfg->generic_sharing_context);
329 if (mini_type_var_is_vt (cfg, type))
332 return mono_type_to_regmove (cfg, mini_get_underlying_type (cfg, type));
334 g_error ("unknown type 0x%02x in type_to_regstore", type->type);
340 mono_print_bb (MonoBasicBlock *bb, const char *msg)
345 printf ("\n%s %d: [IN: ", msg, bb->block_num);
346 for (i = 0; i < bb->in_count; ++i)
347 printf (" BB%d(%d)", bb->in_bb [i]->block_num, bb->in_bb [i]->dfn);
349 for (i = 0; i < bb->out_count; ++i)
350 printf (" BB%d(%d)", bb->out_bb [i]->block_num, bb->out_bb [i]->dfn);
352 for (tree = bb->code; tree; tree = tree->next)
353 mono_print_ins_index (-1, tree);
357 mono_create_helper_signatures (void)
359 helper_sig_domain_get = mono_create_icall_signature ("ptr");
360 helper_sig_class_init_trampoline = mono_create_icall_signature ("void");
361 helper_sig_generic_class_init_trampoline = mono_create_icall_signature ("void");
362 helper_sig_generic_class_init_trampoline_llvm = mono_create_icall_signature ("void ptr");
363 helper_sig_rgctx_lazy_fetch_trampoline = mono_create_icall_signature ("ptr ptr");
364 helper_sig_monitor_enter_exit_trampoline = mono_create_icall_signature ("void");
365 helper_sig_monitor_enter_exit_trampoline_llvm = mono_create_icall_signature ("void object");
366 helper_sig_monitor_enter_v4_trampoline_llvm = mono_create_icall_signature ("void object ptr");
369 static MONO_NEVER_INLINE void
370 break_on_unverified (void)
372 if (mini_get_debug_options ()->break_on_unverified)
376 static MONO_NEVER_INLINE void
377 method_access_failure (MonoCompile *cfg, MonoMethod *method, MonoMethod *cil_method)
379 char *method_fname = mono_method_full_name (method, TRUE);
380 char *cil_method_fname = mono_method_full_name (cil_method, TRUE);
381 mono_cfg_set_exception (cfg, MONO_EXCEPTION_METHOD_ACCESS);
382 cfg->exception_message = g_strdup_printf ("Method `%s' is inaccessible from method `%s'\n", cil_method_fname, method_fname);
383 g_free (method_fname);
384 g_free (cil_method_fname);
387 static MONO_NEVER_INLINE void
388 field_access_failure (MonoCompile *cfg, MonoMethod *method, MonoClassField *field)
390 char *method_fname = mono_method_full_name (method, TRUE);
391 char *field_fname = mono_field_full_name (field);
392 mono_cfg_set_exception (cfg, MONO_EXCEPTION_FIELD_ACCESS);
393 cfg->exception_message = g_strdup_printf ("Field `%s' is inaccessible from method `%s'\n", field_fname, method_fname);
394 g_free (method_fname);
395 g_free (field_fname);
398 static MONO_NEVER_INLINE void
399 inline_failure (MonoCompile *cfg, const char *msg)
401 if (cfg->verbose_level >= 2)
402 printf ("inline failed: %s\n", msg);
403 mono_cfg_set_exception (cfg, MONO_EXCEPTION_INLINE_FAILED);
406 static MONO_NEVER_INLINE void
407 gshared_failure (MonoCompile *cfg, int opcode, const char *file, int line)
409 if (cfg->verbose_level > 2) \
410 printf ("sharing failed for method %s.%s.%s/%d opcode %s line %d\n", cfg->current_method->klass->name_space, cfg->current_method->klass->name, cfg->current_method->name, cfg->current_method->signature->param_count, mono_opcode_name ((opcode)), __LINE__);
411 mono_cfg_set_exception (cfg, MONO_EXCEPTION_GENERIC_SHARING_FAILED);
414 static MONO_NEVER_INLINE void
415 gsharedvt_failure (MonoCompile *cfg, int opcode, const char *file, int line)
417 cfg->exception_message = g_strdup_printf ("gsharedvt failed for method %s.%s.%s/%d opcode %s %s:%d", cfg->current_method->klass->name_space, cfg->current_method->klass->name, cfg->current_method->name, cfg->current_method->signature->param_count, mono_opcode_name ((opcode)), file, line);
418 if (cfg->verbose_level >= 2)
419 printf ("%s\n", cfg->exception_message);
420 mono_cfg_set_exception (cfg, MONO_EXCEPTION_GENERIC_SHARING_FAILED);
424 * When using gsharedvt, some instatiations might be verifiable, and some might be not. i.e.
425 * foo<T> (int i) { ldarg.0; box T; }
427 #define UNVERIFIED do { \
428 if (cfg->gsharedvt) { \
429 if (cfg->verbose_level > 2) \
430 printf ("gsharedvt method failed to verify, falling back to instantiation.\n"); \
431 mono_cfg_set_exception (cfg, MONO_EXCEPTION_GENERIC_SHARING_FAILED); \
432 goto exception_exit; \
434 break_on_unverified (); \
438 #define GET_BBLOCK(cfg,tblock,ip) do { \
439 (tblock) = cfg->cil_offset_to_bb [(ip) - cfg->cil_start]; \
441 if ((ip) >= end || (ip) < header->code) UNVERIFIED; \
442 NEW_BBLOCK (cfg, (tblock)); \
443 (tblock)->cil_code = (ip); \
444 ADD_BBLOCK (cfg, (tblock)); \
448 #if defined(TARGET_X86) || defined(TARGET_AMD64)
449 #define EMIT_NEW_X86_LEA(cfg,dest,sr1,sr2,shift,imm) do { \
450 MONO_INST_NEW (cfg, dest, OP_X86_LEA); \
451 (dest)->dreg = alloc_ireg_mp ((cfg)); \
452 (dest)->sreg1 = (sr1); \
453 (dest)->sreg2 = (sr2); \
454 (dest)->inst_imm = (imm); \
455 (dest)->backend.shift_amount = (shift); \
456 MONO_ADD_INS ((cfg)->cbb, (dest)); \
460 /* Emit conversions so both operands of a binary opcode are of the same type */
462 add_widen_op (MonoCompile *cfg, MonoInst *ins, MonoInst **arg1_ref, MonoInst **arg2_ref)
464 MonoInst *arg1 = *arg1_ref;
465 MonoInst *arg2 = *arg2_ref;
468 ((arg1->type == STACK_R4 && arg2->type == STACK_R8) ||
469 (arg1->type == STACK_R8 && arg2->type == STACK_R4))) {
472 /* Mixing r4/r8 is allowed by the spec */
473 if (arg1->type == STACK_R4) {
474 int dreg = alloc_freg (cfg);
476 EMIT_NEW_UNALU (cfg, conv, OP_RCONV_TO_R8, dreg, arg1->dreg);
477 conv->type = STACK_R8;
481 if (arg2->type == STACK_R4) {
482 int dreg = alloc_freg (cfg);
484 EMIT_NEW_UNALU (cfg, conv, OP_RCONV_TO_R8, dreg, arg2->dreg);
485 conv->type = STACK_R8;
491 #if SIZEOF_REGISTER == 8
492 /* FIXME: Need to add many more cases */
493 if ((arg1)->type == STACK_PTR && (arg2)->type == STACK_I4) {
496 int dr = alloc_preg (cfg);
497 EMIT_NEW_UNALU (cfg, widen, OP_SEXT_I4, dr, (arg2)->dreg);
498 (ins)->sreg2 = widen->dreg;
503 #define ADD_BINOP(op) do { \
504 MONO_INST_NEW (cfg, ins, (op)); \
506 ins->sreg1 = sp [0]->dreg; \
507 ins->sreg2 = sp [1]->dreg; \
508 type_from_op (cfg, ins, sp [0], sp [1]); \
510 /* Have to insert a widening op */ \
511 add_widen_op (cfg, ins, &sp [0], &sp [1]); \
512 ins->dreg = alloc_dreg ((cfg), (ins)->type); \
513 MONO_ADD_INS ((cfg)->cbb, (ins)); \
514 *sp++ = mono_decompose_opcode ((cfg), (ins), &bblock); \
517 #define ADD_UNOP(op) do { \
518 MONO_INST_NEW (cfg, ins, (op)); \
520 ins->sreg1 = sp [0]->dreg; \
521 type_from_op (cfg, ins, sp [0], NULL); \
523 (ins)->dreg = alloc_dreg ((cfg), (ins)->type); \
524 MONO_ADD_INS ((cfg)->cbb, (ins)); \
525 *sp++ = mono_decompose_opcode (cfg, ins, &bblock); \
528 #define ADD_BINCOND(next_block) do { \
531 MONO_INST_NEW(cfg, cmp, OP_COMPARE); \
532 cmp->sreg1 = sp [0]->dreg; \
533 cmp->sreg2 = sp [1]->dreg; \
534 type_from_op (cfg, cmp, sp [0], sp [1]); \
536 add_widen_op (cfg, cmp, &sp [0], &sp [1]); \
537 type_from_op (cfg, ins, sp [0], sp [1]); \
538 ins->inst_many_bb = mono_mempool_alloc (cfg->mempool, sizeof(gpointer)*2); \
539 GET_BBLOCK (cfg, tblock, target); \
540 link_bblock (cfg, bblock, tblock); \
541 ins->inst_true_bb = tblock; \
542 if ((next_block)) { \
543 link_bblock (cfg, bblock, (next_block)); \
544 ins->inst_false_bb = (next_block); \
545 start_new_bblock = 1; \
547 GET_BBLOCK (cfg, tblock, ip); \
548 link_bblock (cfg, bblock, tblock); \
549 ins->inst_false_bb = tblock; \
550 start_new_bblock = 2; \
552 if (sp != stack_start) { \
553 handle_stack_args (cfg, stack_start, sp - stack_start); \
554 CHECK_UNVERIFIABLE (cfg); \
556 MONO_ADD_INS (bblock, cmp); \
557 MONO_ADD_INS (bblock, ins); \
561 * link_bblock: Links two basic blocks
563 * links two basic blocks in the control flow graph, the 'from'
564 * argument is the starting block and the 'to' argument is the block
565 * the control flow ends to after 'from'.
568 link_bblock (MonoCompile *cfg, MonoBasicBlock *from, MonoBasicBlock* to)
570 MonoBasicBlock **newa;
574 if (from->cil_code) {
576 printf ("edge from IL%04x to IL_%04x\n", from->cil_code - cfg->cil_code, to->cil_code - cfg->cil_code);
578 printf ("edge from IL%04x to exit\n", from->cil_code - cfg->cil_code);
581 printf ("edge from entry to IL_%04x\n", to->cil_code - cfg->cil_code);
583 printf ("edge from entry to exit\n");
588 for (i = 0; i < from->out_count; ++i) {
589 if (to == from->out_bb [i]) {
595 newa = mono_mempool_alloc (cfg->mempool, sizeof (gpointer) * (from->out_count + 1));
596 for (i = 0; i < from->out_count; ++i) {
597 newa [i] = from->out_bb [i];
605 for (i = 0; i < to->in_count; ++i) {
606 if (from == to->in_bb [i]) {
612 newa = mono_mempool_alloc (cfg->mempool, sizeof (gpointer) * (to->in_count + 1));
613 for (i = 0; i < to->in_count; ++i) {
614 newa [i] = to->in_bb [i];
623 mono_link_bblock (MonoCompile *cfg, MonoBasicBlock *from, MonoBasicBlock* to)
625 link_bblock (cfg, from, to);
629 * mono_find_block_region:
631 * We mark each basic block with a region ID. We use that to avoid BB
632 * optimizations when blocks are in different regions.
635 * A region token that encodes where this region is, and information
636 * about the clause owner for this block.
638 * The region encodes the try/catch/filter clause that owns this block
639 * as well as the type. -1 is a special value that represents a block
640 * that is in none of try/catch/filter.
643 mono_find_block_region (MonoCompile *cfg, int offset)
645 MonoMethodHeader *header = cfg->header;
646 MonoExceptionClause *clause;
649 for (i = 0; i < header->num_clauses; ++i) {
650 clause = &header->clauses [i];
651 if ((clause->flags == MONO_EXCEPTION_CLAUSE_FILTER) && (offset >= clause->data.filter_offset) &&
652 (offset < (clause->handler_offset)))
653 return ((i + 1) << 8) | MONO_REGION_FILTER | clause->flags;
655 if (MONO_OFFSET_IN_HANDLER (clause, offset)) {
656 if (clause->flags == MONO_EXCEPTION_CLAUSE_FINALLY)
657 return ((i + 1) << 8) | MONO_REGION_FINALLY | clause->flags;
658 else if (clause->flags == MONO_EXCEPTION_CLAUSE_FAULT)
659 return ((i + 1) << 8) | MONO_REGION_FAULT | clause->flags;
661 return ((i + 1) << 8) | MONO_REGION_CATCH | clause->flags;
664 for (i = 0; i < header->num_clauses; ++i) {
665 clause = &header->clauses [i];
667 if (MONO_OFFSET_IN_CLAUSE (clause, offset))
668 return ((i + 1) << 8) | clause->flags;
675 mono_find_final_block (MonoCompile *cfg, unsigned char *ip, unsigned char *target, int type)
677 MonoMethodHeader *header = cfg->header;
678 MonoExceptionClause *clause;
682 for (i = 0; i < header->num_clauses; ++i) {
683 clause = &header->clauses [i];
684 if (MONO_OFFSET_IN_CLAUSE (clause, (ip - header->code)) &&
685 (!MONO_OFFSET_IN_CLAUSE (clause, (target - header->code)))) {
686 if (clause->flags == type)
687 res = g_list_append (res, clause);
694 mono_create_spvar_for_region (MonoCompile *cfg, int region)
698 var = g_hash_table_lookup (cfg->spvars, GINT_TO_POINTER (region));
702 var = mono_compile_create_var (cfg, &mono_defaults.int_class->byval_arg, OP_LOCAL);
703 /* prevent it from being register allocated */
704 var->flags |= MONO_INST_VOLATILE;
706 g_hash_table_insert (cfg->spvars, GINT_TO_POINTER (region), var);
710 mono_find_exvar_for_offset (MonoCompile *cfg, int offset)
712 return g_hash_table_lookup (cfg->exvars, GINT_TO_POINTER (offset));
716 mono_create_exvar_for_offset (MonoCompile *cfg, int offset)
720 var = g_hash_table_lookup (cfg->exvars, GINT_TO_POINTER (offset));
724 var = mono_compile_create_var (cfg, &mono_defaults.object_class->byval_arg, OP_LOCAL);
725 /* prevent it from being register allocated */
726 var->flags |= MONO_INST_VOLATILE;
728 g_hash_table_insert (cfg->exvars, GINT_TO_POINTER (offset), var);
734 * Returns the type used in the eval stack when @type is loaded.
735 * FIXME: return a MonoType/MonoClass for the byref and VALUETYPE cases.
738 type_to_eval_stack_type (MonoCompile *cfg, MonoType *type, MonoInst *inst)
742 type = mini_get_underlying_type (cfg, type);
743 inst->klass = klass = mono_class_from_mono_type (type);
745 inst->type = STACK_MP;
750 switch (type->type) {
752 inst->type = STACK_INV;
760 inst->type = STACK_I4;
765 case MONO_TYPE_FNPTR:
766 inst->type = STACK_PTR;
768 case MONO_TYPE_CLASS:
769 case MONO_TYPE_STRING:
770 case MONO_TYPE_OBJECT:
771 case MONO_TYPE_SZARRAY:
772 case MONO_TYPE_ARRAY:
773 inst->type = STACK_OBJ;
777 inst->type = STACK_I8;
780 inst->type = cfg->r4_stack_type;
783 inst->type = STACK_R8;
785 case MONO_TYPE_VALUETYPE:
786 if (type->data.klass->enumtype) {
787 type = mono_class_enum_basetype (type->data.klass);
791 inst->type = STACK_VTYPE;
794 case MONO_TYPE_TYPEDBYREF:
795 inst->klass = mono_defaults.typed_reference_class;
796 inst->type = STACK_VTYPE;
798 case MONO_TYPE_GENERICINST:
799 type = &type->data.generic_class->container_class->byval_arg;
803 g_assert (cfg->generic_sharing_context);
804 if (mini_is_gsharedvt_type (cfg, type)) {
805 g_assert (cfg->gsharedvt);
806 inst->type = STACK_VTYPE;
808 type_to_eval_stack_type (cfg, mini_get_underlying_type (cfg, type), inst);
812 g_error ("unknown type 0x%02x in eval stack type", type->type);
817 * The following tables are used to quickly validate the IL code in type_from_op ().
820 bin_num_table [STACK_MAX] [STACK_MAX] = {
821 {STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
822 {STACK_INV, STACK_I4, STACK_INV, STACK_PTR, STACK_INV, STACK_MP, STACK_INV, STACK_INV},
823 {STACK_INV, STACK_INV, STACK_I8, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
824 {STACK_INV, STACK_PTR, STACK_INV, STACK_PTR, STACK_INV, STACK_MP, STACK_INV, STACK_INV},
825 {STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_R8, STACK_INV, STACK_INV, STACK_INV, STACK_R8},
826 {STACK_INV, STACK_MP, STACK_INV, STACK_MP, STACK_INV, STACK_PTR, STACK_INV, STACK_INV},
827 {STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
828 {STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
829 {STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_R8, STACK_INV, STACK_INV, STACK_INV, STACK_R4}
834 STACK_INV, STACK_I4, STACK_I8, STACK_PTR, STACK_R8, STACK_INV, STACK_INV, STACK_INV, STACK_R4
837 /* reduce the size of this table */
839 bin_int_table [STACK_MAX] [STACK_MAX] = {
840 {STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
841 {STACK_INV, STACK_I4, STACK_INV, STACK_PTR, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
842 {STACK_INV, STACK_INV, STACK_I8, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
843 {STACK_INV, STACK_PTR, STACK_INV, STACK_PTR, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
844 {STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
845 {STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
846 {STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
847 {STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV}
851 bin_comp_table [STACK_MAX] [STACK_MAX] = {
852 /* Inv i L p F & O vt r4 */
854 {0, 1, 0, 1, 0, 0, 0, 0}, /* i, int32 */
855 {0, 0, 1, 0, 0, 0, 0, 0}, /* L, int64 */
856 {0, 1, 0, 1, 0, 2, 4, 0}, /* p, ptr */
857 {0, 0, 0, 0, 1, 0, 0, 0, 1}, /* F, R8 */
858 {0, 0, 0, 2, 0, 1, 0, 0}, /* &, managed pointer */
859 {0, 0, 0, 4, 0, 0, 3, 0}, /* O, reference */
860 {0, 0, 0, 0, 0, 0, 0, 0}, /* vt value type */
861 {0, 0, 0, 0, 1, 0, 0, 0, 1}, /* r, r4 */
864 /* reduce the size of this table */
866 shift_table [STACK_MAX] [STACK_MAX] = {
867 {STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
868 {STACK_INV, STACK_I4, STACK_INV, STACK_I4, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
869 {STACK_INV, STACK_I8, STACK_INV, STACK_I8, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
870 {STACK_INV, STACK_PTR, STACK_INV, STACK_PTR, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
871 {STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
872 {STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
873 {STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV},
874 {STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV, STACK_INV}
878 * Tables to map from the non-specific opcode to the matching
879 * type-specific opcode.
881 /* handles from CEE_ADD to CEE_SHR_UN (CEE_REM_UN for floats) */
883 binops_op_map [STACK_MAX] = {
884 0, OP_IADD-CEE_ADD, OP_LADD-CEE_ADD, OP_PADD-CEE_ADD, OP_FADD-CEE_ADD, OP_PADD-CEE_ADD, 0, 0, OP_RADD-CEE_ADD
887 /* handles from CEE_NEG to CEE_CONV_U8 */
889 unops_op_map [STACK_MAX] = {
890 0, OP_INEG-CEE_NEG, OP_LNEG-CEE_NEG, OP_PNEG-CEE_NEG, OP_FNEG-CEE_NEG, OP_PNEG-CEE_NEG, 0, 0, OP_RNEG-CEE_NEG
893 /* handles from CEE_CONV_U2 to CEE_SUB_OVF_UN */
895 ovfops_op_map [STACK_MAX] = {
896 0, OP_ICONV_TO_U2-CEE_CONV_U2, OP_LCONV_TO_U2-CEE_CONV_U2, OP_PCONV_TO_U2-CEE_CONV_U2, OP_FCONV_TO_U2-CEE_CONV_U2, OP_PCONV_TO_U2-CEE_CONV_U2, OP_PCONV_TO_U2-CEE_CONV_U2, 0, OP_RCONV_TO_U2-CEE_CONV_U2
899 /* handles from CEE_CONV_OVF_I1_UN to CEE_CONV_OVF_U_UN */
901 ovf2ops_op_map [STACK_MAX] = {
902 0, OP_ICONV_TO_OVF_I1_UN-CEE_CONV_OVF_I1_UN, OP_LCONV_TO_OVF_I1_UN-CEE_CONV_OVF_I1_UN, OP_PCONV_TO_OVF_I1_UN-CEE_CONV_OVF_I1_UN, OP_FCONV_TO_OVF_I1_UN-CEE_CONV_OVF_I1_UN, OP_PCONV_TO_OVF_I1_UN-CEE_CONV_OVF_I1_UN, 0, 0, OP_RCONV_TO_OVF_I1_UN-CEE_CONV_OVF_I1_UN
905 /* handles from CEE_CONV_OVF_I1 to CEE_CONV_OVF_U8 */
907 ovf3ops_op_map [STACK_MAX] = {
908 0, OP_ICONV_TO_OVF_I1-CEE_CONV_OVF_I1, OP_LCONV_TO_OVF_I1-CEE_CONV_OVF_I1, OP_PCONV_TO_OVF_I1-CEE_CONV_OVF_I1, OP_FCONV_TO_OVF_I1-CEE_CONV_OVF_I1, OP_PCONV_TO_OVF_I1-CEE_CONV_OVF_I1, 0, 0, OP_RCONV_TO_OVF_I1-CEE_CONV_OVF_I1
911 /* handles from CEE_BEQ to CEE_BLT_UN */
913 beqops_op_map [STACK_MAX] = {
914 0, OP_IBEQ-CEE_BEQ, OP_LBEQ-CEE_BEQ, OP_PBEQ-CEE_BEQ, OP_FBEQ-CEE_BEQ, OP_PBEQ-CEE_BEQ, OP_PBEQ-CEE_BEQ, 0, OP_FBEQ-CEE_BEQ
917 /* handles from CEE_CEQ to CEE_CLT_UN */
919 ceqops_op_map [STACK_MAX] = {
920 0, OP_ICEQ-OP_CEQ, OP_LCEQ-OP_CEQ, OP_PCEQ-OP_CEQ, OP_FCEQ-OP_CEQ, OP_PCEQ-OP_CEQ, OP_PCEQ-OP_CEQ, 0, OP_RCEQ-OP_CEQ
924 * Sets ins->type (the type on the eval stack) according to the
925 * type of the opcode and the arguments to it.
926 * Invalid IL code is marked by setting ins->type to the invalid value STACK_INV.
928 * FIXME: this function sets ins->type unconditionally in some cases, but
929 * it should set it to invalid for some types (a conv.x on an object)
932 type_from_op (MonoCompile *cfg, MonoInst *ins, MonoInst *src1, MonoInst *src2)
934 switch (ins->opcode) {
941 /* FIXME: check unverifiable args for STACK_MP */
942 ins->type = bin_num_table [src1->type] [src2->type];
943 ins->opcode += binops_op_map [ins->type];
950 ins->type = bin_int_table [src1->type] [src2->type];
951 ins->opcode += binops_op_map [ins->type];
956 ins->type = shift_table [src1->type] [src2->type];
957 ins->opcode += binops_op_map [ins->type];
962 ins->type = bin_comp_table [src1->type] [src2->type] ? STACK_I4: STACK_INV;
963 if ((src1->type == STACK_I8) || ((SIZEOF_VOID_P == 8) && ((src1->type == STACK_PTR) || (src1->type == STACK_OBJ) || (src1->type == STACK_MP))))
964 ins->opcode = OP_LCOMPARE;
965 else if (src1->type == STACK_R4)
966 ins->opcode = OP_RCOMPARE;
967 else if (src1->type == STACK_R8)
968 ins->opcode = OP_FCOMPARE;
970 ins->opcode = OP_ICOMPARE;
972 case OP_ICOMPARE_IMM:
973 ins->type = bin_comp_table [src1->type] [src1->type] ? STACK_I4 : STACK_INV;
974 if ((src1->type == STACK_I8) || ((SIZEOF_VOID_P == 8) && ((src1->type == STACK_PTR) || (src1->type == STACK_OBJ) || (src1->type == STACK_MP))))
975 ins->opcode = OP_LCOMPARE_IMM;
987 ins->opcode += beqops_op_map [src1->type];
990 ins->type = bin_comp_table [src1->type] [src2->type] ? STACK_I4: STACK_INV;
991 ins->opcode += ceqops_op_map [src1->type];
997 ins->type = (bin_comp_table [src1->type] [src2->type] & 1) ? STACK_I4: STACK_INV;
998 ins->opcode += ceqops_op_map [src1->type];
1002 ins->type = neg_table [src1->type];
1003 ins->opcode += unops_op_map [ins->type];
1006 if (src1->type >= STACK_I4 && src1->type <= STACK_PTR)
1007 ins->type = src1->type;
1009 ins->type = STACK_INV;
1010 ins->opcode += unops_op_map [ins->type];
1016 ins->type = STACK_I4;
1017 ins->opcode += unops_op_map [src1->type];
1020 ins->type = STACK_R8;
1021 switch (src1->type) {
1024 ins->opcode = OP_ICONV_TO_R_UN;
1027 ins->opcode = OP_LCONV_TO_R_UN;
1031 case CEE_CONV_OVF_I1:
1032 case CEE_CONV_OVF_U1:
1033 case CEE_CONV_OVF_I2:
1034 case CEE_CONV_OVF_U2:
1035 case CEE_CONV_OVF_I4:
1036 case CEE_CONV_OVF_U4:
1037 ins->type = STACK_I4;
1038 ins->opcode += ovf3ops_op_map [src1->type];
1040 case CEE_CONV_OVF_I_UN:
1041 case CEE_CONV_OVF_U_UN:
1042 ins->type = STACK_PTR;
1043 ins->opcode += ovf2ops_op_map [src1->type];
1045 case CEE_CONV_OVF_I1_UN:
1046 case CEE_CONV_OVF_I2_UN:
1047 case CEE_CONV_OVF_I4_UN:
1048 case CEE_CONV_OVF_U1_UN:
1049 case CEE_CONV_OVF_U2_UN:
1050 case CEE_CONV_OVF_U4_UN:
1051 ins->type = STACK_I4;
1052 ins->opcode += ovf2ops_op_map [src1->type];
1055 ins->type = STACK_PTR;
1056 switch (src1->type) {
1058 ins->opcode = OP_ICONV_TO_U;
1062 #if SIZEOF_VOID_P == 8
1063 ins->opcode = OP_LCONV_TO_U;
1065 ins->opcode = OP_MOVE;
1069 ins->opcode = OP_LCONV_TO_U;
1072 ins->opcode = OP_FCONV_TO_U;
1078 ins->type = STACK_I8;
1079 ins->opcode += unops_op_map [src1->type];
1081 case CEE_CONV_OVF_I8:
1082 case CEE_CONV_OVF_U8:
1083 ins->type = STACK_I8;
1084 ins->opcode += ovf3ops_op_map [src1->type];
1086 case CEE_CONV_OVF_U8_UN:
1087 case CEE_CONV_OVF_I8_UN:
1088 ins->type = STACK_I8;
1089 ins->opcode += ovf2ops_op_map [src1->type];
1092 ins->type = cfg->r4_stack_type;
1093 ins->opcode += unops_op_map [src1->type];
1096 ins->type = STACK_R8;
1097 ins->opcode += unops_op_map [src1->type];
1100 ins->type = STACK_R8;
1104 ins->type = STACK_I4;
1105 ins->opcode += ovfops_op_map [src1->type];
1108 case CEE_CONV_OVF_I:
1109 case CEE_CONV_OVF_U:
1110 ins->type = STACK_PTR;
1111 ins->opcode += ovfops_op_map [src1->type];
1114 case CEE_ADD_OVF_UN:
1116 case CEE_MUL_OVF_UN:
1118 case CEE_SUB_OVF_UN:
1119 ins->type = bin_num_table [src1->type] [src2->type];
1120 ins->opcode += ovfops_op_map [src1->type];
1121 if (ins->type == STACK_R8)
1122 ins->type = STACK_INV;
1124 case OP_LOAD_MEMBASE:
1125 ins->type = STACK_PTR;
1127 case OP_LOADI1_MEMBASE:
1128 case OP_LOADU1_MEMBASE:
1129 case OP_LOADI2_MEMBASE:
1130 case OP_LOADU2_MEMBASE:
1131 case OP_LOADI4_MEMBASE:
1132 case OP_LOADU4_MEMBASE:
1133 ins->type = STACK_PTR;
1135 case OP_LOADI8_MEMBASE:
1136 ins->type = STACK_I8;
1138 case OP_LOADR4_MEMBASE:
1139 ins->type = cfg->r4_stack_type;
1141 case OP_LOADR8_MEMBASE:
1142 ins->type = STACK_R8;
1145 g_error ("opcode 0x%04x not handled in type from op", ins->opcode);
1149 if (ins->type == STACK_MP)
1150 ins->klass = mono_defaults.object_class;
1155 STACK_I4, STACK_I4, STACK_I4, STACK_I4, STACK_I4, STACK_I4, STACK_I8, STACK_PTR, STACK_R8, STACK_R8, STACK_OBJ
1161 param_table [STACK_MAX] [STACK_MAX] = {
1166 check_values_to_signature (MonoInst *args, MonoType *this, MonoMethodSignature *sig) {
1170 switch (args->type) {
1180 for (i = 0; i < sig->param_count; ++i) {
1181 switch (args [i].type) {
1185 if (!sig->params [i]->byref)
1189 if (sig->params [i]->byref)
1191 switch (sig->params [i]->type) {
1192 case MONO_TYPE_CLASS:
1193 case MONO_TYPE_STRING:
1194 case MONO_TYPE_OBJECT:
1195 case MONO_TYPE_SZARRAY:
1196 case MONO_TYPE_ARRAY:
1203 if (sig->params [i]->byref)
1205 if (sig->params [i]->type != MONO_TYPE_R4 && sig->params [i]->type != MONO_TYPE_R8)
1214 /*if (!param_table [args [i].type] [sig->params [i]->type])
1222 * When we need a pointer to the current domain many times in a method, we
1223 * call mono_domain_get() once and we store the result in a local variable.
1224 * This function returns the variable that represents the MonoDomain*.
1226 inline static MonoInst *
1227 mono_get_domainvar (MonoCompile *cfg)
1229 if (!cfg->domainvar)
1230 cfg->domainvar = mono_compile_create_var (cfg, &mono_defaults.int_class->byval_arg, OP_LOCAL);
1231 return cfg->domainvar;
1235 * The got_var contains the address of the Global Offset Table when AOT
1239 mono_get_got_var (MonoCompile *cfg)
1241 #ifdef MONO_ARCH_NEED_GOT_VAR
1242 if (!cfg->compile_aot)
1244 if (!cfg->got_var) {
1245 cfg->got_var = mono_compile_create_var (cfg, &mono_defaults.int_class->byval_arg, OP_LOCAL);
1247 return cfg->got_var;
1254 mono_get_vtable_var (MonoCompile *cfg)
1256 g_assert (cfg->generic_sharing_context);
1258 if (!cfg->rgctx_var) {
1259 cfg->rgctx_var = mono_compile_create_var (cfg, &mono_defaults.int_class->byval_arg, OP_LOCAL);
1260 /* force the var to be stack allocated */
1261 cfg->rgctx_var->flags |= MONO_INST_VOLATILE;
1264 return cfg->rgctx_var;
/*
 * type_from_stack_type:
 * Map an evaluation-stack type (STACK_*) back to a representative
 * MonoType*. Unhandled stack types abort via g_error.
 */
1268 type_from_stack_type (MonoInst *ins) {
1269 switch (ins->type) {
1270 case STACK_I4: return &mono_defaults.int32_class->byval_arg;
1271 case STACK_I8: return &mono_defaults.int64_class->byval_arg;
1272 case STACK_PTR: return &mono_defaults.int_class->byval_arg;
1273 case STACK_R4: return &mono_defaults.single_class->byval_arg;
1274 case STACK_R8: return &mono_defaults.double_class->byval_arg;
/* Managed pointers carry their pointed-to class in ins->klass. */
1276 return &ins->klass->this_arg;
1277 case STACK_OBJ: return &mono_defaults.object_class->byval_arg;
1278 case STACK_VTYPE: return &ins->klass->byval_arg;
1280 g_error ("stack type %d to monotype not handled\n", ins->type);
/*
 * type_to_stack_type:
 * Map a MonoType to its evaluation-stack type (STACK_*), after
 * stripping enum/underlying-type wrappers.
 * NOTE(review): most case labels and return statements are elided in this dump.
 */
1285 static G_GNUC_UNUSED int
1286 type_to_stack_type (MonoCompile *cfg, MonoType *t)
1288 t = mono_type_get_underlying_type (t);
1300 case MONO_TYPE_FNPTR:
1302 case MONO_TYPE_CLASS:
1303 case MONO_TYPE_STRING:
1304 case MONO_TYPE_OBJECT:
1305 case MONO_TYPE_SZARRAY:
1306 case MONO_TYPE_ARRAY:
/* R4 maps to cfg->r4_stack_type, which varies with soft-float config. */
1312 return cfg->r4_stack_type;
1315 case MONO_TYPE_VALUETYPE:
1316 case MONO_TYPE_TYPEDBYREF:
1318 case MONO_TYPE_GENERICINST:
1319 if (mono_type_generic_inst_is_valuetype (t))
1325 g_assert_not_reached ();
/*
 * array_access_to_klass:
 * Map a CEE_LDELEM_*/CEE_STELEM_* opcode to the element MonoClass it
 * accesses.
 * NOTE(review): the case labels for most returns are elided in this dump;
 * only the LDELEM/STELEM_REF labels survive below.
 */
1332 array_access_to_klass (int opcode)
1336 return mono_defaults.byte_class;
1338 return mono_defaults.uint16_class;
1341 return mono_defaults.int_class;
1344 return mono_defaults.sbyte_class;
1347 return mono_defaults.int16_class;
1350 return mono_defaults.int32_class;
1352 return mono_defaults.uint32_class;
1355 return mono_defaults.int64_class;
1358 return mono_defaults.single_class;
1361 return mono_defaults.double_class;
1362 case CEE_LDELEM_REF:
1363 case CEE_STELEM_REF:
1364 return mono_defaults.object_class;
1366 g_assert_not_reached ();
1372 * We try to share variables when possible
/*
 * mono_compile_get_interface_var:
 * Return a local variable for stack slot SLOT holding a value of INS's
 * stack type, reusing a previously allocated one (cfg->intvars) when the
 * slot+type pair matches; otherwise create a fresh OP_LOCAL.
 */
1375 mono_compile_get_interface_var (MonoCompile *cfg, int slot, MonoInst *ins)
1380 /* inlining can result in deeper stacks */
1381 if (slot >= cfg->header->max_stack)
1382 return mono_compile_create_var (cfg, type_from_stack_type (ins), OP_LOCAL);
/* Index into the shared-variable cache: one entry per (type, slot) pair. */
1384 pos = ins->type - 1 + slot * STACK_MAX;
1386 switch (ins->type) {
1393 if ((vnum = cfg->intvars [pos]))
1394 return cfg->varinfo [vnum];
1395 res = mono_compile_create_var (cfg, type_from_stack_type (ins), OP_LOCAL);
1396 cfg->intvars [pos] = res->inst_c0;
1399 res = mono_compile_create_var (cfg, type_from_stack_type (ins), OP_LOCAL);
/*
 * mono_save_token_info:
 * Under AOT, record the (image, token) pair that produced KEY in
 * cfg->token_info_hash so the AOT compiler can later resolve KEY back
 * to a metadata token.
 */
1405 mono_save_token_info (MonoCompile *cfg, MonoImage *image, guint32 token, gpointer key)
1408 * Don't use this if a generic_context is set, since that means AOT can't
1409 * look up the method using just the image+token.
1410 * table == 0 means this is a reference made from a wrapper.
1412 if (cfg->compile_aot && !cfg->generic_context && (mono_metadata_token_table (token) > 0)) {
1413 MonoJumpInfoToken *jump_info_token = mono_mempool_alloc0 (cfg->mempool, sizeof (MonoJumpInfoToken));
1414 jump_info_token->image = image;
1415 jump_info_token->token = token;
1416 g_hash_table_insert (cfg->token_info_hash, key, jump_info_token);
1421 * This function is called to handle items that are left on the evaluation stack
1422 * at basic block boundaries. What happens is that we save the values to local variables
1423 * and we reload them later when first entering the target basic block (with the
1424 * handle_loaded_temps () function).
1425 * A single joint point will use the same variables (stored in the array bb->out_stack or
1426 * bb->in_stack, if the basic block is before or after the joint point).
1428 * This function needs to be called _before_ emitting the last instruction of
1429 * the bb (i.e. before emitting a branch).
1430 * If the stack merge fails at a join point, cfg->unverifiable is set.
/* See the comment block above for the full contract: spill the COUNT
 * values in SP to locals at a bb boundary so successor blocks can reload
 * them; sets cfg->unverifiable on a stack-merge mismatch. */
1433 handle_stack_args (MonoCompile *cfg, MonoInst **sp, int count)
1436 MonoBasicBlock *bb = cfg->cbb;
1437 MonoBasicBlock *outb;
1438 MonoInst *inst, **locals;
1443 if (cfg->verbose_level > 3)
1444 printf ("%d item(s) on exit from B%d\n", count, bb->block_num);
/* First exit from this bb: decide which variables form its out_stack. */
1445 if (!bb->out_scount) {
1446 bb->out_scount = count;
1447 //printf ("bblock %d has out:", bb->block_num);
/* Prefer sharing an existing successor's in_stack as our out_stack. */
1449 for (i = 0; i < bb->out_count; ++i) {
1450 outb = bb->out_bb [i];
1451 /* exception handlers are linked, but they should not be considered for stack args */
1452 if (outb->flags & BB_EXCEPTION_HANDLER)
1454 //printf (" %d", outb->block_num);
1455 if (outb->in_stack) {
1457 bb->out_stack = outb->in_stack;
/* No successor had an in_stack yet: allocate fresh spill variables. */
1463 bb->out_stack = mono_mempool_alloc (cfg->mempool, sizeof (MonoInst*) * count);
1464 for (i = 0; i < count; ++i) {
1466 * try to reuse temps already allocated for this purpouse, if they occupy the same
1467 * stack slot and if they are of the same type.
1468 * This won't cause conflicts since if 'local' is used to
1469 * store one of the values in the in_stack of a bblock, then
1470 * the same variable will be used for the same outgoing stack
1472 * This doesn't work when inlining methods, since the bblocks
1473 * in the inlined methods do not inherit their in_stack from
1474 * the bblock they are inlined to. See bug #58863 for an
1477 if (cfg->inlined_method)
1478 bb->out_stack [i] = mono_compile_create_var (cfg, type_from_stack_type (sp [i]), OP_LOCAL);
1480 bb->out_stack [i] = mono_compile_get_interface_var (cfg, i, sp [i]);
/* Propagate our out_stack to each successor's in_stack; depths must agree. */
1485 for (i = 0; i < bb->out_count; ++i) {
1486 outb = bb->out_bb [i];
1487 /* exception handlers are linked, but they should not be considered for stack args */
1488 if (outb->flags & BB_EXCEPTION_HANDLER)
1490 if (outb->in_scount) {
1491 if (outb->in_scount != bb->out_scount) {
1492 cfg->unverifiable = TRUE;
1495 continue; /* check they are the same locals */
1497 outb->in_scount = count;
1498 outb->in_stack = bb->out_stack;
/* Emit the actual spill stores, replacing sp entries with the locals. */
1501 locals = bb->out_stack;
1503 for (i = 0; i < count; ++i) {
1504 EMIT_NEW_TEMPSTORE (cfg, inst, locals [i]->inst_c0, sp [i]);
1505 inst->cil_code = sp [i]->cil_code;
1506 sp [i] = locals [i];
1507 if (cfg->verbose_level > 3)
1508 printf ("storing %d to temp %d\n", i, (int)locals [i]->inst_c0);
1512 * It is possible that the out bblocks already have in_stack assigned, and
1513 * the in_stacks differ. In this case, we will store to all the different
1520 /* Find a bblock which has a different in_stack */
1522 while (bindex < bb->out_count) {
1523 outb = bb->out_bb [bindex];
1524 /* exception handlers are linked, but they should not be considered for stack args */
1525 if (outb->flags & BB_EXCEPTION_HANDLER) {
1529 if (outb->in_stack != locals) {
1530 for (i = 0; i < count; ++i) {
1531 EMIT_NEW_TEMPSTORE (cfg, inst, outb->in_stack [i]->inst_c0, sp [i]);
1532 inst->cil_code = sp [i]->cil_code;
1533 sp [i] = locals [i];
1534 if (cfg->verbose_level > 3)
1535 printf ("storing %d to temp %d\n", i, (int)outb->in_stack [i]->inst_c0);
1537 locals = outb->in_stack;
1546 /* Emit code which loads interface_offsets [klass->interface_id]
1547 * The array is stored in memory before vtable.
/*
 * mini_emit_load_intf_reg_vtable:
 * Load interface_offsets [klass->interface_id] into INTF_REG; the table
 * lives at negative offsets before the vtable (see comment above).
 * Under AOT the adjusted id is emitted as a patchable constant.
 */
1550 mini_emit_load_intf_reg_vtable (MonoCompile *cfg, int intf_reg, int vtable_reg, MonoClass *klass)
1552 if (cfg->compile_aot) {
1553 int ioffset_reg = alloc_preg (cfg);
1554 int iid_reg = alloc_preg (cfg);
1556 MONO_EMIT_NEW_AOTCONST (cfg, iid_reg, klass, MONO_PATCH_INFO_ADJUSTED_IID);
1557 MONO_EMIT_NEW_BIALU (cfg, OP_PADD, ioffset_reg, iid_reg, vtable_reg);
1558 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, intf_reg, ioffset_reg, 0);
/* Non-AOT: the slot offset is a compile-time constant before the vtable. */
1561 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, intf_reg, vtable_reg, -((klass->interface_id + 1) * SIZEOF_VOID_P));
/*
 * mini_emit_interface_bitmap_check:
 * Emit IR that loads into INTF_BIT_REG a nonzero value iff bit
 * klass->interface_id is set in the interface bitmap found at
 * BASE_REG + OFFSET. Two strategies: a JIT icall when the bitmap is
 * compressed, otherwise a direct byte load + mask.
 */
1566 mini_emit_interface_bitmap_check (MonoCompile *cfg, int intf_bit_reg, int base_reg, int offset, MonoClass *klass)
1568 int ibitmap_reg = alloc_preg (cfg);
1569 #ifdef COMPRESSED_INTERFACE_BITMAP
1571 MonoInst *res, *ins;
1572 NEW_LOAD_MEMBASE (cfg, ins, OP_LOAD_MEMBASE, ibitmap_reg, base_reg, offset);
1573 MONO_ADD_INS (cfg->cbb, ins);
1575 if (cfg->compile_aot)
1576 EMIT_NEW_AOTCONST (cfg, args [1], MONO_PATCH_INFO_IID, klass);
1578 EMIT_NEW_ICONST (cfg, args [1], klass->interface_id);
/* Compressed bitmap: defer the bit test to a runtime helper. */
1579 res = mono_emit_jit_icall (cfg, mono_class_interface_match, args);
1580 MONO_EMIT_NEW_UNALU (cfg, OP_MOVE, intf_bit_reg, res->dreg);
1582 int ibitmap_byte_reg = alloc_preg (cfg);
1584 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, ibitmap_reg, base_reg, offset);
1586 if (cfg->compile_aot) {
/* AOT: the interface id is a patch constant, so compute byte index
 * (iid >> 3) and bit mask (1 << (iid & 7)) in emitted code. */
1587 int iid_reg = alloc_preg (cfg);
1588 int shifted_iid_reg = alloc_preg (cfg);
1589 int ibitmap_byte_address_reg = alloc_preg (cfg);
1590 int masked_iid_reg = alloc_preg (cfg);
1591 int iid_one_bit_reg = alloc_preg (cfg);
1592 int iid_bit_reg = alloc_preg (cfg);
1593 MONO_EMIT_NEW_AOTCONST (cfg, iid_reg, klass, MONO_PATCH_INFO_IID);
1594 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_SHR_IMM, shifted_iid_reg, iid_reg, 3);
1595 MONO_EMIT_NEW_BIALU (cfg, OP_PADD, ibitmap_byte_address_reg, ibitmap_reg, shifted_iid_reg);
1596 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADU1_MEMBASE, ibitmap_byte_reg, ibitmap_byte_address_reg, 0);
1597 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_IAND_IMM, masked_iid_reg, iid_reg, 7);
1598 MONO_EMIT_NEW_ICONST (cfg, iid_one_bit_reg, 1);
1599 MONO_EMIT_NEW_BIALU (cfg, OP_ISHL, iid_bit_reg, iid_one_bit_reg, masked_iid_reg);
1600 MONO_EMIT_NEW_BIALU (cfg, OP_IAND, intf_bit_reg, ibitmap_byte_reg, iid_bit_reg);
/* JIT: both the byte offset and the bit mask fold to constants. */
1602 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADI1_MEMBASE, ibitmap_byte_reg, ibitmap_reg, klass->interface_id >> 3);
1603 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_AND_IMM, intf_bit_reg, ibitmap_byte_reg, 1 << (klass->interface_id & 7));
1609 * Emit code which loads into "intf_bit_reg" a nonzero value if the MonoClass
1610 * stored in "klass_reg" implements the interface "klass".
/* Bitmap check against a MonoClass: the bitmap lives at
 * MonoClass.interface_bitmap (see comment above). */
1613 mini_emit_load_intf_bit_reg_class (MonoCompile *cfg, int intf_bit_reg, int klass_reg, MonoClass *klass)
1615 mini_emit_interface_bitmap_check (cfg, intf_bit_reg, klass_reg, MONO_STRUCT_OFFSET (MonoClass, interface_bitmap), klass);
1619 * Emit code which loads into "intf_bit_reg" a nonzero value if the MonoVTable
1620 * stored in "vtable_reg" implements the interface "klass".
/* Bitmap check against a MonoVTable: the bitmap lives at
 * MonoVTable.interface_bitmap (see comment above). */
1623 mini_emit_load_intf_bit_reg_vtable (MonoCompile *cfg, int intf_bit_reg, int vtable_reg, MonoClass *klass)
1625 mini_emit_interface_bitmap_check (cfg, intf_bit_reg, vtable_reg, MONO_STRUCT_OFFSET (MonoVTable, interface_bitmap), klass);
1629 * Emit code which checks whenever the interface id of @klass is smaller than
1630 * than the value given by max_iid_reg.
/*
 * mini_emit_max_iid_check:
 * Emit a range check: fail if klass->interface_id exceeds the value in
 * MAX_IID_REG. With a FALSE_TARGET we branch there; otherwise we throw
 * InvalidCastException.
 */
1633 mini_emit_max_iid_check (MonoCompile *cfg, int max_iid_reg, MonoClass *klass,
1634 MonoBasicBlock *false_target)
1636 if (cfg->compile_aot) {
1637 int iid_reg = alloc_preg (cfg);
1638 MONO_EMIT_NEW_AOTCONST (cfg, iid_reg, klass, MONO_PATCH_INFO_IID);
1639 MONO_EMIT_NEW_BIALU (cfg, OP_COMPARE, -1, max_iid_reg, iid_reg);
1642 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, max_iid_reg, klass->interface_id);
1644 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBLT_UN, false_target);
1646 MONO_EMIT_NEW_COND_EXC (cfg, LT_UN, "InvalidCastException");
1649 /* Same as above, but obtains max_iid from a vtable */
/* max_iid variant: read max_interface_id out of a MonoVTable first. */
1651 mini_emit_max_iid_check_vtable (MonoCompile *cfg, int vtable_reg, MonoClass *klass,
1652 MonoBasicBlock *false_target)
1654 int max_iid_reg = alloc_preg (cfg);
1656 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADU2_MEMBASE, max_iid_reg, vtable_reg, MONO_STRUCT_OFFSET (MonoVTable, max_interface_id));
1657 mini_emit_max_iid_check (cfg, max_iid_reg, klass, false_target);
1660 /* Same as above, but obtains max_iid from a klass */
/* max_iid variant: read max_interface_id out of a MonoClass first. */
1662 mini_emit_max_iid_check_class (MonoCompile *cfg, int klass_reg, MonoClass *klass,
1663 MonoBasicBlock *false_target)
1665 int max_iid_reg = alloc_preg (cfg);
1667 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADU2_MEMBASE, max_iid_reg, klass_reg, MONO_STRUCT_OFFSET (MonoClass, max_interface_id));
1668 mini_emit_max_iid_check (cfg, max_iid_reg, klass, false_target);
/*
 * mini_emit_isninst_cast_inst:
 * Emit an isinst-style subclass test against KLASS using the supertypes
 * table: load supertypes[idepth-1] of the class in KLASS_REG and compare
 * it to KLASS (from KLASS_INS, an AOT class constant, or an immediate).
 * Branch to TRUE_TARGET on match, FALSE_TARGET on a too-shallow idepth.
 */
1672 mini_emit_isninst_cast_inst (MonoCompile *cfg, int klass_reg, MonoClass *klass, MonoInst *klass_ins, MonoBasicBlock *false_target, MonoBasicBlock *true_target)
1674 int idepth_reg = alloc_preg (cfg);
1675 int stypes_reg = alloc_preg (cfg);
1676 int stype = alloc_preg (cfg);
1678 mono_class_setup_supertypes (klass);
/* Deep hierarchies may exceed the default supertable; check idepth. */
1680 if (klass->idepth > MONO_DEFAULT_SUPERTABLE_SIZE) {
1681 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADU2_MEMBASE, idepth_reg, klass_reg, MONO_STRUCT_OFFSET (MonoClass, idepth));
1682 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, idepth_reg, klass->idepth);
1683 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBLT_UN, false_target);
1685 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, stypes_reg, klass_reg, MONO_STRUCT_OFFSET (MonoClass, supertypes));
1686 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, stype, stypes_reg, ((klass->idepth - 1) * SIZEOF_VOID_P));
1688 MONO_EMIT_NEW_BIALU (cfg, OP_COMPARE, -1, stype, klass_ins->dreg);
1689 } else if (cfg->compile_aot) {
1690 int const_reg = alloc_preg (cfg);
1691 MONO_EMIT_NEW_CLASSCONST (cfg, const_reg, klass);
1692 MONO_EMIT_NEW_BIALU (cfg, OP_COMPARE, -1, stype, const_reg);
1694 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, stype, klass);
1696 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBEQ, true_target);
/* Convenience wrapper: isinst test without an explicit class instruction. */
1700 mini_emit_isninst_cast (MonoCompile *cfg, int klass_reg, MonoClass *klass, MonoBasicBlock *false_target, MonoBasicBlock *true_target)
1702 mini_emit_isninst_cast_inst (cfg, klass_reg, klass, NULL, false_target, true_target);
/*
 * mini_emit_iface_cast:
 * Interface cast via a vtable register: range-check the interface id,
 * test the interface bitmap bit, then either branch to TRUE_TARGET on
 * success or throw InvalidCastException on failure.
 */
1706 mini_emit_iface_cast (MonoCompile *cfg, int vtable_reg, MonoClass *klass, MonoBasicBlock *false_target, MonoBasicBlock *true_target)
1708 int intf_reg = alloc_preg (cfg);
1710 mini_emit_max_iid_check_vtable (cfg, vtable_reg, klass, false_target);
1711 mini_emit_load_intf_bit_reg_vtable (cfg, intf_reg, vtable_reg, klass);
1712 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, intf_reg, 0);
1714 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBNE_UN, true_target);
1716 MONO_EMIT_NEW_COND_EXC (cfg, EQ, "InvalidCastException");
1720 * Variant of the above that takes a register to the class, not the vtable.
/* Interface cast variant operating on a MonoClass* register instead of
 * a vtable register (see comment above). */
1723 mini_emit_iface_class_cast (MonoCompile *cfg, int klass_reg, MonoClass *klass, MonoBasicBlock *false_target, MonoBasicBlock *true_target)
1725 int intf_bit_reg = alloc_preg (cfg);
1727 mini_emit_max_iid_check_class (cfg, klass_reg, klass, false_target);
1728 mini_emit_load_intf_bit_reg_class (cfg, intf_bit_reg, klass_reg, klass);
1729 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, intf_bit_reg, 0);
1731 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBNE_UN, true_target);
1733 MONO_EMIT_NEW_COND_EXC (cfg, EQ, "InvalidCastException");
/*
 * mini_emit_class_check_inst:
 * Emit an exact class-equality check of KLASS_REG against KLASS
 * (taken from KLASS_INST, an AOT class constant, or an immediate);
 * throws InvalidCastException on mismatch.
 */
1737 mini_emit_class_check_inst (MonoCompile *cfg, int klass_reg, MonoClass *klass, MonoInst *klass_inst)
1740 MONO_EMIT_NEW_BIALU (cfg, OP_COMPARE, -1, klass_reg, klass_inst->dreg);
1741 } else if (cfg->compile_aot) {
1742 int const_reg = alloc_preg (cfg);
1743 MONO_EMIT_NEW_CLASSCONST (cfg, const_reg, klass);
1744 MONO_EMIT_NEW_BIALU (cfg, OP_COMPARE, -1, klass_reg, const_reg);
1746 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, klass_reg, klass);
1748 MONO_EMIT_NEW_COND_EXC (cfg, NE_UN, "InvalidCastException");
/* Convenience wrapper: exact class check with no class instruction. */
1752 mini_emit_class_check (MonoCompile *cfg, int klass_reg, MonoClass *klass)
1754 mini_emit_class_check_inst (cfg, klass_reg, klass, NULL);
/*
 * mini_emit_class_check_branch:
 * Compare KLASS_REG against KLASS and branch to TARGET using BRANCH_OP
 * instead of throwing (used by castclass for enum/array special cases).
 */
1758 mini_emit_class_check_branch (MonoCompile *cfg, int klass_reg, MonoClass *klass, int branch_op, MonoBasicBlock *target)
1760 if (cfg->compile_aot) {
1761 int const_reg = alloc_preg (cfg);
1762 MONO_EMIT_NEW_CLASSCONST (cfg, const_reg, klass);
1763 MONO_EMIT_NEW_BIALU (cfg, OP_COMPARE, -1, klass_reg, const_reg);
1765 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, klass_reg, klass);
1767 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, branch_op, target);
1771 mini_emit_castclass (MonoCompile *cfg, int obj_reg, int klass_reg, MonoClass *klass, MonoBasicBlock *object_is_null);
/*
 * mini_emit_castclass_inst:
 * Emit a castclass check of the class in KLASS_REG against KLASS,
 * throwing InvalidCastException on failure. Arrays are handled by
 * checking rank and recursing on the element (cast_class); other types
 * use the supertypes-table check. OBJ_REG == -1 skips the vector
 * (bounds == NULL) check for nested array elements.
 * NOTE(review): some lines (array branch header, else arms) are elided
 * in this dump.
 */
1774 mini_emit_castclass_inst (MonoCompile *cfg, int obj_reg, int klass_reg, MonoClass *klass, MonoInst *klass_inst, MonoBasicBlock *object_is_null)
1777 int rank_reg = alloc_preg (cfg);
1778 int eclass_reg = alloc_preg (cfg);
1780 g_assert (!klass_inst);
/* Rank must match exactly, then the element class is checked below. */
1781 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADU1_MEMBASE, rank_reg, klass_reg, MONO_STRUCT_OFFSET (MonoClass, rank));
1782 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, rank_reg, klass->rank);
1783 MONO_EMIT_NEW_COND_EXC (cfg, NE_UN, "InvalidCastException");
1784 // MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, vtable_reg, MONO_STRUCT_OFFSET (MonoVTable, klass));
1785 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, eclass_reg, klass_reg, MONO_STRUCT_OFFSET (MonoClass, cast_class));
/* Special-case element types whose cast rules involve enums. */
1786 if (klass->cast_class == mono_defaults.object_class) {
1787 int parent_reg = alloc_preg (cfg);
1788 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, parent_reg, eclass_reg, MONO_STRUCT_OFFSET (MonoClass, parent));
1789 mini_emit_class_check_branch (cfg, parent_reg, mono_defaults.enum_class->parent, OP_PBNE_UN, object_is_null);
1790 mini_emit_class_check (cfg, eclass_reg, mono_defaults.enum_class);
1791 } else if (klass->cast_class == mono_defaults.enum_class->parent) {
1792 mini_emit_class_check_branch (cfg, eclass_reg, mono_defaults.enum_class->parent, OP_PBEQ, object_is_null);
1793 mini_emit_class_check (cfg, eclass_reg, mono_defaults.enum_class);
1794 } else if (klass->cast_class == mono_defaults.enum_class) {
1795 mini_emit_class_check (cfg, eclass_reg, mono_defaults.enum_class);
1796 } else if (klass->cast_class->flags & TYPE_ATTRIBUTE_INTERFACE) {
1797 mini_emit_iface_class_cast (cfg, eclass_reg, klass->cast_class, NULL, NULL);
1799 // Pass -1 as obj_reg to skip the check below for arrays of arrays
1800 mini_emit_castclass (cfg, -1, eclass_reg, klass->cast_class, object_is_null);
1803 if ((klass->rank == 1) && (klass->byval_arg.type == MONO_TYPE_SZARRAY) && (obj_reg != -1)) {
1804 /* Check that the object is a vector too */
1805 int bounds_reg = alloc_preg (cfg);
1806 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, bounds_reg, obj_reg, MONO_STRUCT_OFFSET (MonoArray, bounds));
1807 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, bounds_reg, 0);
1808 MONO_EMIT_NEW_COND_EXC (cfg, NE_UN, "InvalidCastException");
/* Non-array path: supertypes-table subclass check, throwing on failure. */
1811 int idepth_reg = alloc_preg (cfg);
1812 int stypes_reg = alloc_preg (cfg);
1813 int stype = alloc_preg (cfg);
1815 mono_class_setup_supertypes (klass);
1817 if (klass->idepth > MONO_DEFAULT_SUPERTABLE_SIZE) {
1818 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADU2_MEMBASE, idepth_reg, klass_reg, MONO_STRUCT_OFFSET (MonoClass, idepth));
1819 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, idepth_reg, klass->idepth);
1820 MONO_EMIT_NEW_COND_EXC (cfg, LT_UN, "InvalidCastException");
1822 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, stypes_reg, klass_reg, MONO_STRUCT_OFFSET (MonoClass, supertypes));
1823 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, stype, stypes_reg, ((klass->idepth - 1) * SIZEOF_VOID_P));
1824 mini_emit_class_check_inst (cfg, stype, klass, klass_inst);
/* Convenience wrapper: castclass with no explicit class instruction. */
1829 mini_emit_castclass (MonoCompile *cfg, int obj_reg, int klass_reg, MonoClass *klass, MonoBasicBlock *object_is_null)
1831 mini_emit_castclass_inst (cfg, obj_reg, klass_reg, klass, NULL, object_is_null);
/*
 * mini_emit_memset:
 * Emit IR to set SIZE bytes at DESTREG+OFFSET to VAL (only VAL == 0 is
 * supported, per the assert). Small aligned sizes use a single immediate
 * store; larger regions loop with register stores sized by alignment.
 * NOTE(review): loop headers / size-decrement lines are elided in this dump.
 */
1835 mini_emit_memset (MonoCompile *cfg, int destreg, int offset, int size, int val, int align)
1839 g_assert (val == 0);
/* Fast path: one store covers the whole (small, aligned) region. */
1844 if ((size <= SIZEOF_REGISTER) && (size <= align)) {
1847 MONO_EMIT_NEW_STORE_MEMBASE_IMM (cfg, OP_STOREI1_MEMBASE_IMM, destreg, offset, val);
1850 MONO_EMIT_NEW_STORE_MEMBASE_IMM (cfg, OP_STOREI2_MEMBASE_IMM, destreg, offset, val);
1853 MONO_EMIT_NEW_STORE_MEMBASE_IMM (cfg, OP_STOREI4_MEMBASE_IMM, destreg, offset, val);
1855 #if SIZEOF_REGISTER == 8
1857 MONO_EMIT_NEW_STORE_MEMBASE_IMM (cfg, OP_STOREI8_MEMBASE_IMM, destreg, offset, val);
/* Slow path: materialize VAL in a register and store repeatedly. */
1863 val_reg = alloc_preg (cfg);
1865 if (SIZEOF_REGISTER == 8)
1866 MONO_EMIT_NEW_I8CONST (cfg, val_reg, val);
1868 MONO_EMIT_NEW_ICONST (cfg, val_reg, val);
1871 /* This could be optimized further if neccesary */
1873 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI1_MEMBASE_REG, destreg, offset, val_reg);
1880 #if !NO_UNALIGNED_ACCESS
1881 if (SIZEOF_REGISTER == 8) {
1883 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI4_MEMBASE_REG, destreg, offset, val_reg);
1888 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI8_MEMBASE_REG, destreg, offset, val_reg);
1896 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI4_MEMBASE_REG, destreg, offset, val_reg);
1901 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI2_MEMBASE_REG, destreg, offset, val_reg);
1906 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI1_MEMBASE_REG, destreg, offset, val_reg);
/*
 * mini_emit_memcpy:
 * Emit IR to copy SIZE bytes from SRCREG+SOFFSET to DESTREG+DOFFSET,
 * widest-chunk-first according to ALIGN (8/4/2/1 byte loads+stores).
 * NOTE(review): loop headers / offset-advance lines are elided in this dump.
 */
1913 mini_emit_memcpy (MonoCompile *cfg, int destreg, int doffset, int srcreg, int soffset, int size, int align)
1920 /*FIXME arbitrary hack to avoid unbound code expansion.*/
1921 g_assert (size < 10000);
1924 /* This could be optimized further if neccesary */
/* Unaligned tail/head handled one byte at a time. */
1926 cur_reg = alloc_preg (cfg);
1927 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADI1_MEMBASE, cur_reg, srcreg, soffset);
1928 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI1_MEMBASE_REG, destreg, doffset, cur_reg);
1935 #if !NO_UNALIGNED_ACCESS
1936 if (SIZEOF_REGISTER == 8) {
/* 8-byte chunks when the target tolerates unaligned 64-bit access. */
1938 cur_reg = alloc_preg (cfg);
1939 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADI8_MEMBASE, cur_reg, srcreg, soffset);
1940 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI8_MEMBASE_REG, destreg, doffset, cur_reg);
1949 cur_reg = alloc_preg (cfg);
1950 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADI4_MEMBASE, cur_reg, srcreg, soffset);
1951 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI4_MEMBASE_REG, destreg, doffset, cur_reg);
1957 cur_reg = alloc_preg (cfg);
1958 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADI2_MEMBASE, cur_reg, srcreg, soffset);
1959 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI2_MEMBASE_REG, destreg, doffset, cur_reg);
1965 cur_reg = alloc_preg (cfg);
1966 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADI1_MEMBASE, cur_reg, srcreg, soffset);
1967 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI1_MEMBASE_REG, destreg, doffset, cur_reg);
/*
 * emit_tls_set:
 * Emit IR to store SREG1 into the TLS slot TLS_KEY. Under AOT the
 * offset is a patchable constant (OP_TLS_SET_REG); under JIT it is
 * resolved now via mini_get_tls_offset (OP_TLS_SET).
 */
1975 emit_tls_set (MonoCompile *cfg, int sreg1, int tls_key)
1979 if (cfg->compile_aot) {
1980 EMIT_NEW_TLS_OFFSETCONST (cfg, c, tls_key);
1981 MONO_INST_NEW (cfg, ins, OP_TLS_SET_REG);
1983 ins->sreg2 = c->dreg;
1984 MONO_ADD_INS (cfg->cbb, ins);
1986 MONO_INST_NEW (cfg, ins, OP_TLS_SET);
1988 ins->inst_offset = mini_get_tls_offset (tls_key);
1989 MONO_ADD_INS (cfg->cbb, ins);
1996 * Emit IR to push the current LMF onto the LMF stack.
/* Push cfg->lmf_var onto the per-thread LMF stack (see the IR sketch in
 * the comments below). Strategy depends on whether the LMF itself is in
 * TLS or only the lmf_addr is reachable (TLS intrinsic, inlined
 * pthread_getspecific, or a mono_get_lmf_addr icall). */
1999 emit_push_lmf (MonoCompile *cfg)
2002 * Emit IR to push the LMF:
2003 * lmf_addr = <lmf_addr from tls>
2004 * lmf->lmf_addr = lmf_addr
2005 * lmf->prev_lmf = *lmf_addr
2008 int lmf_reg, prev_lmf_reg;
2009 MonoInst *ins, *lmf_ins;
2014 if (cfg->lmf_ir_mono_lmf && mini_tls_get_supported (cfg, TLS_KEY_LMF)) {
2015 /* Load current lmf */
2016 lmf_ins = mono_get_lmf_intrinsic (cfg);
2018 MONO_ADD_INS (cfg->cbb, lmf_ins);
2019 EMIT_NEW_VARLOADA (cfg, ins, cfg->lmf_var, NULL);
2020 lmf_reg = ins->dreg;
2021 /* Save previous_lmf */
2022 EMIT_NEW_STORE_MEMBASE (cfg, ins, OP_STORE_MEMBASE_REG, lmf_reg, MONO_STRUCT_OFFSET (MonoLMF, previous_lmf), lmf_ins->dreg);
/* Publish the new LMF as the thread's current one. */
2024 emit_tls_set (cfg, lmf_reg, TLS_KEY_LMF);
2027 * Store lmf_addr in a variable, so it can be allocated to a global register.
2029 if (!cfg->lmf_addr_var)
2030 cfg->lmf_addr_var = mono_compile_create_var (cfg, &mono_defaults.int_class->byval_arg, OP_LOCAL);
2033 ins = mono_get_jit_tls_intrinsic (cfg);
2035 int jit_tls_dreg = ins->dreg;
2037 MONO_ADD_INS (cfg->cbb, ins);
2038 lmf_reg = alloc_preg (cfg);
2039 EMIT_NEW_BIALU_IMM (cfg, lmf_ins, OP_PADD_IMM, lmf_reg, jit_tls_dreg, MONO_STRUCT_OFFSET (MonoJitTlsData, lmf));
2041 lmf_ins = mono_emit_jit_icall (cfg, mono_get_lmf_addr, NULL);
2044 lmf_ins = mono_get_lmf_addr_intrinsic (cfg);
2046 MONO_ADD_INS (cfg->cbb, lmf_ins);
2049 MonoInst *args [16], *jit_tls_ins, *ins;
2051 /* Inline mono_get_lmf_addr () */
2052 /* jit_tls = pthread_getspecific (mono_jit_tls_id); lmf_addr = &jit_tls->lmf; */
2054 /* Load mono_jit_tls_id */
2055 EMIT_NEW_AOTCONST (cfg, args [0], MONO_PATCH_INFO_JIT_TLS_ID, NULL);
2056 /* call pthread_getspecific () */
2057 jit_tls_ins = mono_emit_jit_icall (cfg, pthread_getspecific, args);
2058 /* lmf_addr = &jit_tls->lmf */
2059 EMIT_NEW_BIALU_IMM (cfg, ins, OP_PADD_IMM, cfg->lmf_addr_var->dreg, jit_tls_ins->dreg, MONO_STRUCT_OFFSET (MonoJitTlsData, lmf));
2062 lmf_ins = mono_emit_jit_icall (cfg, mono_get_lmf_addr, NULL);
2066 lmf_ins->dreg = cfg->lmf_addr_var->dreg;
2068 EMIT_NEW_VARLOADA (cfg, ins, cfg->lmf_var, NULL);
2069 lmf_reg = ins->dreg;
2071 prev_lmf_reg = alloc_preg (cfg);
2072 /* Save previous_lmf */
2073 EMIT_NEW_LOAD_MEMBASE (cfg, ins, OP_LOAD_MEMBASE, prev_lmf_reg, cfg->lmf_addr_var->dreg, 0);
2074 EMIT_NEW_STORE_MEMBASE (cfg, ins, OP_STORE_MEMBASE_REG, lmf_reg, MONO_STRUCT_OFFSET (MonoLMF, previous_lmf), prev_lmf_reg);
/* *lmf_addr = lmf: link our frame at the top of the LMF stack. */
2076 EMIT_NEW_STORE_MEMBASE (cfg, ins, OP_STORE_MEMBASE_REG, cfg->lmf_addr_var->dreg, 0, lmf_reg);
2083 * Emit IR to pop the current LMF from the LMF stack.
/* Pop cfg->lmf_var off the LMF stack: restore previous_lmf either into
 * the TLS slot directly or through the saved lmf_addr. */
2086 emit_pop_lmf (MonoCompile *cfg)
2088 int lmf_reg, lmf_addr_reg, prev_lmf_reg;
2094 EMIT_NEW_VARLOADA (cfg, ins, cfg->lmf_var, NULL);
2095 lmf_reg = ins->dreg;
2097 if (cfg->lmf_ir_mono_lmf && mini_tls_get_supported (cfg, TLS_KEY_LMF)) {
2098 /* Load previous_lmf */
2099 prev_lmf_reg = alloc_preg (cfg);
2100 EMIT_NEW_LOAD_MEMBASE (cfg, ins, OP_LOAD_MEMBASE, prev_lmf_reg, lmf_reg, MONO_STRUCT_OFFSET (MonoLMF, previous_lmf));
/* TLS-resident LMF: write previous_lmf straight back to the TLS slot. */
2102 emit_tls_set (cfg, prev_lmf_reg, TLS_KEY_LMF);
2105 * Emit IR to pop the LMF:
2106 * *(lmf->lmf_addr) = lmf->prev_lmf
2108 /* This could be called before emit_push_lmf () */
2109 if (!cfg->lmf_addr_var)
2110 cfg->lmf_addr_var = mono_compile_create_var (cfg, &mono_defaults.int_class->byval_arg, OP_LOCAL);
2111 lmf_addr_reg = cfg->lmf_addr_var->dreg;
2113 prev_lmf_reg = alloc_preg (cfg);
2114 EMIT_NEW_LOAD_MEMBASE (cfg, ins, OP_LOAD_MEMBASE, prev_lmf_reg, lmf_reg, MONO_STRUCT_OFFSET (MonoLMF, previous_lmf));
2115 EMIT_NEW_STORE_MEMBASE (cfg, ins, OP_STORE_MEMBASE_REG, lmf_addr_reg, 0, prev_lmf_reg);
/*
 * emit_instrumentation_call:
 * Emit a profiler enter/leave icall to FUNC with the current method as
 * the only argument, when enter/leave profiling is enabled. Skipped for
 * inlined methods (see comment below).
 */
2120 emit_instrumentation_call (MonoCompile *cfg, void *func)
2122 MonoInst *iargs [1];
2125 * Avoid instrumenting inlined methods since it can
2126 * distort profiling results.
2128 if (cfg->method != cfg->current_method)
2131 if (cfg->prof_options & MONO_PROFILE_ENTER_LEAVE) {
2132 EMIT_NEW_METHODCONST (cfg, iargs [0], cfg->method);
2133 mono_emit_jit_icall (cfg, func, iargs);
/*
 * ret_type_to_call_opcode:
 * Select the call opcode family (VOID/"plain"/L/R/F/V CALL) for a call
 * returning TYPE, further specialized by CALLI (indirect, _REG) and
 * VIRT (virtual, _MEMBASE). Enums and generic insts are unwrapped and
 * re-dispatched.
 */
2138 ret_type_to_call_opcode (MonoCompile *cfg, MonoType *type, int calli, int virt, MonoGenericSharingContext *gsctx)
2141 type = mini_get_underlying_type (cfg, type);
2142 switch (type->type) {
2143 case MONO_TYPE_VOID:
2144 return calli? OP_VOIDCALL_REG: virt? OP_VOIDCALL_MEMBASE: OP_VOIDCALL;
2151 return calli? OP_CALL_REG: virt? OP_CALL_MEMBASE: OP_CALL;
2155 case MONO_TYPE_FNPTR:
2156 return calli? OP_CALL_REG: virt? OP_CALL_MEMBASE: OP_CALL;
2157 case MONO_TYPE_CLASS:
2158 case MONO_TYPE_STRING:
2159 case MONO_TYPE_OBJECT:
2160 case MONO_TYPE_SZARRAY:
2161 case MONO_TYPE_ARRAY:
2162 return calli? OP_CALL_REG: virt? OP_CALL_MEMBASE: OP_CALL;
2165 return calli? OP_LCALL_REG: virt? OP_LCALL_MEMBASE: OP_LCALL;
2168 return calli? OP_RCALL_REG: virt? OP_RCALL_MEMBASE: OP_RCALL;
2170 return calli? OP_FCALL_REG: virt? OP_FCALL_MEMBASE: OP_FCALL;
2172 return calli? OP_FCALL_REG: virt? OP_FCALL_MEMBASE: OP_FCALL;
2173 case MONO_TYPE_VALUETYPE:
/* Enums collapse to their base type and fall through to re-dispatch. */
2174 if (type->data.klass->enumtype) {
2175 type = mono_class_enum_basetype (type->data.klass);
2178 return calli? OP_VCALL_REG: virt? OP_VCALL_MEMBASE: OP_VCALL;
2179 case MONO_TYPE_TYPEDBYREF:
2180 return calli? OP_VCALL_REG: virt? OP_VCALL_MEMBASE: OP_VCALL;
2181 case MONO_TYPE_GENERICINST:
2182 type = &type->data.generic_class->container_class->byval_arg;
2185 case MONO_TYPE_MVAR:
/* Gsharedvt/vtype type variables return through a VCALL. */
2187 return calli? OP_VCALL_REG: virt? OP_VCALL_MEMBASE: OP_VCALL;
2189 g_error ("unknown type 0x%02x in ret_type_to_call_opcode", type->type);
2195 * target_type_is_incompatible:
2196 * @cfg: MonoCompile context
2198 * Check that the item @arg on the evaluation stack can be stored
2199 * in the target type (can be a local, or field, etc).
2200 * The cfg arg can be used to check if we need verification or just
2203 * Returns: non-0 value if arg can't be stored on a target.
/* See the contract in the comment block above: returns non-0 if ARG's
 * evaluation-stack type cannot be stored into TARGET. */
2206 target_type_is_incompatible (MonoCompile *cfg, MonoType *target, MonoInst *arg)
2208 MonoType *simple_type;
2211 if (target->byref) {
2212 /* FIXME: check that the pointed to types match */
2213 if (arg->type == STACK_MP)
2214 return arg->klass != mono_class_from_mono_type (target);
2215 if (arg->type == STACK_PTR)
2220 simple_type = mini_get_underlying_type (cfg, target);
2221 switch (simple_type->type) {
2222 case MONO_TYPE_VOID:
2230 if (arg->type != STACK_I4 && arg->type != STACK_PTR)
2234 /* STACK_MP is needed when setting pinned locals */
2235 if (arg->type != STACK_I4 && arg->type != STACK_PTR && arg->type != STACK_MP)
2240 case MONO_TYPE_FNPTR:
2242 * Some opcodes like ldloca returns 'transient pointers' which can be stored in
2243 * in native int. (#688008).
2245 if (arg->type != STACK_I4 && arg->type != STACK_PTR && arg->type != STACK_MP)
2248 case MONO_TYPE_CLASS:
2249 case MONO_TYPE_STRING:
2250 case MONO_TYPE_OBJECT:
2251 case MONO_TYPE_SZARRAY:
2252 case MONO_TYPE_ARRAY:
2253 if (arg->type != STACK_OBJ)
2255 /* FIXME: check type compatibility */
2259 if (arg->type != STACK_I8)
2263 if (arg->type != cfg->r4_stack_type)
2267 if (arg->type != STACK_R8)
/* Value types additionally require an exact klass match. */
2270 case MONO_TYPE_VALUETYPE:
2271 if (arg->type != STACK_VTYPE)
2273 klass = mono_class_from_mono_type (simple_type);
2274 if (klass != arg->klass)
2277 case MONO_TYPE_TYPEDBYREF:
2278 if (arg->type != STACK_VTYPE)
2280 klass = mono_class_from_mono_type (simple_type);
2281 if (klass != arg->klass)
2284 case MONO_TYPE_GENERICINST:
2285 if (mono_type_generic_inst_is_valuetype (simple_type)) {
2286 if (arg->type != STACK_VTYPE)
2288 klass = mono_class_from_mono_type (simple_type);
2289 if (klass != arg->klass)
2293 if (arg->type != STACK_OBJ)
2295 /* FIXME: check type compatibility */
2299 case MONO_TYPE_MVAR:
/* Type variables only occur under generic sharing. */
2300 g_assert (cfg->generic_sharing_context);
2301 if (mini_type_var_is_vt (cfg, simple_type)) {
2302 if (arg->type != STACK_VTYPE)
2305 if (arg->type != STACK_OBJ)
2310 g_error ("unknown type 0x%02x in target_type_is_incompatible", simple_type->type);
2316 * Prepare arguments for passing to a function call.
2317 * Return a non-zero value if the arguments can't be passed to the given
2319 * The type checks are not yet complete and some conversions may need
2320 * casts on 32 or 64 bit architectures.
2322 * FIXME: implement this using target_type_is_incompatible ()
/* See the contract in the comment block above: returns non-zero if ARGS
 * cannot be passed to SIG (this-ptr kind check plus per-parameter
 * stack-type checks). */
2325 check_call_signature (MonoCompile *cfg, MonoMethodSignature *sig, MonoInst **args)
2327 MonoType *simple_type;
/* The implicit 'this' must be an object, managed pointer, or native ptr. */
2331 if (args [0]->type != STACK_OBJ && args [0]->type != STACK_MP && args [0]->type != STACK_PTR)
2335 for (i = 0; i < sig->param_count; ++i) {
2336 if (sig->params [i]->byref) {
2337 if (args [i]->type != STACK_MP && args [i]->type != STACK_PTR)
2341 simple_type = mini_get_underlying_type (cfg, sig->params [i]);
2343 switch (simple_type->type) {
2344 case MONO_TYPE_VOID:
2353 if (args [i]->type != STACK_I4 && args [i]->type != STACK_PTR)
2359 case MONO_TYPE_FNPTR:
2360 if (args [i]->type != STACK_I4 && args [i]->type != STACK_PTR && args [i]->type != STACK_MP && args [i]->type != STACK_OBJ)
2363 case MONO_TYPE_CLASS:
2364 case MONO_TYPE_STRING:
2365 case MONO_TYPE_OBJECT:
2366 case MONO_TYPE_SZARRAY:
2367 case MONO_TYPE_ARRAY:
2368 if (args [i]->type != STACK_OBJ)
2373 if (args [i]->type != STACK_I8)
2377 if (args [i]->type != cfg->r4_stack_type)
2381 if (args [i]->type != STACK_R8)
2384 case MONO_TYPE_VALUETYPE:
/* Enums re-dispatch on their underlying base type. */
2385 if (simple_type->data.klass->enumtype) {
2386 simple_type = mono_class_enum_basetype (simple_type->data.klass);
2389 if (args [i]->type != STACK_VTYPE)
2392 case MONO_TYPE_TYPEDBYREF:
2393 if (args [i]->type != STACK_VTYPE)
2396 case MONO_TYPE_GENERICINST:
2397 simple_type = &simple_type->data.generic_class->container_class->byval_arg;
2400 case MONO_TYPE_MVAR:
2402 if (args [i]->type != STACK_VTYPE)
2406 g_error ("unknown type 0x%02x in check_call_signature",
/*
 * callvirt_to_call:
 * Map a *_MEMBASE (virtual) call opcode to its direct-call counterpart.
 * NOTE(review): the return statements for each case are elided in this dump.
 */
2414 callvirt_to_call (int opcode)
2417 case OP_CALL_MEMBASE:
2419 case OP_VOIDCALL_MEMBASE:
2421 case OP_FCALL_MEMBASE:
2423 case OP_RCALL_MEMBASE:
2425 case OP_VCALL_MEMBASE:
2427 case OP_LCALL_MEMBASE:
2430 g_assert_not_reached ();
2436 /* Either METHOD or IMT_ARG needs to be set */
/* Either METHOD or IMT_ARG is set (see comment above). Load the IMT
 * identifier into a register and attach it to CALL: under LLVM via
 * call->imt_arg_reg, otherwise via the architecture's IMT register or
 * mono_arch_emit_imt_argument fallback. */
2438 emit_imt_argument (MonoCompile *cfg, MonoCallInst *call, MonoMethod *method, MonoInst *imt_arg)
2442 if (COMPILE_LLVM (cfg)) {
2443 method_reg = alloc_preg (cfg);
2446 MONO_EMIT_NEW_UNALU (cfg, OP_MOVE, method_reg, imt_arg->dreg);
2447 } else if (cfg->compile_aot) {
2448 MONO_EMIT_NEW_AOTCONST (cfg, method_reg, method, MONO_PATCH_INFO_METHODCONST);
2451 MONO_INST_NEW (cfg, ins, OP_PCONST);
2452 ins->inst_p0 = method;
2453 ins->dreg = method_reg;
2454 MONO_ADD_INS (cfg->cbb, ins);
2458 call->imt_arg_reg = method_reg;
2460 #ifdef MONO_ARCH_IMT_REG
2461 mono_call_inst_add_outarg_reg (cfg, call, method_reg, MONO_ARCH_IMT_REG, FALSE);
2463 /* Need this to keep the IMT arg alive */
2464 mono_call_inst_add_outarg_reg (cfg, call, method_reg, 0, FALSE);
/* Non-LLVM path: same constant-materialization, pinned to the IMT reg. */
2469 #ifdef MONO_ARCH_IMT_REG
2470 method_reg = alloc_preg (cfg);
2473 MONO_EMIT_NEW_UNALU (cfg, OP_MOVE, method_reg, imt_arg->dreg);
2474 } else if (cfg->compile_aot) {
2475 MONO_EMIT_NEW_AOTCONST (cfg, method_reg, method, MONO_PATCH_INFO_METHODCONST);
2478 MONO_INST_NEW (cfg, ins, OP_PCONST);
2479 ins->inst_p0 = method;
2480 ins->dreg = method_reg;
2481 MONO_ADD_INS (cfg->cbb, ins);
2484 mono_call_inst_add_outarg_reg (cfg, call, method_reg, MONO_ARCH_IMT_REG, FALSE);
2486 mono_arch_emit_imt_argument (cfg, call, imt_arg);
/*
 * mono_patch_info_new:
 *
 *   Allocate a MonoJumpInfo from the mempool MP and record TARGET.
 * NOTE(review): the assignments of IP and TYPE, and the return of JI, are
 * not visible in this excerpt.
 */
2490 static MonoJumpInfo *
2491 mono_patch_info_new (MonoMemPool *mp, int ip, MonoJumpInfoType type, gconstpointer target)
2493 MonoJumpInfo *ji = mono_mempool_alloc (mp, sizeof (MonoJumpInfo));
2497 ji->data.target = target;
/*
 * mini_class_check_context_used:
 *
 *   Return the generic-context-usage flags for KLASS, but only when this
 * compilation uses generic sharing; otherwise context usage is irrelevant
 * (the excerpt does not show the fallback return, presumably 0).
 */
2503 mini_class_check_context_used (MonoCompile *cfg, MonoClass *klass)
2505 if (cfg->generic_sharing_context)
2506 return mono_class_check_context_used (klass);
/*
 * mini_method_check_context_used:
 *
 *   Method-level counterpart of mini_class_check_context_used (): query
 * context usage only under generic sharing (fallback return not visible,
 * presumably 0).
 */
2512 mini_method_check_context_used (MonoCompile *cfg, MonoMethod *method)
2514 if (cfg->generic_sharing_context)
2515 return mono_method_check_context_used (method);
2521 * check_method_sharing:
2523 * Check whenever the vtable or an mrgctx needs to be passed when calling CMETHOD.
2526 check_method_sharing (MonoCompile *cfg, MonoMethod *cmethod, gboolean *out_pass_vtable, gboolean *out_pass_mrgctx)
2528 gboolean pass_vtable = FALSE;
2529 gboolean pass_mrgctx = FALSE;
/* Static or valuetype methods of generic classes have no 'this' to recover
 * the class context from, so the vtable may need to be passed explicitly. */
2531 if (((cmethod->flags & METHOD_ATTRIBUTE_STATIC) || cmethod->klass->valuetype) &&
2532 (cmethod->klass->generic_class || cmethod->klass->generic_container)) {
2533 gboolean sharable = FALSE;
2535 if (mono_method_is_generic_sharable (cmethod, TRUE)) {
/* Not directly sharable: fall back to checking whether sharing is
 * enabled for the class and its context is sharable. */
2538 gboolean sharing_enabled = mono_class_generic_sharing_enabled (cmethod->klass);
2539 MonoGenericContext *context = mini_class_get_context (cmethod->klass);
2540 gboolean context_sharable = mono_generic_context_is_sharable (context, TRUE);
2542 sharable = sharing_enabled && context_sharable;
2546 * Pass vtable iff target method might
2547 * be shared, which means that sharing
2548 * is enabled for its class and its
2549 * context is sharable (and it's not a
2552 if (sharable && !(mini_method_get_context (cmethod) && mini_method_get_context (cmethod)->method_inst))
/* Generic methods (method_inst != NULL) take an mrgctx instead of the
 * vtable; the two are mutually exclusive. */
2556 if (mini_method_get_context (cmethod) &&
2557 mini_method_get_context (cmethod)->method_inst) {
2558 g_assert (!pass_vtable);
2560 if (mono_method_is_generic_sharable (cmethod, TRUE)) {
2563 gboolean sharing_enabled = mono_class_generic_sharing_enabled (cmethod->klass);
2564 MonoGenericContext *context = mini_method_get_context (cmethod);
2565 gboolean context_sharable = mono_generic_context_is_sharable (context, TRUE);
2567 if (sharing_enabled && context_sharable)
/* gsharedvt signatures always need the mrgctx. */
2569 if (cfg->gsharedvt && mini_is_gsharedvt_signature (cfg, mono_method_signature (cmethod)))
/* Both out parameters are optional. */
2574 if (out_pass_vtable)
2575 *out_pass_vtable = pass_vtable;
2576 if (out_pass_mrgctx)
2577 *out_pass_mrgctx = pass_mrgctx;
/*
 * mono_emit_call_args:
 *
 *   Create the MonoCallInst for a call with signature SIG and arguments
 * ARGS, set up its return-value handling (including vtype returns and
 * tail calls), then let the backend lower the argument passing.
 * The instruction is created but not yet added to a basic block; callers
 * do that after filling in the call target.
 */
2580 inline static MonoCallInst *
2581 mono_emit_call_args (MonoCompile *cfg, MonoMethodSignature *sig,
2582 MonoInst **args, int calli, int virtual, int tail, int rgctx, int unbox_trampoline)
2586 #ifdef MONO_ARCH_SOFT_FLOAT_FALLBACK
/* Tail calls leave the method, so emit the profiler leave event first. */
2591 emit_instrumentation_call (cfg, mono_profiler_method_leave);
2593 MONO_INST_NEW_CALL (cfg, call, OP_TAILCALL);
/* Normal call: pick the OP_*CALL variant matching the return type. */
2595 MONO_INST_NEW_CALL (cfg, call, ret_type_to_call_opcode (cfg, sig->ret, calli, virtual, cfg->generic_sharing_context));
2598 call->signature = sig;
2599 call->rgctx_reg = rgctx;
2600 sig_ret = mini_get_underlying_type (cfg, sig->ret);
2602 type_to_eval_stack_type ((cfg), sig_ret, &call->inst);
/* Valuetype return: reuse the caller-provided vret_addr when available... */
2605 if (mini_type_is_vtype (cfg, sig_ret)) {
2606 call->vret_var = cfg->vret_addr;
2607 //g_assert_not_reached ();
/* ...otherwise allocate a local to receive the value. */
2609 } else if (mini_type_is_vtype (cfg, sig_ret)) {
2610 MonoInst *temp = mono_compile_create_var (cfg, sig_ret, OP_LOCAL);
2613 temp->backend.is_pinvoke = sig->pinvoke;
2616 * We use a new opcode OP_OUTARG_VTRETADDR instead of LDADDR for emitting the
2617 * address of return value to increase optimization opportunities.
2618 * Before vtype decomposition, the dreg of the call ins itself represents the
2619 * fact the call modifies the return value. After decomposition, the call will
2620 * be transformed into one of the OP_VOIDCALL opcodes, and the VTRETADDR opcode
2621 * will be transformed into an LDADDR.
2623 MONO_INST_NEW (cfg, loada, OP_OUTARG_VTRETADDR);
2624 loada->dreg = alloc_preg (cfg);
2625 loada->inst_p0 = temp;
2626 /* We reference the call too since call->dreg could change during optimization */
2627 loada->inst_p1 = call;
2628 MONO_ADD_INS (cfg->cbb, loada);
2630 call->inst.dreg = temp->dreg;
2632 call->vret_var = loada;
/* Scalar (non-void) return: just allocate a destination vreg. */
2633 } else if (!MONO_TYPE_IS_VOID (sig_ret))
2634 call->inst.dreg = alloc_dreg (cfg, call->inst.type);
2636 #ifdef MONO_ARCH_SOFT_FLOAT_FALLBACK
2637 if (COMPILE_SOFT_FLOAT (cfg)) {
2639 * If the call has a float argument, we would need to do an r8->r4 conversion using
2640 * an icall, but that cannot be done during the call sequence since it would clobber
2641 * the call registers + the stack. So we do it before emitting the call.
2643 for (i = 0; i < sig->param_count + sig->hasthis; ++i) {
2645 MonoInst *in = call->args [i];
/* Index 0 is 'this' when hasthis; map back into sig->params. */
2647 if (i >= sig->hasthis)
2648 t = sig->params [i - sig->hasthis];
2650 t = &mono_defaults.int_class->byval_arg;
2651 t = mono_type_get_underlying_type (t);
2653 if (!t->byref && t->type == MONO_TYPE_R4) {
2654 MonoInst *iargs [1];
2658 conv = mono_emit_jit_icall (cfg, mono_fload_r4_arg, iargs);
2660 /* The result will be in an int vreg */
2661 call->args [i] = conv;
2667 call->need_unbox_trampoline = unbox_trampoline;
/* Lower the argument-passing sequence with the active backend. */
2670 if (COMPILE_LLVM (cfg))
2671 mono_llvm_emit_call (cfg, call);
2673 mono_arch_emit_call (cfg, call);
2675 mono_arch_emit_call (cfg, call);
/* Track the largest outgoing-parameter area needed by any call. */
2678 cfg->param_area = MAX (cfg->param_area, call->stack_usage);
2679 cfg->flags |= MONO_CFG_HAS_CALLS;
/*
 * set_rgctx_arg:
 *
 *   Attach the rgctx value in RGCTX_REG to CALL: via the dedicated
 * MONO_ARCH_RGCTX_REG when the architecture has one, otherwise by
 * recording the vreg in call->rgctx_arg_reg for the backend to handle.
 */
2685 set_rgctx_arg (MonoCompile *cfg, MonoCallInst *call, int rgctx_reg, MonoInst *rgctx_arg)
2687 #ifdef MONO_ARCH_RGCTX_REG
2688 mono_call_inst_add_outarg_reg (cfg, call, rgctx_reg, MONO_ARCH_RGCTX_REG, FALSE);
2689 cfg->uses_rgctx_reg = TRUE;
2690 call->rgctx_reg = TRUE;
2692 call->rgctx_arg_reg = rgctx_reg;
/*
 * mono_emit_calli:
 *
 *   Emit an indirect call through ADDR with signature SIG. Optionally
 * passes an IMT argument and an rgctx argument, and — for pinvoke
 * wrappers when check_pinvoke_callconv is enabled — brackets the call
 * with stack-pointer checks to detect calling-convention mismatches.
 * Returns the call instruction (as a MonoInst*).
 */
2699 inline static MonoInst*
2700 mono_emit_calli (MonoCompile *cfg, MonoMethodSignature *sig, MonoInst **args, MonoInst *addr, MonoInst *imt_arg, MonoInst *rgctx_arg)
2705 gboolean check_sp = FALSE;
2707 if (cfg->check_pinvoke_callconv && cfg->method->wrapper_type == MONO_WRAPPER_MANAGED_TO_NATIVE) {
2708 WrapperInfo *info = mono_marshal_get_wrapper_info (cfg->method);
/* Only plain pinvoke wrappers get the SP-balance check. */
2710 if (info && info->subtype == WRAPPER_SUBTYPE_PINVOKE)
/* Copy the rgctx value into a fresh vreg so it survives until the call. */
2715 rgctx_reg = mono_alloc_preg (cfg);
2716 MONO_EMIT_NEW_UNALU (cfg, OP_MOVE, rgctx_reg, rgctx_arg->dreg);
2720 if (!cfg->stack_inbalance_var)
2721 cfg->stack_inbalance_var = mono_compile_create_var (cfg, &mono_defaults.int_class->byval_arg, OP_LOCAL);
/* Snapshot SP before the call. */
2723 MONO_INST_NEW (cfg, ins, OP_GET_SP);
2724 ins->dreg = cfg->stack_inbalance_var->dreg;
2725 MONO_ADD_INS (cfg->cbb, ins);
2728 call = mono_emit_call_args (cfg, sig, args, TRUE, FALSE, FALSE, rgctx_arg ? TRUE : FALSE, FALSE);
/* Indirect call: the target address lives in sreg1. */
2730 call->inst.sreg1 = addr->dreg;
2733 emit_imt_argument (cfg, call, NULL, imt_arg);
2735 MONO_ADD_INS (cfg->cbb, (MonoInst*)call);
/* Snapshot SP after the call and compare against the saved value. */
2740 sp_reg = mono_alloc_preg (cfg);
2742 MONO_INST_NEW (cfg, ins, OP_GET_SP);
2744 MONO_ADD_INS (cfg->cbb, ins);
2746 /* Restore the stack so we don't crash when throwing the exception */
2747 MONO_INST_NEW (cfg, ins, OP_SET_SP);
2748 ins->sreg1 = cfg->stack_inbalance_var->dreg;
2749 MONO_ADD_INS (cfg->cbb, ins);
2751 MONO_EMIT_NEW_BIALU (cfg, OP_COMPARE, -1, cfg->stack_inbalance_var->dreg, sp_reg);
2752 MONO_EMIT_NEW_COND_EXC (cfg, NE_UN, "ExecutionEngineException");
2756 set_rgctx_arg (cfg, call, rgctx_reg, rgctx_arg);
2758 return (MonoInst*)call;
2762 emit_get_gsharedvt_info_klass (MonoCompile *cfg, MonoClass *klass, MonoRgctxInfoType rgctx_type);
2765 emit_get_rgctx_method (MonoCompile *cfg, int context_used, MonoMethod *cmethod, MonoRgctxInfoType rgctx_type);
2767 emit_get_rgctx_klass (MonoCompile *cfg, int context_used, MonoClass *klass, MonoRgctxInfoType rgctx_type);
/*
 * mono_emit_method_call_full:
 *
 *   Emit a call to METHOD with signature SIG and arguments ARGS.
 * Handles the full dispatch matrix: string ctors, transparent-proxy
 * (remoting) targets, delegate Invoke fast paths, devirtualization of
 * non-virtual/sealed targets, vtable dispatch, and IMT-based interface
 * and generic-virtual dispatch. THIS non-NULL selects virtual dispatch;
 * IMT_ARG/RGCTX_ARG are optional extra hidden arguments.
 * Returns the call instruction (as a MonoInst*).
 */
2770 mono_emit_method_call_full (MonoCompile *cfg, MonoMethod *method, MonoMethodSignature *sig, gboolean tail,
2771 MonoInst **args, MonoInst *this, MonoInst *imt_arg, MonoInst *rgctx_arg)
2773 #ifndef DISABLE_REMOTING
2774 gboolean might_be_remote = FALSE;
2776 gboolean virtual = this != NULL;
2777 gboolean enable_for_aot = TRUE;
2781 gboolean need_unbox_trampoline;
2784 sig = mono_method_signature (method);
/* Keep the rgctx value alive in its own vreg until set_rgctx_arg. */
2787 rgctx_reg = mono_alloc_preg (cfg);
2788 MONO_EMIT_NEW_UNALU (cfg, OP_MOVE, rgctx_reg, rgctx_arg->dreg);
2791 if (method->string_ctor) {
2792 /* Create the real signature */
2793 /* FIXME: Cache these */
/* String ctors actually return the string, unlike their metadata sig. */
2794 MonoMethodSignature *ctor_sig = mono_metadata_signature_dup_mempool (cfg->mempool, sig);
2795 ctor_sig->ret = &mono_defaults.string_class->byval_arg;
2800 context_used = mini_method_check_context_used (cfg, method);
2802 #ifndef DISABLE_REMOTING
/* A MarshalByRef (or object-class) non-virtual instance call might hit a
 * transparent proxy, unless 'this' is statically known to be local. */
2803 might_be_remote = this && sig->hasthis &&
2804 (mono_class_is_marshalbyref (method->klass) || method->klass == mono_defaults.object_class) &&
2805 !(method->flags & METHOD_ATTRIBUTE_VIRTUAL) && (!MONO_CHECK_THIS (this) || context_used);
2807 if (might_be_remote && context_used) {
2810 g_assert (cfg->generic_sharing_context);
/* Shared code: fetch the remoting-check invoke wrapper via the rgctx
 * and call it indirectly. */
2812 addr = emit_get_rgctx_method (cfg, context_used, method, MONO_RGCTX_INFO_REMOTING_INVOKE_WITH_CHECK);
2814 return mono_emit_calli (cfg, sig, args, addr, NULL, NULL);
/* Valuetype targets reached through object/interface need unboxing. */
2818 need_unbox_trampoline = method->klass == mono_defaults.object_class || (method->klass->flags & TYPE_ATTRIBUTE_INTERFACE);
2820 call = mono_emit_call_args (cfg, sig, args, FALSE, virtual, tail, rgctx_arg ? TRUE : FALSE, need_unbox_trampoline);
2822 #ifndef DISABLE_REMOTING
2823 if (might_be_remote)
2824 call->method = mono_marshal_get_remoting_invoke_with_check (method);
2827 call->method = method;
2828 call->inst.flags |= MONO_INST_HAS_METHOD;
2829 call->inst.inst_left = this;
2830 call->tail_call = tail;
2833 int vtable_reg, slot_reg, this_reg;
2836 this_reg = this->dreg;
/* Fast path: delegate Invoke dispatches through delegate->invoke_impl. */
2838 if (ARCH_HAVE_DELEGATE_TRAMPOLINES && (method->klass->parent == mono_defaults.multicastdelegate_class) && !strcmp (method->name, "Invoke")) {
2839 MonoInst *dummy_use;
2841 MONO_EMIT_NULL_CHECK (cfg, this_reg);
2843 /* Make a call to delegate->invoke_impl */
2844 call->inst.inst_basereg = this_reg;
2845 call->inst.inst_offset = MONO_STRUCT_OFFSET (MonoDelegate, invoke_impl);
2846 MONO_ADD_INS (cfg->cbb, (MonoInst*)call);
2848 /* We must emit a dummy use here because the delegate trampoline will
2849 replace the 'this' argument with the delegate target making this activation
2850 no longer a root for the delegate.
2851 This is an issue for delegates that target collectible code such as dynamic
2852 methods of GC'able assemblies.
2854 For a test case look into #667921.
2856 FIXME: a dummy use is not the best way to do it as the local register allocator
2857 will put it on a caller save register and spil it around the call.
2858 Ideally, we would either put it on a callee save register or only do the store part.
2860 EMIT_NEW_DUMMY_USE (cfg, dummy_use, args [0]);
2862 return (MonoInst*)call;
/* Devirtualize: non-virtual target (or final + not the remoting-check
 * wrapper), and not a gshared MarshalByRef case. */
2865 if ((!cfg->compile_aot || enable_for_aot) &&
2866 (!(method->flags & METHOD_ATTRIBUTE_VIRTUAL) ||
2867 (MONO_METHOD_IS_FINAL (method) &&
2868 method->wrapper_type != MONO_WRAPPER_REMOTING_INVOKE_WITH_CHECK)) &&
2869 !(mono_class_is_marshalbyref (method->klass) && context_used)) {
2871 * the method is not virtual, we just need to ensure this is not null
2872 * and then we can call the method directly.
2874 #ifndef DISABLE_REMOTING
2875 if (mono_class_is_marshalbyref (method->klass) || method->klass == mono_defaults.object_class) {
2877 * The check above ensures method is not gshared, this is needed since
2878 * gshared methods can't have wrappers.
2880 method = call->method = mono_marshal_get_remoting_invoke_with_check (method);
/* String ctors take no 'this', so skip the null check for them. */
2884 if (!method->string_ctor)
2885 MONO_EMIT_NEW_CHECK_THIS (cfg, this_reg);
2887 call->inst.opcode = callvirt_to_call (call->inst.opcode);
2888 } else if ((method->flags & METHOD_ATTRIBUTE_VIRTUAL) && MONO_METHOD_IS_FINAL (method)) {
2890 * the method is virtual, but we can statically dispatch since either
2891 * it's class or the method itself are sealed.
2892 * But first we need to ensure it's not a null reference.
2894 MONO_EMIT_NEW_CHECK_THIS (cfg, this_reg);
2896 call->inst.opcode = callvirt_to_call (call->inst.opcode);
/* True virtual dispatch: load the vtable (faulting on NULL 'this'). */
2898 vtable_reg = alloc_preg (cfg);
2899 MONO_EMIT_NEW_LOAD_MEMBASE_FAULT (cfg, vtable_reg, this_reg, MONO_STRUCT_OFFSET (MonoObject, vtable));
2900 if (method->klass->flags & TYPE_ATTRIBUTE_INTERFACE) {
/* Interface call: IMT slots sit at negative offsets from the vtable. */
2903 guint32 imt_slot = mono_method_get_imt_slot (method);
2904 emit_imt_argument (cfg, call, call->method, imt_arg);
2905 slot_reg = vtable_reg;
2906 offset = ((gint32)imt_slot - MONO_IMT_SIZE) * SIZEOF_VOID_P;
/* No IMT slot computed: load the interface vtable explicitly. */
2908 if (slot_reg == -1) {
2909 slot_reg = alloc_preg (cfg);
2910 mini_emit_load_intf_reg_vtable (cfg, slot_reg, vtable_reg, method->klass);
2911 offset = mono_method_get_vtable_index (method) * SIZEOF_VOID_P;
/* Class virtual call: slot at a positive offset inside the vtable. */
2914 slot_reg = vtable_reg;
2915 offset = MONO_STRUCT_OFFSET (MonoVTable, vtable) +
2916 ((mono_method_get_vtable_index (method)) * (SIZEOF_VOID_P));
/* Generic virtual methods also dispatch through the IMT mechanism. */
2918 g_assert (mono_method_signature (method)->generic_param_count);
2919 emit_imt_argument (cfg, call, call->method, imt_arg);
2923 call->inst.sreg1 = slot_reg;
2924 call->inst.inst_offset = offset;
2925 call->virtual = TRUE;
2929 MONO_ADD_INS (cfg->cbb, (MonoInst*)call);
2932 set_rgctx_arg (cfg, call, rgctx_reg, rgctx_arg);
2934 return (MonoInst*)call;
/*
 * mono_emit_method_call:
 *
 *   Convenience wrapper: non-tail call to METHOD using its own signature,
 * with no IMT or rgctx argument.
 */
2938 mono_emit_method_call (MonoCompile *cfg, MonoMethod *method, MonoInst **args, MonoInst *this)
2940 return mono_emit_method_call_full (cfg, method, mono_method_signature (method), FALSE, args, this, NULL, NULL);
/*
 * mono_emit_native_call:
 *
 *   Emit a direct call to the native function FUNC with signature SIG.
 * NOTE(review): the line storing FUNC into the call (fptr) falls outside
 * this excerpt.
 */
2944 mono_emit_native_call (MonoCompile *cfg, gconstpointer func, MonoMethodSignature *sig,
2951 call = mono_emit_call_args (cfg, sig, args, FALSE, FALSE, FALSE, FALSE, FALSE);
2954 MONO_ADD_INS (cfg->cbb, (MonoInst*)call);
2956 return (MonoInst*)call;
/*
 * mono_emit_jit_icall:
 *
 *   Look up the JIT icall registered for FUNC and emit a call to its
 * managed wrapper.
 */
2960 mono_emit_jit_icall (MonoCompile *cfg, gconstpointer func, MonoInst **args)
2962 MonoJitICallInfo *info = mono_find_jit_icall_by_addr (func);
2966 return mono_emit_native_call (cfg, mono_icall_get_wrapper (info), info->sig, args);
2970 * mono_emit_abs_call:
2972 * Emit a call to the runtime function described by PATCH_TYPE and DATA.
2974 inline static MonoInst*
2975 mono_emit_abs_call (MonoCompile *cfg, MonoJumpInfoType patch_type, gconstpointer data,
2976 MonoMethodSignature *sig, MonoInst **args)
2978 MonoJumpInfo *ji = mono_patch_info_new (cfg->mempool, 0, patch_type, data);
2982 * We pass ji as the call address, the PATCH_INFO_ABS resolving code will
/* Register ji in abs_patches so the patch resolver can find it later. */
2985 if (cfg->abs_patches == NULL)
2986 cfg->abs_patches = g_hash_table_new (NULL, NULL);
2987 g_hash_table_insert (cfg->abs_patches, ji, ji);
2988 ins = mono_emit_native_call (cfg, ji, sig, args);
/* Mark the fptr as a patch so it is not treated as a real address. */
2989 ((MonoCallInst*)ins)->fptr_is_patch = TRUE;
/*
 * direct_icalls_enabled:
 *
 *   Whether icalls may be called directly (without their managed wrapper).
 * Disabled under LLVM on some targets, when sequence-point debug data is
 * generated, or when explicitly turned off.
 */
2994 direct_icalls_enabled (MonoCompile *cfg)
2996 /* LLVM on amd64 can't handle calls to non-32 bit addresses */
2998 if (cfg->compile_llvm)
3001 if (cfg->gen_seq_points_debug_data || cfg->disable_direct_icalls)
/*
 * mono_emit_jit_icall_by_info:
 *
 *   Emit a call to the icall described by INFO, inlining its wrapper
 * (lazily created here) when the icall cannot raise and direct icalls
 * are enabled; otherwise call through the normal wrapper.
 * OUT_CBB receives the basic block following the inlined code.
 */
3007 mono_emit_jit_icall_by_info (MonoCompile *cfg, MonoJitICallInfo *info, MonoInst **args, MonoBasicBlock **out_cbb)
3010 * Call the jit icall without a wrapper if possible.
3011 * The wrapper is needed for the following reasons:
3012 * - to handle exceptions thrown using mono_raise_exceptions () from the
3013 * icall function. The EH code needs the lmf frame pushed by the
3014 * wrapper to be able to unwind back to managed code.
3015 * - to be able to do stack walks for asynchronously suspended
3016 * threads when debugging.
3018 if (info->no_raise && direct_icalls_enabled (cfg)) {
/* Create the wrapper method on first use. */
3022 if (!info->wrapper_method) {
3023 name = g_strdup_printf ("__icall_wrapper_%s", info->name);
3024 info->wrapper_method = mono_marshal_get_icall_wrapper (info->sig, name, info->func, TRUE);
/* Publish wrapper_method to other threads before it is observed. */
3026 mono_memory_barrier ();
3030 * Inline the wrapper method, which is basically a call to the C icall, and
3031 * an exception check.
3033 costs = inline_method (cfg, info->wrapper_method, NULL,
3034 args, NULL, cfg->real_offset, TRUE, out_cbb);
3035 g_assert (costs > 0);
3036 g_assert (!MONO_TYPE_IS_VOID (info->sig->ret));
3040 return mono_emit_native_call (cfg, mono_icall_get_wrapper (info), info->sig, args);
/*
 * mono_emit_widen_call_res:
 *
 *   For pinvoke (or LLVM) calls returning small integer types, emit an
 * explicit sign/zero extension of the result, since native code may leave
 * the upper bits of the return register uninitialized. Returns the
 * (possibly replaced) result instruction.
 */
3045 mono_emit_widen_call_res (MonoCompile *cfg, MonoInst *ins, MonoMethodSignature *fsig)
3047 if (!MONO_TYPE_IS_VOID (fsig->ret)) {
3048 if ((fsig->pinvoke || LLVM_ENABLED) && !fsig->ret->byref) {
3052 * Native code might return non register sized integers
3053 * without initializing the upper bits.
/* Pick the widening op matching the return's load size/signedness. */
3055 switch (mono_type_to_load_membase (cfg, fsig->ret)) {
3056 case OP_LOADI1_MEMBASE:
3057 widen_op = OP_ICONV_TO_I1;
3059 case OP_LOADU1_MEMBASE:
3060 widen_op = OP_ICONV_TO_U1;
3062 case OP_LOADI2_MEMBASE:
3063 widen_op = OP_ICONV_TO_I2;
3065 case OP_LOADU2_MEMBASE:
3066 widen_op = OP_ICONV_TO_U2;
3072 if (widen_op != -1) {
3073 int dreg = alloc_preg (cfg);
3076 EMIT_NEW_UNALU (cfg, widen, widen_op, dreg, ins->dreg);
/* Preserve the eval-stack type of the original result. */
3077 widen->type = ins->type;
/*
 * get_memcpy_method:
 *
 *   Return (lazily caching) the managed String.memcpy(dest,src,size)
 * helper from corlib; aborts if running against an old corlib that
 * lacks it.
 */
3087 get_memcpy_method (void)
3089 static MonoMethod *memcpy_method = NULL;
3090 if (!memcpy_method) {
3091 memcpy_method = mono_class_get_method_from_name (mono_defaults.string_class, "memcpy", 3);
3093 g_error ("Old corlib found. Install a new one");
3095 return memcpy_method;
/*
 * create_write_barrier_bitmap:
 *
 *   Build into *WB_BITMAP a bitmap with one bit per pointer-sized slot of
 * KLASS (offset by OFFSET words), set for slots holding object references.
 * Recurses into embedded valuetypes that contain references; used to drive
 * selective write barriers when copying valuetypes.
 */
3099 create_write_barrier_bitmap (MonoCompile *cfg, MonoClass *klass, unsigned *wb_bitmap, int offset)
3101 MonoClassField *field;
3102 gpointer iter = NULL;
3104 while ((field = mono_class_get_fields (klass, &iter))) {
/* Static fields are not part of the instance layout. */
3107 if (field->type->attrs & FIELD_ATTRIBUTE_STATIC)
/* Valuetype field offsets include the (absent) MonoObject header. */
3109 foffset = klass->valuetype ? field->offset - sizeof (MonoObject): field->offset;
3110 if (mini_type_is_reference (cfg, mono_field_get_type (field))) {
/* References must be pointer-aligned for the bitmap to be valid. */
3111 g_assert ((foffset % SIZEOF_VOID_P) == 0);
3112 *wb_bitmap |= 1 << ((offset + foffset) / SIZEOF_VOID_P);
3114 MonoClass *field_class = mono_class_from_mono_type (field->type);
3115 if (field_class->has_references)
3116 create_write_barrier_bitmap (cfg, field_class, wb_bitmap, offset + foffset);
/*
 * emit_write_barrier:
 *
 *   Emit a GC write barrier for storing VALUE through PTR. Prefers, in
 * order: the arch-specific card-table barrier opcode, inline card-table
 * marking code, and finally a call to the generic write-barrier method.
 * No-op when write barriers are disabled for this compilation.
 */
3122 emit_write_barrier (MonoCompile *cfg, MonoInst *ptr, MonoInst *value)
3124 int card_table_shift_bits;
3125 gpointer card_table_mask;
3127 MonoInst *dummy_use;
3128 int nursery_shift_bits;
3129 size_t nursery_size;
3130 gboolean has_card_table_wb = FALSE;
3132 if (!cfg->gen_write_barriers)
3135 card_table = mono_gc_get_card_table (&card_table_shift_bits, &card_table_mask);
3137 mono_gc_get_nursery (&nursery_shift_bits, &nursery_size);
3139 #ifdef MONO_ARCH_HAVE_CARD_TABLE_WBARRIER
3140 has_card_table_wb = TRUE;
/* Fast path: single arch-lowered card-table barrier instruction. */
3143 if (has_card_table_wb && !cfg->compile_aot && card_table && nursery_shift_bits > 0 && !COMPILE_LLVM (cfg)) {
3146 MONO_INST_NEW (cfg, wbarrier, OP_CARD_TABLE_WBARRIER);
3147 wbarrier->sreg1 = ptr->dreg;
3148 wbarrier->sreg2 = value->dreg;
3149 MONO_ADD_INS (cfg->cbb, wbarrier);
/* Inline card marking: card = card_table + (ptr >> shift); *card = 1. */
3150 } else if (card_table && !cfg->compile_aot && !mono_gc_card_table_nursery_check ()) {
3151 int offset_reg = alloc_preg (cfg);
3152 int card_reg = alloc_preg (cfg);
3155 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_SHR_UN_IMM, offset_reg, ptr->dreg, card_table_shift_bits);
3156 if (card_table_mask)
3157 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_PAND_IMM, offset_reg, offset_reg, card_table_mask);
3159 /*We can't use PADD_IMM since the cardtable might end up in high addresses and amd64 doesn't support
3160 * IMM's larger than 32bits.
3162 if (cfg->compile_aot) {
3163 MONO_EMIT_NEW_AOTCONST (cfg, card_reg, NULL, MONO_PATCH_INFO_GC_CARD_TABLE_ADDR);
3165 MONO_INST_NEW (cfg, ins, OP_PCONST);
3166 ins->inst_p0 = card_table;
3167 ins->dreg = card_reg;
3168 MONO_ADD_INS (cfg->cbb, ins);
3171 MONO_EMIT_NEW_BIALU (cfg, OP_PADD, offset_reg, offset_reg, card_reg);
3172 MONO_EMIT_NEW_STORE_MEMBASE_IMM (cfg, OP_STOREI1_MEMBASE_IMM, offset_reg, 0, 1);
/* Slow path: call the GC-provided managed write-barrier method. */
3174 MonoMethod *write_barrier = mono_gc_get_write_barrier ();
3175 mono_emit_method_call (cfg, write_barrier, &ptr, NULL);
/* Keep VALUE live so the GC can see it across the barrier. */
3178 EMIT_NEW_DUMMY_USE (cfg, dummy_use, value);
/*
 * mono_emit_wb_aware_memcpy:
 *
 *   Emit a write-barrier-aware copy of SIZE bytes of valuetype KLASS from
 * iargs[1] to iargs[0]. Small copies are unrolled inline with barriers on
 * the reference slots; larger ones call the bitmap-based value-copy icall.
 * NOTE(review): the early-out returns (e.g. for align < SIZEOF_VOID_P) and
 * the final return value are not visible in this excerpt; callers treat a
 * false result as "fall back to plain memcpy".
 */
3182 mono_emit_wb_aware_memcpy (MonoCompile *cfg, MonoClass *klass, MonoInst *iargs[4], int size, int align)
3184 int dest_ptr_reg, tmp_reg, destreg, srcreg, offset;
3185 unsigned need_wb = 0;
3190 /*types with references can't have alignment smaller than sizeof(void*) */
3191 if (align < SIZEOF_VOID_P)
3194 /*This value cannot be bigger than 32 due to the way we calculate the required wb bitmap.*/
3195 if (size > 32 * SIZEOF_VOID_P)
3198 create_write_barrier_bitmap (cfg, klass, &need_wb, 0);
3200 /* We don't unroll more than 5 stores to avoid code bloat. */
3201 if (size > 5 * SIZEOF_VOID_P) {
3202 /*This is harmless and simplify mono_gc_wbarrier_value_copy_bitmap */
/* Round SIZE up to a multiple of the pointer size. */
3203 size += (SIZEOF_VOID_P - 1);
3204 size &= ~(SIZEOF_VOID_P - 1);
3206 EMIT_NEW_ICONST (cfg, iargs [2], size);
3207 EMIT_NEW_ICONST (cfg, iargs [3], need_wb);
3208 mono_emit_jit_icall (cfg, mono_gc_wbarrier_value_copy_bitmap, iargs);
3212 destreg = iargs [0]->dreg;
3213 srcreg = iargs [1]->dreg;
3216 dest_ptr_reg = alloc_preg (cfg);
3217 tmp_reg = alloc_preg (cfg);
/* Walk a running destination pointer so barriers see the slot address. */
3220 EMIT_NEW_UNALU (cfg, iargs [0], OP_MOVE, dest_ptr_reg, destreg);
/* Unrolled pointer-sized copy loop (emitted at compile time). */
3222 while (size >= SIZEOF_VOID_P) {
3223 MonoInst *load_inst;
3224 MONO_INST_NEW (cfg, load_inst, OP_LOAD_MEMBASE);
3225 load_inst->dreg = tmp_reg;
3226 load_inst->inst_basereg = srcreg;
3227 load_inst->inst_offset = offset;
3228 MONO_ADD_INS (cfg->cbb, load_inst);
3230 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREP_MEMBASE_REG, dest_ptr_reg, 0, tmp_reg);
/* Barrier only for slots the bitmap marked as references (guard is
 * outside this excerpt). */
3233 emit_write_barrier (cfg, iargs [0], load_inst);
3235 offset += SIZEOF_VOID_P;
3236 size -= SIZEOF_VOID_P;
3239 /*tmp += sizeof (void*)*/
3240 if (size >= SIZEOF_VOID_P) {
3241 NEW_BIALU_IMM (cfg, iargs [0], OP_PADD_IMM, dest_ptr_reg, dest_ptr_reg, SIZEOF_VOID_P);
3242 MONO_ADD_INS (cfg->cbb, iargs [0]);
3246 /* Those cannot be references since size < sizeof (void*) */
/* Copy the sub-pointer-sized tail in 4/2/1-byte chunks, no barriers. */
3248 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADI4_MEMBASE, tmp_reg, srcreg, offset);
3249 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI4_MEMBASE_REG, destreg, offset, tmp_reg);
3255 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADI2_MEMBASE, tmp_reg, srcreg, offset);
3256 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI2_MEMBASE_REG, destreg, offset, tmp_reg);
3262 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADI1_MEMBASE, tmp_reg, srcreg, offset);
3263 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI1_MEMBASE_REG, destreg, offset, tmp_reg);
3272 * Emit code to copy a valuetype of type @klass whose address is stored in
3273 * @src->dreg to memory whose address is stored at @dest->dreg.
3276 mini_emit_stobj (MonoCompile *cfg, MonoInst *dest, MonoInst *src, MonoClass *klass, gboolean native)
3278 MonoInst *iargs [4];
3281 MonoMethod *memcpy_method;
3282 MonoInst *size_ins = NULL;
3283 MonoInst *memcpy_ins = NULL;
/* Under generic sharing, normalize KLASS through the underlying type. */
3286 if (cfg->generic_sharing_context)
3287 klass = mono_class_from_mono_type (mini_get_underlying_type (cfg, &klass->byval_arg));
3290 * This check breaks with spilled vars... need to handle it during verification anyway.
3291 * g_assert (klass && klass == src->klass && klass == dest->klass);
/* gsharedvt: size and memcpy routine are only known at runtime; fetch
 * them from the rgctx. */
3294 if (mini_is_gsharedvt_klass (cfg, klass)) {
3296 size_ins = emit_get_gsharedvt_info_klass (cfg, klass, MONO_RGCTX_INFO_VALUE_SIZE);
3297 memcpy_ins = emit_get_gsharedvt_info_klass (cfg, klass, MONO_RGCTX_INFO_MEMCPY);
3301 n = mono_class_native_size (klass, &align);
3303 n = mono_class_value_size (klass, &align);
3305 /* if native is true there should be no references in the struct */
3306 if (cfg->gen_write_barriers && (klass->has_references || size_ins) && !native) {
3307 /* Avoid barriers when storing to the stack */
3308 if (!((dest->opcode == OP_ADD_IMM && dest->sreg1 == cfg->frame_reg) ||
3309 (dest->opcode == OP_LDADDR))) {
3315 context_used = mini_class_check_context_used (cfg, klass);
3317 /* It's ok to intrinsify under gsharing since shared code types are layout stable. */
3318 if (!size_ins && (cfg->opt & MONO_OPT_INTRINS) && mono_emit_wb_aware_memcpy (cfg, klass, iargs, n, align)) {
/* Barrier-aware copy couldn't be inlined: call mono_value_copy with the
 * class as third argument (rgctx-fetched / AOT const / raw pointer). */
3320 } else if (context_used) {
3321 iargs [2] = emit_get_rgctx_klass (cfg, context_used, klass, MONO_RGCTX_INFO_KLASS);
3323 if (cfg->compile_aot) {
3324 EMIT_NEW_CLASSCONST (cfg, iargs [2], klass);
3326 EMIT_NEW_PCONST (cfg, iargs [2], klass);
3327 mono_class_compute_gc_descriptor (klass);
3332 mono_emit_jit_icall (cfg, mono_gsharedvt_value_copy, iargs);
3334 mono_emit_jit_icall (cfg, mono_value_copy, iargs);
/* No-barrier path: inline memcpy for small fixed sizes... */
3339 if (!size_ins && (cfg->opt & MONO_OPT_INTRINS) && n <= sizeof (gpointer) * 8) {
3340 /* FIXME: Optimize the case when src/dest is OP_LDADDR */
3341 mini_emit_memcpy (cfg, dest->dreg, 0, src->dreg, 0, n, align);
/* ...otherwise call the managed memcpy helper (indirect for gsharedvt). */
3346 iargs [2] = size_ins;
3348 EMIT_NEW_ICONST (cfg, iargs [2], n);
3350 memcpy_method = get_memcpy_method ();
3352 mono_emit_calli (cfg, mono_method_signature (memcpy_method), iargs, memcpy_ins, NULL, NULL);
3354 mono_emit_method_call (cfg, memcpy_method, iargs, NULL);
/*
 * get_memset_method:
 *
 *   Return (lazily caching) the managed String.memset(dest,val,size)
 * helper from corlib; aborts on an old corlib that lacks it.
 */
3359 get_memset_method (void)
3361 static MonoMethod *memset_method = NULL;
3362 if (!memset_method) {
3363 memset_method = mono_class_get_method_from_name (mono_defaults.string_class, "memset", 3);
3365 g_error ("Old corlib found. Install a new one");
3367 return memset_method;
/*
 * mini_emit_initobj:
 *
 *   Emit code to zero-initialize a valuetype of type KLASS at the address
 * in DEST. Uses a runtime-sized bzero for gsharedvt types, an inline
 * memset intrinsic for small fixed sizes, and the managed memset helper
 * otherwise.
 */
3371 mini_emit_initobj (MonoCompile *cfg, MonoInst *dest, const guchar *ip, MonoClass *klass)
3373 MonoInst *iargs [3];
3376 MonoMethod *memset_method;
3377 MonoInst *size_ins = NULL;
3378 MonoInst *bzero_ins = NULL;
3379 static MonoMethod *bzero_method;
3381 /* FIXME: Optimize this for the case when dest is an LDADDR */
3382 mono_class_init (klass);
/* gsharedvt: size and bzero routine come from the rgctx at runtime. */
3383 if (mini_is_gsharedvt_klass (cfg, klass)) {
3384 size_ins = emit_get_gsharedvt_info_klass (cfg, klass, MONO_RGCTX_INFO_VALUE_SIZE);
3385 bzero_ins = emit_get_gsharedvt_info_klass (cfg, klass, MONO_RGCTX_INFO_BZERO);
3387 bzero_method = mono_class_get_method_from_name (mono_defaults.string_class, "bzero_aligned_1", 2);
3388 g_assert (bzero_method);
3390 iargs [1] = size_ins;
3391 mono_emit_calli (cfg, mono_method_signature (bzero_method), iargs, bzero_ins, NULL, NULL);
3395 n = mono_class_value_size (klass, &align);
/* Small sizes: inline memset, no call. */
3397 if (n <= sizeof (gpointer) * 8) {
3398 mini_emit_memset (cfg, dest->dreg, 0, n, 0, align);
/* Otherwise call managed memset(dest, 0, n). */
3401 memset_method = get_memset_method ();
3403 EMIT_NEW_ICONST (cfg, iargs [1], 0);
3404 EMIT_NEW_ICONST (cfg, iargs [2], n);
3405 mono_emit_method_call (cfg, memset_method, iargs, NULL);
/*
 * emit_get_rgctx:
 *
 *   Emit IR to load the runtime generic context for METHOD in the current
 * frame. Depending on CONTEXT_USED and the method's shape, the rgctx comes
 * from: the mrgctx argument (generic methods), the vtable argument (static
 * or valuetype methods), or the vtable of 'this'.
 */
3410 emit_get_rgctx (MonoCompile *cfg, MonoMethod *method, int context_used)
3412 MonoInst *this = NULL;
3414 g_assert (cfg->generic_sharing_context);
/* Instance methods of reference types can recover the context from
 * 'this', so load argument 0. */
3416 if (!(method->flags & METHOD_ATTRIBUTE_STATIC) &&
3417 !(context_used & MONO_GENERIC_CONTEXT_USED_METHOD) &&
3418 !method->klass->valuetype)
3419 EMIT_NEW_ARGLOAD (cfg, this, 0);
3421 if (context_used & MONO_GENERIC_CONTEXT_USED_METHOD) {
3422 MonoInst *mrgctx_loc, *mrgctx_var;
3425 g_assert (method->is_inflated && mono_method_get_context (method)->method_inst);
/* Method context used: the frame holds an mrgctx in the vtable var. */
3427 mrgctx_loc = mono_get_vtable_var (cfg);
3428 EMIT_NEW_TEMPLOAD (cfg, mrgctx_var, mrgctx_loc->inst_c0);
3431 } else if (method->flags & METHOD_ATTRIBUTE_STATIC || method->klass->valuetype) {
3432 MonoInst *vtable_loc, *vtable_var;
3436 vtable_loc = mono_get_vtable_var (cfg);
3437 EMIT_NEW_TEMPLOAD (cfg, vtable_var, vtable_loc->inst_c0);
3439 if (method->is_inflated && mono_method_get_context (method)->method_inst) {
/* The var actually holds an mrgctx; dereference to get the vtable. */
3440 MonoInst *mrgctx_var = vtable_var;
3443 vtable_reg = alloc_preg (cfg);
3444 EMIT_NEW_LOAD_MEMBASE (cfg, vtable_var, OP_LOAD_MEMBASE, vtable_reg, mrgctx_var->dreg, MONO_STRUCT_OFFSET (MonoMethodRuntimeGenericContext, class_vtable));
3445 vtable_var->type = STACK_PTR;
/* Default: load the vtable from 'this'. */
3453 vtable_reg = alloc_preg (cfg);
3454 EMIT_NEW_LOAD_MEMBASE (cfg, ins, OP_LOAD_MEMBASE, vtable_reg, this->dreg, MONO_STRUCT_OFFSET (MonoObject, vtable));
/*
 * mono_patch_info_rgctx_entry_new:
 *
 *   Allocate and fill a MonoJumpInfoRgctxEntry describing an rgctx fetch:
 * the requesting METHOD, whether the context is an mrgctx, the patch
 * (PATCH_TYPE/PATCH_DATA) identifying the data, and the rgctx info slot
 * type. Allocated from mempool MP; returned entry is zero-initialized
 * apart from the fields set here.
 */
3460 mono_patch_info_rgctx_entry_new (MonoMemPool *mp, MonoMethod *method, gboolean in_mrgctx, MonoJumpInfoType patch_type, gconstpointer patch_data, MonoRgctxInfoType info_type)
3462 MonoJumpInfoRgctxEntry *res = mono_mempool_alloc0 (mp, sizeof (MonoJumpInfoRgctxEntry));
3463 res->method = method;
3464 res->in_mrgctx = in_mrgctx;
3465 res->data = mono_mempool_alloc0 (mp, sizeof (MonoJumpInfo));
3466 res->data->type = patch_type;
3467 res->data->data.target = patch_data;
3468 res->info_type = info_type;
/*
 * emit_rgctx_fetch:
 *
 *   Emit a call to the rgctx lazy-fetch trampoline, passing RGCTX and the
 * patch ENTRY describing which slot to fetch.
 */
3473 static inline MonoInst*
3474 emit_rgctx_fetch (MonoCompile *cfg, MonoInst *rgctx, MonoJumpInfoRgctxEntry *entry)
3476 return mono_emit_abs_call (cfg, MONO_PATCH_INFO_RGCTX_FETCH, entry, helper_sig_rgctx_lazy_fetch_trampoline, &rgctx);
/*
 * emit_get_rgctx_klass:
 *
 *   Emit IR to fetch the RGCTX_TYPE info for KLASS from the current
 * method's rgctx.
 */
3480 emit_get_rgctx_klass (MonoCompile *cfg, int context_used,
3481 MonoClass *klass, MonoRgctxInfoType rgctx_type)
3483 MonoJumpInfoRgctxEntry *entry = mono_patch_info_rgctx_entry_new (cfg->mempool, cfg->current_method, context_used & MONO_GENERIC_CONTEXT_USED_METHOD, MONO_PATCH_INFO_CLASS, klass, rgctx_type);
3484 MonoInst *rgctx = emit_get_rgctx (cfg, cfg->current_method, context_used);
3486 return emit_rgctx_fetch (cfg, rgctx, entry);
/*
 * emit_get_rgctx_sig:
 *
 *   Emit IR to fetch the RGCTX_TYPE info for signature SIG from the
 * current method's rgctx.
 */
3490 emit_get_rgctx_sig (MonoCompile *cfg, int context_used,
3491 MonoMethodSignature *sig, MonoRgctxInfoType rgctx_type)
3493 MonoJumpInfoRgctxEntry *entry = mono_patch_info_rgctx_entry_new (cfg->mempool, cfg->current_method, context_used & MONO_GENERIC_CONTEXT_USED_METHOD, MONO_PATCH_INFO_SIGNATURE, sig, rgctx_type);
3494 MonoInst *rgctx = emit_get_rgctx (cfg, cfg->current_method, context_used);
3496 return emit_rgctx_fetch (cfg, rgctx, entry);
/*
 * emit_get_rgctx_gsharedvt_call:
 *
 *   Emit IR to fetch the RGCTX_TYPE info for a gsharedvt call described
 * by (SIG, CMETHOD) from the current method's rgctx.
 */
3500 emit_get_rgctx_gsharedvt_call (MonoCompile *cfg, int context_used,
3501 MonoMethodSignature *sig, MonoMethod *cmethod, MonoRgctxInfoType rgctx_type)
3503 MonoJumpInfoGSharedVtCall *call_info;
3504 MonoJumpInfoRgctxEntry *entry;
/* The patch data is a (sig, method) pair allocated from the mempool. */
3507 call_info = mono_mempool_alloc0 (cfg->mempool, sizeof (MonoJumpInfoGSharedVtCall));
3508 call_info->sig = sig;
3509 call_info->method = cmethod;
3511 entry = mono_patch_info_rgctx_entry_new (cfg->mempool, cfg->current_method, context_used & MONO_GENERIC_CONTEXT_USED_METHOD, MONO_PATCH_INFO_GSHAREDVT_CALL, call_info, rgctx_type);
3512 rgctx = emit_get_rgctx (cfg, cfg->current_method, context_used);
3514 return emit_rgctx_fetch (cfg, rgctx, entry);
/*
 * emit_get_rgctx_gsharedvt_method:
 *
 *   Emit IR to fetch the gsharedvt method info (INFO) for CMETHOD from
 * the current method's rgctx.
 */
3519 emit_get_rgctx_gsharedvt_method (MonoCompile *cfg, int context_used,
3520 MonoMethod *cmethod, MonoGSharedVtMethodInfo *info)
3522 MonoJumpInfoRgctxEntry *entry;
3525 entry = mono_patch_info_rgctx_entry_new (cfg->mempool, cfg->current_method, context_used & MONO_GENERIC_CONTEXT_USED_METHOD, MONO_PATCH_INFO_GSHAREDVT_METHOD, info, MONO_RGCTX_INFO_METHOD_GSHAREDVT_INFO);
3526 rgctx = emit_get_rgctx (cfg, cfg->current_method, context_used);
3528 return emit_rgctx_fetch (cfg, rgctx, entry);
3532 * emit_get_rgctx_method:
3534 * Emit IR to load the property RGCTX_TYPE of CMETHOD. If context_used is 0, emit
3535 * normal constants, else emit a load from the rgctx.
3538 emit_get_rgctx_method (MonoCompile *cfg, int context_used,
3539 MonoMethod *cmethod, MonoRgctxInfoType rgctx_type)
/* No context usage: the value is compile-time constant. */
3541 if (!context_used) {
3544 switch (rgctx_type) {
3545 case MONO_RGCTX_INFO_METHOD:
3546 EMIT_NEW_METHODCONST (cfg, ins, cmethod);
3548 case MONO_RGCTX_INFO_METHOD_RGCTX:
3549 EMIT_NEW_METHOD_RGCTX_CONST (cfg, ins, cmethod);
/* Other info types cannot be emitted as constants. */
3552 g_assert_not_reached ();
/* Shared code: fetch the value from the rgctx at runtime. */
3555 MonoJumpInfoRgctxEntry *entry = mono_patch_info_rgctx_entry_new (cfg->mempool, cfg->current_method, context_used & MONO_GENERIC_CONTEXT_USED_METHOD, MONO_PATCH_INFO_METHODCONST, cmethod, rgctx_type);
3556 MonoInst *rgctx = emit_get_rgctx (cfg, cfg->current_method, context_used);
3558 return emit_rgctx_fetch (cfg, rgctx, entry);
/*
 * emit_get_rgctx_field:
 *
 *   Emit IR to fetch the RGCTX_TYPE info for FIELD from the current
 * method's rgctx.
 */
3563 emit_get_rgctx_field (MonoCompile *cfg, int context_used,
3564 MonoClassField *field, MonoRgctxInfoType rgctx_type)
3566 MonoJumpInfoRgctxEntry *entry = mono_patch_info_rgctx_entry_new (cfg->mempool, cfg->current_method, context_used & MONO_GENERIC_CONTEXT_USED_METHOD, MONO_PATCH_INFO_FIELD, field, rgctx_type);
3567 MonoInst *rgctx = emit_get_rgctx (cfg, cfg->current_method, context_used);
3569 return emit_rgctx_fetch (cfg, rgctx, entry);
/*
 * get_gsharedvt_info_slot:
 *
 *   Return the index of the slot in cfg->gsharedvt_info->entries describing
 * (DATA, RGCTX_TYPE), reusing an existing slot when possible and appending a
 * new one otherwise. NOTE(review): the return statement is not visible in this
 * listing — presumably 'return idx;' follows the append; confirm in the full file.
 * NOTE(review): 'template' is a valid C identifier but a keyword in C++; this
 * file must therefore never be compiled as C++.
 */
3573 get_gsharedvt_info_slot (MonoCompile *cfg, gpointer data, MonoRgctxInfoType rgctx_type)
3575 MonoGSharedVtMethodInfo *info = cfg->gsharedvt_info;
3576 MonoRuntimeGenericContextInfoTemplate *template;
/* Reuse: linear scan for an entry with the same type+data. LOCAL_OFFSET
 * entries are deliberately never shared. */
3581 for (i = 0; i < info->num_entries; ++i) {
3582 MonoRuntimeGenericContextInfoTemplate *otemplate = &info->entries [i];
3584 if (otemplate->info_type == rgctx_type && otemplate->data == data && rgctx_type != MONO_RGCTX_INFO_LOCAL_OFFSET)
/* Grow the entries array (doubling, starting at 16). Memory comes from the
 * compile mempool, so the old array is simply abandoned, not freed. */
3588 if (info->num_entries == info->count_entries) {
3589 MonoRuntimeGenericContextInfoTemplate *new_entries;
3590 int new_count_entries = info->count_entries ? info->count_entries * 2 : 16;
3592 new_entries = mono_mempool_alloc0 (cfg->mempool, sizeof (MonoRuntimeGenericContextInfoTemplate) * new_count_entries);
3594 memcpy (new_entries, info->entries, sizeof (MonoRuntimeGenericContextInfoTemplate) * info->count_entries);
3595 info->entries = new_entries;
3596 info->count_entries = new_count_entries;
/* Append the new template at the end. */
3599 idx = info->num_entries;
3600 template = &info->entries [idx];
3601 template->info_type = rgctx_type;
3602 template->data = data;
3604 info->num_entries ++;
3610 * emit_get_gsharedvt_info:
3612 * This is similar to emit_get_rgctx_.., but loads the data from the gsharedvt info var instead of calling an rgctx fetch trampoline.
3615 emit_get_gsharedvt_info (MonoCompile *cfg, gpointer data, MonoRgctxInfoType rgctx_type)
/* Find (or create) the slot for (data, rgctx_type), then emit a single
 * pointer-sized load from gsharedvt_info_var at the slot's offset. */
3620 idx = get_gsharedvt_info_slot (cfg, data, rgctx_type);
3621 /* Load info->entries [idx] */
3622 dreg = alloc_preg (cfg);
3623 EMIT_NEW_LOAD_MEMBASE (cfg, ins, OP_LOAD_MEMBASE, dreg, cfg->gsharedvt_info_var->dreg, MONO_STRUCT_OFFSET (MonoGSharedVtMethodRuntimeInfo, entries) + (idx * sizeof (gpointer)));
/* Convenience wrapper: fetch gsharedvt info keyed by a class (via its byval type). */
3629 emit_get_gsharedvt_info_klass (MonoCompile *cfg, MonoClass *klass, MonoRgctxInfoType rgctx_type)
3631 return emit_get_gsharedvt_info (cfg, &klass->byval_arg, rgctx_type);
3635 * On return the caller must check @klass for load errors.
/* emit_generic_class_init: emit a call to the generic class-init trampoline for
 * KLASS, passing the vtable (from the rgctx when shared, as a constant
 * otherwise) in MONO_ARCH_VTABLE_REG where the architecture defines one. */
3638 emit_generic_class_init (MonoCompile *cfg, MonoClass *klass)
3640 MonoInst *vtable_arg;
3644 context_used = mini_class_check_context_used (cfg, klass);
/* Shared code: the vtable is only known at run time. */
3647 vtable_arg = emit_get_rgctx_klass (cfg, context_used,
3648 klass, MONO_RGCTX_INFO_VTABLE);
3650 MonoVTable *vtable = mono_class_vtable (cfg->domain, klass);
3654 EMIT_NEW_VTABLECONST (cfg, vtable_arg, vtable);
/* LLVM needs a distinct trampoline signature — presumably because it cannot
 * model the implicit vtable-register argument; confirm in the full file. */
3657 if (COMPILE_LLVM (cfg))
3658 call = (MonoCallInst*)mono_emit_abs_call (cfg, MONO_PATCH_INFO_GENERIC_CLASS_INIT, NULL, helper_sig_generic_class_init_trampoline_llvm, &vtable_arg);
3660 call = (MonoCallInst*)mono_emit_abs_call (cfg, MONO_PATCH_INFO_GENERIC_CLASS_INIT, NULL, helper_sig_generic_class_init_trampoline, &vtable_arg);
3661 #ifdef MONO_ARCH_VTABLE_REG
3662 mono_call_inst_add_outarg_reg (cfg, call, vtable_arg->dreg, MONO_ARCH_VTABLE_REG, FALSE);
3663 cfg->uses_vtable_reg = TRUE;
/*
 * emit_seq_point:
 *
 *   Emit a debugger sequence point at native offset ip - header->code.
 * Only emitted when sequence points are enabled and we are compiling METHOD
 * itself (not an inlined callee).
 */
3670 emit_seq_point (MonoCompile *cfg, MonoMethod *method, guint8* ip, gboolean intr_loc, gboolean nonempty_stack)
3674 if (cfg->gen_seq_points && cfg->method == method) {
3675 NEW_SEQ_POINT (cfg, ins, ip - cfg->header->code, intr_loc);
3677 ins->flags |= MONO_INST_NONEMPTY_STACK;
3678 MONO_ADD_INS (cfg->cbb, ins);
/*
 * save_cast_details:
 *
 *   When --debug=casts is enabled, record the source class (read from OBJ_REG's
 * vtable) and the target KLASS into the JIT TLS area so a failed cast can
 * produce a detailed exception message. No-op otherwise. If NULL_CHECK is set
 * the recording is skipped for null objects. On return, when OUT_BBLOCK is
 * non-NULL it receives the current basic block (this function may start a new one).
 */
3683 save_cast_details (MonoCompile *cfg, MonoClass *klass, int obj_reg, gboolean null_check, MonoBasicBlock **out_bblock)
3685 if (mini_get_debug_options ()->better_cast_details) {
3686 int vtable_reg = alloc_preg (cfg);
3687 int klass_reg = alloc_preg (cfg);
3688 MonoBasicBlock *is_null_bb = NULL;
3690 int to_klass_reg, context_used;
/* Skip recording for a null object: branch straight to is_null_bb. */
3693 NEW_BBLOCK (cfg, is_null_bb);
3695 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, obj_reg, 0);
3696 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBEQ, is_null_bb);
/* Cast details live in MonoJitTlsData; requires the TLS intrinsic. */
3699 tls_get = mono_get_jit_tls_intrinsic (cfg);
3701 fprintf (stderr, "error: --debug=casts not supported on this platform.\n.");
3705 MONO_ADD_INS (cfg->cbb, tls_get);
/* class_cast_from := obj->vtable->klass */
3706 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, vtable_reg, obj_reg, MONO_STRUCT_OFFSET (MonoObject, vtable));
3707 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, vtable_reg, MONO_STRUCT_OFFSET (MonoVTable, klass));
3709 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORE_MEMBASE_REG, tls_get->dreg, MONO_STRUCT_OFFSET (MonoJitTlsData, class_cast_from), klass_reg);
/* class_cast_to := target klass (from rgctx when generic-shared). */
3711 context_used = mini_class_check_context_used (cfg, klass);
3713 MonoInst *class_ins;
3715 class_ins = emit_get_rgctx_klass (cfg, context_used, klass, MONO_RGCTX_INFO_KLASS);
3716 to_klass_reg = class_ins->dreg;
3718 to_klass_reg = alloc_preg (cfg);
3719 MONO_EMIT_NEW_CLASSCONST (cfg, to_klass_reg, klass);
3721 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORE_MEMBASE_REG, tls_get->dreg, MONO_STRUCT_OFFSET (MonoJitTlsData, class_cast_to), to_klass_reg);
3724 MONO_START_BB (cfg, is_null_bb);
3726 *out_bblock = cfg->cbb;
/*
 * reset_cast_details:
 *
 *   Clear the cast-details state recorded by save_cast_details once the cast
 * has been emitted, so stale data cannot leak into a later failure report.
 */
3732 reset_cast_details (MonoCompile *cfg)
3734 /* Reset the variables holding the cast details */
3735 if (mini_get_debug_options ()->better_cast_details) {
3736 MonoInst *tls_get = mono_get_jit_tls_intrinsic (cfg);
3738 MONO_ADD_INS (cfg->cbb, tls_get);
3739 /* It is enough to reset the from field */
3740 MONO_EMIT_NEW_STORE_MEMBASE_IMM (cfg, OP_STORE_MEMBASE_IMM, tls_get->dreg, MONO_STRUCT_OFFSET (MonoJitTlsData, class_cast_from), 0);
3745 * On return the caller must check @array_class for load errors
/* mini_emit_check_array_type: emit a runtime check that OBJ's exact type is
 * ARRAY_CLASS, throwing ArrayTypeMismatchException otherwise (used for stelem
 * covariance checks). The comparison strategy depends on the compilation mode:
 * class compare for MONO_OPT_SHARED, rgctx vtable compare for generic sharing,
 * vtable const compare otherwise. */
3748 mini_emit_check_array_type (MonoCompile *cfg, MonoInst *obj, MonoClass *array_class)
3750 int vtable_reg = alloc_preg (cfg);
3753 context_used = mini_class_check_context_used (cfg, array_class);
3755 save_cast_details (cfg, array_class, obj->dreg, FALSE, NULL);
/* Faulting load: also acts as the null check on obj. */
3757 MONO_EMIT_NEW_LOAD_MEMBASE_FAULT (cfg, vtable_reg, obj->dreg, MONO_STRUCT_OFFSET (MonoObject, vtable));
/* Shared (domain-neutral) code: compare MonoClass pointers since vtables are
 * per-domain. */
3759 if (cfg->opt & MONO_OPT_SHARED) {
3760 int class_reg = alloc_preg (cfg);
3761 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, class_reg, vtable_reg, MONO_STRUCT_OFFSET (MonoVTable, klass));
3762 if (cfg->compile_aot) {
3763 int klass_reg = alloc_preg (cfg);
3764 MONO_EMIT_NEW_CLASSCONST (cfg, klass_reg, array_class);
3765 MONO_EMIT_NEW_BIALU (cfg, OP_COMPARE, -1, class_reg, klass_reg);
3767 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, class_reg, array_class);
/* Generic sharing: fetch the expected vtable from the rgctx. */
3769 } else if (context_used) {
3770 MonoInst *vtable_ins;
3772 vtable_ins = emit_get_rgctx_klass (cfg, context_used, array_class, MONO_RGCTX_INFO_VTABLE);
3773 MONO_EMIT_NEW_BIALU (cfg, OP_COMPARE, -1, vtable_reg, vtable_ins->dreg);
/* Plain JIT/AOT: compare against the known vtable. mono_class_vtable may fail
 * on a load error — callers must check array_class afterwards (see header). */
3775 if (cfg->compile_aot) {
3779 if (!(vtable = mono_class_vtable (cfg->domain, array_class)))
3781 vt_reg = alloc_preg (cfg);
3782 MONO_EMIT_NEW_VTABLECONST (cfg, vt_reg, vtable);
3783 MONO_EMIT_NEW_BIALU (cfg, OP_COMPARE, -1, vtable_reg, vt_reg);
3786 if (!(vtable = mono_class_vtable (cfg->domain, array_class)))
3788 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, vtable_reg, vtable);
3792 MONO_EMIT_NEW_COND_EXC (cfg, NE_UN, "ArrayTypeMismatchException");
3794 reset_cast_details (cfg);
3798 * Handles unbox of a Nullable<T>. If context_used is non zero, then shared
3799 * generic code is generated.
3802 handle_unbox_nullable (MonoCompile* cfg, MonoInst* val, MonoClass* klass, int context_used)
/* Unboxing Nullable<T> is delegated to the managed Nullable<T>.Unbox method. */
3804 MonoMethod* method = mono_class_get_method_from_name (klass, "Unbox", 1);
3807 MonoInst *rgctx, *addr;
3809 /* FIXME: What if the class is shared? We might not
3810 have to get the address of the method from the
/* Shared path: obtain Unbox's code address from the rgctx and call indirectly. */
3812 addr = emit_get_rgctx_method (cfg, context_used, method,
3813 MONO_RGCTX_INFO_GENERIC_METHOD_CODE);
3815 rgctx = emit_get_rgctx (cfg, cfg->current_method, context_used);
3817 return mono_emit_calli (cfg, mono_method_signature (method), &val, addr, NULL, rgctx);
/* Non-shared path: direct call, passing the vtable as the rgctx argument if
 * the target method requires it. */
3819 gboolean pass_vtable, pass_mrgctx;
3820 MonoInst *rgctx_arg = NULL;
3822 check_method_sharing (cfg, method, &pass_vtable, &pass_mrgctx);
3823 g_assert (!pass_mrgctx);
3826 MonoVTable *vtable = mono_class_vtable (cfg->domain, method->klass);
3829 EMIT_NEW_VTABLECONST (cfg, rgctx_arg, vtable);
3832 return mono_emit_method_call_full (cfg, method, NULL, FALSE, &val, NULL, NULL, rgctx_arg);
/*
 * handle_unbox:
 *
 *   Emit the unbox of SP[0] to value type KLASS: a runtime type check
 * (InvalidCastException on mismatch) followed by computing the address of the
 * boxed payload (obj + sizeof (MonoObject)), which is pushed as a STACK_MP value.
 */
3837 handle_unbox (MonoCompile *cfg, MonoClass *klass, MonoInst **sp, int context_used)
3841 int vtable_reg = alloc_dreg (cfg ,STACK_PTR);
3842 int klass_reg = alloc_dreg (cfg ,STACK_PTR);
3843 int eclass_reg = alloc_dreg (cfg ,STACK_PTR);
3844 int rank_reg = alloc_dreg (cfg ,STACK_I4);
/* Faulting vtable load doubles as the null check. */
3846 obj_reg = sp [0]->dreg;
3847 MONO_EMIT_NEW_LOAD_MEMBASE_FAULT (cfg, vtable_reg, obj_reg, MONO_STRUCT_OFFSET (MonoObject, vtable));
3848 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADU1_MEMBASE, rank_reg, vtable_reg, MONO_STRUCT_OFFSET (MonoVTable, rank));
3850 /* FIXME: generics */
3851 g_assert (klass->rank == 0);
/* An array can never unbox to a value type: rank must be 0. */
3854 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, rank_reg, 0);
3855 MONO_EMIT_NEW_COND_EXC (cfg, NE_UN, "InvalidCastException");
/* Compare the object's element class against the target's. */
3857 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, vtable_reg, MONO_STRUCT_OFFSET (MonoVTable, klass));
3858 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, eclass_reg, klass_reg, MONO_STRUCT_OFFSET (MonoClass, element_class));
/* Shared-generics path: target element class comes from the rgctx. */
3861 MonoInst *element_class;
3863 /* This assertion is from the unboxcast insn */
3864 g_assert (klass->rank == 0);
3866 element_class = emit_get_rgctx_klass (cfg, context_used,
3867 klass, MONO_RGCTX_INFO_ELEMENT_KLASS);
3869 MONO_EMIT_NEW_BIALU (cfg, OP_COMPARE, -1, eclass_reg, element_class->dreg);
3870 MONO_EMIT_NEW_COND_EXC (cfg, NE_UN, "InvalidCastException");
/* Non-shared path: class check with cast-details bookkeeping. */
3872 save_cast_details (cfg, klass->element_class, obj_reg, FALSE, NULL);
3873 mini_emit_class_check (cfg, eclass_reg, klass->element_class);
3874 reset_cast_details (cfg);
/* Result: managed pointer to the payload right after the object header. */
3877 NEW_BIALU_IMM (cfg, add, OP_ADD_IMM, alloc_dreg (cfg, STACK_MP), obj_reg, sizeof (MonoObject));
3878 MONO_ADD_INS (cfg->cbb, add);
3879 add->type = STACK_MP;
/*
 * handle_unbox_gsharedvt:
 *
 *   Unbox OBJ when KLASS is a gsharedvt type whose concrete representation is
 * only known at run time. Branches at run time on the class's box type
 * (1 = reference type, 2 = nullable, otherwise vtype — presumably; encoding
 * taken from the compares below, confirm against MONO_RGCTX_INFO_CLASS_BOX_TYPE)
 * and leaves the address of the value in addr_reg. *OUT_CBB receives the
 * basic block current after the merge.
 */
3886 handle_unbox_gsharedvt (MonoCompile *cfg, MonoClass *klass, MonoInst *obj, MonoBasicBlock **out_cbb)
3888 MonoInst *addr, *klass_inst, *is_ref, *args[16];
3889 MonoBasicBlock *is_ref_bb, *is_nullable_bb, *end_bb;
3893 klass_inst = emit_get_gsharedvt_info_klass (cfg, klass, MONO_RGCTX_INFO_KLASS);
/* Type-check the object via the castclass_unbox icall. */
3899 args [1] = klass_inst;
3902 obj = mono_emit_jit_icall (cfg, mono_object_castclass_unbox, args);
3904 NEW_BBLOCK (cfg, is_ref_bb);
3905 NEW_BBLOCK (cfg, is_nullable_bb);
3906 NEW_BBLOCK (cfg, end_bb);
/* Dispatch on the runtime box type of the class. */
3907 is_ref = emit_get_gsharedvt_info_klass (cfg, klass, MONO_RGCTX_INFO_CLASS_BOX_TYPE);
3908 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, is_ref->dreg, 1);
3909 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_IBEQ, is_ref_bb);
3911 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, is_ref->dreg, 2);
3912 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_IBEQ, is_nullable_bb);
3914 /* This will contain either the address of the unboxed vtype, or an address of the temporary where the ref is stored */
3915 addr_reg = alloc_dreg (cfg, STACK_MP);
/* Vtype case: payload address = obj + sizeof (MonoObject). */
3919 NEW_BIALU_IMM (cfg, addr, OP_ADD_IMM, addr_reg, obj->dreg, sizeof (MonoObject));
3920 MONO_ADD_INS (cfg->cbb, addr);
3922 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, end_bb);
/* Reference case: spill the ref to a stack temporary and use its address,
 * so all three paths produce an address in addr_reg. */
3925 MONO_START_BB (cfg, is_ref_bb);
3927 /* Save the ref to a temporary */
3928 dreg = alloc_ireg (cfg);
3929 EMIT_NEW_VARLOADA_VREG (cfg, addr, dreg, &klass->byval_arg);
3930 addr->dreg = addr_reg;
3931 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORE_MEMBASE_REG, addr->dreg, 0, obj->dreg);
3932 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, end_bb);
/* Nullable case: call Nullable<T>.Unbox indirectly with a hand-built
 * object->T signature, since the instantiated method cannot be constructed
 * at JIT time for gsharedvt. */
3935 MONO_START_BB (cfg, is_nullable_bb);
3938 MonoInst *addr = emit_get_gsharedvt_info_klass (cfg, klass, MONO_RGCTX_INFO_NULLABLE_CLASS_UNBOX);
3939 MonoInst *unbox_call;
3940 MonoMethodSignature *unbox_sig;
3942 unbox_sig = mono_mempool_alloc0 (cfg->mempool, MONO_SIZEOF_METHOD_SIGNATURE + (1 * sizeof (MonoType *)));
3943 unbox_sig->ret = &klass->byval_arg;
3944 unbox_sig->param_count = 1;
3945 unbox_sig->params [0] = &mono_defaults.object_class->byval_arg;
3946 unbox_call = mono_emit_calli (cfg, unbox_sig, &obj, addr, NULL, NULL);
3948 EMIT_NEW_VARLOADA_VREG (cfg, addr, unbox_call->dreg, &klass->byval_arg);
3949 addr->dreg = addr_reg;
3952 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, end_bb);
/* Merge point: load the value through the address computed above. */
3955 MONO_START_BB (cfg, end_bb);
3958 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, ins, &klass->byval_arg, addr_reg, 0);
3960 *out_cbb = cfg->cbb;
3966 * Returns NULL and set the cfg exception on error.
/* handle_alloc: emit the allocation of a new object of KLASS, choosing between
 * the managed GC allocator, per-class allocation functions, and generic icalls
 * (mono_object_new / mono_object_new_specific) depending on sharing mode, AOT,
 * and GC capabilities. FOR_BOX selects the boxing variant of the managed allocator. */
3969 handle_alloc (MonoCompile *cfg, MonoClass *klass, gboolean for_box, int context_used)
3971 MonoInst *iargs [2];
/* --- Generic-sharing path: class/vtable comes from the rgctx. ---
 * Note this inner iargs shadows the outer declaration above. */
3977 MonoInst *iargs [2];
3978 gboolean known_instance_size = !mini_is_gsharedvt_klass (cfg, klass);
3980 MonoMethod *managed_alloc = mono_gc_get_managed_allocator (klass, for_box, known_instance_size);
/* MONO_OPT_SHARED code passes a MonoClass; otherwise a MonoVTable. */
3982 if (cfg->opt & MONO_OPT_SHARED)
3983 rgctx_info = MONO_RGCTX_INFO_KLASS;
3985 rgctx_info = MONO_RGCTX_INFO_VTABLE;
3986 data = emit_get_rgctx_klass (cfg, context_used, klass, rgctx_info);
3988 if (cfg->opt & MONO_OPT_SHARED) {
3989 EMIT_NEW_DOMAINCONST (cfg, iargs [0]);
3991 alloc_ftn = mono_object_new;
3994 alloc_ftn = mono_object_new_specific;
/* Prefer the inlined managed allocator when available; pass the GC-aligned
 * instance size when it is statically known. */
3997 if (managed_alloc && !(cfg->opt & MONO_OPT_SHARED)) {
3998 if (known_instance_size) {
3999 int size = mono_class_instance_size (klass);
4001 EMIT_NEW_ICONST (cfg, iargs [1], mono_gc_get_aligned_size_for_allocator (size));
4003 return mono_emit_method_call (cfg, managed_alloc, iargs, NULL);
4006 return mono_emit_jit_icall (cfg, alloc_ftn, iargs);
/* --- Non-shared paths. --- */
4009 if (cfg->opt & MONO_OPT_SHARED) {
4010 EMIT_NEW_DOMAINCONST (cfg, iargs [0]);
4011 EMIT_NEW_CLASSCONST (cfg, iargs [1], klass);
4013 alloc_ftn = mono_object_new;
4014 } else if (cfg->compile_aot && cfg->cbb->out_of_line && klass->type_token && klass->image == mono_defaults.corlib && !klass->generic_class) {
4015 /* This happens often in argument checking code, eg. throw new FooException... */
4016 /* Avoid relocations and save some space by calling a helper function specialized to mscorlib */
4017 EMIT_NEW_ICONST (cfg, iargs [0], mono_metadata_token_index (klass->type_token));
4018 return mono_emit_jit_icall (cfg, mono_helper_newobj_mscorlib, iargs);
4020 MonoVTable *vtable = mono_class_vtable (cfg->domain, klass);
4021 MonoMethod *managed_alloc = NULL;
/* vtable creation failed: report a type-load error through the cfg. */
4025 mono_cfg_set_exception (cfg, MONO_EXCEPTION_TYPE_LOAD);
4026 cfg->exception_ptr = klass;
4030 #ifndef MONO_CROSS_COMPILE
4031 managed_alloc = mono_gc_get_managed_allocator (klass, for_box, TRUE);
4034 if (managed_alloc) {
4035 int size = mono_class_instance_size (klass);
4037 EMIT_NEW_VTABLECONST (cfg, iargs [0], vtable);
4038 EMIT_NEW_ICONST (cfg, iargs [1], mono_gc_get_aligned_size_for_allocator (size));
4039 return mono_emit_method_call (cfg, managed_alloc, iargs, NULL);
/* Fall back to the per-class allocation function; pass_lw indicates it wants
 * the instance size in gpointer-sized words as its first argument. */
4041 alloc_ftn = mono_class_get_allocation_ftn (vtable, for_box, &pass_lw);
4043 guint32 lw = vtable->klass->instance_size;
4044 lw = ((lw + (sizeof (gpointer) - 1)) & ~(sizeof (gpointer) - 1)) / sizeof (gpointer);
4045 EMIT_NEW_ICONST (cfg, iargs [0], lw);
4046 EMIT_NEW_VTABLECONST (cfg, iargs [1], vtable);
4049 EMIT_NEW_VTABLECONST (cfg, iargs [0], vtable);
4053 return mono_emit_jit_icall (cfg, alloc_ftn, iargs);
4057 * Returns NULL and set the cfg exception on error.
/* handle_box: emit the boxing of VAL (a value of type KLASS) into a new object.
 * Special cases: Nullable<T> delegates to the managed Nullable<T>.Box method;
 * gsharedvt classes dispatch at run time on the class's box type. *OUT_CBB
 * receives the basic block current when the function returns. */
4060 handle_box (MonoCompile *cfg, MonoInst *val, MonoClass *klass, int context_used, MonoBasicBlock **out_cbb)
4062 MonoInst *alloc, *ins;
4064 *out_cbb = cfg->cbb;
/* --- Nullable<T>: box via the managed helper (mirrors handle_unbox_nullable). --- */
4066 if (mono_class_is_nullable (klass)) {
4067 MonoMethod* method = mono_class_get_method_from_name (klass, "Box", 1);
4070 /* FIXME: What if the class is shared? We might not
4071 have to get the method address from the RGCTX. */
4072 MonoInst *addr = emit_get_rgctx_method (cfg, context_used, method,
4073 MONO_RGCTX_INFO_GENERIC_METHOD_CODE);
4074 MonoInst *rgctx = emit_get_rgctx (cfg, cfg->current_method, context_used);
4076 return mono_emit_calli (cfg, mono_method_signature (method), &val, addr, NULL, rgctx);
4078 gboolean pass_vtable, pass_mrgctx;
4079 MonoInst *rgctx_arg = NULL;
4081 check_method_sharing (cfg, method, &pass_vtable, &pass_mrgctx);
4082 g_assert (!pass_mrgctx);
4085 MonoVTable *vtable = mono_class_vtable (cfg->domain, method->klass);
4088 EMIT_NEW_VTABLECONST (cfg, rgctx_arg, vtable);
4091 return mono_emit_method_call_full (cfg, method, NULL, FALSE, &val, NULL, NULL, rgctx_arg);
/* --- gsharedvt: the box strategy is only known at run time. Branch on the
 * class box type (1 = ref, 2 = nullable, else vtype — same encoding as
 * handle_unbox_gsharedvt; confirm against MONO_RGCTX_INFO_CLASS_BOX_TYPE). --- */
4095 if (mini_is_gsharedvt_klass (cfg, klass)) {
4096 MonoBasicBlock *is_ref_bb, *is_nullable_bb, *end_bb;
4097 MonoInst *res, *is_ref, *src_var, *addr;
4100 dreg = alloc_ireg (cfg);
4102 NEW_BBLOCK (cfg, is_ref_bb);
4103 NEW_BBLOCK (cfg, is_nullable_bb);
4104 NEW_BBLOCK (cfg, end_bb);
4105 is_ref = emit_get_gsharedvt_info_klass (cfg, klass, MONO_RGCTX_INFO_CLASS_BOX_TYPE);
4106 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, is_ref->dreg, 1);
4107 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_IBEQ, is_ref_bb);
4109 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, is_ref->dreg, 2);
4110 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_IBEQ, is_nullable_bb);
/* Vtype case: allocate the box and store the value after the object header. */
4113 alloc = handle_alloc (cfg, klass, TRUE, context_used);
4116 EMIT_NEW_STORE_MEMBASE_TYPE (cfg, ins, &klass->byval_arg, alloc->dreg, sizeof (MonoObject), val->dreg);
4117 ins->opcode = OP_STOREV_MEMBASE;
4119 EMIT_NEW_UNALU (cfg, res, OP_MOVE, dreg, alloc->dreg);
4120 res->type = STACK_OBJ;
4122 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, end_bb);
/* Ref case: boxing a reference is the identity; reload the value via its
 * home location since val's vreg holds a vtype. */
4125 MONO_START_BB (cfg, is_ref_bb);
4127 /* val is a vtype, so has to load the value manually */
4128 src_var = get_vreg_to_inst (cfg, val->dreg);
4130 src_var = mono_compile_create_var_for_vreg (cfg, &klass->byval_arg, OP_LOCAL, val->dreg);
4131 EMIT_NEW_VARLOADA (cfg, addr, src_var, src_var->inst_vtype);
4132 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, dreg, addr->dreg, 0);
4133 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, end_bb);
/* Nullable case: call Nullable<T>.Box through a hand-built T->object signature. */
4136 MONO_START_BB (cfg, is_nullable_bb);
4139 MonoInst *addr = emit_get_gsharedvt_info_klass (cfg, klass,
4140 MONO_RGCTX_INFO_NULLABLE_CLASS_BOX);
4142 MonoMethodSignature *box_sig;
4145 * klass is Nullable<T>, need to call Nullable<T>.Box () using a gsharedvt signature, but we cannot
4146 * construct that method at JIT time, so have to do things by hand.
4148 box_sig = mono_mempool_alloc0 (cfg->mempool, MONO_SIZEOF_METHOD_SIGNATURE + (1 * sizeof (MonoType *)));
4149 box_sig->ret = &mono_defaults.object_class->byval_arg;
4150 box_sig->param_count = 1;
4151 box_sig->params [0] = &klass->byval_arg;
4152 box_call = mono_emit_calli (cfg, box_sig, &val, addr, NULL, NULL);
4153 EMIT_NEW_UNALU (cfg, res, OP_MOVE, dreg, box_call->dreg);
4154 res->type = STACK_OBJ;
4158 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, end_bb);
4160 MONO_START_BB (cfg, end_bb);
4162 *out_cbb = cfg->cbb;
/* --- Plain case: allocate and store the value after the object header. --- */
4166 alloc = handle_alloc (cfg, klass, TRUE, context_used);
4170 EMIT_NEW_STORE_MEMBASE_TYPE (cfg, ins, &klass->byval_arg, alloc->dreg, sizeof (MonoObject), val->dreg);
/*
 * mini_class_has_reference_variant_generic_argument:
 *
 *   Return whether KLASS is a generic instance (or, when compiling shared
 * code, an open generic type) with at least one covariant/contravariant
 * parameter instantiated with a reference type. Such classes need the slower
 * cache-based cast helpers since variance affects cast results.
 */
4176 mini_class_has_reference_variant_generic_argument (MonoCompile *cfg, MonoClass *klass, int context_used)
4179 MonoGenericContainer *container;
4180 MonoGenericInst *ginst;
4182 if (klass->generic_class) {
4183 container = klass->generic_class->container_class->generic_container;
4184 ginst = klass->generic_class->context.class_inst;
4185 } else if (klass->generic_container && context_used) {
4186 container = klass->generic_container;
4187 ginst = container->context.class_inst;
/* Scan each type argument; only variant parameters matter. */
4192 for (i = 0; i < container->type_argc; ++i) {
4194 if (!(mono_generic_container_get_param_info (container, i)->flags & (MONO_GEN_PARAM_VARIANT|MONO_GEN_PARAM_COVARIANT)))
4196 type = ginst->type_argv [i];
4197 if (mini_type_is_reference (cfg, type))
/* Lazily-initialized whitelist of corlib class names whose icalls may be
 * called directly (never freed; shared across compilations). */
static GHashTable* direct_icall_type_hash;
/*
 * icall_is_direct_callable:
 *
 *   Return whether CMETHOD's internal call can be invoked directly instead of
 * through a wrapper. An icall qualifies only if it cannot (transitively) call
 * mono_raise_exception (); a small whitelist of known-safe corlib classes is
 * used for now.
 */
4206 icall_is_direct_callable (MonoCompile *cfg, MonoMethod *cmethod)
4208 /* LLVM on amd64 can't handle calls to non-32 bit addresses */
4209 if (!direct_icalls_enabled (cfg))
4213 * An icall is directly callable if it doesn't directly or indirectly call mono_raise_exception ().
4214 * Whitelist a few icalls for now.
/* Lazy init: build fully before publishing; the barrier orders the inserts
 * before the pointer store so racing readers see a complete table. */
4216 if (!direct_icall_type_hash) {
4217 GHashTable *h = g_hash_table_new (g_str_hash, g_str_equal);
4219 g_hash_table_insert (h, (char*)"Decimal", GUINT_TO_POINTER (1));
4220 g_hash_table_insert (h, (char*)"Number", GUINT_TO_POINTER (1));
4221 g_hash_table_insert (h, (char*)"Buffer", GUINT_TO_POINTER (1));
4222 mono_memory_barrier ();
4223 direct_icall_type_hash = h;
4226 if (cmethod->klass == mono_defaults.math_class)
4228 /* No locking needed */
4229 if (cmethod->klass->image == mono_defaults.corlib && g_hash_table_lookup (direct_icall_type_hash, cmethod->klass->name))
4234 #define is_complex_isinst(klass) ((klass->flags & TYPE_ATTRIBUTE_INTERFACE) || klass->rank || mono_class_is_nullable (klass) || mono_class_is_marshalbyref (klass) || (klass->flags & TYPE_ATTRIBUTE_SEALED) || klass->byval_arg.type == MONO_TYPE_VAR || klass->byval_arg.type == MONO_TYPE_MVAR)
/*
 * emit_castclass_with_cache:
 *
 *   Emit a call to the castclass-with-cache marshal wrapper for ARGS
 * (obj, klass, cache slot), wrapped with cast-details bookkeeping so a failed
 * cast can report source/target classes. *OUT_BBLOCK receives the current
 * basic block on return.
 */
4237 emit_castclass_with_cache (MonoCompile *cfg, MonoClass *klass, MonoInst **args, MonoBasicBlock **out_bblock)
4239 MonoMethod *mono_castclass;
4242 mono_castclass = mono_marshal_get_castclass_with_cache ();
4244 save_cast_details (cfg, klass, args [0]->dreg, TRUE, out_bblock);
4245 res = mono_emit_method_call (cfg, mono_castclass, args, NULL);
4246 reset_cast_details (cfg);
4247 *out_bblock = cfg->cbb;
/* Allocate a call-site-unique index for a CASTCLASS_CACHE patch: high 16 bits
 * identify the method, low bits a per-method counter. NOTE(review): no overflow
 * guard is visible for either field in this listing. */
4253 get_castclass_cache_idx (MonoCompile *cfg)
4255 /* Each CASTCLASS_CACHE patch needs a unique index which identifies the call site */
4256 cfg->castclass_cache_index ++;
4257 return (cfg->method_index << 16) | cfg->castclass_cache_index;
/*
 * emit_castclass_with_cache_nonshared:
 *
 *   Non-generic-sharing front end for emit_castclass_with_cache: the target
 * class is a compile-time constant, and the cache slot is either an AOT patch
 * (keyed by a unique call-site index) or a domain-allocated pointer-sized slot.
 */
4261 emit_castclass_with_cache_nonshared (MonoCompile *cfg, MonoInst *obj, MonoClass *klass, MonoBasicBlock **out_bblock)
4270 EMIT_NEW_CLASSCONST (cfg, args [1], klass);
4273 if (cfg->compile_aot) {
4274 idx = get_castclass_cache_idx (cfg);
4275 EMIT_NEW_AOTCONST (cfg, args [2], MONO_PATCH_INFO_CASTCLASS_CACHE, GINT_TO_POINTER (idx));
4277 EMIT_NEW_PCONST (cfg, args [2], mono_domain_alloc0 (cfg->domain, sizeof (gpointer)));
4280 /*The wrapper doesn't inline well so the bloat of inlining doesn't pay off.*/
4282 return emit_castclass_with_cache (cfg, klass, args, out_bblock);
4286 * Returns NULL and set the cfg exception on error.
/* handle_castclass: emit IR for the 'castclass' opcode — cast SRC to KLASS,
 * throwing InvalidCastException at run time on failure. Strategy, fastest
 * first: cache-based helper for variant/complex classes, inlined marshal
 * wrapper for interfaces/MarshalByRef, otherwise inline vtable/class compares.
 * *OUT_BB receives the current basic block; *INLINE_COSTS is incremented by
 * the cost of any inlined helper. */
4289 handle_castclass (MonoCompile *cfg, MonoClass *klass, MonoInst *src, guint8 *ip, MonoBasicBlock **out_bb, int *inline_costs)
4291 MonoBasicBlock *is_null_bb;
4292 int obj_reg = src->dreg;
4293 int vtable_reg = alloc_preg (cfg);
4295 MonoInst *klass_inst = NULL, *res;
4296 MonoBasicBlock *bblock;
4300 context_used = mini_class_check_context_used (cfg, klass);
/* Non-shared, variance involved: must use the cache-based helper. */
4302 if (!context_used && mini_class_has_reference_variant_generic_argument (cfg, klass, context_used)) {
4303 res = emit_castclass_with_cache_nonshared (cfg, src, klass, &bblock);
4304 (*inline_costs) += 2;
/* Non-shared interface / MarshalByRef: inline the castclass marshal wrapper. */
4307 } else if (!context_used && (mono_class_is_marshalbyref (klass) || klass->flags & TYPE_ATTRIBUTE_INTERFACE)) {
4308 MonoMethod *mono_castclass;
4309 MonoInst *iargs [1];
4312 mono_castclass = mono_marshal_get_castclass (klass);
4315 save_cast_details (cfg, klass, src->dreg, TRUE, &bblock);
4316 costs = inline_method (cfg, mono_castclass, mono_method_signature (mono_castclass),
4317 iargs, ip, cfg->real_offset, TRUE, &bblock);
4318 reset_cast_details (cfg);
4319 CHECK_CFG_EXCEPTION;
4320 g_assert (costs > 0);
/* Account for the replaced call instruction (5 bytes of IL). */
4322 cfg->real_offset += 5;
4324 (*inline_costs) += costs;
/* Shared path, complex class: cache-based helper with the cache from the rgctx.
 * The cache entry layout is (cache, klass); args[1] loads the second word. */
4333 if(mini_class_has_reference_variant_generic_argument (cfg, klass, context_used) || is_complex_isinst (klass)) {
4334 MonoInst *cache_ins;
4336 cache_ins = emit_get_rgctx_klass (cfg, context_used, klass, MONO_RGCTX_INFO_CAST_CACHE);
4341 /* klass - it's the second element of the cache entry*/
4342 EMIT_NEW_LOAD_MEMBASE (cfg, args [1], OP_LOAD_MEMBASE, alloc_preg (cfg), cache_ins->dreg, sizeof (gpointer));
4345 args [2] = cache_ins;
4347 return emit_castclass_with_cache (cfg, klass, args, out_bb);
4350 klass_inst = emit_get_rgctx_klass (cfg, context_used, klass, MONO_RGCTX_INFO_KLASS);
/* Inline path: a null object always passes castclass. */
4353 NEW_BBLOCK (cfg, is_null_bb);
4355 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, obj_reg, 0);
4356 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBEQ, is_null_bb);
4358 save_cast_details (cfg, klass, obj_reg, FALSE, NULL);
4360 if (klass->flags & TYPE_ATTRIBUTE_INTERFACE) {
4361 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, vtable_reg, obj_reg, MONO_STRUCT_OFFSET (MonoObject, vtable));
4362 mini_emit_iface_cast (cfg, vtable_reg, klass, NULL, NULL);
4364 int klass_reg = alloc_preg (cfg);
4366 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, vtable_reg, obj_reg, MONO_STRUCT_OFFSET (MonoObject, vtable));
/* Sealed class: an exact class compare suffices (no subclass walk). */
4368 if (!klass->rank && !cfg->compile_aot && !(cfg->opt & MONO_OPT_SHARED) && (klass->flags & TYPE_ATTRIBUTE_SEALED)) {
4369 /* the remoting code is broken, access the class for now */
4370 if (0) { /*FIXME what exactly is broken? This change refers to r39380 from 2005 and mention some remoting fixes were due.*/
4371 MonoVTable *vt = mono_class_vtable (cfg->domain, klass);
4373 mono_cfg_set_exception (cfg, MONO_EXCEPTION_TYPE_LOAD);
4374 cfg->exception_ptr = klass;
4377 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, vtable_reg, vt);
4379 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, vtable_reg, MONO_STRUCT_OFFSET (MonoVTable, klass));
4380 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, klass_reg, klass);
4382 MONO_EMIT_NEW_COND_EXC (cfg, NE_UN, "InvalidCastException");
/* General case: full castclass check over the class hierarchy. */
4384 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, vtable_reg, MONO_STRUCT_OFFSET (MonoVTable, klass));
4385 mini_emit_castclass_inst (cfg, obj_reg, klass_reg, klass, klass_inst, is_null_bb);
4389 MONO_START_BB (cfg, is_null_bb);
4391 reset_cast_details (cfg);
4402 * Returns NULL and set the cfg exception on error.
/* handle_isinst: emit IR for the 'isinst' opcode — push SRC unchanged when it
 * is an instance of KLASS, NULL otherwise (never throws). Complex/variant
 * classes go through the isinst-with-cache wrapper; everything else is checked
 * inline with branches to is_null_bb (success: result already holds the
 * object) or false_bb (failure: result set to 0). */
4405 handle_isinst (MonoCompile *cfg, MonoClass *klass, MonoInst *src, int context_used)
4408 MonoBasicBlock *is_null_bb, *false_bb, *end_bb;
4409 int obj_reg = src->dreg;
4410 int vtable_reg = alloc_preg (cfg);
4411 int res_reg = alloc_ireg_ref (cfg);
4412 MonoInst *klass_inst = NULL;
/* Complex class: use the cache-based wrapper (cache from the rgctx; the
 * cache entry layout is (cache, klass) — args[1] loads the second word). */
4417 if(mini_class_has_reference_variant_generic_argument (cfg, klass, context_used) || is_complex_isinst (klass)) {
4418 MonoMethod *mono_isinst = mono_marshal_get_isinst_with_cache ();
4419 MonoInst *cache_ins;
4421 cache_ins = emit_get_rgctx_klass (cfg, context_used, klass, MONO_RGCTX_INFO_CAST_CACHE);
4426 /* klass - it's the second element of the cache entry*/
4427 EMIT_NEW_LOAD_MEMBASE (cfg, args [1], OP_LOAD_MEMBASE, alloc_preg (cfg), cache_ins->dreg, sizeof (gpointer));
4430 args [2] = cache_ins;
4432 return mono_emit_method_call (cfg, mono_isinst, args, NULL);
4435 klass_inst = emit_get_rgctx_klass (cfg, context_used, klass, MONO_RGCTX_INFO_KLASS);
4438 NEW_BBLOCK (cfg, is_null_bb);
4439 NEW_BBLOCK (cfg, false_bb);
4440 NEW_BBLOCK (cfg, end_bb);
4442 /* Do the assignment at the beginning, so the other assignment can be if converted */
4443 EMIT_NEW_UNALU (cfg, ins, OP_MOVE, res_reg, obj_reg);
4444 ins->type = STACK_OBJ;
/* isinst on null yields null — jump straight to the success merge. */
4447 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, obj_reg, 0);
4448 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_IBEQ, is_null_bb);
4450 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, vtable_reg, obj_reg, MONO_STRUCT_OFFSET (MonoObject, vtable));
4452 if (klass->flags & TYPE_ATTRIBUTE_INTERFACE) {
4453 g_assert (!context_used);
4454 /* the is_null_bb target simply copies the input register to the output */
4455 mini_emit_iface_cast (cfg, vtable_reg, klass, false_bb, is_null_bb);
4457 int klass_reg = alloc_preg (cfg);
/* Array case: rank must match, then check the element (cast) class, with
 * special handling for enum/object element types per ECMA array covariance. */
4460 int rank_reg = alloc_preg (cfg);
4461 int eclass_reg = alloc_preg (cfg);
4463 g_assert (!context_used);
4464 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADU1_MEMBASE, rank_reg, vtable_reg, MONO_STRUCT_OFFSET (MonoVTable, rank));
4465 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, rank_reg, klass->rank);
4466 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBNE_UN, false_bb);
4467 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, vtable_reg, MONO_STRUCT_OFFSET (MonoVTable, klass));
4468 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, eclass_reg, klass_reg, MONO_STRUCT_OFFSET (MonoClass, cast_class));
4469 if (klass->cast_class == mono_defaults.object_class) {
4470 int parent_reg = alloc_preg (cfg);
4471 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, parent_reg, eclass_reg, MONO_STRUCT_OFFSET (MonoClass, parent));
4472 mini_emit_class_check_branch (cfg, parent_reg, mono_defaults.enum_class->parent, OP_PBNE_UN, is_null_bb);
4473 mini_emit_class_check_branch (cfg, eclass_reg, mono_defaults.enum_class, OP_PBEQ, is_null_bb);
4474 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, false_bb);
4475 } else if (klass->cast_class == mono_defaults.enum_class->parent) {
4476 mini_emit_class_check_branch (cfg, eclass_reg, mono_defaults.enum_class->parent, OP_PBEQ, is_null_bb);
4477 mini_emit_class_check_branch (cfg, eclass_reg, mono_defaults.enum_class, OP_PBEQ, is_null_bb);
4478 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, false_bb);
4479 } else if (klass->cast_class == mono_defaults.enum_class) {
4480 mini_emit_class_check_branch (cfg, eclass_reg, mono_defaults.enum_class, OP_PBEQ, is_null_bb);
4481 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, false_bb);
4482 } else if (klass->cast_class->flags & TYPE_ATTRIBUTE_INTERFACE) {
4483 mini_emit_iface_class_cast (cfg, eclass_reg, klass->cast_class, false_bb, is_null_bb);
/* SZARRAY targets additionally require the object to be a vector
 * (bounds == NULL), not a multi-dimensional array of rank 1. */
4485 if ((klass->rank == 1) && (klass->byval_arg.type == MONO_TYPE_SZARRAY)) {
4486 /* Check that the object is a vector too */
4487 int bounds_reg = alloc_preg (cfg);
4488 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, bounds_reg, obj_reg, MONO_STRUCT_OFFSET (MonoArray, bounds));
4489 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, bounds_reg, 0);
4490 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBNE_UN, false_bb);
4493 /* the is_null_bb target simply copies the input register to the output */
4494 mini_emit_isninst_cast (cfg, eclass_reg, klass->cast_class, false_bb, is_null_bb);
4496 } else if (mono_class_is_nullable (klass)) {
4497 g_assert (!context_used);
4498 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, vtable_reg, MONO_STRUCT_OFFSET (MonoVTable, klass));
4499 /* the is_null_bb target simply copies the input register to the output */
4500 mini_emit_isninst_cast (cfg, klass_reg, klass->cast_class, false_bb, is_null_bb);
/* Sealed class: a single exact class compare decides the result. */
4502 if (!cfg->compile_aot && !(cfg->opt & MONO_OPT_SHARED) && (klass->flags & TYPE_ATTRIBUTE_SEALED)) {
4503 g_assert (!context_used);
4504 /* the remoting code is broken, access the class for now */
4505 if (0) {/*FIXME what exactly is broken? This change refers to r39380 from 2005 and mention some remoting fixes were due.*/
4506 MonoVTable *vt = mono_class_vtable (cfg->domain, klass);
4508 mono_cfg_set_exception (cfg, MONO_EXCEPTION_TYPE_LOAD);
4509 cfg->exception_ptr = klass;
4512 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, vtable_reg, vt);
4514 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, vtable_reg, MONO_STRUCT_OFFSET (MonoVTable, klass));
4515 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, klass_reg, klass);
4517 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBNE_UN, false_bb);
4518 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, is_null_bb);
4520 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, vtable_reg, MONO_STRUCT_OFFSET (MonoVTable, klass));
4521 /* the is_null_bb target simply copies the input register to the output */
4522 mini_emit_isninst_cast_inst (cfg, klass_reg, klass, klass_inst, false_bb, is_null_bb);
/* Merge: failure path zeroes the result; success path falls through with the
 * original object already in res_reg. */
4527 MONO_START_BB (cfg, false_bb);
4529 MONO_EMIT_NEW_PCONST (cfg, res_reg, 0);
4530 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, end_bb);
4532 MONO_START_BB (cfg, is_null_bb);
4534 MONO_START_BB (cfg, end_bb);
/*
 * handle_cisinst:
 *
 *   Emit IR for the CEE_MONO_CISINST opcode: given an object reference SRC and
 * a class KLASS, produce an int result in a fresh register:
 *   0) the object is an instance of the class,
 *   1) the object is not an instance of the class,
 *   2) the object is a transparent proxy whose type cannot be determined
 *      (remoting builds only).
 * Returns the OP_ICONST instruction holding the result.
 */
4540 handle_cisinst (MonoCompile *cfg, MonoClass *klass, MonoInst *src)
4542 /* This opcode takes as input an object reference and a class, and returns:
4543 0) if the object is an instance of the class,
4544 1) if the object is not instance of the class,
4545 2) if the object is a proxy whose type cannot be determined */
4548 #ifndef DISABLE_REMOTING
4549 MonoBasicBlock *true_bb, *false_bb, *false2_bb, *end_bb, *no_proxy_bb, *interface_fail_bb;
4551 MonoBasicBlock *true_bb, *false_bb, *end_bb;
4553 int obj_reg = src->dreg;
4554 int dreg = alloc_ireg (cfg);
4556 #ifndef DISABLE_REMOTING
4557 int klass_reg = alloc_preg (cfg);
4560 NEW_BBLOCK (cfg, true_bb);
4561 NEW_BBLOCK (cfg, false_bb);
4562 NEW_BBLOCK (cfg, end_bb);
4563 #ifndef DISABLE_REMOTING
/* false2_bb yields result 2 ("cannot determine"); no_proxy_bb handles plain objects. */
4564 NEW_BBLOCK (cfg, false2_bb);
4565 NEW_BBLOCK (cfg, no_proxy_bb);
/* A null reference is never an instance: go straight to the "not an instance" result. */
4568 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, obj_reg, 0);
4569 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBEQ, false_bb);
4571 if (klass->flags & TYPE_ATTRIBUTE_INTERFACE) {
4572 #ifndef DISABLE_REMOTING
4573 NEW_BBLOCK (cfg, interface_fail_bb);
/* Load the object's vtable, which drives the interface bitmap check. */
4576 tmp_reg = alloc_preg (cfg);
4577 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, tmp_reg, obj_reg, MONO_STRUCT_OFFSET (MonoObject, vtable));
4578 #ifndef DISABLE_REMOTING
4579 mini_emit_iface_cast (cfg, tmp_reg, klass, interface_fail_bb, true_bb);
4580 MONO_START_BB (cfg, interface_fail_bb);
4581 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, tmp_reg, MONO_STRUCT_OFFSET (MonoVTable, klass));
/* The direct interface check failed: only a transparent proxy can still match. */
4583 mini_emit_class_check_branch (cfg, klass_reg, mono_defaults.transparent_proxy_class, OP_PBNE_UN, false_bb);
4585 tmp_reg = alloc_preg (cfg);
/* Inspect the proxy's custom type info to decide between "no" and "cannot determine". */
4586 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, tmp_reg, obj_reg, MONO_STRUCT_OFFSET (MonoTransparentProxy, custom_type_info));
4587 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, tmp_reg, 0);
4588 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBNE_UN, false2_bb);
4590 mini_emit_iface_cast (cfg, tmp_reg, klass, false_bb, true_bb);
4593 #ifndef DISABLE_REMOTING
/* Non-interface case: walk vtable->klass and special-case transparent proxies. */
4594 tmp_reg = alloc_preg (cfg);
4595 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, tmp_reg, obj_reg, MONO_STRUCT_OFFSET (MonoObject, vtable));
4596 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, tmp_reg, MONO_STRUCT_OFFSET (MonoVTable, klass));
4598 mini_emit_class_check_branch (cfg, klass_reg, mono_defaults.transparent_proxy_class, OP_PBNE_UN, no_proxy_bb);
/* For proxies, test against the remote class's proxy_class instead of the vtable class. */
4599 tmp_reg = alloc_preg (cfg);
4600 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, tmp_reg, obj_reg, MONO_STRUCT_OFFSET (MonoTransparentProxy, remote_class));
4601 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, tmp_reg, MONO_STRUCT_OFFSET (MonoRemoteClass, proxy_class));
4603 tmp_reg = alloc_preg (cfg);
4604 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, tmp_reg, obj_reg, MONO_STRUCT_OFFSET (MonoTransparentProxy, custom_type_info));
4605 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, tmp_reg, 0);
4606 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBEQ, no_proxy_bb);
4608 mini_emit_isninst_cast (cfg, klass_reg, klass, false2_bb, true_bb);
4609 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, false2_bb);
4611 MONO_START_BB (cfg, no_proxy_bb);
4613 mini_emit_isninst_cast (cfg, klass_reg, klass, false_bb, true_bb);
4615 g_error ("transparent proxy support is disabled while trying to JIT code that uses it");
/* Result blocks: materialize 1 / 2 / 0 in dreg and fall through to end_bb. */
4619 MONO_START_BB (cfg, false_bb);
4621 MONO_EMIT_NEW_ICONST (cfg, dreg, 1);
4622 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, end_bb);
4624 #ifndef DISABLE_REMOTING
4625 MONO_START_BB (cfg, false2_bb);
4627 MONO_EMIT_NEW_ICONST (cfg, dreg, 2);
4628 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, end_bb);
4631 MONO_START_BB (cfg, true_bb);
4633 MONO_EMIT_NEW_ICONST (cfg, dreg, 0);
4635 MONO_START_BB (cfg, end_bb);
/* Wrap the result register in an OP_ICONST-typed instruction for the IL stack. */
4638 MONO_INST_NEW (cfg, ins, OP_ICONST);
4640 ins->type = STACK_I4;
/*
 * handle_ccastclass:
 *
 *   Emit IR for the CEE_MONO_CCASTCLASS opcode: given an object reference SRC
 * and a class KLASS, produce an int result:
 *   0) the object is an instance of the class,
 *   1) the object is a proxy whose type cannot be determined (remoting only);
 * otherwise an InvalidCastException is thrown at runtime.
 * Returns the OP_ICONST instruction holding the result.
 */
4646 handle_ccastclass (MonoCompile *cfg, MonoClass *klass, MonoInst *src)
4648 /* This opcode takes as input an object reference and a class, and returns:
4649 0) if the object is an instance of the class,
4650 1) if the object is a proxy whose type cannot be determined
4651 an InvalidCastException exception is thrown otherwise*/
4654 #ifndef DISABLE_REMOTING
4655 MonoBasicBlock *end_bb, *ok_result_bb, *no_proxy_bb, *interface_fail_bb, *fail_1_bb;
4657 MonoBasicBlock *ok_result_bb;
4659 int obj_reg = src->dreg;
4660 int dreg = alloc_ireg (cfg);
4661 int tmp_reg = alloc_preg (cfg);
4663 #ifndef DISABLE_REMOTING
4664 int klass_reg = alloc_preg (cfg);
4665 NEW_BBLOCK (cfg, end_bb);
4668 NEW_BBLOCK (cfg, ok_result_bb);
/* A null reference always casts successfully (result 0). */
4670 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, obj_reg, 0);
4671 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBEQ, ok_result_bb);
/* Record cast details so the runtime can build a useful InvalidCastException message. */
4673 save_cast_details (cfg, klass, obj_reg, FALSE, NULL);
4675 if (klass->flags & TYPE_ATTRIBUTE_INTERFACE) {
4676 #ifndef DISABLE_REMOTING
4677 NEW_BBLOCK (cfg, interface_fail_bb);
4679 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, tmp_reg, obj_reg, MONO_STRUCT_OFFSET (MonoObject, vtable));
4680 mini_emit_iface_cast (cfg, tmp_reg, klass, interface_fail_bb, ok_result_bb);
4681 MONO_START_BB (cfg, interface_fail_bb);
4682 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, tmp_reg, MONO_STRUCT_OFFSET (MonoVTable, klass));
/* Interface check failed: unless the object is a transparent proxy, this throws. */
4684 mini_emit_class_check (cfg, klass_reg, mono_defaults.transparent_proxy_class);
4686 tmp_reg = alloc_preg (cfg);
4687 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, tmp_reg, obj_reg, MONO_STRUCT_OFFSET (MonoTransparentProxy, custom_type_info));
/* A proxy without custom type info cannot be verified -> throw InvalidCastException. */
4688 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, tmp_reg, 0);
4689 MONO_EMIT_NEW_COND_EXC (cfg, EQ, "InvalidCastException");
4691 MONO_EMIT_NEW_ICONST (cfg, dreg, 1);
4692 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, end_bb);
/* Remoting disabled: a failed interface cast throws inside mini_emit_iface_cast. */
4694 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, tmp_reg, obj_reg, MONO_STRUCT_OFFSET (MonoObject, vtable));
4695 mini_emit_iface_cast (cfg, tmp_reg, klass, NULL, NULL);
4696 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, ok_result_bb);
4699 #ifndef DISABLE_REMOTING
4700 NEW_BBLOCK (cfg, no_proxy_bb);
4702 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, tmp_reg, obj_reg, MONO_STRUCT_OFFSET (MonoObject, vtable));
4703 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, tmp_reg, MONO_STRUCT_OFFSET (MonoVTable, klass));
4704 mini_emit_class_check_branch (cfg, klass_reg, mono_defaults.transparent_proxy_class, OP_PBNE_UN, no_proxy_bb);
/* Proxy path: check against the remote class's proxy_class. */
4706 tmp_reg = alloc_preg (cfg);
4707 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, tmp_reg, obj_reg, MONO_STRUCT_OFFSET (MonoTransparentProxy, remote_class));
4708 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, tmp_reg, MONO_STRUCT_OFFSET (MonoRemoteClass, proxy_class));
4710 tmp_reg = alloc_preg (cfg);
4711 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, tmp_reg, obj_reg, MONO_STRUCT_OFFSET (MonoTransparentProxy, custom_type_info));
4712 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, tmp_reg, 0);
4713 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBEQ, no_proxy_bb);
4715 NEW_BBLOCK (cfg, fail_1_bb);
4717 mini_emit_isninst_cast (cfg, klass_reg, klass, fail_1_bb, ok_result_bb);
/* fail_1_bb: proxy whose type could not be determined -> result 1 (no throw). */
4719 MONO_START_BB (cfg, fail_1_bb);
4721 MONO_EMIT_NEW_ICONST (cfg, dreg, 1);
4722 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, end_bb);
4724 MONO_START_BB (cfg, no_proxy_bb);
/* Ordinary object: emit a throwing castclass check. */
4726 mini_emit_castclass (cfg, obj_reg, klass_reg, klass, ok_result_bb);
4728 g_error ("Transparent proxy support is disabled while trying to JIT code that uses it");
4732 MONO_START_BB (cfg, ok_result_bb);
4734 MONO_EMIT_NEW_ICONST (cfg, dreg, 0);
4736 #ifndef DISABLE_REMOTING
4737 MONO_START_BB (cfg, end_bb);
4741 MONO_INST_NEW (cfg, ins, OP_ICONST);
4743 ins->type = STACK_I4;
/*
 * handle_enum_has_flag:
 *
 *   Emit an inlined version of Enum.HasFlag (): computes
 * (*enum_this & enum_flag) == enum_flag, yielding an I4 boolean.
 * ENUM_THIS is the address of the boxed/ref enum value; the underlying
 * integer type of KLASS selects 32-bit vs 64-bit opcodes.
 */
4748 static G_GNUC_UNUSED MonoInst*
4749 handle_enum_has_flag (MonoCompile *cfg, MonoClass *klass, MonoInst *enum_this, MonoInst *enum_flag)
4751 MonoType *enum_type = mono_type_get_underlying_type (&klass->byval_arg);
4752 guint32 load_opc = mono_type_to_load_membase (cfg, enum_type);
4755 switch (enum_type->type) {
4758 #if SIZEOF_REGISTER == 8
/* Registers sized by the underlying enum width: int regs for I4, long regs for I8. */
4770 MonoInst *load, *and, *cmp, *ceq;
4771 int enum_reg = is_i4 ? alloc_ireg (cfg) : alloc_lreg (cfg);
4772 int and_reg = is_i4 ? alloc_ireg (cfg) : alloc_lreg (cfg);
4773 int dest_reg = alloc_ireg (cfg);
/* load value; and = value & flag; result = (and == flag). */
4775 EMIT_NEW_LOAD_MEMBASE (cfg, load, load_opc, enum_reg, enum_this->dreg, 0);
4776 EMIT_NEW_BIALU (cfg, and, is_i4 ? OP_IAND : OP_LAND, and_reg, enum_reg, enum_flag->dreg);
4777 EMIT_NEW_BIALU (cfg, cmp, is_i4 ? OP_ICOMPARE : OP_LCOMPARE, -1, and_reg, enum_flag->dreg);
4778 EMIT_NEW_UNALU (cfg, ceq, is_i4 ? OP_ICEQ : OP_LCEQ, dest_reg, -1);
4780 ceq->type = STACK_I4;
/* Decompose composite opcodes for backends that lack them (e.g. 64-bit ops on 32-bit). */
4783 load = mono_decompose_opcode (cfg, load, NULL);
4784 and = mono_decompose_opcode (cfg, and, NULL);
4785 cmp = mono_decompose_opcode (cfg, cmp, NULL);
4786 ceq = mono_decompose_opcode (cfg, ceq, NULL);
4794 * Returns NULL and set the cfg exception on error.
4796 static G_GNUC_UNUSED MonoInst*
/*
 * Emit an inlined delegate constructor: allocate the delegate object and fill
 * in its target/method/invoke_impl/method_ptr fields, mirroring what
 * mono_delegate_ctor () would do at runtime. CONTEXT_USED is nonzero under
 * generic sharing; VIRTUAL selects the virtual-delegate trampoline.
 */
4797 handle_delegate_ctor (MonoCompile *cfg, MonoClass *klass, MonoInst *target, MonoMethod *method, int context_used, gboolean virtual)
4801 gpointer trampoline;
4802 MonoInst *obj, *method_ins, *tramp_ins;
4806 // FIXME reenable optimisation for virtual case
4811 MonoMethod *invoke = mono_get_delegate_invoke (klass);
4814 if (!mono_get_delegate_virtual_invoke_impl (mono_method_signature (invoke), context_used ? NULL : method))
/* Allocate the delegate instance itself. */
4818 obj = handle_alloc (cfg, klass, FALSE, 0);
4822 /* Inline the contents of mono_delegate_ctor */
4824 /* Set target field */
4825 /* Optimize away setting of NULL target */
4826 if (!(target->opcode == OP_PCONST && target->inst_p0 == 0)) {
4827 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORE_MEMBASE_REG, obj->dreg, MONO_STRUCT_OFFSET (MonoDelegate, target), target->dreg);
/* Storing a managed reference into a heap object requires a write barrier. */
4828 if (cfg->gen_write_barriers) {
4829 dreg = alloc_preg (cfg);
4830 EMIT_NEW_BIALU_IMM (cfg, ptr, OP_PADD_IMM, dreg, obj->dreg, MONO_STRUCT_OFFSET (MonoDelegate, target));
4831 emit_write_barrier (cfg, ptr, target);
4835 /* Set method field */
4836 method_ins = emit_get_rgctx_method (cfg, context_used, method, MONO_RGCTX_INFO_METHOD);
4837 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORE_MEMBASE_REG, obj->dreg, MONO_STRUCT_OFFSET (MonoDelegate, method), method_ins->dreg);
4840 * To avoid looking up the compiled code belonging to the target method
4841 * in mono_delegate_trampoline (), we allocate a per-domain memory slot to
4842 * store it, and we fill it after the method has been compiled.
4844 if (!method->dynamic && !(cfg->opt & MONO_OPT_SHARED)) {
4845 MonoInst *code_slot_ins;
4848 code_slot_ins = emit_get_rgctx_method (cfg, context_used, method, MONO_RGCTX_INFO_METHOD_DELEGATE_CODE);
/* Lazily create/look up the per-domain method -> code-slot hash under the domain lock. */
4850 domain = mono_domain_get ();
4851 mono_domain_lock (domain);
4852 if (!domain_jit_info (domain)->method_code_hash)
4853 domain_jit_info (domain)->method_code_hash = g_hash_table_new (NULL, NULL);
4854 code_slot = g_hash_table_lookup (domain_jit_info (domain)->method_code_hash, method);
4856 code_slot = mono_domain_alloc0 (domain, sizeof (gpointer));
4857 g_hash_table_insert (domain_jit_info (domain)->method_code_hash, method, code_slot);
4859 mono_domain_unlock (domain);
/* Under AOT the slot address is resolved via a patch at load time. */
4861 if (cfg->compile_aot)
4862 EMIT_NEW_AOTCONST (cfg, code_slot_ins, MONO_PATCH_INFO_METHOD_CODE_SLOT, method);
4864 EMIT_NEW_PCONST (cfg, code_slot_ins, code_slot);
4866 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORE_MEMBASE_REG, obj->dreg, MONO_STRUCT_OFFSET (MonoDelegate, method_code), code_slot_ins->dreg);
/* Obtain the (class, method) delegate trampoline: AOT patch vs direct pointer. */
4869 if (cfg->compile_aot) {
4870 MonoDelegateClassMethodPair *del_tramp;
4872 del_tramp = mono_mempool_alloc0 (cfg->mempool, sizeof (MonoDelegateClassMethodPair));
4873 del_tramp->klass = klass;
4874 del_tramp->method = context_used ? NULL : method;
4875 del_tramp->virtual = virtual;
4876 EMIT_NEW_AOTCONST (cfg, tramp_ins, MONO_PATCH_INFO_DELEGATE_TRAMPOLINE, del_tramp);
4879 trampoline = mono_create_delegate_virtual_trampoline (cfg->domain, klass, context_used ? NULL : method);
4881 trampoline = mono_create_delegate_trampoline_info (cfg->domain, klass, context_used ? NULL : method);
4882 EMIT_NEW_PCONST (cfg, tramp_ins, trampoline);
4885 /* Set invoke_impl field */
4887 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORE_MEMBASE_REG, obj->dreg, MONO_STRUCT_OFFSET (MonoDelegate, invoke_impl), tramp_ins->dreg);
/* Non-virtual case: copy invoke_impl and method_ptr out of the MonoDelegateTrampInfo. */
4889 dreg = alloc_preg (cfg);
4890 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, dreg, tramp_ins->dreg, MONO_STRUCT_OFFSET (MonoDelegateTrampInfo, invoke_impl));
4891 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORE_MEMBASE_REG, obj->dreg, MONO_STRUCT_OFFSET (MonoDelegate, invoke_impl), dreg);
4893 dreg = alloc_preg (cfg);
4894 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, dreg, tramp_ins->dreg, MONO_STRUCT_OFFSET (MonoDelegateTrampInfo, method_ptr));
4895 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORE_MEMBASE_REG, obj->dreg, MONO_STRUCT_OFFSET (MonoDelegate, method_ptr), dreg);
4898 /* All the checks which are in mono_delegate_ctor () are done by the delegate trampoline */
/*
 * handle_array_new:
 *
 *   Emit a call to the rank-specific mono_array_new_va icall to allocate a
 * multi-dimensional array. SP holds the dimension arguments; IP is the IL
 * location. Returns the call instruction producing the new array.
 */
4904 handle_array_new (MonoCompile *cfg, int rank, MonoInst **sp, unsigned char *ip)
4906 MonoJitICallInfo *info;
4908 /* Need to register the icall so it gets an icall wrapper */
4909 info = mono_get_array_new_va_icall (rank);
4911 cfg->flags |= MONO_CFG_HAS_VARARGS;
4913 /* mono_array_new_va () needs a vararg calling convention */
4914 cfg->disable_llvm = TRUE;
4916 /* FIXME: This uses info->sig, but it should use the signature of the wrapper */
4917 return mono_emit_native_call (cfg, mono_icall_get_wrapper (info), info->sig, sp);
4921 * handle_constrained_gsharedvt_call:
4923 * Handle constrained calls where the receiver is a gsharedvt type.
4924 * Return the instruction representing the call. Set the cfg exception on failure.
4927 handle_constrained_gsharedvt_call (MonoCompile *cfg, MonoMethod *cmethod, MonoMethodSignature *fsig, MonoInst **sp, MonoClass *constrained_class,
4928 gboolean *ref_emit_widen, MonoBasicBlock **ref_bblock)
4930 MonoInst *ins = NULL;
4931 MonoBasicBlock *bblock = *ref_bblock;
4932 gboolean emit_widen = *ref_emit_widen;
4935 * Constrained calls need to behave differently at runtime depending on whether the receiver is instantiated as ref type or as a vtype.
4936 * This is hard to do with the current call code, since we would have to emit a branch and two different calls. So instead, we
4937 * pack the arguments into an array, and do the rest of the work in an icall.
/* Only a narrow set of signatures is supported; anything else falls through to GSHAREDVT_FAILURE. */
4939 if (((cmethod->klass == mono_defaults.object_class) || (cmethod->klass->flags & TYPE_ATTRIBUTE_INTERFACE) || (!cmethod->klass->valuetype && cmethod->klass->image != mono_defaults.corlib)) &&
4940 (MONO_TYPE_IS_VOID (fsig->ret) || MONO_TYPE_IS_PRIMITIVE (fsig->ret) || MONO_TYPE_IS_REFERENCE (fsig->ret) || MONO_TYPE_ISSTRUCT (fsig->ret) || mini_is_gsharedvt_type (cfg, fsig->ret)) &&
4941 (fsig->param_count == 0 || (!fsig->hasthis && fsig->param_count == 1) || (fsig->param_count == 1 && (MONO_TYPE_IS_REFERENCE (fsig->params [0]) || fsig->params [0]->byref || mini_is_gsharedvt_type (cfg, fsig->params [0]))))) {
4942 MonoInst *args [16];
4945 * This case handles calls to
4946 * - object:ToString()/Equals()/GetHashCode(),
4947 * - System.IComparable<T>:CompareTo()
4948 * - System.IEquatable<T>:Equals ()
4949 * plus some simple interface calls enough to support AsyncTaskMethodBuilder.
/* args [1] = the target method (via rgctx when shared), args [2] = the constrained class. */
4953 if (mono_method_check_context_used (cmethod))
4954 args [1] = emit_get_rgctx_method (cfg, mono_method_check_context_used (cmethod), cmethod, MONO_RGCTX_INFO_METHOD);
4956 EMIT_NEW_METHODCONST (cfg, args [1], cmethod);
4957 args [2] = emit_get_rgctx_klass (cfg, mono_class_check_context_used (constrained_class), constrained_class, MONO_RGCTX_INFO_KLASS);
4959 /* !fsig->hasthis is for the wrapper for the Object.GetType () icall */
4960 if (fsig->hasthis && fsig->param_count) {
4961 /* Pass the arguments using a localloc-ed array using the format expected by runtime_invoke () */
4962 MONO_INST_NEW (cfg, ins, OP_LOCALLOC_IMM);
4963 ins->dreg = alloc_preg (cfg);
4964 ins->inst_imm = fsig->param_count * sizeof (mgreg_t);
4965 MONO_ADD_INS (cfg->cbb, ins);
/* gsharedvt arg: pass its address plus box-type info; otherwise pass the value directly. */
4968 if (mini_is_gsharedvt_type (cfg, fsig->params [0])) {
4971 args [3] = emit_get_gsharedvt_info_klass (cfg, mono_class_from_mono_type (fsig->params [0]), MONO_RGCTX_INFO_CLASS_BOX_TYPE);
4973 EMIT_NEW_VARLOADA_VREG (cfg, ins, sp [1]->dreg, fsig->params [0]);
4974 addr_reg = ins->dreg;
4975 EMIT_NEW_STORE_MEMBASE (cfg, ins, OP_STORE_MEMBASE_REG, args [4]->dreg, 0, addr_reg);
4977 EMIT_NEW_ICONST (cfg, args [3], 0);
4978 EMIT_NEW_STORE_MEMBASE (cfg, ins, OP_STORE_MEMBASE_REG, args [4]->dreg, 0, sp [1]->dreg);
4981 EMIT_NEW_ICONST (cfg, args [3], 0);
4982 EMIT_NEW_ICONST (cfg, args [4], 0);
/* The icall performs the actual dispatch and returns a (possibly boxed) result. */
4984 ins = mono_emit_jit_icall (cfg, mono_gsharedvt_constrained_call, args);
4987 if (mini_is_gsharedvt_type (cfg, fsig->ret)) {
4988 ins = handle_unbox_gsharedvt (cfg, mono_class_from_mono_type (fsig->ret), ins, &bblock);
4989 } else if (MONO_TYPE_IS_PRIMITIVE (fsig->ret) || MONO_TYPE_ISSTRUCT (fsig->ret)) {
/* The icall boxed the result: load the raw value from just past the MonoObject header. */
4993 NEW_BIALU_IMM (cfg, add, OP_ADD_IMM, alloc_dreg (cfg, STACK_MP), ins->dreg, sizeof (MonoObject));
4994 MONO_ADD_INS (cfg->cbb, add);
4996 NEW_LOAD_MEMBASE_TYPE (cfg, ins, fsig->ret, add->dreg, 0);
4997 MONO_ADD_INS (cfg->cbb, ins);
4998 /* ins represents the call result */
5001 GSHAREDVT_FAILURE (CEE_CALLVIRT);
/* Propagate possibly-updated out params back to the caller. */
5004 *ref_emit_widen = emit_widen;
5005 *ref_bblock = bblock;
/*
 * mono_emit_load_got_addr:
 *
 *   Emit the OP_LOAD_GOTADDR instruction that materializes the GOT address
 * into cfg->got_var, placing it at the very start of the entry basic block.
 * No-op if there is no got_var or it was already allocated.
 */
5014 mono_emit_load_got_addr (MonoCompile *cfg)
5016 MonoInst *getaddr, *dummy_use;
5018 if (!cfg->got_var || cfg->got_var_allocated)
5021 MONO_INST_NEW (cfg, getaddr, OP_LOAD_GOTADDR);
5022 getaddr->cil_code = cfg->header->code;
5023 getaddr->dreg = cfg->got_var->dreg;
5025 /* Add it to the start of the first bblock */
5026 if (cfg->bb_entry->code) {
5027 getaddr->next = cfg->bb_entry->code;
5028 cfg->bb_entry->code = getaddr;
5031 MONO_ADD_INS (cfg->bb_entry, getaddr);
5033 cfg->got_var_allocated = TRUE;
5036 * Add a dummy use to keep the got_var alive, since real uses might
5037 * only be generated by the back ends.
5038 * Add it to end_bblock, so the variable's lifetime covers the whole
5040 * It would be better to make the usage of the got var explicit in all
5041 * cases when the backend needs it (i.e. calls, throw etc.), so this
5042 * wouldn't be needed.
5044 NEW_DUMMY_USE (cfg, dummy_use, cfg->got_var);
5045 MONO_ADD_INS (cfg->bb_exit, dummy_use);
/* Max IL code size eligible for inlining; initialized lazily from the
 * MONO_INLINELIMIT env var (default INLINE_LENGTH_LIMIT) in
 * mono_method_check_inlining (). */
5048 static int inline_limit;
5049 static gboolean inline_limit_inited;
/*
 * mono_method_check_inlining:
 *
 *   Decide whether METHOD may be inlined into the method being compiled in CFG.
 * Checks compiler state (inlining disabled, generic sharing, depth), method
 * attributes (noinlining, synchronized, MBR), code size against inline_limit,
 * class-initialization constraints, declarative security, and soft-float
 * restrictions. Returns a boolean-style result.
 */
5052 mono_method_check_inlining (MonoCompile *cfg, MonoMethod *method)
5054 MonoMethodHeaderSummary header;
5056 #ifdef MONO_ARCH_SOFT_FLOAT_FALLBACK
5057 MonoMethodSignature *sig = mono_method_signature (method);
5061 if (cfg->disable_inline)
5063 if (cfg->generic_sharing_context)
/* Cap recursion depth of nested inlining. */
5066 if (cfg->inline_depth > 10)
5069 #ifdef MONO_ARCH_HAVE_LMF_OPS
5070 if (((method->iflags & METHOD_IMPL_ATTRIBUTE_INTERNAL_CALL) ||
5071 (method->flags & METHOD_ATTRIBUTE_PINVOKE_IMPL)) &&
5072 !MONO_TYPE_ISSTRUCT (signature->ret) && !mini_class_is_system_array (method->klass))
5077 if (!mono_method_get_header_summary (method, &header))
5080 /*runtime, icall and pinvoke are checked by summary call*/
5081 if ((method->iflags & METHOD_IMPL_ATTRIBUTE_NOINLINING) ||
5082 (method->iflags & METHOD_IMPL_ATTRIBUTE_SYNCHRONIZED) ||
5083 (mono_class_is_marshalbyref (method->klass)) ||
5087 /* also consider num_locals? */
5088 /* Do the size check early to avoid creating vtables */
5089 if (!inline_limit_inited) {
5090 if (g_getenv ("MONO_INLINELIMIT"))
5091 inline_limit = atoi (g_getenv ("MONO_INLINELIMIT"));
5093 inline_limit = INLINE_LENGTH_LIMIT;
5094 inline_limit_inited = TRUE;
/* AggressiveInlining overrides the size limit. */
5096 if (header.code_size >= inline_limit && !(method->iflags & METHOD_IMPL_ATTRIBUTE_AGGRESSIVE_INLINING))
5100 * if we can initialize the class of the method right away, we do,
5101 * otherwise we don't allow inlining if the class needs initialization,
5102 * since it would mean inserting a call to mono_runtime_class_init()
5103 * inside the inlined code
5105 if (!(cfg->opt & MONO_OPT_SHARED)) {
5106 /* The AggressiveInlining hint is a good excuse to force that cctor to run. */
5107 if (method->iflags & METHOD_IMPL_ATTRIBUTE_AGGRESSIVE_INLINING) {
5108 vtable = mono_class_vtable (cfg->domain, method->klass);
5111 if (!cfg->compile_aot)
5112 mono_runtime_class_init (vtable);
5113 } else if (method->klass->flags & TYPE_ATTRIBUTE_BEFORE_FIELD_INIT) {
5114 if (cfg->run_cctors && method->klass->has_cctor) {
5115 /*FIXME it would be easier and lazier to just use mono_class_try_get_vtable */
5116 if (!method->klass->runtime_info)
5117 /* No vtable created yet */
5119 vtable = mono_class_vtable (cfg->domain, method->klass);
5122 /* This makes so that inline cannot trigger */
5123 /* .cctors: too many apps depend on them */
5124 /* running with a specific order... */
5125 if (! vtable->initialized)
5127 mono_runtime_class_init (vtable);
5129 } else if (mono_class_needs_cctor_run (method->klass, NULL)) {
5130 if (!method->klass->runtime_info)
5131 /* No vtable created yet */
5133 vtable = mono_class_vtable (cfg->domain, method->klass);
5136 if (!vtable->initialized)
5141 * If we're compiling for shared code
5142 * the cctor will need to be run at aot method load time, for example,
5143 * or at the end of the compilation of the inlining method.
5145 if (mono_class_needs_cctor_run (method->klass, NULL) && !((method->klass->flags & TYPE_ATTRIBUTE_BEFORE_FIELD_INIT)))
5150 * CAS - do not inline methods with declarative security
5151 * Note: this has to be before any possible return TRUE;
5153 if (mono_security_method_has_declsec (method))
5156 #ifdef MONO_ARCH_SOFT_FLOAT_FALLBACK
/* Soft-float targets cannot inline methods taking or returning R4 values. */
5157 if (mono_arch_is_soft_float ()) {
5159 if (sig->ret && sig->ret->type == MONO_TYPE_R4)
5161 for (i = 0; i < sig->param_count; ++i)
5162 if (!sig->params [i]->byref && sig->params [i]->type == MONO_TYPE_R4)
5167 if (g_list_find (cfg->dont_inline, method))
/*
 * mini_field_access_needs_cctor_run:
 *
 *   Return whether a static-field access on KLASS from METHOD requires
 * emitting a class-initialization (cctor) check. Skips the check when the
 * vtable is already initialized (JIT only), for beforefieldinit classes
 * accessed from elsewhere, and when the cctor is guaranteed to have run
 * before METHOD executes.
 */
5174 mini_field_access_needs_cctor_run (MonoCompile *cfg, MonoMethod *method, MonoClass *klass, MonoVTable *vtable)
5176 if (!cfg->compile_aot) {
5178 if (vtable->initialized)
5182 if (klass->flags & TYPE_ATTRIBUTE_BEFORE_FIELD_INIT) {
5183 if (cfg->method == method)
5187 if (!mono_class_needs_cctor_run (klass, method))
5190 if (! (method->flags & METHOD_ATTRIBUTE_STATIC) && (klass == method->klass))
5191 /* The initialization is already done before the method is called */
/*
 * mini_emit_ldelema_1_ins:
 *
 *   Emit the address of element INDEX of single-dimensional array ARR with
 * element class KLASS: &arr->vector [index * element_size]. When BCHECK is
 * set, an array bounds check is emitted first. Handles gsharedvt element
 * sizes via rgctx, and uses an x86/amd64 LEA fast path for power-of-two sizes.
 */
5198 mini_emit_ldelema_1_ins (MonoCompile *cfg, MonoClass *klass, MonoInst *arr, MonoInst *index, gboolean bcheck)
5202 int mult_reg, add_reg, array_reg, index_reg, index2_reg;
5205 if (mini_is_gsharedvt_variable_klass (cfg, klass)) {
5208 mono_class_init (klass);
5209 size = mono_class_array_element_size (klass);
5212 mult_reg = alloc_preg (cfg);
5213 array_reg = arr->dreg;
5214 index_reg = index->dreg;
5216 #if SIZEOF_REGISTER == 8
5217 /* The array reg is 64 bits but the index reg is only 32 */
5218 if (COMPILE_LLVM (cfg)) {
/* LLVM handles the widening itself. */
5220 index2_reg = index_reg;
5222 index2_reg = alloc_preg (cfg);
5223 MONO_EMIT_NEW_UNALU (cfg, OP_SEXT_I4, index2_reg, index_reg);
5226 if (index->type == STACK_I8) {
/* 32-bit target: narrow an I8 index down to I4. */
5227 index2_reg = alloc_preg (cfg);
5228 MONO_EMIT_NEW_UNALU (cfg, OP_LCONV_TO_I4, index2_reg, index_reg);
5230 index2_reg = index_reg;
5235 MONO_EMIT_BOUNDS_CHECK (cfg, array_reg, MonoArray, max_length, index2_reg);
5237 #if defined(TARGET_X86) || defined(TARGET_AMD64)
/* Power-of-two element sizes fold into a single LEA with scaled index. */
5238 if (size == 1 || size == 2 || size == 4 || size == 8) {
5239 static const int fast_log2 [] = { 1, 0, 1, -1, 2, -1, -1, -1, 3 };
5241 EMIT_NEW_X86_LEA (cfg, ins, array_reg, index2_reg, fast_log2 [size], MONO_STRUCT_OFFSET (MonoArray, vector));
5242 ins->klass = mono_class_get_element_class (klass);
5243 ins->type = STACK_MP;
5249 add_reg = alloc_ireg_mp (cfg);
5252 MonoInst *rgctx_ins;
5255 g_assert (cfg->generic_sharing_context);
5256 context_used = mini_class_check_context_used (cfg, klass);
5257 g_assert (context_used);
/* Variable-size element: fetch the element size from the rgctx at runtime. */
5258 rgctx_ins = emit_get_gsharedvt_info (cfg, &klass->byval_arg, MONO_RGCTX_INFO_ARRAY_ELEMENT_SIZE);
5259 MONO_EMIT_NEW_BIALU (cfg, OP_IMUL, mult_reg, index2_reg, rgctx_ins->dreg);
5261 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_MUL_IMM, mult_reg, index2_reg, size);
/* addr = array + index*size + offsetof (MonoArray, vector) */
5263 MONO_EMIT_NEW_BIALU (cfg, OP_PADD, add_reg, array_reg, mult_reg);
5264 NEW_BIALU_IMM (cfg, ins, OP_PADD_IMM, add_reg, add_reg, MONO_STRUCT_OFFSET (MonoArray, vector));
5265 ins->klass = mono_class_get_element_class (klass);
5266 ins->type = STACK_MP;
5267 MONO_ADD_INS (cfg->cbb, ins);
#ifndef MONO_ARCH_EMULATE_MUL_DIV
/*
 * mini_emit_ldelema_2_ins:
 *
 *   Emit the address of element [index1, index2] of a rank-2 array with
 * element class KLASS, including per-dimension lower-bound adjustment and
 * range checks against the MonoArrayBounds records. The linear offset is
 * (realidx1 * length2 + realidx2) * element_size.
 */
5274 mini_emit_ldelema_2_ins (MonoCompile *cfg, MonoClass *klass, MonoInst *arr, MonoInst *index_ins1, MonoInst *index_ins2)
5276 int bounds_reg = alloc_preg (cfg);
5277 int add_reg = alloc_ireg_mp (cfg);
5278 int mult_reg = alloc_preg (cfg);
5279 int mult2_reg = alloc_preg (cfg);
5280 int low1_reg = alloc_preg (cfg);
5281 int low2_reg = alloc_preg (cfg);
5282 int high1_reg = alloc_preg (cfg);
5283 int high2_reg = alloc_preg (cfg);
5284 int realidx1_reg = alloc_preg (cfg);
5285 int realidx2_reg = alloc_preg (cfg);
5286 int sum_reg = alloc_preg (cfg);
5287 int index1, index2, tmpreg;
5291 mono_class_init (klass);
5292 size = mono_class_array_element_size (klass);
5294 index1 = index_ins1->dreg;
5295 index2 = index_ins2->dreg;
5297 #if SIZEOF_REGISTER == 8
5298 /* The array reg is 64 bits but the index reg is only 32 */
5299 if (COMPILE_LLVM (cfg)) {
/* Sign-extend both 32-bit indexes to pointer width on 64-bit targets. */
5302 tmpreg = alloc_preg (cfg);
5303 MONO_EMIT_NEW_UNALU (cfg, OP_SEXT_I4, tmpreg, index1);
5305 tmpreg = alloc_preg (cfg);
5306 MONO_EMIT_NEW_UNALU (cfg, OP_SEXT_I4, tmpreg, index2);
5310 // FIXME: Do we need to do something here for i8 indexes, like in ldelema_1_ins ?
5314 /* range checking */
5315 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, bounds_reg,
5316 arr->dreg, MONO_STRUCT_OFFSET (MonoArray, bounds));
/* Dimension 1: realidx = index - lower_bound; throw if realidx >= length (unsigned). */
5318 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADI4_MEMBASE, low1_reg,
5319 bounds_reg, MONO_STRUCT_OFFSET (MonoArrayBounds, lower_bound));
5320 MONO_EMIT_NEW_BIALU (cfg, OP_PSUB, realidx1_reg, index1, low1_reg);
5321 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADI4_MEMBASE, high1_reg,
5322 bounds_reg, MONO_STRUCT_OFFSET (MonoArrayBounds, length));
5323 MONO_EMIT_NEW_BIALU (cfg, OP_COMPARE, -1, high1_reg, realidx1_reg);
5324 MONO_EMIT_NEW_COND_EXC (cfg, LE_UN, "IndexOutOfRangeException");
/* Dimension 2: the second MonoArrayBounds record sits sizeof (MonoArrayBounds) further. */
5326 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADI4_MEMBASE, low2_reg,
5327 bounds_reg, sizeof (MonoArrayBounds) + MONO_STRUCT_OFFSET (MonoArrayBounds, lower_bound));
5328 MONO_EMIT_NEW_BIALU (cfg, OP_PSUB, realidx2_reg, index2, low2_reg);
5329 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADI4_MEMBASE, high2_reg,
5330 bounds_reg, sizeof (MonoArrayBounds) + MONO_STRUCT_OFFSET (MonoArrayBounds, length));
5331 MONO_EMIT_NEW_BIALU (cfg, OP_COMPARE, -1, high2_reg, realidx2_reg);
5332 MONO_EMIT_NEW_COND_EXC (cfg, LE_UN, "IndexOutOfRangeException");
/* addr = arr + (realidx1 * length2 + realidx2) * size + offsetof (MonoArray, vector) */
5334 MONO_EMIT_NEW_BIALU (cfg, OP_PMUL, mult_reg, high2_reg, realidx1_reg);
5335 MONO_EMIT_NEW_BIALU (cfg, OP_PADD, sum_reg, mult_reg, realidx2_reg);
5336 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_PMUL_IMM, mult2_reg, sum_reg, size);
5337 MONO_EMIT_NEW_BIALU (cfg, OP_PADD, add_reg, mult2_reg, arr->dreg);
5338 NEW_BIALU_IMM (cfg, ins, OP_PADD_IMM, add_reg, add_reg, MONO_STRUCT_OFFSET (MonoArray, vector));
5340 ins->type = STACK_MP;
5342 MONO_ADD_INS (cfg->cbb, ins);
/*
 * mini_emit_ldelema_ins:
 *
 *   Emit the element address for an array accessor method CMETHOD
 * (Get/Set/Address). Rank 1 and rank 2 (when OP_LMUL is available) are
 * inlined; higher ranks and gsharedvt element classes fall back to a call to
 * the marshalled Address helper. IS_SET drops the trailing value parameter
 * from the rank computation.
 */
5349 mini_emit_ldelema_ins (MonoCompile *cfg, MonoMethod *cmethod, MonoInst **sp, unsigned char *ip, gboolean is_set)
5353 MonoMethod *addr_method;
5355 MonoClass *eclass = cmethod->klass->element_class;
5357 rank = mono_method_signature (cmethod)->param_count - (is_set? 1: 0);
5360 return mini_emit_ldelema_1_ins (cfg, eclass, sp [0], sp [1], TRUE);
5362 #ifndef MONO_ARCH_EMULATE_MUL_DIV
5363 /* emit_ldelema_2 depends on OP_LMUL */
5364 if (rank == 2 && (cfg->opt & MONO_OPT_INTRINS) && !mini_is_gsharedvt_variable_klass (cfg, eclass)) {
5365 return mini_emit_ldelema_2_ins (cfg, eclass, sp [0], sp [1], sp [2]);
5369 if (mini_is_gsharedvt_variable_klass (cfg, eclass))
/* General path: call the generated Address () helper for this rank/element size. */
5372 element_size = mono_class_array_element_size (eclass);
5373 addr_method = mono_marshal_get_array_address (rank, element_size);
5374 addr = mono_emit_method_call (cfg, addr_method, sp, NULL);
/* Default break policy: honor every breakpoint request. */
5379 static MonoBreakPolicy
5380 always_insert_breakpoint (MonoMethod *method)
5382 return MONO_BREAK_POLICY_ALWAYS;
/* Current break-policy callback; replaced via mono_set_break_policy (). */
5385 static MonoBreakPolicyFunc break_policy_func = always_insert_breakpoint;
5388 * mono_set_break_policy:
5389 * policy_callback: the new callback function
5391 * Allow embedders to decide whether to actually obey breakpoint instructions
5392 * (both break IL instructions and Debugger.Break () method calls), for example
5393 * to not allow an app to be aborted by a perfectly valid IL opcode when executing
5394 * untrusted or semi-trusted code.
5396 * @policy_callback will be called every time a break point instruction needs to
5397 * be inserted with the method argument being the method that calls Debugger.Break()
5398 * or has the IL break instruction. The callback should return #MONO_BREAK_POLICY_NEVER
5399 * if it wants the breakpoint to not be effective in the given method.
5400 * #MONO_BREAK_POLICY_ALWAYS is the default.
5403 mono_set_break_policy (MonoBreakPolicyFunc policy_callback)
5405 if (policy_callback)
5406 break_policy_func = policy_callback;
/* NULL resets back to the default always-break policy. */
5408 break_policy_func = always_insert_breakpoint;
/* Return whether a breakpoint should actually be emitted for METHOD,
 * according to the embedder-installed break policy.
 * (Name misspells "breakpoint"; kept since callers elsewhere use it.) */
5412 should_insert_brekpoint (MonoMethod *method) {
5413 switch (break_policy_func (method)) {
5414 case MONO_BREAK_POLICY_ALWAYS:
5416 case MONO_BREAK_POLICY_NEVER:
5418 case MONO_BREAK_POLICY_ON_DBG:
5419 g_warning ("mdb no longer supported");
5422 g_warning ("Incorrect value returned from break policy callback");
/* optimize the simple GetGenericValueImpl/SetGenericValueImpl generic icalls */
/*
 * Emit an inlined element copy for Array.GetGenericValueImpl /
 * SetGenericValueImpl: load the value from one side and store it to the
 * element address (or vice versa for IS_SET == 0), with a write barrier when
 * the element type is a reference.
 */
5429 emit_array_generic_access (MonoCompile *cfg, MonoMethodSignature *fsig, MonoInst **args, int is_set)
5431 MonoInst *addr, *store, *load;
5432 MonoClass *eklass = mono_class_from_mono_type (fsig->params [2]);
5434 /* the bounds check is already done by the callers */
5435 addr = mini_emit_ldelema_1_ins (cfg, eklass, args [0], args [1], FALSE);
/* set: value -> array element; get: array element -> out param. */
5437 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, load, &eklass->byval_arg, args [2]->dreg, 0);
5438 EMIT_NEW_STORE_MEMBASE_TYPE (cfg, store, &eklass->byval_arg, addr->dreg, 0, load->dreg);
5439 if (mini_type_is_reference (cfg, fsig->params [2]))
5440 emit_write_barrier (cfg, addr, load);
5442 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, load, &eklass->byval_arg, addr->dreg, 0);
5443 EMIT_NEW_STORE_MEMBASE_TYPE (cfg, store, &eklass->byval_arg, args [2]->dreg, 0, load->dreg);
/* Return whether KLASS is treated as a reference type (including generic
 * sharing considerations). */
5450 generic_class_is_reference_type (MonoCompile *cfg, MonoClass *klass)
5452 return mini_type_is_reference (cfg, &klass->byval_arg);
/*
 * emit_array_store:
 *
 *   Emit a store of SP [2] into element SP [1] of array SP [0] with element
 * class KLASS. Reference-type stores with SAFETY_CHECKS go through the
 * virtual stelemref helper (which performs the array covariance check);
 * value types and null stores are emitted inline, with bounds checks and a
 * write barrier where required.
 */
5456 emit_array_store (MonoCompile *cfg, MonoClass *klass, MonoInst **sp, gboolean safety_checks)
/* Storing a (possibly non-null) reference: use the stelemref helper so the
 * runtime type check is performed. A null store needs no check. */
5458 if (safety_checks && generic_class_is_reference_type (cfg, klass) &&
5459 !(sp [2]->opcode == OP_PCONST && sp [2]->inst_p0 == NULL)) {
5460 MonoClass *obj_array = mono_array_class_get_cached (mono_defaults.object_class, 1);
5461 MonoMethod *helper = mono_marshal_get_virtual_stelemref (obj_array);
5462 MonoInst *iargs [3];
5465 mono_class_setup_vtable (obj_array);
5466 g_assert (helper->slot);
5468 if (sp [0]->type != STACK_OBJ)
5470 if (sp [2]->type != STACK_OBJ)
5477 return mono_emit_method_call (cfg, helper, iargs, sp [0]);
5481 if (mini_is_gsharedvt_variable_klass (cfg, klass)) {
5484 // FIXME-VT: OP_ICONST optimization
5485 addr = mini_emit_ldelema_1_ins (cfg, klass, sp [0], sp [1], TRUE);
5486 EMIT_NEW_STORE_MEMBASE_TYPE (cfg, ins, &klass->byval_arg, addr->dreg, 0, sp [2]->dreg);
5487 ins->opcode = OP_STOREV_MEMBASE;
5488 } else if (sp [1]->opcode == OP_ICONST) {
/* Constant index: fold the element offset at compile time. */
5489 int array_reg = sp [0]->dreg;
5490 int index_reg = sp [1]->dreg;
5491 int offset = (mono_class_array_element_size (klass) * sp [1]->inst_c0) + MONO_STRUCT_OFFSET (MonoArray, vector);
5494 MONO_EMIT_BOUNDS_CHECK (cfg, array_reg, MonoArray, max_length, index_reg);
5495 EMIT_NEW_STORE_MEMBASE_TYPE (cfg, ins, &klass->byval_arg, array_reg, offset, sp [2]->dreg);
5497 MonoInst *addr = mini_emit_ldelema_1_ins (cfg, klass, sp [0], sp [1], safety_checks);
5498 EMIT_NEW_STORE_MEMBASE_TYPE (cfg, ins, &klass->byval_arg, addr->dreg, 0, sp [2]->dreg);
5499 if (generic_class_is_reference_type (cfg, klass))
5500 emit_write_barrier (cfg, addr, sp [2]);
/*
 * Emit the body of the Array UnsafeLoad/UnsafeStore intrinsics: an element
 * store (via emit_array_store without safety checks) or an element load
 * without a bounds check.
 */
5507 emit_array_unsafe_access (MonoCompile *cfg, MonoMethodSignature *fsig, MonoInst **args, int is_set)
/* Element class comes from the value param for stores, the return type for loads. */
5512 eklass = mono_class_from_mono_type (fsig->params [2]);
5514 eklass = mono_class_from_mono_type (fsig->ret);
5517 return emit_array_store (cfg, eklass, args, FALSE);
5519 MonoInst *ins, *addr = mini_emit_ldelema_1_ins (cfg, eklass, args [0], args [1], FALSE);
5520 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, ins, &eklass->byval_arg, addr->dreg, 0);
5526 is_unsafe_mov_compatible (MonoClass *param_klass, MonoClass *return_klass)
5530 //Only allow for valuetypes
5531 if (!param_klass->valuetype || !return_klass->valuetype)
5535 if (param_klass->has_references || return_klass->has_references)
5538 /* Avoid mixing structs and primitive types/enums, they need to be handled differently in the JIT */
5539 if ((MONO_TYPE_ISSTRUCT (¶m_klass->byval_arg) && !MONO_TYPE_ISSTRUCT (&return_klass->byval_arg)) ||
5540 (!MONO_TYPE_ISSTRUCT (¶m_klass->byval_arg) && MONO_TYPE_ISSTRUCT (&return_klass->byval_arg)))
5543 if (param_klass->byval_arg.type == MONO_TYPE_R4 || param_klass->byval_arg.type == MONO_TYPE_R8 ||
5544 return_klass->byval_arg.type == MONO_TYPE_R4 || return_klass->byval_arg.type == MONO_TYPE_R8)
5547 //And have the same size
5548 if (mono_class_value_size (param_klass, &align) != mono_class_value_size (return_klass, &align))
/*
 * Emit the Array UnsafeMov intrinsic: reinterpret the argument as the return
 * type when the two types are layout-compatible (directly, or as rank-1
 * arrays of compatible element types).
 */
5554 emit_array_unsafe_mov (MonoCompile *cfg, MonoMethodSignature *fsig, MonoInst **args)
5556 MonoClass *param_klass = mono_class_from_mono_type (fsig->params [0]);
5557 MonoClass *return_klass = mono_class_from_mono_type (fsig->ret);
5559 //Valuetypes that are semantically equivalent
5560 if (is_unsafe_mov_compatible (param_klass, return_klass))
5563 //Arrays of valuetypes that are semantically equivalent
5564 if (param_klass->rank == 1 && return_klass->rank == 1 && is_unsafe_mov_compatible (param_klass->element_class, return_klass->element_class))
/*
 * mini_emit_inst_for_ctor:
 *
 *   Intrinsic hook for constructor calls: when the arch has SIMD intrinsics
 * and MONO_OPT_SIMD is enabled, tries the SIMD expansion first, then falls
 * back to native-types intrinsics. Presumably returns NULL when no intrinsic
 * applies — the elided lines would confirm.
 */
5571 mini_emit_inst_for_ctor (MonoCompile *cfg, MonoMethod *cmethod, MonoMethodSignature *fsig, MonoInst **args)
5573 #ifdef MONO_ARCH_SIMD_INTRINSICS
5574 MonoInst *ins = NULL;
5576 if (cfg->opt & MONO_OPT_SIMD) {
5577 ins = mono_emit_simd_intrinsics (cfg, cmethod, fsig, args);
5583 return mono_emit_native_types_intrinsics (cfg, cmethod, fsig, args);
/*
 * emit_memory_barrier:
 *
 *   Appends an OP_MEMORY_BARRIER instruction of the given KIND
 * (MONO_MEMORY_BARRIER_ACQ/REL/SEQ) to the current basic block; the kind is
 * stored in the backend field for the arch lowering pass to consume.
 * NOTE(review): the return statement and braces are elided from this view.
 */
5587 emit_memory_barrier (MonoCompile *cfg, int kind)
5589 MonoInst *ins = NULL;
5590 MONO_INST_NEW (cfg, ins, OP_MEMORY_BARRIER);
5591 MONO_ADD_INS (cfg->cbb, ins);
5592 ins->backend.memory_barrier_kind = kind;
/*
 * llvm_emit_inst_for_method:
 *
 *   Intrinsics used only when compiling with the LLVM backend. Handles
 * System.Math: unary Sin/Cos/Sqrt/Abs(double) map to single fp opcodes, and
 * (under MONO_OPT_CMOV) integer Min/Max map to branchless IMIN/IMAX/LMIN/
 * LMAX opcodes (with _UN variants for unsigned parameter types).
 * NOTE(review): the opcode assignments for several branches (Sin/Cos/Sqrt/
 * Abs, signed Min/Max) and the final return are elided from this view.
 */
5598 llvm_emit_inst_for_method (MonoCompile *cfg, MonoMethod *cmethod, MonoMethodSignature *fsig, MonoInst **args)
5600 MonoInst *ins = NULL;
5603 /* The LLVM backend supports these intrinsics */
5604 if (cmethod->klass == mono_defaults.math_class) {
5605 if (strcmp (cmethod->name, "Sin") == 0) {
5607 } else if (strcmp (cmethod->name, "Cos") == 0) {
5609 } else if (strcmp (cmethod->name, "Sqrt") == 0) {
5611 } else if (strcmp (cmethod->name, "Abs") == 0 && fsig->params [0]->type == MONO_TYPE_R8) {
/* Unary fp intrinsic: result is a fresh freg of stack type R8. */
5615 if (opcode && fsig->param_count == 1) {
5616 MONO_INST_NEW (cfg, ins, opcode);
5617 ins->type = STACK_R8;
5618 ins->dreg = mono_alloc_freg (cfg);
5619 ins->sreg1 = args [0]->dreg;
5620 MONO_ADD_INS (cfg->cbb, ins);
/* Branchless integer Min/Max requires conditional-move support. */
5624 if (cfg->opt & MONO_OPT_CMOV) {
5625 if (strcmp (cmethod->name, "Min") == 0) {
5626 if (fsig->params [0]->type == MONO_TYPE_I4)
5628 if (fsig->params [0]->type == MONO_TYPE_U4)
5629 opcode = OP_IMIN_UN;
5630 else if (fsig->params [0]->type == MONO_TYPE_I8)
5632 else if (fsig->params [0]->type == MONO_TYPE_U8)
5633 opcode = OP_LMIN_UN;
5634 } else if (strcmp (cmethod->name, "Max") == 0) {
5635 if (fsig->params [0]->type == MONO_TYPE_I4)
5637 if (fsig->params [0]->type == MONO_TYPE_U4)
5638 opcode = OP_IMAX_UN;
5639 else if (fsig->params [0]->type == MONO_TYPE_I8)
5641 else if (fsig->params [0]->type == MONO_TYPE_U8)
5642 opcode = OP_LMAX_UN;
/* Binary min/max: stack type follows the I4-vs-I8 parameter width. */
5646 if (opcode && fsig->param_count == 2) {
5647 MONO_INST_NEW (cfg, ins, opcode);
5648 ins->type = fsig->params [0]->type == MONO_TYPE_I4 ? STACK_I4 : STACK_I8;
5649 ins->dreg = mono_alloc_ireg (cfg);
5650 ins->sreg1 = args [0]->dreg;
5651 ins->sreg2 = args [1]->dreg;
5652 MONO_ADD_INS (cfg->cbb, ins);
/*
 * mini_emit_inst_for_sharable_method:
 *
 *   Intrinsics that are safe to emit in generic-shared code: the
 * Array.UnsafeStore/UnsafeLoad/UnsafeMov icalls. Dispatches by method name
 * on System.Array; presumably returns NULL otherwise (tail elided).
 */
5660 mini_emit_inst_for_sharable_method (MonoCompile *cfg, MonoMethod *cmethod, MonoMethodSignature *fsig, MonoInst **args)
5662 if (cmethod->klass == mono_defaults.array_class) {
5663 if (strcmp (cmethod->name, "UnsafeStore") == 0)
5664 return emit_array_unsafe_access (cfg, fsig, args, TRUE);
5665 else if (strcmp (cmethod->name, "UnsafeLoad") == 0)
5666 return emit_array_unsafe_access (cfg, fsig, args, FALSE);
5667 else if (strcmp (cmethod->name, "UnsafeMov") == 0)
5668 return emit_array_unsafe_mov (cfg, fsig, args);
/*
 * mini_emit_inst_for_method:
 *
 *   Main intrinsic-expansion entry point: when CMETHOD is one of the runtime
 * methods the JIT knows how to open-code (String/Object/Array accessors,
 * RuntimeHelpers, Thread/Interlocked/Volatile atomics, Monitor fastpaths,
 * Debugger.Break, Environment, ObjC Selector.GetHandle, ...), emits IR
 * directly into cfg->cbb and returns the resulting instruction; otherwise
 * falls through to SIMD / native-type / LLVM / arch-specific hooks.
 * NOTE(review): this extract is line-sampled — declarations, else/#else
 * branches, case labels, break/return statements and closing braces are
 * elided throughout; comments below describe only what is visible.
 */
5675 mini_emit_inst_for_method (MonoCompile *cfg, MonoMethod *cmethod, MonoMethodSignature *fsig, MonoInst **args)
5677 MonoInst *ins = NULL;
/* Lazily cached lookup of System.Runtime.CompilerServices.RuntimeHelpers. */
5679 static MonoClass *runtime_helpers_class = NULL;
5680 if (! runtime_helpers_class)
5681 runtime_helpers_class = mono_class_from_name (mono_defaults.corlib,
5682 "System.Runtime.CompilerServices", "RuntimeHelpers");
/* --- System.String intrinsics --- */
5684 if (cmethod->klass == mono_defaults.string_class) {
5685 if (strcmp (cmethod->name, "get_Chars") == 0 && fsig->param_count == 2) {
5686 int dreg = alloc_ireg (cfg);
5687 int index_reg = alloc_preg (cfg);
5688 int add_reg = alloc_preg (cfg);
5690 #if SIZEOF_REGISTER == 8
5691 /* The array reg is 64 bits but the index reg is only 32 */
5692 MONO_EMIT_NEW_UNALU (cfg, OP_SEXT_I4, index_reg, args [1]->dreg);
5694 index_reg = args [1]->dreg;
5696 MONO_EMIT_BOUNDS_CHECK (cfg, args [0]->dreg, MonoString, length, index_reg);
5698 #if defined(TARGET_X86) || defined(TARGET_AMD64)
/* x86/amd64: single LEA computes base + index*2 + chars offset. */
5699 EMIT_NEW_X86_LEA (cfg, ins, args [0]->dreg, index_reg, 1, MONO_STRUCT_OFFSET (MonoString, chars));
5700 add_reg = ins->dreg;
5701 EMIT_NEW_LOAD_MEMBASE (cfg, ins, OP_LOADU2_MEMBASE, dreg,
/* Other archs: explicit shift (index*2 for UTF-16 chars) + add. */
5704 int mult_reg = alloc_preg (cfg);
5705 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_SHL_IMM, mult_reg, index_reg, 1);
5706 MONO_EMIT_NEW_BIALU (cfg, OP_PADD, add_reg, mult_reg, args [0]->dreg);
5707 EMIT_NEW_LOAD_MEMBASE (cfg, ins, OP_LOADU2_MEMBASE, dreg,
5708 add_reg, MONO_STRUCT_OFFSET (MonoString, chars));
5710 type_from_op (cfg, ins, NULL, NULL);
5712 } else if (strcmp (cmethod->name, "get_Length") == 0 && fsig->param_count == 1) {
5713 int dreg = alloc_ireg (cfg);
5714 /* Decompose later to allow more optimizations */
5715 EMIT_NEW_UNALU (cfg, ins, OP_STRLEN, dreg, args [0]->dreg);
5716 ins->type = STACK_I4;
5717 ins->flags |= MONO_INST_FAULT;
5718 cfg->cbb->has_array_access = TRUE;
5719 cfg->flags |= MONO_CFG_HAS_ARRAY_ACCESS;
5722 } else if (strcmp (cmethod->name, "InternalSetChar") == 0 && fsig->param_count == 3) {
5723 int mult_reg = alloc_preg (cfg);
5724 int add_reg = alloc_preg (cfg);
5726 /* The corlib functions check for oob already. */
5727 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_SHL_IMM, mult_reg, args [1]->dreg, 1);
5728 MONO_EMIT_NEW_BIALU (cfg, OP_PADD, add_reg, mult_reg, args [0]->dreg);
5729 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI2_MEMBASE_REG, add_reg, MONO_STRUCT_OFFSET (MonoString, chars), args [2]->dreg);
5730 return cfg->cbb->last_ins;
/* --- System.Object intrinsics --- */
5733 } else if (cmethod->klass == mono_defaults.object_class) {
/* GetType: load vtable (faulting on null this), then its ->type field. */
5735 if (strcmp (cmethod->name, "GetType") == 0 && fsig->param_count == 1) {
5736 int dreg = alloc_ireg_ref (cfg);
5737 int vt_reg = alloc_preg (cfg);
5738 MONO_EMIT_NEW_LOAD_MEMBASE_FAULT (cfg, vt_reg, args [0]->dreg, MONO_STRUCT_OFFSET (MonoObject, vtable));
5739 EMIT_NEW_LOAD_MEMBASE (cfg, ins, OP_LOAD_MEMBASE, dreg, vt_reg, MONO_STRUCT_OFFSET (MonoVTable, type));
5740 type_from_op (cfg, ins, NULL, NULL);
5743 #if !defined(MONO_ARCH_EMULATE_MUL_DIV)
/* Hash of the (stable, non-moving-GC) object address: (addr >> 3) * prime. */
5744 } else if (strcmp (cmethod->name, "InternalGetHashCode") == 0 && fsig->param_count == 1 && !mono_gc_is_moving ()) {
5745 int dreg = alloc_ireg (cfg);
5746 int t1 = alloc_ireg (cfg);
5748 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_SHL_IMM, t1, args [0]->dreg, 3);
5749 EMIT_NEW_BIALU_IMM (cfg, ins, OP_MUL_IMM, dreg, t1, 2654435761u);
5750 ins->type = STACK_I4;
/* Object..ctor is a no-op. */
5754 } else if (strcmp (cmethod->name, ".ctor") == 0 && fsig->param_count == 0) {
5755 MONO_INST_NEW (cfg, ins, OP_NOP);
5756 MONO_ADD_INS (cfg->cbb, ins);
/* --- System.Array intrinsics --- */
5760 } else if (cmethod->klass == mono_defaults.array_class) {
5761 if (strcmp (cmethod->name, "GetGenericValueImpl") == 0 && fsig->param_count == 3 && !cfg->gsharedvt)
5762 return emit_array_generic_access (cfg, fsig, args, FALSE);
5763 else if (strcmp (cmethod->name, "SetGenericValueImpl") == 0 && fsig->param_count == 3 && !cfg->gsharedvt)
5764 return emit_array_generic_access (cfg, fsig, args, TRUE);
5766 #ifndef MONO_BIG_ARRAYS
5768 * This is an inline version of GetLength/GetLowerBound(0) used frequently in
5771 else if (((strcmp (cmethod->name, "GetLength") == 0 && fsig->param_count == 2) ||
5772 (strcmp (cmethod->name, "GetLowerBound") == 0 && fsig->param_count == 2)) &&
5773 args [1]->opcode == OP_ICONST && args [1]->inst_c0 == 0) {
5774 int dreg = alloc_ireg (cfg);
5775 int bounds_reg = alloc_ireg_mp (cfg);
5776 MonoBasicBlock *end_bb, *szarray_bb;
5777 gboolean get_length = strcmp (cmethod->name, "GetLength") == 0;
5779 NEW_BBLOCK (cfg, end_bb);
5780 NEW_BBLOCK (cfg, szarray_bb);
/* NULL bounds means szarray; branch on that to the fast path. */
5782 EMIT_NEW_LOAD_MEMBASE_FAULT (cfg, ins, OP_LOAD_MEMBASE, bounds_reg,
5783 args [0]->dreg, MONO_STRUCT_OFFSET (MonoArray, bounds));
5784 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, bounds_reg, 0);
5785 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_IBEQ, szarray_bb);
5786 /* Non-szarray case */
5788 EMIT_NEW_LOAD_MEMBASE (cfg, ins, OP_LOADI4_MEMBASE, dreg,
5789 bounds_reg, MONO_STRUCT_OFFSET (MonoArrayBounds, length));
5791 EMIT_NEW_LOAD_MEMBASE (cfg, ins, OP_LOADI4_MEMBASE, dreg,
5792 bounds_reg, MONO_STRUCT_OFFSET (MonoArrayBounds, lower_bound));
5793 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, end_bb);
/* szarray: GetLength(0) == max_length, GetLowerBound(0) == 0. */
5794 MONO_START_BB (cfg, szarray_bb);
5797 EMIT_NEW_LOAD_MEMBASE (cfg, ins, OP_LOADI4_MEMBASE, dreg,
5798 args [0]->dreg, MONO_STRUCT_OFFSET (MonoArray, max_length));
5800 MONO_EMIT_NEW_ICONST (cfg, dreg, 0);
5801 MONO_START_BB (cfg, end_bb);
5803 EMIT_NEW_UNALU (cfg, ins, OP_MOVE, dreg, dreg);
5804 ins->type = STACK_I4;
/* Quick reject: remaining Array intrinsics all start with 'g' (getters). */
5810 if (cmethod->name [0] != 'g')
5813 if (strcmp (cmethod->name, "get_Rank") == 0 && fsig->param_count == 1) {
5814 int dreg = alloc_ireg (cfg);
5815 int vtable_reg = alloc_preg (cfg);
5816 MONO_EMIT_NEW_LOAD_MEMBASE_OP_FAULT (cfg, OP_LOAD_MEMBASE, vtable_reg,
5817 args [0]->dreg, MONO_STRUCT_OFFSET (MonoObject, vtable));
5818 EMIT_NEW_LOAD_MEMBASE (cfg, ins, OP_LOADU1_MEMBASE, dreg,
5819 vtable_reg, MONO_STRUCT_OFFSET (MonoVTable, rank));
5820 type_from_op (cfg, ins, NULL, NULL);
5823 } else if (strcmp (cmethod->name, "get_Length") == 0 && fsig->param_count == 1) {
5824 int dreg = alloc_ireg (cfg);
5826 EMIT_NEW_LOAD_MEMBASE_FAULT (cfg, ins, OP_LOADI4_MEMBASE, dreg,
5827 args [0]->dreg, MONO_STRUCT_OFFSET (MonoArray, max_length));
5828 type_from_op (cfg, ins, NULL, NULL);
/* --- RuntimeHelpers --- */
5833 } else if (cmethod->klass == runtime_helpers_class) {
5835 if (strcmp (cmethod->name, "get_OffsetToStringData") == 0 && fsig->param_count == 0) {
5836 EMIT_NEW_ICONST (cfg, ins, MONO_STRUCT_OFFSET (MonoString, chars));
/* --- System.Threading.Thread --- */
5840 } else if (cmethod->klass == mono_defaults.thread_class) {
5841 if (strcmp (cmethod->name, "SpinWait_nop") == 0 && fsig->param_count == 0) {
5842 MONO_INST_NEW (cfg, ins, OP_RELAXED_NOP);
5843 MONO_ADD_INS (cfg->cbb, ins);
5845 } else if (strcmp (cmethod->name, "MemoryBarrier") == 0 && fsig->param_count == 0) {
5846 return emit_memory_barrier (cfg, MONO_MEMORY_BARRIER_SEQ);
/* Thread.VolatileRead: plain load of the right width + acquire barrier. */
5847 } else if (!strcmp (cmethod->name, "VolatileRead") && fsig->param_count == 1) {
5849 gboolean is_ref = mini_type_is_reference (cfg, fsig->params [0]);
5851 if (fsig->params [0]->type == MONO_TYPE_I1)
5852 opcode = OP_LOADI1_MEMBASE;
5853 else if (fsig->params [0]->type == MONO_TYPE_U1)
5854 opcode = OP_LOADU1_MEMBASE;
5855 else if (fsig->params [0]->type == MONO_TYPE_I2)
5856 opcode = OP_LOADI2_MEMBASE;
5857 else if (fsig->params [0]->type == MONO_TYPE_U2)
5858 opcode = OP_LOADU2_MEMBASE;
5859 else if (fsig->params [0]->type == MONO_TYPE_I4)
5860 opcode = OP_LOADI4_MEMBASE;
5861 else if (fsig->params [0]->type == MONO_TYPE_U4)
5862 opcode = OP_LOADU4_MEMBASE;
5863 else if (fsig->params [0]->type == MONO_TYPE_I8 || fsig->params [0]->type == MONO_TYPE_U8)
5864 opcode = OP_LOADI8_MEMBASE;
5865 else if (fsig->params [0]->type == MONO_TYPE_R4)
5866 opcode = OP_LOADR4_MEMBASE;
5867 else if (fsig->params [0]->type == MONO_TYPE_R8)
5868 opcode = OP_LOADR8_MEMBASE;
5869 else if (is_ref || fsig->params [0]->type == MONO_TYPE_I || fsig->params [0]->type == MONO_TYPE_U)
5870 opcode = OP_LOAD_MEMBASE;
5873 MONO_INST_NEW (cfg, ins, opcode);
5874 ins->inst_basereg = args [0]->dreg;
5875 ins->inst_offset = 0;
5876 MONO_ADD_INS (cfg->cbb, ins);
/* Pick dreg class and stack type per the parameter's CIL type. */
5878 switch (fsig->params [0]->type) {
5885 ins->dreg = mono_alloc_ireg (cfg);
5886 ins->type = STACK_I4;
5890 ins->dreg = mono_alloc_lreg (cfg);
5891 ins->type = STACK_I8;
5895 ins->dreg = mono_alloc_ireg (cfg);
5896 #if SIZEOF_REGISTER == 8
5897 ins->type = STACK_I8;
5899 ins->type = STACK_I4;
5904 ins->dreg = mono_alloc_freg (cfg);
5905 ins->type = STACK_R8;
5908 g_assert (mini_type_is_reference (cfg, fsig->params [0]));
5909 ins->dreg = mono_alloc_ireg_ref (cfg);
5910 ins->type = STACK_OBJ;
/* 64-bit load may need decomposition on 32-bit archs. */
5914 if (opcode == OP_LOADI8_MEMBASE)
5915 ins = mono_decompose_opcode (cfg, ins, NULL);
5917 emit_memory_barrier (cfg, MONO_MEMORY_BARRIER_ACQ);
/* Thread.VolatileWrite: release barrier + plain store of the right width. */
5921 } else if (!strcmp (cmethod->name, "VolatileWrite") && fsig->param_count == 2) {
5923 gboolean is_ref = mini_type_is_reference (cfg, fsig->params [0]);
5925 if (fsig->params [0]->type == MONO_TYPE_I1 || fsig->params [0]->type == MONO_TYPE_U1)
5926 opcode = OP_STOREI1_MEMBASE_REG;
5927 else if (fsig->params [0]->type == MONO_TYPE_I2 || fsig->params [0]->type == MONO_TYPE_U2)
5928 opcode = OP_STOREI2_MEMBASE_REG;
5929 else if (fsig->params [0]->type == MONO_TYPE_I4 || fsig->params [0]->type == MONO_TYPE_U4)
5930 opcode = OP_STOREI4_MEMBASE_REG;
5931 else if (fsig->params [0]->type == MONO_TYPE_I8 || fsig->params [0]->type == MONO_TYPE_U8)
5932 opcode = OP_STOREI8_MEMBASE_REG;
5933 else if (fsig->params [0]->type == MONO_TYPE_R4)
5934 opcode = OP_STORER4_MEMBASE_REG;
5935 else if (fsig->params [0]->type == MONO_TYPE_R8)
5936 opcode = OP_STORER8_MEMBASE_REG;
5937 else if (is_ref || fsig->params [0]->type == MONO_TYPE_I || fsig->params [0]->type == MONO_TYPE_U)
5938 opcode = OP_STORE_MEMBASE_REG;
5941 emit_memory_barrier (cfg, MONO_MEMORY_BARRIER_REL);
5943 MONO_INST_NEW (cfg, ins, opcode);
5944 ins->sreg1 = args [1]->dreg;
5945 ins->inst_destbasereg = args [0]->dreg;
5946 ins->inst_offset = 0;
5947 MONO_ADD_INS (cfg->cbb, ins);
5949 if (opcode == OP_STOREI8_MEMBASE_REG)
5950 ins = mono_decompose_opcode (cfg, ins, NULL);
/* --- System.Threading.Monitor: fastpath via arch-specific trampolines --- */
5955 } else if (cmethod->klass == mono_defaults.monitor_class) {
5956 #if defined(MONO_ARCH_MONITOR_OBJECT_REG)
5957 if (strcmp (cmethod->name, "Enter") == 0 && fsig->param_count == 1) {
5960 if (COMPILE_LLVM (cfg)) {
5962 * Pass the argument normally, the LLVM backend will handle the
5963 * calling convention problems.
5965 call = (MonoCallInst*)mono_emit_abs_call (cfg, MONO_PATCH_INFO_MONITOR_ENTER, NULL, helper_sig_monitor_enter_exit_trampoline_llvm, args);
5967 call = (MonoCallInst*)mono_emit_abs_call (cfg, MONO_PATCH_INFO_MONITOR_ENTER,
5968 NULL, helper_sig_monitor_enter_exit_trampoline, NULL);
5969 mono_call_inst_add_outarg_reg (cfg, call, args [0]->dreg,
5970 MONO_ARCH_MONITOR_OBJECT_REG, FALSE);
5973 return (MonoInst*)call;
5974 #if defined(MONO_ARCH_MONITOR_LOCK_TAKEN_REG)
/* Monitor.Enter(obj, ref lockTaken) — the .NET 4 two-argument form. */
5975 } else if (strcmp (cmethod->name, "Enter") == 0 && fsig->param_count == 2) {
5978 if (COMPILE_LLVM (cfg)) {
5980 * Pass the argument normally, the LLVM backend will handle the
5981 * calling convention problems.
5983 call = (MonoCallInst*)mono_emit_abs_call (cfg, MONO_PATCH_INFO_MONITOR_ENTER_V4, NULL, helper_sig_monitor_enter_v4_trampoline_llvm, args);
5985 call = (MonoCallInst*)mono_emit_abs_call (cfg, MONO_PATCH_INFO_MONITOR_ENTER_V4,
5986 NULL, helper_sig_monitor_enter_exit_trampoline, NULL);
5987 mono_call_inst_add_outarg_reg (cfg, call, args [0]->dreg, MONO_ARCH_MONITOR_OBJECT_REG, FALSE);
5988 mono_call_inst_add_outarg_reg (cfg, call, args [1]->dreg, MONO_ARCH_MONITOR_LOCK_TAKEN_REG, FALSE);
5991 return (MonoInst*)call;
5993 } else if (strcmp (cmethod->name, "Exit") == 0 && fsig->param_count == 1) {
5996 if (COMPILE_LLVM (cfg)) {
5997 call = (MonoCallInst*)mono_emit_abs_call (cfg, MONO_PATCH_INFO_MONITOR_EXIT, NULL, helper_sig_monitor_enter_exit_trampoline_llvm, args);
5999 call = (MonoCallInst*)mono_emit_abs_call (cfg, MONO_PATCH_INFO_MONITOR_EXIT,
6000 NULL, helper_sig_monitor_enter_exit_trampoline, NULL);
6001 mono_call_inst_add_outarg_reg (cfg, call, args [0]->dreg,
6002 MONO_ARCH_MONITOR_OBJECT_REG, FALSE);
6005 return (MonoInst*)call;
/* --- System.Threading.Interlocked --- */
6008 } else if (cmethod->klass->image == mono_defaults.corlib &&
6009 (strcmp (cmethod->klass->name_space, "System.Threading") == 0) &&
6010 (strcmp (cmethod->klass->name, "Interlocked") == 0)) {
6013 #if SIZEOF_REGISTER == 8
/* Interlocked.Read(ref long): atomic load when supported. */
6014 if (strcmp (cmethod->name, "Read") == 0 && fsig->param_count == 1 && (fsig->params [0]->type == MONO_TYPE_I8)) {
6015 if (mono_arch_opcode_supported (OP_ATOMIC_LOAD_I8)) {
6016 MONO_INST_NEW (cfg, ins, OP_ATOMIC_LOAD_I8);
6017 ins->dreg = mono_alloc_preg (cfg);
6018 ins->sreg1 = args [0]->dreg;
6019 ins->type = STACK_I8;
6020 ins->backend.memory_barrier_kind = MONO_MEMORY_BARRIER_SEQ;
6021 MONO_ADD_INS (cfg->cbb, ins);
6025 emit_memory_barrier (cfg, MONO_MEMORY_BARRIER_SEQ);
6027 /* 64 bit reads are already atomic */
6028 MONO_INST_NEW (cfg, load_ins, OP_LOADI8_MEMBASE);
6029 load_ins->dreg = mono_alloc_preg (cfg);
6030 load_ins->inst_basereg = args [0]->dreg;
6031 load_ins->inst_offset = 0;
6032 load_ins->type = STACK_I8;
6033 MONO_ADD_INS (cfg->cbb, load_ins);
6035 emit_memory_barrier (cfg, MONO_MEMORY_BARRIER_SEQ);
/* Increment: atomic add of constant +1. */
6042 if (strcmp (cmethod->name, "Increment") == 0 && fsig->param_count == 1) {
6043 MonoInst *ins_iconst;
6046 if (fsig->params [0]->type == MONO_TYPE_I4) {
6047 opcode = OP_ATOMIC_ADD_I4;
6048 cfg->has_atomic_add_i4 = TRUE;
6050 #if SIZEOF_REGISTER == 8
6051 else if (fsig->params [0]->type == MONO_TYPE_I8)
6052 opcode = OP_ATOMIC_ADD_I8;
6055 if (!mono_arch_opcode_supported (opcode))
6057 MONO_INST_NEW (cfg, ins_iconst, OP_ICONST);
6058 ins_iconst->inst_c0 = 1;
6059 ins_iconst->dreg = mono_alloc_ireg (cfg);
6060 MONO_ADD_INS (cfg->cbb, ins_iconst);
6062 MONO_INST_NEW (cfg, ins, opcode);
6063 ins->dreg = mono_alloc_ireg (cfg);
6064 ins->inst_basereg = args [0]->dreg;
6065 ins->inst_offset = 0;
6066 ins->sreg2 = ins_iconst->dreg;
6067 ins->type = (opcode == OP_ATOMIC_ADD_I4) ? STACK_I4 : STACK_I8;
6068 MONO_ADD_INS (cfg->cbb, ins);
/* Decrement: atomic add of constant -1. */
6070 } else if (strcmp (cmethod->name, "Decrement") == 0 && fsig->param_count == 1) {
6071 MonoInst *ins_iconst;
6074 if (fsig->params [0]->type == MONO_TYPE_I4) {
6075 opcode = OP_ATOMIC_ADD_I4;
6076 cfg->has_atomic_add_i4 = TRUE;
6078 #if SIZEOF_REGISTER == 8
6079 else if (fsig->params [0]->type == MONO_TYPE_I8)
6080 opcode = OP_ATOMIC_ADD_I8;
6083 if (!mono_arch_opcode_supported (opcode))
6085 MONO_INST_NEW (cfg, ins_iconst, OP_ICONST);
6086 ins_iconst->inst_c0 = -1;
6087 ins_iconst->dreg = mono_alloc_ireg (cfg);
6088 MONO_ADD_INS (cfg->cbb, ins_iconst);
6090 MONO_INST_NEW (cfg, ins, opcode);
6091 ins->dreg = mono_alloc_ireg (cfg);
6092 ins->inst_basereg = args [0]->dreg;
6093 ins->inst_offset = 0;
6094 ins->sreg2 = ins_iconst->dreg;
6095 ins->type = (opcode == OP_ATOMIC_ADD_I4) ? STACK_I4 : STACK_I8;
6096 MONO_ADD_INS (cfg->cbb, ins);
/* Add: atomic add of the caller-supplied value. */
6098 } else if (strcmp (cmethod->name, "Add") == 0 && fsig->param_count == 2) {
6101 if (fsig->params [0]->type == MONO_TYPE_I4) {
6102 opcode = OP_ATOMIC_ADD_I4;
6103 cfg->has_atomic_add_i4 = TRUE;
6105 #if SIZEOF_REGISTER == 8
6106 else if (fsig->params [0]->type == MONO_TYPE_I8)
6107 opcode = OP_ATOMIC_ADD_I8;
6110 if (!mono_arch_opcode_supported (opcode))
6112 MONO_INST_NEW (cfg, ins, opcode);
6113 ins->dreg = mono_alloc_ireg (cfg);
6114 ins->inst_basereg = args [0]->dreg;
6115 ins->inst_offset = 0;
6116 ins->sreg2 = args [1]->dreg;
6117 ins->type = (opcode == OP_ATOMIC_ADD_I4) ? STACK_I4 : STACK_I8;
6118 MONO_ADD_INS (cfg->cbb, ins);
/* Exchange: atomic xchg; floats are moved through int registers (f2i/i2f). */
6121 else if (strcmp (cmethod->name, "Exchange") == 0 && fsig->param_count == 2) {
6122 MonoInst *f2i = NULL, *i2f;
6123 guint32 opcode, f2i_opcode, i2f_opcode;
6124 gboolean is_ref = mini_type_is_reference (cfg, fsig->params [0]);
6125 gboolean is_float = fsig->params [0]->type == MONO_TYPE_R4 || fsig->params [0]->type == MONO_TYPE_R8;
6127 if (fsig->params [0]->type == MONO_TYPE_I4 ||
6128 fsig->params [0]->type == MONO_TYPE_R4) {
6129 opcode = OP_ATOMIC_EXCHANGE_I4;
6130 f2i_opcode = OP_MOVE_F_TO_I4;
6131 i2f_opcode = OP_MOVE_I4_TO_F;
6132 cfg->has_atomic_exchange_i4 = TRUE;
6134 #if SIZEOF_REGISTER == 8
6136 fsig->params [0]->type == MONO_TYPE_I8 ||
6137 fsig->params [0]->type == MONO_TYPE_R8 ||
6138 fsig->params [0]->type == MONO_TYPE_I) {
6139 opcode = OP_ATOMIC_EXCHANGE_I8;
6140 f2i_opcode = OP_MOVE_F_TO_I8;
6141 i2f_opcode = OP_MOVE_I8_TO_F;
6144 else if (is_ref || fsig->params [0]->type == MONO_TYPE_I) {
6145 opcode = OP_ATOMIC_EXCHANGE_I4;
6146 cfg->has_atomic_exchange_i4 = TRUE;
6152 if (!mono_arch_opcode_supported (opcode))
6156 /* TODO: Decompose these opcodes instead of bailing here. */
6157 if (COMPILE_SOFT_FLOAT (cfg))
6160 MONO_INST_NEW (cfg, f2i, f2i_opcode);
6161 f2i->dreg = mono_alloc_ireg (cfg);
6162 f2i->sreg1 = args [1]->dreg;
6163 if (f2i_opcode == OP_MOVE_F_TO_I4)
6164 f2i->backend.spill_var = mini_get_int_to_float_spill_area (cfg);
6165 MONO_ADD_INS (cfg->cbb, f2i);
6168 MONO_INST_NEW (cfg, ins, opcode);
6169 ins->dreg = is_ref ? mono_alloc_ireg_ref (cfg) : mono_alloc_ireg (cfg);
6170 ins->inst_basereg = args [0]->dreg;
6171 ins->inst_offset = 0;
6172 ins->sreg2 = is_float ? f2i->dreg : args [1]->dreg;
6173 MONO_ADD_INS (cfg->cbb, ins);
6175 switch (fsig->params [0]->type) {
6177 ins->type = STACK_I4;
6180 ins->type = STACK_I8;
6183 #if SIZEOF_REGISTER == 8
6184 ins->type = STACK_I8;
6186 ins->type = STACK_I4;
6191 ins->type = STACK_R8;
6194 g_assert (mini_type_is_reference (cfg, fsig->params [0]));
6195 ins->type = STACK_OBJ;
6200 MONO_INST_NEW (cfg, i2f, i2f_opcode);
6201 i2f->dreg = mono_alloc_freg (cfg);
6202 i2f->sreg1 = ins->dreg;
6203 i2f->type = STACK_R8;
6204 if (i2f_opcode == OP_MOVE_I4_TO_F)
6205 i2f->backend.spill_var = mini_get_int_to_float_spill_area (cfg);
6206 MONO_ADD_INS (cfg->cbb, i2f);
/* A reference was stored into *args[0]: keep the write barrier honest. */
6211 if (cfg->gen_write_barriers && is_ref)
6212 emit_write_barrier (cfg, args [0], args [1]);
/* CompareExchange(ref T, T, T): atomic CAS, same float-through-int dance. */
6214 else if ((strcmp (cmethod->name, "CompareExchange") == 0) && fsig->param_count == 3) {
6215 MonoInst *f2i_new = NULL, *f2i_cmp = NULL, *i2f;
6216 guint32 opcode, f2i_opcode, i2f_opcode;
6217 gboolean is_ref = mini_type_is_reference (cfg, fsig->params [1]);
6218 gboolean is_float = fsig->params [1]->type == MONO_TYPE_R4 || fsig->params [1]->type == MONO_TYPE_R8;
6220 if (fsig->params [1]->type == MONO_TYPE_I4 ||
6221 fsig->params [1]->type == MONO_TYPE_R4) {
6222 opcode = OP_ATOMIC_CAS_I4;
6223 f2i_opcode = OP_MOVE_F_TO_I4;
6224 i2f_opcode = OP_MOVE_I4_TO_F;
6225 cfg->has_atomic_cas_i4 = TRUE;
6227 #if SIZEOF_REGISTER == 8
6229 fsig->params [1]->type == MONO_TYPE_I8 ||
6230 fsig->params [1]->type == MONO_TYPE_R8 ||
6231 fsig->params [1]->type == MONO_TYPE_I) {
6232 opcode = OP_ATOMIC_CAS_I8;
6233 f2i_opcode = OP_MOVE_F_TO_I8;
6234 i2f_opcode = OP_MOVE_I8_TO_F;
6237 else if (is_ref || fsig->params [1]->type == MONO_TYPE_I) {
6238 opcode = OP_ATOMIC_CAS_I4;
6239 cfg->has_atomic_cas_i4 = TRUE;
6245 if (!mono_arch_opcode_supported (opcode))
6249 /* TODO: Decompose these opcodes instead of bailing here. */
6250 if (COMPILE_SOFT_FLOAT (cfg))
6253 MONO_INST_NEW (cfg, f2i_new, f2i_opcode);
6254 f2i_new->dreg = mono_alloc_ireg (cfg);
6255 f2i_new->sreg1 = args [1]->dreg;
6256 if (f2i_opcode == OP_MOVE_F_TO_I4)
6257 f2i_new->backend.spill_var = mini_get_int_to_float_spill_area (cfg);
6258 MONO_ADD_INS (cfg->cbb, f2i_new);
6260 MONO_INST_NEW (cfg, f2i_cmp, f2i_opcode);
6261 f2i_cmp->dreg = mono_alloc_ireg (cfg);
6262 f2i_cmp->sreg1 = args [2]->dreg;
6263 if (f2i_opcode == OP_MOVE_F_TO_I4)
6264 f2i_cmp->backend.spill_var = mini_get_int_to_float_spill_area (cfg);
6265 MONO_ADD_INS (cfg->cbb, f2i_cmp);
6268 MONO_INST_NEW (cfg, ins, opcode);
6269 ins->dreg = is_ref ? alloc_ireg_ref (cfg) : alloc_ireg (cfg);
6270 ins->sreg1 = args [0]->dreg;
6271 ins->sreg2 = is_float ? f2i_new->dreg : args [1]->dreg;
6272 ins->sreg3 = is_float ? f2i_cmp->dreg : args [2]->dreg;
6273 MONO_ADD_INS (cfg->cbb, ins);
6275 switch (fsig->params [1]->type) {
6277 ins->type = STACK_I4;
6280 ins->type = STACK_I8;
6283 #if SIZEOF_REGISTER == 8
6284 ins->type = STACK_I8;
6286 ins->type = STACK_I4;
6291 ins->type = STACK_R8;
6294 g_assert (mini_type_is_reference (cfg, fsig->params [1]));
6295 ins->type = STACK_OBJ;
6300 MONO_INST_NEW (cfg, i2f, i2f_opcode);
6301 i2f->dreg = mono_alloc_freg (cfg);
6302 i2f->sreg1 = ins->dreg;
6303 i2f->type = STACK_R8;
6304 if (i2f_opcode == OP_MOVE_I4_TO_F)
6305 i2f->backend.spill_var = mini_get_int_to_float_spill_area (cfg);
6306 MONO_ADD_INS (cfg->cbb, i2f);
6311 if (cfg->gen_write_barriers && is_ref)
6312 emit_write_barrier (cfg, args [0], args [1]);
/* CompareExchange with an extra bool* success out-parameter. */
6314 else if ((strcmp (cmethod->name, "CompareExchange") == 0) && fsig->param_count == 4 &&
6315 fsig->params [1]->type == MONO_TYPE_I4) {
6316 MonoInst *cmp, *ceq;
6318 if (!mono_arch_opcode_supported (OP_ATOMIC_CAS_I4))
6321 /* int32 r = CAS (location, value, comparand); */
6322 MONO_INST_NEW (cfg, ins, OP_ATOMIC_CAS_I4);
6323 ins->dreg = alloc_ireg (cfg);
6324 ins->sreg1 = args [0]->dreg;
6325 ins->sreg2 = args [1]->dreg;
6326 ins->sreg3 = args [2]->dreg;
6327 ins->type = STACK_I4;
6328 MONO_ADD_INS (cfg->cbb, ins);
6330 /* bool result = r == comparand; */
6331 MONO_INST_NEW (cfg, cmp, OP_ICOMPARE);
6332 cmp->sreg1 = ins->dreg;
6333 cmp->sreg2 = args [2]->dreg;
6334 cmp->type = STACK_I4;
6335 MONO_ADD_INS (cfg->cbb, cmp);
6337 MONO_INST_NEW (cfg, ceq, OP_ICEQ);
6338 ceq->dreg = alloc_ireg (cfg);
6339 ceq->type = STACK_I4;
6340 MONO_ADD_INS (cfg->cbb, ceq);
6342 /* *success = result; */
6343 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI1_MEMBASE_REG, args [3]->dreg, 0, ceq->dreg);
6345 cfg->has_atomic_cas_i4 = TRUE;
6347 else if (strcmp (cmethod->name, "MemoryBarrier") == 0 && fsig->param_count == 0)
6348 ins = emit_memory_barrier (cfg, MONO_MEMORY_BARRIER_SEQ);
/* --- System.Threading.Volatile: acquire loads / release stores --- */
6352 } else if (cmethod->klass->image == mono_defaults.corlib &&
6353 (strcmp (cmethod->klass->name_space, "System.Threading") == 0) &&
6354 (strcmp (cmethod->klass->name, "Volatile") == 0)) {
6357 if (!strcmp (cmethod->name, "Read") && fsig->param_count == 1) {
6359 gboolean is_ref = mini_type_is_reference (cfg, fsig->params [0]);
6360 gboolean is_float = fsig->params [0]->type == MONO_TYPE_R4 || fsig->params [0]->type == MONO_TYPE_R8;
6362 if (fsig->params [0]->type == MONO_TYPE_I1)
6363 opcode = OP_ATOMIC_LOAD_I1;
6364 else if (fsig->params [0]->type == MONO_TYPE_U1 || fsig->params [0]->type == MONO_TYPE_BOOLEAN)
6365 opcode = OP_ATOMIC_LOAD_U1;
6366 else if (fsig->params [0]->type == MONO_TYPE_I2)
6367 opcode = OP_ATOMIC_LOAD_I2;
6368 else if (fsig->params [0]->type == MONO_TYPE_U2)
6369 opcode = OP_ATOMIC_LOAD_U2;
6370 else if (fsig->params [0]->type == MONO_TYPE_I4)
6371 opcode = OP_ATOMIC_LOAD_I4;
6372 else if (fsig->params [0]->type == MONO_TYPE_U4)
6373 opcode = OP_ATOMIC_LOAD_U4;
6374 else if (fsig->params [0]->type == MONO_TYPE_R4)
6375 opcode = OP_ATOMIC_LOAD_R4;
6376 else if (fsig->params [0]->type == MONO_TYPE_R8)
6377 opcode = OP_ATOMIC_LOAD_R8;
6378 #if SIZEOF_REGISTER == 8
6379 else if (fsig->params [0]->type == MONO_TYPE_I8 || fsig->params [0]->type == MONO_TYPE_I)
6380 opcode = OP_ATOMIC_LOAD_I8;
6381 else if (is_ref || fsig->params [0]->type == MONO_TYPE_U8 || fsig->params [0]->type == MONO_TYPE_U)
6382 opcode = OP_ATOMIC_LOAD_U8;
6384 else if (fsig->params [0]->type == MONO_TYPE_I)
6385 opcode = OP_ATOMIC_LOAD_I4;
6386 else if (is_ref || fsig->params [0]->type == MONO_TYPE_U)
6387 opcode = OP_ATOMIC_LOAD_U4;
6391 if (!mono_arch_opcode_supported (opcode))
6394 MONO_INST_NEW (cfg, ins, opcode);
6395 ins->dreg = is_ref ? mono_alloc_ireg_ref (cfg) : (is_float ? mono_alloc_freg (cfg) : mono_alloc_ireg (cfg));
6396 ins->sreg1 = args [0]->dreg;
6397 ins->backend.memory_barrier_kind = MONO_MEMORY_BARRIER_ACQ;
6398 MONO_ADD_INS (cfg->cbb, ins);
6400 switch (fsig->params [0]->type) {
6401 case MONO_TYPE_BOOLEAN:
6408 ins->type = STACK_I4;
6412 ins->type = STACK_I8;
6416 #if SIZEOF_REGISTER == 8
6417 ins->type = STACK_I8;
6419 ins->type = STACK_I4;
6424 ins->type = STACK_R8;
6427 g_assert (mini_type_is_reference (cfg, fsig->params [0]));
6428 ins->type = STACK_OBJ;
6434 if (!strcmp (cmethod->name, "Write") && fsig->param_count == 2) {
6436 gboolean is_ref = mini_type_is_reference (cfg, fsig->params [0]);
6438 if (fsig->params [0]->type == MONO_TYPE_I1)
6439 opcode = OP_ATOMIC_STORE_I1;
6440 else if (fsig->params [0]->type == MONO_TYPE_U1 || fsig->params [0]->type == MONO_TYPE_BOOLEAN)
6441 opcode = OP_ATOMIC_STORE_U1;
6442 else if (fsig->params [0]->type == MONO_TYPE_I2)
6443 opcode = OP_ATOMIC_STORE_I2;
6444 else if (fsig->params [0]->type == MONO_TYPE_U2)
6445 opcode = OP_ATOMIC_STORE_U2;
6446 else if (fsig->params [0]->type == MONO_TYPE_I4)
6447 opcode = OP_ATOMIC_STORE_I4;
6448 else if (fsig->params [0]->type == MONO_TYPE_U4)
6449 opcode = OP_ATOMIC_STORE_U4;
6450 else if (fsig->params [0]->type == MONO_TYPE_R4)
6451 opcode = OP_ATOMIC_STORE_R4;
6452 else if (fsig->params [0]->type == MONO_TYPE_R8)
6453 opcode = OP_ATOMIC_STORE_R8;
6454 #if SIZEOF_REGISTER == 8
6455 else if (fsig->params [0]->type == MONO_TYPE_I8 || fsig->params [0]->type == MONO_TYPE_I)
6456 opcode = OP_ATOMIC_STORE_I8;
6457 else if (is_ref || fsig->params [0]->type == MONO_TYPE_U8 || fsig->params [0]->type == MONO_TYPE_U)
6458 opcode = OP_ATOMIC_STORE_U8;
6460 else if (fsig->params [0]->type == MONO_TYPE_I)
6461 opcode = OP_ATOMIC_STORE_I4;
6462 else if (is_ref || fsig->params [0]->type == MONO_TYPE_U)
6463 opcode = OP_ATOMIC_STORE_U4;
6467 if (!mono_arch_opcode_supported (opcode))
6470 MONO_INST_NEW (cfg, ins, opcode);
6471 ins->dreg = args [0]->dreg;
6472 ins->sreg1 = args [1]->dreg;
6473 ins->backend.memory_barrier_kind = MONO_MEMORY_BARRIER_REL;
6474 MONO_ADD_INS (cfg->cbb, ins);
6476 if (cfg->gen_write_barriers && is_ref)
6477 emit_write_barrier (cfg, args [0], args [1]);
/* --- System.Diagnostics.Debugger --- */
6483 } else if (cmethod->klass->image == mono_defaults.corlib &&
6484 (strcmp (cmethod->klass->name_space, "System.Diagnostics") == 0) &&
6485 (strcmp (cmethod->klass->name, "Debugger") == 0)) {
6486 if (!strcmp (cmethod->name, "Break") && fsig->param_count == 0) {
6487 if (should_insert_brekpoint (cfg->method)) {
6488 ins = mono_emit_jit_icall (cfg, mono_debugger_agent_user_break, NULL);
6490 MONO_INST_NEW (cfg, ins, OP_NOP);
6491 MONO_ADD_INS (cfg->cbb, ins);
/* --- System.Environment --- */
6495 } else if (cmethod->klass->image == mono_defaults.corlib &&
6496 (strcmp (cmethod->klass->name_space, "System") == 0) &&
6497 (strcmp (cmethod->klass->name, "Environment") == 0)) {
6498 if (!strcmp (cmethod->name, "get_IsRunningOnWindows") && fsig->param_count == 0) {
6500 EMIT_NEW_ICONST (cfg, ins, 1);
6502 EMIT_NEW_ICONST (cfg, ins, 0);
/* --- System.Math (no non-LLVM branchless Min/Max, see note below) --- */
6505 } else if (cmethod->klass == mono_defaults.math_class) {
6507 * There is general branchless code for Min/Max, but it does not work for
6509 * http://everything2.com/?node_id=1051618
/* --- MonoMac/monotouch ObjCRuntime.Selector.GetHandle("literal") --- */
6511 } else if ((!strcmp (cmethod->klass->image->assembly->aname.name, "MonoMac") ||
6512 !strcmp (cmethod->klass->image->assembly->aname.name, "monotouch")) &&
6513 !strcmp (cmethod->klass->name_space, "XamCore.ObjCRuntime") &&
6514 !strcmp (cmethod->klass->name, "Selector")) {
6515 #ifdef MONO_ARCH_HAVE_OBJC_GET_SELECTOR
6516 if (!strcmp (cmethod->klass->name, "GetHandle") && fsig->param_count == 1 &&
6517 (args [0]->opcode == OP_GOT_ENTRY || args [0]->opcode == OP_AOTCONST) &&
6520 MonoJumpInfoToken *ji;
6523 cfg->disable_llvm = TRUE;
/* Recover the ldstr token from either the GOT entry or the AOT const. */
6525 if (args [0]->opcode == OP_GOT_ENTRY) {
6526 pi = args [0]->inst_p1;
6527 g_assert (pi->opcode == OP_PATCH_INFO);
6528 g_assert (GPOINTER_TO_INT (pi->inst_p1) == MONO_PATCH_INFO_LDSTR);
6531 g_assert (GPOINTER_TO_INT (args [0]->inst_p1) == MONO_PATCH_INFO_LDSTR);
6532 ji = args [0]->inst_p0;
6535 NULLIFY_INS (args [0]);
6538 s = mono_ldstr (cfg->domain, ji->image, mono_metadata_token_index (ji->token));
6539 MONO_INST_NEW (cfg, ins, OP_OBJC_GET_SELECTOR);
6540 ins->dreg = mono_alloc_ireg (cfg);
6542 ins->inst_p0 = mono_string_to_utf8 (s);
6543 MONO_ADD_INS (cfg->cbb, ins);
/* --- fallbacks: SIMD, native types, LLVM, arch-specific --- */
6549 #ifdef MONO_ARCH_SIMD_INTRINSICS
6550 if (cfg->opt & MONO_OPT_SIMD) {
6551 ins = mono_emit_simd_intrinsics (cfg, cmethod, fsig, args);
6557 ins = mono_emit_native_types_intrinsics (cfg, cmethod, fsig, args);
6561 if (COMPILE_LLVM (cfg)) {
6562 ins = llvm_emit_inst_for_method (cfg, cmethod, fsig, args);
6567 return mono_arch_emit_inst_for_method (cfg, cmethod, fsig, args);
6571 * This entry point could be used later for arbitrary method
/*
 * mini_redirect_call:
 *
 *   Redirects selected runtime methods to managed implementations. Currently
 * rewrites String.InternalAllocateStr into a call to the managed string
 * allocator (passing the String vtable and the length) when allocation
 * profiling and shared (domain-neutral) code are off.
 * NOTE(review): the managed_alloc NULL check and the final NULL fallback
 * return are elided from this line-sampled view.
 */
6574 inline static MonoInst*
6575 mini_redirect_call (MonoCompile *cfg, MonoMethod *method,
6576 MonoMethodSignature *signature, MonoInst **args, MonoInst *this)
6578 if (method->klass == mono_defaults.string_class) {
6579 /* managed string allocation support */
6580 if (strcmp (method->name, "InternalAllocateStr") == 0 && !(mono_profiler_events & MONO_PROFILE_ALLOCATIONS) && !(cfg->opt & MONO_OPT_SHARED)) {
6581 MonoInst *iargs [2];
6582 MonoVTable *vtable = mono_class_vtable (cfg->domain, method->klass);
6583 MonoMethod *managed_alloc = NULL;
6585 g_assert (vtable); /*Should not fail since it System.String*/
6586 #ifndef MONO_CROSS_COMPILE
6587 managed_alloc = mono_gc_get_managed_allocator (method->klass, FALSE, FALSE);
/* Call the managed allocator with (vtable, length). */
6591 EMIT_NEW_VTABLECONST (cfg, iargs [0], vtable);
6592 iargs [1] = args [0];
6593 return mono_emit_method_call (cfg, managed_alloc, iargs, this);
/*
 * mono_save_args:
 *
 *   Store the argument values SP of an inlined call into freshly created
 * local variables and record them in cfg->args, so the inlined body loads
 * its arguments from these vars. For instance methods the type of arg 0
 * (this) is derived from the stack entry rather than the signature.
 */
6600 mono_save_args (MonoCompile *cfg, MonoMethodSignature *sig, MonoInst **sp)
6602 MonoInst *store, *temp;
6605 for (i = 0; i < sig->param_count + sig->hasthis; ++i) {
6606 MonoType *argtype = (sig->hasthis && (i == 0)) ? type_from_stack_type (*sp) : sig->params [i - sig->hasthis];
6609 * FIXME: We should use *args++ = sp [0], but that would mean the arg
6610 * would be different than the MonoInst's used to represent arguments, and
6611 * the ldelema implementation can't deal with that.
6612 * Solution: When ldelema is used on an inline argument, create a var for
6613 * it, emit ldelema on that var, and emit the saving code below in
6614 * inline_method () if needed.
6616 temp = mono_compile_create_var (cfg, argtype, OP_LOCAL);
6617 cfg->args [i] = temp;
6618 /* This uses cfg->args [i] which is set by the preceding line */
6619 EMIT_NEW_ARGSTORE (cfg, store, i, *sp);
6620 store->cil_code = sp [0]->cil_code;
/* Debug knobs: when enabled, inlining can be restricted by method name via
 * environment variables (see the two check_* helpers below). */
6625 #define MONO_INLINE_CALLED_LIMITED_METHODS 1
6626 #define MONO_INLINE_CALLER_LIMITED_METHODS 1
6628 #if (MONO_INLINE_CALLED_LIMITED_METHODS)
/*
 * check_inline_called_method_name_limit:
 *
 *   Debug filter: only allow inlining of callees whose full name starts with
 * the prefix given in the MONO_INLINE_CALLED_METHOD_NAME_LIMIT env var.
 * The env var is read once and cached in a function-local static.
 */
6630 check_inline_called_method_name_limit (MonoMethod *called_method)
6633 static const char *limit = NULL;
6635 if (limit == NULL) {
6636 const char *limit_string = g_getenv ("MONO_INLINE_CALLED_METHOD_NAME_LIMIT");
6638 if (limit_string != NULL)
6639 limit = limit_string;
6644 if (limit [0] != '\0') {
6645 char *called_method_name = mono_method_full_name (called_method, TRUE);
/* Prefix comparison against the configured limit string. */
6647 strncmp_result = strncmp (called_method_name, limit, strlen (limit));
6648 g_free (called_method_name);
6650 //return (strncmp_result <= 0);
6651 return (strncmp_result == 0);
6658 #if (MONO_INLINE_CALLER_LIMITED_METHODS)
/*
 * check_inline_caller_method_name_limit:
 *
 *   Debug filter: only allow inlining inside callers whose full name starts
 * with the prefix in the MONO_INLINE_CALLER_METHOD_NAME_LIMIT env var.
 * Mirrors check_inline_called_method_name_limit () above.
 */
6660 check_inline_caller_method_name_limit (MonoMethod *caller_method)
6663 static const char *limit = NULL;
6665 if (limit == NULL) {
6666 const char *limit_string = g_getenv ("MONO_INLINE_CALLER_METHOD_NAME_LIMIT");
6667 if (limit_string != NULL) {
6668 limit = limit_string;
6674 if (limit [0] != '\0') {
6675 char *caller_method_name = mono_method_full_name (caller_method, TRUE);
/* Prefix comparison against the configured limit string. */
6677 strncmp_result = strncmp (caller_method_name, limit, strlen (limit));
6678 g_free (caller_method_name);
6680 //return (strncmp_result <= 0);
6681 return (strncmp_result == 0);
/*
 * emit_init_rvar:
 *
 *   Emit IR initializing vreg DREG of type RTYPE to its zero value:
 * NULL for reference/pointer types, 0 for integer types, 0.0 for R4/R8
 * (loaded from static constants so the value has an addressable home),
 * and VZERO for value types (including generic insts and gsharedvt
 * type vars that are value types).
 */
6689 emit_init_rvar (MonoCompile *cfg, int dreg, MonoType *rtype)
6691 static double r8_0 = 0.0;
6692 static float r4_0 = 0.0;
/* Strip generic sharing wrappers etc. so 't' reflects the concrete kind. */
6696 rtype = mini_get_underlying_type (cfg, rtype);
6700 MONO_EMIT_NEW_PCONST (cfg, dreg, NULL);
6701 } else if (t >= MONO_TYPE_BOOLEAN && t <= MONO_TYPE_U4) {
6702 MONO_EMIT_NEW_ICONST (cfg, dreg, 0);
6703 } else if (t == MONO_TYPE_I8 || t == MONO_TYPE_U8) {
6704 MONO_EMIT_NEW_I8CONST (cfg, dreg, 0);
6705 } else if (cfg->r4fp && t == MONO_TYPE_R4) {
/* With r4fp, R4 values stay in single precision: use an R4 const. */
6706 MONO_INST_NEW (cfg, ins, OP_R4CONST);
6707 ins->type = STACK_R4;
6708 ins->inst_p0 = (void*)&r4_0;
6710 MONO_ADD_INS (cfg->cbb, ins);
6711 } else if (t == MONO_TYPE_R4 || t == MONO_TYPE_R8) {
6712 MONO_INST_NEW (cfg, ins, OP_R8CONST);
6713 ins->type = STACK_R8;
6714 ins->inst_p0 = (void*)&r8_0;
6716 MONO_ADD_INS (cfg->cbb, ins);
6717 } else if ((t == MONO_TYPE_VALUETYPE) || (t == MONO_TYPE_TYPEDBYREF) ||
6718 ((t == MONO_TYPE_GENERICINST) && mono_type_generic_inst_is_valuetype (rtype))) {
6719 MONO_EMIT_NEW_VZERO (cfg, dreg, mono_class_from_mono_type (rtype));
6720 } else if (((t == MONO_TYPE_VAR) || (t == MONO_TYPE_MVAR)) && mini_type_var_is_vt (cfg, rtype)) {
6721 MONO_EMIT_NEW_VZERO (cfg, dreg, mono_class_from_mono_type (rtype));
/* Everything else is treated as a pointer-sized reference: init to NULL. */
6723 MONO_EMIT_NEW_PCONST (cfg, dreg, NULL);
/*
 * emit_dummy_init_rvar:
 *
 *   Emit DUMMY_* init opcodes for DREG/RTYPE. These keep the IR valid
 * (every vreg has a def) without emitting real initialization code;
 * the type dispatch mirrors emit_init_rvar () exactly, falling back to
 * a real init for any type without a dummy opcode.
 */
6728 emit_dummy_init_rvar (MonoCompile *cfg, int dreg, MonoType *rtype)
6732 rtype = mini_get_underlying_type (cfg, rtype);
6736 MONO_EMIT_NEW_DUMMY_INIT (cfg, dreg, OP_DUMMY_PCONST);
6737 } else if (t >= MONO_TYPE_BOOLEAN && t <= MONO_TYPE_U4) {
6738 MONO_EMIT_NEW_DUMMY_INIT (cfg, dreg, OP_DUMMY_ICONST);
6739 } else if (t == MONO_TYPE_I8 || t == MONO_TYPE_U8) {
6740 MONO_EMIT_NEW_DUMMY_INIT (cfg, dreg, OP_DUMMY_I8CONST);
6741 } else if (cfg->r4fp && t == MONO_TYPE_R4) {
6742 MONO_EMIT_NEW_DUMMY_INIT (cfg, dreg, OP_DUMMY_R4CONST);
6743 } else if (t == MONO_TYPE_R4 || t == MONO_TYPE_R8) {
6744 MONO_EMIT_NEW_DUMMY_INIT (cfg, dreg, OP_DUMMY_R8CONST);
6745 } else if ((t == MONO_TYPE_VALUETYPE) || (t == MONO_TYPE_TYPEDBYREF) ||
6746 ((t == MONO_TYPE_GENERICINST) && mono_type_generic_inst_is_valuetype (rtype))) {
6747 MONO_EMIT_NEW_DUMMY_INIT (cfg, dreg, OP_DUMMY_VZERO);
6748 } else if (((t == MONO_TYPE_VAR) || (t == MONO_TYPE_MVAR)) && mini_type_var_is_vt (cfg, rtype)) {
6749 MONO_EMIT_NEW_DUMMY_INIT (cfg, dreg, OP_DUMMY_VZERO);
/* No dummy opcode for this kind: emit a real zero init instead. */
6751 emit_init_rvar (cfg, dreg, rtype);
/*
 * emit_init_local:
 *
 *   Initialize local variable LOCAL of type TYPE. If INIT is FALSE, emit
 * dummy initialization statements that merely keep the IR valid. Under
 * soft-float the zero value is built in a fresh vreg and then stored,
 * since the local's own dreg cannot hold the FP constant directly.
 */
6755 /* If INIT is FALSE, emit dummy initialization statements to keep the IR valid */
6757 emit_init_local (MonoCompile *cfg, int local, MonoType *type, gboolean init)
6759 MonoInst *var = cfg->locals [local];
6760 if (COMPILE_SOFT_FLOAT (cfg)) {
6762 int reg = alloc_dreg (cfg, var->type);
6763 emit_init_rvar (cfg, reg, type);
/* Store the just-emitted init value (cbb->last_ins) into the local. */
6764 EMIT_NEW_LOCSTORE (cfg, store, local, cfg->cbb->last_ins);
6767 emit_init_rvar (cfg, var->dreg, type);
6769 emit_dummy_init_rvar (cfg, var->dreg, type);
/*
 * inline_method:
 *
 *   Inline CMETHOD into the current method at IP, with arguments in SP.
 * Return the cost of inlining CMETHOD.
 *
 * The compile state (locals, args, cil offsets, cbb, generic context, ...)
 * is saved, the callee is translated via a recursive mono_method_to_ir ()
 * into a fresh [sbblock, ebblock] region, and the state is restored.
 * If the callee was cheap enough (or INLINE_ALWAYS is set) the region is
 * linked/merged into the caller's CFG; otherwise the attempt is abandoned
 * and cfg->cbb is reset so the new bblocks are discarded.
 * OUT_CBB receives the bblock the caller should continue emitting into.
 */
6776 * Return the cost of inlining CMETHOD.
6779 inline_method (MonoCompile *cfg, MonoMethod *cmethod, MonoMethodSignature *fsig, MonoInst **sp,
6780 guchar *ip, guint real_offset, gboolean inline_always, MonoBasicBlock **out_cbb)
6782 MonoInst *ins, *rvar = NULL;
6783 MonoMethodHeader *cheader;
6784 MonoBasicBlock *ebblock, *sbblock;
6786 MonoMethod *prev_inlined_method;
6787 MonoInst **prev_locals, **prev_args;
6788 MonoType **prev_arg_types;
6789 guint prev_real_offset;
6790 GHashTable *prev_cbb_hash;
6791 MonoBasicBlock **prev_cil_offset_to_bb;
6792 MonoBasicBlock *prev_cbb;
6793 unsigned char* prev_cil_start;
6794 guint32 prev_cil_offset_to_bb_len;
6795 MonoMethod *prev_current_method;
6796 MonoGenericContext *prev_generic_context;
6797 gboolean ret_var_set, prev_ret_var_set, prev_disable_inline, virtual = FALSE;
6799 g_assert (cfg->exception_type == MONO_EXCEPTION_NONE);
/* Optional env-var-driven debug filters on callee/caller names. */
6801 #if (MONO_INLINE_CALLED_LIMITED_METHODS)
6802 if ((! inline_always) && ! check_inline_called_method_name_limit (cmethod))
6805 #if (MONO_INLINE_CALLER_LIMITED_METHODS)
6806 if ((! inline_always) && ! check_inline_caller_method_name_limit (cfg->method))
6811 fsig = mono_method_signature (cmethod);
6813 if (cfg->verbose_level > 2)
6814 printf ("INLINE START %p %s -> %s\n", cmethod, mono_method_full_name (cfg->method, TRUE), mono_method_full_name (cmethod, TRUE));
6816 if (!cmethod->inline_info) {
6817 cfg->stat_inlineable_methods++;
6818 cmethod->inline_info = 1;
6821 /* allocate local variables */
6822 cheader = mono_method_get_header (cmethod);
6824 if (cheader == NULL || mono_loader_get_last_error ()) {
6825 MonoLoaderError *error = mono_loader_get_last_error ();
6828 mono_metadata_free_mh (cheader);
6829 if (inline_always && error)
6830 mono_cfg_set_exception (cfg, error->exception_type);
6832 mono_loader_clear_error ();
6836 /*Must verify before creating locals as it can cause the JIT to assert.*/
6837 if (mono_compile_is_broken (cfg, cmethod, FALSE)) {
6838 mono_metadata_free_mh (cheader);
6842 /* allocate space to store the return value */
6843 if (!MONO_TYPE_IS_VOID (fsig->ret)) {
6844 rvar = mono_compile_create_var (cfg, fsig->ret, OP_LOCAL);
6847 prev_locals = cfg->locals;
6848 cfg->locals = mono_mempool_alloc0 (cfg->mempool, cheader->num_locals * sizeof (MonoInst*));
6849 for (i = 0; i < cheader->num_locals; ++i)
6850 cfg->locals [i] = mono_compile_create_var (cfg, cheader->locals [i], OP_LOCAL);
6852 /* allocate start and end blocks */
6853 /* This is needed so if the inline is aborted, we can clean up */
6854 NEW_BBLOCK (cfg, sbblock);
6855 sbblock->real_offset = real_offset;
6857 NEW_BBLOCK (cfg, ebblock);
6858 ebblock->block_num = cfg->num_bblocks++;
6859 ebblock->real_offset = real_offset;
/* Save the caller's compile state before recursing into the callee. */
6861 prev_args = cfg->args;
6862 prev_arg_types = cfg->arg_types;
6863 prev_inlined_method = cfg->inlined_method;
6864 cfg->inlined_method = cmethod;
6865 cfg->ret_var_set = FALSE;
6866 cfg->inline_depth ++;
6867 prev_real_offset = cfg->real_offset;
6868 prev_cbb_hash = cfg->cbb_hash;
6869 prev_cil_offset_to_bb = cfg->cil_offset_to_bb;
6870 prev_cil_offset_to_bb_len = cfg->cil_offset_to_bb_len;
6871 prev_cil_start = cfg->cil_start;
6872 prev_cbb = cfg->cbb;
6873 prev_current_method = cfg->current_method;
6874 prev_generic_context = cfg->generic_context;
6875 prev_ret_var_set = cfg->ret_var_set;
6876 prev_disable_inline = cfg->disable_inline;
/* An inlined callvirt on an instance method needs a null check on 'this'. */
6878 if (ip && *ip == CEE_CALLVIRT && !(cmethod->flags & METHOD_ATTRIBUTE_STATIC))
6881 costs = mono_method_to_ir (cfg, cmethod, sbblock, ebblock, rvar, sp, real_offset, virtual);
6883 ret_var_set = cfg->ret_var_set;
/* Restore the caller's compile state. */
6885 cfg->inlined_method = prev_inlined_method;
6886 cfg->real_offset = prev_real_offset;
6887 cfg->cbb_hash = prev_cbb_hash;
6888 cfg->cil_offset_to_bb = prev_cil_offset_to_bb;
6889 cfg->cil_offset_to_bb_len = prev_cil_offset_to_bb_len;
6890 cfg->cil_start = prev_cil_start;
6891 cfg->locals = prev_locals;
6892 cfg->args = prev_args;
6893 cfg->arg_types = prev_arg_types;
6894 cfg->current_method = prev_current_method;
6895 cfg->generic_context = prev_generic_context;
6896 cfg->ret_var_set = prev_ret_var_set;
6897 cfg->disable_inline = prev_disable_inline;
6898 cfg->inline_depth --;
/* Accept the inline when the body was cheap enough (cost < 60) or forced. */
6900 if ((costs >= 0 && costs < 60) || inline_always) {
6901 if (cfg->verbose_level > 2)
6902 printf ("INLINE END %s -> %s\n", mono_method_full_name (cfg->method, TRUE), mono_method_full_name (cmethod, TRUE));
6904 cfg->stat_inlined_methods++;
6906 /* always add some code to avoid block split failures */
6907 MONO_INST_NEW (cfg, ins, OP_NOP);
6908 MONO_ADD_INS (prev_cbb, ins);
6910 prev_cbb->next_bb = sbblock;
6911 link_bblock (cfg, prev_cbb, sbblock);
6914 * Get rid of the begin and end bblocks if possible to aid local
6917 mono_merge_basic_blocks (cfg, prev_cbb, sbblock);
6919 if ((prev_cbb->out_count == 1) && (prev_cbb->out_bb [0]->in_count == 1) && (prev_cbb->out_bb [0] != ebblock))
6920 mono_merge_basic_blocks (cfg, prev_cbb, prev_cbb->out_bb [0]);
6922 if ((ebblock->in_count == 1) && ebblock->in_bb [0]->out_count == 1) {
6923 MonoBasicBlock *prev = ebblock->in_bb [0];
6924 mono_merge_basic_blocks (cfg, prev, ebblock);
6926 if ((prev_cbb->out_count == 1) && (prev_cbb->out_bb [0]->in_count == 1) && (prev_cbb->out_bb [0] == prev)) {
6927 mono_merge_basic_blocks (cfg, prev_cbb, prev);
6928 cfg->cbb = prev_cbb;
6932 * It's possible that the rvar is set in some prev bblock, but not in others.
/* Init rvar on predecessor paths that never set it (e.g. end in a throw). */
6938 for (i = 0; i < ebblock->in_count; ++i) {
6939 bb = ebblock->in_bb [i];
6941 if (bb->last_ins && bb->last_ins->opcode == OP_NOT_REACHED) {
6944 emit_init_rvar (cfg, rvar->dreg, fsig->ret);
6953 *out_cbb = cfg->cbb;
6957 * If the inlined method contains only a throw, then the ret var is not
6958 * set, so set it to a dummy value.
6961 emit_init_rvar (cfg, rvar->dreg, fsig->ret);
/* Load the return value so the caller finds it on the eval stack. */
6963 EMIT_NEW_TEMPLOAD (cfg, ins, rvar->inst_c0);
6966 cfg->headers_to_free = g_slist_prepend_mempool (cfg->mempool, cfg->headers_to_free, cheader);
/* Inline rejected: roll back. */
6969 if (cfg->verbose_level > 2)
6970 printf ("INLINE ABORTED %s (cost %d)\n", mono_method_full_name (cmethod, TRUE), costs);
6971 cfg->exception_type = MONO_EXCEPTION_NONE;
6972 mono_loader_clear_error ();
6974 /* This gets rid of the newly added bblocks */
6975 cfg->cbb = prev_cbb;
6977 cfg->headers_to_free = g_slist_prepend_mempool (cfg->mempool, cfg->headers_to_free, cheader);
6982 * Some of these comments may well be out-of-date.
6983 * Design decisions: we do a single pass over the IL code (and we do bblock
6984 * splitting/merging in the few cases when it's required: a back jump to an IL
6985 * address that was not already seen as bblock starting point).
6986 * Code is validated as we go (full verification is still better left to metadata/verify.c).
6987 * Complex operations are decomposed in simpler ones right away. We need to let the
6988 * arch-specific code peek and poke inside this process somehow (except when the
6989 * optimizations can take advantage of the full semantic info of coarse opcodes).
6990 * All the opcodes of the form opcode.s are 'normalized' to opcode.
6991 * MonoInst->opcode initially is the IL opcode or some simplification of that
6992 * (OP_LOAD, OP_STORE). The arch-specific code may rearrange it to an arch-specific
6993 * opcode with value bigger than OP_LAST.
6994 * At this point the IR can be handed over to an interpreter, a dumb code generator
6995 * or to the optimizing code generator that will translate it to SSA form.
6997 * Profiling directed optimizations.
6998 * We may compile by default with few or no optimizations and instrument the code
6999 * or the user may indicate what methods to optimize the most either in a config file
7000 * or through repeated runs where the compiler applies offline the optimizations to
7001 * each method and then decides if it was worth it.
/* Verification helpers used inside mono_method_to_ir (). They reference
 * locals of that function (sp, stack_start, header, num_args, ip, end, cfg)
 * and bail out via UNVERIFIED / TYPE_LOAD_ERROR on invalid IL. */
7004 #define CHECK_TYPE(ins) if (!(ins)->type) UNVERIFIED
7005 #define CHECK_STACK(num) if ((sp - stack_start) < (num)) UNVERIFIED
7006 #define CHECK_STACK_OVF(num) if (((sp - stack_start) + (num)) > header->max_stack) UNVERIFIED
7007 #define CHECK_ARG(num) if ((unsigned)(num) >= (unsigned)num_args) UNVERIFIED
7008 #define CHECK_LOCAL(num) if ((unsigned)(num) >= (unsigned)header->num_locals) UNVERIFIED
7009 #define CHECK_OPSIZE(size) if (ip + size > end) UNVERIFIED
7010 #define CHECK_UNVERIFIABLE(cfg) if (cfg->unverifiable) UNVERIFIED
7011 #define CHECK_TYPELOAD(klass) if (!(klass) || (klass)->exception_type) TYPE_LOAD_ERROR ((klass))
7013 /* offset from br.s -> br like opcodes */
7014 #define BIG_BRANCH_OFFSET 13
/*
 * ip_in_bb:
 *
 *   Return whether the IL address IP belongs to bblock BB, i.e. no other
 * bblock starts at that offset in cfg->cil_offset_to_bb.
 */
7017 ip_in_bb (MonoCompile *cfg, MonoBasicBlock *bb, const guint8* ip)
7019 MonoBasicBlock *b = cfg->cil_offset_to_bb [ip - cfg->cil_start];
7021 return b == NULL || b == bb;
/*
 * get_basic_blocks:
 *
 *   Scan the IL stream [START, END) and create a bblock (via GET_BBLOCK) at
 * every branch target and at the instruction following each branch, so the
 * CIL-to-IR pass finds all leaders precomputed. Also marks bblocks that
 * contain a throw as out-of-line (cold).
 */
7025 get_basic_blocks (MonoCompile *cfg, MonoMethodHeader* header, guint real_offset, unsigned char *start, unsigned char *end, unsigned char **pos)
7027 unsigned char *ip = start;
7028 unsigned char *target;
7031 MonoBasicBlock *bblock;
7032 const MonoOpcode *opcode;
7035 cli_addr = ip - start;
7036 i = mono_opcode_value ((const guint8 **)&ip, end);
7039 opcode = &mono_opcodes [i];
/* Advance ip by the operand size; branches additionally register targets. */
7040 switch (opcode->argument) {
7041 case MonoInlineNone:
7044 case MonoInlineString:
7045 case MonoInlineType:
7046 case MonoInlineField:
7047 case MonoInlineMethod:
7050 case MonoShortInlineR:
7057 case MonoShortInlineVar:
7058 case MonoShortInlineI:
7061 case MonoShortInlineBrTarget:
/* 1-byte signed displacement, relative to the end of the 2-byte insn. */
7062 target = start + cli_addr + 2 + (signed char)ip [1];
7063 GET_BBLOCK (cfg, bblock, target);
7066 GET_BBLOCK (cfg, bblock, ip);
7068 case MonoInlineBrTarget:
/* 4-byte signed displacement, relative to the end of the 5-byte insn. */
7069 target = start + cli_addr + 5 + (gint32)read32 (ip + 1);
7070 GET_BBLOCK (cfg, bblock, target);
7073 GET_BBLOCK (cfg, bblock, ip);
7075 case MonoInlineSwitch: {
7076 guint32 n = read32 (ip + 1);
/* Switch targets are relative to the end of the whole switch insn. */
7079 cli_addr += 5 + 4 * n;
7080 target = start + cli_addr;
7081 GET_BBLOCK (cfg, bblock, target);
7083 for (j = 0; j < n; ++j) {
7084 target = start + cli_addr + (gint32)read32 (ip);
7085 GET_BBLOCK (cfg, bblock, target);
7095 g_assert_not_reached ();
7098 if (i == CEE_THROW) {
7099 unsigned char *bb_start = ip - 1;
7101 /* Find the start of the bblock containing the throw */
7103 while ((bb_start >= start) && !bblock) {
7104 bblock = cfg->cil_offset_to_bb [(bb_start) - start];
/* Throwing blocks are cold: move them out of the hot code path. */
7108 bblock->out_of_line = 1;
/*
 * mini_get_method_allow_open:
 *
 *   Resolve TOKEN to a MonoMethod in the context of M. For wrapper methods
 * the method is taken from the wrapper data (and inflated if a generic
 * CONTEXT is given); otherwise it is looked up in M's image. Unlike
 * mini_get_method (), open constructed types are allowed through.
 */
7118 static inline MonoMethod *
7119 mini_get_method_allow_open (MonoMethod *m, guint32 token, MonoClass *klass, MonoGenericContext *context)
7123 if (m->wrapper_type != MONO_WRAPPER_NONE) {
7124 method = mono_method_get_wrapper_data (m, token);
7127 method = mono_class_inflate_generic_method_checked (method, context, &error);
7128 g_assert (mono_error_ok (&error)); /* FIXME don't swallow the error */
7131 method = mono_get_method_full (m->klass->image, token, klass, context);
/*
 * mini_get_method:
 *
 *   Like mini_get_method_allow_open (), but when not compiling with generic
 * sharing, methods on open constructed types are rejected (handled in the
 * lines elided here).
 */
7137 static inline MonoMethod *
7138 mini_get_method (MonoCompile *cfg, MonoMethod *m, guint32 token, MonoClass *klass, MonoGenericContext *context)
7140 MonoMethod *method = mini_get_method_allow_open (m, token, klass, context);
7142 if (method && cfg && !cfg->generic_sharing_context && mono_class_is_open_constructed_type (&method->klass->byval_arg))
/*
 * mini_get_class:
 *
 *   Resolve TOKEN to a MonoClass in the context of METHOD, taking it from
 * wrapper data (inflating with CONTEXT) for wrappers, otherwise from the
 * image, and ensure it is initialized.
 */
7148 static inline MonoClass*
7149 mini_get_class (MonoMethod *method, guint32 token, MonoGenericContext *context)
7154 if (method->wrapper_type != MONO_WRAPPER_NONE) {
7155 klass = mono_method_get_wrapper_data (method, token);
7157 klass = mono_class_inflate_generic_class (klass, context);
7159 klass = mono_class_get_and_inflate_typespec_checked (method->klass->image, token, context, &error);
7160 mono_error_cleanup (&error); /* FIXME don't swallow the error */
7163 mono_class_init (klass);
/*
 * mini_get_signature:
 *
 *   Resolve TOKEN to a MonoMethodSignature in the context of METHOD:
 * from wrapper data (inflated with CONTEXT) for wrappers, otherwise
 * parsed from the image's metadata.
 */
7167 static inline MonoMethodSignature*
7168 mini_get_signature (MonoMethod *method, guint32 token, MonoGenericContext *context)
7170 MonoMethodSignature *fsig;
7172 if (method->wrapper_type != MONO_WRAPPER_NONE) {
7175 fsig = (MonoMethodSignature *)mono_method_get_wrapper_data (method, token);
7177 fsig = mono_inflate_generic_signature (fsig, context, &error);
7179 g_assert (mono_error_ok (&error));
7182 fsig = mono_metadata_parse_signature (method->klass->image, token);
/*
 * check_linkdemand:
 *
 *   Evaluate CAS link demands for a call from CALLER to CALLEE.
 * Returns TRUE if the JIT should abort inlining because "callee"
 * is influenced by security attributes. For an ECMA link demand a call to
 * SecurityManager's exception thrower is emitted inline; other failures
 * record MONO_EXCEPTION_SECURITY_LINKDEMAND on the cfg.
 */
7188 * Returns TRUE if the JIT should abort inlining because "callee"
7189 * is influenced by security attributes.
7192 gboolean check_linkdemand (MonoCompile *cfg, MonoMethod *caller, MonoMethod *callee)
/* When inlining (cfg->method != caller), declarative security on the callee
 * makes the call site too complex to inline safely. */
7196 if ((cfg->method != caller) && mono_security_method_has_declsec (callee)) {
7200 result = mono_declsec_linkdemand (cfg->domain, caller, callee);
7201 if (result == MONO_JIT_SECURITY_OK)
7204 if (result == MONO_JIT_LINKDEMAND_ECMA) {
7205 /* Generate code to throw a SecurityException before the actual call/link */
7206 MonoSecurityManager *secman = mono_security_manager_get_methods ();
7209 NEW_ICONST (cfg, args [0], 4);
7210 NEW_METHODCONST (cfg, args [1], caller);
7211 mono_emit_method_call (cfg, secman->linkdemandsecurityexception, args, NULL);
7212 } else if (cfg->exception_type == MONO_EXCEPTION_NONE) {
7213 /* don't hide previous results */
7214 mono_cfg_set_exception (cfg, MONO_EXCEPTION_SECURITY_LINKDEMAND);
7215 cfg->exception_data = result;
/*
 * throw_exception:
 *
 *   Return (lazily resolving and caching in a static) the managed
 * SecurityManager.ThrowException(exception) method.
 */
7223 throw_exception (void)
7225 static MonoMethod *method = NULL;
7228 MonoSecurityManager *secman = mono_security_manager_get_methods ();
7229 method = mono_class_get_method_from_name (secman->securitymanager, "ThrowException", 1);
/*
 * emit_throw_exception:
 *
 *   Emit a call to SecurityManager.ThrowException (EX) at the current
 * position, so EX is raised at runtime when this code path executes.
 */
7236 emit_throw_exception (MonoCompile *cfg, MonoException *ex)
7238 MonoMethod *thrower = throw_exception ();
7241 EMIT_NEW_PCONST (cfg, args [0], ex);
7242 mono_emit_method_call (cfg, thrower, args, NULL);
/*
 * get_original_method:
 *
 *   Return the original method if a wrapper is specified. We can only access
 * the custom attributes from the original method.
 */
7250 get_original_method (MonoMethod *method)
7252 if (method->wrapper_type == MONO_WRAPPER_NONE)
7255 /* native code (which is like Critical) can call any managed method XXX FIXME XXX to validate all usages */
7256 if (method->wrapper_type == MONO_WRAPPER_NATIVE_TO_MANAGED)
7259 /* in other cases we need to find the original method */
7260 return mono_marshal_method_from_wrapper (method);
/*
 * ensure_method_is_allowed_to_access_field:
 *
 *   CoreCLR security check: if CALLER may not access FIELD, emit code that
 * throws the corresponding security exception at this point.
 */
7264 ensure_method_is_allowed_to_access_field (MonoCompile *cfg, MonoMethod *caller, MonoClassField *field,
7265 MonoBasicBlock *bblock, unsigned char *ip)
7267 /* we can't get the coreclr security level on wrappers since they don't have the attributes */
7268 MonoException *ex = mono_security_core_clr_is_field_access_allowed (get_original_method (caller), field);
7270 emit_throw_exception (cfg, ex);
/*
 * ensure_method_is_allowed_to_call_method:
 *
 *   CoreCLR security check: if CALLER may not call CALLEE, emit code that
 * throws the corresponding security exception at this point.
 */
7274 ensure_method_is_allowed_to_call_method (MonoCompile *cfg, MonoMethod *caller, MonoMethod *callee,
7275 MonoBasicBlock *bblock, unsigned char *ip)
7277 /* we can't get the coreclr security level on wrappers since they don't have the attributes */
7278 MonoException *ex = mono_security_core_clr_is_call_allowed (get_original_method (caller), callee);
7280 emit_throw_exception (cfg, ex);
/*
 * initialize_array_data:
 *
 *   Recognize the dup/ldtoken/call RuntimeHelpers::InitializeArray IL
 * pattern following a newarr, and if it matches return a pointer to the
 * static initialization data (or, for AOT, the RVA wrapped with
 * GUINT_TO_POINTER) with *OUT_SIZE and *OUT_FIELD_TOKEN filled in.
 */
7284 * Check that the IL instructions at ip are the array initialization
7285 * sequence and return the pointer to the data and the size.
7288 initialize_array_data (MonoMethod *method, gboolean aot, unsigned char *ip, MonoClass *klass, guint32 len, int *out_size, guint32 *out_field_token)
7291 * newarr[System.Int32]
7293 * ldtoken field valuetype ...
7294 * call void class [mscorlib]System.Runtime.CompilerServices.RuntimeHelpers::InitializeArray(class [mscorlib]System.Array, valuetype [mscorlib]System.RuntimeFieldHandle)
/* ip[5] == 0x4 checks the ldtoken token's table byte (a Field token). */
7296 if (ip [0] == CEE_DUP && ip [1] == CEE_LDTOKEN && ip [5] == 0x4 && ip [6] == CEE_CALL) {
7298 guint32 token = read32 (ip + 7);
7299 guint32 field_token = read32 (ip + 2);
7300 guint32 field_index = field_token & 0xffffff;
7302 const char *data_ptr;
7304 MonoMethod *cmethod;
7305 MonoClass *dummy_class;
7306 MonoClassField *field = mono_field_from_token_checked (method->klass->image, field_token, &dummy_class, NULL, &error);
7310 mono_error_cleanup (&error); /* FIXME don't swallow the error */
7314 *out_field_token = field_token;
7316 cmethod = mini_get_method (NULL, method, token, NULL, NULL);
/* Only the corlib RuntimeHelpers.InitializeArray call qualifies. */
7319 if (strcmp (cmethod->name, "InitializeArray") || strcmp (cmethod->klass->name, "RuntimeHelpers") || cmethod->klass->image != mono_defaults.corlib)
7321 switch (mono_type_get_underlying_type (&klass->byval_arg)->type) {
7322 case MONO_TYPE_BOOLEAN:
7326 /* we need to swap on big endian, so punt. Should we handle R4 and R8 as well? */
7327 #if TARGET_BYTE_ORDER == G_LITTLE_ENDIAN
7328 case MONO_TYPE_CHAR:
/* The computed array data size must fit inside the RVA field. */
7345 if (size > mono_type_size (field->type, &dummy_align))
7348 /*g_print ("optimized in %s: size: %d, numelems: %d\n", method->name, size, newarr->inst_newa_len->inst_c0);*/
7349 if (!image_is_dynamic (method->klass->image)) {
7350 field_index = read32 (ip + 2) & 0xffffff;
7351 mono_metadata_field_info (method->klass->image, field_index - 1, NULL, &rva, NULL);
7352 data_ptr = mono_image_rva_map (method->klass->image, rva);
7353 /*g_print ("field: 0x%08x, rva: %d, rva_ptr: %p\n", read32 (ip + 2), rva, data_ptr);*/
7354 /* for aot code we do the lookup on load */
7355 if (aot && data_ptr)
7356 return GUINT_TO_POINTER (rva);
7358 /*FIXME is it possible to AOT a SRE assembly not meant to be saved? */
7360 data_ptr = mono_field_get_data (field);
/*
 * set_exception_type_from_invalid_il:
 *
 *   Record an InvalidProgramException on CFG, building a message that
 * includes the method's full name and a disassembly of the offending IL
 * instruction at IP (or a note that the body is empty).
 */
7368 set_exception_type_from_invalid_il (MonoCompile *cfg, MonoMethod *method, unsigned char *ip)
7370 char *method_fname = mono_method_full_name (method, TRUE);
7372 MonoMethodHeader *header = mono_method_get_header (method);
7374 if (header->code_size == 0)
7375 method_code = g_strdup ("method body is empty.");
7377 method_code = mono_disasm_code_one (NULL, method, ip, NULL);
7378 mono_cfg_set_exception (cfg, MONO_EXCEPTION_INVALID_PROGRAM);
7379 cfg->exception_message = g_strdup_printf ("Invalid IL code in %s: %s\n", method_fname, method_code);
7380 g_free (method_fname);
7381 g_free (method_code);
/* Defer freeing the header until the end of the compile. */
7382 cfg->headers_to_free = g_slist_prepend_mempool (cfg->mempool, cfg->headers_to_free, header);
/*
 * set_exception_object:
 *
 *   Abort the compile with a pre-built exception object; the pointer is
 * registered as a GC root so the object survives until it is thrown.
 */
7386 set_exception_object (MonoCompile *cfg, MonoException *exception)
7388 mono_cfg_set_exception (cfg, MONO_EXCEPTION_OBJECT_SUPPLIED);
7389 MONO_GC_REGISTER_ROOT_SINGLE (cfg->exception_ptr);
7390 cfg->exception_ptr = exception;
/*
 * emit_stloc_ir:
 *
 *   Emit a store of *SP into local N. When the value on top of the stack is
 * a constant just emitted into the current bblock, the move is optimized
 * away by retargeting the constant's dreg to the local's dreg.
 */
7394 emit_stloc_ir (MonoCompile *cfg, MonoInst **sp, MonoMethodHeader *header, int n)
7397 guint32 opcode = mono_type_to_regmove (cfg, header->locals [n]);
7398 if ((opcode == OP_MOVE) && cfg->cbb->last_ins == sp [0] &&
7399 ((sp [0]->opcode == OP_ICONST) || (sp [0]->opcode == OP_I8CONST))) {
7400 /* Optimize reg-reg moves away */
7402 * Can't optimize other opcodes, since sp[0] might point to
7403 * the last ins of a decomposed opcode.
7405 sp [0]->dreg = (cfg)->locals [n]->dreg;
7407 EMIT_NEW_LOCSTORE (cfg, ins, n, *sp);
/*
 * emit_optimized_ldloca_ir:
 *
 *   ldloca inhibits many optimizations, so try to get rid of it in common
 * cases: when a ldloca of SIZE bytes is immediately followed by initobj in
 * the same bblock, initialize the local directly and let the caller skip
 * past the consumed IL. Returns the new ip on success (tail not visible
 * here).
 */
7412 * ldloca inhibits many optimizations so try to get rid of it in common
7415 static inline unsigned char *
7416 emit_optimized_ldloca_ir (MonoCompile *cfg, unsigned char *ip, unsigned char *end, int size)
7426 local = read16 (ip + 2);
7430 if (ip + 6 < end && (ip [0] == CEE_PREFIX1) && (ip [1] == CEE_INITOBJ) && ip_in_bb (cfg, cfg->cbb, ip + 1)) {
7431 /* From the INITOBJ case */
7432 token = read32 (ip + 2);
7433 klass = mini_get_class (cfg->current_method, token, cfg->generic_context);
7434 CHECK_TYPELOAD (klass);
7435 type = mini_get_underlying_type (cfg, &klass->byval_arg);
/* Zero-init the local in place instead of ldloca + initobj. */
7436 emit_init_local (cfg, local, type, TRUE);
/*
 * is_exception_class:
 *
 *   Return whether CLASS is System.Exception or derives from it, walking
 * the parent chain.
 */
7444 is_exception_class (MonoClass *class)
7447 if (class == mono_defaults.exception_class)
7449 class = class->parent;
/*
 * is_jit_optimizer_disabled:
 *
 *   Determine whether M's assembly has a DebuggableAttribute with the
 * IsJITOptimizerDisabled flag set. The result is cached on the assembly;
 * the memory barrier before setting jit_optimizer_disabled_inited makes
 * the cached value safe to publish to other threads.
 */
7461 is_jit_optimizer_disabled (MonoMethod *m)
7463 MonoAssembly *ass = m->klass->image->assembly;
7464 MonoCustomAttrInfo* attrs;
7465 static MonoClass *klass;
7467 gboolean val = FALSE;
7470 if (ass->jit_optimizer_disabled_inited)
7471 return ass->jit_optimizer_disabled;
7474 klass = mono_class_from_name (mono_defaults.corlib, "System.Diagnostics", "DebuggableAttribute");
/* DebuggableAttribute not found in corlib: default to optimizations on. */
7477 ass->jit_optimizer_disabled = FALSE;
7478 mono_memory_barrier ();
7479 ass->jit_optimizer_disabled_inited = TRUE;
7483 attrs = mono_custom_attrs_from_assembly (ass);
7485 for (i = 0; i < attrs->num_attrs; ++i) {
7486 MonoCustomAttrEntry *attr = &attrs->attrs [i];
7488 MonoMethodSignature *sig;
7490 if (!attr->ctor || attr->ctor->klass != klass)
7492 /* Decode the attribute. See reflection.c */
7493 p = (const char*)attr->data;
/* Custom attribute blobs start with the 0x0001 prolog. */
7494 g_assert (read16 (p) == 0x0001);
7497 // FIXME: Support named parameters
7498 sig = mono_method_signature (attr->ctor);
7499 if (sig->param_count != 2 || sig->params [0]->type != MONO_TYPE_BOOLEAN || sig->params [1]->type != MONO_TYPE_BOOLEAN)
7501 /* Two boolean arguments */
7505 mono_custom_attrs_free (attrs);
7508 ass->jit_optimizer_disabled = val;
7509 mono_memory_barrier ();
7510 ass->jit_optimizer_disabled_inited = TRUE;
/*
 * is_supported_tail_call:
 *
 *   Decide whether the call METHOD -> CMETHOD with signature FSIG and IL
 * opcode CALL_OPCODE can be compiled as a tail call. Starts from an
 * arch-specific (or signature-equality) check, then vetoes cases where
 * arguments or 'this' could point into the current frame, pinvokes,
 * methods needing an LMF, most wrappers, and anything but plain CEE_CALL.
 */
7516 is_supported_tail_call (MonoCompile *cfg, MonoMethod *method, MonoMethod *cmethod, MonoMethodSignature *fsig, int call_opcode)
7518 gboolean supported_tail_call;
7521 #ifdef MONO_ARCH_HAVE_OP_TAIL_CALL
7522 supported_tail_call = mono_arch_tail_call_supported (cfg, mono_method_signature (method), mono_method_signature (cmethod));
/* Fallback: identical signatures with a non-struct return are safe. */
7524 supported_tail_call = mono_metadata_signature_equal (mono_method_signature (method), mono_method_signature (cmethod)) && !MONO_TYPE_ISSTRUCT (mono_method_signature (cmethod)->ret);
7527 for (i = 0; i < fsig->param_count; ++i) {
7528 if (fsig->params [i]->byref || fsig->params [i]->type == MONO_TYPE_PTR || fsig->params [i]->type == MONO_TYPE_FNPTR)
7529 /* These can point to the current method's stack */
7530 supported_tail_call = FALSE;
7532 if (fsig->hasthis && cmethod->klass->valuetype)
7533 /* this might point to the current method's stack */
7534 supported_tail_call = FALSE;
7535 if (cmethod->flags & METHOD_ATTRIBUTE_PINVOKE_IMPL)
7536 supported_tail_call = FALSE;
7537 if (cfg->method->save_lmf)
7538 supported_tail_call = FALSE;
7539 if (cmethod->wrapper_type && cmethod->wrapper_type != MONO_WRAPPER_DYNAMIC_METHOD)
7540 supported_tail_call = FALSE;
7541 if (call_opcode != CEE_CALL)
7542 supported_tail_call = FALSE;
7544 /* Debugging support */
7546 if (supported_tail_call) {
7547 if (!mono_debug_count ())
7548 supported_tail_call = FALSE;
7552 return supported_tail_call;
7555 /* the JIT intercepts ldflda instructions to the tlsdata field in ThreadLocal<T> and redirects
7556 * it to the thread local value based on the tls_offset field. Every other kind of access to
7557 * the field causes an assert.
/*
 * is_magic_tls_access:
 *
 *   Return whether FIELD is the corlib ThreadLocal`1.tlsdata field whose
 * accesses the JIT rewrites (see comment above).
 */
7560 is_magic_tls_access (MonoClassField *field)
7562 if (strcmp (field->name, "tlsdata"))
7564 if (strcmp (field->parent->name, "ThreadLocal`1"))
7566 return field->parent->image == mono_defaults.corlib;
7569 /* emits the code needed to access a managed tls var (like ThreadStatic)
7570 * with the value of the tls offset in offset_reg. thread_ins represents the MonoInternalThread
7571 * pointer for the current thread.
7572 * Returns the MonoInst* representing the address of the tls var.
7575 emit_managed_static_data_access (MonoCompile *cfg, MonoInst *thread_ins, int offset_reg)
7578 int static_data_reg, array_reg, dreg;
7579 int offset2_reg, idx_reg;
7580 // inlined access to the tls data
7581 // idx = (offset >> 24) - 1;
7582 // return ((char*) thread->static_data [idx]) + (offset & 0xffffff);
/* Load thread->static_data (array of per-index data chunks). */
7583 static_data_reg = alloc_ireg (cfg);
7584 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, static_data_reg, thread_ins->dreg, MONO_STRUCT_OFFSET (MonoInternalThread, static_data));
/* idx = (offset >> 24) - 1, then scale by pointer size to index the array. */
7585 idx_reg = alloc_ireg (cfg);
7586 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_ISHR_IMM, idx_reg, offset_reg, 24);
7587 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_ISUB_IMM, idx_reg, idx_reg, 1);
7588 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_ISHL_IMM, idx_reg, idx_reg, sizeof (gpointer) == 8 ? 3 : 2);
7589 MONO_EMIT_NEW_BIALU (cfg, OP_PADD, static_data_reg, static_data_reg, idx_reg);
7590 array_reg = alloc_ireg (cfg);
7591 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, array_reg, static_data_reg, 0);
/* Low 24 bits of the offset are the byte offset inside the chunk. */
7592 offset2_reg = alloc_ireg (cfg);
7593 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_IAND_IMM, offset2_reg, offset_reg, 0xffffff);
7594 dreg = alloc_ireg (cfg);
7595 EMIT_NEW_BIALU (cfg, addr, OP_PADD, dreg, array_reg, offset2_reg);
/*
 * create_magic_tls_access:
 *
 *   Redirect access to the tlsdata field to the tls var given by the
 * tls_offset field; the computed address is cached per-method in
 * CACHED_TLS_ADDR so subsequent accesses just reload the cached temp.
 */
7600 * redirect access to the tlsdata field to the tls var given by the tls_offset field.
7601 * this address is cached per-method in cached_tls_addr.
7604 create_magic_tls_access (MonoCompile *cfg, MonoClassField *tls_field, MonoInst **cached_tls_addr, MonoInst *thread_local)
7606 MonoInst *load, *addr, *temp, *store, *thread_ins;
7607 MonoClassField *offset_field;
7609 if (*cached_tls_addr) {
/* Fast path: address already computed for this method. */
7610 EMIT_NEW_TEMPLOAD (cfg, addr, (*cached_tls_addr)->inst_c0);
7613 thread_ins = mono_get_thread_intrinsic (cfg);
7614 offset_field = mono_class_get_field_from_name (tls_field->parent, "tls_offset");
/* Load the ThreadLocal<T> instance's tls_offset field. */
7616 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, load, offset_field->type, thread_local->dreg, offset_field->offset);
7618 MONO_ADD_INS (cfg->cbb, thread_ins);
/* No thread intrinsic on this arch: fall back to a managed call. */
7620 MonoMethod *thread_method;
7621 thread_method = mono_class_get_method_from_name (mono_get_thread_class(), "CurrentInternalThread_internal", 0);
7622 thread_ins = mono_emit_method_call (cfg, thread_method, NULL, NULL);
7624 addr = emit_managed_static_data_access (cfg, thread_ins, load->dreg);
7625 addr->klass = mono_class_from_mono_type (tls_field->type);
7626 addr->type = STACK_MP;
/* Cache the address in a temp for later accesses in this method. */
7627 *cached_tls_addr = temp = mono_compile_create_var (cfg, type_from_stack_type (addr), OP_LOCAL);
7628 EMIT_NEW_TEMPSTORE (cfg, store, temp->inst_c0, addr);
7630 EMIT_NEW_TEMPLOAD (cfg, addr, temp->inst_c0);
/*
 * handle_ctor_call:
 *
 *   Handle calls made to ctors from NEWOBJ opcodes: picks between a ctor
 * intrinsic, inlining the ctor body, a gsharedvt indirect call, an rgctx
 * indirect call for unpatchable shared-generic cases, and a plain direct
 * call — passing along the vtable/mrgctx argument when generic sharing
 * requires one.
 *
 * REF_BBLOCK will point to the current bblock after the call.
 */
7637 * Handle calls made to ctors from NEWOBJ opcodes.
7639 * REF_BBLOCK will point to the current bblock after the call.
7642 handle_ctor_call (MonoCompile *cfg, MonoMethod *cmethod, MonoMethodSignature *fsig, int context_used,
7643 MonoInst **sp, guint8 *ip, MonoBasicBlock **ref_bblock, int *inline_costs)
7645 MonoInst *vtable_arg = NULL, *callvirt_this_arg = NULL, *ins;
7646 MonoBasicBlock *bblock = *ref_bblock;
/* Shared generic valuetype ctors need an extra mrgctx/vtable argument. */
7648 if (cmethod->klass->valuetype && mono_class_generic_sharing_enabled (cmethod->klass) &&
7649 mono_method_is_generic_sharable (cmethod, TRUE)) {
7650 if (cmethod->is_inflated && mono_method_get_context (cmethod)->method_inst) {
7651 mono_class_vtable (cfg->domain, cmethod->klass);
7652 CHECK_TYPELOAD (cmethod->klass);
7654 vtable_arg = emit_get_rgctx_method (cfg, context_used,
7655 cmethod, MONO_RGCTX_INFO_METHOD_RGCTX);
7658 vtable_arg = emit_get_rgctx_klass (cfg, context_used,
7659 cmethod->klass, MONO_RGCTX_INFO_VTABLE);
7661 MonoVTable *vtable = mono_class_vtable (cfg->domain, cmethod->klass);
7663 CHECK_TYPELOAD (cmethod->klass);
7664 EMIT_NEW_VTABLECONST (cfg, vtable_arg, vtable);
7669 /* Avoid virtual calls to ctors if possible */
7670 if (mono_class_is_marshalbyref (cmethod->klass))
7671 callvirt_this_arg = sp [0];
7673 if (cmethod && (cfg->opt & MONO_OPT_INTRINS) && (ins = mini_emit_inst_for_ctor (cfg, cmethod, fsig, sp))) {
7674 g_assert (MONO_TYPE_IS_VOID (fsig->ret));
7675 CHECK_CFG_EXCEPTION;
7676 } else if ((cfg->opt & MONO_OPT_INLINE) && cmethod && !context_used && !vtable_arg &&
7677 mono_method_check_inlining (cfg, cmethod) &&
7678 !mono_class_is_subclass_of (cmethod->klass, mono_defaults.exception_class, FALSE)) {
7681 if ((costs = inline_method (cfg, cmethod, fsig, sp, ip, cfg->real_offset, FALSE, &bblock))) {
/* 5 = length of the call IL instruction the inline replaced. */
7682 cfg->real_offset += 5;
7684 *inline_costs += costs - 5;
7685 *ref_bblock = bblock;
7687 INLINE_FAILURE ("inline failure");
7688 // FIXME-VT: Clean this up
7689 if (cfg->gsharedvt && mini_is_gsharedvt_signature (cfg, fsig))
7690 GSHAREDVT_FAILURE(*ip);
7691 mono_emit_method_call_full (cfg, cmethod, fsig, FALSE, sp, callvirt_this_arg, NULL, NULL);
7693 } else if (cfg->gsharedvt && mini_is_gsharedvt_signature (cfg, fsig)) {
/* Gsharedvt signature: call through the out trampoline. */
7696 addr = emit_get_rgctx_gsharedvt_call (cfg, context_used, fsig, cmethod, MONO_RGCTX_INFO_METHOD_GSHAREDVT_OUT_TRAMPOLINE);
7697 mono_emit_calli (cfg, fsig, sp, addr, NULL, vtable_arg);
7698 } else if (context_used &&
7699 ((!mono_method_is_generic_sharable_full (cmethod, TRUE, FALSE, FALSE) ||
7700 !mono_class_generic_sharing_enabled (cmethod->klass)) || cfg->gsharedvt)) {
7701 MonoInst *cmethod_addr;
7703 /* Generic calls made out of gsharedvt methods cannot be patched, so use an indirect call */
7705 cmethod_addr = emit_get_rgctx_method (cfg, context_used,
7706 cmethod, MONO_RGCTX_INFO_GENERIC_METHOD_CODE);
7708 mono_emit_calli (cfg, fsig, sp, cmethod_addr, NULL, vtable_arg);
/* Default: direct (non-inlined) call to the ctor. */
7710 INLINE_FAILURE ("ctor call");
7711 ins = mono_emit_method_call_full (cfg, cmethod, fsig, FALSE, sp,
7712 callvirt_this_arg, NULL, vtable_arg);
7719 * mono_method_to_ir:
7721 * Translate the .net IL into linear IR.
7724 mono_method_to_ir (MonoCompile *cfg, MonoMethod *method, MonoBasicBlock *start_bblock, MonoBasicBlock *end_bblock,
7725 MonoInst *return_var, MonoInst **inline_args,
7726 guint inline_offset, gboolean is_virtual_call)
7729 MonoInst *ins, **sp, **stack_start;
7730 MonoBasicBlock *bblock, *tblock = NULL, *init_localsbb = NULL;
7731 MonoSimpleBasicBlock *bb = NULL, *original_bb = NULL;
7732 MonoMethod *cmethod, *method_definition;
7733 MonoInst **arg_array;
7734 MonoMethodHeader *header;
7736 guint32 token, ins_flag;
7738 MonoClass *constrained_class = NULL;
7739 unsigned char *ip, *end, *target, *err_pos;
7740 MonoMethodSignature *sig;
7741 MonoGenericContext *generic_context = NULL;
7742 MonoGenericContainer *generic_container = NULL;
7743 MonoType **param_types;
7744 int i, n, start_new_bblock, dreg;
7745 int num_calls = 0, inline_costs = 0;
7746 int breakpoint_id = 0;
7748 MonoBoolean security, pinvoke;
7749 MonoSecurityManager* secman = NULL;
7750 MonoDeclSecurityActions actions;
7751 GSList *class_inits = NULL;
7752 gboolean dont_verify, dont_verify_stloc, readonly = FALSE;
7754 gboolean init_locals, seq_points, skip_dead_blocks;
7755 gboolean sym_seq_points = FALSE;
7756 MonoInst *cached_tls_addr = NULL;
7757 MonoDebugMethodInfo *minfo;
7758 MonoBitSet *seq_point_locs = NULL;
7759 MonoBitSet *seq_point_set_locs = NULL;
7761 cfg->disable_inline = is_jit_optimizer_disabled (method);
7763 /* serialization and xdomain stuff may need access to private fields and methods */
7764 dont_verify = method->klass->image->assembly->corlib_internal? TRUE: FALSE;
7765 dont_verify |= method->wrapper_type == MONO_WRAPPER_XDOMAIN_INVOKE;
7766 dont_verify |= method->wrapper_type == MONO_WRAPPER_XDOMAIN_DISPATCH;
7767 dont_verify |= method->wrapper_type == MONO_WRAPPER_MANAGED_TO_NATIVE; /* bug #77896 */
7768 dont_verify |= method->wrapper_type == MONO_WRAPPER_COMINTEROP;
7769 dont_verify |= method->wrapper_type == MONO_WRAPPER_COMINTEROP_INVOKE;
7771 dont_verify |= mono_security_smcs_hack_enabled ();
7773 /* still some type unsafety issues in marshal wrappers... (unknown is PtrToStructure) */
7774 dont_verify_stloc = method->wrapper_type == MONO_WRAPPER_MANAGED_TO_NATIVE;
7775 dont_verify_stloc |= method->wrapper_type == MONO_WRAPPER_UNKNOWN;
7776 dont_verify_stloc |= method->wrapper_type == MONO_WRAPPER_NATIVE_TO_MANAGED;
7777 dont_verify_stloc |= method->wrapper_type == MONO_WRAPPER_STELEMREF;
7779 image = method->klass->image;
7780 header = mono_method_get_header (method);
7782 MonoLoaderError *error;
7784 if ((error = mono_loader_get_last_error ())) {
7785 mono_cfg_set_exception (cfg, error->exception_type);
7787 mono_cfg_set_exception (cfg, MONO_EXCEPTION_INVALID_PROGRAM);
7788 cfg->exception_message = g_strdup_printf ("Missing or incorrect header for method %s", cfg->method->name);
7790 goto exception_exit;
7792 generic_container = mono_method_get_generic_container (method);
7793 sig = mono_method_signature (method);
7794 num_args = sig->hasthis + sig->param_count;
7795 ip = (unsigned char*)header->code;
7796 cfg->cil_start = ip;
7797 end = ip + header->code_size;
7798 cfg->stat_cil_code_size += header->code_size;
7800 seq_points = cfg->gen_seq_points && cfg->method == method;
7801 #ifdef PLATFORM_ANDROID
7802 seq_points &= cfg->method->wrapper_type == MONO_WRAPPER_NONE;
7805 if (method->wrapper_type == MONO_WRAPPER_NATIVE_TO_MANAGED) {
7806 /* We could hit a seq point before attaching to the JIT (#8338) */
7810 if (cfg->gen_seq_points_debug_data && cfg->method == method) {
7811 minfo = mono_debug_lookup_method (method);
7813 int i, n_il_offsets;
7817 mono_debug_symfile_get_line_numbers_full (minfo, NULL, NULL, &n_il_offsets, &il_offsets, &line_numbers, NULL, NULL, NULL, NULL);
7818 seq_point_locs = mono_bitset_mem_new (mono_mempool_alloc0 (cfg->mempool, mono_bitset_alloc_size (header->code_size, 0)), header->code_size, 0);
7819 seq_point_set_locs = mono_bitset_mem_new (mono_mempool_alloc0 (cfg->mempool, mono_bitset_alloc_size (header->code_size, 0)), header->code_size, 0);
7820 sym_seq_points = TRUE;
7821 for (i = 0; i < n_il_offsets; ++i) {
7822 if (il_offsets [i] < header->code_size)
7823 mono_bitset_set_fast (seq_point_locs, il_offsets [i]);
7825 g_free (il_offsets);
7826 g_free (line_numbers);
7827 } else if (!method->wrapper_type && !method->dynamic && mono_debug_image_has_debug_info (method->klass->image)) {
7828 /* Methods without line number info like auto-generated property accessors */
7829 seq_point_locs = mono_bitset_mem_new (mono_mempool_alloc0 (cfg->mempool, mono_bitset_alloc_size (header->code_size, 0)), header->code_size, 0);
7830 seq_point_set_locs = mono_bitset_mem_new (mono_mempool_alloc0 (cfg->mempool, mono_bitset_alloc_size (header->code_size, 0)), header->code_size, 0);
7831 sym_seq_points = TRUE;
7836 * Methods without init_locals set could cause asserts in various passes
7837 * (#497220). To work around this, we emit dummy initialization opcodes
7838 * (OP_DUMMY_ICONST etc.) which generate no code. These are only supported
7839 * on some platforms.
7841 if ((cfg->opt & MONO_OPT_UNSAFE) && ARCH_HAVE_DUMMY_INIT)
7842 init_locals = header->init_locals;
7846 method_definition = method;
7847 while (method_definition->is_inflated) {
7848 MonoMethodInflated *imethod = (MonoMethodInflated *) method_definition;
7849 method_definition = imethod->declaring;
7852 /* SkipVerification is not allowed if core-clr is enabled */
7853 if (!dont_verify && mini_assembly_can_skip_verification (cfg->domain, method)) {
7855 dont_verify_stloc = TRUE;
7858 if (sig->is_inflated)
7859 generic_context = mono_method_get_context (method);
7860 else if (generic_container)
7861 generic_context = &generic_container->context;
7862 cfg->generic_context = generic_context;
7864 if (!cfg->generic_sharing_context)
7865 g_assert (!sig->has_type_parameters);
7867 if (sig->generic_param_count && method->wrapper_type == MONO_WRAPPER_NONE) {
7868 g_assert (method->is_inflated);
7869 g_assert (mono_method_get_context (method)->method_inst);
7871 if (method->is_inflated && mono_method_get_context (method)->method_inst)
7872 g_assert (sig->generic_param_count);
7874 if (cfg->method == method) {
7875 cfg->real_offset = 0;
7877 cfg->real_offset = inline_offset;
7880 cfg->cil_offset_to_bb = mono_mempool_alloc0 (cfg->mempool, sizeof (MonoBasicBlock*) * header->code_size);
7881 cfg->cil_offset_to_bb_len = header->code_size;
7883 cfg->current_method = method;
7885 if (cfg->verbose_level > 2)
7886 printf ("method to IR %s\n", mono_method_full_name (method, TRUE));
7888 param_types = mono_mempool_alloc (cfg->mempool, sizeof (MonoType*) * num_args);
7890 param_types [0] = method->klass->valuetype?&method->klass->this_arg:&method->klass->byval_arg;
7891 for (n = 0; n < sig->param_count; ++n)
7892 param_types [n + sig->hasthis] = sig->params [n];
7893 cfg->arg_types = param_types;
7895 cfg->dont_inline = g_list_prepend (cfg->dont_inline, method);
7896 if (cfg->method == method) {
7898 if (cfg->prof_options & MONO_PROFILE_INS_COVERAGE)
7899 cfg->coverage_info = mono_profiler_coverage_alloc (cfg->method, header->code_size);
7902 NEW_BBLOCK (cfg, start_bblock);
7903 cfg->bb_entry = start_bblock;
7904 start_bblock->cil_code = NULL;
7905 start_bblock->cil_length = 0;
7906 #if defined(__native_client_codegen__)
7907 MONO_INST_NEW (cfg, ins, OP_NACL_GC_SAFE_POINT);
7908 ins->dreg = alloc_dreg (cfg, STACK_I4);
7909 MONO_ADD_INS (start_bblock, ins);
7913 NEW_BBLOCK (cfg, end_bblock);
7914 cfg->bb_exit = end_bblock;
7915 end_bblock->cil_code = NULL;
7916 end_bblock->cil_length = 0;
7917 end_bblock->flags |= BB_INDIRECT_JUMP_TARGET;
7918 g_assert (cfg->num_bblocks == 2);
7920 arg_array = cfg->args;
7922 if (header->num_clauses) {
7923 cfg->spvars = g_hash_table_new (NULL, NULL);
7924 cfg->exvars = g_hash_table_new (NULL, NULL);
7926 /* handle exception clauses */
7927 for (i = 0; i < header->num_clauses; ++i) {
7928 MonoBasicBlock *try_bb;
7929 MonoExceptionClause *clause = &header->clauses [i];
7930 GET_BBLOCK (cfg, try_bb, ip + clause->try_offset);
7931 try_bb->real_offset = clause->try_offset;
7932 try_bb->try_start = TRUE;
7933 try_bb->region = ((i + 1) << 8) | clause->flags;
7934 GET_BBLOCK (cfg, tblock, ip + clause->handler_offset);
7935 tblock->real_offset = clause->handler_offset;
7936 tblock->flags |= BB_EXCEPTION_HANDLER;
7939 * Linking the try block with the EH block hinders inlining as we won't be able to
7940 * merge the bblocks from inlining and produce an artificial hole for no good reason.
7942 if (COMPILE_LLVM (cfg))
7943 link_bblock (cfg, try_bb, tblock);
7945 if (*(ip + clause->handler_offset) == CEE_POP)
7946 tblock->flags |= BB_EXCEPTION_DEAD_OBJ;
7948 if (clause->flags == MONO_EXCEPTION_CLAUSE_FINALLY ||
7949 clause->flags == MONO_EXCEPTION_CLAUSE_FILTER ||
7950 clause->flags == MONO_EXCEPTION_CLAUSE_FAULT) {
7951 MONO_INST_NEW (cfg, ins, OP_START_HANDLER);
7952 MONO_ADD_INS (tblock, ins);
7954 if (seq_points && clause->flags != MONO_EXCEPTION_CLAUSE_FINALLY) {
7955 /* finally clauses already have a seq point */
7956 NEW_SEQ_POINT (cfg, ins, clause->handler_offset, TRUE);
7957 MONO_ADD_INS (tblock, ins);
7960 /* todo: is a fault block unsafe to optimize? */
7961 if (clause->flags == MONO_EXCEPTION_CLAUSE_FAULT)
7962 tblock->flags |= BB_EXCEPTION_UNSAFE;
7966 /*printf ("clause try IL_%04x to IL_%04x handler %d at IL_%04x to IL_%04x\n", clause->try_offset, clause->try_offset + clause->try_len, clause->flags, clause->handler_offset, clause->handler_offset + clause->handler_len);
7968 printf ("%s", mono_disasm_code_one (NULL, method, p, &p));
7970 /* catch and filter blocks get the exception object on the stack */
7971 if (clause->flags == MONO_EXCEPTION_CLAUSE_NONE ||
7972 clause->flags == MONO_EXCEPTION_CLAUSE_FILTER) {
7973 MonoInst *dummy_use;
7975 /* mostly like handle_stack_args (), but just sets the input args */
7976 /* printf ("handling clause at IL_%04x\n", clause->handler_offset); */
7977 tblock->in_scount = 1;
7978 tblock->in_stack = mono_mempool_alloc (cfg->mempool, sizeof (MonoInst*));
7979 tblock->in_stack [0] = mono_create_exvar_for_offset (cfg, clause->handler_offset);
7982 * Add a dummy use for the exvar so its liveness info will be
7986 EMIT_NEW_DUMMY_USE (cfg, dummy_use, tblock->in_stack [0]);
7988 if (clause->flags == MONO_EXCEPTION_CLAUSE_FILTER) {
7989 GET_BBLOCK (cfg, tblock, ip + clause->data.filter_offset);
7990 tblock->flags |= BB_EXCEPTION_HANDLER;
7991 tblock->real_offset = clause->data.filter_offset;
7992 tblock->in_scount = 1;
7993 tblock->in_stack = mono_mempool_alloc (cfg->mempool, sizeof (MonoInst*));
7994 /* The filter block shares the exvar with the handler block */
7995 tblock->in_stack [0] = mono_create_exvar_for_offset (cfg, clause->handler_offset);
7996 MONO_INST_NEW (cfg, ins, OP_START_HANDLER);
7997 MONO_ADD_INS (tblock, ins);
8001 if (clause->flags != MONO_EXCEPTION_CLAUSE_FILTER &&
8002 clause->data.catch_class &&
8003 cfg->generic_sharing_context &&
8004 mono_class_check_context_used (clause->data.catch_class)) {
8006 * In shared generic code with catch
8007 * clauses containing type variables
8008 * the exception handling code has to
8009 * be able to get to the rgctx.
8010 * Therefore we have to make sure that
8011 * the vtable/mrgctx argument (for
8012 * static or generic methods) or the
8013 * "this" argument (for non-static
8014 * methods) are live.
8016 if ((method->flags & METHOD_ATTRIBUTE_STATIC) ||
8017 mini_method_get_context (method)->method_inst ||
8018 method->klass->valuetype) {
8019 mono_get_vtable_var (cfg);
8021 MonoInst *dummy_use;
8023 EMIT_NEW_DUMMY_USE (cfg, dummy_use, arg_array [0]);
8028 arg_array = (MonoInst **) alloca (sizeof (MonoInst *) * num_args);
8029 cfg->cbb = start_bblock;
8030 cfg->args = arg_array;
8031 mono_save_args (cfg, sig, inline_args);
8034 /* FIRST CODE BLOCK */
8035 NEW_BBLOCK (cfg, bblock);
8036 bblock->cil_code = ip;
8040 ADD_BBLOCK (cfg, bblock);
8042 if (cfg->method == method) {
8043 breakpoint_id = mono_debugger_method_has_breakpoint (method);
8044 if (breakpoint_id) {
8045 MONO_INST_NEW (cfg, ins, OP_BREAK);
8046 MONO_ADD_INS (bblock, ins);
8050 if (mono_security_cas_enabled ())
8051 secman = mono_security_manager_get_methods ();
8053 security = (secman && mono_security_method_has_declsec (method));
8054 /* at this point having security doesn't mean we have any code to generate */
8055 if (security && (cfg->method == method)) {
8056 /* Only Demand, NonCasDemand and DemandChoice requires code generation.
8057 * And we do not want to enter the next section (with allocation) if we
8058 * have nothing to generate */
8059 security = mono_declsec_get_demands (method, &actions);
8062 /* we must Demand SecurityPermission.Unmanaged before P/Invoking */
8063 pinvoke = (secman && (method->wrapper_type == MONO_WRAPPER_MANAGED_TO_NATIVE));
8065 MonoMethod *wrapped = mono_marshal_method_from_wrapper (method);
8066 if (wrapped && (wrapped->flags & METHOD_ATTRIBUTE_PINVOKE_IMPL)) {
8067 MonoCustomAttrInfo* custom = mono_custom_attrs_from_method (wrapped);
8069 /* unless the method or it's class has the [SuppressUnmanagedCodeSecurity] attribute */
8070 if (custom && mono_custom_attrs_has_attr (custom, secman->suppressunmanagedcodesecurity)) {
8074 mono_custom_attrs_free (custom);
8077 custom = mono_custom_attrs_from_class (wrapped->klass);
8078 if (custom && mono_custom_attrs_has_attr (custom, secman->suppressunmanagedcodesecurity)) {
8082 mono_custom_attrs_free (custom);
8085 /* not a P/Invoke after all */
8090 /* we use a separate basic block for the initialization code */
8091 NEW_BBLOCK (cfg, init_localsbb);
8092 cfg->bb_init = init_localsbb;
8093 init_localsbb->real_offset = cfg->real_offset;
8094 start_bblock->next_bb = init_localsbb;
8095 init_localsbb->next_bb = bblock;
8096 link_bblock (cfg, start_bblock, init_localsbb);
8097 link_bblock (cfg, init_localsbb, bblock);
8099 cfg->cbb = init_localsbb;
8101 if (cfg->gsharedvt && cfg->method == method) {
8102 MonoGSharedVtMethodInfo *info;
8103 MonoInst *var, *locals_var;
8106 info = mono_mempool_alloc0 (cfg->mempool, sizeof (MonoGSharedVtMethodInfo));
8107 info->method = cfg->method;
8108 info->count_entries = 16;
8109 info->entries = mono_mempool_alloc0 (cfg->mempool, sizeof (MonoRuntimeGenericContextInfoTemplate) * info->count_entries);
8110 cfg->gsharedvt_info = info;
8112 var = mono_compile_create_var (cfg, &mono_defaults.int_class->byval_arg, OP_LOCAL);
8113 /* prevent it from being register allocated */
8114 //var->flags |= MONO_INST_VOLATILE;
8115 cfg->gsharedvt_info_var = var;
8117 ins = emit_get_rgctx_gsharedvt_method (cfg, mini_method_check_context_used (cfg, method), method, info);
8118 MONO_EMIT_NEW_UNALU (cfg, OP_MOVE, var->dreg, ins->dreg);
8120 /* Allocate locals */
8121 locals_var = mono_compile_create_var (cfg, &mono_defaults.int_class->byval_arg, OP_LOCAL);
8122 /* prevent it from being register allocated */
8123 //locals_var->flags |= MONO_INST_VOLATILE;
8124 cfg->gsharedvt_locals_var = locals_var;
8126 dreg = alloc_ireg (cfg);
8127 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADI4_MEMBASE, dreg, var->dreg, MONO_STRUCT_OFFSET (MonoGSharedVtMethodRuntimeInfo, locals_size));
8129 MONO_INST_NEW (cfg, ins, OP_LOCALLOC);
8130 ins->dreg = locals_var->dreg;
8132 MONO_ADD_INS (cfg->cbb, ins);
8133 cfg->gsharedvt_locals_var_ins = ins;
8135 cfg->flags |= MONO_CFG_HAS_ALLOCA;
8138 ins->flags |= MONO_INST_INIT;
8142 /* at this point we know, if security is TRUE, that some code needs to be generated */
8143 if (security && (cfg->method == method)) {
8146 cfg->stat_cas_demand_generation++;
8148 if (actions.demand.blob) {
8149 /* Add code for SecurityAction.Demand */
8150 EMIT_NEW_DECLSECCONST (cfg, args[0], image, actions.demand);
8151 EMIT_NEW_ICONST (cfg, args [1], actions.demand.size);
8152 /* Calls static void SecurityManager.InternalDemand (byte* permissions, int size); */
8153 mono_emit_method_call (cfg, secman->demand, args, NULL);
8155 if (actions.noncasdemand.blob) {
8156 /* CLR 1.x uses a .noncasdemand (but 2.x doesn't) */
8157 /* For Mono we re-route non-CAS Demand to Demand (as the managed code must deal with it anyway) */
8158 EMIT_NEW_DECLSECCONST (cfg, args[0], image, actions.noncasdemand);
8159 EMIT_NEW_ICONST (cfg, args [1], actions.noncasdemand.size);
8160 /* Calls static void SecurityManager.InternalDemand (byte* permissions, int size); */
8161 mono_emit_method_call (cfg, secman->demand, args, NULL);
8163 if (actions.demandchoice.blob) {
8164 /* New in 2.0, Demand must succeed for one of the permissions (i.e. not all) */
8165 EMIT_NEW_DECLSECCONST (cfg, args[0], image, actions.demandchoice);
8166 EMIT_NEW_ICONST (cfg, args [1], actions.demandchoice.size);
8167 /* Calls static void SecurityManager.InternalDemandChoice (byte* permissions, int size); */
8168 mono_emit_method_call (cfg, secman->demandchoice, args, NULL);
8172 /* we must Demand SecurityPermission.Unmanaged before p/invoking */
8174 mono_emit_method_call (cfg, secman->demandunmanaged, NULL, NULL);
8177 if (mono_security_core_clr_enabled ()) {
8178 /* check if this is native code, e.g. an icall or a p/invoke */
8179 if (method->wrapper_type == MONO_WRAPPER_MANAGED_TO_NATIVE) {
8180 MonoMethod *wrapped = mono_marshal_method_from_wrapper (method);
8182 gboolean pinvk = (wrapped->flags & METHOD_ATTRIBUTE_PINVOKE_IMPL);
8183 gboolean icall = (wrapped->iflags & METHOD_IMPL_ATTRIBUTE_INTERNAL_CALL);
8185 /* if this ia a native call then it can only be JITted from platform code */
8186 if ((icall || pinvk) && method->klass && method->klass->image) {
8187 if (!mono_security_core_clr_is_platform_image (method->klass->image)) {
8188 MonoException *ex = icall ? mono_get_exception_security () :
8189 mono_get_exception_method_access ();
8190 emit_throw_exception (cfg, ex);
8197 CHECK_CFG_EXCEPTION;
8199 if (header->code_size == 0)
8202 if (get_basic_blocks (cfg, header, cfg->real_offset, ip, end, &err_pos)) {
8207 if (cfg->method == method)
8208 mono_debug_init_method (cfg, bblock, breakpoint_id);
8210 for (n = 0; n < header->num_locals; ++n) {
8211 if (header->locals [n]->type == MONO_TYPE_VOID && !header->locals [n]->byref)
8216 /* We force the vtable variable here for all shared methods
8217 for the possibility that they might show up in a stack
8218 trace where their exact instantiation is needed. */
8219 if (cfg->generic_sharing_context && method == cfg->method) {
8220 if ((method->flags & METHOD_ATTRIBUTE_STATIC) ||
8221 mini_method_get_context (method)->method_inst ||
8222 method->klass->valuetype) {
8223 mono_get_vtable_var (cfg);
8225 /* FIXME: Is there a better way to do this?
8226 We need the variable live for the duration
8227 of the whole method. */
8228 cfg->args [0]->flags |= MONO_INST_VOLATILE;
8232 /* add a check for this != NULL to inlined methods */
8233 if (is_virtual_call) {
8236 NEW_ARGLOAD (cfg, arg_ins, 0);
8237 MONO_ADD_INS (cfg->cbb, arg_ins);
8238 MONO_EMIT_NEW_CHECK_THIS (cfg, arg_ins->dreg);
8241 skip_dead_blocks = !dont_verify;
8242 if (skip_dead_blocks) {
8243 original_bb = bb = mono_basic_block_split (method, &cfg->error);
8248 /* we use a spare stack slot in SWITCH and NEWOBJ and others */
8249 stack_start = sp = mono_mempool_alloc0 (cfg->mempool, sizeof (MonoInst*) * (header->max_stack + 1));
8252 start_new_bblock = 0;
8255 if (cfg->method == method)
8256 cfg->real_offset = ip - header->code;
8258 cfg->real_offset = inline_offset;
8263 if (start_new_bblock) {
8264 bblock->cil_length = ip - bblock->cil_code;
8265 if (start_new_bblock == 2) {
8266 g_assert (ip == tblock->cil_code);
8268 GET_BBLOCK (cfg, tblock, ip);
8270 bblock->next_bb = tblock;
8273 start_new_bblock = 0;
8274 for (i = 0; i < bblock->in_scount; ++i) {
8275 if (cfg->verbose_level > 3)
8276 printf ("loading %d from temp %d\n", i, (int)bblock->in_stack [i]->inst_c0);
8277 EMIT_NEW_TEMPLOAD (cfg, ins, bblock->in_stack [i]->inst_c0);
8281 g_slist_free (class_inits);
8284 if ((tblock = cfg->cil_offset_to_bb [ip - cfg->cil_start]) && (tblock != bblock)) {
8285 link_bblock (cfg, bblock, tblock);
8286 if (sp != stack_start) {
8287 handle_stack_args (cfg, stack_start, sp - stack_start);
8289 CHECK_UNVERIFIABLE (cfg);
8291 bblock->next_bb = tblock;
8294 for (i = 0; i < bblock->in_scount; ++i) {
8295 if (cfg->verbose_level > 3)
8296 printf ("loading %d from temp %d\n", i, (int)bblock->in_stack [i]->inst_c0);
8297 EMIT_NEW_TEMPLOAD (cfg, ins, bblock->in_stack [i]->inst_c0);
8300 g_slist_free (class_inits);
8305 if (skip_dead_blocks) {
8306 int ip_offset = ip - header->code;
8308 if (ip_offset == bb->end)
8312 int op_size = mono_opcode_size (ip, end);
8313 g_assert (op_size > 0); /*The BB formation pass must catch all bad ops*/
8315 if (cfg->verbose_level > 3) printf ("SKIPPING DEAD OP at %x\n", ip_offset);
8317 if (ip_offset + op_size == bb->end) {
8318 MONO_INST_NEW (cfg, ins, OP_NOP);
8319 MONO_ADD_INS (bblock, ins);
8320 start_new_bblock = 1;
8328 * Sequence points are points where the debugger can place a breakpoint.
8329 * Currently, we generate these automatically at points where the IL
8332 if (seq_points && ((!sym_seq_points && (sp == stack_start)) || (sym_seq_points && mono_bitset_test_fast (seq_point_locs, ip - header->code)))) {
8334 * Make methods interruptable at the beginning, and at the targets of
8335 * backward branches.
8336 * Also, do this at the start of every bblock in methods with clauses too,
8337 * to be able to handle instructions with inprecise control flow like
8339 * Backward branches are handled at the end of method-to-ir ().
8341 gboolean intr_loc = ip == header->code || (!cfg->cbb->last_ins && cfg->header->num_clauses);
8343 /* Avoid sequence points on empty IL like .volatile */
8344 // FIXME: Enable this
8345 //if (!(cfg->cbb->last_ins && cfg->cbb->last_ins->opcode == OP_SEQ_POINT)) {
8346 NEW_SEQ_POINT (cfg, ins, ip - header->code, intr_loc);
8347 if (sp != stack_start)
8348 ins->flags |= MONO_INST_NONEMPTY_STACK;
8349 MONO_ADD_INS (cfg->cbb, ins);
8352 mono_bitset_set_fast (seq_point_set_locs, ip - header->code);
8355 bblock->real_offset = cfg->real_offset;
8357 if ((cfg->method == method) && cfg->coverage_info) {
8358 guint32 cil_offset = ip - header->code;
8359 cfg->coverage_info->data [cil_offset].cil_code = ip;
8361 /* TODO: Use an increment here */
8362 #if defined(TARGET_X86)
8363 MONO_INST_NEW (cfg, ins, OP_STORE_MEM_IMM);
8364 ins->inst_p0 = &(cfg->coverage_info->data [cil_offset].count);
8366 MONO_ADD_INS (cfg->cbb, ins);
8368 EMIT_NEW_PCONST (cfg, ins, &(cfg->coverage_info->data [cil_offset].count));
8369 MONO_EMIT_NEW_STORE_MEMBASE_IMM (cfg, OP_STORE_MEMBASE_IMM, ins->dreg, 0, 1);
8373 if (cfg->verbose_level > 3)
8374 printf ("converting (in B%d: stack: %d) %s", bblock->block_num, (int)(sp - stack_start), mono_disasm_code_one (NULL, method, ip, NULL));
8378 if (seq_points && !sym_seq_points && sp != stack_start) {
8380 * The C# compiler uses these nops to notify the JIT that it should
8381 * insert seq points.
8383 NEW_SEQ_POINT (cfg, ins, ip - header->code, FALSE);
8384 MONO_ADD_INS (cfg->cbb, ins);
8386 if (cfg->keep_cil_nops)
8387 MONO_INST_NEW (cfg, ins, OP_HARD_NOP);
8389 MONO_INST_NEW (cfg, ins, OP_NOP);
8391 MONO_ADD_INS (bblock, ins);
8394 if (should_insert_brekpoint (cfg->method)) {
8395 ins = mono_emit_jit_icall (cfg, mono_debugger_agent_user_break, NULL);
8397 MONO_INST_NEW (cfg, ins, OP_NOP);
8400 MONO_ADD_INS (bblock, ins);
8406 CHECK_STACK_OVF (1);
8407 n = (*ip)-CEE_LDARG_0;
8409 EMIT_NEW_ARGLOAD (cfg, ins, n);
8417 CHECK_STACK_OVF (1);
8418 n = (*ip)-CEE_LDLOC_0;
8420 EMIT_NEW_LOCLOAD (cfg, ins, n);
8429 n = (*ip)-CEE_STLOC_0;
8432 if (!dont_verify_stloc && target_type_is_incompatible (cfg, header->locals [n], *sp))
8434 emit_stloc_ir (cfg, sp, header, n);
8441 CHECK_STACK_OVF (1);
8444 EMIT_NEW_ARGLOAD (cfg, ins, n);
8450 CHECK_STACK_OVF (1);
8453 NEW_ARGLOADA (cfg, ins, n);
8454 MONO_ADD_INS (cfg->cbb, ins);
8464 if (!dont_verify_stloc && target_type_is_incompatible (cfg, param_types [ip [1]], *sp))
8466 EMIT_NEW_ARGSTORE (cfg, ins, n, *sp);
8471 CHECK_STACK_OVF (1);
8474 EMIT_NEW_LOCLOAD (cfg, ins, n);
8478 case CEE_LDLOCA_S: {
8479 unsigned char *tmp_ip;
8481 CHECK_STACK_OVF (1);
8482 CHECK_LOCAL (ip [1]);
8484 if ((tmp_ip = emit_optimized_ldloca_ir (cfg, ip, end, 1))) {
8490 EMIT_NEW_LOCLOADA (cfg, ins, ip [1]);
8499 CHECK_LOCAL (ip [1]);
8500 if (!dont_verify_stloc && target_type_is_incompatible (cfg, header->locals [ip [1]], *sp))
8502 emit_stloc_ir (cfg, sp, header, ip [1]);
8507 CHECK_STACK_OVF (1);
8508 EMIT_NEW_PCONST (cfg, ins, NULL);
8509 ins->type = STACK_OBJ;
8514 CHECK_STACK_OVF (1);
8515 EMIT_NEW_ICONST (cfg, ins, -1);
8528 CHECK_STACK_OVF (1);
8529 EMIT_NEW_ICONST (cfg, ins, (*ip) - CEE_LDC_I4_0);
8535 CHECK_STACK_OVF (1);
8537 EMIT_NEW_ICONST (cfg, ins, *((signed char*)ip));
8543 CHECK_STACK_OVF (1);
8544 EMIT_NEW_ICONST (cfg, ins, (gint32)read32 (ip + 1));
8550 CHECK_STACK_OVF (1);
8551 MONO_INST_NEW (cfg, ins, OP_I8CONST);
8552 ins->type = STACK_I8;
8553 ins->dreg = alloc_dreg (cfg, STACK_I8);
8555 ins->inst_l = (gint64)read64 (ip);
8556 MONO_ADD_INS (bblock, ins);
8562 gboolean use_aotconst = FALSE;
8564 #ifdef TARGET_POWERPC
8565 /* FIXME: Clean this up */
8566 if (cfg->compile_aot)
8567 use_aotconst = TRUE;
8570 /* FIXME: we should really allocate this only late in the compilation process */
8571 f = mono_domain_alloc (cfg->domain, sizeof (float));
8573 CHECK_STACK_OVF (1);
8579 EMIT_NEW_AOTCONST (cfg, cons, MONO_PATCH_INFO_R4, f);
8581 dreg = alloc_freg (cfg);
8582 EMIT_NEW_LOAD_MEMBASE (cfg, ins, OP_LOADR4_MEMBASE, dreg, cons->dreg, 0);
8583 ins->type = cfg->r4_stack_type;
8585 MONO_INST_NEW (cfg, ins, OP_R4CONST);
8586 ins->type = cfg->r4_stack_type;
8587 ins->dreg = alloc_dreg (cfg, STACK_R8);
8589 MONO_ADD_INS (bblock, ins);
8599 gboolean use_aotconst = FALSE;
8601 #ifdef TARGET_POWERPC
8602 /* FIXME: Clean this up */
8603 if (cfg->compile_aot)
8604 use_aotconst = TRUE;
8607 /* FIXME: we should really allocate this only late in the compilation process */
8608 d = mono_domain_alloc (cfg->domain, sizeof (double));
8610 CHECK_STACK_OVF (1);
8616 EMIT_NEW_AOTCONST (cfg, cons, MONO_PATCH_INFO_R8, d);
8618 dreg = alloc_freg (cfg);
8619 EMIT_NEW_LOAD_MEMBASE (cfg, ins, OP_LOADR8_MEMBASE, dreg, cons->dreg, 0);
8620 ins->type = STACK_R8;
8622 MONO_INST_NEW (cfg, ins, OP_R8CONST);
8623 ins->type = STACK_R8;
8624 ins->dreg = alloc_dreg (cfg, STACK_R8);
8626 MONO_ADD_INS (bblock, ins);
8635 MonoInst *temp, *store;
8637 CHECK_STACK_OVF (1);
8641 temp = mono_compile_create_var (cfg, type_from_stack_type (ins), OP_LOCAL);
8642 EMIT_NEW_TEMPSTORE (cfg, store, temp->inst_c0, ins);
8644 EMIT_NEW_TEMPLOAD (cfg, ins, temp->inst_c0);
8647 EMIT_NEW_TEMPLOAD (cfg, ins, temp->inst_c0);
8660 if (sp [0]->type == STACK_R8)
8661 /* we need to pop the value from the x86 FP stack */
8662 MONO_EMIT_NEW_UNALU (cfg, OP_X86_FPOP, -1, sp [0]->dreg);
8668 INLINE_FAILURE ("jmp");
8669 GSHAREDVT_FAILURE (*ip);
8672 if (stack_start != sp)
8674 token = read32 (ip + 1);
8675 /* FIXME: check the signature matches */
8676 cmethod = mini_get_method (cfg, method, token, NULL, generic_context);
8678 if (!cmethod || mono_loader_get_last_error ())
8681 if (cfg->generic_sharing_context && mono_method_check_context_used (cmethod))
8682 GENERIC_SHARING_FAILURE (CEE_JMP);
8684 if (mono_security_cas_enabled ())
8685 CHECK_CFG_EXCEPTION;
8687 emit_instrumentation_call (cfg, mono_profiler_method_leave);
8689 if (ARCH_HAVE_OP_TAIL_CALL) {
8690 MonoMethodSignature *fsig = mono_method_signature (cmethod);
8693 /* Handle tail calls similarly to calls */
8694 n = fsig->param_count + fsig->hasthis;
8698 MONO_INST_NEW_CALL (cfg, call, OP_TAILCALL);
8699 call->method = cmethod;
8700 call->tail_call = TRUE;
8701 call->signature = mono_method_signature (cmethod);
8702 call->args = mono_mempool_alloc (cfg->mempool, sizeof (MonoInst*) * n);
8703 call->inst.inst_p0 = cmethod;
8704 for (i = 0; i < n; ++i)
8705 EMIT_NEW_ARGLOAD (cfg, call->args [i], i);
8707 mono_arch_emit_call (cfg, call);
8708 cfg->param_area = MAX(cfg->param_area, call->stack_usage);
8709 MONO_ADD_INS (bblock, (MonoInst*)call);
8711 for (i = 0; i < num_args; ++i)
8712 /* Prevent arguments from being optimized away */
8713 arg_array [i]->flags |= MONO_INST_VOLATILE;
8715 MONO_INST_NEW_CALL (cfg, call, OP_JMP);
8716 ins = (MonoInst*)call;
8717 ins->inst_p0 = cmethod;
8718 MONO_ADD_INS (bblock, ins);
8722 start_new_bblock = 1;
8727 MonoMethodSignature *fsig;
8730 token = read32 (ip + 1);
8734 //GSHAREDVT_FAILURE (*ip);
8739 fsig = mini_get_signature (method, token, generic_context);
8741 if (method->dynamic && fsig->pinvoke) {
8745 * This is a call through a function pointer using a pinvoke
8746 * signature. Have to create a wrapper and call that instead.
8747 * FIXME: This is very slow, need to create a wrapper at JIT time
8748 * instead based on the signature.
8750 EMIT_NEW_IMAGECONST (cfg, args [0], method->klass->image);
8751 EMIT_NEW_PCONST (cfg, args [1], fsig);
8753 addr = mono_emit_jit_icall (cfg, mono_get_native_calli_wrapper, args);
8756 n = fsig->param_count + fsig->hasthis;
8760 //g_assert (!virtual || fsig->hasthis);
8764 inline_costs += 10 * num_calls++;
8767 * Making generic calls out of gsharedvt methods.
8768 * This needs to be used for all generic calls, not just ones with a gsharedvt signature, to avoid
8769 * patching gshared method addresses into a gsharedvt method.
8771 if (cfg->gsharedvt && mini_is_gsharedvt_signature (cfg, fsig)) {
8773 * We pass the address to the gsharedvt trampoline in the rgctx reg
8775 MonoInst *callee = addr;
8777 if (method->wrapper_type != MONO_WRAPPER_DELEGATE_INVOKE)
8779 GSHAREDVT_FAILURE (*ip);
8781 addr = emit_get_rgctx_sig (cfg, context_used,
8782 fsig, MONO_RGCTX_INFO_SIG_GSHAREDVT_OUT_TRAMPOLINE_CALLI);
8783 ins = (MonoInst*)mono_emit_calli (cfg, fsig, sp, addr, NULL, callee);
8787 /* Prevent inlining of methods with indirect calls */
8788 INLINE_FAILURE ("indirect call");
8790 if (addr->opcode == OP_PCONST || addr->opcode == OP_AOTCONST || addr->opcode == OP_GOT_ENTRY) {
8795 * Instead of emitting an indirect call, emit a direct call
8796 * with the contents of the aotconst as the patch info.
8798 if (addr->opcode == OP_PCONST || addr->opcode == OP_AOTCONST) {
8799 info_type = addr->inst_c1;
8800 info_data = addr->inst_p0;
8802 info_type = addr->inst_right->inst_c1;
8803 info_data = addr->inst_right->inst_left;
8806 if (info_type == MONO_PATCH_INFO_ICALL_ADDR || info_type == MONO_PATCH_INFO_JIT_ICALL_ADDR) {
8807 ins = (MonoInst*)mono_emit_abs_call (cfg, info_type, info_data, fsig, sp);
8812 ins = (MonoInst*)mono_emit_calli (cfg, fsig, sp, addr, NULL, NULL);
8816 /* End of call, INS should contain the result of the call, if any */
8818 if (!MONO_TYPE_IS_VOID (fsig->ret)) {
8820 *sp++ = mono_emit_widen_call_res (cfg, ins, fsig);
8823 CHECK_CFG_EXCEPTION;
8827 constrained_class = NULL;
8831 case CEE_CALLVIRT: {
8832 MonoInst *addr = NULL;
8833 MonoMethodSignature *fsig = NULL;
8835 int virtual = *ip == CEE_CALLVIRT;
8836 gboolean pass_imt_from_rgctx = FALSE;
8837 MonoInst *imt_arg = NULL;
8838 MonoInst *keep_this_alive = NULL;
8839 gboolean pass_vtable = FALSE;
8840 gboolean pass_mrgctx = FALSE;
8841 MonoInst *vtable_arg = NULL;
8842 gboolean check_this = FALSE;
8843 gboolean supported_tail_call = FALSE;
8844 gboolean tail_call = FALSE;
8845 gboolean need_seq_point = FALSE;
8846 guint32 call_opcode = *ip;
8847 gboolean emit_widen = TRUE;
8848 gboolean push_res = TRUE;
8849 gboolean skip_ret = FALSE;
8850 gboolean delegate_invoke = FALSE;
8851 gboolean direct_icall = FALSE;
8852 MonoMethod *cil_method;
8855 token = read32 (ip + 1);
8859 cmethod = mini_get_method (cfg, method, token, NULL, generic_context);
8860 cil_method = cmethod;
8862 if (constrained_class) {
8863 if ((constrained_class->byval_arg.type == MONO_TYPE_VAR || constrained_class->byval_arg.type == MONO_TYPE_MVAR) && cfg->generic_sharing_context) {
8864 if (!mini_is_gsharedvt_klass (cfg, constrained_class)) {
8865 g_assert (!cmethod->klass->valuetype);
8866 if (!mini_type_is_reference (cfg, &constrained_class->byval_arg)) {
8867 /* FIXME: gshared type constrained to a primitive type */
8868 GENERIC_SHARING_FAILURE (CEE_CALL);
8873 if (method->wrapper_type != MONO_WRAPPER_NONE) {
8874 if (cfg->verbose_level > 2)
8875 printf ("DM Constrained call to %s\n", mono_type_get_full_name (constrained_class));
8876 if (!((constrained_class->byval_arg.type == MONO_TYPE_VAR ||
8877 constrained_class->byval_arg.type == MONO_TYPE_MVAR) &&
8878 cfg->generic_sharing_context)) {
8879 cmethod = mono_get_method_constrained_with_method (image, cil_method, constrained_class, generic_context, &cfg->error);
8883 if (cfg->verbose_level > 2)
8884 printf ("Constrained call to %s\n", mono_type_get_full_name (constrained_class));
8886 if ((constrained_class->byval_arg.type == MONO_TYPE_VAR || constrained_class->byval_arg.type == MONO_TYPE_MVAR) && cfg->generic_sharing_context) {
8888 * This is needed since get_method_constrained can't find
8889 * the method in klass representing a type var.
8890 * The type var is guaranteed to be a reference type in this
8893 if (!mini_is_gsharedvt_klass (cfg, constrained_class))
8894 g_assert (!cmethod->klass->valuetype);
8896 cmethod = mono_get_method_constrained_checked (image, token, constrained_class, generic_context, &cil_method, &cfg->error);
8902 if (!cmethod || mono_loader_get_last_error ())
8904 if (!dont_verify && !cfg->skip_visibility) {
8905 MonoMethod *target_method = cil_method;
8906 if (method->is_inflated) {
8907 target_method = mini_get_method_allow_open (method, token, NULL, &(mono_method_get_generic_container (method_definition)->context));
8909 if (!mono_method_can_access_method (method_definition, target_method) &&
8910 !mono_method_can_access_method (method, cil_method))
8911 METHOD_ACCESS_FAILURE (method, cil_method);
8914 if (mono_security_core_clr_enabled ())
8915 ensure_method_is_allowed_to_call_method (cfg, method, cil_method, bblock, ip);
8917 if (!virtual && (cmethod->flags & METHOD_ATTRIBUTE_ABSTRACT))
8918 /* MS.NET seems to silently convert this to a callvirt */
8923 * MS.NET accepts non virtual calls to virtual final methods of transparent proxy classes and
8924 * converts to a callvirt.
8926 * tests/bug-515884.il is an example of this behavior
8928 const int test_flags = METHOD_ATTRIBUTE_VIRTUAL | METHOD_ATTRIBUTE_FINAL | METHOD_ATTRIBUTE_STATIC;
8929 const int expected_flags = METHOD_ATTRIBUTE_VIRTUAL | METHOD_ATTRIBUTE_FINAL;
8930 if (!virtual && mono_class_is_marshalbyref (cmethod->klass) && (cmethod->flags & test_flags) == expected_flags && cfg->method->wrapper_type == MONO_WRAPPER_NONE)
8934 if (!cmethod->klass->inited)
8935 if (!mono_class_init (cmethod->klass))
8936 TYPE_LOAD_ERROR (cmethod->klass);
8938 fsig = mono_method_signature (cmethod);
8941 if (cmethod->iflags & METHOD_IMPL_ATTRIBUTE_INTERNAL_CALL &&
8942 mini_class_is_system_array (cmethod->klass)) {
8943 array_rank = cmethod->klass->rank;
8944 } else if ((cmethod->iflags & METHOD_IMPL_ATTRIBUTE_INTERNAL_CALL) && icall_is_direct_callable (cfg, cmethod)) {
8945 direct_icall = TRUE;
8946 } else if (fsig->pinvoke) {
8947 MonoMethod *wrapper = mono_marshal_get_native_wrapper (cmethod,
8948 check_for_pending_exc, cfg->compile_aot);
8949 fsig = mono_method_signature (wrapper);
8950 } else if (constrained_class) {
8952 fsig = mono_method_get_signature_checked (cmethod, image, token, generic_context, &cfg->error);
8956 mono_save_token_info (cfg, image, token, cil_method);
8958 if (!(seq_point_locs && mono_bitset_test_fast (seq_point_locs, ip + 5 - header->code)))
8959 need_seq_point = TRUE;
8961 /* Don't support calls made using type arguments for now */
8963 if (cfg->gsharedvt) {
8964 if (mini_is_gsharedvt_signature (cfg, fsig))
8965 GSHAREDVT_FAILURE (*ip);
8969 if (mono_security_cas_enabled ()) {
8970 if (check_linkdemand (cfg, method, cmethod))
8971 INLINE_FAILURE ("linkdemand");
8972 CHECK_CFG_EXCEPTION;
8975 if (cmethod->string_ctor && method->wrapper_type != MONO_WRAPPER_RUNTIME_INVOKE)
8976 g_assert_not_reached ();
8978 n = fsig->param_count + fsig->hasthis;
8980 if (!cfg->generic_sharing_context && cmethod->klass->generic_container)
8983 if (!cfg->generic_sharing_context)
8984 g_assert (!mono_method_check_context_used (cmethod));
8988 //g_assert (!virtual || fsig->hasthis);
8992 if (constrained_class) {
8993 if (mini_is_gsharedvt_klass (cfg, constrained_class)) {
8994 if ((cmethod->klass != mono_defaults.object_class) && constrained_class->valuetype && cmethod->klass->valuetype) {
8995 /* The 'Own method' case below */
8996 } else if (cmethod->klass->image != mono_defaults.corlib && !(cmethod->klass->flags & TYPE_ATTRIBUTE_INTERFACE) && !cmethod->klass->valuetype) {
8997 /* 'The type parameter is instantiated as a reference type' case below. */
8999 ins = handle_constrained_gsharedvt_call (cfg, cmethod, fsig, sp, constrained_class, &emit_widen, &bblock);
9000 CHECK_CFG_EXCEPTION;
9007 * We have the `constrained.' prefix opcode.
9009 if (constrained_class->valuetype && (cmethod->klass == mono_defaults.object_class || cmethod->klass == mono_defaults.enum_class->parent || cmethod->klass == mono_defaults.enum_class)) {
9011 * The type parameter is instantiated as a valuetype,
9012 * but that type doesn't override the method we're
9013 * calling, so we need to box `this'.
9015 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, ins, &constrained_class->byval_arg, sp [0]->dreg, 0);
9016 ins->klass = constrained_class;
9017 sp [0] = handle_box (cfg, ins, constrained_class, mono_class_check_context_used (constrained_class), &bblock);
9018 CHECK_CFG_EXCEPTION;
9019 } else if (!constrained_class->valuetype) {
9020 int dreg = alloc_ireg_ref (cfg);
9023 * The type parameter is instantiated as a reference
9024 * type. We have a managed pointer on the stack, so
9025 * we need to dereference it here.
9027 EMIT_NEW_LOAD_MEMBASE (cfg, ins, OP_LOAD_MEMBASE, dreg, sp [0]->dreg, 0);
9028 ins->type = STACK_OBJ;
9031 if (cmethod->klass->valuetype) {
9034 /* Interface method */
9037 mono_class_setup_vtable (constrained_class);
9038 CHECK_TYPELOAD (constrained_class);
9039 ioffset = mono_class_interface_offset (constrained_class, cmethod->klass);
9041 TYPE_LOAD_ERROR (constrained_class);
9042 slot = mono_method_get_vtable_slot (cmethod);
9044 TYPE_LOAD_ERROR (cmethod->klass);
9045 cmethod = constrained_class->vtable [ioffset + slot];
9047 if (cmethod->klass == mono_defaults.enum_class) {
9048 /* Enum implements some interfaces, so treat this as the first case */
9049 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, ins, &constrained_class->byval_arg, sp [0]->dreg, 0);
9050 ins->klass = constrained_class;
9051 sp [0] = handle_box (cfg, ins, constrained_class, mono_class_check_context_used (constrained_class), &bblock);
9052 CHECK_CFG_EXCEPTION;
9057 constrained_class = NULL;
9060 if (check_call_signature (cfg, fsig, sp))
9063 #ifdef MONO_ARCH_HAVE_CREATE_DELEGATE_TRAMPOLINE
9064 if ((cmethod->klass->parent == mono_defaults.multicastdelegate_class) && !strcmp (cmethod->name, "Invoke"))
9065 delegate_invoke = TRUE;
9068 if ((cfg->opt & MONO_OPT_INTRINS) && (ins = mini_emit_inst_for_sharable_method (cfg, cmethod, fsig, sp))) {
9070 if (!MONO_TYPE_IS_VOID (fsig->ret)) {
9071 type_to_eval_stack_type ((cfg), fsig->ret, ins);
9079 * If the callee is a shared method, then its static cctor
9080 * might not get called after the call was patched.
9082 if (cfg->generic_sharing_context && cmethod->klass != method->klass && cmethod->klass->generic_class && mono_method_is_generic_sharable (cmethod, TRUE) && mono_class_needs_cctor_run (cmethod->klass, method)) {
9083 emit_generic_class_init (cfg, cmethod->klass);
9084 CHECK_TYPELOAD (cmethod->klass);
9087 check_method_sharing (cfg, cmethod, &pass_vtable, &pass_mrgctx);
9089 if (cfg->generic_sharing_context) {
9090 MonoGenericContext *cmethod_context = mono_method_get_context (cmethod);
9092 context_used = mini_method_check_context_used (cfg, cmethod);
9094 if (context_used && (cmethod->klass->flags & TYPE_ATTRIBUTE_INTERFACE)) {
9095 /* Generic method interface
9096 calls are resolved via a
9097 helper function and don't
9099 if (!cmethod_context || !cmethod_context->method_inst)
9100 pass_imt_from_rgctx = TRUE;
9104 * If a shared method calls another
9105 * shared method then the caller must
9106 * have a generic sharing context
9107 * because the magic trampoline
9108 * requires it. FIXME: We shouldn't
9109 * have to force the vtable/mrgctx
9110 * variable here. Instead there
9111 * should be a flag in the cfg to
9112 * request a generic sharing context.
9115 ((method->flags & METHOD_ATTRIBUTE_STATIC) || method->klass->valuetype))
9116 mono_get_vtable_var (cfg);
9121 vtable_arg = emit_get_rgctx_klass (cfg, context_used, cmethod->klass, MONO_RGCTX_INFO_VTABLE);
9123 MonoVTable *vtable = mono_class_vtable (cfg->domain, cmethod->klass);
9125 CHECK_TYPELOAD (cmethod->klass);
9126 EMIT_NEW_VTABLECONST (cfg, vtable_arg, vtable);
9131 g_assert (!vtable_arg);
9133 if (!cfg->compile_aot) {
9135 * emit_get_rgctx_method () calls mono_class_vtable () so check
9136 * for type load errors before.
9138 mono_class_setup_vtable (cmethod->klass);
9139 CHECK_TYPELOAD (cmethod->klass);
9142 vtable_arg = emit_get_rgctx_method (cfg, context_used, cmethod, MONO_RGCTX_INFO_METHOD_RGCTX);
9144 /* !marshalbyref is needed to properly handle generic methods + remoting */
9145 if ((!(cmethod->flags & METHOD_ATTRIBUTE_VIRTUAL) ||
9146 MONO_METHOD_IS_FINAL (cmethod)) &&
9147 !mono_class_is_marshalbyref (cmethod->klass)) {
9154 if (pass_imt_from_rgctx) {
9155 g_assert (!pass_vtable);
9157 imt_arg = emit_get_rgctx_method (cfg, context_used,
9158 cmethod, MONO_RGCTX_INFO_METHOD);
9162 MONO_EMIT_NEW_CHECK_THIS (cfg, sp [0]->dreg);
9164 /* Calling virtual generic methods */
9165 if (virtual && (cmethod->flags & METHOD_ATTRIBUTE_VIRTUAL) &&
9166 !(MONO_METHOD_IS_FINAL (cmethod) &&
9167 cmethod->wrapper_type != MONO_WRAPPER_REMOTING_INVOKE_WITH_CHECK) &&
9168 fsig->generic_param_count &&
9169 !(cfg->gsharedvt && mini_is_gsharedvt_signature (cfg, fsig))) {
9170 MonoInst *this_temp, *this_arg_temp, *store;
9171 MonoInst *iargs [4];
9172 gboolean use_imt = FALSE;
9174 g_assert (fsig->is_inflated);
9176 /* Prevent inlining of methods that contain indirect calls */
9177 INLINE_FAILURE ("virtual generic call");
9179 if (cfg->gsharedvt && mini_is_gsharedvt_signature (cfg, fsig))
9180 GSHAREDVT_FAILURE (*ip);
9182 #if MONO_ARCH_HAVE_GENERALIZED_IMT_THUNK && defined(MONO_ARCH_GSHARED_SUPPORTED)
9183 if (cmethod->wrapper_type == MONO_WRAPPER_NONE && mono_use_imt)
9188 g_assert (!imt_arg);
9190 g_assert (cmethod->is_inflated);
9191 imt_arg = emit_get_rgctx_method (cfg, context_used,
9192 cmethod, MONO_RGCTX_INFO_METHOD);
9193 ins = mono_emit_method_call_full (cfg, cmethod, fsig, FALSE, sp, sp [0], imt_arg, NULL);
9195 this_temp = mono_compile_create_var (cfg, type_from_stack_type (sp [0]), OP_LOCAL);
9196 NEW_TEMPSTORE (cfg, store, this_temp->inst_c0, sp [0]);
9197 MONO_ADD_INS (bblock, store);
9199 /* FIXME: This should be a managed pointer */
9200 this_arg_temp = mono_compile_create_var (cfg, &mono_defaults.int_class->byval_arg, OP_LOCAL);
9202 EMIT_NEW_TEMPLOAD (cfg, iargs [0], this_temp->inst_c0);
9203 iargs [1] = emit_get_rgctx_method (cfg, context_used,
9204 cmethod, MONO_RGCTX_INFO_METHOD);
9205 EMIT_NEW_TEMPLOADA (cfg, iargs [2], this_arg_temp->inst_c0);
9206 addr = mono_emit_jit_icall (cfg,
9207 mono_helper_compile_generic_method, iargs);
9209 EMIT_NEW_TEMPLOAD (cfg, sp [0], this_arg_temp->inst_c0);
9211 ins = (MonoInst*)mono_emit_calli (cfg, fsig, sp, addr, NULL, NULL);
9218 * Implement a workaround for the inherent races involved in locking:
9224 * If a thread abort happens between the call to Monitor.Enter () and the start of the
9225 * try block, the Exit () won't be executed, see:
9226 * http://www.bluebytesoftware.com/blog/2007/01/30/MonitorEnterThreadAbortsAndOrphanedLocks.aspx
9227 * To work around this, we extend such try blocks to include the last x bytes
9228 * of the Monitor.Enter () call.
9230 if (cmethod->klass == mono_defaults.monitor_class && !strcmp (cmethod->name, "Enter") && mono_method_signature (cmethod)->param_count == 1) {
9231 MonoBasicBlock *tbb;
9233 GET_BBLOCK (cfg, tbb, ip + 5);
9235 * Only extend try blocks with a finally, to avoid catching exceptions thrown
9236 * from Monitor.Enter like ArgumentNullException.
9238 if (tbb->try_start && MONO_REGION_FLAGS(tbb->region) == MONO_EXCEPTION_CLAUSE_FINALLY) {
9239 /* Mark this bblock as needing to be extended */
9240 tbb->extend_try_block = TRUE;
9244 /* Conversion to a JIT intrinsic */
9245 if ((cfg->opt & MONO_OPT_INTRINS) && (ins = mini_emit_inst_for_method (cfg, cmethod, fsig, sp))) {
9247 if (!MONO_TYPE_IS_VOID (fsig->ret)) {
9248 type_to_eval_stack_type ((cfg), fsig->ret, ins);
9255 if ((cfg->opt & MONO_OPT_INLINE) &&
9256 (!virtual || !(cmethod->flags & METHOD_ATTRIBUTE_VIRTUAL) || MONO_METHOD_IS_FINAL (cmethod)) &&
9257 mono_method_check_inlining (cfg, cmethod)) {
9259 gboolean always = FALSE;
9261 if ((cmethod->iflags & METHOD_IMPL_ATTRIBUTE_INTERNAL_CALL) ||
9262 (cmethod->flags & METHOD_ATTRIBUTE_PINVOKE_IMPL)) {
9263 /* Prevent inlining of methods that call wrappers */
9264 INLINE_FAILURE ("wrapper call");
9265 cmethod = mono_marshal_get_native_wrapper (cmethod, check_for_pending_exc, FALSE);
9269 costs = inline_method (cfg, cmethod, fsig, sp, ip, cfg->real_offset, always, &bblock);
9271 cfg->real_offset += 5;
9273 if (!MONO_TYPE_IS_VOID (fsig->ret)) {
9274 /* *sp is already set by inline_method */
9279 inline_costs += costs;
9285 /* Tail recursion elimination */
9286 if ((cfg->opt & MONO_OPT_TAILC) && call_opcode == CEE_CALL && cmethod == method && ip [5] == CEE_RET && !vtable_arg) {
9287 gboolean has_vtargs = FALSE;
9290 /* Prevent inlining of methods with tail calls (the call stack would be altered) */
9291 INLINE_FAILURE ("tail call");
9293 /* keep it simple */
9294 for (i = fsig->param_count - 1; i >= 0; i--) {
9295 if (MONO_TYPE_ISSTRUCT (mono_method_signature (cmethod)->params [i]))
9300 for (i = 0; i < n; ++i)
9301 EMIT_NEW_ARGSTORE (cfg, ins, i, sp [i]);
9302 MONO_INST_NEW (cfg, ins, OP_BR);
9303 MONO_ADD_INS (bblock, ins);
9304 tblock = start_bblock->out_bb [0];
9305 link_bblock (cfg, bblock, tblock);
9306 ins->inst_target_bb = tblock;
9307 start_new_bblock = 1;
9309 /* skip the CEE_RET, too */
9310 if (ip_in_bb (cfg, bblock, ip + 5))
9317 inline_costs += 10 * num_calls++;
9320 * Making generic calls out of gsharedvt methods.
9321 * This needs to be used for all generic calls, not just ones with a gsharedvt signature, to avoid
9322 * patching gshared method addresses into a gsharedvt method.
9324 if (cfg->gsharedvt && (mini_is_gsharedvt_signature (cfg, fsig) || cmethod->is_inflated || cmethod->klass->generic_class) &&
9325 !(cmethod->klass->rank && cmethod->klass->byval_arg.type != MONO_TYPE_SZARRAY)) {
9326 MonoRgctxInfoType info_type;
9329 //if (cmethod->klass->flags & TYPE_ATTRIBUTE_INTERFACE)
9330 //GSHAREDVT_FAILURE (*ip);
9331 // disable for possible remoting calls
9332 if (fsig->hasthis && (mono_class_is_marshalbyref (method->klass) || method->klass == mono_defaults.object_class))
9333 GSHAREDVT_FAILURE (*ip);
9334 if (fsig->generic_param_count) {
9335 /* virtual generic call */
9336 g_assert (mono_use_imt);
9337 g_assert (!imt_arg);
9338 /* Same as the virtual generic case above */
9339 imt_arg = emit_get_rgctx_method (cfg, context_used,
9340 cmethod, MONO_RGCTX_INFO_METHOD);
9341 /* This is not needed, as the trampoline code will pass one, and it might be passed in the same reg as the imt arg */
9343 } else if ((cmethod->klass->flags & TYPE_ATTRIBUTE_INTERFACE) && !imt_arg) {
9344 /* This can happen when we call a fully instantiated iface method */
9345 imt_arg = emit_get_rgctx_method (cfg, context_used,
9346 cmethod, MONO_RGCTX_INFO_METHOD);
9351 if ((cmethod->klass->parent == mono_defaults.multicastdelegate_class) && (!strcmp (cmethod->name, "Invoke")))
9352 keep_this_alive = sp [0];
9354 if (virtual && (cmethod->flags & METHOD_ATTRIBUTE_VIRTUAL))
9355 info_type = MONO_RGCTX_INFO_METHOD_GSHAREDVT_OUT_TRAMPOLINE_VIRT;
9357 info_type = MONO_RGCTX_INFO_METHOD_GSHAREDVT_OUT_TRAMPOLINE;
9358 addr = emit_get_rgctx_gsharedvt_call (cfg, context_used, fsig, cmethod, info_type);
9360 ins = (MonoInst*)mono_emit_calli (cfg, fsig, sp, addr, imt_arg, vtable_arg);
9364 /* Generic sharing */
9367 * Use this if the callee is gsharedvt sharable too, since
9368 * at runtime we might find an instantiation so the call cannot
9369 * be patched (the 'no_patch' code path in mini-trampolines.c).
9371 if (context_used && !imt_arg && !array_rank && !delegate_invoke &&
9372 (!mono_method_is_generic_sharable_full (cmethod, TRUE, FALSE, FALSE) ||
9373 !mono_class_generic_sharing_enabled (cmethod->klass)) &&
9374 (!virtual || MONO_METHOD_IS_FINAL (cmethod) ||
9375 !(cmethod->flags & METHOD_ATTRIBUTE_VIRTUAL))) {
9376 INLINE_FAILURE ("gshared");
9378 g_assert (cfg->generic_sharing_context && cmethod);
9382 * We are compiling a call to a
9383 * generic method from shared code,
9384 * which means that we have to look up
9385 * the method in the rgctx and do an
9389 MONO_EMIT_NEW_CHECK_THIS (cfg, sp [0]->dreg);
9391 addr = emit_get_rgctx_method (cfg, context_used, cmethod, MONO_RGCTX_INFO_GENERIC_METHOD_CODE);
9392 ins = (MonoInst*)mono_emit_calli (cfg, fsig, sp, addr, imt_arg, vtable_arg);
9396 /* Direct calls to icalls */
9398 MonoMethod *wrapper;
9401 /* Inline the wrapper */
9402 wrapper = mono_marshal_get_native_wrapper (cmethod, TRUE, cfg->compile_aot);
9404 costs = inline_method (cfg, wrapper, fsig, sp, ip, cfg->real_offset, TRUE, &bblock);
9405 g_assert (costs > 0);
9406 cfg->real_offset += 5;
9408 if (!MONO_TYPE_IS_VOID (fsig->ret)) {
9409 /* *sp is already set by inline_method */
9414 inline_costs += costs;
9423 if (strcmp (cmethod->name, "Set") == 0) { /* array Set */
9424 MonoInst *val = sp [fsig->param_count];
9426 if (val->type == STACK_OBJ) {
9427 MonoInst *iargs [2];
9432 mono_emit_jit_icall (cfg, mono_helper_stelem_ref_check, iargs);
9435 addr = mini_emit_ldelema_ins (cfg, cmethod, sp, ip, TRUE);
9436 EMIT_NEW_STORE_MEMBASE_TYPE (cfg, ins, fsig->params [fsig->param_count - 1], addr->dreg, 0, val->dreg);
9437 if (cfg->gen_write_barriers && val->type == STACK_OBJ && !(val->opcode == OP_PCONST && val->inst_c0 == 0))
9438 emit_write_barrier (cfg, addr, val);
9439 if (cfg->gen_write_barriers && mini_is_gsharedvt_klass (cfg, cmethod->klass))
9440 GSHAREDVT_FAILURE (*ip);
9441 } else if (strcmp (cmethod->name, "Get") == 0) { /* array Get */
9442 addr = mini_emit_ldelema_ins (cfg, cmethod, sp, ip, FALSE);
9444 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, ins, fsig->ret, addr->dreg, 0);
9445 } else if (strcmp (cmethod->name, "Address") == 0) { /* array Address */
9446 if (!cmethod->klass->element_class->valuetype && !readonly)
9447 mini_emit_check_array_type (cfg, sp [0], cmethod->klass);
9448 CHECK_TYPELOAD (cmethod->klass);
9451 addr = mini_emit_ldelema_ins (cfg, cmethod, sp, ip, FALSE);
9454 g_assert_not_reached ();
9461 ins = mini_redirect_call (cfg, cmethod, fsig, sp, virtual ? sp [0] : NULL);
9465 /* Tail prefix / tail call optimization */
9467 /* FIXME: Enabling TAILC breaks some inlining/stack trace/etc tests */
9468 /* FIXME: runtime generic context pointer for jumps? */
9469 /* FIXME: handle this for generic sharing eventually */
9470 if ((ins_flag & MONO_INST_TAILCALL) &&
9471 !vtable_arg && !cfg->generic_sharing_context && is_supported_tail_call (cfg, method, cmethod, fsig, call_opcode))
9472 supported_tail_call = TRUE;
9474 if (supported_tail_call) {
9477 /* Prevent inlining of methods with tail calls (the call stack would be altered) */
9478 INLINE_FAILURE ("tail call");
9480 //printf ("HIT: %s -> %s\n", mono_method_full_name (cfg->method, TRUE), mono_method_full_name (cmethod, TRUE));
9482 if (ARCH_HAVE_OP_TAIL_CALL) {
9483 /* Handle tail calls similarly to normal calls */
9486 emit_instrumentation_call (cfg, mono_profiler_method_leave);
9488 MONO_INST_NEW_CALL (cfg, call, OP_JMP);
9489 call->tail_call = TRUE;
9490 call->method = cmethod;
9491 call->signature = mono_method_signature (cmethod);
9494 * We implement tail calls by storing the actual arguments into the
9495 * argument variables, then emitting a CEE_JMP.
9497 for (i = 0; i < n; ++i) {
9498 /* Prevent argument from being register allocated */
9499 arg_array [i]->flags |= MONO_INST_VOLATILE;
9500 EMIT_NEW_ARGSTORE (cfg, ins, i, sp [i]);
9502 ins = (MonoInst*)call;
9503 ins->inst_p0 = cmethod;
9504 ins->inst_p1 = arg_array [0];
9505 MONO_ADD_INS (bblock, ins);
9506 link_bblock (cfg, bblock, end_bblock);
9507 start_new_bblock = 1;
9509 // FIXME: Eliminate unreachable epilogs
9512 * OP_TAILCALL has no return value, so skip the CEE_RET if it is
9513 * only reachable from this call.
9515 GET_BBLOCK (cfg, tblock, ip + 5);
9516 if (tblock == bblock || tblock->in_count == 0)
9525 * Synchronized wrappers.
9526 * It's hard to determine where to replace a method with its synchronized
9527 * wrapper without causing an infinite recursion. The current solution is
9528 * to add the synchronized wrapper in the trampolines, and to
9529 * change the called method to a dummy wrapper, and resolve that wrapper
9530 * to the real method in mono_jit_compile_method ().
9532 if (cfg->method->wrapper_type == MONO_WRAPPER_SYNCHRONIZED) {
9533 MonoMethod *orig = mono_marshal_method_from_wrapper (cfg->method);
9534 if (cmethod == orig || (cmethod->is_inflated && mono_method_get_declaring_generic_method (cmethod) == orig))
9535 cmethod = mono_marshal_get_synchronized_inner_wrapper (cmethod);
9539 INLINE_FAILURE ("call");
9540 ins = mono_emit_method_call_full (cfg, cmethod, fsig, tail_call, sp, virtual ? sp [0] : NULL,
9541 imt_arg, vtable_arg);
9544 link_bblock (cfg, bblock, end_bblock);
9545 start_new_bblock = 1;
9547 // FIXME: Eliminate unreachable epilogs
9550 * OP_TAILCALL has no return value, so skip the CEE_RET if it is
9551 * only reachable from this call.
9553 GET_BBLOCK (cfg, tblock, ip + 5);
9554 if (tblock == bblock || tblock->in_count == 0)
9561 /* End of call, INS should contain the result of the call, if any */
9563 if (push_res && !MONO_TYPE_IS_VOID (fsig->ret)) {
9566 *sp++ = mono_emit_widen_call_res (cfg, ins, fsig);
9571 if (keep_this_alive) {
9572 MonoInst *dummy_use;
9574 /* See mono_emit_method_call_full () */
9575 EMIT_NEW_DUMMY_USE (cfg, dummy_use, keep_this_alive);
9578 CHECK_CFG_EXCEPTION;
9582 g_assert (*ip == CEE_RET);
9586 constrained_class = NULL;
9588 emit_seq_point (cfg, method, ip, FALSE, TRUE);
9592 if (cfg->method != method) {
9593 /* return from inlined method */
9595 * If in_count == 0, that means the ret is unreachable due to
9596 * being preceded by a throw. In that case, inline_method () will
9597 * handle setting the return value
9598 * (test case: test_0_inline_throw ()).
9600 if (return_var && cfg->cbb->in_count) {
9601 MonoType *ret_type = mono_method_signature (method)->ret;
9607 if ((method->wrapper_type == MONO_WRAPPER_DYNAMIC_METHOD || method->wrapper_type == MONO_WRAPPER_NONE) && target_type_is_incompatible (cfg, ret_type, *sp))
9610 //g_assert (returnvar != -1);
9611 EMIT_NEW_TEMPSTORE (cfg, store, return_var->inst_c0, *sp);
9612 cfg->ret_var_set = TRUE;
9615 emit_instrumentation_call (cfg, mono_profiler_method_leave);
9617 if (cfg->lmf_var && cfg->cbb->in_count)
9621 MonoType *ret_type = mini_get_underlying_type (cfg, mono_method_signature (method)->ret);
9623 if (seq_points && !sym_seq_points) {
9625 * Place a seq point here too even though the IL stack is not
9626 * empty, so a step over on
9629 * will work correctly.
9631 NEW_SEQ_POINT (cfg, ins, ip - header->code, TRUE);
9632 MONO_ADD_INS (cfg->cbb, ins);
9635 g_assert (!return_var);
9639 if ((method->wrapper_type == MONO_WRAPPER_DYNAMIC_METHOD || method->wrapper_type == MONO_WRAPPER_NONE) && target_type_is_incompatible (cfg, ret_type, *sp))
9642 if (mini_type_to_stind (cfg, ret_type) == CEE_STOBJ) {
9645 if (!cfg->vret_addr) {
9648 EMIT_NEW_VARSTORE (cfg, ins, cfg->ret, ret_type, (*sp));
9650 EMIT_NEW_RETLOADA (cfg, ret_addr);
9652 EMIT_NEW_STORE_MEMBASE (cfg, ins, OP_STOREV_MEMBASE, ret_addr->dreg, 0, (*sp)->dreg);
9653 ins->klass = mono_class_from_mono_type (ret_type);
9656 #ifdef MONO_ARCH_SOFT_FLOAT_FALLBACK
9657 if (COMPILE_SOFT_FLOAT (cfg) && !ret_type->byref && ret_type->type == MONO_TYPE_R4) {
9658 MonoInst *iargs [1];
9662 conv = mono_emit_jit_icall (cfg, mono_fload_r4_arg, iargs);
9663 mono_arch_emit_setret (cfg, method, conv);
9665 mono_arch_emit_setret (cfg, method, *sp);
9668 mono_arch_emit_setret (cfg, method, *sp);
9673 if (sp != stack_start)
9675 MONO_INST_NEW (cfg, ins, OP_BR);
9677 ins->inst_target_bb = end_bblock;
9678 MONO_ADD_INS (bblock, ins);
9679 link_bblock (cfg, bblock, end_bblock);
9680 start_new_bblock = 1;
9684 MONO_INST_NEW (cfg, ins, OP_BR);
9686 target = ip + 1 + (signed char)(*ip);
9688 GET_BBLOCK (cfg, tblock, target);
9689 link_bblock (cfg, bblock, tblock);
9690 ins->inst_target_bb = tblock;
9691 if (sp != stack_start) {
9692 handle_stack_args (cfg, stack_start, sp - stack_start);
9694 CHECK_UNVERIFIABLE (cfg);
9696 MONO_ADD_INS (bblock, ins);
9697 start_new_bblock = 1;
9698 inline_costs += BRANCH_COST;
9712 MONO_INST_NEW (cfg, ins, *ip + BIG_BRANCH_OFFSET);
9714 target = ip + 1 + *(signed char*)ip;
9720 inline_costs += BRANCH_COST;
9724 MONO_INST_NEW (cfg, ins, OP_BR);
9727 target = ip + 4 + (gint32)read32(ip);
9729 GET_BBLOCK (cfg, tblock, target);
9730 link_bblock (cfg, bblock, tblock);
9731 ins->inst_target_bb = tblock;
9732 if (sp != stack_start) {
9733 handle_stack_args (cfg, stack_start, sp - stack_start);
9735 CHECK_UNVERIFIABLE (cfg);
9738 MONO_ADD_INS (bblock, ins);
9740 start_new_bblock = 1;
9741 inline_costs += BRANCH_COST;
9748 gboolean is_short = ((*ip) == CEE_BRFALSE_S) || ((*ip) == CEE_BRTRUE_S);
9749 gboolean is_true = ((*ip) == CEE_BRTRUE_S) || ((*ip) == CEE_BRTRUE);
9750 guint32 opsize = is_short ? 1 : 4;
9752 CHECK_OPSIZE (opsize);
9754 if (sp [-1]->type == STACK_VTYPE || sp [-1]->type == STACK_R8)
9757 target = ip + opsize + (is_short ? *(signed char*)ip : (gint32)read32(ip));
9762 GET_BBLOCK (cfg, tblock, target);
9763 link_bblock (cfg, bblock, tblock);
9764 GET_BBLOCK (cfg, tblock, ip);
9765 link_bblock (cfg, bblock, tblock);
9767 if (sp != stack_start) {
9768 handle_stack_args (cfg, stack_start, sp - stack_start);
9769 CHECK_UNVERIFIABLE (cfg);
9772 MONO_INST_NEW(cfg, cmp, OP_ICOMPARE_IMM);
9773 cmp->sreg1 = sp [0]->dreg;
9774 type_from_op (cfg, cmp, sp [0], NULL);
9777 #if SIZEOF_REGISTER == 4
9778 if (cmp->opcode == OP_LCOMPARE_IMM) {
9779 /* Convert it to OP_LCOMPARE */
9780 MONO_INST_NEW (cfg, ins, OP_I8CONST);
9781 ins->type = STACK_I8;
9782 ins->dreg = alloc_dreg (cfg, STACK_I8);
9784 MONO_ADD_INS (bblock, ins);
9785 cmp->opcode = OP_LCOMPARE;
9786 cmp->sreg2 = ins->dreg;
9789 MONO_ADD_INS (bblock, cmp);
9791 MONO_INST_NEW (cfg, ins, is_true ? CEE_BNE_UN : CEE_BEQ);
9792 type_from_op (cfg, ins, sp [0], NULL);
9793 MONO_ADD_INS (bblock, ins);
9794 ins->inst_many_bb = mono_mempool_alloc (cfg->mempool, sizeof(gpointer)*2);
9795 GET_BBLOCK (cfg, tblock, target);
9796 ins->inst_true_bb = tblock;
9797 GET_BBLOCK (cfg, tblock, ip);
9798 ins->inst_false_bb = tblock;
9799 start_new_bblock = 2;
9802 inline_costs += BRANCH_COST;
9817 MONO_INST_NEW (cfg, ins, *ip);
9819 target = ip + 4 + (gint32)read32(ip);
9825 inline_costs += BRANCH_COST;
9829 MonoBasicBlock **targets;
9830 MonoBasicBlock *default_bblock;
9831 MonoJumpInfoBBTable *table;
9832 int offset_reg = alloc_preg (cfg);
9833 int target_reg = alloc_preg (cfg);
9834 int table_reg = alloc_preg (cfg);
9835 int sum_reg = alloc_preg (cfg);
9836 gboolean use_op_switch;
9840 n = read32 (ip + 1);
9843 if ((src1->type != STACK_I4) && (src1->type != STACK_PTR))
9847 CHECK_OPSIZE (n * sizeof (guint32));
9848 target = ip + n * sizeof (guint32);
9850 GET_BBLOCK (cfg, default_bblock, target);
9851 default_bblock->flags |= BB_INDIRECT_JUMP_TARGET;
9853 targets = mono_mempool_alloc (cfg->mempool, sizeof (MonoBasicBlock*) * n);
9854 for (i = 0; i < n; ++i) {
9855 GET_BBLOCK (cfg, tblock, target + (gint32)read32(ip));
9856 targets [i] = tblock;
9857 targets [i]->flags |= BB_INDIRECT_JUMP_TARGET;
9861 if (sp != stack_start) {
9863 * Link the current bb with the targets as well, so handle_stack_args
9864 * will set their in_stack correctly.
9866 link_bblock (cfg, bblock, default_bblock);
9867 for (i = 0; i < n; ++i)
9868 link_bblock (cfg, bblock, targets [i]);
9870 handle_stack_args (cfg, stack_start, sp - stack_start);
9872 CHECK_UNVERIFIABLE (cfg);
9875 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_ICOMPARE_IMM, -1, src1->dreg, n);
9876 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_IBGE_UN, default_bblock);
9879 for (i = 0; i < n; ++i)
9880 link_bblock (cfg, bblock, targets [i]);
9882 table = mono_mempool_alloc (cfg->mempool, sizeof (MonoJumpInfoBBTable));
9883 table->table = targets;
9884 table->table_size = n;
9886 use_op_switch = FALSE;
9888 /* ARM implements SWITCH statements differently */
9889 /* FIXME: Make it use the generic implementation */
9890 if (!cfg->compile_aot)
9891 use_op_switch = TRUE;
9894 if (COMPILE_LLVM (cfg))
9895 use_op_switch = TRUE;
9897 cfg->cbb->has_jump_table = 1;
9899 if (use_op_switch) {
9900 MONO_INST_NEW (cfg, ins, OP_SWITCH);
9901 ins->sreg1 = src1->dreg;
9902 ins->inst_p0 = table;
9903 ins->inst_many_bb = targets;
9904 ins->klass = GUINT_TO_POINTER (n);
9905 MONO_ADD_INS (cfg->cbb, ins);
9907 if (sizeof (gpointer) == 8)
9908 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_SHL_IMM, offset_reg, src1->dreg, 3);
9910 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_SHL_IMM, offset_reg, src1->dreg, 2);
9912 #if SIZEOF_REGISTER == 8
9913 /* The upper word might not be zero, and we add it to a 64 bit address later */
9914 MONO_EMIT_NEW_UNALU (cfg, OP_ZEXT_I4, offset_reg, offset_reg);
9917 if (cfg->compile_aot) {
9918 MONO_EMIT_NEW_AOTCONST (cfg, table_reg, table, MONO_PATCH_INFO_SWITCH);
9920 MONO_INST_NEW (cfg, ins, OP_JUMP_TABLE);
9921 ins->inst_c1 = MONO_PATCH_INFO_SWITCH;
9922 ins->inst_p0 = table;
9923 ins->dreg = table_reg;
9924 MONO_ADD_INS (cfg->cbb, ins);
9927 /* FIXME: Use load_memindex */
9928 MONO_EMIT_NEW_BIALU (cfg, OP_PADD, sum_reg, table_reg, offset_reg);
9929 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, target_reg, sum_reg, 0);
9930 MONO_EMIT_NEW_UNALU (cfg, OP_BR_REG, -1, target_reg);
9932 start_new_bblock = 1;
9933 inline_costs += (BRANCH_COST * 2);
9953 dreg = alloc_freg (cfg);
9956 dreg = alloc_lreg (cfg);
9959 dreg = alloc_ireg_ref (cfg);
9962 dreg = alloc_preg (cfg);
9965 NEW_LOAD_MEMBASE (cfg, ins, ldind_to_load_membase (*ip), dreg, sp [0]->dreg, 0);
9966 ins->type = ldind_type [*ip - CEE_LDIND_I1];
9967 if (*ip == CEE_LDIND_R4)
9968 ins->type = cfg->r4_stack_type;
9969 ins->flags |= ins_flag;
9970 MONO_ADD_INS (bblock, ins);
9972 if (ins_flag & MONO_INST_VOLATILE) {
9973 /* Volatile loads have acquire semantics, see 12.6.7 in Ecma 335 */
9974 emit_memory_barrier (cfg, MONO_MEMORY_BARRIER_ACQ);
9990 if (ins_flag & MONO_INST_VOLATILE) {
9991 /* Volatile stores have release semantics, see 12.6.7 in Ecma 335 */
9992 emit_memory_barrier (cfg, MONO_MEMORY_BARRIER_REL);
9995 NEW_STORE_MEMBASE (cfg, ins, stind_to_store_membase (*ip), sp [0]->dreg, 0, sp [1]->dreg);
9996 ins->flags |= ins_flag;
9999 MONO_ADD_INS (bblock, ins);
10001 if (cfg->gen_write_barriers && *ip == CEE_STIND_REF && method->wrapper_type != MONO_WRAPPER_WRITE_BARRIER && !((sp [1]->opcode == OP_PCONST) && (sp [1]->inst_p0 == 0)))
10002 emit_write_barrier (cfg, sp [0], sp [1]);
10011 MONO_INST_NEW (cfg, ins, (*ip));
10013 ins->sreg1 = sp [0]->dreg;
10014 ins->sreg2 = sp [1]->dreg;
10015 type_from_op (cfg, ins, sp [0], sp [1]);
10017 ins->dreg = alloc_dreg ((cfg), (ins)->type);
10019 /* Use the immediate opcodes if possible */
10020 if ((sp [1]->opcode == OP_ICONST) && mono_arch_is_inst_imm (sp [1]->inst_c0)) {
10021 int imm_opcode = mono_op_to_op_imm_noemul (ins->opcode);
10022 if (imm_opcode != -1) {
10023 ins->opcode = imm_opcode;
10024 ins->inst_p1 = (gpointer)(gssize)(sp [1]->inst_c0);
10027 NULLIFY_INS (sp [1]);
10031 MONO_ADD_INS ((cfg)->cbb, (ins));
10033 *sp++ = mono_decompose_opcode (cfg, ins, &bblock);
10050 MONO_INST_NEW (cfg, ins, (*ip));
10052 ins->sreg1 = sp [0]->dreg;
10053 ins->sreg2 = sp [1]->dreg;
10054 type_from_op (cfg, ins, sp [0], sp [1]);
10056 add_widen_op (cfg, ins, &sp [0], &sp [1]);
10057 ins->dreg = alloc_dreg ((cfg), (ins)->type);
10059 /* FIXME: Pass opcode to is_inst_imm */
10061 /* Use the immediate opcodes if possible */
10062 if (((sp [1]->opcode == OP_ICONST) || (sp [1]->opcode == OP_I8CONST)) && mono_arch_is_inst_imm (sp [1]->opcode == OP_ICONST ? sp [1]->inst_c0 : sp [1]->inst_l)) {
10065 imm_opcode = mono_op_to_op_imm_noemul (ins->opcode);
10066 #if defined(MONO_ARCH_EMULATE_MUL_DIV) || defined(MONO_ARCH_EMULATE_DIV)
10067 /* Keep emulated opcodes which are optimized away later */
10068 if ((ins->opcode == OP_IREM_UN || ins->opcode == OP_IDIV_UN_IMM) && (cfg->opt & (MONO_OPT_CONSPROP | MONO_OPT_COPYPROP)) && sp [1]->opcode == OP_ICONST && mono_is_power_of_two (sp [1]->inst_c0) >= 0) {
10069 imm_opcode = mono_op_to_op_imm (ins->opcode);
10072 if (imm_opcode != -1) {
10073 ins->opcode = imm_opcode;
10074 if (sp [1]->opcode == OP_I8CONST) {
10075 #if SIZEOF_REGISTER == 8
10076 ins->inst_imm = sp [1]->inst_l;
10078 ins->inst_ls_word = sp [1]->inst_ls_word;
10079 ins->inst_ms_word = sp [1]->inst_ms_word;
10083 ins->inst_imm = (gssize)(sp [1]->inst_c0);
10086 /* Might be followed by an instruction added by add_widen_op */
10087 if (sp [1]->next == NULL)
10088 NULLIFY_INS (sp [1]);
10091 MONO_ADD_INS ((cfg)->cbb, (ins));
10093 *sp++ = mono_decompose_opcode (cfg, ins, &bblock);
10106 case CEE_CONV_OVF_I8:
10107 case CEE_CONV_OVF_U8:
10108 case CEE_CONV_R_UN:
10111 /* Special case this earlier so we have long constants in the IR */
10112 if ((((*ip) == CEE_CONV_I8) || ((*ip) == CEE_CONV_U8)) && (sp [-1]->opcode == OP_ICONST)) {
10113 int data = sp [-1]->inst_c0;
10114 sp [-1]->opcode = OP_I8CONST;
10115 sp [-1]->type = STACK_I8;
10116 #if SIZEOF_REGISTER == 8
10117 if ((*ip) == CEE_CONV_U8)
10118 sp [-1]->inst_c0 = (guint32)data;
10120 sp [-1]->inst_c0 = data;
10122 sp [-1]->inst_ls_word = data;
10123 if ((*ip) == CEE_CONV_U8)
10124 sp [-1]->inst_ms_word = 0;
10126 sp [-1]->inst_ms_word = (data < 0) ? -1 : 0;
10128 sp [-1]->dreg = alloc_dreg (cfg, STACK_I8);
10135 case CEE_CONV_OVF_I4:
10136 case CEE_CONV_OVF_I1:
10137 case CEE_CONV_OVF_I2:
10138 case CEE_CONV_OVF_I:
10139 case CEE_CONV_OVF_U:
10142 if (sp [-1]->type == STACK_R8 || sp [-1]->type == STACK_R4) {
10143 ADD_UNOP (CEE_CONV_OVF_I8);
10150 case CEE_CONV_OVF_U1:
10151 case CEE_CONV_OVF_U2:
10152 case CEE_CONV_OVF_U4:
10155 if (sp [-1]->type == STACK_R8 || sp [-1]->type == STACK_R4) {
10156 ADD_UNOP (CEE_CONV_OVF_U8);
10163 case CEE_CONV_OVF_I1_UN:
10164 case CEE_CONV_OVF_I2_UN:
10165 case CEE_CONV_OVF_I4_UN:
10166 case CEE_CONV_OVF_I8_UN:
10167 case CEE_CONV_OVF_U1_UN:
10168 case CEE_CONV_OVF_U2_UN:
10169 case CEE_CONV_OVF_U4_UN:
10170 case CEE_CONV_OVF_U8_UN:
10171 case CEE_CONV_OVF_I_UN:
10172 case CEE_CONV_OVF_U_UN:
10179 CHECK_CFG_EXCEPTION;
10183 case CEE_ADD_OVF_UN:
10185 case CEE_MUL_OVF_UN:
10187 case CEE_SUB_OVF_UN:
10193 GSHAREDVT_FAILURE (*ip);
10196 token = read32 (ip + 1);
10197 klass = mini_get_class (method, token, generic_context);
10198 CHECK_TYPELOAD (klass);
10200 if (generic_class_is_reference_type (cfg, klass)) {
10201 MonoInst *store, *load;
10202 int dreg = alloc_ireg_ref (cfg);
10204 NEW_LOAD_MEMBASE (cfg, load, OP_LOAD_MEMBASE, dreg, sp [1]->dreg, 0);
10205 load->flags |= ins_flag;
10206 MONO_ADD_INS (cfg->cbb, load);
10208 NEW_STORE_MEMBASE (cfg, store, OP_STORE_MEMBASE_REG, sp [0]->dreg, 0, dreg);
10209 store->flags |= ins_flag;
10210 MONO_ADD_INS (cfg->cbb, store);
10212 if (cfg->gen_write_barriers && cfg->method->wrapper_type != MONO_WRAPPER_WRITE_BARRIER)
10213 emit_write_barrier (cfg, sp [0], sp [1]);
10215 mini_emit_stobj (cfg, sp [0], sp [1], klass, FALSE);
10221 int loc_index = -1;
10227 token = read32 (ip + 1);
10228 klass = mini_get_class (method, token, generic_context);
10229 CHECK_TYPELOAD (klass);
10231 /* Optimize the common ldobj+stloc combination */
10234 loc_index = ip [6];
10241 loc_index = ip [5] - CEE_STLOC_0;
10248 if ((loc_index != -1) && ip_in_bb (cfg, bblock, ip + 5)) {
10249 CHECK_LOCAL (loc_index);
10251 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, ins, &klass->byval_arg, sp [0]->dreg, 0);
10252 ins->dreg = cfg->locals [loc_index]->dreg;
10253 ins->flags |= ins_flag;
10256 if (ins_flag & MONO_INST_VOLATILE) {
10257 /* Volatile loads have acquire semantics, see 12.6.7 in Ecma 335 */
10258 emit_memory_barrier (cfg, MONO_MEMORY_BARRIER_ACQ);
10264 /* Optimize the ldobj+stobj combination */
10265 /* The reference case ends up being a load+store anyway */
10266 /* Skip this if the operation is volatile. */
10267 if (((ip [5] == CEE_STOBJ) && ip_in_bb (cfg, bblock, ip + 5) && read32 (ip + 6) == token) && !generic_class_is_reference_type (cfg, klass) && !(ins_flag & MONO_INST_VOLATILE)) {
10272 mini_emit_stobj (cfg, sp [0], sp [1], klass, FALSE);
10279 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, ins, &klass->byval_arg, sp [0]->dreg, 0);
10280 ins->flags |= ins_flag;
10283 if (ins_flag & MONO_INST_VOLATILE) {
10284 /* Volatile loads have acquire semantics, see 12.6.7 in Ecma 335 */
10285 emit_memory_barrier (cfg, MONO_MEMORY_BARRIER_ACQ);
10294 CHECK_STACK_OVF (1);
10296 n = read32 (ip + 1);
10298 if (method->wrapper_type == MONO_WRAPPER_DYNAMIC_METHOD) {
10299 EMIT_NEW_PCONST (cfg, ins, mono_method_get_wrapper_data (method, n));
10300 ins->type = STACK_OBJ;
10303 else if (method->wrapper_type != MONO_WRAPPER_NONE) {
10304 MonoInst *iargs [1];
10305 char *str = mono_method_get_wrapper_data (method, n);
10307 if (cfg->compile_aot)
10308 EMIT_NEW_LDSTRLITCONST (cfg, iargs [0], str);
10310 EMIT_NEW_PCONST (cfg, iargs [0], str);
10311 *sp = mono_emit_jit_icall (cfg, mono_string_new_wrapper, iargs);
10313 if (cfg->opt & MONO_OPT_SHARED) {
10314 MonoInst *iargs [3];
10316 if (cfg->compile_aot) {
10317 cfg->ldstr_list = g_list_prepend (cfg->ldstr_list, GINT_TO_POINTER (n));
10319 EMIT_NEW_DOMAINCONST (cfg, iargs [0]);
10320 EMIT_NEW_IMAGECONST (cfg, iargs [1], image);
10321 EMIT_NEW_ICONST (cfg, iargs [2], mono_metadata_token_index (n));
10322 *sp = mono_emit_jit_icall (cfg, mono_ldstr, iargs);
10323 mono_ldstr (cfg->domain, image, mono_metadata_token_index (n));
10325 if (bblock->out_of_line) {
10326 MonoInst *iargs [2];
10328 if (image == mono_defaults.corlib) {
10330 * Avoid relocations in AOT and save some space by using a
10331 * version of helper_ldstr specialized to mscorlib.
10333 EMIT_NEW_ICONST (cfg, iargs [0], mono_metadata_token_index (n));
10334 *sp = mono_emit_jit_icall (cfg, mono_helper_ldstr_mscorlib, iargs);
10336 /* Avoid creating the string object */
10337 EMIT_NEW_IMAGECONST (cfg, iargs [0], image);
10338 EMIT_NEW_ICONST (cfg, iargs [1], mono_metadata_token_index (n));
10339 *sp = mono_emit_jit_icall (cfg, mono_helper_ldstr, iargs);
10343 if (cfg->compile_aot) {
10344 NEW_LDSTRCONST (cfg, ins, image, n);
10346 MONO_ADD_INS (bblock, ins);
10349 NEW_PCONST (cfg, ins, NULL);
10350 ins->type = STACK_OBJ;
10351 ins->inst_p0 = mono_ldstr (cfg->domain, image, mono_metadata_token_index (n));
10353 OUT_OF_MEMORY_FAILURE;
10356 MONO_ADD_INS (bblock, ins);
10365 MonoInst *iargs [2];
10366 MonoMethodSignature *fsig;
10369 MonoInst *vtable_arg = NULL;
10372 token = read32 (ip + 1);
10373 cmethod = mini_get_method (cfg, method, token, NULL, generic_context);
10374 if (!cmethod || mono_loader_get_last_error ())
10376 fsig = mono_method_get_signature_checked (cmethod, image, token, generic_context, &cfg->error);
10379 mono_save_token_info (cfg, image, token, cmethod);
10381 if (!mono_class_init (cmethod->klass))
10382 TYPE_LOAD_ERROR (cmethod->klass);
10384 context_used = mini_method_check_context_used (cfg, cmethod);
10386 if (mono_security_cas_enabled ()) {
10387 if (check_linkdemand (cfg, method, cmethod))
10388 INLINE_FAILURE ("linkdemand");
10389 CHECK_CFG_EXCEPTION;
10390 } else if (mono_security_core_clr_enabled ()) {
10391 ensure_method_is_allowed_to_call_method (cfg, method, cmethod, bblock, ip);
10394 if (cfg->generic_sharing_context && cmethod && cmethod->klass != method->klass && cmethod->klass->generic_class && mono_method_is_generic_sharable (cmethod, TRUE) && mono_class_needs_cctor_run (cmethod->klass, method)) {
10395 emit_generic_class_init (cfg, cmethod->klass);
10396 CHECK_TYPELOAD (cmethod->klass);
10400 if (cfg->gsharedvt) {
10401 if (mini_is_gsharedvt_variable_signature (sig))
10402 GSHAREDVT_FAILURE (*ip);
10406 n = fsig->param_count;
10410 * Generate smaller code for the common newobj <exception> instruction in
10411 * argument checking code.
10413 if (bblock->out_of_line && cmethod->klass->image == mono_defaults.corlib &&
10414 is_exception_class (cmethod->klass) && n <= 2 &&
10415 ((n < 1) || (!fsig->params [0]->byref && fsig->params [0]->type == MONO_TYPE_STRING)) &&
10416 ((n < 2) || (!fsig->params [1]->byref && fsig->params [1]->type == MONO_TYPE_STRING))) {
10417 MonoInst *iargs [3];
10421 EMIT_NEW_ICONST (cfg, iargs [0], cmethod->klass->type_token);
10424 *sp ++ = mono_emit_jit_icall (cfg, mono_create_corlib_exception_0, iargs);
10427 iargs [1] = sp [0];
10428 *sp ++ = mono_emit_jit_icall (cfg, mono_create_corlib_exception_1, iargs);
10431 iargs [1] = sp [0];
10432 iargs [2] = sp [1];
10433 *sp ++ = mono_emit_jit_icall (cfg, mono_create_corlib_exception_2, iargs);
10436 g_assert_not_reached ();
10444 /* move the args to allow room for 'this' in the first position */
10450 /* check_call_signature () requires sp[0] to be set */
10451 this_ins.type = STACK_OBJ;
10452 sp [0] = &this_ins;
10453 if (check_call_signature (cfg, fsig, sp))
10458 if (mini_class_is_system_array (cmethod->klass)) {
10459 *sp = emit_get_rgctx_method (cfg, context_used,
10460 cmethod, MONO_RGCTX_INFO_METHOD);
10462 /* Avoid varargs in the common case */
10463 if (fsig->param_count == 1)
10464 alloc = mono_emit_jit_icall (cfg, mono_array_new_1, sp);
10465 else if (fsig->param_count == 2)
10466 alloc = mono_emit_jit_icall (cfg, mono_array_new_2, sp);
10467 else if (fsig->param_count == 3)
10468 alloc = mono_emit_jit_icall (cfg, mono_array_new_3, sp);
10469 else if (fsig->param_count == 4)
10470 alloc = mono_emit_jit_icall (cfg, mono_array_new_4, sp);
10472 alloc = handle_array_new (cfg, fsig->param_count, sp, ip);
10473 } else if (cmethod->string_ctor) {
10474 g_assert (!context_used);
10475 g_assert (!vtable_arg);
10476 /* we simply pass a null pointer */
10477 EMIT_NEW_PCONST (cfg, *sp, NULL);
10478 /* now call the string ctor */
10479 alloc = mono_emit_method_call_full (cfg, cmethod, fsig, FALSE, sp, NULL, NULL, NULL);
10481 if (cmethod->klass->valuetype) {
10482 iargs [0] = mono_compile_create_var (cfg, &cmethod->klass->byval_arg, OP_LOCAL);
10483 emit_init_rvar (cfg, iargs [0]->dreg, &cmethod->klass->byval_arg);
10484 EMIT_NEW_TEMPLOADA (cfg, *sp, iargs [0]->inst_c0);
10489 * The code generated by mini_emit_virtual_call () expects
10490 * iargs [0] to be a boxed instance, but luckily the vcall
10491 * will be transformed into a normal call there.
10493 } else if (context_used) {
10494 alloc = handle_alloc (cfg, cmethod->klass, FALSE, context_used);
10497 MonoVTable *vtable = NULL;
10499 if (!cfg->compile_aot)
10500 vtable = mono_class_vtable (cfg->domain, cmethod->klass);
10501 CHECK_TYPELOAD (cmethod->klass);
10504 * TypeInitializationExceptions thrown from the mono_runtime_class_init
10505 * call in mono_jit_runtime_invoke () can abort the finalizer thread.
10506 * As a workaround, we call class cctors before allocating objects.
10508 if (mini_field_access_needs_cctor_run (cfg, method, cmethod->klass, vtable) && !(g_slist_find (class_inits, cmethod->klass))) {
10509 mono_emit_abs_call (cfg, MONO_PATCH_INFO_CLASS_INIT, cmethod->klass, helper_sig_class_init_trampoline, NULL);
10510 if (cfg->verbose_level > 2)
10511 printf ("class %s.%s needs init call for ctor\n", cmethod->klass->name_space, cmethod->klass->name);
10512 class_inits = g_slist_prepend (class_inits, cmethod->klass);
10515 alloc = handle_alloc (cfg, cmethod->klass, FALSE, 0);
10518 CHECK_CFG_EXCEPTION; /*for handle_alloc*/
10521 MONO_EMIT_NEW_UNALU (cfg, OP_NOT_NULL, -1, alloc->dreg);
10523 /* Now call the actual ctor */
10524 handle_ctor_call (cfg, cmethod, fsig, context_used, sp, ip, &bblock, &inline_costs);
10525 CHECK_CFG_EXCEPTION;
10528 if (alloc == NULL) {
10530 EMIT_NEW_TEMPLOAD (cfg, ins, iargs [0]->inst_c0);
10531 type_to_eval_stack_type (cfg, &ins->klass->byval_arg, ins);
10539 if (!(seq_point_locs && mono_bitset_test_fast (seq_point_locs, ip - header->code)))
10540 emit_seq_point (cfg, method, ip, FALSE, TRUE);
10543 case CEE_CASTCLASS:
10547 token = read32 (ip + 1);
10548 klass = mini_get_class (method, token, generic_context);
10549 CHECK_TYPELOAD (klass);
10550 if (sp [0]->type != STACK_OBJ)
10553 ins = handle_castclass (cfg, klass, *sp, ip, &bblock, &inline_costs);
10554 CHECK_CFG_EXCEPTION;
10563 token = read32 (ip + 1);
10564 klass = mini_get_class (method, token, generic_context);
10565 CHECK_TYPELOAD (klass);
10566 if (sp [0]->type != STACK_OBJ)
10569 context_used = mini_class_check_context_used (cfg, klass);
10571 if (!context_used && mini_class_has_reference_variant_generic_argument (cfg, klass, context_used)) {
10572 MonoMethod *mono_isinst = mono_marshal_get_isinst_with_cache ();
10573 MonoInst *args [3];
10580 EMIT_NEW_CLASSCONST (cfg, args [1], klass);
10583 if (cfg->compile_aot) {
10584 idx = get_castclass_cache_idx (cfg);
10585 EMIT_NEW_AOTCONST (cfg, args [2], MONO_PATCH_INFO_CASTCLASS_CACHE, GINT_TO_POINTER (idx));
10587 EMIT_NEW_PCONST (cfg, args [2], mono_domain_alloc0 (cfg->domain, sizeof (gpointer)));
10590 *sp++ = mono_emit_method_call (cfg, mono_isinst, args, NULL);
10593 } else if (!context_used && (mono_class_is_marshalbyref (klass) || klass->flags & TYPE_ATTRIBUTE_INTERFACE)) {
10594 MonoMethod *mono_isinst;
10595 MonoInst *iargs [1];
10598 mono_isinst = mono_marshal_get_isinst (klass);
10599 iargs [0] = sp [0];
10601 costs = inline_method (cfg, mono_isinst, mono_method_signature (mono_isinst),
10602 iargs, ip, cfg->real_offset, TRUE, &bblock);
10603 CHECK_CFG_EXCEPTION;
10604 g_assert (costs > 0);
10607 cfg->real_offset += 5;
10611 inline_costs += costs;
10614 ins = handle_isinst (cfg, klass, *sp, context_used);
10615 CHECK_CFG_EXCEPTION;
10622 case CEE_UNBOX_ANY: {
10623 MonoInst *res, *addr;
10628 token = read32 (ip + 1);
10629 klass = mini_get_class (method, token, generic_context);
10630 CHECK_TYPELOAD (klass);
10632 mono_save_token_info (cfg, image, token, klass);
10634 context_used = mini_class_check_context_used (cfg, klass);
10636 if (mini_is_gsharedvt_klass (cfg, klass)) {
10637 res = handle_unbox_gsharedvt (cfg, klass, *sp, &bblock);
10639 } else if (generic_class_is_reference_type (cfg, klass)) {
10640 res = handle_castclass (cfg, klass, *sp, ip, &bblock, &inline_costs);
10641 CHECK_CFG_EXCEPTION;
10642 } else if (mono_class_is_nullable (klass)) {
10643 res = handle_unbox_nullable (cfg, *sp, klass, context_used);
10645 addr = handle_unbox (cfg, klass, sp, context_used);
10647 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, ins, &klass->byval_arg, addr->dreg, 0);
10658 MonoClass *enum_class;
10659 MonoMethod *has_flag;
10665 token = read32 (ip + 1);
10666 klass = mini_get_class (method, token, generic_context);
10667 CHECK_TYPELOAD (klass);
10669 mono_save_token_info (cfg, image, token, klass);
10671 context_used = mini_class_check_context_used (cfg, klass);
10673 if (generic_class_is_reference_type (cfg, klass)) {
10679 if (klass == mono_defaults.void_class)
10681 if (target_type_is_incompatible (cfg, &klass->byval_arg, *sp))
10683 /* frequent check in generic code: box (struct), brtrue */
10688 * <push int/long ptr>
10691 * constrained. MyFlags
10692 * callvirt instace bool class [mscorlib] System.Enum::HasFlag (class [mscorlib] System.Enum)
10694 * If we find this sequence and the operand types on box and constrained
10695 * are equal, we can emit a specialized instruction sequence instead of
10696 * the very slow HasFlag () call.
10698 if ((cfg->opt & MONO_OPT_INTRINS) &&
10699 /* Cheap checks first. */
10700 ip + 5 + 6 + 5 < end &&
10701 ip [5] == CEE_PREFIX1 &&
10702 ip [6] == CEE_CONSTRAINED_ &&
10703 ip [11] == CEE_CALLVIRT &&
10704 ip_in_bb (cfg, bblock, ip + 5 + 6 + 5) &&
10705 mono_class_is_enum (klass) &&
10706 (enum_class = mini_get_class (method, read32 (ip + 7), generic_context)) &&
10707 (has_flag = mini_get_method (cfg, method, read32 (ip + 12), NULL, generic_context)) &&
10708 has_flag->klass == mono_defaults.enum_class &&
10709 !strcmp (has_flag->name, "HasFlag") &&
10710 has_flag->signature->hasthis &&
10711 has_flag->signature->param_count == 1) {
10712 CHECK_TYPELOAD (enum_class);
10714 if (enum_class == klass) {
10715 MonoInst *enum_this, *enum_flag;
10720 enum_this = sp [0];
10721 enum_flag = sp [1];
10723 *sp++ = handle_enum_has_flag (cfg, klass, enum_this, enum_flag);
10728 // FIXME: LLVM can't handle the inconsistent bb linking
10729 if (!mono_class_is_nullable (klass) &&
10730 ip + 5 < end && ip_in_bb (cfg, bblock, ip + 5) &&
10731 (ip [5] == CEE_BRTRUE ||
10732 ip [5] == CEE_BRTRUE_S ||
10733 ip [5] == CEE_BRFALSE ||
10734 ip [5] == CEE_BRFALSE_S)) {
10735 gboolean is_true = ip [5] == CEE_BRTRUE || ip [5] == CEE_BRTRUE_S;
10737 MonoBasicBlock *true_bb, *false_bb;
10741 if (cfg->verbose_level > 3) {
10742 printf ("converting (in B%d: stack: %d) %s", bblock->block_num, (int)(sp - stack_start), mono_disasm_code_one (NULL, method, ip, NULL));
10743 printf ("<box+brtrue opt>\n");
10748 case CEE_BRFALSE_S:
10751 target = ip + 1 + (signed char)(*ip);
10758 target = ip + 4 + (gint)(read32 (ip));
10762 g_assert_not_reached ();
10766 * We need to link both bblocks, since it is needed for handling stack
10767 * arguments correctly (See test_0_box_brtrue_opt_regress_81102).
10768 * Branching to only one of them would lead to inconsistencies, so
10769 * generate an ICONST+BRTRUE, the branch opts will get rid of them.
10771 GET_BBLOCK (cfg, true_bb, target);
10772 GET_BBLOCK (cfg, false_bb, ip);
10774 mono_link_bblock (cfg, cfg->cbb, true_bb);
10775 mono_link_bblock (cfg, cfg->cbb, false_bb);
10777 if (sp != stack_start) {
10778 handle_stack_args (cfg, stack_start, sp - stack_start);
10780 CHECK_UNVERIFIABLE (cfg);
10783 if (COMPILE_LLVM (cfg)) {
10784 dreg = alloc_ireg (cfg);
10785 MONO_EMIT_NEW_ICONST (cfg, dreg, 0);
10786 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, dreg, is_true ? 0 : 1);
10788 MONO_EMIT_NEW_BRANCH_BLOCK2 (cfg, OP_IBEQ, true_bb, false_bb);
10790 /* The JIT can't eliminate the iconst+compare */
10791 MONO_INST_NEW (cfg, ins, OP_BR);
10792 ins->inst_target_bb = is_true ? true_bb : false_bb;
10793 MONO_ADD_INS (cfg->cbb, ins);
10796 start_new_bblock = 1;
10800 *sp++ = handle_box (cfg, val, klass, context_used, &bblock);
10802 CHECK_CFG_EXCEPTION;
10811 token = read32 (ip + 1);
10812 klass = mini_get_class (method, token, generic_context);
10813 CHECK_TYPELOAD (klass);
10815 mono_save_token_info (cfg, image, token, klass);
10817 context_used = mini_class_check_context_used (cfg, klass);
10819 if (mono_class_is_nullable (klass)) {
10822 val = handle_unbox_nullable (cfg, *sp, klass, context_used);
10823 EMIT_NEW_VARLOADA (cfg, ins, get_vreg_to_inst (cfg, val->dreg), &val->klass->byval_arg);
10827 ins = handle_unbox (cfg, klass, sp, context_used);
10840 MonoClassField *field;
10841 #ifndef DISABLE_REMOTING
10845 gboolean is_instance;
10847 gpointer addr = NULL;
10848 gboolean is_special_static;
10850 MonoInst *store_val = NULL;
10851 MonoInst *thread_ins;
10854 is_instance = (op == CEE_LDFLD || op == CEE_LDFLDA || op == CEE_STFLD);
10856 if (op == CEE_STFLD) {
10859 store_val = sp [1];
10864 if (sp [0]->type == STACK_I4 || sp [0]->type == STACK_I8 || sp [0]->type == STACK_R8)
10866 if (*ip != CEE_LDFLD && sp [0]->type == STACK_VTYPE)
10869 if (op == CEE_STSFLD) {
10872 store_val = sp [0];
10877 token = read32 (ip + 1);
10878 if (method->wrapper_type != MONO_WRAPPER_NONE) {
10879 field = mono_method_get_wrapper_data (method, token);
10880 klass = field->parent;
10883 field = mono_field_from_token_checked (image, token, &klass, generic_context, &cfg->error);
10886 if (!dont_verify && !cfg->skip_visibility && !mono_method_can_access_field (method, field))
10887 FIELD_ACCESS_FAILURE (method, field);
10888 mono_class_init (klass);
10890 if (is_instance && *ip != CEE_LDFLDA && is_magic_tls_access (field))
10893 /* if the class is Critical then transparent code cannot access it's fields */
10894 if (!is_instance && mono_security_core_clr_enabled ())
10895 ensure_method_is_allowed_to_access_field (cfg, method, field, bblock, ip);
10897 /* XXX this is technically required but, so far (SL2), no [SecurityCritical] types (not many exists) have
10898 any visible *instance* field (in fact there's a single case for a static field in Marshal) XXX
10899 if (mono_security_core_clr_enabled ())
10900 ensure_method_is_allowed_to_access_field (cfg, method, field, bblock, ip);
10904 * LDFLD etc. is usable on static fields as well, so convert those cases to
10907 if (is_instance && field->type->attrs & FIELD_ATTRIBUTE_STATIC) {
10919 g_assert_not_reached ();
10921 is_instance = FALSE;
10924 context_used = mini_class_check_context_used (cfg, klass);
10926 /* INSTANCE CASE */
10928 foffset = klass->valuetype? field->offset - sizeof (MonoObject): field->offset;
10929 if (op == CEE_STFLD) {
10930 if (target_type_is_incompatible (cfg, field->type, sp [1]))
10932 #ifndef DISABLE_REMOTING
10933 if ((mono_class_is_marshalbyref (klass) && !MONO_CHECK_THIS (sp [0])) || mono_class_is_contextbound (klass) || klass == mono_defaults.marshalbyrefobject_class) {
10934 MonoMethod *stfld_wrapper = mono_marshal_get_stfld_wrapper (field->type);
10935 MonoInst *iargs [5];
10937 GSHAREDVT_FAILURE (op);
10939 iargs [0] = sp [0];
10940 EMIT_NEW_CLASSCONST (cfg, iargs [1], klass);
10941 EMIT_NEW_FIELDCONST (cfg, iargs [2], field);
10942 EMIT_NEW_ICONST (cfg, iargs [3], klass->valuetype ? field->offset - sizeof (MonoObject) :
10944 iargs [4] = sp [1];
10946 if (cfg->opt & MONO_OPT_INLINE || cfg->compile_aot) {
10947 costs = inline_method (cfg, stfld_wrapper, mono_method_signature (stfld_wrapper),
10948 iargs, ip, cfg->real_offset, TRUE, &bblock);
10949 CHECK_CFG_EXCEPTION;
10950 g_assert (costs > 0);
10952 cfg->real_offset += 5;
10954 inline_costs += costs;
10956 mono_emit_method_call (cfg, stfld_wrapper, iargs, NULL);
10963 MONO_EMIT_NULL_CHECK (cfg, sp [0]->dreg);
10965 if (mini_is_gsharedvt_klass (cfg, klass)) {
10966 MonoInst *offset_ins;
10968 context_used = mini_class_check_context_used (cfg, klass);
10970 offset_ins = emit_get_gsharedvt_info (cfg, field, MONO_RGCTX_INFO_FIELD_OFFSET);
10971 dreg = alloc_ireg_mp (cfg);
10972 EMIT_NEW_BIALU (cfg, ins, OP_PADD, dreg, sp [0]->dreg, offset_ins->dreg);
10973 /* The decomposition will call mini_emit_stobj () which will emit a wbarrier if needed */
10974 EMIT_NEW_STORE_MEMBASE_TYPE (cfg, store, field->type, dreg, 0, sp [1]->dreg);
10976 EMIT_NEW_STORE_MEMBASE_TYPE (cfg, store, field->type, sp [0]->dreg, foffset, sp [1]->dreg);
10978 if (sp [0]->opcode != OP_LDADDR)
10979 store->flags |= MONO_INST_FAULT;
10981 if (cfg->gen_write_barriers && mini_type_to_stind (cfg, field->type) == CEE_STIND_REF && !(sp [1]->opcode == OP_PCONST && sp [1]->inst_c0 == 0)) {
10982 /* insert call to write barrier */
10986 dreg = alloc_ireg_mp (cfg);
10987 EMIT_NEW_BIALU_IMM (cfg, ptr, OP_PADD_IMM, dreg, sp [0]->dreg, foffset);
10988 emit_write_barrier (cfg, ptr, sp [1]);
10991 store->flags |= ins_flag;
10998 #ifndef DISABLE_REMOTING
10999 if (is_instance && ((mono_class_is_marshalbyref (klass) && !MONO_CHECK_THIS (sp [0])) || mono_class_is_contextbound (klass) || klass == mono_defaults.marshalbyrefobject_class)) {
11000 MonoMethod *wrapper = (op == CEE_LDFLDA) ? mono_marshal_get_ldflda_wrapper (field->type) : mono_marshal_get_ldfld_wrapper (field->type);
11001 MonoInst *iargs [4];
11003 GSHAREDVT_FAILURE (op);
11005 iargs [0] = sp [0];
11006 EMIT_NEW_CLASSCONST (cfg, iargs [1], klass);
11007 EMIT_NEW_FIELDCONST (cfg, iargs [2], field);
11008 EMIT_NEW_ICONST (cfg, iargs [3], klass->valuetype ? field->offset - sizeof (MonoObject) : field->offset);
11009 if (cfg->opt & MONO_OPT_INLINE || cfg->compile_aot) {
11010 costs = inline_method (cfg, wrapper, mono_method_signature (wrapper),
11011 iargs, ip, cfg->real_offset, TRUE, &bblock);
11012 CHECK_CFG_EXCEPTION;
11013 g_assert (costs > 0);
11015 cfg->real_offset += 5;
11019 inline_costs += costs;
11021 ins = mono_emit_method_call (cfg, wrapper, iargs, NULL);
11027 if (sp [0]->type == STACK_VTYPE) {
11030 /* Have to compute the address of the variable */
11032 var = get_vreg_to_inst (cfg, sp [0]->dreg);
11034 var = mono_compile_create_var_for_vreg (cfg, &klass->byval_arg, OP_LOCAL, sp [0]->dreg);
11036 g_assert (var->klass == klass);
11038 EMIT_NEW_VARLOADA (cfg, ins, var, &var->klass->byval_arg);
11042 if (op == CEE_LDFLDA) {
11043 if (is_magic_tls_access (field)) {
11044 GSHAREDVT_FAILURE (*ip);
11046 *sp++ = create_magic_tls_access (cfg, field, &cached_tls_addr, ins);
11048 if (sp [0]->type == STACK_OBJ) {
11049 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, sp [0]->dreg, 0);
11050 MONO_EMIT_NEW_COND_EXC (cfg, EQ, "NullReferenceException");
11053 dreg = alloc_ireg_mp (cfg);
11055 if (mini_is_gsharedvt_klass (cfg, klass)) {
11056 MonoInst *offset_ins;
11058 offset_ins = emit_get_gsharedvt_info (cfg, field, MONO_RGCTX_INFO_FIELD_OFFSET);
11059 EMIT_NEW_BIALU (cfg, ins, OP_PADD, dreg, sp [0]->dreg, offset_ins->dreg);
11061 EMIT_NEW_BIALU_IMM (cfg, ins, OP_PADD_IMM, dreg, sp [0]->dreg, foffset);
11063 ins->klass = mono_class_from_mono_type (field->type);
11064 ins->type = STACK_MP;
11070 MONO_EMIT_NULL_CHECK (cfg, sp [0]->dreg);
11072 if (mini_is_gsharedvt_klass (cfg, klass)) {
11073 MonoInst *offset_ins;
11075 offset_ins = emit_get_gsharedvt_info (cfg, field, MONO_RGCTX_INFO_FIELD_OFFSET);
11076 dreg = alloc_ireg_mp (cfg);
11077 EMIT_NEW_BIALU (cfg, ins, OP_PADD, dreg, sp [0]->dreg, offset_ins->dreg);
11078 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, load, field->type, dreg, 0);
11080 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, load, field->type, sp [0]->dreg, foffset);
11082 load->flags |= ins_flag;
11083 if (sp [0]->opcode != OP_LDADDR)
11084 load->flags |= MONO_INST_FAULT;
11098 * We can only support shared generic static
11099 * field access on architectures where the
11100 * trampoline code has been extended to handle
11101 * the generic class init.
11103 #ifndef MONO_ARCH_VTABLE_REG
11104 GENERIC_SHARING_FAILURE (op);
11107 context_used = mini_class_check_context_used (cfg, klass);
11109 ftype = mono_field_get_type (field);
11111 if (ftype->attrs & FIELD_ATTRIBUTE_LITERAL)
11114 /* The special_static_fields field is init'd in mono_class_vtable, so it needs
11115 * to be called here.
11117 if (!context_used && !(cfg->opt & MONO_OPT_SHARED)) {
11118 mono_class_vtable (cfg->domain, klass);
11119 CHECK_TYPELOAD (klass);
11121 mono_domain_lock (cfg->domain);
11122 if (cfg->domain->special_static_fields)
11123 addr = g_hash_table_lookup (cfg->domain->special_static_fields, field);
11124 mono_domain_unlock (cfg->domain);
11126 is_special_static = mono_class_field_is_special_static (field);
11128 if (is_special_static && ((gsize)addr & 0x80000000) == 0)
11129 thread_ins = mono_get_thread_intrinsic (cfg);
11133 /* Generate IR to compute the field address */
11134 if (is_special_static && ((gsize)addr & 0x80000000) == 0 && thread_ins && !(cfg->opt & MONO_OPT_SHARED) && !context_used) {
11136 * Fast access to TLS data
11137 * Inline version of get_thread_static_data () in
11141 int idx, static_data_reg, array_reg, dreg;
11143 GSHAREDVT_FAILURE (op);
11145 // offset &= 0x7fffffff;
11146 // idx = (offset >> 24) - 1;
11147 // return ((char*) thread->static_data [idx]) + (offset & 0xffffff);
11148 MONO_ADD_INS (cfg->cbb, thread_ins);
11149 static_data_reg = alloc_ireg (cfg);
11150 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, static_data_reg, thread_ins->dreg, MONO_STRUCT_OFFSET (MonoInternalThread, static_data));
11152 if (cfg->compile_aot) {
11153 int offset_reg, offset2_reg, idx_reg;
11155 /* For TLS variables, this will return the TLS offset */
11156 EMIT_NEW_SFLDACONST (cfg, ins, field);
11157 offset_reg = ins->dreg;
11158 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_IAND_IMM, offset_reg, offset_reg, 0x7fffffff);
11159 idx_reg = alloc_ireg (cfg);
11160 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_ISHR_IMM, idx_reg, offset_reg, 24);
11161 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_ISUB_IMM, idx_reg, idx_reg, 1);
11162 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_ISHL_IMM, idx_reg, idx_reg, sizeof (gpointer) == 8 ? 3 : 2);
11163 MONO_EMIT_NEW_BIALU (cfg, OP_PADD, static_data_reg, static_data_reg, idx_reg);
11164 array_reg = alloc_ireg (cfg);
11165 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, array_reg, static_data_reg, 0);
11166 offset2_reg = alloc_ireg (cfg);
11167 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_IAND_IMM, offset2_reg, offset_reg, 0xffffff);
11168 dreg = alloc_ireg (cfg);
11169 EMIT_NEW_BIALU (cfg, ins, OP_PADD, dreg, array_reg, offset2_reg);
11171 offset = (gsize)addr & 0x7fffffff;
11172 idx = (offset >> 24) - 1;
11174 array_reg = alloc_ireg (cfg);
11175 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, array_reg, static_data_reg, idx * sizeof (gpointer));
11176 dreg = alloc_ireg (cfg);
11177 EMIT_NEW_BIALU_IMM (cfg, ins, OP_ADD_IMM, dreg, array_reg, (offset & 0xffffff));
11179 } else if ((cfg->opt & MONO_OPT_SHARED) ||
11180 (cfg->compile_aot && is_special_static) ||
11181 (context_used && is_special_static)) {
11182 MonoInst *iargs [2];
11184 g_assert (field->parent);
11185 EMIT_NEW_DOMAINCONST (cfg, iargs [0]);
11186 if (context_used) {
11187 iargs [1] = emit_get_rgctx_field (cfg, context_used,
11188 field, MONO_RGCTX_INFO_CLASS_FIELD);
11190 EMIT_NEW_FIELDCONST (cfg, iargs [1], field);
11192 ins = mono_emit_jit_icall (cfg, mono_class_static_field_address, iargs);
11193 } else if (context_used) {
11194 MonoInst *static_data;
11197 g_print ("sharing static field access in %s.%s.%s - depth %d offset %d\n",
11198 method->klass->name_space, method->klass->name, method->name,
11199 depth, field->offset);
11202 if (mono_class_needs_cctor_run (klass, method))
11203 emit_generic_class_init (cfg, klass);
11206 * The pointer we're computing here is
11208 * super_info.static_data + field->offset
11210 static_data = emit_get_rgctx_klass (cfg, context_used,
11211 klass, MONO_RGCTX_INFO_STATIC_DATA);
11213 if (mini_is_gsharedvt_klass (cfg, klass)) {
11214 MonoInst *offset_ins;
11216 offset_ins = emit_get_rgctx_field (cfg, context_used, field, MONO_RGCTX_INFO_FIELD_OFFSET);
11217 dreg = alloc_ireg_mp (cfg);
11218 EMIT_NEW_BIALU (cfg, ins, OP_PADD, dreg, static_data->dreg, offset_ins->dreg);
11219 } else if (field->offset == 0) {
11222 int addr_reg = mono_alloc_preg (cfg);
11223 EMIT_NEW_BIALU_IMM (cfg, ins, OP_PADD_IMM, addr_reg, static_data->dreg, field->offset);
11225 } else if ((cfg->opt & MONO_OPT_SHARED) || (cfg->compile_aot && addr)) {
11226 MonoInst *iargs [2];
11228 g_assert (field->parent);
11229 EMIT_NEW_DOMAINCONST (cfg, iargs [0]);
11230 EMIT_NEW_FIELDCONST (cfg, iargs [1], field);
11231 ins = mono_emit_jit_icall (cfg, mono_class_static_field_address, iargs);
11233 MonoVTable *vtable = NULL;
11235 if (!cfg->compile_aot)
11236 vtable = mono_class_vtable (cfg->domain, klass);
11237 CHECK_TYPELOAD (klass);
11240 if (mini_field_access_needs_cctor_run (cfg, method, klass, vtable)) {
11241 if (!(g_slist_find (class_inits, klass))) {
11242 mono_emit_abs_call (cfg, MONO_PATCH_INFO_CLASS_INIT, klass, helper_sig_class_init_trampoline, NULL);
11243 if (cfg->verbose_level > 2)
11244 printf ("class %s.%s needs init call for %s\n", klass->name_space, klass->name, mono_field_get_name (field));
11245 class_inits = g_slist_prepend (class_inits, klass);
11248 if (cfg->run_cctors) {
11250 /* This makes it so that inlining cannot trigger */
11251 /* .cctors: too many apps depend on them */
11252 /* running with a specific order... */
11254 if (! vtable->initialized)
11255 INLINE_FAILURE ("class init");
11256 ex = mono_runtime_class_init_full (vtable, FALSE);
11258 set_exception_object (cfg, ex);
11259 goto exception_exit;
11263 if (cfg->compile_aot)
11264 EMIT_NEW_SFLDACONST (cfg, ins, field);
11267 addr = (char*)mono_vtable_get_static_field_data (vtable) + field->offset;
11269 EMIT_NEW_PCONST (cfg, ins, addr);
11272 MonoInst *iargs [1];
11273 EMIT_NEW_ICONST (cfg, iargs [0], GPOINTER_TO_UINT (addr));
11274 ins = mono_emit_jit_icall (cfg, mono_get_special_static_data, iargs);
11278 /* Generate IR to do the actual load/store operation */
11280 if ((op == CEE_STFLD || op == CEE_STSFLD) && (ins_flag & MONO_INST_VOLATILE)) {
11281 /* Volatile stores have release semantics, see 12.6.7 in Ecma 335 */
11282 emit_memory_barrier (cfg, MONO_MEMORY_BARRIER_REL);
11285 if (op == CEE_LDSFLDA) {
11286 ins->klass = mono_class_from_mono_type (ftype);
11287 ins->type = STACK_PTR;
11289 } else if (op == CEE_STSFLD) {
11292 EMIT_NEW_STORE_MEMBASE_TYPE (cfg, store, ftype, ins->dreg, 0, store_val->dreg);
11293 store->flags |= ins_flag;
11295 gboolean is_const = FALSE;
11296 MonoVTable *vtable = NULL;
11297 gpointer addr = NULL;
11299 if (!context_used) {
11300 vtable = mono_class_vtable (cfg->domain, klass);
11301 CHECK_TYPELOAD (klass);
11303 if ((ftype->attrs & FIELD_ATTRIBUTE_INIT_ONLY) && (((addr = mono_aot_readonly_field_override (field)) != NULL) ||
11304 (!context_used && !((cfg->opt & MONO_OPT_SHARED) || cfg->compile_aot) && vtable->initialized))) {
11305 int ro_type = ftype->type;
11307 addr = (char*)mono_vtable_get_static_field_data (vtable) + field->offset;
11308 if (ro_type == MONO_TYPE_VALUETYPE && ftype->data.klass->enumtype) {
11309 ro_type = mono_class_enum_basetype (ftype->data.klass)->type;
11312 GSHAREDVT_FAILURE (op);
11314 /* printf ("RO-FIELD %s.%s:%s\n", klass->name_space, klass->name, mono_field_get_name (field));*/
11317 case MONO_TYPE_BOOLEAN:
11319 EMIT_NEW_ICONST (cfg, *sp, *((guint8 *)addr));
11323 EMIT_NEW_ICONST (cfg, *sp, *((gint8 *)addr));
11326 case MONO_TYPE_CHAR:
11328 EMIT_NEW_ICONST (cfg, *sp, *((guint16 *)addr));
11332 EMIT_NEW_ICONST (cfg, *sp, *((gint16 *)addr));
11337 EMIT_NEW_ICONST (cfg, *sp, *((gint32 *)addr));
11341 EMIT_NEW_ICONST (cfg, *sp, *((guint32 *)addr));
11346 case MONO_TYPE_PTR:
11347 case MONO_TYPE_FNPTR:
11348 EMIT_NEW_PCONST (cfg, *sp, *((gpointer *)addr));
11349 type_to_eval_stack_type ((cfg), field->type, *sp);
11352 case MONO_TYPE_STRING:
11353 case MONO_TYPE_OBJECT:
11354 case MONO_TYPE_CLASS:
11355 case MONO_TYPE_SZARRAY:
11356 case MONO_TYPE_ARRAY:
11357 if (!mono_gc_is_moving ()) {
11358 EMIT_NEW_PCONST (cfg, *sp, *((gpointer *)addr));
11359 type_to_eval_stack_type ((cfg), field->type, *sp);
11367 EMIT_NEW_I8CONST (cfg, *sp, *((gint64 *)addr));
11372 case MONO_TYPE_VALUETYPE:
11382 CHECK_STACK_OVF (1);
11384 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, load, field->type, ins->dreg, 0);
11385 load->flags |= ins_flag;
11391 if ((op == CEE_LDFLD || op == CEE_LDSFLD) && (ins_flag & MONO_INST_VOLATILE)) {
11392 /* Volatile loads have acquire semantics, see 12.6.7 in Ecma 335 */
11393 emit_memory_barrier (cfg, MONO_MEMORY_BARRIER_ACQ);
11404 token = read32 (ip + 1);
11405 klass = mini_get_class (method, token, generic_context);
11406 CHECK_TYPELOAD (klass);
11407 if (ins_flag & MONO_INST_VOLATILE) {
11408 /* Volatile stores have release semantics, see 12.6.7 in Ecma 335 */
11409 emit_memory_barrier (cfg, MONO_MEMORY_BARRIER_REL);
11411 /* FIXME: should check item at sp [1] is compatible with the type of the store. */
11412 EMIT_NEW_STORE_MEMBASE_TYPE (cfg, ins, &klass->byval_arg, sp [0]->dreg, 0, sp [1]->dreg);
11413 ins->flags |= ins_flag;
11414 if (cfg->gen_write_barriers && cfg->method->wrapper_type != MONO_WRAPPER_WRITE_BARRIER &&
11415 generic_class_is_reference_type (cfg, klass)) {
11416 /* insert call to write barrier */
11417 emit_write_barrier (cfg, sp [0], sp [1]);
11429 const char *data_ptr;
11431 guint32 field_token;
11437 token = read32 (ip + 1);
11439 klass = mini_get_class (method, token, generic_context);
11440 CHECK_TYPELOAD (klass);
11442 context_used = mini_class_check_context_used (cfg, klass);
11444 if (sp [0]->type == STACK_I8 || (SIZEOF_VOID_P == 8 && sp [0]->type == STACK_PTR)) {
11445 MONO_INST_NEW (cfg, ins, OP_LCONV_TO_OVF_U4);
11446 ins->sreg1 = sp [0]->dreg;
11447 ins->type = STACK_I4;
11448 ins->dreg = alloc_ireg (cfg);
11449 MONO_ADD_INS (cfg->cbb, ins);
11450 *sp = mono_decompose_opcode (cfg, ins, &bblock);
11453 if (context_used) {
11454 MonoInst *args [3];
11455 MonoClass *array_class = mono_array_class_get (klass, 1);
11456 MonoMethod *managed_alloc = mono_gc_get_managed_array_allocator (array_class);
11458 /* FIXME: Use OP_NEWARR and decompose later to help abcrem */
11461 args [0] = emit_get_rgctx_klass (cfg, context_used,
11462 array_class, MONO_RGCTX_INFO_VTABLE);
11467 ins = mono_emit_method_call (cfg, managed_alloc, args, NULL);
11469 ins = mono_emit_jit_icall (cfg, mono_array_new_specific, args);
11471 if (cfg->opt & MONO_OPT_SHARED) {
11472 /* Decompose now to avoid problems with references to the domainvar */
11473 MonoInst *iargs [3];
11475 EMIT_NEW_DOMAINCONST (cfg, iargs [0]);
11476 EMIT_NEW_CLASSCONST (cfg, iargs [1], klass);
11477 iargs [2] = sp [0];
11479 ins = mono_emit_jit_icall (cfg, mono_array_new, iargs);
11481 /* Decompose later since it is needed by abcrem */
11482 MonoClass *array_type = mono_array_class_get (klass, 1);
11483 mono_class_vtable (cfg->domain, array_type);
11484 CHECK_TYPELOAD (array_type);
11486 MONO_INST_NEW (cfg, ins, OP_NEWARR);
11487 ins->dreg = alloc_ireg_ref (cfg);
11488 ins->sreg1 = sp [0]->dreg;
11489 ins->inst_newa_class = klass;
11490 ins->type = STACK_OBJ;
11491 ins->klass = array_type;
11492 MONO_ADD_INS (cfg->cbb, ins);
11493 cfg->flags |= MONO_CFG_HAS_ARRAY_ACCESS;
11494 cfg->cbb->has_array_access = TRUE;
11496 /* Needed so mono_emit_load_get_addr () gets called */
11497 mono_get_got_var (cfg);
11507 * we inline/optimize the initialization sequence if possible.
11508 * we should also allocate the array as not cleared, since we spend as much time clearing to 0 as initializing
11509 * for small sizes open code the memcpy
11510 * ensure the rva field is big enough
11512 if ((cfg->opt & MONO_OPT_INTRINS) && ip + 6 < end && ip_in_bb (cfg, bblock, ip + 6) && (len_ins->opcode == OP_ICONST) && (data_ptr = initialize_array_data (method, cfg->compile_aot, ip, klass, len_ins->inst_c0, &data_size, &field_token))) {
11513 MonoMethod *memcpy_method = get_memcpy_method ();
11514 MonoInst *iargs [3];
11515 int add_reg = alloc_ireg_mp (cfg);
11517 EMIT_NEW_BIALU_IMM (cfg, iargs [0], OP_PADD_IMM, add_reg, ins->dreg, MONO_STRUCT_OFFSET (MonoArray, vector));
11518 if (cfg->compile_aot) {
11519 EMIT_NEW_AOTCONST_TOKEN (cfg, iargs [1], MONO_PATCH_INFO_RVA, method->klass->image, GPOINTER_TO_UINT(field_token), STACK_PTR, NULL);
11521 EMIT_NEW_PCONST (cfg, iargs [1], (char*)data_ptr);
11523 EMIT_NEW_ICONST (cfg, iargs [2], data_size);
11524 mono_emit_method_call (cfg, memcpy_method, iargs, NULL);
11533 if (sp [0]->type != STACK_OBJ)
11536 MONO_INST_NEW (cfg, ins, OP_LDLEN);
11537 ins->dreg = alloc_preg (cfg);
11538 ins->sreg1 = sp [0]->dreg;
11539 ins->type = STACK_I4;
11540 /* This flag will be inherited by the decomposition */
11541 ins->flags |= MONO_INST_FAULT;
11542 MONO_ADD_INS (cfg->cbb, ins);
11543 cfg->flags |= MONO_CFG_HAS_ARRAY_ACCESS;
11544 cfg->cbb->has_array_access = TRUE;
11552 if (sp [0]->type != STACK_OBJ)
11555 cfg->flags |= MONO_CFG_HAS_LDELEMA;
11557 klass = mini_get_class (method, read32 (ip + 1), generic_context);
11558 CHECK_TYPELOAD (klass);
11559 /* we need to make sure that this array is exactly the type it needs
11560 * to be for correctness. the wrappers are lax with their usage
11561 * so we need to ignore them here
11563 if (!klass->valuetype && method->wrapper_type == MONO_WRAPPER_NONE && !readonly) {
11564 MonoClass *array_class = mono_array_class_get (klass, 1);
11565 mini_emit_check_array_type (cfg, sp [0], array_class);
11566 CHECK_TYPELOAD (array_class);
11570 ins = mini_emit_ldelema_1_ins (cfg, klass, sp [0], sp [1], TRUE);
11575 case CEE_LDELEM_I1:
11576 case CEE_LDELEM_U1:
11577 case CEE_LDELEM_I2:
11578 case CEE_LDELEM_U2:
11579 case CEE_LDELEM_I4:
11580 case CEE_LDELEM_U4:
11581 case CEE_LDELEM_I8:
11583 case CEE_LDELEM_R4:
11584 case CEE_LDELEM_R8:
11585 case CEE_LDELEM_REF: {
11591 if (*ip == CEE_LDELEM) {
11593 token = read32 (ip + 1);
11594 klass = mini_get_class (method, token, generic_context);
11595 CHECK_TYPELOAD (klass);
11596 mono_class_init (klass);
11599 klass = array_access_to_klass (*ip);
11601 if (sp [0]->type != STACK_OBJ)
11604 cfg->flags |= MONO_CFG_HAS_LDELEMA;
11606 if (mini_is_gsharedvt_variable_klass (cfg, klass)) {
11607 // FIXME-VT: OP_ICONST optimization
11608 addr = mini_emit_ldelema_1_ins (cfg, klass, sp [0], sp [1], TRUE);
11609 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, ins, &klass->byval_arg, addr->dreg, 0);
11610 ins->opcode = OP_LOADV_MEMBASE;
11611 } else if (sp [1]->opcode == OP_ICONST) {
11612 int array_reg = sp [0]->dreg;
11613 int index_reg = sp [1]->dreg;
11614 int offset = (mono_class_array_element_size (klass) * sp [1]->inst_c0) + MONO_STRUCT_OFFSET (MonoArray, vector);
11616 MONO_EMIT_BOUNDS_CHECK (cfg, array_reg, MonoArray, max_length, index_reg);
11617 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, ins, &klass->byval_arg, array_reg, offset);
11619 addr = mini_emit_ldelema_1_ins (cfg, klass, sp [0], sp [1], TRUE);
11620 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, ins, &klass->byval_arg, addr->dreg, 0);
11623 if (*ip == CEE_LDELEM)
11630 case CEE_STELEM_I1:
11631 case CEE_STELEM_I2:
11632 case CEE_STELEM_I4:
11633 case CEE_STELEM_I8:
11634 case CEE_STELEM_R4:
11635 case CEE_STELEM_R8:
11636 case CEE_STELEM_REF:
11641 cfg->flags |= MONO_CFG_HAS_LDELEMA;
11643 if (*ip == CEE_STELEM) {
11645 token = read32 (ip + 1);
11646 klass = mini_get_class (method, token, generic_context);
11647 CHECK_TYPELOAD (klass);
11648 mono_class_init (klass);
11651 klass = array_access_to_klass (*ip);
11653 if (sp [0]->type != STACK_OBJ)
11656 emit_array_store (cfg, klass, sp, TRUE);
11658 if (*ip == CEE_STELEM)
11665 case CEE_CKFINITE: {
11669 MONO_INST_NEW (cfg, ins, OP_CKFINITE);
11670 ins->sreg1 = sp [0]->dreg;
11671 ins->dreg = alloc_freg (cfg);
11672 ins->type = STACK_R8;
11673 MONO_ADD_INS (bblock, ins);
11675 *sp++ = mono_decompose_opcode (cfg, ins, &bblock);
11680 case CEE_REFANYVAL: {
11681 MonoInst *src_var, *src;
11683 int klass_reg = alloc_preg (cfg);
11684 int dreg = alloc_preg (cfg);
11686 GSHAREDVT_FAILURE (*ip);
11689 MONO_INST_NEW (cfg, ins, *ip);
11692 klass = mini_get_class (method, read32 (ip + 1), generic_context);
11693 CHECK_TYPELOAD (klass);
11695 context_used = mini_class_check_context_used (cfg, klass);
11698 src_var = get_vreg_to_inst (cfg, sp [0]->dreg);
11700 src_var = mono_compile_create_var_for_vreg (cfg, &mono_defaults.typed_reference_class->byval_arg, OP_LOCAL, sp [0]->dreg);
11701 EMIT_NEW_VARLOADA (cfg, src, src_var, src_var->inst_vtype);
11702 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, klass_reg, src->dreg, MONO_STRUCT_OFFSET (MonoTypedRef, klass));
11704 if (context_used) {
11705 MonoInst *klass_ins;
11707 klass_ins = emit_get_rgctx_klass (cfg, context_used,
11708 klass, MONO_RGCTX_INFO_KLASS);
11711 MONO_EMIT_NEW_BIALU (cfg, OP_COMPARE, -1, klass_reg, klass_ins->dreg);
11712 MONO_EMIT_NEW_COND_EXC (cfg, NE_UN, "InvalidCastException");
11714 mini_emit_class_check (cfg, klass_reg, klass);
11716 EMIT_NEW_LOAD_MEMBASE (cfg, ins, OP_LOAD_MEMBASE, dreg, src->dreg, MONO_STRUCT_OFFSET (MonoTypedRef, value));
11717 ins->type = STACK_MP;
11722 case CEE_MKREFANY: {
11723 MonoInst *loc, *addr;
11725 GSHAREDVT_FAILURE (*ip);
11728 MONO_INST_NEW (cfg, ins, *ip);
11731 klass = mini_get_class (method, read32 (ip + 1), generic_context);
11732 CHECK_TYPELOAD (klass);
11734 context_used = mini_class_check_context_used (cfg, klass);
11736 loc = mono_compile_create_var (cfg, &mono_defaults.typed_reference_class->byval_arg, OP_LOCAL);
11737 EMIT_NEW_TEMPLOADA (cfg, addr, loc->inst_c0);
11739 if (context_used) {
11740 MonoInst *const_ins;
11741 int type_reg = alloc_preg (cfg);
11743 const_ins = emit_get_rgctx_klass (cfg, context_used, klass, MONO_RGCTX_INFO_KLASS);
11744 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREP_MEMBASE_REG, addr->dreg, MONO_STRUCT_OFFSET (MonoTypedRef, klass), const_ins->dreg);
11745 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_ADD_IMM, type_reg, const_ins->dreg, MONO_STRUCT_OFFSET (MonoClass, byval_arg));
11746 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREP_MEMBASE_REG, addr->dreg, MONO_STRUCT_OFFSET (MonoTypedRef, type), type_reg);
11747 } else if (cfg->compile_aot) {
11748 int const_reg = alloc_preg (cfg);
11749 int type_reg = alloc_preg (cfg);
11751 MONO_EMIT_NEW_CLASSCONST (cfg, const_reg, klass);
11752 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREP_MEMBASE_REG, addr->dreg, MONO_STRUCT_OFFSET (MonoTypedRef, klass), const_reg);
11753 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_ADD_IMM, type_reg, const_reg, MONO_STRUCT_OFFSET (MonoClass, byval_arg));
11754 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREP_MEMBASE_REG, addr->dreg, MONO_STRUCT_OFFSET (MonoTypedRef, type), type_reg);
11756 MONO_EMIT_NEW_STORE_MEMBASE_IMM (cfg, OP_STOREP_MEMBASE_IMM, addr->dreg, MONO_STRUCT_OFFSET (MonoTypedRef, type), &klass->byval_arg);
11757 MONO_EMIT_NEW_STORE_MEMBASE_IMM (cfg, OP_STOREP_MEMBASE_IMM, addr->dreg, MONO_STRUCT_OFFSET (MonoTypedRef, klass), klass);
11759 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREP_MEMBASE_REG, addr->dreg, MONO_STRUCT_OFFSET (MonoTypedRef, value), sp [0]->dreg);
11761 EMIT_NEW_TEMPLOAD (cfg, ins, loc->inst_c0);
11762 ins->type = STACK_VTYPE;
11763 ins->klass = mono_defaults.typed_reference_class;
11768 case CEE_LDTOKEN: {
11770 MonoClass *handle_class;
11772 CHECK_STACK_OVF (1);
11775 n = read32 (ip + 1);
11777 if (method->wrapper_type == MONO_WRAPPER_DYNAMIC_METHOD ||
11778 method->wrapper_type == MONO_WRAPPER_SYNCHRONIZED) {
11779 handle = mono_method_get_wrapper_data (method, n);
11780 handle_class = mono_method_get_wrapper_data (method, n + 1);
11781 if (handle_class == mono_defaults.typehandle_class)
11782 handle = &((MonoClass*)handle)->byval_arg;
11785 handle = mono_ldtoken_checked (image, n, &handle_class, generic_context, &cfg->error);
11790 mono_class_init (handle_class);
11791 if (cfg->generic_sharing_context) {
11792 if (mono_metadata_token_table (n) == MONO_TABLE_TYPEDEF ||
11793 mono_metadata_token_table (n) == MONO_TABLE_TYPEREF) {
11794 /* This case handles ldtoken
11795 of an open type, like for
11798 } else if (handle_class == mono_defaults.typehandle_class) {
11799 context_used = mini_class_check_context_used (cfg, mono_class_from_mono_type (handle));
11800 } else if (handle_class == mono_defaults.fieldhandle_class)
11801 context_used = mini_class_check_context_used (cfg, ((MonoClassField*)handle)->parent);
11802 else if (handle_class == mono_defaults.methodhandle_class)
11803 context_used = mini_method_check_context_used (cfg, handle);
11805 g_assert_not_reached ();
11808 if ((cfg->opt & MONO_OPT_SHARED) &&
11809 method->wrapper_type != MONO_WRAPPER_DYNAMIC_METHOD &&
11810 method->wrapper_type != MONO_WRAPPER_SYNCHRONIZED) {
11811 MonoInst *addr, *vtvar, *iargs [3];
11812 int method_context_used;
11814 method_context_used = mini_method_check_context_used (cfg, method);
11816 vtvar = mono_compile_create_var (cfg, &handle_class->byval_arg, OP_LOCAL);
11818 EMIT_NEW_IMAGECONST (cfg, iargs [0], image);
11819 EMIT_NEW_ICONST (cfg, iargs [1], n);
11820 if (method_context_used) {
11821 iargs [2] = emit_get_rgctx_method (cfg, method_context_used,
11822 method, MONO_RGCTX_INFO_METHOD);
11823 ins = mono_emit_jit_icall (cfg, mono_ldtoken_wrapper_generic_shared, iargs);
11825 EMIT_NEW_PCONST (cfg, iargs [2], generic_context);
11826 ins = mono_emit_jit_icall (cfg, mono_ldtoken_wrapper, iargs);
11828 EMIT_NEW_TEMPLOADA (cfg, addr, vtvar->inst_c0);
11830 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORE_MEMBASE_REG, addr->dreg, 0, ins->dreg);
11832 EMIT_NEW_TEMPLOAD (cfg, ins, vtvar->inst_c0);
11834 if ((ip + 5 < end) && ip_in_bb (cfg, bblock, ip + 5) &&
11835 ((ip [5] == CEE_CALL) || (ip [5] == CEE_CALLVIRT)) &&
11836 (cmethod = mini_get_method (cfg, method, read32 (ip + 6), NULL, generic_context)) &&
11837 (cmethod->klass == mono_defaults.systemtype_class) &&
11838 (strcmp (cmethod->name, "GetTypeFromHandle") == 0)) {
11839 MonoClass *tclass = mono_class_from_mono_type (handle);
11841 mono_class_init (tclass);
11842 if (context_used) {
11843 ins = emit_get_rgctx_klass (cfg, context_used,
11844 tclass, MONO_RGCTX_INFO_REFLECTION_TYPE);
11845 } else if (cfg->compile_aot) {
11846 if (method->wrapper_type) {
11847 mono_error_init (&error); //got to do it since there are multiple conditionals below
11848 if (mono_class_get_checked (tclass->image, tclass->type_token, &error) == tclass && !generic_context) {
11849 /* Special case for static synchronized wrappers */
11850 EMIT_NEW_TYPE_FROM_HANDLE_CONST (cfg, ins, tclass->image, tclass->type_token, generic_context);
11852 mono_error_cleanup (&error); /* FIXME don't swallow the error */
11853 /* FIXME: n is not a normal token */
11855 EMIT_NEW_PCONST (cfg, ins, NULL);
11858 EMIT_NEW_TYPE_FROM_HANDLE_CONST (cfg, ins, image, n, generic_context);
11861 EMIT_NEW_PCONST (cfg, ins, mono_type_get_object (cfg->domain, handle));
11863 ins->type = STACK_OBJ;
11864 ins->klass = cmethod->klass;
11867 MonoInst *addr, *vtvar;
11869 vtvar = mono_compile_create_var (cfg, &handle_class->byval_arg, OP_LOCAL);
11871 if (context_used) {
11872 if (handle_class == mono_defaults.typehandle_class) {
11873 ins = emit_get_rgctx_klass (cfg, context_used,
11874 mono_class_from_mono_type (handle),
11875 MONO_RGCTX_INFO_TYPE);
11876 } else if (handle_class == mono_defaults.methodhandle_class) {
11877 ins = emit_get_rgctx_method (cfg, context_used,
11878 handle, MONO_RGCTX_INFO_METHOD);
11879 } else if (handle_class == mono_defaults.fieldhandle_class) {
11880 ins = emit_get_rgctx_field (cfg, context_used,
11881 handle, MONO_RGCTX_INFO_CLASS_FIELD);
11883 g_assert_not_reached ();
11885 } else if (cfg->compile_aot) {
11886 EMIT_NEW_LDTOKENCONST (cfg, ins, image, n, generic_context);
11888 EMIT_NEW_PCONST (cfg, ins, handle);
11890 EMIT_NEW_TEMPLOADA (cfg, addr, vtvar->inst_c0);
11891 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORE_MEMBASE_REG, addr->dreg, 0, ins->dreg);
11892 EMIT_NEW_TEMPLOAD (cfg, ins, vtvar->inst_c0);
11902 MONO_INST_NEW (cfg, ins, OP_THROW);
11904 ins->sreg1 = sp [0]->dreg;
11906 bblock->out_of_line = TRUE;
11907 MONO_ADD_INS (bblock, ins);
11908 MONO_INST_NEW (cfg, ins, OP_NOT_REACHED);
11909 MONO_ADD_INS (bblock, ins);
11912 link_bblock (cfg, bblock, end_bblock);
11913 start_new_bblock = 1;
11915 case CEE_ENDFINALLY:
11916 /* mono_save_seq_point_info () depends on this */
11917 if (sp != stack_start)
11918 emit_seq_point (cfg, method, ip, FALSE, FALSE);
11919 MONO_INST_NEW (cfg, ins, OP_ENDFINALLY);
11920 MONO_ADD_INS (bblock, ins);
11922 start_new_bblock = 1;
11925 * Control will leave the method so empty the stack, otherwise
11926 * the next basic block will start with a nonempty stack.
11928 while (sp != stack_start) {
11933 case CEE_LEAVE_S: {
11936 if (*ip == CEE_LEAVE) {
11938 target = ip + 5 + (gint32)read32(ip + 1);
11941 target = ip + 2 + (signed char)(ip [1]);
11944 /* empty the stack */
11945 while (sp != stack_start) {
11950 * If this leave statement is in a catch block, check for a
11951 * pending exception, and rethrow it if necessary.
11952 * We avoid doing this in runtime invoke wrappers, since those are called
11953 * by native code which expects the wrapper to catch all exceptions.
11955 for (i = 0; i < header->num_clauses; ++i) {
11956 MonoExceptionClause *clause = &header->clauses [i];
11959 * Use <= in the final comparison to handle clauses with multiple
11960 * leave statements, like in bug #78024.
11961 * The ordering of the exception clauses guarantees that we find the
11962 * innermost clause.
11964 if (MONO_OFFSET_IN_HANDLER (clause, ip - header->code) && (clause->flags == MONO_EXCEPTION_CLAUSE_NONE) && (ip - header->code + ((*ip == CEE_LEAVE) ? 5 : 2)) <= (clause->handler_offset + clause->handler_len) && method->wrapper_type != MONO_WRAPPER_RUNTIME_INVOKE) {
11966 MonoBasicBlock *dont_throw;
11971 NEW_TEMPLOAD (cfg, load, mono_find_exvar_for_offset (cfg, clause->handler_offset)->inst_c0);
11974 exc_ins = mono_emit_jit_icall (cfg, mono_thread_get_undeniable_exception, NULL);
11976 NEW_BBLOCK (cfg, dont_throw);
11979 * Currently, we always rethrow the abort exception, despite the
11980 * fact that this is not correct. See thread6.cs for an example.
11981 * But propagating the abort exception is more important than
11982 * getting the semantics right.
11984 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, exc_ins->dreg, 0);
11985 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBEQ, dont_throw);
11986 MONO_EMIT_NEW_UNALU (cfg, OP_THROW, -1, exc_ins->dreg);
11988 MONO_START_BB (cfg, dont_throw);
11993 if ((handlers = mono_find_final_block (cfg, ip, target, MONO_EXCEPTION_CLAUSE_FINALLY))) {
11995 MonoExceptionClause *clause;
11997 for (tmp = handlers; tmp; tmp = tmp->next) {
11998 clause = tmp->data;
11999 tblock = cfg->cil_offset_to_bb [clause->handler_offset];
12001 link_bblock (cfg, bblock, tblock);
12002 MONO_INST_NEW (cfg, ins, OP_CALL_HANDLER);
12003 ins->inst_target_bb = tblock;
12004 ins->inst_eh_block = clause;
12005 MONO_ADD_INS (bblock, ins);
12006 bblock->has_call_handler = 1;
12007 if (COMPILE_LLVM (cfg)) {
12008 MonoBasicBlock *target_bb;
12011 * Link the finally bblock with the target, since it will
12012 * conceptually branch there.
12013 * FIXME: Have to link the bblock containing the endfinally.
12015 GET_BBLOCK (cfg, target_bb, target);
12016 link_bblock (cfg, tblock, target_bb);
12019 g_list_free (handlers);
12022 MONO_INST_NEW (cfg, ins, OP_BR);
12023 MONO_ADD_INS (bblock, ins);
12024 GET_BBLOCK (cfg, tblock, target);
12025 link_bblock (cfg, bblock, tblock);
12026 ins->inst_target_bb = tblock;
12027 start_new_bblock = 1;
12029 if (*ip == CEE_LEAVE)
12038 * Mono specific opcodes
12040 case MONO_CUSTOM_PREFIX: {
12042 g_assert (method->wrapper_type != MONO_WRAPPER_NONE);
12046 case CEE_MONO_ICALL: {
12048 MonoJitICallInfo *info;
12050 token = read32 (ip + 2);
12051 func = mono_method_get_wrapper_data (method, token);
12052 info = mono_find_jit_icall_by_addr (func);
12054 g_error ("Could not find icall address in wrapper %s", mono_method_full_name (method, 1));
12057 CHECK_STACK (info->sig->param_count);
12058 sp -= info->sig->param_count;
12060 ins = mono_emit_jit_icall (cfg, info->func, sp);
12061 if (!MONO_TYPE_IS_VOID (info->sig->ret))
12065 inline_costs += 10 * num_calls++;
12069 case CEE_MONO_LDPTR_CARD_TABLE: {
12071 gpointer card_mask;
12072 CHECK_STACK_OVF (1);
12074 if (cfg->compile_aot)
12075 EMIT_NEW_AOTCONST (cfg, ins, MONO_PATCH_INFO_GC_CARD_TABLE_ADDR, NULL);
12077 EMIT_NEW_PCONST (cfg, ins, mono_gc_get_card_table (&shift_bits, &card_mask));
12081 inline_costs += 10 * num_calls++;
12084 case CEE_MONO_LDPTR_NURSERY_START: {
12087 CHECK_STACK_OVF (1);
12089 if (cfg->compile_aot)
12090 EMIT_NEW_AOTCONST (cfg, ins, MONO_PATCH_INFO_GC_NURSERY_START, NULL);
12092 EMIT_NEW_PCONST (cfg, ins, mono_gc_get_nursery (&shift_bits, &size));
12096 inline_costs += 10 * num_calls++;
12099 case CEE_MONO_LDPTR_INT_REQ_FLAG: {
12100 CHECK_STACK_OVF (1);
12102 if (cfg->compile_aot)
12103 EMIT_NEW_AOTCONST (cfg, ins, MONO_PATCH_INFO_INTERRUPTION_REQUEST_FLAG, NULL);
12105 EMIT_NEW_PCONST (cfg, ins, mono_thread_interruption_request_flag ());
12109 inline_costs += 10 * num_calls++;
12112 case CEE_MONO_LDPTR: {
12115 CHECK_STACK_OVF (1);
12117 token = read32 (ip + 2);
12119 ptr = mono_method_get_wrapper_data (method, token);
12120 EMIT_NEW_PCONST (cfg, ins, ptr);
12123 inline_costs += 10 * num_calls++;
12124 /* Can't embed random pointers into AOT code */
12128 case CEE_MONO_JIT_ICALL_ADDR: {
12129 MonoJitICallInfo *callinfo;
12132 CHECK_STACK_OVF (1);
12134 token = read32 (ip + 2);
12136 ptr = mono_method_get_wrapper_data (method, token);
12137 callinfo = mono_find_jit_icall_by_addr (ptr);
12138 g_assert (callinfo);
12139 EMIT_NEW_JIT_ICALL_ADDRCONST (cfg, ins, (char*)callinfo->name);
12142 inline_costs += 10 * num_calls++;
12145 case CEE_MONO_ICALL_ADDR: {
12146 MonoMethod *cmethod;
12149 CHECK_STACK_OVF (1);
12151 token = read32 (ip + 2);
12153 cmethod = mono_method_get_wrapper_data (method, token);
12155 if (cfg->compile_aot) {
12156 EMIT_NEW_AOTCONST (cfg, ins, MONO_PATCH_INFO_ICALL_ADDR, cmethod);
12158 ptr = mono_lookup_internal_call (cmethod);
12160 EMIT_NEW_PCONST (cfg, ins, ptr);
12166 case CEE_MONO_VTADDR: {
12167 MonoInst *src_var, *src;
12173 src_var = get_vreg_to_inst (cfg, sp [0]->dreg);
12174 EMIT_NEW_VARLOADA ((cfg), (src), src_var, src_var->inst_vtype);
12179 case CEE_MONO_NEWOBJ: {
12180 MonoInst *iargs [2];
12182 CHECK_STACK_OVF (1);
12184 token = read32 (ip + 2);
12185 klass = (MonoClass *)mono_method_get_wrapper_data (method, token);
12186 mono_class_init (klass);
12187 NEW_DOMAINCONST (cfg, iargs [0]);
12188 MONO_ADD_INS (cfg->cbb, iargs [0]);
12189 NEW_CLASSCONST (cfg, iargs [1], klass);
12190 MONO_ADD_INS (cfg->cbb, iargs [1]);
12191 *sp++ = mono_emit_jit_icall (cfg, mono_object_new, iargs);
12193 inline_costs += 10 * num_calls++;
12196 case CEE_MONO_OBJADDR:
12199 MONO_INST_NEW (cfg, ins, OP_MOVE);
12200 ins->dreg = alloc_ireg_mp (cfg);
12201 ins->sreg1 = sp [0]->dreg;
12202 ins->type = STACK_MP;
12203 MONO_ADD_INS (cfg->cbb, ins);
12207 case CEE_MONO_LDNATIVEOBJ:
12209 * Similar to LDOBJ, but instead load the unmanaged
12210 * representation of the vtype to the stack.
12215 token = read32 (ip + 2);
12216 klass = mono_method_get_wrapper_data (method, token);
12217 g_assert (klass->valuetype);
12218 mono_class_init (klass);
12221 MonoInst *src, *dest, *temp;
12224 temp = mono_compile_create_var (cfg, &klass->byval_arg, OP_LOCAL);
12225 temp->backend.is_pinvoke = 1;
12226 EMIT_NEW_TEMPLOADA (cfg, dest, temp->inst_c0);
12227 mini_emit_stobj (cfg, dest, src, klass, TRUE);
12229 EMIT_NEW_TEMPLOAD (cfg, dest, temp->inst_c0);
12230 dest->type = STACK_VTYPE;
12231 dest->klass = klass;
12237 case CEE_MONO_RETOBJ: {
12239 * Same as RET, but return the native representation of a vtype
12242 g_assert (cfg->ret);
12243 g_assert (mono_method_signature (method)->pinvoke);
12248 token = read32 (ip + 2);
12249 klass = (MonoClass *)mono_method_get_wrapper_data (method, token);
12251 if (!cfg->vret_addr) {
12252 g_assert (cfg->ret_var_is_local);
12254 EMIT_NEW_VARLOADA (cfg, ins, cfg->ret, cfg->ret->inst_vtype);
12256 EMIT_NEW_RETLOADA (cfg, ins);
12258 mini_emit_stobj (cfg, ins, sp [0], klass, TRUE);
12260 if (sp != stack_start)
12263 MONO_INST_NEW (cfg, ins, OP_BR);
12264 ins->inst_target_bb = end_bblock;
12265 MONO_ADD_INS (bblock, ins);
12266 link_bblock (cfg, bblock, end_bblock);
12267 start_new_bblock = 1;
12271 case CEE_MONO_CISINST:
12272 case CEE_MONO_CCASTCLASS: {
12277 token = read32 (ip + 2);
12278 klass = (MonoClass *)mono_method_get_wrapper_data (method, token);
12279 if (ip [1] == CEE_MONO_CISINST)
12280 ins = handle_cisinst (cfg, klass, sp [0]);
12282 ins = handle_ccastclass (cfg, klass, sp [0]);
12288 case CEE_MONO_SAVE_LMF:
12289 case CEE_MONO_RESTORE_LMF:
12290 #ifdef MONO_ARCH_HAVE_LMF_OPS
12291 MONO_INST_NEW (cfg, ins, (ip [1] == CEE_MONO_SAVE_LMF) ? OP_SAVE_LMF : OP_RESTORE_LMF);
12292 MONO_ADD_INS (bblock, ins);
12293 cfg->need_lmf_area = TRUE;
12297 case CEE_MONO_CLASSCONST:
12298 CHECK_STACK_OVF (1);
12300 token = read32 (ip + 2);
12301 EMIT_NEW_CLASSCONST (cfg, ins, mono_method_get_wrapper_data (method, token));
12304 inline_costs += 10 * num_calls++;
12306 case CEE_MONO_NOT_TAKEN:
12307 bblock->out_of_line = TRUE;
12310 case CEE_MONO_TLS: {
12313 CHECK_STACK_OVF (1);
12315 key = (gint32)read32 (ip + 2);
12316 g_assert (key < TLS_KEY_NUM);
12318 ins = mono_create_tls_get (cfg, key);
12320 if (cfg->compile_aot) {
12322 MONO_INST_NEW (cfg, ins, OP_TLS_GET);
12323 ins->dreg = alloc_preg (cfg);
12324 ins->type = STACK_PTR;
12326 g_assert_not_reached ();
12329 ins->type = STACK_PTR;
12330 MONO_ADD_INS (bblock, ins);
12335 case CEE_MONO_DYN_CALL: {
12336 MonoCallInst *call;
12338 /* It would be easier to call a trampoline, but that would put an
12339 * extra frame on the stack, confusing exception handling. So
12340 * implement it inline using an opcode for now.
12343 if (!cfg->dyn_call_var) {
12344 cfg->dyn_call_var = mono_compile_create_var (cfg, &mono_defaults.int_class->byval_arg, OP_LOCAL);
12345 /* prevent it from being register allocated */
12346 cfg->dyn_call_var->flags |= MONO_INST_VOLATILE;
12349 /* Has to use a call inst since the local regalloc expects it */
12350 MONO_INST_NEW_CALL (cfg, call, OP_DYN_CALL);
12351 ins = (MonoInst*)call;
12353 ins->sreg1 = sp [0]->dreg;
12354 ins->sreg2 = sp [1]->dreg;
12355 MONO_ADD_INS (bblock, ins);
12357 cfg->param_area = MAX (cfg->param_area, MONO_ARCH_DYN_CALL_PARAM_AREA);
12360 inline_costs += 10 * num_calls++;
12364 case CEE_MONO_MEMORY_BARRIER: {
12366 emit_memory_barrier (cfg, (int)read32 (ip + 2));
12370 case CEE_MONO_JIT_ATTACH: {
12371 MonoInst *args [16], *domain_ins;
12372 MonoInst *ad_ins, *jit_tls_ins;
12373 MonoBasicBlock *next_bb = NULL, *call_bb = NULL;
12375 cfg->orig_domain_var = mono_compile_create_var (cfg, &mono_defaults.int_class->byval_arg, OP_LOCAL);
12377 EMIT_NEW_PCONST (cfg, ins, NULL);
12378 MONO_EMIT_NEW_UNALU (cfg, OP_MOVE, cfg->orig_domain_var->dreg, ins->dreg);
12380 ad_ins = mono_get_domain_intrinsic (cfg);
12381 jit_tls_ins = mono_get_jit_tls_intrinsic (cfg);
12383 if (MONO_ARCH_HAVE_TLS_GET && ad_ins && jit_tls_ins) {
12384 NEW_BBLOCK (cfg, next_bb);
12385 NEW_BBLOCK (cfg, call_bb);
12387 if (cfg->compile_aot) {
12388 /* AOT code is only used in the root domain */
12389 EMIT_NEW_PCONST (cfg, domain_ins, NULL);
12391 EMIT_NEW_PCONST (cfg, domain_ins, cfg->domain);
12393 MONO_ADD_INS (cfg->cbb, ad_ins);
12394 MONO_EMIT_NEW_BIALU (cfg, OP_COMPARE, -1, ad_ins->dreg, domain_ins->dreg);
12395 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBNE_UN, call_bb);
12397 MONO_ADD_INS (cfg->cbb, jit_tls_ins);
12398 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, jit_tls_ins->dreg, 0);
12399 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBEQ, call_bb);
12401 MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, next_bb);
12402 MONO_START_BB (cfg, call_bb);
12405 if (cfg->compile_aot) {
12406 /* AOT code is only used in the root domain */
12407 EMIT_NEW_PCONST (cfg, args [0], NULL);
12409 EMIT_NEW_PCONST (cfg, args [0], cfg->domain);
12411 ins = mono_emit_jit_icall (cfg, mono_jit_thread_attach, args);
12412 MONO_EMIT_NEW_UNALU (cfg, OP_MOVE, cfg->orig_domain_var->dreg, ins->dreg);
12415 MONO_START_BB (cfg, next_bb);
12421 case CEE_MONO_JIT_DETACH: {
12422 MonoInst *args [16];
12424 /* Restore the original domain */
12425 dreg = alloc_ireg (cfg);
12426 EMIT_NEW_UNALU (cfg, args [0], OP_MOVE, dreg, cfg->orig_domain_var->dreg);
12427 mono_emit_jit_icall (cfg, mono_jit_set_domain, args);
12432 g_error ("opcode 0x%02x 0x%02x not handled", MONO_CUSTOM_PREFIX, ip [1]);
12438 case CEE_PREFIX1: {
12441 case CEE_ARGLIST: {
12442 /* somewhat similar to LDTOKEN */
12443 MonoInst *addr, *vtvar;
12444 CHECK_STACK_OVF (1);
12445 vtvar = mono_compile_create_var (cfg, &mono_defaults.argumenthandle_class->byval_arg, OP_LOCAL);
12447 EMIT_NEW_TEMPLOADA (cfg, addr, vtvar->inst_c0);
12448 EMIT_NEW_UNALU (cfg, ins, OP_ARGLIST, -1, addr->dreg);
12450 EMIT_NEW_TEMPLOAD (cfg, ins, vtvar->inst_c0);
12451 ins->type = STACK_VTYPE;
12452 ins->klass = mono_defaults.argumenthandle_class;
12462 MonoInst *cmp, *arg1, *arg2;
12470 * The following transforms:
12471 * CEE_CEQ into OP_CEQ
12472 * CEE_CGT into OP_CGT
12473 * CEE_CGT_UN into OP_CGT_UN
12474 * CEE_CLT into OP_CLT
12475 * CEE_CLT_UN into OP_CLT_UN
12477 MONO_INST_NEW (cfg, cmp, (OP_CEQ - CEE_CEQ) + ip [1]);
12479 MONO_INST_NEW (cfg, ins, cmp->opcode);
12480 cmp->sreg1 = arg1->dreg;
12481 cmp->sreg2 = arg2->dreg;
12482 type_from_op (cfg, cmp, arg1, arg2);
12484 add_widen_op (cfg, cmp, &arg1, &arg2);
12485 if ((arg1->type == STACK_I8) || ((SIZEOF_VOID_P == 8) && ((arg1->type == STACK_PTR) || (arg1->type == STACK_OBJ) || (arg1->type == STACK_MP))))
12486 cmp->opcode = OP_LCOMPARE;
12487 else if (arg1->type == STACK_R4)
12488 cmp->opcode = OP_RCOMPARE;
12489 else if (arg1->type == STACK_R8)
12490 cmp->opcode = OP_FCOMPARE;
12492 cmp->opcode = OP_ICOMPARE;
12493 MONO_ADD_INS (bblock, cmp);
12494 ins->type = STACK_I4;
12495 ins->dreg = alloc_dreg (cfg, ins->type);
12496 type_from_op (cfg, ins, arg1, arg2);
12498 if (cmp->opcode == OP_FCOMPARE || cmp->opcode == OP_RCOMPARE) {
12500 * The backends expect the fceq opcodes to do the
12503 ins->sreg1 = cmp->sreg1;
12504 ins->sreg2 = cmp->sreg2;
12507 MONO_ADD_INS (bblock, ins);
12513 MonoInst *argconst;
12514 MonoMethod *cil_method;
12516 CHECK_STACK_OVF (1);
12518 n = read32 (ip + 2);
12519 cmethod = mini_get_method (cfg, method, n, NULL, generic_context);
12520 if (!cmethod || mono_loader_get_last_error ())
12522 mono_class_init (cmethod->klass);
12524 mono_save_token_info (cfg, image, n, cmethod);
12526 context_used = mini_method_check_context_used (cfg, cmethod);
12528 cil_method = cmethod;
12529 if (!dont_verify && !cfg->skip_visibility && !mono_method_can_access_method (method, cmethod))
12530 METHOD_ACCESS_FAILURE (method, cil_method);
12532 if (mono_security_cas_enabled ()) {
12533 if (check_linkdemand (cfg, method, cmethod))
12534 INLINE_FAILURE ("linkdemand");
12535 CHECK_CFG_EXCEPTION;
12536 } else if (mono_security_core_clr_enabled ()) {
12537 ensure_method_is_allowed_to_call_method (cfg, method, cmethod, bblock, ip);
12541 * Optimize the common case of ldftn+delegate creation
12543 if ((sp > stack_start) && (ip + 6 + 5 < end) && ip_in_bb (cfg, bblock, ip + 6) && (ip [6] == CEE_NEWOBJ)) {
12544 MonoMethod *ctor_method = mini_get_method (cfg, method, read32 (ip + 7), NULL, generic_context);
12545 if (ctor_method && (ctor_method->klass->parent == mono_defaults.multicastdelegate_class)) {
12546 MonoInst *target_ins, *handle_ins;
12547 MonoMethod *invoke;
12548 int invoke_context_used;
12550 invoke = mono_get_delegate_invoke (ctor_method->klass);
12551 if (!invoke || !mono_method_signature (invoke))
12554 invoke_context_used = mini_method_check_context_used (cfg, invoke);
12556 target_ins = sp [-1];
12558 if (mono_security_core_clr_enabled ())
12559 ensure_method_is_allowed_to_call_method (cfg, method, ctor_method, bblock, ip);
12561 if (!(cmethod->flags & METHOD_ATTRIBUTE_STATIC)) {
12562 /*LAME IMPL: We must not add a null check for virtual invoke delegates.*/
12563 if (mono_method_signature (invoke)->param_count == mono_method_signature (cmethod)->param_count) {
12564 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, target_ins->dreg, 0);
12565 MONO_EMIT_NEW_COND_EXC (cfg, EQ, "ArgumentException");
12569 #if defined(MONO_ARCH_HAVE_CREATE_DELEGATE_TRAMPOLINE)
12570 /* FIXME: SGEN support */
12571 if (invoke_context_used == 0) {
12573 if (cfg->verbose_level > 3)
12574 g_print ("converting (in B%d: stack: %d) %s", bblock->block_num, (int)(sp - stack_start), mono_disasm_code_one (NULL, method, ip, NULL));
12575 if ((handle_ins = handle_delegate_ctor (cfg, ctor_method->klass, target_ins, cmethod, context_used, FALSE))) {
12578 CHECK_CFG_EXCEPTION;
12589 argconst = emit_get_rgctx_method (cfg, context_used, cmethod, MONO_RGCTX_INFO_METHOD);
12590 ins = mono_emit_jit_icall (cfg, mono_ldftn, &argconst);
12594 inline_costs += 10 * num_calls++;
12597 case CEE_LDVIRTFTN: {
12598 MonoInst *args [2];
12602 n = read32 (ip + 2);
12603 cmethod = mini_get_method (cfg, method, n, NULL, generic_context);
12604 if (!cmethod || mono_loader_get_last_error ())
12606 mono_class_init (cmethod->klass);
12608 context_used = mini_method_check_context_used (cfg, cmethod);
12610 if (mono_security_cas_enabled ()) {
12611 if (check_linkdemand (cfg, method, cmethod))
12612 INLINE_FAILURE ("linkdemand");
12613 CHECK_CFG_EXCEPTION;
12614 } else if (mono_security_core_clr_enabled ()) {
12615 ensure_method_is_allowed_to_call_method (cfg, method, cmethod, bblock, ip);
12619 * Optimize the common case of ldvirtftn+delegate creation
12621 if ((sp > stack_start) && (ip + 6 + 5 < end) && ip_in_bb (cfg, bblock, ip + 6) && (ip [6] == CEE_NEWOBJ) && (ip > header->code && ip [-1] == CEE_DUP)) {
12622 MonoMethod *ctor_method = mini_get_method (cfg, method, read32 (ip + 7), NULL, generic_context);
12623 if (ctor_method && (ctor_method->klass->parent == mono_defaults.multicastdelegate_class)) {
12624 MonoInst *target_ins, *handle_ins;
12625 MonoMethod *invoke;
12626 int invoke_context_used;
12628 invoke = mono_get_delegate_invoke (ctor_method->klass);
12629 if (!invoke || !mono_method_signature (invoke))
12632 invoke_context_used = mini_method_check_context_used (cfg, invoke);
12634 target_ins = sp [-1];
12636 if (mono_security_core_clr_enabled ())
12637 ensure_method_is_allowed_to_call_method (cfg, method, ctor_method, bblock, ip);
12639 #if defined(MONO_ARCH_HAVE_CREATE_DELEGATE_TRAMPOLINE)
12640 /* FIXME: SGEN support */
12641 if (invoke_context_used == 0) {
12643 if (cfg->verbose_level > 3)
12644 g_print ("converting (in B%d: stack: %d) %s", bblock->block_num, (int)(sp - stack_start), mono_disasm_code_one (NULL, method, ip, NULL));
12645 if ((handle_ins = handle_delegate_ctor (cfg, ctor_method->klass, target_ins, cmethod, context_used, TRUE))) {
12648 CHECK_CFG_EXCEPTION;
12662 args [1] = emit_get_rgctx_method (cfg, context_used,
12663 cmethod, MONO_RGCTX_INFO_METHOD);
12666 *sp++ = mono_emit_jit_icall (cfg, mono_ldvirtfn_gshared, args);
12668 *sp++ = mono_emit_jit_icall (cfg, mono_ldvirtfn, args);
12671 inline_costs += 10 * num_calls++;
12675 CHECK_STACK_OVF (1);
12677 n = read16 (ip + 2);
12679 EMIT_NEW_ARGLOAD (cfg, ins, n);
12684 CHECK_STACK_OVF (1);
12686 n = read16 (ip + 2);
12688 NEW_ARGLOADA (cfg, ins, n);
12689 MONO_ADD_INS (cfg->cbb, ins);
12697 n = read16 (ip + 2);
12699 if (!dont_verify_stloc && target_type_is_incompatible (cfg, param_types [n], *sp))
12701 EMIT_NEW_ARGSTORE (cfg, ins, n, *sp);
12705 CHECK_STACK_OVF (1);
12707 n = read16 (ip + 2);
12709 EMIT_NEW_LOCLOAD (cfg, ins, n);
12714 unsigned char *tmp_ip;
12715 CHECK_STACK_OVF (1);
12717 n = read16 (ip + 2);
12720 if ((tmp_ip = emit_optimized_ldloca_ir (cfg, ip, end, 2))) {
12726 EMIT_NEW_LOCLOADA (cfg, ins, n);
12735 n = read16 (ip + 2);
12737 if (!dont_verify_stloc && target_type_is_incompatible (cfg, header->locals [n], *sp))
12739 emit_stloc_ir (cfg, sp, header, n);
12746 if (sp != stack_start)
12748 if (cfg->method != method)
12750 * Inlining this into a loop in a parent could lead to
12751 * stack overflows which is different behavior than the
12752 * non-inlined case, thus disable inlining in this case.
12754 INLINE_FAILURE("localloc");
12756 MONO_INST_NEW (cfg, ins, OP_LOCALLOC);
12757 ins->dreg = alloc_preg (cfg);
12758 ins->sreg1 = sp [0]->dreg;
12759 ins->type = STACK_PTR;
12760 MONO_ADD_INS (cfg->cbb, ins);
12762 cfg->flags |= MONO_CFG_HAS_ALLOCA;
12764 ins->flags |= MONO_INST_INIT;
12769 case CEE_ENDFILTER: {
12770 MonoExceptionClause *clause, *nearest;
12775 if ((sp != stack_start) || (sp [0]->type != STACK_I4))
12777 MONO_INST_NEW (cfg, ins, OP_ENDFILTER);
12778 ins->sreg1 = (*sp)->dreg;
12779 MONO_ADD_INS (bblock, ins);
12780 start_new_bblock = 1;
12784 for (cc = 0; cc < header->num_clauses; ++cc) {
12785 clause = &header->clauses [cc];
12786 if ((clause->flags & MONO_EXCEPTION_CLAUSE_FILTER) &&
12787 ((ip - header->code) > clause->data.filter_offset && (ip - header->code) <= clause->handler_offset) &&
12788 (!nearest || (clause->data.filter_offset < nearest->data.filter_offset)))
12791 g_assert (nearest);
12792 if ((ip - header->code) != nearest->handler_offset)
12797 case CEE_UNALIGNED_:
12798 ins_flag |= MONO_INST_UNALIGNED;
12799 /* FIXME: record alignment? we can assume 1 for now */
12803 case CEE_VOLATILE_:
12804 ins_flag |= MONO_INST_VOLATILE;
12808 ins_flag |= MONO_INST_TAILCALL;
12809 cfg->flags |= MONO_CFG_HAS_TAIL;
12810 /* Can't inline tail calls at this time */
12811 inline_costs += 100000;
12818 token = read32 (ip + 2);
12819 klass = mini_get_class (method, token, generic_context);
12820 CHECK_TYPELOAD (klass);
12821 if (generic_class_is_reference_type (cfg, klass))
12822 MONO_EMIT_NEW_STORE_MEMBASE_IMM (cfg, OP_STORE_MEMBASE_IMM, sp [0]->dreg, 0, 0);
12824 mini_emit_initobj (cfg, *sp, NULL, klass);
12828 case CEE_CONSTRAINED_:
12830 token = read32 (ip + 2);
12831 constrained_class = mini_get_class (method, token, generic_context);
12832 CHECK_TYPELOAD (constrained_class);
12836 case CEE_INITBLK: {
12837 MonoInst *iargs [3];
12841 /* Skip optimized paths for volatile operations. */
12842 if ((ip [1] == CEE_CPBLK) && !(ins_flag & MONO_INST_VOLATILE) && (cfg->opt & MONO_OPT_INTRINS) && (sp [2]->opcode == OP_ICONST) && ((n = sp [2]->inst_c0) <= sizeof (gpointer) * 5)) {
12843 mini_emit_memcpy (cfg, sp [0]->dreg, 0, sp [1]->dreg, 0, sp [2]->inst_c0, 0);
12844 } else if ((ip [1] == CEE_INITBLK) && !(ins_flag & MONO_INST_VOLATILE) && (cfg->opt & MONO_OPT_INTRINS) && (sp [2]->opcode == OP_ICONST) && ((n = sp [2]->inst_c0) <= sizeof (gpointer) * 5) && (sp [1]->opcode == OP_ICONST) && (sp [1]->inst_c0 == 0)) {
12845 /* emit_memset only works when val == 0 */
12846 mini_emit_memset (cfg, sp [0]->dreg, 0, sp [2]->inst_c0, sp [1]->inst_c0, 0);
12849 iargs [0] = sp [0];
12850 iargs [1] = sp [1];
12851 iargs [2] = sp [2];
12852 if (ip [1] == CEE_CPBLK) {
12854 * FIXME: It's unclear whether we should be emitting both the acquire
12855 * and release barriers for cpblk. It is technically both a load and
12856 * store operation, so it seems like that's the sensible thing to do.
12858 * FIXME: We emit full barriers on both sides of the operation for
12859 * simplicity. We should have a separate atomic memcpy method instead.
12861 MonoMethod *memcpy_method = get_memcpy_method ();
12863 if (ins_flag & MONO_INST_VOLATILE)
12864 emit_memory_barrier (cfg, MONO_MEMORY_BARRIER_SEQ);
12866 call = mono_emit_method_call (cfg, memcpy_method, iargs, NULL);
12867 call->flags |= ins_flag;
12869 if (ins_flag & MONO_INST_VOLATILE)
12870 emit_memory_barrier (cfg, MONO_MEMORY_BARRIER_SEQ);
12872 MonoMethod *memset_method = get_memset_method ();
12873 if (ins_flag & MONO_INST_VOLATILE) {
12874 /* Volatile stores have release semantics, see 12.6.7 in Ecma 335 */
12875 emit_memory_barrier (cfg, MONO_MEMORY_BARRIER_REL);
12877 call = mono_emit_method_call (cfg, memset_method, iargs, NULL);
12878 call->flags |= ins_flag;
12889 ins_flag |= MONO_INST_NOTYPECHECK;
12891 ins_flag |= MONO_INST_NORANGECHECK;
12892 /* we ignore the no-nullcheck for now since we
12893 * really do it explicitly only when doing callvirt->call
12897 case CEE_RETHROW: {
12899 int handler_offset = -1;
12901 for (i = 0; i < header->num_clauses; ++i) {
12902 MonoExceptionClause *clause = &header->clauses [i];
12903 if (MONO_OFFSET_IN_HANDLER (clause, ip - header->code) && !(clause->flags & MONO_EXCEPTION_CLAUSE_FINALLY)) {
12904 handler_offset = clause->handler_offset;
12909 bblock->flags |= BB_EXCEPTION_UNSAFE;
12911 if (handler_offset == -1)
12914 EMIT_NEW_TEMPLOAD (cfg, load, mono_find_exvar_for_offset (cfg, handler_offset)->inst_c0);
12915 MONO_INST_NEW (cfg, ins, OP_RETHROW);
12916 ins->sreg1 = load->dreg;
12917 MONO_ADD_INS (bblock, ins);
12919 MONO_INST_NEW (cfg, ins, OP_NOT_REACHED);
12920 MONO_ADD_INS (bblock, ins);
12923 link_bblock (cfg, bblock, end_bblock);
12924 start_new_bblock = 1;
12932 CHECK_STACK_OVF (1);
12934 token = read32 (ip + 2);
12935 if (mono_metadata_token_table (token) == MONO_TABLE_TYPESPEC && !image_is_dynamic (method->klass->image) && !generic_context) {
12936 MonoType *type = mono_type_create_from_typespec_checked (image, token, &cfg->error);
12939 val = mono_type_size (type, &ialign);
12941 MonoClass *klass = mini_get_class (method, token, generic_context);
12942 CHECK_TYPELOAD (klass);
12944 val = mono_type_size (&klass->byval_arg, &ialign);
12946 if (mini_is_gsharedvt_klass (cfg, klass))
12947 GSHAREDVT_FAILURE (*ip);
12949 EMIT_NEW_ICONST (cfg, ins, val);
12954 case CEE_REFANYTYPE: {
12955 MonoInst *src_var, *src;
12957 GSHAREDVT_FAILURE (*ip);
12963 src_var = get_vreg_to_inst (cfg, sp [0]->dreg);
12965 src_var = mono_compile_create_var_for_vreg (cfg, &mono_defaults.typed_reference_class->byval_arg, OP_LOCAL, sp [0]->dreg);
12966 EMIT_NEW_VARLOADA (cfg, src, src_var, src_var->inst_vtype);
12967 EMIT_NEW_LOAD_MEMBASE_TYPE (cfg, ins, &mono_defaults.typehandle_class->byval_arg, src->dreg, MONO_STRUCT_OFFSET (MonoTypedRef, type));
12972 case CEE_READONLY_:
12985 g_warning ("opcode 0xfe 0x%02x not handled", ip [1]);
12995 g_warning ("opcode 0x%02x not handled", *ip);
12999 if (start_new_bblock != 1)
13002 bblock->cil_length = ip - bblock->cil_code;
13003 if (bblock->next_bb) {
13004 /* This could already be set because of inlining, #693905 */
13005 MonoBasicBlock *bb = bblock;
13007 while (bb->next_bb)
13009 bb->next_bb = end_bblock;
13011 bblock->next_bb = end_bblock;
13014 if (cfg->method == method && cfg->domainvar) {
13016 MonoInst *get_domain;
13018 cfg->cbb = init_localsbb;
13020 if ((get_domain = mono_get_domain_intrinsic (cfg))) {
13021 MONO_ADD_INS (cfg->cbb, get_domain);
13023 get_domain = mono_emit_jit_icall (cfg, mono_domain_get, NULL);
13025 NEW_TEMPSTORE (cfg, store, cfg->domainvar->inst_c0, get_domain);
13026 MONO_ADD_INS (cfg->cbb, store);
13029 #if defined(TARGET_POWERPC) || defined(TARGET_X86)
13030 if (cfg->compile_aot)
13031 /* FIXME: The plt slots require a GOT var even if the method doesn't use it */
13032 mono_get_got_var (cfg);
13035 if (cfg->method == method && cfg->got_var)
13036 mono_emit_load_got_addr (cfg);
13038 if (init_localsbb) {
13039 cfg->cbb = init_localsbb;
13041 for (i = 0; i < header->num_locals; ++i) {
13042 emit_init_local (cfg, i, header->locals [i], init_locals);
13046 if (cfg->init_ref_vars && cfg->method == method) {
13047 /* Emit initialization for ref vars */
13048 // FIXME: Avoid duplication initialization for IL locals.
13049 for (i = 0; i < cfg->num_varinfo; ++i) {
13050 MonoInst *ins = cfg->varinfo [i];
13052 if (ins->opcode == OP_LOCAL && ins->type == STACK_OBJ)
13053 MONO_EMIT_NEW_PCONST (cfg, ins->dreg, NULL);
13057 if (cfg->lmf_var && cfg->method == method) {
13058 cfg->cbb = init_localsbb;
13059 emit_push_lmf (cfg);
13062 cfg->cbb = init_localsbb;
13063 emit_instrumentation_call (cfg, mono_profiler_method_enter);
13066 MonoBasicBlock *bb;
13069 * Make seq points at backward branch targets interruptable.
13071 for (bb = cfg->bb_entry; bb; bb = bb->next_bb)
13072 if (bb->code && bb->in_count > 1 && bb->code->opcode == OP_SEQ_POINT)
13073 bb->code->flags |= MONO_INST_SINGLE_STEP_LOC;
13076 /* Add a sequence point for method entry/exit events */
13077 if (seq_points && cfg->gen_seq_points_debug_data) {
13078 NEW_SEQ_POINT (cfg, ins, METHOD_ENTRY_IL_OFFSET, FALSE);
13079 MONO_ADD_INS (init_localsbb, ins);
13080 NEW_SEQ_POINT (cfg, ins, METHOD_EXIT_IL_OFFSET, FALSE);
13081 MONO_ADD_INS (cfg->bb_exit, ins);
13085 * Add seq points for IL offsets which have line number info, but wasn't generated a seq point during JITting because
13086 * the code they refer to was dead (#11880).
13088 if (sym_seq_points) {
13089 for (i = 0; i < header->code_size; ++i) {
13090 if (mono_bitset_test_fast (seq_point_locs, i) && !mono_bitset_test_fast (seq_point_set_locs, i)) {
13093 NEW_SEQ_POINT (cfg, ins, i, FALSE);
13094 mono_add_seq_point (cfg, NULL, ins, SEQ_POINT_NATIVE_OFFSET_DEAD_CODE);
13101 if (cfg->method == method) {
13102 MonoBasicBlock *bb;
13103 for (bb = cfg->bb_entry; bb; bb = bb->next_bb) {
13104 bb->region = mono_find_block_region (cfg, bb->real_offset);
13106 mono_create_spvar_for_region (cfg, bb->region);
13107 if (cfg->verbose_level > 2)
13108 printf ("REGION BB%d IL_%04x ID_%08X\n", bb->block_num, bb->real_offset, bb->region);
13112 if (inline_costs < 0) {
13115 /* Method is too large */
13116 mname = mono_method_full_name (method, TRUE);
13117 mono_cfg_set_exception (cfg, MONO_EXCEPTION_INVALID_PROGRAM);
13118 cfg->exception_message = g_strdup_printf ("Method %s is too complex.", mname);
13122 if ((cfg->verbose_level > 2) && (cfg->method == method))
13123 mono_print_code (cfg, "AFTER METHOD-TO-IR");
13128 g_assert (!mono_error_ok (&cfg->error));
13132 g_assert (cfg->exception_type != MONO_EXCEPTION_NONE);
13136 set_exception_type_from_invalid_il (cfg, method, ip);
13140 g_slist_free (class_inits);
13141 mono_basic_block_free (original_bb);
13142 cfg->dont_inline = g_list_remove (cfg->dont_inline, method);
13143 cfg->headers_to_free = g_slist_prepend_mempool (cfg->mempool, cfg->headers_to_free, header);
13144 if (cfg->exception_type)
13147 return inline_costs;
/*
 * store_membase_reg_to_store_membase_imm:
 *
 *   Map a register-source store-to-memory opcode (..._MEMBASE_REG) to its
 * immediate-source counterpart (..._MEMBASE_IMM), for use when the value
 * being stored is known to be a constant.  Asserts on any opcode that has
 * no immediate form.
 *
 * NOTE(review): this extract is missing lines (the embedded line numbers
 * skip); the switch header, break/closing braces are not visible here.
 */
13151 store_membase_reg_to_store_membase_imm (int opcode)
13154 case OP_STORE_MEMBASE_REG:
13155 return OP_STORE_MEMBASE_IMM;
13156 case OP_STOREI1_MEMBASE_REG:
13157 return OP_STOREI1_MEMBASE_IMM;
13158 case OP_STOREI2_MEMBASE_REG:
13159 return OP_STOREI2_MEMBASE_IMM;
13160 case OP_STOREI4_MEMBASE_REG:
13161 return OP_STOREI4_MEMBASE_IMM;
13162 case OP_STOREI8_MEMBASE_REG:
13163 return OP_STOREI8_MEMBASE_IMM;
/* Unhandled store opcode: caller was expected to pass a ..._MEMBASE_REG store. */
13165 g_assert_not_reached ();
/*
 * mono_op_to_op_imm:
 *
 *   Map a two-operand ALU/shift/compare/store/call opcode to the variant
 * that takes an immediate as its second operand (OP_IADD -> OP_IADD_IMM,
 * OP_LSHL -> OP_LSHL_IMM, etc.), used by constant folding / local
 * propagation when one source is a known constant.  x86/amd64-specific
 * membase and push opcodes are handled under their target #ifdefs.
 *
 * NOTE(review): most `case OP_*:` labels have been elided from this
 * extract (embedded line numbers are non-contiguous); each visible
 * `return` pairs with a dropped case label, and the function's default
 * branch / closing brace are also not visible.
 */
13172 mono_op_to_op_imm (int opcode)
/* 32-bit integer ALU ops. */
13176 return OP_IADD_IMM;
13178 return OP_ISUB_IMM;
13180 return OP_IDIV_IMM;
13182 return OP_IDIV_UN_IMM;
13184 return OP_IREM_IMM;
13186 return OP_IREM_UN_IMM;
13188 return OP_IMUL_IMM;
13190 return OP_IAND_IMM;
13194 return OP_IXOR_IMM;
13196 return OP_ISHL_IMM;
13198 return OP_ISHR_IMM;
13200 return OP_ISHR_UN_IMM;
/* 64-bit integer ALU ops. */
13203 return OP_LADD_IMM;
13205 return OP_LSUB_IMM;
13207 return OP_LAND_IMM;
13211 return OP_LXOR_IMM;
13213 return OP_LSHL_IMM;
13215 return OP_LSHR_IMM;
13217 return OP_LSHR_UN_IMM;
/* Long remainder by immediate only exists natively on 64-bit registers. */
13218 #if SIZEOF_REGISTER == 8
13220 return OP_LREM_IMM;
/* Compares. */
13224 return OP_COMPARE_IMM;
13226 return OP_ICOMPARE_IMM;
13228 return OP_LCOMPARE_IMM;
/* Stores of a constant value. */
13230 case OP_STORE_MEMBASE_REG:
13231 return OP_STORE_MEMBASE_IMM;
13232 case OP_STOREI1_MEMBASE_REG:
13233 return OP_STOREI1_MEMBASE_IMM;
13234 case OP_STOREI2_MEMBASE_REG:
13235 return OP_STOREI2_MEMBASE_IMM;
13236 case OP_STOREI4_MEMBASE_REG:
13237 return OP_STOREI4_MEMBASE_IMM;
/* Target-specific opcodes. */
13239 #if defined(TARGET_X86) || defined (TARGET_AMD64)
13241 return OP_X86_PUSH_IMM;
13242 case OP_X86_COMPARE_MEMBASE_REG:
13243 return OP_X86_COMPARE_MEMBASE_IMM;
13245 #if defined(TARGET_AMD64)
13246 case OP_AMD64_ICOMPARE_MEMBASE_REG:
13247 return OP_AMD64_ICOMPARE_MEMBASE_IMM;
/* Calls through a constant address can use the direct-call form. */
13249 case OP_VOIDCALL_REG:
13250 return OP_VOIDCALL;
13258 return OP_LOCALLOC_IMM;
/*
 * ldind_to_load_membase:
 *
 *   Map a CIL indirect-load opcode (CEE_LDIND_*) to the corresponding
 * typed JIT load-from-base+offset opcode (OP_LOAD*_MEMBASE).  Asserts
 * on any opcode outside the CEE_LDIND_* range.
 *
 * NOTE(review): several `case CEE_LDIND_*:` labels are elided from this
 * extract (embedded line numbers skip); each visible `return` pairs with
 * a dropped case label.
 */
13265 ldind_to_load_membase (int opcode)
13269 return OP_LOADI1_MEMBASE;
13271 return OP_LOADU1_MEMBASE;
13273 return OP_LOADI2_MEMBASE;
13275 return OP_LOADU2_MEMBASE;
13277 return OP_LOADI4_MEMBASE;
13279 return OP_LOADU4_MEMBASE;
/* Native-int and object-reference loads both use the pointer-sized load. */
13281 return OP_LOAD_MEMBASE;
13282 case CEE_LDIND_REF:
13283 return OP_LOAD_MEMBASE;
13285 return OP_LOADI8_MEMBASE;
13287 return OP_LOADR4_MEMBASE;
13289 return OP_LOADR8_MEMBASE;
13291 g_assert_not_reached ();
/*
 * stind_to_store_membase:
 *
 *   Map a CIL indirect-store opcode (CEE_STIND_*) to the corresponding
 * typed JIT store-to-base+offset opcode (OP_STORE*_MEMBASE_REG).
 * Asserts on any opcode outside the CEE_STIND_* range.
 *
 * NOTE(review): several `case CEE_STIND_*:` labels are elided from this
 * extract (embedded line numbers skip).
 */
13298 stind_to_store_membase (int opcode)
13302 return OP_STOREI1_MEMBASE_REG;
13304 return OP_STOREI2_MEMBASE_REG;
13306 return OP_STOREI4_MEMBASE_REG;
/* Native-int and object-reference stores both use the pointer-sized store. */
13308 case CEE_STIND_REF:
13309 return OP_STORE_MEMBASE_REG;
13311 return OP_STOREI8_MEMBASE_REG;
13313 return OP_STORER4_MEMBASE_REG;
13315 return OP_STORER8_MEMBASE_REG;
13317 g_assert_not_reached ();
/*
 * mono_load_membase_to_load_mem:
 *
 *   Map a base+offset load (OP_LOAD*_MEMBASE) to the absolute-address
 * form (OP_LOAD*_MEM), which only x86/amd64 provide; on other targets
 * (and presumably for unmapped opcodes — the fall-through return is not
 * visible in this extract) no conversion is done.
 */
13324 mono_load_membase_to_load_mem (int opcode)
13326 // FIXME: Add a MONO_ARCH_HAVE_LOAD_MEM macro
13327 #if defined(TARGET_X86) || defined(TARGET_AMD64)
13329 case OP_LOAD_MEMBASE:
13330 return OP_LOAD_MEM;
13331 case OP_LOADU1_MEMBASE:
13332 return OP_LOADU1_MEM;
13333 case OP_LOADU2_MEMBASE:
13334 return OP_LOADU2_MEM;
13335 case OP_LOADI4_MEMBASE:
13336 return OP_LOADI4_MEM;
13337 case OP_LOADU4_MEMBASE:
13338 return OP_LOADU4_MEM;
/* 64-bit absolute loads only exist when registers are 64-bit wide. */
13339 #if SIZEOF_REGISTER == 8
13340 case OP_LOADI8_MEMBASE:
13341 return OP_LOADI8_MEM;
/*
 * op_to_op_dest_membase:
 *
 *   Peephole helper: given an ALU opcode whose result feeds a store
 * (store_opcode), return the x86/amd64 read-modify-write opcode that
 * operates directly on the memory destination (e.g. add [mem], reg),
 * letting the local optimizer fuse "load; op; store" sequences.
 * Bails out early when the store is not of a fusable width.
 *
 * NOTE(review): `case OP_*:` labels, the bail-out `return -1;` bodies and
 * #endif lines are elided from this extract (embedded line numbers skip);
 * each visible `return` pairs with a dropped case label.
 */
13350 op_to_op_dest_membase (int store_opcode, int opcode)
13352 #if defined(TARGET_X86)
/* On x86 only pointer-sized / 32-bit stores can be fused. */
13353 if (!((store_opcode == OP_STORE_MEMBASE_REG) || (store_opcode == OP_STOREI4_MEMBASE_REG)))
13358 return OP_X86_ADD_MEMBASE_REG;
13360 return OP_X86_SUB_MEMBASE_REG;
13362 return OP_X86_AND_MEMBASE_REG;
13364 return OP_X86_OR_MEMBASE_REG;
13366 return OP_X86_XOR_MEMBASE_REG;
13369 return OP_X86_ADD_MEMBASE_IMM;
13372 return OP_X86_SUB_MEMBASE_IMM;
13375 return OP_X86_AND_MEMBASE_IMM;
13378 return OP_X86_OR_MEMBASE_IMM;
13381 return OP_X86_XOR_MEMBASE_IMM;
13387 #if defined(TARGET_AMD64)
/* On amd64, 32-bit, 64-bit and pointer-sized stores can be fused. */
13388 if (!((store_opcode == OP_STORE_MEMBASE_REG) || (store_opcode == OP_STOREI4_MEMBASE_REG) || (store_opcode == OP_STOREI8_MEMBASE_REG)))
/* 32-bit ALU ops reuse the X86_* membase opcodes. */
13393 return OP_X86_ADD_MEMBASE_REG;
13395 return OP_X86_SUB_MEMBASE_REG;
13397 return OP_X86_AND_MEMBASE_REG;
13399 return OP_X86_OR_MEMBASE_REG;
13401 return OP_X86_XOR_MEMBASE_REG;
13403 return OP_X86_ADD_MEMBASE_IMM;
13405 return OP_X86_SUB_MEMBASE_IMM;
13407 return OP_X86_AND_MEMBASE_IMM;
13409 return OP_X86_OR_MEMBASE_IMM;
13411 return OP_X86_XOR_MEMBASE_IMM;
/* 64-bit ALU ops use the AMD64_* membase opcodes. */
13413 return OP_AMD64_ADD_MEMBASE_REG;
13415 return OP_AMD64_SUB_MEMBASE_REG;
13417 return OP_AMD64_AND_MEMBASE_REG;
13419 return OP_AMD64_OR_MEMBASE_REG;
13421 return OP_AMD64_XOR_MEMBASE_REG;
13424 return OP_AMD64_ADD_MEMBASE_IMM;
13427 return OP_AMD64_SUB_MEMBASE_IMM;
13430 return OP_AMD64_AND_MEMBASE_IMM;
13433 return OP_AMD64_OR_MEMBASE_IMM;
13436 return OP_AMD64_XOR_MEMBASE_IMM;
/*
 * op_to_op_store_membase:
 *
 *   Peephole helper: fuse a condition-flag materialization (the elided
 * case labels presumably cover OP_*CEQ / OP_*CLT-style setcc opcodes)
 * with a following 1-byte store into a single x86/amd64 SETcc-to-memory
 * opcode.  The 1-byte store width check matches SETcc's 8-bit result.
 */
13446 op_to_op_store_membase (int store_opcode, int opcode)
13448 #if defined(TARGET_X86) || defined(TARGET_AMD64)
13451 if (store_opcode == OP_STOREI1_MEMBASE_REG)
13452 return OP_X86_SETEQ_MEMBASE;
13454 if (store_opcode == OP_STOREI1_MEMBASE_REG)
13455 return OP_X86_SETNE_MEMBASE;
/*
 * op_to_op_src1_membase:
 *
 *   Peephole helper: when the first source of `opcode` comes straight
 * from a load (load_opcode), return an x86/amd64 opcode that reads that
 * operand directly from memory (compare/push with a membase operand),
 * removing the intermediate register.  Returns unchanged/-1 (elided from
 * this extract) when no fused form exists or widths don't match.
 *
 * NOTE(review): case labels, `break`s and #endif lines are elided
 * (embedded line numbers skip); bodies below are fragments.
 */
13463 op_to_op_src1_membase (int load_opcode, int opcode)
13466 /* FIXME: This has sign extension issues */
/* Zero-extended byte load + 32-bit immediate compare -> 8-bit memory compare. */
13468 if ((opcode == OP_ICOMPARE_IMM) && (load_opcode == OP_LOADU1_MEMBASE))
13469 return OP_X86_COMPARE_MEMBASE8_IMM;
/* Otherwise only pointer-sized / 32-bit loads can be fused on x86. */
13472 if (!((load_opcode == OP_LOAD_MEMBASE) || (load_opcode == OP_LOADI4_MEMBASE) || (load_opcode == OP_LOADU4_MEMBASE)))
13477 return OP_X86_PUSH_MEMBASE;
13478 case OP_COMPARE_IMM:
13479 case OP_ICOMPARE_IMM:
13480 return OP_X86_COMPARE_MEMBASE_IMM;
13483 return OP_X86_COMPARE_MEMBASE_REG;
13487 #ifdef TARGET_AMD64
13488 /* FIXME: This has sign extension issues */
13490 if ((opcode == OP_ICOMPARE_IMM) && (load_opcode == OP_LOADU1_MEMBASE))
13491 return OP_X86_COMPARE_MEMBASE8_IMM;
/* Under ILP32 (x32), OP_LOAD_MEMBASE is 32-bit, so the 64-bit pairing differs. */
13496 #ifdef __mono_ilp32__
13497 if (load_opcode == OP_LOADI8_MEMBASE)
13499 if ((load_opcode == OP_LOAD_MEMBASE) || (load_opcode == OP_LOADI8_MEMBASE))
13501 return OP_X86_PUSH_MEMBASE;
/* The block below is commented out in the original source. */
13503 /* FIXME: This only works for 32 bit immediates
13504 case OP_COMPARE_IMM:
13505 case OP_LCOMPARE_IMM:
13506 if ((load_opcode == OP_LOAD_MEMBASE) || (load_opcode == OP_LOADI8_MEMBASE))
13507 return OP_AMD64_COMPARE_MEMBASE_IMM;
13509 case OP_ICOMPARE_IMM:
13510 if ((load_opcode == OP_LOADI4_MEMBASE) || (load_opcode == OP_LOADU4_MEMBASE))
13511 return OP_AMD64_ICOMPARE_MEMBASE_IMM;
13515 #ifdef __mono_ilp32__
13516 if (load_opcode == OP_LOAD_MEMBASE)
13517 return OP_AMD64_ICOMPARE_MEMBASE_REG;
13518 if (load_opcode == OP_LOADI8_MEMBASE)
13520 if ((load_opcode == OP_LOAD_MEMBASE) || (load_opcode == OP_LOADI8_MEMBASE))
13522 return OP_AMD64_COMPARE_MEMBASE_REG;
13525 if ((load_opcode == OP_LOADI4_MEMBASE) || (load_opcode == OP_LOADU4_MEMBASE))
13526 return OP_AMD64_ICOMPARE_MEMBASE_REG;
/*
 * op_to_op_src2_membase:
 *
 *   Peephole helper: when the second source of `opcode` comes straight
 * from a load (load_opcode), return an x86/amd64 reg-op-memory opcode
 * (e.g. add reg, [mem]) so the intermediate register load can be
 * eliminated.  Width of the load selects between the 32-bit (X86_*)
 * and 64-bit (AMD64_*) fused forms.
 *
 * NOTE(review): case labels, early returns and #else/#endif lines are
 * elided from this extract (embedded line numbers skip).
 */
13535 op_to_op_src2_membase (int load_opcode, int opcode)
/* x86: only pointer-sized / 32-bit loads can be fused. */
13538 if (!((load_opcode == OP_LOAD_MEMBASE) || (load_opcode == OP_LOADI4_MEMBASE) || (load_opcode == OP_LOADU4_MEMBASE)))
13544 return OP_X86_COMPARE_REG_MEMBASE;
13546 return OP_X86_ADD_REG_MEMBASE;
13548 return OP_X86_SUB_REG_MEMBASE;
13550 return OP_X86_AND_REG_MEMBASE;
13552 return OP_X86_OR_REG_MEMBASE;
13554 return OP_X86_XOR_REG_MEMBASE;
13558 #ifdef TARGET_AMD64
/* Under ILP32 (x32), OP_LOAD_MEMBASE counts as a 32-bit load. */
13559 #ifdef __mono_ilp32__
13560 if ((load_opcode == OP_LOADI4_MEMBASE) || (load_opcode == OP_LOADU4_MEMBASE) || (load_opcode == OP_LOAD_MEMBASE) ) {
13562 if ((load_opcode == OP_LOADI4_MEMBASE) || (load_opcode == OP_LOADU4_MEMBASE)) {
13566 return OP_AMD64_ICOMPARE_REG_MEMBASE;
/* 32-bit ALU fusions reuse the X86_* opcodes even on amd64. */
13568 return OP_X86_ADD_REG_MEMBASE;
13570 return OP_X86_SUB_REG_MEMBASE;
13572 return OP_X86_AND_REG_MEMBASE;
13574 return OP_X86_OR_REG_MEMBASE;
13576 return OP_X86_XOR_REG_MEMBASE;
13578 #ifdef __mono_ilp32__
13579 } else if (load_opcode == OP_LOADI8_MEMBASE) {
13581 } else if ((load_opcode == OP_LOADI8_MEMBASE) || (load_opcode == OP_LOAD_MEMBASE)) {
13586 return OP_AMD64_COMPARE_REG_MEMBASE;
13588 return OP_AMD64_ADD_REG_MEMBASE;
13590 return OP_AMD64_SUB_REG_MEMBASE;
13592 return OP_AMD64_AND_REG_MEMBASE;
13594 return OP_AMD64_OR_REG_MEMBASE;
13596 return OP_AMD64_XOR_REG_MEMBASE;
/*
 * mono_op_to_op_imm_noemul:
 *
 *   Like mono_op_to_op_imm (), but refuses the conversion (the elided
 * case bodies appear to return -1) for opcodes that the current
 * architecture emulates in software — long shifts on 32-bit targets,
 * and mul/div/rem where MONO_ARCH_EMULATE_* is defined — since an
 * emulated opcode has no usable immediate form.
 *
 * NOTE(review): the case labels and early-return bodies between these
 * #if lines are elided from this extract (embedded line numbers skip).
 */
13605 mono_op_to_op_imm_noemul (int opcode)
13608 #if SIZEOF_REGISTER == 4 && !defined(MONO_ARCH_NO_EMULATE_LONG_SHIFT_OPS)
13614 #if defined(MONO_ARCH_EMULATE_MUL_DIV) || defined(MONO_ARCH_EMULATE_DIV)
13621 #if defined(MONO_ARCH_EMULATE_MUL_DIV)
/* Everything else can take the normal immediate conversion. */
13626 return mono_op_to_op_imm (opcode);
/*
 * mono_handle_global_vregs:
 *
 *   Make vregs used in more than one bblock 'global', i.e. allocate a variable
 * for them, and conversely demote variables referenced in only a single
 * bblock back to local vregs.  Finally, compress the varinfo/vars tables
 * so dead entries don't slow down liveness computation.
 */
13631 * mono_handle_global_vregs:
13633 * Make vregs used in more than one bblock 'global', i.e. allocate a variable
13637 mono_handle_global_vregs (MonoCompile *cfg)
13639 gint32 *vreg_to_bb;
13640 MonoBasicBlock *bb;
/*
 * NOTE(review): vreg_to_bb is a gint32 array, but the allocation below uses
 * sizeof (gint32*) (pointer size) per element, and the "+ 1" is applied to
 * the byte count rather than the element count.  Over-allocates on 64-bit
 * (harmless) but looks unintended — confirm against upstream.
 */
13643 vreg_to_bb = mono_mempool_alloc0 (cfg->mempool, sizeof (gint32*) * cfg->next_vreg + 1);
13645 #ifdef MONO_ARCH_SIMD_INTRINSICS
13646 if (cfg->uses_simd_intrinsics)
13647 mono_simd_simplify_indirection (cfg);
13650 /* Find local vregs used in more than one bb */
13651 for (bb = cfg->bb_entry; bb; bb = bb->next_bb) {
13652 MonoInst *ins = bb->code;
13653 int block_num = bb->block_num;
13655 if (cfg->verbose_level > 2)
13656 printf ("\nHANDLE-GLOBAL-VREGS BLOCK %d:\n", bb->block_num);
13659 for (; ins; ins = ins->next) {
13660 const char *spec = INS_INFO (ins->opcode);
13661 int regtype = 0, regindex;
13664 if (G_UNLIKELY (cfg->verbose_level > 2))
13665 mono_print_ins (ins);
/* By this point all CIL opcodes must have been lowered to machine IR. */
13667 g_assert (ins->opcode >= MONO_CEE_LAST);
/* Visit dest, src1, src2, src3 of the instruction in turn. */
13669 for (regindex = 0; regindex < 4; regindex ++) {
13672 if (regindex == 0) {
13673 regtype = spec [MONO_INST_DEST];
13674 if (regtype == ' ')
13677 } else if (regindex == 1) {
13678 regtype = spec [MONO_INST_SRC1];
13679 if (regtype == ' ')
13682 } else if (regindex == 2) {
13683 regtype = spec [MONO_INST_SRC2];
13684 if (regtype == ' ')
13687 } else if (regindex == 3) {
13688 regtype = spec [MONO_INST_SRC3];
13689 if (regtype == ' ')
13694 #if SIZEOF_REGISTER == 4
13695 /* In the LLVM case, the long opcodes are not decomposed */
13696 if (regtype == 'l' && !COMPILE_LLVM (cfg)) {
13698 * Since some instructions reference the original long vreg,
13699 * and some reference the two component vregs, it is quite hard
13700 * to determine when it needs to be global. So be conservative.
13702 if (!get_vreg_to_inst (cfg, vreg)) {
13703 mono_compile_create_var_for_vreg (cfg, &mono_defaults.int64_class->byval_arg, OP_LOCAL, vreg);
13705 if (cfg->verbose_level > 2)
13706 printf ("LONG VREG R%d made global.\n", vreg);
13710 * Make the component vregs volatile since the optimizations can
13711 * get confused otherwise.
13713 get_vreg_to_inst (cfg, vreg + 1)->flags |= MONO_INST_VOLATILE;
13714 get_vreg_to_inst (cfg, vreg + 2)->flags |= MONO_INST_VOLATILE;
13718 g_assert (vreg != -1);
/* vreg_to_bb holds block_num+1, or -1 once seen in two different blocks. */
13720 prev_bb = vreg_to_bb [vreg];
13721 if (prev_bb == 0) {
13722 /* 0 is a valid block num */
13723 vreg_to_bb [vreg] = block_num + 1;
13724 } else if ((prev_bb != block_num + 1) && (prev_bb != -1)) {
/* Hard registers are never turned into variables. */
13725 if (((regtype == 'i' && (vreg < MONO_MAX_IREGS))) || (regtype == 'f' && (vreg < MONO_MAX_FREGS)))
13728 if (!get_vreg_to_inst (cfg, vreg)) {
13729 if (G_UNLIKELY (cfg->verbose_level > 2))
13730 printf ("VREG R%d used in BB%d and BB%d made global.\n", vreg, vreg_to_bb [vreg], block_num);
/* Pick a variable type matching the regtype (the switch labels are elided here). */
13734 if (vreg_is_ref (cfg, vreg))
13735 mono_compile_create_var_for_vreg (cfg, &mono_defaults.object_class->byval_arg, OP_LOCAL, vreg);
13737 mono_compile_create_var_for_vreg (cfg, &mono_defaults.int_class->byval_arg, OP_LOCAL, vreg);
13740 mono_compile_create_var_for_vreg (cfg, &mono_defaults.int64_class->byval_arg, OP_LOCAL, vreg);
13743 mono_compile_create_var_for_vreg (cfg, &mono_defaults.double_class->byval_arg, OP_LOCAL, vreg);
13746 mono_compile_create_var_for_vreg (cfg, &ins->klass->byval_arg, OP_LOCAL, vreg);
13749 g_assert_not_reached ();
13753 /* Flag as having been used in more than one bb */
13754 vreg_to_bb [vreg] = -1;
13760 /* If a variable is used in only one bblock, convert it into a local vreg */
13761 for (i = 0; i < cfg->num_varinfo; i++) {
13762 MonoInst *var = cfg->varinfo [i];
13763 MonoMethodVar *vmv = MONO_VARINFO (cfg, i);
13765 switch (var->type) {
13771 #if SIZEOF_REGISTER == 8
13774 #if !defined(TARGET_X86)
13775 /* Enabling this screws up the fp stack on x86 */
13778 if (mono_arch_is_soft_float ())
13781 /* Arguments are implicitly global */
13782 /* Putting R4 vars into registers doesn't work currently */
13783 /* The gsharedvt vars are implicitly referenced by ldaddr opcodes, but those opcodes are only generated later */
13784 if ((var->opcode != OP_ARG) && (var != cfg->ret) && !(var->flags & (MONO_INST_VOLATILE|MONO_INST_INDIRECT)) && (vreg_to_bb [var->dreg] != -1) && (var->klass->byval_arg.type != MONO_TYPE_R4) && !cfg->disable_vreg_to_lvreg && var != cfg->gsharedvt_info_var && var != cfg->gsharedvt_locals_var && var != cfg->lmf_addr_var) {
13786 * Make that the variable's liveness interval doesn't contain a call, since
13787 * that would cause the lvreg to be spilled, making the whole optimization
13790 /* This is too slow for JIT compilation */
13792 if (cfg->compile_aot && vreg_to_bb [var->dreg]) {
13794 int def_index, call_index, ins_index;
13795 gboolean spilled = FALSE;
13800 for (ins = vreg_to_bb [var->dreg]->code; ins; ins = ins->next) {
13801 const char *spec = INS_INFO (ins->opcode);
13803 if ((spec [MONO_INST_DEST] != ' ') && (ins->dreg == var->dreg))
13804 def_index = ins_index;
/*
 * NOTE(review): the two clauses of this || are identical (both test
 * SRC1/sreg1); the second presumably should test SRC2/sreg2, so SRC2 uses
 * after a call may be missed.  Elided context suggests this sits in a
 * disabled/AOT-only path — confirm against upstream before changing.
 */
13806 if (((spec [MONO_INST_SRC1] != ' ') && (ins->sreg1 == var->dreg)) ||
13807 ((spec [MONO_INST_SRC1] != ' ') && (ins->sreg1 == var->dreg))) {
13808 if (call_index > def_index) {
13814 if (MONO_IS_CALL (ins))
13815 call_index = ins_index;
/* Demote: mark the variable dead and drop its vreg mapping. */
13825 if (G_UNLIKELY (cfg->verbose_level > 2))
13826 printf ("CONVERTED R%d(%d) TO VREG.\n", var->dreg, vmv->idx);
13827 var->flags |= MONO_INST_IS_DEAD;
13828 cfg->vreg_to_inst [var->dreg] = NULL;
13835 * Compress the varinfo and vars tables so the liveness computation is faster and
13836 * takes up less space.
13839 for (i = 0; i < cfg->num_varinfo; ++i) {
13840 MonoInst *var = cfg->varinfo [i];
13841 if (pos < i && cfg->locals_start == i)
13842 cfg->locals_start = pos;
13843 if (!(var->flags & MONO_INST_IS_DEAD)) {
13845 cfg->varinfo [pos] = cfg->varinfo [i];
13846 cfg->varinfo [pos]->inst_c0 = pos;
13847 memcpy (&cfg->vars [pos], &cfg->vars [i], sizeof (MonoMethodVar));
13848 cfg->vars [pos].idx = pos;
13849 #if SIZEOF_REGISTER == 4
13850 if (cfg->varinfo [pos]->type == STACK_I8) {
13851 /* Modify the two component vars too */
13854 var1 = get_vreg_to_inst (cfg, cfg->varinfo [pos]->dreg + 1);
13855 var1->inst_c0 = pos;
13856 var1 = get_vreg_to_inst (cfg, cfg->varinfo [pos]->dreg + 2);
13857 var1->inst_c0 = pos;
13864 cfg->num_varinfo = pos;
13865 if (cfg->locals_start > cfg->num_varinfo)
13866 cfg->locals_start = cfg->num_varinfo;
13870 * mono_spill_global_vars:
13872 * Generate spill code for variables which are not allocated to registers,
13873 * and replace vregs with their allocated hregs. *need_local_opts is set to TRUE if
13874 * code is generated which could be optimized by the local optimization passes.
13877 mono_spill_global_vars (MonoCompile *cfg, gboolean *need_local_opts)
13879 MonoBasicBlock *bb;
13881 int orig_next_vreg;
13882 guint32 *vreg_to_lvreg;
13884 guint32 i, lvregs_len;
13885 gboolean dest_has_lvreg = FALSE;
13886 guint32 stacktypes [128];
13887 MonoInst **live_range_start, **live_range_end;
13888 MonoBasicBlock **live_range_start_bb, **live_range_end_bb;
13889 int *gsharedvt_vreg_to_idx = NULL;
13891 *need_local_opts = FALSE;
/* NOTE(review): spec2 is a scratch opcode-spec buffer — presumably declared at file scope; its definition is not visible in this chunk. */
13893 memset (spec2, 0, sizeof (spec2));
13895 /* FIXME: Move this function to mini.c */
/* Map opcode-spec regtype characters ('i'/'l'/'f'/'x') to the stack-type constants used by alloc_dreg () below. */
13896 stacktypes ['i'] = STACK_PTR;
13897 stacktypes ['l'] = STACK_I8;
13898 stacktypes ['f'] = STACK_R8;
13899 #ifdef MONO_ARCH_SIMD_INTRINSICS
13900 stacktypes ['x'] = STACK_VTYPE;
13903 #if SIZEOF_REGISTER == 4
13904 /* Create MonoInsts for longs */
/* On 32 bit targets a 64 bit variable occupies two component vregs (dreg + 1 = LS word, dreg + 2 = MS word); give each component its own REGOFFSET MonoInst. */
13905 for (i = 0; i < cfg->num_varinfo; i++) {
13906 MonoInst *ins = cfg->varinfo [i];
13908 if ((ins->opcode != OP_REGVAR) && !(ins->flags & MONO_INST_IS_DEAD)) {
13909 switch (ins->type) {
13914 if (ins->type == STACK_R8 && !COMPILE_SOFT_FLOAT (cfg))
13917 g_assert (ins->opcode == OP_REGOFFSET);
13919 tree = get_vreg_to_inst (cfg, ins->dreg + 1);
13921 tree->opcode = OP_REGOFFSET;
13922 tree->inst_basereg = ins->inst_basereg;
13923 tree->inst_offset = ins->inst_offset + MINI_LS_WORD_OFFSET;
13925 tree = get_vreg_to_inst (cfg, ins->dreg + 2);
13927 tree->opcode = OP_REGOFFSET;
13928 tree->inst_basereg = ins->inst_basereg;
13929 tree->inst_offset = ins->inst_offset + MINI_MS_WORD_OFFSET;
13939 if (cfg->compute_gc_maps) {
13940 /* registers need liveness info even for !non refs */
13941 for (i = 0; i < cfg->num_varinfo; i++) {
13942 MonoInst *ins = cfg->varinfo [i];
13944 if (ins->opcode == OP_REGVAR)
13945 ins->flags |= MONO_INST_GC_TRACK;
/*
 * gsharedvt: map each gsharedvt variable's dreg to an info-table slot.
 * Encoding (see the OP_LDADDR handling below): idx + 1 for locals,
 * -1 for arguments passed by reference, 0 (the calloc default) for
 * everything else.
 */
13949 if (cfg->gsharedvt) {
13950 gsharedvt_vreg_to_idx = mono_mempool_alloc0 (cfg->mempool, sizeof (int) * cfg->next_vreg);
13952 for (i = 0; i < cfg->num_varinfo; ++i) {
13953 MonoInst *ins = cfg->varinfo [i];
13956 if (mini_is_gsharedvt_variable_type (cfg, ins->inst_vtype)) {
13957 if (i >= cfg->locals_start) {
13959 idx = get_gsharedvt_info_slot (cfg, ins->inst_vtype, MONO_RGCTX_INFO_LOCAL_OFFSET);
13960 gsharedvt_vreg_to_idx [ins->dreg] = idx + 1;
13961 ins->opcode = OP_GSHAREDVT_LOCAL;
13962 ins->inst_imm = idx;
13965 gsharedvt_vreg_to_idx [ins->dreg] = -1;
13966 ins->opcode = OP_GSHAREDVT_ARG_REGOFFSET;
13972 /* FIXME: widening and truncation */
13975 * As an optimization, when a variable allocated to the stack is first loaded into
13976 * an lvreg, we will remember the lvreg and use it the next time instead of loading
13977 * the variable again.
13979 orig_next_vreg = cfg->next_vreg;
13980 vreg_to_lvreg = mono_mempool_alloc0 (cfg->mempool, sizeof (guint32) * cfg->next_vreg);
/* Fixed-capacity list of vregs with a cached lvreg; the g_assert (lvregs_len < 1024) checks below enforce the bound. */
13981 lvregs = mono_mempool_alloc (cfg->mempool, sizeof (guint32) * 1024);
13985 * These arrays contain the first and last instructions accessing a given
13987 * Since we emit bblocks in the same order we process them here, and we
13988 * don't split live ranges, these will precisely describe the live range of
13989 * the variable, i.e. the instruction range where a valid value can be found
13990 * in the variable's location.
13991 * The live range is computed using the liveness info computed by the liveness pass.
13992 * We can't use vmv->range, since that is an abstract live range, and we need
13993 * one which is instruction precise.
13994 * FIXME: Variables used in out-of-line bblocks have a hole in their live range.
13996 /* FIXME: Only do this if debugging info is requested */
13997 live_range_start = g_new0 (MonoInst*, cfg->next_vreg);
13998 live_range_end = g_new0 (MonoInst*, cfg->next_vreg);
13999 live_range_start_bb = g_new (MonoBasicBlock*, cfg->next_vreg);
14000 live_range_end_bb = g_new (MonoBasicBlock*, cfg->next_vreg);
14002 /* Add spill loads/stores */
14003 for (bb = cfg->bb_entry; bb; bb = bb->next_bb) {
14006 if (cfg->verbose_level > 2)
14007 printf ("\nSPILL BLOCK %d:\n", bb->block_num);
14009 /* Clear vreg_to_lvreg array */
/* lvreg caching is per-bblock: entries recorded in the previous bblock are invalidated here. */
14010 for (i = 0; i < lvregs_len; i++)
14011 vreg_to_lvreg [lvregs [i]] = 0;
/* Rewrite every instruction in the bblock: spill definitions of stack-allocated variables, reload their uses. */
14015 MONO_BB_FOR_EACH_INS (bb, ins) {
14016 const char *spec = INS_INFO (ins->opcode);
14017 int regtype, srcindex, sreg, tmp_reg, prev_dreg, num_sregs;
14018 gboolean store, no_lvreg;
14019 int sregs [MONO_MAX_SRC_REGS];
14021 if (G_UNLIKELY (cfg->verbose_level > 2))
14022 mono_print_ins (ins);
14024 if (ins->opcode == OP_NOP)
14028 * We handle LDADDR here as well, since it can only be decomposed
14029 * when variable addresses are known.
14031 if (ins->opcode == OP_LDADDR) {
14032 MonoInst *var = ins->inst_p0;
14034 if (var->opcode == OP_VTARG_ADDR) {
14035 /* Happens on SPARC/S390 where vtypes are passed by reference */
14036 MonoInst *vtaddr = var->inst_left;
14037 if (vtaddr->opcode == OP_REGVAR) {
14038 ins->opcode = OP_MOVE;
14039 ins->sreg1 = vtaddr->dreg;
14041 else if (var->inst_left->opcode == OP_REGOFFSET) {
14042 ins->opcode = OP_LOAD_MEMBASE;
14043 ins->inst_basereg = vtaddr->inst_basereg;
14044 ins->inst_offset = vtaddr->inst_offset;
14047 } else if (cfg->gsharedvt && gsharedvt_vreg_to_idx [var->dreg] < 0) {
14048 /* gsharedvt arg passed by ref */
14049 g_assert (var->opcode == OP_GSHAREDVT_ARG_REGOFFSET);
14051 ins->opcode = OP_LOAD_MEMBASE;
14052 ins->inst_basereg = var->inst_basereg;
14053 ins->inst_offset = var->inst_offset;
14054 } else if (cfg->gsharedvt && gsharedvt_vreg_to_idx [var->dreg]) {
14055 MonoInst *load, *load2, *load3;
14056 int idx = gsharedvt_vreg_to_idx [var->dreg] - 1;
14057 int reg1, reg2, reg3;
14058 MonoInst *info_var = cfg->gsharedvt_info_var;
14059 MonoInst *locals_var = cfg->gsharedvt_locals_var;
14063 * Compute the address of the local as gsharedvt_locals_var + gsharedvt_info_var->locals_offsets [idx].
14066 g_assert (var->opcode == OP_GSHAREDVT_LOCAL);
14068 g_assert (info_var);
14069 g_assert (locals_var);
14071 /* Mark the instruction used to compute the locals var as used */
14072 cfg->gsharedvt_locals_var_ins = NULL;
14074 /* Load the offset */
14075 if (info_var->opcode == OP_REGOFFSET) {
14076 reg1 = alloc_ireg (cfg);
14077 NEW_LOAD_MEMBASE (cfg, load, OP_LOAD_MEMBASE, reg1, info_var->inst_basereg, info_var->inst_offset);
14078 } else if (info_var->opcode == OP_REGVAR) {
14080 reg1 = info_var->dreg;
14082 g_assert_not_reached ();
14084 reg2 = alloc_ireg (cfg);
14085 NEW_LOAD_MEMBASE (cfg, load2, OP_LOADI4_MEMBASE, reg2, reg1, MONO_STRUCT_OFFSET (MonoGSharedVtMethodRuntimeInfo, entries) + (idx * sizeof (gpointer)));
14086 /* Load the locals area address */
14087 reg3 = alloc_ireg (cfg);
14088 if (locals_var->opcode == OP_REGOFFSET) {
14089 NEW_LOAD_MEMBASE (cfg, load3, OP_LOAD_MEMBASE, reg3, locals_var->inst_basereg, locals_var->inst_offset);
14090 } else if (locals_var->opcode == OP_REGVAR) {
14091 NEW_UNALU (cfg, load3, OP_MOVE, reg3, locals_var->dreg);
14093 g_assert_not_reached ();
14095 /* Compute the address */
14096 ins->opcode = OP_PADD;
/* Insert the three helper loads in front of ins, preserving their dependency order: load -> load2 -> load3 -> ins. */
14100 mono_bblock_insert_before_ins (bb, ins, load3);
14101 mono_bblock_insert_before_ins (bb, load3, load2);
14103 mono_bblock_insert_before_ins (bb, load2, load);
14105 g_assert (var->opcode == OP_REGOFFSET);
/* Plain stack variable: its address is simply basereg + offset. */
14107 ins->opcode = OP_ADD_IMM;
14108 ins->sreg1 = var->inst_basereg;
14109 ins->inst_imm = var->inst_offset;
14112 *need_local_opts = TRUE;
14113 spec = INS_INFO (ins->opcode);
/* By this point all CIL-level opcodes must have been lowered to machine-level ones. */
14116 if (ins->opcode < MONO_CEE_LAST) {
14117 mono_print_ins (ins);
14118 g_assert_not_reached ();
14122 * Store opcodes have destbasereg in the dreg, but in reality, it is an
14126 if (MONO_IS_STORE_MEMBASE (ins)) {
/* Temporarily swap dreg<->sreg2 and build a spec (spec2) that treats the base register as a source, so the generic dreg/sreg code below handles stores correctly. The swap is undone further down. */
14127 tmp_reg = ins->dreg;
14128 ins->dreg = ins->sreg2;
14129 ins->sreg2 = tmp_reg;
14132 spec2 [MONO_INST_DEST] = ' ';
14133 spec2 [MONO_INST_SRC1] = spec [MONO_INST_SRC1];
14134 spec2 [MONO_INST_SRC2] = spec [MONO_INST_DEST];
14135 spec2 [MONO_INST_SRC3] = ' ';
14137 } else if (MONO_IS_STORE_MEMINDEX (ins))
14138 g_assert_not_reached ();
14143 if (G_UNLIKELY (cfg->verbose_level > 2)) {
14144 printf ("\t %.3s %d", spec, ins->dreg);
14145 num_sregs = mono_inst_get_src_registers (ins, sregs);
14146 for (srcindex = 0; srcindex < num_sregs; ++srcindex)
14147 printf (" %d", sregs [srcindex]);
/*
 * DREG HANDLING: if the destination vreg maps to a variable, either
 * rename it to the variable's hreg (OP_REGVAR), fuse the store into
 * the instruction itself, or emit an explicit spill store after it.
 */
14154 regtype = spec [MONO_INST_DEST];
14155 g_assert (((ins->dreg == -1) && (regtype == ' ')) || ((ins->dreg != -1) && (regtype != ' ')));
14158 if ((ins->dreg != -1) && get_vreg_to_inst (cfg, ins->dreg)) {
14159 MonoInst *var = get_vreg_to_inst (cfg, ins->dreg);
14160 MonoInst *store_ins;
14162 MonoInst *def_ins = ins;
14163 int dreg = ins->dreg; /* The original vreg */
14165 store_opcode = mono_type_to_store_membase (cfg, var->inst_vtype);
14167 if (var->opcode == OP_REGVAR) {
14168 ins->dreg = var->dreg;
14169 } else if ((ins->dreg == ins->sreg1) && (spec [MONO_INST_DEST] == 'i') && (spec [MONO_INST_SRC1] == 'i') && !vreg_to_lvreg [ins->dreg] && (op_to_op_dest_membase (store_opcode, ins->opcode) != -1)) {
14171 * Instead of emitting a load+store, use a _membase opcode.
14173 g_assert (var->opcode == OP_REGOFFSET);
14174 if (ins->opcode == OP_MOVE) {
14178 ins->opcode = op_to_op_dest_membase (store_opcode, ins->opcode);
14179 ins->inst_basereg = var->inst_basereg;
14180 ins->inst_offset = var->inst_offset;
14183 spec = INS_INFO (ins->opcode);
14187 g_assert (var->opcode == OP_REGOFFSET);
14189 prev_dreg = ins->dreg;
14191 /* Invalidate any previous lvreg for this vreg */
14192 vreg_to_lvreg [ins->dreg] = 0;
14196 if (COMPILE_SOFT_FLOAT (cfg) && store_opcode == OP_STORER8_MEMBASE_REG) {
/* soft-float: R8 values live in integer register pairs, so store them as I8. */
14198 store_opcode = OP_STOREI8_MEMBASE_REG;
/* Redirect the definition into a fresh lvreg; the spill store emitted below moves it to the variable's stack slot. */
14201 ins->dreg = alloc_dreg (cfg, stacktypes [regtype]);
14203 #if SIZEOF_REGISTER != 8
14204 if (regtype == 'l') {
14205 NEW_STORE_MEMBASE (cfg, store_ins, OP_STOREI4_MEMBASE_REG, var->inst_basereg, var->inst_offset + MINI_LS_WORD_OFFSET, ins->dreg + 1);
14206 mono_bblock_insert_after_ins (bb, ins, store_ins);
14207 NEW_STORE_MEMBASE (cfg, store_ins, OP_STOREI4_MEMBASE_REG, var->inst_basereg, var->inst_offset + MINI_MS_WORD_OFFSET, ins->dreg + 2);
14208 mono_bblock_insert_after_ins (bb, ins, store_ins);
14209 def_ins = store_ins;
14214 g_assert (store_opcode != OP_STOREV_MEMBASE);
14216 /* Try to fuse the store into the instruction itself */
14217 /* FIXME: Add more instructions */
14218 if (!lvreg && ((ins->opcode == OP_ICONST) || ((ins->opcode == OP_I8CONST) && (ins->inst_c0 == 0)))) {
14219 ins->opcode = store_membase_reg_to_store_membase_imm (store_opcode);
14220 ins->inst_imm = ins->inst_c0;
14221 ins->inst_destbasereg = var->inst_basereg;
14222 ins->inst_offset = var->inst_offset;
14223 spec = INS_INFO (ins->opcode);
14224 } else if (!lvreg && ((ins->opcode == OP_MOVE) || (ins->opcode == OP_FMOVE) || (ins->opcode == OP_LMOVE) || (ins->opcode == OP_RMOVE))) {
/* A move into a spilled variable becomes a direct store of the source register. */
14225 ins->opcode = store_opcode;
14226 ins->inst_destbasereg = var->inst_basereg;
14227 ins->inst_offset = var->inst_offset;
14231 tmp_reg = ins->dreg;
14232 ins->dreg = ins->sreg2;
14233 ins->sreg2 = tmp_reg;
14236 spec2 [MONO_INST_DEST] = ' ';
14237 spec2 [MONO_INST_SRC1] = spec [MONO_INST_SRC1];
14238 spec2 [MONO_INST_SRC2] = spec [MONO_INST_DEST];
14239 spec2 [MONO_INST_SRC3] = ' ';
14241 } else if (!lvreg && (op_to_op_store_membase (store_opcode, ins->opcode) != -1)) {
14242 // FIXME: The backends expect the base reg to be in inst_basereg
14243 ins->opcode = op_to_op_store_membase (store_opcode, ins->opcode);
14245 ins->inst_basereg = var->inst_basereg;
14246 ins->inst_offset = var->inst_offset;
14247 spec = INS_INFO (ins->opcode);
14249 /* printf ("INS: "); mono_print_ins (ins); */
14250 /* Create a store instruction */
14251 NEW_STORE_MEMBASE (cfg, store_ins, store_opcode, var->inst_basereg, var->inst_offset, ins->dreg);
14253 /* Insert it after the instruction */
14254 mono_bblock_insert_after_ins (bb, ins, store_ins);
14256 def_ins = store_ins;
14259 * We can't assign ins->dreg to var->dreg here, since the
14260 * sregs could use it. So set a flag, and do it after
14263 if ((!MONO_ARCH_USE_FPSTACK || ((store_opcode != OP_STORER8_MEMBASE_REG) && (store_opcode != OP_STORER4_MEMBASE_REG))) && !((var)->flags & (MONO_INST_VOLATILE|MONO_INST_INDIRECT)))
14264 dest_has_lvreg = TRUE;
14269 if (def_ins && !live_range_start [dreg]) {
14270 live_range_start [dreg] = def_ins;
14271 live_range_start_bb [dreg] = bb;
14274 if (cfg->compute_gc_maps && def_ins && (var->flags & MONO_INST_GC_TRACK)) {
14277 MONO_INST_NEW (cfg, tmp, OP_GC_LIVENESS_DEF);
14278 tmp->inst_c1 = dreg;
14279 mono_bblock_insert_after_ins (bb, def_ins, tmp);
/*
 * SREG HANDLING: for each source register that maps to a variable,
 * rename it (OP_REGVAR), reuse a cached lvreg, fuse the load into the
 * instruction, or emit an explicit reload before it.
 */
14286 num_sregs = mono_inst_get_src_registers (ins, sregs);
14287 for (srcindex = 0; srcindex < 3; ++srcindex) {
14288 regtype = spec [MONO_INST_SRC1 + srcindex];
14289 sreg = sregs [srcindex];
14291 g_assert (((sreg == -1) && (regtype == ' ')) || ((sreg != -1) && (regtype != ' ')));
14292 if ((sreg != -1) && get_vreg_to_inst (cfg, sreg)) {
14293 MonoInst *var = get_vreg_to_inst (cfg, sreg);
14294 MonoInst *use_ins = ins;
14295 MonoInst *load_ins;
14296 guint32 load_opcode;
14298 if (var->opcode == OP_REGVAR) {
14299 sregs [srcindex] = var->dreg;
14300 //mono_inst_set_src_registers (ins, sregs);
14301 live_range_end [sreg] = use_ins;
14302 live_range_end_bb [sreg] = bb;
14304 if (cfg->compute_gc_maps && var->dreg < orig_next_vreg && (var->flags & MONO_INST_GC_TRACK)) {
14307 MONO_INST_NEW (cfg, tmp, OP_GC_LIVENESS_USE);
14308 /* var->dreg is a hreg */
14309 tmp->inst_c1 = sreg;
14310 mono_bblock_insert_after_ins (bb, ins, tmp);
14316 g_assert (var->opcode == OP_REGOFFSET);
14318 load_opcode = mono_type_to_load_membase (cfg, var->inst_vtype);
14320 g_assert (load_opcode != OP_LOADV_MEMBASE);
14322 if (vreg_to_lvreg [sreg]) {
14323 g_assert (vreg_to_lvreg [sreg] != -1);
14325 /* The variable is already loaded to an lvreg */
14326 if (G_UNLIKELY (cfg->verbose_level > 2))
14327 printf ("\t\tUse lvreg R%d for R%d.\n", vreg_to_lvreg [sreg], sreg);
14328 sregs [srcindex] = vreg_to_lvreg [sreg];
14329 //mono_inst_set_src_registers (ins, sregs);
14333 /* Try to fuse the load into the instruction */
14334 if ((srcindex == 0) && (op_to_op_src1_membase (load_opcode, ins->opcode) != -1)) {
14335 ins->opcode = op_to_op_src1_membase (load_opcode, ins->opcode);
14336 sregs [0] = var->inst_basereg;
14337 //mono_inst_set_src_registers (ins, sregs);
14338 ins->inst_offset = var->inst_offset;
14339 } else if ((srcindex == 1) && (op_to_op_src2_membase (load_opcode, ins->opcode) != -1)) {
14340 ins->opcode = op_to_op_src2_membase (load_opcode, ins->opcode);
14341 sregs [1] = var->inst_basereg;
14342 //mono_inst_set_src_registers (ins, sregs);
14343 ins->inst_offset = var->inst_offset;
14345 if (MONO_IS_REAL_MOVE (ins)) {
14346 ins->opcode = OP_NOP;
14349 //printf ("%d ", srcindex); mono_print_ins (ins);
14351 sreg = alloc_dreg (cfg, stacktypes [regtype]);
/* Cache the freshly loaded value as the variable's lvreg unless the arch/flags forbid it. */
14353 if ((!MONO_ARCH_USE_FPSTACK || ((load_opcode != OP_LOADR8_MEMBASE) && (load_opcode != OP_LOADR4_MEMBASE))) && !((var)->flags & (MONO_INST_VOLATILE|MONO_INST_INDIRECT)) && !no_lvreg) {
14354 if (var->dreg == prev_dreg) {
14356 * sreg refers to the value loaded by the load
14357 * emitted below, but we need to use ins->dreg
14358 * since it refers to the store emitted earlier.
14362 g_assert (sreg != -1);
14363 vreg_to_lvreg [var->dreg] = sreg;
14364 g_assert (lvregs_len < 1024);
14365 lvregs [lvregs_len ++] = var->dreg;
14369 sregs [srcindex] = sreg;
14370 //mono_inst_set_src_registers (ins, sregs);
14372 #if SIZEOF_REGISTER != 8
14373 if (regtype == 'l') {
14374 NEW_LOAD_MEMBASE (cfg, load_ins, OP_LOADI4_MEMBASE, sreg + 2, var->inst_basereg, var->inst_offset + MINI_MS_WORD_OFFSET);
14375 mono_bblock_insert_before_ins (bb, ins, load_ins);
14376 NEW_LOAD_MEMBASE (cfg, load_ins, OP_LOADI4_MEMBASE, sreg + 1, var->inst_basereg, var->inst_offset + MINI_LS_WORD_OFFSET);
14377 mono_bblock_insert_before_ins (bb, ins, load_ins);
14378 use_ins = load_ins;
14383 #if SIZEOF_REGISTER == 4
14384 g_assert (load_opcode != OP_LOADI8_MEMBASE);
14386 NEW_LOAD_MEMBASE (cfg, load_ins, load_opcode, sreg, var->inst_basereg, var->inst_offset);
14387 mono_bblock_insert_before_ins (bb, ins, load_ins);
14388 use_ins = load_ins;
14392 if (var->dreg < orig_next_vreg) {
14393 live_range_end [var->dreg] = use_ins;
14394 live_range_end_bb [var->dreg] = bb;
14397 if (cfg->compute_gc_maps && var->dreg < orig_next_vreg && (var->flags & MONO_INST_GC_TRACK)) {
14400 MONO_INST_NEW (cfg, tmp, OP_GC_LIVENESS_USE);
14401 tmp->inst_c1 = var->dreg;
14402 mono_bblock_insert_after_ins (bb, ins, tmp);
14406 mono_inst_set_src_registers (ins, sregs);
/* Now that the sregs have been processed it is safe to publish the dreg's lvreg (deferred via dest_has_lvreg above). */
14408 if (dest_has_lvreg) {
14409 g_assert (ins->dreg != -1);
14410 vreg_to_lvreg [prev_dreg] = ins->dreg;
14411 g_assert (lvregs_len < 1024);
14412 lvregs [lvregs_len ++] = prev_dreg;
14413 dest_has_lvreg = FALSE;
/* Undo the dreg<->sreg2 swap performed for store opcodes above. */
14417 tmp_reg = ins->dreg;
14418 ins->dreg = ins->sreg2;
14419 ins->sreg2 = tmp_reg;
14422 if (MONO_IS_CALL (ins)) {
14423 /* Clear vreg_to_lvreg array */
/* Cached lvregs do not survive a call, so drop the whole cache. */
14424 for (i = 0; i < lvregs_len; i++)
14425 vreg_to_lvreg [lvregs [i]] = 0;
14427 } else if (ins->opcode == OP_NOP) {
14429 MONO_INST_NULLIFY_SREGS (ins);
14432 if (cfg->verbose_level > 2)
14433 mono_print_ins_index (1, ins);
14436 /* Extend the live range based on the liveness info */
14437 if (cfg->compute_precise_live_ranges && bb->live_out_set && bb->code) {
14438 for (i = 0; i < cfg->num_varinfo; i ++) {
14439 MonoMethodVar *vi = MONO_VARINFO (cfg, i);
14441 if (vreg_is_volatile (cfg, vi->vreg))
14442 /* The liveness info is incomplete */
14445 if (mono_bitset_test_fast (bb->live_in_set, i) && !live_range_start [vi->vreg]) {
14446 /* Live from at least the first ins of this bb */
14447 live_range_start [vi->vreg] = bb->code;
14448 live_range_start_bb [vi->vreg] = bb;
14451 if (mono_bitset_test_fast (bb->live_out_set, i)) {
14452 /* Live at least until the last ins of this bb */
14453 live_range_end [vi->vreg] = bb->last_ins;
14454 live_range_end_bb [vi->vreg] = bb;
14460 #ifdef MONO_ARCH_HAVE_LIVERANGE_OPS
14462 * Emit LIVERANGE_START/LIVERANGE_END opcodes, the backend will implement them
14463 * by storing the current native offset into MonoMethodVar->live_range_start/end.
14465 if (cfg->compute_precise_live_ranges && cfg->comp_done & MONO_COMP_LIVENESS) {
14466 for (i = 0; i < cfg->num_varinfo; ++i) {
14467 int vreg = MONO_VARINFO (cfg, i)->vreg;
14470 if (live_range_start [vreg]) {
14471 MONO_INST_NEW (cfg, ins, OP_LIVERANGE_START);
14473 ins->inst_c1 = vreg;
14474 mono_bblock_insert_after_ins (live_range_start_bb [vreg], live_range_start [vreg], ins);
14476 if (live_range_end [vreg]) {
14477 MONO_INST_NEW (cfg, ins, OP_LIVERANGE_END);
14479 ins->inst_c1 = vreg;
14480 if (live_range_end [vreg] == live_range_end_bb [vreg]->last_ins)
14481 mono_add_ins_to_end (live_range_end_bb [vreg], ins);
14483 mono_bblock_insert_after_ins (live_range_end_bb [vreg], live_range_end [vreg], ins);
14489 if (cfg->gsharedvt_locals_var_ins) {
14490 /* Nullify if unused */
14491 cfg->gsharedvt_locals_var_ins->opcode = OP_PCONST;
14492 cfg->gsharedvt_locals_var_ins->inst_imm = 0;
14495 g_free (live_range_start);
14496 g_free (live_range_end);
14497 g_free (live_range_start_bb);
14498 g_free (live_range_end_bb);
14503 * - use 'iadd' instead of 'int_add'
14504 * - handling ovf opcodes: decompose in method_to_ir.
14505 * - unify iregs/fregs
14506 * -> partly done, the missing parts are:
14507 * - a more complete unification would involve unifying the hregs as well, so
14508 * code wouldn't need if (fp) all over the place. but that would mean the hregs
14509 * would no longer map to the machine hregs, so the code generators would need to
14510 * be modified. Also, on ia64 for example, niregs + nfregs > 256 -> bitmasks
14511 * wouldn't work any more. Duplicating the code in mono_local_regalloc () into
14512 * fp/non-fp branches speeds it up by about 15%.
14513 * - use sext/zext opcodes instead of shifts
14515 * - get rid of TEMPLOADs if possible and use vregs instead
14516 * - clean up usage of OP_P/OP_ opcodes
14517 * - cleanup usage of DUMMY_USE
14518 * - cleanup the setting of ins->type for MonoInst's which are pushed on the
14520 * - set the stack type and allocate a dreg in the EMIT_NEW macros
14521 * - get rid of all the <foo>2 stuff when the new JIT is ready.
14522 * - make sure handle_stack_args () is called before the branch is emitted
14523 * - when the new IR is done, get rid of all unused stuff
14524 * - COMPARE/BEQ as separate instructions or unify them ?
14525 * - keeping them separate allows specialized compare instructions like
14526 * compare_imm, compare_membase
14527 * - most back ends unify fp compare+branch, fp compare+ceq
14528 * - integrate mono_save_args into inline_method
14529 * - get rid of the empty bblocks created by MONO_EMIT_NEW_BRACH_BLOCK2
14530 * - handle long shift opts on 32 bit platforms somehow: they require
14531 * 3 sregs (2 for arg1 and 1 for arg2)
14532 * - make byref a 'normal' type.
14533 * - use vregs for bb->out_stacks if possible, handle_global_vreg will make them a
14534 * variable if needed.
14535 * - do not start a new IL level bblock when cfg->cbb is changed by a function call
14536 * like inline_method.
14537 * - remove inlining restrictions
14538 * - fix LNEG and enable cfold of INEG
14539 * - generalize x86 optimizations like ldelema as a peephole optimization
14540 * - add store_mem_imm for amd64
14541 * - optimize the loading of the interruption flag in the managed->native wrappers
14542 * - avoid special handling of OP_NOP in passes
14543 * - move code inserting instructions into one function/macro.
14544 * - try a coalescing phase after liveness analysis
14545 * - add float -> vreg conversion + local optimizations on !x86
14546 * - figure out how to handle decomposed branches during optimizations, ie.
14547 * compare+branch, op_jump_table+op_br etc.
14548 * - promote RuntimeXHandles to vregs
14549 * - vtype cleanups:
14550 * - add a NEW_VARLOADA_VREG macro
14551 * - the vtype optimizations are blocked by the LDADDR opcodes generated for
14552 * accessing vtype fields.
14553 * - get rid of I8CONST on 64 bit platforms
14554 * - dealing with the increase in code size due to branches created during opcode
14556 * - use extended basic blocks
14557 * - all parts of the JIT
14558 * - handle_global_vregs () && local regalloc
14559 * - avoid introducing global vregs during decomposition, like 'vtable' in isinst
14560 * - sources of increase in code size:
14563 * - isinst and castclass
14564 * - lvregs not allocated to global registers even if used multiple times
14565 * - call cctors outside the JIT, to make -v output more readable and JIT timings more
14567 * - check for fp stack leakage in other opcodes too. (-> 'exceptions' optimization)
14568 * - add all micro optimizations from the old JIT
14569 * - put tree optimizations into the deadce pass
14570 * - decompose op_start_handler/op_endfilter/op_endfinally earlier using an arch
14571 * specific function.
14572 * - unify the float comparison opcodes with the other comparison opcodes, i.e.
14573 * fcompare + branchCC.
14574 * - create a helper function for allocating a stack slot, taking into account
14575 * MONO_CFG_HAS_SPILLUP.
14577 * - merge the ia64 switch changes.
14578 * - optimize mono_regstate2_alloc_int/float.
14579 * - fix the pessimistic handling of variables accessed in exception handler blocks.
14580 * - need to write a tree optimization pass, but the creation of trees is difficult, i.e.
14581 * parts of the tree could be separated by other instructions, killing the tree
14582 * arguments, or stores killing loads etc. Also, should we fold loads into other
14583 * instructions if the result of the load is used multiple times ?
14584 * - make the REM_IMM optimization in mini-x86.c arch-independent.
14585 * - LAST MERGE: 108395.
14586 * - when returning vtypes in registers, generate IR and append it to the end of the
14587 * last bb instead of doing it in the epilog.
14588 * - change the store opcodes so they use sreg1 instead of dreg to store the base register.
14596 - When to decompose opcodes:
14597 - earlier: this makes some optimizations hard to implement, since the low level IR
14598 no longer contains the necessary information. But it is easier to do.
14599 - later: harder to implement, enables more optimizations.
14600 - Branches inside bblocks:
14601 - created when decomposing complex opcodes.
14602 - branches to another bblock: harmless, but not tracked by the branch
14603 optimizations, so need to branch to a label at the start of the bblock.
14604 - branches to inside the same bblock: very problematic, trips up the local
14605 reg allocator. Can be fixed by splitting the current bblock, but that is a
14606 complex operation, since some local vregs can become global vregs etc.
14607 - Local/global vregs:
14608 - local vregs: temporary vregs used inside one bblock. Assigned to hregs by the
14609 local register allocator.
14610 - global vregs: used in more than one bblock. Have an associated MonoMethodVar
14611 structure, created by mono_create_var (). Assigned to hregs or the stack by
14612 the global register allocator.
14613 - When to do optimizations like alu->alu_imm:
14614 - earlier -> saves work later on since the IR will be smaller/simpler
14615 - later -> can work on more instructions
14616 - Handling of valuetypes:
14617 - When a vtype is pushed on the stack, a new temporary is created, an
14618 instruction computing its address (LDADDR) is emitted and pushed on
14619 the stack. Need to optimize cases when the vtype is used immediately as in
14620 argument passing, stloc etc.
14621 - Instead of the to_end stuff in the old JIT, simply call the function handling
14622 the values on the stack before emitting the last instruction of the bb.
14625 #endif /* DISABLE_JIT */